-rw-r--r-- | Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml | 16
-rw-r--r-- | Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml | 135
-rw-r--r-- | arch/arm64/boot/dts/xilinx/zynqmp.dtsi | 33
-rw-r--r-- | drivers/firmware/xilinx/zynqmp.c | 97
-rw-r--r-- | drivers/remoteproc/Kconfig | 13
-rw-r--r-- | drivers/remoteproc/Makefile | 1
-rw-r--r-- | drivers/remoteproc/imx_dsp_rproc.c | 12
-rw-r--r-- | drivers/remoteproc/imx_rproc.c | 298
-rw-r--r-- | drivers/remoteproc/qcom_q6v5_pas.c | 4
-rw-r--r-- | drivers/remoteproc/qcom_q6v5_wcss.c | 6
-rw-r--r-- | drivers/remoteproc/qcom_sysmon.c | 13
-rw-r--r-- | drivers/remoteproc/remoteproc_core.c | 19
-rw-r--r-- | drivers/remoteproc/xlnx_r5_remoteproc.c | 1067
-rw-r--r-- | include/dt-bindings/power/xlnx-zynqmp-power.h | 6
-rw-r--r-- | include/linux/firmware/xlnx-zynqmp.h | 60
15 files changed, 1760 insertions, 20 deletions
diff --git a/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml index e732255b9019..ae2eab4452dd 100644 --- a/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml +++ b/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml @@ -22,6 +22,8 @@ properties: - fsl,imx8mn-cm7 - fsl,imx8mp-cm7 - fsl,imx8mq-cm4 + - fsl,imx8qm-cm4 + - fsl,imx8qxp-cm4 - fsl,imx8ulp-cm33 - fsl,imx93-cm33 @@ -54,12 +56,26 @@ properties: minItems: 1 maxItems: 32 + power-domains: + maxItems: 8 + fsl,auto-boot: $ref: /schemas/types.yaml#/definitions/flag description: Indicate whether need to load the default firmware and start the remote processor automatically. + fsl,entry-address: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + Specify CPU entry address for SCU enabled processor. + + fsl,resource-id: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + This property is to specify the resource id of the remote processor in SoC + which supports SCFW + required: - compatible diff --git a/Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml b/Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml new file mode 100644 index 000000000000..9f677367dd9f --- /dev/null +++ b/Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml @@ -0,0 +1,135 @@ +# SPDX-License-Identifier: (GPL-2.0-only or BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/remoteproc/xlnx,zynqmp-r5fss.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Xilinx R5F processor subsystem + +maintainers: + - Ben Levinsky <ben.levinsky@amd.com> + - Tanmay Shah <tanmay.shah@amd.com> + +description: | + The Xilinx platforms include a pair of Cortex-R5F processors (RPU) for + real-time processing based on the Cortex-R5F processor core from ARM. + The Cortex-R5F processor implements the Arm v7-R architecture and includes a + floating-point unit that implements the Arm VFPv3 instruction set. + +properties: + compatible: + const: xlnx,zynqmp-r5fss + + xlnx,cluster-mode: + $ref: /schemas/types.yaml#/definitions/uint32 + enum: [0, 1, 2] + description: | + The RPU MPCore can operate in split mode (Dual-processor performance), Safety + lock-step mode(Both RPU cores execute the same code in lock-step, + clock-for-clock) or Single CPU mode (RPU core 0 is held in reset while + core 1 runs normally). The processor does not support dynamic configuration. + Switching between modes is only permitted immediately after a processor reset. + If set to 1 then lockstep mode and if 0 then split mode. + If set to 2 then single CPU mode. When not defined, default will be lockstep mode. + In summary, + 0: split mode + 1: lockstep mode (default) + 2: single cpu mode + +patternProperties: + "^r5f-[a-f0-9]+$": + type: object + description: | + The RPU is located in the Low Power Domain of the Processor Subsystem. + Each processor includes separate L1 instruction and data caches and + tightly coupled memories (TCM). System memory is cacheable, but the TCM + memory space is non-cacheable. + + Each RPU contains one 64KB memory and two 32KB memories that + are accessed via the TCM A and B port interfaces, for a total of 128KB + per processor. In lock-step mode, the processor has access to 256KB of + TCM memory. 
+ + properties: + compatible: + const: xlnx,zynqmp-r5f + + power-domains: + maxItems: 1 + + mboxes: + minItems: 1 + items: + - description: mailbox channel to send data to RPU + - description: mailbox channel to receive data from RPU + + mbox-names: + minItems: 1 + items: + - const: tx + - const: rx + + sram: + $ref: /schemas/types.yaml#/definitions/phandle-array + minItems: 1 + maxItems: 8 + items: + maxItems: 1 + description: | + phandles to one or more reserved on-chip SRAM regions. Other than TCM, + the RPU can execute instructions and access data from the OCM memory, + the main DDR memory, and other system memories. + + The regions should be defined as child nodes of the respective SRAM + node, and should be defined as per the generic bindings in + Documentation/devicetree/bindings/sram/sram.yaml + + memory-region: + description: | + List of phandles to the reserved memory regions associated with the + remoteproc device. This is variable and describes the memories shared with + the remote processor (e.g. remoteproc firmware and carveouts, rpmsg + vrings, ...). This reserved memory region will be allocated in DDR memory. + minItems: 1 + maxItems: 8 + items: + - description: region used for RPU firmware image section + - description: vdev buffer + - description: vring0 + - description: vring1 + additionalItems: true + + required: + - compatible + - power-domains + + unevaluatedProperties: false + +required: + - compatible + +additionalProperties: false + +examples: + - | + remoteproc { + compatible = "xlnx,zynqmp-r5fss"; + xlnx,cluster-mode = <1>; + + r5f-0 { + compatible = "xlnx,zynqmp-r5f"; + power-domains = <&zynqmp_firmware 0x7>; + memory-region = <&rproc_0_fw_image>, <&rpu0vdev0buffer>, <&rpu0vdev0vring0>, <&rpu0vdev0vring1>; + mboxes = <&ipi_mailbox_rpu0 0>, <&ipi_mailbox_rpu0 1>; + mbox-names = "tx", "rx"; + }; + + r5f-1 { + compatible = "xlnx,zynqmp-r5f"; + power-domains = <&zynqmp_firmware 0x8>; + memory-region = <&rproc_1_fw_image>, <&rpu1vdev0buffer>, <&rpu1vdev0vring0>, <&rpu1vdev0vring1>; + mboxes = <&ipi_mailbox_rpu1 0>, <&ipi_mailbox_rpu1 1>; + mbox-names = "tx", "rx"; + }; + }; +... 
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi index 307c76cd8544..4325cb8526ed 100644 --- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi +++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi @@ -100,6 +100,22 @@ }; }; + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + rproc_0_fw_image: memory@3ed00000 { + no-map; + reg = <0x0 0x3ed00000 0x0 0x40000>; + }; + + rproc_1_fw_image: memory@3ef00000 { + no-map; + reg = <0x0 0x3ef00000 0x0 0x40000>; + }; + }; + zynqmp_ipi: zynqmp_ipi { compatible = "xlnx,zynqmp-ipi-mailbox"; interrupt-parent = <&gic>; @@ -203,6 +219,23 @@ ranges; }; + remoteproc { + compatible = "xlnx,zynqmp-r5fss"; + xlnx,cluster-mode = <1>; + + r5f-0 { + compatible = "xlnx,zynqmp-r5f"; + power-domains = <&zynqmp_firmware PD_RPU_0>; + memory-region = <&rproc_0_fw_image>; + }; + + r5f-1 { + compatible = "xlnx,zynqmp-r5f"; + power-domains = <&zynqmp_firmware PD_RPU_1>; + memory-region = <&rproc_1_fw_image>; + }; + }; + amba: axi { compatible = "simple-bus"; #address-cells = <2>; diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index 6bc6b6c84241..129f68d7a6f5 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -1167,6 +1167,103 @@ int zynqmp_pm_release_node(const u32 node) EXPORT_SYMBOL_GPL(zynqmp_pm_release_node); /** + * zynqmp_pm_get_rpu_mode() - Get RPU mode + * @node_id: Node ID of the device + * @rpu_mode: return by reference value + * either split or lockstep + * + * Return: return 0 on success or error+reason. + * if success, then rpu_mode will be set + * to current rpu mode. + */ +int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode) +{ + u32 ret_payload[PAYLOAD_ARG_CNT]; + int ret; + + ret = zynqmp_pm_invoke_fn(PM_IOCTL, node_id, + IOCTL_GET_RPU_OPER_MODE, 0, 0, ret_payload); + + /* only set rpu_mode if no error */ + if (ret == XST_PM_SUCCESS) + *rpu_mode = ret_payload[0]; + + return ret; +} +EXPORT_SYMBOL_GPL(zynqmp_pm_get_rpu_mode); + +/** + * zynqmp_pm_set_rpu_mode() - Set RPU mode + * @node_id: Node ID of the device + * @rpu_mode: Argument 1 to requested IOCTL call. 
either split or lockstep + * + * This function is used to set RPU mode to split or + * lockstep + * + * Return: Returns status, either success or error+reason + */ +int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode) +{ + return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, + IOCTL_SET_RPU_OPER_MODE, (u32)rpu_mode, + 0, NULL); +} +EXPORT_SYMBOL_GPL(zynqmp_pm_set_rpu_mode); + +/** + * zynqmp_pm_set_tcm_config - configure TCM + * @node_id: Firmware specific TCM subsystem ID + * @tcm_mode: Argument 1 to requested IOCTL call + * either PM_RPU_TCM_COMB or PM_RPU_TCM_SPLIT + * + * This function is used to set RPU mode to split or combined + * + * Return: status: 0 for success, else failure + */ +int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode) +{ + return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, + IOCTL_TCM_COMB_CONFIG, (u32)tcm_mode, 0, + NULL); +} +EXPORT_SYMBOL_GPL(zynqmp_pm_set_tcm_config); + +/** + * zynqmp_pm_force_pwrdwn - PM call to request for another PU or subsystem to + * be powered down forcefully + * @node: Node ID of the targeted PU or subsystem + * @ack: Flag to specify whether acknowledge is requested + * + * Return: status, either success or error+reason + */ +int zynqmp_pm_force_pwrdwn(const u32 node, + const enum zynqmp_pm_request_ack ack) +{ + return zynqmp_pm_invoke_fn(PM_FORCE_POWERDOWN, node, ack, 0, 0, NULL); +} +EXPORT_SYMBOL_GPL(zynqmp_pm_force_pwrdwn); + +/** + * zynqmp_pm_request_wake - PM call to wake up selected master or subsystem + * @node: Node ID of the master or subsystem + * @set_addr: Specifies whether the address argument is relevant + * @address: Address from which to resume when woken up + * @ack: Flag to specify whether acknowledge requested + * + * Return: status, either success or error+reason + */ +int zynqmp_pm_request_wake(const u32 node, + const bool set_addr, + const u64 address, + const enum zynqmp_pm_request_ack ack) +{ + /* set_addr flag is encoded into 1st bit of address */ + return zynqmp_pm_invoke_fn(PM_REQUEST_WAKEUP, node, address | set_addr, + address >> 32, ack, NULL); +} +EXPORT_SYMBOL_GPL(zynqmp_pm_request_wake); + +/** * zynqmp_pm_set_requirement() - PM call to set requirement for PM slaves * @node: Node ID of the slave * @capabilities: Requested capabilities of the slave diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index 166019786653..a850e9f486dd 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -352,6 +352,19 @@ config TI_K3_R5_REMOTEPROC It's safe to say N here if you're not interested in utilizing a slave processor. +config XLNX_R5_REMOTEPROC + tristate "Xilinx R5 remoteproc support" + depends on PM && ARCH_ZYNQMP + select ZYNQMP_FIRMWARE + select RPMSG_VIRTIO + select MAILBOX + select ZYNQMP_IPI_MBOX + help + Say y or m here to support Xilinx R5 remote processors via the remote + processor framework. + + It's safe to say N if not interested in using RPU r5f cores. 
+ endif # REMOTEPROC endmenu diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile index 5478c7cb9e07..91314a9b43ce 100644 --- a/drivers/remoteproc/Makefile +++ b/drivers/remoteproc/Makefile @@ -38,3 +38,4 @@ obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o obj-$(CONFIG_TI_K3_DSP_REMOTEPROC) += ti_k3_dsp_remoteproc.o obj-$(CONFIG_TI_K3_R5_REMOTEPROC) += ti_k3_r5_remoteproc.o +obj-$(CONFIG_XLNX_R5_REMOTEPROC) += xlnx_r5_remoteproc.o diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c index 899aa8dd12f0..95da1cbefacf 100644 --- a/drivers/remoteproc/imx_dsp_rproc.c +++ b/drivers/remoteproc/imx_dsp_rproc.c @@ -347,9 +347,6 @@ static int imx_dsp_rproc_stop(struct rproc *rproc) struct device *dev = rproc->dev.parent; int ret = 0; - /* Make sure work is finished */ - flush_work(&priv->rproc_work); - if (rproc->state == RPROC_CRASHED) { priv->flags &= ~REMOTE_IS_READY; return 0; @@ -432,9 +429,18 @@ static void imx_dsp_rproc_vq_work(struct work_struct *work) { struct imx_dsp_rproc *priv = container_of(work, struct imx_dsp_rproc, rproc_work); + struct rproc *rproc = priv->rproc; + + mutex_lock(&rproc->lock); + + if (rproc->state != RPROC_RUNNING) + goto unlock_mutex; rproc_vq_interrupt(priv->rproc, 0); rproc_vq_interrupt(priv->rproc, 1); + +unlock_mutex: + mutex_unlock(&rproc->lock); } /** diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c index 7cc4fd207e2d..9fc978e0393c 100644 --- a/drivers/remoteproc/imx_rproc.c +++ b/drivers/remoteproc/imx_rproc.c @@ -3,9 +3,11 @@ * Copyright (c) 2017 Pengutronix, Oleksij Rempel <kernel@pengutronix.de> */ +#include <dt-bindings/firmware/imx/rsrc.h> #include <linux/arm-smccc.h> #include <linux/clk.h> #include <linux/err.h> +#include <linux/firmware/imx/sci.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/mailbox_client.h> @@ -15,6 +17,7 @@ #include <linux/of_reserved_mem.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/pm_domain.h> #include <linux/regmap.h> #include <linux/remoteproc.h> #include <linux/workqueue.h> @@ -59,6 +62,8 @@ #define IMX_SIP_RPROC_STARTED 0x01 #define IMX_SIP_RPROC_STOP 0x02 +#define IMX_SC_IRQ_GROUP_REBOOTED 5 + /** * struct imx_rproc_mem - slim internal memory structure * @cpu_addr: MPU virtual address of the memory region @@ -71,10 +76,17 @@ struct imx_rproc_mem { size_t size; }; -/* att flags */ +/* att flags: lower 16 bits specifying core, higher 16 bits for flags */ /* M4 own area. 
Can be mapped at probe */ -#define ATT_OWN BIT(1) -#define ATT_IOMEM BIT(2) +#define ATT_OWN BIT(31) +#define ATT_IOMEM BIT(30) + +#define ATT_CORE_MASK 0xffff +#define ATT_CORE(I) BIT((I)) + +static int imx_rproc_xtr_mbox_init(struct rproc *rproc); +static void imx_rproc_free_mbox(struct rproc *rproc); +static int imx_rproc_detach_pd(struct rproc *rproc); struct imx_rproc { struct device *dev; @@ -89,6 +101,15 @@ struct imx_rproc { struct work_struct rproc_work; struct workqueue_struct *workqueue; void __iomem *rsc_table; + struct imx_sc_ipc *ipc_handle; + struct notifier_block rproc_nb; + u32 rproc_pt; /* partition id */ + u32 rsrc_id; /* resource id */ + u32 entry; /* cpu start address */ + int num_pd; + u32 core_index; + struct device **pd_dev; + struct device_link **pd_dev_link; }; static const struct imx_rproc_att imx_rproc_att_imx93[] = { @@ -113,8 +134,33 @@ static const struct imx_rproc_att imx_rproc_att_imx93[] = { { 0x80000000, 0x80000000, 0x10000000, 0 }, { 0x90000000, 0x80000000, 0x10000000, 0 }, - { 0xC0000000, 0xa0000000, 0x10000000, 0 }, - { 0xD0000000, 0xa0000000, 0x10000000, 0 }, + { 0xC0000000, 0xC0000000, 0x10000000, 0 }, + { 0xD0000000, 0xC0000000, 0x10000000, 0 }, +}; + +static const struct imx_rproc_att imx_rproc_att_imx8qm[] = { + /* dev addr , sys addr , size , flags */ + { 0x08000000, 0x08000000, 0x10000000, 0}, + /* TCML */ + { 0x1FFE0000, 0x34FE0000, 0x00020000, ATT_OWN | ATT_IOMEM | ATT_CORE(0)}, + { 0x1FFE0000, 0x38FE0000, 0x00020000, ATT_OWN | ATT_IOMEM | ATT_CORE(1)}, + /* TCMU */ + { 0x20000000, 0x35000000, 0x00020000, ATT_OWN | ATT_IOMEM | ATT_CORE(0)}, + { 0x20000000, 0x39000000, 0x00020000, ATT_OWN | ATT_IOMEM | ATT_CORE(1)}, + /* DDR (Data) */ + { 0x80000000, 0x80000000, 0x60000000, 0 }, +}; + +static const struct imx_rproc_att imx_rproc_att_imx8qxp[] = { + { 0x08000000, 0x08000000, 0x10000000, 0 }, + /* TCML/U */ + { 0x1FFE0000, 0x34FE0000, 0x00040000, ATT_OWN | ATT_IOMEM }, + /* OCRAM(Low 96KB) */ + { 0x21000000, 0x00100000, 0x00018000, 0 }, + /* OCRAM */ + { 0x21100000, 0x00100000, 0x00040000, 0 }, + /* DDR (Data) */ + { 0x80000000, 0x80000000, 0x60000000, 0 }, }; static const struct imx_rproc_att imx_rproc_att_imx8mn[] = { @@ -255,6 +301,18 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mq = { .method = IMX_RPROC_MMIO, }; +static const struct imx_rproc_dcfg imx_rproc_cfg_imx8qm = { + .att = imx_rproc_att_imx8qm, + .att_size = ARRAY_SIZE(imx_rproc_att_imx8qm), + .method = IMX_RPROC_SCU_API, +}; + +static const struct imx_rproc_dcfg imx_rproc_cfg_imx8qxp = { + .att = imx_rproc_att_imx8qxp, + .att_size = ARRAY_SIZE(imx_rproc_att_imx8qxp), + .method = IMX_RPROC_SCU_API, +}; + static const struct imx_rproc_dcfg imx_rproc_cfg_imx8ulp = { .att = imx_rproc_att_imx8ulp, .att_size = ARRAY_SIZE(imx_rproc_att_imx8ulp), @@ -301,6 +359,10 @@ static int imx_rproc_start(struct rproc *rproc) struct arm_smccc_res res; int ret; + ret = imx_rproc_xtr_mbox_init(rproc); + if (ret) + return ret; + switch (dcfg->method) { case IMX_RPROC_MMIO: ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask, @@ -310,6 +372,9 @@ static int imx_rproc_start(struct rproc *rproc) arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_START, 0, 0, 0, 0, 0, 0, &res); ret = res.a0; break; + case IMX_RPROC_SCU_API: + ret = imx_sc_pm_cpu_start(priv->ipc_handle, priv->rsrc_id, true, priv->entry); + break; default: return -EOPNOTSUPP; } @@ -339,12 +404,17 @@ static int imx_rproc_stop(struct rproc *rproc) if (res.a1) dev_info(dev, "Not in wfi, force stopped\n"); break; + case 
IMX_RPROC_SCU_API: + ret = imx_sc_pm_cpu_start(priv->ipc_handle, priv->rsrc_id, false, priv->entry); + break; default: return -EOPNOTSUPP; } if (ret) dev_err(dev, "Failed to stop remote core\n"); + else + imx_rproc_free_mbox(rproc); return ret; } @@ -359,6 +429,17 @@ static int imx_rproc_da_to_sys(struct imx_rproc *priv, u64 da, for (i = 0; i < dcfg->att_size; i++) { const struct imx_rproc_att *att = &dcfg->att[i]; + /* + * Ignore entries not belong to current core: + * i.MX8QM has dual general M4_[0,1] cores, M4_0's own entries + * has "ATT_CORE(0) & BIT(0)" true, M4_1's own entries has + * "ATT_CORE(1) & BIT(1)" true. + */ + if (att->flags & ATT_CORE_MASK) { + if (!((BIT(priv->core_index)) & (att->flags & ATT_CORE_MASK))) + continue; + } + if (da >= att->da && da + len < att->da + att->size) { unsigned int offset = da - att->da; @@ -519,6 +600,22 @@ static void imx_rproc_kick(struct rproc *rproc, int vqid) static int imx_rproc_attach(struct rproc *rproc) { + return imx_rproc_xtr_mbox_init(rproc); +} + +static int imx_rproc_detach(struct rproc *rproc) +{ + struct imx_rproc *priv = rproc->priv; + const struct imx_rproc_dcfg *dcfg = priv->dcfg; + + if (dcfg->method != IMX_RPROC_SCU_API) + return -EOPNOTSUPP; + + if (imx_sc_rm_is_resource_owned(priv->ipc_handle, priv->rsrc_id)) + return -EOPNOTSUPP; + + imx_rproc_free_mbox(rproc); + return 0; } @@ -537,6 +634,7 @@ static struct resource_table *imx_rproc_get_loaded_rsc_table(struct rproc *rproc static const struct rproc_ops imx_rproc_ops = { .prepare = imx_rproc_prepare, .attach = imx_rproc_attach, + .detach = imx_rproc_detach, .start = imx_rproc_start, .stop = imx_rproc_stop, .kick = imx_rproc_kick, @@ -647,6 +745,18 @@ static int imx_rproc_xtr_mbox_init(struct rproc *rproc) struct device *dev = priv->dev; struct mbox_client *cl; + /* + * stop() and detach() will free the mbox channels, so need + * to request mbox channels in start() and attach(). + * + * Because start() and attach() not able to handle mbox defer + * probe, imx_rproc_xtr_mbox_init is also called in probe(). + * The check is to avoid request mbox again when start() or + * attach() after probe() returns success. 
+ */ + if (priv->tx_ch && priv->rx_ch) + return 0; + if (!of_get_property(dev->of_node, "mbox-names", NULL)) return 0; @@ -676,8 +786,119 @@ static void imx_rproc_free_mbox(struct rproc *rproc) { struct imx_rproc *priv = rproc->priv; - mbox_free_channel(priv->tx_ch); - mbox_free_channel(priv->rx_ch); + if (priv->tx_ch) { + mbox_free_channel(priv->tx_ch); + priv->tx_ch = NULL; + } + + if (priv->rx_ch) { + mbox_free_channel(priv->rx_ch); + priv->rx_ch = NULL; + } +} + +static void imx_rproc_put_scu(struct rproc *rproc) +{ + struct imx_rproc *priv = rproc->priv; + const struct imx_rproc_dcfg *dcfg = priv->dcfg; + + if (dcfg->method != IMX_RPROC_SCU_API) + return; + + if (imx_sc_rm_is_resource_owned(priv->ipc_handle, priv->rsrc_id)) { + imx_rproc_detach_pd(rproc); + return; + } + + imx_scu_irq_group_enable(IMX_SC_IRQ_GROUP_REBOOTED, BIT(priv->rproc_pt), false); + imx_scu_irq_unregister_notifier(&priv->rproc_nb); +} + +static int imx_rproc_partition_notify(struct notifier_block *nb, + unsigned long event, void *group) +{ + struct imx_rproc *priv = container_of(nb, struct imx_rproc, rproc_nb); + + /* Ignore other irqs */ + if (!((event & BIT(priv->rproc_pt)) && (*(u8 *)group == IMX_SC_IRQ_GROUP_REBOOTED))) + return 0; + + rproc_report_crash(priv->rproc, RPROC_WATCHDOG); + + pr_info("Partition%d reset!\n", priv->rproc_pt); + + return 0; +} + +static int imx_rproc_attach_pd(struct imx_rproc *priv) +{ + struct device *dev = priv->dev; + int ret, i; + + /* + * If there is only one power-domain entry, the platform driver framework + * will handle it, no need handle it in this driver. + */ + priv->num_pd = of_count_phandle_with_args(dev->of_node, "power-domains", + "#power-domain-cells"); + if (priv->num_pd <= 1) + return 0; + + priv->pd_dev = devm_kmalloc_array(dev, priv->num_pd, sizeof(*priv->pd_dev), GFP_KERNEL); + if (!priv->pd_dev) + return -ENOMEM; + + priv->pd_dev_link = devm_kmalloc_array(dev, priv->num_pd, sizeof(*priv->pd_dev_link), + GFP_KERNEL); + + if (!priv->pd_dev_link) + return -ENOMEM; + + for (i = 0; i < priv->num_pd; i++) { + priv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i); + if (IS_ERR(priv->pd_dev[i])) { + ret = PTR_ERR(priv->pd_dev[i]); + goto detach_pd; + } + + priv->pd_dev_link[i] = device_link_add(dev, priv->pd_dev[i], DL_FLAG_STATELESS | + DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); + if (!priv->pd_dev_link[i]) { + dev_pm_domain_detach(priv->pd_dev[i], false); + ret = -EINVAL; + goto detach_pd; + } + } + + return 0; + +detach_pd: + while (--i >= 0) { + device_link_del(priv->pd_dev_link[i]); + dev_pm_domain_detach(priv->pd_dev[i], false); + } + + return ret; +} + +static int imx_rproc_detach_pd(struct rproc *rproc) +{ + struct imx_rproc *priv = rproc->priv; + int i; + + /* + * If there is only one power-domain entry, the platform driver framework + * will handle it, no need handle it in this driver. 
+ */ + if (priv->num_pd <= 1) + return 0; + + for (i = 0; i < priv->num_pd; i++) { + device_link_del(priv->pd_dev_link[i]); + dev_pm_domain_detach(priv->pd_dev[i], false); + } + + return 0; } static int imx_rproc_detect_mode(struct imx_rproc *priv) @@ -689,6 +910,7 @@ static int imx_rproc_detect_mode(struct imx_rproc *priv) struct arm_smccc_res res; int ret; u32 val; + u8 pt; switch (dcfg->method) { case IMX_RPROC_NONE: @@ -699,6 +921,61 @@ static int imx_rproc_detect_mode(struct imx_rproc *priv) if (res.a0) priv->rproc->state = RPROC_DETACHED; return 0; + case IMX_RPROC_SCU_API: + ret = imx_scu_get_handle(&priv->ipc_handle); + if (ret) + return ret; + ret = of_property_read_u32(dev->of_node, "fsl,resource-id", &priv->rsrc_id); + if (ret) { + dev_err(dev, "No fsl,resource-id property\n"); + return ret; + } + + if (priv->rsrc_id == IMX_SC_R_M4_1_PID0) + priv->core_index = 1; + else + priv->core_index = 0; + + /* + * If Mcore resource is not owned by Acore partition, It is kicked by ROM, + * and Linux could only do IPC with Mcore and nothing else. + */ + if (imx_sc_rm_is_resource_owned(priv->ipc_handle, priv->rsrc_id)) { + if (of_property_read_u32(dev->of_node, "fsl,entry-address", &priv->entry)) + return -EINVAL; + + return imx_rproc_attach_pd(priv); + } + + priv->rproc->state = RPROC_DETACHED; + priv->rproc->recovery_disabled = false; + rproc_set_feature(priv->rproc, RPROC_FEAT_ATTACH_ON_RECOVERY); + + /* Get partition id and enable irq in SCFW */ + ret = imx_sc_rm_get_resource_owner(priv->ipc_handle, priv->rsrc_id, &pt); + if (ret) { + dev_err(dev, "not able to get resource owner\n"); + return ret; + } + + priv->rproc_pt = pt; + priv->rproc_nb.notifier_call = imx_rproc_partition_notify; + + ret = imx_scu_irq_register_notifier(&priv->rproc_nb); + if (ret) { + dev_err(dev, "register scu notifier failed, %d\n", ret); + return ret; + } + + ret = imx_scu_irq_group_enable(IMX_SC_IRQ_GROUP_REBOOTED, BIT(priv->rproc_pt), + true); + if (ret) { + imx_scu_irq_unregister_notifier(&priv->rproc_nb); + dev_err(dev, "Enable irq failed, %d\n", ret); + return ret; + } + + return 0; default: break; } @@ -803,7 +1080,7 @@ static int imx_rproc_probe(struct platform_device *pdev) ret = imx_rproc_clk_enable(priv); if (ret) - goto err_put_mbox; + goto err_put_scu; INIT_WORK(&priv->rproc_work, imx_rproc_vq_work); @@ -820,6 +1097,8 @@ static int imx_rproc_probe(struct platform_device *pdev) err_put_clk: clk_disable_unprepare(priv->clk); +err_put_scu: + imx_rproc_put_scu(rproc); err_put_mbox: imx_rproc_free_mbox(rproc); err_put_wkq: @@ -837,6 +1116,7 @@ static int imx_rproc_remove(struct platform_device *pdev) clk_disable_unprepare(priv->clk); rproc_del(rproc); + imx_rproc_put_scu(rproc); imx_rproc_free_mbox(rproc); destroy_workqueue(priv->workqueue); rproc_free(rproc); @@ -852,6 +1132,8 @@ static const struct of_device_id imx_rproc_of_match[] = { { .compatible = "fsl,imx8mm-cm4", .data = &imx_rproc_cfg_imx8mq }, { .compatible = "fsl,imx8mn-cm7", .data = &imx_rproc_cfg_imx8mn }, { .compatible = "fsl,imx8mp-cm7", .data = &imx_rproc_cfg_imx8mn }, + { .compatible = "fsl,imx8qxp-cm4", .data = &imx_rproc_cfg_imx8qxp }, + { .compatible = "fsl,imx8qm-cm4", .data = &imx_rproc_cfg_imx8qm }, { .compatible = "fsl,imx8ulp-cm33", .data = &imx_rproc_cfg_imx8ulp }, { .compatible = "fsl,imx93-cm33", .data = &imx_rproc_cfg_imx93 }, {}, diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index 6afd0941e552..dc6f07ca8341 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ 
b/drivers/remoteproc/qcom_q6v5_pas.c @@ -449,6 +449,7 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp) } ret = of_address_to_resource(node, 0, &r); + of_node_put(node); if (ret) return ret; @@ -556,6 +557,7 @@ static int adsp_probe(struct platform_device *pdev) detach_proxy_pds: adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count); free_rproc: + device_init_wakeup(adsp->dev, false); rproc_free(rproc); return ret; @@ -572,6 +574,8 @@ static int adsp_remove(struct platform_device *pdev) qcom_remove_sysmon_subdev(adsp->sysmon); qcom_remove_smd_subdev(adsp->rproc, &adsp->smd_subdev); qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev); + adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count); + device_init_wakeup(adsp->dev, false); rproc_free(adsp->rproc); return 0; diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c index bb0947f7770e..ba24d745b2d6 100644 --- a/drivers/remoteproc/qcom_q6v5_wcss.c +++ b/drivers/remoteproc/qcom_q6v5_wcss.c @@ -351,7 +351,7 @@ static int q6v5_wcss_qcs404_power_on(struct q6v5_wcss *wcss) if (ret) { dev_err(wcss->dev, "xo cbcr enabling timed out (rc:%d)\n", ret); - return ret; + goto disable_xo_cbcr_clk; } writel(0, wcss->reg_base + Q6SS_CGC_OVERRIDE); @@ -417,6 +417,7 @@ disable_sleep_cbcr_clk: val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR); val &= ~Q6SS_CLK_ENABLE; writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR); +disable_xo_cbcr_clk: val = readl(wcss->reg_base + Q6SS_XO_CBCR); val &= ~Q6SS_CLK_ENABLE; writel(val, wcss->reg_base + Q6SS_XO_CBCR); @@ -827,6 +828,9 @@ static int q6v5_wcss_init_mmio(struct q6v5_wcss *wcss, int ret; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6"); + if (!res) + return -EINVAL; + wcss->reg_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!wcss->reg_base) diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c index 57dde2a69b9d..85393d5eb005 100644 --- a/drivers/remoteproc/qcom_sysmon.c +++ b/drivers/remoteproc/qcom_sysmon.c @@ -190,7 +190,7 @@ struct ssctl_shutdown_resp { struct qmi_response_type_v01 resp; }; -static struct qmi_elem_info ssctl_shutdown_resp_ei[] = { +static const struct qmi_elem_info ssctl_shutdown_resp_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -211,7 +211,7 @@ struct ssctl_subsys_event_req { u32 evt_driven; }; -static struct qmi_elem_info ssctl_subsys_event_req_ei[] = { +static const struct qmi_elem_info ssctl_subsys_event_req_ei[] = { { .data_type = QMI_DATA_LEN, .elem_len = 1, @@ -269,7 +269,7 @@ struct ssctl_subsys_event_resp { struct qmi_response_type_v01 resp; }; -static struct qmi_elem_info ssctl_subsys_event_resp_ei[] = { +static const struct qmi_elem_info ssctl_subsys_event_resp_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -283,7 +283,7 @@ static struct qmi_elem_info ssctl_subsys_event_resp_ei[] = { {} }; -static struct qmi_elem_info ssctl_shutdown_ind_ei[] = { +static const struct qmi_elem_info ssctl_shutdown_ind_ei[] = { {} }; @@ -652,7 +652,9 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc, if (sysmon->shutdown_irq != -ENODATA) { dev_err(sysmon->dev, "failed to retrieve shutdown-ack IRQ\n"); - return ERR_PTR(sysmon->shutdown_irq); + ret = sysmon->shutdown_irq; + kfree(sysmon); + return ERR_PTR(ret); } } else { ret = devm_request_threaded_irq(sysmon->dev, @@ -663,6 +665,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc, if (ret) { dev_err(sysmon->dev, "failed to acquire shutdown-ack IRQ\n"); + kfree(sysmon); return 
ERR_PTR(ret); } } diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 8768cb64f560..1cd4815a6dd1 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -509,7 +509,13 @@ static int rproc_handle_vdev(struct rproc *rproc, void *ptr, rvdev_data.rsc_offset = offset; rvdev_data.rsc = rsc; - pdev = platform_device_register_data(dev, "rproc-virtio", rvdev_data.index, &rvdev_data, + /* + * When there is more than one remote processor, rproc->nb_vdev number is + * same for each separate instances of "rproc". If rvdev_data.index is used + * as device id, then we get duplication in sysfs, so need to use + * PLATFORM_DEVID_AUTO to auto select device id. + */ + pdev = platform_device_register_data(dev, "rproc-virtio", PLATFORM_DEVID_AUTO, &rvdev_data, sizeof(rvdev_data)); if (IS_ERR(pdev)) { dev_err(dev, "failed to create rproc-virtio device\n"); @@ -1862,12 +1868,18 @@ static void rproc_crash_handler_work(struct work_struct *work) mutex_lock(&rproc->lock); - if (rproc->state == RPROC_CRASHED || rproc->state == RPROC_OFFLINE) { + if (rproc->state == RPROC_CRASHED) { /* handle only the first crash detected */ mutex_unlock(&rproc->lock); return; } + if (rproc->state == RPROC_OFFLINE) { + /* Don't recover if the remote processor was stopped */ + mutex_unlock(&rproc->lock); + goto out; + } + rproc->state = RPROC_CRASHED; dev_err(dev, "handling crash #%u in %s\n", ++rproc->crash_cnt, rproc->name); @@ -1877,6 +1889,7 @@ static void rproc_crash_handler_work(struct work_struct *work) if (!rproc->recovery_disabled) rproc_trigger_recovery(rproc); +out: pm_relax(rproc->dev.parent); } @@ -2106,7 +2119,7 @@ struct rproc *rproc_get_by_phandle(phandle phandle) rcu_read_lock(); list_for_each_entry_rcu(r, &rproc_list, node) { - if (r->dev.parent && r->dev.parent->of_node == np) { + if (r->dev.parent && device_match_of_node(r->dev.parent, np)) { /* prevent underlying implementation from being removed */ if (!try_module_get(r->dev.parent->driver->owner)) { dev_err(&r->dev, "can't get owner\n"); diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c new file mode 100644 index 000000000000..2db57d394155 --- /dev/null +++ b/drivers/remoteproc/xlnx_r5_remoteproc.c @@ -0,0 +1,1067 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ZynqMP R5 Remote Processor driver + * + */ + +#include <dt-bindings/power/xlnx-zynqmp-power.h> +#include <linux/dma-mapping.h> +#include <linux/firmware/xlnx-zynqmp.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/of_reserved_mem.h> +#include <linux/platform_device.h> +#include <linux/remoteproc.h> +#include <linux/slab.h> + +#include "remoteproc_internal.h" + +/* + * settings for RPU cluster mode which + * reflects possible values of xlnx,cluster-mode dt-property + */ +enum zynqmp_r5_cluster_mode { + SPLIT_MODE = 0, /* When cores run as separate processor */ + LOCKSTEP_MODE = 1, /* cores execute same code in lockstep,clk-for-clk */ + SINGLE_CPU_MODE = 2, /* core0 is held in reset and only core1 runs */ +}; + +/** + * struct mem_bank_data - Memory Bank description + * + * @addr: Start address of memory bank + * @size: Size of Memory bank + * @pm_domain_id: Power-domains id of memory bank for firmware to turn on/off + * @bank_name: name of the bank for remoteproc framework + */ +struct mem_bank_data { + phys_addr_t addr; + size_t size; + u32 pm_domain_id; + char *bank_name; +}; + +/* + * 
Hardcoded TCM bank values. This will be removed once TCM bindings are + * accepted for system-dt specifications and upstreamed in linux kernel + */ +static const struct mem_bank_data zynqmp_tcm_banks[] = { + {0xffe00000UL, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */ + {0xffe20000UL, 0x10000UL, PD_R5_0_BTCM, "btcm0"}, + {0xffe90000UL, 0x10000UL, PD_R5_1_ATCM, "atcm1"}, + {0xffeb0000UL, 0x10000UL, PD_R5_1_BTCM, "btcm1"}, +}; + +/** + * struct zynqmp_r5_core + * + * @dev: device of RPU instance + * @np: device node of RPU instance + * @tcm_bank_count: number TCM banks accessible to this RPU + * @tcm_banks: array of each TCM bank data + * @rmem_count: Number of reserved mem regions + * @rmem: reserved memory region nodes from device tree + * @rproc: rproc handle + * @pm_domain_id: RPU CPU power domain id + */ +struct zynqmp_r5_core { + struct device *dev; + struct device_node *np; + int tcm_bank_count; + struct mem_bank_data **tcm_banks; + int rmem_count; + struct reserved_mem **rmem; + struct rproc *rproc; + u32 pm_domain_id; +}; + +/** + * struct zynqmp_r5_cluster + * + * @dev: r5f subsystem cluster device node + * @mode: cluster mode of type zynqmp_r5_cluster_mode + * @core_count: number of r5 cores used for this cluster mode + * @r5_cores: Array of pointers pointing to r5 core + */ +struct zynqmp_r5_cluster { + struct device *dev; + enum zynqmp_r5_cluster_mode mode; + int core_count; + struct zynqmp_r5_core **r5_cores; +}; + +/* + * zynqmp_r5_set_mode() + * + * set RPU cluster and TCM operation mode + * + * @r5_core: pointer to zynqmp_r5_core type object + * @fw_reg_val: value expected by firmware to configure RPU cluster mode + * @tcm_mode: value expected by fw to configure TCM mode (lockstep or split) + * + * Return: 0 for success and < 0 for failure + */ +static int zynqmp_r5_set_mode(struct zynqmp_r5_core *r5_core, + enum rpu_oper_mode fw_reg_val, + enum rpu_tcm_comb tcm_mode) +{ + int ret; + + ret = zynqmp_pm_set_rpu_mode(r5_core->pm_domain_id, fw_reg_val); + if (ret < 0) { + dev_err(r5_core->dev, "failed to set RPU mode\n"); + return ret; + } + + ret = zynqmp_pm_set_tcm_config(r5_core->pm_domain_id, tcm_mode); + if (ret < 0) + dev_err(r5_core->dev, "failed to configure TCM\n"); + + return ret; +} + +/* + * zynqmp_r5_rproc_start() + * @rproc: single R5 core's corresponding rproc instance + * + * Start R5 Core from designated boot address. + * + * return 0 on success, otherwise non-zero value on failure + */ +static int zynqmp_r5_rproc_start(struct rproc *rproc) +{ + struct zynqmp_r5_core *r5_core = rproc->priv; + enum rpu_boot_mem bootmem; + int ret; + + /* + * The exception vector pointers (EVP) refer to the base-address of + * exception vectors (for reset, IRQ, FIQ, etc). The reset-vector + * starts at the base-address and subsequent vectors are on 4-byte + * boundaries. + * + * Exception vectors can start either from 0x0000_0000 (LOVEC) or + * from 0xFFFF_0000 (HIVEC) which is mapped in the OCM (On-Chip Memory) + * + * Usually firmware will put Exception vectors at LOVEC. + * + * It is not recommend that you change the exception vector. + * Changing the EVP to HIVEC will result in increased interrupt latency + * and jitter. Also, if the OCM is secured and the Cortex-R5F processor + * is non-secured, then the Cortex-R5F processor cannot access the + * HIVEC exception vectors in the OCM. + */ + bootmem = (rproc->bootaddr >= 0xFFFC0000) ? 
+ PM_RPU_BOOTMEM_HIVEC : PM_RPU_BOOTMEM_LOVEC; + + dev_dbg(r5_core->dev, "RPU boot addr 0x%llx from %s.", rproc->bootaddr, + bootmem == PM_RPU_BOOTMEM_HIVEC ? "OCM" : "TCM"); + + ret = zynqmp_pm_request_wake(r5_core->pm_domain_id, 1, + bootmem, ZYNQMP_PM_REQUEST_ACK_NO); + if (ret) + dev_err(r5_core->dev, + "failed to start RPU = 0x%x\n", r5_core->pm_domain_id); + return ret; +} + +/* + * zynqmp_r5_rproc_stop() + * @rproc: single R5 core's corresponding rproc instance + * + * Power down R5 Core. + * + * return 0 on success, otherwise non-zero value on failure + */ +static int zynqmp_r5_rproc_stop(struct rproc *rproc) +{ + struct zynqmp_r5_core *r5_core = rproc->priv; + int ret; + + ret = zynqmp_pm_force_pwrdwn(r5_core->pm_domain_id, + ZYNQMP_PM_REQUEST_ACK_BLOCKING); + if (ret) + dev_err(r5_core->dev, "failed to stop remoteproc RPU %d\n", ret); + + return ret; +} + +/* + * zynqmp_r5_mem_region_map() + * @rproc: single R5 core's corresponding rproc instance + * @mem: mem descriptor to map reserved memory-regions + * + * Callback to map va for memory-region's carveout. + * + * return 0 on success, otherwise non-zero value on failure + */ +static int zynqmp_r5_mem_region_map(struct rproc *rproc, + struct rproc_mem_entry *mem) +{ + void __iomem *va; + + va = ioremap_wc(mem->dma, mem->len); + if (IS_ERR_OR_NULL(va)) + return -ENOMEM; + + mem->va = (void *)va; + + return 0; +} + +/* + * zynqmp_r5_rproc_mem_unmap + * @rproc: single R5 core's corresponding rproc instance + * @mem: mem entry to unmap + * + * Unmap memory-region carveout + * + * return: always returns 0 + */ +static int zynqmp_r5_mem_region_unmap(struct rproc *rproc, + struct rproc_mem_entry *mem) +{ + iounmap((void __iomem *)mem->va); + return 0; +} + +/* + * add_mem_regions_carveout() + * @rproc: single R5 core's corresponding rproc instance + * + * Construct rproc mem carveouts from memory-region property nodes + * + * return 0 on success, otherwise non-zero value on failure + */ +static int add_mem_regions_carveout(struct rproc *rproc) +{ + struct rproc_mem_entry *rproc_mem; + struct zynqmp_r5_core *r5_core; + struct reserved_mem *rmem; + int i, num_mem_regions; + + r5_core = (struct zynqmp_r5_core *)rproc->priv; + num_mem_regions = r5_core->rmem_count; + + for (i = 0; i < num_mem_regions; i++) { + rmem = r5_core->rmem[i]; + + if (!strncmp(rmem->name, "vdev0buffer", strlen("vdev0buffer"))) { + /* Init reserved memory for vdev buffer */ + rproc_mem = rproc_of_resm_mem_entry_init(&rproc->dev, i, + rmem->size, + rmem->base, + rmem->name); + } else { + /* Register associated reserved memory regions */ + rproc_mem = rproc_mem_entry_init(&rproc->dev, NULL, + (dma_addr_t)rmem->base, + rmem->size, rmem->base, + zynqmp_r5_mem_region_map, + zynqmp_r5_mem_region_unmap, + rmem->name); + } + + if (!rproc_mem) + return -ENOMEM; + + rproc_add_carveout(rproc, rproc_mem); + + dev_dbg(&rproc->dev, "reserved mem carveout %s addr=%llx, size=0x%llx", + rmem->name, rmem->base, rmem->size); + } + + return 0; +} + +/* + * tcm_mem_unmap() + * @rproc: single R5 core's corresponding rproc instance + * @mem: tcm mem entry to unmap + * + * Unmap TCM banks when powering down R5 core. + * + * return always 0 + */ +static int tcm_mem_unmap(struct rproc *rproc, struct rproc_mem_entry *mem) +{ + iounmap((void __iomem *)mem->va); + + return 0; +} + +/* + * tcm_mem_map() + * @rproc: single R5 core's corresponding rproc instance + * @mem: tcm memory entry descriptor + * + * Given TCM bank entry, this func setup virtual address for TCM bank + * remoteproc carveout. 
It also takes care of va to da address translation + * + * return 0 on success, otherwise non-zero value on failure + */ +static int tcm_mem_map(struct rproc *rproc, + struct rproc_mem_entry *mem) +{ + void __iomem *va; + + va = ioremap_wc(mem->dma, mem->len); + if (IS_ERR_OR_NULL(va)) + return -ENOMEM; + + /* Update memory entry va */ + mem->va = (void *)va; + + /* clear TCMs */ + memset_io(va, 0, mem->len); + + /* + * The R5s expect their TCM banks to be at address 0x0 and 0x2000, + * while on the Linux side they are at 0xffexxxxx. + * + * Zero out the high 12 bits of the address. This will give + * expected values for TCM Banks 0A and 0B (0x0 and 0x20000). + */ + mem->da &= 0x000fffff; + + /* + * TCM Banks 1A and 1B still have to be translated. + * + * Below handle these two banks' absolute addresses (0xffe90000 and + * 0xffeb0000) and convert to the expected relative addresses + * (0x0 and 0x20000). + */ + if (mem->da == 0x90000 || mem->da == 0xB0000) + mem->da -= 0x90000; + + /* if translated TCM bank address is not valid report error */ + if (mem->da != 0x0 && mem->da != 0x20000) { + dev_err(&rproc->dev, "invalid TCM address: %x\n", mem->da); + return -EINVAL; + } + return 0; +} + +/* + * add_tcm_carveout_split_mode() + * @rproc: single R5 core's corresponding rproc instance + * + * allocate and add remoteproc carveout for TCM memory in split mode + * + * return 0 on success, otherwise non-zero value on failure + */ +static int add_tcm_carveout_split_mode(struct rproc *rproc) +{ + struct rproc_mem_entry *rproc_mem; + struct zynqmp_r5_core *r5_core; + int i, num_banks, ret; + phys_addr_t bank_addr; + struct device *dev; + u32 pm_domain_id; + size_t bank_size; + char *bank_name; + + r5_core = (struct zynqmp_r5_core *)rproc->priv; + dev = r5_core->dev; + num_banks = r5_core->tcm_bank_count; + + /* + * Power-on Each 64KB TCM, + * register its address space, map and unmap functions + * and add carveouts accordingly + */ + for (i = 0; i < num_banks; i++) { + bank_addr = r5_core->tcm_banks[i]->addr; + bank_name = r5_core->tcm_banks[i]->bank_name; + bank_size = r5_core->tcm_banks[i]->size; + pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id; + + ret = zynqmp_pm_request_node(pm_domain_id, + ZYNQMP_PM_CAPABILITY_ACCESS, 0, + ZYNQMP_PM_REQUEST_ACK_BLOCKING); + if (ret < 0) { + dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id); + goto release_tcm_split; + } + + dev_dbg(dev, "TCM carveout split mode %s addr=%llx, size=0x%lx", + bank_name, bank_addr, bank_size); + + rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr, + bank_size, bank_addr, + tcm_mem_map, tcm_mem_unmap, + bank_name); + if (!rproc_mem) { + ret = -ENOMEM; + zynqmp_pm_release_node(pm_domain_id); + goto release_tcm_split; + } + + rproc_add_carveout(rproc, rproc_mem); + } + + return 0; + +release_tcm_split: + /* If failed, Turn off all TCM banks turned on before */ + for (i--; i >= 0; i--) { + pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id; + zynqmp_pm_release_node(pm_domain_id); + } + return ret; +} + +/* + * add_tcm_carveout_lockstep_mode() + * @rproc: single R5 core's corresponding rproc instance + * + * allocate and add remoteproc carveout for TCM memory in lockstep mode + * + * return 0 on success, otherwise non-zero value on failure + */ +static int add_tcm_carveout_lockstep_mode(struct rproc *rproc) +{ + struct rproc_mem_entry *rproc_mem; + struct zynqmp_r5_core *r5_core; + int i, num_banks, ret; + phys_addr_t bank_addr; + size_t bank_size = 0; + struct device *dev; + u32 pm_domain_id; + char *bank_name; + + 
r5_core = (struct zynqmp_r5_core *)rproc->priv; + dev = r5_core->dev; + + /* Go through zynqmp banks for r5 node */ + num_banks = r5_core->tcm_bank_count; + + /* + * In lockstep mode, TCM is contiguous memory block + * However, each TCM block still needs to be enabled individually. + * So, Enable each TCM block individually, but add their size + * to create contiguous memory region. + */ + bank_addr = r5_core->tcm_banks[0]->addr; + bank_name = r5_core->tcm_banks[0]->bank_name; + + for (i = 0; i < num_banks; i++) { + bank_size += r5_core->tcm_banks[i]->size; + pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id; + + /* Turn on each TCM bank individually */ + ret = zynqmp_pm_request_node(pm_domain_id, + ZYNQMP_PM_CAPABILITY_ACCESS, 0, + ZYNQMP_PM_REQUEST_ACK_BLOCKING); + if (ret < 0) { + dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id); + goto release_tcm_lockstep; + } + } + + dev_dbg(dev, "TCM add carveout lockstep mode %s addr=0x%llx, size=0x%lx", + bank_name, bank_addr, bank_size); + + /* Register TCM address range, TCM map and unmap functions */ + rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr, + bank_size, bank_addr, + tcm_mem_map, tcm_mem_unmap, + bank_name); + if (!rproc_mem) { + ret = -ENOMEM; + goto release_tcm_lockstep; + } + + /* If registration is success, add carveouts */ + rproc_add_carveout(rproc, rproc_mem); + + return 0; + +release_tcm_lockstep: + /* If failed, Turn off all TCM banks turned on before */ + for (i--; i >= 0; i--) { + pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id; + zynqmp_pm_release_node(pm_domain_id); + } + return ret; +} + +/* + * add_tcm_banks() + * @rproc: single R5 core's corresponding rproc instance + * + * allocate and add remoteproc carveouts for TCM memory based on cluster mode + * + * return 0 on success, otherwise non-zero value on failure + */ +static int add_tcm_banks(struct rproc *rproc) +{ + struct zynqmp_r5_cluster *cluster; + struct zynqmp_r5_core *r5_core; + struct device *dev; + + r5_core = (struct zynqmp_r5_core *)rproc->priv; + if (!r5_core) + return -EINVAL; + + dev = r5_core->dev; + + cluster = dev_get_drvdata(dev->parent); + if (!cluster) { + dev_err(dev->parent, "Invalid driver data\n"); + return -EINVAL; + } + + /* + * In lockstep mode TCM banks are one contiguous memory region of 256Kb + * In split mode, each TCM bank is 64Kb and not contiguous. + * We add memory carveouts accordingly. + */ + if (cluster->mode == SPLIT_MODE) + return add_tcm_carveout_split_mode(rproc); + else if (cluster->mode == LOCKSTEP_MODE) + return add_tcm_carveout_lockstep_mode(rproc); + + return -EINVAL; +} + +/* + * zynqmp_r5_parse_fw() + * @rproc: single R5 core's corresponding rproc instance + * @fw: ptr to firmware to be loaded onto r5 core + * + * get resource table if available + * + * return 0 on success, otherwise non-zero value on failure + */ +static int zynqmp_r5_parse_fw(struct rproc *rproc, const struct firmware *fw) +{ + int ret; + + ret = rproc_elf_load_rsc_table(rproc, fw); + if (ret == -EINVAL) { + /* + * resource table only required for IPC. + * if not present, this is not necessarily an error; + * for example, loading r5 hello world application + * so simply inform user and keep going. 
+ */ + dev_info(&rproc->dev, "no resource table found.\n"); + ret = 0; + } + return ret; +} + +/** + * zynqmp_r5_rproc_prepare() + * adds carveouts for TCM bank and reserved memory regions + * + * @rproc: Device node of each rproc + * + * Return: 0 for success else < 0 error code + */ +static int zynqmp_r5_rproc_prepare(struct rproc *rproc) +{ + int ret; + + ret = add_tcm_banks(rproc); + if (ret) { + dev_err(&rproc->dev, "failed to get TCM banks, err %d\n", ret); + return ret; + } + + ret = add_mem_regions_carveout(rproc); + if (ret) { + dev_err(&rproc->dev, "failed to get reserve mem regions %d\n", ret); + return ret; + } + + return 0; +} + +/** + * zynqmp_r5_rproc_unprepare() + * Turns off TCM banks using power-domain id + * + * @rproc: Device node of each rproc + * + * Return: always 0 + */ +static int zynqmp_r5_rproc_unprepare(struct rproc *rproc) +{ + struct zynqmp_r5_core *r5_core; + u32 pm_domain_id; + int i; + + r5_core = (struct zynqmp_r5_core *)rproc->priv; + + for (i = 0; i < r5_core->tcm_bank_count; i++) { + pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id; + if (zynqmp_pm_release_node(pm_domain_id)) + dev_warn(r5_core->dev, + "can't turn off TCM bank 0x%x", pm_domain_id); + } + + return 0; +} + +static const struct rproc_ops zynqmp_r5_rproc_ops = { + .prepare = zynqmp_r5_rproc_prepare, + .unprepare = zynqmp_r5_rproc_unprepare, + .start = zynqmp_r5_rproc_start, + .stop = zynqmp_r5_rproc_stop, + .load = rproc_elf_load_segments, + .parse_fw = zynqmp_r5_parse_fw, + .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table, + .sanity_check = rproc_elf_sanity_check, + .get_boot_addr = rproc_elf_get_boot_addr, +}; + +/** + * zynqmp_r5_add_rproc_core() + * Allocate and add struct rproc object for each r5f core + * This is called for each individual r5f core + * + * @cdev: Device node of each r5 core + * + * Return: zynqmp_r5_core object for success else error code pointer + */ +static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev) +{ + struct zynqmp_r5_core *r5_core; + struct rproc *r5_rproc; + int ret; + + /* Set up DMA mask */ + ret = dma_set_coherent_mask(cdev, DMA_BIT_MASK(32)); + if (ret) + return ERR_PTR(ret); + + /* Allocate remoteproc instance */ + r5_rproc = rproc_alloc(cdev, dev_name(cdev), + &zynqmp_r5_rproc_ops, + NULL, sizeof(struct zynqmp_r5_core)); + if (!r5_rproc) { + dev_err(cdev, "failed to allocate memory for rproc instance\n"); + return ERR_PTR(-ENOMEM); + } + + r5_rproc->auto_boot = false; + r5_core = (struct zynqmp_r5_core *)r5_rproc->priv; + r5_core->dev = cdev; + r5_core->np = dev_of_node(cdev); + if (!r5_core->np) { + dev_err(cdev, "can't get device node for r5 core\n"); + ret = -EINVAL; + goto free_rproc; + } + + /* Add R5 remoteproc core */ + ret = rproc_add(r5_rproc); + if (ret) { + dev_err(cdev, "failed to add r5 remoteproc\n"); + goto free_rproc; + } + + r5_core->rproc = r5_rproc; + return r5_core; + +free_rproc: + rproc_free(r5_rproc); + return ERR_PTR(ret); +} + +/** + * zynqmp_r5_get_tcm_node() + * Ideally this function should parse tcm node and store information + * in r5_core instance. For now, Hardcoded TCM information is used. + * This approach is used as TCM bindings for system-dt is being developed + * + * @cluster: pointer to zynqmp_r5_cluster type object + * + * Return: 0 for success and < 0 error code for failure. 
+ */ +static int zynqmp_r5_get_tcm_node(struct zynqmp_r5_cluster *cluster) +{ + struct device *dev = cluster->dev; + struct zynqmp_r5_core *r5_core; + int tcm_bank_count, tcm_node; + int i, j; + + tcm_bank_count = ARRAY_SIZE(zynqmp_tcm_banks); + + /* count per core tcm banks */ + tcm_bank_count = tcm_bank_count / cluster->core_count; + + /* + * r5 core 0 will use all of TCM banks in lockstep mode. + * In split mode, r5 core0 will use 128k and r5 core1 will use another + * 128k. Assign TCM banks to each core accordingly + */ + tcm_node = 0; + for (i = 0; i < cluster->core_count; i++) { + r5_core = cluster->r5_cores[i]; + r5_core->tcm_banks = devm_kcalloc(dev, tcm_bank_count, + sizeof(struct mem_bank_data *), + GFP_KERNEL); + if (!r5_core->tcm_banks) + return -ENOMEM; + + for (j = 0; j < tcm_bank_count; j++) { + /* + * Use pre-defined TCM reg values. + * Eventually this should be replaced by values + * parsed from dts. + */ + r5_core->tcm_banks[j] = + (struct mem_bank_data *)&zynqmp_tcm_banks[tcm_node]; + tcm_node++; + } + + r5_core->tcm_bank_count = tcm_bank_count; + } + + return 0; +} + +/** + * zynqmp_r5_get_mem_region_node() + * parse memory-region property and get reserved mem regions + * + * @r5_core: pointer to zynqmp_r5_core type object + * + * Return: 0 for success and error code for failure. + */ +static int zynqmp_r5_get_mem_region_node(struct zynqmp_r5_core *r5_core) +{ + struct device_node *np, *rmem_np; + struct reserved_mem **rmem; + int res_mem_count, i; + struct device *dev; + + dev = r5_core->dev; + np = r5_core->np; + + res_mem_count = of_property_count_elems_of_size(np, "memory-region", + sizeof(phandle)); + if (res_mem_count <= 0) { + dev_warn(dev, "failed to get memory-region property %d\n", + res_mem_count); + return 0; + } + + rmem = devm_kcalloc(dev, res_mem_count, + sizeof(struct reserved_mem *), GFP_KERNEL); + if (!rmem) + return -ENOMEM; + + for (i = 0; i < res_mem_count; i++) { + rmem_np = of_parse_phandle(np, "memory-region", i); + if (!rmem_np) + goto release_rmem; + + rmem[i] = of_reserved_mem_lookup(rmem_np); + if (!rmem[i]) { + of_node_put(rmem_np); + goto release_rmem; + } + + of_node_put(rmem_np); + } + + r5_core->rmem_count = res_mem_count; + r5_core->rmem = rmem; + return 0; + +release_rmem: + return -EINVAL; +} + +/* + * zynqmp_r5_core_init() + * Create and initialize zynqmp_r5_core type object + * + * @cluster: pointer to zynqmp_r5_cluster type object + * @fw_reg_val: value expected by firmware to configure RPU cluster mode + * @tcm_mode: value expected by fw to configure TCM mode (lockstep or split) + * + * Return: 0 for success and error code for failure. 
+ */ +static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster, + enum rpu_oper_mode fw_reg_val, + enum rpu_tcm_comb tcm_mode) +{ + struct device *dev = cluster->dev; + struct zynqmp_r5_core *r5_core; + int ret, i; + + ret = zynqmp_r5_get_tcm_node(cluster); + if (ret < 0) { + dev_err(dev, "can't get tcm node, err %d\n", ret); + return ret; + } + + for (i = 0; i < cluster->core_count; i++) { + r5_core = cluster->r5_cores[i]; + + ret = zynqmp_r5_get_mem_region_node(r5_core); + if (ret) + dev_warn(dev, "memory-region prop failed %d\n", ret); + + /* Initialize r5 cores with power-domains parsed from dts */ + ret = of_property_read_u32_index(r5_core->np, "power-domains", + 1, &r5_core->pm_domain_id); + if (ret) { + dev_err(dev, "failed to get power-domains property\n"); + return ret; + } + + ret = zynqmp_r5_set_mode(r5_core, fw_reg_val, tcm_mode); + if (ret) { + dev_err(dev, "failed to set r5 cluster mode %d, err %d\n", + cluster->mode, ret); + return ret; + } + } + + return 0; +} + +/* + * zynqmp_r5_cluster_init() + * Create and initialize zynqmp_r5_cluster type object + * + * @cluster: pointer to zynqmp_r5_cluster type object + * + * Return: 0 for success and error code for failure. + */ +static int zynqmp_r5_cluster_init(struct zynqmp_r5_cluster *cluster) +{ + enum zynqmp_r5_cluster_mode cluster_mode = LOCKSTEP_MODE; + struct device *dev = cluster->dev; + struct device_node *dev_node = dev_of_node(dev); + struct platform_device *child_pdev; + struct zynqmp_r5_core **r5_cores; + enum rpu_oper_mode fw_reg_val; + struct device **child_devs; + struct device_node *child; + enum rpu_tcm_comb tcm_mode; + int core_count, ret, i; + + ret = of_property_read_u32(dev_node, "xlnx,cluster-mode", &cluster_mode); + + /* + * on success returns 0, if not defined then returns -EINVAL, + * In that case, default is LOCKSTEP mode. Other than that + * returns relative error code < 0. + */ + if (ret != -EINVAL && ret != 0) { + dev_err(dev, "Invalid xlnx,cluster-mode property\n"); + return ret; + } + + /* + * For now driver only supports split mode and lockstep mode. + * fail driver probe if either of that is not set in dts. + */ + if (cluster_mode == LOCKSTEP_MODE) { + tcm_mode = PM_RPU_TCM_COMB; + fw_reg_val = PM_RPU_MODE_LOCKSTEP; + } else if (cluster_mode == SPLIT_MODE) { + tcm_mode = PM_RPU_TCM_SPLIT; + fw_reg_val = PM_RPU_MODE_SPLIT; + } else { + dev_err(dev, "driver does not support cluster mode %d\n", cluster_mode); + return -EINVAL; + } + + /* + * Number of cores is decided by number of child nodes of + * r5f subsystem node in dts. If Split mode is used in dts + * 2 child nodes are expected. + * In lockstep mode if two child nodes are available, + * only use first child node and consider it as core0 + * and ignore core1 dt node. 
+ */ + core_count = of_get_available_child_count(dev_node); + if (core_count == 0) { + dev_err(dev, "Invalid number of r5 cores %d", core_count); + return -EINVAL; + } else if (cluster_mode == SPLIT_MODE && core_count != 2) { + dev_err(dev, "Invalid number of r5 cores for split mode\n"); + return -EINVAL; + } else if (cluster_mode == LOCKSTEP_MODE && core_count == 2) { + dev_warn(dev, "Only r5 core0 will be used\n"); + core_count = 1; + } + + child_devs = kcalloc(core_count, sizeof(struct device *), GFP_KERNEL); + if (!child_devs) + return -ENOMEM; + + r5_cores = kcalloc(core_count, + sizeof(struct zynqmp_r5_core *), GFP_KERNEL); + if (!r5_cores) { + kfree(child_devs); + return -ENOMEM; + } + + i = 0; + for_each_available_child_of_node(dev_node, child) { + child_pdev = of_find_device_by_node(child); + if (!child_pdev) { + of_node_put(child); + ret = -ENODEV; + goto release_r5_cores; + } + + child_devs[i] = &child_pdev->dev; + + /* create and add remoteproc instance of type struct rproc */ + r5_cores[i] = zynqmp_r5_add_rproc_core(&child_pdev->dev); + if (IS_ERR(r5_cores[i])) { + of_node_put(child); + ret = PTR_ERR(r5_cores[i]); + r5_cores[i] = NULL; + goto release_r5_cores; + } + + /* + * If two child nodes are available in dts in lockstep mode, + * then ignore second child node. + */ + if (cluster_mode == LOCKSTEP_MODE) { + of_node_put(child); + break; + } + + i++; + } + + cluster->mode = cluster_mode; + cluster->core_count = core_count; + cluster->r5_cores = r5_cores; + + ret = zynqmp_r5_core_init(cluster, fw_reg_val, tcm_mode); + if (ret < 0) { + dev_err(dev, "failed to init r5 core err %d\n", ret); + cluster->core_count = 0; + cluster->r5_cores = NULL; + + /* + * at this point rproc resources for each core are allocated. + * adjust index to free resources in reverse order + */ + i = core_count - 1; + goto release_r5_cores; + } + + kfree(child_devs); + return 0; + +release_r5_cores: + while (i >= 0) { + put_device(child_devs[i]); + if (r5_cores[i]) { + of_reserved_mem_device_release(r5_cores[i]->dev); + rproc_del(r5_cores[i]->rproc); + rproc_free(r5_cores[i]->rproc); + } + i--; + } + kfree(r5_cores); + kfree(child_devs); + return ret; +} + +static void zynqmp_r5_cluster_exit(void *data) +{ + struct platform_device *pdev = (struct platform_device *)data; + struct zynqmp_r5_cluster *cluster; + struct zynqmp_r5_core *r5_core; + int i; + + cluster = (struct zynqmp_r5_cluster *)platform_get_drvdata(pdev); + if (!cluster) + return; + + for (i = 0; i < cluster->core_count; i++) { + r5_core = cluster->r5_cores[i]; + of_reserved_mem_device_release(r5_core->dev); + put_device(r5_core->dev); + rproc_del(r5_core->rproc); + rproc_free(r5_core->rproc); + } + + kfree(cluster->r5_cores); + kfree(cluster); + platform_set_drvdata(pdev, NULL); +} + +/* + * zynqmp_r5_remoteproc_probe() + * parse device-tree, initialize hardware and allocate required resources + * and remoteproc ops + * + * @pdev: domain platform device for R5 cluster + * + * Return: 0 for success and < 0 for failure. 
+ */ +static int zynqmp_r5_remoteproc_probe(struct platform_device *pdev) +{ + struct zynqmp_r5_cluster *cluster; + struct device *dev = &pdev->dev; + int ret; + + cluster = kzalloc(sizeof(*cluster), GFP_KERNEL); + if (!cluster) + return -ENOMEM; + + cluster->dev = dev; + + ret = devm_of_platform_populate(dev); + if (ret) { + dev_err_probe(dev, ret, "failed to populate platform dev\n"); + kfree(cluster); + return ret; + } + + /* wire in so each core can be cleaned up at driver remove */ + platform_set_drvdata(pdev, cluster); + + ret = zynqmp_r5_cluster_init(cluster); + if (ret) { + kfree(cluster); + platform_set_drvdata(pdev, NULL); + dev_err_probe(dev, ret, "Invalid r5f subsystem device tree\n"); + return ret; + } + + ret = devm_add_action_or_reset(dev, zynqmp_r5_cluster_exit, pdev); + if (ret) + return ret; + + return 0; +} + +/* Match table for OF platform binding */ +static const struct of_device_id zynqmp_r5_remoteproc_match[] = { + { .compatible = "xlnx,zynqmp-r5fss", }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(of, zynqmp_r5_remoteproc_match); + +static struct platform_driver zynqmp_r5_remoteproc_driver = { + .probe = zynqmp_r5_remoteproc_probe, + .driver = { + .name = "zynqmp_r5_remoteproc", + .of_match_table = zynqmp_r5_remoteproc_match, + }, +}; +module_platform_driver(zynqmp_r5_remoteproc_driver); + +MODULE_DESCRIPTION("Xilinx R5F remote processor driver"); +MODULE_AUTHOR("Xilinx Inc."); +MODULE_LICENSE("GPL"); diff --git a/include/dt-bindings/power/xlnx-zynqmp-power.h b/include/dt-bindings/power/xlnx-zynqmp-power.h index 0d9a412fd5e0..618024cbb20d 100644 --- a/include/dt-bindings/power/xlnx-zynqmp-power.h +++ b/include/dt-bindings/power/xlnx-zynqmp-power.h @@ -6,6 +6,12 @@ #ifndef _DT_BINDINGS_ZYNQMP_POWER_H #define _DT_BINDINGS_ZYNQMP_POWER_H +#define PD_RPU_0 7 +#define PD_RPU_1 8 +#define PD_R5_0_ATCM 15 +#define PD_R5_0_BTCM 16 +#define PD_R5_1_ATCM 17 +#define PD_R5_1_BTCM 18 #define PD_USB_0 22 #define PD_USB_1 23 #define PD_TTC_0 24 diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index fac37680ffe7..b986e267d149 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -12,6 +12,7 @@ #ifndef __FIRMWARE_ZYNQMP_H__ #define __FIRMWARE_ZYNQMP_H__ +#include <linux/types.h> #include <linux/err.h> @@ -87,6 +88,8 @@ enum pm_api_cb_id { enum pm_api_id { PM_GET_API_VERSION = 1, PM_REGISTER_NOTIFIER = 5, + PM_FORCE_POWERDOWN = 8, + PM_REQUEST_WAKEUP = 10, PM_SYSTEM_SHUTDOWN = 12, PM_REQUEST_NODE = 13, PM_RELEASE_NODE = 14, @@ -135,6 +138,10 @@ enum pm_ret_status { }; enum pm_ioctl_id { + IOCTL_GET_RPU_OPER_MODE = 0, + IOCTL_SET_RPU_OPER_MODE = 1, + IOCTL_RPU_BOOT_ADDR_CONFIG = 2, + IOCTL_TCM_COMB_CONFIG = 3, IOCTL_SET_TAPDELAY_BYPASS = 4, IOCTL_SD_DLL_RESET = 6, IOCTL_SET_SD_TAPDELAY = 7, @@ -176,6 +183,21 @@ enum pm_query_id { PM_QID_CLOCK_GET_MAX_DIVISOR = 13, }; +enum rpu_oper_mode { + PM_RPU_MODE_LOCKSTEP = 0, + PM_RPU_MODE_SPLIT = 1, +}; + +enum rpu_boot_mem { + PM_RPU_BOOTMEM_LOVEC = 0, + PM_RPU_BOOTMEM_HIVEC = 1, +}; + +enum rpu_tcm_comb { + PM_RPU_TCM_SPLIT = 0, + PM_RPU_TCM_COMB = 1, +}; + enum zynqmp_pm_reset_action { PM_RESET_ACTION_RELEASE = 0, PM_RESET_ACTION_ASSERT = 1, @@ -516,6 +538,15 @@ int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id); int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value); int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload); int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset); +int 
zynqmp_pm_force_pwrdwn(const u32 target, + const enum zynqmp_pm_request_ack ack); +int zynqmp_pm_request_wake(const u32 node, + const bool set_addr, + const u64 address, + const enum zynqmp_pm_request_ack ack); +int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode); +int zynqmp_pm_set_rpu_mode(u32 node_id, u32 arg1); +int zynqmp_pm_set_tcm_config(u32 node_id, u32 arg1); int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value); int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config, u32 value); @@ -795,6 +826,35 @@ static inline int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset) return -ENODEV; } +static inline int zynqmp_pm_force_pwrdwn(const u32 target, + const enum zynqmp_pm_request_ack ack) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_request_wake(const u32 node, + const bool set_addr, + const u64 address, + const enum zynqmp_pm_request_ack ack) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_set_rpu_mode(u32 node_id, u32 arg1) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_set_tcm_config(u32 node_id, u32 arg1) +{ + return -ENODEV; +} + static inline int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value) |
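
The header additions above only declare the new firmware interface; the call order is easiest to see end to end. Below is a minimal, hypothetical sketch (not part of the patch) of how a kernel driver could chain the ZynqMP EEMI calls exported by this series — set the RPU operating mode, split the TCM, power a TCM bank, wake the core, and later force it down — roughly following what xlnx_r5_remoteproc.c does. The example_* function names are illustrative assumptions, as is using the PD_RPU_0/PD_R5_0_ATCM constants directly; the real driver parses these node IDs from the power-domains property instead.

// Hypothetical usage sketch of the firmware calls added/used by this series.
// Assumes split mode and core 0 only; error unwinding is elided for brevity.

#include <dt-bindings/power/xlnx-zynqmp-power.h>
#include <linux/firmware/xlnx-zynqmp.h>

static int example_boot_rpu0_split(void)
{
	int ret;

	/* Put the RPU cluster in split mode and split the TCM banks */
	ret = zynqmp_pm_set_rpu_mode(PD_RPU_0, PM_RPU_MODE_SPLIT);
	if (ret)
		return ret;

	ret = zynqmp_pm_set_tcm_config(PD_RPU_0, PM_RPU_TCM_SPLIT);
	if (ret)
		return ret;

	/* Power up core 0's ATCM bank before loading firmware into it */
	ret = zynqmp_pm_request_node(PD_R5_0_ATCM, ZYNQMP_PM_CAPABILITY_ACCESS,
				     0, ZYNQMP_PM_REQUEST_ACK_BLOCKING);
	if (ret)
		return ret;

	/* Release the core from reset, booting from the low exception vectors */
	return zynqmp_pm_request_wake(PD_RPU_0, 1, PM_RPU_BOOTMEM_LOVEC,
				      ZYNQMP_PM_REQUEST_ACK_NO);
}

static void example_stop_rpu0(void)
{
	/* Force the core down and hand its TCM bank back to the firmware */
	zynqmp_pm_force_pwrdwn(PD_RPU_0, ZYNQMP_PM_REQUEST_ACK_BLOCKING);
	zynqmp_pm_release_node(PD_R5_0_ATCM);
}

In the driver itself the wake address argument is chosen between PM_RPU_BOOTMEM_LOVEC and PM_RPU_BOOTMEM_HIVEC based on rproc->bootaddr, and every TCM bank used by the selected cluster mode is requested and released individually, as shown in add_tcm_carveout_split_mode() and zynqmp_r5_rproc_unprepare() above.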