From dfa25e9f0f9a41bc7dae42e1f57e7bbab10d8cc0 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Thu, 16 Sep 2021 11:33:34 +0100 Subject: firmware: arm_scmi: Review some virtio log messages Be more verbose avoiding to use _once flavour of dev_info/_err/_notice. Remove usage of __func_ to identify which vqueue is referred in some error messages and explicitly name the TX/RX vqueue. Link: https://lore.kernel.org/r/20210916103336.7243-1-cristian.marussi@arm.com Cc: "Michael S. Tsirkin" Cc: Sudeep Holla Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/virtio.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c index 87039c5c03fd..c30f82cc59ac 100644 --- a/drivers/firmware/arm_scmi/virtio.c +++ b/drivers/firmware/arm_scmi/virtio.c @@ -95,7 +95,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC); if (rc) - dev_err_once(dev, "failed to add to virtqueue (%d)\n", rc); + dev_err(dev, "failed to add to RX virtqueue (%d)\n", rc); else virtqueue_kick(vioch->vqueue); @@ -193,8 +193,8 @@ static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo) static int virtio_link_supplier(struct device *dev) { if (!scmi_vdev) { - dev_notice_once(dev, - "Deferring probe after not finding a bound scmi-virtio device\n"); + dev_notice(dev, + "Deferring probe after not finding a bound scmi-virtio device\n"); return -EPROBE_DEFER; } @@ -334,9 +334,8 @@ static int virtio_send_message(struct scmi_chan_info *cinfo, rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC); if (rc) { list_add(&msg->list, &vioch->free_list); - dev_err_once(vioch->cinfo->dev, - "%s() failed to add to virtqueue (%d)\n", __func__, - rc); + dev_err(vioch->cinfo->dev, + "failed to add to TX virtqueue (%d)\n", rc); } else { virtqueue_kick(vioch->vqueue); } @@ -427,10 +426,10 @@ static int scmi_vio_probe(struct virtio_device *vdev) sz /= DESCRIPTORS_PER_TX_MSG; if (sz > MSG_TOKEN_MAX) { - dev_info_once(dev, - "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n", - channels[i].is_rx ? "rx" : "tx", - sz, MSG_TOKEN_MAX); + dev_info(dev, + "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n", + channels[i].is_rx ? "rx" : "tx", + sz, MSG_TOKEN_MAX); sz = MSG_TOKEN_MAX; } channels[i].max_msg = sz; -- cgit v1.2.3 From 5f90f189a052f6fc46048f6ce29a37b709548b81 Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Thu, 28 Oct 2021 16:00:09 +0200 Subject: firmware: arm_scmi: Add optee transport Add a new transport channel to the SCMI firmware interface driver for SCMI message exchange based on optee transport channel. The optee transport is realized by connecting and invoking OP-TEE SCMI service interface PTA. Optee transport support (CONFIG_ARM_SCMI_TRANSPORT_OPTEE) is default enabled when optee driver (CONFIG_OPTEE) is enabled. Effective optee transport is setup upon OP-TEE SCMI service discovery at optee device initialization. For this SCMI UUID is registered to the optee bus for probing. This is done from the link_supplier operator of the SCMI optee transport. The optee transport can use a statically defined shared memory in which case SCMI device tree node defines it using an "arm,scmi-shmem" compatible phandle through property shmem. 
Alternatively, optee transport allocates the shared memory buffer from the optee driver when no shmem property is defined. The protocol used to exchange SCMI message over that shared memory is negotiated between optee transport driver and the OP-TEE service through capabilities exchange. OP-TEE SCMI service is integrated in OP-TEE since its release tag 3.13.0. The service interface is published in [1]. Link: [1] https://github.com/OP-TEE/optee_os/blob/3.13.0/lib/libutee/include/pta_scmi_client.h Link: https://lore.kernel.org/r/20211028140009.23331-2-etienne.carriere@linaro.org Cc: Cristian Marussi Cc: Sudeep Holla Reviewed-by: Cristian Marussi Signed-off-by: Etienne Carriere Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/Kconfig | 12 + drivers/firmware/arm_scmi/Makefile | 1 + drivers/firmware/arm_scmi/common.h | 3 + drivers/firmware/arm_scmi/driver.c | 3 + drivers/firmware/arm_scmi/optee.c | 581 +++++++++++++++++++++++++++++++++++++ 5 files changed, 600 insertions(+) create mode 100644 drivers/firmware/arm_scmi/optee.c (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig index 3d7081e84853..da1daa593204 100644 --- a/drivers/firmware/arm_scmi/Kconfig +++ b/drivers/firmware/arm_scmi/Kconfig @@ -54,6 +54,18 @@ config ARM_SCMI_TRANSPORT_MAILBOX If you want the ARM SCMI PROTOCOL stack to include support for a transport based on mailboxes, answer Y. +config ARM_SCMI_TRANSPORT_OPTEE + bool "SCMI transport based on OP-TEE service" + depends on OPTEE=y || OPTEE=ARM_SCMI_PROTOCOL + select ARM_SCMI_HAVE_TRANSPORT + select ARM_SCMI_HAVE_SHMEM + default y + help + This enables the OP-TEE service based transport for SCMI. + + If you want the ARM SCMI PROTOCOL stack to include support for a + transport based on OP-TEE SCMI service, answer Y. 
+ config ARM_SCMI_TRANSPORT_SMC bool "SCMI transport based on SMC" depends on HAVE_ARM_SMCCC_DISCOVERY diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index 1dcf123d64ab..ef66ec8ca917 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -6,6 +6,7 @@ scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o +scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \ $(scmi-transport-y) diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index dea1bfbe1052..6438b5248c24 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -421,6 +421,9 @@ extern const struct scmi_desc scmi_smc_desc; #ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO extern const struct scmi_desc scmi_virtio_desc; #endif +#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE +extern const struct scmi_desc scmi_optee_desc; +#endif void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv); void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id); diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index b406b3f78f46..768926a77f5d 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -1994,6 +1994,9 @@ static const struct of_device_id scmi_of_match[] = { #ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX { .compatible = "arm,scmi", .data = &scmi_mailbox_desc }, #endif +#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE + { .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc }, +#endif #ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc}, #endif diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c new file mode 100644 index 000000000000..d9819b0197ec --- /dev/null +++ b/drivers/firmware/arm_scmi/optee.c @@ -0,0 +1,581 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019-2021 Linaro Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" + +#define SCMI_OPTEE_MAX_MSG_SIZE 128 + +enum scmi_optee_pta_cmd { + /* + * PTA_SCMI_CMD_CAPABILITIES - Get channel capabilities + * + * [out] value[0].a: Capability bit mask (enum pta_scmi_caps) + * [out] value[0].b: Extended capabilities or 0 + */ + PTA_SCMI_CMD_CAPABILITIES = 0, + + /* + * PTA_SCMI_CMD_PROCESS_SMT_CHANNEL - Process SCMI message in SMT buffer + * + * [in] value[0].a: Channel handle + * + * Shared memory used for SCMI message/response exhange is expected + * already identified and bound to channel handle in both SCMI agent + * and SCMI server (OP-TEE) parts. + * The memory uses SMT header to carry SCMI meta-data (protocol ID and + * protocol message ID). + */ + PTA_SCMI_CMD_PROCESS_SMT_CHANNEL = 1, + + /* + * PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE - Process SMT/SCMI message + * + * [in] value[0].a: Channel handle + * [in/out] memref[1]: Message/response buffer (SMT and SCMI payload) + * + * Shared memory used for SCMI message/response is a SMT buffer + * referenced by param[1]. It shall be 128 bytes large to fit response + * payload whatever message playload size. 
+ * The memory uses SMT header to carry SCMI meta-data (protocol ID and + * protocol message ID). + */ + PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE = 2, + + /* + * PTA_SCMI_CMD_GET_CHANNEL - Get channel handle + * + * SCMI shm information are 0 if agent expects to use OP-TEE regular SHM + * + * [in] value[0].a: Channel identifier + * [out] value[0].a: Returned channel handle + * [in] value[0].b: Requested capabilities mask (enum pta_scmi_caps) + */ + PTA_SCMI_CMD_GET_CHANNEL = 3, +}; + +/* + * OP-TEE SCMI service capabilities bit flags (32bit) + * + * PTA_SCMI_CAPS_SMT_HEADER + * When set, OP-TEE supports command using SMT header protocol (SCMI shmem) in + * shared memory buffers to carry SCMI protocol synchronisation information. + */ +#define PTA_SCMI_CAPS_NONE 0 +#define PTA_SCMI_CAPS_SMT_HEADER BIT(0) + +/** + * struct scmi_optee_channel - Description of an OP-TEE SCMI channel + * + * @channel_id: OP-TEE channel ID used for this transport + * @tee_session: TEE session identifier + * @caps: OP-TEE SCMI channel capabilities + * @mu: Mutex protection on channel access + * @cinfo: SCMI channel information + * @shmem: Virtual base address of the shared memory + * @tee_shm: Reference to TEE shared memory or NULL if using static shmem + * @link: Reference in agent's channel list + */ +struct scmi_optee_channel { + u32 channel_id; + u32 tee_session; + u32 caps; + struct mutex mu; + struct scmi_chan_info *cinfo; + struct scmi_shared_mem __iomem *shmem; + struct tee_shm *tee_shm; + struct list_head link; +}; + +/** + * struct scmi_optee_agent - OP-TEE transport private data + * + * @dev: Device used for communication with TEE + * @tee_ctx: TEE context used for communication + * @caps: Supported channel capabilities + * @mu: Mutex for protection of @channel_list + * @channel_list: List of all created channels for the agent + */ +struct scmi_optee_agent { + struct device *dev; + struct tee_context *tee_ctx; + u32 caps; + struct mutex mu; + struct list_head channel_list; +}; + +/* There can be only 1 SCMI service in OP-TEE we connect to */ +static struct scmi_optee_agent *scmi_optee_private; + +/* Forward reference to scmi_optee transport initialization */ +static int scmi_optee_init(void); + +/* Open a session toward SCMI OP-TEE service with REE_KERNEL identity */ +static int open_session(struct scmi_optee_agent *agent, u32 *tee_session) +{ + struct device *dev = agent->dev; + struct tee_client_device *scmi_pta = to_tee_client_device(dev); + struct tee_ioctl_open_session_arg arg = { }; + int ret; + + memcpy(arg.uuid, scmi_pta->id.uuid.b, TEE_IOCTL_UUID_LEN); + arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL; + + ret = tee_client_open_session(agent->tee_ctx, &arg, NULL); + if (ret < 0 || arg.ret) { + dev_err(dev, "Can't open tee session: %d / %#x\n", ret, arg.ret); + return -EOPNOTSUPP; + } + + *tee_session = arg.session; + + return 0; +} + +static void close_session(struct scmi_optee_agent *agent, u32 tee_session) +{ + tee_client_close_session(agent->tee_ctx, tee_session); +} + +static int get_capabilities(struct scmi_optee_agent *agent) +{ + struct tee_ioctl_invoke_arg arg = { }; + struct tee_param param[1] = { }; + u32 caps; + u32 tee_session; + int ret; + + ret = open_session(agent, &tee_session); + if (ret) + return ret; + + arg.func = PTA_SCMI_CMD_CAPABILITIES; + arg.session = tee_session; + arg.num_params = 1; + + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT; + + ret = tee_client_invoke_func(agent->tee_ctx, &arg, param); + + close_session(agent, tee_session); + + if (ret < 0 || arg.ret) 
{ + dev_err(agent->dev, "Can't get capabilities: %d / %#x\n", ret, arg.ret); + return -EOPNOTSUPP; + } + + caps = param[0].u.value.a; + + if (!(caps & PTA_SCMI_CAPS_SMT_HEADER)) { + dev_err(agent->dev, "OP-TEE SCMI PTA doesn't support SMT\n"); + return -EOPNOTSUPP; + } + + agent->caps = caps; + + return 0; +} + +static int get_channel(struct scmi_optee_channel *channel) +{ + struct device *dev = scmi_optee_private->dev; + struct tee_ioctl_invoke_arg arg = { }; + struct tee_param param[1] = { }; + unsigned int caps = PTA_SCMI_CAPS_SMT_HEADER; + int ret; + + arg.func = PTA_SCMI_CMD_GET_CHANNEL; + arg.session = channel->tee_session; + arg.num_params = 1; + + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT; + param[0].u.value.a = channel->channel_id; + param[0].u.value.b = caps; + + ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param); + + if (ret || arg.ret) { + dev_err(dev, "Can't get channel with caps %#x: %d / %#x\n", caps, ret, arg.ret); + return -EOPNOTSUPP; + } + + /* From now on use channel identifer provided by OP-TEE SCMI service */ + channel->channel_id = param[0].u.value.a; + channel->caps = caps; + + return 0; +} + +static int invoke_process_smt_channel(struct scmi_optee_channel *channel) +{ + struct tee_ioctl_invoke_arg arg = { }; + struct tee_param param[2] = { }; + int ret; + + arg.session = channel->tee_session; + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; + param[0].u.value.a = channel->channel_id; + + if (channel->tee_shm) { + param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT; + param[1].u.memref.shm = channel->tee_shm; + param[1].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE; + arg.num_params = 2; + arg.func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE; + } else { + arg.num_params = 1; + arg.func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL; + } + + ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param); + if (ret < 0 || arg.ret) { + dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n", + channel->channel_id, ret, arg.ret); + return -EIO; + } + + return 0; +} + +static int scmi_optee_link_supplier(struct device *dev) +{ + if (!scmi_optee_private) { + if (scmi_optee_init()) + dev_dbg(dev, "Optee bus not yet ready\n"); + + /* Wait for optee bus */ + return -EPROBE_DEFER; + } + + if (!device_link_add(dev, scmi_optee_private->dev, DL_FLAG_AUTOREMOVE_CONSUMER)) { + dev_err(dev, "Adding link to supplier optee device failed\n"); + return -ECANCELED; + } + + return 0; +} + +static bool scmi_optee_chan_available(struct device *dev, int idx) +{ + u32 channel_id; + + return !of_property_read_u32_index(dev->of_node, "linaro,optee-channel-id", + idx, &channel_id); +} + +static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo) +{ + struct scmi_optee_channel *channel = cinfo->transport_info; + + shmem_clear_channel(channel->shmem); +} + +static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *channel) +{ + const size_t msg_size = SCMI_OPTEE_MAX_MSG_SIZE; + + channel->tee_shm = tee_shm_alloc_kernel_buf(scmi_optee_private->tee_ctx, msg_size); + if (IS_ERR(channel->tee_shm)) { + dev_err(channel->cinfo->dev, "shmem allocation failed\n"); + return -ENOMEM; + } + + channel->shmem = (void *)tee_shm_get_va(channel->tee_shm, 0); + memset(channel->shmem, 0, msg_size); + shmem_clear_channel(channel->shmem); + + return 0; +} + +static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo, + struct scmi_optee_channel *channel) +{ + struct device_node *np; + resource_size_t size; + struct resource 
res; + int ret; + + np = of_parse_phandle(cinfo->dev->of_node, "shmem", 0); + if (!of_device_is_compatible(np, "arm,scmi-shmem")) { + ret = -ENXIO; + goto out; + } + + ret = of_address_to_resource(np, 0, &res); + if (ret) { + dev_err(dev, "Failed to get SCMI Tx shared memory\n"); + goto out; + } + + size = resource_size(&res); + + channel->shmem = devm_ioremap(dev, res.start, size); + if (!channel->shmem) { + dev_err(dev, "Failed to ioremap SCMI Tx shared memory\n"); + ret = -EADDRNOTAVAIL; + goto out; + } + + ret = 0; + +out: + of_node_put(np); + + return ret; +} + +static int setup_shmem(struct device *dev, struct scmi_chan_info *cinfo, + struct scmi_optee_channel *channel) +{ + if (of_find_property(cinfo->dev->of_node, "shmem", NULL)) + return setup_static_shmem(dev, cinfo, channel); + else + return setup_dynamic_shmem(dev, channel); +} + +static int scmi_optee_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx) +{ + struct scmi_optee_channel *channel; + uint32_t channel_id; + int ret; + + if (!tx) + return -ENODEV; + + channel = devm_kzalloc(dev, sizeof(*channel), GFP_KERNEL); + if (!channel) + return -ENOMEM; + + ret = of_property_read_u32_index(cinfo->dev->of_node, "linaro,optee-channel-id", + 0, &channel_id); + if (ret) + return ret; + + cinfo->transport_info = channel; + channel->cinfo = cinfo; + channel->channel_id = channel_id; + mutex_init(&channel->mu); + + ret = setup_shmem(dev, cinfo, channel); + if (ret) + return ret; + + ret = open_session(scmi_optee_private, &channel->tee_session); + if (ret) + goto err_free_shm; + + ret = get_channel(channel); + if (ret) + goto err_close_sess; + + mutex_lock(&scmi_optee_private->mu); + list_add(&channel->link, &scmi_optee_private->channel_list); + mutex_unlock(&scmi_optee_private->mu); + + return 0; + +err_close_sess: + close_session(scmi_optee_private, channel->tee_session); +err_free_shm: + if (channel->tee_shm) + tee_shm_free(channel->tee_shm); + + return ret; +} + +static int scmi_optee_chan_free(int id, void *p, void *data) +{ + struct scmi_chan_info *cinfo = p; + struct scmi_optee_channel *channel = cinfo->transport_info; + + mutex_lock(&scmi_optee_private->mu); + list_del(&channel->link); + mutex_unlock(&scmi_optee_private->mu); + + close_session(scmi_optee_private, channel->tee_session); + + if (channel->tee_shm) { + tee_shm_free(channel->tee_shm); + channel->tee_shm = NULL; + } + + cinfo->transport_info = NULL; + channel->cinfo = NULL; + + scmi_free_channel(cinfo, data, id); + + return 0; +} + +static struct scmi_shared_mem *get_channel_shm(struct scmi_optee_channel *chan, + struct scmi_xfer *xfer) +{ + if (!chan) + return NULL; + + return chan->shmem; +} + + +static int scmi_optee_send_message(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_optee_channel *channel = cinfo->transport_info; + struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer); + int ret; + + mutex_lock(&channel->mu); + shmem_tx_prepare(shmem, xfer); + + ret = invoke_process_smt_channel(channel); + + scmi_rx_callback(cinfo, shmem_read_header(shmem), NULL); + mutex_unlock(&channel->mu); + + return ret; +} + +static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_optee_channel *channel = cinfo->transport_info; + struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer); + + shmem_fetch_response(shmem, xfer); +} + +static bool scmi_optee_poll_done(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_optee_channel *channel = 
cinfo->transport_info; + struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer); + + return shmem_poll_done(shmem, xfer); +} + +static struct scmi_transport_ops scmi_optee_ops = { + .link_supplier = scmi_optee_link_supplier, + .chan_available = scmi_optee_chan_available, + .chan_setup = scmi_optee_chan_setup, + .chan_free = scmi_optee_chan_free, + .send_message = scmi_optee_send_message, + .fetch_response = scmi_optee_fetch_response, + .clear_channel = scmi_optee_clear_channel, + .poll_done = scmi_optee_poll_done, +}; + +static int scmi_optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data) +{ + return ver->impl_id == TEE_IMPL_ID_OPTEE; +} + +static int scmi_optee_service_probe(struct device *dev) +{ + struct scmi_optee_agent *agent; + struct tee_context *tee_ctx; + int ret; + + /* Only one SCMI OP-TEE device allowed */ + if (scmi_optee_private) { + dev_err(dev, "An SCMI OP-TEE device was already initialized: only one allowed\n"); + return -EBUSY; + } + + tee_ctx = tee_client_open_context(NULL, scmi_optee_ctx_match, NULL, NULL); + if (IS_ERR(tee_ctx)) + return -ENODEV; + + agent = devm_kzalloc(dev, sizeof(*agent), GFP_KERNEL); + if (!agent) { + ret = -ENOMEM; + goto err; + } + + agent->dev = dev; + agent->tee_ctx = tee_ctx; + INIT_LIST_HEAD(&agent->channel_list); + + ret = get_capabilities(agent); + if (ret) + goto err; + + /* Ensure agent resources are all visible before scmi_optee_private is */ + smp_mb(); + scmi_optee_private = agent; + + return 0; + +err: + tee_client_close_context(tee_ctx); + + return ret; +} + +static int scmi_optee_service_remove(struct device *dev) +{ + struct scmi_optee_agent *agent = scmi_optee_private; + + if (!scmi_optee_private) + return -EINVAL; + + if (!list_empty(&scmi_optee_private->channel_list)) + return -EBUSY; + + /* Ensure cleared reference is visible before resources are released */ + smp_store_mb(scmi_optee_private, NULL); + + tee_client_close_context(agent->tee_ctx); + + return 0; +} + +static const struct tee_client_device_id scmi_optee_service_id[] = { + { + UUID_INIT(0xa8cfe406, 0xd4f5, 0x4a2e, + 0x9f, 0x8d, 0xa2, 0x5d, 0xc7, 0x54, 0xc0, 0x99) + }, + { } +}; + +MODULE_DEVICE_TABLE(tee, scmi_optee_service_id); + +static struct tee_client_driver scmi_optee_driver = { + .id_table = scmi_optee_service_id, + .driver = { + .name = "scmi-optee", + .bus = &tee_bus_type, + .probe = scmi_optee_service_probe, + .remove = scmi_optee_service_remove, + }, +}; + +static int scmi_optee_init(void) +{ + return driver_register(&scmi_optee_driver.driver); +} + +static void scmi_optee_exit(void) +{ + if (scmi_optee_private) + driver_unregister(&scmi_optee_driver.driver); +} + +const struct scmi_desc scmi_optee_desc = { + .transport_exit = scmi_optee_exit, + .ops = &scmi_optee_ops, + .max_rx_timeout_ms = 30, + .max_msg = 20, + .max_msg_size = SCMI_OPTEE_MAX_MSG_SIZE, +}; -- cgit v1.2.3 From 530897ecdb3d69d71757c353a003e7138a791bcc Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 15 Nov 2021 10:29:10 +0000 Subject: firmware: arm_scmi: Make virtio Version_1 compliance optional Introduce a compilation option to disable strict enforcement of compliance against VirtIO Version_1 backends, so as to allow to support also Legacy VirtIO devices implementations. 
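For reference, the same compile-time gate could also be expressed without preprocessor conditionals via the IS_ENABLED() idiom; the snippet below is only an illustrative alternative to the #ifdef actually used by the patch:

static int scmi_vio_validate(struct virtio_device *vdev)
{
	/* Enforce the Version 1 check only when strict compliance is configured in */
	if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE) &&
	    !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev,
			"device does not comply with spec version 1.x\n");
		return -EINVAL;
	}

	return 0;
}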
Link: https://lore.kernel.org/r/20211115102910.7639-1-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/Kconfig | 15 +++++++++++++++ drivers/firmware/arm_scmi/virtio.c | 3 ++- 2 files changed, 17 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig index da1daa593204..638ecec89ff1 100644 --- a/drivers/firmware/arm_scmi/Kconfig +++ b/drivers/firmware/arm_scmi/Kconfig @@ -89,6 +89,21 @@ config ARM_SCMI_TRANSPORT_VIRTIO If you want the ARM SCMI PROTOCOL stack to include support for a transport based on VirtIO, answer Y. +config ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE + bool "SCMI VirtIO transport Version 1 compliance" + depends on ARM_SCMI_TRANSPORT_VIRTIO + default y + help + This enforces strict compliance with VirtIO Version 1 specification. + + If you want the ARM SCMI VirtIO transport layer to refuse to work + with Legacy VirtIO backends and instead support only VirtIO Version 1 + devices (or above), answer Y. + + If you want instead to support also old Legacy VirtIO backends (like + the ones implemented by kvmtool) and let the core Kernel VirtIO layer + take care of the needed conversions, say N. + endif #ARM_SCMI_PROTOCOL config ARM_SCMI_POWER_DOMAIN diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c index c30f82cc59ac..fd0f6f91fc0b 100644 --- a/drivers/firmware/arm_scmi/virtio.c +++ b/drivers/firmware/arm_scmi/virtio.c @@ -459,12 +459,13 @@ static void scmi_vio_remove(struct virtio_device *vdev) static int scmi_vio_validate(struct virtio_device *vdev) { +#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { dev_err(&vdev->dev, "device does not comply with spec version 1.x\n"); return -EINVAL; } - +#endif return 0; } -- cgit v1.2.3 From 61bc76be367e9928c2c49fbde9783f4821446482 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 23 Nov 2021 08:36:20 +0000 Subject: firmware: arm_scmi: optee: Fix missing mutex_init() The driver allocates the mutex but not initialize it. Use mutex_init() on it to initialize it correctly. Link: https://lore.kernel.org/r/20211123083620.2366860-1-weiyongjun1@huawei.com Fixes: 5f90f189a052 ("firmware: arm_scmi: Add optee transport") Reported-by: Hulk Robot Reviewed-by: Etienne Carriere Signed-off-by: Wei Yongjun Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/optee.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c index d9819b0197ec..901737c9f5f8 100644 --- a/drivers/firmware/arm_scmi/optee.c +++ b/drivers/firmware/arm_scmi/optee.c @@ -506,6 +506,7 @@ static int scmi_optee_service_probe(struct device *dev) agent->dev = dev; agent->tee_ctx = tee_ctx; INIT_LIST_HEAD(&agent->channel_list); + mutex_init(&agent->mu); ret = get_capabilities(agent); if (ret) -- cgit v1.2.3 From afc9c1e26bc7d3145bd1112d74bbe8d0152da934 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Thu, 25 Nov 2021 15:07:30 +0000 Subject: firmware: arm_scmi: optee: Drop the support for the OPTEE shared dynamic buffer The shared memory buffer allocated by the optee driver is normal cached memory and can't be used with IOMEM APIs used in shmem_*. We currently support only IO memory for shared memory and supporting normal cached memory needs more changes and needs to be thought through properly. 
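As a minimal sketch of that mismatch (the helper names below are made up for illustration): the shmem_*() helpers operate on an __iomem mapping through MMIO accessors, which is not a valid way to access the cached buffer returned by tee_shm_get_va(), so the two cases cannot share the same code without rework.

#include <linux/io.h>
#include <linux/string.h>

/* IO memory case: what the current shmem_*() helpers assume */
static void copy_msg_to_iomem(void __iomem *shmem, const void *msg, size_t len)
{
	memcpy_toio(shmem, msg, len);
}

/* Cached memory case: a tee_shm buffer would need plain accessors instead */
static void copy_msg_to_cached(void *buf, const void *msg, size_t len)
{
	memcpy(buf, msg, len);
}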
So for now, let us drop the support for this OPTEE shared buffer. Link: https://lore.kernel.org/r/20211125150730.188487-1-sudeep.holla@arm.com Cc: Cristian Marussi Cc: Etienne Carriere Reviewed-by: Etienne Carriere Reviewed-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/optee.c | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c index 901737c9f5f8..175b39bcd470 100644 --- a/drivers/firmware/arm_scmi/optee.c +++ b/drivers/firmware/arm_scmi/optee.c @@ -282,23 +282,6 @@ static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo) shmem_clear_channel(channel->shmem); } -static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *channel) -{ - const size_t msg_size = SCMI_OPTEE_MAX_MSG_SIZE; - - channel->tee_shm = tee_shm_alloc_kernel_buf(scmi_optee_private->tee_ctx, msg_size); - if (IS_ERR(channel->tee_shm)) { - dev_err(channel->cinfo->dev, "shmem allocation failed\n"); - return -ENOMEM; - } - - channel->shmem = (void *)tee_shm_get_va(channel->tee_shm, 0); - memset(channel->shmem, 0, msg_size); - shmem_clear_channel(channel->shmem); - - return 0; -} - static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo, struct scmi_optee_channel *channel) { @@ -342,7 +325,7 @@ static int setup_shmem(struct device *dev, struct scmi_chan_info *cinfo, if (of_find_property(cinfo->dev->of_node, "shmem", NULL)) return setup_static_shmem(dev, cinfo, channel); else - return setup_dynamic_shmem(dev, channel); + return -ENOMEM; } static int scmi_optee_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx) -- cgit v1.2.3 From d211ddeb511af5998dbd3e555be0fbe6033459d9 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 29 Nov 2021 19:11:41 +0000 Subject: firmware: arm_scmi: Perform earlier cinfo lookup call in do_xfer Lookup cinfo data early in do_xfer so as to avoid any further init work on xfer structure in case of error. No functional change. 
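The resulting ordering in do_xfer() is simply to resolve the channel first and bail out before any xfer state is touched (abridged from the diff below):

	cinfo = idr_find(&info->tx_idr, pi->proto->id);
	if (unlikely(!cinfo))
		return -EINVAL;

	/* Only from here on is the xfer header initialised */
	xfer->hdr.protocol_id = pi->proto->id;
	reinit_completion(&xfer->done);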
Link: https://lore.kernel.org/r/20211129191156.29322-2-cristian.marussi@arm.com Reviewed-by: Florian Fainelli Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/driver.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 768926a77f5d..3cf161f3bcc7 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -766,6 +766,10 @@ static int do_xfer(const struct scmi_protocol_handle *ph, return -EINVAL; } + cinfo = idr_find(&info->tx_idr, pi->proto->id); + if (unlikely(!cinfo)) + return -EINVAL; + /* * Initialise protocol id now from protocol handle to avoid it being * overridden by mistake (or malice) by the protocol code mangling with @@ -774,10 +778,6 @@ static int do_xfer(const struct scmi_protocol_handle *ph, xfer->hdr.protocol_id = pi->proto->id; reinit_completion(&xfer->done); - cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id); - if (unlikely(!cinfo)) - return -EINVAL; - trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id, xfer->hdr.protocol_id, xfer->hdr.seq, xfer->hdr.poll_completion); -- cgit v1.2.3 From 582730b9cbcc534a39beaf3aa9078e2c431ff39f Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 29 Nov 2021 19:11:42 +0000 Subject: firmware: arm_scmi: Set polling timeout to max_rx_timeout_ms Use transport specific transmission timeout (max_rx_timeout_ms) also for polling transactions. Initially when polling mode was added, it was intended to be used only in scheduler context and hence the choice of 100us for the polling timeout. However the only user for that was dropped for other SCMI concurrency issues, so it shouldn't cause any issue to increase this timeout value now. Link: https://lore.kernel.org/r/20211129191156.29322-3-cristian.marussi@arm.com Reviewed-by: Florian Fainelli Signed-off-by: Cristian Marussi [sudeep.holla: Updated commit message with historical facts about 100us timeout] Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/driver.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 3cf161f3bcc7..568562121f64 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -724,8 +724,6 @@ static void xfer_put(const struct scmi_protocol_handle *ph, __scmi_xfer_put(&info->tx_minfo, xfer); } -#define SCMI_MAX_POLL_TO_NS (100 * NSEC_PER_USEC) - static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer, ktime_t stop) { @@ -799,7 +797,8 @@ static int do_xfer(const struct scmi_protocol_handle *ph, } if (xfer->hdr.poll_completion) { - ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS); + ktime_t stop = ktime_add_ms(ktime_get(), + info->desc->max_rx_timeout_ms); spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop)); if (ktime_before(ktime_get(), stop)) { -- cgit v1.2.3 From 5a731aebd31bf840a93deae12bdfd831513e7211 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 29 Nov 2021 19:11:43 +0000 Subject: firmware: arm_scmi: Refactor message response path Refactor code path waiting for message responses into a dedicated helper function. No functional change. 
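For reference, the busy-wait leg of the new helper is built on spin_until_cond() (a cpu_relax()-based busy-wait macro from linux/processor.h), which keeps re-evaluating its condition until it holds; a self-contained illustration of that pattern (the helper below is hypothetical, not part of the patch):

#include <linux/ktime.h>
#include <linux/processor.h>

/*
 * Spin until done() reports completion or the deadline expires; return true
 * if completion beat the deadline. This mirrors the polling branch below.
 */
static bool spin_for_completion(bool (*done)(void *priv), void *priv, ktime_t stop)
{
	spin_until_cond(done(priv) || ktime_after(ktime_get(), stop));

	return ktime_before(ktime_get(), stop);
}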
Link: https://lore.kernel.org/r/20211129191156.29322-4-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/driver.c | 88 ++++++++++++++++++++++++-------------- 1 file changed, 56 insertions(+), 32 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 568562121f64..9a8d6bfd4ebb 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -738,6 +738,61 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, ktime_after(ktime_get(), stop); } +/** + * scmi_wait_for_message_response - An helper to group all the possible ways of + * waiting for a synchronous message response. + * + * @cinfo: SCMI channel info + * @xfer: Reference to the transfer being waited for. + * + * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on + * configuration flags like xfer->hdr.poll_completion. + * + * Return: 0 on Success, error otherwise. + */ +static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); + struct device *dev = info->dev; + int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms; + + if (xfer->hdr.poll_completion) { + ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms); + + spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop)); + if (ktime_before(ktime_get(), stop)) { + unsigned long flags; + + /* + * Do not fetch_response if an out-of-order delayed + * response is being processed. + */ + spin_lock_irqsave(&xfer->lock, flags); + if (xfer->state == SCMI_XFER_SENT_OK) { + info->desc->ops->fetch_response(cinfo, xfer); + xfer->state = SCMI_XFER_RESP_OK; + } + spin_unlock_irqrestore(&xfer->lock, flags); + } else { + dev_err(dev, + "timed out in resp(caller: %pS) - polling\n", + (void *)_RET_IP_); + ret = -ETIMEDOUT; + } + } else { + /* And we wait for the response. */ + if (!wait_for_completion_timeout(&xfer->done, + msecs_to_jiffies(timeout_ms))) { + dev_err(dev, "timed out in resp(caller: %pS)\n", + (void *)_RET_IP_); + ret = -ETIMEDOUT; + } + } + + return ret; +} + /** * do_xfer() - Do one transfer * @@ -752,7 +807,6 @@ static int do_xfer(const struct scmi_protocol_handle *ph, struct scmi_xfer *xfer) { int ret; - int timeout; const struct scmi_protocol_instance *pi = ph_to_pi(ph); struct scmi_info *info = handle_to_scmi_info(pi->handle); struct device *dev = info->dev; @@ -796,37 +850,7 @@ static int do_xfer(const struct scmi_protocol_handle *ph, return ret; } - if (xfer->hdr.poll_completion) { - ktime_t stop = ktime_add_ms(ktime_get(), - info->desc->max_rx_timeout_ms); - - spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop)); - if (ktime_before(ktime_get(), stop)) { - unsigned long flags; - - /* - * Do not fetch_response if an out-of-order delayed - * response is being processed. - */ - spin_lock_irqsave(&xfer->lock, flags); - if (xfer->state == SCMI_XFER_SENT_OK) { - info->desc->ops->fetch_response(cinfo, xfer); - xfer->state = SCMI_XFER_RESP_OK; - } - spin_unlock_irqrestore(&xfer->lock, flags); - } else { - ret = -ETIMEDOUT; - } - } else { - /* And we wait for the response. 
*/ - timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); - if (!wait_for_completion_timeout(&xfer->done, timeout)) { - dev_err(dev, "timed out in resp(caller: %pS)\n", - (void *)_RET_IP_); - ret = -ETIMEDOUT; - } - } - + ret = scmi_wait_for_message_response(cinfo, xfer); if (!ret && xfer->hdr.status) ret = scmi_to_linux_errno(xfer->hdr.status); -- cgit v1.2.3 From f872af09094c042ac46e64d030f223b63ead5967 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 29 Nov 2021 19:11:45 +0000 Subject: firmware: arm_scmi: Use new trace event scmi_xfer_response_wait Use new trace event to mark start of waiting for response section. Link: https://lore.kernel.org/r/20211129191156.29322-6-cristian.marussi@arm.com Reviewed-by: Florian Fainelli Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/driver.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 9a8d6bfd4ebb..476b91845e40 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -757,6 +757,11 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo, struct device *dev = info->dev; int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms; + trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id, + xfer->hdr.protocol_id, xfer->hdr.seq, + timeout_ms, + xfer->hdr.poll_completion); + if (xfer->hdr.poll_completion) { ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms); -- cgit v1.2.3 From a690b7e6e774b7c43fed37d5bd3b6e037f3b3db9 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 20 Dec 2021 19:56:36 +0000 Subject: firmware: arm_scmi: Add configurable polling mode for transports SCMI communications along TX channels can optionally be provided of a completion interrupt; when such interrupt is not available, command transactions should rely on polling, where the SCMI core takes care to repeatedly evaluate the transport-specific .poll_done() function, if available, to determine if and when a request was fully completed or timed out. Such mechanism is already present and working on a single transfer base: SCMI protocols can indeed enable hdr.poll_completion on specific commands ahead of each transfer and cause that transaction to be handled with polling. Introduce a couple of flags to be able to enforce such polling behaviour globally at will: - scmi_desc.force_polling: to statically switch the whole transport to polling mode. - scmi_chan_info.no_completion_irq: to switch a single channel dynamically to polling mode if, at runtime, is determined that no completion interrupt was available for such channel. 
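As an example of the static knob, a hypothetical transport that never provides a completion interrupt (the descriptor and ops names below are made up) could opt in to core-managed polling wholesale through its descriptor:

/* Hypothetical polling-only transport: its ops provide .poll_done() (not
 * shown) and force_polling makes the core always poll on the TX path.
 */
const struct scmi_desc scmi_foo_desc = {
	.ops = &scmi_foo_ops,		/* assumed defined elsewhere */
	.max_rx_timeout_ms = 30,
	.max_msg = 20,
	.max_msg_size = 128,
	.force_polling = true,
};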
Link: https://lore.kernel.org/r/20211220195646.44498-2-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/common.h | 8 ++++++++ drivers/firmware/arm_scmi/driver.c | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 6438b5248c24..652e5d95ee65 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -339,11 +339,16 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id); * @dev: Reference to device in the SCMI hierarchy corresponding to this * channel * @handle: Pointer to SCMI entity handle + * @no_completion_irq: Flag to indicate that this channel has no completion + * interrupt mechanism for synchronous commands. + * This can be dynamically set by transports at run-time + * inside their provided .chan_setup(). * @transport_info: Transport layer related information */ struct scmi_chan_info { struct device *dev; struct scmi_handle *handle; + bool no_completion_irq; void *transport_info; }; @@ -402,6 +407,8 @@ struct scmi_device *scmi_child_dev_find(struct device *parent, * be pending simultaneously in the system. May be overridden by the * get_max_msg op. * @max_msg_size: Maximum size of data per message that can be handled. + * @force_polling: Flag to force this whole transport to use SCMI core polling + * mechanism instead of completion interrupts even if available. */ struct scmi_desc { int (*transport_init)(void); @@ -410,6 +417,7 @@ struct scmi_desc { int max_rx_timeout_ms; int max_msg; int max_msg_size; + const bool force_polling; }; #ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 476b91845e40..7579f54b0047 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -609,6 +609,24 @@ static inline void scmi_clear_channel(struct scmi_info *info, info->desc->ops->clear_channel(cinfo); } +static inline bool is_polling_required(struct scmi_chan_info *cinfo, + struct scmi_info *info) +{ + return cinfo->no_completion_irq || info->desc->force_polling; +} + +static inline bool is_transport_polling_capable(struct scmi_info *info) +{ + return info->desc->ops->poll_done; +} + +static inline bool is_polling_enabled(struct scmi_chan_info *cinfo, + struct scmi_info *info) +{ + return is_polling_required(cinfo, info) && + is_transport_polling_capable(info); +} + static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv) { @@ -817,6 +835,7 @@ static int do_xfer(const struct scmi_protocol_handle *ph, struct device *dev = info->dev; struct scmi_chan_info *cinfo; + /* Check for polling request on custom command xfers at first */ if (xfer->hdr.poll_completion && !info->desc->ops->poll_done) { dev_warn_once(dev, "Polling mode is not supported by transport.\n"); @@ -827,6 +846,10 @@ static int do_xfer(const struct scmi_protocol_handle *ph, if (unlikely(!cinfo)) return -EINVAL; + /* True ONLY if also supported by transport. 
*/ + if (is_polling_enabled(cinfo, info)) + xfer->hdr.poll_completion = true; + /* * Initialise protocol id now from protocol handle to avoid it being * overridden by mistake (or malice) by the protocol code mangling with @@ -1527,6 +1550,16 @@ static int scmi_chan_setup(struct scmi_info *info, struct device *dev, if (ret) return ret; + if (tx && is_polling_required(cinfo, info)) { + if (is_transport_polling_capable(info)) + dev_info(dev, + "Enabled polling mode TX channel - prot_id:%d\n", + prot_id); + else + dev_warn(dev, + "Polling mode NOT supported by transport.\n"); + } + idr_alloc: ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL); if (ret != prot_id) { -- cgit v1.2.3 From f716cbd33f038af87824c30e165b3b70e4c6be1e Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 20 Dec 2021 19:56:37 +0000 Subject: firmware: arm_scmi: Make smc transport use common completions When a completion irq is available use it and delegate command completion handling to the core SCMI completion mechanism. If no completion irq is available revert to polling, using the core common polling machinery. Link: https://lore.kernel.org/r/20211220195646.44498-3-cristian.marussi@arm.com Reviewed-by: Florian Fainelli Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/smc.c | 39 ++++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c index 4effecc3bb46..d6c6ad9f6bab 100644 --- a/drivers/firmware/arm_scmi/smc.c +++ b/drivers/firmware/arm_scmi/smc.c @@ -25,8 +25,6 @@ * @shmem: Transmit/Receive shared memory area * @shmem_lock: Lock to protect access to Tx/Rx shared memory area * @func_id: smc/hvc call function id - * @irq: Optional; employed when platforms indicates msg completion by intr. - * @tx_complete: Optional, employed only when irq is valid. 
*/ struct scmi_smc { @@ -34,15 +32,14 @@ struct scmi_smc { struct scmi_shared_mem __iomem *shmem; struct mutex shmem_lock; u32 func_id; - int irq; - struct completion tx_complete; }; static irqreturn_t smc_msg_done_isr(int irq, void *data) { struct scmi_smc *scmi_info = data; - complete(&scmi_info->tx_complete); + scmi_rx_callback(scmi_info->cinfo, + shmem_read_header(scmi_info->shmem), NULL); return IRQ_HANDLED; } @@ -111,8 +108,8 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, dev_err(dev, "failed to setup SCMI smc irq\n"); return ret; } - init_completion(&scmi_info->tx_complete); - scmi_info->irq = irq; + } else { + cinfo->no_completion_irq = true; } scmi_info->func_id = func_id; @@ -142,26 +139,22 @@ static int smc_send_message(struct scmi_chan_info *cinfo, struct scmi_smc *scmi_info = cinfo->transport_info; struct arm_smccc_res res; + /* + * Channel lock will be released only once response has been + * surely fully retrieved, so after .mark_txdone() + */ mutex_lock(&scmi_info->shmem_lock); shmem_tx_prepare(scmi_info->shmem, xfer); - if (scmi_info->irq) - reinit_completion(&scmi_info->tx_complete); - arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res); - if (scmi_info->irq) - wait_for_completion(&scmi_info->tx_complete); - - scmi_rx_callback(scmi_info->cinfo, - shmem_read_header(scmi_info->shmem), NULL); - - mutex_unlock(&scmi_info->shmem_lock); - /* Only SMCCC_RET_NOT_SUPPORTED is valid error code */ - if (res.a0) + if (res.a0) { + mutex_unlock(&scmi_info->shmem_lock); return -EOPNOTSUPP; + } + return 0; } @@ -173,6 +166,13 @@ static void smc_fetch_response(struct scmi_chan_info *cinfo, shmem_fetch_response(scmi_info->shmem, xfer); } +static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret) +{ + struct scmi_smc *scmi_info = cinfo->transport_info; + + mutex_unlock(&scmi_info->shmem_lock); +} + static bool smc_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer) { @@ -186,6 +186,7 @@ static const struct scmi_transport_ops scmi_smc_ops = { .chan_setup = smc_chan_setup, .chan_free = smc_chan_free, .send_message = smc_send_message, + .mark_txdone = smc_mark_txdone, .fetch_response = smc_fetch_response, .poll_done = smc_poll_done, }; -- cgit v1.2.3 From 31d2f803c19c1a7ad8d05c20b6cd83e8a647fb5c Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 20 Dec 2021 19:56:38 +0000 Subject: firmware: arm_scmi: Add sync_cmds_completed_on_ret transport flag Add a flag to let the transport signal to the core if its handling of sync command implies that, after .send_message has returned successfully, the requested command can be assumed to be fully and completely executed on SCMI platform side so that any possible response value is already immediately available to be retrieved by a .fetch_response: in other words the polling phase can be skipped in such a case and the response values accessed straight away. Note that all of the above applies only when polling mode of operation was selected by the core: if instead a completion IRQ was found to be available the normal response processing path based on completions will still be followed. 
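Put another way, once polling mode has been selected the core only needs a real poll loop when the transport has not made this guarantee; a condensed sketch of that decision (the helper itself is illustrative, the field names are the ones added here):

static bool needs_real_polling(const struct scmi_desc *desc,
			       const struct scmi_xfer *xfer)
{
	/* Polling requested, but .send_message() did not already complete it */
	return xfer->hdr.poll_completion && !desc->sync_cmds_completed_on_ret;
}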
Link: https://lore.kernel.org/r/20211220195646.44498-4-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/common.h | 8 ++++++++ drivers/firmware/arm_scmi/driver.c | 34 ++++++++++++++++++++++++---------- 2 files changed, 32 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 652e5d95ee65..24b1d1ac5f12 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -409,6 +409,13 @@ struct scmi_device *scmi_child_dev_find(struct device *parent, * @max_msg_size: Maximum size of data per message that can be handled. * @force_polling: Flag to force this whole transport to use SCMI core polling * mechanism instead of completion interrupts even if available. + * @sync_cmds_completed_on_ret: Flag to indicate that the transport assures + * synchronous-command messages are atomically + * completed on .send_message: no need to poll + * actively waiting for a response. + * Used by core internally only when polling is + * selected as a waiting for reply method: i.e. + * if a completion irq was found use that anyway. */ struct scmi_desc { int (*transport_init)(void); @@ -418,6 +425,7 @@ struct scmi_desc { int max_msg; int max_msg_size; const bool force_polling; + const bool sync_cmds_completed_on_ret; }; #ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 7579f54b0047..a1c33d36800b 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -617,7 +617,8 @@ static inline bool is_polling_required(struct scmi_chan_info *cinfo, static inline bool is_transport_polling_capable(struct scmi_info *info) { - return info->desc->ops->poll_done; + return info->desc->ops->poll_done || + info->desc->sync_cmds_completed_on_ret; } static inline bool is_polling_enabled(struct scmi_chan_info *cinfo, @@ -781,10 +782,28 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo, xfer->hdr.poll_completion); if (xfer->hdr.poll_completion) { - ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms); + /* + * Real polling is needed only if transport has NOT declared + * itself to support synchronous commands replies. + */ + if (!info->desc->sync_cmds_completed_on_ret) { + /* + * Poll on xfer using transport provided .poll_done(); + * assumes no completion interrupt was available. + */ + ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms); + + spin_until_cond(scmi_xfer_done_no_timeout(cinfo, + xfer, stop)); + if (ktime_after(ktime_get(), stop)) { + dev_err(dev, + "timed out in resp(caller: %pS) - polling\n", + (void *)_RET_IP_); + ret = -ETIMEDOUT; + } + } - spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop)); - if (ktime_before(ktime_get(), stop)) { + if (!ret) { unsigned long flags; /* @@ -797,11 +816,6 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo, xfer->state = SCMI_XFER_RESP_OK; } spin_unlock_irqrestore(&xfer->lock, flags); - } else { - dev_err(dev, - "timed out in resp(caller: %pS) - polling\n", - (void *)_RET_IP_); - ret = -ETIMEDOUT; } } else { /* And we wait for the response. 
*/ @@ -836,7 +850,7 @@ static int do_xfer(const struct scmi_protocol_handle *ph, struct scmi_chan_info *cinfo; /* Check for polling request on custom command xfers at first */ - if (xfer->hdr.poll_completion && !info->desc->ops->poll_done) { + if (xfer->hdr.poll_completion && !is_transport_polling_capable(info)) { dev_warn_once(dev, "Polling mode is not supported by transport.\n"); return -EINVAL; -- cgit v1.2.3 From 117542b81fe7b12002afc0cfdcf6cdd3ebfc0f18 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 20 Dec 2021 19:56:39 +0000 Subject: firmware: arm_scmi: Make smc support sync_cmds_completed_on_ret Enable sync_cmds_completed_on_ret in the SMC transport descriptor and remove SMC specific .poll_done callback support since polling is bypassed when sync_cmds_completed_on_ret is set. Link: https://lore.kernel.org/r/20211220195646.44498-5-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/smc.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c index d6c6ad9f6bab..df0defd9f8bb 100644 --- a/drivers/firmware/arm_scmi/smc.c +++ b/drivers/firmware/arm_scmi/smc.c @@ -173,14 +173,6 @@ static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret) mutex_unlock(&scmi_info->shmem_lock); } -static bool -smc_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer) -{ - struct scmi_smc *scmi_info = cinfo->transport_info; - - return shmem_poll_done(scmi_info->shmem, xfer); -} - static const struct scmi_transport_ops scmi_smc_ops = { .chan_available = smc_chan_available, .chan_setup = smc_chan_setup, @@ -188,7 +180,6 @@ static const struct scmi_transport_ops scmi_smc_ops = { .send_message = smc_send_message, .mark_txdone = smc_mark_txdone, .fetch_response = smc_fetch_response, - .poll_done = smc_poll_done, }; const struct scmi_desc scmi_smc_desc = { @@ -196,4 +187,13 @@ const struct scmi_desc scmi_smc_desc = { .max_rx_timeout_ms = 30, .max_msg = 20, .max_msg_size = 128, + /* + * Setting .sync_cmds_atomic_replies to true for SMC assumes that, + * once the SMC instruction has completed successfully, the issued + * SCMI command would have been already fully processed by the SCMI + * platform firmware and so any possible response value expected + * for the issued command will be immmediately ready to be fetched + * from the shared memory area. + */ + .sync_cmds_completed_on_ret = true, }; -- cgit v1.2.3 From bf322084fec30b92423911db0169a3610008fc15 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 20 Dec 2021 19:56:40 +0000 Subject: firmware: arm_scmi: Make optee support sync_cmds_completed_on_ret Declare each OPTEE SCMI channel as not having a completion_irq so as to enable polling mode and then enable also .sync_cmds_completed_on_ret flag in the OPTEE transport descriptor so that real polling is itself effectively bypassed on the rx path: once the optee command invocation has successfully returned the core will directly fetch the response from the shared memory area. Remove OPTEE SCMI transport specific .poll_done callback support since real polling is effectively bypassed when .sync_cmds_completed_on_ret is set. Add OPTEE SCMI transport specific .mark_txdone callback support in order to properly handle channel locking along the tx path. 
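The resulting channel-lock lifetime has the same shape for both the SMC and OP-TEE transports in this series; a generic sketch (structure and function names below are hypothetical):

struct foo_channel {
	struct mutex lock;
	/* ... transport specific state ... */
};

static int foo_send_message(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
	struct foo_channel *chan = cinfo->transport_info;
	int ret;

	mutex_lock(&chan->lock);		/* held across the whole exchange */
	ret = foo_invoke(chan, xfer);		/* hypothetical synchronous call */
	if (ret)
		mutex_unlock(&chan->lock);	/* nothing to fetch, release now */

	return ret;
}

static void foo_mark_txdone(struct scmi_chan_info *cinfo, int ret)
{
	struct foo_channel *chan = cinfo->transport_info;

	mutex_unlock(&chan->lock);		/* response surely fetched by now */
}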
Link: https://lore.kernel.org/r/20211220195646.44498-6-cristian.marussi@arm.com Cc: Etienne Carriere Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/optee.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c index 175b39bcd470..f460e12be4ea 100644 --- a/drivers/firmware/arm_scmi/optee.c +++ b/drivers/firmware/arm_scmi/optee.c @@ -363,6 +363,9 @@ static int scmi_optee_chan_setup(struct scmi_chan_info *cinfo, struct device *de if (ret) goto err_close_sess; + /* Enable polling */ + cinfo->no_completion_irq = true; + mutex_lock(&scmi_optee_private->mu); list_add(&channel->link, &scmi_optee_private->channel_list); mutex_unlock(&scmi_optee_private->mu); @@ -423,9 +426,8 @@ static int scmi_optee_send_message(struct scmi_chan_info *cinfo, shmem_tx_prepare(shmem, xfer); ret = invoke_process_smt_channel(channel); - - scmi_rx_callback(cinfo, shmem_read_header(shmem), NULL); - mutex_unlock(&channel->mu); + if (ret) + mutex_unlock(&channel->mu); return ret; } @@ -439,13 +441,11 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo, shmem_fetch_response(shmem, xfer); } -static bool scmi_optee_poll_done(struct scmi_chan_info *cinfo, - struct scmi_xfer *xfer) +static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret) { struct scmi_optee_channel *channel = cinfo->transport_info; - struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer); - return shmem_poll_done(shmem, xfer); + mutex_unlock(&channel->mu); } static struct scmi_transport_ops scmi_optee_ops = { @@ -454,9 +454,9 @@ static struct scmi_transport_ops scmi_optee_ops = { .chan_setup = scmi_optee_chan_setup, .chan_free = scmi_optee_chan_free, .send_message = scmi_optee_send_message, + .mark_txdone = scmi_optee_mark_txdone, .fetch_response = scmi_optee_fetch_response, .clear_channel = scmi_optee_clear_channel, - .poll_done = scmi_optee_poll_done, }; static int scmi_optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data) @@ -562,4 +562,5 @@ const struct scmi_desc scmi_optee_desc = { .max_rx_timeout_ms = 30, .max_msg = 20, .max_msg_size = SCMI_OPTEE_MAX_MSG_SIZE, + .sync_cmds_completed_on_ret = true, }; -- cgit v1.2.3 From 69255e746890274e887ba36a403019380cde0b48 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 20 Dec 2021 19:56:41 +0000 Subject: firmware: arm_scmi: Add support for atomic transports An SCMI transport can be configured as .atomic_enabled in order to signal to the SCMI core that all its TX path is executed in atomic context and that, when requested, polling mode should be used while waiting for command responses. When a specific platform configuration had properly configured such a transport as .atomic_enabled, the SCMI core will also take care not to sleep in the corresponding RX path while waiting for a response if that specific command transaction was requested as atomic using polling mode. Asynchronous commands should not be used in an atomic context and so a warning is emitted if polling was requested for an asynchronous command. Add also a method to check, from the SCMI drivers, if the underlying SCMI transport is currently configured to support atomic transactions: this will be used by upper layers to determine if atomic requests can be supported at all on this SCMI instance. 
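From the consumer side, the new handle method can be queried at probe time to pick between atomic and sleeping request paths; a hypothetical SCMI driver sketch:

static int foo_scmi_probe(struct scmi_device *sdev)
{
	const struct scmi_handle *handle = sdev->handle;

	if (!handle)
		return -ENODEV;

	if (handle->is_transport_atomic(handle))
		dev_info(&sdev->dev, "transport is atomic: non-sleeping paths allowed\n");
	else
		dev_info(&sdev->dev, "transport may sleep: using blocking paths\n");

	return 0;
}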
Link: https://lore.kernel.org/r/20211220195646.44498-7-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/common.h | 4 +++ drivers/firmware/arm_scmi/driver.c | 51 ++++++++++++++++++++++++++++++++++++-- include/linux/scmi_protocol.h | 8 ++++++ 3 files changed, 61 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 24b1d1ac5f12..01d42c2069d4 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -416,6 +416,9 @@ struct scmi_device *scmi_child_dev_find(struct device *parent, * Used by core internally only when polling is * selected as a waiting for reply method: i.e. * if a completion irq was found use that anyway. + * @atomic_enabled: Flag to indicate that this transport, which is assured not + * to sleep anywhere on the TX path, can be used in atomic mode + * when requested. */ struct scmi_desc { int (*transport_init)(void); @@ -426,6 +429,7 @@ struct scmi_desc { int max_msg_size; const bool force_polling; const bool sync_cmds_completed_on_ret; + const bool atomic_enabled; }; #ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index a1c33d36800b..78924db59290 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -923,6 +923,20 @@ static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph, * @ph: Pointer to SCMI protocol handle * @xfer: Transfer to initiate and wait for response * + * Using asynchronous commands in atomic/polling mode should be avoided since + * it could cause long busy-waiting here, so ignore polling for the delayed + * response and WARN if it was requested for this command transaction since + * upper layers should refrain from issuing such kind of requests. + * + * The only other option would have been to refrain from using any asynchronous + * command even if made available, when an atomic transport is detected, and + * instead forcibly use the synchronous version (thing that can be easily + * attained at the protocol layer), but this would also have led to longer + * stalls of the channel for synchronous commands and possibly timeouts. + * (in other words there is usually a good reason if a platform provides an + * asynchronous version of a command and we should prefer to use it...just not + * when using atomic/polling mode) + * * Return: -ETIMEDOUT in case of no delayed response, if transmit error, * return corresponding error, else if all goes well, return 0. */ @@ -934,12 +948,24 @@ static int do_xfer_with_response(const struct scmi_protocol_handle *ph, xfer->async_done = &async_response; + /* + * Delayed responses should not be polled, so an async command should + * not have been used when requiring an atomic/poll context; WARN and + * perform instead a sleeping wait. 
+ * (Note Async + IgnoreDelayedResponses are sent via do_xfer) + */ + WARN_ON_ONCE(xfer->hdr.poll_completion); + ret = do_xfer(ph, xfer); if (!ret) { - if (!wait_for_completion_timeout(xfer->async_done, timeout)) + if (!wait_for_completion_timeout(xfer->async_done, timeout)) { + dev_err(ph->dev, + "timed out in delayed resp(caller: %pS)\n", + (void *)_RET_IP_); ret = -ETIMEDOUT; - else if (xfer->hdr.status) + } else if (xfer->hdr.status) { ret = scmi_to_linux_errno(xfer->hdr.status); + } } xfer->async_done = NULL; @@ -1373,6 +1399,22 @@ static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id) WARN_ON(ret); } +/** + * scmi_is_transport_atomic - Method to check if underlying transport for an + * SCMI instance is configured as atomic. + * + * @handle: A reference to the SCMI platform instance. + * + * Return: True if transport is configured as atomic + */ +static bool scmi_is_transport_atomic(const struct scmi_handle *handle) +{ + struct scmi_info *info = handle_to_scmi_info(handle); + + return info->desc->atomic_enabled && + is_transport_polling_capable(info); +} + static inline struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info) { @@ -1910,6 +1952,7 @@ static int scmi_probe(struct platform_device *pdev) handle->version = &info->version; handle->devm_protocol_get = scmi_devm_protocol_get; handle->devm_protocol_put = scmi_devm_protocol_put; + handle->is_transport_atomic = scmi_is_transport_atomic; if (desc->ops->link_supplier) { ret = desc->ops->link_supplier(dev); @@ -1928,6 +1971,10 @@ static int scmi_probe(struct platform_device *pdev) if (scmi_notification_init(handle)) dev_err(dev, "SCMI Notifications NOT available.\n"); + if (info->desc->atomic_enabled && !is_transport_polling_capable(info)) + dev_err(dev, + "Transport is not polling capable. Atomic mode not supported.\n"); + /* * Trigger SCMI Base protocol initialization. * It's mandatory and won't be ever released/deinit until the diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 80e781c51ddc..9f895cb81818 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -612,6 +612,13 @@ struct scmi_notify_ops { * @devm_protocol_get: devres managed method to acquire a protocol and get specific * operations and a dedicated protocol handler * @devm_protocol_put: devres managed method to release a protocol + * @is_transport_atomic: method to check if the underlying transport for this + * instance handle is configured to support atomic + * transactions for commands. + * Some users of the SCMI stack in the upper layers could + * be interested to know if they can assume SCMI + * command transactions associated to this handle will + * never sleep and act accordingly. * @notify_ops: pointer to set of notifications related operations */ struct scmi_handle { @@ -622,6 +629,7 @@ struct scmi_handle { (*devm_protocol_get)(struct scmi_device *sdev, u8 proto, struct scmi_protocol_handle **ph); void (*devm_protocol_put)(struct scmi_device *sdev, u8 proto); + bool (*is_transport_atomic)(const struct scmi_handle *handle); const struct scmi_notify_ops *notify_ops; }; -- cgit v1.2.3 From 0bfdca8a8661aaa2433b3f7b74d83e3520aa56e5 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 20 Dec 2021 19:56:42 +0000 Subject: firmware: arm_scmi: Add atomic mode support to smc transport Add a Kernel configuration option to enable SCMI SMC transport atomic mode operation for selected SCMI transactions and leave it as default disabled. 
Substitute mutex usages with busy-waiting and declare smc transport as .atomic_enabled if such Kernel configuration option is enabled. Link: https://lore.kernel.org/r/20211220195646.44498-8-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/Kconfig | 14 ++++++++++ drivers/firmware/arm_scmi/smc.c | 56 ++++++++++++++++++++++++++++++++++----- 2 files changed, 64 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig index 638ecec89ff1..d429326433d1 100644 --- a/drivers/firmware/arm_scmi/Kconfig +++ b/drivers/firmware/arm_scmi/Kconfig @@ -78,6 +78,20 @@ config ARM_SCMI_TRANSPORT_SMC If you want the ARM SCMI PROTOCOL stack to include support for a transport based on SMC, answer Y. +config ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE + bool "Enable atomic mode support for SCMI SMC transport" + depends on ARM_SCMI_TRANSPORT_SMC + help + Enable support of atomic operation for SCMI SMC based transport. + + If you want the SCMI SMC based transport to operate in atomic + mode, avoiding any kind of sleeping behaviour for selected + transactions on the TX path, answer Y. + Enabling atomic mode operations allows any SCMI driver using this + transport to optionally ask for atomic SCMI transactions and operate + in atomic context too, at the price of using a number of busy-waiting + primitives all over instead. If unsure say N. + config ARM_SCMI_TRANSPORT_VIRTIO bool "SCMI transport based on VirtIO" depends on VIRTIO=y || VIRTIO=ARM_SCMI_PROTOCOL diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c index df0defd9f8bb..6c7871a40611 100644 --- a/drivers/firmware/arm_scmi/smc.c +++ b/drivers/firmware/arm_scmi/smc.c @@ -7,6 +7,7 @@ */ #include +#include #include #include #include @@ -14,6 +15,7 @@ #include #include #include +#include #include #include "common.h" @@ -23,14 +25,20 @@ * * @cinfo: SCMI channel info * @shmem: Transmit/Receive shared memory area - * @shmem_lock: Lock to protect access to Tx/Rx shared memory area + * @shmem_lock: Lock to protect access to Tx/Rx shared memory area. + * Used when NOT operating in atomic mode. + * @inflight: Atomic flag to protect access to Tx/Rx shared memory area. + * Used when operating in atomic mode. 
* @func_id: smc/hvc call function id */ struct scmi_smc { struct scmi_chan_info *cinfo; struct scmi_shared_mem __iomem *shmem; + /* Protect access to shmem area */ struct mutex shmem_lock; +#define INFLIGHT_NONE MSG_TOKEN_MAX + atomic_t inflight; u32 func_id; }; @@ -54,6 +62,41 @@ static bool smc_chan_available(struct device *dev, int idx) return true; } +static inline void smc_channel_lock_init(struct scmi_smc *scmi_info) +{ + if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE)) + atomic_set(&scmi_info->inflight, INFLIGHT_NONE); + else + mutex_init(&scmi_info->shmem_lock); +} + +static bool smc_xfer_inflight(struct scmi_xfer *xfer, atomic_t *inflight) +{ + int ret; + + ret = atomic_cmpxchg(inflight, INFLIGHT_NONE, xfer->hdr.seq); + + return ret == INFLIGHT_NONE; +} + +static inline void +smc_channel_lock_acquire(struct scmi_smc *scmi_info, + struct scmi_xfer *xfer __maybe_unused) +{ + if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE)) + spin_until_cond(smc_xfer_inflight(xfer, &scmi_info->inflight)); + else + mutex_lock(&scmi_info->shmem_lock); +} + +static inline void smc_channel_lock_release(struct scmi_smc *scmi_info) +{ + if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE)) + atomic_set(&scmi_info->inflight, INFLIGHT_NONE); + else + mutex_unlock(&scmi_info->shmem_lock); +} + static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx) { @@ -114,7 +157,7 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, scmi_info->func_id = func_id; scmi_info->cinfo = cinfo; - mutex_init(&scmi_info->shmem_lock); + smc_channel_lock_init(scmi_info); cinfo->transport_info = scmi_info; return 0; @@ -140,10 +183,10 @@ static int smc_send_message(struct scmi_chan_info *cinfo, struct arm_smccc_res res; /* - * Channel lock will be released only once response has been + * Channel will be released only once response has been * surely fully retrieved, so after .mark_txdone() */ - mutex_lock(&scmi_info->shmem_lock); + smc_channel_lock_acquire(scmi_info, xfer); shmem_tx_prepare(scmi_info->shmem, xfer); @@ -151,7 +194,7 @@ static int smc_send_message(struct scmi_chan_info *cinfo, /* Only SMCCC_RET_NOT_SUPPORTED is valid error code */ if (res.a0) { - mutex_unlock(&scmi_info->shmem_lock); + smc_channel_lock_release(scmi_info); return -EOPNOTSUPP; } @@ -170,7 +213,7 @@ static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret) { struct scmi_smc *scmi_info = cinfo->transport_info; - mutex_unlock(&scmi_info->shmem_lock); + smc_channel_lock_release(scmi_info); } static const struct scmi_transport_ops scmi_smc_ops = { @@ -196,4 +239,5 @@ const struct scmi_desc scmi_smc_desc = { * from the shared memory area. */ .sync_cmds_completed_on_ret = true, + .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE), }; -- cgit v1.2.3 From 94d0cd1da14af0042c8ee7c2cf401dfc321c575c Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 20 Dec 2021 19:56:43 +0000 Subject: firmware: arm_scmi: Add new parameter to mark_txdone Add a new xfer parameter to mark_txdone transport operation which enables the SCMI core to optionally pass back into the transport layer a reference to the xfer descriptor that is being handled. 
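This patch carries only the signature change; as a rough illustration of why the extra parameter helps (a hypothetical transport sketch with made-up names, not code from this series), a transport that tracks per-transfer state could key its TX-done bookkeeping on the passed descriptor:

  #include <linux/bitops.h>

  #include "common.h"   /* struct scmi_chan_info, struct scmi_xfer */

  struct foo_channel {
          unsigned long *inflight_map;    /* one bit per pending sequence number */
  };

  static void foo_mark_txdone(struct scmi_chan_info *cinfo, int ret,
                              struct scmi_xfer *xfer)
  {
          struct foo_channel *chan = cinfo->transport_info;

          /* Release the slot owned by this specific transfer */
          clear_bit(xfer->hdr.seq, chan->inflight_map);
  }

The existing mailbox, optee and smc transports simply ignore the new argument, as the hunks below show.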
Link: https://lore.kernel.org/r/20211220195646.44498-9-cristian.marussi@arm.com Reviewed-by: Florian Fainelli Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/common.h | 3 ++- drivers/firmware/arm_scmi/driver.c | 2 +- drivers/firmware/arm_scmi/mailbox.c | 3 ++- drivers/firmware/arm_scmi/optee.c | 3 ++- drivers/firmware/arm_scmi/smc.c | 3 ++- 5 files changed, 9 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 01d42c2069d4..4fda84bfab42 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -378,7 +378,8 @@ struct scmi_transport_ops { unsigned int (*get_max_msg)(struct scmi_chan_info *base_cinfo); int (*send_message)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer); - void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret); + void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *xfer); void (*fetch_response)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer); void (*fetch_notification)(struct scmi_chan_info *cinfo, diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 78924db59290..c2e7897ff56e 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -897,7 +897,7 @@ static int do_xfer(const struct scmi_protocol_handle *ph, ret = scmi_to_linux_errno(xfer->hdr.status); if (info->desc->ops->mark_txdone) - info->desc->ops->mark_txdone(cinfo, ret); + info->desc->ops->mark_txdone(cinfo, ret, xfer); trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id, xfer->hdr.protocol_id, xfer->hdr.seq, ret); diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c index e09eb12bf421..08ff4d110beb 100644 --- a/drivers/firmware/arm_scmi/mailbox.c +++ b/drivers/firmware/arm_scmi/mailbox.c @@ -140,7 +140,8 @@ static int mailbox_send_message(struct scmi_chan_info *cinfo, return ret; } -static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret) +static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *__unused) { struct scmi_mailbox *smbox = cinfo->transport_info; diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c index f460e12be4ea..734f1eeee161 100644 --- a/drivers/firmware/arm_scmi/optee.c +++ b/drivers/firmware/arm_scmi/optee.c @@ -441,7 +441,8 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo, shmem_fetch_response(shmem, xfer); } -static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret) +static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *__unused) { struct scmi_optee_channel *channel = cinfo->transport_info; diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c index 6c7871a40611..745acfdd0b3d 100644 --- a/drivers/firmware/arm_scmi/smc.c +++ b/drivers/firmware/arm_scmi/smc.c @@ -209,7 +209,8 @@ static void smc_fetch_response(struct scmi_chan_info *cinfo, shmem_fetch_response(scmi_info->shmem, xfer); } -static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret) +static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *__unused) { struct scmi_smc *scmi_info = cinfo->transport_info; -- cgit v1.2.3 From 860122d80251c64484883324128ca82fa35423ef Mon Sep 17 00:00:00 2001 From: Biju Das Date: Mon, 10 Jan 2022 13:46:49 +0000 Subject: soc: renesas: Identify RZ/V2L SoC Add support for identifying the 
RZ/V2L (R9A07G054) SoC. Signed-off-by: Biju Das Signed-off-by: Lad Prabhakar Link: https://lore.kernel.org/r/20220110134659.30424-3-prabhakar.mahadev-lad.rj@bp.renesas.com Signed-off-by: Geert Uytterhoeven --- drivers/soc/renesas/Kconfig | 5 +++++ drivers/soc/renesas/renesas-soc.c | 13 +++++++++++++ 2 files changed, 18 insertions(+) (limited to 'drivers') diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig index 2cbd03db2cc7..90f4f98be29c 100644 --- a/drivers/soc/renesas/Kconfig +++ b/drivers/soc/renesas/Kconfig @@ -296,6 +296,11 @@ config ARCH_R9A07G044 help This enables support for the Renesas RZ/G2L SoC variants. +config ARCH_R9A07G054 + bool "ARM64 Platform support for RZ/V2L" + help + This enables support for the Renesas RZ/V2L SoC variants. + endif # ARM64 config RST_RCAR diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c index 62540ffc581a..8a672d0a4dae 100644 --- a/drivers/soc/renesas/renesas-soc.c +++ b/drivers/soc/renesas/renesas-soc.c @@ -64,6 +64,10 @@ static const struct renesas_family fam_rzg2l __initconst __maybe_unused = { .name = "RZ/G2L", }; +static const struct renesas_family fam_rzv2l __initconst __maybe_unused = { + .name = "RZ/V2L", +}; + static const struct renesas_family fam_shmobile __initconst __maybe_unused = { .name = "SH-Mobile", .reg = 0xe600101c, /* CCCR (Common Chip Code Register) */ @@ -144,6 +148,11 @@ static const struct renesas_soc soc_rz_g2l __initconst __maybe_unused = { .id = 0x841c447, }; +static const struct renesas_soc soc_rz_v2l __initconst __maybe_unused = { + .family = &fam_rzv2l, + .id = 0x8447447, +}; + static const struct renesas_soc soc_rcar_m1a __initconst __maybe_unused = { .family = &fam_rcar_gen1, }; @@ -334,6 +343,9 @@ static const struct of_device_id renesas_socs[] __initconst = { #if defined(CONFIG_ARCH_R9A07G044) { .compatible = "renesas,r9a07g044", .data = &soc_rz_g2l }, #endif +#if defined(CONFIG_ARCH_R9A07G054) + { .compatible = "renesas,r9a07g054", .data = &soc_rz_v2l }, +#endif #ifdef CONFIG_ARCH_SH73A0 { .compatible = "renesas,sh73a0", .data = &soc_shmobile_ag5 }, #endif @@ -367,6 +379,7 @@ static const struct renesas_id id_prr __initconst = { static const struct of_device_id renesas_ids[] __initconst = { { .compatible = "renesas,bsid", .data = &id_bsid }, { .compatible = "renesas,r9a07g044-sysc", .data = &id_rzg2l }, + { .compatible = "renesas,r9a07g054-sysc", .data = &id_rzg2l }, { .compatible = "renesas,prr", .data = &id_prr }, { /* sentinel */ } }; -- cgit v1.2.3 From a6945f4566d4f77a4054720f6649ff921fe1ae64 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Thu, 13 Jan 2022 19:10:55 +0800 Subject: memory: mtk-smi: handle positive return value for clk_bulk_prepare_enable Function clk_bulk_prepare_enable() returns 0 for success or a negative number for error, although the common style for the callers is to check always for any non-zero return value (just like its implementation in clk.h does). Adjust the code to such coding style. 
Signed-off-by: Yong Wu Reviewed-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20220113111057.29918-6-yong.wu@mediatek.com [krzysztof: rewrite commit msg] Signed-off-by: Krzysztof Kozlowski --- drivers/memory/mtk-smi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index e201e5976f34..5ebd7176f133 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -480,7 +480,7 @@ static int __maybe_unused mtk_smi_larb_resume(struct device *dev) int ret; ret = clk_bulk_prepare_enable(larb->smi.clk_num, larb->smi.clks); - if (ret < 0) + if (ret) return ret; /* Configure the basic setting for this larb */ -- cgit v1.2.3 From 8956500e5d5bf541a945299999b0bf4866dc0daf Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Thu, 13 Jan 2022 19:10:56 +0800 Subject: memory: mtk-smi: Add sleep ctrl function Sleep control means that when the larb goes to sleep, we should wait a bit until all the current commands are finished. Thus, when the larb runtime suspends, we need to enable this function to wait until all the outstanding commands are finished. When the larb resumes, just disable this function. This function only improves the safety of the bus. Add a new flag for this function, in preparation for mt8186. Signed-off-by: Anan Sun Signed-off-by: Yong Wu Reviewed-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20220113111057.29918-7-yong.wu@mediatek.com Signed-off-by: Krzysztof Kozlowski --- drivers/memory/mtk-smi.c | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) (limited to 'drivers') diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index 5ebd7176f133..8da7aef27765 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -32,6 +33,10 @@ #define SMI_DUMMY 0x444 /* SMI LARB */ +#define SMI_LARB_SLP_CON 0xc +#define SLP_PROT_EN BIT(0) +#define SLP_PROT_RDY BIT(16) + #define SMI_LARB_CMD_THRT_CON 0x24 #define SMI_LARB_THRT_RD_NU_LMT_MSK GENMASK(7, 4) #define SMI_LARB_THRT_RD_NU_LMT (5 << 4) @@ -81,6 +86,7 @@ #define MTK_SMI_FLAG_THRT_UPDATE BIT(0) #define MTK_SMI_FLAG_SW_FLAG BIT(1) +#define MTK_SMI_FLAG_SLEEP_CTL BIT(2) #define MTK_SMI_CAPS(flags, _x) (!!((flags) & (_x))) struct mtk_smi_reg_pair { @@ -371,6 +377,26 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = { {} }; +static int mtk_smi_larb_sleep_ctrl_enable(struct mtk_smi_larb *larb) +{ + int ret; + u32 tmp; + + writel_relaxed(SLP_PROT_EN, larb->base + SMI_LARB_SLP_CON); + ret = readl_poll_timeout_atomic(larb->base + SMI_LARB_SLP_CON, + tmp, !!(tmp & SLP_PROT_RDY), 10, 1000); + if (ret) { + /* TODO: Reset this larb if it fails here.
*/ + dev_err(larb->smi.dev, "sleep ctrl is not ready(0x%x).\n", tmp); + } + return ret; +} + +static void mtk_smi_larb_sleep_ctrl_disable(struct mtk_smi_larb *larb) +{ + writel_relaxed(0, larb->base + SMI_LARB_SLP_CON); +} + static int mtk_smi_device_link_common(struct device *dev, struct device **com_dev) { struct platform_device *smi_com_pdev; @@ -483,6 +509,9 @@ static int __maybe_unused mtk_smi_larb_resume(struct device *dev) if (ret) return ret; + if (MTK_SMI_CAPS(larb->larb_gen->flags_general, MTK_SMI_FLAG_SLEEP_CTL)) + mtk_smi_larb_sleep_ctrl_disable(larb); + /* Configure the basic setting for this larb */ larb_gen->config_port(dev); @@ -492,6 +521,13 @@ static int __maybe_unused mtk_smi_larb_resume(struct device *dev) static int __maybe_unused mtk_smi_larb_suspend(struct device *dev) { struct mtk_smi_larb *larb = dev_get_drvdata(dev); + int ret; + + if (MTK_SMI_CAPS(larb->larb_gen->flags_general, MTK_SMI_FLAG_SLEEP_CTL)) { + ret = mtk_smi_larb_sleep_ctrl_enable(larb); + if (ret) + return ret; + } clk_bulk_disable_unprepare(larb->smi.clk_num, larb->smi.clks); return 0; -- cgit v1.2.3 From 86a010bfc73983aa8cd914f1e5f73962b0406678 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Thu, 13 Jan 2022 19:10:57 +0800 Subject: memory: mtk-smi: mt8186: Add smi support Add mt8186 SMI support. Signed-off-by: Yong Wu Acked-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20220113111057.29918-8-yong.wu@mediatek.com Signed-off-by: Krzysztof Kozlowski --- drivers/memory/mtk-smi.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'drivers') diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index 8da7aef27765..377ef019c4cf 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -355,6 +355,11 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8183 = { /* IPU0 | IPU1 | CCU */ }; +static const struct mtk_smi_larb_gen mtk_smi_larb_mt8186 = { + .config_port = mtk_smi_larb_config_port_gen2_general, + .flags_general = MTK_SMI_FLAG_SLEEP_CTL, +}; + static const struct mtk_smi_larb_gen mtk_smi_larb_mt8192 = { .config_port = mtk_smi_larb_config_port_gen2_general, }; @@ -372,6 +377,7 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = { {.compatible = "mediatek,mt8167-smi-larb", .data = &mtk_smi_larb_mt8167}, {.compatible = "mediatek,mt8173-smi-larb", .data = &mtk_smi_larb_mt8173}, {.compatible = "mediatek,mt8183-smi-larb", .data = &mtk_smi_larb_mt8183}, + {.compatible = "mediatek,mt8186-smi-larb", .data = &mtk_smi_larb_mt8186}, {.compatible = "mediatek,mt8192-smi-larb", .data = &mtk_smi_larb_mt8192}, {.compatible = "mediatek,mt8195-smi-larb", .data = &mtk_smi_larb_mt8195}, {} @@ -580,6 +586,12 @@ static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = { F_MMU1_LARB(7), }; +static const struct mtk_smi_common_plat mtk_smi_common_mt8186 = { + .type = MTK_SMI_GEN2, + .has_gals = true, + .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(4) | F_MMU1_LARB(7), +}; + static const struct mtk_smi_common_plat mtk_smi_common_mt8192 = { .type = MTK_SMI_GEN2, .has_gals = true, @@ -614,6 +626,7 @@ static const struct of_device_id mtk_smi_common_of_ids[] = { {.compatible = "mediatek,mt8167-smi-common", .data = &mtk_smi_common_gen2}, {.compatible = "mediatek,mt8173-smi-common", .data = &mtk_smi_common_gen2}, {.compatible = "mediatek,mt8183-smi-common", .data = &mtk_smi_common_mt8183}, + {.compatible = "mediatek,mt8186-smi-common", .data = &mtk_smi_common_mt8186}, {.compatible = "mediatek,mt8192-smi-common", .data = &mtk_smi_common_mt8192}, {.compatible = 
"mediatek,mt8195-smi-common-vdo", .data = &mtk_smi_common_mt8195_vdo}, {.compatible = "mediatek,mt8195-smi-common-vpp", .data = &mtk_smi_common_mt8195_vpp}, -- cgit v1.2.3 From 4f346005aaed641042ca18171c4383a6a85f6a8b Mon Sep 17 00:00:00 2001 From: Alim Akhtar Date: Mon, 24 Jan 2022 19:46:32 +0530 Subject: clk: samsung: fsd: Add initial clock support Add initial clock support for FSD (Full Self-Driving) SoC which is required to bring-up platforms based on this SoC. Cc: linux-fsd@tesla.com Signed-off-by: Jayati Sahu Signed-off-by: Ajay Kumar Signed-off-by: Pankaj Dubey Signed-off-by: Alim Akhtar Acked-by: Sylwester Nawrocki Link: https://lore.kernel.org/r/20220124141644.71052-5-alim.akhtar@samsung.com Signed-off-by: Krzysztof Kozlowski --- drivers/clk/samsung/Kconfig | 8 ++ drivers/clk/samsung/Makefile | 1 + drivers/clk/samsung/clk-fsd.c | 310 ++++++++++++++++++++++++++++++++++++++++++ drivers/clk/samsung/clk-pll.c | 1 + drivers/clk/samsung/clk-pll.h | 1 + 5 files changed, 321 insertions(+) create mode 100644 drivers/clk/samsung/clk-fsd.c (limited to 'drivers') diff --git a/drivers/clk/samsung/Kconfig b/drivers/clk/samsung/Kconfig index 0e18d6ff2916..5f64c58f120f 100644 --- a/drivers/clk/samsung/Kconfig +++ b/drivers/clk/samsung/Kconfig @@ -11,6 +11,7 @@ config COMMON_CLK_SAMSUNG select EXYNOS_5410_COMMON_CLK if ARM && SOC_EXYNOS5410 select EXYNOS_5420_COMMON_CLK if ARM && SOC_EXYNOS5420 select EXYNOS_ARM64_COMMON_CLK if ARM64 && ARCH_EXYNOS + select TESLA_FSD_COMMON_CLK if ARM64 && ARCH_TESLA_FSD config S3C64XX_COMMON_CLK bool "Samsung S3C64xx clock controller support" if COMPILE_TEST @@ -124,3 +125,10 @@ config S3C2443_COMMON_CLK help Support for the clock controller present on the Samsung S3C2416/S3C2443 SoCs. Choose Y here only if you build for this SoC. + +config TESLA_FSD_COMMON_CLK + bool "Tesla FSD clock controller support" if COMPILE_TEST + depends on COMMON_CLK_SAMSUNG + help + Support for the clock controller present on the Tesla FSD SoC. + Choose Y here only if you build for this SoC. diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile index 0df74916a895..17e5d1cb9da2 100644 --- a/drivers/clk/samsung/Makefile +++ b/drivers/clk/samsung/Makefile @@ -26,3 +26,4 @@ obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o obj-$(CONFIG_S3C2443_COMMON_CLK)+= clk-s3c2443.o obj-$(CONFIG_S3C64XX_COMMON_CLK) += clk-s3c64xx.o obj-$(CONFIG_S5PV210_COMMON_CLK) += clk-s5pv210.o clk-s5pv210-audss.o +obj-$(CONFIG_TESLA_FSD_COMMON_CLK) += clk-fsd.o diff --git a/drivers/clk/samsung/clk-fsd.c b/drivers/clk/samsung/clk-fsd.c new file mode 100644 index 000000000000..ae35c4303b55 --- /dev/null +++ b/drivers/clk/samsung/clk-fsd.c @@ -0,0 +1,310 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2022 Samsung Electronics Co., Ltd. + * https://www.samsung.com + * Copyright (c) 2017-2022 Tesla, Inc. + * https://www.tesla.com + * + * Common Clock Framework support for FSD SoC. 
+ */ + +#include +#include +#include +#include + +#include + +#include "clk.h" + +/* Register Offset definitions for CMU_CMU (0x11c10000) */ +#define PLL_LOCKTIME_PLL_SHARED0 0x0 +#define PLL_LOCKTIME_PLL_SHARED1 0x4 +#define PLL_LOCKTIME_PLL_SHARED2 0x8 +#define PLL_LOCKTIME_PLL_SHARED3 0xc +#define PLL_CON0_PLL_SHARED0 0x100 +#define PLL_CON0_PLL_SHARED1 0x120 +#define PLL_CON0_PLL_SHARED2 0x140 +#define PLL_CON0_PLL_SHARED3 0x160 +#define MUX_CMU_CIS0_CLKMUX 0x1000 +#define MUX_CMU_CIS1_CLKMUX 0x1004 +#define MUX_CMU_CIS2_CLKMUX 0x1008 +#define MUX_CMU_CPUCL_SWITCHMUX 0x100c +#define MUX_CMU_FSYS1_ACLK_MUX 0x1014 +#define MUX_PLL_SHARED0_MUX 0x1020 +#define MUX_PLL_SHARED1_MUX 0x1024 +#define DIV_CMU_CIS0_CLK 0x1800 +#define DIV_CMU_CIS1_CLK 0x1804 +#define DIV_CMU_CIS2_CLK 0x1808 +#define DIV_CMU_CMU_ACLK 0x180c +#define DIV_CMU_CPUCL_SWITCH 0x1810 +#define DIV_CMU_FSYS0_SHARED0DIV4 0x181c +#define DIV_CMU_FSYS0_SHARED1DIV3 0x1820 +#define DIV_CMU_FSYS0_SHARED1DIV4 0x1824 +#define DIV_CMU_FSYS1_SHARED0DIV4 0x1828 +#define DIV_CMU_FSYS1_SHARED0DIV8 0x182c +#define DIV_CMU_IMEM_ACLK 0x1834 +#define DIV_CMU_IMEM_DMACLK 0x1838 +#define DIV_CMU_IMEM_TCUCLK 0x183c +#define DIV_CMU_PERIC_SHARED0DIV20 0x1844 +#define DIV_CMU_PERIC_SHARED0DIV3_TBUCLK 0x1848 +#define DIV_CMU_PERIC_SHARED1DIV36 0x184c +#define DIV_CMU_PERIC_SHARED1DIV4_DMACLK 0x1850 +#define DIV_PLL_SHARED0_DIV2 0x1858 +#define DIV_PLL_SHARED0_DIV3 0x185c +#define DIV_PLL_SHARED0_DIV4 0x1860 +#define DIV_PLL_SHARED0_DIV6 0x1864 +#define DIV_PLL_SHARED1_DIV3 0x1868 +#define DIV_PLL_SHARED1_DIV36 0x186c +#define DIV_PLL_SHARED1_DIV4 0x1870 +#define DIV_PLL_SHARED1_DIV9 0x1874 +#define GAT_CMU_CIS0_CLKGATE 0x2000 +#define GAT_CMU_CIS1_CLKGATE 0x2004 +#define GAT_CMU_CIS2_CLKGATE 0x2008 +#define GAT_CMU_CPUCL_SWITCH_GATE 0x200c +#define GAT_CMU_FSYS0_SHARED0DIV4_GATE 0x2018 +#define GAT_CMU_FSYS0_SHARED1DIV4_CLK 0x201c +#define GAT_CMU_FSYS0_SHARED1DIV4_GATE 0x2020 +#define GAT_CMU_FSYS1_SHARED0DIV4_GATE 0x2024 +#define GAT_CMU_FSYS1_SHARED1DIV4_GATE 0x2028 +#define GAT_CMU_IMEM_ACLK_GATE 0x2030 +#define GAT_CMU_IMEM_DMACLK_GATE 0x2034 +#define GAT_CMU_IMEM_TCUCLK_GATE 0x2038 +#define GAT_CMU_PERIC_SHARED0DIVE3_TBUCLK_GATE 0x2040 +#define GAT_CMU_PERIC_SHARED0DIVE4_GATE 0x2044 +#define GAT_CMU_PERIC_SHARED1DIV4_DMACLK_GATE 0x2048 +#define GAT_CMU_PERIC_SHARED1DIVE4_GATE 0x204c +#define GAT_CMU_CMU_CMU_IPCLKPORT_PCLK 0x2054 +#define GAT_CMU_AXI2APB_CMU_IPCLKPORT_ACLK 0x2058 +#define GAT_CMU_NS_BRDG_CMU_IPCLKPORT_CLK__PSOC_CMU__CLK_CMU 0x205c +#define GAT_CMU_SYSREG_CMU_IPCLKPORT_PCLK 0x2060 + +static const unsigned long cmu_clk_regs[] __initconst = { + PLL_LOCKTIME_PLL_SHARED0, + PLL_LOCKTIME_PLL_SHARED1, + PLL_LOCKTIME_PLL_SHARED2, + PLL_LOCKTIME_PLL_SHARED3, + PLL_CON0_PLL_SHARED0, + PLL_CON0_PLL_SHARED1, + PLL_CON0_PLL_SHARED2, + PLL_CON0_PLL_SHARED3, + MUX_CMU_CIS0_CLKMUX, + MUX_CMU_CIS1_CLKMUX, + MUX_CMU_CIS2_CLKMUX, + MUX_CMU_CPUCL_SWITCHMUX, + MUX_CMU_FSYS1_ACLK_MUX, + MUX_PLL_SHARED0_MUX, + MUX_PLL_SHARED1_MUX, + DIV_CMU_CIS0_CLK, + DIV_CMU_CIS1_CLK, + DIV_CMU_CIS2_CLK, + DIV_CMU_CMU_ACLK, + DIV_CMU_CPUCL_SWITCH, + DIV_CMU_FSYS0_SHARED0DIV4, + DIV_CMU_FSYS0_SHARED1DIV3, + DIV_CMU_FSYS0_SHARED1DIV4, + DIV_CMU_FSYS1_SHARED0DIV4, + DIV_CMU_FSYS1_SHARED0DIV8, + DIV_CMU_IMEM_ACLK, + DIV_CMU_IMEM_DMACLK, + DIV_CMU_IMEM_TCUCLK, + DIV_CMU_PERIC_SHARED0DIV20, + DIV_CMU_PERIC_SHARED0DIV3_TBUCLK, + DIV_CMU_PERIC_SHARED1DIV36, + DIV_CMU_PERIC_SHARED1DIV4_DMACLK, + DIV_PLL_SHARED0_DIV2, + DIV_PLL_SHARED0_DIV3, + DIV_PLL_SHARED0_DIV4, + 
DIV_PLL_SHARED0_DIV6, + DIV_PLL_SHARED1_DIV3, + DIV_PLL_SHARED1_DIV36, + DIV_PLL_SHARED1_DIV4, + DIV_PLL_SHARED1_DIV9, + GAT_CMU_CIS0_CLKGATE, + GAT_CMU_CIS1_CLKGATE, + GAT_CMU_CIS2_CLKGATE, + GAT_CMU_CPUCL_SWITCH_GATE, + GAT_CMU_FSYS0_SHARED0DIV4_GATE, + GAT_CMU_FSYS0_SHARED1DIV4_CLK, + GAT_CMU_FSYS0_SHARED1DIV4_GATE, + GAT_CMU_FSYS1_SHARED0DIV4_GATE, + GAT_CMU_FSYS1_SHARED1DIV4_GATE, + GAT_CMU_IMEM_ACLK_GATE, + GAT_CMU_IMEM_DMACLK_GATE, + GAT_CMU_IMEM_TCUCLK_GATE, + GAT_CMU_PERIC_SHARED0DIVE3_TBUCLK_GATE, + GAT_CMU_PERIC_SHARED0DIVE4_GATE, + GAT_CMU_PERIC_SHARED1DIV4_DMACLK_GATE, + GAT_CMU_PERIC_SHARED1DIVE4_GATE, + GAT_CMU_CMU_CMU_IPCLKPORT_PCLK, + GAT_CMU_AXI2APB_CMU_IPCLKPORT_ACLK, + GAT_CMU_NS_BRDG_CMU_IPCLKPORT_CLK__PSOC_CMU__CLK_CMU, + GAT_CMU_SYSREG_CMU_IPCLKPORT_PCLK, +}; + +static const struct samsung_pll_rate_table pll_shared0_rate_table[] __initconst = { + PLL_35XX_RATE(24 * MHZ, 2000000000U, 250, 3, 0), +}; + +static const struct samsung_pll_rate_table pll_shared1_rate_table[] __initconst = { + PLL_35XX_RATE(24 * MHZ, 2400000000U, 200, 2, 0), +}; + +static const struct samsung_pll_rate_table pll_shared2_rate_table[] __initconst = { + PLL_35XX_RATE(24 * MHZ, 2400000000U, 200, 2, 0), +}; + +static const struct samsung_pll_rate_table pll_shared3_rate_table[] __initconst = { + PLL_35XX_RATE(24 * MHZ, 1800000000U, 150, 2, 0), +}; + +static const struct samsung_pll_clock cmu_pll_clks[] __initconst = { + PLL(pll_142xx, 0, "fout_pll_shared0", "fin_pll", PLL_LOCKTIME_PLL_SHARED0, + PLL_CON0_PLL_SHARED0, pll_shared0_rate_table), + PLL(pll_142xx, 0, "fout_pll_shared1", "fin_pll", PLL_LOCKTIME_PLL_SHARED1, + PLL_CON0_PLL_SHARED1, pll_shared1_rate_table), + PLL(pll_142xx, 0, "fout_pll_shared2", "fin_pll", PLL_LOCKTIME_PLL_SHARED2, + PLL_CON0_PLL_SHARED2, pll_shared2_rate_table), + PLL(pll_142xx, 0, "fout_pll_shared3", "fin_pll", PLL_LOCKTIME_PLL_SHARED3, + PLL_CON0_PLL_SHARED3, pll_shared3_rate_table), +}; + +/* List of parent clocks for Muxes in CMU_CMU */ +PNAME(mout_cmu_shared0_pll_p) = { "fin_pll", "fout_pll_shared0" }; +PNAME(mout_cmu_shared1_pll_p) = { "fin_pll", "fout_pll_shared1" }; +PNAME(mout_cmu_shared2_pll_p) = { "fin_pll", "fout_pll_shared2" }; +PNAME(mout_cmu_shared3_pll_p) = { "fin_pll", "fout_pll_shared3" }; +PNAME(mout_cmu_cis0_clkmux_p) = { "fin_pll", "dout_cmu_pll_shared0_div4" }; +PNAME(mout_cmu_cis1_clkmux_p) = { "fin_pll", "dout_cmu_pll_shared0_div4" }; +PNAME(mout_cmu_cis2_clkmux_p) = { "fin_pll", "dout_cmu_pll_shared0_div4" }; +PNAME(mout_cmu_cpucl_switchmux_p) = { "mout_cmu_pll_shared2", "mout_cmu_pll_shared0_mux" }; +PNAME(mout_cmu_fsys1_aclk_mux_p) = { "dout_cmu_pll_shared0_div4", "fin_pll" }; +PNAME(mout_cmu_pll_shared0_mux_p) = { "fin_pll", "mout_cmu_pll_shared0" }; +PNAME(mout_cmu_pll_shared1_mux_p) = { "fin_pll", "mout_cmu_pll_shared1" }; + +static const struct samsung_mux_clock cmu_mux_clks[] __initconst = { + MUX(0, "mout_cmu_pll_shared0", mout_cmu_shared0_pll_p, PLL_CON0_PLL_SHARED0, 4, 1), + MUX(0, "mout_cmu_pll_shared1", mout_cmu_shared1_pll_p, PLL_CON0_PLL_SHARED1, 4, 1), + MUX(0, "mout_cmu_pll_shared2", mout_cmu_shared2_pll_p, PLL_CON0_PLL_SHARED2, 4, 1), + MUX(0, "mout_cmu_pll_shared3", mout_cmu_shared3_pll_p, PLL_CON0_PLL_SHARED3, 4, 1), + MUX(0, "mout_cmu_cis0_clkmux", mout_cmu_cis0_clkmux_p, MUX_CMU_CIS0_CLKMUX, 0, 1), + MUX(0, "mout_cmu_cis1_clkmux", mout_cmu_cis1_clkmux_p, MUX_CMU_CIS1_CLKMUX, 0, 1), + MUX(0, "mout_cmu_cis2_clkmux", mout_cmu_cis2_clkmux_p, MUX_CMU_CIS2_CLKMUX, 0, 1), + MUX(0, "mout_cmu_cpucl_switchmux", 
mout_cmu_cpucl_switchmux_p, + MUX_CMU_CPUCL_SWITCHMUX, 0, 1), + MUX(0, "mout_cmu_fsys1_aclk_mux", mout_cmu_fsys1_aclk_mux_p, MUX_CMU_FSYS1_ACLK_MUX, 0, 1), + MUX(0, "mout_cmu_pll_shared0_mux", mout_cmu_pll_shared0_mux_p, MUX_PLL_SHARED0_MUX, 0, 1), + MUX(0, "mout_cmu_pll_shared1_mux", mout_cmu_pll_shared1_mux_p, MUX_PLL_SHARED1_MUX, 0, 1), +}; + +static const struct samsung_div_clock cmu_div_clks[] __initconst = { + DIV(0, "dout_cmu_cis0_clk", "cmu_cis0_clkgate", DIV_CMU_CIS0_CLK, 0, 4), + DIV(0, "dout_cmu_cis1_clk", "cmu_cis1_clkgate", DIV_CMU_CIS1_CLK, 0, 4), + DIV(0, "dout_cmu_cis2_clk", "cmu_cis2_clkgate", DIV_CMU_CIS2_CLK, 0, 4), + DIV(0, "dout_cmu_cmu_aclk", "dout_cmu_pll_shared1_div9", DIV_CMU_CMU_ACLK, 0, 4), + DIV(0, "dout_cmu_cpucl_switch", "cmu_cpucl_switch_gate", DIV_CMU_CPUCL_SWITCH, 0, 4), + DIV(DOUT_CMU_FSYS0_SHARED0DIV4, "dout_cmu_fsys0_shared0div4", "cmu_fsys0_shared0div4_gate", + DIV_CMU_FSYS0_SHARED0DIV4, 0, 4), + DIV(0, "dout_cmu_fsys0_shared1div3", "cmu_fsys0_shared1div4_clk", + DIV_CMU_FSYS0_SHARED1DIV3, 0, 4), + DIV(DOUT_CMU_FSYS0_SHARED1DIV4, "dout_cmu_fsys0_shared1div4", "cmu_fsys0_shared1div4_gate", + DIV_CMU_FSYS0_SHARED1DIV4, 0, 4), + DIV(DOUT_CMU_FSYS1_SHARED0DIV4, "dout_cmu_fsys1_shared0div4", "cmu_fsys1_shared0div4_gate", + DIV_CMU_FSYS1_SHARED0DIV4, 0, 4), + DIV(DOUT_CMU_FSYS1_SHARED0DIV8, "dout_cmu_fsys1_shared0div8", "cmu_fsys1_shared1div4_gate", + DIV_CMU_FSYS1_SHARED0DIV8, 0, 4), + DIV(DOUT_CMU_IMEM_ACLK, "dout_cmu_imem_aclk", "cmu_imem_aclk_gate", + DIV_CMU_IMEM_ACLK, 0, 4), + DIV(DOUT_CMU_IMEM_DMACLK, "dout_cmu_imem_dmaclk", "cmu_imem_dmaclk_gate", + DIV_CMU_IMEM_DMACLK, 0, 4), + DIV(DOUT_CMU_IMEM_TCUCLK, "dout_cmu_imem_tcuclk", "cmu_imem_tcuclk_gate", + DIV_CMU_IMEM_TCUCLK, 0, 4), + DIV(DOUT_CMU_PERIC_SHARED0DIV20, "dout_cmu_peric_shared0div20", + "cmu_peric_shared0dive4_gate", DIV_CMU_PERIC_SHARED0DIV20, 0, 4), + DIV(DOUT_CMU_PERIC_SHARED0DIV3_TBUCLK, "dout_cmu_peric_shared0div3_tbuclk", + "cmu_peric_shared0dive3_tbuclk_gate", DIV_CMU_PERIC_SHARED0DIV3_TBUCLK, 0, 4), + DIV(DOUT_CMU_PERIC_SHARED1DIV36, "dout_cmu_peric_shared1div36", + "cmu_peric_shared1dive4_gate", DIV_CMU_PERIC_SHARED1DIV36, 0, 4), + DIV(DOUT_CMU_PERIC_SHARED1DIV4_DMACLK, "dout_cmu_peric_shared1div4_dmaclk", + "cmu_peric_shared1div4_dmaclk_gate", DIV_CMU_PERIC_SHARED1DIV4_DMACLK, 0, 4), + DIV(0, "dout_cmu_pll_shared0_div2", "mout_cmu_pll_shared0_mux", + DIV_PLL_SHARED0_DIV2, 0, 4), + DIV(0, "dout_cmu_pll_shared0_div3", "mout_cmu_pll_shared0_mux", + DIV_PLL_SHARED0_DIV3, 0, 4), + DIV(DOUT_CMU_PLL_SHARED0_DIV4, "dout_cmu_pll_shared0_div4", "dout_cmu_pll_shared0_div2", + DIV_PLL_SHARED0_DIV4, 0, 4), + DIV(DOUT_CMU_PLL_SHARED0_DIV6, "dout_cmu_pll_shared0_div6", "dout_cmu_pll_shared0_div3", + DIV_PLL_SHARED0_DIV6, 0, 4), + DIV(0, "dout_cmu_pll_shared1_div3", "mout_cmu_pll_shared1_mux", + DIV_PLL_SHARED1_DIV3, 0, 4), + DIV(0, "dout_cmu_pll_shared1_div36", "dout_cmu_pll_shared1_div9", + DIV_PLL_SHARED1_DIV36, 0, 4), + DIV(0, "dout_cmu_pll_shared1_div4", "mout_cmu_pll_shared1_mux", + DIV_PLL_SHARED1_DIV4, 0, 4), + DIV(0, "dout_cmu_pll_shared1_div9", "dout_cmu_pll_shared1_div3", + DIV_PLL_SHARED1_DIV9, 0, 4), +}; + +static const struct samsung_gate_clock cmu_gate_clks[] __initconst = { + GATE(0, "cmu_cis0_clkgate", "mout_cmu_cis0_clkmux", GAT_CMU_CIS0_CLKGATE, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "cmu_cis1_clkgate", "mout_cmu_cis1_clkmux", GAT_CMU_CIS1_CLKGATE, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "cmu_cis2_clkgate", "mout_cmu_cis2_clkmux", GAT_CMU_CIS2_CLKGATE, 21, + CLK_IGNORE_UNUSED, 
0), + GATE(CMU_CPUCL_SWITCH_GATE, "cmu_cpucl_switch_gate", "mout_cmu_cpucl_switchmux", + GAT_CMU_CPUCL_SWITCH_GATE, 21, CLK_IGNORE_UNUSED, 0), + GATE(GAT_CMU_FSYS0_SHARED0DIV4, "cmu_fsys0_shared0div4_gate", "dout_cmu_pll_shared0_div4", + GAT_CMU_FSYS0_SHARED0DIV4_GATE, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cmu_fsys0_shared1div4_clk", "dout_cmu_pll_shared1_div3", + GAT_CMU_FSYS0_SHARED1DIV4_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cmu_fsys0_shared1div4_gate", "dout_cmu_pll_shared1_div4", + GAT_CMU_FSYS0_SHARED1DIV4_GATE, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cmu_fsys1_shared0div4_gate", "mout_cmu_fsys1_aclk_mux", + GAT_CMU_FSYS1_SHARED0DIV4_GATE, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cmu_fsys1_shared1div4_gate", "dout_cmu_fsys1_shared0div4", + GAT_CMU_FSYS1_SHARED1DIV4_GATE, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cmu_imem_aclk_gate", "dout_cmu_pll_shared1_div9", GAT_CMU_IMEM_ACLK_GATE, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "cmu_imem_dmaclk_gate", "mout_cmu_pll_shared1_mux", GAT_CMU_IMEM_DMACLK_GATE, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "cmu_imem_tcuclk_gate", "dout_cmu_pll_shared0_div3", GAT_CMU_IMEM_TCUCLK_GATE, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "cmu_peric_shared0dive3_tbuclk_gate", "dout_cmu_pll_shared0_div3", + GAT_CMU_PERIC_SHARED0DIVE3_TBUCLK_GATE, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cmu_peric_shared0dive4_gate", "dout_cmu_pll_shared0_div4", + GAT_CMU_PERIC_SHARED0DIVE4_GATE, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cmu_peric_shared1div4_dmaclk_gate", "dout_cmu_pll_shared1_div4", + GAT_CMU_PERIC_SHARED1DIV4_DMACLK_GATE, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cmu_peric_shared1dive4_gate", "dout_cmu_pll_shared1_div36", + GAT_CMU_PERIC_SHARED1DIVE4_GATE, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cmu_uid_cmu_cmu_cmu_ipclkport_pclk", "dout_cmu_cmu_aclk", + GAT_CMU_CMU_CMU_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cmu_uid_axi2apb_cmu_ipclkport_aclk", "dout_cmu_cmu_aclk", + GAT_CMU_AXI2APB_CMU_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cmu_uid_ns_brdg_cmu_ipclkport_clk__psoc_cmu__clk_cmu", "dout_cmu_cmu_aclk", + GAT_CMU_NS_BRDG_CMU_IPCLKPORT_CLK__PSOC_CMU__CLK_CMU, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cmu_uid_sysreg_cmu_ipclkport_pclk", "dout_cmu_cmu_aclk", + GAT_CMU_SYSREG_CMU_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), +}; + +static const struct samsung_cmu_info cmu_cmu_info __initconst = { + .pll_clks = cmu_pll_clks, + .nr_pll_clks = ARRAY_SIZE(cmu_pll_clks), + .mux_clks = cmu_mux_clks, + .nr_mux_clks = ARRAY_SIZE(cmu_mux_clks), + .div_clks = cmu_div_clks, + .nr_div_clks = ARRAY_SIZE(cmu_div_clks), + .gate_clks = cmu_gate_clks, + .nr_gate_clks = ARRAY_SIZE(cmu_gate_clks), + .nr_clk_ids = CMU_NR_CLK, + .clk_regs = cmu_clk_regs, + .nr_clk_regs = ARRAY_SIZE(cmu_clk_regs), +}; + +static void __init fsd_clk_cmu_init(struct device_node *np) +{ + samsung_cmu_register_one(np, &cmu_cmu_info); +} + +CLK_OF_DECLARE(fsd_clk_cmu, "tesla,fsd-clock-cmu", fsd_clk_cmu_init); diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c index 70cdc87f714e..fe383471c5f0 100644 --- a/drivers/clk/samsung/clk-pll.c +++ b/drivers/clk/samsung/clk-pll.c @@ -1469,6 +1469,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx, case pll_1450x: case pll_1451x: case pll_1452x: + case pll_142xx: pll->enable_offs = PLL35XX_ENABLE_SHIFT; pll->lock_offs = PLL35XX_LOCK_STAT_SHIFT; if (!pll->rate_table) diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h index c83a20195f6d..a9892c2d1f57 100644 --- a/drivers/clk/samsung/clk-pll.h +++ 
b/drivers/clk/samsung/clk-pll.h @@ -39,6 +39,7 @@ enum samsung_pll_type { pll_1460x, pll_0822x, pll_0831x, + pll_142xx, }; #define PLL_RATE(_fin, _m, _p, _s, _k, _ks) \ -- cgit v1.2.3 From e3f3dc3810d3765128d28b241f4afb761d81678a Mon Sep 17 00:00:00 2001 From: Alim Akhtar Date: Mon, 24 Jan 2022 19:46:33 +0530 Subject: clk: samsung: fsd: Add cmu_peric block clock information Add CMU_PERIC block clock information needed for various IPs functions found in this block. Cc: linux-fsd@tesla.com Signed-off-by: Aswani Reddy Signed-off-by: Niyas Ahmed S T Signed-off-by: Chandrasekar R Signed-off-by: Jayati Sahu Signed-off-by: Sriranjani P Signed-off-by: Ajay Kumar Signed-off-by: Pankaj Dubey Signed-off-by: Alim Akhtar Reviewed-by: Krzysztof Kozlowski Acked-by: Sylwester Nawrocki Link: https://lore.kernel.org/r/20220124141644.71052-6-alim.akhtar@samsung.com Signed-off-by: Krzysztof Kozlowski --- drivers/clk/samsung/clk-fsd.c | 405 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 405 insertions(+) (limited to 'drivers') diff --git a/drivers/clk/samsung/clk-fsd.c b/drivers/clk/samsung/clk-fsd.c index ae35c4303b55..a3d328318814 100644 --- a/drivers/clk/samsung/clk-fsd.c +++ b/drivers/clk/samsung/clk-fsd.c @@ -8,14 +8,19 @@ * Common Clock Framework support for FSD SoC. */ +#include #include #include #include #include +#include +#include +#include #include #include "clk.h" +#include "clk-exynos-arm64.h" /* Register Offset definitions for CMU_CMU (0x11c10000) */ #define PLL_LOCKTIME_PLL_SHARED0 0x0 @@ -308,3 +313,403 @@ static void __init fsd_clk_cmu_init(struct device_node *np) } CLK_OF_DECLARE(fsd_clk_cmu, "tesla,fsd-clock-cmu", fsd_clk_cmu_init); + +/* Register Offset definitions for CMU_PERIC (0x14010000) */ +#define PLL_CON0_PERIC_DMACLK_MUX 0x100 +#define PLL_CON0_PERIC_EQOS_BUSCLK_MUX 0x120 +#define PLL_CON0_PERIC_PCLK_MUX 0x140 +#define PLL_CON0_PERIC_TBUCLK_MUX 0x160 +#define PLL_CON0_SPI_CLK 0x180 +#define PLL_CON0_SPI_PCLK 0x1a0 +#define PLL_CON0_UART_CLK 0x1c0 +#define PLL_CON0_UART_PCLK 0x1e0 +#define MUX_PERIC_EQOS_PHYRXCLK 0x1000 +#define DIV_EQOS_BUSCLK 0x1800 +#define DIV_PERIC_MCAN_CLK 0x1804 +#define DIV_RGMII_CLK 0x1808 +#define DIV_RII_CLK 0x180c +#define DIV_RMII_CLK 0x1810 +#define DIV_SPI_CLK 0x1814 +#define DIV_UART_CLK 0x1818 +#define GAT_EQOS_TOP_IPCLKPORT_CLK_PTP_REF_I 0x2000 +#define GAT_GPIO_PERIC_IPCLKPORT_OSCCLK 0x2004 +#define GAT_PERIC_ADC0_IPCLKPORT_I_OSCCLK 0x2008 +#define GAT_PERIC_CMU_PERIC_IPCLKPORT_PCLK 0x200c +#define GAT_PERIC_PWM0_IPCLKPORT_I_OSCCLK 0x2010 +#define GAT_PERIC_PWM1_IPCLKPORT_I_OSCCLK 0x2014 +#define GAT_ASYNC_APB_DMA0_IPCLKPORT_PCLKM 0x2018 +#define GAT_ASYNC_APB_DMA0_IPCLKPORT_PCLKS 0x201c +#define GAT_ASYNC_APB_DMA1_IPCLKPORT_PCLKM 0x2020 +#define GAT_ASYNC_APB_DMA1_IPCLKPORT_PCLKS 0x2024 +#define GAT_AXI2APB_PERIC0_IPCLKPORT_ACLK 0x2028 +#define GAT_AXI2APB_PERIC1_IPCLKPORT_ACLK 0x202c +#define GAT_AXI2APB_PERIC2_IPCLKPORT_ACLK 0x2030 +#define GAT_BUS_D_PERIC_IPCLKPORT_DMACLK 0x2034 +#define GAT_BUS_D_PERIC_IPCLKPORT_EQOSCLK 0x2038 +#define GAT_BUS_D_PERIC_IPCLKPORT_MAINCLK 0x203c +#define GAT_BUS_P_PERIC_IPCLKPORT_EQOSCLK 0x2040 +#define GAT_BUS_P_PERIC_IPCLKPORT_MAINCLK 0x2044 +#define GAT_BUS_P_PERIC_IPCLKPORT_SMMUCLK 0x2048 +#define GAT_EQOS_TOP_IPCLKPORT_ACLK_I 0x204c +#define GAT_EQOS_TOP_IPCLKPORT_CLK_RX_I 0x2050 +#define GAT_EQOS_TOP_IPCLKPORT_HCLK_I 0x2054 +#define GAT_EQOS_TOP_IPCLKPORT_RGMII_CLK_I 0x2058 +#define GAT_EQOS_TOP_IPCLKPORT_RII_CLK_I 0x205c +#define GAT_EQOS_TOP_IPCLKPORT_RMII_CLK_I 0x2060 +#define 
GAT_GPIO_PERIC_IPCLKPORT_PCLK 0x2064 +#define GAT_NS_BRDG_PERIC_IPCLKPORT_CLK__PSOC_PERIC__CLK_PERIC_D 0x2068 +#define GAT_NS_BRDG_PERIC_IPCLKPORT_CLK__PSOC_PERIC__CLK_PERIC_P 0x206c +#define GAT_PERIC_ADC0_IPCLKPORT_PCLK_S0 0x2070 +#define GAT_PERIC_DMA0_IPCLKPORT_ACLK 0x2074 +#define GAT_PERIC_DMA1_IPCLKPORT_ACLK 0x2078 +#define GAT_PERIC_I2C0_IPCLKPORT_I_PCLK 0x207c +#define GAT_PERIC_I2C1_IPCLKPORT_I_PCLK 0x2080 +#define GAT_PERIC_I2C2_IPCLKPORT_I_PCLK 0x2084 +#define GAT_PERIC_I2C3_IPCLKPORT_I_PCLK 0x2088 +#define GAT_PERIC_I2C4_IPCLKPORT_I_PCLK 0x208c +#define GAT_PERIC_I2C5_IPCLKPORT_I_PCLK 0x2090 +#define GAT_PERIC_I2C6_IPCLKPORT_I_PCLK 0x2094 +#define GAT_PERIC_I2C7_IPCLKPORT_I_PCLK 0x2098 +#define GAT_PERIC_MCAN0_IPCLKPORT_CCLK 0x209c +#define GAT_PERIC_MCAN0_IPCLKPORT_PCLK 0x20a0 +#define GAT_PERIC_MCAN1_IPCLKPORT_CCLK 0x20a4 +#define GAT_PERIC_MCAN1_IPCLKPORT_PCLK 0x20a8 +#define GAT_PERIC_MCAN2_IPCLKPORT_CCLK 0x20ac +#define GAT_PERIC_MCAN2_IPCLKPORT_PCLK 0x20b0 +#define GAT_PERIC_MCAN3_IPCLKPORT_CCLK 0x20b4 +#define GAT_PERIC_MCAN3_IPCLKPORT_PCLK 0x20b8 +#define GAT_PERIC_PWM0_IPCLKPORT_I_PCLK_S0 0x20bc +#define GAT_PERIC_PWM1_IPCLKPORT_I_PCLK_S0 0x20c0 +#define GAT_PERIC_SMMU_IPCLKPORT_CCLK 0x20c4 +#define GAT_PERIC_SMMU_IPCLKPORT_PERIC_BCLK 0x20c8 +#define GAT_PERIC_SPI0_IPCLKPORT_I_PCLK 0x20cc +#define GAT_PERIC_SPI0_IPCLKPORT_I_SCLK_SPI 0x20d0 +#define GAT_PERIC_SPI1_IPCLKPORT_I_PCLK 0x20d4 +#define GAT_PERIC_SPI1_IPCLKPORT_I_SCLK_SPI 0x20d8 +#define GAT_PERIC_SPI2_IPCLKPORT_I_PCLK 0x20dc +#define GAT_PERIC_SPI2_IPCLKPORT_I_SCLK_SPI 0x20e0 +#define GAT_PERIC_TDM0_IPCLKPORT_HCLK_M 0x20e4 +#define GAT_PERIC_TDM0_IPCLKPORT_PCLK 0x20e8 +#define GAT_PERIC_TDM1_IPCLKPORT_HCLK_M 0x20ec +#define GAT_PERIC_TDM1_IPCLKPORT_PCLK 0x20f0 +#define GAT_PERIC_UART0_IPCLKPORT_I_SCLK_UART 0x20f4 +#define GAT_PERIC_UART0_IPCLKPORT_PCLK 0x20f8 +#define GAT_PERIC_UART1_IPCLKPORT_I_SCLK_UART 0x20fc +#define GAT_PERIC_UART1_IPCLKPORT_PCLK 0x2100 +#define GAT_SYSREG_PERI_IPCLKPORT_PCLK 0x2104 + +static const unsigned long peric_clk_regs[] __initconst = { + PLL_CON0_PERIC_DMACLK_MUX, + PLL_CON0_PERIC_EQOS_BUSCLK_MUX, + PLL_CON0_PERIC_PCLK_MUX, + PLL_CON0_PERIC_TBUCLK_MUX, + PLL_CON0_SPI_CLK, + PLL_CON0_SPI_PCLK, + PLL_CON0_UART_CLK, + PLL_CON0_UART_PCLK, + MUX_PERIC_EQOS_PHYRXCLK, + DIV_EQOS_BUSCLK, + DIV_PERIC_MCAN_CLK, + DIV_RGMII_CLK, + DIV_RII_CLK, + DIV_RMII_CLK, + DIV_SPI_CLK, + DIV_UART_CLK, + GAT_EQOS_TOP_IPCLKPORT_CLK_PTP_REF_I, + GAT_GPIO_PERIC_IPCLKPORT_OSCCLK, + GAT_PERIC_ADC0_IPCLKPORT_I_OSCCLK, + GAT_PERIC_CMU_PERIC_IPCLKPORT_PCLK, + GAT_PERIC_PWM0_IPCLKPORT_I_OSCCLK, + GAT_PERIC_PWM1_IPCLKPORT_I_OSCCLK, + GAT_ASYNC_APB_DMA0_IPCLKPORT_PCLKM, + GAT_ASYNC_APB_DMA0_IPCLKPORT_PCLKS, + GAT_ASYNC_APB_DMA1_IPCLKPORT_PCLKM, + GAT_ASYNC_APB_DMA1_IPCLKPORT_PCLKS, + GAT_AXI2APB_PERIC0_IPCLKPORT_ACLK, + GAT_AXI2APB_PERIC1_IPCLKPORT_ACLK, + GAT_AXI2APB_PERIC2_IPCLKPORT_ACLK, + GAT_BUS_D_PERIC_IPCLKPORT_DMACLK, + GAT_BUS_D_PERIC_IPCLKPORT_EQOSCLK, + GAT_BUS_D_PERIC_IPCLKPORT_MAINCLK, + GAT_BUS_P_PERIC_IPCLKPORT_EQOSCLK, + GAT_BUS_P_PERIC_IPCLKPORT_MAINCLK, + GAT_BUS_P_PERIC_IPCLKPORT_SMMUCLK, + GAT_EQOS_TOP_IPCLKPORT_ACLK_I, + GAT_EQOS_TOP_IPCLKPORT_CLK_RX_I, + GAT_EQOS_TOP_IPCLKPORT_HCLK_I, + GAT_EQOS_TOP_IPCLKPORT_RGMII_CLK_I, + GAT_EQOS_TOP_IPCLKPORT_RII_CLK_I, + GAT_EQOS_TOP_IPCLKPORT_RMII_CLK_I, + GAT_GPIO_PERIC_IPCLKPORT_PCLK, + GAT_NS_BRDG_PERIC_IPCLKPORT_CLK__PSOC_PERIC__CLK_PERIC_D, + GAT_NS_BRDG_PERIC_IPCLKPORT_CLK__PSOC_PERIC__CLK_PERIC_P, + GAT_PERIC_ADC0_IPCLKPORT_PCLK_S0, + 
GAT_PERIC_DMA0_IPCLKPORT_ACLK, + GAT_PERIC_DMA1_IPCLKPORT_ACLK, + GAT_PERIC_I2C0_IPCLKPORT_I_PCLK, + GAT_PERIC_I2C1_IPCLKPORT_I_PCLK, + GAT_PERIC_I2C2_IPCLKPORT_I_PCLK, + GAT_PERIC_I2C3_IPCLKPORT_I_PCLK, + GAT_PERIC_I2C4_IPCLKPORT_I_PCLK, + GAT_PERIC_I2C5_IPCLKPORT_I_PCLK, + GAT_PERIC_I2C6_IPCLKPORT_I_PCLK, + GAT_PERIC_I2C7_IPCLKPORT_I_PCLK, + GAT_PERIC_MCAN0_IPCLKPORT_CCLK, + GAT_PERIC_MCAN0_IPCLKPORT_PCLK, + GAT_PERIC_MCAN1_IPCLKPORT_CCLK, + GAT_PERIC_MCAN1_IPCLKPORT_PCLK, + GAT_PERIC_MCAN2_IPCLKPORT_CCLK, + GAT_PERIC_MCAN2_IPCLKPORT_PCLK, + GAT_PERIC_MCAN3_IPCLKPORT_CCLK, + GAT_PERIC_MCAN3_IPCLKPORT_PCLK, + GAT_PERIC_PWM0_IPCLKPORT_I_PCLK_S0, + GAT_PERIC_PWM1_IPCLKPORT_I_PCLK_S0, + GAT_PERIC_SMMU_IPCLKPORT_CCLK, + GAT_PERIC_SMMU_IPCLKPORT_PERIC_BCLK, + GAT_PERIC_SPI0_IPCLKPORT_I_PCLK, + GAT_PERIC_SPI0_IPCLKPORT_I_SCLK_SPI, + GAT_PERIC_SPI1_IPCLKPORT_I_PCLK, + GAT_PERIC_SPI1_IPCLKPORT_I_SCLK_SPI, + GAT_PERIC_SPI2_IPCLKPORT_I_PCLK, + GAT_PERIC_SPI2_IPCLKPORT_I_SCLK_SPI, + GAT_PERIC_TDM0_IPCLKPORT_HCLK_M, + GAT_PERIC_TDM0_IPCLKPORT_PCLK, + GAT_PERIC_TDM1_IPCLKPORT_HCLK_M, + GAT_PERIC_TDM1_IPCLKPORT_PCLK, + GAT_PERIC_UART0_IPCLKPORT_I_SCLK_UART, + GAT_PERIC_UART0_IPCLKPORT_PCLK, + GAT_PERIC_UART1_IPCLKPORT_I_SCLK_UART, + GAT_PERIC_UART1_IPCLKPORT_PCLK, + GAT_SYSREG_PERI_IPCLKPORT_PCLK, +}; + +static const struct samsung_fixed_rate_clock peric_fixed_clks[] __initconst = { + FRATE(PERIC_EQOS_PHYRXCLK, "eqos_phyrxclk", NULL, 0, 125000000), +}; + +/* List of parent clocks for Muxes in CMU_PERIC */ +PNAME(mout_peric_dmaclk_p) = { "fin_pll", "cmu_peric_shared1div4_dmaclk_gate" }; +PNAME(mout_peric_eqos_busclk_p) = { "fin_pll", "dout_cmu_pll_shared0_div4" }; +PNAME(mout_peric_pclk_p) = { "fin_pll", "dout_cmu_peric_shared1div36" }; +PNAME(mout_peric_tbuclk_p) = { "fin_pll", "dout_cmu_peric_shared0div3_tbuclk" }; +PNAME(mout_peric_spi_clk_p) = { "fin_pll", "dout_cmu_peric_shared0div20" }; +PNAME(mout_peric_spi_pclk_p) = { "fin_pll", "dout_cmu_peric_shared1div36" }; +PNAME(mout_peric_uart_clk_p) = { "fin_pll", "dout_cmu_peric_shared1div4_dmaclk" }; +PNAME(mout_peric_uart_pclk_p) = { "fin_pll", "dout_cmu_peric_shared1div36" }; +PNAME(mout_peric_eqos_phyrxclk_p) = { "dout_peric_rgmii_clk", "eqos_phyrxclk" }; + +static const struct samsung_mux_clock peric_mux_clks[] __initconst = { + MUX(0, "mout_peric_dmaclk", mout_peric_dmaclk_p, PLL_CON0_PERIC_DMACLK_MUX, 4, 1), + MUX(0, "mout_peric_eqos_busclk", mout_peric_eqos_busclk_p, + PLL_CON0_PERIC_EQOS_BUSCLK_MUX, 4, 1), + MUX(0, "mout_peric_pclk", mout_peric_pclk_p, PLL_CON0_PERIC_PCLK_MUX, 4, 1), + MUX(0, "mout_peric_tbuclk", mout_peric_tbuclk_p, PLL_CON0_PERIC_TBUCLK_MUX, 4, 1), + MUX(0, "mout_peric_spi_clk", mout_peric_spi_clk_p, PLL_CON0_SPI_CLK, 4, 1), + MUX(0, "mout_peric_spi_pclk", mout_peric_spi_pclk_p, PLL_CON0_SPI_PCLK, 4, 1), + MUX(0, "mout_peric_uart_clk", mout_peric_uart_clk_p, PLL_CON0_UART_CLK, 4, 1), + MUX(0, "mout_peric_uart_pclk", mout_peric_uart_pclk_p, PLL_CON0_UART_PCLK, 4, 1), + MUX(PERIC_EQOS_PHYRXCLK_MUX, "mout_peric_eqos_phyrxclk", mout_peric_eqos_phyrxclk_p, + MUX_PERIC_EQOS_PHYRXCLK, 0, 1), +}; + +static const struct samsung_div_clock peric_div_clks[] __initconst = { + DIV(0, "dout_peric_eqos_busclk", "mout_peric_eqos_busclk", DIV_EQOS_BUSCLK, 0, 4), + DIV(0, "dout_peric_mcan_clk", "mout_peric_dmaclk", DIV_PERIC_MCAN_CLK, 0, 4), + DIV(PERIC_DOUT_RGMII_CLK, "dout_peric_rgmii_clk", "mout_peric_eqos_busclk", + DIV_RGMII_CLK, 0, 4), + DIV(0, "dout_peric_rii_clk", "dout_peric_rmii_clk", DIV_RII_CLK, 0, 4), + DIV(0, 
"dout_peric_rmii_clk", "dout_peric_rgmii_clk", DIV_RMII_CLK, 0, 4), + DIV(0, "dout_peric_spi_clk", "mout_peric_spi_clk", DIV_SPI_CLK, 0, 6), + DIV(0, "dout_peric_uart_clk", "mout_peric_uart_clk", DIV_UART_CLK, 0, 6), +}; + +static const struct samsung_gate_clock peric_gate_clks[] __initconst = { + GATE(PERIC_EQOS_TOP_IPCLKPORT_CLK_PTP_REF_I, "peric_eqos_top_ipclkport_clk_ptp_ref_i", + "fin_pll", GAT_EQOS_TOP_IPCLKPORT_CLK_PTP_REF_I, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_gpio_peric_ipclkport_oscclk", "fin_pll", GAT_GPIO_PERIC_IPCLKPORT_OSCCLK, + 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_PCLK_ADCIF, "peric_adc0_ipclkport_i_oscclk", "fin_pll", + GAT_PERIC_ADC0_IPCLKPORT_I_OSCCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_cmu_peric_ipclkport_pclk", "mout_peric_pclk", + GAT_PERIC_CMU_PERIC_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_pwm0_ipclkport_i_oscclk", "fin_pll", GAT_PERIC_PWM0_IPCLKPORT_I_OSCCLK, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_pwm1_ipclkport_i_oscclk", "fin_pll", GAT_PERIC_PWM1_IPCLKPORT_I_OSCCLK, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_async_apb_dma0_ipclkport_pclkm", "mout_peric_dmaclk", + GAT_ASYNC_APB_DMA0_IPCLKPORT_PCLKM, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_async_apb_dma0_ipclkport_pclks", "mout_peric_pclk", + GAT_ASYNC_APB_DMA0_IPCLKPORT_PCLKS, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_async_apb_dma1_ipclkport_pclkm", "mout_peric_dmaclk", + GAT_ASYNC_APB_DMA1_IPCLKPORT_PCLKM, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_async_apb_dma1_ipclkport_pclks", "mout_peric_pclk", + GAT_ASYNC_APB_DMA1_IPCLKPORT_PCLKS, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_axi2apb_peric0_ipclkport_aclk", "mout_peric_pclk", + GAT_AXI2APB_PERIC0_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_axi2apb_peric1_ipclkport_aclk", "mout_peric_pclk", + GAT_AXI2APB_PERIC1_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_axi2apb_peric2_ipclkport_aclk", "mout_peric_pclk", + GAT_AXI2APB_PERIC2_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_bus_d_peric_ipclkport_dmaclk", "mout_peric_dmaclk", + GAT_BUS_D_PERIC_IPCLKPORT_DMACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_BUS_D_PERIC_IPCLKPORT_EQOSCLK, "peric_bus_d_peric_ipclkport_eqosclk", + "dout_peric_eqos_busclk", GAT_BUS_D_PERIC_IPCLKPORT_EQOSCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_bus_d_peric_ipclkport_mainclk", "mout_peric_tbuclk", + GAT_BUS_D_PERIC_IPCLKPORT_MAINCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_BUS_P_PERIC_IPCLKPORT_EQOSCLK, "peric_bus_p_peric_ipclkport_eqosclk", + "dout_peric_eqos_busclk", GAT_BUS_P_PERIC_IPCLKPORT_EQOSCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_bus_p_peric_ipclkport_mainclk", "mout_peric_pclk", + GAT_BUS_P_PERIC_IPCLKPORT_MAINCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_bus_p_peric_ipclkport_smmuclk", "mout_peric_tbuclk", + GAT_BUS_P_PERIC_IPCLKPORT_SMMUCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_EQOS_TOP_IPCLKPORT_ACLK_I, "peric_eqos_top_ipclkport_aclk_i", + "dout_peric_eqos_busclk", GAT_EQOS_TOP_IPCLKPORT_ACLK_I, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_EQOS_TOP_IPCLKPORT_CLK_RX_I, "peric_eqos_top_ipclkport_clk_rx_i", + "mout_peric_eqos_phyrxclk", GAT_EQOS_TOP_IPCLKPORT_CLK_RX_I, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_EQOS_TOP_IPCLKPORT_HCLK_I, "peric_eqos_top_ipclkport_hclk_i", + "dout_peric_eqos_busclk", GAT_EQOS_TOP_IPCLKPORT_HCLK_I, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_EQOS_TOP_IPCLKPORT_RGMII_CLK_I, "peric_eqos_top_ipclkport_rgmii_clk_i", + "dout_peric_rgmii_clk", GAT_EQOS_TOP_IPCLKPORT_RGMII_CLK_I, 21, 
CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_eqos_top_ipclkport_rii_clk_i", "dout_peric_rii_clk", + GAT_EQOS_TOP_IPCLKPORT_RII_CLK_I, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_eqos_top_ipclkport_rmii_clk_i", "dout_peric_rmii_clk", + GAT_EQOS_TOP_IPCLKPORT_RMII_CLK_I, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_gpio_peric_ipclkport_pclk", "mout_peric_pclk", + GAT_GPIO_PERIC_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_ns_brdg_peric_ipclkport_clk__psoc_peric__clk_peric_d", "mout_peric_tbuclk", + GAT_NS_BRDG_PERIC_IPCLKPORT_CLK__PSOC_PERIC__CLK_PERIC_D, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_ns_brdg_peric_ipclkport_clk__psoc_peric__clk_peric_p", "mout_peric_pclk", + GAT_NS_BRDG_PERIC_IPCLKPORT_CLK__PSOC_PERIC__CLK_PERIC_P, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_adc0_ipclkport_pclk_s0", "mout_peric_pclk", + GAT_PERIC_ADC0_IPCLKPORT_PCLK_S0, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_DMA0_IPCLKPORT_ACLK, "peric_dma0_ipclkport_aclk", "mout_peric_dmaclk", + GAT_PERIC_DMA0_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_DMA1_IPCLKPORT_ACLK, "peric_dma1_ipclkport_aclk", "mout_peric_dmaclk", + GAT_PERIC_DMA1_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_PCLK_HSI2C0, "peric_i2c0_ipclkport_i_pclk", "mout_peric_pclk", + GAT_PERIC_I2C0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_PCLK_HSI2C1, "peric_i2c1_ipclkport_i_pclk", "mout_peric_pclk", + GAT_PERIC_I2C1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_PCLK_HSI2C2, "peric_i2c2_ipclkport_i_pclk", "mout_peric_pclk", + GAT_PERIC_I2C2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_PCLK_HSI2C3, "peric_i2c3_ipclkport_i_pclk", "mout_peric_pclk", + GAT_PERIC_I2C3_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_PCLK_HSI2C4, "peric_i2c4_ipclkport_i_pclk", "mout_peric_pclk", + GAT_PERIC_I2C4_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_PCLK_HSI2C5, "peric_i2c5_ipclkport_i_pclk", "mout_peric_pclk", + GAT_PERIC_I2C5_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_PCLK_HSI2C6, "peric_i2c6_ipclkport_i_pclk", "mout_peric_pclk", + GAT_PERIC_I2C6_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_PCLK_HSI2C7, "peric_i2c7_ipclkport_i_pclk", "mout_peric_pclk", + GAT_PERIC_I2C7_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_MCAN0_IPCLKPORT_CCLK, "peric_mcan0_ipclkport_cclk", "dout_peric_mcan_clk", + GAT_PERIC_MCAN0_IPCLKPORT_CCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_MCAN0_IPCLKPORT_PCLK, "peric_mcan0_ipclkport_pclk", "mout_peric_pclk", + GAT_PERIC_MCAN0_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_MCAN1_IPCLKPORT_CCLK, "peric_mcan1_ipclkport_cclk", "dout_peric_mcan_clk", + GAT_PERIC_MCAN1_IPCLKPORT_CCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_MCAN1_IPCLKPORT_PCLK, "peric_mcan1_ipclkport_pclk", "mout_peric_pclk", + GAT_PERIC_MCAN1_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_MCAN2_IPCLKPORT_CCLK, "peric_mcan2_ipclkport_cclk", "dout_peric_mcan_clk", + GAT_PERIC_MCAN2_IPCLKPORT_CCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_MCAN2_IPCLKPORT_PCLK, "peric_mcan2_ipclkport_pclk", "mout_peric_pclk", + GAT_PERIC_MCAN2_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_MCAN3_IPCLKPORT_CCLK, "peric_mcan3_ipclkport_cclk", "dout_peric_mcan_clk", + GAT_PERIC_MCAN3_IPCLKPORT_CCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_MCAN3_IPCLKPORT_PCLK, "peric_mcan3_ipclkport_pclk", "mout_peric_pclk", + GAT_PERIC_MCAN3_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_PWM0_IPCLKPORT_I_PCLK_S0, "peric_pwm0_ipclkport_i_pclk_s0", 
"mout_peric_pclk", + GAT_PERIC_PWM0_IPCLKPORT_I_PCLK_S0, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_PWM1_IPCLKPORT_I_PCLK_S0, "peric_pwm1_ipclkport_i_pclk_s0", "mout_peric_pclk", + GAT_PERIC_PWM1_IPCLKPORT_I_PCLK_S0, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_smmu_ipclkport_cclk", "mout_peric_tbuclk", + GAT_PERIC_SMMU_IPCLKPORT_CCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_smmu_ipclkport_peric_bclk", "mout_peric_tbuclk", + GAT_PERIC_SMMU_IPCLKPORT_PERIC_BCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_PCLK_SPI0, "peric_spi0_ipclkport_i_pclk", "mout_peric_spi_pclk", + GAT_PERIC_SPI0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_SCLK_SPI0, "peric_spi0_ipclkport_i_sclk_spi", "dout_peric_spi_clk", + GAT_PERIC_SPI0_IPCLKPORT_I_SCLK_SPI, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_PCLK_SPI1, "peric_spi1_ipclkport_i_pclk", "mout_peric_spi_pclk", + GAT_PERIC_SPI1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_SCLK_SPI1, "peric_spi1_ipclkport_i_sclk_spi", "dout_peric_spi_clk", + GAT_PERIC_SPI1_IPCLKPORT_I_SCLK_SPI, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_PCLK_SPI2, "peric_spi2_ipclkport_i_pclk", "mout_peric_spi_pclk", + GAT_PERIC_SPI2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_SCLK_SPI2, "peric_spi2_ipclkport_i_sclk_spi", "dout_peric_spi_clk", + GAT_PERIC_SPI2_IPCLKPORT_I_SCLK_SPI, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_HCLK_TDM0, "peric_tdm0_ipclkport_hclk_m", "mout_peric_pclk", + GAT_PERIC_TDM0_IPCLKPORT_HCLK_M, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_PCLK_TDM0, "peric_tdm0_ipclkport_pclk", "mout_peric_pclk", + GAT_PERIC_TDM0_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_HCLK_TDM1, "peric_tdm1_ipclkport_hclk_m", "mout_peric_pclk", + GAT_PERIC_TDM1_IPCLKPORT_HCLK_M, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_PCLK_TDM1, "peric_tdm1_ipclkport_pclk", "mout_peric_pclk", + GAT_PERIC_TDM1_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_SCLK_UART0, "peric_uart0_ipclkport_i_sclk_uart", "dout_peric_uart_clk", + GAT_PERIC_UART0_IPCLKPORT_I_SCLK_UART, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_PCLK_UART0, "peric_uart0_ipclkport_pclk", "mout_peric_uart_pclk", + GAT_PERIC_UART0_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_SCLK_UART1, "peric_uart1_ipclkport_i_sclk_uart", "dout_peric_uart_clk", + GAT_PERIC_UART1_IPCLKPORT_I_SCLK_UART, 21, CLK_IGNORE_UNUSED, 0), + GATE(PERIC_PCLK_UART1, "peric_uart1_ipclkport_pclk", "mout_peric_uart_pclk", + GAT_PERIC_UART1_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "peric_sysreg_peri_ipclkport_pclk", "mout_peric_pclk", + GAT_SYSREG_PERI_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), +}; + +static const struct samsung_cmu_info peric_cmu_info __initconst = { + .mux_clks = peric_mux_clks, + .nr_mux_clks = ARRAY_SIZE(peric_mux_clks), + .div_clks = peric_div_clks, + .nr_div_clks = ARRAY_SIZE(peric_div_clks), + .gate_clks = peric_gate_clks, + .nr_gate_clks = ARRAY_SIZE(peric_gate_clks), + .fixed_clks = peric_fixed_clks, + .nr_fixed_clks = ARRAY_SIZE(peric_fixed_clks), + .nr_clk_ids = PERIC_NR_CLK, + .clk_regs = peric_clk_regs, + .nr_clk_regs = ARRAY_SIZE(peric_clk_regs), + .clk_name = "dout_cmu_pll_shared0_div4", +}; + +/** + * fsd_cmu_probe - Probe function for FSD platform clocks + * @pdev: Pointer to platform device + * + * Configure clock hierarchy for clock domains of FSD platform + */ +static int __init fsd_cmu_probe(struct platform_device *pdev) +{ + const struct samsung_cmu_info *info; + struct device *dev = &pdev->dev; + + info = of_device_get_match_data(dev); + exynos_arm64_register_cmu(dev, dev->of_node, info); 
+ + return 0; +} + +/* CMUs which belong to Power Domains and need runtime PM to be implemented */ +static const struct of_device_id fsd_cmu_of_match[] = { + { + .compatible = "tesla,fsd-clock-peric", + .data = &peric_cmu_info, + }, { + }, +}; + +static struct platform_driver fsd_cmu_driver __refdata = { + .driver = { + .name = "fsd-cmu", + .of_match_table = fsd_cmu_of_match, + .suppress_bind_attrs = true, + }, + .probe = fsd_cmu_probe, +}; + +static int __init fsd_cmu_init(void) +{ + return platform_driver_register(&fsd_cmu_driver); +} +core_initcall(fsd_cmu_init); -- cgit v1.2.3 From a15e367b02543f96ae845baf7be4526080437305 Mon Sep 17 00:00:00 2001 From: Alim Akhtar Date: Mon, 24 Jan 2022 19:46:34 +0530 Subject: clk: samsung: fsd: Add cmu_fsys0 clock information CMU_FSYS0 block has IPs like UFS, EQOS, PCIe etc, lets add the related clock information for the same. Cc: linux-fsd@tesla.com Signed-off-by: Pankaj Dubey Signed-off-by: Shradha Todi Signed-off-by: Jayati Sahu Signed-off-by: Ajay Kumar Signed-off-by: Alim Akhtar Reviewed-by: Krzysztof Kozlowski Acked-by: Sylwester Nawrocki Link: https://lore.kernel.org/r/20220124141644.71052-7-alim.akhtar@samsung.com Signed-off-by: Krzysztof Kozlowski --- drivers/clk/samsung/clk-fsd.c | 302 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 302 insertions(+) (limited to 'drivers') diff --git a/drivers/clk/samsung/clk-fsd.c b/drivers/clk/samsung/clk-fsd.c index a3d328318814..785c493be2b6 100644 --- a/drivers/clk/samsung/clk-fsd.c +++ b/drivers/clk/samsung/clk-fsd.c @@ -673,6 +673,305 @@ static const struct samsung_cmu_info peric_cmu_info __initconst = { .clk_name = "dout_cmu_pll_shared0_div4", }; +/* Register Offset definitions for CMU_FSYS0 (0x15010000) */ +#define PLL_CON0_CLKCMU_FSYS0_UNIPRO 0x100 +#define PLL_CON0_CLK_FSYS0_SLAVEBUSCLK 0x140 +#define PLL_CON0_EQOS_RGMII_125_MUX1 0x160 +#define DIV_CLK_UNIPRO 0x1800 +#define DIV_EQS_RGMII_CLK_125 0x1804 +#define DIV_PERIBUS_GRP 0x1808 +#define DIV_EQOS_RII_CLK2O5 0x180c +#define DIV_EQOS_RMIICLK_25 0x1810 +#define DIV_PCIE_PHY_OSCCLK 0x1814 +#define GAT_FSYS0_EQOS_TOP0_IPCLKPORT_CLK_PTP_REF_I 0x2004 +#define GAT_FSYS0_EQOS_TOP0_IPCLKPORT_CLK_RX_I 0x2008 +#define GAT_FSYS0_FSYS0_CMU_FSYS0_IPCLKPORT_PCLK 0x200c +#define GAT_FSYS0_GPIO_FSYS0_IPCLKPORT_OSCCLK 0x2010 +#define GAT_FSYS0_PCIE_TOP_IPCLKPORT_PCIEG3_PHY_X4_INST_0_PLL_REFCLK_FROM_XO 0x2014 +#define GAT_FSYS0_PCIE_TOP_IPCLKPORT_PIPE_PAL_INST_0_I_IMMORTAL_CLK 0x2018 +#define GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_AUX_CLK_SOC 0x201c +#define GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_MPHY_REFCLK_IXTAL24 0x2020 +#define GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_MPHY_REFCLK_IXTAL26 0x2024 +#define GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_MPHY_REFCLK_IXTAL24 0x2028 +#define GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_MPHY_REFCLK_IXTAL26 0x202c +#define GAT_FSYS0_AHBBR_FSYS0_IPCLKPORT_HCLK 0x2038 +#define GAT_FSYS0_AXI2APB_FSYS0_IPCLKPORT_ACLK 0x203c +#define GAT_FSYS0_BUS_D_FSYS0_IPCLKPORT_MAINCLK 0x2040 +#define GAT_FSYS0_BUS_D_FSYS0_IPCLKPORT_PERICLK 0x2044 +#define GAT_FSYS0_BUS_P_FSYS0_IPCLKPORT_MAINCLK 0x2048 +#define GAT_FSYS0_BUS_P_FSYS0_IPCLKPORT_TCUCLK 0x204c +#define GAT_FSYS0_CPE425_IPCLKPORT_ACLK 0x2050 +#define GAT_FSYS0_EQOS_TOP0_IPCLKPORT_ACLK_I 0x2054 +#define GAT_FSYS0_EQOS_TOP0_IPCLKPORT_HCLK_I 0x2058 +#define GAT_FSYS0_EQOS_TOP0_IPCLKPORT_RGMII_CLK_I 0x205c +#define GAT_FSYS0_EQOS_TOP0_IPCLKPORT_RII_CLK_I 0x2060 +#define GAT_FSYS0_EQOS_TOP0_IPCLKPORT_RMII_CLK_I 0x2064 +#define GAT_FSYS0_GPIO_FSYS0_IPCLKPORT_PCLK 0x2068 +#define 
GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_D 0x206c +#define GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_D1 0x2070 +#define GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_P 0x2074 +#define GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_S 0x2078 +#define GAT_FSYS0_PCIE_TOP_IPCLKPORT_PCIEG3_PHY_X4_INST_0_I_APB_PCLK 0x207c +#define GAT_FSYS0_PCIE_TOP_IPCLKPORT_PCIEG3_PHY_X4_INST_0_PLL_REFCLK_FROM_SYSPLL 0x2080 +#define GAT_FSYS0_PCIE_TOP_IPCLKPORT_PIPE_PAL_INST_0_I_APB_PCLK_0 0x2084 +#define GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_DBI_ACLK_SOC 0x2088 +#define GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK 0x208c +#define GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_MSTR_ACLK_SOC 0x2090 +#define GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_SLV_ACLK_SOC 0x2094 +#define GAT_FSYS0_SMMU_FSYS0_IPCLKPORT_CCLK 0x2098 +#define GAT_FSYS0_SMMU_FSYS0_IPCLKPORT_FSYS0_BCLK 0x209c +#define GAT_FSYS0_SYSREG_FSYS0_IPCLKPORT_PCLK 0x20a0 +#define GAT_FSYS0_UFS_TOP0_IPCLKPORT_HCLK_BUS 0x20a4 +#define GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_ACLK 0x20a8 +#define GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_CLK_UNIPRO 0x20ac +#define GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_FMP_CLK 0x20b0 +#define GAT_FSYS0_UFS_TOP1_IPCLKPORT_HCLK_BUS 0x20b4 +#define GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_ACLK 0x20b8 +#define GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_CLK_UNIPRO 0x20bc +#define GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_FMP_CLK 0x20c0 +#define GAT_FSYS0_RII_CLK_DIVGATE 0x20d4 + +static const unsigned long fsys0_clk_regs[] __initconst = { + PLL_CON0_CLKCMU_FSYS0_UNIPRO, + PLL_CON0_CLK_FSYS0_SLAVEBUSCLK, + PLL_CON0_EQOS_RGMII_125_MUX1, + DIV_CLK_UNIPRO, + DIV_EQS_RGMII_CLK_125, + DIV_PERIBUS_GRP, + DIV_EQOS_RII_CLK2O5, + DIV_EQOS_RMIICLK_25, + DIV_PCIE_PHY_OSCCLK, + GAT_FSYS0_EQOS_TOP0_IPCLKPORT_CLK_PTP_REF_I, + GAT_FSYS0_EQOS_TOP0_IPCLKPORT_CLK_RX_I, + GAT_FSYS0_FSYS0_CMU_FSYS0_IPCLKPORT_PCLK, + GAT_FSYS0_GPIO_FSYS0_IPCLKPORT_OSCCLK, + GAT_FSYS0_PCIE_TOP_IPCLKPORT_PCIEG3_PHY_X4_INST_0_PLL_REFCLK_FROM_XO, + GAT_FSYS0_PCIE_TOP_IPCLKPORT_PIPE_PAL_INST_0_I_IMMORTAL_CLK, + GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_AUX_CLK_SOC, + GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_MPHY_REFCLK_IXTAL24, + GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_MPHY_REFCLK_IXTAL26, + GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_MPHY_REFCLK_IXTAL24, + GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_MPHY_REFCLK_IXTAL26, + GAT_FSYS0_AHBBR_FSYS0_IPCLKPORT_HCLK, + GAT_FSYS0_AXI2APB_FSYS0_IPCLKPORT_ACLK, + GAT_FSYS0_BUS_D_FSYS0_IPCLKPORT_MAINCLK, + GAT_FSYS0_BUS_D_FSYS0_IPCLKPORT_PERICLK, + GAT_FSYS0_BUS_P_FSYS0_IPCLKPORT_MAINCLK, + GAT_FSYS0_BUS_P_FSYS0_IPCLKPORT_TCUCLK, + GAT_FSYS0_CPE425_IPCLKPORT_ACLK, + GAT_FSYS0_EQOS_TOP0_IPCLKPORT_ACLK_I, + GAT_FSYS0_EQOS_TOP0_IPCLKPORT_HCLK_I, + GAT_FSYS0_EQOS_TOP0_IPCLKPORT_RGMII_CLK_I, + GAT_FSYS0_EQOS_TOP0_IPCLKPORT_RII_CLK_I, + GAT_FSYS0_EQOS_TOP0_IPCLKPORT_RMII_CLK_I, + GAT_FSYS0_GPIO_FSYS0_IPCLKPORT_PCLK, + GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_D, + GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_D1, + GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_P, + GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_S, + GAT_FSYS0_PCIE_TOP_IPCLKPORT_PCIEG3_PHY_X4_INST_0_I_APB_PCLK, + GAT_FSYS0_PCIE_TOP_IPCLKPORT_PCIEG3_PHY_X4_INST_0_PLL_REFCLK_FROM_SYSPLL, + GAT_FSYS0_PCIE_TOP_IPCLKPORT_PIPE_PAL_INST_0_I_APB_PCLK_0, + GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_DBI_ACLK_SOC, + 
GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK, + GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_MSTR_ACLK_SOC, + GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_SLV_ACLK_SOC, + GAT_FSYS0_SMMU_FSYS0_IPCLKPORT_CCLK, + GAT_FSYS0_SMMU_FSYS0_IPCLKPORT_FSYS0_BCLK, + GAT_FSYS0_SYSREG_FSYS0_IPCLKPORT_PCLK, + GAT_FSYS0_UFS_TOP0_IPCLKPORT_HCLK_BUS, + GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_ACLK, + GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_CLK_UNIPRO, + GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_FMP_CLK, + GAT_FSYS0_UFS_TOP1_IPCLKPORT_HCLK_BUS, + GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_ACLK, + GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_CLK_UNIPRO, + GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_FMP_CLK, + GAT_FSYS0_RII_CLK_DIVGATE, +}; + +static const struct samsung_fixed_rate_clock fsys0_fixed_clks[] __initconst = { + FRATE(0, "pad_eqos0_phyrxclk", NULL, 0, 125000000), + FRATE(0, "i_mphy_refclk_ixtal26", NULL, 0, 26000000), + FRATE(0, "xtal_clk_pcie_phy", NULL, 0, 100000000), +}; + +/* List of parent clocks for Muxes in CMU_FSYS0 */ +PNAME(mout_fsys0_clkcmu_fsys0_unipro_p) = { "fin_pll", "dout_cmu_pll_shared0_div6" }; +PNAME(mout_fsys0_clk_fsys0_slavebusclk_p) = { "fin_pll", "dout_cmu_fsys0_shared1div4" }; +PNAME(mout_fsys0_eqos_rgmii_125_mux1_p) = { "fin_pll", "dout_cmu_fsys0_shared0div4" }; + +static const struct samsung_mux_clock fsys0_mux_clks[] __initconst = { + MUX(0, "mout_fsys0_clkcmu_fsys0_unipro", mout_fsys0_clkcmu_fsys0_unipro_p, + PLL_CON0_CLKCMU_FSYS0_UNIPRO, 4, 1), + MUX(0, "mout_fsys0_clk_fsys0_slavebusclk", mout_fsys0_clk_fsys0_slavebusclk_p, + PLL_CON0_CLK_FSYS0_SLAVEBUSCLK, 4, 1), + MUX(0, "mout_fsys0_eqos_rgmii_125_mux1", mout_fsys0_eqos_rgmii_125_mux1_p, + PLL_CON0_EQOS_RGMII_125_MUX1, 4, 1), +}; + +static const struct samsung_div_clock fsys0_div_clks[] __initconst = { + DIV(0, "dout_fsys0_clk_unipro", "mout_fsys0_clkcmu_fsys0_unipro", DIV_CLK_UNIPRO, 0, 4), + DIV(0, "dout_fsys0_eqs_rgmii_clk_125", "mout_fsys0_eqos_rgmii_125_mux1", + DIV_EQS_RGMII_CLK_125, 0, 4), + DIV(FSYS0_DOUT_FSYS0_PERIBUS_GRP, "dout_fsys0_peribus_grp", + "mout_fsys0_clk_fsys0_slavebusclk", DIV_PERIBUS_GRP, 0, 4), + DIV(0, "dout_fsys0_eqos_rii_clk2o5", "fsys0_rii_clk_divgate", DIV_EQOS_RII_CLK2O5, 0, 4), + DIV(0, "dout_fsys0_eqos_rmiiclk_25", "mout_fsys0_eqos_rgmii_125_mux1", + DIV_EQOS_RMIICLK_25, 0, 5), + DIV(0, "dout_fsys0_pcie_phy_oscclk", "mout_fsys0_eqos_rgmii_125_mux1", + DIV_PCIE_PHY_OSCCLK, 0, 4), +}; + +static const struct samsung_gate_clock fsys0_gate_clks[] __initconst = { + GATE(FSYS0_EQOS_TOP0_IPCLKPORT_CLK_RX_I, "fsys0_eqos_top0_ipclkport_clk_rx_i", + "pad_eqos0_phyrxclk", GAT_FSYS0_EQOS_TOP0_IPCLKPORT_CLK_RX_I, 21, + CLK_IGNORE_UNUSED, 0), + GATE(PCIE_SUBCTRL_INST0_AUX_CLK_SOC, + "fsys0_pcie_top_ipclkport_fsd_pcie_sub_ctrl_inst_0_aux_clk_soc", "fin_pll", + GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_AUX_CLK_SOC, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_fsys0_cmu_fsys0_ipclkport_pclk", "dout_fsys0_peribus_grp", + GAT_FSYS0_FSYS0_CMU_FSYS0_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, + "fsys0_pcie_top_ipclkport_pcieg3_phy_x4_inst_0_pll_refclk_from_xo", + "xtal_clk_pcie_phy", + GAT_FSYS0_PCIE_TOP_IPCLKPORT_PCIEG3_PHY_X4_INST_0_PLL_REFCLK_FROM_XO, 21, + CLK_IGNORE_UNUSED, 0), + GATE(UFS0_MPHY_REFCLK_IXTAL24, "fsys0_ufs_top0_ipclkport_i_mphy_refclk_ixtal24", + "i_mphy_refclk_ixtal26", GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_MPHY_REFCLK_IXTAL24, 21, + CLK_IGNORE_UNUSED, 0), + GATE(UFS0_MPHY_REFCLK_IXTAL26, "fsys0_ufs_top0_ipclkport_i_mphy_refclk_ixtal26", + "i_mphy_refclk_ixtal26", 
GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_MPHY_REFCLK_IXTAL26, 21, + CLK_IGNORE_UNUSED, 0), + GATE(UFS1_MPHY_REFCLK_IXTAL24, "fsys0_ufs_top1_ipclkport_i_mphy_refclk_ixtal24", + "i_mphy_refclk_ixtal26", GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_MPHY_REFCLK_IXTAL24, 21, + CLK_IGNORE_UNUSED, 0), + GATE(UFS1_MPHY_REFCLK_IXTAL26, "fsys0_ufs_top1_ipclkport_i_mphy_refclk_ixtal26", + "i_mphy_refclk_ixtal26", GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_MPHY_REFCLK_IXTAL26, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_ahbbr_fsys0_ipclkport_hclk", "dout_fsys0_peribus_grp", + GAT_FSYS0_AHBBR_FSYS0_IPCLKPORT_HCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_axi2apb_fsys0_ipclkport_aclk", "dout_fsys0_peribus_grp", + GAT_FSYS0_AXI2APB_FSYS0_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_bus_d_fsys0_ipclkport_mainclk", "mout_fsys0_clk_fsys0_slavebusclk", + GAT_FSYS0_BUS_D_FSYS0_IPCLKPORT_MAINCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_bus_d_fsys0_ipclkport_periclk", "dout_fsys0_peribus_grp", + GAT_FSYS0_BUS_D_FSYS0_IPCLKPORT_PERICLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_bus_p_fsys0_ipclkport_mainclk", "dout_fsys0_peribus_grp", + GAT_FSYS0_BUS_P_FSYS0_IPCLKPORT_MAINCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_bus_p_fsys0_ipclkport_tcuclk", "mout_fsys0_eqos_rgmii_125_mux1", + GAT_FSYS0_BUS_P_FSYS0_IPCLKPORT_TCUCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_cpe425_ipclkport_aclk", "mout_fsys0_clk_fsys0_slavebusclk", + GAT_FSYS0_CPE425_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(FSYS0_EQOS_TOP0_IPCLKPORT_ACLK_I, "fsys0_eqos_top0_ipclkport_aclk_i", + "dout_fsys0_peribus_grp", GAT_FSYS0_EQOS_TOP0_IPCLKPORT_ACLK_I, 21, + CLK_IGNORE_UNUSED, 0), + GATE(FSYS0_EQOS_TOP0_IPCLKPORT_HCLK_I, "fsys0_eqos_top0_ipclkport_hclk_i", + "dout_fsys0_peribus_grp", GAT_FSYS0_EQOS_TOP0_IPCLKPORT_HCLK_I, 21, + CLK_IGNORE_UNUSED, 0), + GATE(FSYS0_EQOS_TOP0_IPCLKPORT_RGMII_CLK_I, "fsys0_eqos_top0_ipclkport_rgmii_clk_i", + "dout_fsys0_eqs_rgmii_clk_125", GAT_FSYS0_EQOS_TOP0_IPCLKPORT_RGMII_CLK_I, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_eqos_top0_ipclkport_rii_clk_i", "dout_fsys0_eqos_rii_clk2o5", + GAT_FSYS0_EQOS_TOP0_IPCLKPORT_RII_CLK_I, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_eqos_top0_ipclkport_rmii_clk_i", "dout_fsys0_eqos_rmiiclk_25", + GAT_FSYS0_EQOS_TOP0_IPCLKPORT_RMII_CLK_I, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_gpio_fsys0_ipclkport_pclk", "dout_fsys0_peribus_grp", + GAT_FSYS0_GPIO_FSYS0_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_gpio_fsys0_ipclkport_oscclk", "fin_pll", + GAT_FSYS0_GPIO_FSYS0_IPCLKPORT_OSCCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_ns_brdg_fsys0_ipclkport_clk__psoc_fsys0__clk_fsys0_d", + "mout_fsys0_clk_fsys0_slavebusclk", + GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_D, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_ns_brdg_fsys0_ipclkport_clk__psoc_fsys0__clk_fsys0_d1", + "mout_fsys0_eqos_rgmii_125_mux1", + GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_D1, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_ns_brdg_fsys0_ipclkport_clk__psoc_fsys0__clk_fsys0_p", + "dout_fsys0_peribus_grp", + GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_P, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_ns_brdg_fsys0_ipclkport_clk__psoc_fsys0__clk_fsys0_s", + "mout_fsys0_clk_fsys0_slavebusclk", + GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_S, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_pcie_top_ipclkport_pcieg3_phy_x4_inst_0_i_apb_pclk", + "dout_fsys0_peribus_grp", + GAT_FSYS0_PCIE_TOP_IPCLKPORT_PCIEG3_PHY_X4_INST_0_I_APB_PCLK, 
21, + CLK_IGNORE_UNUSED, 0), + GATE(0, + "fsys0_pcie_top_ipclkport_pcieg3_phy_x4_inst_0_pll_refclk_from_syspll", + "dout_fsys0_pcie_phy_oscclk", + GAT_FSYS0_PCIE_TOP_IPCLKPORT_PCIEG3_PHY_X4_INST_0_PLL_REFCLK_FROM_SYSPLL, + 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_pcie_top_ipclkport_pipe_pal_inst_0_i_apb_pclk_0", "dout_fsys0_peribus_grp", + GAT_FSYS0_PCIE_TOP_IPCLKPORT_PIPE_PAL_INST_0_I_APB_PCLK_0, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_pcie_top_ipclkport_pipe_pal_inst_0_i_immortal_clk", "fin_pll", + GAT_FSYS0_PCIE_TOP_IPCLKPORT_PIPE_PAL_INST_0_I_IMMORTAL_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PCIE_SUBCTRL_INST0_DBI_ACLK_SOC, + "fsys0_pcie_top_ipclkport_fsd_pcie_sub_ctrl_inst_0_dbi_aclk_soc", + "dout_fsys0_peribus_grp", + GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_DBI_ACLK_SOC, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_pcie_top_ipclkport_fsd_pcie_sub_ctrl_inst_0_i_driver_apb_clk", + "dout_fsys0_peribus_grp", + GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK, 21, + CLK_IGNORE_UNUSED, 0), + GATE(PCIE_SUBCTRL_INST0_MSTR_ACLK_SOC, + "fsys0_pcie_top_ipclkport_fsd_pcie_sub_ctrl_inst_0_mstr_aclk_soc", + "mout_fsys0_clk_fsys0_slavebusclk", + GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_MSTR_ACLK_SOC, 21, + CLK_IGNORE_UNUSED, 0), + GATE(PCIE_SUBCTRL_INST0_SLV_ACLK_SOC, + "fsys0_pcie_top_ipclkport_fsd_pcie_sub_ctrl_inst_0_slv_aclk_soc", + "mout_fsys0_clk_fsys0_slavebusclk", + GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_SLV_ACLK_SOC, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_smmu_fsys0_ipclkport_cclk", "mout_fsys0_eqos_rgmii_125_mux1", + GAT_FSYS0_SMMU_FSYS0_IPCLKPORT_CCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_smmu_fsys0_ipclkport_fsys0_bclk", "mout_fsys0_clk_fsys0_slavebusclk", + GAT_FSYS0_SMMU_FSYS0_IPCLKPORT_FSYS0_BCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_sysreg_fsys0_ipclkport_pclk", "dout_fsys0_peribus_grp", + GAT_FSYS0_SYSREG_FSYS0_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(UFS0_TOP0_HCLK_BUS, "fsys0_ufs_top0_ipclkport_hclk_bus", "dout_fsys0_peribus_grp", + GAT_FSYS0_UFS_TOP0_IPCLKPORT_HCLK_BUS, 21, CLK_IGNORE_UNUSED, 0), + GATE(UFS0_TOP0_ACLK, "fsys0_ufs_top0_ipclkport_i_aclk", "dout_fsys0_peribus_grp", + GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(UFS0_TOP0_CLK_UNIPRO, "fsys0_ufs_top0_ipclkport_i_clk_unipro", "dout_fsys0_clk_unipro", + GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_CLK_UNIPRO, 21, CLK_IGNORE_UNUSED, 0), + GATE(UFS0_TOP0_FMP_CLK, "fsys0_ufs_top0_ipclkport_i_fmp_clk", "dout_fsys0_peribus_grp", + GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_FMP_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(UFS1_TOP1_HCLK_BUS, "fsys0_ufs_top1_ipclkport_hclk_bus", "dout_fsys0_peribus_grp", + GAT_FSYS0_UFS_TOP1_IPCLKPORT_HCLK_BUS, 21, CLK_IGNORE_UNUSED, 0), + GATE(UFS1_TOP1_ACLK, "fsys0_ufs_top1_ipclkport_i_aclk", "dout_fsys0_peribus_grp", + GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(UFS1_TOP1_CLK_UNIPRO, "fsys0_ufs_top1_ipclkport_i_clk_unipro", "dout_fsys0_clk_unipro", + GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_CLK_UNIPRO, 21, CLK_IGNORE_UNUSED, 0), + GATE(UFS1_TOP1_FMP_CLK, "fsys0_ufs_top1_ipclkport_i_fmp_clk", "dout_fsys0_peribus_grp", + GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_FMP_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys0_rii_clk_divgate", "dout_fsys0_eqos_rmiiclk_25", GAT_FSYS0_RII_CLK_DIVGATE, + 21, CLK_IGNORE_UNUSED, 0), + GATE(FSYS0_EQOS_TOP0_IPCLKPORT_CLK_PTP_REF_I, "fsys0_eqos_top0_ipclkport_clk_ptp_ref_i", + "fin_pll", GAT_FSYS0_EQOS_TOP0_IPCLKPORT_CLK_PTP_REF_I, 21, CLK_IGNORE_UNUSED, 0), +}; 
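/*
 * A minimal consumer sketch, not part of this series: the GATE/MUX/DIV tables
 * above only register the FSYS0 clocks with the common clock framework; a
 * peripheral driver on FSD would pick them up through the standard clk API.
 * The probe function, the device and the "aclk" clock-names entry below are
 * assumptions for illustration only.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int fsd_fsys0_consumer_probe(struct platform_device *pdev)
{
	struct clk *aclk;
	int ret;

	/* Resolved via the consumer's "clocks"/"clock-names" DT properties */
	aclk = devm_clk_get(&pdev->dev, "aclk");
	if (IS_ERR(aclk))
		return PTR_ERR(aclk);

	/* Ungates the bus clock (e.g. one of the *_ACLK gates above) */
	ret = clk_prepare_enable(aclk);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "bus clock running at %lu Hz\n",
		 clk_get_rate(aclk));

	return 0;
}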
+ +static const struct samsung_cmu_info fsys0_cmu_info __initconst = { + .mux_clks = fsys0_mux_clks, + .nr_mux_clks = ARRAY_SIZE(fsys0_mux_clks), + .div_clks = fsys0_div_clks, + .nr_div_clks = ARRAY_SIZE(fsys0_div_clks), + .gate_clks = fsys0_gate_clks, + .nr_gate_clks = ARRAY_SIZE(fsys0_gate_clks), + .fixed_clks = fsys0_fixed_clks, + .nr_fixed_clks = ARRAY_SIZE(fsys0_fixed_clks), + .nr_clk_ids = FSYS0_NR_CLK, + .clk_regs = fsys0_clk_regs, + .nr_clk_regs = ARRAY_SIZE(fsys0_clk_regs), + .clk_name = "dout_cmu_fsys0_shared1div4", +}; + /** * fsd_cmu_probe - Probe function for FSD platform clocks * @pdev: Pointer to platform device @@ -695,6 +994,9 @@ static const struct of_device_id fsd_cmu_of_match[] = { { .compatible = "tesla,fsd-clock-peric", .data = &peric_cmu_info, + }, { + .compatible = "tesla,fsd-clock-fsys0", + .data = &fsys0_cmu_info, }, { }, }; -- cgit v1.2.3 From bfbce52e4649b9a2c7296a6296ffbdfc3b07de2e Mon Sep 17 00:00:00 2001 From: Alim Akhtar Date: Mon, 24 Jan 2022 19:46:35 +0530 Subject: clk: samsung: fsd: Add cmu_fsys1 clock information Adds cmu_fsys1 block clock information which are needed for PCIe IPs in block FSYS1. Cc: linux-fsd@tesla.com Signed-off-by: Pankaj Dubey Signed-off-by: Ajay Kumar Signed-off-by: Alim Akhtar Reviewed-by: Krzysztof Kozlowski Acked-by: Sylwester Nawrocki Link: https://lore.kernel.org/r/20220124141644.71052-8-alim.akhtar@samsung.com Signed-off-by: Krzysztof Kozlowski --- drivers/clk/samsung/clk-fsd.c | 175 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 175 insertions(+) (limited to 'drivers') diff --git a/drivers/clk/samsung/clk-fsd.c b/drivers/clk/samsung/clk-fsd.c index 785c493be2b6..19c3ea35a6ea 100644 --- a/drivers/clk/samsung/clk-fsd.c +++ b/drivers/clk/samsung/clk-fsd.c @@ -972,6 +972,178 @@ static const struct samsung_cmu_info fsys0_cmu_info __initconst = { .clk_name = "dout_cmu_fsys0_shared1div4", }; +/* Register Offset definitions for CMU_FSYS1 (0x16810000) */ +#define PLL_CON0_ACLK_FSYS1_BUSP_MUX 0x100 +#define PLL_CON0_PCLKL_FSYS1_BUSP_MUX 0x180 +#define DIV_CLK_FSYS1_PHY0_OSCCLK 0x1800 +#define DIV_CLK_FSYS1_PHY1_OSCCLK 0x1804 +#define GAT_FSYS1_CMU_FSYS1_IPCLKPORT_PCLK 0x2000 +#define GAT_FSYS1_PCIE_LINK0_IPCLKPORT_AUXCLK 0x2004 +#define GAT_FSYS1_PCIE_LINK0_IPCLKPORT_I_SOC_REF_CLK 0x2008 +#define GAT_FSYS1_PCIE_LINK1_IPCLKPORT_AUXCLK 0x200c +#define GAT_FSYS1_PCIE_PHY0_IPCLKPORT_I_REF_XTAL 0x202c +#define GAT_FSYS1_PHY0_OSCCLLK 0x2034 +#define GAT_FSYS1_PHY1_OSCCLK 0x2038 +#define GAT_FSYS1_AXI2APB_FSYS1_IPCLKPORT_ACLK 0x203c +#define GAT_FSYS1_BUS_D0_FSYS1_IPCLKPORT_MAINCLK 0x2040 +#define GAT_FSYS1_BUS_S0_FSYS1_IPCLKPORT_M250CLK 0x2048 +#define GAT_FSYS1_BUS_S0_FSYS1_IPCLKPORT_MAINCLK 0x204c +#define GAT_FSYS1_CPE425_0_FSYS1_IPCLKPORT_ACLK 0x2054 +#define GAT_FSYS1_NS_BRDG_FSYS1_IPCLKPORT_CLK__PSOC_FSYS1__CLK_FSYS1_D0 0x205c +#define GAT_FSYS1_NS_BRDG_FSYS1_IPCLKPORT_CLK__PSOC_FSYS1__CLK_FSYS1_S0 0x2064 +#define GAT_FSYS1_PCIE_LINK0_IPCLKPORT_DBI_ACLK 0x206c +#define GAT_FSYS1_PCIE_LINK0_IPCLKPORT_I_APB_CLK 0x2070 +#define GAT_FSYS1_PCIE_LINK0_IPCLKPORT_I_DRIVER_APB_CLK 0x2074 +#define GAT_FSYS1_PCIE_LINK0_IPCLKPORT_MSTR_ACLK 0x2078 +#define GAT_FSYS1_PCIE_LINK0_IPCLKPORT_SLV_ACLK 0x207c +#define GAT_FSYS1_PCIE_LINK1_IPCLKPORT_DBI_ACLK 0x2080 +#define GAT_FSYS1_PCIE_LINK1_IPCLKPORT_I_DRIVER_APB_CLK 0x2084 +#define GAT_FSYS1_PCIE_LINK1_IPCLKPORT_MSTR_ACLK 0x2088 +#define GAT_FSYS1_PCIE_LINK1_IPCLKPORT_SLV_ACLK 0x208c +#define GAT_FSYS1_PCIE_PHY0_IPCLKPORT_I_APB_CLK 0x20a4 +#define 
GAT_FSYS1_PCIE_PHY0_IPCLKPORT_I_REF_SOC_PLL 0x20a8 +#define GAT_FSYS1_SYSREG_FSYS1_IPCLKPORT_PCLK 0x20b4 +#define GAT_FSYS1_TBU0_FSYS1_IPCLKPORT_ACLK 0x20b8 + +static const unsigned long fsys1_clk_regs[] __initconst = { + PLL_CON0_ACLK_FSYS1_BUSP_MUX, + PLL_CON0_PCLKL_FSYS1_BUSP_MUX, + DIV_CLK_FSYS1_PHY0_OSCCLK, + DIV_CLK_FSYS1_PHY1_OSCCLK, + GAT_FSYS1_CMU_FSYS1_IPCLKPORT_PCLK, + GAT_FSYS1_PCIE_LINK0_IPCLKPORT_AUXCLK, + GAT_FSYS1_PCIE_LINK0_IPCLKPORT_I_SOC_REF_CLK, + GAT_FSYS1_PCIE_LINK1_IPCLKPORT_AUXCLK, + GAT_FSYS1_PCIE_PHY0_IPCLKPORT_I_REF_XTAL, + GAT_FSYS1_PHY0_OSCCLLK, + GAT_FSYS1_PHY1_OSCCLK, + GAT_FSYS1_AXI2APB_FSYS1_IPCLKPORT_ACLK, + GAT_FSYS1_BUS_D0_FSYS1_IPCLKPORT_MAINCLK, + GAT_FSYS1_BUS_S0_FSYS1_IPCLKPORT_M250CLK, + GAT_FSYS1_BUS_S0_FSYS1_IPCLKPORT_MAINCLK, + GAT_FSYS1_CPE425_0_FSYS1_IPCLKPORT_ACLK, + GAT_FSYS1_NS_BRDG_FSYS1_IPCLKPORT_CLK__PSOC_FSYS1__CLK_FSYS1_D0, + GAT_FSYS1_NS_BRDG_FSYS1_IPCLKPORT_CLK__PSOC_FSYS1__CLK_FSYS1_S0, + GAT_FSYS1_PCIE_LINK0_IPCLKPORT_DBI_ACLK, + GAT_FSYS1_PCIE_LINK0_IPCLKPORT_I_APB_CLK, + GAT_FSYS1_PCIE_LINK0_IPCLKPORT_I_DRIVER_APB_CLK, + GAT_FSYS1_PCIE_LINK0_IPCLKPORT_MSTR_ACLK, + GAT_FSYS1_PCIE_LINK0_IPCLKPORT_SLV_ACLK, + GAT_FSYS1_PCIE_LINK1_IPCLKPORT_DBI_ACLK, + GAT_FSYS1_PCIE_LINK1_IPCLKPORT_I_DRIVER_APB_CLK, + GAT_FSYS1_PCIE_LINK1_IPCLKPORT_MSTR_ACLK, + GAT_FSYS1_PCIE_LINK1_IPCLKPORT_SLV_ACLK, + GAT_FSYS1_PCIE_PHY0_IPCLKPORT_I_APB_CLK, + GAT_FSYS1_PCIE_PHY0_IPCLKPORT_I_REF_SOC_PLL, + GAT_FSYS1_SYSREG_FSYS1_IPCLKPORT_PCLK, + GAT_FSYS1_TBU0_FSYS1_IPCLKPORT_ACLK, +}; + +static const struct samsung_fixed_rate_clock fsys1_fixed_clks[] __initconst = { + FRATE(0, "clk_fsys1_phy0_ref", NULL, 0, 100000000), + FRATE(0, "clk_fsys1_phy1_ref", NULL, 0, 100000000), +}; + +/* List of parent clocks for Muxes in CMU_FSYS1 */ +PNAME(mout_fsys1_pclkl_fsys1_busp_mux_p) = { "fin_pll", "dout_cmu_fsys1_shared0div8" }; +PNAME(mout_fsys1_aclk_fsys1_busp_mux_p) = { "fin_pll", "dout_cmu_fsys1_shared0div4" }; + +static const struct samsung_mux_clock fsys1_mux_clks[] __initconst = { + MUX(0, "mout_fsys1_pclkl_fsys1_busp_mux", mout_fsys1_pclkl_fsys1_busp_mux_p, + PLL_CON0_PCLKL_FSYS1_BUSP_MUX, 4, 1), + MUX(0, "mout_fsys1_aclk_fsys1_busp_mux", mout_fsys1_aclk_fsys1_busp_mux_p, + PLL_CON0_ACLK_FSYS1_BUSP_MUX, 4, 1), +}; + +static const struct samsung_div_clock fsys1_div_clks[] __initconst = { + DIV(0, "dout_fsys1_clk_fsys1_phy0_oscclk", "fsys1_phy0_osccllk", + DIV_CLK_FSYS1_PHY0_OSCCLK, 0, 4), + DIV(0, "dout_fsys1_clk_fsys1_phy1_oscclk", "fsys1_phy1_oscclk", + DIV_CLK_FSYS1_PHY1_OSCCLK, 0, 4), +}; + +static const struct samsung_gate_clock fsys1_gate_clks[] __initconst = { + GATE(0, "fsys1_cmu_fsys1_ipclkport_pclk", "mout_fsys1_pclkl_fsys1_busp_mux", + GAT_FSYS1_CMU_FSYS1_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys1_pcie_phy0_ipclkport_i_ref_xtal", "clk_fsys1_phy0_ref", + GAT_FSYS1_PCIE_PHY0_IPCLKPORT_I_REF_XTAL, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys1_phy0_osccllk", "mout_fsys1_aclk_fsys1_busp_mux", + GAT_FSYS1_PHY0_OSCCLLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys1_phy1_oscclk", "mout_fsys1_aclk_fsys1_busp_mux", + GAT_FSYS1_PHY1_OSCCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys1_axi2apb_fsys1_ipclkport_aclk", "mout_fsys1_pclkl_fsys1_busp_mux", + GAT_FSYS1_AXI2APB_FSYS1_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys1_bus_d0_fsys1_ipclkport_mainclk", "mout_fsys1_aclk_fsys1_busp_mux", + GAT_FSYS1_BUS_D0_FSYS1_IPCLKPORT_MAINCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys1_bus_s0_fsys1_ipclkport_m250clk", "mout_fsys1_pclkl_fsys1_busp_mux", + 
GAT_FSYS1_BUS_S0_FSYS1_IPCLKPORT_M250CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys1_bus_s0_fsys1_ipclkport_mainclk", "mout_fsys1_aclk_fsys1_busp_mux", + GAT_FSYS1_BUS_S0_FSYS1_IPCLKPORT_MAINCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys1_cpe425_0_fsys1_ipclkport_aclk", "mout_fsys1_aclk_fsys1_busp_mux", + GAT_FSYS1_CPE425_0_FSYS1_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys1_ns_brdg_fsys1_ipclkport_clk__psoc_fsys1__clk_fsys1_d0", + "mout_fsys1_aclk_fsys1_busp_mux", + GAT_FSYS1_NS_BRDG_FSYS1_IPCLKPORT_CLK__PSOC_FSYS1__CLK_FSYS1_D0, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys1_ns_brdg_fsys1_ipclkport_clk__psoc_fsys1__clk_fsys1_s0", + "mout_fsys1_aclk_fsys1_busp_mux", + GAT_FSYS1_NS_BRDG_FSYS1_IPCLKPORT_CLK__PSOC_FSYS1__CLK_FSYS1_S0, 21, + CLK_IGNORE_UNUSED, 0), + GATE(PCIE_LINK0_IPCLKPORT_DBI_ACLK, "fsys1_pcie_link0_ipclkport_dbi_aclk", + "mout_fsys1_aclk_fsys1_busp_mux", GAT_FSYS1_PCIE_LINK0_IPCLKPORT_DBI_ACLK, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys1_pcie_link0_ipclkport_i_apb_clk", "mout_fsys1_pclkl_fsys1_busp_mux", + GAT_FSYS1_PCIE_LINK0_IPCLKPORT_I_APB_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys1_pcie_link0_ipclkport_i_soc_ref_clk", "fin_pll", + GAT_FSYS1_PCIE_LINK0_IPCLKPORT_I_SOC_REF_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys1_pcie_link0_ipclkport_i_driver_apb_clk", "mout_fsys1_pclkl_fsys1_busp_mux", + GAT_FSYS1_PCIE_LINK0_IPCLKPORT_I_DRIVER_APB_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PCIE_LINK0_IPCLKPORT_MSTR_ACLK, "fsys1_pcie_link0_ipclkport_mstr_aclk", + "mout_fsys1_aclk_fsys1_busp_mux", GAT_FSYS1_PCIE_LINK0_IPCLKPORT_MSTR_ACLK, 21, + CLK_IGNORE_UNUSED, 0), + GATE(PCIE_LINK0_IPCLKPORT_SLV_ACLK, "fsys1_pcie_link0_ipclkport_slv_aclk", + "mout_fsys1_aclk_fsys1_busp_mux", GAT_FSYS1_PCIE_LINK0_IPCLKPORT_SLV_ACLK, 21, + CLK_IGNORE_UNUSED, 0), + GATE(PCIE_LINK1_IPCLKPORT_DBI_ACLK, "fsys1_pcie_link1_ipclkport_dbi_aclk", + "mout_fsys1_aclk_fsys1_busp_mux", GAT_FSYS1_PCIE_LINK1_IPCLKPORT_DBI_ACLK, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys1_pcie_link1_ipclkport_i_driver_apb_clk", "mout_fsys1_pclkl_fsys1_busp_mux", + GAT_FSYS1_PCIE_LINK1_IPCLKPORT_I_DRIVER_APB_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PCIE_LINK1_IPCLKPORT_MSTR_ACLK, "fsys1_pcie_link1_ipclkport_mstr_aclk", + "mout_fsys1_aclk_fsys1_busp_mux", GAT_FSYS1_PCIE_LINK1_IPCLKPORT_MSTR_ACLK, 21, + CLK_IGNORE_UNUSED, 0), + GATE(PCIE_LINK1_IPCLKPORT_SLV_ACLK, "fsys1_pcie_link1_ipclkport_slv_aclk", + "mout_fsys1_aclk_fsys1_busp_mux", GAT_FSYS1_PCIE_LINK1_IPCLKPORT_SLV_ACLK, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys1_pcie_phy0_ipclkport_i_apb_clk", "mout_fsys1_pclkl_fsys1_busp_mux", + GAT_FSYS1_PCIE_PHY0_IPCLKPORT_I_APB_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PCIE_LINK0_IPCLKPORT_AUX_ACLK, "fsys1_pcie_link0_ipclkport_auxclk", "fin_pll", + GAT_FSYS1_PCIE_LINK0_IPCLKPORT_AUXCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(PCIE_LINK1_IPCLKPORT_AUX_ACLK, "fsys1_pcie_link1_ipclkport_auxclk", "fin_pll", + GAT_FSYS1_PCIE_LINK1_IPCLKPORT_AUXCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys1_pcie_phy0_ipclkport_i_ref_soc_pll", "dout_fsys1_clk_fsys1_phy0_oscclk", + GAT_FSYS1_PCIE_PHY0_IPCLKPORT_I_REF_SOC_PLL, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys1_sysreg_fsys1_ipclkport_pclk", "mout_fsys1_pclkl_fsys1_busp_mux", + GAT_FSYS1_SYSREG_FSYS1_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "fsys1_tbu0_fsys1_ipclkport_aclk", "mout_fsys1_aclk_fsys1_busp_mux", + GAT_FSYS1_TBU0_FSYS1_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), +}; + +static const struct samsung_cmu_info fsys1_cmu_info __initconst = { + .mux_clks = fsys1_mux_clks, + 
.nr_mux_clks = ARRAY_SIZE(fsys1_mux_clks), + .div_clks = fsys1_div_clks, + .nr_div_clks = ARRAY_SIZE(fsys1_div_clks), + .gate_clks = fsys1_gate_clks, + .nr_gate_clks = ARRAY_SIZE(fsys1_gate_clks), + .fixed_clks = fsys1_fixed_clks, + .nr_fixed_clks = ARRAY_SIZE(fsys1_fixed_clks), + .nr_clk_ids = FSYS1_NR_CLK, + .clk_regs = fsys1_clk_regs, + .nr_clk_regs = ARRAY_SIZE(fsys1_clk_regs), + .clk_name = "dout_cmu_fsys1_shared0div4", +}; + /** * fsd_cmu_probe - Probe function for FSD platform clocks * @pdev: Pointer to platform device @@ -997,6 +1169,9 @@ static const struct of_device_id fsd_cmu_of_match[] = { }, { .compatible = "tesla,fsd-clock-fsys0", .data = &fsys0_cmu_info, + }, { + .compatible = "tesla,fsd-clock-fsys1", + .data = &fsys1_cmu_info, }, { }, }; -- cgit v1.2.3 From ca0fdfd131c7d33984d8feeda23a99e883ffb0cb Mon Sep 17 00:00:00 2001 From: Alim Akhtar Date: Mon, 24 Jan 2022 19:46:36 +0530 Subject: clk: samsung: fsd: Add cmu_imem block clock information Adds cmu_imem clock related code, imem block contains IPs like WDT, DMA, TMU etc, these clocks are required for such IP function. Cc: linux-fsd@tesla.com Signed-off-by: Arjun K V Signed-off-by: Pankaj Dubey Signed-off-by: Tauseef Nomani Signed-off-by: Alim Akhtar Reviewed-by: Krzysztof Kozlowski Acked-by: Sylwester Nawrocki Link: https://lore.kernel.org/r/20220124141644.71052-9-alim.akhtar@samsung.com Signed-off-by: Krzysztof Kozlowski --- drivers/clk/samsung/clk-fsd.c | 283 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 283 insertions(+) (limited to 'drivers') diff --git a/drivers/clk/samsung/clk-fsd.c b/drivers/clk/samsung/clk-fsd.c index 19c3ea35a6ea..f15b5b6b8eca 100644 --- a/drivers/clk/samsung/clk-fsd.c +++ b/drivers/clk/samsung/clk-fsd.c @@ -1144,6 +1144,289 @@ static const struct samsung_cmu_info fsys1_cmu_info __initconst = { .clk_name = "dout_cmu_fsys1_shared0div4", }; +/* Register Offset definitions for CMU_IMEM (0x10010000) */ +#define PLL_CON0_CLK_IMEM_ACLK 0x100 +#define PLL_CON0_CLK_IMEM_INTMEMCLK 0x120 +#define PLL_CON0_CLK_IMEM_TCUCLK 0x140 +#define DIV_OSCCLK_IMEM_TMUTSCLK 0x1800 +#define GAT_IMEM_IMEM_CMU_IMEM_IPCLKPORT_PCLK 0x2000 +#define GAT_IMEM_MCT_IPCLKPORT_OSCCLK__ALO 0x2004 +#define GAT_IMEM_OTP_CON_TOP_IPCLKPORT_I_OSCCLK 0x2008 +#define GAT_IMEM_RSTNSYNC_OSCCLK_IPCLKPORT_CLK 0x200c +#define GAT_IMEM_TMU_CPU0_IPCLKPORT_I_CLK 0x2010 +#define GAT_IMEM_TMU_CPU0_IPCLKPORT_I_CLK_TS 0x2014 +#define GAT_IMEM_TMU_CPU2_IPCLKPORT_I_CLK 0x2018 +#define GAT_IMEM_TMU_CPU2_IPCLKPORT_I_CLK_TS 0x201c +#define GAT_IMEM_TMU_GPU_IPCLKPORT_I_CLK 0x2020 +#define GAT_IMEM_TMU_GPU_IPCLKPORT_I_CLK_TS 0x2024 +#define GAT_IMEM_TMU_GT_IPCLKPORT_I_CLK 0x2028 +#define GAT_IMEM_TMU_GT_IPCLKPORT_I_CLK_TS 0x202c +#define GAT_IMEM_TMU_TOP_IPCLKPORT_I_CLK 0x2030 +#define GAT_IMEM_TMU_TOP_IPCLKPORT_I_CLK_TS 0x2034 +#define GAT_IMEM_WDT0_IPCLKPORT_CLK 0x2038 +#define GAT_IMEM_WDT1_IPCLKPORT_CLK 0x203c +#define GAT_IMEM_WDT2_IPCLKPORT_CLK 0x2040 +#define GAT_IMEM_ADM_AXI4ST_I0_IMEM_IPCLKPORT_ACLKM 0x2044 +#define GAT_IMEM_ADM_AXI4ST_I1_IMEM_IPCLKPORT_ACLKM 0x2048 +#define GAT_IMEM_ADM_AXI4ST_I2_IMEM_IPCLKPORT_ACLKM 0x204c +#define GAT_IMEM_ADS_AXI4ST_I0_IMEM_IPCLKPORT_ACLKS 0x2050 +#define GAT_IMEM_ADS_AXI4ST_I1_IMEM_IPCLKPORT_ACLKS 0x2054 +#define GAT_IMEM_ADS_AXI4ST_I2_IMEM_IPCLKPORT_ACLKS 0x2058 +#define GAT_IMEM_ASYNC_DMA0_IPCLKPORT_PCLKM 0x205c +#define GAT_IMEM_ASYNC_DMA0_IPCLKPORT_PCLKS 0x2060 +#define GAT_IMEM_ASYNC_DMA1_IPCLKPORT_PCLKM 0x2064 +#define GAT_IMEM_ASYNC_DMA1_IPCLKPORT_PCLKS 0x2068 +#define 
GAT_IMEM_AXI2APB_IMEMP0_IPCLKPORT_ACLK 0x206c +#define GAT_IMEM_AXI2APB_IMEMP1_IPCLKPORT_ACLK 0x2070 +#define GAT_IMEM_BUS_D_IMEM_IPCLKPORT_MAINCLK 0x2074 +#define GAT_IMEM_BUS_P_IMEM_IPCLKPORT_MAINCLK 0x2078 +#define GAT_IMEM_BUS_P_IMEM_IPCLKPORT_PERICLK 0x207c +#define GAT_IMEM_BUS_P_IMEM_IPCLKPORT_TCUCLK 0x2080 +#define GAT_IMEM_DMA0_IPCLKPORT_ACLK 0x2084 +#define GAT_IMEM_DMA1_IPCLKPORT_ACLK 0x2088 +#define GAT_IMEM_GIC500_INPUT_SYNC_IPCLKPORT_CLK 0x208c +#define GAT_IMEM_GIC_IPCLKPORT_CLK 0x2090 +#define GAT_IMEM_INTMEM_IPCLKPORT_ACLK 0x2094 +#define GAT_IMEM_MAILBOX_SCS_CA72_IPCLKPORT_PCLK 0x2098 +#define GAT_IMEM_MAILBOX_SMS_CA72_IPCLKPORT_PCLK 0x209c +#define GAT_IMEM_MCT_IPCLKPORT_PCLK 0x20a0 +#define GAT_IMEM_NS_BRDG_IMEM_IPCLKPORT_CLK__PSCO_IMEM__CLK_IMEM_D 0x20a4 +#define GAT_IMEM_NS_BRDG_IMEM_IPCLKPORT_CLK__PSCO_IMEM__CLK_IMEM_TCU 0x20a8 +#define GAT_IMEM_NS_BRDG_IMEM_IPCLKPORT_CLK__PSOC_IMEM__CLK_IMEM_P 0x20ac +#define GAT_IMEM_OTP_CON_TOP_IPCLKPORT_PCLK 0x20b0 +#define GAT_IMEM_RSTNSYNC_ACLK_IPCLKPORT_CLK 0x20b4 +#define GAT_IMEM_RSTNSYNC_INTMEMCLK_IPCLKPORT_CLK 0x20b8 +#define GAT_IMEM_RSTNSYNC_TCUCLK_IPCLKPORT_CLK 0x20bc +#define GAT_IMEM_SFRIF_TMU0_IMEM_IPCLKPORT_PCLK 0x20c0 +#define GAT_IMEM_SFRIF_TMU1_IMEM_IPCLKPORT_PCLK 0x20c4 +#define GAT_IMEM_SYSREG_IMEM_IPCLKPORT_PCLK 0x20c8 +#define GAT_IMEM_TBU_IMEM_IPCLKPORT_ACLK 0x20cc +#define GAT_IMEM_TCU_IPCLKPORT_ACLK 0x20d0 +#define GAT_IMEM_WDT0_IPCLKPORT_PCLK 0x20d4 +#define GAT_IMEM_WDT1_IPCLKPORT_PCLK 0x20d8 +#define GAT_IMEM_WDT2_IPCLKPORT_PCLK 0x20dc + +static const unsigned long imem_clk_regs[] __initconst = { + PLL_CON0_CLK_IMEM_ACLK, + PLL_CON0_CLK_IMEM_INTMEMCLK, + PLL_CON0_CLK_IMEM_TCUCLK, + DIV_OSCCLK_IMEM_TMUTSCLK, + GAT_IMEM_IMEM_CMU_IMEM_IPCLKPORT_PCLK, + GAT_IMEM_MCT_IPCLKPORT_OSCCLK__ALO, + GAT_IMEM_OTP_CON_TOP_IPCLKPORT_I_OSCCLK, + GAT_IMEM_RSTNSYNC_OSCCLK_IPCLKPORT_CLK, + GAT_IMEM_TMU_CPU0_IPCLKPORT_I_CLK, + GAT_IMEM_TMU_CPU0_IPCLKPORT_I_CLK_TS, + GAT_IMEM_TMU_CPU2_IPCLKPORT_I_CLK, + GAT_IMEM_TMU_CPU2_IPCLKPORT_I_CLK_TS, + GAT_IMEM_TMU_GPU_IPCLKPORT_I_CLK, + GAT_IMEM_TMU_GPU_IPCLKPORT_I_CLK_TS, + GAT_IMEM_TMU_GT_IPCLKPORT_I_CLK, + GAT_IMEM_TMU_GT_IPCLKPORT_I_CLK_TS, + GAT_IMEM_TMU_TOP_IPCLKPORT_I_CLK, + GAT_IMEM_TMU_TOP_IPCLKPORT_I_CLK_TS, + GAT_IMEM_WDT0_IPCLKPORT_CLK, + GAT_IMEM_WDT1_IPCLKPORT_CLK, + GAT_IMEM_WDT2_IPCLKPORT_CLK, + GAT_IMEM_ADM_AXI4ST_I0_IMEM_IPCLKPORT_ACLKM, + GAT_IMEM_ADM_AXI4ST_I1_IMEM_IPCLKPORT_ACLKM, + GAT_IMEM_ADM_AXI4ST_I2_IMEM_IPCLKPORT_ACLKM, + GAT_IMEM_ADS_AXI4ST_I0_IMEM_IPCLKPORT_ACLKS, + GAT_IMEM_ADS_AXI4ST_I1_IMEM_IPCLKPORT_ACLKS, + GAT_IMEM_ADS_AXI4ST_I2_IMEM_IPCLKPORT_ACLKS, + GAT_IMEM_ASYNC_DMA0_IPCLKPORT_PCLKM, + GAT_IMEM_ASYNC_DMA0_IPCLKPORT_PCLKS, + GAT_IMEM_ASYNC_DMA1_IPCLKPORT_PCLKM, + GAT_IMEM_ASYNC_DMA1_IPCLKPORT_PCLKS, + GAT_IMEM_AXI2APB_IMEMP0_IPCLKPORT_ACLK, + GAT_IMEM_AXI2APB_IMEMP1_IPCLKPORT_ACLK, + GAT_IMEM_BUS_D_IMEM_IPCLKPORT_MAINCLK, + GAT_IMEM_BUS_P_IMEM_IPCLKPORT_MAINCLK, + GAT_IMEM_BUS_P_IMEM_IPCLKPORT_PERICLK, + GAT_IMEM_BUS_P_IMEM_IPCLKPORT_TCUCLK, + GAT_IMEM_DMA0_IPCLKPORT_ACLK, + GAT_IMEM_DMA1_IPCLKPORT_ACLK, + GAT_IMEM_GIC500_INPUT_SYNC_IPCLKPORT_CLK, + GAT_IMEM_GIC_IPCLKPORT_CLK, + GAT_IMEM_INTMEM_IPCLKPORT_ACLK, + GAT_IMEM_MAILBOX_SCS_CA72_IPCLKPORT_PCLK, + GAT_IMEM_MAILBOX_SMS_CA72_IPCLKPORT_PCLK, + GAT_IMEM_MCT_IPCLKPORT_PCLK, + GAT_IMEM_NS_BRDG_IMEM_IPCLKPORT_CLK__PSCO_IMEM__CLK_IMEM_D, + GAT_IMEM_NS_BRDG_IMEM_IPCLKPORT_CLK__PSCO_IMEM__CLK_IMEM_TCU, + GAT_IMEM_NS_BRDG_IMEM_IPCLKPORT_CLK__PSOC_IMEM__CLK_IMEM_P, + 
GAT_IMEM_OTP_CON_TOP_IPCLKPORT_PCLK, + GAT_IMEM_RSTNSYNC_ACLK_IPCLKPORT_CLK, + GAT_IMEM_RSTNSYNC_INTMEMCLK_IPCLKPORT_CLK, + GAT_IMEM_RSTNSYNC_TCUCLK_IPCLKPORT_CLK, + GAT_IMEM_SFRIF_TMU0_IMEM_IPCLKPORT_PCLK, + GAT_IMEM_SFRIF_TMU1_IMEM_IPCLKPORT_PCLK, + GAT_IMEM_SYSREG_IMEM_IPCLKPORT_PCLK, + GAT_IMEM_TBU_IMEM_IPCLKPORT_ACLK, + GAT_IMEM_TCU_IPCLKPORT_ACLK, + GAT_IMEM_WDT0_IPCLKPORT_PCLK, + GAT_IMEM_WDT1_IPCLKPORT_PCLK, + GAT_IMEM_WDT2_IPCLKPORT_PCLK, +}; + +PNAME(mout_imem_clk_imem_tcuclk_p) = { "fin_pll", "dout_cmu_imem_tcuclk" }; +PNAME(mout_imem_clk_imem_aclk_p) = { "fin_pll", "dout_cmu_imem_aclk" }; +PNAME(mout_imem_clk_imem_intmemclk_p) = { "fin_pll", "dout_cmu_imem_dmaclk" }; + +static const struct samsung_mux_clock imem_mux_clks[] __initconst = { + MUX(0, "mout_imem_clk_imem_tcuclk", mout_imem_clk_imem_tcuclk_p, + PLL_CON0_CLK_IMEM_TCUCLK, 4, 1), + MUX(0, "mout_imem_clk_imem_aclk", mout_imem_clk_imem_aclk_p, PLL_CON0_CLK_IMEM_ACLK, 4, 1), + MUX(0, "mout_imem_clk_imem_intmemclk", mout_imem_clk_imem_intmemclk_p, + PLL_CON0_CLK_IMEM_INTMEMCLK, 4, 1), +}; + +static const struct samsung_div_clock imem_div_clks[] __initconst = { + DIV(0, "dout_imem_oscclk_imem_tmutsclk", "fin_pll", DIV_OSCCLK_IMEM_TMUTSCLK, 0, 4), +}; + +static const struct samsung_gate_clock imem_gate_clks[] __initconst = { + GATE(0, "imem_imem_cmu_imem_ipclkport_pclk", "mout_imem_clk_imem_aclk", + GAT_IMEM_IMEM_CMU_IMEM_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_otp_con_top_ipclkport_i_oscclk", "fin_pll", + GAT_IMEM_OTP_CON_TOP_IPCLKPORT_I_OSCCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_tmu_top_ipclkport_i_clk", "fin_pll", + GAT_IMEM_TMU_TOP_IPCLKPORT_I_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_tmu_gt_ipclkport_i_clk", "fin_pll", + GAT_IMEM_TMU_GT_IPCLKPORT_I_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_tmu_cpu0_ipclkport_i_clk", "fin_pll", + GAT_IMEM_TMU_CPU0_IPCLKPORT_I_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_tmu_gpu_ipclkport_i_clk", "fin_pll", + GAT_IMEM_TMU_GPU_IPCLKPORT_I_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_mct_ipclkport_oscclk__alo", "fin_pll", + GAT_IMEM_MCT_IPCLKPORT_OSCCLK__ALO, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_wdt0_ipclkport_clk", "fin_pll", + GAT_IMEM_WDT0_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_wdt1_ipclkport_clk", "fin_pll", + GAT_IMEM_WDT1_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_wdt2_ipclkport_clk", "fin_pll", + GAT_IMEM_WDT2_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(IMEM_TMU_CPU0_IPCLKPORT_I_CLK_TS, "imem_tmu_cpu0_ipclkport_i_clk_ts", + "dout_imem_oscclk_imem_tmutsclk", + GAT_IMEM_TMU_CPU0_IPCLKPORT_I_CLK_TS, 21, CLK_IGNORE_UNUSED, 0), + GATE(IMEM_TMU_CPU2_IPCLKPORT_I_CLK_TS, "imem_tmu_cpu2_ipclkport_i_clk_ts", + "dout_imem_oscclk_imem_tmutsclk", + GAT_IMEM_TMU_CPU2_IPCLKPORT_I_CLK_TS, 21, CLK_IGNORE_UNUSED, 0), + GATE(IMEM_TMU_GPU_IPCLKPORT_I_CLK_TS, "imem_tmu_gpu_ipclkport_i_clk_ts", + "dout_imem_oscclk_imem_tmutsclk", + GAT_IMEM_TMU_GPU_IPCLKPORT_I_CLK_TS, 21, CLK_IGNORE_UNUSED, 0), + GATE(IMEM_TMU_GT_IPCLKPORT_I_CLK_TS, "imem_tmu_gt_ipclkport_i_clk_ts", + "dout_imem_oscclk_imem_tmutsclk", + GAT_IMEM_TMU_GT_IPCLKPORT_I_CLK_TS, 21, CLK_IGNORE_UNUSED, 0), + GATE(IMEM_TMU_TOP_IPCLKPORT_I_CLK_TS, "imem_tmu_top_ipclkport_i_clk_ts", + "dout_imem_oscclk_imem_tmutsclk", + GAT_IMEM_TMU_TOP_IPCLKPORT_I_CLK_TS, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_adm_axi4st_i0_imem_ipclkport_aclkm", "mout_imem_clk_imem_aclk", + GAT_IMEM_ADM_AXI4ST_I0_IMEM_IPCLKPORT_ACLKM, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, 
"imem_adm_axi4st_i1_imem_ipclkport_aclkm", "mout_imem_clk_imem_aclk", + GAT_IMEM_ADM_AXI4ST_I1_IMEM_IPCLKPORT_ACLKM, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_adm_axi4st_i2_imem_ipclkport_aclkm", "mout_imem_clk_imem_aclk", + GAT_IMEM_ADM_AXI4ST_I2_IMEM_IPCLKPORT_ACLKM, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_ads_axi4st_i0_imem_ipclkport_aclks", "mout_imem_clk_imem_aclk", + GAT_IMEM_ADS_AXI4ST_I0_IMEM_IPCLKPORT_ACLKS, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_ads_axi4st_i1_imem_ipclkport_aclks", "mout_imem_clk_imem_aclk", + GAT_IMEM_ADS_AXI4ST_I1_IMEM_IPCLKPORT_ACLKS, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_ads_axi4st_i2_imem_ipclkport_aclks", "mout_imem_clk_imem_aclk", + GAT_IMEM_ADS_AXI4ST_I2_IMEM_IPCLKPORT_ACLKS, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_async_dma0_ipclkport_pclkm", "mout_imem_clk_imem_tcuclk", + GAT_IMEM_ASYNC_DMA0_IPCLKPORT_PCLKM, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_async_dma0_ipclkport_pclks", "mout_imem_clk_imem_aclk", + GAT_IMEM_ASYNC_DMA0_IPCLKPORT_PCLKS, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_async_dma1_ipclkport_pclkm", "mout_imem_clk_imem_tcuclk", + GAT_IMEM_ASYNC_DMA1_IPCLKPORT_PCLKM, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_async_dma1_ipclkport_pclks", "mout_imem_clk_imem_aclk", + GAT_IMEM_ASYNC_DMA1_IPCLKPORT_PCLKS, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_axi2apb_imemp0_ipclkport_aclk", "mout_imem_clk_imem_aclk", + GAT_IMEM_AXI2APB_IMEMP0_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_axi2apb_imemp1_ipclkport_aclk", "mout_imem_clk_imem_aclk", + GAT_IMEM_AXI2APB_IMEMP1_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_bus_d_imem_ipclkport_mainclk", "mout_imem_clk_imem_tcuclk", + GAT_IMEM_BUS_D_IMEM_IPCLKPORT_MAINCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_bus_p_imem_ipclkport_mainclk", "mout_imem_clk_imem_aclk", + GAT_IMEM_BUS_P_IMEM_IPCLKPORT_MAINCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_bus_p_imem_ipclkport_pericclk", "mout_imem_clk_imem_aclk", + GAT_IMEM_BUS_P_IMEM_IPCLKPORT_PERICLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_bus_p_imem_ipclkport_tcuclk", "mout_imem_clk_imem_tcuclk", + GAT_IMEM_BUS_P_IMEM_IPCLKPORT_TCUCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(IMEM_DMA0_IPCLKPORT_ACLK, "imem_dma0_ipclkport_aclk", "mout_imem_clk_imem_tcuclk", + GAT_IMEM_DMA0_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0), + GATE(IMEM_DMA1_IPCLKPORT_ACLK, "imem_dma1_ipclkport_aclk", "mout_imem_clk_imem_tcuclk", + GAT_IMEM_DMA1_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0), + GATE(0, "imem_gic500_input_sync_ipclkport_clk", "mout_imem_clk_imem_aclk", + GAT_IMEM_GIC500_INPUT_SYNC_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_gic_ipclkport_clk", "mout_imem_clk_imem_aclk", + GAT_IMEM_GIC_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_intmem_ipclkport_aclk", "mout_imem_clk_imem_intmemclk", + GAT_IMEM_INTMEM_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_mailbox_scs_ca72_ipclkport_pclk", "mout_imem_clk_imem_aclk", + GAT_IMEM_MAILBOX_SCS_CA72_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_mailbox_sms_ca72_ipclkport_pclk", "mout_imem_clk_imem_aclk", + GAT_IMEM_MAILBOX_SMS_CA72_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(IMEM_MCT_PCLK, "imem_mct_ipclkport_pclk", "mout_imem_clk_imem_aclk", + GAT_IMEM_MCT_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_ns_brdg_imem_ipclkport_clk__psco_imem__clk_imem_d", + "mout_imem_clk_imem_tcuclk", + GAT_IMEM_NS_BRDG_IMEM_IPCLKPORT_CLK__PSCO_IMEM__CLK_IMEM_D, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, 
"imem_ns_brdg_imem_ipclkport_clk__psco_imem__clk_imem_tcu", + "mout_imem_clk_imem_tcuclk", + GAT_IMEM_NS_BRDG_IMEM_IPCLKPORT_CLK__PSCO_IMEM__CLK_IMEM_TCU, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_ns_brdg_imem_ipclkport_clk__psoc_imem__clk_imem_p", "mout_imem_clk_imem_aclk", + GAT_IMEM_NS_BRDG_IMEM_IPCLKPORT_CLK__PSOC_IMEM__CLK_IMEM_P, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_otp_con_top_ipclkport_pclk", "mout_imem_clk_imem_aclk", + GAT_IMEM_OTP_CON_TOP_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_rstnsync_aclk_ipclkport_clk", "mout_imem_clk_imem_aclk", + GAT_IMEM_RSTNSYNC_ACLK_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_rstnsync_oscclk_ipclkport_clk", "fin_pll", + GAT_IMEM_RSTNSYNC_OSCCLK_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_rstnsync_intmemclk_ipclkport_clk", "mout_imem_clk_imem_intmemclk", + GAT_IMEM_RSTNSYNC_INTMEMCLK_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_rstnsync_tcuclk_ipclkport_clk", "mout_imem_clk_imem_tcuclk", + GAT_IMEM_RSTNSYNC_TCUCLK_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_sfrif_tmu0_imem_ipclkport_pclk", "mout_imem_clk_imem_aclk", + GAT_IMEM_SFRIF_TMU0_IMEM_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_sfrif_tmu1_imem_ipclkport_pclk", "mout_imem_clk_imem_aclk", + GAT_IMEM_SFRIF_TMU1_IMEM_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_tmu_cpu2_ipclkport_i_clk", "fin_pll", + GAT_IMEM_TMU_CPU2_IPCLKPORT_I_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_sysreg_imem_ipclkport_pclk", "mout_imem_clk_imem_aclk", + GAT_IMEM_SYSREG_IMEM_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_tbu_imem_ipclkport_aclk", "mout_imem_clk_imem_tcuclk", + GAT_IMEM_TBU_IMEM_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "imem_tcu_ipclkport_aclk", "mout_imem_clk_imem_tcuclk", + GAT_IMEM_TCU_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(IMEM_WDT0_IPCLKPORT_PCLK, "imem_wdt0_ipclkport_pclk", "mout_imem_clk_imem_aclk", + GAT_IMEM_WDT0_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(IMEM_WDT1_IPCLKPORT_PCLK, "imem_wdt1_ipclkport_pclk", "mout_imem_clk_imem_aclk", + GAT_IMEM_WDT1_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(IMEM_WDT2_IPCLKPORT_PCLK, "imem_wdt2_ipclkport_pclk", "mout_imem_clk_imem_aclk", + GAT_IMEM_WDT2_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), +}; + +static const struct samsung_cmu_info imem_cmu_info __initconst = { + .mux_clks = imem_mux_clks, + .nr_mux_clks = ARRAY_SIZE(imem_mux_clks), + .div_clks = imem_div_clks, + .nr_div_clks = ARRAY_SIZE(imem_div_clks), + .gate_clks = imem_gate_clks, + .nr_gate_clks = ARRAY_SIZE(imem_gate_clks), + .nr_clk_ids = IMEM_NR_CLK, + .clk_regs = imem_clk_regs, + .nr_clk_regs = ARRAY_SIZE(imem_clk_regs), +}; + +static void __init fsd_clk_imem_init(struct device_node *np) +{ + samsung_cmu_register_one(np, &imem_cmu_info); +} + +CLK_OF_DECLARE(fsd_clk_imem, "tesla,fsd-clock-imem", fsd_clk_imem_init); + /** * fsd_cmu_probe - Probe function for FSD platform clocks * @pdev: Pointer to platform device -- cgit v1.2.3 From 75c50afaa0d9a3e8f96940451bed6d0ccc6a0a03 Mon Sep 17 00:00:00 2001 From: Alim Akhtar Date: Mon, 24 Jan 2022 19:46:37 +0530 Subject: clk: samsung: fsd: Add cmu_mfc block clock information Adds cmu_mfc clock related code, these clocks are required for MFC IP. 
Cc: linux-fsd@tesla.com Signed-off-by: Smitha T Murthy Signed-off-by: Pankaj Dubey Signed-off-by: Alim Akhtar Reviewed-by: Krzysztof Kozlowski Acked-by: Sylwester Nawrocki Link: https://lore.kernel.org/r/20220124141644.71052-10-alim.akhtar@samsung.com Signed-off-by: Krzysztof Kozlowski --- drivers/clk/samsung/clk-fsd.c | 121 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) (limited to 'drivers') diff --git a/drivers/clk/samsung/clk-fsd.c b/drivers/clk/samsung/clk-fsd.c index f15b5b6b8eca..f9c4b4c5e0cb 100644 --- a/drivers/clk/samsung/clk-fsd.c +++ b/drivers/clk/samsung/clk-fsd.c @@ -1427,6 +1427,124 @@ static void __init fsd_clk_imem_init(struct device_node *np) CLK_OF_DECLARE(fsd_clk_imem, "tesla,fsd-clock-imem", fsd_clk_imem_init); +/* Register Offset definitions for CMU_MFC (0x12810000) */ +#define PLL_LOCKTIME_PLL_MFC 0x0 +#define PLL_CON0_PLL_MFC 0x100 +#define MUX_MFC_BUSD 0x1000 +#define MUX_MFC_BUSP 0x1008 +#define DIV_MFC_BUSD_DIV4 0x1800 +#define GAT_MFC_CMU_MFC_IPCLKPORT_PCLK 0x2000 +#define GAT_MFC_AS_P_MFC_IPCLKPORT_PCLKM 0x2004 +#define GAT_MFC_AS_P_MFC_IPCLKPORT_PCLKS 0x2008 +#define GAT_MFC_AXI2APB_MFC_IPCLKPORT_ACLK 0x200c +#define GAT_MFC_MFC_IPCLKPORT_ACLK 0x2010 +#define GAT_MFC_NS_BRDG_MFC_IPCLKPORT_CLK__PMFC__CLK_MFC_D 0x2018 +#define GAT_MFC_NS_BRDG_MFC_IPCLKPORT_CLK__PMFC__CLK_MFC_P 0x201c +#define GAT_MFC_PPMU_MFCD0_IPCLKPORT_ACLK 0x2028 +#define GAT_MFC_PPMU_MFCD0_IPCLKPORT_PCLK 0x202c +#define GAT_MFC_PPMU_MFCD1_IPCLKPORT_ACLK 0x2030 +#define GAT_MFC_PPMU_MFCD1_IPCLKPORT_PCLK 0x2034 +#define GAT_MFC_SYSREG_MFC_IPCLKPORT_PCLK 0x2038 +#define GAT_MFC_TBU_MFCD0_IPCLKPORT_CLK 0x203c +#define GAT_MFC_TBU_MFCD1_IPCLKPORT_CLK 0x2040 +#define GAT_MFC_BUSD_DIV4_GATE 0x2044 +#define GAT_MFC_BUSD_GATE 0x2048 + +static const unsigned long mfc_clk_regs[] __initconst = { + PLL_LOCKTIME_PLL_MFC, + PLL_CON0_PLL_MFC, + MUX_MFC_BUSD, + MUX_MFC_BUSP, + DIV_MFC_BUSD_DIV4, + GAT_MFC_CMU_MFC_IPCLKPORT_PCLK, + GAT_MFC_AS_P_MFC_IPCLKPORT_PCLKM, + GAT_MFC_AS_P_MFC_IPCLKPORT_PCLKS, + GAT_MFC_AXI2APB_MFC_IPCLKPORT_ACLK, + GAT_MFC_MFC_IPCLKPORT_ACLK, + GAT_MFC_NS_BRDG_MFC_IPCLKPORT_CLK__PMFC__CLK_MFC_D, + GAT_MFC_NS_BRDG_MFC_IPCLKPORT_CLK__PMFC__CLK_MFC_P, + GAT_MFC_PPMU_MFCD0_IPCLKPORT_ACLK, + GAT_MFC_PPMU_MFCD0_IPCLKPORT_PCLK, + GAT_MFC_PPMU_MFCD1_IPCLKPORT_ACLK, + GAT_MFC_PPMU_MFCD1_IPCLKPORT_PCLK, + GAT_MFC_SYSREG_MFC_IPCLKPORT_PCLK, + GAT_MFC_TBU_MFCD0_IPCLKPORT_CLK, + GAT_MFC_TBU_MFCD1_IPCLKPORT_CLK, + GAT_MFC_BUSD_DIV4_GATE, + GAT_MFC_BUSD_GATE, +}; + +static const struct samsung_pll_rate_table pll_mfc_rate_table[] __initconst = { + PLL_35XX_RATE(24 * MHZ, 666000000U, 111, 4, 0), +}; + +static const struct samsung_pll_clock mfc_pll_clks[] __initconst = { + PLL(pll_142xx, 0, "fout_pll_mfc", "fin_pll", + PLL_LOCKTIME_PLL_MFC, PLL_CON0_PLL_MFC, pll_mfc_rate_table), +}; + +PNAME(mout_mfc_pll_p) = { "fin_pll", "fout_pll_mfc" }; +PNAME(mout_mfc_busp_p) = { "fin_pll", "dout_mfc_busd_div4" }; +PNAME(mout_mfc_busd_p) = { "fin_pll", "mfc_busd_gate" }; + +static const struct samsung_mux_clock mfc_mux_clks[] __initconst = { + MUX(0, "mout_mfc_pll", mout_mfc_pll_p, PLL_CON0_PLL_MFC, 4, 1), + MUX(0, "mout_mfc_busp", mout_mfc_busp_p, MUX_MFC_BUSP, 0, 1), + MUX(0, "mout_mfc_busd", mout_mfc_busd_p, MUX_MFC_BUSD, 0, 1), +}; + +static const struct samsung_div_clock mfc_div_clks[] __initconst = { + DIV(0, "dout_mfc_busd_div4", "mfc_busd_div4_gate", DIV_MFC_BUSD_DIV4, 0, 4), +}; + +static const struct samsung_gate_clock mfc_gate_clks[] __initconst = { + GATE(0, 
"mfc_cmu_mfc_ipclkport_pclk", "mout_mfc_busp", + GAT_MFC_CMU_MFC_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "mfc_as_p_mfc_ipclkport_pclkm", "mout_mfc_busd", + GAT_MFC_AS_P_MFC_IPCLKPORT_PCLKM, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "mfc_as_p_mfc_ipclkport_pclks", "mout_mfc_busp", + GAT_MFC_AS_P_MFC_IPCLKPORT_PCLKS, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "mfc_axi2apb_mfc_ipclkport_aclk", "mout_mfc_busp", + GAT_MFC_AXI2APB_MFC_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(MFC_MFC_IPCLKPORT_ACLK, "mfc_mfc_ipclkport_aclk", "mout_mfc_busd", + GAT_MFC_MFC_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "mfc_ns_brdg_mfc_ipclkport_clk__pmfc__clk_mfc_d", "mout_mfc_busd", + GAT_MFC_NS_BRDG_MFC_IPCLKPORT_CLK__PMFC__CLK_MFC_D, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "mfc_ns_brdg_mfc_ipclkport_clk__pmfc__clk_mfc_p", "mout_mfc_busp", + GAT_MFC_NS_BRDG_MFC_IPCLKPORT_CLK__PMFC__CLK_MFC_P, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "mfc_ppmu_mfcd0_ipclkport_aclk", "mout_mfc_busd", + GAT_MFC_PPMU_MFCD0_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "mfc_ppmu_mfcd0_ipclkport_pclk", "mout_mfc_busp", + GAT_MFC_PPMU_MFCD0_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "mfc_ppmu_mfcd1_ipclkport_aclk", "mout_mfc_busd", + GAT_MFC_PPMU_MFCD1_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "mfc_ppmu_mfcd1_ipclkport_pclk", "mout_mfc_busp", + GAT_MFC_PPMU_MFCD1_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "mfc_sysreg_mfc_ipclkport_pclk", "mout_mfc_busp", + GAT_MFC_SYSREG_MFC_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "mfc_tbu_mfcd0_ipclkport_clk", "mout_mfc_busd", + GAT_MFC_TBU_MFCD0_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "mfc_tbu_mfcd1_ipclkport_clk", "mout_mfc_busd", + GAT_MFC_TBU_MFCD1_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "mfc_busd_div4_gate", "mout_mfc_pll", + GAT_MFC_BUSD_DIV4_GATE, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "mfc_busd_gate", "mout_mfc_pll", GAT_MFC_BUSD_GATE, 21, CLK_IS_CRITICAL, 0), +}; + +static const struct samsung_cmu_info mfc_cmu_info __initconst = { + .pll_clks = mfc_pll_clks, + .nr_pll_clks = ARRAY_SIZE(mfc_pll_clks), + .mux_clks = mfc_mux_clks, + .nr_mux_clks = ARRAY_SIZE(mfc_mux_clks), + .div_clks = mfc_div_clks, + .nr_div_clks = ARRAY_SIZE(mfc_div_clks), + .gate_clks = mfc_gate_clks, + .nr_gate_clks = ARRAY_SIZE(mfc_gate_clks), + .nr_clk_ids = MFC_NR_CLK, + .clk_regs = mfc_clk_regs, + .nr_clk_regs = ARRAY_SIZE(mfc_clk_regs), +}; + /** * fsd_cmu_probe - Probe function for FSD platform clocks * @pdev: Pointer to platform device @@ -1455,6 +1573,9 @@ static const struct of_device_id fsd_cmu_of_match[] = { }, { .compatible = "tesla,fsd-clock-fsys1", .data = &fsys1_cmu_info, + }, { + .compatible = "tesla,fsd-clock-mfc", + .data = &mfc_cmu_info, }, { }, }; -- cgit v1.2.3 From b826c3e4de1a44ad8e5536d86d5ef062a54ed2b2 Mon Sep 17 00:00:00 2001 From: Alim Akhtar Date: Mon, 24 Jan 2022 19:46:38 +0530 Subject: clk: samsung: fsd: Add cam_csi block clock information Adds clocks for BLK_CAM_CSI block, this is needed for CSI to work. 
Cc: linux-fsd@tesla.com Signed-off-by: Sathyakam M Signed-off-by: Pankaj Dubey Signed-off-by: Alim Akhtar Reviewed-by: Krzysztof Kozlowski Acked-by: Sylwester Nawrocki Link: https://lore.kernel.org/r/20220124141644.71052-11-alim.akhtar@samsung.com Signed-off-by: Krzysztof Kozlowski --- drivers/clk/samsung/clk-fsd.c | 207 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 207 insertions(+) (limited to 'drivers') diff --git a/drivers/clk/samsung/clk-fsd.c b/drivers/clk/samsung/clk-fsd.c index f9c4b4c5e0cb..5d009c70e97d 100644 --- a/drivers/clk/samsung/clk-fsd.c +++ b/drivers/clk/samsung/clk-fsd.c @@ -1545,6 +1545,210 @@ static const struct samsung_cmu_info mfc_cmu_info __initconst = { .nr_clk_regs = ARRAY_SIZE(mfc_clk_regs), }; +/* Register Offset definitions for CMU_CAM_CSI (0x12610000) */ +#define PLL_LOCKTIME_PLL_CAM_CSI 0x0 +#define PLL_CON0_PLL_CAM_CSI 0x100 +#define DIV_CAM_CSI0_ACLK 0x1800 +#define DIV_CAM_CSI1_ACLK 0x1804 +#define DIV_CAM_CSI2_ACLK 0x1808 +#define DIV_CAM_CSI_BUSD 0x180c +#define DIV_CAM_CSI_BUSP 0x1810 +#define GAT_CAM_CSI_CMU_CAM_CSI_IPCLKPORT_PCLK 0x2000 +#define GAT_CAM_AXI2APB_CAM_CSI_IPCLKPORT_ACLK 0x2004 +#define GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_CSI0 0x2008 +#define GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_CSI1 0x200c +#define GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_CSI2 0x2010 +#define GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_SOC_NOC 0x2014 +#define GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__NOC 0x2018 +#define GAT_CAM_CSI0_0_IPCLKPORT_I_ACLK 0x201c +#define GAT_CAM_CSI0_0_IPCLKPORT_I_PCLK 0x2020 +#define GAT_CAM_CSI0_1_IPCLKPORT_I_ACLK 0x2024 +#define GAT_CAM_CSI0_1_IPCLKPORT_I_PCLK 0x2028 +#define GAT_CAM_CSI0_2_IPCLKPORT_I_ACLK 0x202c +#define GAT_CAM_CSI0_2_IPCLKPORT_I_PCLK 0x2030 +#define GAT_CAM_CSI0_3_IPCLKPORT_I_ACLK 0x2034 +#define GAT_CAM_CSI0_3_IPCLKPORT_I_PCLK 0x2038 +#define GAT_CAM_CSI1_0_IPCLKPORT_I_ACLK 0x203c +#define GAT_CAM_CSI1_0_IPCLKPORT_I_PCLK 0x2040 +#define GAT_CAM_CSI1_1_IPCLKPORT_I_ACLK 0x2044 +#define GAT_CAM_CSI1_1_IPCLKPORT_I_PCLK 0x2048 +#define GAT_CAM_CSI1_2_IPCLKPORT_I_ACLK 0x204c +#define GAT_CAM_CSI1_2_IPCLKPORT_I_PCLK 0x2050 +#define GAT_CAM_CSI1_3_IPCLKPORT_I_ACLK 0x2054 +#define GAT_CAM_CSI1_3_IPCLKPORT_I_PCLK 0x2058 +#define GAT_CAM_CSI2_0_IPCLKPORT_I_ACLK 0x205c +#define GAT_CAM_CSI2_0_IPCLKPORT_I_PCLK 0x2060 +#define GAT_CAM_CSI2_1_IPCLKPORT_I_ACLK 0x2064 +#define GAT_CAM_CSI2_1_IPCLKPORT_I_PCLK 0x2068 +#define GAT_CAM_CSI2_2_IPCLKPORT_I_ACLK 0x206c +#define GAT_CAM_CSI2_2_IPCLKPORT_I_PCLK 0x2070 +#define GAT_CAM_CSI2_3_IPCLKPORT_I_ACLK 0x2074 +#define GAT_CAM_CSI2_3_IPCLKPORT_I_PCLK 0x2078 +#define GAT_CAM_NS_BRDG_CAM_CSI_IPCLKPORT_CLK__PSOC_CAM_CSI__CLK_CAM_CSI_D 0x207c +#define GAT_CAM_NS_BRDG_CAM_CSI_IPCLKPORT_CLK__PSOC_CAM_CSI__CLK_CAM_CSI_P 0x2080 +#define GAT_CAM_SYSREG_CAM_CSI_IPCLKPORT_PCLK 0x2084 +#define GAT_CAM_TBU_CAM_CSI_IPCLKPORT_ACLK 0x2088 + +static const unsigned long cam_csi_clk_regs[] __initconst = { + PLL_LOCKTIME_PLL_CAM_CSI, + PLL_CON0_PLL_CAM_CSI, + DIV_CAM_CSI0_ACLK, + DIV_CAM_CSI1_ACLK, + DIV_CAM_CSI2_ACLK, + DIV_CAM_CSI_BUSD, + DIV_CAM_CSI_BUSP, + GAT_CAM_CSI_CMU_CAM_CSI_IPCLKPORT_PCLK, + GAT_CAM_AXI2APB_CAM_CSI_IPCLKPORT_ACLK, + GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_CSI0, + GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_CSI1, + GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_CSI2, + GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_SOC_NOC, + 
GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__NOC, + GAT_CAM_CSI0_0_IPCLKPORT_I_ACLK, + GAT_CAM_CSI0_0_IPCLKPORT_I_PCLK, + GAT_CAM_CSI0_1_IPCLKPORT_I_ACLK, + GAT_CAM_CSI0_1_IPCLKPORT_I_PCLK, + GAT_CAM_CSI0_2_IPCLKPORT_I_ACLK, + GAT_CAM_CSI0_2_IPCLKPORT_I_PCLK, + GAT_CAM_CSI0_3_IPCLKPORT_I_ACLK, + GAT_CAM_CSI0_3_IPCLKPORT_I_PCLK, + GAT_CAM_CSI1_0_IPCLKPORT_I_ACLK, + GAT_CAM_CSI1_0_IPCLKPORT_I_PCLK, + GAT_CAM_CSI1_1_IPCLKPORT_I_ACLK, + GAT_CAM_CSI1_1_IPCLKPORT_I_PCLK, + GAT_CAM_CSI1_2_IPCLKPORT_I_ACLK, + GAT_CAM_CSI1_2_IPCLKPORT_I_PCLK, + GAT_CAM_CSI1_3_IPCLKPORT_I_ACLK, + GAT_CAM_CSI1_3_IPCLKPORT_I_PCLK, + GAT_CAM_CSI2_0_IPCLKPORT_I_ACLK, + GAT_CAM_CSI2_0_IPCLKPORT_I_PCLK, + GAT_CAM_CSI2_1_IPCLKPORT_I_ACLK, + GAT_CAM_CSI2_1_IPCLKPORT_I_PCLK, + GAT_CAM_CSI2_2_IPCLKPORT_I_ACLK, + GAT_CAM_CSI2_2_IPCLKPORT_I_PCLK, + GAT_CAM_CSI2_3_IPCLKPORT_I_ACLK, + GAT_CAM_CSI2_3_IPCLKPORT_I_PCLK, + GAT_CAM_NS_BRDG_CAM_CSI_IPCLKPORT_CLK__PSOC_CAM_CSI__CLK_CAM_CSI_D, + GAT_CAM_NS_BRDG_CAM_CSI_IPCLKPORT_CLK__PSOC_CAM_CSI__CLK_CAM_CSI_P, + GAT_CAM_SYSREG_CAM_CSI_IPCLKPORT_PCLK, + GAT_CAM_TBU_CAM_CSI_IPCLKPORT_ACLK, +}; + +static const struct samsung_pll_rate_table pll_cam_csi_rate_table[] __initconst = { + PLL_35XX_RATE(24 * MHZ, 1066000000U, 533, 12, 0), +}; + +static const struct samsung_pll_clock cam_csi_pll_clks[] __initconst = { + PLL(pll_142xx, 0, "fout_pll_cam_csi", "fin_pll", + PLL_LOCKTIME_PLL_CAM_CSI, PLL_CON0_PLL_CAM_CSI, pll_cam_csi_rate_table), +}; + +PNAME(mout_cam_csi_pll_p) = { "fin_pll", "fout_pll_cam_csi" }; + +static const struct samsung_mux_clock cam_csi_mux_clks[] __initconst = { + MUX(0, "mout_cam_csi_pll", mout_cam_csi_pll_p, PLL_CON0_PLL_CAM_CSI, 4, 1), +}; + +static const struct samsung_div_clock cam_csi_div_clks[] __initconst = { + DIV(0, "dout_cam_csi0_aclk", "mout_cam_csi_pll", DIV_CAM_CSI0_ACLK, 0, 4), + DIV(0, "dout_cam_csi1_aclk", "mout_cam_csi_pll", DIV_CAM_CSI1_ACLK, 0, 4), + DIV(0, "dout_cam_csi2_aclk", "mout_cam_csi_pll", DIV_CAM_CSI2_ACLK, 0, 4), + DIV(0, "dout_cam_csi_busd", "mout_cam_csi_pll", DIV_CAM_CSI_BUSD, 0, 4), + DIV(0, "dout_cam_csi_busp", "mout_cam_csi_pll", DIV_CAM_CSI_BUSP, 0, 4), +}; + +static const struct samsung_gate_clock cam_csi_gate_clks[] __initconst = { + GATE(0, "cam_csi_cmu_cam_csi_ipclkport_pclk", "dout_cam_csi_busp", + GAT_CAM_CSI_CMU_CAM_CSI_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cam_axi2apb_cam_csi_ipclkport_aclk", "dout_cam_csi_busp", + GAT_CAM_AXI2APB_CAM_CSI_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cam_csi_bus_d_cam_csi_ipclkport_clk__system__clk_csi0", "dout_cam_csi0_aclk", + GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_CSI0, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cam_csi_bus_d_cam_csi_ipclkport_clk__system__clk_csi1", "dout_cam_csi1_aclk", + GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_CSI1, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cam_csi_bus_d_cam_csi_ipclkport_clk__system__clk_csi2", "dout_cam_csi2_aclk", + GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_CSI2, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cam_csi_bus_d_cam_csi_ipclkport_clk__system__clk_soc_noc", "dout_cam_csi_busd", + GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_SOC_NOC, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "cam_csi_bus_d_cam_csi_ipclkport_clk__system__noc", "dout_cam_csi_busd", + GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__NOC, 21, CLK_IGNORE_UNUSED, 0), + GATE(CAM_CSI0_0_IPCLKPORT_I_ACLK, "cam_csi0_0_ipclkport_i_aclk", "dout_cam_csi0_aclk", + GAT_CAM_CSI0_0_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, 
"cam_csi0_0_ipclkport_i_pclk", "dout_cam_csi_busp", + GAT_CAM_CSI0_0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(CAM_CSI0_1_IPCLKPORT_I_ACLK, "cam_csi0_1_ipclkport_i_aclk", "dout_cam_csi0_aclk", + GAT_CAM_CSI0_1_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cam_csi0_1_ipclkport_i_pclk", "dout_cam_csi_busp", + GAT_CAM_CSI0_1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(CAM_CSI0_2_IPCLKPORT_I_ACLK, "cam_csi0_2_ipclkport_i_aclk", "dout_cam_csi0_aclk", + GAT_CAM_CSI0_2_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cam_csi0_2_ipclkport_i_pclk", "dout_cam_csi_busp", + GAT_CAM_CSI0_2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(CAM_CSI0_3_IPCLKPORT_I_ACLK, "cam_csi0_3_ipclkport_i_aclk", "dout_cam_csi0_aclk", + GAT_CAM_CSI0_3_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cam_csi0_3_ipclkport_i_pclk", "dout_cam_csi_busp", + GAT_CAM_CSI0_3_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(CAM_CSI1_0_IPCLKPORT_I_ACLK, "cam_csi1_0_ipclkport_i_aclk", "dout_cam_csi1_aclk", + GAT_CAM_CSI1_0_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cam_csi1_0_ipclkport_i_pclk", "dout_cam_csi_busp", + GAT_CAM_CSI1_0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(CAM_CSI1_1_IPCLKPORT_I_ACLK, "cam_csi1_1_ipclkport_i_aclk", "dout_cam_csi1_aclk", + GAT_CAM_CSI1_1_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cam_csi1_1_ipclkport_i_pclk", "dout_cam_csi_busp", + GAT_CAM_CSI1_1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(CAM_CSI1_2_IPCLKPORT_I_ACLK, "cam_csi1_2_ipclkport_i_aclk", "dout_cam_csi1_aclk", + GAT_CAM_CSI1_2_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cam_csi1_2_ipclkport_i_pclk", "dout_cam_csi_busp", + GAT_CAM_CSI1_2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(CAM_CSI1_3_IPCLKPORT_I_ACLK, "cam_csi1_3_ipclkport_i_aclk", "dout_cam_csi1_aclk", + GAT_CAM_CSI1_3_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cam_csi1_3_ipclkport_i_pclk", "dout_cam_csi_busp", + GAT_CAM_CSI1_3_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(CAM_CSI2_0_IPCLKPORT_I_ACLK, "cam_csi2_0_ipclkport_i_aclk", "dout_cam_csi2_aclk", + GAT_CAM_CSI2_0_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cam_csi2_0_ipclkport_i_pclk", "dout_cam_csi_busp", + GAT_CAM_CSI2_0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(CAM_CSI2_1_IPCLKPORT_I_ACLK, "cam_csi2_1_ipclkport_i_aclk", "dout_cam_csi2_aclk", + GAT_CAM_CSI2_1_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cam_csi2_1_ipclkport_i_pclk", "dout_cam_csi_busp", + GAT_CAM_CSI2_1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(CAM_CSI2_2_IPCLKPORT_I_ACLK, "cam_csi2_2_ipclkport_i_aclk", "dout_cam_csi2_aclk", + GAT_CAM_CSI2_2_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cam_csi2_2_ipclkport_i_pclk", "dout_cam_csi_busp", + GAT_CAM_CSI2_2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(CAM_CSI2_3_IPCLKPORT_I_ACLK, "cam_csi2_3_ipclkport_i_aclk", "dout_cam_csi2_aclk", + GAT_CAM_CSI2_3_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cam_csi2_3_ipclkport_i_pclk", "dout_cam_csi_busp", + GAT_CAM_CSI2_3_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cam_ns_brdg_cam_csi_ipclkport_clk__psoc_cam_csi__clk_cam_csi_d", + "dout_cam_csi_busd", + GAT_CAM_NS_BRDG_CAM_CSI_IPCLKPORT_CLK__PSOC_CAM_CSI__CLK_CAM_CSI_D, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, "cam_ns_brdg_cam_csi_ipclkport_clk__psoc_cam_csi__clk_cam_csi_p", + "dout_cam_csi_busp", + GAT_CAM_NS_BRDG_CAM_CSI_IPCLKPORT_CLK__PSOC_CAM_CSI__CLK_CAM_CSI_P, 21, + CLK_IGNORE_UNUSED, 0), + GATE(0, 
"cam_sysreg_cam_csi_ipclkport_pclk", "dout_cam_csi_busp", + GAT_CAM_SYSREG_CAM_CSI_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(0, "cam_tbu_cam_csi_ipclkport_aclk", "dout_cam_csi_busd", + GAT_CAM_TBU_CAM_CSI_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0), +}; + +static const struct samsung_cmu_info cam_csi_cmu_info __initconst = { + .pll_clks = cam_csi_pll_clks, + .nr_pll_clks = ARRAY_SIZE(cam_csi_pll_clks), + .mux_clks = cam_csi_mux_clks, + .nr_mux_clks = ARRAY_SIZE(cam_csi_mux_clks), + .div_clks = cam_csi_div_clks, + .nr_div_clks = ARRAY_SIZE(cam_csi_div_clks), + .gate_clks = cam_csi_gate_clks, + .nr_gate_clks = ARRAY_SIZE(cam_csi_gate_clks), + .nr_clk_ids = CAM_CSI_NR_CLK, + .clk_regs = cam_csi_clk_regs, + .nr_clk_regs = ARRAY_SIZE(cam_csi_clk_regs), +}; + /** * fsd_cmu_probe - Probe function for FSD platform clocks * @pdev: Pointer to platform device @@ -1576,6 +1780,9 @@ static const struct of_device_id fsd_cmu_of_match[] = { }, { .compatible = "tesla,fsd-clock-mfc", .data = &mfc_cmu_info, + }, { + .compatible = "tesla,fsd-clock-cam_csi", + .data = &cam_csi_cmu_info, }, { }, }; -- cgit v1.2.3 From e3aabb3c7dbe66201b45d7b2c20132196f491ad4 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Wed, 22 Dec 2021 07:32:14 +0300 Subject: memory: tegra30-emc: Print additional memory info Print out memory type and LPDDR2 configuration on Tegra30, making it similar to the memory info printed by the Tegra20 memory driver. This info is useful for debugging purposes. Tested-by: Svyatoslav Ryhel # T30 ASUS TF201 LPDDR2 Signed-off-by: Dmitry Osipenko Link: https://lore.kernel.org/r/20211222043215.28237-1-digetx@gmail.com Signed-off-by: Krzysztof Kozlowski --- drivers/memory/tegra/Kconfig | 1 + drivers/memory/tegra/tegra30-emc.c | 131 ++++++++++++++++++++++++++++++++++--- 2 files changed, 122 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig index 7951764b4efe..3fe83d7c2bf8 100644 --- a/drivers/memory/tegra/Kconfig +++ b/drivers/memory/tegra/Kconfig @@ -28,6 +28,7 @@ config TEGRA30_EMC default y depends on ARCH_TEGRA_3x_SOC || COMPILE_TEST select PM_OPP + select DDR help This driver is for the External Memory Controller (EMC) found on Tegra30 chips. The EMC controls the external DRAM on the board. 
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c index 80f98d717e13..9ba2a9e5316b 100644 --- a/drivers/memory/tegra/tegra30-emc.c +++ b/drivers/memory/tegra/tegra30-emc.c @@ -9,6 +9,7 @@ * Copyright (C) 2019 GRATE-DRIVER project */ +#include #include #include #include @@ -31,11 +32,15 @@ #include #include +#include "../jedec_ddr.h" +#include "../of_memory.h" + #include "mc.h" #define EMC_INTSTATUS 0x000 #define EMC_INTMASK 0x004 #define EMC_DBG 0x008 +#define EMC_ADR_CFG 0x010 #define EMC_CFG 0x00c #define EMC_REFCTRL 0x020 #define EMC_TIMING_CONTROL 0x028 @@ -81,6 +86,7 @@ #define EMC_EMRS 0x0d0 #define EMC_SELF_REF 0x0e0 #define EMC_MRW 0x0e8 +#define EMC_MRR 0x0ec #define EMC_XM2DQSPADCTRL3 0x0f8 #define EMC_FBIO_SPARE 0x100 #define EMC_FBIO_CFG5 0x104 @@ -208,6 +214,13 @@ #define EMC_REFRESH_OVERFLOW_INT BIT(3) #define EMC_CLKCHANGE_COMPLETE_INT BIT(4) +#define EMC_MRR_DIVLD_INT BIT(5) + +#define EMC_MRR_DEV_SELECTN GENMASK(31, 30) +#define EMC_MRR_MRR_MA GENMASK(23, 16) +#define EMC_MRR_MRR_DATA GENMASK(15, 0) + +#define EMC_ADR_CFG_EMEM_NUMDEV BIT(0) enum emc_dram_type { DRAM_TYPE_DDR3, @@ -378,6 +391,8 @@ struct tegra_emc { /* protect shared rate-change code path */ struct mutex rate_lock; + + bool mrr_error; }; static int emc_seq_update_timing(struct tegra_emc *emc) @@ -1008,12 +1023,18 @@ static int emc_load_timings_from_dt(struct tegra_emc *emc, return 0; } -static struct device_node *emc_find_node_by_ram_code(struct device *dev) +static struct device_node *emc_find_node_by_ram_code(struct tegra_emc *emc) { + struct device *dev = emc->dev; struct device_node *np; u32 value, ram_code; int err; + if (emc->mrr_error) { + dev_warn(dev, "memory timings skipped due to MRR error\n"); + return NULL; + } + if (of_get_child_count(dev->of_node) == 0) { dev_info_once(dev, "device-tree doesn't have memory timings\n"); return NULL; @@ -1035,11 +1056,73 @@ static struct device_node *emc_find_node_by_ram_code(struct device *dev) return NULL; } +static int emc_read_lpddr_mode_register(struct tegra_emc *emc, + unsigned int emem_dev, + unsigned int register_addr, + unsigned int *register_data) +{ + u32 memory_dev = emem_dev ? 
1 : 2; + u32 val, mr_mask = 0xff; + int err; + + /* clear data-valid interrupt status */ + writel_relaxed(EMC_MRR_DIVLD_INT, emc->regs + EMC_INTSTATUS); + + /* issue mode register read request */ + val = FIELD_PREP(EMC_MRR_DEV_SELECTN, memory_dev); + val |= FIELD_PREP(EMC_MRR_MRR_MA, register_addr); + + writel_relaxed(val, emc->regs + EMC_MRR); + + /* wait for the LPDDR2 data-valid interrupt */ + err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, val, + val & EMC_MRR_DIVLD_INT, + 1, 100); + if (err) { + dev_err(emc->dev, "mode register %u read failed: %d\n", + register_addr, err); + emc->mrr_error = true; + return err; + } + + /* read out mode register data */ + val = readl_relaxed(emc->regs + EMC_MRR); + *register_data = FIELD_GET(EMC_MRR_MRR_DATA, val) & mr_mask; + + return 0; +} + +static void emc_read_lpddr_sdram_info(struct tegra_emc *emc, + unsigned int emem_dev) +{ + union lpddr2_basic_config4 basic_conf4; + unsigned int manufacturer_id; + unsigned int revision_id1; + unsigned int revision_id2; + + /* these registers are standard for all LPDDR JEDEC memory chips */ + emc_read_lpddr_mode_register(emc, emem_dev, 5, &manufacturer_id); + emc_read_lpddr_mode_register(emc, emem_dev, 6, &revision_id1); + emc_read_lpddr_mode_register(emc, emem_dev, 7, &revision_id2); + emc_read_lpddr_mode_register(emc, emem_dev, 8, &basic_conf4.value); + + dev_info(emc->dev, "SDRAM[dev%u]: manufacturer: 0x%x (%s) rev1: 0x%x rev2: 0x%x prefetch: S%u density: %uMbit iowidth: %ubit\n", + emem_dev, manufacturer_id, + lpddr2_jedec_manufacturer(manufacturer_id), + revision_id1, revision_id2, + 4 >> basic_conf4.arch_type, + 64 << basic_conf4.density, + 32 >> basic_conf4.io_width); +} + static int emc_setup_hw(struct tegra_emc *emc) { + u32 fbio_cfg5, emc_cfg, emc_dbg, emc_adr_cfg; u32 intmask = EMC_REFRESH_OVERFLOW_INT; - u32 fbio_cfg5, emc_cfg, emc_dbg; + static bool print_sdram_info_once; enum emc_dram_type dram_type; + const char *dram_type_str; + unsigned int emem_numdev; fbio_cfg5 = readl_relaxed(emc->regs + EMC_FBIO_CFG5); dram_type = fbio_cfg5 & EMC_FBIO_CFG5_DRAM_TYPE_MASK; @@ -1076,6 +1159,34 @@ static int emc_setup_hw(struct tegra_emc *emc) emc_dbg &= ~EMC_DBG_FORCE_UPDATE; writel_relaxed(emc_dbg, emc->regs + EMC_DBG); + switch (dram_type) { + case DRAM_TYPE_DDR1: + dram_type_str = "DDR1"; + break; + case DRAM_TYPE_LPDDR2: + dram_type_str = "LPDDR2"; + break; + case DRAM_TYPE_DDR2: + dram_type_str = "DDR2"; + break; + case DRAM_TYPE_DDR3: + dram_type_str = "DDR3"; + break; + } + + emc_adr_cfg = readl_relaxed(emc->regs + EMC_ADR_CFG); + emem_numdev = FIELD_GET(EMC_ADR_CFG_EMEM_NUMDEV, emc_adr_cfg) + 1; + + dev_info_once(emc->dev, "%u %s %s attached\n", emem_numdev, + dram_type_str, emem_numdev == 2 ? 
"devices" : "device"); + + if (dram_type == DRAM_TYPE_LPDDR2 && !print_sdram_info_once) { + while (emem_numdev--) + emc_read_lpddr_sdram_info(emc, emem_numdev); + + print_sdram_info_once = true; + } + return 0; } @@ -1538,14 +1649,6 @@ static int tegra_emc_probe(struct platform_device *pdev) emc->clk_nb.notifier_call = emc_clk_change_notify; emc->dev = &pdev->dev; - np = emc_find_node_by_ram_code(&pdev->dev); - if (np) { - err = emc_load_timings_from_dt(emc, np); - of_node_put(np); - if (err) - return err; - } - emc->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(emc->regs)) return PTR_ERR(emc->regs); @@ -1554,6 +1657,14 @@ static int tegra_emc_probe(struct platform_device *pdev) if (err) return err; + np = emc_find_node_by_ram_code(emc); + if (np) { + err = emc_load_timings_from_dt(emc, np); + of_node_put(np); + if (err) + return err; + } + err = platform_get_irq(pdev, 0); if (err < 0) return err; -- cgit v1.2.3 From 9ff684342ee7d3ea2755c6e9b60bc43085baa3ad Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Wed, 22 Dec 2021 07:32:15 +0300 Subject: memory: tegra20-emc: Correct memory device mask Memory chip select is swapped when we read mode register, correct it. We didn't have devices that use a single LPDDR chip and both chips are always identical, hence this change is just a minor improvement. Fixes: 131dd9a436d8 ("memory: tegra20-emc: Support matching timings by LPDDR2 configuration") Signed-off-by: Dmitry Osipenko Link: https://lore.kernel.org/r/20211222043215.28237-2-digetx@gmail.com Signed-off-by: Krzysztof Kozlowski --- drivers/memory/tegra/tegra20-emc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c index 497b6edbf3ca..25ba3c5e4ad6 100644 --- a/drivers/memory/tegra/tegra20-emc.c +++ b/drivers/memory/tegra/tegra20-emc.c @@ -540,7 +540,7 @@ static int emc_read_lpddr_mode_register(struct tegra_emc *emc, unsigned int register_addr, unsigned int *register_data) { - u32 memory_dev = emem_dev + 1; + u32 memory_dev = emem_dev ? 1 : 2; u32 val, mr_mask = 0xff; int err; -- cgit v1.2.3 From 205e17766c78c4dd8dbd1e88ac723401ec3ce5ee Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Fri, 15 Oct 2021 17:15:57 +0200 Subject: memory: mtk-smi: Use ARRAY_SIZE to define MTK_SMI_CLK_NR_MAX This definition is tied to the number of SMI common clocks (the array mtk_smi_common_clks): improve the definition by using the ARRAY_SIZE macro instead. That will also reduce room for mistakes when updating the aforementioned array in the future. Signed-off-by: AngeloGioacchino Del Regno Reviewed-by: Yong Wu Link: https://lore.kernel.org/r/20211015151557.510726-1-angelogioacchino.delregno@collabora.com Signed-off-by: Krzysztof Kozlowski --- drivers/memory/mtk-smi.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index e201e5976f34..a0d50ce71e9c 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -94,8 +94,6 @@ enum mtk_smi_type { MTK_SMI_GEN2_SUB_COMM, /* gen2 smi sub common */ }; -#define MTK_SMI_CLK_NR_MAX 4 - /* larbs: Require apb/smi clocks while gals is optional. */ static const char * const mtk_smi_larb_clks[] = {"apb", "smi", "gals"}; #define MTK_SMI_LARB_REQ_CLK_NR 2 @@ -106,6 +104,7 @@ static const char * const mtk_smi_larb_clks[] = {"apb", "smi", "gals"}; * sub common: Require apb/smi/gals0 clocks in has_gals case. Otherwise, only apb/smi are required. 
*/ static const char * const mtk_smi_common_clks[] = {"apb", "smi", "gals0", "gals1"}; +#define MTK_SMI_CLK_NR_MAX ARRAY_SIZE(mtk_smi_common_clks) #define MTK_SMI_COM_REQ_CLK_NR 2 #define MTK_SMI_COM_GALS_REQ_CLK_NR MTK_SMI_CLK_NR_MAX #define MTK_SMI_SUB_COM_GALS_REQ_CLK_NR 3 -- cgit v1.2.3 From 3e25f800afb82bd9e5f82458c0c71f1623b31ee5 Mon Sep 17 00:00:00 2001 From: Li Yang Date: Tue, 16 Nov 2021 15:18:46 -0600 Subject: memory: fsl_ifc: populate child devices without relying on simple-bus After we update the binding to not use simple-bus compatible for the controller, we need the driver to populate the child devices explicitly. Signed-off-by: Li Yang Link: https://lore.kernel.org/r/20211116211846.16335-3-leoyang.li@nxp.com Signed-off-by: Krzysztof Kozlowski --- drivers/memory/fsl_ifc.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers') diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c index 75a8c38df939..2f6939da21cd 100644 --- a/drivers/memory/fsl_ifc.c +++ b/drivers/memory/fsl_ifc.c @@ -88,6 +88,7 @@ static int fsl_ifc_ctrl_remove(struct platform_device *dev) { struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(&dev->dev); + of_platform_depopulate(&dev->dev); free_irq(ctrl->nand_irq, ctrl); free_irq(ctrl->irq, ctrl); @@ -285,8 +286,16 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) } } + /* legacy dts may still use "simple-bus" compatible */ + ret = of_platform_populate(dev->dev.of_node, NULL, NULL, + &dev->dev); + if (ret) + goto err_free_nandirq; + return 0; +err_free_nandirq: + free_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_ctrl_dev); err_free_irq: free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev); err_unmap_nandirq: -- cgit v1.2.3 From 0123af535b9c090cf05dcf500f9303bae5849691 Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sun, 28 Nov 2021 21:41:58 +0100 Subject: memory: tegra: Constify struct thermal_cooling_device_ops The only usage of tegra210_emc_cd_ops is to pass its address to devm_thermal_of_cooling_device_register() which is a pointer to const struct thermal_cooling_device_ops. Make it const to allow the compiler to put it in read-only memory. Signed-off-by: Rikard Falkeborn Link: https://lore.kernel.org/r/20211128204158.19544-1-rikard.falkeborn@gmail.com Signed-off-by: Krzysztof Kozlowski --- drivers/memory/tegra/tegra210-emc-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/memory/tegra/tegra210-emc-core.c b/drivers/memory/tegra/tegra210-emc-core.c index 13584f9317a4..cbe1a7723514 100644 --- a/drivers/memory/tegra/tegra210-emc-core.c +++ b/drivers/memory/tegra/tegra210-emc-core.c @@ -711,7 +711,7 @@ static int tegra210_emc_cd_set_state(struct thermal_cooling_device *cd, return 0; } -static struct thermal_cooling_device_ops tegra210_emc_cd_ops = { +static const struct thermal_cooling_device_ops tegra210_emc_cd_ops = { .get_max_state = tegra210_emc_cd_max_state, .get_cur_state = tegra210_emc_cd_get_state, .set_cur_state = tegra210_emc_cd_set_state, -- cgit v1.2.3 From e29ed0d1051d9fc619f9268224ab436d34d1f8db Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Sun, 12 Dec 2021 11:33:47 +0800 Subject: memory: brcmstb_dpfe: fix typo in a comment The double `to' in the comment in line 427 is repeated. Remove it from the comment. 
Signed-off-by: Jason Wang Acked-by: Markus Mayer Acked-by: Florian Fainelli Link: https://lore.kernel.org/r/20211212033347.67921-1-wangborong@cdjrlc.com Signed-off-by: Krzysztof Kozlowski --- drivers/memory/brcmstb_dpfe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c index f43ba69fbb3e..14412002775d 100644 --- a/drivers/memory/brcmstb_dpfe.c +++ b/drivers/memory/brcmstb_dpfe.c @@ -424,7 +424,7 @@ static void __finalize_command(struct brcmstb_dpfe_priv *priv) /* * It depends on the API version which MBOX register we have to write to - * to signal we are done. + * signal we are done. */ release_mbox = (priv->dpfe_api->version < 2) ? REG_TO_HOST_MBOX : REG_TO_DCPU_MBOX; -- cgit v1.2.3 From c137fb8909c1d14f2fca43ceaaf68ba26731741d Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Wed, 8 Dec 2021 17:12:11 -0300 Subject: soc: imx: Remove Layerscape check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since commit 4ebd29f91629 ("soc: imx: Register SoC device only on i.MX boards") the soc-imx driver is only registered on i.MX platforms as intended. This means that we no longer need to do a specific check for Layerscape. Remove the now unneeded "fsl,ls1021a" check. Signed-off-by: Fabio Estevam Reviewed-by: Horia Geantă Signed-off-by: Shawn Guo --- drivers/soc/imx/soc-imx.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/imx/soc-imx.c b/drivers/soc/imx/soc-imx.c index 77bc12039c3d..fab668c83f98 100644 --- a/drivers/soc/imx/soc-imx.c +++ b/drivers/soc/imx/soc-imx.c @@ -40,9 +40,6 @@ static int __init imx_soc_device_init(void) if (!__mxc_cpu_type) return 0; - if (of_machine_is_compatible("fsl,ls1021a")) - return 0; - soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); if (!soc_dev_attr) return -ENOMEM; -- cgit v1.2.3 From 7823e5aa5d1dd9ed5849923c165eb8f29ad23c54 Mon Sep 17 00:00:00 2001 From: Marijn Suijten Date: Wed, 8 Dec 2021 09:34:21 +0100 Subject: firmware: qcom: scm: Remove reassignment to desc following initializer Member assignments to qcom_scm_desc were moved into struct initializers in 57d3b816718c ("firmware: qcom_scm: Remove thin wrappers") including the case in qcom_scm_iommu_secure_ptbl_init, except that the - now duplicate - assignment to desc was left in place. While not harmful, remove this unnecessary extra reassignment. 
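As an illustrative sketch only (not taken from the patch), this is the shape of the redundancy being removed: once the SCM descriptor is filled by its designated initializer, repeating the same member assignments afterwards is dead code. The field names follow the qcom_scm_desc usage shown elsewhere in this series.

        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_MP,
                .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
                .arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, QCOM_SCM_VAL),
                .args[0] = addr,        /* already initialized here ... */
                .args[1] = size,
                .args[2] = spare,
                .owner = ARM_SMCCC_OWNER_SIP,
        };

        desc.args[0] = addr;            /* ... so these reassignments are redundant */
        desc.args[1] = size;
        desc.args[2] = spare;
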
Fixes: 57d3b816718c ("firmware: qcom_scm: Remove thin wrappers") Signed-off-by: Marijn Suijten Reviewed-by: AngeloGioacchino Del Regno Reviewed-by: Alex Elder Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20211208083423.22037-2-marijn.suijten@somainline.org --- drivers/firmware/qcom_scm.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 7db8066b19fd..3f67bf774821 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -749,12 +749,6 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) }; int ret; - desc.args[0] = addr; - desc.args[1] = size; - desc.args[2] = spare; - desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, - QCOM_SCM_VAL); - ret = qcom_scm_call(__scm->dev, &desc, NULL); /* the pg table has been initialized already, ignore the error */ -- cgit v1.2.3 From 943515090ec67f81f6f93febfddb8c9118357e97 Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Wed, 8 Dec 2021 09:34:22 +0100 Subject: firmware: qcom: scm: Add function to set the maximum IOMMU pool size This is not necessary for basic functionality of the IOMMU, but it's an optimization that tells to the TZ what's the maximum mappable size for the secure IOMMUs, so that it can optimize the data structures in the TZ itself. Signed-off-by: AngeloGioacchino Del Regno [Marijn: ported from 5.3 to the unified architecture in 5.11] Signed-off-by: Marijn Suijten Reviewed-by: Konrad Dybcio Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20211208083423.22037-3-marijn.suijten@somainline.org --- drivers/firmware/qcom_scm.c | 15 +++++++++++++++ drivers/firmware/qcom_scm.h | 1 + include/linux/qcom_scm.h | 1 + 3 files changed, 17 insertions(+) (limited to 'drivers') diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 3f67bf774821..d5a9ba15e2ba 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -759,6 +759,21 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) } EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init); +int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE, + .arginfo = QCOM_SCM_ARGS(2), + .args[0] = size, + .args[1] = spare, + .owner = ARM_SMCCC_OWNER_SIP, + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL(qcom_scm_iommu_set_cp_pool_size); + int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size, u32 cp_nonpixel_start, u32 cp_nonpixel_size) diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h index d92156ceb3ac..bb627941702b 100644 --- a/drivers/firmware/qcom_scm.h +++ b/drivers/firmware/qcom_scm.h @@ -100,6 +100,7 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, #define QCOM_SCM_MP_RESTORE_SEC_CFG 0x02 #define QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE 0x03 #define QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT 0x04 +#define QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE 0x05 #define QCOM_SCM_MP_VIDEO_VAR 0x08 #define QCOM_SCM_MP_ASSIGN 0x16 diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 81cad9e1e412..8a065f8660c1 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -83,6 +83,7 @@ extern bool qcom_scm_restore_sec_cfg_available(void); extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare); extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size); extern int 
qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare); +extern int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size); extern int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size, u32 cp_nonpixel_start, u32 cp_nonpixel_size); -- cgit v1.2.3 From 071a13332de894cb3c38b17c82350f1e4167c023 Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Wed, 8 Dec 2021 09:34:23 +0100 Subject: firmware: qcom: scm: Add function to set IOMMU pagetable addressing Add a function to change the IOMMU pagetable addressing to AArch32 LPAE or AArch64. If doing that, then this must be done for each IOMMU context (not necessarily at the same time). Signed-off-by: AngeloGioacchino Del Regno [Marijn: ported from 5.3 to the unified architecture in 5.11] Signed-off-by: Marijn Suijten Reviewed-by: Konrad Dybcio Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20211208083423.22037-4-marijn.suijten@somainline.org --- drivers/firmware/qcom_scm.c | 16 ++++++++++++++++ drivers/firmware/qcom_scm.h | 1 + include/linux/qcom_scm.h | 1 + 3 files changed, 18 insertions(+) (limited to 'drivers') diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index d5a9ba15e2ba..6f7096120023 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -1140,6 +1140,22 @@ int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) } EXPORT_SYMBOL(qcom_scm_hdcp_req); +int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_SMMU_PROGRAM, + .cmd = QCOM_SCM_SMMU_PT_FORMAT, + .arginfo = QCOM_SCM_ARGS(3), + .args[0] = sec_id, + .args[1] = ctx_num, + .args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */ + .owner = ARM_SMCCC_OWNER_SIP, + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL(qcom_scm_iommu_set_pt_format); + int qcom_scm_qsmmu500_wait_safe_toggle(bool en) { struct qcom_scm_desc desc = { diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h index bb627941702b..a348f2c214e5 100644 --- a/drivers/firmware/qcom_scm.h +++ b/drivers/firmware/qcom_scm.h @@ -120,6 +120,7 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, #define QCOM_SCM_LMH_LIMIT_DCVSH 0x10 #define QCOM_SCM_SVC_SMMU_PROGRAM 0x15 +#define QCOM_SCM_SMMU_PT_FORMAT 0x01 #define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03 #define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02 diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 8a065f8660c1..ca4a88d7cbdc 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -108,6 +108,7 @@ extern bool qcom_scm_hdcp_available(void); extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp); +extern int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt); extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en); extern int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, -- cgit v1.2.3 From a9ff0638a4063e6b8a0aa38e9995826565f3d529 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Thu, 23 Dec 2021 08:54:41 +0100 Subject: soc: qcom: llcc: Use devm_bitmap_zalloc() when applicable 'drv_data->bitmap' is a bitmap. So use 'devm_bitmap_zalloc()' to simplify code, improve the semantic. This also fixes a spurious indentation. 
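For reference, a minimal sketch (not part of the patch itself) of the clean-up pattern applied here and again in the later k3-ringacc change: an open-coded, device-managed bitmap allocation is replaced by the dedicated helper, which takes the number of bits directly and has the same devm lifetime. Here 'dev' and 'max_slices' stand in for the driver's device pointer and bit count.

        /* open-coded: allocate enough unsigned longs to hold max_slices bits */
        bitmap = devm_kcalloc(dev, BITS_TO_LONGS(max_slices),
                              sizeof(unsigned long), GFP_KERNEL);

        /* equivalent, but states the intent (a zeroed bitmap of max_slices bits) */
        bitmap = devm_bitmap_zalloc(dev, max_slices, GFP_KERNEL);
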
Signed-off-by: Christophe JAILLET Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/3ee83f75afa8754fade4fff6a03b57f0ae3ccc28.1640245993.git.christophe.jaillet@wanadoo.fr --- drivers/soc/qcom/llcc-qcom.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index ec52f29c8867..00274a93406b 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -632,9 +632,8 @@ static int qcom_llcc_probe(struct platform_device *pdev) for (i = 0; i < num_banks; i++) drv_data->offsets[i] = i * BANK_OFFSET_STRIDE; - drv_data->bitmap = devm_kcalloc(dev, - BITS_TO_LONGS(drv_data->max_slices), sizeof(unsigned long), - GFP_KERNEL); + drv_data->bitmap = devm_bitmap_zalloc(dev, drv_data->max_slices, + GFP_KERNEL); if (!drv_data->bitmap) { ret = -ENOMEM; goto err; -- cgit v1.2.3 From a5d32f6d2e59a654036d5a4f59d9202302b23388 Mon Sep 17 00:00:00 2001 From: Yang Li Date: Thu, 30 Dec 2021 22:12:45 +0800 Subject: firmware: qcom: scm: Fix some kernel-doc comments Fix qcom_scm_call(), qcom_scm_call_atomic, and qcom_scm_cpu_power_down() kernel-doc comment to remove remove warnings found by running scripts/kernel-doc, which is caused by using 'make W=1'. drivers/firmware/qcom_scm.c:191: warning: Function parameter or member 'res' not described in 'qcom_scm_call' drivers/firmware/qcom_scm.c:191: warning: Excess function parameter 'svc_id' description in 'qcom_scm_call' drivers/firmware/qcom_scm.c:191: warning: Excess function parameter 'cmd_id' description in 'qcom_scm_call' drivers/firmware/qcom_scm.c:219: warning: Excess function parameter 'svc_id' description in 'qcom_scm_call_atomic' drivers/firmware/qcom_scm.c:219: warning: Excess function parameter 'cmd_id' description in 'qcom_scm_call_atomic' drivers/firmware/qcom_scm.c:360: warning: Function parameter or member 'flags' not described in 'qcom_scm_cpu_power_down' Reported-by: Abaci Robot Signed-off-by: Yang Li Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20211230141245.29444-1-yang.lee@linux.alibaba.com --- drivers/firmware/qcom_scm.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 6f7096120023..927738882e54 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -179,9 +179,8 @@ found: /** * qcom_scm_call() - Invoke a syscall in the secure world * @dev: device - * @svc_id: service identifier - * @cmd_id: command identifier * @desc: Descriptor structure containing arguments and return values + * @res: Structure containing results from SMC/HVC call * * Sends a command to the SCM and waits for the command to finish processing. * This should *only* be called in pre-emptible context. @@ -205,8 +204,6 @@ static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc, /** * qcom_scm_call_atomic() - atomic variation of qcom_scm_call() * @dev: device - * @svc_id: service identifier - * @cmd_id: command identifier * @desc: Descriptor structure containing arguments and return values * @res: Structure containing results from SMC/HVC call * @@ -350,7 +347,7 @@ EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr); /** * qcom_scm_cpu_power_down() - Power down the cpu - * @flags - Flags to flush cache + * @flags: Flags to flush cache * * This is an end point to power down cpu. 
If there was a pending interrupt, * the control would return from this function, otherwise, the cpu jumps to the -- cgit v1.2.3 From 5a811126d38f9767a20cc271b34db7c8efc5a46c Mon Sep 17 00:00:00 2001 From: Jiasheng Jiang Date: Fri, 31 Dec 2021 17:44:19 +0800 Subject: soc: qcom: rpmpd: Check for null return of devm_kcalloc Because of the possible failure of the allocation, data->domains might be NULL pointer and will cause the dereference of the NULL pointer later. Therefore, it might be better to check it and directly return -ENOMEM without releasing data manually if fails, because the comment of the devm_kmalloc() says "Memory allocated with this function is automatically freed on driver detach.". Fixes: bbe3a66c3f5a ("soc: qcom: rpmpd: Add a Power domain driver to model corners") Signed-off-by: Jiasheng Jiang Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20211231094419.1941054-1-jiasheng@iscas.ac.cn --- drivers/soc/qcom/rpmpd.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c index 0a8d8d24bfb7..624b5630feb8 100644 --- a/drivers/soc/qcom/rpmpd.c +++ b/drivers/soc/qcom/rpmpd.c @@ -610,6 +610,9 @@ static int rpmpd_probe(struct platform_device *pdev) data->domains = devm_kcalloc(&pdev->dev, num, sizeof(*data->domains), GFP_KERNEL); + if (!data->domains) + return -ENOMEM; + data->num_domains = num; for (i = 0; i < num; i++) { -- cgit v1.2.3 From 0ff027027e05a866491bbb53494f0e2a61354c85 Mon Sep 17 00:00:00 2001 From: Miaoqian Lin Date: Fri, 7 Jan 2022 07:31:26 +0000 Subject: soc: qcom: ocmem: Fix missing put_device() call in of_get_ocmem The reference taken by 'of_find_device_by_node()' must be released when not needed anymore. Add the corresponding 'put_device()' in the error handling path. Fixes: 01f937ffc468 ("soc: qcom: ocmem: don't return NULL in of_get_ocmem") Signed-off-by: Miaoqian Lin Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20220107073126.2335-1-linmq006@gmail.com --- drivers/soc/qcom/ocmem.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c index d2dacbbaafbd..97fd24c178f8 100644 --- a/drivers/soc/qcom/ocmem.c +++ b/drivers/soc/qcom/ocmem.c @@ -206,6 +206,7 @@ struct ocmem *of_get_ocmem(struct device *dev) ocmem = platform_get_drvdata(pdev); if (!ocmem) { dev_err(dev, "Cannot get ocmem\n"); + put_device(&pdev->dev); return ERR_PTR(-ENODEV); } return ocmem; -- cgit v1.2.3 From 4b41a9d0fe3db5f91078a380f62f0572c3ecf2dd Mon Sep 17 00:00:00 2001 From: Miaoqian Lin Date: Sat, 8 Jan 2022 09:59:31 +0000 Subject: soc: qcom: aoss: Fix missing put_device call in qmp_get The reference taken by 'of_find_device_by_node()' must be released when not needed anymore. Add the corresponding 'put_device()' in the error handling paths. Fixes: 8c75d585b931 ("soc: qcom: aoss: Expose send for generic usecase") Signed-off-by: Miaoqian Lin Reviewed-by: Stephen Boyd Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20220108095931.21527-1-linmq006@gmail.com --- drivers/soc/qcom/qcom_aoss.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c index cbe5e39fdaeb..563ae0a501dc 100644 --- a/drivers/soc/qcom/qcom_aoss.c +++ b/drivers/soc/qcom/qcom_aoss.c @@ -451,7 +451,11 @@ struct qmp *qmp_get(struct device *dev) qmp = platform_get_drvdata(pdev); - return qmp ? 
qmp : ERR_PTR(-EPROBE_DEFER); + if (!qmp) { + put_device(&pdev->dev); + return ERR_PTR(-EPROBE_DEFER); + } + return qmp; } EXPORT_SYMBOL(qmp_get); -- cgit v1.2.3 From 4e6ae78ee61957800657d56ba78a10f034de174e Mon Sep 17 00:00:00 2001 From: Xu Wang Date: Fri, 14 Jan 2022 08:50:19 +0000 Subject: soc: qcom: apr: Remove redundant 'flush_workqueue()' calls 'destroy_workqueue()' already drains the queue before destroying it, so there is no need to flush it explicitly. Remove the redundant 'flush_workqueue()' calls. Signed-off-by: Xu Wang Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20220114085019.42904-1-vulab@iscas.ac.cn --- drivers/soc/qcom/apr.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c index 82ca12c9328a..3caabd873322 100644 --- a/drivers/soc/qcom/apr.c +++ b/drivers/soc/qcom/apr.c @@ -653,7 +653,6 @@ static void apr_remove(struct rpmsg_device *rpdev) pdr_handle_release(apr->pdr); device_for_each_child(&rpdev->dev, NULL, apr_remove_device); - flush_workqueue(apr->rxwq); destroy_workqueue(apr->rxwq); } -- cgit v1.2.3 From 8030cb9a55688c1339edd284d9d6ce5f9fc75160 Mon Sep 17 00:00:00 2001 From: Daniel Thompson Date: Thu, 27 Jan 2022 17:35:54 +0000 Subject: soc: qcom: aoss: remove spurious IRQF_ONESHOT flags Quoting the header comments, IRQF_ONESHOT is "Used by threaded interrupts which need to keep the irq line disabled until the threaded handler has been run.". When applied to an interrupt that doesn't request a threaded irq then IRQF_ONESHOT has a lesser known (undocumented?) side effect, which it to disable the forced threading of the irq. For "normal" kernels (without forced threading) then, if there is no thread_fn, then IRQF_ONESHOT is a nop. In this case disabling forced threading is not appropriate for this driver because it calls wake_up_all() and this API cannot be called from no-thread interrupt handlers on PREEMPT_RT systems (deadlock risk, triggers sleeping-while-atomic warnings). Fix this by removing IRQF_ONESHOT. 
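As a hedged illustration (not part of the patch; handler and device names are placeholders), the distinction the commit relies on: IRQF_ONESHOT belongs with threaded handlers, where the line must stay masked until the thread has run, while for a hard-IRQ-only handler it merely suppresses forced threading, which is what breaks PREEMPT_RT here.

        /* threaded handler: IRQF_ONESHOT is required (the genirq core rejects a
         * NULL primary handler without it) */
        ret = devm_request_threaded_irq(dev, irq, NULL, example_thread_fn,
                                        IRQF_ONESHOT, "example", priv);

        /* plain hard-IRQ handler, as in qcom_aoss: no thread_fn, so the flag is
         * dropped and the handler may be force-threaded on PREEMPT_RT */
        ret = devm_request_irq(dev, irq, example_hard_handler, 0, "example", priv);
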
Fixes: 2209481409b7 ("soc: qcom: Add AOSS QMP driver") Signed-off-by: Daniel Thompson [bjorn: Added Fixes tag] Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20220127173554.158111-1-daniel.thompson@linaro.org --- drivers/soc/qcom/qcom_aoss.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c index 563ae0a501dc..a59bb34e5eba 100644 --- a/drivers/soc/qcom/qcom_aoss.c +++ b/drivers/soc/qcom/qcom_aoss.c @@ -501,7 +501,7 @@ static int qmp_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - ret = devm_request_irq(&pdev->dev, irq, qmp_intr, IRQF_ONESHOT, + ret = devm_request_irq(&pdev->dev, irq, qmp_intr, 0, "aoss-qmp", qmp); if (ret < 0) { dev_err(&pdev->dev, "failed to request interrupt\n"); -- cgit v1.2.3 From 0b59bc00a6936e8670b58d4307a2cfba341d40d0 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Sat, 29 Jan 2022 18:34:07 +0100 Subject: clk: samsung: fix missing Tesla FSD dependency on Exynos The Tesla FSD clock controller driver uses shared parts from Exynos ARM64 clock drivers, so add proper dependency to fix COMPILE_TEST build errors like: /usr/bin/aarch64-linux-gnu-ld: drivers/clk/samsung/clk-fsd.o: in function `fsd_cmu_probe': clk-fsd.c:(.init.text+0x9c): undefined reference to `exynos_arm64_register_cmu' Reported-by: kernel test robot Fixes: e3f3dc3810d3 ("clk: samsung: fsd: Add cmu_peric block clock information") Signed-off-by: Krzysztof Kozlowski Reviewed-by: Alim Akhtar Link: https://lore.kernel.org/r/20220129173407.278591-1-krzysztof.kozlowski@canonical.com --- drivers/clk/samsung/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/clk/samsung/Kconfig b/drivers/clk/samsung/Kconfig index 5f64c58f120f..8e8245ab3fd1 100644 --- a/drivers/clk/samsung/Kconfig +++ b/drivers/clk/samsung/Kconfig @@ -129,6 +129,7 @@ config S3C2443_COMMON_CLK config TESLA_FSD_COMMON_CLK bool "Tesla FSD clock controller support" if COMPILE_TEST depends on COMMON_CLK_SAMSUNG + depends on EXYNOS_ARM64_COMMON_CLK help Support for the clock controller present on the Tesla FSD SoC. Choose Y here only if you build for this SoC. -- cgit v1.2.3 From a8eba8dde5fbf0b9f62a38230af6d66c389c37fc Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Thu, 23 Dec 2021 17:14:46 +0100 Subject: soc: ti: k3-ringacc: Use devm_bitmap_zalloc() when applicable 'rings_inuse' and 'proxy_inuse' are bitmaps. So use 'devm_bitmap_zalloc()' to simplify code and improve the semantic. 
Signed-off-by: Christophe JAILLET Signed-off-by: Nishanth Menon Link: https://lore.kernel.org/r/45544b0d97a7bea7764292852842adf5085a7700.1640276001.git.christophe.jaillet@wanadoo.fr --- drivers/soc/ti/k3-ringacc.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c index 31ab6c657fec..f7bf18b8229a 100644 --- a/drivers/soc/ti/k3-ringacc.c +++ b/drivers/soc/ti/k3-ringacc.c @@ -1402,12 +1402,10 @@ static int k3_ringacc_init(struct platform_device *pdev, sizeof(*ringacc->rings) * ringacc->num_rings, GFP_KERNEL); - ringacc->rings_inuse = devm_kcalloc(dev, - BITS_TO_LONGS(ringacc->num_rings), - sizeof(unsigned long), GFP_KERNEL); - ringacc->proxy_inuse = devm_kcalloc(dev, - BITS_TO_LONGS(ringacc->num_proxies), - sizeof(unsigned long), GFP_KERNEL); + ringacc->rings_inuse = devm_bitmap_zalloc(dev, ringacc->num_rings, + GFP_KERNEL); + ringacc->proxy_inuse = devm_bitmap_zalloc(dev, ringacc->num_proxies, + GFP_KERNEL); if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse) return -ENOMEM; @@ -1483,9 +1481,8 @@ struct k3_ringacc *k3_ringacc_dmarings_init(struct platform_device *pdev, sizeof(*ringacc->rings) * ringacc->num_rings * 2, GFP_KERNEL); - ringacc->rings_inuse = devm_kcalloc(dev, - BITS_TO_LONGS(ringacc->num_rings), - sizeof(unsigned long), GFP_KERNEL); + ringacc->rings_inuse = devm_bitmap_zalloc(dev, ringacc->num_rings, + GFP_KERNEL); if (!ringacc->rings || !ringacc->rings_inuse) return ERR_PTR(-ENOMEM); -- cgit v1.2.3 From 001d7c83704bc98c28cc6444d2e7518d12ed029f Mon Sep 17 00:00:00 2001 From: Lad Prabhakar Date: Wed, 5 Jan 2022 18:03:22 +0000 Subject: soc: ti: smartreflex: Use platform_get_irq_optional() to get the interrupt platform_get_resource(pdev, IORESOURCE_IRQ, ..) relies on static allocation of IRQ resources in DT core code, this causes an issue when using hierarchical interrupt domains using "interrupts" property in the node as this bypasses the hierarchical setup and messes up the irq chaining. In preparation for removal of static setup of IRQ resource from DT core code use platform_get_irq_optional(). While at it return 0 instead of returning ret in the probe success path. 
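In outline (mirroring the diff below, with the driver's 'sr_info' abbreviated to 'priv'): the interrupt is now requested by number rather than read as a static IRQ resource, and -ENXIO is tolerated because the interrupt is optional for this device.

        ret = platform_get_irq_optional(pdev, 0);
        if (ret < 0 && ret != -ENXIO)
                return dev_err_probe(&pdev->dev, ret, "failed to get IRQ resource\n");
        if (ret > 0)
                priv->irq = ret;
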
Signed-off-by: Lad Prabhakar Signed-off-by: Nishanth Menon Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20220105180323.8563-1-prabhakar.mahadev-lad.rj@bp.renesas.com --- drivers/soc/ti/smartreflex.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/ti/smartreflex.c b/drivers/soc/ti/smartreflex.c index b5b2fa538d5c..ad2bb72e640c 100644 --- a/drivers/soc/ti/smartreflex.c +++ b/drivers/soc/ti/smartreflex.c @@ -819,7 +819,7 @@ static int omap_sr_probe(struct platform_device *pdev) { struct omap_sr *sr_info; struct omap_sr_data *pdata = pdev->dev.platform_data; - struct resource *mem, *irq; + struct resource *mem; struct dentry *nvalue_dir; int i, ret = 0; @@ -844,7 +844,11 @@ static int omap_sr_probe(struct platform_device *pdev) if (IS_ERR(sr_info->base)) return PTR_ERR(sr_info->base); - irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + ret = platform_get_irq_optional(pdev, 0); + if (ret < 0 && ret != -ENXIO) + return dev_err_probe(&pdev->dev, ret, "failed to get IRQ resource\n"); + if (ret > 0) + sr_info->irq = ret; sr_info->fck = devm_clk_get(pdev->dev.parent, "fck"); if (IS_ERR(sr_info->fck)) @@ -870,9 +874,6 @@ static int omap_sr_probe(struct platform_device *pdev) sr_info->autocomp_active = false; sr_info->ip_type = pdata->ip_type; - if (irq) - sr_info->irq = irq->start; - sr_set_clk_length(sr_info); list_add(&sr_info->node, &sr_list); @@ -926,7 +927,7 @@ static int omap_sr_probe(struct platform_device *pdev) } - return ret; + return 0; err_debugfs: debugfs_remove_recursive(sr_info->dbg_dir); -- cgit v1.2.3 From a181bcfca937b34467e6cd63d7de6073176616e1 Mon Sep 17 00:00:00 2001 From: Peiwei Hu Date: Tue, 28 Dec 2021 18:01:03 +0800 Subject: firmware: ti_sci: inproper error handling of ti_sci_probe goto out instead of returning directly in error exiting Signed-off-by: Peiwei Hu Signed-off-by: Nishanth Menon Link: https://lore.kernel.org/r/tencent_0D5124AF8235001703711A7A09703F918806@qq.com --- drivers/firmware/ti_sci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 5ae2040b8b02..4697edc125b1 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c @@ -3412,7 +3412,7 @@ static int ti_sci_probe(struct platform_device *pdev) ret = register_restart_handler(&info->nb); if (ret) { dev_err(dev, "reboot registration fail(%d)\n", ret); - return ret; + goto out; } } -- cgit v1.2.3 From c3d66a164c726cc3b072232d3b6d87575d194084 Mon Sep 17 00:00:00 2001 From: Miaoqian Lin Date: Fri, 14 Jan 2022 06:28:40 +0000 Subject: soc: ti: wkup_m3_ipc: Fix IRQ check in wkup_m3_ipc_probe platform_get_irq() returns negative error number instead 0 on failure. And the doc of platform_get_irq() provides a usage example: int irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; Fix the check of return value to catch errors correctly. 
Fixes: cdd5de500b2c ("soc: ti: Add wkup_m3_ipc driver") Signed-off-by: Miaoqian Lin Signed-off-by: Nishanth Menon Acked-by: Dave Gerlach Link: https://lore.kernel.org/r/20220114062840.16620-1-linmq006@gmail.com --- drivers/soc/ti/wkup_m3_ipc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c index 72386bd393fe..2f03ced0f411 100644 --- a/drivers/soc/ti/wkup_m3_ipc.c +++ b/drivers/soc/ti/wkup_m3_ipc.c @@ -450,9 +450,9 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev) return PTR_ERR(m3_ipc->ipc_mem_base); irq = platform_get_irq(pdev, 0); - if (!irq) { + if (irq < 0) { dev_err(&pdev->dev, "no irq resource\n"); - return -ENXIO; + return irq; } ret = devm_request_irq(dev, irq, wkup_m3_txev_handler, -- cgit v1.2.3 From 3a99f121fe0bfa4b65ff74d9e980018caf54c2d4 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Thu, 27 Jan 2022 18:55:01 -0800 Subject: firmware: qcom: scm: Introduce pas_metadata context Starting with Qualcomm SM8450, some new security enhancements has been done in the secure world, which results in the requirement to keep the metadata segment accessible by the secure world from init_image() until auth_and_reset(). Introduce a "PAS metadata context" object that can be passed to init_image() for tracking the mapped memory and a related release function for client drivers to release the mapping once either auth_and_reset() has been invoked or in error handling paths on the way there. Signed-off-by: Bjorn Andersson Reviewed-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20220128025513.97188-2-bjorn.andersson@linaro.org --- drivers/firmware/qcom_scm.c | 39 ++++++++++++++++++++++++++++++++++----- drivers/soc/qcom/mdt_loader.c | 2 +- include/linux/qcom_scm.h | 10 +++++++++- 3 files changed, 44 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 927738882e54..00f8a50b9f6a 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -432,10 +432,16 @@ static void qcom_scm_set_download_mode(bool enable) * and optional blob of data used for authenticating the metadata * and the rest of the firmware * @size: size of the metadata + * @ctx: optional metadata context * - * Returns 0 on success. + * Return: 0 on success. + * + * Upon successful return, the PAS metadata context (@ctx) will be used to + * track the metadata allocation, this needs to be released by invoking + * qcom_scm_pas_metadata_release() by the caller. */ -int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size) +int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size, + struct qcom_scm_pas_metadata *ctx) { dma_addr_t mdata_phys; void *mdata_buf; @@ -464,7 +470,7 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size) ret = qcom_scm_clk_enable(); if (ret) - goto free_metadata; + goto out; desc.args[1] = mdata_phys; @@ -472,13 +478,36 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size) qcom_scm_clk_disable(); -free_metadata: - dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys); +out: + if (ret < 0 || !ctx) { + dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys); + } else if (ctx) { + ctx->ptr = mdata_buf; + ctx->phys = mdata_phys; + ctx->size = size; + } return ret ? 
: res.result[0]; } EXPORT_SYMBOL(qcom_scm_pas_init_image); +/** + * qcom_scm_pas_metadata_release() - release metadata context + * @ctx: metadata context + */ +void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx) +{ + if (!ctx->ptr) + return; + + dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys); + + ctx->ptr = NULL; + ctx->phys = 0; + ctx->size = 0; +} +EXPORT_SYMBOL(qcom_scm_pas_metadata_release); + /** * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral * for firmware loading diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c index 72fc2b539213..b00586db5391 100644 --- a/drivers/soc/qcom/mdt_loader.c +++ b/drivers/soc/qcom/mdt_loader.c @@ -171,7 +171,7 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, goto out; } - ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_len); + ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_len, NULL); kfree(metadata); if (ret) { diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index ca4a88d7cbdc..681748619890 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -68,8 +68,16 @@ extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus); extern void qcom_scm_cpu_power_down(u32 flags); extern int qcom_scm_set_remote_state(u32 state, u32 id); +struct qcom_scm_pas_metadata { + void *ptr; + dma_addr_t phys; + ssize_t size; +}; + extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, - size_t size); + size_t size, + struct qcom_scm_pas_metadata *ctx); +void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx); extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size); extern int qcom_scm_pas_auth_and_reset(u32 peripheral); -- cgit v1.2.3 From 26c1f17013a8292fa2bd59917bace883e1fe6afa Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Thu, 27 Jan 2022 18:55:02 -0800 Subject: soc: qcom: mdt_loader: Split out split-file-loader Spotted in a SM8450 device, the hash metadata segment is split out in a separate .bNN file which means that the logic for loading split out segmenents needs to be duplicated in qcom_mdt_read_metadata(). Split out the existing logic to a helper function that can be used in both code paths. 
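As background for this and the following mdt_loader patches, a short sketch (derived from the helper introduced below) of how a split-out segment file name is formed: the trailing "mdt"/"mbn" of the firmware name is overwritten with "bNN" for segment NN, so a hypothetical "modem.mdt" with segment 2 yields "modem.b02".

        seg_name = kstrdup(fw_name, GFP_KERNEL);        /* e.g. "modem.mdt" */
        if (!seg_name)
                return -ENOMEM;
        sprintf(seg_name + strlen(fw_name) - 3, "b%02d", segment);      /* -> "modem.b02" */
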
Signed-off-by: Bjorn Andersson Reviewed-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20220128025513.97188-3-bjorn.andersson@linaro.org --- drivers/soc/qcom/mdt_loader.c | 72 ++++++++++++++++++++++++------------------- 1 file changed, 41 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c index b00586db5391..c9e5bdfac371 100644 --- a/drivers/soc/qcom/mdt_loader.c +++ b/drivers/soc/qcom/mdt_loader.c @@ -31,6 +31,44 @@ static bool mdt_phdr_valid(const struct elf32_phdr *phdr) return true; } +static ssize_t mdt_load_split_segment(void *ptr, const struct elf32_phdr *phdrs, + unsigned int segment, const char *fw_name, + struct device *dev) +{ + const struct elf32_phdr *phdr = &phdrs[segment]; + const struct firmware *seg_fw; + char *seg_name; + ssize_t ret; + + if (strlen(fw_name) < 4) + return -EINVAL; + + seg_name = kstrdup(fw_name, GFP_KERNEL); + if (!seg_name) + return -ENOMEM; + + sprintf(seg_name + strlen(fw_name) - 3, "b%02d", segment); + ret = request_firmware_into_buf(&seg_fw, seg_name, dev, + ptr, phdr->p_filesz); + if (ret) { + dev_err(dev, "error %zd loading %s\n", ret, seg_name); + kfree(seg_name); + return ret; + } + + if (seg_fw->size != phdr->p_filesz) { + dev_err(dev, + "failed to load segment %d from truncated file %s\n", + segment, seg_name); + ret = -EINVAL; + } + + release_firmware(seg_fw); + kfree(seg_name); + + return ret; +} + /** * qcom_mdt_get_size() - acquire size of the memory region needed to load mdt * @fw: firmware object for the mdt file @@ -127,22 +165,19 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len) EXPORT_SYMBOL_GPL(qcom_mdt_read_metadata); static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, - const char *firmware, int pas_id, void *mem_region, + const char *fw_name, int pas_id, void *mem_region, phys_addr_t mem_phys, size_t mem_size, phys_addr_t *reloc_base, bool pas_init) { const struct elf32_phdr *phdrs; const struct elf32_phdr *phdr; const struct elf32_hdr *ehdr; - const struct firmware *seg_fw; phys_addr_t mem_reloc; phys_addr_t min_addr = PHYS_ADDR_MAX; phys_addr_t max_addr = 0; size_t metadata_len; - size_t fw_name_len; ssize_t offset; void *metadata; - char *fw_name; bool relocate = false; void *ptr; int ret = 0; @@ -154,14 +189,6 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, ehdr = (struct elf32_hdr *)fw->data; phdrs = (struct elf32_phdr *)(ehdr + 1); - fw_name_len = strlen(firmware); - if (fw_name_len <= 4) - return -EINVAL; - - fw_name = kstrdup(firmware, GFP_KERNEL); - if (!fw_name) - return -ENOMEM; - if (pas_init) { metadata = qcom_mdt_read_metadata(fw, &metadata_len); if (IS_ERR(metadata)) { @@ -258,25 +285,9 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz); } else if (phdr->p_filesz) { /* Firmware not large enough, load split-out segments */ - sprintf(fw_name + fw_name_len - 3, "b%02d", i); - ret = request_firmware_into_buf(&seg_fw, fw_name, dev, - ptr, phdr->p_filesz); - if (ret) { - dev_err(dev, "error %d loading %s\n", - ret, fw_name); + ret = mdt_load_split_segment(ptr, phdrs, i, fw_name, dev); + if (ret) break; - } - - if (seg_fw->size != phdr->p_filesz) { - dev_err(dev, - "failed to load segment %d from truncated file %s\n", - i, fw_name); - release_firmware(seg_fw); - ret = -EINVAL; - break; - } - - release_firmware(seg_fw); } if (phdr->p_memsz > phdr->p_filesz) @@ -287,7 +298,6 @@ static 
int __qcom_mdt_load(struct device *dev, const struct firmware *fw, *reloc_base = mem_reloc; out: - kfree(fw_name); return ret; } -- cgit v1.2.3 From 8bd42e2341a7857010001f08ee1729ced3b0e394 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Thu, 27 Jan 2022 18:55:03 -0800 Subject: soc: qcom: mdt_loader: Allow hash segment to be split out It's been observed that some firmware found in a Qualcomm SM8450 device has the hash table in a separate .bNN file. Use the newly extracted helper function to load this segment from the separate file, if it's determined that the hashes are not part of the already loaded firmware. In order to do this, the function needs access to the firmware basename and to provide more useful error messages a struct device to associate the errors with. Signed-off-by: Bjorn Andersson Reviewed-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20220128025513.97188-4-bjorn.andersson@linaro.org --- drivers/remoteproc/qcom_q6v5_mss.c | 7 ++++--- drivers/soc/qcom/mdt_loader.c | 29 +++++++++++++++++++++-------- include/linux/soc/qcom/mdt_loader.h | 6 ++++-- 3 files changed, 29 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index 43ea8455546c..a2c231a17b2b 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -928,7 +928,8 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc, regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0); } -static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) +static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw, + const char *fw_name) { unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS; dma_addr_t phys; @@ -939,7 +940,7 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) void *ptr; int ret; - metadata = qcom_mdt_read_metadata(fw, &size); + metadata = qcom_mdt_read_metadata(fw, &size, fw_name, qproc->dev); if (IS_ERR(metadata)) return PTR_ERR(metadata); @@ -1289,7 +1290,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc) /* Initialize the RMB validator */ writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); - ret = q6v5_mpss_init_image(qproc, fw); + ret = q6v5_mpss_init_image(qproc, fw, qproc->hexagon_mdt_image); if (ret) goto release_firmware; diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c index c9e5bdfac371..4372d8e38b29 100644 --- a/drivers/soc/qcom/mdt_loader.c +++ b/drivers/soc/qcom/mdt_loader.c @@ -121,13 +121,15 @@ EXPORT_SYMBOL_GPL(qcom_mdt_get_size); * * Return: pointer to data, or ERR_PTR() */ -void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len) +void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len, + const char *fw_name, struct device *dev) { const struct elf32_phdr *phdrs; const struct elf32_hdr *ehdr; size_t hash_offset; size_t hash_size; size_t ehdr_size; + ssize_t ret; void *data; ehdr = (struct elf32_hdr *)fw->data; @@ -149,14 +151,25 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len) if (!data) return ERR_PTR(-ENOMEM); - /* Is the header and hash already packed */ - if (ehdr_size + hash_size == fw->size) + /* Copy ELF header */ + memcpy(data, fw->data, ehdr_size); + + if (ehdr_size + hash_size == fw->size) { + /* Firmware is split and hash is packed following the ELF header */ hash_offset = phdrs[0].p_filesz; - else + memcpy(data + ehdr_size, fw->data + hash_offset, hash_size); + } else if (phdrs[1].p_offset + hash_size <= 
fw->size) { + /* Hash is in its own segment, but within the loaded file */ hash_offset = phdrs[1].p_offset; - - memcpy(data, fw->data, ehdr_size); - memcpy(data + ehdr_size, fw->data + hash_offset, hash_size); + memcpy(data + ehdr_size, fw->data + hash_offset, hash_size); + } else { + /* Hash is in its own segment, beyond the loaded file */ + ret = mdt_load_split_segment(data + ehdr_size, phdrs, 1, fw_name, dev); + if (ret) { + kfree(data); + return ERR_PTR(ret); + } + } *data_len = ehdr_size + hash_size; @@ -190,7 +203,7 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, phdrs = (struct elf32_phdr *)(ehdr + 1); if (pas_init) { - metadata = qcom_mdt_read_metadata(fw, &metadata_len); + metadata = qcom_mdt_read_metadata(fw, &metadata_len, fw_name, dev); if (IS_ERR(metadata)) { ret = PTR_ERR(metadata); dev_err(dev, "error %d reading firmware %s metadata\n", diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h index afd47217996b..46bdb7bace9a 100644 --- a/include/linux/soc/qcom/mdt_loader.h +++ b/include/linux/soc/qcom/mdt_loader.h @@ -23,7 +23,8 @@ int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw, const char *fw_name, int pas_id, void *mem_region, phys_addr_t mem_phys, size_t mem_size, phys_addr_t *reloc_base); -void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len); +void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len, + const char *fw_name, struct device *dev); #else /* !IS_ENABLED(CONFIG_QCOM_MDT_LOADER) */ @@ -51,7 +52,8 @@ static inline int qcom_mdt_load_no_init(struct device *dev, } static inline void *qcom_mdt_read_metadata(const struct firmware *fw, - size_t *data_len) + size_t *data_len, const char *fw_name, + struct device *dev) { return ERR_PTR(-ENODEV); } -- cgit v1.2.3 From 64fb5eb87d5815ff3811b7dc85f87abc5c38b580 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Thu, 27 Jan 2022 18:55:04 -0800 Subject: soc: qcom: mdt_loader: Allow hash to reside in any segment It's been observed that some firmware found on Qualcomm SM8450 devices carries the hash segment as the last segment in the ELF. Extend the support to allow picking the hash from any segment in the MDT/MBN. 
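In outline (condensed from the diff that follows), the hash segment is now located by its QCOM_MDT_TYPE_HASH flag instead of being assumed to be program header 1:

        for (i = 1; i < ehdr->e_phnum; i++) {
                if ((phdrs[i].p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) {
                        hash_segment = i;
                        break;
                }
        }
        if (!hash_segment)              /* no hash segment found in the image */
                return ERR_PTR(-EINVAL);
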
Signed-off-by: Bjorn Andersson Reviewed-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20220128025513.97188-5-bjorn.andersson@linaro.org --- drivers/soc/qcom/mdt_loader.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c index 4372d8e38b29..c5bd13b05c1a 100644 --- a/drivers/soc/qcom/mdt_loader.c +++ b/drivers/soc/qcom/mdt_loader.c @@ -126,9 +126,11 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len, { const struct elf32_phdr *phdrs; const struct elf32_hdr *ehdr; + unsigned int hash_segment = 0; size_t hash_offset; size_t hash_size; size_t ehdr_size; + unsigned int i; ssize_t ret; void *data; @@ -141,11 +143,20 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len, if (phdrs[0].p_type == PT_LOAD) return ERR_PTR(-EINVAL); - if ((phdrs[1].p_flags & QCOM_MDT_TYPE_MASK) != QCOM_MDT_TYPE_HASH) + for (i = 1; i < ehdr->e_phnum; i++) { + if ((phdrs[i].p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) { + hash_segment = i; + break; + } + } + + if (!hash_segment) { + dev_err(dev, "no hash segment found in %s\n", fw_name); return ERR_PTR(-EINVAL); + } ehdr_size = phdrs[0].p_filesz; - hash_size = phdrs[1].p_filesz; + hash_size = phdrs[hash_segment].p_filesz; data = kmalloc(ehdr_size + hash_size, GFP_KERNEL); if (!data) @@ -158,13 +169,13 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len, /* Firmware is split and hash is packed following the ELF header */ hash_offset = phdrs[0].p_filesz; memcpy(data + ehdr_size, fw->data + hash_offset, hash_size); - } else if (phdrs[1].p_offset + hash_size <= fw->size) { + } else if (phdrs[hash_segment].p_offset + hash_size <= fw->size) { /* Hash is in its own segment, but within the loaded file */ - hash_offset = phdrs[1].p_offset; + hash_offset = phdrs[hash_segment].p_offset; memcpy(data + ehdr_size, fw->data + hash_offset, hash_size); } else { /* Hash is in its own segment, beyond the loaded file */ - ret = mdt_load_split_segment(data + ehdr_size, phdrs, 1, fw_name, dev); + ret = mdt_load_split_segment(data + ehdr_size, phdrs, hash_segment, fw_name, dev); if (ret) { kfree(data); return ERR_PTR(ret); -- cgit v1.2.3 From ea90330fa329e4bee009223a1d5a7d9bcc364df2 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Thu, 27 Jan 2022 18:55:05 -0800 Subject: soc: qcom: mdt_loader: Extend check for split firmware Some of the Qualcomm SM8450 firmware files are padded such that the start of the first segment falls within the .mdt file but the segment to be loaded is stored as a separate .bNN file. Extend the condition to only attempt to read a segment inline if the entire segment would be available. 
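Put differently, after the change below a segment is copied straight out of the already loaded .mdt only when every byte of it is actually present there; otherwise the loader falls back to the segment's own split file. A compact restatement of the condition as a sketch (the helper name is made up for illustration):

  /* Read a segment inline only if it both starts and ends inside the loaded file */
  static bool mdt_segment_is_inline(const struct elf32_phdr *phdr,
                                    const struct firmware *fw)
  {
      return phdr->p_filesz &&
             phdr->p_offset < fw->size &&
             phdr->p_offset + phdr->p_filesz < fw->size;
  }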
Signed-off-by: Bjorn Andersson Reviewed-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20220128025513.97188-6-bjorn.andersson@linaro.org --- drivers/soc/qcom/mdt_loader.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c index c5bd13b05c1a..37e2e734bc5d 100644 --- a/drivers/soc/qcom/mdt_loader.c +++ b/drivers/soc/qcom/mdt_loader.c @@ -297,7 +297,8 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, ptr = mem_region + offset; - if (phdr->p_filesz && phdr->p_offset < fw->size) { + if (phdr->p_filesz && phdr->p_offset < fw->size && + phdr->p_offset + phdr->p_filesz < fw->size) { /* Firmware is large enough to be non-split */ if (phdr->p_offset + phdr->p_filesz > fw->size) { dev_err(dev, "file %s segment %d would be truncated\n", -- cgit v1.2.3 From 75d7213ce19135b8f309099f6618a03e9b397271 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Thu, 27 Jan 2022 18:55:06 -0800 Subject: soc: qcom: mdt_loader: Reorder parts of __qcom_mdt_load() Move the traversal of the program headers to the start of the function, to make sure that min_ and max_addr are in scope as the call to qcom_scm_pas_mem_setup() is moved in the next commit. Signed-off-by: Bjorn Andersson Reviewed-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20220128025513.97188-7-bjorn.andersson@linaro.org --- drivers/soc/qcom/mdt_loader.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c index 37e2e734bc5d..ee991784a738 100644 --- a/drivers/soc/qcom/mdt_loader.c +++ b/drivers/soc/qcom/mdt_loader.c @@ -213,6 +213,22 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, ehdr = (struct elf32_hdr *)fw->data; phdrs = (struct elf32_phdr *)(ehdr + 1); + for (i = 0; i < ehdr->e_phnum; i++) { + phdr = &phdrs[i]; + + if (!mdt_phdr_valid(phdr)) + continue; + + if (phdr->p_flags & QCOM_MDT_RELOCATABLE) + relocate = true; + + if (phdr->p_paddr < min_addr) + min_addr = phdr->p_paddr; + + if (phdr->p_paddr + phdr->p_memsz > max_addr) + max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); + } + if (pas_init) { metadata = qcom_mdt_read_metadata(fw, &metadata_len, fw_name, dev); if (IS_ERR(metadata)) { @@ -233,22 +249,6 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, } } - for (i = 0; i < ehdr->e_phnum; i++) { - phdr = &phdrs[i]; - - if (!mdt_phdr_valid(phdr)) - continue; - - if (phdr->p_flags & QCOM_MDT_RELOCATABLE) - relocate = true; - - if (phdr->p_paddr < min_addr) - min_addr = phdr->p_paddr; - - if (phdr->p_paddr + phdr->p_memsz > max_addr) - max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); - } - if (relocate) { if (pas_init) { ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, -- cgit v1.2.3 From ebeb20a9cd3f045a3371ccf3782b6cbcce62a7c9 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Thu, 27 Jan 2022 18:55:07 -0800 Subject: soc: qcom: mdt_loader: Always invoke PAS mem_setup After spelunking various old kernel trees no finds has been found indicating that the PAS mem_setup call should actually be made conditional on the image being relocatable. Group the two PAS operations together, to facilitate splitting them out in a following patch. 
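For context, the two PAS calls being grouped here are the first half of the secure boot sequence a remoteproc driver drives for a peripheral image. A simplified sketch of the whole flow, using the helpers as they look after this series (error messages trimmed, and the whole carveout size passed to mem_setup for brevity where the driver computes the exact span of the segments):

  static int example_pas_boot(struct device *dev, const struct firmware *fw,
                              const char *fw_name, int pas_id, void *mem_region,
                              phys_addr_t mem_phys, size_t mem_size)
  {
      phys_addr_t reloc_base;
      size_t metadata_len;
      void *metadata;
      int ret;

      /* 1) Hand the signed metadata (ELF header + hash segment) to TZ */
      metadata = qcom_mdt_read_metadata(fw, &metadata_len, fw_name, dev);
      if (IS_ERR(metadata))
          return PTR_ERR(metadata);
      ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_len, NULL);
      kfree(metadata);
      if (ret)
          return ret;

      /* 2) Describe the carveout the image will be authenticated in */
      ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
      if (ret)
          return ret;

      /* 3) Copy the loadable segments into that carveout */
      ret = qcom_mdt_load_no_init(dev, fw, fw_name, pas_id, mem_region,
                                  mem_phys, mem_size, &reloc_base);
      if (ret)
          return ret;

      /* 4) Ask TZ to authenticate the image and release the remote core */
      return qcom_scm_pas_auth_and_reset(pas_id);
  }

Later patches in this series pass a struct qcom_scm_pas_metadata pointer instead of NULL in step 1 so the metadata stays alive until step 4.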
Signed-off-by: Bjorn Andersson Reviewed-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20220128025513.97188-8-bjorn.andersson@linaro.org --- drivers/soc/qcom/mdt_loader.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c index ee991784a738..c8d43dc50cff 100644 --- a/drivers/soc/qcom/mdt_loader.c +++ b/drivers/soc/qcom/mdt_loader.c @@ -247,20 +247,17 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, ret, fw_name); goto out; } - } - if (relocate) { - if (pas_init) { - ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, - max_addr - min_addr); - if (ret) { - /* Unable to set up relocation */ - dev_err(dev, "error %d setting up firmware %s\n", - ret, fw_name); - goto out; - } + ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, max_addr - min_addr); + if (ret) { + /* Unable to set up relocation */ + dev_err(dev, "error %d setting up firmware %s\n", + ret, fw_name); + goto out; } + } + if (relocate) { /* * The image is relocatable, so offset each segment based on * the lowest segment address. -- cgit v1.2.3 From f4e526ff7e38e27bb87d53131d227a6fd6f73ab5 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Thu, 27 Jan 2022 18:55:08 -0800 Subject: soc: qcom: mdt_loader: Extract PAS operations Rather than passing a boolean to indicate if the PAS operations should be performed from within __mdt_load(), extract them to their own helper function. This will allow clients to invoke this directly, with some qcom_scm_pas_metadata context that they later needs to release, without further having to complicate the prototype of qcom_mdt_load(). Signed-off-by: Bjorn Andersson Reviewed-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20220128025513.97188-9-bjorn.andersson@linaro.org --- drivers/soc/qcom/mdt_loader.c | 110 ++++++++++++++++++++++++------------ include/linux/soc/qcom/mdt_loader.h | 11 ++++ 2 files changed, 85 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c index c8d43dc50cff..f0b1d969567c 100644 --- a/drivers/soc/qcom/mdt_loader.c +++ b/drivers/soc/qcom/mdt_loader.c @@ -188,6 +188,74 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len, } EXPORT_SYMBOL_GPL(qcom_mdt_read_metadata); +/** + * qcom_mdt_pas_init() - initialize PAS region for firmware loading + * @dev: device handle to associate resources with + * @fw: firmware object for the mdt file + * @firmware: name of the firmware, for construction of segment file names + * @pas_id: PAS identifier + * @mem_phys: physical address of allocated memory region + * @ctx: PAS metadata context, to be released by caller + * + * Returns 0 on success, negative errno otherwise. 
+ */ +int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw, + const char *fw_name, int pas_id, phys_addr_t mem_phys, + struct qcom_scm_pas_metadata *ctx) +{ + const struct elf32_phdr *phdrs; + const struct elf32_phdr *phdr; + const struct elf32_hdr *ehdr; + phys_addr_t min_addr = PHYS_ADDR_MAX; + phys_addr_t max_addr = 0; + size_t metadata_len; + void *metadata; + int ret; + int i; + + ehdr = (struct elf32_hdr *)fw->data; + phdrs = (struct elf32_phdr *)(ehdr + 1); + + for (i = 0; i < ehdr->e_phnum; i++) { + phdr = &phdrs[i]; + + if (!mdt_phdr_valid(phdr)) + continue; + + if (phdr->p_paddr < min_addr) + min_addr = phdr->p_paddr; + + if (phdr->p_paddr + phdr->p_memsz > max_addr) + max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); + } + + metadata = qcom_mdt_read_metadata(fw, &metadata_len, fw_name, dev); + if (IS_ERR(metadata)) { + ret = PTR_ERR(metadata); + dev_err(dev, "error %d reading firmware %s metadata\n", ret, fw_name); + goto out; + } + + ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_len, ctx); + kfree(metadata); + if (ret) { + /* Invalid firmware metadata */ + dev_err(dev, "error %d initializing firmware %s\n", ret, fw_name); + goto out; + } + + ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, max_addr - min_addr); + if (ret) { + /* Unable to set up relocation */ + dev_err(dev, "error %d setting up firmware %s\n", ret, fw_name); + goto out; + } + +out: + return ret; +} +EXPORT_SYMBOL_GPL(qcom_mdt_pas_init); + static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, const char *fw_name, int pas_id, void *mem_region, phys_addr_t mem_phys, size_t mem_size, @@ -198,10 +266,7 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, const struct elf32_hdr *ehdr; phys_addr_t mem_reloc; phys_addr_t min_addr = PHYS_ADDR_MAX; - phys_addr_t max_addr = 0; - size_t metadata_len; ssize_t offset; - void *metadata; bool relocate = false; void *ptr; int ret = 0; @@ -224,37 +289,6 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, if (phdr->p_paddr < min_addr) min_addr = phdr->p_paddr; - - if (phdr->p_paddr + phdr->p_memsz > max_addr) - max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); - } - - if (pas_init) { - metadata = qcom_mdt_read_metadata(fw, &metadata_len, fw_name, dev); - if (IS_ERR(metadata)) { - ret = PTR_ERR(metadata); - dev_err(dev, "error %d reading firmware %s metadata\n", - ret, fw_name); - goto out; - } - - ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_len, NULL); - - kfree(metadata); - if (ret) { - /* Invalid firmware metadata */ - dev_err(dev, "error %d initializing firmware %s\n", - ret, fw_name); - goto out; - } - - ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, max_addr - min_addr); - if (ret) { - /* Unable to set up relocation */ - dev_err(dev, "error %d setting up firmware %s\n", - ret, fw_name); - goto out; - } } if (relocate) { @@ -319,8 +353,6 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, if (reloc_base) *reloc_base = mem_reloc; -out: - return ret; } @@ -342,6 +374,12 @@ int qcom_mdt_load(struct device *dev, const struct firmware *fw, phys_addr_t mem_phys, size_t mem_size, phys_addr_t *reloc_base) { + int ret; + + ret = qcom_mdt_pas_init(dev, fw, firmware, pas_id, mem_phys, NULL); + if (ret) + return ret; + return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys, mem_size, reloc_base, true); } diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h index 46bdb7bace9a..9e8e60421192 100644 
--- a/include/linux/soc/qcom/mdt_loader.h +++ b/include/linux/soc/qcom/mdt_loader.h @@ -10,10 +10,14 @@ struct device; struct firmware; +struct qcom_scm_pas_metadata; #if IS_ENABLED(CONFIG_QCOM_MDT_LOADER) ssize_t qcom_mdt_get_size(const struct firmware *fw); +int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw, + const char *fw_name, int pas_id, phys_addr_t mem_phys, + struct qcom_scm_pas_metadata *pas_metadata_ctx); int qcom_mdt_load(struct device *dev, const struct firmware *fw, const char *fw_name, int pas_id, void *mem_region, phys_addr_t mem_phys, size_t mem_size, @@ -33,6 +37,13 @@ static inline ssize_t qcom_mdt_get_size(const struct firmware *fw) return -ENODEV; } +static inline int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw, + const char *fw_name, int pas_id, phys_addr_t mem_phys, + struct qcom_scm_pas_metadata *pas_metadata_ctx) +{ + return -ENODEV; +} + static inline int qcom_mdt_load(struct device *dev, const struct firmware *fw, const char *fw_name, int pas_id, void *mem_region, phys_addr_t mem_phys, -- cgit v1.2.3 From 94749156e6bc0b3c2bb65e2a95babdef44b7d591 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Thu, 27 Jan 2022 18:55:09 -0800 Subject: remoteproc: qcom: pas: Carry PAS metadata context Starting with Qualcomm SM8450 the metadata object shared with the secure world during authentication and booting of a remoteproc needs to be alive from init_image() until auth_and_reset(). Use the newly introduced "PAS metadata context" object to track this context from load until the firmware has been booted. In the even that load is performed but the process for some reason doesn't reach auth_and_reset the unprepare callback is used to clean up the allocated memory. Signed-off-by: Bjorn Andersson Reviewed-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20220128025513.97188-10-bjorn.andersson@linaro.org --- drivers/remoteproc/qcom_q6v5_pas.c | 32 +++++++++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index 184bb7cdf95a..5e806f657fec 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -79,6 +79,8 @@ struct qcom_adsp { struct qcom_rproc_subdev smd_subdev; struct qcom_rproc_ssr ssr_subdev; struct qcom_sysmon *sysmon; + + struct qcom_scm_pas_metadata pas_metadata; }; static void adsp_minidump(struct rproc *rproc) @@ -126,14 +128,34 @@ static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds, } } +static int adsp_unprepare(struct rproc *rproc) +{ + struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; + + /* + * adsp_load() did pass pas_metadata to the SCM driver for storing + * metadata context. It might have been released already if + * auth_and_reset() was successful, but in other cases clean it up + * here. 
+ */ + qcom_scm_pas_metadata_release(&adsp->pas_metadata); + + return 0; +} + static int adsp_load(struct rproc *rproc, const struct firmware *fw) { struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; int ret; - ret = qcom_mdt_load(adsp->dev, fw, rproc->firmware, adsp->pas_id, - adsp->mem_region, adsp->mem_phys, adsp->mem_size, - &adsp->mem_reloc); + ret = qcom_mdt_pas_init(adsp->dev, fw, rproc->firmware, adsp->pas_id, + adsp->mem_phys, &adsp->pas_metadata); + if (ret) + return ret; + + ret = qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, adsp->pas_id, + adsp->mem_region, adsp->mem_phys, adsp->mem_size, + &adsp->mem_reloc); if (ret) return ret; @@ -185,6 +207,8 @@ static int adsp_start(struct rproc *rproc) goto disable_px_supply; } + qcom_scm_pas_metadata_release(&adsp->pas_metadata); + return 0; disable_px_supply: @@ -255,6 +279,7 @@ static unsigned long adsp_panic(struct rproc *rproc) } static const struct rproc_ops adsp_ops = { + .unprepare = adsp_unprepare, .start = adsp_start, .stop = adsp_stop, .da_to_va = adsp_da_to_va, @@ -264,6 +289,7 @@ static const struct rproc_ops adsp_ops = { }; static const struct rproc_ops adsp_minidump_ops = { + .unprepare = adsp_unprepare, .start = adsp_start, .stop = adsp_stop, .da_to_va = adsp_da_to_va, -- cgit v1.2.3 From 5cef9b48458dee48c62f61deca4d3df87b66b52b Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Thu, 27 Jan 2022 18:55:11 -0800 Subject: remoteproc: qcom: pas: Add SM8450 remoteproc support Add audio, compute, sensor and modem remoteproc compatibles to the PAS remoteproc driver. The resources needed for each one matches those of SM8350, so its descs are reused. Signed-off-by: Bjorn Andersson Reviewed-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20220128025513.97188-12-bjorn.andersson@linaro.org --- drivers/remoteproc/qcom_q6v5_pas.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index 5e806f657fec..1ae47cc153e5 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -879,6 +879,10 @@ static const struct of_device_id adsp_of_match[] = { { .compatible = "qcom,sm8350-cdsp-pas", .data = &sm8350_cdsp_resource}, { .compatible = "qcom,sm8350-slpi-pas", .data = &sm8350_slpi_resource}, { .compatible = "qcom,sm8350-mpss-pas", .data = &mpss_resource_init}, + { .compatible = "qcom,sm8450-adsp-pas", .data = &sm8350_adsp_resource}, + { .compatible = "qcom,sm8450-cdsp-pas", .data = &sm8350_cdsp_resource}, + { .compatible = "qcom,sm8450-slpi-pas", .data = &sm8350_slpi_resource}, + { .compatible = "qcom,sm8450-mpss-pas", .data = &mpss_resource_init}, { }, }; MODULE_DEVICE_TABLE(of, adsp_of_match); -- cgit v1.2.3 From 0ee30ace67e425ab83a1673bf51f50b577328cf9 Mon Sep 17 00:00:00 2001 From: Stephan Gerhold Date: Wed, 1 Dec 2021 14:05:02 +0100 Subject: cpuidle: qcom-spm: Check if any CPU is managed by SPM At the moment, the "qcom-spm-cpuidle" platform device is always created, even if none of the CPUs is actually managed by the SPM. On non-qcom platforms this will result in infinite probe-deferral due to the failing qcom_scm_is_available() call. To avoid this, look through the CPU DT nodes and check if there is actually any CPU managed by a SPM (as indicated by the qcom,saw property). It should also be available because e.g. MSM8916 has qcom,saw defined but it's typically not enabled with ARM64/PSCI firmwares. 
This is needed in preparation of a follow-up change that calls qcom_scm_set_warm_boot_addr() a single time before registering any cpuidle drivers. Otherwise this call might be made even on devices that have this driver enabled but actually make use of PSCI. Fixes: 60f3692b5f0b ("cpuidle: qcom_spm: Detach state machine from main SPM handling") Reported-by: Marek Szyprowski Link: https://lore.kernel.org/r/86e3e09f-a8d7-3dff-3fc6-ddd7d30c5d78@samsung.com/ Signed-off-by: Stephan Gerhold Tested-by: Marek Szyprowski Acked-by: Daniel Lezcano Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20211201130505.257379-2-stephan@gerhold.net --- drivers/cpuidle/cpuidle-qcom-spm.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'drivers') diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c index 01e77913a414..5f27dcc6c110 100644 --- a/drivers/cpuidle/cpuidle-qcom-spm.c +++ b/drivers/cpuidle/cpuidle-qcom-spm.c @@ -155,6 +155,22 @@ static struct platform_driver spm_cpuidle_driver = { }, }; +static bool __init qcom_spm_find_any_cpu(void) +{ + struct device_node *cpu_node, *saw_node; + + for_each_of_cpu_node(cpu_node) { + saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0); + if (of_device_is_available(saw_node)) { + of_node_put(saw_node); + of_node_put(cpu_node); + return true; + } + of_node_put(saw_node); + } + return false; +} + static int __init qcom_spm_cpuidle_init(void) { struct platform_device *pdev; @@ -164,6 +180,10 @@ static int __init qcom_spm_cpuidle_init(void) if (ret) return ret; + /* Make sure there is actually any CPU managed by the SPM */ + if (!qcom_spm_find_any_cpu()) + return 0; + pdev = platform_device_register_simple("qcom-spm-cpuidle", -1, NULL, 0); if (IS_ERR(pdev)) { -- cgit v1.2.3 From 7734c4b507cefbcf2f7a2a806e79c43e52528c5f Mon Sep 17 00:00:00 2001 From: Stephan Gerhold Date: Wed, 1 Dec 2021 14:05:03 +0100 Subject: firmware: qcom: scm: Simplify set_cold/warm_boot_addr() The qcom_scm_set_cold/warm_boot_addr() implementations have a lot of functionality that is actually not used. For example, set_warm_boot_addr() caches the last used entry address and skips making the SCM call when the entry address is unchanged. But there is actually just a single call of qcom_scm_set_warm_boot_addr() in the whole kernel tree, which always configures the entry address to cpu_resume_arm(). Simplify this by having a single qcom_scm_set_boot_addr() function for both cold and warm boot address. This is totally sufficient for the functionality supported in the mainline tree. 
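As a quick sanity check of the new bit tables: for a system with CPUs 0-3 the accumulated flags are identical to what the old per-CPU defines produced, so behaviour is unchanged. For illustration:

  /*
   * Warm boot, CPUs 0-3: BIT(2) | BIT(1) | BIT(4) | BIT(6) = 0x56,
   * the same as QCOM_SCM_FLAG_WARMBOOT_CPU0..3 (0x04 | 0x02 | 0x10 | 0x40).
   * Cold boot, CPUs 0-3: 0 | BIT(0) | BIT(3) | BIT(5) = 0x29,
   * the same as QCOM_SCM_FLAG_COLDBOOT_CPU0..3 (0x00 | 0x01 | 0x08 | 0x20).
   */
  unsigned int warm_flags = BIT(2) | BIT(1) | BIT(4) | BIT(6);  /* 0x56 */
  unsigned int cold_flags = BIT(0) | BIT(3) | BIT(5);           /* 0x29 */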
Signed-off-by: Stephan Gerhold Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20211201130505.257379-3-stephan@gerhold.net --- drivers/firmware/qcom_scm.c | 105 +++++++++++--------------------------------- drivers/firmware/qcom_scm.h | 1 + 2 files changed, 27 insertions(+), 79 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 00f8a50b9f6a..1bcc139c9165 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -49,26 +49,12 @@ struct qcom_scm_mem_map_info { __le64 mem_size; }; -#define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00 -#define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01 -#define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08 -#define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20 - -#define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04 -#define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02 -#define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10 -#define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40 - -struct qcom_scm_wb_entry { - int flag; - void *entry; +/* Each bit configures cold/warm boot address for one of the 4 CPUs */ +static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = { + 0, BIT(0), BIT(3), BIT(5) }; - -static struct qcom_scm_wb_entry qcom_scm_wb[] = { - { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 }, - { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 }, - { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 }, - { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 }, +static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = { + BIT(2), BIT(1), BIT(4), BIT(6) }; static const char * const qcom_scm_convention_names[] = { @@ -257,49 +243,41 @@ static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id, return ret ? false : !!res.result[0]; } -/** - * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus - * @entry: Entry point function for the cpus - * @cpus: The cpumask of cpus that will use the entry point - * - * Set the Linux entry point for the SCM to transfer control to when coming - * out of a power down. CPU power down may be executed on cpuidle or hotplug. - */ -int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) +static int qcom_scm_set_boot_addr(void *entry, const cpumask_t *cpus, + const u8 *cpu_bits) { - int ret; - int flags = 0; int cpu; + unsigned int flags = 0; struct qcom_scm_desc desc = { .svc = QCOM_SCM_SVC_BOOT, .cmd = QCOM_SCM_BOOT_SET_ADDR, .arginfo = QCOM_SCM_ARGS(2), + .owner = ARM_SMCCC_OWNER_SIP, }; - /* - * Reassign only if we are switching from hotplug entry point - * to cpuidle entry point or vice versa. - */ for_each_cpu(cpu, cpus) { - if (entry == qcom_scm_wb[cpu].entry) - continue; - flags |= qcom_scm_wb[cpu].flag; + if (cpu >= QCOM_SCM_BOOT_MAX_CPUS) + return -EINVAL; + flags |= cpu_bits[cpu]; } - /* No change in entry function */ - if (!flags) - return 0; - desc.args[0] = flags; desc.args[1] = virt_to_phys(entry); - ret = qcom_scm_call(__scm->dev, &desc, NULL); - if (!ret) { - for_each_cpu(cpu, cpus) - qcom_scm_wb[cpu].entry = entry; - } + return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL); +} - return ret; +/** + * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus + * @entry: Entry point function for the cpus + * @cpus: The cpumask of cpus that will use the entry point + * + * Set the Linux entry point for the SCM to transfer control to when coming + * out of a power down. CPU power down may be executed on cpuidle or hotplug. 
+ */ +int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) +{ + return qcom_scm_set_boot_addr(entry, cpus, qcom_scm_cpu_warm_bits); } EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr); @@ -307,41 +285,10 @@ EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr); * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus * @entry: Entry point function for the cpus * @cpus: The cpumask of cpus that will use the entry point - * - * Set the cold boot address of the cpus. Any cpu outside the supported - * range would be removed from the cpu present mask. */ int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) { - int flags = 0; - int cpu; - int scm_cb_flags[] = { - QCOM_SCM_FLAG_COLDBOOT_CPU0, - QCOM_SCM_FLAG_COLDBOOT_CPU1, - QCOM_SCM_FLAG_COLDBOOT_CPU2, - QCOM_SCM_FLAG_COLDBOOT_CPU3, - }; - struct qcom_scm_desc desc = { - .svc = QCOM_SCM_SVC_BOOT, - .cmd = QCOM_SCM_BOOT_SET_ADDR, - .arginfo = QCOM_SCM_ARGS(2), - .owner = ARM_SMCCC_OWNER_SIP, - }; - - if (!cpus || cpumask_empty(cpus)) - return -EINVAL; - - for_each_cpu(cpu, cpus) { - if (cpu < ARRAY_SIZE(scm_cb_flags)) - flags |= scm_cb_flags[cpu]; - else - set_cpu_present(cpu, false); - } - - desc.args[0] = flags; - desc.args[1] = virt_to_phys(entry); - - return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL); + return qcom_scm_set_boot_addr(entry, cpus, qcom_scm_cpu_cold_bits); } EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr); diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h index a348f2c214e5..122b7baa3e2d 100644 --- a/drivers/firmware/qcom_scm.h +++ b/drivers/firmware/qcom_scm.h @@ -80,6 +80,7 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, #define QCOM_SCM_BOOT_SET_DLOAD_MODE 0x10 #define QCOM_SCM_BOOT_SET_REMOTE_STATE 0x0a #define QCOM_SCM_FLUSH_FLAG_MASK 0x3 +#define QCOM_SCM_BOOT_MAX_CPUS 4 #define QCOM_SCM_SVC_PIL 0x02 #define QCOM_SCM_PIL_PAS_INIT_IMAGE 0x01 -- cgit v1.2.3 From 52beb1fc237d67cdc64277dc90047767a6fc52d7 Mon Sep 17 00:00:00 2001 From: Stephan Gerhold Date: Wed, 1 Dec 2021 14:05:04 +0100 Subject: firmware: qcom: scm: Drop cpumask parameter from set_boot_addr() qcom_scm_set_cold/warm_boot_addr() currently take a cpumask parameter, but it's not very useful because at the end we always set the same entry address for all CPUs. This also allows speeding up probe of cpuidle-qcom-spm a bit because only one SCM call needs to be made to the TrustZone firmware, instead of one per CPU. The main reason for this change is that it allows implementing the "multi-cluster" variant of the set_boot_addr() call more easily without having to rely on functions that break in certain build configurations or that are not exported to modules. 
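From a caller's point of view the change boils down to a single registration-time call instead of one per CPU. A sketch matching the cpuidle-qcom-spm hunk below:

  /*
   * Before: qcom_scm_set_warm_boot_addr(cpu_resume_arm, cpumask_of(cpu))
   * was issued from the per-CPU registration loop.  After: one call at
   * probe time covers every present CPU.
   */
  ret = qcom_scm_set_warm_boot_addr(cpu_resume_arm);
  if (ret)
      return dev_err_probe(&pdev->dev, ret, "set warm boot addr failed");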
Signed-off-by: Stephan Gerhold Acked-by: Daniel Lezcano Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20211201130505.257379-4-stephan@gerhold.net --- arch/arm/mach-qcom/platsmp.c | 3 +-- drivers/cpuidle/cpuidle-qcom-spm.c | 8 ++++---- drivers/firmware/qcom_scm.c | 19 ++++++++----------- include/linux/qcom_scm.h | 4 ++-- 4 files changed, 15 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c index 58a4228455ce..65a0d5ce2bb3 100644 --- a/arch/arm/mach-qcom/platsmp.c +++ b/arch/arm/mach-qcom/platsmp.c @@ -357,8 +357,7 @@ static void __init qcom_smp_prepare_cpus(unsigned int max_cpus) { int cpu; - if (qcom_scm_set_cold_boot_addr(secondary_startup_arm, - cpu_present_mask)) { + if (qcom_scm_set_cold_boot_addr(secondary_startup_arm)) { for_each_present_cpu(cpu) { if (cpu == smp_processor_id()) continue; diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c index 5f27dcc6c110..beedf22cbe78 100644 --- a/drivers/cpuidle/cpuidle-qcom-spm.c +++ b/drivers/cpuidle/cpuidle-qcom-spm.c @@ -122,10 +122,6 @@ static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu) if (ret <= 0) return ret ? : -ENODEV; - ret = qcom_scm_set_warm_boot_addr(cpu_resume_arm, cpumask_of(cpu)); - if (ret) - return ret; - return cpuidle_register(&data->cpuidle_driver, NULL); } @@ -136,6 +132,10 @@ static int spm_cpuidle_drv_probe(struct platform_device *pdev) if (!qcom_scm_is_available()) return -EPROBE_DEFER; + ret = qcom_scm_set_warm_boot_addr(cpu_resume_arm); + if (ret) + return dev_err_probe(&pdev->dev, ret, "set warm boot addr failed"); + for_each_possible_cpu(cpu) { ret = spm_cpuidle_register(&pdev->dev, cpu); if (ret && ret != -ENODEV) { diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 1bcc139c9165..0382f9fa4501 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -243,8 +243,7 @@ static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id, return ret ? false : !!res.result[0]; } -static int qcom_scm_set_boot_addr(void *entry, const cpumask_t *cpus, - const u8 *cpu_bits) +static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits) { int cpu; unsigned int flags = 0; @@ -255,7 +254,7 @@ static int qcom_scm_set_boot_addr(void *entry, const cpumask_t *cpus, .owner = ARM_SMCCC_OWNER_SIP, }; - for_each_cpu(cpu, cpus) { + for_each_present_cpu(cpu) { if (cpu >= QCOM_SCM_BOOT_MAX_CPUS) return -EINVAL; flags |= cpu_bits[cpu]; @@ -268,27 +267,25 @@ static int qcom_scm_set_boot_addr(void *entry, const cpumask_t *cpus, } /** - * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus + * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus * @entry: Entry point function for the cpus - * @cpus: The cpumask of cpus that will use the entry point * * Set the Linux entry point for the SCM to transfer control to when coming * out of a power down. CPU power down may be executed on cpuidle or hotplug. 
*/ -int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) +int qcom_scm_set_warm_boot_addr(void *entry) { - return qcom_scm_set_boot_addr(entry, cpus, qcom_scm_cpu_warm_bits); + return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits); } EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr); /** - * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus + * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus * @entry: Entry point function for the cpus - * @cpus: The cpumask of cpus that will use the entry point */ -int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) +int qcom_scm_set_cold_boot_addr(void *entry) { - return qcom_scm_set_boot_addr(entry, cpus, qcom_scm_cpu_cold_bits); + return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits); } EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr); diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 681748619890..f8335644a01a 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -63,8 +63,8 @@ enum qcom_scm_ice_cipher { extern bool qcom_scm_is_available(void); -extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus); -extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus); +extern int qcom_scm_set_cold_boot_addr(void *entry); +extern int qcom_scm_set_warm_boot_addr(void *entry); extern void qcom_scm_cpu_power_down(u32 flags); extern int qcom_scm_set_remote_state(u32 state, u32 id); -- cgit v1.2.3 From f60a317bcbea5c5b8923d6de6c7288850fdd83fb Mon Sep 17 00:00:00 2001 From: Stephan Gerhold Date: Wed, 1 Dec 2021 14:05:05 +0100 Subject: firmware: qcom: scm: Add support for MC boot address API It looks like the old QCOM_SCM_BOOT_SET_ADDR API is broken on some MSM8916 firmware versions that implement the newer SMC32 calling convention. It just returns -EINVAL no matter which arguments are being passed. This does not cause any problems downstream because it first tries to use the new multi-cluster API replacement which is working fine. Implement support for the multi-cluster variant of the SCM call by attempting it first but still fallback to the old call in case of an error. Also, to be absolutely sure only use the multi-cluster variant with the SMC calling convention since older platforms should not need this. Signed-off-by: Stephan Gerhold Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20211201130505.257379-5-stephan@gerhold.net --- drivers/firmware/qcom_scm.c | 32 ++++++++++++++++++++++++++++++-- drivers/firmware/qcom_scm.h | 4 ++++ 2 files changed, 34 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 0382f9fa4501..491bbf70c94a 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -266,6 +266,28 @@ static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits) return qcom_scm_call_atomic(__scm ? 
__scm->dev : NULL, &desc, NULL); } +static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_BOOT, + .cmd = QCOM_SCM_BOOT_SET_ADDR_MC, + .owner = ARM_SMCCC_OWNER_SIP, + .arginfo = QCOM_SCM_ARGS(6), + .args = { + virt_to_phys(entry), + /* Apply to all CPUs in all affinity levels */ + ~0ULL, ~0ULL, ~0ULL, ~0ULL, + flags, + }, + }; + + /* Need a device for DMA of the additional arguments */ + if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY) + return -EOPNOTSUPP; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} + /** * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus * @entry: Entry point function for the cpus @@ -275,7 +297,10 @@ static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits) */ int qcom_scm_set_warm_boot_addr(void *entry) { - return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits); + if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT)) + /* Fallback to old SCM call */ + return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits); + return 0; } EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr); @@ -285,7 +310,10 @@ EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr); */ int qcom_scm_set_cold_boot_addr(void *entry) { - return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits); + if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT)) + /* Fallback to old SCM call */ + return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits); + return 0; } EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr); diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h index 122b7baa3e2d..0d51eef2472f 100644 --- a/drivers/firmware/qcom_scm.h +++ b/drivers/firmware/qcom_scm.h @@ -78,9 +78,13 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, #define QCOM_SCM_BOOT_SET_ADDR 0x01 #define QCOM_SCM_BOOT_TERMINATE_PC 0x02 #define QCOM_SCM_BOOT_SET_DLOAD_MODE 0x10 +#define QCOM_SCM_BOOT_SET_ADDR_MC 0x11 #define QCOM_SCM_BOOT_SET_REMOTE_STATE 0x0a #define QCOM_SCM_FLUSH_FLAG_MASK 0x3 #define QCOM_SCM_BOOT_MAX_CPUS 4 +#define QCOM_SCM_BOOT_MC_FLAG_AARCH64 BIT(0) +#define QCOM_SCM_BOOT_MC_FLAG_COLDBOOT BIT(1) +#define QCOM_SCM_BOOT_MC_FLAG_WARMBOOT BIT(2) #define QCOM_SCM_SVC_PIL 0x02 #define QCOM_SCM_PIL_PAS_INIT_IMAGE 0x01 -- cgit v1.2.3 From 12fbfd665fc473800d25d0f3ca4617c82cff42dd Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Fri, 4 Feb 2022 13:55:43 +0100 Subject: memory: mtk-smi: Enable sleep ctrl safety function for MT8195 Enable the sleep ctrl function to wait until all the queued commands are executed before suspending the LARBs, like done for MT8186. 
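The MTK_SMI_FLAG_SLEEP_CTL flag selected below makes the suspend path raise a sleep-protect request and then poll the LARB until everything already queued has drained before its clocks are cut. The register and bit names in this sketch are assumptions made purely for illustration and are not taken from this patch:

  #define EXAMPLE_SLP_CON   0x0c      /* hypothetical sleep-control register */
  #define EXAMPLE_SLP_EN    BIT(0)    /* hypothetical request bit */
  #define EXAMPLE_SLP_RDY   BIT(16)   /* hypothetical "drained" status bit */

  static int example_larb_sleep(void __iomem *base)
  {
      u32 val;

      /* Ask the LARB to stop accepting new commands ... */
      writel_relaxed(EXAMPLE_SLP_EN, base + EXAMPLE_SLP_CON);

      /* ... then wait for the commands already queued to complete */
      return readl_poll_timeout_atomic(base + EXAMPLE_SLP_CON, val,
                                       val & EXAMPLE_SLP_RDY, 3, 100);
  }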
Signed-off-by: AngeloGioacchino Del Regno Reviewed-by: Macpaul Lin Link: https://lore.kernel.org/r/20220204125543.1189151-1-angelogioacchino.delregno@collabora.com Signed-off-by: Krzysztof Kozlowski --- drivers/memory/mtk-smi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index 377ef019c4cf..903c2202c3b7 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -366,7 +366,8 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8192 = { static const struct mtk_smi_larb_gen mtk_smi_larb_mt8195 = { .config_port = mtk_smi_larb_config_port_gen2_general, - .flags_general = MTK_SMI_FLAG_THRT_UPDATE | MTK_SMI_FLAG_SW_FLAG, + .flags_general = MTK_SMI_FLAG_THRT_UPDATE | MTK_SMI_FLAG_SW_FLAG | + MTK_SMI_FLAG_SLEEP_CTL, .ostd = mtk_smi_larb_mt8195_ostd, }; -- cgit v1.2.3 From cb5508e47e60b85ac033edd8c52245ad51360eb4 Mon Sep 17 00:00:00 2001 From: Biju Das Date: Fri, 21 Jan 2022 01:41:17 +0000 Subject: soc: renesas: Add support for reading product revision for RZ/G2L family As per RZ/G2L HW manual (Rev.1.00 Sep, 2021) DEV_ID [31:28] indicates product revision. Use this information to populate the revision info for RZ/G2L family. Signed-off-by: Biju Das Signed-off-by: Lad Prabhakar Link: https://lore.kernel.org/r/20220121014117.21248-1-prabhakar.mahadev-lad.rj@bp.renesas.com Signed-off-by: Geert Uytterhoeven --- drivers/soc/renesas/renesas-soc.c | 55 +++++++++++++++++++++++---------------- 1 file changed, 33 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c index 8a672d0a4dae..92c7b42250ee 100644 --- a/drivers/soc/renesas/renesas-soc.c +++ b/drivers/soc/renesas/renesas-soc.c @@ -393,9 +393,11 @@ static int __init renesas_soc_init(void) const struct renesas_soc *soc; const struct renesas_id *id; void __iomem *chipid = NULL; + const char *rev_prefix = ""; struct soc_device *soc_dev; struct device_node *np; const char *soc_id; + int ret; match = of_match_node(renesas_socs, of_root); if (!match) @@ -416,6 +418,17 @@ static int __init renesas_soc_init(void) chipid = ioremap(family->reg, 4); } + soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); + if (!soc_dev_attr) + return -ENOMEM; + + np = of_find_node_by_path("/"); + of_property_read_string(np, "model", &soc_dev_attr->machine); + of_node_put(np); + + soc_dev_attr->family = kstrdup_const(family->name, GFP_KERNEL); + soc_dev_attr->soc_id = kstrdup_const(soc_id, GFP_KERNEL); + if (chipid) { product = readl(chipid + id->offset); iounmap(chipid); @@ -430,41 +443,39 @@ static int __init renesas_soc_init(void) eshi = ((product >> 4) & 0x0f) + 1; eslo = product & 0xf; + soc_dev_attr->revision = kasprintf(GFP_KERNEL, "ES%u.%u", + eshi, eslo); + } else if (id == &id_rzg2l) { + eshi = ((product >> 28) & 0x0f); + soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%u", + eshi); + rev_prefix = "Rev "; } if (soc->id && ((product & id->mask) >> __ffs(id->mask)) != soc->id) { pr_warn("SoC mismatch (product = 0x%x)\n", product); - return -ENODEV; + ret = -ENODEV; + goto free_soc_dev_attr; } } - soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); - if (!soc_dev_attr) - return -ENOMEM; - - np = of_find_node_by_path("/"); - of_property_read_string(np, "model", &soc_dev_attr->machine); - of_node_put(np); - - soc_dev_attr->family = kstrdup_const(family->name, GFP_KERNEL); - soc_dev_attr->soc_id = kstrdup_const(soc_id, GFP_KERNEL); - if (eshi) - soc_dev_attr->revision = 
kasprintf(GFP_KERNEL, "ES%u.%u", eshi, - eslo); - - pr_info("Detected Renesas %s %s %s\n", soc_dev_attr->family, - soc_dev_attr->soc_id, soc_dev_attr->revision ?: ""); + pr_info("Detected Renesas %s %s %s%s\n", soc_dev_attr->family, + soc_dev_attr->soc_id, rev_prefix, soc_dev_attr->revision ?: ""); soc_dev = soc_device_register(soc_dev_attr); if (IS_ERR(soc_dev)) { - kfree(soc_dev_attr->revision); - kfree_const(soc_dev_attr->soc_id); - kfree_const(soc_dev_attr->family); - kfree(soc_dev_attr); - return PTR_ERR(soc_dev); + ret = PTR_ERR(soc_dev); + goto free_soc_dev_attr; } return 0; + +free_soc_dev_attr: + kfree(soc_dev_attr->revision); + kfree_const(soc_dev_attr->soc_id); + kfree_const(soc_dev_attr->family); + kfree(soc_dev_attr); + return ret; } early_initcall(renesas_soc_init); -- cgit v1.2.3 From cdf157faaafe36c0823148587a78147200898e87 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 3 Feb 2022 09:22:01 +0100 Subject: firmware: arm_scmi: Disable ftrace for Clang Thumb2 builds The SMC calling convention designates R0-R7 as input registers in AArch32 mode, and this conflicts with the compiler's use of R7 as a frame pointer when building in Thumb2 mode. Generally, we don't enable the frame pointer, and GCC happily enables the -pg profiling hooks without them. However, Clang refuses, and errors out with the message below: drivers/firmware/arm_scmi/smc.c:152:2: error: write to reserved register 'R7' arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res); ^ include/linux/arm-smccc.h:550:4: note: expanded from macro 'arm_smccc_1_1_invoke' arm_smccc_1_1_smc(__VA_ARGS__); \ ^ Let's just disable ftrace for the compilation unit when building this configuration. Link: https://lore.kernel.org/r/20220203082204.1176734-11-ardb@kernel.org Reviewed-by: Nick Desaulniers Signed-off-by: Ard Biesheuvel Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/Makefile | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index ef66ec8ca917..8d4afadda38c 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -12,3 +12,10 @@ scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \ $(scmi-transport-y) obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o + +ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy) +# The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame +# pointer in Thumb2 mode, which is forcibly enabled by Clang when profiling +# hooks are inserted via the -pg switch. +CFLAGS_REMOVE_smc.o += $(CC_FLAGS_FTRACE) +endif -- cgit v1.2.3 From 4e890b2228fd14fa6269175e9816bf27ff989e84 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Sun, 6 Feb 2022 14:58:06 +0100 Subject: memory: of: parse max-freq property Passing the memory timings maximum frequency as an unit address was a workaround and instead 'max-freq' is preferred. Look for 'max-freq' first and then fallback to 'reg'. 
Signed-off-by: Krzysztof Kozlowski Reviewed-by: Alim Akhtar Reviewed-by: Dmitry Osipenko Reviewed-by: Rob Herring Link: https://lore.kernel.org/r/20220206135807.211767-8-krzysztof.kozlowski@canonical.com --- drivers/memory/of_memory.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c index b94408954d85..bac5c7f34936 100644 --- a/drivers/memory/of_memory.c +++ b/drivers/memory/of_memory.c @@ -212,8 +212,10 @@ static int of_lpddr3_do_get_timings(struct device_node *np, { int ret; - /* The 'reg' param required since DT has changed, used as 'max-freq' */ - ret = of_property_read_u32(np, "reg", &tim->max_freq); + ret = of_property_read_u32(np, "max-freq", &tim->max_freq); + if (ret) + /* Deprecated way of passing max-freq as 'reg' */ + ret = of_property_read_u32(np, "reg", &tim->max_freq); ret |= of_property_read_u32(np, "min-freq", &tim->min_freq); ret |= of_property_read_u32(np, "tRFC", &tim->tRFC); ret |= of_property_read_u32(np, "tRRD", &tim->tRRD); -- cgit v1.2.3 From 76ee15ae1b13a53a355246f92039c8373e8ba601 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Wed, 9 Feb 2022 21:10:43 -0800 Subject: soc: qcom: socinfo: Add some more PMICs and SoCs Add SM8350, SC8280XP, SA8540P and one more SM8450 and various PMICs found on boards on these platforms to the socinfo driver. Signed-off-by: Bjorn Andersson Tested-by: Vinod Koul Reviewed-by: Vinod Koul Link: https://lore.kernel.org/r/20220210051043.748275-1-bjorn.andersson@linaro.org --- drivers/soc/qcom/socinfo.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers') diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index 6dc0f39c0ec3..8b38d134720a 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -104,6 +104,14 @@ static const char *const pmic_models[] = { [36] = "PM8009", [38] = "PM8150C", [41] = "SMB2351", + [47] = "PMK8350", + [48] = "PM8350", + [49] = "PM8350C", + [50] = "PM8350B", + [51] = "PMR735A", + [52] = "PMR735B", + [58] = "PM8450", + [65] = "PM8010", }; #endif /* CONFIG_DEBUG_FS */ @@ -314,10 +322,14 @@ static const struct soc_id soc_id[] = { { 422, "IPQ6010" }, { 425, "SC7180" }, { 434, "SM6350" }, + { 439, "SM8350" }, + { 449, "SC8280XP" }, { 453, "IPQ6005" }, { 455, "QRB5165" }, { 457, "SM8450" }, { 459, "SM7225" }, + { 460, "SA8540P" }, + { 480, "SM8450" }, }; static const char *socinfo_machine(struct device *dev, unsigned int id) -- cgit v1.2.3 From 06b24ab364403094884b71234b44e17f746e5090 Mon Sep 17 00:00:00 2001 From: Huang Yiwei Date: Fri, 28 Jan 2022 13:17:08 +0530 Subject: soc: qcom: llcc: Add support for 16 ways of allocation Add support for 16 ways of allocation for LLCC HW version 2.1.0 and later. 
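The two masks widened below pack the reserved-ways and bonus-ways bitmaps into the low and high halves of a slice's attribute-0 register, which is why growing them from 12 to 16 bits is what exposes all 16 ways. A sketch of the packing (example values only; the register write itself is unchanged):

  u32 res_ways = 0x0;       /* example: no ways reserved for the slice */
  u32 bonus_ways = 0xffff;  /* example: the slice may use all 16 ways */
  u32 attr0_val;

  attr0_val = res_ways & ATTR0_RES_WAYS_MASK;
  attr0_val |= (bonus_ways << ATTR0_BONUS_WAYS_SHIFT) & ATTR0_BONUS_WAYS_MASK;
  /* attr0_val is then programmed into the slice's TRP attribute register */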
Signed-off-by: Huang Yiwei Signed-off-by: Sai Prakash Ranjan Tested-by: Vinod Koul Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/a7a5f64259c2c02628f03fb59b91e9fa78da2dfb.1643355594.git.quic_saipraka@quicinc.com --- drivers/soc/qcom/llcc-qcom.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index 00274a93406b..c45146c63423 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -29,8 +29,8 @@ #define ATTR1_FIXED_SIZE_SHIFT 0x03 #define ATTR1_PRIORITY_SHIFT 0x04 #define ATTR1_MAX_CAP_SHIFT 0x10 -#define ATTR0_RES_WAYS_MASK GENMASK(11, 0) -#define ATTR0_BONUS_WAYS_MASK GENMASK(27, 16) +#define ATTR0_RES_WAYS_MASK GENMASK(15, 0) +#define ATTR0_BONUS_WAYS_MASK GENMASK(31, 16) #define ATTR0_BONUS_WAYS_SHIFT 0x10 #define LLCC_STATUS_READ_DELAY 100 -- cgit v1.2.3 From 8008e7902f28eb9e5459b21d375b3e5b4090efff Mon Sep 17 00:00:00 2001 From: Sai Prakash Ranjan Date: Fri, 28 Jan 2022 13:17:09 +0530 Subject: soc: qcom: llcc: Update the logic for version info extraction LLCC HW version info is made up of major, branch, minor and echo version bits each of which are 8bits. Several features in newer LLCC HW are based on the full version rather than just major or minor versions such as write-subcache enable which is applicable for versions v2.0.0.0 and later, also upcoming write-subcache cacheable for SM8450 SoC which is only present in versions v2.1.0.0 and later, so it makes it easier and cleaner to just directly compare with the full version than adding additional major/branch/ minor/echo version checks. So remove the earlier major version check and add full version check for those features. Signed-off-by: Sai Prakash Ranjan Tested-by: Vinod Koul Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/a82d7c32348c51fcc2b63e220d91b318bf706c83.1643355594.git.quic_saipraka@quicinc.com --- drivers/soc/qcom/llcc-qcom.c | 9 +++++---- include/linux/soc/qcom/llcc-qcom.h | 4 ++-- 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index c45146c63423..f15f4c51e997 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -37,7 +37,6 @@ #define CACHE_LINE_SIZE_SHIFT 6 #define LLCC_COMMON_HW_INFO 0x00030000 -#define LLCC_MAJOR_VERSION_MASK GENMASK(31, 24) #define LLCC_COMMON_STATUS0 0x0003000c #define LLCC_LB_CNT_MASK GENMASK(31, 28) @@ -55,6 +54,8 @@ #define BANK_OFFSET_STRIDE 0x80000 +#define LLCC_VERSION_2_0_0_0 0x02000000 + /** * struct llcc_slice_config - Data associated with the llcc slice * @usecase_id: Unique id for the client's use case @@ -504,7 +505,7 @@ static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config, return ret; } - if (drv_data->major_version == 2) { + if (drv_data->version >= LLCC_VERSION_2_0_0_0) { u32 wren; wren = config->write_scid_en << config->slice_id; @@ -598,12 +599,12 @@ static int qcom_llcc_probe(struct platform_device *pdev) goto err; } - /* Extract major version of the IP */ + /* Extract version of the IP */ ret = regmap_read(drv_data->bcast_regmap, LLCC_COMMON_HW_INFO, &version); if (ret) goto err; - drv_data->major_version = FIELD_GET(LLCC_MAJOR_VERSION_MASK, version); + drv_data->version = version; ret = regmap_read(drv_data->regmap, LLCC_COMMON_STATUS0, &num_banks); diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h index 9e8fd92c96b7..beecf00b707d 100644 --- 
a/include/linux/soc/qcom/llcc-qcom.h +++ b/include/linux/soc/qcom/llcc-qcom.h @@ -83,7 +83,7 @@ struct llcc_edac_reg_data { * @bitmap: Bit map to track the active slice ids * @offsets: Pointer to the bank offsets array * @ecc_irq: interrupt for llcc cache error detection and reporting - * @major_version: Indicates the LLCC major version + * @version: Indicates the LLCC version */ struct llcc_drv_data { struct regmap *regmap; @@ -96,7 +96,7 @@ struct llcc_drv_data { unsigned long *bitmap; u32 *offsets; int ecc_irq; - u32 major_version; + u32 version; }; #if IS_ENABLED(CONFIG_QCOM_LLCC) -- cgit v1.2.3 From 2b8175a1f108361c2c1a11b27415631994efbfce Mon Sep 17 00:00:00 2001 From: Sai Prakash Ranjan Date: Fri, 28 Jan 2022 13:17:10 +0530 Subject: soc: qcom: llcc: Add write-cache cacheable support Newer SoCs with LLCC IP version 2.1.0.0 and later support write sub-cache cacheable feature. Use a separate llcc_slice_config member "write_scid_cacheable_en" to identify this feature and program LLCC_TRP_SCID_WRSC_CACHEABLE_EN register to enable it. Signed-off-by: Sai Prakash Ranjan Tested-by: Vinod Koul Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/83372c8178f579d055ec58212ce5af5d55abadd4.1643355594.git.quic_saipraka@quicinc.com --- drivers/soc/qcom/llcc-qcom.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'drivers') diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index f15f4c51e997..af674fc6f680 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -51,10 +51,12 @@ #define LLCC_TRP_SCID_DIS_CAP_ALLOC 0x21f00 #define LLCC_TRP_PCB_ACT 0x21f04 #define LLCC_TRP_WRSC_EN 0x21f20 +#define LLCC_TRP_WRSC_CACHEABLE_EN 0x21f2c #define BANK_OFFSET_STRIDE 0x80000 #define LLCC_VERSION_2_0_0_0 0x02000000 +#define LLCC_VERSION_2_1_0_0 0x02010000 /** * struct llcc_slice_config - Data associated with the llcc slice @@ -80,6 +82,8 @@ * collapse. * @activate_on_init: Activate the slice immediately after it is programmed * @write_scid_en: Bit enables write cache support for a given scid. + * @write_scid_cacheable_en: Enables write cache cacheable support for a + * given scid (not supported on v2 or older hardware). */ struct llcc_slice_config { u32 usecase_id; @@ -95,6 +99,7 @@ struct llcc_slice_config { bool retain_on_pc; bool activate_on_init; bool write_scid_en; + bool write_scid_cacheable_en; }; struct qcom_llcc_config { @@ -515,6 +520,16 @@ static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config, return ret; } + if (drv_data->version >= LLCC_VERSION_2_1_0_0) { + u32 wr_cache_en; + + wr_cache_en = config->write_scid_cacheable_en << config->slice_id; + ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_WRSC_CACHEABLE_EN, + BIT(config->slice_id), wr_cache_en); + if (ret) + return ret; + } + if (config->activate_on_init) { desc.slice_id = config->slice_id; ret = llcc_slice_activate(&desc); -- cgit v1.2.3 From bc88a42075cd85cedfcea5fbd75817e57e091b88 Mon Sep 17 00:00:00 2001 From: Sai Prakash Ranjan Date: Fri, 28 Jan 2022 13:17:11 +0530 Subject: soc: qcom: llcc: Add missing llcc configuration data Add missing llcc configuration data for few chipsets which were not added during initial post. 
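The version gate added in the hunk below relies on the full 32-bit value captured by the previous patch; since each of the four fields is eight bits wide, the raw LLCC_COMMON_HW_INFO value compares directly as an integer. For illustration (the named constants referenced here are introduced within this patch set):

  /*
   * version = major << 24 | branch << 16 | minor << 8 | echo
   *   v2.0.0.0 -> 0x02000000 (LLCC_VERSION_2_0_0_0)
   *   v2.1.0.0 -> 0x02010000 (LLCC_VERSION_2_1_0_0)
   */
  bool has_write_scid      = drv_data->version >= 0x02000000;
  bool has_wrsc_cacheable  = drv_data->version >= 0x02010000;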
Signed-off-by: Sai Prakash Ranjan Tested-by: Vinod Koul Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/143d11bacaca086406fdd10fc32f91eccd943527.1643355594.git.quic_saipraka@quicinc.com --- drivers/soc/qcom/llcc-qcom.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index af674fc6f680..ccf6afcf6be5 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -244,21 +244,25 @@ static const struct qcom_llcc_config sdm845_cfg = { static const struct qcom_llcc_config sm6350_cfg = { .sct_data = sm6350_data, .size = ARRAY_SIZE(sm6350_data), + .need_llcc_cfg = true, }; static const struct qcom_llcc_config sm8150_cfg = { .sct_data = sm8150_data, .size = ARRAY_SIZE(sm8150_data), + .need_llcc_cfg = true, }; static const struct qcom_llcc_config sm8250_cfg = { .sct_data = sm8250_data, .size = ARRAY_SIZE(sm8250_data), + .need_llcc_cfg = true, }; static const struct qcom_llcc_config sm8350_cfg = { .sct_data = sm8350_data, .size = ARRAY_SIZE(sm8350_data), + .need_llcc_cfg = true, }; static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER; -- cgit v1.2.3 From 424ad93c23e2984298c38d644dfc3b69281924a2 Mon Sep 17 00:00:00 2001 From: Sai Prakash Ranjan Date: Fri, 28 Jan 2022 13:17:12 +0530 Subject: soc: qcom: llcc: Update register offsets for newer LLCC HW Newer LLCC HW have different register offsets for several registers, currently of which LLCC hardware info and status are used to identify the LLCC version information and other data. So use separate table to keep track of these register offsets which vary by different LLCC HW versions and eases any future addition in variations of register offsets for newer hardware. Signed-off-by: Sai Prakash Ranjan Tested-by: Vinod Koul Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/c655d16d945aef2d7fc8e7c212f3e1c58a84eb95.1643355594.git.quic_saipraka@quicinc.com --- drivers/soc/qcom/llcc-qcom.c | 36 +++++++++++++++++++++++++++++------- 1 file changed, 29 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index ccf6afcf6be5..efe962b9a1fe 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -36,9 +36,6 @@ #define CACHE_LINE_SIZE_SHIFT 6 -#define LLCC_COMMON_HW_INFO 0x00030000 - -#define LLCC_COMMON_STATUS0 0x0003000c #define LLCC_LB_CNT_MASK GENMASK(31, 28) #define LLCC_LB_CNT_SHIFT 28 @@ -106,6 +103,12 @@ struct qcom_llcc_config { const struct llcc_slice_config *sct_data; int size; bool need_llcc_cfg; + const u32 *reg_offset; +}; + +enum llcc_reg_offset { + LLCC_COMMON_HW_INFO, + LLCC_COMMON_STATUS0, }; static const struct llcc_slice_config sc7180_data[] = { @@ -223,46 +226,63 @@ static const struct llcc_slice_config sm8350_data[] = { { LLCC_CPUHWT, 5, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 }, }; +static const u32 llcc_v1_2_reg_offset[] = { + [LLCC_COMMON_HW_INFO] = 0x00030000, + [LLCC_COMMON_STATUS0] = 0x0003000c, +}; + +static const u32 llcc_v21_reg_offset[] = { + [LLCC_COMMON_HW_INFO] = 0x00034000, + [LLCC_COMMON_STATUS0] = 0x0003400c, +}; + static const struct qcom_llcc_config sc7180_cfg = { .sct_data = sc7180_data, .size = ARRAY_SIZE(sc7180_data), .need_llcc_cfg = true, + .reg_offset = llcc_v1_2_reg_offset, }; static const struct qcom_llcc_config sc7280_cfg = { .sct_data = sc7280_data, .size = ARRAY_SIZE(sc7280_data), .need_llcc_cfg = true, + .reg_offset = llcc_v1_2_reg_offset, }; static const struct qcom_llcc_config 
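For reference, need_llcc_cfg gates whether the kernel programs the global TRP registers for capacity-allocation disable and retain-on-power-collapse, using the LLCC_TRP_SCID_DIS_CAP_ALLOC and LLCC_TRP_PCB_ACT offsets already defined in this file. The sketch below is reconstructed as an assumption from those register names and the slice fields, not copied from the driver, so treat the details as illustrative:

  if (cfg->need_llcc_cfg) {
      u32 disable_cap_alloc = config->dis_cap_alloc << config->slice_id;
      u32 retain_pc = config->retain_on_pc << config->slice_id;

      /* One bit per slice id in each of the two broadcast registers */
      ret = regmap_write(drv_data->bcast_regmap,
                         LLCC_TRP_SCID_DIS_CAP_ALLOC, disable_cap_alloc);
      if (ret)
          return ret;

      ret = regmap_write(drv_data->bcast_regmap, LLCC_TRP_PCB_ACT, retain_pc);
      if (ret)
          return ret;
  }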
sdm845_cfg = { .sct_data = sdm845_data, .size = ARRAY_SIZE(sdm845_data), .need_llcc_cfg = false, + .reg_offset = llcc_v1_2_reg_offset, }; static const struct qcom_llcc_config sm6350_cfg = { .sct_data = sm6350_data, .size = ARRAY_SIZE(sm6350_data), .need_llcc_cfg = true, + .reg_offset = llcc_v1_2_reg_offset, }; static const struct qcom_llcc_config sm8150_cfg = { .sct_data = sm8150_data, .size = ARRAY_SIZE(sm8150_data), .need_llcc_cfg = true, + .reg_offset = llcc_v1_2_reg_offset, }; static const struct qcom_llcc_config sm8250_cfg = { .sct_data = sm8250_data, .size = ARRAY_SIZE(sm8250_data), .need_llcc_cfg = true, + .reg_offset = llcc_v1_2_reg_offset, }; static const struct qcom_llcc_config sm8350_cfg = { .sct_data = sm8350_data, .size = ARRAY_SIZE(sm8350_data), .need_llcc_cfg = true, + .reg_offset = llcc_v1_2_reg_offset, }; static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER; @@ -618,15 +638,18 @@ static int qcom_llcc_probe(struct platform_device *pdev) goto err; } + cfg = of_device_get_match_data(&pdev->dev); + /* Extract version of the IP */ - ret = regmap_read(drv_data->bcast_regmap, LLCC_COMMON_HW_INFO, &version); + ret = regmap_read(drv_data->bcast_regmap, cfg->reg_offset[LLCC_COMMON_HW_INFO], + &version); if (ret) goto err; drv_data->version = version; - ret = regmap_read(drv_data->regmap, LLCC_COMMON_STATUS0, - &num_banks); + ret = regmap_read(drv_data->regmap, cfg->reg_offset[LLCC_COMMON_STATUS0], + &num_banks); if (ret) goto err; @@ -634,7 +657,6 @@ static int qcom_llcc_probe(struct platform_device *pdev) num_banks >>= LLCC_LB_CNT_SHIFT; drv_data->num_banks = num_banks; - cfg = of_device_get_match_data(&pdev->dev); llcc_cfg = cfg->sct_data; sz = cfg->size; -- cgit v1.2.3 From a6e9d7ef252c44a4f33b4403cd367430697dd9be Mon Sep 17 00:00:00 2001 From: Sai Prakash Ranjan Date: Fri, 28 Jan 2022 13:17:13 +0530 Subject: soc: qcom: llcc: Add configuration data for SM8450 SoC Add LLCC configuration data for SM8450 SoC. 
Signed-off-by: Sai Prakash Ranjan Tested-by: Vinod Koul Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/fec944cb8f2a4a70785903c6bfec629c6f31b6a4.1643355594.git.quic_saipraka@quicinc.com --- drivers/soc/qcom/llcc-qcom.c | 34 ++++++++++++++++++++++++++++++++++ include/linux/soc/qcom/llcc-qcom.h | 5 +++++ 2 files changed, 39 insertions(+) (limited to 'drivers') diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index efe962b9a1fe..eecafeded56f 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -226,6 +226,32 @@ static const struct llcc_slice_config sm8350_data[] = { { LLCC_CPUHWT, 5, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 }, }; +static const struct llcc_slice_config sm8450_data[] = { + {LLCC_CPUSS, 1, 3072, 1, 0, 0xFFFF, 0x0, 0, 0, 0, 1, 1, 0, 0 }, + {LLCC_VIDSC0, 2, 512, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_AUDIO, 6, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 }, + {LLCC_MDMHPGRW, 7, 1024, 3, 0, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_MODHW, 9, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_CMPT, 10, 4096, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_GPUHTW, 11, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_GPU, 12, 2048, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 1, 0 }, + {LLCC_MMUHWT, 13, 768, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0 }, + {LLCC_DISP, 16, 4096, 2, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_MDMPNG, 21, 1024, 1, 1, 0xF000, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_AUDHW, 22, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 }, + {LLCC_CVP, 28, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_MODPE, 29, 64, 1, 1, 0xF000, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0xF0, 1, 0, 0, 1, 0, 0, 0 }, + {LLCC_WRCACHE, 31, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0 }, + {LLCC_CVPFW, 17, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_CPUSS1, 3, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_CAMEXP0, 4, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_CPUMTE, 23, 256, 1, 1, 0x0FFF, 0x0, 0, 0, 0, 0, 1, 0, 0 }, + {LLCC_CPUHWT, 5, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 1, 0, 0 }, + {LLCC_CAMEXP1, 27, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_AENPU, 8, 2048, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 }, +}; + static const u32 llcc_v1_2_reg_offset[] = { [LLCC_COMMON_HW_INFO] = 0x00030000, [LLCC_COMMON_STATUS0] = 0x0003000c, @@ -285,6 +311,13 @@ static const struct qcom_llcc_config sm8350_cfg = { .reg_offset = llcc_v1_2_reg_offset, }; +static const struct qcom_llcc_config sm8450_cfg = { + .sct_data = sm8450_data, + .size = ARRAY_SIZE(sm8450_data), + .need_llcc_cfg = true, + .reg_offset = llcc_v21_reg_offset, +}; + static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER; /** @@ -713,6 +746,7 @@ static const struct of_device_id qcom_llcc_of_match[] = { { .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfg }, { .compatible = "qcom,sm8250-llcc", .data = &sm8250_cfg }, { .compatible = "qcom,sm8350-llcc", .data = &sm8350_cfg }, + { .compatible = "qcom,sm8450-llcc", .data = &sm8450_cfg }, { } }; diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h index beecf00b707d..0bc21ee58fac 100644 --- a/include/linux/soc/qcom/llcc-qcom.h +++ b/include/linux/soc/qcom/llcc-qcom.h @@ -35,7 +35,12 @@ #define LLCC_WRCACHE 31 #define LLCC_CVPFW 32 #define LLCC_CPUSS1 33 +#define LLCC_CAMEXP0 34 +#define LLCC_CPUMTE 35 #define LLCC_CPUHWT 36 +#define LLCC_MDMCLAD2 37 +#define LLCC_CAMEXP1 38 +#define LLCC_AENPU 45 /** * struct 
llcc_slice_desc - Cache slice descriptor -- cgit v1.2.3 From 608d7c325e855cb4a853afef3cd9f0df594bd12d Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Tue, 25 Jan 2022 11:11:21 -0600 Subject: soc: imx: imx8m-blk-ctrl: add i.MX8MQ VPU blk-ctrl This adds the necessary bits to drive the VPU blk-ctrl on the i.MX8MQ, to avoid putting more of this functionality into the decoder driver. Signed-off-by: Lucas Stach Signed-off-by: Adam Ford Signed-off-by: Shawn Guo --- drivers/soc/imx/imx8m-blk-ctrl.c | 66 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) (limited to 'drivers') diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c index 511e74f0db8a..122f9c884b38 100644 --- a/drivers/soc/imx/imx8m-blk-ctrl.c +++ b/drivers/soc/imx/imx8m-blk-ctrl.c @@ -15,6 +15,7 @@ #include #include +#include #define BLK_SFT_RSTN 0x0 #define BLK_CLK_EN 0x4 @@ -589,6 +590,68 @@ static const struct imx8m_blk_ctrl_data imx8mn_disp_blk_ctl_dev_data = { .num_domains = ARRAY_SIZE(imx8mn_disp_blk_ctl_domain_data), }; +static int imx8mq_vpu_power_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct imx8m_blk_ctrl *bc = container_of(nb, struct imx8m_blk_ctrl, + power_nb); + + if (action != GENPD_NOTIFY_ON && action != GENPD_NOTIFY_PRE_OFF) + return NOTIFY_OK; + + /* + * The ADB in the VPUMIX domain has no separate reset and clock + * enable bits, but is ungated and reset together with the VPUs. The + * reset and clock enable inputs to the ADB is a logical OR of the + * VPU bits. In order to set the G2 fuse bits, the G2 clock must + * also be enabled. + */ + regmap_set_bits(bc->regmap, BLK_SFT_RSTN, BIT(0) | BIT(1)); + regmap_set_bits(bc->regmap, BLK_CLK_EN, BIT(0) | BIT(1)); + + if (action == GENPD_NOTIFY_ON) { + /* + * On power up we have no software backchannel to the GPC to + * wait for the ADB handshake to happen, so we just delay for a + * bit. On power down the GPC driver waits for the handshake. 
+ */ + udelay(5); + + /* set "fuse" bits to enable the VPUs */ + regmap_set_bits(bc->regmap, 0x8, 0xffffffff); + regmap_set_bits(bc->regmap, 0xc, 0xffffffff); + regmap_set_bits(bc->regmap, 0x10, 0xffffffff); + } + + return NOTIFY_OK; +} + +static const struct imx8m_blk_ctrl_domain_data imx8mq_vpu_blk_ctl_domain_data[] = { + [IMX8MQ_VPUBLK_PD_G1] = { + .name = "vpublk-g1", + .clk_names = (const char *[]){ "g1", }, + .num_clks = 1, + .gpc_name = "g1", + .rst_mask = BIT(1), + .clk_mask = BIT(1), + }, + [IMX8MQ_VPUBLK_PD_G2] = { + .name = "vpublk-g2", + .clk_names = (const char *[]){ "g2", }, + .num_clks = 1, + .gpc_name = "g2", + .rst_mask = BIT(0), + .clk_mask = BIT(0), + }, +}; + +static const struct imx8m_blk_ctrl_data imx8mq_vpu_blk_ctl_dev_data = { + .max_reg = 0x14, + .power_notifier_fn = imx8mq_vpu_power_notifier, + .domains = imx8mq_vpu_blk_ctl_domain_data, + .num_domains = ARRAY_SIZE(imx8mq_vpu_blk_ctl_domain_data), +}; + static const struct of_device_id imx8m_blk_ctrl_of_match[] = { { .compatible = "fsl,imx8mm-vpu-blk-ctrl", @@ -599,6 +662,9 @@ static const struct of_device_id imx8m_blk_ctrl_of_match[] = { }, { .compatible = "fsl,imx8mn-disp-blk-ctrl", .data = &imx8mn_disp_blk_ctl_dev_data + }, { + .compatible = "fsl,imx8mq-vpu-blk-ctrl", + .data = &imx8mq_vpu_blk_ctl_dev_data }, { /* Sentinel */ } -- cgit v1.2.3 From 6d240170811aad7330e6d0b3857fb0d4d9c82b56 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Mon, 7 Feb 2022 10:05:40 +0800 Subject: firmware: imx: add get resource owner api Add resource owner management API, this API could be used to check whether M4 is under control of Linux. Signed-off-by: Peng Fan Signed-off-by: Shawn Guo --- drivers/firmware/imx/rm.c | 45 +++++++++++++++++++++++++++++++++++++ include/linux/firmware/imx/svc/rm.h | 5 +++++ 2 files changed, 50 insertions(+) (limited to 'drivers') diff --git a/drivers/firmware/imx/rm.c b/drivers/firmware/imx/rm.c index a12db6ff323b..d492b99e1c6c 100644 --- a/drivers/firmware/imx/rm.c +++ b/drivers/firmware/imx/rm.c @@ -43,3 +43,48 @@ bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource) return hdr->func; } EXPORT_SYMBOL(imx_sc_rm_is_resource_owned); + +struct imx_sc_msg_rm_get_resource_owner { + struct imx_sc_rpc_msg hdr; + union { + struct { + u16 resource; + } req; + struct { + u8 val; + } resp; + } data; +} __packed __aligned(4); + +/* + * This function get @resource partition number + * + * @param[in] ipc IPC handle + * @param[in] resource resource the control is associated with + * @param[out] pt pointer to return the partition number + * + * @return Returns 0 for success and < 0 for errors. 
+ */ +int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt) +{ + struct imx_sc_msg_rm_get_resource_owner msg; + struct imx_sc_rpc_msg *hdr = &msg.hdr; + int ret; + + hdr->ver = IMX_SC_RPC_VERSION; + hdr->svc = IMX_SC_RPC_SVC_RM; + hdr->func = IMX_SC_RM_FUNC_GET_RESOURCE_OWNER; + hdr->size = 2; + + msg.data.req.resource = resource; + + ret = imx_scu_call_rpc(ipc, &msg, true); + if (ret) + return ret; + + if (pt) + *pt = msg.data.resp.val; + + return 0; +} +EXPORT_SYMBOL(imx_sc_rm_get_resource_owner); diff --git a/include/linux/firmware/imx/svc/rm.h b/include/linux/firmware/imx/svc/rm.h index 456b6a59d29b..31456f897aa9 100644 --- a/include/linux/firmware/imx/svc/rm.h +++ b/include/linux/firmware/imx/svc/rm.h @@ -59,11 +59,16 @@ enum imx_sc_rm_func { #if IS_ENABLED(CONFIG_IMX_SCU) bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource); +int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt); #else static inline bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource) { return true; } +static inline int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt) +{ + return -EOPNOTSUPP; +} #endif #endif -- cgit v1.2.3 From f7b67642dd98617dc569836cdcba041c7ff00cbb Mon Sep 17 00:00:00 2001 From: Cai Huoqing Date: Wed, 9 Feb 2022 11:27:43 +0800 Subject: tee: amdtee: Make use of the helper macro LIST_HEAD() Replace "struct list_head head = LIST_HEAD_INIT(head)" with "LIST_HEAD(head)" to simplify the code. Signed-off-by: Cai Huoqing Reviewed-by: Rijo Thomas Signed-off-by: Jens Wiklander --- drivers/tee/amdtee/call.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c index 07f36ac834c8..cec6e70f0ac9 100644 --- a/drivers/tee/amdtee/call.c +++ b/drivers/tee/amdtee/call.c @@ -122,7 +122,7 @@ static int amd_params_to_tee_params(struct tee_param *tee, u32 count, } static DEFINE_MUTEX(ta_refcount_mutex); -static struct list_head ta_list = LIST_HEAD_INIT(ta_list); +static LIST_HEAD(ta_list); static u32 get_ta_refcount(u32 ta_handle) { -- cgit v1.2.3 From e7ddab0847408976c4f2234592f8df4e7551c95b Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 4 Feb 2022 10:33:50 +0100 Subject: hwrng: optee-rng: use tee_shm_alloc_kernel_buf() Uses the new simplified tee_shm_alloc_kernel_buf() function instead of the old deprecated tee_shm_alloc() function which required specific TEE_SHM-flags. 
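In condensed form, this is the pattern the diff below applies; any kernel-side TEE client can allocate shared memory without picking TEE_SHM_* flags. A sketch with error paths abbreviated (pvt_data->ctx is the open tee_context, as in the driver):

	struct tee_shm *shm;
	void *va;

	shm = tee_shm_alloc_kernel_buf(pvt_data->ctx, MAX_ENTROPY_REQ_SZ);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	va = tee_shm_get_va(shm, 0);	/* kernel mapping of the shared buffer */
	if (IS_ERR(va)) {
		tee_shm_free(shm);
		return PTR_ERR(va);
	}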
Reviewed-by: Sumit Garg Signed-off-by: Jens Wiklander --- drivers/char/hw_random/optee-rng.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/optee-rng.c b/drivers/char/hw_random/optee-rng.c index 135a82590923..a948c0727b2b 100644 --- a/drivers/char/hw_random/optee-rng.c +++ b/drivers/char/hw_random/optee-rng.c @@ -145,10 +145,10 @@ static int optee_rng_init(struct hwrng *rng) struct optee_rng_private *pvt_data = to_optee_rng_private(rng); struct tee_shm *entropy_shm_pool = NULL; - entropy_shm_pool = tee_shm_alloc(pvt_data->ctx, MAX_ENTROPY_REQ_SZ, - TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); + entropy_shm_pool = tee_shm_alloc_kernel_buf(pvt_data->ctx, + MAX_ENTROPY_REQ_SZ); if (IS_ERR(entropy_shm_pool)) { - dev_err(pvt_data->dev, "tee_shm_alloc failed\n"); + dev_err(pvt_data->dev, "tee_shm_alloc_kernel_buf failed\n"); return PTR_ERR(entropy_shm_pool); } -- cgit v1.2.3 From f41b6be1ebdae452819551ed35a46e6fd32bf467 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 4 Feb 2022 10:33:51 +0100 Subject: tee: remove unused tee_shm_pool_alloc_res_mem() None of the drivers in the TEE subsystem uses tee_shm_pool_alloc_res_mem() so remove the function. Reviewed-by: Sumit Garg Signed-off-by: Jens Wiklander --- drivers/tee/tee_shm_pool.c | 56 ---------------------------------------------- include/linux/tee_drv.h | 30 ------------------------- 2 files changed, 86 deletions(-) (limited to 'drivers') diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c index fcbb461fc59c..a9f9d50fd181 100644 --- a/drivers/tee/tee_shm_pool.c +++ b/drivers/tee/tee_shm_pool.c @@ -47,62 +47,6 @@ static const struct tee_shm_pool_mgr_ops pool_ops_generic = { .destroy_poolmgr = pool_op_gen_destroy_poolmgr, }; -/** - * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved - * memory range - * @priv_info: Information for driver private shared memory pool - * @dmabuf_info: Information for dma-buf shared memory pool - * - * Start and end of pools will must be page aligned. - * - * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied - * in @dmabuf, others will use the range provided by @priv. - * - * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. 
- */ -struct tee_shm_pool * -tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info, - struct tee_shm_pool_mem_info *dmabuf_info) -{ - struct tee_shm_pool_mgr *priv_mgr; - struct tee_shm_pool_mgr *dmabuf_mgr; - void *rc; - - /* - * Create the pool for driver private shared memory - */ - rc = tee_shm_pool_mgr_alloc_res_mem(priv_info->vaddr, priv_info->paddr, - priv_info->size, - 3 /* 8 byte aligned */); - if (IS_ERR(rc)) - return rc; - priv_mgr = rc; - - /* - * Create the pool for dma_buf shared memory - */ - rc = tee_shm_pool_mgr_alloc_res_mem(dmabuf_info->vaddr, - dmabuf_info->paddr, - dmabuf_info->size, PAGE_SHIFT); - if (IS_ERR(rc)) - goto err_free_priv_mgr; - dmabuf_mgr = rc; - - rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr); - if (IS_ERR(rc)) - goto err_free_dmabuf_mgr; - - return rc; - -err_free_dmabuf_mgr: - tee_shm_pool_mgr_destroy(dmabuf_mgr); -err_free_priv_mgr: - tee_shm_pool_mgr_destroy(priv_mgr); - - return rc; -} -EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem); - struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr, phys_addr_t paddr, size_t size, diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 5e1533ee3785..6b0f0d01ebdf 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -278,36 +278,6 @@ static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm) poolm->ops->destroy_poolmgr(poolm); } -/** - * struct tee_shm_pool_mem_info - holds information needed to create a shared - * memory pool - * @vaddr: Virtual address of start of pool - * @paddr: Physical address of start of pool - * @size: Size in bytes of the pool - */ -struct tee_shm_pool_mem_info { - unsigned long vaddr; - phys_addr_t paddr; - size_t size; -}; - -/** - * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved - * memory range - * @priv_info: Information for driver private shared memory pool - * @dmabuf_info: Information for dma-buf shared memory pool - * - * Start and end of pools will must be page aligned. - * - * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied - * in @dmabuf, others will use the range provided by @priv. - * - * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. - */ -struct tee_shm_pool * -tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info, - struct tee_shm_pool_mem_info *dmabuf_info); - /** * tee_shm_pool_free() - Free a shared memory pool * @pool: The shared memory pool to free -- cgit v1.2.3 From 71cc47d4cc1f7a333584e0f2f7c863c71a6d3ced Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 4 Feb 2022 10:33:52 +0100 Subject: tee: add tee_shm_alloc_user_buf() Adds a new function tee_shm_alloc_user_buf() for user mode allocations, replacing passing the flags TEE_SHM_MAPPED | TEE_SHM_DMA_BUF to tee_shm_alloc(). 
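At the TEE_IOC_SHM_ALLOC ioctl handler, the call site converted below, the change is a drop-in replacement. In sketch form:

	/* before */
	shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);

	/* after */
	shm = tee_shm_alloc_user_buf(ctx, data.size);

Note that the new helper is declared in drivers/tee/tee_private.h rather than include/linux/tee_drv.h, so user-space-backed allocations stay internal to the TEE core's ioctl path.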
Reviewed-by: Sumit Garg Signed-off-by: Jens Wiklander --- drivers/tee/tee_core.c | 2 +- drivers/tee/tee_private.h | 2 ++ drivers/tee/tee_shm.c | 17 +++++++++++++++++ drivers/tee/tee_shm_pool.c | 2 +- include/linux/tee_drv.h | 2 +- 5 files changed, 22 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index 3fc426dad2df..a15812baaeb1 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -297,7 +297,7 @@ static int tee_ioctl_shm_alloc(struct tee_context *ctx, if (data.flags) return -EINVAL; - shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); + shm = tee_shm_alloc_user_buf(ctx, data.size); if (IS_ERR(shm)) return PTR_ERR(shm); diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h index e55204df31ce..e09c8aa5d967 100644 --- a/drivers/tee/tee_private.h +++ b/drivers/tee/tee_private.h @@ -68,4 +68,6 @@ void tee_device_put(struct tee_device *teedev); void teedev_ctx_get(struct tee_context *ctx); void teedev_ctx_put(struct tee_context *ctx); +struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size); + #endif /*TEE_PRIVATE_H*/ diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 499fccba3d74..7e7e762fc1de 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -127,6 +127,23 @@ err_dev_put: } EXPORT_SYMBOL_GPL(tee_shm_alloc); +/** + * tee_shm_alloc_user_buf() - Allocate shared memory for user space + * @ctx: Context that allocates the shared memory + * @size: Requested size of shared memory + * + * Memory allocated as user space shared memory is automatically freed when + * the TEE file pointer is closed. The primary usage of this function is + * when the TEE driver doesn't support registering ordinary user space + * memory. + * + * @returns a pointer to 'struct tee_shm' + */ +struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size) +{ + return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); +} + /** * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer * @ctx: Context that allocates the shared memory diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c index a9f9d50fd181..54c11aa374a8 100644 --- a/drivers/tee/tee_shm_pool.c +++ b/drivers/tee/tee_shm_pool.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2015, Linaro Limited + * Copyright (c) 2015 Linaro Limited */ #include #include diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 6b0f0d01ebdf..a4393c8c38f3 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2015-2016, Linaro Limited + * Copyright (c) 2015-2016 Linaro Limited */ #ifndef __TEE_DRV_H -- cgit v1.2.3 From d88e0493a054c9fe72ade41a42d42e958ee6503d Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 4 Feb 2022 10:33:53 +0100 Subject: tee: simplify shm pool handling Replaces the shared memory pool based on two pools with a single pool. The alloc() function pointer in struct tee_shm_pool_ops gets another parameter, align. This makes it possible to make less than page aligned allocations from the optional reserved shared memory pool while still making user space allocations page aligned. With in practice unchanged behaviour using only a single pool for bookkeeping. The allocation algorithm in the static OP-TEE shared memory pool is changed from best-fit to first-fit since only the latter supports an alignment parameter. 
The best-fit algorithm was previously the default choice and not a conscious one. The optee and amdtee drivers are updated as needed to work with this changed pool handling. This also removes OPTEE_SHM_NUM_PRIV_PAGES which becomes obsolete with this change as the private pages can be mixed with the payload pages. The OP-TEE driver changes minimum alignment for argument struct from 8 bytes to 512 bytes. A typical OP-TEE private shm allocation is 224 bytes (argument struct with 6 parameters, needed for open session). So with an alignment of 512 well waste a bit more than 50%. Before this we had a single page reserved for this so worst case usage compared to that would be 3 pages instead of 1 page. However, this worst case only occurs if there is a high pressure from multiple threads on secure world. All in all this should scale up and down better than fixed boundaries. Reviewed-by: Sumit Garg Signed-off-by: Jens Wiklander --- drivers/tee/amdtee/shm_pool.c | 55 ++++++------------- drivers/tee/optee/Kconfig | 8 --- drivers/tee/optee/core.c | 11 ++-- drivers/tee/optee/ffa_abi.c | 55 +++++-------------- drivers/tee/optee/optee_private.h | 4 +- drivers/tee/optee/smc_abi.c | 108 +++++++++++--------------------------- drivers/tee/tee_private.h | 11 ---- drivers/tee/tee_shm.c | 29 +++++----- drivers/tee/tee_shm_pool.c | 106 ++++++++++++------------------------- include/linux/tee_drv.h | 60 +++++++-------------- 10 files changed, 137 insertions(+), 310 deletions(-) (limited to 'drivers') diff --git a/drivers/tee/amdtee/shm_pool.c b/drivers/tee/amdtee/shm_pool.c index 065854e2db18..f87f96a291c9 100644 --- a/drivers/tee/amdtee/shm_pool.c +++ b/drivers/tee/amdtee/shm_pool.c @@ -8,13 +8,17 @@ #include #include "amdtee_private.h" -static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm, - size_t size) +static int pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm, + size_t size, size_t align) { unsigned int order = get_order(size); unsigned long va; int rc; + /* + * Ignore alignment since this is already going to be page aligned + * and there's no need for any larger alignment. 
+ */ va = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); if (!va) return -ENOMEM; @@ -34,7 +38,7 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm, return 0; } -static void pool_op_free(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm) +static void pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm) { /* Unmap the shared memory from TEE */ amdtee_unmap_shmem(shm); @@ -42,52 +46,25 @@ static void pool_op_free(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm) shm->kaddr = NULL; } -static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm) +static void pool_op_destroy_pool(struct tee_shm_pool *pool) { - kfree(poolm); + kfree(pool); } -static const struct tee_shm_pool_mgr_ops pool_ops = { +static const struct tee_shm_pool_ops pool_ops = { .alloc = pool_op_alloc, .free = pool_op_free, - .destroy_poolmgr = pool_op_destroy_poolmgr, + .destroy_pool = pool_op_destroy_pool, }; -static struct tee_shm_pool_mgr *pool_mem_mgr_alloc(void) -{ - struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); - - if (!mgr) - return ERR_PTR(-ENOMEM); - - mgr->ops = &pool_ops; - - return mgr; -} - struct tee_shm_pool *amdtee_config_shm(void) { - struct tee_shm_pool_mgr *priv_mgr; - struct tee_shm_pool_mgr *dmabuf_mgr; - void *rc; + struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL); - rc = pool_mem_mgr_alloc(); - if (IS_ERR(rc)) - return rc; - priv_mgr = rc; - - rc = pool_mem_mgr_alloc(); - if (IS_ERR(rc)) { - tee_shm_pool_mgr_destroy(priv_mgr); - return rc; - } - dmabuf_mgr = rc; + if (!pool) + return ERR_PTR(-ENOMEM); - rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr); - if (IS_ERR(rc)) { - tee_shm_pool_mgr_destroy(priv_mgr); - tee_shm_pool_mgr_destroy(dmabuf_mgr); - } + pool->ops = &pool_ops; - return rc; + return pool; } diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig index 3ca71e3812ed..f121c224e682 100644 --- a/drivers/tee/optee/Kconfig +++ b/drivers/tee/optee/Kconfig @@ -7,11 +7,3 @@ config OPTEE help This implements the OP-TEE Trusted Execution Environment (TEE) driver. - -config OPTEE_SHM_NUM_PRIV_PAGES - int "Private Shared Memory Pages" - default 1 - depends on OPTEE - help - This sets the number of private shared memory pages to be - used by OP-TEE TEE driver. diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index 17a6f51d3089..f4bccb5f0e93 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -18,8 +18,8 @@ #include #include "optee_private.h" -int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm, - struct tee_shm *shm, size_t size, +int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm, + size_t size, size_t align, int (*shm_register)(struct tee_context *ctx, struct tee_shm *shm, struct page **pages, @@ -30,6 +30,10 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm, struct page *page; int rc = 0; + /* + * Ignore alignment since this is already going to be page aligned + * and there's no need for any larger alignment. 
+ */ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); if (!page) return -ENOMEM; @@ -51,7 +55,6 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm, for (i = 0; i < nr_pages; i++) pages[i] = page + i; - shm->flags |= TEE_SHM_REGISTER; rc = shm_register(shm->ctx, shm, pages, nr_pages, (unsigned long)shm->kaddr); kfree(pages); @@ -62,7 +65,7 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm, return 0; err: - __free_pages(page, order); + free_pages((unsigned long)shm->kaddr, order); return rc; } diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c index 545f61af1248..91dd80945bf1 100644 --- a/drivers/tee/optee/ffa_abi.c +++ b/drivers/tee/optee/ffa_abi.c @@ -369,14 +369,14 @@ static int optee_ffa_shm_unregister_supp(struct tee_context *ctx, * The main function is optee_ffa_shm_pool_alloc_pages(). */ -static int pool_ffa_op_alloc(struct tee_shm_pool_mgr *poolm, - struct tee_shm *shm, size_t size) +static int pool_ffa_op_alloc(struct tee_shm_pool *pool, + struct tee_shm *shm, size_t size, size_t align) { - return optee_pool_op_alloc_helper(poolm, shm, size, + return optee_pool_op_alloc_helper(pool, shm, size, align, optee_ffa_shm_register); } -static void pool_ffa_op_free(struct tee_shm_pool_mgr *poolm, +static void pool_ffa_op_free(struct tee_shm_pool *pool, struct tee_shm *shm) { optee_ffa_shm_unregister(shm->ctx, shm); @@ -384,15 +384,15 @@ static void pool_ffa_op_free(struct tee_shm_pool_mgr *poolm, shm->kaddr = NULL; } -static void pool_ffa_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm) +static void pool_ffa_op_destroy_pool(struct tee_shm_pool *pool) { - kfree(poolm); + kfree(pool); } -static const struct tee_shm_pool_mgr_ops pool_ffa_ops = { +static const struct tee_shm_pool_ops pool_ffa_ops = { .alloc = pool_ffa_op_alloc, .free = pool_ffa_op_free, - .destroy_poolmgr = pool_ffa_op_destroy_poolmgr, + .destroy_pool = pool_ffa_op_destroy_pool, }; /** @@ -401,16 +401,16 @@ static const struct tee_shm_pool_mgr_ops pool_ffa_ops = { * This pool is used with OP-TEE over FF-A. In this case command buffers * and such are allocated from kernel's own memory. 
*/ -static struct tee_shm_pool_mgr *optee_ffa_shm_pool_alloc_pages(void) +static struct tee_shm_pool *optee_ffa_shm_pool_alloc_pages(void) { - struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); + struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL); - if (!mgr) + if (!pool) return ERR_PTR(-ENOMEM); - mgr->ops = &pool_ffa_ops; + pool->ops = &pool_ffa_ops; - return mgr; + return pool; } /* @@ -691,33 +691,6 @@ static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev, return true; } -static struct tee_shm_pool *optee_ffa_config_dyn_shm(void) -{ - struct tee_shm_pool_mgr *priv_mgr; - struct tee_shm_pool_mgr *dmabuf_mgr; - void *rc; - - rc = optee_ffa_shm_pool_alloc_pages(); - if (IS_ERR(rc)) - return rc; - priv_mgr = rc; - - rc = optee_ffa_shm_pool_alloc_pages(); - if (IS_ERR(rc)) { - tee_shm_pool_mgr_destroy(priv_mgr); - return rc; - } - dmabuf_mgr = rc; - - rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr); - if (IS_ERR(rc)) { - tee_shm_pool_mgr_destroy(priv_mgr); - tee_shm_pool_mgr_destroy(dmabuf_mgr); - } - - return rc; -} - static void optee_ffa_get_version(struct tee_device *teedev, struct tee_ioctl_version_data *vers) { @@ -815,7 +788,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) if (!optee) return -ENOMEM; - pool = optee_ffa_config_dyn_shm(); + pool = optee_ffa_shm_pool_alloc_pages(); if (IS_ERR(pool)) { rc = PTR_ERR(pool); goto err_free_optee; diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index 92bc47bef95f..df3a483bbf46 100644 --- a/drivers/tee/optee/optee_private.h +++ b/drivers/tee/optee/optee_private.h @@ -229,8 +229,8 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session); int optee_enumerate_devices(u32 func); void optee_unregister_devices(void); -int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm, - struct tee_shm *shm, size_t size, +int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm, + size_t size, size_t align, int (*shm_register)(struct tee_context *ctx, struct tee_shm *shm, struct page **pages, diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c index bacd1a1d79ee..3dc8cbea1a24 100644 --- a/drivers/tee/optee/smc_abi.c +++ b/drivers/tee/optee/smc_abi.c @@ -42,7 +42,15 @@ * 6. Driver initialization. */ -#define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES +/* + * A typical OP-TEE private shm allocation is 224 bytes (argument struct + * with 6 parameters, needed for open session). So with an alignment of 512 + * we'll waste a bit more than 50%. However, it's only expected that we'll + * have a handful of these structs allocated at a time. Most memory will + * be allocated aligned to the page size, So all in all this should scale + * up and down quite well. + */ +#define OPTEE_MIN_STATIC_POOL_ALIGN 9 /* 512 bytes aligned */ /* * 1. Convert between struct tee_param and struct optee_msg_param @@ -532,20 +540,21 @@ static int optee_shm_unregister_supp(struct tee_context *ctx, * The main function is optee_shm_pool_alloc_pages(). */ -static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, - struct tee_shm *shm, size_t size) +static int pool_op_alloc(struct tee_shm_pool *pool, + struct tee_shm *shm, size_t size, size_t align) { /* * Shared memory private to the OP-TEE driver doesn't need * to be registered with OP-TEE. 
*/ if (shm->flags & TEE_SHM_PRIV) - return optee_pool_op_alloc_helper(poolm, shm, size, NULL); + return optee_pool_op_alloc_helper(pool, shm, size, align, NULL); - return optee_pool_op_alloc_helper(poolm, shm, size, optee_shm_register); + return optee_pool_op_alloc_helper(pool, shm, size, align, + optee_shm_register); } -static void pool_op_free(struct tee_shm_pool_mgr *poolm, +static void pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm) { if (!(shm->flags & TEE_SHM_PRIV)) @@ -555,15 +564,15 @@ static void pool_op_free(struct tee_shm_pool_mgr *poolm, shm->kaddr = NULL; } -static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm) +static void pool_op_destroy_pool(struct tee_shm_pool *pool) { - kfree(poolm); + kfree(pool); } -static const struct tee_shm_pool_mgr_ops pool_ops = { +static const struct tee_shm_pool_ops pool_ops = { .alloc = pool_op_alloc, .free = pool_op_free, - .destroy_poolmgr = pool_op_destroy_poolmgr, + .destroy_pool = pool_op_destroy_pool, }; /** @@ -572,16 +581,16 @@ static const struct tee_shm_pool_mgr_ops pool_ops = { * This pool is used when OP-TEE supports dymanic SHM. In this case * command buffers and such are allocated from kernel's own memory. */ -static struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void) +static struct tee_shm_pool *optee_shm_pool_alloc_pages(void) { - struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); + struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL); - if (!mgr) + if (!pool) return ERR_PTR(-ENOMEM); - mgr->ops = &pool_ops; + pool->ops = &pool_ops; - return mgr; + return pool; } /* @@ -1153,33 +1162,6 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn, return true; } -static struct tee_shm_pool *optee_config_dyn_shm(void) -{ - struct tee_shm_pool_mgr *priv_mgr; - struct tee_shm_pool_mgr *dmabuf_mgr; - void *rc; - - rc = optee_shm_pool_alloc_pages(); - if (IS_ERR(rc)) - return rc; - priv_mgr = rc; - - rc = optee_shm_pool_alloc_pages(); - if (IS_ERR(rc)) { - tee_shm_pool_mgr_destroy(priv_mgr); - return rc; - } - dmabuf_mgr = rc; - - rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr); - if (IS_ERR(rc)) { - tee_shm_pool_mgr_destroy(priv_mgr); - tee_shm_pool_mgr_destroy(dmabuf_mgr); - } - - return rc; -} - static struct tee_shm_pool * optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm) { @@ -1193,10 +1175,7 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm) phys_addr_t begin; phys_addr_t end; void *va; - struct tee_shm_pool_mgr *priv_mgr; - struct tee_shm_pool_mgr *dmabuf_mgr; void *rc; - const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc); if (res.result.status != OPTEE_SMC_RETURN_OK) { @@ -1214,11 +1193,6 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm) paddr = begin; size = end - begin; - if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) { - pr_err("too small shared memory area\n"); - return ERR_PTR(-EINVAL); - } - va = memremap(paddr, size, MEMREMAP_WB); if (!va) { pr_err("shared memory ioremap failed\n"); @@ -1226,35 +1200,13 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm) } vaddr = (unsigned long)va; - rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz, - 3 /* 8 bytes aligned */); - if (IS_ERR(rc)) - goto err_memunmap; - priv_mgr = rc; - - vaddr += sz; - paddr += sz; - size -= sz; - - rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT); - if (IS_ERR(rc)) - goto 
err_free_priv_mgr; - dmabuf_mgr = rc; - - rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr); + rc = tee_shm_pool_alloc_res_mem(vaddr, paddr, size, + OPTEE_MIN_STATIC_POOL_ALIGN); if (IS_ERR(rc)) - goto err_free_dmabuf_mgr; - - *memremaped_shm = va; - - return rc; + memunmap(va); + else + *memremaped_shm = va; -err_free_dmabuf_mgr: - tee_shm_pool_mgr_destroy(dmabuf_mgr); -err_free_priv_mgr: - tee_shm_pool_mgr_destroy(priv_mgr); -err_memunmap: - memunmap(va); return rc; } @@ -1376,7 +1328,7 @@ static int optee_probe(struct platform_device *pdev) * Try to use dynamic shared memory if possible */ if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) - pool = optee_config_dyn_shm(); + pool = optee_shm_pool_alloc_pages(); /* * If dynamic shared memory is not available or failed - try static one diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h index e09c8aa5d967..7265f47c6d8e 100644 --- a/drivers/tee/tee_private.h +++ b/drivers/tee/tee_private.h @@ -12,17 +12,6 @@ #include #include -/** - * struct tee_shm_pool - shared memory pool - * @private_mgr: pool manager for shared memory only between kernel - * and secure world - * @dma_buf_mgr: pool manager for shared memory exported to user space - */ -struct tee_shm_pool { - struct tee_shm_pool_mgr *private_mgr; - struct tee_shm_pool_mgr *dma_buf_mgr; -}; - #define TEE_DEVICE_FLAG_REGISTERED 0x1 #define TEE_MAX_DEV_NAME_LEN 32 diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 7e7e762fc1de..f0a9cccd2f2c 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -31,14 +31,7 @@ static void release_registered_pages(struct tee_shm *shm) static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm) { if (shm->flags & TEE_SHM_POOL) { - struct tee_shm_pool_mgr *poolm; - - if (shm->flags & TEE_SHM_DMA_BUF) - poolm = teedev->pool->dma_buf_mgr; - else - poolm = teedev->pool->private_mgr; - - poolm->ops->free(poolm, shm); + teedev->pool->ops->free(teedev->pool, shm); } else if (shm->flags & TEE_SHM_REGISTER) { int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm); @@ -59,8 +52,8 @@ static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm) struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) { struct tee_device *teedev = ctx->teedev; - struct tee_shm_pool_mgr *poolm = NULL; struct tee_shm *shm; + size_t align; void *ret; int rc; @@ -93,12 +86,18 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) refcount_set(&shm->refcount, 1); shm->flags = flags | TEE_SHM_POOL; shm->ctx = ctx; - if (flags & TEE_SHM_DMA_BUF) - poolm = teedev->pool->dma_buf_mgr; - else - poolm = teedev->pool->private_mgr; + if (flags & TEE_SHM_DMA_BUF) { + align = PAGE_SIZE; + /* + * Request to register the shm in the pool allocator below + * if supported. 
+ */ + shm->flags |= TEE_SHM_REGISTER; + } else { + align = 2 * sizeof(long); + } - rc = poolm->ops->alloc(poolm, shm, size); + rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align); if (rc) { ret = ERR_PTR(rc); goto err_kfree; @@ -118,7 +117,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) return shm; err_pool_free: - poolm->ops->free(poolm, shm); + teedev->pool->ops->free(teedev->pool, shm); err_kfree: kfree(shm); err_dev_put: diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c index 54c11aa374a8..71e0f8ae69aa 100644 --- a/drivers/tee/tee_shm_pool.c +++ b/drivers/tee/tee_shm_pool.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2015 Linaro Limited + * Copyright (c) 2015, 2017, 2022 Linaro Limited */ #include #include @@ -9,14 +9,16 @@ #include #include "tee_private.h" -static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm, - struct tee_shm *shm, size_t size) +static int pool_op_gen_alloc(struct tee_shm_pool *pool, struct tee_shm *shm, + size_t size, size_t align) { unsigned long va; - struct gen_pool *genpool = poolm->private_data; - size_t s = roundup(size, 1 << genpool->min_alloc_order); + struct gen_pool *genpool = pool->private_data; + size_t a = max_t(size_t, align, BIT(genpool->min_alloc_order)); + struct genpool_data_align data = { .align = a }; + size_t s = roundup(size, a); - va = gen_pool_alloc(genpool, s); + va = gen_pool_alloc_algo(genpool, s, gen_pool_first_fit_align, &data); if (!va) return -ENOMEM; @@ -24,107 +26,67 @@ static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm, shm->kaddr = (void *)va; shm->paddr = gen_pool_virt_to_phys(genpool, va); shm->size = s; + /* + * This is from a static shared memory pool so no need to register + * each chunk, and no need to unregister later either. 
+ */ + shm->flags &= ~TEE_SHM_REGISTER; return 0; } -static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm, - struct tee_shm *shm) +static void pool_op_gen_free(struct tee_shm_pool *pool, struct tee_shm *shm) { - gen_pool_free(poolm->private_data, (unsigned long)shm->kaddr, + gen_pool_free(pool->private_data, (unsigned long)shm->kaddr, shm->size); shm->kaddr = NULL; } -static void pool_op_gen_destroy_poolmgr(struct tee_shm_pool_mgr *poolm) +static void pool_op_gen_destroy_pool(struct tee_shm_pool *pool) { - gen_pool_destroy(poolm->private_data); - kfree(poolm); + gen_pool_destroy(pool->private_data); + kfree(pool); } -static const struct tee_shm_pool_mgr_ops pool_ops_generic = { +static const struct tee_shm_pool_ops pool_ops_generic = { .alloc = pool_op_gen_alloc, .free = pool_op_gen_free, - .destroy_poolmgr = pool_op_gen_destroy_poolmgr, + .destroy_pool = pool_op_gen_destroy_pool, }; -struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr, - phys_addr_t paddr, - size_t size, - int min_alloc_order) +struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr, + phys_addr_t paddr, size_t size, + int min_alloc_order) { const size_t page_mask = PAGE_SIZE - 1; - struct tee_shm_pool_mgr *mgr; + struct tee_shm_pool *pool; int rc; /* Start and end must be page aligned */ if (vaddr & page_mask || paddr & page_mask || size & page_mask) return ERR_PTR(-EINVAL); - mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); - if (!mgr) + pool = kzalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) return ERR_PTR(-ENOMEM); - mgr->private_data = gen_pool_create(min_alloc_order, -1); - if (!mgr->private_data) { + pool->private_data = gen_pool_create(min_alloc_order, -1); + if (!pool->private_data) { rc = -ENOMEM; goto err; } - gen_pool_set_algo(mgr->private_data, gen_pool_best_fit, NULL); - rc = gen_pool_add_virt(mgr->private_data, vaddr, paddr, size, -1); + rc = gen_pool_add_virt(pool->private_data, vaddr, paddr, size, -1); if (rc) { - gen_pool_destroy(mgr->private_data); + gen_pool_destroy(pool->private_data); goto err; } - mgr->ops = &pool_ops_generic; + pool->ops = &pool_ops_generic; - return mgr; + return pool; err: - kfree(mgr); + kfree(pool); return ERR_PTR(rc); } -EXPORT_SYMBOL_GPL(tee_shm_pool_mgr_alloc_res_mem); - -static bool check_mgr_ops(struct tee_shm_pool_mgr *mgr) -{ - return mgr && mgr->ops && mgr->ops->alloc && mgr->ops->free && - mgr->ops->destroy_poolmgr; -} - -struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr, - struct tee_shm_pool_mgr *dmabuf_mgr) -{ - struct tee_shm_pool *pool; - - if (!check_mgr_ops(priv_mgr) || !check_mgr_ops(dmabuf_mgr)) - return ERR_PTR(-EINVAL); - - pool = kzalloc(sizeof(*pool), GFP_KERNEL); - if (!pool) - return ERR_PTR(-ENOMEM); - - pool->private_mgr = priv_mgr; - pool->dma_buf_mgr = dmabuf_mgr; - - return pool; -} -EXPORT_SYMBOL_GPL(tee_shm_pool_alloc); - -/** - * tee_shm_pool_free() - Free a shared memory pool - * @pool: The shared memory pool to free - * - * There must be no remaining shared memory allocated from this pool when - * this function is called. 
- */ -void tee_shm_pool_free(struct tee_shm_pool *pool) -{ - if (pool->private_mgr) - tee_shm_pool_mgr_destroy(pool->private_mgr); - if (pool->dma_buf_mgr) - tee_shm_pool_mgr_destroy(pool->dma_buf_mgr); - kfree(pool); -} -EXPORT_SYMBOL_GPL(tee_shm_pool_free); +EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem); diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index a4393c8c38f3..ed641dc314bd 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2015-2016 Linaro Limited + * Copyright (c) 2015-2022 Linaro Limited */ #ifndef __TEE_DRV_H @@ -221,62 +221,39 @@ struct tee_shm { }; /** - * struct tee_shm_pool_mgr - shared memory manager + * struct tee_shm_pool - shared memory pool * @ops: operations * @private_data: private data for the shared memory manager */ -struct tee_shm_pool_mgr { - const struct tee_shm_pool_mgr_ops *ops; +struct tee_shm_pool { + const struct tee_shm_pool_ops *ops; void *private_data; }; /** - * struct tee_shm_pool_mgr_ops - shared memory pool manager operations + * struct tee_shm_pool_ops - shared memory pool operations * @alloc: called when allocating shared memory * @free: called when freeing shared memory - * @destroy_poolmgr: called when destroying the pool manager + * @destroy_pool: called when destroying the pool */ -struct tee_shm_pool_mgr_ops { - int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm, - size_t size); - void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm); - void (*destroy_poolmgr)(struct tee_shm_pool_mgr *poolmgr); +struct tee_shm_pool_ops { + int (*alloc)(struct tee_shm_pool *pool, struct tee_shm *shm, + size_t size, size_t align); + void (*free)(struct tee_shm_pool *pool, struct tee_shm *shm); + void (*destroy_pool)(struct tee_shm_pool *pool); }; -/** - * tee_shm_pool_alloc() - Create a shared memory pool from shm managers - * @priv_mgr: manager for driver private shared memory allocations - * @dmabuf_mgr: manager for dma-buf shared memory allocations - * - * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied - * in @dmabuf, others will use the range provided by @priv. - * - * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. - */ -struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr, - struct tee_shm_pool_mgr *dmabuf_mgr); - /* - * tee_shm_pool_mgr_alloc_res_mem() - Create a shm manager for reserved - * memory + * tee_shm_pool_alloc_res_mem() - Create a shm manager for reserved memory * @vaddr: Virtual address of start of pool * @paddr: Physical address of start of pool * @size: Size in bytes of the pool * - * @returns pointer to a 'struct tee_shm_pool_mgr' or an ERR_PTR on failure. - */ -struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr, - phys_addr_t paddr, - size_t size, - int min_alloc_order); - -/** - * tee_shm_pool_mgr_destroy() - Free a shared memory manager + * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. */ -static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm) -{ - poolm->ops->destroy_poolmgr(poolm); -} +struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr, + phys_addr_t paddr, size_t size, + int min_alloc_order); /** * tee_shm_pool_free() - Free a shared memory pool @@ -285,7 +262,10 @@ static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm) * The must be no remaining shared memory allocated from this pool when * this function is called. 
*/ -void tee_shm_pool_free(struct tee_shm_pool *pool); +static inline void tee_shm_pool_free(struct tee_shm_pool *pool) +{ + pool->ops->destroy_pool(pool); +} /** * tee_get_drvdata() - Return driver_data pointer -- cgit v1.2.3 From 5d41f1b3e3282909b6bbceacb9aebe1d3c849a49 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 4 Feb 2022 10:33:54 +0100 Subject: tee: replace tee_shm_alloc() tee_shm_alloc() is replaced by three new functions, tee_shm_alloc_user_buf() - for user mode allocations, replacing passing the flags TEE_SHM_MAPPED | TEE_SHM_DMA_BUF tee_shm_alloc_kernel_buf() - for kernel mode allocations, slightly optimized compared to using the flags TEE_SHM_MAPPED | TEE_SHM_DMA_BUF. tee_shm_alloc_priv_buf() - primarily for TEE driver internal use. This also makes the interface easier to use as we can get rid of the somewhat hard to use flags parameter. The TEE subsystem and the TEE drivers are updated to use the new functions instead. Reviewed-by: Sumit Garg Signed-off-by: Jens Wiklander --- drivers/tee/optee/call.c | 2 +- drivers/tee/optee/device.c | 5 +- drivers/tee/optee/ffa_abi.c | 4 +- drivers/tee/optee/smc_abi.c | 6 +-- drivers/tee/tee_shm.c | 108 ++++++++++++++++++++++++++++---------------- include/linux/tee_drv.h | 16 +------ 6 files changed, 76 insertions(+), 65 deletions(-) (limited to 'drivers') diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c index b25cc1fac945..bd49ec934060 100644 --- a/drivers/tee/optee/call.c +++ b/drivers/tee/optee/call.c @@ -120,7 +120,7 @@ struct tee_shm *optee_get_msg_arg(struct tee_context *ctx, size_t num_params, if (optee->rpc_arg_count) sz += OPTEE_MSG_GET_ARG_SIZE(optee->rpc_arg_count); - shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV); + shm = tee_shm_alloc_priv_buf(ctx, sz); if (IS_ERR(shm)) return shm; diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c index 128a2d2a50a1..f3947be13e2e 100644 --- a/drivers/tee/optee/device.c +++ b/drivers/tee/optee/device.c @@ -121,10 +121,9 @@ static int __optee_enumerate_devices(u32 func) if (rc < 0 || !shm_size) goto out_sess; - device_shm = tee_shm_alloc(ctx, shm_size, - TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); + device_shm = tee_shm_alloc_kernel_buf(ctx, shm_size); if (IS_ERR(device_shm)) { - pr_err("tee_shm_alloc failed\n"); + pr_err("tee_shm_alloc_kernel_buf failed\n"); rc = PTR_ERR(device_shm); goto out_sess; } diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c index 91dd80945bf1..fb7345941024 100644 --- a/drivers/tee/optee/ffa_abi.c +++ b/drivers/tee/optee/ffa_abi.c @@ -440,8 +440,8 @@ static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx, shm = optee_rpc_cmd_alloc_suppl(ctx, arg->params[0].u.value.b); break; case OPTEE_RPC_SHM_TYPE_KERNEL: - shm = tee_shm_alloc(optee->ctx, arg->params[0].u.value.b, - TEE_SHM_MAPPED | TEE_SHM_PRIV); + shm = tee_shm_alloc_priv_buf(optee->ctx, + arg->params[0].u.value.b); break; default: arg->ret = TEEC_ERROR_BAD_PARAMETERS; diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c index 3dc8cbea1a24..7580d52b3852 100644 --- a/drivers/tee/optee/smc_abi.c +++ b/drivers/tee/optee/smc_abi.c @@ -661,8 +661,7 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, shm = optee_rpc_cmd_alloc_suppl(ctx, sz); break; case OPTEE_RPC_SHM_TYPE_KERNEL: - shm = tee_shm_alloc(optee->ctx, sz, - TEE_SHM_MAPPED | TEE_SHM_PRIV); + shm = tee_shm_alloc_priv_buf(optee->ctx, sz); break; default: arg->ret = TEEC_ERROR_BAD_PARAMETERS; @@ -787,8 +786,7 @@ static void optee_handle_rpc(struct 
tee_context *ctx, switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) { case OPTEE_SMC_RPC_FUNC_ALLOC: - shm = tee_shm_alloc(optee->ctx, param->a1, - TEE_SHM_MAPPED | TEE_SHM_PRIV); + shm = tee_shm_alloc_priv_buf(optee->ctx, param->a1); if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) { reg_pair_from_64(¶m->a1, ¶m->a2, pa); reg_pair_from_64(¶m->a4, ¶m->a5, diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index f0a9cccd2f2c..dd748d572691 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -49,25 +49,14 @@ static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm) tee_device_put(teedev); } -struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) +static struct tee_shm *shm_alloc_helper(struct tee_context *ctx, size_t size, + size_t align, u32 flags, int id) { struct tee_device *teedev = ctx->teedev; struct tee_shm *shm; - size_t align; void *ret; int rc; - if (!(flags & TEE_SHM_MAPPED)) { - dev_err(teedev->dev.parent, - "only mapped allocations supported\n"); - return ERR_PTR(-EINVAL); - } - - if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF | TEE_SHM_PRIV))) { - dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags); - return ERR_PTR(-EINVAL); - } - if (!tee_device_get(teedev)) return ERR_PTR(-EINVAL); @@ -84,18 +73,16 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) } refcount_set(&shm->refcount, 1); - shm->flags = flags | TEE_SHM_POOL; + shm->flags = flags; + shm->id = id; + + /* + * We're assigning this as it is needed if the shm is to be + * registered. If this function returns OK then the caller expected + * to call teedev_ctx_get() or clear shm->ctx in case it's not + * needed any longer. + */ shm->ctx = ctx; - if (flags & TEE_SHM_DMA_BUF) { - align = PAGE_SIZE; - /* - * Request to register the shm in the pool allocator below - * if supported. 
- */ - shm->flags |= TEE_SHM_REGISTER; - } else { - align = 2 * sizeof(long); - } rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align); if (rc) { @@ -103,28 +90,14 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) goto err_kfree; } - if (flags & TEE_SHM_DMA_BUF) { - mutex_lock(&teedev->mutex); - shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL); - mutex_unlock(&teedev->mutex); - if (shm->id < 0) { - ret = ERR_PTR(shm->id); - goto err_pool_free; - } - } - teedev_ctx_get(ctx); - return shm; -err_pool_free: - teedev->pool->ops->free(teedev->pool, shm); err_kfree: kfree(shm); err_dev_put: tee_device_put(teedev); return ret; } -EXPORT_SYMBOL_GPL(tee_shm_alloc); /** * tee_shm_alloc_user_buf() - Allocate shared memory for user space @@ -140,7 +113,36 @@ EXPORT_SYMBOL_GPL(tee_shm_alloc); */ struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size) { - return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); + u32 flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF | TEE_SHM_REGISTER | + TEE_SHM_POOL; + struct tee_device *teedev = ctx->teedev; + struct tee_shm *shm; + void *ret; + int id; + + mutex_lock(&teedev->mutex); + id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL); + mutex_unlock(&teedev->mutex); + if (id < 0) + return ERR_PTR(id); + + shm = shm_alloc_helper(ctx, size, PAGE_SIZE, flags, id); + if (IS_ERR(shm)) { + mutex_lock(&teedev->mutex); + idr_remove(&teedev->idr, id); + mutex_unlock(&teedev->mutex); + return shm; + } + + mutex_lock(&teedev->mutex); + ret = idr_replace(&teedev->idr, shm, id); + mutex_unlock(&teedev->mutex); + if (IS_ERR(ret)) { + tee_shm_free(shm); + return ret; + } + + return shm; } /** @@ -157,10 +159,36 @@ struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size) */ struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size) { - return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED); + u32 flags = TEE_SHM_MAPPED | TEE_SHM_REGISTER | TEE_SHM_POOL; + + return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1); } EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf); +/** + * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared + * kernel buffer + * @ctx: Context that allocates the shared memory + * @size: Requested size of shared memory + * + * This function returns similar shared memory as + * tee_shm_alloc_kernel_buf(), but with the difference that the memory + * might not be registered in secure world in case the driver supports + * passing memory not registered in advance. + * + * This function should normally only be used internally in the TEE + * drivers. 
+ * + * @returns a pointer to 'struct tee_shm' + */ +struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size) +{ + u32 flags = TEE_SHM_MAPPED | TEE_SHM_PRIV | TEE_SHM_POOL; + + return shm_alloc_helper(ctx, size, sizeof(long) * 2, flags, -1); +} +EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf); + struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, size_t length, u32 flags) { diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index ed641dc314bd..7f038f8787c7 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -273,21 +273,7 @@ static inline void tee_shm_pool_free(struct tee_shm_pool *pool) */ void *tee_get_drvdata(struct tee_device *teedev); -/** - * tee_shm_alloc() - Allocate shared memory - * @ctx: Context that allocates the shared memory - * @size: Requested size of shared memory - * @flags: Flags setting properties for the requested shared memory. - * - * Memory allocated as global shared memory is automatically freed when the - * TEE file pointer is closed. The @flags field uses the bits defined by - * TEE_SHM_* above. TEE_SHM_MAPPED must currently always be set. If - * TEE_SHM_DMA_BUF global shared memory will be allocated and associated - * with a dma-buf handle, else driver private memory. - * - * @returns a pointer to 'struct tee_shm' - */ -struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags); +struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size); struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size); /** -- cgit v1.2.3 From 924e32269228a4e2575e50e6fd6ed83cb57aa52a Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 4 Feb 2022 10:33:55 +0100 Subject: optee: add optee_pool_op_free_helper() Adds a common helper function to free a tee_shm allocated using the helper function optee_pool_op_alloc_helper(). 
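With both helpers in place, a transport's pool ops collapse to thin wrappers. A sketch for a hypothetical transport; my_shm_register() and my_shm_unregister() stand in for that transport's registration hooks and are not real functions:

static int pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
			 size_t size, size_t align)
{
	return optee_pool_op_alloc_helper(pool, shm, size, align,
					  my_shm_register);
}

static void pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
{
	optee_pool_op_free_helper(pool, shm, my_shm_unregister);
}

Passing NULL instead of an unregister hook, as the SMC backend does for TEE_SHM_PRIV allocations, skips the unregister step and only frees the pages.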
Reviewed-by: Sumit Garg Signed-off-by: Jens Wiklander --- drivers/tee/optee/core.c | 10 ++++++++++ drivers/tee/optee/ffa_abi.c | 4 +--- drivers/tee/optee/optee_private.h | 3 +++ drivers/tee/optee/smc_abi.c | 7 +++---- 4 files changed, 17 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index f4bccb5f0e93..daf947e98d14 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -69,6 +69,16 @@ err: return rc; } +void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm, + int (*shm_unregister)(struct tee_context *ctx, + struct tee_shm *shm)) +{ + if (shm_unregister) + shm_unregister(shm->ctx, shm); + free_pages((unsigned long)shm->kaddr, get_order(shm->size)); + shm->kaddr = NULL; +} + static void optee_bus_scan(struct work_struct *work) { WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP)); diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c index fb7345941024..8cd9c70a9268 100644 --- a/drivers/tee/optee/ffa_abi.c +++ b/drivers/tee/optee/ffa_abi.c @@ -379,9 +379,7 @@ static int pool_ffa_op_alloc(struct tee_shm_pool *pool, static void pool_ffa_op_free(struct tee_shm_pool *pool, struct tee_shm *shm) { - optee_ffa_shm_unregister(shm->ctx, shm); - free_pages((unsigned long)shm->kaddr, get_order(shm->size)); - shm->kaddr = NULL; + optee_pool_op_free_helper(pool, shm, optee_ffa_shm_unregister); } static void pool_ffa_op_destroy_pool(struct tee_shm_pool *pool) diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index df3a483bbf46..e77765c78878 100644 --- a/drivers/tee/optee/optee_private.h +++ b/drivers/tee/optee/optee_private.h @@ -236,6 +236,9 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm, struct page **pages, size_t num_pages, unsigned long start)); +void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm, + int (*shm_unregister)(struct tee_context *ctx, + struct tee_shm *shm)); void optee_remove_common(struct optee *optee); diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c index 7580d52b3852..ef3e27b602e6 100644 --- a/drivers/tee/optee/smc_abi.c +++ b/drivers/tee/optee/smc_abi.c @@ -558,10 +558,9 @@ static void pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm) { if (!(shm->flags & TEE_SHM_PRIV)) - optee_shm_unregister(shm->ctx, shm); - - free_pages((unsigned long)shm->kaddr, get_order(shm->size)); - shm->kaddr = NULL; + optee_pool_op_free_helper(pool, shm, optee_shm_unregister); + else + optee_pool_op_free_helper(pool, shm, NULL); } static void pool_op_destroy_pool(struct tee_shm_pool *pool) -- cgit v1.2.3 From 056d3fed3d1ff3f5d699be337f048f9eed2befaf Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 4 Feb 2022 10:33:56 +0100 Subject: tee: add tee_shm_register_{user,kernel}_buf() Adds the two new functions tee_shm_register_user_buf() and tee_shm_register_kernel_buf() which should be used instead of the old tee_shm_register(). This avoids having the caller supplying the flags parameter which exposes a bit more than desired of the internals of the TEE subsystem. 
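For a kernel-side client, sharing an existing buffer now takes a single call with no flags argument. A sketch, with ctx assumed to be an open tee_context and error handling abbreviated:

	void *buf = kzalloc(SZ_4K, GFP_KERNEL);
	struct tee_shm *shm;

	if (!buf)
		return -ENOMEM;

	shm = tee_shm_register_kernel_buf(ctx, buf, SZ_4K);
	if (IS_ERR(shm)) {
		kfree(buf);
		return PTR_ERR(shm);
	}
	/* hand shm to the TEE; tee_shm_free(shm) and kfree(buf) when done */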
Reviewed-by: Sumit Garg Signed-off-by: Jens Wiklander --- drivers/tee/tee_core.c | 3 +-- drivers/tee/tee_private.h | 2 ++ drivers/tee/tee_shm.c | 33 +++++++++++++++++++++++++++++++++ include/linux/tee_drv.h | 2 ++ 4 files changed, 38 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index a15812baaeb1..8aa1a4836b92 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -334,8 +334,7 @@ tee_ioctl_shm_register(struct tee_context *ctx, if (data.flags) return -EINVAL; - shm = tee_shm_register(ctx, data.addr, data.length, - TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED); + shm = tee_shm_register_user_buf(ctx, data.addr, data.length); if (IS_ERR(shm)) return PTR_ERR(shm); diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h index 7265f47c6d8e..409cadcc1cff 100644 --- a/drivers/tee/tee_private.h +++ b/drivers/tee/tee_private.h @@ -58,5 +58,7 @@ void teedev_ctx_get(struct tee_context *ctx); void teedev_ctx_put(struct tee_context *ctx); struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size); +struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx, + unsigned long addr, size_t length); #endif /*TEE_PRIVATE_H*/ diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index dd748d572691..359bab36e163 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -299,6 +299,39 @@ err: } EXPORT_SYMBOL_GPL(tee_shm_register); +/** + * tee_shm_register_user_buf() - Register a userspace shared memory buffer + * @ctx: Context that registers the shared memory + * @addr: The userspace address of the shared buffer + * @length: Length of the shared buffer + * + * @returns a pointer to 'struct tee_shm' + */ +struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx, + unsigned long addr, size_t length) +{ + return tee_shm_register(ctx, addr, length, + TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED); +} + +/** + * tee_shm_register_kernel_buf() - Register kernel memory to be shared with + * secure world + * @ctx: Context that registers the shared memory + * @addr: The buffer + * @length: Length of the buffer + * + * @returns a pointer to 'struct tee_shm' + */ + +struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx, + void *addr, size_t length) +{ + return tee_shm_register(ctx, (unsigned long)addr, length, + TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED); +} +EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf); + static int tee_shm_fop_release(struct inode *inode, struct file *filp) { tee_shm_put(filp->private_data); diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 7f038f8787c7..c9d2cc32a5ed 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -287,6 +287,8 @@ struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size); */ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, size_t length, u32 flags); +struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx, + void *addr, size_t length); /** * tee_shm_is_registered() - Check if shared memory object in registered in TEE -- cgit v1.2.3 From 53e16519c2eccdb2e1b123405466a29aaea1132e Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 4 Feb 2022 10:33:58 +0100 Subject: tee: replace tee_shm_register() tee_shm_register() is replaced by the previously introduced functions tee_shm_register_user_buf() and tee_shm_register_kernel_buf(). Since there are not external callers left we can remove tee_shm_register() and refactor the remains. 
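The core of the refactor, sketched from the hunks below, is that register_shm_helper() no longer interprets caller-supplied flag combinations: page pinning depends only on whether the buffer comes from user space or from the kernel, with the kernel-page handling split out into small helpers:

    if (flags & TEE_SHM_USER_MAPPED)
            rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
                                     shm->pages);
    else
            rc = shm_get_kernel_pages(start, num_pages, shm->pages);

The user-buffer wrapper additionally reserves an IDR slot up front and only publishes the shm there once registration has succeeded.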
Reviewed-by: Sumit Garg Signed-off-by: Jens Wiklander --- drivers/tee/tee_shm.c | 156 ++++++++++++++++++++++++++++-------------------- include/linux/tee_drv.h | 11 ---- 2 files changed, 90 insertions(+), 77 deletions(-) (limited to 'drivers') diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 359bab36e163..9db571253802 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -12,17 +12,43 @@ #include #include "tee_private.h" +static void shm_put_kernel_pages(struct page **pages, size_t page_count) +{ + size_t n; + + for (n = 0; n < page_count; n++) + put_page(pages[n]); +} + +static int shm_get_kernel_pages(unsigned long start, size_t page_count, + struct page **pages) +{ + struct kvec *kiov; + size_t n; + int rc; + + kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL); + if (!kiov) + return -ENOMEM; + + for (n = 0; n < page_count; n++) { + kiov[n].iov_base = (void *)(start + n * PAGE_SIZE); + kiov[n].iov_len = PAGE_SIZE; + } + + rc = get_kernel_pages(kiov, page_count, 0, pages); + kfree(kiov); + + return rc; +} + static void release_registered_pages(struct tee_shm *shm) { if (shm->pages) { - if (shm->flags & TEE_SHM_USER_MAPPED) { + if (shm->flags & TEE_SHM_USER_MAPPED) unpin_user_pages(shm->pages, shm->num_pages); - } else { - size_t n; - - for (n = 0; n < shm->num_pages; n++) - put_page(shm->pages[n]); - } + else + shm_put_kernel_pages(shm->pages, shm->num_pages); kfree(shm->pages); } @@ -189,28 +215,24 @@ struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size) } EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf); -struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, - size_t length, u32 flags) +static struct tee_shm * +register_shm_helper(struct tee_context *ctx, unsigned long addr, + size_t length, u32 flags, int id) { struct tee_device *teedev = ctx->teedev; - const u32 req_user_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED; - const u32 req_kernel_flags = TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED; struct tee_shm *shm; + unsigned long start; + size_t num_pages; void *ret; int rc; - int num_pages; - unsigned long start; - - if (flags != req_user_flags && flags != req_kernel_flags) - return ERR_PTR(-ENOTSUPP); if (!tee_device_get(teedev)) return ERR_PTR(-EINVAL); if (!teedev->desc->ops->shm_register || !teedev->desc->ops->shm_unregister) { - tee_device_put(teedev); - return ERR_PTR(-ENOTSUPP); + ret = ERR_PTR(-ENOTSUPP); + goto err_dev_put; } teedev_ctx_get(ctx); @@ -218,13 +240,13 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, shm = kzalloc(sizeof(*shm), GFP_KERNEL); if (!shm) { ret = ERR_PTR(-ENOMEM); - goto err; + goto err_ctx_put; } refcount_set(&shm->refcount, 1); - shm->flags = flags | TEE_SHM_REGISTER; + shm->flags = flags; shm->ctx = ctx; - shm->id = -1; + shm->id = id; addr = untagged_addr(addr); start = rounddown(addr, PAGE_SIZE); shm->offset = addr - start; @@ -233,71 +255,45 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL); if (!shm->pages) { ret = ERR_PTR(-ENOMEM); - goto err; + goto err_free_shm; } - if (flags & TEE_SHM_USER_MAPPED) { + if (flags & TEE_SHM_USER_MAPPED) rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE, shm->pages); - } else { - struct kvec *kiov; - int i; - - kiov = kcalloc(num_pages, sizeof(*kiov), GFP_KERNEL); - if (!kiov) { - ret = ERR_PTR(-ENOMEM); - goto err; - } - - for (i = 0; i < num_pages; i++) { - kiov[i].iov_base = (void *)(start + i * PAGE_SIZE); - 
kiov[i].iov_len = PAGE_SIZE; - } - - rc = get_kernel_pages(kiov, num_pages, 0, shm->pages); - kfree(kiov); - } + else + rc = shm_get_kernel_pages(start, num_pages, shm->pages); if (rc > 0) shm->num_pages = rc; if (rc != num_pages) { if (rc >= 0) rc = -ENOMEM; ret = ERR_PTR(rc); - goto err; - } - - mutex_lock(&teedev->mutex); - shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL); - mutex_unlock(&teedev->mutex); - - if (shm->id < 0) { - ret = ERR_PTR(shm->id); - goto err; + goto err_put_shm_pages; } rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages, shm->num_pages, start); if (rc) { ret = ERR_PTR(rc); - goto err; + goto err_put_shm_pages; } return shm; -err: - if (shm) { - if (shm->id >= 0) { - mutex_lock(&teedev->mutex); - idr_remove(&teedev->idr, shm->id); - mutex_unlock(&teedev->mutex); - } - release_registered_pages(shm); - } +err_put_shm_pages: + if (flags & TEE_SHM_USER_MAPPED) + unpin_user_pages(shm->pages, shm->num_pages); + else + shm_put_kernel_pages(shm->pages, shm->num_pages); + kfree(shm->pages); +err_free_shm: kfree(shm); +err_ctx_put: teedev_ctx_put(ctx); +err_dev_put: tee_device_put(teedev); return ret; } -EXPORT_SYMBOL_GPL(tee_shm_register); /** * tee_shm_register_user_buf() - Register a userspace shared memory buffer @@ -310,8 +306,35 @@ EXPORT_SYMBOL_GPL(tee_shm_register); struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx, unsigned long addr, size_t length) { - return tee_shm_register(ctx, addr, length, - TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED); + u32 flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED | TEE_SHM_REGISTER; + struct tee_device *teedev = ctx->teedev; + struct tee_shm *shm; + void *ret; + int id; + + mutex_lock(&teedev->mutex); + id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL); + mutex_unlock(&teedev->mutex); + if (id < 0) + return ERR_PTR(id); + + shm = register_shm_helper(ctx, addr, length, flags, id); + if (IS_ERR(shm)) { + mutex_lock(&teedev->mutex); + idr_remove(&teedev->idr, id); + mutex_unlock(&teedev->mutex); + return shm; + } + + mutex_lock(&teedev->mutex); + ret = idr_replace(&teedev->idr, shm, id); + mutex_unlock(&teedev->mutex); + if (IS_ERR(ret)) { + tee_shm_free(shm); + return ret; + } + + return shm; } /** @@ -327,8 +350,9 @@ struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx, struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx, void *addr, size_t length) { - return tee_shm_register(ctx, (unsigned long)addr, length, - TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED); + u32 flags = TEE_SHM_REGISTER | TEE_SHM_KERNEL_MAPPED; + + return register_shm_helper(ctx, (unsigned long)addr, length, flags, -1); } EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf); diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index c9d2cc32a5ed..a3b663ef0694 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -276,17 +276,6 @@ void *tee_get_drvdata(struct tee_device *teedev); struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size); struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size); -/** - * tee_shm_register() - Register shared memory buffer - * @ctx: Context that registers the shared memory - * @addr: Address is userspace of the shared buffer - * @length: Length of the shared buffer - * @flags: Flags setting properties for the requested shared memory. 
- * - * @returns a pointer to 'struct tee_shm' - */ -struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, - size_t length, u32 flags); struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx, void *addr, size_t length); -- cgit v1.2.3 From a45ea4efa358577c623d7353a6ba9af3c17f6ca0 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 4 Feb 2022 10:33:59 +0100 Subject: tee: refactor TEE_SHM_* flags Removes the redundant TEE_SHM_DMA_BUF, TEE_SHM_EXT_DMA_BUF, TEE_SHM_MAPPED and TEE_SHM_KERNEL_MAPPED flags. TEE_SHM_REGISTER is renamed to TEE_SHM_DYNAMIC in order to better match its usage. Assigns new values to the remaining flags to void gaps. Reviewed-by: Sumit Garg Signed-off-by: Jens Wiklander --- drivers/tee/optee/smc_abi.c | 4 ++-- drivers/tee/tee_shm.c | 23 +++++++++++------------ drivers/tee/tee_shm_pool.c | 2 +- include/linux/tee_drv.h | 21 +++++++++------------ 4 files changed, 23 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c index ef3e27b602e6..e924928c2673 100644 --- a/drivers/tee/optee/smc_abi.c +++ b/drivers/tee/optee/smc_abi.c @@ -238,7 +238,7 @@ static int optee_to_msg_param(struct optee *optee, case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: - if (tee_shm_is_registered(p->u.memref.shm)) + if (tee_shm_is_dynamic(p->u.memref.shm)) rc = to_msg_param_reg_mem(mp, p); else rc = to_msg_param_tmp_mem(mp, p); @@ -679,7 +679,7 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, sz = tee_shm_get_size(shm); - if (tee_shm_is_registered(shm)) { + if (tee_shm_is_dynamic(shm)) { struct page **pages; u64 *pages_list; size_t page_num; diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 9db571253802..f31e29e8f1ca 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -58,7 +58,7 @@ static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm) { if (shm->flags & TEE_SHM_POOL) { teedev->pool->ops->free(teedev->pool, shm); - } else if (shm->flags & TEE_SHM_REGISTER) { + } else if (shm->flags & TEE_SHM_DYNAMIC) { int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm); if (rc) @@ -139,8 +139,7 @@ err_dev_put: */ struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size) { - u32 flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF | TEE_SHM_REGISTER | - TEE_SHM_POOL; + u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL; struct tee_device *teedev = ctx->teedev; struct tee_shm *shm; void *ret; @@ -185,7 +184,7 @@ struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size) */ struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size) { - u32 flags = TEE_SHM_MAPPED | TEE_SHM_REGISTER | TEE_SHM_POOL; + u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL; return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1); } @@ -209,7 +208,7 @@ EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf); */ struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size) { - u32 flags = TEE_SHM_MAPPED | TEE_SHM_PRIV | TEE_SHM_POOL; + u32 flags = TEE_SHM_PRIV | TEE_SHM_POOL; return shm_alloc_helper(ctx, size, sizeof(long) * 2, flags, -1); } @@ -306,7 +305,7 @@ err_dev_put: struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx, unsigned long addr, size_t length) { - u32 flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED | TEE_SHM_REGISTER; + u32 flags = TEE_SHM_USER_MAPPED | TEE_SHM_DYNAMIC; struct tee_device *teedev = ctx->teedev; 
struct tee_shm *shm; void *ret; @@ -350,7 +349,7 @@ struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx, struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx, void *addr, size_t length) { - u32 flags = TEE_SHM_REGISTER | TEE_SHM_KERNEL_MAPPED; + u32 flags = TEE_SHM_DYNAMIC; return register_shm_helper(ctx, (unsigned long)addr, length, flags, -1); } @@ -394,7 +393,7 @@ int tee_shm_get_fd(struct tee_shm *shm) { int fd; - if (!(shm->flags & TEE_SHM_DMA_BUF)) + if (shm->id < 0) return -EINVAL; /* matched by tee_shm_put() in tee_shm_op_release() */ @@ -424,7 +423,7 @@ EXPORT_SYMBOL_GPL(tee_shm_free); */ int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa) { - if (!(shm->flags & TEE_SHM_MAPPED)) + if (!shm->kaddr) return -EINVAL; /* Check that we're in the range of the shm */ if ((char *)va < (char *)shm->kaddr) @@ -446,7 +445,7 @@ EXPORT_SYMBOL_GPL(tee_shm_va2pa); */ int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va) { - if (!(shm->flags & TEE_SHM_MAPPED)) + if (!shm->kaddr) return -EINVAL; /* Check that we're in the range of the shm */ if (pa < shm->paddr) @@ -474,7 +473,7 @@ EXPORT_SYMBOL_GPL(tee_shm_pa2va); */ void *tee_shm_get_va(struct tee_shm *shm, size_t offs) { - if (!(shm->flags & TEE_SHM_MAPPED)) + if (!shm->kaddr) return ERR_PTR(-EINVAL); if (offs >= shm->size) return ERR_PTR(-EINVAL); @@ -549,7 +548,7 @@ void tee_shm_put(struct tee_shm *shm) * the refcount_inc() in tee_shm_get_from_id() never starts * from 0. */ - if (shm->flags & TEE_SHM_DMA_BUF) + if (shm->id >= 0) idr_remove(&teedev->idr, shm->id); do_release = true; } diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c index 71e0f8ae69aa..058bfbac657a 100644 --- a/drivers/tee/tee_shm_pool.c +++ b/drivers/tee/tee_shm_pool.c @@ -30,7 +30,7 @@ static int pool_op_gen_alloc(struct tee_shm_pool *pool, struct tee_shm *shm, * This is from a static shared memory pool so no need to register * each chunk, and no need to unregister later either. */ - shm->flags &= ~TEE_SHM_REGISTER; + shm->flags &= ~TEE_SHM_DYNAMIC; return 0; } diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index a3b663ef0694..911cad324acc 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -20,14 +20,11 @@ * specific TEE driver. 
*/ -#define TEE_SHM_MAPPED BIT(0) /* Memory mapped by the kernel */ -#define TEE_SHM_DMA_BUF BIT(1) /* Memory with dma-buf handle */ -#define TEE_SHM_EXT_DMA_BUF BIT(2) /* Memory with dma-buf handle */ -#define TEE_SHM_REGISTER BIT(3) /* Memory registered in secure world */ -#define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */ -#define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */ -#define TEE_SHM_KERNEL_MAPPED BIT(6) /* Memory mapped in kernel space */ -#define TEE_SHM_PRIV BIT(7) /* Memory private to TEE driver */ +#define TEE_SHM_DYNAMIC BIT(0) /* Dynamic shared memory registered */ + /* in secure world */ +#define TEE_SHM_USER_MAPPED BIT(1) /* Memory mapped in user space */ +#define TEE_SHM_POOL BIT(2) /* Memory allocated from pool */ +#define TEE_SHM_PRIV BIT(3) /* Memory private to TEE driver */ struct device; struct tee_device; @@ -280,13 +277,13 @@ struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx, void *addr, size_t length); /** - * tee_shm_is_registered() - Check if shared memory object in registered in TEE + * tee_shm_is_dynamic() - Check if shared memory object is of the dynamic kind * @shm: Shared memory handle - * @returns true if object is registered in TEE + * @returns true if object is dynamic shared memory */ -static inline bool tee_shm_is_registered(struct tee_shm *shm) +static inline bool tee_shm_is_dynamic(struct tee_shm *shm) { - return shm && (shm->flags & TEE_SHM_REGISTER); + return shm && (shm->flags & TEE_SHM_DYNAMIC); } /** -- cgit v1.2.3 From 16dcfe972b7e6f3fbe08771b8d8feaf9044b54fa Mon Sep 17 00:00:00 2001 From: Ming Qian Date: Wed, 26 Jan 2022 11:09:31 +0800 Subject: firmware: imx: scu-pd: imx8q: add vpu mu resources the vpu core depends on the mu resources. if they're missed, the vpu can't work. Signed-off-by: Ming Qian Signed-off-by: Shijie Qin Signed-off-by: Zhou Peng Signed-off-by: Shawn Guo --- drivers/firmware/imx/scu-pd.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c index ff6569c4a53b..af3d057e6421 100644 --- a/drivers/firmware/imx/scu-pd.c +++ b/drivers/firmware/imx/scu-pd.c @@ -155,6 +155,10 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = { { "vpu-pid", IMX_SC_R_VPU_PID0, 8, true, 0 }, { "vpu-dec0", IMX_SC_R_VPU_DEC_0, 1, false, 0 }, { "vpu-enc0", IMX_SC_R_VPU_ENC_0, 1, false, 0 }, + { "vpu-enc1", IMX_SC_R_VPU_ENC_1, 1, false, 0 }, + { "vpu-mu0", IMX_SC_R_VPU_MU_0, 1, false, 0 }, + { "vpu-mu1", IMX_SC_R_VPU_MU_1, 1, false, 0 }, + { "vpu-mu2", IMX_SC_R_VPU_MU_2, 1, false, 0 }, /* GPU SS */ { "gpu0-pid", IMX_SC_R_GPU_0_PID0, 4, true, 0 }, -- cgit v1.2.3 From 42e90eb53bf3f6fc43c667dd377779e3ebeb748a Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Thu, 17 Feb 2022 13:12:27 +0000 Subject: firmware: arm_scmi: Add a virtio channel refcount Currently SCMI VirtIO channels are marked with a ready flag and related lock to track channel lifetime and support proper synchronization at shutdown when virtqueues have to be stopped. This leads to some extended spinlocked sections with IRQs off on the RX path to keep hold of the ready flag and does not scale well especially when SCMI VirtIO polling mode will be introduced. Add an SCMI VirtIO channel dedicated refcount to track active users on both the TX and the RX path and properly enforce synchronization and cleanup at shutdown, inhibiting further usage of the channel once freed. 
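Every TX/RX path then follows the same acquire/release pattern (sketched from the patch below); the last release is what completes the shutdown waiter set up by scmi_vio_channel_cleanup_sync():

    if (!scmi_vio_channel_acquire(vioch))
            return -EINVAL;      /* channel already being torn down */

    /* ... work on vioch->vqueue under vioch->lock ... */

    scmi_vio_channel_release(vioch);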
Link: https://lore.kernel.org/r/20220217131234.50328-2-cristian.marussi@arm.com Cc: "Michael S. Tsirkin" Cc: Igor Skalkin Cc: Peter Hilber Cc: virtualization@lists.linux-foundation.org Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/virtio.c | 143 ++++++++++++++++++++++++------------- 1 file changed, 92 insertions(+), 51 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c index 97d7cf53b774..71b016d1a655 100644 --- a/drivers/firmware/arm_scmi/virtio.c +++ b/drivers/firmware/arm_scmi/virtio.c @@ -17,7 +17,9 @@ * virtqueue. Access to each virtqueue is protected by spinlocks. */ +#include #include +#include #include #include #include @@ -27,6 +29,7 @@ #include "common.h" +#define VIRTIO_MAX_RX_TIMEOUT_MS 60000 #define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */ #define VIRTIO_SCMI_MAX_PDU_SIZE \ (VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD) @@ -39,23 +42,21 @@ * @cinfo: SCMI Tx or Rx channel * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only * @is_rx: Whether channel is an Rx channel - * @ready: Whether transport user is ready to hear about channel * @max_msg: Maximum number of pending messages for this channel. - * @lock: Protects access to all members except ready. - * @ready_lock: Protects access to ready. If required, it must be taken before - * lock. + * @lock: Protects access to all members except users. + * @shutdown_done: A reference to a completion used when freeing this channel. + * @users: A reference count to currently active users of this channel. */ struct scmi_vio_channel { struct virtqueue *vqueue; struct scmi_chan_info *cinfo; struct list_head free_list; bool is_rx; - bool ready; unsigned int max_msg; - /* lock to protect access to all members except ready. */ + /* lock to protect access to all members except users. */ spinlock_t lock; - /* lock to rotects access to ready flag. */ - spinlock_t ready_lock; + struct completion *shutdown_done; + refcount_t users; }; /** @@ -76,6 +77,63 @@ struct scmi_vio_msg { /* Only one SCMI VirtIO device can possibly exist */ static struct virtio_device *scmi_vdev; +static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch, + struct scmi_chan_info *cinfo) +{ + unsigned long flags; + + spin_lock_irqsave(&vioch->lock, flags); + cinfo->transport_info = vioch; + /* Indirectly setting channel not available any more */ + vioch->cinfo = cinfo; + spin_unlock_irqrestore(&vioch->lock, flags); + + refcount_set(&vioch->users, 1); +} + +static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch) +{ + return refcount_inc_not_zero(&vioch->users); +} + +static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch) +{ + if (refcount_dec_and_test(&vioch->users)) { + unsigned long flags; + + spin_lock_irqsave(&vioch->lock, flags); + if (vioch->shutdown_done) { + vioch->cinfo = NULL; + complete(vioch->shutdown_done); + } + spin_unlock_irqrestore(&vioch->lock, flags); + } +} + +static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch) +{ + unsigned long flags; + DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done); + + /* + * Prepare to wait for the last release if not already released + * or in progress. 
+ */ + spin_lock_irqsave(&vioch->lock, flags); + if (!vioch->cinfo || vioch->shutdown_done) { + spin_unlock_irqrestore(&vioch->lock, flags); + return; + } + vioch->shutdown_done = &vioch_shutdown_done; + virtio_break_device(vioch->vqueue->vdev); + spin_unlock_irqrestore(&vioch->lock, flags); + + scmi_vio_channel_release(vioch); + + /* Let any possibly concurrent RX path release the channel */ + wait_for_completion(vioch->shutdown_done); +} + static bool scmi_vio_have_vq_rx(struct virtio_device *vdev) { return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS); @@ -119,7 +177,7 @@ static void scmi_finalize_message(struct scmi_vio_channel *vioch, static void scmi_vio_complete_cb(struct virtqueue *vqueue) { - unsigned long ready_flags; + unsigned long flags; unsigned int length; struct scmi_vio_channel *vioch; struct scmi_vio_msg *msg; @@ -130,27 +188,24 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue) vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index]; for (;;) { - spin_lock_irqsave(&vioch->ready_lock, ready_flags); - - if (!vioch->ready) { - if (!cb_enabled) - (void)virtqueue_enable_cb(vqueue); - goto unlock_ready_out; - } + if (!scmi_vio_channel_acquire(vioch)) + return; - /* IRQs already disabled here no need to irqsave */ - spin_lock(&vioch->lock); + spin_lock_irqsave(&vioch->lock, flags); if (cb_enabled) { virtqueue_disable_cb(vqueue); cb_enabled = false; } msg = virtqueue_get_buf(vqueue, &length); if (!msg) { - if (virtqueue_enable_cb(vqueue)) - goto unlock_out; + if (virtqueue_enable_cb(vqueue)) { + spin_unlock_irqrestore(&vioch->lock, flags); + scmi_vio_channel_release(vioch); + return; + } cb_enabled = true; } - spin_unlock(&vioch->lock); + spin_unlock_irqrestore(&vioch->lock, flags); if (msg) { msg->rx_len = length; @@ -161,19 +216,14 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue) } /* - * Release ready_lock and re-enable IRQs between loop iterations - * to allow virtio_chan_free() to possibly kick in and set the - * flag vioch->ready to false even in between processing of - * messages, so as to force outstanding messages to be ignored - * when system is shutting down. + * Release vio channel between loop iterations to allow + * virtio_chan_free() to eventually fully release it when + * shutting down; in such a case, any outstanding message will + * be ignored since this loop will bail out at the next + * iteration. 
*/ - spin_unlock_irqrestore(&vioch->ready_lock, ready_flags); + scmi_vio_channel_release(vioch); } - -unlock_out: - spin_unlock(&vioch->lock); -unlock_ready_out: - spin_unlock_irqrestore(&vioch->ready_lock, ready_flags); } static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" }; @@ -273,35 +323,20 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, } } - spin_lock_irqsave(&vioch->lock, flags); - cinfo->transport_info = vioch; - /* Indirectly setting channel not available any more */ - vioch->cinfo = cinfo; - spin_unlock_irqrestore(&vioch->lock, flags); - - spin_lock_irqsave(&vioch->ready_lock, flags); - vioch->ready = true; - spin_unlock_irqrestore(&vioch->ready_lock, flags); + scmi_vio_channel_ready(vioch, cinfo); return 0; } static int virtio_chan_free(int id, void *p, void *data) { - unsigned long flags; struct scmi_chan_info *cinfo = p; struct scmi_vio_channel *vioch = cinfo->transport_info; - spin_lock_irqsave(&vioch->ready_lock, flags); - vioch->ready = false; - spin_unlock_irqrestore(&vioch->ready_lock, flags); + scmi_vio_channel_cleanup_sync(vioch); scmi_free_channel(cinfo, data, id); - spin_lock_irqsave(&vioch->lock, flags); - vioch->cinfo = NULL; - spin_unlock_irqrestore(&vioch->lock, flags); - return 0; } @@ -316,10 +351,14 @@ static int virtio_send_message(struct scmi_chan_info *cinfo, int rc; struct scmi_vio_msg *msg; + if (!scmi_vio_channel_acquire(vioch)) + return -EINVAL; + spin_lock_irqsave(&vioch->lock, flags); if (list_empty(&vioch->free_list)) { spin_unlock_irqrestore(&vioch->lock, flags); + scmi_vio_channel_release(vioch); return -EBUSY; } @@ -342,6 +381,8 @@ static int virtio_send_message(struct scmi_chan_info *cinfo, spin_unlock_irqrestore(&vioch->lock, flags); + scmi_vio_channel_release(vioch); + return rc; } @@ -416,7 +457,6 @@ static int scmi_vio_probe(struct virtio_device *vdev) unsigned int sz; spin_lock_init(&channels[i].lock); - spin_lock_init(&channels[i].ready_lock); INIT_LIST_HEAD(&channels[i].free_list); channels[i].vqueue = vqs[i]; @@ -503,7 +543,8 @@ const struct scmi_desc scmi_virtio_desc = { .transport_init = virtio_scmi_init, .transport_exit = virtio_scmi_exit, .ops = &scmi_virtio_ops, - .max_rx_timeout_ms = 60000, /* for non-realtime virtio devices */ + /* for non-realtime virtio devices */ + .max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS, .max_msg = 0, /* overridden by virtio_get_max_msg() */ .max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE, }; -- cgit v1.2.3 From 9a1699bda095e1ee2f5d0aa43a91d1fccca8b69c Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Thu, 17 Feb 2022 13:12:28 +0000 Subject: firmware: arm_scmi: Review virtio free_list handling Add a new spinlock dedicated to the access of the TX free list and a couple of helpers to get and put messages back and forth from the free_list. Link: https://lore.kernel.org/r/20220217131234.50328-3-cristian.marussi@arm.com Cc: "Michael S. 
Tsirkin" Cc: Igor Skalkin Cc: Peter Hilber Cc: virtualization@lists.linux-foundation.org Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/virtio.c | 88 ++++++++++++++++++++++++-------------- 1 file changed, 57 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c index 71b016d1a655..483b192fcc2d 100644 --- a/drivers/firmware/arm_scmi/virtio.c +++ b/drivers/firmware/arm_scmi/virtio.c @@ -40,20 +40,23 @@ * * @vqueue: Associated virtqueue * @cinfo: SCMI Tx or Rx channel + * @free_lock: Protects access to the @free_list. * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only * @is_rx: Whether channel is an Rx channel * @max_msg: Maximum number of pending messages for this channel. - * @lock: Protects access to all members except users. + * @lock: Protects access to all members except users, free_list. * @shutdown_done: A reference to a completion used when freeing this channel. * @users: A reference count to currently active users of this channel. */ struct scmi_vio_channel { struct virtqueue *vqueue; struct scmi_chan_info *cinfo; + /* lock to protect access to the free list. */ + spinlock_t free_lock; struct list_head free_list; bool is_rx; unsigned int max_msg; - /* lock to protect access to all members except users. */ + /* lock to protect access to all members except users, free_list */ spinlock_t lock; struct completion *shutdown_done; refcount_t users; @@ -134,18 +137,49 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch) wait_for_completion(vioch->shutdown_done); } +/* Assumes to be called with vio channel acquired already */ +static struct scmi_vio_msg * +scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch) +{ + unsigned long flags; + struct scmi_vio_msg *msg; + + spin_lock_irqsave(&vioch->free_lock, flags); + if (list_empty(&vioch->free_list)) { + spin_unlock_irqrestore(&vioch->free_lock, flags); + return NULL; + } + + msg = list_first_entry(&vioch->free_list, typeof(*msg), list); + list_del_init(&msg->list); + spin_unlock_irqrestore(&vioch->free_lock, flags); + + return msg; +} + +/* Assumes to be called with vio channel acquired already */ +static void scmi_virtio_put_free_msg(struct scmi_vio_channel *vioch, + struct scmi_vio_msg *msg) +{ + unsigned long flags; + + spin_lock_irqsave(&vioch->free_lock, flags); + list_add_tail(&msg->list, &vioch->free_list); + spin_unlock_irqrestore(&vioch->free_lock, flags); +} + static bool scmi_vio_have_vq_rx(struct virtio_device *vdev) { return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS); } static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, - struct scmi_vio_msg *msg, - struct device *dev) + struct scmi_vio_msg *msg) { struct scatterlist sg_in; int rc; unsigned long flags; + struct device *dev = &vioch->vqueue->vdev->dev; sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE); @@ -162,17 +196,17 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, return rc; } +/* + * Assume to be called with channel already acquired or not ready at all; + * vioch->lock MUST NOT have been already acquired. 
+ */ static void scmi_finalize_message(struct scmi_vio_channel *vioch, struct scmi_vio_msg *msg) { - if (vioch->is_rx) { - scmi_vio_feed_vq_rx(vioch, msg, vioch->cinfo->dev); - } else { - /* Here IRQs are assumed to be already disabled by the caller */ - spin_lock(&vioch->lock); - list_add(&msg->list, &vioch->free_list); - spin_unlock(&vioch->lock); - } + if (vioch->is_rx) + scmi_vio_feed_vq_rx(vioch, msg); + else + scmi_virtio_put_free_msg(vioch, msg); } static void scmi_vio_complete_cb(struct virtqueue *vqueue) @@ -284,7 +318,6 @@ static bool virtio_chan_available(struct device *dev, int idx) static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx) { - unsigned long flags; struct scmi_vio_channel *vioch; int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX; int i; @@ -314,13 +347,7 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, if (!msg->input) return -ENOMEM; - if (tx) { - spin_lock_irqsave(&vioch->lock, flags); - list_add_tail(&msg->list, &vioch->free_list); - spin_unlock_irqrestore(&vioch->lock, flags); - } else { - scmi_vio_feed_vq_rx(vioch, msg, cinfo->dev); - } + scmi_finalize_message(vioch, msg); } scmi_vio_channel_ready(vioch, cinfo); @@ -354,33 +381,31 @@ static int virtio_send_message(struct scmi_chan_info *cinfo, if (!scmi_vio_channel_acquire(vioch)) return -EINVAL; - spin_lock_irqsave(&vioch->lock, flags); - - if (list_empty(&vioch->free_list)) { - spin_unlock_irqrestore(&vioch->lock, flags); + msg = scmi_virtio_get_free_msg(vioch); + if (!msg) { scmi_vio_channel_release(vioch); return -EBUSY; } - msg = list_first_entry(&vioch->free_list, typeof(*msg), list); - list_del(&msg->list); - msg_tx_prepare(msg->request, xfer); sg_init_one(&sg_out, msg->request, msg_command_size(xfer)); sg_init_one(&sg_in, msg->input, msg_response_size(xfer)); + spin_lock_irqsave(&vioch->lock, flags); + rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC); - if (rc) { - list_add(&msg->list, &vioch->free_list); + if (rc) dev_err(vioch->cinfo->dev, "failed to add to TX virtqueue (%d)\n", rc); - } else { + else virtqueue_kick(vioch->vqueue); - } spin_unlock_irqrestore(&vioch->lock, flags); + if (rc) + scmi_virtio_put_free_msg(vioch, msg); + scmi_vio_channel_release(vioch); return rc; @@ -457,6 +482,7 @@ static int scmi_vio_probe(struct virtio_device *vdev) unsigned int sz; spin_lock_init(&channels[i].lock); + spin_lock_init(&channels[i].free_lock); INIT_LIST_HEAD(&channels[i].free_list); channels[i].vqueue = vqs[i]; -- cgit v1.2.3 From 5a3b7185c47c061330bdd71233126181d55ed3d5 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Thu, 17 Feb 2022 13:12:29 +0000 Subject: firmware: arm_scmi: Add atomic mode support to virtio transport Add support for .mark_txdone and .poll_done transport operations to SCMI VirtIO transport as pre-requisites to enable atomic operations. Add a Kernel configuration option to enable SCMI VirtIO transport polling and atomic mode for selected SCMI transactions while leaving it default disabled. Link: https://lore.kernel.org/r/20220217131234.50328-4-cristian.marussi@arm.com Cc: "Michael S. 
Tsirkin" Cc: Igor Skalkin Cc: Peter Hilber Cc: virtualization@lists.linux-foundation.org Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/Kconfig | 15 ++ drivers/firmware/arm_scmi/driver.c | 6 +- drivers/firmware/arm_scmi/virtio.c | 390 +++++++++++++++++++++++++++++++++++-- 3 files changed, 391 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig index d429326433d1..7794bd41eaa0 100644 --- a/drivers/firmware/arm_scmi/Kconfig +++ b/drivers/firmware/arm_scmi/Kconfig @@ -118,6 +118,21 @@ config ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE the ones implemented by kvmtool) and let the core Kernel VirtIO layer take care of the needed conversions, say N. +config ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE + bool "Enable atomic mode for SCMI VirtIO transport" + depends on ARM_SCMI_TRANSPORT_VIRTIO + help + Enable support of atomic operation for SCMI VirtIO based transport. + + If you want the SCMI VirtIO based transport to operate in atomic + mode, avoiding any kind of sleeping behaviour for selected + transactions on the TX path, answer Y. + + Enabling atomic mode operations allows any SCMI driver using this + transport to optionally ask for atomic SCMI transactions and operate + in atomic context too, at the price of using a number of busy-waiting + primitives all over instead. If unsure say N. + endif #ARM_SCMI_PROTOCOL config ARM_SCMI_POWER_DOMAIN diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index c2e7897ff56e..4fd5a35ffa2f 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -648,7 +648,8 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, unpack_scmi_header(msg_hdr, &xfer->hdr); if (priv) - xfer->priv = priv; + /* Ensure order between xfer->priv store and following ops */ + smp_store_mb(xfer->priv, priv); info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size, xfer); scmi_notify(cinfo->handle, xfer->hdr.protocol_id, @@ -680,7 +681,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, xfer->rx.len = info->desc->max_msg_size; if (priv) - xfer->priv = priv; + /* Ensure order between xfer->priv store and following ops */ + smp_store_mb(xfer->priv, priv); info->desc->ops->fetch_response(cinfo, xfer); trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c index 483b192fcc2d..14709dbc96a1 100644 --- a/drivers/firmware/arm_scmi/virtio.c +++ b/drivers/firmware/arm_scmi/virtio.c @@ -3,8 +3,8 @@ * Virtio Transport driver for Arm System Control and Management Interface * (SCMI). * - * Copyright (C) 2020-2021 OpenSynergy. - * Copyright (C) 2021 ARM Ltd. + * Copyright (C) 2020-2022 OpenSynergy. + * Copyright (C) 2021-2022 ARM Ltd. */ /** @@ -42,9 +42,14 @@ * @cinfo: SCMI Tx or Rx channel * @free_lock: Protects access to the @free_list. * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only + * @deferred_tx_work: Worker for TX deferred replies processing + * @deferred_tx_wq: Workqueue for TX deferred replies + * @pending_lock: Protects access to the @pending_cmds_list. + * @pending_cmds_list: List of pre-fetched commands queueud for later processing * @is_rx: Whether channel is an Rx channel * @max_msg: Maximum number of pending messages for this channel. - * @lock: Protects access to all members except users, free_list. 
+ * @lock: Protects access to all members except users, free_list and + * pending_cmds_list. * @shutdown_done: A reference to a completion used when freeing this channel. * @users: A reference count to currently active users of this channel. */ @@ -54,14 +59,29 @@ struct scmi_vio_channel { /* lock to protect access to the free list. */ spinlock_t free_lock; struct list_head free_list; + /* lock to protect access to the pending list. */ + spinlock_t pending_lock; + struct list_head pending_cmds_list; + struct work_struct deferred_tx_work; + struct workqueue_struct *deferred_tx_wq; bool is_rx; unsigned int max_msg; - /* lock to protect access to all members except users, free_list */ + /* + * Lock to protect access to all members except users, free_list and + * pending_cmds_list + */ spinlock_t lock; struct completion *shutdown_done; refcount_t users; }; +enum poll_states { + VIO_MSG_NOT_POLLED, + VIO_MSG_POLL_TIMEOUT, + VIO_MSG_POLLING, + VIO_MSG_POLL_DONE, +}; + /** * struct scmi_vio_msg - Transport PDU information * @@ -69,12 +89,23 @@ struct scmi_vio_channel { * @input: SDU used for (delayed) responses and notifications * @list: List which scmi_vio_msg may be part of * @rx_len: Input SDU size in bytes, once input has been received + * @poll_idx: Last used index registered for polling purposes if this message + * transaction reply was configured for polling. + * @poll_status: Polling state for this message. + * @poll_lock: A lock to protect @poll_status + * @users: A reference count to track this message users and avoid premature + * freeing (and reuse) when polling and IRQ execution paths interleave. */ struct scmi_vio_msg { struct scmi_msg_payld *request; struct scmi_msg_payld *input; struct list_head list; unsigned int rx_len; + unsigned int poll_idx; + enum poll_states poll_status; + /* Lock to protect access to poll_status */ + spinlock_t poll_lock; + refcount_t users; }; /* Only one SCMI VirtIO device can possibly exist */ @@ -117,6 +148,7 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch) { unsigned long flags; DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done); + void *deferred_wq = NULL; /* * Prepare to wait for the last release if not already released @@ -127,10 +159,19 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch) spin_unlock_irqrestore(&vioch->lock, flags); return; } + vioch->shutdown_done = &vioch_shutdown_done; virtio_break_device(vioch->vqueue->vdev); + if (!vioch->is_rx && vioch->deferred_tx_wq) { + deferred_wq = vioch->deferred_tx_wq; + /* Cannot be kicked anymore after this...*/ + vioch->deferred_tx_wq = NULL; + } spin_unlock_irqrestore(&vioch->lock, flags); + if (deferred_wq) + destroy_workqueue(deferred_wq); + scmi_vio_channel_release(vioch); /* Let any possibly concurrent RX path release the channel */ @@ -154,18 +195,34 @@ scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch) list_del_init(&msg->list); spin_unlock_irqrestore(&vioch->free_lock, flags); + /* Still no users, no need to acquire poll_lock */ + msg->poll_status = VIO_MSG_NOT_POLLED; + refcount_set(&msg->users, 1); + return msg; } +static inline bool scmi_vio_msg_acquire(struct scmi_vio_msg *msg) +{ + return refcount_inc_not_zero(&msg->users); +} + /* Assumes to be called with vio channel acquired already */ -static void scmi_virtio_put_free_msg(struct scmi_vio_channel *vioch, - struct scmi_vio_msg *msg) +static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch, + struct scmi_vio_msg *msg) { - unsigned long flags; + bool ret; - 
spin_lock_irqsave(&vioch->free_lock, flags); - list_add_tail(&msg->list, &vioch->free_list); - spin_unlock_irqrestore(&vioch->free_lock, flags); + ret = refcount_dec_and_test(&msg->users); + if (ret) { + unsigned long flags; + + spin_lock_irqsave(&vioch->free_lock, flags); + list_add_tail(&msg->list, &vioch->free_list); + spin_unlock_irqrestore(&vioch->free_lock, flags); + } + + return ret; } static bool scmi_vio_have_vq_rx(struct virtio_device *vdev) @@ -206,7 +263,7 @@ static void scmi_finalize_message(struct scmi_vio_channel *vioch, if (vioch->is_rx) scmi_vio_feed_vq_rx(vioch, msg); else - scmi_virtio_put_free_msg(vioch, msg); + scmi_vio_msg_release(vioch, msg); } static void scmi_vio_complete_cb(struct virtqueue *vqueue) @@ -230,6 +287,7 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue) virtqueue_disable_cb(vqueue); cb_enabled = false; } + msg = virtqueue_get_buf(vqueue, &length); if (!msg) { if (virtqueue_enable_cb(vqueue)) { @@ -260,6 +318,49 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue) } } +static void scmi_vio_deferred_tx_worker(struct work_struct *work) +{ + unsigned long flags; + struct scmi_vio_channel *vioch; + struct scmi_vio_msg *msg, *tmp; + + vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work); + + if (!scmi_vio_channel_acquire(vioch)) + return; + + /* + * Process pre-fetched messages: these could be non-polled messages or + * late timed-out replies to polled messages dequeued by chance while + * polling for some other messages: this worker is in charge to process + * the valid non-expired messages and anyway finally free all of them. + */ + spin_lock_irqsave(&vioch->pending_lock, flags); + + /* Scan the list of possibly pre-fetched messages during polling. */ + list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) { + list_del(&msg->list); + + /* + * Channel is acquired here (cannot vanish) and this message + * is no more processed elsewhere so no poll_lock needed. + */ + if (msg->poll_status == VIO_MSG_NOT_POLLED) + scmi_rx_callback(vioch->cinfo, + msg_read_header(msg->input), msg); + + /* Free the processed message once done */ + scmi_vio_msg_release(vioch, msg); + } + + spin_unlock_irqrestore(&vioch->pending_lock, flags); + + /* Process possibly still pending messages */ + scmi_vio_complete_cb(vioch->vqueue); + + scmi_vio_channel_release(vioch); +} + static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" }; static vq_callback_t *scmi_vio_complete_callbacks[] = { @@ -327,6 +428,19 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index]; + /* Setup a deferred worker for polling. 
*/ + if (tx && !vioch->deferred_tx_wq) { + vioch->deferred_tx_wq = + alloc_workqueue(dev_name(&scmi_vdev->dev), + WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS, + 0); + if (!vioch->deferred_tx_wq) + return -ENOMEM; + + INIT_WORK(&vioch->deferred_tx_work, + scmi_vio_deferred_tx_worker); + } + for (i = 0; i < vioch->max_msg; i++) { struct scmi_vio_msg *msg; @@ -340,6 +454,8 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, GFP_KERNEL); if (!msg->request) return -ENOMEM; + spin_lock_init(&msg->poll_lock); + refcount_set(&msg->users, 1); } msg->input = devm_kzalloc(cinfo->dev, VIRTIO_SCMI_MAX_PDU_SIZE, @@ -394,6 +510,21 @@ static int virtio_send_message(struct scmi_chan_info *cinfo, spin_lock_irqsave(&vioch->lock, flags); + /* + * If polling was requested for this transaction: + * - retrieve last used index (will be used as polling reference) + * - bind the polled message to the xfer via .priv + * - grab an additional msg refcount for the poll-path + */ + if (xfer->hdr.poll_completion) { + msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue); + /* Still no users, no need to acquire poll_lock */ + msg->poll_status = VIO_MSG_POLLING; + scmi_vio_msg_acquire(msg); + /* Ensure initialized msg is visibly bound to xfer */ + smp_store_mb(xfer->priv, msg); + } + rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC); if (rc) dev_err(vioch->cinfo->dev, @@ -403,8 +534,13 @@ static int virtio_send_message(struct scmi_chan_info *cinfo, spin_unlock_irqrestore(&vioch->lock, flags); - if (rc) - scmi_virtio_put_free_msg(vioch, msg); + if (rc) { + /* Ensure order between xfer->priv clear and vq feeding */ + smp_store_mb(xfer->priv, NULL); + if (xfer->hdr.poll_completion) + scmi_vio_msg_release(vioch, msg); + scmi_vio_msg_release(vioch, msg); + } scmi_vio_channel_release(vioch); @@ -416,10 +552,8 @@ static void virtio_fetch_response(struct scmi_chan_info *cinfo, { struct scmi_vio_msg *msg = xfer->priv; - if (msg) { + if (msg) msg_fetch_response(msg->input, msg->rx_len, xfer); - xfer->priv = NULL; - } } static void virtio_fetch_notification(struct scmi_chan_info *cinfo, @@ -427,10 +561,225 @@ static void virtio_fetch_notification(struct scmi_chan_info *cinfo, { struct scmi_vio_msg *msg = xfer->priv; - if (msg) { + if (msg) msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer); - xfer->priv = NULL; +} + +/** + * virtio_mark_txdone - Mark transmission done + * + * Free only completed polling transfer messages. + * + * Note that in the SCMI VirtIO transport we never explicitly release still + * outstanding but timed-out messages by forcibly re-adding them to the + * free-list inside the TX code path; we instead let IRQ/RX callbacks, or the + * TX deferred worker, eventually clean up such messages once, finally, a late + * reply is received and discarded (if ever). + * + * This approach was deemed preferable since those pending timed-out buffers are + * still effectively owned by the SCMI platform VirtIO device even after timeout + * expiration: forcibly freeing and reusing them before they had been returned + * explicitly by the SCMI platform could lead to subtle bugs due to message + * corruption. + * An SCMI platform VirtIO device which never returns message buffers is + * anyway broken and it will quickly lead to exhaustion of available messages. 
+ * + * For this same reason, here, we take care to free only the polled messages + * that had been somehow replied (only if not by chance already processed on the + * IRQ path - the initial scmi_vio_msg_release() takes care of this) and also + * any timed-out polled message if that indeed appears to have been at least + * dequeued from the virtqueues (VIO_MSG_POLL_DONE): this is needed since such + * messages won't be freed elsewhere. Any other polled message is marked as + * VIO_MSG_POLL_TIMEOUT. + * + * Possible late replies to timed-out polled messages will be eventually freed + * by RX callbacks if delivered on the IRQ path or by the deferred TX worker if + * dequeued on some other polling path. + * + * @cinfo: SCMI channel info + * @ret: Transmission return code + * @xfer: Transfer descriptor + */ +static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *xfer) +{ + unsigned long flags; + struct scmi_vio_channel *vioch = cinfo->transport_info; + struct scmi_vio_msg *msg = xfer->priv; + + if (!msg || !scmi_vio_channel_acquire(vioch)) + return; + + /* Ensure msg is unbound from xfer anyway at this point */ + smp_store_mb(xfer->priv, NULL); + + /* Must be a polled xfer and not already freed on the IRQ path */ + if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) { + scmi_vio_channel_release(vioch); + return; } + + spin_lock_irqsave(&msg->poll_lock, flags); + /* Do not free timedout polled messages only if still inflight */ + if (ret != -ETIMEDOUT || msg->poll_status == VIO_MSG_POLL_DONE) + scmi_vio_msg_release(vioch, msg); + else if (msg->poll_status == VIO_MSG_POLLING) + msg->poll_status = VIO_MSG_POLL_TIMEOUT; + spin_unlock_irqrestore(&msg->poll_lock, flags); + + scmi_vio_channel_release(vioch); +} + +/** + * virtio_poll_done - Provide polling support for VirtIO transport + * + * @cinfo: SCMI channel info + * @xfer: Reference to the transfer being poll for. + * + * VirtIO core provides a polling mechanism based only on last used indexes: + * this means that it is possible to poll the virtqueues waiting for something + * new to arrive from the host side, but the only way to check if the freshly + * arrived buffer was indeed what we were waiting for is to compare the newly + * arrived message descriptor with the one we are polling on. + * + * As a consequence it can happen to dequeue something different from the buffer + * we were poll-waiting for: if that is the case such early fetched buffers are + * then added to a the @pending_cmds_list list for later processing by a + * dedicated deferred worker. + * + * So, basically, once something new is spotted we proceed to de-queue all the + * freshly received used buffers until we found the one we were polling on, or, + * we have 'seemingly' emptied the virtqueue; if some buffers are still pending + * in the vqueue at the end of the polling loop (possible due to inherent races + * in virtqueues handling mechanisms), we similarly kick the deferred worker + * and let it process those, to avoid indefinitely looping in the .poll_done + * busy-waiting helper. + * + * Finally, we delegate to the deferred worker also the final free of any timed + * out reply to a polled message that we should dequeue. 
+ * + * Note that, since we do NOT have per-message suppress notification mechanism, + * the message we are polling for could be alternatively delivered via usual + * IRQs callbacks on another core which happened to have IRQs enabled while we + * are actively polling for it here: in such a case it will be handled as such + * by scmi_rx_callback() and the polling loop in the SCMI Core TX path will be + * transparently terminated anyway. + * + * Return: True once polling has successfully completed. + */ +static bool virtio_poll_done(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + bool pending, found = false; + unsigned int length, any_prefetched = 0; + unsigned long flags; + struct scmi_vio_msg *next_msg, *msg = xfer->priv; + struct scmi_vio_channel *vioch = cinfo->transport_info; + + if (!msg) + return true; + + /* + * Processed already by other polling loop on another CPU ? + * + * Note that this message is acquired on the poll path so cannot vanish + * while inside this loop iteration even if concurrently processed on + * the IRQ path. + * + * Avoid to acquire poll_lock since polled_status can be changed + * in a relevant manner only later in this same thread of execution: + * any other possible changes made concurrently by other polling loops + * or by a reply delivered on the IRQ path have no meaningful impact on + * this loop iteration: in other words it is harmless to allow this + * possible race but let has avoid spinlocking with irqs off in this + * initial part of the polling loop. + */ + if (msg->poll_status == VIO_MSG_POLL_DONE) + return true; + + if (!scmi_vio_channel_acquire(vioch)) + return true; + + /* Has cmdq index moved at all ? */ + pending = virtqueue_poll(vioch->vqueue, msg->poll_idx); + if (!pending) { + scmi_vio_channel_release(vioch); + return false; + } + + spin_lock_irqsave(&vioch->lock, flags); + virtqueue_disable_cb(vioch->vqueue); + + /* + * Process all new messages till the polled-for message is found OR + * the vqueue is empty. + */ + while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) { + bool next_msg_done = false; + + /* + * Mark any dequeued buffer message as VIO_MSG_POLL_DONE so + * that can be properly freed even on timeout in mark_txdone. + */ + spin_lock(&next_msg->poll_lock); + if (next_msg->poll_status == VIO_MSG_POLLING) { + next_msg->poll_status = VIO_MSG_POLL_DONE; + next_msg_done = true; + } + spin_unlock(&next_msg->poll_lock); + + next_msg->rx_len = length; + /* Is the message we were polling for ? */ + if (next_msg == msg) { + found = true; + break; + } else if (next_msg_done) { + /* Skip the rest if this was another polled msg */ + continue; + } + + /* + * Enqueue for later processing any non-polled message and any + * timed-out polled one that we happen to have dequeued. + */ + spin_lock(&next_msg->poll_lock); + if (next_msg->poll_status == VIO_MSG_NOT_POLLED || + next_msg->poll_status == VIO_MSG_POLL_TIMEOUT) { + spin_unlock(&next_msg->poll_lock); + + any_prefetched++; + spin_lock(&vioch->pending_lock); + list_add_tail(&next_msg->list, + &vioch->pending_cmds_list); + spin_unlock(&vioch->pending_lock); + } else { + spin_unlock(&next_msg->poll_lock); + } + } + + /* + * When the polling loop has successfully terminated if something + * else was queued in the meantime, it will be served by a deferred + * worker OR by the normal IRQ/callback OR by other poll loops. + * + * If we are still looking for the polled reply, the polling index has + * to be updated to the current vqueue last used index. 
+ */ + if (found) { + pending = !virtqueue_enable_cb(vioch->vqueue); + } else { + msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue); + pending = virtqueue_poll(vioch->vqueue, msg->poll_idx); + } + + if (vioch->deferred_tx_wq && (any_prefetched || pending)) + queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work); + + spin_unlock_irqrestore(&vioch->lock, flags); + + scmi_vio_channel_release(vioch); + + return found; } static const struct scmi_transport_ops scmi_virtio_ops = { @@ -442,6 +791,8 @@ static const struct scmi_transport_ops scmi_virtio_ops = { .send_message = virtio_send_message, .fetch_response = virtio_fetch_response, .fetch_notification = virtio_fetch_notification, + .mark_txdone = virtio_mark_txdone, + .poll_done = virtio_poll_done, }; static int scmi_vio_probe(struct virtio_device *vdev) @@ -484,6 +835,8 @@ static int scmi_vio_probe(struct virtio_device *vdev) spin_lock_init(&channels[i].lock); spin_lock_init(&channels[i].free_lock); INIT_LIST_HEAD(&channels[i].free_list); + spin_lock_init(&channels[i].pending_lock); + INIT_LIST_HEAD(&channels[i].pending_cmds_list); channels[i].vqueue = vqs[i]; sz = virtqueue_get_vring_size(channels[i].vqueue); @@ -573,4 +926,5 @@ const struct scmi_desc scmi_virtio_desc = { .max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS, .max_msg = 0, /* overridden by virtio_get_max_msg() */ .max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE, + .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE), }; -- cgit v1.2.3 From 05976c5f3bff8f8b5230da4b39f7cd6dfba9943e Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Thu, 17 Feb 2022 13:12:31 +0000 Subject: firmware: arm_scmi: Support optional system wide atomic-threshold-us An SCMI agent can be configured system-wide with a well-defined atomic threshold: only SCMI synchronous command whose latency has been advertised by the SCMI platform to be lower or equal to this configured threshold will be considered for atomic operations, when requested and if supported by the underlying transport at all. Link: https://lore.kernel.org/r/20220217131234.50328-6-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/driver.c | 27 ++++++++++++++++++++++++--- include/linux/scmi_protocol.h | 5 ++++- 2 files changed, 28 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 4fd5a35ffa2f..7436c475e708 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -131,6 +131,12 @@ struct scmi_protocol_instance { * MAX_PROTOCOLS_IMP elements allocated by the base protocol * @active_protocols: IDR storing device_nodes for protocols actually defined * in the DT and confirmed as implemented by fw. + * @atomic_threshold: Optional system wide DT-configured threshold, expressed + * in microseconds, for atomic operations. + * Only SCMI synchronous commands reported by the platform + * to have an execution latency lesser-equal to the threshold + * should be considered for atomic mode operation: such + * decision is finally left up to the SCMI drivers. * @notify_priv: Pointer to private data structure specific to notifications. 
* @node: List head * @users: Number of users of this instance @@ -149,6 +155,7 @@ struct scmi_info { struct mutex protocols_mtx; u8 *protocols_imp; struct idr active_protocols; + unsigned int atomic_threshold; void *notify_priv; struct list_head node; int users; @@ -1406,15 +1413,22 @@ static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id) * SCMI instance is configured as atomic. * * @handle: A reference to the SCMI platform instance. + * @atomic_threshold: An optional return value for the system wide currently + * configured threshold for atomic operations. * * Return: True if transport is configured as atomic */ -static bool scmi_is_transport_atomic(const struct scmi_handle *handle) +static bool scmi_is_transport_atomic(const struct scmi_handle *handle, + unsigned int *atomic_threshold) { + bool ret; struct scmi_info *info = handle_to_scmi_info(handle); - return info->desc->atomic_enabled && - is_transport_polling_capable(info); + ret = info->desc->atomic_enabled && is_transport_polling_capable(info); + if (ret && atomic_threshold) + *atomic_threshold = info->atomic_threshold; + + return ret; } static inline @@ -1954,6 +1968,13 @@ static int scmi_probe(struct platform_device *pdev) handle->version = &info->version; handle->devm_protocol_get = scmi_devm_protocol_get; handle->devm_protocol_put = scmi_devm_protocol_put; + + /* System wide atomic threshold for atomic ops .. if any */ + if (!of_property_read_u32(np, "atomic-threshold-us", + &info->atomic_threshold)) + dev_info(dev, + "SCMI System wide atomic threshold set to %d us\n", + info->atomic_threshold); handle->is_transport_atomic = scmi_is_transport_atomic; if (desc->ops->link_supplier) { diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 9f895cb81818..fdf6bd83cc59 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -619,6 +619,8 @@ struct scmi_notify_ops { * be interested to know if they can assume SCMI * command transactions associated to this handle will * never sleep and act accordingly. + * An optional atomic threshold value could be returned + * where configured. * @notify_ops: pointer to set of notifications related operations */ struct scmi_handle { @@ -629,7 +631,8 @@ struct scmi_handle { (*devm_protocol_get)(struct scmi_device *sdev, u8 proto, struct scmi_protocol_handle **ph); void (*devm_protocol_put)(struct scmi_device *sdev, u8 proto); - bool (*is_transport_atomic)(const struct scmi_handle *handle); + bool (*is_transport_atomic)(const struct scmi_handle *handle, + unsigned int *atomic_threshold); const struct scmi_notify_ops *notify_ops; }; -- cgit v1.2.3 From b7bd36f2e9430a58aefdc326f8e6653e9b000243 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Thu, 17 Feb 2022 13:12:32 +0000 Subject: firmware: arm_scmi: Add atomic support to clock protocol Introduce new _atomic variant for SCMI clock protocol operations related to enable disable operations: when an atomic operation is required the xfer poll_completion flag is set for that transaction. 
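As a hedged illustration of how callers are expected to pick between the two flavours (the helper below and its can_sleep flag are made up; only the scmi_clk_proto_ops callbacks come from this patch):

/* Hypothetical caller: use the polled variant when sleeping is not allowed. */
static int scmi_clk_gate(const struct scmi_protocol_handle *ph,
			 const struct scmi_clk_proto_ops *ops,
			 u32 clk_id, bool enable, bool can_sleep)
{
	if (can_sleep)
		return enable ? ops->enable(ph, clk_id)
			      : ops->disable(ph, clk_id);

	/* The _atomic variants set hdr.poll_completion, so the core polls */
	return enable ? ops->enable_atomic(ph, clk_id)
		      : ops->disable_atomic(ph, clk_id);
}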
Link: https://lore.kernel.org/r/20220217131234.50328-7-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/clock.c | 22 +++++++++++++++++++--- include/linux/scmi_protocol.h | 3 +++ 2 files changed, 22 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index 35b56c8ba0c0..72f930c0e3e2 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -273,7 +273,7 @@ static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph, static int scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id, - u32 config) + u32 config, bool atomic) { int ret; struct scmi_xfer *t; @@ -284,6 +284,8 @@ scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id, if (ret) return ret; + t->hdr.poll_completion = atomic; + cfg = t->tx.buf; cfg->id = cpu_to_le32(clk_id); cfg->attributes = cpu_to_le32(config); @@ -296,12 +298,24 @@ scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id, static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id) { - return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE); + return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, false); } static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id) { - return scmi_clock_config_set(ph, clk_id, 0); + return scmi_clock_config_set(ph, clk_id, 0, false); +} + +static int scmi_clock_enable_atomic(const struct scmi_protocol_handle *ph, + u32 clk_id) +{ + return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, true); +} + +static int scmi_clock_disable_atomic(const struct scmi_protocol_handle *ph, + u32 clk_id) +{ + return scmi_clock_config_set(ph, clk_id, 0, true); } static int scmi_clock_count_get(const struct scmi_protocol_handle *ph) @@ -330,6 +344,8 @@ static const struct scmi_clk_proto_ops clk_proto_ops = { .rate_set = scmi_clock_rate_set, .enable = scmi_clock_enable, .disable = scmi_clock_disable, + .enable_atomic = scmi_clock_enable_atomic, + .disable_atomic = scmi_clock_disable_atomic, }; static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph) diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index fdf6bd83cc59..306e576835f8 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -82,6 +82,9 @@ struct scmi_clk_proto_ops { u64 rate); int (*enable)(const struct scmi_protocol_handle *ph, u32 clk_id); int (*disable)(const struct scmi_protocol_handle *ph, u32 clk_id); + int (*enable_atomic)(const struct scmi_protocol_handle *ph, u32 clk_id); + int (*disable_atomic)(const struct scmi_protocol_handle *ph, + u32 clk_id); }; /** -- cgit v1.2.3 From 18f295b758b227857133f680a397a42e83c62f3f Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Thu, 17 Feb 2022 13:12:33 +0000 Subject: firmware: arm_scmi: Add support for clock_enable_latency An SCMI platform can optionally advertise an enable latency typically associated with a specific clock resource: add support for parsing such optional message field and export such information in the usual publicly accessible clock descriptor. 
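Since the field is optional, its presence is inferred purely from the reply length. A condensed, hedged sketch of that check as it would sit inside clock.c (the helper name is illustrative; the fields are the ones touched by the diff below):

/* Illustrative only: detect an optional trailing reply word by length. */
static u32 clock_enable_latency_get(const struct scmi_xfer *t,
				    const struct scmi_msg_resp_clock_attributes *attr)
{
	/* A full-size reply means the platform appended clock_enable_latency */
	if (t->rx.len == sizeof(*attr))
		return le32_to_cpu(attr->clock_enable_latency);

	/* Shorter (older) replies stop after the name: no advertised latency */
	return 0;
}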
Link: https://lore.kernel.org/r/20220217131234.50328-8-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/clock.c | 12 +++++++++--- include/linux/scmi_protocol.h | 1 + 2 files changed, 10 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index 72f930c0e3e2..cf6fed6dec77 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -27,7 +27,8 @@ struct scmi_msg_resp_clock_protocol_attributes { struct scmi_msg_resp_clock_attributes { __le32 attributes; #define CLOCK_ENABLE BIT(0) - u8 name[SCMI_MAX_STR_SIZE]; + u8 name[SCMI_MAX_STR_SIZE]; + __le32 clock_enable_latency; }; struct scmi_clock_set_config { @@ -116,10 +117,15 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph, attr = t->rx.buf; ret = ph->xops->do_xfer(ph, t); - if (!ret) + if (!ret) { strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE); - else + /* Is optional field clock_enable_latency provided ? */ + if (t->rx.len == sizeof(*attr)) + clk->enable_latency = + le32_to_cpu(attr->clock_enable_latency); + } else { clk->name[0] = '\0'; + } ph->xops->xfer_put(ph, t); return ret; diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 306e576835f8..b87551f41f9f 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -42,6 +42,7 @@ struct scmi_revision_info { struct scmi_clock_info { char name[SCMI_MAX_STR_SIZE]; + unsigned int enable_latency; bool rate_discrete; union { struct { -- cgit v1.2.3 From 38a0e5b735d6152d334d2f94b925a1c8a93bd7eb Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Thu, 17 Feb 2022 13:12:34 +0000 Subject: clk: scmi: Support atomic clock enable/disable API Support also atomic enable/disable clk_ops beside the bare non-atomic one (prepare/unprepare) when the underlying SCMI transport is configured to support atomic transactions for synchronous commands. Compare the SCMI system-wide configured atomic threshold latency time and the per-clock advertised enable latency (if any) to choose whether to provide sleeping prepare/unprepare vs atomic enable/disable. Link: https://lore.kernel.org/r/20220217131234.50328-9-cristian.marussi@arm.com Cc: Michael Turquette Cc: Stephen Boyd Cc: linux-clk@vger.kernel.org Acked-by: Stephen Boyd Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/clk/clk-scmi.c | 71 ++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 60 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c index 1e357d364ca2..2c7a830ce308 100644 --- a/drivers/clk/clk-scmi.c +++ b/drivers/clk/clk-scmi.c @@ -2,7 +2,7 @@ /* * System Control and Power Interface (SCMI) Protocol based clock driver * - * Copyright (C) 2018-2021 ARM Ltd. + * Copyright (C) 2018-2022 ARM Ltd. 
*/ #include @@ -88,21 +88,51 @@ static void scmi_clk_disable(struct clk_hw *hw) scmi_proto_clk_ops->disable(clk->ph, clk->id); } +static int scmi_clk_atomic_enable(struct clk_hw *hw) +{ + struct scmi_clk *clk = to_scmi_clk(hw); + + return scmi_proto_clk_ops->enable_atomic(clk->ph, clk->id); +} + +static void scmi_clk_atomic_disable(struct clk_hw *hw) +{ + struct scmi_clk *clk = to_scmi_clk(hw); + + scmi_proto_clk_ops->disable_atomic(clk->ph, clk->id); +} + +/* + * We can provide enable/disable atomic callbacks only if the underlying SCMI + * transport for an SCMI instance is configured to handle SCMI commands in an + * atomic manner. + * + * When no SCMI atomic transport support is available we instead provide only + * the prepare/unprepare API, as allowed by the clock framework when atomic + * calls are not available. + * + * Two distinct sets of clk_ops are provided since we could have multiple SCMI + * instances with different underlying transport quality, so they cannot be + * shared. + */ static const struct clk_ops scmi_clk_ops = { .recalc_rate = scmi_clk_recalc_rate, .round_rate = scmi_clk_round_rate, .set_rate = scmi_clk_set_rate, - /* - * We can't provide enable/disable callback as we can't perform the same - * in atomic context. Since the clock framework provides standard API - * clk_prepare_enable that helps cases using clk_enable in non-atomic - * context, it should be fine providing prepare/unprepare. - */ .prepare = scmi_clk_enable, .unprepare = scmi_clk_disable, }; -static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk) +static const struct clk_ops scmi_atomic_clk_ops = { + .recalc_rate = scmi_clk_recalc_rate, + .round_rate = scmi_clk_round_rate, + .set_rate = scmi_clk_set_rate, + .enable = scmi_clk_atomic_enable, + .disable = scmi_clk_atomic_disable, +}; + +static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk, + const struct clk_ops *scmi_ops) { int ret; unsigned long min_rate, max_rate; @@ -110,7 +140,7 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk) struct clk_init_data init = { .flags = CLK_GET_RATE_NOCACHE, .num_parents = 0, - .ops = &scmi_clk_ops, + .ops = scmi_ops, .name = sclk->info->name, }; @@ -139,6 +169,8 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk) static int scmi_clocks_probe(struct scmi_device *sdev) { int idx, count, err; + unsigned int atomic_threshold; + bool is_atomic; struct clk_hw **hws; struct clk_hw_onecell_data *clk_data; struct device *dev = &sdev->dev; @@ -168,8 +200,11 @@ static int scmi_clocks_probe(struct scmi_device *sdev) clk_data->num = count; hws = clk_data->hws; + is_atomic = handle->is_transport_atomic(handle, &atomic_threshold); + for (idx = 0; idx < count; idx++) { struct scmi_clk *sclk; + const struct clk_ops *scmi_ops; sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL); if (!sclk) @@ -184,13 +219,27 @@ static int scmi_clocks_probe(struct scmi_device *sdev) sclk->id = idx; sclk->ph = ph; - err = scmi_clk_ops_init(dev, sclk); + /* + * Note that when transport is atomic but SCMI protocol did not + * specify (or support) an enable_latency associated with a + * clock, we default to use atomic operations mode. 
+ */ + if (is_atomic && + sclk->info->enable_latency <= atomic_threshold) + scmi_ops = &scmi_atomic_clk_ops; + else + scmi_ops = &scmi_clk_ops; + + err = scmi_clk_ops_init(dev, sclk, scmi_ops); if (err) { dev_err(dev, "failed to register clock %d\n", idx); devm_kfree(dev, sclk); hws[idx] = NULL; } else { - dev_dbg(dev, "Registered clock:%s\n", sclk->info->name); + dev_dbg(dev, "Registered clock:%s%s\n", + sclk->info->name, + scmi_ops == &scmi_atomic_clk_ops ? + " (atomic ops)" : ""); hws[idx] = &sclk->hw; } } -- cgit v1.2.3 From e6cb5408289f4202f4088731a4ac98c7ffaedb9d Mon Sep 17 00:00:00 2001 From: Ivan Bornyakov Date: Tue, 22 Feb 2022 08:20:59 +0300 Subject: bus: imx-weim: add DT overlay support for WEIM bus Add OF reconfiguration notifier handler for WEIM bus to setup Chip Select timings on runtime creation of child devices. However, it is not possible to load another DT overlay with conflicting CS timings with previously loaded overlay, even if the first one is unloaded. The reason is that there is no acces to CS timing property of a device node being removed, thus we can't track which of configured CS are available for re-configuration. Signed-off-by: Ivan Bornyakov Signed-off-by: Shawn Guo --- drivers/bus/imx-weim.c | 135 +++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 126 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c index bccb275b65ba..60fbd42041dd 100644 --- a/drivers/bus/imx-weim.c +++ b/drivers/bus/imx-weim.c @@ -64,6 +64,11 @@ struct cs_timing_state { struct cs_timing cs[MAX_CS_COUNT]; }; +struct weim_priv { + void __iomem *base; + struct cs_timing_state timing_state; +}; + static const struct of_device_id weim_id_table[] = { /* i.MX1/21 */ { .compatible = "fsl,imx1-weim", .data = &imx1_weim_devtype, }, @@ -128,21 +133,26 @@ err: } /* Parse and set the timing for this device. 
*/ -static int weim_timing_setup(struct device *dev, - struct device_node *np, void __iomem *base, - const struct imx_weim_devtype *devtype, - struct cs_timing_state *ts) +static int weim_timing_setup(struct device *dev, struct device_node *np, + const struct imx_weim_devtype *devtype) { u32 cs_idx, value[MAX_CS_REGS_COUNT]; int i, ret; int reg_idx, num_regs; struct cs_timing *cst; + struct weim_priv *priv; + struct cs_timing_state *ts; + void __iomem *base; if (WARN_ON(devtype->cs_regs_count > MAX_CS_REGS_COUNT)) return -EINVAL; if (WARN_ON(devtype->cs_count > MAX_CS_COUNT)) return -EINVAL; + priv = dev_get_drvdata(dev); + base = priv->base; + ts = &priv->timing_state; + ret = of_property_read_u32_array(np, "fsl,weim-cs-timing", value, devtype->cs_regs_count); if (ret) @@ -189,14 +199,15 @@ static int weim_timing_setup(struct device *dev, return 0; } -static int weim_parse_dt(struct platform_device *pdev, void __iomem *base) +static int weim_parse_dt(struct platform_device *pdev) { const struct of_device_id *of_id = of_match_device(weim_id_table, &pdev->dev); const struct imx_weim_devtype *devtype = of_id->data; struct device_node *child; int ret, have_child = 0; - struct cs_timing_state ts = {}; + struct weim_priv *priv; + void __iomem *base; u32 reg; if (devtype == &imx50_weim_devtype) { @@ -205,6 +216,9 @@ static int weim_parse_dt(struct platform_device *pdev, void __iomem *base) return ret; } + priv = dev_get_drvdata(&pdev->dev); + base = priv->base; + if (of_property_read_bool(pdev->dev.of_node, "fsl,burst-clk-enable")) { if (devtype->wcr_bcm) { reg = readl(base + devtype->wcr_offset); @@ -229,7 +243,7 @@ static int weim_parse_dt(struct platform_device *pdev, void __iomem *base) } for_each_available_child_of_node(pdev->dev.of_node, child) { - ret = weim_timing_setup(&pdev->dev, child, base, devtype, &ts); + ret = weim_timing_setup(&pdev->dev, child, devtype); if (ret) dev_warn(&pdev->dev, "%pOF set timing failed.\n", child); @@ -248,17 +262,25 @@ static int weim_parse_dt(struct platform_device *pdev, void __iomem *base) static int weim_probe(struct platform_device *pdev) { + struct weim_priv *priv; struct resource *res; struct clk *clk; void __iomem *base; int ret; + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + /* get the resource */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); + priv->base = base; + dev_set_drvdata(&pdev->dev, priv); + /* get the clock */ clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) @@ -269,7 +291,7 @@ static int weim_probe(struct platform_device *pdev) return ret; /* parse the device node */ - ret = weim_parse_dt(pdev, base); + ret = weim_parse_dt(pdev); if (ret) clk_disable_unprepare(clk); else @@ -278,6 +300,81 @@ static int weim_probe(struct platform_device *pdev) return ret; } +#if IS_ENABLED(CONFIG_OF_DYNAMIC) +static int of_weim_notify(struct notifier_block *nb, unsigned long action, + void *arg) +{ + const struct imx_weim_devtype *devtype; + struct of_reconfig_data *rd = arg; + const struct of_device_id *of_id; + struct platform_device *pdev; + int ret = NOTIFY_OK; + + switch (of_reconfig_get_state_change(action, rd)) { + case OF_RECONFIG_CHANGE_ADD: + of_id = of_match_node(weim_id_table, rd->dn->parent); + if (!of_id) + return NOTIFY_OK; /* not for us */ + + devtype = of_id->data; + + pdev = of_find_device_by_node(rd->dn->parent); + if (!pdev) { + pr_err("%s: could not find platform device for '%pOF'\n", + 
__func__, rd->dn->parent); + + return notifier_from_errno(-EINVAL); + } + + if (weim_timing_setup(&pdev->dev, rd->dn, devtype)) + dev_warn(&pdev->dev, + "Failed to setup timing for '%pOF'\n", rd->dn); + + if (!of_node_check_flag(rd->dn, OF_POPULATED)) { + if (!of_platform_device_create(rd->dn, NULL, &pdev->dev)) { + dev_err(&pdev->dev, + "Failed to create child device '%pOF'\n", + rd->dn); + ret = notifier_from_errno(-EINVAL); + } + } + + platform_device_put(pdev); + + break; + case OF_RECONFIG_CHANGE_REMOVE: + if (!of_node_check_flag(rd->dn, OF_POPULATED)) + return NOTIFY_OK; /* device already destroyed */ + + of_id = of_match_node(weim_id_table, rd->dn->parent); + if (!of_id) + return NOTIFY_OK; /* not for us */ + + pdev = of_find_device_by_node(rd->dn); + if (!pdev) { + dev_err(&pdev->dev, + "Could not find platform device for '%pOF'\n", + rd->dn); + + ret = notifier_from_errno(-EINVAL); + } else { + of_platform_device_destroy(&pdev->dev, NULL); + platform_device_put(pdev); + } + + break; + default: + break; + } + + return ret; +} + +struct notifier_block weim_of_notifier = { + .notifier_call = of_weim_notify, +}; +#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ + static struct platform_driver weim_driver = { .driver = { .name = "imx-weim", @@ -285,7 +382,27 @@ static struct platform_driver weim_driver = { }, .probe = weim_probe, }; -module_platform_driver(weim_driver); + +static int __init weim_init(void) +{ +#if IS_ENABLED(CONFIG_OF_DYNAMIC) + WARN_ON(of_reconfig_notifier_register(&weim_of_notifier)); +#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ + + return platform_driver_register(&weim_driver); +} +module_init(weim_init); + +static void __exit weim_exit(void) +{ +#if IS_ENABLED(CONFIG_OF_DYNAMIC) + of_reconfig_notifier_unregister(&weim_of_notifier); +#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ + + return platform_driver_unregister(&weim_driver); + +} +module_exit(weim_exit); MODULE_AUTHOR("Freescale Semiconductor Inc."); MODULE_DESCRIPTION("i.MX EIM Controller Driver"); -- cgit v1.2.3 From b89acaf8cad188d9a1387d3049ae036a10d9a1f3 Mon Sep 17 00:00:00 2001 From: Lad Prabhakar Date: Mon, 21 Feb 2022 22:24:50 +0000 Subject: soc: renesas: Kconfig: Explicitly select PM and PM_GENERIC_DOMAINS configs Explicitly select PM and PM_GENERIC_DOMAINS configs for ARCH_R9A07G044 and ARCH_R9A07G054 configs. PM and PM_GENERIC_DOMAINS configs are required for RZ/{G2L,V2L} SoC without these configs the SMARC EVK's won't boot. Signed-off-by: Lad Prabhakar Link: https://lore.kernel.org/r/20220221222450.5393-1-prabhakar.mahadev-lad.rj@bp.renesas.com Signed-off-by: Geert Uytterhoeven --- drivers/soc/renesas/Kconfig | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig index 90f4f98be29c..57b6292b46a3 100644 --- a/drivers/soc/renesas/Kconfig +++ b/drivers/soc/renesas/Kconfig @@ -293,11 +293,15 @@ config ARCH_R8A774B1 config ARCH_R9A07G044 bool "ARM64 Platform support for RZ/G2L" + select PM + select PM_GENERIC_DOMAINS help This enables support for the Renesas RZ/G2L SoC variants. config ARCH_R9A07G054 bool "ARM64 Platform support for RZ/V2L" + select PM + select PM_GENERIC_DOMAINS help This enables support for the Renesas RZ/V2L SoC variants. 
-- cgit v1.2.3 From a1b019872693c74d919db4e267f451fc7af9a21c Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Mon, 14 Feb 2022 19:48:19 -0800 Subject: soc: qcom: mdt_loader: Fix split-firmware condition The updated condition checking if a segment can be found in the loaded firmware blob, or need to be loaded from a separate file, incorrectly classifies segments that ends at the end of the loaded blob. The result is that the mdt loader attempts to load the segment from a separate file. Correct the conditional to use the loaded segment instead. Fixes: ea90330fa329 ("soc: qcom: mdt_loader: Extend check for split firmware") Signed-off-by: Bjorn Andersson Reviewed-by: Vinod Koul Link: https://lore.kernel.org/r/20220215034819.1209367-1-bjorn.andersson@linaro.org --- drivers/soc/qcom/mdt_loader.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c index f0b1d969567c..366db493579b 100644 --- a/drivers/soc/qcom/mdt_loader.c +++ b/drivers/soc/qcom/mdt_loader.c @@ -329,7 +329,7 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, ptr = mem_region + offset; if (phdr->p_filesz && phdr->p_offset < fw->size && - phdr->p_offset + phdr->p_filesz < fw->size) { + phdr->p_offset + phdr->p_filesz <= fw->size) { /* Firmware is large enough to be non-split */ if (phdr->p_offset + phdr->p_filesz > fw->size) { dev_err(dev, "file %s segment %d would be truncated\n", -- cgit v1.2.3 From 088659ad2a830124407edc38da278010c95bcc96 Mon Sep 17 00:00:00 2001 From: Lad Prabhakar Date: Thu, 24 Feb 2022 09:21:14 +0000 Subject: soc: renesas: Kconfig: Introduce ARCH_RZG2L config option MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Renesas RZ/G2L, RZ/G2LC, RZ/G2UL and RZ/V2L SoCs have identical IP blocks for which drivers are common. To avoid updating the Kconfig files for drivers in common to each SoC, introduce the ARCH_RZG2L config option. The ARCH_RZG2L config option will be selected by the above mentioned SoCs, and the ARCH_RZG2L config option will be used as a dependency for the drivers in common. While at it, move PM and PM_GENERIC_DOMAINS under the ARCH_RZG2L config option instead of keeping it for individual SoCs. Signed-off-by: Lad Prabhakar Reviewed-by: Biju Das Link: https://lore.kernel.org/r/20220224092114.25737-1-prabhakar.mahadev-lad.rj@bp.renesas.com Signed-off-by: Geert Uytterhoeven --- drivers/soc/renesas/Kconfig | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig index 57b6292b46a3..fdc99a05a7e0 100644 --- a/drivers/soc/renesas/Kconfig +++ b/drivers/soc/renesas/Kconfig @@ -40,6 +40,11 @@ config ARCH_RMOBILE select SYS_SUPPORTS_SH_TMU select SYSC_RMOBILE +config ARCH_RZG2L + bool + select PM + select PM_GENERIC_DOMAINS + config ARCH_RZN1 bool select ARM_AMBA @@ -293,15 +298,13 @@ config ARCH_R8A774B1 config ARCH_R9A07G044 bool "ARM64 Platform support for RZ/G2L" - select PM - select PM_GENERIC_DOMAINS + select ARCH_RZG2L help This enables support for the Renesas RZ/G2L SoC variants. config ARCH_R9A07G054 bool "ARM64 Platform support for RZ/V2L" - select PM - select PM_GENERIC_DOMAINS + select ARCH_RZG2L help This enables support for the Renesas RZ/V2L SoC variants. 
-- cgit v1.2.3 From 1e5cf1452eb0f17b6bd6d51786c7b39ba175f340 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Wed, 1 Dec 2021 02:23:40 +0300 Subject: soc/tegra: pmc: Enable core domain support for Tegra20 and Tegra30 All device drivers got runtime PM and OPP support. Flip the core domain support status for Tegra20 and Tegra30 SoCs. Reviewed-by: Ulf Hansson Signed-off-by: Dmitry Osipenko Signed-off-by: Thierry Reding --- drivers/soc/tegra/pmc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 5aceacbd8ce0..bf6e84aa0e33 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -3066,7 +3066,7 @@ static void tegra20_pmc_setup_irq_polarity(struct tegra_pmc *pmc, } static const struct tegra_pmc_soc tegra20_pmc_soc = { - .supports_core_domain = false, + .supports_core_domain = true, .num_powergates = ARRAY_SIZE(tegra20_powergates), .powergates = tegra20_powergates, .num_cpu_powergates = 0, @@ -3127,7 +3127,7 @@ static const char * const tegra30_reset_sources[] = { }; static const struct tegra_pmc_soc tegra30_pmc_soc = { - .supports_core_domain = false, + .supports_core_domain = true, .num_powergates = ARRAY_SIZE(tegra30_powergates), .powergates = tegra30_powergates, .num_cpu_powergates = ARRAY_SIZE(tegra30_cpu_powergates), -- cgit v1.2.3 From 6f259bf1619bbff6118c1e556044c0cb4020bbb6 Mon Sep 17 00:00:00 2001 From: kartik Date: Mon, 6 Dec 2021 17:22:45 +0530 Subject: soc/tegra: fuse: Update nvmem cell list Update tegra_fuse_cells with below entries: - gcplex-config-fuse: Configuration bits for GPU, used to enable/disable write protected region used for storing GPU firmware. - pdi0: Unique per chip public identifier. - pdi1: Unique per chip public identifier. Signed-off-by: Prathamesh Shete Signed-off-by: Kartik Signed-off-by: Thierry Reding --- drivers/soc/tegra/fuse/fuse-tegra.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index 913103ee5432..10d2ae99babd 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2013-2021, NVIDIA CORPORATION. All rights reserved. */ #include @@ -161,6 +161,12 @@ static const struct nvmem_cell_info tegra_fuse_cells[] = { .bytes = 4, .bit_offset = 0, .nbits = 32, + }, { + .name = "gcplex-config-fuse", + .offset = 0x1c8, + .bytes = 4, + .bit_offset = 0, + .nbits = 32, }, { .name = "tsensor-realignment", .offset = 0x1fc, @@ -179,6 +185,18 @@ static const struct nvmem_cell_info tegra_fuse_cells[] = { .bytes = 4, .bit_offset = 0, .nbits = 32, + }, { + .name = "pdi0", + .offset = 0x300, + .bytes = 4, + .bit_offset = 0, + .nbits = 32, + }, { + .name = "pdi1", + .offset = 0x304, + .bytes = 4, + .bit_offset = 0, + .nbits = 32, }, }; -- cgit v1.2.3 From b631c9c2ae934ce2a563853e8e6a591f7d34b48b Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Thu, 24 Feb 2022 13:27:28 +0100 Subject: soc/tegra: fuse: Explicitly cast to/from __iomem sparse is picky about casts between different address spaces. A cast to plain void * needs to be accompanied by a __force modifier and casting back to void __iomem * needs to be explicit to avoid warnings. 
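As a generic, hedged sketch of the pattern being fixed (not the fuse driver code itself), an __iomem pointer carried through a devm action's void * argument ends up annotated like this:

#include <linux/device.h>
#include <linux/io.h>

/* Sketch: an __iomem mapping passed through devm_add_action()'s void *. */
static void restore_base(void *data)
{
	/* Cast back into the I/O address space explicitly */
	void __iomem *base = (void __iomem *)data;

	/* Placeholder access so the mapping is actually used */
	(void)readl(base);
}

static int register_restore(struct device *dev, void __iomem *base)
{
	/* __force tells sparse the address-space change is intentional */
	return devm_add_action(dev, restore_base, (void __force *)base);
}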
Reported-by: kernel test robot Fixes: 88724b78a84c ("soc/tegra: fuse: Use resource-managed helpers") Signed-off-by: Thierry Reding Reviewed-by: Dmitry Osipenko Signed-off-by: Thierry Reding --- drivers/soc/tegra/fuse/fuse-tegra.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index 10d2ae99babd..aa94fda282f4 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -202,8 +202,8 @@ static const struct nvmem_cell_info tegra_fuse_cells[] = { static void tegra_fuse_restore(void *base) { + fuse->base = (void __iomem *)base; fuse->clk = NULL; - fuse->base = base; } static int tegra_fuse_probe(struct platform_device *pdev) @@ -213,7 +213,7 @@ static int tegra_fuse_probe(struct platform_device *pdev) struct resource *res; int err; - err = devm_add_action(&pdev->dev, tegra_fuse_restore, base); + err = devm_add_action(&pdev->dev, tegra_fuse_restore, (void __force *)base); if (err) return err; -- cgit v1.2.3 From 20f36361b7dd45787fa9872b3591f7148001eb6f Mon Sep 17 00:00:00 2001 From: Luca Weiss Date: Sun, 20 Feb 2022 23:30:02 +0100 Subject: soc: qcom: rpmpd: Add MSM8226 support Add the power domains preset in MSM8226. Signed-off-by: Luca Weiss Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20220220223004.507739-2-luca@z3ntu.xyz --- drivers/soc/qcom/rpmpd.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'drivers') diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c index 624b5630feb8..3b5b91621532 100644 --- a/drivers/soc/qcom/rpmpd.c +++ b/drivers/soc/qcom/rpmpd.c @@ -138,6 +138,22 @@ static const struct rpmpd_desc mdm9607_desc = { .max_state = RPM_SMD_LEVEL_TURBO, }; +/* msm8226 RPM Power Domains */ +DEFINE_RPMPD_PAIR(msm8226, vddcx, vddcx_ao, SMPA, CORNER, 1); +DEFINE_RPMPD_VFC(msm8226, vddcx_vfc, SMPA, 1); + +static struct rpmpd *msm8226_rpmpds[] = { + [MSM8226_VDDCX] = &msm8226_vddcx, + [MSM8226_VDDCX_AO] = &msm8226_vddcx_ao, + [MSM8226_VDDCX_VFC] = &msm8226_vddcx_vfc, +}; + +static const struct rpmpd_desc msm8226_desc = { + .rpmpds = msm8226_rpmpds, + .num_pds = ARRAY_SIZE(msm8226_rpmpds), + .max_state = MAX_CORNER_RPMPD_STATE, +}; + /* msm8939 RPM Power Domains */ DEFINE_RPMPD_PAIR(msm8939, vddmd, vddmd_ao, SMPA, CORNER, 1); DEFINE_RPMPD_VFC(msm8939, vddmd_vfc, SMPA, 1); @@ -436,6 +452,7 @@ static const struct rpmpd_desc qcm2290_desc = { static const struct of_device_id rpmpd_match_table[] = { { .compatible = "qcom,mdm9607-rpmpd", .data = &mdm9607_desc }, + { .compatible = "qcom,msm8226-rpmpd", .data = &msm8226_desc }, { .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc }, { .compatible = "qcom,msm8939-rpmpd", .data = &msm8939_desc }, { .compatible = "qcom,msm8953-rpmpd", .data = &msm8953_desc }, -- cgit v1.2.3 From a06bf59d07f45a0a6ab4ab8ac69c1d708d3fadcb Mon Sep 17 00:00:00 2001 From: Julius Werner Date: Wed, 23 Feb 2022 16:34:20 -0800 Subject: memory: Update of_memory lpddr2 revision-id binding This patch updates the code parsing the "jedec,lpddr2" device tree binding to use the new `revision-id` property instead of the deprecated `revision-id1` and `revision-id2` properties if available. 
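A hedged condensation of the resulting property precedence; the helper name is made up, while the properties and fields are the ones handled in the diff below:

#include <linux/of.h>

/* Prefer the combined "revision-id" cells, fall back to the legacy properties. */
static void of_lpddr2_read_revision(struct device_node *np,
				    struct lpddr2_info *info)
{
	u32 rev[2];

	if (!of_property_read_u32_array(np, "revision-id", rev, 2)) {
		info->revision_id1 = rev[0];
		info->revision_id2 = rev[1];
		return;
	}

	/* Deprecated split properties, kept for older device trees */
	if (of_property_read_u32(np, "revision-id1", &info->revision_id1))
		info->revision_id1 = -ENOENT;
	if (of_property_read_u32(np, "revision-id2", &info->revision_id2))
		info->revision_id2 = -ENOENT;
}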
Signed-off-by: Julius Werner Link: https://lore.kernel.org/r/20220224003421.3440124-3-jwerner@chromium.org Signed-off-by: Krzysztof Kozlowski --- drivers/memory/of_memory.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c index bac5c7f34936..dbdf87bc0b78 100644 --- a/drivers/memory/of_memory.c +++ b/drivers/memory/of_memory.c @@ -318,14 +318,21 @@ const struct lpddr2_info struct property *prop; const char *cp; int err; - - err = of_property_read_u32(np, "revision-id1", &info.revision_id1); - if (err) - info.revision_id1 = -ENOENT; - - err = of_property_read_u32(np, "revision-id2", &info.revision_id2); - if (err) - info.revision_id2 = -ENOENT; + u32 revision_id[2]; + + err = of_property_read_u32_array(np, "revision-id", revision_id, 2); + if (!err) { + info.revision_id1 = revision_id[0]; + info.revision_id2 = revision_id[1]; + } else { + err = of_property_read_u32(np, "revision-id1", &info.revision_id1); + if (err) + info.revision_id1 = -ENOENT; + + err = of_property_read_u32(np, "revision-id2", &info.revision_id2); + if (err) + info.revision_id2 = -ENOENT; + } err = of_property_read_u32(np, "io-width", &info.io_width); if (err) -- cgit v1.2.3 From 194217df813a78234130576eaabbfe2524eae70c Mon Sep 17 00:00:00 2001 From: kartik Date: Tue, 25 Jan 2022 11:27:42 +0530 Subject: soc/tegra: pmc: Add Tegra234 wake events Enable the Tegra RTC alarm and power key wake-ups for Tegra234 Signed-off-by: kartik Signed-off-by: Thierry Reding --- drivers/soc/tegra/pmc.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index bf6e84aa0e33..fdf508e03400 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -3,7 +3,7 @@ * drivers/soc/tegra/pmc.c * * Copyright (c) 2010 Google, Inc - * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. * * Author: * Colin Cross @@ -54,6 +54,7 @@ #include #include #include +#include #include #define PMC_CNTRL 0x0 @@ -3788,6 +3789,11 @@ static const char * const tegra234_reset_sources[] = { "FUSECRC", }; +static const struct tegra_wake_event tegra234_wake_events[] = { + TEGRA_WAKE_GPIO("power", 29, 1, TEGRA234_AON_GPIO(EE, 4)), + TEGRA_WAKE_IRQ("rtc", 73, 10), +}; + static const struct tegra_pmc_soc tegra234_pmc_soc = { .supports_core_domain = false, .num_powergates = 0, @@ -3812,8 +3818,8 @@ static const struct tegra_pmc_soc tegra234_pmc_soc = { .num_reset_sources = ARRAY_SIZE(tegra234_reset_sources), .reset_levels = tegra186_reset_levels, .num_reset_levels = ARRAY_SIZE(tegra186_reset_levels), - .num_wake_events = 0, - .wake_events = NULL, + .num_wake_events = ARRAY_SIZE(tegra234_wake_events), + .wake_events = tegra234_wake_events, .pmc_clks_data = NULL, .num_pmc_clks = 0, .has_blink_output = false, -- cgit v1.2.3 From ac0ca395543af061f7ad77afcda0afb323d82468 Mon Sep 17 00:00:00 2001 From: Chun-Jie Chen Date: Sun, 30 Jan 2022 09:21:01 +0800 Subject: soc: mediatek: pm-domains: Add wakeup capacity support in power domain Due to some power domain needs to keep on for wakeup in system suspend, so add GENPD_FLAG_ACTIVE_WAKEUP support in Mediatek power domain driver. 
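Purely as a hedged, consumer-side sketch (nothing below is part of this patch): with GENPD_FLAG_ACTIVE_WAKEUP set, genpd keeps the domain powered across system suspend whenever an attached device is wakeup-enabled, which a driver would typically request roughly like this:

#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>
#include <linux/pm_wakeirq.h>

/* Hypothetical driver sitting in an MTK_SCPD_ACTIVE_WAKEUP power domain. */
static int wake_capable_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* Mark the device wakeup capable and arm its interrupt for suspend */
	device_init_wakeup(&pdev->dev, true);
	return dev_pm_set_wake_irq(&pdev->dev, irq);
}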
Fixes: 59b644b01cf4 ("soc: mediatek: Add MediaTek SCPSYS power domains") Signed-off-by: Chun-Jie Chen Reviewed-by: Chen-Yu Tsai Reviewed-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20220130012104.5292-3-chun-jie.chen@mediatek.com Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mtk-pm-domains.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c index b762bc40f56b..afd2fd74802d 100644 --- a/drivers/soc/mediatek/mtk-pm-domains.c +++ b/drivers/soc/mediatek/mtk-pm-domains.c @@ -443,6 +443,9 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no pd->genpd.power_off = scpsys_power_off; pd->genpd.power_on = scpsys_power_on; + if (MTK_SCPD_CAPS(pd, MTK_SCPD_ACTIVE_WAKEUP)) + pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP; + if (MTK_SCPD_CAPS(pd, MTK_SCPD_KEEP_DEFAULT_OFF)) pm_genpd_init(&pd->genpd, NULL, true); else -- cgit v1.2.3 From dcbf6831a53aa5936b31f86d28444e2b1c82ae90 Mon Sep 17 00:00:00 2001 From: Chun-Jie Chen Date: Sun, 30 Jan 2022 09:21:02 +0800 Subject: soc: mediatek: pm-domains: Remove unused macro Due to clk resource data will be allocated dynamically by searching parent count of clk in power domain node, so remove the unused marco MAX_SUBSYS_CLKS for static allocation. Signed-off-by: Chun-Jie Chen Reviewed-by: Enric Balletbo i Serra Reviewed-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20220130012104.5292-4-chun-jie.chen@mediatek.com Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mtk-pm-domains.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h index c5ac649ae51b..089a31679806 100644 --- a/drivers/soc/mediatek/mtk-pm-domains.h +++ b/drivers/soc/mediatek/mtk-pm-domains.h @@ -72,8 +72,6 @@ struct scpsys_bus_prot_data { bool ignore_clr_ack; }; -#define MAX_SUBSYS_CLKS 10 - /** * struct scpsys_domain_data - scp domain data for power on/off flow * @name: The name of the power domain. -- cgit v1.2.3 From db2ca8608a9fcd6a26736308986a01750958bb20 Mon Sep 17 00:00:00 2001 From: Chun-Jie Chen Date: Sun, 30 Jan 2022 09:21:03 +0800 Subject: soc: mediatek: pm-domains: Move power status offset to power domain data MT8195 has more than 32 power domains so it needs two set of pwr_sta and pwr_sta2nd registers, so move the register offset from soc data into power domain data. 
Signed-off-by: Chun-Jie Chen Reviewed-by: Enric Balletbo i Serra Reviewed-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20220130012104.5292-5-chun-jie.chen@mediatek.com Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mt8167-pm-domains.h | 16 ++++++++++-- drivers/soc/mediatek/mt8173-pm-domains.h | 22 ++++++++++++++-- drivers/soc/mediatek/mt8183-pm-domains.h | 32 +++++++++++++++++++++-- drivers/soc/mediatek/mt8192-pm-domains.h | 44 ++++++++++++++++++++++++++++++-- drivers/soc/mediatek/mtk-pm-domains.c | 4 +-- drivers/soc/mediatek/mtk-pm-domains.h | 4 +-- 6 files changed, 110 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/mediatek/mt8167-pm-domains.h b/drivers/soc/mediatek/mt8167-pm-domains.h index 15559ddf26e4..4d6c32759606 100644 --- a/drivers/soc/mediatek/mt8167-pm-domains.h +++ b/drivers/soc/mediatek/mt8167-pm-domains.h @@ -18,6 +18,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = { .name = "mm", .sta_mask = PWR_STATUS_DISP, .ctl_offs = SPM_DIS_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, .sram_pdn_bits = GENMASK(11, 8), .sram_pdn_ack_bits = GENMASK(12, 12), .bp_infracfg = { @@ -30,6 +32,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = { .name = "vdec", .sta_mask = PWR_STATUS_VDEC, .ctl_offs = SPM_VDE_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), .caps = MTK_SCPD_ACTIVE_WAKEUP, @@ -38,6 +42,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = { .name = "isp", .sta_mask = PWR_STATUS_ISP, .ctl_offs = SPM_ISP_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, .sram_pdn_bits = GENMASK(11, 8), .sram_pdn_ack_bits = GENMASK(13, 12), .caps = MTK_SCPD_ACTIVE_WAKEUP, @@ -46,6 +52,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = { .name = "mfg_async", .sta_mask = MT8167_PWR_STATUS_MFG_ASYNC, .ctl_offs = SPM_MFG_ASYNC_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, .sram_pdn_bits = 0, .sram_pdn_ack_bits = 0, .bp_infracfg = { @@ -57,6 +65,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = { .name = "mfg_2d", .sta_mask = MT8167_PWR_STATUS_MFG_2D, .ctl_offs = SPM_MFG_2D_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, .sram_pdn_bits = GENMASK(11, 8), .sram_pdn_ack_bits = GENMASK(15, 12), }, @@ -64,6 +74,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = { .name = "mfg", .sta_mask = PWR_STATUS_MFG, .ctl_offs = SPM_MFG_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, .sram_pdn_bits = GENMASK(11, 8), .sram_pdn_ack_bits = GENMASK(15, 12), }, @@ -71,6 +83,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = { .name = "conn", .sta_mask = PWR_STATUS_CONN, .ctl_offs = SPM_CONN_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = 0, .caps = MTK_SCPD_ACTIVE_WAKEUP, @@ -85,8 +99,6 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = { static const struct scpsys_soc_data mt8167_scpsys_data = { .domains_data = scpsys_domain_data_mt8167, .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8167), - .pwr_sta_offs = SPM_PWR_STATUS, - .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, }; #endif /* __SOC_MEDIATEK_MT8167_PM_DOMAINS_H */ diff 
--git a/drivers/soc/mediatek/mt8173-pm-domains.h b/drivers/soc/mediatek/mt8173-pm-domains.h index 714fa92575df..1a5dc63b7357 100644 --- a/drivers/soc/mediatek/mt8173-pm-domains.h +++ b/drivers/soc/mediatek/mt8173-pm-domains.h @@ -15,6 +15,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = { .name = "vdec", .sta_mask = PWR_STATUS_VDEC, .ctl_offs = SPM_VDE_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, .sram_pdn_bits = GENMASK(11, 8), .sram_pdn_ack_bits = GENMASK(12, 12), }, @@ -22,6 +24,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = { .name = "venc", .sta_mask = PWR_STATUS_VENC, .ctl_offs = SPM_VEN_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, .sram_pdn_bits = GENMASK(11, 8), .sram_pdn_ack_bits = GENMASK(15, 12), }, @@ -29,6 +33,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = { .name = "isp", .sta_mask = PWR_STATUS_ISP, .ctl_offs = SPM_ISP_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, .sram_pdn_bits = GENMASK(11, 8), .sram_pdn_ack_bits = GENMASK(13, 12), }, @@ -36,6 +42,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = { .name = "mm", .sta_mask = PWR_STATUS_DISP, .ctl_offs = SPM_DIS_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, .sram_pdn_bits = GENMASK(11, 8), .sram_pdn_ack_bits = GENMASK(12, 12), .bp_infracfg = { @@ -47,6 +55,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = { .name = "venc_lt", .sta_mask = PWR_STATUS_VENC_LT, .ctl_offs = SPM_VEN2_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, .sram_pdn_bits = GENMASK(11, 8), .sram_pdn_ack_bits = GENMASK(15, 12), }, @@ -54,6 +64,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = { .name = "audio", .sta_mask = PWR_STATUS_AUDIO, .ctl_offs = SPM_AUDIO_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, .sram_pdn_bits = GENMASK(11, 8), .sram_pdn_ack_bits = GENMASK(15, 12), }, @@ -61,6 +73,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = { .name = "usb", .sta_mask = PWR_STATUS_USB, .ctl_offs = SPM_USB_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, .sram_pdn_bits = GENMASK(11, 8), .sram_pdn_ack_bits = GENMASK(15, 12), .caps = MTK_SCPD_ACTIVE_WAKEUP, @@ -69,6 +83,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = { .name = "mfg_async", .sta_mask = PWR_STATUS_MFG_ASYNC, .ctl_offs = SPM_MFG_ASYNC_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, .sram_pdn_bits = GENMASK(11, 8), .sram_pdn_ack_bits = 0, .caps = MTK_SCPD_DOMAIN_SUPPLY, @@ -77,6 +93,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = { .name = "mfg_2d", .sta_mask = PWR_STATUS_MFG_2D, .ctl_offs = SPM_MFG_2D_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, .sram_pdn_bits = GENMASK(11, 8), .sram_pdn_ack_bits = GENMASK(13, 12), }, @@ -84,6 +102,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = { .name = "mfg", .sta_mask = PWR_STATUS_MFG, .ctl_offs = SPM_MFG_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, .sram_pdn_bits = GENMASK(13, 8), .sram_pdn_ack_bits = GENMASK(21, 16), .bp_infracfg = { @@ -98,8 +118,6 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = { static const struct 
scpsys_soc_data mt8173_scpsys_data = { .domains_data = scpsys_domain_data_mt8173, .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8173), - .pwr_sta_offs = SPM_PWR_STATUS, - .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, }; #endif /* __SOC_MEDIATEK_MT8173_PM_DOMAINS_H */ diff --git a/drivers/soc/mediatek/mt8183-pm-domains.h b/drivers/soc/mediatek/mt8183-pm-domains.h index 98a9940d05fb..71b8757e552d 100644 --- a/drivers/soc/mediatek/mt8183-pm-domains.h +++ b/drivers/soc/mediatek/mt8183-pm-domains.h @@ -15,6 +15,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = { .name = "audio", .sta_mask = PWR_STATUS_AUDIO, .ctl_offs = 0x0314, + .pwr_sta_offs = 0x0180, + .pwr_sta2nd_offs = 0x0184, .sram_pdn_bits = GENMASK(11, 8), .sram_pdn_ack_bits = GENMASK(15, 12), }, @@ -22,6 +24,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = { .name = "conn", .sta_mask = PWR_STATUS_CONN, .ctl_offs = 0x032c, + .pwr_sta_offs = 0x0180, + .pwr_sta2nd_offs = 0x0184, .sram_pdn_bits = 0, .sram_pdn_ack_bits = 0, .bp_infracfg = { @@ -33,6 +37,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = { .name = "mfg_async", .sta_mask = PWR_STATUS_MFG_ASYNC, .ctl_offs = 0x0334, + .pwr_sta_offs = 0x0180, + .pwr_sta2nd_offs = 0x0184, .sram_pdn_bits = 0, .sram_pdn_ack_bits = 0, }, @@ -40,6 +46,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = { .name = "mfg", .sta_mask = PWR_STATUS_MFG, .ctl_offs = 0x0338, + .pwr_sta_offs = 0x0180, + .pwr_sta2nd_offs = 0x0184, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), .caps = MTK_SCPD_DOMAIN_SUPPLY, @@ -48,6 +56,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = { .name = "mfg_core0", .sta_mask = BIT(7), .ctl_offs = 0x034c, + .pwr_sta_offs = 0x0180, + .pwr_sta2nd_offs = 0x0184, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), }, @@ -55,6 +65,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = { .name = "mfg_core1", .sta_mask = BIT(20), .ctl_offs = 0x0310, + .pwr_sta_offs = 0x0180, + .pwr_sta2nd_offs = 0x0184, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), }, @@ -62,6 +74,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = { .name = "mfg_2d", .sta_mask = PWR_STATUS_MFG_2D, .ctl_offs = 0x0348, + .pwr_sta_offs = 0x0180, + .pwr_sta2nd_offs = 0x0184, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), .bp_infracfg = { @@ -75,6 +89,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = { .name = "disp", .sta_mask = PWR_STATUS_DISP, .ctl_offs = 0x030c, + .pwr_sta_offs = 0x0180, + .pwr_sta2nd_offs = 0x0184, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), .bp_infracfg = { @@ -94,6 +110,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = { .name = "cam", .sta_mask = BIT(25), .ctl_offs = 0x0344, + .pwr_sta_offs = 0x0180, + .pwr_sta2nd_offs = 0x0184, .sram_pdn_bits = GENMASK(9, 8), .sram_pdn_ack_bits = GENMASK(13, 12), .bp_infracfg = { @@ -117,6 +135,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = { .name = "isp", .sta_mask = PWR_STATUS_ISP, .ctl_offs = 0x0308, + .pwr_sta_offs = 0x0180, + .pwr_sta2nd_offs = 0x0184, .sram_pdn_bits = GENMASK(9, 8), .sram_pdn_ack_bits = GENMASK(13, 12), .bp_infracfg = { @@ -140,6 +160,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = { .name = "vdec", .sta_mask = BIT(31), .ctl_offs = 0x0300, + .pwr_sta_offs = 0x0180, + .pwr_sta2nd_offs = 
0x0184, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), .bp_smi = { @@ -153,6 +175,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = { .name = "venc", .sta_mask = PWR_STATUS_VENC, .ctl_offs = 0x0304, + .pwr_sta_offs = 0x0180, + .pwr_sta2nd_offs = 0x0184, .sram_pdn_bits = GENMASK(11, 8), .sram_pdn_ack_bits = GENMASK(15, 12), .bp_smi = { @@ -166,6 +190,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = { .name = "vpu_top", .sta_mask = BIT(26), .ctl_offs = 0x0324, + .pwr_sta_offs = 0x0180, + .pwr_sta2nd_offs = 0x0184, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), .bp_infracfg = { @@ -193,6 +219,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = { .name = "vpu_core0", .sta_mask = BIT(27), .ctl_offs = 0x33c, + .pwr_sta_offs = 0x0180, + .pwr_sta2nd_offs = 0x0184, .sram_pdn_bits = GENMASK(11, 8), .sram_pdn_ack_bits = GENMASK(13, 12), .bp_infracfg = { @@ -211,6 +239,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = { .name = "vpu_core1", .sta_mask = BIT(28), .ctl_offs = 0x0340, + .pwr_sta_offs = 0x0180, + .pwr_sta2nd_offs = 0x0184, .sram_pdn_bits = GENMASK(11, 8), .sram_pdn_ack_bits = GENMASK(13, 12), .bp_infracfg = { @@ -230,8 +260,6 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = { static const struct scpsys_soc_data mt8183_scpsys_data = { .domains_data = scpsys_domain_data_mt8183, .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8183), - .pwr_sta_offs = 0x0180, - .pwr_sta2nd_offs = 0x0184 }; #endif /* __SOC_MEDIATEK_MT8183_PM_DOMAINS_H */ diff --git a/drivers/soc/mediatek/mt8192-pm-domains.h b/drivers/soc/mediatek/mt8192-pm-domains.h index 543dda70de01..558c4ee4784a 100644 --- a/drivers/soc/mediatek/mt8192-pm-domains.h +++ b/drivers/soc/mediatek/mt8192-pm-domains.h @@ -15,6 +15,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .name = "audio", .sta_mask = BIT(21), .ctl_offs = 0x0354, + .pwr_sta_offs = 0x016c, + .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), .bp_infracfg = { @@ -28,6 +30,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .name = "conn", .sta_mask = PWR_STATUS_CONN, .ctl_offs = 0x0304, + .pwr_sta_offs = 0x016c, + .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = 0, .sram_pdn_ack_bits = 0, .bp_infracfg = { @@ -50,6 +54,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .name = "mfg0", .sta_mask = BIT(2), .ctl_offs = 0x0308, + .pwr_sta_offs = 0x016c, + .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), }, @@ -57,6 +63,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .name = "mfg1", .sta_mask = BIT(3), .ctl_offs = 0x030c, + .pwr_sta_offs = 0x016c, + .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), .bp_infracfg = { @@ -82,6 +90,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .name = "mfg2", .sta_mask = BIT(4), .ctl_offs = 0x0310, + .pwr_sta_offs = 0x016c, + .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), }, @@ -89,6 +99,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .name = "mfg3", .sta_mask = BIT(5), .ctl_offs = 0x0314, + .pwr_sta_offs = 0x016c, + .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), }, @@ -96,6 +108,8 @@ static const struct 
scpsys_domain_data scpsys_domain_data_mt8192[] = { .name = "mfg4", .sta_mask = BIT(6), .ctl_offs = 0x0318, + .pwr_sta_offs = 0x016c, + .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), }, @@ -103,6 +117,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .name = "mfg5", .sta_mask = BIT(7), .ctl_offs = 0x031c, + .pwr_sta_offs = 0x016c, + .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), }, @@ -110,6 +126,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .name = "mfg6", .sta_mask = BIT(8), .ctl_offs = 0x0320, + .pwr_sta_offs = 0x016c, + .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), }, @@ -117,6 +135,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .name = "disp", .sta_mask = BIT(20), .ctl_offs = 0x0350, + .pwr_sta_offs = 0x016c, + .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), .bp_infracfg = { @@ -146,6 +166,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .name = "ipe", .sta_mask = BIT(14), .ctl_offs = 0x0338, + .pwr_sta_offs = 0x016c, + .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), .bp_infracfg = { @@ -163,6 +185,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .name = "isp", .sta_mask = BIT(12), .ctl_offs = 0x0330, + .pwr_sta_offs = 0x016c, + .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), .bp_infracfg = { @@ -180,6 +204,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .name = "isp2", .sta_mask = BIT(13), .ctl_offs = 0x0334, + .pwr_sta_offs = 0x016c, + .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), .bp_infracfg = { @@ -197,6 +223,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .name = "mdp", .sta_mask = BIT(19), .ctl_offs = 0x034c, + .pwr_sta_offs = 0x016c, + .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), .bp_infracfg = { @@ -214,6 +242,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .name = "venc", .sta_mask = BIT(17), .ctl_offs = 0x0344, + .pwr_sta_offs = 0x016c, + .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), .bp_infracfg = { @@ -231,6 +261,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .name = "vdec", .sta_mask = BIT(15), .ctl_offs = 0x033c, + .pwr_sta_offs = 0x016c, + .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), .bp_infracfg = { @@ -248,6 +280,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .name = "vdec2", .sta_mask = BIT(16), .ctl_offs = 0x0340, + .pwr_sta_offs = 0x016c, + .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), }, @@ -255,6 +289,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .name = "cam", .sta_mask = BIT(23), .ctl_offs = 0x035c, + .pwr_sta_offs = 0x016c, + .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), .bp_infracfg = { @@ -284,6 +320,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .name = "cam_rawa", .sta_mask = BIT(24), .ctl_offs = 0x0360, + .pwr_sta_offs = 
0x016c, + .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), }, @@ -291,6 +329,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .name = "cam_rawb", .sta_mask = BIT(25), .ctl_offs = 0x0364, + .pwr_sta_offs = 0x016c, + .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), }, @@ -298,6 +338,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .name = "cam_rawc", .sta_mask = BIT(26), .ctl_offs = 0x0368, + .pwr_sta_offs = 0x016c, + .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), }, @@ -306,8 +348,6 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { static const struct scpsys_soc_data mt8192_scpsys_data = { .domains_data = scpsys_domain_data_mt8192, .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8192), - .pwr_sta_offs = 0x016c, - .pwr_sta2nd_offs = 0x0170, }; #endif /* __SOC_MEDIATEK_MT8192_PM_DOMAINS_H */ diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c index afd2fd74802d..ad06b6f90435 100644 --- a/drivers/soc/mediatek/mtk-pm-domains.c +++ b/drivers/soc/mediatek/mtk-pm-domains.c @@ -60,10 +60,10 @@ static bool scpsys_domain_is_on(struct scpsys_domain *pd) struct scpsys *scpsys = pd->scpsys; u32 status, status2; - regmap_read(scpsys->base, scpsys->soc_data->pwr_sta_offs, &status); + regmap_read(scpsys->base, pd->data->pwr_sta_offs, &status); status &= pd->data->sta_mask; - regmap_read(scpsys->base, scpsys->soc_data->pwr_sta2nd_offs, &status2); + regmap_read(scpsys->base, pd->data->pwr_sta2nd_offs, &status2); status2 &= pd->data->sta_mask; /* A domain is on when both status bits are set. */ diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h index 089a31679806..c233ed828f86 100644 --- a/drivers/soc/mediatek/mtk-pm-domains.h +++ b/drivers/soc/mediatek/mtk-pm-domains.h @@ -92,13 +92,13 @@ struct scpsys_domain_data { u8 caps; const struct scpsys_bus_prot_data bp_infracfg[SPM_MAX_BUS_PROT_DATA]; const struct scpsys_bus_prot_data bp_smi[SPM_MAX_BUS_PROT_DATA]; + int pwr_sta_offs; + int pwr_sta2nd_offs; }; struct scpsys_soc_data { const struct scpsys_domain_data *domains_data; int num_domains; - int pwr_sta_offs; - int pwr_sta2nd_offs; }; #endif /* __SOC_MEDIATEK_MTK_PM_DOMAINS_H */ -- cgit v1.2.3 From 342479c86d3e8f9e946a07ff0cafbd36511ae30a Mon Sep 17 00:00:00 2001 From: Chun-Jie Chen Date: Sun, 30 Jan 2022 09:21:04 +0800 Subject: soc: mediatek: pm-domains: Add support for mt8195 Add domain control data including bus protection data size change due to more protection steps in mt8195. 
Signed-off-by: Chun-Jie Chen Reviewed-by: Chen-Yu Tsai Reviewed-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20220130012104.5292-6-chun-jie.chen@mediatek.com Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mt8195-pm-domains.h | 613 +++++++++++++++++++++++++++++++ drivers/soc/mediatek/mtk-pm-domains.c | 5 + drivers/soc/mediatek/mtk-pm-domains.h | 2 +- include/linux/soc/mediatek/infracfg.h | 82 +++++ 4 files changed, 701 insertions(+), 1 deletion(-) create mode 100644 drivers/soc/mediatek/mt8195-pm-domains.h (limited to 'drivers') diff --git a/drivers/soc/mediatek/mt8195-pm-domains.h b/drivers/soc/mediatek/mt8195-pm-domains.h new file mode 100644 index 000000000000..938f4d51f5ae --- /dev/null +++ b/drivers/soc/mediatek/mt8195-pm-domains.h @@ -0,0 +1,613 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 MediaTek Inc. + * Author: Chun-Jie Chen + */ + +#ifndef __SOC_MEDIATEK_MT8195_PM_DOMAINS_H +#define __SOC_MEDIATEK_MT8195_PM_DOMAINS_H + +#include "mtk-pm-domains.h" +#include + +/* + * MT8195 power domain support + */ + +static const struct scpsys_domain_data scpsys_domain_data_mt8195[] = { + [MT8195_POWER_DOMAIN_PCIE_MAC_P0] = { + .name = "pcie_mac_p0", + .sta_mask = BIT(11), + .ctl_offs = 0x328, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VDNR_PCIE_MAC_P0, + MT8195_TOP_AXI_PROT_EN_VDNR_SET, + MT8195_TOP_AXI_PROT_EN_VDNR_CLR, + MT8195_TOP_AXI_PROT_EN_VDNR_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VDNR_1_PCIE_MAC_P0, + MT8195_TOP_AXI_PROT_EN_VDNR_1_SET, + MT8195_TOP_AXI_PROT_EN_VDNR_1_CLR, + MT8195_TOP_AXI_PROT_EN_VDNR_1_STA1), + }, + }, + [MT8195_POWER_DOMAIN_PCIE_MAC_P1] = { + .name = "pcie_mac_p1", + .sta_mask = BIT(12), + .ctl_offs = 0x32C, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VDNR_PCIE_MAC_P1, + MT8195_TOP_AXI_PROT_EN_VDNR_SET, + MT8195_TOP_AXI_PROT_EN_VDNR_CLR, + MT8195_TOP_AXI_PROT_EN_VDNR_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VDNR_1_PCIE_MAC_P1, + MT8195_TOP_AXI_PROT_EN_VDNR_1_SET, + MT8195_TOP_AXI_PROT_EN_VDNR_1_CLR, + MT8195_TOP_AXI_PROT_EN_VDNR_1_STA1), + }, + }, + [MT8195_POWER_DOMAIN_PCIE_PHY] = { + .name = "pcie_phy", + .sta_mask = BIT(13), + .ctl_offs = 0x330, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .caps = MTK_SCPD_ACTIVE_WAKEUP, + }, + [MT8195_POWER_DOMAIN_SSUSB_PCIE_PHY] = { + .name = "ssusb_pcie_phy", + .sta_mask = BIT(14), + .ctl_offs = 0x334, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .caps = MTK_SCPD_ACTIVE_WAKEUP, + }, + [MT8195_POWER_DOMAIN_CSI_RX_TOP] = { + .name = "csi_rx_top", + .sta_mask = BIT(18), + .ctl_offs = 0x3C4, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_ETHER] = { + .name = "ether", + .sta_mask = BIT(3), + .ctl_offs = 0x344, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_ACTIVE_WAKEUP, + }, + [MT8195_POWER_DOMAIN_ADSP] = { + .name = "adsp", + .sta_mask = BIT(10), + .ctl_offs = 0x360, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_2_ADSP, + MT8195_TOP_AXI_PROT_EN_2_SET, + 
MT8195_TOP_AXI_PROT_EN_2_CLR, + MT8195_TOP_AXI_PROT_EN_2_STA1), + }, + .caps = MTK_SCPD_SRAM_ISO | MTK_SCPD_ACTIVE_WAKEUP, + }, + [MT8195_POWER_DOMAIN_AUDIO] = { + .name = "audio", + .sta_mask = BIT(8), + .ctl_offs = 0x358, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_2_AUDIO, + MT8195_TOP_AXI_PROT_EN_2_SET, + MT8195_TOP_AXI_PROT_EN_2_CLR, + MT8195_TOP_AXI_PROT_EN_2_STA1), + }, + }, + [MT8195_POWER_DOMAIN_MFG0] = { + .name = "mfg0", + .sta_mask = BIT(1), + .ctl_offs = 0x300, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY, + }, + [MT8195_POWER_DOMAIN_MFG1] = { + .name = "mfg1", + .sta_mask = BIT(2), + .ctl_offs = 0x304, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MFG1, + MT8195_TOP_AXI_PROT_EN_SET, + MT8195_TOP_AXI_PROT_EN_CLR, + MT8195_TOP_AXI_PROT_EN_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_2_MFG1, + MT8195_TOP_AXI_PROT_EN_2_SET, + MT8195_TOP_AXI_PROT_EN_2_CLR, + MT8195_TOP_AXI_PROT_EN_2_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_1_MFG1, + MT8195_TOP_AXI_PROT_EN_1_SET, + MT8195_TOP_AXI_PROT_EN_1_CLR, + MT8195_TOP_AXI_PROT_EN_1_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_2_MFG1_2ND, + MT8195_TOP_AXI_PROT_EN_2_SET, + MT8195_TOP_AXI_PROT_EN_2_CLR, + MT8195_TOP_AXI_PROT_EN_2_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MFG1_2ND, + MT8195_TOP_AXI_PROT_EN_SET, + MT8195_TOP_AXI_PROT_EN_CLR, + MT8195_TOP_AXI_PROT_EN_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_MFG1, + MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET, + MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR, + MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_MFG2] = { + .name = "mfg2", + .sta_mask = BIT(3), + .ctl_offs = 0x308, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_MFG3] = { + .name = "mfg3", + .sta_mask = BIT(4), + .ctl_offs = 0x30C, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_MFG4] = { + .name = "mfg4", + .sta_mask = BIT(5), + .ctl_offs = 0x310, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_MFG5] = { + .name = "mfg5", + .sta_mask = BIT(6), + .ctl_offs = 0x314, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_MFG6] = { + .name = "mfg6", + .sta_mask = BIT(7), + .ctl_offs = 0x318, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_VPPSYS0] = { + .name = "vppsys0", + .sta_mask = BIT(11), + .ctl_offs = 0x364, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + 
.bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VPPSYS0, + MT8195_TOP_AXI_PROT_EN_SET, + MT8195_TOP_AXI_PROT_EN_CLR, + MT8195_TOP_AXI_PROT_EN_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS0, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VPPSYS0_2ND, + MT8195_TOP_AXI_PROT_EN_SET, + MT8195_TOP_AXI_PROT_EN_CLR, + MT8195_TOP_AXI_PROT_EN_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS0_2ND, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VPPSYS0, + MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET, + MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR, + MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1), + }, + }, + [MT8195_POWER_DOMAIN_VDOSYS0] = { + .name = "vdosys0", + .sta_mask = BIT(13), + .ctl_offs = 0x36C, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VDOSYS0, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VDOSYS0, + MT8195_TOP_AXI_PROT_EN_SET, + MT8195_TOP_AXI_PROT_EN_CLR, + MT8195_TOP_AXI_PROT_EN_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VDOSYS0, + MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET, + MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR, + MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1), + }, + }, + [MT8195_POWER_DOMAIN_VPPSYS1] = { + .name = "vppsys1", + .sta_mask = BIT(12), + .ctl_offs = 0x368, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VPPSYS1, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VPPSYS1_2ND, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS1, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + }, + }, + [MT8195_POWER_DOMAIN_VDOSYS1] = { + .name = "vdosys1", + .sta_mask = BIT(14), + .ctl_offs = 0x370, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VDOSYS1, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VDOSYS1_2ND, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VDOSYS1, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + }, + }, + [MT8195_POWER_DOMAIN_DP_TX] = { + .name = "dp_tx", + .sta_mask = BIT(16), + .ctl_offs = 0x378, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VDNR_1_DP_TX, + MT8195_TOP_AXI_PROT_EN_VDNR_1_SET, + MT8195_TOP_AXI_PROT_EN_VDNR_1_CLR, + MT8195_TOP_AXI_PROT_EN_VDNR_1_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_EPD_TX] = { + .name = "epd_tx", + .sta_mask = BIT(17), + .ctl_offs = 
0x37C, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VDNR_1_EPD_TX, + MT8195_TOP_AXI_PROT_EN_VDNR_1_SET, + MT8195_TOP_AXI_PROT_EN_VDNR_1_CLR, + MT8195_TOP_AXI_PROT_EN_VDNR_1_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_HDMI_TX] = { + .name = "hdmi_tx", + .sta_mask = BIT(18), + .ctl_offs = 0x380, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP, + }, + [MT8195_POWER_DOMAIN_WPESYS] = { + .name = "wpesys", + .sta_mask = BIT(15), + .ctl_offs = 0x374, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_WPESYS, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_WPESYS, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_WPESYS_2ND, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + }, + }, + [MT8195_POWER_DOMAIN_VDEC0] = { + .name = "vdec0", + .sta_mask = BIT(20), + .ctl_offs = 0x388, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VDEC0, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VDEC0, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VDEC0_2ND, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VDEC0_2ND, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_VDEC1] = { + .name = "vdec1", + .sta_mask = BIT(21), + .ctl_offs = 0x38C, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VDEC1, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VDEC1_2ND, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_VDEC2] = { + .name = "vdec2", + .sta_mask = BIT(22), + .ctl_offs = 0x390, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VDEC2, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VDEC2_2ND, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_VENC] = { + .name = "venc", + .sta_mask = 
BIT(23), + .ctl_offs = 0x394, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VENC, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VENC_2ND, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VENC, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_VENC_CORE1] = { + .name = "venc_core1", + .sta_mask = BIT(24), + .ctl_offs = 0x398, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VENC_CORE1, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VENC_CORE1, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_IMG] = { + .name = "img", + .sta_mask = BIT(29), + .ctl_offs = 0x3AC, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_IMG, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_IMG_2ND, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_DIP] = { + .name = "dip", + .sta_mask = BIT(30), + .ctl_offs = 0x3B0, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_IPE] = { + .name = "ipe", + .sta_mask = BIT(31), + .ctl_offs = 0x3B4, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_IPE, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_IPE, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_CAM] = { + .name = "cam", + .sta_mask = BIT(25), + .ctl_offs = 0x39C, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_2_CAM, + MT8195_TOP_AXI_PROT_EN_2_SET, + MT8195_TOP_AXI_PROT_EN_2_CLR, + MT8195_TOP_AXI_PROT_EN_2_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_CAM, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_1_CAM, + MT8195_TOP_AXI_PROT_EN_1_SET, + MT8195_TOP_AXI_PROT_EN_1_CLR, + MT8195_TOP_AXI_PROT_EN_1_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_CAM_2ND, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + 
BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_CAM, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_CAM_RAWA] = { + .name = "cam_rawa", + .sta_mask = BIT(26), + .ctl_offs = 0x3A0, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_CAM_RAWB] = { + .name = "cam_rawb", + .sta_mask = BIT(27), + .ctl_offs = 0x3A4, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_CAM_MRAW] = { + .name = "cam_mraw", + .sta_mask = BIT(28), + .ctl_offs = 0x3A8, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, +}; + +static const struct scpsys_soc_data mt8195_scpsys_data = { + .domains_data = scpsys_domain_data_mt8195, + .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8195), +}; + +#endif /* __SOC_MEDIATEK_MT8195_PM_DOMAINS_H */ diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c index ad06b6f90435..61973a306e97 100644 --- a/drivers/soc/mediatek/mtk-pm-domains.c +++ b/drivers/soc/mediatek/mtk-pm-domains.c @@ -20,6 +20,7 @@ #include "mt8173-pm-domains.h" #include "mt8183-pm-domains.h" #include "mt8192-pm-domains.h" +#include "mt8195-pm-domains.h" #define MTK_POLL_DELAY_US 10 #define MTK_POLL_TIMEOUT USEC_PER_SEC @@ -569,6 +570,10 @@ static const struct of_device_id scpsys_of_match[] = { .compatible = "mediatek,mt8192-power-controller", .data = &mt8192_scpsys_data, }, + { + .compatible = "mediatek,mt8195-power-controller", + .data = &mt8195_scpsys_data, + }, { } }; diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h index c233ed828f86..daa24e890dd4 100644 --- a/drivers/soc/mediatek/mtk-pm-domains.h +++ b/drivers/soc/mediatek/mtk-pm-domains.h @@ -37,7 +37,7 @@ #define PWR_STATUS_AUDIO BIT(24) #define PWR_STATUS_USB BIT(25) -#define SPM_MAX_BUS_PROT_DATA 5 +#define SPM_MAX_BUS_PROT_DATA 6 #define _BUS_PROT(_mask, _set, _clr, _sta, _update, _ignore) { \ .bus_prot_mask = (_mask), \ diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h index 4615a228da51..d858e0bab7a2 100644 --- a/include/linux/soc/mediatek/infracfg.h +++ b/include/linux/soc/mediatek/infracfg.h @@ -2,6 +2,88 @@ #ifndef __SOC_MEDIATEK_INFRACFG_H #define __SOC_MEDIATEK_INFRACFG_H +#define MT8195_TOP_AXI_PROT_EN_STA1 0x228 +#define MT8195_TOP_AXI_PROT_EN_1_STA1 0x258 +#define MT8195_TOP_AXI_PROT_EN_SET 0x2a0 +#define MT8195_TOP_AXI_PROT_EN_CLR 0x2a4 +#define MT8195_TOP_AXI_PROT_EN_1_SET 0x2a8 +#define MT8195_TOP_AXI_PROT_EN_1_CLR 0x2ac +#define MT8195_TOP_AXI_PROT_EN_MM_SET 0x2d4 +#define MT8195_TOP_AXI_PROT_EN_MM_CLR 0x2d8 +#define MT8195_TOP_AXI_PROT_EN_MM_STA1 0x2ec +#define MT8195_TOP_AXI_PROT_EN_2_SET 0x714 +#define MT8195_TOP_AXI_PROT_EN_2_CLR 0x718 +#define MT8195_TOP_AXI_PROT_EN_2_STA1 0x724 +#define MT8195_TOP_AXI_PROT_EN_VDNR_SET 0xb84 +#define MT8195_TOP_AXI_PROT_EN_VDNR_CLR 0xb88 +#define MT8195_TOP_AXI_PROT_EN_VDNR_STA1 0xb90 +#define MT8195_TOP_AXI_PROT_EN_VDNR_1_SET 0xba4 +#define MT8195_TOP_AXI_PROT_EN_VDNR_1_CLR 0xba8 +#define MT8195_TOP_AXI_PROT_EN_VDNR_1_STA1 0xbb0 +#define MT8195_TOP_AXI_PROT_EN_VDNR_2_SET 0xbb8 
+#define MT8195_TOP_AXI_PROT_EN_VDNR_2_CLR 0xbbc +#define MT8195_TOP_AXI_PROT_EN_VDNR_2_STA1 0xbc4 +#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET 0xbcc +#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR 0xbd0 +#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1 0xbd8 +#define MT8195_TOP_AXI_PROT_EN_MM_2_SET 0xdcc +#define MT8195_TOP_AXI_PROT_EN_MM_2_CLR 0xdd0 +#define MT8195_TOP_AXI_PROT_EN_MM_2_STA1 0xdd8 + +#define MT8195_TOP_AXI_PROT_EN_VDOSYS0 BIT(6) +#define MT8195_TOP_AXI_PROT_EN_VPPSYS0 BIT(10) +#define MT8195_TOP_AXI_PROT_EN_MFG1 BIT(11) +#define MT8195_TOP_AXI_PROT_EN_MFG1_2ND GENMASK(22, 21) +#define MT8195_TOP_AXI_PROT_EN_VPPSYS0_2ND BIT(23) +#define MT8195_TOP_AXI_PROT_EN_1_MFG1 GENMASK(20, 19) +#define MT8195_TOP_AXI_PROT_EN_1_CAM BIT(22) +#define MT8195_TOP_AXI_PROT_EN_2_CAM BIT(0) +#define MT8195_TOP_AXI_PROT_EN_2_MFG1_2ND GENMASK(6, 5) +#define MT8195_TOP_AXI_PROT_EN_2_MFG1 BIT(7) +#define MT8195_TOP_AXI_PROT_EN_2_AUDIO (BIT(9) | BIT(11)) +#define MT8195_TOP_AXI_PROT_EN_2_ADSP (BIT(12) | GENMASK(16, 14)) +#define MT8195_TOP_AXI_PROT_EN_MM_CAM (BIT(0) | BIT(2) | BIT(4)) +#define MT8195_TOP_AXI_PROT_EN_MM_IPE BIT(1) +#define MT8195_TOP_AXI_PROT_EN_MM_IMG BIT(3) +#define MT8195_TOP_AXI_PROT_EN_MM_VDOSYS0 GENMASK(21, 17) +#define MT8195_TOP_AXI_PROT_EN_MM_VPPSYS1 GENMASK(8, 5) +#define MT8195_TOP_AXI_PROT_EN_MM_VENC (BIT(9) | BIT(11)) +#define MT8195_TOP_AXI_PROT_EN_MM_VENC_CORE1 (BIT(10) | BIT(12)) +#define MT8195_TOP_AXI_PROT_EN_MM_VDEC0 BIT(13) +#define MT8195_TOP_AXI_PROT_EN_MM_VDEC1 BIT(14) +#define MT8195_TOP_AXI_PROT_EN_MM_VDOSYS1_2ND BIT(22) +#define MT8195_TOP_AXI_PROT_EN_MM_VPPSYS1_2ND BIT(23) +#define MT8195_TOP_AXI_PROT_EN_MM_CAM_2ND BIT(24) +#define MT8195_TOP_AXI_PROT_EN_MM_IMG_2ND BIT(25) +#define MT8195_TOP_AXI_PROT_EN_MM_VENC_2ND BIT(26) +#define MT8195_TOP_AXI_PROT_EN_MM_WPESYS BIT(27) +#define MT8195_TOP_AXI_PROT_EN_MM_VDEC0_2ND BIT(28) +#define MT8195_TOP_AXI_PROT_EN_MM_VDEC1_2ND BIT(29) +#define MT8195_TOP_AXI_PROT_EN_MM_VDOSYS1 GENMASK(31, 30) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS0_2ND (GENMASK(1, 0) | BIT(4) | BIT(11)) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VENC BIT(2) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VENC_CORE1 (BIT(3) | BIT(15)) +#define MT8195_TOP_AXI_PROT_EN_MM_2_CAM (BIT(5) | BIT(17)) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS1 (GENMASK(7, 6) | BIT(18)) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS0 GENMASK(9, 8) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VDOSYS1 BIT(10) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC2_2ND BIT(12) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC0_2ND BIT(13) +#define MT8195_TOP_AXI_PROT_EN_MM_2_WPESYS_2ND BIT(14) +#define MT8195_TOP_AXI_PROT_EN_MM_2_IPE BIT(16) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC2 BIT(21) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC0 BIT(22) +#define MT8195_TOP_AXI_PROT_EN_MM_2_WPESYS GENMASK(24, 23) +#define MT8195_TOP_AXI_PROT_EN_VDNR_1_EPD_TX BIT(1) +#define MT8195_TOP_AXI_PROT_EN_VDNR_1_DP_TX BIT(2) +#define MT8195_TOP_AXI_PROT_EN_VDNR_PCIE_MAC_P0 (BIT(11) | BIT(28)) +#define MT8195_TOP_AXI_PROT_EN_VDNR_PCIE_MAC_P1 (BIT(12) | BIT(29)) +#define MT8195_TOP_AXI_PROT_EN_VDNR_1_PCIE_MAC_P0 BIT(13) +#define MT8195_TOP_AXI_PROT_EN_VDNR_1_PCIE_MAC_P1 BIT(14) +#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_MFG1 (BIT(17) | BIT(19)) +#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VPPSYS0 BIT(20) +#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VDOSYS0 BIT(21) + #define MT8192_TOP_AXI_PROT_EN_STA1 0x228 #define MT8192_TOP_AXI_PROT_EN_1_STA1 0x258 #define MT8192_TOP_AXI_PROT_EN_SET 0x2a0 -- cgit v1.2.3 From 
88590cbc17033c86c8591d9f22401325961a8a59 Mon Sep 17 00:00:00 2001 From: Chun-Jie Chen Date: Tue, 15 Feb 2022 18:49:17 +0800 Subject: soc: mediatek: pm-domains: Add support for mt8186 Add power domain control data in mt8186. Signed-off-by: Chun-Jie Chen Link: https://lore.kernel.org/r/20220215104917.5726-3-chun-jie.chen@mediatek.com Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mt8186-pm-domains.h | 344 +++++++++++++++++++++++++++++++ drivers/soc/mediatek/mtk-pm-domains.c | 5 + include/linux/soc/mediatek/infracfg.h | 48 +++++ 3 files changed, 397 insertions(+) create mode 100644 drivers/soc/mediatek/mt8186-pm-domains.h (limited to 'drivers') diff --git a/drivers/soc/mediatek/mt8186-pm-domains.h b/drivers/soc/mediatek/mt8186-pm-domains.h new file mode 100644 index 000000000000..bf2dd0cdc3a8 --- /dev/null +++ b/drivers/soc/mediatek/mt8186-pm-domains.h @@ -0,0 +1,344 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 MediaTek Inc. + * Author: Chun-Jie Chen + */ + +#ifndef __SOC_MEDIATEK_MT8186_PM_DOMAINS_H +#define __SOC_MEDIATEK_MT8186_PM_DOMAINS_H + +#include "mtk-pm-domains.h" +#include + +/* + * MT8186 power domain support + */ + +static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = { + [MT8186_POWER_DOMAIN_MFG0] = { + .name = "mfg0", + .sta_mask = BIT(2), + .ctl_offs = 0x308, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY, + }, + [MT8186_POWER_DOMAIN_MFG1] = { + .name = "mfg1", + .sta_mask = BIT(3), + .ctl_offs = 0x30c, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_MFG1_STEP1, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_MFG1_STEP2, + MT8186_TOP_AXI_PROT_EN_SET, + MT8186_TOP_AXI_PROT_EN_CLR, + MT8186_TOP_AXI_PROT_EN_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_MFG1_STEP3, + MT8186_TOP_AXI_PROT_EN_SET, + MT8186_TOP_AXI_PROT_EN_CLR, + MT8186_TOP_AXI_PROT_EN_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_MFG1_STEP4, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_MFG2] = { + .name = "mfg2", + .sta_mask = BIT(4), + .ctl_offs = 0x310, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_MFG3] = { + .name = "mfg3", + .sta_mask = BIT(5), + .ctl_offs = 0x314, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_SSUSB] = { + .name = "ssusb", + .sta_mask = BIT(20), + .ctl_offs = 0x9F0, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_ACTIVE_WAKEUP, + }, + [MT8186_POWER_DOMAIN_SSUSB_P1] = { + .name = "ssusb_p1", + .sta_mask = BIT(19), + .ctl_offs = 0x9F4, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_ACTIVE_WAKEUP, + }, + [MT8186_POWER_DOMAIN_DIS] = { + .name = "dis", + .sta_mask = BIT(21), + .ctl_offs = 0x354, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = 
BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_DIS_STEP1, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_DIS_STEP2, + MT8186_TOP_AXI_PROT_EN_SET, + MT8186_TOP_AXI_PROT_EN_CLR, + MT8186_TOP_AXI_PROT_EN_STA), + }, + }, + [MT8186_POWER_DOMAIN_IMG] = { + .name = "img", + .sta_mask = BIT(13), + .ctl_offs = 0x334, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_IMG_STEP1, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_IMG_STEP2, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_IMG2] = { + .name = "img2", + .sta_mask = BIT(14), + .ctl_offs = 0x338, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_IPE] = { + .name = "ipe", + .sta_mask = BIT(15), + .ctl_offs = 0x33C, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_IPE_STEP1, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_IPE_STEP2, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_CAM] = { + .name = "cam", + .sta_mask = BIT(23), + .ctl_offs = 0x35C, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_CAM_STEP1, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_CAM_STEP2, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_CAM_RAWA] = { + .name = "cam_rawa", + .sta_mask = BIT(24), + .ctl_offs = 0x360, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_CAM_RAWB] = { + .name = "cam_rawb", + .sta_mask = BIT(25), + .ctl_offs = 0x364, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_VENC] = { + .name = "venc", + .sta_mask = BIT(18), + .ctl_offs = 0x348, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_VENC_STEP1, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_VENC_STEP2, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_VDEC] = { + .name = "vdec", + .sta_mask = BIT(16), + .ctl_offs = 0x340, + .pwr_sta_offs = 0x16C, + 
.pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_VDEC_STEP1, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_VDEC_STEP2, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_WPE] = { + .name = "wpe", + .sta_mask = BIT(0), + .ctl_offs = 0x3F8, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_2_WPE_STEP1, + MT8186_TOP_AXI_PROT_EN_2_SET, + MT8186_TOP_AXI_PROT_EN_2_CLR, + MT8186_TOP_AXI_PROT_EN_2_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_2_WPE_STEP2, + MT8186_TOP_AXI_PROT_EN_2_SET, + MT8186_TOP_AXI_PROT_EN_2_CLR, + MT8186_TOP_AXI_PROT_EN_2_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_CONN_ON] = { + .name = "conn_on", + .sta_mask = BIT(1), + .ctl_offs = 0x304, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .bp_infracfg = { + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_CONN_ON_STEP1, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP2, + MT8186_TOP_AXI_PROT_EN_SET, + MT8186_TOP_AXI_PROT_EN_CLR, + MT8186_TOP_AXI_PROT_EN_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP3, + MT8186_TOP_AXI_PROT_EN_SET, + MT8186_TOP_AXI_PROT_EN_CLR, + MT8186_TOP_AXI_PROT_EN_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP4, + MT8186_TOP_AXI_PROT_EN_SET, + MT8186_TOP_AXI_PROT_EN_CLR, + MT8186_TOP_AXI_PROT_EN_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP, + }, + [MT8186_POWER_DOMAIN_CSIRX_TOP] = { + .name = "csirx_top", + .sta_mask = BIT(6), + .ctl_offs = 0x318, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_ADSP_AO] = { + .name = "adsp_ao", + .sta_mask = BIT(17), + .ctl_offs = 0x9FC, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_ADSP_INFRA] = { + .name = "adsp_infra", + .sta_mask = BIT(10), + .ctl_offs = 0x9F8, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_ADSP_TOP] = { + .name = "adsp_top", + .sta_mask = BIT(31), + .ctl_offs = 0x3E4, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_3_ADSP_TOP_STEP1, + MT8186_TOP_AXI_PROT_EN_3_SET, + MT8186_TOP_AXI_PROT_EN_3_CLR, + MT8186_TOP_AXI_PROT_EN_3_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_3_ADSP_TOP_STEP2, + MT8186_TOP_AXI_PROT_EN_3_SET, + MT8186_TOP_AXI_PROT_EN_3_CLR, + MT8186_TOP_AXI_PROT_EN_3_STA), + }, + .caps = MTK_SCPD_SRAM_ISO | MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP, + }, +}; + +static const struct scpsys_soc_data mt8186_scpsys_data = { + .domains_data = scpsys_domain_data_mt8186, + .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8186), +}; + +#endif /* __SOC_MEDIATEK_MT8186_PM_DOMAINS_H */ diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c index 61973a306e97..5ced254b082b 100644 --- 
a/drivers/soc/mediatek/mtk-pm-domains.c +++ b/drivers/soc/mediatek/mtk-pm-domains.c @@ -19,6 +19,7 @@ #include "mt8167-pm-domains.h" #include "mt8173-pm-domains.h" #include "mt8183-pm-domains.h" +#include "mt8186-pm-domains.h" #include "mt8192-pm-domains.h" #include "mt8195-pm-domains.h" @@ -566,6 +567,10 @@ static const struct of_device_id scpsys_of_match[] = { .compatible = "mediatek,mt8183-power-controller", .data = &mt8183_scpsys_data, }, + { + .compatible = "mediatek,mt8186-power-controller", + .data = &mt8186_scpsys_data, + }, { .compatible = "mediatek,mt8192-power-controller", .data = &mt8192_scpsys_data, diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h index d858e0bab7a2..8a1c2040a28e 100644 --- a/include/linux/soc/mediatek/infracfg.h +++ b/include/linux/soc/mediatek/infracfg.h @@ -140,6 +140,54 @@ #define MT8192_TOP_AXI_PROT_EN_MM_2_MDP_2ND BIT(13) #define MT8192_TOP_AXI_PROT_EN_VDNR_CAM BIT(21) +#define MT8186_TOP_AXI_PROT_EN_SET (0x2A0) +#define MT8186_TOP_AXI_PROT_EN_CLR (0x2A4) +#define MT8186_TOP_AXI_PROT_EN_STA (0x228) +#define MT8186_TOP_AXI_PROT_EN_1_SET (0x2A8) +#define MT8186_TOP_AXI_PROT_EN_1_CLR (0x2AC) +#define MT8186_TOP_AXI_PROT_EN_1_STA (0x258) +#define MT8186_TOP_AXI_PROT_EN_2_SET (0x2B0) +#define MT8186_TOP_AXI_PROT_EN_2_CLR (0x2B4) +#define MT8186_TOP_AXI_PROT_EN_2_STA (0x26C) +#define MT8186_TOP_AXI_PROT_EN_3_SET (0x2B8) +#define MT8186_TOP_AXI_PROT_EN_3_CLR (0x2BC) +#define MT8186_TOP_AXI_PROT_EN_3_STA (0x2C8) + +/* MFG1 */ +#define MT8186_TOP_AXI_PROT_EN_1_MFG1_STEP1 (GENMASK(28, 27)) +#define MT8186_TOP_AXI_PROT_EN_MFG1_STEP2 (GENMASK(22, 21)) +#define MT8186_TOP_AXI_PROT_EN_MFG1_STEP3 (BIT(25)) +#define MT8186_TOP_AXI_PROT_EN_1_MFG1_STEP4 (BIT(29)) +/* DIS */ +#define MT8186_TOP_AXI_PROT_EN_1_DIS_STEP1 (GENMASK(12, 11)) +#define MT8186_TOP_AXI_PROT_EN_DIS_STEP2 (GENMASK(2, 1) | GENMASK(11, 10)) +/* IMG */ +#define MT8186_TOP_AXI_PROT_EN_1_IMG_STEP1 (BIT(23)) +#define MT8186_TOP_AXI_PROT_EN_1_IMG_STEP2 (BIT(15)) +/* IPE */ +#define MT8186_TOP_AXI_PROT_EN_1_IPE_STEP1 (BIT(24)) +#define MT8186_TOP_AXI_PROT_EN_1_IPE_STEP2 (BIT(16)) +/* CAM */ +#define MT8186_TOP_AXI_PROT_EN_1_CAM_STEP1 (GENMASK(22, 21)) +#define MT8186_TOP_AXI_PROT_EN_1_CAM_STEP2 (GENMASK(14, 13)) +/* VENC */ +#define MT8186_TOP_AXI_PROT_EN_1_VENC_STEP1 (BIT(31)) +#define MT8186_TOP_AXI_PROT_EN_1_VENC_STEP2 (BIT(19)) +/* VDEC */ +#define MT8186_TOP_AXI_PROT_EN_1_VDEC_STEP1 (BIT(30)) +#define MT8186_TOP_AXI_PROT_EN_1_VDEC_STEP2 (BIT(17)) +/* WPE */ +#define MT8186_TOP_AXI_PROT_EN_2_WPE_STEP1 (BIT(17)) +#define MT8186_TOP_AXI_PROT_EN_2_WPE_STEP2 (BIT(16)) +/* CONN_ON */ +#define MT8186_TOP_AXI_PROT_EN_1_CONN_ON_STEP1 (BIT(18)) +#define MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP2 (BIT(14)) +#define MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP3 (BIT(13)) +#define MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP4 (BIT(16)) +/* ADSP_TOP */ +#define MT8186_TOP_AXI_PROT_EN_3_ADSP_TOP_STEP1 (GENMASK(12, 11)) +#define MT8186_TOP_AXI_PROT_EN_3_ADSP_TOP_STEP2 (GENMASK(1, 0)) + #define MT8183_TOP_AXI_PROT_EN_STA1 0x228 #define MT8183_TOP_AXI_PROT_EN_STA1_1 0x258 #define MT8183_TOP_AXI_PROT_EN_SET 0x2a0 -- cgit v1.2.3 From 5f9b5b757e44de47ebdc116c14b90e3cc8bc7acb Mon Sep 17 00:00:00 2001 From: Yongqiang Niu Date: Tue, 22 Feb 2022 13:28:01 +0800 Subject: soc: mediatek: mmsys: add mt8186 mmsys routing table Add new routing table for MT8186. In MT8186, there are two routing pipelines for internal and external display. 
Internal display: OVL0->RDMA0->COLOR0->CCORR0->AAL0->GAMMA->POSTMASK0-> DITHER->DSI0 External display: OVL_2L0->RDMA1->DPI0 Signed-off-by: Yongqiang Niu Signed-off-by: Rex-BC Chen Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mt8186-mmsys.h | 113 ++++++++++++++++++++++++++++++++++++ drivers/soc/mediatek/mtk-mmsys.c | 11 ++++ 2 files changed, 124 insertions(+) create mode 100644 drivers/soc/mediatek/mt8186-mmsys.h (limited to 'drivers') diff --git a/drivers/soc/mediatek/mt8186-mmsys.h b/drivers/soc/mediatek/mt8186-mmsys.h new file mode 100644 index 000000000000..7de329f2d729 --- /dev/null +++ b/drivers/soc/mediatek/mt8186-mmsys.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __SOC_MEDIATEK_MT8186_MMSYS_H +#define __SOC_MEDIATEK_MT8186_MMSYS_H + +#define MT8186_MMSYS_OVL_CON 0xF04 +#define MT8186_MMSYS_OVL0_CON_MASK 0x3 +#define MT8186_MMSYS_OVL0_2L_CON_MASK 0xC +#define MT8186_OVL0_GO_BLEND BIT(0) +#define MT8186_OVL0_GO_BG BIT(1) +#define MT8186_OVL0_2L_GO_BLEND BIT(2) +#define MT8186_OVL0_2L_GO_BG BIT(3) +#define MT8186_DISP_RDMA0_SOUT_SEL 0xF0C +#define MT8186_RDMA0_SOUT_SEL_MASK 0xF +#define MT8186_RDMA0_SOUT_TO_DSI0 (0) +#define MT8186_RDMA0_SOUT_TO_COLOR0 (1) +#define MT8186_RDMA0_SOUT_TO_DPI0 (2) +#define MT8186_DISP_OVL0_2L_MOUT_EN 0xF14 +#define MT8186_OVL0_2L_MOUT_EN_MASK 0xF +#define MT8186_OVL0_2L_MOUT_TO_RDMA0 BIT(0) +#define MT8186_OVL0_2L_MOUT_TO_RDMA1 BIT(3) +#define MT8186_DISP_OVL0_MOUT_EN 0xF18 +#define MT8186_OVL0_MOUT_EN_MASK 0xF +#define MT8186_OVL0_MOUT_TO_RDMA0 BIT(0) +#define MT8186_OVL0_MOUT_TO_RDMA1 BIT(3) +#define MT8186_DISP_DITHER0_MOUT_EN 0xF20 +#define MT8186_DITHER0_MOUT_EN_MASK 0xF +#define MT8186_DITHER0_MOUT_TO_DSI0 BIT(0) +#define MT8186_DITHER0_MOUT_TO_RDMA1 BIT(2) +#define MT8186_DITHER0_MOUT_TO_DPI0 BIT(3) +#define MT8186_DISP_RDMA0_SEL_IN 0xF28 +#define MT8186_RDMA0_SEL_IN_MASK 0xF +#define MT8186_RDMA0_FROM_OVL0 0 +#define MT8186_RDMA0_FROM_OVL0_2L 2 +#define MT8186_DISP_DSI0_SEL_IN 0xF30 +#define MT8186_DSI0_SEL_IN_MASK 0xF +#define MT8186_DSI0_FROM_RDMA0 0 +#define MT8186_DSI0_FROM_DITHER0 1 +#define MT8186_DSI0_FROM_RDMA1 2 +#define MT8186_DISP_RDMA1_MOUT_EN 0xF3C +#define MT8186_RDMA1_MOUT_EN_MASK 0xF +#define MT8186_RDMA1_MOUT_TO_DPI0_SEL BIT(0) +#define MT8186_RDMA1_MOUT_TO_DSI0_SEL BIT(2) +#define MT8186_DISP_RDMA1_SEL_IN 0xF40 +#define MT8186_RDMA1_SEL_IN_MASK 0xF +#define MT8186_RDMA1_FROM_OVL0 0 +#define MT8186_RDMA1_FROM_OVL0_2L 2 +#define MT8186_RDMA1_FROM_DITHER0 3 +#define MT8186_DISP_DPI0_SEL_IN 0xF44 +#define MT8186_DPI0_SEL_IN_MASK 0xF +#define MT8186_DPI0_FROM_RDMA1 0 +#define MT8186_DPI0_FROM_DITHER0 1 +#define MT8186_DPI0_FROM_RDMA0 2 + +static const struct mtk_mmsys_routes mmsys_mt8186_routing_table[] = { + { + DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0, + MT8186_DISP_OVL0_MOUT_EN, MT8186_OVL0_MOUT_EN_MASK, + MT8186_OVL0_MOUT_TO_RDMA0 + }, + { + DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0, + MT8186_DISP_RDMA0_SEL_IN, MT8186_RDMA0_SEL_IN_MASK, + MT8186_RDMA0_FROM_OVL0 + }, + { + DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0, + MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_CON_MASK, + MT8186_OVL0_GO_BLEND + }, + { + DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0, + MT8186_DISP_RDMA0_SOUT_SEL, MT8186_RDMA0_SOUT_SEL_MASK, + MT8186_RDMA0_SOUT_TO_COLOR0 + }, + { + DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0, + MT8186_DISP_DITHER0_MOUT_EN, MT8186_DITHER0_MOUT_EN_MASK, + MT8186_DITHER0_MOUT_TO_DSI0, + }, + { + DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0, + MT8186_DISP_DSI0_SEL_IN, MT8186_DSI0_SEL_IN_MASK, + 
MT8186_DSI0_FROM_DITHER0 + }, + { + DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1, + MT8186_DISP_OVL0_2L_MOUT_EN, MT8186_OVL0_2L_MOUT_EN_MASK, + MT8186_OVL0_2L_MOUT_TO_RDMA1 + }, + { + DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1, + MT8186_DISP_RDMA1_SEL_IN, MT8186_RDMA1_SEL_IN_MASK, + MT8186_RDMA1_FROM_OVL0_2L + }, + { + DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1, + MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_2L_CON_MASK, + MT8186_OVL0_2L_GO_BLEND + }, + { + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0, + MT8186_DISP_RDMA1_MOUT_EN, MT8186_RDMA1_MOUT_EN_MASK, + MT8186_RDMA1_MOUT_TO_DPI0_SEL + }, + { + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0, + MT8186_DISP_DPI0_SEL_IN, MT8186_DPI0_SEL_IN_MASK, + MT8186_DPI0_FROM_RDMA1 + }, +}; + +#endif /* __SOC_MEDIATEK_MT8186_MMSYS_H */ diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c index 1e448f1ffefb..0da25069ffb3 100644 --- a/drivers/soc/mediatek/mtk-mmsys.c +++ b/drivers/soc/mediatek/mtk-mmsys.c @@ -15,6 +15,7 @@ #include "mtk-mmsys.h" #include "mt8167-mmsys.h" #include "mt8183-mmsys.h" +#include "mt8186-mmsys.h" #include "mt8192-mmsys.h" #include "mt8365-mmsys.h" @@ -56,6 +57,12 @@ static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = { .num_routes = ARRAY_SIZE(mmsys_mt8183_routing_table), }; +static const struct mtk_mmsys_driver_data mt8186_mmsys_driver_data = { + .clk_driver = "clk-mt8186-mm", + .routes = mmsys_mt8186_routing_table, + .num_routes = ARRAY_SIZE(mmsys_mt8186_routing_table), +}; + static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = { .clk_driver = "clk-mt8192-mm", .routes = mmsys_mt8192_routing_table, @@ -242,6 +249,10 @@ static const struct of_device_id of_match_mtk_mmsys[] = { .compatible = "mediatek,mt8183-mmsys", .data = &mt8183_mmsys_driver_data, }, + { + .compatible = "mediatek,mt8186-mmsys", + .data = &mt8186_mmsys_driver_data, + }, { .compatible = "mediatek,mt8192-mmsys", .data = &mt8192_mmsys_driver_data, -- cgit v1.2.3 From 15f1768365aed810826a61fef4a744437aa5b426 Mon Sep 17 00:00:00 2001 From: Yongqiang Niu Date: Tue, 22 Feb 2022 13:28:02 +0800 Subject: soc: mediatek: add MTK mutex support for MT8186 Add MTK mutex support for MT8186 SoC. We need MTK mutex to control timing of display modules and there are two display pipelines for MT8186 including internal and external display. 
MTK mutex for internal display: - Timing source: DSI - Control modules: OVL0/RDMA0/COLOR0/CCORR/AAL0/GAMMA/POSTMASK0/DITHER MTK mutex for external display: - Timing source : DPI - Control modules: OVL_2L0/RDMA1 Signed-off-by: Yongqiang Niu Signed-off-by: Rex-BC Chen Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mtk-mutex.c | 45 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) (limited to 'drivers') diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c index 2ca55bb5a8be..aaf8fc1abb43 100644 --- a/drivers/soc/mediatek/mtk-mutex.c +++ b/drivers/soc/mediatek/mtk-mutex.c @@ -26,6 +26,23 @@ #define INT_MUTEX BIT(1) +#define MT8186_MUTEX_MOD_DISP_OVL0 0 +#define MT8186_MUTEX_MOD_DISP_OVL0_2L 1 +#define MT8186_MUTEX_MOD_DISP_RDMA0 2 +#define MT8186_MUTEX_MOD_DISP_COLOR0 4 +#define MT8186_MUTEX_MOD_DISP_CCORR0 5 +#define MT8186_MUTEX_MOD_DISP_AAL0 7 +#define MT8186_MUTEX_MOD_DISP_GAMMA0 8 +#define MT8186_MUTEX_MOD_DISP_POSTMASK0 9 +#define MT8186_MUTEX_MOD_DISP_DITHER0 10 +#define MT8186_MUTEX_MOD_DISP_RDMA1 17 + +#define MT8186_MUTEX_SOF_SINGLE_MODE 0 +#define MT8186_MUTEX_SOF_DSI0 1 +#define MT8186_MUTEX_SOF_DPI0 2 +#define MT8186_MUTEX_EOF_DSI0 (MT8186_MUTEX_SOF_DSI0 << 6) +#define MT8186_MUTEX_EOF_DPI0 (MT8186_MUTEX_SOF_DPI0 << 6) + #define MT8167_MUTEX_MOD_DISP_PWM 1 #define MT8167_MUTEX_MOD_DISP_OVL0 6 #define MT8167_MUTEX_MOD_DISP_OVL1 7 @@ -226,6 +243,19 @@ static const unsigned int mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_WDMA0] = MT8183_MUTEX_MOD_DISP_WDMA0, }; +static const unsigned int mt8186_mutex_mod[DDP_COMPONENT_ID_MAX] = { + [DDP_COMPONENT_AAL0] = MT8186_MUTEX_MOD_DISP_AAL0, + [DDP_COMPONENT_CCORR] = MT8186_MUTEX_MOD_DISP_CCORR0, + [DDP_COMPONENT_COLOR0] = MT8186_MUTEX_MOD_DISP_COLOR0, + [DDP_COMPONENT_DITHER] = MT8186_MUTEX_MOD_DISP_DITHER0, + [DDP_COMPONENT_GAMMA] = MT8186_MUTEX_MOD_DISP_GAMMA0, + [DDP_COMPONENT_OVL0] = MT8186_MUTEX_MOD_DISP_OVL0, + [DDP_COMPONENT_OVL_2L0] = MT8186_MUTEX_MOD_DISP_OVL0_2L, + [DDP_COMPONENT_POSTMASK0] = MT8186_MUTEX_MOD_DISP_POSTMASK0, + [DDP_COMPONENT_RDMA0] = MT8186_MUTEX_MOD_DISP_RDMA0, + [DDP_COMPONENT_RDMA1] = MT8186_MUTEX_MOD_DISP_RDMA1, +}; + static const unsigned int mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_AAL0] = MT8192_MUTEX_MOD_DISP_AAL0, [DDP_COMPONENT_CCORR] = MT8192_MUTEX_MOD_DISP_CCORR0, @@ -264,6 +294,12 @@ static const unsigned int mt8183_mutex_sof[MUTEX_SOF_DSI3 + 1] = { [MUTEX_SOF_DPI0] = MT8183_MUTEX_SOF_DPI0 | MT8183_MUTEX_EOF_DPI0, }; +static const unsigned int mt8186_mutex_sof[MUTEX_SOF_DSI3 + 1] = { + [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE, + [MUTEX_SOF_DSI0] = MT8186_MUTEX_SOF_DSI0 | MT8186_MUTEX_EOF_DSI0, + [MUTEX_SOF_DPI0] = MT8186_MUTEX_SOF_DPI0 | MT8186_MUTEX_EOF_DPI0, +}; + static const struct mtk_mutex_data mt2701_mutex_driver_data = { .mutex_mod = mt2701_mutex_mod, .mutex_sof = mt2712_mutex_sof, @@ -301,6 +337,13 @@ static const struct mtk_mutex_data mt8183_mutex_driver_data = { .no_clk = true, }; +static const struct mtk_mutex_data mt8186_mutex_driver_data = { + .mutex_mod = mt8186_mutex_mod, + .mutex_sof = mt8186_mutex_sof, + .mutex_mod_reg = MT8183_MUTEX0_MOD0, + .mutex_sof_reg = MT8183_MUTEX0_SOF0, +}; + static const struct mtk_mutex_data mt8192_mutex_driver_data = { .mutex_mod = mt8192_mutex_mod, .mutex_sof = mt8183_mutex_sof, @@ -540,6 +583,8 @@ static const struct of_device_id mutex_driver_dt_match[] = { .data = &mt8173_mutex_driver_data}, { .compatible = "mediatek,mt8183-disp-mutex", .data = 
&mt8183_mutex_driver_data}, + { .compatible = "mediatek,mt8186-disp-mutex", + .data = &mt8186_mutex_driver_data}, { .compatible = "mediatek,mt8192-disp-mutex", .data = &mt8192_mutex_driver_data}, {}, -- cgit v1.2.3 From c65d68e7e95a39da31d64d67d5bea6550b91fb43 Mon Sep 17 00:00:00 2001 From: Vignesh Raghavendra Date: Fri, 25 Feb 2022 17:32:35 +0530 Subject: soc: ti: k3-socinfo: Add AM62x JTAG ID Add JTAG ID entry to help identify AM62x SoC in kernel. Signed-off-by: Vignesh Raghavendra Reviewed-by: Bryan Brattlof Signed-off-by: Nishanth Menon Link: https://lore.kernel.org/r/20220225120239.1303821-2-vigneshr@ti.com --- drivers/soc/ti/k3-socinfo.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c index b6b2150aca4e..91f441ee6175 100644 --- a/drivers/soc/ti/k3-socinfo.c +++ b/drivers/soc/ti/k3-socinfo.c @@ -42,6 +42,7 @@ static const struct k3_soc_id { { 0xBB6D, "J7200" }, { 0xBB38, "AM64X" }, { 0xBB75, "J721S2"}, + { 0xBB7E, "AM62X" }, }; static int -- cgit v1.2.3 From dcfd5192563909219f6304b4e3e10db071158eef Mon Sep 17 00:00:00 2001 From: Alyssa Rosenzweig Date: Tue, 15 Feb 2022 13:46:51 -0500 Subject: soc: mediatek: mtk-infracfg: Disable ACP on MT8192 MT8192 contains an experimental Accelerator Coherency Port implementation, which does not work correctly but was unintentionally enabled by default. For correct operation of the GPU, we must set a chicken bit disabling ACP on MT8192. Adapted from the following downstream change to the out-of-tree, legacy Mali GPU driver: https://chromium-review.googlesource.com/c/chromiumos/third_party/kernel/+/2781271/5 Note this change is required for both Panfrost and the legacy kernel driver. Co-developed-by: Robin Murphy Signed-off-by: Robin Murphy Signed-off-by: Alyssa Rosenzweig Cc: Nick Fan Cc: Nicolas Boichat Cc: Chen-Yu Tsai Cc: Stephen Boyd Cc: AngeloGioacchino Del Regno Tested-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20220215184651.12168-1-alyssa.rosenzweig@collabora.com Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mtk-infracfg.c | 19 +++++++++++++++++++ include/linux/soc/mediatek/infracfg.h | 3 +++ 2 files changed, 22 insertions(+) (limited to 'drivers') diff --git a/drivers/soc/mediatek/mtk-infracfg.c b/drivers/soc/mediatek/mtk-infracfg.c index 0590b68e0d78..2acf19676af2 100644 --- a/drivers/soc/mediatek/mtk-infracfg.c +++ b/drivers/soc/mediatek/mtk-infracfg.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -72,3 +73,21 @@ int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask, return ret; } + +static int __init mtk_infracfg_init(void) +{ + struct regmap *infracfg; + + /* + * MT8192 has an experimental path to route GPU traffic to the DSU's + * Accelerator Coherency Port, which is inadvertently enabled by + * default. It turns out not to work, so disable it to prevent spurious + * GPU faults. 
+ */ + infracfg = syscon_regmap_lookup_by_compatible("mediatek,mt8192-infracfg"); + if (!IS_ERR(infracfg)) + regmap_set_bits(infracfg, MT8192_INFRA_CTRL, + MT8192_INFRA_CTRL_DISABLE_MFG2ACP); + return 0; +} +postcore_initcall(mtk_infracfg_init); diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h index 8a1c2040a28e..50804ac748bd 100644 --- a/include/linux/soc/mediatek/infracfg.h +++ b/include/linux/soc/mediatek/infracfg.h @@ -277,6 +277,9 @@ #define INFRA_TOPAXI_PROTECTEN_SET 0x0260 #define INFRA_TOPAXI_PROTECTEN_CLR 0x0264 +#define MT8192_INFRA_CTRL 0x290 +#define MT8192_INFRA_CTRL_DISABLE_MFG2ACP BIT(9) + #define REG_INFRA_MISC 0xf00 #define F_DDR_4GB_SUPPORT_EN BIT(13) -- cgit v1.2.3 From 831785f0e5b919c29e1bc5f9a74e9ebd38289e24 Mon Sep 17 00:00:00 2001 From: Rex-BC Chen Date: Thu, 17 Feb 2022 16:26:26 +0800 Subject: soc: mediatek: mmsys: add mmsys reset control for MT8186 Add mmsys reset control register 0x160 for MT8186. Signed-off-by: Rex-BC Chen Link: https://lore.kernel.org/r/20220217082626.15728-3-rex-bc.chen@mediatek.com Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mt8186-mmsys.h | 2 ++ drivers/soc/mediatek/mtk-mmsys.c | 1 + 2 files changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/soc/mediatek/mt8186-mmsys.h b/drivers/soc/mediatek/mt8186-mmsys.h index 7de329f2d729..c72ccf86ea28 100644 --- a/drivers/soc/mediatek/mt8186-mmsys.h +++ b/drivers/soc/mediatek/mt8186-mmsys.h @@ -52,6 +52,8 @@ #define MT8186_DPI0_FROM_DITHER0 1 #define MT8186_DPI0_FROM_RDMA0 2 +#define MT8186_MMSYS_SW0_RST_B 0x160 + static const struct mtk_mmsys_routes mmsys_mt8186_routing_table[] = { { DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0, diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c index 0da25069ffb3..50c797d70ddd 100644 --- a/drivers/soc/mediatek/mtk-mmsys.c +++ b/drivers/soc/mediatek/mtk-mmsys.c @@ -61,6 +61,7 @@ static const struct mtk_mmsys_driver_data mt8186_mmsys_driver_data = { .clk_driver = "clk-mt8186-mm", .routes = mmsys_mt8186_routing_table, .num_routes = ARRAY_SIZE(mmsys_mt8186_routing_table), + .sw0_rst_offset = MT8186_MMSYS_SW0_RST_B, }; static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = { -- cgit v1.2.3 From 83a7175cbe3ee8cd105d6d0cd12b1a9d4ed40693 Mon Sep 17 00:00:00 2001 From: Johnson Wang Date: Mon, 7 Feb 2022 16:30:33 +0800 Subject: soc: mediatek: pwrap: add pwrap driver for MT8186 SoC MT8186 are highly integrated SoC and use PMIC_MT6366 for power management. This patch adds pwrap master driver to access PMIC_MT6366. 
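The diff below follows the driver's usual pattern for a new SoC: a per-SoC register offset table (mt8186_regs), a pmic_wrapper_type descriptor, and an of_device_id entry, plus a new arbitration capability bit because MT8186 reports arbiter init completion on a different status bit. A minimal sketch of the capability-driven selection in pwrap_probe(), reconstructed from the hunk further down (HAS_CAP() and the PWRAP_STATE_* masks are taken from the existing driver):

	/*
	 * Pick which "init done" status bit to poll, based on the
	 * capabilities advertised by the matched pmic_wrapper_type.
	 */
	if (HAS_CAP(wrp->master->caps, PWRAP_CAP_ARB))
		mask_done = PWRAP_STATE_INIT_DONE1;
	else if (HAS_CAP(wrp->master->caps, PWRAP_CAP_ARB_MT8186))
		mask_done = PWRAP_STATE_INIT_DONE0_MT8186;	/* BIT(22) */
	else
		mask_done = PWRAP_STATE_INIT_DONE0;		/* BIT(21) */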
Acked-by: AngeloGioacchino Del Regno Signed-off-by: Johnson Wang Link: https://lore.kernel.org/r/20220207083034.15327-2-johnson.wang@mediatek.com Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mtk-pmic-wrap.c | 71 ++++++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) (limited to 'drivers') diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c index 952bc554f443..bf39a64f3ecc 100644 --- a/drivers/soc/mediatek/mtk-pmic-wrap.c +++ b/drivers/soc/mediatek/mtk-pmic-wrap.c @@ -30,6 +30,7 @@ #define PWRAP_GET_WACS_REQ(x) (((x) >> 19) & 0x00000001) #define PWRAP_STATE_SYNC_IDLE0 BIT(20) #define PWRAP_STATE_INIT_DONE0 BIT(21) +#define PWRAP_STATE_INIT_DONE0_MT8186 BIT(22) #define PWRAP_STATE_INIT_DONE1 BIT(15) /* macro for WACS FSM */ @@ -77,6 +78,7 @@ #define PWRAP_CAP_INT1_EN BIT(3) #define PWRAP_CAP_WDT_SRC1 BIT(4) #define PWRAP_CAP_ARB BIT(5) +#define PWRAP_CAP_ARB_MT8186 BIT(8) /* defines for slave device wrapper registers */ enum dew_regs { @@ -1063,6 +1065,55 @@ static int mt8516_regs[] = { [PWRAP_MSB_FIRST] = 0x170, }; +static int mt8186_regs[] = { + [PWRAP_MUX_SEL] = 0x0, + [PWRAP_WRAP_EN] = 0x4, + [PWRAP_DIO_EN] = 0x8, + [PWRAP_RDDMY] = 0x20, + [PWRAP_CSHEXT_WRITE] = 0x24, + [PWRAP_CSHEXT_READ] = 0x28, + [PWRAP_CSLEXT_WRITE] = 0x2C, + [PWRAP_CSLEXT_READ] = 0x30, + [PWRAP_EXT_CK_WRITE] = 0x34, + [PWRAP_STAUPD_CTRL] = 0x3C, + [PWRAP_STAUPD_GRPEN] = 0x40, + [PWRAP_EINT_STA0_ADR] = 0x44, + [PWRAP_EINT_STA1_ADR] = 0x48, + [PWRAP_INT_CLR] = 0xC8, + [PWRAP_INT_FLG] = 0xC4, + [PWRAP_MAN_EN] = 0x7C, + [PWRAP_MAN_CMD] = 0x80, + [PWRAP_WACS0_EN] = 0x8C, + [PWRAP_WACS1_EN] = 0x94, + [PWRAP_WACS2_EN] = 0x9C, + [PWRAP_INIT_DONE0] = 0x90, + [PWRAP_INIT_DONE1] = 0x98, + [PWRAP_INIT_DONE2] = 0xA0, + [PWRAP_INT_EN] = 0xBC, + [PWRAP_INT1_EN] = 0xCC, + [PWRAP_INT1_FLG] = 0xD4, + [PWRAP_INT1_CLR] = 0xD8, + [PWRAP_TIMER_EN] = 0xF0, + [PWRAP_WDT_UNIT] = 0xF8, + [PWRAP_WDT_SRC_EN] = 0xFC, + [PWRAP_WDT_SRC_EN_1] = 0x100, + [PWRAP_WDT_FLG] = 0x104, + [PWRAP_SPMINF_STA] = 0x1B4, + [PWRAP_DCM_EN] = 0x1EC, + [PWRAP_DCM_DBC_PRD] = 0x1F0, + [PWRAP_GPSINF_0_STA] = 0x204, + [PWRAP_GPSINF_1_STA] = 0x208, + [PWRAP_WACS0_CMD] = 0xC00, + [PWRAP_WACS0_RDATA] = 0xC04, + [PWRAP_WACS0_VLDCLR] = 0xC08, + [PWRAP_WACS1_CMD] = 0xC10, + [PWRAP_WACS1_RDATA] = 0xC14, + [PWRAP_WACS1_VLDCLR] = 0xC18, + [PWRAP_WACS2_CMD] = 0xC20, + [PWRAP_WACS2_RDATA] = 0xC24, + [PWRAP_WACS2_VLDCLR] = 0xC28, +}; + enum pmic_type { PMIC_MT6323, PMIC_MT6351, @@ -1083,6 +1134,7 @@ enum pwrap_type { PWRAP_MT8135, PWRAP_MT8173, PWRAP_MT8183, + PWRAP_MT8186, PWRAP_MT8195, PWRAP_MT8516, }; @@ -1535,6 +1587,7 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp) case PWRAP_MT6779: case PWRAP_MT6797: case PWRAP_MT8173: + case PWRAP_MT8186: case PWRAP_MT8516: pwrap_writel(wrp, 1, PWRAP_CIPHER_EN); break; @@ -2069,6 +2122,19 @@ static struct pmic_wrapper_type pwrap_mt8516 = { .init_soc_specific = NULL, }; +static struct pmic_wrapper_type pwrap_mt8186 = { + .regs = mt8186_regs, + .type = PWRAP_MT8186, + .arb_en_all = 0xfb27f, + .int_en_all = 0xfffffffe, /* disable WatchDog Timeout for bit 1 */ + .int1_en_all = 0x000017ff, /* disable Matching interrupt for bit 13 */ + .spi_w = PWRAP_MAN_CMD_SPI_WRITE, + .wdt_src = PWRAP_WDT_SRC_MASK_ALL, + .caps = PWRAP_CAP_INT1_EN | PWRAP_CAP_ARB_MT8186, + .init_reg_clock = pwrap_common_init_reg_clock, + .init_soc_specific = NULL, +}; + static const struct of_device_id of_pwrap_match_tbl[] = { { .compatible = "mediatek,mt2701-pwrap", @@ -2097,6 +2163,9 @@ static const 
struct of_device_id of_pwrap_match_tbl[] = { }, { .compatible = "mediatek,mt8183-pwrap", .data = &pwrap_mt8183, + }, { + .compatible = "mediatek,mt8186-pwrap", + .data = &pwrap_mt8186, }, { .compatible = "mediatek,mt8195-pwrap", .data = &pwrap_mt8195, @@ -2209,6 +2278,8 @@ static int pwrap_probe(struct platform_device *pdev) if (HAS_CAP(wrp->master->caps, PWRAP_CAP_ARB)) mask_done = PWRAP_STATE_INIT_DONE1; + else if (HAS_CAP(wrp->master->caps, PWRAP_CAP_ARB_MT8186)) + mask_done = PWRAP_STATE_INIT_DONE0_MT8186; else mask_done = PWRAP_STATE_INIT_DONE0; -- cgit v1.2.3 From fd7bd80b46373887b390852f490f21b07e209498 Mon Sep 17 00:00:00 2001 From: Jiasheng Jiang Date: Thu, 24 Feb 2022 10:54:44 +0800 Subject: memory: emif: Add check for setup_interrupts As the potential failure of the devm_request_threaded_irq(), it should be better to check the return value of the setup_interrupts() and return error if fails. Fixes: 68b4aee35d1f ("memory: emif: add interrupt and temperature handling") Signed-off-by: Jiasheng Jiang Link: https://lore.kernel.org/r/20220224025444.3256530-1-jiasheng@iscas.ac.cn Signed-off-by: Krzysztof Kozlowski --- drivers/memory/emif.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c index 762d0c0f0716..d4d4044e05b3 100644 --- a/drivers/memory/emif.c +++ b/drivers/memory/emif.c @@ -1117,7 +1117,7 @@ static int __init_or_module emif_probe(struct platform_device *pdev) { struct emif_data *emif; struct resource *res; - int irq; + int irq, ret; if (pdev->dev.of_node) emif = of_get_memory_device_details(pdev->dev.of_node, &pdev->dev); @@ -1147,7 +1147,9 @@ static int __init_or_module emif_probe(struct platform_device *pdev) emif_onetime_settings(emif); emif_debugfs_init(emif); disable_and_clear_all_interrupts(emif); - setup_interrupts(emif, irq); + ret = setup_interrupts(emif, irq); + if (ret) + goto error; /* One-time actions taken on probing the first device */ if (!emif1) { -- cgit v1.2.3 From 5b5ab1bfa1898c6d52936a57c25c5ceba2cb2f87 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 25 Feb 2022 05:25:52 -0800 Subject: memory: emif: check the pointer temp in get_device_details() The pointer temp is allocated by devm_kzalloc(), so it should be checked for error handling. Fixes: 7ec944538dde ("memory: emif: add basic infrastructure for EMIF driver") Signed-off-by: Jia-Ju Bai Link: https://lore.kernel.org/r/20220225132552.27894-1-baijiaju1990@gmail.com Signed-off-by: Krzysztof Kozlowski --- drivers/memory/emif.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c index d4d4044e05b3..ecc78d6f89ed 100644 --- a/drivers/memory/emif.c +++ b/drivers/memory/emif.c @@ -1025,7 +1025,7 @@ static struct emif_data *__init_or_module get_device_details( temp = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL); - if (!emif || !pd || !dev_info) { + if (!emif || !temp || !dev_info) { dev_err(dev, "%s:%d: allocation error\n", __func__, __LINE__); goto error; } -- cgit v1.2.3 From 62dc30150c06774a8122c52aedd0eddaceaf5940 Mon Sep 17 00:00:00 2001 From: Rex-BC Chen Date: Thu, 17 Feb 2022 16:26:25 +0800 Subject: soc: mediatek: mmsys: add sw0_rst_offset in mmsys driver data There are different software reset registers for difference MTK SoCs. Therefore, we add a new variable "sw0_rst_offset" to control it. 
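A stand-alone sketch of what the new field changes: the reset callback reads the SW0_RST_B offset from the per-SoC driver data instead of a fixed constant. This is a userspace model for illustration, not the driver; the mmsys_model structure, the fake register file and main() are assumptions of the sketch, and the spinlock the real mtk_mmsys_reset_update() takes is omitted.

#include <stdint.h>
#include <stdio.h>

#define BIT(nr)			(1U << (nr))

/* Offsets used by the driver data in this series */
#define MT8183_MMSYS_SW0_RST_B	0x140
#define MT8186_MMSYS_SW0_RST_B	0x160

struct mmsys_model {
	uint32_t regs[0x200 / 4];	/* fake MMIO register file */
	uint16_t sw0_rst_offset;	/* per-SoC, comes from driver data */
};

/* Active-low reset: assert clears the bit, deassert sets it (no locking here) */
static void mmsys_reset_update(struct mmsys_model *mmsys, unsigned int id,
			       int assert)
{
	uint32_t *reg = &mmsys->regs[mmsys->sw0_rst_offset / 4];

	if (assert)
		*reg &= ~BIT(id);
	else
		*reg |= BIT(id);
}

int main(void)
{
	struct mmsys_model mt8186 = { .sw0_rst_offset = MT8186_MMSYS_SW0_RST_B };

	mmsys_reset_update(&mt8186, 0, 1);	/* assert reset line 0 */
	mmsys_reset_update(&mt8186, 0, 0);	/* release it again */
	printf("SW0_RST_B @ 0x%03x = 0x%08x\n", mt8186.sw0_rst_offset,
	       mt8186.regs[mt8186.sw0_rst_offset / 4]);
	return 0;
}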
Signed-off-by: Rex-BC Chen Reviewed-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20220217082626.15728-2-rex-bc.chen@mediatek.com Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mt8183-mmsys.h | 2 ++ drivers/soc/mediatek/mtk-mmsys.c | 6 ++++-- drivers/soc/mediatek/mtk-mmsys.h | 3 +-- 3 files changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/mediatek/mt8183-mmsys.h b/drivers/soc/mediatek/mt8183-mmsys.h index 9dee485807c9..0c021f4b76d2 100644 --- a/drivers/soc/mediatek/mt8183-mmsys.h +++ b/drivers/soc/mediatek/mt8183-mmsys.h @@ -25,6 +25,8 @@ #define MT8183_RDMA0_SOUT_COLOR0 0x1 #define MT8183_RDMA1_SOUT_DSI0 0x1 +#define MT8183_MMSYS_SW0_RST_B 0x140 + static const struct mtk_mmsys_routes mmsys_mt8183_routing_table[] = { { DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0, diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c index 50c797d70ddd..4fc4c2c9ea20 100644 --- a/drivers/soc/mediatek/mtk-mmsys.c +++ b/drivers/soc/mediatek/mtk-mmsys.c @@ -49,12 +49,14 @@ static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = { .clk_driver = "clk-mt8173-mm", .routes = mmsys_default_routing_table, .num_routes = ARRAY_SIZE(mmsys_default_routing_table), + .sw0_rst_offset = MT8183_MMSYS_SW0_RST_B, }; static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = { .clk_driver = "clk-mt8183-mm", .routes = mmsys_mt8183_routing_table, .num_routes = ARRAY_SIZE(mmsys_mt8183_routing_table), + .sw0_rst_offset = MT8183_MMSYS_SW0_RST_B, }; static const struct mtk_mmsys_driver_data mt8186_mmsys_driver_data = { @@ -129,14 +131,14 @@ static int mtk_mmsys_reset_update(struct reset_controller_dev *rcdev, unsigned l spin_lock_irqsave(&mmsys->lock, flags); - reg = readl_relaxed(mmsys->regs + MMSYS_SW0_RST_B); + reg = readl_relaxed(mmsys->regs + mmsys->data->sw0_rst_offset); if (assert) reg &= ~BIT(id); else reg |= BIT(id); - writel_relaxed(reg, mmsys->regs + MMSYS_SW0_RST_B); + writel_relaxed(reg, mmsys->regs + mmsys->data->sw0_rst_offset); spin_unlock_irqrestore(&mmsys->lock, flags); diff --git a/drivers/soc/mediatek/mtk-mmsys.h b/drivers/soc/mediatek/mtk-mmsys.h index 8b0ed05117ea..77f37f8c715b 100644 --- a/drivers/soc/mediatek/mtk-mmsys.h +++ b/drivers/soc/mediatek/mtk-mmsys.h @@ -78,8 +78,6 @@ #define DSI_SEL_IN_RDMA 0x1 #define DSI_SEL_IN_MASK 0x1 -#define MMSYS_SW0_RST_B 0x140 - struct mtk_mmsys_routes { u32 from_comp; u32 to_comp; @@ -92,6 +90,7 @@ struct mtk_mmsys_driver_data { const char *clk_driver; const struct mtk_mmsys_routes *routes; const unsigned int num_routes; + const u16 sw0_rst_offset; }; /* -- cgit v1.2.3 From def8abbb1464579c37f15b3d7a95e5f3bab758dc Mon Sep 17 00:00:00 2001 From: Mihai Sain Date: Wed, 2 Mar 2022 16:53:29 +0100 Subject: ARM: at91: add support in soc driver for new SAMA5D29 Add detection of new SAMA5D29 by the SoC driver. 
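The new entry extends the table-driven chip identification: each row pairs a masked chip-ID (CIDR) value with an extended-ID (EXID) value and the names to report. Below is a simplified stand-alone sketch of that lookup; only the SAMA5D29 EXID value 0x00000023 comes from this patch, while the CIDR value, the mask and the helper names are placeholders invented for the example, and the real driver applies additional version masking.

#include <stdint.h>
#include <stdio.h>

#define CIDR_MATCH_MASK		0x7fffffe0	/* placeholder mask for the sketch */

struct at91_soc_id {
	uint32_t cidr_match;	/* masked chip-ID value (placeholder here) */
	uint32_t exid_match;	/* extended-ID value */
	const char *name;
	const char *family;
};

static const struct at91_soc_id socs[] = {
	{ 0x0a5c0000, 0x00000023, "sama5d29", "sama5d2" },
};

static const struct at91_soc_id *at91_match(uint32_t cidr, uint32_t exid)
{
	for (unsigned int i = 0; i < sizeof(socs) / sizeof(socs[0]); i++) {
		if ((cidr & CIDR_MATCH_MASK) == socs[i].cidr_match &&
		    exid == socs[i].exid_match)
			return &socs[i];
	}
	return NULL;
}

int main(void)
{
	const struct at91_soc_id *soc = at91_match(0x0a5c0000, 0x00000023);

	if (soc)
		printf("Detected %s (%s family)\n", soc->name, soc->family);
	return 0;
}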
Signed-off-by: Mihai Sain Signed-off-by: Nicolas Ferre Reviewed-by: Claudiu Beznea Link: https://lore.kernel.org/r/20220302155329.27668-1-nicolas.ferre@microchip.com --- drivers/soc/atmel/soc.c | 3 +++ drivers/soc/atmel/soc.h | 1 + 2 files changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c index a490ad7e090f..b2d365ae0282 100644 --- a/drivers/soc/atmel/soc.c +++ b/drivers/soc/atmel/soc.c @@ -156,6 +156,9 @@ static const struct at91_soc socs[] __initconst = { AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK, AT91_CIDR_VERSION_MASK, SAMA5D28C_LD2G_EXID_MATCH, "sama5d28c 256MiB LPDDR2 SiP", "sama5d2"), + AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK, + AT91_CIDR_VERSION_MASK, SAMA5D29CN_EXID_MATCH, + "sama5d29", "sama5d2"), AT91_SOC(SAMA5D3_CIDR_MATCH, AT91_CIDR_MATCH_MASK, AT91_CIDR_VERSION_MASK, SAMA5D31_EXID_MATCH, "sama5d31", "sama5d3"), diff --git a/drivers/soc/atmel/soc.h b/drivers/soc/atmel/soc.h index c3eb3c8f0834..2ecaa75b00f0 100644 --- a/drivers/soc/atmel/soc.h +++ b/drivers/soc/atmel/soc.h @@ -95,6 +95,7 @@ at91_soc_init(const struct at91_soc *socs); #define SAMA5D28C_LD2G_EXID_MATCH 0x00000072 #define SAMA5D28CU_EXID_MATCH 0x00000010 #define SAMA5D28CN_EXID_MATCH 0x00000020 +#define SAMA5D29CN_EXID_MATCH 0x00000023 #define SAMA5D3_CIDR_MATCH 0x0a5c07c0 #define SAMA5D31_EXID_MATCH 0x00444300 -- cgit v1.2.3 From f2b03c1056ef5c0829678f51273eace3e6327884 Mon Sep 17 00:00:00 2001 From: Shunzhou Jiang Date: Mon, 7 Mar 2022 10:53:57 +0800 Subject: soc: s4: Add support for power domains controller Add support s4 Power controller. In s4, power control registers are in secure domain, and should be accessed by smc. Signed-off-by: Shunzhou Jiang Reviewed-by: Kevin Hilman Signed-off-by: Neil Armstrong Link: https://lore.kernel.org/r/20220307025357.1368673-3-shunzhou.jiang@amlogic.com --- drivers/soc/amlogic/meson-secure-pwrc.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'drivers') diff --git a/drivers/soc/amlogic/meson-secure-pwrc.c b/drivers/soc/amlogic/meson-secure-pwrc.c index 59bd195fa9c9..a10a417a87db 100644 --- a/drivers/soc/amlogic/meson-secure-pwrc.c +++ b/drivers/soc/amlogic/meson-secure-pwrc.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -119,6 +120,18 @@ static struct meson_secure_pwrc_domain_desc a1_pwrc_domains[] = { SEC_PD(RSA, 0), }; +static struct meson_secure_pwrc_domain_desc s4_pwrc_domains[] = { + SEC_PD(S4_DOS_HEVC, 0), + SEC_PD(S4_DOS_VDEC, 0), + SEC_PD(S4_VPU_HDMI, 0), + SEC_PD(S4_USB_COMB, 0), + SEC_PD(S4_GE2D, 0), + /* ETH is for ethernet online wakeup, and should be always on */ + SEC_PD(S4_ETH, GENPD_FLAG_ALWAYS_ON), + SEC_PD(S4_DEMOD, 0), + SEC_PD(S4_AUDIO, 0), +}; + static int meson_secure_pwrc_probe(struct platform_device *pdev) { int i; @@ -187,11 +200,20 @@ static struct meson_secure_pwrc_domain_data meson_secure_a1_pwrc_data = { .count = ARRAY_SIZE(a1_pwrc_domains), }; +static struct meson_secure_pwrc_domain_data meson_secure_s4_pwrc_data = { + .domains = s4_pwrc_domains, + .count = ARRAY_SIZE(s4_pwrc_domains), +}; + static const struct of_device_id meson_secure_pwrc_match_table[] = { { .compatible = "amlogic,meson-a1-pwrc", .data = &meson_secure_a1_pwrc_data, }, + { + .compatible = "amlogic,meson-s4-pwrc", + .data = &meson_secure_s4_pwrc_data, + }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, meson_secure_pwrc_match_table); -- cgit v1.2.3 From 8e145bc705e738ebe7ed6c53d93278981d9af356 Mon Sep 17 00:00:00 2001 From: Conor Dooley 
Date: Fri, 18 Mar 2022 17:21:08 +0000 Subject: soc/microchip: fix invalid free in mpfs_sys_controller_delete Fix an invalid kfree in mpfs_sys_controller_delete, by replacing the devm_kzalloc with a regular kzalloc. Change the error handling in the probe function to free the sys_controller struct if the probe fails. > cocci warnings: (new ones prefixed by >>) > >> drivers/soc/microchip/mpfs-sys-controller.c:73:1-6: WARNING: invalid free of devm_ allocated data Link: https://lore.kernel.org/linux-mm/202203180259.lgIylRZV-lkp@intel.com/ Fixes: d0054a470c33 ("soc: add microchip polarfire soc system controller") Reported-by: kernel test robot Signed-off-by: Conor Dooley Signed-off-by: Conor Dooley Signed-off-by: Arnd Bergmann --- drivers/soc/microchip/mpfs-sys-controller.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/microchip/mpfs-sys-controller.c b/drivers/soc/microchip/mpfs-sys-controller.c index 2f4535929762..52291c231f0b 100644 --- a/drivers/soc/microchip/mpfs-sys-controller.c +++ b/drivers/soc/microchip/mpfs-sys-controller.c @@ -96,9 +96,9 @@ static int mpfs_sys_controller_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mpfs_sys_controller *sys_controller; - int i; + int i, ret; - sys_controller = devm_kzalloc(dev, sizeof(*sys_controller), GFP_KERNEL); + sys_controller = kzalloc(sizeof(*sys_controller), GFP_KERNEL); if (!sys_controller) return -ENOMEM; @@ -107,9 +107,12 @@ static int mpfs_sys_controller_probe(struct platform_device *pdev) sys_controller->client.tx_block = 1U; sys_controller->chan = mbox_request_channel(&sys_controller->client, 0); - if (IS_ERR(sys_controller->chan)) - return dev_err_probe(dev, PTR_ERR(sys_controller->chan), - "Failed to get mbox channel\n"); + if (IS_ERR(sys_controller->chan)) { + ret = dev_err_probe(dev, PTR_ERR(sys_controller->chan), + "Failed to get mbox channel\n"); + kfree(sys_controller); + return ret; + } init_completion(&sys_controller->c); kref_init(&sys_controller->consumers); -- cgit v1.2.3
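The pattern behind this last fix is worth spelling out: an object that consumers hold by reference and that is freed from a kref release path must own its allocation outright, because devm_ memory is reclaimed when the probing device unbinds, even if references are still outstanding. The stand-alone model below illustrates that lifetime with a plain counter standing in for struct kref; the function names and the main() harness are invented for the example and are not the driver's API.

#include <stdlib.h>
#include <stdio.h>

struct sys_controller {
	int refcount;			/* stands in for struct kref */
	/* ... mailbox channel, completion, consumer list ... */
};

static struct sys_controller *sys_controller_get(struct sys_controller *sc)
{
	sc->refcount++;
	return sc;
}

/*
 * Mirrors the role of the release path: it frees the object itself, which
 * is only safe if the object came from a plain allocator rather than a
 * device-managed one.
 */
static void sys_controller_put(struct sys_controller *sc)
{
	if (--sc->refcount == 0) {
		printf("last user gone, freeing controller\n");
		free(sc);
	}
}

int main(void)
{
	struct sys_controller *sc = calloc(1, sizeof(*sc));

	if (!sc)
		return 1;
	sc->refcount = 1;		/* the probe function's reference */
	sys_controller_get(sc);		/* a consumer driver takes a reference */
	sys_controller_put(sc);		/* probe/remove drops its reference */
	sys_controller_put(sc);		/* consumer drops the last one: freed here */
	return 0;
}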