author     Arnd Bergmann <arnd@arndb.de>  2022-05-09 23:09:09 +0200
committer  Arnd Bergmann <arnd@arndb.de>  2022-05-09 23:09:10 +0200
commit     1901300bf356a74437117464e19fc5f278f88d9a (patch)
tree       38447a86d53765e2839f1b901dda199c49b6c482
parent     68edb53a4f4b54bd84ba8f88b6ea5c0815dd1581 (diff)
parent     2a21f9e6d9a408dbd09a01caf5fff42c2f70fa82 (diff)
Merge tag 'ti-driver-soc-for-v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux into arm/drivers
TI Driver updates for v5.19

* wkup_m3: io isolation, voltage scaling, vtt regulator and a debug
  option to stop m3 in suspend.
* tisci: support for polled mode for system suspend, reset driver is
  now enabled for COMPILE_TEST
* knav, dma: misc cleanups for IS_ERR, pm_runtime*, and various other
  fixups.

* tag 'ti-driver-soc-for-v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux:
  soc: ti: wkup_m3_ipc: Add debug option to halt m3 in suspend
  soc: ti: wkup_m3_ipc: Add support for i2c voltage scaling
  soc: ti: wkup_m3_ipc: Add support for IO Isolation
  soc: ti: knav_qmss_queue: Use IS_ERR instead of IS_ERR_OR_NULL when checking knav_queue_open() result
  soc: ti: pm33xx: using pm_runtime_resume_and_get instead of pm_runtime_get_sync
  firmware: ti_sci: Switch transport to polled mode during system suspend
  soc: ti: wkup_m3_ipc: Add support for toggling VTT regulator
  soc: ti: knav_qmss_queue: Use pm_runtime_resume_and_get instead of pm_runtime_get_sync
  soc: ti: knav_dma: Use pm_runtime_resume_and_get instead of pm_runtime_get_sync
  reset: ti-sci: Allow building under COMPILE_TEST
  soc: ti: ti_sci_pm_domains: Check for null return of devm_kcalloc
  soc: ti: omap_prm: Use of_device_get_match_data()
  soc: ti: pruss: using pm_runtime_resume_and_get instead of pm_runtime_get_sync
  soc: ti: replace usage of found with dedicated list iterator variable
  soc: ti: wkup_m3_ipc: fix platform_get_irq.cocci warning

Link: https://lore.kernel.org/r/20220507163424.pvqnwrxpoo73lmp2@debtless
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
-rw-r--r--  drivers/firmware/ti_sci.c           |  61
-rw-r--r--  drivers/reset/Kconfig               |   2
-rw-r--r--  drivers/soc/ti/knav_dma.c           |  29
-rw-r--r--  drivers/soc/ti/knav_qmss_queue.c    |  21
-rw-r--r--  drivers/soc/ti/omap_prm.c           |   7
-rw-r--r--  drivers/soc/ti/pm33xx.c             |   6
-rw-r--r--  drivers/soc/ti/pruss.c              |   3
-rw-r--r--  drivers/soc/ti/ti_sci_pm_domains.c  |   2
-rw-r--r--  drivers/soc/ti/wkup_m3_ipc.c        | 210
-rw-r--r--  include/linux/wkup_m3_ipc.h         |  13
10 files changed, 302 insertions, 52 deletions
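
Several patches in this pull convert probe-time pm_runtime_get_sync() calls to
pm_runtime_resume_and_get(). As a rough illustration of the difference (the
"foo" driver below is hypothetical and not part of this series), the conversion
removes the manual pm_runtime_put_noidle() that the older helper required on
failure:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Old pattern: pm_runtime_get_sync() raises the usage count even when it
 * fails, so the caller has to drop it again before bailing out. */
static int foo_probe_old(struct device *dev)
{
	int ret;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		pm_runtime_disable(dev);
		return ret;
	}
	return 0;
}

/* New pattern: pm_runtime_resume_and_get() drops the usage count itself on
 * failure, so the error path only needs to undo pm_runtime_enable(). */
static int foo_probe_new(struct device *dev)
{
	int ret;

	pm_runtime_enable(dev);
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		pm_runtime_disable(dev);
		return ret;
	}
	return 0;
}
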
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 4697edc125b1..ebc32bbd9b83 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -2,7 +2,7 @@
/*
* Texas Instruments System Control Interface Protocol Driver
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
*/
@@ -12,6 +12,7 @@
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
@@ -96,6 +97,7 @@ struct ti_sci_desc {
* @node: list head
* @host_id: Host ID
* @users: Number of users of this instance
+ * @is_suspending: Flag set to indicate in suspend path.
*/
struct ti_sci_info {
struct device *dev;
@@ -114,7 +116,7 @@ struct ti_sci_info {
u8 host_id;
/* protected by ti_sci_list_mutex */
int users;
-
+ bool is_suspending;
};
#define cl_to_ti_sci_info(c) container_of(c, struct ti_sci_info, cl)
@@ -349,6 +351,8 @@ static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
xfer->tx_message.len = tx_message_size;
+ xfer->tx_message.chan_rx = info->chan_rx;
+ xfer->tx_message.timeout_rx_ms = info->desc->max_rx_timeout_ms;
xfer->rx_len = (u8)rx_message_size;
reinit_completion(&xfer->done);
@@ -406,6 +410,7 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info,
int ret;
int timeout;
struct device *dev = info->dev;
+ bool done_state = true;
ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
if (ret < 0)
@@ -413,13 +418,27 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info,
ret = 0;
- /* And we wait for the response. */
- timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
- if (!wait_for_completion_timeout(&xfer->done, timeout)) {
+ if (!info->is_suspending) {
+ /* And we wait for the response. */
+ timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
+ if (!wait_for_completion_timeout(&xfer->done, timeout))
+ ret = -ETIMEDOUT;
+ } else {
+ /*
+ * If we are suspending, we cannot use wait_for_completion_timeout
+ * during noirq phase, so we must manually poll the completion.
+ */
+ ret = read_poll_timeout_atomic(try_wait_for_completion, done_state,
+ true, 1,
+ info->desc->max_rx_timeout_ms * 1000,
+ false, &xfer->done);
+ }
+
+ if (ret == -ETIMEDOUT || !done_state) {
dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
(void *)_RET_IP_);
- ret = -ETIMEDOUT;
}
+
/*
* NOTE: we might prefer not to need the mailbox ticker to manage the
* transfer queueing since the protocol layer queues things by itself.
@@ -3264,6 +3283,35 @@ static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
return NOTIFY_BAD;
}
+static void ti_sci_set_is_suspending(struct ti_sci_info *info, bool is_suspending)
+{
+ info->is_suspending = is_suspending;
+}
+
+static int ti_sci_suspend(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+ /*
+ * We must switch operation to polled mode now as drivers and the genpd
+ * layer may make late TI SCI calls to change clock and device states
+ * from the noirq phase of suspend.
+ */
+ ti_sci_set_is_suspending(info, true);
+
+ return 0;
+}
+
+static int ti_sci_resume(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+
+ ti_sci_set_is_suspending(info, false);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(ti_sci_pm_ops, ti_sci_suspend, ti_sci_resume);
+
/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
.default_host_id = 2,
@@ -3472,6 +3520,7 @@ static struct platform_driver ti_sci_driver = {
.driver = {
.name = "ti-sci",
.of_match_table = of_match_ptr(ti_sci_of_match),
+ .pm = &ti_sci_pm_ops,
},
};
module_platform_driver(ti_sci_driver);
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index e0fc80e041ea..93c8d07ee328 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -240,7 +240,7 @@ config RESET_SUNXI
config RESET_TI_SCI
tristate "TI System Control Interface (TI-SCI) reset driver"
- depends on TI_SCI_PROTOCOL
+ depends on TI_SCI_PROTOCOL || COMPILE_TEST
help
This enables the reset driver support over TI System Control Interface
available on some new TI's SoCs. If you wish to use reset resources
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index 700d8eecd8c4..d756591de973 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -415,9 +415,8 @@ static int of_channel_match_helper(struct device_node *np, const char *name,
void *knav_dma_open_channel(struct device *dev, const char *name,
struct knav_dma_cfg *config)
{
- struct knav_dma_chan *chan;
- struct knav_dma_device *dma;
- bool found = false;
+ struct knav_dma_device *dma = NULL, *iter1;
+ struct knav_dma_chan *chan = NULL, *iter2;
int chan_num = -1;
const char *instance;
@@ -444,33 +443,32 @@ void *knav_dma_open_channel(struct device *dev, const char *name,
}
/* Look for correct dma instance */
- list_for_each_entry(dma, &kdev->list, list) {
- if (!strcmp(dma->name, instance)) {
- found = true;
+ list_for_each_entry(iter1, &kdev->list, list) {
+ if (!strcmp(iter1->name, instance)) {
+ dma = iter1;
break;
}
}
- if (!found) {
+ if (!dma) {
dev_err(kdev->dev, "No DMA instance with name %s\n", instance);
return (void *)-EINVAL;
}
/* Look for correct dma channel from dma instance */
- found = false;
- list_for_each_entry(chan, &dma->chan_list, list) {
+ list_for_each_entry(iter2, &dma->chan_list, list) {
if (config->direction == DMA_MEM_TO_DEV) {
- if (chan->channel == chan_num) {
- found = true;
+ if (iter2->channel == chan_num) {
+ chan = iter2;
break;
}
} else {
- if (chan->flow == chan_num) {
- found = true;
+ if (iter2->flow == chan_num) {
+ chan = iter2;
break;
}
}
}
- if (!found) {
+ if (!chan) {
dev_err(kdev->dev, "channel %d is not in DMA %s\n",
chan_num, instance);
return (void *)-EINVAL;
@@ -747,9 +745,8 @@ static int knav_dma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&kdev->list);
pm_runtime_enable(kdev->dev);
- ret = pm_runtime_get_sync(kdev->dev);
+ ret = pm_runtime_resume_and_get(kdev->dev);
if (ret < 0) {
- pm_runtime_put_noidle(kdev->dev);
dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret);
goto err_pm_disable;
}
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 2ac3856b8d42..92af7d1b6f5b 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -758,10 +758,9 @@ void *knav_pool_create(const char *name,
int num_desc, int region_id)
{
struct knav_region *reg_itr, *region = NULL;
- struct knav_pool *pool, *pi;
+ struct knav_pool *pool, *pi = NULL, *iter;
struct list_head *node;
unsigned last_offset;
- bool slot_found;
int ret;
if (!kdev)
@@ -790,7 +789,7 @@ void *knav_pool_create(const char *name,
}
pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
- if (IS_ERR_OR_NULL(pool->queue)) {
+ if (IS_ERR(pool->queue)) {
dev_err(kdev->dev,
"failed to open queue for pool(%s), error %ld\n",
name, PTR_ERR(pool->queue));
@@ -816,18 +815,17 @@ void *knav_pool_create(const char *name,
* the request
*/
last_offset = 0;
- slot_found = false;
node = &region->pools;
- list_for_each_entry(pi, &region->pools, region_inst) {
- if ((pi->region_offset - last_offset) >= num_desc) {
- slot_found = true;
+ list_for_each_entry(iter, &region->pools, region_inst) {
+ if ((iter->region_offset - last_offset) >= num_desc) {
+ pi = iter;
break;
}
- last_offset = pi->region_offset + pi->num_desc;
+ last_offset = iter->region_offset + iter->num_desc;
}
- node = &pi->region_inst;
- if (slot_found) {
+ if (pi) {
+ node = &pi->region_inst;
pool->region = region;
pool->num_desc = num_desc;
pool->region_offset = last_offset;
@@ -1785,9 +1783,8 @@ static int knav_queue_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&kdev->pdsps);
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
dev_err(dev, "Failed to enable QMSS\n");
return ret;
}
diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
index f32e1cbbe8c5..913b964374a4 100644
--- a/drivers/soc/ti/omap_prm.c
+++ b/drivers/soc/ti/omap_prm.c
@@ -941,23 +941,20 @@ static int omap_prm_probe(struct platform_device *pdev)
struct resource *res;
const struct omap_prm_data *data;
struct omap_prm *prm;
- const struct of_device_id *match;
int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- match = of_match_device(omap_prm_id_table, &pdev->dev);
- if (!match)
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
return -ENOTSUPP;
prm = devm_kzalloc(&pdev->dev, sizeof(*prm), GFP_KERNEL);
if (!prm)
return -ENOMEM;
- data = match->data;
-
while (data->base != res->start) {
if (!data->base)
return -EINVAL;
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index 7bab4bbaf02d..ce09c42eaed2 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -555,11 +555,9 @@ static int am33xx_pm_probe(struct platform_device *pdev)
#endif /* CONFIG_SUSPEND */
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
goto err_pm_runtime_disable;
- }
ret = pm_ops->init(am33xx_do_sram_idle);
if (ret) {
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
index b36779309e49..0e4ba0f89533 100644
--- a/drivers/soc/ti/pruss.c
+++ b/drivers/soc/ti/pruss.c
@@ -279,10 +279,9 @@ static int pruss_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pruss);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "couldn't enable module\n");
- pm_runtime_put_noidle(dev);
goto rpm_disable;
}
diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c
index 8afb3f45d263..a33ec7eaf23d 100644
--- a/drivers/soc/ti/ti_sci_pm_domains.c
+++ b/drivers/soc/ti/ti_sci_pm_domains.c
@@ -183,6 +183,8 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
devm_kcalloc(dev, max_id + 1,
sizeof(*pd_provider->data.domains),
GFP_KERNEL);
+ if (!pd_provider->data.domains)
+ return -ENOMEM;
pd_provider->data.num_domains = max_id + 1;
pd_provider->data.xlate = ti_sci_pd_xlate;
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index 2f03ced0f411..0076d467ff6b 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -7,7 +7,9 @@
* Dave Gerlach <d-gerlach@ti.com>
*/
+#include <linux/debugfs.h>
#include <linux/err.h>
+#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
@@ -40,12 +42,30 @@
#define M3_FW_VERSION_MASK 0xffff
#define M3_WAKE_SRC_MASK 0xff
+#define IPC_MEM_TYPE_SHIFT (0x0)
+#define IPC_MEM_TYPE_MASK (0x7 << 0)
+#define IPC_VTT_STAT_SHIFT (0x3)
+#define IPC_VTT_STAT_MASK (0x1 << 3)
+#define IPC_VTT_GPIO_PIN_SHIFT (0x4)
+#define IPC_VTT_GPIO_PIN_MASK (0x3f << 4)
+#define IPC_IO_ISOLATION_STAT_SHIFT (10)
+#define IPC_IO_ISOLATION_STAT_MASK (0x1 << 10)
+
+#define IPC_DBG_HALT_SHIFT (11)
+#define IPC_DBG_HALT_MASK (0x1 << 11)
+
#define M3_STATE_UNKNOWN 0
#define M3_STATE_RESET 1
#define M3_STATE_INITED 2
#define M3_STATE_MSG_FOR_LP 3
#define M3_STATE_MSG_FOR_RESET 4
+#define WKUP_M3_SD_FW_MAGIC 0x570C
+
+#define WKUP_M3_DMEM_START 0x80000
+#define WKUP_M3_AUXDATA_OFFSET 0x1000
+#define WKUP_M3_AUXDATA_SIZE 0xFF
+
static struct wkup_m3_ipc *m3_ipc_state;
static const struct wkup_m3_wakeup_src wakeups[] = {
@@ -66,6 +86,148 @@ static const struct wkup_m3_wakeup_src wakeups[] = {
{.irq_nr = 0, .src = "Unknown"},
};
+/**
+ * wkup_m3_copy_aux_data - Copy auxiliary data to special region of m3 dmem
+ * @data - pointer to data
+ * @sz - size of data to copy (limit 256 bytes)
+ *
+ * Copies any additional blob of data to the wkup_m3 dmem to be used by the
+ * firmware
+ */
+static unsigned long wkup_m3_copy_aux_data(struct wkup_m3_ipc *m3_ipc,
+ const void *data, int sz)
+{
+ unsigned long aux_data_dev_addr;
+ void *aux_data_addr;
+
+ aux_data_dev_addr = WKUP_M3_DMEM_START + WKUP_M3_AUXDATA_OFFSET;
+ aux_data_addr = rproc_da_to_va(m3_ipc->rproc,
+ aux_data_dev_addr,
+ WKUP_M3_AUXDATA_SIZE,
+ NULL);
+ memcpy(aux_data_addr, data, sz);
+
+ return WKUP_M3_AUXDATA_OFFSET;
+}
+
+static void wkup_m3_scale_data_fw_cb(const struct firmware *fw, void *context)
+{
+ unsigned long val, aux_base;
+ struct wkup_m3_scale_data_header hdr;
+ struct wkup_m3_ipc *m3_ipc = context;
+ struct device *dev = m3_ipc->dev;
+
+ if (!fw) {
+ dev_err(dev, "Voltage scale fw name given but file missing.\n");
+ return;
+ }
+
+ memcpy(&hdr, fw->data, sizeof(hdr));
+
+ if (hdr.magic != WKUP_M3_SD_FW_MAGIC) {
+ dev_err(dev, "PM: Voltage Scale Data binary does not appear valid.\n");
+ goto release_sd_fw;
+ }
+
+ aux_base = wkup_m3_copy_aux_data(m3_ipc, fw->data + sizeof(hdr),
+ fw->size - sizeof(hdr));
+
+ val = (aux_base + hdr.sleep_offset);
+ val |= ((aux_base + hdr.wake_offset) << 16);
+
+ m3_ipc->volt_scale_offsets = val;
+
+release_sd_fw:
+ release_firmware(fw);
+};
+
+static int wkup_m3_init_scale_data(struct wkup_m3_ipc *m3_ipc,
+ struct device *dev)
+{
+ int ret = 0;
+
+ /*
+ * If no name is provided, user has already been warned, pm will
+ * still work so return 0
+ */
+
+ if (!m3_ipc->sd_fw_name)
+ return ret;
+
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
+ m3_ipc->sd_fw_name, dev, GFP_ATOMIC,
+ m3_ipc, wkup_m3_scale_data_fw_cb);
+
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void wkup_m3_set_halt_late(bool enabled)
+{
+ if (enabled)
+ m3_ipc_state->halt = (1 << IPC_DBG_HALT_SHIFT);
+ else
+ m3_ipc_state->halt = 0;
+}
+
+static int option_get(void *data, u64 *val)
+{
+ u32 *option = data;
+
+ *val = *option;
+
+ return 0;
+}
+
+static int option_set(void *data, u64 val)
+{
+ u32 *option = data;
+
+ *option = val;
+
+ if (option == &m3_ipc_state->halt) {
+ if (val)
+ wkup_m3_set_halt_late(true);
+ else
+ wkup_m3_set_halt_late(false);
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(wkup_m3_ipc_option_fops, option_get, option_set,
+ "%llu\n");
+
+static int wkup_m3_ipc_dbg_init(struct wkup_m3_ipc *m3_ipc)
+{
+ m3_ipc->dbg_path = debugfs_create_dir("wkup_m3_ipc", NULL);
+
+ if (!m3_ipc->dbg_path)
+ return -EINVAL;
+
+ (void)debugfs_create_file("enable_late_halt", 0644,
+ m3_ipc->dbg_path,
+ &m3_ipc->halt,
+ &wkup_m3_ipc_option_fops);
+
+ return 0;
+}
+
+static inline void wkup_m3_ipc_dbg_destroy(struct wkup_m3_ipc *m3_ipc)
+{
+ debugfs_remove_recursive(m3_ipc->dbg_path);
+}
+#else
+static inline int wkup_m3_ipc_dbg_init(struct wkup_m3_ipc *m3_ipc)
+{
+ return 0;
+}
+
+static inline void wkup_m3_ipc_dbg_destroy(struct wkup_m3_ipc *m3_ipc)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
static void am33xx_txev_eoi(struct wkup_m3_ipc *m3_ipc)
{
writel(AM33XX_M3_TXEV_ACK,
@@ -130,6 +292,7 @@ static irqreturn_t wkup_m3_txev_handler(int irq, void *ipc_data)
}
m3_ipc->state = M3_STATE_INITED;
+ wkup_m3_init_scale_data(m3_ipc, dev);
complete(&m3_ipc->sync_complete);
break;
case M3_STATE_MSG_FOR_RESET:
@@ -215,6 +378,17 @@ static int wkup_m3_is_available(struct wkup_m3_ipc *m3_ipc)
(m3_ipc->state != M3_STATE_UNKNOWN));
}
+static void wkup_m3_set_vtt_gpio(struct wkup_m3_ipc *m3_ipc, int gpio)
+{
+ m3_ipc->vtt_conf = (1 << IPC_VTT_STAT_SHIFT) |
+ (gpio << IPC_VTT_GPIO_PIN_SHIFT);
+}
+
+static void wkup_m3_set_io_isolation(struct wkup_m3_ipc *m3_ipc)
+{
+ m3_ipc->isolation_conf = (1 << IPC_IO_ISOLATION_STAT_SHIFT);
+}
+
/* Public functions */
/**
* wkup_m3_set_mem_type - Pass wkup_m3 which type of memory is in use
@@ -280,12 +454,15 @@ static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
switch (state) {
case WKUP_M3_DEEPSLEEP:
m3_power_state = IPC_CMD_DS0;
+ wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->volt_scale_offsets, 5);
break;
case WKUP_M3_STANDBY:
m3_power_state = IPC_CMD_STANDBY;
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
break;
case WKUP_M3_IDLE:
m3_power_state = IPC_CMD_IDLE;
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
break;
default:
return 1;
@@ -294,11 +471,13 @@ static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
/* Program each required IPC register then write defaults to others */
wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->resume_addr, 0);
wkup_m3_ctrl_ipc_write(m3_ipc, m3_power_state, 1);
- wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type, 4);
+ wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type |
+ m3_ipc->vtt_conf |
+ m3_ipc->isolation_conf |
+ m3_ipc->halt, 4);
wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 3);
- wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 6);
wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 7);
@@ -433,12 +612,13 @@ static int wkup_m3_rproc_boot_thread(void *arg)
static int wkup_m3_ipc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- int irq, ret;
+ int irq, ret, temp;
phandle rproc_phandle;
struct rproc *m3_rproc;
struct resource *res;
struct task_struct *task;
struct wkup_m3_ipc *m3_ipc;
+ struct device_node *np = dev->of_node;
m3_ipc = devm_kzalloc(dev, sizeof(*m3_ipc), GFP_KERNEL);
if (!m3_ipc)
@@ -450,10 +630,8 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
return PTR_ERR(m3_ipc->ipc_mem_base);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no irq resource\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(dev, irq, wkup_m3_txev_handler,
0, "wkup_m3_txev", m3_ipc);
@@ -496,6 +674,22 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
m3_ipc->ops = &ipc_ops;
+ if (!of_property_read_u32(np, "ti,vtt-gpio-pin", &temp)) {
+ if (temp >= 0 && temp <= 31)
+ wkup_m3_set_vtt_gpio(m3_ipc, temp);
+ else
+ dev_warn(dev, "Invalid VTT GPIO(%d) pin\n", temp);
+ }
+
+ if (of_find_property(np, "ti,set-io-isolation", NULL))
+ wkup_m3_set_io_isolation(m3_ipc);
+
+ ret = of_property_read_string(np, "firmware-name",
+ &m3_ipc->sd_fw_name);
+ if (ret) {
+ dev_dbg(dev, "Voltage scaling data blob not provided from DT.\n");
+ };
+
/*
* Wait for firmware loading completion in a thread so we
* can boot the wkup_m3 as soon as it's ready without holding
@@ -510,6 +704,8 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
goto err_put_rproc;
}
+ wkup_m3_ipc_dbg_init(m3_ipc);
+
return 0;
err_put_rproc:
@@ -521,6 +717,8 @@ err_free_mbox:
static int wkup_m3_ipc_remove(struct platform_device *pdev)
{
+ wkup_m3_ipc_dbg_destroy(m3_ipc_state);
+
mbox_free_channel(m3_ipc_state->mbox);
rproc_shutdown(m3_ipc_state->rproc);
diff --git a/include/linux/wkup_m3_ipc.h b/include/linux/wkup_m3_ipc.h
index 3f496967b538..26d1eb058fa3 100644
--- a/include/linux/wkup_m3_ipc.h
+++ b/include/linux/wkup_m3_ipc.h
@@ -33,7 +33,13 @@ struct wkup_m3_ipc {
int mem_type;
unsigned long resume_addr;
+ int vtt_conf;
+ int isolation_conf;
int state;
+ u32 halt;
+
+ unsigned long volt_scale_offsets;
+ const char *sd_fw_name;
struct completion sync_complete;
struct mbox_client mbox_client;
@@ -41,6 +47,7 @@ struct wkup_m3_ipc {
struct wkup_m3_ipc_ops *ops;
int is_rtc_only;
+ struct dentry *dbg_path;
};
struct wkup_m3_wakeup_src {
@@ -48,6 +55,12 @@ struct wkup_m3_wakeup_src {
char src[10];
};
+struct wkup_m3_scale_data_header {
+ u16 magic;
+ u8 sleep_offset;
+ u8 wake_offset;
+} __packed;
+
struct wkup_m3_ipc_ops {
void (*set_mem_type)(struct wkup_m3_ipc *m3_ipc, int mem_type);
void (*set_resume_address)(struct wkup_m3_ipc *m3_ipc, void *addr);