-rw-r--r--   arch/arm/Kconfig                   2
-rw-r--r--   arch/arm/include/asm/irq_work.h    2
-rw-r--r--   arch/arm/kernel/reboot.c           1
-rw-r--r--   arch/arm/mm/dma-mapping.c          2
-rw-r--r--   arch/arm/mm/kasan_init.c           6
-rw-r--r--   drivers/amba/bus.c               313
6 files changed, 156 insertions, 170 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4294c0123857..53e6a1da9af5 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -75,6 +75,7 @@ config ARM
 	select HAVE_ARCH_KFENCE if MMU && !XIP_KERNEL
 	select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
 	select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
+	select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
 	select HAVE_ARCH_PFN_VALID
 	select HAVE_ARCH_SECCOMP
@@ -1419,6 +1420,7 @@ config HW_PERF_EVENTS
 config ARM_MODULE_PLTS
 	bool "Use PLTs to allow module memory to spill over into vmalloc area"
 	depends on MODULES
+	select KASAN_VMALLOC if KASAN
 	default y
 	help
 	  Allocate PLTs when loading modules so that jumps and calls whose
diff --git a/arch/arm/include/asm/irq_work.h b/arch/arm/include/asm/irq_work.h
index 8895999834cc..3149e4dc1b54 100644
--- a/arch/arm/include/asm/irq_work.h
+++ b/arch/arm/include/asm/irq_work.h
@@ -9,4 +9,6 @@ static inline bool arch_irq_work_has_interrupt(void)
 	return is_smp();
 }
 
+extern void arch_irq_work_raise(void);
+
 #endif /* _ASM_ARM_IRQ_WORK_H */
diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
index 2cb943422554..3f0d5c3dae11 100644
--- a/arch/arm/kernel/reboot.c
+++ b/arch/arm/kernel/reboot.c
@@ -10,6 +10,7 @@
 #include <asm/cacheflush.h>
 #include <asm/idmap.h>
 #include <asm/virt.h>
+#include <asm/system_misc.h>
 
 #include "reboot.h"
 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 059cce018570..1483b6a4319d 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -709,7 +709,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
 	*handle = DMA_MAPPING_ERROR;
 	allowblock = gfpflags_allow_blocking(gfp);
-	cma = allowblock ? dev_get_cma_area(dev) : false;
+	cma = allowblock ? dev_get_cma_area(dev) : NULL;
 
 	if (cma)
 		buf->allocator = &cma_allocator;
diff --git a/arch/arm/mm/kasan_init.c b/arch/arm/mm/kasan_init.c
index 5ad0d6c56d56..29caee9c79ce 100644
--- a/arch/arm/mm/kasan_init.c
+++ b/arch/arm/mm/kasan_init.c
@@ -236,7 +236,11 @@ void __init kasan_init(void)
 
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
-	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
+	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
+		kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
+					    kasan_mem_to_shadow((void *)VMALLOC_END));
+
+	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_END),
 				    kasan_mem_to_shadow((void *)-1UL) + 1);
 
 	for_each_mem_range(i, &pa_start, &pa_end) {
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 0cb20324da16..32b0e0b930c1 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -130,11 +130,100 @@ static struct attribute *amba_dev_attrs[] = {
 };
 ATTRIBUTE_GROUPS(amba_dev);
 
+static int amba_read_periphid(struct amba_device *dev)
+{
+	struct reset_control *rstc;
+	u32 size, pid, cid;
+	void __iomem *tmp;
+	int i, ret;
+
+	ret = dev_pm_domain_attach(&dev->dev, true);
+	if (ret) {
+		dev_dbg(&dev->dev, "can't get PM domain: %d\n", ret);
+		goto err_out;
+	}
+
+	ret = amba_get_enable_pclk(dev);
+	if (ret) {
+		dev_dbg(&dev->dev, "can't get pclk: %d\n", ret);
+		goto err_pm;
+	}
+
+	/*
+	 * Find reset control(s) of the amba bus and de-assert them.
+	 */
+	rstc = of_reset_control_array_get_optional_shared(dev->dev.of_node);
+	if (IS_ERR(rstc)) {
+		ret = PTR_ERR(rstc);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&dev->dev, "can't get reset: %d\n", ret);
+		goto err_clk;
+	}
+	reset_control_deassert(rstc);
+	reset_control_put(rstc);
+
+	size = resource_size(&dev->res);
+	tmp = ioremap(dev->res.start, size);
+	if (!tmp) {
+		ret = -ENOMEM;
+		goto err_clk;
+	}
+
+	/*
+	 * Read pid and cid based on size of resource
+	 * they are located at end of region
+	 */
+	for (pid = 0, i = 0; i < 4; i++)
+		pid |= (readl(tmp + size - 0x20 + 4 * i) & 255) << (i * 8);
+	for (cid = 0, i = 0; i < 4; i++)
+		cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) << (i * 8);
+
+	if (cid == CORESIGHT_CID) {
+		/* set the base to the start of the last 4k block */
+		void __iomem *csbase = tmp + size - 4096;
+
+		dev->uci.devarch = readl(csbase + UCI_REG_DEVARCH_OFFSET);
+		dev->uci.devtype = readl(csbase + UCI_REG_DEVTYPE_OFFSET) & 0xff;
+	}
+
+	if (cid == AMBA_CID || cid == CORESIGHT_CID) {
+		dev->periphid = pid;
+		dev->cid = cid;
+	}
+
+	if (!dev->periphid)
+		ret = -ENODEV;
+
+	iounmap(tmp);
+
+err_clk:
+	amba_put_disable_pclk(dev);
+err_pm:
+	dev_pm_domain_detach(&dev->dev, true);
+err_out:
+	return ret;
+}
+
 static int amba_match(struct device *dev, struct device_driver *drv)
 {
 	struct amba_device *pcdev = to_amba_device(dev);
 	struct amba_driver *pcdrv = to_amba_driver(drv);
 
+	if (!pcdev->periphid) {
+		int ret = amba_read_periphid(pcdev);
+
+		/*
+		 * Returning any error other than -EPROBE_DEFER from bus match
+		 * can cause driver registration failure. So, if there's a
+		 * permanent failure in reading pid and cid, simply map it to
+		 * -EPROBE_DEFER.
+		 */
+		if (ret)
+			return -EPROBE_DEFER;
+		dev_set_uevent_suppress(dev, false);
+		kobject_uevent(&dev->kobj, KOBJ_ADD);
+	}
+
 	/* When driver_override is set, only bind to the matching driver */
 	if (pcdev->driver_override)
 		return !strcmp(pcdev->driver_override, drv->name);
@@ -368,6 +457,42 @@ static int __init amba_init(void)
 
 postcore_initcall(amba_init);
 
+static int amba_proxy_probe(struct amba_device *adev,
+			    const struct amba_id *id)
+{
+	WARN(1, "Stub driver should never match any device.\n");
+	return -ENODEV;
+}
+
+static const struct amba_id amba_stub_drv_ids[] = {
+	{ 0, 0 },
+};
+
+static struct amba_driver amba_proxy_drv = {
+	.drv = {
+		.name = "amba-proxy",
+	},
+	.probe = amba_proxy_probe,
+	.id_table = amba_stub_drv_ids,
+};
+
+static int __init amba_stub_drv_init(void)
+{
+	if (!IS_ENABLED(CONFIG_MODULES))
+		return 0;
+
+	/*
+	 * The amba_match() function will get called only if there is at least
+	 * one amba driver registered. If all amba drivers are modules and are
+	 * only loaded based on uevents, then we'll hit a chicken-and-egg
+	 * situation where amba_match() is waiting on drivers and drivers are
+	 * waiting on amba_match(). So, register a stub driver to make sure
+	 * amba_match() is called even if no amba driver has been registered.
+	 */
+	return amba_driver_register(&amba_proxy_drv);
+}
+late_initcall_sync(amba_stub_drv_init);
+
 /**
  * amba_driver_register - register an AMBA device driver
  * @drv: amba device driver structure
@@ -410,156 +535,6 @@ static void amba_device_release(struct device *dev)
 	kfree(d);
 }
 
-static int amba_read_periphid(struct amba_device *dev)
-{
-	struct reset_control *rstc;
-	u32 size, pid, cid;
-	void __iomem *tmp;
-	int i, ret;
-
-	ret = dev_pm_domain_attach(&dev->dev, true);
-	if (ret)
-		goto err_out;
-
-	ret = amba_get_enable_pclk(dev);
-	if (ret)
-		goto err_pm;
-
-	/*
-	 * Find reset control(s) of the amba bus and de-assert them.
-	 */
-	rstc = of_reset_control_array_get_optional_shared(dev->dev.of_node);
-	if (IS_ERR(rstc)) {
-		ret = PTR_ERR(rstc);
-		if (ret != -EPROBE_DEFER)
-			dev_err(&dev->dev, "can't get reset: %d\n", ret);
-		goto err_clk;
-	}
-	reset_control_deassert(rstc);
-	reset_control_put(rstc);
-
-	size = resource_size(&dev->res);
-	tmp = ioremap(dev->res.start, size);
-	if (!tmp) {
-		ret = -ENOMEM;
-		goto err_clk;
-	}
-
-	/*
-	 * Read pid and cid based on size of resource
-	 * they are located at end of region
-	 */
-	for (pid = 0, i = 0; i < 4; i++)
-		pid |= (readl(tmp + size - 0x20 + 4 * i) & 255) << (i * 8);
-	for (cid = 0, i = 0; i < 4; i++)
-		cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) << (i * 8);
-
-	if (cid == CORESIGHT_CID) {
-		/* set the base to the start of the last 4k block */
-		void __iomem *csbase = tmp + size - 4096;
-
-		dev->uci.devarch = readl(csbase + UCI_REG_DEVARCH_OFFSET);
-		dev->uci.devtype = readl(csbase + UCI_REG_DEVTYPE_OFFSET) & 0xff;
-	}
-
-	if (cid == AMBA_CID || cid == CORESIGHT_CID) {
-		dev->periphid = pid;
-		dev->cid = cid;
-	}
-
-	if (!dev->periphid)
-		ret = -ENODEV;
-
-	iounmap(tmp);
-
-err_clk:
-	amba_put_disable_pclk(dev);
-err_pm:
-	dev_pm_domain_detach(&dev->dev, true);
-err_out:
-	return ret;
-}
-
-static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
-{
-	int ret;
-
-	ret = request_resource(parent, &dev->res);
-	if (ret)
-		goto err_out;
-
-	/* Hard-coded primecell ID instead of plug-n-play */
-	if (dev->periphid != 0)
-		goto skip_probe;
-
-	ret = amba_read_periphid(dev);
-	if (ret)
-		goto err_release;
-
-skip_probe:
-	ret = device_add(&dev->dev);
-err_release:
-	if (ret)
-		release_resource(&dev->res);
-err_out:
-	return ret;
-}
-
-/*
- * Registration of AMBA device require reading its pid and cid registers.
- * To do this, the device must be turned on (if it is a part of power domain)
- * and have clocks enabled. However in some cases those resources might not be
- * yet available. Returning EPROBE_DEFER is not a solution in such case,
- * because callers don't handle this special error code. Instead such devices
- * are added to the special list and their registration is retried from
- * periodic worker, until all resources are available and registration succeeds.
- */
-struct deferred_device {
-	struct amba_device *dev;
-	struct resource *parent;
-	struct list_head node;
-};
-
-static LIST_HEAD(deferred_devices);
-static DEFINE_MUTEX(deferred_devices_lock);
-
-static void amba_deferred_retry_func(struct work_struct *dummy);
-static DECLARE_DELAYED_WORK(deferred_retry_work, amba_deferred_retry_func);
-
-#define DEFERRED_DEVICE_TIMEOUT (msecs_to_jiffies(5 * 1000))
-
-static int amba_deferred_retry(void)
-{
-	struct deferred_device *ddev, *tmp;
-
-	mutex_lock(&deferred_devices_lock);
-
-	list_for_each_entry_safe(ddev, tmp, &deferred_devices, node) {
-		int ret = amba_device_try_add(ddev->dev, ddev->parent);
-
-		if (ret == -EPROBE_DEFER)
-			continue;
-
-		list_del_init(&ddev->node);
-		amba_device_put(ddev->dev);
-		kfree(ddev);
-	}
-
-	mutex_unlock(&deferred_devices_lock);
-
-	return 0;
-}
-late_initcall(amba_deferred_retry);
-
-static void amba_deferred_retry_func(struct work_struct *dummy)
-{
-	amba_deferred_retry();
-
-	if (!list_empty(&deferred_devices))
-		schedule_delayed_work(&deferred_retry_work,
-				      DEFERRED_DEVICE_TIMEOUT);
-}
-
 /**
  * amba_device_add - add a previously allocated AMBA device structure
  * @dev: AMBA device allocated by amba_device_alloc
@@ -571,28 +546,30 @@ static void amba_deferred_retry_func(struct work_struct *dummy)
  */
 int amba_device_add(struct amba_device *dev, struct resource *parent)
 {
-	int ret = amba_device_try_add(dev, parent);
-
-	if (ret == -EPROBE_DEFER) {
-		struct deferred_device *ddev;
-
-		ddev = kmalloc(sizeof(*ddev), GFP_KERNEL);
-		if (!ddev)
-			return -ENOMEM;
+	int ret;
 
-		ddev->dev = dev;
-		ddev->parent = parent;
-		ret = 0;
+	ret = request_resource(parent, &dev->res);
+	if (ret)
+		return ret;
 
-		mutex_lock(&deferred_devices_lock);
+	/* If primecell ID isn't hard-coded, figure it out */
+	if (!dev->periphid) {
+		/*
+		 * AMBA device uevents require reading its pid and cid
+		 * registers. To do this, the device must be on, clocked and
+		 * out of reset. However in some cases those resources might
+		 * not yet be available. If that's the case, we suppress the
+		 * generation of uevents until we can read the pid and cid
+		 * registers. See also amba_match().
+		 */
+		if (amba_read_periphid(dev))
+			dev_set_uevent_suppress(&dev->dev, true);
+	}
 
-		if (list_empty(&deferred_devices))
-			schedule_delayed_work(&deferred_retry_work,
-					      DEFERRED_DEVICE_TIMEOUT);
-		list_add_tail(&ddev->node, &deferred_devices);
+	ret = device_add(&dev->dev);
+	if (ret)
+		release_resource(&dev->res);
 
-		mutex_unlock(&deferred_devices_lock);
-	}
 	return ret;
 }
 EXPORT_SYMBOL_GPL(amba_device_add);
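
For context on the drivers/amba/bus.c rework above: AMBA client drivers are matched purely by peripheral ID, and when built as modules they are autoloaded from the KOBJ_ADD uevent that this patch now emits only once the pid/cid have actually been read. A minimal sketch of such a client driver is shown below; it is illustrative only — the driver name, functions and the ID/mask values are placeholders and are not part of this patch.

/*
 * Hypothetical AMBA client driver, sketched to show the match path that
 * amba_match()/amba_read_periphid() above serve. All names and the
 * ID/mask values are placeholders.
 */
#include <linux/module.h>
#include <linux/amba/bus.h>

static int foo_amba_probe(struct amba_device *adev, const struct amba_id *id)
{
	/* By the time probe runs, the bus has filled in adev->periphid. */
	dev_info(&adev->dev, "bound, periphid 0x%08x\n", adev->periphid);
	return 0;
}

static const struct amba_id foo_amba_ids[] = {
	{ .id = 0x000bb000, .mask = 0x000fffff },	/* placeholder ID */
	{ 0, 0 },
};
MODULE_DEVICE_TABLE(amba, foo_amba_ids);

static struct amba_driver foo_amba_driver = {
	.drv		= { .name = "foo-amba" },
	.probe		= foo_amba_probe,
	.id_table	= foo_amba_ids,
};
module_amba_driver(foo_amba_driver);

MODULE_LICENSE("GPL");

If the device's power domain, clock or reset are not yet available when amba_device_add() runs, the uevent (and hence the module autoload sketched above) is simply suppressed; amba_match() maps a failed ID read to -EPROBE_DEFER so deferred probing retries it, and the amba-proxy stub guarantees amba_match() keeps being called even before any real AMBA driver has registered.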