From c334940ea26cb58f5514dcbb225a3f397b2684ad Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 23 Apr 2014 10:52:17 +0100 Subject: component: fix missed cleanup in case of devres failure In try_to_bring_up_master(), we tear down the master's component list for each error case, except for devres group failure. Fix this oversight by making the code less prone to such mistakes. Acked-by: Laurent Pinchart Signed-off-by: Russell King --- drivers/base/component.c | 62 ++++++++++++++++++++++++------------------------ 1 file changed, 31 insertions(+), 31 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/component.c b/drivers/base/component.c index c4778995cd72..d0ebd4431736 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -113,44 +113,44 @@ static void master_remove_components(struct master *master) static int try_to_bring_up_master(struct master *master, struct component *component) { - int ret = 0; + int ret; - if (!master->bound) { - /* - * Search the list of components, looking for components that - * belong to this master, and attach them to the master. - */ - if (master->ops->add_components(master->dev, master)) { - /* Failed to find all components */ - master_remove_components(master); - ret = 0; - goto out; - } + if (master->bound) + return 0; - if (component && component->master != master) { - master_remove_components(master); - ret = 0; - goto out; - } + /* + * Search the list of components, looking for components that + * belong to this master, and attach them to the master. + */ + if (master->ops->add_components(master->dev, master)) { + /* Failed to find all components */ + ret = 0; + goto out; + } - if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) { - ret = -ENOMEM; - goto out; - } + if (component && component->master != master) { + ret = 0; + goto out; + } - /* Found all components */ - ret = master->ops->bind(master->dev); - if (ret < 0) { - devres_release_group(master->dev, NULL); - dev_info(master->dev, "master bind failed: %d\n", ret); - master_remove_components(master); - goto out; - } + if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) { + ret = -ENOMEM; + goto out; + } - master->bound = true; - ret = 1; + /* Found all components */ + ret = master->ops->bind(master->dev); + if (ret < 0) { + devres_release_group(master->dev, NULL); + dev_info(master->dev, "master bind failed: %d\n", ret); + goto out; } + + master->bound = true; + return 1; + out: + master_remove_components(master); return ret; } -- cgit v1.2.3 From fcbcebce7159c928692dc6a5e88869f6e44438b9 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 18 Apr 2014 20:16:22 +0100 Subject: component: ignore multiple additions of the same component Permit masters to call component_master_add_child() and match the same child multiple times. This may happen if there are multiple connections to a single component device from other devices. In such scenarios, we should not return a failure, but instead ignore the attempt.
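As an illustration of the scenario (a hedged sketch, not part of the patch — compare_dev, my_add_components and the remote[] pointers are hypothetical):

	/* Hypothetical compare callback: match on the struct device itself. */
	static int compare_dev(struct device *dev, void *data)
	{
		return dev == data;
	}

	/*
	 * Sketch of a master's ->add_components() in which two connections
	 * resolve to the same component device. Previously the second
	 * component_master_add_child() call failed with -ENXIO because the
	 * component was already claimed; it is now simply ignored.
	 */
	static int my_add_components(struct device *dev, struct master *master)
	{
		struct my_master *priv = dev_get_drvdata(dev);
		int ret;

		ret = component_master_add_child(master, compare_dev, priv->remote[0]);
		if (ret)
			return ret;

		/* remote[1] may resolve to the same device as remote[0]. */
		return component_master_add_child(master, compare_dev, priv->remote[1]);
	}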
Acked-by: Laurent Pinchart Signed-off-by: Russell King --- drivers/base/component.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/component.c b/drivers/base/component.c index d0ebd4431736..55813e91bf0d 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -69,6 +69,11 @@ static void component_detach_master(struct master *master, struct component *c) c->master = NULL; } +/* + * Add a component to a master, finding the component via the compare + * function and compare data. This is safe to call for duplicate matches + * and will not result in the same component being added multiple times. + */ int component_master_add_child(struct master *master, int (*compare)(struct device *, void *), void *compare_data) { @@ -76,11 +81,12 @@ int component_master_add_child(struct master *master, int ret = -ENXIO; list_for_each_entry(c, &component_list, node) { - if (c->master) + if (c->master && c->master != master) continue; if (compare(c->dev, compare_data)) { - component_attach_master(master, c); + if (!c->master) + component_attach_master(master, c); ret = 0; break; } -- cgit v1.2.3 From 6955b58254c2bcee8a7b55ce06468a645dc98ec5 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 19 Apr 2014 11:18:01 +0100 Subject: component: add support for component match array Add support for generating a set of component matches at master probe time, and submitting them to the component layer. This allows the component layer to perform the matches internally without needing to call into the master driver, and allows for further restructuring of the component helper. Acked-by: Laurent Pinchart Signed-off-by: Russell King --- drivers/base/component.c | 120 ++++++++++++++++++++++++++++++++++++++++++++-- include/linux/component.h | 7 +++ 2 files changed, 124 insertions(+), 3 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/component.c b/drivers/base/component.c index 55813e91bf0d..b4236daed4fa 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -18,6 +18,15 @@ #include #include +struct component_match { + size_t alloc; + size_t num; + struct { + void *data; + int (*fn)(struct device *, void *); + } compare[0]; +}; + struct master { struct list_head node; struct list_head components; @@ -25,6 +34,7 @@ struct master { const struct component_master_ops *ops; struct device *dev; + struct component_match *match; }; struct component { @@ -96,6 +106,34 @@ int component_master_add_child(struct master *master, } EXPORT_SYMBOL_GPL(component_master_add_child); +static int find_components(struct master *master) +{ + struct component_match *match = master->match; + size_t i; + int ret = 0; + + if (!match) { + /* + * Search the list of components, looking for components that + * belong to this master, and attach them to the master. + */ + return master->ops->add_components(master->dev, master); + } + + /* + * Scan the array of match functions and attach + * any components which are found to this master. + */ + for (i = 0; i < match->num; i++) { + ret = component_master_add_child(master, + match->compare[i].fn, + match->compare[i].data); + if (ret) + break; + } + return ret; +} + /* Detach all attached components from this master */ static void master_remove_components(struct master *master) { @@ -128,7 +166,7 @@ static int try_to_bring_up_master(struct master *master, * Search the list of components, looking for components that * belong to this master, and attach them to the master. 
*/ - if (master->ops->add_components(master->dev, master)) { + if (find_components(master)) { /* Failed to find all components */ ret = 0; goto out; @@ -186,18 +224,87 @@ static void take_down_master(struct master *master) master_remove_components(master); } -int component_master_add(struct device *dev, - const struct component_master_ops *ops) +static size_t component_match_size(size_t num) +{ + return offsetof(struct component_match, compare[num]); +} + +static struct component_match *component_match_realloc(struct device *dev, + struct component_match *match, size_t num) +{ + struct component_match *new; + + if (match && match->alloc == num) + return match; + + new = devm_kmalloc(dev, component_match_size(num), GFP_KERNEL); + if (!new) + return ERR_PTR(-ENOMEM); + + if (match) { + memcpy(new, match, component_match_size(min(match->num, num))); + devm_kfree(dev, match); + } else { + new->num = 0; + } + + new->alloc = num; + + return new; +} + +/* + * Add a component to be matched. + * + * The match array is first created or extended if necessary. + */ +void component_match_add(struct device *dev, struct component_match **matchptr, + int (*compare)(struct device *, void *), void *compare_data) +{ + struct component_match *match = *matchptr; + + if (IS_ERR(match)) + return; + + if (!match || match->num == match->alloc) { + size_t new_size = match ? match->alloc + 16 : 15; + + match = component_match_realloc(dev, match, new_size); + + *matchptr = match; + + if (IS_ERR(match)) + return; + } + + match->compare[match->num].fn = compare; + match->compare[match->num].data = compare_data; + match->num++; +} +EXPORT_SYMBOL(component_match_add); + +int component_master_add_with_match(struct device *dev, + const struct component_master_ops *ops, + struct component_match *match) { struct master *master; int ret; + if (ops->add_components && match) + return -EINVAL; + + /* Reallocate the match array for its true size */ + match = component_match_realloc(dev, match, match->num); + if (IS_ERR(match)) + return PTR_ERR(match); + master = kzalloc(sizeof(*master), GFP_KERNEL); if (!master) return -ENOMEM; master->dev = dev; master->ops = ops; + master->match = match; INIT_LIST_HEAD(&master->components); /* Add to the list of available masters. */ @@ -215,6 +322,13 @@ int component_master_add(struct device *dev, return ret < 0 ? 
ret : 0; } +EXPORT_SYMBOL_GPL(component_master_add_with_match); + +int component_master_add(struct device *dev, + const struct component_master_ops *ops) +{ + return component_master_add_with_match(dev, ops, NULL); +} EXPORT_SYMBOL_GPL(component_master_add); void component_master_del(struct device *dev, diff --git a/include/linux/component.h b/include/linux/component.h index 68870182ca1e..c00dcc302611 100644 --- a/include/linux/component.h +++ b/include/linux/component.h @@ -29,4 +29,11 @@ void component_master_del(struct device *, int component_master_add_child(struct master *master, int (*compare)(struct device *, void *), void *compare_data); +struct component_match; + +int component_master_add_with_match(struct device *, + const struct component_master_ops *, struct component_match *); +void component_match_add(struct device *, struct component_match **, + int (*compare)(struct device *, void *), void *compare_data); + #endif -- cgit v1.2.3 From b509cc802239a8b5ba7d1d2cc5adfb9d984b7ed8 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 4 Jul 2014 13:23:46 +0100 Subject: component: fix bug with legacy API Sachin Kamat reports that "component: add support for component match array" broke Exynos DRM due to a NULL pointer deref. Fix this. Reported-by: Sachin Kamat Tested-by: Sachin Kamat Signed-off-by: Russell King --- drivers/base/component.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/component.c b/drivers/base/component.c index b4236daed4fa..f748430bb654 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -293,10 +293,12 @@ int component_master_add_with_match(struct device *dev, if (ops->add_components && match) return -EINVAL; - /* Reallocate the match array for its true size */ - match = component_match_realloc(dev, match, match->num); - if (IS_ERR(match)) - return PTR_ERR(match); + if (match) { + /* Reallocate the match array for its true size */ + match = component_match_realloc(dev, match, match->num); + if (IS_ERR(match)) + return PTR_ERR(match); + } master = kzalloc(sizeof(*master), GFP_KERNEL); if (!master) -- cgit v1.2.3 From 35fac7e305dc5e0bd1e52f2505944674337de57c Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Tue, 1 Jul 2014 12:57:08 +0200 Subject: dma-buf: move to drivers/dma-buf Signed-off-by: Maarten Lankhorst Acked-by: Sumit Semwal Acked-by: Daniel Vetter Signed-off-by: Greg Kroah-Hartman --- Documentation/DocBook/device-drivers.tmpl | 3 +- MAINTAINERS | 4 +- drivers/Makefile | 1 + drivers/base/Makefile | 1 - drivers/base/dma-buf.c | 743 ------------------------------ drivers/base/reservation.c | 39 -- drivers/dma-buf/Makefile | 1 + drivers/dma-buf/dma-buf.c | 743 ++++++++++++++++++++++++++++++ drivers/dma-buf/reservation.c | 39 ++ 9 files changed, 787 insertions(+), 787 deletions(-) delete mode 100644 drivers/base/dma-buf.c delete mode 100644 drivers/base/reservation.c create mode 100644 drivers/dma-buf/Makefile create mode 100644 drivers/dma-buf/dma-buf.c create mode 100644 drivers/dma-buf/reservation.c (limited to 'drivers/base') diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl index cc63f30de166..ac61ebd92875 100644 --- a/Documentation/DocBook/device-drivers.tmpl +++ b/Documentation/DocBook/device-drivers.tmpl @@ -128,8 +128,7 @@ X!Edrivers/base/interface.c !Edrivers/base/bus.c Device Drivers DMA Management -!Edrivers/base/dma-buf.c -!Edrivers/base/reservation.c +!Edrivers/dma-buf/dma-buf.c 
!Iinclude/linux/reservation.h !Edrivers/base/dma-coherent.c !Edrivers/base/dma-mapping.c diff --git a/MAINTAINERS b/MAINTAINERS index 6813d0aa5ecf..2eefee768d46 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2900,8 +2900,8 @@ S: Maintained L: linux-media@vger.kernel.org L: dri-devel@lists.freedesktop.org L: linaro-mm-sig@lists.linaro.org -F: drivers/base/dma-buf* -F: include/linux/dma-buf* +F: drivers/dma-buf/ +F: include/linux/dma-buf* include/linux/reservation.h F: Documentation/dma-buf-sharing.txt T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git diff --git a/drivers/Makefile b/drivers/Makefile index f98b50d8251d..c00337be5351 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -61,6 +61,7 @@ obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/ obj-$(CONFIG_PARPORT) += parport/ obj-y += base/ block/ misc/ mfd/ nfc/ +obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/ obj-$(CONFIG_NUBUS) += nubus/ obj-y += macintosh/ obj-$(CONFIG_IDE) += ide/ diff --git a/drivers/base/Makefile b/drivers/base/Makefile index 04b314e0fa51..4aab26ec0292 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -10,7 +10,6 @@ obj-$(CONFIG_DMA_CMA) += dma-contiguous.o obj-y += power/ obj-$(CONFIG_HAS_DMA) += dma-mapping.o obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o -obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o reservation.o obj-$(CONFIG_ISA) += isa.o obj-$(CONFIG_FW_LOADER) += firmware_class.o obj-$(CONFIG_NUMA) += node.o diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c deleted file mode 100644 index 840c7fa80983..000000000000 --- a/drivers/base/dma-buf.c +++ /dev/null @@ -1,743 +0,0 @@ -/* - * Framework for buffer objects that can be shared across devices/subsystems. - * - * Copyright(C) 2011 Linaro Limited. All rights reserved. - * Author: Sumit Semwal - * - * Many thanks to linaro-mm-sig list, and specially - * Arnd Bergmann , Rob Clark and - * Daniel Vetter for their support in creation and - * refining of this idea. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . 
- */ - -#include -#include -#include -#include -#include -#include -#include - -static inline int is_dma_buf_file(struct file *); - -struct dma_buf_list { - struct list_head head; - struct mutex lock; -}; - -static struct dma_buf_list db_list; - -static int dma_buf_release(struct inode *inode, struct file *file) -{ - struct dma_buf *dmabuf; - - if (!is_dma_buf_file(file)) - return -EINVAL; - - dmabuf = file->private_data; - - BUG_ON(dmabuf->vmapping_counter); - - dmabuf->ops->release(dmabuf); - - mutex_lock(&db_list.lock); - list_del(&dmabuf->list_node); - mutex_unlock(&db_list.lock); - - kfree(dmabuf); - return 0; -} - -static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) -{ - struct dma_buf *dmabuf; - - if (!is_dma_buf_file(file)) - return -EINVAL; - - dmabuf = file->private_data; - - /* check for overflowing the buffer's size */ - if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) > - dmabuf->size >> PAGE_SHIFT) - return -EINVAL; - - return dmabuf->ops->mmap(dmabuf, vma); -} - -static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) -{ - struct dma_buf *dmabuf; - loff_t base; - - if (!is_dma_buf_file(file)) - return -EBADF; - - dmabuf = file->private_data; - - /* only support discovering the end of the buffer, - but also allow SEEK_SET to maintain the idiomatic - SEEK_END(0), SEEK_CUR(0) pattern */ - if (whence == SEEK_END) - base = dmabuf->size; - else if (whence == SEEK_SET) - base = 0; - else - return -EINVAL; - - if (offset != 0) - return -EINVAL; - - return base + offset; -} - -static const struct file_operations dma_buf_fops = { - .release = dma_buf_release, - .mmap = dma_buf_mmap_internal, - .llseek = dma_buf_llseek, -}; - -/* - * is_dma_buf_file - Check if struct file* is associated with dma_buf - */ -static inline int is_dma_buf_file(struct file *file) -{ - return file->f_op == &dma_buf_fops; -} - -/** - * dma_buf_export_named - Creates a new dma_buf, and associates an anon file - * with this buffer, so it can be exported. - * Also connect the allocator specific data and ops to the buffer. - * Additionally, provide a name string for exporter; useful in debugging. - * - * @priv: [in] Attach private data of allocator to this buffer - * @ops: [in] Attach allocator-defined dma buf ops to the new buffer. - * @size: [in] Size of the buffer - * @flags: [in] mode flags for the file. - * @exp_name: [in] name of the exporting module - useful for debugging. - * - * Returns, on success, a newly created dma_buf object, which wraps the - * supplied private data and operations for dma_buf_ops. On either missing - * ops, or error in allocating struct dma_buf, will return negative error. 
- * - */ -struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops, - size_t size, int flags, const char *exp_name) -{ - struct dma_buf *dmabuf; - struct file *file; - - if (WARN_ON(!priv || !ops - || !ops->map_dma_buf - || !ops->unmap_dma_buf - || !ops->release - || !ops->kmap_atomic - || !ops->kmap - || !ops->mmap)) { - return ERR_PTR(-EINVAL); - } - - dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL); - if (dmabuf == NULL) - return ERR_PTR(-ENOMEM); - - dmabuf->priv = priv; - dmabuf->ops = ops; - dmabuf->size = size; - dmabuf->exp_name = exp_name; - - file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags); - if (IS_ERR(file)) { - kfree(dmabuf); - return ERR_CAST(file); - } - - file->f_mode |= FMODE_LSEEK; - dmabuf->file = file; - - mutex_init(&dmabuf->lock); - INIT_LIST_HEAD(&dmabuf->attachments); - - mutex_lock(&db_list.lock); - list_add(&dmabuf->list_node, &db_list.head); - mutex_unlock(&db_list.lock); - - return dmabuf; -} -EXPORT_SYMBOL_GPL(dma_buf_export_named); - - -/** - * dma_buf_fd - returns a file descriptor for the given dma_buf - * @dmabuf: [in] pointer to dma_buf for which fd is required. - * @flags: [in] flags to give to fd - * - * On success, returns an associated 'fd'. Else, returns error. - */ -int dma_buf_fd(struct dma_buf *dmabuf, int flags) -{ - int fd; - - if (!dmabuf || !dmabuf->file) - return -EINVAL; - - fd = get_unused_fd_flags(flags); - if (fd < 0) - return fd; - - fd_install(fd, dmabuf->file); - - return fd; -} -EXPORT_SYMBOL_GPL(dma_buf_fd); - -/** - * dma_buf_get - returns the dma_buf structure related to an fd - * @fd: [in] fd associated with the dma_buf to be returned - * - * On success, returns the dma_buf structure associated with an fd; uses - * file's refcounting done by fget to increase refcount. returns ERR_PTR - * otherwise. - */ -struct dma_buf *dma_buf_get(int fd) -{ - struct file *file; - - file = fget(fd); - - if (!file) - return ERR_PTR(-EBADF); - - if (!is_dma_buf_file(file)) { - fput(file); - return ERR_PTR(-EINVAL); - } - - return file->private_data; -} -EXPORT_SYMBOL_GPL(dma_buf_get); - -/** - * dma_buf_put - decreases refcount of the buffer - * @dmabuf: [in] buffer to reduce refcount of - * - * Uses file's refcounting done implicitly by fput() - */ -void dma_buf_put(struct dma_buf *dmabuf) -{ - if (WARN_ON(!dmabuf || !dmabuf->file)) - return; - - fput(dmabuf->file); -} -EXPORT_SYMBOL_GPL(dma_buf_put); - -/** - * dma_buf_attach - Add the device to dma_buf's attachments list; optionally, - * calls attach() of dma_buf_ops to allow device-specific attach functionality - * @dmabuf: [in] buffer to attach device to. - * @dev: [in] device to be attached. - * - * Returns struct dma_buf_attachment * for this attachment; returns ERR_PTR on - * error. 
- */ -struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, - struct device *dev) -{ - struct dma_buf_attachment *attach; - int ret; - - if (WARN_ON(!dmabuf || !dev)) - return ERR_PTR(-EINVAL); - - attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL); - if (attach == NULL) - return ERR_PTR(-ENOMEM); - - attach->dev = dev; - attach->dmabuf = dmabuf; - - mutex_lock(&dmabuf->lock); - - if (dmabuf->ops->attach) { - ret = dmabuf->ops->attach(dmabuf, dev, attach); - if (ret) - goto err_attach; - } - list_add(&attach->node, &dmabuf->attachments); - - mutex_unlock(&dmabuf->lock); - return attach; - -err_attach: - kfree(attach); - mutex_unlock(&dmabuf->lock); - return ERR_PTR(ret); -} -EXPORT_SYMBOL_GPL(dma_buf_attach); - -/** - * dma_buf_detach - Remove the given attachment from dmabuf's attachments list; - * optionally calls detach() of dma_buf_ops for device-specific detach - * @dmabuf: [in] buffer to detach from. - * @attach: [in] attachment to be detached; is free'd after this call. - * - */ -void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) -{ - if (WARN_ON(!dmabuf || !attach)) - return; - - mutex_lock(&dmabuf->lock); - list_del(&attach->node); - if (dmabuf->ops->detach) - dmabuf->ops->detach(dmabuf, attach); - - mutex_unlock(&dmabuf->lock); - kfree(attach); -} -EXPORT_SYMBOL_GPL(dma_buf_detach); - -/** - * dma_buf_map_attachment - Returns the scatterlist table of the attachment; - * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the - * dma_buf_ops. - * @attach: [in] attachment whose scatterlist is to be returned - * @direction: [in] direction of DMA transfer - * - * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR - * on error. - */ -struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, - enum dma_data_direction direction) -{ - struct sg_table *sg_table = ERR_PTR(-EINVAL); - - might_sleep(); - - if (WARN_ON(!attach || !attach->dmabuf)) - return ERR_PTR(-EINVAL); - - sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); - if (!sg_table) - sg_table = ERR_PTR(-ENOMEM); - - return sg_table; -} -EXPORT_SYMBOL_GPL(dma_buf_map_attachment); - -/** - * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;might - * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of - * dma_buf_ops. - * @attach: [in] attachment to unmap buffer from - * @sg_table: [in] scatterlist info of the buffer to unmap - * @direction: [in] direction of DMA transfer - * - */ -void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, - struct sg_table *sg_table, - enum dma_data_direction direction) -{ - might_sleep(); - - if (WARN_ON(!attach || !attach->dmabuf || !sg_table)) - return; - - attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, - direction); -} -EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment); - - -/** - * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the - * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific - * preparations. Coherency is only guaranteed in the specified range for the - * specified access direction. - * @dmabuf: [in] buffer to prepare cpu access for. - * @start: [in] start of range for cpu access. - * @len: [in] length of range for cpu access. - * @direction: [in] length of range for cpu access. - * - * Can return negative error values, returns 0 on success. 
- */ -int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len, - enum dma_data_direction direction) -{ - int ret = 0; - - if (WARN_ON(!dmabuf)) - return -EINVAL; - - if (dmabuf->ops->begin_cpu_access) - ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction); - - return ret; -} -EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access); - -/** - * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the - * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific - * actions. Coherency is only guaranteed in the specified range for the - * specified access direction. - * @dmabuf: [in] buffer to complete cpu access for. - * @start: [in] start of range for cpu access. - * @len: [in] length of range for cpu access. - * @direction: [in] length of range for cpu access. - * - * This call must always succeed. - */ -void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len, - enum dma_data_direction direction) -{ - WARN_ON(!dmabuf); - - if (dmabuf->ops->end_cpu_access) - dmabuf->ops->end_cpu_access(dmabuf, start, len, direction); -} -EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access); - -/** - * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address - * space. The same restrictions as for kmap_atomic and friends apply. - * @dmabuf: [in] buffer to map page from. - * @page_num: [in] page in PAGE_SIZE units to map. - * - * This call must always succeed, any necessary preparations that might fail - * need to be done in begin_cpu_access. - */ -void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num) -{ - WARN_ON(!dmabuf); - - return dmabuf->ops->kmap_atomic(dmabuf, page_num); -} -EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic); - -/** - * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic. - * @dmabuf: [in] buffer to unmap page from. - * @page_num: [in] page in PAGE_SIZE units to unmap. - * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap_atomic. - * - * This call must always succeed. - */ -void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num, - void *vaddr) -{ - WARN_ON(!dmabuf); - - if (dmabuf->ops->kunmap_atomic) - dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr); -} -EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic); - -/** - * dma_buf_kmap - Map a page of the buffer object into kernel address space. The - * same restrictions as for kmap and friends apply. - * @dmabuf: [in] buffer to map page from. - * @page_num: [in] page in PAGE_SIZE units to map. - * - * This call must always succeed, any necessary preparations that might fail - * need to be done in begin_cpu_access. - */ -void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num) -{ - WARN_ON(!dmabuf); - - return dmabuf->ops->kmap(dmabuf, page_num); -} -EXPORT_SYMBOL_GPL(dma_buf_kmap); - -/** - * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap. - * @dmabuf: [in] buffer to unmap page from. - * @page_num: [in] page in PAGE_SIZE units to unmap. - * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap. - * - * This call must always succeed. 
- */ -void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num, - void *vaddr) -{ - WARN_ON(!dmabuf); - - if (dmabuf->ops->kunmap) - dmabuf->ops->kunmap(dmabuf, page_num, vaddr); -} -EXPORT_SYMBOL_GPL(dma_buf_kunmap); - - -/** - * dma_buf_mmap - Setup up a userspace mmap with the given vma - * @dmabuf: [in] buffer that should back the vma - * @vma: [in] vma for the mmap - * @pgoff: [in] offset in pages where this mmap should start within the - * dma-buf buffer. - * - * This function adjusts the passed in vma so that it points at the file of the - * dma_buf operation. It also adjusts the starting pgoff and does bounds - * checking on the size of the vma. Then it calls the exporters mmap function to - * set up the mapping. - * - * Can return negative error values, returns 0 on success. - */ -int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, - unsigned long pgoff) -{ - struct file *oldfile; - int ret; - - if (WARN_ON(!dmabuf || !vma)) - return -EINVAL; - - /* check for offset overflow */ - if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff) - return -EOVERFLOW; - - /* check for overflowing the buffer's size */ - if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) > - dmabuf->size >> PAGE_SHIFT) - return -EINVAL; - - /* readjust the vma */ - get_file(dmabuf->file); - oldfile = vma->vm_file; - vma->vm_file = dmabuf->file; - vma->vm_pgoff = pgoff; - - ret = dmabuf->ops->mmap(dmabuf, vma); - if (ret) { - /* restore old parameters on failure */ - vma->vm_file = oldfile; - fput(dmabuf->file); - } else { - if (oldfile) - fput(oldfile); - } - return ret; - -} -EXPORT_SYMBOL_GPL(dma_buf_mmap); - -/** - * dma_buf_vmap - Create virtual mapping for the buffer object into kernel - * address space. Same restrictions as for vmap and friends apply. - * @dmabuf: [in] buffer to vmap - * - * This call may fail due to lack of virtual mapping address space. - * These calls are optional in drivers. The intended use for them - * is for mapping objects linear in kernel space for high use objects. - * Please attempt to use kmap/kunmap before thinking about these interfaces. - * - * Returns NULL on error. - */ -void *dma_buf_vmap(struct dma_buf *dmabuf) -{ - void *ptr; - - if (WARN_ON(!dmabuf)) - return NULL; - - if (!dmabuf->ops->vmap) - return NULL; - - mutex_lock(&dmabuf->lock); - if (dmabuf->vmapping_counter) { - dmabuf->vmapping_counter++; - BUG_ON(!dmabuf->vmap_ptr); - ptr = dmabuf->vmap_ptr; - goto out_unlock; - } - - BUG_ON(dmabuf->vmap_ptr); - - ptr = dmabuf->ops->vmap(dmabuf); - if (WARN_ON_ONCE(IS_ERR(ptr))) - ptr = NULL; - if (!ptr) - goto out_unlock; - - dmabuf->vmap_ptr = ptr; - dmabuf->vmapping_counter = 1; - -out_unlock: - mutex_unlock(&dmabuf->lock); - return ptr; -} -EXPORT_SYMBOL_GPL(dma_buf_vmap); - -/** - * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap. 
- * @dmabuf: [in] buffer to vunmap - * @vaddr: [in] vmap to vunmap - */ -void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) -{ - if (WARN_ON(!dmabuf)) - return; - - BUG_ON(!dmabuf->vmap_ptr); - BUG_ON(dmabuf->vmapping_counter == 0); - BUG_ON(dmabuf->vmap_ptr != vaddr); - - mutex_lock(&dmabuf->lock); - if (--dmabuf->vmapping_counter == 0) { - if (dmabuf->ops->vunmap) - dmabuf->ops->vunmap(dmabuf, vaddr); - dmabuf->vmap_ptr = NULL; - } - mutex_unlock(&dmabuf->lock); -} -EXPORT_SYMBOL_GPL(dma_buf_vunmap); - -#ifdef CONFIG_DEBUG_FS -static int dma_buf_describe(struct seq_file *s) -{ - int ret; - struct dma_buf *buf_obj; - struct dma_buf_attachment *attach_obj; - int count = 0, attach_count; - size_t size = 0; - - ret = mutex_lock_interruptible(&db_list.lock); - - if (ret) - return ret; - - seq_puts(s, "\nDma-buf Objects:\n"); - seq_puts(s, "size\tflags\tmode\tcount\texp_name\n"); - - list_for_each_entry(buf_obj, &db_list.head, list_node) { - ret = mutex_lock_interruptible(&buf_obj->lock); - - if (ret) { - seq_puts(s, - "\tERROR locking buffer object: skipping\n"); - continue; - } - - seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n", - buf_obj->size, - buf_obj->file->f_flags, buf_obj->file->f_mode, - (long)(buf_obj->file->f_count.counter), - buf_obj->exp_name); - - seq_puts(s, "\tAttached Devices:\n"); - attach_count = 0; - - list_for_each_entry(attach_obj, &buf_obj->attachments, node) { - seq_puts(s, "\t"); - - seq_printf(s, "%s\n", dev_name(attach_obj->dev)); - attach_count++; - } - - seq_printf(s, "Total %d devices attached\n\n", - attach_count); - - count++; - size += buf_obj->size; - mutex_unlock(&buf_obj->lock); - } - - seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size); - - mutex_unlock(&db_list.lock); - return 0; -} - -static int dma_buf_show(struct seq_file *s, void *unused) -{ - void (*func)(struct seq_file *) = s->private; - func(s); - return 0; -} - -static int dma_buf_debug_open(struct inode *inode, struct file *file) -{ - return single_open(file, dma_buf_show, inode->i_private); -} - -static const struct file_operations dma_buf_debug_fops = { - .open = dma_buf_debug_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static struct dentry *dma_buf_debugfs_dir; - -static int dma_buf_init_debugfs(void) -{ - int err = 0; - dma_buf_debugfs_dir = debugfs_create_dir("dma_buf", NULL); - if (IS_ERR(dma_buf_debugfs_dir)) { - err = PTR_ERR(dma_buf_debugfs_dir); - dma_buf_debugfs_dir = NULL; - return err; - } - - err = dma_buf_debugfs_create_file("bufinfo", dma_buf_describe); - - if (err) - pr_debug("dma_buf: debugfs: failed to create node bufinfo\n"); - - return err; -} - -static void dma_buf_uninit_debugfs(void) -{ - if (dma_buf_debugfs_dir) - debugfs_remove_recursive(dma_buf_debugfs_dir); -} - -int dma_buf_debugfs_create_file(const char *name, - int (*write)(struct seq_file *)) -{ - struct dentry *d; - - d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir, - write, &dma_buf_debug_fops); - - return PTR_ERR_OR_ZERO(d); -} -#else -static inline int dma_buf_init_debugfs(void) -{ - return 0; -} -static inline void dma_buf_uninit_debugfs(void) -{ -} -#endif - -static int __init dma_buf_init(void) -{ - mutex_init(&db_list.lock); - INIT_LIST_HEAD(&db_list.head); - dma_buf_init_debugfs(); - return 0; -} -subsys_initcall(dma_buf_init); - -static void __exit dma_buf_deinit(void) -{ - dma_buf_uninit_debugfs(); -} -__exitcall(dma_buf_deinit); diff --git a/drivers/base/reservation.c b/drivers/base/reservation.c deleted file mode 100644 index 
a73fbf3b8e56..000000000000 --- a/drivers/base/reservation.c +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (C) 2012-2013 Canonical Ltd - * - * Based on bo.c which bears the following copyright notice, - * but is dual licensed: - * - * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ -/* - * Authors: Thomas Hellstrom - */ - -#include -#include - -DEFINE_WW_CLASS(reservation_ww_class); -EXPORT_SYMBOL(reservation_ww_class); diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile new file mode 100644 index 000000000000..4a4f4c9bacd0 --- /dev/null +++ b/drivers/dma-buf/Makefile @@ -0,0 +1 @@ +obj-y := dma-buf.o reservation.o diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c new file mode 100644 index 000000000000..840c7fa80983 --- /dev/null +++ b/drivers/dma-buf/dma-buf.c @@ -0,0 +1,743 @@ +/* + * Framework for buffer objects that can be shared across devices/subsystems. + * + * Copyright(C) 2011 Linaro Limited. All rights reserved. + * Author: Sumit Semwal + * + * Many thanks to linaro-mm-sig list, and specially + * Arnd Bergmann , Rob Clark and + * Daniel Vetter for their support in creation and + * refining of this idea. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +static inline int is_dma_buf_file(struct file *); + +struct dma_buf_list { + struct list_head head; + struct mutex lock; +}; + +static struct dma_buf_list db_list; + +static int dma_buf_release(struct inode *inode, struct file *file) +{ + struct dma_buf *dmabuf; + + if (!is_dma_buf_file(file)) + return -EINVAL; + + dmabuf = file->private_data; + + BUG_ON(dmabuf->vmapping_counter); + + dmabuf->ops->release(dmabuf); + + mutex_lock(&db_list.lock); + list_del(&dmabuf->list_node); + mutex_unlock(&db_list.lock); + + kfree(dmabuf); + return 0; +} + +static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) +{ + struct dma_buf *dmabuf; + + if (!is_dma_buf_file(file)) + return -EINVAL; + + dmabuf = file->private_data; + + /* check for overflowing the buffer's size */ + if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) > + dmabuf->size >> PAGE_SHIFT) + return -EINVAL; + + return dmabuf->ops->mmap(dmabuf, vma); +} + +static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) +{ + struct dma_buf *dmabuf; + loff_t base; + + if (!is_dma_buf_file(file)) + return -EBADF; + + dmabuf = file->private_data; + + /* only support discovering the end of the buffer, + but also allow SEEK_SET to maintain the idiomatic + SEEK_END(0), SEEK_CUR(0) pattern */ + if (whence == SEEK_END) + base = dmabuf->size; + else if (whence == SEEK_SET) + base = 0; + else + return -EINVAL; + + if (offset != 0) + return -EINVAL; + + return base + offset; +} + +static const struct file_operations dma_buf_fops = { + .release = dma_buf_release, + .mmap = dma_buf_mmap_internal, + .llseek = dma_buf_llseek, +}; + +/* + * is_dma_buf_file - Check if struct file* is associated with dma_buf + */ +static inline int is_dma_buf_file(struct file *file) +{ + return file->f_op == &dma_buf_fops; +} + +/** + * dma_buf_export_named - Creates a new dma_buf, and associates an anon file + * with this buffer, so it can be exported. + * Also connect the allocator specific data and ops to the buffer. + * Additionally, provide a name string for exporter; useful in debugging. + * + * @priv: [in] Attach private data of allocator to this buffer + * @ops: [in] Attach allocator-defined dma buf ops to the new buffer. + * @size: [in] Size of the buffer + * @flags: [in] mode flags for the file. + * @exp_name: [in] name of the exporting module - useful for debugging. + * + * Returns, on success, a newly created dma_buf object, which wraps the + * supplied private data and operations for dma_buf_ops. On either missing + * ops, or error in allocating struct dma_buf, will return negative error. 
+ * + */ +struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops, + size_t size, int flags, const char *exp_name) +{ + struct dma_buf *dmabuf; + struct file *file; + + if (WARN_ON(!priv || !ops + || !ops->map_dma_buf + || !ops->unmap_dma_buf + || !ops->release + || !ops->kmap_atomic + || !ops->kmap + || !ops->mmap)) { + return ERR_PTR(-EINVAL); + } + + dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL); + if (dmabuf == NULL) + return ERR_PTR(-ENOMEM); + + dmabuf->priv = priv; + dmabuf->ops = ops; + dmabuf->size = size; + dmabuf->exp_name = exp_name; + + file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags); + if (IS_ERR(file)) { + kfree(dmabuf); + return ERR_CAST(file); + } + + file->f_mode |= FMODE_LSEEK; + dmabuf->file = file; + + mutex_init(&dmabuf->lock); + INIT_LIST_HEAD(&dmabuf->attachments); + + mutex_lock(&db_list.lock); + list_add(&dmabuf->list_node, &db_list.head); + mutex_unlock(&db_list.lock); + + return dmabuf; +} +EXPORT_SYMBOL_GPL(dma_buf_export_named); + + +/** + * dma_buf_fd - returns a file descriptor for the given dma_buf + * @dmabuf: [in] pointer to dma_buf for which fd is required. + * @flags: [in] flags to give to fd + * + * On success, returns an associated 'fd'. Else, returns error. + */ +int dma_buf_fd(struct dma_buf *dmabuf, int flags) +{ + int fd; + + if (!dmabuf || !dmabuf->file) + return -EINVAL; + + fd = get_unused_fd_flags(flags); + if (fd < 0) + return fd; + + fd_install(fd, dmabuf->file); + + return fd; +} +EXPORT_SYMBOL_GPL(dma_buf_fd); + +/** + * dma_buf_get - returns the dma_buf structure related to an fd + * @fd: [in] fd associated with the dma_buf to be returned + * + * On success, returns the dma_buf structure associated with an fd; uses + * file's refcounting done by fget to increase refcount. returns ERR_PTR + * otherwise. + */ +struct dma_buf *dma_buf_get(int fd) +{ + struct file *file; + + file = fget(fd); + + if (!file) + return ERR_PTR(-EBADF); + + if (!is_dma_buf_file(file)) { + fput(file); + return ERR_PTR(-EINVAL); + } + + return file->private_data; +} +EXPORT_SYMBOL_GPL(dma_buf_get); + +/** + * dma_buf_put - decreases refcount of the buffer + * @dmabuf: [in] buffer to reduce refcount of + * + * Uses file's refcounting done implicitly by fput() + */ +void dma_buf_put(struct dma_buf *dmabuf) +{ + if (WARN_ON(!dmabuf || !dmabuf->file)) + return; + + fput(dmabuf->file); +} +EXPORT_SYMBOL_GPL(dma_buf_put); + +/** + * dma_buf_attach - Add the device to dma_buf's attachments list; optionally, + * calls attach() of dma_buf_ops to allow device-specific attach functionality + * @dmabuf: [in] buffer to attach device to. + * @dev: [in] device to be attached. + * + * Returns struct dma_buf_attachment * for this attachment; returns ERR_PTR on + * error. 
+ */ +struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, + struct device *dev) +{ + struct dma_buf_attachment *attach; + int ret; + + if (WARN_ON(!dmabuf || !dev)) + return ERR_PTR(-EINVAL); + + attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL); + if (attach == NULL) + return ERR_PTR(-ENOMEM); + + attach->dev = dev; + attach->dmabuf = dmabuf; + + mutex_lock(&dmabuf->lock); + + if (dmabuf->ops->attach) { + ret = dmabuf->ops->attach(dmabuf, dev, attach); + if (ret) + goto err_attach; + } + list_add(&attach->node, &dmabuf->attachments); + + mutex_unlock(&dmabuf->lock); + return attach; + +err_attach: + kfree(attach); + mutex_unlock(&dmabuf->lock); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(dma_buf_attach); + +/** + * dma_buf_detach - Remove the given attachment from dmabuf's attachments list; + * optionally calls detach() of dma_buf_ops for device-specific detach + * @dmabuf: [in] buffer to detach from. + * @attach: [in] attachment to be detached; is free'd after this call. + * + */ +void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) +{ + if (WARN_ON(!dmabuf || !attach)) + return; + + mutex_lock(&dmabuf->lock); + list_del(&attach->node); + if (dmabuf->ops->detach) + dmabuf->ops->detach(dmabuf, attach); + + mutex_unlock(&dmabuf->lock); + kfree(attach); +} +EXPORT_SYMBOL_GPL(dma_buf_detach); + +/** + * dma_buf_map_attachment - Returns the scatterlist table of the attachment; + * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the + * dma_buf_ops. + * @attach: [in] attachment whose scatterlist is to be returned + * @direction: [in] direction of DMA transfer + * + * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR + * on error. + */ +struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, + enum dma_data_direction direction) +{ + struct sg_table *sg_table = ERR_PTR(-EINVAL); + + might_sleep(); + + if (WARN_ON(!attach || !attach->dmabuf)) + return ERR_PTR(-EINVAL); + + sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); + if (!sg_table) + sg_table = ERR_PTR(-ENOMEM); + + return sg_table; +} +EXPORT_SYMBOL_GPL(dma_buf_map_attachment); + +/** + * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;might + * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of + * dma_buf_ops. + * @attach: [in] attachment to unmap buffer from + * @sg_table: [in] scatterlist info of the buffer to unmap + * @direction: [in] direction of DMA transfer + * + */ +void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, + struct sg_table *sg_table, + enum dma_data_direction direction) +{ + might_sleep(); + + if (WARN_ON(!attach || !attach->dmabuf || !sg_table)) + return; + + attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, + direction); +} +EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment); + + +/** + * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the + * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific + * preparations. Coherency is only guaranteed in the specified range for the + * specified access direction. + * @dmabuf: [in] buffer to prepare cpu access for. + * @start: [in] start of range for cpu access. + * @len: [in] length of range for cpu access. + * @direction: [in] length of range for cpu access. + * + * Can return negative error values, returns 0 on success. 
+ */ +int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len, + enum dma_data_direction direction) +{ + int ret = 0; + + if (WARN_ON(!dmabuf)) + return -EINVAL; + + if (dmabuf->ops->begin_cpu_access) + ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction); + + return ret; +} +EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access); + +/** + * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the + * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific + * actions. Coherency is only guaranteed in the specified range for the + * specified access direction. + * @dmabuf: [in] buffer to complete cpu access for. + * @start: [in] start of range for cpu access. + * @len: [in] length of range for cpu access. + * @direction: [in] length of range for cpu access. + * + * This call must always succeed. + */ +void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len, + enum dma_data_direction direction) +{ + WARN_ON(!dmabuf); + + if (dmabuf->ops->end_cpu_access) + dmabuf->ops->end_cpu_access(dmabuf, start, len, direction); +} +EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access); + +/** + * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address + * space. The same restrictions as for kmap_atomic and friends apply. + * @dmabuf: [in] buffer to map page from. + * @page_num: [in] page in PAGE_SIZE units to map. + * + * This call must always succeed, any necessary preparations that might fail + * need to be done in begin_cpu_access. + */ +void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num) +{ + WARN_ON(!dmabuf); + + return dmabuf->ops->kmap_atomic(dmabuf, page_num); +} +EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic); + +/** + * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic. + * @dmabuf: [in] buffer to unmap page from. + * @page_num: [in] page in PAGE_SIZE units to unmap. + * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap_atomic. + * + * This call must always succeed. + */ +void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num, + void *vaddr) +{ + WARN_ON(!dmabuf); + + if (dmabuf->ops->kunmap_atomic) + dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr); +} +EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic); + +/** + * dma_buf_kmap - Map a page of the buffer object into kernel address space. The + * same restrictions as for kmap and friends apply. + * @dmabuf: [in] buffer to map page from. + * @page_num: [in] page in PAGE_SIZE units to map. + * + * This call must always succeed, any necessary preparations that might fail + * need to be done in begin_cpu_access. + */ +void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num) +{ + WARN_ON(!dmabuf); + + return dmabuf->ops->kmap(dmabuf, page_num); +} +EXPORT_SYMBOL_GPL(dma_buf_kmap); + +/** + * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap. + * @dmabuf: [in] buffer to unmap page from. + * @page_num: [in] page in PAGE_SIZE units to unmap. + * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap. + * + * This call must always succeed. 
+ */ +void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num, + void *vaddr) +{ + WARN_ON(!dmabuf); + + if (dmabuf->ops->kunmap) + dmabuf->ops->kunmap(dmabuf, page_num, vaddr); +} +EXPORT_SYMBOL_GPL(dma_buf_kunmap); + + +/** + * dma_buf_mmap - Setup up a userspace mmap with the given vma + * @dmabuf: [in] buffer that should back the vma + * @vma: [in] vma for the mmap + * @pgoff: [in] offset in pages where this mmap should start within the + * dma-buf buffer. + * + * This function adjusts the passed in vma so that it points at the file of the + * dma_buf operation. It also adjusts the starting pgoff and does bounds + * checking on the size of the vma. Then it calls the exporters mmap function to + * set up the mapping. + * + * Can return negative error values, returns 0 on success. + */ +int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, + unsigned long pgoff) +{ + struct file *oldfile; + int ret; + + if (WARN_ON(!dmabuf || !vma)) + return -EINVAL; + + /* check for offset overflow */ + if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff) + return -EOVERFLOW; + + /* check for overflowing the buffer's size */ + if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) > + dmabuf->size >> PAGE_SHIFT) + return -EINVAL; + + /* readjust the vma */ + get_file(dmabuf->file); + oldfile = vma->vm_file; + vma->vm_file = dmabuf->file; + vma->vm_pgoff = pgoff; + + ret = dmabuf->ops->mmap(dmabuf, vma); + if (ret) { + /* restore old parameters on failure */ + vma->vm_file = oldfile; + fput(dmabuf->file); + } else { + if (oldfile) + fput(oldfile); + } + return ret; + +} +EXPORT_SYMBOL_GPL(dma_buf_mmap); + +/** + * dma_buf_vmap - Create virtual mapping for the buffer object into kernel + * address space. Same restrictions as for vmap and friends apply. + * @dmabuf: [in] buffer to vmap + * + * This call may fail due to lack of virtual mapping address space. + * These calls are optional in drivers. The intended use for them + * is for mapping objects linear in kernel space for high use objects. + * Please attempt to use kmap/kunmap before thinking about these interfaces. + * + * Returns NULL on error. + */ +void *dma_buf_vmap(struct dma_buf *dmabuf) +{ + void *ptr; + + if (WARN_ON(!dmabuf)) + return NULL; + + if (!dmabuf->ops->vmap) + return NULL; + + mutex_lock(&dmabuf->lock); + if (dmabuf->vmapping_counter) { + dmabuf->vmapping_counter++; + BUG_ON(!dmabuf->vmap_ptr); + ptr = dmabuf->vmap_ptr; + goto out_unlock; + } + + BUG_ON(dmabuf->vmap_ptr); + + ptr = dmabuf->ops->vmap(dmabuf); + if (WARN_ON_ONCE(IS_ERR(ptr))) + ptr = NULL; + if (!ptr) + goto out_unlock; + + dmabuf->vmap_ptr = ptr; + dmabuf->vmapping_counter = 1; + +out_unlock: + mutex_unlock(&dmabuf->lock); + return ptr; +} +EXPORT_SYMBOL_GPL(dma_buf_vmap); + +/** + * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap. 
+ * @dmabuf: [in] buffer to vunmap + * @vaddr: [in] vmap to vunmap + */ +void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) +{ + if (WARN_ON(!dmabuf)) + return; + + BUG_ON(!dmabuf->vmap_ptr); + BUG_ON(dmabuf->vmapping_counter == 0); + BUG_ON(dmabuf->vmap_ptr != vaddr); + + mutex_lock(&dmabuf->lock); + if (--dmabuf->vmapping_counter == 0) { + if (dmabuf->ops->vunmap) + dmabuf->ops->vunmap(dmabuf, vaddr); + dmabuf->vmap_ptr = NULL; + } + mutex_unlock(&dmabuf->lock); +} +EXPORT_SYMBOL_GPL(dma_buf_vunmap); + +#ifdef CONFIG_DEBUG_FS +static int dma_buf_describe(struct seq_file *s) +{ + int ret; + struct dma_buf *buf_obj; + struct dma_buf_attachment *attach_obj; + int count = 0, attach_count; + size_t size = 0; + + ret = mutex_lock_interruptible(&db_list.lock); + + if (ret) + return ret; + + seq_puts(s, "\nDma-buf Objects:\n"); + seq_puts(s, "size\tflags\tmode\tcount\texp_name\n"); + + list_for_each_entry(buf_obj, &db_list.head, list_node) { + ret = mutex_lock_interruptible(&buf_obj->lock); + + if (ret) { + seq_puts(s, + "\tERROR locking buffer object: skipping\n"); + continue; + } + + seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n", + buf_obj->size, + buf_obj->file->f_flags, buf_obj->file->f_mode, + (long)(buf_obj->file->f_count.counter), + buf_obj->exp_name); + + seq_puts(s, "\tAttached Devices:\n"); + attach_count = 0; + + list_for_each_entry(attach_obj, &buf_obj->attachments, node) { + seq_puts(s, "\t"); + + seq_printf(s, "%s\n", dev_name(attach_obj->dev)); + attach_count++; + } + + seq_printf(s, "Total %d devices attached\n\n", + attach_count); + + count++; + size += buf_obj->size; + mutex_unlock(&buf_obj->lock); + } + + seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size); + + mutex_unlock(&db_list.lock); + return 0; +} + +static int dma_buf_show(struct seq_file *s, void *unused) +{ + void (*func)(struct seq_file *) = s->private; + func(s); + return 0; +} + +static int dma_buf_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, dma_buf_show, inode->i_private); +} + +static const struct file_operations dma_buf_debug_fops = { + .open = dma_buf_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct dentry *dma_buf_debugfs_dir; + +static int dma_buf_init_debugfs(void) +{ + int err = 0; + dma_buf_debugfs_dir = debugfs_create_dir("dma_buf", NULL); + if (IS_ERR(dma_buf_debugfs_dir)) { + err = PTR_ERR(dma_buf_debugfs_dir); + dma_buf_debugfs_dir = NULL; + return err; + } + + err = dma_buf_debugfs_create_file("bufinfo", dma_buf_describe); + + if (err) + pr_debug("dma_buf: debugfs: failed to create node bufinfo\n"); + + return err; +} + +static void dma_buf_uninit_debugfs(void) +{ + if (dma_buf_debugfs_dir) + debugfs_remove_recursive(dma_buf_debugfs_dir); +} + +int dma_buf_debugfs_create_file(const char *name, + int (*write)(struct seq_file *)) +{ + struct dentry *d; + + d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir, + write, &dma_buf_debug_fops); + + return PTR_ERR_OR_ZERO(d); +} +#else +static inline int dma_buf_init_debugfs(void) +{ + return 0; +} +static inline void dma_buf_uninit_debugfs(void) +{ +} +#endif + +static int __init dma_buf_init(void) +{ + mutex_init(&db_list.lock); + INIT_LIST_HEAD(&db_list.head); + dma_buf_init_debugfs(); + return 0; +} +subsys_initcall(dma_buf_init); + +static void __exit dma_buf_deinit(void) +{ + dma_buf_uninit_debugfs(); +} +__exitcall(dma_buf_deinit); diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c new file mode 100644 index 
000000000000..a73fbf3b8e56 --- /dev/null +++ b/drivers/dma-buf/reservation.c @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2012-2013 Canonical Ltd + * + * Based on bo.c which bears the following copyright notice, + * but is dual licensed: + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#include +#include + +DEFINE_WW_CLASS(reservation_ww_class); +EXPORT_SYMBOL(reservation_ww_class); -- cgit v1.2.3 From e941759c74a44d6ac2eed21bb0a38b21fe4559e2 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Tue, 1 Jul 2014 12:57:14 +0200 Subject: fence: dma-buf cross-device synchronization (v18)

A fence can be attached to a buffer which is being filled or consumed by hw, to allow userspace to pass the buffer without waiting to another device. For example, userspace can call the page_flip ioctl to display the next frame of graphics after kicking the GPU but while the GPU is still rendering. The display device sharing the buffer with the GPU would attach a callback to get notified when the GPU's rendering-complete IRQ fires, to update the scan-out address of the display, without having to wake up userspace.

A driver must allocate a fence context for each execution ring that can run in parallel. The function for this takes an argument with how many contexts to allocate:

  + fence_context_alloc()

A fence is a transient, one-shot deal. It is allocated and attached to one or more dma-buf's. When the one that attached it is done with the pending operation, it can signal the fence:

  + fence_signal()

To get a rough approximation of whether a fence has fired, call:

  + fence_is_signaled()

The dma-buf-mgr handles tracking, and waiting on, the fences associated with a dma-buf. The one pending on the fence can add an async callback:

  + fence_add_callback()

The callback can optionally be cancelled with:

  + fence_remove_callback()

To wait synchronously, optionally with a timeout:

  + fence_wait()
  + fence_wait_timeout()

When emitting a fence, call:

  + trace_fence_emit()

To annotate that a fence is blocking on another fence, call:

  + trace_fence_annotate_wait_on(fence, on_fence)

A default software-only implementation is provided, which can be used by drivers attaching a fence to a buffer when they have no other means for hw sync.
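As a rough sketch of that software-only path (hedged, not taken from this patch — everything prefixed my_ is hypothetical; only the fence_* API comes from the patch):

	#include <linux/fence.h>
	#include <linux/slab.h>

	static unsigned my_context;	/* from fence_context_alloc(1) at probe */
	static unsigned my_seqno;
	static DEFINE_SPINLOCK(my_lock);

	static const char *my_driver_name(struct fence *f) { return "mydrv"; }
	static const char *my_timeline_name(struct fence *f) { return "mydrv-ring0"; }

	/* Pure sw fence: signaling always works, nothing extra to enable. */
	static bool my_enable_signaling(struct fence *f) { return true; }

	static const struct fence_ops my_fence_ops = {
		.get_driver_name = my_driver_name,
		.get_timeline_name = my_timeline_name,
		.enable_signaling = my_enable_signaling,
		.wait = fence_default_wait,
	};

	/* Emit a fence for the job just queued on the ring. */
	static struct fence *my_emit_fence(void)
	{
		struct fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return NULL;
		fence_init(f, &my_fence_ops, &my_lock, my_context, ++my_seqno);
		return f;
	}

	/* Ring's completion IRQ: wakes fence_wait()ers and runs callbacks. */
	static void my_job_done(struct fence *f)
	{
		fence_signal(f);
	}

A consumer on another device would then call fence_wait(f, false), or use fence_add_callback() to be notified without blocking.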
But a memory backed fence is also envisioned, because it is common that GPUs can write to, or poll on, some memory location for synchronization. For example: fence = custom_get_fence(...); if ((seqno_fence = to_seqno_fence(fence)) != NULL) { dma_buf *fence_buf = seqno_fence->sync_buf; get_dma_buf(fence_buf); ... tell the hw the memory location to wait ... custom_wait_on(fence_buf, seqno_fence->seqno_ofs, fence->seqno); } else { /* fall-back to sw sync */ fence_add_callback(fence, my_cb); } On SoC platforms, if some other hw mechanism is provided for synchronizing between IP blocks, it could be supported as an alternate implementation with its own fence ops in a similar way. The enable_signaling callback is used to provide sw signaling in case a cpu waiter is requested or no compatible hardware signaling could be used. The intention is to provide a userspace interface (presumably via eventfd) later, to be used in conjunction with dma-buf's mmap support for sw access to buffers (or for userspace apps that would prefer to do their own synchronization). v1: Original v2: After discussion w/ danvet and mlankhorst on #dri-devel, we decided that dma-fence didn't need to care about the sw->hw signaling path (it can be handled the same as the sw->sw case), and therefore the fence->ops can be simplified and more handled in the core. So remove the signal, add_callback, cancel_callback, and wait ops, and replace with a simple enable_signaling() op which can be used to inform a fence supporting hw->hw signaling that one or more devices which do not support hw signaling are waiting (and therefore it should enable an irq or do whatever is necessary in order that the CPU is notified when the fence is passed). v3: Fix locking fail in attach_fence() and get_fence() v4: Remove tie-in w/ dma-buf.. after discussion w/ danvet and mlankhorst we decided that we need to be able to attach one fence to N dma-buf's, so using the list_head in dma-fence struct would be problematic. v5: [ Maarten Lankhorst ] Updated for dma-bikeshed-fence and dma-buf-manager. v6: [ Maarten Lankhorst ] I removed dma_fence_cancel_callback and some comments about checking if fence fired or not. This is broken by design. waitqueue_active during destruction is now fatal, since the signaller should be holding a reference in enable_signalling until it signalled the fence. Pass the original dma_fence_cb along, and call __remove_wait in the dma_fence_callback handler, so that no cleanup needs to be performed. v7: [ Maarten Lankhorst ] Set cb->func and only enable sw signaling if fence wasn't signaled yet, for example for hardware fences that may choose to signal blindly. v8: [ Maarten Lankhorst ] Tons of tiny fixes, moved __dma_fence_init to header and fixed include mess. dma-fence.h now includes dma-buf.h. All members are now initialized, so kmalloc can be used for allocating a dma-fence. More documentation added. v9: Change compiler bitfields to flags, change return type of enable_signaling to bool. Rework dma_fence_wait. Added dma_fence_is_signaled and dma_fence_wait_timeout. s/dma// and change exports to non GPL. Added fence_is_signaled and fence_enable_sw_signaling calls, add ability to override default wait operation. v10: remove event_queue, use a custom list, export try_to_wake_up from scheduler. Remove fence lock and use a global spinlock instead, this should hopefully remove all the locking headaches I was having on trying to implement this. enable_signaling is called with this lock held.
v11: Use atomic ops for flags, lifting the need for some spin_lock_irqsaves. However I kept the guarantee that after fence_signal returns, it is guaranteed that enable_signaling has either been called to completion, or will not be called any more. Add contexts and seqno to base fence implementation. This allows you to wait for fewer fences, by testing for seqno + signaled, and then only wait on the later fence. Add FENCE_TRACE, FENCE_WARN, and FENCE_ERR. This makes debugging easier. A CONFIG_DEBUG_FENCE option will be added to turn off the FENCE_TRACE spam, and another runtime option can turn it off at runtime. v12: Add CONFIG_FENCE_TRACE. Add missing documentation for the fence->context and fence->seqno members. v13: Fixup CONFIG_FENCE_TRACE kconfig description. Move fence_context_alloc to fence. Simplify fence_later. Kill priv member of fence_cb. v14: Remove priv argument from fence_add_callback, oops! v15: Remove priv from documentation. Explicitly include linux/atomic.h. v16: Add trace events. Import changes required by android syncpoints. v17: Use wake_up_state instead of try_to_wake_up. (Colin Cross) Fix up commit description for seqno_fence. (Rob Clark) v18: Rename release_fence to fence_release. Move to drivers/dma-buf/. Rename __fence_is_signaled and __fence_signal to *_locked. Rename __fence_init to fence_init. Make fence_default_wait return a signed long, and fix wait ops too. Signed-off-by: Maarten Lankhorst Signed-off-by: Thierry Reding #use smp_mb__before_atomic() Acked-by: Sumit Semwal Acked-by: Daniel Vetter Reviewed-by: Rob Clark Signed-off-by: Greg Kroah-Hartman --- Documentation/DocBook/device-drivers.tmpl | 2 + MAINTAINERS | 2 +- drivers/base/Kconfig | 9 + drivers/dma-buf/Makefile | 2 +- drivers/dma-buf/fence.c | 431 ++++++++++++++++++++++++++++++ include/linux/fence.h | 343 ++++++++++++++++++++++++ include/trace/events/fence.h | 128 +++++++++ 7 files changed, 915 insertions(+), 2 deletions(-) create mode 100644 drivers/dma-buf/fence.c create mode 100644 include/linux/fence.h create mode 100644 include/trace/events/fence.h (limited to 'drivers/base') diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl index ac61ebd92875..e634657efb52 100644 --- a/Documentation/DocBook/device-drivers.tmpl +++ b/Documentation/DocBook/device-drivers.tmpl @@ -129,6 +129,8 @@ X!Edrivers/base/interface.c Device Drivers DMA Management !Edrivers/dma-buf/dma-buf.c +!Edrivers/dma-buf/fence.c +!Iinclude/linux/fence.h !Iinclude/linux/reservation.h !Edrivers/base/dma-coherent.c !Edrivers/base/dma-mapping.c diff --git a/MAINTAINERS b/MAINTAINERS index 2eefee768d46..65c8f534b22f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2901,7 +2901,7 @@ L: linux-media@vger.kernel.org L: dri-devel@lists.freedesktop.org L: linaro-mm-sig@lists.linaro.org F: drivers/dma-buf/ -F: include/linux/dma-buf* include/linux/reservation.h +F: include/linux/dma-buf* include/linux/reservation.h include/linux/fence.h F: Documentation/dma-buf-sharing.txt T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 23b8726962af..00e13ce5cbbd 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -208,6 +208,15 @@ config DMA_SHARED_BUFFER APIs extension; the file's descriptor can then be passed on to other driver. +config FENCE_TRACE + bool "Enable verbose FENCE_TRACE messages" + depends on DMA_SHARED_BUFFER + help + Enable the FENCE_TRACE printks.
This will add extra + spam to the console log, but will make it easier to diagnose + lockup related problems for dma-buffers shared across multiple + devices. + config DMA_CMA bool "DMA Contiguous Memory Allocator" depends on HAVE_DMA_CONTIGUOUS && CMA diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index 4a4f4c9bacd0..d7825bfe630e 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1 +1 @@ -obj-y := dma-buf.o reservation.o +obj-y := dma-buf.o fence.o reservation.o diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c new file mode 100644 index 000000000000..948bf00d955e --- /dev/null +++ b/drivers/dma-buf/fence.c @@ -0,0 +1,431 @@ +/* + * Fence mechanism for dma-buf and to allow for asynchronous dma access + * + * Copyright (C) 2012 Canonical Ltd + * Copyright (C) 2012 Texas Instruments + * + * Authors: + * Rob Clark + * Maarten Lankhorst + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on); +EXPORT_TRACEPOINT_SYMBOL(fence_emit); + +/** + * fence context counter: each execution context should have its own + * fence context, this allows checking if fences belong to the same + * context or not. One device can have multiple separate contexts, + * and they're used if some engine can run independently of another. + */ +static atomic_t fence_context_counter = ATOMIC_INIT(0); + +/** + * fence_context_alloc - allocate an array of fence contexts + * @num: [in] amount of contexts to allocate + * + * This function will return the first index of the number of fences allocated. + * The fence context is used for setting fence->context to a unique number. + */ +unsigned fence_context_alloc(unsigned num) +{ + BUG_ON(!num); + return atomic_add_return(num, &fence_context_counter) - num; +} +EXPORT_SYMBOL(fence_context_alloc); + +/** + * fence_signal_locked - signal completion of a fence + * @fence: the fence to signal + * + * Signal completion for software callbacks on a fence, this will unblock + * fence_wait() calls and run all the callbacks added with + * fence_add_callback(). Can be called multiple times, but since a fence + * can only go from unsignaled to signaled state, it will only be effective + * the first time. + * + * Unlike fence_signal, this function must be called with fence->lock held. 
+ */ +int fence_signal_locked(struct fence *fence) +{ + struct fence_cb *cur, *tmp; + int ret = 0; + + if (WARN_ON(!fence)) + return -EINVAL; + + if (!ktime_to_ns(fence->timestamp)) { + fence->timestamp = ktime_get(); + smp_mb__before_atomic(); + } + + if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + ret = -EINVAL; + + /* + * we might have raced with the unlocked fence_signal, + * still run through all callbacks + */ + } else + trace_fence_signaled(fence); + + list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { + list_del_init(&cur->node); + cur->func(fence, cur); + } + return ret; +} +EXPORT_SYMBOL(fence_signal_locked); + +/** + * fence_signal - signal completion of a fence + * @fence: the fence to signal + * + * Signal completion for software callbacks on a fence, this will unblock + * fence_wait() calls and run all the callbacks added with + * fence_add_callback(). Can be called multiple times, but since a fence + * can only go from unsignaled to signaled state, it will only be effective + * the first time. + */ +int fence_signal(struct fence *fence) +{ + unsigned long flags; + + if (!fence) + return -EINVAL; + + if (!ktime_to_ns(fence->timestamp)) { + fence->timestamp = ktime_get(); + smp_mb__before_atomic(); + } + + if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return -EINVAL; + + trace_fence_signaled(fence); + + if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) { + struct fence_cb *cur, *tmp; + + spin_lock_irqsave(fence->lock, flags); + list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { + list_del_init(&cur->node); + cur->func(fence, cur); + } + spin_unlock_irqrestore(fence->lock, flags); + } + return 0; +} +EXPORT_SYMBOL(fence_signal); + +/** + * fence_wait_timeout - sleep until the fence gets signaled + * or until timeout elapses + * @fence: [in] the fence to wait on + * @intr: [in] if true, do an interruptible wait + * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT + * + * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the + * remaining timeout in jiffies on success. Other error values may be + * returned on custom implementations. + * + * Performs a synchronous wait on this fence. It is assumed the caller + * directly or indirectly (buf-mgr between reservation and committing) + * holds a reference to the fence, otherwise the fence might be + * freed before return, resulting in undefined behavior. 
+ */ +signed long +fence_wait_timeout(struct fence *fence, bool intr, signed long timeout) +{ + signed long ret; + + if (WARN_ON(timeout < 0)) + return -EINVAL; + + trace_fence_wait_start(fence); + ret = fence->ops->wait(fence, intr, timeout); + trace_fence_wait_end(fence); + return ret; +} +EXPORT_SYMBOL(fence_wait_timeout); + +void fence_release(struct kref *kref) +{ + struct fence *fence = + container_of(kref, struct fence, refcount); + + trace_fence_destroy(fence); + + BUG_ON(!list_empty(&fence->cb_list)); + + if (fence->ops->release) + fence->ops->release(fence); + else + fence_free(fence); +} +EXPORT_SYMBOL(fence_release); + +void fence_free(struct fence *fence) +{ + kfree(fence); +} +EXPORT_SYMBOL(fence_free); + +/** + * fence_enable_sw_signaling - enable signaling on fence + * @fence: [in] the fence to enable + * + * this will request for sw signaling to be enabled, to make the fence + * complete as soon as possible + */ +void fence_enable_sw_signaling(struct fence *fence) +{ + unsigned long flags; + + if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) && + !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + trace_fence_enable_signal(fence); + + spin_lock_irqsave(fence->lock, flags); + + if (!fence->ops->enable_signaling(fence)) + fence_signal_locked(fence); + + spin_unlock_irqrestore(fence->lock, flags); + } +} +EXPORT_SYMBOL(fence_enable_sw_signaling); + +/** + * fence_add_callback - add a callback to be called when the fence + * is signaled + * @fence: [in] the fence to wait on + * @cb: [in] the callback to register + * @func: [in] the function to call + * + * cb will be initialized by fence_add_callback, no initialization + * by the caller is required. Any number of callbacks can be registered + * to a fence, but a callback can only be registered to one fence at a time. + * + * Note that the callback can be called from an atomic context. If + * fence is already signaled, this function will return -ENOENT (and + * *not* call the callback) + * + * Add a software callback to the fence. Same restrictions apply to + * refcount as it does to fence_wait, however the caller doesn't need to + * keep a refcount to fence afterwards: when software access is enabled, + * the creator of the fence is required to keep the fence alive until + * after it signals with fence_signal. The callback itself can be called + * from irq context. + * + */ +int fence_add_callback(struct fence *fence, struct fence_cb *cb, + fence_func_t func) +{ + unsigned long flags; + int ret = 0; + bool was_set; + + if (WARN_ON(!fence || !func)) + return -EINVAL; + + if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + INIT_LIST_HEAD(&cb->node); + return -ENOENT; + } + + spin_lock_irqsave(fence->lock, flags); + + was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); + + if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + ret = -ENOENT; + else if (!was_set) { + trace_fence_enable_signal(fence); + + if (!fence->ops->enable_signaling(fence)) { + fence_signal_locked(fence); + ret = -ENOENT; + } + } + + if (!ret) { + cb->func = func; + list_add_tail(&cb->node, &fence->cb_list); + } else + INIT_LIST_HEAD(&cb->node); + spin_unlock_irqrestore(fence->lock, flags); + + return ret; +} +EXPORT_SYMBOL(fence_add_callback); + +/** + * fence_remove_callback - remove a callback from the signaling list + * @fence: [in] the fence to wait on + * @cb: [in] the callback to remove + * + * Remove a previously queued callback from the fence. 
This function returns + * true if the callback is successfully removed, or false if the fence has + * already been signaled. + * + * *WARNING*: + * Cancelling a callback should only be done if you really know what you're + * doing, since deadlocks and race conditions could occur all too easily. For + * this reason, it should only ever be done on hardware lockup recovery, + * with a reference held to the fence. + */ +bool +fence_remove_callback(struct fence *fence, struct fence_cb *cb) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(fence->lock, flags); + + ret = !list_empty(&cb->node); + if (ret) + list_del_init(&cb->node); + + spin_unlock_irqrestore(fence->lock, flags); + + return ret; +} +EXPORT_SYMBOL(fence_remove_callback); + +struct default_wait_cb { + struct fence_cb base; + struct task_struct *task; +}; + +static void +fence_default_wait_cb(struct fence *fence, struct fence_cb *cb) +{ + struct default_wait_cb *wait = + container_of(cb, struct default_wait_cb, base); + + wake_up_state(wait->task, TASK_NORMAL); +} + +/** + * fence_default_wait - default sleep until the fence gets signaled + * or until timeout elapses + * @fence: [in] the fence to wait on + * @intr: [in] if true, do an interruptible wait + * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT + * + * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the + * remaining timeout in jiffies on success. + */ +signed long +fence_default_wait(struct fence *fence, bool intr, signed long timeout) +{ + struct default_wait_cb cb; + unsigned long flags; + signed long ret = timeout; + bool was_set; + + if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return timeout; + + spin_lock_irqsave(fence->lock, flags); + + if (intr && signal_pending(current)) { + ret = -ERESTARTSYS; + goto out; + } + + was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); + + if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + goto out; + + if (!was_set) { + trace_fence_enable_signal(fence); + + if (!fence->ops->enable_signaling(fence)) { + fence_signal_locked(fence); + goto out; + } + } + + cb.base.func = fence_default_wait_cb; + cb.task = current; + list_add(&cb.base.node, &fence->cb_list); + + while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) { + if (intr) + __set_current_state(TASK_INTERRUPTIBLE); + else + __set_current_state(TASK_UNINTERRUPTIBLE); + spin_unlock_irqrestore(fence->lock, flags); + + ret = schedule_timeout(ret); + + spin_lock_irqsave(fence->lock, flags); + if (ret > 0 && intr && signal_pending(current)) + ret = -ERESTARTSYS; + } + + if (!list_empty(&cb.base.node)) + list_del(&cb.base.node); + __set_current_state(TASK_RUNNING); + +out: + spin_unlock_irqrestore(fence->lock, flags); + return ret; +} +EXPORT_SYMBOL(fence_default_wait); + +/** + * fence_init - Initialize a custom fence. + * @fence: [in] the fence to initialize + * @ops: [in] the fence_ops for operations on this fence + * @lock: [in] the irqsafe spinlock to use for locking this fence + * @context: [in] the execution context this fence is run on + * @seqno: [in] a linear increasing sequence number for this context + * + * Initializes an allocated fence, the caller doesn't have to keep its + * refcount after committing with this fence, but it will need to hold a + * refcount again if fence_ops.enable_signaling gets called. This can + * be used for implementing other types of fence.
+ * + * context and seqno are used for easy comparison between fences, allowing + * to check which fence is later by simply using fence_later. + */ +void +fence_init(struct fence *fence, const struct fence_ops *ops, + spinlock_t *lock, unsigned context, unsigned seqno) +{ + BUG_ON(!lock); + BUG_ON(!ops || !ops->wait || !ops->enable_signaling || + !ops->get_driver_name || !ops->get_timeline_name); + + kref_init(&fence->refcount); + fence->ops = ops; + INIT_LIST_HEAD(&fence->cb_list); + fence->lock = lock; + fence->context = context; + fence->seqno = seqno; + fence->flags = 0UL; + + trace_fence_init(fence); +} +EXPORT_SYMBOL(fence_init); diff --git a/include/linux/fence.h b/include/linux/fence.h new file mode 100644 index 000000000000..b935cc650123 --- /dev/null +++ b/include/linux/fence.h @@ -0,0 +1,343 @@ +/* + * Fence mechanism for dma-buf to allow for asynchronous dma access + * + * Copyright (C) 2012 Canonical Ltd + * Copyright (C) 2012 Texas Instruments + * + * Authors: + * Rob Clark + * Maarten Lankhorst + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __LINUX_FENCE_H +#define __LINUX_FENCE_H + +#include +#include +#include +#include +#include +#include +#include + +struct fence; +struct fence_ops; +struct fence_cb; + +/** + * struct fence - software synchronization primitive + * @refcount: refcount for this fence + * @ops: fence_ops associated with this fence + * @cb_list: list of all callbacks to call + * @lock: spin_lock_irqsave used for locking + * @context: execution context this fence belongs to, returned by + * fence_context_alloc() + * @seqno: the sequence number of this fence inside the execution context, + * can be compared to decide which fence would be signaled later. + * @flags: A mask of FENCE_FLAG_* defined below + * @timestamp: Timestamp when the fence was signaled. + * @status: Optional, only valid if < 0, must be set before calling + * fence_signal, indicates that the fence has completed with an error. + * + * the flags member must be manipulated and read using the appropriate + * atomic ops (bit_*), so taking the spinlock will not be needed most + * of the time. + * + * FENCE_FLAG_SIGNALED_BIT - fence is already signaled + * FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called* + * FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the + * implementer of the fence for its own purposes. Can be used in different + * ways by different fence implementers, so do not rely on this. + * + * *) Since atomic bitops are used, this is not guaranteed to be the case. + * Particularly, if the bit was set, but fence_signal was called right + * before this bit was set, it would have been able to set the + * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called. + * Adding a check for FENCE_FLAG_SIGNALED_BIT after setting + * FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that + * after fence_signal was called, any enable_signaling call will have either + * been completed, or never called at all. 
+ */ +struct fence { + struct kref refcount; + const struct fence_ops *ops; + struct list_head cb_list; + spinlock_t *lock; + unsigned context, seqno; + unsigned long flags; + ktime_t timestamp; + int status; +}; + +enum fence_flag_bits { + FENCE_FLAG_SIGNALED_BIT, + FENCE_FLAG_ENABLE_SIGNAL_BIT, + FENCE_FLAG_USER_BITS, /* must always be last member */ +}; + +typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb); + +/** + * struct fence_cb - callback for fence_add_callback + * @node: used by fence_add_callback to append this struct to fence::cb_list + * @func: fence_func_t to call + * + * This struct will be initialized by fence_add_callback, additional + * data can be passed along by embedding fence_cb in another struct. + */ +struct fence_cb { + struct list_head node; + fence_func_t func; +}; + +/** + * struct fence_ops - operations implemented for fence + * @get_driver_name: returns the driver name. + * @get_timeline_name: return the name of the context this fence belongs to. + * @enable_signaling: enable software signaling of fence. + * @signaled: [optional] peek whether the fence is signaled, can be null. + * @wait: custom wait implementation, or fence_default_wait. + * @release: [optional] called on destruction of fence, can be null + * @fill_driver_data: [optional] callback to fill in free-form debug info + * Returns amount of bytes filled, or -errno. + * @fence_value_str: [optional] fills in the value of the fence as a string + * @timeline_value_str: [optional] fills in the current value of the timeline + * as a string + * + * Notes on enable_signaling: + * For fence implementations that have the capability for hw->hw + * signaling, they can implement this op to enable the necessary + * irqs, or insert commands into cmdstream, etc. This is called + * in the first wait() or add_callback() path to let the fence + * implementation know that there is another driver waiting on + * the signal (ie. hw->sw case). + * + * This function can be called from atomic context, but not + * from irq context, so normal spinlocks can be used. + * + * A return value of false indicates the fence already passed, + * or some failure occurred that made it impossible to enable + * signaling. True indicates successful enabling. + * + * fence->status may be set in enable_signaling, but only when false is + * returned. + * + * Calling fence_signal before enable_signaling is called allows + * for a tiny race window in which enable_signaling is called during, + * before, or after fence_signal. To fight this, it is recommended + * that before enable_signaling returns true an extra reference is + * taken on the fence, to be released when the fence is signaled. + * This will mean fence_signal will still be called twice, but + * the second time will be a noop since it was already signaled. + * + * Notes on signaled: + * May set fence->status if returning true. + * + * Notes on wait: + * Must not be NULL, set to fence_default_wait for default implementation. + * The fence_default_wait implementation should work for any fence, as long + * as enable_signaling works correctly. + * + * Must return -ERESTARTSYS if the wait is intr = true and the wait was + * interrupted, and remaining jiffies if fence has signaled, or 0 if wait + * timed out. Can also return other error values on custom implementations, + * which should be treated as if the fence is signaled. For example a hardware + * lockup could be reported like that.
+ * + * Notes on release: + * Can be NULL, this function allows additional commands to run on + * destruction of the fence. Can be called from irq context. + * If pointer is set to NULL, kfree will get called instead. + */ + +struct fence_ops { + const char * (*get_driver_name)(struct fence *fence); + const char * (*get_timeline_name)(struct fence *fence); + bool (*enable_signaling)(struct fence *fence); + bool (*signaled)(struct fence *fence); + signed long (*wait)(struct fence *fence, bool intr, signed long timeout); + void (*release)(struct fence *fence); + + int (*fill_driver_data)(struct fence *fence, void *data, int size); + void (*fence_value_str)(struct fence *fence, char *str, int size); + void (*timeline_value_str)(struct fence *fence, char *str, int size); +}; + +void fence_init(struct fence *fence, const struct fence_ops *ops, + spinlock_t *lock, unsigned context, unsigned seqno); + +void fence_release(struct kref *kref); +void fence_free(struct fence *fence); + +/** + * fence_get - increases refcount of the fence + * @fence: [in] fence to increase refcount of + * + * Returns the same fence, with refcount increased by 1. + */ +static inline struct fence *fence_get(struct fence *fence) +{ + if (fence) + kref_get(&fence->refcount); + return fence; +} + +/** + * fence_put - decreases refcount of the fence + * @fence: [in] fence to reduce refcount of + */ +static inline void fence_put(struct fence *fence) +{ + if (fence) + kref_put(&fence->refcount, fence_release); +} + +int fence_signal(struct fence *fence); +int fence_signal_locked(struct fence *fence); +signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout); +int fence_add_callback(struct fence *fence, struct fence_cb *cb, + fence_func_t func); +bool fence_remove_callback(struct fence *fence, struct fence_cb *cb); +void fence_enable_sw_signaling(struct fence *fence); + +/** + * fence_is_signaled_locked - Return an indication if the fence is signaled yet. + * @fence: [in] the fence to check + * + * Returns true if the fence was already signaled, false if not. Since this + * function doesn't enable signaling, it is not guaranteed to ever return + * true if fence_add_callback, fence_wait or fence_enable_sw_signaling + * haven't been called before. + * + * This function requires fence->lock to be held. + */ +static inline bool +fence_is_signaled_locked(struct fence *fence) +{ + if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return true; + + if (fence->ops->signaled && fence->ops->signaled(fence)) { + fence_signal_locked(fence); + return true; + } + + return false; +} + +/** + * fence_is_signaled - Return an indication if the fence is signaled yet. + * @fence: [in] the fence to check + * + * Returns true if the fence was already signaled, false if not. Since this + * function doesn't enable signaling, it is not guaranteed to ever return + * true if fence_add_callback, fence_wait or fence_enable_sw_signaling + * haven't been called before. + * + * It's recommended for seqno fences to call fence_signal when the + * operation is complete, it makes it possible to prevent issues from + * wraparound between time of issue and time of use by checking the return + * value of this function before calling hardware-specific wait instructions. 
+ */ +static inline bool +fence_is_signaled(struct fence *fence) +{ + if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return true; + + if (fence->ops->signaled && fence->ops->signaled(fence)) { + fence_signal(fence); + return true; + } + + return false; +} + +/** + * fence_later - return the chronologically later fence + * @f1: [in] the first fence from the same context + * @f2: [in] the second fence from the same context + * + * Returns NULL if both fences are signaled, otherwise the fence that would be + * signaled last. Both fences must be from the same context, since a seqno is + * not re-used across contexts. + */ +static inline struct fence *fence_later(struct fence *f1, struct fence *f2) +{ + if (WARN_ON(f1->context != f2->context)) + return NULL; + + /* + * can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been + * set if enable_signaling wasn't called, and enabling that here is + * overkill. + */ + if (f2->seqno - f1->seqno <= INT_MAX) + return fence_is_signaled(f2) ? NULL : f2; + else + return fence_is_signaled(f1) ? NULL : f1; +} + +signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout); + + +/** + * fence_wait - sleep until the fence gets signaled + * @fence: [in] the fence to wait on + * @intr: [in] if true, do an interruptible wait + * + * This function will return -ERESTARTSYS if interrupted by a signal, + * or 0 if the fence was signaled. Other error values may be + * returned on custom implementations. + * + * Performs a synchronous wait on this fence. It is assumed the caller + * directly or indirectly holds a reference to the fence, otherwise the + * fence might be freed before return, resulting in undefined behavior. + */ +static inline signed long fence_wait(struct fence *fence, bool intr) +{ + signed long ret; + + /* Since fence_wait_timeout cannot timeout with + * MAX_SCHEDULE_TIMEOUT, only valid return values are + * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT. + */ + ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); + + return ret < 0 ? ret : 0; +} + +unsigned fence_context_alloc(unsigned num); + +#define FENCE_TRACE(f, fmt, args...) \ + do { \ + struct fence *__ff = (f); \ + if (config_enabled(CONFIG_FENCE_TRACE)) \ + pr_info("f %u#%u: " fmt, \ + __ff->context, __ff->seqno, ##args); \ + } while (0) + +#define FENCE_WARN(f, fmt, args...) \ + do { \ + struct fence *__ff = (f); \ + pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \ + ##args); \ + } while (0) + +#define FENCE_ERR(f, fmt, args...) \ + do { \ + struct fence *__ff = (f); \ + pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \ + ##args); \ + } while (0) + +#endif /* __LINUX_FENCE_H */ diff --git a/include/trace/events/fence.h b/include/trace/events/fence.h new file mode 100644 index 000000000000..98feb1b82896 --- /dev/null +++ b/include/trace/events/fence.h @@ -0,0 +1,128 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM fence + +#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_FENCE_H + +#include + +struct fence; + +TRACE_EVENT(fence_annotate_wait_on, + + /* fence: the fence waiting on f1, f1: the fence to be waited on. 
*/ + TP_PROTO(struct fence *fence, struct fence *f1), + + TP_ARGS(fence, f1), + + TP_STRUCT__entry( + __string(driver, fence->ops->get_driver_name(fence)) + __string(timeline, fence->ops->get_driver_name(fence)) + __field(unsigned int, context) + __field(unsigned int, seqno) + + __string(waiting_driver, f1->ops->get_driver_name(f1)) + __string(waiting_timeline, f1->ops->get_timeline_name(f1)) + __field(unsigned int, waiting_context) + __field(unsigned int, waiting_seqno) + ), + + TP_fast_assign( + __assign_str(driver, fence->ops->get_driver_name(fence)) + __assign_str(timeline, fence->ops->get_timeline_name(fence)) + __entry->context = fence->context; + __entry->seqno = fence->seqno; + + __assign_str(waiting_driver, f1->ops->get_driver_name(f1)) + __assign_str(waiting_timeline, f1->ops->get_timeline_name(f1)) + __entry->waiting_context = f1->context; + __entry->waiting_seqno = f1->seqno; + + ), + + TP_printk("driver=%s timeline=%s context=%u seqno=%u " \ + "waits on driver=%s timeline=%s context=%u seqno=%u", + __get_str(driver), __get_str(timeline), __entry->context, + __entry->seqno, + __get_str(waiting_driver), __get_str(waiting_timeline), + __entry->waiting_context, __entry->waiting_seqno) +); + +DECLARE_EVENT_CLASS(fence, + + TP_PROTO(struct fence *fence), + + TP_ARGS(fence), + + TP_STRUCT__entry( + __string(driver, fence->ops->get_driver_name(fence)) + __string(timeline, fence->ops->get_timeline_name(fence)) + __field(unsigned int, context) + __field(unsigned int, seqno) + ), + + TP_fast_assign( + __assign_str(driver, fence->ops->get_driver_name(fence)) + __assign_str(timeline, fence->ops->get_timeline_name(fence)) + __entry->context = fence->context; + __entry->seqno = fence->seqno; + ), + + TP_printk("driver=%s timeline=%s context=%u seqno=%u", + __get_str(driver), __get_str(timeline), __entry->context, + __entry->seqno) +); + +DEFINE_EVENT(fence, fence_emit, + + TP_PROTO(struct fence *fence), + + TP_ARGS(fence) +); + +DEFINE_EVENT(fence, fence_init, + + TP_PROTO(struct fence *fence), + + TP_ARGS(fence) +); + +DEFINE_EVENT(fence, fence_destroy, + + TP_PROTO(struct fence *fence), + + TP_ARGS(fence) +); + +DEFINE_EVENT(fence, fence_enable_signal, + + TP_PROTO(struct fence *fence), + + TP_ARGS(fence) +); + +DEFINE_EVENT(fence, fence_signaled, + + TP_PROTO(struct fence *fence), + + TP_ARGS(fence) +); + +DEFINE_EVENT(fence, fence_wait_start, + + TP_PROTO(struct fence *fence), + + TP_ARGS(fence) +); + +DEFINE_EVENT(fence, fence_wait_end, + + TP_PROTO(struct fence *fence), + + TP_ARGS(fence) +); + +#endif /* _TRACE_FENCE_H */ + +/* This part must be outside protection */ +#include -- cgit v1.2.3 From 5a1379e8748a5cfa3eb068f812d61bde849ef76c Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 4 Jun 2014 17:48:15 +0200 Subject: firmware loader: allow disabling of udev as firmware loader [The patch was originally proposed by Tom Gundersen, and rewritten afterwards by me; most of changelogs below borrowed from Tom's original patch -- tiwai] Currently (at least) the dell-rbu driver selects FW_LOADER_USER_HELPER, which means that distros can't really stop loading firmware through udev without breaking other users (though some have). Ideally we would remove/disable the udev firmware helper in both the kernel and in udev, but if we were to disable it in udev and not the kernel, the result would be (seemingly) hung kernels as no one would be around to cancel firmware requests. 
This patch allows udev firmware loading to be disabled while still allowing non-udev firmware loading, as done by the dell-rbu driver, to continue working. This is achieved by only using the fallback mechanism when the uevent is suppressed. The patch renames the user-selectable Kconfig from FW_LOADER_USER_HELPER to FW_LOADER_USER_HELPER_FALLBACK, and the former is reverse-selected by the latter or the drivers that need userhelper like dell-rbu. Also, the "default y" is removed together with this change, since it's been deprecated in udev upstream, thus rather better to disable it nowadays. Tested with FW_LOADER_USER_HELPER=n LATTICE_ECP3_CONFIG=y DELL_RBU=y and udev without the firmware loading support, but I don't have the hardware to test the lattice/dell drivers, so additional testing would be appreciated. Reviewed-by: Tom Gundersen Cc: Ming Lei Cc: Abhay Salunke Cc: Stefan Roese Cc: Arnd Bergmann Cc: Kay Sievers Tested-by: Balaji Singh Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- drivers/base/Kconfig | 10 ++++++++-- drivers/base/firmware_class.c | 15 ++++++++++----- include/linux/firmware.h | 2 +- 3 files changed, 19 insertions(+), 8 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 00e13ce5cbbd..88500fed3c7a 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -149,15 +149,21 @@ config EXTRA_FIRMWARE_DIR some other directory containing the firmware files. config FW_LOADER_USER_HELPER + bool + +config FW_LOADER_USER_HELPER_FALLBACK bool "Fallback user-helper invocation for firmware loading" depends on FW_LOADER - default y + select FW_LOADER_USER_HELPER help This option enables / disables the invocation of user-helper (e.g. udev) for loading firmware files as a fallback after the direct file loading in kernel fails. The user-mode helper is no longer required unless you have a special firmware file that - resides in a non-standard path. + resides in a non-standard path. Moreover, the udev support has + been deprecated upstream. + + If you are unsure about this, say N here. 
config DEBUG_DRIVER bool "Driver Core verbose debug messages" diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index d276e33880be..46ea5f4c3bb5 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -100,9 +100,14 @@ static inline long firmware_loading_timeout(void) #define FW_OPT_UEVENT (1U << 0) #define FW_OPT_NOWAIT (1U << 1) #ifdef CONFIG_FW_LOADER_USER_HELPER -#define FW_OPT_FALLBACK (1U << 2) +#define FW_OPT_USERHELPER (1U << 2) #else -#define FW_OPT_FALLBACK 0 +#define FW_OPT_USERHELPER 0 +#endif +#ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK +#define FW_OPT_FALLBACK FW_OPT_USERHELPER +#else +#define FW_OPT_FALLBACK 0 #endif struct firmware_cache { @@ -1111,7 +1116,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name, ret = fw_get_filesystem_firmware(device, fw->priv); if (ret) { - if (opt_flags & FW_OPT_FALLBACK) { + if (opt_flags & FW_OPT_USERHELPER) { dev_warn(device, "Direct firmware load failed with error %d\n", ret); @@ -1171,7 +1176,7 @@ request_firmware(const struct firmware **firmware_p, const char *name, } EXPORT_SYMBOL(request_firmware); -#ifdef CONFIG_FW_LOADER_USER_HELPER +#ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK /** * request_firmware: - load firmware directly without usermode helper * @firmware_p: pointer to firmware image @@ -1277,7 +1282,7 @@ request_firmware_nowait( fw_work->context = context; fw_work->cont = cont; fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK | - (uevent ? FW_OPT_UEVENT : 0); + (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER); if (!try_module_get(module)) { kfree(fw_work); diff --git a/include/linux/firmware.h b/include/linux/firmware.h index 59529330efd6..67e5b801af0c 100644 --- a/include/linux/firmware.h +++ b/include/linux/firmware.h @@ -68,7 +68,7 @@ static inline void release_firmware(const struct firmware *fw) #endif -#ifdef CONFIG_FW_LOADER_USER_HELPER +#ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK int request_firmware_direct(const struct firmware **fw, const char *name, struct device *device); #else -- cgit v1.2.3 From 6af6b163b3191bd2802868f17d4843c58bc4046f Mon Sep 17 00:00:00 2001 From: Dmitry Kasatkin Date: Fri, 13 Jun 2014 18:09:54 +0300 Subject: firmware: read firmware size using i_size_read() There is no need to read attr because inode structure contains size of the file. Use i_size_read() instead. 
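The replacement pattern boils down to the following (a sketch mirroring the new fw_read_file_contents() hunk below, not a verbatim quote):

        struct inode *inode = file_inode(file);
        int size;

        if (!S_ISREG(inode->i_mode))    /* only regular files carry firmware */
                return -EINVAL;
        size = i_size_read(inode);      /* size comes straight from the inode */
        if (size <= 0)
                return -EINVAL;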
Signed-off-by: Dmitry Kasatkin Acked-by: Ming Lei Signed-off-by: Greg Kroah-Hartman --- drivers/base/firmware_class.c | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 46ea5f4c3bb5..304691c5e2bc 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -284,26 +284,15 @@ static const char * const fw_path[] = { module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644); MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path"); -/* Don't inline this: 'struct kstat' is biggish */ -static noinline_for_stack int fw_file_size(struct file *file) -{ - struct kstat st; - if (vfs_getattr(&file->f_path, &st)) - return -1; - if (!S_ISREG(st.mode)) - return -1; - if (st.size != (int)st.size) - return -1; - return st.size; -} - static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf) { int size; char *buf; int rc; - size = fw_file_size(file); + if (!S_ISREG(file_inode(file)->i_mode)) + return -EINVAL; + size = i_size_read(file_inode(file)); if (size <= 0) return -EINVAL; buf = vmalloc(size); -- cgit v1.2.3 From a76040d835776f0e8cc2bf5f9edcfa2092449ae9 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Tue, 1 Jul 2014 21:13:30 +0200 Subject: firmware: replace ALIGN(PAGE_SIZE) by PAGE_ALIGN use mm.h definition Cc: Ming Lei Signed-off-by: Fabian Frederick Signed-off-by: Greg Kroah-Hartman --- drivers/base/firmware_class.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/base') diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 304691c5e2bc..28bc6db9fbf4 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -712,7 +712,7 @@ out: static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size) { struct firmware_buf *buf = fw_priv->buf; - int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT; + int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT; /* If the array of pages is too small, grow it... */ if (buf->page_array_size < pages_needed) { -- cgit v1.2.3 From c868edf42b4db89907b467c92b7f035c8c1cb0e5 Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Wed, 2 Jul 2014 09:55:05 -0700 Subject: firmware loader: inform direct failure when udev loader is disabled Now that the udev firmware loader is optional request_firmware() will not provide any information on the kernel ring buffer if direct firmware loading failed and udev firmware loading is disabled. If no information is needed request_firmware_direct() should be used for optional firmware, at which point drivers can take on the onus over informing of any failures, if udev firmware loading is disabled though we should at the very least provide some sort of information as when the udev loader was enabled by default back in the days. With this change with a simple firmware load test module [0]: Example output without FW_LOADER_USER_HELPER_FALLBACK platform fake-dev.0: Direct firmware load for fake.bin failed with error -2 Example with FW_LOADER_USER_HELPER_FALLBACK platform fake-dev.0: Direct firmware load for fake.bin failed with error -2 platform fake-dev.0: Falling back to user helper Without this change without FW_LOADER_USER_HELPER_FALLBACK we get no output logged upon failure. Cc: Tom Gundersen Cc: Ming Lei Cc: Abhay Salunke Cc: Stefan Roese Cc: Arnd Bergmann Cc: Kay Sievers Signed-off-by: Luis R. 
Rodriguez Reviewed-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- drivers/base/firmware_class.c | 13 +++++++------ include/linux/firmware.h | 15 ++++++++------- 2 files changed, 15 insertions(+), 13 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 28bc6db9fbf4..124d50ceb116 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -109,6 +109,7 @@ static inline long firmware_loading_timeout(void) #else #define FW_OPT_FALLBACK 0 #endif +#define FW_OPT_NO_WARN (1U << 3) struct firmware_cache { /* firmware_buf instance will be added into the below list */ @@ -1105,10 +1106,11 @@ _request_firmware(const struct firmware **firmware_p, const char *name, ret = fw_get_filesystem_firmware(device, fw->priv); if (ret) { - if (opt_flags & FW_OPT_USERHELPER) { + if (!(opt_flags & FW_OPT_NO_WARN)) dev_warn(device, - "Direct firmware load failed with error %d\n", - ret); + "Direct firmware load for %s failed with error %d\n", + name, ret); + if (opt_flags & FW_OPT_USERHELPER) { dev_warn(device, "Falling back to user helper\n"); ret = fw_load_from_user_helper(fw, name, device, opt_flags, timeout); @@ -1165,7 +1167,6 @@ request_firmware(const struct firmware **firmware_p, const char *name, } EXPORT_SYMBOL(request_firmware); -#ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK /** * request_firmware: - load firmware directly without usermode helper * @firmware_p: pointer to firmware image @@ -1182,12 +1183,12 @@ int request_firmware_direct(const struct firmware **firmware_p, { int ret; __module_get(THIS_MODULE); - ret = _request_firmware(firmware_p, name, device, FW_OPT_UEVENT); + ret = _request_firmware(firmware_p, name, device, + FW_OPT_UEVENT | FW_OPT_NO_WARN); module_put(THIS_MODULE); return ret; } EXPORT_SYMBOL_GPL(request_firmware_direct); -#endif /** * release_firmware: - release the resource associated with a firmware image diff --git a/include/linux/firmware.h b/include/linux/firmware.h index 67e5b801af0c..5c41c5e75b5c 100644 --- a/include/linux/firmware.h +++ b/include/linux/firmware.h @@ -45,6 +45,8 @@ int request_firmware_nowait( struct module *module, bool uevent, const char *name, struct device *device, gfp_t gfp, void *context, void (*cont)(const struct firmware *fw, void *context)); +int request_firmware_direct(const struct firmware **fw, const char *name, + struct device *device); void release_firmware(const struct firmware *fw); #else @@ -66,13 +68,12 @@ static inline void release_firmware(const struct firmware *fw) { } -#endif +static inline int request_firmware_direct(const struct firmware **fw, + const char *name, + struct device *device) +{ + return -EINVAL; +} -#ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK -int request_firmware_direct(const struct firmware **fw, const char *name, - struct device *device); -#else -#define request_firmware_direct request_firmware #endif - #endif -- cgit v1.2.3 From 1cec24c59b4a133fc9c83912996168f511075485 Mon Sep 17 00:00:00 2001 From: Yann Droneaud Date: Fri, 30 May 2014 22:02:47 +0200 Subject: driver core/platform: remove unused implicit padding in platform_object MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Up to 7 bytes are wasted at the end of struct platform_object in the form of padding after name field: unfortunately this padding is not used when allocating the memory to hold the name. 
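Sketched out, the before/after allocation pattern looks like this (the real change lands in platform_device_alloc(), see the diff below):

        /* before: name[1] is counted inside sizeof(), and the struct may gain
         * up to 7 bytes of tail padding that the allocation never uses */
        pa = kzalloc(sizeof(struct platform_object) + strlen(name), GFP_KERNEL);

        /* after: the flexible array member adds no size and no padding, so one
         * extra byte is allocated explicitly for the terminating NUL */
        pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);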
This patch converts name array from name[1] to C99 flexible array name[] (equivalent to name[0]) so that no padding is required by the presence of this field. Memory allocation is updated to take care of allocating an additional byte for the NUL terminating character. Built on Fedora 20, using GCC 4.8, for ARM, i386, SPARC64 and x86_64 architectures, the data structure layout can be reported with following command: $ pahole drivers/base/platform.o \ --recursive \ --class_name device,pdev_archdata,platform_device,platform_object Please find below some comparisons of structure layout for arm, i386, sparc64 and x86_64 architecture before and after the patch. --- obj-arm/drivers/base/platform.o.pahole.v3.15-rc7-79-gfe45736f4134 2014-05-30 10:32:06.290960701 +0200 +++ obj-arm/drivers/base/platform.o.pahole.v3.15-rc7-80-g2cdb06858d71 2014-05-30 11:26:20.851988347 +0200 @@ -81,10 +81,9 @@ /* XXX last struct has 4 bytes of padding */ /* --- cacheline 6 boundary (384 bytes) was 8 bytes ago --- */ - char name[1]; /* 392 1 */ + char name[0]; /* 392 0 */ - /* size: 400, cachelines: 7, members: 2 */ - /* padding: 7 */ + /* size: 392, cachelines: 7, members: 2 */ /* paddings: 1, sum paddings: 4 */ - /* last cacheline: 16 bytes */ + /* last cacheline: 8 bytes */ }; --- obj-i386/drivers/base/platform.o.pahole.v3.15-rc7-79-gfe45736f4134 2014-05-30 10:32:06.305960691 +0200 +++ obj-i386/drivers/base/platform.o.pahole.v3.15-rc7-80-g2cdb06858d71 2014-05-30 11:26:20.875988332 +0200 @@ -73,9 +73,8 @@ struct platform_object { struct platform_device pdev; /* 0 396 */ /* --- cacheline 6 boundary (384 bytes) was 12 bytes ago --- */ - char name[1]; /* 396 1 */ + char name[0]; /* 396 0 */ - /* size: 400, cachelines: 7, members: 2 */ - /* padding: 3 */ - /* last cacheline: 16 bytes */ + /* size: 396, cachelines: 7, members: 2 */ + /* last cacheline: 12 bytes */ }; --- obj-sparc64/drivers/base/platform.o.pahole.v3.15-rc7-79-gfe45736f4134 2014-05-30 10:32:06.406960625 +0200 +++ obj-sparc64/drivers/base/platform.o.pahole.v3.15-rc7-80-g2cdb06858d71 2014-05-30 11:26:20.971988269 +0200 @@ -94,9 +94,8 @@ struct platform_object { struct platform_device pdev; /* 0 2208 */ /* --- cacheline 34 boundary (2176 bytes) was 32 bytes ago --- */ - char name[1]; /* 2208 1 */ + char name[0]; /* 2208 0 */ - /* size: 2216, cachelines: 35, members: 2 */ - /* padding: 7 */ - /* last cacheline: 40 bytes */ + /* size: 2208, cachelines: 35, members: 2 */ + /* last cacheline: 32 bytes */ }; --- obj-x86_64/drivers/base/platform.o.pahole.v3.15-rc7-79-gfe45736f4134 2014-05-30 10:32:06.432960608 +0200 +++ obj-x86_64/drivers/base/platform.o.pahole.v3.15-rc7-80-g2cdb06858d71 2014-05-30 11:26:21.000988250 +0200 @@ -84,9 +84,8 @@ struct platform_object { struct platform_device pdev; /* 0 720 */ /* --- cacheline 11 boundary (704 bytes) was 16 bytes ago --- */ - char name[1]; /* 720 1 */ + char name[0]; /* 720 0 */ - /* size: 728, cachelines: 12, members: 2 */ - /* padding: 7 */ - /* last cacheline: 24 bytes */ + /* size: 720, cachelines: 12, members: 2 */ + /* last cacheline: 16 bytes */ }; Changes from v5 [1]: - dropped dma_mask allocation changes and only kept padding removal changes (name array length set to 0). Changes from v4 [2]: [by Emil Goode :] - Split v4 of the patch into two separate patches. - Generated new object file size and data structure layout info. - Updated the changelog message. Changes from v3 [3]: - fixed commit message so that git am doesn't fail. 
Changes from v2 [4]: - move 'dma_mask' to platform_object so that it's always allocated and won't leak on release; remove all previously added support functions. - use C99 flexible array member for 'name' to remove padding at the end of platform_object. Changes from v1 [5]: - remove unneeded kfree() from error path - add reference to author/commit adding allocation of dmamask Changes from v0 [6]: - small rewrite to squeeze the patch to a bare minimal [1] http://lkml.kernel.org/r/1401122483-31603-2-git-send-email-emilgoode@gmail.com http://lkml.kernel.org/r/1401122483-31603-1-git-send-email-emilgoode@gmail.com http://lkml.kernel.org/r/1401122483-31603-3-git-send-email-emilgoode@gmail.com [2] http://lkml.kernel.org/r/1390817152-30898-1-git-send-email-ydroneaud@opteya.com https://patchwork.kernel.org/patch/3541871/ [3] http://lkml.kernel.org/r/1390771138-28348-1-git-send-email-ydroneaud@opteya.com https://patchwork.kernel.org/patch/3540081/ [4] http://lkml.kernel.org/r/1389683909-17495-1-git-send-email-ydroneaud@opteya.com https://patchwork.kernel.org/patch/3484411/ [5] http://lkml.kernel.org/r/1389649085-7365-1-git-send-email-ydroneaud@opteya.com https://patchwork.kernel.org/patch/3480961/ [6] http://lkml.kernel.org/r/1386886207-2735-1-git-send-email-ydroneaud@opteya.com Cc: Emil Goode Cc: Dan Carpenter Cc: Shawn Guo Cc: Sascha Hauer Cc: Russell King Cc: Olof Johansson Cc: Uwe Kleine-König Cc: Dmitry Torokhov Signed-off-by: Yann Droneaud Acked-by: Uwe Kleine-König Signed-off-by: Greg Kroah-Hartman --- drivers/base/platform.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 9e9227e1762d..c48c4acb9b87 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -165,7 +165,7 @@ EXPORT_SYMBOL_GPL(platform_add_devices); struct platform_object { struct platform_device pdev; - char name[1]; + char name[]; }; /** @@ -206,7 +206,7 @@ struct platform_device *platform_device_alloc(const char *name, int id) { struct platform_object *pa; - pa = kzalloc(sizeof(struct platform_object) + strlen(name), GFP_KERNEL); + pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL); if (pa) { strcpy(pa->name, name); pa->pdev.name = pa->name; -- cgit v1.2.3 From 3d713e0e382e6fcfb4bba1501645b66c129ad60b Mon Sep 17 00:00:00 2001 From: Kim Phillips Date: Mon, 2 Jun 2014 19:42:58 -0500 Subject: driver core: platform: add device binding path 'driver_override' Needed by platform device drivers, such as the upcoming vfio-platform driver, in order to bypass the existing OF, ACPI, id_table and name string matches, and successfully be able to be bound to any device, like so: echo vfio-platform > /sys/bus/platform/devices/fff51000.ethernet/driver_override echo fff51000.ethernet > /sys/bus/platform/devices/fff51000.ethernet/driver/unbind echo fff51000.ethernet > /sys/bus/platform/drivers_probe This mimics "PCI: Introduce new device binding path using pci_dev.driver_override", which is an interface enhancement for more deterministic PCI device binding, e.g., when in the presence of hotplug. 
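Conceptually the override is a single early-out in the bus match path; a sketch of what the platform_match() hunk below does:

        /* When driver_override is set, only bind to the matching driver */
        if (pdev->driver_override)
                return !strcmp(pdev->driver_override, drv->name);

        /* otherwise fall through to OF, ACPI, id_table and name matching */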
Reviewed-by: Alex Williamson Reviewed-by: Alexander Graf Reviewed-by: Stuart Yoder Signed-off-by: Kim Phillips Signed-off-by: Greg Kroah-Hartman --- Documentation/ABI/testing/sysfs-bus-platform | 20 ++++++++++++ drivers/base/platform.c | 47 ++++++++++++++++++++++++++++ include/linux/platform_device.h | 1 + 3 files changed, 68 insertions(+) create mode 100644 Documentation/ABI/testing/sysfs-bus-platform (limited to 'drivers/base') diff --git a/Documentation/ABI/testing/sysfs-bus-platform b/Documentation/ABI/testing/sysfs-bus-platform new file mode 100644 index 000000000000..5172a6124b27 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-platform @@ -0,0 +1,20 @@ +What: /sys/bus/platform/devices/.../driver_override +Date: April 2014 +Contact: Kim Phillips +Description: + This file allows the driver for a device to be specified, + overriding standard OF, ACPI, ID table, and name matching. + When specified, only a driver with a name matching the value + written to driver_override will have an opportunity to bind + to the device. The override is specified by writing a string + to the driver_override file (echo vfio-platform > \ + driver_override) and may be cleared with an empty string + (echo > driver_override). This returns the device to the + standard matching rules for binding. Writing to driver_override does not + automatically unbind the device from its current driver or make + any attempt to automatically load the specified driver. If no + driver with a matching name is currently loaded in the kernel, + the device will not bind to any driver. This also allows + devices to opt out of driver binding using a driver_override + name such as "none". Only a single driver may be specified in + the override; there is no support for parsing delimiters.
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index c48c4acb9b87..148f66a1d49a 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -23,6 +23,7 @@ #include #include #include +#include #include "base.h" #include "power/power.h" @@ -191,6 +192,7 @@ static void platform_device_release(struct device *dev) kfree(pa->pdev.dev.platform_data); kfree(pa->pdev.mfd_cell); kfree(pa->pdev.resource); + kfree(pa->pdev.driver_override); kfree(pa); } @@ -698,8 +700,49 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a, } static DEVICE_ATTR_RO(modalias); +static ssize_t driver_override_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = to_platform_device(dev); + char *driver_override, *old = pdev->driver_override, *cp; + + if (count > PATH_MAX) + return -EINVAL; + + driver_override = kstrndup(buf, count, GFP_KERNEL); + if (!driver_override) + return -ENOMEM; + + cp = strchr(driver_override, '\n'); + if (cp) + *cp = '\0'; + + if (strlen(driver_override)) { + pdev->driver_override = driver_override; + } else { + kfree(driver_override); + pdev->driver_override = NULL; + } + + kfree(old); + + return count; +} + +static ssize_t driver_override_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + + return sprintf(buf, "%s\n", pdev->driver_override); +} +static DEVICE_ATTR_RW(driver_override); + + static struct attribute *platform_dev_attrs[] = { &dev_attr_modalias.attr, + &dev_attr_driver_override.attr, NULL, }; ATTRIBUTE_GROUPS(platform_dev); @@ -755,6 +798,10 @@ static int platform_match(struct device *dev, struct device_driver *drv) struct platform_device *pdev = to_platform_device(dev); struct platform_driver *pdrv = to_platform_driver(drv); + /* When driver_override is set, only bind to the matching driver */ + if (pdev->driver_override) + return !strcmp(pdev->driver_override, drv->name); + /* Attempt an OF style match first */ if (of_driver_match_device(dev, drv)) return 1; diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 16f6654082dd..153d303af7eb 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -28,6 +28,7 @@ struct platform_device { struct resource *resource; const struct platform_device_id *id_entry; + char *driver_override; /* Driver name to force a match */ /* MFD cell pointer */ struct mfd_cell *mfd_cell; -- cgit v1.2.3 From 0542ad88fbdd81bbd53f48bff981a9867e0f0659 Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Tue, 22 Jul 2014 18:10:38 -0600 Subject: firmware loader: Fix _request_firmware_load() return val for fw load abort _request_firmware_load() returns -ENOMEM when fw load is aborted after timeout. Call is_fw_load_aborted() to check if fw load is aborted and if true return -EAGAIN. 
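A sketch of the resulting completion logic, with the two failure modes annotated (mirrors the hunk below):

        wait_for_completion(&buf->completion);
        cancel_delayed_work_sync(&fw_priv->timeout_work);

        if (is_fw_load_aborted(buf))
                retval = -EAGAIN;       /* load was aborted, e.g. on timeout: retryable */
        else if (!buf->data)
                retval = -ENOMEM;       /* helper finished but never provided an image */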
Signed-off-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- drivers/base/firmware_class.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/base') diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 124d50ceb116..da77791793f1 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -906,7 +906,9 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, wait_for_completion(&buf->completion); cancel_delayed_work_sync(&fw_priv->timeout_work); - if (!buf->data) + if (is_fw_load_aborted(buf)) + retval = -EAGAIN; + else if (!buf->data) retval = -ENOMEM; device_remove_file(f_dev, &dev_attr_loading); -- cgit v1.2.3