/*
 * Copyright (C) 2007 Jens Axboe
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/module.h>
#include <linux/scatterlist.h>
#include "vmwgfx_compat.h"

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	The sg table header to use
 * @pages:	Pointer to an array of page pointers
 * @n_pages:	Number of pages in the pages array
 * @offset:	Offset from start of the first page to the start of a buffer
 * @size:	Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node. A user
 *    may provide an offset at a start and a size of valid data in a buffer
 *    specified by the page array. The returned sg table is released by
 *    sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt,
			      struct page **pages, unsigned int n_pages,
			      unsigned long offset, unsigned long size,
			      gfp_t gfp_mask)
{
	unsigned int chunks;
	unsigned int i;
	unsigned int cur_page;
	int ret;
	struct scatterlist *s;

	/* compute number of contiguous chunks */
	chunks = 1;
	for (i = 1; i < n_pages; ++i)
		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
			++chunks;

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long chunk_size;
		unsigned int j;

		/* look for the end of the current chunk */
		for (j = cur_page + 1; j < n_pages; ++j)
			if (page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
#endif
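/*
 * Illustrative sketch only (not part of the original compat code): one way a
 * caller might combine the sg_alloc_table_from_pages() backport with the
 * sg_page_iter helpers above. The function name and the zero offset are
 * invented for the example; the block is compiled out.
 */
#if 0
static int vmw_compat_sg_example(struct page **pages, unsigned int n_pages)
{
	struct sg_table sgt;
	struct sg_page_iter iter;
	int ret;

	/* Physically contiguous pages are merged into single sg entries. */
	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, 0,
					(unsigned long)n_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		return ret;

	/* Revisit the table one page at a time via the backported iterator. */
	__sg_page_iter_start(&iter, sgt.sgl, sgt.orig_nents, 0);
	while (__sg_page_iter_next(&iter))
		;	/* each iteration corresponds to one page of the table */

	sg_free_table(&sgt);
	return 0;
}
#endif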
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	/* p = kmalloc_track_caller(len + 1, GFP_KERNEL); */
	p = kmalloc(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
#endif

#ifdef DMA_BUF_STANDALONE
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>

static int dma_buf_release(struct inode *inode, struct file *file);

static const struct file_operations dma_buf_fops = {
	.release = dma_buf_release,
};

static int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

static int dma_buf_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	dmabuf->ops->release(dmabuf);
	kfree(dmabuf);
	return 0;
}

struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
			       size_t size, int flags)
{
	struct dma_buf *dmabuf;
	struct file *file;

	dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL);
	if (dmabuf == NULL)
		return ERR_PTR(-ENOMEM);

	dmabuf->priv = priv;
	dmabuf->ops = ops;

	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
	if (IS_ERR(file)) {
		kfree(dmabuf);
		return ERR_CAST(file);
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	return dmabuf;
}

void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}

struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}

int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
#endif

#ifdef VMW_COMPAT_FD_FLAGS
static void compat_set_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	FD_SET(fd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);
}

int vmw_get_unused_fd_flags(unsigned int flags)
{
	int fd = get_unused_fd();

	WARN_ON_ONCE(flags != O_CLOEXEC);

	if (fd < 0)
		return fd;

	/*
	 * We explicitly set close_on_exec here since the function
	 * get_unused_fd_flags() which is used in the core dma-buf
	 * implementation is not exported on early 3 series kernels.
	 */
	compat_set_close_on_exec(fd);

	return fd;
}
#endif
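/*
 * Illustrative sketch only (not part of the original code): how the standalone
 * dma_buf_export()/dma_buf_fd()/dma_buf_put() shims above could be used to
 * hand a buffer out as a file descriptor. The ops table, release callback and
 * function names are stand-ins invented for the example; the block is
 * compiled out.
 */
#if 0
static void example_dma_buf_release(struct dma_buf *dmabuf)
{
	/* Free whatever backing storage dmabuf->priv refers to. */
}

static const struct dma_buf_ops example_dma_buf_ops = {
	.release = example_dma_buf_release,
};

static int example_export_as_fd(void *priv, size_t size)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = dma_buf_export(priv, &example_dma_buf_ops, size, O_CLOEXEC);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);	/* drop the reference taken at export */

	return fd;
}
#endif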
#ifdef VMW_IDA_SIMPLE
static DEFINE_SPINLOCK(simple_ida_lock);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x80000000)
 * @end: the maximum id (exclusive, < 0x80000000 or 0)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Compared to ida_get_new_above() this function does its own locking, and
 * should be used unless there are special requirements.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 *
 * Use to release an id allocated with ida_simple_get().
 *
 * Compared to ida_remove() this function does its own locking, and should be
 * used unless there are special requirements.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
#endif
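/*
 * Illustrative sketch only (not part of the original code): typical use of the
 * ida_simple_get()/ida_simple_remove() backports above to hand out small
 * integer handles. The ida instance and range are invented for the example;
 * the block is compiled out.
 */
#if 0
static DEFINE_IDA(example_ida);

static int example_alloc_handle(void)
{
	/* Allocate an id in [1, 128); -ENOSPC when the range is exhausted. */
	return ida_simple_get(&example_ida, 1, 128, GFP_KERNEL);
}

static void example_free_handle(int handle)
{
	ida_simple_remove(&example_ida, handle);
}
#endif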