author    Thomas Hellstrom <thellstrom@vmware.com>  2015-11-10 10:03:11 +0100
committer Thomas Hellstrom <thellstrom@vmware.com>  2015-11-18 02:44:23 -0800
commit    23c50374e6574cc41921d0b98e4516e0224f106f (patch)
tree      8f5c01af7f6852906328993e8eeb74a72286ab88 /ttm
parent    80022ba8f7fc35de99947af73d594ef24b90f35b (diff)
ttm: Fix address-space and visibility errors v2
Found by sparse. Also annotate and fix code to avoid confusing sparse with
unusual locking patterns.

v2: Fixed a comment and added __must_check to ttm_bo_reference_unless_doomed()

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
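Sparse flags two classes of problems fixed here: pointers that cross address
spaces (plain kernel pointers vs. __iomem/__user) and functions whose locking
looks unbalanced to the checker. A minimal sketch of the address-space side,
using made-up names (regs, sparse_demo) rather than anything from this patch:

	/* Illustrative only: "regs" and "sparse_demo" are hypothetical. */
	#include <linux/io.h>

	static void __iomem *regs;	/* lives in the I/O address space */

	static void sparse_demo(void)
	{
		void *p;

		p = (void __force *)regs;	/* without __force, sparse warns:
						 * incorrect type in assignment
						 * (different address spaces) */
		iounmap((void __iomem *)p);	/* cast back before an __iomem API */
	}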
Diffstat (limited to 'ttm')
-rw-r--r--  ttm/ttm_bo.c          18
-rw-r--r--  ttm/ttm_bo_api.h      15
-rw-r--r--  ttm/ttm_bo_util.c     38
-rw-r--r--  ttm/ttm_bo_vm.c       73
-rw-r--r--  ttm/ttm_lock.c         8
-rw-r--r--  ttm/ttm_module.c       2
-rw-r--r--  ttm/ttm_object.c       2
-rw-r--r--  ttm/ttm_page_alloc.c  10
8 files changed, 54 insertions, 112 deletions
diff --git a/ttm/ttm_bo.c b/ttm/ttm_bo.c
index 5966e4f..1bd2dff 100644
--- a/ttm/ttm_bo.c
+++ b/ttm/ttm_bo.c
@@ -675,6 +675,7 @@ static void ttm_bo_release(struct kref *kref)
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+ write_lock(&bdev->vm_lock);
if (likely(bo->vm_node != NULL)) {
rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
drm_mm_put_block(bo->vm_node);
@@ -686,18 +687,14 @@ static void ttm_bo_release(struct kref *kref)
ttm_mem_io_unlock(man);
ttm_bo_cleanup_refs_or_queue(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
- write_lock(&bdev->vm_lock);
}
void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo = *p_bo;
- struct ttm_bo_device *bdev = bo->bdev;
*p_bo = NULL;
- write_lock(&bdev->vm_lock);
kref_put(&bo->kref, ttm_bo_release);
- write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);
@@ -1058,10 +1055,10 @@ int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
}
EXPORT_SYMBOL(ttm_bo_wait_cpu);
-int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- bool interruptible, bool no_wait_reserve,
- bool no_wait_gpu)
+static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
int ret = 0;
struct ttm_mem_reg mem;
@@ -1696,8 +1693,9 @@ out_unlock:
return ret;
}
-int ttm_bo_wait(struct ttm_buffer_object *bo,
- bool lazy, bool interruptible, bool no_wait)
+int __releases(bdev->fence_lock) __acquires(bdev->fence_lock)
+ttm_bo_wait(struct ttm_buffer_object *bo,
+ bool lazy, bool interruptible, bool no_wait)
{
struct ttm_bo_driver *driver = bo->bdev->driver;
struct ttm_bo_device *bdev = bo->bdev;
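ttm_bo_wait() can drop and retake bdev->fence_lock while it sleeps, which
sparse reports as an unbalanced lock context unless told otherwise; the
__releases()/__acquires() pair above documents that round trip. A sketch of
the same annotation style on a hypothetical helper (demo_lock and demo_wait
are made-up names):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	/* Entered with demo_lock held; drops it around the wait and retakes
	 * it before returning -- exactly the shape the annotations describe. */
	static void __releases(demo_lock) __acquires(demo_lock)
	demo_wait(void)
	{
		spin_unlock(&demo_lock);
		/* ... sleep, wait for hardware, etc. ... */
		spin_lock(&demo_lock);
	}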
diff --git a/ttm/ttm_bo_api.h b/ttm/ttm_bo_api.h
index 50852aa..052f2df 100644
--- a/ttm/ttm_bo_api.h
+++ b/ttm/ttm_bo_api.h
@@ -320,6 +320,21 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
}
/**
+ * ttm_bo_reference_unless_doomed - reference a struct ttm_buffer_object unless
+ * its refcount is zero and it's about to be destroyed.
+ *
+ * @bo: The buffer object.
+ *
+ * returns NULL if unsuccessful, a pointer to the bo otherwise.
+ */
+
+static inline struct ttm_buffer_object * __must_check
+ttm_bo_reference_unless_doomed(struct ttm_buffer_object *bo)
+{
+ return kref_get_unless_zero(&bo->kref) ? bo : NULL;
+}
+
+/**
* ttm_bo_wait - wait for buffer idle.
*
* @bo: The buffer object.
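kref_get_unless_zero() takes the reference only if the count is still nonzero,
so a lookup path can race safely with teardown: it either gets a full
reference or sees NULL, and the __must_check forces callers to handle the
failure case. A sketch of the intended caller pattern, mirrored by the
ttm_bo_vm.c hunks below (all demo_* names are hypothetical):

	#include <linux/kref.h>
	#include <linux/spinlock.h>

	struct demo_object {
		struct kref kref;
	};

	struct demo_device {
		rwlock_t lookup_lock;	/* serializes lookup against release */
	};

	static struct demo_object *demo_find(struct demo_device *dev);

	static struct demo_object *demo_lookup(struct demo_device *dev)
	{
		struct demo_object *obj;

		read_lock(&dev->lookup_lock);
		obj = demo_find(dev);	/* hypothetical table lookup */
		if (obj && !kref_get_unless_zero(&obj->kref))
			obj = NULL;	/* refcount hit zero: object is doomed */
		read_unlock(&dev->lookup_lock);

		return obj;
	}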
diff --git a/ttm/ttm_bo_util.c b/ttm/ttm_bo_util.c
index cea792d..b2a9e6d 100644
--- a/ttm/ttm_bo_util.c
+++ b/ttm/ttm_bo_util.c
@@ -181,8 +181,8 @@ void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
}
}
-int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
- void **virtual)
+static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+ void **virtual)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
int ret;
@@ -199,9 +199,9 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
addr = mem->bus.addr;
} else {
if (mem->placement & TTM_PL_FLAG_WC)
- addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
+ addr = (void __force *)ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
else
- addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+ addr = (void __force *)ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
if (!addr) {
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
@@ -213,15 +213,15 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
return 0;
}
-void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
- void *virtual)
+static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+ void *virtual)
{
struct ttm_mem_type_manager *man;
man = &bdev->man[mem->mem_type];
if (virtual && mem->bus.addr == NULL)
- iounmap(virtual);
+ iounmap((void __iomem *)virtual);
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
ttm_mem_io_unlock(man);
@@ -229,10 +229,10 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
- uint32_t *dstP =
- (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
- uint32_t *srcP =
- (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
+ uint32_t __iomem *dstP =
+ (uint32_t __iomem *) ((unsigned long)dst + (page << PAGE_SHIFT));
+ uint32_t __iomem *srcP =
+ (uint32_t __iomem *) ((unsigned long)src + (page << PAGE_SHIFT));
int i;
for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
@@ -267,7 +267,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
if (!dst)
return -ENOMEM;
- memcpy_fromio(dst, src, PAGE_SIZE);
+ memcpy_fromio(dst, (void __iomem *)src, PAGE_SIZE);
#ifdef CONFIG_X86
#ifdef VMW_HAS_STACK_KMAP_ATOMIC
@@ -311,7 +311,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
if (!src)
return -ENOMEM;
- memcpy_toio(dst, src, PAGE_SIZE);
+ memcpy_toio((void __iomem *)dst, src, PAGE_SIZE);
#ifdef CONFIG_X86
#ifdef VMW_HAS_STACK_KMAP_ATOMIC
@@ -501,11 +501,13 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
} else {
map->bo_kmap_type = ttm_bo_map_iomap;
if (mem->placement & TTM_PL_FLAG_WC)
- map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
- size);
+ map->virtual = (void __force *)
+ ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+ size);
else
- map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
- size);
+ map->virtual = (void __force *)
+ ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+ size);
}
return (!map->virtual) ? -ENOMEM : 0;
}
@@ -599,7 +601,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
return;
switch (map->bo_kmap_type) {
case ttm_bo_map_iomap:
- iounmap(map->virtual);
+ iounmap((void __iomem *)map->virtual);
break;
case ttm_bo_map_vmap:
vunmap(map->virtual);
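The __force casts above exist because map->virtual does double duty: it holds
a normal kernel pointer for kmap/vmap mappings and an __iomem cookie for
ioremap mappings, so the field stays a plain void * and each ioremap path
casts in and out explicitly. A sketch of that dual-use shape, with made-up
names:

	#include <linux/io.h>
	#include <linux/vmalloc.h>

	struct demo_kmap {
		void *virtual;	/* kernel pointer or __iomem cookie, per type */
		bool is_iomem;
	};

	static void demo_kunmap(struct demo_kmap *map)
	{
		if (map->is_iomem)
			iounmap((void __iomem *)map->virtual);	/* restore space */
		else
			vunmap(map->virtual);
	}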
diff --git a/ttm/ttm_bo_vm.c b/ttm/ttm_bo_vm.c
index 317c2d8..e097804 100644
--- a/ttm/ttm_bo_vm.c
+++ b/ttm/ttm_bo_vm.c
@@ -399,8 +399,8 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
read_lock(&bdev->vm_lock);
bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
(vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
- if (likely(bo != NULL))
- ttm_bo_reference(bo);
+ if (bo != NULL && !ttm_bo_reference_unless_doomed(bo))
+ bo = NULL;
read_unlock(&bdev->vm_lock);
if (unlikely(bo == NULL)) {
@@ -467,8 +467,8 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
read_lock(&bdev->vm_lock);
bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
- if (likely(bo != NULL))
- ttm_bo_reference(bo);
+ if (bo != NULL && !ttm_bo_reference_unless_doomed(bo))
+ bo = NULL;
read_unlock(&bdev->vm_lock);
if (unlikely(bo == NULL))
@@ -539,68 +539,3 @@ out_unref:
ttm_bo_unref(&bo);
return ret;
}
-
-ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
- char __user *rbuf, size_t count, loff_t *f_pos,
- bool write)
-{
- struct ttm_bo_kmap_obj map;
- unsigned long kmap_offset;
- unsigned long kmap_end;
- unsigned long kmap_num;
- size_t io_size;
- unsigned int page_offset;
- char *virtual;
- int ret;
- bool no_wait = false;
- bool dummy;
-
- kmap_offset = (*f_pos >> PAGE_SHIFT);
- if (unlikely(kmap_offset >= bo->num_pages))
- return -EFBIG;
-
- page_offset = *f_pos & ~PAGE_MASK;
- io_size = bo->num_pages - kmap_offset;
- io_size = (io_size << PAGE_SHIFT) - page_offset;
- if (count < io_size)
- io_size = count;
-
- kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
- kmap_num = kmap_end - kmap_offset + 1;
-
- ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
-
- switch (ret) {
- case 0:
- break;
- case -EBUSY:
- return -EAGAIN;
- default:
- return ret;
- }
-
- ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
- if (unlikely(ret != 0)) {
- ttm_bo_unreserve(bo);
- return ret;
- }
-
- virtual = ttm_kmap_obj_virtual(&map, &dummy);
- virtual += page_offset;
-
- if (write)
- ret = copy_from_user(virtual, wbuf, io_size);
- else
- ret = copy_to_user(rbuf, virtual, io_size);
-
- ttm_bo_kunmap(&map);
- ttm_bo_unreserve(bo);
- ttm_bo_unref(&bo);
-
- if (unlikely(ret != 0))
- return ret;
-
- *f_pos += io_size;
-
- return io_size;
-}
diff --git a/ttm/ttm_lock.c b/ttm/ttm_lock.c
index 4e1da5a..96dde91 100644
--- a/ttm/ttm_lock.c
+++ b/ttm/ttm_lock.c
@@ -163,14 +163,6 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
}
EXPORT_SYMBOL(ttm_write_lock);
-void ttm_write_lock_downgrade(struct ttm_lock *lock)
-{
- spin_lock(&lock->lock);
- lock->rw = 1;
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
-}
-
static int __ttm_vt_unlock(struct ttm_lock *lock)
{
int ret = 0;
diff --git a/ttm/ttm_module.c b/ttm/ttm_module.c
index 6bac66c..959cae5 100644
--- a/ttm/ttm_module.c
+++ b/ttm/ttm_module.c
@@ -35,7 +35,7 @@
#include "drm_sysfs.h"
static DECLARE_WAIT_QUEUE_HEAD(exit_q);
-atomic_t device_released;
+static atomic_t device_released;
static struct device_type ttm_drm_class_type = {
.name = "ttm",
diff --git a/ttm/ttm_object.c b/ttm/ttm_object.c
index 4ac011b..babf5e1 100644
--- a/ttm/ttm_object.c
+++ b/ttm/ttm_object.c
@@ -383,7 +383,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
}
EXPORT_SYMBOL(ttm_ref_object_add);
-static void ttm_ref_object_release(struct kref *kref)
+static void __releases(tfile->lock) __acquires(tfile->lock) ttm_ref_object_release(struct kref *kref)
{
struct ttm_ref_object *ref =
container_of(kref, struct ttm_ref_object, kref);
diff --git a/ttm/ttm_page_alloc.c b/ttm/ttm_page_alloc.c
index bb4b62d..409e467 100644
--- a/ttm/ttm_page_alloc.c
+++ b/ttm/ttm_page_alloc.c
@@ -69,7 +69,7 @@ struct ttm_page_pool {
spinlock_t lock;
bool fill_lock;
struct list_head list;
- int gfp_flags;
+ gfp_t gfp_flags;
unsigned npages;
char *name;
unsigned long nfrees;
@@ -492,7 +492,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
* This function is reentrant if caller updates count depending on number of
* pages returned in pages array.
*/
-static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
+static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
struct page **caching_array;
@@ -683,7 +683,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
{
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct page *p = NULL;
- int gfp_flags = GFP_USER;
+ gfp_t gfp_flags = GFP_USER;
int r;
/* set zero flag for page allocation if required */
@@ -787,7 +787,7 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
ttm_page_pool_free(pool, page_count);
}
-static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
char *name)
{
spin_lock_init(&pool->lock);
@@ -835,7 +835,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
return 0;
}
-void ttm_page_alloc_fini()
+void ttm_page_alloc_fini(void)
{
int i;
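The int-to-gfp_t conversions in this file matter to sparse because gfp_t is
declared __bitwise: values of the type combine freely with each other but
cannot be silently mixed with plain integers, which catches flag-type mixups
at check time. A sketch (demo_alloc is a made-up helper):

	#include <linux/gfp.h>

	static struct page *demo_alloc(gfp_t gfp_flags)
	{
		/* "int flags = gfp_flags;" would draw a sparse warning about
		 * different base types; keeping the __bitwise type is clean. */
		gfp_t flags = gfp_flags | __GFP_ZERO;

		return alloc_page(flags);
	}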