diff options
author | Thomas Hellstrom <thellstrom@vmware.com> | 2018-08-30 10:27:09 +0200 |
---|---|---|
committer | Thomas Hellstrom <thellstrom@vmware.com> | 2018-08-31 14:11:39 +0200 |
commit | 1219b054653627b23ca46bde4ac737c68922624d (patch) | |
tree | 2f0f571782b723705ff8b14c8fb533626627d123 | |
parent | a866523eb2de4dff7049ab8c2343a7ad2e0d1e7e (diff) |
vmwgfx: Fix compilation on 4.19
The ida API and the dmabuf API have changed.
The ida fix might be short-lived since the ida_simple API might go soon too.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
-rw-r--r-- | vmwgfx_gmrid_manager.c | 41 | ||||
-rw-r--r-- | vmwgfx_prime.c | 9 |
2 files changed, 20 insertions, 30 deletions
diff --git a/vmwgfx_gmrid_manager.c b/vmwgfx_gmrid_manager.c index fafce6a..88ef787 100644 --- a/vmwgfx_gmrid_manager.c +++ b/vmwgfx_gmrid_manager.c @@ -51,51 +51,34 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, { struct vmwgfx_gmrid_man *gman = (struct vmwgfx_gmrid_man *)man->priv; - int ret = 0; int id; mem->mm_node = NULL; + id = ida_simple_get(&gman->gmr_ida, 0, gman->max_gmr_ids, GFP_KERNEL); + if (id < 0) + return id; + spin_lock(&gman->lock); if (gman->max_gmr_pages > 0) { gman->used_gmr_pages += bo->num_pages; if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages)) - goto out_err_locked; + goto nospace; } - do { - spin_unlock(&gman->lock); - if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) { - ret = -ENOMEM; - goto out_err; - } - spin_lock(&gman->lock); - - ret = ida_get_new(&gman->gmr_ida, &id); - if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) { - ida_remove(&gman->gmr_ida, id); - ret = 0; - goto out_err_locked; - } - } while (ret == -EAGAIN); - - if (likely(ret == 0)) { - mem->mm_node = gman; - mem->start = id; - mem->num_pages = bo->num_pages; - } else - goto out_err_locked; + mem->mm_node = gman; + mem->start = id; + mem->num_pages = bo->num_pages; spin_unlock(&gman->lock); return 0; -out_err: - spin_lock(&gman->lock); -out_err_locked: +nospace: gman->used_gmr_pages -= bo->num_pages; spin_unlock(&gman->lock); - return ret; + ida_simple_remove(&gman->gmr_ida, id); + return 0; } static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man, @@ -106,7 +89,7 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man, if (mem->mm_node) { spin_lock(&gman->lock); - ida_remove(&gman->gmr_ida, mem->start); + ida_simple_remove(&gman->gmr_ida, mem->start); gman->used_gmr_pages -= mem->num_pages; spin_unlock(&gman->lock); mem->mm_node = NULL; diff --git a/vmwgfx_prime.c b/vmwgfx_prime.c index 0a20bfa..dc46060 100644 --- a/vmwgfx_prime.c +++ b/vmwgfx_prime.c @@ -42,7 +42,9 @@ */ static int 
vmw_prime_map_attach(struct dma_buf *dma_buf, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) struct device *target_dev, +#endif struct dma_buf_attachment *attach) { return -ENOSYS; @@ -65,6 +67,7 @@ static void vmw_prime_unmap_dma_buf(struct dma_buf_attachment *attach, { } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num) { @@ -76,6 +79,8 @@ static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, { } +#endif + static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num) { @@ -117,9 +122,11 @@ const struct dma_buf_ops vmw_prime_dmabuf_ops = { #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) || \ (RHEL_VERSION_CODE >= RHEL_RELEASE_VERSION(7, 5))) .map = vmw_prime_dmabuf_kmap, - .map_atomic = vmw_prime_dmabuf_kmap_atomic, .unmap = vmw_prime_dmabuf_kunmap, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) + .map_atomic = vmw_prime_dmabuf_kmap_atomic, .unmap_atomic = vmw_prime_dmabuf_kunmap_atomic, +#endif #else .kmap = vmw_prime_dmabuf_kmap, .kmap_atomic = vmw_prime_dmabuf_kmap_atomic, |