author     Chunming Zhou <david1.zhou@amd.com>    2018-02-08 14:52:11 +0800
committer  Chunming Zhou <david1.zhou@amd.com>    2018-02-08 14:52:23 +0800
commit     41b94a3fb6e87d057fad78568d920d29489e5060
tree       ffd03a70bdbfb85a452fcfa8807524447df5bff9
parent     d07be74a4afe9d22f987aca7e8e84cccaa210248
amdgpu: clean up non list code path for vamgr
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
-rw-r--r--  amdgpu/amdgpu_internal.h |   2
-rw-r--r--  amdgpu/amdgpu_vamgr.c    | 121
2 files changed, 43 insertions, 80 deletions
diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
index 3e044f11..75276a99 100644
--- a/amdgpu/amdgpu_internal.h
+++ b/amdgpu/amdgpu_internal.h
@@ -53,8 +53,6 @@ struct amdgpu_bo_va_hole {
 };
 
 struct amdgpu_bo_va_mgr {
-	/* the start virtual address */
-	uint64_t va_offset;
 	uint64_t va_max;
 	struct list_head va_holes;
 	pthread_mutex_t bo_va_mutex;
diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
index a2852b55..2311e5eb 100644
--- a/amdgpu/amdgpu_vamgr.c
+++ b/amdgpu/amdgpu_vamgr.c
@@ -48,12 +48,19 @@ int amdgpu_va_range_query(amdgpu_device_handle dev,
 drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
 				   uint64_t max, uint64_t alignment)
 {
-	mgr->va_offset = start;
+	struct amdgpu_bo_va_hole *n;
+
 	mgr->va_max = max;
 	mgr->va_alignment = alignment;
 
 	list_inithead(&mgr->va_holes);
 	pthread_mutex_init(&mgr->bo_va_mutex, NULL);
+	pthread_mutex_lock(&mgr->bo_va_mutex);
+	n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+	n->size = mgr->va_max;
+	n->offset = start;
+	list_add(&n->list, &mgr->va_holes);
+	pthread_mutex_unlock(&mgr->bo_va_mutex);
 }
 
 drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
@@ -122,41 +129,14 @@ amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
 		}
 	}
 
-	if (base_required) {
-		if (base_required < mgr->va_offset) {
-			pthread_mutex_unlock(&mgr->bo_va_mutex);
-			return AMDGPU_INVALID_VA_ADDRESS;
-		}
-		offset = mgr->va_offset;
-		waste = base_required - mgr->va_offset;
-	} else {
-		offset = mgr->va_offset;
-		waste = offset % alignment;
-		waste = waste ? alignment - waste : 0;
-	}
-
-	if (offset + waste + size > mgr->va_max) {
-		pthread_mutex_unlock(&mgr->bo_va_mutex);
-		return AMDGPU_INVALID_VA_ADDRESS;
-	}
-
-	if (waste) {
-		n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
-		n->size = waste;
-		n->offset = offset;
-		list_add(&n->list, &mgr->va_holes);
-	}
-
-	offset += waste;
-	mgr->va_offset += size + waste;
 	pthread_mutex_unlock(&mgr->bo_va_mutex);
-	return offset;
+	return AMDGPU_INVALID_VA_ADDRESS;
 }
 
 static drm_private void
 amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
 {
-	struct amdgpu_bo_va_hole *hole;
+	struct amdgpu_bo_va_hole *hole, *next;
 
 	if (va == AMDGPU_INVALID_VA_ADDRESS)
 		return;
@@ -164,61 +144,46 @@ amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
 	size = ALIGN(size, mgr->va_alignment);
 
 	pthread_mutex_lock(&mgr->bo_va_mutex);
-	if ((va + size) == mgr->va_offset) {
-		mgr->va_offset = va;
-		/* Delete uppermost hole if it reaches the new top */
-		if (!LIST_IS_EMPTY(&mgr->va_holes)) {
-			hole = container_of(mgr->va_holes.next, hole, list);
-			if ((hole->offset + hole->size) == va) {
-				mgr->va_offset = hole->offset;
+	hole = container_of(&mgr->va_holes, hole, list);
+	LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
+		if (next->offset < va)
+			break;
+		hole = next;
+	}
+
+	if (&hole->list != &mgr->va_holes) {
+		/* Grow upper hole if it's adjacent */
+		if (hole->offset == (va + size)) {
+			hole->offset = va;
+			hole->size += size;
+			/* Merge lower hole if it's adjacent */
+			if (next != hole &&
+			    &next->list != &mgr->va_holes &&
+			    (next->offset + next->size) == va) {
+				next->size += hole->size;
 				list_del(&hole->list);
 				free(hole);
 			}
 		}
-	} else {
-		struct amdgpu_bo_va_hole *next;
-
-		hole = container_of(&mgr->va_holes, hole, list);
-		LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
-			if (next->offset < va)
-				break;
-			hole = next;
-		}
-
-		if (&hole->list != &mgr->va_holes) {
-			/* Grow upper hole if it's adjacent */
-			if (hole->offset == (va + size)) {
-				hole->offset = va;
-				hole->size += size;
-				/* Merge lower hole if it's adjacent */
-				if (next != hole &&
-				    &next->list != &mgr->va_holes &&
-				    (next->offset + next->size) == va) {
-					next->size += hole->size;
-					list_del(&hole->list);
-					free(hole);
-				}
-				goto out;
-			}
-		}
+	}
 
-		/* Grow lower hole if it's adjacent */
-		if (next != hole && &next->list != &mgr->va_holes &&
-		    (next->offset + next->size) == va) {
-			next->size += size;
-			goto out;
-		}
+	/* Grow lower hole if it's adjacent */
+	if (next != hole && &next->list != &mgr->va_holes &&
+	    (next->offset + next->size) == va) {
+		next->size += size;
+		goto out;
+	}
 
-		/* FIXME on allocation failure we just lose virtual address space
-		 * maybe print a warning
-		 */
-		next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
-		if (next) {
-			next->size = size;
-			next->offset = va;
-			list_add(&next->list, &hole->list);
-		}
+	/* FIXME on allocation failure we just lose virtual address space
+	 * maybe print a warning
	 */
+	next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+	if (next) {
+		next->size = size;
+		next->offset = va;
+		list_add(&next->list, &hole->list);
 	}
+
 out:
 	pthread_mutex_unlock(&mgr->bo_va_mutex);
 }
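For readers who want to see the hole-list scheme in isolation: the sketch below is a minimal, standalone illustration of the approach this patch converges on, where the manager is seeded with a single hole covering the whole range, allocations carve space out of holes, and frees insert or merge holes. It is not the libdrm code: the names (struct va_hole, va_sketch_*) are invented for the example, and alignment handling, the base_required path, and the bo_va_mutex locking are deliberately left out.

/* Hypothetical sketch only -- not the libdrm implementation. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct va_hole {
	uint64_t offset;
	uint64_t size;
	struct va_hole *next;	/* holes kept sorted by ascending offset */
};

struct va_sketch_mgr {
	struct va_hole *holes;
};

/* Seed the manager with one hole spanning the whole managed range, which
 * is what amdgpu_vamgr_init() now does with its va_holes list. */
static void va_sketch_init(struct va_sketch_mgr *mgr, uint64_t start, uint64_t size)
{
	struct va_hole *n = calloc(1, sizeof(*n));	/* next is NULL from calloc */

	n->offset = start;
	n->size = size;
	mgr->holes = n;
}

/* First-fit allocation: carve the request out of the low end of the first
 * hole that is big enough, dropping the hole once it is empty. */
static uint64_t va_sketch_alloc(struct va_sketch_mgr *mgr, uint64_t size)
{
	struct va_hole **p;

	for (p = &mgr->holes; *p; p = &(*p)->next) {
		struct va_hole *h = *p;
		uint64_t va = h->offset;

		if (h->size < size)
			continue;
		h->offset += size;
		h->size -= size;
		if (h->size == 0) {
			*p = h->next;
			free(h);
		}
		return va;
	}
	return UINT64_MAX;	/* no hole large enough */
}

/* Return a range to the manager, merging it with adjacent holes so the
 * free space coalesces back into larger holes over time. */
static void va_sketch_free(struct va_sketch_mgr *mgr, uint64_t va, uint64_t size)
{
	struct va_hole **p = &mgr->holes;
	struct va_hole *prev = NULL, *upper;

	while (*p && (*p)->offset < va) {
		prev = *p;
		p = &(*p)->next;
	}

	if (prev && prev->offset + prev->size == va) {
		/* Freed range touches the hole below it: grow that hole. */
		prev->size += size;
	} else {
		/* Otherwise insert a fresh hole at its sorted position. */
		struct va_hole *n = calloc(1, sizeof(*n));

		n->offset = va;
		n->size = size;
		n->next = *p;
		*p = n;
		prev = n;
	}

	/* If the grown or inserted hole now touches the hole above, merge. */
	upper = prev->next;
	if (upper && prev->offset + prev->size == upper->offset) {
		prev->size += upper->size;
		prev->next = upper->next;
		free(upper);
	}
}

int main(void)
{
	struct va_sketch_mgr mgr;
	uint64_t a, b;

	va_sketch_init(&mgr, 0x100000, 0x10000);
	a = va_sketch_alloc(&mgr, 0x1000);
	b = va_sketch_alloc(&mgr, 0x2000);
	printf("a=0x%llx b=0x%llx\n", (unsigned long long)a, (unsigned long long)b);

	/* Freeing both ranges coalesces everything back into a single hole. */
	va_sketch_free(&mgr, a, 0x1000);
	va_sketch_free(&mgr, b, 0x2000);
	printf("hole: offset=0x%llx size=0x%llx\n",
	       (unsigned long long)mgr.holes->offset,
	       (unsigned long long)mgr.holes->size);
	return 0;
}

The design point the cleanup relies on is visible here: once the full range starts life as one hole, a single code path (walk the sorted hole list, then carve or merge) covers every case, so the separate va_offset "top of range" bookkeeping that find_va and free_va used to special-case is no longer needed.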