author     Thomas Hellstrom <unichrome@shipmail.org>    2006-02-18 17:42:11 +0000
committer  Thomas Hellstrom <unichrome@shipmail.org>    2006-02-18 17:42:11 +0000
commit     b0f15444a780f549a71c143d7f3957571afbb6f5 (patch)
tree       689a9405e76f1801fbdd76e313248ec2f0b8708e
parent     20e4ead84f157ffbc43f120d65f6f259386b7b93 (diff)
ttm: Make memory manager base alignment-aware.
-rw-r--r--  linux-core/drmP.h    |  5 +++--
-rw-r--r--  linux-core/drm_mm.c  | 19 +++++++++++++++++--
-rw-r--r--  linux-core/drm_ttm.c |  4 ++--
3 files changed, 22 insertions(+), 6 deletions(-)
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 8044367f..b164e02c 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -1095,10 +1095,11 @@ extern void drm_ttm_mm_init(drm_device_t *dev, drm_ttm_mm_t *mm, unsigned long s
  * Basic memory manager support (drm_mm.c)
  */
 
-extern drm_mm_node_t * drm_mm_get_block_locked(drm_mm_node_t * parent, unsigned long size);
+extern drm_mm_node_t * drm_mm_get_block_locked(drm_mm_node_t * parent, unsigned long size,
+					       unsigned alignment);
 extern void drm_mm_put_block_locked(drm_mm_t *mm, drm_mm_node_t *cur);
 extern drm_mm_node_t *drm_mm_search_free_locked(const drm_mm_t *mm, unsigned long size,
-						int best_match);
+						unsigned alignment, int best_match);
 extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size);
 
 /* Inline replacements for DRM_IOREMAP macros */
diff --git a/linux-core/drm_mm.c b/linux-core/drm_mm.c
index 05d316ee..82bbf93d 100644
--- a/linux-core/drm_mm.c
+++ b/linux-core/drm_mm.c
@@ -30,16 +30,26 @@
  * Generic simple memory manager implementation. Intended to be used as a base
  * class implementation for more advanced memory managers. The mm.mm_lock spinlock
  * needs to be explicitly taken by the client before calling any _locked function.
+ *
+ * Note that the algorithm used is quite simple and there might be substantial
+ * performance gains if a smarter free list is implemented. Currently it is just an
+ * unordered stack of free regions. Feel free to improve.
+ *
+ * Aligned allocations can also see some improvement.
  */
 
 #include "drmP.h"
 
 drm_mm_node_t *drm_mm_get_block_locked(drm_mm_node_t * parent,
-				       unsigned long size)
+				       unsigned long size,
+				       unsigned alignment)
 {
 	drm_mm_node_t * child;
 
+	if (alignment)
+		size += alignment - 1;
+
 	if (parent->size == size) {
 		list_del_init(&parent->fl_entry);
 		parent->free = FALSE;
@@ -112,7 +122,9 @@ void drm_mm_put_block_locked(drm_mm_t * mm, drm_mm_node_t * cur)
 }
 
 drm_mm_node_t *drm_mm_search_free_locked(const drm_mm_t * mm,
-					 unsigned long size, int best_match)
+					 unsigned long size,
+					 unsigned alignment,
+					 int best_match)
 {
 	struct list_head *list;
 	const struct list_head *free_stack = &mm->root_node.fl_entry;
@@ -123,6 +135,9 @@ drm_mm_node_t *drm_mm_search_free_locked(const drm_mm_t * mm,
 	best = NULL;
 	best_size = ~0UL;
 
+	if (alignment)
+		size += alignment - 1;
+
 	list_for_each(list, free_stack) {
 		entry = list_entry(list, drm_mm_node_t, fl_entry);
 		if (entry->size >= size) {
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index c8048d12..03c81bb2 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -885,7 +885,7 @@ static int drm_validate_ttm_region(drm_ttm_backend_list_t * entry,
 	while (!mm_node) {
 		mm_node = drm_mm_search_free_locked(&entry->mm->mm, num_pages,
-						    0);
+						    0, 0);
 		if (!mm_node) {
 			ret = drm_ttm_evict_lru_sl(entry, &have_fence,
 						   &cur_fence);
 			if (ret)
@@ -894,7 +894,7 @@ static int drm_validate_ttm_region(drm_ttm_backend_list_t * entry,
 	}
 
 	if (!entry->mm_node) {
-		mm_node = drm_mm_get_block_locked(mm_node, num_pages);
+		mm_node = drm_mm_get_block_locked(mm_node, num_pages, 0);
 		mm_node->private = mm_priv;
 		mm_priv->region = entry;
 		entry->mm_node = mm_node;
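
The alignment handling in this patch rests on one over-allocation trick, visible in both drm_mm_get_block_locked() and drm_mm_search_free_locked(): a request for size pages with a required alignment is padded to size + alignment - 1 pages, which guarantees that an aligned region of the original size fits somewhere inside whatever block is found. The following is a minimal standalone C sketch of that invariant; the align_up() helper and the sample numbers are illustrative, not part of the DRM tree.

#include <stdio.h>

/*
 * Illustration only (not DRM code): round a start offset up to the
 * next multiple of the alignment. alignment == 0 means "no constraint",
 * matching the 0 passed by drm_ttm.c in this patch.
 */
static unsigned long align_up(unsigned long start, unsigned alignment)
{
	if (alignment == 0)
		return start;
	return start + (alignment - start % alignment) % alignment;
}

int main(void)
{
	unsigned long size = 100;  /* pages requested */
	unsigned alignment = 16;   /* required alignment, in pages */

	/* Same padding as drm_mm_get_block_locked() in this patch. */
	unsigned long padded = size + (alignment ? alignment - 1 : 0);

	/* Suppose the free-list search returns a block starting here: */
	unsigned long block_start = 37;
	unsigned long aligned_start = align_up(block_start, alignment);

	/*
	 * Because the block holds at least size + alignment - 1 pages,
	 * [aligned_start, aligned_start + size) always fits inside it.
	 */
	printf("padded=%lu start=%lu aligned=%lu slack=%lu\n",
	       padded, block_start, aligned_start,
	       aligned_start - block_start);
	return 0;
}

The price is up to alignment - 1 pages of slack per aligned allocation, which is presumably why the new drm_mm.c comment notes that "Aligned allocations can also see some improvement."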