| author | Alon Levy <alevy@redhat.com> | 2012-09-09 10:53:27 +0300 |
|---|---|---|
| committer | Alon Levy <alevy@redhat.com> | 2012-10-19 13:59:01 +0200 |
| commit | 0f6d5f6bd5369bbda29cf474adb1f78d0d3f7eb6 (patch) | |
| tree | dfc4ac42293f1ad0fa9fc36987bdd643f6c5f4f2 /drivers/gpu | |
| parent | 2273c44423f8fac7d835e6ba793c0665787f3491 (diff) | |
drivers/gpu/drm/ttm: (debug) debug prints
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_bo.c      | 17 |
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_bo_util.c |  3 |
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_bo_vm.c   | 50 |
3 files changed, 68 insertions, 2 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 402ab69f9f9..13c9b6966ba 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1169,6 +1169,18 @@ int ttm_bo_check_placement(struct ttm_buffer_object *bo,
 	return 0;
 }
 
+#if 0
+static const char *ttm_bo_type_to_string(int ttm_bo_type)
+{
+	switch (ttm_bo_type) {
+	case ttm_bo_type_device: return "device";
+	case ttm_bo_type_kernel: return "kernel";
+	default:
+		return "unknown";
+	}
+}
+#endif
+
 int ttm_bo_init(struct ttm_bo_device *bdev,
 		struct ttm_buffer_object *bo,
 		unsigned long size,
@@ -1186,6 +1198,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	unsigned long num_pages;
 	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
 
+	//printk("%s: size = %ld, type = %s\n", __func__, size, ttm_bo_type_to_string(type));
 	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
 	if (ret) {
 		pr_err("Out of kernel memory\n");
@@ -1685,8 +1698,12 @@ retry_pre_get:
 		return ret;
 
 	write_lock(&bdev->vm_lock);
+	//printk("%s: calling drm_mm_search_free %ld\n", __func__,
+	//	bo->mem.num_pages);
 	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
 					 bo->mem.num_pages, 0, 0);
+	//printk("%s: got a vm_node->start == %ld\n", __func__,
+	//	bo->vm_node->start);
 
 	if (unlikely(bo->vm_node == NULL)) {
 		ret = -ENOMEM;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2026060f03e..054bf259a04 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -487,6 +487,9 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
 {
 	struct ttm_mem_reg *mem = &bo->mem;
 
+	//printk("%s:%d: bo->mem.bus = {.base %ld, .offset %ld, .addr %p}\n",
+	//	__func__, __LINE__,
+	//	bo->mem.bus.base, bo->mem.bus.offset, bo->mem.bus.addr);
 	if (bo->mem.bus.addr) {
 		map->bo_kmap_type = ttm_bo_map_premapped;
 		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3ba72dbdc4b..3440882dd66 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -61,12 +61,44 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
 			cur = cur->rb_left;
 	}
 
-	if (unlikely(best_bo == NULL))
+	if (unlikely(best_bo == NULL)) {
+		printk("%s: best_bo == NULL, Here comes the tree: (bo start, #pages)\n", __func__);
+		cur = bdev->addr_space_rb.rb_node;
+		while (likely(cur != NULL)) {
+			bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
+			printk("%s: cur = %p, (%ld, %ld)\n", __func__, cur, bo->vm_node->start, bo->num_pages);
+			cur_offset = bo->vm_node->start;
+			if (page_start >= cur_offset) {
+				cur = cur->rb_right;
+				best_bo = bo;
+				if (page_start == cur_offset)
+					break;
+			} else
+				cur = cur->rb_left;
+		}
 		return NULL;
+	}
 
 	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
-		     (page_start + num_pages)))
+		     (page_start + num_pages))) {
+		printk("%s: best_bo->vm_node->start + best_bo->num_pages %ld + %ld < %ld + %ld page_start + num_pages\nHere comes the branch: (bo start, #pages)\n",
+		       __func__, best_bo->vm_node->start, best_bo->num_pages,
+		       page_start, num_pages);
+		cur = bdev->addr_space_rb.rb_node;
+		while (likely(cur != NULL)) {
+			bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
+			printk("%s: cur = %p, (%ld, %ld)\n", __func__, cur, bo->vm_node->start, bo->num_pages);
+			cur_offset = bo->vm_node->start;
+			if (page_start >= cur_offset) {
+				cur = cur->rb_right;
+				best_bo = bo;
+				if (page_start == cur_offset)
+					break;
+			} else
+				cur = cur->rb_left;
+		}
 		return NULL;
+	}
 
 	return best_bo;
 }
@@ -94,10 +126,18 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * for reserve, and if it fails, retry the fault after scheduling.
 	 */
 
+#if 0
+	printk("%s: enter: reserved = %d, mem.bus.is_iomem = %d\n", __func__,
+	       bo->reserved.counter,
+	       bo->mem.bus.is_iomem
+	       );
+#endif
+
 	ret = ttm_bo_reserve(bo, true, true, false, 0);
 	if (unlikely(ret != 0)) {
 		if (ret == -EBUSY)
 			set_need_resched();
+		printk("%s: VM_FAULT_NOPAGE (ttm_bo_reserve = %d)\n", __func__, ret);
 		return VM_FAULT_NOPAGE;
 	}
 
@@ -141,6 +181,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 	ret = ttm_mem_io_reserve_vm(bo);
 	if (unlikely(ret != 0)) {
+		printk("%s: VM_FAULT_SIGBUS (ttm_bo_io_reserve = %d)\n",
+		       __func__, ret);
 		retval = VM_FAULT_SIGBUS;
 		goto out_io_unlock;
 	}
@@ -151,6 +193,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		bo->vm_node->start - vma->vm_pgoff;
 
 	if (unlikely(page_offset >= bo->num_pages)) {
+		printk("%s: VM_FAULT_SIGBUS (page_offset %ld >= bo->num_pages %ld)\n",
+		       __func__, page_offset, bo->num_pages);
+		printk("%s: address %ld\nvma->vm_start %ld\nbo->vm_node->start %ld\nvma->vm_pgoff %ld\n",
+		       __func__, address, vma->vm_start, bo->vm_node->start, vma->vm_pgoff);
 		retval = VM_FAULT_SIGBUS;
 		goto out_io_unlock;
 	}
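The debug walk that dumps the address-space rb-tree is open-coded twice in ttm_bo_vm_lookup_rb() above, once for each failure branch. Below is a minimal sketch (not part of the patch) of how that dump could be shared by one helper; the name ttm_bo_vm_dump_rb() is hypothetical, and the code assumes the 2012-era TTM fields the patch itself uses (bdev->addr_space_rb, bo->vm_rb, bo->vm_node->start, bo->num_pages) and the headers already included by ttm_bo_vm.c.

```c
/*
 * Hypothetical helper, for illustration only: it repeats the same descent
 * that ttm_bo_vm_lookup_rb() performs over the address-space rb-tree
 * (ordered by vm_node->start) and prints each node it visits, so both
 * failure paths could call it instead of duplicating the loop.
 */
#include <linux/rbtree.h>
#include <linux/printk.h>
#include <drm/ttm/ttm_bo_driver.h>	/* or whatever form ttm_bo_vm.c uses */

static void ttm_bo_vm_dump_rb(struct ttm_bo_device *bdev,
			      unsigned long page_start)
{
	struct rb_node *cur = bdev->addr_space_rb.rb_node;
	struct ttm_buffer_object *bo;
	unsigned long cur_offset;

	while (cur != NULL) {
		bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
		cur_offset = bo->vm_node->start;
		printk("%s: cur = %p, (%lu, %lu)\n", __func__, cur,
		       bo->vm_node->start, bo->num_pages);
		if (page_start >= cur_offset) {
			/* Exact hit: the lookup would stop here as well. */
			if (page_start == cur_offset)
				break;
			/* A larger start that is still <= page_start can
			 * only be in the right subtree. */
			cur = cur->rb_right;
		} else {
			cur = cur->rb_left;
		}
	}
}
```

With such a helper, each failure branch in ttm_bo_vm_lookup_rb() would reduce to one printk() describing the failure plus a call to ttm_bo_vm_dump_rb(bdev, page_start); the traversal and the per-node output stay identical to what the patch prints today.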