| author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-02-09 17:15:47 +0100 |
|---|---|---|
| committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-02-09 21:25:23 +0100 |
| commit | 7bddb01fb9697afd5d39bb69dd9f782a28063101 (patch) | |
| tree | cf8d8b67d4bac35a611073a6723228d074960036 /drivers/gpu/drm/i915/i915_gem_gtt.c | |
| parent | 1d2a314c97ceaf383de8e23cdde46729927d433c (diff) | |
drm/i915: ppgtt binding/unbinding support
This adds support to bind/unbind objects and wires it up. Objects are
only put into the ppgtt when necessary, i.e. at execbuf time.
Objects are still unconditionally put into the global gtt.
v2: Kill the quick hack and explicitly pass cache_level to ppgtt_bind
like for the global gtt function. Noticed by Chris Wilson.
Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
Tested-by: Chris Wilson <chris@chris-wilson.co.uk>
Tested-by: Eugeni Dodonov <eugeni.dodonov@intel.com>
Reviewed-by: Eugeni Dodonov <eugeni.dodonov@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
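
The caller side of this wiring lives outside this file (the diffstat below is limited to i915_gem_gtt.c), but the flow the commit message describes could look roughly like the sketch below. This is a hedged illustration, not the patch's actual wiring: the helper name `bind_at_execbuf_sketch` is invented, and `dev_priv->mm.aliasing_ppgtt` is assumed to be the aliasing ppgtt pointer set up by the parent commit.

```c
/*
 * Illustrative sketch only -- not part of this patch. It shows the
 * intended call pattern: the global gtt binding stays unconditional,
 * while the ppgtt binding happens lazily, when an object is actually
 * used by an execbuf. bind_at_execbuf_sketch() is a made-up name;
 * aliasing_ppgtt is assumed from the parent commit.
 */
static void bind_at_execbuf_sketch(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

	/* Only bind into the ppgtt when the object is about to be used. */
	if (dev_priv->mm.aliasing_ppgtt)
		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
				       obj, obj->cache_level);
}
```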
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_gtt.c | 146
1 file changed, 140 insertions, 6 deletions
```diff
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index f408f8c710d..2eacd78bb93 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -34,22 +34,31 @@ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
 				   unsigned first_entry,
 				   unsigned num_entries)
 {
-	int i, j;
 	uint32_t *pt_vaddr;
 	uint32_t scratch_pte;
+	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+	unsigned last_pte, i;
 
 	scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
 	scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
 
-	for (i = 0; i < ppgtt->num_pd_entries; i++) {
-		pt_vaddr = kmap_atomic(ppgtt->pt_pages[i]);
+	while (num_entries) {
+		last_pte = first_pte + num_entries;
+		if (last_pte > I915_PPGTT_PT_ENTRIES)
+			last_pte = I915_PPGTT_PT_ENTRIES;
+
+		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
 
-		for (j = 0; j < I915_PPGTT_PT_ENTRIES; j++)
-			pt_vaddr[j] = scratch_pte;
+		for (i = first_pte; i < last_pte; i++)
+			pt_vaddr[i] = scratch_pte;
 
 		kunmap_atomic(pt_vaddr);
-	}
+
+		num_entries -= last_pte - first_pte;
+		first_pte = 0;
+		act_pd++;
+	}
 }
 
 int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
@@ -168,6 +177,131 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
 	kfree(ppgtt);
 }
 
+static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
+					 struct scatterlist *sg_list,
+					 unsigned sg_len,
+					 unsigned first_entry,
+					 uint32_t pte_flags)
+{
+	uint32_t *pt_vaddr, pte;
+	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+	unsigned i, j, m, segment_len;
+	dma_addr_t page_addr;
+	struct scatterlist *sg;
+
+	/* init sg walking */
+	sg = sg_list;
+	i = 0;
+	segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
+	m = 0;
+
+	while (i < sg_len) {
+		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
+
+		for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
+			page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+			pte = GEN6_PTE_ADDR_ENCODE(page_addr);
+			pt_vaddr[j] = pte | pte_flags;
+
+			/* grab the next page */
+			m++;
+			if (m == segment_len) {
+				sg = sg_next(sg);
+				i++;
+				if (i == sg_len)
+					break;
+
+				segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
+				m = 0;
+			}
+		}
+
+		kunmap_atomic(pt_vaddr);
+
+		first_pte = 0;
+		act_pd++;
+	}
+}
+
+static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
+				    unsigned first_entry, unsigned num_entries,
+				    struct page **pages, uint32_t pte_flags)
+{
+	uint32_t *pt_vaddr, pte;
+	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+	unsigned last_pte, i;
+	dma_addr_t page_addr;
+
+	while (num_entries) {
+		last_pte = first_pte + num_entries;
+		last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES);
+
+		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
+
+		for (i = first_pte; i < last_pte; i++) {
+			page_addr = page_to_phys(*pages);
+			pte = GEN6_PTE_ADDR_ENCODE(page_addr);
+			pt_vaddr[i] = pte | pte_flags;
+
+			pages++;
+		}
+
+		kunmap_atomic(pt_vaddr);
+
+		num_entries -= last_pte - first_pte;
+		first_pte = 0;
+		act_pd++;
+	}
+}
+
+void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
+			    struct drm_i915_gem_object *obj,
+			    enum i915_cache_level cache_level)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t pte_flags = GEN6_PTE_VALID;
+
+	switch (cache_level) {
+	case I915_CACHE_LLC_MLC:
+		pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
+		break;
+	case I915_CACHE_LLC:
+		pte_flags |= GEN6_PTE_CACHE_LLC;
+		break;
+	case I915_CACHE_NONE:
+		pte_flags |= GEN6_PTE_UNCACHED;
+		break;
+	default:
+		BUG();
+	}
+
+	if (dev_priv->mm.gtt->needs_dmar) {
+		BUG_ON(!obj->sg_list);
+
+		i915_ppgtt_insert_sg_entries(ppgtt,
+					     obj->sg_list,
+					     obj->num_sg,
+					     obj->gtt_space->start >> PAGE_SHIFT,
+					     pte_flags);
+	} else
+		i915_ppgtt_insert_pages(ppgtt,
+					obj->gtt_space->start >> PAGE_SHIFT,
+					obj->base.size >> PAGE_SHIFT,
+					obj->pages,
+					pte_flags);
+}
+
+void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
+			      struct drm_i915_gem_object *obj)
+{
+	i915_ppgtt_clear_range(ppgtt,
+			       obj->gtt_space->start >> PAGE_SHIFT,
+			       obj->base.size >> PAGE_SHIFT);
+}
+
 /* XXX kill agp_type! */
 static unsigned int cache_level_to_agp_type(struct drm_device *dev,
 					    enum i915_cache_level cache_level)
```
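A note on the index arithmetic shared by i915_ppgtt_clear_range() and i915_ppgtt_insert_pages(): on gen6 each ppgtt page table is a single 4 KiB page of 4-byte PTEs, so a linear GTT entry index splits into a page-directory slot (division by I915_PPGTT_PT_ENTRIES) plus an offset within that table (modulo). Each loop pass is clamped to the end of the current table; every following table then starts at PTE 0. The standalone userspace sketch below only illustrates that walk; the `walk_range()` helper is invented, and the 1024-entry table size is stated here as an assumption about the gen6 layout, not taken from this diff.

```c
/* Minimal standalone sketch of the page-table walk used by the new
 * helpers. Prints which PTE span of which page table each loop pass
 * would touch. Illustration only; not kernel code. */
#include <stdio.h>

#define I915_PPGTT_PT_ENTRIES 1024	/* assumed: 4 KiB page / 4-byte PTE */

static void walk_range(unsigned first_entry, unsigned num_entries)
{
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;	  /* which page table */
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; /* offset inside it */
	unsigned last_pte;

	while (num_entries) {
		/* Clamp this pass to the end of the current page table. */
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		printf("pd[%u]: ptes %u..%u\n", act_pd, first_pte, last_pte - 1);

		num_entries -= last_pte - first_pte;
		first_pte = 0;	/* every later table starts at pte 0 */
		act_pd++;
	}
}

int main(void)
{
	/* e.g. an object at GTT offset 4 MiB, 6 MiB in size: entries
	 * 1024..2559 span page tables 1 and 2. */
	walk_range(1024, 1536);
	return 0;
}
```

Running it prints `pd[1]: ptes 0..1023` followed by `pd[2]: ptes 0..511`, mirroring how the kernel loops reset `first_pte` and advance `act_pd` between passes.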