author | Imre Deak <imre.deak@intel.com> | 2013-03-26 15:14:18 +0200
committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2013-03-27 17:13:44 +0100
commit | 2db76d7c3c6db93058f983c8240f7c7c25e87ee6 (patch)
tree | a35f01706b353841b71645da050bc721c9f0467b /drivers/gpu
parent | 693db1842d864ca2771e881127cdb4d09979758b (diff)
lib/scatterlist: sg_page_iter: support sg lists w/o backing pages
The i915 driver uses sg lists for memory without backing 'struct page'
pages, similarly to other IO memory regions, setting only the DMA
address for these. It does this so that it can program the HW MMU
tables in a uniform way for sg lists both with and without backing pages.
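For illustration only (not part of this patch; the function and parameter names below are made up), a minimal sketch of what such a page-less sg list looks like: only the DMA address and length of the entry are filled in, and sg_set_page() is never called.

```c
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Illustrative sketch: describe an IO region with no backing struct page. */
static struct sg_table *make_pageless_sg(dma_addr_t region_dma_addr,
					 unsigned int region_size)
{
	struct sg_table *st;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return NULL;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return NULL;
	}

	/* No sg_set_page(): there is no struct page behind this memory. */
	st->sgl->offset = 0;
	st->sgl->length = region_size;
	sg_dma_address(st->sgl) = region_dma_addr;
	sg_dma_len(st->sgl) = region_size;

	return st;
}
```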
Without a valid page pointer we can't call nth_page to get the current
page in __sg_page_iter_next, so add a helper that relevant users can
call separately. Also add a helper to get the DMA address of the current
page (idea from Daniel).
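The two helpers live in include/linux/scatterlist.h, which is outside the drivers/gpu diffstat shown below; roughly, they look like the sketch here (the DMA variant mirrors the open-coded expression removed from i915_gem_gtt.c).

```c
/* Sketch of the new iterator accessors (see include/linux/scatterlist.h). */
static inline struct page *sg_page_iter_page(struct sg_page_iter *piter)
{
	return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
}

static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter)
{
	return sg_dma_address(piter->sg) + (piter->sg_pgoffset << PAGE_SHIFT);
}
```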
Convert all places in i915 to use the new API.
Signed-off-by: Imre Deak <imre.deak@intel.com>
Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/drm/drm_cache.c | 2 |
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.h | 2 |
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 8 |
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_dmabuf.c | 2 |
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_gtt.c | 6 |
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_tiling.c | 4 |
6 files changed, 11 insertions, 13 deletions
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index bc8edbeca3fd..bb8f58012189 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -109,7 +109,7 @@ drm_clflush_sg(struct sg_table *st)
 
 		mb();
 		for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-			drm_clflush_page(sg_iter.page);
+			drm_clflush_page(sg_page_iter_page(&sg_iter));
 		mb();
 
 		return;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1d091ea12fad..f69538508d8c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1543,7 +1543,7 @@ static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *
 	struct sg_page_iter sg_iter;
 
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
-		return sg_iter.page;
+		return sg_page_iter_page(&sg_iter);
 
 	return NULL;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a1123a32dc27..911bd40ef513 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -442,7 +442,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
 			 offset >> PAGE_SHIFT) {
-		struct page *page = sg_iter.page;
+		struct page *page = sg_page_iter_page(&sg_iter);
 
 		if (remain <= 0)
 			break;
@@ -765,7 +765,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
 			 offset >> PAGE_SHIFT) {
-		struct page *page = sg_iter.page;
+		struct page *page = sg_page_iter_page(&sg_iter);
 		int partial_cacheline_write;
 
 		if (remain <= 0)
@@ -1647,7 +1647,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 		obj->dirty = 0;
 
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		struct page *page = sg_iter.page;
+		struct page *page = sg_page_iter_page(&sg_iter);
 
 		if (obj->dirty)
 			set_page_dirty(page);
@@ -1827,7 +1827,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 err_pages:
 	sg_mark_end(sg);
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-		page_cache_release(sg_iter.page);
+		page_cache_release(sg_page_iter_page(&sg_iter));
 	sg_free_table(st);
 	kfree(st);
 	return PTR_ERR(page);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 898615d2d5e2..c6dfc1466e3a 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -130,7 +130,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 
 	i = 0;
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
-		pages[i++] = sg_iter.page;
+		pages[i++] = sg_page_iter_page(&sg_iter);
 
 	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
 	drm_free_large(pages);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 4cbae7bbb833..24a23b31b55f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -123,8 +123,7 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
 		dma_addr_t page_addr;
 
-		page_addr = sg_dma_address(sg_iter.sg) +
-			(sg_iter.sg_pgoffset << PAGE_SHIFT);
+		page_addr = sg_page_iter_dma_address(&sg_iter);
 		pt_vaddr[act_pte] = gen6_pte_encode(ppgtt->dev, page_addr,
 						    cache_level);
 		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
@@ -424,8 +423,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
 	dma_addr_t addr;
 
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
-		addr = sg_dma_address(sg_iter.sg) +
-			(sg_iter.sg_pgoffset << PAGE_SHIFT);
+		addr = sg_page_iter_dma_address(&sg_iter);
 		iowrite32(gen6_pte_encode(dev, addr, level), &gtt_entries[i]);
 		i++;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index f799708bcb85..c807eb93755b 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -481,7 +481,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 
 	i = 0;
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		struct page *page = sg_iter.page;
+		struct page *page = sg_page_iter_page(&sg_iter);
 		char new_bit_17 = page_to_phys(page) >> 17;
 		if ((new_bit_17 & 0x1) !=
 		    (test_bit(i, obj->bit_17) != 0)) {
@@ -511,7 +511,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 
 	i = 0;
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		if (page_to_phys(sg_iter.page) & (1 << 17))
+		if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
 			__set_bit(i, obj->bit_17);
 		else
 			__clear_bit(i, obj->bit_17);
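Taken together, the converted loops all follow one pattern; the sketch below (names are illustrative, not from the patch) shows how a caller walks an sg list with the new accessors, where sg_page_iter_page() is only legal when the list has backing pages.

```c
#include <linux/scatterlist.h>
#include <linux/types.h>

/* Illustrative only: walk an sg list with the new page iterator helpers. */
static void walk_sg_pages(struct sg_table *st, bool has_backing_pages)
{
	struct sg_page_iter sg_iter;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		/* Valid once DMA addresses are set (mapped or DMA-only list). */
		dma_addr_t dma = sg_page_iter_dma_address(&sg_iter);

		if (has_backing_pages) {
			/* Only valid when a struct page backs this entry. */
			struct page *page = sg_page_iter_page(&sg_iter);

			(void)page; /* e.g. flush, vmap or mark it dirty */
		}

		(void)dma; /* e.g. program one GTT/PPGTT entry with it */
	}
}
```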