diff options
author:    Thomas Hellstrom <thomas@tungstengraphics.com>  2006-03-13 10:42:12 +0000
committer: Thomas Hellstrom <thomas@tungstengraphics.com>  2006-03-13 10:42:12 +0000
commit:    c818b5a94165160d0218bf2f3fc738dcee4c2d9f (patch)
tree:      a1b3816cd53c98ec872c34bedac691b88b2647d7
parent:    ed7d564a526b7773d97b7b8549408f3f631c895f (diff)
ttm: Fix for backdoor mapping.
-rw-r--r--  linux-core/drm_ttm.c |  83
-rw-r--r--  linux-core/drm_vm.c  | 136
2 files changed, 134 insertions, 85 deletions
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index 065713af..0f63963c 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -65,37 +65,29 @@ void pmd_clear_bad(pmd_t * pmd) * Invalidate or update all PTEs associated with a vma. */ +#define DRM_TTM_UNMAP 0x01 +#define DRM_TTM_REWRITE 0x02 + static void change_pte_range(struct mm_struct *mm, pmd_t * pmd, unsigned long addr, unsigned long end, - pgprot_t newprot, int unmap) + pgprot_t newprot, unsigned long pfn, + int flags) { pte_t *pte; - int count; - struct page *page; pte = pte_offset_map(pmd, addr); do { - if (unmap && pte_present(*pte)) { -#if 0 - count = get_mm_counter(mm, rss); - if (count) { - page = pte_page(*pte); - ptep_get_and_clear(mm, addr, pte); - dec_mm_counter(mm, rss); - atomic_add_negative(-1, &page->_mapcount); - put_page(page); - lazy_mmu_prot_update(*pte); - } -#else + if ((flags & DRM_TTM_UNMAP) && pte_present(*pte)) { + pte_t ptent; + ptent = *pte; ptep_get_and_clear(mm, addr,pte); - lazy_mmu_prot_update(*pte); -#endif + lazy_mmu_prot_update(ptent); } - if (pte_present(*pte)) { + if (flags & DRM_TTM_REWRITE) { + unsigned long new_pfn = (pfn + addr) >> PAGE_SHIFT; pte_t ptent; - ptent = - pte_modify(ptep_get_and_clear(mm, addr, pte), - newprot); + ptep_get_and_clear(mm, addr, pte); + ptent = pfn_pte(new_pfn, newprot); set_pte_at(mm, addr, pte, ptent); lazy_mmu_prot_update(ptent); } @@ -105,7 +97,8 @@ static void change_pte_range(struct mm_struct *mm, pmd_t * pmd, static inline void change_pmd_range(struct mm_struct *mm, pud_t * pud, unsigned long addr, unsigned long end, - pgprot_t newprot, int unmap) + pgprot_t newprot, unsigned long pfn, + int flags) { pmd_t *pmd; unsigned long next; @@ -115,13 +108,14 @@ static inline void change_pmd_range(struct mm_struct *mm, pud_t * pud, next = pmd_addr_end(addr, end); if (pmd_none_or_clear_bad(pmd)) continue; - change_pte_range(mm, pmd, addr, next, newprot, unmap); + change_pte_range(mm, pmd, addr, next, newprot, pfn, flags); } 
while (pmd++, addr = next, addr != end); } static inline void change_pud_range(struct mm_struct *mm, pgd_t * pgd, unsigned long addr, unsigned long end, - pgprot_t newprot, int unmap) + pgprot_t newprot, unsigned long pfn, + int flags) { pud_t *pud; unsigned long next; @@ -129,19 +123,21 @@ static inline void change_pud_range(struct mm_struct *mm, pgd_t * pgd, pud = pud_offset(pgd, addr); do { next = pud_addr_end(addr, end); - if (pud_none_or_clear_bad(pud)) + if (pud_none_or_clear_bad(pud)) continue; - change_pmd_range(mm, pud, addr, next, newprot, unmap); + change_pmd_range(mm, pud, addr, next, newprot, pfn, flags); } while (pud++, addr = next, addr != end); } static void drm_change_protection(struct vm_area_struct *vma, unsigned long addr, unsigned long end, - pgprot_t newprot, int unmap) + pgprot_t newprot, unsigned long pfn, + int flags) { struct mm_struct *mm = vma->vm_mm; pgd_t *pgd; unsigned long next; + pfn = (pfn << PAGE_SHIFT) - addr; BUG_ON(addr >= end); pgd = pgd_offset(mm, addr); @@ -150,7 +146,7 @@ static void drm_change_protection(struct vm_area_struct *vma, next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; - change_pud_range(mm, pgd, addr, next, newprot, unmap); + change_pud_range(mm, pgd, addr, next, newprot, pfn, flags); } while (pgd++, addr = next, addr != end); } @@ -179,6 +175,7 @@ static int ioremap_vmas(drm_ttm_t * ttm, unsigned long page_offset, if (ret) break; } + global_flush_tlb(); return ret; } @@ -203,8 +200,8 @@ static int unmap_vma_pages(drm_ttm_t * ttm, unsigned long page_offset, (page_offset << PAGE_SHIFT), entry->vma->vm_start + ((page_offset + num_pages) << PAGE_SHIFT), - entry->vma->vm_page_prot, TRUE); - + entry->vma->vm_page_prot, 0, + DRM_TTM_UNMAP); } for (cur_page = first_page; cur_page != last_page; ++cur_page) { @@ -353,7 +350,8 @@ drm_ttm_t *drm_init_ttm(struct drm_device * dev, unsigned long size) */ static int drm_set_caching(drm_ttm_t * ttm, unsigned long page_offset, - unsigned long num_pages, 
int noncached) + unsigned long num_pages, int noncached, + int do_tlbflush) { int i, cur; struct page **cur_page; @@ -391,7 +389,8 @@ static int drm_set_caching(drm_ttm_t * ttm, unsigned long page_offset, } } } - global_flush_tlb(); + if (do_tlbflush) + global_flush_tlb(); if (do_spinlock) { spin_unlock(¤t->mm->page_table_lock); up_write(¤t->mm->mmap_sem); @@ -450,7 +449,7 @@ int drm_evict_ttm_region(drm_ttm_backend_list_t * entry) be->unbind(entry->be); if (ttm && be->needs_cache_adjust(be)) { drm_set_caching(ttm, entry->page_offset, - entry->num_pages, 0); + entry->num_pages, 0, 1); } break; default: @@ -487,7 +486,7 @@ void drm_destroy_ttm_region(drm_ttm_backend_list_t * entry) be->clear(entry->be); if (be->needs_cache_adjust(be)) { drm_set_caching(ttm, entry->page_offset, - entry->num_pages, 0); + entry->num_pages, 0, 1); } be->destroy(be); } @@ -575,6 +574,7 @@ int drm_create_ttm_region(drm_ttm_t * ttm, unsigned long page_offset, entry->mm_node = NULL; entry->mm = &ttm->dev->mm_driver->ttm_mm; ttm->aperture_base = be->aperture_base; + *region = entry; return 0; } @@ -605,8 +605,10 @@ int drm_bind_ttm_region(drm_ttm_backend_list_t * region, ttm = region->owner; if (ttm && be->needs_cache_adjust(be)) { - drm_set_caching(ttm, region->page_offset, region->num_pages, - DRM_TTM_PAGE_UNCACHED); + drm_set_caching(ttm, region->page_offset, region->num_pages, + DRM_TTM_PAGE_UNCACHED, 0); + ioremap_vmas(ttm, region->page_offset, region->num_pages, + aper_offset); } if ((ret = be->bind(be, aper_offset))) { @@ -615,15 +617,6 @@ int drm_bind_ttm_region(drm_ttm_backend_list_t * region, return ret; } - if (ttm && be->needs_cache_adjust(be)) { - if ((ret = ioremap_vmas(ttm, region->page_offset, region->num_pages, - aper_offset))) { - drm_unbind_ttm_region(region); - DRM_ERROR("Couldn't remap AGP aperture.\n"); - return ret; - } - } - cur_page_flag = ttm->page_flags + region->page_offset; for (i = 0; i < region->num_pages; ++i) { DRM_MASK_VAL(*cur_page_flag, DRM_TTM_MASK_PFN, 
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index a8502a28..a214bc4e 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -85,6 +85,7 @@ pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) else tmp = pgprot_noncached(tmp); #endif + return tmp; } /** @@ -222,7 +223,89 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, return page; } -#define TTM_BACKDOOR + +static int drm_ttm_remap_bound_pfn(struct vm_area_struct *vma, + unsigned long address, + unsigned long size) +{ + unsigned long + page_offset = (address - vma->vm_start) >> PAGE_SHIFT; + unsigned long + num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + drm_ttm_vma_list_t *entry = (drm_ttm_vma_list_t *) + vma->vm_private_data; + drm_map_t *map = entry->map; + drm_ttm_t *ttm = (drm_ttm_t *) map->offset; + unsigned long i, cur_pfn; + unsigned long start = 0; + unsigned long end = 0; + unsigned long last_pfn = 0; + unsigned long start_pfn = 0; + int bound_sequence = FALSE; + int ret = 0; + uint32_t cur_flags; + + for (i=page_offset; i<page_offset + num_pages; ++i) { + cur_flags = ttm->page_flags[i]; + + if (!bound_sequence && (cur_flags & DRM_TTM_PAGE_UNCACHED)) { + + start = i; + end = i; + last_pfn = (cur_flags & DRM_TTM_MASK_PFN) >> PAGE_SHIFT; + start_pfn = last_pfn; + bound_sequence = TRUE; + + } else if (bound_sequence) { + + cur_pfn = (cur_flags & DRM_TTM_MASK_PFN) >> PAGE_SHIFT; + + if ( !(cur_flags & DRM_TTM_PAGE_UNCACHED) || + (cur_pfn != last_pfn + 1)) { + + ret = io_remap_pfn_range(vma, + vma->vm_start + (start << PAGE_SHIFT), + (ttm->aperture_base >> PAGE_SHIFT) + + start_pfn, + (end - start + 1) << PAGE_SHIFT, + drm_io_prot(_DRM_AGP, vma)); + + if (ret) + break; + + bound_sequence = (cur_flags & DRM_TTM_PAGE_UNCACHED); + if (!bound_sequence) + continue; + + start = i; + end = i; + last_pfn = cur_pfn; + start_pfn = last_pfn; + + } else { + + end++; + last_pfn = cur_pfn; + + } + } + } + + if (!ret && bound_sequence) { + ret = 
io_remap_pfn_range(vma, + vma->vm_start + (start << PAGE_SHIFT), + (ttm->aperture_base >> PAGE_SHIFT) + + start_pfn, + (end - start + 1) << PAGE_SHIFT, + drm_io_prot(_DRM_AGP, vma)); + } + + if (ret) { + DRM_ERROR("Map returned %c\n", ret); + } + return ret; +} + static __inline__ struct page *drm_do_vm_ttm_nopage(struct vm_area_struct *vma, unsigned long address) { @@ -233,7 +316,6 @@ static __inline__ struct page *drm_do_vm_ttm_nopage(struct vm_area_struct *vma, struct page *page; drm_ttm_t *ttm; pgprot_t default_prot; - unsigned long aper_loc = 0; uint32_t page_flags; if (address > vma->vm_end) @@ -248,41 +330,17 @@ static __inline__ struct page *drm_do_vm_ttm_nopage(struct vm_area_struct *vma, page_flags = ttm->page_flags[page_offset]; -#ifdef TTM_BACKDOOR - if (page_flags & DRM_TTM_PAGE_UNCACHED) { - unsigned long pfn; - - BUG_ON(!page); - aper_loc = ttm->aperture_base + - (page_flags & DRM_TTM_MASK_PFN); - pfn = aper_loc >> PAGE_SHIFT; - - if (!pfn_valid(pfn)) { - DRM_ERROR("Invalid page encountered while trying to " - "map backdoor ttm page.\n"); - return NOPAGE_SIGBUS; - } - - page = pfn_to_page(pfn); - - if (PageAnon(page)) { - DRM_ERROR("Anonymous page trying to map aperture " - "at 0x%08lx\n", (unsigned long) aper_loc); - return NOPAGE_SIGBUS; - } - } -#endif if (!page) { page = ttm->pages[page_offset] = alloc_page(GFP_USER); + SetPageReserved(page); } if (!page) return NOPAGE_OOM; - SetPageReserved(page); -#if 0 get_page(page); -#endif + + /* * FIXME: Potential security hazard: Have someone export the * mm subsystem's protection_map instead. 
Otherwise we will @@ -293,17 +351,11 @@ static __inline__ struct page *drm_do_vm_ttm_nopage(struct vm_area_struct *vma, default_prot = drm_prot_map[vma->vm_flags & 0x0f]; if (page_flags & DRM_TTM_PAGE_UNCACHED) { -#ifdef TTM_BACKDOOR - pgprot_val(default_prot) |= _PAGE_PCD; - pgprot_val(default_prot) &= ~_PAGE_PWT; -#else - default_prot = pgprot_noncached(default_prot); -#endif + DRM_ERROR("Uncached nopage\n"); + default_prot = pgprot_noncached(default_prot); } vma->vm_page_prot = default_prot; - - return page; } @@ -682,8 +734,8 @@ static void drm_vm_ttm_close(struct vm_area_struct *vma) drm_ttm_t *ttm; int found_maps; struct list_head *list; - drm_map_list_t *r_list; - drm_device_t *dev; + drm_map_list_t *r_list; + drm_device_t *dev; drm_vm_close(vma); if (ttm_vma) { @@ -935,7 +987,11 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_ops = &drm_vm_ttm_ops; vma->vm_private_data = (void *) &tmp_vma; vma->vm_file = filp; - vma->vm_flags |= VM_RESERVED; + vma->vm_flags |= VM_RESERVED | VM_IO; + if (drm_ttm_remap_bound_pfn(vma, + vma->vm_start, + vma->vm_end - vma->vm_start)) + return -EAGAIN; drm_vm_ttm_open(vma); return 0; } |