author | Michel Daenzer <michel@daenzer.net> | 2003-05-16 23:41:27 +0000
---|---|---
committer | Michel Daenzer <michel@daenzer.net> | 2003-05-16 23:41:27 +0000
commit | e5d3c7f260d18168eec755c73f01ac617390d96c (patch) |
tree | 9628a3f566362142f10f7d0109a1a218cbc5ddc4 /linux-core/drm_memory.h |
parent | 1d5bf7a7de35f87e68cce740151fd46cd8fa2ff3 (diff) |
Support AGP bridges where the AGP aperture can't be accessed directly by
the CPU (David Mosberger, Benjamin Herrenschmidt, myself, Paul
Mackerras, Jeff Wiedemeier)
Diffstat (limited to 'linux-core/drm_memory.h')
-rw-r--r-- | linux-core/drm_memory.h | 165
1 file changed, 159 insertions, 6 deletions
```diff
diff --git a/linux-core/drm_memory.h b/linux-core/drm_memory.h
index ae5737f1..5f111459 100644
--- a/linux-core/drm_memory.h
+++ b/linux-core/drm_memory.h
@@ -39,6 +39,159 @@
  */
 
 #define DEBUG_MEMORY 0
 
+/* Need the 4-argument version of vmap(). */
+#if __REALLY_HAVE_AGP && defined(VMAP_4_ARGS)
+
+#include <linux/vmalloc.h>
+
+#ifdef HAVE_PAGE_AGP
+#include <asm/agp.h>
+#else
+# ifdef __powerpc__
+#  define PAGE_AGP	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+# else
+#  define PAGE_AGP	PAGE_KERNEL
+# endif
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+# include <asm/tlbflush.h>
+#else
+# define pte_offset_kernel(dir, address)	pte_offset(dir, address)
+# define pte_pfn(pte)				(pte_page(pte) - mem_map)
+# define pfn_to_page(pfn)			(mem_map + (pfn))
+# define flush_tlb_kernel_range(s,e)		flush_tlb_all()
+#endif
+
+/*
+ * Find the drm_map that covers the range [offset, offset+size).
+ */
+static inline drm_map_t *
+drm_lookup_map (unsigned long offset, unsigned long size, drm_device_t *dev)
+{
+	struct list_head *list;
+	drm_map_list_t *r_list;
+	drm_map_t *map;
+
+	list_for_each(list, &dev->maplist->head) {
+		r_list = (drm_map_list_t *) list;
+		map = r_list->map;
+		if (!map)
+			continue;
+		if (map->offset <= offset && (offset + size) <= (map->offset + map->size))
+			return map;
+	}
+	return NULL;
+}
+
+static inline void *
+agp_remap (unsigned long offset, unsigned long size, drm_device_t *dev)
+{
+	unsigned long *phys_addr_map, i, num_pages = PAGE_ALIGN(size) / PAGE_SIZE;
+	struct drm_agp_mem *agpmem;
+	struct page **page_map;
+	void *addr;
+
+	size = PAGE_ALIGN(size);
+
+#ifdef __alpha__
+	offset -= dev->hose->mem_space->start;
+#endif
+
+	for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
+		if (agpmem->bound <= offset
+		    && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= (offset + size))
+			break;
+	if (!agpmem)
+		return NULL;
+
+	/*
+	 * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
+	 * the CPU do not get remapped by the GART.  We fix this by using the kernel's
+	 * page-table instead (that's probably faster anyhow...).
+	 */
+	/* note: use vmalloc() because num_pages could be large... */
+	page_map = vmalloc(num_pages * sizeof(struct page *));
+	if (!page_map)
+		return NULL;
+
+	phys_addr_map = agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
+	for (i = 0; i < num_pages; ++i)
+		page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
+	addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
+	vfree(page_map);
+	if (!addr)
+		return NULL;
+
+	flush_tlb_kernel_range((unsigned long) addr, (unsigned long) addr + size);
+	return addr;
+}
+
+static inline unsigned long
+drm_follow_page (void *vaddr)
+{
+	pgd_t *pgd = pgd_offset_k((unsigned long) vaddr);
+	pmd_t *pmd = pmd_offset(pgd, (unsigned long) vaddr);
+	pte_t *ptep = pte_offset_kernel(pmd, (unsigned long) vaddr);
+	return pte_pfn(*ptep) << PAGE_SHIFT;
+}
+
+#endif /* __REALLY_HAVE_AGP && defined(VMAP_4_ARGS) */
+
+static inline void *drm_ioremap(unsigned long offset, unsigned long size, drm_device_t *dev)
+{
+#if __REALLY_HAVE_AGP && defined(VMAP_4_ARGS)
+	if (dev->agp && dev->agp->cant_use_aperture) {
+		drm_map_t *map = drm_lookup_map(offset, size, dev);
+
+		if (map && map->type == _DRM_AGP)
+			return agp_remap(offset, size, dev);
+	}
+#endif
+
+	return ioremap(offset, size);
+}
+
+static inline void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
+					drm_device_t *dev)
+{
+#if __REALLY_HAVE_AGP && defined(VMAP_4_ARGS)
+	if (dev->agp && dev->agp->cant_use_aperture) {
+		drm_map_t *map = drm_lookup_map(offset, size, dev);
+
+		if (map && map->type == _DRM_AGP)
+			return agp_remap(offset, size, dev);
+	}
+#endif
+
+	return ioremap_nocache(offset, size);
+}
+
+static inline void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *dev)
+{
+#if __REALLY_HAVE_AGP && defined(VMAP_4_ARGS)
+	/*
+	 * This is a bit ugly.  It would be much cleaner if the DRM API would use separate
+	 * routines for handling mappings in the AGP space.  Hopefully this can be done in
+	 * a future revision of the interface...
+	 */
+	if (dev->agp && dev->agp->cant_use_aperture
+	    && ((unsigned long) pt >= VMALLOC_START && (unsigned long) pt < VMALLOC_END))
+	{
+		unsigned long offset;
+		drm_map_t *map;
+
+		offset = drm_follow_page(pt) | ((unsigned long) pt & ~PAGE_MASK);
+		map = drm_lookup_map(offset, size, dev);
+		if (map && map->type == _DRM_AGP) {
+			vunmap(pt);
+			return;
+		}
+	}
+#endif
+
+	iounmap(pt);
+}
 
 #if DEBUG_MEMORY
 #include "drm_memory_debug.h"
@@ -119,19 +272,19 @@ void DRM(free_pages)(unsigned long address, int order, int area)
 	free_pages(address, order);
 }
 
-void *DRM(ioremap)(unsigned long offset, unsigned long size)
+void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev)
 {
-	return ioremap(offset, size);
+	return drm_ioremap(offset, size, dev);
 }
 
-void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size)
+void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, drm_device_t *dev)
 {
-	return ioremap_nocache(offset, size);
+	return drm_ioremap_nocache(offset, size, dev);
 }
 
-void DRM(ioremapfree)(void *pt, unsigned long size)
+void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev)
 {
-	iounmap(pt);
+	drm_ioremapfree(pt, size, dev);
 }
 
 #if __REALLY_HAVE_AGP
```
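For orientation, here is a minimal sketch of the caller side of the changed interface; the function name and the use of map->handle are illustrative assumptions, not code from this commit. Every DRM(ioremap), DRM(ioremap_nocache) and DRM(ioremapfree) call site now passes the drm_device_t pointer so that, on devices with dev->agp->cant_use_aperture set, _DRM_AGP maps are routed through agp_remap()/vunmap() instead of ioremap()/iounmap().

```c
/* Hypothetical caller (not part of this commit): map and unmap an
 * AGP region using the new three-argument interface.  When
 * dev->agp->cant_use_aperture is set, DRM(ioremap) builds the CPU
 * mapping from the GART backing pages via agp_remap() rather than
 * ioremap()ing aperture addresses the CPU cannot reach. */
static int example_map_agp_region(drm_device_t *dev, drm_map_t *map)
{
	map->handle = DRM(ioremap)(map->offset, map->size, dev);
	if (!map->handle)
		return -ENOMEM;

	/* ... access the buffer through map->handle ... */

	DRM(ioremapfree)(map->handle, map->size, dev);
	map->handle = NULL;
	return 0;
}
```

Passing the device pointer is what lets the helpers look up the drm_map covering the range and decide per mapping whether the CPU may go through the aperture at all.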