author     Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>   2006-10-13 13:41:59 +0200
committer  Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>   2006-10-13 13:41:59 +0200
commit     3260daab96672a88d4266058ec2a7ce30f8a243a (patch)
tree       ad1857ee3fe1486019dea52acdc5ae6e3a9113e9
Initial commit
-rw-r--r--   Makefile       17
-rw-r--r--   agp.h         386
-rw-r--r--   backend.c     457
-rw-r--r--   frontend.c   1152
-rw-r--r--   generic.c    1515
-rw-r--r--   intel-agp.c  2195
-rw-r--r--   isoch.c       475
-rw-r--r--   picker.c       30
-rw-r--r--   testagp.c    1573
-rw-r--r--   testgart.c    159
10 files changed, 7959 insertions, 0 deletions
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..a5c58a0
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,17 @@
+ifneq ($(KERNELRELEASE),)
+agpgart-y := backend.o frontend.o generic.o isoch.o
+
+obj-$(CONFIG_AGP) += agpgart.o
+obj-$(CONFIG_AGP_INTEL) += intel-agp.o
+obj-$(CONFIG_AGP_INTEL_MCH) += intel-agp.o
+else
+KDIR := /lib/modules/$(shell uname -r)/build
+PWD := $(shell pwd)
+
+default:
+ $(CC) -I. -static -o testgart testgart.c
+ $(MAKE) -C $(KDIR) SUBDIRS=$(PWD) modules
+endif
+
+clean:
+ rm -f *.o *.ko *.mod.c testgart
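A rough sketch of how this out-of-tree tree is meant to be driven (an assumption based on the Makefile above, not part of the commit itself: it presumes the running kernel's build directory is installed under /lib/modules and that its configuration selects CONFIG_AGP and CONFIG_AGP_INTEL as modules):

    make                    # builds testgart, then agpgart.ko / intel-agp.ko via $(KDIR)
    insmod agpgart.ko       # as root
    insmod intel-agp.ko
    ./testgart              # simple GART exerciser included in this commit
    make clean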
diff --git a/agp.h b/agp.h
new file mode 100644
index 0000000..1c54278
--- /dev/null
+++ b/agp.h
@@ -0,0 +1,386 @@
+/*
+ * AGPGART
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2004 Dave Jones
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _AGP_BACKEND_PRIV_H
+#define _AGP_BACKEND_PRIV_H 1
+
+#include <asm/agp.h> /* for flush_agp_cache() */
+#include <linux/version.h>
+
+#define PFX "agpgart: "
+
+//#define AGP_DEBUG 1
+#ifdef AGP_DEBUG
+#define DBG(x,y...) printk (KERN_DEBUG PFX "%s: " x "\n", __FUNCTION__ , ## y)
+#else
+#define DBG(x,y...) do { } while (0)
+#endif
+
+extern struct agp_bridge_data *agp_bridge;
+
+enum aper_size_type {
+ U8_APER_SIZE,
+ U16_APER_SIZE,
+ U32_APER_SIZE,
+ LVL2_APER_SIZE,
+ FIXED_APER_SIZE
+};
+
+struct gatt_mask {
+ unsigned long mask;
+ u32 type;
+ /* totally device specific, for integrated chipsets that
+ * might have different types of memory masks. For other
+ * devices this will probably be ignored */
+};
+
+struct aper_size_info_8 {
+ int size;
+ int num_entries;
+ int page_order;
+ u8 size_value;
+};
+
+struct aper_size_info_16 {
+ int size;
+ int num_entries;
+ int page_order;
+ u16 size_value;
+};
+
+struct aper_size_info_32 {
+ int size;
+ int num_entries;
+ int page_order;
+ u32 size_value;
+};
+
+struct aper_size_info_lvl2 {
+ int size;
+ int num_entries;
+ u32 size_value;
+};
+
+struct aper_size_info_fixed {
+ int size;
+ int num_entries;
+ int page_order;
+};
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+struct agp_bridge_driver {
+ struct module *owner;
+ void *aperture_sizes;
+ int num_aperture_sizes;
+ enum aper_size_type size_type;
+ int cant_use_aperture;
+ int needs_scratch_page;
+ struct gatt_mask *masks;
+ int (*fetch_size)(void);
+ int (*configure)(void);
+ void (*agp_enable)(u32);
+ void (*cleanup)(void);
+ void (*tlb_flush)(struct agp_memory *);
+ unsigned long (*mask_memory)(unsigned long, int);
+ void (*cache_flush)(void);
+ int (*create_gatt_table)(void);
+ int (*free_gatt_table)(void);
+ int (*insert_memory)(struct agp_memory *, off_t, int);
+ int (*remove_memory)(struct agp_memory *, off_t, int);
+ struct agp_memory *(*alloc_by_type) (size_t, int);
+ void (*free_by_type)(struct agp_memory *);
+ void *(*agp_alloc_page)(void);
+ void (*agp_destroy_page)(void *);
+};
+#else
+struct agp_bridge_driver {
+ struct module *owner;
+ void *aperture_sizes;
+ int num_aperture_sizes;
+ enum aper_size_type size_type;
+ int cant_use_aperture;
+ int needs_scratch_page;
+ struct gatt_mask *masks;
+ int (*fetch_size)(void);
+ int (*configure)(void);
+ void (*agp_enable)(struct agp_bridge_data *, u32);
+ void (*cleanup)(void);
+ void (*tlb_flush)(struct agp_memory *);
+ unsigned long (*mask_memory)(struct agp_bridge_data *,
+ unsigned long, int);
+ void (*cache_flush)(void);
+ int (*create_gatt_table)(struct agp_bridge_data *);
+ int (*free_gatt_table)(struct agp_bridge_data *);
+ int (*insert_memory)(struct agp_memory *, off_t, int);
+ int (*remove_memory)(struct agp_memory *, off_t, int);
+ struct agp_memory *(*alloc_by_type) (size_t, int);
+ void (*free_by_type)(struct agp_memory *);
+ void *(*agp_alloc_page)(struct agp_bridge_data *);
+ void (*agp_destroy_page)(void *);
+};
+#endif
+
+struct agp_bridge_data {
+ struct agp_version *version;
+ struct agp_bridge_driver *driver;
+ struct vm_operations_struct *vm_ops;
+ void *previous_size;
+ void *current_size;
+ void *dev_private_data;
+ struct pci_dev *dev;
+ u32 __iomem *gatt_table;
+ u32 *gatt_table_real;
+ unsigned long scratch_page;
+ unsigned long scratch_page_real;
+ unsigned long gart_bus_addr;
+ unsigned long gatt_bus_addr;
+ u32 mode;
+ enum chipset_type type;
+ unsigned long *key_list;
+ atomic_t current_memory_agp;
+ atomic_t agp_in_use;
+ int max_memory_agp; /* in number of pages */
+ int aperture_size_idx;
+ int capndx;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ char major_version;
+ char minor_version;
+#else
+ int flags;
+ char major_version;
+ char minor_version;
+ struct list_head list;
+ u32 apbase_config;
+#endif
+};
+
+
+#define KB(x) ((x) * 1024)
+#define MB(x) (KB (KB (x)))
+#define GB(x) (MB (KB (x)))
+
+#define A_SIZE_8(x) ((struct aper_size_info_8 *) x)
+#define A_SIZE_16(x) ((struct aper_size_info_16 *) x)
+#define A_SIZE_32(x) ((struct aper_size_info_32 *) x)
+#define A_SIZE_LVL2(x) ((struct aper_size_info_lvl2 *) x)
+#define A_SIZE_FIX(x) ((struct aper_size_info_fixed *) x)
+#define A_IDX8(bridge) (A_SIZE_8((bridge)->driver->aperture_sizes) + i)
+#define A_IDX16(bridge) (A_SIZE_16((bridge)->driver->aperture_sizes) + i)
+#define A_IDX32(bridge) (A_SIZE_32((bridge)->driver->aperture_sizes) + i)
+#define MAXKEY (4096 * 32)
+
+#define PGE_EMPTY(b, p) (!(p) || (p) == (unsigned long) (b)->scratch_page)
+
+
+/* Intel registers */
+#define INTEL_APSIZE 0xb4
+#define INTEL_ATTBASE 0xb8
+#define INTEL_AGPCTRL 0xb0
+#define INTEL_NBXCFG 0x50
+#define INTEL_ERRSTS 0x91
+
+/* Intel i830 registers */
+#define I830_GMCH_CTRL 0x52
+#define I830_GMCH_ENABLED 0x4
+#define I830_GMCH_MEM_MASK 0x1
+#define I830_GMCH_MEM_64M 0x1
+#define I830_GMCH_MEM_128M 0
+#define I830_GMCH_GMS_MASK 0x70
+#define I830_GMCH_GMS_DISABLED 0x00
+#define I830_GMCH_GMS_LOCAL 0x10
+#define I830_GMCH_GMS_STOLEN_512 0x20
+#define I830_GMCH_GMS_STOLEN_1024 0x30
+#define I830_GMCH_GMS_STOLEN_8192 0x40
+#define I830_RDRAM_CHANNEL_TYPE 0x03010
+#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5)
+#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3)
+
+/* This one is for I830MP w. an external graphic card */
+#define INTEL_I830_ERRSTS 0x92
+
+/* Intel 855GM/852GM registers */
+#define I855_GMCH_GMS_STOLEN_0M 0x0
+#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
+#define I85X_CAPID 0x44
+#define I85X_VARIANT_MASK 0x7
+#define I85X_VARIANT_SHIFT 5
+#define I855_GME 0x0
+#define I855_GM 0x4
+#define I852_GME 0x2
+#define I852_GM 0x5
+
+/* Intel i845 registers */
+#define INTEL_I845_AGPM 0x51
+#define INTEL_I845_ERRSTS 0xc8
+
+/* Intel i860 registers */
+#define INTEL_I860_MCHCFG 0x50
+#define INTEL_I860_ERRSTS 0xc8
+
+/* Intel i810 registers */
+#define I810_GMADDR 0x10
+#define I810_MMADDR 0x14
+#define I810_PTE_BASE 0x10000
+#define I810_PTE_MAIN_UNCACHED 0x00000000
+#define I810_PTE_LOCAL 0x00000002
+#define I810_PTE_VALID 0x00000001
+#define I810_SMRAM_MISCC 0x70
+#define I810_GFX_MEM_WIN_SIZE 0x00010000
+#define I810_GFX_MEM_WIN_32M 0x00010000
+#define I810_GMS 0x000000c0
+#define I810_GMS_DISABLE 0x00000000
+#define I810_PGETBL_CTL 0x2020
+#define I810_PGETBL_ENABLED 0x00000001
+#define I810_DRAM_CTL 0x3000
+#define I810_DRAM_ROW_0 0x00000001
+#define I810_DRAM_ROW_0_SDRAM 0x00000001
+
+struct agp_device_ids {
+ unsigned short device_id; /* first, to make table easier to read */
+ enum chipset_type chipset;
+ const char *chipset_name;
+ int (*chipset_setup) (struct pci_dev *pdev); /* used to override generic */
+};
+
+/* Driver registration */
+struct agp_bridge_data *agp_alloc_bridge(void);
+void agp_put_bridge(struct agp_bridge_data *bridge);
+int agp_add_bridge(struct agp_bridge_data *bridge);
+void agp_remove_bridge(struct agp_bridge_data *bridge);
+
+/* Frontend routines. */
+int agp_frontend_initialize(void);
+void agp_frontend_cleanup(void);
+
+/* Generic routines. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+void agp_generic_enable(u32 mode);
+int agp_generic_create_gatt_table(void);
+int agp_generic_free_gatt_table(void);
+void *agp_generic_alloc_page(void);
+u32 agp_collect_device_status(u32 mode, u32 command);
+unsigned long agp_generic_mask_memory(unsigned long addr, int type);
+#else
+void agp_generic_enable(struct agp_bridge_data *bridge, u32 mode);
+int agp_generic_create_gatt_table(struct agp_bridge_data *bridge);
+int agp_generic_free_gatt_table(struct agp_bridge_data *bridge);
+void *agp_generic_alloc_page(struct agp_bridge_data *bridge);
+u32 agp_collect_device_status(struct agp_bridge_data *bridge,
+ u32 mode, u32 command);
+unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
+ unsigned long addr, int type);
+
+#endif
+
+
+struct agp_memory *agp_create_memory(int scratch_pages);
+int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type);
+int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type);
+struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type);
+void agp_generic_free_by_type(struct agp_memory *curr);
+void agp_generic_destroy_page(void *addr);
+void agp_free_key(int key);
+int agp_num_entries(void);
+void agp_device_command(u32 command, int agp_v3);
+int agp_3_5_enable(struct agp_bridge_data *bridge);
+void global_cache_flush(void);
+void get_agp_version(struct agp_bridge_data *bridge);
+struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev);
+
+/* generic routines for agp>=3 */
+int agp3_generic_fetch_size(void);
+void agp3_generic_tlbflush(struct agp_memory *mem);
+int agp3_generic_configure(void);
+void agp3_generic_cleanup(void);
+
+/* aperture sizes have been standardised since v3 */
+#define AGP_GENERIC_SIZES_ENTRIES 11
+extern struct aper_size_info_16 agp3_generic_sizes[];
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+#define virt_to_gart(x) (virt_to_phys(x))
+#define gart_to_virt(x) (phys_to_virt(x))
+#else
+#define virt_to_gart(x) (phys_to_gart(virt_to_phys(x)))
+#define gart_to_virt(x) (phys_to_virt(gart_to_phys(x)))
+#endif
+
+extern int agp_off;
+extern int agp_try_unsupported_boot;
+
+/* Chipset independent registers (from AGP Spec) */
+#define AGP_APBASE 0x10
+
+#define AGPSTAT 0x4
+#define AGPCMD 0x8
+#define AGPNISTAT 0xc
+#define AGPCTRL 0x10
+#define AGPAPSIZE 0x14
+#define AGPNEPG 0x16
+#define AGPGARTLO 0x18
+#define AGPGARTHI 0x1c
+#define AGPNICMD 0x20
+
+#define AGP_MAJOR_VERSION_SHIFT (20)
+#define AGP_MINOR_VERSION_SHIFT (16)
+
+#define AGPSTAT_RQ_DEPTH (0xff000000)
+#define AGPSTAT_RQ_DEPTH_SHIFT 24
+
+#define AGPSTAT_CAL_MASK (1<<12|1<<11|1<<10)
+#define AGPSTAT_ARQSZ (1<<15|1<<14|1<<13)
+#define AGPSTAT_ARQSZ_SHIFT 13
+
+#define AGPSTAT_SBA (1<<9)
+#define AGPSTAT_AGP_ENABLE (1<<8)
+#define AGPSTAT_FW (1<<4)
+#define AGPSTAT_MODE_3_0 (1<<3)
+
+#define AGPSTAT2_1X (1<<0)
+#define AGPSTAT2_2X (1<<1)
+#define AGPSTAT2_4X (1<<2)
+
+#define AGPSTAT3_RSVD (1<<2)
+#define AGPSTAT3_8X (1<<1)
+#define AGPSTAT3_4X (1)
+
+#define AGPCTRL_APERENB (1<<8)
+#define AGPCTRL_GTLBEN (1<<7)
+
+#define AGP2_RESERVED_MASK 0x00fffcc8
+#define AGP3_RESERVED_MASK 0x00ff00c4
+
+#define AGP_ERRATA_FASTWRITES 1<<0
+#define AGP_ERRATA_SBA 1<<1
+#define AGP_ERRATA_1X 1<<2
+
+#endif /* _AGP_BACKEND_PRIV_H */
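To make the hook table above concrete, here is a minimal, hypothetical chipset-driver descriptor for the 2.6.12-and-later layout, wired almost entirely to the generic helpers declared in this header. It is a sketch, not part of this commit; the aperture-size and mask values are illustrative only (a real driver such as intel-agp.c supplies chipset-specific tables and overrides).

    /* Hypothetical example: a chipset driver built on the generic helpers
     * declared in agp.h (2.6.12+ hook signatures). Assumes the usual
     * <linux/module.h>, <linux/agp_backend.h> and "agp.h" includes. */
    static struct aper_size_info_16 example_sizes[] = {
            {256, 65536, 6, 0xf00},         /* illustrative values */
            {128, 32768, 5, 0xf20},
            { 64, 16384, 4, 0xf30},
    };

    static struct gatt_mask example_masks[] = {
            { .mask = 0x00000001, .type = 0 },      /* "valid" bit only */
    };

    static struct agp_bridge_driver example_agp_driver = {
            .owner              = THIS_MODULE,
            .aperture_sizes     = example_sizes,
            .num_aperture_sizes = ARRAY_SIZE(example_sizes),
            .size_type          = U16_APER_SIZE,
            .masks              = example_masks,
            .fetch_size         = agp3_generic_fetch_size,
            .configure          = agp3_generic_configure,
            .agp_enable         = agp_generic_enable,
            .cleanup            = agp3_generic_cleanup,
            .tlb_flush          = agp3_generic_tlbflush,
            .mask_memory        = agp_generic_mask_memory,
            .cache_flush        = global_cache_flush,
            .create_gatt_table  = agp_generic_create_gatt_table,
            .free_gatt_table    = agp_generic_free_gatt_table,
            .insert_memory      = agp_generic_insert_memory,
            .remove_memory      = agp_generic_remove_memory,
            .alloc_by_type      = agp_generic_alloc_by_type,
            .free_by_type       = agp_generic_free_by_type,
            .agp_alloc_page     = agp_generic_alloc_page,
            .agp_destroy_page   = agp_generic_destroy_page,
    };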
diff --git a/backend.c b/backend.c
new file mode 100644
index 0000000..75a9890
--- /dev/null
+++ b/backend.c
@@ -0,0 +1,457 @@
+/*
+ * AGPGART driver backend routines.
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2003 Dave Jones.
+ * Copyright (C) 1999 Jeff Hartmann.
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, DAVE JONES, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * TODO:
+ * - Allocate more than order 0 pages to avoid too much linear map splitting.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <linux/pm.h>
+#include <linux/agp_backend.h>
+#include <linux/agpgart.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+#include "agp.h"
+
+/* Due to XFree86 brain-damage, we can't go to 1.0 until they
+ * fix some real stupidity. It's only by chance we can bump
+ * past 0.99 at all due to some boolean logic error. */
+#define AGPGART_VERSION_MAJOR 0
+#define AGPGART_VERSION_MINOR 101
+static struct agp_version agp_current_version =
+{
+ .major = AGPGART_VERSION_MAJOR,
+ .minor = AGPGART_VERSION_MINOR,
+};
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+static int agp_count=0;
+
+struct agp_bridge_data agp_bridge_dummy = { .type = NOT_SUPPORTED };
+struct agp_bridge_data *agp_bridge = &agp_bridge_dummy;
+EXPORT_SYMBOL(agp_bridge);
+#else
+struct agp_bridge_data *(*agp_find_bridge)(struct pci_dev *) =
+ &agp_generic_find_bridge;
+
+struct agp_bridge_data *agp_bridge;
+LIST_HEAD(agp_bridges);
+EXPORT_SYMBOL(agp_bridge);
+EXPORT_SYMBOL(agp_bridges);
+EXPORT_SYMBOL(agp_find_bridge);
+#endif
+
+
+/**
+ * agp_backend_acquire - attempt to acquire an agp backend.
+ *
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+int agp_backend_acquire(void)
+{
+ if (agp_bridge->type == NOT_SUPPORTED)
+ return -EINVAL;
+ if (atomic_read(&agp_bridge->agp_in_use))
+ return -EBUSY;
+ atomic_inc(&agp_bridge->agp_in_use);
+ return 0;
+}
+#else
+struct agp_bridge_data *agp_backend_acquire(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge;
+
+ bridge = agp_find_bridge(pdev);
+
+ if (!bridge)
+ return NULL;
+
+ if (atomic_read(&bridge->agp_in_use))
+ return NULL;
+ atomic_inc(&bridge->agp_in_use);
+ return bridge;
+}
+#endif
+EXPORT_SYMBOL(agp_backend_acquire);
+
+
+/**
+ * agp_backend_release - release the lock on the agp backend.
+ *
+ * The caller must ensure that the graphics aperture translation table
+ * is ready for use by another entity.
+ *
+ * (Ensure that all memory it bound is unbound.)
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+void agp_backend_release(void)
+{
+ struct agp_bridge_data *bridge = agp_bridge;
+ if (bridge->type == NOT_SUPPORTED)
+ return;
+#else
+void agp_backend_release(struct agp_bridge_data *bridge)
+{
+#endif
+ if (bridge)
+ atomic_dec(&bridge->agp_in_use);
+}
+EXPORT_SYMBOL(agp_backend_release);
+
+
+static struct { int mem, agp; } maxes_table[] = {
+ {0, 0},
+ {32, 4},
+ {64, 28},
+ {128, 96},
+ {256, 204},
+ {512, 440},
+ {1024, 942},
+ {2048, 1920},
+ {4096, 3932}
+};
+
+static int agp_find_max(void)
+{
+ long memory, index, result;
+
+#if PAGE_SHIFT < 20
+ memory = num_physpages >> (20 - PAGE_SHIFT);
+#else
+ memory = num_physpages << (PAGE_SHIFT - 20);
+#endif
+ index = 1;
+
+ while ((memory > maxes_table[index].mem) && (index < 8))
+ index++;
+
+ result = maxes_table[index - 1].agp +
+ ( (memory - maxes_table[index - 1].mem) *
+ (maxes_table[index].agp - maxes_table[index - 1].agp)) /
+ (maxes_table[index].mem - maxes_table[index - 1].mem);
+
+ result = result << (20 - PAGE_SHIFT);
+ return result;
+}
+
+
+static int agp_backend_initialize(struct agp_bridge_data *bridge)
+{
+ int size_value, rc, got_gatt=0, got_keylist=0;
+
+ bridge->max_memory_agp = agp_find_max();
+ bridge->version = &agp_current_version;
+
+ if (bridge->driver->needs_scratch_page) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ void *addr = bridge->driver->agp_alloc_page();
+#else
+ void *addr = bridge->driver->agp_alloc_page(bridge);
+#endif
+
+ if (!addr) {
+ printk(KERN_ERR PFX "unable to get memory for scratch page.\n");
+ return -ENOMEM;
+ }
+
+ bridge->scratch_page_real = virt_to_gart(addr);
+ bridge->scratch_page =
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ bridge->driver->mask_memory(bridge->scratch_page_real, 0);
+#else
+ bridge->driver->mask_memory(bridge, bridge->scratch_page_real, 0);
+#endif
+ }
+
+ size_value = bridge->driver->fetch_size();
+ if (size_value == 0) {
+ printk(KERN_ERR PFX "unable to determine aperture size.\n");
+ rc = -EINVAL;
+ goto err_out;
+ }
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ if (bridge->driver->create_gatt_table()) {
+#else
+ if (bridge->driver->create_gatt_table(bridge)) {
+#endif
+ printk(KERN_ERR PFX
+ "unable to get memory for graphics translation table.\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ got_gatt = 1;
+
+ bridge->key_list = vmalloc(PAGE_SIZE * 4);
+ if (bridge->key_list == NULL) {
+ printk(KERN_ERR PFX "error allocating memory for key lists.\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ got_keylist = 1;
+
+ /* FIXME vmalloc'd memory not guaranteed contiguous */
+ memset(bridge->key_list, 0, PAGE_SIZE * 4);
+
+ if (bridge->driver->configure()) {
+ printk(KERN_ERR PFX "error configuring host chipset.\n");
+ rc = -EINVAL;
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ if (bridge->driver->needs_scratch_page)
+ bridge->driver->agp_destroy_page(
+ gart_to_virt(bridge->scratch_page_real));
+ if (got_gatt)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ bridge->driver->free_gatt_table();
+#else
+ bridge->driver->free_gatt_table(bridge);
+#endif
+ if (got_keylist) {
+ vfree(bridge->key_list);
+ bridge->key_list = NULL;
+ }
+ return rc;
+}
+
+/* cannot be __exit because it could be called from __init code */
+static void agp_backend_cleanup(struct agp_bridge_data *bridge)
+{
+ if (bridge->driver->cleanup)
+ bridge->driver->cleanup();
+ if (bridge->driver->free_gatt_table)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ bridge->driver->free_gatt_table();
+#else
+ bridge->driver->free_gatt_table(bridge);
+#endif
+ vfree(bridge->key_list);
+ bridge->key_list = NULL;
+
+ if (bridge->driver->agp_destroy_page &&
+ bridge->driver->needs_scratch_page)
+ bridge->driver->agp_destroy_page(
+ gart_to_virt(bridge->scratch_page_real));
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
+static const drm_agp_t drm_agp = {
+ &agp_free_memory,
+ &agp_allocate_memory,
+ &agp_bind_memory,
+ &agp_unbind_memory,
+ &agp_enable,
+ &agp_backend_acquire,
+ &agp_backend_release,
+ &agp_copy_info
+};
+#endif
+
+/* When we remove the global variable agp_bridge from all drivers
+ * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
+ */
+
+struct agp_bridge_data *agp_alloc_bridge(void)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ return agp_bridge;
+#else
+ struct agp_bridge_data *bridge = kmalloc(sizeof(*bridge), GFP_KERNEL);
+
+ if (!bridge)
+ return NULL;
+
+ memset(bridge, 0, sizeof(*bridge));
+ atomic_set(&bridge->agp_in_use, 0);
+ atomic_set(&bridge->current_memory_agp, 0);
+
+ if (list_empty(&agp_bridges))
+ agp_bridge = bridge;
+
+ return bridge;
+#endif
+}
+EXPORT_SYMBOL(agp_alloc_bridge);
+
+
+void agp_put_bridge(struct agp_bridge_data *bridge)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ return;
+#else
+ kfree(bridge);
+
+ if (list_empty(&agp_bridges))
+ agp_bridge = NULL;
+#endif
+}
+EXPORT_SYMBOL(agp_put_bridge);
+
+
+int agp_add_bridge(struct agp_bridge_data *bridge)
+{
+ int error;
+
+ if (agp_off)
+ return -ENODEV;
+
+ if (!bridge->dev) {
+ printk (KERN_DEBUG PFX "Erk, registering with no pci_dev!\n");
+ return -EINVAL;
+ }
+
+ /* Grab reference on the chipset driver. */
+ if (!try_module_get(bridge->driver->owner)) {
+ printk (KERN_INFO PFX "Couldn't lock chipset driver.\n");
+ return -EINVAL;
+ }
+
+ error = agp_backend_initialize(bridge);
+ if (error) {
+ printk (KERN_INFO PFX "agp_backend_initialize() failed.\n");
+ goto err_out;
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ bridge->type = SUPPORTED;
+
+ error = agp_backend_initialize(agp_bridge);
+ if (error) {
+ printk (KERN_INFO PFX "agp_backend_initialize() failed.\n");
+ goto err_out;
+ }
+
+ error = agp_frontend_initialize();
+ if (error) {
+ printk (KERN_INFO PFX "agp_frontend_initialize() failed.\n");
+ goto frontend_err;
+ }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11))
+ inter_module_register("drm_agp", THIS_MODULE, &drm_agp);
+#endif
+
+ agp_count++;
+ return 0;
+
+frontend_err:
+ agp_backend_cleanup(agp_bridge);
+err_out:
+ bridge->type = NOT_SUPPORTED;
+ module_put(bridge->driver->owner);
+ return error;
+
+#else
+ if (list_empty(&agp_bridges)) {
+ error = agp_frontend_initialize();
+ if (error) {
+ printk (KERN_INFO PFX "agp_frontend_initialize() failed.\n");
+ goto frontend_err;
+ }
+
+ printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n",
+ bridge->driver->fetch_size(), bridge->gart_bus_addr);
+
+ }
+
+ list_add(&bridge->list, &agp_bridges);
+ return 0;
+
+frontend_err:
+ agp_backend_cleanup(bridge);
+err_out:
+ module_put(bridge->driver->owner);
+ agp_put_bridge(bridge);
+ return error;
+#endif
+}
+EXPORT_SYMBOL_GPL(agp_add_bridge);
+
+void agp_remove_bridge(struct agp_bridge_data *bridge)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ bridge->type = NOT_SUPPORTED;
+ agp_frontend_cleanup();
+ agp_backend_cleanup(bridge);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
+ inter_module_unregister("drm_agp");
+#endif
+ agp_count--;
+ module_put(bridge->driver->owner);
+#else
+ agp_backend_cleanup(bridge);
+ list_del(&bridge->list);
+ if (list_empty(&agp_bridges))
+ agp_frontend_cleanup();
+ module_put(bridge->driver->owner);
+#endif
+}
+EXPORT_SYMBOL_GPL(agp_remove_bridge);
+
+int agp_off;
+int agp_try_unsupported_boot;
+EXPORT_SYMBOL(agp_off);
+EXPORT_SYMBOL(agp_try_unsupported_boot);
+
+static int __init agp_init(void)
+{
+ if (!agp_off)
+ printk(KERN_INFO "Linux agpgart interface v%d.%d (c) Dave Jones\n",
+ AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);
+ return 0;
+}
+
+static void __exit agp_exit(void)
+{
+}
+
+#ifndef MODULE
+static __init int agp_setup(char *s)
+{
+ if (!strcmp(s,"off"))
+ agp_off = 1;
+ if (!strcmp(s,"try_unsupported"))
+ agp_try_unsupported_boot = 1;
+ return 1;
+}
+__setup("agp=", agp_setup);
+#endif
+
+MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
+MODULE_DESCRIPTION("AGP GART driver");
+MODULE_LICENSE("GPL and additional rights");
+MODULE_ALIAS_MISCDEV(AGPGART_MINOR);
+
+module_init(agp_init);
+module_exit(agp_exit);
+
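As a hedged illustration of how a chipset driver uses the registration entry points above: its PCI probe routine allocates a bridge, fills in the driver hooks and the AGP capability offset, and hands the result to agp_add_bridge(); removal reverses that. The names below are hypothetical and the example is not part of this commit (the real pattern is in intel-agp.c):

    /* Hypothetical PCI probe/remove pair showing the intended use of
     * agp_alloc_bridge()/agp_add_bridge() and their teardown counterparts. */
    static int __devinit example_agp_probe(struct pci_dev *pdev,
                                           const struct pci_device_id *ent)
    {
            struct agp_bridge_data *bridge;
            u8 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);

            if (!cap_ptr)
                    return -ENODEV;

            bridge = agp_alloc_bridge();
            if (!bridge)
                    return -ENOMEM;

            bridge->driver = &example_agp_driver;   /* from the earlier sketch */
            bridge->dev    = pdev;
            bridge->capndx = cap_ptr;
            /* Cache the current AGPSTAT contents for agp_enable() later. */
            pci_read_config_dword(pdev, bridge->capndx + PCI_AGP_STATUS,
                                  &bridge->mode);

            pci_set_drvdata(pdev, bridge);
            return agp_add_bridge(bridge);
    }

    static void __devexit example_agp_remove(struct pci_dev *pdev)
    {
            struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

            agp_remove_bridge(bridge);
            agp_put_bridge(bridge);
    }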
diff --git a/frontend.c b/frontend.c
new file mode 100644
index 0000000..6dd7acb
--- /dev/null
+++ b/frontend.c
@@ -0,0 +1,1152 @@
+/*
+ * AGPGART driver frontend
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2003 Dave Jones
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mman.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/agp_backend.h>
+#include <linux/agpgart.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include "agp.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
+#define down(_x) mutex_lock(_x)
+#define up(_x) mutex_unlock(_x)
+#endif
+
+
+static struct agp_front_data agp_fe;
+
+static struct agp_memory *agp_find_mem_by_key(int key)
+{
+ struct agp_memory *curr;
+
+ if (agp_fe.current_controller == NULL)
+ return NULL;
+
+ curr = agp_fe.current_controller->pool;
+
+ while (curr != NULL) {
+ if (curr->key == key)
+ break;
+ curr = curr->next;
+ }
+
+ DBG("key=%d -> mem=%p", key, curr);
+ return curr;
+}
+
+static void agp_remove_from_pool(struct agp_memory *temp)
+{
+ struct agp_memory *prev;
+ struct agp_memory *next;
+
+ /* Check to see if this is even in the memory pool */
+
+ DBG("mem=%p", temp);
+ if (agp_find_mem_by_key(temp->key) != NULL) {
+ next = temp->next;
+ prev = temp->prev;
+
+ if (prev != NULL) {
+ prev->next = next;
+ if (next != NULL)
+ next->prev = prev;
+
+ } else {
+ /* This is the first item on the list */
+ if (next != NULL)
+ next->prev = NULL;
+
+ agp_fe.current_controller->pool = next;
+ }
+ }
+}
+
+/*
+ * Routines for managing each client's segment list -
+ * These routines handle adding and removing segments
+ * to each auth'ed client.
+ */
+
+static struct
+agp_segment_priv *agp_find_seg_in_client(const struct agp_client *client,
+ unsigned long offset,
+ int size, pgprot_t page_prot)
+{
+ struct agp_segment_priv *seg;
+ int num_segments, i;
+ off_t pg_start;
+ size_t pg_count;
+
+ pg_start = offset / 4096;
+ pg_count = size / 4096;
+ seg = *(client->segments);
+ num_segments = client->num_segments;
+
+ for (i = 0; i < client->num_segments; i++) {
+ if ((seg[i].pg_start == pg_start) &&
+ (seg[i].pg_count == pg_count) &&
+ (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) {
+ return seg + i;
+ }
+ }
+
+ return NULL;
+}
+
+static void agp_remove_seg_from_client(struct agp_client *client)
+{
+ DBG("client=%p", client);
+
+ if (client->segments != NULL) {
+ if (*(client->segments) != NULL) {
+ DBG("Freeing %p from client %p", *(client->segments), client);
+ kfree(*(client->segments));
+ }
+ DBG("Freeing %p from client %p", client->segments, client);
+ kfree(client->segments);
+ client->segments = NULL;
+ }
+}
+
+static void agp_add_seg_to_client(struct agp_client *client,
+ struct agp_segment_priv ** seg, int num_segments)
+{
+ struct agp_segment_priv **prev_seg;
+
+ prev_seg = client->segments;
+
+ if (prev_seg != NULL)
+ agp_remove_seg_from_client(client);
+
+ DBG("Adding seg %p (%d segments) to client %p", seg, num_segments, client);
+ client->num_segments = num_segments;
+ client->segments = seg;
+}
+
+/* Originally taken from linux/mm/mmap.c from the array
+ * protection_map.
+ * The original really should be exported to modules, or
+ * some routine which does the conversion for you
+ */
+
+static const pgprot_t my_protect_map[16] =
+{
+ __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
+ __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
+};
+
+static pgprot_t agp_convert_mmap_flags(int prot)
+{
+#define _trans(x,bit1,bit2) \
+((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)
+
+ unsigned long prot_bits;
+ pgprot_t temp;
+
+ prot_bits = _trans(prot, PROT_READ, VM_READ) |
+ _trans(prot, PROT_WRITE, VM_WRITE) |
+ _trans(prot, PROT_EXEC, VM_EXEC);
+
+ prot_bits |= VM_SHARED;
+
+ temp = my_protect_map[prot_bits & 0x0000000f];
+
+ return temp;
+}
+
+static int agp_create_segment(struct agp_client *client, struct agp_region *region)
+{
+ struct agp_segment_priv **ret_seg;
+ struct agp_segment_priv *seg;
+ struct agp_segment *user_seg;
+ size_t i;
+
+ seg = kmalloc((sizeof(struct agp_segment_priv) * region->seg_count), GFP_KERNEL);
+ if (seg == NULL) {
+ kfree(region->seg_list);
+ region->seg_list = NULL;
+ return -ENOMEM;
+ }
+ memset(seg, 0, (sizeof(struct agp_segment_priv) * region->seg_count));
+ user_seg = region->seg_list;
+
+ for (i = 0; i < region->seg_count; i++) {
+ seg[i].pg_start = user_seg[i].pg_start;
+ seg[i].pg_count = user_seg[i].pg_count;
+ seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot);
+ }
+ kfree(region->seg_list);
+ region->seg_list = NULL;
+
+ ret_seg = kmalloc(sizeof(void *), GFP_KERNEL);
+ if (ret_seg == NULL) {
+ kfree(seg);
+ return -ENOMEM;
+ }
+ *ret_seg = seg;
+ agp_add_seg_to_client(client, ret_seg, region->seg_count);
+ return 0;
+}
+
+/* End - Routines for managing each client's segment list */
+
+/* This function must only be called when current_controller != NULL */
+static void agp_insert_into_pool(struct agp_memory * temp)
+{
+ struct agp_memory *prev;
+
+ prev = agp_fe.current_controller->pool;
+
+ if (prev != NULL) {
+ prev->prev = temp;
+ temp->next = prev;
+ }
+ agp_fe.current_controller->pool = temp;
+}
+
+
+/* File private list routines */
+
+static struct agp_file_private *agp_find_private(pid_t pid)
+{
+ struct agp_file_private *curr;
+
+ curr = agp_fe.file_priv_list;
+
+ while (curr != NULL) {
+ if (curr->my_pid == pid)
+ return curr;
+ curr = curr->next;
+ }
+
+ return NULL;
+}
+
+static void agp_insert_file_private(struct agp_file_private * priv)
+{
+ struct agp_file_private *prev;
+
+ prev = agp_fe.file_priv_list;
+
+ if (prev != NULL)
+ prev->prev = priv;
+ priv->next = prev;
+ agp_fe.file_priv_list = priv;
+}
+
+static void agp_remove_file_private(struct agp_file_private * priv)
+{
+ struct agp_file_private *next;
+ struct agp_file_private *prev;
+
+ next = priv->next;
+ prev = priv->prev;
+
+ if (prev != NULL) {
+ prev->next = next;
+
+ if (next != NULL)
+ next->prev = prev;
+
+ } else {
+ if (next != NULL)
+ next->prev = NULL;
+
+ agp_fe.file_priv_list = next;
+ }
+}
+
+/* End - File flag list routines */
+
+/*
+ * Wrappers for agp_free_memory & agp_allocate_memory
+ * These make sure that internal lists are kept updated.
+ */
+static void agp_free_memory_wrap(struct agp_memory *memory)
+{
+ agp_remove_from_pool(memory);
+ agp_free_memory(memory);
+}
+
+static struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type)
+{
+ struct agp_memory *memory;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ memory = agp_allocate_memory(pg_count, type);
+#else
+ memory = agp_allocate_memory(agp_bridge, pg_count, type);
+#endif
+ if (memory == NULL)
+ return NULL;
+
+ agp_insert_into_pool(memory);
+ return memory;
+}
+
+/* Routines for managing the list of controllers -
+ * These routines manage the current controller, and the list of
+ * controllers
+ */
+
+static struct agp_controller *agp_find_controller_by_pid(pid_t id)
+{
+ struct agp_controller *controller;
+
+ controller = agp_fe.controllers;
+
+ while (controller != NULL) {
+ if (controller->pid == id)
+ return controller;
+ controller = controller->next;
+ }
+
+ return NULL;
+}
+
+static struct agp_controller *agp_create_controller(pid_t id)
+{
+ struct agp_controller *controller;
+
+ controller = kmalloc(sizeof(struct agp_controller), GFP_KERNEL);
+
+ if (controller == NULL)
+ return NULL;
+
+ memset(controller, 0, sizeof(struct agp_controller));
+ controller->pid = id;
+
+ return controller;
+}
+
+static int agp_insert_controller(struct agp_controller *controller)
+{
+ struct agp_controller *prev_controller;
+
+ prev_controller = agp_fe.controllers;
+ controller->next = prev_controller;
+
+ if (prev_controller != NULL)
+ prev_controller->prev = controller;
+
+ agp_fe.controllers = controller;
+
+ return 0;
+}
+
+static void agp_remove_all_clients(struct agp_controller *controller)
+{
+ struct agp_client *client;
+ struct agp_client *temp;
+
+ client = controller->clients;
+
+ while (client) {
+ struct agp_file_private *priv;
+
+ temp = client;
+ agp_remove_seg_from_client(temp);
+ priv = agp_find_private(temp->pid);
+
+ if (priv != NULL) {
+ clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
+ clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
+ }
+ client = client->next;
+ kfree(temp);
+ }
+}
+
+static void agp_remove_all_memory(struct agp_controller *controller)
+{
+ struct agp_memory *memory;
+ struct agp_memory *temp;
+
+ memory = controller->pool;
+
+ while (memory) {
+ temp = memory;
+ memory = memory->next;
+ agp_free_memory_wrap(temp);
+ }
+}
+
+static int agp_remove_controller(struct agp_controller *controller)
+{
+ struct agp_controller *prev_controller;
+ struct agp_controller *next_controller;
+
+ prev_controller = controller->prev;
+ next_controller = controller->next;
+
+ if (prev_controller != NULL) {
+ prev_controller->next = next_controller;
+ if (next_controller != NULL)
+ next_controller->prev = prev_controller;
+
+ } else {
+ if (next_controller != NULL)
+ next_controller->prev = NULL;
+
+ agp_fe.controllers = next_controller;
+ }
+
+ agp_remove_all_memory(controller);
+ agp_remove_all_clients(controller);
+
+ if (agp_fe.current_controller == controller) {
+ agp_fe.current_controller = NULL;
+ agp_fe.backend_acquired = FALSE;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ agp_backend_release();
+#else
+ agp_backend_release(agp_bridge);
+#endif
+ }
+ kfree(controller);
+ return 0;
+}
+
+static void agp_controller_make_current(struct agp_controller *controller)
+{
+ struct agp_client *clients;
+
+ clients = controller->clients;
+
+ while (clients != NULL) {
+ struct agp_file_private *priv;
+
+ priv = agp_find_private(clients->pid);
+
+ if (priv != NULL) {
+ set_bit(AGP_FF_IS_VALID, &priv->access_flags);
+ set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
+ }
+ clients = clients->next;
+ }
+
+ agp_fe.current_controller = controller;
+}
+
+static void agp_controller_release_current(struct agp_controller *controller,
+ struct agp_file_private *controller_priv)
+{
+ struct agp_client *clients;
+
+ clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags);
+ clients = controller->clients;
+
+ while (clients != NULL) {
+ struct agp_file_private *priv;
+
+ priv = agp_find_private(clients->pid);
+
+ if (priv != NULL)
+ clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
+
+ clients = clients->next;
+ }
+
+ agp_fe.current_controller = NULL;
+ agp_fe.used_by_controller = FALSE;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ agp_backend_release();
+#else
+ agp_backend_release(agp_bridge);
+#endif
+}
+
+/*
+ * Routines for managing client lists -
+ * These routines are for managing the list of auth'ed clients.
+ */
+
+static struct agp_client
+*agp_find_client_in_controller(struct agp_controller *controller, pid_t id)
+{
+ struct agp_client *client;
+
+ if (controller == NULL)
+ return NULL;
+
+ client = controller->clients;
+
+ while (client != NULL) {
+ if (client->pid == id)
+ return client;
+ client = client->next;
+ }
+
+ return NULL;
+}
+
+static struct agp_controller *agp_find_controller_for_client(pid_t id)
+{
+ struct agp_controller *controller;
+
+ controller = agp_fe.controllers;
+
+ while (controller != NULL) {
+ if ((agp_find_client_in_controller(controller, id)) != NULL)
+ return controller;
+ controller = controller->next;
+ }
+
+ return NULL;
+}
+
+static struct agp_client *agp_find_client_by_pid(pid_t id)
+{
+ struct agp_client *temp;
+
+ if (agp_fe.current_controller == NULL)
+ return NULL;
+
+ temp = agp_find_client_in_controller(agp_fe.current_controller, id);
+ return temp;
+}
+
+static void agp_insert_client(struct agp_client *client)
+{
+ struct agp_client *prev_client;
+
+ prev_client = agp_fe.current_controller->clients;
+ client->next = prev_client;
+
+ if (prev_client != NULL)
+ prev_client->prev = client;
+
+ agp_fe.current_controller->clients = client;
+ agp_fe.current_controller->num_clients++;
+}
+
+static struct agp_client *agp_create_client(pid_t id)
+{
+ struct agp_client *new_client;
+
+ new_client = kmalloc(sizeof(struct agp_client), GFP_KERNEL);
+
+ if (new_client == NULL)
+ return NULL;
+
+ memset(new_client, 0, sizeof(struct agp_client));
+ new_client->pid = id;
+ agp_insert_client(new_client);
+ return new_client;
+}
+
+static int agp_remove_client(pid_t id)
+{
+ struct agp_client *client;
+ struct agp_client *prev_client;
+ struct agp_client *next_client;
+ struct agp_controller *controller;
+
+ controller = agp_find_controller_for_client(id);
+ if (controller == NULL)
+ return -EINVAL;
+
+ client = agp_find_client_in_controller(controller, id);
+ if (client == NULL)
+ return -EINVAL;
+
+ prev_client = client->prev;
+ next_client = client->next;
+
+ if (prev_client != NULL) {
+ prev_client->next = next_client;
+ if (next_client != NULL)
+ next_client->prev = prev_client;
+
+ } else {
+ if (next_client != NULL)
+ next_client->prev = NULL;
+ controller->clients = next_client;
+ }
+
+ controller->num_clients--;
+ agp_remove_seg_from_client(client);
+ kfree(client);
+ return 0;
+}
+
+/* End - Routines for managing client lists */
+
+/* File Operations */
+
+static int agp_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned int size, current_size;
+ unsigned long offset;
+ struct agp_client *client;
+ struct agp_file_private *priv = file->private_data;
+ struct agp_kern_info kerninfo;
+
+ down(&(agp_fe.agp_mutex));
+
+ if (agp_fe.backend_acquired != TRUE)
+ goto out_eperm;
+
+ if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags)))
+ goto out_eperm;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ agp_copy_info(&kerninfo);
+#else
+ agp_copy_info(agp_bridge, &kerninfo);
+#endif
+ size = vma->vm_end - vma->vm_start;
+ current_size = kerninfo.aper_size;
+ current_size = current_size * 0x100000;
+ offset = vma->vm_pgoff << PAGE_SHIFT;
+ DBG("%lx:%lx", offset, offset+size);
+
+ if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) {
+ if ((size + offset) > current_size)
+ goto out_inval;
+
+ client = agp_find_client_by_pid(current->pid);
+
+ if (client == NULL)
+ goto out_eperm;
+
+ if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot))
+ goto out_inval;
+
+ DBG("client vm_ops=%p", kerninfo.vm_ops);
+ if (kerninfo.vm_ops) {
+ vma->vm_ops = kerninfo.vm_ops;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
+ } else if (remap_page_range(vma, vma->vm_start,
+ (kerninfo.aper_base + offset),
+ size, vma->vm_page_prot)) {
+#else
+ } else if (io_remap_pfn_range(vma, vma->vm_start,
+ (kerninfo.aper_base + offset) >> PAGE_SHIFT,
+ size, vma->vm_page_prot)) {
+#endif
+ goto out_again;
+ }
+ up(&(agp_fe.agp_mutex));
+ return 0;
+ }
+
+ if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
+ if (size != current_size)
+ goto out_inval;
+
+ DBG("controller vm_ops=%p", kerninfo.vm_ops);
+ if (kerninfo.vm_ops) {
+ vma->vm_ops = kerninfo.vm_ops;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
+ } else if (remap_page_range(vma, vma->vm_start,
+ kerninfo.aper_base,
+ size, vma->vm_page_prot)) {
+#else
+ } else if (io_remap_pfn_range(vma, vma->vm_start,
+ kerninfo.aper_base >> PAGE_SHIFT,
+ size, vma->vm_page_prot)) {
+#endif
+ goto out_again;
+ }
+ up(&(agp_fe.agp_mutex));
+ return 0;
+ }
+
+out_eperm:
+ up(&(agp_fe.agp_mutex));
+ return -EPERM;
+
+out_inval:
+ up(&(agp_fe.agp_mutex));
+ return -EINVAL;
+
+out_again:
+ up(&(agp_fe.agp_mutex));
+ return -EAGAIN;
+}
+
+static int agp_release(struct inode *inode, struct file *file)
+{
+ struct agp_file_private *priv = file->private_data;
+
+ down(&(agp_fe.agp_mutex));
+
+ DBG("priv=%p", priv);
+
+ if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
+ struct agp_controller *controller;
+
+ controller = agp_find_controller_by_pid(priv->my_pid);
+
+ if (controller != NULL) {
+ if (controller == agp_fe.current_controller)
+ agp_controller_release_current(controller, priv);
+ agp_remove_controller(controller);
+ controller = NULL;
+ }
+ }
+
+ if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags))
+ agp_remove_client(priv->my_pid);
+
+ agp_remove_file_private(priv);
+ kfree(priv);
+ file->private_data = NULL;
+ up(&(agp_fe.agp_mutex));
+ return 0;
+}
+
+static int agp_open(struct inode *inode, struct file *file)
+{
+ int minor = iminor(inode);
+ struct agp_file_private *priv;
+ struct agp_client *client;
+ int rc = -ENXIO;
+
+ down(&(agp_fe.agp_mutex));
+
+ if (minor != AGPGART_MINOR)
+ goto err_out;
+
+ priv = kmalloc(sizeof(struct agp_file_private), GFP_KERNEL);
+ if (priv == NULL)
+ goto err_out_nomem;
+
+ memset(priv, 0, sizeof(struct agp_file_private));
+ set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags);
+ priv->my_pid = current->pid;
+
+ if ((current->uid == 0) || (current->suid == 0)) {
+ /* Root priv, can be controller */
+ set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags);
+ }
+ client = agp_find_client_by_pid(current->pid);
+
+ if (client != NULL) {
+ set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &priv->access_flags);
+ }
+ file->private_data = (void *) priv;
+ agp_insert_file_private(priv);
+ DBG("private=%p, client=%p", priv, client);
+ up(&(agp_fe.agp_mutex));
+ return 0;
+
+err_out_nomem:
+ rc = -ENOMEM;
+err_out:
+ up(&(agp_fe.agp_mutex));
+ return rc;
+}
+
+
+static ssize_t agp_read(struct file *file, char __user *buf,
+ size_t count, loff_t * ppos)
+{
+ return -EINVAL;
+}
+
+static ssize_t agp_write(struct file *file, const char __user *buf,
+ size_t count, loff_t * ppos)
+{
+ return -EINVAL;
+}
+
+static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_info userinfo;
+ struct agp_kern_info kerninfo;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ agp_copy_info(&kerninfo);
+#else
+ agp_copy_info(agp_bridge, &kerninfo);
+#endif
+
+ userinfo.version.major = kerninfo.version.major;
+ userinfo.version.minor = kerninfo.version.minor;
+ userinfo.bridge_id = kerninfo.device->vendor |
+ (kerninfo.device->device << 16);
+ userinfo.agp_mode = kerninfo.mode;
+ userinfo.aper_base = kerninfo.aper_base;
+ userinfo.aper_size = kerninfo.aper_size;
+ userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
+ userinfo.pg_used = kerninfo.current_memory;
+
+ if (copy_to_user(arg, &userinfo, sizeof(struct agp_info)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int agpioc_acquire_wrap(struct agp_file_private *priv)
+{
+ struct agp_controller *controller;
+
+ DBG("");
+
+ if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags)))
+ return -EPERM;
+
+ if (agp_fe.current_controller != NULL)
+ return -EBUSY;
+
+ if(!agp_bridge)
+ return -ENODEV;
+
+ if (atomic_read(&agp_bridge->agp_in_use))
+ return -EBUSY;
+
+ atomic_inc(&agp_bridge->agp_in_use);
+
+ agp_fe.backend_acquired = TRUE;
+
+ controller = agp_find_controller_by_pid(priv->my_pid);
+
+ if (controller != NULL) {
+ agp_controller_make_current(controller);
+ } else {
+ controller = agp_create_controller(priv->my_pid);
+
+ if (controller == NULL) {
+ agp_fe.backend_acquired = FALSE;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ agp_backend_release();
+#else
+ agp_backend_release(agp_bridge);
+#endif
+ return -ENOMEM;
+ }
+ agp_insert_controller(controller);
+ agp_controller_make_current(controller);
+ }
+
+ set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &priv->access_flags);
+ return 0;
+}
+
+static int agpioc_release_wrap(struct agp_file_private *priv)
+{
+ DBG("");
+ agp_controller_release_current(agp_fe.current_controller, priv);
+ return 0;
+}
+
+static int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_setup mode;
+
+ DBG("");
+ if (copy_from_user(&mode, arg, sizeof(struct agp_setup)))
+ return -EFAULT;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ agp_enable(mode.agp_mode);
+#else
+ agp_enable(agp_bridge, mode.agp_mode);
+#endif
+ return 0;
+}
+
+static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_region reserve;
+ struct agp_client *client;
+ struct agp_file_private *client_priv;
+
+ DBG("");
+ if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
+ return -EFAULT;
+
+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
+ return -EFAULT;
+
+ client = agp_find_client_by_pid(reserve.pid);
+
+ if (reserve.seg_count == 0) {
+ /* remove a client */
+ client_priv = agp_find_private(reserve.pid);
+
+ if (client_priv != NULL) {
+ set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
+ }
+ if (client == NULL) {
+ /* client is already removed */
+ return 0;
+ }
+ return agp_remove_client(reserve.pid);
+ } else {
+ struct agp_segment *segment;
+
+ if (reserve.seg_count >= 16384)
+ return -EINVAL;
+
+ segment = kmalloc((sizeof(struct agp_segment) * reserve.seg_count),
+ GFP_KERNEL);
+
+ if (segment == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(segment, (void __user *) reserve.seg_list,
+ sizeof(struct agp_segment) * reserve.seg_count)) {
+ kfree(segment);
+ return -EFAULT;
+ }
+ reserve.seg_list = segment;
+
+ if (client == NULL) {
+ /* Create the client and add the segment */
+ client = agp_create_client(reserve.pid);
+
+ if (client == NULL) {
+ kfree(segment);
+ return -ENOMEM;
+ }
+ client_priv = agp_find_private(reserve.pid);
+
+ if (client_priv != NULL) {
+ set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
+ }
+ }
+ return agp_create_segment(client, &reserve);
+ }
+ /* Will never really happen */
+ return -EINVAL;
+}
+
+static int agpioc_protect_wrap(struct agp_file_private *priv)
+{
+ DBG("");
+ /* This function is not currently implemented */
+ return -EINVAL;
+}
+
+static int agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_memory *memory;
+ struct agp_allocate alloc;
+
+ DBG("");
+ if (copy_from_user(&alloc, arg, sizeof(struct agp_allocate)))
+ return -EFAULT;
+
+ memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
+
+ if (memory == NULL)
+ return -ENOMEM;
+
+ alloc.key = memory->key;
+ alloc.physical = memory->physical;
+
+ if (copy_to_user(arg, &alloc, sizeof(struct agp_allocate))) {
+ agp_free_memory_wrap(memory);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg)
+{
+ struct agp_memory *memory;
+
+ DBG("");
+ memory = agp_find_mem_by_key(arg);
+
+ if (memory == NULL)
+ return -EINVAL;
+
+ agp_free_memory_wrap(memory);
+ return 0;
+}
+
+static int agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_bind bind_info;
+ struct agp_memory *memory;
+
+ DBG("");
+ if (copy_from_user(&bind_info, arg, sizeof(struct agp_bind)))
+ return -EFAULT;
+
+ memory = agp_find_mem_by_key(bind_info.key);
+
+ if (memory == NULL)
+ return -EINVAL;
+
+ return agp_bind_memory(memory, bind_info.pg_start);
+}
+
+static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_memory *memory;
+ struct agp_unbind unbind;
+
+ DBG("");
+ if (copy_from_user(&unbind, arg, sizeof(struct agp_unbind)))
+ return -EFAULT;
+
+ memory = agp_find_mem_by_key(unbind.key);
+
+ if (memory == NULL)
+ return -EINVAL;
+
+ return agp_unbind_memory(memory);
+}
+
+static int agp_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct agp_file_private *curr_priv = file->private_data;
+ int ret_val = -ENOTTY;
+
+ DBG("priv=%p, cmd=%x", curr_priv, cmd);
+ down(&(agp_fe.agp_mutex));
+
+ if ((agp_fe.current_controller == NULL) &&
+ (cmd != AGPIOC_ACQUIRE)) {
+ ret_val = -EINVAL;
+ goto ioctl_out;
+ }
+ if ((agp_fe.backend_acquired != TRUE) &&
+ (cmd != AGPIOC_ACQUIRE)) {
+ ret_val = -EBUSY;
+ goto ioctl_out;
+ }
+ if (cmd != AGPIOC_ACQUIRE) {
+ if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
+ ret_val = -EPERM;
+ goto ioctl_out;
+ }
+ /* Use the original pid of the controller,
+ * in case it's threaded */
+
+ if (agp_fe.current_controller->pid != curr_priv->my_pid) {
+ ret_val = -EBUSY;
+ goto ioctl_out;
+ }
+ }
+
+ switch (cmd) {
+ case AGPIOC_INFO:
+ ret_val = agpioc_info_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_ACQUIRE:
+ ret_val = agpioc_acquire_wrap(curr_priv);
+ break;
+
+ case AGPIOC_RELEASE:
+ ret_val = agpioc_release_wrap(curr_priv);
+ break;
+
+ case AGPIOC_SETUP:
+ ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_RESERVE:
+ ret_val = agpioc_reserve_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_PROTECT:
+ ret_val = agpioc_protect_wrap(curr_priv);
+ break;
+
+ case AGPIOC_ALLOCATE:
+ ret_val = agpioc_allocate_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_DEALLOCATE:
+ ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
+ break;
+
+ case AGPIOC_BIND:
+ ret_val = agpioc_bind_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_UNBIND:
+ ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg);
+ break;
+ }
+
+ioctl_out:
+ DBG("ioctl returns %d\n", ret_val);
+ up(&(agp_fe.agp_mutex));
+ return ret_val;
+}
+
+static struct file_operations agp_fops =
+{
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = agp_read,
+ .write = agp_write,
+ .ioctl = agp_ioctl,
+ .mmap = agp_mmap,
+ .open = agp_open,
+ .release = agp_release,
+};
+
+static struct miscdevice agp_miscdev =
+{
+ .minor = AGPGART_MINOR,
+ .name = "agpgart",
+ .fops = &agp_fops
+};
+
+int agp_frontend_initialize(void)
+{
+ memset(&agp_fe, 0, sizeof(struct agp_front_data));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
+ mutex_init(&(agp_fe.agp_mutex));
+#else
+ sema_init(&(agp_fe.agp_mutex), 1);
+#endif
+ if (misc_register(&agp_miscdev)) {
+ printk(KERN_ERR PFX "unable to get minor: %d\n", AGPGART_MINOR);
+ return -EIO;
+ }
+ return 0;
+}
+
+void agp_frontend_cleanup(void)
+{
+ misc_deregister(&agp_miscdev);
+}
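The character device registered above is what the X server and test programs talk to. A minimal userspace sketch follows; it assumes the AGPIOC_* numbers and the agp_info layout from <linux/agpgart.h>, and testagp.c in this commit exercises the same interface far more thoroughly:

    /* Minimal /dev/agpgart client: acquire the backend as controller
     * (requires root, per agp_open() above), print the bridge info, then
     * release it. Error handling is abbreviated. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/agpgart.h>

    int main(void)
    {
            agp_info info;
            int fd = open("/dev/agpgart", O_RDWR);

            if (fd < 0 || ioctl(fd, AGPIOC_ACQUIRE) < 0) {
                    perror("agpgart");
                    return 1;
            }
            if (ioctl(fd, AGPIOC_INFO, &info) == 0)
                    printf("bridge %08x: %zuM aperture at 0x%lx, mode 0x%x\n",
                           info.bridge_id, info.aper_size,
                           info.aper_base, info.agp_mode);
            ioctl(fd, AGPIOC_RELEASE);
            close(fd);
            return 0;
    }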
diff --git a/generic.c b/generic.c
new file mode 100644
index 0000000..7e60228
--- /dev/null
+++ b/generic.c
@@ -0,0 +1,1515 @@
+/*
+ * AGPGART driver.
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2005 Dave Jones.
+ * Copyright (C) 1999 Jeff Hartmann.
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * TODO:
+ * - Allocate more than order 0 pages to avoid too much linear map splitting.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <linux/pm.h>
+#include <linux/agp_backend.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <asm/io.h>
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include "agp.h"
+
+__u32 *agp_gatt_table;
+int agp_memory_reserved;
+
+/*
+ * Needed by the Nforce GART driver for the time being. Would be
+ * nice to do this some other way instead of needing this export.
+ */
+EXPORT_SYMBOL_GPL(agp_memory_reserved);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
+#if defined(CONFIG_X86)
+int map_page_into_agp(struct page *page)
+{
+ int i;
+ i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
+ return i;
+}
+EXPORT_SYMBOL_GPL(map_page_into_agp);
+
+int unmap_page_from_agp(struct page *page)
+{
+ int i;
+ i = change_page_attr(page, 1, PAGE_KERNEL);
+ return i;
+}
+EXPORT_SYMBOL_GPL(unmap_page_from_agp);
+#endif
+#endif
+
+/*
+ * Generic routines for handling agp_memory structures -
+ * They use the basic page allocation routines to do the brunt of the work.
+ */
+
+void agp_free_key(int key)
+{
+ if (key < 0)
+ return;
+
+ if (key < MAXKEY)
+ clear_bit(key, agp_bridge->key_list);
+}
+EXPORT_SYMBOL(agp_free_key);
+
+
+static int agp_get_key(void)
+{
+ int bit;
+
+ bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
+ if (bit < MAXKEY) {
+ set_bit(bit, agp_bridge->key_list);
+ return bit;
+ }
+ return -1;
+}
+
+
+struct agp_memory *agp_create_memory(int scratch_pages)
+{
+ struct agp_memory *new;
+
+ new = kmalloc(sizeof(struct agp_memory), GFP_KERNEL);
+
+ if (new == NULL)
+ return NULL;
+
+ memset(new, 0, sizeof(struct agp_memory));
+ new->key = agp_get_key();
+
+ if (new->key < 0) {
+ kfree(new);
+ return NULL;
+ }
+ new->memory = vmalloc(PAGE_SIZE * scratch_pages);
+
+ if (new->memory == NULL) {
+ agp_free_key(new->key);
+ kfree(new);
+ return NULL;
+ }
+ new->num_scratch_pages = scratch_pages;
+ return new;
+}
+EXPORT_SYMBOL(agp_create_memory);
+
+/**
+ * agp_free_memory - free memory associated with an agp_memory pointer.
+ *
+ * @curr: agp_memory pointer to be freed.
+ *
+ * It is the only function that can be called when the backend is not owned
+ * by the caller. (So it can free memory on client death.)
+ */
+void agp_free_memory(struct agp_memory *curr)
+{
+ size_t i;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ if (agp_bridge->type == NOT_SUPPORTED)
+ return;
+#endif
+ if (curr == NULL)
+ return;
+
+ if (curr->is_bound == TRUE)
+ agp_unbind_memory(curr);
+
+ if (curr->type != 0) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ agp_bridge->driver->free_by_type(curr);
+#else
+ curr->bridge->driver->free_by_type(curr);
+#endif
+ return;
+ }
+ if (curr->page_count != 0) {
+ for (i = 0; i < curr->page_count; i++) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]));
+#else
+ curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]));
+#endif
+ }
+ }
+ agp_free_key(curr->key);
+ vfree(curr->memory);
+ kfree(curr);
+}
+EXPORT_SYMBOL(agp_free_memory);
+
+#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
+
+/**
+ * agp_allocate_memory - allocate a group of pages of a certain type.
+ *
+ * @page_count: size_t argument of the number of pages
+ * @type: u32 argument of the type of memory to be allocated.
+ *
+ * Every AGP bridge device will allow you to allocate AGP_NORMAL_MEMORY,
+ * which maps to physical RAM. Any other type is device-dependent.
+ *
+ * It returns NULL whenever memory is unavailable.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+struct agp_memory *agp_allocate_memory(size_t page_count, u32 type)
+{
+ struct agp_bridge_data *bridge = agp_bridge;
+ int scratch_pages;
+ struct agp_memory *new;
+ size_t i;
+
+ if (bridge->type == NOT_SUPPORTED)
+ return NULL;
+#else
+struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
+ size_t page_count, u32 type)
+{
+ int scratch_pages;
+ struct agp_memory *new;
+ size_t i;
+
+ if (!bridge)
+ return NULL;
+#endif
+
+ if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
+ return NULL;
+
+ if (type != 0) {
+ new = bridge->driver->alloc_by_type(page_count, type);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
+ if (new)
+ new->bridge = bridge;
+#endif
+ return new;
+ }
+
+ scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
+
+ new = agp_create_memory(scratch_pages);
+
+ if (new == NULL)
+ return NULL;
+
+ for (i = 0; i < page_count; i++) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ void *addr = bridge->driver->agp_alloc_page();
+#else
+ void *addr = bridge->driver->agp_alloc_page(bridge);
+#endif
+ if (addr == NULL) {
+ agp_free_memory(new);
+ return NULL;
+ }
+ new->memory[i] = virt_to_gart(addr);
+ new->page_count++;
+ }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
+ new->bridge = bridge;
+#endif
+ flush_agp_mappings();
+
+ return new;
+}
+EXPORT_SYMBOL(agp_allocate_memory);
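+
+/*
+ * Rough sketch of the expected lifecycle for an in-kernel user (a DRM
+ * driver, say) on the post-2.6.12 API; the pci_dev, page counts and
+ * offsets below are made up for illustration only:
+ *
+ *	struct agp_bridge_data *bridge = agp_backend_acquire(pdev);
+ *	struct agp_memory *mem;
+ *
+ *	mem = agp_allocate_memory(bridge, 16, AGP_NORMAL_MEMORY);
+ *	if (mem && agp_bind_memory(mem, pg_start) == 0) {
+ *		... pages are GART-visible until ...
+ *		agp_unbind_memory(mem);
+ *	}
+ *	agp_free_memory(mem);
+ *	agp_backend_release(bridge);
+ */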
+
+
+/* End - Generic routines for handling agp_memory structures */
+
+
+static int agp_return_size(void)
+{
+ int current_size;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+
+ switch (agp_bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ current_size = A_SIZE_8(temp)->size;
+ break;
+ case U16_APER_SIZE:
+ current_size = A_SIZE_16(temp)->size;
+ break;
+ case U32_APER_SIZE:
+ current_size = A_SIZE_32(temp)->size;
+ break;
+ case LVL2_APER_SIZE:
+ current_size = A_SIZE_LVL2(temp)->size;
+ break;
+ case FIXED_APER_SIZE:
+ current_size = A_SIZE_FIX(temp)->size;
+ break;
+ default:
+ current_size = 0;
+ break;
+ }
+
+ current_size -= (agp_memory_reserved / (1024*1024));
+ if (current_size < 0)
+ current_size = 0;
+ return current_size;
+}
+
+
+int agp_num_entries(void)
+{
+ int num_entries;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+
+ switch (agp_bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ num_entries = A_SIZE_8(temp)->num_entries;
+ break;
+ case U16_APER_SIZE:
+ num_entries = A_SIZE_16(temp)->num_entries;
+ break;
+ case U32_APER_SIZE:
+ num_entries = A_SIZE_32(temp)->num_entries;
+ break;
+ case LVL2_APER_SIZE:
+ num_entries = A_SIZE_LVL2(temp)->num_entries;
+ break;
+ case FIXED_APER_SIZE:
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+ break;
+ default:
+ num_entries = 0;
+ break;
+ }
+
+ num_entries -= agp_memory_reserved >> PAGE_SHIFT;
+ if (num_entries < 0)
+ num_entries = 0;
+ return num_entries;
+}
+EXPORT_SYMBOL_GPL(agp_num_entries);
+
+
+/**
+ * agp_copy_info - copy bridge state information
+ *
+ * @info: agp_kern_info pointer. The caller should ensure that this pointer is valid.
+ *
+ * This function copies information about the agp bridge device and the state of
+ * the agp backend into an agp_kern_info pointer.
+ */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+int agp_copy_info(struct agp_kern_info *info)
+{
+ struct agp_bridge_data *bridge = agp_bridge;
+
+ memset(info, 0, sizeof(struct agp_kern_info));
+ if (!agp_bridge || agp_bridge->type == NOT_SUPPORTED ||
+ !agp_bridge->version) {
+ info->chipset = NOT_SUPPORTED;
+ return -EIO;
+ }
+#else
+int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
+{
+ memset(info, 0, sizeof(struct agp_kern_info));
+ if (!bridge) {
+ info->chipset = NOT_SUPPORTED;
+ return -EIO;
+ }
+#endif
+ info->version.major = bridge->version->major;
+ info->version.minor = bridge->version->minor;
+ info->chipset = SUPPORTED;
+ info->device = bridge->dev;
+ if (bridge->mode & AGPSTAT_MODE_3_0)
+ info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
+ else
+ info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
+ info->aper_base = bridge->gart_bus_addr;
+ info->aper_size = agp_return_size();
+ info->max_memory = bridge->max_memory_agp;
+ info->current_memory = atomic_read(&bridge->current_memory_agp);
+ info->cant_use_aperture = bridge->driver->cant_use_aperture;
+ info->vm_ops = bridge->vm_ops;
+ info->page_mask = ~0UL;
+ return 0;
+}
+EXPORT_SYMBOL(agp_copy_info);
+
+/* End - Routine to copy over information structure */
+
+/*
+ * Routines for handling swapping of agp_memory into the GATT -
+ * These routines take agp_memory and insert them into the GATT.
+ * They call device specific routines to actually write to the GATT.
+ */
+
+/**
+ * agp_bind_memory - Bind an agp_memory structure into the GATT.
+ *
+ * @curr: agp_memory pointer
+ * @pg_start: an offset into the graphics aperture translation table
+ *
+ * It returns -EINVAL if the pointer == NULL.
+ * It returns -EBUSY if the area of the table requested is already in use.
+ */
+int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
+{
+ int ret_val;
+ struct agp_bridge_data *bridge;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ if (agp_bridge->type == NOT_SUPPORTED)
+ return -EINVAL;
+#endif
+
+ if (curr == NULL)
+ return -EINVAL;
+
+ if (curr->is_bound == TRUE) {
+ printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
+ return -EINVAL;
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ bridge = agp_bridge;
+#else
+ bridge = curr->bridge;
+#endif
+ if (curr->is_flushed == FALSE) {
+ bridge->driver->cache_flush();
+ curr->is_flushed = TRUE;
+ }
+ ret_val = bridge->driver->insert_memory(curr, pg_start, curr->type);
+
+ if (ret_val != 0)
+ return ret_val;
+
+ curr->is_bound = TRUE;
+ curr->pg_start = pg_start;
+ return 0;
+}
+EXPORT_SYMBOL(agp_bind_memory);
+
+
+/**
+ * agp_unbind_memory - Removes an agp_memory structure from the GATT
+ *
+ * @curr: agp_memory pointer to be removed from the GATT.
+ *
+ * It returns -EINVAL if this piece of agp_memory is not currently bound to
+ * the graphics aperture translation table or if the agp_memory pointer == NULL
+ */
+int agp_unbind_memory(struct agp_memory *curr)
+{
+ int ret_val;
+ struct agp_bridge_data *bridge;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ if (agp_bridge->type == NOT_SUPPORTED)
+ return -EINVAL;
+#endif
+
+ if (curr == NULL)
+ return -EINVAL;
+
+ if (curr->is_bound != TRUE) {
+ printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
+ return -EINVAL;
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ bridge = agp_bridge;
+#else
+ bridge = curr->bridge;
+#endif
+
+ ret_val = bridge->driver->remove_memory(curr, curr->pg_start, curr->type);
+
+ if (ret_val != 0)
+ return ret_val;
+
+ curr->is_bound = FALSE;
+ curr->pg_start = 0;
+ return 0;
+}
+EXPORT_SYMBOL(agp_unbind_memory);
+
+/* End - Routines for handling swapping of agp_memory into the GATT */
+
+
+/* Generic Agp routines - Start */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+
+static void agp_v2_parse_one(u32 *mode, u32 *cmd, u32 *tmp)
+{
+ /* disable SBA if it's not supported */
+ if (!((*cmd & AGPSTAT_SBA) && (*tmp & AGPSTAT_SBA) && (*mode & AGPSTAT_SBA)))
+ *cmd &= ~AGPSTAT_SBA;
+
+ /* Set speed */
+ if (!((*cmd & AGPSTAT2_4X) && (*tmp & AGPSTAT2_4X) && (*mode & AGPSTAT2_4X)))
+ *cmd &= ~AGPSTAT2_4X;
+
+ if (!((*cmd & AGPSTAT2_2X) && (*tmp & AGPSTAT2_2X) && (*mode & AGPSTAT2_2X)))
+ *cmd &= ~AGPSTAT2_2X;
+
+ if (!((*cmd & AGPSTAT2_1X) && (*tmp & AGPSTAT2_1X) && (*mode & AGPSTAT2_1X)))
+ *cmd &= ~AGPSTAT2_1X;
+
+ /* Now we know what mode it should be, clear out the unwanted bits. */
+ if (*cmd & AGPSTAT2_4X)
+ *cmd &= ~(AGPSTAT2_1X | AGPSTAT2_2X); /* 4X */
+
+ if (*cmd & AGPSTAT2_2X)
+ *cmd &= ~(AGPSTAT2_1X | AGPSTAT2_4X); /* 2X */
+
+ if (*cmd & AGPSTAT2_1X)
+ *cmd &= ~(AGPSTAT2_2X | AGPSTAT2_4X); /* 1X */
+}
+#else
+static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
+{
+ u32 tmp;
+
+ if (*requested_mode & AGP2_RESERVED_MASK) {
+ printk(KERN_INFO PFX "reserved bits set in mode 0x%x. Fixed.\n", *requested_mode);
+ *requested_mode &= ~AGP2_RESERVED_MASK;
+ }
+
+ /* Check the speed bits make sense. Only one should be set. */
+ tmp = *requested_mode & 7;
+ switch (tmp) {
+ case 0:
+ printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
+ *requested_mode |= AGPSTAT2_1X;
+ break;
+ case 1:
+ case 2:
+ break;
+ case 3:
+ *requested_mode &= ~(AGPSTAT2_1X); /* rate=2 */
+ break;
+ case 4:
+ break;
+ case 5:
+ case 6:
+ case 7:
+ *requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4*/
+ break;
+ }
+
+ /* disable SBA if it's not supported */
+ if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
+ *bridge_agpstat &= ~AGPSTAT_SBA;
+
+ /* Set rate */
+ if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
+ *bridge_agpstat &= ~AGPSTAT2_4X;
+
+ if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
+ *bridge_agpstat &= ~AGPSTAT2_2X;
+
+ if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
+ *bridge_agpstat &= ~AGPSTAT2_1X;
+
+ /* Now we know what mode it should be, clear out the unwanted bits. */
+ if (*bridge_agpstat & AGPSTAT2_4X)
+ *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X); /* 4X */
+
+ if (*bridge_agpstat & AGPSTAT2_2X)
+ *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X); /* 2X */
+
+ if (*bridge_agpstat & AGPSTAT2_1X)
+ *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X); /* 1X */
+
+ /* Apply any errata. */
+ if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
+ *bridge_agpstat &= ~AGPSTAT_FW;
+
+ if (agp_bridge->flags & AGP_ERRATA_SBA)
+ *bridge_agpstat &= ~AGPSTAT_SBA;
+
+ if (agp_bridge->flags & AGP_ERRATA_1X) {
+ *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
+ *bridge_agpstat |= AGPSTAT2_1X;
+ }
+
+ /* If we've dropped down to 1X, disable fast writes. */
+ if (*bridge_agpstat & AGPSTAT2_1X)
+ *bridge_agpstat &= ~AGPSTAT_FW;
+}
+#endif
+
+/*
+ * requested_mode = Mode requested by (typically) X.
+ * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
+ * vga_agpstat = PCI_AGP_STATUS from graphic card.
+ */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+static void agp_v3_parse_one(u32 *mode, u32 *cmd, u32 *tmp)
+{
+ u32 origcmd=*cmd, origtmp=*tmp;
+
+ /* ARQSZ - Set the value to the maximum one.
+ * Don't allow the mode register to override values. */
+ *cmd = ((*cmd & ~AGPSTAT_ARQSZ) |
+ max_t(u32,(*cmd & AGPSTAT_ARQSZ),(*tmp & AGPSTAT_ARQSZ)));
+
+ /* Calibration cycle.
+ * Don't allow the mode register to override values. */
+ *cmd = ((*cmd & ~AGPSTAT_CAL_MASK) |
+ min_t(u32,(*cmd & AGPSTAT_CAL_MASK),(*tmp & AGPSTAT_CAL_MASK)));
+
+ /* SBA *must* be supported for AGP v3 */
+ *cmd |= AGPSTAT_SBA;
+
+ /*
+ * Set speed.
+ * Check for invalid speeds. This can happen when applications
+ * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
+ */
+ if (*mode & AGPSTAT_MODE_3_0) {
+ /*
+ * The caller hasn't a clue what it is doing. We are in 3.0 mode and
+ * have been passed a 3.0 mode, but with 2.x speed bits set.
+ * AGP2.x 4x -> AGP3.0 4x.
+ */
+ if (*mode & AGPSTAT2_4X) {
+ printk (KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
+ current->comm, *mode);
+ *mode &= ~AGPSTAT2_4X;
+ *mode |= AGPSTAT3_4X;
+ }
+ } else {
+ /*
+ * The caller doesn't know what they are doing. We are in 3.0 mode,
+ * but have been passed an AGP 2.x mode.
+ * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
+ */
+ printk (KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
+ current->comm, *mode);
+ *mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
+ *mode |= AGPSTAT3_4X;
+ }
+
+ if (*mode & AGPSTAT3_8X) {
+ if (!(*cmd & AGPSTAT3_8X)) {
+ *cmd &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ *cmd |= AGPSTAT3_4X;
+ printk ("%s requested AGPx8 but bridge not capable.\n", current->comm);
+ return;
+ }
+ if (!(*tmp & AGPSTAT3_8X)) {
+ *cmd &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ *cmd |= AGPSTAT3_4X;
+ printk ("%s requested AGPx8 but graphic card not capable.\n", current->comm);
+ return;
+ }
+ /* All set, bridge & device can do AGP x8*/
+ *cmd &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
+ return;
+
+ } else {
+
+ /*
+ * If we didn't specify AGPx8, we can only do x4.
+ * If the hardware can't do x4, we're up shit creek, and never
+ * should have got this far.
+ */
+ *cmd &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ if ((*cmd & AGPSTAT3_4X) && (*tmp & AGPSTAT3_4X))
+ *cmd |= AGPSTAT3_4X;
+ else {
+ printk (KERN_INFO PFX "Badness. Don't know which AGP mode to set. "
+ "[cmd:%x tmp:%x fell back to:- cmd:%x tmp:%x]\n",
+ origcmd, origtmp, *cmd, *tmp);
+ if (!(*cmd & AGPSTAT3_4X))
+ printk (KERN_INFO PFX "Bridge couldn't do AGP x4.\n");
+ if (!(*tmp & AGPSTAT3_4X))
+ printk (KERN_INFO PFX "Graphic card couldn't do AGP x4.\n");
+ }
+ }
+}
+#else
+static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
+{
+ u32 origbridge=*bridge_agpstat, origvga=*vga_agpstat;
+ u32 tmp;
+
+ if (*requested_mode & AGP3_RESERVED_MASK) {
+ printk(KERN_INFO PFX "reserved bits set in mode 0x%x. Fixed.\n", *requested_mode);
+ *requested_mode &= ~AGP3_RESERVED_MASK;
+ }
+
+ /* Check the speed bits make sense. */
+ tmp = *requested_mode & 7;
+ if (tmp == 0) {
+ printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
+ *requested_mode |= AGPSTAT3_4X;
+ }
+ if (tmp >= 3) {
+ printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
+ *requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
+ }
+
+ /* ARQSZ - Set the value to the maximum one.
+ * Don't allow the mode register to override values. */
+ *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
+ max_t(u32,(*bridge_agpstat & AGPSTAT_ARQSZ),(*vga_agpstat & AGPSTAT_ARQSZ)));
+
+ /* Calibration cycle.
+ * Don't allow the mode register to override values. */
+ *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
+ min_t(u32,(*bridge_agpstat & AGPSTAT_CAL_MASK),(*vga_agpstat & AGPSTAT_CAL_MASK)));
+
+ /* SBA *must* be supported for AGP v3 */
+ *bridge_agpstat |= AGPSTAT_SBA;
+
+ /*
+ * Set speed.
+ * Check for invalid speeds. This can happen when applications
+ * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
+ */
+ if (*requested_mode & AGPSTAT_MODE_3_0) {
+ /*
+ * The caller hasn't a clue what it is doing. The bridge is in 3.0 mode
+ * and has been passed a 3.0 mode, but with 2.x speed bits set.
+ * AGP2.x 4x -> AGP3.0 4x.
+ */
+ if (*requested_mode & AGPSTAT2_4X) {
+ printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
+ current->comm, *requested_mode);
+ *requested_mode &= ~AGPSTAT2_4X;
+ *requested_mode |= AGPSTAT3_4X;
+ }
+ } else {
+ /*
+ * The caller doesn't know what they are doing. We are in 3.0 mode,
+ * but have been passed an AGP 2.x mode.
+ * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
+ */
+ printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
+ current->comm, *requested_mode);
+ *requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
+ *requested_mode |= AGPSTAT3_4X;
+ }
+
+ if (*requested_mode & AGPSTAT3_8X) {
+ if (!(*bridge_agpstat & AGPSTAT3_8X)) {
+ *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ *bridge_agpstat |= AGPSTAT3_4X;
+ printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
+ return;
+ }
+ if (!(*vga_agpstat & AGPSTAT3_8X)) {
+ *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ *bridge_agpstat |= AGPSTAT3_4X;
+ printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
+ return;
+ }
+ /* All set, bridge & device can do AGP x8*/
+ *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
+ goto done;
+
+ } else {
+
+ /*
+ * If we didn't specify AGPx8, we can only do x4.
+ * If the hardware can't do x4, we're up shit creek, and never
+ * should have got this far.
+ */
+ *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ if ((*bridge_agpstat & AGPSTAT3_4X) && (*vga_agpstat & AGPSTAT3_4X))
+ *bridge_agpstat |= AGPSTAT3_4X;
+ else {
+ printk(KERN_INFO PFX "Badness. Don't know which AGP mode to set. "
+ "[bridge_agpstat:%x vga_agpstat:%x fell back to:- bridge_agpstat:%x vga_agpstat:%x]\n",
+ origbridge, origvga, *bridge_agpstat, *vga_agpstat);
+ if (!(*bridge_agpstat & AGPSTAT3_4X))
+ printk(KERN_INFO PFX "Bridge couldn't do AGP x4.\n");
+ if (!(*vga_agpstat & AGPSTAT3_4X))
+ printk(KERN_INFO PFX "Graphic card couldn't do AGP x4.\n");
+ return;
+ }
+ }
+
+done:
+ /* Apply any errata. */
+ if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
+ *bridge_agpstat &= ~AGPSTAT_FW;
+
+ if (agp_bridge->flags & AGP_ERRATA_SBA)
+ *bridge_agpstat &= ~AGPSTAT_SBA;
+
+ if (agp_bridge->flags & AGP_ERRATA_1X) {
+ *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
+ *bridge_agpstat |= AGPSTAT2_1X;
+ }
+}
+#endif
+
+
+/**
+ * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
+ * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
+ * @requested_mode: requested agp_stat from userspace (Typically from X)
+ * @bridge_agpstat: current agp_stat from AGP bridge.
+ *
+ * This function will hunt for an AGP graphics card, and try to match
+ * the requested mode to the capabilities of both the bridge and the card.
+ */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+u32 agp_collect_device_status(u32 mode, u32 cmd)
+{
+ struct pci_dev *device = NULL;
+ u8 cap_ptr;
+ u32 tmp;
+ u32 agp3;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
+ for_each_pci_dev(device) {
+#else
+ while ((device = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, device)) != NULL) {
+#endif
+ cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ continue;
+
+ //FIXME: We should probably skip anything here that
+ // isn't an AGP graphics card.
+ /*
+ * OK, here we have an AGP device. Disable impossible
+ * settings, and adjust the read queue to the minimum.
+ */
+ pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &tmp);
+
+ /* adjust RQ depth */
+ cmd = ((cmd & ~AGPSTAT_RQ_DEPTH) |
+ min_t(u32, (mode & AGPSTAT_RQ_DEPTH),
+ min_t(u32, (cmd & AGPSTAT_RQ_DEPTH), (tmp & AGPSTAT_RQ_DEPTH))));
+
+ /* disable FW if it's not supported */
+ if (!((cmd & AGPSTAT_FW) && (tmp & AGPSTAT_FW) && (mode & AGPSTAT_FW)))
+ cmd &= ~AGPSTAT_FW;
+
+ /* Check to see if we are operating in 3.0 mode */
+ pci_read_config_dword(device, cap_ptr+AGPSTAT, &agp3);
+ if (agp3 & AGPSTAT_MODE_3_0) {
+ agp_v3_parse_one(&mode, &cmd, &tmp);
+ } else {
+ agp_v2_parse_one(&mode, &cmd, &tmp);
+ }
+ }
+ return cmd;
+}
+#else
+
+u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
+{
+ struct pci_dev *device = NULL;
+ u32 vga_agpstat;
+ u8 cap_ptr;
+
+ for (;;) {
+ device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
+ if (!device) {
+ printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
+ return 0;
+ }
+ cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
+ if (cap_ptr)
+ break;
+ }
+
+ /*
+ * OK, here we have an AGP device. Disable impossible
+ * settings, and adjust the read queue to the minimum.
+ */
+ pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);
+
+ /* adjust RQ depth */
+ bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
+ min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
+ min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));
+
+ /* disable FW if it's not supported */
+ if (!((bridge_agpstat & AGPSTAT_FW) &&
+ (vga_agpstat & AGPSTAT_FW) &&
+ (requested_mode & AGPSTAT_FW)))
+ bridge_agpstat &= ~AGPSTAT_FW;
+
+ /* Check to see if we are operating in 3.0 mode */
+ if (agp_bridge->mode & AGPSTAT_MODE_3_0)
+ agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
+ else
+ agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
+
+ pci_dev_put(device);
+ return bridge_agpstat;
+}
+#endif
+EXPORT_SYMBOL(agp_collect_device_status);
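+
+/*
+ * In effect the negotiation above is a three-way AND: a capability bit
+ * (SBA, FW, a transfer rate) survives only if the requested mode, the
+ * bridge's AGPSTAT and the card's AGPSTAT all advertise it, with the RQ
+ * depth clamped to the smallest of the three. Example with hypothetical
+ * values: the bridge advertises 4x/2x/1x plus fast writes, the card only
+ * 2x/1x without FW, and X asks for up to 4x with FW -> the resulting
+ * command is 2x with fast writes disabled.
+ */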
+
+
+void agp_device_command(u32 bridge_agpstat, int agp_v3)
+{
+ struct pci_dev *device = NULL;
+ int mode;
+
+ mode = bridge_agpstat & 0x7;
+ if (agp_v3)
+ mode *= 4;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
+ for_each_pci_dev(device) {
+#else
+ while ((device = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, device)) != NULL) {
+#endif
+ u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
+ if (!agp)
+ continue;
+
+ printk(KERN_INFO PFX "Putting AGP V%d device at %s into %dx mode\n",
+ agp_v3 ? 3 : 2, pci_name(device), mode);
+ pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
+ }
+}
+EXPORT_SYMBOL(agp_device_command);
+
+
+void get_agp_version(struct agp_bridge_data *bridge)
+{
+ u32 ncapid;
+
+ /* Exit early if already set by errata workarounds. */
+ if (bridge->major_version != 0)
+ return;
+
+ pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
+ bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
+ bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
+}
+EXPORT_SYMBOL(get_agp_version);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+void agp_generic_enable(u32 requested_mode)
+{
+ struct agp_bridge_data *bridge = agp_bridge;
+#else
+void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
+{
+#endif
+ u32 bridge_agpstat, temp;
+
+ get_agp_version(bridge);
+
+ printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n",
+ bridge->major_version,
+ bridge->minor_version,
+ pci_name(bridge->dev));
+
+ pci_read_config_dword(bridge->dev,
+ bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ bridge_agpstat = agp_collect_device_status(requested_mode, bridge_agpstat);
+#else
+ bridge_agpstat = agp_collect_device_status(bridge, requested_mode, bridge_agpstat);
+#endif
+ if (bridge_agpstat == 0)
+ /* Something bad happened. FIXME: Return error code? */
+ return;
+
+ bridge_agpstat |= AGPSTAT_AGP_ENABLE;
+
+ /* Do AGP version specific frobbing. */
+ if (bridge->major_version >= 3) {
+ if (bridge->mode & AGPSTAT_MODE_3_0) {
+ /* If we have 3.5, we can do the isoch stuff. */
+ if (bridge->minor_version >= 5)
+ agp_3_5_enable(bridge);
+ agp_device_command(bridge_agpstat, TRUE);
+ return;
+ } else {
+ /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/
+ bridge_agpstat &= ~(7<<10);
+ pci_read_config_dword(bridge->dev,
+ bridge->capndx+AGPCTRL, &temp);
+ temp |= (1<<9);
+ pci_write_config_dword(bridge->dev,
+ bridge->capndx+AGPCTRL, temp);
+
+ printk(KERN_INFO PFX "Device is in legacy mode,"
+ " falling back to 2.x\n");
+ }
+ }
+
+ /* AGP v<3 */
+ agp_device_command(bridge_agpstat, FALSE);
+}
+EXPORT_SYMBOL(agp_generic_enable);
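+
+/*
+ * Rough call path for enabling AGP, using the names in this file: a
+ * chipset driver usually points its ->agp_enable hook at
+ * agp_generic_enable(), so agp_enable(bridge, mode) lands here; the bridge
+ * status is read, merged with the card's status and the requested mode by
+ * agp_collect_device_status(), AGPSTAT_AGP_ENABLE is set, and
+ * agp_device_command() writes the result into each AGP-capable device's
+ * PCI_AGP_COMMAND register.
+ */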
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+int agp_generic_create_gatt_table( void )
+{
+ struct agp_bridge_data *bridge = agp_bridge;
+#else
+int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
+{
+#endif
+ char *table;
+ char *table_end;
+ int size;
+ int page_order;
+ int num_entries;
+ int i;
+ void *temp;
+ struct page *page;
+
+ /* The generic routines can't handle 2 level gatt's */
+ if (bridge->driver->size_type == LVL2_APER_SIZE)
+ return -EINVAL;
+
+ table = NULL;
+ i = bridge->aperture_size_idx;
+ temp = bridge->current_size;
+ size = page_order = num_entries = 0;
+
+ if (bridge->driver->size_type != FIXED_APER_SIZE) {
+ do {
+ switch (bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ size = A_SIZE_8(temp)->size;
+ page_order =
+ A_SIZE_8(temp)->page_order;
+ num_entries =
+ A_SIZE_8(temp)->num_entries;
+ break;
+ case U16_APER_SIZE:
+ size = A_SIZE_16(temp)->size;
+ page_order = A_SIZE_16(temp)->page_order;
+ num_entries = A_SIZE_16(temp)->num_entries;
+ break;
+ case U32_APER_SIZE:
+ size = A_SIZE_32(temp)->size;
+ page_order = A_SIZE_32(temp)->page_order;
+ num_entries = A_SIZE_32(temp)->num_entries;
+ break;
+ /* This case will never really happen. */
+ case FIXED_APER_SIZE:
+ case LVL2_APER_SIZE:
+ default:
+ size = page_order = num_entries = 0;
+ break;
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
+ table = alloc_gatt_pages(page_order);
+#else
+ table = (char *) __get_free_pages(GFP_KERNEL,
+ page_order);
+#endif
+
+ if (table == NULL) {
+ i++;
+ switch (bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ bridge->current_size = A_IDX8(bridge);
+ break;
+ case U16_APER_SIZE:
+ bridge->current_size = A_IDX16(bridge);
+ break;
+ case U32_APER_SIZE:
+ bridge->current_size = A_IDX32(bridge);
+ break;
+ /* This case will never really happen. */
+ case FIXED_APER_SIZE:
+ case LVL2_APER_SIZE:
+ default:
+ bridge->current_size =
+ bridge->current_size;
+ break;
+ }
+ temp = bridge->current_size;
+ } else {
+ bridge->aperture_size_idx = i;
+ }
+ } while (!table && (i < bridge->driver->num_aperture_sizes));
+ } else {
+ size = ((struct aper_size_info_fixed *) temp)->size;
+ page_order = ((struct aper_size_info_fixed *) temp)->page_order;
+ num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
+ table = alloc_gatt_pages(page_order);
+#else
+ table = (char *) __get_free_pages(GFP_KERNEL, page_order);
+#endif
+ }
+
+ if (table == NULL)
+ return -ENOMEM;
+
+ table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+ for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+ SetPageReserved(page);
+
+ bridge->gatt_table_real = (u32 *) table;
+ agp_gatt_table = (void *)table;
+
+ bridge->driver->cache_flush();
+ bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
+ (PAGE_SIZE * (1 << page_order)));
+ bridge->driver->cache_flush();
+
+ if (bridge->gatt_table == NULL) {
+ for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+ ClearPageReserved(page);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
+ free_gatt_pages(table, page_order);
+#else
+ free_pages((unsigned long) table, page_order);
+#endif
+
+ return -ENOMEM;
+ }
+ bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real);
+
+ /* AK: bogus, should encode addresses > 4GB */
+ for (i = 0; i < num_entries; i++) {
+ writel(bridge->scratch_page, bridge->gatt_table+i);
+ readl(bridge->gatt_table+i); /* PCI Posting. */
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(agp_generic_create_gatt_table);
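+
+/*
+ * Size arithmetic, as an illustration: a 256 MB aperture covered by 4 KB
+ * pages needs num_entries = 65536 GATT entries; at 4 bytes per entry the
+ * table is 256 KB, i.e. 64 pages, hence page_order = 6 in the
+ * aper_size_info tables (compare agp3_generic_sizes[] below).
+ */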
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+int agp_generic_free_gatt_table( void )
+{
+ struct agp_bridge_data *bridge = agp_bridge;
+#else
+int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
+{
+#endif
+ int page_order;
+ char *table, *table_end;
+ void *temp;
+ struct page *page;
+
+ temp = bridge->current_size;
+
+ switch (bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ page_order = A_SIZE_8(temp)->page_order;
+ break;
+ case U16_APER_SIZE:
+ page_order = A_SIZE_16(temp)->page_order;
+ break;
+ case U32_APER_SIZE:
+ page_order = A_SIZE_32(temp)->page_order;
+ break;
+ case FIXED_APER_SIZE:
+ page_order = A_SIZE_FIX(temp)->page_order;
+ break;
+ case LVL2_APER_SIZE:
+ /* The generic routines can't deal with 2 level gatt's */
+ return -EINVAL;
+ break;
+ default:
+ page_order = 0;
+ break;
+ }
+
+ /* Do not worry about freeing memory, because if this is
+ * called, then all agp memory is deallocated and removed
+ * from the table. */
+
+ iounmap(bridge->gatt_table);
+ table = (char *) bridge->gatt_table_real;
+ table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+ for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+ ClearPageReserved(page);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
+ free_gatt_pages(bridge->gatt_table_real, page_order);
+#else
+ free_pages((unsigned long) bridge->gatt_table_real, page_order);
+#endif
+
+ agp_gatt_table = NULL;
+ bridge->gatt_table = NULL;
+ bridge->gatt_table_real = NULL;
+ bridge->gatt_bus_addr = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(agp_generic_free_gatt_table);
+
+
+int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
+{
+ int num_entries;
+ size_t i;
+ off_t j;
+ void *temp;
+ struct agp_bridge_data *bridge;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ bridge = agp_bridge;
+#else
+ bridge = mem->bridge;
+#endif
+
+ if (!bridge)
+ return -EINVAL;
+
+ temp = bridge->current_size;
+
+ switch (bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ num_entries = A_SIZE_8(temp)->num_entries;
+ break;
+ case U16_APER_SIZE:
+ num_entries = A_SIZE_16(temp)->num_entries;
+ break;
+ case U32_APER_SIZE:
+ num_entries = A_SIZE_32(temp)->num_entries;
+ break;
+ case FIXED_APER_SIZE:
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+ break;
+ case LVL2_APER_SIZE:
+ /* The generic routines can't deal with 2 level gatt's */
+ return -EINVAL;
+ break;
+ default:
+ num_entries = 0;
+ break;
+ }
+
+ num_entries -= agp_memory_reserved/PAGE_SIZE;
+ if (num_entries < 0)
+ num_entries = 0;
+
+ if (type != 0 || mem->type != 0) {
+ /* The generic routines know nothing of memory types */
+ return -EINVAL;
+ }
+
+ /* AK: could wrap */
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ j = pg_start;
+
+ while (j < (pg_start + mem->page_count)) {
+ if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
+ return -EBUSY;
+ j++;
+ }
+
+ if (mem->is_flushed == FALSE) {
+ bridge->driver->cache_flush();
+ mem->is_flushed = TRUE;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ writel(bridge->driver->mask_memory(mem->memory[i], mem->type), bridge->gatt_table+j);
+#else
+ writel(bridge->driver->mask_memory(bridge, mem->memory[i], mem->type), bridge->gatt_table+j);
+#endif
+ readl(bridge->gatt_table+j); /* PCI Posting. */
+ }
+
+ bridge->driver->tlb_flush(mem);
+ return 0;
+}
+EXPORT_SYMBOL(agp_generic_insert_memory);
+
+
+int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ size_t i;
+ struct agp_bridge_data *bridge;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ bridge = agp_bridge;
+#else
+ bridge = mem->bridge;
+#endif
+ if (!bridge)
+ return -EINVAL;
+
+ if (type != 0 || mem->type != 0) {
+ /* The generic routines know nothing of memory types */
+ return -EINVAL;
+ }
+
+ /* AK: bogus, should encode addresses > 4GB */
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ writel(bridge->scratch_page, bridge->gatt_table+i);
+ readl(bridge->gatt_table+i); /* PCI Posting. */
+ }
+
+ global_cache_flush();
+ bridge->driver->tlb_flush(mem);
+ return 0;
+}
+EXPORT_SYMBOL(agp_generic_remove_memory);
+
+
+struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
+{
+ return NULL;
+}
+EXPORT_SYMBOL(agp_generic_alloc_by_type);
+
+
+void agp_generic_free_by_type(struct agp_memory *curr)
+{
+ vfree(curr->memory);
+ agp_free_key(curr->key);
+ kfree(curr);
+}
+EXPORT_SYMBOL(agp_generic_free_by_type);
+
+
+/*
+ * Basic Page Allocation Routines -
+ * These routines handle page allocation and by default they reserve the allocated
+ * memory. They also handle incrementing the current_memory_agp value, which is checked
+ * against a maximum value.
+ */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+void *agp_generic_alloc_page( void )
+{
+#else
+void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
+{
+#endif
+ struct page * page;
+
+ page = alloc_page(GFP_KERNEL);
+ if (page == NULL)
+ return NULL;
+
+ map_page_into_agp(page);
+
+ get_page(page);
+ SetPageLocked(page);
+ atomic_inc(&agp_bridge->current_memory_agp);
+ return page_address(page);
+}
+EXPORT_SYMBOL(agp_generic_alloc_page);
+
+
+void agp_generic_destroy_page(void *addr)
+{
+ struct page *page;
+
+ if (addr == NULL)
+ return;
+
+ page = virt_to_page(addr);
+ unmap_page_from_agp(page);
+ put_page(page);
+ unlock_page(page);
+ free_page((unsigned long)addr);
+ atomic_dec(&agp_bridge->current_memory_agp);
+}
+EXPORT_SYMBOL(agp_generic_destroy_page);
+
+/* End Basic Page Allocation Routines */
+
+
+/**
+ * agp_enable - initialise the agp point-to-point connection.
+ *
+ * @mode: agp mode register value to configure with.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+void agp_enable(u32 mode)
+{
+ if (agp_bridge->type == NOT_SUPPORTED)
+ return;
+ agp_bridge->driver->agp_enable(mode);
+}
+#else
+void agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ if (!bridge)
+ return;
+ bridge->driver->agp_enable(bridge, mode);
+}
+#endif
+
+EXPORT_SYMBOL(agp_enable);
+
+/* When we remove the global variable agp_bridge from all drivers,
+ * agp_alloc_bridge and agp_generic_find_bridge will need to be updated.
+ */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
+struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
+{
+ if (list_empty(&agp_bridges))
+ return NULL;
+
+ return agp_bridge;
+}
+#endif
+
+static void ipi_handler(void *null)
+{
+ flush_agp_cache();
+}
+
+void global_cache_flush(void)
+{
+ if (on_each_cpu(ipi_handler, NULL, 1, 1) != 0)
+ panic(PFX "timed out waiting for the other CPUs!\n");
+}
+EXPORT_SYMBOL(global_cache_flush);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+unsigned long agp_generic_mask_memory(unsigned long addr, int type)
+{
+ struct agp_bridge_data *bridge = agp_bridge;
+#else
+unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
+ unsigned long addr, int type)
+{
+#endif
+ /* memory type is ignored in the generic routine */
+ if (bridge->driver->masks)
+ return addr | bridge->driver->masks[0].mask;
+ else
+ return addr;
+}
+EXPORT_SYMBOL(agp_generic_mask_memory);
+
+/*
+ * These functions are implemented according to the AGPv3 spec,
+ * which covers implementation details that had previously been
+ * left open.
+ */
+
+int agp3_generic_fetch_size(void)
+{
+ u16 temp_size;
+ int i;
+ struct aper_size_info_16 *values;
+
+ pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
+ values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp_size == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(agp3_generic_fetch_size);
+
+void agp3_generic_tlbflush(struct agp_memory *mem)
+{
+ u32 ctrl;
+ pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
+ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
+ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
+}
+EXPORT_SYMBOL(agp3_generic_tlbflush);
+
+int agp3_generic_configure(void)
+{
+ u32 temp;
+ struct aper_size_info_16 *current_size;
+
+ current_size = A_SIZE_16(agp_bridge->current_size);
+
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* set aperture size */
+ pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
+ /* set gart pointer */
+ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
+ /* enable aperture and GTLB */
+ pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
+ return 0;
+}
+EXPORT_SYMBOL(agp3_generic_configure);
+
+void agp3_generic_cleanup(void)
+{
+ u32 ctrl;
+ pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
+ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
+}
+EXPORT_SYMBOL(agp3_generic_cleanup);
+
+struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
+{
+ {4096, 1048576, 10,0x000},
+ {2048, 524288, 9, 0x800},
+ {1024, 262144, 8, 0xc00},
+ { 512, 131072, 7, 0xe00},
+ { 256, 65536, 6, 0xf00},
+ { 128, 32768, 5, 0xf20},
+ { 64, 16384, 4, 0xf30},
+ { 32, 8192, 3, 0xf38},
+ { 16, 4096, 2, 0xf3c},
+ { 8, 2048, 1, 0xf3e},
+ { 4, 1024, 0, 0xf3f}
+};
+EXPORT_SYMBOL(agp3_generic_sizes);
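+
+/*
+ * Reading the table above: each entry is { aperture size in MB, number of
+ * GATT entries, page order of the GATT allocation, AGPAPSIZE register
+ * value }. Sanity check on the first row: 4096 MB / 4 KB = 1048576
+ * entries, 1048576 * 4 bytes = 4 MB of GATT, i.e. 1024 pages = order 10.
+ */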
+
diff --git a/intel-agp.c b/intel-agp.c
new file mode 100644
index 0000000..adf7475
--- /dev/null
+++ b/intel-agp.c
@@ -0,0 +1,2195 @@
+/*
+ * Intel AGPGART routines.
+ */
+
+/*
+ * Intel(R) 855GM/852GM and 865G support added by David Dawes
+ * <dawes@tungstengraphics.com>.
+ *
+ * Intel(R) 915G/915GM support added by Alan Hourihane
+ * <alanh@tungstengraphics.com>.
+ *
+ * Intel(R) 945G/945GM support added by Alan Hourihane
+ * <alanh@tungstengraphics.com>.
+ *
+ * Intel(R) 946GZ/965Q/965G support added by Alan Hourihane
+ * <alanh@tungstengraphics.com>.
+ *
+ */
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+#define PCI_DEVICE_ID_INTEL_82915G_HB 0x2580
+#define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582
+#define PCI_DEVICE_ID_INTEL_82915GM_HB 0x2590
+#define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592
+#define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770
+#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772
+#define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27A0
+#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2
+#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
+#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972
+#define PCI_DEVICE_ID_INTEL_82965G_1_HB 0x2980
+#define PCI_DEVICE_ID_INTEL_82965G_1_IG 0x2982
+#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
+#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992
+#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
+#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2
+
+#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_1_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB)
+
+/* Intel 815 register */
+#define INTEL_815_APCONT 0x51
+#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
+
+/* Intel i820 registers */
+#define INTEL_I820_RDCR 0x51
+#define INTEL_I820_ERRSTS 0xc8
+
+/* Intel i840 registers */
+#define INTEL_I840_MCHCFG 0x50
+#define INTEL_I840_ERRSTS 0xc8
+
+/* Intel i850 registers */
+#define INTEL_I850_MCHCFG 0x50
+#define INTEL_I850_ERRSTS 0xc8
+
+/* intel 915G registers */
+#define I915_GMADDR 0x18
+#define I915_MMADDR 0x10
+#define I915_PTEADDR 0x1C
+#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
+#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
+
+/* Intel 965G registers */
+#define I965_MSAC 0x62
+
+/* Intel 7505 registers */
+#define INTEL_I7505_APSIZE 0x74
+#define INTEL_I7505_NCAPID 0x60
+#define INTEL_I7505_NISTAT 0x6c
+#define INTEL_I7505_ATTBASE 0x78
+#define INTEL_I7505_ERRSTS 0x42
+#define INTEL_I7505_AGPCTRL 0x70
+#define INTEL_I7505_MCHCFG 0x50
+
+static struct aper_size_info_fixed intel_i810_sizes[] =
+{
+ {64, 16384, 4},
+ /* The 32M mode still requires a 64k gatt */
+ {32, 8192, 4}
+};
+
+#define AGP_DCACHE_MEMORY 1
+#define AGP_PHYS_MEMORY 2
+#define AGP_OVERRIDE_TLB_BUG 3
+
+static struct gatt_mask intel_i810_masks[] =
+{
+ {.mask = I810_PTE_VALID, .type = 0},
+ {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
+ {.mask = I810_PTE_VALID, .type = 0}
+};
+
+static struct _intel_i810_private {
+ struct pci_dev *i810_dev; /* device one */
+ u32 pm_state[16]; /* host pci bridge state */
+ u32 pm_ig_state[16]; /* integrated graphics pci state */
+ volatile u8 __iomem *registers;
+ int num_dcache_entries;
+} intel_i810_private;
+
+static int intel_i810_fetch_size(void)
+{
+ u32 smram_miscc;
+ struct aper_size_info_fixed *values;
+
+ pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+
+ if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
+ printk(KERN_WARNING PFX "i810 is disabled\n");
+ return 0;
+ }
+ if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + 1);
+ agp_bridge->aperture_size_idx = 1;
+ return values[1].size;
+ } else {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values);
+ agp_bridge->aperture_size_idx = 0;
+ return values[0].size;
+ }
+
+ return 0;
+}
+
+static int intel_i810_configure(void)
+{
+ struct aper_size_info_fixed *current_size;
+ u32 temp;
+ int i;
+
+ current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+ pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp);
+ temp &= 0xfff80000;
+
+ intel_i810_private.registers = ioremap(temp, 128 * 4096);
+ if (!intel_i810_private.registers) {
+ printk(KERN_ERR PFX "Unable to remap memory.\n");
+ return -ENOMEM;
+ }
+
+ if ((readl(intel_i810_private.registers+I810_DRAM_CTL)
+ & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
+ /* This will need to be dynamically assigned */
+ printk(KERN_INFO PFX "detected 4MB dedicated video ram.\n");
+ intel_i810_private.num_dcache_entries = 1024;
+ }
+ pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_i810_private.registers+I810_PGETBL_CTL);
+ readl(intel_i810_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+
+ if (agp_bridge->driver->needs_scratch_page) {
+ for (i = 0; i < current_size->num_entries; i++) {
+ writel(agp_bridge->scratch_page, intel_i810_private.registers+I810_PTE_BASE+(i*4));
+ readl(intel_i810_private.registers+I810_PTE_BASE+(i*4)); /* PCI posting. */
+ }
+ }
+ global_cache_flush();
+ return 0;
+}
+
+static void intel_i810_cleanup(void)
+{
+ writel(0, intel_i810_private.registers+I810_PGETBL_CTL);
+ readl(intel_i810_private.registers); /* PCI Posting. */
+ iounmap(intel_i810_private.registers);
+}
+
+static void intel_i810_tlbflush(struct agp_memory *mem)
+{
+ return;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+static void intel_i810_agp_enable(u32 mode)
+#else
+static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+#endif
+{
+ return;
+}
+
+/* Exists to support ARGB cursors */
+static void *i8xx_alloc_pages(void)
+{
+ struct page * page;
+
+ page = alloc_pages(GFP_KERNEL, 2);
+ if (page == NULL)
+ return NULL;
+
+ if (change_page_attr(page, 4, PAGE_KERNEL_NOCACHE) < 0) {
+ flush_agp_mappings();
+ __free_pages(page, 2); /* free the whole order-2 allocation */
+ return NULL;
+ }
+ flush_agp_mappings();
+ get_page(page);
+ SetPageLocked(page);
+ atomic_inc(&agp_bridge->current_memory_agp);
+ return page_address(page);
+}
+
+static void i8xx_destroy_pages(void *addr)
+{
+ struct page *page;
+
+ if (addr == NULL)
+ return;
+
+ page = virt_to_page(addr);
+ change_page_attr(page, 4, PAGE_KERNEL);
+ flush_agp_mappings();
+ put_page(page);
+ unlock_page(page);
+ free_pages((unsigned long)addr, 2);
+ atomic_dec(&agp_bridge->current_memory_agp);
+}
+
+static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i, j, num_entries;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+
+ if ((pg_start + mem->page_count) > num_entries) {
+ return -EINVAL;
+ }
+ for (j = pg_start; j < (pg_start + mem->page_count); j++) {
+ if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j)))
+ return -EBUSY;
+ }
+
+ if (type != 0 || mem->type != 0) {
+ if ((type == AGP_DCACHE_MEMORY) && (mem->type == AGP_DCACHE_MEMORY)) {
+ /* special insert */
+ global_cache_flush();
+ for (i = pg_start; i < (pg_start + mem->page_count); i++) {
+ writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID, intel_i810_private.registers+I810_PTE_BASE+(i*4));
+ readl(intel_i810_private.registers+I810_PTE_BASE+(i*4)); /* PCI Posting. */
+ }
+ global_cache_flush();
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+ }
+ if((type == AGP_PHYS_MEMORY) && (mem->type == AGP_PHYS_MEMORY))
+ goto insert;
+ return -EINVAL;
+ }
+
+insert:
+ global_cache_flush();
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ writel(agp_bridge->driver->mask_memory(mem->memory[i], mem->type),
+#else
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ mem->memory[i], mem->type),
+#endif
+ intel_i810_private.registers+I810_PTE_BASE+(j*4));
+ readl(intel_i810_private.registers+I810_PTE_BASE+(j*4)); /* PCI Posting. */
+ }
+ global_cache_flush();
+
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i;
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ writel(agp_bridge->scratch_page, intel_i810_private.registers+I810_PTE_BASE+(i*4));
+ readl(intel_i810_private.registers+I810_PTE_BASE+(i*4)); /* PCI Posting. */
+ }
+
+ global_cache_flush();
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+/*
+ * The i810/i830 requires a physical address to program its mouse
+ * pointer into hardware.
+ * However, the X server still writes to it through the AGP aperture.
+ */
+static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
+{
+ struct agp_memory *new;
+ void *addr;
+
+ if (pg_count != 1 && pg_count != 4)
+ return NULL;
+
+ switch (pg_count) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ case 1: addr = agp_bridge->driver->agp_alloc_page();
+#else
+ case 1: addr = agp_bridge->driver->agp_alloc_page(agp_bridge);
+#endif
+ flush_agp_mappings();
+ break;
+ case 4:
+ /* kludge to get 4 physical pages for ARGB cursor */
+ addr = i8xx_alloc_pages();
+ break;
+ default:
+ return NULL;
+ }
+
+ if (addr == NULL)
+ return NULL;
+
+ new = agp_create_memory(pg_count);
+ if (new == NULL)
+ return NULL;
+
+ new->memory[0] = virt_to_gart(addr);
+ if (pg_count == 4) {
+ /* kludge to get 4 physical pages for ARGB cursor */
+ new->memory[1] = new->memory[0] + PAGE_SIZE;
+ new->memory[2] = new->memory[1] + PAGE_SIZE;
+ new->memory[3] = new->memory[2] + PAGE_SIZE;
+ }
+ new->page_count = pg_count;
+ new->num_scratch_pages = pg_count;
+ new->type = AGP_PHYS_MEMORY;
+ new->physical = new->memory[0];
+ return new;
+}
+
+static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
+{
+ struct agp_memory *new;
+
+ if (type == AGP_DCACHE_MEMORY) {
+ if (pg_count != intel_i810_private.num_dcache_entries)
+ return NULL;
+
+ new = agp_create_memory(1);
+ if (new == NULL)
+ return NULL;
+
+ new->type = AGP_DCACHE_MEMORY;
+ new->page_count = pg_count;
+ new->num_scratch_pages = 0;
+ vfree(new->memory);
+ return new;
+ }
+ if (type == AGP_PHYS_MEMORY)
+ return alloc_agpphysmem_i8xx(pg_count, type);
+
+ return NULL;
+}
+
+static void intel_i810_free_by_type(struct agp_memory *curr)
+{
+ agp_free_key(curr->key);
+ if(curr->type == AGP_PHYS_MEMORY) {
+ if (curr->page_count == 4)
+ i8xx_destroy_pages(gart_to_virt(curr->memory[0]));
+ else {
+ agp_bridge->driver->agp_destroy_page(
+ gart_to_virt(curr->memory[0]));
+ flush_agp_mappings();
+ }
+ vfree(curr->memory);
+ }
+ kfree(curr);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+static unsigned long intel_i810_mask_memory(unsigned long addr, int type)
+{
+ /* Type checking must be done elsewhere */
+ return addr | agp_bridge->driver->masks[type].mask;
+}
+#else
+static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
+ unsigned long addr, int type)
+{
+ /* Type checking must be done elsewhere */
+ return addr | bridge->driver->masks[type].mask;
+}
+#endif
+
+static struct aper_size_info_fixed intel_i830_sizes[] =
+{
+ {128, 32768, 5},
+ /* The 64M mode still requires a 128k gatt */
+ {64, 16384, 5},
+ {256, 65536, 6},
+ {512, 131072, 7},
+};
+
+static struct _intel_i830_private {
+ struct pci_dev *i830_dev; /* device one */
+ u32 pm_state[16]; /* host pci bridge state */
+ u32 pm_ig_state[16]; /* integrated graphics pci state */
+ volatile u8 __iomem *registers;
+ volatile u32 __iomem *gtt; /* I915G */
+ int gtt_entries;
+} intel_i830_private;
+
+static void intel_i830_init_gtt_entries(void)
+{
+ u16 gmch_ctrl;
+ int gtt_entries;
+ u8 rdct;
+ int local = 0;
+ static const int ddt[4] = { 0, 16, 32, 64 };
+ int size;
+
+ pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
+
+ /* We obtain the size of the GTT, which is also stored (for some
+ * reason) at the top of stolen memory. Then we add 4KB to that
+ * for the video BIOS popup, which is also stored in there. */
+ if (IS_I965)
+ size = 512 + 4;
+ else
+ size = agp_bridge->driver->fetch_size() + 4;
+
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
+ switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+ case I830_GMCH_GMS_STOLEN_512:
+ gtt_entries = KB(512) - KB(size);
+ break;
+ case I830_GMCH_GMS_STOLEN_1024:
+ gtt_entries = MB(1) - KB(size);
+ break;
+ case I830_GMCH_GMS_STOLEN_8192:
+ gtt_entries = MB(8) - KB(size);
+ break;
+ case I830_GMCH_GMS_LOCAL:
+ rdct = readb(intel_i830_private.registers+I830_RDRAM_CHANNEL_TYPE);
+ gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
+ MB(ddt[I830_RDRAM_DDT(rdct)]);
+ local = 1;
+ break;
+ default:
+ gtt_entries = 0;
+ break;
+ }
+ } else {
+ switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+ case I855_GMCH_GMS_STOLEN_1M:
+ gtt_entries = MB(1) - KB(size);
+ break;
+ case I855_GMCH_GMS_STOLEN_4M:
+ gtt_entries = MB(4) - KB(size);
+ break;
+ case I855_GMCH_GMS_STOLEN_8M:
+ gtt_entries = MB(8) - KB(size);
+ break;
+ case I855_GMCH_GMS_STOLEN_16M:
+ gtt_entries = MB(16) - KB(size);
+ break;
+ case I855_GMCH_GMS_STOLEN_32M:
+ gtt_entries = MB(32) - KB(size);
+ break;
+ case I915_GMCH_GMS_STOLEN_48M:
+ /* Check it's really I915G */
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB ||
+ IS_I965)
+ gtt_entries = MB(48) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case I915_GMCH_GMS_STOLEN_64M:
+ /* Check it's really I915G */
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB ||
+ IS_I965)
+ gtt_entries = MB(64) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ default:
+ gtt_entries = 0;
+ break;
+ }
+ }
+ if (gtt_entries > 0)
+ printk(KERN_INFO PFX "Detected %dK %s memory.\n",
+ gtt_entries / KB(1), local ? "local" : "stolen");
+ else
+ printk(KERN_INFO PFX
+ "No pre-allocated video memory detected.\n");
+ gtt_entries /= KB(4);
+
+ intel_i830_private.gtt_entries = gtt_entries;
+}
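+
+/*
+ * Worked example of the arithmetic above (illustrative numbers): on an
+ * 855GM with a 128 MB aperture and 8 MB of stolen memory, fetch_size()
+ * returns 128, which the code treats as the GTT size in KB, so
+ * size = 128 + 4 = 132. Then gtt_entries = MB(8) - KB(132) = 8253440
+ * bytes, and the final division by KB(4) leaves 2015 preallocated 4 KB
+ * GTT entries.
+ */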
+
+/* The intel i830 automatically initializes the agp aperture during POST.
+ * Use the memory already set aside for it in the GTT.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+static int intel_i830_create_gatt_table( void )
+{
+#else
+static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
+{
+#endif
+ int page_order;
+ struct aper_size_info_fixed *size;
+ int num_entries;
+ u32 temp;
+
+ size = agp_bridge->current_size;
+ page_order = size->page_order;
+ num_entries = size->num_entries;
+ agp_bridge->gatt_table_real = NULL;
+
+ pci_read_config_dword(intel_i830_private.i830_dev,I810_MMADDR,&temp);
+ temp &= 0xfff80000;
+
+ intel_i830_private.registers = ioremap(temp,128 * 4096);
+ if (!intel_i830_private.registers)
+ return -ENOMEM;
+
+ temp = readl(intel_i830_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+ global_cache_flush(); /* FIXME: ?? */
+
+ /* we have to call this as early as possible after the MMIO base address is known */
+ intel_i830_init_gtt_entries();
+
+ agp_bridge->gatt_table = NULL;
+
+ agp_bridge->gatt_bus_addr = temp;
+
+ return 0;
+}
+
+/* Return the gatt table to a sane state. Use the top of stolen
+ * memory for the GTT.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+static int intel_i830_free_gatt_table(void)
+#else
+static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
+#endif
+{
+ return 0;
+}
+
+static int intel_i830_fetch_size(void)
+{
+ u16 gmch_ctrl;
+ struct aper_size_info_fixed *values;
+
+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+
+ if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
+ agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
+ /* 855GM/852GM/865G has 128MB aperture size */
+ agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
+ agp_bridge->aperture_size_idx = 0;
+ return values[0].size;
+ }
+
+ pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
+
+ if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
+ agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
+ agp_bridge->aperture_size_idx = 0;
+ return values[0].size;
+ } else {
+ agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1);
+ agp_bridge->aperture_size_idx = 1;
+ return values[1].size;
+ }
+
+ return 0;
+}
+
+static int intel_i830_configure(void)
+{
+ struct aper_size_info_fixed *current_size;
+ u32 temp;
+ u16 gmch_ctrl;
+ int i;
+
+ current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+ pci_read_config_dword(intel_i830_private.i830_dev,I810_GMADDR,&temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
+ gmch_ctrl |= I830_GMCH_ENABLED;
+ pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl);
+
+ writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_i830_private.registers+I810_PGETBL_CTL);
+ readl(intel_i830_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+
+ if (agp_bridge->driver->needs_scratch_page) {
+ for (i = intel_i830_private.gtt_entries; i < current_size->num_entries; i++) {
+ writel(agp_bridge->scratch_page, intel_i830_private.registers+I810_PTE_BASE+(i*4));
+ readl(intel_i830_private.registers+I810_PTE_BASE+(i*4)); /* PCI Posting. */
+ }
+ }
+
+ global_cache_flush();
+ return 0;
+}
+
+static void intel_i830_cleanup(void)
+{
+ iounmap(intel_i830_private.registers);
+}
+
+static int intel_i830_insert_entries(struct agp_memory *mem,off_t pg_start, int type)
+{
+ int i,j,num_entries;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+
+ if (pg_start < intel_i830_private.gtt_entries) {
+ printk (KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_i830_private.gtt_entries == 0x%.8x\n",
+ pg_start,intel_i830_private.gtt_entries);
+
+ printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n");
+ return -EINVAL;
+ }
+
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ /* The i830 can't check the GTT for entries since it's read-only;
+ * we depend on the caller to make the correct offset decisions.
+ */
+
+ if ((type != 0 && type != AGP_PHYS_MEMORY) ||
+ (mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
+ return -EINVAL;
+
+ global_cache_flush(); /* FIXME: Necessary? */
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ writel(agp_bridge->driver->mask_memory(
+#else
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+#endif
+ mem->memory[i], mem->type),
+ intel_i830_private.registers+I810_PTE_BASE+(j*4));
+ readl(intel_i830_private.registers+I810_PTE_BASE+(j*4)); /* PCI Posting. */
+ }
+
+ global_cache_flush();
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int intel_i830_remove_entries(struct agp_memory *mem,off_t pg_start,
+ int type)
+{
+ int i;
+
+ global_cache_flush();
+
+ if (pg_start < intel_i830_private.gtt_entries) {
+ printk (KERN_INFO PFX "Trying to disable local/stolen memory\n");
+ return -EINVAL;
+ }
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ writel(agp_bridge->scratch_page, intel_i830_private.registers+I810_PTE_BASE+(i*4));
+ readl(intel_i830_private.registers+I810_PTE_BASE+(i*4)); /* PCI Posting. */
+ }
+
+ global_cache_flush();
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+static void *intel_alloc_page( void )
+{
+ struct agp_bridge_data *bridge = agp_bridge;
+
+#else
+static void *intel_alloc_page(struct agp_bridge_data *bridge)
+{
+#endif
+ struct page * page;
+
+ page = alloc_page(GFP_KERNEL);
+ if (page == NULL)
+ return NULL;
+
+ change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
+
+ get_page(page);
+ SetPageLocked(page);
+ atomic_inc(&bridge->current_memory_agp);
+ return page_address(page);
+}
+
+void intel_destroy_page(void *addr)
+{
+ struct page *page;
+
+ if (addr == NULL)
+ return;
+
+ page = virt_to_page(addr);
+ change_page_attr(page, 1, PAGE_KERNEL);
+ put_page(page);
+ unlock_page(page);
+ free_page((unsigned long)addr);
+ atomic_dec(&agp_bridge->current_memory_agp);
+}
+
+static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count,int type)
+{
+ if (type == AGP_PHYS_MEMORY)
+ return alloc_agpphysmem_i8xx(pg_count, type);
+
+ if (type == AGP_OVERRIDE_TLB_BUG) {
+ int scratch_pages;
+ struct agp_memory *new;
+ size_t i;
+
+#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
+
+ scratch_pages = (pg_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
+
+ new = agp_create_memory(scratch_pages);
+
+ if (new == NULL)
+ return NULL;
+
+ for (i = 0; i < pg_count; i++) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ void *addr = intel_alloc_page();
+#else
+ void *addr = intel_alloc_page(agp_bridge);
+#endif
+ if (addr == NULL) {
+ /* don't need flush_agp_mappings()
+ * as agp_free_memory() will do it.
+ */
+ agp_free_memory(new);
+ return NULL;
+ }
+ new->memory[i] = virt_to_gart(addr);
+ new->page_count++;
+ }
+ flush_agp_mappings();
+
+ return new;
+ }
+
+ /* always return NULL for other allocation types for now */
+ return NULL;
+}
+
+static void intel_i830_free_by_type(struct agp_memory *curr)
+{
+ agp_free_key(curr->key);
+ if(curr->type == AGP_PHYS_MEMORY) {
+ if (curr->page_count == 4)
+ i8xx_destroy_pages(gart_to_virt(curr->memory[0]));
+ else {
+ intel_destroy_page(gart_to_virt(curr->memory[0]));
+ flush_agp_mappings();
+ }
+ vfree(curr->memory);
+ }
+ if(curr->type == AGP_OVERRIDE_TLB_BUG) {
+ size_t i;
+
+ if (curr->page_count != 0) {
+ for (i = 0; i < curr->page_count; i++) {
+ intel_destroy_page(gart_to_virt(curr->memory[i]));
+ }
+ flush_agp_mappings();
+ }
+ agp_free_key(curr->key);
+ vfree(curr->memory);
+ }
+ kfree(curr);
+}
+
+
+static int intel_i915_configure(void)
+{
+ struct aper_size_info_fixed *current_size;
+ u32 temp;
+ u16 gmch_ctrl;
+ int i;
+
+ current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+ pci_read_config_dword(intel_i830_private.i830_dev, I915_GMADDR, &temp);
+
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
+ gmch_ctrl |= I830_GMCH_ENABLED;
+ pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl);
+
+ writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_i830_private.registers+I810_PGETBL_CTL);
+ readl(intel_i830_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+
+ if (agp_bridge->driver->needs_scratch_page) {
+ for (i = intel_i830_private.gtt_entries; i < current_size->num_entries; i++) {
+ writel(agp_bridge->scratch_page, intel_i830_private.gtt+i);
+ readl(intel_i830_private.gtt+i); /* PCI Posting. */
+ }
+ }
+
+ global_cache_flush();
+ return 0;
+}
+
+static void intel_i915_cleanup(void)
+{
+ iounmap(intel_i830_private.gtt);
+ iounmap(intel_i830_private.registers);
+}
+
+static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start,
+ int type)
+{
+ int i,j,num_entries;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+
+ if (pg_start < intel_i830_private.gtt_entries) {
+ printk (KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_i830_private.gtt_entries == 0x%.8x\n",
+ pg_start,intel_i830_private.gtt_entries);
+
+ printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n");
+ return -EINVAL;
+ }
+
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+	/* The i830 can't check the GTT for entries since it's read-only;
+	 * depend on the caller to make the correct offset decisions.
+ */
+
+ if ((type != 0 && type != AGP_PHYS_MEMORY) ||
+ (mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
+ return -EINVAL;
+
+ global_cache_flush();
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+ writel(agp_bridge->driver->mask_memory(
+#else
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+#endif
+ mem->memory[i], mem->type), intel_i830_private.gtt+j);
+ readl(intel_i830_private.gtt+j); /* PCI Posting. */
+ }
+
+ global_cache_flush();
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start,
+ int type)
+{
+ int i;
+
+ global_cache_flush();
+
+ if (pg_start < intel_i830_private.gtt_entries) {
+ printk (KERN_INFO PFX "Trying to disable local/stolen memory\n");
+ return -EINVAL;
+ }
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ writel(agp_bridge->scratch_page, intel_i830_private.gtt+i);
+ readl(intel_i830_private.gtt+i);
+ }
+
+ global_cache_flush();
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int intel_i915_fetch_size(void)
+{
+ struct aper_size_info_fixed *values;
+ u32 temp, offset = 0;
+
+#define I915_256MB_ADDRESS_MASK (1<<27)
+
+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+
+ pci_read_config_dword(intel_i830_private.i830_dev, I915_GMADDR, &temp);
+ if (temp & I915_256MB_ADDRESS_MASK)
+ offset = 0; /* 128MB aperture */
+ else
+ offset = 2; /* 256MB aperture */
+ agp_bridge->previous_size = agp_bridge->current_size = (void *)(values + offset);
+ return values[offset].size;
+}
+
+/* The Intel i915 automatically initializes the AGP aperture during POST.
+ * Use the memory already set aside for the GTT.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+static int intel_i915_create_gatt_table( void )
+{
+#else
+static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
+{
+#endif
+ int page_order;
+ struct aper_size_info_fixed *size;
+ int num_entries;
+ u32 temp, temp2;
+
+ size = agp_bridge->current_size;
+ page_order = size->page_order;
+ num_entries = size->num_entries;
+ agp_bridge->gatt_table_real = NULL;
+
+ pci_read_config_dword(intel_i830_private.i830_dev, I915_MMADDR, &temp);
+ pci_read_config_dword(intel_i830_private.i830_dev, I915_PTEADDR,&temp2);
+
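+	/* The i915 exposes its GTT through the separate PTEADDR register;
+	 * map 256KB of GTT entries apart from the MMIO register window. */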
+ intel_i830_private.gtt = ioremap(temp2, 256 * 1024);
+ if (!intel_i830_private.gtt)
+ return -ENOMEM;
+
+ temp &= 0xfff80000;
+
+ intel_i830_private.registers = ioremap(temp,128 * 4096);
+ if (!intel_i830_private.registers)
+ return -ENOMEM;
+
+ temp = readl(intel_i830_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+ global_cache_flush(); /* FIXME: ? */
+
+ /* we have to call this as early as possible after the MMIO base address is known */
+ intel_i830_init_gtt_entries();
+
+ agp_bridge->gatt_table = NULL;
+
+ agp_bridge->gatt_bus_addr = temp;
+
+ return 0;
+}
+
+static int intel_i965_fetch_size(void)
+{
+ struct aper_size_info_fixed *values;
+ u32 offset = 0;
+ u8 temp;
+
+#define I965_512MB_ADDRESS_MASK (3<<1)
+
+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+
+ pci_read_config_byte(intel_i830_private.i830_dev, I965_MSAC, &temp);
+ temp &= I965_512MB_ADDRESS_MASK;
+ switch (temp) {
+ case 0x00:
+ offset = 0; /* 128MB */
+ break;
+ case 0x06:
+ offset = 3; /* 512MB */
+ break;
+ default:
+ case 0x02:
+ offset = 2; /* 256MB */
+ break;
+ }
+
+ agp_bridge->previous_size = agp_bridge->current_size = (void *)(values + offset);
+
+ return values[offset].size;
+}
+
+/* The Intel i965 automatically initializes the AGP aperture during POST.
+ * Use the memory already set aside for the GTT.
+ */
+static int intel_i965_create_gatt_table(void)
+{
+ int page_order;
+ struct aper_size_info_fixed *size;
+ int num_entries;
+ u32 temp;
+
+ size = agp_bridge->current_size;
+ page_order = size->page_order;
+ num_entries = size->num_entries;
+ agp_bridge->gatt_table_real = NULL;
+
+ pci_read_config_dword(intel_i830_private.i830_dev, I915_MMADDR, &temp);
+
+ temp &= 0xfff00000;
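+	/* On the i965 the GTT sits 512KB into the MMADDR range,
+	 * immediately after the 512KB of MMIO registers. */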
+	intel_i830_private.gtt = ioremap(temp + (512 * 1024), 512 * 1024);
+
+ if (!intel_i830_private.gtt)
+ return -ENOMEM;
+
+
+ intel_i830_private.registers = ioremap(temp,128 * 4096);
+ if (!intel_i830_private.registers)
+ return -ENOMEM;
+
+ temp = readl(intel_i830_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+ global_cache_flush(); /* FIXME: ? */
+
+ /* we have to call this as early as possible after the MMIO base address is known */
+ intel_i830_init_gtt_entries();
+
+ agp_bridge->gatt_table = NULL;
+
+ agp_bridge->gatt_bus_addr = temp;
+
+ return 0;
+}
+
+static int intel_fetch_size(void)
+{
+ int i;
+ u16 temp;
+ struct aper_size_info_16 *values;
+
+ pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp);
+ values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+static int __intel_8xx_fetch_size(u8 temp)
+{
+ int i;
+ struct aper_size_info_8 *values;
+
+ values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+ return 0;
+}
+
+static int intel_8xx_fetch_size(void)
+{
+ u8 temp;
+
+ pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp);
+ return __intel_8xx_fetch_size(temp);
+}
+
+static int intel_815_fetch_size(void)
+{
+ u8 temp;
+
+ /* Intel 815 chipsets have a _weird_ APSIZE register with only
+ * one non-reserved bit, so mask the others out ... */
+ pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp);
+ temp &= (1 << 3);
+
+ return __intel_8xx_fetch_size(temp);
+}
+
+static void intel_tlbflush(struct agp_memory *mem)
+{
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200);
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
+}
+
+
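+/* Flush the chipset's GTT TLB by clearing and then setting bit 7 of AGPCTRL. */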
+static void intel_8xx_tlbflush(struct agp_memory *mem)
+{
+ u32 temp;
+ pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp & ~(1 << 7));
+ pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp | (1 << 7));
+}
+
+
+static void intel_cleanup(void)
+{
+ u16 temp;
+ struct aper_size_info_16 *previous_size;
+
+ previous_size = A_SIZE_16(agp_bridge->previous_size);
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
+ pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value);
+}
+
+
+static void intel_8xx_cleanup(void)
+{
+ u16 temp;
+ struct aper_size_info_8 *previous_size;
+
+ previous_size = A_SIZE_8(agp_bridge->previous_size);
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value);
+}
+
+
+static int intel_configure(void)
+{
+ u32 temp;
+ u16 temp2;
+ struct aper_size_info_16 *current_size;
+
+ current_size = A_SIZE_16(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
+
+ /* paccfg/nbxcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG,
+ (temp2 & ~(1 << 10)) | (1 << 9));
+ /* clear any possible error conditions */
+ pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7);
+ return 0;
+}
+
+static int intel_815_configure(void)
+{
+ u32 temp, addr;
+ u8 temp2;
+ struct aper_size_info_8 *current_size;
+
+ /* attbase - aperture base */
+	/* The Intel 815 chipset spec says that bits 29-31 of the
+	 * ATTBASE register are reserved, so avoid writing them. */
+ if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) {
+ printk (KERN_EMERG PFX "gatt bus addr too high");
+ return -EINVAL;
+ }
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE,
+ current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ pci_read_config_dword(agp_bridge->dev, INTEL_ATTBASE, &addr);
+ addr &= INTEL_815_ATTBASE_MASK;
+ addr |= agp_bridge->gatt_bus_addr;
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* apcont */
+ pci_read_config_byte(agp_bridge->dev, INTEL_815_APCONT, &temp2);
+ pci_write_config_byte(agp_bridge->dev, INTEL_815_APCONT, temp2 | (1 << 1));
+
+ /* clear any possible error conditions */
+	/* Oddity: this chipset seems to have no ERRSTS register! */
+ return 0;
+}
+
+static void intel_820_tlbflush(struct agp_memory *mem)
+{
+ return;
+}
+
+static void intel_820_cleanup(void)
+{
+ u8 temp;
+ struct aper_size_info_8 *previous_size;
+
+ previous_size = A_SIZE_8(agp_bridge->previous_size);
+ pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp);
+ pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR,
+ temp & ~(1 << 1));
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE,
+ previous_size->size_value);
+}
+
+
+static int intel_820_configure(void)
+{
+ u32 temp;
+ u8 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+	/* Globally enable aperture access. Unlike the i850 chipset, this
+	 * flag is not accessed through the MCHCFG register. */
+ pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp2);
+ pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR, temp2 | (1 << 1));
+ /* clear any possible AGP-related error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I820_ERRSTS, 0x001c);
+ return 0;
+}
+
+static int intel_840_configure(void)
+{
+ u32 temp;
+ u16 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* mcgcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, temp2 | (1 << 9));
+ /* clear any possible error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I840_ERRSTS, 0xc000);
+ return 0;
+}
+
+static int intel_845_configure(void)
+{
+ u32 temp;
+ u8 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* agpm */
+ pci_read_config_byte(agp_bridge->dev, INTEL_I845_AGPM, &temp2);
+ pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1));
+ /* clear any possible error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c);
+ return 0;
+}
+
+static int intel_850_configure(void)
+{
+ u32 temp;
+ u16 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* mcgcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, temp2 | (1 << 9));
+ /* clear any possible AGP-related error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I850_ERRSTS, 0x001c);
+ return 0;
+}
+
+static int intel_860_configure(void)
+{
+ u32 temp;
+ u16 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* mcgcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, temp2 | (1 << 9));
+ /* clear any possible AGP-related error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I860_ERRSTS, 0xf700);
+ return 0;
+}
+
+static int intel_830mp_configure(void)
+{
+ u32 temp;
+ u16 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* gmch */
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp2 | (1 << 9));
+ /* clear any possible AGP-related error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I830_ERRSTS, 0x1c);
+ return 0;
+}
+
+static int intel_7505_configure(void)
+{
+ u32 temp;
+ u16 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* mchcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, temp2 | (1 << 9));
+
+ return 0;
+}
+
+/* Setup function */
+static struct gatt_mask intel_generic_masks[] =
+{
+ {.mask = 0x00000017, .type = 0}
+};
+
+static struct aper_size_info_8 intel_815_sizes[2] =
+{
+ {64, 16384, 4, 0},
+ {32, 8192, 3, 8},
+};
+
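+/* Each entry is { aperture size in MB, number of GTT entries, page order,
+ * APSIZE register value }. */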
+static struct aper_size_info_8 intel_8xx_sizes[7] =
+{
+ {256, 65536, 6, 0},
+ {128, 32768, 5, 32},
+ {64, 16384, 4, 48},
+ {32, 8192, 3, 56},
+ {16, 4096, 2, 60},
+ {8, 2048, 1, 62},
+ {4, 1024, 0, 63}
+};
+
+static struct aper_size_info_16 intel_generic_sizes[7] =
+{
+ {256, 65536, 6, 0},
+ {128, 32768, 5, 32},
+ {64, 16384, 4, 48},
+ {32, 8192, 3, 56},
+ {16, 4096, 2, 60},
+ {8, 2048, 1, 62},
+ {4, 1024, 0, 63}
+};
+
+static struct aper_size_info_8 intel_830mp_sizes[4] =
+{
+ {256, 65536, 6, 0},
+ {128, 32768, 5, 32},
+ {64, 16384, 4, 48},
+ {32, 8192, 3, 56}
+};
+
+static struct agp_bridge_driver intel_generic_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_generic_sizes,
+ .size_type = U16_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = intel_configure,
+ .fetch_size = intel_fetch_size,
+ .cleanup = intel_cleanup,
+ .tlb_flush = intel_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_bridge_driver intel_810_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i810_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 2,
+ .needs_scratch_page = TRUE,
+ .configure = intel_i810_configure,
+ .fetch_size = intel_i810_fetch_size,
+ .cleanup = intel_i810_cleanup,
+ .tlb_flush = intel_i810_tlbflush,
+ .mask_memory = intel_i810_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = intel_i810_insert_entries,
+ .remove_memory = intel_i810_remove_entries,
+ .alloc_by_type = intel_i810_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_bridge_driver intel_815_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_815_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 2,
+ .configure = intel_815_configure,
+ .fetch_size = intel_815_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_bridge_driver intel_830_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i830_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 3,
+ .needs_scratch_page = TRUE,
+ .configure = intel_i830_configure,
+ .fetch_size = intel_i830_fetch_size,
+ .cleanup = intel_i830_cleanup,
+ .tlb_flush = intel_i810_tlbflush,
+ .mask_memory = intel_i810_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = intel_i830_create_gatt_table,
+ .free_gatt_table = intel_i830_free_gatt_table,
+ .insert_memory = intel_i830_insert_entries,
+ .remove_memory = intel_i830_remove_entries,
+ .alloc_by_type = intel_i830_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_bridge_driver intel_820_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = intel_820_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_820_cleanup,
+ .tlb_flush = intel_820_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_bridge_driver intel_830mp_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_830mp_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .configure = intel_830mp_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_bridge_driver intel_840_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = intel_840_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_bridge_driver intel_845_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = intel_845_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_bridge_driver intel_850_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = intel_850_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_bridge_driver intel_860_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = intel_860_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_bridge_driver intel_915_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i830_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .needs_scratch_page = TRUE,
+ .configure = intel_i915_configure,
+ .fetch_size = intel_i915_fetch_size,
+ .cleanup = intel_i915_cleanup,
+ .tlb_flush = intel_i810_tlbflush,
+ .mask_memory = intel_i810_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = intel_i915_create_gatt_table,
+ .free_gatt_table = intel_i830_free_gatt_table,
+ .insert_memory = intel_i915_insert_entries,
+ .remove_memory = intel_i915_remove_entries,
+ .alloc_by_type = intel_i830_alloc_by_type,
+ .free_by_type = intel_i830_free_by_type,
+ .agp_alloc_page = intel_alloc_page,
+ .agp_destroy_page = intel_destroy_page,
+};
+
+static struct agp_bridge_driver intel_i965_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i830_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .needs_scratch_page = TRUE,
+ .configure = intel_i915_configure,
+ .fetch_size = intel_i965_fetch_size,
+ .cleanup = intel_i915_cleanup,
+ .tlb_flush = intel_i810_tlbflush,
+ .mask_memory = intel_i810_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = intel_i965_create_gatt_table,
+ .free_gatt_table = intel_i830_free_gatt_table,
+ .insert_memory = intel_i915_insert_entries,
+ .remove_memory = intel_i915_remove_entries,
+ .alloc_by_type = intel_i830_alloc_by_type,
+ .free_by_type = intel_i830_free_by_type,
+ .agp_alloc_page = intel_alloc_page,
+ .agp_destroy_page = intel_destroy_page,
+};
+
+static struct agp_bridge_driver intel_7505_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = intel_7505_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static int find_i810(u16 device)
+{
+ struct pci_dev *i810_dev;
+
+ i810_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
+ if (!i810_dev)
+ return 0;
+ intel_i810_private.i810_dev = i810_dev;
+ return 1;
+}
+
+static int find_i830(u16 device)
+{
+ struct pci_dev *i830_dev;
+
+ i830_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
+ if (i830_dev && PCI_FUNC(i830_dev->devfn) != 0) {
+ i830_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ device, i830_dev);
+ }
+
+ if (!i830_dev)
+ return 0;
+
+ intel_i830_private.i830_dev = i830_dev;
+ return 1;
+}
+
+static int __devinit agp_intel_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ char *name = "(unknown)";
+ u8 cap_ptr = 0;
+ struct resource *r;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_82443LX_0:
+ bridge->driver = &intel_generic_driver;
+ name = "440LX";
+ break;
+ case PCI_DEVICE_ID_INTEL_82443BX_0:
+ bridge->driver = &intel_generic_driver;
+ name = "440BX";
+ break;
+ case PCI_DEVICE_ID_INTEL_82443GX_0:
+ bridge->driver = &intel_generic_driver;
+ name = "440GX";
+ break;
+ case PCI_DEVICE_ID_INTEL_82810_MC1:
+ name = "i810";
+ if (!find_i810(PCI_DEVICE_ID_INTEL_82810_IG1))
+ goto fail;
+ bridge->driver = &intel_810_driver;
+ break;
+ case PCI_DEVICE_ID_INTEL_82810_MC3:
+ name = "i810 DC100";
+ if (!find_i810(PCI_DEVICE_ID_INTEL_82810_IG3))
+ goto fail;
+ bridge->driver = &intel_810_driver;
+ break;
+ case PCI_DEVICE_ID_INTEL_82810E_MC:
+ name = "i810 E";
+ if (!find_i810(PCI_DEVICE_ID_INTEL_82810E_IG))
+ goto fail;
+ bridge->driver = &intel_810_driver;
+ break;
+ case PCI_DEVICE_ID_INTEL_82815_MC:
+ /*
+ * The i815 can operate either as an i810 style
+ * integrated device, or as an AGP4X motherboard.
+ */
+ if (find_i810(PCI_DEVICE_ID_INTEL_82815_CGC))
+ bridge->driver = &intel_810_driver;
+ else
+ bridge->driver = &intel_815_driver;
+ name = "i815";
+ break;
+ case PCI_DEVICE_ID_INTEL_82820_HB:
+ case PCI_DEVICE_ID_INTEL_82820_UP_HB:
+ bridge->driver = &intel_820_driver;
+ name = "i820";
+ break;
+ case PCI_DEVICE_ID_INTEL_82830_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82830_CGC)) {
+ bridge->driver = &intel_830_driver;
+ } else {
+ bridge->driver = &intel_830mp_driver;
+ }
+ name = "830M";
+ break;
+ case PCI_DEVICE_ID_INTEL_82840_HB:
+ bridge->driver = &intel_840_driver;
+ name = "i840";
+ break;
+ case PCI_DEVICE_ID_INTEL_82845_HB:
+ bridge->driver = &intel_845_driver;
+ name = "i845";
+ break;
+ case PCI_DEVICE_ID_INTEL_82845G_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82845G_IG)) {
+ bridge->driver = &intel_830_driver;
+ } else {
+ bridge->driver = &intel_845_driver;
+ }
+ name = "845G";
+ break;
+ case PCI_DEVICE_ID_INTEL_82850_HB:
+ bridge->driver = &intel_850_driver;
+ name = "i850";
+ break;
+ case PCI_DEVICE_ID_INTEL_82855PM_HB:
+ bridge->driver = &intel_845_driver;
+ name = "855PM";
+ break;
+ case PCI_DEVICE_ID_INTEL_82855GM_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82855GM_IG)) {
+ bridge->driver = &intel_830_driver;
+ name = "855";
+ } else {
+ bridge->driver = &intel_845_driver;
+ name = "855GM";
+ }
+ break;
+ case PCI_DEVICE_ID_INTEL_82860_HB:
+ bridge->driver = &intel_860_driver;
+ name = "i860";
+ break;
+ case PCI_DEVICE_ID_INTEL_82865_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82865_IG)) {
+ bridge->driver = &intel_830_driver;
+ } else {
+ bridge->driver = &intel_845_driver;
+ }
+ name = "865";
+ break;
+ case PCI_DEVICE_ID_INTEL_82875_HB:
+ bridge->driver = &intel_845_driver;
+ name = "i875";
+ break;
+ case PCI_DEVICE_ID_INTEL_82915G_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82915G_IG)) {
+ bridge->driver = &intel_915_driver;
+ } else {
+ /* fixme */
+ bridge->driver = &intel_845_driver;
+ }
+ name = "915G";
+ break;
+ case PCI_DEVICE_ID_INTEL_82915GM_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82915GM_IG)) {
+ bridge->driver = &intel_915_driver;
+ } else {
+ /* fixme */
+ bridge->driver = &intel_845_driver;
+ }
+ name = "915GM";
+ break;
+ case PCI_DEVICE_ID_INTEL_82945G_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82945G_IG)) {
+ bridge->driver = &intel_915_driver;
+ } else {
+ /* fixme */
+ bridge->driver = &intel_845_driver;
+ }
+ name = "945G";
+ break;
+ case PCI_DEVICE_ID_INTEL_82945GM_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82945GM_IG)) {
+ bridge->driver = &intel_915_driver;
+ } else {
+ /* fixme */
+ bridge->driver = &intel_845_driver;
+ }
+ name = "945GM";
+ break;
+ case PCI_DEVICE_ID_INTEL_82946GZ_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82946GZ_IG)) {
+ bridge->driver = &intel_i965_driver;
+ } else {
+ /* fixme */
+ bridge->driver = &intel_845_driver;
+ }
+ name = "946GZ";
+ break;
+ case PCI_DEVICE_ID_INTEL_82965G_1_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82965G_1_IG)) {
+ bridge->driver = &intel_i965_driver;
+ } else {
+ /* fixme */
+ bridge->driver = &intel_845_driver;
+ }
+ name = "965G";
+ break;
+ case PCI_DEVICE_ID_INTEL_82965Q_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82965Q_IG)) {
+ bridge->driver = &intel_i965_driver;
+ } else {
+ /* fixme */
+ bridge->driver = &intel_845_driver;
+ }
+ name = "965Q";
+ break;
+ case PCI_DEVICE_ID_INTEL_82965G_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82965G_IG)) {
+ bridge->driver = &intel_i965_driver;
+ } else {
+ /* fixme */
+ bridge->driver = &intel_845_driver;
+ }
+ name = "965G";
+ break;
+ case PCI_DEVICE_ID_INTEL_7505_0:
+ bridge->driver = &intel_7505_driver;
+ name = "E7505";
+ break;
+ case PCI_DEVICE_ID_INTEL_7205_0:
+ bridge->driver = &intel_7505_driver;
+ name = "E7205";
+ break;
+ default:
+ if (cap_ptr)
+ printk(KERN_ERR PFX "Unsupported Intel chipset (device id: %04x)\n",
+ pdev->device);
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ };
+
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ if (bridge->driver == &intel_810_driver)
+ bridge->dev_private_data = &intel_i810_private;
+ else if (bridge->driver == &intel_830_driver)
+ bridge->dev_private_data = &intel_i830_private;
+
+ printk(KERN_INFO PFX "Detected an Intel %s Chipset.\n", name);
+
+ /*
+ * The following fixes the case where the BIOS has "forgotten" to
+ * provide an address range for the GART.
+ * 20030610 - hamish@zot.org
+ */
+ r = &pdev->resource[0];
+ if (!r->start && r->end) {
+ if(pci_assign_resource(pdev, 0)) {
+ printk(KERN_ERR PFX "could not assign resource 0\n");
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+ }
+
+ /*
+ * If the device has not been properly setup, the following will catch
+ * the problem and should stop the system from crashing.
+ * 20030610 - hamish@zot.org
+ */
+ if (pci_enable_device(pdev)) {
+ printk(KERN_ERR PFX "Unable to Enable PCI device\n");
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+
+ /* Fill in the mode register */
+ if (cap_ptr) {
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS,
+ &bridge->mode);
+ }
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+
+fail:
+ printk(KERN_ERR PFX "Detected an Intel %s chipset, "
+ "but could not find the secondary device.\n", name);
+ agp_put_bridge(bridge);
+ return -ENODEV;
+}
+
+static void __devexit agp_intel_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+
+ if (intel_i810_private.i810_dev)
+ pci_dev_put(intel_i810_private.i810_dev);
+ if (intel_i830_private.i830_dev)
+ pci_dev_put(intel_i830_private.i830_dev);
+
+ agp_put_bridge(bridge);
+}
+
+static int agp_intel_resume(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
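+	/* Kernels older than 2.6.11 need the previously saved PCI state
+	 * passed back explicitly. */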
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
+ pci_restore_state(pdev);
+#else
+ pci_restore_state(pdev, intel_i830_private.pm_state);
+
+ /* now restore integrated graphics pci state, if we have one (i830) */
+ if (intel_i830_private.i830_dev)
+ pci_restore_state(intel_i830_private.i830_dev,
+ intel_i830_private.pm_ig_state);
+
+ /* now restore integrated graphics pci state, if we have one (i810) */
+ if (intel_i810_private.i810_dev)
+ pci_restore_state(intel_i810_private.i810_dev,
+ intel_i810_private.pm_ig_state);
+#endif
+
+ if (bridge->driver == &intel_generic_driver)
+ intel_configure();
+ else if (bridge->driver == &intel_850_driver)
+ intel_850_configure();
+ else if (bridge->driver == &intel_845_driver)
+ intel_845_configure();
+ else if (bridge->driver == &intel_830mp_driver)
+ intel_830mp_configure();
+ else if (bridge->driver == &intel_915_driver)
+ intel_i915_configure();
+ else if (bridge->driver == &intel_830_driver)
+ intel_i830_configure();
+ else if (bridge->driver == &intel_810_driver)
+ intel_i810_configure();
+
+ return 0;
+}
+
+static struct pci_device_id agp_intel_pci_table[] = {
+#define ID(x) \
+ { \
+ .class = (PCI_CLASS_BRIDGE_HOST << 8), \
+ .class_mask = ~0, \
+ .vendor = PCI_VENDOR_ID_INTEL, \
+ .device = x, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ }
+ ID(PCI_DEVICE_ID_INTEL_82443LX_0),
+ ID(PCI_DEVICE_ID_INTEL_82443BX_0),
+ ID(PCI_DEVICE_ID_INTEL_82443GX_0),
+ ID(PCI_DEVICE_ID_INTEL_82810_MC1),
+ ID(PCI_DEVICE_ID_INTEL_82810_MC3),
+ ID(PCI_DEVICE_ID_INTEL_82810E_MC),
+ ID(PCI_DEVICE_ID_INTEL_82815_MC),
+ ID(PCI_DEVICE_ID_INTEL_82820_HB),
+ ID(PCI_DEVICE_ID_INTEL_82820_UP_HB),
+ ID(PCI_DEVICE_ID_INTEL_82830_HB),
+ ID(PCI_DEVICE_ID_INTEL_82840_HB),
+ ID(PCI_DEVICE_ID_INTEL_82845_HB),
+ ID(PCI_DEVICE_ID_INTEL_82845G_HB),
+ ID(PCI_DEVICE_ID_INTEL_82850_HB),
+ ID(PCI_DEVICE_ID_INTEL_82855PM_HB),
+ ID(PCI_DEVICE_ID_INTEL_82855GM_HB),
+ ID(PCI_DEVICE_ID_INTEL_82860_HB),
+ ID(PCI_DEVICE_ID_INTEL_82865_HB),
+ ID(PCI_DEVICE_ID_INTEL_82875_HB),
+ ID(PCI_DEVICE_ID_INTEL_7505_0),
+ ID(PCI_DEVICE_ID_INTEL_7205_0),
+ ID(PCI_DEVICE_ID_INTEL_82915G_HB),
+ ID(PCI_DEVICE_ID_INTEL_82915GM_HB),
+ ID(PCI_DEVICE_ID_INTEL_82945G_HB),
+ ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
+ ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
+ ID(PCI_DEVICE_ID_INTEL_82965G_1_HB),
+ ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
+ ID(PCI_DEVICE_ID_INTEL_82965G_HB),
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_intel_pci_table);
+
+static struct pci_driver agp_intel_pci_driver = {
+ .name = "agpgart-intel",
+ .id_table = agp_intel_pci_table,
+ .probe = agp_intel_probe,
+ .remove = __devexit_p(agp_intel_remove),
+ .resume = agp_intel_resume,
+};
+
+static int __init agp_intel_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_intel_pci_driver);
+}
+
+static void __exit agp_intel_cleanup(void)
+{
+ pci_unregister_driver(&agp_intel_pci_driver);
+}
+
+module_init(agp_intel_init);
+module_exit(agp_intel_cleanup);
+
+MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/isoch.c b/isoch.c
new file mode 100644
index 0000000..9a5417a
--- /dev/null
+++ b/isoch.c
@@ -0,0 +1,475 @@
+/*
+ * Setup routines for AGP 3.5 compliant bridges.
+ */
+
+#include <linux/version.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/agp_backend.h>
+#include <linux/module.h>
+
+#include "agp.h"
+
+/* Generic AGP 3.5 enabling routines */
+
+struct agp_3_5_dev {
+ struct list_head list;
+ u8 capndx;
+ u32 maxbw;
+ struct pci_dev *dev;
+};
+
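+/* Insert 'new' so that the list stays ordered by ascending maxbw. */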
+static void agp_3_5_dev_list_insert(struct list_head *head, struct list_head *new)
+{
+ struct agp_3_5_dev *cur, *n = list_entry(new, struct agp_3_5_dev, list);
+ struct list_head *pos;
+
+ list_for_each(pos, head) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+ if(cur->maxbw > n->maxbw)
+ break;
+ }
+ list_add_tail(new, pos);
+}
+
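+/* Re-read each device's maxbw from its NISTAT register and re-insert it,
+ * leaving the list sorted by bandwidth requirement. */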
+static void agp_3_5_dev_list_sort(struct agp_3_5_dev *list, unsigned int ndevs)
+{
+ struct agp_3_5_dev *cur;
+ struct pci_dev *dev;
+ struct list_head *pos, *tmp, *head = &list->list, *start = head->next;
+ u32 nistat;
+
+ INIT_LIST_HEAD(head);
+
+ for (pos=start; pos!=head; ) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+ dev = cur->dev;
+
+ pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &nistat);
+ cur->maxbw = (nistat >> 16) & 0xff;
+
+ tmp = pos;
+ pos = pos->next;
+ agp_3_5_dev_list_insert(head, tmp);
+ }
+}
+
+/*
+ * Initialize all isochronous transfer parameters for an AGP 3.0
+ * node (i.e. a host bridge in combination with the adapters
+ * lying behind it...)
+ */
+
+static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
+ struct agp_3_5_dev *dev_list, unsigned int ndevs)
+{
+ /*
+ * Convenience structure to make the calculations clearer
+ * here. The field names come straight from the AGP 3.0 spec.
+ */
+ struct isoch_data {
+ u32 maxbw;
+ u32 n;
+ u32 y;
+ u32 l;
+ u32 rq;
+ struct agp_3_5_dev *dev;
+ };
+
+ struct pci_dev *td = bridge->dev, *dev;
+ struct list_head *head = &dev_list->list, *pos;
+ struct agp_3_5_dev *cur;
+ struct isoch_data *master, target;
+ unsigned int cdev = 0;
+ u32 mnistat, tnistat, tstatus, mcmd;
+ u16 tnicmd, mnicmd;
+ u8 mcapndx;
+ u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async;
+ u32 step, rem, rem_isoch, rem_async;
+ int ret = 0;
+
+ /*
+ * We'll work with an array of isoch_data's (one for each
+ * device in dev_list) throughout this function.
+ */
+ if ((master = kmalloc(ndevs * sizeof(*master), GFP_KERNEL)) == NULL) {
+ ret = -ENOMEM;
+ goto get_out;
+ }
+
+ /*
+ * Sort the device list by maxbw. We need to do this because the
+ * spec suggests that the devices with the smallest requirements
+ * have their resources allocated first, with all remaining resources
+ * falling to the device with the largest requirement.
+ *
+	 * We don't do exactly this; we divide the target resources by ndevs
+	 * and split them amongst the AGP 3.0 devices. The remainder of each
+	 * division is given to the last device, roughly as the spec says it
+	 * should be done.
+ *
+ * We can't do this sort when we initially construct the dev_list
+ * because we don't know until this function whether isochronous
+ * transfers are enabled and consequently whether maxbw will mean
+ * anything.
+ */
+ agp_3_5_dev_list_sort(dev_list, ndevs);
+
+ pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
+ pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
+
+ /* Extract power-on defaults from the target */
+ target.maxbw = (tnistat >> 16) & 0xff;
+ target.n = (tnistat >> 8) & 0xff;
+ target.y = (tnistat >> 6) & 0x3;
+ target.l = (tnistat >> 3) & 0x7;
+ target.rq = (tstatus >> 24) & 0xff;
+
+ y_max = target.y;
+
+ /*
+ * Extract power-on defaults for each device in dev_list. Along
+ * the way, calculate the total isochronous bandwidth required
+ * by these devices and the largest requested payload size.
+ */
+ list_for_each(pos, head) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+ dev = cur->dev;
+
+ mcapndx = cur->capndx;
+
+ pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat);
+
+ master[cdev].maxbw = (mnistat >> 16) & 0xff;
+ master[cdev].n = (mnistat >> 8) & 0xff;
+ master[cdev].y = (mnistat >> 6) & 0x3;
+ master[cdev].dev = cur;
+
+ tot_bw += master[cdev].maxbw;
+ y_max = max(y_max, master[cdev].y);
+
+ cdev++;
+ }
+
+ /* Check if this configuration has any chance of working */
+ if (tot_bw > target.maxbw) {
+ printk(KERN_ERR PFX "isochronous bandwidth required "
+ "by AGP 3.0 devices exceeds that which is supported by "
+ "the AGP 3.0 bridge!\n");
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+
+ target.y = y_max;
+
+ /*
+ * Write the calculated payload size into the target's NICMD
+	 * register. Doing this directly affects the ISOCH_N value
+ * in the target's NISTAT register, so we need to do this now
+ * to get an accurate value for ISOCH_N later.
+ */
+ pci_read_config_word(td, bridge->capndx+AGPNICMD, &tnicmd);
+ tnicmd &= ~(0x3 << 6);
+ tnicmd |= target.y << 6;
+ pci_write_config_word(td, bridge->capndx+AGPNICMD, tnicmd);
+
+ /* Reread the target's ISOCH_N */
+ pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
+ target.n = (tnistat >> 8) & 0xff;
+
+ /* Calculate the minimum ISOCH_N needed by each master */
+ for (cdev=0; cdev<ndevs; cdev++) {
+ master[cdev].y = target.y;
+ master[cdev].n = master[cdev].maxbw / (master[cdev].y + 1);
+
+ tot_n += master[cdev].n;
+ }
+
+ /* Exit if the minimal ISOCH_N allocation among the masters is more
+ * than the target can handle. */
+ if (tot_n > target.n) {
+ printk(KERN_ERR PFX "number of isochronous "
+ "transactions per period required by AGP 3.0 devices "
+ "exceeds that which is supported by the AGP 3.0 "
+ "bridge!\n");
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+
+ /* Calculate left over ISOCH_N capability in the target. We'll give
+ * this to the hungriest device (as per the spec) */
+ rem = target.n - tot_n;
+
+ /*
+ * Calculate the minimum isochronous RQ depth needed by each master.
+ * Along the way, distribute the extra ISOCH_N capability calculated
+ * above.
+ */
+ for (cdev=0; cdev<ndevs; cdev++) {
+ /*
+ * This is a little subtle. If ISOCH_Y > 64B, then ISOCH_Y
+ * byte isochronous writes will be broken into 64B pieces.
+ * This means we need to budget more RQ depth to account for
+	 * these kinds of writes (each isochronous write is actually
+ * many writes on the AGP bus).
+ */
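+		/* e.g. ISOCH_Y == 2 (128 byte payloads): each isochronous write
+		 * becomes two 64 byte transactions, so the RQ budget doubles. */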
+ master[cdev].rq = master[cdev].n;
+ if(master[cdev].y > 0x1)
+ master[cdev].rq *= (1 << (master[cdev].y - 1));
+
+ tot_rq += master[cdev].rq;
+
+ if (cdev == ndevs-1)
+ master[cdev].n += rem;
+ }
+
+ /* Figure the number of isochronous and asynchronous RQ slots the
+ * target is providing. */
+ rq_isoch = (target.y > 0x1) ? target.n * (1 << (target.y - 1)) : target.n;
+ rq_async = target.rq - rq_isoch;
+
+ /* Exit if the minimal RQ needs of the masters exceeds what the target
+ * can provide. */
+ if (tot_rq > rq_isoch) {
+ printk(KERN_ERR PFX "number of request queue slots "
+ "required by the isochronous bandwidth requested by "
+ "AGP 3.0 devices exceeds the number provided by the "
+ "AGP 3.0 bridge!\n");
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+
+ /* Calculate asynchronous RQ capability in the target (per master) as
+ * well as the total number of leftover isochronous RQ slots. */
+ step = rq_async / ndevs;
+ rem_async = step + (rq_async % ndevs);
+ rem_isoch = rq_isoch - tot_rq;
+
+ /* Distribute the extra RQ slots calculated above and write our
+ * isochronous settings out to the actual devices. */
+ for (cdev=0; cdev<ndevs; cdev++) {
+ cur = master[cdev].dev;
+ dev = cur->dev;
+
+ mcapndx = cur->capndx;
+
+ master[cdev].rq += (cdev == ndevs - 1)
+ ? (rem_async + rem_isoch) : step;
+
+ pci_read_config_word(dev, cur->capndx+AGPNICMD, &mnicmd);
+ pci_read_config_dword(dev, cur->capndx+AGPCMD, &mcmd);
+
+ mnicmd &= ~(0xff << 8);
+ mnicmd &= ~(0x3 << 6);
+ mcmd &= ~(0xff << 24);
+
+ mnicmd |= master[cdev].n << 8;
+ mnicmd |= master[cdev].y << 6;
+ mcmd |= master[cdev].rq << 24;
+
+ pci_write_config_dword(dev, cur->capndx+AGPCMD, mcmd);
+ pci_write_config_word(dev, cur->capndx+AGPNICMD, mnicmd);
+ }
+
+free_and_exit:
+ kfree(master);
+
+get_out:
+ return ret;
+}
+
+/*
+ * This function basically allocates request queue slots among the
+ * AGP 3.0 devices on non-isochronous nodes. The algorithm is
+ * pretty simple: divide the total number of RQ slots provided by the
+ * target by ndevs, distribute that many slots to each AGP 3.0 device,
+ * and give any leftover slots to the last device in dev_list.
+ */
+static void agp_3_5_nonisochronous_node_enable(struct agp_bridge_data *bridge,
+ struct agp_3_5_dev *dev_list, unsigned int ndevs)
+{
+ struct agp_3_5_dev *cur;
+ struct list_head *head = &dev_list->list, *pos;
+ u32 tstatus, mcmd;
+ u32 trq, mrq, rem;
+ unsigned int cdev = 0;
+
+ pci_read_config_dword(bridge->dev, bridge->capndx+AGPSTAT, &tstatus);
+
+ trq = (tstatus >> 24) & 0xff;
+ mrq = trq / ndevs;
+
+ rem = mrq + (trq % ndevs);
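+	/* e.g. trq == 32 and ndevs == 3: each device gets mrq == 10 slots
+	 * and the last device gets rem == 12. */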
+
+ for (pos=head->next; cdev<ndevs; cdev++, pos=pos->next) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+
+ pci_read_config_dword(cur->dev, cur->capndx+AGPCMD, &mcmd);
+ mcmd &= ~(0xff << 24);
+ mcmd |= ((cdev == ndevs - 1) ? rem : mrq) << 24;
+ pci_write_config_dword(cur->dev, cur->capndx+AGPCMD, mcmd);
+ }
+}
+
+/*
+ * Fully configure and enable an AGP 3.0 host bridge and all the devices
+ * lying behind it.
+ */
+int agp_3_5_enable(struct agp_bridge_data *bridge)
+{
+ struct pci_dev *td = bridge->dev, *dev = NULL;
+ u8 mcapndx;
+ u32 isoch, arqsz;
+ u32 tstatus, mstatus, ncapid;
+ u32 mmajor;
+ u16 mpstat;
+ struct agp_3_5_dev *dev_list, *cur;
+ struct list_head *head, *pos;
+ unsigned int ndevs = 0;
+ int ret = 0;
+
+ /* Extract some power-on defaults from the target */
+ pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
+ isoch = (tstatus >> 17) & 0x1;
+ if (isoch == 0) /* isoch xfers not available, bail out. */
+ return -ENODEV;
+
+ arqsz = (tstatus >> 13) & 0x7;
+
+ /*
+ * Allocate a head for our AGP 3.5 device list
+ * (multiple AGP v3 devices are allowed behind a single bridge).
+ */
+ if ((dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL)) == NULL) {
+ ret = -ENOMEM;
+ goto get_out;
+ }
+ head = &dev_list->list;
+ INIT_LIST_HEAD(head);
+
+ /* Find all AGP devices, and add them to dev_list. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
+ for_each_pci_dev(dev) {
+#else
+ while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+#endif
+ mcapndx = pci_find_capability(dev, PCI_CAP_ID_AGP);
+ if (mcapndx == 0)
+ continue;
+
+ switch ((dev->class >>8) & 0xff00) {
+ case 0x0600: /* Bridge */
+ /* Skip bridges. We should call this function for each one. */
+ continue;
+
+ case 0x0001: /* Unclassified device */
+ /* Don't know what this is, but log it for investigation. */
+ if (mcapndx != 0) {
+ printk (KERN_INFO PFX "Wacky, found unclassified AGP device. %x:%x\n",
+ dev->vendor, dev->device);
+ }
+ continue;
+
+ case 0x0300: /* Display controller */
+ case 0x0400: /* Multimedia controller */
+ if((cur = kmalloc(sizeof(*cur), GFP_KERNEL)) == NULL) {
+ ret = -ENOMEM;
+ goto free_and_exit;
+ }
+ cur->dev = dev;
+
+ pos = &cur->list;
+ list_add(pos, head);
+ ndevs++;
+ continue;
+
+ default:
+ continue;
+ }
+ }
+
+ /*
+ * Take an initial pass through the devices lying behind our host
+ * bridge. Make sure each one is actually an AGP 3.0 device, otherwise
+ * exit with an error message. Along the way store the AGP 3.0
+ * cap_ptr for each device
+ */
+ list_for_each(pos, head) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+ dev = cur->dev;
+
+ pci_read_config_word(dev, PCI_STATUS, &mpstat);
+ if ((mpstat & PCI_STATUS_CAP_LIST) == 0)
+ continue;
+
+ pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &mcapndx);
+ if (mcapndx != 0) {
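+			/* Walk the capability list until the AGP capability (ID 2)
+			 * is found or the list ends. */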
+ do {
+ pci_read_config_dword(dev, mcapndx, &ncapid);
+ if ((ncapid & 0xff) != 2)
+ mcapndx = (ncapid >> 8) & 0xff;
+ }
+ while (((ncapid & 0xff) != 2) && (mcapndx != 0));
+ }
+
+ if (mcapndx == 0) {
+ printk(KERN_ERR PFX "woah! Non-AGP device "
+ "found on the secondary bus of an AGP 3.5 bridge!\n");
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+
+ mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
+ if (mmajor < 3) {
+ printk(KERN_ERR PFX "woah! AGP 2.0 device "
+ "found on the secondary bus of an AGP 3.5 "
+ "bridge operating with AGP 3.0 electricals!\n");
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+
+ cur->capndx = mcapndx;
+
+ pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus);
+
+ if (((mstatus >> 3) & 0x1) == 0) {
+ printk(KERN_ERR PFX "woah! AGP 3.x device "
+ "not operating in AGP 3.x mode found on the "
+ "secondary bus of an AGP 3.5 bridge operating "
+ "with AGP 3.0 electricals!\n");
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+ }
+
+ /*
+ * Call functions to divide target resources amongst the AGP 3.0
+ * masters. This process is dramatically different depending on
+ * whether isochronous transfers are supported.
+ */
+ if (isoch) {
+ ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs);
+ if (ret) {
+ printk(KERN_INFO PFX "Something bad happened setting "
+ "up isochronous xfers. Falling back to "
+ "non-isochronous xfer mode.\n");
+ } else {
+ goto free_and_exit;
+ }
+ }
+ agp_3_5_nonisochronous_node_enable(bridge, dev_list, ndevs);
+
+free_and_exit:
+ /* Be sure to free the dev_list */
+ for (pos=head->next; pos!=head; ) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+
+ pos = pos->next;
+ kfree(cur);
+ }
+ kfree(dev_list);
+
+get_out:
+ return ret;
+}
+
diff --git a/picker.c b/picker.c
new file mode 100644
index 0000000..6c228df
--- /dev/null
+++ b/picker.c
@@ -0,0 +1,30 @@
+
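+/* This file appears to be fed through the C preprocessor so that a makefile
+ * can pick up the kernel configuration; unset options default to 0 below. */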
+#include <linux/config.h>
+#include <linux/version.h>
+
+#ifndef CONFIG_SMP
+#define CONFIG_SMP 0
+#endif
+
+#ifndef CONFIG_MODULES
+#define CONFIG_MODULES 0
+#endif
+
+#ifndef CONFIG_MODVERSIONS
+#define CONFIG_MODVERSIONS 0
+#endif
+
+#ifndef CONFIG_AGP_MODULE
+#define CONFIG_AGP_MODULE 0
+#endif
+
+#ifndef CONFIG_AGP
+#define CONFIG_AGP 0
+#endif
+
+SMP = CONFIG_SMP
+MODULES = CONFIG_MODULES
+MODVERSIONS = CONFIG_MODVERSIONS
+AGP = CONFIG_AGP
+AGP_MODULE = CONFIG_AGP_MODULE
+RELEASE = UTS_RELEASE
diff --git a/testagp.c b/testagp.c
new file mode 100644
index 0000000..7487506
--- /dev/null
+++ b/testagp.c
@@ -0,0 +1,1573 @@
+/*
+ * Copyright (C) 2003 Tungsten Graphics
+ * Copyright (C) 2003 Jeff Hartmann
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * testagp: Test program for /dev/agpgart under Linux, slightly based off old
+ * testgart program which had the following copyright notice.
+ *
+ * Copyright (C) 1999 Jeff Hartmann,
+ * Precision Insight, Inc., Xi Graphics, Inc.
+ *
+ * Currently it works only on ia32-compatible archs; however, it should be
+ * trivial to convert it for use on other archs.
+ *
+ * Changelog:
+ * Dec 2002: Initial conversion of testgart program to something more useful.
+ * Jan 2003: Add AGP 3.0 tests for new ioctl's that are exported.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#include <sys/wait.h>
+#include <linux/types.h>
+#include "agpgart.h"
+#include <asm/mtrr.h>
+#include <errno.h>
+#include <pwd.h>
+#include <signal.h>
+
+#ifndef AGP_RESERVED_KEY
+#define AGP_RESERVED_KEY (-2)
+#endif
+
+/* These should probably be variables, since the agp version 3.0
+ * capable /dev/agpgart will provide this information.
+ */
+#define AGP_PAGE_SHIFT 12
+#define AGP_PAGE_SIZE (1 << AGP_PAGE_SHIFT)
+#define PTR_TO_PAGE(x, y) ((char *)(x) + ((y) << AGP_PAGE_SHIFT))
+#define CONST_PATTERN_1 0xdeadbeaf
+#define CONST_PATTERN_2 0xaef3456a
+/* Number of bytes to dump of an error condition by default */
+#define HEXDUMP_BYTES 128
+/* Number of hex digits to print out for the index in a hex dump */
+#define NUMBER_IDX_PLACES 4
+#define TEST_GET_MAP_SUPPORT 2
+
+/* Global information about the reserved block of memory */
+int agpgartSupportsGetMap = TEST_GET_MAP_SUPPORT;
+agp_map reserved_map;
+
+/* Global pointer to gart and the gart file descriptor. */
+unsigned char *gart;
+int gartfd;
+
+/* General Utility Functions */
+
+/* Call xchg and cpuid asm instructions to flush the write combining cache.
+ * Could be replaced with sfence on some cpus or perhaps just the code
+ * for mb() from the kernel
+ */
+void flushWriteCombining(void)
+{
+ int xchangeDummy;
+ __asm__ volatile(" push %%eax ; "
+ " xchg %%eax, %0 ;"
+ " pop %%eax" : : "m" (xchangeDummy));
+ __asm__ volatile(" push %%eax ; "
+ " push %%ebx ; "
+ " push %%ecx ; "
+ " push %%edx ; "
+ " movl $0,%%eax ; "
+ " cpuid ; "
+ " pop %%edx ; "
+ " pop %%ecx ; "
+ " pop %%ebx ; "
+ " pop %%eax" : /* no outputs */ : /* no inputs */ );
+}
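+
+/* A minimal sketch of the lighter-weight alternative mentioned above,
+ * assuming the CPU supports SSE (sfence). It is illustrative only and
+ * not used by the tests below.
+ */
+static inline void flushWriteCombiningSfence(void)
+{
+ __asm__ volatile("sfence" : : : "memory");
+}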
+
+int usec(void)
+{
+ struct timeval tv;
+ struct timezone tz;
+
+ gettimeofday( &tv, &tz );
+ return (tv.tv_sec & 2047) * 1000000 + tv.tv_usec;
+}
+
+int coverRangeWithMTRR(int base, int range, int type)
+{
+ int mtrr;
+
+ if ((mtrr = open("/proc/mtrr", O_WRONLY, 0)) == -1) {
+ if (errno == ENOENT) {
+ perror("/proc/mtrr not found: MTRR not enabled\n");
+ } else {
+ perror("Error opening /proc/mtrr:");
+ perror("MTRR not enabled\n");
+ }
+ mtrr = -1;
+ }
+
+ /* set it if we aren't just checking the number */
+ if (mtrr != -1 && type != -1 ) {
+ struct mtrr_sentry sentry;
+
+ sentry.base = base;
+ sentry.size = range;
+ sentry.type = type;
+
+ if(ioctl(mtrr, MTRRIOC_ADD_ENTRY, &sentry) == -1 ) {
+ perror("Error during ioctl MTRR_ADD_ENTRY\n");
+ exit(1);
+ }
+ }
+ return mtrr;
+}
+
+/* size is in bytes */
+void fillBufConstPattern(unsigned int pattern, char *bufPtr,
+ int size)
+{
+ unsigned int *buf = (unsigned int *)bufPtr;
+ int i;
+
+ for(i = 0; i < size / sizeof(unsigned int); i++) {
+ buf[i] = pattern;
+ }
+}
+
+/* Returns the failed index on the error, -1 on success */
+int checkBufConstPattern(unsigned int pattern, char *bufPtr,
+ int size)
+{
+ unsigned int *buf = (unsigned int *)bufPtr;
+ int i;
+
+ for(i = 0; i < size / sizeof(unsigned int); i++) {
+ if(buf[i] != pattern) return i;
+ }
+ return -1;
+}
+
+void createRandomBuffer(char *bufPtr,
+ int size)
+{
+ unsigned int *buf = (unsigned int *)bufPtr;
+ int i;
+
+ for(i = 0; i < size / sizeof(unsigned int); i++) {
+ buf[i] = rand();
+ }
+}
+
+static inline char *valueToHex(int digit)
+{
+ switch(digit & 0xf) {
+ case 0: return "0";
+ case 1: return "1";
+ case 2: return "2";
+ case 3: return "3";
+ case 4: return "4";
+ case 5: return "5";
+ case 6: return "6";
+ case 7: return "7";
+ case 8: return "8";
+ case 9: return "9";
+ case 10: return "a";
+ case 11: return "b";
+ case 12: return "c";
+ case 13: return "d";
+ case 14: return "e";
+ case 15: return "f";
+ }
+ /* Not reached: digit & 0xf covers every case, but keep the compiler happy. */
+ return "0";
+}
+
+/* Only used to print up to 16 bytes at a time so 80 is safe */
+void printBytes(char *data, int numBytes)
+{
+ char temp[80];
+ int i;
+
+ temp[0] = '\0';
+ for(i = 0; i < numBytes; i++) {
+ strcat(temp, valueToHex(data[i] >> 4));
+ strcat(temp, valueToHex(data[i]));
+ strcat(temp, " ");
+ }
+ printf("%s\n", temp);
+}
+
+
+void printIdx(int index, int places)
+{
+ char temp[80];
+ int i;
+
+ temp[0] = '\0';
+ --places;
+ for(i = places; i >= 0; i--) {
+ strcat(temp, valueToHex(index >> (i * 4)));
+ }
+ printf("%s : ", temp);
+}
+
+void printHexDump(char *data, int outputBytes)
+{
+ int loop16 = outputBytes / 16;
+ int remainBytes = outputBytes % 16;
+ int i;
+
+ for(i = 0; i < loop16; i++, data += 16) {
+ printIdx(i * 16, NUMBER_IDX_PLACES);
+ printBytes(data, 16);
+ }
+ if(remainBytes) {
+ printIdx(i * 16, NUMBER_IDX_PLACES);
+ printBytes(data, remainBytes);
+ }
+}
+
+int memoryBenchmark(void *buffer, int dwords)
+{
+ int i;
+ int start, end;
+ int mb;
+ int *base;
+
+ base = (int *)buffer;
+ start = usec();
+ for ( i = 0 ; i < dwords ; i += 8 ) {
+ base[i] =
+ base[i+1] =
+ base[i+2] =
+ base[i+3] =
+ base[i+4] =
+ base[i+5] =
+ base[i+6] =
+ base[i+7] = 0xdeadbeef;
+ }
+ end = usec();
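+ /* 0x40000 dwords is 1 MB, so dwords / 0x40000 is the number of MB
+ * written; scaling by 1000000 / elapsed-microseconds gives MB/s. */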
+ mb = ( (float)dwords / 0x40000 ) * 1000000 / (end - start);
+ printf("MemoryBenchmark: %i mb/s\n", mb );
+ return mb;
+}
+
+/* Functions to perform /dev/agpgart ioctls and general agp setup */
+
+int unbindMemory(int key)
+{
+ agp_unbind unbind;
+
+ unbind.key = key;
+#ifdef DEBUG
+ printf("Using AGPIOC_UNBIND\n");
+#endif
+ if(ioctl(gartfd, AGPIOC_UNBIND, &unbind) != 0) {
+ perror("ioctl(AGPIOC_UNBIND)");
+ exit(1);
+ }
+
+ return 0;
+}
+
+int bindMemory(int key, int page)
+{
+ agp_bind bind;
+
+ bind.key = key;
+ bind.pg_start = page;
+#ifdef DEBUG
+ printf("Using AGPIOC_BIND\n");
+#endif
+ if(ioctl(gartfd, AGPIOC_BIND, &bind) != 0) {
+ perror("ioctl(AGPIOC_BIND)");
+ exit(1);
+ }
+
+ return 0;
+}
+
+int allocMemory(int size)
+{
+ agp_allocate entry;
+
+ entry.type = 0;
+ entry.pg_count = size;
+#ifdef DEBUG
+ printf("Using AGPIOC_ALLOCATE\n");
+#endif
+ if(ioctl(gartfd, AGPIOC_ALLOCATE, &entry) != 0) {
+ perror("ioctl(AGPIOC_ALLOCATE)");
+ exit(1);
+ }
+ return entry.key;
+}
+
+int allocAndBind(int page, int size)
+{
+ int key = allocMemory(size);
+
+ bindMemory(key, page);
+ return key;
+}
+
+int freeMemory(int key)
+{
+#ifdef DEBUG
+ printf("Using AGPIOC_DEALLOCATE\n");
+#endif
+ if(ioctl(gartfd, AGPIOC_DEALLOCATE, key) != 0) {
+ perror("ioctl(AGPIOCREMOVE)");
+ exit(1);
+ }
+
+ return 0;
+}
+
+void getAgpInfo(agp_info *info)
+{
+#ifdef DEBUG
+ printf("Using AGPIOC_INFO\n");
+#endif
+ if(ioctl(gartfd, AGPIOC_INFO, info) != 0) {
+ perror("ioctl(AGPIOC_INFO)");
+ exit(1);
+ }
+}
+
+int getCurrentPages(void)
+{
+ agp_info info;
+
+ getAgpInfo(&info);
+ return info.pg_used;
+}
+
+void openAgpDevice(int flags)
+{
+ gartfd = open("/dev/agpgart", flags);
+ if (gartfd == -1) {
+ perror("Open of /dev/agpgart failed!");
+ exit(1);
+ }
+}
+
+size_t getApertureSize(void)
+{
+ agp_info info;
+
+ getAgpInfo(&info);
+ return info.aper_size;
+}
+
+void unmapAgpAperture(size_t aper_size)
+{
+ if(gartfd != -1) {
+ munmap(gart, aper_size * 0x100000);
+ gart = (unsigned char *)0xffffffff;
+ close(gartfd);
+ gartfd = -1;
+ }
+}
+
+void mapAgpAperture(size_t aper_size, int prot)
+{
+ gart = mmap(NULL, aper_size * 0x100000,
+ prot, MAP_SHARED, gartfd, 0);
+#ifdef DEBUG
+ if(gart == (unsigned char *)0xffffffff) {
+ perror("mmap failed with ");
+ }
+#endif
+}
+
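+/* Probe once for the AGP 3.0 extended interface: AGPIOC_GETMAP on the
+ * reserved key fails with ENOTTY on drivers that only implement the
+ * legacy ioctls, which is how the two are told apart here.
+ */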
+int supportsGetMap(void)
+{
+ if(agpgartSupportsGetMap == TEST_GET_MAP_SUPPORT) {
+ memset(&reserved_map, 0, sizeof(reserved_map));
+ reserved_map.key = AGP_RESERVED_KEY;
+
+ if(ioctl(gartfd, AGPIOC_GETMAP, &reserved_map) == -1 &&
+ errno == ENOTTY) {
+ agpgartSupportsGetMap = FALSE;
+ } else {
+ agpgartSupportsGetMap = TRUE;
+ }
+ }
+
+ return agpgartSupportsGetMap;
+}
+
+/* Only call if supportsGetMap returns TRUE */
+void printReserved(void)
+{
+ printf("Reserved agp memory block: \n");
+ printf("bound: %s, ", reserved_map.is_bound ? "yes" : "no");
+ printf("offset: 0x%08x, ", reserved_map.pg_start * AGP_PAGE_SIZE);
+ printf("size: 0x%08x\n", reserved_map.page_count * AGP_PAGE_SIZE);
+}
+
+int checkPageAvailable(int page)
+{
+ agp_map map;
+ off_t pg_start;
+ off_t pg_end;
+
+ /* If we don't support get map, the page is available */
+ if(supportsGetMap() == FALSE) return TRUE;
+
+ /* Reserved map is of zero size or reserved region not bound, so page
+ * is available */
+ if(reserved_map.page_count == 0 ||
+ reserved_map.is_bound == FALSE) return TRUE;
+
+ /* If we are equal to or greater than the first page but
+ * less then the first page + page count then we are within the
+ * reserved region. This page isn't available. Otherwise it
+ * is available for use.
+ */
+ pg_start = reserved_map.pg_start;
+ pg_end = reserved_map.pg_start + reserved_map.page_count - 1;
+ if(pg_start <= page &&
+ page <= pg_end) return FALSE;
+
+ return TRUE;
+}
+
+int getNumberOfPages(void)
+{
+ unsigned long apertureSize = getApertureSize() * 0x100000;
+ int numberPages = apertureSize / AGP_PAGE_SIZE;
+
+ return numberPages;
+}
+
+/* Very simple brute-force approach to finding a slot of the given size;
+ * if we wanted this to be fast we would need a real memory allocator of
+ * some kind, but this test code doesn't have to be terribly efficient.
+ * This function only accounts for the reserved memory, nothing else at
+ * the moment. That's all we need for now.
+ *
+ * returns -1 if we can't find such a region.
+ */
+
+int findFirstRegionOfSize(int numberPages)
+{
+
+ int pagesTotal = getNumberOfPages();
+ int i, k;
+
+ for(i = 0; i < pagesTotal; i++) {
+ if(checkPageAvailable(i) == TRUE) {
+ /* We already know the first page is available */
+ for(k = 1; k < numberPages; k++) {
+ if(checkPageAvailable(i + k) == FALSE) break;
+ }
+ /* Full region is available, return i as the index */
+ if(k == numberPages) return i;
+ /* We didn't get the full region and page i + k is not available,
+ * so continue testing at i + k + 1 (the loop increment supplies
+ * the final + 1).
+ */
+ i = i + k;
+ }
+ }
+ return -1;
+}
+
+/* Returns the size of the aperture in megabytes */
+int initializeAgp(void)
+{
+ agp_info info;
+ agp_setup setup;
+
+#ifdef DEBUG
+ printf("Using AGPIOC_ACQUIRE\n");
+#endif
+ if(ioctl(gartfd, AGPIOC_ACQUIRE) != 0) {
+ perror("ioctl(AGPIOC_ACQUIRE)");
+ exit(1);
+ }
+ getAgpInfo(&info);
+
+ printf("Basic information extracted from /dev/agpgart:\n");
+ printf("Agpgart Interface Version: %i.%i\n",
+ info.version.major,
+ info.version.minor);
+ printf("Bridge pci id: 0x%lx\n", info.bridge_id);
+ printf("Agp mode: 0x%lx\n", info.agp_mode);
+ printf("Aperture base: 0x%lx\n", info.aper_base);
+ printf("Aperture size: %iMB\n", info.aper_size);
+ printf("Pages allowed total: %i\n", info.pg_total);
+ printf("Pages allowed from memory: %i\n", info.pg_system);
+ printf("Pages currently used: %i\n", info.pg_used);
+
+ coverRangeWithMTRR(info.aper_base, info.aper_size * 0x100000,
+ MTRR_TYPE_WRCOMB);
+
+ mapAgpAperture(info.aper_size, PROT_READ | PROT_WRITE);
+
+ if(gart == (unsigned char *) 0xffffffff) {
+ close(gartfd);
+ exit(1);
+ }
+
+ setup.agp_mode = info.agp_mode;
+#ifdef DEBUG
+ printf("Using AGPIOC_SETUP\n");
+#endif
+ if(ioctl(gartfd, AGPIOC_SETUP, &setup) != 0) {
+ perror("ioctl(AGPIOC_SETUP)");
+ exit(1);
+ }
+
+ if(supportsGetMap() == TRUE) {
+ printReserved();
+ }
+ return info.aper_size;
+}
+
+/* Test functions start in earnest */
+void simpleBenchMark(unsigned char *regionPtr)
+{
+ int i;
+ unsigned long *tempPtr;
+
+ /* Make sure we are paged in, then do the performance test */
+ tempPtr = (unsigned long *)regionPtr;
+ for(i = 0; i < (4 * 1024 * 1024) / sizeof(unsigned long); i++) {
+ tempPtr[i] = 0;
+ }
+ printf("Benchmarking writes:\n");
+
+ i = memoryBenchmark(regionPtr, (1024 * 1024 * 4) / 4) +
+ memoryBenchmark(regionPtr, (1024 * 1024 * 4) / 4) +
+ memoryBenchmark(regionPtr, (1024 * 1024 * 4) / 4);
+
+ printf("Average speed: %i mb/s\n", i / 3);
+}
+
+int oldIntegrity(int firstPage, int key1, int key2,
+ unsigned char *regionPtrStageOne,
+ unsigned char *regionPtrStageTwo,
+ int do_rebind)
+{
+ int i, worked1 = 1, worked2 = 1;
+
+ printf("Testing data integrity (1st pass): ");
+ fflush(stdout);
+
+ flushWriteCombining();
+
+ for(i=0; i < 8 * 0x100000; i++) {
+ regionPtrStageOne[i] = i % 256;
+ }
+
+ flushWriteCombining();
+
+ for(i=0; i < 8 * 0x100000; i++) {
+ if(!(regionPtrStageOne[i] == i % 256)) {
+#ifdef DEBUG
+ printf("failed on %i, gart[i] = %i\n", i, regionPtr[i]);
+#endif
+ worked1 = 0;
+ break;
+ }
+ }
+
+ if (!worked1) printf("failed on first pass!\n");
+ else printf("passed on first pass.\n");
+
+ if(do_rebind == TRUE) {
+ if(key1 >= 0 && key2 >= 0) {
+ unbindMemory(key1);
+ unbindMemory(key2);
+ bindMemory(key1, firstPage);
+ bindMemory(key2, firstPage + 1024);
+ } else if(key1 >= 0) {
+ /* Special test case where we know we are unbound. */
+ bindMemory(key1, firstPage);
+ }
+ }
+
+ printf("Testing data integrity (2nd pass): ");
+ fflush(stdout);
+
+ for(i=0; i < 8 * 0x100000; i++) {
+ if(!(regionPtrStageTwo[i] == i % 256)) {
+#ifdef DEBUG
+ printf("failed on %i, gart[i] = %i\n", i, regionPtr[i]);
+#endif
+ worked2 = 0;
+ }
+ }
+
+ if(!worked2) printf("failed on second pass!\n");
+ else printf("passed on second pass.\n");
+
+ return worked1 & worked2;
+}
+
+/* Quick benchmark and very simple data integrity test */
+void legacyTests()
+{
+ int key1, key2;
+ int firstPage = 0;
+ int worked;
+ int totalStart = getCurrentPages();
+
+ printf("\nNow performing legacy testgart functionality.\n");
+ firstPage = findFirstRegionOfSize(2048);
+ if(firstPage == -1) {
+ printf("There are no 8MB regions, so we can't perform the legecy "
+ "tests.\n");
+ return;
+ }
+
+ printf("Testing from offset into gart : 0x%08x\n",
+ (unsigned int)(firstPage * AGP_PAGE_SIZE));
+ key1 = allocAndBind(firstPage, 1024);
+ key2 = allocAndBind(firstPage + 1024, 1024);
+
+ if(key1 >= 0 && key2 >= 0 && getCurrentPages() - totalStart == 2048) {
+ printf("Successfully allocated 8 megs of memory from /dev/agpgart\n");
+ } else {
+ printf("Couldn't successfully allocate 8 megs of GART memory\n");
+ printf("Legacy tests failed!\n");
+ return;
+ }
+
+ simpleBenchMark(PTR_TO_PAGE(gart, firstPage));
+
+ worked = oldIntegrity(firstPage, key1, key2, PTR_TO_PAGE(gart, firstPage),
+ PTR_TO_PAGE(gart, firstPage), TRUE);
+
+ freeMemory(key1);
+ freeMemory(key2);
+ if(getCurrentPages() - totalStart == 0) {
+ printf("Successfully deallocated memory from /dev/agpgart.\n");
+ } else {
+ printf("Memory was not successfully deallocated\n");
+ printf("Start total : %d, Current total: %d\n",
+ totalStart, getCurrentPages());
+ printf("Legacy tests failed!\n");
+ return;
+ }
+ if(worked) printf("Legacy tests passed.\n");
+ else printf("Legacy tests failed!\n");
+}
+
+/* Takes aperture size in megabytes,
+ * Plugs in pages around any reserved area we know about.
+ */
+void apertureIntegrityTests(int aper_size)
+{
+ unsigned long apertureSize = aper_size * 0x100000;
+ int numberPages = apertureSize / AGP_PAGE_SIZE;
+ int firstPage = 0;
+ char *pagePtr;
+ char *patternBuf = malloc(2 * AGP_PAGE_SIZE);
+ int sectionKey;
+ int i;
+ int test;
+
+ if(!patternBuf) {
+ printf("Failed allocating pattern buffer!");
+ exit(1);
+ }
+
+ /* Test with one page first, fill it with a constant pattern
+ * then move it around the whole aperture and test for the correct value
+ * at that location.
+ */
+ printf("\nAperture Integrity Tests:\n"
+ "Now performing various integrity tests on the agp aperture.\n");
+ printf("These are useful to see if an agpgart driver for a specific "
+ "chipset\n");
+ printf("is functioning correctly\n\n");
+ printf("Simple Constant Pattern Test 1:\n");
+ printf("\tThis test allocates a single page of agp memory and fills it\n");
+ printf("with a constant pattern. It then binds it into each available\n");
+ printf("page location in the aperture and tests to make sure it "
+ "matches.\n");
+ printf("Performing Simple Constant Pattern Test 1: ");
+ fflush(stdout);
+
+ firstPage = findFirstRegionOfSize(1);
+ if(firstPage == -1) {
+ printf("Fatal error, can't find any size 1 regions.\n");
+ printf("Exiting.\n");
+ exit(1);
+ }
+#if 0
+ printf("First Page (%d)\n", firstPage);
+#endif
+ sectionKey = allocAndBind(firstPage, 1);
+ /* Start at the beginning of the aperture */
+
+ pagePtr = PTR_TO_PAGE(gart, firstPage);
+ fillBufConstPattern(CONST_PATTERN_1, pagePtr, AGP_PAGE_SIZE);
+ flushWriteCombining();
+ unbindMemory(sectionKey);
+ for(i = firstPage; i < numberPages; i++) {
+ if(checkPageAvailable(i) == FALSE) continue;
+ bindMemory(sectionKey, i);
+ pagePtr = PTR_TO_PAGE(gart, i);
+ test = checkBufConstPattern(CONST_PATTERN_1, pagePtr, AGP_PAGE_SIZE);
+ if(test != -1) {
+ printf("failed!\n");
+ printf("Simple constant pattern test has failed at page %d\n",
+ i);
+ printf("The Dword at offset %d from the start of that page was "
+ "incorrect.\n", test);
+ printf("Expected : [0x%lx], Got : [0x%lx]\n", CONST_PATTERN_1,
+ ((unsigned int *)pagePtr) + test);
+ printf("Integrity tests failed\n");
+ unbindMemory(sectionKey);
+ freeMemory(sectionKey);
+ return;
+ }
+ unbindMemory(sectionKey);
+ }
+ printf("passed.\n");
+ fflush(stdout);
+ freeMemory(sectionKey);
+
+ /* Do the same test as above but do it with two pages */
+ firstPage = findFirstRegionOfSize(2);
+ if(firstPage == -1) {
+ printf("Fatal error, can't find any size 2 regions.\n");
+ printf("Exiting.\n");
+ exit(1);
+ }
+ sectionKey = allocAndBind(firstPage, 2);
+ printf("\nSimple Constant Pattern Test 2:\n");
+ printf("\tThis test allocates two pages of agp memory and fills them\n");
+ printf("with two seperate constant patterns. It then binds it into"
+ " each\n");
+ printf("available page pair in the aperture and tests to make sure it "
+ "matches.\n");
+ printf("Performing Simple Constant Pattern Test 2: ");
+ fflush(stdout);
+
+ pagePtr = PTR_TO_PAGE(gart, firstPage);
+ fillBufConstPattern(CONST_PATTERN_1, pagePtr, AGP_PAGE_SIZE);
+ fillBufConstPattern(CONST_PATTERN_2, pagePtr + AGP_PAGE_SIZE,
+ AGP_PAGE_SIZE);
+ flushWriteCombining();
+ unbindMemory(sectionKey);
+
+ for(i = firstPage; i < numberPages; i += 2) {
+ if(checkPageAvailable(i) == FALSE ||
+ checkPageAvailable(i + 1) == FALSE) continue;
+ if(numberPages - i < 2) continue;
+ bindMemory(sectionKey, i);
+ pagePtr = PTR_TO_PAGE(gart, i);
+ test = checkBufConstPattern(CONST_PATTERN_1, pagePtr, AGP_PAGE_SIZE);
+ if(test != -1) {
+ printf("failed!\n");
+ printf("Simple constant pattern test has failed at page %d\n",
+ i);
+ printf("The Dword at offset %d from the start of that page was "
+ "incorrect.\n", test);
+ printf("Expected : [0x%lx], Got : [0x%lx]\n", CONST_PATTERN_1,
+ ((unsigned int *)pagePtr) + test);
+ printf("Integrity test failed\n");
+ unbindMemory(sectionKey);
+ freeMemory(sectionKey);
+ return;
+ }
+ test = checkBufConstPattern(CONST_PATTERN_2, pagePtr + AGP_PAGE_SIZE,
+ AGP_PAGE_SIZE);
+ if(test != -1) {
+ printf("failed!\n");
+ printf("Simple constant pattern test has failed at page %d\n",
+ i + 1);
+ printf("The Dword at offset %d from the start of that page was "
+ "incorrect.\n", test);
+ printf("Expected : [0x%lx], Got : [0x%lx]\n", CONST_PATTERN_2,
+ ((unsigned int *)(pagePtr + AGP_PAGE_SIZE)) + test);
+ printf("Integrity test failed\n");
+ unbindMemory(sectionKey);
+ freeMemory(sectionKey);
+ return;
+ }
+ unbindMemory(sectionKey);
+ }
+ printf("passed.\n");
+ fflush(stdout);
+
+ pagePtr = PTR_TO_PAGE(gart, firstPage);
+ bindMemory(sectionKey, firstPage);
+ createRandomBuffer(patternBuf, 2 * AGP_PAGE_SIZE);
+ memcpy(pagePtr, patternBuf, 2 * AGP_PAGE_SIZE);
+ flushWriteCombining();
+ unbindMemory(sectionKey);
+
+ printf("\nRandom Pattern Test:\n");
+ printf("\tThis test allocates two pages of agp memory and fills them\n");
+ printf("with a random pattern. It then binds it into each available\n");
+ printf("page pair in the aperture and tests to make sure it matches.\n");
+ printf("Performing Random Pattern Test: ");
+ fflush(stdout);
+
+ for(i = firstPage; i < numberPages; i += 2) {
+ if(checkPageAvailable(i) == FALSE ||
+ checkPageAvailable(i + 1) == FALSE) continue;
+ if(numberPages - i < 2) continue;
+
+ bindMemory(sectionKey, i);
+ pagePtr = PTR_TO_PAGE(gart, i);
+ test = memcmp((void *)pagePtr, (void *)patternBuf, 2 * AGP_PAGE_SIZE);
+
+ if(test != 0) {
+ printf("failed!\n");
+ printf("Random pattern test has failed at page %d\n",
+ i);
+ printf("Hex dump of first %d bytes of expected data:\n",
+ HEXDUMP_BYTES);
+ printHexDump(patternBuf, HEXDUMP_BYTES);
+ printf("\nHex dump of first %d bytes of actual data:\n",
+ HEXDUMP_BYTES);
+ printHexDump(pagePtr, HEXDUMP_BYTES);
+ printf("\nIntegrity test failed\n");
+ unbindMemory(sectionKey);
+ freeMemory(sectionKey);
+ return;
+ }
+ unbindMemory(sectionKey);
+ }
+ printf("passed.\n");
+ fflush(stdout);
+
+ freeMemory(sectionKey);
+
+ printf("\nAperture Integrity Tests Complete\n");
+}
+
+#define RECYCLE_KEYS_TO_TEST 10
+/* Test the key recycling mechanism to make sure it's working correctly. */
+void keyRecycleTest()
+{
+ int keys[RECYCLE_KEYS_TO_TEST];
+ int newKeys[3];
+ int midpt, start, end;
+ int i;
+
+ printf("\nKey Recycle Test:\n");
+ printf("This test insures that the key recycling is functioning "
+ "properly.\n");
+ printf("This is needed to insure that an Xserver can continue to "
+ "recycle\n");
+ printf("and not leak keys, since there are only a finite amount.\n");
+ printf("\nNow peforming key recycle test: ");
+
+ for(i = 0; i < RECYCLE_KEYS_TO_TEST; i++) {
+ int key = allocMemory(1);
+ if(key < 0) {
+ printf("failed!\n");
+ printf("Failed to allocate key to test with.\n");
+ return;
+ }
+ keys[i] = key;
+ }
+ /* Hold onto the keys values */
+ midpt = keys[RECYCLE_KEYS_TO_TEST / 2];
+ start = keys[0];
+ end = keys[RECYCLE_KEYS_TO_TEST - 1];
+
+ freeMemory(midpt);
+ freeMemory(start);
+ freeMemory(end);
+
+ for(i = 0; i < 3; i++) {
+ int key = allocMemory(1);
+ if(key < 0) {
+ printf("failed!\n");
+ printf("Failed to allocate key to test with.\n");
+ return;
+ }
+ newKeys[i] = key;
+ }
+ if(start != newKeys[0] ||
+ midpt != newKeys[1] ||
+ end != newKeys[2]) {
+ printf("failed!\n");
+ } else {
+ printf("passed.\n");
+ }
+
+ keys[RECYCLE_KEYS_TO_TEST / 2] = newKeys[1];
+ keys[0] = newKeys[0];
+ keys[RECYCLE_KEYS_TO_TEST - 1] = newKeys[2];
+
+ for(i = 0; i < RECYCLE_KEYS_TO_TEST; i++) {
+ freeMemory(keys[i]);
+ }
+ printf("\n");
+}
+
+#define CLIENTS_TO_TEST 2
+#define CLIENT_SLEEP_PERIOD 3
+
+int initializeClient(agp_region *region)
+{
+#ifdef DEBUG
+ printf("Using AGPIOC_RESERVE\n");
+#endif
+ if(ioctl(gartfd, AGPIOC_RESERVE, region) != 0) {
+ perror("ioctl(AGPIOC_RESERVE)");
+ exit(1);
+ }
+
+ return 0;
+}
+
+void clientTestOne(int aper_size, int create_segments)
+{
+ unsigned long apertureSize = aper_size * 0x100000;
+ int numberPages = apertureSize / AGP_PAGE_SIZE;
+ pid_t clients[CLIENTS_TO_TEST];
+ struct passwd *userNobody;
+ agp_region region;
+ agp_segment segment;
+ int passed = 1;
+ int i;
+
+ region.seg_count = 1;
+ region.seg_list = &segment;
+ segment.pg_start = 0;
+ segment.pg_count = numberPages;
+ segment.prot = PROT_READ;
+
+ userNobody = getpwnam("nobody");
+ if(!userNobody) {
+ printf("failed!\n");
+ printf("Can not perform client test since user nobody can't be found\n");
+ return;
+ }
+ for(i = 0; i < CLIENTS_TO_TEST; i++) {
+ pid_t cPid = fork();
+
+ if(cPid == 0) {
+ /* Client path */
+ /* Just test to see if the client could map the aperture */
+ unmapAgpAperture(aper_size);
+ setuid(userNobody->pw_uid);
+ sleep(CLIENT_SLEEP_PERIOD);
+ openAgpDevice(O_RDONLY);
+ mapAgpAperture(aper_size, PROT_READ);
+
+ if(gart == (unsigned char *)0xffffffff) {
+ exit(1);
+ } else {
+ exit(0);
+ }
+ } else if(cPid == -1) {
+ /* Error path */
+ int k;
+ for(k = 0; k < i; k++) {
+ kill(clients[k], SIGKILL);
+ }
+ printf("failed!\n");
+ printf("Couldn't create enough clients\n");
+ return;
+ } else {
+ /* Normal Path */
+ clients[i] = cPid;
+ }
+ }
+ /* Let the clients do their thing */
+ sleep(1);
+ if(create_segments == 1) {
+ /* Setup the segments with the proper pids */
+ for(i = 0; i < CLIENTS_TO_TEST; i++) {
+ region.pid = clients[i];
+ initializeClient(&region);
+ }
+ }
+ for(i = 0; i < CLIENTS_TO_TEST; i++) {
+ int status;
+ waitpid(clients[i], &status, 0);
+ if(WIFEXITED(status) && WEXITSTATUS(status) != 0) {
+ /* Failure */
+ if(create_segments && passed) {
+ printf("failed!\n");
+ printf("%d failed to map the agp aperture\n", clients[i]);
+ }
+ passed = 0;
+ }
+ }
+
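+ /* With segments set up the clients should have mapped successfully;
+ * without them the mmap must be refused, so a client "pass" in that
+ * case means the permission check failed. */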
+ if(passed && create_segments) printf("passed.\n");
+ if(!passed && !create_segments) printf("passed.\n");
+ if(passed && !create_segments) printf("failed.\n");
+}
+
+void clientTest(int aper_size)
+{
+ printf("\nClient Permissions Test:\n");
+ printf("\tThis test tests the ability of /dev/agpgart to mmap the "
+ "aperture\n");
+ printf("into a clients process space.\n");
+ printf("\tThis test will fail if the user nobody can't read the\n");
+ printf("/dev/agpgart device. This test only tests read mappings\n");
+ printf("since most installations have set permissions as to only allow "
+ "reads.\n");
+ printf("The first test checks to see if permission setting works. "
+ " While\n");
+ printf("the second test checks to see if the mmap is blocked properly.\n");
+ printf("\nNow peforming client permissions test: ");
+ fflush(stdout);
+ clientTestOne(aper_size, 1);
+ printf("\nNow testing permission failure case: ");
+ fflush(stdout);
+ clientTestOne(aper_size, 0);
+}
+
+void testMemoryFailures(int aper_size)
+{
+ unsigned long apertureSize = aper_size * 0x100000;
+ int numberPages = apertureSize / AGP_PAGE_SIZE;
+ int allocKey1, allocKey2, allocKey3;
+ int firstPage = 0;
+ agp_bind bind;
+
+ /* This set of tests checks that some invalid requests are handled properly. */
+ allocKey1 = allocMemory(1);
+ allocKey2 = allocMemory(2);
+ allocKey3 = allocMemory(3);
+
+ printf("\nMemory Ioctl Sanity Test:\n");
+ printf("\tThis set of tests checks that the proper error values are "
+ "returned\n");
+ printf("from /dev/agpgart when several incorrect requests are performed.\n");
+
+ /* Testing double freeing */
+ printf("\nNow testing double freeing a block of agp memory: ");
+ fflush(stdout);
+ freeMemory(allocKey3);
+ if(ioctl(gartfd, AGPIOC_DEALLOCATE, allocKey3) != -1 &&
+ errno != EINVAL) {
+ printf("failed!\n");
+ printf("Deallocate ioctl didn't return expected error value.\n");
+ } else {
+ printf("passed.\n");
+ }
+
+ /* Testing inserting memory past the aperture end */
+ bind.key = allocKey1;
+ bind.pg_start = numberPages;
+ printf("Now testing binding a single page past the agp aperture end: ");
+ fflush(stdout);
+ if(ioctl(gartfd, AGPIOC_BIND, &bind) != -1 &&
+ errno != EINVAL) {
+ printf("failed!\n");
+ printf("Bind ioctl didn't return expected error value.\n");
+ printf("Inserting past the end of the agp aperture didn't fail\n");
+ printf("properly.\n");
+ } else {
+ printf("passed.\n");
+ }
+ bind.key = allocKey2;
+ bind.pg_start = numberPages - 1;
+ printf("Now testing bind with a block lying inside and outside: ");
+ fflush(stdout);
+ if(ioctl(gartfd, AGPIOC_BIND, &bind) != -1 &&
+ errno != EINVAL) {
+ printf("failed!\n");
+ printf("Bind ioctl didn't return expected error value.\n");
+ printf("Inserting past the end of the agp aperture didn't fail\n");
+ printf("properly for two pages.\n");
+ } else {
+ printf("passed.\n");
+ }
+
+ /* Test several busy conditions */
+ firstPage = findFirstRegionOfSize(2);
+ if(firstPage == -1) {
+ printf("Fatal error, no regions of 2 pages inside aperture, exiting\n");
+ exit(1);
+ }
+
+ bind.key = allocKey1;
+ bind.pg_start = firstPage;
+
+ bindMemory(allocKey1, firstPage);
+ printf("Now testing double binding of the same block of memory: ");
+ fflush(stdout);
+ if(ioctl(gartfd, AGPIOC_BIND, &bind) != -1 &&
+ errno != EINVAL) {
+ printf("failed!\n");
+ printf("Bind ioctl didn't return the expected error value.\n");
+ } else {
+ printf("passed.\n");
+ }
+
+ bind.key = allocKey2;
+ bind.pg_start = firstPage;
+ printf("Now testing binding another block of memory to the same place: ");
+ fflush(stdout);
+ if(ioctl(gartfd, AGPIOC_BIND, &bind) != -1 &&
+ errno != EBUSY) {
+ printf("failed!\n");
+ printf("Bind ioctl didn't return a busy condition as expected.\n");
+ } else {
+ printf("passed\n");
+ }
+ unbindMemory(allocKey1);
+ bindMemory(allocKey1, firstPage + 1);
+ printf("Now testing binding a block of memory within anothers bounds: ");
+ fflush(stdout);
+ if(ioctl(gartfd, AGPIOC_BIND, &bind) != -1 &&
+ errno != EBUSY) {
+ printf("failed!\n");
+ printf("Bind ioctl didn't return a busy condition as expected.\n");
+ } else {
+ printf("passed\n");
+ }
+ freeMemory(allocKey1);
+ printf("Now testing if free automatically unbound a bound page: ");
+ if(ioctl(gartfd, AGPIOC_BIND, &bind) != 0) {
+ printf("failed!\n");
+ perror("Bind shouldn't have failed. Reason");
+ } else {
+ printf("passed\n");
+ }
+ freeMemory(allocKey2);
+}
+
+
+/* A set of tests of the agp 3.0 functionality if it is available
+ * from this /dev/agpgart.
+ */
+agp_driver_info *agp_alloc_driver_info(int ctx)
+{
+ agp_query_request rq;
+ agp_driver_info *drv;
+
+ rq.ctx = ctx;
+
+ if(ioctl(gartfd, AGPIOC_QUERY_SIZE, &rq) != 0) {
+ perror("query_size");
+ return NULL;
+ }
+ drv = malloc(rq.size);
+ return drv;
+}
+
+int agp_copy_driver_info(int ctx, agp_driver_info *buffer)
+{
+ agp_query_request rq;
+
+ rq.ctx = ctx;
+ rq.buffer = (void *)buffer;
+ if(ioctl(gartfd, AGPIOC_QUERY_CTX, &rq) != 0) {
+ perror("query ctx");
+ return errno;
+ }
+ return 0;
+}
+
+void agp_print_one_master(agp_master *info)
+{
+ printf("\nAgp Master Information:\n");
+ printf("Agp version %d.%d\n", info->agp_major_version,
+ info->agp_minor_version);
+ printf("Request Depth : %d\n", info->num_requests_enqueue);
+ printf("Pci Vender ID : 0x%04x\n", info->master_pci_id >> 16);
+ printf("Pci Device ID : 0x%04x\n", info->master_pci_id & 0xffff);
+ if(info->agp_major_version >= 3) {
+ printf("Calibration cycle : %d ms\n", info->calibration_cycle_ms);
+ if(info->flags & AGP_SUPPORTS_AGP_3_0_ENABLED) {
+ printf("Agp Modes Supported : %s%s\n",
+ (info->flags & AGP_SUPPORTS_SPEED_4X) ? "4X " : "",
+ (info->flags & AGP_SUPPORTS_SPEED_8X) ? "8X " : "");
+ } else {
+ printf("Agp Modes Supported : %s%s%s\n",
+ (info->flags & AGP_SUPPORTS_SPEED_1X) ? "1X " : "",
+ (info->flags & AGP_SUPPORTS_SPEED_2X) ? "2X " : "",
+ (info->flags & AGP_SUPPORTS_SPEED_4X) ? "4X " : "");
+ }
+ printf("Supports isochronous operation mode : %s\n",
+ (info->flags & AGP_SUPPORTS_ISOCHRONOUS) ? "true" : "false");
+ printf("Supports Sideband addressing : %s\n",
+ (info->flags & AGP_SUPPORTS_SBA) ? "true" : "false");
+ printf("Supports Fast write : %s\n",
+ (info->flags & AGP_SUPPORTS_FAST_WRITE) ? "true" : "false");
+ printf("Supports over 4G addressing : %s\n",
+ (info->flags & AGP_SUPPORTS_OVER4G_ADDR) ? "true" : "false");
+ } else {
+ printf("Agp Modes Supported : %s%s%s\n",
+ (info->flags & AGP_SUPPORTS_SPEED_1X) ? "1X " : "",
+ (info->flags & AGP_SUPPORTS_SPEED_2X) ? "2X " : "",
+ (info->flags & AGP_SUPPORTS_SPEED_4X) ? "4X " : "");
+ printf("Supports Sideband addressing : %s\n",
+ (info->flags & AGP_SUPPORTS_SBA) ? "true" : "false");
+ printf("Supports Fast write : %s\n",
+ (info->flags & AGP_SUPPORTS_FAST_WRITE) ? "true" : "false");
+ printf("Supports over 4G addressing : %s\n",
+ (info->flags & AGP_SUPPORTS_OVER4G_ADDR) ? "true" : "false");
+ }
+}
+
+void agp_output_driver_info(agp_driver_info *info)
+{
+ agp_master *masters = info->masters;
+ int i, num_masters = info->num_masters;
+
+ printf("Agp Driver Name : %s\n", info->driver_name);
+ printf("Agp context id : %d\n", info->context_id);
+ printf("Agp page size : %d\n", 1 << info->agp_page_shift);
+ printf("Alloc page size : %d\n", 1 << info->alloc_page_shift);
+ printf("Agp page mask : 0x%lx\n", info->agp_page_mask);
+ printf("Alloc page mask : 0x%lx\n", info->alloc_page_mask);
+ printf("Maximum system pages for Agp : %d\n", info->max_system_pages);
+ printf("Current system pages used by Agp : %d\n", info->current_memory);
+ printf("\nAgp Target Information:\n");
+ printf("Agp version %d.%d\n", info->agp_major_version,
+ info->agp_minor_version);
+ printf("Pci Vender ID : 0x%04x\n", info->target_pci_id >> 16);
+ printf("Pci Device ID : 0x%04x\n", info->target_pci_id & 0xffff);
+ printf("Agp aperture base : 0x%lx\n", info->aper_base);
+ printf("Agp aperture size (MB) : %d\n", info->aper_size);
+ printf("Request Depth : %d\n", info->num_requests_enqueue);
+ if(info->agp_major_version >= 3) {
+ printf("Optimum agp request size : %d\n", info->optimum_request_size);
+ printf("Calibration cycle : %d ms\n", info->calibration_cycle_ms);
+ if(info->target_flags & AGP_SUPPORTS_AGP_3_0_ENABLED) {
+ printf("Agp Modes Supported : %s%s\n",
+ (info->target_flags & AGP_SUPPORTS_SPEED_4X) ? "4X " : "",
+ (info->target_flags & AGP_SUPPORTS_SPEED_8X) ? "8X " : "");
+ } else {
+ printf("Agp Modes Supported : %s%s%s\n",
+ (info->target_flags & AGP_SUPPORTS_SPEED_1X) ? "1X " : "",
+ (info->target_flags & AGP_SUPPORTS_SPEED_2X) ? "2X " : "",
+ (info->target_flags & AGP_SUPPORTS_SPEED_4X) ? "4X " : "");
+ }
+ printf("Supports isochronous operation mode : %s\n",
+ (info->target_flags & AGP_SUPPORTS_ISOCHRONOUS) ?
+ "true" : "false");
+ printf("Supports cached memory accesses : %s\n",
+ (info->target_flags & AGP_SUPPORTS_CACHED_MEMORY) ?
+ "true" : "false");
+ printf("Supports Sideband addressing : %s\n",
+ (info->target_flags & AGP_SUPPORTS_SBA) ? "true" : "false");
+ printf("Supports Fast write : %s\n",
+ (info->target_flags & AGP_SUPPORTS_FAST_WRITE) ?
+ "true" : "false");
+ printf("Supports over 4G addressing : %s\n",
+ (info->target_flags & AGP_SUPPORTS_OVER4G_ADDR) ?
+ "true" : "false");
+ printf("Supports directly mapping the agp aperture : %s\n",
+ (info->target_flags & AGP_SUPPORTS_APER_MMAP) ?
+ "true" : "false");
+ } else {
+ printf("Agp Modes Supported : %s%s%s\n",
+ (info->target_flags & AGP_SUPPORTS_SPEED_1X) ? "1X " : "",
+ (info->target_flags & AGP_SUPPORTS_SPEED_2X) ? "2X " : "",
+ (info->target_flags & AGP_SUPPORTS_SPEED_4X) ? "4X " : "");
+ printf("Supports Sideband addressing : %s\n",
+ (info->target_flags & AGP_SUPPORTS_SBA) ? "true" : "false");
+ printf("Supports Fast write : %s\n",
+ (info->target_flags & AGP_SUPPORTS_FAST_WRITE) ?
+ "true" : "false");
+ printf("Supports over 4G addressing : %s\n",
+ (info->target_flags & AGP_SUPPORTS_OVER4G_ADDR) ?
+ "true" : "false");
+ printf("Supports directly mapping the agp aperture : %s\n",
+ (info->target_flags & AGP_SUPPORTS_APER_MMAP) ?
+ "true" : "false");
+ }
+ printf("Number of detected agp masters : %d\n", num_masters);
+ if(num_masters) {
+ agp_print_one_master(masters);
+ masters++;
+ }
+}
+
+/* Just tests to make sure that we can completely copy and print out
+ * an agp extended info structure properly. Only tests context zero.
+ */
+void print_agp3_info(void)
+{
+ agp_driver_info *info;
+
+ printf("\nNow testing agp 3.0 basic driver information copying ioctls: ");
+ fflush(stdout);
+
+ info = agp_alloc_driver_info(0);
+ if(!info) {
+ printf("failed!\n");
+ printf("Error allocating buffer for driver info struct.\n");
+ return;
+ }
+ if(agp_copy_driver_info(0, info) != 0) {
+ printf("failed!\n");
+ printf("Error copying information from kernel.\n");
+ return;
+ }
+ printf("passed.\n");
+ printf("Please insure that the following information matches what you "
+ "expect :\n");
+ agp_output_driver_info(info);
+}
+
+void test_context(void)
+{
+ int num_contexts;
+
+ printf("\nNow testing basic context support: ");
+ fflush(stdout);
+
+ num_contexts = ioctl(gartfd, AGPIOC_NUM_CTXS);
+ if(num_contexts < 1) {
+ printf("failed!\n");
+ printf("Expected at least one context, got : %d\n", num_contexts);
+ perror("");
+ return;
+ }
+ /* Test that we get a valid return value for changing to context zero.
+ * All others are expected to fail if we don't implement multiple agp
+ * bridges per system.
+ */
+ if(ioctl(gartfd, AGPIOC_CHG_CTX, 0) != 0) {
+ printf("failed!\n");
+ printf("Expected success for change to context zero.\n");
+ perror("");
+ return;
+ }
+ /* Test a context just past our range for the failure, since this
+ * agpgart might truly support more than one context.
+ */
+ if(ioctl(gartfd, AGPIOC_CHG_CTX, num_contexts) == 0) {
+ printf("failed!\n");
+ printf("Expected failure for a context outside the valid range.\n");
+ perror("");
+ return;
+ }
+
+ printf("passed.\n");
+ printf("This agpgart implementation reports that it supports %d contexts.\n",
+ num_contexts);
+}
+
+unsigned char *agp_map_memory(int key, off_t pg_ofs, size_t pages,
+ unsigned long prot, unsigned long flags)
+{
+ agp_map_request rq;
+ rq.key = key;
+ rq.pg_start = pg_ofs;
+ rq.page_count = pages;
+ rq.prot = prot;
+ rq.flags = flags;
+ if(ioctl(gartfd, AGPIOC_MAP, &rq) != 0) {
+ perror("map ioctl");
+ return NULL;
+ }
+ return (unsigned char *)rq.addr;
+}
+
+int agp_unmap_memory(int key, unsigned char *ptr)
+{
+ agp_map_request rq;
+ rq.key = key;
+ rq.addr = (unsigned long)ptr;
+ if(ioctl(gartfd, AGPIOC_UNMAP, &rq) != 0) {
+ perror("unmap ioctl");
+ return errno;
+ }
+ return 0;
+}
+
+void test_usermap(void)
+{
+ unsigned char *userMap;
+ int worked;
+ int firstPage, key1;
+
+ firstPage = findFirstRegionOfSize(2048);
+ if(firstPage == -1) {
+ printf("There are no 8MB regions, so we can't perform the "
+ "User map/unmap tests.\n");
+ return;
+ }
+
+ key1 = allocAndBind(firstPage, 2048);
+
+ printf("User map/unmap test:\n");
+ printf("This set of tests checks to make sure that the ioctls for"
+ "mapping\na piece of agp memory are working correctly. It "
+ "also attempts to\nmeasure the performance of these mappings.\n");
+
+ printf("\nNow testing user map of 8 MB of bound agp memory: ");
+ fflush(stdout);
+
+ userMap = agp_map_memory(key1, 0, 2048, PROT_READ | PROT_WRITE, MAP_SHARED);
+
+ if(!userMap) {
+ printf("failed!\n");
+ printf("User map testing failed\n");
+ return;
+ } else {
+ printf("success.\n");
+ }
+
+ printf("\nNow attempting to use this mapping:\n");
+
+ printf("Testing from offset into gart : 0x%08x\n",
+ (unsigned int)(firstPage * AGP_PAGE_SIZE));
+
+ printf("Testing basic memory performance:\n");
+ simpleBenchMark(userMap);
+ printf("Now testing memory visability through agp aperture:\n");
+ worked = oldIntegrity(firstPage, key1, -1, userMap,
+ PTR_TO_PAGE(gart, firstPage), FALSE);
+
+ if(!worked) {
+ printf("\nThe mappings don't match, test failed!\n");
+ return;
+ }
+
+ printf("\nNow testing user unmap of 8 MB of bound memory: ");
+ fflush(stdout);
+ worked = agp_unmap_memory(key1, userMap);
+ if(worked != 0) {
+ printf("failed!\n");
+ printf("User map testing failed.\n");
+ return;
+ } else {
+ printf("success.\n");
+ }
+
+ printf("\nNow testing user map of 8 MB of unbound memory: ");
+ fflush(stdout);
+ unbindMemory(key1);
+
+ userMap = agp_map_memory(key1, 0, 2048, PROT_READ | PROT_WRITE, MAP_SHARED);
+ if(!userMap) {
+ printf("failed!\n");
+ printf("User map testing failed.\n");
+ return;
+ } else {
+ printf("success.\n");
+ }
+
+ printf("\nNow attempting to use this mapping:\n");
+
+ printf("Testing from offset into gart : 0x%08x\n",
+ (unsigned int)(firstPage * AGP_PAGE_SIZE));
+
+ printf("Testing basic memory performance:\n");
+ simpleBenchMark(userMap);
+ printf("Now testing memory visability through agp aperture:\n");
+ worked = oldIntegrity(firstPage, key1, -1, userMap,
+ PTR_TO_PAGE(gart, firstPage), TRUE);
+
+ unbindMemory(key1);
+
+ if(!worked) {
+ printf("\nThe mappings don't match, test failed!\n");
+ return;
+ }
+
+ printf("\nNow testing user unmap of 8 MB of unbound memory: ");
+ fflush(stdout);
+ worked = agp_unmap_memory(key1, userMap);
+ if(worked != 0) {
+ printf("failed!\n");
+ printf("User map testing failed.\n");
+ return;
+ } else {
+ printf("success.\n");
+ }
+
+ printf("\nAll user map tests completed successfully.\n");
+
+ freeMemory(key1);
+}
+
+void agp3_tests(void)
+{
+ printf("\nNow performing some tests to test the "
+ "agp 3.0 infrastructure\nprovided by /dev/agpgart.\n");
+ print_agp3_info();
+ test_context();
+ test_usermap();
+}
+
+/* Some more tests that could be written */
+
+/* A client test that checks to see if a client's writes to the aperture
+ * are correctly visible in reads from the test program. Requires
+ * that /dev/agpgart be r/w by nobody.
+ * Could chmod /dev/agpgart for the test and then put it back how it was.
+ */
+
+/* A set of tests of the ioctl permissions when the /dev/agpgart isn't
+ * acquired.
+ */
+
+/* A set of tests of the ioctl permissions for a client, everything should
+ * fail.
+ */
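+
+/* A rough sketch of the first idea above (hypothetical helper, not
+ * called anywhere): temporarily open up /dev/agpgart, let a forked
+ * client write a pattern through its mapping, verify it through our
+ * own mapping, then restore the permissions. Left disabled because it
+ * would also need <sys/stat.h> and the reserved-segment bookkeeping.
+ */
+#if 0
+static void clientWriteVisibilityTest(int aper_size)
+{
+ struct stat st;
+
+ if(stat("/dev/agpgart", &st) != 0) return;
+ chmod("/dev/agpgart", st.st_mode | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
+ /* ... fork a client as in clientTestOne(), have it fill its mapping
+ * with CONST_PATTERN_1, then memcmp through our mapping here ... */
+ chmod("/dev/agpgart", st.st_mode);
+}
+#endif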
+
+int main(void)
+{
+ int aperSize;
+
+ gart = (unsigned char *)0xffffffff;
+
+ openAgpDevice(O_RDWR);
+
+ aperSize = initializeAgp();
+
+ apertureIntegrityTests(aperSize);
+#ifdef DEBUG
+ printf("Current number of pages : %d\n", getCurrentPages());
+#endif
+ keyRecycleTest();
+#ifdef DEBUG
+ printf("Current number of pages : %d\n", getCurrentPages());
+#endif
+ clientTest(aperSize);
+
+ testMemoryFailures(aperSize);
+#ifdef DEBUG
+ printf("Current number of pages : %d\n", getCurrentPages());
+#endif
+ legacyTests();
+#ifdef DEBUG
+ printf("Using AGPIOC_RELEASE\n");
+#endif
+
+ if(agpgartSupportsGetMap == TRUE) {
+ printf("Detected AGP 3.0 capable /dev/agpgart.\n");
+ agp3_tests();
+ }
+
+ if(ioctl(gartfd, AGPIOC_RELEASE) != 0) {
+ perror("ioctl(AGPIOC_RELEASE)");
+ exit(1);
+ }
+
+ close(gartfd);
+
+ return 0;
+}
diff --git a/testgart.c b/testgart.c
new file mode 100644
index 0000000..4475ab9
--- /dev/null
+++ b/testgart.c
@@ -0,0 +1,159 @@
+/*
+ *
+ * Test program for AGPGART module under Linux
+ *
+ * Copyright (C) 1999 Jeff Hartmann,
+ * Precision Insight, Inc., Xi Graphics, Inc.
+ *
+ */
+
+
+
+#define DEBUG
+
+
+/*
+ * Set the offset (in KB) past the stolen memory.
+ */
+
+#if 0
+#define OFFSET (32 * 1024 - 132)
+#else
+#define OFFSET (16 * 1024 - 132)
+#endif
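+
+/* For example, the default above is 16 MB minus 132 KB of stolen memory;
+ * main() divides the KB value by 4 to get 4 KB pages, and init_agp()
+ * advances the mapping by that many pages.
+ */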
+
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#include <linux/types.h>
+#include <linux/agpgart.h>
+#include <asm/mtrr.h>
+#include <errno.h>
+#include <stdlib.h>
+
+
+unsigned char *gart;
+int gartfd;
+int mtrr;
+int offset;
+
+int usec( void ) {
+ struct timeval tv;
+ struct timezone tz;
+
+ gettimeofday( &tv, &tz );
+ return (tv.tv_sec & 2047) * 1000000 + tv.tv_usec;
+}
+
+int init_agp(void)
+{
+ agp_info info;
+ agp_setup setup;
+
+#ifdef DEBUG
+ printf("Using AGPIOC_ACQUIRE\n");
+#endif
+ if(ioctl(gartfd, AGPIOC_ACQUIRE) != 0)
+ {
+ perror("ioctl(AGPIOC_ACQUIRE)");
+ exit(1);
+ }
+#ifdef DEBUG
+ printf("Using AGPIOC_INFO\n");
+#endif
+ if(ioctl(gartfd, AGPIOC_INFO, &info) != 0)
+ {
+ perror("ioctl(AGPIOC_INFO)");
+ exit(1);
+ }
+
+#ifdef DEBUG
+ printf("version: %i.%i\n", info.version.major, info.version.minor);
+ printf("bridge id: 0x%lx\n", info.bridge_id);
+ printf("agp_mode: 0x%lx\n", info.agp_mode);
+ printf("aper_base: 0x%lx\n", info.aper_base);
+ printf("aper_size: %i\n", info.aper_size);
+ printf("pg_total: %i\n", info.pg_total);
+ printf("pg_system: %i\n", info.pg_system);
+ printf("pg_used: %i\n", info.pg_used);
+#endif
+
+ gart = mmap(NULL, info.aper_size * 0x100000, PROT_READ | PROT_WRITE, MAP_SHARED, gartfd, 0);
+
+ if(gart == MAP_FAILED)
+ {
+ perror("mmap");
+ close(gartfd);
+ exit(1);
+ }
+
+ gart += offset * 4096;
+
+ setup.agp_mode = info.agp_mode;
+#ifdef DEBUG
+ printf("Using AGPIOC_SETUP\n");
+#endif
+ if(ioctl(gartfd, AGPIOC_SETUP, &setup) != 0)
+ {
+ perror("ioctl(AGPIOC_SETUP)");
+ exit(1);
+ }
+
+ return(0);
+}
+
+int xchangeDummy;
+
+#ifndef __x86_64__
+void FlushWriteCombining( void ) {
+ __asm__ volatile( " push %%eax ; xchg %%eax, %0 ; pop %%eax" : : "m" (xchangeDummy));
+ __asm__ volatile( " push %%eax ; push %%ebx ; push %%ecx ; push %%edx ; movl $0,%%eax ; cpuid ; pop %%edx ; pop %%ecx ; pop %%ebx ; pop %%eax" : /* no outputs */ : /* no inputs */ );
+}
+#else
+void FlushWriteCombining( void ) {
+
+ __asm__ volatile("\t"
+ "xchg %%eax, %0\n\t"
+ :
+ : "m" (xchangeDummy)
+ : "eax");
+
+ __asm__ volatile ("\t"
+ "push %%rbx\n\t"
+ "cpuid\n\t"
+ "pop %%rbx\n\t"
+ :
+ :
+ :"ecx", "edx", "cc");
+}
+#endif
+
+int main(int argc, char *argv[])
+{
+ int i;
+ int key;
+ int key2;
+ agp_info info;
+
+ if (argc > 1)
+ offset = atoi(argv[1]);
+ else
+ offset = OFFSET;
+
+ offset /= 4;
+
+ gartfd = open("/dev/agpgart", O_RDWR);
+ if (gartfd == -1)
+ {
+ perror("open");
+ exit(1);
+ }
+
+ init_agp();
+
+ close(gartfd);
+
+ return 0;
+}
+