author    Doug Rabson <dfr@freebsd.org>  2000-05-22 10:28:45 +0000
committer Doug Rabson <dfr@freebsd.org>  2000-05-22 10:28:45 +0000
commit    ba6e222e9eacf94d9a07523000837bce224a8ac9 (patch)
tree      841b1fd8d5acafac9571824fab77c3139af3707c
parent    b6da7b4178a3c8158fb56fa3145f8328e5e4af0d (diff)
Update so that it builds on the latest FreeBSD-current and change to use the
new module versioning system. Add initial support for AGP (disabled since I
haven't committed the AGP driver to FreeBSD yet).
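The module-versioning switch replaces the KMODDEPS make-variable dependencies dropped from the gamma and tdfx Makefiles with in-source declarations: the core advertises its interface with MODULE_VERSION() and each consumer states the version range it accepts with MODULE_DEPEND(). A minimal sketch of the pattern, assuming a hypothetical consumer named mydrv (drm and agp are the module names actually used in this patch):

    #include <sys/param.h>
    #include <sys/kernel.h>
    #include <sys/module.h>

    /* In the core module (see bsd/drm/init.c below): export interface version 1. */
    MODULE_VERSION(drm, 1);

    /* In a consumer such as gamma or tdfx: accept drm versions 1..1, prefer 1. */
    MODULE_DEPEND(mydrv, drm, 1, 1, 1);

    /* When built with -DDRM_AGP the core itself also depends on the agp module
     * (see agpsupport.c below). */
    MODULE_DEPEND(drm, agp, 1, 1, 1);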
-rw-r--r--  bsd-core/drmP.h         |   85
-rw-r--r--  bsd-core/tdfx/Makefile  |    3
-rw-r--r--  bsd/drm.h               |    9
-rw-r--r--  bsd/drm/Makefile        |    7
-rw-r--r--  bsd/drm/agpsupport.c    |  271
-rw-r--r--  bsd/drm/init.c          |    2
-rw-r--r--  bsd/drm/memory.c        |  125
-rw-r--r--  bsd/drmP.h              |   85
-rw-r--r--  bsd/gamma/Makefile      |    3
-rw-r--r--  bsd/gamma/gamma_dma.c   |    2
-rw-r--r--  bsd/gamma/gamma_drv.c   |    2
-rw-r--r--  bsd/tdfx/Makefile       |    3
-rw-r--r--  bsd/tdfx/tdfx_drv.c     |    6
-rw-r--r--  linux-core/r128_drv.c   |  737
-rw-r--r--  linux/r128_bufs.c       |  309
-rw-r--r--  linux/r128_context.c    |  214
-rw-r--r--  linux/r128_dma.c        |  908
-rw-r--r--  linux/r128_drm.h        |  111
-rw-r--r--  linux/r128_drv.c        |  737
-rw-r--r--  linux/r128_drv.h        |  226
20 files changed, 3826 insertions(+), 19 deletions(-)
diff --git a/bsd-core/drmP.h b/bsd-core/drmP.h
index 0ab33349..c5951ff7 100644
--- a/bsd-core/drmP.h
+++ b/bsd-core/drmP.h
@@ -49,6 +49,11 @@
#include <sys/sysctl.h>
#include <sys/select.h>
#include <sys/bus.h>
+#include <sys/taskqueue.h>
+
+#ifdef DRM_AGP
+#include <pci/agpvar.h>
+#endif
#include "drm.h"
@@ -73,11 +78,19 @@ test_and_set_bit(int b, volatile u_int32_t *p)
}
#define clear_bit(b, p) atomic_clear_int(p, 1<<(b))
+#define set_bit(b, p) atomic_set_int(p, 1<<(b))
+#define test_bit(b, p) (*(u_int32_t*)p & (1<<(b)))
#define spldrm() spltty()
#define memset(p, v, s) bzero(p, s)
+/*
+ * Software interrupts for DMA pipe feeding. The FreeBSD kernel apis
+ * are severely lacking here.
+ */
+#define SWI_DRI (SWI_VM+2)
+
#define DRM_DEBUG_CODE 2 /* Include debugging code (if > 1, then
also include looping detection. */
#define DRM_DMA_HISTOGRAM 1 /* Make histogram of DMA latency. */
@@ -108,6 +121,10 @@ test_and_set_bit(int b, volatile u_int32_t *p)
#define DRM_MEM_CMDS 12
#define DRM_MEM_MAPPINGS 13
#define DRM_MEM_BUFLISTS 14
+#define DRM_MEM_AGPLISTS 15
+#define DRM_MEM_TOTALAGP 16
+#define DRM_MEM_BOUNDAGP 17
+#define DRM_MEM_CTXBITMAP 18
/* Backward compatibility section */
#ifndef _PAGE_PWT
@@ -207,6 +224,7 @@ typedef struct drm_buf {
int used; /* Amount of buffer in use (for DMA) */
unsigned long offset; /* Byte offset (used internally) */
void *address; /* Address of buffer */
+ unsigned long bus_address; /* Bus address of buffer */
struct drm_buf *next; /* Kernel-only: used for free list */
__volatile__ int waiting; /* On kernel DMA queue */
__volatile__ int pending; /* On hardware DMA queue */
@@ -222,6 +240,10 @@ typedef struct drm_buf {
DRM_LIST_PRIO = 4,
DRM_LIST_RECLAIM = 5
} list; /* Which list we're on */
+
+ void *dev_private;
+ int dev_priv_size;
+
#if DRM_DMA_HISTOGRAM
struct timespec time_queued; /* Queued to kernel DMA queue */
struct timespec time_dispatched; /* Dispatched to hardware */
@@ -349,6 +371,9 @@ typedef struct drm_device_dma {
int page_count;
vm_offset_t *pagelist;
unsigned long byte_count;
+ enum {
+ _DRM_DMA_USE_AGP = 0x01
+ } flags;
/* DMA support */
drm_buf_t *this_buffer; /* Buffer being sent */
@@ -357,6 +382,30 @@ typedef struct drm_device_dma {
int waiting; /* Processes waiting on free bufs */
} drm_device_dma_t;
+#ifdef DRM_AGP
+
+typedef struct drm_agp_mem {
+ void *handle;
+ unsigned long bound; /* address */
+ int pages;
+ struct drm_agp_mem *prev;
+ struct drm_agp_mem *next;
+} drm_agp_mem_t;
+
+typedef struct drm_agp_head {
+ device_t agpdev;
+ struct agp_info info;
+ const char *chipset;
+ drm_agp_mem_t *memory;
+ unsigned long mode;
+ int enabled;
+ int acquired;
+ unsigned long base;
+ int agp_mtrr;
+} drm_agp_head_t;
+
+#endif
+
typedef struct drm_device {
const char *name; /* Simple driver name */
char *unique; /* Unique identifier: e.g., busid */
@@ -422,9 +471,7 @@ typedef struct drm_device {
int last_checked; /* Last context checked for DMA */
int last_context; /* Last current context */
int last_switch; /* Time at last context switch */
-#if 0
- struct tq_struct tq;
-#endif
+ struct task task;
struct timespec ctx_start;
struct timespec lck_start;
#if DRM_DMA_HISTOGRAM
@@ -445,6 +492,12 @@ typedef struct drm_device {
/* Sysctl support */
struct drm_sysctl_info *sysctl;
+
+#ifdef DRM_AGP
+ drm_agp_head_t *agp;
+#endif
+ unsigned long *ctx_bitmap;
+ void *dev_private;
} drm_device_t;
@@ -506,6 +559,13 @@ extern void drm_free_pages(unsigned long address, int order,
extern void *drm_ioremap(unsigned long offset, unsigned long size);
extern void drm_ioremapfree(void *pt, unsigned long size);
+#ifdef DRM_AGP
+extern void *drm_alloc_agp(int pages, u_int32_t type);
+extern int drm_free_agp(void *handle, int pages);
+extern int drm_bind_agp(void *handle, unsigned int start);
+extern int drm_unbind_agp(void *handle);
+#endif
+
/* Buffer management support (bufs.c) */
extern int drm_order(unsigned long size);
extern d_ioctl_t drm_addmap;
@@ -592,5 +652,24 @@ extern int drm_flush_unblock(drm_device_t *dev, int context,
drm_lock_flags_t flags);
extern int drm_flush_block_and_flush(drm_device_t *dev, int context,
drm_lock_flags_t flags);
+
+ /* Context Bitmap support (ctxbitmap.c) */
+extern int drm_ctxbitmap_init(drm_device_t *dev);
+extern void drm_ctxbitmap_cleanup(drm_device_t *dev);
+extern int drm_ctxbitmap_next(drm_device_t *dev);
+extern void drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle);
+
+#ifdef DRM_AGP
+ /* AGP/GART support (agpsupport.c) */
+extern drm_agp_head_t *drm_agp_init(void);
+extern d_ioctl_t drm_agp_acquire;
+extern d_ioctl_t drm_agp_release;
+extern d_ioctl_t drm_agp_enable;
+extern d_ioctl_t drm_agp_info;
+extern d_ioctl_t drm_agp_alloc;
+extern d_ioctl_t drm_agp_free;
+extern d_ioctl_t drm_agp_unbind;
+extern d_ioctl_t drm_agp_bind;
+#endif
#endif
#endif
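The new set_bit/test_bit macros round out the Linux-style atomic bit helpers the port already had in test_and_set_bit/clear_bit; their usual job is a one-bit reentrancy guard like the one in r128_context_switch() later in this commit. A minimal sketch, with a hypothetical guard word:

    static volatile u_int32_t guard;            /* bit 0: "already inside" */

    static int
    enter_once(void)
    {
            if (test_and_set_bit(0, &guard))    /* atomically set bit 0, return old value */
                    return EBUSY;               /* another caller is in the section */
            /* ... critical section ... */
            clear_bit(0, &guard);
            return 0;
    }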
diff --git a/bsd-core/tdfx/Makefile b/bsd-core/tdfx/Makefile
index 7f212c21..e0ff8ffa 100644
--- a/bsd-core/tdfx/Makefile
+++ b/bsd-core/tdfx/Makefile
@@ -5,7 +5,6 @@ SRCS = tdfx_drv.c tdfx_context.c
SRCS += device_if.h bus_if.h pci_if.h
CFLAGS += ${DEBUG_FLAGS} -I..
KERN = /usr/src/sys
-KMODDEPS = drm
@:
ln -sf /sys @
@@ -13,4 +12,4 @@ KMODDEPS = drm
machine:
ln -sf /sys/i386/include machine
-.include <bsd.kmod.mk>
+.include "/usr/src/sys/conf/kmod.mk"
diff --git a/bsd/drm.h b/bsd/drm.h
index af3c14d0..d2a0ab43 100644
--- a/bsd/drm.h
+++ b/bsd/drm.h
@@ -109,7 +109,8 @@ typedef struct drm_control {
typedef enum drm_map_type {
_DRM_FRAME_BUFFER = 0, /* WC (no caching), no core dump */
_DRM_REGISTERS = 1, /* no caching, no core dump */
- _DRM_SHM = 2 /* shared, cached */
+ _DRM_SHM = 2, /* shared, cached */
+ _DRM_AGP = 3 /* AGP/GART */
} drm_map_type_t;
typedef enum drm_map_flags {
@@ -321,11 +322,11 @@ typedef struct drm_agp_info {
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
-#define DRM_IOCTL_AGP_ENABLE DRM_IOR( 0x32, drm_agp_mode_t)
-#define DRM_IOCTL_AGP_INFO DRM_IOW( 0x33, drm_agp_info_t)
+#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t)
+#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t)
#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t)
#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_BIND DRM_IOWR(0x36, drm_agp_binding_t)
+#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t)
#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t)
/* Mga specific ioctls */
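The two direction fixes follow the ioctl encoding convention: DRM_IOW() marks an argument the caller passes in, DRM_IOR() one the kernel fills in, and that is exactly how the agpsupport.c handlers treat drm_agp_mode_t (read from the caller) and drm_agp_info_t (written back). A rough userland sketch, assuming fd is an already-open DRM device node:

    #include <sys/ioctl.h>
    #include "drm.h"

    static int
    enable_and_query_agp(int fd)
    {
            drm_agp_mode_t mode;
            drm_agp_info_t info;

            mode.mode = 1;                                  /* caller -> kernel: _IOW */
            if (ioctl(fd, DRM_IOCTL_AGP_ENABLE, &mode) < 0)
                    return -1;

            if (ioctl(fd, DRM_IOCTL_AGP_INFO, &info) < 0)   /* kernel -> caller: _IOR */
                    return -1;

            return 0;
    }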
diff --git a/bsd/drm/Makefile b/bsd/drm/Makefile
index 6735d3c3..6a70a5b0 100644
--- a/bsd/drm/Makefile
+++ b/bsd/drm/Makefile
@@ -2,9 +2,10 @@
KMOD = drm
SRCS = init.c memory.c auth.c context.c drawable.c bufs.c \
- lists.c lock.c ioctl.c fops.c vm.c dma.c sysctl.c
+ lists.c lock.c ioctl.c fops.c vm.c dma.c sysctl.c \
+ agpsupport.c
SRCS += device_if.h bus_if.h pci_if.h
-CFLAGS += ${DEBUG_FLAGS} -I..
+CFLAGS += ${DEBUG_FLAGS} -I.. # -DDRM_AGP
KERN = /usr/src/sys
@:
@@ -13,4 +14,4 @@ KERN = /usr/src/sys
machine:
ln -sf /sys/i386/include machine
-.include <bsd.kmod.mk>
+.include "/usr/src/sys/conf/kmod.mk"
diff --git a/bsd/drm/agpsupport.c b/bsd/drm/agpsupport.c
new file mode 100644
index 00000000..53444c90
--- /dev/null
+++ b/bsd/drm/agpsupport.c
@@ -0,0 +1,271 @@
+/* agpsupport.c -- DRM support for AGP/GART backend
+ * Created: Mon Dec 13 09:56:45 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Rickard E. (Rik) Faith <faith@precisioninsight.com>
+ *
+ * $XFree86$
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+#ifdef DRM_AGP
+
+#include <pci/agpvar.h>
+
+MODULE_DEPEND(drm, agp, 1, 1, 1);
+
+int
+drm_agp_info(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ struct agp_info *kern;
+ drm_agp_info_t info;
+
+ if (!dev->agp->acquired) return EINVAL;
+
+ kern = &dev->agp->info;
+ agp_get_info(dev->agp->agpdev, kern);
+ info.agp_version_major = 1;
+ info.agp_version_minor = 0;
+ info.mode = kern->ai_mode;
+ info.aperture_base = kern->ai_aperture_base;
+ info.aperture_size = kern->ai_aperture_size;
+ info.memory_allowed = kern->ai_memory_allowed;
+ info.memory_used = kern->ai_memory_used;
+ info.id_vendor = kern->ai_devid & 0xffff;
+ info.id_device = kern->ai_devid >> 16;
+
+ *(drm_agp_info_t *) data = info;
+ return 0;
+}
+
+int
+drm_agp_acquire(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ int retcode;
+
+ if (dev->agp->acquired) return EINVAL;
+ retcode = agp_acquire(dev->agp->agpdev);
+ if (retcode) return retcode;
+ dev->agp->acquired = 1;
+ return 0;
+}
+
+int
+drm_agp_release(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+
+ if (!dev->agp->acquired) return EINVAL;
+ agp_release(dev->agp->agpdev);
+ dev->agp->acquired = 0;
+ return 0;
+
+}
+
+int
+drm_agp_enable(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_agp_mode_t mode;
+
+ if (!dev->agp->acquired) return EINVAL;
+
+ mode = *(drm_agp_mode_t *) data;
+
+ dev->agp->mode = mode.mode;
+ agp_enable(dev->agp->agpdev, mode.mode);
+ dev->agp->base = dev->agp->info.ai_aperture_base;
+ dev->agp->enabled = 1;
+ return 0;
+}
+
+int drm_agp_alloc(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_agp_buffer_t request;
+ drm_agp_mem_t *entry;
+ void *handle;
+ unsigned long pages;
+ u_int32_t type;
+ struct agp_memory_info info;
+
+ if (!dev->agp->acquired) return EINVAL;
+
+ request = *(drm_agp_buffer_t *) data;
+
+ if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS)))
+ return ENOMEM;
+
+ memset(entry, 0, sizeof(*entry));
+
+ pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
+ type = (u_int32_t) request.type;
+
+ if (!(handle = drm_alloc_agp(pages, type))) {
+ drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+ return ENOMEM;
+ }
+
+ entry->handle = handle;
+ entry->bound = 0;
+ entry->pages = pages;
+ entry->prev = NULL;
+ entry->next = dev->agp->memory;
+ if (dev->agp->memory) dev->agp->memory->prev = entry;
+ dev->agp->memory = entry;
+
+ agp_memory_info(dev->agp->agpdev, entry->handle, &info);
+
+ request.handle = (unsigned long) entry->handle;
+ request.physical = info.ami_physical;
+
+ *(drm_agp_buffer_t *) data = request;
+
+ return 0;
+}
+
+static drm_agp_mem_t *
+drm_agp_lookup_entry(drm_device_t *dev, void *handle)
+{
+ drm_agp_mem_t *entry;
+
+ for (entry = dev->agp->memory; entry; entry = entry->next) {
+ if (entry->handle == handle) return entry;
+ }
+ return NULL;
+}
+
+int
+drm_agp_unbind(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_agp_binding_t request;
+ drm_agp_mem_t *entry;
+
+ if (!dev->agp->acquired) return EINVAL;
+ request = *(drm_agp_binding_t *) data;
+ if (!(entry = drm_agp_lookup_entry(dev, (void *) request.handle)))
+ return EINVAL;
+ if (!entry->bound) return EINVAL;
+ return drm_unbind_agp(entry->handle);
+}
+
+int drm_agp_bind(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_agp_binding_t request;
+ drm_agp_mem_t *entry;
+ int retcode;
+ int page;
+
+ if (!dev->agp->acquired) return EINVAL;
+ request = *(drm_agp_binding_t *) data;
+ if (!(entry = drm_agp_lookup_entry(dev, (void *) request.handle)))
+ return EINVAL;
+ if (entry->bound) return EINVAL;
+ page = (request.offset + PAGE_SIZE - 1) / PAGE_SIZE;
+ if ((retcode = drm_bind_agp(entry->handle, page))) return retcode;
+ entry->bound = dev->agp->base + (page << PAGE_SHIFT);
+ return 0;
+}
+
+int drm_agp_free(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_agp_buffer_t request;
+ drm_agp_mem_t *entry;
+
+ if (!dev->agp->acquired) return EINVAL;
+ request = *(drm_agp_buffer_t *) data;
+ if (!(entry = drm_agp_lookup_entry(dev, (void*) request.handle)))
+ return EINVAL;
+ if (entry->bound) drm_unbind_agp(entry->handle);
+
+ if (entry->prev) entry->prev->next = entry->next;
+ else dev->agp->memory = entry->next;
+ if (entry->next) entry->next->prev = entry->prev;
+ drm_free_agp(entry->handle, entry->pages);
+ drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+ return 0;
+}
+
+drm_agp_head_t *drm_agp_init(void)
+{
+ device_t agpdev;
+ drm_agp_head_t *head = NULL;
+ int agp_available = 1;
+
+ agpdev = agp_find_device();
+ if (!agpdev)
+ agp_available = 0;
+
+ DRM_DEBUG("agp_available = %d\n", agp_available);
+
+ if (agp_available) {
+ if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS)))
+ return NULL;
+ memset((void *)head, 0, sizeof(*head));
+ head->agpdev = agpdev;
+ agp_get_info(agpdev, &head->info);
+ head->memory = NULL;
+#if 0 /* bogus */
+ switch (head->agp_info.chipset) {
+ case INTEL_GENERIC: head->chipset = "Intel"; break;
+ case INTEL_LX: head->chipset = "Intel 440LX"; break;
+ case INTEL_BX: head->chipset = "Intel 440BX"; break;
+ case INTEL_GX: head->chipset = "Intel 440GX"; break;
+ case INTEL_I810: head->chipset = "Intel i810"; break;
+ case VIA_GENERIC: head->chipset = "VIA"; break;
+ case VIA_VP3: head->chipset = "VIA VP3"; break;
+ case VIA_MVP3: head->chipset = "VIA MVP3"; break;
+ case VIA_APOLLO_PRO: head->chipset = "VIA Apollo Pro"; break;
+ case SIS_GENERIC: head->chipset = "SiS"; break;
+ case AMD_GENERIC: head->chipset = "AMD"; break;
+ case AMD_IRONGATE: head->chipset = "AMD Irongate"; break;
+ case ALI_GENERIC: head->chipset = "ALi"; break;
+ case ALI_M1541: head->chipset = "ALi M1541"; break;
+ default:
+ }
+#endif
+ DRM_INFO("AGP at 0x%08x %dMB\n",
+ head->info.ai_aperture_base,
+ head->info.ai_aperture_size >> 20);
+ }
+ return head;
+}
+
+#endif /* DRM_AGP */
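Every handler above except drm_agp_acquire() returns EINVAL until the device has been acquired, and drm_agp_bind() additionally rejects handles that are already bound, so clients are expected to drive the device in the order acquire, enable, alloc, bind. A hedged userland sketch; the descriptor, the 8 MB size and the zero offset are hypothetical:

    #include <sys/ioctl.h>
    #include "drm.h"

    static int
    setup_agp(int fd)
    {
            drm_agp_mode_t    mode;
            drm_agp_buffer_t  buf;
            drm_agp_binding_t bind;

            if (ioctl(fd, DRM_IOCTL_AGP_ACQUIRE, 0) < 0)        /* acquired = 1        */
                    return -1;

            mode.mode = 1;
            if (ioctl(fd, DRM_IOCTL_AGP_ENABLE, &mode) < 0)     /* program the chipset */
                    return -1;

            buf.size = 8 * 1024 * 1024;                         /* rounded up to pages */
            buf.type = 0;
            if (ioctl(fd, DRM_IOCTL_AGP_ALLOC, &buf) < 0)       /* fills buf.handle    */
                    return -1;

            bind.handle = buf.handle;
            bind.offset = 0;                                    /* byte offset into the
                                                                   aperture, page aligned */
            return ioctl(fd, DRM_IOCTL_AGP_BIND, &bind);
    }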
diff --git a/bsd/drm/init.c b/bsd/drm/init.c
index 68b145ff..44e9be99 100644
--- a/bsd/drm/init.c
+++ b/bsd/drm/init.c
@@ -32,6 +32,8 @@
#define __NO_VERSION__
#include "drmP.h"
+MODULE_VERSION(drm, 1);
+
int drm_flags = 0;
/* drm_parse_option parses a single option. See description for
diff --git a/bsd/drm/memory.c b/bsd/drm/memory.c
index 0285c6d6..a9171d91 100644
--- a/bsd/drm/memory.c
+++ b/bsd/drm/memory.c
@@ -321,3 +321,128 @@ void drm_ioremapfree(void *pt, unsigned long size)
free_count, alloc_count);
}
}
+
+#ifdef DRM_AGP
+void *drm_alloc_agp(int pages, u_int32_t type)
+{
+ device_t dev = agp_find_device();
+ void *handle;
+
+ if (!dev)
+ return NULL;
+
+ if (!pages) {
+ DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n");
+ return NULL;
+ }
+
+ if ((handle = agp_alloc_memory(dev, type, pages << AGP_PAGE_SHIFT))) {
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
+ drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated
+ += pages << PAGE_SHIFT;
+ spin_unlock(&drm_mem_lock);
+ return handle;
+ }
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count;
+ spin_unlock(&drm_mem_lock);
+ return NULL;
+}
+
+int drm_free_agp(agp_memory *handle, int pages)
+{
+ device_t dev = agp_find_device();
+ int alloc_count;
+ int free_count;
+ int retval = EINVAL;
+
+ if (!dev)
+ return EINVAL;
+
+ if (!handle) {
+ DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
+ "Attempt to free NULL AGP handle\n");
+ return retval;
+ }
+
+ agp_free_memory(dev, handle);
+ spin_lock(&drm_mem_lock);
+ free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
+ alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
+ drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed
+ += pages << PAGE_SHIFT;
+ spin_unlock(&drm_mem_lock);
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
+ "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+ return 0;
+}
+
+int drm_bind_agp(agp_memory *handle, unsigned int start)
+{
+ device_t dev = agp_find_device();
+ int retcode = EINVAL;
+
+ DRM_DEBUG("drm_bind_agp called\n");
+
+ if (!dev)
+ return EINVAL;
+
+ if (!handle) {
+ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
+ "Attempt to bind NULL AGP handle\n");
+ return retcode;
+ }
+
+ if (!(retcode = agp_bind_memory(handle, start << AGP_PAGE_SHIFT))) {
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
+ drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated
+ += handle->page_count << PAGE_SHIFT;
+ spin_unlock(&drm_mem_lock);
+ DRM_DEBUG("drm_agp.bind_memory: retcode %d\n", retcode);
+ return retcode;
+ }
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count;
+ spin_unlock(&drm_mem_lock);
+ return retcode;
+}
+
+int drm_unbind_agp(agp_memory *handle)
+{
+ device_t dev = agp_find_device();
+ int alloc_count;
+ int free_count;
+ int retcode = EINVAL;
+ struct agp_memory_info info;
+
+ if (!dev)
+ return EINVAL;
+
+ if (!handle) {
+ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
+ "Attempt to unbind NULL AGP handle\n");
+ return retcode;
+ }
+
+
+ agp_memory_info(dev, handle, &info);
+ if ((retcode = agp_unbind_memory(dev, handle)))
+ return retcode;
+ spin_lock(&drm_mem_lock);
+ free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
+ alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
+ drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed += info.ami_size;
+ spin_unlock(&drm_mem_lock);
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
+ "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+ return retcode;
+}
+#endif
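Allocation and binding stay separate on purpose, mirroring the Linux core: drm_alloc_agp() works in pages, drm_bind_agp() takes a page offset into the aperture, and the caller turns that back into a bus address from the aperture base. A sketch of the pairing as drm_agp_alloc()/drm_agp_bind() in agpsupport.c use it; the helper name and its size/offset arguments are hypothetical, and it assumes a DRM_AGP build:

    /* Allocate "size" bytes of AGP memory and bind it "offset" bytes into the
     * aperture; returns the bound bus address, or 0 on failure. */
    static unsigned long
    agp_alloc_and_bind(drm_device_t *dev, unsigned long size, unsigned long offset)
    {
            int   pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
            int   page  = (offset + PAGE_SIZE - 1) / PAGE_SIZE;
            void *handle;

            if ((handle = drm_alloc_agp(pages, 0)) == NULL)   /* 0: request.type here */
                    return 0;
            if (drm_bind_agp(handle, page) != 0) {
                    drm_free_agp(handle, pages);
                    return 0;
            }
            return dev->agp->base + (page << PAGE_SHIFT);     /* bus address */
    }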
diff --git a/bsd/drmP.h b/bsd/drmP.h
index 0ab33349..c5951ff7 100644
--- a/bsd/drmP.h
+++ b/bsd/drmP.h
@@ -49,6 +49,11 @@
#include <sys/sysctl.h>
#include <sys/select.h>
#include <sys/bus.h>
+#include <sys/taskqueue.h>
+
+#ifdef DRM_AGP
+#include <pci/agpvar.h>
+#endif
#include "drm.h"
@@ -73,11 +78,19 @@ test_and_set_bit(int b, volatile u_int32_t *p)
}
#define clear_bit(b, p) atomic_clear_int(p, 1<<(b))
+#define set_bit(b, p) atomic_set_int(p, 1<<(b))
+#define test_bit(b, p) (*(u_int32_t*)p & (1<<(b)))
#define spldrm() spltty()
#define memset(p, v, s) bzero(p, s)
+/*
+ * Software interrupts for DMA pipe feeding. The FreeBSD kernel apis
+ * are severely lacking here.
+ */
+#define SWI_DRI (SWI_VM+2)
+
#define DRM_DEBUG_CODE 2 /* Include debugging code (if > 1, then
also include looping detection. */
#define DRM_DMA_HISTOGRAM 1 /* Make histogram of DMA latency. */
@@ -108,6 +121,10 @@ test_and_set_bit(int b, volatile u_int32_t *p)
#define DRM_MEM_CMDS 12
#define DRM_MEM_MAPPINGS 13
#define DRM_MEM_BUFLISTS 14
+#define DRM_MEM_AGPLISTS 15
+#define DRM_MEM_TOTALAGP 16
+#define DRM_MEM_BOUNDAGP 17
+#define DRM_MEM_CTXBITMAP 18
/* Backward compatibility section */
#ifndef _PAGE_PWT
@@ -207,6 +224,7 @@ typedef struct drm_buf {
int used; /* Amount of buffer in use (for DMA) */
unsigned long offset; /* Byte offset (used internally) */
void *address; /* Address of buffer */
+ unsigned long bus_address; /* Bus address of buffer */
struct drm_buf *next; /* Kernel-only: used for free list */
__volatile__ int waiting; /* On kernel DMA queue */
__volatile__ int pending; /* On hardware DMA queue */
@@ -222,6 +240,10 @@ typedef struct drm_buf {
DRM_LIST_PRIO = 4,
DRM_LIST_RECLAIM = 5
} list; /* Which list we're on */
+
+ void *dev_private;
+ int dev_priv_size;
+
#if DRM_DMA_HISTOGRAM
struct timespec time_queued; /* Queued to kernel DMA queue */
struct timespec time_dispatched; /* Dispatched to hardware */
@@ -349,6 +371,9 @@ typedef struct drm_device_dma {
int page_count;
vm_offset_t *pagelist;
unsigned long byte_count;
+ enum {
+ _DRM_DMA_USE_AGP = 0x01
+ } flags;
/* DMA support */
drm_buf_t *this_buffer; /* Buffer being sent */
@@ -357,6 +382,30 @@ typedef struct drm_device_dma {
int waiting; /* Processes waiting on free bufs */
} drm_device_dma_t;
+#ifdef DRM_AGP
+
+typedef struct drm_agp_mem {
+ void *handle;
+ unsigned long bound; /* address */
+ int pages;
+ struct drm_agp_mem *prev;
+ struct drm_agp_mem *next;
+} drm_agp_mem_t;
+
+typedef struct drm_agp_head {
+ device_t agpdev;
+ struct agp_info info;
+ const char *chipset;
+ drm_agp_mem_t *memory;
+ unsigned long mode;
+ int enabled;
+ int acquired;
+ unsigned long base;
+ int agp_mtrr;
+} drm_agp_head_t;
+
+#endif
+
typedef struct drm_device {
const char *name; /* Simple driver name */
char *unique; /* Unique identifier: e.g., busid */
@@ -422,9 +471,7 @@ typedef struct drm_device {
int last_checked; /* Last context checked for DMA */
int last_context; /* Last current context */
int last_switch; /* Time at last context switch */
-#if 0
- struct tq_struct tq;
-#endif
+ struct task task;
struct timespec ctx_start;
struct timespec lck_start;
#if DRM_DMA_HISTOGRAM
@@ -445,6 +492,12 @@ typedef struct drm_device {
/* Sysctl support */
struct drm_sysctl_info *sysctl;
+
+#ifdef DRM_AGP
+ drm_agp_head_t *agp;
+#endif
+ unsigned long *ctx_bitmap;
+ void *dev_private;
} drm_device_t;
@@ -506,6 +559,13 @@ extern void drm_free_pages(unsigned long address, int order,
extern void *drm_ioremap(unsigned long offset, unsigned long size);
extern void drm_ioremapfree(void *pt, unsigned long size);
+#ifdef DRM_AGP
+extern void *drm_alloc_agp(int pages, u_int32_t type);
+extern int drm_free_agp(void *handle, int pages);
+extern int drm_bind_agp(void *handle, unsigned int start);
+extern int drm_unbind_agp(void *handle);
+#endif
+
/* Buffer management support (bufs.c) */
extern int drm_order(unsigned long size);
extern d_ioctl_t drm_addmap;
@@ -592,5 +652,24 @@ extern int drm_flush_unblock(drm_device_t *dev, int context,
drm_lock_flags_t flags);
extern int drm_flush_block_and_flush(drm_device_t *dev, int context,
drm_lock_flags_t flags);
+
+ /* Context Bitmap support (ctxbitmap.c) */
+extern int drm_ctxbitmap_init(drm_device_t *dev);
+extern void drm_ctxbitmap_cleanup(drm_device_t *dev);
+extern int drm_ctxbitmap_next(drm_device_t *dev);
+extern void drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle);
+
+#ifdef DRM_AGP
+ /* AGP/GART support (agpsupport.c) */
+extern drm_agp_head_t *drm_agp_init(void);
+extern d_ioctl_t drm_agp_acquire;
+extern d_ioctl_t drm_agp_release;
+extern d_ioctl_t drm_agp_enable;
+extern d_ioctl_t drm_agp_info;
+extern d_ioctl_t drm_agp_alloc;
+extern d_ioctl_t drm_agp_free;
+extern d_ioctl_t drm_agp_unbind;
+extern d_ioctl_t drm_agp_bind;
+#endif
#endif
#endif
diff --git a/bsd/gamma/Makefile b/bsd/gamma/Makefile
index 777f3325..dd611038 100644
--- a/bsd/gamma/Makefile
+++ b/bsd/gamma/Makefile
@@ -5,7 +5,6 @@ SRCS = gamma_drv.c gamma_dma.c
SRCS += device_if.h bus_if.h pci_if.h
CFLAGS += ${DEBUG_FLAGS} -I..
KERN = /usr/src/sys
-KMODDEPS = drm
@:
ln -sf /sys @
@@ -13,4 +12,4 @@ KMODDEPS = drm
machine:
ln -sf /sys/i386/include machine
-.include <bsd.kmod.mk>
+.include "/usr/src/sys/conf/kmod.mk"
diff --git a/bsd/gamma/gamma_dma.c b/bsd/gamma/gamma_dma.c
index 8ac27aa0..177440db 100644
--- a/bsd/gamma/gamma_dma.c
+++ b/bsd/gamma/gamma_dma.c
@@ -614,10 +614,12 @@ int gamma_irq_install(drm_device_t *dev, int irq)
if (!irq) return EINVAL;
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
if (dev->irq) {
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
return EBUSY;
}
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
DRM_DEBUG("%d\n", irq);
diff --git a/bsd/gamma/gamma_drv.c b/bsd/gamma/gamma_drv.c
index d278031a..e155532e 100644
--- a/bsd/gamma/gamma_drv.c
+++ b/bsd/gamma/gamma_drv.c
@@ -34,6 +34,8 @@
#include <pci/pcivar.h>
+MODULE_DEPEND(gamma, drm, 1, 1, 1);
+
static int gamma_init(device_t nbdev);
static void gamma_cleanup(device_t nbdev);
diff --git a/bsd/tdfx/Makefile b/bsd/tdfx/Makefile
index 7f212c21..e0ff8ffa 100644
--- a/bsd/tdfx/Makefile
+++ b/bsd/tdfx/Makefile
@@ -5,7 +5,6 @@ SRCS = tdfx_drv.c tdfx_context.c
SRCS += device_if.h bus_if.h pci_if.h
CFLAGS += ${DEBUG_FLAGS} -I..
KERN = /usr/src/sys
-KMODDEPS = drm
@:
ln -sf /sys @
@@ -13,4 +12,4 @@ KMODDEPS = drm
machine:
ln -sf /sys/i386/include machine
-.include <bsd.kmod.mk>
+.include "/usr/src/sys/conf/kmod.mk"
diff --git a/bsd/tdfx/tdfx_drv.c b/bsd/tdfx/tdfx_drv.c
index e043cdc4..991e92ac 100644
--- a/bsd/tdfx/tdfx_drv.c
+++ b/bsd/tdfx/tdfx_drv.c
@@ -34,6 +34,8 @@
#include <pci/pcivar.h>
+MODULE_DEPEND(tdfx, drm, 1, 1, 1);
+
#define TDFX_NAME "tdfx"
#define TDFX_DESC "tdfx"
#define TDFX_DATE "19991009"
@@ -289,6 +291,8 @@ tdfx_takedown(drm_device_t *dev)
- PAGE_SHIFT,
DRM_MEM_SAREA);
break;
+ case _DRM_AGP:
+ break; /* XXX */
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
}
@@ -585,6 +589,8 @@ tdfx_lock(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
PZERO|PCATCH,
"drmlk2",
0);
+ if (ret)
+ break;
}
}
diff --git a/linux-core/r128_drv.c b/linux-core/r128_drv.c
new file mode 100644
index 00000000..45ade1de
--- /dev/null
+++ b/linux-core/r128_drv.c
@@ -0,0 +1,737 @@
+/* r128_drv.c -- ATI Rage 128 driver -*- linux-c -*-
+ * Created: Mon Dec 13 09:47:27 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
+ * Kevin E. Martin <kevin@precisioninsight.com>
+ *
+ * $XFree86$
+ *
+ */
+
+#define EXPORT_SYMTAB
+#include "drmP.h"
+#include "r128_drv.h"
+EXPORT_SYMBOL(r128_init);
+EXPORT_SYMBOL(r128_cleanup);
+
+#define R128_NAME "r128"
+#define R128_DESC "r128"
+#define R128_DATE "20000422"
+#define R128_MAJOR 0
+#define R128_MINOR 0
+#define R128_PATCHLEVEL 5
+
+static drm_device_t r128_device;
+drm_ctx_t r128_res_ctx;
+
+static struct file_operations r128_fops = {
+ open: r128_open,
+ flush: drm_flush,
+ release: r128_release,
+ ioctl: r128_ioctl,
+ mmap: drm_mmap,
+ read: drm_read,
+ fasync: drm_fasync,
+ poll: drm_poll,
+};
+
+static struct miscdevice r128_misc = {
+ minor: MISC_DYNAMIC_MINOR,
+ name: R128_NAME,
+ fops: &r128_fops,
+};
+
+static drm_ioctl_desc_t r128_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { r128_version, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { r128_addbufs, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { r128_mapbufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { r128_addctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { r128_rmctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { r128_modctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { r128_getctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { r128_switchctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { r128_newctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { r128_resctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { r128_lock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { r128_unlock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
+
+#ifdef DRM_AGP
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
+#endif
+
+ [DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_init_cce, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)] = { r128_eng_reset, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_R128_FLUSH)] = { r128_eng_flush, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_R128_PACKET)] = { r128_submit_pkt, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_R128_CCEIDL)] = { r128_cce_idle, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)] = { r128_vertex_buf, 1, 0 },
+};
+#define R128_IOCTL_COUNT DRM_ARRAY_SIZE(r128_ioctls)
+
+#ifdef MODULE
+int init_module(void);
+void cleanup_module(void);
+static char *r128 = NULL;
+
+MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas.");
+MODULE_DESCRIPTION("r128");
+MODULE_PARM(r128, "s");
+
+/* init_module is called when insmod is used to load the module */
+
+int init_module(void)
+{
+ return r128_init();
+}
+
+/* cleanup_module is called when rmmod is used to unload the module */
+
+void cleanup_module(void)
+{
+ r128_cleanup();
+}
+#endif
+
+#ifndef MODULE
+/* r128_setup is called by the kernel to parse command-line options passed
+ * via the boot-loader (e.g., LILO). It calls the insmod option routine,
+ * drm_parse_options.
+ *
+ * This is not currently supported, since it requires changes to
+ * linux/init/main.c. */
+
+
+void __init r128_setup(char *str, int *ints)
+{
+ if (ints[0] != 0) {
+ DRM_ERROR("Illegal command line format, ignored\n");
+ return;
+ }
+ drm_parse_options(str);
+}
+#endif
+
+static int r128_setup(drm_device_t *dev)
+{
+ int i;
+
+ atomic_set(&dev->ioctl_count, 0);
+ atomic_set(&dev->vma_count, 0);
+ dev->buf_use = 0;
+ atomic_set(&dev->buf_alloc, 0);
+
+ drm_dma_setup(dev);
+
+ atomic_set(&dev->total_open, 0);
+ atomic_set(&dev->total_close, 0);
+ atomic_set(&dev->total_ioctl, 0);
+ atomic_set(&dev->total_irq, 0);
+ atomic_set(&dev->total_ctx, 0);
+ atomic_set(&dev->total_locks, 0);
+ atomic_set(&dev->total_unlocks, 0);
+ atomic_set(&dev->total_contends, 0);
+ atomic_set(&dev->total_sleeps, 0);
+
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ dev->magiclist[i].head = NULL;
+ dev->magiclist[i].tail = NULL;
+ }
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ dev->vmalist = NULL;
+ dev->lock.hw_lock = NULL;
+ init_waitqueue_head(&dev->lock.lock_queue);
+ dev->queue_count = 0;
+ dev->queue_reserved = 0;
+ dev->queue_slots = 0;
+ dev->queuelist = NULL;
+ dev->irq = 0;
+ dev->context_flag = 0;
+ dev->interrupt_flag = 0;
+ dev->dma_flag = 0;
+ dev->last_context = 0;
+ dev->last_switch = 0;
+ dev->last_checked = 0;
+ init_timer(&dev->timer);
+ init_waitqueue_head(&dev->context_wait);
+
+ dev->ctx_start = 0;
+ dev->lck_start = 0;
+
+ dev->buf_rp = dev->buf;
+ dev->buf_wp = dev->buf;
+ dev->buf_end = dev->buf + DRM_BSZ;
+ dev->buf_async = NULL;
+ init_waitqueue_head(&dev->buf_readers);
+ init_waitqueue_head(&dev->buf_writers);
+
+ r128_res_ctx.handle=-1;
+
+ DRM_DEBUG("\n");
+
+ /* The kernel's context could be created here, but is now created
+ in drm_dma_enqueue. This is more resource-efficient for
+ hardware that does not do DMA, but may mean that
+ drm_select_queue fails between the time the interrupt is
+ initialized and the time the queues are initialized. */
+
+ return 0;
+}
+
+
+static int r128_takedown(drm_device_t *dev)
+{
+ int i;
+ drm_magic_entry_t *pt, *next;
+ drm_map_t *map;
+ drm_vma_entry_t *vma, *vma_next;
+
+ DRM_DEBUG("\n");
+
+ down(&dev->struct_sem);
+ del_timer(&dev->timer);
+
+ if (dev->devname) {
+ drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
+ dev->devname = NULL;
+ }
+
+ if (dev->unique) {
+ drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
+ dev->unique = NULL;
+ dev->unique_len = 0;
+ }
+ /* Clear pid list */
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ for (pt = dev->magiclist[i].head; pt; pt = next) {
+ next = pt->next;
+ drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+ }
+ dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
+ }
+
+#ifdef DRM_AGP
+ /* Clear AGP information */
+ if (dev->agp) {
+ drm_agp_mem_t *entry;
+ drm_agp_mem_t *nexte;
+
+ /* Remove AGP resources, but leave dev->agp
+ intact until r128_cleanup is called. */
+ for (entry = dev->agp->memory; entry; entry = nexte) {
+ nexte = entry->next;
+ if (entry->bound) drm_unbind_agp(entry->memory);
+ drm_free_agp(entry->memory, entry->pages);
+ drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+ }
+ dev->agp->memory = NULL;
+
+ if (dev->agp->acquired && drm_agp.release)
+ (*drm_agp.release)();
+
+ dev->agp->acquired = 0;
+ dev->agp->enabled = 0;
+ }
+#endif
+
+ /* Clear vma list (only built for debugging) */
+ if (dev->vmalist) {
+ for (vma = dev->vmalist; vma; vma = vma_next) {
+ vma_next = vma->next;
+ drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
+ }
+ dev->vmalist = NULL;
+ }
+
+ /* Clear map area and mtrr information */
+ if (dev->maplist) {
+ for (i = 0; i < dev->map_count; i++) {
+ map = dev->maplist[i];
+ switch (map->type) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+#ifdef CONFIG_MTRR
+ if (map->mtrr >= 0) {
+ int retcode;
+ retcode = mtrr_del(map->mtrr,
+ map->offset,
+ map->size);
+ DRM_DEBUG("mtrr_del = %d\n", retcode);
+ }
+#endif
+ drm_ioremapfree(map->handle, map->size);
+ break;
+ case _DRM_SHM:
+ drm_free_pages((unsigned long)map->handle,
+ drm_order(map->size)
+ - PAGE_SHIFT,
+ DRM_MEM_SAREA);
+ break;
+ case _DRM_AGP:
+ /* Do nothing here, because this is all
+ handled in the AGP/GART driver. */
+ break;
+ }
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ }
+ drm_free(dev->maplist,
+ dev->map_count * sizeof(*dev->maplist),
+ DRM_MEM_MAPS);
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ }
+
+ drm_dma_takedown(dev);
+
+ dev->queue_count = 0;
+ if (dev->lock.hw_lock) {
+ dev->lock.hw_lock = NULL; /* SHM removed */
+ dev->lock.pid = 0;
+ wake_up_interruptible(&dev->lock.lock_queue);
+ }
+ up(&dev->struct_sem);
+
+ return 0;
+}
+
+/* r128_init is called via init_module at module load time, or via
+ * linux/init/main.c (this is not currently supported). */
+
+int r128_init(void)
+{
+ int retcode;
+ drm_device_t *dev = &r128_device;
+
+ DRM_DEBUG("\n");
+
+ memset((void *)dev, 0, sizeof(*dev));
+ dev->count_lock = SPIN_LOCK_UNLOCKED;
+ sema_init(&dev->struct_sem, 1);
+
+#ifdef MODULE
+ drm_parse_options(r128);
+#endif
+
+ if ((retcode = misc_register(&r128_misc))) {
+ DRM_ERROR("Cannot register \"%s\"\n", R128_NAME);
+ return retcode;
+ }
+ dev->device = MKDEV(MISC_MAJOR, r128_misc.minor);
+ dev->name = R128_NAME;
+
+ drm_mem_init();
+ drm_proc_init(dev);
+
+#ifdef DRM_AGP
+ dev->agp = drm_agp_init();
+
+#ifdef CONFIG_MTRR
+ dev->agp->agp_mtrr = mtrr_add(dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size*1024*1024,
+ MTRR_TYPE_WRCOMB,
+ 1);
+#endif
+#endif
+
+ if((retcode = drm_ctxbitmap_init(dev))) {
+ DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+ drm_proc_cleanup();
+ misc_deregister(&r128_misc);
+ r128_takedown(dev);
+ return retcode;
+ }
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ R128_NAME,
+ R128_MAJOR,
+ R128_MINOR,
+ R128_PATCHLEVEL,
+ R128_DATE,
+ r128_misc.minor);
+
+ return 0;
+}
+
+/* r128_cleanup is called via cleanup_module at module unload time. */
+
+void r128_cleanup(void)
+{
+ drm_device_t *dev = &r128_device;
+
+ DRM_DEBUG("\n");
+
+ drm_proc_cleanup();
+ if (misc_deregister(&r128_misc)) {
+ DRM_ERROR("Cannot unload module\n");
+ } else {
+ DRM_INFO("Module unloaded\n");
+ }
+ drm_ctxbitmap_cleanup(dev);
+ r128_takedown(dev);
+#ifdef DRM_AGP
+ if (dev->agp) {
+ /* FIXME -- free other information, too */
+ drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
+ dev->agp = NULL;
+ }
+#endif
+}
+
+int r128_version(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_version_t version;
+ int len;
+
+ copy_from_user_ret(&version,
+ (drm_version_t *)arg,
+ sizeof(version),
+ -EFAULT);
+
+#define DRM_COPY(name,value) \
+ len = strlen(value); \
+ if (len > name##_len) len = name##_len; \
+ name##_len = strlen(value); \
+ if (len && name) { \
+ copy_to_user_ret(name, value, len, -EFAULT); \
+ }
+
+ version.version_major = R128_MAJOR;
+ version.version_minor = R128_MINOR;
+ version.version_patchlevel = R128_PATCHLEVEL;
+
+ DRM_COPY(version.name, R128_NAME);
+ DRM_COPY(version.date, R128_DATE);
+ DRM_COPY(version.desc, R128_DESC);
+
+ copy_to_user_ret((drm_version_t *)arg,
+ &version,
+ sizeof(version),
+ -EFAULT);
+ return 0;
+}
+
+int r128_open(struct inode *inode, struct file *filp)
+{
+ drm_device_t *dev = &r128_device;
+ int retcode = 0;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+ if (!(retcode = drm_open_helper(inode, filp, dev))) {
+ MOD_INC_USE_COUNT;
+ atomic_inc(&dev->total_open);
+ spin_lock(&dev->count_lock);
+ if (!dev->open_count++) {
+ spin_unlock(&dev->count_lock);
+ return r128_setup(dev);
+ }
+ spin_unlock(&dev->count_lock);
+ }
+ return retcode;
+}
+
+int r128_release(struct inode *inode, struct file *filp)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ int retcode = 0;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+ if (!(retcode = drm_release(inode, filp))) {
+ MOD_DEC_USE_COUNT;
+ atomic_inc(&dev->total_close);
+ spin_lock(&dev->count_lock);
+ if (!--dev->open_count) {
+ if (atomic_read(&dev->ioctl_count) || dev->blocked) {
+ DRM_ERROR("Device busy: %d %d\n",
+ atomic_read(&dev->ioctl_count),
+ dev->blocked);
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ spin_unlock(&dev->count_lock);
+ return r128_takedown(dev);
+ }
+ spin_unlock(&dev->count_lock);
+ }
+ return retcode;
+}
+
+/* r128_ioctl is called whenever a process performs an ioctl on /dev/drm. */
+
+int r128_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int nr = DRM_IOCTL_NR(cmd);
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ int retcode = 0;
+ drm_ioctl_desc_t *ioctl;
+ drm_ioctl_t *func;
+
+ atomic_inc(&dev->ioctl_count);
+ atomic_inc(&dev->total_ioctl);
+ ++priv->ioctl_count;
+
+ DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
+ current->pid, cmd, nr, dev->device, priv->authenticated);
+
+ if (nr >= R128_IOCTL_COUNT) {
+ retcode = -EINVAL;
+ } else {
+ ioctl = &r128_ioctls[nr];
+ func = ioctl->func;
+
+ if (!func) {
+ DRM_DEBUG("no function\n");
+ retcode = -EINVAL;
+ } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
+ || (ioctl->auth_needed && !priv->authenticated)) {
+ retcode = -EACCES;
+ } else {
+ retcode = (func)(inode, filp, cmd, arg);
+ }
+ }
+
+ atomic_dec(&dev->ioctl_count);
+ return retcode;
+}
+
+int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ DECLARE_WAITQUEUE(entry, current);
+ int ret = 0;
+ drm_lock_t lock;
+#if DRM_DMA_HISTOGRAM
+ cycles_t start;
+
+ dev->lck_start = start = get_cycles();
+#endif
+
+ copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ current->pid, lock.context);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+ lock.context, current->pid, dev->lock.hw_lock->lock,
+ lock.flags);
+
+#if 0
+ /* dev->queue_count == 0 right now for
+ r128. FIXME? */
+ if (lock.context < 0 || lock.context >= dev->queue_count)
+ return -EINVAL;
+#endif
+
+ if (!ret) {
+#if 0
+ if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
+ != lock.context) {
+ long j = jiffies - dev->lock.lock_time;
+
+ if (lock.context == r128_res_ctx.handle &&
+ j >= 0 && j < DRM_LOCK_SLICE) {
+ /* Can't take lock if we just had it and
+ there is contention. */
+ DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d jiffies=%d\n",
+ lock.context, current->pid, j,
+ dev->lock.lock_time, jiffies);
+ current->state = TASK_INTERRUPTIBLE;
+ current->policy |= SCHED_YIELD;
+ schedule_timeout(DRM_LOCK_SLICE-j);
+ DRM_DEBUG("jiffies=%d\n", jiffies);
+ }
+ }
+#endif
+ add_wait_queue(&dev->lock.lock_queue, &entry);
+ for (;;) {
+ if (!dev->lock.hw_lock) {
+ /* Device has been unregistered */
+ ret = -EINTR;
+ break;
+ }
+ if (drm_lock_take(&dev->lock.hw_lock->lock,
+ lock.context)) {
+ dev->lock.pid = current->pid;
+ dev->lock.lock_time = jiffies;
+ atomic_inc(&dev->total_locks);
+ break; /* Got lock */
+ }
+
+ /* Contention */
+ atomic_inc(&dev->total_sleeps);
+ current->state = TASK_INTERRUPTIBLE;
+#if 1
+ current->policy |= SCHED_YIELD;
+#endif
+ schedule();
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&dev->lock.lock_queue, &entry);
+ }
+
+#if 0
+ if (!ret && dev->last_context != lock.context &&
+ lock.context != r128_res_ctx.handle &&
+ dev->last_context != r128_res_ctx.handle) {
+ add_wait_queue(&dev->context_wait, &entry);
+ current->state = TASK_INTERRUPTIBLE;
+ /* PRE: dev->last_context != lock.context */
+ r128_context_switch(dev, dev->last_context, lock.context);
+ /* POST: we will wait for the context
+ switch and will dispatch on a later call
+ when dev->last_context == lock.context
+ NOTE WE HOLD THE LOCK THROUGHOUT THIS
+ TIME! */
+ current->policy |= SCHED_YIELD;
+ schedule();
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&dev->context_wait, &entry);
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ } else if (dev->last_context != lock.context) {
+ DRM_ERROR("Context mismatch: %d %d\n",
+ dev->last_context, lock.context);
+ }
+ }
+#endif
+
+ if (!ret) {
+ if (lock.flags & _DRM_LOCK_READY) {
+ /* Wait for space in DMA/FIFO */
+ }
+ if (lock.flags & _DRM_LOCK_QUIESCENT) {
+ /* Make hardware quiescent */
+#if 0
+ r128_quiescent(dev);
+#endif
+ }
+ }
+
+#if 0
+ DRM_ERROR("pid = %5d, old counter = %5ld\n",
+ current->pid, current->counter);
+#endif
+ if (lock.context != r128_res_ctx.handle) {
+ current->counter = 5;
+ current->priority = DEF_PRIORITY/4;
+ }
+#if 0
+ while (current->counter > 25)
+ current->counter >>= 1; /* decrease time slice */
+ DRM_ERROR("pid = %5d, new counter = %5ld\n",
+ current->pid, current->counter);
+#endif
+ DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
+
+#if DRM_DMA_HISTOGRAM
+ atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
+#endif
+
+ return ret;
+}
+
+
+int r128_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_lock_t lock;
+
+ copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ current->pid, lock.context);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("%d frees lock (%d holds)\n",
+ lock.context,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ atomic_inc(&dev->total_unlocks);
+ if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
+ atomic_inc(&dev->total_contends);
+ drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
+ /* FIXME: Try to send data to card here */
+ if (!dev->context_flag) {
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("\n");
+ }
+ }
+
+#if 0
+ current->policy |= SCHED_YIELD;
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(1000);
+#endif
+
+ if (lock.context != r128_res_ctx.handle) {
+ current->counter = 5;
+ current->priority = DEF_PRIORITY;
+ }
+#if 0
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(10);
+#endif
+
+ return 0;
+}
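The driver itself is table-driven: r128_ioctls[] is indexed directly by the ioctl number, each slot naming the handler together with its auth_needed/root_only bits, and r128_ioctl() above does little more than bounds-check, look up, and gate on those bits. A stripped-down sketch of that lookup; the my_ names are hypothetical:

    typedef struct {
            drm_ioctl_t *func;                  /* NULL if the slot is unused */
            int          auth_needed;
            int          root_only;
    } my_ioctl_desc_t;

    static my_ioctl_desc_t my_ioctls[] = {
            [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { r128_version, 0, 0 },
            /* ... one slot per supported request ... */
    };

    static int
    my_dispatch(struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg, drm_file_t *priv)
    {
            unsigned int     nr = DRM_IOCTL_NR(cmd);
            my_ioctl_desc_t *d;

            if (nr >= sizeof(my_ioctls) / sizeof(my_ioctls[0]))
                    return -EINVAL;
            d = &my_ioctls[nr];
            if (!d->func)
                    return -EINVAL;
            if ((d->root_only && !capable(CAP_SYS_ADMIN)) ||
                (d->auth_needed && !priv->authenticated))
                    return -EACCES;
            return d->func(inode, filp, cmd, arg);
    }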
diff --git a/linux/r128_bufs.c b/linux/r128_bufs.c
new file mode 100644
index 00000000..bad6c571
--- /dev/null
+++ b/linux/r128_bufs.c
@@ -0,0 +1,309 @@
+/* r128_bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
+ * Created: Wed Apr 12 16:19:08 2000 by kevin@precisioninsight.com
+ *
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Kevin E. Martin <kevin@precisioninsight.com>
+ * Rickard E. (Rik) Faith <faith@precisioninsight.com>
+ * Jeff Hartmann <jhartmann@precisioninsight.com>
+ *
+ * $XFree86$
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "r128_drv.h"
+#include "linux/un.h"
+
+
+#ifdef DRM_AGP
+int r128_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ drm_buf_entry_t *entry;
+ drm_buf_t *buf;
+ unsigned long offset;
+ unsigned long agp_offset;
+ int count;
+ int order;
+ int size;
+ int alignment;
+ int page_order;
+ int total;
+ int byte_count;
+ int i;
+
+ if (!dma) return -EINVAL;
+
+ copy_from_user_ret(&request,
+ (drm_buf_desc_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+ count = request.count;
+ order = drm_order(request.size);
+ size = 1 << order;
+
+ alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size):size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
+
+ byte_count = 0;
+ agp_offset = dev->agp->base + request.agp_start;
+
+ DRM_DEBUG("count: %d\n", count);
+ DRM_DEBUG("order: %d\n", order);
+ DRM_DEBUG("size: %d\n", size);
+ DRM_DEBUG("agp_offset: %ld\n", agp_offset);
+ DRM_DEBUG("alignment: %d\n", alignment);
+ DRM_DEBUG("page_order: %d\n", page_order);
+ DRM_DEBUG("total: %d\n", total);
+
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
+ if (dev->queue_count) return -EBUSY; /* Not while in use */
+
+ spin_lock(&dev->count_lock);
+ if (dev->buf_use) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ atomic_inc(&dev->buf_alloc);
+ spin_unlock(&dev->count_lock);
+
+ down(&dev->struct_sem);
+ entry = &dma->bufs[order];
+ if (entry->buf_count) {
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM; /* May only call once for each order */
+ }
+
+ entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS);
+ if (!entry->buflist) {
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
+ }
+ memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+ entry->buf_size = size;
+ entry->page_order = page_order;
+ offset = 0;
+
+ for (offset = 0;
+ entry->buf_count < count;
+ offset += alignment, ++entry->buf_count) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+ buf->offset = (dma->byte_count + offset);
+ buf->address = (void *)(agp_offset + offset);
+ buf->next = NULL;
+ buf->waiting = 0;
+ buf->pending = 0;
+ init_waitqueue_head(&buf->dma_wait);
+ buf->pid = 0;
+
+ buf->dev_priv_size = sizeof(drm_r128_buf_priv_t);
+ buf->dev_private = drm_alloc(sizeof(drm_r128_buf_priv_t),
+ DRM_MEM_BUFS);
+ memset(buf->dev_private, 0, buf->dev_priv_size);
+
+#if DRM_DMA_HISTOGRAM
+ buf->time_queued = 0;
+ buf->time_dispatched = 0;
+ buf->time_completed = 0;
+ buf->time_freed = 0;
+#endif
+
+ byte_count += PAGE_SIZE << page_order;
+
+ DRM_DEBUG("buffer %d @ %p\n",
+ entry->buf_count, buf->address);
+ }
+
+ DRM_DEBUG("byte_count: %d\n", byte_count);
+
+ dma->buflist = drm_realloc(dma->buflist,
+ dma->buf_count * sizeof(*dma->buflist),
+ (dma->buf_count + entry->buf_count)
+ * sizeof(*dma->buflist),
+ DRM_MEM_BUFS);
+ for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
+ dma->buflist[i] = &entry->buflist[i - dma->buf_count];
+
+ dma->buf_count += entry->buf_count;
+ dma->byte_count += byte_count;
+
+ drm_freelist_create(&entry->freelist, entry->buf_count);
+ for (i = 0; i < entry->buf_count; i++) {
+ drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
+ }
+
+ up(&dev->struct_sem);
+
+ request.count = entry->buf_count;
+ request.size = size;
+
+ copy_to_user_ret((drm_buf_desc_t *)arg,
+ &request,
+ sizeof(request),
+ -EFAULT);
+
+ dma->flags = _DRM_DMA_USE_AGP;
+
+ atomic_dec(&dev->buf_alloc);
+ return 0;
+}
+#endif
+
+int r128_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_r128_private_t *dev_priv = dev->dev_private;
+ drm_buf_desc_t request;
+
+ if (!dev_priv || dev_priv->is_pci) return -EINVAL;
+
+ copy_from_user_ret(&request,
+ (drm_buf_desc_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+#ifdef DRM_AGP
+ if (request.flags & _DRM_AGP_BUFFER)
+ return r128_addbufs_agp(inode, filp, cmd, arg);
+ else
+#endif
+ return -EINVAL;
+}
+
+int r128_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_r128_private_t *dev_priv = dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ int retcode = 0;
+ const int zero = 0;
+ unsigned long virtual;
+ unsigned long address;
+ drm_buf_map_t request;
+ int i;
+
+ if (!dma || !dev_priv || dev_priv->is_pci) return -EINVAL;
+
+ DRM_DEBUG("\n");
+
+ spin_lock(&dev->count_lock);
+ if (atomic_read(&dev->buf_alloc)) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ ++dev->buf_use; /* Can't allocate more after this call */
+ spin_unlock(&dev->count_lock);
+
+ copy_from_user_ret(&request,
+ (drm_buf_map_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+ if (request.count >= dma->buf_count) {
+ if (dma->flags & _DRM_DMA_USE_AGP) {
+ drm_map_t *map;
+
+ map = dev_priv->agp_vertbufs;
+ if (!map) {
+ retcode = -EINVAL;
+ goto done;
+ }
+
+ down(&current->mm->mmap_sem);
+ virtual = do_mmap(filp, 0, map->size,
+ PROT_READ|PROT_WRITE,
+ MAP_SHARED,
+ (unsigned long)map->offset);
+ up(&current->mm->mmap_sem);
+ } else {
+ down(&current->mm->mmap_sem);
+ virtual = do_mmap(filp, 0, dma->byte_count,
+ PROT_READ|PROT_WRITE, MAP_SHARED, 0);
+ up(&current->mm->mmap_sem);
+ }
+ if (virtual > -1024UL) {
+ /* Real error */
+ retcode = (signed long)virtual;
+ goto done;
+ }
+ request.virtual = (void *)virtual;
+
+ for (i = 0; i < dma->buf_count; i++) {
+ if (copy_to_user(&request.list[i].idx,
+ &dma->buflist[i]->idx,
+ sizeof(request.list[0].idx))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ if (copy_to_user(&request.list[i].total,
+ &dma->buflist[i]->total,
+ sizeof(request.list[0].total))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ if (copy_to_user(&request.list[i].used,
+ &zero,
+ sizeof(zero))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ address = virtual + dma->buflist[i]->offset;
+ if (copy_to_user(&request.list[i].address,
+ &address,
+ sizeof(address))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ }
+ }
+ done:
+ request.count = dma->buf_count;
+ DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
+
+ copy_to_user_ret((drm_buf_map_t *)arg,
+ &request,
+ sizeof(request),
+ -EFAULT);
+
+ return retcode;
+}
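Note that r128_addbufs_agp() above allocates no system memory for the buffers themselves; it slices a span of the AGP aperture, starting at agp_start, into count page-aligned chunks of 1 << order bytes. With hypothetical request values the arithmetic traces out like this:

    /* Hypothetical request: count = 4, size = 65536, agp_start = 0,
     * _DRM_PAGE_ALIGN set, 4 KB pages. */
    order      = drm_order(65536);          /* 16                              */
    size       = 1 << order;                /* 65536                           */
    alignment  = PAGE_ALIGN(size);          /* 65536, already page aligned     */
    agp_offset = dev->agp->base + 0;        /* aperture base + agp_start       */

    /* Successive buffers then land at:
     *   buf[0]->address = agp_offset +      0
     *   buf[1]->address = agp_offset +  65536
     *   buf[2]->address = agp_offset + 131072
     *   buf[3]->address = agp_offset + 196608
     * and buf->offset tracks the same positions relative to dma->byte_count. */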
diff --git a/linux/r128_context.c b/linux/r128_context.c
new file mode 100644
index 00000000..d288fd28
--- /dev/null
+++ b/linux/r128_context.c
@@ -0,0 +1,214 @@
+/* r128_context.c -- IOCTLs for r128 contexts -*- linux-c -*-
+ * Created: Mon Dec 13 09:51:35 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Rickard E. (Rik) Faith <faith@precisioninsight.com>
+ *
+ * $XFree86$
+ *
+ */
+
+#include <linux/sched.h>
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "r128_drv.h"
+
+extern drm_ctx_t r128_res_ctx;
+
+static int r128_alloc_queue(drm_device_t *dev)
+{
+#if 0
+ static int context = 0;
+#endif
+
+ return drm_ctxbitmap_next(dev);
+}
+
+int r128_context_switch(drm_device_t *dev, int old, int new)
+{
+ char buf[64];
+
+ atomic_inc(&dev->total_ctx);
+
+ if (test_and_set_bit(0, &dev->context_flag)) {
+ DRM_ERROR("Reentering -- FIXME\n");
+ return -EBUSY;
+ }
+
+#if DRM_DMA_HISTOGRAM
+ dev->ctx_start = get_cycles();
+#endif
+
+ DRM_DEBUG("Context switch from %d to %d\n", old, new);
+
+ if (new == dev->last_context) {
+ clear_bit(0, &dev->context_flag);
+ return 0;
+ }
+
+ if (drm_flags & DRM_FLAG_NOCTX) {
+ r128_context_switch_complete(dev, new);
+ } else {
+ sprintf(buf, "C %d %d\n", old, new);
+ drm_write_string(dev, buf);
+ }
+
+ return 0;
+}
+
+int r128_context_switch_complete(drm_device_t *dev, int new)
+{
+ dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
+ dev->last_switch = jiffies;
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("Lock isn't held after context switch\n");
+ }
+
+ /* If a context switch is ever initiated
+ when the kernel holds the lock, release
+ that lock here. */
+#if DRM_DMA_HISTOGRAM
+ atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles()
+ - dev->ctx_start)]);
+
+#endif
+ clear_bit(0, &dev->context_flag);
+ wake_up(&dev->context_wait);
+
+ return 0;
+}
+
+
+int r128_resctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_ctx_res_t res;
+ drm_ctx_t ctx;
+ int i;
+
+ DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
+ copy_from_user_ret(&res, (drm_ctx_res_t *)arg, sizeof(res), -EFAULT);
+ if (res.count >= DRM_RESERVED_CONTEXTS) {
+ memset(&ctx, 0, sizeof(ctx));
+ for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+ ctx.handle = i;
+ copy_to_user_ret(&res.contexts[i],
+ &i,
+ sizeof(i),
+ -EFAULT);
+ }
+ }
+ res.count = DRM_RESERVED_CONTEXTS;
+ copy_to_user_ret((drm_ctx_res_t *)arg, &res, sizeof(res), -EFAULT);
+ return 0;
+}
+
+
+int r128_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
+ if ((ctx.handle = r128_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
+ /* Skip kernel's context and get a new one. */
+ ctx.handle = r128_alloc_queue(dev);
+ }
+ DRM_DEBUG("%d\n", ctx.handle);
+ if (ctx.handle == -1) {
+ DRM_DEBUG("Not enough free contexts.\n");
+ /* Should this return -EBUSY instead? */
+ return -ENOMEM;
+ }
+
+ copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT);
+ return 0;
+}
+
+int r128_modctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_ctx_t ctx;
+
+ copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT);
+ if (ctx.flags==_DRM_CONTEXT_PRESERVED)
+ r128_res_ctx.handle=ctx.handle;
+ return 0;
+}
+
+int r128_getctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_ctx_t ctx;
+
+ copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT);
+	/* This is 0, because we don't handle any context flags */
+ ctx.flags = 0;
+ copy_to_user_ret((drm_ctx_t*)arg, &ctx, sizeof(ctx), -EFAULT);
+ return 0;
+}
+
+int r128_switchctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
+ DRM_DEBUG("%d\n", ctx.handle);
+ return r128_context_switch(dev, dev->last_context, ctx.handle);
+}
+
+int r128_newctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
+ DRM_DEBUG("%d\n", ctx.handle);
+ r128_context_switch_complete(dev, ctx.handle);
+
+ return 0;
+}
+
+int r128_rmctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
+ DRM_DEBUG("%d\n", ctx.handle);
+ drm_ctxbitmap_free(dev, ctx.handle);
+
+ return 0;
+}
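The switch above is only half of the handshake: r128_context_switch emits a "C <old> <new>" record through drm_write_string, and user space (normally the X server) acknowledges it with the NEW_CTX ioctl that lands in r128_newctx. A hypothetical user-space service loop, assuming the standard C and DRM headers, might look like this:

/* Hypothetical sketch of the user-space half of a context switch. */
static int service_context_switch(int fd)
{
	char buf[64];
	drm_ctx_t ctx;
	int old, new;
	ssize_t n;

	n = read(fd, buf, sizeof(buf) - 1);	/* "C <old> <new>\n" from drm_write_string */
	if (n <= 0)
		return -1;
	buf[n] = '\0';

	if (sscanf(buf, "C %d %d", &old, &new) != 2)
		return -1;

	/* ... restore hardware state for context <new> here ... */

	ctx.handle = new;
	return ioctl(fd, DRM_IOCTL_NEW_CTX, &ctx);	/* completed by r128_newctx */
}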
diff --git a/linux/r128_dma.c b/linux/r128_dma.c
new file mode 100644
index 00000000..860c4188
--- /dev/null
+++ b/linux/r128_dma.c
@@ -0,0 +1,908 @@
+/* r128_dma.c -- DMA support for ATI Rage 128 driver -*- linux-c -*-
+ * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
+ *
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Kevin E. Martin <kevin@precisioninsight.com>
+ *
+ * $XFree86$
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "r128_drv.h"
+
+#include <linux/interrupt.h> /* For task queue support */
+#include <linux/delay.h>
+
+
+
+#define DO_REMAP(_m) (_m)->handle = drm_ioremap((_m)->offset, (_m)->size)
+
+#define DO_REMAPFREE(_m) \
+ do { \
+ if ((_m)->handle && (_m)->size) \
+ drm_ioremapfree((_m)->handle, (_m)->size); \
+ } while (0)
+
+#define DO_FIND_MAP(_m, _o) \
+ do { \
+ int _i; \
+ for (_i = 0; _i < dev->map_count; _i++) { \
+ if (dev->maplist[_i]->offset == _o) { \
+ _m = dev->maplist[_i]; \
+ break; \
+ } \
+ } \
+ } while (0)
+
+
+#define R128_MAX_VBUF_AGE 0x10000000
+#define R128_VB_AGE_REG R128_GUI_SCRATCH_REG0
+
+int R128_READ_PLL(drm_device_t *dev, int addr)
+{
+ drm_r128_private_t *dev_priv = dev->dev_private;
+
+ R128_WRITE8(R128_CLOCK_CNTL_INDEX, addr & 0x1f);
+ return R128_READ(R128_CLOCK_CNTL_DATA);
+}
+
+static void r128_flush_write_combine(void)
+{
+ int xchangeDummy;
+
+ __asm__ volatile("push %%eax ;"
+ "xchg %%eax, %0 ;"
+ "pop %%eax" : : "m" (xchangeDummy));
+ __asm__ volatile("push %%eax ;"
+ "push %%ebx ;"
+ "push %%ecx ;"
+ "push %%edx ;"
+ "movl $0,%%eax ;"
+ "cpuid ;"
+ "pop %%edx ;"
+ "pop %%ecx ;"
+ "pop %%ebx ;"
+ "pop %%eax" : /* no outputs */ : /* no inputs */ );
+}
+
+static int r128_do_cleanup_cce(drm_device_t *dev)
+{
+ if (dev->dev_private) {
+ drm_r128_private_t *dev_priv = dev->dev_private;
+
+ if (!dev_priv->is_pci) {
+ DO_REMAPFREE(dev_priv->agp_ring);
+ DO_REMAPFREE(dev_priv->agp_read_ptr);
+ DO_REMAPFREE(dev_priv->agp_vertbufs);
+ DO_REMAPFREE(dev_priv->agp_indbufs);
+ DO_REMAPFREE(dev_priv->agp_textures);
+ }
+
+ drm_free(dev->dev_private, sizeof(drm_r128_private_t),
+ DRM_MEM_DRIVER);
+ dev->dev_private = NULL;
+ }
+
+ return 0;
+}
+
+static int r128_do_init_cce(drm_device_t *dev, drm_r128_init_t *init)
+{
+ drm_r128_private_t *dev_priv;
+ int i;
+
+ dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER);
+ if (dev_priv == NULL) return -ENOMEM;
+ dev->dev_private = (void *)dev_priv;
+
+ memset(dev_priv, 0, sizeof(drm_r128_private_t));
+
+ dev_priv->is_pci = init->is_pci;
+
+ dev_priv->usec_timeout = init->usec_timeout;
+ if (dev_priv->usec_timeout < 1 ||
+ dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT) {
+ drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
+ dev->dev_private = NULL;
+ return -EINVAL;
+ }
+
+ dev_priv->cce_mode = init->cce_mode;
+ dev_priv->cce_fifo_size = init->cce_fifo_size;
+ dev_priv->cce_is_bm_mode =
+ ((init->cce_mode == R128_PM4_192BM) ||
+ (init->cce_mode == R128_PM4_128BM_64INDBM) ||
+ (init->cce_mode == R128_PM4_64BM_128INDBM) ||
+ (init->cce_mode == R128_PM4_64BM_64VCBM_64INDBM));
+ dev_priv->cce_secure = init->cce_secure;
+
+ if (dev_priv->cce_is_bm_mode && dev_priv->is_pci) {
+ drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
+ dev->dev_private = NULL;
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dev->map_count; i++) {
+ if (dev->maplist[i]->type == _DRM_SHM) {
+ dev_priv->sarea = dev->maplist[i];
+ break;
+ }
+ }
+
+ DO_FIND_MAP(dev_priv->fb, init->fb_offset);
+ if (!dev_priv->is_pci) {
+ DO_FIND_MAP(dev_priv->agp_ring, init->agp_ring_offset);
+ DO_FIND_MAP(dev_priv->agp_read_ptr, init->agp_read_ptr_offset);
+ DO_FIND_MAP(dev_priv->agp_vertbufs, init->agp_vertbufs_offset);
+ DO_FIND_MAP(dev_priv->agp_indbufs, init->agp_indbufs_offset);
+ DO_FIND_MAP(dev_priv->agp_textures, init->agp_textures_offset);
+ }
+ DO_FIND_MAP(dev_priv->mmio, init->mmio_offset);
+
+ dev_priv->sarea_priv =
+ (drm_r128_sarea_t *)((u8 *)dev_priv->sarea->handle +
+ init->sarea_priv_offset);
+
+ if (!dev_priv->is_pci) {
+ DO_REMAP(dev_priv->agp_ring);
+ DO_REMAP(dev_priv->agp_read_ptr);
+ DO_REMAP(dev_priv->agp_vertbufs);
+#if 0
+ DO_REMAP(dev_priv->agp_indirectbufs);
+ DO_REMAP(dev_priv->agp_textures);
+#endif
+
+ dev_priv->ring_size = init->ring_size;
+ dev_priv->ring_sizel2qw = drm_order(init->ring_size/8);
+ dev_priv->ring_entries = init->ring_size/sizeof(u32);
+ dev_priv->ring_read_ptr = ((__volatile__ u32 *)
+ dev_priv->agp_read_ptr->handle);
+ dev_priv->ring_start = (u32 *)dev_priv->agp_ring->handle;
+ dev_priv->ring_end = ((u32 *)dev_priv->agp_ring->handle
+ + dev_priv->ring_entries);
+ }
+
+ dev_priv->submit_age = 0;
+ R128_WRITE(R128_VB_AGE_REG, dev_priv->submit_age);
+
+ return 0;
+}
+
+int r128_init_cce(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_r128_init_t init;
+
+ copy_from_user_ret(&init, (drm_r128_init_t *)arg, sizeof(init),
+ -EFAULT);
+
+ switch (init.func) {
+ case R128_INIT_CCE:
+ return r128_do_init_cce(dev, &init);
+ case R128_CLEANUP_CCE:
+ return r128_do_cleanup_cce(dev);
+ }
+
+ return -EINVAL;
+}
+
+static void r128_mark_vertbufs_done(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+
+ for (i = 0; i < dma->buf_count; i++) {
+ drm_buf_t *buf = dma->buflist[i];
+ drm_r128_buf_priv_t *buf_priv = buf->dev_private;
+ buf_priv->age = 0;
+ }
+}
+
+static int r128_do_pixcache_flush(drm_device_t *dev)
+{
+ drm_r128_private_t *dev_priv = dev->dev_private;
+ u32 tmp;
+ int i;
+
+ tmp = R128_READ(R128_PC_NGUI_CTLSTAT) | R128_PC_FLUSH_ALL;
+ R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp);
+
+ for (i = 0; i < dev_priv->usec_timeout; i++) {
+ if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY))
+ return 0;
+ udelay(1);
+ }
+
+ return -EBUSY;
+}
+
+static int r128_do_wait_for_fifo(drm_device_t *dev, int entries)
+{
+ drm_r128_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < dev_priv->usec_timeout; i++) {
+ int slots = R128_READ(R128_GUI_STAT) & R128_GUI_FIFOCNT_MASK;
+ if (slots >= entries) return 0;
+ udelay(1);
+ }
+ return -EBUSY;
+}
+
+static int r128_do_wait_for_idle(drm_device_t *dev)
+{
+ drm_r128_private_t *dev_priv = dev->dev_private;
+ int i, ret;
+
+	if ((ret = r128_do_wait_for_fifo(dev, 64)) < 0) return ret;
+
+ for (i = 0; i < dev_priv->usec_timeout; i++) {
+ if (!(R128_READ(R128_GUI_STAT) & R128_GUI_ACTIVE)) {
+ (void)r128_do_pixcache_flush(dev);
+ return 0;
+ }
+ udelay(1);
+ }
+ return -EBUSY;
+}
+
+int r128_do_engine_reset(drm_device_t *dev)
+{
+ drm_r128_private_t *dev_priv = dev->dev_private;
+ u32 clock_cntl_index, mclk_cntl, gen_reset_cntl;
+
+ (void)r128_do_pixcache_flush(dev);
+
+ clock_cntl_index = R128_READ(R128_CLOCK_CNTL_INDEX);
+ mclk_cntl = R128_READ_PLL(dev, R128_MCLK_CNTL);
+
+ R128_WRITE_PLL(R128_MCLK_CNTL,
+ mclk_cntl | R128_FORCE_GCP | R128_FORCE_PIPE3D_CPP);
+
+ gen_reset_cntl = R128_READ(R128_GEN_RESET_CNTL);
+
+ R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl | R128_SOFT_RESET_GUI);
+ (void)R128_READ(R128_GEN_RESET_CNTL);
+ R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl & ~R128_SOFT_RESET_GUI);
+ (void)R128_READ(R128_GEN_RESET_CNTL);
+
+ R128_WRITE_PLL(R128_MCLK_CNTL, mclk_cntl);
+ R128_WRITE(R128_CLOCK_CNTL_INDEX, clock_cntl_index);
+ R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl);
+
+ /* For CCE ring buffer only */
+ if (dev_priv->cce_is_bm_mode) {
+ R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
+ R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
+ *dev_priv->ring_read_ptr = 0;
+ dev_priv->sarea_priv->ring_write = 0;
+ }
+
+ /* Reset the CCE mode */
+ (void)r128_do_wait_for_idle(dev);
+ R128_WRITE(R128_PM4_BUFFER_CNTL,
+ dev_priv->cce_mode | dev_priv->ring_sizel2qw);
+ (void)R128_READ(R128_PM4_BUFFER_ADDR); /* as per the sample code */
+ R128_WRITE(R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN);
+
+ r128_mark_vertbufs_done(dev);
+ return 0;
+}
+
+int r128_eng_reset(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
+ dev->lock.pid != current->pid) {
+ DRM_ERROR("r128_eng_reset called without holding the lock\n");
+ return -EINVAL;
+ }
+
+ return r128_do_engine_reset(dev);
+}
+
+static int r128_do_engine_flush(drm_device_t *dev)
+{
+ drm_r128_private_t *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ tmp = R128_READ(R128_PM4_BUFFER_DL_WPTR);
+ R128_WRITE(R128_PM4_BUFFER_DL_WPTR, tmp | R128_PM4_BUFFER_DL_DONE);
+
+ return 0;
+}
+
+int r128_eng_flush(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
+ dev->lock.pid != current->pid) {
+ DRM_ERROR("r128_eng_flush called without holding the lock\n");
+ return -EINVAL;
+ }
+
+ return r128_do_engine_flush(dev);
+}
+
+static int r128_do_cce_wait_for_fifo(drm_device_t *dev, int entries)
+{
+ drm_r128_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < dev_priv->usec_timeout; i++) {
+ int slots = R128_READ(R128_PM4_STAT) & R128_PM4_FIFOCNT_MASK;
+ if (slots >= entries) return 0;
+ udelay(1);
+ }
+ return -EBUSY;
+}
+
+int r128_do_cce_wait_for_idle(drm_device_t *dev)
+{
+ drm_r128_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ if (dev_priv->cce_is_bm_mode) {
+ for (i = 0; i < dev_priv->usec_timeout; i++) {
+ if (*dev_priv->ring_read_ptr == dev_priv->sarea_priv->ring_write) {
+ int pm4stat = R128_READ(R128_PM4_STAT);
+ if ((pm4stat & R128_PM4_FIFOCNT_MASK) >= dev_priv->cce_fifo_size &&
+ !(pm4stat & (R128_PM4_BUSY | R128_PM4_GUI_ACTIVE))) {
+ return r128_do_pixcache_flush(dev);
+ }
+ }
+ udelay(1);
+ }
+ return -EBUSY;
+ } else {
+ int ret = r128_do_cce_wait_for_fifo(dev, dev_priv->cce_fifo_size);
+ if (ret < 0) return ret;
+
+ for (i = 0; i < dev_priv->usec_timeout; i++) {
+ int pm4stat = R128_READ(R128_PM4_STAT);
+ if (!(pm4stat & (R128_PM4_BUSY | R128_PM4_GUI_ACTIVE))) {
+ return r128_do_pixcache_flush(dev);
+ }
+ udelay(1);
+ }
+ return -EBUSY;
+ }
+}
+
+int r128_cce_idle(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
+ dev->lock.pid != current->pid) {
+		DRM_ERROR("r128_cce_idle called without holding the lock\n");
+ return -EINVAL;
+ }
+
+ return r128_do_cce_wait_for_idle(dev);
+}
+
+static int r128_submit_packets_ring_secure(drm_device_t *dev,
+ u32 *commands, int *count)
+{
+ drm_r128_private_t *dev_priv = dev->dev_private;
+ int write = dev_priv->sarea_priv->ring_write;
+ int *write_ptr = dev_priv->ring_start + write;
+ int c = *count;
+ u32 tmp = 0;
+ int psize = 0;
+ int writing = 1;
+ int timeout;
+
+ while (c > 0) {
+ tmp = *commands++;
+ if (!psize) {
+ writing = 1;
+
+ if ((tmp & R128_CCE_PACKET_MASK) == R128_CCE_PACKET0) {
+ if ((tmp & R128_CCE_PACKET0_REG_MASK) <= (0x1004 >> 2)) {
+ if ((tmp & R128_CCE_PACKET0_REG_MASK) !=
+ (R128_PM4_VC_FPU_SETUP >> 2)) {
+ writing = 0;
+ }
+ }
+ psize = ((tmp & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2;
+ } else if ((tmp & R128_CCE_PACKET_MASK) == R128_CCE_PACKET1) {
+ if ((tmp & R128_CCE_PACKET1_REG0_MASK) <= (0x1004 >> 2)) {
+ if ((tmp & R128_CCE_PACKET1_REG0_MASK) !=
+ (R128_PM4_VC_FPU_SETUP >> 2)) {
+ writing = 0;
+ }
+ } else if ((tmp & R128_CCE_PACKET1_REG1_MASK) <=
+ (0x1004 << 9)) {
+ if ((tmp & R128_CCE_PACKET1_REG1_MASK) !=
+ (R128_PM4_VC_FPU_SETUP << 9)) {
+ writing = 0;
+ }
+ }
+ psize = 3;
+ } else {
+ psize = ((tmp & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2;
+ }
+ }
+ psize--;
+
+ if (writing) {
+ write++;
+ *write_ptr++ = tmp;
+ }
+ if (write >= dev_priv->ring_entries) {
+ write = 0;
+ write_ptr = dev_priv->ring_start;
+ }
+ timeout = 0;
+ while (write == *dev_priv->ring_read_ptr) {
+ (void)R128_READ(R128_PM4_BUFFER_DL_RPTR);
+ if (timeout++ >= dev_priv->usec_timeout)
+ return -EBUSY;
+ udelay(1);
+ }
+ c--;
+ }
+
+ if (write < 32)
+ memcpy(dev_priv->ring_end,
+ dev_priv->ring_start,
+ write * sizeof(u32));
+
+ /* Make sure WC cache has been flushed */
+ r128_flush_write_combine();
+
+ dev_priv->sarea_priv->ring_write = write;
+ R128_WRITE(R128_PM4_BUFFER_DL_WPTR, write);
+
+ *count = 0;
+
+ return 0;
+}
+
+static int r128_submit_packets_pio_secure(drm_device_t *dev,
+ u32 *commands, int *count)
+{
+ drm_r128_private_t *dev_priv = dev->dev_private;
+ u32 tmp = 0;
+ int psize = 0;
+ int writing = 1;
+ int addr = R128_PM4_FIFO_DATA_EVEN;
+ int ret;
+
+ while (*count > 0) {
+ tmp = *commands++;
+ if (!psize) {
+ writing = 1;
+
+ if ((tmp & R128_CCE_PACKET_MASK) == R128_CCE_PACKET0) {
+ if ((tmp & R128_CCE_PACKET0_REG_MASK) <= (0x1004 >> 2)) {
+ if ((tmp & R128_CCE_PACKET0_REG_MASK) !=
+ (R128_PM4_VC_FPU_SETUP >> 2)) {
+ writing = 0;
+ }
+ }
+ psize = ((tmp & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2;
+ } else if ((tmp & R128_CCE_PACKET_MASK) == R128_CCE_PACKET1) {
+ if ((tmp & R128_CCE_PACKET1_REG0_MASK) <= (0x1004 >> 2)) {
+ if ((tmp & R128_CCE_PACKET1_REG0_MASK) !=
+ (R128_PM4_VC_FPU_SETUP >> 2)) {
+ writing = 0;
+ }
+ } else if ((tmp & R128_CCE_PACKET1_REG1_MASK) <=
+ (0x1004 << 9)) {
+ if ((tmp & R128_CCE_PACKET1_REG1_MASK) !=
+ (R128_PM4_VC_FPU_SETUP << 9)) {
+ writing = 0;
+ }
+ }
+ psize = 3;
+ } else {
+ psize = ((tmp & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2;
+ }
+ }
+ psize--;
+
+ if (writing) {
+ if ((ret = r128_do_cce_wait_for_fifo(dev, 1)) < 0)
+ return ret;
+ R128_WRITE(addr, tmp);
+ addr ^= 0x0004;
+ }
+
+ *count -= 1;
+ }
+
+ if (addr == R128_PM4_FIFO_DATA_ODD) {
+ if ((ret = r128_do_cce_wait_for_fifo(dev, 1)) < 0) return ret;
+ R128_WRITE(addr, R128_CCE_PACKET2);
+ }
+
+ return 0;
+}
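Both secure submit paths above parse each command header the same way; as a worked sketch of that test, using the R128_CCE_PACKET* masks defined later in r128_drv.h, a single dword can be classified as follows. Registers at byte offsets up to 0x1004 are treated as privileged, with R128_PM4_VC_FPU_SETUP the one exception clients may write; the helper name is an assumption for illustration only.

/* Sketch of the type-0 header check performed by the secure paths. */
static int r128_packet0_write_allowed(u32 header)
{
	u32 reg = header & R128_CCE_PACKET0_REG_MASK;	/* register dword index */

	if ((header & R128_CCE_PACKET_MASK) != R128_CCE_PACKET0)
		return 1;			/* not a type-0 packet */

	if (reg <= (0x1004 >> 2) && reg != (R128_PM4_VC_FPU_SETUP >> 2))
		return 0;			/* privileged register: drop the write */

	return 1;
}

/* A type-0 or type-3 packet occupies this many dwords, header included. */
#define R128_PACKET_DWORDS(header) \
	((((header) & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2)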
+
+static int r128_submit_packets_ring(drm_device_t *dev,
+ u32 *commands, int *count)
+{
+ drm_r128_private_t *dev_priv = dev->dev_private;
+ int write = dev_priv->sarea_priv->ring_write;
+ int *write_ptr = dev_priv->ring_start + write;
+ int c = *count;
+ int timeout;
+
+ while (c > 0) {
+ write++;
+ *write_ptr++ = *commands++;
+ if (write >= dev_priv->ring_entries) {
+ write = 0;
+ write_ptr = dev_priv->ring_start;
+ }
+ timeout = 0;
+ while (write == *dev_priv->ring_read_ptr) {
+ (void)R128_READ(R128_PM4_BUFFER_DL_RPTR);
+ if (timeout++ >= dev_priv->usec_timeout)
+ return -EBUSY;
+ udelay(1);
+ }
+ c--;
+ }
+
+ if (write < 32)
+ memcpy(dev_priv->ring_end,
+ dev_priv->ring_start,
+ write * sizeof(u32));
+
+ /* Make sure WC cache has been flushed */
+ r128_flush_write_combine();
+
+ dev_priv->sarea_priv->ring_write = write;
+ R128_WRITE(R128_PM4_BUFFER_DL_WPTR, write);
+
+ *count = 0;
+
+ return 0;
+}
+
+static int r128_submit_packets_pio(drm_device_t *dev,
+ u32 *commands, int *count)
+{
+ drm_r128_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ while (*count > 1) {
+ if ((ret = r128_do_cce_wait_for_fifo(dev, 2)) < 0) return ret;
+ R128_WRITE(R128_PM4_FIFO_DATA_EVEN, *commands++);
+ R128_WRITE(R128_PM4_FIFO_DATA_ODD, *commands++);
+ *count -= 2;
+ }
+
+ if (*count) {
+ if ((ret = r128_do_cce_wait_for_fifo(dev, 2)) < 0) return ret;
+ R128_WRITE(R128_PM4_FIFO_DATA_EVEN, *commands++);
+ R128_WRITE(R128_PM4_FIFO_DATA_ODD, R128_CCE_PACKET2);
+ *count = 0;
+ }
+
+ return 0;
+}
+
+static int r128_do_submit_packets(drm_device_t *dev, u32 *buffer, int count)
+{
+ drm_r128_private_t *dev_priv = dev->dev_private;
+ int c = count;
+ int ret;
+
+ if (dev_priv->cce_is_bm_mode) {
+ int left = 0;
+
+ if (c >= dev_priv->ring_entries) {
+ c = dev_priv->ring_entries-1;
+ left = count - c;
+ }
+
+ /* Since this is only used by the kernel we can use the
+ insecure ring buffer submit packet routine */
+ ret = r128_submit_packets_ring(dev, buffer, &c);
+
+ c += left;
+ } else {
+ /* Since this is only used by the kernel we can use the
+ insecure PIO submit packet routine */
+ ret = r128_submit_packets_pio(dev, buffer, &c);
+ }
+
+ if (ret < 0) return ret;
+ else return c;
+}
+
+int r128_submit_pkt(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_r128_private_t *dev_priv = dev->dev_private;
+ drm_r128_packet_t packet;
+ u32 *buffer;
+ int c;
+ int size;
+ int ret = 0;
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
+ dev->lock.pid != current->pid) {
+ DRM_ERROR("r128_submit_pkt called without holding the lock\n");
+ return -EINVAL;
+ }
+
+ copy_from_user_ret(&packet, (drm_r128_packet_t *)arg, sizeof(packet),
+ -EFAULT);
+
+ c = packet.count;
+ size = c * sizeof(*buffer);
+
+ if (dev_priv->cce_is_bm_mode) {
+ int left = 0;
+
+ if (c >= dev_priv->ring_entries) {
+ c = dev_priv->ring_entries-1;
+ size = c * sizeof(*buffer);
+ left = packet.count - c;
+ }
+
+		if ((buffer = kmalloc(size, GFP_KERNEL)) == NULL) return -ENOMEM;
+		if (copy_from_user(buffer, packet.buffer, size)) {
+			kfree(buffer);
+			return -EFAULT;
+		}
+
+ if (dev_priv->cce_secure)
+ ret = r128_submit_packets_ring_secure(dev, buffer, &c);
+ else
+ ret = r128_submit_packets_ring(dev, buffer, &c);
+
+ c += left;
+ } else {
+		if ((buffer = kmalloc(size, GFP_KERNEL)) == NULL) return -ENOMEM;
+		if (copy_from_user(buffer, packet.buffer, size)) {
+			kfree(buffer);
+			return -EFAULT;
+		}
+
+ if (dev_priv->cce_secure)
+ ret = r128_submit_packets_pio_secure(dev, buffer, &c);
+ else
+ ret = r128_submit_packets_pio(dev, buffer, &c);
+ }
+
+ kfree(buffer);
+
+ packet.count = c;
+ copy_to_user_ret((drm_r128_packet_t *)arg, &packet, sizeof(packet),
+ -EFAULT);
+
+ if (ret) return ret;
+ else if (c > 0) return -EAGAIN;
+
+ return 0;
+}
+
+static int r128_send_vertbufs(drm_device_t *dev, drm_r128_vertex_t *v)
+{
+ drm_device_dma_t *dma = dev->dma;
+ drm_r128_private_t *dev_priv = dev->dev_private;
+ drm_r128_buf_priv_t *buf_priv;
+ drm_buf_t *buf;
+ int i, ret;
+ u32 cce[2];
+
+ /* Make sure we have valid data */
+ for (i = 0; i < v->send_count; i++) {
+ int idx = v->send_indices[i];
+
+ if (idx < 0 || idx >= dma->buf_count) {
+ DRM_ERROR("Index %d (of %d max)\n",
+ idx, dma->buf_count - 1);
+ return -EINVAL;
+ }
+ buf = dma->buflist[idx];
+ if (buf->pid != current->pid) {
+ DRM_ERROR("Process %d using buffer owned by %d\n",
+ current->pid, buf->pid);
+ return -EINVAL;
+ }
+ if (buf->pending) {
+ DRM_ERROR("Sending pending buffer:"
+ " buffer %d, offset %d\n",
+ v->send_indices[i], i);
+ return -EINVAL;
+ }
+ }
+
+	/* Wait for idle if the age counter has wrapped, to make sure that
+	   all pending buffers have been processed */
+ if (dev_priv->submit_age == R128_MAX_VBUF_AGE) {
+ if ((ret = r128_do_cce_wait_for_idle(dev)) < 0) return ret;
+ dev_priv->submit_age = 0;
+ r128_mark_vertbufs_done(dev);
+ }
+
+ /* Make sure WC cache has been flushed (if in PIO mode) */
+ if (!dev_priv->cce_is_bm_mode) r128_flush_write_combine();
+
+ /* FIXME: Add support for sending vertex buffer to the CCE here
+ instead of in client code. The v->prim holds the primitive
+	   type that should be drawn. Loop over the list of buffers in
+ send_indices[] and submit a packet for each VB.
+
+ This will require us to loop over the clip rects here as
+ well, which implies that we extend the kernel driver to allow
+ cliprects to be stored here. Note that the cliprects could
+ possibly come from the X server instead of the client, but
+ this will require additional changes to the DRI to allow for
+ this optimization. */
+
+ /* Submit a CCE packet that writes submit_age to R128_VB_AGE_REG */
+ cce[0] = R128CCE0(R128_CCE_PACKET0, R128_VB_AGE_REG, 0);
+ cce[1] = dev_priv->submit_age;
+ if ((ret = r128_do_submit_packets(dev, cce, 2)) < 0) {
+ /* Until we add support for sending VBs to the CCE in
+ this routine, we can recover from this error. After
+ we add that support, we won't be able to easily
+ recover, so we will probably have to implement
+ another mechanism for handling timeouts from packets
+ submitted directly by the kernel. */
+ return ret;
+ }
+
+ /* Now that the submit packet request has succeeded, we can mark
+ the buffers as pending */
+ for (i = 0; i < v->send_count; i++) {
+ buf = dma->buflist[v->send_indices[i]];
+ buf->pending = 1;
+
+ buf_priv = buf->dev_private;
+ buf_priv->age = dev_priv->submit_age;
+ }
+
+ dev_priv->submit_age++;
+
+ return 0;
+}
+
+static drm_buf_t *r128_freelist_get(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+ drm_r128_private_t *dev_priv = dev->dev_private;
+ drm_r128_buf_priv_t *buf_priv;
+ drm_buf_t *buf;
+ int i, t;
+
+ /* FIXME: Optimize -- use freelist code */
+
+ for (i = 0; i < dma->buf_count; i++) {
+ buf = dma->buflist[i];
+ buf_priv = buf->dev_private;
+ if (buf->pid == 0) return buf;
+ }
+
+ for (t = 0; t < dev_priv->usec_timeout; t++) {
+ u32 done_age = R128_READ(R128_VB_AGE_REG);
+
+ for (i = 0; i < dma->buf_count; i++) {
+ buf = dma->buflist[i];
+ buf_priv = buf->dev_private;
+ if (buf->pending && buf_priv->age <= done_age) {
+ /* The buffer has been processed, so it
+ can now be used */
+ buf->pending = 0;
+ return buf;
+ }
+ }
+ udelay(1);
+ }
+
+ return NULL;
+}
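The reclaim logic above rests on a single invariant: r128_send_vertbufs stamps every submitted buffer with dev_priv->submit_age and queues a packet that writes that same value to R128_VB_AGE_REG, so a pending buffer is free again once its stamp is no newer than the value the CCE has written back. A minimal sketch of that test, assuming dev->dev_private is valid and with a hypothetical helper name:

/* Sketch of the age test used implicitly by r128_freelist_get above. */
static int r128_buf_retired(drm_device_t *dev, drm_buf_t *buf)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	u32 done_age = R128_READ(R128_VB_AGE_REG);	/* last age the CCE completed */

	return !buf->pending || buf_priv->age <= done_age;
}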
+
+
+static int r128_get_vertbufs(drm_device_t *dev, drm_r128_vertex_t *v)
+{
+ drm_buf_t *buf;
+ int i;
+
+ for (i = v->granted_count; i < v->request_count; i++) {
+ buf = r128_freelist_get(dev);
+ if (!buf) break;
+ buf->pid = current->pid;
+ copy_to_user_ret(&v->request_indices[i],
+ &buf->idx,
+ sizeof(buf->idx),
+ -EFAULT);
+ copy_to_user_ret(&v->request_sizes[i],
+ &buf->total,
+ sizeof(buf->total),
+ -EFAULT);
+ ++v->granted_count;
+ }
+ return 0;
+}
+
+int r128_vertex_buf(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_r128_private_t *dev_priv = dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ int retcode = 0;
+ drm_r128_vertex_t v;
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
+ dev->lock.pid != current->pid) {
+ DRM_ERROR("r128_vertex_buf called without holding the lock\n");
+ return -EINVAL;
+ }
+
+ if (!dev_priv || dev_priv->is_pci) {
+ DRM_ERROR("r128_vertex_buf called with a PCI card\n");
+ return -EINVAL;
+ }
+
+ copy_from_user_ret(&v, (drm_r128_vertex_t *)arg, sizeof(v), -EFAULT);
+ DRM_DEBUG("%d: %d send, %d req\n",
+ current->pid, v.send_count, v.request_count);
+
+ if (v.send_count < 0 || v.send_count > dma->buf_count) {
+ DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
+ current->pid, v.send_count, dma->buf_count);
+ return -EINVAL;
+ }
+ if (v.request_count < 0 || v.request_count > dma->buf_count) {
+ DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
+ current->pid, v.request_count, dma->buf_count);
+ return -EINVAL;
+ }
+
+ if (v.send_count) {
+ retcode = r128_send_vertbufs(dev, &v);
+ }
+
+ v.granted_count = 0;
+
+ if (!retcode && v.request_count) {
+ retcode = r128_get_vertbufs(dev, &v);
+ }
+
+ DRM_DEBUG("%d returning, granted = %d\n",
+ current->pid, v.granted_count);
+ copy_to_user_ret((drm_r128_vertex_t *)arg, &v, sizeof(v), -EFAULT);
+
+ return retcode;
+}
diff --git a/linux/r128_drm.h b/linux/r128_drm.h
new file mode 100644
index 00000000..fa90d72d
--- /dev/null
+++ b/linux/r128_drm.h
@@ -0,0 +1,111 @@
+/* r128_drm.h -- Public header for the r128 driver -*- linux-c -*-
+ * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
+ *
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Kevin E. Martin <kevin@precisioninsight.com>
+ *
+ * $XFree86$
+ */
+
+#ifndef _R128_DRM_H_
+#define _R128_DRM_H_
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (xf86drmR128.h)
+ */
+typedef struct drm_r128_init {
+ enum {
+ R128_INIT_CCE = 0x01,
+ R128_CLEANUP_CCE = 0x02
+ } func;
+ int sarea_priv_offset;
+ int is_pci;
+ int cce_mode;
+ int cce_fifo_size;
+ int cce_secure;
+ int ring_size;
+ int usec_timeout;
+
+ int fb_offset;
+ int agp_ring_offset;
+ int agp_read_ptr_offset;
+ int agp_vertbufs_offset;
+ int agp_indbufs_offset;
+ int agp_textures_offset;
+ int mmio_offset;
+} drm_r128_init_t;
+
+typedef struct drm_r128_packet {
+ unsigned long *buffer;
+ int count;
+ int flags;
+} drm_r128_packet_t;
+
+typedef enum drm_r128_prim {
+ _DRM_R128_PRIM_NONE = 0x0001,
+ _DRM_R128_PRIM_POINT = 0x0002,
+ _DRM_R128_PRIM_LINE = 0x0004,
+ _DRM_R128_PRIM_POLY_LINE = 0x0008,
+ _DRM_R128_PRIM_TRI_LIST = 0x0010,
+ _DRM_R128_PRIM_TRI_FAN = 0x0020,
+ _DRM_R128_PRIM_TRI_STRIP = 0x0040,
+ _DRM_R128_PRIM_TRI_TYPE2 = 0x0080
+} drm_r128_prim_t;
+
+typedef struct drm_r128_vertex {
+ /* Indices here refer to the offset into
+ buflist in drm_buf_get_t. */
+ int send_count; /* Number of buffers to send */
+ int *send_indices; /* List of handles to buffers */
+ int *send_sizes; /* Lengths of data to send */
+ drm_r128_prim_t prim; /* Primitive type */
+ int request_count; /* Number of buffers requested */
+ int *request_indices; /* Buffer information */
+ int *request_sizes;
+ int granted_count; /* Number of buffers granted */
+} drm_r128_vertex_t;
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (r128_sarea.h)
+ */
+#define R128_LOCAL_TEX_HEAP 0
+#define R128_AGP_TEX_HEAP 1
+#define R128_NR_TEX_HEAPS 2
+#define R128_NR_TEX_REGIONS 64
+#define R128_LOG_TEX_GRANULARITY 16
+
+typedef struct drm_tex_region {
+ unsigned char next, prev;
+ unsigned char in_use;
+ int age;
+} drm_tex_region_t;
+
+typedef struct drm_r128_sarea {
+ drm_tex_region_t tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS+1];
+ int tex_age[R128_NR_TEX_HEAPS];
+ int ctx_owner;
+ int ring_write;
+} drm_r128_sarea_t;
+
+#endif
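By way of illustration, user space (typically the DDX driver) is expected to fill this structure and hand it to the INIT ioctl dispatched to r128_init_cce. A hedged sketch follows: the fd and *_offset variables stand for whatever drmOpen and drmAddMap returned, the cce_mode value mirrors the R128_PM4_* encodings in r128_drv.h, DRM_IOCTL_R128_INIT is the request code added to drm.h by this change, and the standard C headers are assumed.

/* Illustrative only: set up a bus-mastering CCE through the new ioctl. */
drm_r128_init_t init;

memset(&init, 0, sizeof(init));
init.func                 = R128_INIT_CCE;
init.is_pci               = 0;			/* AGP card */
init.cce_mode             = R128_PM4_64BM_64VCBM_64INDBM;
init.cce_fifo_size        = 64;
init.cce_secure           = 1;
init.ring_size            = 128 * 1024;		/* bytes */
init.usec_timeout         = 10000;		/* below R128_MAX_USEC_TIMEOUT */
init.sarea_priv_offset    = sarea_priv_offset;	/* agreed with the server */
init.fb_offset            = fb_offset;		/* drmAddMap offsets follow */
init.agp_ring_offset      = agp_ring_offset;
init.agp_read_ptr_offset  = agp_read_ptr_offset;
init.agp_vertbufs_offset  = agp_vertbufs_offset;
init.agp_indbufs_offset   = agp_indbufs_offset;
init.agp_textures_offset  = agp_textures_offset;
init.mmio_offset          = mmio_offset;

if (ioctl(fd, DRM_IOCTL_R128_INIT, &init) < 0)
	perror("DRM_IOCTL_R128_INIT");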
diff --git a/linux/r128_drv.c b/linux/r128_drv.c
new file mode 100644
index 00000000..45ade1de
--- /dev/null
+++ b/linux/r128_drv.c
@@ -0,0 +1,737 @@
+/* r128_drv.c -- ATI Rage 128 driver -*- linux-c -*-
+ * Created: Mon Dec 13 09:47:27 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
+ * Kevin E. Martin <kevin@precisioninsight.com>
+ *
+ * $XFree86$
+ *
+ */
+
+#define EXPORT_SYMTAB
+#include "drmP.h"
+#include "r128_drv.h"
+EXPORT_SYMBOL(r128_init);
+EXPORT_SYMBOL(r128_cleanup);
+
+#define R128_NAME "r128"
+#define R128_DESC "r128"
+#define R128_DATE "20000422"
+#define R128_MAJOR 0
+#define R128_MINOR 0
+#define R128_PATCHLEVEL 5
+
+static drm_device_t r128_device;
+drm_ctx_t r128_res_ctx;
+
+static struct file_operations r128_fops = {
+ open: r128_open,
+ flush: drm_flush,
+ release: r128_release,
+ ioctl: r128_ioctl,
+ mmap: drm_mmap,
+ read: drm_read,
+ fasync: drm_fasync,
+ poll: drm_poll,
+};
+
+static struct miscdevice r128_misc = {
+ minor: MISC_DYNAMIC_MINOR,
+ name: R128_NAME,
+ fops: &r128_fops,
+};
+
+static drm_ioctl_desc_t r128_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { r128_version, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { r128_addbufs, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { r128_mapbufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { r128_addctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { r128_rmctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { r128_modctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { r128_getctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { r128_switchctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { r128_newctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { r128_resctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { r128_lock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { r128_unlock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
+
+#ifdef DRM_AGP
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
+#endif
+
+ [DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_init_cce, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)] = { r128_eng_reset, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_R128_FLUSH)] = { r128_eng_flush, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_R128_PACKET)] = { r128_submit_pkt, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_R128_CCEIDL)] = { r128_cce_idle, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)] = { r128_vertex_buf, 1, 0 },
+};
+#define R128_IOCTL_COUNT DRM_ARRAY_SIZE(r128_ioctls)
+
+#ifdef MODULE
+int init_module(void);
+void cleanup_module(void);
+static char *r128 = NULL;
+
+MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas.");
+MODULE_DESCRIPTION("r128");
+MODULE_PARM(r128, "s");
+
+/* init_module is called when insmod is used to load the module */
+
+int init_module(void)
+{
+ return r128_init();
+}
+
+/* cleanup_module is called when rmmod is used to unload the module */
+
+void cleanup_module(void)
+{
+ r128_cleanup();
+}
+#endif
+
+#ifndef MODULE
+/* r128_setup is called by the kernel to parse command-line options passed
+ * via the boot-loader (e.g., LILO). It calls the insmod option routine,
+ * drm_parse_options. */
+ *
+ * This is not currently supported, since it requires changes to
+ * linux/init/main.c.
+
+
+void __init r128_setup(char *str, int *ints)
+{
+ if (ints[0] != 0) {
+ DRM_ERROR("Illegal command line format, ignored\n");
+ return;
+ }
+ drm_parse_options(str);
+}
+#endif
+
+static int r128_setup(drm_device_t *dev)
+{
+ int i;
+
+ atomic_set(&dev->ioctl_count, 0);
+ atomic_set(&dev->vma_count, 0);
+ dev->buf_use = 0;
+ atomic_set(&dev->buf_alloc, 0);
+
+ drm_dma_setup(dev);
+
+ atomic_set(&dev->total_open, 0);
+ atomic_set(&dev->total_close, 0);
+ atomic_set(&dev->total_ioctl, 0);
+ atomic_set(&dev->total_irq, 0);
+ atomic_set(&dev->total_ctx, 0);
+ atomic_set(&dev->total_locks, 0);
+ atomic_set(&dev->total_unlocks, 0);
+ atomic_set(&dev->total_contends, 0);
+ atomic_set(&dev->total_sleeps, 0);
+
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ dev->magiclist[i].head = NULL;
+ dev->magiclist[i].tail = NULL;
+ }
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ dev->vmalist = NULL;
+ dev->lock.hw_lock = NULL;
+ init_waitqueue_head(&dev->lock.lock_queue);
+ dev->queue_count = 0;
+ dev->queue_reserved = 0;
+ dev->queue_slots = 0;
+ dev->queuelist = NULL;
+ dev->irq = 0;
+ dev->context_flag = 0;
+ dev->interrupt_flag = 0;
+ dev->dma_flag = 0;
+ dev->last_context = 0;
+ dev->last_switch = 0;
+ dev->last_checked = 0;
+ init_timer(&dev->timer);
+ init_waitqueue_head(&dev->context_wait);
+
+ dev->ctx_start = 0;
+ dev->lck_start = 0;
+
+ dev->buf_rp = dev->buf;
+ dev->buf_wp = dev->buf;
+ dev->buf_end = dev->buf + DRM_BSZ;
+ dev->buf_async = NULL;
+ init_waitqueue_head(&dev->buf_readers);
+ init_waitqueue_head(&dev->buf_writers);
+
+ r128_res_ctx.handle=-1;
+
+ DRM_DEBUG("\n");
+
+ /* The kernel's context could be created here, but is now created
+ in drm_dma_enqueue. This is more resource-efficient for
+ hardware that does not do DMA, but may mean that
+ drm_select_queue fails between the time the interrupt is
+ initialized and the time the queues are initialized. */
+
+ return 0;
+}
+
+
+static int r128_takedown(drm_device_t *dev)
+{
+ int i;
+ drm_magic_entry_t *pt, *next;
+ drm_map_t *map;
+ drm_vma_entry_t *vma, *vma_next;
+
+ DRM_DEBUG("\n");
+
+ down(&dev->struct_sem);
+ del_timer(&dev->timer);
+
+ if (dev->devname) {
+ drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
+ dev->devname = NULL;
+ }
+
+ if (dev->unique) {
+ drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
+ dev->unique = NULL;
+ dev->unique_len = 0;
+ }
+ /* Clear pid list */
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ for (pt = dev->magiclist[i].head; pt; pt = next) {
+ next = pt->next;
+ drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+ }
+ dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
+ }
+
+#ifdef DRM_AGP
+ /* Clear AGP information */
+ if (dev->agp) {
+ drm_agp_mem_t *entry;
+ drm_agp_mem_t *nexte;
+
+ /* Remove AGP resources, but leave dev->agp
+ intact until r128_cleanup is called. */
+ for (entry = dev->agp->memory; entry; entry = nexte) {
+ nexte = entry->next;
+ if (entry->bound) drm_unbind_agp(entry->memory);
+ drm_free_agp(entry->memory, entry->pages);
+ drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+ }
+ dev->agp->memory = NULL;
+
+ if (dev->agp->acquired && drm_agp.release)
+ (*drm_agp.release)();
+
+ dev->agp->acquired = 0;
+ dev->agp->enabled = 0;
+ }
+#endif
+
+ /* Clear vma list (only built for debugging) */
+ if (dev->vmalist) {
+ for (vma = dev->vmalist; vma; vma = vma_next) {
+ vma_next = vma->next;
+ drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
+ }
+ dev->vmalist = NULL;
+ }
+
+ /* Clear map area and mtrr information */
+ if (dev->maplist) {
+ for (i = 0; i < dev->map_count; i++) {
+ map = dev->maplist[i];
+ switch (map->type) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+#ifdef CONFIG_MTRR
+ if (map->mtrr >= 0) {
+ int retcode;
+ retcode = mtrr_del(map->mtrr,
+ map->offset,
+ map->size);
+ DRM_DEBUG("mtrr_del = %d\n", retcode);
+ }
+#endif
+ drm_ioremapfree(map->handle, map->size);
+ break;
+ case _DRM_SHM:
+ drm_free_pages((unsigned long)map->handle,
+ drm_order(map->size)
+ - PAGE_SHIFT,
+ DRM_MEM_SAREA);
+ break;
+ case _DRM_AGP:
+ /* Do nothing here, because this is all
+ handled in the AGP/GART driver. */
+ break;
+ }
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ }
+ drm_free(dev->maplist,
+ dev->map_count * sizeof(*dev->maplist),
+ DRM_MEM_MAPS);
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ }
+
+ drm_dma_takedown(dev);
+
+ dev->queue_count = 0;
+ if (dev->lock.hw_lock) {
+ dev->lock.hw_lock = NULL; /* SHM removed */
+ dev->lock.pid = 0;
+ wake_up_interruptible(&dev->lock.lock_queue);
+ }
+ up(&dev->struct_sem);
+
+ return 0;
+}
+
+/* r128_init is called via init_module at module load time, or via
+ * linux/init/main.c (this is not currently supported). */
+
+int r128_init(void)
+{
+ int retcode;
+ drm_device_t *dev = &r128_device;
+
+ DRM_DEBUG("\n");
+
+ memset((void *)dev, 0, sizeof(*dev));
+ dev->count_lock = SPIN_LOCK_UNLOCKED;
+ sema_init(&dev->struct_sem, 1);
+
+#ifdef MODULE
+ drm_parse_options(r128);
+#endif
+
+ if ((retcode = misc_register(&r128_misc))) {
+ DRM_ERROR("Cannot register \"%s\"\n", R128_NAME);
+ return retcode;
+ }
+ dev->device = MKDEV(MISC_MAJOR, r128_misc.minor);
+ dev->name = R128_NAME;
+
+ drm_mem_init();
+ drm_proc_init(dev);
+
+#ifdef DRM_AGP
+ dev->agp = drm_agp_init();
+
+#ifdef CONFIG_MTRR
+ dev->agp->agp_mtrr = mtrr_add(dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size*1024*1024,
+ MTRR_TYPE_WRCOMB,
+ 1);
+#endif
+#endif
+
+ if((retcode = drm_ctxbitmap_init(dev))) {
+ DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+ drm_proc_cleanup();
+ misc_deregister(&r128_misc);
+ r128_takedown(dev);
+ return retcode;
+ }
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ R128_NAME,
+ R128_MAJOR,
+ R128_MINOR,
+ R128_PATCHLEVEL,
+ R128_DATE,
+ r128_misc.minor);
+
+ return 0;
+}
+
+/* r128_cleanup is called via cleanup_module at module unload time. */
+
+void r128_cleanup(void)
+{
+ drm_device_t *dev = &r128_device;
+
+ DRM_DEBUG("\n");
+
+ drm_proc_cleanup();
+ if (misc_deregister(&r128_misc)) {
+ DRM_ERROR("Cannot unload module\n");
+ } else {
+ DRM_INFO("Module unloaded\n");
+ }
+ drm_ctxbitmap_cleanup(dev);
+ r128_takedown(dev);
+#ifdef DRM_AGP
+ if (dev->agp) {
+ /* FIXME -- free other information, too */
+ drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
+ dev->agp = NULL;
+ }
+#endif
+}
+
+int r128_version(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_version_t version;
+ int len;
+
+ copy_from_user_ret(&version,
+ (drm_version_t *)arg,
+ sizeof(version),
+ -EFAULT);
+
+#define DRM_COPY(name,value) \
+ len = strlen(value); \
+ if (len > name##_len) len = name##_len; \
+ name##_len = strlen(value); \
+ if (len && name) { \
+ copy_to_user_ret(name, value, len, -EFAULT); \
+ }
+
+ version.version_major = R128_MAJOR;
+ version.version_minor = R128_MINOR;
+ version.version_patchlevel = R128_PATCHLEVEL;
+
+ DRM_COPY(version.name, R128_NAME);
+ DRM_COPY(version.date, R128_DATE);
+ DRM_COPY(version.desc, R128_DESC);
+
+ copy_to_user_ret((drm_version_t *)arg,
+ &version,
+ sizeof(version),
+ -EFAULT);
+ return 0;
+}
+
+int r128_open(struct inode *inode, struct file *filp)
+{
+ drm_device_t *dev = &r128_device;
+ int retcode = 0;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+ if (!(retcode = drm_open_helper(inode, filp, dev))) {
+ MOD_INC_USE_COUNT;
+ atomic_inc(&dev->total_open);
+ spin_lock(&dev->count_lock);
+ if (!dev->open_count++) {
+ spin_unlock(&dev->count_lock);
+ return r128_setup(dev);
+ }
+ spin_unlock(&dev->count_lock);
+ }
+ return retcode;
+}
+
+int r128_release(struct inode *inode, struct file *filp)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ int retcode = 0;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+ if (!(retcode = drm_release(inode, filp))) {
+ MOD_DEC_USE_COUNT;
+ atomic_inc(&dev->total_close);
+ spin_lock(&dev->count_lock);
+ if (!--dev->open_count) {
+ if (atomic_read(&dev->ioctl_count) || dev->blocked) {
+ DRM_ERROR("Device busy: %d %d\n",
+ atomic_read(&dev->ioctl_count),
+ dev->blocked);
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ spin_unlock(&dev->count_lock);
+ return r128_takedown(dev);
+ }
+ spin_unlock(&dev->count_lock);
+ }
+ return retcode;
+}
+
+/* r128_ioctl is called whenever a process performs an ioctl on /dev/drm. */
+
+int r128_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int nr = DRM_IOCTL_NR(cmd);
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ int retcode = 0;
+ drm_ioctl_desc_t *ioctl;
+ drm_ioctl_t *func;
+
+ atomic_inc(&dev->ioctl_count);
+ atomic_inc(&dev->total_ioctl);
+ ++priv->ioctl_count;
+
+ DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
+ current->pid, cmd, nr, dev->device, priv->authenticated);
+
+ if (nr >= R128_IOCTL_COUNT) {
+ retcode = -EINVAL;
+ } else {
+ ioctl = &r128_ioctls[nr];
+ func = ioctl->func;
+
+ if (!func) {
+ DRM_DEBUG("no function\n");
+ retcode = -EINVAL;
+ } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
+ || (ioctl->auth_needed && !priv->authenticated)) {
+ retcode = -EACCES;
+ } else {
+ retcode = (func)(inode, filp, cmd, arg);
+ }
+ }
+
+ atomic_dec(&dev->ioctl_count);
+ return retcode;
+}
+
+int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ DECLARE_WAITQUEUE(entry, current);
+ int ret = 0;
+ drm_lock_t lock;
+#if DRM_DMA_HISTOGRAM
+ cycles_t start;
+
+ dev->lck_start = start = get_cycles();
+#endif
+
+ copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ current->pid, lock.context);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+ lock.context, current->pid, dev->lock.hw_lock->lock,
+ lock.flags);
+
+#if 0
+ /* dev->queue_count == 0 right now for
+ r128. FIXME? */
+ if (lock.context < 0 || lock.context >= dev->queue_count)
+ return -EINVAL;
+#endif
+
+ if (!ret) {
+#if 0
+ if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
+ != lock.context) {
+ long j = jiffies - dev->lock.lock_time;
+
+ if (lock.context == r128_res_ctx.handle &&
+ j >= 0 && j < DRM_LOCK_SLICE) {
+ /* Can't take lock if we just had it and
+ there is contention. */
+ DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d jiffies=%d\n",
+ lock.context, current->pid, j,
+ dev->lock.lock_time, jiffies);
+ current->state = TASK_INTERRUPTIBLE;
+ current->policy |= SCHED_YIELD;
+ schedule_timeout(DRM_LOCK_SLICE-j);
+ DRM_DEBUG("jiffies=%d\n", jiffies);
+ }
+ }
+#endif
+ add_wait_queue(&dev->lock.lock_queue, &entry);
+ for (;;) {
+ if (!dev->lock.hw_lock) {
+ /* Device has been unregistered */
+ ret = -EINTR;
+ break;
+ }
+ if (drm_lock_take(&dev->lock.hw_lock->lock,
+ lock.context)) {
+ dev->lock.pid = current->pid;
+ dev->lock.lock_time = jiffies;
+ atomic_inc(&dev->total_locks);
+ break; /* Got lock */
+ }
+
+ /* Contention */
+ atomic_inc(&dev->total_sleeps);
+ current->state = TASK_INTERRUPTIBLE;
+#if 1
+ current->policy |= SCHED_YIELD;
+#endif
+ schedule();
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&dev->lock.lock_queue, &entry);
+ }
+
+#if 0
+ if (!ret && dev->last_context != lock.context &&
+ lock.context != r128_res_ctx.handle &&
+ dev->last_context != r128_res_ctx.handle) {
+ add_wait_queue(&dev->context_wait, &entry);
+ current->state = TASK_INTERRUPTIBLE;
+ /* PRE: dev->last_context != lock.context */
+ r128_context_switch(dev, dev->last_context, lock.context);
+ /* POST: we will wait for the context
+ switch and will dispatch on a later call
+ when dev->last_context == lock.context
+ NOTE WE HOLD THE LOCK THROUGHOUT THIS
+ TIME! */
+ current->policy |= SCHED_YIELD;
+ schedule();
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&dev->context_wait, &entry);
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ } else if (dev->last_context != lock.context) {
+ DRM_ERROR("Context mismatch: %d %d\n",
+ dev->last_context, lock.context);
+ }
+ }
+#endif
+
+ if (!ret) {
+ if (lock.flags & _DRM_LOCK_READY) {
+ /* Wait for space in DMA/FIFO */
+ }
+ if (lock.flags & _DRM_LOCK_QUIESCENT) {
+ /* Make hardware quiescent */
+#if 0
+ r128_quiescent(dev);
+#endif
+ }
+ }
+
+#if 0
+ DRM_ERROR("pid = %5d, old counter = %5ld\n",
+ current->pid, current->counter);
+#endif
+ if (lock.context != r128_res_ctx.handle) {
+ current->counter = 5;
+ current->priority = DEF_PRIORITY/4;
+ }
+#if 0
+ while (current->counter > 25)
+ current->counter >>= 1; /* decrease time slice */
+ DRM_ERROR("pid = %5d, new counter = %5ld\n",
+ current->pid, current->counter);
+#endif
+ DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
+
+#if DRM_DMA_HISTOGRAM
+ atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
+#endif
+
+ return ret;
+}
+
+
+int r128_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_lock_t lock;
+
+ copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ current->pid, lock.context);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("%d frees lock (%d holds)\n",
+ lock.context,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ atomic_inc(&dev->total_unlocks);
+ if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
+ atomic_inc(&dev->total_contends);
+ drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
+ /* FIXME: Try to send data to card here */
+ if (!dev->context_flag) {
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("\n");
+ }
+ }
+
+#if 0
+ current->policy |= SCHED_YIELD;
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(1000);
+#endif
+
+ if (lock.context != r128_res_ctx.handle) {
+ current->counter = 5;
+ current->priority = DEF_PRIORITY;
+ }
+#if 0
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(10);
+#endif
+
+ return 0;
+}
diff --git a/linux/r128_drv.h b/linux/r128_drv.h
new file mode 100644
index 00000000..3b888c49
--- /dev/null
+++ b/linux/r128_drv.h
@@ -0,0 +1,226 @@
+/* r128_drv.h -- Private header for r128 driver -*- linux-c -*-
+ * Created: Mon Dec 13 09:51:11 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
+ * Kevin E. Martin <kevin@precisioninsight.com>
+ *
+ * $XFree86$
+ *
+ */
+
+#ifndef _R128_DRV_H_
+#define _R128_DRV_H_
+
+typedef struct drm_r128_private {
+ int is_pci;
+
+ int cce_mode;
+ int cce_fifo_size;
+ int cce_is_bm_mode;
+ int cce_secure;
+
+ drm_r128_sarea_t *sarea_priv;
+
+ __volatile__ u32 *ring_read_ptr;
+
+ u32 *ring_start;
+ u32 *ring_end;
+ int ring_size;
+ int ring_sizel2qw;
+ int ring_entries;
+
+ int submit_age;
+
+ int usec_timeout;
+
+ drm_map_t *sarea;
+ drm_map_t *fb;
+ drm_map_t *agp_ring;
+ drm_map_t *agp_read_ptr;
+ drm_map_t *agp_vertbufs;
+ drm_map_t *agp_indbufs;
+ drm_map_t *agp_textures;
+ drm_map_t *mmio;
+} drm_r128_private_t;
+
+typedef struct drm_r128_buf_priv {
+ u32 age;
+} drm_r128_buf_priv_t;
+
+ /* r128_drv.c */
+extern int r128_init(void);
+extern void r128_cleanup(void);
+extern int r128_version(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int r128_open(struct inode *inode, struct file *filp);
+extern int r128_release(struct inode *inode, struct file *filp);
+extern int r128_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int r128_lock(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int r128_unlock(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+ /* r128_dma.c */
+extern int r128_init_cce(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int r128_eng_reset(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int r128_eng_flush(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int r128_submit_pkt(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int r128_cce_idle(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int r128_vertex_buf(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+ /* r128_bufs.c */
+extern int r128_addbufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int r128_mapbufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+ /* r128_context.c */
+extern int r128_resctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int r128_addctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int r128_modctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int r128_getctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int r128_switchctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int r128_newctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int r128_rmctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+extern int r128_context_switch(drm_device_t *dev, int old, int new);
+extern int r128_context_switch_complete(drm_device_t *dev, int new);
+
+
+/* Register definitions, register access macros and drmAddMap constants
+ * for Rage 128 kernel driver.
+ */
+
+#define R128_PC_NGUI_CTLSTAT 0x0184
+# define R128_PC_FLUSH_ALL 0x00ff
+# define R128_PC_BUSY (1 << 31)
+
+#define R128_CLOCK_CNTL_INDEX 0x0008
+#define R128_CLOCK_CNTL_DATA 0x000c
+# define R128_PLL_WR_EN (1 << 7)
+
+#define R128_MCLK_CNTL 0x000f
+# define R128_FORCE_GCP (1 << 16)
+# define R128_FORCE_PIPE3D_CPP (1 << 17)
+
+#define R128_GEN_RESET_CNTL 0x00f0
+# define R128_SOFT_RESET_GUI (1 << 0)
+
+#define R128_PM4_BUFFER_CNTL 0x0704
+# define R128_PM4_NONPM4 (0 << 28)
+# define R128_PM4_192PIO (1 << 28)
+# define R128_PM4_192BM (2 << 28)
+# define R128_PM4_128PIO_64INDBM (3 << 28)
+# define R128_PM4_128BM_64INDBM (4 << 28)
+# define R128_PM4_64PIO_128INDBM (5 << 28)
+# define R128_PM4_64BM_128INDBM (6 << 28)
+# define R128_PM4_64PIO_64VCBM_64INDBM (7 << 28)
+# define R128_PM4_64BM_64VCBM_64INDBM (8 << 28)
+# define R128_PM4_64PIO_64VCPIO_64INDPIO (15 << 28)
+
+
+#define R128_PM4_BUFFER_DL_RPTR 0x0710
+#define R128_PM4_BUFFER_DL_WPTR 0x0714
+# define R128_PM4_BUFFER_DL_DONE (1 << 31)
+
+#define R128_PM4_VC_FPU_SETUP 0x071c
+
+#define R128_PM4_STAT 0x07b8
+# define R128_PM4_FIFOCNT_MASK 0x0fff
+# define R128_PM4_BUSY (1 << 16)
+# define R128_PM4_GUI_ACTIVE (1 << 31)
+
+#define R128_PM4_BUFFER_ADDR 0x07f0
+#define R128_PM4_MICRO_CNTL 0x07fc
+# define R128_PM4_MICRO_FREERUN (1 << 30)
+
+#define R128_PM4_FIFO_DATA_EVEN 0x1000
+#define R128_PM4_FIFO_DATA_ODD 0x1004
+
+#define R128_GUI_SCRATCH_REG0 0x15e0
+#define R128_GUI_SCRATCH_REG1 0x15e4
+#define R128_GUI_SCRATCH_REG2 0x15e8
+#define R128_GUI_SCRATCH_REG3 0x15ec
+#define R128_GUI_SCRATCH_REG4 0x15f0
+#define R128_GUI_SCRATCH_REG5 0x15f4
+
+#define R128_GUI_STAT 0x1740
+# define R128_GUI_FIFOCNT_MASK 0x0fff
+# define R128_GUI_ACTIVE (1 << 31)
+
+
+/* CCE command packets */
+#define R128_CCE_PACKET0 0x00000000
+#define R128_CCE_PACKET1 0x40000000
+#define R128_CCE_PACKET2 0x80000000
+# define R128_CCE_PACKET_MASK 0xC0000000
+# define R128_CCE_PACKET_COUNT_MASK 0x3fff0000
+# define R128_CCE_PACKET0_REG_MASK 0x000007ff
+# define R128_CCE_PACKET1_REG0_MASK 0x000007ff
+# define R128_CCE_PACKET1_REG1_MASK 0x003ff800
+
+
+#define R128_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+
+
+#define R128_BASE(reg) ((u32)(dev_priv->mmio->handle))
+#define R128_ADDR(reg) (R128_BASE(reg) + reg)
+
+#define R128_DEREF(reg) *(__volatile__ int *)R128_ADDR(reg)
+#define R128_READ(reg) R128_DEREF(reg)
+#define R128_WRITE(reg,val) do { R128_DEREF(reg) = val; } while (0)
+
+#define R128_DEREF8(reg) *(__volatile__ char *)R128_ADDR(reg)
+#define R128_READ8(reg) R128_DEREF8(reg)
+#define R128_WRITE8(reg,val) do { R128_DEREF8(reg) = val; } while (0)
+
+#define R128_WRITE_PLL(addr,val) \
+do { \
+ R128_WRITE8(R128_CLOCK_CNTL_INDEX, ((addr) & 0x1f) | R128_PLL_WR_EN); \
+ R128_WRITE(R128_CLOCK_CNTL_DATA, (val)); \
+} while (0)
+
+extern int R128_READ_PLL(drm_device_t *dev, int addr);
+
+#define R128CCE0(p,r,n) ((p) | ((n) << 16) | ((r) >> 2))
+#define R128CCE1(p,r1,r2) ((p) | (((r2) >> 2) << 11) | ((r1) >> 2))
+#define R128CCE2(p) ((p))
+#define R128CCE3(p,n) ((p) | ((n) << 16))
+
+#endif
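As a concrete check of the packet macros, the header built in r128_send_vertbufs expands as follows (a worked restatement of the definitions above, not new code):

/* R128CCE0(R128_CCE_PACKET0, R128_VB_AGE_REG, 0)
 *	= 0x00000000 | (0 << 16) | (0x15e0 >> 2)
 *	= 0x00000578
 * i.e. a type-0 packet carrying one data dword (the submit age) for
 * R128_GUI_SCRATCH_REG0, the register r128_freelist_get later reads back. */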