author     Kevin E Martin <kem@kem.org>    2000-02-22 15:43:59 +0000
committer  Kevin E Martin <kem@kem.org>    2000-02-22 15:43:59 +0000
commit     2daf8cb6882797f5ab0c44db2da7d6c710968105
tree       435d6f6f14665a7ed4bc27e64f5953d959a07cd1 /linux
parent     f3347be400bbbeda4d1803eb212f0f48f362761b
Import of XFree86 3.9.18
Diffstat (limited to 'linux')
-rw-r--r--  linux/Makefile.kernel      35
-rw-r--r--  linux/Makefile.linux      157
-rw-r--r--  linux/README.drm           41
-rw-r--r--  linux/agpsupport.c        313
-rw-r--r--  linux/bufs.c              528
-rw-r--r--  linux/ctxbitmap.c          86
-rw-r--r--  linux/dma.c               531
-rw-r--r--  linux/drm.h               280
-rw-r--r--  linux/drmP.h              647
-rw-r--r--  linux/fops.c              234
-rw-r--r--  linux/gamma_drv.c         526
-rw-r--r--  linux/i810_bufs.c         584
-rw-r--r--  linux/i810_dma.c          762
-rw-r--r--  linux/i810_drv.c          583
-rw-r--r--  linux/i810_drv.h           76
-rw-r--r--  linux/lists.c             245
-rw-r--r--  linux/lock.c              225
-rw-r--r--  linux/memory.c            327
-rw-r--r--  linux/mga_bufs.c          636
-rw-r--r--  linux/mga_context.c       245
-rw-r--r--  linux/mga_dma.c           978
-rw-r--r--  linux/mga_drv.c           576
-rw-r--r--  linux/mga_drv.h           292
-rw-r--r--  linux/mga_state.c         362
-rw-r--r--  linux/picker.c             14
-rw-r--r--  linux/proc.c              569
-rw-r--r--  linux/tdfx_drv.c          654
-rw-r--r--  linux/vm.c                301
28 files changed, 10807 insertions(+), 0 deletions(-)
diff --git a/linux/Makefile.kernel b/linux/Makefile.kernel
new file mode 100644
index 00000000..44d75c48
--- /dev/null
+++ b/linux/Makefile.kernel
@@ -0,0 +1,35 @@
+#
+# Makefile for the drm device driver. This driver provides support for
+# the Direct Rendering Infrastructure (DRI) in XFree86 4.x.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definitions are now inherited from the
+# parent makes..
+#
+# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.kernel,v 1.5 2000/02/14 06:27:25 martin Exp $
+
+L_TARGET := libdrm.a
+
+L_OBJS := init.o memory.o proc.o auth.o context.o drawable.o bufs.o \
+ lists.o lock.o ioctl.o fops.o vm.o dma.o
+
+M_OBJS :=
+
+ifdef CONFIG_DRM_GAMMA
+M_OBJS += gamma.o
+endif
+
+ifdef CONFIG_DRM_TDFX
+M_OBJS += tdfx.o
+endif
+
+include $(TOPDIR)/Rules.make
+
+gamma.o: gamma_drv.o gamma_dma.o $(L_TARGET)
+ $(LD) $(LD_RFLAG) -r -o $@ gamma_drv.o gamma_dma.o -L. -ldrm
+
+tdfx.o: tdfx_drv.o tdfx_context.o $(L_TARGET)
+ $(LD) $(LD_RFLAG) -r -o $@ tdfx_drv.o tdfx_context.o -L. -ldrm
diff --git a/linux/Makefile.linux b/linux/Makefile.linux
new file mode 100644
index 00000000..0f14540d
--- /dev/null
+++ b/linux/Makefile.linux
@@ -0,0 +1,157 @@
+# Makefile -- For the Direct Rendering Manager module (drm)
+# Created: Mon Jan 4 09:26:53 1999 by faith@precisioninsight.com
+# Revised: Sun Feb 13 23:15:59 2000 by kevin@precisioninsight.com
+#
+# Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+#
+# $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/generic/Makefile.linux,v 1.23 1999/07/02 17:46:30 faith Exp $
+# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.linux,v 1.5 2000/02/14 06:27:25 martin Exp $
+#
+
+.SUFFIXES:
+
+# *** Setup
+
+MODS= gamma.o tdfx.o
+LIBS= libdrm.a
+PROGS= drmstat
+
+DRMOBJS= init.o memory.o proc.o auth.o context.o drawable.o bufs.o \
+ lists.o lock.o ioctl.o fops.o vm.o dma.o
+DRMHEADERS= drm.h drmP.h
+
+GAMMAOBJS= gamma_drv.o gamma_dma.o
+GAMMAHEADERS= gamma_drv.h $(DRMHEADERS)
+
+TDFXOBJS= tdfx_drv.o tdfx_context.o
+TDFXHEADERS= tdfx_drv.h $(DRMHEADERS)
+
+PROGOBJS= drmstat.po xf86drm.po xf86drmHash.po xf86drmRandom.po sigio.po
+PROGHEADERS= xf86drm.h $(DRMHEADERS)
+
+CFLAGS= -O2 $(WARNINGS)
+WARNINGS= -Wall -Wwrite-strings -Wpointer-arith -Wcast-align \
+ -Wstrict-prototypes -Wshadow -Wnested-externs \
+	   -Winline
+MODCFLAGS= $(CFLAGS) -D__KERNEL__ -DMODULE -fomit-frame-pointer
+PRGCFLAGS= $(CFLAGS) -g -ansi -pedantic -DPOSIX_C_SOURCE=199309L \
+ -D_POSIX_SOURCE -D_XOPEN_SOURCE -D_BSD_SOURCE -D_SVID_SOURCE \
+ -I../../../../../../include -I../../../../../../../../include \
+ -I../../../../../../../../programs/Xserver/hw/xfree86/common
+PRGLIBS=
+
+# **** Start of SMP/MODVERSIONS detection
+
+# First, locate correct tree for this kernel version. If we find a
+# matching tree, we assume that we can rely on that tree's autoconf.h.
+# This may not be correct, but it is the best assumption we can make.
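+#
+# The detection preprocesses picker.c against each candidate include
+# tree and greps the expanded output: picker.c is assumed to expand
+# kernel macros (e.g. UTS_RELEASE) into lines of the form
+# "RELEASE = ...", "SMP = ...", and "MODVERSIONS = ..." that the
+# grep/cut pipelines below pull apart.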
+
+VERSION := $(shell uname -r)
+A := /usr/src/linux-$(VERSION)/include
+B := /usr/src/linux/include
+C := /usr/include
+
+V := $(shell gcc -E -nostdinc -I$A picker.c 2>/dev/null \
+ | grep -s 'RELEASE = ' | cut -d' ' -f3)
+ifeq ($(V),"$(VERSION)")
+TREE := $A
+else
+ V := $(shell gcc -E -nostdinc -I$B picker.c 2>/dev/null \
+ | grep -s 'RELEASE = ' | cut -d' ' -f3)
+ifeq ($(V),"$(VERSION)")
+ TREE := $B
+else
+ V := $(shell gcc -E -nostdinc -I$C picker.c 2>/dev/null \
+ | grep -s 'RELEASE = ' | cut -d' ' -f3)
+ifeq ($(V),"$(VERSION)")
+ TREE := $C
+else
+ TREE := 0
+endif
+endif
+endif
+
+# To override this determination, pass the path on the make command line:
+# make TREE=/usr/my-kernel-tree/include
+# or hardcode a value for TREE here:
+# TREE:=/usr/include
+
+ifeq ($(TREE),0)
+all:; @echo Error: Could not locate kernel tree in $A $B $C
+else
+SMP := $(shell gcc -E -nostdinc -I$(TREE) picker.c 2>/dev/null \
+ | grep -s 'SMP = ' | cut -d' ' -f3)
+MODVERSIONS := $(shell gcc -E -I $(TREE) picker.c 2>/dev/null \
+ | grep -s 'MODVERSIONS = ' | cut -d' ' -f3)
+all::;@echo KERNEL HEADERS IN $(TREE): SMP=${SMP} MODVERSIONS=${MODVERSIONS}
+all:: $(LIBS) $(MODS) $(PROGS)
+endif
+
+# **** End of SMP/MODVERSIONS detection
+
+# **** Handle SMP/MODVERSIONS
+ifeq ($(SMP),1)
+MODCFLAGS += -D__SMP__
+endif
+ifeq ($(MODVERSIONS),1)
+MODCFLAGS += -DMODVERSIONS -include $(TREE)/linux/modversions.h
+endif
+
+# **** End of configuration
+
+libdrm.a: $(DRMOBJS)
+ -$(RM) -f $@
+ $(AR) rcs $@ $(DRMOBJS)
+
+gamma.o: $(GAMMAOBJS) $(LIBS)
+ $(LD) -r $^ -o $@
+
+tdfx.o: $(TDFXOBJS) $(LIBS)
+ $(LD) -r $^ -o $@
+
+drmstat: $(PROGOBJS)
+ $(CC) $(PRGCFLAGS) $^ $(PRGLIBS) -o $@
+
+.PHONY: ChangeLog
+ChangeLog:
+	@rm -f ChangeLog
+ @rcs2log -i 2 -r -l \
+ | sed 's,@.*alephnull.com,@precisioninsight.com,' > ChangeLog
+
+
+# .o files are used for modules
+%.o: %.c
+ $(CC) $(MODCFLAGS) -c $< -o $@
+
+%.po: %.c
+ $(CC) $(PRGCFLAGS) -DDRM_USE_MALLOC -c $< -o $@
+
+
+$(DRMOBJS): $(DRMHEADERS)
+$(GAMMAOBJS): $(GAMMAHEADERS)
+$(TDFXOBJS): $(TDFXHEADERS)
+$(PROGOBJS): $(PROGHEADERS)
+
+clean:
+ rm -f *.o *.po *~ core $(PROGS)
+
diff --git a/linux/README.drm b/linux/README.drm
new file mode 100644
index 00000000..1cc4c277
--- /dev/null
+++ b/linux/README.drm
@@ -0,0 +1,41 @@
+
+The Direct Rendering Manager (drm) is a device-independent kernel-level
+device driver that provides support for the XFree86 Direct Rendering
+Infrastructure (DRI).
+
+The DRM supports the Direct Rendering Infrastructure (DRI) in four major
+ways:
+
+ 1. The DRM provides synchronized access to the graphics hardware via
+    the use of an optimized two-tiered lock (see the note after this
+    list).
+
+ 2. The DRM enforces the DRI security policy for access to the graphics
+ hardware by only allowing authenticated X11 clients access to
+ restricted regions of memory.
+
+ 3. The DRM provides a generic DMA engine, complete with multiple
+ queues and the ability to detect the need for an OpenGL context
+ switch.
+
+ 4. The DRM is extensible via the use of small device-specific modules
+ that rely extensively on the API exported by the DRM module.
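+
+ (A note on the two-tiered lock of point 1: the common, uncontended
+ case is handled entirely in user space with a single atomic
+ compare-and-swap on a lock word kept in a page shared with the
+ kernel; only contended acquisitions fall back into the kernel via an
+ ioctl. See the locking paper below for details.)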
+
+
+Documentation on the DRI is available from:
+ http://precisioninsight.com/piinsights.html
+
+For specific information about kernel-level support, see:
+
+ The Direct Rendering Manager, Kernel Support for the Direct Rendering
+ Infrastructure
+ http://precisioninsight.com/dr/drm.html
+
+ Hardware Locking for the Direct Rendering Infrastructure
+ http://precisioninsight.com/dr/locking.html
+
+ A Security Analysis of the Direct Rendering Infrastructure
+ http://precisioninsight.com/dr/security.html
+
+
+
+$XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/README.drm,v 1.2 1999/09/27 14:59:24 dawes Exp $
diff --git a/linux/agpsupport.c b/linux/agpsupport.c
new file mode 100644
index 00000000..fb58154d
--- /dev/null
+++ b/linux/agpsupport.c
@@ -0,0 +1,313 @@
+/* agpsupport.c -- DRM support for AGP/GART backend -*- linux-c -*-
+ * Created: Mon Dec 13 09:56:45 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Rickard E. (Rik) Faith <faith@precisioninsight.com>
+ *
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/agpsupport.c,v 1.1 2000/02/11 17:26:02 dawes Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+drm_agp_func_t drm_agp = { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL };
+
+/* The C standard says that 'void *' is not guaranteed to hold a function
+ pointer, so we use this union to define a generic pointer that is
+ guaranteed to hold any of the function pointers we care about. */
+typedef union {
+ void (*free_memory)(agp_memory *);
+ agp_memory *(*allocate_memory)(size_t, u32);
+ int (*bind_memory)(agp_memory *, off_t);
+ int (*unbind_memory)(agp_memory *);
+ void (*enable)(u32);
+ int (*acquire)(void);
+ void (*release)(void);
+ void (*copy_info)(agp_kern_info *);
+ unsigned long address;
+} drm_agp_func_u;
+
+typedef struct drm_agp_fill {
+ const char *name;
+ drm_agp_func_u *f;
+} drm_agp_fill_t;
+
+static drm_agp_fill_t drm_agp_fill[] = {
+ { __MODULE_STRING(agp_free_memory),
+ (drm_agp_func_u *)&drm_agp.free_memory },
+ { __MODULE_STRING(agp_allocate_memory),
+ (drm_agp_func_u *)&drm_agp.allocate_memory },
+ { __MODULE_STRING(agp_bind_memory),
+ (drm_agp_func_u *)&drm_agp.bind_memory },
+ { __MODULE_STRING(agp_unbind_memory),
+ (drm_agp_func_u *)&drm_agp.unbind_memory },
+ { __MODULE_STRING(agp_enable),
+ (drm_agp_func_u *)&drm_agp.enable },
+ { __MODULE_STRING(agp_backend_acquire),
+ (drm_agp_func_u *)&drm_agp.acquire },
+ { __MODULE_STRING(agp_backend_release),
+ (drm_agp_func_u *)&drm_agp.release },
+ { __MODULE_STRING(agp_copy_info),
+ (drm_agp_func_u *)&drm_agp.copy_info },
+ { NULL, NULL }
+};
+
+int drm_agp_info(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ agp_kern_info *kern;
+ drm_agp_info_t info;
+
+ if (!dev->agp->acquired || !drm_agp.copy_info) return -EINVAL;
+
+ kern = &dev->agp->agp_info;
+ info.agp_version_major = kern->version.major;
+ info.agp_version_minor = kern->version.minor;
+ info.mode = kern->mode;
+ info.aperture_base = kern->aper_base;
+ info.aperture_size = kern->aper_size * 1024 * 1024;
+ info.memory_allowed = kern->max_memory << PAGE_SHIFT;
+ info.memory_used = kern->current_memory << PAGE_SHIFT;
+ info.id_vendor = kern->device->vendor;
+ info.id_device = kern->device->device;
+
+ copy_to_user_ret((drm_agp_info_t *)arg, &info, sizeof(info), -EFAULT);
+ return 0;
+}
+
+int drm_agp_acquire(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ int retcode;
+
+ if (dev->agp->acquired || !drm_agp.acquire) return -EINVAL;
+ if ((retcode = (*drm_agp.acquire)())) return retcode;
+ dev->agp->acquired = 1;
+ return 0;
+}
+
+int drm_agp_release(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+
+ if (!dev->agp->acquired || !drm_agp.release) return -EINVAL;
+ (*drm_agp.release)();
+ dev->agp->acquired = 0;
+ return 0;
+}
+
+int drm_agp_enable(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_agp_mode_t mode;
+
+ if (!dev->agp->acquired || !drm_agp.enable) return -EINVAL;
+
+ copy_from_user_ret(&mode, (drm_agp_mode_t *)arg, sizeof(mode),
+ -EFAULT);
+
+ dev->agp->mode = mode.mode;
+ (*drm_agp.enable)(mode.mode);
+ dev->agp->base = dev->agp->agp_info.aper_base;
+ dev->agp->enabled = 1;
+ return 0;
+}
+
+int drm_agp_alloc(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_agp_buffer_t request;
+ drm_agp_mem_t *entry;
+ agp_memory *memory;
+ unsigned long pages;
+ u32 type;
+
+	if (!dev->agp->acquired) return -EINVAL;
+ copy_from_user_ret(&request, (drm_agp_buffer_t *)arg, sizeof(request),
+ -EFAULT);
+ if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS)))
+ return -ENOMEM;
+
+ pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
+ type = (u32) request.type;
+
+ if (!(memory = drm_alloc_agp(pages, type))) {
+ drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+ return -ENOMEM;
+ }
+
+ entry->handle = (unsigned long)memory->memory;
+ entry->memory = memory;
+ entry->bound = 0;
+ entry->pages = pages;
+ entry->prev = NULL;
+ entry->next = dev->agp->memory;
+ if (dev->agp->memory) dev->agp->memory->prev = entry;
+ dev->agp->memory = entry;
+
+ request.handle = entry->handle;
+ request.physical = memory->physical;
+
+ if (copy_to_user((drm_agp_buffer_t *)arg, &request, sizeof(request))) {
+		dev->agp->memory = entry->next;
+		if (dev->agp->memory) dev->agp->memory->prev = NULL;
+ drm_free_agp(memory, pages);
+ drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t *dev,
+ unsigned long handle)
+{
+ drm_agp_mem_t *entry;
+
+ for (entry = dev->agp->memory; entry; entry = entry->next) {
+ if (entry->handle == handle) return entry;
+ }
+ return NULL;
+}
+
+int drm_agp_unbind(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_agp_binding_t request;
+ drm_agp_mem_t *entry;
+
+ if (!dev->agp->acquired) return -EINVAL;
+ copy_from_user_ret(&request, (drm_agp_binding_t *)arg, sizeof(request),
+ -EFAULT);
+ if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
+ return -EINVAL;
+ if (!entry->bound) return -EINVAL;
+ return drm_unbind_agp(entry->memory);
+}
+
+int drm_agp_bind(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_agp_binding_t request;
+ drm_agp_mem_t *entry;
+ int retcode;
+ int page;
+
+ if (!dev->agp->acquired || !drm_agp.bind_memory) return -EINVAL;
+ copy_from_user_ret(&request, (drm_agp_binding_t *)arg, sizeof(request),
+ -EFAULT);
+ if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
+ return -EINVAL;
+ if (entry->bound) return -EINVAL;
+ page = (request.offset + PAGE_SIZE - 1) / PAGE_SIZE;
+ if ((retcode = drm_bind_agp(entry->memory, page))) return retcode;
+ entry->bound = dev->agp->base + (page << PAGE_SHIFT);
+ return 0;
+}
+
+int drm_agp_free(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_agp_buffer_t request;
+ drm_agp_mem_t *entry;
+
+ if (!dev->agp->acquired) return -EINVAL;
+ copy_from_user_ret(&request, (drm_agp_buffer_t *)arg, sizeof(request),
+ -EFAULT);
+ if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
+ return -EINVAL;
+ if (entry->bound) drm_unbind_agp(entry->memory);
+	if (entry->prev) entry->prev->next = entry->next;
+	else		 dev->agp->memory = entry->next;
+	if (entry->next) entry->next->prev = entry->prev;
+ drm_free_agp(entry->memory, entry->pages);
+ drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+ return 0;
+}
+
+drm_agp_head_t *drm_agp_init(void)
+{
+ drm_agp_fill_t *fill;
+ drm_agp_head_t *head = NULL;
+ int agp_available = 1;
+
+ for (fill = &drm_agp_fill[0]; fill->name; fill++) {
+ char *n = (char *)fill->name;
+		*fill->f = (drm_agp_func_u)get_module_symbol(NULL, n);
+ printk("%s resolves to 0x%08lx\n", n, (*fill->f).address);
+ if (!(*fill->f).address) agp_available = 0;
+ }
+
+ printk("agp_available = %d\n", agp_available);
+
+ if (agp_available) {
+ if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS)))
+ return NULL;
+ memset((void *)head, 0, sizeof(*head));
+ (*drm_agp.copy_info)(&head->agp_info);
+ head->memory = NULL;
+ switch (head->agp_info.chipset) {
+ case INTEL_GENERIC: head->chipset = "Intel"; break;
+ case INTEL_LX: head->chipset = "Intel 440LX"; break;
+ case INTEL_BX: head->chipset = "Intel 440BX"; break;
+ case INTEL_GX: head->chipset = "Intel 440GX"; break;
+ case INTEL_I810: head->chipset = "Intel i810"; break;
+ case VIA_GENERIC: head->chipset = "VIA"; break;
+ case VIA_VP3: head->chipset = "VIA VP3"; break;
+ case VIA_MVP3: head->chipset = "VIA MVP3"; break;
+ case VIA_APOLLO_PRO: head->chipset = "VIA Apollo Pro"; break;
+ case SIS_GENERIC: head->chipset = "SiS"; break;
+ case AMD_GENERIC: head->chipset = "AMD"; break;
+ case AMD_IRONGATE: head->chipset = "AMD Irongate"; break;
+ case ALI_GENERIC: head->chipset = "ALi"; break;
+ case ALI_M1541: head->chipset = "ALi M1541"; break;
+		default:	     head->chipset = "Unknown";	      break;
+		}
+ DRM_INFO("AGP %d.%d on %s @ 0x%08lx %dMB\n",
+ head->agp_info.version.major,
+ head->agp_info.version.minor,
+ head->chipset,
+ head->agp_info.aper_base,
+ head->agp_info.aper_size);
+ }
+ return head;
+}
diff --git a/linux/bufs.c b/linux/bufs.c
new file mode 100644
index 00000000..289868d3
--- /dev/null
+++ b/linux/bufs.c
@@ -0,0 +1,528 @@
+/* bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
+ * Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
+ * Revised: Mon Feb 14 00:14:11 2000 by kevin@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/bufs.c,v 1.8 1999/08/30 13:05:00 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/bufs.c,v 1.4 2000/02/14 06:27:25 martin Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "linux/un.h"
+
+ /* Compute order. Can be made faster. */
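+ /* For example, drm_order(1) == 0, drm_order(4096) == 12, and
+    drm_order(4097) == 13: sizes round up to the next power of two. */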
+int drm_order(unsigned long size)
+{
+ int order;
+ unsigned long tmp;
+
+ for (order = 0, tmp = size; tmp >>= 1; ++order);
+ if (size & ~(1 << order)) ++order;
+ return order;
+}
+
+int drm_addmap(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_map_t *map;
+
+ if (!(filp->f_mode & 3)) return -EACCES; /* Require read/write */
+
+ map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
+ if (!map) return -ENOMEM;
+ if (copy_from_user(map, (drm_map_t *)arg, sizeof(*map))) {
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ return -EFAULT;
+ }
+
+ DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
+ map->offset, map->size, map->type);
+ if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ return -EINVAL;
+ }
+ map->mtrr = -1;
+ map->handle = 0;
+
+ switch (map->type) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+ if (map->offset + map->size < map->offset
+ || map->offset < virt_to_phys(high_memory)) {
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ return -EINVAL;
+ }
+#ifdef CONFIG_MTRR
+ if (map->type == _DRM_FRAME_BUFFER
+ || (map->flags & _DRM_WRITE_COMBINING)) {
+ map->mtrr = mtrr_add(map->offset, map->size,
+ MTRR_TYPE_WRCOMB, 1);
+ }
+#endif
+ map->handle = drm_ioremap(map->offset, map->size);
+ break;
+
+
+ case _DRM_SHM:
+ map->handle = (void *)drm_alloc_pages(drm_order(map->size)
+ - PAGE_SHIFT,
+ DRM_MEM_SAREA);
+ DRM_DEBUG("%ld %d %p\n", map->size, drm_order(map->size),
+ map->handle);
+ if (!map->handle) {
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ return -ENOMEM;
+ }
+ map->offset = (unsigned long)map->handle;
+ if (map->flags & _DRM_CONTAINS_LOCK) {
+ dev->lock.hw_lock = map->handle; /* Pointer to lock */
+ }
+ break;
+ default:
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ return -EINVAL;
+ }
+
+ down(&dev->struct_sem);
+ if (dev->maplist) {
+ ++dev->map_count;
+ dev->maplist = drm_realloc(dev->maplist,
+ (dev->map_count-1)
+ * sizeof(*dev->maplist),
+ dev->map_count
+ * sizeof(*dev->maplist),
+ DRM_MEM_MAPS);
+ } else {
+ dev->map_count = 1;
+ dev->maplist = drm_alloc(dev->map_count*sizeof(*dev->maplist),
+ DRM_MEM_MAPS);
+ }
+ dev->maplist[dev->map_count-1] = map;
+ up(&dev->struct_sem);
+
+ copy_to_user_ret((drm_map_t *)arg, map, sizeof(*map), -EFAULT);
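+	/* For maps in physical address space (registers, frame buffer),
+	   user space mmaps the region by its bus offset rather than by
+	   the kernel-virtual address, so return the offset in the
+	   handle field. */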
+ if (map->type != _DRM_SHM) {
+ copy_to_user_ret(&((drm_map_t *)arg)->handle,
+ &map->offset,
+ sizeof(map->offset),
+ -EFAULT);
+ }
+ return 0;
+}
+
+int drm_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ int count;
+ int order;
+ int size;
+ int total;
+ int page_order;
+ drm_buf_entry_t *entry;
+ unsigned long page;
+ drm_buf_t *buf;
+ int alignment;
+ unsigned long offset;
+ int i;
+ int byte_count;
+ int page_count;
+
+ if (!dma) return -EINVAL;
+
+ copy_from_user_ret(&request,
+ (drm_buf_desc_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+ count = request.count;
+ order = drm_order(request.size);
+ size = 1 << order;
+
+ DRM_DEBUG("count = %d, size = %d (%d), order = %d, queue_count = %d\n",
+ request.count, request.size, size, order, dev->queue_count);
+
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
+ if (dev->queue_count) return -EBUSY; /* Not while in use */
+
+	alignment = (request.flags & DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) : size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
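+
+	/* Sub-page buffers are packed several to each 2^page_order-page
+	   allocation; page-sized and larger buffers get one allocation
+	   each. */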
+
+ spin_lock(&dev->count_lock);
+ if (dev->buf_use) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ atomic_inc(&dev->buf_alloc);
+ spin_unlock(&dev->count_lock);
+
+ down(&dev->struct_sem);
+ entry = &dma->bufs[order];
+ if (entry->buf_count) {
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM; /* May only call once for each order */
+ }
+
+ entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS);
+ if (!entry->buflist) {
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
+ }
+ memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+ entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
+ DRM_MEM_SEGS);
+ if (!entry->seglist) {
+ drm_free(entry->buflist,
+ count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS);
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
+ }
+ memset(entry->seglist, 0, count * sizeof(*entry->seglist));
+
+ dma->pagelist = drm_realloc(dma->pagelist,
+ dma->page_count * sizeof(*dma->pagelist),
+ (dma->page_count + (count << page_order))
+ * sizeof(*dma->pagelist),
+ DRM_MEM_PAGES);
+ DRM_DEBUG("pagelist: %d entries\n",
+ dma->page_count + (count << page_order));
+
+
+ entry->buf_size = size;
+ entry->page_order = page_order;
+ byte_count = 0;
+ page_count = 0;
+ while (entry->buf_count < count) {
+ if (!(page = drm_alloc_pages(page_order, DRM_MEM_DMA))) break;
+ entry->seglist[entry->seg_count++] = page;
+ for (i = 0; i < (1 << page_order); i++) {
+ DRM_DEBUG("page %d @ 0x%08lx\n",
+ dma->page_count + page_count,
+ page + PAGE_SIZE * i);
+ dma->pagelist[dma->page_count + page_count++]
+ = page + PAGE_SIZE * i;
+ }
+ for (offset = 0;
+ offset + size <= total && entry->buf_count < count;
+ offset += alignment, ++entry->buf_count) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+ buf->offset = (dma->byte_count + byte_count + offset);
+ buf->address = (void *)(page + offset);
+ buf->next = NULL;
+ buf->waiting = 0;
+ buf->pending = 0;
+ init_waitqueue_head(&buf->dma_wait);
+ buf->pid = 0;
+#if DRM_DMA_HISTOGRAM
+ buf->time_queued = 0;
+ buf->time_dispatched = 0;
+ buf->time_completed = 0;
+ buf->time_freed = 0;
+#endif
+ DRM_DEBUG("buffer %d @ %p\n",
+ entry->buf_count, buf->address);
+ }
+ byte_count += PAGE_SIZE << page_order;
+ }
+
+ dma->buflist = drm_realloc(dma->buflist,
+ dma->buf_count * sizeof(*dma->buflist),
+ (dma->buf_count + entry->buf_count)
+ * sizeof(*dma->buflist),
+ DRM_MEM_BUFS);
+ for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
+ dma->buflist[i] = &entry->buflist[i - dma->buf_count];
+
+ dma->buf_count += entry->buf_count;
+ dma->seg_count += entry->seg_count;
+ dma->page_count += entry->seg_count << page_order;
+ dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
+
+ drm_freelist_create(&entry->freelist, entry->buf_count);
+ for (i = 0; i < entry->buf_count; i++) {
+ drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
+ }
+
+ up(&dev->struct_sem);
+
+ request.count = entry->buf_count;
+ request.size = size;
+
+ copy_to_user_ret((drm_buf_desc_t *)arg,
+ &request,
+ sizeof(request),
+ -EFAULT);
+
+ atomic_dec(&dev->buf_alloc);
+ return 0;
+}
+
+int drm_infobufs(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_info_t request;
+ int i;
+ int count;
+
+ if (!dma) return -EINVAL;
+
+ spin_lock(&dev->count_lock);
+ if (atomic_read(&dev->buf_alloc)) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ ++dev->buf_use; /* Can't allocate more after this call */
+ spin_unlock(&dev->count_lock);
+
+ copy_from_user_ret(&request,
+ (drm_buf_info_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+ for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
+ if (dma->bufs[i].buf_count) ++count;
+ }
+
+ DRM_DEBUG("count = %d\n", count);
+
+ if (request.count >= count) {
+ for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
+ if (dma->bufs[i].buf_count) {
+ copy_to_user_ret(&request.list[count].count,
+ &dma->bufs[i].buf_count,
+ sizeof(dma->bufs[0]
+ .buf_count),
+ -EFAULT);
+ copy_to_user_ret(&request.list[count].size,
+ &dma->bufs[i].buf_size,
+ sizeof(dma->bufs[0].buf_size),
+ -EFAULT);
+ copy_to_user_ret(&request.list[count].low_mark,
+ &dma->bufs[i]
+ .freelist.low_mark,
+ sizeof(dma->bufs[0]
+ .freelist.low_mark),
+ -EFAULT);
+ copy_to_user_ret(&request.list[count]
+ .high_mark,
+ &dma->bufs[i]
+ .freelist.high_mark,
+ sizeof(dma->bufs[0]
+ .freelist.high_mark),
+ -EFAULT);
+ DRM_DEBUG("%d %d %d %d %d\n",
+ i,
+ dma->bufs[i].buf_count,
+ dma->bufs[i].buf_size,
+ dma->bufs[i].freelist.low_mark,
+ dma->bufs[i].freelist.high_mark);
+ ++count;
+ }
+ }
+ }
+ request.count = count;
+
+ copy_to_user_ret((drm_buf_info_t *)arg,
+ &request,
+ sizeof(request),
+ -EFAULT);
+
+ return 0;
+}
+
+int drm_markbufs(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ int order;
+ drm_buf_entry_t *entry;
+
+ if (!dma) return -EINVAL;
+
+ copy_from_user_ret(&request,
+ (drm_buf_desc_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+ DRM_DEBUG("%d, %d, %d\n",
+ request.size, request.low_mark, request.high_mark);
+ order = drm_order(request.size);
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
+ entry = &dma->bufs[order];
+
+ if (request.low_mark < 0 || request.low_mark > entry->buf_count)
+ return -EINVAL;
+ if (request.high_mark < 0 || request.high_mark > entry->buf_count)
+ return -EINVAL;
+
+ entry->freelist.low_mark = request.low_mark;
+ entry->freelist.high_mark = request.high_mark;
+
+ return 0;
+}
+
+int drm_freebufs(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_free_t request;
+ int i;
+ int idx;
+ drm_buf_t *buf;
+
+ if (!dma) return -EINVAL;
+
+ copy_from_user_ret(&request,
+ (drm_buf_free_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+ DRM_DEBUG("%d\n", request.count);
+ for (i = 0; i < request.count; i++) {
+ copy_from_user_ret(&idx,
+ &request.list[i],
+ sizeof(idx),
+ -EFAULT);
+ if (idx < 0 || idx >= dma->buf_count) {
+ DRM_ERROR("Index %d (of %d max)\n",
+ idx, dma->buf_count - 1);
+ return -EINVAL;
+ }
+ buf = dma->buflist[idx];
+ if (buf->pid != current->pid) {
+ DRM_ERROR("Process %d freeing buffer owned by %d\n",
+ current->pid, buf->pid);
+ return -EINVAL;
+ }
+ drm_free_buffer(dev, buf);
+ }
+
+ return 0;
+}
+
+int drm_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ int retcode = 0;
+ const int zero = 0;
+ unsigned long virtual;
+ unsigned long address;
+ drm_buf_map_t request;
+ int i;
+
+ if (!dma) return -EINVAL;
+
+ DRM_DEBUG("\n");
+
+ spin_lock(&dev->count_lock);
+ if (atomic_read(&dev->buf_alloc)) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ ++dev->buf_use; /* Can't allocate more after this call */
+ spin_unlock(&dev->count_lock);
+
+ copy_from_user_ret(&request,
+ (drm_buf_map_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+ if (request.count >= dma->buf_count) {
+ virtual = do_mmap(filp, 0, dma->byte_count,
+ PROT_READ|PROT_WRITE, MAP_SHARED, 0);
+ if (virtual > -1024UL) {
+ /* Real error */
+ retcode = (signed long)virtual;
+ goto done;
+ }
+ request.virtual = (void *)virtual;
+
+ for (i = 0; i < dma->buf_count; i++) {
+ if (copy_to_user(&request.list[i].idx,
+ &dma->buflist[i]->idx,
+ sizeof(request.list[0].idx))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ if (copy_to_user(&request.list[i].total,
+ &dma->buflist[i]->total,
+ sizeof(request.list[0].total))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ if (copy_to_user(&request.list[i].used,
+ &zero,
+ sizeof(zero))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ address = virtual + dma->buflist[i]->offset;
+ if (copy_to_user(&request.list[i].address,
+ &address,
+ sizeof(address))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ }
+ }
+done:
+ request.count = dma->buf_count;
+ DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
+
+ copy_to_user_ret((drm_buf_map_t *)arg,
+ &request,
+ sizeof(request),
+ -EFAULT);
+
+ return retcode;
+}
diff --git a/linux/ctxbitmap.c b/linux/ctxbitmap.c
new file mode 100644
index 00000000..f67209d4
--- /dev/null
+++ b/linux/ctxbitmap.c
@@ -0,0 +1,86 @@
+/* ctxbitmap.c -- Context bitmap management -*- linux-c -*-
+ * Created: Thu Jan 6 03:56:42 2000 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jeff Hartmann <jhartmann@precisioninsight.com>
+ *
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/ctxbitmap.c,v 1.1 2000/02/11 17:26:02 dawes Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+void drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle)
+{
+ if (ctx_handle < 0) goto failed;
+
+ if (ctx_handle < DRM_MAX_CTXBITMAP) {
+ clear_bit(ctx_handle, dev->ctx_bitmap);
+ return;
+ }
+failed:
+ DRM_ERROR("Attempt to free invalid context handle: %d\n",
+ ctx_handle);
+ return;
+}
+
+int drm_ctxbitmap_next(drm_device_t *dev)
+{
+ int bit;
+
+ bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
+ if (bit < DRM_MAX_CTXBITMAP) {
+ set_bit(bit, dev->ctx_bitmap);
+ printk("drm_ctxbitmap_next bit : %d\n", bit);
+ return bit;
+ }
+ return -1;
+}
+
+int drm_ctxbitmap_init(drm_device_t *dev)
+{
+ int i;
+ int temp;
+
+ dev->ctx_bitmap = (unsigned long *) drm_alloc(PAGE_SIZE * 4,
+ DRM_MEM_CTXBITMAP);
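+	/* Four pages of bitmap provide PAGE_SIZE * 4 * 8 context handles;
+	   DRM_MAX_CTXBITMAP (defined in drmP.h) is assumed to match. */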
+ if(dev->ctx_bitmap == NULL) {
+ return -ENOMEM;
+ }
+ memset((void *) dev->ctx_bitmap, 0, PAGE_SIZE * 4);
+ for(i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+ temp = drm_ctxbitmap_next(dev);
+ printk("drm_ctxbitmap_init : %d\n", temp);
+ }
+
+ return 0;
+}
+
+void drm_ctxbitmap_cleanup(drm_device_t *dev)
+{
+ drm_free((void *)dev->ctx_bitmap, PAGE_SIZE * 4,
+ DRM_MEM_CTXBITMAP);
+}
+
diff --git a/linux/dma.c b/linux/dma.c
new file mode 100644
index 00000000..2efeba35
--- /dev/null
+++ b/linux/dma.c
@@ -0,0 +1,531 @@
+/* dma.c -- DMA IOCTL and function support -*- linux-c -*-
+ * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
+ * Revised: Sun Feb 13 23:19:45 2000 by kevin@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/dma.c,v 1.7 1999/09/16 16:56:18 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/dma.c,v 1.4 2000/02/14 06:27:26 martin Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+#include <linux/interrupt.h> /* For task queue support */
+
+void drm_dma_setup(drm_device_t *dev)
+{
+ int i;
+
+ dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER);
+ memset(dev->dma, 0, sizeof(*dev->dma));
+ for (i = 0; i <= DRM_MAX_ORDER; i++)
+ memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
+}
+
+void drm_dma_takedown(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int i, j;
+
+ if (!dma) return;
+
+ /* Clear dma buffers */
+ for (i = 0; i <= DRM_MAX_ORDER; i++) {
+ if (dma->bufs[i].seg_count) {
+ DRM_DEBUG("order %d: buf_count = %d,"
+ " seg_count = %d\n",
+ i,
+ dma->bufs[i].buf_count,
+ dma->bufs[i].seg_count);
+ for (j = 0; j < dma->bufs[i].seg_count; j++) {
+ drm_free_pages(dma->bufs[i].seglist[j],
+ dma->bufs[i].page_order,
+ DRM_MEM_DMA);
+ }
+			drm_free(dma->bufs[i].buflist,
+				 dma->bufs[i].buf_count
+				 * sizeof(*dma->bufs[0].buflist),
+				 DRM_MEM_BUFS);
+			drm_free(dma->bufs[i].seglist,
+				 dma->bufs[i].seg_count
+				 * sizeof(*dma->bufs[0].seglist),
+				 DRM_MEM_SEGS);
+ drm_freelist_destroy(&dma->bufs[i].freelist);
+ }
+ }
+
+ if (dma->buflist) {
+ drm_free(dma->buflist,
+ dma->buf_count * sizeof(*dma->buflist),
+ DRM_MEM_BUFS);
+ }
+
+ if (dma->pagelist) {
+ drm_free(dma->pagelist,
+ dma->page_count * sizeof(*dma->pagelist),
+ DRM_MEM_PAGES);
+ }
+ drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
+ dev->dma = NULL;
+}
+
+#if DRM_DMA_HISTOGRAM
+/* This is slow, but is useful for debugging. */
+int drm_histogram_slot(unsigned long count)
+{
+ int value = DRM_DMA_HISTOGRAM_INITIAL;
+ int slot;
+
+ for (slot = 0;
+ slot < DRM_DMA_HISTOGRAM_SLOTS;
+ ++slot, value = DRM_DMA_HISTOGRAM_NEXT(value)) {
+ if (count < value) return slot;
+ }
+ return DRM_DMA_HISTOGRAM_SLOTS - 1;
+}
+
+void drm_histogram_compute(drm_device_t *dev, drm_buf_t *buf)
+{
+ cycles_t queued_to_dispatched;
+ cycles_t dispatched_to_completed;
+ cycles_t completed_to_freed;
+ int q2d, d2c, c2f, q2c, q2f;
+
+ if (buf->time_queued) {
+ queued_to_dispatched = (buf->time_dispatched
+ - buf->time_queued);
+ dispatched_to_completed = (buf->time_completed
+ - buf->time_dispatched);
+ completed_to_freed = (buf->time_freed
+ - buf->time_completed);
+
+ q2d = drm_histogram_slot(queued_to_dispatched);
+ d2c = drm_histogram_slot(dispatched_to_completed);
+ c2f = drm_histogram_slot(completed_to_freed);
+
+ q2c = drm_histogram_slot(queued_to_dispatched
+ + dispatched_to_completed);
+ q2f = drm_histogram_slot(queued_to_dispatched
+ + dispatched_to_completed
+ + completed_to_freed);
+
+ atomic_inc(&dev->histo.total);
+ atomic_inc(&dev->histo.queued_to_dispatched[q2d]);
+ atomic_inc(&dev->histo.dispatched_to_completed[d2c]);
+ atomic_inc(&dev->histo.completed_to_freed[c2f]);
+
+ atomic_inc(&dev->histo.queued_to_completed[q2c]);
+ atomic_inc(&dev->histo.queued_to_freed[q2f]);
+
+ }
+ buf->time_queued = 0;
+ buf->time_dispatched = 0;
+ buf->time_completed = 0;
+ buf->time_freed = 0;
+}
+#endif
+
+void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf)
+{
+ drm_device_dma_t *dma = dev->dma;
+
+ if (!buf) return;
+
+ buf->waiting = 0;
+ buf->pending = 0;
+ buf->pid = 0;
+ buf->used = 0;
+#if DRM_DMA_HISTOGRAM
+ buf->time_completed = get_cycles();
+#endif
+ if (waitqueue_active(&buf->dma_wait)) {
+ wake_up_interruptible(&buf->dma_wait);
+ } else {
+ /* If processes are waiting, the last one
+ to wake will put the buffer on the free
+ list. If no processes are waiting, we
+ put the buffer on the freelist here. */
+ drm_freelist_put(dev, &dma->bufs[buf->order].freelist, buf);
+ }
+}
+
+void drm_reclaim_buffers(drm_device_t *dev, pid_t pid)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+
+ if (!dma) return;
+ for (i = 0; i < dma->buf_count; i++) {
+ if (dma->buflist[i]->pid == pid) {
+ switch (dma->buflist[i]->list) {
+ case DRM_LIST_NONE:
+ drm_free_buffer(dev, dma->buflist[i]);
+ break;
+ case DRM_LIST_WAIT:
+ dma->buflist[i]->list = DRM_LIST_RECLAIM;
+ break;
+ default:
+ /* Buffer already on hardware. */
+ break;
+ }
+ }
+ }
+}
+
+int drm_context_switch(drm_device_t *dev, int old, int new)
+{
+ char buf[64];
+ drm_queue_t *q;
+
+ atomic_inc(&dev->total_ctx);
+
+ if (test_and_set_bit(0, &dev->context_flag)) {
+ DRM_ERROR("Reentering -- FIXME\n");
+ return -EBUSY;
+ }
+
+#if DRM_DMA_HISTOGRAM
+ dev->ctx_start = get_cycles();
+#endif
+
+ DRM_DEBUG("Context switch from %d to %d\n", old, new);
+
+ if (new >= dev->queue_count) {
+ clear_bit(0, &dev->context_flag);
+ return -EINVAL;
+ }
+
+ if (new == dev->last_context) {
+ clear_bit(0, &dev->context_flag);
+ return 0;
+ }
+
+ q = dev->queuelist[new];
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) == 1) {
+ atomic_dec(&q->use_count);
+ clear_bit(0, &dev->context_flag);
+ return -EINVAL;
+ }
+
+ if (drm_flags & DRM_FLAG_NOCTX) {
+ drm_context_switch_complete(dev, new);
+ } else {
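+		/* Post a "C old new" request on the device for the
+		   user-space X server to read; the server performs the
+		   actual context switch when the kernel cannot do it
+		   itself (DRM_FLAG_NOCTX unset). */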
+ sprintf(buf, "C %d %d\n", old, new);
+ drm_write_string(dev, buf);
+ }
+
+ atomic_dec(&q->use_count);
+
+ return 0;
+}
+
+int drm_context_switch_complete(drm_device_t *dev, int new)
+{
+ drm_device_dma_t *dma = dev->dma;
+
+ dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
+ dev->last_switch = jiffies;
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("Lock isn't held after context switch\n");
+ }
+
+ if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("Cannot free lock\n");
+ }
+ }
+
+#if DRM_DMA_HISTOGRAM
+ atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles()
+ - dev->ctx_start)]);
+
+#endif
+ clear_bit(0, &dev->context_flag);
+ wake_up_interruptible(&dev->context_wait);
+
+ return 0;
+}
+
+void drm_clear_next_buffer(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+
+ dma->next_buffer = NULL;
+ if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
+ wake_up_interruptible(&dma->next_queue->flush_queue);
+ }
+ dma->next_queue = NULL;
+}
+
+
+int drm_select_queue(drm_device_t *dev, void (*wrapper)(unsigned long))
+{
+ int i;
+ int candidate = -1;
+ int j = jiffies;
+
+ if (!dev) {
+ DRM_ERROR("No device\n");
+ return -1;
+ }
+ if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
+ /* This only happens between the time the
+ interrupt is initialized and the time
+ the queues are initialized. */
+ return -1;
+ }
+
+ /* Doing "while locked" DMA? */
+ if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
+ return DRM_KERNEL_CONTEXT;
+ }
+
+ /* If there are buffers on the last_context
+ queue, and we have not been executing
+ this context very long, continue to
+ execute this context. */
+ if (dev->last_switch <= j
+ && dev->last_switch + DRM_TIME_SLICE > j
+ && DRM_WAITCOUNT(dev, dev->last_context)) {
+ return dev->last_context;
+ }
+
+ /* Otherwise, find a candidate */
+ for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
+ if (DRM_WAITCOUNT(dev, i)) {
+ candidate = dev->last_checked = i;
+ break;
+ }
+ }
+
+ if (candidate < 0) {
+ for (i = 0; i < dev->queue_count; i++) {
+ if (DRM_WAITCOUNT(dev, i)) {
+ candidate = dev->last_checked = i;
+ break;
+ }
+ }
+ }
+
+ if (wrapper
+ && candidate >= 0
+ && candidate != dev->last_context
+ && dev->last_switch <= j
+ && dev->last_switch + DRM_TIME_SLICE > j) {
+ if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) {
+ del_timer(&dev->timer);
+ dev->timer.function = wrapper;
+ dev->timer.data = (unsigned long)dev;
+ dev->timer.expires = dev->last_switch+DRM_TIME_SLICE;
+ add_timer(&dev->timer);
+ }
+ return -1;
+ }
+
+ return candidate;
+}
+
+
+int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *d)
+{
+ int i;
+ drm_queue_t *q;
+ drm_buf_t *buf;
+ int idx;
+ int while_locked = 0;
+ drm_device_dma_t *dma = dev->dma;
+ DECLARE_WAITQUEUE(entry, current);
+
+ DRM_DEBUG("%d\n", d->send_count);
+
+ if (d->flags & _DRM_DMA_WHILE_LOCKED) {
+ int context = dev->lock.hw_lock->lock;
+
+ if (!_DRM_LOCK_IS_HELD(context)) {
+ DRM_ERROR("No lock held during \"while locked\""
+ " request\n");
+ return -EINVAL;
+ }
+ if (d->context != _DRM_LOCKING_CONTEXT(context)
+ && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Lock held by %d while %d makes"
+ " \"while locked\" request\n",
+ _DRM_LOCKING_CONTEXT(context),
+ d->context);
+ return -EINVAL;
+ }
+ q = dev->queuelist[DRM_KERNEL_CONTEXT];
+ while_locked = 1;
+ } else {
+ q = dev->queuelist[d->context];
+ }
+
+
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->block_write)) {
+ current->state = TASK_INTERRUPTIBLE;
+ add_wait_queue(&q->write_queue, &entry);
+ atomic_inc(&q->block_count);
+ for (;;) {
+ if (!atomic_read(&q->block_write)) break;
+ schedule();
+ if (signal_pending(current)) {
+ atomic_dec(&q->use_count);
+ return -EINTR;
+ }
+ }
+ atomic_dec(&q->block_count);
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&q->write_queue, &entry);
+ }
+
+ for (i = 0; i < d->send_count; i++) {
+ idx = d->send_indices[i];
+ if (idx < 0 || idx >= dma->buf_count) {
+ atomic_dec(&q->use_count);
+ DRM_ERROR("Index %d (of %d max)\n",
+ d->send_indices[i], dma->buf_count - 1);
+ return -EINVAL;
+ }
+ buf = dma->buflist[ idx ];
+ if (buf->pid != current->pid) {
+ atomic_dec(&q->use_count);
+ DRM_ERROR("Process %d using buffer owned by %d\n",
+ current->pid, buf->pid);
+ return -EINVAL;
+ }
+		if (buf->list != DRM_LIST_NONE) {
+			atomic_dec(&q->use_count);
+			DRM_ERROR("Process %d using buffer %d on list %d\n",
+				  current->pid, buf->idx, buf->list);
+			return -EINVAL;
+		}
+ buf->used = d->send_sizes[i];
+ buf->while_locked = while_locked;
+ buf->context = d->context;
+ if (!buf->used) {
+ DRM_ERROR("Queueing 0 length buffer\n");
+ }
+ if (buf->pending) {
+ atomic_dec(&q->use_count);
+ DRM_ERROR("Queueing pending buffer:"
+ " buffer %d, offset %d\n",
+ d->send_indices[i], i);
+ return -EINVAL;
+ }
+ if (buf->waiting) {
+ atomic_dec(&q->use_count);
+ DRM_ERROR("Queueing waiting buffer:"
+ " buffer %d, offset %d\n",
+ d->send_indices[i], i);
+ return -EINVAL;
+ }
+ buf->waiting = 1;
+ if (atomic_read(&q->use_count) == 1
+ || atomic_read(&q->finalization)) {
+ drm_free_buffer(dev, buf);
+ } else {
+ drm_waitlist_put(&q->waitlist, buf);
+ atomic_inc(&q->total_queued);
+ }
+ }
+ atomic_dec(&q->use_count);
+
+ return 0;
+}
+
+static int drm_dma_get_buffers_of_order(drm_device_t *dev, drm_dma_t *d,
+ int order)
+{
+ int i;
+ drm_buf_t *buf;
+ drm_device_dma_t *dma = dev->dma;
+
+ for (i = d->granted_count; i < d->request_count; i++) {
+ buf = drm_freelist_get(&dma->bufs[order].freelist,
+ d->flags & _DRM_DMA_WAIT);
+ if (!buf) break;
+ if (buf->pending || buf->waiting) {
+ DRM_ERROR("Free buffer %d in use by %d (w%d, p%d)\n",
+ buf->idx,
+ buf->pid,
+ buf->waiting,
+ buf->pending);
+ }
+ buf->pid = current->pid;
+ copy_to_user_ret(&d->request_indices[i],
+ &buf->idx,
+ sizeof(buf->idx),
+ -EFAULT);
+ copy_to_user_ret(&d->request_sizes[i],
+ &buf->total,
+ sizeof(buf->total),
+ -EFAULT);
+ ++d->granted_count;
+ }
+ return 0;
+}
+
+
+int drm_dma_get_buffers(drm_device_t *dev, drm_dma_t *dma)
+{
+ int order;
+ int retcode = 0;
+ int tmp_order;
+
+ order = drm_order(dma->request_size);
+
+ dma->granted_count = 0;
+ retcode = drm_dma_get_buffers_of_order(dev, dma, order);
+
+ if (dma->granted_count < dma->request_count
+ && (dma->flags & _DRM_DMA_SMALLER_OK)) {
+ for (tmp_order = order - 1;
+ !retcode
+ && dma->granted_count < dma->request_count
+ && tmp_order >= DRM_MIN_ORDER;
+ --tmp_order) {
+
+ retcode = drm_dma_get_buffers_of_order(dev, dma,
+ tmp_order);
+ }
+ }
+
+ if (dma->granted_count < dma->request_count
+ && (dma->flags & _DRM_DMA_LARGER_OK)) {
+ for (tmp_order = order + 1;
+ !retcode
+ && dma->granted_count < dma->request_count
+ && tmp_order <= DRM_MAX_ORDER;
+ ++tmp_order) {
+
+ retcode = drm_dma_get_buffers_of_order(dev, dma,
+ tmp_order);
+ }
+ }
+	return retcode;
+}
diff --git a/linux/drm.h b/linux/drm.h
new file mode 100644
index 00000000..7488a315
--- /dev/null
+++ b/linux/drm.h
@@ -0,0 +1,280 @@
+/* drm.h -- Header for Direct Rendering Manager -*- linux-c -*-
+ * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
+ * Revised: Mon Feb 14 00:15:23 2000 by kevin@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.46 1999/08/20 20:00:53 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.4 2000/02/14 06:27:26 martin Exp $
+ *
+ * Acknowledgements:
+ * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg.
+ *
+ */
+
+#ifndef _DRM_H_
+#define _DRM_H_
+
+#include <asm/ioctl.h> /* For _IO* macros */
+
+#define DRM_PROC_DEVICES "/proc/devices"
+#define DRM_PROC_MISC "/proc/misc"
+#define DRM_PROC_DRM "/proc/drm"
+#define DRM_DEV_DRM "/dev/drm"
+#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
+#define DRM_DEV_UID 0
+#define DRM_DEV_GID 0
+
+
+#define DRM_NAME "drm" /* Name in kernel, /dev, and /proc */
+#define DRM_MIN_ORDER 5 /* At least 2^5 bytes = 32 bytes */
+#define DRM_MAX_ORDER 22 /* Up to 2^22 bytes = 4MB */
+#define DRM_RAM_PERCENT 10 /* How much system ram can we lock? */
+
+#define _DRM_LOCK_HELD 0x80000000 /* Hardware lock is held */
+#define _DRM_LOCK_CONT 0x40000000 /* Hardware lock is contended */
+#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
+#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
+#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
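+
+/* The lock is a single 32-bit word: bit 31 flags that the lock is held,
+   bit 30 that it is contended, and the remaining bits hold the context
+   that owns it, so an uncontended lock held by context ctx has the
+   value (_DRM_LOCK_HELD | ctx). */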
+
+typedef unsigned long drm_handle_t;
+typedef unsigned int drm_context_t;
+typedef unsigned int drm_drawable_t;
+typedef unsigned int drm_magic_t;
+
+
+typedef struct drm_version {
+ int version_major; /* Major version */
+ int version_minor; /* Minor version */
+ int version_patchlevel;/* Patch level */
+ size_t name_len; /* Length of name buffer */
+ char *name; /* Name of driver */
+ size_t date_len; /* Length of date buffer */
+ char *date; /* User-space buffer to hold date */
+ size_t desc_len; /* Length of desc buffer */
+ char *desc; /* User-space buffer to hold desc */
+} drm_version_t;
+
+typedef struct drm_unique {
+ size_t unique_len; /* Length of unique */
+ char *unique; /* Unique name for driver instantiation */
+} drm_unique_t;
+
+typedef struct drm_list {
+ int count; /* Length of user-space structures */
+ drm_version_t *version;
+} drm_list_t;
+
+typedef struct drm_block {
+ int unused;
+} drm_block_t;
+
+typedef struct drm_control {
+ enum {
+ DRM_ADD_COMMAND,
+ DRM_RM_COMMAND,
+ DRM_INST_HANDLER,
+ DRM_UNINST_HANDLER
+ } func;
+ int irq;
+} drm_control_t;
+
+typedef enum drm_map_type {
+ _DRM_FRAME_BUFFER = 0, /* WC (no caching), no core dump */
+ _DRM_REGISTERS = 1, /* no caching, no core dump */
+ _DRM_SHM = 2 /* shared, cached */
+} drm_map_type_t;
+
+typedef enum drm_map_flags {
+ _DRM_RESTRICTED = 0x01, /* Cannot be mapped to user-virtual */
+ _DRM_READ_ONLY = 0x02,
+ _DRM_LOCKED = 0x04, /* shared, cached, locked */
+ _DRM_KERNEL = 0x08, /* kernel requires access */
+ _DRM_WRITE_COMBINING = 0x10, /* use write-combining if available */
+ _DRM_CONTAINS_LOCK = 0x20 /* SHM page that contains lock */
+} drm_map_flags_t;
+
+typedef struct drm_map {
+ unsigned long offset; /* Requested physical address (0 for SAREA)*/
+ unsigned long size; /* Requested physical size (bytes) */
+ drm_map_type_t type; /* Type of memory to map */
+ drm_map_flags_t flags; /* Flags */
+ void *handle; /* User-space: "Handle" to pass to mmap */
+ /* Kernel-space: kernel-virtual address */
+ int mtrr; /* MTRR slot used */
+ /* Private data */
+} drm_map_t;
+
+typedef enum drm_lock_flags {
+ _DRM_LOCK_READY = 0x01, /* Wait until hardware is ready for DMA */
+ _DRM_LOCK_QUIESCENT = 0x02, /* Wait until hardware quiescent */
+ _DRM_LOCK_FLUSH = 0x04, /* Flush this context's DMA queue first */
+ _DRM_LOCK_FLUSH_ALL = 0x08, /* Flush all DMA queues first */
+ /* These *HALT* flags aren't supported yet
+ -- they will be used to support the
+ full-screen DGA-like mode. */
+ _DRM_HALT_ALL_QUEUES = 0x10, /* Halt all current and future queues */
+ _DRM_HALT_CUR_QUEUES = 0x20 /* Halt all current queues */
+} drm_lock_flags_t;
+
+typedef struct drm_lock {
+ int context;
+ drm_lock_flags_t flags;
+} drm_lock_t;
+
+typedef enum drm_dma_flags { /* These values *MUST* match xf86drm.h */
+ /* Flags for DMA buffer dispatch */
+ _DRM_DMA_BLOCK = 0x01, /* Block until buffer dispatched.
+ Note, the buffer may not yet have
+ been processed by the hardware --
+ getting a hardware lock with the
+ hardware quiescent will ensure
+ that the buffer has been
+ processed. */
+ _DRM_DMA_WHILE_LOCKED = 0x02, /* Dispatch while lock held */
+ _DRM_DMA_PRIORITY = 0x04, /* High priority dispatch */
+
+ /* Flags for DMA buffer request */
+ _DRM_DMA_WAIT = 0x10, /* Wait for free buffers */
+ _DRM_DMA_SMALLER_OK = 0x20, /* Smaller-than-requested buffers ok */
+ _DRM_DMA_LARGER_OK = 0x40 /* Larger-than-requested buffers ok */
+} drm_dma_flags_t;
+
+typedef struct drm_buf_desc {
+ int count; /* Number of buffers of this size */
+ int size; /* Size in bytes */
+ int low_mark; /* Low water mark */
+ int high_mark; /* High water mark */
+ enum {
+ DRM_PAGE_ALIGN = 0x01 /* Align on page boundaries for DMA */
+ } flags;
+} drm_buf_desc_t;
+
+typedef struct drm_buf_info {
+ int count; /* Entries in list */
+ drm_buf_desc_t *list;
+} drm_buf_info_t;
+
+typedef struct drm_buf_free {
+ int count;
+ int *list;
+} drm_buf_free_t;
+
+typedef struct drm_buf_pub {
+ int idx; /* Index into master buflist */
+ int total; /* Buffer size */
+ int used; /* Amount of buffer in use (for DMA) */
+ void *address; /* Address of buffer */
+} drm_buf_pub_t;
+
+typedef struct drm_buf_map {
+ int count; /* Length of buflist */
+ void *virtual; /* Mmaped area in user-virtual */
+ drm_buf_pub_t *list; /* Buffer information */
+} drm_buf_map_t;
+
+typedef struct drm_dma {
+ /* Indices here refer to the offset into
+ buflist in drm_buf_get_t. */
+ int context; /* Context handle */
+ int send_count; /* Number of buffers to send */
+ int *send_indices; /* List of handles to buffers */
+ int *send_sizes; /* Lengths of data to send */
+ drm_dma_flags_t flags; /* Flags */
+ int request_count; /* Number of buffers requested */
+ int request_size; /* Desired size for buffers */
+ int *request_indices; /* Buffer information */
+ int *request_sizes;
+ int granted_count; /* Number of buffers granted */
+} drm_dma_t;
+
+typedef enum {
+ _DRM_CONTEXT_PRESERVED = 0x01,
+ _DRM_CONTEXT_2DONLY = 0x02
+} drm_ctx_flags_t;
+
+typedef struct drm_ctx {
+ drm_context_t handle;
+ drm_ctx_flags_t flags;
+} drm_ctx_t;
+
+typedef struct drm_ctx_res {
+ int count;
+ drm_ctx_t *contexts;
+} drm_ctx_res_t;
+
+typedef struct drm_draw {
+ drm_drawable_t handle;
+} drm_draw_t;
+
+typedef struct drm_auth {
+ drm_magic_t magic;
+} drm_auth_t;
+
+typedef struct drm_irq_busid {
+ int irq;
+ int busnum;
+ int devnum;
+ int funcnum;
+} drm_irq_busid_t;
+
+#define DRM_IOCTL_BASE 'd'
+#define DRM_IOCTL_NR(n) _IOC_NR(n)
+#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
+#define DRM_IOR(nr,size) _IOR(DRM_IOCTL_BASE,nr,size)
+#define DRM_IOW(nr,size) _IOW(DRM_IOCTL_BASE,nr,size)
+#define DRM_IOWR(nr,size) _IOWR(DRM_IOCTL_BASE,nr,size)
+
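+/* Example: DRM_IOCTL_VERSION below expands to _IOWR('d', 0x00,
+ * drm_version_t), an ioctl number that encodes the transfer direction,
+ * sizeof(drm_version_t), the 'd' magic, and the sequence number 0x00.
+ * DRM_IOCTL_NR() recovers that 0x00, which drivers use to index their
+ * ioctl tables (see gamma_ioctls in gamma_drv.c below). */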
+
+#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t)
+#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t)
+#define DRM_IOCTL_GET_MAGIC DRM_IOW( 0x02, drm_auth_t)
+#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t)
+
+#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t)
+#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t)
+#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, drm_block_t)
+#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, drm_block_t)
+#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, drm_control_t)
+#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, drm_map_t)
+#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, drm_buf_desc_t)
+#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, drm_buf_desc_t)
+#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm_buf_info_t)
+#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t)
+#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t)
+
+#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t)
+#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t)
+#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t)
+#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, drm_ctx_t)
+#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, drm_ctx_t)
+#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, drm_ctx_t)
+#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, drm_ctx_res_t)
+#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, drm_draw_t)
+#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, drm_draw_t)
+#define DRM_IOCTL_DMA DRM_IOWR(0x29, drm_dma_t)
+#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, drm_lock_t)
+#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t)
+#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t)
+
+#endif
diff --git a/linux/drmP.h b/linux/drmP.h
new file mode 100644
index 00000000..25c35425
--- /dev/null
+++ b/linux/drmP.h
@@ -0,0 +1,647 @@
+/* drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
+ * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
+ * Revised: Sun Feb 13 23:34:30 2000 by kevin@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmP.h,v 1.58 1999/08/30 13:05:00 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmP.h,v 1.5 2000/02/14 06:27:26 martin Exp $
+ *
+ */
+
+#ifndef _DRM_P_H_
+#define _DRM_P_H_
+
+#ifdef __KERNEL__
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/file.h>
+#include <linux/pci.h>
+#include <linux/wrapper.h>
+#include <linux/version.h>
+#include <asm/io.h>
+#include <asm/mman.h>
+#include <asm/uaccess.h>
+#ifdef CONFIG_MTRR
+#include <asm/mtrr.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,1,0)
+#include <asm/spinlock.h>
+#include <linux/poll.h>
+#endif
+#include "drm.h"
+
+#define DRM_DEBUG_CODE 2 /* Include debugging code (if > 1, then
+ also include looping detection). */
+#define DRM_DMA_HISTOGRAM 1 /* Make histogram of DMA latency. */
+
+#define DRM_HASH_SIZE 16 /* Size of key hash table */
+#define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */
+#define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */
+#define DRM_LOOPING_LIMIT 5000000
+#define DRM_BSZ 1024 /* Buffer size for /dev/drm? output */
+#define DRM_TIME_SLICE (HZ/20) /* Time slice for GLXContexts */
+#define DRM_LOCK_SLICE 1 /* Time slice for lock, in jiffies */
+
+#define DRM_FLAG_DEBUG 0x01
+#define DRM_FLAG_NOCTX 0x02
+
+#define DRM_MEM_DMA 0
+#define DRM_MEM_SAREA 1
+#define DRM_MEM_DRIVER 2
+#define DRM_MEM_MAGIC 3
+#define DRM_MEM_IOCTLS 4
+#define DRM_MEM_MAPS 5
+#define DRM_MEM_VMAS 6
+#define DRM_MEM_BUFS 7
+#define DRM_MEM_SEGS 8
+#define DRM_MEM_PAGES 9
+#define DRM_MEM_FILES 10
+#define DRM_MEM_QUEUES 11
+#define DRM_MEM_CMDS 12
+#define DRM_MEM_MAPPINGS 13
+#define DRM_MEM_BUFLISTS 14
+
+ /* Backward compatibility section */
+ /* _PAGE_WT changed to _PAGE_PWT in 2.2.6 */
+#ifndef _PAGE_PWT
+#define _PAGE_PWT _PAGE_WT
+#endif
+ /* Wait queue declarations changed in 2.3.1 */
+#ifndef DECLARE_WAITQUEUE
+#define DECLARE_WAITQUEUE(w,c) struct wait_queue w = { c, NULL }
+typedef struct wait_queue *wait_queue_head_t;
+#define init_waitqueue_head(q) (*(q) = NULL)
+#endif
+
+ /* _PAGE_4M changed to _PAGE_PSE in 2.3.23 */
+#ifndef _PAGE_PSE
+#define _PAGE_PSE _PAGE_4M
+#endif
+
+ /* vm_offset changed to vm_pgoff in 2.3.25 */
+#if LINUX_VERSION_CODE < 0x020319
+#define VM_OFFSET(vma) ((vma)->vm_offset)
+#else
+#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
+#endif
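+
+/* Example: on 2.3.25+ a vma with vm_pgoff == 3 and PAGE_SHIFT == 12
+ * gives VM_OFFSET(vma) == 0x3000, the same byte offset that older
+ * kernels kept directly in vm_offset. */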
+
+ /* *_nopage return values defined in 2.3.26 */
+#ifndef NOPAGE_SIGBUS
+#define NOPAGE_SIGBUS 0
+#endif
+#ifndef NOPAGE_OOM
+#define NOPAGE_OOM 0
+#endif
+
+ /* Generic cmpxchg added in 2.3.x */
+#if CPU != 386
+#ifndef __HAVE_ARCH_CMPXCHG
+ /* Include this here so that the driver can be
+ used with older kernels. */
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ }
+ return old;
+}
+
+#define cmpxchg(ptr,o,n) \
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \
+ (unsigned long)(n),sizeof(*(ptr))))
+#endif
+#else
+ /* Compiling for a 386 proper... */
+#error DRI not supported on Intel 80386
+#endif
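+
+/* Roughly how the lock code uses cmpxchg (cf. drm_lock_take in lock.c):
+ *
+ * do {
+ * old = *lock;
+ * if (old & _DRM_LOCK_HELD) new = old | _DRM_LOCK_CONT;
+ * else new = context | _DRM_LOCK_HELD;
+ * prev = cmpxchg(lock, old, new);
+ * } while (prev != old);
+ *
+ * retrying until the lock word is updated atomically, so no spinlock is
+ * needed around the hardware lock itself. */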
+
+ /* Macros to make printk easier */
+#define DRM_ERROR(fmt, arg...) \
+ printk(KERN_ERR "[" DRM_NAME ":" __FUNCTION__ "] *ERROR* " fmt , ##arg)
+#define DRM_MEM_ERROR(area, fmt, arg...) \
+ printk(KERN_ERR "[" DRM_NAME ":" __FUNCTION__ ":%s] *ERROR* " fmt , \
+ drm_mem_stats[area].name , ##arg)
+#define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg)
+
+#if DRM_DEBUG_CODE
+#define DRM_DEBUG(fmt, arg...) \
+ do { \
+ if (drm_flags&DRM_FLAG_DEBUG) \
+ printk(KERN_DEBUG \
+ "[" DRM_NAME ":" __FUNCTION__ "] " fmt , \
+ ##arg); \
+ } while (0)
+#else
+#define DRM_DEBUG(fmt, arg...) do { } while (0)
+#endif
+
+#define DRM_PROC_LIMIT (PAGE_SIZE-80)
+
+#define DRM_PROC_PRINT(fmt, arg...) \
+ len += sprintf(&buf[len], fmt , ##arg); \
+ if (len > DRM_PROC_LIMIT) return len;
+
+#define DRM_PROC_PRINT_RET(ret, fmt, arg...) \
+ len += sprintf(&buf[len], fmt , ##arg); \
+ if (len > DRM_PROC_LIMIT) { ret; return len; }
+
+ /* Internal types and structures */
+#define DRM_ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
+#define DRM_MIN(a,b) ((a)<(b)?(a):(b))
+#define DRM_MAX(a,b) ((a)>(b)?(a):(b))
+
+#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
+#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
+#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
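+
+/* Worked example: a waitlist created with count == 4 has five slots
+ * (one spare hole). With rp == wp, DRM_LEFTCOUNT is 4 free entries and
+ * DRM_BUFCOUNT is 0; after two puts, DRM_LEFTCOUNT drops to 2 and
+ * DRM_BUFCOUNT reports 2 queued buffers. */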
+
+typedef int drm_ioctl_t(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+typedef struct drm_ioctl_desc {
+ drm_ioctl_t *func;
+ int auth_needed;
+ int root_only;
+} drm_ioctl_desc_t;
+
+typedef struct drm_devstate {
+ pid_t owner; /* X server pid holding x_lock */
+
+} drm_devstate_t;
+
+typedef struct drm_magic_entry {
+ drm_magic_t magic;
+ struct drm_file *priv;
+ struct drm_magic_entry *next;
+} drm_magic_entry_t;
+
+typedef struct drm_magic_head {
+ struct drm_magic_entry *head;
+ struct drm_magic_entry *tail;
+} drm_magic_head_t;
+
+typedef struct drm_vma_entry {
+ struct vm_area_struct *vma;
+ struct drm_vma_entry *next;
+ pid_t pid;
+} drm_vma_entry_t;
+
+typedef struct drm_buf {
+ int idx; /* Index into master buflist */
+ int total; /* Buffer size */
+ int order; /* log-base-2(total) */
+ int used; /* Amount of buffer in use (for DMA) */
+ unsigned long offset; /* Byte offset (used internally) */
+ void *address; /* Address of buffer */
+ struct drm_buf *next; /* Kernel-only: used for free list */
+ __volatile__ int waiting; /* On kernel DMA queue */
+ __volatile__ int pending; /* On hardware DMA queue */
+ wait_queue_head_t dma_wait; /* Processes waiting */
+ pid_t pid; /* PID of holding process */
+ int context; /* Kernel queue for this buffer */
+ int while_locked;/* Dispatch this buffer while locked */
+ enum {
+ DRM_LIST_NONE = 0,
+ DRM_LIST_FREE = 1,
+ DRM_LIST_WAIT = 2,
+ DRM_LIST_PEND = 3,
+ DRM_LIST_PRIO = 4,
+ DRM_LIST_RECLAIM = 5
+ } list; /* Which list we're on */
+#if DRM_DMA_HISTOGRAM
+ cycles_t time_queued; /* Queued to kernel DMA queue */
+ cycles_t time_dispatched; /* Dispatched to hardware */
+ cycles_t time_completed; /* Completed by hardware */
+ cycles_t time_freed; /* Back on freelist */
+#endif
+} drm_buf_t;
+
+#if DRM_DMA_HISTOGRAM
+#define DRM_DMA_HISTOGRAM_SLOTS 9
+#define DRM_DMA_HISTOGRAM_INITIAL 10
+#define DRM_DMA_HISTOGRAM_NEXT(current) ((current)*10)
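+
+/* The buckets grow by a factor of 10: slot 0 collects latencies below
+ * 10 cycles, slot 1 below 100, and so on; anything at or past the
+ * 10^9-cycle bound lands in the last slot (see drm_histogram_slot). */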
+typedef struct drm_histogram {
+ atomic_t total;
+
+ atomic_t queued_to_dispatched[DRM_DMA_HISTOGRAM_SLOTS];
+ atomic_t dispatched_to_completed[DRM_DMA_HISTOGRAM_SLOTS];
+ atomic_t completed_to_freed[DRM_DMA_HISTOGRAM_SLOTS];
+
+ atomic_t queued_to_completed[DRM_DMA_HISTOGRAM_SLOTS];
+ atomic_t queued_to_freed[DRM_DMA_HISTOGRAM_SLOTS];
+
+ atomic_t dma[DRM_DMA_HISTOGRAM_SLOTS];
+ atomic_t schedule[DRM_DMA_HISTOGRAM_SLOTS];
+ atomic_t ctx[DRM_DMA_HISTOGRAM_SLOTS];
+ atomic_t lacq[DRM_DMA_HISTOGRAM_SLOTS];
+ atomic_t lhld[DRM_DMA_HISTOGRAM_SLOTS];
+} drm_histogram_t;
+#endif
+
+ /* bufs is one longer than it has to be */
+typedef struct drm_waitlist {
+ int count; /* Number of possible buffers */
+ drm_buf_t **bufs; /* List of pointers to buffers */
+ drm_buf_t **rp; /* Read pointer */
+ drm_buf_t **wp; /* Write pointer */
+ drm_buf_t **end; /* End pointer */
+ spinlock_t read_lock;
+ spinlock_t write_lock;
+} drm_waitlist_t;
+
+typedef struct drm_freelist {
+ int initialized; /* Freelist in use */
+ atomic_t count; /* Number of free buffers */
+ drm_buf_t *next; /* End pointer */
+
+ wait_queue_head_t waiting; /* Processes waiting on free bufs */
+ int low_mark; /* Low water mark */
+ int high_mark; /* High water mark */
+ atomic_t wfh; /* If waiting for high mark */
+} drm_freelist_t;
+
+typedef struct drm_buf_entry {
+ int buf_size;
+ int buf_count;
+ drm_buf_t *buflist;
+ int seg_count;
+ int page_order;
+ unsigned long *seglist;
+
+ drm_freelist_t freelist;
+} drm_buf_entry_t;
+
+typedef struct drm_hw_lock {
+ __volatile__ unsigned int lock;
+ char padding[60]; /* Pad to cache line */
+} drm_hw_lock_t;
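+
+/* The 60 bytes of padding keep the 4-byte lock word alone in a 64-byte
+ * region, so contention on the hardware lock does not false-share a
+ * cache line with neighbouring SAREA data. */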
+
+typedef struct drm_file {
+ int authenticated;
+ int minor;
+ pid_t pid;
+ uid_t uid;
+ drm_magic_t magic;
+ unsigned long ioctl_count;
+ struct drm_file *next;
+ struct drm_file *prev;
+ struct drm_device *dev;
+} drm_file_t;
+
+
+typedef struct drm_queue {
+ atomic_t use_count; /* Outstanding uses (+1) */
+ atomic_t finalization; /* Finalization in progress */
+ atomic_t block_count; /* Count of processes waiting */
+ atomic_t block_read; /* Queue blocked for reads */
+ wait_queue_head_t read_queue; /* Processes waiting on block_read */
+ atomic_t block_write; /* Queue blocked for writes */
+ wait_queue_head_t write_queue; /* Processes waiting on block_write */
+ atomic_t total_queued; /* Total queued statistic */
+ atomic_t total_flushed;/* Total flushes statistic */
+ atomic_t total_locks; /* Total locks statistics */
+ drm_ctx_flags_t flags; /* Context preserving and 2D-only */
+ drm_waitlist_t waitlist; /* Pending buffers */
+ wait_queue_head_t flush_queue; /* Processes waiting until flush */
+} drm_queue_t;
+
+typedef struct drm_lock_data {
+ drm_hw_lock_t *hw_lock; /* Hardware lock */
+ pid_t pid; /* PID of lock holder (0=kernel) */
+ wait_queue_head_t lock_queue; /* Queue of blocked processes */
+ unsigned long lock_time; /* Time of last lock in jiffies */
+} drm_lock_data_t;
+
+typedef struct drm_device_dma {
+ /* Performance Counters */
+ atomic_t total_prio; /* Total DRM_DMA_PRIORITY */
+ atomic_t total_bytes; /* Total bytes DMA'd */
+ atomic_t total_dmas; /* Total DMA buffers dispatched */
+
+ atomic_t total_missed_dma; /* Missed drm_do_dma */
+ atomic_t total_missed_lock; /* Missed lock in drm_do_dma */
+ atomic_t total_missed_free; /* Missed drm_free_this_buffer */
+ atomic_t total_missed_sched;/* Missed drm_dma_schedule */
+
+ atomic_t total_tried; /* Tried next_buffer */
+ atomic_t total_hit; /* Sent next_buffer */
+ atomic_t total_lost; /* Lost interrupt */
+
+ drm_buf_entry_t bufs[DRM_MAX_ORDER+1];
+ int buf_count;
+ drm_buf_t **buflist; /* Vector of pointers into bufs */
+ int seg_count;
+ int page_count;
+ unsigned long *pagelist;
+ unsigned long byte_count;
+
+ /* DMA support */
+ drm_buf_t *this_buffer; /* Buffer being sent */
+ drm_buf_t *next_buffer; /* Selected buffer to send */
+ drm_queue_t *next_queue; /* Queue from which buffer selected*/
+ wait_queue_head_t waiting; /* Processes waiting on free bufs */
+} drm_device_dma_t;
+
+typedef struct drm_device {
+ const char *name; /* Simple driver name */
+ char *unique; /* Unique identifier: e.g., busid */
+ int unique_len; /* Length of unique field */
+ dev_t device; /* Device number for mknod */
+ char *devname; /* For /proc/interrupts */
+
+ int blocked; /* Blocked due to VC switch? */
+ struct proc_dir_entry *root; /* Root for this device's entries */
+
+ /* Locks */
+ spinlock_t count_lock; /* For inuse, open_count, buf_use */
+ struct semaphore struct_sem; /* For others */
+
+ /* Usage Counters */
+ int open_count; /* Outstanding files open */
+ atomic_t ioctl_count; /* Outstanding IOCTLs pending */
+ atomic_t vma_count; /* Outstanding vma areas open */
+ int buf_use; /* Buffers in use -- cannot alloc */
+ atomic_t buf_alloc; /* Buffer allocation in progress */
+
+ /* Performance Counters */
+ atomic_t total_open;
+ atomic_t total_close;
+ atomic_t total_ioctl;
+ atomic_t total_irq; /* Total interruptions */
+ atomic_t total_ctx; /* Total context switches */
+
+ atomic_t total_locks;
+ atomic_t total_unlocks;
+ atomic_t total_contends;
+ atomic_t total_sleeps;
+
+ /* Authentication */
+ drm_file_t *file_first;
+ drm_file_t *file_last;
+ drm_magic_head_t magiclist[DRM_HASH_SIZE];
+
+ /* Memory management */
+ drm_map_t **maplist; /* Vector of pointers to regions */
+ int map_count; /* Number of mappable regions */
+
+ drm_vma_entry_t *vmalist; /* List of vmas (for debugging) */
+ drm_lock_data_t lock; /* Information on hardware lock */
+
+ /* DMA queues (contexts) */
+ int queue_count; /* Number of active DMA queues */
+ int queue_reserved; /* Number of reserved DMA queues */
+ int queue_slots; /* Actual length of queuelist */
+ drm_queue_t **queuelist; /* Vector of pointers to DMA queues */
+ drm_device_dma_t *dma; /* Optional pointer for DMA support */
+
+ /* Context support */
+ int irq; /* Interrupt used by board */
+ __volatile__ int context_flag; /* Context swapping flag */
+ __volatile__ int interrupt_flag;/* Interruption handler flag */
+ __volatile__ int dma_flag; /* DMA dispatch flag */
+ struct timer_list timer; /* Timer for delaying ctx switch */
+ wait_queue_head_t context_wait; /* Processes waiting on ctx switch */
+ int last_checked; /* Last context checked for DMA */
+ int last_context; /* Last current context */
+ unsigned long last_switch; /* jiffies at last context switch */
+ struct tq_struct tq;
+ cycles_t ctx_start;
+ cycles_t lck_start;
+#if DRM_DMA_HISTOGRAM
+ drm_histogram_t histo;
+#endif
+
+ /* Callback to X server for context switch
+ and for heavy-handed reset. */
+ char buf[DRM_BSZ]; /* Output buffer */
+ char *buf_rp; /* Read pointer */
+ char *buf_wp; /* Write pointer */
+ char *buf_end; /* End pointer */
+ struct fasync_struct *buf_async;/* Processes waiting for SIGIO */
+ wait_queue_head_t buf_readers; /* Processes waiting to read */
+ wait_queue_head_t buf_writers; /* Processes waiting to ctx switch */
+} drm_device_t;
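+
+/* Locking convention: count_lock is a spinlock guarding only the open,
+ * ioctl and buffer-use counters above, while struct_sem serializes the
+ * longer-lived state (file list, maplist, queuelist), as the open and
+ * release paths in fops.c and gamma_drv.c illustrate. */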
+
+
+ /* Internal function definitions */
+
+ /* Misc. support (init.c) */
+extern int drm_flags;
+extern void drm_parse_options(char *s);
+
+
+ /* Device support (fops.c) */
+extern int drm_open_helper(struct inode *inode, struct file *filp,
+ drm_device_t *dev);
+extern int drm_flush(struct file *filp);
+extern int drm_release(struct inode *inode, struct file *filp);
+extern int drm_fasync(int fd, struct file *filp, int on);
+extern ssize_t drm_read(struct file *filp, char *buf, size_t count,
+ loff_t *off);
+extern int drm_write_string(drm_device_t *dev, const char *s);
+extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
+
+ /* Mapping support (vm.c) */
+#if LINUX_VERSION_CODE < 0x020317
+extern unsigned long drm_vm_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int write_access);
+extern unsigned long drm_vm_shm_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int write_access);
+extern unsigned long drm_vm_dma_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int write_access);
+#else
+ /* Return type changed in 2.3.23 */
+extern struct page *drm_vm_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int write_access);
+extern struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int write_access);
+extern struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int write_access);
+#endif
+extern void drm_vm_open(struct vm_area_struct *vma);
+extern void drm_vm_close(struct vm_area_struct *vma);
+extern int drm_mmap_dma(struct file *filp,
+ struct vm_area_struct *vma);
+extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
+
+
+ /* Proc support (proc.c) */
+extern int drm_proc_init(drm_device_t *dev);
+extern int drm_proc_cleanup(void);
+
+ /* Memory management support (memory.c) */
+extern void drm_mem_init(void);
+extern int drm_mem_info(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data);
+extern void *drm_alloc(size_t size, int area);
+extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size,
+ int area);
+extern char *drm_strdup(const char *s, int area);
+extern void drm_strfree(const char *s, int area);
+extern void drm_free(void *pt, size_t size, int area);
+extern unsigned long drm_alloc_pages(int order, int area);
+extern void drm_free_pages(unsigned long address, int order,
+ int area);
+extern void *drm_ioremap(unsigned long offset, unsigned long size);
+extern void drm_ioremapfree(void *pt, unsigned long size);
+
+ /* Buffer management support (bufs.c) */
+extern int drm_order(unsigned long size);
+extern int drm_addmap(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int drm_addbufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int drm_infobufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int drm_markbufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int drm_freebufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int drm_mapbufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+
+ /* Buffer list management support (lists.c) */
+extern int drm_waitlist_create(drm_waitlist_t *bl, int count);
+extern int drm_waitlist_destroy(drm_waitlist_t *bl);
+extern int drm_waitlist_put(drm_waitlist_t *bl, drm_buf_t *buf);
+extern drm_buf_t *drm_waitlist_get(drm_waitlist_t *bl);
+
+extern int drm_freelist_create(drm_freelist_t *bl, int count);
+extern int drm_freelist_destroy(drm_freelist_t *bl);
+extern int drm_freelist_put(drm_device_t *dev, drm_freelist_t *bl,
+ drm_buf_t *buf);
+extern drm_buf_t *drm_freelist_get(drm_freelist_t *bl, int block);
+
+ /* DMA support (gen_dma.c) */
+extern void drm_dma_setup(drm_device_t *dev);
+extern void drm_dma_takedown(drm_device_t *dev);
+extern void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf);
+extern void drm_reclaim_buffers(drm_device_t *dev, pid_t pid);
+extern int drm_context_switch(drm_device_t *dev, int old, int new);
+extern int drm_context_switch_complete(drm_device_t *dev, int new);
+extern void drm_wakeup(drm_device_t *dev, drm_buf_t *buf);
+extern void drm_clear_next_buffer(drm_device_t *dev);
+extern int drm_select_queue(drm_device_t *dev,
+ void (*wrapper)(unsigned long));
+extern int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *dma);
+extern int drm_dma_get_buffers(drm_device_t *dev, drm_dma_t *dma);
+#if DRM_DMA_HISTOGRAM
+extern int drm_histogram_slot(unsigned long count);
+extern void drm_histogram_compute(drm_device_t *dev, drm_buf_t *buf);
+#endif
+
+
+ /* Misc. IOCTL support (ioctl.c) */
+extern int drm_irq_busid(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int drm_getunique(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int drm_setunique(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+
+ /* Context IOCTL support (context.c) */
+extern int drm_resctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int drm_addctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int drm_modctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int drm_getctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int drm_switchctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int drm_newctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int drm_rmctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+
+ /* Drawable IOCTL support (drawable.c) */
+extern int drm_adddraw(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int drm_rmdraw(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+
+ /* Authentication IOCTL support (auth.c) */
+extern int drm_add_magic(drm_device_t *dev, drm_file_t *priv,
+ drm_magic_t magic);
+extern int drm_remove_magic(drm_device_t *dev, drm_magic_t magic);
+extern int drm_getmagic(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int drm_authmagic(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+
+ /* Locking IOCTL support (lock.c) */
+extern int drm_block(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int drm_unblock(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int drm_lock_take(__volatile__ unsigned int *lock,
+ unsigned int context);
+extern int drm_lock_transfer(drm_device_t *dev,
+ __volatile__ unsigned int *lock,
+ unsigned int context);
+extern int drm_lock_free(drm_device_t *dev,
+ __volatile__ unsigned int *lock,
+ unsigned int context);
+extern int drm_finish(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int drm_flush_unblock(drm_device_t *dev, int context,
+ drm_lock_flags_t flags);
+extern int drm_flush_block_and_flush(drm_device_t *dev, int context,
+ drm_lock_flags_t flags);
+#endif
+#endif
diff --git a/linux/fops.c b/linux/fops.c
new file mode 100644
index 00000000..ceef1d8c
--- /dev/null
+++ b/linux/fops.c
@@ -0,0 +1,234 @@
+/* fops.c -- File operations for DRM -*- linux-c -*-
+ * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
+ * Revised: Fri Dec 3 10:26:26 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/fops.c,v 1.3 1999/08/20 15:36:45 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/fops.c,v 1.5 2000/02/14 06:27:27 martin Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include <linux/poll.h>
+
+/* drm_open is called whenever a process opens /dev/drm. */
+
+int drm_open_helper(struct inode *inode, struct file *filp, drm_device_t *dev)
+{
+ kdev_t minor = MINOR(inode->i_rdev);
+ drm_file_t *priv;
+
+ if (filp->f_flags & O_EXCL) return -EBUSY; /* No exclusive opens */
+
+ DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
+
+ priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
+ if (!priv) return -ENOMEM;
+ memset(priv, 0, sizeof(*priv));
+ filp->private_data = priv;
+ priv->uid = current->euid;
+ priv->pid = current->pid;
+ priv->minor = minor;
+ priv->dev = dev;
+ priv->ioctl_count = 0;
+ priv->authenticated = capable(CAP_SYS_ADMIN);
+
+ down(&dev->struct_sem);
+ if (!dev->file_last) {
+ priv->next = NULL;
+ priv->prev = NULL;
+ dev->file_first = priv;
+ dev->file_last = priv;
+ } else {
+ priv->next = NULL;
+ priv->prev = dev->file_last;
+ dev->file_last->next = priv;
+ dev->file_last = priv;
+ }
+ up(&dev->struct_sem);
+
+ return 0;
+}
+
+int drm_flush(struct file *filp)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+
+ DRM_DEBUG("pid = %d, device = 0x%x, open_count = %d\n",
+ current->pid, dev->device, dev->open_count);
+ return 0;
+}
+
+/* drm_release is called whenever a process closes /dev/drm*. Linux calls
+ this only after all mappings have been closed. */
+
+int drm_release(struct inode *inode, struct file *filp)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+
+ DRM_DEBUG("pid = %d, device = 0x%x, open_count = %d\n",
+ current->pid, dev->device, dev->open_count);
+
+ if (_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
+ && dev->lock.pid == current->pid) {
+ DRM_ERROR("Process %d dead, freeing lock for context %d\n",
+ current->pid,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ drm_lock_free(dev,
+ &dev->lock.hw_lock->lock,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+
+ /* FIXME: may require heavy-handed reset of
+ hardware at this point, possibly
+ processed via a callback to the X
+ server. */
+ }
+ drm_reclaim_buffers(dev, priv->pid);
+
+ drm_fasync(-1, filp, 0);
+
+ down(&dev->struct_sem);
+ if (priv->prev) priv->prev->next = priv->next;
+ else dev->file_first = priv->next;
+ if (priv->next) priv->next->prev = priv->prev;
+ else dev->file_last = priv->prev;
+ up(&dev->struct_sem);
+
+ drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
+
+ return 0;
+}
+
+int drm_fasync(int fd, struct file *filp, int on)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ int retcode;
+
+ DRM_DEBUG("fd = %d, device = 0x%x\n", fd, dev->device);
+ retcode = fasync_helper(fd, filp, on, &dev->buf_async);
+ if (retcode < 0) return retcode;
+ return 0;
+}
+
+
+/* The drm_read and drm_write_string code (especially that which manages
+ the circular buffer), is based on Alessandro Rubini's LINUX DEVICE
+ DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */
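+
+/* Example of the arithmetic below: with DRM_BSZ == 1024, buf_rp == buf
+ and buf_wp == buf + 10, "left" is (0 + 1024 - 10) % 1024 == 1014 free
+ bytes, so "avail" is 10 bytes ready to be copied out. */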
+
+ssize_t drm_read(struct file *filp, char *buf, size_t count, loff_t *off)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ int left;
+ int avail;
+ int send;
+ int cur;
+
+ DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp);
+
+ while (dev->buf_rp == dev->buf_wp) {
+ DRM_DEBUG(" sleeping\n");
+ if (filp->f_flags & O_NONBLOCK) {
+ return -EAGAIN;
+ }
+ interruptible_sleep_on(&dev->buf_readers);
+ if (signal_pending(current)) {
+ DRM_DEBUG(" interrupted\n");
+ return -ERESTARTSYS;
+ }
+ DRM_DEBUG(" awake\n");
+ }
+
+ left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
+ avail = DRM_BSZ - left;
+ send = DRM_MIN(avail, count);
+
+ while (send) {
+ if (dev->buf_wp > dev->buf_rp) {
+ cur = DRM_MIN(send, dev->buf_wp - dev->buf_rp);
+ } else {
+ cur = DRM_MIN(send, dev->buf_end - dev->buf_rp);
+ }
+ copy_to_user_ret(buf, dev->buf_rp, cur, -EFAULT);
+ buf += cur;
+ dev->buf_rp += cur;
+ if (dev->buf_rp == dev->buf_end) dev->buf_rp = dev->buf;
+ send -= cur;
+ }
+
+ wake_up_interruptible(&dev->buf_writers);
+ return DRM_MIN(avail, count);
+}
+
+int drm_write_string(drm_device_t *dev, const char *s)
+{
+ int left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
+ int send = strlen(s);
+ int count;
+
+ DRM_DEBUG("%d left, %d to send (%p, %p)\n",
+ left, send, dev->buf_rp, dev->buf_wp);
+
+ if (left == 1 || dev->buf_wp != dev->buf_rp) {
+ DRM_ERROR("Buffer not empty (%d left, wp = %p, rp = %p)\n",
+ left,
+ dev->buf_wp,
+ dev->buf_rp);
+ }
+
+ while (send) {
+ if (dev->buf_wp >= dev->buf_rp) {
+ count = DRM_MIN(send, dev->buf_end - dev->buf_wp);
+ if (count == left) --count; /* Leave a hole */
+ } else {
+ count = DRM_MIN(send, dev->buf_rp - dev->buf_wp - 1);
+ }
+ strncpy(dev->buf_wp, s, count);
+ dev->buf_wp += count;
+ if (dev->buf_wp == dev->buf_end) dev->buf_wp = dev->buf;
+ send -= count;
+ }
+
+#if LINUX_VERSION_CODE < 0x020315
+ if (dev->buf_async) kill_fasync(dev->buf_async, SIGIO);
+#else
+ /* Parameter added in 2.3.21 */
+ if (dev->buf_async) kill_fasync(dev->buf_async, SIGIO, POLL_IN);
+#endif
+ DRM_DEBUG("waking\n");
+ wake_up_interruptible(&dev->buf_readers);
+ return 0;
+}
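+
+/* Design note: the writer always leaves one byte unused (the "hole"
+ above), the classic ring-buffer trick that lets buf_rp == buf_wp
+ unambiguously mean "empty" rather than "full". */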
+
+unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+
+ poll_wait(filp, &dev->buf_readers, wait);
+ if (dev->buf_wp != dev->buf_rp) return POLLIN | POLLRDNORM;
+ return 0;
+}
diff --git a/linux/gamma_drv.c b/linux/gamma_drv.c
new file mode 100644
index 00000000..4e1e30e1
--- /dev/null
+++ b/linux/gamma_drv.c
@@ -0,0 +1,526 @@
+/* gamma_drv.c -- 3dlabs GMX 2000 driver -*- linux-c -*-
+ * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
+ * Revised: Tue Oct 12 08:51:36 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/gamma_drv.c,v 1.17 1999/08/30 13:05:00 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/gamma_drv.c,v 1.3 2000/01/20 07:25:35 martin Exp $
+ *
+ */
+
+#define EXPORT_SYMTAB
+#include "drmP.h"
+#include "gamma_drv.h"
+EXPORT_SYMBOL(gamma_init);
+EXPORT_SYMBOL(gamma_cleanup);
+
+#define GAMMA_NAME "gamma"
+#define GAMMA_DESC "3dlabs GMX 2000"
+#define GAMMA_DATE "19990830"
+#define GAMMA_MAJOR 0
+#define GAMMA_MINOR 0
+#define GAMMA_PATCHLEVEL 5
+
+static drm_device_t gamma_device;
+
+static struct file_operations gamma_fops = {
+ open: gamma_open,
+ flush: drm_flush,
+ release: gamma_release,
+ ioctl: gamma_ioctl,
+ mmap: drm_mmap,
+ read: drm_read,
+ fasync: drm_fasync,
+ poll: drm_poll,
+};
+
+static struct miscdevice gamma_misc = {
+ minor: MISC_DYNAMIC_MINOR,
+ name: GAMMA_NAME,
+ fops: &gamma_fops,
+};
+
+static drm_ioctl_desc_t gamma_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { gamma_version, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { gamma_control, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { drm_addbufs, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { drm_mapbufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { drm_addctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { drm_rmctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { drm_modctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { drm_getctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { drm_switchctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { drm_newctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { drm_resctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { gamma_dma, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { gamma_lock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { gamma_unlock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
+};
+#define GAMMA_IOCTL_COUNT DRM_ARRAY_SIZE(gamma_ioctls)
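+
+/* The designated initializers above index the table directly by ioctl
+ * number, so gamma_ioctl below can dispatch with a single bounds check,
+ * roughly:
+ *
+ * nr = DRM_IOCTL_NR(cmd);
+ * if (nr < GAMMA_IOCTL_COUNT) retcode = gamma_ioctls[nr].func(...);
+ *
+ * The two flags after each handler request authentication and root
+ * privilege, which gamma_ioctl enforces. */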
+
+#ifdef MODULE
+int init_module(void);
+void cleanup_module(void);
+static char *gamma = NULL;
+
+MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas.");
+MODULE_DESCRIPTION("3dlabs GMX 2000");
+MODULE_PARM(gamma, "s");
+
+/* init_module is called when insmod is used to load the module */
+
+int init_module(void)
+{
+ return gamma_init();
+}
+
+/* cleanup_module is called when rmmod is used to unload the module */
+
+void cleanup_module(void)
+{
+ gamma_cleanup();
+}
+#endif
+
+#ifndef MODULE
+/* gamma_setup is called by the kernel to parse command-line options passed
+ * via the boot-loader (e.g., LILO). It calls the insmod option routine,
+ * drm_parse_options.
+ *
+ * This is not currently supported, since it requires changes to
+ * linux/init/main.c. */
+
+
+void __init gamma_setup(char *str, int *ints)
+{
+ if (ints[0] != 0) {
+ DRM_ERROR("Illegal command line format, ignored\n");
+ return;
+ }
+ drm_parse_options(str);
+}
+#endif
+
+static int gamma_setup(drm_device_t *dev)
+{
+ int i;
+
+ atomic_set(&dev->ioctl_count, 0);
+ atomic_set(&dev->vma_count, 0);
+ dev->buf_use = 0;
+ atomic_set(&dev->buf_alloc, 0);
+
+ drm_dma_setup(dev);
+
+ atomic_set(&dev->total_open, 0);
+ atomic_set(&dev->total_close, 0);
+ atomic_set(&dev->total_ioctl, 0);
+ atomic_set(&dev->total_irq, 0);
+ atomic_set(&dev->total_ctx, 0);
+ atomic_set(&dev->total_locks, 0);
+ atomic_set(&dev->total_unlocks, 0);
+ atomic_set(&dev->total_contends, 0);
+ atomic_set(&dev->total_sleeps, 0);
+
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ dev->magiclist[i].head = NULL;
+ dev->magiclist[i].tail = NULL;
+ }
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ dev->vmalist = NULL;
+ dev->lock.hw_lock = NULL;
+ init_waitqueue_head(&dev->lock.lock_queue);
+ dev->queue_count = 0;
+ dev->queue_reserved = 0;
+ dev->queue_slots = 0;
+ dev->queuelist = NULL;
+ dev->irq = 0;
+ dev->context_flag = 0;
+ dev->interrupt_flag = 0;
+ dev->dma_flag = 0;
+ dev->last_context = 0;
+ dev->last_switch = 0;
+ dev->last_checked = 0;
+ init_timer(&dev->timer);
+ init_waitqueue_head(&dev->context_wait);
+#if DRM_DMA_HISTOGRAM
+ memset(&dev->histo, 0, sizeof(dev->histo));
+#endif
+ dev->ctx_start = 0;
+ dev->lck_start = 0;
+
+ dev->buf_rp = dev->buf;
+ dev->buf_wp = dev->buf;
+ dev->buf_end = dev->buf + DRM_BSZ;
+ dev->buf_async = NULL;
+ init_waitqueue_head(&dev->buf_readers);
+ init_waitqueue_head(&dev->buf_writers);
+
+ DRM_DEBUG("\n");
+
+ /* The kernel's context could be created here, but is now created
+ in drm_dma_enqueue. This is more resource-efficient for
+ hardware that does not do DMA, but may mean that
+ drm_select_queue fails between the time the interrupt is
+ initialized and the time the queues are initialized. */
+
+ return 0;
+}
+
+
+static int gamma_takedown(drm_device_t *dev)
+{
+ int i;
+ drm_magic_entry_t *pt, *next;
+ drm_map_t *map;
+ drm_vma_entry_t *vma, *vma_next;
+
+ DRM_DEBUG("\n");
+
+ if (dev->irq) gamma_irq_uninstall(dev);
+
+ down(&dev->struct_sem);
+ del_timer(&dev->timer);
+
+ if (dev->devname) {
+ drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
+ dev->devname = NULL;
+ }
+
+ if (dev->unique) {
+ drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
+ dev->unique = NULL;
+ dev->unique_len = 0;
+ }
+ /* Clear pid list */
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ for (pt = dev->magiclist[i].head; pt; pt = next) {
+ next = pt->next;
+ drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+ }
+ dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
+ }
+
+ /* Clear vma list (only built for debugging) */
+ if (dev->vmalist) {
+ for (vma = dev->vmalist; vma; vma = vma_next) {
+ vma_next = vma->next;
+ drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
+ }
+ dev->vmalist = NULL;
+ }
+
+ /* Clear map area and mtrr information */
+ if (dev->maplist) {
+ for (i = 0; i < dev->map_count; i++) {
+ map = dev->maplist[i];
+ switch (map->type) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+#ifdef CONFIG_MTRR
+ if (map->mtrr >= 0) {
+ int retcode;
+ retcode = mtrr_del(map->mtrr,
+ map->offset,
+ map->size);
+ DRM_DEBUG("mtrr_del = %d\n", retcode);
+ }
+#endif
+ drm_ioremapfree(map->handle, map->size);
+ break;
+ case _DRM_SHM:
+ drm_free_pages((unsigned long)map->handle,
+ drm_order(map->size)
+ - PAGE_SHIFT,
+ DRM_MEM_SAREA);
+ break;
+ }
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ }
+ drm_free(dev->maplist,
+ dev->map_count * sizeof(*dev->maplist),
+ DRM_MEM_MAPS);
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ }
+
+ if (dev->queuelist) {
+ for (i = 0; i < dev->queue_count; i++) {
+ drm_waitlist_destroy(&dev->queuelist[i]->waitlist);
+ if (dev->queuelist[i]) {
+ drm_free(dev->queuelist[i],
+ sizeof(*dev->queuelist[0]),
+ DRM_MEM_QUEUES);
+ dev->queuelist[i] = NULL;
+ }
+ }
+ drm_free(dev->queuelist,
+ dev->queue_slots * sizeof(*dev->queuelist),
+ DRM_MEM_QUEUES);
+ dev->queuelist = NULL;
+ }
+
+ drm_dma_takedown(dev);
+
+ dev->queue_count = 0;
+ if (dev->lock.hw_lock) {
+ dev->lock.hw_lock = NULL; /* SHM removed */
+ dev->lock.pid = 0;
+ wake_up_interruptible(&dev->lock.lock_queue);
+ }
+ up(&dev->struct_sem);
+
+ return 0;
+}
+
+/* gamma_init is called via init_module at module load time, or via
+ * linux/init/main.c (this is not currently supported). */
+
+int gamma_init(void)
+{
+ int retcode;
+ drm_device_t *dev = &gamma_device;
+
+ DRM_DEBUG("\n");
+
+ memset((void *)dev, 0, sizeof(*dev));
+ dev->count_lock = SPIN_LOCK_UNLOCKED;
+ sema_init(&dev->struct_sem, 1);
+
+#ifdef MODULE
+ drm_parse_options(gamma);
+#endif
+
+ if ((retcode = misc_register(&gamma_misc))) {
+ DRM_ERROR("Cannot register \"%s\"\n", GAMMA_NAME);
+ return retcode;
+ }
+ dev->device = MKDEV(MISC_MAJOR, gamma_misc.minor);
+ dev->name = GAMMA_NAME;
+
+ drm_mem_init();
+ drm_proc_init(dev);
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ GAMMA_NAME,
+ GAMMA_MAJOR,
+ GAMMA_MINOR,
+ GAMMA_PATCHLEVEL,
+ GAMMA_DATE,
+ gamma_misc.minor);
+
+ return 0;
+}
+
+/* gamma_cleanup is called via cleanup_module at module unload time. */
+
+void gamma_cleanup(void)
+{
+ drm_device_t *dev = &gamma_device;
+
+ DRM_DEBUG("\n");
+
+ drm_proc_cleanup();
+ if (misc_deregister(&gamma_misc)) {
+ DRM_ERROR("Cannot unload module\n");
+ } else {
+ DRM_INFO("Module unloaded\n");
+ }
+ gamma_takedown(dev);
+}
+
+int gamma_version(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_version_t version;
+ int len;
+
+ copy_from_user_ret(&version,
+ (drm_version_t *)arg,
+ sizeof(version),
+ -EFAULT);
+
+#define DRM_COPY(name,value) \
+ len = strlen(value); \
+ if (len > name##_len) len = name##_len; \
+ name##_len = strlen(value); \
+ if (len && name) { \
+ copy_to_user_ret(name, value, len, -EFAULT); \
+ }
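+
+ /* DRM_COPY implements the usual two-pass string ioctl: a caller
+ passing a zero length or NULL pointer gets no copy, but *_len is
+ always set to the full string length, so user space can size its
+ buffer and repeat the call. */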
+
+ version.version_major = GAMMA_MAJOR;
+ version.version_minor = GAMMA_MINOR;
+ version.version_patchlevel = GAMMA_PATCHLEVEL;
+
+ DRM_COPY(version.name, GAMMA_NAME);
+ DRM_COPY(version.date, GAMMA_DATE);
+ DRM_COPY(version.desc, GAMMA_DESC);
+
+ copy_to_user_ret((drm_version_t *)arg,
+ &version,
+ sizeof(version),
+ -EFAULT);
+ return 0;
+}
+
+int gamma_open(struct inode *inode, struct file *filp)
+{
+ drm_device_t *dev = &gamma_device;
+ int retcode = 0;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+ if (!(retcode = drm_open_helper(inode, filp, dev))) {
+ MOD_INC_USE_COUNT;
+ atomic_inc(&dev->total_open);
+ spin_lock(&dev->count_lock);
+ if (!dev->open_count++) {
+ spin_unlock(&dev->count_lock);
+ return gamma_setup(dev);
+ }
+ spin_unlock(&dev->count_lock);
+ }
+ return retcode;
+}
+
+int gamma_release(struct inode *inode, struct file *filp)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ int retcode = 0;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+ if (!(retcode = drm_release(inode, filp))) {
+ MOD_DEC_USE_COUNT;
+ atomic_inc(&dev->total_close);
+ spin_lock(&dev->count_lock);
+ if (!--dev->open_count) {
+ if (atomic_read(&dev->ioctl_count) || dev->blocked) {
+ DRM_ERROR("Device busy: %d %d\n",
+ atomic_read(&dev->ioctl_count),
+ dev->blocked);
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ spin_unlock(&dev->count_lock);
+ return gamma_takedown(dev);
+ }
+ spin_unlock(&dev->count_lock);
+ }
+ return retcode;
+}
+
+/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm. */
+
+int gamma_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int nr = DRM_IOCTL_NR(cmd);
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ int retcode = 0;
+ drm_ioctl_desc_t *ioctl;
+ drm_ioctl_t *func;
+
+ atomic_inc(&dev->ioctl_count);
+ atomic_inc(&dev->total_ioctl);
+ ++priv->ioctl_count;
+
+ DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
+ current->pid, cmd, nr, dev->device, priv->authenticated);
+
+ if (nr >= GAMMA_IOCTL_COUNT) {
+ retcode = -EINVAL;
+ } else {
+ ioctl = &gamma_ioctls[nr];
+ func = ioctl->func;
+
+ if (!func) {
+ DRM_DEBUG("no function\n");
+ retcode = -EINVAL;
+ } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
+ || (ioctl->auth_needed && !priv->authenticated)) {
+ retcode = -EACCES;
+ } else {
+ retcode = (func)(inode, filp, cmd, arg);
+ }
+ }
+
+ atomic_dec(&dev->ioctl_count);
+ return retcode;
+}
+
+
+int gamma_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_lock_t lock;
+
+ copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ current->pid, lock.context);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("%d frees lock (%d holds)\n",
+ lock.context,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ atomic_inc(&dev->total_unlocks);
+ if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
+ atomic_inc(&dev->total_contends);
+ drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
+ gamma_dma_schedule(dev, 1);
+ if (!dev->context_flag) {
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("\n");
+ }
+ }
+#if DRM_DMA_HISTOGRAM
+ atomic_inc(&dev->histo.lhld[drm_histogram_slot(get_cycles()
+ - dev->lck_start)]);
+#endif
+
+ return 0;
+}
diff --git a/linux/i810_bufs.c b/linux/i810_bufs.c
new file mode 100644
index 00000000..49455b43
--- /dev/null
+++ b/linux/i810_bufs.c
@@ -0,0 +1,584 @@
+/* i810_bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
+ * Created: Thu Jan 6 01:47:26 2000 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
+ * Jeff Hartmann <jhartmann@precisioninsight.com>
+ *
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_bufs.c,v 1.1 2000/02/11 17:26:04 dawes Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "linux/un.h"
+
+int i810_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ drm_buf_entry_t *entry;
+ drm_buf_t *buf;
+ unsigned long offset;
+ unsigned long agp_offset;
+ int count;
+ int order;
+ int size;
+ int alignment;
+ int page_order;
+ int total;
+ int byte_count;
+ int i;
+
+ if (!dma) return -EINVAL;
+
+ copy_from_user_ret(&request,
+ (drm_buf_desc_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+ count = request.count;
+ order = drm_order(request.size);
+ size = 1 << order;
+ agp_offset = request.agp_start;
+ alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) : size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
+ byte_count = 0;
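+
+ /* Example: request.size == 65536 gives order == 16 and size == 65536;
+ with PAGE_SHIFT == 12, page_order == 4, so the loop below carves
+ buffers out of total == PAGE_SIZE << 4 == 64KB chunks. */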
+
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
+ if (dev->queue_count) return -EBUSY; /* Not while in use */
+ spin_lock(&dev->count_lock);
+ if (dev->buf_use) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ atomic_inc(&dev->buf_alloc);
+ spin_unlock(&dev->count_lock);
+
+ down(&dev->struct_sem);
+ entry = &dma->bufs[order];
+ if (entry->buf_count) {
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM; /* May only call once for each order */
+ }
+
+ entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS);
+ if (!entry->buflist) {
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
+ }
+ memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+ entry->buf_size = size;
+ entry->page_order = page_order;
+
+ while (entry->buf_count < count) {
+ for (offset = 0; offset + size <= total && entry->buf_count < count;
+ offset += alignment, ++entry->buf_count) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+ buf->offset = agp_offset - dev->agp->base + offset;/* ?? */
+ buf->bus_address = agp_offset + offset;
+ buf->address = agp_offset + offset + dev->agp->base;
+ buf->next = NULL;
+ buf->waiting = 0;
+ buf->pending = 0;
+ init_waitqueue_head(&buf->dma_wait);
+ buf->pid = 0;
+#if DRM_DMA_HISTOGRAM
+ buf->time_queued = 0;
+ buf->time_dispatched = 0;
+ buf->time_completed = 0;
+ buf->time_freed = 0;
+#endif
+ DRM_DEBUG("buffer %d @ %p\n",
+ entry->buf_count, buf->address);
+ }
+ byte_count += PAGE_SIZE << page_order;
+ }
+
+ dma->buflist = drm_realloc(dma->buflist,
+ dma->buf_count * sizeof(*dma->buflist),
+ (dma->buf_count + entry->buf_count)
+ * sizeof(*dma->buflist),
+ DRM_MEM_BUFS);
+ for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
+ dma->buflist[i] = &entry->buflist[i - dma->buf_count];
+
+ dma->buf_count += entry->buf_count;
+ dma->byte_count += byte_count;
+
+ drm_freelist_create(&entry->freelist, entry->buf_count);
+ for (i = 0; i < entry->buf_count; i++) {
+ drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
+ }
+
+ up(&dev->struct_sem);
+
+ request.count = entry->buf_count;
+ request.size = size;
+
+ copy_to_user_ret((drm_buf_desc_t *)arg,
+ &request,
+ sizeof(request),
+ -EFAULT);
+
+ atomic_dec(&dev->buf_alloc);
+ return 0;
+}
+
+int i810_addbufs_pci(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ int count;
+ int order;
+ int size;
+ int total;
+ int page_order;
+ drm_buf_entry_t *entry;
+ unsigned long page;
+ drm_buf_t *buf;
+ int alignment;
+ unsigned long offset;
+ int i;
+ int byte_count;
+ int page_count;
+
+ if (!dma) return -EINVAL;
+
+ copy_from_user_ret(&request,
+ (drm_buf_desc_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+ count = request.count;
+ order = drm_order(request.size);
+ size = 1 << order;
+
+ DRM_DEBUG("count = %d, size = %d (%d), order = %d, queue_count = %d\n",
+ request.count, request.size, size, order, dev->queue_count);
+
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
+ if (dev->queue_count) return -EBUSY; /* Not while in use */
+
+ alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) : size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
+
+ spin_lock(&dev->count_lock);
+ if (dev->buf_use) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ atomic_inc(&dev->buf_alloc);
+ spin_unlock(&dev->count_lock);
+
+ down(&dev->struct_sem);
+ entry = &dma->bufs[order];
+ if (entry->buf_count) {
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM; /* May only call once for each order */
+ }
+
+ entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS);
+ if (!entry->buflist) {
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
+ }
+ memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+ entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
+ DRM_MEM_SEGS);
+ if (!entry->seglist) {
+ drm_free(entry->buflist,
+ count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS);
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
+ }
+ memset(entry->seglist, 0, count * sizeof(*entry->seglist));
+
+ dma->pagelist = drm_realloc(dma->pagelist,
+ dma->page_count * sizeof(*dma->pagelist),
+ (dma->page_count + (count << page_order))
+ * sizeof(*dma->pagelist),
+ DRM_MEM_PAGES);
+ DRM_DEBUG("pagelist: %d entries\n",
+ dma->page_count + (count << page_order));
+
+
+ entry->buf_size = size;
+ entry->page_order = page_order;
+ byte_count = 0;
+ page_count = 0;
+ while (entry->buf_count < count) {
+ if (!(page = drm_alloc_pages(page_order, DRM_MEM_DMA))) break;
+ entry->seglist[entry->seg_count++] = page;
+ for (i = 0; i < (1 << page_order); i++) {
+ DRM_DEBUG("page %d @ 0x%08lx\n",
+ dma->page_count + page_count,
+ page + PAGE_SIZE * i);
+ dma->pagelist[dma->page_count + page_count++]
+ = page + PAGE_SIZE * i;
+ }
+ for (offset = 0;
+ offset + size <= total && entry->buf_count < count;
+ offset += alignment, ++entry->buf_count) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+ buf->offset = (dma->byte_count + byte_count + offset);
+ buf->address = (void *)(page + offset);
+ buf->next = NULL;
+ buf->waiting = 0;
+ buf->pending = 0;
+ init_waitqueue_head(&buf->dma_wait);
+ buf->pid = 0;
+#if DRM_DMA_HISTOGRAM
+ buf->time_queued = 0;
+ buf->time_dispatched = 0;
+ buf->time_completed = 0;
+ buf->time_freed = 0;
+#endif
+ DRM_DEBUG("buffer %d @ %p\n",
+ entry->buf_count, buf->address);
+ }
+ byte_count += PAGE_SIZE << page_order;
+ }
+
+ dma->buflist = drm_realloc(dma->buflist,
+ dma->buf_count * sizeof(*dma->buflist),
+ (dma->buf_count + entry->buf_count)
+ * sizeof(*dma->buflist),
+ DRM_MEM_BUFS);
+ for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
+ dma->buflist[i] = &entry->buflist[i - dma->buf_count];
+
+ dma->buf_count += entry->buf_count;
+ dma->seg_count += entry->seg_count;
+ dma->page_count += entry->seg_count << page_order;
+ dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
+
+ drm_freelist_create(&entry->freelist, entry->buf_count);
+ for (i = 0; i < entry->buf_count; i++) {
+ drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
+ }
+
+ up(&dev->struct_sem);
+
+ request.count = entry->buf_count;
+ request.size = size;
+
+ copy_to_user_ret((drm_buf_desc_t *)arg,
+ &request,
+ sizeof(request),
+ -EFAULT);
+
+ atomic_dec(&dev->buf_alloc);
+ return 0;
+}
+
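+/* Worked sizing example (illustrative, not from the original source): a
+ * request of count = 4, size = 9000 gives order = drm_order(9000) = 14,
+ * so each buffer occupies size = 1 << 14 = 16384 bytes.  With PAGE_SHIFT
+ * = 12, page_order = 2 and each drm_alloc_pages() segment spans total =
+ * PAGE_SIZE << 2 = 16384 bytes, i.e. exactly one buffer per segment in
+ * the allocation loop above.  _DRM_PAGE_ALIGN only changes the spacing
+ * when size is smaller than a page (e.g. 1024 would be padded to 4096). */
+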
+int i810_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_buf_desc_t request;
+
+ copy_from_user_ret(&request,
+ (drm_buf_desc_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+ if(request.flags & _DRM_AGP_BUFFER)
+ return i810_addbufs_agp(inode, filp, cmd, arg);
+ else
+ return i810_addbufs_pci(inode, filp, cmd, arg);
+}
+
+int i810_infobufs(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_info_t request;
+ int i;
+ int count;
+
+ if (!dma) return -EINVAL;
+
+ spin_lock(&dev->count_lock);
+ if (atomic_read(&dev->buf_alloc)) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ ++dev->buf_use; /* Can't allocate more after this call */
+ spin_unlock(&dev->count_lock);
+
+ copy_from_user_ret(&request,
+ (drm_buf_info_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+ for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
+ if (dma->bufs[i].buf_count) ++count;
+ }
+
+ DRM_DEBUG("count = %d\n", count);
+
+ if (request.count >= count) {
+ for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
+ if (dma->bufs[i].buf_count) {
+ copy_to_user_ret(&request.list[count].count,
+ &dma->bufs[i].buf_count,
+ sizeof(dma->bufs[0]
+ .buf_count),
+ -EFAULT);
+ copy_to_user_ret(&request.list[count].size,
+ &dma->bufs[i].buf_size,
+ sizeof(dma->bufs[0].buf_size),
+ -EFAULT);
+ copy_to_user_ret(&request.list[count].low_mark,
+ &dma->bufs[i]
+ .freelist.low_mark,
+ sizeof(dma->bufs[0]
+ .freelist.low_mark),
+ -EFAULT);
+ copy_to_user_ret(&request.list[count]
+ .high_mark,
+ &dma->bufs[i]
+ .freelist.high_mark,
+ sizeof(dma->bufs[0]
+ .freelist.high_mark),
+ -EFAULT);
+ DRM_DEBUG("%d %d %d %d %d\n",
+ i,
+ dma->bufs[i].buf_count,
+ dma->bufs[i].buf_size,
+ dma->bufs[i].freelist.low_mark,
+ dma->bufs[i].freelist.high_mark);
+ ++count;
+ }
+ }
+ }
+ request.count = count;
+
+ copy_to_user_ret((drm_buf_info_t *)arg,
+ &request,
+ sizeof(request),
+ -EFAULT);
+
+ return 0;
+}
+
+int i810_markbufs(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ int order;
+ drm_buf_entry_t *entry;
+
+ if (!dma) return -EINVAL;
+
+ copy_from_user_ret(&request,
+ (drm_buf_desc_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+ DRM_DEBUG("%d, %d, %d\n",
+ request.size, request.low_mark, request.high_mark);
+ order = drm_order(request.size);
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
+ entry = &dma->bufs[order];
+
+ if (request.low_mark < 0 || request.low_mark > entry->buf_count)
+ return -EINVAL;
+ if (request.high_mark < 0 || request.high_mark > entry->buf_count)
+ return -EINVAL;
+
+ entry->freelist.low_mark = request.low_mark;
+ entry->freelist.high_mark = request.high_mark;
+
+ return 0;
+}
+
+int i810_freebufs(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_free_t request;
+ int i;
+ int idx;
+ drm_buf_t *buf;
+
+ if (!dma) return -EINVAL;
+
+ copy_from_user_ret(&request,
+ (drm_buf_free_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+ DRM_DEBUG("%d\n", request.count);
+ for (i = 0; i < request.count; i++) {
+ copy_from_user_ret(&idx,
+ &request.list[i],
+ sizeof(idx),
+ -EFAULT);
+ if (idx < 0 || idx >= dma->buf_count) {
+ DRM_ERROR("Index %d (of %d max)\n",
+ idx, dma->buf_count - 1);
+ return -EINVAL;
+ }
+ buf = dma->buflist[idx];
+ if (buf->pid != current->pid) {
+ DRM_ERROR("Process %d freeing buffer owned by %d\n",
+ current->pid, buf->pid);
+ return -EINVAL;
+ }
+ drm_free_buffer(dev, buf);
+ }
+
+ return 0;
+}
+
+int i810_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ int retcode = 0;
+ const int zero = 0;
+ unsigned long virtual;
+ unsigned long address;
+ drm_buf_map_t request;
+ int i;
+
+ if (!dma) return -EINVAL;
+
+ DRM_DEBUG("\n");
+
+ spin_lock(&dev->count_lock);
+ if (atomic_read(&dev->buf_alloc)) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ ++dev->buf_use; /* Can't allocate more after this call */
+ spin_unlock(&dev->count_lock);
+
+ copy_from_user_ret(&request,
+ (drm_buf_map_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+ if (request.count >= dma->buf_count) {
+ if(dma->flags & _DRM_DMA_USE_AGP) {
+ /* This is an ugly vicious hack */
+ drm_map_t *map = NULL;
+ for(i = 0; i < dev->map_count; i++) {
+ map = dev->maplist[i];
+ if(map->type == _DRM_AGP) break;
+ }
+ if (i >= dev->map_count || !map) {
+ retcode = -EINVAL;
+ goto done;
+ }
+
+ virtual = do_mmap(filp, 0, map->size, PROT_READ|PROT_WRITE,
+ MAP_SHARED, (unsigned long)map->handle);
+ }
+ else {
+ virtual = do_mmap(filp, 0, dma->byte_count,
+ PROT_READ|PROT_WRITE, MAP_SHARED, 0);
+ }
+ if (virtual > -1024UL) {
+ /* Real error */
+ retcode = (signed long)virtual;
+ goto done;
+ }
+ request.virtual = (void *)virtual;
+
+ for (i = 0; i < dma->buf_count; i++) {
+ if (copy_to_user(&request.list[i].idx,
+ &dma->buflist[i]->idx,
+ sizeof(request.list[0].idx))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ if (copy_to_user(&request.list[i].total,
+ &dma->buflist[i]->total,
+ sizeof(request.list[0].total))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ if (copy_to_user(&request.list[i].used,
+ &zero,
+ sizeof(zero))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ address = virtual + dma->buflist[i]->offset;
+ if (copy_to_user(&request.list[i].address,
+ &address,
+ sizeof(address))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ }
+ }
+done:
+ request.count = dma->buf_count;
+ DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
+
+ copy_to_user_ret((drm_buf_map_t *)arg,
+ &request,
+ sizeof(request),
+ -EFAULT);
+
+ return retcode;
+}
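+
+/* Userspace sketch (illustrative): once the client knows how many
+ * buffers exist (e.g. from DRM_IOCTL_INFO_BUFS), a single
+ * DRM_IOCTL_MAP_BUFS call maps them all into its address space:
+ *
+ *	drm_buf_map_t request;
+ *	request.count = expected;
+ *	request.list  = malloc(expected * sizeof(*request.list));
+ *	ioctl(fd, DRM_IOCTL_MAP_BUFS, &request);
+ *	// request.list[i].address is now valid in this process
+ */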
diff --git a/linux/i810_dma.c b/linux/i810_dma.c
new file mode 100644
index 00000000..09959b65
--- /dev/null
+++ b/linux/i810_dma.c
@@ -0,0 +1,762 @@
+/* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
+ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
+ * Jeff Hartmann <jhartmann@precisioninsight.com>
+ *
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_dma.c,v 1.1 2000/02/11 17:26:04 dawes Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "i810_drv.h"
+
+#include <linux/interrupt.h> /* For task queue support */
+
+#define I810_REG(reg) 2
+#define I810_BASE(reg) ((unsigned long) \
+ dev->maplist[I810_REG(reg)]->handle)
+#define I810_ADDR(reg) (I810_BASE(reg) + reg)
+#define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg)
+#define I810_READ(reg) I810_DEREF(reg)
+#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
+
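+/* Usage sketch for the macros above (illustrative only: the offset and
+ * bit names are hypothetical placeholders, and I810_REG() is still a
+ * stub that always selects entry 2 of dev->maplist):
+ *
+ *	if (I810_READ(status_offset) & busy_bit)
+ *		I810_WRITE(control_offset, 0);
+ *
+ * I810_DEREF() goes through a volatile int pointer into the mapped MMIO
+ * region, so every use is a real device access, never a cached value. */
+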
+void i810_dma_init(drm_device_t *dev)
+{
+ printk(KERN_INFO "i810_dma_init\n");
+}
+
+void i810_dma_cleanup(drm_device_t *dev)
+{
+ printk(KERN_INFO "i810_dma_cleanup\n");
+}
+
+static inline void i810_dma_dispatch(drm_device_t *dev, unsigned long address,
+ unsigned long length)
+{
+ printk(KERN_INFO "i810_dma_dispatch\n");
+}
+
+static inline void i810_dma_quiescent(drm_device_t *dev)
+{
+}
+
+static inline void i810_dma_ready(drm_device_t *dev)
+{
+ i810_dma_quiescent(dev);
+ printk(KERN_INFO "i810_dma_ready\n");
+}
+
+static inline int i810_dma_is_ready(drm_device_t *dev)
+{
+ i810_dma_quiescent(dev);
+
+ printk(KERN_INFO "i810_dma_is_ready\n");
+ return 1;
+}
+
+static void i810_dma_service(int irq, void *device, struct pt_regs *regs)
+{
+ drm_device_t *dev = (drm_device_t *)device;
+ drm_device_dma_t *dma = dev->dma;
+
+ atomic_inc(&dev->total_irq);
+ if (i810_dma_is_ready(dev)) {
+ /* Free previous buffer */
+ if (test_and_set_bit(0, &dev->dma_flag)) {
+ atomic_inc(&dma->total_missed_free);
+ return;
+ }
+ if (dma->this_buffer) {
+ drm_free_buffer(dev, dma->this_buffer);
+ dma->this_buffer = NULL;
+ }
+ clear_bit(0, &dev->dma_flag);
+
+ /* Dispatch new buffer */
+ queue_task(&dev->tq, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+ }
+}
+
+/* Only called by i810_dma_schedule. */
+static int i810_do_dma(drm_device_t *dev, int locked)
+{
+ unsigned long address;
+ unsigned long length;
+ drm_buf_t *buf;
+ int retcode = 0;
+ drm_device_dma_t *dma = dev->dma;
+#if DRM_DMA_HISTOGRAM
+ cycles_t dma_start, dma_stop;
+#endif
+
+ if (test_and_set_bit(0, &dev->dma_flag)) {
+ atomic_inc(&dma->total_missed_dma);
+ return -EBUSY;
+ }
+
+#if DRM_DMA_HISTOGRAM
+ dma_start = get_cycles();
+#endif
+
+ if (!dma->next_buffer) {
+ DRM_ERROR("No next_buffer\n");
+ clear_bit(0, &dev->dma_flag);
+ return -EINVAL;
+ }
+
+ buf = dma->next_buffer;
+ address = (unsigned long)buf->bus_address;
+ length = buf->used;
+
+ DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
+ buf->context, buf->idx, length);
+
+ if (buf->list == DRM_LIST_RECLAIM) {
+ drm_clear_next_buffer(dev);
+ drm_free_buffer(dev, buf);
+ clear_bit(0, &dev->dma_flag);
+ return -EINVAL;
+ }
+
+ if (!length) {
+ DRM_ERROR("0 length buffer\n");
+ drm_clear_next_buffer(dev);
+ drm_free_buffer(dev, buf);
+ clear_bit(0, &dev->dma_flag);
+ return 0;
+ }
+
+ if (!i810_dma_is_ready(dev)) {
+ clear_bit(0, &dev->dma_flag);
+ return -EBUSY;
+ }
+
+ if (buf->while_locked) {
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("Dispatching buffer %d from pid %d"
+ " \"while locked\", but no lock held\n",
+ buf->idx, buf->pid);
+ }
+ } else {
+ if (!locked && !drm_lock_take(&dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ atomic_inc(&dma->total_missed_lock);
+ clear_bit(0, &dev->dma_flag);
+ return -EBUSY;
+ }
+ }
+
+ if (dev->last_context != buf->context
+ && !(dev->queuelist[buf->context]->flags
+ & _DRM_CONTEXT_PRESERVED)) {
+ /* PRE: dev->last_context != buf->context */
+ if (drm_context_switch(dev, dev->last_context, buf->context)) {
+ drm_clear_next_buffer(dev);
+ drm_free_buffer(dev, buf);
+ }
+ retcode = -EBUSY;
+ goto cleanup;
+
+ /* POST: we will wait for the context
+ switch and will dispatch on a later call
+ when dev->last_context == buf->context.
+ NOTE WE HOLD THE LOCK THROUGHOUT THIS
+ TIME! */
+ }
+
+ drm_clear_next_buffer(dev);
+ buf->pending = 1;
+ buf->waiting = 0;
+ buf->list = DRM_LIST_PEND;
+#if DRM_DMA_HISTOGRAM
+ buf->time_dispatched = get_cycles();
+#endif
+
+ i810_dma_dispatch(dev, address, length);
+ drm_free_buffer(dev, dma->this_buffer);
+ dma->this_buffer = buf;
+
+ atomic_add(length, &dma->total_bytes);
+ atomic_inc(&dma->total_dmas);
+
+ if (!buf->while_locked && !dev->context_flag && !locked) {
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("\n");
+ }
+ }
+cleanup:
+
+ clear_bit(0, &dev->dma_flag);
+
+#if DRM_DMA_HISTOGRAM
+ dma_stop = get_cycles();
+ atomic_inc(&dev->histo.dma[drm_histogram_slot(dma_stop - dma_start)]);
+#endif
+
+ return retcode;
+}
+
+static void i810_dma_schedule_timer_wrapper(unsigned long dev)
+{
+ i810_dma_schedule((drm_device_t *)dev, 0);
+}
+
+static void i810_dma_schedule_tq_wrapper(void *dev)
+{
+ i810_dma_schedule(dev, 0);
+}
+
+int i810_dma_schedule(drm_device_t *dev, int locked)
+{
+ int next;
+ drm_queue_t *q;
+ drm_buf_t *buf;
+ int retcode = 0;
+ int processed = 0;
+ int missed;
+ int expire = 20;
+ drm_device_dma_t *dma = dev->dma;
+#if DRM_DMA_HISTOGRAM
+ cycles_t schedule_start;
+#endif
+
+ if (test_and_set_bit(0, &dev->interrupt_flag)) {
+ /* Not reentrant */
+ atomic_inc(&dma->total_missed_sched);
+ return -EBUSY;
+ }
+ missed = atomic_read(&dma->total_missed_sched);
+
+#if DRM_DMA_HISTOGRAM
+ schedule_start = get_cycles();
+#endif
+
+again:
+ if (dev->context_flag) {
+ clear_bit(0, &dev->interrupt_flag);
+ return -EBUSY;
+ }
+ if (dma->next_buffer) {
+ /* Unsent buffer that was previously
+ selected, but that couldn't be sent
+ because the lock could not be obtained
+ or the DMA engine wasn't ready. Try
+ again. */
+ atomic_inc(&dma->total_tried);
+ if (!(retcode = i810_do_dma(dev, locked))) {
+ atomic_inc(&dma->total_hit);
+ ++processed;
+ }
+ } else {
+ do {
+ next = drm_select_queue(dev,
+ i810_dma_schedule_timer_wrapper);
+ if (next >= 0) {
+ q = dev->queuelist[next];
+ buf = drm_waitlist_get(&q->waitlist);
+ dma->next_buffer = buf;
+ dma->next_queue = q;
+ if (buf && buf->list == DRM_LIST_RECLAIM) {
+ drm_clear_next_buffer(dev);
+ drm_free_buffer(dev, buf);
+ }
+ }
+ } while (next >= 0 && !dma->next_buffer);
+ if (dma->next_buffer) {
+ if (!(retcode = i810_do_dma(dev, locked))) {
+ ++processed;
+ }
+ }
+ }
+
+ if (--expire) {
+ if (missed != atomic_read(&dma->total_missed_sched)) {
+ atomic_inc(&dma->total_lost);
+ if (i810_dma_is_ready(dev)) goto again;
+ }
+ if (processed && i810_dma_is_ready(dev)) {
+ atomic_inc(&dma->total_lost);
+ processed = 0;
+ goto again;
+ }
+ }
+
+ clear_bit(0, &dev->interrupt_flag);
+
+#if DRM_DMA_HISTOGRAM
+ atomic_inc(&dev->histo.schedule[drm_histogram_slot(get_cycles()
+ - schedule_start)]);
+#endif
+ return retcode;
+}
+
+static int i810_dma_priority(drm_device_t *dev, drm_dma_t *d)
+{
+ unsigned long address;
+ unsigned long length;
+ int must_free = 0;
+ int retcode = 0;
+ int i;
+ int idx;
+ drm_buf_t *buf;
+ drm_buf_t *last_buf = NULL;
+ drm_device_dma_t *dma = dev->dma;
+ DECLARE_WAITQUEUE(entry, current);
+
+ /* Turn off interrupt handling */
+ while (test_and_set_bit(0, &dev->interrupt_flag)) {
+ schedule();
+ if (signal_pending(current)) return -EINTR;
+ }
+ if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
+ while (!drm_lock_take(&dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ schedule();
+ if (signal_pending(current)) {
+ clear_bit(0, &dev->interrupt_flag);
+ return -EINTR;
+ }
+ }
+ ++must_free;
+ }
+ atomic_inc(&dma->total_prio);
+
+ for (i = 0; i < d->send_count; i++) {
+ idx = d->send_indices[i];
+ if (idx < 0 || idx >= dma->buf_count) {
+ DRM_ERROR("Index %d (of %d max)\n",
+ d->send_indices[i], dma->buf_count - 1);
+ continue;
+ }
+		buf = dma->buflist[idx];
+ if (buf->pid != current->pid) {
+ DRM_ERROR("Process %d using buffer owned by %d\n",
+ current->pid, buf->pid);
+ retcode = -EINVAL;
+ goto cleanup;
+ }
+ if (buf->list != DRM_LIST_NONE) {
+ DRM_ERROR("Process %d using %d's buffer on list %d\n",
+ current->pid, buf->pid, buf->list);
+ retcode = -EINVAL;
+ goto cleanup;
+ }
+ /* This isn't a race condition on
+ buf->list, since our concern is the
+ buffer reclaim during the time the
+ process closes the /dev/drm? handle, so
+ it can't also be doing DMA. */
+ buf->list = DRM_LIST_PRIO;
+ buf->used = d->send_sizes[i];
+ buf->context = d->context;
+ buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
+ address = (unsigned long)buf->address;
+ length = buf->used;
+ if (!length) {
+ DRM_ERROR("0 length buffer\n");
+ }
+ if (buf->pending) {
+ DRM_ERROR("Sending pending buffer:"
+ " buffer %d, offset %d\n",
+ d->send_indices[i], i);
+ retcode = -EINVAL;
+ goto cleanup;
+ }
+ if (buf->waiting) {
+ DRM_ERROR("Sending waiting buffer:"
+ " buffer %d, offset %d\n",
+ d->send_indices[i], i);
+ retcode = -EINVAL;
+ goto cleanup;
+ }
+ buf->pending = 1;
+
+ if (dev->last_context != buf->context
+ && !(dev->queuelist[buf->context]->flags
+ & _DRM_CONTEXT_PRESERVED)) {
+ add_wait_queue(&dev->context_wait, &entry);
+ current->state = TASK_INTERRUPTIBLE;
+ /* PRE: dev->last_context != buf->context */
+ drm_context_switch(dev, dev->last_context,
+ buf->context);
+ /* POST: we will wait for the context
+ switch and will dispatch on a later call
+ when dev->last_context == buf->context.
+ NOTE WE HOLD THE LOCK THROUGHOUT THIS
+ TIME! */
+ schedule();
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&dev->context_wait, &entry);
+ if (signal_pending(current)) {
+ retcode = -EINTR;
+ goto cleanup;
+ }
+ if (dev->last_context != buf->context) {
+ DRM_ERROR("Context mismatch: %d %d\n",
+ dev->last_context,
+ buf->context);
+ }
+ }
+
+#if DRM_DMA_HISTOGRAM
+ buf->time_queued = get_cycles();
+ buf->time_dispatched = buf->time_queued;
+#endif
+ i810_dma_dispatch(dev, address, length);
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("\n");
+ }
+
+ atomic_add(length, &dma->total_bytes);
+ atomic_inc(&dma->total_dmas);
+
+ if (last_buf) {
+ drm_free_buffer(dev, last_buf);
+ }
+ last_buf = buf;
+ }
+
+cleanup:
+ if (last_buf) {
+ i810_dma_ready(dev);
+ drm_free_buffer(dev, last_buf);
+ }
+
+ if (must_free && !dev->context_flag) {
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("\n");
+ }
+ }
+ clear_bit(0, &dev->interrupt_flag);
+ return retcode;
+}
+
+static int i810_dma_send_buffers(drm_device_t *dev, drm_dma_t *d)
+{
+ DECLARE_WAITQUEUE(entry, current);
+ drm_buf_t *last_buf = NULL;
+ int retcode = 0;
+ drm_device_dma_t *dma = dev->dma;
+
+ if (d->flags & _DRM_DMA_BLOCK) {
+ last_buf = dma->buflist[d->send_indices[d->send_count-1]];
+ add_wait_queue(&last_buf->dma_wait, &entry);
+ }
+
+ if ((retcode = drm_dma_enqueue(dev, d))) {
+ if (d->flags & _DRM_DMA_BLOCK)
+ remove_wait_queue(&last_buf->dma_wait, &entry);
+ return retcode;
+ }
+
+ i810_dma_schedule(dev, 0);
+
+ if (d->flags & _DRM_DMA_BLOCK) {
+ DRM_DEBUG("%d waiting\n", current->pid);
+ current->state = TASK_INTERRUPTIBLE;
+ for (;;) {
+ if (!last_buf->waiting
+ && !last_buf->pending)
+ break; /* finished */
+ schedule();
+ if (signal_pending(current)) {
+ retcode = -EINTR; /* Can't restart */
+ break;
+ }
+ }
+ current->state = TASK_RUNNING;
+ DRM_DEBUG("%d running\n", current->pid);
+ remove_wait_queue(&last_buf->dma_wait, &entry);
+ if (!retcode
+ || (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
+ if (!waitqueue_active(&last_buf->dma_wait)) {
+ drm_free_buffer(dev, last_buf);
+ }
+ }
+ if (retcode) {
+ DRM_ERROR("ctx%d w%d p%d c%d i%d l%d %d/%d\n",
+ d->context,
+ last_buf->waiting,
+ last_buf->pending,
+ DRM_WAITCOUNT(dev, d->context),
+ last_buf->idx,
+ last_buf->list,
+ last_buf->pid,
+ current->pid);
+ }
+ }
+ return retcode;
+}
+
+int i810_dma(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ int retcode = 0;
+ drm_dma_t d;
+
+ printk("i810_dma start\n");
+ copy_from_user_ret(&d, (drm_dma_t *)arg, sizeof(d), -EFAULT);
+ DRM_DEBUG("%d %d: %d send, %d req\n",
+ current->pid, d.context, d.send_count, d.request_count);
+
+ if (d.context == DRM_KERNEL_CONTEXT || d.context >= dev->queue_slots) {
+ DRM_ERROR("Process %d using context %d\n",
+ current->pid, d.context);
+ return -EINVAL;
+ }
+
+ if (d.send_count < 0 || d.send_count > dma->buf_count) {
+ DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
+ current->pid, d.send_count, dma->buf_count);
+ return -EINVAL;
+ }
+ if (d.request_count < 0 || d.request_count > dma->buf_count) {
+ DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
+ current->pid, d.request_count, dma->buf_count);
+ return -EINVAL;
+ }
+
+ if (d.send_count) {
+#if 0
+ if (d.flags & _DRM_DMA_PRIORITY)
+ retcode = i810_dma_priority(dev, &d);
+ else
+ retcode = i810_dma_send_buffers(dev, &d);
+#endif
+ printk("i810_dma priority\n");
+
+ retcode = i810_dma_priority(dev, &d);
+ }
+
+ d.granted_count = 0;
+
+ if (!retcode && d.request_count) {
+ retcode = drm_dma_get_buffers(dev, &d);
+ }
+
+ DRM_DEBUG("%d returning, granted = %d\n",
+ current->pid, d.granted_count);
+ copy_to_user_ret((drm_dma_t *)arg, &d, sizeof(d), -EFAULT);
+
+ printk("i810_dma end (granted)\n");
+ return retcode;
+}
+
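+/* Userspace sketch (illustrative, assuming an authenticated fd): a
+ * client can submit one filled buffer and request a fresh index in the
+ * same DRM_IOCTL_DMA call:
+ *
+ *	drm_dma_t d;
+ *	memset(&d, 0, sizeof(d));
+ *	d.context         = ctx;	// never DRM_KERNEL_CONTEXT
+ *	d.send_count      = 1;
+ *	d.send_indices    = &idx;	// from a prior DRM_IOCTL_MAP_BUFS
+ *	d.send_sizes      = &used;	// bytes actually filled
+ *	d.request_count   = 1;
+ *	d.request_indices = &new_idx;
+ *	d.request_sizes   = &new_size;
+ *	ioctl(fd, DRM_IOCTL_DMA, &d);	// then check d.granted_count
+ */
+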
+int i810_irq_install(drm_device_t *dev, int irq)
+{
+ int retcode;
+
+ if (!irq) return -EINVAL;
+
+ down(&dev->struct_sem);
+ if (dev->irq) {
+ up(&dev->struct_sem);
+ return -EBUSY;
+ }
+ dev->irq = irq;
+ up(&dev->struct_sem);
+
+ DRM_DEBUG("%d\n", irq);
+
+ dev->context_flag = 0;
+ dev->interrupt_flag = 0;
+ dev->dma_flag = 0;
+
+ dev->dma->next_buffer = NULL;
+ dev->dma->next_queue = NULL;
+ dev->dma->this_buffer = NULL;
+
+ dev->tq.next = NULL;
+ dev->tq.sync = 0;
+ dev->tq.routine = i810_dma_schedule_tq_wrapper;
+ dev->tq.data = dev;
+
+ /* Before installing handler */
+ /* TODO */
+ /* Install handler */
+ if ((retcode = request_irq(dev->irq,
+ i810_dma_service,
+ 0,
+ dev->devname,
+ dev))) {
+ down(&dev->struct_sem);
+ dev->irq = 0;
+ up(&dev->struct_sem);
+ return retcode;
+ }
+
+ /* After installing handler */
+ /* TODO */
+ return 0;
+}
+
+int i810_irq_uninstall(drm_device_t *dev)
+{
+ int irq;
+
+ down(&dev->struct_sem);
+ irq = dev->irq;
+ dev->irq = 0;
+ up(&dev->struct_sem);
+
+ if (!irq) return -EINVAL;
+
+ DRM_DEBUG("%d\n", irq);
+
+ /* TODO : Disable interrupts */
+ free_irq(irq, dev);
+
+ return 0;
+}
+
+int i810_control(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_control_t ctl;
+ int retcode;
+
+ printk(KERN_INFO "i810_control\n");
+ i810_dma_init(dev);
+
+ copy_from_user_ret(&ctl, (drm_control_t *)arg, sizeof(ctl), -EFAULT);
+
+ switch (ctl.func) {
+ case DRM_INST_HANDLER:
+ if ((retcode = i810_irq_install(dev, ctl.irq)))
+ return retcode;
+ break;
+ case DRM_UNINST_HANDLER:
+ if ((retcode = i810_irq_uninstall(dev)))
+ return retcode;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
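+/* Userspace sketch (illustrative): the interrupt handler is installed
+ * through this ioctl, with the IRQ number typically discovered via
+ * DRM_IOCTL_IRQ_BUSID; DRM_UNINST_HANDLER tears it down again:
+ *
+ *	drm_control_t ctl;
+ *	ctl.func = DRM_INST_HANDLER;
+ *	ctl.irq  = irq;
+ *	ioctl(fd, DRM_IOCTL_CONTROL, &ctl);
+ */
+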
+int i810_lock(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ DECLARE_WAITQUEUE(entry, current);
+ int ret = 0;
+ drm_lock_t lock;
+ drm_queue_t *q;
+#if DRM_DMA_HISTOGRAM
+ cycles_t start;
+
+ dev->lck_start = start = get_cycles();
+#endif
+
+ copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ current->pid, lock.context);
+ return -EINVAL;
+ }
+
+ if (lock.context < 0 || lock.context >= dev->queue_count) {
+ return -EINVAL;
+ }
+ q = dev->queuelist[lock.context];
+
+ ret = drm_flush_block_and_flush(dev, lock.context, lock.flags);
+
+ if (!ret) {
+ if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
+ != lock.context) {
+ long j = jiffies - dev->lock.lock_time;
+
+ if (j > 0 && j <= DRM_LOCK_SLICE) {
+ /* Can't take lock if we just had it and
+ there is contention. */
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(j);
+ }
+ }
+ add_wait_queue(&dev->lock.lock_queue, &entry);
+ for (;;) {
+ if (!dev->lock.hw_lock) {
+ /* Device has been unregistered */
+ ret = -EINTR;
+ break;
+ }
+ if (drm_lock_take(&dev->lock.hw_lock->lock,
+ lock.context)) {
+ dev->lock.pid = current->pid;
+ dev->lock.lock_time = jiffies;
+ atomic_inc(&dev->total_locks);
+ atomic_inc(&q->total_locks);
+ break; /* Got lock */
+ }
+
+ /* Contention */
+ atomic_inc(&dev->total_sleeps);
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&dev->lock.lock_queue, &entry);
+ }
+
+ drm_flush_unblock(dev, lock.context, lock.flags); /* cleanup phase */
+
+ if (!ret) {
+ if (lock.flags & _DRM_LOCK_READY)
+ i810_dma_ready(dev);
+ if (lock.flags & _DRM_LOCK_QUIESCENT)
+ i810_dma_quiescent(dev);
+ }
+
+#if DRM_DMA_HISTOGRAM
+ atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
+#endif
+
+ return ret;
+}
diff --git a/linux/i810_drv.c b/linux/i810_drv.c
new file mode 100644
index 00000000..f33153a3
--- /dev/null
+++ b/linux/i810_drv.c
@@ -0,0 +1,583 @@
+/* i810_drv.c -- I810 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
+ * Jeff Hartmann <jhartmann@precisioninsight.com>
+ *
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_drv.c,v 1.1 2000/02/11 17:26:05 dawes Exp $
+ *
+ */
+
+#define EXPORT_SYMTAB
+#include "drmP.h"
+#include "i810_drv.h"
+EXPORT_SYMBOL(i810_init);
+EXPORT_SYMBOL(i810_cleanup);
+
+#define I810_NAME "i810"
+#define I810_DESC	"Intel i810"
+#define I810_DATE "19991213"
+#define I810_MAJOR 0
+#define I810_MINOR 0
+#define I810_PATCHLEVEL 1
+
+static drm_device_t i810_device;
+drm_ctx_t i810_res_ctx;
+
+static struct file_operations i810_fops = {
+ open: i810_open,
+ flush: drm_flush,
+ release: i810_release,
+ ioctl: i810_ioctl,
+ mmap: drm_mmap,
+ read: drm_read,
+ fasync: drm_fasync,
+};
+
+static struct miscdevice i810_misc = {
+ minor: MISC_DYNAMIC_MINOR,
+ name: I810_NAME,
+ fops: &i810_fops,
+};
+
+static drm_ioctl_desc_t i810_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { i810_version, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { i810_control, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { i810_addbufs, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { i810_markbufs, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { i810_infobufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { i810_mapbufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { i810_freebufs, 1, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { drm_addctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { drm_rmctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { drm_modctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { drm_getctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { drm_switchctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { drm_newctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { drm_resctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { i810_dma, 1, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { i810_lock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { i810_unlock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
+};
+
+#define I810_IOCTL_COUNT DRM_ARRAY_SIZE(i810_ioctls)
+
+#ifdef MODULE
+int init_module(void);
+void cleanup_module(void);
+static char *i810 = NULL;
+
+MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas.");
+MODULE_DESCRIPTION("Intel I810");
+MODULE_PARM(i810, "s");
+
+/* init_module is called when insmod is used to load the module */
+
+int init_module(void)
+{
+ printk("doing i810_init()\n");
+ return i810_init();
+}
+
+/* cleanup_module is called when rmmod is used to unload the module */
+
+void cleanup_module(void)
+{
+ i810_cleanup();
+}
+#endif
+
+#ifndef MODULE
+/* i810_setup is called by the kernel to parse command-line options passed
+ * via the boot-loader (e.g., LILO). It calls the insmod option routine,
+ * drm_parse_drm.
+ *
+ * This is not currently supported, since it requires changes to
+ * linux/init/main.c. */
+
+void __init i810_setup(char *str, int *ints)
+{
+ if (ints[0] != 0) {
+ DRM_ERROR("Illegal command line format, ignored\n");
+ return;
+ }
+ drm_parse_options(str);
+}
+#endif
+
+static int i810_setup(drm_device_t *dev)
+{
+ int i;
+
+ atomic_set(&dev->ioctl_count, 0);
+ atomic_set(&dev->vma_count, 0);
+ dev->buf_use = 0;
+ atomic_set(&dev->buf_alloc, 0);
+
+ drm_dma_setup(dev);
+
+ atomic_set(&dev->total_open, 0);
+ atomic_set(&dev->total_close, 0);
+ atomic_set(&dev->total_ioctl, 0);
+ atomic_set(&dev->total_irq, 0);
+ atomic_set(&dev->total_ctx, 0);
+ atomic_set(&dev->total_locks, 0);
+ atomic_set(&dev->total_unlocks, 0);
+ atomic_set(&dev->total_contends, 0);
+ atomic_set(&dev->total_sleeps, 0);
+
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ dev->magiclist[i].head = NULL;
+ dev->magiclist[i].tail = NULL;
+ }
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ dev->vmalist = NULL;
+ dev->lock.hw_lock = NULL;
+ init_waitqueue_head(&dev->lock.lock_queue);
+ dev->queue_count = 0;
+ dev->queue_reserved = 0;
+ dev->queue_slots = 0;
+ dev->queuelist = NULL;
+ dev->irq = 0;
+ dev->context_flag = 0;
+ dev->interrupt_flag = 0;
+ dev->dma_flag = 0;
+ dev->last_context = 0;
+ dev->last_switch = 0;
+ dev->last_checked = 0;
+ init_timer(&dev->timer);
+ init_waitqueue_head(&dev->context_wait);
+#if DRM_DMA_HISTOGRAM
+ memset(&dev->histo, 0, sizeof(dev->histo));
+#endif
+ dev->ctx_start = 0;
+ dev->lck_start = 0;
+
+ dev->buf_rp = dev->buf;
+ dev->buf_wp = dev->buf;
+ dev->buf_end = dev->buf + DRM_BSZ;
+ dev->buf_async = NULL;
+ init_waitqueue_head(&dev->buf_readers);
+ init_waitqueue_head(&dev->buf_writers);
+
+ DRM_DEBUG("\n");
+
+ /* The kernel's context could be created here, but is now created
+ in drm_dma_enqueue. This is more resource-efficient for
+ hardware that does not do DMA, but may mean that
+ drm_select_queue fails between the time the interrupt is
+ initialized and the time the queues are initialized. */
+
+ return 0;
+}
+
+static int i810_takedown(drm_device_t *dev)
+{
+ int i;
+ drm_magic_entry_t *pt, *next;
+ drm_map_t *map;
+ drm_vma_entry_t *vma, *vma_next;
+
+ DRM_DEBUG("\n");
+
+ if (dev->irq) i810_irq_uninstall(dev);
+
+ down(&dev->struct_sem);
+ del_timer(&dev->timer);
+
+ if (dev->devname) {
+ drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
+ dev->devname = NULL;
+ }
+
+ if (dev->unique) {
+ drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
+ dev->unique = NULL;
+ dev->unique_len = 0;
+ }
+ /* Clear pid list */
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ for (pt = dev->magiclist[i].head; pt; pt = next) {
+ next = pt->next;
+ drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+ }
+ dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
+ }
+ /* Clear AGP information */
+ if (dev->agp) {
+ drm_agp_mem_t *entry;
+ drm_agp_mem_t *nexte;
+
+ /* Remove AGP resources, but leave dev->agp
+		   intact until i810_cleanup is called. */
+ for (entry = dev->agp->memory; entry; entry = nexte) {
+ nexte = entry->next;
+ if (entry->bound) drm_unbind_agp(entry->memory);
+ drm_free_agp(entry->memory, entry->pages);
+ drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+ }
+ dev->agp->memory = NULL;
+
+ if (dev->agp->acquired && drm_agp.release)
+ (*drm_agp.release)();
+
+ dev->agp->acquired = 0;
+ dev->agp->enabled = 0;
+ }
+ /* Clear vma list (only built for debugging) */
+ if (dev->vmalist) {
+ for (vma = dev->vmalist; vma; vma = vma_next) {
+ vma_next = vma->next;
+ drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
+ }
+ dev->vmalist = NULL;
+ }
+
+ /* Clear map area and mtrr information */
+ if (dev->maplist) {
+ for (i = 0; i < dev->map_count; i++) {
+ map = dev->maplist[i];
+ switch (map->type) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+#ifdef CONFIG_MTRR
+ if (map->mtrr >= 0) {
+ int retcode;
+ retcode = mtrr_del(map->mtrr,
+ map->offset,
+ map->size);
+ DRM_DEBUG("mtrr_del = %d\n", retcode);
+ }
+#endif
+ drm_ioremapfree(map->handle, map->size);
+ break;
+ case _DRM_SHM:
+ drm_free_pages((unsigned long)map->handle,
+ drm_order(map->size)
+ - PAGE_SHIFT,
+ DRM_MEM_SAREA);
+ break;
+ case _DRM_AGP:
+ break;
+ }
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ }
+ drm_free(dev->maplist,
+ dev->map_count * sizeof(*dev->maplist),
+ DRM_MEM_MAPS);
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ }
+
+ if (dev->queuelist) {
+ for (i = 0; i < dev->queue_count; i++) {
+ drm_waitlist_destroy(&dev->queuelist[i]->waitlist);
+ if (dev->queuelist[i]) {
+ drm_free(dev->queuelist[i],
+ sizeof(*dev->queuelist[0]),
+ DRM_MEM_QUEUES);
+ dev->queuelist[i] = NULL;
+ }
+ }
+ drm_free(dev->queuelist,
+ dev->queue_slots * sizeof(*dev->queuelist),
+ DRM_MEM_QUEUES);
+ dev->queuelist = NULL;
+ }
+
+ drm_dma_takedown(dev);
+
+ dev->queue_count = 0;
+ if (dev->lock.hw_lock) {
+ dev->lock.hw_lock = NULL; /* SHM removed */
+ dev->lock.pid = 0;
+ wake_up_interruptible(&dev->lock.lock_queue);
+ }
+ up(&dev->struct_sem);
+
+ return 0;
+}
+
+/* i810_init is called via init_module at module load time, or via
+ * linux/init/main.c (this is not currently supported). */
+
+int i810_init(void)
+{
+ int retcode;
+ drm_device_t *dev = &i810_device;
+
+ DRM_DEBUG("\n");
+
+ memset((void *)dev, 0, sizeof(*dev));
+ dev->count_lock = SPIN_LOCK_UNLOCKED;
+ sema_init(&dev->struct_sem, 1);
+
+#ifdef MODULE
+ drm_parse_options(i810);
+#endif
+ printk("doing misc_register\n");
+ if ((retcode = misc_register(&i810_misc))) {
+ DRM_ERROR("Cannot register \"%s\"\n", I810_NAME);
+ return retcode;
+ }
+ dev->device = MKDEV(MISC_MAJOR, i810_misc.minor);
+ dev->name = I810_NAME;
+
+ printk("doing mem init\n");
+ drm_mem_init();
+ printk("doing proc init\n");
+ drm_proc_init(dev);
+ printk("doing agp init\n");
+ dev->agp = drm_agp_init();
+ printk("doing ctxbitmap init\n");
+ if((retcode = drm_ctxbitmap_init(dev))) {
+ DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+ drm_proc_cleanup();
+ misc_deregister(&i810_misc);
+ i810_takedown(dev);
+ return retcode;
+ }
+#if 0
+ printk("doing i810_dma_init\n");
+ i810_dma_init(dev);
+#endif
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ I810_NAME,
+ I810_MAJOR,
+ I810_MINOR,
+ I810_PATCHLEVEL,
+ I810_DATE,
+ i810_misc.minor);
+
+ return 0;
+}
+
+/* i810_cleanup is called via cleanup_module at module unload time. */
+
+void i810_cleanup(void)
+{
+ drm_device_t *dev = &i810_device;
+
+ DRM_DEBUG("\n");
+
+ drm_proc_cleanup();
+ if (misc_deregister(&i810_misc)) {
+ DRM_ERROR("Cannot unload module\n");
+ } else {
+ DRM_INFO("Module unloaded\n");
+ }
+ drm_ctxbitmap_cleanup(dev);
+ i810_dma_cleanup(dev);
+ i810_takedown(dev);
+ if (dev->agp) {
+ drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
+ dev->agp = NULL;
+ }
+}
+
+int i810_version(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_version_t version;
+ int len;
+
+ copy_from_user_ret(&version,
+ (drm_version_t *)arg,
+ sizeof(version),
+ -EFAULT);
+
+#define DRM_COPY(name,value) \
+ len = strlen(value); \
+ if (len > name##_len) len = name##_len; \
+ name##_len = strlen(value); \
+ if (len && name) { \
+ copy_to_user_ret(name, value, len, -EFAULT); \
+ }
+
+ version.version_major = I810_MAJOR;
+ version.version_minor = I810_MINOR;
+ version.version_patchlevel = I810_PATCHLEVEL;
+
+ DRM_COPY(version.name, I810_NAME);
+ DRM_COPY(version.date, I810_DATE);
+ DRM_COPY(version.desc, I810_DESC);
+
+ copy_to_user_ret((drm_version_t *)arg,
+ &version,
+ sizeof(version),
+ -EFAULT);
+ return 0;
+}
+
+int i810_open(struct inode *inode, struct file *filp)
+{
+ drm_device_t *dev = &i810_device;
+ int retcode = 0;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+ if (!(retcode = drm_open_helper(inode, filp, dev))) {
+ MOD_INC_USE_COUNT;
+ atomic_inc(&dev->total_open);
+ spin_lock(&dev->count_lock);
+ if (!dev->open_count++) {
+ spin_unlock(&dev->count_lock);
+ return i810_setup(dev);
+ }
+ spin_unlock(&dev->count_lock);
+ }
+ return retcode;
+}
+
+int i810_release(struct inode *inode, struct file *filp)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ int retcode = 0;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+ if (!(retcode = drm_release(inode, filp))) {
+ MOD_DEC_USE_COUNT;
+ atomic_inc(&dev->total_close);
+ spin_lock(&dev->count_lock);
+ if (!--dev->open_count) {
+ if (atomic_read(&dev->ioctl_count) || dev->blocked) {
+ DRM_ERROR("Device busy: %d %d\n",
+ atomic_read(&dev->ioctl_count),
+ dev->blocked);
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ spin_unlock(&dev->count_lock);
+ return i810_takedown(dev);
+ }
+ spin_unlock(&dev->count_lock);
+ }
+ return retcode;
+}
+
+/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm. */
+
+int i810_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int nr = DRM_IOCTL_NR(cmd);
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ int retcode = 0;
+ drm_ioctl_desc_t *ioctl;
+ drm_ioctl_t *func;
+
+ atomic_inc(&dev->ioctl_count);
+ atomic_inc(&dev->total_ioctl);
+ ++priv->ioctl_count;
+
+ DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
+ current->pid, cmd, nr, dev->device, priv->authenticated);
+
+ if (nr >= I810_IOCTL_COUNT) {
+ retcode = -EINVAL;
+ } else {
+ ioctl = &i810_ioctls[nr];
+ func = ioctl->func;
+
+ if (!func) {
+ DRM_DEBUG("no function\n");
+ retcode = -EINVAL;
+ } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
+ || (ioctl->auth_needed && !priv->authenticated)) {
+ retcode = -EACCES;
+ } else {
+ retcode = (func)(inode, filp, cmd, arg);
+ }
+ }
+
+ atomic_dec(&dev->ioctl_count);
+ return retcode;
+}
+
+int i810_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_lock_t lock;
+
+ copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ current->pid, lock.context);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("%d frees lock (%d holds)\n",
+ lock.context,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ atomic_inc(&dev->total_unlocks);
+ if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
+ atomic_inc(&dev->total_contends);
+ drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
+ i810_dma_schedule(dev, 1);
+ if (!dev->context_flag) {
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("\n");
+ }
+ }
+#if DRM_DMA_HISTOGRAM
+ atomic_inc(&dev->histo.lhld[drm_histogram_slot(get_cycles()
+ - dev->lck_start)]);
+#endif
+
+ return 0;
+}
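+
+/* Userspace sketch (illustrative): a direct-rendering client brackets
+ * hardware access with the heavyweight lock, always using its own
+ * context handle (never DRM_KERNEL_CONTEXT):
+ *
+ *	drm_lock_t lock;
+ *	lock.context = ctx;
+ *	lock.flags   = _DRM_LOCK_READY;	// or _DRM_LOCK_QUIESCENT
+ *	ioctl(fd, DRM_IOCTL_LOCK, &lock);
+ *	... program the hardware ...
+ *	ioctl(fd, DRM_IOCTL_UNLOCK, &lock);
+ */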
diff --git a/linux/i810_drv.h b/linux/i810_drv.h
new file mode 100644
index 00000000..0f5f42bb
--- /dev/null
+++ b/linux/i810_drv.h
@@ -0,0 +1,76 @@
+/* i810_drv.h -- Private header for the Intel i810 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
+ * Jeff Hartmann <jhartmann@precisioninsight.com>
+ *
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_drv.h,v 1.1 2000/02/11 17:26:05 dawes Exp $
+ */
+
+#ifndef _I810_DRV_H_
+#define _I810_DRV_H_
+
+ /* i810_drv.c */
+extern int i810_init(void);
+extern void i810_cleanup(void);
+extern int i810_version(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i810_open(struct inode *inode, struct file *filp);
+extern int i810_release(struct inode *inode, struct file *filp);
+extern int i810_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i810_unlock(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+ /* i810_dma.c */
+extern int i810_dma_schedule(drm_device_t *dev, int locked);
+extern int i810_dma(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i810_irq_install(drm_device_t *dev, int irq);
+extern int i810_irq_uninstall(drm_device_t *dev);
+extern int i810_control(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i810_lock(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern void i810_dma_init(drm_device_t *dev);
+extern void i810_dma_cleanup(drm_device_t *dev);
+
+ /* i810_bufs.c */
+extern int i810_addbufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i810_infobufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i810_markbufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i810_freebufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i810_mapbufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i810_addmap(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+#endif
diff --git a/linux/lists.c b/linux/lists.c
new file mode 100644
index 00000000..50655bc3
--- /dev/null
+++ b/linux/lists.c
@@ -0,0 +1,245 @@
+/* lists.c -- Buffer list handling routines -*- linux-c -*-
+ * Created: Mon Apr 19 20:54:22 1999 by faith@precisioninsight.com
+ * Revised: Sun Feb 13 23:37:52 2000 by kevin@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/lists.c,v 1.3 1999/08/20 15:07:02 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/lists.c,v 1.4 2000/02/14 06:27:27 martin Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+int drm_waitlist_create(drm_waitlist_t *bl, int count)
+{
+ DRM_DEBUG("%d\n", count);
+ if (bl->count) return -EINVAL;
+
+ bl->count = count;
+ bl->bufs = drm_alloc((bl->count + 2) * sizeof(*bl->bufs),
+ DRM_MEM_BUFLISTS);
+ bl->rp = bl->bufs;
+ bl->wp = bl->bufs;
+ bl->end = &bl->bufs[bl->count+1];
+ bl->write_lock = SPIN_LOCK_UNLOCKED;
+ bl->read_lock = SPIN_LOCK_UNLOCKED;
+ return 0;
+}
+
+int drm_waitlist_destroy(drm_waitlist_t *bl)
+{
+ DRM_DEBUG("\n");
+ if (bl->rp != bl->wp) return -EINVAL;
+ if (bl->bufs) drm_free(bl->bufs,
+ (bl->count + 2) * sizeof(*bl->bufs),
+ DRM_MEM_BUFLISTS);
+ bl->count = 0;
+ bl->bufs = NULL;
+ bl->rp = NULL;
+ bl->wp = NULL;
+ bl->end = NULL;
+ return 0;
+}
+
+int drm_waitlist_put(drm_waitlist_t *bl, drm_buf_t *buf)
+{
+ int left;
+ unsigned long flags;
+
+ left = DRM_LEFTCOUNT(bl);
+ DRM_DEBUG("put %d (%d left, rp = %p, wp = %p)\n",
+ buf->idx, left, bl->rp, bl->wp);
+ if (!left) {
+ DRM_ERROR("Overflow while adding buffer %d from pid %d\n",
+ buf->idx, buf->pid);
+ return -EINVAL;
+ }
+#if DRM_DMA_HISTOGRAM
+ buf->time_queued = get_cycles();
+#endif
+ buf->list = DRM_LIST_WAIT;
+
+ spin_lock_irqsave(&bl->write_lock, flags);
+ *bl->wp = buf;
+ if (++bl->wp >= bl->end) bl->wp = bl->bufs;
+ spin_unlock_irqrestore(&bl->write_lock, flags);
+
+ return 0;
+}
+
+drm_buf_t *drm_waitlist_get(drm_waitlist_t *bl)
+{
+ drm_buf_t *buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bl->read_lock, flags);
+ buf = *bl->rp;
+ if (bl->rp == bl->wp) {
+ spin_unlock_irqrestore(&bl->read_lock, flags);
+ return NULL;
+ }
+ if (++bl->rp >= bl->end) bl->rp = bl->bufs;
+ spin_unlock_irqrestore(&bl->read_lock, flags);
+
+ DRM_DEBUG("get %d\n", buf->idx);
+ return buf;
+}
+
+int drm_freelist_create(drm_freelist_t *bl, int count)
+{
+ DRM_DEBUG("\n");
+ atomic_set(&bl->count, 0);
+ bl->next = NULL;
+ init_waitqueue_head(&bl->waiting);
+ bl->low_mark = 0;
+ bl->high_mark = 0;
+ atomic_set(&bl->wfh, 0);
+ ++bl->initialized;
+ return 0;
+}
+
+int drm_freelist_destroy(drm_freelist_t *bl)
+{
+ DRM_DEBUG("\n");
+ atomic_set(&bl->count, 0);
+ bl->next = NULL;
+ return 0;
+}
+
+int drm_freelist_put(drm_device_t *dev, drm_freelist_t *bl, drm_buf_t *buf)
+{
+ drm_buf_t *old, *prev;
+ int count = 0;
+ drm_device_dma_t *dma = dev->dma;
+
+ if (!dma) {
+ DRM_ERROR("No DMA support\n");
+ return 1;
+ }
+
+ if (buf->waiting || buf->pending || buf->list == DRM_LIST_FREE) {
+ DRM_ERROR("Freed buffer %d: w%d, p%d, l%d\n",
+ buf->idx, buf->waiting, buf->pending, buf->list);
+ }
+ DRM_DEBUG("%d, count = %d, wfh = %d, w%d, p%d\n",
+ buf->idx, atomic_read(&bl->count), atomic_read(&bl->wfh),
+ buf->waiting, buf->pending);
+ if (!bl) return 1;
+#if DRM_DMA_HISTOGRAM
+ buf->time_freed = get_cycles();
+ drm_histogram_compute(dev, buf);
+#endif
+ buf->list = DRM_LIST_FREE;
+ do {
+ old = bl->next;
+		buf->next = old;
+ prev = cmpxchg(&bl->next, old, buf);
+ if (++count > DRM_LOOPING_LIMIT) {
+ DRM_ERROR("Looping\n");
+ return 1;
+ }
+ } while (prev != old);
+ atomic_inc(&bl->count);
+ if (atomic_read(&bl->count) > dma->buf_count) {
+ DRM_ERROR("%d of %d buffers free after addition of %d\n",
+ atomic_read(&bl->count), dma->buf_count, buf->idx);
+ return 1;
+ }
+ /* Check for high water mark */
+ if (atomic_read(&bl->wfh) && atomic_read(&bl->count)>=bl->high_mark) {
+ atomic_set(&bl->wfh, 0);
+ wake_up_interruptible(&bl->waiting);
+ }
+ return 0;
+}
+
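+/* The put path above is a lock-free stack push: buf->next is pointed at
+ * the current head, then cmpxchg() swings bl->next from that head to
+ * buf.  If another CPU pushed or popped in the meantime, prev != old and
+ * the loop retries against the new head, so no spinlock is needed. */
+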
+static drm_buf_t *drm_freelist_try(drm_freelist_t *bl)
+{
+ drm_buf_t *old, *new, *prev;
+ drm_buf_t *buf;
+ int count = 0;
+
+ if (!bl) return NULL;
+
+ /* Get buffer */
+ do {
+ old = bl->next;
+ if (!old) return NULL;
+ new = bl->next->next;
+ prev = cmpxchg(&bl->next, old, new);
+ if (++count > DRM_LOOPING_LIMIT) {
+ DRM_ERROR("Looping\n");
+ return NULL;
+ }
+ } while (prev != old);
+ atomic_dec(&bl->count);
+
+ buf = old;
+ buf->next = NULL;
+ buf->list = DRM_LIST_NONE;
+ DRM_DEBUG("%d, count = %d, wfh = %d, w%d, p%d\n",
+ buf->idx, atomic_read(&bl->count), atomic_read(&bl->wfh),
+ buf->waiting, buf->pending);
+ if (buf->waiting || buf->pending) {
+ DRM_ERROR("Free buffer %d: w%d, p%d, l%d\n",
+ buf->idx, buf->waiting, buf->pending, buf->list);
+ }
+
+ return buf;
+}
+
+drm_buf_t *drm_freelist_get(drm_freelist_t *bl, int block)
+{
+ drm_buf_t *buf = NULL;
+ DECLARE_WAITQUEUE(entry, current);
+
+ if (!bl || !bl->initialized) return NULL;
+
+ /* Check for low water mark */
+ if (atomic_read(&bl->count) <= bl->low_mark) /* Became low */
+ atomic_set(&bl->wfh, 1);
+ if (atomic_read(&bl->wfh)) {
+ DRM_DEBUG("Block = %d, count = %d, wfh = %d\n",
+ block, atomic_read(&bl->count),
+ atomic_read(&bl->wfh));
+ if (block) {
+ add_wait_queue(&bl->waiting, &entry);
+ current->state = TASK_INTERRUPTIBLE;
+ for (;;) {
+ if (!atomic_read(&bl->wfh)
+ && (buf = drm_freelist_try(bl))) break;
+ schedule();
+ if (signal_pending(current)) break;
+ }
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&bl->waiting, &entry);
+ }
+ return buf;
+ }
+
+ DRM_DEBUG("Count = %d, wfh = %d\n",
+ atomic_read(&bl->count), atomic_read(&bl->wfh));
+ return drm_freelist_try(bl);
+}
diff --git a/linux/lock.c b/linux/lock.c
new file mode 100644
index 00000000..ae41526f
--- /dev/null
+++ b/linux/lock.c
@@ -0,0 +1,225 @@
+/* lock.c -- IOCTLs for locking -*- linux-c -*-
+ * Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
+ * Revised: Sun Feb 13 23:38:25 2000 by kevin@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/lock.c,v 1.5 1999/08/30 13:05:00 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/lock.c,v 1.4 2000/02/14 06:27:27 martin Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+int drm_block(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ DRM_DEBUG("\n");
+ return 0;
+}
+
+int drm_unblock(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ DRM_DEBUG("\n");
+ return 0;
+}
+
+int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
+{
+ unsigned int old, new, prev;
+
+ DRM_DEBUG("%d attempts\n", context);
+ do {
+ old = *lock;
+ if (old & _DRM_LOCK_HELD) new = old | _DRM_LOCK_CONT;
+ else new = context | _DRM_LOCK_HELD;
+ prev = cmpxchg(lock, old, new);
+ } while (prev != old);
+ if (_DRM_LOCKING_CONTEXT(old) == context) {
+ if (old & _DRM_LOCK_HELD) {
+ if (context != DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("%d holds heavyweight lock\n",
+ context);
+ }
+ return 0;
+ }
+ }
+ if (new == (context | _DRM_LOCK_HELD)) {
+ /* Have lock */
+ DRM_DEBUG("%d\n", context);
+ return 1;
+ }
+ DRM_DEBUG("%d unable to get lock held by %d\n",
+ context, _DRM_LOCKING_CONTEXT(old));
+ return 0;
+}
+
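+/* Lock-word sketch (illustrative): the hardware lock is a single word
+ * in the shared SAREA.  _DRM_LOCK_HELD marks it held, _DRM_LOCK_CONT
+ * records contention, and the low bits carry the owning context, so
+ * context 3 taking a free lock stores (3 | _DRM_LOCK_HELD).  cmpxchg()
+ * makes the test-and-set atomic; callers that lose sleep on
+ * dev->lock.lock_queue and retry. */
+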
+/* This takes a lock forcibly and hands it to context. Should ONLY be used
+ inside *_unlock to give lock to kernel before calling *_dma_schedule. */
+int drm_lock_transfer(drm_device_t *dev,
+ __volatile__ unsigned int *lock, unsigned int context)
+{
+ unsigned int old, new, prev;
+
+ dev->lock.pid = 0;
+ do {
+ old = *lock;
+ new = context | _DRM_LOCK_HELD;
+ prev = cmpxchg(lock, old, new);
+ } while (prev != old);
+ DRM_DEBUG("%d => %d\n", _DRM_LOCKING_CONTEXT(old), context);
+ return 1;
+}
+
+int drm_lock_free(drm_device_t *dev,
+ __volatile__ unsigned int *lock, unsigned int context)
+{
+ unsigned int old, new, prev;
+ pid_t pid = dev->lock.pid;
+
+ DRM_DEBUG("%d\n", context);
+ dev->lock.pid = 0;
+ do {
+ old = *lock;
+ new = 0;
+ prev = cmpxchg(lock, old, new);
+ } while (prev != old);
+ if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
+ DRM_ERROR("%d freed heavyweight lock held by %d (pid %d)\n",
+ context,
+ _DRM_LOCKING_CONTEXT(old),
+ pid);
+ return 1;
+ }
+ wake_up_interruptible(&dev->lock.lock_queue);
+ return 0;
+}
+
+static int drm_flush_queue(drm_device_t *dev, int context)
+{
+ DECLARE_WAITQUEUE(entry, current);
+ int ret = 0;
+ drm_queue_t *q = dev->queuelist[context];
+
+ DRM_DEBUG("\n");
+
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) > 1) {
+ atomic_inc(&q->block_write);
+ current->state = TASK_INTERRUPTIBLE;
+ add_wait_queue(&q->flush_queue, &entry);
+ atomic_inc(&q->block_count);
+ for (;;) {
+ if (!DRM_BUFCOUNT(&q->waitlist)) break;
+ schedule();
+ if (signal_pending(current)) {
+ ret = -EINTR; /* Can't restart */
+ break;
+ }
+ }
+ atomic_dec(&q->block_count);
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&q->flush_queue, &entry);
+ }
+ atomic_dec(&q->use_count);
+ atomic_inc(&q->total_flushed);
+
+ /* NOTE: block_write is still incremented!
+	   Use drm_flush_unblock_queue to decrement. */
+ return ret;
+}
+
+static int drm_flush_unblock_queue(drm_device_t *dev, int context)
+{
+ drm_queue_t *q = dev->queuelist[context];
+
+ DRM_DEBUG("\n");
+
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) > 1) {
+ if (atomic_read(&q->block_write)) {
+ atomic_dec(&q->block_write);
+ wake_up_interruptible(&q->write_queue);
+ }
+ }
+ atomic_dec(&q->use_count);
+ return 0;
+}
+
+int drm_flush_block_and_flush(drm_device_t *dev, int context,
+ drm_lock_flags_t flags)
+{
+ int ret = 0;
+ int i;
+
+ DRM_DEBUG("\n");
+
+ if (flags & _DRM_LOCK_FLUSH) {
+ ret = drm_flush_queue(dev, DRM_KERNEL_CONTEXT);
+ if (!ret) ret = drm_flush_queue(dev, context);
+ }
+ if (flags & _DRM_LOCK_FLUSH_ALL) {
+ for (i = 0; !ret && i < dev->queue_count; i++) {
+ ret = drm_flush_queue(dev, i);
+ }
+ }
+ return ret;
+}
+
+int drm_flush_unblock(drm_device_t *dev, int context, drm_lock_flags_t flags)
+{
+ int ret = 0;
+ int i;
+
+ DRM_DEBUG("\n");
+
+ if (flags & _DRM_LOCK_FLUSH) {
+ ret = drm_flush_unblock_queue(dev, DRM_KERNEL_CONTEXT);
+ if (!ret) ret = drm_flush_unblock_queue(dev, context);
+ }
+ if (flags & _DRM_LOCK_FLUSH_ALL) {
+ for (i = 0; !ret && i < dev->queue_count; i++) {
+ ret = drm_flush_unblock_queue(dev, i);
+ }
+ }
+
+ return ret;
+}
+
+int drm_finish(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ int ret = 0;
+ drm_lock_t lock;
+
+ DRM_DEBUG("\n");
+
+ copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
+ ret = drm_flush_block_and_flush(dev, lock.context, lock.flags);
+ drm_flush_unblock(dev, lock.context, lock.flags);
+ return ret;
+}
diff --git a/linux/memory.c b/linux/memory.c
new file mode 100644
index 00000000..4ba911f3
--- /dev/null
+++ b/linux/memory.c
@@ -0,0 +1,327 @@
+/* memory.c -- Memory management wrappers for DRM -*- linux-c -*-
+ * Created: Thu Feb 4 14:00:34 1999 by faith@precisioninsight.com
+ * Revised: Sun Feb 13 23:39:37 2000 by kevin@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/memory.c,v 1.4 1999/08/20 20:00:53 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/memory.c,v 1.4 2000/02/14 06:27:28 martin Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+typedef struct drm_mem_stats {
+ const char *name;
+ int succeed_count;
+ int free_count;
+ int fail_count;
+ unsigned long bytes_allocated;
+ unsigned long bytes_freed;
+} drm_mem_stats_t;
+
+static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED;
+static unsigned long drm_ram_available = 0; /* In pages */
+static unsigned long drm_ram_used = 0;
+static drm_mem_stats_t drm_mem_stats[] = {
+ [DRM_MEM_DMA] = { "dmabufs" },
+ [DRM_MEM_SAREA] = { "sareas" },
+ [DRM_MEM_DRIVER] = { "driver" },
+ [DRM_MEM_MAGIC] = { "magic" },
+ [DRM_MEM_IOCTLS] = { "ioctltab" },
+ [DRM_MEM_MAPS] = { "maplist" },
+ [DRM_MEM_VMAS] = { "vmalist" },
+ [DRM_MEM_BUFS] = { "buflist" },
+ [DRM_MEM_SEGS] = { "seglist" },
+ [DRM_MEM_PAGES] = { "pagelist" },
+ [DRM_MEM_FILES] = { "files" },
+ [DRM_MEM_QUEUES] = { "queues" },
+ [DRM_MEM_CMDS] = { "commands" },
+ [DRM_MEM_MAPPINGS] = { "mappings" },
+ [DRM_MEM_BUFLISTS] = { "buflists" },
+ { NULL, 0, } /* Last entry must be null */
+};
+
+void drm_mem_init(void)
+{
+ drm_mem_stats_t *mem;
+ struct sysinfo si;
+
+ for (mem = drm_mem_stats; mem->name; ++mem) {
+ mem->succeed_count = 0;
+ mem->free_count = 0;
+ mem->fail_count = 0;
+ mem->bytes_allocated = 0;
+ mem->bytes_freed = 0;
+ }
+
+ si_meminfo(&si);
+#if LINUX_VERSION_CODE < 0x020317
+ /* Changed to page count in 2.3.23 */
+ drm_ram_available = si.totalram >> PAGE_SHIFT;
+#else
+ drm_ram_available = si.totalram;
+#endif
+ drm_ram_used = 0;
+}
+
+/* drm_mem_info is called whenever a process reads the DRM memory
+   statistics entry under /proc. */
+
+static int _drm_mem_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_mem_stats_t *pt;
+
+ if (offset > 0) return 0; /* no partial requests */
+ len = 0;
+ *eof = 1;
+ DRM_PROC_PRINT(" total counts "
+ " | outstanding \n");
+ DRM_PROC_PRINT("type alloc freed fail bytes freed"
+ " | allocs bytes\n\n");
+ DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
+ "system", 0, 0, 0,
+ drm_ram_available << (PAGE_SHIFT - 10));
+ DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
+ "locked", 0, 0, 0, drm_ram_used >> 10);
+ DRM_PROC_PRINT("\n");
+ for (pt = drm_mem_stats; pt->name; pt++) {
+ DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
+ pt->name,
+ pt->succeed_count,
+ pt->free_count,
+ pt->fail_count,
+ pt->bytes_allocated,
+ pt->bytes_freed,
+ pt->succeed_count - pt->free_count,
+ (long)pt->bytes_allocated
+ - (long)pt->bytes_freed);
+ }
+
+ return len;
+}
+
+int drm_mem_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ int ret;
+
+ spin_lock(&drm_mem_lock);
+ ret = _drm_mem_info(buf, start, offset, len, eof, data);
+ spin_unlock(&drm_mem_lock);
+ return ret;
+}
+
+void *drm_alloc(size_t size, int area)
+{
+ void *pt;
+
+ if (!size) {
+ DRM_MEM_ERROR(area, "Allocating 0 bytes\n");
+ return NULL;
+ }
+
+ if (!(pt = kmalloc(size, GFP_KERNEL))) {
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[area].fail_count;
+ spin_unlock(&drm_mem_lock);
+ return NULL;
+ }
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[area].succeed_count;
+ drm_mem_stats[area].bytes_allocated += size;
+ spin_unlock(&drm_mem_lock);
+ return pt;
+}
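+
+/* Every allocation must be released with a matching size/area pair,
+ * both because kfree_s() wants the size back and so the per-area
+ * statistics above stay balanced.  An illustrative pairing:
+ *
+ *	buf = drm_alloc(count * sizeof(*buf), DRM_MEM_BUFS);
+ *	...
+ *	drm_free(buf, count * sizeof(*buf), DRM_MEM_BUFS);
+ */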
+
+void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
+{
+ void *pt;
+
+ if (!(pt = drm_alloc(size, area))) return NULL;
+ if (oldpt && oldsize) {
+ memcpy(pt, oldpt, oldsize);
+ drm_free(oldpt, oldsize, area);
+ }
+ return pt;
+}
+
+char *drm_strdup(const char *s, int area)
+{
+ char *pt;
+ int length = s ? strlen(s) : 0;
+
+ if (!(pt = drm_alloc(length+1, area))) return NULL;
+	if (s) strcpy(pt, s);	/* Guard the NULL case drm_strdup accepts */
+	else   pt[0] = '\0';
+ return pt;
+}
+
+void drm_strfree(const char *s, int area)
+{
+ unsigned int size;
+
+ if (!s) return;
+
+	size = strlen(s) + 1;	/* s is known non-NULL here */
+ drm_free((void *)s, size, area);
+}
+
+void drm_free(void *pt, size_t size, int area)
+{
+ int alloc_count;
+ int free_count;
+
+ if (!pt) DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
+ else kfree_s(pt, size);
+ spin_lock(&drm_mem_lock);
+ drm_mem_stats[area].bytes_freed += size;
+ free_count = ++drm_mem_stats[area].free_count;
+ alloc_count = drm_mem_stats[area].succeed_count;
+ spin_unlock(&drm_mem_lock);
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+}
+
+unsigned long drm_alloc_pages(int order, int area)
+{
+ unsigned long address;
+ unsigned long bytes = PAGE_SIZE << order;
+ unsigned long addr;
+ unsigned int sz;
+
+ spin_lock(&drm_mem_lock);
+ if ((drm_ram_used >> PAGE_SHIFT)
+ > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
+ spin_unlock(&drm_mem_lock);
+ return 0;
+ }
+ spin_unlock(&drm_mem_lock);
+
+ address = __get_free_pages(GFP_KERNEL, order);
+ if (!address) {
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[area].fail_count;
+ spin_unlock(&drm_mem_lock);
+ return 0;
+ }
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[area].succeed_count;
+ drm_mem_stats[area].bytes_allocated += bytes;
+ drm_ram_used += bytes;
+ spin_unlock(&drm_mem_lock);
+
+
+ /* Zero outside the lock */
+ memset((void *)address, 0, bytes);
+
+ /* Reserve */
+ for (addr = address, sz = bytes;
+ sz > 0;
+ addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+ mem_map_reserve(MAP_NR(addr));
+ }
+
+ return address;
+}
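+
+/* drm_alloc_pages() hands back 2^order physically contiguous pages,
+ * zeroed and marked reserved so they can later be mapped to userspace;
+ * with 4 kB pages, order 2 therefore yields one 16 kB block.  The
+ * matching drm_free_pages() call below must receive the same order and
+ * area. */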
+
+void drm_free_pages(unsigned long address, int order, int area)
+{
+ unsigned long bytes = PAGE_SIZE << order;
+ int alloc_count;
+ int free_count;
+ unsigned long addr;
+ unsigned int sz;
+
+ if (!address) {
+ DRM_MEM_ERROR(area, "Attempt to free address 0\n");
+ } else {
+ /* Unreserve */
+ for (addr = address, sz = bytes;
+ sz > 0;
+ addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+ mem_map_unreserve(MAP_NR(addr));
+ }
+ free_pages(address, order);
+ }
+
+ spin_lock(&drm_mem_lock);
+ free_count = ++drm_mem_stats[area].free_count;
+ alloc_count = drm_mem_stats[area].succeed_count;
+ drm_mem_stats[area].bytes_freed += bytes;
+ drm_ram_used -= bytes;
+ spin_unlock(&drm_mem_lock);
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(area,
+ "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+}
+
+void *drm_ioremap(unsigned long offset, unsigned long size)
+{
+ void *pt;
+
+ if (!size) {
+ DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
+ "Mapping 0 bytes at 0x%08lx\n", offset);
+ return NULL;
+ }
+
+ if (!(pt = ioremap(offset, size))) {
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
+ spin_unlock(&drm_mem_lock);
+ return NULL;
+ }
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
+ drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
+ spin_unlock(&drm_mem_lock);
+ return pt;
+}
+
+void drm_ioremapfree(void *pt, unsigned long size)
+{
+ int alloc_count;
+ int free_count;
+
+ if (!pt)
+ DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
+ "Attempt to free NULL pointer\n");
+ else
+ iounmap(pt);
+
+ spin_lock(&drm_mem_lock);
+ drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
+ free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
+ alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
+ spin_unlock(&drm_mem_lock);
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
+ "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+}
diff --git a/linux/mga_bufs.c b/linux/mga_bufs.c
new file mode 100644
index 00000000..34f1112a
--- /dev/null
+++ b/linux/mga_bufs.c
@@ -0,0 +1,636 @@
+/* mga_bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
+ * Created: Thu Jan 6 01:47:26 2000 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
+ * Jeff Hartmann <jhartmann@precisioninsight.com>
+ *
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_bufs.c,v 1.1 2000/02/11 17:26:06 dawes Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "mga_drv.h"
+#include "mga_dma.h"
+#include "linux/un.h"
+
+
+int mga_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ drm_buf_entry_t *entry;
+ drm_buf_t *buf;
+ unsigned long offset;
+ unsigned long agp_offset;
+ int count;
+ int order;
+ int size;
+ int alignment;
+ int page_order;
+ int total;
+ int byte_count;
+ int i;
+
+ if (!dma) return -EINVAL;
+
+ copy_from_user_ret(&request,
+ (drm_buf_desc_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+ count = request.count;
+ order = drm_order(request.size);
+ size = 1 << order;
+ agp_offset = request.agp_start;
+	alignment  = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) : size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
+ byte_count = 0;
+
+ DRM_DEBUG("count: %d\n", count);
+ DRM_DEBUG("order: %d\n", order);
+ DRM_DEBUG("size: %d\n", size);
+	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+ DRM_DEBUG("alignment: %d\n", alignment);
+ DRM_DEBUG("page_order: %d\n", page_order);
+ DRM_DEBUG("total: %d\n", total);
+ DRM_DEBUG("byte_count: %d\n", byte_count);
+
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
+ if (dev->queue_count) return -EBUSY; /* Not while in use */
+ spin_lock(&dev->count_lock);
+ if (dev->buf_use) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ atomic_inc(&dev->buf_alloc);
+ spin_unlock(&dev->count_lock);
+
+ down(&dev->struct_sem);
+ entry = &dma->bufs[order];
+ if (entry->buf_count) {
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM; /* May only call once for each order */
+ }
+
+ entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS);
+ if (!entry->buflist) {
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
+ }
+ memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+ entry->buf_size = size;
+ entry->page_order = page_order;
+ offset = 0;
+
+
+ while(entry->buf_count < count) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+
+		DRM_DEBUG("offset : %lu\n", offset);
+
+ buf->offset = offset; /* Hrm */
+ buf->bus_address = dev->agp->base + agp_offset + offset;
+ buf->address = (void *)(agp_offset + offset + dev->agp->base);
+ buf->next = NULL;
+ buf->waiting = 0;
+ buf->pending = 0;
+ init_waitqueue_head(&buf->dma_wait);
+ buf->pid = 0;
+
+ buf->dev_private = drm_alloc(sizeof(drm_mga_buf_priv_t), DRM_MEM_BUFS);
+ buf->dev_priv_size = sizeof(drm_mga_buf_priv_t);
+
+#if DRM_DMA_HISTOGRAM
+ buf->time_queued = 0;
+ buf->time_dispatched = 0;
+ buf->time_completed = 0;
+ buf->time_freed = 0;
+#endif
+ offset = offset + alignment;
+ entry->buf_count++;
+ byte_count += PAGE_SIZE << page_order;
+
+ DRM_DEBUG("buffer %d @ %p\n",
+ entry->buf_count, buf->address);
+ }
+
+ dma->buflist = drm_realloc(dma->buflist,
+ dma->buf_count * sizeof(*dma->buflist),
+ (dma->buf_count + entry->buf_count)
+ * sizeof(*dma->buflist),
+ DRM_MEM_BUFS);
+ for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
+ dma->buflist[i] = &entry->buflist[i - dma->buf_count];
+
+ dma->buf_count += entry->buf_count;
+
+ DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+
+ dma->byte_count += byte_count;
+
+ DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+ drm_freelist_create(&entry->freelist, entry->buf_count);
+ for (i = 0; i < entry->buf_count; i++) {
+ drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
+ }
+
+ up(&dev->struct_sem);
+
+ request.count = entry->buf_count;
+ request.size = size;
+
+ copy_to_user_ret((drm_buf_desc_t *)arg,
+ &request,
+ sizeof(request),
+ -EFAULT);
+
+ atomic_dec(&dev->buf_alloc);
+
+ DRM_DEBUG("count: %d\n", count);
+ DRM_DEBUG("order: %d\n", order);
+ DRM_DEBUG("size: %d\n", size);
+	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+ DRM_DEBUG("alignment: %d\n", alignment);
+ DRM_DEBUG("page_order: %d\n", page_order);
+ DRM_DEBUG("total: %d\n", total);
+ DRM_DEBUG("byte_count: %d\n", byte_count);
+
+ dma->flags = _DRM_DMA_USE_AGP;
+
+ DRM_DEBUG("dma->flags : %lx\n", dma->flags);
+
+ return 0;
+}
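+
+/* An illustrative userspace request for the AGP path above (ioctl and
+ * field names taken from drm.h in this import; the values are
+ * hypothetical):
+ *
+ *	drm_buf_desc_t req;
+ *	req.count     = 128;		   buffers wanted
+ *	req.size      = 4096;		   rounded up to 1 << drm_order(size)
+ *	req.flags     = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
+ *	req.agp_start = 0;		   byte offset into the AGP aperture
+ *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &req);
+ */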
+
+int mga_addbufs_pci(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ int count;
+ int order;
+ int size;
+ int total;
+ int page_order;
+ drm_buf_entry_t *entry;
+ unsigned long page;
+ drm_buf_t *buf;
+ int alignment;
+ unsigned long offset;
+ int i;
+ int byte_count;
+ int page_count;
+
+ if (!dma) return -EINVAL;
+
+ copy_from_user_ret(&request,
+ (drm_buf_desc_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+ count = request.count;
+ order = drm_order(request.size);
+ size = 1 << order;
+
+ DRM_DEBUG("count = %d, size = %d (%d), order = %d, queue_count = %d\n",
+ request.count, request.size, size, order, dev->queue_count);
+
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
+ if (dev->queue_count) return -EBUSY; /* Not while in use */
+
+	alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) : size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
+
+ spin_lock(&dev->count_lock);
+ if (dev->buf_use) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ atomic_inc(&dev->buf_alloc);
+ spin_unlock(&dev->count_lock);
+
+ down(&dev->struct_sem);
+ entry = &dma->bufs[order];
+ if (entry->buf_count) {
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM; /* May only call once for each order */
+ }
+
+ entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS);
+ if (!entry->buflist) {
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
+ }
+ memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+ entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
+ DRM_MEM_SEGS);
+ if (!entry->seglist) {
+ drm_free(entry->buflist,
+ count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS);
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
+ }
+ memset(entry->seglist, 0, count * sizeof(*entry->seglist));
+
+ dma->pagelist = drm_realloc(dma->pagelist,
+ dma->page_count * sizeof(*dma->pagelist),
+ (dma->page_count + (count << page_order))
+ * sizeof(*dma->pagelist),
+ DRM_MEM_PAGES);
+ DRM_DEBUG("pagelist: %d entries\n",
+ dma->page_count + (count << page_order));
+
+
+ entry->buf_size = size;
+ entry->page_order = page_order;
+ byte_count = 0;
+ page_count = 0;
+ while (entry->buf_count < count) {
+ if (!(page = drm_alloc_pages(page_order, DRM_MEM_DMA))) break;
+ entry->seglist[entry->seg_count++] = page;
+ for (i = 0; i < (1 << page_order); i++) {
+ DRM_DEBUG("page %d @ 0x%08lx\n",
+ dma->page_count + page_count,
+ page + PAGE_SIZE * i);
+ dma->pagelist[dma->page_count + page_count++]
+ = page + PAGE_SIZE * i;
+ }
+ for (offset = 0;
+ offset + size <= total && entry->buf_count < count;
+ offset += alignment, ++entry->buf_count) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+ buf->offset = (dma->byte_count + byte_count + offset);
+ buf->address = (void *)(page + offset);
+ buf->next = NULL;
+ buf->waiting = 0;
+ buf->pending = 0;
+ init_waitqueue_head(&buf->dma_wait);
+ buf->pid = 0;
+#if DRM_DMA_HISTOGRAM
+ buf->time_queued = 0;
+ buf->time_dispatched = 0;
+ buf->time_completed = 0;
+ buf->time_freed = 0;
+#endif
+ DRM_DEBUG("buffer %d @ %p\n",
+ entry->buf_count, buf->address);
+ }
+ byte_count += PAGE_SIZE << page_order;
+ }
+
+ dma->buflist = drm_realloc(dma->buflist,
+ dma->buf_count * sizeof(*dma->buflist),
+ (dma->buf_count + entry->buf_count)
+ * sizeof(*dma->buflist),
+ DRM_MEM_BUFS);
+ for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
+ dma->buflist[i] = &entry->buflist[i - dma->buf_count];
+
+ dma->buf_count += entry->buf_count;
+ dma->seg_count += entry->seg_count;
+ dma->page_count += entry->seg_count << page_order;
+ dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
+
+ drm_freelist_create(&entry->freelist, entry->buf_count);
+ for (i = 0; i < entry->buf_count; i++) {
+ drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
+ }
+
+ up(&dev->struct_sem);
+
+ request.count = entry->buf_count;
+ request.size = size;
+
+ copy_to_user_ret((drm_buf_desc_t *)arg,
+ &request,
+ sizeof(request),
+ -EFAULT);
+
+ atomic_dec(&dev->buf_alloc);
+ return 0;
+}
+
+int mga_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_buf_desc_t request;
+
+ copy_from_user_ret(&request,
+ (drm_buf_desc_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+ if(request.flags & _DRM_AGP_BUFFER)
+ return mga_addbufs_agp(inode, filp, cmd, arg);
+ else
+ return mga_addbufs_pci(inode, filp, cmd, arg);
+}
+
+int mga_infobufs(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_info_t request;
+ int i;
+ int count;
+
+ if (!dma) return -EINVAL;
+
+ spin_lock(&dev->count_lock);
+ if (atomic_read(&dev->buf_alloc)) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ ++dev->buf_use; /* Can't allocate more after this call */
+ spin_unlock(&dev->count_lock);
+
+ copy_from_user_ret(&request,
+ (drm_buf_info_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+ for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
+ if (dma->bufs[i].buf_count) ++count;
+ }
+
+ DRM_DEBUG("count = %d\n", count);
+
+ if (request.count >= count) {
+ for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
+ if (dma->bufs[i].buf_count) {
+ copy_to_user_ret(&request.list[count].count,
+ &dma->bufs[i].buf_count,
+ sizeof(dma->bufs[0]
+ .buf_count),
+ -EFAULT);
+ copy_to_user_ret(&request.list[count].size,
+ &dma->bufs[i].buf_size,
+ sizeof(dma->bufs[0].buf_size),
+ -EFAULT);
+ copy_to_user_ret(&request.list[count].low_mark,
+ &dma->bufs[i]
+ .freelist.low_mark,
+ sizeof(dma->bufs[0]
+ .freelist.low_mark),
+ -EFAULT);
+ copy_to_user_ret(&request.list[count]
+ .high_mark,
+ &dma->bufs[i]
+ .freelist.high_mark,
+ sizeof(dma->bufs[0]
+ .freelist.high_mark),
+ -EFAULT);
+ DRM_DEBUG("%d %d %d %d %d\n",
+ i,
+ dma->bufs[i].buf_count,
+ dma->bufs[i].buf_size,
+ dma->bufs[i].freelist.low_mark,
+ dma->bufs[i].freelist.high_mark);
+ ++count;
+ }
+ }
+ }
+ request.count = count;
+
+ copy_to_user_ret((drm_buf_info_t *)arg,
+ &request,
+ sizeof(request),
+ -EFAULT);
+
+ return 0;
+}
+
+int mga_markbufs(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ int order;
+ drm_buf_entry_t *entry;
+
+ if (!dma) return -EINVAL;
+
+ copy_from_user_ret(&request,
+ (drm_buf_desc_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+ DRM_DEBUG("%d, %d, %d\n",
+ request.size, request.low_mark, request.high_mark);
+ order = drm_order(request.size);
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
+ entry = &dma->bufs[order];
+
+ if (request.low_mark < 0 || request.low_mark > entry->buf_count)
+ return -EINVAL;
+ if (request.high_mark < 0 || request.high_mark > entry->buf_count)
+ return -EINVAL;
+
+ entry->freelist.low_mark = request.low_mark;
+ entry->freelist.high_mark = request.high_mark;
+
+ return 0;
+}
+
+int mga_freebufs(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_free_t request;
+ int i;
+ int idx;
+ drm_buf_t *buf;
+
+ if (!dma) return -EINVAL;
+
+ copy_from_user_ret(&request,
+ (drm_buf_free_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+ DRM_DEBUG("%d\n", request.count);
+ for (i = 0; i < request.count; i++) {
+ copy_from_user_ret(&idx,
+ &request.list[i],
+ sizeof(idx),
+ -EFAULT);
+ if (idx < 0 || idx >= dma->buf_count) {
+ DRM_ERROR("Index %d (of %d max)\n",
+ idx, dma->buf_count - 1);
+ return -EINVAL;
+ }
+ buf = dma->buflist[idx];
+ if (buf->pid != current->pid) {
+ DRM_ERROR("Process %d freeing buffer owned by %d\n",
+ current->pid, buf->pid);
+ return -EINVAL;
+ }
+ drm_free_buffer(dev, buf);
+ }
+
+ return 0;
+}
+
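+/* Map every DMA buffer into the client with a single do_mmap() of
+ * either the AGP buffer region or the PCI DMA area, then report each
+ * buffer's index, size and user address back through request.list[].
+ * The caller must size request.count to at least dma->buf_count or
+ * nothing is mapped. */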
+int mga_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ int retcode = 0;
+ const int zero = 0;
+ unsigned long virtual;
+ unsigned long address;
+ drm_buf_map_t request;
+ int i;
+
+ if (!dma) return -EINVAL;
+
+ DRM_DEBUG("\n");
+
+ spin_lock(&dev->count_lock);
+ if (atomic_read(&dev->buf_alloc)) {
+ spin_unlock(&dev->count_lock);
+		DRM_DEBUG("Busy\n");
+ return -EBUSY;
+ }
+ ++dev->buf_use; /* Can't allocate more after this call */
+ spin_unlock(&dev->count_lock);
+
+ copy_from_user_ret(&request,
+ (drm_buf_map_t *)arg,
+ sizeof(request),
+ -EFAULT);
+
+ DRM_DEBUG("mga_mapbufs\n");
+ DRM_DEBUG("dma->flags : %lx\n", dma->flags);
+
+ if (request.count >= dma->buf_count) {
+ if(dma->flags & _DRM_DMA_USE_AGP) {
+ drm_mga_private_t *dev_priv = dev->dev_private;
+ drm_map_t *map = NULL;
+
+ map = dev->maplist[dev_priv->buffer_map_idx];
+ if (!map) {
+ DRM_DEBUG("map is null\n");
+ retcode = -EINVAL;
+ goto done;
+ }
+
+ DRM_DEBUG("map->offset : %lx\n", map->offset);
+ DRM_DEBUG("map->size : %lx\n", map->size);
+ DRM_DEBUG("map->type : %d\n", map->type);
+ DRM_DEBUG("map->flags : %x\n", map->flags);
+			DRM_DEBUG("map->handle : %p\n", map->handle);
+ DRM_DEBUG("map->mtrr : %d\n", map->mtrr);
+
+ virtual = do_mmap(filp, 0, map->size, PROT_READ|PROT_WRITE,
+ MAP_SHARED, (unsigned long)map->offset);
+ } else {
+ virtual = do_mmap(filp, 0, dma->byte_count,
+ PROT_READ|PROT_WRITE, MAP_SHARED, 0);
+ }
+ if (virtual > -1024UL) {
+ /* Real error */
+ DRM_DEBUG("mmap error\n");
+ retcode = (signed long)virtual;
+ goto done;
+ }
+ request.virtual = (void *)virtual;
+
+ for (i = 0; i < dma->buf_count; i++) {
+ if (copy_to_user(&request.list[i].idx,
+ &dma->buflist[i]->idx,
+ sizeof(request.list[0].idx))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ if (copy_to_user(&request.list[i].total,
+ &dma->buflist[i]->total,
+ sizeof(request.list[0].total))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ if (copy_to_user(&request.list[i].used,
+ &zero,
+ sizeof(zero))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ address = virtual + dma->buflist[i]->offset;
+ if (copy_to_user(&request.list[i].address,
+ &address,
+ sizeof(address))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ }
+ }
+ done:
+ request.count = dma->buf_count;
+ DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
+
+ copy_to_user_ret((drm_buf_map_t *)arg,
+ &request,
+ sizeof(request),
+ -EFAULT);
+
+ DRM_DEBUG("retcode : %d\n", retcode);
+
+ return retcode;
+}
diff --git a/linux/mga_context.c b/linux/mga_context.c
new file mode 100644
index 00000000..fb0d336f
--- /dev/null
+++ b/linux/mga_context.c
@@ -0,0 +1,245 @@
+/* mga_context.c -- IOCTLs for mga contexts -*- linux-c -*-
+ * Created: Mon Dec 13 09:51:35 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Rickard E. (Rik) Faith <faith@precisioninsight.com>
+ *
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_context.c,v 1.1 2000/02/11 17:26:06 dawes Exp $
+ *
+ */
+
+#include <linux/sched.h>
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "mga_drv.h"
+
+static int mga_alloc_queue(drm_device_t *dev)
+{
+ int temp = drm_ctxbitmap_next(dev);
+ printk("mga_alloc_queue: %d\n", temp);
+ return temp;
+}
+
+int mga_context_switch(drm_device_t *dev, int old, int new)
+{
+ char buf[64];
+
+ atomic_inc(&dev->total_ctx);
+
+ if (test_and_set_bit(0, &dev->context_flag)) {
+ DRM_ERROR("Reentering -- FIXME\n");
+ return -EBUSY;
+ }
+
+#if DRM_DMA_HISTOGRAM
+ dev->ctx_start = get_cycles();
+#endif
+
+ printk("Context switch from %d to %d\n", old, new);
+
+ if (new == dev->last_context) {
+ clear_bit(0, &dev->context_flag);
+ return 0;
+ }
+
+ if (drm_flags & DRM_FLAG_NOCTX) {
+ mga_context_switch_complete(dev, new);
+ } else {
+ sprintf(buf, "C %d %d\n", old, new);
+ drm_write_string(dev, buf);
+ }
+
+ return 0;
+}
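+
+/* When DRM_FLAG_NOCTX is clear the kernel only posts the switch
+ * request: the "C %d %d" string written above is read back by the X
+ * server, which performs the hardware switch and then reports
+ * completion through the new-context ioctl (mga_newctx below), ending
+ * up in mga_context_switch_complete(). */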
+
+int mga_context_switch_complete(drm_device_t *dev, int new)
+{
+ dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
+ dev->last_switch = jiffies;
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("Lock isn't held after context switch\n");
+ }
+
+ /* If a context switch is ever initiated
+ when the kernel holds the lock, release
+ that lock here. */
+#if DRM_DMA_HISTOGRAM
+ atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles()
+ - dev->ctx_start)]);
+
+#endif
+ clear_bit(0, &dev->context_flag);
+ wake_up(&dev->context_wait);
+
+ return 0;
+}
+
+int mga_resctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_ctx_res_t res;
+ drm_ctx_t ctx;
+ int i;
+
+ printk("%d\n", DRM_RESERVED_CONTEXTS);
+ copy_from_user_ret(&res, (drm_ctx_res_t *)arg, sizeof(res), -EFAULT);
+ if (res.count >= DRM_RESERVED_CONTEXTS) {
+ memset(&ctx, 0, sizeof(ctx));
+ for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+ ctx.handle = i;
+ copy_to_user_ret(&res.contexts[i],
+ &i,
+ sizeof(i),
+ -EFAULT);
+ }
+ }
+ res.count = DRM_RESERVED_CONTEXTS;
+ copy_to_user_ret((drm_ctx_res_t *)arg, &res, sizeof(res), -EFAULT);
+ return 0;
+}
+
+int mga_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
+ if ((ctx.handle = mga_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
+ /* Skip kernel's context and get a new one. */
+ ctx.handle = mga_alloc_queue(dev);
+ }
+ if (ctx.handle == -1) {
+ printk("Not enough free contexts.\n");
+ /* Should this return -EBUSY instead? */
+ return -ENOMEM;
+ }
+ printk("%d\n", ctx.handle);
+ copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT);
+ return 0;
+}
+
+int mga_modctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ /* This does nothing for the mga */
+ return 0;
+}
+
+int mga_getctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_ctx_t ctx;
+
+ copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT);
+	/* This is 0, because we don't handle any context flags */
+ ctx.flags = 0;
+ copy_to_user_ret((drm_ctx_t*)arg, &ctx, sizeof(ctx), -EFAULT);
+ return 0;
+}
+
+int mga_switchctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
+ printk("%d\n", ctx.handle);
+ return mga_context_switch(dev, dev->last_context, ctx.handle);
+}
+
+int mga_newctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
+ printk("%d\n", ctx.handle);
+ mga_context_switch_complete(dev, ctx.handle);
+
+ return 0;
+}
+
+int mga_rmctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+ drm_queue_t *q;
+ drm_buf_t *buf;
+
+ copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
+ printk("%d\n", ctx.handle);
+ if(ctx.handle == DRM_KERNEL_CONTEXT) {
+ q = dev->queuelist[ctx.handle];
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) == 1) {
+ /* No longer in use */
+ atomic_dec(&q->use_count);
+ return -EINVAL;
+ }
+ atomic_inc(&q->finalization); /* Mark queue in finalization state */
+ atomic_sub(2, &q->use_count);
+ /* Mark queue as unused (pending finalization) */
+
+ while (test_and_set_bit(0, &dev->interrupt_flag)) {
+ printk("Calling schedule from rmctx\n");
+ schedule();
+ if (signal_pending(current)) {
+ clear_bit(0, &dev->interrupt_flag);
+ return -EINTR;
+ }
+ }
+
+ /* Remove queued buffers */
+ while ((buf = drm_waitlist_get(&q->waitlist))) {
+ drm_free_buffer(dev, buf);
+ }
+ clear_bit(0, &dev->interrupt_flag);
+
+ /* Wakeup blocked processes */
+ wake_up_interruptible(&q->read_queue);
+ wake_up_interruptible(&q->write_queue);
+ wake_up_interruptible(&q->flush_queue);
+
+ /* Finalization over. Queue is made
+ available when both use_count and
+ finalization become 0, which won't
+ happen until all the waiting processes
+ stop waiting. */
+ atomic_dec(&q->finalization);
+ } else {
+ drm_ctxbitmap_free(dev, ctx.handle);
+ }
+
+ return 0;
+}
diff --git a/linux/mga_dma.c b/linux/mga_dma.c
new file mode 100644
index 00000000..2e24e5b4
--- /dev/null
+++ b/linux/mga_dma.c
@@ -0,0 +1,978 @@
+/* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*-
+ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
+ * Jeff Hartmann <jhartmann@precisioninsight.com>
+ *
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_dma.c,v 1.1 2000/02/11 17:26:07 dawes Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "mga_drv.h"
+#include "mgareg_flags.h"
+#include "mga_dma.h"
+#include "mga_state.h"
+
+#include <linux/interrupt.h> /* For task queue support */
+
+#define MGA_REG(reg) 2
+#define MGA_BASE(reg) ((unsigned long) \
+ ((drm_device_t *)dev)->maplist[MGA_REG(reg)]->handle)
+#define MGA_ADDR(reg) (MGA_BASE(reg) + reg)
+#define MGA_DEREF(reg) *(__volatile__ int *)MGA_ADDR(reg)
+#define MGA_READ(reg) MGA_DEREF(reg)
+#define MGA_WRITE(reg,val) do { MGA_DEREF(reg) = val; } while (0)
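+
+/* These macros go through the kernel mapping of the MGA control
+ * aperture (maplist index 2 in this import), so register access in the
+ * rest of the file reads as, e.g.:
+ *
+ *	MGA_WRITE(MGAREG_ICLEAR, 0xfa7);	clear pending interrupts
+ *	while (MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ;
+ */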
+
+#define PDEA_pagpxfer_enable 0x2
+#define MGA_SYNC_TAG 0x423f4200
+
+typedef enum {
+ TT_GENERAL,
+ TT_BLIT,
+ TT_VECTOR,
+ TT_VERTEX
+} transferType_t;
+
+
+static void mga_delay(void)
+{
+ return;
+}
+
+int mga_dma_cleanup(drm_device_t *dev)
+{
+ if(dev->dev_private) {
+ drm_mga_private_t *dev_priv =
+ (drm_mga_private_t *) dev->dev_private;
+
+ if(dev_priv->ioremap) {
+ int temp = (dev_priv->warp_ucode_size +
+ dev_priv->primary_size +
+ PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE;
+
+ drm_ioremapfree((void *) dev_priv->ioremap, temp);
+ }
+
+ drm_free(dev->dev_private, sizeof(drm_mga_private_t),
+ DRM_MEM_DRIVER);
+ dev->dev_private = NULL;
+ }
+
+ return 0;
+}
+
+static int mga_alloc_kernel_queue(drm_device_t *dev)
+{
+ drm_queue_t *queue = NULL;
+ /* Allocate a new queue */
+ down(&dev->struct_sem);
+
+ if(dev->queue_count != 0) {
+		/* Resetting the kernel context here is not
+ * a race, since it can only happen when that
+ * queue is empty.
+ */
+ queue = dev->queuelist[DRM_KERNEL_CONTEXT];
+ printk("Kernel queue already allocated\n");
+ } else {
+ queue = drm_alloc(sizeof(*queue), DRM_MEM_QUEUES);
+ if(!queue) {
+ up(&dev->struct_sem);
+ printk("out of memory\n");
+ return -ENOMEM;
+ }
+ ++dev->queue_count;
+ dev->queuelist = drm_alloc(sizeof(*dev->queuelist),
+ DRM_MEM_QUEUES);
+ if(!dev->queuelist) {
+ up(&dev->struct_sem);
+ drm_free(queue, sizeof(*queue), DRM_MEM_QUEUES);
+ printk("out of memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ memset(queue, 0, sizeof(*queue));
+ atomic_set(&queue->use_count, 1);
+ atomic_set(&queue->finalization, 0);
+ atomic_set(&queue->block_count, 0);
+ atomic_set(&queue->block_read, 0);
+ atomic_set(&queue->block_write, 0);
+ atomic_set(&queue->total_queued, 0);
+ atomic_set(&queue->total_flushed, 0);
+ atomic_set(&queue->total_locks, 0);
+
+ init_waitqueue_head(&queue->write_queue);
+ init_waitqueue_head(&queue->read_queue);
+ init_waitqueue_head(&queue->flush_queue);
+
+ queue->flags = 0;
+
+ drm_waitlist_create(&queue->waitlist, dev->dma->buf_count);
+
+ dev->queue_slots = 1;
+ dev->queuelist[DRM_KERNEL_CONTEXT] = queue;
+ dev->queue_count--;
+
+ up(&dev->struct_sem);
+ printk("%d (new)\n", dev->queue_count - 1);
+ return DRM_KERNEL_CONTEXT;
+}
+
+static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init)
+{
+ drm_mga_private_t *dev_priv;
+ drm_map_t *prim_map = NULL;
+ drm_map_t *sarea_map = NULL;
+ int temp;
+
+
+ dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
+ if(dev_priv == NULL) return -ENOMEM;
+ dev->dev_private = (void *) dev_priv;
+
+ printk("dev_private\n");
+
+ memset(dev_priv, 0, sizeof(drm_mga_private_t));
+ atomic_set(&dev_priv->pending_bufs, 0);
+
+ if((init->reserved_map_idx >= dev->map_count) ||
+ (init->buffer_map_idx >= dev->map_count)) {
+ mga_dma_cleanup(dev);
+ printk("reserved_map or buffer_map are invalid\n");
+ return -EINVAL;
+ }
+
+	if(mga_alloc_kernel_queue(dev) != DRM_KERNEL_CONTEXT) {
+		mga_dma_cleanup(dev);
+		DRM_ERROR("Kernel context queue not present\n");
+		return -ENOMEM; /* dev_priv was freed by the cleanup above */
+	}
+
+ dev_priv->reserved_map_idx = init->reserved_map_idx;
+ dev_priv->buffer_map_idx = init->buffer_map_idx;
+ sarea_map = dev->maplist[0];
+ dev_priv->sarea_priv = (drm_mga_sarea_t *)
+ ((u8 *)sarea_map->handle +
+ init->sarea_priv_offset);
+ printk("sarea_priv\n");
+
+ /* Scale primary size to the next page */
+ dev_priv->primary_size = ((init->primary_size + PAGE_SIZE - 1) /
+ PAGE_SIZE) * PAGE_SIZE;
+ dev_priv->warp_ucode_size = init->warp_ucode_size;
+ dev_priv->chipset = init->chipset;
+ dev_priv->fbOffset = init->fbOffset;
+ dev_priv->backOffset = init->backOffset;
+ dev_priv->depthOffset = init->depthOffset;
+ dev_priv->textureOffset = init->textureOffset;
+ dev_priv->textureSize = init->textureSize;
+ dev_priv->cpp = init->cpp;
+ dev_priv->sgram = init->sgram;
+ dev_priv->stride = init->stride;
+
+ dev_priv->frontOrg = init->frontOrg;
+ dev_priv->backOrg = init->backOrg;
+ dev_priv->depthOrg = init->depthOrg;
+ dev_priv->mAccess = init->mAccess;
+
+
+ printk("memcpy\n");
+ memcpy(&dev_priv->WarpIndex, &init->WarpIndex,
+ sizeof(mgaWarpIndex) * MGA_MAX_WARP_PIPES);
+ printk("memcpy done\n");
+ prim_map = dev->maplist[init->reserved_map_idx];
+ dev_priv->prim_phys_head = dev->agp->base + init->reserved_map_agpstart;
+ temp = init->warp_ucode_size + dev_priv->primary_size;
+ temp = ((temp + PAGE_SIZE - 1) /
+ PAGE_SIZE) * PAGE_SIZE;
+ printk("temp : %x\n", temp);
+ printk("dev->agp->base: %lx\n", dev->agp->base);
+ printk("init->reserved_map_agpstart: %x\n", init->reserved_map_agpstart);
+
+
+ dev_priv->ioremap = drm_ioremap(dev->agp->base + init->reserved_map_agpstart,
+ temp);
+ if(dev_priv->ioremap == NULL) {
+ printk("Ioremap failed\n");
+ mga_dma_cleanup(dev);
+ return -ENOMEM;
+ }
+
+ dev_priv->prim_head = (u32 *)dev_priv->ioremap;
+ printk("dev_priv->prim_head : %p\n", dev_priv->prim_head);
+ dev_priv->current_dma_ptr = dev_priv->prim_head;
+ dev_priv->prim_num_dwords = 0;
+ dev_priv->prim_max_dwords = dev_priv->primary_size / 4;
+
+ printk("dma initialization\n");
+
+ /* Private is now filled in, initialize the hardware */
+ {
+ PRIMLOCALS;
+ PRIMRESET( dev_priv );
+ PRIMGETPTR( dev_priv );
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DWGSYNC, 0);
+ PRIMOUTREG(MGAREG_SOFTRAP, 0);
+ PRIMADVANCE( dev_priv );
+
+		/* Poll for the first buffer to ensure that
+ * the status register will be correct
+ */
+ printk("phys_head : %lx\n", phys_head);
+
+ MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
+
+ while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) {
+ int i;
+ for(i = 0 ; i < 4096; i++) mga_delay();
+ }
+
+ MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL);
+
+ MGA_WRITE(MGAREG_PRIMEND, ((phys_head + num_dwords * 4) |
+ PDEA_pagpxfer_enable));
+
+ while(MGA_READ(MGAREG_DWGSYNC) == MGA_SYNC_TAG) {
+ int i;
+ for(i = 0; i < 4096; i++) mga_delay();
+ }
+
+ }
+
+ printk("dma init was successful\n");
+ return 0;
+}
+
+int mga_dma_init(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_mga_init_t init;
+
+ copy_from_user_ret(&init, (drm_mga_init_t *)arg, sizeof(init), -EFAULT);
+
+ switch(init.func) {
+ case MGA_INIT_DMA:
+ return mga_dma_initialize(dev, &init);
+ case MGA_CLEANUP_DMA:
+ return mga_dma_cleanup(dev);
+ }
+
+ return -EINVAL;
+}
+
+#define MGA_ILOAD_CMD (DC_opcod_iload | DC_atype_rpl | \
+ DC_linear_linear | DC_bltmod_bfcol | \
+ (0xC << DC_bop_SHIFT) | DC_sgnzero_enable | \
+ DC_shftzero_enable | DC_clipdis_enable)
+
+static void __mga_iload_small(drm_device_t *dev,
+ drm_buf_t *buf,
+ int use_agp)
+{
+ drm_mga_private_t *dev_priv = dev->dev_private;
+ drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned long address = (unsigned long)buf->bus_address;
+ int length = buf->used;
+ int y1 = buf_priv->boxes[0].y1;
+ int x1 = buf_priv->boxes[0].x1;
+ int y2 = buf_priv->boxes[0].y2;
+ int x2 = buf_priv->boxes[0].x2;
+ int dstorg = buf_priv->ContextState[MGA_CTXREG_DSTORG];
+ int maccess = buf_priv->ContextState[MGA_CTXREG_MACCESS];
+ PRIMLOCALS;
+
+ PRIMRESET(dev_priv);
+ PRIMGETPTR(dev_priv);
+
+ PRIMOUTREG(MGAREG_DSTORG, dstorg | use_agp);
+ PRIMOUTREG(MGAREG_MACCESS, maccess);
+ PRIMOUTREG(MGAREG_PITCH, (1 << 15));
+ PRIMOUTREG(MGAREG_YDST, y1 * (x2 - x1));
+ PRIMOUTREG(MGAREG_LEN, 1);
+ PRIMOUTREG(MGAREG_FXBNDRY, ((x2 - x1) * (y2 - y1) - 1) << 16);
+ PRIMOUTREG(MGAREG_AR0, (x2 - x1) * (y2 - y1) - 1);
+ PRIMOUTREG(MGAREG_AR3, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DWGCTL+MGAREG_MGA_EXEC, MGA_ILOAD_CMD);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_SECADDRESS, address | TT_BLIT);
+ PRIMOUTREG(MGAREG_SECEND, (address + length) | use_agp);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DWGSYNC, 0);
+ PRIMOUTREG(MGAREG_SOFTRAP, 0);
+ PRIMADVANCE(dev_priv);
+#if 0
+ /* For now we need to set this in the ioctl */
+ sarea_priv->dirty |= MGASAREA_NEW_CONTEXT;
+#endif
+ MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
+ while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ;
+
+ MGA_WRITE(MGAREG_PRIMADDRESS, dev_priv->prim_phys_head | TT_GENERAL);
+ MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp);
+}
+
+static void __mga_iload_xy(drm_device_t *dev,
+ drm_buf_t *buf,
+ int use_agp)
+{
+ drm_mga_private_t *dev_priv = dev->dev_private;
+ drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned long address = (unsigned long)buf->bus_address;
+ int length = buf->used;
+ int y1 = buf_priv->boxes[0].y1;
+ int x1 = buf_priv->boxes[0].x1;
+ int y2 = buf_priv->boxes[0].y2;
+ int x2 = buf_priv->boxes[0].x2;
+ int dstorg = buf_priv->ContextState[MGA_CTXREG_DSTORG];
+ int maccess = buf_priv->ContextState[MGA_CTXREG_MACCESS];
+ int pitch = buf_priv->ServerState[MGA_2DREG_PITCH];
+ int width, height;
+ int texperdword = 0;
+ PRIMLOCALS;
+
+ width = (x2 - x1);
+ height = (y2 - y1);
+ switch((maccess & 0x00000003)) {
+ case 0:
+ texperdword = 4;
+ break;
+ case 1:
+ texperdword = 2;
+ break;
+ case 2:
+ texperdword = 1;
+ break;
+ default:
+ DRM_ERROR("Invalid maccess value passed to __mga_iload_xy\n");
+ return;
+ }
+
+ x2 = x1 + width;
+ x2 = (x2 + (texperdword - 1)) & ~(texperdword - 1);
+ x1 = (x1 + (texperdword - 1)) & ~(texperdword - 1);
+ width = x2 - x1;
+
+ PRIMRESET(dev_priv);
+ PRIMGETPTR(dev_priv);
+ PRIMOUTREG(MGAREG_DSTORG, dstorg | use_agp);
+ PRIMOUTREG(MGAREG_MACCESS, maccess);
+ PRIMOUTREG(MGAREG_PITCH, pitch);
+ PRIMOUTREG(MGAREG_YDSTLEN, (y1 << 16) | height);
+
+ PRIMOUTREG(MGAREG_FXBNDRY, ((x1+width-1) << 16) | x1);
+ PRIMOUTREG(MGAREG_AR0, width * height - 1);
+ PRIMOUTREG(MGAREG_AR3, 0 );
+ PRIMOUTREG(MGAREG_DWGCTL+MGAREG_MGA_EXEC, MGA_ILOAD_CMD);
+
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_SECADDRESS, address | TT_BLIT);
+ PRIMOUTREG(MGAREG_SECEND, (address + length) | use_agp);
+
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DWGSYNC, 0);
+ PRIMOUTREG(MGAREG_SOFTRAP, 0);
+ PRIMADVANCE(dev_priv);
+#if 0
+ /* For now we need to set this in the ioctl */
+ sarea_priv->dirty |= MGASAREA_NEW_CONTEXT;
+#endif
+ MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
+ while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ;
+
+ MGA_WRITE(MGAREG_PRIMADDRESS, dev_priv->prim_phys_head | TT_GENERAL);
+ MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp);
+}
+
+static void mga_dma_dispatch_iload(drm_device_t *dev, drm_buf_t *buf)
+{
+ drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+
+ int use_agp = PDEA_pagpxfer_enable;
+ int x1 = buf_priv->boxes[0].x1;
+ int x2 = buf_priv->boxes[0].x2;
+
+ if((x2 - x1) < 32) {
+ printk("using iload small\n");
+ __mga_iload_small(dev, buf, use_agp);
+ } else {
+ printk("using iload xy\n");
+ __mga_iload_xy(dev, buf, use_agp);
+ }
+}
+
+static void mga_dma_dispatch_vertex(drm_device_t *dev, drm_buf_t *buf)
+{
+ drm_mga_private_t *dev_priv = dev->dev_private;
+ drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+ unsigned long address = (unsigned long)buf->bus_address;
+ int length = buf->used;
+ int use_agp = PDEA_pagpxfer_enable;
+ int i, count;
+ PRIMLOCALS;
+
+ PRIMRESET(dev_priv);
+
+ count = buf_priv->nbox;
+ if (count == 0)
+ count = 1;
+
+ mgaEmitState( dev_priv, buf_priv );
+
+ for (i = 0 ; i < count ; i++) {
+ if (i < buf_priv->nbox)
+ mgaEmitClipRect( dev_priv, &buf_priv->boxes[i] );
+
+ PRIMGETPTR(dev_priv);
+ PRIMOUTREG( MGAREG_DMAPAD, 0);
+ PRIMOUTREG( MGAREG_DMAPAD, 0);
+ PRIMOUTREG( MGAREG_SECADDRESS, address | TT_VERTEX);
+ PRIMOUTREG( MGAREG_SECEND, (address + length) | use_agp);
+
+ PRIMOUTREG( MGAREG_DMAPAD, 0);
+ PRIMOUTREG( MGAREG_DMAPAD, 0);
+ PRIMOUTREG( MGAREG_DWGSYNC, 0);
+ PRIMOUTREG( MGAREG_SOFTRAP, 0);
+ PRIMADVANCE(dev_priv);
+ }
+
+ PRIMGETPTR( dev_priv );
+
+ MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
+ while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ;
+
+ MGA_WRITE(MGAREG_PRIMADDRESS, dev_priv->prim_phys_head | TT_GENERAL);
+ MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp);
+}
+
+
+/* Used internally for the small buffers generated from client state
+ * information.
+ */
+static void mga_dma_dispatch_general(drm_device_t *dev, drm_buf_t *buf)
+{
+ drm_mga_private_t *dev_priv = dev->dev_private;
+ unsigned long address = (unsigned long)buf->bus_address;
+ int length = buf->used;
+ int use_agp = PDEA_pagpxfer_enable;
+ PRIMLOCALS;
+
+ PRIMRESET(dev_priv);
+ PRIMGETPTR(dev_priv);
+
+ PRIMOUTREG( MGAREG_DMAPAD, 0);
+ PRIMOUTREG( MGAREG_DMAPAD, 0);
+ PRIMOUTREG( MGAREG_SECADDRESS, address | TT_GENERAL);
+ PRIMOUTREG( MGAREG_SECEND, (address + length) | use_agp);
+
+ PRIMOUTREG( MGAREG_DMAPAD, 0);
+ PRIMOUTREG( MGAREG_DMAPAD, 0);
+ PRIMOUTREG( MGAREG_DWGSYNC, 0);
+ PRIMOUTREG( MGAREG_SOFTRAP, 0);
+ PRIMADVANCE(dev_priv);
+
+ MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
+ while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ;
+
+ MGA_WRITE(MGAREG_PRIMADDRESS, dev_priv->prim_phys_head | TT_GENERAL);
+ MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp);
+}
+
+/* Frees dispatch lock */
+static inline void mga_dma_quiescent(drm_device_t *dev)
+{
+ drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
+
+ while(1) {
+ atomic_inc(&dev_priv->dispatch_lock);
+ if(atomic_read(&dev_priv->dispatch_lock) == 1) {
+ break;
+ } else {
+ atomic_dec(&dev_priv->dispatch_lock);
+ }
+ }
+ while((MGA_READ(MGAREG_STATUS) & 0x00020001) != 0x00020000) ;
+#if 0
+ MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
+#endif
+ while(MGA_READ(MGAREG_DWGSYNC) == MGA_SYNC_TAG) ;
+ MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
+ while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ;
+ atomic_dec(&dev_priv->dispatch_lock);
+}
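+
+/* dispatch_lock doubles as a counting trylock in this file:
+ * atomic_inc() followed by atomic_read() == 1 means we were the sole
+ * taker; any other value is contention, so the caller atomic_dec()s
+ * and backs off.  The helpers below wrap that pattern with and without
+ * keeping the lock. */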
+
+/* Keeps dispatch lock held */
+
+static inline int mga_dma_is_ready(drm_device_t *dev)
+{
+ drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
+
+ atomic_inc(&dev_priv->dispatch_lock);
+ if(atomic_read(&dev_priv->dispatch_lock) == 1) {
+ /* We got the lock */
+ return 1;
+ } else {
+ atomic_dec(&dev_priv->dispatch_lock);
+ return 0;
+ }
+}
+
+static inline int mga_dma_is_ready_no_hold(drm_device_t *dev)
+{
+ drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
+
+ atomic_inc(&dev_priv->dispatch_lock);
+ if(atomic_read(&dev_priv->dispatch_lock) == 1) {
+ /* We got the lock, but free it */
+ atomic_dec(&dev_priv->dispatch_lock);
+ return 1;
+ } else {
+ atomic_dec(&dev_priv->dispatch_lock);
+ return 0;
+ }
+}
+
+static void mga_dma_service(int irq, void *device, struct pt_regs *regs)
+{
+ drm_device_t *dev = (drm_device_t *)device;
+ drm_device_dma_t *dma = dev->dma;
+ drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
+
+ atomic_dec(&dev_priv->dispatch_lock);
+ atomic_inc(&dev->total_irq);
+ MGA_WRITE(MGAREG_ICLEAR, 0xfa7);
+
+ /* Free previous buffer */
+ if (test_and_set_bit(0, &dev->dma_flag)) {
+ atomic_inc(&dma->total_missed_free);
+ return;
+ }
+ if (dma->this_buffer) {
+ drm_free_buffer(dev, dma->this_buffer);
+ dma->this_buffer = NULL;
+ }
+ clear_bit(0, &dev->dma_flag);
+
+ /* Dispatch new buffer */
+ queue_task(&dev->tq, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+
+}
+
+/* Only called by mga_dma_schedule. */
+static int mga_do_dma(drm_device_t *dev, int locked)
+{
+ drm_buf_t *buf;
+ int retcode = 0;
+ drm_device_dma_t *dma = dev->dma;
+ drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
+ drm_mga_buf_priv_t *buf_priv;
+
+ printk("mga_do_dma\n");
+ if (test_and_set_bit(0, &dev->dma_flag)) {
+ atomic_inc(&dma->total_missed_dma);
+ return -EBUSY;
+ }
+
+ if (!dma->next_buffer) {
+ DRM_ERROR("No next_buffer\n");
+ clear_bit(0, &dev->dma_flag);
+ return -EINVAL;
+ }
+
+ buf = dma->next_buffer;
+
+ printk("context %d, buffer %d\n", buf->context, buf->idx);
+
+ if (buf->list == DRM_LIST_RECLAIM) {
+ drm_clear_next_buffer(dev);
+ drm_free_buffer(dev, buf);
+ clear_bit(0, &dev->dma_flag);
+ return -EINVAL;
+ }
+
+ if (!buf->used) {
+ DRM_ERROR("0 length buffer\n");
+ drm_clear_next_buffer(dev);
+ drm_free_buffer(dev, buf);
+ clear_bit(0, &dev->dma_flag);
+ return 0;
+ }
+
+ if (mga_dma_is_ready(dev) == 0) {
+ clear_bit(0, &dev->dma_flag);
+ return -EBUSY;
+ }
+
+ /* Always hold the hardware lock while dispatching.
+ */
+ if (!locked && !drm_lock_take(&dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ atomic_inc(&dma->total_missed_lock);
+ clear_bit(0, &dev->dma_flag);
+ atomic_dec(&dev_priv->dispatch_lock);
+ return -EBUSY;
+ }
+
+ dma->next_queue = dev->queuelist[DRM_KERNEL_CONTEXT];
+ drm_clear_next_buffer(dev);
+ buf->pending = 1;
+ buf->waiting = 0;
+ buf->list = DRM_LIST_PEND;
+
+ buf_priv = buf->dev_private;
+
+ printk("dispatch!\n");
+ switch (buf_priv->dma_type) {
+ case MGA_DMA_GENERAL:
+ mga_dma_dispatch_general(dev, buf);
+ break;
+ case MGA_DMA_VERTEX:
+ mga_dma_dispatch_vertex(dev, buf);
+ break;
+/* case MGA_DMA_SETUP: */
+/* mga_dma_dispatch_setup(dev, address, length); */
+/* break; */
+ case MGA_DMA_ILOAD:
+ mga_dma_dispatch_iload(dev, buf);
+ break;
+ default:
+ printk("bad buffer type %x in dispatch\n", buf_priv->dma_type);
+ break;
+ }
+ atomic_dec(&dev_priv->pending_bufs);
+
+ drm_free_buffer(dev, dma->this_buffer);
+ dma->this_buffer = buf;
+
+ atomic_add(buf->used, &dma->total_bytes);
+ atomic_inc(&dma->total_dmas);
+
+ if (!locked) {
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("\n");
+ }
+ }
+
+ clear_bit(0, &dev->dma_flag);
+
+ if(!atomic_read(&dev_priv->pending_bufs)) {
+ wake_up_interruptible(&dev->queuelist[DRM_KERNEL_CONTEXT]->flush_queue);
+ }
+
+#if 0
+ wake_up_interruptible(&dev->lock.lock_queue);
+#endif
+
+ /* We hold the dispatch lock until the interrupt handler
+ * frees it
+ */
+ return retcode;
+}
+
+static void mga_dma_schedule_timer_wrapper(unsigned long dev)
+{
+ mga_dma_schedule((drm_device_t *)dev, 0);
+}
+
+static void mga_dma_schedule_tq_wrapper(void *dev)
+{
+ mga_dma_schedule(dev, 0);
+}
+
+int mga_dma_schedule(drm_device_t *dev, int locked)
+{
+ drm_queue_t *q;
+ drm_buf_t *buf;
+ int retcode = 0;
+ int processed = 0;
+ int missed;
+ int expire = 20;
+ drm_device_dma_t *dma = dev->dma;
+
+ printk("mga_dma_schedule\n");
+
+ if (test_and_set_bit(0, &dev->interrupt_flag)) {
+ /* Not reentrant */
+ atomic_inc(&dma->total_missed_sched);
+ return -EBUSY;
+ }
+ missed = atomic_read(&dma->total_missed_sched);
+
+again:
+ /* There is only one queue:
+ */
+ if (!dma->next_buffer && DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
+ q = dev->queuelist[DRM_KERNEL_CONTEXT];
+ buf = drm_waitlist_get(&q->waitlist);
+ dma->next_buffer = buf;
+ dma->next_queue = q;
+ if (buf && buf->list == DRM_LIST_RECLAIM) {
+ drm_clear_next_buffer(dev);
+ drm_free_buffer(dev, buf);
+ }
+ }
+
+ if (dma->next_buffer) {
+ if (!(retcode = mga_do_dma(dev, locked)))
+ ++processed;
+ }
+
+	/* Try again if we successfully dispatched a buffer, or if someone
+ * tried to schedule while we were working.
+ */
+ if (--expire) {
+ if (missed != atomic_read(&dma->total_missed_sched)) {
+ atomic_inc(&dma->total_lost);
+ if (mga_dma_is_ready_no_hold(dev))
+ goto again;
+ }
+
+ if (processed && mga_dma_is_ready_no_hold(dev)) {
+ atomic_inc(&dma->total_lost);
+ processed = 0;
+ goto again;
+ }
+ }
+
+ clear_bit(0, &dev->interrupt_flag);
+
+ return retcode;
+}
+
+int mga_irq_install(drm_device_t *dev, int irq)
+{
+ int retcode;
+
+ if (!irq) return -EINVAL;
+
+ down(&dev->struct_sem);
+ if (dev->irq) {
+ up(&dev->struct_sem);
+ return -EBUSY;
+ }
+ dev->irq = irq;
+ up(&dev->struct_sem);
+
+ printk("install irq handler %d\n", irq);
+
+ dev->context_flag = 0;
+ dev->interrupt_flag = 0;
+ dev->dma_flag = 0;
+ dev->dma->next_buffer = NULL;
+ dev->dma->next_queue = NULL;
+ dev->dma->this_buffer = NULL;
+ dev->tq.next = NULL;
+ dev->tq.sync = 0;
+ dev->tq.routine = mga_dma_schedule_tq_wrapper;
+ dev->tq.data = dev;
+
+ /* Before installing handler */
+ MGA_WRITE(MGAREG_ICLEAR, 0xfa7);
+ MGA_WRITE(MGAREG_IEN, 0);
+
+ /* Install handler */
+ if ((retcode = request_irq(dev->irq,
+ mga_dma_service,
+ 0,
+ dev->devname,
+ dev))) {
+ down(&dev->struct_sem);
+ dev->irq = 0;
+ up(&dev->struct_sem);
+ return retcode;
+ }
+
+ /* After installing handler */
+ MGA_WRITE(MGAREG_ICLEAR, 0xfa7);
+ MGA_WRITE(MGAREG_IEN, 0x00000001);
+
+ return 0;
+}
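+
+/* The ICLEAR/IEN writes bracket request_irq() so that no stale interrupt
+ * fires into a half-installed handler: all sources are acknowledged and
+ * masked first, and only once the handler is in place is the one source
+ * the driver uses (IEN bit 0) enabled. */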
+
+int mga_irq_uninstall(drm_device_t *dev)
+{
+ int irq;
+
+ down(&dev->struct_sem);
+ irq = dev->irq;
+ dev->irq = 0;
+ up(&dev->struct_sem);
+
+ if (!irq) return -EINVAL;
+
+ printk("remove irq handler %d\n", irq);
+
+ MGA_WRITE(MGAREG_ICLEAR, 0xfa7);
+ MGA_WRITE(MGAREG_IEN, 0);
+ MGA_WRITE(MGAREG_ICLEAR, 0xfa7);
+
+ free_irq(irq, dev);
+
+ return 0;
+}
+
+
+int mga_control(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_control_t ctl;
+
+ copy_from_user_ret(&ctl, (drm_control_t *)arg, sizeof(ctl), -EFAULT);
+
+ switch (ctl.func) {
+ case DRM_INST_HANDLER:
+ return mga_irq_install(dev, ctl.irq);
+ case DRM_UNINST_HANDLER:
+ return mga_irq_uninstall(dev);
+ default:
+ return -EINVAL;
+ }
+}
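+
+/* A minimal userspace sketch for this ioctl (illustrative only; assumes
+ * fd is open on the DRM device and irq_number is the device's IRQ):
+ *
+ *	drm_control_t ctl;
+ *	ctl.func = DRM_INST_HANDLER;
+ *	ctl.irq  = irq_number;
+ *	ioctl(fd, DRM_IOCTL_CONTROL, &ctl);
+ */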
+
+int mga_flush_queue(drm_device_t *dev)
+{
+ DECLARE_WAITQUEUE(entry, current);
+ drm_queue_t *q = dev->queuelist[DRM_KERNEL_CONTEXT];
+ drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
+ int ret = 0;
+
+ printk("mga_flush_queue\n");
+ if(atomic_read(&dev_priv->pending_bufs) != 0) {
+ current->state = TASK_INTERRUPTIBLE;
+ add_wait_queue(&q->flush_queue, &entry);
+ for (;;) {
+ if (!atomic_read(&dev_priv->pending_bufs)) break;
+ printk("Calling schedule from flush_queue : %d\n",
+ atomic_read(&dev_priv->pending_bufs));
+ mga_dma_schedule(dev, 1);
+ schedule();
+ if (signal_pending(current)) {
+ ret = -EINTR; /* Can't restart */
+ break;
+ }
+ }
+ printk("Exited out of schedule from flush_queue\n");
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&q->flush_queue, &entry);
+ }
+
+ return ret;
+}
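+
+/* mga_flush_queue() is called from mga_lock() with the hardware lock
+ * already held, which is why the scheduler is kicked with locked == 1. */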
+
+int mga_lock(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ DECLARE_WAITQUEUE(entry, current);
+ int ret = 0;
+ drm_lock_t lock;
+
+ copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ current->pid, lock.context);
+ return -EINVAL;
+ }
+
+ printk("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+ lock.context, current->pid, dev->lock.hw_lock->lock,
+ lock.flags);
+
+
+ if (lock.context < 0) {
+ return -EINVAL;
+ }
+
+ /* Only one queue:
+ */
+
+ if (!ret) {
+ add_wait_queue(&dev->lock.lock_queue, &entry);
+ for (;;) {
+ if (!dev->lock.hw_lock) {
+ /* Device has been unregistered */
+ ret = -EINTR;
+ break;
+ }
+ if (drm_lock_take(&dev->lock.hw_lock->lock,
+ lock.context)) {
+ dev->lock.pid = current->pid;
+ dev->lock.lock_time = jiffies;
+ atomic_inc(&dev->total_locks);
+ break; /* Got lock */
+ }
+
+ /* Contention */
+ atomic_inc(&dev->total_sleeps);
+ current->state = TASK_INTERRUPTIBLE;
+ current->policy |= SCHED_YIELD;
+ printk("Calling lock schedule\n");
+ schedule();
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&dev->lock.lock_queue, &entry);
+ }
+
+ if (!ret) {
+ if (lock.flags & _DRM_LOCK_QUIESCENT) {
+ printk("_DRM_LOCK_QUIESCENT\n");
+ ret = mga_flush_queue(dev);
+ if(ret != 0) {
+ drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ lock.context);
+ } else {
+ mga_dma_quiescent(dev);
+ }
+ }
+ }
+ printk("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
+ return ret;
+}
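+
+/* A sketch of the expected client sequence (illustrative only; my_context
+ * stands for any context handle other than DRM_KERNEL_CONTEXT):
+ *
+ *	drm_lock_t l;
+ *	l.context = my_context;
+ *	l.flags   = _DRM_LOCK_QUIESCENT;	(optional)
+ *	ioctl(fd, DRM_IOCTL_LOCK, &l);
+ *	... program the hardware ...
+ *	ioctl(fd, DRM_IOCTL_UNLOCK, &l);
+ */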
+
diff --git a/linux/mga_drv.c b/linux/mga_drv.c
new file mode 100644
index 00000000..8bc25617
--- /dev/null
+++ b/linux/mga_drv.c
@@ -0,0 +1,576 @@
+/* mga_drv.c -- Matrox g200/g400 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
+ * Jeff Hartmann <jhartmann@precisioninsight.com>
+ *
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_drv.c,v 1.1 2000/02/11 17:26:07 dawes Exp $
+ *
+ */
+
+#define EXPORT_SYMTAB
+#include "drmP.h"
+#include "mga_drv.h"
+EXPORT_SYMBOL(mga_init);
+EXPORT_SYMBOL(mga_cleanup);
+
+#define MGA_NAME "mga"
+#define MGA_DESC "Matrox g200/g400"
+#define MGA_DATE "19991213"
+#define MGA_MAJOR 0
+#define MGA_MINOR 0
+#define MGA_PATCHLEVEL 1
+
+static drm_device_t mga_device;
+drm_ctx_t mga_res_ctx;
+
+static struct file_operations mga_fops = {
+ open: mga_open,
+ flush: drm_flush,
+ release: mga_release,
+ ioctl: mga_ioctl,
+ mmap: drm_mmap,
+ read: drm_read,
+ fasync: drm_fasync,
+};
+
+static struct miscdevice mga_misc = {
+ minor: MISC_DYNAMIC_MINOR,
+ name: MGA_NAME,
+ fops: &mga_fops,
+};
+
+static drm_ioctl_desc_t mga_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { mga_version, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { mga_control, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { mga_addbufs, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { mga_markbufs, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { mga_infobufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { mga_mapbufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { mga_freebufs, 1, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { mga_addctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { mga_rmctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { mga_modctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { mga_getctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { mga_switchctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { mga_newctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { mga_resctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { mga_dma, 1, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { mga_lock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { mga_unlock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MGA_INIT)] = { mga_dma_init, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_MGA_SWAP)] = { mga_swap_bufs, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_MGA_CLEAR)] = { mga_clear_bufs, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)] = { mga_iload, 1, 1 },
+};
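+
+/* Each entry is { handler, auth_needed, root_only }: mga_ioctl() refuses
+ * auth_needed calls from unauthenticated clients and root_only calls from
+ * processes without CAP_SYS_ADMIN. */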
+
+#define MGA_IOCTL_COUNT DRM_ARRAY_SIZE(mga_ioctls)
+
+#ifdef MODULE
+int init_module(void);
+void cleanup_module(void);
+static char *mga = NULL;
+
+MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas.");
+MODULE_DESCRIPTION("Matrox g200/g400");
+MODULE_PARM(mga, "s");
+
+/* init_module is called when insmod is used to load the module */
+
+int init_module(void)
+{
+ DRM_DEBUG("doing mga_init()\n");
+ return mga_init();
+}
+
+/* cleanup_module is called when rmmod is used to unload the module */
+
+void cleanup_module(void)
+{
+ mga_cleanup();
+}
+#endif
+
+#ifndef MODULE
+/* mga_options is called by the kernel to parse command-line options passed
+ * via the boot-loader (e.g., LILO).  It calls the insmod option routine,
+ * drm_parse_options.  The name is distinct from the static mga_setup()
+ * below so the two cannot collide in a non-modular build.
+ *
+ * This is not currently supported, since it requires changes to
+ * linux/init/main.c. */
+
+void __init mga_options(char *str, int *ints)
+{
+ if (ints[0] != 0) {
+ DRM_ERROR("Illegal command line format, ignored\n");
+ return;
+ }
+ drm_parse_options(str);
+}
+#endif
+
+static int mga_setup(drm_device_t *dev)
+{
+ int i;
+
+ atomic_set(&dev->ioctl_count, 0);
+ atomic_set(&dev->vma_count, 0);
+ dev->buf_use = 0;
+ atomic_set(&dev->buf_alloc, 0);
+
+ drm_dma_setup(dev);
+
+ atomic_set(&dev->total_open, 0);
+ atomic_set(&dev->total_close, 0);
+ atomic_set(&dev->total_ioctl, 0);
+ atomic_set(&dev->total_irq, 0);
+ atomic_set(&dev->total_ctx, 0);
+ atomic_set(&dev->total_locks, 0);
+ atomic_set(&dev->total_unlocks, 0);
+ atomic_set(&dev->total_contends, 0);
+ atomic_set(&dev->total_sleeps, 0);
+
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ dev->magiclist[i].head = NULL;
+ dev->magiclist[i].tail = NULL;
+ }
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ dev->vmalist = NULL;
+ dev->lock.hw_lock = NULL;
+ init_waitqueue_head(&dev->lock.lock_queue);
+ dev->queue_count = 0;
+ dev->queue_reserved = 0;
+ dev->queue_slots = 0;
+ dev->queuelist = NULL;
+ dev->irq = 0;
+ dev->context_flag = 0;
+ dev->interrupt_flag = 0;
+ dev->dma_flag = 0;
+ dev->last_context = 0;
+ dev->last_switch = 0;
+ dev->last_checked = 0;
+ init_timer(&dev->timer);
+ init_waitqueue_head(&dev->context_wait);
+
+ dev->ctx_start = 0;
+ dev->lck_start = 0;
+
+ dev->buf_rp = dev->buf;
+ dev->buf_wp = dev->buf;
+ dev->buf_end = dev->buf + DRM_BSZ;
+ dev->buf_async = NULL;
+ init_waitqueue_head(&dev->buf_readers);
+ init_waitqueue_head(&dev->buf_writers);
+
+ DRM_DEBUG("\n");
+
+ /* The kernel's context could be created here, but is now created
+ in drm_dma_enqueue. This is more resource-efficient for
+ hardware that does not do DMA, but may mean that
+ drm_select_queue fails between the time the interrupt is
+ initialized and the time the queues are initialized. */
+
+ return 0;
+}
+
+
+static int mga_takedown(drm_device_t *dev)
+{
+ int i;
+ drm_magic_entry_t *pt, *next;
+ drm_map_t *map;
+ drm_vma_entry_t *vma, *vma_next;
+
+ DRM_DEBUG("\n");
+
+ if (dev->irq) mga_irq_uninstall(dev);
+
+ down(&dev->struct_sem);
+ del_timer(&dev->timer);
+
+ if (dev->devname) {
+ drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
+ dev->devname = NULL;
+ }
+
+ if (dev->unique) {
+ drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
+ dev->unique = NULL;
+ dev->unique_len = 0;
+ }
+ /* Clear pid list */
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ for (pt = dev->magiclist[i].head; pt; pt = next) {
+ next = pt->next;
+ drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+ }
+ dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
+ }
+ /* Clear AGP information */
+ if (dev->agp) {
+ drm_agp_mem_t *entry;
+ drm_agp_mem_t *nexte;
+
+ /* Remove AGP resources, but leave dev->agp
+ intact until cleanup is called. */
+ for (entry = dev->agp->memory; entry; entry = nexte) {
+ nexte = entry->next;
+ if (entry->bound) drm_unbind_agp(entry->memory);
+ drm_free_agp(entry->memory, entry->pages);
+ drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+ }
+ dev->agp->memory = NULL;
+
+ if (dev->agp->acquired && drm_agp.release)
+ (*drm_agp.release)();
+
+ dev->agp->acquired = 0;
+ dev->agp->enabled = 0;
+ }
+ /* Clear vma list (only built for debugging) */
+ if (dev->vmalist) {
+ for (vma = dev->vmalist; vma; vma = vma_next) {
+ vma_next = vma->next;
+ drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
+ }
+ dev->vmalist = NULL;
+ }
+
+ /* Clear map area and mtrr information */
+ if (dev->maplist) {
+ for (i = 0; i < dev->map_count; i++) {
+ map = dev->maplist[i];
+ switch (map->type) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+#ifdef CONFIG_MTRR
+ if (map->mtrr >= 0) {
+ int retcode;
+ retcode = mtrr_del(map->mtrr,
+ map->offset,
+ map->size);
+ DRM_DEBUG("mtrr_del = %d\n", retcode);
+ }
+#endif
+ drm_ioremapfree(map->handle, map->size);
+ break;
+ case _DRM_SHM:
+ drm_free_pages((unsigned long)map->handle,
+ drm_order(map->size)
+ - PAGE_SHIFT,
+ DRM_MEM_SAREA);
+ break;
+ case _DRM_AGP:
+ break;
+ }
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ }
+ drm_free(dev->maplist,
+ dev->map_count * sizeof(*dev->maplist),
+ DRM_MEM_MAPS);
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ }
+
+ if (dev->queuelist) {
+ for (i = 0; i < dev->queue_count; i++) {
+ drm_waitlist_destroy(&dev->queuelist[i]->waitlist);
+ if (dev->queuelist[i]) {
+ drm_free(dev->queuelist[i],
+ sizeof(*dev->queuelist[0]),
+ DRM_MEM_QUEUES);
+ dev->queuelist[i] = NULL;
+ }
+ }
+ drm_free(dev->queuelist,
+ dev->queue_slots * sizeof(*dev->queuelist),
+ DRM_MEM_QUEUES);
+ dev->queuelist = NULL;
+ }
+
+ drm_dma_takedown(dev);
+
+ dev->queue_count = 0;
+ if (dev->lock.hw_lock) {
+ dev->lock.hw_lock = NULL; /* SHM removed */
+ dev->lock.pid = 0;
+ wake_up_interruptible(&dev->lock.lock_queue);
+ }
+ up(&dev->struct_sem);
+
+ return 0;
+}
+
+/* mga_init is called via init_module at module load time, or via
+ * linux/init/main.c (this is not currently supported). */
+
+int mga_init(void)
+{
+ int retcode;
+ drm_device_t *dev = &mga_device;
+
+ DRM_DEBUG("\n");
+
+ memset((void *)dev, 0, sizeof(*dev));
+ dev->count_lock = SPIN_LOCK_UNLOCKED;
+ sema_init(&dev->struct_sem, 1);
+
+#ifdef MODULE
+ drm_parse_options(mga);
+#endif
+ DRM_DEBUG("doing misc_register\n");
+ if ((retcode = misc_register(&mga_misc))) {
+ DRM_ERROR("Cannot register \"%s\"\n", MGA_NAME);
+ return retcode;
+ }
+ dev->device = MKDEV(MISC_MAJOR, mga_misc.minor);
+ dev->name = MGA_NAME;
+
+ DRM_DEBUG("doing mem init\n");
+ drm_mem_init();
+ DRM_DEBUG("doing proc init\n");
+ drm_proc_init(dev);
+ DRM_DEBUG("doing agp init\n");
+ dev->agp = drm_agp_init();
+ DRM_DEBUG("doing ctxbitmap init\n");
+ if((retcode = drm_ctxbitmap_init(dev))) {
+ DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+ drm_proc_cleanup();
+ misc_deregister(&mga_misc);
+ mga_takedown(dev);
+ return retcode;
+ }
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ MGA_NAME,
+ MGA_MAJOR,
+ MGA_MINOR,
+ MGA_PATCHLEVEL,
+ MGA_DATE,
+ mga_misc.minor);
+
+ return 0;
+}
+
+/* mga_cleanup is called via cleanup_module at module unload time. */
+
+void mga_cleanup(void)
+{
+ drm_device_t *dev = &mga_device;
+
+ DRM_DEBUG("\n");
+
+ drm_proc_cleanup();
+ if (misc_deregister(&mga_misc)) {
+ DRM_ERROR("Cannot unload module\n");
+ } else {
+ DRM_INFO("Module unloaded\n");
+ }
+ drm_ctxbitmap_cleanup(dev);
+ mga_dma_cleanup(dev);
+ mga_takedown(dev);
+ if (dev->agp) {
+ drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
+ dev->agp = NULL;
+ }
+}
+
+int mga_version(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_version_t version;
+ int len;
+
+ copy_from_user_ret(&version,
+ (drm_version_t *)arg,
+ sizeof(version),
+ -EFAULT);
+
+#define DRM_COPY(name,value) \
+ len = strlen(value); \
+ if (len > name##_len) len = name##_len; \
+ name##_len = strlen(value); \
+ if (len && name) { \
+ copy_to_user_ret(name, value, len, -EFAULT); \
+ }
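+/* Note: DRM_COPY copies at most the caller-supplied buffer length but
+ * always reports back the full string length, so userspace can detect
+ * truncation and retry with a larger buffer. */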
+
+ version.version_major = MGA_MAJOR;
+ version.version_minor = MGA_MINOR;
+ version.version_patchlevel = MGA_PATCHLEVEL;
+
+ DRM_COPY(version.name, MGA_NAME);
+ DRM_COPY(version.date, MGA_DATE);
+ DRM_COPY(version.desc, MGA_DESC);
+
+ copy_to_user_ret((drm_version_t *)arg,
+ &version,
+ sizeof(version),
+ -EFAULT);
+ return 0;
+}
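+
+/* By convention (not enforced here), userspace calls this ioctl twice:
+ * once with the *_len fields zeroed to learn the string lengths, then
+ * again with suitably sized buffers to fetch name, date, and desc. */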
+
+int mga_open(struct inode *inode, struct file *filp)
+{
+ drm_device_t *dev = &mga_device;
+ int retcode = 0;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+ if (!(retcode = drm_open_helper(inode, filp, dev))) {
+ MOD_INC_USE_COUNT;
+ atomic_inc(&dev->total_open);
+ spin_lock(&dev->count_lock);
+ if (!dev->open_count++) {
+ spin_unlock(&dev->count_lock);
+ return mga_setup(dev);
+ }
+ spin_unlock(&dev->count_lock);
+ }
+ return retcode;
+}
+
+int mga_release(struct inode *inode, struct file *filp)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ int retcode = 0;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+ if (!(retcode = drm_release(inode, filp))) {
+ MOD_DEC_USE_COUNT;
+ atomic_inc(&dev->total_close);
+ spin_lock(&dev->count_lock);
+ if (!--dev->open_count) {
+ if (atomic_read(&dev->ioctl_count) || dev->blocked) {
+ DRM_ERROR("Device busy: %d %d\n",
+ atomic_read(&dev->ioctl_count),
+ dev->blocked);
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ spin_unlock(&dev->count_lock);
+ return mga_takedown(dev);
+ }
+ spin_unlock(&dev->count_lock);
+ }
+ return retcode;
+}
+
+/* mga_ioctl is called whenever a process performs an ioctl on /dev/drm. */
+
+int mga_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int nr = DRM_IOCTL_NR(cmd);
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ int retcode = 0;
+ drm_ioctl_desc_t *ioctl;
+ drm_ioctl_t *func;
+
+ atomic_inc(&dev->ioctl_count);
+ atomic_inc(&dev->total_ioctl);
+ ++priv->ioctl_count;
+
+ DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
+ current->pid, cmd, nr, dev->device, priv->authenticated);
+
+ if (nr >= MGA_IOCTL_COUNT) {
+ retcode = -EINVAL;
+ } else {
+ ioctl = &mga_ioctls[nr];
+ func = ioctl->func;
+
+ if (!func) {
+ DRM_DEBUG("no function\n");
+ retcode = -EINVAL;
+ } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
+ || (ioctl->auth_needed && !priv->authenticated)) {
+ retcode = -EACCES;
+ } else {
+ retcode = (func)(inode, filp, cmd, arg);
+ }
+ }
+
+ atomic_dec(&dev->ioctl_count);
+ return retcode;
+}
+
+int mga_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_lock_t lock;
+
+ copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ current->pid, lock.context);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("%d frees lock (%d holds)\n",
+ lock.context,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ atomic_inc(&dev->total_unlocks);
+ if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
+ atomic_inc(&dev->total_contends);
+ drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
+ mga_dma_schedule(dev, 1);
+
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("\n");
+ }
+
+ return 0;
+}
diff --git a/linux/mga_drv.h b/linux/mga_drv.h
new file mode 100644
index 00000000..bc7808b0
--- /dev/null
+++ b/linux/mga_drv.h
@@ -0,0 +1,292 @@
+/* mga_drv.h -- Private header for the Matrox g200/g400 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
+ * Jeff Hartmann <jhartmann@precisioninsight.com>
+ *
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_drv.h,v 1.1 2000/02/11 17:26:08 dawes Exp $
+ */
+
+#ifndef _MGA_DRV_H_
+#define _MGA_DRV_H_
+#include "mga_drm_public.h"
+
+typedef struct _drm_mga_private {
+ int reserved_map_idx;
+ int buffer_map_idx;
+ drm_mga_sarea_t *sarea_priv;
+ int primary_size;
+ int warp_ucode_size;
+ int chipset;
+ int fbOffset;
+ int backOffset;
+ int depthOffset;
+ int textureOffset;
+ int textureSize;
+ int cpp;
+ int stride;
+ int sgram;
+ int use_agp;
+ mgaWarpIndex WarpIndex[MGA_MAX_G400_PIPES];
+ __volatile__ unsigned long softrap_age;
+ atomic_t dispatch_lock;
+ atomic_t pending_bufs;
+ void *ioremap;
+ u32 *prim_head;
+ u32 *current_dma_ptr;
+ u32 prim_phys_head;
+ int prim_num_dwords;
+ int prim_max_dwords;
+
+
+ /* Some validated register values:
+ */
+ u32 frontOrg;
+ u32 backOrg;
+ u32 depthOrg;
+ u32 mAccess;
+
+} drm_mga_private_t;
+
+ /* mga_drv.c */
+extern int mga_init(void);
+extern void mga_cleanup(void);
+extern int mga_version(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_open(struct inode *inode, struct file *filp);
+extern int mga_release(struct inode *inode, struct file *filp);
+extern int mga_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_unlock(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+ /* mga_dma.c */
+extern int mga_dma_schedule(drm_device_t *dev, int locked);
+extern int mga_dma(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_irq_install(drm_device_t *dev, int irq);
+extern int mga_irq_uninstall(drm_device_t *dev);
+extern int mga_control(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_lock(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+#if 0
+extern void mga_dma_init(drm_device_t *dev);
+extern void mga_dma_cleanup(drm_device_t *dev);
+
+#endif
+extern int mga_dma_init(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_dma_cleanup(drm_device_t *dev);
+
+/* mga_dma_init does init and release */
+
+
+ /* mga_bufs.c */
+extern int mga_addbufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_infobufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_markbufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_freebufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_mapbufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_addmap(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+ /* mga_state.c */
+extern int mga_clear_bufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_swap_bufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_iload(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+ /* mga_context.c */
+extern int mga_resctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_addctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_modctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_getctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_switchctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_newctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_rmctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+extern int mga_context_switch(drm_device_t *dev, int old, int new);
+extern int mga_context_switch_complete(drm_device_t *dev, int new);
+
+
+
+#define MGAREG_MGA_EXEC 0x0100
+#define MGAREG_AGP_PLL 0x1e4c
+#define MGAREG_ALPHACTRL 0x2c7c
+#define MGAREG_ALPHASTART 0x2c70
+#define MGAREG_ALPHAXINC 0x2c74
+#define MGAREG_ALPHAYINC 0x2c78
+#define MGAREG_AR0 0x1c60
+#define MGAREG_AR1 0x1c64
+#define MGAREG_AR2 0x1c68
+#define MGAREG_AR3 0x1c6c
+#define MGAREG_AR4 0x1c70
+#define MGAREG_AR5 0x1c74
+#define MGAREG_AR6 0x1c78
+#define MGAREG_BCOL 0x1c20
+#define MGAREG_CXBNDRY 0x1c80
+#define MGAREG_CXLEFT 0x1ca0
+#define MGAREG_CXRIGHT 0x1ca4
+#define MGAREG_DMAPAD 0x1c54
+#define MGAREG_DR0_Z32LSB 0x2c50
+#define MGAREG_DR0_Z32MSB 0x2c54
+#define MGAREG_DR2_Z32LSB 0x2c60
+#define MGAREG_DR2_Z32MSB 0x2c64
+#define MGAREG_DR3_Z32LSB 0x2c68
+#define MGAREG_DR3_Z32MSB 0x2c6c
+#define MGAREG_DR0 0x1cc0
+#define MGAREG_DR2 0x1cc8
+#define MGAREG_DR3 0x1ccc
+#define MGAREG_DR4 0x1cd0
+#define MGAREG_DR6 0x1cd8
+#define MGAREG_DR7 0x1cdc
+#define MGAREG_DR8 0x1ce0
+#define MGAREG_DR10 0x1ce8
+#define MGAREG_DR11 0x1cec
+#define MGAREG_DR12 0x1cf0
+#define MGAREG_DR14 0x1cf8
+#define MGAREG_DR15 0x1cfc
+#define MGAREG_DSTORG 0x2cb8
+#define MGAREG_DWG_INDIR_WT 0x1e80
+#define MGAREG_DWGCTL 0x1c00
+#define MGAREG_DWGSYNC 0x2c4c
+#define MGAREG_FCOL 0x1c24
+#define MGAREG_FIFOSTATUS 0x1e10
+#define MGAREG_FOGCOL 0x1cf4
+#define MGAREG_FOGSTART 0x1cc4
+#define MGAREG_FOGXINC 0x1cd4
+#define MGAREG_FOGYINC 0x1ce4
+#define MGAREG_FXBNDRY 0x1c84
+#define MGAREG_FXLEFT 0x1ca8
+#define MGAREG_FXRIGHT 0x1cac
+#define MGAREG_ICLEAR 0x1e18
+#define MGAREG_IEN 0x1e1c
+#define MGAREG_LEN 0x1c5c
+#define MGAREG_MACCESS 0x1c04
+#define MGAREG_MCTLWTST 0x1c08
+#define MGAREG_MEMRDBK 0x1e44
+#define MGAREG_OPMODE 0x1e54
+#define MGAREG_PAT0 0x1c10
+#define MGAREG_PAT1 0x1c14
+#define MGAREG_PITCH 0x1c8c
+#define MGAREG_PLNWT 0x1c1c
+#define MGAREG_PRIMADDRESS 0x1e58
+#define MGAREG_PRIMEND 0x1e5c
+#define MGAREG_PRIMPTR 0x1e50
+#define MGAREG_RST 0x1e40
+#define MGAREG_SECADDRESS 0x2c40
+#define MGAREG_SECEND 0x2c44
+#define MGAREG_SETUPADDRESS 0x2cd0
+#define MGAREG_SETUPEND 0x2cd4
+#define MGAREG_SGN 0x1c58
+#define MGAREG_SHIFT 0x1c50
+#define MGAREG_SOFTRAP 0x2c48
+#define MGAREG_SPECBSTART 0x2c98
+#define MGAREG_SPECBXINC 0x2c9c
+#define MGAREG_SPECBYINC 0x2ca0
+#define MGAREG_SPECGSTART 0x2c8c
+#define MGAREG_SPECGXINC 0x2c90
+#define MGAREG_SPECGYINC 0x2c94
+#define MGAREG_SPECRSTART 0x2c80
+#define MGAREG_SPECRXINC 0x2c84
+#define MGAREG_SPECRYINC 0x2c88
+#define MGAREG_SRC0 0x1c30
+#define MGAREG_SRC1 0x1c34
+#define MGAREG_SRC2 0x1c38
+#define MGAREG_SRC3 0x1c3c
+#define MGAREG_SRCORG 0x2cb4
+#define MGAREG_STATUS 0x1e14
+#define MGAREG_STENCIL 0x2cc8
+#define MGAREG_STENCILCTL 0x2ccc
+#define MGAREG_TDUALSTAGE0 0x2cf8
+#define MGAREG_TDUALSTAGE1 0x2cfc
+#define MGAREG_TEST0 0x1e48
+#define MGAREG_TEXBORDERCOL 0x2c5c
+#define MGAREG_TEXCTL 0x2c30
+#define MGAREG_TEXCTL2 0x2c3c
+#define MGAREG_TEXFILTER 0x2c58
+#define MGAREG_TEXHEIGHT 0x2c2c
+#define MGAREG_TEXORG 0x2c24
+#define MGAREG_TEXORG1 0x2ca4
+#define MGAREG_TEXORG2 0x2ca8
+#define MGAREG_TEXORG3 0x2cac
+#define MGAREG_TEXORG4 0x2cb0
+#define MGAREG_TEXTRANS 0x2c34
+#define MGAREG_TEXTRANSHIGH 0x2c38
+#define MGAREG_TEXWIDTH 0x2c28
+#define MGAREG_TMR0 0x2c00
+#define MGAREG_TMR1 0x2c04
+#define MGAREG_TMR2 0x2c08
+#define MGAREG_TMR3 0x2c0c
+#define MGAREG_TMR4 0x2c10
+#define MGAREG_TMR5 0x2c14
+#define MGAREG_TMR6 0x2c18
+#define MGAREG_TMR7 0x2c1c
+#define MGAREG_TMR8 0x2c20
+#define MGAREG_VBIADDR0 0x3e08
+#define MGAREG_VBIADDR1 0x3e0c
+#define MGAREG_VCOUNT 0x1e20
+#define MGAREG_WACCEPTSEQ 0x1dd4
+#define MGAREG_WCODEADDR 0x1e6c
+#define MGAREG_WFLAG 0x1dc4
+#define MGAREG_WFLAG1 0x1de0
+#define MGAREG_WFLAGNB 0x1e64
+#define MGAREG_WFLAGNB1 0x1e08
+#define MGAREG_WGETMSB 0x1dc8
+#define MGAREG_WIADDR 0x1dc0
+#define MGAREG_WIADDR2 0x1dd8
+#define MGAREG_WIADDRNB 0x1e60
+#define MGAREG_WIADDRNB1 0x1e04
+#define MGAREG_WIADDRNB2 0x1e00
+#define MGAREG_WIMEMADDR 0x1e68
+#define MGAREG_WIMEMDATA 0x2000
+#define MGAREG_WIMEMDATA1 0x2100
+#define MGAREG_WMISC 0x1e70
+#define MGAREG_WR 0x2d00
+#define MGAREG_WVRTXSZ 0x1dcc
+#define MGAREG_XDST 0x1cb0
+#define MGAREG_XYEND 0x1c44
+#define MGAREG_XYSTRT 0x1c40
+#define MGAREG_YBOT 0x1c9c
+#define MGAREG_YDST 0x1c90
+#define MGAREG_YDSTLEN 0x1c88
+#define MGAREG_YDSTORG 0x1c94
+#define MGAREG_YTOP 0x1c98
+#define MGAREG_ZORG 0x1c0c
+
+#endif
diff --git a/linux/mga_state.c b/linux/mga_state.c
new file mode 100644
index 00000000..d09881ba
--- /dev/null
+++ b/linux/mga_state.c
@@ -0,0 +1,362 @@
+/* mga_state.c -- State support for mga g200/g400 -*- linux-c -*-
+ * Created: Thu Jan 27 02:53:43 2000 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Jeff Hartmann <jhartmann@precisioninsight.com>
+ * Keith Whitwell <keithw@precisioninsight.com>
+ *
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_state.c,v 1.1 2000/02/11 17:26:08 dawes Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "mga_drv.h"
+#include "mgareg_flags.h"
+#include "mga_dma.h"
+#include "mga_state.h"
+#include "drm.h"
+
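+/* The PRIM* macros (from mga_dma.h, not shown here) implement primary-DMA
+ * buffer emission: PRIMGETPTR loads the current write pointer from
+ * dev_priv, PRIMOUTREG queues one register/value pair, and PRIMADVANCE
+ * stores the advanced pointer back.  (A summary inferred from their use
+ * below.) */
+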
+void mgaEmitClipRect( drm_mga_private_t *dev_priv, xf86drmClipRectRec *box )
+{
+ PRIMLOCALS;
+
+ PRIMGETPTR( dev_priv );
+
+ /* The G400 seems to have an issue with the second WARP not
+ * stalling clipper register writes. This bothers me, but the only
+ * way I could get it to never clip the last triangle under any
+ * circumstances is by inserting TWO dwgsync commands.
+ */
+ if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
+ PRIMOUTREG( MGAREG_DWGSYNC, 0 );
+ PRIMOUTREG( MGAREG_DWGSYNC, 0 );
+ }
+
+ PRIMOUTREG( MGAREG_CXBNDRY, ((box->x2)<<16)|(box->x1) );
+ PRIMOUTREG( MGAREG_YTOP, box->y1 * dev_priv->stride );
+ PRIMOUTREG( MGAREG_YBOT, box->y2 * dev_priv->stride );
+ PRIMADVANCE( dev_priv );
+}
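+
+/* CXBNDRY packs the right clip boundary into the high 16 bits and the
+ * left into the low 16; YTOP and YBOT take linear offsets, hence the
+ * multiplication by the surface stride above. */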
+
+
+static void mgaEmitContext(drm_mga_private_t *dev_priv,
+ drm_mga_buf_priv_t *buf_priv)
+{
+ unsigned int *regs = buf_priv->ContextState;
+ PRIMLOCALS;
+
+ PRIMGETPTR( dev_priv );
+ PRIMOUTREG( MGAREG_DSTORG, regs[MGA_CTXREG_DSTORG] );
+ PRIMOUTREG( MGAREG_MACCESS, regs[MGA_CTXREG_MACCESS] );
+ PRIMOUTREG( MGAREG_PLNWT, regs[MGA_CTXREG_PLNWT] );
+ PRIMOUTREG( MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL] );
+ PRIMOUTREG( MGAREG_ALPHACTRL, regs[MGA_CTXREG_ALPHACTRL] );
+ PRIMOUTREG( MGAREG_FOGCOL, regs[MGA_CTXREG_FOGCOLOR] );
+ PRIMOUTREG( MGAREG_WFLAG, regs[MGA_CTXREG_WFLAG] );
+
+ if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
+ PRIMOUTREG( MGAREG_WFLAG1, regs[MGA_CTXREG_WFLAG] );
+ PRIMOUTREG( MGAREG_TDUALSTAGE0, regs[MGA_CTXREG_TDUAL0] );
+ PRIMOUTREG( MGAREG_TDUALSTAGE1, regs[MGA_CTXREG_TDUAL1] );
+ }
+
+ PRIMADVANCE( dev_priv );
+}
+
+static void mgaG200EmitTex( drm_mga_private_t *dev_priv,
+ drm_mga_buf_priv_t *buf_priv )
+{
+ unsigned int *regs = buf_priv->TexState[0];
+ PRIMLOCALS;
+
+ PRIMGETPTR( dev_priv );
+ PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] );
+ PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL] );
+ PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER] );
+ PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL] );
+ PRIMOUTREG(MGAREG_TEXORG, regs[MGA_TEXREG_ORG] );
+ PRIMOUTREG(MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1] );
+ PRIMOUTREG(MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2] );
+ PRIMOUTREG(MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3] );
+ PRIMOUTREG(MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4] );
+ PRIMOUTREG(MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH] );
+ PRIMOUTREG(MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT] );
+
+ PRIMOUTREG(0x2d00 + 24*4, regs[MGA_TEXREG_WIDTH] );
+ PRIMOUTREG(0x2d00 + 34*4, regs[MGA_TEXREG_HEIGHT] );
+
+ PRIMADVANCE( dev_priv );
+}
+
+static void mgaG400EmitTex0( drm_mga_private_t *dev_priv,
+ drm_mga_buf_priv_t *buf_priv )
+{
+ unsigned int *regs = buf_priv->TexState[0];
+ int multitex = buf_priv->WarpPipe & MGA_T2;
+
+ PRIMLOCALS;
+ PRIMGETPTR( dev_priv );
+
+ PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] );
+ PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL] );
+ PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER] );
+ PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL] );
+ PRIMOUTREG(MGAREG_TEXORG, regs[MGA_TEXREG_ORG] );
+ PRIMOUTREG(MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1] );
+ PRIMOUTREG(MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2] );
+ PRIMOUTREG(MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3] );
+ PRIMOUTREG(MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4] );
+ PRIMOUTREG(MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH] );
+ PRIMOUTREG(MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT] );
+
+ PRIMOUTREG(0x2d00 + 49*4, 0);
+ PRIMOUTREG(0x2d00 + 57*4, 0);
+ PRIMOUTREG(0x2d00 + 53*4, 0);
+ PRIMOUTREG(0x2d00 + 61*4, 0);
+
+ if (!multitex) {
+ PRIMOUTREG(0x2d00 + 52*4, 0x40 );
+ PRIMOUTREG(0x2d00 + 60*4, 0x40 );
+ }
+
+ PRIMOUTREG(0x2d00 + 54*4, regs[MGA_TEXREG_WIDTH] | 0x40 );
+ PRIMOUTREG(0x2d00 + 62*4, regs[MGA_TEXREG_HEIGHT] | 0x40 );
+
+ PRIMADVANCE( dev_priv );
+}
+
+#define TMC_map1_enable 0x80000000
+
+
+static void mgaG400EmitTex1( drm_mga_private_t *dev_priv,
+ drm_mga_buf_priv_t *buf_priv )
+{
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int *regs = sarea_priv->TexState[1];
+
+ PRIMLOCALS;
+ PRIMGETPTR(dev_priv);
+
+ PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] | TMC_map1_enable);
+ PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL] );
+ PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER] );
+ PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL] );
+ PRIMOUTREG(MGAREG_TEXORG, regs[MGA_TEXREG_ORG] );
+ PRIMOUTREG(MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1] );
+ PRIMOUTREG(MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2] );
+ PRIMOUTREG(MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3] );
+ PRIMOUTREG(MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4] );
+ PRIMOUTREG(MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH] );
+ PRIMOUTREG(MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT] );
+
+ PRIMOUTREG(0x2d00 + 49*4, 0);
+ PRIMOUTREG(0x2d00 + 57*4, 0);
+ PRIMOUTREG(0x2d00 + 53*4, 0);
+ PRIMOUTREG(0x2d00 + 61*4, 0);
+
+ PRIMOUTREG(0x2d00 + 52*4, regs[MGA_TEXREG_WIDTH] | 0x40 );
+ PRIMOUTREG(0x2d00 + 60*4, regs[MGA_TEXREG_HEIGHT] | 0x40 );
+
+ PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] );
+
+ PRIMADVANCE( dev_priv );
+}
+
+
+static void mgaG400EmitPipe(drm_mga_private_t *dev_priv,
+ drm_mga_buf_priv_t *buf_priv)
+{
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int pipe = sarea_priv->WarpPipe;
+ float fParam = 12800.0f;
+ PRIMLOCALS;
+
+ PRIMGETPTR(dev_priv);
+ PRIMOUTREG(MGAREG_WIADDR2, WIA_wmode_suspend);
+
+ /* Establish vertex size.
+ */
+ if (pipe & MGA_T2) {
+ PRIMOUTREG(MGAREG_WVRTXSZ, 0x00001e09);
+ PRIMOUTREG(MGAREG_WACCEPTSEQ, 0x1e000000);
+ } else {
+ PRIMOUTREG(MGAREG_WVRTXSZ, 0x00001807);
+ PRIMOUTREG(MGAREG_WACCEPTSEQ, 0x18000000);
+ }
+
+ PRIMOUTREG(MGAREG_WFLAG, 0);
+ PRIMOUTREG(MGAREG_WFLAG1, 0);
+
+ PRIMOUTREG(0x2d00 + 56*4, *((u32 *)(&fParam)));
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+
+ PRIMOUTREG(0x2d00 + 49*4, 0); /* Tex stage 0 */
+ PRIMOUTREG(0x2d00 + 57*4, 0); /* Tex stage 0 */
+ PRIMOUTREG(0x2d00 + 53*4, 0); /* Tex stage 1 */
+ PRIMOUTREG(0x2d00 + 61*4, 0); /* Tex stage 1 */
+
+ PRIMOUTREG(0x2d00 + 54*4, 0x40); /* Tex stage 0 : w */
+ PRIMOUTREG(0x2d00 + 62*4, 0x40); /* Tex stage 0 : h */
+ PRIMOUTREG(0x2d00 + 52*4, 0x40); /* Tex stage 1 : w */
+ PRIMOUTREG(0x2d00 + 60*4, 0x40); /* Tex stage 1 : h */
+
+	/* DMA padding required due to a hardware bug */
+ PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
+ PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
+ PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
+ PRIMOUTREG(MGAREG_WIADDR2, (dev_priv->WarpIndex[pipe].phys_addr |
+ WIA_wmode_start | WIA_wagp_agp));
+ PRIMADVANCE(dev_priv);
+}
+
+static void mgaG200EmitPipe( drm_mga_private_t *dev_priv,
+ drm_mga_buf_priv_t *buf_priv )
+{
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int pipe = sarea_priv->WarpPipe;
+ PRIMLOCALS;
+
+ PRIMGETPTR(dev_priv);
+ PRIMOUTREG(MGAREG_WIADDR, WIA_wmode_suspend);
+ PRIMOUTREG(MGAREG_WVRTXSZ, 7);
+ PRIMOUTREG(MGAREG_WFLAG, 0);
+ PRIMOUTREG(0x2d00 + 24*4, 0); /* tex w/h */
+
+ PRIMOUTREG(0x2d00 + 25*4, 0x100);
+ PRIMOUTREG(0x2d00 + 34*4, 0); /* tex w/h */
+ PRIMOUTREG(0x2d00 + 42*4, 0xFFFF);
+ PRIMOUTREG(0x2d00 + 60*4, 0xFFFF);
+
+	/* DMA padding required due to a hardware bug */
+ PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
+ PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
+ PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
+ PRIMOUTREG(MGAREG_WIADDR, (dev_priv->WarpIndex[pipe].phys_addr |
+ WIA_wmode_start | WIA_wagp_agp));
+
+ PRIMADVANCE(dev_priv);
+}
+
+void mgaEmitState( drm_mga_private_t *dev_priv, drm_mga_buf_priv_t *buf_priv )
+{
+ unsigned int dirty = buf_priv->dirty;
+
+ if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
+ if (dirty & MGASAREA_NEW_CONTEXT)
+ mgaEmitContext( dev_priv, buf_priv );
+
+ if (dirty & MGASAREA_NEW_TEX1)
+ mgaG400EmitTex1( dev_priv, buf_priv );
+
+ if (dirty & MGASAREA_NEW_TEX0)
+ mgaG400EmitTex0( dev_priv, buf_priv );
+
+ if (dirty & MGASAREA_NEW_PIPE)
+ mgaG400EmitPipe( dev_priv, buf_priv );
+ } else {
+ if (dirty & MGASAREA_NEW_CONTEXT)
+ mgaEmitContext( dev_priv, buf_priv );
+
+ if (dirty & MGASAREA_NEW_TEX0)
+ mgaG200EmitTex( dev_priv, buf_priv );
+
+ if (dirty & MGASAREA_NEW_PIPE)
+ mgaG200EmitPipe( dev_priv, buf_priv );
+ }
+}
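+
+/* Emission order: context state first, then textures, then the WARP pipe
+ * last, since the pipe emit suspends and restarts the WARP engine.  The
+ * G200 path has no second texture unit, so TEX1 is never emitted there. */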
+
+
+
+/* Disallow all write destinations except the front and backbuffer.
+ */
+static int mgaCopyContext(drm_mga_private_t *dev_priv,
+ drm_mga_buf_priv_t *buf_priv)
+{
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int *regs = sarea_priv->ContextState;
+
+ if (regs[MGA_CTXREG_DSTORG] != dev_priv->frontOrg &&
+ regs[MGA_CTXREG_DSTORG] != dev_priv->backOrg)
+ return -1;
+
+ memcpy(buf_priv->ContextState, sarea_priv->ContextState,
+ sizeof(buf_priv->ContextState));
+ return 0;
+}
+
+
+/* Disallow texture reads from PCI space.
+ */
+static int mgaCopyTex(drm_mga_private_t *dev_priv,
+ drm_mga_buf_priv_t *buf_priv,
+ int unit)
+{
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+
+ if ((sarea_priv->TexState[unit][MGA_TEXREG_ORG] & 0x3) == 0x1)
+ return -1;
+
+ memcpy(buf_priv->TexState[unit], sarea_priv->TexState[unit],
+ sizeof(buf_priv->TexState[0]));
+
+ return 0;
+}
+
+
+int mgaCopyAndVerifyState( drm_mga_private_t *dev_priv,
+ drm_mga_buf_priv_t *buf_priv )
+{
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	unsigned int dirty = sarea_priv->dirty;
+ int rv = 0;
+
+ buf_priv->dirty = sarea_priv->dirty;
+ buf_priv->WarpPipe = sarea_priv->WarpPipe;
+
+ if (dirty & MGASAREA_NEW_CONTEXT)
+ rv |= mgaCopyContext( dev_priv, buf_priv );
+
+ if (dirty & MGASAREA_NEW_TEX0)
+ rv |= mgaCopyTex( dev_priv, buf_priv, 0 );
+
+ if (dev_priv->chipset == MGA_CARD_TYPE_G400)
+ {
+ if (dirty & MGASAREA_NEW_TEX1)
+ rv |= mgaCopyTex( dev_priv, buf_priv, 1 );
+
+ if (dirty & MGASAREA_NEW_PIPE)
+		rv |= (buf_priv->WarpPipe >= MGA_MAX_G400_PIPES);
+ }
+ else
+ {
+ if (dirty & MGASAREA_NEW_PIPE)
+		rv |= (buf_priv->WarpPipe >= MGA_MAX_G200_PIPES);
+ }
+
+ return rv == 0;
+}
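+
+/* Note the inverted conventions: the copy helpers above return -1 on
+ * rejection, while mgaCopyAndVerifyState() returns nonzero only when
+ * every dirty block passed verification. */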
+
+
diff --git a/linux/picker.c b/linux/picker.c
new file mode 100644
index 00000000..ecdb2c15
--- /dev/null
+++ b/linux/picker.c
@@ -0,0 +1,14 @@
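+/* picker.c is never compiled into the driver: the build machinery runs
+ * it through the C preprocessor and reads back the SMP, MODVERSIONS, and
+ * RELEASE assignments below to learn how the target kernel is configured.
+ * (This description of the build convention is inferred, not stated in
+ * this file.) */
+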
+#include <linux/autoconf.h>
+#include <linux/version.h>
+
+#ifndef CONFIG_SMP
+#define CONFIG_SMP 0
+#endif
+
+#ifndef CONFIG_MODVERSIONS
+#define CONFIG_MODVERSIONS 0
+#endif
+
+SMP = CONFIG_SMP
+MODVERSIONS = CONFIG_MODVERSIONS
+RELEASE = UTS_RELEASE
diff --git a/linux/proc.c b/linux/proc.c
new file mode 100644
index 00000000..ec0ebeeb
--- /dev/null
+++ b/linux/proc.c
@@ -0,0 +1,569 @@
+/* proc.c -- /proc support for DRM -*- linux-c -*-
+ * Created: Mon Jan 11 09:48:47 1999 by faith@precisioninsight.com
+ * Revised: Sun Feb 13 23:41:04 2000 by kevin@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/proc.c,v 1.4 1999/08/20 15:36:46 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/proc.c,v 1.5 2000/02/14 06:27:28 martin Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+static struct proc_dir_entry *drm_root = NULL;
+static struct proc_dir_entry *drm_dev_root = NULL;
+static char drm_slot_name[64];
+
+static int drm_name_info(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data);
+static int drm_vm_info(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data);
+static int drm_clients_info(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data);
+static int drm_queues_info(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data);
+static int drm_bufs_info(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data);
+#if DRM_DEBUG_CODE
+static int drm_vma_info(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data);
+#endif
+#if DRM_DMA_HISTOGRAM
+static int drm_histo_info(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data);
+#endif
+
+struct drm_proc_list {
+ const char *name;
+ int (*f)(char *, char **, off_t, int, int *, void *);
+} drm_proc_list[] = {
+ { "name", drm_name_info },
+ { "mem", drm_mem_info },
+ { "vm", drm_vm_info },
+ { "clients", drm_clients_info },
+ { "queues", drm_queues_info },
+ { "bufs", drm_bufs_info },
+#if DRM_DEBUG_CODE
+ { "vma", drm_vma_info },
+#endif
+#if DRM_DMA_HISTOGRAM
+ { "histo", drm_histo_info },
+#endif
+};
+#define DRM_PROC_ENTRIES (sizeof(drm_proc_list)/sizeof(drm_proc_list[0]))
+
+int drm_proc_init(drm_device_t *dev)
+{
+ struct proc_dir_entry *ent;
+ int i, j;
+
+ drm_root = create_proc_entry("dri", S_IFDIR, NULL);
+ if (!drm_root) {
+ DRM_ERROR("Cannot create /proc/dri\n");
+ return -1;
+ }
+
+ /* Instead of doing this search, we should
+ add some global support for /proc/dri. */
+ for (i = 0; i < 8; i++) {
+ sprintf(drm_slot_name, "dri/%d", i);
+ drm_dev_root = create_proc_entry(drm_slot_name, S_IFDIR, NULL);
+		if (!drm_dev_root) {
+			DRM_ERROR("Cannot create /proc/%s\n", drm_slot_name);
+			remove_proc_entry("dri", NULL);
+			return -1;
+		}
+ if (drm_dev_root->nlink == 2) break;
+ drm_dev_root = NULL;
+ }
+ if (!drm_dev_root) {
+ DRM_ERROR("Cannot find slot in /proc/dri\n");
+ return -1;
+ }
+
+ for (i = 0; i < DRM_PROC_ENTRIES; i++) {
+ ent = create_proc_entry(drm_proc_list[i].name,
+ S_IFREG|S_IRUGO, drm_dev_root);
+ if (!ent) {
+ DRM_ERROR("Cannot create /proc/%s/%s\n",
+ drm_slot_name, drm_proc_list[i].name);
+ for (j = 0; j < i; j++)
+				remove_proc_entry(drm_proc_list[j].name,
+ drm_dev_root);
+ remove_proc_entry(drm_slot_name, NULL);
+ remove_proc_entry("dri", NULL);
+ return -1;
+ }
+ ent->read_proc = drm_proc_list[i].f;
+ ent->data = dev;
+ }
+
+ return 0;
+}
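+
+/* Once registered, the entries are plain reads, e.g. (assuming the
+ * device was assigned slot 0):
+ *
+ *	cat /proc/dri/0/name
+ *	cat /proc/dri/0/bufs
+ */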
+
+
+int drm_proc_cleanup(void)
+{
+ int i;
+
+ if (drm_root) {
+ if (drm_dev_root) {
+ for (i = 0; i < DRM_PROC_ENTRIES; i++) {
+ remove_proc_entry(drm_proc_list[i].name,
+ drm_dev_root);
+ }
+ remove_proc_entry(drm_slot_name, NULL);
+ }
+ remove_proc_entry("dri", NULL);
+ remove_proc_entry(DRM_NAME, NULL);
+ }
+ drm_root = drm_dev_root = NULL;
+ return 0;
+}
+
+static int drm_name_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+
+ if (offset > 0) return 0; /* no partial requests */
+ len = 0;
+ *eof = 1;
+
+ if (dev->unique) {
+ DRM_PROC_PRINT("%s 0x%x %s\n",
+ dev->name, dev->device, dev->unique);
+ } else {
+ DRM_PROC_PRINT("%s 0x%x\n", dev->name, dev->device);
+ }
+ return len;
+}
+
+static int _drm_vm_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ drm_map_t *map;
+ const char *types[] = { "FB", "REG", "SHM" };
+ const char *type;
+ int i;
+
+ if (offset > 0) return 0; /* no partial requests */
+ len = 0;
+ *eof = 1;
+ DRM_PROC_PRINT("slot offset size type flags "
+ "address mtrr\n\n");
+ for (i = 0; i < dev->map_count; i++) {
+ map = dev->maplist[i];
+ if (map->type < 0 || map->type > 2) type = "??";
+ else type = types[map->type];
+ DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
+ i,
+ map->offset,
+ map->size,
+ type,
+ map->flags,
+ (unsigned long)map->handle);
+ if (map->mtrr < 0) {
+ DRM_PROC_PRINT("none\n");
+ } else {
+ DRM_PROC_PRINT("%4d\n", map->mtrr);
+ }
+ }
+
+ return len;
+}
+
+static int drm_vm_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ int ret;
+
+ down(&dev->struct_sem);
+ ret = _drm_vm_info(buf, start, offset, len, eof, data);
+ up(&dev->struct_sem);
+ return ret;
+}
+
+
+static int _drm_queues_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ int i;
+ drm_queue_t *q;
+
+ if (offset > 0) return 0; /* no partial requests */
+ len = 0;
+ *eof = 1;
+ DRM_PROC_PRINT(" ctx/flags use fin"
+ " blk/rw/rwf wait flushed queued"
+ " locks\n\n");
+ for (i = 0; i < dev->queue_count; i++) {
+ q = dev->queuelist[i];
+ atomic_inc(&q->use_count);
+ DRM_PROC_PRINT_RET(atomic_dec(&q->use_count),
+ "%5d/0x%03x %5d %5d"
+ " %5d/%c%c/%c%c%c %5d %10d %10d %10d\n",
+ i,
+ q->flags,
+ atomic_read(&q->use_count),
+ atomic_read(&q->finalization),
+ atomic_read(&q->block_count),
+ atomic_read(&q->block_read) ? 'r' : '-',
+ atomic_read(&q->block_write) ? 'w' : '-',
+ waitqueue_active(&q->read_queue) ? 'r':'-',
+ waitqueue_active(&q->write_queue) ? 'w':'-',
+ waitqueue_active(&q->flush_queue) ? 'f':'-',
+ DRM_BUFCOUNT(&q->waitlist),
+ atomic_read(&q->total_flushed),
+ atomic_read(&q->total_queued),
+ atomic_read(&q->total_locks));
+ atomic_dec(&q->use_count);
+ }
+
+ return len;
+}
+
+static int drm_queues_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ int ret;
+
+ down(&dev->struct_sem);
+ ret = _drm_queues_info(buf, start, offset, len, eof, data);
+ up(&dev->struct_sem);
+ return ret;
+}
+
+/* drm_bufs_info is called whenever a process reads
+   /proc/dri/<minor>/bufs. */
+
+static int _drm_bufs_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+
+ if (!dma) return 0;
+ if (offset > 0) return 0; /* no partial requests */
+ len = 0;
+ *eof = 1;
+ DRM_PROC_PRINT(" o size count free segs pages kB\n\n");
+ for (i = 0; i <= DRM_MAX_ORDER; i++) {
+ if (dma->bufs[i].buf_count)
+ DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n",
+ i,
+ dma->bufs[i].buf_size,
+ dma->bufs[i].buf_count,
+ atomic_read(&dma->bufs[i]
+ .freelist.count),
+ dma->bufs[i].seg_count,
+ dma->bufs[i].seg_count
+ *(1 << dma->bufs[i].page_order),
+ (dma->bufs[i].seg_count
+ * (1 << dma->bufs[i].page_order))
+ * PAGE_SIZE / 1024);
+ }
+ DRM_PROC_PRINT("\n");
+ for (i = 0; i < dma->buf_count; i++) {
+ if (i && !(i%32)) DRM_PROC_PRINT("\n");
+ DRM_PROC_PRINT(" %d", dma->buflist[i]->list);
+ }
+ DRM_PROC_PRINT("\n");
+
+ return len;
+}
+
+static int drm_bufs_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ int ret;
+
+ down(&dev->struct_sem);
+ ret = _drm_bufs_info(buf, start, offset, len, eof, data);
+ up(&dev->struct_sem);
+ return ret;
+}
+
+
+static int _drm_clients_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ drm_file_t *priv;
+
+ if (offset > 0) return 0; /* no partial requests */
+ len = 0;
+ *eof = 1;
+ DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n");
+ for (priv = dev->file_first; priv; priv = priv->next) {
+ DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
+ priv->authenticated ? 'y' : 'n',
+ priv->minor,
+ priv->pid,
+ priv->uid,
+ priv->magic,
+ priv->ioctl_count);
+ }
+
+ return len;
+}
+
+static int drm_clients_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ int ret;
+
+ down(&dev->struct_sem);
+ ret = _drm_clients_info(buf, start, offset, len, eof, data);
+ up(&dev->struct_sem);
+ return ret;
+}
+
+#if DRM_DEBUG_CODE
+
+static int _drm_vma_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ drm_vma_entry_t *pt;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long i;
+ struct vm_area_struct *vma;
+ unsigned long address;
+#if defined(__i386__)
+ unsigned int pgprot;
+#endif
+
+ if (offset > 0) return 0; /* no partial requests */
+ len = 0;
+ *eof = 1;
+ DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
+ atomic_read(&dev->vma_count),
+ high_memory, virt_to_phys(high_memory));
+ for (pt = dev->vmalist; pt; pt = pt->next) {
+ if (!(vma = pt->vma)) continue;
+ DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx",
+ pt->pid,
+ vma->vm_start,
+ vma->vm_end,
+ vma->vm_flags & VM_READ ? 'r' : '-',
+ vma->vm_flags & VM_WRITE ? 'w' : '-',
+ vma->vm_flags & VM_EXEC ? 'x' : '-',
+ vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
+ vma->vm_flags & VM_LOCKED ? 'l' : '-',
+ vma->vm_flags & VM_IO ? 'i' : '-',
+ VM_OFFSET(vma));
+
+#if defined(__i386__)
+ pgprot = pgprot_val(vma->vm_page_prot);
+ DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c",
+ pgprot & _PAGE_PRESENT ? 'p' : '-',
+ pgprot & _PAGE_RW ? 'w' : 'r',
+ pgprot & _PAGE_USER ? 'u' : 's',
+ pgprot & _PAGE_PWT ? 't' : 'b',
+ pgprot & _PAGE_PCD ? 'u' : 'c',
+ pgprot & _PAGE_ACCESSED ? 'a' : '-',
+ pgprot & _PAGE_DIRTY ? 'd' : '-',
+ pgprot & _PAGE_PSE ? 'm' : 'k',
+ pgprot & _PAGE_GLOBAL ? 'g' : 'l' );
+#endif
+ DRM_PROC_PRINT("\n");
+ for (i = vma->vm_start; i < vma->vm_end; i += PAGE_SIZE) {
+ pgd = pgd_offset(vma->vm_mm, i);
+ pmd = pmd_offset(pgd, i);
+ pte = pte_offset(pmd, i);
+ if (pte_present(*pte)) {
+ address = __pa(pte_page(*pte))
+ + (i & (PAGE_SIZE-1));
+ DRM_PROC_PRINT(" 0x%08lx -> 0x%08lx"
+ " %c%c%c%c%c\n",
+ i,
+ address,
+ pte_read(*pte) ? 'r' : '-',
+ pte_write(*pte) ? 'w' : '-',
+ pte_exec(*pte) ? 'x' : '-',
+ pte_dirty(*pte) ? 'd' : '-',
+ pte_young(*pte) ? 'a' : '-' );
+ } else {
+ DRM_PROC_PRINT(" 0x%08lx\n", i);
+ }
+ }
+ }
+
+ return len;
+}
+
+static int drm_vma_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ int ret;
+
+ down(&dev->struct_sem);
+ ret = _drm_vma_info(buf, start, offset, len, eof, data);
+ up(&dev->struct_sem);
+ return ret;
+}
+#endif
+
+
+#if DRM_DMA_HISTOGRAM
+static int _drm_histo_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+ unsigned long slot_value = DRM_DMA_HISTOGRAM_INITIAL;
+ unsigned long prev_value = 0;
+ drm_buf_t *buffer;
+
+ if (offset > 0) return 0; /* no partial requests */
+ len = 0;
+ *eof = 1;
+
+ DRM_PROC_PRINT("general statistics:\n");
+ DRM_PROC_PRINT("total %10u\n", atomic_read(&dev->histo.total));
+ DRM_PROC_PRINT("open %10u\n", atomic_read(&dev->total_open));
+ DRM_PROC_PRINT("close %10u\n", atomic_read(&dev->total_close));
+ DRM_PROC_PRINT("ioctl %10u\n", atomic_read(&dev->total_ioctl));
+ DRM_PROC_PRINT("irq %10u\n", atomic_read(&dev->total_irq));
+ DRM_PROC_PRINT("ctx %10u\n", atomic_read(&dev->total_ctx));
+
+ DRM_PROC_PRINT("\nlock statistics:\n");
+ DRM_PROC_PRINT("locks %10u\n", atomic_read(&dev->total_locks));
+ DRM_PROC_PRINT("unlocks %10u\n", atomic_read(&dev->total_unlocks));
+ DRM_PROC_PRINT("contends %10u\n", atomic_read(&dev->total_contends));
+ DRM_PROC_PRINT("sleeps %10u\n", atomic_read(&dev->total_sleeps));
+
+
+ if (dma) {
+ DRM_PROC_PRINT("\ndma statistics:\n");
+ DRM_PROC_PRINT("prio %10u\n",
+ atomic_read(&dma->total_prio));
+ DRM_PROC_PRINT("bytes %10u\n",
+ atomic_read(&dma->total_bytes));
+ DRM_PROC_PRINT("dmas %10u\n",
+ atomic_read(&dma->total_dmas));
+ DRM_PROC_PRINT("missed:\n");
+ DRM_PROC_PRINT(" dma %10u\n",
+ atomic_read(&dma->total_missed_dma));
+ DRM_PROC_PRINT(" lock %10u\n",
+ atomic_read(&dma->total_missed_lock));
+ DRM_PROC_PRINT(" free %10u\n",
+ atomic_read(&dma->total_missed_free));
+ DRM_PROC_PRINT(" sched %10u\n",
+ atomic_read(&dma->total_missed_sched));
+ DRM_PROC_PRINT("tried %10u\n",
+ atomic_read(&dma->total_tried));
+ DRM_PROC_PRINT("hit %10u\n",
+ atomic_read(&dma->total_hit));
+ DRM_PROC_PRINT("lost %10u\n",
+ atomic_read(&dma->total_lost));
+
+ buffer = dma->next_buffer;
+ if (buffer) {
+ DRM_PROC_PRINT("next_buffer %7d\n", buffer->idx);
+ } else {
+ DRM_PROC_PRINT("next_buffer none\n");
+ }
+ buffer = dma->this_buffer;
+ if (buffer) {
+ DRM_PROC_PRINT("this_buffer %7d\n", buffer->idx);
+ } else {
+ DRM_PROC_PRINT("this_buffer none\n");
+ }
+ }
+
+
+ DRM_PROC_PRINT("\nvalues:\n");
+ if (dev->lock.hw_lock) {
+ DRM_PROC_PRINT("lock 0x%08x\n",
+ dev->lock.hw_lock->lock);
+ } else {
+ DRM_PROC_PRINT("lock none\n");
+ }
+ DRM_PROC_PRINT("context_flag 0x%08x\n", dev->context_flag);
+ DRM_PROC_PRINT("interrupt_flag 0x%08x\n", dev->interrupt_flag);
+ DRM_PROC_PRINT("dma_flag 0x%08x\n", dev->dma_flag);
+
+ DRM_PROC_PRINT("queue_count %10d\n", dev->queue_count);
+ DRM_PROC_PRINT("last_context %10d\n", dev->last_context);
+ DRM_PROC_PRINT("last_switch %10lu\n", dev->last_switch);
+ DRM_PROC_PRINT("last_checked %10d\n", dev->last_checked);
+
+
+ DRM_PROC_PRINT("\n q2d d2c c2f"
+ " q2c q2f dma sch"
+ " ctx lacq lhld\n\n");
+ for (i = 0; i < DRM_DMA_HISTOGRAM_SLOTS; i++) {
+ DRM_PROC_PRINT("%s %10lu %10u %10u %10u %10u %10u"
+ " %10u %10u %10u %10u %10u\n",
+ i == DRM_DMA_HISTOGRAM_SLOTS - 1 ? ">=" : "< ",
+ i == DRM_DMA_HISTOGRAM_SLOTS - 1
+			       ? prev_value : slot_value,
+
+ atomic_read(&dev->histo
+ .queued_to_dispatched[i]),
+ atomic_read(&dev->histo
+ .dispatched_to_completed[i]),
+ atomic_read(&dev->histo
+ .completed_to_freed[i]),
+
+ atomic_read(&dev->histo
+ .queued_to_completed[i]),
+ atomic_read(&dev->histo
+ .queued_to_freed[i]),
+ atomic_read(&dev->histo.dma[i]),
+ atomic_read(&dev->histo.schedule[i]),
+ atomic_read(&dev->histo.ctx[i]),
+ atomic_read(&dev->histo.lacq[i]),
+ atomic_read(&dev->histo.lhld[i]));
+ prev_value = slot_value;
+ slot_value = DRM_DMA_HISTOGRAM_NEXT(slot_value);
+ }
+ return len;
+}
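+
+/* Reading the table: row i covers samples below slot_value cycles, where
+ * slot_value starts at DRM_DMA_HISTOGRAM_INITIAL and advances through
+ * DRM_DMA_HISTOGRAM_NEXT; the final ">=" row accumulates everything at
+ * or beyond the previous bound.  (With the decade scaling assumed from
+ * drmP.h -- INITIAL = 10, NEXT(x) = (x)*10 -- the buckets run 10, 100,
+ * 1000, ... cycles; check the header for the values actually in use.) */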
+
+static int drm_histo_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ int ret;
+
+ down(&dev->struct_sem);
+ ret = _drm_histo_info(buf, start, offset, len, eof, data);
+ up(&dev->struct_sem);
+ return ret;
+}
+#endif
diff --git a/linux/tdfx_drv.c b/linux/tdfx_drv.c
new file mode 100644
index 00000000..61c917eb
--- /dev/null
+++ b/linux/tdfx_drv.c
@@ -0,0 +1,654 @@
+/* tdfx_drv.c -- tdfx driver -*- linux-c -*-
+ * Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com
+ * Revised: Tue Oct 12 08:51:35 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI$
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/tdfx_drv.c,v 1.2 2000/01/20 07:25:36 martin Exp $
+ *
+ */
+
+#define EXPORT_SYMTAB
+#include "drmP.h"
+#include "tdfx_drv.h"
+EXPORT_SYMBOL(tdfx_init);
+EXPORT_SYMBOL(tdfx_cleanup);
+
+#define TDFX_NAME "tdfx"
+#define TDFX_DESC "tdfx"
+#define TDFX_DATE "19991009"
+#define TDFX_MAJOR 0
+#define TDFX_MINOR 0
+#define TDFX_PATCHLEVEL 1
+
+static drm_device_t tdfx_device;
+drm_ctx_t tdfx_res_ctx;
+
+static struct file_operations tdfx_fops = {
+ open: tdfx_open,
+ flush: drm_flush,
+ release: tdfx_release,
+ ioctl: tdfx_ioctl,
+ mmap: drm_mmap,
+ read: drm_read,
+ fasync: drm_fasync,
+ poll: drm_poll,
+};
+
+static struct miscdevice tdfx_misc = {
+ minor: MISC_DYNAMIC_MINOR,
+ name: TDFX_NAME,
+ fops: &tdfx_fops,
+};
+
+static drm_ioctl_desc_t tdfx_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { tdfx_version, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { tdfx_addctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { tdfx_rmctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { tdfx_modctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { tdfx_getctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { tdfx_switchctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { tdfx_newctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { tdfx_resctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { tdfx_lock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { tdfx_unlock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
+};
+#define TDFX_IOCTL_COUNT DRM_ARRAY_SIZE(tdfx_ioctls)
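+
+/* Each entry above is a drm_ioctl_desc_t, which drmP.h of this vintage
+ * lays out as { handler, auth_needed, root_only }.  tdfx_ioctl() below
+ * enforces both flags: auth_needed requires priv->authenticated (the
+ * GET_MAGIC/AUTH_MAGIC cookie handshake), and root_only requires
+ * CAP_SYS_ADMIN.  So { drm_irq_busid, 0, 1 } may be called without
+ * authentication, but only by a suitably privileged process. */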
+
+#ifdef MODULE
+int init_module(void);
+void cleanup_module(void);
+static char *tdfx = NULL;
+
+MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas.");
+MODULE_DESCRIPTION("tdfx");
+MODULE_PARM(tdfx, "s");
+
+/* init_module is called when insmod is used to load the module */
+
+int init_module(void)
+{
+ return tdfx_init();
+}
+
+/* cleanup_module is called when rmmod is used to unload the module */
+
+void cleanup_module(void)
+{
+ tdfx_cleanup();
+}
+#endif
+
+#ifndef MODULE
+/* tdfx_setup is called by the kernel to parse command-line options passed
+ * via the boot loader (e.g., LILO).  It calls the insmod option routine,
+ * drm_parse_options.
+ *
+ * This is not currently supported, since it requires changes to
+ * linux/init/main.c. */
+
+
+void __init tdfx_setup(char *str, int *ints)
+{
+ if (ints[0] != 0) {
+ DRM_ERROR("Illegal command line format, ignored\n");
+ return;
+ }
+ drm_parse_options(str);
+}
+#endif
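+
+/* For reference: wiring the boot-time tdfx_setup() up would mean adding
+ * an entry along the lines of { "tdfx=", tdfx_setup } to the boot-option
+ * table in linux/init/main.c (the table's exact shape varies across
+ * 2.2/2.3 revisions) -- hence "not currently supported" above.  Note
+ * also that the name collides with the static tdfx_setup(drm_device_t *)
+ * defined next, so a non-modular build would additionally need a rename. */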
+
+static int tdfx_setup(drm_device_t *dev)
+{
+ int i;
+
+ atomic_set(&dev->ioctl_count, 0);
+ atomic_set(&dev->vma_count, 0);
+ dev->buf_use = 0;
+ atomic_set(&dev->buf_alloc, 0);
+
+ atomic_set(&dev->total_open, 0);
+ atomic_set(&dev->total_close, 0);
+ atomic_set(&dev->total_ioctl, 0);
+ atomic_set(&dev->total_irq, 0);
+ atomic_set(&dev->total_ctx, 0);
+ atomic_set(&dev->total_locks, 0);
+ atomic_set(&dev->total_unlocks, 0);
+ atomic_set(&dev->total_contends, 0);
+ atomic_set(&dev->total_sleeps, 0);
+
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ dev->magiclist[i].head = NULL;
+ dev->magiclist[i].tail = NULL;
+ }
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ dev->vmalist = NULL;
+ dev->lock.hw_lock = NULL;
+ init_waitqueue_head(&dev->lock.lock_queue);
+ dev->queue_count = 0;
+ dev->queue_reserved = 0;
+ dev->queue_slots = 0;
+ dev->queuelist = NULL;
+ dev->irq = 0;
+ dev->context_flag = 0;
+ dev->interrupt_flag = 0;
+ dev->dma = 0;
+ dev->dma_flag = 0;
+ dev->last_context = 0;
+ dev->last_switch = 0;
+ dev->last_checked = 0;
+ init_timer(&dev->timer);
+ init_waitqueue_head(&dev->context_wait);
+
+ dev->ctx_start = 0;
+ dev->lck_start = 0;
+
+ dev->buf_rp = dev->buf;
+ dev->buf_wp = dev->buf;
+ dev->buf_end = dev->buf + DRM_BSZ;
+ dev->buf_async = NULL;
+ init_waitqueue_head(&dev->buf_readers);
+ init_waitqueue_head(&dev->buf_writers);
+
+	tdfx_res_ctx.handle = -1;
+
+ DRM_DEBUG("\n");
+
+ /* The kernel's context could be created here, but is now created
+ in drm_dma_enqueue. This is more resource-efficient for
+ hardware that does not do DMA, but may mean that
+ drm_select_queue fails between the time the interrupt is
+ initialized and the time the queues are initialized. */
+
+ return 0;
+}
+
+
+static int tdfx_takedown(drm_device_t *dev)
+{
+ int i;
+ drm_magic_entry_t *pt, *next;
+ drm_map_t *map;
+ drm_vma_entry_t *vma, *vma_next;
+
+ DRM_DEBUG("\n");
+
+ down(&dev->struct_sem);
+ del_timer(&dev->timer);
+
+ if (dev->devname) {
+ drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
+ dev->devname = NULL;
+ }
+
+ if (dev->unique) {
+ drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
+ dev->unique = NULL;
+ dev->unique_len = 0;
+ }
+ /* Clear pid list */
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ for (pt = dev->magiclist[i].head; pt; pt = next) {
+ next = pt->next;
+ drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+ }
+ dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
+ }
+
+ /* Clear vma list (only built for debugging) */
+ if (dev->vmalist) {
+ for (vma = dev->vmalist; vma; vma = vma_next) {
+ vma_next = vma->next;
+ drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
+ }
+ dev->vmalist = NULL;
+ }
+
+ /* Clear map area and mtrr information */
+ if (dev->maplist) {
+ for (i = 0; i < dev->map_count; i++) {
+ map = dev->maplist[i];
+ switch (map->type) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+#ifdef CONFIG_MTRR
+ if (map->mtrr >= 0) {
+ int retcode;
+ retcode = mtrr_del(map->mtrr,
+ map->offset,
+ map->size);
+ DRM_DEBUG("mtrr_del = %d\n", retcode);
+ }
+#endif
+ drm_ioremapfree(map->handle, map->size);
+ break;
+ case _DRM_SHM:
+ drm_free_pages((unsigned long)map->handle,
+ drm_order(map->size)
+ - PAGE_SHIFT,
+ DRM_MEM_SAREA);
+ break;
+ }
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ }
+ drm_free(dev->maplist,
+ dev->map_count * sizeof(*dev->maplist),
+ DRM_MEM_MAPS);
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ }
+
+ if (dev->lock.hw_lock) {
+ dev->lock.hw_lock = NULL; /* SHM removed */
+ dev->lock.pid = 0;
+ wake_up_interruptible(&dev->lock.lock_queue);
+ }
+ up(&dev->struct_sem);
+
+ return 0;
+}
+
+/* tdfx_init is called via init_module at module load time, or via
+ * linux/init/main.c (this is not currently supported). */
+
+int tdfx_init(void)
+{
+ int retcode;
+ drm_device_t *dev = &tdfx_device;
+
+ DRM_DEBUG("\n");
+
+ memset((void *)dev, 0, sizeof(*dev));
+ dev->count_lock = SPIN_LOCK_UNLOCKED;
+ sema_init(&dev->struct_sem, 1);
+
+#ifdef MODULE
+ drm_parse_options(tdfx);
+#endif
+
+ if ((retcode = misc_register(&tdfx_misc))) {
+ DRM_ERROR("Cannot register \"%s\"\n", TDFX_NAME);
+ return retcode;
+ }
+ dev->device = MKDEV(MISC_MAJOR, tdfx_misc.minor);
+ dev->name = TDFX_NAME;
+
+ drm_mem_init();
+ drm_proc_init(dev);
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ TDFX_NAME,
+ TDFX_MAJOR,
+ TDFX_MINOR,
+ TDFX_PATCHLEVEL,
+ TDFX_DATE,
+ tdfx_misc.minor);
+
+ return 0;
+}
+
+/* tdfx_cleanup is called via cleanup_module at module unload time. */
+
+void tdfx_cleanup(void)
+{
+ drm_device_t *dev = &tdfx_device;
+
+ DRM_DEBUG("\n");
+
+ drm_proc_cleanup();
+ if (misc_deregister(&tdfx_misc)) {
+ DRM_ERROR("Cannot unload module\n");
+ } else {
+ DRM_INFO("Module unloaded\n");
+ }
+ tdfx_takedown(dev);
+}
+
+int tdfx_version(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_version_t version;
+ int len;
+
+ copy_from_user_ret(&version,
+ (drm_version_t *)arg,
+ sizeof(version),
+ -EFAULT);
+
+#define DRM_COPY(name,value) \
+ len = strlen(value); \
+ if (len > name##_len) len = name##_len; \
+ name##_len = strlen(value); \
+ if (len && name) { \
+ copy_to_user_ret(name, value, len, -EFAULT); \
+ }
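+/* Note the DRM_COPY semantics: at most the caller-supplied name##_len
+ * bytes are copied out, but the full strlen(value) is then written back,
+ * so a caller whose buffer was too small learns the size it needs. */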
+
+ version.version_major = TDFX_MAJOR;
+ version.version_minor = TDFX_MINOR;
+ version.version_patchlevel = TDFX_PATCHLEVEL;
+
+ DRM_COPY(version.name, TDFX_NAME);
+ DRM_COPY(version.date, TDFX_DATE);
+ DRM_COPY(version.desc, TDFX_DESC);
+
+ copy_to_user_ret((drm_version_t *)arg,
+ &version,
+ sizeof(version),
+ -EFAULT);
+ return 0;
+}
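+
+/* Illustrative userspace counterpart (a sketch, not shipped code): the
+ * length write-back in DRM_COPY suggests a two-pass query --
+ *
+ *	drm_version_t v;
+ *	memset(&v, 0, sizeof(v));	   // pass 1: string pointers NULL
+ *	ioctl(fd, DRM_IOCTL_VERSION, &v);  // returns the required lengths
+ *	v.name = malloc(v.name_len + 1);   // likewise for date and desc
+ *	ioctl(fd, DRM_IOCTL_VERSION, &v);  // pass 2: strings copied out
+ *	v.name[v.name_len] = '\0';	   // strings come back unterminated
+ *
+ * where fd is an open DRM device node; error checking omitted. */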
+
+int tdfx_open(struct inode *inode, struct file *filp)
+{
+ drm_device_t *dev = &tdfx_device;
+ int retcode = 0;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+ if (!(retcode = drm_open_helper(inode, filp, dev))) {
+ MOD_INC_USE_COUNT;
+ atomic_inc(&dev->total_open);
+ spin_lock(&dev->count_lock);
+ if (!dev->open_count++) {
+ spin_unlock(&dev->count_lock);
+ return tdfx_setup(dev);
+ }
+ spin_unlock(&dev->count_lock);
+ }
+ return retcode;
+}
+
+int tdfx_release(struct inode *inode, struct file *filp)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ int retcode = 0;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+ if (!(retcode = drm_release(inode, filp))) {
+ MOD_DEC_USE_COUNT;
+ atomic_inc(&dev->total_close);
+ spin_lock(&dev->count_lock);
+ if (!--dev->open_count) {
+ if (atomic_read(&dev->ioctl_count) || dev->blocked) {
+ DRM_ERROR("Device busy: %d %d\n",
+ atomic_read(&dev->ioctl_count),
+ dev->blocked);
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ spin_unlock(&dev->count_lock);
+ return tdfx_takedown(dev);
+ }
+ spin_unlock(&dev->count_lock);
+ }
+ return retcode;
+}
+
+/* tdfx_ioctl is called whenever a process performs an ioctl on /dev/drm. */
+
+int tdfx_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int nr = DRM_IOCTL_NR(cmd);
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ int retcode = 0;
+ drm_ioctl_desc_t *ioctl;
+ drm_ioctl_t *func;
+
+ atomic_inc(&dev->ioctl_count);
+ atomic_inc(&dev->total_ioctl);
+ ++priv->ioctl_count;
+
+ DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
+ current->pid, cmd, nr, dev->device, priv->authenticated);
+
+ if (nr >= TDFX_IOCTL_COUNT) {
+ retcode = -EINVAL;
+ } else {
+ ioctl = &tdfx_ioctls[nr];
+ func = ioctl->func;
+
+ if (!func) {
+ DRM_DEBUG("no function\n");
+ retcode = -EINVAL;
+ } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
+ || (ioctl->auth_needed && !priv->authenticated)) {
+ retcode = -EACCES;
+ } else {
+ retcode = (func)(inode, filp, cmd, arg);
+ }
+ }
+
+ atomic_dec(&dev->ioctl_count);
+ return retcode;
+}
+
+int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ DECLARE_WAITQUEUE(entry, current);
+ int ret = 0;
+ drm_lock_t lock;
+#if DRM_DMA_HISTOGRAM
+ cycles_t start;
+
+ dev->lck_start = start = get_cycles();
+#endif
+
+ copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ current->pid, lock.context);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+ lock.context, current->pid, dev->lock.hw_lock->lock,
+ lock.flags);
+
+#if 0
+ /* dev->queue_count == 0 right now for
+ tdfx. FIXME? */
+ if (lock.context < 0 || lock.context >= dev->queue_count)
+ return -EINVAL;
+#endif
+
+ if (!ret) {
+#if 0
+ if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
+ != lock.context) {
+ long j = jiffies - dev->lock.lock_time;
+
+ if (lock.context == tdfx_res_ctx.handle &&
+ j >= 0 && j < DRM_LOCK_SLICE) {
+ /* Can't take lock if we just had it and
+ there is contention. */
+				DRM_DEBUG("%d (pid %d) delayed j=%ld dev=%lu jiffies=%lu\n",
+ lock.context, current->pid, j,
+ dev->lock.lock_time, jiffies);
+ current->state = TASK_INTERRUPTIBLE;
+ current->policy |= SCHED_YIELD;
+ schedule_timeout(DRM_LOCK_SLICE-j);
+				DRM_DEBUG("jiffies=%lu\n", jiffies);
+ }
+ }
+#endif
+ add_wait_queue(&dev->lock.lock_queue, &entry);
+ for (;;) {
+ if (!dev->lock.hw_lock) {
+ /* Device has been unregistered */
+ ret = -EINTR;
+ break;
+ }
+ if (drm_lock_take(&dev->lock.hw_lock->lock,
+ lock.context)) {
+ dev->lock.pid = current->pid;
+ dev->lock.lock_time = jiffies;
+ atomic_inc(&dev->total_locks);
+ break; /* Got lock */
+ }
+
+ /* Contention */
+ atomic_inc(&dev->total_sleeps);
+ current->state = TASK_INTERRUPTIBLE;
+#if 1
+ current->policy |= SCHED_YIELD;
+#endif
+ schedule();
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&dev->lock.lock_queue, &entry);
+ }
+
+#if 0
+ if (!ret && dev->last_context != lock.context &&
+ lock.context != tdfx_res_ctx.handle &&
+ dev->last_context != tdfx_res_ctx.handle) {
+ add_wait_queue(&dev->context_wait, &entry);
+ current->state = TASK_INTERRUPTIBLE;
+ /* PRE: dev->last_context != lock.context */
+ tdfx_context_switch(dev, dev->last_context, lock.context);
+ /* POST: we will wait for the context
+ switch and will dispatch on a later call
+ when dev->last_context == lock.context
+ NOTE WE HOLD THE LOCK THROUGHOUT THIS
+ TIME! */
+ current->policy |= SCHED_YIELD;
+ schedule();
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&dev->context_wait, &entry);
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ } else if (dev->last_context != lock.context) {
+ DRM_ERROR("Context mismatch: %d %d\n",
+ dev->last_context, lock.context);
+ }
+ }
+#endif
+
+ if (!ret) {
+ if (lock.flags & _DRM_LOCK_READY) {
+ /* Wait for space in DMA/FIFO */
+ }
+ if (lock.flags & _DRM_LOCK_QUIESCENT) {
+ /* Make hardware quiescent */
+#if 0
+ tdfx_quiescent(dev);
+#endif
+ }
+ }
+
+#if 0
+ DRM_ERROR("pid = %5d, old counter = %5ld\n",
+ current->pid, current->counter);
+#endif
+ if (lock.context != tdfx_res_ctx.handle) {
+ current->counter = 5;
+ current->priority = DEF_PRIORITY/4;
+ }
+#if 0
+ while (current->counter > 25)
+ current->counter >>= 1; /* decrease time slice */
+ DRM_ERROR("pid = %5d, new counter = %5ld\n",
+ current->pid, current->counter);
+#endif
+ DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
+
+#if DRM_DMA_HISTOGRAM
+ atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
+#endif
+
+ return ret;
+}
+
+
+int tdfx_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_lock_t lock;
+
+ copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ current->pid, lock.context);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("%d frees lock (%d holds)\n",
+ lock.context,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ atomic_inc(&dev->total_unlocks);
+ if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
+ atomic_inc(&dev->total_contends);
+ drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
+ /* FIXME: Try to send data to card here */
+ if (!dev->context_flag) {
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("\n");
+ }
+ }
+
+#if 0
+ current->policy |= SCHED_YIELD;
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(1000);
+#endif
+
+ if (lock.context != tdfx_res_ctx.handle) {
+ current->counter = 5;
+ current->priority = DEF_PRIORITY;
+ }
+#if 0
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(10);
+#endif
+
+ return 0;
+}
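+
+/* Usage sketch (hypothetical client code, not part of this patch): a
+ * DRI client brackets hardware access with the two ioctls above --
+ *
+ *	drm_lock_t l;
+ *	l.context = ctx;		   // handle from DRM_IOCTL_ADD_CTX
+ *	l.flags	  = 0;			   // or _DRM_LOCK_READY, etc.
+ *	ioctl(fd, DRM_IOCTL_LOCK, &l);	   // may sleep under contention
+ *	... program the hardware ...
+ *	ioctl(fd, DRM_IOCTL_UNLOCK, &l);
+ *
+ * Locking as DRM_KERNEL_CONTEXT is rejected with -EINVAL, as above. */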
diff --git a/linux/vm.c b/linux/vm.c
new file mode 100644
index 00000000..5c272eb6
--- /dev/null
+++ b/linux/vm.c
@@ -0,0 +1,301 @@
+/* vm.c -- Memory mapping for DRM -*- linux-c -*-
+ * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
+ * Revised: Mon Feb 14 00:16:45 2000 by kevin@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/vm.c,v 1.7 1999/08/21 02:48:34 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/vm.c,v 1.4 2000/02/14 06:27:28 martin Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+struct vm_operations_struct drm_vm_ops = {
+ nopage: drm_vm_nopage,
+ open: drm_vm_open,
+ close: drm_vm_close,
+};
+
+struct vm_operations_struct drm_vm_shm_ops = {
+ nopage: drm_vm_shm_nopage,
+ open: drm_vm_open,
+ close: drm_vm_close,
+};
+
+struct vm_operations_struct drm_vm_dma_ops = {
+ nopage: drm_vm_dma_nopage,
+ open: drm_vm_open,
+ close: drm_vm_close,
+};
+
+#if LINUX_VERSION_CODE < 0x020317
+unsigned long drm_vm_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int write_access)
+#else
+ /* Return type changed in 2.3.23 */
+struct page *drm_vm_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int write_access)
+#endif
+{
+ DRM_DEBUG("0x%08lx, %d\n", address, write_access);
+
+ return NOPAGE_SIGBUS; /* Disallow mremap */
+}
+
+#if LINUX_VERSION_CODE < 0x020317
+unsigned long drm_vm_shm_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int write_access)
+#else
+ /* Return type changed in 2.3.23 */
+struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int write_access)
+#endif
+{
+ drm_file_t *priv = vma->vm_file->private_data;
+ drm_device_t *dev = priv->dev;
+ unsigned long physical;
+ unsigned long offset;
+ unsigned long page;
+
+ if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
+ if (!dev->lock.hw_lock) return NOPAGE_OOM; /* Nothing allocated */
+
+ offset = address - vma->vm_start;
+ page = offset >> PAGE_SHIFT;
+ physical = (unsigned long)dev->lock.hw_lock + (offset & (~PAGE_MASK));
+ atomic_inc(&mem_map[MAP_NR(physical)].count); /* Dec. by kernel */
+
+ DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
+#if LINUX_VERSION_CODE < 0x020317
+ return physical;
+#else
+ return mem_map + MAP_NR(physical);
+#endif
+}
+
+#if LINUX_VERSION_CODE < 0x020317
+unsigned long drm_vm_dma_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int write_access)
+#else
+ /* Return type changed in 2.3.23 */
+struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int write_access)
+#endif
+{
+ drm_file_t *priv = vma->vm_file->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ unsigned long physical;
+ unsigned long offset;
+ unsigned long page;
+
+ if (!dma) return NOPAGE_SIGBUS; /* Error */
+ if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
+	if (!dma->pagelist)	   return NOPAGE_OOM;  /* Nothing allocated */
+
+ offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
+ page = offset >> PAGE_SHIFT;
+ physical = dma->pagelist[page] + (offset & (~PAGE_MASK));
+ atomic_inc(&mem_map[MAP_NR(physical)].count); /* Dec. by kernel */
+
+ DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
+#if LINUX_VERSION_CODE < 0x020317
+ return physical;
+#else
+ return mem_map + MAP_NR(physical);
+#endif
+}
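+
+/* The shm and dma fault handlers above share a recipe: resolve the
+ * faulting address to a page, take a reference on its mem_map entry (the
+ * kernel drops it again at unmap time), and return either an address
+ * (before 2.3.23) or a struct page pointer, per the LINUX_VERSION_CODE
+ * conditionals. */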
+
+void drm_vm_open(struct vm_area_struct *vma)
+{
+ drm_file_t *priv = vma->vm_file->private_data;
+ drm_device_t *dev = priv->dev;
+#if DRM_DEBUG_CODE
+ drm_vma_entry_t *vma_entry;
+#endif
+
+ DRM_DEBUG("0x%08lx,0x%08lx\n",
+ vma->vm_start, vma->vm_end - vma->vm_start);
+ atomic_inc(&dev->vma_count);
+ MOD_INC_USE_COUNT;
+
+#if DRM_DEBUG_CODE
+ vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
+ if (vma_entry) {
+ down(&dev->struct_sem);
+ vma_entry->vma = vma;
+ vma_entry->next = dev->vmalist;
+ vma_entry->pid = current->pid;
+ dev->vmalist = vma_entry;
+ up(&dev->struct_sem);
+ }
+#endif
+}
+
+void drm_vm_close(struct vm_area_struct *vma)
+{
+ drm_file_t *priv = vma->vm_file->private_data;
+ drm_device_t *dev = priv->dev;
+#if DRM_DEBUG_CODE
+ drm_vma_entry_t *pt, *prev;
+#endif
+
+ DRM_DEBUG("0x%08lx,0x%08lx\n",
+ vma->vm_start, vma->vm_end - vma->vm_start);
+ MOD_DEC_USE_COUNT;
+ atomic_dec(&dev->vma_count);
+
+#if DRM_DEBUG_CODE
+ down(&dev->struct_sem);
+ for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
+ if (pt->vma == vma) {
+ if (prev) {
+ prev->next = pt->next;
+ } else {
+ dev->vmalist = pt->next;
+ }
+ drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
+ break;
+ }
+ }
+ up(&dev->struct_sem);
+#endif
+}
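+
+/* The vmalist bookkeeping in the open/close pair above is compiled in
+ * only under DRM_DEBUG_CODE; it feeds the /proc vma report earlier in
+ * this patch and is torn down wholesale in the drivers' takedown paths
+ * ("only built for debugging"). */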
+
+int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ unsigned long length = vma->vm_end - vma->vm_start;
+
+ DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
+ vma->vm_start, vma->vm_end, VM_OFFSET(vma));
+
+ /* Length must match exact page count */
+ if ((length >> PAGE_SHIFT) != dma->page_count) return -EINVAL;
+
+ vma->vm_ops = &drm_vm_dma_ops;
+ vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
+
+#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
+ /* In Linux 2.2.3 and above, this is
+ handled in do_mmap() in mm/mmap.c. */
+ ++filp->f_count;
+#endif
+ vma->vm_file = filp; /* Needed for drm_vm_open() */
+ drm_vm_open(vma);
+ return 0;
+}
+
+int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_map_t *map = NULL;
+ int i;
+
+ DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
+ vma->vm_start, vma->vm_end, VM_OFFSET(vma));
+
+ if (!VM_OFFSET(vma)) return drm_mmap_dma(filp, vma);
+
+	/* A sequential search of the map array is
+	   fine here because: 1) there will only be
+	   about 5-10 entries in it and, 2) a DRI
+	   client only has to do this mapping
+	   once, so it doesn't have to be optimized
+	   for performance, even if the array were
+	   a bit longer. */
+ for (i = 0; i < dev->map_count; i++) {
+ map = dev->maplist[i];
+ if (map->offset == VM_OFFSET(vma)) break;
+ }
+
+ if (i >= dev->map_count) return -EINVAL;
+ if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
+ return -EPERM;
+
+ /* Check for valid size. */
+ if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;
+
+
+ switch (map->type) {
+ case _DRM_FRAME_BUFFER:
+ case _DRM_REGISTERS:
+ if (VM_OFFSET(vma) >= __pa(high_memory)) {
+#if defined(__i386__)
+ if (boot_cpu_data.x86 > 3) {
+ pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
+ pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
+ }
+#endif
+ vma->vm_flags |= VM_IO; /* not in core dump */
+ }
+ if (remap_page_range(vma->vm_start,
+ VM_OFFSET(vma),
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot))
+ return -EAGAIN;
+ vma->vm_ops = &drm_vm_ops;
+ break;
+ case _DRM_SHM:
+ vma->vm_ops = &drm_vm_shm_ops;
+ /* Don't let this area swap. Change when
+ DRM_KERNEL advisory is supported. */
+ vma->vm_flags |= VM_LOCKED;
+ break;
+ default:
+ return -EINVAL; /* This should never happen. */
+ }
+ vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
+ if (map->flags & _DRM_READ_ONLY) {
+#if defined(__i386__)
+ pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
+#else
+ /* Ye gads this is ugly. With more thought
+ we could move this up higher and use
+ `protection_map' instead. */
+ vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
+ __pte(pgprot_val(vma->vm_page_prot)))));
+#endif
+ }
+
+
+#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
+ /* In Linux 2.2.3 and above, this is
+ handled in do_mmap() in mm/mmap.c. */
+ ++filp->f_count;
+#endif
+ vma->vm_file = filp; /* Needed for drm_vm_open() */
+ drm_vm_open(vma);
+ return 0;
+}
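+
+/* Usage sketch (hypothetical client code, not part of this patch): the
+ * mmap offset selects what gets mapped -- offset 0 reaches the DMA
+ * buffer pool via drm_mmap_dma(), while the offset advertised for a map
+ * added with DRM_IOCTL_ADD_MAP reaches that register, framebuffer, or
+ * SHM area:
+ *
+ *	void *addr = mmap(0, map_size, PROT_READ | PROT_WRITE,
+ *			  MAP_SHARED, fd, map_offset);
+ *
+ * map_size must equal the map's size exactly, per the check above. */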