author     Brian Paul <brian.paul@tungstengraphics.com>  2000-06-07 16:06:56 +0000
committer  Brian Paul <brian.paul@tungstengraphics.com>  2000-06-07 16:06:56 +0000
commit     8bbccd2c72f3c1f36cb6eba4ae58aa7d1fbe3991 (patch)
tree       fc13c9a9ddbed0fff7a99a8f5b5369519766de44
parent     127fd6583ff6ca60879bd79bc67dc379eedb6c8b (diff)

merge from trunk (glxmisc-3-0-0-20000607)
-rw-r--r--  bsd-core/Makefile  5
-rw-r--r--  bsd-core/drmP.h  708
-rw-r--r--  bsd-core/tdfx/Makefile  15
-rw-r--r--  bsd/Imakefile  31
-rw-r--r--  bsd/Makefile  5
-rw-r--r--  bsd/Makefile.bsd  5
-rw-r--r--  bsd/drm.h  359
-rw-r--r--  bsd/drm/Makefile  17
-rw-r--r--  bsd/drm/agpsupport.c  271
-rw-r--r--  bsd/drm/auth.c  168
-rw-r--r--  bsd/drm/bufs.c  500
-rw-r--r--  bsd/drm/context.c  297
-rw-r--r--  bsd/drm/dma.c  534
-rw-r--r--  bsd/drm/drawable.c  50
-rw-r--r--  bsd/drm/drmstat.c  418
-rw-r--r--  bsd/drm/fops.c  260
-rw-r--r--  bsd/drm/init.c  101
-rw-r--r--  bsd/drm/ioctl.c  120
-rw-r--r--  bsd/drm/lists.c  258
-rw-r--r--  bsd/drm/lock.c  220
-rw-r--r--  bsd/drm/memory.c  458
-rw-r--r--  bsd/drm/proc.c  568
-rw-r--r--  bsd/drm/sysctl.c  554
-rw-r--r--  bsd/drm/vm.c  104
-rw-r--r--  bsd/drmP.h  708
-rw-r--r--  bsd/gamma/Makefile  15
-rw-r--r--  bsd/gamma/gamma_dma.c  802
-rw-r--r--  bsd/gamma/gamma_drv.c  574
-rw-r--r--  bsd/gamma/gamma_drv.h  50
-rw-r--r--  bsd/i810_drm.h  188
-rw-r--r--  bsd/mga_drm.h  269
-rw-r--r--  bsd/r128_drm.h  111
-rw-r--r--  bsd/tdfx/Makefile  15
-rw-r--r--  bsd/tdfx/tdfx_context.c  201
-rw-r--r--  bsd/tdfx/tdfx_drv.c  694
-rw-r--r--  bsd/tdfx/tdfx_drv.h  47
-rw-r--r--  linux-core/Makefile.kernel  2
-rw-r--r--  linux-core/drmP.h  11
-rw-r--r--  linux-core/i810_dma.c  717
-rw-r--r--  linux-core/i810_drm.h  129
-rw-r--r--  linux-core/i810_drv.c  8
-rw-r--r--  linux-core/i810_drv.h  66
-rw-r--r--  linux-core/mga_drv.c  1
-rw-r--r--  linux/Makefile.kernel  2
-rw-r--r--  linux/Makefile.linux  9
-rw-r--r--  linux/agpsupport.c  2
-rw-r--r--  linux/drm.h  24
-rw-r--r--  linux/drmP.h  11
-rw-r--r--  linux/gamma_dma.c  30
-rw-r--r--  linux/gamma_drv.c  39
-rw-r--r--  linux/gamma_drv.h  2
-rw-r--r--  linux/i810_bufs.c  111
-rw-r--r--  linux/i810_dma.c  717
-rw-r--r--  linux/i810_drm.h  129
-rw-r--r--  linux/i810_drv.c  8
-rw-r--r--  linux/i810_drv.h  66
-rw-r--r--  linux/mga_dma.c  413
-rw-r--r--  linux/mga_drm.h  54
-rw-r--r--  linux/mga_drv.c  1
-rw-r--r--  linux/mga_drv.h  142
-rw-r--r--  linux/mga_state.c  634
-rw-r--r--  linux/proc.c  2
-rw-r--r--  linux/vm.c  28
-rw-r--r--  shared-core/drm.h  24
-rw-r--r--  shared/drm.h  24
65 files changed, 11983 insertions(+), 1123 deletions(-)
diff --git a/bsd-core/Makefile b/bsd-core/Makefile
new file mode 100644
index 00000000..ff26c762
--- /dev/null
+++ b/bsd-core/Makefile
@@ -0,0 +1,5 @@
+# $FreeBSD$
+
+SUBDIR = drm tdfx gamma
+
+.include <bsd.subdir.mk>
diff --git a/bsd-core/drmP.h b/bsd-core/drmP.h
new file mode 100644
index 00000000..863836a6
--- /dev/null
+++ b/bsd-core/drmP.h
@@ -0,0 +1,708 @@
+/* drmP.h -- Private header for Direct Rendering Manager -*- c -*-
+ * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
+ * Revised: Tue Oct 12 08:51:07 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmP.h,v 1.58 1999/08/30 13:05:00 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmP.h,v 1.1 1999/09/25 14:37:59 dawes Exp $
+ *
+ */
+
+#ifndef _DRM_P_H_
+#define _DRM_P_H_
+
+#ifdef _KERNEL
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/stat.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/fcntl.h>
+#include <sys/uio.h>
+#include <sys/filio.h>
+#include <sys/sysctl.h>
+#include <sys/select.h>
+#include <sys/bus.h>
+#include <sys/taskqueue.h>
+
+#ifdef DRM_AGP
+#include <pci/agpvar.h>
+#endif
+
+#include "drm.h"
+
+typedef u_int32_t atomic_t;
+typedef u_int32_t cycles_t;
+typedef u_int32_t spinlock_t;
+#define atomic_set(p, v) (*(p) = (v))
+#define atomic_read(p) (*(p))
+#define atomic_inc(p) atomic_add_int(p, 1)
+#define atomic_dec(p) atomic_subtract_int(p, 1)
+#define atomic_add(n, p) atomic_add_int(p, n)
+#define atomic_sub(n, p) atomic_subtract_int(p, n)
+
+/* Fake this */
+static __inline u_int32_t
+test_and_set_bit(int b, volatile u_int32_t *p)
+{
+ u_int32_t m = 1<<b;
+ u_int32_t r = *p & m;
+ *p |= m;
+ return r;
+}
+
+static __inline void
+clear_bit(int b, volatile u_int32_t *p)
+{
+ atomic_clear_int(p + (b >> 5), 1 << (b & 0x1f));
+}
+
+static __inline void
+set_bit(int b, volatile u_int32_t *p)
+{
+ atomic_set_int(p + (b >> 5), 1 << (b & 0x1f));
+}
+
+static __inline int
+test_bit(int b, volatile u_int32_t *p)
+{
+ return p[b >> 5] & (1 << (b & 0x1f));
+}
+
+static __inline int
+find_first_zero_bit(volatile u_int32_t *p, int max)
+{
+ int b;
+
+ for (b = 0; b < max; b += 32) {
+ if (p[b >> 5] != ~0) {
+ for (;;) {
+ if (!(p[b >> 5] & (1 << (b & 0x1f))))
+ return b;
+ b++;
+ }
+ }
+ }
+ return max;
+}
+
+#define spldrm() spltty()
+
+#define memset(p, v, s) bzero(p, s)
+
+/*
+ * Software interrupts for DMA pipe feeding. The FreeBSD kernel apis
+ * are severely lacking here.
+ */
+#define SWI_DRI (SWI_VM+2)
+
+#define DRM_DEBUG_CODE 2 /* Include debugging code (if > 1, then
+ also include looping detection). */
+#define DRM_DMA_HISTOGRAM 1 /* Make histogram of DMA latency. */
+
+#define DRM_HASH_SIZE 16 /* Size of key hash table */
+#define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */
+#define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */
+#define DRM_LOOPING_LIMIT 5000000
+#define DRM_BSZ 1024 /* Buffer size for /dev/drm? output */
+#define DRM_TIME_SLICE (hz/20) /* Time slice for GLXContexts */
+#define DRM_LOCK_SLICE 1 /* Time slice for lock, in jiffies */
+
+#define DRM_FLAG_DEBUG 0x01
+#define DRM_FLAG_NOCTX 0x02
+
+#define DRM_MEM_DMA 0
+#define DRM_MEM_SAREA 1
+#define DRM_MEM_DRIVER 2
+#define DRM_MEM_MAGIC 3
+#define DRM_MEM_IOCTLS 4
+#define DRM_MEM_MAPS 5
+#define DRM_MEM_VMAS 6
+#define DRM_MEM_BUFS 7
+#define DRM_MEM_SEGS 8
+#define DRM_MEM_PAGES 9
+#define DRM_MEM_FILES 10
+#define DRM_MEM_QUEUES 11
+#define DRM_MEM_CMDS 12
+#define DRM_MEM_MAPPINGS 13
+#define DRM_MEM_BUFLISTS 14
+#define DRM_MEM_AGPLISTS 15
+#define DRM_MEM_TOTALAGP 16
+#define DRM_MEM_BOUNDAGP 17
+#define DRM_MEM_CTXBITMAP 18
+
+#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
+
+ /* Backward compatibility section */
+#ifndef _PAGE_PWT
+ /* The name of _PAGE_WT was changed to
+ _PAGE_PWT in Linux 2.2.6 */
+#define _PAGE_PWT _PAGE_WT
+#endif
+
+#define __drm_dummy_lock(lock) (*(__volatile__ unsigned int *)lock)
+#define _DRM_CAS(lock,old,new,__ret) \
+ do { \
+ int __dummy; /* Can't mark eax as clobbered */ \
+ __asm__ __volatile__( \
+ "lock ; cmpxchg %4,%1\n\t" \
+ "setnz %0" \
+ : "=d" (__ret), \
+ "=m" (__drm_dummy_lock(lock)), \
+ "=a" (__dummy) \
+ : "2" (old), \
+ "r" (new)); \
+ } while (0)
+
+
+
+ /* Macros to make printk easier */
+#define DRM_ERROR(fmt, arg...) \
+ printf("error: " "[" DRM_NAME ":" __FUNCTION__ "] *ERROR* " fmt , ##arg)
+#define DRM_MEM_ERROR(area, fmt, arg...) \
+ printf("error: " "[" DRM_NAME ":" __FUNCTION__ ":%s] *ERROR* " fmt , \
+ drm_mem_stats[area].name , ##arg)
+#define DRM_INFO(fmt, arg...) printf("info: " "[" DRM_NAME "] " fmt , ##arg)
+
+#if DRM_DEBUG_CODE
+#define DRM_DEBUG(fmt, arg...) \
+ do { \
+ if (drm_flags&DRM_FLAG_DEBUG) \
+ printf("[" DRM_NAME ":" __FUNCTION__ "] " fmt , \
+ ##arg); \
+ } while (0)
+#else
+#define DRM_DEBUG(fmt, arg...) do { } while (0)
+#endif
+
+#define DRM_PROC_LIMIT (PAGE_SIZE-80)
+
+#define DRM_SYSCTL_PRINT(fmt, arg...) \
+ snprintf(buf, sizeof(buf), fmt, ##arg); \
+ error = SYSCTL_OUT(req, buf, strlen(buf)); \
+ if (error) return error;
+
+#define DRM_SYSCTL_PRINT_RET(ret, fmt, arg...) \
+ snprintf(buf, sizeof(buf), fmt, ##arg); \
+ error = SYSCTL_OUT(req, buf, strlen(buf)); \
+ if (error) { ret; return error; }
+
+ /* Internal types and structures */
+#define DRM_ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
+#define DRM_MIN(a,b) ((a)<(b)?(a):(b))
+#define DRM_MAX(a,b) ((a)>(b)?(a):(b))
+
+#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
+#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
+#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
+
+typedef struct drm_ioctl_desc {
+ d_ioctl_t *func;
+ int auth_needed;
+ int root_only;
+} drm_ioctl_desc_t;
+
+typedef struct drm_devstate {
+ pid_t owner; /* X server pid holding x_lock */
+
+} drm_devstate_t;
+
+typedef struct drm_magic_entry {
+ drm_magic_t magic;
+ struct drm_file *priv;
+ struct drm_magic_entry *next;
+} drm_magic_entry_t;
+
+typedef struct drm_magic_head {
+ struct drm_magic_entry *head;
+ struct drm_magic_entry *tail;
+} drm_magic_head_t;
+
+typedef struct drm_vma_entry {
+ struct vm_area_struct *vma;
+ struct drm_vma_entry *next;
+ pid_t pid;
+} drm_vma_entry_t;
+
+typedef struct drm_buf {
+ int idx; /* Index into master buflist */
+ int total; /* Buffer size */
+ int order; /* log-base-2(total) */
+ int used; /* Amount of buffer in use (for DMA) */
+ unsigned long offset; /* Byte offset (used internally) */
+ void *address; /* Address of buffer */
+ unsigned long bus_address; /* Bus address of buffer */
+ struct drm_buf *next; /* Kernel-only: used for free list */
+ __volatile__ int waiting; /* On kernel DMA queue */
+ __volatile__ int pending; /* On hardware DMA queue */
+ int dma_wait; /* Processes waiting */
+ pid_t pid; /* PID of holding process */
+ int context; /* Kernel queue for this buffer */
+ int while_locked;/* Dispatch this buffer while locked */
+ enum {
+ DRM_LIST_NONE = 0,
+ DRM_LIST_FREE = 1,
+ DRM_LIST_WAIT = 2,
+ DRM_LIST_PEND = 3,
+ DRM_LIST_PRIO = 4,
+ DRM_LIST_RECLAIM = 5
+ } list; /* Which list we're on */
+
+ void *dev_private;
+ int dev_priv_size;
+
+#if DRM_DMA_HISTOGRAM
+ struct timespec time_queued; /* Queued to kernel DMA queue */
+ struct timespec time_dispatched; /* Dispatched to hardware */
+ struct timespec time_completed; /* Completed by hardware */
+ struct timespec time_freed; /* Back on freelist */
+#endif
+} drm_buf_t;
+
+#if DRM_DMA_HISTOGRAM
+#define DRM_DMA_HISTOGRAM_SLOTS 9
+#define DRM_DMA_HISTOGRAM_INITIAL 10
+#define DRM_DMA_HISTOGRAM_NEXT(current) ((current)*10)
+typedef struct drm_histogram {
+ atomic_t total;
+
+ atomic_t queued_to_dispatched[DRM_DMA_HISTOGRAM_SLOTS];
+ atomic_t dispatched_to_completed[DRM_DMA_HISTOGRAM_SLOTS];
+ atomic_t completed_to_freed[DRM_DMA_HISTOGRAM_SLOTS];
+
+ atomic_t queued_to_completed[DRM_DMA_HISTOGRAM_SLOTS];
+ atomic_t queued_to_freed[DRM_DMA_HISTOGRAM_SLOTS];
+
+ atomic_t dma[DRM_DMA_HISTOGRAM_SLOTS];
+ atomic_t schedule[DRM_DMA_HISTOGRAM_SLOTS];
+ atomic_t ctx[DRM_DMA_HISTOGRAM_SLOTS];
+ atomic_t lacq[DRM_DMA_HISTOGRAM_SLOTS];
+ atomic_t lhld[DRM_DMA_HISTOGRAM_SLOTS];
+} drm_histogram_t;
+#endif
+
+ /* bufs is one longer than it has to be */
+typedef struct drm_waitlist {
+ int count; /* Number of possible buffers */
+ drm_buf_t **bufs; /* List of pointers to buffers */
+ drm_buf_t **rp; /* Read pointer */
+ drm_buf_t **wp; /* Write pointer */
+ drm_buf_t **end; /* End pointer */
+ spinlock_t read_lock;
+ spinlock_t write_lock;
+} drm_waitlist_t;
+
+typedef struct drm_freelist {
+ int initialized; /* Freelist in use */
+ atomic_t count; /* Number of free buffers */
+ drm_buf_t *next; /* End pointer */
+
+ int waiting; /* Processes waiting on free bufs */
+ int low_mark; /* Low water mark */
+ int high_mark; /* High water mark */
+ atomic_t wfh; /* If waiting for high mark */
+} drm_freelist_t;
+
+typedef struct drm_buf_entry {
+ int buf_size;
+ int buf_count;
+ drm_buf_t *buflist;
+ int seg_count;
+ int page_order;
+ unsigned long *seglist;
+
+ drm_freelist_t freelist;
+} drm_buf_entry_t;
+
+typedef struct drm_hw_lock {
+ __volatile__ unsigned int lock;
+ char padding[60]; /* Pad to cache line */
+} drm_hw_lock_t;
+
+typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;
+typedef struct drm_file {
+ TAILQ_ENTRY(drm_file) link;
+ int authenticated;
+ int minor;
+ pid_t pid;
+ uid_t uid;
+ int refs;
+ drm_magic_t magic;
+ unsigned long ioctl_count;
+ struct drm_device *devXX;
+} drm_file_t;
+
+
+typedef struct drm_queue {
+ atomic_t use_count; /* Outstanding uses (+1) */
+ atomic_t finalization; /* Finalization in progress */
+ atomic_t block_count; /* Count of processes waiting */
+ atomic_t block_read; /* Queue blocked for reads */
+ int read_queue; /* Processes waiting on block_read */
+ atomic_t block_write; /* Queue blocked for writes */
+ int write_queue; /* Processes waiting on block_write */
+ atomic_t total_queued; /* Total queued statistic */
+ atomic_t total_flushed;/* Total flushes statistic */
+ atomic_t total_locks; /* Total locks statistics */
+ drm_ctx_flags_t flags; /* Context preserving and 2D-only */
+ drm_waitlist_t waitlist; /* Pending buffers */
+ int flush_queue; /* Processes waiting until flush */
+} drm_queue_t;
+
+typedef struct drm_lock_data {
+ drm_hw_lock_t *hw_lock; /* Hardware lock */
+ pid_t pid; /* PID of lock holder (0=kernel) */
+ int lock_queue; /* Queue of blocked processes */
+ unsigned long lock_time; /* Time of last lock in jiffies */
+} drm_lock_data_t;
+
+typedef struct drm_device_dma {
+ /* Performance Counters */
+ atomic_t total_prio; /* Total DRM_DMA_PRIORITY */
+ atomic_t total_bytes; /* Total bytes DMA'd */
+ atomic_t total_dmas; /* Total DMA buffers dispatched */
+
+ atomic_t total_missed_dma; /* Missed drm_do_dma */
+ atomic_t total_missed_lock; /* Missed lock in drm_do_dma */
+ atomic_t total_missed_free; /* Missed drm_free_this_buffer */
+ atomic_t total_missed_sched;/* Missed drm_dma_schedule */
+
+ atomic_t total_tried; /* Tried next_buffer */
+ atomic_t total_hit; /* Sent next_buffer */
+ atomic_t total_lost; /* Lost interrupt */
+
+ drm_buf_entry_t bufs[DRM_MAX_ORDER+1];
+ int buf_count;
+ drm_buf_t **buflist; /* Vector of pointers into bufs */
+ int seg_count;
+ int page_count;
+ vm_offset_t *pagelist;
+ unsigned long byte_count;
+ enum {
+ _DRM_DMA_USE_AGP = 0x01
+ } flags;
+
+ /* DMA support */
+ drm_buf_t *this_buffer; /* Buffer being sent */
+ drm_buf_t *next_buffer; /* Selected buffer to send */
+ drm_queue_t *next_queue; /* Queue from which buffer selected*/
+ int waiting; /* Processes waiting on free bufs */
+} drm_device_dma_t;
+
+#ifdef DRM_AGP
+
+typedef struct drm_agp_mem {
+ void *handle;
+ unsigned long bound; /* address */
+ int pages;
+ struct drm_agp_mem *prev;
+ struct drm_agp_mem *next;
+} drm_agp_mem_t;
+
+typedef struct drm_agp_head {
+ device_t agpdev;
+ struct agp_info info;
+ const char *chipset;
+ drm_agp_mem_t *memory;
+ unsigned long mode;
+ int enabled;
+ int acquired;
+ unsigned long base;
+ int agp_mtrr;
+} drm_agp_head_t;
+
+#endif
+
+typedef struct drm_device {
+ const char *name; /* Simple driver name */
+ char *unique; /* Unique identifier: e.g., busid */
+ int unique_len; /* Length of unique field */
+ device_t device; /* Device instance from newbus */
+ dev_t devnode; /* Device number for mknod */
+ char *devname; /* For /proc/interrupts */
+
+ int blocked; /* Blocked due to VC switch? */
+ int flags; /* Flags to open(2) */
+ int writable; /* Opened with FWRITE */
+ struct proc_dir_entry *root; /* Root for this device's entries */
+
+ /* Locks */
+ struct simplelock count_lock; /* For inuse, open_count, buf_use */
+ struct lock dev_lock; /* For others */
+
+ /* Usage Counters */
+ int open_count; /* Outstanding files open */
+ atomic_t ioctl_count; /* Outstanding IOCTLs pending */
+ atomic_t vma_count; /* Outstanding vma areas open */
+ int buf_use; /* Buffers in use -- cannot alloc */
+ atomic_t buf_alloc; /* Buffer allocation in progress */
+
+ /* Performance Counters */
+ atomic_t total_open;
+ atomic_t total_close;
+ atomic_t total_ioctl;
+ atomic_t total_irq; /* Total interruptions */
+ atomic_t total_ctx; /* Total context switches */
+
+ atomic_t total_locks;
+ atomic_t total_unlocks;
+ atomic_t total_contends;
+ atomic_t total_sleeps;
+
+ /* Authentication */
+ drm_file_list_t files;
+ drm_magic_head_t magiclist[DRM_HASH_SIZE];
+
+ /* Memory management */
+ drm_map_t **maplist; /* Vector of pointers to regions */
+ int map_count; /* Number of mappable regions */
+
+ drm_vma_entry_t *vmalist; /* List of vmas (for debugging) */
+ drm_lock_data_t lock; /* Information on hardware lock */
+
+ /* DMA queues (contexts) */
+ int queue_count; /* Number of active DMA queues */
+ int queue_reserved; /* Number of reserved DMA queues */
+ int queue_slots; /* Actual length of queuelist */
+ drm_queue_t **queuelist; /* Vector of pointers to DMA queues */
+ drm_device_dma_t *dma; /* Optional pointer for DMA support */
+
+ /* Context support */
+ struct resource *irq; /* Interrupt used by board */
+ void *irqh; /* Handle from bus_setup_intr */
+ __volatile__ int context_flag; /* Context swapping flag */
+ __volatile__ int interrupt_flag;/* Interruption handler flag */
+ __volatile__ int dma_flag; /* DMA dispatch flag */
+ struct callout timer; /* Timer for delaying ctx switch */
+ int context_wait; /* Processes waiting on ctx switch */
+ int last_checked; /* Last context checked for DMA */
+ int last_context; /* Last current context */
+ int last_switch; /* Time at last context switch */
+ struct task task;
+ struct timespec ctx_start;
+ struct timespec lck_start;
+#if DRM_DMA_HISTOGRAM
+ drm_histogram_t histo;
+#endif
+
+ /* Callback to X server for context switch
+ and for heavy-handed reset. */
+ char buf[DRM_BSZ]; /* Output buffer */
+ char *buf_rp; /* Read pointer */
+ char *buf_wp; /* Write pointer */
+ char *buf_end; /* End pointer */
+ struct sigio *buf_sigio; /* Processes waiting for SIGIO */
+ struct selinfo buf_sel; /* Workspace for select/poll */
+ int buf_readers; /* Processes waiting to read */
+ int buf_writers; /* Processes waiting to ctx switch */
+ int buf_selecting; /* True if poll sleeper */
+
+ /* Sysctl support */
+ struct drm_sysctl_info *sysctl;
+
+#ifdef DRM_AGP
+ drm_agp_head_t *agp;
+#endif
+ u_int32_t *ctx_bitmap;
+ void *dev_private;
+} drm_device_t;
+
+
+ /* Internal function definitions */
+
+ /* Misc. support (init.c) */
+extern int drm_flags;
+extern void drm_parse_options(char *s);
+
+
+ /* Device support (fops.c) */
+extern drm_file_t *drm_find_file_by_proc(drm_device_t *dev, struct proc *p);
+extern int drm_open_helper(dev_t kdev, int flags, int fmt, struct proc *p,
+ drm_device_t *dev);
+extern d_close_t drm_close;
+extern d_read_t drm_read;
+extern d_write_t drm_write;
+extern d_poll_t drm_poll;
+extern int drm_fsetown(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p);
+extern int drm_fgetown(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p);
+extern int drm_write_string(drm_device_t *dev, const char *s);
+
+#if 0
+ /* Mapping support (vm.c) */
+extern unsigned long drm_vm_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int write_access);
+extern unsigned long drm_vm_shm_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int write_access);
+extern unsigned long drm_vm_dma_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int write_access);
+extern void drm_vm_open(struct vm_area_struct *vma);
+extern void drm_vm_close(struct vm_area_struct *vma);
+extern int drm_mmap_dma(struct file *filp,
+ struct vm_area_struct *vma);
+#endif
+extern d_mmap_t drm_mmap;
+
+ /* Proc support (proc.c) */
+extern int drm_sysctl_init(drm_device_t *dev);
+extern int drm_sysctl_cleanup(drm_device_t *dev);
+
+ /* Memory management support (memory.c) */
+extern void drm_mem_init(void);
+extern int drm_mem_info SYSCTL_HANDLER_ARGS;
+extern void *drm_alloc(size_t size, int area);
+extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size,
+ int area);
+extern char *drm_strdup(const char *s, int area);
+extern void drm_strfree(char *s, int area);
+extern void drm_free(void *pt, size_t size, int area);
+extern unsigned long drm_alloc_pages(int order, int area);
+extern void drm_free_pages(unsigned long address, int order,
+ int area);
+extern void *drm_ioremap(unsigned long offset, unsigned long size);
+extern void drm_ioremapfree(void *pt, unsigned long size);
+
+#ifdef DRM_AGP
+extern void *drm_alloc_agp(int pages, u_int32_t type);
+extern int drm_free_agp(void *handle, int pages);
+extern int drm_bind_agp(void *handle, unsigned int start);
+extern int drm_unbind_agp(void *handle);
+#endif
+
+ /* Buffer management support (bufs.c) */
+extern int drm_order(unsigned long size);
+extern d_ioctl_t drm_addmap;
+extern d_ioctl_t drm_addbufs;
+extern d_ioctl_t drm_infobufs;
+extern d_ioctl_t drm_markbufs;
+extern d_ioctl_t drm_freebufs;
+extern d_ioctl_t drm_mapbufs;
+
+
+ /* Buffer list management support (lists.c) */
+extern int drm_waitlist_create(drm_waitlist_t *bl, int count);
+extern int drm_waitlist_destroy(drm_waitlist_t *bl);
+extern int drm_waitlist_put(drm_waitlist_t *bl, drm_buf_t *buf);
+extern drm_buf_t *drm_waitlist_get(drm_waitlist_t *bl);
+
+extern int drm_freelist_create(drm_freelist_t *bl, int count);
+extern int drm_freelist_destroy(drm_freelist_t *bl);
+extern int drm_freelist_put(drm_device_t *dev, drm_freelist_t *bl,
+ drm_buf_t *buf);
+extern drm_buf_t *drm_freelist_get(drm_freelist_t *bl, int block);
+
+ /* DMA support (gen_dma.c) */
+extern void drm_dma_setup(drm_device_t *dev);
+extern void drm_dma_takedown(drm_device_t *dev);
+extern void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf);
+extern void drm_reclaim_buffers(drm_device_t *dev, pid_t pid);
+extern int drm_context_switch(drm_device_t *dev, int old, int new);
+extern int drm_context_switch_complete(drm_device_t *dev, int new);
+extern void drm_wakeup(drm_device_t *dev, drm_buf_t *buf);
+extern void drm_clear_next_buffer(drm_device_t *dev);
+extern int drm_select_queue(drm_device_t *dev,
+ void (*wrapper)(void *));
+extern int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *dma);
+extern int drm_dma_get_buffers(drm_device_t *dev, drm_dma_t *dma);
+#if DRM_DMA_HISTOGRAM
+extern int drm_histogram_slot(struct timespec *ts);
+extern void drm_histogram_compute(drm_device_t *dev, drm_buf_t *buf);
+#endif
+
+
+ /* Misc. IOCTL support (ioctl.c) */
+extern d_ioctl_t drm_irq_busid;
+extern d_ioctl_t drm_getunique;
+extern d_ioctl_t drm_setunique;
+
+
+ /* Context IOCTL support (context.c) */
+extern d_ioctl_t drm_resctx;
+extern d_ioctl_t drm_addctx;
+extern d_ioctl_t drm_modctx;
+extern d_ioctl_t drm_getctx;
+extern d_ioctl_t drm_switchctx;
+extern d_ioctl_t drm_newctx;
+extern d_ioctl_t drm_rmctx;
+
+
+ /* Drawable IOCTL support (drawable.c) */
+extern d_ioctl_t drm_adddraw;
+extern d_ioctl_t drm_rmdraw;
+
+
+ /* Authentication IOCTL support (auth.c) */
+extern int drm_add_magic(drm_device_t *dev, drm_file_t *priv,
+ drm_magic_t magic);
+extern int drm_remove_magic(drm_device_t *dev, drm_magic_t magic);
+extern d_ioctl_t drm_getmagic;
+extern d_ioctl_t drm_authmagic;
+
+
+ /* Locking IOCTL support (lock.c) */
+extern d_ioctl_t drm_block;
+extern d_ioctl_t drm_unblock;
+extern int drm_lock_take(__volatile__ unsigned int *lock,
+ unsigned int context);
+extern int drm_lock_transfer(drm_device_t *dev,
+ __volatile__ unsigned int *lock,
+ unsigned int context);
+extern int drm_lock_free(drm_device_t *dev,
+ __volatile__ unsigned int *lock,
+ unsigned int context);
+extern d_ioctl_t drm_finish;
+extern int drm_flush_unblock(drm_device_t *dev, int context,
+ drm_lock_flags_t flags);
+extern int drm_flush_block_and_flush(drm_device_t *dev, int context,
+ drm_lock_flags_t flags);
+
+ /* Context Bitmap support (ctxbitmap.c) */
+extern int drm_ctxbitmap_init(drm_device_t *dev);
+extern void drm_ctxbitmap_cleanup(drm_device_t *dev);
+extern int drm_ctxbitmap_next(drm_device_t *dev);
+extern void drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle);
+
+#ifdef DRM_AGP
+ /* AGP/GART support (agpsupport.c) */
+extern drm_agp_head_t *drm_agp_init(void);
+extern d_ioctl_t drm_agp_acquire;
+extern d_ioctl_t drm_agp_release;
+extern d_ioctl_t drm_agp_enable;
+extern d_ioctl_t drm_agp_info;
+extern d_ioctl_t drm_agp_alloc;
+extern d_ioctl_t drm_agp_free;
+extern d_ioctl_t drm_agp_unbind;
+extern d_ioctl_t drm_agp_bind;
+#endif
+#endif
+#endif
diff --git a/bsd-core/tdfx/Makefile b/bsd-core/tdfx/Makefile
new file mode 100644
index 00000000..e0ff8ffa
--- /dev/null
+++ b/bsd-core/tdfx/Makefile
@@ -0,0 +1,15 @@
+# $FreeBSD$
+
+KMOD = tdfx
+SRCS = tdfx_drv.c tdfx_context.c
+SRCS += device_if.h bus_if.h pci_if.h
+CFLAGS += ${DEBUG_FLAGS} -I..
+KERN = /usr/src/sys
+
+@:
+ ln -sf /sys @
+
+machine:
+ ln -sf /sys/i386/include machine
+
+.include "/usr/src/sys/conf/kmod.mk"
diff --git a/bsd/Imakefile b/bsd/Imakefile
new file mode 100644
index 00000000..cf042d21
--- /dev/null
+++ b/bsd/Imakefile
@@ -0,0 +1,31 @@
+XCOMM $XFree86$
+XCOMM $PI$
+
+#include <Server.tmpl>
+
+LinkSourceFile(xf86drm.c,..)
+LinkSourceFile(xf86drmHash.c,..)
+LinkSourceFile(xf86drmRandom.c,..)
+LinkSourceFile(xf86drmSL.c,..)
+LinkSourceFile(xf86drm.h,$(XF86OSSRC))
+LinkSourceFile(xf86_OSproc.h,$(XF86OSSRC))
+LinkSourceFile(sigio.c,$(XF86OSSRC)/shared)
+
+XCOMM This is a kludge until we determine how best to build the
+XCOMM kernel-specific device driver. This allows us to continue
+XCOMM to maintain the single Makefile.bsd with kernel-specific
+XCOMM support. Later, we can move to a different Imakefile.
+
+#if BuildXF86DRI && BuildXF86DRM
+all::
+ $(MAKE) -f Makefile.bsd
+
+install::
+ $(MAKE) -f Makefile.bsd install
+#else
+all::
+ echo 'Use "make -f Makefile.bsd" to manually build drm.o'
+#endif
+
+clean::
+ $(MAKE) -f Makefile.bsd clean
diff --git a/bsd/Makefile b/bsd/Makefile
new file mode 100644
index 00000000..ff26c762
--- /dev/null
+++ b/bsd/Makefile
@@ -0,0 +1,5 @@
+# $FreeBSD$
+
+SUBDIR = drm tdfx gamma
+
+.include <bsd.subdir.mk>
diff --git a/bsd/Makefile.bsd b/bsd/Makefile.bsd
new file mode 100644
index 00000000..ff26c762
--- /dev/null
+++ b/bsd/Makefile.bsd
@@ -0,0 +1,5 @@
+# $FreeBSD$
+
+SUBDIR = drm tdfx gamma
+
+.include <bsd.subdir.mk>
diff --git a/bsd/drm.h b/bsd/drm.h
new file mode 100644
index 00000000..9e0ade35
--- /dev/null
+++ b/bsd/drm.h
@@ -0,0 +1,359 @@
+/* drm.h -- Header for Direct Rendering Manager -*- c -*-
+ * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
+ * Revised: Fri Aug 20 13:08:18 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.46 1999/08/20 20:00:53 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.1 1999/09/25 14:37:58 dawes Exp $
+ *
+ */
+
+#ifndef _DRM_H_
+#define _DRM_H_
+
+#include <sys/ioccom.h> /* For _IO* macros */
+
+#define DRM_DEV_DRM "/dev/drm"
+#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
+#define DRM_DEV_UID 0
+#define DRM_DEV_GID 0
+
+
+#define DRM_NAME "drm" /* Name in kernel, /dev */
+#define DRM_MIN_ORDER 5 /* At least 2^5 bytes = 32 bytes */
+#define DRM_MAX_ORDER 22 /* Up to 2^22 bytes = 4MB */
+#define DRM_RAM_PERCENT 10 /* How much system ram can we lock? */
+
+#define _DRM_LOCK_HELD 0x80000000 /* Hardware lock is held */
+#define _DRM_LOCK_CONT 0x40000000 /* Hardware lock is contended */
+#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
+#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
+#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
+
+typedef unsigned long drm_handle_t;
+typedef unsigned int drm_context_t;
+typedef unsigned int drm_drawable_t;
+typedef unsigned int drm_magic_t;
+
+/* Warning: If you change this structure, make sure you change
+ * XF86DRIClipRectRec in the server as well */
+
+typedef struct drm_clip_rect {
+ unsigned short x1;
+ unsigned short y1;
+ unsigned short x2;
+ unsigned short y2;
+} drm_clip_rect_t;
+
+/* Separate include files for the i810/mga specific structures */
+#include "mga_drm.h"
+#include "i810_drm.h"
+#include "r128_drm.h"
+
+typedef struct drm_version {
+ int version_major; /* Major version */
+ int version_minor; /* Minor version */
+ int version_patchlevel;/* Patch level */
+ size_t name_len; /* Length of name buffer */
+ char *name; /* Name of driver */
+ size_t date_len; /* Length of date buffer */
+ char *date; /* User-space buffer to hold date */
+ size_t desc_len; /* Length of desc buffer */
+ char *desc; /* User-space buffer to hold desc */
+} drm_version_t;
+
+typedef struct drm_unique {
+ size_t unique_len; /* Length of unique */
+ char *unique; /* Unique name for driver instantiation */
+} drm_unique_t;
+
+typedef struct drm_list {
+ int count; /* Length of user-space structures */
+ drm_version_t *version;
+} drm_list_t;
+
+typedef struct drm_block {
+ int unused;
+} drm_block_t;
+
+typedef struct drm_control {
+ enum {
+ DRM_ADD_COMMAND,
+ DRM_RM_COMMAND,
+ DRM_INST_HANDLER,
+ DRM_UNINST_HANDLER
+ } func;
+ int irq;
+} drm_control_t;
+
+typedef enum drm_map_type {
+ _DRM_FRAME_BUFFER = 0, /* WC (no caching), no core dump */
+ _DRM_REGISTERS = 1, /* no caching, no core dump */
+ _DRM_SHM = 2, /* shared, cached */
+ _DRM_AGP = 3 /* AGP/GART */
+} drm_map_type_t;
+
+typedef enum drm_map_flags {
+ _DRM_RESTRICTED = 0x01, /* Cannot be mapped to user-virtual */
+ _DRM_READ_ONLY = 0x02,
+ _DRM_LOCKED = 0x04, /* shared, cached, locked */
+ _DRM_KERNEL = 0x08, /* kernel requires access */
+ _DRM_WRITE_COMBINING = 0x10, /* use write-combining if available */
+ _DRM_CONTAINS_LOCK = 0x20 /* SHM page that contains lock */
+} drm_map_flags_t;
+
+typedef struct drm_map {
+ unsigned long offset; /* Requested physical address (0 for SAREA)*/
+ unsigned long size; /* Requested physical size (bytes) */
+ drm_map_type_t type; /* Type of memory to map */
+ drm_map_flags_t flags; /* Flags */
+ void *handle; /* User-space: "Handle" to pass to mmap */
+ /* Kernel-space: kernel-virtual address */
+ int mtrr; /* MTRR slot used */
+ /* Private data */
+} drm_map_t;
+
+typedef enum drm_lock_flags {
+ _DRM_LOCK_READY = 0x01, /* Wait until hardware is ready for DMA */
+ _DRM_LOCK_QUIESCENT = 0x02, /* Wait until hardware quiescent */
+ _DRM_LOCK_FLUSH = 0x04, /* Flush this context's DMA queue first */
+ _DRM_LOCK_FLUSH_ALL = 0x08, /* Flush all DMA queues first */
+ /* These *HALT* flags aren't supported yet
+ -- they will be used to support the
+ full-screen DGA-like mode. */
+ _DRM_HALT_ALL_QUEUES = 0x10, /* Halt all current and future queues */
+ _DRM_HALT_CUR_QUEUES = 0x20 /* Halt all current queues */
+} drm_lock_flags_t;
+
+typedef struct drm_lock {
+ int context;
+ drm_lock_flags_t flags;
+} drm_lock_t;
+
+typedef enum drm_dma_flags { /* These values *MUST* match xf86drm.h */
+ /* Flags for DMA buffer dispatch */
+ _DRM_DMA_BLOCK = 0x01, /* Block until buffer dispatched.
+ Note, the buffer may not yet have
+ been processed by the hardware --
+ getting a hardware lock with the
+ hardware quiescent will ensure
+ that the buffer has been
+ processed. */
+ _DRM_DMA_WHILE_LOCKED = 0x02, /* Dispatch while lock held */
+ _DRM_DMA_PRIORITY = 0x04, /* High priority dispatch */
+
+ /* Flags for DMA buffer request */
+ _DRM_DMA_WAIT = 0x10, /* Wait for free buffers */
+ _DRM_DMA_SMALLER_OK = 0x20, /* Smaller-than-requested buffers ok */
+ _DRM_DMA_LARGER_OK = 0x40 /* Larger-than-requested buffers ok */
+} drm_dma_flags_t;
+
+typedef struct drm_buf_desc {
+ int count; /* Number of buffers of this size */
+ int size; /* Size in bytes */
+ int low_mark; /* Low water mark */
+ int high_mark; /* High water mark */
+ enum {
+ _DRM_PAGE_ALIGN = 0x01, /* Align on page boundaries for DMA */
+ _DRM_AGP_BUFFER = 0x02 /* Buffer is in agp space */
+ } flags;
+ unsigned long agp_start; /* Start address of where the agp buffers
+ * are in the agp aperture */
+} drm_buf_desc_t;
+
+typedef struct drm_buf_info {
+ int count; /* Entries in list */
+ drm_buf_desc_t *list;
+} drm_buf_info_t;
+
+typedef struct drm_buf_free {
+ int count;
+ int *list;
+} drm_buf_free_t;
+
+typedef struct drm_buf_pub {
+ int idx; /* Index into master buflist */
+ int total; /* Buffer size */
+ int used; /* Amount of buffer in use (for DMA) */
+ void *address; /* Address of buffer */
+} drm_buf_pub_t;
+
+typedef struct drm_buf_map {
+ int count; /* Length of buflist */
+ void *virtual; /* Mmaped area in user-virtual */
+ drm_buf_pub_t *list; /* Buffer information */
+} drm_buf_map_t;
+
+typedef struct drm_dma {
+ /* Indices here refer to the offset into
+ buflist in drm_buf_get_t. */
+ int context; /* Context handle */
+ int send_count; /* Number of buffers to send */
+ int *send_indices; /* List of handles to buffers */
+ int *send_sizes; /* Lengths of data to send */
+ drm_dma_flags_t flags; /* Flags */
+ int request_count; /* Number of buffers requested */
+ int request_size; /* Desired size for buffers */
+ int *request_indices; /* Buffer information */
+ int *request_sizes;
+ int granted_count; /* Number of buffers granted */
+} drm_dma_t;
+
+typedef enum {
+ _DRM_CONTEXT_PRESERVED = 0x01,
+ _DRM_CONTEXT_2DONLY = 0x02
+} drm_ctx_flags_t;
+
+typedef struct drm_ctx {
+ drm_context_t handle;
+ drm_ctx_flags_t flags;
+} drm_ctx_t;
+
+typedef struct drm_ctx_res {
+ int count;
+ drm_ctx_t *contexts;
+} drm_ctx_res_t;
+
+typedef struct drm_draw {
+ drm_drawable_t handle;
+} drm_draw_t;
+
+typedef struct drm_auth {
+ drm_magic_t magic;
+} drm_auth_t;
+
+typedef struct drm_irq_busid {
+ int irq;
+ int busnum;
+ int devnum;
+ int funcnum;
+} drm_irq_busid_t;
+
+typedef struct drm_agp_mode {
+ unsigned long mode;
+} drm_agp_mode_t;
+
+ /* For drm_agp_alloc -- allocated a buffer */
+typedef struct drm_agp_buffer {
+ unsigned long size; /* In bytes -- will round to page boundary */
+ unsigned long handle; /* Used for BIND/UNBIND ioctls */
+ unsigned long type; /* Type of memory to allocate */
+ unsigned long physical; /* Physical used by i810 */
+} drm_agp_buffer_t;
+
+ /* For drm_agp_bind */
+typedef struct drm_agp_binding {
+ unsigned long handle; /* From drm_agp_buffer */
+ unsigned long offset; /* In bytes -- will round to page boundary */
+} drm_agp_binding_t;
+
+typedef struct drm_agp_info {
+ int agp_version_major;
+ int agp_version_minor;
+ unsigned long mode;
+ unsigned long aperture_base; /* physical address */
+ unsigned long aperture_size; /* bytes */
+ unsigned long memory_allowed; /* bytes */
+ unsigned long memory_used;
+
+ /* PCI information */
+ unsigned short id_vendor;
+ unsigned short id_device;
+} drm_agp_info_t;
+
+#define DRM_IOCTL_BASE 'd'
+#define DRM_IOCTL_NR(n) ((n) & 0xff)
+#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
+#define DRM_IOR(nr,size) _IOR(DRM_IOCTL_BASE,nr,size)
+#define DRM_IOW(nr,size) _IOW(DRM_IOCTL_BASE,nr,size)
+#define DRM_IOWR(nr,size) _IOWR(DRM_IOCTL_BASE,nr,size)
+
+
+#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t)
+#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t)
+#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t)
+#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t)
+
+#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t)
+#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t)
+#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, drm_block_t)
+#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, drm_block_t)
+#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, drm_control_t)
+#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, drm_map_t)
+#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, drm_buf_desc_t)
+#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, drm_buf_desc_t)
+#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm_buf_info_t)
+#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t)
+#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t)
+
+#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t)
+#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t)
+#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t)
+#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, drm_ctx_t)
+#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, drm_ctx_t)
+#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, drm_ctx_t)
+#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, drm_ctx_res_t)
+#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, drm_draw_t)
+#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, drm_draw_t)
+#define DRM_IOCTL_DMA DRM_IOWR(0x29, drm_dma_t)
+#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, drm_lock_t)
+#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t)
+#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t)
+
+#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
+#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
+#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t)
+#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t)
+#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t)
+#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t)
+#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t)
+#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t)
+
+/* Mga specific ioctls */
+#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t)
+#define DRM_IOCTL_MGA_SWAP DRM_IOW( 0x41, drm_mga_swap_t)
+#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x42, drm_mga_clear_t)
+#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x43, drm_mga_iload_t)
+#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x44, drm_mga_vertex_t)
+#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x45, drm_lock_t )
+#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t)
+
+/* I810 specific ioctls */
+#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t)
+#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t)
+#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t)
+#define DRM_IOCTL_I810_FLUSH DRM_IO ( 0x43)
+#define DRM_IOCTL_I810_GETAGE DRM_IO ( 0x44)
+#define DRM_IOCTL_I810_GETBUF DRM_IOWR(0x45, drm_i810_dma_t)
+#define DRM_IOCTL_I810_SWAP DRM_IO ( 0x46)
+
+/* Rage 128 specific ioctls */
+#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
+#define DRM_IOCTL_R128_RESET DRM_IO( 0x41)
+#define DRM_IOCTL_R128_FLUSH DRM_IO( 0x42)
+#define DRM_IOCTL_R128_CCEIDL DRM_IO( 0x43)
+#define DRM_IOCTL_R128_PACKET DRM_IOW( 0x44, drm_r128_packet_t)
+#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x45, drm_r128_vertex_t)
+
+#endif
diff --git a/bsd/drm/Makefile b/bsd/drm/Makefile
new file mode 100644
index 00000000..6a70a5b0
--- /dev/null
+++ b/bsd/drm/Makefile
@@ -0,0 +1,17 @@
+# $FreeBSD$
+
+KMOD = drm
+SRCS = init.c memory.c auth.c context.c drawable.c bufs.c \
+ lists.c lock.c ioctl.c fops.c vm.c dma.c sysctl.c \
+ agpsupport.c
+SRCS += device_if.h bus_if.h pci_if.h
+CFLAGS += ${DEBUG_FLAGS} -I.. # -DDRM_AGP
+KERN = /usr/src/sys
+
+@:
+ ln -sf /sys @
+
+machine:
+ ln -sf /sys/i386/include machine
+
+.include "/usr/src/sys/conf/kmod.mk"
diff --git a/bsd/drm/agpsupport.c b/bsd/drm/agpsupport.c
new file mode 100644
index 00000000..53444c90
--- /dev/null
+++ b/bsd/drm/agpsupport.c
@@ -0,0 +1,271 @@
+/* agpsupport.c -- DRM support for AGP/GART backend
+ * Created: Mon Dec 13 09:56:45 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Rickard E. (Rik) Faith <faith@precisioninsight.com>
+ *
+ * $XFree86$
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+#ifdef DRM_AGP
+
+#include <pci/agpvar.h>
+
+MODULE_DEPEND(drm, agp, 1, 1, 1);
+
+int
+drm_agp_info(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ struct agp_info *kern;
+ drm_agp_info_t info;
+
+ if (!dev->agp->acquired) return EINVAL;
+
+ kern = &dev->agp->info;
+ agp_get_info(dev->agp->agpdev, kern);
+ info.agp_version_major = 1;
+ info.agp_version_minor = 0;
+ info.mode = kern->ai_mode;
+ info.aperture_base = kern->ai_aperture_base;
+ info.aperture_size = kern->ai_aperture_size;
+ info.memory_allowed = kern->ai_memory_allowed;
+ info.memory_used = kern->ai_memory_used;
+ info.id_vendor = kern->ai_devid & 0xffff;
+ info.id_device = kern->ai_devid >> 16;
+
+ *(drm_agp_info_t *) data = info;
+ return 0;
+}
+
+int
+drm_agp_acquire(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ int retcode;
+
+ if (dev->agp->acquired) return EINVAL;
+ retcode = agp_acquire(dev->agp->agpdev);
+ if (retcode) return retcode;
+ dev->agp->acquired = 1;
+ return 0;
+}
+
+int
+drm_agp_release(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+
+ if (!dev->agp->acquired) return EINVAL;
+ agp_release(dev->agp->agpdev);
+ dev->agp->acquired = 0;
+ return 0;
+
+}
+
+int
+drm_agp_enable(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_agp_mode_t mode;
+
+ if (!dev->agp->acquired) return EINVAL;
+
+ mode = *(drm_agp_mode_t *) data;
+
+ dev->agp->mode = mode.mode;
+ agp_enable(dev->agp->agpdev, mode.mode);
+ dev->agp->base = dev->agp->info.ai_aperture_base;
+ dev->agp->enabled = 1;
+ return 0;
+}
+
+int drm_agp_alloc(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_agp_buffer_t request;
+ drm_agp_mem_t *entry;
+ void *handle;
+ unsigned long pages;
+ u_int32_t type;
+ struct agp_memory_info info;
+
+ if (!dev->agp->acquired) return EINVAL;
+
+ request = *(drm_agp_buffer_t *) data;
+
+ if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS)))
+ return ENOMEM;
+
+ memset(entry, 0, sizeof(*entry));
+
+ pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
+ type = (u_int32_t) request.type;
+
+ if (!(handle = drm_alloc_agp(pages, type))) {
+ drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+ return ENOMEM;
+ }
+
+ entry->handle = handle;
+ entry->bound = 0;
+ entry->pages = pages;
+ entry->prev = NULL;
+ entry->next = dev->agp->memory;
+ if (dev->agp->memory) dev->agp->memory->prev = entry;
+ dev->agp->memory = entry;
+
+ agp_memory_info(dev->agp->agpdev, entry->handle, &info);
+
+ request.handle = (unsigned long) entry->handle;
+ request.physical = info.ami_physical;
+
+ *(drm_agp_buffer_t *) data = request;
+
+ return 0;
+}
+
+static drm_agp_mem_t *
+drm_agp_lookup_entry(drm_device_t *dev, void *handle)
+{
+ drm_agp_mem_t *entry;
+
+ for (entry = dev->agp->memory; entry; entry = entry->next) {
+ if (entry->handle == handle) return entry;
+ }
+ return NULL;
+}
+
+int
+drm_agp_unbind(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_agp_binding_t request;
+ drm_agp_mem_t *entry;
+
+ if (!dev->agp->acquired) return EINVAL;
+ request = *(drm_agp_binding_t *) data;
+ if (!(entry = drm_agp_lookup_entry(dev, (void *) request.handle)))
+ return EINVAL;
+ if (!entry->bound) return EINVAL;
+ return drm_unbind_agp(entry->handle);
+}
+
+int drm_agp_bind(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_agp_binding_t request;
+ drm_agp_mem_t *entry;
+ int retcode;
+ int page;
+
+ if (!dev->agp->acquired) return EINVAL;
+ request = *(drm_agp_binding_t *) data;
+ if (!(entry = drm_agp_lookup_entry(dev, (void *) request.handle)))
+ return EINVAL;
+ if (entry->bound) return EINVAL;
+ page = (request.offset + PAGE_SIZE - 1) / PAGE_SIZE;
+ if ((retcode = drm_bind_agp(entry->handle, page))) return retcode;
+ entry->bound = dev->agp->base + (page << PAGE_SHIFT);
+ return 0;
+}
+
+int drm_agp_free(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_agp_buffer_t request;
+ drm_agp_mem_t *entry;
+
+ if (!dev->agp->acquired) return EINVAL;
+ request = *(drm_agp_buffer_t *) data;
+ if (!(entry = drm_agp_lookup_entry(dev, (void*) request.handle)))
+ return EINVAL;
+ if (entry->bound) drm_unbind_agp(entry->handle);
+
+ if (entry->prev) entry->prev->next = entry->next;
+ else dev->agp->memory = entry->next;
+ if (entry->next) entry->next->prev = entry->prev;
+ drm_free_agp(entry->handle, entry->pages);
+ drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+ return 0;
+}
+
+drm_agp_head_t *drm_agp_init(void)
+{
+ device_t agpdev;
+ drm_agp_head_t *head = NULL;
+ int agp_available = 1;
+
+ agpdev = agp_find_device();
+ if (!agpdev)
+ agp_available = 0;
+
+ DRM_DEBUG("agp_available = %d\n", agp_available);
+
+ if (agp_available) {
+ if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS)))
+ return NULL;
+ head->agpdev = agpdev;
+ memset((void *)head, 0, sizeof(*head));
+ agp_get_info(agpdev, &head->info);
+ head->memory = NULL;
+#if 0 /* bogus */
+ switch (head->agp_info.chipset) {
+ case INTEL_GENERIC: head->chipset = "Intel"; break;
+ case INTEL_LX: head->chipset = "Intel 440LX"; break;
+ case INTEL_BX: head->chipset = "Intel 440BX"; break;
+ case INTEL_GX: head->chipset = "Intel 440GX"; break;
+ case INTEL_I810: head->chipset = "Intel i810"; break;
+ case VIA_GENERIC: head->chipset = "VIA"; break;
+ case VIA_VP3: head->chipset = "VIA VP3"; break;
+ case VIA_MVP3: head->chipset = "VIA MVP3"; break;
+ case VIA_APOLLO_PRO: head->chipset = "VIA Apollo Pro"; break;
+ case SIS_GENERIC: head->chipset = "SiS"; break;
+ case AMD_GENERIC: head->chipset = "AMD"; break;
+ case AMD_IRONGATE: head->chipset = "AMD Irongate"; break;
+ case ALI_GENERIC: head->chipset = "ALi"; break;
+ case ALI_M1541: head->chipset = "ALi M1541"; break;
+ default:
+ }
+#endif
+ DRM_INFO("AGP at 0x%08x %dMB\n",
+ head->info.ai_aperture_base,
+ head->info.ai_aperture_size >> 20);
+ }
+ return head;
+}
+
+#endif /* DRM_AGP */
diff --git a/bsd/drm/auth.c b/bsd/drm/auth.c
new file mode 100644
index 00000000..f7b3bc49
--- /dev/null
+++ b/bsd/drm/auth.c
@@ -0,0 +1,168 @@
+/* auth.c -- IOCTLs for authentication -*- c -*-
+ * Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
+ * Revised: Fri Aug 20 11:31:48 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/auth.c,v 1.4 1999/08/30 13:05:00 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/auth.c,v 1.1 1999/09/25 14:37:57 dawes Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+static int drm_hash_magic(drm_magic_t magic)
+{
+ return magic & (DRM_HASH_SIZE-1);
+}
+
+static drm_file_t *drm_find_file(drm_device_t *dev, drm_magic_t magic)
+{
+ drm_file_t *retval = NULL;
+ drm_magic_entry_t *pt;
+ int hash = drm_hash_magic(magic);
+
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
+ for (pt = dev->magiclist[hash].head; pt; pt = pt->next) {
+ if (pt->priv->authenticated) continue;
+ if (pt->magic == magic) {
+ retval = pt->priv;
+ break;
+ }
+ }
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+
+ return retval;
+}
+
+int drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic)
+{
+ int hash;
+ drm_magic_entry_t *entry;
+
+ DRM_DEBUG("%d\n", magic);
+
+ hash = drm_hash_magic(magic);
+ entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC);
+ if (!entry) return ENOMEM;
+ entry->magic = magic;
+ entry->priv = priv;
+ entry->next = NULL;
+
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
+ if (dev->magiclist[hash].tail) {
+ dev->magiclist[hash].tail->next = entry;
+ dev->magiclist[hash].tail = entry;
+ } else {
+ dev->magiclist[hash].head = entry;
+ dev->magiclist[hash].tail = entry;
+ }
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+
+ return 0;
+}
+
+int drm_remove_magic(drm_device_t *dev, drm_magic_t magic)
+{
+ drm_magic_entry_t *prev = NULL;
+ drm_magic_entry_t *pt;
+ int hash;
+
+ DRM_DEBUG("%d\n", magic);
+ hash = drm_hash_magic(magic);
+
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
+ for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
+ if (pt->magic == magic) {
+ if (dev->magiclist[hash].head == pt) {
+ dev->magiclist[hash].head = pt->next;
+ }
+ if (dev->magiclist[hash].tail == pt) {
+ dev->magiclist[hash].tail = prev;
+ }
+ if (prev) {
+ prev->next = pt->next;
+ }
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+ return 0;
+ }
+ }
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+
+ drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+
+ return EINVAL;
+}
+
+int drm_getmagic(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ static drm_magic_t sequence = 0;
+#if 0
+ static struct simplelock lock; /* XXX */
+#endif
+ drm_device_t *dev = kdev->si_drv1;
+ drm_file_t *priv;
+ drm_auth_t auth;
+
+ /* Find unique magic */
+ priv = drm_find_file_by_proc(dev, p);
+ if (!priv) {
+ DRM_DEBUG("can't find file structure\n");
+ return EINVAL;
+ }
+ if (priv->magic) {
+ auth.magic = priv->magic;
+ } else {
+ simple_lock(&lock);
+ do {
+ if (!sequence) ++sequence; /* reserve 0 */
+ auth.magic = sequence++;
+ } while (drm_find_file(dev, auth.magic));
+ simple_unlock(&lock);
+ priv->magic = auth.magic;
+ drm_add_magic(dev, priv, auth.magic);
+ }
+
+ DRM_DEBUG("%u\n", auth.magic);
+ *(drm_auth_t *) data = auth;
+ return 0;
+}
+
+int drm_authmagic(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_auth_t auth;
+ drm_file_t *file;
+
+ auth = *(drm_auth_t *) data;
+ DRM_DEBUG("%u\n", auth.magic);
+ if ((file = drm_find_file(dev, auth.magic))) {
+ file->authenticated = 1;
+ drm_remove_magic(dev, auth.magic);
+ return 0;
+ }
+ return EINVAL;
+}
diff --git a/bsd/drm/bufs.c b/bsd/drm/bufs.c
new file mode 100644
index 00000000..fc08b69d
--- /dev/null
+++ b/bsd/drm/bufs.c
@@ -0,0 +1,500 @@
+/* bufs.c -- IOCTLs to manage buffers -*- c -*-
+ * Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
+ * Revised: Fri Aug 20 22:48:10 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/bufs.c,v 1.8 1999/08/30 13:05:00 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/bufs.c,v 1.1 1999/09/25 14:37:57 dawes Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include <sys/mman.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_map.h>
+
+ /* Compute order. Can be made faster. */
+int drm_order(unsigned long size)
+{
+ int order;
+ unsigned long tmp;
+
+ for (order = 0, tmp = size; tmp >>= 1; ++order);
+ if (size & ~(1 << order)) ++order;
+ return order;
+}
+
+int drm_addmap(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_map_t *map;
+
+ if (!(dev->flags & (FREAD|FWRITE)))
+ return EACCES; /* Require read/write */
+
+ map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
+ if (!map) return ENOMEM;
+ *map = *(drm_map_t *) data;
+
+ DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
+ map->offset, map->size, map->type);
+ if ((map->offset & (PAGE_SIZE-1)) || (map->size & (PAGE_SIZE-1))) {
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ DRM_DEBUG("offset or size not page aligned\n");
+ return EINVAL;
+ }
+ map->mtrr = -1;
+ map->handle = 0;
+
+ switch (map->type) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+ if (map->offset + map->size < map->offset
+ /* || map->offset < virt_to_phys(high_memory) */) {
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ DRM_DEBUG("bad frame buffer size\n");
+ return EINVAL;
+ }
+#ifdef CONFIG_MTRR
+ if (map->type == _DRM_FRAME_BUFFER
+ || (map->flags & _DRM_WRITE_COMBINING)) {
+ map->mtrr = mtrr_add(map->offset, map->size,
+ MTRR_TYPE_WRCOMB, 1);
+ }
+#endif
+ map->handle = drm_ioremap(map->offset, map->size);
+ break;
+
+
+ case _DRM_SHM:
+ DRM_DEBUG("%ld %d\n", map->size, drm_order(map->size));
+ map->handle = (void *)drm_alloc_pages(drm_order(map->size)
+ - PAGE_SHIFT,
+ DRM_MEM_SAREA);
+ if (!map->handle) {
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ return ENOMEM;
+ }
+ map->offset = (unsigned long)map->handle;
+ if (map->flags & _DRM_CONTAINS_LOCK) {
+ dev->lock.hw_lock = map->handle; /* Pointer to lock */
+ }
+ break;
+#ifdef DRM_AGP
+ case _DRM_AGP:
+ map->offset = map->offset + dev->agp->base;
+ break;
+#endif
+ default:
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ DRM_DEBUG("bad type\n");
+ return EINVAL;
+ }
+
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
+ if (dev->maplist) {
+ ++dev->map_count;
+ dev->maplist = drm_realloc(dev->maplist,
+ (dev->map_count-1)
+ * sizeof(*dev->maplist),
+ dev->map_count
+ * sizeof(*dev->maplist),
+ DRM_MEM_MAPS);
+ } else {
+ dev->map_count = 1;
+ dev->maplist = drm_alloc(dev->map_count*sizeof(*dev->maplist),
+ DRM_MEM_MAPS);
+ }
+ dev->maplist[dev->map_count-1] = map;
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+
+ *(drm_map_t *) data = *map;
+ if (map->type != _DRM_SHM)
+ ((drm_map_t *)data)->handle = (void *) map->offset;
+
+ return 0;
+}
+
+int drm_addbufs(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ int count;
+ int order;
+ int size;
+ int total;
+ int page_order;
+ drm_buf_entry_t *entry;
+ unsigned long page;
+ drm_buf_t *buf;
+ int alignment;
+ unsigned long offset;
+ int i;
+ int byte_count;
+ int page_count;
+
+ if (!dma) return EINVAL;
+
+ request = *(drm_buf_desc_t *) data;
+
+ count = request.count;
+ order = drm_order(request.size);
+ size = 1 << order;
+
+ DRM_DEBUG("count = %d, size = %d (%d), order = %d, queue_count = %d\n",
+ request.count, request.size, size, order, dev->queue_count);
+
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL;
+ if (dev->queue_count) return EBUSY; /* Not while in use */
+
+	alignment = (request.flags & _DRM_PAGE_ALIGN) ? round_page(size) : size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
+
+ simple_lock(&dev->count_lock);
+ if (dev->buf_use) {
+ simple_unlock(&dev->count_lock);
+ return EBUSY;
+ }
+ atomic_inc(&dev->buf_alloc);
+ simple_unlock(&dev->count_lock);
+
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
+ entry = &dma->bufs[order];
+ if (entry->buf_count) {
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+ atomic_dec(&dev->buf_alloc);
+ return ENOMEM; /* May only call once for each order */
+ }
+
+ entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS);
+ if (!entry->buflist) {
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+ atomic_dec(&dev->buf_alloc);
+ return ENOMEM;
+ }
+ memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+ entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
+ DRM_MEM_SEGS);
+ if (!entry->seglist) {
+ drm_free(entry->buflist,
+ count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS);
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+ atomic_dec(&dev->buf_alloc);
+ return ENOMEM;
+ }
+ memset(entry->seglist, 0, count * sizeof(*entry->seglist));
+
+ dma->pagelist = drm_realloc(dma->pagelist,
+ dma->page_count * sizeof(*dma->pagelist),
+ (dma->page_count + (count << page_order))
+ * sizeof(*dma->pagelist),
+ DRM_MEM_PAGES);
+ DRM_DEBUG("pagelist: %d entries\n",
+ dma->page_count + (count << page_order));
+
+
+ entry->buf_size = size;
+ entry->page_order = page_order;
+ byte_count = 0;
+ page_count = 0;
+ while (entry->buf_count < count) {
+ if (!(page = drm_alloc_pages(page_order, DRM_MEM_DMA))) break;
+ entry->seglist[entry->seg_count++] = page;
+ for (i = 0; i < (1 << page_order); i++) {
+ DRM_DEBUG("page %d @ 0x%08lx\n",
+ dma->page_count + page_count,
+ page + PAGE_SIZE * i);
+ dma->pagelist[dma->page_count + page_count++]
+ = page + PAGE_SIZE * i;
+ }
+ for (offset = 0;
+ offset + size <= total && entry->buf_count < count;
+ offset += alignment, ++entry->buf_count) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+ buf->offset = (dma->byte_count + byte_count + offset);
+ buf->address = (void *)(page + offset);
+ buf->next = NULL;
+ buf->waiting = 0;
+ buf->pending = 0;
+ buf->dma_wait = 0;
+ buf->pid = 0;
+#if DRM_DMA_HISTOGRAM
+ timespecclear(&buf->time_queued);
+ timespecclear(&buf->time_dispatched);
+ timespecclear(&buf->time_completed);
+ timespecclear(&buf->time_freed);
+#endif
+ DRM_DEBUG("buffer %d @ %p\n",
+ entry->buf_count, buf->address);
+ }
+ byte_count += PAGE_SIZE << page_order;
+ }
+
+ dma->buflist = drm_realloc(dma->buflist,
+ dma->buf_count * sizeof(*dma->buflist),
+ (dma->buf_count + entry->buf_count)
+ * sizeof(*dma->buflist),
+ DRM_MEM_BUFS);
+ for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
+ dma->buflist[i] = &entry->buflist[i - dma->buf_count];
+
+ dma->buf_count += entry->buf_count;
+ dma->seg_count += entry->seg_count;
+ dma->page_count += entry->seg_count << page_order;
+ dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
+
+ drm_freelist_create(&entry->freelist, entry->buf_count);
+ for (i = 0; i < entry->buf_count; i++) {
+ drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
+ }
+
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+
+ request.count = entry->buf_count;
+ request.size = size;
+
+ *(drm_buf_desc_t *) data = request;
+
+ atomic_dec(&dev->buf_alloc);
+ return 0;
+}
+
+int drm_infobufs(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_info_t request;
+ int i;
+ int count;
+
+ if (!dma) return EINVAL;
+
+ simple_lock(&dev->count_lock);
+ if (atomic_read(&dev->buf_alloc)) {
+ simple_unlock(&dev->count_lock);
+ return EBUSY;
+ }
+ ++dev->buf_use; /* Can't allocate more after this call */
+ simple_unlock(&dev->count_lock);
+
+ request = *(drm_buf_info_t *) data;
+
+ for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
+ if (dma->bufs[i].buf_count) ++count;
+ }
+
+ DRM_DEBUG("count = %d\n", count);
+
+ if (request.count >= count) {
+ for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
+ if (dma->bufs[i].buf_count) {
+ int error;
+ error = copyout(&dma->bufs[i].buf_count,
+ &request.list[count].count,
+ sizeof(dma->bufs[0]
+ .buf_count));
+ if (error) return error;
+ error = copyout(&dma->bufs[i].buf_size,
+ &request.list[count].size,
+ sizeof(dma->bufs[0].buf_size));
+ if (error) return error;
+ error = copyout(&dma->bufs[i]
+ .freelist.low_mark,
+ &request.list[count].low_mark,
+ sizeof(dma->bufs[0]
+ .freelist.low_mark));
+ if (error) return error;
+ error = copyout(&dma->bufs[i]
+ .freelist.high_mark,
+ &request.list[count].high_mark,
+ sizeof(dma->bufs[0]
+ .freelist.high_mark));
+ if (error) return error;
+ DRM_DEBUG("%d %d %d %d %d\n",
+ i,
+ dma->bufs[i].buf_count,
+ dma->bufs[i].buf_size,
+ dma->bufs[i].freelist.low_mark,
+ dma->bufs[i].freelist.high_mark);
+ ++count;
+ }
+ }
+ }
+ request.count = count;
+
+ *(drm_buf_info_t *) data = request;
+
+ return 0;
+}
+
+int drm_markbufs(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ int order;
+ drm_buf_entry_t *entry;
+
+ if (!dma) return EINVAL;
+
+ request = *(drm_buf_desc_t *) data;
+
+ DRM_DEBUG("%d, %d, %d\n",
+ request.size, request.low_mark, request.high_mark);
+ order = drm_order(request.size);
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL;
+ entry = &dma->bufs[order];
+
+ if (request.low_mark < 0 || request.low_mark > entry->buf_count)
+ return EINVAL;
+ if (request.high_mark < 0 || request.high_mark > entry->buf_count)
+ return EINVAL;
+
+ entry->freelist.low_mark = request.low_mark;
+ entry->freelist.high_mark = request.high_mark;
+
+ return 0;
+}
+
+int drm_freebufs(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_free_t request;
+ int i;
+ int idx;
+ int error;
+ drm_buf_t *buf;
+
+ if (!dma) return EINVAL;
+
+ request = *(drm_buf_free_t *) data;
+
+ DRM_DEBUG("%d\n", request.count);
+ for (i = 0; i < request.count; i++) {
+ error = copyin(&request.list[i], &idx, sizeof(idx));
+ if (error)
+ return error;
+ if (idx < 0 || idx >= dma->buf_count) {
+ DRM_ERROR("Index %d (of %d max)\n",
+ idx, dma->buf_count - 1);
+ return EINVAL;
+ }
+ buf = dma->buflist[idx];
+ if (buf->pid != p->p_pid) {
+ DRM_ERROR("Process %d freeing buffer owned by %d\n",
+ p->p_pid, buf->pid);
+ return EINVAL;
+ }
+ drm_free_buffer(dev, buf);
+ }
+
+ return 0;
+}
+
+int drm_mapbufs(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_device_dma_t *dma = dev->dma;
+ int retcode = 0;
+ const int zero = 0;
+ vm_offset_t virtual;
+ vm_offset_t address;
+ drm_buf_map_t request;
+ int i;
+
+ if (!dma) return EINVAL;
+
+ DRM_DEBUG("\n");
+
+ simple_lock(&dev->count_lock);
+ if (atomic_read(&dev->buf_alloc)) {
+ simple_unlock(&dev->count_lock);
+ return EBUSY;
+ }
+ ++dev->buf_use; /* Can't allocate more after this call */
+ simple_unlock(&dev->count_lock);
+
+ request = *(drm_buf_map_t *) data;
+
+ if (request.count >= dma->buf_count) {
+ virtual = 0;
+ retcode = vm_mmap(&p->p_vmspace->vm_map,
+ &virtual,
+ round_page(dma->byte_count),
+ PROT_READ|PROT_WRITE, VM_PROT_ALL,
+ MAP_SHARED,
+ SLIST_FIRST(&kdev->si_hlist),
+ 0);
+ if (retcode)
+ goto done;
+
+ request.virtual = (void *)virtual;
+
+ for (i = 0; i < dma->buf_count; i++) {
+ retcode = copyout(&dma->buflist[i]->idx,
+ &request.list[i].idx,
+ sizeof(request.list[0].idx));
+ if (retcode) goto done;
+ retcode = copyout(&dma->buflist[i]->total,
+ &request.list[i].total,
+ sizeof(request.list[0].total));
+ if (retcode) goto done;
+ retcode = copyout(&zero,
+ &request.list[i].used,
+ sizeof(request.list[0].used));
+ if (retcode) goto done;
+ address = virtual + dma->buflist[i]->offset;
+ retcode = copyout(&address,
+ &request.list[i].address,
+ sizeof(address));
+ if (retcode) goto done;
+ }
+ }
+done:
+ request.count = dma->buf_count;
+ DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
+
+ *(drm_buf_map_t *) data = request;
+
+ return retcode;
+}
diff --git a/bsd/drm/context.c b/bsd/drm/context.c
new file mode 100644
index 00000000..d79990f8
--- /dev/null
+++ b/bsd/drm/context.c
@@ -0,0 +1,297 @@
+/* context.c -- IOCTLs for contexts and DMA queues -*- c -*-
+ * Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
+ * Revised: Fri Aug 20 11:32:09 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/context.c,v 1.5 1999/08/30 13:05:00 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/context.c,v 1.1 1999/09/25 14:37:58 dawes Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+static int drm_init_queue(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
+{
+ DRM_DEBUG("\n");
+
+ if (atomic_read(&q->use_count) != 1
+ || atomic_read(&q->finalization)
+ || atomic_read(&q->block_count)) {
+ DRM_ERROR("New queue is already in use: u%d f%d b%d\n",
+ atomic_read(&q->use_count),
+ atomic_read(&q->finalization),
+ atomic_read(&q->block_count));
+ }
+
+ atomic_set(&q->finalization, 0);
+ atomic_set(&q->block_count, 0);
+ atomic_set(&q->block_read, 0);
+ atomic_set(&q->block_write, 0);
+ atomic_set(&q->total_queued, 0);
+ atomic_set(&q->total_flushed, 0);
+ atomic_set(&q->total_locks, 0);
+
+ q->write_queue = 0;
+ q->read_queue = 0;
+ q->flush_queue = 0;
+
+ q->flags = ctx->flags;
+
+ drm_waitlist_create(&q->waitlist, dev->dma->buf_count);
+
+ return 0;
+}
+
+
+/* drm_alloc_queue:
+PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not
+ disappear (so all deallocation must be done after IOCTLs are off)
+ 2) dev->queue_count < dev->queue_slots
+ 3) dev->queuelist[i].use_count == 0 and
+ dev->queuelist[i].finalization == 0 if i not in use
+POST: 1) dev->queuelist[i].use_count == 1
+ 2) dev->queue_count < dev->queue_slots */
+
+static int drm_alloc_queue(drm_device_t *dev)
+{
+ int i;
+ drm_queue_t *queue;
+ int oldslots;
+ int newslots;
+ /* Check for a free queue */
+ for (i = 0; i < dev->queue_count; i++) {
+ atomic_inc(&dev->queuelist[i]->use_count);
+ if (atomic_read(&dev->queuelist[i]->use_count) == 1
+ && !atomic_read(&dev->queuelist[i]->finalization)) {
+ DRM_DEBUG("%d (free)\n", i);
+ return i;
+ }
+ atomic_dec(&dev->queuelist[i]->use_count);
+ }
+ /* Allocate a new queue */
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
+
+ queue = drm_alloc(sizeof(*queue), DRM_MEM_QUEUES);
+ memset(queue, 0, sizeof(*queue));
+ atomic_set(&queue->use_count, 1);
+
+ ++dev->queue_count;
+ if (dev->queue_count >= dev->queue_slots) {
+ oldslots = dev->queue_slots * sizeof(*dev->queuelist);
+ if (!dev->queue_slots) dev->queue_slots = 1;
+ dev->queue_slots *= 2;
+ newslots = dev->queue_slots * sizeof(*dev->queuelist);
+
+ dev->queuelist = drm_realloc(dev->queuelist,
+ oldslots,
+ newslots,
+ DRM_MEM_QUEUES);
+ if (!dev->queuelist) {
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+ DRM_DEBUG("out of memory\n");
+ return -ENOMEM;
+ }
+ }
+ dev->queuelist[dev->queue_count-1] = queue;
+
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+ DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
+ return dev->queue_count - 1;
+}
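Aside: a minimal userland sketch (not part of this patch) of the slot-claiming convention described in the PRE/POST comment above: a queue slot is free when use_count and finalization are both 0, and a caller claims it by incrementing use_count and checking that it is the sole holder. C11 atomics stand in for the kernel's atomic_t.

/* Sketch of the claim-a-free-slot pattern used by drm_alloc_queue(). */
#include <stdatomic.h>
#include <stdio.h>

struct slot {
	atomic_int use_count;
	atomic_int finalization;
};

/* Returns 1 if the caller now owns the slot, 0 otherwise. */
static int try_claim(struct slot *s)
{
	atomic_fetch_add(&s->use_count, 1);
	if (atomic_load(&s->use_count) == 1 &&
	    atomic_load(&s->finalization) == 0)
		return 1;			/* slot was free; we hold the only reference */
	atomic_fetch_sub(&s->use_count, 1);	/* already in use; back off */
	return 0;
}

int main(void)
{
	struct slot s;

	atomic_init(&s.use_count, 0);
	atomic_init(&s.finalization, 0);
	printf("first claim:  %d\n", try_claim(&s));	/* 1: slot was free */
	printf("second claim: %d\n", try_claim(&s));	/* 0: already claimed */
	return 0;
}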
+
+int drm_resctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_ctx_res_t res;
+ drm_ctx_t ctx;
+ int i;
+ int error;
+
+ DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
+ res = *(drm_ctx_res_t *) data;
+ if (res.count >= DRM_RESERVED_CONTEXTS) {
+ memset(&ctx, 0, sizeof(ctx));
+ for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+ ctx.handle = i;
+ error = copyout(&i, &res.contexts[i],
+ sizeof(i));
+ if (error) return error;
+ }
+ }
+ res.count = DRM_RESERVED_CONTEXTS;
+ *(drm_ctx_res_t *) data = res;
+ return 0;
+}
+
+
+int drm_addctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_ctx_t ctx;
+
+ ctx = *(drm_ctx_t *) data;
+ if ((ctx.handle = drm_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
+ /* Init kernel's context and get a new one. */
+ drm_init_queue(dev, dev->queuelist[ctx.handle], &ctx);
+ ctx.handle = drm_alloc_queue(dev);
+ }
+ drm_init_queue(dev, dev->queuelist[ctx.handle], &ctx);
+ DRM_DEBUG("%d\n", ctx.handle);
+ *(drm_ctx_t *) data = ctx;
+ return 0;
+}
+
+int drm_modctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_ctx_t ctx;
+ drm_queue_t *q;
+
+ ctx = *(drm_ctx_t *) data;
+
+ DRM_DEBUG("%d\n", ctx.handle);
+
+	if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return EINVAL;
+ q = dev->queuelist[ctx.handle];
+
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) == 1) {
+ /* No longer in use */
+ atomic_dec(&q->use_count);
+		return EINVAL;
+ }
+
+ if (DRM_BUFCOUNT(&q->waitlist)) {
+ atomic_dec(&q->use_count);
+		return EBUSY;
+ }
+
+ q->flags = ctx.flags;
+
+ atomic_dec(&q->use_count);
+ return 0;
+}
+
+int drm_getctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_ctx_t ctx;
+ drm_queue_t *q;
+
+ ctx = *(drm_ctx_t *) data;
+
+ DRM_DEBUG("%d\n", ctx.handle);
+
+	if (ctx.handle >= dev->queue_count) return EINVAL;
+ q = dev->queuelist[ctx.handle];
+
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) == 1) {
+ /* No longer in use */
+ atomic_dec(&q->use_count);
+		return EINVAL;
+ }
+
+ ctx.flags = q->flags;
+ atomic_dec(&q->use_count);
+
+ *(drm_ctx_t *) data = ctx;
+
+ return 0;
+}
+
+int drm_switchctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_ctx_t ctx;
+
+ ctx = *(drm_ctx_t *) data;
+ DRM_DEBUG("%d\n", ctx.handle);
+ return drm_context_switch(dev, dev->last_context, ctx.handle);
+}
+
+int drm_newctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_ctx_t ctx;
+
+ ctx = *(drm_ctx_t *) data;
+ DRM_DEBUG("%d\n", ctx.handle);
+ drm_context_switch_complete(dev, ctx.handle);
+
+ return 0;
+}
+
+int drm_rmctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_ctx_t ctx;
+ drm_queue_t *q;
+ drm_buf_t *buf;
+
+ ctx = *(drm_ctx_t *) data;
+ DRM_DEBUG("%d\n", ctx.handle);
+
+	if (ctx.handle >= dev->queue_count) return EINVAL;
+ q = dev->queuelist[ctx.handle];
+
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) == 1) {
+ /* No longer in use */
+ atomic_dec(&q->use_count);
+		return EINVAL;
+ }
+
+ atomic_inc(&q->finalization); /* Mark queue in finalization state */
+ atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
+ finalization) */
+
+ /* Wait while interrupt servicing is in progress */
+ while (test_and_set_bit(0, &dev->interrupt_flag)) {
+ int never;
+ int error = tsleep(&never, PZERO|PCATCH, "drmrc", 1);
+ if (error) {
+ clear_bit(0, &dev->interrupt_flag);
+ return error;
+ }
+ }
+ /* Remove queued buffers */
+ while ((buf = drm_waitlist_get(&q->waitlist))) {
+ drm_free_buffer(dev, buf);
+ }
+ clear_bit(0, &dev->interrupt_flag);
+
+ /* Wakeup blocked processes */
+ wakeup(&q->read_queue);
+ wakeup(&q->write_queue);
+ wakeup(&q->flush_queue);
+
+ /* Finalization over. Queue is made
+ available when both use_count and
+ finalization become 0, which won't
+ happen until all the waiting processes
+ stop waiting. */
+ atomic_dec(&q->finalization);
+ return 0;
+}
diff --git a/bsd/drm/dma.c b/bsd/drm/dma.c
new file mode 100644
index 00000000..149f4593
--- /dev/null
+++ b/bsd/drm/dma.c
@@ -0,0 +1,534 @@
+/* dma.c -- DMA IOCTL and function support -*- c -*-
+ * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
+ * Revised: Thu Sep 16 12:55:39 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/dma.c,v 1.7 1999/09/16 16:56:18 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/dma.c,v 1.1 1999/09/25 14:37:58 dawes Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+void drm_dma_setup(drm_device_t *dev)
+{
+ int i;
+
+ dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER);
+ memset(dev->dma, 0, sizeof(*dev->dma));
+ for (i = 0; i <= DRM_MAX_ORDER; i++)
+ memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
+}
+
+void drm_dma_takedown(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int i, j;
+
+ if (!dma) return;
+
+ /* Clear dma buffers */
+ for (i = 0; i <= DRM_MAX_ORDER; i++) {
+ if (dma->bufs[i].seg_count) {
+ DRM_DEBUG("order %d: buf_count = %d,"
+ " seg_count = %d\n",
+ i,
+ dma->bufs[i].buf_count,
+ dma->bufs[i].seg_count);
+ for (j = 0; j < dma->bufs[i].seg_count; j++) {
+ drm_free_pages(dma->bufs[i].seglist[j],
+ dma->bufs[i].page_order,
+ DRM_MEM_DMA);
+ }
+ drm_free(dma->bufs[i].buflist,
+ dma->buf_count
+ * sizeof(*dma->bufs[0].buflist),
+ DRM_MEM_BUFS);
+ drm_free(dma->bufs[i].seglist,
+ dma->buf_count
+ * sizeof(*dma->bufs[0].seglist),
+ DRM_MEM_SEGS);
+ drm_freelist_destroy(&dma->bufs[i].freelist);
+ }
+ }
+
+ if (dma->buflist) {
+ drm_free(dma->buflist,
+ dma->buf_count * sizeof(*dma->buflist),
+ DRM_MEM_BUFS);
+ }
+
+ if (dma->pagelist) {
+ drm_free(dma->pagelist,
+ dma->page_count * sizeof(*dma->pagelist),
+ DRM_MEM_PAGES);
+ }
+ drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
+ dev->dma = NULL;
+}
+
+#if DRM_DMA_HISTOGRAM
+/* This is slow, but is useful for debugging. */
+int drm_histogram_slot(struct timespec *ts)
+{
+ long count = ts->tv_sec * 1000 + ts->tv_nsec / 1000000;
+ int value = DRM_DMA_HISTOGRAM_INITIAL;
+ int slot;
+
+ for (slot = 0;
+ slot < DRM_DMA_HISTOGRAM_SLOTS;
+ ++slot, value = DRM_DMA_HISTOGRAM_NEXT(value)) {
+ if (count < value) return slot;
+ }
+ return DRM_DMA_HISTOGRAM_SLOTS - 1;
+}
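Aside: the sketch below (not part of this patch) illustrates the logarithmic bucket walk in drm_histogram_slot(). The real DRM_DMA_HISTOGRAM_SLOTS/INITIAL/NEXT constants are defined in drmP.h and are not shown in this hunk; the values used here (9 slots, first boundary 10, each boundary ten times the previous) are assumptions chosen only to make the example concrete.

/* Sketch of the histogram bucket boundaries walked by drm_histogram_slot(). */
#include <stdio.h>

#define EX_SLOTS   9
#define EX_INITIAL 10
#define EX_NEXT(v) ((v) * 10)

static int ex_slot(long count)
{
	int value = EX_INITIAL;
	int slot;

	for (slot = 0; slot < EX_SLOTS; ++slot, value = EX_NEXT(value))
		if (count < value)	/* first boundary the sample falls under */
			return slot;
	return EX_SLOTS - 1;		/* everything larger lands in the last bucket */
}

int main(void)
{
	long samples[] = { 3, 42, 1234, 999999999 };
	int i;

	for (i = 0; i < 4; i++)
		printf("count %ld -> slot %d\n", samples[i], ex_slot(samples[i]));
	return 0;
}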
+
+void drm_histogram_compute(drm_device_t *dev, drm_buf_t *buf)
+{
+ struct timespec queued_to_dispatched;
+ struct timespec dispatched_to_completed;
+ struct timespec completed_to_freed;
+ int q2d, d2c, c2f, q2c, q2f;
+
+ if (timespecisset(&buf->time_queued)) {
+ queued_to_dispatched = buf->time_dispatched;
+ timespecsub(&queued_to_dispatched, &buf->time_queued);
+ dispatched_to_completed = buf->time_completed;
+ timespecsub(&dispatched_to_completed, &buf->time_dispatched);
+ completed_to_freed = buf->time_freed;
+ timespecsub(&completed_to_freed, &buf->time_completed);
+
+ q2d = drm_histogram_slot(&queued_to_dispatched);
+ d2c = drm_histogram_slot(&dispatched_to_completed);
+ c2f = drm_histogram_slot(&completed_to_freed);
+
+ timespecadd(&queued_to_dispatched, &dispatched_to_completed);
+ q2c = drm_histogram_slot(&queued_to_dispatched);
+ timespecadd(&queued_to_dispatched, &completed_to_freed);
+ q2f = drm_histogram_slot(&queued_to_dispatched);
+
+ atomic_inc(&dev->histo.total);
+ atomic_inc(&dev->histo.queued_to_dispatched[q2d]);
+ atomic_inc(&dev->histo.dispatched_to_completed[d2c]);
+ atomic_inc(&dev->histo.completed_to_freed[c2f]);
+
+ atomic_inc(&dev->histo.queued_to_completed[q2c]);
+ atomic_inc(&dev->histo.queued_to_freed[q2f]);
+
+ }
+ timespecclear(&buf->time_queued);
+ timespecclear(&buf->time_dispatched);
+ timespecclear(&buf->time_completed);
+ timespecclear(&buf->time_freed);
+}
+#endif
+
+void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf)
+{
+ drm_device_dma_t *dma = dev->dma;
+
+ if (!buf) return;
+
+ buf->waiting = 0;
+ buf->pending = 0;
+ buf->pid = 0;
+ buf->used = 0;
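+	/* DRM_DMA_HISTOGRAMxx is never defined, so the block below is compiled out. */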
+#if DRM_DMA_HISTOGRAMxx
+ buf->time_completed = get_cycles();
+#endif
+ if (buf->dma_wait) {
+ buf->dma_wait = 0;
+ wakeup(&buf->dma_wait);
+ } else {
+ /* If processes are waiting, the last one
+ to wake will put the buffer on the free
+ list. If no processes are waiting, we
+ put the buffer on the freelist here. */
+ drm_freelist_put(dev, &dma->bufs[buf->order].freelist, buf);
+ }
+}
+
+void drm_reclaim_buffers(drm_device_t *dev, pid_t pid)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+
+ if (!dma) return;
+ for (i = 0; i < dma->buf_count; i++) {
+ if (dma->buflist[i]->pid == pid) {
+ switch (dma->buflist[i]->list) {
+ case DRM_LIST_NONE:
+ drm_free_buffer(dev, dma->buflist[i]);
+ break;
+ case DRM_LIST_WAIT:
+ dma->buflist[i]->list = DRM_LIST_RECLAIM;
+ break;
+ default:
+ /* Buffer already on hardware. */
+ break;
+ }
+ }
+ }
+}
+
+int drm_context_switch(drm_device_t *dev, int old, int new)
+{
+ char buf[64];
+ drm_queue_t *q;
+
+ atomic_inc(&dev->total_ctx);
+
+ if (test_and_set_bit(0, &dev->context_flag)) {
+ DRM_ERROR("Reentering -- FIXME\n");
+ return EBUSY;
+ }
+
+#if DRM_DMA_HISTOGRAM
+ getnanotime(&dev->ctx_start);
+#endif
+
+ DRM_DEBUG("Context switch from %d to %d\n", old, new);
+
+ if (new >= dev->queue_count) {
+ clear_bit(0, &dev->context_flag);
+ return EINVAL;
+ }
+
+ if (new == dev->last_context) {
+ clear_bit(0, &dev->context_flag);
+ return 0;
+ }
+
+ q = dev->queuelist[new];
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) == 1) {
+ atomic_dec(&q->use_count);
+ clear_bit(0, &dev->context_flag);
+ return EINVAL;
+ }
+
+ if (drm_flags & DRM_FLAG_NOCTX) {
+ drm_context_switch_complete(dev, new);
+ } else {
+ sprintf(buf, "C %d %d\n", old, new);
+ drm_write_string(dev, buf);
+ }
+
+ atomic_dec(&q->use_count);
+
+ return 0;
+}
+
+int drm_context_switch_complete(drm_device_t *dev, int new)
+{
+ drm_device_dma_t *dma = dev->dma;
+
+ dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
+ dev->last_switch = ticks;
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("Lock isn't held after context switch\n");
+ }
+
+ if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("Cannot free lock\n");
+ }
+ }
+
+#if DRM_DMA_HISTOGRAM
+ {
+ struct timespec ts;
+ getnanotime(&ts);
+ timespecsub(&ts, &dev->ctx_start);
+ atomic_inc(&dev->histo.ctx[drm_histogram_slot(&ts)]);
+ }
+#endif
+ clear_bit(0, &dev->context_flag);
+ wakeup(&dev->context_wait);
+
+ return 0;
+}
+
+void drm_clear_next_buffer(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+
+ dma->next_buffer = NULL;
+ if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
+ wakeup(&dma->next_queue->flush_queue);
+ }
+ dma->next_queue = NULL;
+}
+
+
+int drm_select_queue(drm_device_t *dev, void (*wrapper)(void *))
+{
+ int i;
+ int candidate = -1;
+ int j = ticks;
+
+ if (!dev) {
+ DRM_ERROR("No device\n");
+ return -1;
+ }
+ if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
+ /* This only happens between the time the
+ interrupt is initialized and the time
+ the queues are initialized. */
+ return -1;
+ }
+
+ /* Doing "while locked" DMA? */
+ if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
+ return DRM_KERNEL_CONTEXT;
+ }
+
+ /* If there are buffers on the last_context
+ queue, and we have not been executing
+ this context very long, continue to
+ execute this context. */
+ if (dev->last_switch <= j
+ && dev->last_switch + DRM_TIME_SLICE > j
+ && DRM_WAITCOUNT(dev, dev->last_context)) {
+ return dev->last_context;
+ }
+
+ /* Otherwise, find a candidate */
+ for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
+ if (DRM_WAITCOUNT(dev, i)) {
+ candidate = dev->last_checked = i;
+ break;
+ }
+ }
+
+ if (candidate < 0) {
+ for (i = 0; i < dev->queue_count; i++) {
+ if (DRM_WAITCOUNT(dev, i)) {
+ candidate = dev->last_checked = i;
+ break;
+ }
+ }
+ }
+
+ if (wrapper
+ && candidate >= 0
+ && candidate != dev->last_context
+ && dev->last_switch <= j
+ && dev->last_switch + DRM_TIME_SLICE > j) {
+ int s = splclock();
+ if (dev->timer.c_time != dev->last_switch + DRM_TIME_SLICE) {
+ callout_reset(&dev->timer,
+ dev->last_switch + DRM_TIME_SLICE - j,
+ wrapper,
+ dev);
+ }
+ splx(s);
+ return -1;
+ }
+
+ return candidate;
+}
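Aside: a small sketch (not part of this patch) of the time-slice test drm_select_queue() applies before looking for another candidate: the last context keeps running while the current tick is still inside its slice and it still has buffers waiting. The DRM_TIME_SLICE value used below (HZ/20 with HZ = 100) is an assumption for illustration only.

/* Sketch of the keep-running-the-last-context test in drm_select_queue(). */
#include <stdio.h>

#define EX_HZ         100
#define EX_TIME_SLICE (EX_HZ / 20)	/* 5 ticks = 50 ms at HZ = 100 */

/* Keep running 'last' if its slice has not expired and it still has work. */
static int keep_last_context(int last_switch, int now, int waitcount)
{
	return last_switch <= now
	    && last_switch + EX_TIME_SLICE > now
	    && waitcount > 0;
}

int main(void)
{
	/* Slice started at tick 100; the queue still has 3 buffers waiting. */
	printf("tick 102: %d\n", keep_last_context(100, 102, 3));	/* 1: within slice */
	printf("tick 105: %d\n", keep_last_context(100, 105, 3));	/* 0: slice expired */
	printf("tick 102, empty queue: %d\n", keep_last_context(100, 102, 0));	/* 0 */
	return 0;
}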
+
+
+int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *d)
+{
+ int i;
+ drm_queue_t *q;
+ drm_buf_t *buf;
+ int idx;
+ int while_locked = 0;
+ drm_device_dma_t *dma = dev->dma;
+ int error;
+
+ DRM_DEBUG("%d\n", d->send_count);
+
+ if (d->flags & _DRM_DMA_WHILE_LOCKED) {
+ int context = dev->lock.hw_lock->lock;
+
+ if (!_DRM_LOCK_IS_HELD(context)) {
+ DRM_ERROR("No lock held during \"while locked\""
+ " request\n");
+ return EINVAL;
+ }
+ if (d->context != _DRM_LOCKING_CONTEXT(context)
+ && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Lock held by %d while %d makes"
+ " \"while locked\" request\n",
+ _DRM_LOCKING_CONTEXT(context),
+ d->context);
+ return EINVAL;
+ }
+ q = dev->queuelist[DRM_KERNEL_CONTEXT];
+ while_locked = 1;
+ } else {
+ q = dev->queuelist[d->context];
+ }
+
+
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->block_write)) {
+ atomic_inc(&q->block_count);
+ for (;;) {
+ if (!atomic_read(&q->block_write)) break;
+ error = tsleep(&q->block_write, PZERO|PCATCH,
+ "dmawr", 0);
+ if (error) {
+ atomic_dec(&q->use_count);
+ return error;
+ }
+ }
+ atomic_dec(&q->block_count);
+ }
+
+ for (i = 0; i < d->send_count; i++) {
+ idx = d->send_indices[i];
+ if (idx < 0 || idx >= dma->buf_count) {
+ atomic_dec(&q->use_count);
+ DRM_ERROR("Index %d (of %d max)\n",
+ d->send_indices[i], dma->buf_count - 1);
+ return EINVAL;
+ }
+ buf = dma->buflist[ idx ];
+ if (buf->pid != curproc->p_pid) {
+ atomic_dec(&q->use_count);
+ DRM_ERROR("Process %d using buffer owned by %d\n",
+ curproc->p_pid, buf->pid);
+ return EINVAL;
+ }
+ if (buf->list != DRM_LIST_NONE) {
+ atomic_dec(&q->use_count);
+ DRM_ERROR("Process %d using buffer %d on list %d\n",
+ curproc->p_pid, buf->idx, buf->list);
+ }
+ buf->used = d->send_sizes[i];
+ buf->while_locked = while_locked;
+ buf->context = d->context;
+ if (!buf->used) {
+ DRM_ERROR("Queueing 0 length buffer\n");
+ }
+ if (buf->pending) {
+ atomic_dec(&q->use_count);
+ DRM_ERROR("Queueing pending buffer:"
+ " buffer %d, offset %d\n",
+ d->send_indices[i], i);
+ return EINVAL;
+ }
+ if (buf->waiting) {
+ atomic_dec(&q->use_count);
+ DRM_ERROR("Queueing waiting buffer:"
+ " buffer %d, offset %d\n",
+ d->send_indices[i], i);
+ return EINVAL;
+ }
+ buf->waiting = 1;
+ if (atomic_read(&q->use_count) == 1
+ || atomic_read(&q->finalization)) {
+ drm_free_buffer(dev, buf);
+ } else {
+ drm_waitlist_put(&q->waitlist, buf);
+ atomic_inc(&q->total_queued);
+ }
+ }
+ atomic_dec(&q->use_count);
+
+ return 0;
+}
+
+static int drm_dma_get_buffers_of_order(drm_device_t *dev, drm_dma_t *d,
+ int order)
+{
+ int i;
+ int error;
+ drm_buf_t *buf;
+ drm_device_dma_t *dma = dev->dma;
+
+ for (i = d->granted_count; i < d->request_count; i++) {
+ buf = drm_freelist_get(&dma->bufs[order].freelist,
+ d->flags & _DRM_DMA_WAIT);
+ if (!buf) break;
+ if (buf->pending || buf->waiting) {
+ DRM_ERROR("Free buffer %d in use by %d (w%d, p%d)\n",
+ buf->idx,
+ buf->pid,
+ buf->waiting,
+ buf->pending);
+ }
+ buf->pid = curproc->p_pid;
+ error = copyout(&buf->idx,
+ &d->request_indices[i],
+ sizeof(buf->idx));
+ if (error)
+ return error;
+ error = copyout(&buf->total,
+ &d->request_sizes[i],
+ sizeof(buf->total));
+ if (error)
+ return error;
+ ++d->granted_count;
+ }
+ return 0;
+}
+
+
+int drm_dma_get_buffers(drm_device_t *dev, drm_dma_t *dma)
+{
+ int order;
+ int retcode = 0;
+ int tmp_order;
+
+ order = drm_order(dma->request_size);
+
+ dma->granted_count = 0;
+ retcode = drm_dma_get_buffers_of_order(dev, dma, order);
+
+ if (dma->granted_count < dma->request_count
+ && (dma->flags & _DRM_DMA_SMALLER_OK)) {
+ for (tmp_order = order - 1;
+ !retcode
+ && dma->granted_count < dma->request_count
+ && tmp_order >= DRM_MIN_ORDER;
+ --tmp_order) {
+
+ retcode = drm_dma_get_buffers_of_order(dev, dma,
+ tmp_order);
+ }
+ }
+
+ if (dma->granted_count < dma->request_count
+ && (dma->flags & _DRM_DMA_LARGER_OK)) {
+ for (tmp_order = order + 1;
+ !retcode
+ && dma->granted_count < dma->request_count
+ && tmp_order <= DRM_MAX_ORDER;
+ ++tmp_order) {
+
+ retcode = drm_dma_get_buffers_of_order(dev, dma,
+ tmp_order);
+ }
+ }
+ return 0;
+}
diff --git a/bsd/drm/drawable.c b/bsd/drm/drawable.c
new file mode 100644
index 00000000..d8005af6
--- /dev/null
+++ b/bsd/drm/drawable.c
@@ -0,0 +1,50 @@
+/* drawable.c -- IOCTLs for drawables -*- c -*-
+ * Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
+ * Revised: Fri Aug 20 09:27:03 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drawable.c,v 1.3 1999/08/30 13:05:00 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drawable.c,v 1.1 1999/09/25 14:37:58 dawes Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+int drm_adddraw(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_draw_t draw;
+
+ draw.handle = 0; /* NOOP */
+ DRM_DEBUG("%d\n", draw.handle);
+ *(drm_draw_t *) data = draw;
+ return 0;
+}
+
+int drm_rmdraw(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ return 0; /* NOOP */
+}
diff --git a/bsd/drm/drmstat.c b/bsd/drm/drmstat.c
new file mode 100644
index 00000000..0ce76b01
--- /dev/null
+++ b/bsd/drm/drmstat.c
@@ -0,0 +1,418 @@
+/* drmstat.c -- DRM device status and testing program
+ * Created: Tue Jan 5 08:19:24 1999 by faith@precisioninsight.com
+ * Revised: Sun Aug 1 11:02:00 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmstat.c,v 1.28 1999/08/04 18:12:11 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmstat.c,v 1.1 1999/09/25 14:37:59 dawes Exp $
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/mman.h>
+#include <strings.h>
+#include <errno.h>
+#include <signal.h>
+#include <fcntl.h>
+#include "xf86drm.h"
+
+int sigio_fd;
+
+static double usec(struct timeval *end, struct timeval *start)
+{
+ double e = end->tv_sec * 1000000 + end->tv_usec;
+ double s = start->tv_sec * 1000000 + start->tv_usec;
+
+ return e - s;
+}
+
+static void getversion(int fd)
+{
+ drmVersionPtr version;
+
+ version = drmGetVersion(fd);
+ if (version) {
+ printf( "Name: %s\n", version->name ? version->name : "?" );
+ printf( " Version: %d.%d.%d\n",
+ version->version_major,
+ version->version_minor,
+ version->version_patchlevel );
+ printf( " Date: %s\n", version->date ? version->date : "?" );
+ printf( " Desc: %s\n", version->desc ? version->desc : "?" );
+ drmFreeVersion(version);
+ } else {
+ printf( "No driver available\n" );
+ }
+}
+
+void handler(int fd, void *oldctx, void *newctx)
+{
+ printf("Got fd %d\n", fd);
+}
+
+void process_sigio(char *device)
+{
+ int fd;
+
+	printf("%s\n", device);
+	if ((fd = open(device, 0)) < 0) {
+		printf("%d\n", errno);
+ drmError(-errno, __FUNCTION__);
+ exit(1);
+ }
+
+ sigio_fd = fd;
+ drmInstallSIGIOHandler(fd, handler);
+ for (;;) sleep(60);
+}
+
+int main(int argc, char **argv)
+{
+ int c;
+ int r = 0;
+ int fd = -1;
+ drmHandle handle;
+ void *address;
+ char *pt;
+ unsigned long count;
+ unsigned long offset;
+ unsigned long size;
+ drmContext context;
+ int loops;
+ char buf[1024];
+ int i;
+ drmBufInfoPtr info;
+ drmBufMapPtr bufs;
+ drmLockPtr lock;
+ int secs;
+
+ while ((c = getopt(argc, argv,
+ "lc:vo:O:f:s:w:W:b:r:R:P:L:C:XS:B:F:")) != EOF)
+ switch (c) {
+ case 'F':
+ count = strtoul(optarg, NULL, 0);
+ if (!fork()) {
+ dup(fd);
+ sleep(count);
+ }
+ close(fd);
+ break;
+ case 'v': getversion(fd); break;
+ case 'X':
+ if ((r = drmCreateContext(fd, &context))) {
+ drmError(r, argv[0]);
+ return 1;
+ }
+ printf( "Got %d\n", context);
+ break;
+ case 'S':
+ process_sigio(optarg);
+ break;
+ case 'C':
+ if ((r = drmSwitchToContext(fd, strtoul(optarg, NULL, 0)))) {
+ drmError(r, argv[0]);
+ return 1;
+ }
+ break;
+ case 'c':
+ if ((r = drmSetBusid(fd,optarg))) {
+ drmError(r, argv[0]);
+ return 1;
+ }
+ break;
+ case 'o':
+ if ((fd = drmOpen(optarg, NULL)) < 0) {
+ drmError(fd, argv[0]);
+ return 1;
+ }
+ break;
+ case 'O':
+ if ((fd = drmOpen(NULL, optarg)) < 0) {
+ drmError(fd, argv[0]);
+ return 1;
+ }
+ break;
+ case 'B': /* Test buffer allocation */
+ count = strtoul(optarg, &pt, 0);
+ size = strtoul(pt+1, &pt, 0);
+ secs = strtoul(pt+1, NULL, 0);
+ {
+ drmDMAReq dma;
+ int *indices, *sizes;
+
+ indices = alloca(sizeof(*indices) * count);
+ sizes = alloca(sizeof(*sizes) * count);
+ dma.context = context;
+ dma.send_count = 0;
+ dma.request_count = count;
+ dma.request_size = size;
+ dma.request_list = indices;
+ dma.request_sizes = sizes;
+ dma.flags = DRM_DMA_WAIT;
+ if ((r = drmDMA(fd, &dma))) {
+ drmError(r, argv[0]);
+ return 1;
+ }
+ for (i = 0; i < dma.granted_count; i++) {
+ printf("%5d: index = %d, size = %d\n",
+ i, dma.request_list[i], dma.request_sizes[i]);
+ }
+ sleep(secs);
+ drmFreeBufs(fd, dma.granted_count, indices);
+ }
+ break;
+ case 'b':
+ count = strtoul(optarg, &pt, 0);
+ size = strtoul(pt+1, NULL, 0);
+ if ((r = drmAddBufs(fd, count, size, 0, 0)) < 0) {
+ drmError(r, argv[0]);
+ return 1;
+ }
+ if (!(info = drmGetBufInfo(fd))) {
+ drmError(0, argv[0]);
+ return 1;
+ }
+ for (i = 0; i < info->count; i++) {
+ printf("%5d buffers of size %6d (low = %d, high = %d)\n",
+ info->list[i].count,
+ info->list[i].size,
+ info->list[i].low_mark,
+ info->list[i].high_mark);
+ }
+ if ((r = drmMarkBufs(fd, 0.50, 0.80))) {
+ drmError(r, argv[0]);
+ return 1;
+ }
+ if (!(info = drmGetBufInfo(fd))) {
+ drmError(0, argv[0]);
+ return 1;
+ }
+ for (i = 0; i < info->count; i++) {
+ printf("%5d buffers of size %6d (low = %d, high = %d)\n",
+ info->list[i].count,
+ info->list[i].size,
+ info->list[i].low_mark,
+ info->list[i].high_mark);
+ }
+ printf("===== /proc/drm/1/meminfo =====\n");
+ sprintf(buf, "cat /proc/drm/1/meminfo");
+ system(buf);
+#if 1
+ if (!(bufs = drmMapBufs(fd))) {
+ drmError(0, argv[0]);
+ return 1;
+ }
+ printf("===============================\n");
+ printf( "%d bufs\n", bufs->count);
+ for (i = 0; i < bufs->count; i++) {
+ printf( " %4d: %8d bytes at %p\n",
+ i,
+ bufs->list[i].total,
+ bufs->list[i].address);
+ }
+ printf("===== /proc/drm/1/vmainfo =====\n");
+ sprintf(buf, "cat /proc/drm/1/vmainfo");
+ system(buf);
+#endif
+ break;
+ case 'f':
+ offset = strtoul(optarg, &pt, 0);
+ size = strtoul(pt+1, NULL, 0);
+ handle = 0;
+ if ((r = drmAddMap(fd, offset, size,
+ DRM_FRAME_BUFFER, 0, &handle))) {
+ drmError(r, argv[0]);
+ return 1;
+ }
+ printf("0x%08lx:0x%04lx added\n", offset, size);
+ printf("===== /proc/drm/1/meminfo =====\n");
+ sprintf(buf, "cat /proc/drm/1/meminfo");
+ system(buf);
+ break;
+ case 'r':
+ case 'R':
+ offset = strtoul(optarg, &pt, 0);
+ size = strtoul(pt+1, NULL, 0);
+ handle = 0;
+ if ((r = drmAddMap(fd, offset, size,
+ DRM_REGISTERS,
+ c == 'R' ? DRM_READ_ONLY : 0,
+ &handle))) {
+ drmError(r, argv[0]);
+ return 1;
+ }
+ printf("0x%08lx:0x%04lx added\n", offset, size);
+ printf("===== /proc/drm/1/meminfo =====\n");
+ sprintf(buf, "cat /proc/drm/1/meminfo");
+ system(buf);
+ break;
+ case 's':
+ size = strtoul(optarg, &pt, 0);
+ handle = 0;
+ if ((r = drmAddMap(fd, 0, size,
+ DRM_SHM, DRM_CONTAINS_LOCK,
+ &handle))) {
+ drmError(r, argv[0]);
+ return 1;
+ }
+ printf("0x%04lx byte shm added at 0x%08lx\n", size, handle);
+ sprintf(buf, "sysctl hw.graphics.0.vm");
+ system(buf);
+ break;
+ case 'P':
+ offset = strtoul(optarg, &pt, 0);
+ size = strtoul(pt+1, NULL, 0);
+ address = NULL;
+ if ((r = drmMap(fd, offset, size, &address))) {
+ drmError(r, argv[0]);
+ return 1;
+ }
+ printf("0x%08lx:0x%04lx mapped at %p for pid %d\n",
+ offset, size, address, getpid());
+ printf("===== hw.graphics.0.vma =====\n");
+ sprintf(buf, "sysctl hw.graphics.0.vma");
+ system(buf);
+ mprotect((void *)offset, size, PROT_READ);
+ printf("===== hw.graphics.0.vma =====\n");
+ sprintf(buf, "sysctl hw.graphics.0.vma");
+ system(buf);
+ break;
+ case 'w':
+ case 'W':
+ offset = strtoul(optarg, &pt, 0);
+ size = strtoul(pt+1, NULL, 0);
+ address = NULL;
+ if ((r = drmMap(fd, offset, size, &address))) {
+ drmError(r, argv[0]);
+ return 1;
+ }
+ printf("0x%08lx:0x%04lx mapped at %p for pid %d\n",
+ offset, size, address, getpid());
+ printf("===== /proc/%d/maps =====\n", getpid());
+ sprintf(buf, "cat /proc/%d/maps", getpid());
+ system(buf);
+ printf("===== /proc/drm/1/meminfo =====\n");
+ sprintf(buf, "cat /proc/drm/1/meminfo");
+ system(buf);
+ printf("===== /proc/drm/1/vmainfo =====\n");
+ sprintf(buf, "cat /proc/drm/1/vmainfo");
+ system(buf);
+ printf("===== READING =====\n");
+ for (i = 0; i < 0x10; i++)
+ printf("%02x ", (unsigned int)((unsigned char *)address)[i]);
+ printf("\n");
+ if (c == 'w') {
+ printf("===== WRITING =====\n");
+ for (i = 0; i < size; i+=2) {
+ ((char *)address)[i] = i & 0xff;
+ ((char *)address)[i+1] = i & 0xff;
+ }
+ }
+ printf("===== READING =====\n");
+ for (i = 0; i < 0x10; i++)
+ printf("%02x ", (unsigned int)((unsigned char *)address)[i]);
+ printf("\n");
+ printf("===== /proc/drm/1/vmainfo =====\n");
+ sprintf(buf, "cat /proc/drm/1/vmainfo");
+ system(buf);
+ break;
+ case 'L':
+ context = strtoul(optarg, &pt, 0);
+ offset = strtoul(pt+1, &pt, 0);
+ size = strtoul(pt+1, &pt, 0);
+ loops = strtoul(pt+1, NULL, 0);
+ address = NULL;
+ if ((r = drmMap(fd, offset, size, &address))) {
+ drmError(r, argv[0]);
+ return 1;
+ }
+ lock = address;
+#if 1
+ {
+ int counter = 0;
+ struct timeval loop_start, loop_end;
+ struct timeval lock_start, lock_end;
+ double wt;
+#define HISTOSIZE 9
+ int histo[HISTOSIZE];
+ int output = 0;
+ int fast = 0;
+
+ if (loops < 0) {
+ loops = -loops;
+ ++output;
+ }
+
+ for (i = 0; i < HISTOSIZE; i++) histo[i] = 0;
+
+ gettimeofday(&loop_start, NULL);
+ for (i = 0; i < loops; i++) {
+ gettimeofday(&lock_start, NULL);
+ DRM_LIGHT_LOCK_COUNT(fd,lock,context,fast);
+ gettimeofday(&lock_end, NULL);
+ DRM_UNLOCK(fd,lock,context);
+ ++counter;
+ wt = usec(&lock_end, &lock_start);
+ if (wt <= 2.5) ++histo[8];
+ if (wt < 5.0) ++histo[0];
+ else if (wt < 50.0) ++histo[1];
+ else if (wt < 500.0) ++histo[2];
+ else if (wt < 5000.0) ++histo[3];
+ else if (wt < 50000.0) ++histo[4];
+ else if (wt < 500000.0) ++histo[5];
+ else if (wt < 5000000.0) ++histo[6];
+ else ++histo[7];
+ if (output) printf( "%.2f uSec, %d fast\n", wt, fast);
+ }
+ gettimeofday(&loop_end, NULL);
+ printf( "Average wait time = %.2f usec, %d fast\n",
+ usec(&loop_end, &loop_start) / counter, fast);
+ printf( "%9d <= 2.5 uS\n", histo[8]);
+ printf( "%9d < 5 uS\n", histo[0]);
+ printf( "%9d < 50 uS\n", histo[1]);
+ printf( "%9d < 500 uS\n", histo[2]);
+ printf( "%9d < 5000 uS\n", histo[3]);
+ printf( "%9d < 50000 uS\n", histo[4]);
+ printf( "%9d < 500000 uS\n", histo[5]);
+ printf( "%9d < 5000000 uS\n", histo[6]);
+ printf( "%9d >= 5000000 uS\n", histo[7]);
+ }
+#else
+ printf( "before lock: 0x%08x\n", lock->lock);
+ printf( "lock: 0x%08x\n", lock->lock);
+ sleep(5);
+ printf( "unlock: 0x%08x\n", lock->lock);
+#endif
+ break;
+ default:
+ fprintf( stderr, "Usage: drmstat [options]\n" );
+ return 1;
+ }
+
+ return r;
+}
diff --git a/bsd/drm/fops.c b/bsd/drm/fops.c
new file mode 100644
index 00000000..837fc7db
--- /dev/null
+++ b/bsd/drm/fops.c
@@ -0,0 +1,260 @@
+/* fops.c -- File operations for DRM -*- c -*-
+ * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
+ * Revised: Tue Oct 12 08:48:59 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/fops.c,v 1.3 1999/08/20 15:36:45 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/fops.c,v 1.1 1999/09/25 14:37:59 dawes Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include <sys/signalvar.h>
+#include <sys/poll.h>
+
+drm_file_t *drm_find_file_by_proc(drm_device_t *dev, struct proc *p)
+{
+ uid_t uid = p->p_cred->p_svuid;
+ pid_t pid = p->p_pid;
+ drm_file_t *priv;
+
+ TAILQ_FOREACH(priv, &dev->files, link)
+ if (priv->pid == pid && priv->uid == uid)
+ return priv;
+ return NULL;
+}
+
+
+/* drm_open is called whenever a process opens /dev/drm. */
+
+int drm_open_helper(dev_t kdev, int flags, int fmt, struct proc *p,
+ drm_device_t *dev)
+{
+ int m = minor(kdev);
+ drm_file_t *priv;
+
+ if (flags & O_EXCL)
+ return EBUSY; /* No exclusive opens */
+
+ dev->flags = flags;
+
+ DRM_DEBUG("pid = %d, device = %p, minor = %d\n",
+ p->p_pid, dev->device, m);
+
+ priv = drm_find_file_by_proc(dev, p);
+ if (priv) {
+ priv->refs++;
+ } else {
+ priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
+ memset(priv, 0, sizeof(*priv));
+ priv->uid = p->p_cred->p_svuid;
+ priv->pid = p->p_pid;
+ priv->refs = 1;
+ priv->minor = m;
+ priv->devXX = dev;
+ priv->ioctl_count = 0;
+ priv->authenticated = !suser(p);
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, p);
+ TAILQ_INSERT_TAIL(&dev->files, priv, link);
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, p);
+ }
+
+ kdev->si_drv1 = dev;
+
+ return 0;
+}
+
+int drm_write(dev_t kdev, struct uio *uio, int ioflag)
+{
+ struct proc *p = curproc;
+ drm_device_t *dev = kdev->si_drv1;
+
+ DRM_DEBUG("pid = %d, device = %p, open_count = %d\n",
+ p->p_pid, dev->device, dev->open_count);
+ return 0;
+}
+
+/* drm_release is called whenever a process closes /dev/drm*. */
+
+int drm_close(dev_t kdev, int fflag, int devtype, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_file_t *priv;
+
+ DRM_DEBUG("pid = %d, device = %p, open_count = %d\n",
+ p->p_pid, dev->device, dev->open_count);
+
+ priv = drm_find_file_by_proc(dev, p);
+ if (!priv) {
+ DRM_DEBUG("can't find authenticator\n");
+ return EINVAL;
+ }
+
+ if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
+ && dev->lock.pid == p->p_pid) {
+ DRM_ERROR("Process %d dead, freeing lock for context %d\n",
+ p->p_pid,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ drm_lock_free(dev,
+ &dev->lock.hw_lock->lock,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+
+ /* FIXME: may require heavy-handed reset of
+ hardware at this point, possibly
+ processed via a callback to the X
+ server. */
+ }
+ drm_reclaim_buffers(dev, priv->pid);
+
+ funsetown(dev->buf_sigio);
+
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, p);
+ priv = drm_find_file_by_proc(dev, p);
+ if (priv) {
+ priv->refs--;
+ if (!priv->refs) {
+ TAILQ_REMOVE(&dev->files, priv, link);
+ drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
+ }
+ }
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, p);
+
+ return 0;
+}
+
+/* The drm_read and drm_write_string code (especially that which manages
+ the circular buffer), is based on Alessandro Rubini's LINUX DEVICE
+ DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */
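Aside: a standalone sketch (not part of this patch) of the ring-buffer arithmetic the functions below rely on. DRM_BSZ comes from drmP.h; a 16-byte buffer is assumed here purely to keep the numbers small.

/* Sketch of the circular-buffer bookkeeping in drm_read()/drm_write_string(). */
#include <stdio.h>

#define EX_BSZ 16

/* Bytes the writer may still produce before it would catch up with the
 * reader: the distance from wp forward to rp, modulo the buffer size. */
static int writer_headroom(int rp, int wp)
{
	return (rp + EX_BSZ - wp) % EX_BSZ;
}

/* Bytes queued for the reader; only meaningful when rp != wp, which is
 * what the tsleep() loop at the top of drm_read() guarantees. */
static int unread(int rp, int wp)
{
	return EX_BSZ - writer_headroom(rp, wp);
}

int main(void)
{
	/* Writer ahead of reader, no wrap: rp = 3, wp = 9. */
	printf("no wrap: headroom %2d, unread %2d\n",
	       writer_headroom(3, 9), unread(3, 9));	/* 10 and 6 */
	/* Writer wrapped past the end: rp = 11, wp = 2. */
	printf("wrapped: headroom %2d, unread %2d\n",
	       writer_headroom(11, 2), unread(11, 2));	/* 9 and 7 */
	return 0;
}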
+
+ssize_t drm_read(dev_t kdev, struct uio *uio, int ioflag)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ int left;
+ int avail;
+ int send;
+ int cur;
+ int error = 0;
+
+ DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp);
+
+ while (dev->buf_rp == dev->buf_wp) {
+ DRM_DEBUG(" sleeping\n");
+ if (dev->flags & FASYNC) {
+ return EWOULDBLOCK;
+ }
+ error = tsleep(&dev->buf_rp, PZERO|PCATCH, "drmrd", 0);
+ if (error) {
+ DRM_DEBUG(" interrupted\n");
+ return error;
+ }
+ DRM_DEBUG(" awake\n");
+ }
+
+ left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
+ avail = DRM_BSZ - left;
+ send = DRM_MIN(avail, uio->uio_resid);
+
+ while (send) {
+ if (dev->buf_wp > dev->buf_rp) {
+ cur = DRM_MIN(send, dev->buf_wp - dev->buf_rp);
+ } else {
+ cur = DRM_MIN(send, dev->buf_end - dev->buf_rp);
+ }
+ error = uiomove(dev->buf_rp, cur, uio);
+ if (error)
+ break;
+ dev->buf_rp += cur;
+ if (dev->buf_rp == dev->buf_end) dev->buf_rp = dev->buf;
+ send -= cur;
+ }
+
+ wakeup(&dev->buf_wp);
+
+ return error;
+}
+
+int drm_write_string(drm_device_t *dev, const char *s)
+{
+ int left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
+ int send = strlen(s);
+ int count;
+
+ DRM_DEBUG("%d left, %d to send (%p, %p)\n",
+ left, send, dev->buf_rp, dev->buf_wp);
+
+ if (left == 1 || dev->buf_wp != dev->buf_rp) {
+ DRM_ERROR("Buffer not empty (%d left, wp = %p, rp = %p)\n",
+ left,
+ dev->buf_wp,
+ dev->buf_rp);
+ }
+
+ while (send) {
+ if (dev->buf_wp >= dev->buf_rp) {
+ count = DRM_MIN(send, dev->buf_end - dev->buf_wp);
+ if (count == left) --count; /* Leave a hole */
+ } else {
+ count = DRM_MIN(send, dev->buf_rp - dev->buf_wp - 1);
+ }
+ strncpy(dev->buf_wp, s, count);
+ dev->buf_wp += count;
+ if (dev->buf_wp == dev->buf_end) dev->buf_wp = dev->buf;
+ send -= count;
+ }
+
+ if (dev->buf_selecting) {
+ dev->buf_selecting = 0;
+ selwakeup(&dev->buf_sel);
+ }
+
+ DRM_DEBUG("dev->buf_sigio=%p\n", dev->buf_sigio);
+ if (dev->buf_sigio) {
+ DRM_DEBUG("dev->buf_sigio->sio_pgid=%d\n", dev->buf_sigio->sio_pgid);
+ pgsigio(dev->buf_sigio, SIGIO, 0);
+ }
+
+ DRM_DEBUG("waking\n");
+ wakeup(&dev->buf_rp);
+ return 0;
+}
+
+int drm_poll(dev_t kdev, int events, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ int s;
+ int revents = 0;
+
+ s = spldrm();
+ if (events & (POLLIN | POLLRDNORM)) {
+ int left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
+ if (left > 0)
+ revents |= events & (POLLIN | POLLRDNORM);
+ else
+ selrecord(p, &dev->buf_sel);
+ }
+ splx(s);
+
+ return revents;
+}
diff --git a/bsd/drm/init.c b/bsd/drm/init.c
new file mode 100644
index 00000000..44e9be99
--- /dev/null
+++ b/bsd/drm/init.c
@@ -0,0 +1,101 @@
+/* init.c -- Setup/Cleanup for DRM -*- c -*-
+ * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
+ * Revised: Fri Aug 20 09:27:02 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/init.c,v 1.3 1999/08/20 15:07:01 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/init.c,v 1.1 1999/09/25 14:38:01 dawes Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+MODULE_VERSION(drm, 1);
+
+int drm_flags = 0;
+
+/* drm_parse_option parses a single option. See the description of
+   drm_parse_options (below) for details. */
+
+static void drm_parse_option(char *s)
+{
+ char *c, *r;
+
+ DRM_DEBUG("\"%s\"\n", s);
+ if (!s || !*s) return;
+ for (c = s; *c && *c != ':'; c++); /* find : or \0 */
+ if (*c) r = c + 1; else r = NULL; /* remember remainder */
+ *c = '\0'; /* terminate */
+ if (!strcmp(s, "noctx")) {
+ drm_flags |= DRM_FLAG_NOCTX;
+ DRM_INFO("Server-mediated context switching OFF\n");
+ return;
+ }
+ if (!strcmp(s, "debug")) {
+ drm_flags |= DRM_FLAG_DEBUG;
+ DRM_INFO("Debug messages ON\n");
+ return;
+ }
+ DRM_ERROR("\"%s\" is not a valid option\n", s);
+ return;
+}
+
+/* drm_parse_options parses the insmod "drm=" options, or the command-line
+ * options passed to the kernel via LILO. The grammar of the format is as
+ * follows:
+ *
+ * drm ::= 'drm=' option_list
+ * option_list ::= option [ ';' option_list ]
+ * option ::= 'device:' major
+ * | 'debug'
+ * | 'noctx'
+ * major ::= INTEGER
+ *
+ * Note that 's' contains option_list without the 'drm=' part.
+ *
+ * device=major,minor specifies the device number used for /dev/drm
+ * if major == 0 then the misc device is used
+ * if major == 0 and minor == 0 then dynamic misc allocation is used
+ * debug=on specifies that debugging messages will be printk'd
+ * debug=trace specifies that each function call will be logged via printk
+ * debug=off turns off all debugging options
+ *
+ */
+
+void drm_parse_options(char *s)
+{
+ char *h, *t, *n;
+
+ DRM_DEBUG("\"%s\"\n", s ?: "");
+ if (!s || !*s) return;
+
+	for (h = t = n = s; h && *h; h = t = n) {
+ for (; *t && *t != ';'; t++); /* find ; or \0 */
+ if (*t) n = t + 1; else n = NULL; /* remember next */
+ *t = '\0'; /* terminate */
+ drm_parse_option(h); /* parse */
+ }
+}
+
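[Editorial note, not part of this patch: the parser above splits the option string on ';' and each option on ':'.  The following minimal userland sketch applies the same two-level split to a string such as "debug;noctx"; the option names mirror drm_parse_option, while the DRM_* macros are replaced with plain printf and all other names are illustrative only.]

#include <stdio.h>
#include <string.h>

/* Userland sketch of the drm= option grammar: options separated by ';',
 * each option optionally carrying a ':'-separated argument. */
static void parse_option(char *s)
{
	char *arg = strchr(s, ':');

	if (arg) *arg = '\0';		/* terminate the option name */
	if (!strcmp(s, "noctx"))
		printf("noctx: server-mediated context switching OFF\n");
	else if (!strcmp(s, "debug"))
		printf("debug: debug messages ON\n");
	else
		printf("\"%s\" is not a valid option\n", s);
}

int main(void)
{
	char opts[] = "debug;noctx;bogus";
	char *h, *t;

	for (h = opts; h && *h; h = t) {
		t = strchr(h, ';');	/* find ';' or end of string */
		if (t) *t++ = '\0';	/* terminate this option */
		parse_option(h);
	}
	return 0;
}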
diff --git a/bsd/drm/ioctl.c b/bsd/drm/ioctl.c
new file mode 100644
index 00000000..55bdeeda
--- /dev/null
+++ b/bsd/drm/ioctl.c
@@ -0,0 +1,120 @@
+/* ioctl.c -- IOCTL processing for DRM -*- c -*-
+ * Created: Fri Jan 8 09:01:26 1999 by faith@precisioninsight.com
+ * Revised: Fri Aug 20 09:27:02 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/ioctl.c,v 1.3 1999/08/30 13:05:00 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/ioctl.c,v 1.1 1999/09/25 14:38:01 dawes Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include <sys/bus.h>
+#include <pci/pcivar.h>
+
+int
+drm_irq_busid(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_irq_busid_t id;
+ devclass_t pci;
+ device_t bus, dev;
+ device_t *kids;
+ int error, i, num_kids;
+
+ id = *(drm_irq_busid_t *) data;
+ pci = devclass_find("pci");
+ if (!pci)
+ return ENOENT;
+ bus = devclass_get_device(pci, id.busnum);
+ if (!bus)
+ return ENOENT;
+ error = device_get_children(bus, &kids, &num_kids);
+ if (error)
+ return error;
+
+ dev = 0;
+ for (i = 0; i < num_kids; i++) {
+ dev = kids[i];
+ if (pci_get_slot(dev) == id.devnum
+ && pci_get_function(dev) == id.funcnum)
+ break;
+ }
+
+ free(kids, M_TEMP);
+
+ if (i != num_kids)
+ id.irq = pci_get_irq(dev);
+ else
+ id.irq = 0;
+
+ DRM_DEBUG("%d:%d:%d => IRQ %d\n",
+ id.busnum, id.devnum, id.funcnum, id.irq);
+ *(drm_irq_busid_t *) data = id;
+
+ return 0;
+}
+
+int
+drm_getunique(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_unique_t u;
+ int error;
+
+ u = *(drm_unique_t *) data;
+ if (u.unique_len >= dev->unique_len) {
+ error = copyout(dev->unique, u.unique, dev->unique_len);
+ if (error)
+ return error;
+ }
+ u.unique_len = dev->unique_len;
+ *(drm_unique_t *) data = u;
+ return 0;
+}
+
+int
+drm_setunique(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_unique_t u;
+ int error;
+
+ if (dev->unique_len || dev->unique) return EBUSY;
+
+ u = *(drm_unique_t *) data;
+
+ dev->unique_len = u.unique_len;
+ dev->unique = drm_alloc(u.unique_len + 1, DRM_MEM_DRIVER);
+ error = copyin(u.unique, dev->unique, dev->unique_len);
+ if (error)
+ return error;
+ dev->unique[dev->unique_len] = '\0';
+
+ dev->devname = drm_alloc(strlen(dev->name) + strlen(dev->unique) + 2,
+ DRM_MEM_DRIVER);
+ sprintf(dev->devname, "%s@%s", dev->name, dev->unique);
+
+ return 0;
+}
diff --git a/bsd/drm/lists.c b/bsd/drm/lists.c
new file mode 100644
index 00000000..9f9b5f7a
--- /dev/null
+++ b/bsd/drm/lists.c
@@ -0,0 +1,258 @@
+/* lists.c -- Buffer list handling routines -*- c -*-
+ * Created: Mon Apr 19 20:54:22 1999 by faith@precisioninsight.com
+ * Revised: Fri Aug 20 09:27:01 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/lists.c,v 1.3 1999/08/20 15:07:02 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/lists.c,v 1.1 1999/09/25 14:38:01 dawes Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+int drm_waitlist_create(drm_waitlist_t *bl, int count)
+{
+ DRM_DEBUG("%d\n", count);
+ if (bl->count) return EINVAL;
+
+ bl->count = count;
+ bl->bufs = drm_alloc((bl->count + 2) * sizeof(*bl->bufs),
+ DRM_MEM_BUFLISTS);
+ bl->rp = bl->bufs;
+ bl->wp = bl->bufs;
+ bl->end = &bl->bufs[bl->count+1];
+ simple_lock_init(&bl->write_lock);
+ simple_lock_init(&bl->read_lock);
+ return 0;
+}
+
+int drm_waitlist_destroy(drm_waitlist_t *bl)
+{
+ DRM_DEBUG("\n");
+ if (bl->rp != bl->wp) return EINVAL;
+ if (bl->bufs) drm_free(bl->bufs,
+ (bl->count + 2) * sizeof(*bl->bufs),
+ DRM_MEM_BUFLISTS);
+ bl->count = 0;
+ bl->bufs = NULL;
+ bl->rp = NULL;
+ bl->wp = NULL;
+ bl->end = NULL;
+ return 0;
+}
+
+int drm_waitlist_put(drm_waitlist_t *bl, drm_buf_t *buf)
+{
+ int left;
+ int s;
+
+ left = DRM_LEFTCOUNT(bl);
+ DRM_DEBUG("put %d (%d left, rp = %p, wp = %p)\n",
+ buf->idx, left, bl->rp, bl->wp);
+ if (!left) {
+ DRM_ERROR("Overflow while adding buffer %d from pid %d\n",
+ buf->idx, buf->pid);
+ return EINVAL;
+ }
+#if DRM_DMA_HISTOGRAM
+ getnanotime(&buf->time_queued);
+#endif
+ buf->list = DRM_LIST_WAIT;
+
+ simple_lock(&bl->write_lock);
+ s = spldrm();
+ *bl->wp = buf;
+ if (++bl->wp >= bl->end) bl->wp = bl->bufs;
+ splx(s);
+ simple_unlock(&bl->write_lock);
+
+ return 0;
+}
+
+drm_buf_t *drm_waitlist_get(drm_waitlist_t *bl)
+{
+ drm_buf_t *buf;
+ int s;
+
+ simple_lock(&bl->read_lock);
+ s = spldrm();
+ buf = *bl->rp;
+ if (bl->rp == bl->wp) {
+ splx(s);
+ simple_unlock(&bl->read_lock);
+ return NULL;
+ }
+ if (++bl->rp >= bl->end) bl->rp = bl->bufs;
+ splx(s);
+ simple_unlock(&bl->read_lock);
+
+ DRM_DEBUG("get %d\n", buf->idx);
+ return buf;
+}
+
+int drm_freelist_create(drm_freelist_t *bl, int count)
+{
+ DRM_DEBUG("\n");
+ atomic_set(&bl->count, 0);
+ bl->next = NULL;
+ bl->waiting = 0;
+ bl->low_mark = 0;
+ bl->high_mark = 0;
+ atomic_set(&bl->wfh, 0);
+ ++bl->initialized;
+ return 0;
+}
+
+int drm_freelist_destroy(drm_freelist_t *bl)
+{
+ DRM_DEBUG("\n");
+ atomic_set(&bl->count, 0);
+ bl->next = NULL;
+ return 0;
+}
+
+int drm_freelist_put(drm_device_t *dev, drm_freelist_t *bl, drm_buf_t *buf)
+{
+ unsigned int old;
+ unsigned int new;
+ char failed;
+ int count = 0;
+ drm_device_dma_t *dma = dev->dma;
+
+ if (!dma) {
+ DRM_ERROR("No DMA support\n");
+ return 1;
+ }
+
+ if (buf->waiting || buf->pending || buf->list == DRM_LIST_FREE) {
+ DRM_ERROR("Freed buffer %d: w%d, p%d, l%d\n",
+ buf->idx, buf->waiting, buf->pending, buf->list);
+ }
+	if (!bl) return 1;		/* check before bl is dereferenced */
+	DRM_DEBUG("%d, count = %d, wfh = %d, w%d, p%d\n",
+		  buf->idx, atomic_read(&bl->count), atomic_read(&bl->wfh),
+		  buf->waiting, buf->pending);
+#if DRM_DMA_HISTOGRAM
+ getnanotime(&buf->time_freed);
+ drm_histogram_compute(dev, buf);
+#endif
+ buf->list = DRM_LIST_FREE;
+ do {
+ old = (unsigned long)bl->next;
+ buf->next = (void *)old;
+ new = (unsigned long)buf;
+ _DRM_CAS(&bl->next, old, new, failed);
+ if (++count > DRM_LOOPING_LIMIT) {
+ DRM_ERROR("Looping\n");
+ return 1;
+ }
+ } while (failed);
+ atomic_inc(&bl->count);
+ if (atomic_read(&bl->count) > dma->buf_count) {
+ DRM_ERROR("%d of %d buffers free after addition of %d\n",
+ atomic_read(&bl->count), dma->buf_count, buf->idx);
+ return 1;
+ }
+ /* Check for high water mark */
+ if (atomic_read(&bl->wfh) && atomic_read(&bl->count)>=bl->high_mark) {
+ atomic_set(&bl->wfh, 0);
+ if (bl->waiting)
+ wakeup(&bl->waiting);
+ }
+ return 0;
+}
+
+static drm_buf_t *drm_freelist_try(drm_freelist_t *bl)
+{
+ unsigned int old;
+ unsigned int new;
+ char failed;
+ drm_buf_t *buf;
+ int count = 0;
+
+ if (!bl) return NULL;
+
+ /* Get buffer */
+ do {
+ old = (unsigned int)bl->next;
+ if (!old) {
+ return NULL;
+ }
+ new = (unsigned long)bl->next->next;
+ _DRM_CAS(&bl->next, old, new, failed);
+ if (++count > DRM_LOOPING_LIMIT) {
+ DRM_ERROR("Looping\n");
+ return NULL;
+ }
+ } while (failed);
+ atomic_dec(&bl->count);
+
+ buf = (drm_buf_t *)old;
+ buf->next = NULL;
+ buf->list = DRM_LIST_NONE;
+ DRM_DEBUG("%d, count = %d, wfh = %d, w%d, p%d\n",
+ buf->idx, atomic_read(&bl->count), atomic_read(&bl->wfh),
+ buf->waiting, buf->pending);
+ if (buf->waiting || buf->pending) {
+ DRM_ERROR("Free buffer %d: w%d, p%d, l%d\n",
+ buf->idx, buf->waiting, buf->pending, buf->list);
+ }
+
+ return buf;
+}
+
+drm_buf_t *drm_freelist_get(drm_freelist_t *bl, int block)
+{
+ drm_buf_t *buf = NULL;
+ int error;
+
+ if (!bl || !bl->initialized) return NULL;
+
+ /* Check for low water mark */
+ if (atomic_read(&bl->count) <= bl->low_mark) /* Became low */
+ atomic_set(&bl->wfh, 1);
+ if (atomic_read(&bl->wfh)) {
+ DRM_DEBUG("Block = %d, count = %d, wfh = %d\n",
+ block, atomic_read(&bl->count),
+ atomic_read(&bl->wfh));
+ if (block) {
+ atomic_inc(&bl->waiting);
+ for (;;) {
+ if (!atomic_read(&bl->wfh)
+ && (buf = drm_freelist_try(bl))) break;
+ error = tsleep(&bl->waiting, PZERO|PCATCH,
+ "drmfg", 0);
+ if (error)
+ break;
+ }
+ atomic_dec(&bl->waiting);
+ }
+ return buf;
+ }
+
+ DRM_DEBUG("Count = %d, wfh = %d\n",
+ atomic_read(&bl->count), atomic_read(&bl->wfh));
+ return drm_freelist_try(bl);
+}
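[Editorial note, not part of this patch: drm_freelist_put and drm_freelist_try above implement a lock-free LIFO, pushing and popping buffers on bl->next with the _DRM_CAS compare-and-swap macro and retrying until the swap succeeds or DRM_LOOPING_LIMIT is hit.  The sketch below shows the same push/pop pattern in standalone C11, with <stdatomic.h> standing in for _DRM_CAS; like the driver's loop it does not guard against the classic ABA hazard, and the names fl_put/fl_get are invented for the example.]

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct buf {
	struct buf *next;
	int idx;
};

struct freelist {
	_Atomic(struct buf *) head;	/* plays the role of bl->next */
};

/* Push: link the buffer in front of the observed head, retrying the CAS
 * until no other thread has changed the head in the meantime. */
static void fl_put(struct freelist *fl, struct buf *b)
{
	struct buf *old = atomic_load(&fl->head);

	do {
		b->next = old;
	} while (!atomic_compare_exchange_weak(&fl->head, &old, b));
}

/* Pop: detach the current head, or return NULL when the list is empty.
 * Reading old->next after another thread popped and reused it is the
 * same ABA caveat the in-kernel loop has. */
static struct buf *fl_get(struct freelist *fl)
{
	struct buf *old = atomic_load(&fl->head);

	while (old && !atomic_compare_exchange_weak(&fl->head, &old, old->next))
		;
	return old;
}

int main(void)
{
	static struct buf b1 = { .idx = 1 }, b2 = { .idx = 2 };
	struct freelist fl = { .head = NULL };

	fl_put(&fl, &b1);
	fl_put(&fl, &b2);
	printf("popped buffer %d (LIFO order)\n", fl_get(&fl)->idx);	/* 2 */
	return 0;
}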
diff --git a/bsd/drm/lock.c b/bsd/drm/lock.c
new file mode 100644
index 00000000..cd14a882
--- /dev/null
+++ b/bsd/drm/lock.c
@@ -0,0 +1,220 @@
+/* lock.c -- IOCTLs for locking -*- c -*-
+ * Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
+ * Revised: Tue Oct 12 08:51:06 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/lock.c,v 1.5 1999/08/30 13:05:00 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/lock.c,v 1.1 1999/09/25 14:38:01 dawes Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+int
+drm_block(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ DRM_DEBUG("\n");
+ return 0;
+}
+
+int
+drm_unblock(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ DRM_DEBUG("\n");
+ return 0;
+}
+
+int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
+{
+ unsigned int old;
+ unsigned int new;
+ char failed;
+
+ DRM_DEBUG("%d attempts\n", context);
+ do {
+ old = *lock;
+ if (old & _DRM_LOCK_HELD) new = old | _DRM_LOCK_CONT;
+ else new = context | _DRM_LOCK_HELD;
+ _DRM_CAS(lock, old, new, failed);
+ } while (failed);
+ if (_DRM_LOCKING_CONTEXT(old) == context) {
+ if (old & _DRM_LOCK_HELD) {
+ if (context != DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("%d holds heavyweight lock\n",
+ context);
+ }
+ return 0;
+ }
+ }
+ if (new == (context | _DRM_LOCK_HELD)) {
+ /* Have lock */
+ DRM_DEBUG("%d\n", context);
+ return 1;
+ }
+ DRM_DEBUG("%d unable to get lock held by %d\n",
+ context, _DRM_LOCKING_CONTEXT(old));
+ return 0;
+}
+
+/* This takes a lock forcibly and hands it to context. Should ONLY be used
+ inside *_unlock to give lock to kernel before calling *_dma_schedule. */
+int drm_lock_transfer(drm_device_t *dev,
+ __volatile__ unsigned int *lock, unsigned int context)
+{
+ unsigned int old;
+ unsigned int new;
+ char failed;
+
+ dev->lock.pid = 0;
+ do {
+ old = *lock;
+ new = context | _DRM_LOCK_HELD;
+ _DRM_CAS(lock, old, new, failed);
+ } while (failed);
+ DRM_DEBUG("%d => %d\n", _DRM_LOCKING_CONTEXT(old), context);
+ return 1;
+}
+
+int drm_lock_free(drm_device_t *dev,
+ __volatile__ unsigned int *lock, unsigned int context)
+{
+ unsigned int old;
+ unsigned int new;
+ char failed;
+ pid_t pid = dev->lock.pid;
+
+ DRM_DEBUG("%d\n", context);
+ dev->lock.pid = 0;
+ do {
+ old = *lock;
+ new = 0;
+ _DRM_CAS(lock, old, new, failed);
+ } while (failed);
+ if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
+ DRM_ERROR("%d freed heavyweight lock held by %d (pid %d)\n",
+ context,
+ _DRM_LOCKING_CONTEXT(old),
+ pid);
+ return 1;
+ }
+ wakeup(&dev->lock.lock_queue);
+ return 0;
+}
+
+static int drm_flush_queue(drm_device_t *dev, int context)
+{
+ int ret = 0;
+ int error;
+ drm_queue_t *q = dev->queuelist[context];
+
+ DRM_DEBUG("\n");
+
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) > 1) {
+ atomic_inc(&q->block_write);
+ atomic_inc(&q->block_count);
+ error = tsleep(&q->flush_queue, PCATCH|PZERO, "drmfq", 0);
+ if (error)
+ return error;
+ atomic_dec(&q->block_count);
+ }
+ atomic_dec(&q->use_count);
+ atomic_inc(&q->total_flushed);
+
+	/* NOTE: block_write is still incremented!
+	   Use drm_flush_unblock_queue to decrement. */
+ return ret;
+}
+
+static int drm_flush_unblock_queue(drm_device_t *dev, int context)
+{
+ drm_queue_t *q = dev->queuelist[context];
+
+ DRM_DEBUG("\n");
+
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) > 1) {
+ if (atomic_read(&q->block_write)) {
+ atomic_dec(&q->block_write);
+ wakeup(&q->write_queue);
+ }
+ }
+ atomic_dec(&q->use_count);
+ return 0;
+}
+
+int drm_flush_block_and_flush(drm_device_t *dev, int context,
+ drm_lock_flags_t flags)
+{
+ int ret = 0;
+ int i;
+
+ DRM_DEBUG("\n");
+
+ if (flags & _DRM_LOCK_FLUSH) {
+ ret = drm_flush_queue(dev, DRM_KERNEL_CONTEXT);
+ if (!ret) ret = drm_flush_queue(dev, context);
+ }
+ if (flags & _DRM_LOCK_FLUSH_ALL) {
+ for (i = 0; !ret && i < dev->queue_count; i++) {
+ ret = drm_flush_queue(dev, i);
+ }
+ }
+ return ret;
+}
+
+int drm_flush_unblock(drm_device_t *dev, int context, drm_lock_flags_t flags)
+{
+ int ret = 0;
+ int i;
+
+ DRM_DEBUG("\n");
+
+ if (flags & _DRM_LOCK_FLUSH) {
+ ret = drm_flush_unblock_queue(dev, DRM_KERNEL_CONTEXT);
+ if (!ret) ret = drm_flush_unblock_queue(dev, context);
+ }
+ if (flags & _DRM_LOCK_FLUSH_ALL) {
+ for (i = 0; !ret && i < dev->queue_count; i++) {
+ ret = drm_flush_unblock_queue(dev, i);
+ }
+ }
+
+ return ret;
+}
+
+int drm_finish(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ int ret = 0;
+ drm_lock_t lock;
+
+ DRM_DEBUG("\n");
+
+ lock = *(drm_lock_t *) data;
+ ret = drm_flush_block_and_flush(dev, lock.context, lock.flags);
+ drm_flush_unblock(dev, lock.context, lock.flags);
+ return ret;
+}
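[Editorial note, not part of this patch: drm_lock_take above packs the whole lock state into one 32-bit word, the context id in the low bits plus held/contended flags, and updates it with a single compare-and-swap.  The standalone sketch below shows that encoding with C11 atomics; the bit values mirror the usual _DRM_LOCK_HELD/_DRM_LOCK_CONT definitions but should be treated as illustrative, since the real constants live in drm.h.]

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define LOCK_HELD 0x80000000U	/* assumed: high bit = lock is held */
#define LOCK_CONT 0x40000000U	/* assumed: next bit = lock is contended */

/* Try to take the lock for `context`.  One CAS either marks the lock
 * held by us or, if somebody else holds it, sets the contended bit. */
static bool lock_take(_Atomic unsigned int *lock, unsigned int context)
{
	unsigned int old = atomic_load(lock), new;

	do {
		new = (old & LOCK_HELD) ? (old | LOCK_CONT)
					: (context | LOCK_HELD);
	} while (!atomic_compare_exchange_weak(lock, &old, new));

	return new == (context | LOCK_HELD);	/* true only if we took it */
}

int main(void)
{
	_Atomic unsigned int lock = 0;

	printf("ctx 3 takes lock: %d\n", lock_take(&lock, 3));	/* 1 */
	printf("ctx 5 takes lock: %d\n", lock_take(&lock, 5));	/* 0, sets CONT */
	return 0;
}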
diff --git a/bsd/drm/memory.c b/bsd/drm/memory.c
new file mode 100644
index 00000000..a8a936df
--- /dev/null
+++ b/bsd/drm/memory.c
@@ -0,0 +1,458 @@
+/* memory.c -- Memory management wrappers for DRM -*- c -*-
+ * Created: Thu Feb 4 14:00:34 1999 by faith@precisioninsight.com
+ * Revised: Fri Aug 20 13:04:33 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/memory.c,v 1.4 1999/08/20 20:00:53 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/memory.c,v 1.1 1999/09/25 14:38:02 dawes Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#ifdef DRM_AGP
+#include <sys/agpio.h>
+#endif
+
+MALLOC_DEFINE(M_DRM, "drm", "DRM Data Structures");
+
+typedef struct drm_mem_stats {
+ const char *name;
+ int succeed_count;
+ int free_count;
+ int fail_count;
+ unsigned long bytes_allocated;
+ unsigned long bytes_freed;
+} drm_mem_stats_t;
+
+#ifdef SMP
+static struct simplelock drm_mem_lock;
+#endif
+static unsigned long drm_ram_available = 0;
+static unsigned long drm_ram_used = 0;
+static drm_mem_stats_t drm_mem_stats[] = {
+ [DRM_MEM_DMA] = { "dmabufs" },
+ [DRM_MEM_SAREA] = { "sareas" },
+ [DRM_MEM_DRIVER] = { "driver" },
+ [DRM_MEM_MAGIC] = { "magic" },
+ [DRM_MEM_IOCTLS] = { "ioctltab" },
+ [DRM_MEM_MAPS] = { "maplist" },
+ [DRM_MEM_VMAS] = { "vmalist" },
+ [DRM_MEM_BUFS] = { "buflist" },
+ [DRM_MEM_SEGS] = { "seglist" },
+ [DRM_MEM_PAGES] = { "pagelist" },
+ [DRM_MEM_FILES] = { "files" },
+ [DRM_MEM_QUEUES] = { "queues" },
+ [DRM_MEM_CMDS] = { "commands" },
+ [DRM_MEM_MAPPINGS] = { "mappings" },
+ [DRM_MEM_BUFLISTS] = { "buflists" },
+ [DRM_MEM_AGPLISTS] = { "agplist" },
+ [DRM_MEM_TOTALAGP] = { "totalagp" },
+ [DRM_MEM_BOUNDAGP] = { "boundagp" },
+ [DRM_MEM_CTXBITMAP] = { "ctxbitmap"},
+ { NULL, 0, } /* Last entry must be null */
+};
+
+void drm_mem_init(void)
+{
+ drm_mem_stats_t *mem;
+
+ for (mem = drm_mem_stats; mem->name; ++mem) {
+ mem->succeed_count = 0;
+ mem->free_count = 0;
+ mem->fail_count = 0;
+ mem->bytes_allocated = 0;
+ mem->bytes_freed = 0;
+ }
+
+ drm_ram_available = 0; /* si.totalram; */
+ drm_ram_used = 0;
+}
+
+/* drm_mem_info is called whenever a process reads the hw.dri.<unit>.mem
+   sysctl. */
+
+static int _drm_mem_info SYSCTL_HANDLER_ARGS
+{
+ drm_mem_stats_t *pt;
+ char buf[128];
+ int error;
+
+ DRM_SYSCTL_PRINT(" total counts "
+ " | outstanding \n");
+ DRM_SYSCTL_PRINT("type alloc freed fail bytes freed"
+ " | allocs bytes\n\n");
+ DRM_SYSCTL_PRINT("%-9.9s %5d %5d %4d %10lu |\n",
+ "system", 0, 0, 0, drm_ram_available);
+ DRM_SYSCTL_PRINT("%-9.9s %5d %5d %4d %10lu |\n",
+ "locked", 0, 0, 0, drm_ram_used);
+ DRM_SYSCTL_PRINT("\n");
+ for (pt = drm_mem_stats; pt->name; pt++) {
+ DRM_SYSCTL_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
+ pt->name,
+ pt->succeed_count,
+ pt->free_count,
+ pt->fail_count,
+ pt->bytes_allocated,
+ pt->bytes_freed,
+ pt->succeed_count - pt->free_count,
+ (long)pt->bytes_allocated
+ - (long)pt->bytes_freed);
+ }
+ SYSCTL_OUT(req, "", 1);
+
+ return 0;
+}
+
+int drm_mem_info SYSCTL_HANDLER_ARGS
+{
+ int ret;
+
+ simple_lock(&drm_mem_lock);
+ ret = _drm_mem_info(oidp, arg1, arg2, req);
+ simple_unlock(&drm_mem_lock);
+ return ret;
+}
+
+void *drm_alloc(size_t size, int area)
+{
+ void *pt;
+
+ if (!size) {
+ DRM_MEM_ERROR(area, "Allocating 0 bytes\n");
+ return NULL;
+ }
+
+ if (!(pt = malloc(size, M_DRM, M_NOWAIT))) {
+ simple_lock(&drm_mem_lock);
+ ++drm_mem_stats[area].fail_count;
+ simple_unlock(&drm_mem_lock);
+ return NULL;
+ }
+ simple_lock(&drm_mem_lock);
+ ++drm_mem_stats[area].succeed_count;
+ drm_mem_stats[area].bytes_allocated += size;
+ simple_unlock(&drm_mem_lock);
+ return pt;
+}
+
+void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
+{
+ void *pt;
+
+ if (!(pt = drm_alloc(size, area))) return NULL;
+ if (oldpt && oldsize) {
+ memcpy(pt, oldpt, oldsize);
+ drm_free(oldpt, oldsize, area);
+ }
+ return pt;
+}
+
+char *drm_strdup(const char *s, int area)
+{
+ char *pt;
+ int length = s ? strlen(s) : 0;
+
+ if (!(pt = drm_alloc(length+1, area))) return NULL;
+ strcpy(pt, s);
+ return pt;
+}
+
+void drm_strfree(char *s, int area)
+{
+ unsigned int size;
+
+ if (!s) return;
+
+ size = 1 + (s ? strlen(s) : 0);
+ drm_free((void *)s, size, area);
+}
+
+void drm_free(void *pt, size_t size, int area)
+{
+ int alloc_count;
+ int free_count;
+
+ if (!pt) DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
+ else free(pt, M_DRM);
+ simple_lock(&drm_mem_lock);
+ drm_mem_stats[area].bytes_freed += size;
+ free_count = ++drm_mem_stats[area].free_count;
+ alloc_count = drm_mem_stats[area].succeed_count;
+ simple_unlock(&drm_mem_lock);
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+}
+
+unsigned long drm_alloc_pages(int order, int area)
+{
+ vm_offset_t address;
+ unsigned long bytes = PAGE_SIZE << order;
+ unsigned long addr;
+ unsigned int sz;
+
+ simple_lock(&drm_mem_lock);
+ if (drm_ram_used > +(DRM_RAM_PERCENT * drm_ram_available) / 100) {
+ simple_unlock(&drm_mem_lock);
+ return 0;
+ }
+ simple_unlock(&drm_mem_lock);
+
+	/* Allocate PAGE_SIZE << order bytes, matching the accounting below
+	   and the contigfree(bytes) in drm_free_pages. */
+	address = (vm_offset_t) contigmalloc(bytes, M_DRM, M_WAITOK, 0, ~0, 1, 0);
+ if (!address) {
+ simple_lock(&drm_mem_lock);
+ ++drm_mem_stats[area].fail_count;
+ simple_unlock(&drm_mem_lock);
+ return 0;
+ }
+ simple_lock(&drm_mem_lock);
+ ++drm_mem_stats[area].succeed_count;
+ drm_mem_stats[area].bytes_allocated += bytes;
+ drm_ram_used += bytes;
+ simple_unlock(&drm_mem_lock);
+
+
+ /* Zero outside the lock */
+ memset((void *)address, 0, bytes);
+
+ /* Reserve */
+ for (addr = address, sz = bytes;
+ sz > 0;
+ addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+ /* mem_map_reserve(MAP_NR(addr));*/
+ }
+
+ return address;
+}
+
+void drm_free_pages(unsigned long address, int order, int area)
+{
+ unsigned long bytes = PAGE_SIZE << order;
+ int alloc_count;
+ int free_count;
+ unsigned long addr;
+ unsigned int sz;
+
+ if (!address) {
+ DRM_MEM_ERROR(area, "Attempt to free address 0\n");
+ } else {
+ /* Unreserve */
+ for (addr = address, sz = bytes;
+ sz > 0;
+ addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+ /* mem_map_unreserve(MAP_NR(addr));*/
+ }
+ contigfree((void *) address, bytes, M_DRM);
+ }
+
+ simple_lock(&drm_mem_lock);
+ free_count = ++drm_mem_stats[area].free_count;
+ alloc_count = drm_mem_stats[area].succeed_count;
+ drm_mem_stats[area].bytes_freed += bytes;
+ drm_ram_used -= bytes;
+ simple_unlock(&drm_mem_lock);
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(area,
+ "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+}
+
+void *drm_ioremap(unsigned long offset, unsigned long size)
+{
+ void *pt;
+
+ if (!size) {
+ DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
+ "Mapping 0 bytes at 0x%08lx\n", offset);
+ return NULL;
+ }
+
+ if (!(pt = pmap_mapdev(offset, size))) {
+ simple_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
+ simple_unlock(&drm_mem_lock);
+ return NULL;
+ }
+ simple_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
+ drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
+ simple_unlock(&drm_mem_lock);
+ return pt;
+}
+
+void drm_ioremapfree(void *pt, unsigned long size)
+{
+ int alloc_count;
+ int free_count;
+
+ if (!pt)
+ DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
+ "Attempt to free NULL pointer\n");
+ else
+ pmap_unmapdev((vm_offset_t) pt, size);
+
+ simple_lock(&drm_mem_lock);
+ drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
+ free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
+ alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
+ simple_unlock(&drm_mem_lock);
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
+ "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+}
+
+#ifdef DRM_AGP
+void *drm_alloc_agp(int pages, u_int32_t type)
+{
+ device_t dev = agp_find_device();
+ void *handle;
+
+ if (!dev)
+ return NULL;
+
+ if (!pages) {
+ DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n");
+ return NULL;
+ }
+
+ if ((handle = agp_alloc_memory(dev, type, pages << AGP_PAGE_SHIFT))) {
+ simple_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
+ drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated
+ += pages << PAGE_SHIFT;
+ simple_unlock(&drm_mem_lock);
+ return handle;
+ }
+ simple_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count;
+ simple_unlock(&drm_mem_lock);
+ return NULL;
+}
+
+int drm_free_agp(void *handle, int pages)
+{
+ device_t dev = agp_find_device();
+ int alloc_count;
+ int free_count;
+ int retval = EINVAL;
+
+ if (!dev)
+ return EINVAL;
+
+ if (!handle) {
+ DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
+ "Attempt to free NULL AGP handle\n");
+ return retval;
+ }
+
+ agp_free_memory(dev, handle);
+ simple_lock(&drm_mem_lock);
+ free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
+ alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
+ drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed
+ += pages << PAGE_SHIFT;
+ simple_unlock(&drm_mem_lock);
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
+ "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+ return 0;
+}
+
+int drm_bind_agp(void *handle, unsigned int start)
+{
+ device_t dev = agp_find_device();
+ int retcode = EINVAL;
+ struct agp_memory_info info;
+
+ DRM_DEBUG("drm_bind_agp called\n");
+
+ if (!dev)
+ return EINVAL;
+
+ if (!handle) {
+ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
+ "Attempt to bind NULL AGP handle\n");
+ return retcode;
+ }
+
+ if (!(retcode = agp_bind_memory(dev, handle,
+ start << AGP_PAGE_SHIFT))) {
+ simple_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
+ agp_memory_info(dev, handle, &info);
+ drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated
+ += info.ami_size;
+ simple_unlock(&drm_mem_lock);
+ DRM_DEBUG("drm_agp.bind_memory: retcode %d\n", retcode);
+ return retcode;
+ }
+ simple_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count;
+ simple_unlock(&drm_mem_lock);
+ return retcode;
+}
+
+int drm_unbind_agp(void *handle)
+{
+ device_t dev = agp_find_device();
+ int alloc_count;
+ int free_count;
+ int retcode = EINVAL;
+ struct agp_memory_info info;
+
+ if (!dev)
+ return EINVAL;
+
+ if (!handle) {
+ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
+ "Attempt to unbind NULL AGP handle\n");
+ return retcode;
+ }
+
+
+ agp_memory_info(dev, handle, &info);
+ if ((retcode = agp_unbind_memory(dev, handle)))
+ return retcode;
+ simple_lock(&drm_mem_lock);
+ free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
+ alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
+ drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed += info.ami_size;
+ simple_unlock(&drm_mem_lock);
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
+ "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+ return retcode;
+}
+#endif
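[Editorial note, not part of this patch: every allocator wrapper in this file follows the same bookkeeping pattern, namely take drm_mem_lock, bump either succeed_count or fail_count for the area, account the bytes, and drop the lock, so the mem sysctl can later report allocations against frees.  A minimal userland sketch of that pattern follows, with a pthread mutex standing in for the simplelock and a single area; all names here are invented for the example.]

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Per-area statistics, as kept by drm_mem_stats[] above. */
struct mem_stats {
	int succeed_count, free_count, fail_count;
	unsigned long bytes_allocated, bytes_freed;
};

static struct mem_stats stats;
static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;

static void *tracked_alloc(size_t size)
{
	void *p = malloc(size);

	pthread_mutex_lock(&stats_lock);
	if (p) {
		stats.succeed_count++;
		stats.bytes_allocated += size;
	} else {
		stats.fail_count++;
	}
	pthread_mutex_unlock(&stats_lock);
	return p;
}

static void tracked_free(void *p, size_t size)
{
	free(p);
	pthread_mutex_lock(&stats_lock);
	stats.free_count++;
	stats.bytes_freed += size;
	if (stats.free_count > stats.succeed_count)
		fprintf(stderr, "Excess frees: %d frees, %d allocs\n",
			stats.free_count, stats.succeed_count);
	pthread_mutex_unlock(&stats_lock);
}

int main(void)
{
	void *p = tracked_alloc(64);

	tracked_free(p, 64);
	printf("outstanding: %d allocs, %lu bytes\n",
	       stats.succeed_count - stats.free_count,
	       stats.bytes_allocated - stats.bytes_freed);
	return 0;
}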
diff --git a/bsd/drm/proc.c b/bsd/drm/proc.c
new file mode 100644
index 00000000..12168aa3
--- /dev/null
+++ b/bsd/drm/proc.c
@@ -0,0 +1,568 @@
+/* proc.c -- /proc support for DRM -*- c -*-
+ * Created: Mon Jan 11 09:48:47 1999 by faith@precisioninsight.com
+ * Revised: Fri Aug 20 11:31:48 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/proc.c,v 1.4 1999/08/20 15:36:46 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/proc.c,v 1.1 1999/09/25 14:38:02 dawes Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+static struct proc_dir_entry *drm_root = NULL;
+static struct proc_dir_entry *drm_dev_root = NULL;
+static char drm_slot_name[64];
+
+static int drm_name_info(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data);
+static int drm_vm_info(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data);
+static int drm_clients_info(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data);
+static int drm_queues_info(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data);
+static int drm_bufs_info(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data);
+#if DRM_DEBUG_CODE
+static int drm_vma_info(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data);
+#endif
+#if DRM_DMA_HISTOGRAM
+static int drm_histo_info(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data);
+#endif
+
+struct drm_proc_list {
+ const char *name;
+ int (*f)(char *, char **, off_t, int, int *, void *);
+} drm_proc_list[] = {
+ { "name", drm_name_info },
+ { "mem", drm_mem_info },
+ { "vm", drm_vm_info },
+ { "clients", drm_clients_info },
+ { "queues", drm_queues_info },
+ { "bufs", drm_bufs_info },
+#if DRM_DEBUG_CODE
+ { "vma", drm_vma_info },
+#endif
+#if DRM_DMA_HISTOGRAM
+ { "histo", drm_histo_info },
+#endif
+};
+#define DRM_PROC_ENTRIES (sizeof(drm_proc_list)/sizeof(drm_proc_list[0]))
+
+int drm_proc_init(drm_device_t *dev)
+{
+ struct proc_dir_entry *ent;
+ int i, j;
+
+ drm_root = create_proc_entry("graphics", S_IFDIR, NULL);
+ if (!drm_root) {
+ DRM_ERROR("Cannot create /proc/graphics\n");
+ return -1;
+ }
+
+ /* Instead of doing this search, we should
+ add some global support for /proc/graphics. */
+ for (i = 0; i < 8; i++) {
+ sprintf(drm_slot_name, "graphics/%d", i);
+ drm_dev_root = create_proc_entry(drm_slot_name, S_IFDIR, NULL);
+		if (!drm_dev_root) {
+			DRM_ERROR("Cannot create /proc/%s\n", drm_slot_name);
+			remove_proc_entry("graphics", NULL);
+			return -1;	/* the nlink check below would dereference NULL */
+		}
+ if (drm_dev_root->nlink == 2) break;
+ drm_dev_root = NULL;
+ }
+ if (!drm_dev_root) {
+ DRM_ERROR("Cannot find slot in /proc/graphics\n");
+ return -1;
+ }
+
+ for (i = 0; i < DRM_PROC_ENTRIES; i++) {
+ ent = create_proc_entry(drm_proc_list[i].name,
+ S_IFREG|S_IRUGO, drm_dev_root);
+ if (!ent) {
+ DRM_ERROR("Cannot create /proc/%s/%s\n",
+ drm_slot_name, drm_proc_list[i].name);
+			for (j = 0; j < i; j++)
+				remove_proc_entry(drm_proc_list[j].name,
+						  drm_dev_root);
+ remove_proc_entry(drm_slot_name, NULL);
+ remove_proc_entry("graphics", NULL);
+ return -1;
+ }
+ ent->read_proc = drm_proc_list[i].f;
+ ent->data = dev;
+ }
+
+ return 0;
+}
+
+
+int drm_proc_cleanup(void)
+{
+ int i;
+
+ if (drm_root) {
+ if (drm_dev_root) {
+ for (i = 0; i < DRM_PROC_ENTRIES; i++) {
+ remove_proc_entry(drm_proc_list[i].name,
+ drm_dev_root);
+ }
+ remove_proc_entry(drm_slot_name, NULL);
+ }
+ remove_proc_entry("graphics", NULL);
+ remove_proc_entry(DRM_NAME, NULL);
+ }
+ drm_root = drm_dev_root = NULL;
+ return 0;
+}
+
+static int drm_name_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+
+ if (offset > 0) return 0; /* no partial requests */
+ len = 0;
+ *eof = 1;
+
+ if (dev->unique) {
+ DRM_PROC_PRINT("%s 0x%x %s\n",
+ dev->name, dev->device, dev->unique);
+ } else {
+ DRM_PROC_PRINT("%s 0x%x\n", dev->name, dev->device);
+ }
+ return len;
+}
+
+static int _drm_vm_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ drm_map_t *map;
+ const char *types[] = { "FB", "REG", "SHM" };
+ const char *type;
+ int i;
+
+ if (offset > 0) return 0; /* no partial requests */
+ len = 0;
+ *eof = 1;
+ DRM_PROC_PRINT("slot offset size type flags "
+ "address mtrr\n\n");
+ for (i = 0; i < dev->map_count; i++) {
+ map = dev->maplist[i];
+ if (map->type < 0 || map->type > 2) type = "??";
+ else type = types[map->type];
+ DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
+ i,
+ map->offset,
+ map->size,
+ type,
+ map->flags,
+ (unsigned long)map->handle);
+ if (map->mtrr < 0) {
+ DRM_PROC_PRINT("none\n");
+ } else {
+ DRM_PROC_PRINT("%4d\n", map->mtrr);
+ }
+ }
+
+ return len;
+}
+
+static int drm_vm_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ int ret;
+
+ down(&dev->struct_sem);
+ ret = _drm_vm_info(buf, start, offset, len, eof, data);
+ up(&dev->struct_sem);
+ return ret;
+}
+
+
+static int _drm_queues_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ int i;
+ drm_queue_t *q;
+
+ if (offset > 0) return 0; /* no partial requests */
+ len = 0;
+ *eof = 1;
+ DRM_PROC_PRINT(" ctx/flags use fin"
+ " blk/rw/rwf wait flushed queued"
+ " locks\n\n");
+ for (i = 0; i < dev->queue_count; i++) {
+ q = dev->queuelist[i];
+ atomic_inc(&q->use_count);
+ DRM_PROC_PRINT_RET(atomic_dec(&q->use_count),
+ "%5d/0x%03x %5d %5d"
+ " %5d/%c%c/%c%c%c %5d %10d %10d %10d\n",
+ i,
+ q->flags,
+ atomic_read(&q->use_count),
+ atomic_read(&q->finalization),
+ atomic_read(&q->block_count),
+ atomic_read(&q->block_read) ? 'r' : '-',
+ atomic_read(&q->block_write) ? 'w' : '-',
+ waitqueue_active(&q->read_queue) ? 'r':'-',
+ waitqueue_active(&q->write_queue) ? 'w':'-',
+ waitqueue_active(&q->flush_queue) ? 'f':'-',
+ DRM_BUFCOUNT(&q->waitlist),
+ atomic_read(&q->total_flushed),
+ atomic_read(&q->total_queued),
+ atomic_read(&q->total_locks));
+ atomic_dec(&q->use_count);
+ }
+
+ return len;
+}
+
+static int drm_queues_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ int ret;
+
+ down(&dev->struct_sem);
+ ret = _drm_queues_info(buf, start, offset, len, eof, data);
+ up(&dev->struct_sem);
+ return ret;
+}
+
+/* drm_bufs_info is called whenever a process reads
+   /proc/graphics/<minor>/bufs. */
+
+static int _drm_bufs_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+
+ if (!dma) return 0;
+ if (offset > 0) return 0; /* no partial requests */
+ len = 0;
+ *eof = 1;
+ DRM_PROC_PRINT(" o size count free segs pages kB\n\n");
+ for (i = 0; i <= DRM_MAX_ORDER; i++) {
+ if (dma->bufs[i].buf_count)
+ DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n",
+ i,
+ dma->bufs[i].buf_size,
+ dma->bufs[i].buf_count,
+ atomic_read(&dma->bufs[i]
+ .freelist.count),
+ dma->bufs[i].seg_count,
+ dma->bufs[i].seg_count
+ *(1 << dma->bufs[i].page_order),
+ (dma->bufs[i].seg_count
+ * (1 << dma->bufs[i].page_order))
+ * PAGE_SIZE / 1024);
+ }
+ DRM_PROC_PRINT("\n");
+ for (i = 0; i < dma->buf_count; i++) {
+ if (i && !(i%32)) DRM_PROC_PRINT("\n");
+ DRM_PROC_PRINT(" %d", dma->buflist[i]->list);
+ }
+ DRM_PROC_PRINT("\n");
+
+ return len;
+}
+
+static int drm_bufs_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ int ret;
+
+ down(&dev->struct_sem);
+ ret = _drm_bufs_info(buf, start, offset, len, eof, data);
+ up(&dev->struct_sem);
+ return ret;
+}
+
+
+static int _drm_clients_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ drm_file_t *priv;
+
+ if (offset > 0) return 0; /* no partial requests */
+ len = 0;
+ *eof = 1;
+ DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n");
+ for (priv = dev->file_first; priv; priv = priv->next) {
+ DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
+ priv->authenticated ? 'y' : 'n',
+ priv->minor,
+ priv->pid,
+ priv->uid,
+ priv->magic,
+ priv->ioctl_count);
+ }
+
+ return len;
+}
+
+static int drm_clients_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ int ret;
+
+ down(&dev->struct_sem);
+ ret = _drm_clients_info(buf, start, offset, len, eof, data);
+ up(&dev->struct_sem);
+ return ret;
+}
+
+#if DRM_DEBUG_CODE
+
+static int _drm_vma_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ drm_vma_entry_t *pt;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long i;
+ struct vm_area_struct *vma;
+ unsigned long address;
+#if defined(__i386__)
+ unsigned int pgprot;
+#endif
+
+ if (offset > 0) return 0; /* no partial requests */
+ len = 0;
+ *eof = 1;
+ DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
+ atomic_read(&dev->vma_count),
+ high_memory, virt_to_phys(high_memory));
+ for (pt = dev->vmalist; pt; pt = pt->next) {
+ if (!(vma = pt->vma)) continue;
+ DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx",
+ pt->pid,
+ vma->vm_start,
+ vma->vm_end,
+ vma->vm_flags & VM_READ ? 'r' : '-',
+ vma->vm_flags & VM_WRITE ? 'w' : '-',
+ vma->vm_flags & VM_EXEC ? 'x' : '-',
+ vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
+ vma->vm_flags & VM_LOCKED ? 'l' : '-',
+ vma->vm_flags & VM_IO ? 'i' : '-',
+ vma->vm_offset );
+#if defined(__i386__)
+ pgprot = pgprot_val(vma->vm_page_prot);
+ DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c",
+ pgprot & _PAGE_PRESENT ? 'p' : '-',
+ pgprot & _PAGE_RW ? 'w' : 'r',
+ pgprot & _PAGE_USER ? 'u' : 's',
+ pgprot & _PAGE_PWT ? 't' : 'b',
+ pgprot & _PAGE_PCD ? 'u' : 'c',
+ pgprot & _PAGE_ACCESSED ? 'a' : '-',
+ pgprot & _PAGE_DIRTY ? 'd' : '-',
+ pgprot & _PAGE_4M ? 'm' : 'k',
+ pgprot & _PAGE_GLOBAL ? 'g' : 'l' );
+#endif
+ DRM_PROC_PRINT("\n");
+ for (i = vma->vm_start; i < vma->vm_end; i += PAGE_SIZE) {
+ pgd = pgd_offset(vma->vm_mm, i);
+ pmd = pmd_offset(pgd, i);
+ pte = pte_offset(pmd, i);
+ if (pte_present(*pte)) {
+ address = __pa(pte_page(*pte))
+ + (i & (PAGE_SIZE-1));
+ DRM_PROC_PRINT(" 0x%08lx -> 0x%08lx"
+ " %c%c%c%c%c\n",
+ i,
+ address,
+ pte_read(*pte) ? 'r' : '-',
+ pte_write(*pte) ? 'w' : '-',
+ pte_exec(*pte) ? 'x' : '-',
+ pte_dirty(*pte) ? 'd' : '-',
+ pte_young(*pte) ? 'a' : '-' );
+ } else {
+ DRM_PROC_PRINT(" 0x%08lx\n", i);
+ }
+ }
+ }
+
+ return len;
+}
+
+static int drm_vma_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ int ret;
+
+ down(&dev->struct_sem);
+ ret = _drm_vma_info(buf, start, offset, len, eof, data);
+ up(&dev->struct_sem);
+ return ret;
+}
+#endif
+
+
+#if DRM_DMA_HISTOGRAM
+static int _drm_histo_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+ unsigned long slot_value = DRM_DMA_HISTOGRAM_INITIAL;
+ unsigned long prev_value = 0;
+ drm_buf_t *buffer;
+
+ if (offset > 0) return 0; /* no partial requests */
+ len = 0;
+ *eof = 1;
+
+ DRM_PROC_PRINT("general statistics:\n");
+ DRM_PROC_PRINT("total %10u\n", atomic_read(&dev->histo.total));
+ DRM_PROC_PRINT("open %10u\n", atomic_read(&dev->total_open));
+ DRM_PROC_PRINT("close %10u\n", atomic_read(&dev->total_close));
+ DRM_PROC_PRINT("ioctl %10u\n", atomic_read(&dev->total_ioctl));
+ DRM_PROC_PRINT("irq %10u\n", atomic_read(&dev->total_irq));
+ DRM_PROC_PRINT("ctx %10u\n", atomic_read(&dev->total_ctx));
+
+ DRM_PROC_PRINT("\nlock statistics:\n");
+ DRM_PROC_PRINT("locks %10u\n", atomic_read(&dev->total_locks));
+ DRM_PROC_PRINT("unlocks %10u\n", atomic_read(&dev->total_unlocks));
+ DRM_PROC_PRINT("contends %10u\n", atomic_read(&dev->total_contends));
+ DRM_PROC_PRINT("sleeps %10u\n", atomic_read(&dev->total_sleeps));
+
+
+ if (dma) {
+ DRM_PROC_PRINT("\ndma statistics:\n");
+ DRM_PROC_PRINT("prio %10u\n",
+ atomic_read(&dma->total_prio));
+ DRM_PROC_PRINT("bytes %10u\n",
+ atomic_read(&dma->total_bytes));
+ DRM_PROC_PRINT("dmas %10u\n",
+ atomic_read(&dma->total_dmas));
+ DRM_PROC_PRINT("missed:\n");
+ DRM_PROC_PRINT(" dma %10u\n",
+ atomic_read(&dma->total_missed_dma));
+ DRM_PROC_PRINT(" lock %10u\n",
+ atomic_read(&dma->total_missed_lock));
+ DRM_PROC_PRINT(" free %10u\n",
+ atomic_read(&dma->total_missed_free));
+ DRM_PROC_PRINT(" sched %10u\n",
+ atomic_read(&dma->total_missed_sched));
+ DRM_PROC_PRINT("tried %10u\n",
+ atomic_read(&dma->total_tried));
+ DRM_PROC_PRINT("hit %10u\n",
+ atomic_read(&dma->total_hit));
+ DRM_PROC_PRINT("lost %10u\n",
+ atomic_read(&dma->total_lost));
+
+ buffer = dma->next_buffer;
+ if (buffer) {
+ DRM_PROC_PRINT("next_buffer %7d\n", buffer->idx);
+ } else {
+ DRM_PROC_PRINT("next_buffer none\n");
+ }
+ buffer = dma->this_buffer;
+ if (buffer) {
+ DRM_PROC_PRINT("this_buffer %7d\n", buffer->idx);
+ } else {
+ DRM_PROC_PRINT("this_buffer none\n");
+ }
+ }
+
+
+ DRM_PROC_PRINT("\nvalues:\n");
+ if (dev->lock.hw_lock) {
+ DRM_PROC_PRINT("lock 0x%08x\n",
+ dev->lock.hw_lock->lock);
+ } else {
+ DRM_PROC_PRINT("lock none\n");
+ }
+ DRM_PROC_PRINT("context_flag 0x%08x\n", dev->context_flag);
+ DRM_PROC_PRINT("interrupt_flag 0x%08x\n", dev->interrupt_flag);
+ DRM_PROC_PRINT("dma_flag 0x%08x\n", dev->dma_flag);
+
+ DRM_PROC_PRINT("queue_count %10d\n", dev->queue_count);
+ DRM_PROC_PRINT("last_context %10d\n", dev->last_context);
+ DRM_PROC_PRINT("last_switch %10lu\n", dev->last_switch);
+ DRM_PROC_PRINT("last_checked %10d\n", dev->last_checked);
+
+
+ DRM_PROC_PRINT("\n q2d d2c c2f"
+ " q2c q2f dma sch"
+ " ctx lacq lhld\n\n");
+ for (i = 0; i < DRM_DMA_HISTOGRAM_SLOTS; i++) {
+ DRM_PROC_PRINT("%s %10lu %10u %10u %10u %10u %10u"
+ " %10u %10u %10u %10u %10u\n",
+ i == DRM_DMA_HISTOGRAM_SLOTS - 1 ? ">=" : "< ",
+ i == DRM_DMA_HISTOGRAM_SLOTS - 1
+ ? prev_value : slot_value ,
+
+ atomic_read(&dev->histo
+ .queued_to_dispatched[i]),
+ atomic_read(&dev->histo
+ .dispatched_to_completed[i]),
+ atomic_read(&dev->histo
+ .completed_to_freed[i]),
+
+ atomic_read(&dev->histo
+ .queued_to_completed[i]),
+ atomic_read(&dev->histo
+ .queued_to_freed[i]),
+ atomic_read(&dev->histo.dma[i]),
+ atomic_read(&dev->histo.schedule[i]),
+ atomic_read(&dev->histo.ctx[i]),
+ atomic_read(&dev->histo.lacq[i]),
+ atomic_read(&dev->histo.lhld[i]));
+ prev_value = slot_value;
+ slot_value = DRM_DMA_HISTOGRAM_NEXT(slot_value);
+ }
+ return len;
+}
+
+static int drm_histo_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *)data;
+ int ret;
+
+ down(&dev->struct_sem);
+ ret = _drm_histo_info(buf, start, offset, len, eof, data);
+ up(&dev->struct_sem);
+ return ret;
+}
+#endif
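[Editorial note, not part of this patch: each handler above fills the caller-supplied page buffer through DRM_PROC_PRINT, refuses partial reads (offset > 0), sets *eof, and returns the number of bytes written.  The small userland sketch below imitates that accumulate-into-a-page-buffer pattern; the buffer limit and the demo_info function are invented for the example, and the real DRM_PROC_PRINT macro is defined in drmP.h.]

#include <stdio.h>

#define BUF_LIMIT (4096 - 80)	/* leave headroom in the one-page buffer */

/* Mimics the read_proc pattern used above: append formatted lines into a
 * page-sized buffer, track the running length, and report EOF in one shot. */
static int demo_info(char *buf, int *eof)
{
	int len = 0;

	*eof = 1;
	len += sprintf(&buf[len], "name %s\n", "drm demo");
	len += sprintf(&buf[len], "clients %d\n", 1);
	if (len > BUF_LIMIT)
		return BUF_LIMIT;
	return len;
}

int main(void)
{
	char page[4096];
	int eof;
	int len = demo_info(page, &eof);

	fwrite(page, 1, len, stdout);
	return 0;
}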
diff --git a/bsd/drm/sysctl.c b/bsd/drm/sysctl.c
new file mode 100644
index 00000000..7c736abf
--- /dev/null
+++ b/bsd/drm/sysctl.c
@@ -0,0 +1,554 @@
+/* sysctl.c -- sysctl support for DRM -*- c -*-
+ * Created: Mon Jan 11 09:48:47 1999 by faith@precisioninsight.com
+ * Revised: Fri Aug 20 11:31:48 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI$
+ * $XFree86$
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include <sys/sysctl.h>
+
+SYSCTL_NODE(_hw, OID_AUTO, dri, CTLFLAG_RW, 0, "DRI Graphics");
+
+static int drm_name_info SYSCTL_HANDLER_ARGS;
+static int drm_vm_info SYSCTL_HANDLER_ARGS;
+static int drm_clients_info SYSCTL_HANDLER_ARGS;
+static int drm_queues_info SYSCTL_HANDLER_ARGS;
+static int drm_bufs_info SYSCTL_HANDLER_ARGS;
+#if DRM_DEBUG_CODExx
+static int drm_vma_info SYSCTL_HANDLER_ARGS;
+#endif
+#if DRM_DMA_HISTOGRAM
+static int drm_histo_info SYSCTL_HANDLER_ARGS;
+#endif
+
+struct drm_sysctl_list {
+ const char *name;
+ int (*f) SYSCTL_HANDLER_ARGS;
+} drm_sysctl_list[] = {
+ { "name", drm_name_info },
+ { "mem", drm_mem_info },
+ { "vm", drm_vm_info },
+ { "clients", drm_clients_info },
+ { "queues", drm_queues_info },
+ { "bufs", drm_bufs_info },
+#if DRM_DEBUG_CODExx
+ { "vma", drm_vma_info },
+#endif
+#if DRM_DMA_HISTOGRAM
+ { "histo", drm_histo_info },
+#endif
+};
+#define DRM_SYSCTL_ENTRIES (sizeof(drm_sysctl_list)/sizeof(drm_sysctl_list[0]))
+
+struct drm_sysctl_info {
+ struct sysctl_oid oids[DRM_SYSCTL_ENTRIES + 1];
+ struct sysctl_oid_list list;
+ char name[2];
+};
+
+int drm_sysctl_init(drm_device_t *dev)
+{
+ struct drm_sysctl_info *info;
+ struct sysctl_oid *oid;
+ struct sysctl_oid *top;
+ int i;
+
+	/* Find the next free slot under hw.dri */
+ i = 0;
+ SLIST_FOREACH(oid, &sysctl__hw_dri_children, oid_link) {
+ if (i <= oid->oid_arg2)
+ i = oid->oid_arg2 + 1;
+ }
+
+ info = drm_alloc(sizeof *info, DRM_MEM_DRIVER);
+ dev->sysctl = info;
+
+	/* Construct the node under hw.dri */
+ info->name[0] = '0' + i;
+ info->name[1] = 0;
+ oid = &info->oids[DRM_SYSCTL_ENTRIES];
+ bzero(oid, sizeof(*oid));
+ oid->oid_parent = &sysctl__hw_dri_children;
+ oid->oid_number = OID_AUTO;
+ oid->oid_kind = CTLTYPE_NODE | CTLFLAG_RW;
+ oid->oid_arg1 = &info->list;
+ oid->oid_arg2 = i;
+ oid->oid_name = info->name;
+ oid->oid_handler = 0;
+ oid->oid_fmt = "N";
+ SLIST_INIT(&info->list);
+ sysctl_register_oid(oid);
+ top = oid;
+
+ for (i = 0; i < DRM_SYSCTL_ENTRIES; i++) {
+ oid = &info->oids[i];
+ bzero(oid, sizeof(*oid));
+ oid->oid_parent = top->oid_arg1;
+ oid->oid_number = OID_AUTO;
+ oid->oid_kind = CTLTYPE_INT | CTLFLAG_RD;
+ oid->oid_arg1 = dev;
+ oid->oid_arg2 = 0;
+ oid->oid_name = drm_sysctl_list[i].name;
+ oid->oid_handler = drm_sysctl_list[i].f;
+ oid->oid_fmt = "A";
+ sysctl_register_oid(oid);
+ }
+
+ return 0;
+}
+
+int drm_sysctl_cleanup(drm_device_t *dev)
+{
+ int i;
+
+ DRM_DEBUG("dev->sysctl=%p\n", dev->sysctl);
+ for (i = 0; i < DRM_SYSCTL_ENTRIES + 1; i++)
+ sysctl_unregister_oid(&dev->sysctl->oids[i]);
+
+ drm_free(dev->sysctl, sizeof *dev->sysctl, DRM_MEM_DRIVER);
+ dev->sysctl = NULL;
+
+ return 0;
+}
+
+static int drm_name_info SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ char buf[128];
+ int error;
+
+ if (dev->unique) {
+ DRM_SYSCTL_PRINT("%s 0x%x %s\n",
+ dev->name, dev2udev(dev->devnode), dev->unique);
+ } else {
+ DRM_SYSCTL_PRINT("%s 0x%x\n", dev->name, dev2udev(dev->devnode));
+ }
+
+ SYSCTL_OUT(req, "", 1);
+
+ return 0;
+}
+
+static int _drm_vm_info SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ drm_map_t *map;
+ const char *types[] = { "FB", "REG", "SHM" };
+ const char *type;
+ int i;
+ char buf[128];
+ int error;
+
+ DRM_SYSCTL_PRINT("slot offset size type flags "
+ "address mtrr\n\n");
+ error = SYSCTL_OUT(req, buf, strlen(buf));
+ if (error) return error;
+
+ for (i = 0; i < dev->map_count; i++) {
+ map = dev->maplist[i];
+ if (map->type < 0 || map->type > 2) type = "??";
+ else type = types[map->type];
+ DRM_SYSCTL_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
+ i,
+ map->offset,
+ map->size,
+ type,
+ map->flags,
+ (unsigned long)map->handle);
+ if (map->mtrr < 0) {
+ DRM_SYSCTL_PRINT("none\n");
+ } else {
+ DRM_SYSCTL_PRINT("%4d\n", map->mtrr);
+ }
+ }
+ SYSCTL_OUT(req, "", 1);
+
+ return 0;
+}
+
+static int drm_vm_info SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ int ret;
+
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
+ ret = _drm_vm_info(oidp, arg1, arg2, req);
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+
+ return ret;
+}
+
+
+static int _drm_queues_info SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ int i;
+ drm_queue_t *q;
+ char buf[128];
+ int error;
+
+ DRM_SYSCTL_PRINT(" ctx/flags use fin"
+ " blk/rw/rwf wait flushed queued"
+ " locks\n\n");
+ for (i = 0; i < dev->queue_count; i++) {
+ q = dev->queuelist[i];
+ atomic_inc(&q->use_count);
+ DRM_SYSCTL_PRINT_RET(atomic_dec(&q->use_count),
+ "%5d/0x%03x %5d %5d"
+ " %5d/%c%c/%c%c%c %5d %10d %10d %10d\n",
+ i,
+ q->flags,
+ atomic_read(&q->use_count),
+ atomic_read(&q->finalization),
+ atomic_read(&q->block_count),
+ atomic_read(&q->block_read) ? 'r' : '-',
+ atomic_read(&q->block_write) ? 'w' : '-',
+ q->read_queue ? 'r':'-',
+ q->write_queue ? 'w':'-',
+ q->flush_queue ? 'f':'-',
+ DRM_BUFCOUNT(&q->waitlist),
+ atomic_read(&q->total_flushed),
+ atomic_read(&q->total_queued),
+ atomic_read(&q->total_locks));
+ atomic_dec(&q->use_count);
+ }
+
+ SYSCTL_OUT(req, "", 1);
+ return 0;
+}
+
+static int drm_queues_info SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ int ret;
+
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
+ ret = _drm_queues_info(oidp, arg1, arg2, req);
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+ return ret;
+}
+
+/* drm_bufs_info is called whenever a process reads
+ hw.dri.0.bufs. */
+
+static int _drm_bufs_info SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+ char buf[128];
+ int error;
+
+ if (!dma) return 0;
+ DRM_SYSCTL_PRINT(" o size count free segs pages kB\n\n");
+ for (i = 0; i <= DRM_MAX_ORDER; i++) {
+ if (dma->bufs[i].buf_count)
+ DRM_SYSCTL_PRINT("%2d %8d %5d %5d %5d %5d %5d\n",
+ i,
+ dma->bufs[i].buf_size,
+ dma->bufs[i].buf_count,
+ atomic_read(&dma->bufs[i]
+ .freelist.count),
+ dma->bufs[i].seg_count,
+ dma->bufs[i].seg_count
+ *(1 << dma->bufs[i].page_order),
+ (dma->bufs[i].seg_count
+ * (1 << dma->bufs[i].page_order))
+ * PAGE_SIZE / 1024);
+ }
+ DRM_SYSCTL_PRINT("\n");
+ for (i = 0; i < dma->buf_count; i++) {
+ if (i && !(i%32)) DRM_SYSCTL_PRINT("\n");
+ DRM_SYSCTL_PRINT(" %d", dma->buflist[i]->list);
+ }
+ DRM_SYSCTL_PRINT("\n");
+
+ SYSCTL_OUT(req, "", 1);
+ return 0;
+}
+
+static int drm_bufs_info SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ int ret;
+
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
+ ret = _drm_bufs_info(oidp, arg1, arg2, req);
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+ return ret;
+}
+
+
+static int _drm_clients_info SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ drm_file_t *priv;
+ char buf[128];
+ int error;
+
+ DRM_SYSCTL_PRINT("a dev pid uid magic ioctls\n\n");
+ TAILQ_FOREACH(priv, &dev->files, link) {
+ DRM_SYSCTL_PRINT("%c %3d %5d %5d %10u %10lu\n",
+ priv->authenticated ? 'y' : 'n',
+ priv->minor,
+ priv->pid,
+ priv->uid,
+ priv->magic,
+ priv->ioctl_count);
+ }
+
+ SYSCTL_OUT(req, "", 1);
+ return 0;
+}
+
+static int drm_clients_info SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ int ret;
+
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
+ ret = _drm_clients_info(oidp, arg1, arg2, req);
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+ return ret;
+}
+
+#if DRM_DEBUG_CODExx
+
+static int _drm_vma_info SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ drm_vma_entry_t *pt;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long i;
+ struct vm_area_struct *vma;
+ unsigned long address;
+#if defined(__i386__)
+ unsigned int pgprot;
+#endif
+ char buf[128];
+ int error;
+
+ DRM_SYSCTL_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
+ atomic_read(&dev->vma_count),
+ high_memory, virt_to_phys(high_memory));
+ for (pt = dev->vmalist; pt; pt = pt->next) {
+ if (!(vma = pt->vma)) continue;
+ DRM_SYSCTL_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx",
+ pt->pid,
+ vma->vm_start,
+ vma->vm_end,
+ vma->vm_flags & VM_READ ? 'r' : '-',
+ vma->vm_flags & VM_WRITE ? 'w' : '-',
+ vma->vm_flags & VM_EXEC ? 'x' : '-',
+ vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
+ vma->vm_flags & VM_LOCKED ? 'l' : '-',
+ vma->vm_flags & VM_IO ? 'i' : '-',
+ vma->vm_offset );
+#if defined(__i386__)
+ pgprot = pgprot_val(vma->vm_page_prot);
+ DRM_SYSCTL_PRINT(" %c%c%c%c%c%c%c%c%c",
+ pgprot & _PAGE_PRESENT ? 'p' : '-',
+ pgprot & _PAGE_RW ? 'w' : 'r',
+ pgprot & _PAGE_USER ? 'u' : 's',
+ pgprot & _PAGE_PWT ? 't' : 'b',
+ pgprot & _PAGE_PCD ? 'u' : 'c',
+ pgprot & _PAGE_ACCESSED ? 'a' : '-',
+ pgprot & _PAGE_DIRTY ? 'd' : '-',
+ pgprot & _PAGE_4M ? 'm' : 'k',
+ pgprot & _PAGE_GLOBAL ? 'g' : 'l' );
+#endif
+ DRM_SYSCTL_PRINT("\n");
+ for (i = vma->vm_start; i < vma->vm_end; i += PAGE_SIZE) {
+ pgd = pgd_offset(vma->vm_mm, i);
+ pmd = pmd_offset(pgd, i);
+ pte = pte_offset(pmd, i);
+ if (pte_present(*pte)) {
+ address = __pa(pte_page(*pte))
+ + (i & (PAGE_SIZE-1));
+ DRM_SYSCTL_PRINT(" 0x%08lx -> 0x%08lx"
+ " %c%c%c%c%c\n",
+ i,
+ address,
+ pte_read(*pte) ? 'r' : '-',
+ pte_write(*pte) ? 'w' : '-',
+ pte_exec(*pte) ? 'x' : '-',
+ pte_dirty(*pte) ? 'd' : '-',
+ pte_young(*pte) ? 'a' : '-' );
+ } else {
+ DRM_SYSCTL_PRINT(" 0x%08lx\n", i);
+ }
+ }
+ }
+
+ SYSCTL_OUT(req, "", 1);
+ return 0;
+}
+
+static int drm_vma_info SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ int ret;
+
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
+ ret = _drm_vma_info(oidp, arg1, arg2, req);
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+ return ret;
+}
+#endif
+
+
+#if DRM_DMA_HISTOGRAM
+static int _drm_histo_info SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+ unsigned long slot_value = DRM_DMA_HISTOGRAM_INITIAL;
+ unsigned long prev_value = 0;
+ drm_buf_t *buffer;
+ char buf[128];
+ int error;
+
+ DRM_SYSCTL_PRINT("general statistics:\n");
+ DRM_SYSCTL_PRINT("total %10u\n", atomic_read(&dev->histo.total));
+ DRM_SYSCTL_PRINT("open %10u\n", atomic_read(&dev->total_open));
+ DRM_SYSCTL_PRINT("close %10u\n", atomic_read(&dev->total_close));
+ DRM_SYSCTL_PRINT("ioctl %10u\n", atomic_read(&dev->total_ioctl));
+ DRM_SYSCTL_PRINT("irq %10u\n", atomic_read(&dev->total_irq));
+ DRM_SYSCTL_PRINT("ctx %10u\n", atomic_read(&dev->total_ctx));
+
+ DRM_SYSCTL_PRINT("\nlock statistics:\n");
+ DRM_SYSCTL_PRINT("locks %10u\n", atomic_read(&dev->total_locks));
+ DRM_SYSCTL_PRINT("unlocks %10u\n", atomic_read(&dev->total_unlocks));
+ DRM_SYSCTL_PRINT("contends %10u\n", atomic_read(&dev->total_contends));
+ DRM_SYSCTL_PRINT("sleeps %10u\n", atomic_read(&dev->total_sleeps));
+
+
+ if (dma) {
+ DRM_SYSCTL_PRINT("\ndma statistics:\n");
+ DRM_SYSCTL_PRINT("prio %10u\n",
+ atomic_read(&dma->total_prio));
+ DRM_SYSCTL_PRINT("bytes %10u\n",
+ atomic_read(&dma->total_bytes));
+ DRM_SYSCTL_PRINT("dmas %10u\n",
+ atomic_read(&dma->total_dmas));
+ DRM_SYSCTL_PRINT("missed:\n");
+ DRM_SYSCTL_PRINT(" dma %10u\n",
+ atomic_read(&dma->total_missed_dma));
+ DRM_SYSCTL_PRINT(" lock %10u\n",
+ atomic_read(&dma->total_missed_lock));
+ DRM_SYSCTL_PRINT(" free %10u\n",
+ atomic_read(&dma->total_missed_free));
+ DRM_SYSCTL_PRINT(" sched %10u\n",
+ atomic_read(&dma->total_missed_sched));
+ DRM_SYSCTL_PRINT("tried %10u\n",
+ atomic_read(&dma->total_tried));
+ DRM_SYSCTL_PRINT("hit %10u\n",
+ atomic_read(&dma->total_hit));
+ DRM_SYSCTL_PRINT("lost %10u\n",
+ atomic_read(&dma->total_lost));
+
+ buffer = dma->next_buffer;
+ if (buffer) {
+ DRM_SYSCTL_PRINT("next_buffer %7d\n", buffer->idx);
+ } else {
+ DRM_SYSCTL_PRINT("next_buffer none\n");
+ }
+ buffer = dma->this_buffer;
+ if (buffer) {
+ DRM_SYSCTL_PRINT("this_buffer %7d\n", buffer->idx);
+ } else {
+ DRM_SYSCTL_PRINT("this_buffer none\n");
+ }
+ }
+
+
+ DRM_SYSCTL_PRINT("\nvalues:\n");
+ if (dev->lock.hw_lock) {
+ DRM_SYSCTL_PRINT("lock 0x%08x\n",
+ dev->lock.hw_lock->lock);
+ } else {
+ DRM_SYSCTL_PRINT("lock none\n");
+ }
+ DRM_SYSCTL_PRINT("context_flag 0x%08x\n", dev->context_flag);
+ DRM_SYSCTL_PRINT("interrupt_flag 0x%08x\n", dev->interrupt_flag);
+ DRM_SYSCTL_PRINT("dma_flag 0x%08x\n", dev->dma_flag);
+
+ DRM_SYSCTL_PRINT("queue_count %10d\n", dev->queue_count);
+ DRM_SYSCTL_PRINT("last_context %10d\n", dev->last_context);
+ DRM_SYSCTL_PRINT("last_switch %10u\n", dev->last_switch);
+ DRM_SYSCTL_PRINT("last_checked %10d\n", dev->last_checked);
+
+
+ DRM_SYSCTL_PRINT("\n q2d d2c c2f"
+ " q2c q2f dma sch"
+ " ctx lacq lhld\n\n");
+ for (i = 0; i < DRM_DMA_HISTOGRAM_SLOTS; i++) {
+ DRM_SYSCTL_PRINT("%s %10lu %10u %10u %10u %10u %10u"
+ " %10u %10u %10u %10u %10u\n",
+ i == DRM_DMA_HISTOGRAM_SLOTS - 1 ? ">=" : "< ",
+ i == DRM_DMA_HISTOGRAM_SLOTS - 1
+ ? prev_value : slot_value ,
+
+ atomic_read(&dev->histo
+ .queued_to_dispatched[i]),
+ atomic_read(&dev->histo
+ .dispatched_to_completed[i]),
+ atomic_read(&dev->histo
+ .completed_to_freed[i]),
+
+ atomic_read(&dev->histo
+ .queued_to_completed[i]),
+ atomic_read(&dev->histo
+ .queued_to_freed[i]),
+ atomic_read(&dev->histo.dma[i]),
+ atomic_read(&dev->histo.schedule[i]),
+ atomic_read(&dev->histo.ctx[i]),
+ atomic_read(&dev->histo.lacq[i]),
+ atomic_read(&dev->histo.lhld[i]));
+ prev_value = slot_value;
+ slot_value = DRM_DMA_HISTOGRAM_NEXT(slot_value);
+ }
+ SYSCTL_OUT(req, "", 1);
+ return 0;
+}
+
+static int drm_histo_info SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ int ret;
+
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
+ ret = _drm_histo_info(oidp, arg1, arg2, req);
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+ return ret;
+}
+#endif
diff --git a/bsd/drm/vm.c b/bsd/drm/vm.c
new file mode 100644
index 00000000..9c457fca
--- /dev/null
+++ b/bsd/drm/vm.c
@@ -0,0 +1,104 @@
+/* vm.c -- Memory mapping for DRM -*- c -*-
+ * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
+ * Revised: Fri Aug 20 22:48:11 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/vm.c,v 1.7 1999/08/21 02:48:34 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/vm.c,v 1.1 1999/09/25 14:38:02 dawes Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
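+/* Note on the return convention assumed here: FreeBSD's d_mmap entry point
+   returns the physical page index (atop) that should back the faulting
+   offset, or -1 to signal failure; both handlers below follow that. */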
+static int drm_dma_mmap(dev_t kdev, vm_offset_t offset, int prot)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_device_dma_t *dma = dev->dma;
+ unsigned long physical;
+ unsigned long page;
+
+ if (!dma) return -1; /* Error */
+ if (!dma->pagelist) return -1; /* Nothing allocated */
+
+ page = offset >> PAGE_SHIFT;
+ physical = dma->pagelist[page];
+
+ DRM_DEBUG("0x%08x (page %lu) => 0x%08lx\n", offset, page, physical);
+ return atop(physical);
+}
+
+int drm_mmap(dev_t kdev, vm_offset_t offset, int prot)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_map_t *map = NULL;
+ int i;
+
+ /* DRM_DEBUG("offset = 0x%x\n", offset); */
+
+ if (dev->dma
+ && offset >= 0
+ && offset < ptoa(dev->dma->page_count))
+ return drm_dma_mmap(kdev, offset, prot);
+
+ /* A sequential search of a linked list is
+ fine here because: 1) there will only be
+ about 5-10 entries in the list and, 2) a
+ DRI client only has to do this mapping
+ once, so it doesn't have to be optimized
+ for performance, even if the list was a
+ bit longer. */
+ for (i = 0; i < dev->map_count; i++) {
+ map = dev->maplist[i];
+ /* DRM_DEBUG("considering 0x%x..0x%x\n", map->offset, map->offset + map->size - 1); */
+ if (offset >= map->offset
+ && offset < map->offset + map->size) break;
+ }
+
+ if (i >= dev->map_count) {
+ DRM_DEBUG("can't find map\n");
+ return -1;
+ }
+ if (!map || ((map->flags&_DRM_RESTRICTED) && suser(curproc))) {
+ DRM_DEBUG("restricted map\n");
+ return -1;
+ }
+
+ switch (map->type) {
+ case _DRM_FRAME_BUFFER:
+ case _DRM_REGISTERS:
+ case _DRM_AGP:
+ return atop(offset);
+ case _DRM_SHM:
+ return atop(vtophys(offset));
+	default:
+		break;		/* This should never happen. */
+	}
+	DRM_DEBUG("bailing out\n");
+
+	return -1;
+}
diff --git a/bsd/drmP.h b/bsd/drmP.h
new file mode 100644
index 00000000..863836a6
--- /dev/null
+++ b/bsd/drmP.h
@@ -0,0 +1,708 @@
+/* drmP.h -- Private header for Direct Rendering Manager -*- c -*-
+ * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
+ * Revised: Tue Oct 12 08:51:07 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmP.h,v 1.58 1999/08/30 13:05:00 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmP.h,v 1.1 1999/09/25 14:37:59 dawes Exp $
+ *
+ */
+
+#ifndef _DRM_P_H_
+#define _DRM_P_H_
+
+#ifdef _KERNEL
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/stat.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/fcntl.h>
+#include <sys/uio.h>
+#include <sys/filio.h>
+#include <sys/sysctl.h>
+#include <sys/select.h>
+#include <sys/bus.h>
+#include <sys/taskqueue.h>
+
+#ifdef DRM_AGP
+#include <pci/agpvar.h>
+#endif
+
+#include "drm.h"
+
+typedef u_int32_t atomic_t;
+typedef u_int32_t cycles_t;
+typedef u_int32_t spinlock_t;
+#define atomic_set(p, v) (*(p) = (v))
+#define atomic_read(p) (*(p))
+#define atomic_inc(p) atomic_add_int(p, 1)
+#define atomic_dec(p) atomic_subtract_int(p, 1)
+#define atomic_add(n, p) atomic_add_int(p, n)
+#define atomic_sub(n, p) atomic_subtract_int(p, n)
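+/* atomic_set/atomic_read are plain assignments; inc/dec/add/sub map onto
+   the FreeBSD atomic_*_int primitives. */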
+
+/* Fake this */
+static __inline u_int32_t
+test_and_set_bit(int b, volatile u_int32_t *p)
+{
+	u_int32_t m = 1 << (b & 0x1f);
+	u_int32_t r = p[b >> 5] & m;
+	p[b >> 5] |= m;
+	return r;
+}
+
+static __inline void
+clear_bit(int b, volatile u_int32_t *p)
+{
+ atomic_clear_int(p + (b >> 5), 1 << (b & 0x1f));
+}
+
+static __inline void
+set_bit(int b, volatile u_int32_t *p)
+{
+ atomic_set_int(p + (b >> 5), 1 << (b & 0x1f));
+}
+
+static __inline int
+test_bit(int b, volatile u_int32_t *p)
+{
+ return p[b >> 5] & (1 << (b & 0x1f));
+}
+
+static __inline int
+find_first_zero_bit(volatile u_int32_t *p, int max)
+{
+	int b;
+
+	for (b = 0; b < max; b++) {
+		if (!(p[b >> 5] & (1 << (b & 0x1f))))
+			return b;
+	}
+	return max;
+}
+
+#define spldrm() spltty()
+
+#define memset(p, v, s) bzero(p, s)
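+/* NOTE: this shim ignores the fill value, so it is only valid for zero fills. */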
+
+/*
+ * Software interrupts for DMA pipe feeding. The FreeBSD kernel apis
+ * are severely lacking here.
+ */
+#define SWI_DRI (SWI_VM+2)
+
+#define DRM_DEBUG_CODE 2	  /* Include debugging code (if > 1, then
+				     also include looping detection.) */
+#define DRM_DMA_HISTOGRAM 1 /* Make histogram of DMA latency. */
+
+#define DRM_HASH_SIZE 16 /* Size of key hash table */
+#define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */
+#define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */
+#define DRM_LOOPING_LIMIT 5000000
+#define DRM_BSZ 1024 /* Buffer size for /dev/drm? output */
+#define DRM_TIME_SLICE (hz/20) /* Time slice for GLXContexts */
+#define DRM_LOCK_SLICE 1 /* Time slice for lock, in jiffies */
+
+#define DRM_FLAG_DEBUG 0x01
+#define DRM_FLAG_NOCTX 0x02
+
+#define DRM_MEM_DMA 0
+#define DRM_MEM_SAREA 1
+#define DRM_MEM_DRIVER 2
+#define DRM_MEM_MAGIC 3
+#define DRM_MEM_IOCTLS 4
+#define DRM_MEM_MAPS 5
+#define DRM_MEM_VMAS 6
+#define DRM_MEM_BUFS 7
+#define DRM_MEM_SEGS 8
+#define DRM_MEM_PAGES 9
+#define DRM_MEM_FILES 10
+#define DRM_MEM_QUEUES 11
+#define DRM_MEM_CMDS 12
+#define DRM_MEM_MAPPINGS 13
+#define DRM_MEM_BUFLISTS 14
+#define DRM_MEM_AGPLISTS 15
+#define DRM_MEM_TOTALAGP 16
+#define DRM_MEM_BOUNDAGP 17
+#define DRM_MEM_CTXBITMAP 18
+
+#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
+
+ /* Backward compatibility section */
+#ifndef _PAGE_PWT
+ /* The name of _PAGE_WT was changed to
+ _PAGE_PWT in Linux 2.2.6 */
+#define _PAGE_PWT _PAGE_WT
+#endif
+
+#define __drm_dummy_lock(lock) (*(__volatile__ unsigned int *)lock)
+#define _DRM_CAS(lock,old,new,__ret) \
+ do { \
+ int __dummy; /* Can't mark eax as clobbered */ \
+ __asm__ __volatile__( \
+ "lock ; cmpxchg %4,%1\n\t" \
+ "setnz %0" \
+ : "=d" (__ret), \
+ "=m" (__drm_dummy_lock(lock)), \
+ "=a" (__dummy) \
+ : "2" (old), \
+ "r" (new)); \
+ } while (0)
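+/* Hypothetical usage sketch: given "int failed;", _DRM_CAS(&lock->lock, old,
+   new, failed) attempts the compare-and-exchange; "failed" is set non-zero
+   when the value at "lock" no longer equals "old" and no swap occurred. */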
+
+
+
+ /* Macros to make printk easier */
+#define DRM_ERROR(fmt, arg...) \
+ printf("error: " "[" DRM_NAME ":" __FUNCTION__ "] *ERROR* " fmt , ##arg)
+#define DRM_MEM_ERROR(area, fmt, arg...) \
+ printf("error: " "[" DRM_NAME ":" __FUNCTION__ ":%s] *ERROR* " fmt , \
+ drm_mem_stats[area].name , ##arg)
+#define DRM_INFO(fmt, arg...) printf("info: " "[" DRM_NAME "] " fmt , ##arg)
+
+#if DRM_DEBUG_CODE
+#define DRM_DEBUG(fmt, arg...) \
+ do { \
+ if (drm_flags&DRM_FLAG_DEBUG) \
+ printf("[" DRM_NAME ":" __FUNCTION__ "] " fmt , \
+ ##arg); \
+ } while (0)
+#else
+#define DRM_DEBUG(fmt, arg...) do { } while (0)
+#endif
+
+#define DRM_PROC_LIMIT (PAGE_SIZE-80)
+
+#define DRM_SYSCTL_PRINT(fmt, arg...)				\
+	do {							\
+		snprintf(buf, sizeof(buf), fmt, ##arg);		\
+		error = SYSCTL_OUT(req, buf, strlen(buf));	\
+		if (error) return error;			\
+	} while (0)
+
+#define DRM_SYSCTL_PRINT_RET(ret, fmt, arg...)			\
+	do {							\
+		snprintf(buf, sizeof(buf), fmt, ##arg);		\
+		error = SYSCTL_OUT(req, buf, strlen(buf));	\
+		if (error) { ret; return error; }		\
+	} while (0)
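+/* Both macros expand in place and assume a local "char buf[]", a local
+   "int error", and the sysctl "req" argument are in scope at the call site. */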
+
+ /* Internal types and structures */
+#define DRM_ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
+#define DRM_MIN(a,b) ((a)<(b)?(a):(b))
+#define DRM_MAX(a,b) ((a)>(b)?(a):(b))
+
+#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
+#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
+#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
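+/* The waitlist ring has count+1 slots (see drm_waitlist below), so
+   DRM_LEFTCOUNT is the number of free slots and DRM_BUFCOUNT the number of
+   buffers currently queued. */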
+
+typedef struct drm_ioctl_desc {
+ d_ioctl_t *func;
+ int auth_needed;
+ int root_only;
+} drm_ioctl_desc_t;
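+/* auth_needed: the caller must have authenticated (presumably via the
+   getmagic/authmagic handshake); root_only: further restricted to the
+   super-user.  See e.g. the gamma_ioctls[] table for per-ioctl settings. */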
+
+typedef struct drm_devstate {
+ pid_t owner; /* X server pid holding x_lock */
+
+} drm_devstate_t;
+
+typedef struct drm_magic_entry {
+ drm_magic_t magic;
+ struct drm_file *priv;
+ struct drm_magic_entry *next;
+} drm_magic_entry_t;
+
+typedef struct drm_magic_head {
+ struct drm_magic_entry *head;
+ struct drm_magic_entry *tail;
+} drm_magic_head_t;
+
+typedef struct drm_vma_entry {
+ struct vm_area_struct *vma;
+ struct drm_vma_entry *next;
+ pid_t pid;
+} drm_vma_entry_t;
+
+typedef struct drm_buf {
+ int idx; /* Index into master buflist */
+ int total; /* Buffer size */
+ int order; /* log-base-2(total) */
+ int used; /* Amount of buffer in use (for DMA) */
+ unsigned long offset; /* Byte offset (used internally) */
+ void *address; /* Address of buffer */
+ unsigned long bus_address; /* Bus address of buffer */
+ struct drm_buf *next; /* Kernel-only: used for free list */
+ __volatile__ int waiting; /* On kernel DMA queue */
+ __volatile__ int pending; /* On hardware DMA queue */
+ int dma_wait; /* Processes waiting */
+ pid_t pid; /* PID of holding process */
+ int context; /* Kernel queue for this buffer */
+ int while_locked;/* Dispatch this buffer while locked */
+ enum {
+ DRM_LIST_NONE = 0,
+ DRM_LIST_FREE = 1,
+ DRM_LIST_WAIT = 2,
+ DRM_LIST_PEND = 3,
+ DRM_LIST_PRIO = 4,
+ DRM_LIST_RECLAIM = 5
+ } list; /* Which list we're on */
+
+ void *dev_private;
+ int dev_priv_size;
+
+#if DRM_DMA_HISTOGRAM
+ struct timespec time_queued; /* Queued to kernel DMA queue */
+ struct timespec time_dispatched; /* Dispatched to hardware */
+ struct timespec time_completed; /* Completed by hardware */
+ struct timespec time_freed; /* Back on freelist */
+#endif
+} drm_buf_t;
+
+#if DRM_DMA_HISTOGRAM
+#define DRM_DMA_HISTOGRAM_SLOTS 9
+#define DRM_DMA_HISTOGRAM_INITIAL 10
+#define DRM_DMA_HISTOGRAM_NEXT(current) ((current)*10)
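+/* Bucket boundaries grow geometrically: 10, 100, 1000, ...; the last of the
+   nine slots collects everything at or above the previous boundary (the ">="
+   row in the histogram sysctl output). */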
+typedef struct drm_histogram {
+ atomic_t total;
+
+ atomic_t queued_to_dispatched[DRM_DMA_HISTOGRAM_SLOTS];
+ atomic_t dispatched_to_completed[DRM_DMA_HISTOGRAM_SLOTS];
+ atomic_t completed_to_freed[DRM_DMA_HISTOGRAM_SLOTS];
+
+ atomic_t queued_to_completed[DRM_DMA_HISTOGRAM_SLOTS];
+ atomic_t queued_to_freed[DRM_DMA_HISTOGRAM_SLOTS];
+
+ atomic_t dma[DRM_DMA_HISTOGRAM_SLOTS];
+ atomic_t schedule[DRM_DMA_HISTOGRAM_SLOTS];
+ atomic_t ctx[DRM_DMA_HISTOGRAM_SLOTS];
+ atomic_t lacq[DRM_DMA_HISTOGRAM_SLOTS];
+ atomic_t lhld[DRM_DMA_HISTOGRAM_SLOTS];
+} drm_histogram_t;
+#endif
+
+ /* bufs is one longer than it has to be */
+typedef struct drm_waitlist {
+ int count; /* Number of possible buffers */
+ drm_buf_t **bufs; /* List of pointers to buffers */
+ drm_buf_t **rp; /* Read pointer */
+ drm_buf_t **wp; /* Write pointer */
+ drm_buf_t **end; /* End pointer */
+ spinlock_t read_lock;
+ spinlock_t write_lock;
+} drm_waitlist_t;
+
+typedef struct drm_freelist {
+ int initialized; /* Freelist in use */
+ atomic_t count; /* Number of free buffers */
+ drm_buf_t *next; /* End pointer */
+
+ int waiting; /* Processes waiting on free bufs */
+ int low_mark; /* Low water mark */
+ int high_mark; /* High water mark */
+ atomic_t wfh; /* If waiting for high mark */
+} drm_freelist_t;
+
+typedef struct drm_buf_entry {
+ int buf_size;
+ int buf_count;
+ drm_buf_t *buflist;
+ int seg_count;
+ int page_order;
+ unsigned long *seglist;
+
+ drm_freelist_t freelist;
+} drm_buf_entry_t;
+
+typedef struct drm_hw_lock {
+ __volatile__ unsigned int lock;
+ char padding[60]; /* Pad to cache line */
+} drm_hw_lock_t;
+
+typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;
+typedef struct drm_file {
+ TAILQ_ENTRY(drm_file) link;
+ int authenticated;
+ int minor;
+ pid_t pid;
+ uid_t uid;
+ int refs;
+ drm_magic_t magic;
+ unsigned long ioctl_count;
+ struct drm_device *devXX;
+} drm_file_t;
+
+
+typedef struct drm_queue {
+ atomic_t use_count; /* Outstanding uses (+1) */
+ atomic_t finalization; /* Finalization in progress */
+ atomic_t block_count; /* Count of processes waiting */
+ atomic_t block_read; /* Queue blocked for reads */
+ int read_queue; /* Processes waiting on block_read */
+ atomic_t block_write; /* Queue blocked for writes */
+ int write_queue; /* Processes waiting on block_write */
+ atomic_t total_queued; /* Total queued statistic */
+ atomic_t total_flushed;/* Total flushes statistic */
+ atomic_t total_locks; /* Total locks statistics */
+ drm_ctx_flags_t flags; /* Context preserving and 2D-only */
+ drm_waitlist_t waitlist; /* Pending buffers */
+ int flush_queue; /* Processes waiting until flush */
+} drm_queue_t;
+
+typedef struct drm_lock_data {
+ drm_hw_lock_t *hw_lock; /* Hardware lock */
+ pid_t pid; /* PID of lock holder (0=kernel) */
+ int lock_queue; /* Queue of blocked processes */
+ unsigned long lock_time; /* Time of last lock in jiffies */
+} drm_lock_data_t;
+
+typedef struct drm_device_dma {
+ /* Performance Counters */
+ atomic_t total_prio; /* Total DRM_DMA_PRIORITY */
+ atomic_t total_bytes; /* Total bytes DMA'd */
+ atomic_t total_dmas; /* Total DMA buffers dispatched */
+
+ atomic_t total_missed_dma; /* Missed drm_do_dma */
+ atomic_t total_missed_lock; /* Missed lock in drm_do_dma */
+ atomic_t total_missed_free; /* Missed drm_free_this_buffer */
+ atomic_t total_missed_sched;/* Missed drm_dma_schedule */
+
+ atomic_t total_tried; /* Tried next_buffer */
+ atomic_t total_hit; /* Sent next_buffer */
+ atomic_t total_lost; /* Lost interrupt */
+
+ drm_buf_entry_t bufs[DRM_MAX_ORDER+1];
+ int buf_count;
+ drm_buf_t **buflist; /* Vector of pointers info bufs */
+ int seg_count;
+ int page_count;
+ vm_offset_t *pagelist;
+ unsigned long byte_count;
+ enum {
+ _DRM_DMA_USE_AGP = 0x01
+ } flags;
+
+ /* DMA support */
+ drm_buf_t *this_buffer; /* Buffer being sent */
+ drm_buf_t *next_buffer; /* Selected buffer to send */
+ drm_queue_t *next_queue; /* Queue from which buffer selected*/
+ int waiting; /* Processes waiting on free bufs */
+} drm_device_dma_t;
+
+#ifdef DRM_AGP
+
+typedef struct drm_agp_mem {
+ void *handle;
+ unsigned long bound; /* address */
+ int pages;
+ struct drm_agp_mem *prev;
+ struct drm_agp_mem *next;
+} drm_agp_mem_t;
+
+typedef struct drm_agp_head {
+ device_t agpdev;
+ struct agp_info info;
+ const char *chipset;
+ drm_agp_mem_t *memory;
+ unsigned long mode;
+ int enabled;
+ int acquired;
+ unsigned long base;
+ int agp_mtrr;
+} drm_agp_head_t;
+
+#endif
+
+typedef struct drm_device {
+ const char *name; /* Simple driver name */
+ char *unique; /* Unique identifier: e.g., busid */
+ int unique_len; /* Length of unique field */
+ device_t device; /* Device instance from newbus */
+ dev_t devnode; /* Device number for mknod */
+ char *devname; /* For /proc/interrupts */
+
+ int blocked; /* Blocked due to VC switch? */
+ int flags; /* Flags to open(2) */
+ int writable; /* Opened with FWRITE */
+ struct proc_dir_entry *root; /* Root for this device's entries */
+
+ /* Locks */
+ struct simplelock count_lock; /* For inuse, open_count, buf_use */
+ struct lock dev_lock; /* For others */
+
+ /* Usage Counters */
+ int open_count; /* Outstanding files open */
+ atomic_t ioctl_count; /* Outstanding IOCTLs pending */
+ atomic_t vma_count; /* Outstanding vma areas open */
+ int buf_use; /* Buffers in use -- cannot alloc */
+ atomic_t buf_alloc; /* Buffer allocation in progress */
+
+ /* Performance Counters */
+ atomic_t total_open;
+ atomic_t total_close;
+ atomic_t total_ioctl;
+ atomic_t total_irq; /* Total interruptions */
+ atomic_t total_ctx; /* Total context switches */
+
+ atomic_t total_locks;
+ atomic_t total_unlocks;
+ atomic_t total_contends;
+ atomic_t total_sleeps;
+
+ /* Authentication */
+ drm_file_list_t files;
+ drm_magic_head_t magiclist[DRM_HASH_SIZE];
+
+ /* Memory management */
+ drm_map_t **maplist; /* Vector of pointers to regions */
+ int map_count; /* Number of mappable regions */
+
+ drm_vma_entry_t *vmalist; /* List of vmas (for debugging) */
+ drm_lock_data_t lock; /* Information on hardware lock */
+
+ /* DMA queues (contexts) */
+ int queue_count; /* Number of active DMA queues */
+ int queue_reserved; /* Number of reserved DMA queues */
+ int queue_slots; /* Actual length of queuelist */
+ drm_queue_t **queuelist; /* Vector of pointers to DMA queues */
+ drm_device_dma_t *dma; /* Optional pointer for DMA support */
+
+ /* Context support */
+ struct resource *irq; /* Interrupt used by board */
+ void *irqh; /* Handle from bus_setup_intr */
+ __volatile__ int context_flag; /* Context swapping flag */
+ __volatile__ int interrupt_flag;/* Interruption handler flag */
+ __volatile__ int dma_flag; /* DMA dispatch flag */
+ struct callout timer; /* Timer for delaying ctx switch */
+ int context_wait; /* Processes waiting on ctx switch */
+ int last_checked; /* Last context checked for DMA */
+ int last_context; /* Last current context */
+ int last_switch; /* Time at last context switch */
+ struct task task;
+ struct timespec ctx_start;
+ struct timespec lck_start;
+#if DRM_DMA_HISTOGRAM
+ drm_histogram_t histo;
+#endif
+
+ /* Callback to X server for context switch
+ and for heavy-handed reset. */
+ char buf[DRM_BSZ]; /* Output buffer */
+ char *buf_rp; /* Read pointer */
+ char *buf_wp; /* Write pointer */
+ char *buf_end; /* End pointer */
+ struct sigio *buf_sigio; /* Processes waiting for SIGIO */
+ struct selinfo buf_sel; /* Workspace for select/poll */
+ int buf_readers; /* Processes waiting to read */
+ int buf_writers; /* Processes waiting to ctx switch */
+ int buf_selecting; /* True if poll sleeper */
+
+ /* Sysctl support */
+ struct drm_sysctl_info *sysctl;
+
+#ifdef DRM_AGP
+ drm_agp_head_t *agp;
+#endif
+ u_int32_t *ctx_bitmap;
+ void *dev_private;
+} drm_device_t;
+
+
+ /* Internal function definitions */
+
+ /* Misc. support (init.c) */
+extern int drm_flags;
+extern void drm_parse_options(char *s);
+
+
+ /* Device support (fops.c) */
+extern drm_file_t *drm_find_file_by_proc(drm_device_t *dev, struct proc *p);
+extern int drm_open_helper(dev_t kdev, int flags, int fmt, struct proc *p,
+ drm_device_t *dev);
+extern d_close_t drm_close;
+extern d_read_t drm_read;
+extern d_write_t drm_write;
+extern d_poll_t drm_poll;
+extern int drm_fsetown(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p);
+extern int drm_fgetown(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p);
+extern int drm_write_string(drm_device_t *dev, const char *s);
+
+#if 0
+ /* Mapping support (vm.c) */
+extern unsigned long drm_vm_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int write_access);
+extern unsigned long drm_vm_shm_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int write_access);
+extern unsigned long drm_vm_dma_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int write_access);
+extern void drm_vm_open(struct vm_area_struct *vma);
+extern void drm_vm_close(struct vm_area_struct *vma);
+extern int drm_mmap_dma(struct file *filp,
+ struct vm_area_struct *vma);
+#endif
+extern d_mmap_t drm_mmap;
+
+ /* Proc support (proc.c) */
+extern int drm_sysctl_init(drm_device_t *dev);
+extern int drm_sysctl_cleanup(drm_device_t *dev);
+
+ /* Memory management support (memory.c) */
+extern void drm_mem_init(void);
+extern int drm_mem_info SYSCTL_HANDLER_ARGS;
+extern void *drm_alloc(size_t size, int area);
+extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size,
+ int area);
+extern char *drm_strdup(const char *s, int area);
+extern void drm_strfree(char *s, int area);
+extern void drm_free(void *pt, size_t size, int area);
+extern unsigned long drm_alloc_pages(int order, int area);
+extern void drm_free_pages(unsigned long address, int order,
+ int area);
+extern void *drm_ioremap(unsigned long offset, unsigned long size);
+extern void drm_ioremapfree(void *pt, unsigned long size);
+
+#ifdef DRM_AGP
+extern void *drm_alloc_agp(int pages, u_int32_t type);
+extern int drm_free_agp(void *handle, int pages);
+extern int drm_bind_agp(void *handle, unsigned int start);
+extern int drm_unbind_agp(void *handle);
+#endif
+
+ /* Buffer management support (bufs.c) */
+extern int drm_order(unsigned long size);
+extern d_ioctl_t drm_addmap;
+extern d_ioctl_t drm_addbufs;
+extern d_ioctl_t drm_infobufs;
+extern d_ioctl_t drm_markbufs;
+extern d_ioctl_t drm_freebufs;
+extern d_ioctl_t drm_mapbufs;
+
+
+ /* Buffer list management support (lists.c) */
+extern int drm_waitlist_create(drm_waitlist_t *bl, int count);
+extern int drm_waitlist_destroy(drm_waitlist_t *bl);
+extern int drm_waitlist_put(drm_waitlist_t *bl, drm_buf_t *buf);
+extern drm_buf_t *drm_waitlist_get(drm_waitlist_t *bl);
+
+extern int drm_freelist_create(drm_freelist_t *bl, int count);
+extern int drm_freelist_destroy(drm_freelist_t *bl);
+extern int drm_freelist_put(drm_device_t *dev, drm_freelist_t *bl,
+ drm_buf_t *buf);
+extern drm_buf_t *drm_freelist_get(drm_freelist_t *bl, int block);
+
+ /* DMA support (gen_dma.c) */
+extern void drm_dma_setup(drm_device_t *dev);
+extern void drm_dma_takedown(drm_device_t *dev);
+extern void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf);
+extern void drm_reclaim_buffers(drm_device_t *dev, pid_t pid);
+extern int drm_context_switch(drm_device_t *dev, int old, int new);
+extern int drm_context_switch_complete(drm_device_t *dev, int new);
+extern void drm_wakeup(drm_device_t *dev, drm_buf_t *buf);
+extern void drm_clear_next_buffer(drm_device_t *dev);
+extern int drm_select_queue(drm_device_t *dev,
+ void (*wrapper)(void *));
+extern int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *dma);
+extern int drm_dma_get_buffers(drm_device_t *dev, drm_dma_t *dma);
+#if DRM_DMA_HISTOGRAM
+extern int drm_histogram_slot(struct timespec *ts);
+extern void drm_histogram_compute(drm_device_t *dev, drm_buf_t *buf);
+#endif
+
+
+ /* Misc. IOCTL support (ioctl.c) */
+extern d_ioctl_t drm_irq_busid;
+extern d_ioctl_t drm_getunique;
+extern d_ioctl_t drm_setunique;
+
+
+ /* Context IOCTL support (context.c) */
+extern d_ioctl_t drm_resctx;
+extern d_ioctl_t drm_addctx;
+extern d_ioctl_t drm_modctx;
+extern d_ioctl_t drm_getctx;
+extern d_ioctl_t drm_switchctx;
+extern d_ioctl_t drm_newctx;
+extern d_ioctl_t drm_rmctx;
+
+
+ /* Drawable IOCTL support (drawable.c) */
+extern d_ioctl_t drm_adddraw;
+extern d_ioctl_t drm_rmdraw;
+
+
+ /* Authentication IOCTL support (auth.c) */
+extern int drm_add_magic(drm_device_t *dev, drm_file_t *priv,
+ drm_magic_t magic);
+extern int drm_remove_magic(drm_device_t *dev, drm_magic_t magic);
+extern d_ioctl_t drm_getmagic;
+extern d_ioctl_t drm_authmagic;
+
+
+ /* Locking IOCTL support (lock.c) */
+extern d_ioctl_t drm_block;
+extern d_ioctl_t drm_unblock;
+extern int drm_lock_take(__volatile__ unsigned int *lock,
+ unsigned int context);
+extern int drm_lock_transfer(drm_device_t *dev,
+ __volatile__ unsigned int *lock,
+ unsigned int context);
+extern int drm_lock_free(drm_device_t *dev,
+ __volatile__ unsigned int *lock,
+ unsigned int context);
+extern d_ioctl_t drm_finish;
+extern int drm_flush_unblock(drm_device_t *dev, int context,
+ drm_lock_flags_t flags);
+extern int drm_flush_block_and_flush(drm_device_t *dev, int context,
+ drm_lock_flags_t flags);
+
+ /* Context Bitmap support (ctxbitmap.c) */
+extern int drm_ctxbitmap_init(drm_device_t *dev);
+extern void drm_ctxbitmap_cleanup(drm_device_t *dev);
+extern int drm_ctxbitmap_next(drm_device_t *dev);
+extern void drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle);
+
+#ifdef DRM_AGP
+ /* AGP/GART support (agpsupport.c) */
+extern drm_agp_head_t *drm_agp_init(void);
+extern d_ioctl_t drm_agp_acquire;
+extern d_ioctl_t drm_agp_release;
+extern d_ioctl_t drm_agp_enable;
+extern d_ioctl_t drm_agp_info;
+extern d_ioctl_t drm_agp_alloc;
+extern d_ioctl_t drm_agp_free;
+extern d_ioctl_t drm_agp_unbind;
+extern d_ioctl_t drm_agp_bind;
+#endif
+#endif
+#endif
diff --git a/bsd/gamma/Makefile b/bsd/gamma/Makefile
new file mode 100644
index 00000000..dd611038
--- /dev/null
+++ b/bsd/gamma/Makefile
@@ -0,0 +1,15 @@
+# $FreeBSD$
+
+KMOD = gamma
+SRCS = gamma_drv.c gamma_dma.c
+SRCS += device_if.h bus_if.h pci_if.h
+CFLAGS += ${DEBUG_FLAGS} -I..
+KERN = /usr/src/sys
+
+@:
+ ln -sf /sys @
+
+machine:
+ ln -sf /sys/i386/include machine
+
+.include "/usr/src/sys/conf/kmod.mk"
diff --git a/bsd/gamma/gamma_dma.c b/bsd/gamma/gamma_dma.c
new file mode 100644
index 00000000..177440db
--- /dev/null
+++ b/bsd/gamma/gamma_dma.c
@@ -0,0 +1,802 @@
+/* gamma_dma.c -- DMA support for GMX 2000 -*- c -*-
+ * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
+ * Revised: Thu Sep 16 12:55:37 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/gamma_dma.c,v 1.9 1999/09/16 16:56:18 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/gamma_dma.c,v 1.1 1999/09/25 14:38:00 dawes Exp $
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "gamma_drv.h"
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/rman.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+/* WARNING!!! MAGIC NUMBER!!! The number of regions already added to the
+ kernel must be specified here. Currently, the number is 2. This must
+   match the order the X server uses for instantiating register regions,
+ or must be passed in a new ioctl. */
+#define GAMMA_REG(reg) \
+ (2 \
+ + ((reg < 0x1000) \
+ ? 0 \
+ : ((reg < 0x10000) ? 1 : ((reg < 0x11000) ? 2 : 3))))
+
+#define GAMMA_OFF(reg) \
+ ((reg < 0x1000) \
+ ? reg \
+ : ((reg < 0x10000) \
+ ? (reg - 0x1000) \
+ : ((reg < 0x11000) \
+ ? (reg - 0x10000) \
+ : (reg - 0x11000))))
+
+#define GAMMA_BASE(reg) ((unsigned long)dev->maplist[GAMMA_REG(reg)]->handle)
+#define GAMMA_ADDR(reg) (GAMMA_BASE(reg) + GAMMA_OFF(reg))
+#define GAMMA_DEREF(reg) *(__volatile__ int *)GAMMA_ADDR(reg)
+#define GAMMA_READ(reg) GAMMA_DEREF(reg)
+#define GAMMA_WRITE(reg,val) do { GAMMA_DEREF(reg) = val; } while (0)
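+/* Worked example from the macros above: GAMMA_READ(GAMMA_DMACOUNT), with
+   GAMMA_DMACOUNT == 0x0030, selects maplist[2] (0x30 < 0x1000, region offset
+   unchanged) and reads the 32-bit register at that map's handle + 0x30. */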
+
+#define GAMMA_BROADCASTMASK 0x9378
+#define GAMMA_COMMANDINTENABLE 0x0c48
+#define GAMMA_DMAADDRESS 0x0028
+#define GAMMA_DMACOUNT 0x0030
+#define GAMMA_FILTERMODE 0x8c00
+#define GAMMA_GCOMMANDINTFLAGS 0x0c50
+#define GAMMA_GCOMMANDMODE 0x0c40
+#define GAMMA_GCOMMANDSTATUS 0x0c60
+#define GAMMA_GDELAYTIMER 0x0c38
+#define GAMMA_GDMACONTROL 0x0060
+#define GAMMA_GINTENABLE 0x0808
+#define GAMMA_GINTFLAGS 0x0810
+#define GAMMA_INFIFOSPACE 0x0018
+#define GAMMA_OUTFIFOWORDS 0x0020
+#define GAMMA_OUTPUTFIFO 0x2000
+#define GAMMA_SYNC 0x8c40
+#define GAMMA_SYNC_TAG 0x0188
+
+static __inline void gamma_dma_dispatch(drm_device_t *dev,
+ vm_offset_t address,
+ vm_size_t length)
+{
+ GAMMA_WRITE(GAMMA_DMAADDRESS, vtophys(address));
+ while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
+ ;
+ GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
+}
+
+static __inline void gamma_dma_quiescent(drm_device_t *dev)
+{
+ while (GAMMA_READ(GAMMA_DMACOUNT))
+ ;
+ while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
+ ;
+ GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);
+ GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
+ GAMMA_WRITE(GAMMA_SYNC, 0);
+
+ /* Read from first MX */
+ do {
+ while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
+ ;
+ } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
+
+
+ /* Read from second MX */
+ do {
+ while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
+ ;
+ } while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
+}
+
+static __inline void gamma_dma_ready(drm_device_t *dev)
+{
+ while (GAMMA_READ(GAMMA_DMACOUNT))
+ ;
+}
+
+static __inline int gamma_dma_is_ready(drm_device_t *dev)
+{
+ return !GAMMA_READ(GAMMA_DMACOUNT);
+}
+
+static void gamma_dma_service(void *arg)
+{
+ drm_device_t *dev = (drm_device_t *)arg;
+ drm_device_dma_t *dma = dev->dma;
+
+ atomic_inc(&dev->total_irq);
+ GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0x05S */
+ GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
+ GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
+ if (gamma_dma_is_ready(dev)) {
+ /* Free previous buffer */
+ if (test_and_set_bit(0, &dev->dma_flag)) {
+ atomic_inc(&dma->total_missed_free);
+ return;
+ }
+ if (dma->this_buffer) {
+ drm_free_buffer(dev, dma->this_buffer);
+ dma->this_buffer = NULL;
+ }
+ clear_bit(0, &dev->dma_flag);
+
+#if 0
+ /* Dispatch new buffer */
+ queue_task(&dev->tq, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+#endif
+ }
+}
+
+/* Only called by gamma_dma_schedule. */
+static int gamma_do_dma(drm_device_t *dev, int locked)
+{
+ unsigned long address;
+ unsigned long length;
+ drm_buf_t *buf;
+ int retcode = 0;
+ drm_device_dma_t *dma = dev->dma;
+#if DRM_DMA_HISTOGRAM
+ struct timespec dma_start, dma_stop;
+#endif
+
+ if (test_and_set_bit(0, &dev->dma_flag)) {
+ atomic_inc(&dma->total_missed_dma);
+ return EBUSY;
+ }
+
+#if DRM_DMA_HISTOGRAM
+ getnanotime(&dma_start);
+#endif
+
+ if (!dma->next_buffer) {
+ DRM_ERROR("No next_buffer\n");
+ clear_bit(0, &dev->dma_flag);
+ return EINVAL;
+ }
+
+ buf = dma->next_buffer;
+ address = (unsigned long)buf->address;
+ length = buf->used;
+
+ DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
+ buf->context, buf->idx, length);
+
+ if (buf->list == DRM_LIST_RECLAIM) {
+ drm_clear_next_buffer(dev);
+ drm_free_buffer(dev, buf);
+ clear_bit(0, &dev->dma_flag);
+ return EINVAL;
+ }
+
+ if (!length) {
+ DRM_ERROR("0 length buffer\n");
+ drm_clear_next_buffer(dev);
+ drm_free_buffer(dev, buf);
+ clear_bit(0, &dev->dma_flag);
+ return 0;
+ }
+
+ if (!gamma_dma_is_ready(dev)) {
+ clear_bit(0, &dev->dma_flag);
+ return EBUSY;
+ }
+
+ if (buf->while_locked) {
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("Dispatching buffer %d from pid %d"
+ " \"while locked\", but no lock held\n",
+ buf->idx, buf->pid);
+ }
+ } else {
+ if (!locked && !drm_lock_take(&dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ atomic_inc(&dma->total_missed_lock);
+ clear_bit(0, &dev->dma_flag);
+ return EBUSY;
+ }
+ }
+
+ if (dev->last_context != buf->context
+ && !(dev->queuelist[buf->context]->flags
+ & _DRM_CONTEXT_PRESERVED)) {
+ /* PRE: dev->last_context != buf->context */
+ if (drm_context_switch(dev, dev->last_context, buf->context)) {
+ drm_clear_next_buffer(dev);
+ drm_free_buffer(dev, buf);
+ }
+ retcode = EBUSY;
+ goto cleanup;
+
+ /* POST: we will wait for the context
+ switch and will dispatch on a later call
+ when dev->last_context == buf->context.
+ NOTE WE HOLD THE LOCK THROUGHOUT THIS
+ TIME! */
+ }
+
+ drm_clear_next_buffer(dev);
+ buf->pending = 1;
+ buf->waiting = 0;
+ buf->list = DRM_LIST_PEND;
+#if DRM_DMA_HISTOGRAM
+ getnanotime(&buf->time_dispatched);
+#endif
+
+ gamma_dma_dispatch(dev, address, length);
+ drm_free_buffer(dev, dma->this_buffer);
+ dma->this_buffer = buf;
+
+ atomic_add(length, &dma->total_bytes);
+ atomic_inc(&dma->total_dmas);
+
+ if (!buf->while_locked && !dev->context_flag && !locked) {
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("\n");
+ }
+ }
+cleanup:
+
+ clear_bit(0, &dev->dma_flag);
+
+#if DRM_DMA_HISTOGRAM
+ getnanotime(&dma_stop);
+ timespecsub(&dma_stop, &dma_start);
+ atomic_inc(&dev->histo.ctx[drm_histogram_slot(&dma_stop)]);
+#endif
+
+ return retcode;
+}
+
+static void gamma_dma_schedule_wrapper(void *dev)
+{
+ gamma_dma_schedule(dev, 0);
+}
+
+int gamma_dma_schedule(drm_device_t *dev, int locked)
+{
+ int next;
+ drm_queue_t *q;
+ drm_buf_t *buf;
+ int retcode = 0;
+ int processed = 0;
+ int missed;
+ int expire = 20;
+ drm_device_dma_t *dma = dev->dma;
+#if DRM_DMA_HISTOGRAM
+ struct timespec schedule_start;
+#endif
+
+ if (test_and_set_bit(0, &dev->interrupt_flag)) {
+ /* Not reentrant */
+ atomic_inc(&dma->total_missed_sched);
+ return EBUSY;
+ }
+ missed = atomic_read(&dma->total_missed_sched);
+
+#if DRM_DMA_HISTOGRAM
+ getnanotime(&schedule_start);
+#endif
+
+again:
+ if (dev->context_flag) {
+ clear_bit(0, &dev->interrupt_flag);
+ return EBUSY;
+ }
+ if (dma->next_buffer) {
+ /* Unsent buffer that was previously
+ selected, but that couldn't be sent
+ because the lock could not be obtained
+ or the DMA engine wasn't ready. Try
+ again. */
+ atomic_inc(&dma->total_tried);
+ if (!(retcode = gamma_do_dma(dev, locked))) {
+ atomic_inc(&dma->total_hit);
+ ++processed;
+ }
+ } else {
+ do {
+ next = drm_select_queue(dev,
+ gamma_dma_schedule_wrapper);
+ if (next >= 0) {
+ q = dev->queuelist[next];
+ buf = drm_waitlist_get(&q->waitlist);
+ dma->next_buffer = buf;
+ dma->next_queue = q;
+ if (buf && buf->list == DRM_LIST_RECLAIM) {
+ drm_clear_next_buffer(dev);
+ drm_free_buffer(dev, buf);
+ }
+ }
+ } while (next >= 0 && !dma->next_buffer);
+ if (dma->next_buffer) {
+ if (!(retcode = gamma_do_dma(dev, locked))) {
+ ++processed;
+ }
+ }
+ }
+
+ if (--expire) {
+ if (missed != atomic_read(&dma->total_missed_sched)) {
+ atomic_inc(&dma->total_lost);
+ if (gamma_dma_is_ready(dev)) goto again;
+ }
+ if (processed && gamma_dma_is_ready(dev)) {
+ atomic_inc(&dma->total_lost);
+ processed = 0;
+ goto again;
+ }
+ }
+
+ clear_bit(0, &dev->interrupt_flag);
+
+#if DRM_DMA_HISTOGRAM
+ {
+ struct timespec ts;
+ getnanotime(&ts);
+ timespecsub(&ts, &schedule_start);
+ atomic_inc(&dev->histo.schedule[drm_histogram_slot(&ts)]);
+ }
+#endif
+ return retcode;
+}
+
+static int gamma_dma_priority(drm_device_t *dev, drm_dma_t *d)
+{
+ struct proc *p = curproc;
+ unsigned long address;
+ unsigned long length;
+ int must_free = 0;
+ int retcode = 0;
+ int i;
+ int idx;
+ drm_buf_t *buf;
+ drm_buf_t *last_buf = NULL;
+ drm_device_dma_t *dma = dev->dma;
+ static int never;
+
+ /* Turn off interrupt handling */
+ while (test_and_set_bit(0, &dev->interrupt_flag)) {
+ retcode = tsleep(&never, PZERO|PCATCH, "gamp1", 1);
+ if (retcode)
+ return retcode;
+ }
+ if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
+ while (!drm_lock_take(&dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ retcode = tsleep(&never, PZERO|PCATCH, "gamp2", 1);
+ if (retcode)
+ return retcode;
+ }
+ ++must_free;
+ }
+ atomic_inc(&dma->total_prio);
+
+ for (i = 0; i < d->send_count; i++) {
+ idx = d->send_indices[i];
+ if (idx < 0 || idx >= dma->buf_count) {
+ DRM_ERROR("Index %d (of %d max)\n",
+ d->send_indices[i], dma->buf_count - 1);
+ continue;
+ }
+ buf = dma->buflist[ idx ];
+ if (buf->pid != p->p_pid) {
+ DRM_ERROR("Process %d using buffer owned by %d\n",
+ p->p_pid, buf->pid);
+ retcode = EINVAL;
+ goto cleanup;
+ }
+ if (buf->list != DRM_LIST_NONE) {
+ DRM_ERROR("Process %d using %d's buffer on list %d\n",
+ p->p_pid, buf->pid, buf->list);
+ retcode = EINVAL;
+ goto cleanup;
+ }
+ /* This isn't a race condition on
+ buf->list, since our concern is the
+ buffer reclaim during the time the
+ process closes the /dev/drm? handle, so
+ it can't also be doing DMA. */
+ buf->list = DRM_LIST_PRIO;
+ buf->used = d->send_sizes[i];
+ buf->context = d->context;
+ buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
+ address = (unsigned long)buf->address;
+ length = buf->used;
+ if (!length) {
+ DRM_ERROR("0 length buffer\n");
+ }
+ if (buf->pending) {
+ DRM_ERROR("Sending pending buffer:"
+ " buffer %d, offset %d\n",
+ d->send_indices[i], i);
+ retcode = EINVAL;
+ goto cleanup;
+ }
+ if (buf->waiting) {
+ DRM_ERROR("Sending waiting buffer:"
+ " buffer %d, offset %d\n",
+ d->send_indices[i], i);
+ retcode = EINVAL;
+ goto cleanup;
+ }
+ buf->pending = 1;
+
+ if (dev->last_context != buf->context
+ && !(dev->queuelist[buf->context]->flags
+ & _DRM_CONTEXT_PRESERVED)) {
+ atomic_inc(&dev->context_wait);
+ /* PRE: dev->last_context != buf->context */
+ drm_context_switch(dev, dev->last_context,
+ buf->context);
+ /* POST: we will wait for the context
+ switch and will dispatch on a later call
+ when dev->last_context == buf->context.
+ NOTE WE HOLD THE LOCK THROUGHOUT THIS
+ TIME! */
+ retcode = tsleep(&dev->context_wait, PZERO|PCATCH,
+ "gamctx", 0);
+ atomic_dec(&dev->context_wait);
+ if (retcode)
+ goto cleanup;
+ if (dev->last_context != buf->context) {
+ DRM_ERROR("Context mismatch: %d %d\n",
+ dev->last_context,
+ buf->context);
+ }
+ }
+
+#if DRM_DMA_HISTOGRAM
+ getnanotime(&buf->time_queued);
+ buf->time_dispatched = buf->time_queued;
+#endif
+ gamma_dma_dispatch(dev, address, length);
+ atomic_add(length, &dma->total_bytes);
+ atomic_inc(&dma->total_dmas);
+
+ if (last_buf) {
+ drm_free_buffer(dev, last_buf);
+ }
+ last_buf = buf;
+ }
+
+
+cleanup:
+ if (last_buf) {
+ gamma_dma_ready(dev);
+ drm_free_buffer(dev, last_buf);
+ }
+
+ if (must_free && !dev->context_flag) {
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("\n");
+ }
+ }
+ clear_bit(0, &dev->interrupt_flag);
+ return retcode;
+}
+
+static int gamma_dma_send_buffers(drm_device_t *dev, drm_dma_t *d)
+{
+ struct proc *p = curproc;
+ drm_buf_t *last_buf = NULL;
+ int retcode = 0;
+ drm_device_dma_t *dma = dev->dma;
+
+
+ if ((retcode = drm_dma_enqueue(dev, d))) {
+ return retcode;
+ }
+
+ gamma_dma_schedule(dev, 0);
+
+ if (d->flags & _DRM_DMA_BLOCK) {
+ last_buf = dma->buflist[d->send_indices[d->send_count-1]];
+ atomic_inc(&last_buf->dma_wait);
+ }
+
+ if (d->flags & _DRM_DMA_BLOCK) {
+ DRM_DEBUG("%d waiting\n", p->p_pid);
+ for (;;) {
+ retcode = tsleep(&last_buf->dma_wait, PZERO|PCATCH,
+ "gamdw", 0);
+ if (!last_buf->waiting
+ && !last_buf->pending)
+ break; /* finished */
+ if (retcode)
+ break;
+ }
+
+ DRM_DEBUG("%d running\n", p->p_pid);
+ atomic_dec(&last_buf->dma_wait);
+ if (!retcode
+ || (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
+ if (!last_buf->dma_wait) {
+ drm_free_buffer(dev, last_buf);
+ }
+ }
+ if (retcode) {
+ DRM_ERROR("ctx%d w%d p%d c%d i%d l%d %d/%d\n",
+ d->context,
+ last_buf->waiting,
+ last_buf->pending,
+ DRM_WAITCOUNT(dev, d->context),
+ last_buf->idx,
+ last_buf->list,
+ last_buf->pid,
+ p->p_pid);
+ }
+ }
+ return retcode;
+}
+
+int gamma_dma(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_device_dma_t *dma = dev->dma;
+ int retcode = 0;
+ drm_dma_t d;
+
+ d = *(drm_dma_t *) data;
+ DRM_DEBUG("%d %d: %d send, %d req\n",
+ p->p_pid, d.context, d.send_count, d.request_count);
+
+ if (d.context == DRM_KERNEL_CONTEXT || d.context >= dev->queue_slots) {
+ DRM_ERROR("Process %d using context %d\n",
+ p->p_pid, d.context);
+ return EINVAL;
+ }
+ if (d.send_count < 0 || d.send_count > dma->buf_count) {
+ DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
+ p->p_pid, d.send_count, dma->buf_count);
+ return EINVAL;
+ }
+ if (d.request_count < 0 || d.request_count > dma->buf_count) {
+ DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
+ p->p_pid, d.request_count, dma->buf_count);
+ return EINVAL;
+ }
+
+ if (d.send_count) {
+ if (d.flags & _DRM_DMA_PRIORITY)
+ retcode = gamma_dma_priority(dev, &d);
+ else
+ retcode = gamma_dma_send_buffers(dev, &d);
+ }
+
+ d.granted_count = 0;
+
+ if (!retcode && d.request_count) {
+ retcode = drm_dma_get_buffers(dev, &d);
+ }
+
+ DRM_DEBUG("%d returning, granted = %d\n",
+ p->p_pid, d.granted_count);
+ *(drm_dma_t *) data = d;
+
+ return retcode;
+}
+
+int gamma_irq_install(drm_device_t *dev, int irq)
+{
+ int rid;
+ int retcode;
+
+ if (!irq) return EINVAL;
+
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
+ if (dev->irq) {
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+ return EBUSY;
+ }
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+
+ DRM_DEBUG("%d\n", irq);
+
+ dev->context_flag = 0;
+ dev->interrupt_flag = 0;
+ dev->dma_flag = 0;
+
+ dev->dma->next_buffer = NULL;
+ dev->dma->next_queue = NULL;
+ dev->dma->this_buffer = NULL;
+
+#if 0
+ dev->tq.next = NULL;
+ dev->tq.sync = 0;
+ dev->tq.routine = gamma_dma_schedule_tq_wrapper;
+ dev->tq.data = dev;
+#endif
+ /* Before installing handler */
+ GAMMA_WRITE(GAMMA_GCOMMANDMODE, 0);
+ GAMMA_WRITE(GAMMA_GDMACONTROL, 0);
+
+ /* Install handler */
+ rid = 0;
+ dev->irq = bus_alloc_resource(dev->device, SYS_RES_IRQ, &rid,
+ 0, ~0, 1, RF_SHAREABLE);
+ if (!dev->irq)
+ return ENOENT;
+
+ retcode = bus_setup_intr(dev->device, dev->irq, INTR_TYPE_TTY,
+ gamma_dma_service, dev, &dev->irqh);
+ if (retcode) {
+ bus_release_resource(dev->device, SYS_RES_IRQ, 0, dev->irq);
+ dev->irq = 0;
+ return retcode;
+ }
+
+ /* After installing handler */
+ GAMMA_WRITE(GAMMA_GINTENABLE, 0x2001);
+ GAMMA_WRITE(GAMMA_COMMANDINTENABLE, 0x0008);
+ GAMMA_WRITE(GAMMA_GDELAYTIMER, 0x39090);
+
+ return 0;
+}
+
+int gamma_irq_uninstall(drm_device_t *dev)
+{
+ if (!dev->irq)
+ return EINVAL;
+
+ DRM_DEBUG("%ld\n", rman_get_start(dev->irq));
+
+ GAMMA_WRITE(GAMMA_GDELAYTIMER, 0);
+ GAMMA_WRITE(GAMMA_COMMANDINTENABLE, 0);
+ GAMMA_WRITE(GAMMA_GINTENABLE, 0);
+
+ bus_teardown_intr(dev->device, dev->irq, dev->irqh);
+ bus_release_resource(dev->device, SYS_RES_IRQ, 0, dev->irq);
+ dev->irq = 0;
+
+ return 0;
+}
+
+
+int gamma_control(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_control_t ctl;
+ int retcode;
+
+ ctl = *(drm_control_t *) data;
+
+ switch (ctl.func) {
+ case DRM_INST_HANDLER:
+ if ((retcode = gamma_irq_install(dev, ctl.irq)))
+ return retcode;
+ break;
+ case DRM_UNINST_HANDLER:
+ if ((retcode = gamma_irq_uninstall(dev)))
+ return retcode;
+ break;
+ default:
+ return EINVAL;
+ }
+ return 0;
+}
+
+int gamma_lock(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ int ret = 0;
+ drm_lock_t lock;
+ drm_queue_t *q;
+#if DRM_DMA_HISTOGRAM
+ struct timespec start;
+
+ getnanotime(&start);
+ dev->lck_start = start;
+#endif
+
+ lock = *(drm_lock_t *) data;
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ p->p_pid, lock.context);
+ return EINVAL;
+ }
+
+ DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+ lock.context, p->p_pid, dev->lock.hw_lock->lock,
+ lock.flags);
+
+ if (lock.context < 0 || lock.context >= dev->queue_count)
+ return EINVAL;
+ q = dev->queuelist[lock.context];
+
+ ret = drm_flush_block_and_flush(dev, lock.context, lock.flags);
+
+ if (!ret) {
+ if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
+ != lock.context) {
+ long j = ticks - dev->lock.lock_time;
+
+ if (j > 0 && j <= DRM_LOCK_SLICE) {
+ /* Can't take lock if we just had it and
+ there is contention. */
+ static int never;
+ ret = tsleep(&never, PZERO|PCATCH,
+ "gaml1", j);
+ if (ret)
+ return ret;
+ }
+ }
+ atomic_inc(&dev->lock.lock_queue);
+ for (;;) {
+ if (!dev->lock.hw_lock) {
+ /* Device has been unregistered */
+ ret = EINTR;
+ break;
+ }
+ if (drm_lock_take(&dev->lock.hw_lock->lock,
+ lock.context)) {
+ dev->lock.pid = p->p_pid;
+ dev->lock.lock_time = ticks;
+ atomic_inc(&dev->total_locks);
+ atomic_inc(&q->total_locks);
+ break; /* Got lock */
+ }
+
+ /* Contention */
+ atomic_inc(&dev->total_sleeps);
+ ret = tsleep(&dev->lock.lock_queue, PZERO|PCATCH,
+ "gaml2", 0);
+ if (ret)
+ break;
+ }
+ atomic_dec(&dev->lock.lock_queue);
+ }
+
+ drm_flush_unblock(dev, lock.context, lock.flags); /* cleanup phase */
+
+ if (!ret) {
+ if (lock.flags & _DRM_LOCK_READY)
+ gamma_dma_ready(dev);
+ if (lock.flags & _DRM_LOCK_QUIESCENT)
+ gamma_dma_quiescent(dev);
+ }
+ DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
+
+#if DRM_DMA_HISTOGRAM
+ {
+ struct timespec ts;
+ getnanotime(&ts);
+ timespecsub(&ts, &start);
+ atomic_inc(&dev->histo.lacq[drm_histogram_slot(&ts)]);
+ }
+#endif
+
+ return ret;
+}
diff --git a/bsd/gamma/gamma_drv.c b/bsd/gamma/gamma_drv.c
new file mode 100644
index 00000000..fe71737a
--- /dev/null
+++ b/bsd/gamma/gamma_drv.c
@@ -0,0 +1,574 @@
+/* gamma.c -- 3dlabs GMX 2000 driver -*- c -*-
+ * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
+ * Revised: Tue Oct 12 08:51:36 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/gamma_drv.c,v 1.17 1999/08/30 13:05:00 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/gamma_drv.c,v 1.1 1999/09/25 14:38:00 dawes Exp $
+ *
+ */
+
+#include "drmP.h"
+#include "gamma_drv.h"
+
+#include <pci/pcivar.h>
+
+MODULE_DEPEND(gamma, drm, 1, 1, 1);
+
+static int gamma_init(device_t nbdev);
+static void gamma_cleanup(device_t nbdev);
+
+static int gamma_probe(device_t dev)
+{
+ const char *s = 0;
+
+ switch (pci_get_devid(dev)) {
+ case 0x00083d3d:
+ s = "3D Labs Gamma graphics accelerator";
+ break;
+ }
+
+ if (s) {
+ device_set_desc(dev, s);
+ return 0;
+ }
+
+ return ENXIO;
+}
+
+static int gamma_attach(device_t dev)
+{
+ gamma_init(dev);
+ return 0;
+}
+
+static int gamma_detach(device_t dev)
+{
+ gamma_cleanup(dev);
+ return 0;
+}
+
+static device_method_t gamma_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, gamma_probe),
+ DEVMETHOD(device_attach, gamma_attach),
+ DEVMETHOD(device_detach, gamma_detach),
+
+ { 0, 0 }
+};
+
+static driver_t gamma_driver = {
+ "drm",
+ gamma_methods,
+ sizeof(drm_device_t),
+};
+
+static devclass_t gamma_devclass;
+#define GAMMA_SOFTC(unit) \
+ ((drm_device_t *) devclass_get_softc(gamma_devclass, unit))
+
+DRIVER_MODULE(if_gamma, pci, gamma_driver, gamma_devclass, 0, 0);
+
+#define GAMMA_NAME "gamma"
+#define GAMMA_DESC "3dlabs GMX 2000"
+#define GAMMA_DATE "19990830"
+#define GAMMA_MAJOR 0
+#define GAMMA_MINOR 0
+#define GAMMA_PATCHLEVEL 5
+
+#define CDEV_MAJOR 200
+
+static struct cdevsw gamma_cdevsw = {
+ /* open */ gamma_open,
+ /* close */ gamma_close,
+ /* read */ drm_read,
+ /* write */ drm_write,
+ /* ioctl */ gamma_ioctl,
+ /* poll */ nopoll,
+ /* mmap */ drm_mmap,
+ /* strategy */ nostrategy,
+ /* name */ "gamma",
+ /* maj */ CDEV_MAJOR,
+ /* dump */ nodump,
+ /* psize */ nopsize,
+ /* flags */ D_TTY | D_TRACKCLOSE,
+ /* bmaj */ -1
+};
+
+static drm_ioctl_desc_t gamma_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { gamma_version, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { gamma_control, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { drm_addbufs, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { drm_mapbufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { drm_addctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { drm_rmctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { drm_modctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { drm_getctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { drm_switchctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { drm_newctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { drm_resctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { gamma_dma, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { gamma_lock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { gamma_unlock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
+};
+#define GAMMA_IOCTL_COUNT DRM_ARRAY_SIZE(gamma_ioctls)
+
+static int gamma_setup(drm_device_t *dev)
+{
+ int i;
+
+ device_busy(dev->device);
+
+ atomic_set(&dev->ioctl_count, 0);
+ atomic_set(&dev->vma_count, 0);
+ dev->buf_use = 0;
+ atomic_set(&dev->buf_alloc, 0);
+
+ drm_dma_setup(dev);
+
+ atomic_set(&dev->total_open, 0);
+ atomic_set(&dev->total_close, 0);
+ atomic_set(&dev->total_ioctl, 0);
+ atomic_set(&dev->total_irq, 0);
+ atomic_set(&dev->total_ctx, 0);
+ atomic_set(&dev->total_locks, 0);
+ atomic_set(&dev->total_unlocks, 0);
+ atomic_set(&dev->total_contends, 0);
+ atomic_set(&dev->total_sleeps, 0);
+
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ dev->magiclist[i].head = NULL;
+ dev->magiclist[i].tail = NULL;
+ }
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ dev->vmalist = NULL;
+ dev->lock.hw_lock = NULL;
+ dev->lock.lock_queue = 0;
+ dev->queue_count = 0;
+ dev->queue_reserved = 0;
+ dev->queue_slots = 0;
+ dev->queuelist = NULL;
+ dev->irq = 0;
+ dev->context_flag = 0;
+ dev->interrupt_flag = 0;
+ dev->dma_flag = 0;
+ dev->last_context = 0;
+ dev->last_switch = 0;
+ dev->last_checked = 0;
+ callout_init(&dev->timer);
+ dev->context_wait = 0;
+#if DRM_DMA_HISTOGRAM
+ memset(&dev->histo, 0, sizeof(dev->histo));
+#endif
+ timespecclear(&dev->ctx_start);
+ timespecclear(&dev->lck_start);
+
+ dev->buf_rp = dev->buf;
+ dev->buf_wp = dev->buf;
+ dev->buf_end = dev->buf + DRM_BSZ;
+ dev->buf_sigio = NULL;
+
+ DRM_DEBUG("\n");
+
+ /* The kernel's context could be created here, but is now created
+ in drm_dma_enqueue. This is more resource-efficient for
+ hardware that does not do DMA, but may mean that
+ drm_select_queue fails between the time the interrupt is
+ initialized and the time the queues are initialized. */
+
+ return 0;
+}
+
+
+static int
+gamma_takedown(drm_device_t *dev)
+{
+ int i;
+ drm_magic_entry_t *pt, *next;
+ drm_map_t *map;
+ drm_vma_entry_t *vma, *vma_next;
+
+ DRM_DEBUG("\n");
+
+ if (dev->irq) gamma_irq_uninstall(dev);
+
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
+ callout_stop(&dev->timer);
+
+ if (dev->devname) {
+ drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
+ dev->devname = NULL;
+ }
+
+ if (dev->unique) {
+ drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
+ dev->unique = NULL;
+ dev->unique_len = 0;
+ }
+ /* Clear pid list */
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ for (pt = dev->magiclist[i].head; pt; pt = next) {
+ next = pt->next;
+ drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+ }
+ dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
+ }
+
+ /* Clear vma list (only built for debugging) */
+ if (dev->vmalist) {
+ for (vma = dev->vmalist; vma; vma = vma_next) {
+ vma_next = vma->next;
+ drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
+ }
+ dev->vmalist = NULL;
+ }
+
+ /* Clear map area and mtrr information */
+ if (dev->maplist) {
+ for (i = 0; i < dev->map_count; i++) {
+ map = dev->maplist[i];
+ switch (map->type) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+#ifdef CONFIG_MTRR
+ if (map->mtrr >= 0) {
+ int retcode;
+ retcode = mtrr_del(map->mtrr,
+ map->offset,
+ map->size);
+ DRM_DEBUG("mtrr_del = %d\n", retcode);
+ }
+#endif
+ drm_ioremapfree(map->handle, map->size);
+ break;
+ case _DRM_SHM:
+ drm_free_pages((unsigned long)map->handle,
+ drm_order(map->size)
+ - PAGE_SHIFT,
+ DRM_MEM_SAREA);
+ break;
+ }
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ }
+ drm_free(dev->maplist,
+ dev->map_count * sizeof(*dev->maplist),
+ DRM_MEM_MAPS);
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ }
+
+ if (dev->queuelist) {
+ for (i = 0; i < dev->queue_count; i++) {
+			if (dev->queuelist[i]) {
+				drm_waitlist_destroy(&dev->queuelist[i]->waitlist);
+ drm_free(dev->queuelist[i],
+ sizeof(*dev->queuelist[0]),
+ DRM_MEM_QUEUES);
+ dev->queuelist[i] = NULL;
+ }
+ }
+ drm_free(dev->queuelist,
+ dev->queue_slots * sizeof(*dev->queuelist),
+ DRM_MEM_QUEUES);
+ dev->queuelist = NULL;
+ }
+
+ drm_dma_takedown(dev);
+
+ dev->queue_count = 0;
+ if (dev->lock.hw_lock) {
+ dev->lock.hw_lock = NULL; /* SHM removed */
+ dev->lock.pid = 0;
+ wakeup(&dev->lock.lock_queue);
+ }
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+
+ device_unbusy(dev->device);
+
+ return 0;
+}
+
+/* gamma_init is called via gamma_attach at module load time */
+
+static int
+gamma_init(device_t nbdev)
+{
+ drm_device_t *dev = device_get_softc(nbdev);
+
+ DRM_DEBUG("\n");
+
+ memset((void *)dev, 0, sizeof(*dev));
+ simple_lock_init(&dev->count_lock);
+ lockinit(&dev->dev_lock, PZERO, "drmlk", 0, 0);
+
+#if 0 /* XXX use getenv I guess */
+ drm_parse_options(gamma);
+#endif
+
+#if 0
+ if ((retcode = misc_register(&gamma_misc))) {
+ DRM_ERROR("Cannot register \"%s\"\n", GAMMA_NAME);
+ return retcode;
+ }
+#endif
+ dev->device = nbdev;
+ dev->devnode = make_dev(&gamma_cdevsw,
+ device_get_unit(nbdev),
+ DRM_DEV_UID,
+ DRM_DEV_GID,
+ DRM_DEV_MODE,
+ GAMMA_NAME);
+ dev->name = GAMMA_NAME;
+
+ drm_mem_init();
+ drm_sysctl_init(dev);
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ GAMMA_NAME,
+ GAMMA_MAJOR,
+ GAMMA_MINOR,
+ GAMMA_PATCHLEVEL,
+ GAMMA_DATE,
+ device_get_unit(nbdev));
+
+ return 0;
+}
+
+/* gamma_cleanup is called via gamma_detach at module unload time. */
+
+static void
+gamma_cleanup(device_t nbdev)
+{
+ drm_device_t *dev = device_get_softc(nbdev);
+
+ DRM_DEBUG("\n");
+
+ drm_sysctl_cleanup(dev);
+#if 0
+ if (misc_deregister(&gamma_misc)) {
+ DRM_ERROR("Cannot unload module\n");
+ } else {
+ DRM_INFO("Module unloaded\n");
+ }
+#endif
+ device_busy(dev->device);
+ gamma_takedown(dev);
+}
+
+SYSUNINIT(gamma_cleanup, SI_SUB_DRIVERS, SI_ORDER_ANY, gamma_cleanup, 0);
+
+#if 0
+int gamma_version(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_version_t version;
+ int len;
+
+ copy_from_user_ret(&version,
+ (drm_version_t *)arg,
+ sizeof(version),
+ -EFAULT);
+
+#define DRM_COPY(name,value) \
+ len = strlen(value); \
+ if (len > name##_len) len = name##_len; \
+ name##_len = strlen(value); \
+ if (len && name) { \
+ copy_to_user_ret(name, value, len, -EFAULT); \
+ }
+
+ version.version_major = GAMMA_MAJOR;
+ version.version_minor = GAMMA_MINOR;
+ version.version_patchlevel = GAMMA_PATCHLEVEL;
+
+ DRM_COPY(version.name, GAMMA_NAME);
+ DRM_COPY(version.date, GAMMA_DATE);
+ DRM_COPY(version.desc, GAMMA_DESC);
+
+ copy_to_user_ret((drm_version_t *)arg,
+ &version,
+ sizeof(version),
+ -EFAULT);
+ return 0;
+}
+#endif
+
+int
+gamma_open(dev_t kdev, int flags, int fmt, struct proc *p)
+{
+ drm_device_t *dev = GAMMA_SOFTC(minor(kdev));
+ int retcode = 0;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+
+ device_busy(dev->device);
+ if (!(retcode = drm_open_helper(kdev, flags, fmt, p, dev))) {
+ atomic_inc(&dev->total_open);
+ simple_lock(&dev->count_lock);
+		if (!dev->open_count++) {
+			simple_unlock(&dev->count_lock);
+			retcode = gamma_setup(dev);
+		} else {
+			simple_unlock(&dev->count_lock);
+		}
+ }
+ device_unbusy(dev->device);
+
+ return retcode;
+}
+
+int
+gamma_close(dev_t kdev, int flags, int fmt, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ int retcode = 0;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+ if (!(retcode = drm_close(kdev, flags, fmt, p))) {
+ atomic_inc(&dev->total_close);
+ simple_lock(&dev->count_lock);
+ if (!--dev->open_count) {
+ if (atomic_read(&dev->ioctl_count) || dev->blocked) {
+ DRM_ERROR("Device busy: %d %d\n",
+ atomic_read(&dev->ioctl_count),
+ dev->blocked);
+ simple_unlock(&dev->count_lock);
+ return EBUSY;
+ }
+ simple_unlock(&dev->count_lock);
+ return gamma_takedown(dev);
+ }
+ simple_unlock(&dev->count_lock);
+ }
+ return retcode;
+}
+
+/* gamma_ioctl is called whenever a process performs an ioctl on /dev/drm. */
+
+int
+gamma_ioctl(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ int nr = DRM_IOCTL_NR(cmd);
+ drm_device_t *dev = kdev->si_drv1;
+ drm_file_t *priv;
+ int retcode = 0;
+ drm_ioctl_desc_t *ioctl;
+ d_ioctl_t *func;
+
+ priv = drm_find_file_by_proc(dev, p);
+ if (!priv) {
+ DRM_DEBUG("can't find authenticator\n");
+ return EINVAL;
+ }
+
+ atomic_inc(&dev->ioctl_count);
+ atomic_inc(&dev->total_ioctl);
+ ++priv->ioctl_count;
+
+ DRM_DEBUG("pid = %d, cmd = 0x%02lx, nr = 0x%02x, auth = %d\n",
+ p->p_pid, cmd, nr, priv->authenticated);
+
+ switch (cmd) {
+ case FIOSETOWN:
+ return fsetown(*(int *)data, &dev->buf_sigio);
+
+ case FIOGETOWN:
+ *(int *) data = fgetown(dev->buf_sigio);
+ return 0;
+ }
+
+ if (nr >= GAMMA_IOCTL_COUNT) {
+ retcode = EINVAL;
+ } else {
+ ioctl = &gamma_ioctls[nr];
+ func = ioctl->func;
+
+ if (!func) {
+ DRM_DEBUG("no function\n");
+ retcode = EINVAL;
+ } else if ((ioctl->root_only && suser(p))
+ || (ioctl->auth_needed && !priv->authenticated)) {
+ retcode = EACCES;
+ } else {
+ retcode = (func)(kdev, cmd, data, flags, p);
+ }
+ }
+
+ atomic_dec(&dev->ioctl_count);
+ return retcode;
+}
+
+int gamma_unlock(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_lock_t *lockp = (drm_lock_t *) data;
+
+ if (lockp->context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ p->p_pid, lockp->context);
+		return EINVAL;
+ }
+
+ DRM_DEBUG("%d frees lock (%d holds)\n",
+ lockp->context,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ atomic_inc(&dev->total_unlocks);
+ if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
+ atomic_inc(&dev->total_contends);
+ drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
+ gamma_dma_schedule(dev, 1);
+ if (!dev->context_flag) {
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("\n");
+ }
+ }
+
+#if DRM_DMA_HISTOGRAM
+ {
+ struct timespec ts;
+ getnanotime(&ts);
+ timespecsub(&ts, &dev->lck_start);
+ atomic_inc(&dev->histo.lhld[drm_histogram_slot(&ts)]);
+ }
+#endif
+
+ return 0;
+}
diff --git a/bsd/gamma/gamma_drv.h b/bsd/gamma/gamma_drv.h
new file mode 100644
index 00000000..e4888220
--- /dev/null
+++ b/bsd/gamma/gamma_drv.h
@@ -0,0 +1,50 @@
+/* gamma_drv.h -- Private header for 3dlabs GMX 2000 driver -*- c -*-
+ * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
+ * Revised: Fri Aug 20 09:24:27 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/gamma_drv.h,v 1.4 1999/08/30 13:05:00 faith Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/gamma_drv.h,v 1.1 1999/09/25 14:38:00 dawes Exp $
+ *
+ */
+
+#ifndef _GAMMA_DRV_H_
+#define _GAMMA_DRV_H_
+
+ /* gamma_drv.c */
+extern d_open_t gamma_open;
+extern d_close_t gamma_close;
+extern d_ioctl_t gamma_ioctl;
+extern d_ioctl_t gamma_version;
+extern d_ioctl_t gamma_dma;
+extern d_ioctl_t gamma_lock;
+extern d_ioctl_t gamma_unlock;
+extern d_ioctl_t gamma_control;
+
+ /* gamma_dma.c */
+extern int gamma_dma_schedule(drm_device_t *dev, int locked);
+extern int gamma_irq_install(drm_device_t *dev, int irq);
+extern int gamma_irq_uninstall(drm_device_t *dev);
+
+#endif
diff --git a/bsd/i810_drm.h b/bsd/i810_drm.h
new file mode 100644
index 00000000..4c8e09f6
--- /dev/null
+++ b/bsd/i810_drm.h
@@ -0,0 +1,188 @@
+#ifndef _I810_DRM_H_
+#define _I810_DRM_H_
+
+/* WARNING: These defines must be the same as what the Xserver uses.
+ * If you change them, you must change the defines in the Xserver.
+ */
+
+#ifndef _I810_DEFINES_
+#define _I810_DEFINES_
+
+#define I810_DMA_BUF_ORDER 12
+#define I810_DMA_BUF_SZ (1<<I810_DMA_BUF_ORDER)
+#define I810_DMA_BUF_NR 256
+#define I810_NR_SAREA_CLIPRECTS 8
+
+/* Each region is a minimum of 64k, and there are at most 64 of them.
+ */
+#define I810_NR_TEX_REGIONS 64
+#define I810_LOG_MIN_TEX_REGION_SIZE 16
+#endif
+
+#define I810_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
+#define I810_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
+#define I810_UPLOAD_CTX 0x4
+#define I810_UPLOAD_BUFFERS 0x8
+#define I810_UPLOAD_TEX0 0x10
+#define I810_UPLOAD_TEX1 0x20
+#define I810_UPLOAD_CLIPRECTS 0x40
+
+
+/* Indices into buf.Setup where various bits of state are mirrored per
+ * context and per buffer. These can be fired at the card as a unit,
+ * or in a piecewise fashion as required.
+ */
+
+/* Destbuffer state
+ * - backbuffer linear offset and pitch -- invariant in the current dri
+ * - zbuffer linear offset and pitch -- also invariant
+ * - drawing origin in back and depth buffers.
+ *
+ * Keep the depth/back buffer state here to accommodate private buffers
+ * in the future.
+ */
+#define I810_DESTREG_DI0 0 /* CMD_OP_DESTBUFFER_INFO (2 dwords) */
+#define I810_DESTREG_DI1 1
+#define I810_DESTREG_DV0 2 /* GFX_OP_DESTBUFFER_VARS (2 dwords) */
+#define I810_DESTREG_DV1 3
+#define I810_DESTREG_DR0 4 /* GFX_OP_DRAWRECT_INFO (4 dwords) */
+#define I810_DESTREG_DR1 5
+#define I810_DESTREG_DR2 6
+#define I810_DESTREG_DR3 7
+#define I810_DESTREG_DR4 8
+#define I810_DEST_SETUP_SIZE 10
+
+/* Context state
+ */
+#define I810_CTXREG_CF0 0 /* GFX_OP_COLOR_FACTOR */
+#define I810_CTXREG_CF1 1
+#define I810_CTXREG_ST0 2 /* GFX_OP_STIPPLE */
+#define I810_CTXREG_ST1 3
+#define I810_CTXREG_VF 4 /* GFX_OP_VERTEX_FMT */
+#define I810_CTXREG_MT 5 /* GFX_OP_MAP_TEXELS */
+#define I810_CTXREG_MC0 6 /* GFX_OP_MAP_COLOR_STAGES - stage 0 */
+#define I810_CTXREG_MC1 7 /* GFX_OP_MAP_COLOR_STAGES - stage 1 */
+#define I810_CTXREG_MC2 8 /* GFX_OP_MAP_COLOR_STAGES - stage 2 */
+#define I810_CTXREG_MA0 9 /* GFX_OP_MAP_ALPHA_STAGES - stage 0 */
+#define I810_CTXREG_MA1 10 /* GFX_OP_MAP_ALPHA_STAGES - stage 1 */
+#define I810_CTXREG_MA2 11 /* GFX_OP_MAP_ALPHA_STAGES - stage 2 */
+#define I810_CTXREG_SDM 12 /* GFX_OP_SRC_DEST_MONO */
+#define I810_CTXREG_FOG 13 /* GFX_OP_FOG_COLOR */
+#define I810_CTXREG_B1 14 /* GFX_OP_BOOL_1 */
+#define I810_CTXREG_B2 15 /* GFX_OP_BOOL_2 */
+#define I810_CTXREG_LCS 16 /* GFX_OP_LINEWIDTH_CULL_SHADE_MODE */
+#define I810_CTXREG_PV 17 /* GFX_OP_PV_RULE -- Invariant! */
+#define I810_CTXREG_ZA 18 /* GFX_OP_ZBIAS_ALPHAFUNC */
+#define I810_CTXREG_AA 19 /* GFX_OP_ANTIALIAS */
+#define I810_CTX_SETUP_SIZE 20
+
+/* Texture state (per tex unit)
+ */
+#define I810_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (4 dwords) */
+#define I810_TEXREG_MI1 1
+#define I810_TEXREG_MI2 2
+#define I810_TEXREG_MI3 3
+#define I810_TEXREG_MF 4 /* GFX_OP_MAP_FILTER */
+#define I810_TEXREG_MLC 5 /* GFX_OP_MAP_LOD_CTL */
+#define I810_TEXREG_MLL 6 /* GFX_OP_MAP_LOD_LIMITS */
+#define I810_TEXREG_MCS 7 /* GFX_OP_MAP_COORD_SETS ??? */
+#define I810_TEX_SETUP_SIZE 8
+
+#define I810_FRONT 0x1
+#define I810_BACK 0x2
+#define I810_DEPTH 0x4
+
+
+typedef struct _drm_i810_init {
+ enum {
+ I810_INIT_DMA = 0x01,
+ I810_CLEANUP_DMA = 0x02
+ } func;
+ int ring_map_idx;
+ int buffer_map_idx;
+ int sarea_priv_offset;
+ unsigned int ring_start;
+ unsigned int ring_end;
+ unsigned int ring_size;
+ unsigned int front_offset;
+ unsigned int back_offset;
+ unsigned int depth_offset;
+ unsigned int w;
+ unsigned int h;
+ unsigned int pitch;
+ unsigned int pitch_bits;
+} drm_i810_init_t;
+
+/* Warning: If you change the SAREA structure you must change the Xserver
+ * structure as well */
+
+typedef struct _drm_i810_tex_region {
+ unsigned char next, prev; /* indices to form a circular LRU */
+ unsigned char in_use; /* owned by a client, or free? */
+ int age; /* tracked by clients to update local LRU's */
+} drm_i810_tex_region_t;
+
+typedef struct _drm_i810_sarea {
+ unsigned int ContextState[I810_CTX_SETUP_SIZE];
+ unsigned int BufferState[I810_DEST_SETUP_SIZE];
+ unsigned int TexState[2][I810_TEX_SETUP_SIZE];
+ unsigned int dirty;
+
+ unsigned int nbox;
+ drm_clip_rect_t boxes[I810_NR_SAREA_CLIPRECTS];
+
+ /* Maintain an LRU of contiguous regions of texture space. If
+ * you think you own a region of texture memory, and it has an
+ * age different to the one you set, then you are mistaken and
+ * it has been stolen by another client. If global texAge
+ * hasn't changed, there is no need to walk the list.
+ *
+ * These regions can be used as a proxy for the fine-grained
+ * texture information of other clients - by maintaining them
+ * in the same lru which is used to age their own textures,
+ * clients have an approximate lru for the whole of global
+ * texture space, and can make informed decisions as to which
+ * areas to kick out. There is no need to choose whether to
+ * kick out your own texture or someone else's - simply eject
+	 * them all in LRU order.  (A short client-side sketch of this
+	 * check follows this structure definition.)
+	 */
+
+ drm_i810_tex_region_t texList[I810_NR_TEX_REGIONS+1];
+	/* Last element is the sentinel */
+ int texAge; /* last time texture was uploaded */
+ int last_enqueue; /* last time a buffer was enqueued */
+ int last_dispatch; /* age of the most recently dispatched buffer */
+ int last_quiescent; /* */
+ int ctxOwner; /* last context to upload state */
+
+ int vertex_prim;
+
+} drm_i810_sarea_t;
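The LRU aging scheme described in the comment inside this structure lends itself to a short illustration. The following is an editor's sketch, not code from this commit: the local_age and lost arrays, and the function name, are assumptions standing in for whatever bookkeeping a real client keeps.

/* Editor's sketch: walk the shared LRU only when the global texAge has
 * changed, and treat any region whose age no longer matches the stamp we
 * last wrote as stolen by another client (it must be re-uploaded). */
static void
i810_check_stolen_regions(const drm_i810_sarea_t *sarea,
			  int *last_global_age,
			  const int local_age[I810_NR_TEX_REGIONS],
			  unsigned char lost[I810_NR_TEX_REGIONS])
{
	int i;

	if (sarea->texAge == *last_global_age)
		return;			/* global age unchanged: skip the walk */

	for (i = 0; i < I810_NR_TEX_REGIONS; i++)
		if (sarea->texList[i].age != local_age[i])
			lost[i] = 1;	/* stolen; re-upload before reuse */

	*last_global_age = sarea->texAge;
}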
+
+typedef struct _drm_i810_clear {
+ int clear_color;
+ int clear_depth;
+ int flags;
+} drm_i810_clear_t;
+
+
+
+/* These may be placeholders if we have more cliprects than
+ * I810_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
+ * false, indicating that the buffer will be dispatched again with a
+ * new set of cliprects.
+ */
+typedef struct _drm_i810_vertex {
+ int idx; /* buffer index */
+ int used; /* nr bytes in use */
+ int discard; /* client is finished with the buffer? */
+} drm_i810_vertex_t;
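As a usage note for the discard convention described above, here is a hedged multi-pass submission sketch; submit_vertex() is a hypothetical placeholder for the client's actual submission path and does not exist in this commit.

/* Editor's sketch: dispatch one buffer once per batch of cliprects,
 * keeping discard clear until the last pass so the buffer is not
 * returned to the freelist before the final dispatch. */
static void
i810_submit_in_passes(int buf_idx, int used_bytes, int nr_batches,
		      void (*submit_vertex)(const drm_i810_vertex_t *))
{
	drm_i810_vertex_t v;
	int pass;

	v.idx  = buf_idx;
	v.used = used_bytes;
	for (pass = 0; pass < nr_batches; pass++) {
		v.discard = (pass == nr_batches - 1);
		submit_vertex(&v);
	}
}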
+
+typedef struct drm_i810_dma {
+ void *virtual;
+ int request_idx;
+ int request_size;
+ int granted;
+} drm_i810_dma_t;
+
+#endif /* _I810_DRM_H_ */
diff --git a/bsd/mga_drm.h b/bsd/mga_drm.h
new file mode 100644
index 00000000..8bfa2b97
--- /dev/null
+++ b/bsd/mga_drm.h
@@ -0,0 +1,269 @@
+/* mga_drm.h -- Public header for the Matrox g200/g400 driver
+ * Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Jeff Hartmann <jhartmann@precisioninsight.com>
+ * Keith Whitwell <keithw@precisioninsight.com>
+ *
+ * $XFree86$
+ */
+
+#ifndef _MGA_DRM_H_
+#define _MGA_DRM_H_
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (xf86drmMga.h)
+ */
+#ifndef _MGA_DEFINES_
+#define _MGA_DEFINES_
+
+#define MGA_F 0x1 /* fog */
+#define MGA_A 0x2 /* alpha */
+#define MGA_S 0x4 /* specular */
+#define MGA_T2 0x8 /* multitexture */
+
+#define MGA_WARP_TGZ 0
+#define MGA_WARP_TGZF (MGA_F)
+#define MGA_WARP_TGZA (MGA_A)
+#define MGA_WARP_TGZAF (MGA_F|MGA_A)
+#define MGA_WARP_TGZS (MGA_S)
+#define MGA_WARP_TGZSF (MGA_S|MGA_F)
+#define MGA_WARP_TGZSA (MGA_S|MGA_A)
+#define MGA_WARP_TGZSAF (MGA_S|MGA_F|MGA_A)
+#define MGA_WARP_T2GZ (MGA_T2)
+#define MGA_WARP_T2GZF (MGA_T2|MGA_F)
+#define MGA_WARP_T2GZA (MGA_T2|MGA_A)
+#define MGA_WARP_T2GZAF (MGA_T2|MGA_A|MGA_F)
+#define MGA_WARP_T2GZS (MGA_T2|MGA_S)
+#define MGA_WARP_T2GZSF (MGA_T2|MGA_S|MGA_F)
+#define MGA_WARP_T2GZSA (MGA_T2|MGA_S|MGA_A)
+#define MGA_WARP_T2GZSAF (MGA_T2|MGA_S|MGA_F|MGA_A)
+
+#define MGA_MAX_G400_PIPES 16
+#define MGA_MAX_G200_PIPES 8 /* no multitex */
+#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES
+
+#define MGA_CARD_TYPE_G200 1
+#define MGA_CARD_TYPE_G400 2
+
+#define MGA_FRONT 0x1
+#define MGA_BACK 0x2
+#define MGA_DEPTH 0x4
+
+/* 3d state excluding texture units:
+ */
+#define MGA_CTXREG_DSTORG 0 /* validated */
+#define MGA_CTXREG_MACCESS 1
+#define MGA_CTXREG_PLNWT 2
+#define MGA_CTXREG_DWGCTL 3
+#define MGA_CTXREG_ALPHACTRL 4
+#define MGA_CTXREG_FOGCOLOR 5
+#define MGA_CTXREG_WFLAG 6
+#define MGA_CTXREG_TDUAL0 7
+#define MGA_CTXREG_TDUAL1 8
+#define MGA_CTXREG_FCOL 9
+#define MGA_CTX_SETUP_SIZE 10
+
+/* 2d state
+ */
+#define MGA_2DREG_PITCH 0
+#define MGA_2D_SETUP_SIZE 1
+
+/* Each texture unit has a state:
+ */
+#define MGA_TEXREG_CTL 0
+#define MGA_TEXREG_CTL2 1
+#define MGA_TEXREG_FILTER 2
+#define MGA_TEXREG_BORDERCOL 3
+#define MGA_TEXREG_ORG 4 /* validated */
+#define MGA_TEXREG_ORG1 5
+#define MGA_TEXREG_ORG2 6
+#define MGA_TEXREG_ORG3 7
+#define MGA_TEXREG_ORG4 8
+#define MGA_TEXREG_WIDTH 9
+#define MGA_TEXREG_HEIGHT 10
+#define MGA_TEX_SETUP_SIZE 11
+
+/* What needs to be changed for the current vertex dma buffer?
+ */
+#define MGA_UPLOAD_CTX 0x1
+#define MGA_UPLOAD_TEX0 0x2
+#define MGA_UPLOAD_TEX1 0x4
+#define MGA_UPLOAD_PIPE 0x8
+#define MGA_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */
+#define MGA_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */
+#define MGA_UPLOAD_2D 0x40
+#define MGA_WAIT_AGE 0x80 /* handled client-side */
+#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */
+#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock
+ quiescent */
+
+/* 31 buffers of 64k each, just under 2 meg in total.
+ */
+#define MGA_DMA_BUF_ORDER 16
+#define MGA_DMA_BUF_SZ (1<<MGA_DMA_BUF_ORDER)
+#define MGA_DMA_BUF_NR 31
+
+/* Keep these small for testing.
+ */
+#define MGA_NR_SAREA_CLIPRECTS 8
+
+/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
+ * regions, subject to a minimum region size of (1<<16) == 64k.
+ *
+ * Clients may subdivide regions internally, but when sharing between
+ * clients, the region size is the minimum granularity.  (A sketch of
+ * the offset-to-region arithmetic follows these defines.)
+ */
+
+#define MGA_CARD_HEAP 0
+#define MGA_AGP_HEAP 1
+#define MGA_NR_TEX_HEAPS 2
+#define MGA_NR_TEX_REGIONS 16
+#define MGA_LOG_MIN_TEX_REGION_SIZE 16
+#endif
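A minimal sketch of the offset-to-region arithmetic implied by the comment above (editor's illustration; the function name is an assumption, not part of this commit):

/* Editor's sketch: the range of 64k regions touched by a texture that
 * starts at "offset" and spans "size" bytes within one heap. */
static void
mga_region_span(unsigned int offset, unsigned int size,
		unsigned int *first, unsigned int *last)
{
	*first = offset >> MGA_LOG_MIN_TEX_REGION_SIZE;
	*last  = (offset + size - 1) >> MGA_LOG_MIN_TEX_REGION_SIZE;
}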
+
+typedef struct _drm_mga_warp_index {
+ int installed;
+ unsigned long phys_addr;
+ int size;
+} drm_mga_warp_index_t;
+
+typedef struct drm_mga_init {
+ enum {
+ MGA_INIT_DMA = 0x01,
+ MGA_CLEANUP_DMA = 0x02
+ } func;
+ int reserved_map_agpstart;
+ int reserved_map_idx;
+ int buffer_map_idx;
+ int sarea_priv_offset;
+ int primary_size;
+ int warp_ucode_size;
+ unsigned int frontOffset;
+ unsigned int backOffset;
+ unsigned int depthOffset;
+ unsigned int textureOffset;
+ unsigned int textureSize;
+ unsigned int agpTextureOffset;
+ unsigned int agpTextureSize;
+ unsigned int cpp;
+ unsigned int stride;
+ int sgram;
+ int chipset;
+ drm_mga_warp_index_t WarpIndex[MGA_MAX_WARP_PIPES];
+ unsigned int mAccess;
+} drm_mga_init_t;
+
+/* Warning: if you change the sarea structure, you must change the Xserver
+ * structures as well */
+
+typedef struct _drm_mga_tex_region {
+ unsigned char next, prev;
+ unsigned char in_use;
+ unsigned int age;
+} drm_mga_tex_region_t;
+
+typedef struct _drm_mga_sarea {
+ /* The channel for communication of state information to the kernel
+ * on firing a vertex dma buffer.
+ */
+ unsigned int ContextState[MGA_CTX_SETUP_SIZE];
+ unsigned int ServerState[MGA_2D_SETUP_SIZE];
+ unsigned int TexState[2][MGA_TEX_SETUP_SIZE];
+ unsigned int WarpPipe;
+ unsigned int dirty;
+
+ unsigned int nbox;
+ drm_clip_rect_t boxes[MGA_NR_SAREA_CLIPRECTS];
+
+
+ /* Information about the most recently used 3d drawable. The
+ * client fills in the req_* fields, the server fills in the
+ * exported_ fields and puts the cliprects into boxes, above.
+ *
+ * The client clears the exported_drawable field before
+ * clobbering the boxes data.
+ */
+ unsigned int req_drawable; /* the X drawable id */
+ unsigned int req_draw_buffer; /* MGA_FRONT or MGA_BACK */
+
+ unsigned int exported_drawable;
+ unsigned int exported_index;
+ unsigned int exported_stamp;
+ unsigned int exported_buffers;
+ unsigned int exported_nfront;
+ unsigned int exported_nback;
+ int exported_back_x, exported_front_x, exported_w;
+ int exported_back_y, exported_front_y, exported_h;
+ drm_clip_rect_t exported_boxes[MGA_NR_SAREA_CLIPRECTS];
+
+ /* Counters for aging textures and for client-side throttling.
+ */
+ unsigned int last_enqueue; /* last time a buffer was enqueued */
+ unsigned int last_dispatch; /* age of the most recently dispatched buffer */
+ unsigned int last_quiescent; /* */
+
+
+ /* LRU lists for texture memory in agp space and on the card
+ */
+ drm_mga_tex_region_t texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS+1];
+ unsigned int texAge[MGA_NR_TEX_HEAPS];
+
+ /* Mechanism to validate card state.
+ */
+ int ctxOwner;
+} drm_mga_sarea_t;
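To connect the MGA_UPLOAD_* bits defined earlier with this structure, a hedged client-side sketch follows (editor's example; the helper name and the register values passed in are illustrative only):

/* Editor's sketch: mirror new register values into the sarea and flag
 * them as stale so they are re-emitted with this client's next vertex
 * DMA buffer. */
static void
mga_mark_state_dirty(drm_mga_sarea_t *sarea,
		     unsigned int dwgctl, unsigned int texctl0)
{
	sarea->ContextState[MGA_CTXREG_DWGCTL] = dwgctl;
	sarea->TexState[0][MGA_TEXREG_CTL] = texctl0;
	sarea->dirty |= MGA_UPLOAD_CTX | MGA_UPLOAD_TEX0;
}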
+
+/* Device specific ioctls:
+ */
+typedef struct _drm_mga_clear {
+ unsigned int clear_color;
+ unsigned int clear_depth;
+ unsigned int flags;
+} drm_mga_clear_t;
+
+typedef struct _drm_mga_swap {
+ int dummy;
+} drm_mga_swap_t;
+
+typedef struct _drm_mga_iload {
+ int idx;
+ int length;
+ unsigned int destOrg;
+} drm_mga_iload_t;
+
+typedef struct _drm_mga_vertex {
+ int idx; /* buffer to queue */
+ int used; /* bytes in use */
+ int discard; /* client finished with buffer? */
+} drm_mga_vertex_t;
+
+typedef struct _drm_mga_indices {
+ int idx; /* buffer to queue */
+ unsigned int start;
+ unsigned int end;
+ int discard; /* client finished with buffer? */
+} drm_mga_indices_t;
+
+#endif
diff --git a/bsd/r128_drm.h b/bsd/r128_drm.h
new file mode 100644
index 00000000..0379a5fa
--- /dev/null
+++ b/bsd/r128_drm.h
@@ -0,0 +1,111 @@
+/* r128_drm.h -- Public header for the r128 driver
+ * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
+ *
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Kevin E. Martin <kevin@precisioninsight.com>
+ *
+ * $XFree86$
+ */
+
+#ifndef _R128_DRM_H_
+#define _R128_DRM_H_
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (xf86drmR128.h)
+ */
+typedef struct drm_r128_init {
+ enum {
+ R128_INIT_CCE = 0x01,
+ R128_CLEANUP_CCE = 0x02
+ } func;
+ int sarea_priv_offset;
+ int is_pci;
+ int cce_mode;
+ int cce_fifo_size;
+ int cce_secure;
+ int ring_size;
+ int usec_timeout;
+
+ int fb_offset;
+ int agp_ring_offset;
+ int agp_read_ptr_offset;
+ int agp_vertbufs_offset;
+ int agp_indbufs_offset;
+ int agp_textures_offset;
+ int mmio_offset;
+} drm_r128_init_t;
+
+typedef struct drm_r128_packet {
+ unsigned long *buffer;
+ int count;
+ int flags;
+} drm_r128_packet_t;
+
+typedef enum drm_r128_prim {
+ _DRM_R128_PRIM_NONE = 0x0001,
+ _DRM_R128_PRIM_POINT = 0x0002,
+ _DRM_R128_PRIM_LINE = 0x0004,
+ _DRM_R128_PRIM_POLY_LINE = 0x0008,
+ _DRM_R128_PRIM_TRI_LIST = 0x0010,
+ _DRM_R128_PRIM_TRI_FAN = 0x0020,
+ _DRM_R128_PRIM_TRI_STRIP = 0x0040,
+ _DRM_R128_PRIM_TRI_TYPE2 = 0x0080
+} drm_r128_prim_t;
+
+typedef struct drm_r128_vertex {
+ /* Indices here refer to the offset into
+ buflist in drm_buf_get_t. */
+ int send_count; /* Number of buffers to send */
+ int *send_indices; /* List of handles to buffers */
+ int *send_sizes; /* Lengths of data to send */
+ drm_r128_prim_t prim; /* Primitive type */
+ int request_count; /* Number of buffers requested */
+ int *request_indices; /* Buffer information */
+ int *request_sizes;
+ int granted_count; /* Number of buffers granted */
+} drm_r128_vertex_t;
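A hedged sketch of the request side of this structure (editor's example; the caller-supplied arrays and the ioctl that eventually consumes the struct are assumptions and are not shown):

/* Editor's sketch: ask the kernel for "count" DMA buffers.  The kernel
 * is expected to fill request_indices/request_sizes and set
 * granted_count; the send_* fields would list already-filled buffers
 * to dispatch, which this sketch does not do. */
static void
r128_request_buffers(drm_r128_vertex_t *v, int *indices, int *sizes,
		     int count)
{
	v->send_count      = 0;
	v->send_indices    = NULL;
	v->send_sizes      = NULL;
	v->prim            = _DRM_R128_PRIM_NONE;
	v->request_count   = count;
	v->request_indices = indices;	/* kernel fills in buffer indices */
	v->request_sizes   = sizes;	/* kernel fills in buffer sizes */
	v->granted_count   = 0;		/* written back by the kernel */
}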
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (r128_sarea.h)
+ */
+#define R128_LOCAL_TEX_HEAP 0
+#define R128_AGP_TEX_HEAP 1
+#define R128_NR_TEX_HEAPS 2
+#define R128_NR_TEX_REGIONS 64
+#define R128_LOG_TEX_GRANULARITY 16
+
+typedef struct drm_tex_region {
+ unsigned char next, prev;
+ unsigned char in_use;
+ int age;
+} drm_tex_region_t;
+
+typedef struct drm_r128_sarea {
+ drm_tex_region_t tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS+1];
+ int tex_age[R128_NR_TEX_HEAPS];
+ int ctx_owner;
+ int ring_write;
+} drm_r128_sarea_t;
+
+#endif
diff --git a/bsd/tdfx/Makefile b/bsd/tdfx/Makefile
new file mode 100644
index 00000000..e0ff8ffa
--- /dev/null
+++ b/bsd/tdfx/Makefile
@@ -0,0 +1,15 @@
+# $FreeBSD$
+
+KMOD = tdfx
+SRCS = tdfx_drv.c tdfx_context.c
+SRCS += device_if.h bus_if.h pci_if.h
+CFLAGS += ${DEBUG_FLAGS} -I..
+KERN = /usr/src/sys
+
+@:
+ ln -sf /sys @
+
+machine:
+ ln -sf /sys/i386/include machine
+
+.include "/usr/src/sys/conf/kmod.mk"
diff --git a/bsd/tdfx/tdfx_context.c b/bsd/tdfx/tdfx_context.c
new file mode 100644
index 00000000..0aecf762
--- /dev/null
+++ b/bsd/tdfx/tdfx_context.c
@@ -0,0 +1,201 @@
+/* tdfx_context.c -- IOCTLs for tdfx contexts -*- c -*-
+ * Created: Thu Oct 7 10:50:22 1999 by faith@precisioninsight.com
+ * Revised: Sat Oct 9 23:39:56 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI$
+ * $XFree86$
+ *
+ */
+
+#include "drmP.h"
+#include "tdfx_drv.h"
+
+extern drm_ctx_t tdfx_res_ctx;
+
+static int tdfx_alloc_queue(drm_device_t *dev)
+{
+ static int context = 0;
+
+ return ++context; /* Should this reuse contexts in the future? */
+}
+
+int tdfx_context_switch(drm_device_t *dev, int old, int new)
+{
+ char buf[64];
+
+ atomic_inc(&dev->total_ctx);
+
+ if (test_and_set_bit(0, &dev->context_flag)) {
+ DRM_ERROR("Reentering -- FIXME\n");
+ return -EBUSY;
+ }
+
+#if DRM_DMA_HISTOGRAM
+ getnanotime(&dev->ctx_start);
+#endif
+
+ DRM_DEBUG("Context switch from %d to %d\n", old, new);
+
+ if (new == dev->last_context) {
+ clear_bit(0, &dev->context_flag);
+ return 0;
+ }
+
+ if (drm_flags & DRM_FLAG_NOCTX) {
+ tdfx_context_switch_complete(dev, new);
+ } else {
+ sprintf(buf, "C %d %d\n", old, new);
+ drm_write_string(dev, buf);
+ }
+
+ return 0;
+}
+
+int tdfx_context_switch_complete(drm_device_t *dev, int new)
+{
+ dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
+ dev->last_switch = ticks;
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("Lock isn't held after context switch\n");
+ }
+
+ /* If a context switch is ever initiated
+ when the kernel holds the lock, release
+ that lock here. */
+#if DRM_DMA_HISTOGRAM
+ {
+ struct timespec ts;
+ getnanotime(&ts);
+ timespecsub(&ts, &dev->lck_start);
+ atomic_inc(&dev->histo.ctx[drm_histogram_slot(&ts)]);
+ }
+#endif
+ clear_bit(0, &dev->context_flag);
+ wakeup(&dev->context_wait);
+
+ return 0;
+}
+
+
+int
+tdfx_resctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_ctx_res_t res;
+ drm_ctx_t ctx;
+ int i, error;
+
+ DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
+ res = *(drm_ctx_res_t *) data;
+ if (res.count >= DRM_RESERVED_CONTEXTS) {
+ memset(&ctx, 0, sizeof(ctx));
+ for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+ ctx.handle = i;
+ error = copyout(&i, &res.contexts[i], sizeof(i));
+ if (error) return error;
+ }
+ }
+ res.count = DRM_RESERVED_CONTEXTS;
+ *(drm_ctx_res_t *) data = res;
+ return 0;
+}
+
+
+int
+tdfx_addctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_ctx_t ctx;
+
+ ctx = *(drm_ctx_t *) data;
+ if ((ctx.handle = tdfx_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
+ /* Skip kernel's context and get a new one. */
+ ctx.handle = tdfx_alloc_queue(dev);
+ }
+ DRM_DEBUG("%d\n", ctx.handle);
+ *(drm_ctx_t *) data = ctx;
+ return 0;
+}
+
+int
+tdfx_modctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_ctx_t ctx;
+
+ ctx = *(drm_ctx_t *) data;
+ if (ctx.flags==_DRM_CONTEXT_PRESERVED)
+ tdfx_res_ctx.handle=ctx.handle;
+ return 0;
+}
+
+int
+tdfx_getctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_ctx_t ctx;
+
+ ctx = *(drm_ctx_t *) data;
+	/* This is 0, because we don't handle any context flags */
+ ctx.flags = 0;
+ *(drm_ctx_t *) data = ctx;
+ return 0;
+}
+
+int
+tdfx_switchctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_ctx_t ctx;
+
+ ctx = *(drm_ctx_t *) data;
+ DRM_DEBUG("%d\n", ctx.handle);
+ return tdfx_context_switch(dev, dev->last_context, ctx.handle);
+}
+
+int
+tdfx_newctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_ctx_t ctx;
+
+ ctx = *(drm_ctx_t *) data;
+ DRM_DEBUG("%d\n", ctx.handle);
+ tdfx_context_switch_complete(dev, ctx.handle);
+
+ return 0;
+}
+
+int
+tdfx_rmctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_ctx_t ctx;
+
+ ctx = *(drm_ctx_t *) data;
+ DRM_DEBUG("%d\n", ctx.handle);
+ /* This is currently a noop because we
+ don't reuse context values. Perhaps we
+ should? */
+
+ return 0;
+}
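For orientation, a hypothetical userspace call into the handler above might look like this (editor's sketch; DRM_IOCTL_ADD_CTX and drm_ctx_t come from the shared drm.h, everything else is illustrative):

/* Editor's sketch: obtain a context handle through the path serviced by
 * tdfx_addctx().  "fd" is an already-open DRM device node. */
#include <sys/ioctl.h>
#include <string.h>
#include "drm.h"

static int
create_context(int fd)
{
	drm_ctx_t ctx;

	memset(&ctx, 0, sizeof(ctx));
	if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx) != 0)
		return -1;
	return ctx.handle;	/* tdfx_alloc_queue() never hands out 0 */
}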
diff --git a/bsd/tdfx/tdfx_drv.c b/bsd/tdfx/tdfx_drv.c
new file mode 100644
index 00000000..573cfcf1
--- /dev/null
+++ b/bsd/tdfx/tdfx_drv.c
@@ -0,0 +1,694 @@
+/* tdfx_drv.c -- tdfx driver -*- c -*-
+ * Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com
+ * Revised: Tue Oct 12 08:51:35 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI$
+ * $XFree86$
+ *
+ */
+
+#include "drmP.h"
+#include "tdfx_drv.h"
+
+#include <pci/pcivar.h>
+
+MODULE_DEPEND(tdfx, drm, 1, 1, 1);
+
+#define TDFX_NAME "tdfx"
+#define TDFX_DESC "tdfx"
+#define TDFX_DATE "19991009"
+#define TDFX_MAJOR 0
+#define TDFX_MINOR 0
+#define TDFX_PATCHLEVEL 1
+
+static int tdfx_init(device_t nbdev);
+static void tdfx_cleanup(device_t nbdev);
+
+drm_ctx_t tdfx_res_ctx;
+
+static int tdfx_probe(device_t dev)
+{
+ const char *s = 0;
+
+ switch (pci_get_devid(dev)) {
+ case 0x0003121a:
+ s = "3Dfx Voodoo Banshee graphics accelerator";
+ break;
+
+ case 0x0005121a:
+ s = "3Dfx Voodoo 3 graphics accelerator";
+ break;
+ }
+
+ if (s) {
+ device_set_desc(dev, s);
+ return 0;
+ }
+
+ return ENXIO;
+}
+
+static int tdfx_attach(device_t dev)
+{
+ tdfx_init(dev);
+ return 0;
+}
+
+static int tdfx_detach(device_t dev)
+{
+ tdfx_cleanup(dev);
+ return 0;
+}
+
+static device_method_t tdfx_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, tdfx_probe),
+ DEVMETHOD(device_attach, tdfx_attach),
+ DEVMETHOD(device_detach, tdfx_detach),
+
+ { 0, 0 }
+};
+
+static driver_t tdfx_driver = {
+ "drm",
+ tdfx_methods,
+ sizeof(drm_device_t),
+};
+
+static devclass_t tdfx_devclass;
+#define TDFX_SOFTC(unit) \
+ ((drm_device_t *) devclass_get_softc(tdfx_devclass, unit))
+
+DRIVER_MODULE(if_tdfx, pci, tdfx_driver, tdfx_devclass, 0, 0);
+
+#define CDEV_MAJOR 145
+ /* tdfx_drv.c */
+static d_open_t tdfx_open;
+static d_close_t tdfx_close;
+static d_ioctl_t tdfx_version;
+static d_ioctl_t tdfx_ioctl;
+static d_ioctl_t tdfx_lock;
+static d_ioctl_t tdfx_unlock;
+
+static struct cdevsw tdfx_cdevsw = {
+ /* open */ tdfx_open,
+ /* close */ tdfx_close,
+ /* read */ drm_read,
+ /* write */ drm_write,
+ /* ioctl */ tdfx_ioctl,
+ /* poll */ drm_poll,
+ /* mmap */ drm_mmap,
+ /* strategy */ nostrategy,
+ /* name */ "tdfx",
+ /* maj */ CDEV_MAJOR,
+ /* dump */ nodump,
+ /* psize */ nopsize,
+ /* flags */ D_TTY | D_TRACKCLOSE,
+ /* bmaj */ -1
+};
+
+static drm_ioctl_desc_t tdfx_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { tdfx_version, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { tdfx_addctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { tdfx_rmctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { tdfx_modctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { tdfx_getctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { tdfx_switchctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { tdfx_newctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { tdfx_resctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { tdfx_lock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { tdfx_unlock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
+};
+#define TDFX_IOCTL_COUNT DRM_ARRAY_SIZE(tdfx_ioctls)
+
+static int
+tdfx_setup(drm_device_t *dev)
+{
+ int i;
+
+ device_busy(dev->device);
+
+ atomic_set(&dev->ioctl_count, 0);
+ atomic_set(&dev->vma_count, 0);
+ dev->buf_use = 0;
+ atomic_set(&dev->buf_alloc, 0);
+
+ atomic_set(&dev->total_open, 0);
+ atomic_set(&dev->total_close, 0);
+ atomic_set(&dev->total_ioctl, 0);
+ atomic_set(&dev->total_irq, 0);
+ atomic_set(&dev->total_ctx, 0);
+ atomic_set(&dev->total_locks, 0);
+ atomic_set(&dev->total_unlocks, 0);
+ atomic_set(&dev->total_contends, 0);
+ atomic_set(&dev->total_sleeps, 0);
+
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ dev->magiclist[i].head = NULL;
+ dev->magiclist[i].tail = NULL;
+ }
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ dev->vmalist = NULL;
+ dev->lock.hw_lock = NULL;
+ dev->lock.lock_queue = 0;
+ dev->queue_count = 0;
+ dev->queue_reserved = 0;
+ dev->queue_slots = 0;
+ dev->queuelist = NULL;
+ dev->irq = 0;
+ dev->context_flag = 0;
+ dev->interrupt_flag = 0;
+ dev->dma = 0;
+ dev->dma_flag = 0;
+ dev->last_context = 0;
+ dev->last_switch = 0;
+ dev->last_checked = 0;
+ callout_init(&dev->timer);
+ dev->context_wait = 0;
+
+ timespecclear(&dev->ctx_start);
+ timespecclear(&dev->lck_start);
+
+ dev->buf_rp = dev->buf;
+ dev->buf_wp = dev->buf;
+ dev->buf_end = dev->buf + DRM_BSZ;
+ bzero(&dev->buf_sel, sizeof dev->buf_sel);
+ dev->buf_sigio = NULL;
+ dev->buf_readers = 0;
+ dev->buf_writers = 0;
+ dev->buf_selecting = 0;
+
+ tdfx_res_ctx.handle=-1;
+
+ DRM_DEBUG("\n");
+
+ /* The kernel's context could be created here, but is now created
+ in drm_dma_enqueue. This is more resource-efficient for
+ hardware that does not do DMA, but may mean that
+ drm_select_queue fails between the time the interrupt is
+ initialized and the time the queues are initialized. */
+
+ return 0;
+}
+
+
+static int
+tdfx_takedown(drm_device_t *dev)
+{
+ int i;
+ drm_magic_entry_t *pt, *next;
+ drm_map_t *map;
+ drm_vma_entry_t *vma, *vma_next;
+
+ DRM_DEBUG("\n");
+
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
+ callout_stop(&dev->timer);
+
+ if (dev->devname) {
+ drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
+ dev->devname = NULL;
+ }
+
+ if (dev->unique) {
+ drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
+ dev->unique = NULL;
+ dev->unique_len = 0;
+ }
+ /* Clear pid list */
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ for (pt = dev->magiclist[i].head; pt; pt = next) {
+ next = pt->next;
+ drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+ }
+ dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
+ }
+
+ /* Clear vma list (only built for debugging) */
+ if (dev->vmalist) {
+ for (vma = dev->vmalist; vma; vma = vma_next) {
+ vma_next = vma->next;
+ drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
+ }
+ dev->vmalist = NULL;
+ }
+
+ /* Clear map area and mtrr information */
+ if (dev->maplist) {
+ for (i = 0; i < dev->map_count; i++) {
+ map = dev->maplist[i];
+ switch (map->type) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+#ifdef CONFIG_MTRR
+ if (map->mtrr >= 0) {
+ int retcode;
+ retcode = mtrr_del(map->mtrr,
+ map->offset,
+ map->size);
+ DRM_DEBUG("mtrr_del = %d\n", retcode);
+ }
+#endif
+ drm_ioremapfree(map->handle, map->size);
+ break;
+ case _DRM_SHM:
+ drm_free_pages((unsigned long)map->handle,
+ drm_order(map->size)
+ - PAGE_SHIFT,
+ DRM_MEM_SAREA);
+ break;
+ case _DRM_AGP:
+ break; /* XXX */
+ }
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ }
+ drm_free(dev->maplist,
+ dev->map_count * sizeof(*dev->maplist),
+ DRM_MEM_MAPS);
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ }
+
+ if (dev->lock.hw_lock) {
+ dev->lock.hw_lock = NULL; /* SHM removed */
+ dev->lock.pid = 0;
+ wakeup(&dev->lock.lock_queue);
+ }
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+
+ device_unbusy(dev->device);
+
+ return 0;
+}
+
+/* tdfx_init is called via tdfx_attach at module load time. */
+
+static int
+tdfx_init(device_t nbdev)
+{
+ drm_device_t *dev = device_get_softc(nbdev);
+
+ DRM_DEBUG("\n");
+
+ memset((void *)dev, 0, sizeof(*dev));
+ simple_lock_init(&dev->count_lock);
+ lockinit(&dev->dev_lock, PZERO, "drmlk", 0, 0);
+
+#if 0
+ drm_parse_options(tdfx);
+#endif
+
+ dev->device = nbdev;
+ dev->devnode = make_dev(&tdfx_cdevsw,
+ device_get_unit(nbdev),
+ DRM_DEV_UID,
+ DRM_DEV_GID,
+ DRM_DEV_MODE,
+ TDFX_NAME);
+ dev->name = TDFX_NAME;
+
+ drm_mem_init();
+ drm_sysctl_init(dev);
+ TAILQ_INIT(&dev->files);
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ TDFX_NAME,
+ TDFX_MAJOR,
+ TDFX_MINOR,
+ TDFX_PATCHLEVEL,
+ TDFX_DATE,
+ device_get_unit(nbdev));
+
+ return 0;
+}
+
+/* tdfx_cleanup is called via tdfx_detach at module unload time. */
+
+static void
+tdfx_cleanup(device_t nbdev)
+{
+ drm_device_t *dev = device_get_softc(nbdev);
+
+ DRM_DEBUG("\n");
+
+ drm_sysctl_cleanup(dev);
+ destroy_dev(dev->devnode);
+
+ DRM_INFO("Module unloaded\n");
+
+ device_busy(dev->device);
+ tdfx_takedown(dev);
+}
+
+static int
+tdfx_version(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_version_t version;
+ int len;
+
+ version = *(drm_version_t *) data;
+
+#define DRM_COPY(name,value) \
+ len = strlen(value); \
+ if (len > name##_len) len = name##_len; \
+ name##_len = strlen(value); \
+ if (len && name) { \
+ int error = copyout(value, name, len); \
+ if (error) return error; \
+ }
+
+ version.version_major = TDFX_MAJOR;
+ version.version_minor = TDFX_MINOR;
+ version.version_patchlevel = TDFX_PATCHLEVEL;
+
+ DRM_COPY(version.name, TDFX_NAME);
+ DRM_COPY(version.date, TDFX_DATE);
+ DRM_COPY(version.desc, TDFX_DESC);
+
+ *(drm_version_t *) data = version;
+ return 0;
+}
+
+static int
+tdfx_open(dev_t kdev, int flags, int fmt, struct proc *p)
+{
+ drm_device_t *dev = TDFX_SOFTC(minor(kdev));
+ int retcode = 0;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+
+ device_busy(dev->device);
+ if (!(retcode = drm_open_helper(kdev, flags, fmt, p, dev))) {
+ atomic_inc(&dev->total_open);
+ simple_lock(&dev->count_lock);
+		if (!dev->open_count++) {
+			simple_unlock(&dev->count_lock);
+			retcode = tdfx_setup(dev);
+		} else {
+			simple_unlock(&dev->count_lock);
+		}
+ }
+ device_unbusy(dev->device);
+
+ return retcode;
+}
+
+static int
+tdfx_close(dev_t kdev, int flags, int fmt, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ int retcode = 0;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+ if (!(retcode = drm_close(kdev, flags, fmt, p))) {
+ atomic_inc(&dev->total_close);
+ simple_lock(&dev->count_lock);
+ if (!--dev->open_count) {
+ if (atomic_read(&dev->ioctl_count) || dev->blocked) {
+ DRM_ERROR("Device busy: %d %d\n",
+ atomic_read(&dev->ioctl_count),
+ dev->blocked);
+ simple_unlock(&dev->count_lock);
+ return EBUSY;
+ }
+ simple_unlock(&dev->count_lock);
+ return tdfx_takedown(dev);
+ }
+ simple_unlock(&dev->count_lock);
+ }
+
+ return retcode;
+}
+
+/* tdfx_ioctl is called whenever a process performs an ioctl on /dev/drm. */
+
+static int
+tdfx_ioctl(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ int nr = DRM_IOCTL_NR(cmd);
+ drm_device_t *dev = kdev->si_drv1;
+ drm_file_t *priv;
+ int retcode = 0;
+ drm_ioctl_desc_t *ioctl;
+ d_ioctl_t *func;
+
+ DRM_DEBUG("dev=%p\n", dev);
+ priv = drm_find_file_by_proc(dev, p);
+ if (!priv) {
+ DRM_DEBUG("can't find authenticator\n");
+ return EINVAL;
+ }
+
+ atomic_inc(&dev->ioctl_count);
+ atomic_inc(&dev->total_ioctl);
+ ++priv->ioctl_count;
+
+ DRM_DEBUG("pid = %d, cmd = 0x%02lx, nr = 0x%02x, auth = %d\n",
+ p->p_pid, cmd, nr, priv->authenticated);
+
+ switch (cmd) {
+ case FIONBIO:
+ atomic_dec(&dev->ioctl_count);
+ return 0;
+
+ case FIOASYNC:
+ atomic_dec(&dev->ioctl_count);
+ dev->flags |= FASYNC;
+ return 0;
+
+ case FIOSETOWN:
+ atomic_dec(&dev->ioctl_count);
+ return fsetown(*(int *)data, &dev->buf_sigio);
+
+ case FIOGETOWN:
+ atomic_dec(&dev->ioctl_count);
+ *(int *) data = fgetown(dev->buf_sigio);
+ return 0;
+ }
+
+ if (nr >= TDFX_IOCTL_COUNT) {
+ retcode = EINVAL;
+ } else {
+ ioctl = &tdfx_ioctls[nr];
+ func = ioctl->func;
+
+ if (!func) {
+ DRM_DEBUG("no function\n");
+ retcode = EINVAL;
+ } else if ((ioctl->root_only && suser(p))
+ || (ioctl->auth_needed && !priv->authenticated)) {
+ retcode = EACCES;
+ } else {
+ retcode = (func)(kdev, cmd, data, flags, p);
+ }
+ }
+
+ atomic_dec(&dev->ioctl_count);
+ return retcode;
+}
+
+static int
+tdfx_lock(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ int ret = 0;
+ drm_lock_t lock;
+#if DRM_DMA_HISTOGRAM
+
+ getnanotime(&dev->lck_start);
+#endif
+
+ lock = *(drm_lock_t *) data;
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ p->p_pid, lock.context);
+ return EINVAL;
+ }
+
+ DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+ lock.context, p->p_pid, dev->lock.hw_lock->lock,
+ lock.flags);
+
+#if 0
+ /* dev->queue_count == 0 right now for
+ tdfx. FIXME? */
+ if (lock.context < 0 || lock.context >= dev->queue_count)
+ return EINVAL;
+#endif
+
+ if (!ret) {
+#if 0
+ if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
+ != lock.context) {
+ long j = ticks - dev->lock.lock_time;
+
+ if (lock.context == tdfx_res_ctx.handle &&
+ j >= 0 && j < DRM_LOCK_SLICE) {
+ /* Can't take lock if we just had it and
+ there is contention. */
+ DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d ticks=%d\n",
+ lock.context, p->p_pid, j,
+ dev->lock.lock_time, ticks);
+ ret = tsleep(&never, PZERO|PCATCH, "drmlk1",
+ DRM_LOCK_SLICE - j);
+ if (ret)
+ return ret;
+ DRM_DEBUG("ticks=%d\n", ticks);
+ }
+ }
+#endif
+ for (;;) {
+ if (!dev->lock.hw_lock) {
+ /* Device has been unregistered */
+ ret = EINTR;
+ break;
+ }
+ if (drm_lock_take(&dev->lock.hw_lock->lock,
+ lock.context)) {
+ dev->lock.pid = p->p_pid;
+ dev->lock.lock_time = ticks;
+ atomic_inc(&dev->total_locks);
+ break; /* Got lock */
+ }
+
+ /* Contention */
+ atomic_inc(&dev->total_sleeps);
+ ret = tsleep(&dev->lock.lock_queue,
+ PZERO|PCATCH,
+ "drmlk2",
+ 0);
+ if (ret)
+ break;
+ }
+ }
+
+#if 0
+ if (!ret && dev->last_context != lock.context &&
+ lock.context != tdfx_res_ctx.handle &&
+ dev->last_context != tdfx_res_ctx.handle) {
+ add_wait_queue(&dev->context_wait, &entry);
+ current->state = TASK_INTERRUPTIBLE;
+ /* PRE: dev->last_context != lock.context */
+ tdfx_context_switch(dev, dev->last_context, lock.context);
+ /* POST: we will wait for the context
+ switch and will dispatch on a later call
+ when dev->last_context == lock.context
+ NOTE WE HOLD THE LOCK THROUGHOUT THIS
+ TIME! */
+ current->policy |= SCHED_YIELD;
+ schedule();
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&dev->context_wait, &entry);
+ if (signal_pending(current)) {
+ ret = EINTR;
+ } else if (dev->last_context != lock.context) {
+ DRM_ERROR("Context mismatch: %d %d\n",
+ dev->last_context, lock.context);
+ }
+ }
+#endif
+
+ if (!ret) {
+ if (lock.flags & _DRM_LOCK_READY) {
+ /* Wait for space in DMA/FIFO */
+ }
+ if (lock.flags & _DRM_LOCK_QUIESCENT) {
+ /* Make hardware quiescent */
+#if 0
+ tdfx_quiescent(dev);
+#endif
+ }
+ }
+
+#if 0
+ DRM_ERROR("pid = %5d, old counter = %5ld\n",
+ p->p_pid, current->counter);
+#endif
+#if 0
+ while (current->counter > 25)
+ current->counter >>= 1; /* decrease time slice */
+ DRM_ERROR("pid = %5d, new counter = %5ld\n",
+ p->p_pid, current->counter);
+#endif
+ DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
+
+#if DRM_DMA_HISTOGRAM
+ {
+ struct timespec ts;
+ getnanotime(&ts);
+ timespecsub(&ts, &dev->lck_start);
+ atomic_inc(&dev->histo.lhld[drm_histogram_slot(&ts)]);
+ }
+#endif
+
+ return ret;
+}
+
+
+static int
+tdfx_unlock(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_lock_t lock;
+
+ lock = *(drm_lock_t *) data;
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ p->p_pid, lock.context);
+ return EINVAL;
+ }
+
+ DRM_DEBUG("%d frees lock (%d holds)\n",
+ lock.context,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ atomic_inc(&dev->total_unlocks);
+ if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
+ atomic_inc(&dev->total_contends);
+ drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
+ /* FIXME: Try to send data to card here */
+ if (!dev->context_flag) {
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("\n");
+ }
+ }
+
+ return 0;
+}
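The lock and unlock handlers above are normally driven from userspace roughly as follows (editor's hedged sketch; error handling and the hardware access in the middle are placeholders):

/* Editor's sketch: bracket direct hardware access with the ioctls
 * serviced by tdfx_lock()/tdfx_unlock().  "fd" is an open DRM device
 * node and "my_context" a handle previously returned by
 * DRM_IOCTL_ADD_CTX; the kernel context (0) must never be used here. */
#include <sys/ioctl.h>
#include <string.h>
#include "drm.h"

static int
with_hw_lock(int fd, int my_context)
{
	drm_lock_t lock;

	memset(&lock, 0, sizeof(lock));
	lock.context = my_context;
	if (ioctl(fd, DRM_IOCTL_LOCK, &lock) != 0)
		return -1;

	/* ... program the hardware here ... */

	if (ioctl(fd, DRM_IOCTL_UNLOCK, &lock) != 0)
		return -1;
	return 0;
}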
diff --git a/bsd/tdfx/tdfx_drv.h b/bsd/tdfx/tdfx_drv.h
new file mode 100644
index 00000000..213f8ef1
--- /dev/null
+++ b/bsd/tdfx/tdfx_drv.h
@@ -0,0 +1,47 @@
+/* tdfx_drv.h -- Private header for tdfx driver -*- c -*-
+ * Created: Thu Oct 7 10:40:04 1999 by faith@precisioninsight.com
+ * Revised: Sat Oct 9 23:38:19 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $PI$
+ * $XFree86$
+ *
+ */
+
+#ifndef _TDFX_DRV_H_
+#define _TDFX_DRV_H_
+
+ /* tdfx_context.c */
+
+extern d_ioctl_t tdfx_resctx;
+extern d_ioctl_t tdfx_addctx;
+extern d_ioctl_t tdfx_modctx;
+extern d_ioctl_t tdfx_getctx;
+extern d_ioctl_t tdfx_switchctx;
+extern d_ioctl_t tdfx_newctx;
+extern d_ioctl_t tdfx_rmctx;
+
+extern int tdfx_context_switch(drm_device_t *dev, int old, int new);
+extern int tdfx_context_switch_complete(drm_device_t *dev, int new);
+#endif
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index 2ea6c721..a169473a 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -49,3 +49,5 @@ i810.o: i810_drv.o i810_context.o $(L_TARGET)
mga.o: mga_drv.o mga_context.o mga_dma.o mga_bufs.o $(L_TARGET)
$(LD) $(LD_RFLAG) -r -o $@ mga_drv.o mga_bufs.o mga_dma.o mga_context.o mga_state.o -L. -ldrm
+r128.o: r128_drv.o r128_context.o $(L_TARGET)
+ $(LD) $(LD_RFLAG) -r -o $@ r128_drv.o r128_context.o -L. -ldrm
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index ecf50436..350d1ef9 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -229,8 +229,8 @@ typedef struct drm_magic_entry {
} drm_magic_entry_t;
typedef struct drm_magic_head {
- struct drm_magic_entry *head;
- struct drm_magic_entry *tail;
+ struct drm_magic_entry *head;
+ struct drm_magic_entry *tail;
} drm_magic_head_t;
typedef struct drm_vma_entry {
@@ -263,16 +263,15 @@ typedef struct drm_buf {
DRM_LIST_RECLAIM = 5
} list; /* Which list we're on */
-
- void *dev_private;
- int dev_priv_size;
-
#if DRM_DMA_HISTOGRAM
cycles_t time_queued; /* Queued to kernel DMA queue */
cycles_t time_dispatched; /* Dispatched to hardware */
cycles_t time_completed; /* Completed by hardware */
cycles_t time_freed; /* Back on freelist */
#endif
+
+	int		  dev_priv_size; /* Size of buffer private storage */
+ void *dev_private; /* Per-buffer private storage */
} drm_buf_t;
#if DRM_DMA_HISTOGRAM
diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c
index d82ed049..94f35b61 100644
--- a/linux-core/i810_dma.c
+++ b/linux-core/i810_dma.c
@@ -36,8 +36,17 @@
#include <linux/interrupt.h> /* For task queue support */
-#define I810_BUF_FREE 1
-#define I810_BUF_USED 0
+/* in case we don't have a 2.3.99-pre6 kernel or later: */
+#ifndef VM_DONTCOPY
+#define VM_DONTCOPY 0
+#endif
+
+#define I810_BUF_FREE 2
+#define I810_BUF_CLIENT 1
+#define I810_BUF_HARDWARE 0
+
+#define I810_BUF_UNMAPPED 0
+#define I810_BUF_MAPPED 1
#define I810_REG(reg) 2
#define I810_BASE(reg) ((unsigned long) \
@@ -90,7 +99,7 @@ static inline void i810_print_status_page(drm_device_t *dev)
DRM_DEBUG( "hw_status: Reserved : %x\n", temp[3]);
DRM_DEBUG( "hw_status: Driver Counter : %d\n", temp[5]);
for(i = 6; i < dma->buf_count + 6; i++) {
- DRM_DEBUG( "buffer status idx : %d used: %d\n", i - 6, temp[i]);
+ DRM_DEBUG( "buffer status idx : %d used: %d\n", i - 6, temp[i]);
}
}
@@ -107,7 +116,7 @@ static drm_buf_t *i810_freelist_get(drm_device_t *dev)
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
/* In use is already a pointer */
used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
- I810_BUF_USED);
+ I810_BUF_CLIENT);
if(used == I810_BUF_FREE) {
return buf;
}
@@ -125,8 +134,8 @@ static int i810_freelist_put(drm_device_t *dev, drm_buf_t *buf)
int used;
/* In use is already a pointer */
- used = cmpxchg(buf_priv->in_use, I810_BUF_USED, I810_BUF_FREE);
- if(used != I810_BUF_USED) {
+ used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
+ if(used != I810_BUF_CLIENT) {
DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
return -EINVAL;
}
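With the FREE/CLIENT/HARDWARE split introduced above, every buffer walks a small lock-free state machine: FREE to CLIENT when a client pulls it off the freelist, CLIENT to HARDWARE when its batch is handed to the ring, and back to FREE when the batch completes or the buffer is reclaimed. All transitions use the cmpxchg-on-the-status-page idiom shown in these hunks; a sketch with a hypothetical helper:

    /* Hypothetical helper; the driver open-codes these cmpxchg calls. */
    static int i810_buf_transition(drm_i810_buf_priv_t *buf_priv,
                                   int from, int to)
    {
            /* in_use points into the shared hardware status page */
            return cmpxchg(buf_priv->in_use, from, to) == from;
    }

    /* freelist_get:    FREE     -> CLIENT
     * vertex dispatch: CLIENT   -> HARDWARE  (when discard is set)
     * flush/reclaim:   HARDWARE -> FREE, CLIENT -> FREE             */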
@@ -134,26 +143,114 @@ static int i810_freelist_put(drm_device_t *dev, drm_buf_t *buf)
return 0;
}
-static int i810_dma_get_buffers(drm_device_t *dev, drm_dma_t *d)
+static struct file_operations i810_buffer_fops = {
+ open: i810_open,
+ flush: drm_flush,
+ release: i810_release,
+ ioctl: i810_ioctl,
+ mmap: i810_mmap_buffers,
+ read: drm_read,
+ fasync: drm_fasync,
+ poll: drm_poll,
+};
+
+int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i810_private_t *dev_priv = dev->dev_private;
+ drm_buf_t *buf = dev_priv->mmap_buffer;
+ drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+
+ vma->vm_flags |= (VM_IO | VM_DONTCOPY);
+ vma->vm_file = filp;
+
+ buf_priv->currently_mapped = I810_BUF_MAPPED;
+
+ if (remap_page_range(vma->vm_start,
+ VM_OFFSET(vma),
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) return -EAGAIN;
+ return 0;
+}
+
+static int i810_map_buffer(drm_buf_t *buf, struct file *filp)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+ drm_i810_private_t *dev_priv = dev->dev_private;
+ struct file_operations *old_fops;
+ int retcode = 0;
+
+ if(buf_priv->currently_mapped == I810_BUF_MAPPED) return -EINVAL;
+ down(&current->mm->mmap_sem);
+ old_fops = filp->f_op;
+ filp->f_op = &i810_buffer_fops;
+ dev_priv->mmap_buffer = buf;
+ buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total,
+ PROT_READ|PROT_WRITE,
+ MAP_SHARED,
+ buf->bus_address);
+ dev_priv->mmap_buffer = NULL;
+ filp->f_op = old_fops;
+ if ((unsigned long)buf_priv->virtual > -1024UL) {
+ /* Real error */
+ DRM_DEBUG("mmap error\n");
+ retcode = (signed int)buf_priv->virtual;
+ buf_priv->virtual = 0;
+ }
+ up(&current->mm->mmap_sem);
+ return retcode;
+}
+
+static int i810_unmap_buffer(drm_buf_t *buf)
+{
+ drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+ int retcode = 0;
+
+ if(buf_priv->currently_mapped != I810_BUF_MAPPED) return -EINVAL;
+ down(&current->mm->mmap_sem);
+ retcode = do_munmap((unsigned long)buf_priv->virtual,
+ (size_t) buf->total);
+ buf_priv->currently_mapped = I810_BUF_UNMAPPED;
+ buf_priv->virtual = 0;
+ up(&current->mm->mmap_sem);
+
+ return retcode;
+}
+
+static int i810_dma_get_buffer(drm_device_t *dev, drm_i810_dma_t *d,
+ struct file *filp)
{
- int i;
+ drm_file_t *priv = filp->private_data;
drm_buf_t *buf;
+ drm_i810_buf_priv_t *buf_priv;
+ int retcode = 0;
- for (i = d->granted_count; i < d->request_count; i++) {
- buf = i810_freelist_get(dev);
- if (!buf) break;
- buf->pid = current->pid;
- copy_to_user_ret(&d->request_indices[i],
- &buf->idx,
- sizeof(buf->idx),
- -EFAULT);
- copy_to_user_ret(&d->request_sizes[i],
- &buf->total,
- sizeof(buf->total),
- -EFAULT);
- ++d->granted_count;
+ buf = i810_freelist_get(dev);
+ if (!buf) {
+ retcode = -ENOMEM;
+ DRM_DEBUG("%s retcode %d\n", __FUNCTION__, retcode);
+ goto out_get_buf;
}
- return 0;
+
+ retcode = i810_map_buffer(buf, filp);
+ if(retcode) {
+ i810_freelist_put(dev, buf);
+ DRM_DEBUG("mapbuf failed in %s retcode %d\n",
+ __FUNCTION__, retcode);
+ goto out_get_buf;
+ }
+ buf->pid = priv->pid;
+ buf_priv = buf->dev_private;
+ d->granted = 1;
+ d->request_idx = buf->idx;
+ d->request_size = buf->total;
+ d->virtual = buf_priv->virtual;
+
+out_get_buf:
+ return retcode;
}
static unsigned long i810_alloc_page(drm_device_t *dev)
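i810_map_buffer() above temporarily swaps filp->f_op to i810_buffer_fops so that do_mmap() ends up in i810_mmap_buffers() for exactly the buffer stashed in dev_priv->mmap_buffer, and its error check relies on do_mmap() returning either a user address or a small negative errno cast to unsigned long. A sketch of that convention:

    /* assumption spelled out: errno values -1..-1024 land in the top
     * 1024 bytes of the address space, hence the "> -1024UL" tests;
     * called with current->mm->mmap_sem held, as above */
    unsigned long addr = do_mmap(filp, 0, buf->total,
                                 PROT_READ | PROT_WRITE, MAP_SHARED,
                                 buf->bus_address);
    if (addr > (unsigned long)-1024)        /* an errno, not a mapping */
            return (int)(long)addr;
    buf_priv->virtual = (void *)addr;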
@@ -184,7 +281,10 @@ static void i810_free_page(drm_device_t *dev, unsigned long page)
static int i810_dma_cleanup(drm_device_t *dev)
{
+ drm_device_dma_t *dma = dev->dma;
+
if(dev->dev_private) {
+ int i;
drm_i810_private_t *dev_priv =
(drm_i810_private_t *) dev->dev_private;
@@ -200,6 +300,12 @@ static int i810_dma_cleanup(drm_device_t *dev)
drm_free(dev->dev_private, sizeof(drm_i810_private_t),
DRM_MEM_DRIVER);
dev->dev_private = NULL;
+
+ for (i = 0; i < dma->buf_count; i++) {
+ drm_buf_t *buf = dma->buflist[ i ];
+ drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+ drm_ioremapfree(buf_priv->kernel_virtual, buf->total);
+ }
}
return 0;
}
@@ -210,6 +316,7 @@ static int i810_wait_ring(drm_device_t *dev, int n)
drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
int iters = 0;
unsigned long end;
+ unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
end = jiffies + (HZ*3);
while (ring->space < n) {
@@ -217,9 +324,11 @@ static int i810_wait_ring(drm_device_t *dev, int n)
ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
ring->space = ring->head - (ring->tail+8);
+ if (ring->space < 0) ring->space += ring->Size;
- if (ring->space < 0) ring->space += ring->Size;
-
+ if (ring->head != last_head)
+ end = jiffies + (HZ*3);
+
iters++;
if((signed)(end - jiffies) <= 0) {
DRM_ERROR("space: %d wanted %d\n", ring->space, n);
@@ -249,9 +358,9 @@ static int i810_freelist_init(drm_device_t *dev)
{
drm_device_dma_t *dma = dev->dma;
drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
- u8 *hw_status = (u8 *)dev_priv->hw_status_page;
- int i;
int my_idx = 24;
+ u32 *hw_status = (u32 *)(dev_priv->hw_status_page + my_idx);
+ int i;
if(dma->buf_count > 1019) {
/* Not enough space in the status page for the freelist */
@@ -262,11 +371,14 @@ static int i810_freelist_init(drm_device_t *dev)
drm_buf_t *buf = dma->buflist[ i ];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
- buf_priv->in_use = hw_status + my_idx;
- DRM_DEBUG("buf_priv->in_use : %p\n", buf_priv->in_use);
- *buf_priv->in_use = I810_BUF_FREE;
+ buf_priv->in_use = hw_status++;
buf_priv->my_use_idx = my_idx;
my_idx += 4;
+
+ *buf_priv->in_use = I810_BUF_FREE;
+
+ buf_priv->kernel_virtual = drm_ioremap(buf->bus_address,
+ buf->total);
}
return 0;
}
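i810_freelist_init() above parcels the hardware status page out as one 32-bit ownership flag per buffer: the first dwords stay reserved for the hardware reports and the driver counter that i810_print_status_page() dumps, and the flags start at byte offset 24 (dword 6). For the i-th buffer in dma->buflist this comes down to:

    u32 *flags = (u32 *)(dev_priv->hw_status_page + 24);   /* my_idx == 24 */

    buf_priv->in_use     = flags + i;       /* one dword per buffer     */
    buf_priv->my_use_idx = 24 + 4 * i;      /* byte offset of that dword */
    *buf_priv->in_use    = I810_BUF_FREE;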
@@ -300,9 +412,11 @@ static int i810_dma_initialize(drm_device_t *dev,
dev_priv->ring.Start = init->ring_start;
dev_priv->ring.End = init->ring_end;
dev_priv->ring.Size = init->ring_size;
+
dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base +
init->ring_start,
init->ring_size);
+
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
if (dev_priv->ring.virtual_start == NULL) {
@@ -311,6 +425,17 @@ static int i810_dma_initialize(drm_device_t *dev,
" ring buffer\n");
return -ENOMEM;
}
+
+ dev_priv->w = init->w;
+ dev_priv->h = init->h;
+ dev_priv->pitch = init->pitch;
+ dev_priv->back_offset = init->back_offset;
+ dev_priv->depth_offset = init->depth_offset;
+
+ dev_priv->front_di1 = init->front_offset | init->pitch_bits;
+ dev_priv->back_di1 = init->back_offset | init->pitch_bits;
+ dev_priv->zi1 = init->depth_offset | init->pitch_bits;
+
/* Program Hardware Status Page */
dev_priv->hw_status_page = i810_alloc_page(dev);
@@ -365,37 +490,270 @@ int i810_dma_init(struct inode *inode, struct file *filp,
return retcode;
}
-static void i810_dma_dispatch_general(drm_device_t *dev, drm_buf_t *buf,
- int used )
+
+
+/* Most efficient way to verify state for the i810 is as it is
+ * emitted. Non-conformant state is silently dropped.
+ *
+ * Use 'volatile' & local var tmp to force the emitted values to be
+ * identical to the verified ones.
+ */
+static void i810EmitContextVerified( drm_device_t *dev,
+ volatile unsigned int *code )
+{
+ drm_i810_private_t *dev_priv = dev->dev_private;
+ int i, j = 0;
+ unsigned int tmp;
+ RING_LOCALS;
+
+ BEGIN_LP_RING( I810_CTX_SETUP_SIZE );
+
+ OUT_RING( GFX_OP_COLOR_FACTOR );
+ OUT_RING( code[I810_CTXREG_CF1] );
+
+ OUT_RING( GFX_OP_STIPPLE );
+ OUT_RING( code[I810_CTXREG_ST1] );
+
+ for ( i = 4 ; i < I810_CTX_SETUP_SIZE ; i++ ) {
+ tmp = code[i];
+
+ if ((tmp & (7<<29)) == (3<<29) &&
+ (tmp & (0x1f<<24)) < (0x1d<<24))
+ {
+ OUT_RING( tmp );
+ j++;
+ }
+ }
+
+ if (j & 1)
+ OUT_RING( 0 );
+
+ ADVANCE_LP_RING();
+}
+
+static void i810EmitTexVerified( drm_device_t *dev,
+ volatile unsigned int *code )
+{
+ drm_i810_private_t *dev_priv = dev->dev_private;
+ int i, j = 0;
+ unsigned int tmp;
+ RING_LOCALS;
+
+ BEGIN_LP_RING( I810_TEX_SETUP_SIZE );
+
+ OUT_RING( GFX_OP_MAP_INFO );
+ OUT_RING( code[I810_TEXREG_MI1] );
+ OUT_RING( code[I810_TEXREG_MI2] );
+ OUT_RING( code[I810_TEXREG_MI3] );
+
+ for ( i = 4 ; i < I810_TEX_SETUP_SIZE ; i++ ) {
+ tmp = code[i];
+
+ if ((tmp & (7<<29)) == (3<<29) &&
+ (tmp & (0x1f<<24)) < (0x1d<<24))
+ {
+ OUT_RING( tmp );
+ j++;
+ }
+ }
+
+ if (j & 1)
+ OUT_RING( 0 );
+
+ ADVANCE_LP_RING();
+}
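Both verifiers above apply the same filter: a client-supplied state dword is emitted only when it is a type-3 (GFX_OP) parcel whose opcode field stays below 0x1d; anything else (including the CMD_OP_* destination-buffer packets and the BR00_* blitter commands defined later in this patch) is silently dropped, and a zero dword is appended when an odd number got through so the emit stays qword-sized. As a standalone predicate:

    /* filter applied by i810EmitContextVerified()/i810EmitTexVerified() */
    static int i810_state_dword_ok(unsigned int cmd)
    {
            return (cmd & (7 << 29))    == (3 << 29) &&     /* GFX_OP parcel */
                   (cmd & (0x1f << 24)) <  (0x1d << 24);    /* opcode < 0x1d */
    }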
+
+
+/* Need to do some additional checking when setting the dest buffer.
+ */
+static void i810EmitDestVerified( drm_device_t *dev,
+ volatile unsigned int *code )
+{
+ drm_i810_private_t *dev_priv = dev->dev_private;
+ unsigned int tmp;
+ RING_LOCALS;
+
+ BEGIN_LP_RING( I810_DEST_SETUP_SIZE + 2 );
+
+ tmp = code[I810_DESTREG_DI1];
+ if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
+ OUT_RING( CMD_OP_DESTBUFFER_INFO );
+ OUT_RING( tmp );
+ } else
+ DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
+ tmp, dev_priv->front_di1, dev_priv->back_di1);
+
+	/* invariant:
+ */
+ OUT_RING( CMD_OP_Z_BUFFER_INFO );
+ OUT_RING( dev_priv->zi1 );
+
+ OUT_RING( GFX_OP_DESTBUFFER_VARS );
+ OUT_RING( code[I810_DESTREG_DV1] );
+
+ OUT_RING( GFX_OP_DRAWRECT_INFO );
+ OUT_RING( code[I810_DESTREG_DR1] );
+ OUT_RING( code[I810_DESTREG_DR2] );
+ OUT_RING( code[I810_DESTREG_DR3] );
+ OUT_RING( code[I810_DESTREG_DR4] );
+ OUT_RING( 0 );
+
+ ADVANCE_LP_RING();
+}
+
+
+
+static void i810EmitState( drm_device_t *dev )
{
- drm_i810_private_t *dev_priv = dev->dev_private;
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
- unsigned long address = (unsigned long)buf->bus_address;
- unsigned long start = address - dev->agp->base;
- RING_LOCALS;
+ drm_i810_private_t *dev_priv = dev->dev_private;
+ drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int dirty = sarea_priv->dirty;
- dev_priv->counter++;
- DRM_DEBUG( "dispatch counter : %ld\n", dev_priv->counter);
- DRM_DEBUG( "i810_dma_dispatch\n");
- DRM_DEBUG( "start : 0x%lx\n", start);
- DRM_DEBUG( "used : 0x%x\n", used);
- DRM_DEBUG( "start + used - 4 : 0x%lx\n", start + used - 4);
- i810_kernel_lost_context(dev);
+ if (dirty & I810_UPLOAD_BUFFERS) {
+ i810EmitDestVerified( dev, sarea_priv->BufferState );
+ sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;
+ }
- BEGIN_LP_RING(10);
- OUT_RING( CMD_OP_BATCH_BUFFER );
- OUT_RING( start | BB1_PROTECTED );
- OUT_RING( start + used - 4 );
- OUT_RING( CMD_STORE_DWORD_IDX );
- OUT_RING( 20 );
- OUT_RING( dev_priv->counter );
- OUT_RING( CMD_STORE_DWORD_IDX );
- OUT_RING( buf_priv->my_use_idx );
- OUT_RING( I810_BUF_FREE );
- OUT_RING( CMD_REPORT_HEAD );
- ADVANCE_LP_RING();
+ if (dirty & I810_UPLOAD_CTX) {
+ i810EmitContextVerified( dev, sarea_priv->ContextState );
+ sarea_priv->dirty &= ~I810_UPLOAD_CTX;
+ }
+
+ if (dirty & I810_UPLOAD_TEX0) {
+ i810EmitTexVerified( dev, sarea_priv->TexState[0] );
+ sarea_priv->dirty &= ~I810_UPLOAD_TEX0;
+ }
+
+ if (dirty & I810_UPLOAD_TEX1) {
+ i810EmitTexVerified( dev, sarea_priv->TexState[1] );
+ sarea_priv->dirty &= ~I810_UPLOAD_TEX1;
+ }
}
+
+
+/* need to verify
+ */
+static void i810_dma_dispatch_clear( drm_device_t *dev, int flags,
+ unsigned int clear_color,
+ unsigned int clear_zval )
+{
+ drm_i810_private_t *dev_priv = dev->dev_private;
+ drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ int nbox = sarea_priv->nbox;
+ drm_clip_rect_t *pbox = sarea_priv->boxes;
+ int pitch = dev_priv->pitch;
+ int cpp = 2;
+ int i;
+ RING_LOCALS;
+
+ i810_kernel_lost_context(dev);
+
+ if (nbox > I810_NR_SAREA_CLIPRECTS)
+ nbox = I810_NR_SAREA_CLIPRECTS;
+
+ for (i = 0 ; i < nbox ; i++, pbox++) {
+ unsigned int x = pbox->x1;
+ unsigned int y = pbox->y1;
+ unsigned int width = (pbox->x2 - x) * cpp;
+ unsigned int height = pbox->y2 - y;
+ unsigned int start = y * pitch + x * cpp;
+
+ if (pbox->x1 > pbox->x2 ||
+ pbox->y1 > pbox->y2 ||
+ pbox->x2 > dev_priv->w ||
+ pbox->y2 > dev_priv->h)
+ continue;
+
+ if ( flags & I810_FRONT ) {
+ DRM_DEBUG("clear front\n");
+ BEGIN_LP_RING( 6 );
+ OUT_RING( BR00_BITBLT_CLIENT |
+ BR00_OP_COLOR_BLT | 0x3 );
+ OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
+ OUT_RING( (height << 16) | width );
+ OUT_RING( start );
+ OUT_RING( clear_color );
+ OUT_RING( 0 );
+ ADVANCE_LP_RING();
+ }
+
+ if ( flags & I810_BACK ) {
+ DRM_DEBUG("clear back\n");
+ BEGIN_LP_RING( 6 );
+ OUT_RING( BR00_BITBLT_CLIENT |
+ BR00_OP_COLOR_BLT | 0x3 );
+ OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
+ OUT_RING( (height << 16) | width );
+ OUT_RING( dev_priv->back_offset + start );
+ OUT_RING( clear_color );
+ OUT_RING( 0 );
+ ADVANCE_LP_RING();
+ }
+
+ if ( flags & I810_DEPTH ) {
+ DRM_DEBUG("clear depth\n");
+ BEGIN_LP_RING( 6 );
+ OUT_RING( BR00_BITBLT_CLIENT |
+ BR00_OP_COLOR_BLT | 0x3 );
+ OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
+ OUT_RING( (height << 16) | width );
+ OUT_RING( dev_priv->depth_offset + start );
+ OUT_RING( clear_zval );
+ OUT_RING( 0 );
+ ADVANCE_LP_RING();
+ }
+ }
+}
+
+static void i810_dma_dispatch_swap( drm_device_t *dev )
+{
+ drm_i810_private_t *dev_priv = dev->dev_private;
+ drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ int nbox = sarea_priv->nbox;
+ drm_clip_rect_t *pbox = sarea_priv->boxes;
+ int pitch = dev_priv->pitch;
+ int cpp = 2;
+ int ofs = dev_priv->back_offset;
+ int i;
+ RING_LOCALS;
+
+ DRM_DEBUG("swapbuffers\n");
+
+ i810_kernel_lost_context(dev);
+
+ if (nbox > I810_NR_SAREA_CLIPRECTS)
+ nbox = I810_NR_SAREA_CLIPRECTS;
+
+ for (i = 0 ; i < nbox; i++, pbox++)
+ {
+ unsigned int w = pbox->x2 - pbox->x1;
+ unsigned int h = pbox->y2 - pbox->y1;
+ unsigned int dst = pbox->x1*cpp + pbox->y1*pitch;
+ unsigned int start = ofs + dst;
+
+ if (pbox->x1 > pbox->x2 ||
+ pbox->y1 > pbox->y2 ||
+ pbox->x2 > dev_priv->w ||
+ pbox->y2 > dev_priv->h)
+ continue;
+
+ DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
+ pbox[i].x1, pbox[i].y1,
+ pbox[i].x2, pbox[i].y2);
+
+ BEGIN_LP_RING( 6 );
+ OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4 );
+ OUT_RING( pitch | (0xCC << 16));
+ OUT_RING( (h << 16) | (w * cpp));
+ OUT_RING( dst );
+ OUT_RING( pitch );
+ OUT_RING( start );
+ ADVANCE_LP_RING();
+ }
+}
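Clear and swapbuffers above are both plain 2-D blits clipped to the SAREA cliprects: the clear path uses a solid-pattern colour blit (the 0xF0 raster op in BR13) into the front, back or depth region, the swap path a source-copy blit (raster op 0xCC) from the back buffer into the front. With cpp fixed at 2, the per-box parameters reduce to:

    /* per-cliprect blit parameters used by both dispatchers (16 bpp, cpp == 2) */
    unsigned int width_bytes = (pbox->x2 - pbox->x1) * cpp;
    unsigned int height      = pbox->y2 - pbox->y1;
    unsigned int offset      = pbox->y1 * pitch + pbox->x1 * cpp;

    /* clear: fill height x width_bytes at offset, back_offset + offset or
     *        depth_offset + offset, selected by the I810_FRONT/BACK/DEPTH flags
     * swap:  copy the same rectangle from back_offset + offset to offset    */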
+
+
static void i810_dma_dispatch_vertex(drm_device_t *dev,
drm_buf_t *buf,
int discard,
@@ -408,14 +766,29 @@ static void i810_dma_dispatch_vertex(drm_device_t *dev,
int nbox = sarea_priv->nbox;
unsigned long address = (unsigned long)buf->bus_address;
unsigned long start = address - dev->agp->base;
- int i = 0;
+ int i = 0, u;
RING_LOCALS;
-
+ i810_kernel_lost_context(dev);
+
if (nbox > I810_NR_SAREA_CLIPRECTS)
nbox = I810_NR_SAREA_CLIPRECTS;
-
- DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
+
+ if (discard) {
+ u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
+ I810_BUF_HARDWARE);
+ if(u != I810_BUF_CLIENT) {
+			DRM_DEBUG("vertex buffer not owned by client\n");
+ }
+ }
+
+ if (used > 4*1024)
+ used = 0;
+
+ if (sarea_priv->dirty)
+ i810EmitState( dev );
+
+ DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
address, used, nbox);
dev_priv->counter++;
@@ -424,8 +797,20 @@ static void i810_dma_dispatch_vertex(drm_device_t *dev,
DRM_DEBUG( "start : %lx\n", start);
DRM_DEBUG( "used : %d\n", used);
DRM_DEBUG( "start + used - 4 : %ld\n", start + used - 4);
- i810_kernel_lost_context(dev);
+ if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
+ *(u32 *)buf_priv->virtual = (GFX_OP_PRIMITIVE |
+ sarea_priv->vertex_prim |
+ ((used/4)-2));
+
+ if (used & 4) {
+ *(u32 *)((u32)buf_priv->virtual + used) = 0;
+ used += 4;
+ }
+
+ i810_unmap_buffer(buf);
+ }
+
if (used) {
do {
if (i < nbox) {
@@ -433,7 +818,7 @@ static void i810_dma_dispatch_vertex(drm_device_t *dev,
OUT_RING( GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
SC_ENABLE );
OUT_RING( GFX_OP_SCISSOR_INFO );
- OUT_RING( box[i].x1 | (box[i].y1 << 16) );
+ OUT_RING( box[i].x1 | (box[i].y1<<16) );
OUT_RING( (box[i].x2-1) | ((box[i].y2-1)<<16) );
ADVANCE_LP_RING();
}
@@ -478,7 +863,9 @@ static void i810_dma_service(int irq, void *device, struct pt_regs *regs)
temp = temp & ~(0x6000);
if(temp != 0) I810_WRITE16(I810REG_INT_IDENTITY_R,
temp); /* Clear all interrupts */
-
+ else
+ return;
+
queue_task(&dev->tq, &tq_immediate);
mark_bh(IMMEDIATE_BH);
}
@@ -538,7 +925,7 @@ int i810_irq_install(drm_device_t *dev, int irq)
/* Install handler */
if ((retcode = request_irq(dev->irq,
i810_dma_service,
- 0,
+ SA_SHIRQ,
dev->devname,
dev))) {
down(&dev->struct_sem);
@@ -559,6 +946,9 @@ int i810_irq_uninstall(drm_device_t *dev)
int irq;
u16 temp;
+
+/* return 0; */
+
down(&dev->struct_sem);
irq = dev->irq;
dev->irq = 0;
@@ -617,10 +1007,15 @@ static inline void i810_dma_emit_flush(drm_device_t *dev)
RING_LOCALS;
i810_kernel_lost_context(dev);
+
BEGIN_LP_RING(2);
OUT_RING( CMD_REPORT_HEAD );
- OUT_RING( GFX_OP_USER_INTERRUPT );
+ OUT_RING( GFX_OP_USER_INTERRUPT );
ADVANCE_LP_RING();
+
+/* i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */
+/* atomic_set(&dev_priv->flush_done, 1); */
+/* wake_up_interruptible(&dev_priv->flush_queue); */
}
static inline void i810_dma_quiescent_emit(drm_device_t *dev)
@@ -629,13 +1024,17 @@ static inline void i810_dma_quiescent_emit(drm_device_t *dev)
RING_LOCALS;
i810_kernel_lost_context(dev);
- BEGIN_LP_RING(4);
+ BEGIN_LP_RING(4);
OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
OUT_RING( CMD_REPORT_HEAD );
- OUT_RING( GFX_OP_USER_INTERRUPT );
- OUT_RING( 0 );
+ OUT_RING( 0 );
+ OUT_RING( GFX_OP_USER_INTERRUPT );
ADVANCE_LP_RING();
+
+/* i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */
+/* atomic_set(&dev_priv->flush_done, 1); */
+/* wake_up_interruptible(&dev_priv->flush_queue); */
}
static void i810_dma_quiescent(drm_device_t *dev)
@@ -675,8 +1074,9 @@ static int i810_flush_queue(drm_device_t *dev)
{
DECLARE_WAITQUEUE(entry, current);
drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
unsigned long end;
- int ret = 0;
+ int i, ret = 0;
if(dev_priv == NULL) {
return 0;
@@ -701,7 +1101,21 @@ static int i810_flush_queue(drm_device_t *dev)
current->state = TASK_RUNNING;
remove_wait_queue(&dev_priv->flush_queue, &entry);
-
+
+
+ for (i = 0; i < dma->buf_count; i++) {
+ drm_buf_t *buf = dma->buflist[ i ];
+ drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+
+ int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
+ I810_BUF_FREE);
+
+ if (used == I810_BUF_HARDWARE)
+ DRM_DEBUG("reclaimed from HARDWARE\n");
+ if (used == I810_BUF_CLIENT)
+			DRM_DEBUG("still on client\n");
+ }
+
return ret;
}
@@ -712,20 +1126,23 @@ void i810_reclaim_buffers(drm_device_t *dev, pid_t pid)
int i;
if (!dma) return;
- if(dev->dev_private == NULL) return;
- if(dma->buflist == NULL) return;
+ if (!dev->dev_private) return;
+ if (!dma->buflist) return;
+
i810_flush_queue(dev);
for (i = 0; i < dma->buf_count; i++) {
drm_buf_t *buf = dma->buflist[ i ];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
- /* Only buffers that need to get reclaimed ever
- * get set to free
- */
if (buf->pid == pid && buf_priv) {
- cmpxchg(buf_priv->in_use,
- I810_BUF_USED, I810_BUF_FREE);
+ int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
+ I810_BUF_FREE);
+
+ if (used == I810_BUF_CLIENT)
+ DRM_DEBUG("reclaimed from client\n");
+ if(buf_priv->currently_mapped == I810_BUF_MAPPED)
+ buf_priv->currently_mapped = I810_BUF_UNMAPPED;
}
}
}
@@ -759,19 +1176,6 @@ int i810_lock(struct inode *inode, struct file *filp, unsigned int cmd,
*/
if (!ret) {
-#if 0
- if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
- != lock.context) {
- long j = jiffies - dev->lock.lock_time;
-
- if (j > 0 && j <= DRM_LOCK_SLICE) {
- /* Can't take lock if we just had it and
- there is contention. */
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(j);
- }
- }
-#endif
add_wait_queue(&dev->lock.lock_queue, &entry);
for (;;) {
if (!dev->lock.hw_lock) {
@@ -828,90 +1232,80 @@ int i810_flush_ioctl(struct inode *inode, struct file *filp,
return 0;
}
-static int i810DmaGeneral(drm_device_t *dev, drm_i810_general_t *args)
-{
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf = dma->buflist[ args->idx ];
-
- if (!args->used) {
- i810_freelist_put(dev, buf);
- } else {
- i810_dma_dispatch_general( dev, buf, args->used );
- atomic_add(args->used, &dma->total_bytes);
- atomic_inc(&dma->total_dmas);
- }
- return 0;
-}
-static int i810DmaVertex(drm_device_t *dev, drm_i810_vertex_t *args)
-{
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf = dma->buflist[ args->idx ];
- i810_dma_dispatch_vertex( dev, buf, args->discard, args->used );
- atomic_add(args->used, &dma->total_bytes);
- atomic_inc(&dma->total_dmas);
- return 0;
-}
-
-int i810_dma_general(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int i810_dma_vertex(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_i810_general_t general;
- drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
u32 *hw_status = (u32 *)dev_priv->hw_status_page;
drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
dev_priv->sarea_priv;
+ drm_i810_vertex_t vertex;
- int retcode = 0;
-
- copy_from_user_ret(&general, (drm_i810_general_t *)arg, sizeof(general),
+ copy_from_user_ret(&vertex, (drm_i810_vertex_t *)arg, sizeof(vertex),
-EFAULT);
- DRM_DEBUG("i810 dma general idx %d used %d\n",
- general.idx, general.used);
-
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
- DRM_ERROR("i810_dma_general called without lock held\n");
+ DRM_ERROR("i810_dma_vertex called without lock held\n");
return -EINVAL;
}
- retcode = i810DmaGeneral(dev, &general);
+ DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n",
+ vertex.idx, vertex.used, vertex.discard);
+
+ i810_dma_dispatch_vertex( dev,
+ dma->buflist[ vertex.idx ],
+ vertex.discard, vertex.used );
+
+ atomic_add(vertex.used, &dma->total_bytes);
+ atomic_inc(&dma->total_dmas);
sarea_priv->last_enqueue = dev_priv->counter-1;
sarea_priv->last_dispatch = (int) hw_status[5];
- return retcode;
+ return 0;
}
-int i810_dma_vertex(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+
+
+int i810_clear_bufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
- u32 *hw_status = (u32 *)dev_priv->hw_status_page;
- drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
- dev_priv->sarea_priv;
- drm_i810_vertex_t vertex;
- int retcode = 0;
+ drm_i810_clear_t clear;
- copy_from_user_ret(&vertex, (drm_i810_vertex_t *)arg, sizeof(vertex),
+ copy_from_user_ret(&clear, (drm_i810_clear_t *)arg, sizeof(clear),
-EFAULT);
+
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
- DRM_ERROR("i810_dma_vertex called without lock held\n");
+ DRM_ERROR("i810_clear_bufs called without lock held\n");
return -EINVAL;
}
- DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n",
- vertex.idx, vertex.used, vertex.discard);
+ i810_dma_dispatch_clear( dev, clear.flags,
+ clear.clear_color,
+ clear.clear_depth );
+ return 0;
+}
- retcode = i810DmaVertex(dev, &vertex);
- sarea_priv->last_enqueue = dev_priv->counter-1;
- sarea_priv->last_dispatch = (int) hw_status[5];
+int i810_swap_bufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
- return retcode;
+ DRM_DEBUG("i810_swap_bufs\n");
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("i810_swap_buf called without lock held\n");
+ return -EINVAL;
+ }
+
+ i810_dma_dispatch_swap( dev );
+ return 0;
}
int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd,
@@ -928,53 +1322,32 @@ int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd,
return 0;
}
-int i810_dma(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
+int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_device_dma_t *dma = dev->dma;
int retcode = 0;
- drm_dma_t d;
+ drm_i810_dma_t d;
drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
u32 *hw_status = (u32 *)dev_priv->hw_status_page;
drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
dev_priv->sarea_priv;
-
- copy_from_user_ret(&d, (drm_dma_t *)arg, sizeof(d), -EFAULT);
- DRM_DEBUG("%d %d: %d send, %d req\n",
- current->pid, d.context, d.send_count, d.request_count);
+ DRM_DEBUG("getbuf\n");
+ copy_from_user_ret(&d, (drm_i810_dma_t *)arg, sizeof(d), -EFAULT);
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("i810_dma called without lock held\n");
return -EINVAL;
}
-
- /* Please don't send us buffers.
- */
- if (d.send_count != 0) {
- DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
- current->pid, d.send_count);
- return -EINVAL;
- }
- /* We'll send you buffers.
- */
- if (d.request_count < 0 || d.request_count > dma->buf_count) {
- DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
- current->pid, d.request_count, dma->buf_count);
- return -EINVAL;
- }
-
- d.granted_count = 0;
+ d.granted = 0;
- if (!retcode && d.request_count) {
- retcode = i810_dma_get_buffers(dev, &d);
- }
+ retcode = i810_dma_get_buffer(dev, &d, filp);
- DRM_DEBUG("i810_dma: %d returning, granted = %d\n",
- current->pid, d.granted_count);
+ DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
+ current->pid, retcode, d.granted);
copy_to_user_ret((drm_dma_t *)arg, &d, sizeof(d), -EFAULT);
sarea_priv->last_dispatch = (int) hw_status[5];
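Taken together, these hunks replace the generic DRM_IOCTL_DMA/MAP_BUFS route with an i810-specific one: holding the hardware lock, a client asks for a single buffer, gets it mapped into its own address space, fills it, and hands it back with the vertex ioctl (the kernel overwrites the first dword with the GFX_OP_PRIMITIVE header, as shown above). A rough user-space sketch, leaving the lock handling to whatever libdrm-style wrappers the 3-D driver uses; fd, vertex_data and nbytes are placeholders:

    drm_i810_dma_t    d;
    drm_i810_vertex_t v;

    ioctl(fd, DRM_IOCTL_I810_GETBUF, &d);   /* maps the buffer, fills d */
    if (d.granted) {
            /* leave dword 0 for the primitive header the kernel patches in */
            memcpy((char *)d.virtual + 4, vertex_data, nbytes);

            v.idx     = d.request_idx;
            v.used    = nbytes + 4;
            v.discard = 1;                  /* finished with this buffer */
            ioctl(fd, DRM_IOCTL_I810_VERTEX, &v);
    }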
diff --git a/linux-core/i810_drm.h b/linux-core/i810_drm.h
index 0754874c..4c8e09f6 100644
--- a/linux-core/i810_drm.h
+++ b/linux-core/i810_drm.h
@@ -5,35 +5,112 @@
* if you change them, you must change the defines in the Xserver.
*/
-/* Might one day want to support the client-side ringbuffer code again.
- */
#ifndef _I810_DEFINES_
#define _I810_DEFINES_
-#define I810_USE_BATCH 1
#define I810_DMA_BUF_ORDER 12
#define I810_DMA_BUF_SZ (1<<I810_DMA_BUF_ORDER)
#define I810_DMA_BUF_NR 256
-#define I810_NR_SAREA_CLIPRECTS 2
+#define I810_NR_SAREA_CLIPRECTS 8
/* Each region is a minimum of 64k, and there are at most 64 of them.
*/
-
#define I810_NR_TEX_REGIONS 64
#define I810_LOG_MIN_TEX_REGION_SIZE 16
#endif
+#define I810_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
+#define I810_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
+#define I810_UPLOAD_CTX 0x4
+#define I810_UPLOAD_BUFFERS 0x8
+#define I810_UPLOAD_TEX0 0x10
+#define I810_UPLOAD_TEX1 0x20
+#define I810_UPLOAD_CLIPRECTS 0x40
+
+
+/* Indices into buf.Setup where various bits of state are mirrored per
+ * context and per buffer. These can be fired at the card as a unit,
+ * or in a piecewise fashion as required.
+ */
+
+/* Destbuffer state
+ * - backbuffer linear offset and pitch -- invariant in the current dri
+ * - zbuffer linear offset and pitch -- also invariant
+ * - drawing origin in back and depth buffers.
+ *
+ * Keep the depth/back buffer state here to accommodate private buffers
+ * in the future.
+ */
+#define I810_DESTREG_DI0 0 /* CMD_OP_DESTBUFFER_INFO (2 dwords) */
+#define I810_DESTREG_DI1 1
+#define I810_DESTREG_DV0 2 /* GFX_OP_DESTBUFFER_VARS (2 dwords) */
+#define I810_DESTREG_DV1 3
+#define I810_DESTREG_DR0 4 /* GFX_OP_DRAWRECT_INFO (4 dwords) */
+#define I810_DESTREG_DR1 5
+#define I810_DESTREG_DR2 6
+#define I810_DESTREG_DR3 7
+#define I810_DESTREG_DR4 8
+#define I810_DEST_SETUP_SIZE 10
+
+/* Context state
+ */
+#define I810_CTXREG_CF0 0 /* GFX_OP_COLOR_FACTOR */
+#define I810_CTXREG_CF1 1
+#define I810_CTXREG_ST0 2 /* GFX_OP_STIPPLE */
+#define I810_CTXREG_ST1 3
+#define I810_CTXREG_VF 4 /* GFX_OP_VERTEX_FMT */
+#define I810_CTXREG_MT 5 /* GFX_OP_MAP_TEXELS */
+#define I810_CTXREG_MC0 6 /* GFX_OP_MAP_COLOR_STAGES - stage 0 */
+#define I810_CTXREG_MC1 7 /* GFX_OP_MAP_COLOR_STAGES - stage 1 */
+#define I810_CTXREG_MC2 8 /* GFX_OP_MAP_COLOR_STAGES - stage 2 */
+#define I810_CTXREG_MA0 9 /* GFX_OP_MAP_ALPHA_STAGES - stage 0 */
+#define I810_CTXREG_MA1 10 /* GFX_OP_MAP_ALPHA_STAGES - stage 1 */
+#define I810_CTXREG_MA2 11 /* GFX_OP_MAP_ALPHA_STAGES - stage 2 */
+#define I810_CTXREG_SDM 12 /* GFX_OP_SRC_DEST_MONO */
+#define I810_CTXREG_FOG 13 /* GFX_OP_FOG_COLOR */
+#define I810_CTXREG_B1 14 /* GFX_OP_BOOL_1 */
+#define I810_CTXREG_B2 15 /* GFX_OP_BOOL_2 */
+#define I810_CTXREG_LCS 16 /* GFX_OP_LINEWIDTH_CULL_SHADE_MODE */
+#define I810_CTXREG_PV		17	/* GFX_OP_PV_RULE -- Invariant! */
+#define I810_CTXREG_ZA 18 /* GFX_OP_ZBIAS_ALPHAFUNC */
+#define I810_CTXREG_AA 19 /* GFX_OP_ANTIALIAS */
+#define I810_CTX_SETUP_SIZE 20
+
+/* Texture state (per tex unit)
+ */
+#define I810_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (4 dwords) */
+#define I810_TEXREG_MI1 1
+#define I810_TEXREG_MI2 2
+#define I810_TEXREG_MI3 3
+#define I810_TEXREG_MF 4 /* GFX_OP_MAP_FILTER */
+#define I810_TEXREG_MLC 5 /* GFX_OP_MAP_LOD_CTL */
+#define I810_TEXREG_MLL 6 /* GFX_OP_MAP_LOD_LIMITS */
+#define I810_TEXREG_MCS 7 /* GFX_OP_MAP_COORD_SETS ??? */
+#define I810_TEX_SETUP_SIZE 8
+
+#define I810_FRONT 0x1
+#define I810_BACK 0x2
+#define I810_DEPTH 0x4
+
+
typedef struct _drm_i810_init {
- enum {
- I810_INIT_DMA = 0x01,
- I810_CLEANUP_DMA = 0x02
+ enum {
+ I810_INIT_DMA = 0x01,
+ I810_CLEANUP_DMA = 0x02
} func;
- int ring_map_idx;
- int buffer_map_idx;
+ int ring_map_idx;
+ int buffer_map_idx;
int sarea_priv_offset;
- unsigned long ring_start;
- unsigned long ring_end;
- unsigned long ring_size;
+ unsigned int ring_start;
+ unsigned int ring_end;
+ unsigned int ring_size;
+ unsigned int front_offset;
+ unsigned int back_offset;
+ unsigned int depth_offset;
+ unsigned int w;
+ unsigned int h;
+ unsigned int pitch;
+ unsigned int pitch_bits;
} drm_i810_init_t;
/* Warning: If you change the SAREA structure you must change the Xserver
@@ -46,6 +123,11 @@ typedef struct _drm_i810_tex_region {
} drm_i810_tex_region_t;
typedef struct _drm_i810_sarea {
+ unsigned int ContextState[I810_CTX_SETUP_SIZE];
+ unsigned int BufferState[I810_DEST_SETUP_SIZE];
+ unsigned int TexState[2][I810_TEX_SETUP_SIZE];
+ unsigned int dirty;
+
unsigned int nbox;
drm_clip_rect_t boxes[I810_NR_SAREA_CLIPRECTS];
@@ -72,12 +154,18 @@ typedef struct _drm_i810_sarea {
int last_dispatch; /* age of the most recently dispatched buffer */
int last_quiescent; /* */
int ctxOwner; /* last context to upload state */
+
+ int vertex_prim;
+
} drm_i810_sarea_t;
-typedef struct _drm_i810_general {
- int idx;
- int used;
-} drm_i810_general_t;
+typedef struct _drm_i810_clear {
+ int clear_color;
+ int clear_depth;
+ int flags;
+} drm_i810_clear_t;
+
+
/* These may be placeholders if we have more cliprects than
* I810_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
@@ -90,4 +178,11 @@ typedef struct _drm_i810_vertex {
int discard; /* client is finished with the buffer? */
} drm_i810_vertex_t;
+typedef struct drm_i810_dma {
+ void *virtual;
+ int request_idx;
+ int request_size;
+ int granted;
+} drm_i810_dma_t;
+
#endif /* _I810_DRM_H_ */
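The new SAREA blocks let a client mirror its hardware state and flag which pieces the kernel must re-emit; i810EmitState() in the DMA code above consumes the dirty bits on the next vertex dispatch, after pushing each block through the verifiers. Client side, the pattern is roughly as follows (sarea is the mapped drm_i810_sarea_t, values are placeholders):

    sarea->ContextState[I810_CTXREG_ST1] = stipple;     /* example register */
    sarea->dirty |= I810_UPLOAD_CTX;

    sarea->TexState[0][I810_TEXREG_MI1] = map_info_1;
    sarea->dirty |= I810_UPLOAD_TEX0;

    sarea->vertex_prim = prim;      /* used when the batch is dispatched */
    /* ...then fire DRM_IOCTL_I810_VERTEX; the kernel emits the dirty
     * blocks (verified) ahead of the vertex buffer.                   */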
diff --git a/linux-core/i810_drv.c b/linux-core/i810_drv.c
index eec9eb0f..b523db90 100644
--- a/linux-core/i810_drv.c
+++ b/linux-core/i810_drv.c
@@ -79,7 +79,6 @@ static drm_ioctl_desc_t i810_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { i810_addbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { i810_markbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { i810_infobufs, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { i810_mapbufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { i810_freebufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { i810_addctx, 1, 1 },
@@ -92,8 +91,6 @@ static drm_ioctl_desc_t i810_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { i810_dma, 1, 0 },
-
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { i810_lock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { i810_unlock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
@@ -106,11 +103,14 @@ static drm_ioctl_desc_t i810_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
+
[DRM_IOCTL_NR(DRM_IOCTL_I810_INIT)] = { i810_dma_init, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_I810_VERTEX)] = { i810_dma_vertex, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_I810_DMA)] = { i810_dma_general,1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_I810_CLEAR)] = { i810_clear_bufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_I810_FLUSH)] = { i810_flush_ioctl,1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_I810_GETAGE)] = { i810_getage, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_I810_GETBUF)] = { i810_getbuf, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_I810_SWAP)] = { i810_swap_bufs, 1, 0 },
};
#define I810_IOCTL_COUNT DRM_ARRAY_SIZE(i810_ioctls)
diff --git a/linux-core/i810_drv.h b/linux-core/i810_drv.h
index 690710c8..c387bf72 100644
--- a/linux-core/i810_drv.h
+++ b/linux-core/i810_drv.h
@@ -32,6 +32,16 @@
#ifndef _I810_DRV_H_
#define _I810_DRV_H_
+typedef struct drm_i810_buf_priv {
+ u32 *in_use;
+ int my_use_idx;
+ int currently_mapped;
+ void *virtual;
+ void *kernel_virtual;
+ int map_count;
+ struct vm_area_struct *vma;
+} drm_i810_buf_priv_t;
+
typedef struct _drm_i810_ring_buffer{
int tail_mask;
unsigned long Start;
@@ -55,6 +65,15 @@ typedef struct drm_i810_private {
atomic_t flush_done;
wait_queue_head_t flush_queue; /* Processes waiting until flush */
+ drm_buf_t *mmap_buffer;
+
+
+ u32 front_di1, back_di1, zi1;
+
+ int back_offset;
+ int depth_offset;
+ int w, h;
+ int pitch;
} drm_i810_private_t;
/* i810_drv.c */
@@ -71,8 +90,8 @@ extern int i810_unlock(struct inode *inode, struct file *filp,
/* i810_dma.c */
extern int i810_dma_schedule(drm_device_t *dev, int locked);
-extern int i810_dma(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
+extern int i810_getbuf(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
extern int i810_irq_install(drm_device_t *dev, int irq);
extern int i810_irq_uninstall(drm_device_t *dev);
extern int i810_control(struct inode *inode, struct file *filp,
@@ -86,6 +105,7 @@ extern int i810_flush_ioctl(struct inode *inode, struct file *filp,
extern void i810_reclaim_buffers(drm_device_t *dev, pid_t pid);
extern int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg);
+extern int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma);
/* i810_bufs.c */
@@ -97,8 +117,6 @@ extern int i810_markbufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_freebufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int i810_mapbufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
extern int i810_addmap(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
@@ -121,32 +139,17 @@ extern int i810_rmctx(struct inode *inode, struct file *filp,
extern int i810_context_switch(drm_device_t *dev, int old, int new);
extern int i810_context_switch_complete(drm_device_t *dev, int new);
-
-
-
-/* Copy the outstanding cliprects for every I810_DMA_VERTEX buffer.
- * This can be fixed by emitting directly to the ringbuffer in the
- * 'vertex_dma' ioctl.
-*/
-typedef struct {
- u32 *in_use;
- int my_use_idx;
-} drm_i810_buf_priv_t;
-
-
-#define I810_DMA_GENERAL 0
-#define I810_DMA_VERTEX 1
-#define I810_DMA_DISCARD 2 /* not used */
-
#define I810_VERBOSE 0
int i810_dma_vertex(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-int i810_dma_general(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
+int i810_swap_bufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+int i810_clear_bufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
@@ -200,5 +203,22 @@ int i810_dma_general(struct inode *inode, struct file *filp,
#define SCI_YMAX_MASK (0xffff<<16)
#define SCI_XMAX_MASK (0xffff<<0)
+#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x2)
+#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24))
+
+#define CMD_OP_Z_BUFFER_INFO ((0x0<<29)|(0x16<<23))
+#define CMD_OP_DESTBUFFER_INFO ((0x0<<29)|(0x15<<23))
+
+#define BR00_BITBLT_CLIENT 0x40000000
+#define BR00_OP_COLOR_BLT 0x10000000
+#define BR00_OP_SRC_COPY_BLT 0x10C00000
+#define BR13_SOLID_PATTERN 0x80000000
+
+
+
#endif
diff --git a/linux-core/mga_drv.c b/linux-core/mga_drv.c
index b066fe9d..4b2c835f 100644
--- a/linux-core/mga_drv.c
+++ b/linux-core/mga_drv.c
@@ -111,6 +111,7 @@ static drm_ioctl_desc_t mga_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)] = { mga_iload, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_VERTEX)] = { mga_vertex, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_FLUSH)] = { mga_flush_ioctl, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MGA_INDICES)] = { mga_indices, 1, 0 },
};
#define MGA_IOCTL_COUNT DRM_ARRAY_SIZE(mga_ioctls)
diff --git a/linux/Makefile.kernel b/linux/Makefile.kernel
index 2ea6c721..a169473a 100644
--- a/linux/Makefile.kernel
+++ b/linux/Makefile.kernel
@@ -49,3 +49,5 @@ i810.o: i810_drv.o i810_context.o $(L_TARGET)
mga.o: mga_drv.o mga_context.o mga_dma.o mga_bufs.o $(L_TARGET)
$(LD) $(LD_RFLAG) -r -o $@ mga_drv.o mga_bufs.o mga_dma.o mga_context.o mga_state.o -L. -ldrm
+r128.o: r128_drv.o r128_context.o $(L_TARGET)
+ $(LD) $(LD_RFLAG) -r -o $@ r128_drv.o r128_context.o -L. -ldrm
diff --git a/linux/Makefile.linux b/linux/Makefile.linux
index 9810fe3d..ecc196bd 100644
--- a/linux/Makefile.linux
+++ b/linux/Makefile.linux
@@ -47,7 +47,7 @@
# **** End of SMP/MODVERSIONS detection
-MODS= gamma.o tdfx.o
+MODS= gamma.o tdfx.o r128.o
LIBS= libdrm.a
PROGS= drmstat
@@ -61,6 +61,9 @@ GAMMAHEADERS= gamma_drv.h $(DRMHEADERS)
TDFXOBJS= tdfx_drv.o tdfx_context.o
TDFXHEADERS= tdfx_drv.h $(DRMHEADERS)
+R128OBJS= r128_drv.o r128_dma.o r128_bufs.o r128_context.o
+R128HEADERS= r128_drv.h r128_drm.h $(DRMHEADERS)
+
PROGOBJS= drmstat.po xf86drm.po xf86drmHash.po xf86drmRandom.po sigio.po
PROGHEADERS= xf86drm.h $(DRMHEADERS)
@@ -167,6 +170,9 @@ gamma.o: $(GAMMAOBJS) $(LIBS)
tdfx.o: $(TDFXOBJS) $(LIBS)
$(LD) -r $^ -o $@
+r128.o: $(R128OBJS) $(LIBS)
+ $(LD) -r $^ -o $@
+
ifeq ($(AGP),1)
mga.o: $(MGAOBJS) $(LIBS)
$(LD) -r $^ -o $@
@@ -196,6 +202,7 @@ ChangeLog:
$(DRMOBJS): $(DRMHEADERS)
$(GAMMAOBJS): $(GAMMAHEADERS)
$(TDFXOBJS): $(TDFXHEADERS)
+$(R128OBJS): $(R128HEADERS)
ifeq ($(AGP),1)
$(MGAOBJS): $(MGAHEADERS)
$(I810OBJS): $(I810HEADERS)
diff --git a/linux/agpsupport.c b/linux/agpsupport.c
index 262d63ad..c89c3e25 100644
--- a/linux/agpsupport.c
+++ b/linux/agpsupport.c
@@ -238,6 +238,8 @@ int drm_agp_bind(struct inode *inode, struct file *filp, unsigned int cmd,
page = (request.offset + PAGE_SIZE - 1) / PAGE_SIZE;
if ((retcode = drm_bind_agp(entry->memory, page))) return retcode;
entry->bound = dev->agp->base + (page << PAGE_SHIFT);
+ DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
+ dev->agp->base, entry->bound);
return 0;
}
diff --git a/linux/drm.h b/linux/drm.h
index 15491aee..c8c5581d 100644
--- a/linux/drm.h
+++ b/linux/drm.h
@@ -72,9 +72,10 @@ typedef struct drm_clip_rect {
unsigned short y2;
} drm_clip_rect_t;
-/* Seperate include files for the i810/mga specific structures */
+/* Separate include files for the i810/mga/r128 specific structures */
#include "mga_drm.h"
#include "i810_drm.h"
+#include "r128_drm.h"
typedef struct drm_version {
int version_major; /* Major version */
@@ -297,7 +298,7 @@ typedef struct drm_agp_info {
#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t)
#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t)
-#define DRM_IOCTL_GET_MAGIC DRM_IOW( 0x02, drm_auth_t)
+#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t)
#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t)
@@ -328,11 +329,11 @@ typedef struct drm_agp_info {
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
-#define DRM_IOCTL_AGP_ENABLE DRM_IOR( 0x32, drm_agp_mode_t)
-#define DRM_IOCTL_AGP_INFO DRM_IOW( 0x33, drm_agp_info_t)
+#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t)
+#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t)
#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t)
#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_BIND DRM_IOWR(0x36, drm_agp_binding_t)
+#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t)
#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t)
/* Mga specific ioctls */
@@ -342,12 +343,23 @@ typedef struct drm_agp_info {
#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x43, drm_mga_iload_t)
#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x44, drm_mga_vertex_t)
#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x45, drm_lock_t )
+#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t)
/* I810 specific ioctls */
#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t)
#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t)
-#define DRM_IOCTL_I810_DMA DRM_IOW( 0x42, drm_i810_general_t)
+#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t)
#define DRM_IOCTL_I810_FLUSH DRM_IO ( 0x43)
#define DRM_IOCTL_I810_GETAGE DRM_IO ( 0x44)
+#define DRM_IOCTL_I810_GETBUF DRM_IOW( 0x45, drm_i810_dma_t)
+#define DRM_IOCTL_I810_SWAP DRM_IO ( 0x46)
+
+/* Rage 128 specific ioctls */
+#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
+#define DRM_IOCTL_R128_RESET DRM_IO( 0x41)
+#define DRM_IOCTL_R128_FLUSH DRM_IO( 0x42)
+#define DRM_IOCTL_R128_CCEIDL DRM_IO( 0x43)
+#define DRM_IOCTL_R128_PACKET DRM_IOW( 0x44, drm_r128_packet_t)
+#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x45, drm_r128_vertex_t)
#endif
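The GET_MAGIC and AGP ioctl numbers above are corrected to encode the real data direction: DRM_IOW for arguments that only travel from user space into the kernel, DRM_IOR for results the kernel writes back, DRM_IOWR for both. From the caller's side (field names as declared elsewhere in drm.h, agp_mode_bits a placeholder):

    drm_auth_t     auth;
    drm_agp_mode_t mode;

    ioctl(fd, DRM_IOCTL_GET_MAGIC, &auth);      /* kernel -> user:  _IOR */

    mode.mode = agp_mode_bits;                  /* user -> kernel:  _IOW */
    ioctl(fd, DRM_IOCTL_AGP_ENABLE, &mode);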
diff --git a/linux/drmP.h b/linux/drmP.h
index ecf50436..350d1ef9 100644
--- a/linux/drmP.h
+++ b/linux/drmP.h
@@ -229,8 +229,8 @@ typedef struct drm_magic_entry {
} drm_magic_entry_t;
typedef struct drm_magic_head {
- struct drm_magic_entry *head;
- struct drm_magic_entry *tail;
+ struct drm_magic_entry *head;
+ struct drm_magic_entry *tail;
} drm_magic_head_t;
typedef struct drm_vma_entry {
@@ -263,16 +263,15 @@ typedef struct drm_buf {
DRM_LIST_RECLAIM = 5
} list; /* Which list we're on */
-
- void *dev_private;
- int dev_priv_size;
-
#if DRM_DMA_HISTOGRAM
cycles_t time_queued; /* Queued to kernel DMA queue */
cycles_t time_dispatched; /* Dispatched to hardware */
cycles_t time_completed; /* Completed by hardware */
cycles_t time_freed; /* Back on freelist */
#endif
+
+	int		  dev_priv_size;	 /* Size of buffer private storage */
+ void *dev_private; /* Per-buffer private storage */
} drm_buf_t;
#if DRM_DMA_HISTOGRAM
diff --git a/linux/gamma_dma.c b/linux/gamma_dma.c
index 3372f51e..eb78c037 100644
--- a/linux/gamma_dma.c
+++ b/linux/gamma_dma.c
@@ -88,13 +88,31 @@ static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
}
-static inline void gamma_dma_quiescent(drm_device_t *dev)
+static inline void gamma_dma_quiescent_single(drm_device_t *dev)
{
while (GAMMA_READ(GAMMA_DMACOUNT))
;
while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
;
+
+ GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
+ GAMMA_WRITE(GAMMA_SYNC, 0);
+
+ do {
+ while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
+ ;
+ } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
+}
+
+static inline void gamma_dma_quiescent_dual(drm_device_t *dev)
+{
+ while (GAMMA_READ(GAMMA_DMACOUNT))
+ ;
+ while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
+ ;
+
GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);
+
GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
GAMMA_WRITE(GAMMA_SYNC, 0);
@@ -104,7 +122,6 @@ static inline void gamma_dma_quiescent(drm_device_t *dev)
;
} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
-
/* Read from second MX */
do {
while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
@@ -789,8 +806,13 @@ int gamma_lock(struct inode *inode, struct file *filp, unsigned int cmd,
if (!ret) {
if (lock.flags & _DRM_LOCK_READY)
gamma_dma_ready(dev);
- if (lock.flags & _DRM_LOCK_QUIESCENT)
- gamma_dma_quiescent(dev);
+ if (lock.flags & _DRM_LOCK_QUIESCENT) {
+ if (gamma_found() == 1) {
+ gamma_dma_quiescent_single(dev);
+ } else {
+ gamma_dma_quiescent_dual(dev);
+ }
+ }
}
DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
diff --git a/linux/gamma_drv.c b/linux/gamma_drv.c
index c6838cb7..d42cf4ae 100644
--- a/linux/gamma_drv.c
+++ b/linux/gamma_drv.c
@@ -32,6 +32,7 @@
#define EXPORT_SYMTAB
#include "drmP.h"
#include "gamma_drv.h"
+#include <linux/pci.h>
EXPORT_SYMBOL(gamma_init);
EXPORT_SYMBOL(gamma_cleanup);
@@ -99,10 +100,13 @@ static drm_ioctl_desc_t gamma_ioctls[] = {
int init_module(void);
void cleanup_module(void);
static char *gamma = NULL;
+static int devices = 0;
MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas.");
MODULE_DESCRIPTION("3dlabs GMX 2000");
MODULE_PARM(gamma, "s");
+MODULE_PARM(devices, "i");
+MODULE_PARM_DESC(devices, "devices=x, where x is the number of MX chips on your card\n");
/* init_module is called when insmod is used to load the module */
@@ -317,6 +321,34 @@ static int gamma_takedown(drm_device_t *dev)
return 0;
}
+int gamma_found(void)
+{
+ return devices;
+}
+
+int gamma_find_devices(void)
+{
+ struct pci_dev *d = NULL, *one = NULL, *two = NULL;
+
+ d = pci_find_device(PCI_VENDOR_ID_3DLABS,PCI_DEVICE_ID_3DLABS_GAMMA,d);
+ if (!d) return 0;
+
+ one = pci_find_device(PCI_VENDOR_ID_3DLABS,PCI_DEVICE_ID_3DLABS_MX,d);
+ if (!one) return 0;
+
+ /* Make sure it's on the same card, if not - no MX's found */
+ if (PCI_SLOT(d->devfn) != PCI_SLOT(one->devfn)) return 0;
+
+ two = pci_find_device(PCI_VENDOR_ID_3DLABS,PCI_DEVICE_ID_3DLABS_MX,one);
+ if (!two) return 1;
+
+ /* Make sure it's on the same card, if not - only 1 MX found */
+ if (PCI_SLOT(d->devfn) != PCI_SLOT(two->devfn)) return 1;
+
+ /* Two MX's found - we don't currently support more than 2 */
+ return 2;
+}
+
/* gamma_init is called via init_module at module load time, or via
* linux/init/main.c (this is not currently supported). */
@@ -334,6 +366,8 @@ int gamma_init(void)
#ifdef MODULE
drm_parse_options(gamma);
#endif
+ devices = gamma_find_devices();
+ if (devices == 0) return -1;
if ((retcode = misc_register(&gamma_misc))) {
DRM_ERROR("Cannot register \"%s\"\n", GAMMA_NAME);
@@ -345,13 +379,14 @@ int gamma_init(void)
drm_mem_init();
drm_proc_init(dev);
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d with %d MX devices\n",
GAMMA_NAME,
GAMMA_MAJOR,
GAMMA_MINOR,
GAMMA_PATCHLEVEL,
GAMMA_DATE,
- gamma_misc.minor);
+ gamma_misc.minor,
+ devices);
return 0;
}
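gamma_find_devices() counts the MX rasterizer chips sitting on the same card as the GAMMA bridge by walking pci_find_device(), whose third argument is the device to continue the scan after, and comparing PCI slot numbers. A hypothetical generalization that is not capped at two MXs:

    /* hypothetical variant: count every MX sharing a slot with the bridge */
    static int gamma_count_mx(void)
    {
            struct pci_dev *bridge, *mx = NULL;
            int count = 0;

            bridge = pci_find_device(PCI_VENDOR_ID_3DLABS,
                                     PCI_DEVICE_ID_3DLABS_GAMMA, NULL);
            if (!bridge)
                    return 0;

            while ((mx = pci_find_device(PCI_VENDOR_ID_3DLABS,
                                         PCI_DEVICE_ID_3DLABS_MX, mx)) != NULL)
                    if (PCI_SLOT(mx->devfn) == PCI_SLOT(bridge->devfn))
                            count++;
            return count;
    }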
diff --git a/linux/gamma_drv.h b/linux/gamma_drv.h
index 622cedfe..55dc26be 100644
--- a/linux/gamma_drv.h
+++ b/linux/gamma_drv.h
@@ -51,5 +51,7 @@ extern int gamma_irq_install(drm_device_t *dev, int irq);
extern int gamma_irq_uninstall(drm_device_t *dev);
extern int gamma_control(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
+extern int gamma_find_devices(void);
+extern int gamma_found(void);
#endif
diff --git a/linux/i810_bufs.c b/linux/i810_bufs.c
index 315f3437..fa1f84dc 100644
--- a/linux/i810_bufs.c
+++ b/linux/i810_bufs.c
@@ -119,6 +119,7 @@ int i810_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
buf->dev_private = drm_alloc(sizeof(drm_i810_buf_priv_t),
DRM_MEM_BUFS);
buf->dev_priv_size = sizeof(drm_i810_buf_priv_t);
+ memset(buf->dev_private, 0, sizeof(drm_i810_buf_priv_t));
#if DRM_DMA_HISTOGRAM
buf->time_queued = 0;
@@ -331,113 +332,3 @@ int i810_freebufs(struct inode *inode, struct file *filp, unsigned int cmd,
return 0;
}
-int i810_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->dev;
- drm_device_dma_t *dma = dev->dma;
- int retcode = 0;
- const int zero = 0;
- unsigned long virtual;
- unsigned long address;
- drm_buf_map_t request;
- int i;
-
- if (!dma) return -EINVAL;
-
- DRM_DEBUG("\n");
-
- spin_lock(&dev->count_lock);
- if (atomic_read(&dev->buf_alloc)) {
- spin_unlock(&dev->count_lock);
- DRM_DEBUG("Busy\n");
- return -EBUSY;
- }
- ++dev->buf_use; /* Can't allocate more after this call */
- spin_unlock(&dev->count_lock);
-
- copy_from_user_ret(&request,
- (drm_buf_map_t *)arg,
- sizeof(request),
- -EFAULT);
- DRM_DEBUG("dma->flags : %lx\n", dma->flags);
- if (request.count >= dma->buf_count) {
- if(dma->flags & _DRM_DMA_USE_AGP) {
- drm_i810_private_t *dev_priv =
- (drm_i810_private_t *)dev->dev_private;
- drm_map_t *map = NULL;
-
- map = dev->maplist[dev_priv->buffer_map_idx];
- if (!map) {
- DRM_DEBUG("map is null\n");
- retcode = -EINVAL;
- goto done;
- }
- DRM_DEBUG("map->offset : %lx\n", map->offset);
- DRM_DEBUG("map->size : %lx\n", map->size);
- DRM_DEBUG("map->type : %d\n", map->type);
- DRM_DEBUG("map->flags : %x\n", map->flags);
- DRM_DEBUG("map->handle : %lx\n", map->handle);
- DRM_DEBUG("map->mtrr : %d\n", map->mtrr);
- down(&current->mm->mmap_sem);
- virtual = do_mmap(filp, 0, map->size,
- PROT_READ|PROT_WRITE,
- MAP_SHARED,
- (unsigned long)map->offset);
-
- up(&current->mm->mmap_sem);
- } else {
- down(&current->mm->mmap_sem);
- virtual = do_mmap(filp, 0, dma->byte_count,
- PROT_READ|PROT_WRITE, MAP_SHARED, 0);
- up(&current->mm->mmap_sem);
- }
- if (virtual > -1024UL) {
- /* Real error */
- DRM_DEBUG("mmap error\n");
- retcode = (signed long)virtual;
- goto done;
- }
- request.virtual = (void *)virtual;
-
- for (i = 0; i < dma->buf_count; i++) {
- if (copy_to_user(&request.list[i].idx,
- &dma->buflist[i]->idx,
- sizeof(request.list[0].idx))) {
- retcode = -EFAULT;
- goto done;
- }
- if (copy_to_user(&request.list[i].total,
- &dma->buflist[i]->total,
- sizeof(request.list[0].total))) {
- retcode = -EFAULT;
- goto done;
- }
- if (copy_to_user(&request.list[i].used,
- &zero,
- sizeof(zero))) {
- retcode = -EFAULT;
- goto done;
- }
- address = virtual + dma->buflist[i]->offset;
- if (copy_to_user(&request.list[i].address,
- &address,
- sizeof(address))) {
- retcode = -EFAULT;
- goto done;
- }
- }
- }
- done:
- request.count = dma->buf_count;
- DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
-
- copy_to_user_ret((drm_buf_map_t *)arg,
- &request,
- sizeof(request),
- -EFAULT);
-
- DRM_DEBUG("retcode : %d\n", retcode);
- return retcode;
-}
diff --git a/linux/i810_dma.c b/linux/i810_dma.c
index d82ed049..94f35b61 100644
--- a/linux/i810_dma.c
+++ b/linux/i810_dma.c
@@ -36,8 +36,17 @@
#include <linux/interrupt.h> /* For task queue support */
-#define I810_BUF_FREE 1
-#define I810_BUF_USED 0
+/* in case we don't have a 2.3.99-pre6 kernel or later: */
+#ifndef VM_DONTCOPY
+#define VM_DONTCOPY 0
+#endif
+
+#define I810_BUF_FREE 2
+#define I810_BUF_CLIENT 1
+#define I810_BUF_HARDWARE 0
+
+#define I810_BUF_UNMAPPED 0
+#define I810_BUF_MAPPED 1
#define I810_REG(reg) 2
#define I810_BASE(reg) ((unsigned long) \
@@ -90,7 +99,7 @@ static inline void i810_print_status_page(drm_device_t *dev)
DRM_DEBUG( "hw_status: Reserved : %x\n", temp[3]);
DRM_DEBUG( "hw_status: Driver Counter : %d\n", temp[5]);
for(i = 6; i < dma->buf_count + 6; i++) {
- DRM_DEBUG( "buffer status idx : %d used: %d\n", i - 6, temp[i]);
+ DRM_DEBUG( "buffer status idx : %d used: %d\n", i - 6, temp[i]);
}
}
@@ -107,7 +116,7 @@ static drm_buf_t *i810_freelist_get(drm_device_t *dev)
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
/* In use is already a pointer */
used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
- I810_BUF_USED);
+ I810_BUF_CLIENT);
if(used == I810_BUF_FREE) {
return buf;
}
@@ -125,8 +134,8 @@ static int i810_freelist_put(drm_device_t *dev, drm_buf_t *buf)
int used;
/* In use is already a pointer */
- used = cmpxchg(buf_priv->in_use, I810_BUF_USED, I810_BUF_FREE);
- if(used != I810_BUF_USED) {
+ used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
+ if(used != I810_BUF_CLIENT) {
DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
return -EINVAL;
}
@@ -134,26 +143,114 @@ static int i810_freelist_put(drm_device_t *dev, drm_buf_t *buf)
return 0;
}
-static int i810_dma_get_buffers(drm_device_t *dev, drm_dma_t *d)
+static struct file_operations i810_buffer_fops = {
+ open: i810_open,
+ flush: drm_flush,
+ release: i810_release,
+ ioctl: i810_ioctl,
+ mmap: i810_mmap_buffers,
+ read: drm_read,
+ fasync: drm_fasync,
+ poll: drm_poll,
+};
+
+int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i810_private_t *dev_priv = dev->dev_private;
+ drm_buf_t *buf = dev_priv->mmap_buffer;
+ drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+
+ vma->vm_flags |= (VM_IO | VM_DONTCOPY);
+ vma->vm_file = filp;
+
+ buf_priv->currently_mapped = I810_BUF_MAPPED;
+
+ if (remap_page_range(vma->vm_start,
+ VM_OFFSET(vma),
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) return -EAGAIN;
+ return 0;
+}
+
+static int i810_map_buffer(drm_buf_t *buf, struct file *filp)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+ drm_i810_private_t *dev_priv = dev->dev_private;
+ struct file_operations *old_fops;
+ int retcode = 0;
+
+ if(buf_priv->currently_mapped == I810_BUF_MAPPED) return -EINVAL;
+ down(&current->mm->mmap_sem);
+ old_fops = filp->f_op;
+ filp->f_op = &i810_buffer_fops;
+ dev_priv->mmap_buffer = buf;
+ buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total,
+ PROT_READ|PROT_WRITE,
+ MAP_SHARED,
+ buf->bus_address);
+ dev_priv->mmap_buffer = NULL;
+ filp->f_op = old_fops;
+ if ((unsigned long)buf_priv->virtual > -1024UL) {
+ /* Real error */
+ DRM_DEBUG("mmap error\n");
+ retcode = (signed int)buf_priv->virtual;
+ buf_priv->virtual = 0;
+ }
+ up(&current->mm->mmap_sem);
+ return retcode;
+}
+
+static int i810_unmap_buffer(drm_buf_t *buf)
+{
+ drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+ int retcode = 0;
+
+ if(buf_priv->currently_mapped != I810_BUF_MAPPED) return -EINVAL;
+ down(&current->mm->mmap_sem);
+ retcode = do_munmap((unsigned long)buf_priv->virtual,
+ (size_t) buf->total);
+ buf_priv->currently_mapped = I810_BUF_UNMAPPED;
+ buf_priv->virtual = 0;
+ up(&current->mm->mmap_sem);
+
+ return retcode;
+}
+
+static int i810_dma_get_buffer(drm_device_t *dev, drm_i810_dma_t *d,
+ struct file *filp)
{
- int i;
+ drm_file_t *priv = filp->private_data;
drm_buf_t *buf;
+ drm_i810_buf_priv_t *buf_priv;
+ int retcode = 0;
- for (i = d->granted_count; i < d->request_count; i++) {
- buf = i810_freelist_get(dev);
- if (!buf) break;
- buf->pid = current->pid;
- copy_to_user_ret(&d->request_indices[i],
- &buf->idx,
- sizeof(buf->idx),
- -EFAULT);
- copy_to_user_ret(&d->request_sizes[i],
- &buf->total,
- sizeof(buf->total),
- -EFAULT);
- ++d->granted_count;
+ buf = i810_freelist_get(dev);
+ if (!buf) {
+ retcode = -ENOMEM;
+ DRM_DEBUG("%s retcode %d\n", __FUNCTION__, retcode);
+ goto out_get_buf;
}
- return 0;
+
+ retcode = i810_map_buffer(buf, filp);
+ if(retcode) {
+ i810_freelist_put(dev, buf);
+ DRM_DEBUG("mapbuf failed in %s retcode %d\n",
+ __FUNCTION__, retcode);
+ goto out_get_buf;
+ }
+ buf->pid = priv->pid;
+ buf_priv = buf->dev_private;
+ d->granted = 1;
+ d->request_idx = buf->idx;
+ d->request_size = buf->total;
+ d->virtual = buf_priv->virtual;
+
+out_get_buf:
+ return retcode;
}
static unsigned long i810_alloc_page(drm_device_t *dev)
@@ -184,7 +281,10 @@ static void i810_free_page(drm_device_t *dev, unsigned long page)
static int i810_dma_cleanup(drm_device_t *dev)
{
+ drm_device_dma_t *dma = dev->dma;
+
if(dev->dev_private) {
+ int i;
drm_i810_private_t *dev_priv =
(drm_i810_private_t *) dev->dev_private;
@@ -200,6 +300,12 @@ static int i810_dma_cleanup(drm_device_t *dev)
drm_free(dev->dev_private, sizeof(drm_i810_private_t),
DRM_MEM_DRIVER);
dev->dev_private = NULL;
+
+ for (i = 0; i < dma->buf_count; i++) {
+ drm_buf_t *buf = dma->buflist[ i ];
+ drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+ drm_ioremapfree(buf_priv->kernel_virtual, buf->total);
+ }
}
return 0;
}
@@ -210,6 +316,7 @@ static int i810_wait_ring(drm_device_t *dev, int n)
drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
int iters = 0;
unsigned long end;
+ unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
end = jiffies + (HZ*3);
while (ring->space < n) {
@@ -217,9 +324,11 @@ static int i810_wait_ring(drm_device_t *dev, int n)
ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
ring->space = ring->head - (ring->tail+8);
+ if (ring->space < 0) ring->space += ring->Size;
- if (ring->space < 0) ring->space += ring->Size;
-
+ if (ring->head != last_head)
+ end = jiffies + (HZ*3);
+
iters++;
if((signed)(end - jiffies) <= 0) {
DRM_ERROR("space: %d wanted %d\n", ring->space, n);
@@ -249,9 +358,9 @@ static int i810_freelist_init(drm_device_t *dev)
{
drm_device_dma_t *dma = dev->dma;
drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
- u8 *hw_status = (u8 *)dev_priv->hw_status_page;
- int i;
int my_idx = 24;
+ u32 *hw_status = (u32 *)(dev_priv->hw_status_page + my_idx);
+ int i;
if(dma->buf_count > 1019) {
/* Not enough space in the status page for the freelist */
@@ -262,11 +371,14 @@ static int i810_freelist_init(drm_device_t *dev)
drm_buf_t *buf = dma->buflist[ i ];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
- buf_priv->in_use = hw_status + my_idx;
- DRM_DEBUG("buf_priv->in_use : %p\n", buf_priv->in_use);
- *buf_priv->in_use = I810_BUF_FREE;
+ buf_priv->in_use = hw_status++;
buf_priv->my_use_idx = my_idx;
my_idx += 4;
+
+ *buf_priv->in_use = I810_BUF_FREE;
+
+ buf_priv->kernel_virtual = drm_ioremap(buf->bus_address,
+ buf->total);
}
return 0;
}
@@ -300,9 +412,11 @@ static int i810_dma_initialize(drm_device_t *dev,
dev_priv->ring.Start = init->ring_start;
dev_priv->ring.End = init->ring_end;
dev_priv->ring.Size = init->ring_size;
+
dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base +
init->ring_start,
init->ring_size);
+
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
if (dev_priv->ring.virtual_start == NULL) {
@@ -311,6 +425,17 @@ static int i810_dma_initialize(drm_device_t *dev,
" ring buffer\n");
return -ENOMEM;
}
+
+ dev_priv->w = init->w;
+ dev_priv->h = init->h;
+ dev_priv->pitch = init->pitch;
+ dev_priv->back_offset = init->back_offset;
+ dev_priv->depth_offset = init->depth_offset;
+
+ dev_priv->front_di1 = init->front_offset | init->pitch_bits;
+ dev_priv->back_di1 = init->back_offset | init->pitch_bits;
+ dev_priv->zi1 = init->depth_offset | init->pitch_bits;
+
/* Program Hardware Status Page */
dev_priv->hw_status_page = i810_alloc_page(dev);
@@ -365,37 +490,270 @@ int i810_dma_init(struct inode *inode, struct file *filp,
return retcode;
}
-static void i810_dma_dispatch_general(drm_device_t *dev, drm_buf_t *buf,
- int used )
+
+
+/* Most efficient way to verify state for the i810 is as it is
+ * emitted. Non-conformant state is silently dropped.
+ *
+ * Use 'volatile' & local var tmp to force the emitted values to be
+ * identical to the verified ones.
+ */
+static void i810EmitContextVerified( drm_device_t *dev,
+ volatile unsigned int *code )
+{
+ drm_i810_private_t *dev_priv = dev->dev_private;
+ int i, j = 0;
+ unsigned int tmp;
+ RING_LOCALS;
+
+ BEGIN_LP_RING( I810_CTX_SETUP_SIZE );
+
+ OUT_RING( GFX_OP_COLOR_FACTOR );
+ OUT_RING( code[I810_CTXREG_CF1] );
+
+ OUT_RING( GFX_OP_STIPPLE );
+ OUT_RING( code[I810_CTXREG_ST1] );
+
+ for ( i = 4 ; i < I810_CTX_SETUP_SIZE ; i++ ) {
+ tmp = code[i];
+
+ if ((tmp & (7<<29)) == (3<<29) &&
+ (tmp & (0x1f<<24)) < (0x1d<<24))
+ {
+ OUT_RING( tmp );
+ j++;
+ }
+ }
+
+ if (j & 1)
+ OUT_RING( 0 );
+
+ ADVANCE_LP_RING();
+}
+
+static void i810EmitTexVerified( drm_device_t *dev,
+ volatile unsigned int *code )
+{
+ drm_i810_private_t *dev_priv = dev->dev_private;
+ int i, j = 0;
+ unsigned int tmp;
+ RING_LOCALS;
+
+ BEGIN_LP_RING( I810_TEX_SETUP_SIZE );
+
+ OUT_RING( GFX_OP_MAP_INFO );
+ OUT_RING( code[I810_TEXREG_MI1] );
+ OUT_RING( code[I810_TEXREG_MI2] );
+ OUT_RING( code[I810_TEXREG_MI3] );
+
+ for ( i = 4 ; i < I810_TEX_SETUP_SIZE ; i++ ) {
+ tmp = code[i];
+
+ if ((tmp & (7<<29)) == (3<<29) &&
+ (tmp & (0x1f<<24)) < (0x1d<<24))
+ {
+ OUT_RING( tmp );
+ j++;
+ }
+ }
+
+ if (j & 1)
+ OUT_RING( 0 );
+
+ ADVANCE_LP_RING();
+}
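The two verifier loops above share one acceptance test. Pulled out as a standalone predicate (an illustrative sketch, not part of this patch; the helper name is invented), it reads:

static inline int i810_state_dword_ok(unsigned int dword)
{
	/* Accept only 3D GFX_OP-class instructions (type 3 in bits 31:29)
	 * whose sub-opcode (bits 28:24) is below 0x1d; anything else --
	 * notably GFX_OP_PRIMITIVE, which uses sub-opcode 0x1f -- is
	 * silently dropped, as the comment before i810EmitContextVerified()
	 * promises.
	 */
	return (dword & (7 << 29)) == (3 << 29) &&
	       (dword & (0x1f << 24)) < (0x1d << 24);
}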
+
+
+/* Need to do some additional checking when setting the dest buffer.
+ */
+static void i810EmitDestVerified( drm_device_t *dev,
+ volatile unsigned int *code )
+{
+ drm_i810_private_t *dev_priv = dev->dev_private;
+ unsigned int tmp;
+ RING_LOCALS;
+
+ BEGIN_LP_RING( I810_DEST_SETUP_SIZE + 2 );
+
+ tmp = code[I810_DESTREG_DI1];
+ if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
+ OUT_RING( CMD_OP_DESTBUFFER_INFO );
+ OUT_RING( tmp );
+ } else
+ DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
+ tmp, dev_priv->front_di1, dev_priv->back_di1);
+
+ /* invariant:
+ */
+ OUT_RING( CMD_OP_Z_BUFFER_INFO );
+ OUT_RING( dev_priv->zi1 );
+
+ OUT_RING( GFX_OP_DESTBUFFER_VARS );
+ OUT_RING( code[I810_DESTREG_DV1] );
+
+ OUT_RING( GFX_OP_DRAWRECT_INFO );
+ OUT_RING( code[I810_DESTREG_DR1] );
+ OUT_RING( code[I810_DESTREG_DR2] );
+ OUT_RING( code[I810_DESTREG_DR3] );
+ OUT_RING( code[I810_DESTREG_DR4] );
+ OUT_RING( 0 );
+
+ ADVANCE_LP_RING();
+}
+
+
+
+static void i810EmitState( drm_device_t *dev )
{
- drm_i810_private_t *dev_priv = dev->dev_private;
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
- unsigned long address = (unsigned long)buf->bus_address;
- unsigned long start = address - dev->agp->base;
- RING_LOCALS;
+ drm_i810_private_t *dev_priv = dev->dev_private;
+ drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int dirty = sarea_priv->dirty;
- dev_priv->counter++;
- DRM_DEBUG( "dispatch counter : %ld\n", dev_priv->counter);
- DRM_DEBUG( "i810_dma_dispatch\n");
- DRM_DEBUG( "start : 0x%lx\n", start);
- DRM_DEBUG( "used : 0x%x\n", used);
- DRM_DEBUG( "start + used - 4 : 0x%lx\n", start + used - 4);
- i810_kernel_lost_context(dev);
+ if (dirty & I810_UPLOAD_BUFFERS) {
+ i810EmitDestVerified( dev, sarea_priv->BufferState );
+ sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;
+ }
- BEGIN_LP_RING(10);
- OUT_RING( CMD_OP_BATCH_BUFFER );
- OUT_RING( start | BB1_PROTECTED );
- OUT_RING( start + used - 4 );
- OUT_RING( CMD_STORE_DWORD_IDX );
- OUT_RING( 20 );
- OUT_RING( dev_priv->counter );
- OUT_RING( CMD_STORE_DWORD_IDX );
- OUT_RING( buf_priv->my_use_idx );
- OUT_RING( I810_BUF_FREE );
- OUT_RING( CMD_REPORT_HEAD );
- ADVANCE_LP_RING();
+ if (dirty & I810_UPLOAD_CTX) {
+ i810EmitContextVerified( dev, sarea_priv->ContextState );
+ sarea_priv->dirty &= ~I810_UPLOAD_CTX;
+ }
+
+ if (dirty & I810_UPLOAD_TEX0) {
+ i810EmitTexVerified( dev, sarea_priv->TexState[0] );
+ sarea_priv->dirty &= ~I810_UPLOAD_TEX0;
+ }
+
+ if (dirty & I810_UPLOAD_TEX1) {
+ i810EmitTexVerified( dev, sarea_priv->TexState[1] );
+ sarea_priv->dirty &= ~I810_UPLOAD_TEX1;
+ }
}
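i810EmitState() is the consumer end of the state mirror this patch adds to the shared area. A hedged sketch of the producer side a client is expected to implement (the function name is invented; the type, fields and flag are the ones defined in i810_drm.h later in this patch):

static void example_upload_context(drm_i810_sarea_t *sarea,
				   const unsigned int ctx[I810_CTX_SETUP_SIZE])
{
	int i;

	/* Mirror the register values into the shared area, then flag them
	 * dirty so the next vertex dispatch emits them via i810EmitState().
	 */
	for (i = 0; i < I810_CTX_SETUP_SIZE; i++)
		sarea->ContextState[i] = ctx[i];
	sarea->dirty |= I810_UPLOAD_CTX;
}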
+
+
+/* need to verify
+ */
+static void i810_dma_dispatch_clear( drm_device_t *dev, int flags,
+ unsigned int clear_color,
+ unsigned int clear_zval )
+{
+ drm_i810_private_t *dev_priv = dev->dev_private;
+ drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ int nbox = sarea_priv->nbox;
+ drm_clip_rect_t *pbox = sarea_priv->boxes;
+ int pitch = dev_priv->pitch;
+ int cpp = 2;
+ int i;
+ RING_LOCALS;
+
+ i810_kernel_lost_context(dev);
+
+ if (nbox > I810_NR_SAREA_CLIPRECTS)
+ nbox = I810_NR_SAREA_CLIPRECTS;
+
+ for (i = 0 ; i < nbox ; i++, pbox++) {
+ unsigned int x = pbox->x1;
+ unsigned int y = pbox->y1;
+ unsigned int width = (pbox->x2 - x) * cpp;
+ unsigned int height = pbox->y2 - y;
+ unsigned int start = y * pitch + x * cpp;
+
+ if (pbox->x1 > pbox->x2 ||
+ pbox->y1 > pbox->y2 ||
+ pbox->x2 > dev_priv->w ||
+ pbox->y2 > dev_priv->h)
+ continue;
+
+ if ( flags & I810_FRONT ) {
+ DRM_DEBUG("clear front\n");
+ BEGIN_LP_RING( 6 );
+ OUT_RING( BR00_BITBLT_CLIENT |
+ BR00_OP_COLOR_BLT | 0x3 );
+ OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
+ OUT_RING( (height << 16) | width );
+ OUT_RING( start );
+ OUT_RING( clear_color );
+ OUT_RING( 0 );
+ ADVANCE_LP_RING();
+ }
+
+ if ( flags & I810_BACK ) {
+ DRM_DEBUG("clear back\n");
+ BEGIN_LP_RING( 6 );
+ OUT_RING( BR00_BITBLT_CLIENT |
+ BR00_OP_COLOR_BLT | 0x3 );
+ OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
+ OUT_RING( (height << 16) | width );
+ OUT_RING( dev_priv->back_offset + start );
+ OUT_RING( clear_color );
+ OUT_RING( 0 );
+ ADVANCE_LP_RING();
+ }
+
+ if ( flags & I810_DEPTH ) {
+ DRM_DEBUG("clear depth\n");
+ BEGIN_LP_RING( 6 );
+ OUT_RING( BR00_BITBLT_CLIENT |
+ BR00_OP_COLOR_BLT | 0x3 );
+ OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
+ OUT_RING( (height << 16) | width );
+ OUT_RING( dev_priv->depth_offset + start );
+ OUT_RING( clear_zval );
+ OUT_RING( 0 );
+ ADVANCE_LP_RING();
+ }
+ }
+}
+
+static void i810_dma_dispatch_swap( drm_device_t *dev )
+{
+ drm_i810_private_t *dev_priv = dev->dev_private;
+ drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ int nbox = sarea_priv->nbox;
+ drm_clip_rect_t *pbox = sarea_priv->boxes;
+ int pitch = dev_priv->pitch;
+ int cpp = 2;
+ int ofs = dev_priv->back_offset;
+ int i;
+ RING_LOCALS;
+
+ DRM_DEBUG("swapbuffers\n");
+
+ i810_kernel_lost_context(dev);
+
+ if (nbox > I810_NR_SAREA_CLIPRECTS)
+ nbox = I810_NR_SAREA_CLIPRECTS;
+
+ for (i = 0 ; i < nbox; i++, pbox++)
+ {
+ unsigned int w = pbox->x2 - pbox->x1;
+ unsigned int h = pbox->y2 - pbox->y1;
+ unsigned int dst = pbox->x1*cpp + pbox->y1*pitch;
+ unsigned int start = ofs + dst;
+
+ if (pbox->x1 > pbox->x2 ||
+ pbox->y1 > pbox->y2 ||
+ pbox->x2 > dev_priv->w ||
+ pbox->y2 > dev_priv->h)
+ continue;
+
+ DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
+ pbox[i].x1, pbox[i].y1,
+ pbox[i].x2, pbox[i].y2);
+
+ BEGIN_LP_RING( 6 );
+ OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4 );
+ OUT_RING( pitch | (0xCC << 16));
+ OUT_RING( (h << 16) | (w * cpp));
+ OUT_RING( dst );
+ OUT_RING( pitch );
+ OUT_RING( start );
+ ADVANCE_LP_RING();
+ }
+}
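Both dispatch routines above drive the blitter with hard-coded raster ops: 0xF0 in the clear packets and 0xCC in the swap packet, placed in bits 23:16 of the second dword. Assuming these are the usual PATCOPY and SRCCOPY ROP codes (the macro names below are invented for readability and are not part of the driver):

#define I810_ROP_PATCOPY	0xF0	/* fill from the solid pattern -- clear */
#define I810_ROP_SRCCOPY	0xCC	/* straight copy from source -- swap    */

/* e.g. the clear packet's second dword would then read:
 *	OUT_RING( BR13_SOLID_PATTERN | (I810_ROP_PATCOPY << 16) | pitch );
 */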
+
+
static void i810_dma_dispatch_vertex(drm_device_t *dev,
drm_buf_t *buf,
int discard,
@@ -408,14 +766,29 @@ static void i810_dma_dispatch_vertex(drm_device_t *dev,
int nbox = sarea_priv->nbox;
unsigned long address = (unsigned long)buf->bus_address;
unsigned long start = address - dev->agp->base;
- int i = 0;
+ int i = 0, u;
RING_LOCALS;
-
+ i810_kernel_lost_context(dev);
+
if (nbox > I810_NR_SAREA_CLIPRECTS)
nbox = I810_NR_SAREA_CLIPRECTS;
-
- DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
+
+ if (discard) {
+ u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
+ I810_BUF_HARDWARE);
+ if(u != I810_BUF_CLIENT) {
+ DRM_DEBUG("xxxx 2\n");
+ }
+ }
+
+ if (used > 4*1024)
+ used = 0;
+
+ if (sarea_priv->dirty)
+ i810EmitState( dev );
+
+ DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
address, used, nbox);
dev_priv->counter++;
@@ -424,8 +797,20 @@ static void i810_dma_dispatch_vertex(drm_device_t *dev,
DRM_DEBUG( "start : %lx\n", start);
DRM_DEBUG( "used : %d\n", used);
DRM_DEBUG( "start + used - 4 : %ld\n", start + used - 4);
- i810_kernel_lost_context(dev);
+ if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
+ *(u32 *)buf_priv->virtual = (GFX_OP_PRIMITIVE |
+ sarea_priv->vertex_prim |
+ ((used/4)-2));
+
+ if (used & 4) {
+ *(u32 *)((u32)buf_priv->virtual + used) = 0;
+ used += 4;
+ }
+
+ i810_unmap_buffer(buf);
+ }
+
if (used) {
do {
if (i < nbox) {
@@ -433,7 +818,7 @@ static void i810_dma_dispatch_vertex(drm_device_t *dev,
OUT_RING( GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
SC_ENABLE );
OUT_RING( GFX_OP_SCISSOR_INFO );
- OUT_RING( box[i].x1 | (box[i].y1 << 16) );
+ OUT_RING( box[i].x1 | (box[i].y1<<16) );
OUT_RING( (box[i].x2-1) | ((box[i].y2-1)<<16) );
ADVANCE_LP_RING();
}
@@ -478,7 +863,9 @@ static void i810_dma_service(int irq, void *device, struct pt_regs *regs)
temp = temp & ~(0x6000);
if(temp != 0) I810_WRITE16(I810REG_INT_IDENTITY_R,
temp); /* Clear all interrupts */
-
+ else
+ return;
+
queue_task(&dev->tq, &tq_immediate);
mark_bh(IMMEDIATE_BH);
}
@@ -538,7 +925,7 @@ int i810_irq_install(drm_device_t *dev, int irq)
/* Install handler */
if ((retcode = request_irq(dev->irq,
i810_dma_service,
- 0,
+ SA_SHIRQ,
dev->devname,
dev))) {
down(&dev->struct_sem);
@@ -559,6 +946,9 @@ int i810_irq_uninstall(drm_device_t *dev)
int irq;
u16 temp;
+
+/* return 0; */
+
down(&dev->struct_sem);
irq = dev->irq;
dev->irq = 0;
@@ -617,10 +1007,15 @@ static inline void i810_dma_emit_flush(drm_device_t *dev)
RING_LOCALS;
i810_kernel_lost_context(dev);
+
BEGIN_LP_RING(2);
OUT_RING( CMD_REPORT_HEAD );
- OUT_RING( GFX_OP_USER_INTERRUPT );
+ OUT_RING( GFX_OP_USER_INTERRUPT );
ADVANCE_LP_RING();
+
+/* i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */
+/* atomic_set(&dev_priv->flush_done, 1); */
+/* wake_up_interruptible(&dev_priv->flush_queue); */
}
static inline void i810_dma_quiescent_emit(drm_device_t *dev)
@@ -629,13 +1024,17 @@ static inline void i810_dma_quiescent_emit(drm_device_t *dev)
RING_LOCALS;
i810_kernel_lost_context(dev);
- BEGIN_LP_RING(4);
+ BEGIN_LP_RING(4);
OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
OUT_RING( CMD_REPORT_HEAD );
- OUT_RING( GFX_OP_USER_INTERRUPT );
- OUT_RING( 0 );
+ OUT_RING( 0 );
+ OUT_RING( GFX_OP_USER_INTERRUPT );
ADVANCE_LP_RING();
+
+/* i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */
+/* atomic_set(&dev_priv->flush_done, 1); */
+/* wake_up_interruptible(&dev_priv->flush_queue); */
}
static void i810_dma_quiescent(drm_device_t *dev)
@@ -675,8 +1074,9 @@ static int i810_flush_queue(drm_device_t *dev)
{
DECLARE_WAITQUEUE(entry, current);
drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
unsigned long end;
- int ret = 0;
+ int i, ret = 0;
if(dev_priv == NULL) {
return 0;
@@ -701,7 +1101,21 @@ static int i810_flush_queue(drm_device_t *dev)
current->state = TASK_RUNNING;
remove_wait_queue(&dev_priv->flush_queue, &entry);
-
+
+
+ for (i = 0; i < dma->buf_count; i++) {
+ drm_buf_t *buf = dma->buflist[ i ];
+ drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+
+ int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
+ I810_BUF_FREE);
+
+ if (used == I810_BUF_HARDWARE)
+ DRM_DEBUG("reclaimed from HARDWARE\n");
+ if (used == I810_BUF_CLIENT)
+ DRM_DEBUG("still on client HARDWARE\n");
+ }
+
return ret;
}
@@ -712,20 +1126,23 @@ void i810_reclaim_buffers(drm_device_t *dev, pid_t pid)
int i;
if (!dma) return;
- if(dev->dev_private == NULL) return;
- if(dma->buflist == NULL) return;
+ if (!dev->dev_private) return;
+ if (!dma->buflist) return;
+
i810_flush_queue(dev);
for (i = 0; i < dma->buf_count; i++) {
drm_buf_t *buf = dma->buflist[ i ];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
- /* Only buffers that need to get reclaimed ever
- * get set to free
- */
if (buf->pid == pid && buf_priv) {
- cmpxchg(buf_priv->in_use,
- I810_BUF_USED, I810_BUF_FREE);
+ int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
+ I810_BUF_FREE);
+
+ if (used == I810_BUF_CLIENT)
+ DRM_DEBUG("reclaimed from client\n");
+ if(buf_priv->currently_mapped == I810_BUF_MAPPED)
+ buf_priv->currently_mapped = I810_BUF_UNMAPPED;
}
}
}
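Together with i810_dma_get_buffer(), i810_dma_dispatch_vertex() and i810_flush_queue() above, this reclaim path closes a small ownership state machine driven by cmpxchg() on buf_priv->in_use (the state constants live in i810_drv.h; the FREE-to-CLIENT step presumably happens inside i810_freelist_get(), which this patch does not show):

/*
 *  I810_BUF_FREE     --(getbuf ioctl / freelist_get)-->      I810_BUF_CLIENT
 *  I810_BUF_CLIENT   --(vertex dispatch with discard set)--> I810_BUF_HARDWARE
 *  I810_BUF_HARDWARE --(flush_queue after completion)-->     I810_BUF_FREE
 *  I810_BUF_CLIENT   --(reclaim_buffers on client exit)-->   I810_BUF_FREE
 */

A guarded transition, written out as a sketch, mirrors the one the vertex path performs:

static inline int i810_take_buf_for_hw(drm_i810_buf_priv_t *buf_priv)
{
	/* Succeeds only if the client still owned the buffer. */
	return cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
		       I810_BUF_HARDWARE) == I810_BUF_CLIENT;
}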
@@ -759,19 +1176,6 @@ int i810_lock(struct inode *inode, struct file *filp, unsigned int cmd,
*/
if (!ret) {
-#if 0
- if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
- != lock.context) {
- long j = jiffies - dev->lock.lock_time;
-
- if (j > 0 && j <= DRM_LOCK_SLICE) {
- /* Can't take lock if we just had it and
- there is contention. */
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(j);
- }
- }
-#endif
add_wait_queue(&dev->lock.lock_queue, &entry);
for (;;) {
if (!dev->lock.hw_lock) {
@@ -828,90 +1232,80 @@ int i810_flush_ioctl(struct inode *inode, struct file *filp,
return 0;
}
-static int i810DmaGeneral(drm_device_t *dev, drm_i810_general_t *args)
-{
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf = dma->buflist[ args->idx ];
-
- if (!args->used) {
- i810_freelist_put(dev, buf);
- } else {
- i810_dma_dispatch_general( dev, buf, args->used );
- atomic_add(args->used, &dma->total_bytes);
- atomic_inc(&dma->total_dmas);
- }
- return 0;
-}
-static int i810DmaVertex(drm_device_t *dev, drm_i810_vertex_t *args)
-{
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf = dma->buflist[ args->idx ];
- i810_dma_dispatch_vertex( dev, buf, args->discard, args->used );
- atomic_add(args->used, &dma->total_bytes);
- atomic_inc(&dma->total_dmas);
- return 0;
-}
-
-int i810_dma_general(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int i810_dma_vertex(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_i810_general_t general;
- drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
u32 *hw_status = (u32 *)dev_priv->hw_status_page;
drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
dev_priv->sarea_priv;
+ drm_i810_vertex_t vertex;
- int retcode = 0;
-
- copy_from_user_ret(&general, (drm_i810_general_t *)arg, sizeof(general),
+ copy_from_user_ret(&vertex, (drm_i810_vertex_t *)arg, sizeof(vertex),
-EFAULT);
- DRM_DEBUG("i810 dma general idx %d used %d\n",
- general.idx, general.used);
-
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
- DRM_ERROR("i810_dma_general called without lock held\n");
+ DRM_ERROR("i810_dma_vertex called without lock held\n");
return -EINVAL;
}
- retcode = i810DmaGeneral(dev, &general);
+ DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n",
+ vertex.idx, vertex.used, vertex.discard);
+
+ i810_dma_dispatch_vertex( dev,
+ dma->buflist[ vertex.idx ],
+ vertex.discard, vertex.used );
+
+ atomic_add(vertex.used, &dma->total_bytes);
+ atomic_inc(&dma->total_dmas);
sarea_priv->last_enqueue = dev_priv->counter-1;
sarea_priv->last_dispatch = (int) hw_status[5];
- return retcode;
+ return 0;
}
-int i810_dma_vertex(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+
+
+int i810_clear_bufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
- u32 *hw_status = (u32 *)dev_priv->hw_status_page;
- drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
- dev_priv->sarea_priv;
- drm_i810_vertex_t vertex;
- int retcode = 0;
+ drm_i810_clear_t clear;
- copy_from_user_ret(&vertex, (drm_i810_vertex_t *)arg, sizeof(vertex),
+ copy_from_user_ret(&clear, (drm_i810_clear_t *)arg, sizeof(clear),
-EFAULT);
+
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
- DRM_ERROR("i810_dma_vertex called without lock held\n");
+ DRM_ERROR("i810_clear_bufs called without lock held\n");
return -EINVAL;
}
- DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n",
- vertex.idx, vertex.used, vertex.discard);
+ i810_dma_dispatch_clear( dev, clear.flags,
+ clear.clear_color,
+ clear.clear_depth );
+ return 0;
+}
- retcode = i810DmaVertex(dev, &vertex);
- sarea_priv->last_enqueue = dev_priv->counter-1;
- sarea_priv->last_dispatch = (int) hw_status[5];
+int i810_swap_bufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
- return retcode;
+ DRM_DEBUG("i810_swap_bufs\n");
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("i810_swap_buf called without lock held\n");
+ return -EINVAL;
+ }
+
+ i810_dma_dispatch_swap( dev );
+ return 0;
}
int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd,
@@ -928,53 +1322,32 @@ int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd,
return 0;
}
-int i810_dma(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
+int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_device_dma_t *dma = dev->dma;
int retcode = 0;
- drm_dma_t d;
+ drm_i810_dma_t d;
drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
u32 *hw_status = (u32 *)dev_priv->hw_status_page;
drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
dev_priv->sarea_priv;
-
- copy_from_user_ret(&d, (drm_dma_t *)arg, sizeof(d), -EFAULT);
- DRM_DEBUG("%d %d: %d send, %d req\n",
- current->pid, d.context, d.send_count, d.request_count);
+ DRM_DEBUG("getbuf\n");
+ copy_from_user_ret(&d, (drm_i810_dma_t *)arg, sizeof(d), -EFAULT);
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("i810_dma called without lock held\n");
return -EINVAL;
}
-
- /* Please don't send us buffers.
- */
- if (d.send_count != 0) {
- DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
- current->pid, d.send_count);
- return -EINVAL;
- }
- /* We'll send you buffers.
- */
- if (d.request_count < 0 || d.request_count > dma->buf_count) {
- DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
- current->pid, d.request_count, dma->buf_count);
- return -EINVAL;
- }
-
- d.granted_count = 0;
+ d.granted = 0;
- if (!retcode && d.request_count) {
- retcode = i810_dma_get_buffers(dev, &d);
- }
+ retcode = i810_dma_get_buffer(dev, &d, filp);
- DRM_DEBUG("i810_dma: %d returning, granted = %d\n",
- current->pid, d.granted_count);
+ DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
+ current->pid, retcode, d.granted);
copy_to_user_ret((drm_dma_t *)arg, &d, sizeof(d), -EFAULT);
sarea_priv->last_dispatch = (int) hw_status[5];
diff --git a/linux/i810_drm.h b/linux/i810_drm.h
index 0754874c..4c8e09f6 100644
--- a/linux/i810_drm.h
+++ b/linux/i810_drm.h
@@ -5,35 +5,112 @@
* if you change them, you must change the defines in the Xserver.
*/
-/* Might one day want to support the client-side ringbuffer code again.
- */
#ifndef _I810_DEFINES_
#define _I810_DEFINES_
-#define I810_USE_BATCH 1
#define I810_DMA_BUF_ORDER 12
#define I810_DMA_BUF_SZ (1<<I810_DMA_BUF_ORDER)
#define I810_DMA_BUF_NR 256
-#define I810_NR_SAREA_CLIPRECTS 2
+#define I810_NR_SAREA_CLIPRECTS 8
/* Each region is a minimum of 64k, and there are at most 64 of them.
*/
-
#define I810_NR_TEX_REGIONS 64
#define I810_LOG_MIN_TEX_REGION_SIZE 16
#endif
+#define I810_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
+#define I810_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
+#define I810_UPLOAD_CTX 0x4
+#define I810_UPLOAD_BUFFERS 0x8
+#define I810_UPLOAD_TEX0 0x10
+#define I810_UPLOAD_TEX1 0x20
+#define I810_UPLOAD_CLIPRECTS 0x40
+
+
+/* Indices into buf.Setup where various bits of state are mirrored per
+ * context and per buffer. These can be fired at the card as a unit,
+ * or in a piecewise fashion as required.
+ */
+
+/* Destbuffer state
+ * - backbuffer linear offset and pitch -- invariant in the current dri
+ * - zbuffer linear offset and pitch -- also invariant
+ * - drawing origin in back and depth buffers.
+ *
+ * Keep the depth/back buffer state here to accommodate private buffers
+ * in the future.
+ */
+#define I810_DESTREG_DI0 0 /* CMD_OP_DESTBUFFER_INFO (2 dwords) */
+#define I810_DESTREG_DI1 1
+#define I810_DESTREG_DV0 2 /* GFX_OP_DESTBUFFER_VARS (2 dwords) */
+#define I810_DESTREG_DV1 3
+#define I810_DESTREG_DR0 4 /* GFX_OP_DRAWRECT_INFO (4 dwords) */
+#define I810_DESTREG_DR1 5
+#define I810_DESTREG_DR2 6
+#define I810_DESTREG_DR3 7
+#define I810_DESTREG_DR4 8
+#define I810_DEST_SETUP_SIZE 10
+
+/* Context state
+ */
+#define I810_CTXREG_CF0 0 /* GFX_OP_COLOR_FACTOR */
+#define I810_CTXREG_CF1 1
+#define I810_CTXREG_ST0 2 /* GFX_OP_STIPPLE */
+#define I810_CTXREG_ST1 3
+#define I810_CTXREG_VF 4 /* GFX_OP_VERTEX_FMT */
+#define I810_CTXREG_MT 5 /* GFX_OP_MAP_TEXELS */
+#define I810_CTXREG_MC0 6 /* GFX_OP_MAP_COLOR_STAGES - stage 0 */
+#define I810_CTXREG_MC1 7 /* GFX_OP_MAP_COLOR_STAGES - stage 1 */
+#define I810_CTXREG_MC2 8 /* GFX_OP_MAP_COLOR_STAGES - stage 2 */
+#define I810_CTXREG_MA0 9 /* GFX_OP_MAP_ALPHA_STAGES - stage 0 */
+#define I810_CTXREG_MA1 10 /* GFX_OP_MAP_ALPHA_STAGES - stage 1 */
+#define I810_CTXREG_MA2 11 /* GFX_OP_MAP_ALPHA_STAGES - stage 2 */
+#define I810_CTXREG_SDM 12 /* GFX_OP_SRC_DEST_MONO */
+#define I810_CTXREG_FOG 13 /* GFX_OP_FOG_COLOR */
+#define I810_CTXREG_B1 14 /* GFX_OP_BOOL_1 */
+#define I810_CTXREG_B2 15 /* GFX_OP_BOOL_2 */
+#define I810_CTXREG_LCS 16 /* GFX_OP_LINEWIDTH_CULL_SHADE_MODE */
+#define I810_CTXREG_PV 17 /* GFX_OP_PV_RULE -- Invariant! */
+#define I810_CTXREG_ZA 18 /* GFX_OP_ZBIAS_ALPHAFUNC */
+#define I810_CTXREG_AA 19 /* GFX_OP_ANTIALIAS */
+#define I810_CTX_SETUP_SIZE 20
+
+/* Texture state (per tex unit)
+ */
+#define I810_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (4 dwords) */
+#define I810_TEXREG_MI1 1
+#define I810_TEXREG_MI2 2
+#define I810_TEXREG_MI3 3
+#define I810_TEXREG_MF 4 /* GFX_OP_MAP_FILTER */
+#define I810_TEXREG_MLC 5 /* GFX_OP_MAP_LOD_CTL */
+#define I810_TEXREG_MLL 6 /* GFX_OP_MAP_LOD_LIMITS */
+#define I810_TEXREG_MCS 7 /* GFX_OP_MAP_COORD_SETS ??? */
+#define I810_TEX_SETUP_SIZE 8
+
+#define I810_FRONT 0x1
+#define I810_BACK 0x2
+#define I810_DEPTH 0x4
+
+
typedef struct _drm_i810_init {
- enum {
- I810_INIT_DMA = 0x01,
- I810_CLEANUP_DMA = 0x02
+ enum {
+ I810_INIT_DMA = 0x01,
+ I810_CLEANUP_DMA = 0x02
} func;
- int ring_map_idx;
- int buffer_map_idx;
+ int ring_map_idx;
+ int buffer_map_idx;
int sarea_priv_offset;
- unsigned long ring_start;
- unsigned long ring_end;
- unsigned long ring_size;
+ unsigned int ring_start;
+ unsigned int ring_end;
+ unsigned int ring_size;
+ unsigned int front_offset;
+ unsigned int back_offset;
+ unsigned int depth_offset;
+ unsigned int w;
+ unsigned int h;
+ unsigned int pitch;
+ unsigned int pitch_bits;
} drm_i810_init_t;
/* Warning: If you change the SAREA structure you must change the Xserver
@@ -46,6 +123,11 @@ typedef struct _drm_i810_tex_region {
} drm_i810_tex_region_t;
typedef struct _drm_i810_sarea {
+ unsigned int ContextState[I810_CTX_SETUP_SIZE];
+ unsigned int BufferState[I810_DEST_SETUP_SIZE];
+ unsigned int TexState[2][I810_TEX_SETUP_SIZE];
+ unsigned int dirty;
+
unsigned int nbox;
drm_clip_rect_t boxes[I810_NR_SAREA_CLIPRECTS];
@@ -72,12 +154,18 @@ typedef struct _drm_i810_sarea {
int last_dispatch; /* age of the most recently dispatched buffer */
int last_quiescent; /* */
int ctxOwner; /* last context to upload state */
+
+ int vertex_prim;
+
} drm_i810_sarea_t;
-typedef struct _drm_i810_general {
- int idx;
- int used;
-} drm_i810_general_t;
+typedef struct _drm_i810_clear {
+ int clear_color;
+ int clear_depth;
+ int flags;
+} drm_i810_clear_t;
+
+
/* These may be placeholders if we have more cliprects than
* I810_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
@@ -90,4 +178,11 @@ typedef struct _drm_i810_vertex {
int discard; /* client is finished with the buffer? */
} drm_i810_vertex_t;
+typedef struct drm_i810_dma {
+ void *virtual;
+ int request_idx;
+ int request_size;
+ int granted;
+} drm_i810_dma_t;
+
#endif /* _I810_DRM_H_ */
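With the structures above in place, the new per-driver ioctls can be exercised directly from user space. A minimal sketch, assuming the DRM_IOCTL_I810_* request codes come from the tree's drm headers and that the caller already holds the heavyweight DRM lock on fd (the function names are invented):

#include <sys/ioctl.h>
#include "drm.h"
#include "i810_drm.h"

static int example_clear_and_swap(int fd)
{
	drm_i810_clear_t clear;

	clear.flags       = I810_FRONT | I810_BACK | I810_DEPTH;
	clear.clear_color = 0;
	clear.clear_depth = 0xffff;
	if (ioctl(fd, DRM_IOCTL_I810_CLEAR, &clear) < 0)
		return -1;
	/* The swap ioctl takes no argument structure. */
	return ioctl(fd, DRM_IOCTL_I810_SWAP, 0);
}

static int example_get_buffer(int fd, drm_i810_dma_t *d)
{
	d->granted = 0;
	if (ioctl(fd, DRM_IOCTL_I810_GETBUF, d) < 0 || !d->granted)
		return -1;
	/* d->virtual now maps d->request_size bytes of buffer d->request_idx. */
	return d->request_idx;
}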
diff --git a/linux/i810_drv.c b/linux/i810_drv.c
index eec9eb0f..b523db90 100644
--- a/linux/i810_drv.c
+++ b/linux/i810_drv.c
@@ -79,7 +79,6 @@ static drm_ioctl_desc_t i810_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { i810_addbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { i810_markbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { i810_infobufs, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { i810_mapbufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { i810_freebufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { i810_addctx, 1, 1 },
@@ -92,8 +91,6 @@ static drm_ioctl_desc_t i810_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { i810_dma, 1, 0 },
-
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { i810_lock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { i810_unlock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
@@ -106,11 +103,14 @@ static drm_ioctl_desc_t i810_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
+
[DRM_IOCTL_NR(DRM_IOCTL_I810_INIT)] = { i810_dma_init, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_I810_VERTEX)] = { i810_dma_vertex, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_I810_DMA)] = { i810_dma_general,1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_I810_CLEAR)] = { i810_clear_bufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_I810_FLUSH)] = { i810_flush_ioctl,1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_I810_GETAGE)] = { i810_getage, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_I810_GETBUF)] = { i810_getbuf, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_I810_SWAP)] = { i810_swap_bufs, 1, 0 },
};
#define I810_IOCTL_COUNT DRM_ARRAY_SIZE(i810_ioctls)
diff --git a/linux/i810_drv.h b/linux/i810_drv.h
index 690710c8..c387bf72 100644
--- a/linux/i810_drv.h
+++ b/linux/i810_drv.h
@@ -32,6 +32,16 @@
#ifndef _I810_DRV_H_
#define _I810_DRV_H_
+typedef struct drm_i810_buf_priv {
+ u32 *in_use;
+ int my_use_idx;
+ int currently_mapped;
+ void *virtual;
+ void *kernel_virtual;
+ int map_count;
+ struct vm_area_struct *vma;
+} drm_i810_buf_priv_t;
+
typedef struct _drm_i810_ring_buffer{
int tail_mask;
unsigned long Start;
@@ -55,6 +65,15 @@ typedef struct drm_i810_private {
atomic_t flush_done;
wait_queue_head_t flush_queue; /* Processes waiting until flush */
+ drm_buf_t *mmap_buffer;
+
+
+ u32 front_di1, back_di1, zi1;
+
+ int back_offset;
+ int depth_offset;
+ int w, h;
+ int pitch;
} drm_i810_private_t;
/* i810_drv.c */
@@ -71,8 +90,8 @@ extern int i810_unlock(struct inode *inode, struct file *filp,
/* i810_dma.c */
extern int i810_dma_schedule(drm_device_t *dev, int locked);
-extern int i810_dma(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
+extern int i810_getbuf(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
extern int i810_irq_install(drm_device_t *dev, int irq);
extern int i810_irq_uninstall(drm_device_t *dev);
extern int i810_control(struct inode *inode, struct file *filp,
@@ -86,6 +105,7 @@ extern int i810_flush_ioctl(struct inode *inode, struct file *filp,
extern void i810_reclaim_buffers(drm_device_t *dev, pid_t pid);
extern int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg);
+extern int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma);
/* i810_bufs.c */
@@ -97,8 +117,6 @@ extern int i810_markbufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_freebufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int i810_mapbufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
extern int i810_addmap(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
@@ -121,32 +139,17 @@ extern int i810_rmctx(struct inode *inode, struct file *filp,
extern int i810_context_switch(drm_device_t *dev, int old, int new);
extern int i810_context_switch_complete(drm_device_t *dev, int new);
-
-
-
-/* Copy the outstanding cliprects for every I810_DMA_VERTEX buffer.
- * This can be fixed by emitting directly to the ringbuffer in the
- * 'vertex_dma' ioctl.
-*/
-typedef struct {
- u32 *in_use;
- int my_use_idx;
-} drm_i810_buf_priv_t;
-
-
-#define I810_DMA_GENERAL 0
-#define I810_DMA_VERTEX 1
-#define I810_DMA_DISCARD 2 /* not used */
-
#define I810_VERBOSE 0
int i810_dma_vertex(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-int i810_dma_general(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
+int i810_swap_bufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+int i810_clear_bufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
@@ -200,5 +203,22 @@ int i810_dma_general(struct inode *inode, struct file *filp,
#define SCI_YMAX_MASK (0xffff<<16)
#define SCI_XMAX_MASK (0xffff<<0)
+#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x2)
+#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24))
+
+#define CMD_OP_Z_BUFFER_INFO ((0x0<<29)|(0x16<<23))
+#define CMD_OP_DESTBUFFER_INFO ((0x0<<29)|(0x15<<23))
+
+#define BR00_BITBLT_CLIENT 0x40000000
+#define BR00_OP_COLOR_BLT 0x10000000
+#define BR00_OP_SRC_COPY_BLT 0x10C00000
+#define BR13_SOLID_PATTERN 0x80000000
+
+
+
#endif
diff --git a/linux/mga_dma.c b/linux/mga_dma.c
index b03544bc..25e3622c 100644
--- a/linux/mga_dma.c
+++ b/linux/mga_dma.c
@@ -52,6 +52,7 @@ static unsigned long mga_alloc_page(drm_device_t *dev)
{
unsigned long address;
+ DRM_DEBUG("%s\n", __FUNCTION__);
address = __get_free_page(GFP_KERNEL);
if(address == 0UL) {
return 0;
@@ -64,6 +65,8 @@ static unsigned long mga_alloc_page(drm_device_t *dev)
static void mga_free_page(drm_device_t *dev, unsigned long page)
{
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
if(page == 0UL) {
return;
}
@@ -79,9 +82,11 @@ static void mga_delay(void)
return;
}
-static void mga_flush_write_combine(void)
+void mga_flush_write_combine(void)
{
int xchangeDummy;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
__asm__ volatile(" push %%eax ; xchg %%eax, %0 ; pop %%eax" : : "m" (xchangeDummy));
__asm__ volatile(" push %%eax ; push %%ebx ; push %%ecx ; push %%edx ;"
" movl $0,%%eax ; cpuid ; pop %%edx ; pop %%ecx ; pop %%ebx ;"
@@ -93,18 +98,6 @@ static void mga_flush_write_combine(void)
#define MGA_BUF_USED 0xffffffff
#define MGA_BUF_FREE 0
-static void mga_freelist_debug(drm_mga_freelist_t *item)
-{
- if(item->buf != NULL) {
- DRM_DEBUG("buf index : %d\n", item->buf->idx);
- } else {
- DRM_DEBUG("Freelist head\n");
- }
- DRM_DEBUG("item->age : %x\n", item->age);
- DRM_DEBUG("item->next : %p\n", item->next);
- DRM_DEBUG("item->prev : %p\n", item->prev);
-}
-
static int mga_freelist_init(drm_device_t *dev)
{
drm_device_dma_t *dma = dev->dma;
@@ -113,7 +106,9 @@ static int mga_freelist_init(drm_device_t *dev)
drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
drm_mga_freelist_t *item;
int i;
-
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
if(dev_priv->head == NULL) return -ENOMEM;
memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t));
@@ -135,19 +130,10 @@ static int mga_freelist_init(drm_device_t *dev)
item->buf = buf;
buf_priv->my_freelist = item;
buf_priv->discard = 0;
+ buf_priv->dispatched = 0;
dev_priv->head->next = item;
}
- item = dev_priv->head;
- while(item) {
- mga_freelist_debug(item);
- item = item->next;
- }
- DRM_DEBUG("Head\n");
- mga_freelist_debug(dev_priv->head);
- DRM_DEBUG("Tail\n");
- mga_freelist_debug(dev_priv->tail);
-
return 0;
}
@@ -157,6 +143,8 @@ static void mga_freelist_cleanup(drm_device_t *dev)
drm_mga_freelist_t *item;
drm_mga_freelist_t *prev;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
item = dev_priv->head;
while(item) {
prev = item;
@@ -173,14 +161,14 @@ static inline void mga_dma_quiescent(drm_device_t *dev)
drm_device_dma_t *dma = dev->dma;
drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- __volatile__ unsigned int *status =
- (__volatile__ unsigned int *)dev_priv->status_page;
unsigned long end;
int i;
+ DRM_DEBUG("%s\n", __FUNCTION__);
end = jiffies + (HZ*3);
while(1) {
- if(!test_and_set_bit(0, &dev_priv->dispatch_lock)) {
+ if(!test_and_set_bit(MGA_IN_DISPATCH,
+ &dev_priv->dispatch_status)) {
break;
}
if((signed)(end - jiffies) <= 0) {
@@ -204,51 +192,25 @@ static inline void mga_dma_quiescent(drm_device_t *dev)
}
for (i = 0 ; i < 2000 ; i++) mga_delay();
}
- DRM_DEBUG("status[1] : %x last_sync_tag : %x\n", status[1],
- dev_priv->last_sync_tag);
sarea_priv->dirty |= MGA_DMA_FLUSH;
out_status:
- clear_bit(0, &dev_priv->dispatch_lock);
+ clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
out_nolock:
}
-#define FREELIST_INITIAL (MGA_DMA_BUF_NR * 2)
-#define FREELIST_COMPARE(age) ((age >> 2))
-
-unsigned int mga_create_sync_tag(drm_device_t *dev)
+static void mga_reset_freelist(drm_device_t *dev)
{
- drm_mga_private_t *dev_priv =
- (drm_mga_private_t *) dev->dev_private;
- unsigned int temp;
- drm_buf_t *buf;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_t *buf;
drm_mga_buf_priv_t *buf_priv;
- drm_device_dma_t *dma = dev->dma;
- int i;
-
- dev_priv->sync_tag++;
-
- if(dev_priv->sync_tag < FREELIST_INITIAL) {
- dev_priv->sync_tag = FREELIST_INITIAL;
- }
- if(dev_priv->sync_tag > 0x3fffffff) {
- mga_flush_queue(dev);
- mga_dma_quiescent(dev);
-
- for (i = 0; i < dma->buf_count; i++) {
- buf = dma->buflist[ i ];
- buf_priv = buf->dev_private;
- buf_priv->my_freelist->age = MGA_BUF_FREE;
- }
-
- dev_priv->sync_tag = FREELIST_INITIAL;
- }
- temp = dev_priv->sync_tag << 2;
-
- dev_priv->sarea_priv->last_enqueue = temp;
+ int i;
- DRM_DEBUG("sync_tag : %x\n", temp);
- return temp;
+ for (i = 0; i < dma->buf_count; i++) {
+ buf = dma->buflist[ i ];
+ buf_priv = buf->dev_private;
+ buf_priv->my_freelist->age = MGA_BUF_FREE;
+ }
}
/* Least recently used :
@@ -257,23 +219,52 @@ unsigned int mga_create_sync_tag(drm_device_t *dev)
drm_buf_t *mga_freelist_get(drm_device_t *dev)
{
+ DECLARE_WAITQUEUE(entry, current);
drm_mga_private_t *dev_priv =
(drm_mga_private_t *) dev->dev_private;
- __volatile__ unsigned int *status =
- (__volatile__ unsigned int *)dev_priv->status_page;
drm_mga_freelist_t *prev;
drm_mga_freelist_t *next;
+ static int failed = 0;
+
+ DRM_DEBUG("%s : tail->age : %d last_prim_age : %d\n", __FUNCTION__,
+ dev_priv->tail->age, dev_priv->last_prim_age);
- if((dev_priv->tail->age >> 2) <= FREELIST_COMPARE(status[1])) {
+ if(failed >= 1000 && dev_priv->tail->age >= dev_priv->last_prim_age) {
+ DRM_DEBUG("I'm waiting on the freelist!!! %d\n",
+ dev_priv->last_prim_age);
+ set_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status);
+ current->state = TASK_INTERRUPTIBLE;
+ add_wait_queue(&dev_priv->buf_queue, &entry);
+ for (;;) {
+ mga_dma_schedule(dev, 0);
+ if(!test_bit(MGA_IN_GETBUF,
+ &dev_priv->dispatch_status))
+ break;
+ atomic_inc(&dev->total_sleeps);
+ schedule();
+ if (signal_pending(current)) {
+ clear_bit(MGA_IN_GETBUF,
+ &dev_priv->dispatch_status);
+ goto failed_getbuf;
+ }
+ }
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&dev_priv->buf_queue, &entry);
+ }
+
+ if(dev_priv->tail->age < dev_priv->last_prim_age) {
prev = dev_priv->tail->prev;
next = dev_priv->tail;
prev->next = NULL;
next->prev = next->next = NULL;
dev_priv->tail = prev;
next->age = MGA_BUF_USED;
+ failed = 0;
return next->buf;
- }
+ }
+failed_getbuf:
+ failed++;
return NULL;
}
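The comparison above is the heart of the new age scheme: a buffer becomes reusable once its recorded age falls behind dev_priv->last_prim_age, which the interrupt handler advances to the prim_age of the most recently completed primary buffer. As a standalone predicate (an illustrative sketch, not part of this patch):

static inline int mga_buf_is_idle(drm_mga_private_t *dev_priv,
				  drm_mga_buf_priv_t *buf_priv)
{
	/* Freshly reset entries carry MGA_BUF_FREE (0), which is always
	 * behind last_prim_age, so they are handed out immediately.
	 */
	return buf_priv->my_freelist->age < dev_priv->last_prim_age;
}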
@@ -286,6 +277,8 @@ int mga_freelist_put(drm_device_t *dev, drm_buf_t *buf)
drm_mga_freelist_t *head;
drm_mga_freelist_t *next;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
if(buf_priv->my_freelist->age == MGA_BUF_USED) {
/* Discarded buffer, put it on the tail */
next = buf_priv->my_freelist;
@@ -312,32 +305,6 @@ int mga_freelist_put(drm_device_t *dev, drm_buf_t *buf)
return 0;
}
-static void mga_print_all_primary(drm_device_t *dev)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- drm_mga_prim_buf_t *prim;
- int i;
-
- DRM_DEBUG("Full list of primarys\n");
- for(i = 0; i < MGA_NUM_PRIM_BUFS; i++) {
- prim = dev_priv->prim_bufs[i];
- DRM_DEBUG("index : %d num_dwords : %d "
- "max_dwords : %d phy_head : %x\n",
- prim->idx, prim->num_dwords,
- prim->max_dwords, prim->phys_head);
- DRM_DEBUG("sec_used : %d swap_pending : %x "
- "in_use : %x force_fire : %d\n",
- prim->sec_used, prim->swap_pending,
- prim->in_use, atomic_read(&prim->force_fire));
- DRM_DEBUG("needs_overflow : %d\n",
- atomic_read(&prim->needs_overflow));
- }
-
- DRM_DEBUG("current_idx : %d, next_idx : %d, last_idx : %d\n",
- dev_priv->next_prim->idx, dev_priv->last_prim->idx,
- dev_priv->current_prim->idx);
-}
-
static int mga_init_primary_bufs(drm_device_t *dev, drm_mga_init_t *init)
{
drm_mga_private_t *dev_priv = dev->dev_private;
@@ -345,10 +312,9 @@ static int mga_init_primary_bufs(drm_device_t *dev, drm_mga_init_t *init)
int i, temp, size_of_buf;
int offset = init->reserved_map_agpstart;
- DRM_DEBUG("mga_init_primary_bufs\n");
+ DRM_DEBUG("%s\n", __FUNCTION__);
dev_priv->primary_size = ((init->primary_size + PAGE_SIZE - 1) /
PAGE_SIZE) * PAGE_SIZE;
- DRM_DEBUG("primary_size\n");
size_of_buf = dev_priv->primary_size / MGA_NUM_PRIM_BUFS;
dev_priv->warp_ucode_size = init->warp_ucode_size;
dev_priv->prim_bufs = drm_alloc(sizeof(drm_mga_prim_buf_t *) *
@@ -358,18 +324,12 @@ static int mga_init_primary_bufs(drm_device_t *dev, drm_mga_init_t *init)
DRM_ERROR("Unable to allocate memory for prim_buf\n");
return -ENOMEM;
}
- DRM_DEBUG("memset\n");
memset(dev_priv->prim_bufs,
0, sizeof(drm_mga_prim_buf_t *) * (MGA_NUM_PRIM_BUFS + 1));
temp = init->warp_ucode_size + dev_priv->primary_size;
temp = ((temp + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
- DRM_DEBUG("temp : %x\n", temp);
- DRM_DEBUG("dev->agp->base: %lx\n", dev->agp->base);
- DRM_DEBUG("init->reserved_map_agpstart: %x\n",
- init->reserved_map_agpstart);
- DRM_DEBUG("ioremap\n");
dev_priv->ioremap = drm_ioremap(dev->agp->base + offset,
temp);
if(dev_priv->ioremap == NULL) {
@@ -379,11 +339,9 @@ static int mga_init_primary_bufs(drm_device_t *dev, drm_mga_init_t *init)
init_waitqueue_head(&dev_priv->wait_queue);
for(i = 0; i < MGA_NUM_PRIM_BUFS; i++) {
- DRM_DEBUG("For loop\n");
prim_buffer = drm_alloc(sizeof(drm_mga_prim_buf_t),
DRM_MEM_DRIVER);
if(prim_buffer == NULL) return -ENOMEM;
- DRM_DEBUG("memset\n");
memset(prim_buffer, 0, sizeof(drm_mga_prim_buf_t));
prim_buffer->phys_head = offset + dev->agp->base;
prim_buffer->current_dma_ptr =
@@ -396,17 +354,18 @@ static int mga_init_primary_bufs(drm_device_t *dev, drm_mga_init_t *init)
prim_buffer->max_dwords -= 5; /* Leave room for the softrap */
prim_buffer->sec_used = 0;
prim_buffer->idx = i;
+ prim_buffer->prim_age = i + 1;
offset = offset + size_of_buf;
dev_priv->prim_bufs[i] = prim_buffer;
- DRM_DEBUG("Looping\n");
}
dev_priv->current_prim_idx = 0;
dev_priv->next_prim =
dev_priv->last_prim =
dev_priv->current_prim =
dev_priv->prim_bufs[0];
- set_bit(0, &dev_priv->current_prim->in_use);
- DRM_DEBUG("init done\n");
+ dev_priv->next_prim_age = 2;
+ dev_priv->last_prim_age = 1;
+ set_bit(MGA_BUF_IN_USE, &dev_priv->current_prim->buffer_status);
return 0;
}
@@ -420,9 +379,8 @@ void mga_fire_primary(drm_device_t *dev, drm_mga_prim_buf_t *prim)
int i;
int next_idx;
PRIMLOCALS;
-
- DRM_DEBUG("mga_fire_primary\n");
- dev_priv->last_sync_tag = mga_create_sync_tag(dev);
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
dev_priv->last_prim = prim;
/* We never check for overflow, b/c there is always room */
@@ -433,7 +391,7 @@ void mga_fire_primary(drm_device_t *dev, drm_mga_prim_buf_t *prim)
}
PRIMOUTREG( MGAREG_DMAPAD, 0);
PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DWGSYNC, dev_priv->last_sync_tag);
+ PRIMOUTREG( MGAREG_DMAPAD, 0);
PRIMOUTREG( MGAREG_SOFTRAP, 0);
PRIMFINISH(prim);
@@ -468,13 +426,13 @@ void mga_fire_primary(drm_device_t *dev, drm_mga_prim_buf_t *prim)
for (i = 0 ; i < 4096 ; i++) mga_delay();
}
}
-
+
mga_flush_write_combine();
atomic_inc(&dev_priv->pending_bufs);
- atomic_inc(&dma->total_lost);
MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL);
MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp);
prim->num_dwords = 0;
+ sarea_priv->last_enqueue = prim->prim_age;
next_idx = prim->idx + 1;
if(next_idx >= MGA_NUM_PRIM_BUFS)
@@ -486,11 +444,10 @@ void mga_fire_primary(drm_device_t *dev, drm_mga_prim_buf_t *prim)
out_prim_wait:
prim->num_dwords = 0;
prim->sec_used = 0;
- clear_bit(0, &prim->in_use);
+ clear_bit(MGA_BUF_IN_USE, &prim->buffer_status);
wake_up_interruptible(&dev_priv->wait_queue);
- clear_bit(0, &prim->swap_pending);
- clear_bit(0, &dev_priv->dispatch_lock);
- atomic_dec(&dev_priv->pending_bufs);
+ clear_bit(MGA_BUF_SWAP_PENDING, &prim->buffer_status);
+ clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
}
int mga_advance_primary(drm_device_t *dev)
@@ -505,27 +462,28 @@ int mga_advance_primary(drm_device_t *dev)
/* This needs to reset the primary buffer if available,
* we should collect stats on how many times it bites
* its tail */
+ DRM_DEBUG("%s\n", __FUNCTION__);
next_prim_idx = dev_priv->current_prim_idx + 1;
if(next_prim_idx >= MGA_NUM_PRIM_BUFS)
next_prim_idx = 0;
prim_buffer = dev_priv->prim_bufs[next_prim_idx];
- atomic_set(&dev_priv->in_wait, 1);
+ set_bit(MGA_IN_WAIT, &dev_priv->dispatch_status);
/* In use is cleared in interrupt handler */
- if(test_and_set_bit(0, &prim_buffer->in_use)) {
+ if(test_and_set_bit(MGA_BUF_IN_USE, &prim_buffer->buffer_status)) {
add_wait_queue(&dev_priv->wait_queue, &entry);
+ current->state = TASK_INTERRUPTIBLE;
+
for (;;) {
- current->state = TASK_INTERRUPTIBLE;
mga_dma_schedule(dev, 0);
- if(!test_and_set_bit(0, &prim_buffer->in_use)) break;
+ if(!test_and_set_bit(MGA_BUF_IN_USE,
+ &prim_buffer->buffer_status))
+ break;
atomic_inc(&dev->total_sleeps);
atomic_inc(&dma->total_missed_sched);
- mga_print_all_primary(dev);
- DRM_DEBUG("Schedule in advance\n");
- /* Three second delay */
- schedule_timeout(HZ*3);
+ schedule();
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
@@ -535,16 +493,27 @@ int mga_advance_primary(drm_device_t *dev)
remove_wait_queue(&dev_priv->wait_queue, &entry);
if(ret) return ret;
}
- atomic_set(&dev_priv->in_wait, 0);
+ clear_bit(MGA_IN_WAIT, &dev_priv->dispatch_status);
+
/* This primary buffer is now free to use */
prim_buffer->current_dma_ptr = prim_buffer->head;
prim_buffer->num_dwords = 0;
prim_buffer->sec_used = 0;
- atomic_set(&prim_buffer->needs_overflow, 0);
+ prim_buffer->prim_age = dev_priv->next_prim_age++;
+ if(prim_buffer->prim_age == 0 || prim_buffer->prim_age == 0xffffffff) {
+ mga_flush_queue(dev);
+ mga_dma_quiescent(dev);
+ mga_reset_freelist(dev);
+ prim_buffer->prim_age = (dev_priv->next_prim_age += 2);
+ }
+
+ /* Reset all buffer status stuff */
+ clear_bit(MGA_BUF_NEEDS_OVERFLOW, &prim_buffer->buffer_status);
+ clear_bit(MGA_BUF_FORCE_FIRE, &prim_buffer->buffer_status);
+ clear_bit(MGA_BUF_SWAP_PENDING, &prim_buffer->buffer_status);
+
dev_priv->current_prim = prim_buffer;
dev_priv->current_prim_idx = next_prim_idx;
- DRM_DEBUG("Primarys at advance\n");
- mga_print_all_primary(dev);
return 0;
}
@@ -553,21 +522,29 @@ static inline int mga_decide_to_fire(drm_device_t *dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
drm_device_dma_t *dma = dev->dma;
-
- if(atomic_read(&dev_priv->next_prim->force_fire))
- {
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ if(test_bit(MGA_BUF_FORCE_FIRE, &dev_priv->next_prim->buffer_status)) {
atomic_inc(&dma->total_prio);
return 1;
}
- if (atomic_read(&dev_priv->in_flush) && dev_priv->next_prim->num_dwords)
- {
+ if (test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status) &&
+ dev_priv->next_prim->num_dwords) {
+ atomic_inc(&dma->total_prio);
+ return 1;
+ }
+
+ if (test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) &&
+ dev_priv->next_prim->num_dwords) {
atomic_inc(&dma->total_prio);
return 1;
}
if(atomic_read(&dev_priv->pending_bufs) <= MGA_NUM_PRIM_BUFS - 1) {
- if(test_bit(0, &dev_priv->next_prim->swap_pending)) {
+ if(test_bit(MGA_BUF_SWAP_PENDING,
+ &dev_priv->next_prim->buffer_status)) {
atomic_inc(&dma->total_dmas);
return 1;
}
@@ -601,10 +578,11 @@ int mga_dma_schedule(drm_device_t *dev, int locked)
return -EBUSY;
}
- DRM_DEBUG("mga_dma_schedule\n");
+ DRM_DEBUG("%s\n", __FUNCTION__);
- if(atomic_read(&dev_priv->in_flush) ||
- atomic_read(&dev_priv->in_wait)) {
+ if(test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) ||
+ test_bit(MGA_IN_WAIT, &dev_priv->dispatch_status) ||
+ test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status)) {
locked = 1;
}
@@ -612,27 +590,25 @@ int mga_dma_schedule(drm_device_t *dev, int locked)
!drm_lock_take(&dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT)) {
atomic_inc(&dma->total_missed_lock);
clear_bit(0, &dev->dma_flag);
+ DRM_DEBUG("Not locked\n");
return -EBUSY;
}
DRM_DEBUG("I'm locked\n");
-
- if(!test_and_set_bit(0, &dev_priv->dispatch_lock)) {
+ if(!test_and_set_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status)) {
/* Fire dma buffer */
if(mga_decide_to_fire(dev)) {
- DRM_DEBUG("mga_fire_primary\n");
DRM_DEBUG("idx :%d\n", dev_priv->next_prim->idx);
- atomic_set(&dev_priv->next_prim->force_fire, 0);
- if(dev_priv->current_prim == dev_priv->next_prim &&
- dev_priv->next_prim->num_dwords != 0) {
+ clear_bit(MGA_BUF_FORCE_FIRE,
+ &dev_priv->next_prim->buffer_status);
+ if(dev_priv->current_prim == dev_priv->next_prim) {
/* Schedule overflow for a later time */
- atomic_set(
- &dev_priv->current_prim->needs_overflow,
- 1);
+ set_bit(MGA_BUF_NEEDS_OVERFLOW,
+ &dev_priv->next_prim->buffer_status);
}
mga_fire_primary(dev, dev_priv->next_prim);
} else {
- clear_bit(0, &dev_priv->dispatch_lock);
+ clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
}
} else {
DRM_DEBUG("I can't get the dispatch lock\n");
@@ -645,17 +621,26 @@ int mga_dma_schedule(drm_device_t *dev, int locked)
}
}
- clear_bit(0, &dev->dma_flag);
-
- if(atomic_read(&dev_priv->in_flush) == 1 &&
- dev_priv->next_prim->num_dwords == 0) {
- /* Everything is on the hardware */
- DRM_DEBUG("Primarys at Flush\n");
- mga_print_all_primary(dev);
- atomic_set(&dev_priv->in_flush, 0);
+ if(test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) &&
+ dev_priv->next_prim->num_dwords == 0 &&
+ atomic_read(&dev_priv->pending_bufs) == 0) {
+ /* Everything has been processed by the hardware */
+ clear_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status);
wake_up_interruptible(&dev_priv->flush_queue);
}
+ if(test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status) &&
+ dev_priv->tail->age < dev_priv->last_prim_age) {
+ clear_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status);
+ DRM_DEBUG("Waking up buf queue\n");
+ wake_up_interruptible(&dev_priv->buf_queue);
+ } else if (test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status)) {
+ DRM_DEBUG("Not waking buf_queue on %d %d\n",
+ atomic_read(&dev->total_irq),
+ dev_priv->last_prim_age);
+ }
+
+ clear_bit(0, &dev->dma_flag);
return 0;
}
@@ -664,33 +649,35 @@ static void mga_dma_service(int irq, void *device, struct pt_regs *regs)
drm_device_t *dev = (drm_device_t *)device;
drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
drm_mga_prim_buf_t *last_prim_buffer;
- __volatile__ unsigned int *status =
- (__volatile__ unsigned int *)dev_priv->status_page;
-
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
atomic_inc(&dev->total_irq);
+ if((MGA_READ(MGAREG_STATUS) & 0x00000001) != 0x00000001) return;
MGA_WRITE(MGAREG_ICLEAR, 0x00000001);
last_prim_buffer = dev_priv->last_prim;
last_prim_buffer->num_dwords = 0;
last_prim_buffer->sec_used = 0;
- clear_bit(0, &last_prim_buffer->in_use);
+ dev_priv->sarea_priv->last_dispatch =
+ dev_priv->last_prim_age = last_prim_buffer->prim_age;
+ clear_bit(MGA_BUF_IN_USE, &last_prim_buffer->buffer_status);
wake_up_interruptible(&dev_priv->wait_queue);
- clear_bit(0, &last_prim_buffer->swap_pending);
- clear_bit(0, &dev_priv->dispatch_lock);
+ clear_bit(MGA_BUF_SWAP_PENDING, &last_prim_buffer->buffer_status);
+ clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
atomic_dec(&dev_priv->pending_bufs);
- dev_priv->sarea_priv->last_dispatch = status[1];
queue_task(&dev->tq, &tq_immediate);
mark_bh(IMMEDIATE_BH);
}
static void mga_dma_task_queue(void *device)
{
- drm_device_t *dev = (drm_device_t *) device;
-
- mga_dma_schedule(dev, 0);
+ DRM_DEBUG("%s\n", __FUNCTION__);
+ mga_dma_schedule((drm_device_t *)device, 0);
}
int mga_dma_cleanup(drm_device_t *dev)
{
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
if(dev->dev_private) {
drm_mga_private_t *dev_priv =
(drm_mga_private_t *) dev->dev_private;
@@ -739,14 +726,13 @@ static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) {
drm_map_t *sarea_map = NULL;
int i;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
if(dev_priv == NULL) return -ENOMEM;
dev->dev_private = (void *) dev_priv;
- DRM_DEBUG("dev_private\n");
-
memset(dev_priv, 0, sizeof(drm_mga_private_t));
- atomic_set(&dev_priv->in_flush, 0);
if((init->reserved_map_idx >= dev->map_count) ||
(init->buffer_map_idx >= dev->map_count)) {
@@ -761,7 +747,6 @@ static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) {
dev_priv->sarea_priv = (drm_mga_sarea_t *)
((u8 *)sarea_map->handle +
init->sarea_priv_offset);
- DRM_DEBUG("sarea_priv\n");
/* Scale primary size to the next page */
dev_priv->chipset = init->chipset;
@@ -776,6 +761,7 @@ static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) {
dev_priv->mAccess = init->mAccess;
init_waitqueue_head(&dev_priv->flush_queue);
+ init_waitqueue_head(&dev_priv->buf_queue);
dev_priv->WarpPipe = -1;
DRM_DEBUG("chipset: %d ucode_size: %d backOffset: %x depthOffset: %x\n",
@@ -795,20 +781,17 @@ static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) {
dev_priv->WarpIndex[i].phys_addr,
dev_priv->WarpIndex[i].size);
- DRM_DEBUG("Doing init prim buffers\n");
if(mga_init_primary_bufs(dev, init) != 0) {
DRM_ERROR("Can not initialize primary buffers\n");
mga_dma_cleanup(dev);
return -ENOMEM;
}
- DRM_DEBUG("Done with init prim buffers\n");
dev_priv->real_status_page = mga_alloc_page(dev);
if(dev_priv->real_status_page == 0UL) {
mga_dma_cleanup(dev);
DRM_ERROR("Can not allocate status page\n");
return -ENOMEM;
}
- DRM_DEBUG("Status page at %lx\n", dev_priv->real_status_page);
dev_priv->status_page =
ioremap_nocache(virt_to_bus((void *)dev_priv->real_status_page),
@@ -820,38 +803,23 @@ static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) {
return -ENOMEM;
}
- DRM_DEBUG("Status page remapped to %p\n", dev_priv->status_page);
/* Write status page when secend or softrap occurs */
MGA_WRITE(MGAREG_PRIMPTR,
virt_to_bus((void *)dev_priv->real_status_page) | 0x00000003);
-
- dev_priv->device = pci_find_device(0x102b, 0x0525, NULL);
- if(dev_priv->device == NULL) {
- DRM_ERROR("Could not find pci device for card\n");
- mga_dma_cleanup(dev);
- return -EINVAL;
- }
-
- DRM_DEBUG("dma initialization\n");
+
/* Private is now filled in, initialize the hardware */
{
- __volatile__ unsigned int *status =
- (unsigned int *)dev_priv->status_page;
PRIMLOCALS;
PRIMGETPTR( dev_priv );
-
- dev_priv->last_sync_tag = mga_create_sync_tag(dev);
-
+
PRIMOUTREG(MGAREG_DMAPAD, 0);
PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DWGSYNC, dev_priv->last_sync_tag);
+ PRIMOUTREG(MGAREG_DWGSYNC, 0x0100);
PRIMOUTREG(MGAREG_SOFTRAP, 0);
/* Poll for the first buffer to ensure that
* the status register will be correct
*/
- DRM_DEBUG("phys_head : %lx\n", (unsigned long)phys_head);
- status[1] = 0;
mga_flush_write_combine();
MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL);
@@ -859,9 +827,7 @@ static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) {
MGA_WRITE(MGAREG_PRIMEND, ((phys_head + num_dwords * 4) |
PDEA_pagpxfer_enable));
- while(MGA_READ(MGAREG_DWGSYNC) != dev_priv->last_sync_tag) ;
- DRM_DEBUG("status[0] after initialization : %x\n", status[0]);
- DRM_DEBUG("status[1] after initialization : %x\n", status[1]);
+ while(MGA_READ(MGAREG_DWGSYNC) != 0x0100) ;
}
if(mga_freelist_init(dev) != 0) {
@@ -869,7 +835,6 @@ static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) {
mga_dma_cleanup(dev);
return -ENOMEM;
}
- DRM_DEBUG("dma init was successful\n");
return 0;
}
@@ -880,6 +845,8 @@ int mga_dma_init(struct inode *inode, struct file *filp,
drm_device_t *dev = priv->dev;
drm_mga_init_t init;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
copy_from_user_ret(&init, (drm_mga_init_t *)arg, sizeof(init), -EFAULT);
switch(init.func) {
@@ -924,7 +891,7 @@ int mga_irq_install(drm_device_t *dev, int irq)
/* Install handler */
if ((retcode = request_irq(dev->irq,
mga_dma_service,
- 0,
+ SA_SHIRQ,
dev->devname,
dev))) {
down(&dev->struct_sem);
@@ -963,7 +930,9 @@ int mga_control(struct inode *inode, struct file *filp, unsigned int cmd,
drm_control_t ctl;
copy_from_user_ret(&ctl, (drm_control_t *)arg, sizeof(ctl), -EFAULT);
-
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
switch (ctl.func) {
case DRM_INST_HANDLER:
return mga_irq_install(dev, ctl.irq);
@@ -980,30 +949,33 @@ static int mga_flush_queue(drm_device_t *dev)
drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
int ret = 0;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
if(dev_priv == NULL) {
return 0;
}
if(dev_priv->next_prim->num_dwords != 0) {
- atomic_set(&dev_priv->in_flush, 1);
+ set_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status);
current->state = TASK_INTERRUPTIBLE;
add_wait_queue(&dev_priv->flush_queue, &entry);
for (;;) {
mga_dma_schedule(dev, 0);
- if (atomic_read(&dev_priv->in_flush) == 0)
+ if (!test_bit(MGA_IN_FLUSH,
+ &dev_priv->dispatch_status))
break;
atomic_inc(&dev->total_sleeps);
- DRM_DEBUG("Schedule in flush_queue\n");
- schedule_timeout(HZ*3);
+ schedule();
if (signal_pending(current)) {
ret = -EINTR; /* Can't restart */
+ clear_bit(MGA_IN_FLUSH,
+ &dev_priv->dispatch_status);
break;
}
}
current->state = TASK_RUNNING;
remove_wait_queue(&dev_priv->flush_queue, &entry);
}
- atomic_set(&dev_priv->in_flush, 0);
return ret;
}
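
The flush path above replaces the old in_flush atomic with the MGA_IN_FLUSH bit in dev_priv->dispatch_status: the ioctl sets the bit, sleeps on flush_queue, and relies on the scheduling/interrupt side to clear the bit and wake the queue once the primary buffer has drained. A minimal sketch of that wake-up half, assuming it runs from the dispatch/interrupt path (the helper name is hypothetical; in the driver this presumably happens in mga_dma_schedule() or the interrupt service routine):

static void mga_flush_complete(drm_mga_private_t *dev_priv)
{
	/* Clear the flag only if a flush was actually requested, then
	 * wake whoever is sleeping in mga_flush_queue(). */
	if (test_and_clear_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status))
		wake_up_interruptible(&dev_priv->flush_queue);
}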
@@ -1017,6 +989,7 @@ void mga_reclaim_buffers(drm_device_t *dev, pid_t pid)
if(dev->dev_private == NULL) return;
if(dma->buflist == NULL) return;
+ DRM_DEBUG("%s\n", __FUNCTION__);
mga_flush_queue(dev);
for (i = 0; i < dma->buf_count; i++) {
@@ -1042,6 +1015,7 @@ int mga_lock(struct inode *inode, struct file *filp, unsigned int cmd,
int ret = 0;
drm_lock_t lock;
+ DRM_DEBUG("%s\n", __FUNCTION__);
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (lock.context == DRM_KERNEL_CONTEXT) {
@@ -1080,7 +1054,6 @@ int mga_lock(struct inode *inode, struct file *filp, unsigned int cmd,
/* Contention */
atomic_inc(&dev->total_sleeps);
current->state = TASK_INTERRUPTIBLE;
- DRM_DEBUG("Calling lock schedule\n");
schedule();
if (signal_pending(current)) {
ret = -ERESTARTSYS;
@@ -1110,11 +1083,8 @@ int mga_flush_ioctl(struct inode *inode, struct file *filp,
drm_device_t *dev = priv->dev;
drm_lock_t lock;
drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- __volatile__ unsigned int *status =
- (__volatile__ unsigned int *)dev_priv->status_page;
- int i;
-
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
@@ -1123,28 +1093,17 @@ int mga_flush_ioctl(struct inode *inode, struct file *filp,
}
if(lock.flags & _DRM_LOCK_FLUSH || lock.flags & _DRM_LOCK_FLUSH_ALL) {
- mga_flush_queue(dev);
-
- if((MGA_READ(MGAREG_STATUS) & 0x00030001) == 0x00020000 &&
- status[1] != dev_priv->last_sync_tag)
- {
- DRM_DEBUG("Reseting hardware status\n");
- MGA_WRITE(MGAREG_DWGSYNC, dev_priv->last_sync_tag);
-
- while(MGA_READ(MGAREG_DWGSYNC) !=
- dev_priv->last_sync_tag)
- {
- for(i = 0; i < 4096; i++) mga_delay();
- }
-
- status[1] =
- sarea_priv->last_dispatch =
- dev_priv->last_sync_tag;
- } else {
- sarea_priv->last_dispatch = status[1];
- }
+ drm_mga_prim_buf_t *temp_buf =
+ dev_priv->prim_bufs[dev_priv->current_prim_idx];
+
+ if(temp_buf && temp_buf->num_dwords) {
+ set_bit(MGA_BUF_FORCE_FIRE, &temp_buf->buffer_status);
+ mga_advance_primary(dev);
+ mga_dma_schedule(dev, 1);
+ }
}
if(lock.flags & _DRM_LOCK_QUIESCENT) {
+ mga_flush_queue(dev);
mga_dma_quiescent(dev);
}
diff --git a/linux/mga_drm.h b/linux/mga_drm.h
index 53d3590b..e75e91a4 100644
--- a/linux/mga_drm.h
+++ b/linux/mga_drm.h
@@ -37,6 +37,7 @@
*/
#ifndef _MGA_DEFINES_
#define _MGA_DEFINES_
+
#define MGA_F 0x1 /* fog */
#define MGA_A 0x2 /* alpha */
#define MGA_S 0x4 /* specular */
@@ -61,11 +62,11 @@
#define MGA_MAX_G400_PIPES 16
#define MGA_MAX_G200_PIPES 8 /* no multitex */
-
#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES
#define MGA_CARD_TYPE_G200 1
#define MGA_CARD_TYPE_G400 2
+
#define MGA_FRONT 0x1
#define MGA_BACK 0x2
#define MGA_DEPTH 0x4
@@ -110,19 +111,19 @@
#define MGA_UPLOAD_TEX0 0x2
#define MGA_UPLOAD_TEX1 0x4
#define MGA_UPLOAD_PIPE 0x8
-#define MGA_UPLOAD_TEX0IMAGE 0x10
-#define MGA_UPLOAD_TEX1IMAGE 0x20
+#define MGA_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */
+#define MGA_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */
#define MGA_UPLOAD_2D 0x40
#define MGA_WAIT_AGE 0x80 /* handled client-side */
#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */
#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock
quiescent */
-/* 64 buffers of 16k each, total 1 meg.
+/* 32 buffers of 64k each, total 2 meg.
*/
-#define MGA_DMA_BUF_ORDER 14
+#define MGA_DMA_BUF_ORDER 16
#define MGA_DMA_BUF_SZ (1<<MGA_DMA_BUF_ORDER)
-#define MGA_DMA_BUF_NR 63
+#define MGA_DMA_BUF_NR 31
/* Keep these small for testing.
*/
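
The three defines above are linked: MGA_DMA_BUF_SZ is derived from MGA_DMA_BUF_ORDER, and MGA_DMA_BUF_NR is one short of the buffer count quoted in the comment. A stand-alone sanity check of the new numbers (a sketch, not driver code):

#include <stdio.h>

#define MGA_DMA_BUF_ORDER 16
#define MGA_DMA_BUF_SZ    (1 << MGA_DMA_BUF_ORDER)	/* 65536 bytes */
#define MGA_DMA_BUF_NR    31

int main(void)
{
	/* 31 usable buffers; the comment counts the full pool of 32. */
	printf("buffer size: %d bytes\n", MGA_DMA_BUF_SZ);
	printf("pool size  : %d bytes\n",
	       (MGA_DMA_BUF_NR + 1) * MGA_DMA_BUF_SZ);	/* 2097152 = 2 meg */
	return 0;
}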
@@ -159,19 +160,19 @@ typedef struct drm_mga_init {
int sarea_priv_offset;
int primary_size;
int warp_ucode_size;
- int frontOffset;
- int backOffset;
- int depthOffset;
- int textureOffset;
- int textureSize;
- int agpTextureOffset;
- int agpTextureSize;
- int cpp;
- int stride;
+ unsigned int frontOffset;
+ unsigned int backOffset;
+ unsigned int depthOffset;
+ unsigned int textureOffset;
+ unsigned int textureSize;
+ unsigned int agpTextureOffset;
+ unsigned int agpTextureSize;
+ unsigned int cpp;
+ unsigned int stride;
int sgram;
int chipset;
drm_mga_warp_index_t WarpIndex[MGA_MAX_WARP_PIPES];
- int mAccess;
+ unsigned int mAccess;
} drm_mga_init_t;
/* Warning: if you change the sarea structure, you must change the Xserver
@@ -180,7 +181,7 @@ typedef struct drm_mga_init {
typedef struct _drm_mga_tex_region {
unsigned char next, prev;
unsigned char in_use;
- int age;
+ unsigned int age;
} drm_mga_tex_region_t;
typedef struct _drm_mga_sarea {
@@ -219,9 +220,9 @@ typedef struct _drm_mga_sarea {
/* Counters for aging textures and for client-side throttling.
*/
- int last_enqueue; /* last time a buffer was enqueued */
- int last_dispatch; /* age of the most recently dispatched buffer */
- int last_quiescent; /* */
+ unsigned int last_enqueue; /* last time a buffer was enqueued */
+ unsigned int last_dispatch; /* age of the most recently dispatched buffer */
+ unsigned int last_quiescent; /* */
/* LRU lists for texture memory in agp space and on the card
@@ -237,9 +238,9 @@ typedef struct _drm_mga_sarea {
/* Device specific ioctls:
*/
typedef struct _drm_mga_clear {
- int clear_color;
- int clear_depth;
- int flags;
+ unsigned int clear_color;
+ unsigned int clear_depth;
+ unsigned int flags;
} drm_mga_clear_t;
typedef struct _drm_mga_swap {
@@ -258,4 +259,11 @@ typedef struct _drm_mga_vertex {
int discard; /* client finished with buffer? */
} drm_mga_vertex_t;
+typedef struct _drm_mga_indices {
+ int idx; /* buffer to queue */
+ unsigned int start;
+ unsigned int end;
+ int discard; /* client finished with buffer? */
+} drm_mga_indices_t;
+
#endif
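
drm_mga_indices_t mirrors drm_mga_vertex_t but submits a byte range (start/end offsets) of a DMA buffer as a vertex list. A hedged sketch of how user space might fill it in and hand it to the new DRM_IOCTL_MGA_INDICES ioctl (the file descriptor, buffer index, and offsets are placeholders):

#include <sys/ioctl.h>
#include "drm.h"	/* pulls in mga_drm.h and DRM_IOCTL_MGA_INDICES */

/* Sketch only: fd is an open DRM device, idx a buffer index obtained
 * from the DMA ioctl, start/end byte offsets into that buffer. */
static int mga_submit_indices(int fd, int idx, unsigned int start,
			      unsigned int end, int done_with_buf)
{
	drm_mga_indices_t ind;

	ind.idx     = idx;
	ind.start   = start;
	ind.end     = end;
	ind.discard = done_with_buf;

	return ioctl(fd, DRM_IOCTL_MGA_INDICES, &ind);
}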
diff --git a/linux/mga_drv.c b/linux/mga_drv.c
index b066fe9d..4b2c835f 100644
--- a/linux/mga_drv.c
+++ b/linux/mga_drv.c
@@ -111,6 +111,7 @@ static drm_ioctl_desc_t mga_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)] = { mga_iload, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_VERTEX)] = { mga_vertex, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_FLUSH)] = { mga_flush_ioctl, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MGA_INDICES)] = { mga_indices, 1, 0 },
};
#define MGA_IOCTL_COUNT DRM_ARRAY_SIZE(mga_ioctls)
diff --git a/linux/mga_drv.h b/linux/mga_drv.h
index fe6e4ef5..f217acb9 100644
--- a/linux/mga_drv.h
+++ b/linux/mga_drv.h
@@ -32,18 +32,21 @@
#ifndef _MGA_DRV_H_
#define _MGA_DRV_H_
+#define MGA_BUF_IN_USE 0
+#define MGA_BUF_SWAP_PENDING 1
+#define MGA_BUF_FORCE_FIRE 2
+#define MGA_BUF_NEEDS_OVERFLOW 3
+
typedef struct {
+ u32 buffer_status;
unsigned int num_dwords;
unsigned int max_dwords;
u32 *current_dma_ptr;
u32 *head;
u32 phys_head;
+ unsigned int prim_age;
int sec_used;
int idx;
- int swap_pending;
- u32 in_use;
- atomic_t force_fire;
- atomic_t needs_overflow;
} drm_mga_prim_buf_t;
typedef struct _drm_mga_freelist {
@@ -53,31 +56,33 @@ typedef struct _drm_mga_freelist {
struct _drm_mga_freelist *prev;
} drm_mga_freelist_t;
+#define MGA_IN_DISPATCH 0
+#define MGA_IN_FLUSH 1
+#define MGA_IN_WAIT 2
+#define MGA_IN_GETBUF 3
+
typedef struct _drm_mga_private {
+ u32 dispatch_status;
+ unsigned int next_prim_age;
+ __volatile__ unsigned int last_prim_age;
int reserved_map_idx;
int buffer_map_idx;
drm_mga_sarea_t *sarea_priv;
int primary_size;
int warp_ucode_size;
int chipset;
- int frontOffset;
- int backOffset;
- int depthOffset;
- int textureOffset;
- int textureSize;
+ unsigned int frontOffset;
+ unsigned int backOffset;
+ unsigned int depthOffset;
+ unsigned int textureOffset;
+ unsigned int textureSize;
int cpp;
- int stride;
+ unsigned int stride;
int sgram;
int use_agp;
drm_mga_warp_index_t WarpIndex[MGA_MAX_G400_PIPES];
unsigned int WarpPipe;
- __volatile__ unsigned long softrap_age;
- u32 dispatch_lock;
- atomic_t in_flush;
- atomic_t in_wait;
atomic_t pending_bufs;
- unsigned int last_sync_tag;
- unsigned int sync_tag;
void *status_page;
unsigned long real_status_page;
u8 *ioremap;
@@ -86,12 +91,11 @@ typedef struct _drm_mga_private {
drm_mga_prim_buf_t *last_prim;
drm_mga_prim_buf_t *current_prim;
int current_prim_idx;
- struct pci_dev *device;
drm_mga_freelist_t *head;
drm_mga_freelist_t *tail;
wait_queue_head_t flush_queue; /* Processes waiting until flush */
wait_queue_head_t wait_queue; /* Processes waiting until interrupt */
-
+ wait_queue_head_t buf_queue; /* Processes waiting for a free buf */
/* Some validated register values:
*/
u32 mAccess;
@@ -126,7 +130,7 @@ extern int mga_dma_init(struct inode *inode, struct file *filp,
extern int mga_dma_cleanup(drm_device_t *dev);
extern int mga_flush_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-
+extern void mga_flush_write_combine(void);
extern unsigned int mga_create_sync_tag(drm_device_t *dev);
extern drm_buf_t *mga_freelist_get(drm_device_t *dev);
extern int mga_freelist_put(drm_device_t *dev, drm_buf_t *buf);
@@ -156,6 +160,8 @@ extern int mga_iload(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int mga_vertex(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
+extern int mga_indices(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
/* mga_context.c */
extern int mga_resctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
@@ -186,6 +192,7 @@ typedef enum {
typedef struct {
drm_mga_freelist_t *my_freelist;
int discard;
+ int dispatched;
} drm_mga_buf_priv_t;
#define DWGREG0 0x1c00
@@ -206,15 +213,16 @@ typedef struct {
#define PRIM_OVERFLOW(dev, dev_priv, length) do { \
drm_mga_prim_buf_t *tmp_buf = \
- dev_priv->prim_bufs[dev_priv->current_prim_idx]; \
- if( tmp_buf->max_dwords - tmp_buf->num_dwords < length || \
- tmp_buf->sec_used > MGA_DMA_BUF_NR/2) { \
- atomic_set(&tmp_buf->force_fire, 1); \
- mga_advance_primary(dev); \
- mga_dma_schedule(dev, 1); \
- } else if( atomic_read(&tmp_buf->needs_overflow)) { \
- mga_advance_primary(dev); \
- mga_dma_schedule(dev, 1); \
+ dev_priv->prim_bufs[dev_priv->current_prim_idx]; \
+ if( test_bit(MGA_BUF_NEEDS_OVERFLOW, \
+ &tmp_buf->buffer_status)) { \
+ mga_advance_primary(dev); \
+ mga_dma_schedule(dev, 1); \
+ } else if( tmp_buf->max_dwords - tmp_buf->num_dwords < length ||\
+ tmp_buf->sec_used > MGA_DMA_BUF_NR/2) { \
+ set_bit(MGA_BUF_FORCE_FIRE, &tmp_buf->buffer_status); \
+ mga_advance_primary(dev); \
+ mga_dma_schedule(dev, 1); \
} \
} while(0)
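
The reworked PRIM_OVERFLOW checks the per-buffer status bits instead of atomics: a buffer flagged MGA_BUF_NEEDS_OVERFLOW is advanced unconditionally, otherwise it is force-fired only when it cannot take 'length' more dwords or has accumulated more than MGA_DMA_BUF_NR/2 secondary buffers. The same decision written out as a plain function, purely as a readability sketch of what the macro expands to:

/* Sketch of the PRIM_OVERFLOW decision; the driver keeps this as a
 * macro so it expands in place inside the dispatch functions. */
static void prim_overflow_sketch(drm_device_t *dev,
				 drm_mga_private_t *dev_priv, int length)
{
	drm_mga_prim_buf_t *buf =
		dev_priv->prim_bufs[dev_priv->current_prim_idx];

	if (test_bit(MGA_BUF_NEEDS_OVERFLOW, &buf->buffer_status)) {
		mga_advance_primary(dev);
		mga_dma_schedule(dev, 1);
	} else if (buf->max_dwords - buf->num_dwords < length ||
		   buf->sec_used > MGA_DMA_BUF_NR / 2) {
		set_bit(MGA_BUF_FORCE_FIRE, &buf->buffer_status);
		mga_advance_primary(dev);
		mga_dma_schedule(dev, 1);
	}
}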
@@ -266,6 +274,13 @@ drm_mga_prim_buf_t *tmp_buf = \
tmp_buf->sec_used++; \
} while (0)
+#define AGEBUF(dev_priv, buf_priv) do { \
+ drm_mga_prim_buf_t *tmp_buf = \
+ dev_priv->prim_bufs[dev_priv->current_prim_idx]; \
+ buf_priv->my_freelist->age = tmp_buf->prim_age; \
+} while (0)
+
+
#define PRIMOUTREG(reg, val) do { \
tempIndex[outcount]=ADRINDEX(reg); \
dma_ptr[1+outcount] = val; \
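
AGEBUF stamps a client buffer's freelist entry with the prim_age of the primary buffer it was queued behind; next_prim_age and last_prim_age in the private structure replace the old sync-tag scheme for deciding when a buffer can be recycled. A sketch of the resulting idle test, under the assumption that ages are compared directly (the helper name is hypothetical):

/* Sketch: a freelist entry is reusable once the hardware has retired
 * a primary buffer at least as new as the one that used it. */
static int mga_buf_is_idle(drm_mga_private_t *dev_priv,
			   drm_mga_freelist_t *entry)
{
	return entry->age <= dev_priv->last_prim_age;
}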
@@ -356,28 +371,61 @@ drm_mga_prim_buf_t *tmp_buf = \
#define MGAREG_YTOP 0x1c98
#define MGAREG_ZORG 0x1c0c
-#define DC_atype_rstr 0x10
-#define DC_atype_blk 0x40
#define PDEA_pagpxfer_enable 0x2
+
#define WIA_wmode_suspend 0x0
#define WIA_wmode_start 0x3
#define WIA_wagp_agp 0x4
-#define DC_opcod_trap 0x4
-#define DC_arzero_enable 0x1000
-#define DC_sgnzero_enable 0x2000
-#define DC_shftzero_enable 0x4000
-#define DC_bop_SHIFT 16
-#define DC_clipdis_enable 0x80000000
-#define DC_solid_enable 0x800
-#define DC_transc_enable 0x40000000
+
+#define DC_opcod_line_open 0x0
+#define DC_opcod_autoline_open 0x1
+#define DC_opcod_line_close 0x2
+#define DC_opcod_autoline_close 0x3
+#define DC_opcod_trap 0x4
+#define DC_opcod_texture_trap 0x6
#define DC_opcod_bitblt 0x8
-#define DC_atype_rpl 0x0
-#define DC_linear_xy 0x0
-#define DC_solid_disable 0x0
-#define DC_arzero_disable 0x0
-#define DC_bltmod_bfcol 0x4000000
-#define DC_pattern_disable 0x0
-#define DC_transc_disable 0x0
+#define DC_opcod_iload 0x9
+#define DC_atype_rpl 0x0
+#define DC_atype_rstr 0x10
+#define DC_atype_zi 0x30
+#define DC_atype_blk 0x40
+#define DC_atype_i 0x70
+#define DC_linear_xy 0x0
+#define DC_linear_linear 0x80
+#define DC_zmode_nozcmp 0x0
+#define DC_zmode_ze 0x200
+#define DC_zmode_zne 0x300
+#define DC_zmode_zlt 0x400
+#define DC_zmode_zlte 0x500
+#define DC_zmode_zgt 0x600
+#define DC_zmode_zgte 0x700
+#define DC_solid_disable 0x0
+#define DC_solid_enable 0x800
+#define DC_arzero_disable 0x0
+#define DC_arzero_enable 0x1000
+#define DC_sgnzero_disable 0x0
+#define DC_sgnzero_enable 0x2000
+#define DC_shftzero_disable 0x0
+#define DC_shftzero_enable 0x4000
+#define DC_bop_SHIFT 16
+#define DC_trans_SHIFT 20
+#define DC_bltmod_bmonolef 0x0
+#define DC_bltmod_bmonowf 0x8000000
+#define DC_bltmod_bplan 0x2000000
+#define DC_bltmod_bfcol 0x4000000
+#define DC_bltmod_bu32bgr 0x6000000
+#define DC_bltmod_bu32rgb 0xe000000
+#define DC_bltmod_bu24bgr 0x16000000
+#define DC_bltmod_bu24rgb 0x1e000000
+#define DC_pattern_disable 0x0
+#define DC_pattern_enable 0x20000000
+#define DC_transc_disable 0x0
+#define DC_transc_enable 0x40000000
+#define DC_clipdis_disable 0x0
+#define DC_clipdis_enable 0x80000000
+
+#define SETADD_mode_vertlist 0x0
+
#define MGA_CLEAR_CMD (DC_opcod_trap | DC_arzero_enable | \
DC_sgnzero_enable | DC_shftzero_enable | \
@@ -392,4 +440,8 @@ drm_mga_prim_buf_t *tmp_buf = \
DC_pattern_disable | DC_transc_disable | \
DC_clipdis_enable) \
+#define MGA_FLUSH_CMD (DC_opcod_texture_trap | (0xF << DC_trans_SHIFT) |\
+ DC_arzero_enable | DC_sgnzero_enable | \
+ DC_atype_i)
+
#endif
diff --git a/linux/mga_state.c b/linux/mga_state.c
index 0a50aa4f..3134b785 100644
--- a/linux/mga_state.c
+++ b/linux/mga_state.c
@@ -40,15 +40,23 @@ static void mgaEmitClipRect( drm_mga_private_t *dev_priv,
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int *regs = sarea_priv->ContextState;
PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
/* This takes 10 dwords */
PRIMGETPTR( dev_priv );
/* Force reset of dwgctl (eliminates clip disable) */
+#if 1
PRIMOUTREG( MGAREG_DMAPAD, 0 );
- PRIMOUTREG( MGAREG_DWGSYNC, dev_priv->last_sync_tag - 1 );
- PRIMOUTREG( MGAREG_DWGSYNC, dev_priv->last_sync_tag - 1 );
+ PRIMOUTREG( MGAREG_DWGSYNC, 0 );
+ PRIMOUTREG( MGAREG_DWGSYNC, 0 );
PRIMOUTREG( MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL] );
+#else
+ PRIMOUTREG( MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL] );
+ PRIMOUTREG( MGAREG_LEN + MGAREG_MGA_EXEC, 0x80000000 );
+ PRIMOUTREG( MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL] );
+ PRIMOUTREG( MGAREG_LEN + MGAREG_MGA_EXEC, 0x80000000 );
+#endif
PRIMOUTREG( MGAREG_DMAPAD, 0 );
PRIMOUTREG( MGAREG_CXBNDRY, ((box->x2)<<16)|(box->x1) );
@@ -63,6 +71,7 @@ static void mgaEmitContext(drm_mga_private_t *dev_priv )
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int *regs = sarea_priv->ContextState;
PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
/* This takes a max of 15 dwords */
PRIMGETPTR( dev_priv );
@@ -80,13 +89,13 @@ static void mgaEmitContext(drm_mga_private_t *dev_priv )
if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
PRIMOUTREG( MGAREG_WFLAG1, regs[MGA_CTXREG_WFLAG] );
PRIMOUTREG( MGAREG_TDUALSTAGE0, regs[MGA_CTXREG_TDUAL0] );
- PRIMOUTREG( MGAREG_TDUALSTAGE1, regs[MGA_CTXREG_TDUAL1] );
+ PRIMOUTREG( MGAREG_TDUALSTAGE1, regs[MGA_CTXREG_TDUAL1] );
PRIMOUTREG( MGAREG_FCOL, regs[MGA_CTXREG_FCOL] );
} else {
PRIMOUTREG( MGAREG_FCOL, regs[MGA_CTXREG_FCOL] );
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DMAPAD, 0);
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
}
PRIMADVANCE( dev_priv );
@@ -97,27 +106,28 @@ static void mgaG200EmitTex( drm_mga_private_t *dev_priv )
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int *regs = sarea_priv->TexState[0];
PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
PRIMGETPTR( dev_priv );
/* This takes 20 dwords */
- PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] );
- PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL] );
- PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER] );
- PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL] );
+ PRIMOUTREG( MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] );
+ PRIMOUTREG( MGAREG_TEXCTL, regs[MGA_TEXREG_CTL] );
+ PRIMOUTREG( MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER] );
+ PRIMOUTREG( MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL] );
- PRIMOUTREG(MGAREG_TEXORG, regs[MGA_TEXREG_ORG] );
- PRIMOUTREG(MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1] );
- PRIMOUTREG(MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2] );
- PRIMOUTREG(MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3] );
+ PRIMOUTREG( MGAREG_TEXORG, regs[MGA_TEXREG_ORG] );
+ PRIMOUTREG( MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1] );
+ PRIMOUTREG( MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2] );
+ PRIMOUTREG( MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3] );
- PRIMOUTREG(MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4] );
- PRIMOUTREG(MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH] );
- PRIMOUTREG(MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT] );
- PRIMOUTREG(0x2d00 + 24*4, regs[MGA_TEXREG_WIDTH] );
+ PRIMOUTREG( MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4] );
+ PRIMOUTREG( MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH] );
+ PRIMOUTREG( MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT] );
+ PRIMOUTREG( 0x2d00 + 24*4, regs[MGA_TEXREG_WIDTH] );
- PRIMOUTREG(0x2d00 + 34*4, regs[MGA_TEXREG_HEIGHT] );
+ PRIMOUTREG( 0x2d00 + 34*4, regs[MGA_TEXREG_HEIGHT] );
PRIMOUTREG( MGAREG_TEXTRANS, 0xffff );
PRIMOUTREG( MGAREG_TEXTRANSHIGH, 0xffff );
PRIMOUTREG( MGAREG_DMAPAD, 0 );
@@ -131,34 +141,35 @@ static void mgaG400EmitTex0( drm_mga_private_t *dev_priv )
unsigned int *regs = sarea_priv->TexState[0];
int multitex = sarea_priv->WarpPipe & MGA_T2;
PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
PRIMGETPTR( dev_priv );
/* This takes a max of 30 dwords */
- PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] );
- PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL] );
- PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER] );
- PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL] );
+ PRIMOUTREG( MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] );
+ PRIMOUTREG( MGAREG_TEXCTL, regs[MGA_TEXREG_CTL] );
+ PRIMOUTREG( MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER] );
+ PRIMOUTREG( MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL] );
- PRIMOUTREG(MGAREG_TEXORG, regs[MGA_TEXREG_ORG] );
- PRIMOUTREG(MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1] );
- PRIMOUTREG(MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2] );
- PRIMOUTREG(MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3] );
+ PRIMOUTREG( MGAREG_TEXORG, regs[MGA_TEXREG_ORG] );
+ PRIMOUTREG( MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1] );
+ PRIMOUTREG( MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2] );
+ PRIMOUTREG( MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3] );
- PRIMOUTREG(MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4] );
- PRIMOUTREG(MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH] );
- PRIMOUTREG(MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT] );
- PRIMOUTREG(0x2d00 + 49*4, 0);
+ PRIMOUTREG( MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4] );
+ PRIMOUTREG( MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH] );
+ PRIMOUTREG( MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT] );
+ PRIMOUTREG( 0x2d00 + 49*4, 0 );
- PRIMOUTREG(0x2d00 + 57*4, 0);
- PRIMOUTREG(0x2d00 + 53*4, 0);
- PRIMOUTREG(0x2d00 + 61*4, 0);
+ PRIMOUTREG( 0x2d00 + 57*4, 0 );
+ PRIMOUTREG( 0x2d00 + 53*4, 0 );
+ PRIMOUTREG( 0x2d00 + 61*4, 0 );
PRIMOUTREG( MGAREG_DMAPAD, 0 );
if (!multitex) {
- PRIMOUTREG(0x2d00 + 52*4, 0x40 );
- PRIMOUTREG(0x2d00 + 60*4, 0x40 );
+ PRIMOUTREG( 0x2d00 + 52*4, 0x40 );
+ PRIMOUTREG( 0x2d00 + 60*4, 0x40 );
PRIMOUTREG( MGAREG_DMAPAD, 0 );
PRIMOUTREG( MGAREG_DMAPAD, 0 );
}
@@ -178,85 +189,127 @@ static void mgaG400EmitTex1( drm_mga_private_t *dev_priv )
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int *regs = sarea_priv->TexState[1];
PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
- PRIMGETPTR(dev_priv);
+ PRIMGETPTR( dev_priv );
/* This takes 25 dwords */
- PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] | TMC_map1_enable);
- PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL] );
- PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER] );
- PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL] );
-
- PRIMOUTREG(MGAREG_TEXORG, regs[MGA_TEXREG_ORG] );
- PRIMOUTREG(MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1] );
- PRIMOUTREG(MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2] );
- PRIMOUTREG(MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3] );
-
- PRIMOUTREG(MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4] );
- PRIMOUTREG(MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH] );
- PRIMOUTREG(MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT] );
- PRIMOUTREG(0x2d00 + 49*4, 0);
-
- PRIMOUTREG(0x2d00 + 57*4, 0);
- PRIMOUTREG(0x2d00 + 53*4, 0);
- PRIMOUTREG(0x2d00 + 61*4, 0);
- PRIMOUTREG(0x2d00 + 52*4, regs[MGA_TEXREG_WIDTH] | 0x40 );
-
- PRIMOUTREG(0x2d00 + 60*4, regs[MGA_TEXREG_HEIGHT] | 0x40 );
+
+ PRIMOUTREG( MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] | TMC_map1_enable );
+ PRIMOUTREG( MGAREG_TEXCTL, regs[MGA_TEXREG_CTL] );
+ PRIMOUTREG( MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER] );
+ PRIMOUTREG( MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL] );
+
+ PRIMOUTREG( MGAREG_TEXORG, regs[MGA_TEXREG_ORG] );
+ PRIMOUTREG( MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1] );
+ PRIMOUTREG( MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2] );
+ PRIMOUTREG( MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3] );
+
+ PRIMOUTREG( MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4] );
+ PRIMOUTREG( MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH] );
+ PRIMOUTREG( MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT] );
+ PRIMOUTREG( 0x2d00 + 49*4, 0 );
+
+ PRIMOUTREG( 0x2d00 + 57*4, 0 );
+ PRIMOUTREG( 0x2d00 + 53*4, 0 );
+ PRIMOUTREG( 0x2d00 + 61*4, 0 );
+ PRIMOUTREG( 0x2d00 + 52*4, regs[MGA_TEXREG_WIDTH] | 0x40 );
+
+ PRIMOUTREG( 0x2d00 + 60*4, regs[MGA_TEXREG_HEIGHT] | 0x40 );
PRIMOUTREG( MGAREG_TEXTRANS, 0xffff );
PRIMOUTREG( MGAREG_TEXTRANSHIGH, 0xffff );
- PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] );
+ PRIMOUTREG( MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] );
+
+ PRIMADVANCE( dev_priv );
+}
+
+/* Required when switching from multitexturing to single texturing.
+ */
+static void mgaG400EmitTexFlush( drm_mga_private_t *dev_priv )
+{
+ PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ PRIMGETPTR( dev_priv );
+
+ /* This takes 15 dwords */
+
+ PRIMOUTREG( MGAREG_YDST, 0 );
+ PRIMOUTREG( MGAREG_FXLEFT, 0 );
+ PRIMOUTREG( MGAREG_FXRIGHT, 1 );
+ PRIMOUTREG( MGAREG_DWGCTL, MGA_FLUSH_CMD );
+
+ PRIMOUTREG( MGAREG_LEN + MGAREG_MGA_EXEC, 1 );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_DWGSYNC, 0x7000 );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+
+ PRIMOUTREG( MGAREG_TEXCTL2, 0 );
+ PRIMOUTREG( MGAREG_LEN + MGAREG_MGA_EXEC, 0 );
+ PRIMOUTREG( MGAREG_TEXCTL2, 0x80 );
+ PRIMOUTREG( MGAREG_LEN + MGAREG_MGA_EXEC, 0 );
PRIMADVANCE( dev_priv );
}
-static void mgaG400EmitPipe(drm_mga_private_t *dev_priv )
+static void mgaG400EmitPipe( drm_mga_private_t *dev_priv )
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int pipe = sarea_priv->WarpPipe;
float fParam = 12800.0f;
PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
- PRIMGETPTR(dev_priv);
+ PRIMGETPTR( dev_priv );
+
+ /* This takes 30 dwords */
- /* This takes 25 dwords */
-
/* Establish vertex size.
*/
if (pipe & MGA_T2) {
- PRIMOUTREG(MGAREG_WIADDR2, WIA_wmode_suspend);
- PRIMOUTREG(MGAREG_WVRTXSZ, 0x00001e09);
- PRIMOUTREG(MGAREG_WACCEPTSEQ, 0x1e000000);
- PRIMOUTREG(MGAREG_WFLAG, 0);
+ PRIMOUTREG( MGAREG_WIADDR2, WIA_wmode_suspend );
+ PRIMOUTREG( MGAREG_WVRTXSZ, 0x00001e09 );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+
+ PRIMOUTREG( MGAREG_WACCEPTSEQ, 0 );
+ PRIMOUTREG( MGAREG_WACCEPTSEQ, 0 );
+ PRIMOUTREG( MGAREG_WACCEPTSEQ, 0 );
+ PRIMOUTREG( MGAREG_WACCEPTSEQ, 0x1e000000 );
} else {
- PRIMOUTREG(MGAREG_WIADDR2, WIA_wmode_suspend);
- PRIMOUTREG(MGAREG_WVRTXSZ, 0x00001807);
- PRIMOUTREG(MGAREG_WACCEPTSEQ, 0x18000000);
- PRIMOUTREG(MGAREG_WFLAG, 0);
+ PRIMOUTREG( MGAREG_WIADDR2, WIA_wmode_suspend );
+ PRIMOUTREG( MGAREG_WVRTXSZ, 0x00001807 );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+
+ PRIMOUTREG( MGAREG_WACCEPTSEQ, 0 );
+ PRIMOUTREG( MGAREG_WACCEPTSEQ, 0 );
+ PRIMOUTREG( MGAREG_WACCEPTSEQ, 0 );
+ PRIMOUTREG( MGAREG_WACCEPTSEQ, 0x18000000 );
}
- PRIMOUTREG(MGAREG_WFLAG1, 0);
- PRIMOUTREG(0x2d00 + 56*4, *((u32 *)(&fParam)));
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG( MGAREG_WFLAG, 0 );
+ PRIMOUTREG( MGAREG_WFLAG1, 0 );
+ PRIMOUTREG( 0x2d00 + 56*4, *((u32 *)(&fParam)) );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
- PRIMOUTREG(0x2d00 + 49*4, 0); /* Tex stage 0 */
- PRIMOUTREG(0x2d00 + 57*4, 0); /* Tex stage 0 */
- PRIMOUTREG(0x2d00 + 53*4, 0); /* Tex stage 1 */
- PRIMOUTREG(0x2d00 + 61*4, 0); /* Tex stage 1 */
+ PRIMOUTREG( 0x2d00 + 49*4, 0 ); /* Tex stage 0 */
+ PRIMOUTREG( 0x2d00 + 57*4, 0 ); /* Tex stage 0 */
+ PRIMOUTREG( 0x2d00 + 53*4, 0 ); /* Tex stage 1 */
+ PRIMOUTREG( 0x2d00 + 61*4, 0 ); /* Tex stage 1 */
- PRIMOUTREG(0x2d00 + 54*4, 0x40); /* Tex stage 0 : w */
- PRIMOUTREG(0x2d00 + 62*4, 0x40); /* Tex stage 0 : h */
- PRIMOUTREG(0x2d00 + 52*4, 0x40); /* Tex stage 1 : w */
- PRIMOUTREG(0x2d00 + 60*4, 0x40); /* Tex stage 1 : h */
+ PRIMOUTREG( 0x2d00 + 54*4, 0x40 ); /* Tex stage 0 : w */
+ PRIMOUTREG( 0x2d00 + 62*4, 0x40 ); /* Tex stage 0 : h */
+ PRIMOUTREG( 0x2d00 + 52*4, 0x40 ); /* Tex stage 1 : w */
+ PRIMOUTREG( 0x2d00 + 60*4, 0x40 ); /* Tex stage 1 : h */
/* DMA padding required due to hw bug */
- PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
- PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
- PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
- PRIMOUTREG(MGAREG_WIADDR2, (__u32)(dev_priv->WarpIndex[pipe].phys_addr |
- WIA_wmode_start | WIA_wagp_agp));
- PRIMADVANCE(dev_priv);
+ PRIMOUTREG( MGAREG_DMAPAD, 0xffffffff );
+ PRIMOUTREG( MGAREG_DMAPAD, 0xffffffff );
+ PRIMOUTREG( MGAREG_DMAPAD, 0xffffffff );
+ PRIMOUTREG( MGAREG_WIADDR2, (u32)(dev_priv->WarpIndex[pipe].phys_addr |
+ WIA_wmode_start | WIA_wagp_agp) );
+ PRIMADVANCE( dev_priv );
}
static void mgaG200EmitPipe( drm_mga_private_t *dev_priv )
@@ -264,40 +317,45 @@ static void mgaG200EmitPipe( drm_mga_private_t *dev_priv )
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int pipe = sarea_priv->WarpPipe;
PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
- PRIMGETPTR(dev_priv);
+ PRIMGETPTR( dev_priv );
/* This takes 15 dwords */
- PRIMOUTREG(MGAREG_WIADDR, WIA_wmode_suspend);
- PRIMOUTREG(MGAREG_WVRTXSZ, 7);
- PRIMOUTREG(MGAREG_WFLAG, 0);
- PRIMOUTREG(0x2d00 + 24*4, 0); /* tex w/h */
+ PRIMOUTREG( MGAREG_WIADDR, WIA_wmode_suspend );
+ PRIMOUTREG( MGAREG_WVRTXSZ, 7 );
+ PRIMOUTREG( MGAREG_WFLAG, 0 );
+ PRIMOUTREG( 0x2d00 + 24*4, 0 ); /* tex w/h */
- PRIMOUTREG(0x2d00 + 25*4, 0x100);
- PRIMOUTREG(0x2d00 + 34*4, 0); /* tex w/h */
- PRIMOUTREG(0x2d00 + 42*4, 0xFFFF);
- PRIMOUTREG(0x2d00 + 60*4, 0xFFFF);
+ PRIMOUTREG( 0x2d00 + 25*4, 0x100 );
+ PRIMOUTREG( 0x2d00 + 34*4, 0 ); /* tex w/h */
+ PRIMOUTREG( 0x2d00 + 42*4, 0xFFFF );
+ PRIMOUTREG( 0x2d00 + 60*4, 0xFFFF );
/* DMA padding required due to hw bug */
- PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
- PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
- PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
- PRIMOUTREG(MGAREG_WIADDR, (__u32)(dev_priv->WarpIndex[pipe].phys_addr |
- WIA_wmode_start | WIA_wagp_agp));
+ PRIMOUTREG( MGAREG_DMAPAD, 0xffffffff );
+ PRIMOUTREG( MGAREG_DMAPAD, 0xffffffff );
+ PRIMOUTREG( MGAREG_DMAPAD, 0xffffffff );
+ PRIMOUTREG( MGAREG_WIADDR, (u32)(dev_priv->WarpIndex[pipe].phys_addr |
+ WIA_wmode_start | WIA_wagp_agp) );
- PRIMADVANCE(dev_priv);
+ PRIMADVANCE( dev_priv );
}
static void mgaEmitState( drm_mga_private_t *dev_priv )
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
+ DRM_DEBUG("%s\n", __FUNCTION__);
if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
int multitex = sarea_priv->WarpPipe & MGA_T2;
- if (sarea_priv->WarpPipe != dev_priv->WarpPipe) {
+ if (sarea_priv->WarpPipe != dev_priv->WarpPipe) {
+ if ((dev_priv->WarpPipe & MGA_T2) && !multitex) {
+ mgaG400EmitTexFlush( dev_priv );
+ }
mgaG400EmitPipe( dev_priv );
dev_priv->WarpPipe = sarea_priv->WarpPipe;
}
@@ -342,6 +400,8 @@ static int mgaVerifyContext(drm_mga_private_t *dev_priv )
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int *regs = sarea_priv->ContextState;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
if (regs[MGA_CTXREG_DSTORG] != dev_priv->frontOffset &&
regs[MGA_CTXREG_DSTORG] != dev_priv->backOffset) {
DRM_DEBUG("BAD DSTORG: %x (front %x, back %x)\n\n",
@@ -361,6 +421,8 @@ static int mgaVerifyTex(drm_mga_private_t *dev_priv,
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
if ((sarea_priv->TexState[unit][MGA_TEXREG_ORG] & 0x3) == 0x1) {
DRM_DEBUG("BAD TEXREG_ORG: %x, unit %d\n",
sarea_priv->TexState[unit][MGA_TEXREG_ORG],
@@ -378,6 +440,8 @@ static int mgaVerifyState( drm_mga_private_t *dev_priv )
unsigned int dirty = sarea_priv->dirty;
int rv = 0;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
@@ -408,6 +472,8 @@ static int mgaVerifyIload( drm_mga_private_t *dev_priv,
unsigned long bus_address,
unsigned int dstOrg, int length )
{
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
if(dstOrg < dev_priv->textureOffset ||
dstOrg + length >
(dev_priv->textureOffset + dev_priv->textureSize)) {
@@ -431,35 +497,34 @@ static void mga_dma_dispatch_tex_blit( drm_device_t *dev,
int use_agp = PDEA_pagpxfer_enable | 0x00000001;
u16 y2;
PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
y2 = length / 64;
- PRIM_OVERFLOW(dev, dev_priv, 30);
+ PRIM_OVERFLOW( dev, dev_priv, 30 );
PRIMGETPTR( dev_priv );
- dev_priv->last_sync_tag = mga_create_sync_tag(dev);
-
- PRIMOUTREG( MGAREG_DSTORG, destOrg);
- PRIMOUTREG( MGAREG_MACCESS, 0x00000000);
+ PRIMOUTREG( MGAREG_DSTORG, destOrg );
+ PRIMOUTREG( MGAREG_MACCESS, 0x00000000 );
DRM_DEBUG("srcorg : %lx\n", bus_address | use_agp);
- PRIMOUTREG( MGAREG_SRCORG, (u32) bus_address | use_agp);
- PRIMOUTREG( MGAREG_AR5, 64);
-
- PRIMOUTREG( MGAREG_PITCH, 64);
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DWGCTL, MGA_COPY_CMD);
-
- PRIMOUTREG(MGAREG_AR0, 63);
- PRIMOUTREG(MGAREG_AR3, 0);
- PRIMOUTREG(MGAREG_FXBNDRY, (63 << 16));
- PRIMOUTREG(MGAREG_YDSTLEN+MGAREG_MGA_EXEC, y2);
+ PRIMOUTREG( MGAREG_SRCORG, (u32) bus_address | use_agp );
+ PRIMOUTREG( MGAREG_AR5, 64 );
+
+ PRIMOUTREG( MGAREG_PITCH, 64 );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_DWGCTL, MGA_COPY_CMD );
+
+ PRIMOUTREG( MGAREG_AR0, 63 );
+ PRIMOUTREG( MGAREG_AR3, 0 );
+ PRIMOUTREG( MGAREG_FXBNDRY, (63 << 16) );
+ PRIMOUTREG( MGAREG_YDSTLEN+MGAREG_MGA_EXEC, y2 );
- PRIMOUTREG( MGAREG_SRCORG, 0);
- PRIMOUTREG( MGAREG_PITCH, dev_priv->stride / dev_priv->cpp);
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DWGSYNC, dev_priv->last_sync_tag);
- PRIMADVANCE(dev_priv);
+ PRIMOUTREG( MGAREG_SRCORG, 0 );
+ PRIMOUTREG( MGAREG_PITCH, dev_priv->stride / dev_priv->cpp );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMADVANCE( dev_priv );
}
static void mga_dma_dispatch_vertex(drm_device_t *dev,
@@ -474,33 +539,100 @@ static void mga_dma_dispatch_vertex(drm_device_t *dev,
int i = 0;
int primary_needed;
PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("dispatch vertex %d addr 0x%lx, "
"length 0x%x nbox %d dirty %x\n",
buf->idx, address, length,
sarea_priv->nbox, sarea_priv->dirty);
+ DRM_DEBUG("used : %d, total : %d\n", buf->used, buf->total);
+ if(sarea_priv->WarpPipe & MGA_T2) {
+ if ((buf->used/4) % 10)
+ DRM_DEBUG("Multitex Buf is not aligned properly!!!\n");
+ } else {
+ if ((buf->used/4) % 8)
+ DRM_DEBUG("Buf is not aligned properly!!!\n");
+ }
- dev_priv->last_sync_tag = mga_create_sync_tag(dev);
+ if (buf->used) {
+ /* WARNING: if you change any of the state functions verify
+ * these numbers (Overestimating this doesn't hurt).
+ */
+ buf_priv->dispatched = 1;
+ primary_needed = (30+15+15+30+25+
+ 10 +
+ 15 * MGA_NR_SAREA_CLIPRECTS);
+ PRIM_OVERFLOW(dev, dev_priv, primary_needed);
+ mgaEmitState( dev_priv );
+
+ do {
+ if (i < sarea_priv->nbox) {
+ DRM_DEBUG("idx %d Emit box %d/%d:"
+ "%d,%d - %d,%d\n",
+ buf->idx,
+ i, sarea_priv->nbox,
+ sarea_priv->boxes[i].x1,
+ sarea_priv->boxes[i].y1,
+ sarea_priv->boxes[i].x2,
+ sarea_priv->boxes[i].y2);
+
+ mgaEmitClipRect( dev_priv,
+ &sarea_priv->boxes[i] );
+ }
+
+ PRIMGETPTR( dev_priv );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_SECADDRESS,
+ ((u32)address) | TT_VERTEX );
+ PRIMOUTREG( MGAREG_SECEND,
+ (((u32)(address + length)) | use_agp) );
+ PRIMADVANCE( dev_priv );
+ } while (++i < sarea_priv->nbox);
+ }
- if (buf_priv->discard) {
- buf_priv->my_freelist->age = dev_priv->last_sync_tag;
+ if (buf_priv->discard) {
+ if (buf_priv->dispatched == 1) AGEBUF(dev_priv, buf_priv);
+ buf_priv->dispatched = 0;
mga_freelist_put(dev, buf);
}
- /* WARNING: if you change any of the state functions verify
- * these numbers (Overestimating this doesn't hurt).
- */
- primary_needed = (25+15+30+25+
- 10 +
- 15 * MGA_NR_SAREA_CLIPRECTS);
+}
- PRIM_OVERFLOW(dev, dev_priv, primary_needed);
- mgaEmitState( dev_priv );
+static void mga_dma_dispatch_indices(drm_device_t *dev,
+ drm_buf_t *buf,
+ unsigned int start,
+ unsigned int end)
+{
+ drm_mga_private_t *dev_priv = dev->dev_private;
+ drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int address = (unsigned int)buf->bus_address;
+ int use_agp = PDEA_pagpxfer_enable;
+ int i = 0;
+ int primary_needed;
+ PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ DRM_DEBUG("dispatch indices %d addr 0x%x, "
+ "start 0x%x end 0x%x nbox %d dirty %x\n",
+ buf->idx, address, start, end,
+ sarea_priv->nbox, sarea_priv->dirty);
+
+ if (start != end) {
+ /* WARNING: if you change any of the state functions verify
+ * these numbers (Overestimating this doesn't hurt).
+ */
+ buf_priv->dispatched = 1;
+ primary_needed = (25+15+30+25+
+ 10 +
+ 15 * MGA_NR_SAREA_CLIPRECTS);
+ PRIM_OVERFLOW( dev, dev_priv, primary_needed );
+ mgaEmitState( dev_priv );
- if (buf->used) {
do {
if (i < sarea_priv->nbox) {
DRM_DEBUG("idx %d Emit box %d/%d:"
@@ -515,25 +647,23 @@ static void mga_dma_dispatch_vertex(drm_device_t *dev,
mgaEmitClipRect( dev_priv,
&sarea_priv->boxes[i] );
}
-
- PRIMGETPTR(dev_priv);
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_SECADDRESS,
- ((__u32)address) | TT_VERTEX);
- PRIMOUTREG( MGAREG_SECEND,
- (((__u32)(address + length)) |
- use_agp));
+
+ PRIMGETPTR( dev_priv );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_SETUPADDRESS,
+ ((address + start) |
+ SETADD_mode_vertlist) );
+ PRIMOUTREG( MGAREG_SETUPEND,
+ ((address + end) | use_agp) );
PRIMADVANCE( dev_priv );
} while (++i < sarea_priv->nbox);
}
-
- PRIMGETPTR( dev_priv );
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DWGSYNC, dev_priv->last_sync_tag);
- PRIMADVANCE( dev_priv );
+ if (buf_priv->discard) {
+ if (buf_priv->dispatched == 1) AGEBUF(dev_priv, buf_priv);
+ buf_priv->dispatched = 0;
+ mga_freelist_put(dev, buf);
+ }
}
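
Both dispatch functions above size their PRIM_OVERFLOW reservation from the worst-case dword counts documented in the state emitters (the "This takes N dwords" comments) plus a per-cliprect allowance, which is why the warning asks you to re-verify the numbers whenever a state function changes. A worked check of the vertex-path figure, assuming MGA_NR_SAREA_CLIPRECTS is 8 (its definition is not in this hunk):

#include <stdio.h>

int main(void)
{
	int nr_cliprects   = 8;			/* assumed MGA_NR_SAREA_CLIPRECTS */
	int primary_needed = (30 + 15 + 15 + 30 + 25 +
			      10 +
			      15 * nr_cliprects);	/* same grouping as above */

	/* prints: 245 dwords (980 bytes) */
	printf("%d dwords (%d bytes)\n", primary_needed, primary_needed * 4);
	return 0;
}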
@@ -550,6 +680,7 @@ static void mga_dma_dispatch_clear( drm_device_t *dev, int flags,
int i;
int primary_needed;
PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
if ( dev_priv->sgram )
cmd = MGA_CLEAR_CMD | DC_atype_blk;
@@ -557,10 +688,9 @@ static void mga_dma_dispatch_clear( drm_device_t *dev, int flags,
cmd = MGA_CLEAR_CMD | DC_atype_rstr;
primary_needed = nbox * 70;
- if(primary_needed == 0) primary_needed = 70;
- PRIM_OVERFLOW(dev, dev_priv, primary_needed);
+ if (primary_needed == 0) primary_needed = 70;
+ PRIM_OVERFLOW( dev, dev_priv, primary_needed );
PRIMGETPTR( dev_priv );
- dev_priv->last_sync_tag = mga_create_sync_tag(dev);
for (i = 0 ; i < nbox ; i++) {
unsigned int height = pbox[i].y2 - pbox[i].y1;
@@ -571,55 +701,50 @@ static void mga_dma_dispatch_clear( drm_device_t *dev, int flags,
if ( flags & MGA_FRONT ) {
DRM_DEBUG("clear front\n");
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_YDSTLEN, (pbox[i].y1<<16)|height);
- PRIMOUTREG(MGAREG_FXBNDRY, (pbox[i].x2<<16)|pbox[i].x1);
-
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_FCOL, clear_color);
- PRIMOUTREG(MGAREG_DSTORG, dev_priv->frontOffset);
- PRIMOUTREG(MGAREG_DWGCTL+MGAREG_MGA_EXEC, cmd );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_YDSTLEN, (pbox[i].y1<<16)|height);
+ PRIMOUTREG( MGAREG_FXBNDRY, (pbox[i].x2<<16)|pbox[i].x1);
+
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_FCOL, clear_color );
+ PRIMOUTREG( MGAREG_DSTORG, dev_priv->frontOffset );
+ PRIMOUTREG( MGAREG_DWGCTL+MGAREG_MGA_EXEC, cmd );
}
if ( flags & MGA_BACK ) {
DRM_DEBUG("clear back\n");
PRIMOUTREG( MGAREG_DMAPAD, 0);
PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_YDSTLEN, (pbox[i].y1<<16)|height);
- PRIMOUTREG(MGAREG_FXBNDRY, (pbox[i].x2<<16)|pbox[i].x1);
+ PRIMOUTREG( MGAREG_YDSTLEN, (pbox[i].y1<<16)|height );
+ PRIMOUTREG( MGAREG_FXBNDRY, (pbox[i].x2<<16)|pbox[i].x1 );
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_FCOL, clear_color);
- PRIMOUTREG(MGAREG_DSTORG, dev_priv->backOffset);
- PRIMOUTREG(MGAREG_DWGCTL+MGAREG_MGA_EXEC, cmd );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_FCOL, clear_color );
+ PRIMOUTREG( MGAREG_DSTORG, dev_priv->backOffset );
+ PRIMOUTREG( MGAREG_DWGCTL+MGAREG_MGA_EXEC, cmd );
}
if ( flags & MGA_DEPTH ) {
DRM_DEBUG("clear depth\n");
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_YDSTLEN, (pbox[i].y1<<16)|height);
- PRIMOUTREG(MGAREG_FXBNDRY, (pbox[i].x2<<16)|pbox[i].x1);
-
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_FCOL, clear_zval);
- PRIMOUTREG(MGAREG_DSTORG, dev_priv->depthOffset);
- PRIMOUTREG(MGAREG_DWGCTL+MGAREG_MGA_EXEC, cmd );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_YDSTLEN, (pbox[i].y1<<16)|height );
+ PRIMOUTREG( MGAREG_FXBNDRY, (pbox[i].x2<<16)|pbox[i].x1 );
+
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_FCOL, clear_zval );
+ PRIMOUTREG( MGAREG_DSTORG, dev_priv->depthOffset );
+ PRIMOUTREG( MGAREG_DWGCTL+MGAREG_MGA_EXEC, cmd );
}
}
/* Force reset of DWGCTL */
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DMAPAD, 0);
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
PRIMOUTREG( MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL] );
-
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DWGSYNC, dev_priv->last_sync_tag);
- PRIMADVANCE(dev_priv);
+ PRIMADVANCE( dev_priv );
}
static void mga_dma_dispatch_swap( drm_device_t *dev )
@@ -632,23 +757,22 @@ static void mga_dma_dispatch_swap( drm_device_t *dev )
int i;
int primary_needed;
PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
primary_needed = nbox * 5;
primary_needed += 60;
PRIM_OVERFLOW(dev, dev_priv, primary_needed);
PRIMGETPTR( dev_priv );
- dev_priv->last_sync_tag = mga_create_sync_tag(dev);
+ PRIMOUTREG( MGAREG_DSTORG, dev_priv->frontOffset );
+ PRIMOUTREG( MGAREG_MACCESS, dev_priv->mAccess );
+ PRIMOUTREG( MGAREG_SRCORG, dev_priv->backOffset );
+ PRIMOUTREG( MGAREG_AR5, dev_priv->stride/2 );
- PRIMOUTREG(MGAREG_DSTORG, dev_priv->frontOffset);
- PRIMOUTREG(MGAREG_MACCESS, dev_priv->mAccess);
- PRIMOUTREG(MGAREG_SRCORG, dev_priv->backOffset);
- PRIMOUTREG(MGAREG_AR5, dev_priv->stride/2);
-
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DWGCTL, MGA_COPY_CMD);
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_DWGCTL, MGA_COPY_CMD );
for (i = 0 ; i < nbox; i++) {
unsigned int h = pbox[i].y2 - pbox[i].y1;
@@ -658,23 +782,19 @@ static void mga_dma_dispatch_swap( drm_device_t *dev )
pbox[i].x1, pbox[i].y1,
pbox[i].x2, pbox[i].y2);
- PRIMOUTREG(MGAREG_AR0, start + pbox[i].x2 - 1);
- PRIMOUTREG(MGAREG_AR3, start + pbox[i].x1);
- PRIMOUTREG(MGAREG_FXBNDRY, pbox[i].x1|((pbox[i].x2 - 1)<<16));
- PRIMOUTREG(MGAREG_YDSTLEN+MGAREG_MGA_EXEC, (pbox[i].y1<<16)|h);
+ PRIMOUTREG( MGAREG_AR0, start + pbox[i].x2 - 1 );
+ PRIMOUTREG( MGAREG_AR3, start + pbox[i].x1 );
+ PRIMOUTREG( MGAREG_FXBNDRY, pbox[i].x1|((pbox[i].x2 - 1)<<16) );
+ PRIMOUTREG( MGAREG_YDSTLEN+MGAREG_MGA_EXEC, (pbox[i].y1<<16)|h );
}
/* Force reset of DWGCTL */
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DMAPAD, 0);
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_DMAPAD, 0 );
+ PRIMOUTREG( MGAREG_SRCORG, 0 );
PRIMOUTREG( MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL] );
- PRIMOUTREG( MGAREG_SRCORG, 0);
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DWGSYNC, dev_priv->last_sync_tag);
- PRIMADVANCE(dev_priv);
+ PRIMADVANCE( dev_priv );
}
int mga_clear_bufs(struct inode *inode, struct file *filp,
@@ -684,12 +804,11 @@ int mga_clear_bufs(struct inode *inode, struct file *filp,
drm_device_t *dev = priv->dev;
drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- __volatile__ unsigned int *status =
- (__volatile__ unsigned int *)dev_priv->status_page;
drm_mga_clear_t clear;
copy_from_user_ret(&clear, (drm_mga_clear_t *)arg, sizeof(clear),
-EFAULT);
+ DRM_DEBUG("%s\n", __FUNCTION__);
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("mga_clear_bufs called without lock held\n");
@@ -706,8 +825,8 @@ int mga_clear_bufs(struct inode *inode, struct file *filp,
clear.clear_color,
clear.clear_depth );
PRIMUPDATE(dev_priv);
+ mga_flush_write_combine();
mga_dma_schedule(dev, 1);
- sarea_priv->last_dispatch = status[1];
return 0;
}
@@ -718,8 +837,7 @@ int mga_swap_bufs(struct inode *inode, struct file *filp,
drm_device_t *dev = priv->dev;
drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- __volatile__ unsigned int *status =
- (__volatile__ unsigned int *)dev_priv->status_page;
+ DRM_DEBUG("%s\n", __FUNCTION__);
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("mga_swap_bufs called without lock held\n");
@@ -734,10 +852,9 @@ int mga_swap_bufs(struct inode *inode, struct file *filp,
dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CTX;
mga_dma_dispatch_swap( dev );
PRIMUPDATE(dev_priv);
- set_bit(0, &dev_priv->current_prim->swap_pending);
- dev_priv->current_prim->swap_pending = 1;
+ set_bit(MGA_BUF_SWAP_PENDING, &dev_priv->current_prim->buffer_status);
+ mga_flush_write_combine();
mga_dma_schedule(dev, 1);
- sarea_priv->last_dispatch = status[1];
return 0;
}
@@ -749,12 +866,11 @@ int mga_iload(struct inode *inode, struct file *filp,
drm_device_dma_t *dma = dev->dma;
drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- __volatile__ unsigned int *status =
- (__volatile__ unsigned int *)dev_priv->status_page;
drm_buf_t *buf;
drm_mga_buf_priv_t *buf_priv;
drm_mga_iload_t iload;
unsigned long bus_address;
+ DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("Starting Iload\n");
copy_from_user_ret(&iload, (drm_mga_iload_t *)arg, sizeof(iload),
@@ -783,11 +899,11 @@ int mga_iload(struct inode *inode, struct file *filp,
mga_dma_dispatch_tex_blit(dev, bus_address, iload.length,
iload.destOrg);
- buf_priv->my_freelist->age = dev_priv->last_sync_tag;
+ AGEBUF(dev_priv, buf_priv);
buf_priv->discard = 1;
mga_freelist_put(dev, buf);
+ mga_flush_write_combine();
mga_dma_schedule(dev, 1);
- sarea_priv->last_dispatch = status[1];
return 0;
}
@@ -797,13 +913,11 @@ int mga_vertex(struct inode *inode, struct file *filp,
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- __volatile__ unsigned int *status =
- (__volatile__ unsigned int *)dev_priv->status_page;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
drm_mga_buf_priv_t *buf_priv;
drm_mga_vertex_t vertex;
+ DRM_DEBUG("%s\n", __FUNCTION__);
copy_from_user_ret(&vertex, (drm_mga_vertex_t *)arg, sizeof(vertex),
-EFAULT);
@@ -822,25 +936,75 @@ int mga_vertex(struct inode *inode, struct file *filp,
buf_priv->discard = vertex.discard;
if (!mgaVerifyState(dev_priv)) {
- if (vertex.discard) {
- buf_priv->my_freelist->age = dev_priv->last_sync_tag;
- mga_freelist_put(dev, buf);
+ if (vertex.discard) {
+ if(buf_priv->dispatched == 1) AGEBUF(dev_priv, buf_priv);
+ buf_priv->dispatched = 0;
+ mga_freelist_put(dev, buf);
}
+ DRM_DEBUG("bad state\n");
return -EINVAL;
}
mga_dma_dispatch_vertex(dev, buf);
PRIMUPDATE(dev_priv);
+ mga_flush_write_combine();
mga_dma_schedule(dev, 1);
- sarea_priv->last_dispatch = status[1];
return 0;
}
+
+int mga_indices(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_t *buf;
+ drm_mga_buf_priv_t *buf_priv;
+ drm_mga_indices_t indices;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ copy_from_user_ret(&indices, (drm_mga_indices_t *)arg, sizeof(indices),
+ -EFAULT);
+
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("mga_indices called without lock held\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("mga_indices\n");
+
+ buf = dma->buflist[ indices.idx ];
+ buf_priv = buf->dev_private;
+
+ buf_priv->discard = indices.discard;
+
+ if (!mgaVerifyState(dev_priv)) {
+ if (indices.discard) {
+ if(buf_priv->dispatched == 1) AGEBUF(dev_priv, buf_priv);
+ buf_priv->dispatched = 0;
+ mga_freelist_put(dev, buf);
+ }
+ return -EINVAL;
+ }
+
+ mga_dma_dispatch_indices(dev, buf, indices.start, indices.end);
+
+ PRIMUPDATE(dev_priv);
+ mga_flush_write_combine();
+ mga_dma_schedule(dev, 1);
+ return 0;
+}
+
+
+
static int mga_dma_get_buffers(drm_device_t *dev, drm_dma_t *d)
{
int i;
drm_buf_t *buf;
+ DRM_DEBUG("%s\n", __FUNCTION__);
for (i = d->granted_count; i < d->request_count; i++) {
buf = mga_freelist_get(dev);
@@ -865,12 +1029,9 @@ int mga_dma(struct inode *inode, struct file *filp, unsigned int cmd,
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
- drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- __volatile__ unsigned int *status =
- (__volatile__ unsigned int *)dev_priv->status_page;
int retcode = 0;
drm_dma_t d;
+ DRM_DEBUG("%s\n", __FUNCTION__);
copy_from_user_ret(&d, (drm_dma_t *)arg, sizeof(d), -EFAULT);
DRM_DEBUG("%d %d: %d send, %d req\n",
@@ -906,6 +1067,5 @@ int mga_dma(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_DEBUG("%d returning, granted = %d\n",
current->pid, d.granted_count);
copy_to_user_ret((drm_dma_t *)arg, &d, sizeof(d), -EFAULT);
- sarea_priv->last_dispatch = status[1];
return retcode;
}
diff --git a/linux/proc.c b/linux/proc.c
index bb8e18e0..ba6dee00 100644
--- a/linux/proc.c
+++ b/linux/proc.c
@@ -400,6 +400,7 @@ static int _drm_vma_info(char *buf, char **start, off_t offset, int len,
pgprot & _PAGE_GLOBAL ? 'g' : 'l' );
#endif
DRM_PROC_PRINT("\n");
+#if 0
for (i = vma->vm_start; i < vma->vm_end; i += PAGE_SIZE) {
pgd = pgd_offset(vma->vm_mm, i);
pmd = pmd_offset(pgd, i);
@@ -420,6 +421,7 @@ static int _drm_vma_info(char *buf, char **start, off_t offset, int len,
DRM_PROC_PRINT(" 0x%08lx\n", i);
}
}
+#endif
}
return len;
diff --git a/linux/vm.c b/linux/vm.c
index 534723a5..0b2b00e0 100644
--- a/linux/vm.c
+++ b/linux/vm.c
@@ -247,6 +247,18 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
/* Check for valid size. */
if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
+ vma->vm_flags &= VM_MAYWRITE;
+#if defined(__i386__)
+ pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
+#else
+ /* Ye gads this is ugly. With more thought
+ we could move this up higher and use
+ `protection_map' instead. */
+ vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
+ __pte(pgprot_val(vma->vm_page_prot)))));
+#endif
+ }
switch (map->type) {
case _DRM_FRAME_BUFFER:
@@ -266,6 +278,10 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
return -EAGAIN;
+ DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
+ " offset = 0x%lx\n",
+ map->type,
+ vma->vm_start, vma->vm_end, VM_OFFSET(vma));
vma->vm_ops = &drm_vm_ops;
break;
case _DRM_SHM:
@@ -278,19 +294,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
return -EINVAL; /* This should never happen. */
}
vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
- if (map->flags & _DRM_READ_ONLY) {
-#if defined(__i386__)
- pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
-#else
- /* Ye gads this is ugly. With more thought
- we could move this up higher and use
- `protection_map' instead. */
- vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
- __pte(pgprot_val(vma->vm_page_prot)))));
-#endif
- }
-
#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
/* In Linux 2.2.3 and above, this is
handled in do_mmap() in mm/mmap.c. */
diff --git a/shared-core/drm.h b/shared-core/drm.h
index 15491aee..c8c5581d 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -72,9 +72,10 @@ typedef struct drm_clip_rect {
unsigned short y2;
} drm_clip_rect_t;
-/* Seperate include files for the i810/mga specific structures */
+/* Separate include files for the i810/mga/r128 specific structures */
#include "mga_drm.h"
#include "i810_drm.h"
+#include "r128_drm.h"
typedef struct drm_version {
int version_major; /* Major version */
@@ -297,7 +298,7 @@ typedef struct drm_agp_info {
#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t)
#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t)
-#define DRM_IOCTL_GET_MAGIC DRM_IOW( 0x02, drm_auth_t)
+#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t)
#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t)
@@ -328,11 +329,11 @@ typedef struct drm_agp_info {
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
-#define DRM_IOCTL_AGP_ENABLE DRM_IOR( 0x32, drm_agp_mode_t)
-#define DRM_IOCTL_AGP_INFO DRM_IOW( 0x33, drm_agp_info_t)
+#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t)
+#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t)
#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t)
#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_BIND DRM_IOWR(0x36, drm_agp_binding_t)
+#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t)
#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t)
/* Mga specific ioctls */
@@ -342,12 +343,23 @@ typedef struct drm_agp_info {
#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x43, drm_mga_iload_t)
#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x44, drm_mga_vertex_t)
#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x45, drm_lock_t )
+#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t)
/* I810 specific ioctls */
#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t)
#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t)
-#define DRM_IOCTL_I810_DMA DRM_IOW( 0x42, drm_i810_general_t)
+#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t)
#define DRM_IOCTL_I810_FLUSH DRM_IO ( 0x43)
#define DRM_IOCTL_I810_GETAGE DRM_IO ( 0x44)
+#define DRM_IOCTL_I810_GETBUF DRM_IOW( 0x45, drm_i810_dma_t)
+#define DRM_IOCTL_I810_SWAP DRM_IO ( 0x46)
+
+/* Rage 128 specific ioctls */
+#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
+#define DRM_IOCTL_R128_RESET DRM_IO( 0x41)
+#define DRM_IOCTL_R128_FLUSH DRM_IO( 0x42)
+#define DRM_IOCTL_R128_CCEIDL DRM_IO( 0x43)
+#define DRM_IOCTL_R128_PACKET DRM_IOW( 0x44, drm_r128_packet_t)
+#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x45, drm_r128_vertex_t)
#endif
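
The _IOR/_IOW fixes in this header matter because the direction is encoded from the caller's point of view: DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AGP_INFO only return data to user space, while DRM_IOCTL_AGP_ENABLE and DRM_IOCTL_AGP_BIND only pass data in. A hedged user-space sketch of the two directions (fd and the mode value are placeholders):

#include <sys/ioctl.h>
#include "drm.h"

/* Sketch only: fd is an open DRM device node. */
static int drm_ioctl_directions(int fd)
{
	drm_auth_t auth;
	drm_agp_mode_t mode;

	/* _IOR: the kernel fills in auth.magic. */
	if (ioctl(fd, DRM_IOCTL_GET_MAGIC, &auth) < 0)
		return -1;

	/* _IOW: we pass the requested AGP mode down. */
	mode.mode = 1;	/* placeholder AGP mode value */
	return ioctl(fd, DRM_IOCTL_AGP_ENABLE, &mode);
}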
diff --git a/shared/drm.h b/shared/drm.h
index 15491aee..c8c5581d 100644
--- a/shared/drm.h
+++ b/shared/drm.h
@@ -72,9 +72,10 @@ typedef struct drm_clip_rect {
unsigned short y2;
} drm_clip_rect_t;
-/* Seperate include files for the i810/mga specific structures */
+/* Separate include files for the i810/mga/r128 specific structures */
#include "mga_drm.h"
#include "i810_drm.h"
+#include "r128_drm.h"
typedef struct drm_version {
int version_major; /* Major version */
@@ -297,7 +298,7 @@ typedef struct drm_agp_info {
#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t)
#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t)
-#define DRM_IOCTL_GET_MAGIC DRM_IOW( 0x02, drm_auth_t)
+#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t)
#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t)
@@ -328,11 +329,11 @@ typedef struct drm_agp_info {
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
-#define DRM_IOCTL_AGP_ENABLE DRM_IOR( 0x32, drm_agp_mode_t)
-#define DRM_IOCTL_AGP_INFO DRM_IOW( 0x33, drm_agp_info_t)
+#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t)
+#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t)
#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t)
#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_BIND DRM_IOWR(0x36, drm_agp_binding_t)
+#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t)
#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t)
/* Mga specific ioctls */
@@ -342,12 +343,23 @@ typedef struct drm_agp_info {
#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x43, drm_mga_iload_t)
#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x44, drm_mga_vertex_t)
#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x45, drm_lock_t )
+#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t)
/* I810 specific ioctls */
#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t)
#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t)
-#define DRM_IOCTL_I810_DMA DRM_IOW( 0x42, drm_i810_general_t)
+#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t)
#define DRM_IOCTL_I810_FLUSH DRM_IO ( 0x43)
#define DRM_IOCTL_I810_GETAGE DRM_IO ( 0x44)
+#define DRM_IOCTL_I810_GETBUF DRM_IOW( 0x45, drm_i810_dma_t)
+#define DRM_IOCTL_I810_SWAP DRM_IO ( 0x46)
+
+/* Rage 128 specific ioctls */
+#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
+#define DRM_IOCTL_R128_RESET DRM_IO( 0x41)
+#define DRM_IOCTL_R128_FLUSH DRM_IO( 0x42)
+#define DRM_IOCTL_R128_CCEIDL DRM_IO( 0x43)
+#define DRM_IOCTL_R128_PACKET DRM_IOW( 0x44, drm_r128_packet_t)
+#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x45, drm_r128_vertex_t)
#endif