author | Alan Hourihane <alanh@fairlite.demon.co.uk> | 2000-07-10 21:32:08 +0000 |
---|---|---|
committer | Alan Hourihane <alanh@fairlite.demon.co.uk> | 2000-07-10 21:32:08 +0000 |
commit | 7021c5bd521258a61204a84906a55497151e8c5a (patch) | |
tree | ec7bc209482c3c3695e595958f7af9a14def0e83 | |
parent | bb6d67806a31e24dc984764cc7ca6a5009f0afa3 (diff) | |
Import of XFree86 4.0.1
70 files changed, 14554 insertions, 2972 deletions
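The bsd/drm.h header added below defines the ioctl interface user space talks to, and drmstat.c exercises it through the xf86drm wrappers. As an orientation sketch only — the two-pass buffer sizing follows the drmGetVersion() convention rather than anything stated in this commit, and the device path and error handling are assumptions — querying the driver version with DRM_IOCTL_VERSION looks roughly like this:

```c
/* Hedged sketch: query the DRM driver version via DRM_IOCTL_VERSION from
 * bsd/drm.h.  Two-pass buffer sizing follows the drmGetVersion() convention;
 * treat the device path and error handling as assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "drm.h"                          /* bsd/drm.h from this import */

int main(void)
{
    drm_version_t v;
    int fd = open(DRM_DEV_DRM, O_RDWR);   /* DRM_DEV_DRM is "/dev/drm" */
    if (fd < 0) { perror("open"); return 1; }

    /* Pass 1: zero-length buffers; the driver reports the string lengths. */
    memset(&v, 0, sizeof(v));
    if (ioctl(fd, DRM_IOCTL_VERSION, &v) < 0) {
        perror("DRM_IOCTL_VERSION");
        close(fd);
        return 1;
    }

    /* Pass 2: hand the driver user-space buffers of the reported sizes
     * (calloc keeps the strings NUL-terminated). */
    v.name = calloc(1, v.name_len + 1);
    v.date = calloc(1, v.date_len + 1);
    v.desc = calloc(1, v.desc_len + 1);
    if (ioctl(fd, DRM_IOCTL_VERSION, &v) == 0)
        printf("%s %d.%d.%d (%s): %s\n", v.name,
               v.version_major, v.version_minor, v.version_patchlevel,
               v.date, v.desc);

    free(v.name); free(v.date); free(v.desc);
    close(fd);
    return 0;
}
```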
diff --git a/bsd-core/drmP.h b/bsd-core/drmP.h new file mode 100644 index 00000000..b62aff08 --- /dev/null +++ b/bsd-core/drmP.h @@ -0,0 +1,723 @@ +/* drmP.h -- Private header for Direct Rendering Manager -*- c -*- + * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com + * Revised: Tue Oct 12 08:51:07 1999 by faith@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmP.h,v 1.58 1999/08/30 13:05:00 faith Exp $ + * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/bsd/drm/kernel/drmP.h,v 1.1 2000/06/17 00:03:28 martin Exp $ + * + */ + +#ifndef _DRM_P_H_ +#define _DRM_P_H_ + +#ifdef _KERNEL +#include <sys/param.h> +#include <sys/queue.h> +#include <sys/malloc.h> +#include <sys/kernel.h> +#include <sys/module.h> +#include <sys/systm.h> +#include <sys/conf.h> +#include <sys/stat.h> +#include <sys/proc.h> +#include <sys/lock.h> +#include <sys/fcntl.h> +#include <sys/uio.h> +#include <sys/filio.h> +#include <sys/sysctl.h> +#include <sys/select.h> +#include <sys/bus.h> +#if __FreeBSD_version >= 500005 +#include <sys/taskqueue.h> +#endif + +#if __FreeBSD_version >= 500006 +#define DRM_AGP +#endif + +#ifdef DRM_AGP +#include <pci/agpvar.h> +#endif + +#include "drm.h" + +typedef u_int32_t atomic_t; +typedef u_int32_t cycles_t; +typedef u_int32_t spinlock_t; +#define atomic_set(p, v) (*(p) = (v)) +#define atomic_read(p) (*(p)) +#define atomic_inc(p) atomic_add_int(p, 1) +#define atomic_dec(p) atomic_subtract_int(p, 1) +#define atomic_add(n, p) atomic_add_int(p, n) +#define atomic_sub(n, p) atomic_subtract_int(p, n) + +/* Fake this */ +static __inline u_int32_t +test_and_set_bit(int b, volatile u_int32_t *p) +{ + int s = splhigh(); + u_int32_t m = 1<<b; + u_int32_t r = *p & m; + *p |= m; + splx(s); + return r; +} + +static __inline void +clear_bit(int b, volatile u_int32_t *p) +{ + atomic_clear_int(p + (b >> 5), 1 << (b & 0x1f)); +} + +static __inline void +set_bit(int b, volatile u_int32_t *p) +{ + atomic_set_int(p + (b >> 5), 1 << (b & 0x1f)); +} + +static __inline int +test_bit(int b, volatile u_int32_t *p) +{ + return p[b >> 5] & (1 << (b & 0x1f)); +} + +static __inline int +find_first_zero_bit(volatile u_int32_t *p, int max) +{ + int b; + + for (b = 0; b < max; b += 32) { + if (p[b >> 5] != ~0) { + for (;;) { + if ((p[b >> 5] & (1 << (b & 
0x1f))) == 0) + return b; + b++; + } + } + } + return max; +} + +#define spldrm() spltty() + +#define memset(p, v, s) bzero(p, s) + +/* + * Fake out the module macros for versions of FreeBSD where they don't + * exist. + */ +#if __FreeBSD_version < 500002 + +#define MODULE_VERSION(a,b) struct __hack +#define MODULE_DEPEND(a,b,c,d,e) struct __hack + +#endif + +#define DRM_DEBUG_CODE 2 /* Include debugging code (if > 1, then + also include looping detection. */ +#define DRM_DMA_HISTOGRAM 1 /* Make histogram of DMA latency. */ + +#define DRM_HASH_SIZE 16 /* Size of key hash table */ +#define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */ +#define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */ +#define DRM_LOOPING_LIMIT 5000000 +#define DRM_BSZ 1024 /* Buffer size for /dev/drm? output */ +#define DRM_TIME_SLICE (hz/20) /* Time slice for GLXContexts */ +#define DRM_LOCK_SLICE 1 /* Time slice for lock, in jiffies */ + +#define DRM_FLAG_DEBUG 0x01 +#define DRM_FLAG_NOCTX 0x02 + +#define DRM_MEM_DMA 0 +#define DRM_MEM_SAREA 1 +#define DRM_MEM_DRIVER 2 +#define DRM_MEM_MAGIC 3 +#define DRM_MEM_IOCTLS 4 +#define DRM_MEM_MAPS 5 +#define DRM_MEM_VMAS 6 +#define DRM_MEM_BUFS 7 +#define DRM_MEM_SEGS 8 +#define DRM_MEM_PAGES 9 +#define DRM_MEM_FILES 10 +#define DRM_MEM_QUEUES 11 +#define DRM_MEM_CMDS 12 +#define DRM_MEM_MAPPINGS 13 +#define DRM_MEM_BUFLISTS 14 +#define DRM_MEM_AGPLISTS 15 +#define DRM_MEM_TOTALAGP 16 +#define DRM_MEM_BOUNDAGP 17 +#define DRM_MEM_CTXBITMAP 18 + +#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) + + /* Backward compatibility section */ +#ifndef _PAGE_PWT + /* The name of _PAGE_WT was changed to + _PAGE_PWT in Linux 2.2.6 */ +#define _PAGE_PWT _PAGE_WT +#endif + +#define __drm_dummy_lock(lock) (*(__volatile__ unsigned int *)lock) +#define _DRM_CAS(lock,old,new,__ret) \ + do { \ + int __dummy; /* Can't mark eax as clobbered */ \ + __asm__ __volatile__( \ + "lock ; cmpxchg %4,%1\n\t" \ + "setnz %0" \ + : "=d" (__ret), \ + "=m" (__drm_dummy_lock(lock)), \ + "=a" (__dummy) \ + : "2" (old), \ + "r" (new)); \ + } while (0) + + + + /* Macros to make printk easier */ +#define DRM_ERROR(fmt, arg...) \ + printf("error: " "[" DRM_NAME ":" __FUNCTION__ "] *ERROR* " fmt , ##arg) +#define DRM_MEM_ERROR(area, fmt, arg...) \ + printf("error: " "[" DRM_NAME ":" __FUNCTION__ ":%s] *ERROR* " fmt , \ + drm_mem_stats[area].name , ##arg) +#define DRM_INFO(fmt, arg...) printf("info: " "[" DRM_NAME "] " fmt , ##arg) + +#if DRM_DEBUG_CODE +#define DRM_DEBUG(fmt, arg...) \ + do { \ + if (drm_flags&DRM_FLAG_DEBUG) \ + printf("[" DRM_NAME ":" __FUNCTION__ "] " fmt , \ + ##arg); \ + } while (0) +#else +#define DRM_DEBUG(fmt, arg...) do { } while (0) +#endif + +#define DRM_PROC_LIMIT (PAGE_SIZE-80) + +#define DRM_SYSCTL_PRINT(fmt, arg...) \ + snprintf(buf, sizeof(buf), fmt, ##arg); \ + error = SYSCTL_OUT(req, buf, strlen(buf)); \ + if (error) return error; + +#define DRM_SYSCTL_PRINT_RET(ret, fmt, arg...) 
\ + snprintf(buf, sizeof(buf), fmt, ##arg); \ + error = SYSCTL_OUT(req, buf, strlen(buf)); \ + if (error) { ret; return error; } + + /* Internal types and structures */ +#define DRM_ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) +#define DRM_MIN(a,b) ((a)<(b)?(a):(b)) +#define DRM_MAX(a,b) ((a)>(b)?(a):(b)) + +#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1)) +#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x)) +#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist) + +typedef struct drm_ioctl_desc { + d_ioctl_t *func; + int auth_needed; + int root_only; +} drm_ioctl_desc_t; + +typedef struct drm_devstate { + pid_t owner; /* X server pid holding x_lock */ + +} drm_devstate_t; + +typedef struct drm_magic_entry { + drm_magic_t magic; + struct drm_file *priv; + struct drm_magic_entry *next; +} drm_magic_entry_t; + +typedef struct drm_magic_head { + struct drm_magic_entry *head; + struct drm_magic_entry *tail; +} drm_magic_head_t; + +typedef struct drm_vma_entry { + struct vm_area_struct *vma; + struct drm_vma_entry *next; + pid_t pid; +} drm_vma_entry_t; + +typedef struct drm_buf { + int idx; /* Index into master buflist */ + int total; /* Buffer size */ + int order; /* log-base-2(total) */ + int used; /* Amount of buffer in use (for DMA) */ + unsigned long offset; /* Byte offset (used internally) */ + void *address; /* Address of buffer */ + unsigned long bus_address; /* Bus address of buffer */ + struct drm_buf *next; /* Kernel-only: used for free list */ + __volatile__ int waiting; /* On kernel DMA queue */ + __volatile__ int pending; /* On hardware DMA queue */ + int dma_wait; /* Processes waiting */ + pid_t pid; /* PID of holding process */ + int context; /* Kernel queue for this buffer */ + int while_locked;/* Dispatch this buffer while locked */ + enum { + DRM_LIST_NONE = 0, + DRM_LIST_FREE = 1, + DRM_LIST_WAIT = 2, + DRM_LIST_PEND = 3, + DRM_LIST_PRIO = 4, + DRM_LIST_RECLAIM = 5 + } list; /* Which list we're on */ + + void *dev_private; + int dev_priv_size; + +#if DRM_DMA_HISTOGRAM + struct timespec time_queued; /* Queued to kernel DMA queue */ + struct timespec time_dispatched; /* Dispatched to hardware */ + struct timespec time_completed; /* Completed by hardware */ + struct timespec time_freed; /* Back on freelist */ +#endif +} drm_buf_t; + +#if DRM_DMA_HISTOGRAM +#define DRM_DMA_HISTOGRAM_SLOTS 9 +#define DRM_DMA_HISTOGRAM_INITIAL 10 +#define DRM_DMA_HISTOGRAM_NEXT(current) ((current)*10) +typedef struct drm_histogram { + atomic_t total; + + atomic_t queued_to_dispatched[DRM_DMA_HISTOGRAM_SLOTS]; + atomic_t dispatched_to_completed[DRM_DMA_HISTOGRAM_SLOTS]; + atomic_t completed_to_freed[DRM_DMA_HISTOGRAM_SLOTS]; + + atomic_t queued_to_completed[DRM_DMA_HISTOGRAM_SLOTS]; + atomic_t queued_to_freed[DRM_DMA_HISTOGRAM_SLOTS]; + + atomic_t dma[DRM_DMA_HISTOGRAM_SLOTS]; + atomic_t schedule[DRM_DMA_HISTOGRAM_SLOTS]; + atomic_t ctx[DRM_DMA_HISTOGRAM_SLOTS]; + atomic_t lacq[DRM_DMA_HISTOGRAM_SLOTS]; + atomic_t lhld[DRM_DMA_HISTOGRAM_SLOTS]; +} drm_histogram_t; +#endif + + /* bufs is one longer than it has to be */ +typedef struct drm_waitlist { + int count; /* Number of possible buffers */ + drm_buf_t **bufs; /* List of pointers to buffers */ + drm_buf_t **rp; /* Read pointer */ + drm_buf_t **wp; /* Write pointer */ + drm_buf_t **end; /* End pointer */ + spinlock_t read_lock; + spinlock_t write_lock; +} drm_waitlist_t; + +typedef struct drm_freelist { + int initialized; /* Freelist in use */ + atomic_t count; /* Number of free buffers */ + 
drm_buf_t *next; /* End pointer */ + + int waiting; /* Processes waiting on free bufs */ + int low_mark; /* Low water mark */ + int high_mark; /* High water mark */ + atomic_t wfh; /* If waiting for high mark */ +} drm_freelist_t; + +typedef struct drm_buf_entry { + int buf_size; + int buf_count; + drm_buf_t *buflist; + int seg_count; + int page_order; + unsigned long *seglist; + + drm_freelist_t freelist; +} drm_buf_entry_t; + +typedef struct drm_hw_lock { + __volatile__ unsigned int lock; + char padding[60]; /* Pad to cache line */ +} drm_hw_lock_t; + +typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t; +typedef struct drm_file { + TAILQ_ENTRY(drm_file) link; + int authenticated; + int minor; + pid_t pid; + uid_t uid; + int refs; + drm_magic_t magic; + unsigned long ioctl_count; + struct drm_device *devXX; +} drm_file_t; + + +typedef struct drm_queue { + atomic_t use_count; /* Outstanding uses (+1) */ + atomic_t finalization; /* Finalization in progress */ + atomic_t block_count; /* Count of processes waiting */ + atomic_t block_read; /* Queue blocked for reads */ + int read_queue; /* Processes waiting on block_read */ + atomic_t block_write; /* Queue blocked for writes */ + int write_queue; /* Processes waiting on block_write */ + atomic_t total_queued; /* Total queued statistic */ + atomic_t total_flushed;/* Total flushes statistic */ + atomic_t total_locks; /* Total locks statistics */ + drm_ctx_flags_t flags; /* Context preserving and 2D-only */ + drm_waitlist_t waitlist; /* Pending buffers */ + int flush_queue; /* Processes waiting until flush */ +} drm_queue_t; + +typedef struct drm_lock_data { + drm_hw_lock_t *hw_lock; /* Hardware lock */ + pid_t pid; /* PID of lock holder (0=kernel) */ + int lock_queue; /* Queue of blocked processes */ + unsigned long lock_time; /* Time of last lock in jiffies */ +} drm_lock_data_t; + +typedef struct drm_device_dma { + /* Performance Counters */ + atomic_t total_prio; /* Total DRM_DMA_PRIORITY */ + atomic_t total_bytes; /* Total bytes DMA'd */ + atomic_t total_dmas; /* Total DMA buffers dispatched */ + + atomic_t total_missed_dma; /* Missed drm_do_dma */ + atomic_t total_missed_lock; /* Missed lock in drm_do_dma */ + atomic_t total_missed_free; /* Missed drm_free_this_buffer */ + atomic_t total_missed_sched;/* Missed drm_dma_schedule */ + + atomic_t total_tried; /* Tried next_buffer */ + atomic_t total_hit; /* Sent next_buffer */ + atomic_t total_lost; /* Lost interrupt */ + + drm_buf_entry_t bufs[DRM_MAX_ORDER+1]; + int buf_count; + drm_buf_t **buflist; /* Vector of pointers info bufs */ + int seg_count; + int page_count; + vm_offset_t *pagelist; + unsigned long byte_count; + enum { + _DRM_DMA_USE_AGP = 0x01 + } flags; + + /* DMA support */ + drm_buf_t *this_buffer; /* Buffer being sent */ + drm_buf_t *next_buffer; /* Selected buffer to send */ + drm_queue_t *next_queue; /* Queue from which buffer selected*/ + int waiting; /* Processes waiting on free bufs */ +} drm_device_dma_t; + +#ifdef DRM_AGP + +typedef struct drm_agp_mem { + void *handle; + unsigned long bound; /* address */ + int pages; + struct drm_agp_mem *prev; + struct drm_agp_mem *next; +} drm_agp_mem_t; + +typedef struct drm_agp_head { + device_t agpdev; + struct agp_info info; + const char *chipset; + drm_agp_mem_t *memory; + unsigned long mode; + int enabled; + int acquired; + unsigned long base; + int agp_mtrr; +} drm_agp_head_t; + +#endif + +typedef struct drm_device { + const char *name; /* Simple driver name */ + char *unique; /* Unique identifier: e.g., busid */ + 
int unique_len; /* Length of unique field */ + device_t device; /* Device instance from newbus */ + dev_t devnode; /* Device number for mknod */ + char *devname; /* For /proc/interrupts */ + + int blocked; /* Blocked due to VC switch? */ + int flags; /* Flags to open(2) */ + int writable; /* Opened with FWRITE */ + struct proc_dir_entry *root; /* Root for this device's entries */ + + /* Locks */ + struct simplelock count_lock; /* For inuse, open_count, buf_use */ + struct lock dev_lock; /* For others */ + + /* Usage Counters */ + int open_count; /* Outstanding files open */ + atomic_t ioctl_count; /* Outstanding IOCTLs pending */ + atomic_t vma_count; /* Outstanding vma areas open */ + int buf_use; /* Buffers in use -- cannot alloc */ + atomic_t buf_alloc; /* Buffer allocation in progress */ + + /* Performance Counters */ + atomic_t total_open; + atomic_t total_close; + atomic_t total_ioctl; + atomic_t total_irq; /* Total interruptions */ + atomic_t total_ctx; /* Total context switches */ + + atomic_t total_locks; + atomic_t total_unlocks; + atomic_t total_contends; + atomic_t total_sleeps; + + /* Authentication */ + drm_file_list_t files; + drm_magic_head_t magiclist[DRM_HASH_SIZE]; + + /* Memory management */ + drm_map_t **maplist; /* Vector of pointers to regions */ + int map_count; /* Number of mappable regions */ + + drm_vma_entry_t *vmalist; /* List of vmas (for debugging) */ + drm_lock_data_t lock; /* Information on hardware lock */ + + /* DMA queues (contexts) */ + int queue_count; /* Number of active DMA queues */ + int queue_reserved; /* Number of reserved DMA queues */ + int queue_slots; /* Actual length of queuelist */ + drm_queue_t **queuelist; /* Vector of pointers to DMA queues */ + drm_device_dma_t *dma; /* Optional pointer for DMA support */ + + /* Context support */ + struct resource *irq; /* Interrupt used by board */ + void *irqh; /* Handle from bus_setup_intr */ + __volatile__ int context_flag; /* Context swapping flag */ + __volatile__ int interrupt_flag;/* Interruption handler flag */ + __volatile__ int dma_flag; /* DMA dispatch flag */ + struct callout timer; /* Timer for delaying ctx switch */ + int context_wait; /* Processes waiting on ctx switch */ + int last_checked; /* Last context checked for DMA */ + int last_context; /* Last current context */ + int last_switch; /* Time at last context switch */ +#if __FreeBSD_version >= 500005 + struct task task; +#endif + struct timespec ctx_start; + struct timespec lck_start; +#if DRM_DMA_HISTOGRAM + drm_histogram_t histo; +#endif + + /* Callback to X server for context switch + and for heavy-handed reset. */ + char buf[DRM_BSZ]; /* Output buffer */ + char *buf_rp; /* Read pointer */ + char *buf_wp; /* Write pointer */ + char *buf_end; /* End pointer */ + struct sigio *buf_sigio; /* Processes waiting for SIGIO */ + struct selinfo buf_sel; /* Workspace for select/poll */ + int buf_readers; /* Processes waiting to read */ + int buf_writers; /* Processes waiting to ctx switch */ + int buf_selecting; /* True if poll sleeper */ + + /* Sysctl support */ + struct drm_sysctl_info *sysctl; + +#ifdef DRM_AGP + drm_agp_head_t *agp; +#endif + u_int32_t *ctx_bitmap; + void *dev_private; +} drm_device_t; + + + /* Internal function definitions */ + + /* Misc. 
support (init.c) */ +extern int drm_flags; +extern void drm_parse_options(char *s); + + + /* Device support (fops.c) */ +extern drm_file_t *drm_find_file_by_proc(drm_device_t *dev, struct proc *p); +extern int drm_open_helper(dev_t kdev, int flags, int fmt, struct proc *p, + drm_device_t *dev); +extern d_close_t drm_close; +extern d_read_t drm_read; +extern d_write_t drm_write; +extern d_poll_t drm_poll; +extern int drm_fsetown(dev_t kdev, u_long cmd, caddr_t data, + int flags, struct proc *p); +extern int drm_fgetown(dev_t kdev, u_long cmd, caddr_t data, + int flags, struct proc *p); +extern int drm_write_string(drm_device_t *dev, const char *s); + +#if 0 + /* Mapping support (vm.c) */ +extern unsigned long drm_vm_nopage(struct vm_area_struct *vma, + unsigned long address, + int write_access); +extern unsigned long drm_vm_shm_nopage(struct vm_area_struct *vma, + unsigned long address, + int write_access); +extern unsigned long drm_vm_dma_nopage(struct vm_area_struct *vma, + unsigned long address, + int write_access); +extern void drm_vm_open(struct vm_area_struct *vma); +extern void drm_vm_close(struct vm_area_struct *vma); +extern int drm_mmap_dma(struct file *filp, + struct vm_area_struct *vma); +#endif +extern d_mmap_t drm_mmap; + + /* Proc support (proc.c) */ +extern int drm_sysctl_init(drm_device_t *dev); +extern int drm_sysctl_cleanup(drm_device_t *dev); + + /* Memory management support (memory.c) */ +extern void drm_mem_init(void); +extern int drm_mem_info SYSCTL_HANDLER_ARGS; +extern void *drm_alloc(size_t size, int area); +extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, + int area); +extern char *drm_strdup(const char *s, int area); +extern void drm_strfree(char *s, int area); +extern void drm_free(void *pt, size_t size, int area); +extern unsigned long drm_alloc_pages(int order, int area); +extern void drm_free_pages(unsigned long address, int order, + int area); +extern void *drm_ioremap(unsigned long offset, unsigned long size); +extern void drm_ioremapfree(void *pt, unsigned long size); + +#ifdef DRM_AGP +extern void *drm_alloc_agp(int pages, u_int32_t type); +extern int drm_free_agp(void *handle, int pages); +extern int drm_bind_agp(void *handle, unsigned int start); +extern int drm_unbind_agp(void *handle); +#endif + + /* Buffer management support (bufs.c) */ +extern int drm_order(unsigned long size); +extern d_ioctl_t drm_addmap; +extern d_ioctl_t drm_addbufs; +extern d_ioctl_t drm_infobufs; +extern d_ioctl_t drm_markbufs; +extern d_ioctl_t drm_freebufs; +extern d_ioctl_t drm_mapbufs; + + + /* Buffer list management support (lists.c) */ +extern int drm_waitlist_create(drm_waitlist_t *bl, int count); +extern int drm_waitlist_destroy(drm_waitlist_t *bl); +extern int drm_waitlist_put(drm_waitlist_t *bl, drm_buf_t *buf); +extern drm_buf_t *drm_waitlist_get(drm_waitlist_t *bl); + +extern int drm_freelist_create(drm_freelist_t *bl, int count); +extern int drm_freelist_destroy(drm_freelist_t *bl); +extern int drm_freelist_put(drm_device_t *dev, drm_freelist_t *bl, + drm_buf_t *buf); +extern drm_buf_t *drm_freelist_get(drm_freelist_t *bl, int block); + + /* DMA support (gen_dma.c) */ +extern void drm_dma_setup(drm_device_t *dev); +extern void drm_dma_takedown(drm_device_t *dev); +extern void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf); +extern void drm_reclaim_buffers(drm_device_t *dev, pid_t pid); +extern int drm_context_switch(drm_device_t *dev, int old, int new); +extern int drm_context_switch_complete(drm_device_t *dev, int new); +extern void 
drm_wakeup(drm_device_t *dev, drm_buf_t *buf); +extern void drm_clear_next_buffer(drm_device_t *dev); +extern int drm_select_queue(drm_device_t *dev, + void (*wrapper)(void *)); +extern int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *dma); +extern int drm_dma_get_buffers(drm_device_t *dev, drm_dma_t *dma); +#if DRM_DMA_HISTOGRAM +extern int drm_histogram_slot(struct timespec *ts); +extern void drm_histogram_compute(drm_device_t *dev, drm_buf_t *buf); +#endif + + + /* Misc. IOCTL support (ioctl.c) */ +extern d_ioctl_t drm_irq_busid; +extern d_ioctl_t drm_getunique; +extern d_ioctl_t drm_setunique; + + + /* Context IOCTL support (context.c) */ +extern d_ioctl_t drm_resctx; +extern d_ioctl_t drm_addctx; +extern d_ioctl_t drm_modctx; +extern d_ioctl_t drm_getctx; +extern d_ioctl_t drm_switchctx; +extern d_ioctl_t drm_newctx; +extern d_ioctl_t drm_rmctx; + + + /* Drawable IOCTL support (drawable.c) */ +extern d_ioctl_t drm_adddraw; +extern d_ioctl_t drm_rmdraw; + + + /* Authentication IOCTL support (auth.c) */ +extern int drm_add_magic(drm_device_t *dev, drm_file_t *priv, + drm_magic_t magic); +extern int drm_remove_magic(drm_device_t *dev, drm_magic_t magic); +extern d_ioctl_t drm_getmagic; +extern d_ioctl_t drm_authmagic; + + + /* Locking IOCTL support (lock.c) */ +extern d_ioctl_t drm_block; +extern d_ioctl_t drm_unblock; +extern int drm_lock_take(__volatile__ unsigned int *lock, + unsigned int context); +extern int drm_lock_transfer(drm_device_t *dev, + __volatile__ unsigned int *lock, + unsigned int context); +extern int drm_lock_free(drm_device_t *dev, + __volatile__ unsigned int *lock, + unsigned int context); +extern d_ioctl_t drm_finish; +extern int drm_flush_unblock(drm_device_t *dev, int context, + drm_lock_flags_t flags); +extern int drm_flush_block_and_flush(drm_device_t *dev, int context, + drm_lock_flags_t flags); + + /* Context Bitmap support (ctxbitmap.c) */ +extern int drm_ctxbitmap_init(drm_device_t *dev); +extern void drm_ctxbitmap_cleanup(drm_device_t *dev); +extern int drm_ctxbitmap_next(drm_device_t *dev); +extern void drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle); + +#ifdef DRM_AGP + /* AGP/GART support (agpsupport.c) */ +extern drm_agp_head_t *drm_agp_init(void); +extern d_ioctl_t drm_agp_acquire; +extern d_ioctl_t drm_agp_release; +extern d_ioctl_t drm_agp_enable; +extern d_ioctl_t drm_agp_info; +extern d_ioctl_t drm_agp_alloc; +extern d_ioctl_t drm_agp_free; +extern d_ioctl_t drm_agp_unbind; +extern d_ioctl_t drm_agp_bind; +#endif +#endif +#endif diff --git a/bsd/Imakefile b/bsd/Imakefile new file mode 100644 index 00000000..7d33c205 --- /dev/null +++ b/bsd/Imakefile @@ -0,0 +1,31 @@ +XCOMM $XFree86: xc/programs/Xserver/hw/xfree86/os-support/bsd/drm/kernel/Imakefile,v 1.1 2000/06/17 00:03:28 martin Exp $ +XCOMM $PI$ + +#include <Server.tmpl> + +LinkSourceFile(xf86drm.c,..) +LinkSourceFile(xf86drmHash.c,..) +LinkSourceFile(xf86drmRandom.c,..) +LinkSourceFile(xf86drmSL.c,..) +LinkSourceFile(xf86drm.h,$(XF86OSSRC)) +LinkSourceFile(xf86_OSproc.h,$(XF86OSSRC)) +LinkSourceFile(sigio.c,$(XF86OSSRC)/shared) + +XCOMM This is a kludge until we determine how best to build the +XCOMM kernel-specific device driver. This allows us to continue +XCOMM to maintain the single Makefile.bsd with kernel-specific +XCOMM support. Later, we can move to a different Imakefile. 
+ +#if BuildXF86DRI && BuildXF86DRM +all:: + $(MAKE) -f Makefile.bsd + +install:: + $(MAKE) -f Makefile.bsd install +#else +all:: + echo 'Use "make -f Makefile.bsd" to manually build drm.o' +#endif + +clean:: + $(MAKE) -f Makefile.bsd clean diff --git a/bsd/drm.h b/bsd/drm.h new file mode 100644 index 00000000..ddad1be7 --- /dev/null +++ b/bsd/drm.h @@ -0,0 +1,362 @@ +/* drm.h -- Header for Direct Rendering Manager -*- c -*- + * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> + * + * Acknowledgements: + * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg. + * + */ + +#ifndef _DRM_H_ +#define _DRM_H_ + +#include <sys/ioccom.h> /* For _IO* macros */ + +#define DRM_DEV_DRM "/dev/drm" +#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP) +#define DRM_DEV_UID 0 +#define DRM_DEV_GID 0 + + +#define DRM_NAME "drm" /* Name in kernel, /dev */ +#define DRM_MIN_ORDER 5 /* At least 2^5 bytes = 32 bytes */ +#define DRM_MAX_ORDER 22 /* Up to 2^22 bytes = 4MB */ +#define DRM_RAM_PERCENT 10 /* How much system ram can we lock? 
*/ + +#define _DRM_LOCK_HELD 0x80000000 /* Hardware lock is held */ +#define _DRM_LOCK_CONT 0x40000000 /* Hardware lock is contended */ +#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD) +#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT) +#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) + +typedef unsigned long drm_handle_t; +typedef unsigned int drm_context_t; +typedef unsigned int drm_drawable_t; +typedef unsigned int drm_magic_t; + +/* Warning: If you change this structure, make sure you change + * XF86DRIClipRectRec in the server as well */ + +typedef struct drm_clip_rect { + unsigned short x1; + unsigned short y1; + unsigned short x2; + unsigned short y2; +} drm_clip_rect_t; + +/* Seperate include files for the i810/mga/r128 specific structures */ +#include "mga_drm.h" +#include "i810_drm.h" +#include "r128_drm.h" + +typedef struct drm_version { + int version_major; /* Major version */ + int version_minor; /* Minor version */ + int version_patchlevel;/* Patch level */ + size_t name_len; /* Length of name buffer */ + char *name; /* Name of driver */ + size_t date_len; /* Length of date buffer */ + char *date; /* User-space buffer to hold date */ + size_t desc_len; /* Length of desc buffer */ + char *desc; /* User-space buffer to hold desc */ +} drm_version_t; + +typedef struct drm_unique { + size_t unique_len; /* Length of unique */ + char *unique; /* Unique name for driver instantiation */ +} drm_unique_t; + +typedef struct drm_list { + int count; /* Length of user-space structures */ + drm_version_t *version; +} drm_list_t; + +typedef struct drm_block { + int unused; +} drm_block_t; + +typedef struct drm_control { + enum { + DRM_ADD_COMMAND, + DRM_RM_COMMAND, + DRM_INST_HANDLER, + DRM_UNINST_HANDLER + } func; + int irq; +} drm_control_t; + +typedef enum drm_map_type { + _DRM_FRAME_BUFFER = 0, /* WC (no caching), no core dump */ + _DRM_REGISTERS = 1, /* no caching, no core dump */ + _DRM_SHM = 2, /* shared, cached */ + _DRM_AGP = 3 /* AGP/GART */ +} drm_map_type_t; + +typedef enum drm_map_flags { + _DRM_RESTRICTED = 0x01, /* Cannot be mapped to user-virtual */ + _DRM_READ_ONLY = 0x02, + _DRM_LOCKED = 0x04, /* shared, cached, locked */ + _DRM_KERNEL = 0x08, /* kernel requires access */ + _DRM_WRITE_COMBINING = 0x10, /* use write-combining if available */ + _DRM_CONTAINS_LOCK = 0x20 /* SHM page that contains lock */ +} drm_map_flags_t; + +typedef struct drm_map { + unsigned long offset; /* Requested physical address (0 for SAREA)*/ + unsigned long size; /* Requested physical size (bytes) */ + drm_map_type_t type; /* Type of memory to map */ + drm_map_flags_t flags; /* Flags */ + void *handle; /* User-space: "Handle" to pass to mmap */ + /* Kernel-space: kernel-virtual address */ + int mtrr; /* MTRR slot used */ + /* Private data */ +} drm_map_t; + +typedef enum drm_lock_flags { + _DRM_LOCK_READY = 0x01, /* Wait until hardware is ready for DMA */ + _DRM_LOCK_QUIESCENT = 0x02, /* Wait until hardware quiescent */ + _DRM_LOCK_FLUSH = 0x04, /* Flush this context's DMA queue first */ + _DRM_LOCK_FLUSH_ALL = 0x08, /* Flush all DMA queues first */ + /* These *HALT* flags aren't supported yet + -- they will be used to support the + full-screen DGA-like mode. 
*/ + _DRM_HALT_ALL_QUEUES = 0x10, /* Halt all current and future queues */ + _DRM_HALT_CUR_QUEUES = 0x20 /* Halt all current queues */ +} drm_lock_flags_t; + +typedef struct drm_lock { + int context; + drm_lock_flags_t flags; +} drm_lock_t; + +typedef enum drm_dma_flags { /* These values *MUST* match xf86drm.h */ + /* Flags for DMA buffer dispatch */ + _DRM_DMA_BLOCK = 0x01, /* Block until buffer dispatched. + Note, the buffer may not yet have + been processed by the hardware -- + getting a hardware lock with the + hardware quiescent will ensure + that the buffer has been + processed. */ + _DRM_DMA_WHILE_LOCKED = 0x02, /* Dispatch while lock held */ + _DRM_DMA_PRIORITY = 0x04, /* High priority dispatch */ + + /* Flags for DMA buffer request */ + _DRM_DMA_WAIT = 0x10, /* Wait for free buffers */ + _DRM_DMA_SMALLER_OK = 0x20, /* Smaller-than-requested buffers ok */ + _DRM_DMA_LARGER_OK = 0x40 /* Larger-than-requested buffers ok */ +} drm_dma_flags_t; + +typedef struct drm_buf_desc { + int count; /* Number of buffers of this size */ + int size; /* Size in bytes */ + int low_mark; /* Low water mark */ + int high_mark; /* High water mark */ + enum { + _DRM_PAGE_ALIGN = 0x01, /* Align on page boundaries for DMA */ + _DRM_AGP_BUFFER = 0x02 /* Buffer is in agp space */ + } flags; + unsigned long agp_start; /* Start address of where the agp buffers + * are in the agp aperture */ +} drm_buf_desc_t; + +typedef struct drm_buf_info { + int count; /* Entries in list */ + drm_buf_desc_t *list; +} drm_buf_info_t; + +typedef struct drm_buf_free { + int count; + int *list; +} drm_buf_free_t; + +typedef struct drm_buf_pub { + int idx; /* Index into master buflist */ + int total; /* Buffer size */ + int used; /* Amount of buffer in use (for DMA) */ + void *address; /* Address of buffer */ +} drm_buf_pub_t; + +typedef struct drm_buf_map { + int count; /* Length of buflist */ + void *virtual; /* Mmaped area in user-virtual */ + drm_buf_pub_t *list; /* Buffer information */ +} drm_buf_map_t; + +typedef struct drm_dma { + /* Indices here refer to the offset into + buflist in drm_buf_get_t. 
*/ + int context; /* Context handle */ + int send_count; /* Number of buffers to send */ + int *send_indices; /* List of handles to buffers */ + int *send_sizes; /* Lengths of data to send */ + drm_dma_flags_t flags; /* Flags */ + int request_count; /* Number of buffers requested */ + int request_size; /* Desired size for buffers */ + int *request_indices; /* Buffer information */ + int *request_sizes; + int granted_count; /* Number of buffers granted */ +} drm_dma_t; + +typedef enum { + _DRM_CONTEXT_PRESERVED = 0x01, + _DRM_CONTEXT_2DONLY = 0x02 +} drm_ctx_flags_t; + +typedef struct drm_ctx { + drm_context_t handle; + drm_ctx_flags_t flags; +} drm_ctx_t; + +typedef struct drm_ctx_res { + int count; + drm_ctx_t *contexts; +} drm_ctx_res_t; + +typedef struct drm_draw { + drm_drawable_t handle; +} drm_draw_t; + +typedef struct drm_auth { + drm_magic_t magic; +} drm_auth_t; + +typedef struct drm_irq_busid { + int irq; + int busnum; + int devnum; + int funcnum; +} drm_irq_busid_t; + +typedef struct drm_agp_mode { + unsigned long mode; +} drm_agp_mode_t; + + /* For drm_agp_alloc -- allocated a buffer */ +typedef struct drm_agp_buffer { + unsigned long size; /* In bytes -- will round to page boundary */ + unsigned long handle; /* Used for BIND/UNBIND ioctls */ + unsigned long type; /* Type of memory to allocate */ + unsigned long physical; /* Physical used by i810 */ +} drm_agp_buffer_t; + + /* For drm_agp_bind */ +typedef struct drm_agp_binding { + unsigned long handle; /* From drm_agp_buffer */ + unsigned long offset; /* In bytes -- will round to page boundary */ +} drm_agp_binding_t; + +typedef struct drm_agp_info { + int agp_version_major; + int agp_version_minor; + unsigned long mode; + unsigned long aperture_base; /* physical address */ + unsigned long aperture_size; /* bytes */ + unsigned long memory_allowed; /* bytes */ + unsigned long memory_used; + + /* PCI information */ + unsigned short id_vendor; + unsigned short id_device; +} drm_agp_info_t; + +#define DRM_IOCTL_BASE 'd' +#define DRM_IOCTL_NR(n) ((n) & 0xff) +#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) +#define DRM_IOR(nr,size) _IOR(DRM_IOCTL_BASE,nr,size) +#define DRM_IOW(nr,size) _IOW(DRM_IOCTL_BASE,nr,size) +#define DRM_IOWR(nr,size) _IOWR(DRM_IOCTL_BASE,nr,size) + + +#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t) +#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t) +#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t) +#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t) + +#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t) +#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t) +#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, drm_block_t) +#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, drm_block_t) +#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, drm_control_t) +#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, drm_map_t) +#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, drm_buf_desc_t) +#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, drm_buf_desc_t) +#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm_buf_info_t) +#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t) +#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t) + +#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t) +#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t) +#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t) +#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, drm_ctx_t) +#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, drm_ctx_t) +#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, drm_ctx_t) +#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, drm_ctx_res_t) +#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, 
drm_draw_t) +#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, drm_draw_t) +#define DRM_IOCTL_DMA DRM_IOWR(0x29, drm_dma_t) +#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, drm_lock_t) +#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t) +#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t) + +#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30) +#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31) +#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t) +#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t) +#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t) +#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t) +#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t) +#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t) + +/* Mga specific ioctls */ +#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t) +#define DRM_IOCTL_MGA_SWAP DRM_IOW( 0x41, drm_mga_swap_t) +#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x42, drm_mga_clear_t) +#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x43, drm_mga_iload_t) +#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x44, drm_mga_vertex_t) +#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x45, drm_lock_t ) +#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t) + +/* I810 specific ioctls */ +#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t) +#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t) +#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t) +#define DRM_IOCTL_I810_FLUSH DRM_IO ( 0x43) +#define DRM_IOCTL_I810_GETAGE DRM_IO ( 0x44) +#define DRM_IOCTL_I810_GETBUF DRM_IOWR(0x45, drm_i810_dma_t) +#define DRM_IOCTL_I810_SWAP DRM_IO ( 0x46) + +/* Rage 128 specific ioctls */ +#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t) +#define DRM_IOCTL_R128_RESET DRM_IO( 0x41) +#define DRM_IOCTL_R128_FLUSH DRM_IO( 0x42) +#define DRM_IOCTL_R128_CCEIDL DRM_IO( 0x43) +#define DRM_IOCTL_R128_PACKET DRM_IOW( 0x44, drm_r128_packet_t) +#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x45, drm_r128_vertex_t) + +#endif diff --git a/bsd/drm/drmstat.c b/bsd/drm/drmstat.c new file mode 100644 index 00000000..48fb1b16 --- /dev/null +++ b/bsd/drm/drmstat.c @@ -0,0 +1,418 @@ +/* drmstat.c -- DRM device status and testing program + * Created: Tue Jan 5 08:19:24 1999 by faith@precisioninsight.com + * Revised: Sun Aug 1 11:02:00 1999 by faith@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmstat.c,v 1.28 1999/08/04 18:12:11 faith Exp $ + * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/bsd/drm/kernel/drm/drmstat.c,v 1.1 2000/06/17 00:03:30 martin Exp $ + * + */ + +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <sys/types.h> +#include <sys/time.h> +#include <sys/mman.h> +#include <strings.h> +#include <errno.h> +#include <signal.h> +#include <fcntl.h> +#include "xf86drm.h" + +int sigio_fd; + +static double usec(struct timeval *end, struct timeval *start) +{ + double e = end->tv_sec * 1000000 + end->tv_usec; + double s = start->tv_sec * 1000000 + start->tv_usec; + + return e - s; +} + +static void getversion(int fd) +{ + drmVersionPtr version; + + version = drmGetVersion(fd); + if (version) { + printf( "Name: %s\n", version->name ? version->name : "?" ); + printf( " Version: %d.%d.%d\n", + version->version_major, + version->version_minor, + version->version_patchlevel ); + printf( " Date: %s\n", version->date ? version->date : "?" ); + printf( " Desc: %s\n", version->desc ? version->desc : "?" ); + drmFreeVersion(version); + } else { + printf( "No driver available\n" ); + } +} + +void handler(int fd, void *oldctx, void *newctx) +{ + printf("Got fd %d\n", fd); +} + +void process_sigio(char *device) +{ + int fd; + +printf("%s\n", device); + if ((fd = open(device, 0)) < 0) { +printf("%d\n", errno); + drmError(-errno, __FUNCTION__); + exit(1); + } + + sigio_fd = fd; + drmInstallSIGIOHandler(fd, handler); + for (;;) sleep(60); +} + +int main(int argc, char **argv) +{ + int c; + int r = 0; + int fd = -1; + drmHandle handle; + void *address; + char *pt; + unsigned long count; + unsigned long offset; + unsigned long size; + drmContext context; + int loops; + char buf[1024]; + int i; + drmBufInfoPtr info; + drmBufMapPtr bufs; + drmLockPtr lock; + int secs; + + while ((c = getopt(argc, argv, + "lc:vo:O:f:s:w:W:b:r:R:P:L:C:XS:B:F:")) != EOF) + switch (c) { + case 'F': + count = strtoul(optarg, NULL, 0); + if (!fork()) { + dup(fd); + sleep(count); + } + close(fd); + break; + case 'v': getversion(fd); break; + case 'X': + if ((r = drmCreateContext(fd, &context))) { + drmError(r, argv[0]); + return 1; + } + printf( "Got %d\n", context); + break; + case 'S': + process_sigio(optarg); + break; + case 'C': + if ((r = drmSwitchToContext(fd, strtoul(optarg, NULL, 0)))) { + drmError(r, argv[0]); + return 1; + } + break; + case 'c': + if ((r = drmSetBusid(fd,optarg))) { + drmError(r, argv[0]); + return 1; + } + break; + case 'o': + if ((fd = drmOpen(optarg, NULL)) < 0) { + drmError(fd, argv[0]); + return 1; + } + break; + case 'O': + if ((fd = drmOpen(NULL, optarg)) < 0) { + drmError(fd, argv[0]); + return 1; + } + break; + case 'B': /* Test buffer allocation */ + count = strtoul(optarg, &pt, 0); + size = strtoul(pt+1, &pt, 0); + secs = strtoul(pt+1, NULL, 0); + { + drmDMAReq dma; + int *indices, *sizes; + + indices = alloca(sizeof(*indices) * count); + sizes = alloca(sizeof(*sizes) * count); + dma.context = context; + dma.send_count = 0; + dma.request_count = count; + dma.request_size = size; + dma.request_list = indices; + dma.request_sizes = sizes; + dma.flags = DRM_DMA_WAIT; + if ((r = drmDMA(fd, &dma))) { + drmError(r, argv[0]); + return 1; + } + for (i = 0; i < dma.granted_count; i++) { + printf("%5d: index = %d, size = %d\n", + i, dma.request_list[i], dma.request_sizes[i]); + } + sleep(secs); + drmFreeBufs(fd, dma.granted_count, indices); + } + break; + case 'b': + count = strtoul(optarg, 
&pt, 0); + size = strtoul(pt+1, NULL, 0); + if ((r = drmAddBufs(fd, count, size, 0, 0)) < 0) { + drmError(r, argv[0]); + return 1; + } + if (!(info = drmGetBufInfo(fd))) { + drmError(0, argv[0]); + return 1; + } + for (i = 0; i < info->count; i++) { + printf("%5d buffers of size %6d (low = %d, high = %d)\n", + info->list[i].count, + info->list[i].size, + info->list[i].low_mark, + info->list[i].high_mark); + } + if ((r = drmMarkBufs(fd, 0.50, 0.80))) { + drmError(r, argv[0]); + return 1; + } + if (!(info = drmGetBufInfo(fd))) { + drmError(0, argv[0]); + return 1; + } + for (i = 0; i < info->count; i++) { + printf("%5d buffers of size %6d (low = %d, high = %d)\n", + info->list[i].count, + info->list[i].size, + info->list[i].low_mark, + info->list[i].high_mark); + } + printf("===== /proc/drm/1/meminfo =====\n"); + sprintf(buf, "cat /proc/drm/1/meminfo"); + system(buf); +#if 1 + if (!(bufs = drmMapBufs(fd))) { + drmError(0, argv[0]); + return 1; + } + printf("===============================\n"); + printf( "%d bufs\n", bufs->count); + for (i = 0; i < bufs->count; i++) { + printf( " %4d: %8d bytes at %p\n", + i, + bufs->list[i].total, + bufs->list[i].address); + } + printf("===== /proc/drm/1/vmainfo =====\n"); + sprintf(buf, "cat /proc/drm/1/vmainfo"); + system(buf); +#endif + break; + case 'f': + offset = strtoul(optarg, &pt, 0); + size = strtoul(pt+1, NULL, 0); + handle = 0; + if ((r = drmAddMap(fd, offset, size, + DRM_FRAME_BUFFER, 0, &handle))) { + drmError(r, argv[0]); + return 1; + } + printf("0x%08lx:0x%04lx added\n", offset, size); + printf("===== /proc/drm/1/meminfo =====\n"); + sprintf(buf, "cat /proc/drm/1/meminfo"); + system(buf); + break; + case 'r': + case 'R': + offset = strtoul(optarg, &pt, 0); + size = strtoul(pt+1, NULL, 0); + handle = 0; + if ((r = drmAddMap(fd, offset, size, + DRM_REGISTERS, + c == 'R' ? 
DRM_READ_ONLY : 0, + &handle))) { + drmError(r, argv[0]); + return 1; + } + printf("0x%08lx:0x%04lx added\n", offset, size); + printf("===== /proc/drm/1/meminfo =====\n"); + sprintf(buf, "cat /proc/drm/1/meminfo"); + system(buf); + break; + case 's': + size = strtoul(optarg, &pt, 0); + handle = 0; + if ((r = drmAddMap(fd, 0, size, + DRM_SHM, DRM_CONTAINS_LOCK, + &handle))) { + drmError(r, argv[0]); + return 1; + } + printf("0x%04lx byte shm added at 0x%08lx\n", size, handle); + sprintf(buf, "sysctl hw.graphics.0.vm"); + system(buf); + break; + case 'P': + offset = strtoul(optarg, &pt, 0); + size = strtoul(pt+1, NULL, 0); + address = NULL; + if ((r = drmMap(fd, offset, size, &address))) { + drmError(r, argv[0]); + return 1; + } + printf("0x%08lx:0x%04lx mapped at %p for pid %d\n", + offset, size, address, getpid()); + printf("===== hw.graphics.0.vma =====\n"); + sprintf(buf, "sysctl hw.graphics.0.vma"); + system(buf); + mprotect((void *)offset, size, PROT_READ); + printf("===== hw.graphics.0.vma =====\n"); + sprintf(buf, "sysctl hw.graphics.0.vma"); + system(buf); + break; + case 'w': + case 'W': + offset = strtoul(optarg, &pt, 0); + size = strtoul(pt+1, NULL, 0); + address = NULL; + if ((r = drmMap(fd, offset, size, &address))) { + drmError(r, argv[0]); + return 1; + } + printf("0x%08lx:0x%04lx mapped at %p for pid %d\n", + offset, size, address, getpid()); + printf("===== /proc/%d/maps =====\n", getpid()); + sprintf(buf, "cat /proc/%d/maps", getpid()); + system(buf); + printf("===== /proc/drm/1/meminfo =====\n"); + sprintf(buf, "cat /proc/drm/1/meminfo"); + system(buf); + printf("===== /proc/drm/1/vmainfo =====\n"); + sprintf(buf, "cat /proc/drm/1/vmainfo"); + system(buf); + printf("===== READING =====\n"); + for (i = 0; i < 0x10; i++) + printf("%02x ", (unsigned int)((unsigned char *)address)[i]); + printf("\n"); + if (c == 'w') { + printf("===== WRITING =====\n"); + for (i = 0; i < size; i+=2) { + ((char *)address)[i] = i & 0xff; + ((char *)address)[i+1] = i & 0xff; + } + } + printf("===== READING =====\n"); + for (i = 0; i < 0x10; i++) + printf("%02x ", (unsigned int)((unsigned char *)address)[i]); + printf("\n"); + printf("===== /proc/drm/1/vmainfo =====\n"); + sprintf(buf, "cat /proc/drm/1/vmainfo"); + system(buf); + break; + case 'L': + context = strtoul(optarg, &pt, 0); + offset = strtoul(pt+1, &pt, 0); + size = strtoul(pt+1, &pt, 0); + loops = strtoul(pt+1, NULL, 0); + address = NULL; + if ((r = drmMap(fd, offset, size, &address))) { + drmError(r, argv[0]); + return 1; + } + lock = address; +#if 1 + { + int counter = 0; + struct timeval loop_start, loop_end; + struct timeval lock_start, lock_end; + double wt; +#define HISTOSIZE 9 + int histo[HISTOSIZE]; + int output = 0; + int fast = 0; + + if (loops < 0) { + loops = -loops; + ++output; + } + + for (i = 0; i < HISTOSIZE; i++) histo[i] = 0; + + gettimeofday(&loop_start, NULL); + for (i = 0; i < loops; i++) { + gettimeofday(&lock_start, NULL); + DRM_LIGHT_LOCK_COUNT(fd,lock,context,fast); + gettimeofday(&lock_end, NULL); + DRM_UNLOCK(fd,lock,context); + ++counter; + wt = usec(&lock_end, &lock_start); + if (wt <= 2.5) ++histo[8]; + if (wt < 5.0) ++histo[0]; + else if (wt < 50.0) ++histo[1]; + else if (wt < 500.0) ++histo[2]; + else if (wt < 5000.0) ++histo[3]; + else if (wt < 50000.0) ++histo[4]; + else if (wt < 500000.0) ++histo[5]; + else if (wt < 5000000.0) ++histo[6]; + else ++histo[7]; + if (output) printf( "%.2f uSec, %d fast\n", wt, fast); + } + gettimeofday(&loop_end, NULL); + printf( "Average wait time = %.2f usec, %d 
fast\n", + usec(&loop_end, &loop_start) / counter, fast); + printf( "%9d <= 2.5 uS\n", histo[8]); + printf( "%9d < 5 uS\n", histo[0]); + printf( "%9d < 50 uS\n", histo[1]); + printf( "%9d < 500 uS\n", histo[2]); + printf( "%9d < 5000 uS\n", histo[3]); + printf( "%9d < 50000 uS\n", histo[4]); + printf( "%9d < 500000 uS\n", histo[5]); + printf( "%9d < 5000000 uS\n", histo[6]); + printf( "%9d >= 5000000 uS\n", histo[7]); + } +#else + printf( "before lock: 0x%08x\n", lock->lock); + printf( "lock: 0x%08x\n", lock->lock); + sleep(5); + printf( "unlock: 0x%08x\n", lock->lock); +#endif + break; + default: + fprintf( stderr, "Usage: drmstat [options]\n" ); + return 1; + } + + return r; +} diff --git a/bsd/drm/proc.c b/bsd/drm/proc.c new file mode 100644 index 00000000..3f6616ef --- /dev/null +++ b/bsd/drm/proc.c @@ -0,0 +1,568 @@ +/* proc.c -- /proc support for DRM -*- c -*- + * Created: Mon Jan 11 09:48:47 1999 by faith@precisioninsight.com + * Revised: Fri Aug 20 11:31:48 1999 by faith@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/proc.c,v 1.4 1999/08/20 15:36:46 faith Exp $ + * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/bsd/drm/kernel/drm/proc.c,v 1.1 2000/06/17 00:03:30 martin Exp $ + * + */ + +#define __NO_VERSION__ +#include "drmP.h" + +static struct proc_dir_entry *drm_root = NULL; +static struct proc_dir_entry *drm_dev_root = NULL; +static char drm_slot_name[64]; + +static int drm_name_info(char *buf, char **start, off_t offset, + int len, int *eof, void *data); +static int drm_vm_info(char *buf, char **start, off_t offset, + int len, int *eof, void *data); +static int drm_clients_info(char *buf, char **start, off_t offset, + int len, int *eof, void *data); +static int drm_queues_info(char *buf, char **start, off_t offset, + int len, int *eof, void *data); +static int drm_bufs_info(char *buf, char **start, off_t offset, + int len, int *eof, void *data); +#if DRM_DEBUG_CODE +static int drm_vma_info(char *buf, char **start, off_t offset, + int len, int *eof, void *data); +#endif +#if DRM_DMA_HISTOGRAM +static int drm_histo_info(char *buf, char **start, off_t offset, + int len, int *eof, void *data); +#endif + +struct drm_proc_list { + const char *name; + int (*f)(char *, char **, off_t, int, int *, void *); +} drm_proc_list[] = { + { "name", drm_name_info }, + { "mem", drm_mem_info }, + { "vm", drm_vm_info }, + { "clients", drm_clients_info }, + { "queues", drm_queues_info }, + { "bufs", drm_bufs_info }, +#if DRM_DEBUG_CODE + { "vma", drm_vma_info }, +#endif +#if DRM_DMA_HISTOGRAM + { "histo", drm_histo_info }, +#endif +}; +#define DRM_PROC_ENTRIES (sizeof(drm_proc_list)/sizeof(drm_proc_list[0])) + +int drm_proc_init(drm_device_t *dev) +{ + struct proc_dir_entry *ent; + int i, j; + + drm_root = create_proc_entry("graphics", S_IFDIR, NULL); + if (!drm_root) { + DRM_ERROR("Cannot create /proc/graphics\n"); + return -1; + } + + /* Instead of doing this search, we should + add some global support for /proc/graphics. 
*/ + for (i = 0; i < 8; i++) { + sprintf(drm_slot_name, "graphics/%d", i); + drm_dev_root = create_proc_entry(drm_slot_name, S_IFDIR, NULL); + if (!drm_dev_root) { + DRM_ERROR("Cannot create /proc/%s\n", drm_slot_name); + remove_proc_entry("graphics", NULL); + } + if (drm_dev_root->nlink == 2) break; + drm_dev_root = NULL; + } + if (!drm_dev_root) { + DRM_ERROR("Cannot find slot in /proc/graphics\n"); + return -1; + } + + for (i = 0; i < DRM_PROC_ENTRIES; i++) { + ent = create_proc_entry(drm_proc_list[i].name, + S_IFREG|S_IRUGO, drm_dev_root); + if (!ent) { + DRM_ERROR("Cannot create /proc/%s/%s\n", + drm_slot_name, drm_proc_list[i].name); + for (j = 0; j < i; j++) + remove_proc_entry(drm_proc_list[i].name, + drm_dev_root); + remove_proc_entry(drm_slot_name, NULL); + remove_proc_entry("graphics", NULL); + return -1; + } + ent->read_proc = drm_proc_list[i].f; + ent->data = dev; + } + + return 0; +} + + +int drm_proc_cleanup(void) +{ + int i; + + if (drm_root) { + if (drm_dev_root) { + for (i = 0; i < DRM_PROC_ENTRIES; i++) { + remove_proc_entry(drm_proc_list[i].name, + drm_dev_root); + } + remove_proc_entry(drm_slot_name, NULL); + } + remove_proc_entry("graphics", NULL); + remove_proc_entry(DRM_NAME, NULL); + } + drm_root = drm_dev_root = NULL; + return 0; +} + +static int drm_name_info(char *buf, char **start, off_t offset, int len, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *)data; + + if (offset > 0) return 0; /* no partial requests */ + len = 0; + *eof = 1; + + if (dev->unique) { + DRM_PROC_PRINT("%s 0x%x %s\n", + dev->name, dev->device, dev->unique); + } else { + DRM_PROC_PRINT("%s 0x%x\n", dev->name, dev->device); + } + return len; +} + +static int _drm_vm_info(char *buf, char **start, off_t offset, int len, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *)data; + drm_map_t *map; + const char *types[] = { "FB", "REG", "SHM" }; + const char *type; + int i; + + if (offset > 0) return 0; /* no partial requests */ + len = 0; + *eof = 1; + DRM_PROC_PRINT("slot offset size type flags " + "address mtrr\n\n"); + for (i = 0; i < dev->map_count; i++) { + map = dev->maplist[i]; + if (map->type < 0 || map->type > 2) type = "??"; + else type = types[map->type]; + DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ", + i, + map->offset, + map->size, + type, + map->flags, + (unsigned long)map->handle); + if (map->mtrr < 0) { + DRM_PROC_PRINT("none\n"); + } else { + DRM_PROC_PRINT("%4d\n", map->mtrr); + } + } + + return len; +} + +static int drm_vm_info(char *buf, char **start, off_t offset, int len, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *)data; + int ret; + + down(&dev->struct_sem); + ret = _drm_vm_info(buf, start, offset, len, eof, data); + up(&dev->struct_sem); + return ret; +} + + +static int _drm_queues_info(char *buf, char **start, off_t offset, int len, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *)data; + int i; + drm_queue_t *q; + + if (offset > 0) return 0; /* no partial requests */ + len = 0; + *eof = 1; + DRM_PROC_PRINT(" ctx/flags use fin" + " blk/rw/rwf wait flushed queued" + " locks\n\n"); + for (i = 0; i < dev->queue_count; i++) { + q = dev->queuelist[i]; + atomic_inc(&q->use_count); + DRM_PROC_PRINT_RET(atomic_dec(&q->use_count), + "%5d/0x%03x %5d %5d" + " %5d/%c%c/%c%c%c %5d %10d %10d %10d\n", + i, + q->flags, + atomic_read(&q->use_count), + atomic_read(&q->finalization), + atomic_read(&q->block_count), + atomic_read(&q->block_read) ? 'r' : '-', + atomic_read(&q->block_write) ? 
'w' : '-', + waitqueue_active(&q->read_queue) ? 'r':'-', + waitqueue_active(&q->write_queue) ? 'w':'-', + waitqueue_active(&q->flush_queue) ? 'f':'-', + DRM_BUFCOUNT(&q->waitlist), + atomic_read(&q->total_flushed), + atomic_read(&q->total_queued), + atomic_read(&q->total_locks)); + atomic_dec(&q->use_count); + } + + return len; +} + +static int drm_queues_info(char *buf, char **start, off_t offset, int len, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *)data; + int ret; + + down(&dev->struct_sem); + ret = _drm_queues_info(buf, start, offset, len, eof, data); + up(&dev->struct_sem); + return ret; +} + +/* drm_bufs_info is called whenever a process reads + /dev/drm/<dev>/bufs. */ + +static int _drm_bufs_info(char *buf, char **start, off_t offset, int len, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *)data; + drm_device_dma_t *dma = dev->dma; + int i; + + if (!dma) return 0; + if (offset > 0) return 0; /* no partial requests */ + len = 0; + *eof = 1; + DRM_PROC_PRINT(" o size count free segs pages kB\n\n"); + for (i = 0; i <= DRM_MAX_ORDER; i++) { + if (dma->bufs[i].buf_count) + DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n", + i, + dma->bufs[i].buf_size, + dma->bufs[i].buf_count, + atomic_read(&dma->bufs[i] + .freelist.count), + dma->bufs[i].seg_count, + dma->bufs[i].seg_count + *(1 << dma->bufs[i].page_order), + (dma->bufs[i].seg_count + * (1 << dma->bufs[i].page_order)) + * PAGE_SIZE / 1024); + } + DRM_PROC_PRINT("\n"); + for (i = 0; i < dma->buf_count; i++) { + if (i && !(i%32)) DRM_PROC_PRINT("\n"); + DRM_PROC_PRINT(" %d", dma->buflist[i]->list); + } + DRM_PROC_PRINT("\n"); + + return len; +} + +static int drm_bufs_info(char *buf, char **start, off_t offset, int len, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *)data; + int ret; + + down(&dev->struct_sem); + ret = _drm_bufs_info(buf, start, offset, len, eof, data); + up(&dev->struct_sem); + return ret; +} + + +static int _drm_clients_info(char *buf, char **start, off_t offset, int len, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *)data; + drm_file_t *priv; + + if (offset > 0) return 0; /* no partial requests */ + len = 0; + *eof = 1; + DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n"); + for (priv = dev->file_first; priv; priv = priv->next) { + DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n", + priv->authenticated ? 
'y' : 'n', + priv->minor, + priv->pid, + priv->uid, + priv->magic, + priv->ioctl_count); + } + + return len; +} + +static int drm_clients_info(char *buf, char **start, off_t offset, int len, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *)data; + int ret; + + down(&dev->struct_sem); + ret = _drm_clients_info(buf, start, offset, len, eof, data); + up(&dev->struct_sem); + return ret; +} + +#if DRM_DEBUG_CODE + +static int _drm_vma_info(char *buf, char **start, off_t offset, int len, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *)data; + drm_vma_entry_t *pt; + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + unsigned long i; + struct vm_area_struct *vma; + unsigned long address; +#if defined(__i386__) + unsigned int pgprot; +#endif + + if (offset > 0) return 0; /* no partial requests */ + len = 0; + *eof = 1; + DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n", + atomic_read(&dev->vma_count), + high_memory, virt_to_phys(high_memory)); + for (pt = dev->vmalist; pt; pt = pt->next) { + if (!(vma = pt->vma)) continue; + DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx", + pt->pid, + vma->vm_start, + vma->vm_end, + vma->vm_flags & VM_READ ? 'r' : '-', + vma->vm_flags & VM_WRITE ? 'w' : '-', + vma->vm_flags & VM_EXEC ? 'x' : '-', + vma->vm_flags & VM_MAYSHARE ? 's' : 'p', + vma->vm_flags & VM_LOCKED ? 'l' : '-', + vma->vm_flags & VM_IO ? 'i' : '-', + vma->vm_offset ); +#if defined(__i386__) + pgprot = pgprot_val(vma->vm_page_prot); + DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c", + pgprot & _PAGE_PRESENT ? 'p' : '-', + pgprot & _PAGE_RW ? 'w' : 'r', + pgprot & _PAGE_USER ? 'u' : 's', + pgprot & _PAGE_PWT ? 't' : 'b', + pgprot & _PAGE_PCD ? 'u' : 'c', + pgprot & _PAGE_ACCESSED ? 'a' : '-', + pgprot & _PAGE_DIRTY ? 'd' : '-', + pgprot & _PAGE_4M ? 'm' : 'k', + pgprot & _PAGE_GLOBAL ? 'g' : 'l' ); +#endif + DRM_PROC_PRINT("\n"); + for (i = vma->vm_start; i < vma->vm_end; i += PAGE_SIZE) { + pgd = pgd_offset(vma->vm_mm, i); + pmd = pmd_offset(pgd, i); + pte = pte_offset(pmd, i); + if (pte_present(*pte)) { + address = __pa(pte_page(*pte)) + + (i & (PAGE_SIZE-1)); + DRM_PROC_PRINT(" 0x%08lx -> 0x%08lx" + " %c%c%c%c%c\n", + i, + address, + pte_read(*pte) ? 'r' : '-', + pte_write(*pte) ? 'w' : '-', + pte_exec(*pte) ? 'x' : '-', + pte_dirty(*pte) ? 'd' : '-', + pte_young(*pte) ? 
'a' : '-' ); + } else { + DRM_PROC_PRINT(" 0x%08lx\n", i); + } + } + } + + return len; +} + +static int drm_vma_info(char *buf, char **start, off_t offset, int len, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *)data; + int ret; + + down(&dev->struct_sem); + ret = _drm_vma_info(buf, start, offset, len, eof, data); + up(&dev->struct_sem); + return ret; +} +#endif + + +#if DRM_DMA_HISTOGRAM +static int _drm_histo_info(char *buf, char **start, off_t offset, int len, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *)data; + drm_device_dma_t *dma = dev->dma; + int i; + unsigned long slot_value = DRM_DMA_HISTOGRAM_INITIAL; + unsigned long prev_value = 0; + drm_buf_t *buffer; + + if (offset > 0) return 0; /* no partial requests */ + len = 0; + *eof = 1; + + DRM_PROC_PRINT("general statistics:\n"); + DRM_PROC_PRINT("total %10u\n", atomic_read(&dev->histo.total)); + DRM_PROC_PRINT("open %10u\n", atomic_read(&dev->total_open)); + DRM_PROC_PRINT("close %10u\n", atomic_read(&dev->total_close)); + DRM_PROC_PRINT("ioctl %10u\n", atomic_read(&dev->total_ioctl)); + DRM_PROC_PRINT("irq %10u\n", atomic_read(&dev->total_irq)); + DRM_PROC_PRINT("ctx %10u\n", atomic_read(&dev->total_ctx)); + + DRM_PROC_PRINT("\nlock statistics:\n"); + DRM_PROC_PRINT("locks %10u\n", atomic_read(&dev->total_locks)); + DRM_PROC_PRINT("unlocks %10u\n", atomic_read(&dev->total_unlocks)); + DRM_PROC_PRINT("contends %10u\n", atomic_read(&dev->total_contends)); + DRM_PROC_PRINT("sleeps %10u\n", atomic_read(&dev->total_sleeps)); + + + if (dma) { + DRM_PROC_PRINT("\ndma statistics:\n"); + DRM_PROC_PRINT("prio %10u\n", + atomic_read(&dma->total_prio)); + DRM_PROC_PRINT("bytes %10u\n", + atomic_read(&dma->total_bytes)); + DRM_PROC_PRINT("dmas %10u\n", + atomic_read(&dma->total_dmas)); + DRM_PROC_PRINT("missed:\n"); + DRM_PROC_PRINT(" dma %10u\n", + atomic_read(&dma->total_missed_dma)); + DRM_PROC_PRINT(" lock %10u\n", + atomic_read(&dma->total_missed_lock)); + DRM_PROC_PRINT(" free %10u\n", + atomic_read(&dma->total_missed_free)); + DRM_PROC_PRINT(" sched %10u\n", + atomic_read(&dma->total_missed_sched)); + DRM_PROC_PRINT("tried %10u\n", + atomic_read(&dma->total_tried)); + DRM_PROC_PRINT("hit %10u\n", + atomic_read(&dma->total_hit)); + DRM_PROC_PRINT("lost %10u\n", + atomic_read(&dma->total_lost)); + + buffer = dma->next_buffer; + if (buffer) { + DRM_PROC_PRINT("next_buffer %7d\n", buffer->idx); + } else { + DRM_PROC_PRINT("next_buffer none\n"); + } + buffer = dma->this_buffer; + if (buffer) { + DRM_PROC_PRINT("this_buffer %7d\n", buffer->idx); + } else { + DRM_PROC_PRINT("this_buffer none\n"); + } + } + + + DRM_PROC_PRINT("\nvalues:\n"); + if (dev->lock.hw_lock) { + DRM_PROC_PRINT("lock 0x%08x\n", + dev->lock.hw_lock->lock); + } else { + DRM_PROC_PRINT("lock none\n"); + } + DRM_PROC_PRINT("context_flag 0x%08x\n", dev->context_flag); + DRM_PROC_PRINT("interrupt_flag 0x%08x\n", dev->interrupt_flag); + DRM_PROC_PRINT("dma_flag 0x%08x\n", dev->dma_flag); + + DRM_PROC_PRINT("queue_count %10d\n", dev->queue_count); + DRM_PROC_PRINT("last_context %10d\n", dev->last_context); + DRM_PROC_PRINT("last_switch %10lu\n", dev->last_switch); + DRM_PROC_PRINT("last_checked %10d\n", dev->last_checked); + + + DRM_PROC_PRINT("\n q2d d2c c2f" + " q2c q2f dma sch" + " ctx lacq lhld\n\n"); + for (i = 0; i < DRM_DMA_HISTOGRAM_SLOTS; i++) { + DRM_PROC_PRINT("%s %10lu %10u %10u %10u %10u %10u" + " %10u %10u %10u %10u %10u\n", + i == DRM_DMA_HISTOGRAM_SLOTS - 1 ? ">=" : "< ", + i == DRM_DMA_HISTOGRAM_SLOTS - 1 + ? 
prev_value : slot_value , + + atomic_read(&dev->histo + .queued_to_dispatched[i]), + atomic_read(&dev->histo + .dispatched_to_completed[i]), + atomic_read(&dev->histo + .completed_to_freed[i]), + + atomic_read(&dev->histo + .queued_to_completed[i]), + atomic_read(&dev->histo + .queued_to_freed[i]), + atomic_read(&dev->histo.dma[i]), + atomic_read(&dev->histo.schedule[i]), + atomic_read(&dev->histo.ctx[i]), + atomic_read(&dev->histo.lacq[i]), + atomic_read(&dev->histo.lhld[i])); + prev_value = slot_value; + slot_value = DRM_DMA_HISTOGRAM_NEXT(slot_value); + } + return len; +} + +static int drm_histo_info(char *buf, char **start, off_t offset, int len, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *)data; + int ret; + + down(&dev->struct_sem); + ret = _drm_histo_info(buf, start, offset, len, eof, data); + up(&dev->struct_sem); + return ret; +} +#endif diff --git a/bsd/drm/sysctl.c b/bsd/drm/sysctl.c new file mode 100644 index 00000000..a890abac --- /dev/null +++ b/bsd/drm/sysctl.c @@ -0,0 +1,554 @@ +/* proc.c -- /proc support for DRM -*- c -*- + * Created: Mon Jan 11 09:48:47 1999 by faith@precisioninsight.com + * Revised: Fri Aug 20 11:31:48 1999 by faith@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * $PI$ + * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/bsd/drm/kernel/drm/sysctl.c,v 1.1 2000/06/17 00:03:31 martin Exp $ + * + */ + +#define __NO_VERSION__ +#include "drmP.h" +#include <sys/sysctl.h> + +SYSCTL_NODE(_hw, OID_AUTO, dri, CTLFLAG_RW, 0, "DRI Graphics"); + +static int drm_name_info SYSCTL_HANDLER_ARGS; +static int drm_vm_info SYSCTL_HANDLER_ARGS; +static int drm_clients_info SYSCTL_HANDLER_ARGS; +static int drm_queues_info SYSCTL_HANDLER_ARGS; +static int drm_bufs_info SYSCTL_HANDLER_ARGS; +#if DRM_DEBUG_CODExx +static int drm_vma_info SYSCTL_HANDLER_ARGS; +#endif +#if DRM_DMA_HISTOGRAM +static int drm_histo_info SYSCTL_HANDLER_ARGS; +#endif + +struct drm_sysctl_list { + const char *name; + int (*f) SYSCTL_HANDLER_ARGS; +} drm_sysctl_list[] = { + { "name", drm_name_info }, + { "mem", drm_mem_info }, + { "vm", drm_vm_info }, + { "clients", drm_clients_info }, + { "queues", drm_queues_info }, + { "bufs", drm_bufs_info }, +#if DRM_DEBUG_CODExx + { "vma", drm_vma_info }, +#endif +#if DRM_DMA_HISTOGRAM + { "histo", drm_histo_info }, +#endif +}; +#define DRM_SYSCTL_ENTRIES (sizeof(drm_sysctl_list)/sizeof(drm_sysctl_list[0])) + +struct drm_sysctl_info { + struct sysctl_oid oids[DRM_SYSCTL_ENTRIES + 1]; + struct sysctl_oid_list list; + char name[2]; +}; + +int drm_sysctl_init(drm_device_t *dev) +{ + struct drm_sysctl_info *info; + struct sysctl_oid *oid; + struct sysctl_oid *top; + int i; + + /* Find the next free slot under hw.graphics */ + i = 0; + SLIST_FOREACH(oid, &sysctl__hw_dri_children, oid_link) { + if (i <= oid->oid_arg2) + i = oid->oid_arg2 + 1; + } + + info = drm_alloc(sizeof *info, DRM_MEM_DRIVER); + dev->sysctl = info; + + /* Construct the node under hw.graphics */ + info->name[0] = '0' + i; + info->name[1] = 0; + oid = &info->oids[DRM_SYSCTL_ENTRIES]; + bzero(oid, sizeof(*oid)); + oid->oid_parent = &sysctl__hw_dri_children; + oid->oid_number = OID_AUTO; + oid->oid_kind = CTLTYPE_NODE | CTLFLAG_RW; + oid->oid_arg1 = &info->list; + oid->oid_arg2 = i; + oid->oid_name = info->name; + oid->oid_handler = 0; + oid->oid_fmt = "N"; + SLIST_INIT(&info->list); + sysctl_register_oid(oid); + top = oid; + + for (i = 0; i < DRM_SYSCTL_ENTRIES; i++) { + oid = &info->oids[i]; + bzero(oid, sizeof(*oid)); + oid->oid_parent = top->oid_arg1; + oid->oid_number = OID_AUTO; + oid->oid_kind = CTLTYPE_INT | CTLFLAG_RD; + oid->oid_arg1 = dev; + oid->oid_arg2 = 0; + oid->oid_name = drm_sysctl_list[i].name; + oid->oid_handler = drm_sysctl_list[i].f; + oid->oid_fmt = "A"; + sysctl_register_oid(oid); + } + + return 0; +} + +int drm_sysctl_cleanup(drm_device_t *dev) +{ + int i; + + DRM_DEBUG("dev->sysctl=%p\n", dev->sysctl); + for (i = 0; i < DRM_SYSCTL_ENTRIES + 1; i++) + sysctl_unregister_oid(&dev->sysctl->oids[i]); + + drm_free(dev->sysctl, sizeof *dev->sysctl, DRM_MEM_DRIVER); + dev->sysctl = NULL; + + return 0; +} + +static int drm_name_info SYSCTL_HANDLER_ARGS +{ + drm_device_t *dev = arg1; + char buf[128]; + int error; + + if (dev->unique) { + DRM_SYSCTL_PRINT("%s 0x%x %s\n", + dev->name, dev2udev(dev->devnode), dev->unique); + } else { + DRM_SYSCTL_PRINT("%s 0x%x\n", dev->name, dev2udev(dev->devnode)); + } + + SYSCTL_OUT(req, "", 1); + + return 0; +} + +static int _drm_vm_info SYSCTL_HANDLER_ARGS +{ + drm_device_t *dev = arg1; + drm_map_t *map; + const char *types[] = { "FB", "REG", "SHM" }; + const char *type; + int i; + char buf[128]; + int error; + + DRM_SYSCTL_PRINT("slot offset size type flags " + "address mtrr\n\n"); + error = SYSCTL_OUT(req, buf, strlen(buf)); + if (error) 
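+		/* a copyout error from SYSCTL_OUT aborts the whole sysctl read */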
return error; + + for (i = 0; i < dev->map_count; i++) { + map = dev->maplist[i]; + if (map->type < 0 || map->type > 2) type = "??"; + else type = types[map->type]; + DRM_SYSCTL_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ", + i, + map->offset, + map->size, + type, + map->flags, + (unsigned long)map->handle); + if (map->mtrr < 0) { + DRM_SYSCTL_PRINT("none\n"); + } else { + DRM_SYSCTL_PRINT("%4d\n", map->mtrr); + } + } + SYSCTL_OUT(req, "", 1); + + return 0; +} + +static int drm_vm_info SYSCTL_HANDLER_ARGS +{ + drm_device_t *dev = arg1; + int ret; + + lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc); + ret = _drm_vm_info(oidp, arg1, arg2, req); + lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc); + + return ret; +} + + +static int _drm_queues_info SYSCTL_HANDLER_ARGS +{ + drm_device_t *dev = arg1; + int i; + drm_queue_t *q; + char buf[128]; + int error; + + DRM_SYSCTL_PRINT(" ctx/flags use fin" + " blk/rw/rwf wait flushed queued" + " locks\n\n"); + for (i = 0; i < dev->queue_count; i++) { + q = dev->queuelist[i]; + atomic_inc(&q->use_count); + DRM_SYSCTL_PRINT_RET(atomic_dec(&q->use_count), + "%5d/0x%03x %5d %5d" + " %5d/%c%c/%c%c%c %5d %10d %10d %10d\n", + i, + q->flags, + atomic_read(&q->use_count), + atomic_read(&q->finalization), + atomic_read(&q->block_count), + atomic_read(&q->block_read) ? 'r' : '-', + atomic_read(&q->block_write) ? 'w' : '-', + q->read_queue ? 'r':'-', + q->write_queue ? 'w':'-', + q->flush_queue ? 'f':'-', + DRM_BUFCOUNT(&q->waitlist), + atomic_read(&q->total_flushed), + atomic_read(&q->total_queued), + atomic_read(&q->total_locks)); + atomic_dec(&q->use_count); + } + + SYSCTL_OUT(req, "", 1); + return 0; +} + +static int drm_queues_info SYSCTL_HANDLER_ARGS +{ + drm_device_t *dev = arg1; + int ret; + + lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc); + ret = _drm_queues_info(oidp, arg1, arg2, req); + lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc); + return ret; +} + +/* drm_bufs_info is called whenever a process reads + hw.dri.0.bufs. */ + +static int _drm_bufs_info SYSCTL_HANDLER_ARGS +{ + drm_device_t *dev = arg1; + drm_device_dma_t *dma = dev->dma; + int i; + char buf[128]; + int error; + + if (!dma) return 0; + DRM_SYSCTL_PRINT(" o size count free segs pages kB\n\n"); + for (i = 0; i <= DRM_MAX_ORDER; i++) { + if (dma->bufs[i].buf_count) + DRM_SYSCTL_PRINT("%2d %8d %5d %5d %5d %5d %5d\n", + i, + dma->bufs[i].buf_size, + dma->bufs[i].buf_count, + atomic_read(&dma->bufs[i] + .freelist.count), + dma->bufs[i].seg_count, + dma->bufs[i].seg_count + *(1 << dma->bufs[i].page_order), + (dma->bufs[i].seg_count + * (1 << dma->bufs[i].page_order)) + * PAGE_SIZE / 1024); + } + DRM_SYSCTL_PRINT("\n"); + for (i = 0; i < dma->buf_count; i++) { + if (i && !(i%32)) DRM_SYSCTL_PRINT("\n"); + DRM_SYSCTL_PRINT(" %d", dma->buflist[i]->list); + } + DRM_SYSCTL_PRINT("\n"); + + SYSCTL_OUT(req, "", 1); + return 0; +} + +static int drm_bufs_info SYSCTL_HANDLER_ARGS +{ + drm_device_t *dev = arg1; + int ret; + + lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc); + ret = _drm_bufs_info(oidp, arg1, arg2, req); + lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc); + return ret; +} + + +static int _drm_clients_info SYSCTL_HANDLER_ARGS +{ + drm_device_t *dev = arg1; + drm_file_t *priv; + char buf[128]; + int error; + + DRM_SYSCTL_PRINT("a dev pid uid magic ioctls\n\n"); + TAILQ_FOREACH(priv, &dev->files, link) { + DRM_SYSCTL_PRINT("%c %3d %5d %5d %10u %10lu\n", + priv->authenticated ? 
'y' : 'n', + priv->minor, + priv->pid, + priv->uid, + priv->magic, + priv->ioctl_count); + } + + SYSCTL_OUT(req, "", 1); + return 0; +} + +static int drm_clients_info SYSCTL_HANDLER_ARGS +{ + drm_device_t *dev = arg1; + int ret; + + lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc); + ret = _drm_clients_info(oidp, arg1, arg2, req); + lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc); + return ret; +} + +#if DRM_DEBUG_CODExx + +static int _drm_vma_info SYSCTL_HANDLER_ARGS +{ + drm_device_t *dev = arg1; + drm_vma_entry_t *pt; + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + unsigned long i; + struct vm_area_struct *vma; + unsigned long address; +#if defined(__i386__) + unsigned int pgprot; +#endif + char buf[128]; + int error; + + DRM_SYSCTL_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n", + atomic_read(&dev->vma_count), + high_memory, virt_to_phys(high_memory)); + for (pt = dev->vmalist; pt; pt = pt->next) { + if (!(vma = pt->vma)) continue; + DRM_SYSCTL_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx", + pt->pid, + vma->vm_start, + vma->vm_end, + vma->vm_flags & VM_READ ? 'r' : '-', + vma->vm_flags & VM_WRITE ? 'w' : '-', + vma->vm_flags & VM_EXEC ? 'x' : '-', + vma->vm_flags & VM_MAYSHARE ? 's' : 'p', + vma->vm_flags & VM_LOCKED ? 'l' : '-', + vma->vm_flags & VM_IO ? 'i' : '-', + vma->vm_offset ); +#if defined(__i386__) + pgprot = pgprot_val(vma->vm_page_prot); + DRM_SYSCTL_PRINT(" %c%c%c%c%c%c%c%c%c", + pgprot & _PAGE_PRESENT ? 'p' : '-', + pgprot & _PAGE_RW ? 'w' : 'r', + pgprot & _PAGE_USER ? 'u' : 's', + pgprot & _PAGE_PWT ? 't' : 'b', + pgprot & _PAGE_PCD ? 'u' : 'c', + pgprot & _PAGE_ACCESSED ? 'a' : '-', + pgprot & _PAGE_DIRTY ? 'd' : '-', + pgprot & _PAGE_4M ? 'm' : 'k', + pgprot & _PAGE_GLOBAL ? 'g' : 'l' ); +#endif + DRM_SYSCTL_PRINT("\n"); + for (i = vma->vm_start; i < vma->vm_end; i += PAGE_SIZE) { + pgd = pgd_offset(vma->vm_mm, i); + pmd = pmd_offset(pgd, i); + pte = pte_offset(pmd, i); + if (pte_present(*pte)) { + address = __pa(pte_page(*pte)) + + (i & (PAGE_SIZE-1)); + DRM_SYSCTL_PRINT(" 0x%08lx -> 0x%08lx" + " %c%c%c%c%c\n", + i, + address, + pte_read(*pte) ? 'r' : '-', + pte_write(*pte) ? 'w' : '-', + pte_exec(*pte) ? 'x' : '-', + pte_dirty(*pte) ? 'd' : '-', + pte_young(*pte) ? 
'a' : '-' ); + } else { + DRM_SYSCTL_PRINT(" 0x%08lx\n", i); + } + } + } + + SYSCTL_OUT(req, "", 1); + return 0; +} + +static int drm_vma_info SYSCTL_HANDLER_ARGS +{ + drm_device_t *dev = arg1; + int ret; + + lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc); + ret = _drm_vma_info(oidp, arg1, arg2, req); + lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc); + return ret; +} +#endif + + +#if DRM_DMA_HISTOGRAM +static int _drm_histo_info SYSCTL_HANDLER_ARGS +{ + drm_device_t *dev = arg1; + drm_device_dma_t *dma = dev->dma; + int i; + unsigned long slot_value = DRM_DMA_HISTOGRAM_INITIAL; + unsigned long prev_value = 0; + drm_buf_t *buffer; + char buf[128]; + int error; + + DRM_SYSCTL_PRINT("general statistics:\n"); + DRM_SYSCTL_PRINT("total %10u\n", atomic_read(&dev->histo.total)); + DRM_SYSCTL_PRINT("open %10u\n", atomic_read(&dev->total_open)); + DRM_SYSCTL_PRINT("close %10u\n", atomic_read(&dev->total_close)); + DRM_SYSCTL_PRINT("ioctl %10u\n", atomic_read(&dev->total_ioctl)); + DRM_SYSCTL_PRINT("irq %10u\n", atomic_read(&dev->total_irq)); + DRM_SYSCTL_PRINT("ctx %10u\n", atomic_read(&dev->total_ctx)); + + DRM_SYSCTL_PRINT("\nlock statistics:\n"); + DRM_SYSCTL_PRINT("locks %10u\n", atomic_read(&dev->total_locks)); + DRM_SYSCTL_PRINT("unlocks %10u\n", atomic_read(&dev->total_unlocks)); + DRM_SYSCTL_PRINT("contends %10u\n", atomic_read(&dev->total_contends)); + DRM_SYSCTL_PRINT("sleeps %10u\n", atomic_read(&dev->total_sleeps)); + + + if (dma) { + DRM_SYSCTL_PRINT("\ndma statistics:\n"); + DRM_SYSCTL_PRINT("prio %10u\n", + atomic_read(&dma->total_prio)); + DRM_SYSCTL_PRINT("bytes %10u\n", + atomic_read(&dma->total_bytes)); + DRM_SYSCTL_PRINT("dmas %10u\n", + atomic_read(&dma->total_dmas)); + DRM_SYSCTL_PRINT("missed:\n"); + DRM_SYSCTL_PRINT(" dma %10u\n", + atomic_read(&dma->total_missed_dma)); + DRM_SYSCTL_PRINT(" lock %10u\n", + atomic_read(&dma->total_missed_lock)); + DRM_SYSCTL_PRINT(" free %10u\n", + atomic_read(&dma->total_missed_free)); + DRM_SYSCTL_PRINT(" sched %10u\n", + atomic_read(&dma->total_missed_sched)); + DRM_SYSCTL_PRINT("tried %10u\n", + atomic_read(&dma->total_tried)); + DRM_SYSCTL_PRINT("hit %10u\n", + atomic_read(&dma->total_hit)); + DRM_SYSCTL_PRINT("lost %10u\n", + atomic_read(&dma->total_lost)); + + buffer = dma->next_buffer; + if (buffer) { + DRM_SYSCTL_PRINT("next_buffer %7d\n", buffer->idx); + } else { + DRM_SYSCTL_PRINT("next_buffer none\n"); + } + buffer = dma->this_buffer; + if (buffer) { + DRM_SYSCTL_PRINT("this_buffer %7d\n", buffer->idx); + } else { + DRM_SYSCTL_PRINT("this_buffer none\n"); + } + } + + + DRM_SYSCTL_PRINT("\nvalues:\n"); + if (dev->lock.hw_lock) { + DRM_SYSCTL_PRINT("lock 0x%08x\n", + dev->lock.hw_lock->lock); + } else { + DRM_SYSCTL_PRINT("lock none\n"); + } + DRM_SYSCTL_PRINT("context_flag 0x%08x\n", dev->context_flag); + DRM_SYSCTL_PRINT("interrupt_flag 0x%08x\n", dev->interrupt_flag); + DRM_SYSCTL_PRINT("dma_flag 0x%08x\n", dev->dma_flag); + + DRM_SYSCTL_PRINT("queue_count %10d\n", dev->queue_count); + DRM_SYSCTL_PRINT("last_context %10d\n", dev->last_context); + DRM_SYSCTL_PRINT("last_switch %10u\n", dev->last_switch); + DRM_SYSCTL_PRINT("last_checked %10d\n", dev->last_checked); + + + DRM_SYSCTL_PRINT("\n q2d d2c c2f" + " q2c q2f dma sch" + " ctx lacq lhld\n\n"); + for (i = 0; i < DRM_DMA_HISTOGRAM_SLOTS; i++) { + DRM_SYSCTL_PRINT("%s %10lu %10u %10u %10u %10u %10u" + " %10u %10u %10u %10u %10u\n", + i == DRM_DMA_HISTOGRAM_SLOTS - 1 ? ">=" : "< ", + i == DRM_DMA_HISTOGRAM_SLOTS - 1 + ? 
prev_value : slot_value , + + atomic_read(&dev->histo + .queued_to_dispatched[i]), + atomic_read(&dev->histo + .dispatched_to_completed[i]), + atomic_read(&dev->histo + .completed_to_freed[i]), + + atomic_read(&dev->histo + .queued_to_completed[i]), + atomic_read(&dev->histo + .queued_to_freed[i]), + atomic_read(&dev->histo.dma[i]), + atomic_read(&dev->histo.schedule[i]), + atomic_read(&dev->histo.ctx[i]), + atomic_read(&dev->histo.lacq[i]), + atomic_read(&dev->histo.lhld[i])); + prev_value = slot_value; + slot_value = DRM_DMA_HISTOGRAM_NEXT(slot_value); + } + SYSCTL_OUT(req, "", 1); + return 0; +} + +static int drm_histo_info SYSCTL_HANDLER_ARGS +{ + drm_device_t *dev = arg1; + int ret; + + lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc); + ret = _drm_histo_info(oidp, arg1, arg2, req); + lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc); + return ret; +} +#endif diff --git a/bsd/drmP.h b/bsd/drmP.h new file mode 100644 index 00000000..b62aff08 --- /dev/null +++ b/bsd/drmP.h @@ -0,0 +1,723 @@ +/* drmP.h -- Private header for Direct Rendering Manager -*- c -*- + * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com + * Revised: Tue Oct 12 08:51:07 1999 by faith@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmP.h,v 1.58 1999/08/30 13:05:00 faith Exp $ + * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/bsd/drm/kernel/drmP.h,v 1.1 2000/06/17 00:03:28 martin Exp $ + * + */ + +#ifndef _DRM_P_H_ +#define _DRM_P_H_ + +#ifdef _KERNEL +#include <sys/param.h> +#include <sys/queue.h> +#include <sys/malloc.h> +#include <sys/kernel.h> +#include <sys/module.h> +#include <sys/systm.h> +#include <sys/conf.h> +#include <sys/stat.h> +#include <sys/proc.h> +#include <sys/lock.h> +#include <sys/fcntl.h> +#include <sys/uio.h> +#include <sys/filio.h> +#include <sys/sysctl.h> +#include <sys/select.h> +#include <sys/bus.h> +#if __FreeBSD_version >= 500005 +#include <sys/taskqueue.h> +#endif + +#if __FreeBSD_version >= 500006 +#define DRM_AGP +#endif + +#ifdef DRM_AGP +#include <pci/agpvar.h> +#endif + +#include "drm.h" + +typedef u_int32_t atomic_t; +typedef u_int32_t cycles_t; +typedef u_int32_t spinlock_t; +#define atomic_set(p, v) (*(p) = (v)) +#define atomic_read(p) (*(p)) +#define atomic_inc(p) atomic_add_int(p, 1) +#define atomic_dec(p) atomic_subtract_int(p, 1) +#define atomic_add(n, p) atomic_add_int(p, n) +#define atomic_sub(n, p) atomic_subtract_int(p, n) + +/* Fake this */ +static __inline u_int32_t +test_and_set_bit(int b, volatile u_int32_t *p) +{ + int s = splhigh(); + u_int32_t m = 1<<b; + u_int32_t r = *p & m; + *p |= m; + splx(s); + return r; +} + +static __inline void +clear_bit(int b, volatile u_int32_t *p) +{ + atomic_clear_int(p + (b >> 5), 1 << (b & 0x1f)); +} + +static __inline void +set_bit(int b, volatile u_int32_t *p) +{ + atomic_set_int(p + (b >> 5), 1 << (b & 0x1f)); +} + +static __inline int +test_bit(int b, volatile u_int32_t *p) +{ + return p[b >> 5] & (1 << (b & 0x1f)); +} + +static __inline int +find_first_zero_bit(volatile u_int32_t *p, int max) +{ + int b; + + for (b = 0; b < max; b += 32) { + if (p[b >> 5] != ~0) { + for (;;) { + if ((p[b >> 5] & (1 << (b & 0x1f))) == 0) + return b; + b++; + } + } + } + return max; +} + +#define spldrm() spltty() + +#define memset(p, v, s) bzero(p, s) + +/* + * Fake out the module macros for versions of FreeBSD where they don't + * exist. + */ +#if __FreeBSD_version < 500002 + +#define MODULE_VERSION(a,b) struct __hack +#define MODULE_DEPEND(a,b,c,d,e) struct __hack + +#endif + +#define DRM_DEBUG_CODE 2 /* Include debugging code (if > 1, then + also include looping detection. */ +#define DRM_DMA_HISTOGRAM 1 /* Make histogram of DMA latency. */ + +#define DRM_HASH_SIZE 16 /* Size of key hash table */ +#define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */ +#define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */ +#define DRM_LOOPING_LIMIT 5000000 +#define DRM_BSZ 1024 /* Buffer size for /dev/drm? 
output */ +#define DRM_TIME_SLICE (hz/20) /* Time slice for GLXContexts */ +#define DRM_LOCK_SLICE 1 /* Time slice for lock, in jiffies */ + +#define DRM_FLAG_DEBUG 0x01 +#define DRM_FLAG_NOCTX 0x02 + +#define DRM_MEM_DMA 0 +#define DRM_MEM_SAREA 1 +#define DRM_MEM_DRIVER 2 +#define DRM_MEM_MAGIC 3 +#define DRM_MEM_IOCTLS 4 +#define DRM_MEM_MAPS 5 +#define DRM_MEM_VMAS 6 +#define DRM_MEM_BUFS 7 +#define DRM_MEM_SEGS 8 +#define DRM_MEM_PAGES 9 +#define DRM_MEM_FILES 10 +#define DRM_MEM_QUEUES 11 +#define DRM_MEM_CMDS 12 +#define DRM_MEM_MAPPINGS 13 +#define DRM_MEM_BUFLISTS 14 +#define DRM_MEM_AGPLISTS 15 +#define DRM_MEM_TOTALAGP 16 +#define DRM_MEM_BOUNDAGP 17 +#define DRM_MEM_CTXBITMAP 18 + +#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) + + /* Backward compatibility section */ +#ifndef _PAGE_PWT + /* The name of _PAGE_WT was changed to + _PAGE_PWT in Linux 2.2.6 */ +#define _PAGE_PWT _PAGE_WT +#endif + +#define __drm_dummy_lock(lock) (*(__volatile__ unsigned int *)lock) +#define _DRM_CAS(lock,old,new,__ret) \ + do { \ + int __dummy; /* Can't mark eax as clobbered */ \ + __asm__ __volatile__( \ + "lock ; cmpxchg %4,%1\n\t" \ + "setnz %0" \ + : "=d" (__ret), \ + "=m" (__drm_dummy_lock(lock)), \ + "=a" (__dummy) \ + : "2" (old), \ + "r" (new)); \ + } while (0) + + + + /* Macros to make printk easier */ +#define DRM_ERROR(fmt, arg...) \ + printf("error: " "[" DRM_NAME ":" __FUNCTION__ "] *ERROR* " fmt , ##arg) +#define DRM_MEM_ERROR(area, fmt, arg...) \ + printf("error: " "[" DRM_NAME ":" __FUNCTION__ ":%s] *ERROR* " fmt , \ + drm_mem_stats[area].name , ##arg) +#define DRM_INFO(fmt, arg...) printf("info: " "[" DRM_NAME "] " fmt , ##arg) + +#if DRM_DEBUG_CODE +#define DRM_DEBUG(fmt, arg...) \ + do { \ + if (drm_flags&DRM_FLAG_DEBUG) \ + printf("[" DRM_NAME ":" __FUNCTION__ "] " fmt , \ + ##arg); \ + } while (0) +#else +#define DRM_DEBUG(fmt, arg...) do { } while (0) +#endif + +#define DRM_PROC_LIMIT (PAGE_SIZE-80) + +#define DRM_SYSCTL_PRINT(fmt, arg...) \ + snprintf(buf, sizeof(buf), fmt, ##arg); \ + error = SYSCTL_OUT(req, buf, strlen(buf)); \ + if (error) return error; + +#define DRM_SYSCTL_PRINT_RET(ret, fmt, arg...) 
\ + snprintf(buf, sizeof(buf), fmt, ##arg); \ + error = SYSCTL_OUT(req, buf, strlen(buf)); \ + if (error) { ret; return error; } + + /* Internal types and structures */ +#define DRM_ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) +#define DRM_MIN(a,b) ((a)<(b)?(a):(b)) +#define DRM_MAX(a,b) ((a)>(b)?(a):(b)) + +#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1)) +#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x)) +#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist) + +typedef struct drm_ioctl_desc { + d_ioctl_t *func; + int auth_needed; + int root_only; +} drm_ioctl_desc_t; + +typedef struct drm_devstate { + pid_t owner; /* X server pid holding x_lock */ + +} drm_devstate_t; + +typedef struct drm_magic_entry { + drm_magic_t magic; + struct drm_file *priv; + struct drm_magic_entry *next; +} drm_magic_entry_t; + +typedef struct drm_magic_head { + struct drm_magic_entry *head; + struct drm_magic_entry *tail; +} drm_magic_head_t; + +typedef struct drm_vma_entry { + struct vm_area_struct *vma; + struct drm_vma_entry *next; + pid_t pid; +} drm_vma_entry_t; + +typedef struct drm_buf { + int idx; /* Index into master buflist */ + int total; /* Buffer size */ + int order; /* log-base-2(total) */ + int used; /* Amount of buffer in use (for DMA) */ + unsigned long offset; /* Byte offset (used internally) */ + void *address; /* Address of buffer */ + unsigned long bus_address; /* Bus address of buffer */ + struct drm_buf *next; /* Kernel-only: used for free list */ + __volatile__ int waiting; /* On kernel DMA queue */ + __volatile__ int pending; /* On hardware DMA queue */ + int dma_wait; /* Processes waiting */ + pid_t pid; /* PID of holding process */ + int context; /* Kernel queue for this buffer */ + int while_locked;/* Dispatch this buffer while locked */ + enum { + DRM_LIST_NONE = 0, + DRM_LIST_FREE = 1, + DRM_LIST_WAIT = 2, + DRM_LIST_PEND = 3, + DRM_LIST_PRIO = 4, + DRM_LIST_RECLAIM = 5 + } list; /* Which list we're on */ + + void *dev_private; + int dev_priv_size; + +#if DRM_DMA_HISTOGRAM + struct timespec time_queued; /* Queued to kernel DMA queue */ + struct timespec time_dispatched; /* Dispatched to hardware */ + struct timespec time_completed; /* Completed by hardware */ + struct timespec time_freed; /* Back on freelist */ +#endif +} drm_buf_t; + +#if DRM_DMA_HISTOGRAM +#define DRM_DMA_HISTOGRAM_SLOTS 9 +#define DRM_DMA_HISTOGRAM_INITIAL 10 +#define DRM_DMA_HISTOGRAM_NEXT(current) ((current)*10) +typedef struct drm_histogram { + atomic_t total; + + atomic_t queued_to_dispatched[DRM_DMA_HISTOGRAM_SLOTS]; + atomic_t dispatched_to_completed[DRM_DMA_HISTOGRAM_SLOTS]; + atomic_t completed_to_freed[DRM_DMA_HISTOGRAM_SLOTS]; + + atomic_t queued_to_completed[DRM_DMA_HISTOGRAM_SLOTS]; + atomic_t queued_to_freed[DRM_DMA_HISTOGRAM_SLOTS]; + + atomic_t dma[DRM_DMA_HISTOGRAM_SLOTS]; + atomic_t schedule[DRM_DMA_HISTOGRAM_SLOTS]; + atomic_t ctx[DRM_DMA_HISTOGRAM_SLOTS]; + atomic_t lacq[DRM_DMA_HISTOGRAM_SLOTS]; + atomic_t lhld[DRM_DMA_HISTOGRAM_SLOTS]; +} drm_histogram_t; +#endif + + /* bufs is one longer than it has to be */ +typedef struct drm_waitlist { + int count; /* Number of possible buffers */ + drm_buf_t **bufs; /* List of pointers to buffers */ + drm_buf_t **rp; /* Read pointer */ + drm_buf_t **wp; /* Write pointer */ + drm_buf_t **end; /* End pointer */ + spinlock_t read_lock; + spinlock_t write_lock; +} drm_waitlist_t; + +typedef struct drm_freelist { + int initialized; /* Freelist in use */ + atomic_t count; /* Number of free buffers */ + 
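/* Presumably the head of the free-buffer chain threaded through each drm_buf's kernel-only 'next' pointer (see drm_buf above). */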
drm_buf_t *next; /* End pointer */ + + int waiting; /* Processes waiting on free bufs */ + int low_mark; /* Low water mark */ + int high_mark; /* High water mark */ + atomic_t wfh; /* If waiting for high mark */ +} drm_freelist_t; + +typedef struct drm_buf_entry { + int buf_size; + int buf_count; + drm_buf_t *buflist; + int seg_count; + int page_order; + unsigned long *seglist; + + drm_freelist_t freelist; +} drm_buf_entry_t; + +typedef struct drm_hw_lock { + __volatile__ unsigned int lock; + char padding[60]; /* Pad to cache line */ +} drm_hw_lock_t; + +typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t; +typedef struct drm_file { + TAILQ_ENTRY(drm_file) link; + int authenticated; + int minor; + pid_t pid; + uid_t uid; + int refs; + drm_magic_t magic; + unsigned long ioctl_count; + struct drm_device *devXX; +} drm_file_t; + + +typedef struct drm_queue { + atomic_t use_count; /* Outstanding uses (+1) */ + atomic_t finalization; /* Finalization in progress */ + atomic_t block_count; /* Count of processes waiting */ + atomic_t block_read; /* Queue blocked for reads */ + int read_queue; /* Processes waiting on block_read */ + atomic_t block_write; /* Queue blocked for writes */ + int write_queue; /* Processes waiting on block_write */ + atomic_t total_queued; /* Total queued statistic */ + atomic_t total_flushed;/* Total flushes statistic */ + atomic_t total_locks; /* Total locks statistics */ + drm_ctx_flags_t flags; /* Context preserving and 2D-only */ + drm_waitlist_t waitlist; /* Pending buffers */ + int flush_queue; /* Processes waiting until flush */ +} drm_queue_t; + +typedef struct drm_lock_data { + drm_hw_lock_t *hw_lock; /* Hardware lock */ + pid_t pid; /* PID of lock holder (0=kernel) */ + int lock_queue; /* Queue of blocked processes */ + unsigned long lock_time; /* Time of last lock in jiffies */ +} drm_lock_data_t; + +typedef struct drm_device_dma { + /* Performance Counters */ + atomic_t total_prio; /* Total DRM_DMA_PRIORITY */ + atomic_t total_bytes; /* Total bytes DMA'd */ + atomic_t total_dmas; /* Total DMA buffers dispatched */ + + atomic_t total_missed_dma; /* Missed drm_do_dma */ + atomic_t total_missed_lock; /* Missed lock in drm_do_dma */ + atomic_t total_missed_free; /* Missed drm_free_this_buffer */ + atomic_t total_missed_sched;/* Missed drm_dma_schedule */ + + atomic_t total_tried; /* Tried next_buffer */ + atomic_t total_hit; /* Sent next_buffer */ + atomic_t total_lost; /* Lost interrupt */ + + drm_buf_entry_t bufs[DRM_MAX_ORDER+1]; + int buf_count; + drm_buf_t **buflist; /* Vector of pointers info bufs */ + int seg_count; + int page_count; + vm_offset_t *pagelist; + unsigned long byte_count; + enum { + _DRM_DMA_USE_AGP = 0x01 + } flags; + + /* DMA support */ + drm_buf_t *this_buffer; /* Buffer being sent */ + drm_buf_t *next_buffer; /* Selected buffer to send */ + drm_queue_t *next_queue; /* Queue from which buffer selected*/ + int waiting; /* Processes waiting on free bufs */ +} drm_device_dma_t; + +#ifdef DRM_AGP + +typedef struct drm_agp_mem { + void *handle; + unsigned long bound; /* address */ + int pages; + struct drm_agp_mem *prev; + struct drm_agp_mem *next; +} drm_agp_mem_t; + +typedef struct drm_agp_head { + device_t agpdev; + struct agp_info info; + const char *chipset; + drm_agp_mem_t *memory; + unsigned long mode; + int enabled; + int acquired; + unsigned long base; + int agp_mtrr; +} drm_agp_head_t; + +#endif + +typedef struct drm_device { + const char *name; /* Simple driver name */ + char *unique; /* Unique identifier: e.g., busid */ + 
int unique_len; /* Length of unique field */ + device_t device; /* Device instance from newbus */ + dev_t devnode; /* Device number for mknod */ + char *devname; /* For /proc/interrupts */ + + int blocked; /* Blocked due to VC switch? */ + int flags; /* Flags to open(2) */ + int writable; /* Opened with FWRITE */ + struct proc_dir_entry *root; /* Root for this device's entries */ + + /* Locks */ + struct simplelock count_lock; /* For inuse, open_count, buf_use */ + struct lock dev_lock; /* For others */ + + /* Usage Counters */ + int open_count; /* Outstanding files open */ + atomic_t ioctl_count; /* Outstanding IOCTLs pending */ + atomic_t vma_count; /* Outstanding vma areas open */ + int buf_use; /* Buffers in use -- cannot alloc */ + atomic_t buf_alloc; /* Buffer allocation in progress */ + + /* Performance Counters */ + atomic_t total_open; + atomic_t total_close; + atomic_t total_ioctl; + atomic_t total_irq; /* Total interruptions */ + atomic_t total_ctx; /* Total context switches */ + + atomic_t total_locks; + atomic_t total_unlocks; + atomic_t total_contends; + atomic_t total_sleeps; + + /* Authentication */ + drm_file_list_t files; + drm_magic_head_t magiclist[DRM_HASH_SIZE]; + + /* Memory management */ + drm_map_t **maplist; /* Vector of pointers to regions */ + int map_count; /* Number of mappable regions */ + + drm_vma_entry_t *vmalist; /* List of vmas (for debugging) */ + drm_lock_data_t lock; /* Information on hardware lock */ + + /* DMA queues (contexts) */ + int queue_count; /* Number of active DMA queues */ + int queue_reserved; /* Number of reserved DMA queues */ + int queue_slots; /* Actual length of queuelist */ + drm_queue_t **queuelist; /* Vector of pointers to DMA queues */ + drm_device_dma_t *dma; /* Optional pointer for DMA support */ + + /* Context support */ + struct resource *irq; /* Interrupt used by board */ + void *irqh; /* Handle from bus_setup_intr */ + __volatile__ int context_flag; /* Context swapping flag */ + __volatile__ int interrupt_flag;/* Interruption handler flag */ + __volatile__ int dma_flag; /* DMA dispatch flag */ + struct callout timer; /* Timer for delaying ctx switch */ + int context_wait; /* Processes waiting on ctx switch */ + int last_checked; /* Last context checked for DMA */ + int last_context; /* Last current context */ + int last_switch; /* Time at last context switch */ +#if __FreeBSD_version >= 500005 + struct task task; +#endif + struct timespec ctx_start; + struct timespec lck_start; +#if DRM_DMA_HISTOGRAM + drm_histogram_t histo; +#endif + + /* Callback to X server for context switch + and for heavy-handed reset. */ + char buf[DRM_BSZ]; /* Output buffer */ + char *buf_rp; /* Read pointer */ + char *buf_wp; /* Write pointer */ + char *buf_end; /* End pointer */ + struct sigio *buf_sigio; /* Processes waiting for SIGIO */ + struct selinfo buf_sel; /* Workspace for select/poll */ + int buf_readers; /* Processes waiting to read */ + int buf_writers; /* Processes waiting to ctx switch */ + int buf_selecting; /* True if poll sleeper */ + + /* Sysctl support */ + struct drm_sysctl_info *sysctl; + +#ifdef DRM_AGP + drm_agp_head_t *agp; +#endif + u_int32_t *ctx_bitmap; + void *dev_private; +} drm_device_t; + + + /* Internal function definitions */ + + /* Misc. 
support (init.c) */ +extern int drm_flags; +extern void drm_parse_options(char *s); + + + /* Device support (fops.c) */ +extern drm_file_t *drm_find_file_by_proc(drm_device_t *dev, struct proc *p); +extern int drm_open_helper(dev_t kdev, int flags, int fmt, struct proc *p, + drm_device_t *dev); +extern d_close_t drm_close; +extern d_read_t drm_read; +extern d_write_t drm_write; +extern d_poll_t drm_poll; +extern int drm_fsetown(dev_t kdev, u_long cmd, caddr_t data, + int flags, struct proc *p); +extern int drm_fgetown(dev_t kdev, u_long cmd, caddr_t data, + int flags, struct proc *p); +extern int drm_write_string(drm_device_t *dev, const char *s); + +#if 0 + /* Mapping support (vm.c) */ +extern unsigned long drm_vm_nopage(struct vm_area_struct *vma, + unsigned long address, + int write_access); +extern unsigned long drm_vm_shm_nopage(struct vm_area_struct *vma, + unsigned long address, + int write_access); +extern unsigned long drm_vm_dma_nopage(struct vm_area_struct *vma, + unsigned long address, + int write_access); +extern void drm_vm_open(struct vm_area_struct *vma); +extern void drm_vm_close(struct vm_area_struct *vma); +extern int drm_mmap_dma(struct file *filp, + struct vm_area_struct *vma); +#endif +extern d_mmap_t drm_mmap; + + /* Proc support (proc.c) */ +extern int drm_sysctl_init(drm_device_t *dev); +extern int drm_sysctl_cleanup(drm_device_t *dev); + + /* Memory management support (memory.c) */ +extern void drm_mem_init(void); +extern int drm_mem_info SYSCTL_HANDLER_ARGS; +extern void *drm_alloc(size_t size, int area); +extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, + int area); +extern char *drm_strdup(const char *s, int area); +extern void drm_strfree(char *s, int area); +extern void drm_free(void *pt, size_t size, int area); +extern unsigned long drm_alloc_pages(int order, int area); +extern void drm_free_pages(unsigned long address, int order, + int area); +extern void *drm_ioremap(unsigned long offset, unsigned long size); +extern void drm_ioremapfree(void *pt, unsigned long size); + +#ifdef DRM_AGP +extern void *drm_alloc_agp(int pages, u_int32_t type); +extern int drm_free_agp(void *handle, int pages); +extern int drm_bind_agp(void *handle, unsigned int start); +extern int drm_unbind_agp(void *handle); +#endif + + /* Buffer management support (bufs.c) */ +extern int drm_order(unsigned long size); +extern d_ioctl_t drm_addmap; +extern d_ioctl_t drm_addbufs; +extern d_ioctl_t drm_infobufs; +extern d_ioctl_t drm_markbufs; +extern d_ioctl_t drm_freebufs; +extern d_ioctl_t drm_mapbufs; + + + /* Buffer list management support (lists.c) */ +extern int drm_waitlist_create(drm_waitlist_t *bl, int count); +extern int drm_waitlist_destroy(drm_waitlist_t *bl); +extern int drm_waitlist_put(drm_waitlist_t *bl, drm_buf_t *buf); +extern drm_buf_t *drm_waitlist_get(drm_waitlist_t *bl); + +extern int drm_freelist_create(drm_freelist_t *bl, int count); +extern int drm_freelist_destroy(drm_freelist_t *bl); +extern int drm_freelist_put(drm_device_t *dev, drm_freelist_t *bl, + drm_buf_t *buf); +extern drm_buf_t *drm_freelist_get(drm_freelist_t *bl, int block); + + /* DMA support (gen_dma.c) */ +extern void drm_dma_setup(drm_device_t *dev); +extern void drm_dma_takedown(drm_device_t *dev); +extern void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf); +extern void drm_reclaim_buffers(drm_device_t *dev, pid_t pid); +extern int drm_context_switch(drm_device_t *dev, int old, int new); +extern int drm_context_switch_complete(drm_device_t *dev, int new); +extern void 
drm_wakeup(drm_device_t *dev, drm_buf_t *buf); +extern void drm_clear_next_buffer(drm_device_t *dev); +extern int drm_select_queue(drm_device_t *dev, + void (*wrapper)(void *)); +extern int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *dma); +extern int drm_dma_get_buffers(drm_device_t *dev, drm_dma_t *dma); +#if DRM_DMA_HISTOGRAM +extern int drm_histogram_slot(struct timespec *ts); +extern void drm_histogram_compute(drm_device_t *dev, drm_buf_t *buf); +#endif + + + /* Misc. IOCTL support (ioctl.c) */ +extern d_ioctl_t drm_irq_busid; +extern d_ioctl_t drm_getunique; +extern d_ioctl_t drm_setunique; + + + /* Context IOCTL support (context.c) */ +extern d_ioctl_t drm_resctx; +extern d_ioctl_t drm_addctx; +extern d_ioctl_t drm_modctx; +extern d_ioctl_t drm_getctx; +extern d_ioctl_t drm_switchctx; +extern d_ioctl_t drm_newctx; +extern d_ioctl_t drm_rmctx; + + + /* Drawable IOCTL support (drawable.c) */ +extern d_ioctl_t drm_adddraw; +extern d_ioctl_t drm_rmdraw; + + + /* Authentication IOCTL support (auth.c) */ +extern int drm_add_magic(drm_device_t *dev, drm_file_t *priv, + drm_magic_t magic); +extern int drm_remove_magic(drm_device_t *dev, drm_magic_t magic); +extern d_ioctl_t drm_getmagic; +extern d_ioctl_t drm_authmagic; + + + /* Locking IOCTL support (lock.c) */ +extern d_ioctl_t drm_block; +extern d_ioctl_t drm_unblock; +extern int drm_lock_take(__volatile__ unsigned int *lock, + unsigned int context); +extern int drm_lock_transfer(drm_device_t *dev, + __volatile__ unsigned int *lock, + unsigned int context); +extern int drm_lock_free(drm_device_t *dev, + __volatile__ unsigned int *lock, + unsigned int context); +extern d_ioctl_t drm_finish; +extern int drm_flush_unblock(drm_device_t *dev, int context, + drm_lock_flags_t flags); +extern int drm_flush_block_and_flush(drm_device_t *dev, int context, + drm_lock_flags_t flags); + + /* Context Bitmap support (ctxbitmap.c) */ +extern int drm_ctxbitmap_init(drm_device_t *dev); +extern void drm_ctxbitmap_cleanup(drm_device_t *dev); +extern int drm_ctxbitmap_next(drm_device_t *dev); +extern void drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle); + +#ifdef DRM_AGP + /* AGP/GART support (agpsupport.c) */ +extern drm_agp_head_t *drm_agp_init(void); +extern d_ioctl_t drm_agp_acquire; +extern d_ioctl_t drm_agp_release; +extern d_ioctl_t drm_agp_enable; +extern d_ioctl_t drm_agp_info; +extern d_ioctl_t drm_agp_alloc; +extern d_ioctl_t drm_agp_free; +extern d_ioctl_t drm_agp_unbind; +extern d_ioctl_t drm_agp_bind; +#endif +#endif +#endif diff --git a/bsd/i810_drm.h b/bsd/i810_drm.h new file mode 100644 index 00000000..4c8e09f6 --- /dev/null +++ b/bsd/i810_drm.h @@ -0,0 +1,188 @@ +#ifndef _I810_DRM_H_ +#define _I810_DRM_H_ + +/* WARNING: These defines must be the same as what the Xserver uses. + * if you change them, you must change the defines in the Xserver. + */ + +#ifndef _I810_DEFINES_ +#define _I810_DEFINES_ + +#define I810_DMA_BUF_ORDER 12 +#define I810_DMA_BUF_SZ (1<<I810_DMA_BUF_ORDER) +#define I810_DMA_BUF_NR 256 +#define I810_NR_SAREA_CLIPRECTS 8 + +/* Each region is a minimum of 64k, and there are at most 64 of them. 
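+ * (That is, I810_NR_TEX_REGIONS == 64 regions, with a minimum granularity of 1 << I810_LOG_MIN_TEX_REGION_SIZE == 64 kB.)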
+ */ +#define I810_NR_TEX_REGIONS 64 +#define I810_LOG_MIN_TEX_REGION_SIZE 16 +#endif + +#define I810_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */ +#define I810_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */ +#define I810_UPLOAD_CTX 0x4 +#define I810_UPLOAD_BUFFERS 0x8 +#define I810_UPLOAD_TEX0 0x10 +#define I810_UPLOAD_TEX1 0x20 +#define I810_UPLOAD_CLIPRECTS 0x40 + + +/* Indices into buf.Setup where various bits of state are mirrored per + * context and per buffer. These can be fired at the card as a unit, + * or in a piecewise fashion as required. + */ + +/* Destbuffer state + * - backbuffer linear offset and pitch -- invarient in the current dri + * - zbuffer linear offset and pitch -- also invarient + * - drawing origin in back and depth buffers. + * + * Keep the depth/back buffer state here to acommodate private buffers + * in the future. + */ +#define I810_DESTREG_DI0 0 /* CMD_OP_DESTBUFFER_INFO (2 dwords) */ +#define I810_DESTREG_DI1 1 +#define I810_DESTREG_DV0 2 /* GFX_OP_DESTBUFFER_VARS (2 dwords) */ +#define I810_DESTREG_DV1 3 +#define I810_DESTREG_DR0 4 /* GFX_OP_DRAWRECT_INFO (4 dwords) */ +#define I810_DESTREG_DR1 5 +#define I810_DESTREG_DR2 6 +#define I810_DESTREG_DR3 7 +#define I810_DESTREG_DR4 8 +#define I810_DEST_SETUP_SIZE 10 + +/* Context state + */ +#define I810_CTXREG_CF0 0 /* GFX_OP_COLOR_FACTOR */ +#define I810_CTXREG_CF1 1 +#define I810_CTXREG_ST0 2 /* GFX_OP_STIPPLE */ +#define I810_CTXREG_ST1 3 +#define I810_CTXREG_VF 4 /* GFX_OP_VERTEX_FMT */ +#define I810_CTXREG_MT 5 /* GFX_OP_MAP_TEXELS */ +#define I810_CTXREG_MC0 6 /* GFX_OP_MAP_COLOR_STAGES - stage 0 */ +#define I810_CTXREG_MC1 7 /* GFX_OP_MAP_COLOR_STAGES - stage 1 */ +#define I810_CTXREG_MC2 8 /* GFX_OP_MAP_COLOR_STAGES - stage 2 */ +#define I810_CTXREG_MA0 9 /* GFX_OP_MAP_ALPHA_STAGES - stage 0 */ +#define I810_CTXREG_MA1 10 /* GFX_OP_MAP_ALPHA_STAGES - stage 1 */ +#define I810_CTXREG_MA2 11 /* GFX_OP_MAP_ALPHA_STAGES - stage 2 */ +#define I810_CTXREG_SDM 12 /* GFX_OP_SRC_DEST_MONO */ +#define I810_CTXREG_FOG 13 /* GFX_OP_FOG_COLOR */ +#define I810_CTXREG_B1 14 /* GFX_OP_BOOL_1 */ +#define I810_CTXREG_B2 15 /* GFX_OP_BOOL_2 */ +#define I810_CTXREG_LCS 16 /* GFX_OP_LINEWIDTH_CULL_SHADE_MODE */ +#define I810_CTXREG_PV 17 /* GFX_OP_PV_RULE -- Invarient! */ +#define I810_CTXREG_ZA 18 /* GFX_OP_ZBIAS_ALPHAFUNC */ +#define I810_CTXREG_AA 19 /* GFX_OP_ANTIALIAS */ +#define I810_CTX_SETUP_SIZE 20 + +/* Texture state (per tex unit) + */ +#define I810_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (4 dwords) */ +#define I810_TEXREG_MI1 1 +#define I810_TEXREG_MI2 2 +#define I810_TEXREG_MI3 3 +#define I810_TEXREG_MF 4 /* GFX_OP_MAP_FILTER */ +#define I810_TEXREG_MLC 5 /* GFX_OP_MAP_LOD_CTL */ +#define I810_TEXREG_MLL 6 /* GFX_OP_MAP_LOD_LIMITS */ +#define I810_TEXREG_MCS 7 /* GFX_OP_MAP_COORD_SETS ??? 
*/ +#define I810_TEX_SETUP_SIZE 8 + +#define I810_FRONT 0x1 +#define I810_BACK 0x2 +#define I810_DEPTH 0x4 + + +typedef struct _drm_i810_init { + enum { + I810_INIT_DMA = 0x01, + I810_CLEANUP_DMA = 0x02 + } func; + int ring_map_idx; + int buffer_map_idx; + int sarea_priv_offset; + unsigned int ring_start; + unsigned int ring_end; + unsigned int ring_size; + unsigned int front_offset; + unsigned int back_offset; + unsigned int depth_offset; + unsigned int w; + unsigned int h; + unsigned int pitch; + unsigned int pitch_bits; +} drm_i810_init_t; + +/* Warning: If you change the SAREA structure you must change the Xserver + * structure as well */ + +typedef struct _drm_i810_tex_region { + unsigned char next, prev; /* indices to form a circular LRU */ + unsigned char in_use; /* owned by a client, or free? */ + int age; /* tracked by clients to update local LRU's */ +} drm_i810_tex_region_t; + +typedef struct _drm_i810_sarea { + unsigned int ContextState[I810_CTX_SETUP_SIZE]; + unsigned int BufferState[I810_DEST_SETUP_SIZE]; + unsigned int TexState[2][I810_TEX_SETUP_SIZE]; + unsigned int dirty; + + unsigned int nbox; + drm_clip_rect_t boxes[I810_NR_SAREA_CLIPRECTS]; + + /* Maintain an LRU of contiguous regions of texture space. If + * you think you own a region of texture memory, and it has an + * age different to the one you set, then you are mistaken and + * it has been stolen by another client. If global texAge + * hasn't changed, there is no need to walk the list. + * + * These regions can be used as a proxy for the fine-grained + * texture information of other clients - by maintaining them + * in the same lru which is used to age their own textures, + * clients have an approximate lru for the whole of global + * texture space, and can make informed decisions as to which + * areas to kick out. There is no need to choose whether to + * kick out your own texture or someone else's - simply eject + * them all in LRU order. + */ + + drm_i810_tex_region_t texList[I810_NR_TEX_REGIONS+1]; + /* Last elt is sentinal */ + int texAge; /* last time texture was uploaded */ + int last_enqueue; /* last time a buffer was enqueued */ + int last_dispatch; /* age of the most recently dispatched buffer */ + int last_quiescent; /* */ + int ctxOwner; /* last context to upload state */ + + int vertex_prim; + +} drm_i810_sarea_t; + +typedef struct _drm_i810_clear { + int clear_color; + int clear_depth; + int flags; +} drm_i810_clear_t; + + + +/* These may be placeholders if we have more cliprects than + * I810_NR_SAREA_CLIPRECTS. In that case, the client sets discard to + * false, indicating that the buffer will be dispatched again with a + * new set of cliprects. + */ +typedef struct _drm_i810_vertex { + int idx; /* buffer index */ + int used; /* nr bytes in use */ + int discard; /* client is finished with the buffer? */ +} drm_i810_vertex_t; + +typedef struct drm_i810_dma { + void *virtual; + int request_idx; + int request_size; + int granted; +} drm_i810_dma_t; + +#endif /* _I810_DRM_H_ */ diff --git a/bsd/mga_drm.h b/bsd/mga_drm.h new file mode 100644 index 00000000..eefa28d3 --- /dev/null +++ b/bsd/mga_drm.h @@ -0,0 +1,269 @@ +/* mga_drm.h -- Public header for the Matrox g200/g400 driver + * Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Jeff Hartmann <jhartmann@valinux.com> + * Keith Whitwell <keithw@valinux.com> + * + */ + +#ifndef _MGA_DRM_H_ +#define _MGA_DRM_H_ + +/* WARNING: If you change any of these defines, make sure to change the + * defines in the Xserver file (xf86drmMga.h) + */ +#ifndef _MGA_DEFINES_ +#define _MGA_DEFINES_ + +#define MGA_F 0x1 /* fog */ +#define MGA_A 0x2 /* alpha */ +#define MGA_S 0x4 /* specular */ +#define MGA_T2 0x8 /* multitexture */ + +#define MGA_WARP_TGZ 0 +#define MGA_WARP_TGZF (MGA_F) +#define MGA_WARP_TGZA (MGA_A) +#define MGA_WARP_TGZAF (MGA_F|MGA_A) +#define MGA_WARP_TGZS (MGA_S) +#define MGA_WARP_TGZSF (MGA_S|MGA_F) +#define MGA_WARP_TGZSA (MGA_S|MGA_A) +#define MGA_WARP_TGZSAF (MGA_S|MGA_F|MGA_A) +#define MGA_WARP_T2GZ (MGA_T2) +#define MGA_WARP_T2GZF (MGA_T2|MGA_F) +#define MGA_WARP_T2GZA (MGA_T2|MGA_A) +#define MGA_WARP_T2GZAF (MGA_T2|MGA_A|MGA_F) +#define MGA_WARP_T2GZS (MGA_T2|MGA_S) +#define MGA_WARP_T2GZSF (MGA_T2|MGA_S|MGA_F) +#define MGA_WARP_T2GZSA (MGA_T2|MGA_S|MGA_A) +#define MGA_WARP_T2GZSAF (MGA_T2|MGA_S|MGA_F|MGA_A) + +#define MGA_MAX_G400_PIPES 16 +#define MGA_MAX_G200_PIPES 8 /* no multitex */ +#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES + +#define MGA_CARD_TYPE_G200 1 +#define MGA_CARD_TYPE_G400 2 + +#define MGA_FRONT 0x1 +#define MGA_BACK 0x2 +#define MGA_DEPTH 0x4 + +/* 3d state excluding texture units: + */ +#define MGA_CTXREG_DSTORG 0 /* validated */ +#define MGA_CTXREG_MACCESS 1 +#define MGA_CTXREG_PLNWT 2 +#define MGA_CTXREG_DWGCTL 3 +#define MGA_CTXREG_ALPHACTRL 4 +#define MGA_CTXREG_FOGCOLOR 5 +#define MGA_CTXREG_WFLAG 6 +#define MGA_CTXREG_TDUAL0 7 +#define MGA_CTXREG_TDUAL1 8 +#define MGA_CTXREG_FCOL 9 +#define MGA_CTX_SETUP_SIZE 10 + +/* 2d state + */ +#define MGA_2DREG_PITCH 0 +#define MGA_2D_SETUP_SIZE 1 + +/* Each texture unit has a state: + */ +#define MGA_TEXREG_CTL 0 +#define MGA_TEXREG_CTL2 1 +#define MGA_TEXREG_FILTER 2 +#define MGA_TEXREG_BORDERCOL 3 +#define MGA_TEXREG_ORG 4 /* validated */ +#define MGA_TEXREG_ORG1 5 +#define MGA_TEXREG_ORG2 6 +#define MGA_TEXREG_ORG3 7 +#define MGA_TEXREG_ORG4 8 +#define MGA_TEXREG_WIDTH 9 +#define MGA_TEXREG_HEIGHT 10 +#define MGA_TEX_SETUP_SIZE 11 + +/* What needs to be changed for the current vertex dma buffer? 
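+ * These MGA_UPLOAD_* bits presumably end up in the sarea 'dirty' word when a buffer is fired; the ones marked "handled client-side" are consumed by the 3D client rather than the kernel.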
+ */ +#define MGA_UPLOAD_CTX 0x1 +#define MGA_UPLOAD_TEX0 0x2 +#define MGA_UPLOAD_TEX1 0x4 +#define MGA_UPLOAD_PIPE 0x8 +#define MGA_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */ +#define MGA_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */ +#define MGA_UPLOAD_2D 0x40 +#define MGA_WAIT_AGE 0x80 /* handled client-side */ +#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */ +#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock + quiescent */ + +/* 32 buffers of 64k each, total 2 meg. + */ +#define MGA_DMA_BUF_ORDER 16 +#define MGA_DMA_BUF_SZ (1<<MGA_DMA_BUF_ORDER) +#define MGA_DMA_BUF_NR 31 + +/* Keep these small for testing. + */ +#define MGA_NR_SAREA_CLIPRECTS 8 + +/* 2 heaps (1 for card, 1 for agp), each divided into upto 128 + * regions, subject to a minimum region size of (1<<16) == 64k. + * + * Clients may subdivide regions internally, but when sharing between + * clients, the region size is the minimum granularity. + */ + +#define MGA_CARD_HEAP 0 +#define MGA_AGP_HEAP 1 +#define MGA_NR_TEX_HEAPS 2 +#define MGA_NR_TEX_REGIONS 16 +#define MGA_LOG_MIN_TEX_REGION_SIZE 16 +#endif + +typedef struct _drm_mga_warp_index { + int installed; + unsigned long phys_addr; + int size; +} drm_mga_warp_index_t; + +typedef struct drm_mga_init { + enum { + MGA_INIT_DMA = 0x01, + MGA_CLEANUP_DMA = 0x02 + } func; + int reserved_map_agpstart; + int reserved_map_idx; + int buffer_map_idx; + int sarea_priv_offset; + int primary_size; + int warp_ucode_size; + unsigned int frontOffset; + unsigned int backOffset; + unsigned int depthOffset; + unsigned int textureOffset; + unsigned int textureSize; + unsigned int agpTextureOffset; + unsigned int agpTextureSize; + unsigned int cpp; + unsigned int stride; + int sgram; + int chipset; + drm_mga_warp_index_t WarpIndex[MGA_MAX_WARP_PIPES]; + unsigned int mAccess; +} drm_mga_init_t; + +/* Warning: if you change the sarea structure, you must change the Xserver + * structures as well */ + +typedef struct _drm_mga_tex_region { + unsigned char next, prev; + unsigned char in_use; + unsigned int age; +} drm_mga_tex_region_t; + +typedef struct _drm_mga_sarea { + /* The channel for communication of state information to the kernel + * on firing a vertex dma buffer. + */ + unsigned int ContextState[MGA_CTX_SETUP_SIZE]; + unsigned int ServerState[MGA_2D_SETUP_SIZE]; + unsigned int TexState[2][MGA_TEX_SETUP_SIZE]; + unsigned int WarpPipe; + unsigned int dirty; + + unsigned int nbox; + drm_clip_rect_t boxes[MGA_NR_SAREA_CLIPRECTS]; + + + /* Information about the most recently used 3d drawable. The + * client fills in the req_* fields, the server fills in the + * exported_ fields and puts the cliprects into boxes, above. + * + * The client clears the exported_drawable field before + * clobbering the boxes data. + */ + unsigned int req_drawable; /* the X drawable id */ + unsigned int req_draw_buffer; /* MGA_FRONT or MGA_BACK */ + + unsigned int exported_drawable; + unsigned int exported_index; + unsigned int exported_stamp; + unsigned int exported_buffers; + unsigned int exported_nfront; + unsigned int exported_nback; + int exported_back_x, exported_front_x, exported_w; + int exported_back_y, exported_front_y, exported_h; + drm_clip_rect_t exported_boxes[MGA_NR_SAREA_CLIPRECTS]; + + /* Counters for aging textures and for client-side throttling. 
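+ * (A client presumably throttles itself by comparing its buffers' ages against last_dispatch.)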
+ */ + unsigned int last_enqueue; /* last time a buffer was enqueued */ + unsigned int last_dispatch; /* age of the most recently dispatched buffer */ + unsigned int last_quiescent; /* */ + + + /* LRU lists for texture memory in agp space and on the card + */ + drm_mga_tex_region_t texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS+1]; + unsigned int texAge[MGA_NR_TEX_HEAPS]; + + /* Mechanism to validate card state. + */ + int ctxOwner; +} drm_mga_sarea_t; + +/* Device specific ioctls: + */ +typedef struct _drm_mga_clear { + unsigned int clear_color; + unsigned int clear_depth; + unsigned int flags; +} drm_mga_clear_t; + +typedef struct _drm_mga_swap { + int dummy; +} drm_mga_swap_t; + +typedef struct _drm_mga_iload { + int idx; + int length; + unsigned int destOrg; +} drm_mga_iload_t; + +typedef struct _drm_mga_vertex { + int idx; /* buffer to queue */ + int used; /* bytes in use */ + int discard; /* client finished with buffer? */ +} drm_mga_vertex_t; + +typedef struct _drm_mga_indices { + int idx; /* buffer to queue */ + unsigned int start; + unsigned int end; + int discard; /* client finished with buffer? */ +} drm_mga_indices_t; + +#endif diff --git a/bsd/r128_drm.h b/bsd/r128_drm.h new file mode 100644 index 00000000..bff103c2 --- /dev/null +++ b/bsd/r128_drm.h @@ -0,0 +1,111 @@ +/* r128_drm.h -- Public header for the r128 driver -*- linux-c -*- + * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com + * + * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. + * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Kevin E. 
Martin <kevin@precisioninsight.com> + * + * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/bsd/drm/kernel/r128_drm.h,v 1.1 2000/06/17 00:03:29 martin Exp $ + */ + +#ifndef _R128_DRM_H_ +#define _R128_DRM_H_ + +/* WARNING: If you change any of these defines, make sure to change the + * defines in the Xserver file (xf86drmR128.h) + */ +typedef struct drm_r128_init { + enum { + R128_INIT_CCE = 0x01, + R128_CLEANUP_CCE = 0x02 + } func; + int sarea_priv_offset; + int is_pci; + int cce_mode; + int cce_fifo_size; + int cce_secure; + int ring_size; + int usec_timeout; + + int fb_offset; + int agp_ring_offset; + int agp_read_ptr_offset; + int agp_vertbufs_offset; + int agp_indbufs_offset; + int agp_textures_offset; + int mmio_offset; +} drm_r128_init_t; + +typedef struct drm_r128_packet { + unsigned long *buffer; + int count; + int flags; +} drm_r128_packet_t; + +typedef enum drm_r128_prim { + _DRM_R128_PRIM_NONE = 0x0001, + _DRM_R128_PRIM_POINT = 0x0002, + _DRM_R128_PRIM_LINE = 0x0004, + _DRM_R128_PRIM_POLY_LINE = 0x0008, + _DRM_R128_PRIM_TRI_LIST = 0x0010, + _DRM_R128_PRIM_TRI_FAN = 0x0020, + _DRM_R128_PRIM_TRI_STRIP = 0x0040, + _DRM_R128_PRIM_TRI_TYPE2 = 0x0080 +} drm_r128_prim_t; + +typedef struct drm_r128_vertex { + /* Indices here refer to the offset into + buflist in drm_buf_get_t. */ + int send_count; /* Number of buffers to send */ + int *send_indices; /* List of handles to buffers */ + int *send_sizes; /* Lengths of data to send */ + drm_r128_prim_t prim; /* Primitive type */ + int request_count; /* Number of buffers requested */ + int *request_indices; /* Buffer information */ + int *request_sizes; + int granted_count; /* Number of buffers granted */ +} drm_r128_vertex_t; + +/* WARNING: If you change any of these defines, make sure to change the + * defines in the Xserver file (r128_sarea.h) + */ +#define R128_LOCAL_TEX_HEAP 0 +#define R128_AGP_TEX_HEAP 1 +#define R128_NR_TEX_HEAPS 2 +#define R128_NR_TEX_REGIONS 64 +#define R128_LOG_TEX_GRANULARITY 16 + +typedef struct drm_tex_region { + unsigned char next, prev; + unsigned char in_use; + int age; +} drm_tex_region_t; + +typedef struct drm_r128_sarea { + drm_tex_region_t tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS+1]; + int tex_age[R128_NR_TEX_HEAPS]; + int ctx_owner; + int ring_write; +} drm_r128_sarea_t; + +#endif diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index 3b0f98ac..e4bc53bc 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -1,8 +1,8 @@ /* xf86drm.c -- User-level interface to DRM device * Created: Tue Jan 5 08:16:21 1999 by faith@precisioninsight.com - * Revised: Sun Feb 13 23:43:32 2000 by kevin@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,7 +24,10 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drm.c,v 1.10 2000/02/23 04:47:23 martin Exp $ + * Authors: Rickard E. (Rik) Faith <faith@valinux.com> + * Kevin E. 
Martin <martin@valinux.com> + * + * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drm.c,v 1.14 2000/06/27 16:42:07 alanh Exp $ * */ @@ -65,15 +68,35 @@ extern int xf86RemoveSIGIOHandler(int fd); # endif #endif +#ifdef __alpha__ +extern unsigned long _bus_base(void); +#define BUS_BASE _bus_base() +#else +#define BUS_BASE (0) +#endif + /* Not all systems have MAP_FAILED defined */ #ifndef MAP_FAILED #define MAP_FAILED ((void *)-1) #endif -#include <sys/sysmacros.h> /* for makedev() */ #include "xf86drm.h" #include "drm.h" +#define DRM_FIXED_DEVICE_MAJOR 145 + +#ifdef __linux__ +#include <sys/sysmacros.h> /* for makedev() */ +#endif + +#ifndef makedev + /* This definition needs to be changed on + some systems if dev_t is a structure. + If there is a header file we can get it + from, there would be best. */ +#define makedev(x,y) ((dev_t)(((x) << 8) | (y))) +#endif + static void *drmHashTable = NULL; /* Context switch callbacks */ typedef struct drmHashEntry { @@ -94,9 +117,16 @@ void drmFree(void *pt) if (pt) _DRM_FREE(pt); } +/* drmStrdup can't use strdup(3), since it doesn't call _DRM_MALLOC... */ static char *drmStrdup(const char *s) { - return s ? strdup(s) : NULL; + char *retval = NULL; + + if (s) { + retval = _DRM_MALLOC(strlen(s)+1); + strcpy(retval, s); + } + return retval; } @@ -133,7 +163,7 @@ static drmHashEntry *drmGetEntry(int fd) return entry; } -/* drm_open is used to open the /dev/drm device */ +/* drm_open is used to open the /dev/dri device */ static int drm_open(const char *file) { @@ -143,14 +173,6 @@ static int drm_open(const char *file) return -errno; } -/* drmAvailable looks for /proc/drm, and returns 1 if it is present. */ - -int drmAvailable(void) -{ - if (!access("/proc/dri/0", R_OK)) return 1; - return 0; -} - static int drmOpenDevice(const char *path, long dev, mode_t mode, uid_t user, gid_t group) { @@ -160,7 +182,16 @@ static int drmOpenDevice(const char *path, long dev, struct stat st; #endif - if (!stat(path, &st) && st.st_rdev == dev) return drm_open(path); + /* Fiddle mode to remove execute bits */ + mode &= ~(S_IXUSR|S_IXGRP|S_IXOTH); + + if (!stat(path, &st) && st.st_rdev == dev) { + if (!geteuid()) { + chown(path, user, group); + chmod(path, mode); + } + return drm_open(path); + } if (geteuid()) return DRM_ERR_NOT_ROOT; remove(path); @@ -173,6 +204,38 @@ static int drmOpenDevice(const char *path, long dev, return drm_open(path); } +/* drmAvailable looks for /proc/dri, and returns 1 if it is present. On + OSs that do not have a Linux-like /proc, this information will not be + available, and we'll have to create a device and check if the driver is + loaded that way. */ + +int drmAvailable(void) +{ + char dev_name[64]; + drmVersionPtr version; + int retval = 0; + int fd; + + if (!access("/proc/dri/0", R_OK)) return 1; + + sprintf(dev_name, "/dev/dri-temp-%d", getpid()); + + remove(dev_name); + if ((fd = drmOpenDevice(dev_name, makedev(DRM_FIXED_DEVICE_MAJOR, 0), + S_IRUSR, geteuid(), getegid())) >= 0) { + /* Read version to make sure this is + actually a DRI device. */ + if ((version = drmGetVersion(fd))) { + retval = 1; + drmFreeVersion(version); + } + close(fd); + } + remove(dev_name); + + return retval; +} + static int drmOpenByBusid(const char *busid) { int i; @@ -214,7 +277,21 @@ static int drmOpenByName(const char *name) #if defined(XFree86Server) mode = xf86ConfigDRI.mode ? xf86ConfigDRI.mode : DRM_DEV_MODE; - group = xf86ConfigDRI.group ? xf86ConfigDRI.group : DRM_DEV_GID; + group = (xf86ConfigDRI.group >= 0) ? 
xf86ConfigDRI.group : DRM_DEV_GID; +#endif + +#if defined(XFree86Server) + if (!drmAvailable()) { + /* try to load the kernel module now */ + if (!xf86LoadKernelModule(name)) { + ErrorF("[drm] failed to load kernel module \"%s\"\n", + name); + return -1; + } + } +#else + if (!drmAvailable()) + return -1; #endif if (!geteuid()) { @@ -253,7 +330,24 @@ static int drmOpenByName(const char *name) } } } - } else remove(dev_name); + } else { + drmVersionPtr version; + /* /proc/dri not available, possibly + because we aren't on a Linux system. + So, try to create the next device and + see if it's active. */ + dev = makedev(DRM_FIXED_DEVICE_MAJOR, i); + if ((fd = drmOpenDevice(dev_name, dev, mode, user, group))) { + if ((version = drmGetVersion(fd))) { + if (!strcmp(version->name, name)) { + drmFreeVersion(version); + return fd; + } + drmFreeVersion(version); + } + } + remove(dev_name); + } } return -1; } @@ -288,7 +382,7 @@ static void drmFreeKernelVersion(drm_version_t *v) drmFree(v); } -static void drmCopyVersion(drmVersionPtr d, drm_version_t *s) +static void drmCopyVersion(drmVersionPtr d, const drm_version_t *s) { d->version_major = s->version_major; d->version_minor = s->version_minor; @@ -302,7 +396,7 @@ static void drmCopyVersion(drmVersionPtr d, drm_version_t *s) } /* drmVersion obtains the version information via an ioctl. Similar - * information is available via /proc/drm. */ + * information is available via /proc/dri. */ drmVersionPtr drmGetVersion(int fd) { @@ -409,6 +503,10 @@ int drmAddMap(int fd, drm_map_t map; map.offset = offset; +#ifdef __alpha__ + if (!(type & DRM_SHM)) + map.offset += BUS_BASE; +#endif map.size = size; map.handle = 0; map.type = type; @@ -418,7 +516,8 @@ int drmAddMap(int fd, return 0; } -int drmAddBufs(int fd, int count, int size, int flags) +int drmAddBufs(int fd, int count, int size, drmBufDescFlags flags, + int agp_offset) { drm_buf_desc_t request; @@ -427,6 +526,8 @@ int drmAddBufs(int fd, int count, int size, int flags) request.low_mark = 0; request.high_mark = 0; request.flags = flags; + request.agp_start = agp_offset; + if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &request)) return -errno; return request.count; } @@ -744,6 +845,143 @@ int drmDestroyDrawable(int fd, drmDrawable handle) return 0; } +int drmAgpAcquire(int fd) +{ + if (ioctl(fd, DRM_IOCTL_AGP_ACQUIRE, NULL)) return -errno; + return 0; +} + +int drmAgpRelease(int fd) +{ + if (ioctl(fd, DRM_IOCTL_AGP_RELEASE, NULL)) return -errno; + return 0; +} + +int drmAgpEnable(int fd, unsigned long mode) +{ + drm_agp_mode_t m; + + m.mode = mode; + if (ioctl(fd, DRM_IOCTL_AGP_ENABLE, &m)) return -errno; + return 0; +} + +int drmAgpAlloc(int fd, unsigned long size, unsigned long type, + unsigned long *address, unsigned long *handle) +{ + drm_agp_buffer_t b; + *handle = 0; + b.size = size; + b.handle = 0; + b.type = type; + if (ioctl(fd, DRM_IOCTL_AGP_ALLOC, &b)) return -errno; + if (address != 0UL) *address = b.physical; + *handle = b.handle; + return 0; +} + +int drmAgpFree(int fd, unsigned long handle) +{ + drm_agp_buffer_t b; + + b.size = 0; + b.handle = handle; + if (ioctl(fd, DRM_IOCTL_AGP_FREE, &b)) return -errno; + return 0; +} + +int drmAgpBind(int fd, unsigned long handle, unsigned long offset) +{ + drm_agp_binding_t b; + + b.handle = handle; + b.offset = offset; + if (ioctl(fd, DRM_IOCTL_AGP_BIND, &b)) return -errno; + return 0; +} + +int drmAgpUnbind(int fd, unsigned long handle) +{ + drm_agp_binding_t b; + + b.handle = handle; + b.offset = 0; + if (ioctl(fd, DRM_IOCTL_AGP_UNBIND, &b)) return -errno; + 
return 0; +} + +int drmAgpVersionMajor(int fd) +{ + drm_agp_info_t i; + + if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return -errno; + return i.agp_version_major; +} + +int drmAgpVersionMinor(int fd) +{ + drm_agp_info_t i; + + if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return -errno; + return i.agp_version_minor; +} + +unsigned long drmAgpGetMode(int fd) +{ + drm_agp_info_t i; + + if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0; + return i.mode; +} + +unsigned long drmAgpBase(int fd) +{ + drm_agp_info_t i; + + if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0; + return i.aperture_base; +} + +unsigned long drmAgpSize(int fd) +{ + drm_agp_info_t i; + + if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0; + return i.aperture_size; +} + +unsigned long drmAgpMemoryUsed(int fd) +{ + drm_agp_info_t i; + + if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0; + return i.memory_used; +} + +unsigned long drmAgpMemoryAvail(int fd) +{ + drm_agp_info_t i; + + if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0; + return i.memory_allowed; +} + +unsigned int drmAgpVendorId(int fd) +{ + drm_agp_info_t i; + + if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0; + return i.id_vendor; +} + +unsigned int drmAgpDeviceId(int fd) +{ + drm_agp_info_t i; + + if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0; + return i.id_device; +} + int drmError(int err, const char *label) { switch (err) { diff --git a/libdrm/xf86drmHash.c b/libdrm/xf86drmHash.c index c08a5f4c..24b698cc 100644 --- a/libdrm/xf86drmHash.c +++ b/libdrm/xf86drmHash.c @@ -1,6 +1,5 @@ /* xf86drmHash.c -- Small hash table support for integer -> integer mapping * Created: Sun Apr 18 09:35:45 1999 by faith@precisioninsight.com - * Revised: Thu Jun 3 16:11:06 1999 by faith@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * All Rights Reserved. @@ -24,7 +23,9 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drmHash.c,v 1.2 2000/02/23 04:47:23 martin Exp $ + * Authors: Rickard E. (Rik) Faith <faith@valinux.com> + * + * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drmHash.c,v 1.3 2000/06/17 00:03:34 martin Exp $ * * DESCRIPTION * diff --git a/libdrm/xf86drmRandom.c b/libdrm/xf86drmRandom.c index f42698c4..9e1e9ee2 100644 --- a/libdrm/xf86drmRandom.c +++ b/libdrm/xf86drmRandom.c @@ -1,6 +1,5 @@ /* xf86drmRandom.c -- "Minimal Standard" PRNG Implementation * Created: Mon Apr 19 08:28:13 1999 by faith@precisioninsight.com - * Revised: Thu Jun 24 14:53:45 1999 by faith@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * All Rights Reserved. @@ -24,7 +23,9 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drmRandom.c,v 1.3 2000/02/23 04:47:23 martin Exp $ + * Authors: Rickard E. (Rik) Faith <faith@valinux.com> + * + * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drmRandom.c,v 1.4 2000/06/17 00:03:34 martin Exp $ * * DESCRIPTION * diff --git a/libdrm/xf86drmSL.c b/libdrm/xf86drmSL.c index 56059384..dd634c30 100644 --- a/libdrm/xf86drmSL.c +++ b/libdrm/xf86drmSL.c @@ -1,6 +1,5 @@ /* xf86drmSL.c -- Skip list support * Created: Mon May 10 09:28:13 1999 by faith@precisioninsight.com - * Revised: Thu Jun 3 16:13:01 1999 by faith@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. 
* All Rights Reserved. @@ -24,7 +23,9 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drmSL.c,v 1.2 2000/02/23 04:47:24 martin Exp $ + * Authors: Rickard E. (Rik) Faith <faith@valinux.com> + * + * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drmSL.c,v 1.3 2000/06/17 00:03:34 martin Exp $ * * DESCRIPTION * diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 44d75c48..e262213a 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -9,12 +9,13 @@ # Note 2! The CFLAGS definitions are now inherited from the # parent makes.. # -# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.kernel,v 1.5 2000/02/14 06:27:25 martin Exp $ +# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.kernel,v 1.6 2000/06/17 00:03:34 martin Exp $ L_TARGET := libdrm.a L_OBJS := init.o memory.o proc.o auth.o context.o drawable.o bufs.o \ - lists.o lock.o ioctl.o fops.o vm.o dma.o + lists.o lock.o ioctl.o fops.o vm.o dma.o ctxbitmap.o \ + agpsupport.o M_OBJS := @@ -26,6 +27,14 @@ ifdef CONFIG_DRM_TDFX M_OBJS += tdfx.o endif +ifdef CONFIG_DRM_MGA +M_OBJS += mga.o +endif + +ifdef CONFIG_DRM_R128 +M_OBJS += r128.o +endif + include $(TOPDIR)/Rules.make gamma.o: gamma_drv.o gamma_dma.o $(L_TARGET) @@ -33,3 +42,12 @@ gamma.o: gamma_drv.o gamma_dma.o $(L_TARGET) tdfx.o: tdfx_drv.o tdfx_context.o $(L_TARGET) $(LD) $(LD_RFLAG) -r -o $@ tdfx_drv.o tdfx_context.o -L. -ldrm + +i810.o: i810_drv.o i810_context.o $(L_TARGET) + $(LD) $(LD_RFLAG) -r -o $@ i810_drv.o i810_bufs.o i810_dma.o i810_context.o -L. -ldrm + +mga.o: mga_drv.o mga_context.o mga_dma.o mga_bufs.o $(L_TARGET) + $(LD) $(LD_RFLAG) -r -o $@ mga_drv.o mga_bufs.o mga_dma.o mga_context.o mga_state.o -L. -ldrm + +r128.o: r128_drv.o r128_context.o $(L_TARGET) + $(LD) $(LD_RFLAG) -r -o $@ r128_drv.o r128_context.o -L. -ldrm diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 312fba36..9ad83bde 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1,8 +1,8 @@ /* drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*- * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com - * Revised: Sun Feb 13 23:34:30 2000 by kevin@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,7 +24,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmP.h,v 1.6 2000/02/23 04:47:27 martin Exp $ + * Authors: + * Rickard E. 
(Rik) Faith <faith@valinux.com> * */ @@ -32,6 +33,7 @@ #define _DRM_P_H_ #ifdef __KERNEL__ +#include <linux/config.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/miscdevice.h> @@ -48,8 +50,12 @@ #ifdef CONFIG_MTRR #include <asm/mtrr.h> #endif +#ifdef DRM_AGP +#include <linux/types.h> +#include <linux/agp_backend.h> +#endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,1,0) -#include <asm/spinlock.h> +#include <linux/tqueue.h> #include <linux/poll.h> #endif #include "drm.h" @@ -69,21 +75,27 @@ #define DRM_FLAG_DEBUG 0x01 #define DRM_FLAG_NOCTX 0x02 -#define DRM_MEM_DMA 0 -#define DRM_MEM_SAREA 1 -#define DRM_MEM_DRIVER 2 -#define DRM_MEM_MAGIC 3 -#define DRM_MEM_IOCTLS 4 -#define DRM_MEM_MAPS 5 -#define DRM_MEM_VMAS 6 -#define DRM_MEM_BUFS 7 -#define DRM_MEM_SEGS 8 -#define DRM_MEM_PAGES 9 -#define DRM_MEM_FILES 10 -#define DRM_MEM_QUEUES 11 -#define DRM_MEM_CMDS 12 -#define DRM_MEM_MAPPINGS 13 -#define DRM_MEM_BUFLISTS 14 +#define DRM_MEM_DMA 0 +#define DRM_MEM_SAREA 1 +#define DRM_MEM_DRIVER 2 +#define DRM_MEM_MAGIC 3 +#define DRM_MEM_IOCTLS 4 +#define DRM_MEM_MAPS 5 +#define DRM_MEM_VMAS 6 +#define DRM_MEM_BUFS 7 +#define DRM_MEM_SEGS 8 +#define DRM_MEM_PAGES 9 +#define DRM_MEM_FILES 10 +#define DRM_MEM_QUEUES 11 +#define DRM_MEM_CMDS 12 +#define DRM_MEM_MAPPINGS 13 +#define DRM_MEM_BUFLISTS 14 +#define DRM_MEM_AGPLISTS 15 +#define DRM_MEM_TOTALAGP 16 +#define DRM_MEM_BOUNDAGP 17 +#define DRM_MEM_CTXBITMAP 18 + +#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) /* Backward compatibility section */ /* _PAGE_WT changed to _PAGE_PWT in 2.2.6 */ @@ -118,7 +130,6 @@ typedef struct wait_queue *wait_queue_head_t; #endif /* Generic cmpxchg added in 2.3.x */ -#if CPU != 386 #ifndef __HAVE_ARCH_CMPXCHG /* Include this here so that driver can be used with older kernels. */ @@ -153,10 +164,6 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \ (unsigned long)(n),sizeof(*(ptr)))) #endif -#else - /* Compiling for a 386 proper... */ -#error DRI not supported on Intel 80386 -#endif /* Macros to make printk easier */ #define DRM_ERROR(fmt, arg...) 
\ @@ -218,8 +225,8 @@ typedef struct drm_magic_entry { } drm_magic_entry_t; typedef struct drm_magic_head { - struct drm_magic_entry *head; - struct drm_magic_entry *tail; + struct drm_magic_entry *head; + struct drm_magic_entry *tail; } drm_magic_head_t; typedef struct drm_vma_entry { @@ -235,6 +242,7 @@ typedef struct drm_buf { int used; /* Amount of buffer in use (for DMA) */ unsigned long offset; /* Byte offset (used internally) */ void *address; /* Address of buffer */ + unsigned long bus_address; /* Bus address of buffer */ struct drm_buf *next; /* Kernel-only: used for free list */ __volatile__ int waiting; /* On kernel DMA queue */ __volatile__ int pending; /* On hardware DMA queue */ @@ -250,12 +258,16 @@ typedef struct drm_buf { DRM_LIST_PRIO = 4, DRM_LIST_RECLAIM = 5 } list; /* Which list we're on */ + #if DRM_DMA_HISTOGRAM cycles_t time_queued; /* Queued to kernel DMA queue */ cycles_t time_dispatched; /* Dispatched to hardware */ cycles_t time_completed; /* Completed by hardware */ cycles_t time_freed; /* Back on freelist */ #endif + + int dev_priv_size; /* Size of buffer private stoarge */ + void *dev_private; /* Per-buffer private storage */ } drm_buf_t; #if DRM_DMA_HISTOGRAM @@ -376,6 +388,9 @@ typedef struct drm_device_dma { int page_count; unsigned long *pagelist; unsigned long byte_count; + enum { + _DRM_DMA_USE_AGP = 0x01 + } flags; /* DMA support */ drm_buf_t *this_buffer; /* Buffer being sent */ @@ -384,6 +399,41 @@ typedef struct drm_device_dma { wait_queue_head_t waiting; /* Processes waiting on free bufs */ } drm_device_dma_t; +#ifdef DRM_AGP +typedef struct drm_agp_mem { + unsigned long handle; + agp_memory *memory; + unsigned long bound; /* address */ + int pages; + struct drm_agp_mem *prev; + struct drm_agp_mem *next; +} drm_agp_mem_t; + +typedef struct drm_agp_head { + agp_kern_info agp_info; + const char *chipset; + drm_agp_mem_t *memory; + unsigned long mode; + int enabled; + int acquired; + unsigned long base; + int agp_mtrr; +} drm_agp_head_t; + +typedef struct { + void (*free_memory)(agp_memory *); + agp_memory *(*allocate_memory)(size_t, u32); + int (*bind_memory)(agp_memory *, off_t); + int (*unbind_memory)(agp_memory *); + void (*enable)(u32); + int (*acquire)(void); + void (*release)(void); + void (*copy_info)(agp_kern_info *); +} drm_agp_func_t; + +extern drm_agp_func_t drm_agp; +#endif + typedef struct drm_device { const char *name; /* Simple driver name */ char *unique; /* Unique identifier: e.g., busid */ @@ -462,6 +512,12 @@ typedef struct drm_device { struct fasync_struct *buf_async;/* Processes waiting for SIGIO */ wait_queue_head_t buf_readers; /* Processes waiting to read */ wait_queue_head_t buf_writers; /* Processes waiting to ctx switch */ + +#ifdef DRM_AGP + drm_agp_head_t *agp; +#endif + unsigned long *ctx_bitmap; + void *dev_private; } drm_device_t; @@ -470,6 +526,7 @@ typedef struct drm_device { /* Misc. 
support (init.c) */ extern int drm_flags; extern void drm_parse_options(char *s); +extern int drm_cpu_valid(void); /* Device support (fops.c) */ @@ -533,6 +590,14 @@ extern void drm_free_pages(unsigned long address, int order, extern void *drm_ioremap(unsigned long offset, unsigned long size); extern void drm_ioremapfree(void *pt, unsigned long size); +#ifdef DRM_AGP +extern agp_memory *drm_alloc_agp(int pages, u32 type); +extern int drm_free_agp(agp_memory *handle, int pages); +extern int drm_bind_agp(agp_memory *handle, unsigned int start); +extern int drm_unbind_agp(agp_memory *handle); +#endif + + /* Buffer management support (bufs.c) */ extern int drm_order(unsigned long size); extern int drm_addmap(struct inode *inode, struct file *filp, @@ -642,5 +707,32 @@ extern int drm_flush_unblock(drm_device_t *dev, int context, drm_lock_flags_t flags); extern int drm_flush_block_and_flush(drm_device_t *dev, int context, drm_lock_flags_t flags); + + /* Context Bitmap support (ctxbitmap.c) */ +extern int drm_ctxbitmap_init(drm_device_t *dev); +extern void drm_ctxbitmap_cleanup(drm_device_t *dev); +extern int drm_ctxbitmap_next(drm_device_t *dev); +extern void drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle); + +#ifdef DRM_AGP + /* AGP/GART support (agpsupport.c) */ +extern drm_agp_head_t *drm_agp_init(void); +extern int drm_agp_acquire(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_agp_release(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_agp_enable(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_agp_info(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_agp_alloc(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_agp_free(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_agp_unbind(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_agp_bind(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +#endif #endif #endif diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c index 09959b65..96b7a44f 100644 --- a/linux-core/i810_dma.c +++ b/linux-core/i810_dma.c @@ -2,6 +2,7 @@ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -23,10 +24,9 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com> - * Jeff Hartmann <jhartmann@precisioninsight.com> - * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_dma.c,v 1.1 2000/02/11 17:26:04 dawes Exp $ + * Authors: Rickard E. 
(Rik) Faith <faith@valinux.com> + * Jeff Hartmann <jhartmann@valinux.com> + * Keith Whitwell <keithw@valinux.com> * */ @@ -36,6 +36,18 @@ #include <linux/interrupt.h> /* For task queue support */ +/* in case we don't have a 2.3.99-pre6 kernel or later: */ +#ifndef VM_DONTCOPY +#define VM_DONTCOPY 0 +#endif + +#define I810_BUF_FREE 2 +#define I810_BUF_CLIENT 1 +#define I810_BUF_HARDWARE 0 + +#define I810_BUF_UNMAPPED 0 +#define I810_BUF_MAPPED 1 + #define I810_REG(reg) 2 #define I810_BASE(reg) ((unsigned long) \ dev->maplist[I810_REG(reg)]->handle) @@ -43,544 +55,835 @@ #define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg) #define I810_READ(reg) I810_DEREF(reg) #define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0) - -void i810_dma_init(drm_device_t *dev) +#define I810_DEREF16(reg) *(__volatile__ u16 *)I810_ADDR(reg) +#define I810_READ16(reg) I810_DEREF16(reg) +#define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0) + +#define RING_LOCALS unsigned int outring, ringmask; volatile char *virt; + +#define BEGIN_LP_RING(n) do { \ + if (I810_VERBOSE) \ + DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \ + n, __FUNCTION__); \ + if (dev_priv->ring.space < n*4) \ + i810_wait_ring(dev, n*4); \ + dev_priv->ring.space -= n*4; \ + outring = dev_priv->ring.tail; \ + ringmask = dev_priv->ring.tail_mask; \ + virt = dev_priv->ring.virtual_start; \ +} while (0) + +#define ADVANCE_LP_RING() do { \ + if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \ + dev_priv->ring.tail = outring; \ + I810_WRITE(LP_RING + RING_TAIL, outring); \ +} while(0) + +#define OUT_RING(n) do { \ + if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ + *(volatile unsigned int *)(virt + outring) = n; \ + outring += 4; \ + outring &= ringmask; \ +} while (0); + +static inline void i810_print_status_page(drm_device_t *dev) { - printk(KERN_INFO "i810_dma_init\n"); + drm_device_dma_t *dma = dev->dma; + drm_i810_private_t *dev_priv = dev->dev_private; + u32 *temp = (u32 *)dev_priv->hw_status_page; + int i; + + DRM_DEBUG( "hw_status: Interrupt Status : %x\n", temp[0]); + DRM_DEBUG( "hw_status: LpRing Head ptr : %x\n", temp[1]); + DRM_DEBUG( "hw_status: IRing Head ptr : %x\n", temp[2]); + DRM_DEBUG( "hw_status: Reserved : %x\n", temp[3]); + DRM_DEBUG( "hw_status: Driver Counter : %d\n", temp[5]); + for(i = 6; i < dma->buf_count + 6; i++) { + DRM_DEBUG( "buffer status idx : %d used: %d\n", i - 6, temp[i]); + } } -void i810_dma_cleanup(drm_device_t *dev) +static drm_buf_t *i810_freelist_get(drm_device_t *dev) { - printk(KERN_INFO "i810_dma_cleanup\n"); + drm_device_dma_t *dma = dev->dma; + int i; + int used; + + /* Linear search might not be the best solution */ + + for (i = 0; i < dma->buf_count; i++) { + drm_buf_t *buf = dma->buflist[ i ]; + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + /* In use is already a pointer */ + used = cmpxchg(buf_priv->in_use, I810_BUF_FREE, + I810_BUF_CLIENT); + if(used == I810_BUF_FREE) { + return buf; + } + } + return NULL; } -static inline void i810_dma_dispatch(drm_device_t *dev, unsigned long address, - unsigned long length) +/* This should only be called if the buffer is not sent to the hardware + * yet, the hardware updates in use for us once its on the ring buffer. 
+ */ + +static int i810_freelist_put(drm_device_t *dev, drm_buf_t *buf) { - printk(KERN_INFO "i810_dma_dispatch\n"); + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + int used; + + /* In use is already a pointer */ + used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE); + if(used != I810_BUF_CLIENT) { + DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx); + return -EINVAL; + } + + return 0; } -static inline void i810_dma_quiescent(drm_device_t *dev) +static struct file_operations i810_buffer_fops = { + open: i810_open, + flush: drm_flush, + release: i810_release, + ioctl: i810_ioctl, + mmap: i810_mmap_buffers, + read: drm_read, + fasync: drm_fasync, + poll: drm_poll, +}; + +int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) { + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i810_private_t *dev_priv = dev->dev_private; + drm_buf_t *buf = dev_priv->mmap_buffer; + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + + vma->vm_flags |= (VM_IO | VM_DONTCOPY); + vma->vm_file = filp; + + buf_priv->currently_mapped = I810_BUF_MAPPED; + + if (remap_page_range(vma->vm_start, + VM_OFFSET(vma), + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) return -EAGAIN; + return 0; } -static inline void i810_dma_ready(drm_device_t *dev) +static int i810_map_buffer(drm_buf_t *buf, struct file *filp) { - i810_dma_quiescent(dev); - printk(KERN_INFO "i810_dma_ready\n"); + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + drm_i810_private_t *dev_priv = dev->dev_private; + struct file_operations *old_fops; + int retcode = 0; + + if(buf_priv->currently_mapped == I810_BUF_MAPPED) return -EINVAL; + down(¤t->mm->mmap_sem); + old_fops = filp->f_op; + filp->f_op = &i810_buffer_fops; + dev_priv->mmap_buffer = buf; + buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, + PROT_READ|PROT_WRITE, + MAP_SHARED, + buf->bus_address); + dev_priv->mmap_buffer = NULL; + filp->f_op = old_fops; + if ((unsigned long)buf_priv->virtual > -1024UL) { + /* Real error */ + DRM_DEBUG("mmap error\n"); + retcode = (signed int)buf_priv->virtual; + buf_priv->virtual = 0; + } + up(¤t->mm->mmap_sem); + return retcode; } -static inline int i810_dma_is_ready(drm_device_t *dev) +static int i810_unmap_buffer(drm_buf_t *buf) { + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + int retcode = 0; - i810_dma_quiescent(dev); + if(buf_priv->currently_mapped != I810_BUF_MAPPED) return -EINVAL; + down(¤t->mm->mmap_sem); + retcode = do_munmap(current->mm, (unsigned long)buf_priv->virtual, + (size_t) buf->total); + buf_priv->currently_mapped = I810_BUF_UNMAPPED; + buf_priv->virtual = 0; + up(¤t->mm->mmap_sem); - printk(KERN_INFO "i810_dma_is_ready\n"); - return 1; + return retcode; } - -static void i810_dma_service(int irq, void *device, struct pt_regs *regs) +static int i810_dma_get_buffer(drm_device_t *dev, drm_i810_dma_t *d, + struct file *filp) { - drm_device_t *dev = (drm_device_t *)device; - drm_device_dma_t *dma = dev->dma; - - atomic_inc(&dev->total_irq); - if (i810_dma_is_ready(dev)) { - /* Free previous buffer */ - if (test_and_set_bit(0, &dev->dma_flag)) { - atomic_inc(&dma->total_missed_free); - return; - } - if (dma->this_buffer) { - drm_free_buffer(dev, dma->this_buffer); - dma->this_buffer = NULL; - } - clear_bit(0, &dev->dma_flag); - - /* Dispatch new buffer */ - queue_task(&dev->tq, &tq_immediate); - mark_bh(IMMEDIATE_BH); + drm_file_t *priv = filp->private_data; + drm_buf_t *buf; + 
drm_i810_buf_priv_t *buf_priv; + int retcode = 0; + + buf = i810_freelist_get(dev); + if (!buf) { + retcode = -ENOMEM; + DRM_DEBUG("%s retcode %d\n", __FUNCTION__, retcode); + goto out_get_buf; + } + + retcode = i810_map_buffer(buf, filp); + if(retcode) { + i810_freelist_put(dev, buf); + DRM_DEBUG("mapbuf failed in %s retcode %d\n", + __FUNCTION__, retcode); + goto out_get_buf; } + buf->pid = priv->pid; + buf_priv = buf->dev_private; + d->granted = 1; + d->request_idx = buf->idx; + d->request_size = buf->total; + d->virtual = buf_priv->virtual; + +out_get_buf: + return retcode; } -/* Only called by i810_dma_schedule. */ -static int i810_do_dma(drm_device_t *dev, int locked) +static unsigned long i810_alloc_page(drm_device_t *dev) { - unsigned long address; - unsigned long length; - drm_buf_t *buf; - int retcode = 0; - drm_device_dma_t *dma = dev->dma; -#if DRM_DMA_HISTOGRAM - cycles_t dma_start, dma_stop; -#endif - - if (test_and_set_bit(0, &dev->dma_flag)) { - atomic_inc(&dma->total_missed_dma); - return -EBUSY; - } + unsigned long address; + + address = __get_free_page(GFP_KERNEL); + if(address == 0UL) + return 0; -#if DRM_DMA_HISTOGRAM - dma_start = get_cycles(); -#endif - - if (!dma->next_buffer) { - DRM_ERROR("No next_buffer\n"); - clear_bit(0, &dev->dma_flag); - return -EINVAL; - } + atomic_inc(&mem_map[MAP_NR((void *) address)].count); + set_bit(PG_locked, &mem_map[MAP_NR((void *) address)].flags); + + return address; +} - buf = dma->next_buffer; - address = (unsigned long)buf->bus_address; - length = buf->used; +static void i810_free_page(drm_device_t *dev, unsigned long page) +{ + if(page == 0UL) + return; + atomic_dec(&mem_map[MAP_NR((void *) page)].count); + clear_bit(PG_locked, &mem_map[MAP_NR((void *) page)].flags); + wake_up(&mem_map[MAP_NR((void *) page)].wait); + free_page(page); + return; +} - DRM_DEBUG("context %d, buffer %d (%ld bytes)\n", - buf->context, buf->idx, length); - - if (buf->list == DRM_LIST_RECLAIM) { - drm_clear_next_buffer(dev); - drm_free_buffer(dev, buf); - clear_bit(0, &dev->dma_flag); - return -EINVAL; - } - - if (!length) { - DRM_ERROR("0 length buffer\n"); - drm_clear_next_buffer(dev); - drm_free_buffer(dev, buf); - clear_bit(0, &dev->dma_flag); - return 0; - } - - if (!i810_dma_is_ready(dev)) { - clear_bit(0, &dev->dma_flag); - return -EBUSY; - } +static int i810_dma_cleanup(drm_device_t *dev) +{ + drm_device_dma_t *dma = dev->dma; - if (buf->while_locked) { - if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { - DRM_ERROR("Dispatching buffer %d from pid %d" - " \"while locked\", but no lock held\n", - buf->idx, buf->pid); + if(dev->dev_private) { + int i; + drm_i810_private_t *dev_priv = + (drm_i810_private_t *) dev->dev_private; + + if(dev_priv->ring.virtual_start) { + drm_ioremapfree((void *) dev_priv->ring.virtual_start, + dev_priv->ring.Size); + } + if(dev_priv->hw_status_page != 0UL) { + i810_free_page(dev, dev_priv->hw_status_page); + /* Need to rewrite hardware status page */ + I810_WRITE(0x02080, 0x1ffff000); } - } else { - if (!locked && !drm_lock_take(&dev->lock.hw_lock->lock, - DRM_KERNEL_CONTEXT)) { - atomic_inc(&dma->total_missed_lock); - clear_bit(0, &dev->dma_flag); - return -EBUSY; + drm_free(dev->dev_private, sizeof(drm_i810_private_t), + DRM_MEM_DRIVER); + dev->dev_private = NULL; + + for (i = 0; i < dma->buf_count; i++) { + drm_buf_t *buf = dma->buflist[ i ]; + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + drm_ioremapfree(buf_priv->kernel_virtual, buf->total); } } + return 0; +} - if (dev->last_context != buf->context - && 
!(dev->queuelist[buf->context]->flags - & _DRM_CONTEXT_PRESERVED)) { - /* PRE: dev->last_context != buf->context */ - if (drm_context_switch(dev, dev->last_context, buf->context)) { - drm_clear_next_buffer(dev); - drm_free_buffer(dev, buf); +static int i810_wait_ring(drm_device_t *dev, int n) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + drm_i810_ring_buffer_t *ring = &(dev_priv->ring); + int iters = 0; + unsigned long end; + unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR; + + end = jiffies + (HZ*3); + while (ring->space < n) { + int i; + + ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR; + ring->space = ring->head - (ring->tail+8); + if (ring->space < 0) ring->space += ring->Size; + + if (ring->head != last_head) + end = jiffies + (HZ*3); + + iters++; + if((signed)(end - jiffies) <= 0) { + DRM_ERROR("space: %d wanted %d\n", ring->space, n); + DRM_ERROR("lockup\n"); + goto out_wait_ring; } - retcode = -EBUSY; - goto cleanup; - - /* POST: we will wait for the context - switch and will dispatch on a later call - when dev->last_context == buf->context. - NOTE WE HOLD THE LOCK THROUGHOUT THIS - TIME! */ - } - - drm_clear_next_buffer(dev); - buf->pending = 1; - buf->waiting = 0; - buf->list = DRM_LIST_PEND; -#if DRM_DMA_HISTOGRAM - buf->time_dispatched = get_cycles(); -#endif - - i810_dma_dispatch(dev, address, length); - drm_free_buffer(dev, dma->this_buffer); - dma->this_buffer = buf; - atomic_add(length, &dma->total_bytes); - atomic_inc(&dma->total_dmas); - - if (!buf->while_locked && !dev->context_flag && !locked) { - if (drm_lock_free(dev, &dev->lock.hw_lock->lock, - DRM_KERNEL_CONTEXT)) { - DRM_ERROR("\n"); - } + for (i = 0 ; i < 2000 ; i++) ; } -cleanup: - clear_bit(0, &dev->dma_flag); - -#if DRM_DMA_HISTOGRAM - dma_stop = get_cycles(); - atomic_inc(&dev->histo.dma[drm_histogram_slot(dma_stop - dma_start)]); -#endif - - return retcode; +out_wait_ring: + return iters; } -static void i810_dma_schedule_timer_wrapper(unsigned long dev) +static void i810_kernel_lost_context(drm_device_t *dev) { - i810_dma_schedule((drm_device_t *)dev, 0); + drm_i810_private_t *dev_priv = dev->dev_private; + drm_i810_ring_buffer_t *ring = &(dev_priv->ring); + + ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR; + ring->tail = I810_READ(LP_RING + RING_TAIL); + ring->space = ring->head - (ring->tail+8); + if (ring->space < 0) ring->space += ring->Size; } -static void i810_dma_schedule_tq_wrapper(void *dev) +static int i810_freelist_init(drm_device_t *dev) { - i810_dma_schedule(dev, 0); + drm_device_dma_t *dma = dev->dma; + drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; + int my_idx = 24; + u32 *hw_status = (u32 *)(dev_priv->hw_status_page + my_idx); + int i; + + if(dma->buf_count > 1019) { + /* Not enough space in the status page for the freelist */ + return -EINVAL; + } + + for (i = 0; i < dma->buf_count; i++) { + drm_buf_t *buf = dma->buflist[ i ]; + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + + buf_priv->in_use = hw_status++; + buf_priv->my_use_idx = my_idx; + my_idx += 4; + + *buf_priv->in_use = I810_BUF_FREE; + + buf_priv->kernel_virtual = drm_ioremap(buf->bus_address, + buf->total); + } + return 0; } -int i810_dma_schedule(drm_device_t *dev, int locked) +static int i810_dma_initialize(drm_device_t *dev, + drm_i810_private_t *dev_priv, + drm_i810_init_t *init) { - int next; - drm_queue_t *q; - drm_buf_t *buf; - int retcode = 0; - int processed = 0; - int missed; - int expire = 20; - drm_device_dma_t *dma = dev->dma; -#if 
DRM_DMA_HISTOGRAM - cycles_t schedule_start; -#endif + drm_map_t *sarea_map; - if (test_and_set_bit(0, &dev->interrupt_flag)) { - /* Not reentrant */ - atomic_inc(&dma->total_missed_sched); - return -EBUSY; + dev->dev_private = (void *) dev_priv; + memset(dev_priv, 0, sizeof(drm_i810_private_t)); + + if (init->ring_map_idx >= dev->map_count || + init->buffer_map_idx >= dev->map_count) { + i810_dma_cleanup(dev); + DRM_ERROR("ring_map or buffer_map are invalid\n"); + return -EINVAL; + } + + dev_priv->ring_map_idx = init->ring_map_idx; + dev_priv->buffer_map_idx = init->buffer_map_idx; + sarea_map = dev->maplist[0]; + dev_priv->sarea_priv = (drm_i810_sarea_t *) + ((u8 *)sarea_map->handle + + init->sarea_priv_offset); + + atomic_set(&dev_priv->flush_done, 0); + init_waitqueue_head(&dev_priv->flush_queue); + + dev_priv->ring.Start = init->ring_start; + dev_priv->ring.End = init->ring_end; + dev_priv->ring.Size = init->ring_size; + + dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base + + init->ring_start, + init->ring_size); + + dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; + + if (dev_priv->ring.virtual_start == NULL) { + i810_dma_cleanup(dev); + DRM_ERROR("can not ioremap virtual address for" + " ring buffer\n"); + return -ENOMEM; } - missed = atomic_read(&dma->total_missed_sched); -#if DRM_DMA_HISTOGRAM - schedule_start = get_cycles(); -#endif + dev_priv->w = init->w; + dev_priv->h = init->h; + dev_priv->pitch = init->pitch; + dev_priv->back_offset = init->back_offset; + dev_priv->depth_offset = init->depth_offset; -again: - if (dev->context_flag) { - clear_bit(0, &dev->interrupt_flag); - return -EBUSY; + dev_priv->front_di1 = init->front_offset | init->pitch_bits; + dev_priv->back_di1 = init->back_offset | init->pitch_bits; + dev_priv->zi1 = init->depth_offset | init->pitch_bits; + + + /* Program Hardware Status Page */ + dev_priv->hw_status_page = i810_alloc_page(dev); + memset((void *) dev_priv->hw_status_page, 0, PAGE_SIZE); + if(dev_priv->hw_status_page == 0UL) { + i810_dma_cleanup(dev); + DRM_ERROR("Can not allocate hardware status page\n"); + return -ENOMEM; } - if (dma->next_buffer) { - /* Unsent buffer that was previously - selected, but that couldn't be sent - because the lock could not be obtained - or the DMA engine wasn't ready. Try - again. 
*/ - atomic_inc(&dma->total_tried); - if (!(retcode = i810_do_dma(dev, locked))) { - atomic_inc(&dma->total_hit); - ++processed; - } - } else { - do { - next = drm_select_queue(dev, - i810_dma_schedule_timer_wrapper); - if (next >= 0) { - q = dev->queuelist[next]; - buf = drm_waitlist_get(&q->waitlist); - dma->next_buffer = buf; - dma->next_queue = q; - if (buf && buf->list == DRM_LIST_RECLAIM) { - drm_clear_next_buffer(dev); - drm_free_buffer(dev, buf); - } - } - } while (next >= 0 && !dma->next_buffer); - if (dma->next_buffer) { - if (!(retcode = i810_do_dma(dev, locked))) { - ++processed; - } - } + DRM_DEBUG("hw status page @ %lx\n", dev_priv->hw_status_page); + + I810_WRITE(0x02080, virt_to_bus((void *)dev_priv->hw_status_page)); + DRM_DEBUG("Enabled hardware status page\n"); + + /* Now we need to init our freelist */ + if(i810_freelist_init(dev) != 0) { + i810_dma_cleanup(dev); + DRM_ERROR("Not enough space in the status page for" + " the freelist\n"); + return -ENOMEM; } + return 0; +} - if (--expire) { - if (missed != atomic_read(&dma->total_missed_sched)) { - atomic_inc(&dma->total_lost); - if (i810_dma_is_ready(dev)) goto again; - } - if (processed && i810_dma_is_ready(dev)) { - atomic_inc(&dma->total_lost); - processed = 0; - goto again; - } - } +int i810_dma_init(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i810_private_t *dev_priv; + drm_i810_init_t init; + int retcode = 0; - clear_bit(0, &dev->interrupt_flag); + copy_from_user_ret(&init, (drm_i810_init_t *)arg, + sizeof(init), -EFAULT); -#if DRM_DMA_HISTOGRAM - atomic_inc(&dev->histo.schedule[drm_histogram_slot(get_cycles() - - schedule_start)]); -#endif - return retcode; + switch(init.func) { + case I810_INIT_DMA: + dev_priv = drm_alloc(sizeof(drm_i810_private_t), + DRM_MEM_DRIVER); + if(dev_priv == NULL) return -ENOMEM; + retcode = i810_dma_initialize(dev, dev_priv, &init); + break; + case I810_CLEANUP_DMA: + retcode = i810_dma_cleanup(dev); + break; + default: + retcode = -EINVAL; + break; + } + + return retcode; } -static int i810_dma_priority(drm_device_t *dev, drm_dma_t *d) -{ - unsigned long address; - unsigned long length; - int must_free = 0; - int retcode = 0; - int i; - int idx; - drm_buf_t *buf; - drm_buf_t *last_buf = NULL; - drm_device_dma_t *dma = dev->dma; - DECLARE_WAITQUEUE(entry, current); - /* Turn off interrupt handling */ - while (test_and_set_bit(0, &dev->interrupt_flag)) { - schedule(); - if (signal_pending(current)) return -EINTR; - } - if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) { - while (!drm_lock_take(&dev->lock.hw_lock->lock, - DRM_KERNEL_CONTEXT)) { - schedule(); - if (signal_pending(current)) { - clear_bit(0, &dev->interrupt_flag); - return -EINTR; - } - } - ++must_free; + +/* Most efficient way to verify state for the i810 is as it is + * emitted. Non-conformant state is silently dropped. + * + * Use 'volatile' & local var tmp to force the emitted values to be + * identical to the verified ones. 
+ */ +static void i810EmitContextVerified( drm_device_t *dev, + volatile unsigned int *code ) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + int i, j = 0; + unsigned int tmp; + RING_LOCALS; + + BEGIN_LP_RING( I810_CTX_SETUP_SIZE ); + + OUT_RING( GFX_OP_COLOR_FACTOR ); + OUT_RING( code[I810_CTXREG_CF1] ); + + OUT_RING( GFX_OP_STIPPLE ); + OUT_RING( code[I810_CTXREG_ST1] ); + + for ( i = 4 ; i < I810_CTX_SETUP_SIZE ; i++ ) { + tmp = code[i]; + + if ((tmp & (7<<29)) == (3<<29) && + (tmp & (0x1f<<24)) < (0x1d<<24)) + { + OUT_RING( tmp ); + j++; + } } - atomic_inc(&dma->total_prio); - for (i = 0; i < d->send_count; i++) { - idx = d->send_indices[i]; - if (idx < 0 || idx >= dma->buf_count) { - DRM_ERROR("Index %d (of %d max)\n", - d->send_indices[i], dma->buf_count - 1); - continue; - } - buf = dma->buflist[ idx ]; - if (buf->pid != current->pid) { - DRM_ERROR("Process %d using buffer owned by %d\n", - current->pid, buf->pid); - retcode = -EINVAL; - goto cleanup; - } - if (buf->list != DRM_LIST_NONE) { - DRM_ERROR("Process %d using %d's buffer on list %d\n", - current->pid, buf->pid, buf->list); - retcode = -EINVAL; - goto cleanup; - } - /* This isn't a race condition on - buf->list, since our concern is the - buffer reclaim during the time the - process closes the /dev/drm? handle, so - it can't also be doing DMA. */ - buf->list = DRM_LIST_PRIO; - buf->used = d->send_sizes[i]; - buf->context = d->context; - buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED; - address = (unsigned long)buf->address; - length = buf->used; - if (!length) { - DRM_ERROR("0 length buffer\n"); - } - if (buf->pending) { - DRM_ERROR("Sending pending buffer:" - " buffer %d, offset %d\n", - d->send_indices[i], i); - retcode = -EINVAL; - goto cleanup; - } - if (buf->waiting) { - DRM_ERROR("Sending waiting buffer:" - " buffer %d, offset %d\n", - d->send_indices[i], i); - retcode = -EINVAL; - goto cleanup; + if (j & 1) + OUT_RING( 0 ); + + ADVANCE_LP_RING(); +} + +static void i810EmitTexVerified( drm_device_t *dev, + volatile unsigned int *code ) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + int i, j = 0; + unsigned int tmp; + RING_LOCALS; + + BEGIN_LP_RING( I810_TEX_SETUP_SIZE ); + + OUT_RING( GFX_OP_MAP_INFO ); + OUT_RING( code[I810_TEXREG_MI1] ); + OUT_RING( code[I810_TEXREG_MI2] ); + OUT_RING( code[I810_TEXREG_MI3] ); + + for ( i = 4 ; i < I810_TEX_SETUP_SIZE ; i++ ) { + tmp = code[i]; + + if ((tmp & (7<<29)) == (3<<29) && + (tmp & (0x1f<<24)) < (0x1d<<24)) + { + OUT_RING( tmp ); + j++; } - buf->pending = 1; + } - if (dev->last_context != buf->context - && !(dev->queuelist[buf->context]->flags - & _DRM_CONTEXT_PRESERVED)) { - add_wait_queue(&dev->context_wait, &entry); - current->state = TASK_INTERRUPTIBLE; - /* PRE: dev->last_context != buf->context */ - drm_context_switch(dev, dev->last_context, - buf->context); - /* POST: we will wait for the context - switch and will dispatch on a later call - when dev->last_context == buf->context. - NOTE WE HOLD THE LOCK THROUGHOUT THIS - TIME! 
*/ - schedule(); - current->state = TASK_RUNNING; - remove_wait_queue(&dev->context_wait, &entry); - if (signal_pending(current)) { - retcode = -EINTR; - goto cleanup; - } - if (dev->last_context != buf->context) { - DRM_ERROR("Context mismatch: %d %d\n", - dev->last_context, - buf->context); - } - } + if (j & 1) + OUT_RING( 0 ); -#if DRM_DMA_HISTOGRAM - buf->time_queued = get_cycles(); - buf->time_dispatched = buf->time_queued; -#endif - i810_dma_dispatch(dev, address, length); - if (drm_lock_free(dev, &dev->lock.hw_lock->lock, - DRM_KERNEL_CONTEXT)) { - DRM_ERROR("\n"); - } + ADVANCE_LP_RING(); +} - atomic_add(length, &dma->total_bytes); - atomic_inc(&dma->total_dmas); - - if (last_buf) { - drm_free_buffer(dev, last_buf); - } - last_buf = buf; + +/* Need to do some additional checking when setting the dest buffer. + */ +static void i810EmitDestVerified( drm_device_t *dev, + volatile unsigned int *code ) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + unsigned int tmp; + RING_LOCALS; + + BEGIN_LP_RING( I810_DEST_SETUP_SIZE + 2 ); + + tmp = code[I810_DESTREG_DI1]; + if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) { + OUT_RING( CMD_OP_DESTBUFFER_INFO ); + OUT_RING( tmp ); + } else + DRM_DEBUG("bad di1 %x (allow %x or %x)\n", + tmp, dev_priv->front_di1, dev_priv->back_di1); + + /* invarient: + */ + OUT_RING( CMD_OP_Z_BUFFER_INFO ); + OUT_RING( dev_priv->zi1 ); + + OUT_RING( GFX_OP_DESTBUFFER_VARS ); + OUT_RING( code[I810_DESTREG_DV1] ); + + OUT_RING( GFX_OP_DRAWRECT_INFO ); + OUT_RING( code[I810_DESTREG_DR1] ); + OUT_RING( code[I810_DESTREG_DR2] ); + OUT_RING( code[I810_DESTREG_DR3] ); + OUT_RING( code[I810_DESTREG_DR4] ); + OUT_RING( 0 ); + + ADVANCE_LP_RING(); +} + + + +static void i810EmitState( drm_device_t *dev ) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; + unsigned int dirty = sarea_priv->dirty; + + if (dirty & I810_UPLOAD_BUFFERS) { + i810EmitDestVerified( dev, sarea_priv->BufferState ); + sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS; } + if (dirty & I810_UPLOAD_CTX) { + i810EmitContextVerified( dev, sarea_priv->ContextState ); + sarea_priv->dirty &= ~I810_UPLOAD_CTX; + } -cleanup: - if (last_buf) { - i810_dma_ready(dev); - drm_free_buffer(dev, last_buf); + if (dirty & I810_UPLOAD_TEX0) { + i810EmitTexVerified( dev, sarea_priv->TexState[0] ); + sarea_priv->dirty &= ~I810_UPLOAD_TEX0; } - - if (must_free && !dev->context_flag) { - if (drm_lock_free(dev, &dev->lock.hw_lock->lock, - DRM_KERNEL_CONTEXT)) { - DRM_ERROR("\n"); - } + + if (dirty & I810_UPLOAD_TEX1) { + i810EmitTexVerified( dev, sarea_priv->TexState[1] ); + sarea_priv->dirty &= ~I810_UPLOAD_TEX1; } - clear_bit(0, &dev->interrupt_flag); - return retcode; } -static int i810_dma_send_buffers(drm_device_t *dev, drm_dma_t *d) + + +/* need to verify + */ +static void i810_dma_dispatch_clear( drm_device_t *dev, int flags, + unsigned int clear_color, + unsigned int clear_zval ) { - DECLARE_WAITQUEUE(entry, current); - drm_buf_t *last_buf = NULL; - int retcode = 0; - drm_device_dma_t *dma = dev->dma; + drm_i810_private_t *dev_priv = dev->dev_private; + drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; + int nbox = sarea_priv->nbox; + drm_clip_rect_t *pbox = sarea_priv->boxes; + int pitch = dev_priv->pitch; + int cpp = 2; + int i; + RING_LOCALS; + + i810_kernel_lost_context(dev); + + if (nbox > I810_NR_SAREA_CLIPRECTS) + nbox = I810_NR_SAREA_CLIPRECTS; + + for (i = 0 ; i < nbox ; i++, pbox++) { + unsigned int x = pbox->x1; + unsigned int y = 
pbox->y1; + unsigned int width = (pbox->x2 - x) * cpp; + unsigned int height = pbox->y2 - y; + unsigned int start = y * pitch + x * cpp; + + if (pbox->x1 > pbox->x2 || + pbox->y1 > pbox->y2 || + pbox->x2 > dev_priv->w || + pbox->y2 > dev_priv->h) + continue; - if (d->flags & _DRM_DMA_BLOCK) { - last_buf = dma->buflist[d->send_indices[d->send_count-1]]; - add_wait_queue(&last_buf->dma_wait, &entry); - } - - if ((retcode = drm_dma_enqueue(dev, d))) { - if (d->flags & _DRM_DMA_BLOCK) - remove_wait_queue(&last_buf->dma_wait, &entry); - return retcode; - } - - i810_dma_schedule(dev, 0); - - if (d->flags & _DRM_DMA_BLOCK) { - DRM_DEBUG("%d waiting\n", current->pid); - current->state = TASK_INTERRUPTIBLE; - for (;;) { - if (!last_buf->waiting - && !last_buf->pending) - break; /* finished */ - schedule(); - if (signal_pending(current)) { - retcode = -EINTR; /* Can't restart */ - break; - } + if ( flags & I810_FRONT ) { + DRM_DEBUG("clear front\n"); + BEGIN_LP_RING( 6 ); + OUT_RING( BR00_BITBLT_CLIENT | + BR00_OP_COLOR_BLT | 0x3 ); + OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch ); + OUT_RING( (height << 16) | width ); + OUT_RING( start ); + OUT_RING( clear_color ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); } - current->state = TASK_RUNNING; - DRM_DEBUG("%d running\n", current->pid); - remove_wait_queue(&last_buf->dma_wait, &entry); - if (!retcode - || (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) { - if (!waitqueue_active(&last_buf->dma_wait)) { - drm_free_buffer(dev, last_buf); - } + + if ( flags & I810_BACK ) { + DRM_DEBUG("clear back\n"); + BEGIN_LP_RING( 6 ); + OUT_RING( BR00_BITBLT_CLIENT | + BR00_OP_COLOR_BLT | 0x3 ); + OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch ); + OUT_RING( (height << 16) | width ); + OUT_RING( dev_priv->back_offset + start ); + OUT_RING( clear_color ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); } - if (retcode) { - DRM_ERROR("ctx%d w%d p%d c%d i%d l%d %d/%d\n", - d->context, - last_buf->waiting, - last_buf->pending, - DRM_WAITCOUNT(dev, d->context), - last_buf->idx, - last_buf->list, - last_buf->pid, - current->pid); + + if ( flags & I810_DEPTH ) { + DRM_DEBUG("clear depth\n"); + BEGIN_LP_RING( 6 ); + OUT_RING( BR00_BITBLT_CLIENT | + BR00_OP_COLOR_BLT | 0x3 ); + OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch ); + OUT_RING( (height << 16) | width ); + OUT_RING( dev_priv->depth_offset + start ); + OUT_RING( clear_zval ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); } } - return retcode; } -int i810_dma(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +static void i810_dma_dispatch_swap( drm_device_t *dev ) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - drm_device_dma_t *dma = dev->dma; - int retcode = 0; - drm_dma_t d; + drm_i810_private_t *dev_priv = dev->dev_private; + drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; + int nbox = sarea_priv->nbox; + drm_clip_rect_t *pbox = sarea_priv->boxes; + int pitch = dev_priv->pitch; + int cpp = 2; + int ofs = dev_priv->back_offset; + int i; + RING_LOCALS; + + DRM_DEBUG("swapbuffers\n"); + + i810_kernel_lost_context(dev); + + if (nbox > I810_NR_SAREA_CLIPRECTS) + nbox = I810_NR_SAREA_CLIPRECTS; + + for (i = 0 ; i < nbox; i++, pbox++) + { + unsigned int w = pbox->x2 - pbox->x1; + unsigned int h = pbox->y2 - pbox->y1; + unsigned int dst = pbox->x1*cpp + pbox->y1*pitch; + unsigned int start = ofs + dst; + + if (pbox->x1 > pbox->x2 || + pbox->y1 > pbox->y2 || + pbox->x2 > dev_priv->w || + pbox->y2 > dev_priv->h) + continue; + + DRM_DEBUG("dispatch swap 
%d,%d-%d,%d!\n", + pbox[i].x1, pbox[i].y1, + pbox[i].x2, pbox[i].y2); + + BEGIN_LP_RING( 6 ); + OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4 ); + OUT_RING( pitch | (0xCC << 16)); + OUT_RING( (h << 16) | (w * cpp)); + OUT_RING( dst ); + OUT_RING( pitch ); + OUT_RING( start ); + ADVANCE_LP_RING(); + } +} - printk("i810_dma start\n"); - copy_from_user_ret(&d, (drm_dma_t *)arg, sizeof(d), -EFAULT); - DRM_DEBUG("%d %d: %d send, %d req\n", - current->pid, d.context, d.send_count, d.request_count); - if (d.context == DRM_KERNEL_CONTEXT || d.context >= dev->queue_slots) { - DRM_ERROR("Process %d using context %d\n", - current->pid, d.context); - return -EINVAL; +static void i810_dma_dispatch_vertex(drm_device_t *dev, + drm_buf_t *buf, + int discard, + int used) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_clip_rect_t *box = sarea_priv->boxes; + int nbox = sarea_priv->nbox; + unsigned long address = (unsigned long)buf->bus_address; + unsigned long start = address - dev->agp->base; + int i = 0, u; + RING_LOCALS; + + i810_kernel_lost_context(dev); + + if (nbox > I810_NR_SAREA_CLIPRECTS) + nbox = I810_NR_SAREA_CLIPRECTS; + + if (discard) { + u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, + I810_BUF_HARDWARE); + if(u != I810_BUF_CLIENT) { + DRM_DEBUG("xxxx 2\n"); + } } - if (d.send_count < 0 || d.send_count > dma->buf_count) { - DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n", - current->pid, d.send_count, dma->buf_count); - return -EINVAL; + if (used > 4*1024) + used = 0; + + if (sarea_priv->dirty) + i810EmitState( dev ); + + DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n", + address, used, nbox); + + dev_priv->counter++; + DRM_DEBUG( "dispatch counter : %ld\n", dev_priv->counter); + DRM_DEBUG( "i810_dma_dispatch\n"); + DRM_DEBUG( "start : %lx\n", start); + DRM_DEBUG( "used : %d\n", used); + DRM_DEBUG( "start + used - 4 : %ld\n", start + used - 4); + + if (buf_priv->currently_mapped == I810_BUF_MAPPED) { + *(u32 *)buf_priv->virtual = (GFX_OP_PRIMITIVE | + sarea_priv->vertex_prim | + ((used/4)-2)); + + if (used & 4) { + *(u32 *)((u32)buf_priv->virtual + used) = 0; + used += 4; + } + + i810_unmap_buffer(buf); } - if (d.request_count < 0 || d.request_count > dma->buf_count) { - DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", - current->pid, d.request_count, dma->buf_count); - return -EINVAL; + + if (used) { + do { + if (i < nbox) { + BEGIN_LP_RING(4); + OUT_RING( GFX_OP_SCISSOR | SC_UPDATE_SCISSOR | + SC_ENABLE ); + OUT_RING( GFX_OP_SCISSOR_INFO ); + OUT_RING( box[i].x1 | (box[i].y1<<16) ); + OUT_RING( (box[i].x2-1) | ((box[i].y2-1)<<16) ); + ADVANCE_LP_RING(); + } + + BEGIN_LP_RING(4); + OUT_RING( CMD_OP_BATCH_BUFFER ); + OUT_RING( start | BB1_PROTECTED ); + OUT_RING( start + used - 4 ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); + + } while (++i < nbox); } - if (d.send_count) { -#if 0 - if (d.flags & _DRM_DMA_PRIORITY) - retcode = i810_dma_priority(dev, &d); - else - retcode = i810_dma_send_buffers(dev, &d); -#endif - printk("i810_dma priority\n"); - - retcode = i810_dma_priority(dev, &d); + BEGIN_LP_RING(10); + OUT_RING( CMD_STORE_DWORD_IDX ); + OUT_RING( 20 ); + OUT_RING( dev_priv->counter ); + OUT_RING( 0 ); + + if (discard) { + OUT_RING( CMD_STORE_DWORD_IDX ); + OUT_RING( buf_priv->my_use_idx ); + OUT_RING( I810_BUF_FREE ); + OUT_RING( 0 ); } - d.granted_count = 0; + OUT_RING( CMD_REPORT_HEAD ); + OUT_RING( 0 ); + 
ADVANCE_LP_RING(); +} - if (!retcode && d.request_count) { - retcode = drm_dma_get_buffers(dev, &d); - } - DRM_DEBUG("%d returning, granted = %d\n", - current->pid, d.granted_count); - copy_to_user_ret((drm_dma_t *)arg, &d, sizeof(d), -EFAULT); +/* Interrupts are only for flushing */ +static void i810_dma_service(int irq, void *device, struct pt_regs *regs) +{ + drm_device_t *dev = (drm_device_t *)device; + u16 temp; + + atomic_inc(&dev->total_irq); + temp = I810_READ16(I810REG_INT_IDENTITY_R); + temp = temp & ~(0x6000); + if(temp != 0) I810_WRITE16(I810REG_INT_IDENTITY_R, + temp); /* Clear all interrupts */ + else + return; + + queue_task(&dev->tq, &tq_immediate); + mark_bh(IMMEDIATE_BH); +} - printk("i810_dma end (granted)\n"); - return retcode; +static void i810_dma_task_queue(void *device) +{ + drm_device_t *dev = (drm_device_t *) device; + drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; + + atomic_set(&dev_priv->flush_done, 1); + wake_up_interruptible(&dev_priv->flush_queue); } int i810_irq_install(drm_device_t *dev, int irq) { int retcode; - + u16 temp; + if (!irq) return -EINVAL; down(&dev->struct_sem); @@ -591,6 +894,7 @@ int i810_irq_install(drm_device_t *dev, int irq) dev->irq = irq; up(&dev->struct_sem); + DRM_DEBUG( "Interrupt Install : %d\n", irq); DRM_DEBUG("%d\n", irq); dev->context_flag = 0; @@ -603,16 +907,25 @@ int i810_irq_install(drm_device_t *dev, int irq) dev->tq.next = NULL; dev->tq.sync = 0; - dev->tq.routine = i810_dma_schedule_tq_wrapper; + dev->tq.routine = i810_dma_task_queue; dev->tq.data = dev; - /* Before installing handler */ - /* TODO */ + temp = I810_READ16(I810REG_HWSTAM); + temp = temp & 0x6000; + I810_WRITE16(I810REG_HWSTAM, temp); + + temp = I810_READ16(I810REG_INT_MASK_R); + temp = temp & 0x6000; + I810_WRITE16(I810REG_INT_MASK_R, temp); /* Unmask interrupts */ + temp = I810_READ16(I810REG_INT_ENABLE_R); + temp = temp & 0x6000; + I810_WRITE16(I810REG_INT_ENABLE_R, temp); /* Disable all interrupts */ + /* Install handler */ if ((retcode = request_irq(dev->irq, i810_dma_service, - 0, + SA_SHIRQ, dev->devname, dev))) { down(&dev->struct_sem); @@ -620,15 +933,21 @@ int i810_irq_install(drm_device_t *dev, int irq) up(&dev->struct_sem); return retcode; } - - /* After installing handler */ - /* TODO */ + temp = I810_READ16(I810REG_INT_ENABLE_R); + temp = temp & 0x6000; + temp = temp | 0x0003; + I810_WRITE16(I810REG_INT_ENABLE_R, + temp); /* Enable bp & user interrupts */ return 0; } int i810_irq_uninstall(drm_device_t *dev) { int irq; + u16 temp; + + +/* return 0; */ down(&dev->struct_sem); irq = dev->irq; @@ -636,16 +955,25 @@ int i810_irq_uninstall(drm_device_t *dev) up(&dev->struct_sem); if (!irq) return -EINVAL; - + + DRM_DEBUG( "Interrupt UnInstall: %d\n", irq); DRM_DEBUG("%d\n", irq); - - /* TODO : Disable interrupts */ + + temp = I810_READ16(I810REG_INT_IDENTITY_R); + temp = temp & ~(0x6000); + if(temp != 0) I810_WRITE16(I810REG_INT_IDENTITY_R, + temp); /* Clear all interrupts */ + + temp = I810_READ16(I810REG_INT_ENABLE_R); + temp = temp & 0x6000; + I810_WRITE16(I810REG_INT_ENABLE_R, + temp); /* Disable all interrupts */ + free_irq(irq, dev); return 0; } - int i810_control(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { @@ -654,8 +982,7 @@ int i810_control(struct inode *inode, struct file *filp, unsigned int cmd, drm_control_t ctl; int retcode; - printk(KERN_INFO "i810_control\n"); - i810_dma_init(dev); + DRM_DEBUG( "i810_control\n"); copy_from_user_ret(&ctl, (drm_control_t *)arg, sizeof(ctl), 
-EFAULT); @@ -674,20 +1001,161 @@ int i810_control(struct inode *inode, struct file *filp, unsigned int cmd, return 0; } +static inline void i810_dma_emit_flush(drm_device_t *dev) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + i810_kernel_lost_context(dev); + + BEGIN_LP_RING(2); + OUT_RING( CMD_REPORT_HEAD ); + OUT_RING( GFX_OP_USER_INTERRUPT ); + ADVANCE_LP_RING(); + +/* i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */ +/* atomic_set(&dev_priv->flush_done, 1); */ +/* wake_up_interruptible(&dev_priv->flush_queue); */ +} + +static inline void i810_dma_quiescent_emit(drm_device_t *dev) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + i810_kernel_lost_context(dev); + + BEGIN_LP_RING(4); + OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE ); + OUT_RING( CMD_REPORT_HEAD ); + OUT_RING( 0 ); + OUT_RING( GFX_OP_USER_INTERRUPT ); + ADVANCE_LP_RING(); + +/* i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */ +/* atomic_set(&dev_priv->flush_done, 1); */ +/* wake_up_interruptible(&dev_priv->flush_queue); */ +} + +static void i810_dma_quiescent(drm_device_t *dev) +{ + DECLARE_WAITQUEUE(entry, current); + drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; + unsigned long end; + + if(dev_priv == NULL) { + return; + } + atomic_set(&dev_priv->flush_done, 0); + current->state = TASK_INTERRUPTIBLE; + add_wait_queue(&dev_priv->flush_queue, &entry); + end = jiffies + (HZ*3); + + for (;;) { + i810_dma_quiescent_emit(dev); + if (atomic_read(&dev_priv->flush_done) == 1) break; + if((signed)(end - jiffies) <= 0) { + DRM_ERROR("lockup\n"); + break; + } + schedule_timeout(HZ*3); + if (signal_pending(current)) { + break; + } + } + + current->state = TASK_RUNNING; + remove_wait_queue(&dev_priv->flush_queue, &entry); + + return; +} + +static int i810_flush_queue(drm_device_t *dev) +{ + DECLARE_WAITQUEUE(entry, current); + drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; + drm_device_dma_t *dma = dev->dma; + unsigned long end; + int i, ret = 0; + + if(dev_priv == NULL) { + return 0; + } + atomic_set(&dev_priv->flush_done, 0); + current->state = TASK_INTERRUPTIBLE; + add_wait_queue(&dev_priv->flush_queue, &entry); + end = jiffies + (HZ*3); + for (;;) { + i810_dma_emit_flush(dev); + if (atomic_read(&dev_priv->flush_done) == 1) break; + if((signed)(end - jiffies) <= 0) { + DRM_ERROR("lockup\n"); + break; + } + schedule_timeout(HZ*3); + if (signal_pending(current)) { + ret = -EINTR; /* Can't restart */ + break; + } + } + + current->state = TASK_RUNNING; + remove_wait_queue(&dev_priv->flush_queue, &entry); + + + for (i = 0; i < dma->buf_count; i++) { + drm_buf_t *buf = dma->buflist[ i ]; + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + + int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE, + I810_BUF_FREE); + + if (used == I810_BUF_HARDWARE) + DRM_DEBUG("reclaimed from HARDWARE\n"); + if (used == I810_BUF_CLIENT) + DRM_DEBUG("still on client HARDWARE\n"); + } + + return ret; +} + +/* Must be called with the lock held */ +void i810_reclaim_buffers(drm_device_t *dev, pid_t pid) +{ + drm_device_dma_t *dma = dev->dma; + int i; + + if (!dma) return; + if (!dev->dev_private) return; + if (!dma->buflist) return; + + i810_flush_queue(dev); + + for (i = 0; i < dma->buf_count; i++) { + drm_buf_t *buf = dma->buflist[ i ]; + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + + if (buf->pid == pid && buf_priv) { + int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, + I810_BUF_FREE); + + if (used == I810_BUF_CLIENT) + 
DRM_DEBUG("reclaimed from client\n"); + if(buf_priv->currently_mapped == I810_BUF_MAPPED) + buf_priv->currently_mapped = I810_BUF_UNMAPPED; + } + } +} + int i810_lock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; + DECLARE_WAITQUEUE(entry, current); int ret = 0; drm_lock_t lock; - drm_queue_t *q; -#if DRM_DMA_HISTOGRAM - cycles_t start; - - dev->lck_start = start = get_cycles(); -#endif copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); @@ -696,26 +1164,18 @@ int i810_lock(struct inode *inode, struct file *filp, unsigned int cmd, current->pid, lock.context); return -EINVAL; } + + DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", + lock.context, current->pid, dev->lock.hw_lock->lock, + lock.flags); - if (lock.context < 0 || lock.context >= dev->queue_count) { + if (lock.context < 0) { return -EINVAL; } - q = dev->queuelist[lock.context]; - - ret = drm_flush_block_and_flush(dev, lock.context, lock.flags); + /* Only one queue: + */ if (!ret) { - if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) - != lock.context) { - long j = jiffies - dev->lock.lock_time; - - if (j > 0 && j <= DRM_LOCK_SLICE) { - /* Can't take lock if we just had it and - there is contention. */ - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(j); - } - } add_wait_queue(&dev->lock.lock_queue, &entry); for (;;) { if (!dev->lock.hw_lock) { @@ -728,13 +1188,13 @@ int i810_lock(struct inode *inode, struct file *filp, unsigned int cmd, dev->lock.pid = current->pid; dev->lock.lock_time = jiffies; atomic_inc(&dev->total_locks); - atomic_inc(&q->total_locks); break; /* Got lock */ } /* Contention */ atomic_inc(&dev->total_sleeps); current->state = TASK_INTERRUPTIBLE; + DRM_DEBUG("Calling lock schedule\n"); schedule(); if (signal_pending(current)) { ret = -ERESTARTSYS; @@ -744,19 +1204,153 @@ int i810_lock(struct inode *inode, struct file *filp, unsigned int cmd, current->state = TASK_RUNNING; remove_wait_queue(&dev->lock.lock_queue, &entry); } - - drm_flush_unblock(dev, lock.context, lock.flags); /* cleanup phase */ if (!ret) { - if (lock.flags & _DRM_LOCK_READY) - i810_dma_ready(dev); - if (lock.flags & _DRM_LOCK_QUIESCENT) - i810_dma_quiescent(dev); + if (lock.flags & _DRM_LOCK_QUIESCENT) { + DRM_DEBUG("_DRM_LOCK_QUIESCENT\n"); + DRM_DEBUG("fred\n"); + i810_dma_quiescent(dev); + } } + DRM_DEBUG("%d %s\n", lock.context, ret ? 
"interrupted" : "has lock"); + return ret; +} -#if DRM_DMA_HISTOGRAM - atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]); -#endif +int i810_flush_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + + DRM_DEBUG("i810_flush_ioctl\n"); + if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("i810_flush_ioctl called without lock held\n"); + return -EINVAL; + } + + i810_flush_queue(dev); + return 0; +} + + +int i810_dma_vertex(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_device_dma_t *dma = dev->dma; + drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; + u32 *hw_status = (u32 *)dev_priv->hw_status_page; + drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) + dev_priv->sarea_priv; + drm_i810_vertex_t vertex; + + copy_from_user_ret(&vertex, (drm_i810_vertex_t *)arg, sizeof(vertex), + -EFAULT); + + if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("i810_dma_vertex called without lock held\n"); + return -EINVAL; + } + + DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n", + vertex.idx, vertex.used, vertex.discard); + + i810_dma_dispatch_vertex( dev, + dma->buflist[ vertex.idx ], + vertex.discard, vertex.used ); + + atomic_add(vertex.used, &dma->total_bytes); + atomic_inc(&dma->total_dmas); + sarea_priv->last_enqueue = dev_priv->counter-1; + sarea_priv->last_dispatch = (int) hw_status[5]; + + return 0; +} + + + +int i810_clear_bufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i810_clear_t clear; + + copy_from_user_ret(&clear, (drm_i810_clear_t *)arg, sizeof(clear), + -EFAULT); + + if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("i810_clear_bufs called without lock held\n"); + return -EINVAL; + } + + i810_dma_dispatch_clear( dev, clear.flags, + clear.clear_color, + clear.clear_depth ); + return 0; +} + +int i810_swap_bufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + + DRM_DEBUG("i810_swap_bufs\n"); + + if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("i810_swap_buf called without lock held\n"); + return -EINVAL; + } + + i810_dma_dispatch_swap( dev ); + return 0; +} + +int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; + u32 *hw_status = (u32 *)dev_priv->hw_status_page; + drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) + dev_priv->sarea_priv; + + sarea_priv->last_dispatch = (int) hw_status[5]; + return 0; +} + +int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + int retcode = 0; + drm_i810_dma_t d; + drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; + u32 *hw_status = (u32 *)dev_priv->hw_status_page; + drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) + dev_priv->sarea_priv; + + DRM_DEBUG("getbuf\n"); + copy_from_user_ret(&d, (drm_i810_dma_t *)arg, sizeof(d), -EFAULT); + + if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + 
DRM_ERROR("i810_dma called without lock held\n"); + return -EINVAL; + } - return ret; + d.granted = 0; + + retcode = i810_dma_get_buffer(dev, &d, filp); + + DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", + current->pid, retcode, d.granted); + + copy_to_user_ret((drm_dma_t *)arg, &d, sizeof(d), -EFAULT); + sarea_priv->last_dispatch = (int) hw_status[5]; + + return retcode; } diff --git a/linux-core/i810_drm.h b/linux-core/i810_drm.h new file mode 100644 index 00000000..4c8e09f6 --- /dev/null +++ b/linux-core/i810_drm.h @@ -0,0 +1,188 @@ +#ifndef _I810_DRM_H_ +#define _I810_DRM_H_ + +/* WARNING: These defines must be the same as what the Xserver uses. + * if you change them, you must change the defines in the Xserver. + */ + +#ifndef _I810_DEFINES_ +#define _I810_DEFINES_ + +#define I810_DMA_BUF_ORDER 12 +#define I810_DMA_BUF_SZ (1<<I810_DMA_BUF_ORDER) +#define I810_DMA_BUF_NR 256 +#define I810_NR_SAREA_CLIPRECTS 8 + +/* Each region is a minimum of 64k, and there are at most 64 of them. + */ +#define I810_NR_TEX_REGIONS 64 +#define I810_LOG_MIN_TEX_REGION_SIZE 16 +#endif + +#define I810_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */ +#define I810_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */ +#define I810_UPLOAD_CTX 0x4 +#define I810_UPLOAD_BUFFERS 0x8 +#define I810_UPLOAD_TEX0 0x10 +#define I810_UPLOAD_TEX1 0x20 +#define I810_UPLOAD_CLIPRECTS 0x40 + + +/* Indices into buf.Setup where various bits of state are mirrored per + * context and per buffer. These can be fired at the card as a unit, + * or in a piecewise fashion as required. + */ + +/* Destbuffer state + * - backbuffer linear offset and pitch -- invarient in the current dri + * - zbuffer linear offset and pitch -- also invarient + * - drawing origin in back and depth buffers. + * + * Keep the depth/back buffer state here to acommodate private buffers + * in the future. + */ +#define I810_DESTREG_DI0 0 /* CMD_OP_DESTBUFFER_INFO (2 dwords) */ +#define I810_DESTREG_DI1 1 +#define I810_DESTREG_DV0 2 /* GFX_OP_DESTBUFFER_VARS (2 dwords) */ +#define I810_DESTREG_DV1 3 +#define I810_DESTREG_DR0 4 /* GFX_OP_DRAWRECT_INFO (4 dwords) */ +#define I810_DESTREG_DR1 5 +#define I810_DESTREG_DR2 6 +#define I810_DESTREG_DR3 7 +#define I810_DESTREG_DR4 8 +#define I810_DEST_SETUP_SIZE 10 + +/* Context state + */ +#define I810_CTXREG_CF0 0 /* GFX_OP_COLOR_FACTOR */ +#define I810_CTXREG_CF1 1 +#define I810_CTXREG_ST0 2 /* GFX_OP_STIPPLE */ +#define I810_CTXREG_ST1 3 +#define I810_CTXREG_VF 4 /* GFX_OP_VERTEX_FMT */ +#define I810_CTXREG_MT 5 /* GFX_OP_MAP_TEXELS */ +#define I810_CTXREG_MC0 6 /* GFX_OP_MAP_COLOR_STAGES - stage 0 */ +#define I810_CTXREG_MC1 7 /* GFX_OP_MAP_COLOR_STAGES - stage 1 */ +#define I810_CTXREG_MC2 8 /* GFX_OP_MAP_COLOR_STAGES - stage 2 */ +#define I810_CTXREG_MA0 9 /* GFX_OP_MAP_ALPHA_STAGES - stage 0 */ +#define I810_CTXREG_MA1 10 /* GFX_OP_MAP_ALPHA_STAGES - stage 1 */ +#define I810_CTXREG_MA2 11 /* GFX_OP_MAP_ALPHA_STAGES - stage 2 */ +#define I810_CTXREG_SDM 12 /* GFX_OP_SRC_DEST_MONO */ +#define I810_CTXREG_FOG 13 /* GFX_OP_FOG_COLOR */ +#define I810_CTXREG_B1 14 /* GFX_OP_BOOL_1 */ +#define I810_CTXREG_B2 15 /* GFX_OP_BOOL_2 */ +#define I810_CTXREG_LCS 16 /* GFX_OP_LINEWIDTH_CULL_SHADE_MODE */ +#define I810_CTXREG_PV 17 /* GFX_OP_PV_RULE -- Invarient! 
*/ +#define I810_CTXREG_ZA 18 /* GFX_OP_ZBIAS_ALPHAFUNC */ +#define I810_CTXREG_AA 19 /* GFX_OP_ANTIALIAS */ +#define I810_CTX_SETUP_SIZE 20 + +/* Texture state (per tex unit) + */ +#define I810_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (4 dwords) */ +#define I810_TEXREG_MI1 1 +#define I810_TEXREG_MI2 2 +#define I810_TEXREG_MI3 3 +#define I810_TEXREG_MF 4 /* GFX_OP_MAP_FILTER */ +#define I810_TEXREG_MLC 5 /* GFX_OP_MAP_LOD_CTL */ +#define I810_TEXREG_MLL 6 /* GFX_OP_MAP_LOD_LIMITS */ +#define I810_TEXREG_MCS 7 /* GFX_OP_MAP_COORD_SETS ??? */ +#define I810_TEX_SETUP_SIZE 8 + +#define I810_FRONT 0x1 +#define I810_BACK 0x2 +#define I810_DEPTH 0x4 + + +typedef struct _drm_i810_init { + enum { + I810_INIT_DMA = 0x01, + I810_CLEANUP_DMA = 0x02 + } func; + int ring_map_idx; + int buffer_map_idx; + int sarea_priv_offset; + unsigned int ring_start; + unsigned int ring_end; + unsigned int ring_size; + unsigned int front_offset; + unsigned int back_offset; + unsigned int depth_offset; + unsigned int w; + unsigned int h; + unsigned int pitch; + unsigned int pitch_bits; +} drm_i810_init_t; + +/* Warning: If you change the SAREA structure you must change the Xserver + * structure as well */ + +typedef struct _drm_i810_tex_region { + unsigned char next, prev; /* indices to form a circular LRU */ + unsigned char in_use; /* owned by a client, or free? */ + int age; /* tracked by clients to update local LRU's */ +} drm_i810_tex_region_t; + +typedef struct _drm_i810_sarea { + unsigned int ContextState[I810_CTX_SETUP_SIZE]; + unsigned int BufferState[I810_DEST_SETUP_SIZE]; + unsigned int TexState[2][I810_TEX_SETUP_SIZE]; + unsigned int dirty; + + unsigned int nbox; + drm_clip_rect_t boxes[I810_NR_SAREA_CLIPRECTS]; + + /* Maintain an LRU of contiguous regions of texture space. If + * you think you own a region of texture memory, and it has an + * age different to the one you set, then you are mistaken and + * it has been stolen by another client. If global texAge + * hasn't changed, there is no need to walk the list. + * + * These regions can be used as a proxy for the fine-grained + * texture information of other clients - by maintaining them + * in the same lru which is used to age their own textures, + * clients have an approximate lru for the whole of global + * texture space, and can make informed decisions as to which + * areas to kick out. There is no need to choose whether to + * kick out your own texture or someone else's - simply eject + * them all in LRU order. + */ + + drm_i810_tex_region_t texList[I810_NR_TEX_REGIONS+1]; + /* Last elt is sentinal */ + int texAge; /* last time texture was uploaded */ + int last_enqueue; /* last time a buffer was enqueued */ + int last_dispatch; /* age of the most recently dispatched buffer */ + int last_quiescent; /* */ + int ctxOwner; /* last context to upload state */ + + int vertex_prim; + +} drm_i810_sarea_t; + +typedef struct _drm_i810_clear { + int clear_color; + int clear_depth; + int flags; +} drm_i810_clear_t; + + + +/* These may be placeholders if we have more cliprects than + * I810_NR_SAREA_CLIPRECTS. In that case, the client sets discard to + * false, indicating that the buffer will be dispatched again with a + * new set of cliprects. + */ +typedef struct _drm_i810_vertex { + int idx; /* buffer index */ + int used; /* nr bytes in use */ + int discard; /* client is finished with the buffer? 
*/ +} drm_i810_vertex_t; + +typedef struct drm_i810_dma { + void *virtual; + int request_idx; + int request_size; + int granted; +} drm_i810_dma_t; + +#endif /* _I810_DRM_H_ */ diff --git a/linux-core/i810_drv.c b/linux-core/i810_drv.c index f33153a3..6f78fbc9 100644 --- a/linux-core/i810_drv.c +++ b/linux-core/i810_drv.c @@ -2,6 +2,7 @@ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -23,25 +24,26 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com> - * Jeff Hartmann <jhartmann@precisioninsight.com> - * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_drv.c,v 1.1 2000/02/11 17:26:05 dawes Exp $ + * Authors: Rickard E. (Rik) Faith <faith@valinux.com> + * Jeff Hartmann <jhartmann@valinux.com> * */ +#include <linux/config.h> #define EXPORT_SYMTAB #include "drmP.h" #include "i810_drv.h" + + EXPORT_SYMBOL(i810_init); EXPORT_SYMBOL(i810_cleanup); #define I810_NAME "i810" -#define I810_DESC "Matrox g200/g400" +#define I810_DESC "Intel I810" #define I810_DATE "19991213" -#define I810_MAJOR 0 +#define I810_MAJOR 1 #define I810_MINOR 0 -#define I810_PATCHLEVEL 1 +#define I810_PATCHLEVEL 0 static drm_device_t i810_device; drm_ctx_t i810_res_ctx; @@ -54,6 +56,7 @@ static struct file_operations i810_fops = { mmap: drm_mmap, read: drm_read, fasync: drm_fasync, + poll: drm_poll, }; static struct miscdevice i810_misc = { @@ -77,21 +80,18 @@ static drm_ioctl_desc_t i810_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { i810_addbufs, 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { i810_markbufs, 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { i810_infobufs, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { i810_mapbufs, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { i810_freebufs, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { drm_addctx, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { drm_rmctx, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { drm_modctx, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { drm_getctx, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { drm_switchctx, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { drm_newctx, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { drm_resctx, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { i810_addctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { i810_rmctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { i810_modctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { i810_getctx, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { i810_switchctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { i810_newctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { i810_resctx, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { i810_dma, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { i810_lock, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { i810_unlock, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 }, @@ -104,6 +104,14 @@ static drm_ioctl_desc_t i810_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 
}, + + [DRM_IOCTL_NR(DRM_IOCTL_I810_INIT)] = { i810_dma_init, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_I810_VERTEX)] = { i810_dma_vertex, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_I810_CLEAR)] = { i810_clear_bufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_I810_FLUSH)] = { i810_flush_ioctl,1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_I810_GETAGE)] = { i810_getage, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_I810_GETBUF)] = { i810_getbuf, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_I810_SWAP)] = { i810_swap_bufs, 1, 0 }, }; #define I810_IOCTL_COUNT DRM_ARRAY_SIZE(i810_ioctls) @@ -121,7 +129,7 @@ MODULE_PARM(i810, "s"); int init_module(void) { - printk("doing i810_init()\n"); + DRM_DEBUG("doing i810_init()\n"); return i810_init(); } @@ -364,7 +372,7 @@ int i810_init(void) #ifdef MODULE drm_parse_options(i810); #endif - printk("doing misc_register\n"); + DRM_DEBUG("doing misc_register\n"); if ((retcode = misc_register(&i810_misc))) { DRM_ERROR("Cannot register \"%s\"\n", I810_NAME); return retcode; @@ -372,13 +380,22 @@ int i810_init(void) dev->device = MKDEV(MISC_MAJOR, i810_misc.minor); dev->name = I810_NAME; - printk("doing mem init\n"); + DRM_DEBUG("doing mem init\n"); drm_mem_init(); - printk("doing proc init\n"); + DRM_DEBUG("doing proc init\n"); drm_proc_init(dev); - printk("doing agp init\n"); + DRM_DEBUG("doing agp init\n"); dev->agp = drm_agp_init(); - printk("doing ctxbitmap init\n"); + if(dev->agp == NULL) { + DRM_INFO("The i810 drm module requires the agpgart module" + " to function correctly\nPlease load the agpgart" + " module before you load the i810 module\n"); + drm_proc_cleanup(); + misc_deregister(&i810_misc); + i810_takedown(dev); + return -ENOMEM; + } + DRM_DEBUG("doing ctxbitmap init\n"); if((retcode = drm_ctxbitmap_init(dev))) { DRM_ERROR("Cannot allocate memory for context bitmap.\n"); drm_proc_cleanup(); @@ -386,10 +403,6 @@ int i810_init(void) i810_takedown(dev); return retcode; } -#if 0 - printk("doing i810_dma_init\n"); - i810_dma_init(dev); -#endif DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", I810_NAME, @@ -417,7 +430,6 @@ void i810_cleanup(void) DRM_INFO("Module unloaded\n"); } drm_ctxbitmap_cleanup(dev); - i810_dma_cleanup(dev); i810_takedown(dev); if (dev->agp) { drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS); @@ -484,24 +496,82 @@ int i810_release(struct inode *inode, struct file *filp) drm_device_t *dev = priv->dev; int retcode = 0; - DRM_DEBUG("open_count = %d\n", dev->open_count); - if (!(retcode = drm_release(inode, filp))) { - MOD_DEC_USE_COUNT; - atomic_inc(&dev->total_close); - spin_lock(&dev->count_lock); - if (!--dev->open_count) { - if (atomic_read(&dev->ioctl_count) || dev->blocked) { - DRM_ERROR("Device busy: %d %d\n", - atomic_read(&dev->ioctl_count), - dev->blocked); - spin_unlock(&dev->count_lock); - return -EBUSY; + DRM_DEBUG("pid = %d, device = 0x%x, open_count = %d\n", + current->pid, dev->device, dev->open_count); + + if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) + && dev->lock.pid == current->pid) { + i810_reclaim_buffers(dev, priv->pid); + DRM_ERROR("Process %d dead, freeing lock for context %d\n", + current->pid, + _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); + drm_lock_free(dev, + &dev->lock.hw_lock->lock, + _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); + + /* FIXME: may require heavy-handed reset of + hardware at this point, possibly + processed via a callback to the X + server. 
*/ + } else if (dev->lock.hw_lock) { + /* The lock is required to reclaim buffers */ + DECLARE_WAITQUEUE(entry, current); + add_wait_queue(&dev->lock.lock_queue, &entry); + for (;;) { + if (!dev->lock.hw_lock) { + /* Device has been unregistered */ + retcode = -EINTR; + break; + } + if (drm_lock_take(&dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT)) { + dev->lock.pid = priv->pid; + dev->lock.lock_time = jiffies; + atomic_inc(&dev->total_locks); + break; /* Got lock */ + } + /* Contention */ + atomic_inc(&dev->total_sleeps); + current->state = TASK_INTERRUPTIBLE; + schedule(); + if (signal_pending(current)) { + retcode = -ERESTARTSYS; + break; } - spin_unlock(&dev->count_lock); - return i810_takedown(dev); } - spin_unlock(&dev->count_lock); + current->state = TASK_RUNNING; + remove_wait_queue(&dev->lock.lock_queue, &entry); + if(!retcode) { + i810_reclaim_buffers(dev, priv->pid); + drm_lock_free(dev, &dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT); + } + } + drm_fasync(-1, filp, 0); + + down(&dev->struct_sem); + if (priv->prev) priv->prev->next = priv->next; + else dev->file_first = priv->next; + if (priv->next) priv->next->prev = priv->prev; + else dev->file_last = priv->prev; + up(&dev->struct_sem); + + drm_free(priv, sizeof(*priv), DRM_MEM_FILES); + MOD_DEC_USE_COUNT; + atomic_inc(&dev->total_close); + spin_lock(&dev->count_lock); + if (!--dev->open_count) { + if (atomic_read(&dev->ioctl_count) || dev->blocked) { + DRM_ERROR("Device busy: %d %d\n", + atomic_read(&dev->ioctl_count), + dev->blocked); + spin_unlock(&dev->count_lock); + return -EBUSY; + } + spin_unlock(&dev->count_lock); + return i810_takedown(dev); } + spin_unlock(&dev->count_lock); return retcode; } @@ -566,8 +636,7 @@ int i810_unlock(struct inode *inode, struct file *filp, unsigned int cmd, atomic_inc(&dev->total_unlocks); if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock)) atomic_inc(&dev->total_contends); - drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT); - i810_dma_schedule(dev, 1); + drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT); if (!dev->context_flag) { if (drm_lock_free(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT)) { diff --git a/linux-core/i810_drv.h b/linux-core/i810_drv.h index 0f5f42bb..c387bf72 100644 --- a/linux-core/i810_drv.h +++ b/linux-core/i810_drv.h @@ -2,6 +2,7 @@ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -23,15 +24,58 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com> - * Jeff Hartmann <jhartmann@precisioninsight.com> + * Authors: Rickard E. 
(Rik) Faith <faith@valinux.com> + * Jeff Hartmann <jhartmann@valinux.com> * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_drv.h,v 1.1 2000/02/11 17:26:05 dawes Exp $ */ #ifndef _I810_DRV_H_ #define _I810_DRV_H_ +typedef struct drm_i810_buf_priv { + u32 *in_use; + int my_use_idx; + int currently_mapped; + void *virtual; + void *kernel_virtual; + int map_count; + struct vm_area_struct *vma; +} drm_i810_buf_priv_t; + +typedef struct _drm_i810_ring_buffer{ + int tail_mask; + unsigned long Start; + unsigned long End; + unsigned long Size; + u8 *virtual_start; + int head; + int tail; + int space; +} drm_i810_ring_buffer_t; + +typedef struct drm_i810_private { + int ring_map_idx; + int buffer_map_idx; + + drm_i810_ring_buffer_t ring; + drm_i810_sarea_t *sarea_priv; + + unsigned long hw_status_page; + unsigned long counter; + + atomic_t flush_done; + wait_queue_head_t flush_queue; /* Processes waiting until flush */ + drm_buf_t *mmap_buffer; + + + u32 front_di1, back_di1, zi1; + + int back_offset; + int depth_offset; + int w, h; + int pitch; +} drm_i810_private_t; + /* i810_drv.c */ extern int i810_init(void); extern void i810_cleanup(void); @@ -46,16 +90,22 @@ extern int i810_unlock(struct inode *inode, struct file *filp, /* i810_dma.c */ extern int i810_dma_schedule(drm_device_t *dev, int locked); -extern int i810_dma(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +extern int i810_getbuf(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); extern int i810_irq_install(drm_device_t *dev, int irq); extern int i810_irq_uninstall(drm_device_t *dev); extern int i810_control(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int i810_lock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern void i810_dma_init(drm_device_t *dev); -extern void i810_dma_cleanup(drm_device_t *dev); +extern int i810_dma_init(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_flush_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern void i810_reclaim_buffers(drm_device_t *dev, pid_t pid); +extern int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg); +extern int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma); /* i810_bufs.c */ @@ -67,10 +117,108 @@ extern int i810_markbufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int i810_freebufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int i810_mapbufs(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); extern int i810_addmap(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); + /* i810_context.c */ +extern int i810_resctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_addctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_modctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_getctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_switchctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_newctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_rmctx(struct inode 
*inode, struct file *filp, + unsigned int cmd, unsigned long arg); + +extern int i810_context_switch(drm_device_t *dev, int old, int new); +extern int i810_context_switch_complete(drm_device_t *dev, int new); + +#define I810_VERBOSE 0 + + +int i810_dma_vertex(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + +int i810_swap_bufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + +int i810_clear_bufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + +#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) +#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23)) +#define CMD_REPORT_HEAD (7<<23) +#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1) +#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1) + +#define INST_PARSER_CLIENT 0x00000000 +#define INST_OP_FLUSH 0x02000000 +#define INST_FLUSH_MAP_CACHE 0x00000001 + + +#define BB1_START_ADDR_MASK (~0x7) +#define BB1_PROTECTED (1<<0) +#define BB1_UNPROTECTED (0<<0) +#define BB2_END_ADDR_MASK (~0x7) + +#define I810REG_HWSTAM 0x02098 +#define I810REG_INT_IDENTITY_R 0x020a4 +#define I810REG_INT_MASK_R 0x020a8 +#define I810REG_INT_ENABLE_R 0x020a0 + +#define LP_RING 0x2030 +#define HP_RING 0x2040 +#define RING_TAIL 0x00 +#define TAIL_ADDR 0x000FFFF8 +#define RING_HEAD 0x04 +#define HEAD_WRAP_COUNT 0xFFE00000 +#define HEAD_WRAP_ONE 0x00200000 +#define HEAD_ADDR 0x001FFFFC +#define RING_START 0x08 +#define START_ADDR 0x00FFFFF8 +#define RING_LEN 0x0C +#define RING_NR_PAGES 0x000FF000 +#define RING_REPORT_MASK 0x00000006 +#define RING_REPORT_64K 0x00000002 +#define RING_REPORT_128K 0x00000004 +#define RING_NO_REPORT 0x00000000 +#define RING_VALID_MASK 0x00000001 +#define RING_VALID 0x00000001 +#define RING_INVALID 0x00000000 + +#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) +#define SC_UPDATE_SCISSOR (0x1<<1) +#define SC_ENABLE_MASK (0x1<<0) +#define SC_ENABLE (0x1<<0) + +#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) +#define SCI_YMIN_MASK (0xffff<<16) +#define SCI_XMIN_MASK (0xffff<<0) +#define SCI_YMAX_MASK (0xffff<<16) +#define SCI_XMAX_MASK (0xffff<<0) + +#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0) +#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) +#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x2) +#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) +#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) +#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24)) + +#define CMD_OP_Z_BUFFER_INFO ((0x0<<29)|(0x16<<23)) +#define CMD_OP_DESTBUFFER_INFO ((0x0<<29)|(0x15<<23)) + +#define BR00_BITBLT_CLIENT 0x40000000 +#define BR00_OP_COLOR_BLT 0x10000000 +#define BR00_OP_SRC_COPY_BLT 0x10C00000 +#define BR13_SOLID_PATTERN 0x80000000 + + #endif + diff --git a/linux-core/mga_drv.c b/linux-core/mga_drv.c index 8bc25617..e77d827b 100644 --- a/linux-core/mga_drv.c +++ b/linux-core/mga_drv.c @@ -2,6 +2,7 @@ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -23,13 +24,13 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com> - * Jeff Hartmann <jhartmann@precisioninsight.com> + * Authors: Rickard E. 
(Rik) Faith <faith@valinux.com> + * Jeff Hartmann <jhartmann@valinux.com> * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_drv.c,v 1.1 2000/02/11 17:26:07 dawes Exp $ * */ +#include <linux/config.h> #define EXPORT_SYMTAB #include "drmP.h" #include "mga_drv.h" @@ -39,9 +40,9 @@ EXPORT_SYMBOL(mga_cleanup); #define MGA_NAME "mga" #define MGA_DESC "Matrox g200/g400" #define MGA_DATE "19991213" -#define MGA_MAJOR 0 +#define MGA_MAJOR 1 #define MGA_MINOR 0 -#define MGA_PATCHLEVEL 1 +#define MGA_PATCHLEVEL 0 static drm_device_t mga_device; drm_ctx_t mga_res_ctx; @@ -54,6 +55,7 @@ static struct file_operations mga_fops = { mmap: drm_mmap, read: drm_read, fasync: drm_fasync, + poll: drm_poll, }; static struct miscdevice mga_misc = { @@ -105,9 +107,12 @@ static drm_ioctl_desc_t mga_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_MGA_INIT)] = { mga_dma_init, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_MGA_SWAP)] = { mga_clear_bufs, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_MGA_CLEAR)] = { mga_swap_bufs, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)] = { mga_iload, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MGA_SWAP)] = { mga_swap_bufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_MGA_CLEAR)] = { mga_clear_bufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)] = { mga_iload, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_MGA_VERTEX)] = { mga_vertex, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_MGA_FLUSH)] = { mga_flush_ioctl, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_MGA_INDICES)] = { mga_indices, 1, 0 }, }; #define MGA_IOCTL_COUNT DRM_ARRAY_SIZE(mga_ioctls) @@ -380,6 +385,21 @@ int mga_init(void) drm_proc_init(dev); DRM_DEBUG("doing agp init\n"); dev->agp = drm_agp_init(); + if(dev->agp == NULL) { + DRM_INFO("The mga drm module requires the agpgart module" + " to function correctly\nPlease load the agpgart" + " module before you load the mga module\n"); + drm_proc_cleanup(); + misc_deregister(&mga_misc); + mga_takedown(dev); + return -ENOMEM; + } +#ifdef CONFIG_MTRR + dev->agp->agp_mtrr = mtrr_add(dev->agp->agp_info.aper_base, + dev->agp->agp_info.aper_size * 1024 * 1024, + MTRR_TYPE_WRCOMB, + 1); +#endif DRM_DEBUG("doing ctxbitmap init\n"); if((retcode = drm_ctxbitmap_init(dev))) { DRM_ERROR("Cannot allocate memory for context bitmap.\n"); @@ -416,6 +436,16 @@ void mga_cleanup(void) } drm_ctxbitmap_cleanup(dev); mga_dma_cleanup(dev); +#ifdef CONFIG_MTRR + if(dev->agp && dev->agp->agp_mtrr) { + int retval; + retval = mtrr_del(dev->agp->agp_mtrr, + dev->agp->agp_info.aper_base, + dev->agp->agp_info.aper_size * 1024*1024); + DRM_DEBUG("mtrr_del = %d\n", retval); + } +#endif + mga_takedown(dev); if (dev->agp) { drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS); @@ -482,27 +512,86 @@ int mga_release(struct inode *inode, struct file *filp) drm_device_t *dev = priv->dev; int retcode = 0; - DRM_DEBUG("open_count = %d\n", dev->open_count); - if (!(retcode = drm_release(inode, filp))) { - MOD_DEC_USE_COUNT; - atomic_inc(&dev->total_close); - spin_lock(&dev->count_lock); - if (!--dev->open_count) { - if (atomic_read(&dev->ioctl_count) || dev->blocked) { - DRM_ERROR("Device busy: %d %d\n", - atomic_read(&dev->ioctl_count), - dev->blocked); - spin_unlock(&dev->count_lock); - return -EBUSY; + DRM_DEBUG("pid = %d, device = 0x%x, open_count = %d\n", + current->pid, dev->device, dev->open_count); + + if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) + && dev->lock.pid == current->pid) { + mga_reclaim_buffers(dev, 
priv->pid); + DRM_ERROR("Process %d dead, freeing lock for context %d\n", + current->pid, + _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); + drm_lock_free(dev, + &dev->lock.hw_lock->lock, + _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); + + /* FIXME: may require heavy-handed reset of + hardware at this point, possibly + processed via a callback to the X + server. */ + } else if (dev->lock.hw_lock) { + /* The lock is required to reclaim buffers */ + DECLARE_WAITQUEUE(entry, current); + add_wait_queue(&dev->lock.lock_queue, &entry); + for (;;) { + if (!dev->lock.hw_lock) { + /* Device has been unregistered */ + retcode = -EINTR; + break; + } + if (drm_lock_take(&dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT)) { + dev->lock.pid = priv->pid; + dev->lock.lock_time = jiffies; + atomic_inc(&dev->total_locks); + break; /* Got lock */ + } + /* Contention */ + atomic_inc(&dev->total_sleeps); + current->state = TASK_INTERRUPTIBLE; + schedule(); + if (signal_pending(current)) { + retcode = -ERESTARTSYS; + break; } - spin_unlock(&dev->count_lock); - return mga_takedown(dev); } - spin_unlock(&dev->count_lock); + current->state = TASK_RUNNING; + remove_wait_queue(&dev->lock.lock_queue, &entry); + if(!retcode) { + mga_reclaim_buffers(dev, priv->pid); + drm_lock_free(dev, &dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT); + } } + drm_fasync(-1, filp, 0); + + down(&dev->struct_sem); + if (priv->prev) priv->prev->next = priv->next; + else dev->file_first = priv->next; + if (priv->next) priv->next->prev = priv->prev; + else dev->file_last = priv->prev; + up(&dev->struct_sem); + + drm_free(priv, sizeof(*priv), DRM_MEM_FILES); + MOD_DEC_USE_COUNT; + atomic_inc(&dev->total_close); + spin_lock(&dev->count_lock); + if (!--dev->open_count) { + if (atomic_read(&dev->ioctl_count) || dev->blocked) { + DRM_ERROR("Device busy: %d %d\n", + atomic_read(&dev->ioctl_count), + dev->blocked); + spin_unlock(&dev->count_lock); + return -EBUSY; + } + spin_unlock(&dev->count_lock); + return mga_takedown(dev); + } + spin_unlock(&dev->count_lock); return retcode; } + /* drm_ioctl is called whenever a process performs an ioctl on /dev/drm. */ int mga_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, diff --git a/linux-core/r128_drv.c b/linux-core/r128_drv.c new file mode 100644 index 00000000..8b669888 --- /dev/null +++ b/linux-core/r128_drv.c @@ -0,0 +1,737 @@ +/* r128_drv.c -- ATI Rage 128 driver -*- linux-c -*- + * Created: Mon Dec 13 09:47:27 1999 by faith@precisioninsight.com + * + * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Rickard E. (Rik) Faith <faith@valinux.com> + * Kevin E. Martin <martin@valinux.com> + * + */ + +#include <linux/config.h> +#define EXPORT_SYMTAB +#include "drmP.h" +#include "r128_drv.h" +EXPORT_SYMBOL(r128_init); +EXPORT_SYMBOL(r128_cleanup); + +#define R128_NAME "r128" +#define R128_DESC "r128" +#define R128_DATE "20000607" +#define R128_MAJOR 1 +#define R128_MINOR 0 +#define R128_PATCHLEVEL 0 + +static drm_device_t r128_device; +drm_ctx_t r128_res_ctx; + +static struct file_operations r128_fops = { + open: r128_open, + flush: drm_flush, + release: r128_release, + ioctl: r128_ioctl, + mmap: drm_mmap, + read: drm_read, + fasync: drm_fasync, + poll: drm_poll, +}; + +static struct miscdevice r128_misc = { + minor: MISC_DYNAMIC_MINOR, + name: R128_NAME, + fops: &r128_fops, +}; + +static drm_ioctl_desc_t r128_ioctls[] = { + [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { r128_version, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 }, + + [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { r128_addbufs, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { r128_mapbufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 }, + + [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { r128_addctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { r128_rmctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { r128_modctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { r128_getctx, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { r128_switchctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { r128_newctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { r128_resctx, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { r128_lock, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { r128_unlock, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 }, + +#ifdef DRM_AGP + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 }, +#endif + + [DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_init_cce, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)] = { r128_eng_reset, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_FLUSH)] = { r128_eng_flush, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_PACKET)] = { 
r128_submit_pkt, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_IDLE)] = { r128_cce_idle, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)] = { r128_vertex_buf, 1, 0 }, +}; +#define R128_IOCTL_COUNT DRM_ARRAY_SIZE(r128_ioctls) + +#ifdef MODULE +int init_module(void); +void cleanup_module(void); +static char *r128 = NULL; + +MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas."); +MODULE_DESCRIPTION("r128"); +MODULE_PARM(r128, "s"); + +/* init_module is called when insmod is used to load the module */ + +int init_module(void) +{ + return r128_init(); +} + +/* cleanup_module is called when rmmod is used to unload the module */ + +void cleanup_module(void) +{ + r128_cleanup(); +} +#endif + +#ifndef MODULE +/* r128_setup is called by the kernel to parse command-line options passed + * via the boot-loader (e.g., LILO). It calls the insmod option routine, + * drm_parse_drm. + * + * This is not currently supported, since it requires changes to + * linux/init/main.c. */ + + +void __init r128_setup(char *str, int *ints) +{ + if (ints[0] != 0) { + DRM_ERROR("Illegal command line format, ignored\n"); + return; + } + drm_parse_options(str); +} +#endif + +static int r128_setup(drm_device_t *dev) +{ + int i; + + atomic_set(&dev->ioctl_count, 0); + atomic_set(&dev->vma_count, 0); + dev->buf_use = 0; + atomic_set(&dev->buf_alloc, 0); + + drm_dma_setup(dev); + + atomic_set(&dev->total_open, 0); + atomic_set(&dev->total_close, 0); + atomic_set(&dev->total_ioctl, 0); + atomic_set(&dev->total_irq, 0); + atomic_set(&dev->total_ctx, 0); + atomic_set(&dev->total_locks, 0); + atomic_set(&dev->total_unlocks, 0); + atomic_set(&dev->total_contends, 0); + atomic_set(&dev->total_sleeps, 0); + + for (i = 0; i < DRM_HASH_SIZE; i++) { + dev->magiclist[i].head = NULL; + dev->magiclist[i].tail = NULL; + } + dev->maplist = NULL; + dev->map_count = 0; + dev->vmalist = NULL; + dev->lock.hw_lock = NULL; + init_waitqueue_head(&dev->lock.lock_queue); + dev->queue_count = 0; + dev->queue_reserved = 0; + dev->queue_slots = 0; + dev->queuelist = NULL; + dev->irq = 0; + dev->context_flag = 0; + dev->interrupt_flag = 0; + dev->dma_flag = 0; + dev->last_context = 0; + dev->last_switch = 0; + dev->last_checked = 0; + init_timer(&dev->timer); + init_waitqueue_head(&dev->context_wait); + + dev->ctx_start = 0; + dev->lck_start = 0; + + dev->buf_rp = dev->buf; + dev->buf_wp = dev->buf; + dev->buf_end = dev->buf + DRM_BSZ; + dev->buf_async = NULL; + init_waitqueue_head(&dev->buf_readers); + init_waitqueue_head(&dev->buf_writers); + + r128_res_ctx.handle=-1; + + DRM_DEBUG("\n"); + + /* The kernel's context could be created here, but is now created + in drm_dma_enqueue. This is more resource-efficient for + hardware that does not do DMA, but may mean that + drm_select_queue fails between the time the interrupt is + initialized and the time the queues are initialized. 
*/ + + return 0; +} + + +static int r128_takedown(drm_device_t *dev) +{ + int i; + drm_magic_entry_t *pt, *next; + drm_map_t *map; + drm_vma_entry_t *vma, *vma_next; + + DRM_DEBUG("\n"); + + down(&dev->struct_sem); + del_timer(&dev->timer); + + if (dev->devname) { + drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER); + dev->devname = NULL; + } + + if (dev->unique) { + drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER); + dev->unique = NULL; + dev->unique_len = 0; + } + /* Clear pid list */ + for (i = 0; i < DRM_HASH_SIZE; i++) { + for (pt = dev->magiclist[i].head; pt; pt = next) { + next = pt->next; + drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); + } + dev->magiclist[i].head = dev->magiclist[i].tail = NULL; + } + +#ifdef DRM_AGP + /* Clear AGP information */ + if (dev->agp) { + drm_agp_mem_t *entry; + drm_agp_mem_t *nexte; + + /* Remove AGP resources, but leave dev->agp + intact until r128_cleanup is called. */ + for (entry = dev->agp->memory; entry; entry = nexte) { + nexte = entry->next; + if (entry->bound) drm_unbind_agp(entry->memory); + drm_free_agp(entry->memory, entry->pages); + drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); + } + dev->agp->memory = NULL; + + if (dev->agp->acquired && drm_agp.release) + (*drm_agp.release)(); + + dev->agp->acquired = 0; + dev->agp->enabled = 0; + } +#endif + + /* Clear vma list (only built for debugging) */ + if (dev->vmalist) { + for (vma = dev->vmalist; vma; vma = vma_next) { + vma_next = vma->next; + drm_free(vma, sizeof(*vma), DRM_MEM_VMAS); + } + dev->vmalist = NULL; + } + + /* Clear map area and mtrr information */ + if (dev->maplist) { + for (i = 0; i < dev->map_count; i++) { + map = dev->maplist[i]; + switch (map->type) { + case _DRM_REGISTERS: + case _DRM_FRAME_BUFFER: +#ifdef CONFIG_MTRR + if (map->mtrr >= 0) { + int retcode; + retcode = mtrr_del(map->mtrr, + map->offset, + map->size); + DRM_DEBUG("mtrr_del = %d\n", retcode); + } +#endif + drm_ioremapfree(map->handle, map->size); + break; + case _DRM_SHM: + drm_free_pages((unsigned long)map->handle, + drm_order(map->size) + - PAGE_SHIFT, + DRM_MEM_SAREA); + break; + case _DRM_AGP: + /* Do nothing here, because this is all + handled in the AGP/GART driver. */ + break; + } + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + } + drm_free(dev->maplist, + dev->map_count * sizeof(*dev->maplist), + DRM_MEM_MAPS); + dev->maplist = NULL; + dev->map_count = 0; + } + + drm_dma_takedown(dev); + + dev->queue_count = 0; + if (dev->lock.hw_lock) { + dev->lock.hw_lock = NULL; /* SHM removed */ + dev->lock.pid = 0; + wake_up_interruptible(&dev->lock.lock_queue); + } + up(&dev->struct_sem); + + return 0; +} + +/* r128_init is called via init_module at module load time, or via + * linux/init/main.c (this is not currently supported). 
*/ + +int r128_init(void) +{ + int retcode; + drm_device_t *dev = &r128_device; + + DRM_DEBUG("\n"); + + memset((void *)dev, 0, sizeof(*dev)); + dev->count_lock = SPIN_LOCK_UNLOCKED; + sema_init(&dev->struct_sem, 1); + +#ifdef MODULE + drm_parse_options(r128); +#endif + + if ((retcode = misc_register(&r128_misc))) { + DRM_ERROR("Cannot register \"%s\"\n", R128_NAME); + return retcode; + } + dev->device = MKDEV(MISC_MAJOR, r128_misc.minor); + dev->name = R128_NAME; + + drm_mem_init(); + drm_proc_init(dev); + +#ifdef DRM_AGP + dev->agp = drm_agp_init(); + +#ifdef CONFIG_MTRR + dev->agp->agp_mtrr = mtrr_add(dev->agp->agp_info.aper_base, + dev->agp->agp_info.aper_size*1024*1024, + MTRR_TYPE_WRCOMB, + 1); +#endif +#endif + + if((retcode = drm_ctxbitmap_init(dev))) { + DRM_ERROR("Cannot allocate memory for context bitmap.\n"); + drm_proc_cleanup(); + misc_deregister(&r128_misc); + r128_takedown(dev); + return retcode; + } + + DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", + R128_NAME, + R128_MAJOR, + R128_MINOR, + R128_PATCHLEVEL, + R128_DATE, + r128_misc.minor); + + return 0; +} + +/* r128_cleanup is called via cleanup_module at module unload time. */ + +void r128_cleanup(void) +{ + drm_device_t *dev = &r128_device; + + DRM_DEBUG("\n"); + + drm_proc_cleanup(); + if (misc_deregister(&r128_misc)) { + DRM_ERROR("Cannot unload module\n"); + } else { + DRM_INFO("Module unloaded\n"); + } + drm_ctxbitmap_cleanup(dev); + r128_takedown(dev); +#ifdef DRM_AGP + if (dev->agp) { + /* FIXME -- free other information, too */ + drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS); + dev->agp = NULL; + } +#endif +} + +int r128_version(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_version_t version; + int len; + + copy_from_user_ret(&version, + (drm_version_t *)arg, + sizeof(version), + -EFAULT); + +#define DRM_COPY(name,value) \ + len = strlen(value); \ + if (len > name##_len) len = name##_len; \ + name##_len = strlen(value); \ + if (len && name) { \ + copy_to_user_ret(name, value, len, -EFAULT); \ + } + + version.version_major = R128_MAJOR; + version.version_minor = R128_MINOR; + version.version_patchlevel = R128_PATCHLEVEL; + + DRM_COPY(version.name, R128_NAME); + DRM_COPY(version.date, R128_DATE); + DRM_COPY(version.desc, R128_DESC); + + copy_to_user_ret((drm_version_t *)arg, + &version, + sizeof(version), + -EFAULT); + return 0; +} + +int r128_open(struct inode *inode, struct file *filp) +{ + drm_device_t *dev = &r128_device; + int retcode = 0; + + DRM_DEBUG("open_count = %d\n", dev->open_count); + if (!(retcode = drm_open_helper(inode, filp, dev))) { + MOD_INC_USE_COUNT; + atomic_inc(&dev->total_open); + spin_lock(&dev->count_lock); + if (!dev->open_count++) { + spin_unlock(&dev->count_lock); + return r128_setup(dev); + } + spin_unlock(&dev->count_lock); + } + return retcode; +} + +int r128_release(struct inode *inode, struct file *filp) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + int retcode = 0; + + DRM_DEBUG("open_count = %d\n", dev->open_count); + if (!(retcode = drm_release(inode, filp))) { + MOD_DEC_USE_COUNT; + atomic_inc(&dev->total_close); + spin_lock(&dev->count_lock); + if (!--dev->open_count) { + if (atomic_read(&dev->ioctl_count) || dev->blocked) { + DRM_ERROR("Device busy: %d %d\n", + atomic_read(&dev->ioctl_count), + dev->blocked); + spin_unlock(&dev->count_lock); + return -EBUSY; + } + spin_unlock(&dev->count_lock); + return r128_takedown(dev); + } + spin_unlock(&dev->count_lock); + } + return retcode; +} 
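
[Editor's aside, not part of the patch: the r128_ioctls[] table above routes the generic DRM_IOCTL_VERSION entry to r128_version(), which fills a drm_version_t via the DRM_COPY macro. The following is a minimal userspace sketch of reading that information back, assuming the userspace drm.h that declares drm_version_t and DRM_IOCTL_VERSION; the device node path and buffer sizes are placeholders, and this is illustrative only, not code from this import.]

    /* Query the driver version through DRM_IOCTL_VERSION.
     * The kernel side (r128_version) copies at most *_len bytes of the
     * name/date/desc strings and reports their true lengths back, so the
     * buffers are not necessarily NUL-terminated; print with %.*s.
     */
    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include "drm.h"            /* drm_version_t, DRM_IOCTL_VERSION */

    int print_drm_version(const char *node)   /* e.g. the misc device node */
    {
            drm_version_t v;
            char name[64], date[64], desc[64];
            int fd = open(node, O_RDWR);

            if (fd < 0)
                    return -1;

            memset(&v, 0, sizeof(v));
            v.name = name;  v.name_len = sizeof(name);
            v.date = date;  v.date_len = sizeof(date);
            v.desc = desc;  v.desc_len = sizeof(desc);

            if (ioctl(fd, DRM_IOCTL_VERSION, &v) == 0)
                    printf("%.*s %d.%d.%d (%.*s)\n",
                           (int)v.name_len, name,
                           v.version_major, v.version_minor,
                           v.version_patchlevel,
                           (int)v.desc_len, desc);

            close(fd);
            return 0;
    }

[DRM_IOCTL_VERSION is marked { r128_version, 0, 0 } in the table, i.e. it needs neither authentication nor root, so this query works on a freshly opened, unauthenticated file descriptor.]
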
+ +/* r128_ioctl is called whenever a process performs an ioctl on /dev/drm. */ + +int r128_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int nr = DRM_IOCTL_NR(cmd); + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + int retcode = 0; + drm_ioctl_desc_t *ioctl; + drm_ioctl_t *func; + + atomic_inc(&dev->ioctl_count); + atomic_inc(&dev->total_ioctl); + ++priv->ioctl_count; + + DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n", + current->pid, cmd, nr, dev->device, priv->authenticated); + + if (nr >= R128_IOCTL_COUNT) { + retcode = -EINVAL; + } else { + ioctl = &r128_ioctls[nr]; + func = ioctl->func; + + if (!func) { + DRM_DEBUG("no function\n"); + retcode = -EINVAL; + } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN)) + || (ioctl->auth_needed && !priv->authenticated)) { + retcode = -EACCES; + } else { + retcode = (func)(inode, filp, cmd, arg); + } + } + + atomic_dec(&dev->ioctl_count); + return retcode; +} + +int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + DECLARE_WAITQUEUE(entry, current); + int ret = 0; + drm_lock_t lock; +#if DRM_DMA_HISTOGRAM + cycles_t start; + + dev->lck_start = start = get_cycles(); +#endif + + copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); + + if (lock.context == DRM_KERNEL_CONTEXT) { + DRM_ERROR("Process %d using kernel context %d\n", + current->pid, lock.context); + return -EINVAL; + } + + DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", + lock.context, current->pid, dev->lock.hw_lock->lock, + lock.flags); + +#if 0 + /* dev->queue_count == 0 right now for + r128. FIXME? */ + if (lock.context < 0 || lock.context >= dev->queue_count) + return -EINVAL; +#endif + + if (!ret) { +#if 0 + if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) + != lock.context) { + long j = jiffies - dev->lock.lock_time; + + if (lock.context == r128_res_ctx.handle && + j >= 0 && j < DRM_LOCK_SLICE) { + /* Can't take lock if we just had it and + there is contention. 
*/ + DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d jiffies=%d\n", + lock.context, current->pid, j, + dev->lock.lock_time, jiffies); + current->state = TASK_INTERRUPTIBLE; + current->policy |= SCHED_YIELD; + schedule_timeout(DRM_LOCK_SLICE-j); + DRM_DEBUG("jiffies=%d\n", jiffies); + } + } +#endif + add_wait_queue(&dev->lock.lock_queue, &entry); + for (;;) { + if (!dev->lock.hw_lock) { + /* Device has been unregistered */ + ret = -EINTR; + break; + } + if (drm_lock_take(&dev->lock.hw_lock->lock, + lock.context)) { + dev->lock.pid = current->pid; + dev->lock.lock_time = jiffies; + atomic_inc(&dev->total_locks); + break; /* Got lock */ + } + + /* Contention */ + atomic_inc(&dev->total_sleeps); + current->state = TASK_INTERRUPTIBLE; +#if 1 + current->policy |= SCHED_YIELD; +#endif + schedule(); + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + } + current->state = TASK_RUNNING; + remove_wait_queue(&dev->lock.lock_queue, &entry); + } + +#if 0 + if (!ret && dev->last_context != lock.context && + lock.context != r128_res_ctx.handle && + dev->last_context != r128_res_ctx.handle) { + add_wait_queue(&dev->context_wait, &entry); + current->state = TASK_INTERRUPTIBLE; + /* PRE: dev->last_context != lock.context */ + r128_context_switch(dev, dev->last_context, lock.context); + /* POST: we will wait for the context + switch and will dispatch on a later call + when dev->last_context == lock.context + NOTE WE HOLD THE LOCK THROUGHOUT THIS + TIME! */ + current->policy |= SCHED_YIELD; + schedule(); + current->state = TASK_RUNNING; + remove_wait_queue(&dev->context_wait, &entry); + if (signal_pending(current)) { + ret = -EINTR; + } else if (dev->last_context != lock.context) { + DRM_ERROR("Context mismatch: %d %d\n", + dev->last_context, lock.context); + } + } +#endif + + if (!ret) { + if (lock.flags & _DRM_LOCK_READY) { + /* Wait for space in DMA/FIFO */ + } + if (lock.flags & _DRM_LOCK_QUIESCENT) { + /* Make hardware quiescent */ +#if 0 + r128_quiescent(dev); +#endif + } + } + +#if 0 + DRM_ERROR("pid = %5d, old counter = %5ld\n", + current->pid, current->counter); +#endif + if (lock.context != r128_res_ctx.handle) { + current->counter = 5; + current->priority = DEF_PRIORITY/4; + } +#if 0 + while (current->counter > 25) + current->counter >>= 1; /* decrease time slice */ + DRM_ERROR("pid = %5d, new counter = %5ld\n", + current->pid, current->counter); +#endif + DRM_DEBUG("%d %s\n", lock.context, ret ? 
"interrupted" : "has lock"); + +#if DRM_DMA_HISTOGRAM + atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]); +#endif + + return ret; +} + + +int r128_unlock(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_lock_t lock; + + copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); + + if (lock.context == DRM_KERNEL_CONTEXT) { + DRM_ERROR("Process %d using kernel context %d\n", + current->pid, lock.context); + return -EINVAL; + } + + DRM_DEBUG("%d frees lock (%d holds)\n", + lock.context, + _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); + atomic_inc(&dev->total_unlocks); + if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock)) + atomic_inc(&dev->total_contends); + drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT); + /* FIXME: Try to send data to card here */ + if (!dev->context_flag) { + if (drm_lock_free(dev, &dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT)) { + DRM_ERROR("\n"); + } + } + +#if 0 + current->policy |= SCHED_YIELD; + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(1000); +#endif + + if (lock.context != r128_res_ctx.handle) { + current->counter = 5; + current->priority = DEF_PRIORITY; + } +#if 0 + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(10); +#endif + + return 0; +} diff --git a/linux-core/tdfx_drv.c b/linux-core/tdfx_drv.c index 57c1c719..582832b5 100644 --- a/linux-core/tdfx_drv.c +++ b/linux-core/tdfx_drv.c @@ -1,8 +1,8 @@ /* tdfx.c -- tdfx driver -*- linux-c -*- * Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com - * Revised: Tue Oct 12 08:51:35 1999 by faith@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,10 +24,13 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/tdfx_drv.c,v 1.3 2000/02/23 04:47:31 martin Exp $ + * Authors: + * Rickard E. 
(Rik) Faith <faith@valinux.com> + * Daryll Strauss <daryll@valinux.com> * */ +#include <linux/config.h> #define EXPORT_SYMTAB #include "drmP.h" #include "tdfx_drv.h" @@ -37,9 +40,9 @@ EXPORT_SYMBOL(tdfx_cleanup); #define TDFX_NAME "tdfx" #define TDFX_DESC "tdfx" #define TDFX_DATE "19991009" -#define TDFX_MAJOR 0 +#define TDFX_MAJOR 1 #define TDFX_MINOR 0 -#define TDFX_PATCHLEVEL 1 +#define TDFX_PATCHLEVEL 0 static drm_device_t tdfx_device; drm_ctx_t tdfx_res_ctx; @@ -85,6 +88,16 @@ static drm_ioctl_desc_t tdfx_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { tdfx_lock, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { tdfx_unlock, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 }, +#ifdef DRM_AGP + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire, 1, 1}, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release, 1, 1}, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable, 1, 1}, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info, 1, 1}, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc, 1, 1}, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free, 1, 1}, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_unbind, 1, 1}, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_bind, 1, 1}, +#endif }; #define TDFX_IOCTL_COUNT DRM_ARRAY_SIZE(tdfx_ioctls) @@ -228,7 +241,24 @@ static int tdfx_takedown(drm_device_t *dev) } dev->magiclist[i].head = dev->magiclist[i].tail = NULL; } - +#ifdef DRM_AGP + /* Clear AGP information */ + if (dev->agp) { + drm_agp_mem_t *temp; + drm_agp_mem_t *temp_next; + + temp = dev->agp->memory; + while(temp != NULL) { + temp_next = temp->next; + drm_free_agp(temp->memory, temp->pages); + drm_free(temp, sizeof(*temp), DRM_MEM_AGPLISTS); + temp = temp_next; + } + if(dev->agp->acquired) (*drm_agp.release)(); + drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS); + dev->agp = NULL; + } +#endif /* Clear vma list (only built for debugging) */ if (dev->vmalist) { for (vma = dev->vmalist; vma; vma = vma_next) { @@ -262,6 +292,10 @@ static int tdfx_takedown(drm_device_t *dev) - PAGE_SHIFT, DRM_MEM_SAREA); break; + case _DRM_AGP: + /* Do nothing here, because this is all + handled in the AGP/GART driver. */ + break; } drm_free(map, sizeof(*map), DRM_MEM_MAPS); } @@ -309,6 +343,16 @@ int tdfx_init(void) drm_mem_init(); drm_proc_init(dev); +#ifdef DRM_AGP + dev->agp = drm_agp_init(); +#endif + if((retcode = drm_ctxbitmap_init(dev))) { + DRM_ERROR("Cannot allocate memory for context bitmap.\n"); + drm_proc_cleanup(); + misc_deregister(&tdfx_misc); + tdfx_takedown(dev); + return retcode; + } DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", TDFX_NAME, @@ -335,6 +379,7 @@ void tdfx_cleanup(void) } else { DRM_INFO("Module unloaded\n"); } + drm_ctxbitmap_cleanup(dev); tdfx_takedown(dev); } diff --git a/linux/Makefile.kernel b/linux/Makefile.kernel index 44d75c48..e262213a 100644 --- a/linux/Makefile.kernel +++ b/linux/Makefile.kernel @@ -9,12 +9,13 @@ # Note 2! The CFLAGS definitions are now inherited from the # parent makes.. 
# -# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.kernel,v 1.5 2000/02/14 06:27:25 martin Exp $ +# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.kernel,v 1.6 2000/06/17 00:03:34 martin Exp $ L_TARGET := libdrm.a L_OBJS := init.o memory.o proc.o auth.o context.o drawable.o bufs.o \ - lists.o lock.o ioctl.o fops.o vm.o dma.o + lists.o lock.o ioctl.o fops.o vm.o dma.o ctxbitmap.o \ + agpsupport.o M_OBJS := @@ -26,6 +27,14 @@ ifdef CONFIG_DRM_TDFX M_OBJS += tdfx.o endif +ifdef CONFIG_DRM_MGA +M_OBJS += mga.o +endif + +ifdef CONFIG_DRM_R128 +M_OBJS += r128.o +endif + include $(TOPDIR)/Rules.make gamma.o: gamma_drv.o gamma_dma.o $(L_TARGET) @@ -33,3 +42,12 @@ gamma.o: gamma_drv.o gamma_dma.o $(L_TARGET) tdfx.o: tdfx_drv.o tdfx_context.o $(L_TARGET) $(LD) $(LD_RFLAG) -r -o $@ tdfx_drv.o tdfx_context.o -L. -ldrm + +i810.o: i810_drv.o i810_context.o $(L_TARGET) + $(LD) $(LD_RFLAG) -r -o $@ i810_drv.o i810_bufs.o i810_dma.o i810_context.o -L. -ldrm + +mga.o: mga_drv.o mga_context.o mga_dma.o mga_bufs.o $(L_TARGET) + $(LD) $(LD_RFLAG) -r -o $@ mga_drv.o mga_bufs.o mga_dma.o mga_context.o mga_state.o -L. -ldrm + +r128.o: r128_drv.o r128_context.o $(L_TARGET) + $(LD) $(LD_RFLAG) -r -o $@ r128_drv.o r128_context.o -L. -ldrm diff --git a/linux/Makefile.linux b/linux/Makefile.linux index 8a41f880..ecc196bd 100644 --- a/linux/Makefile.linux +++ b/linux/Makefile.linux @@ -1,8 +1,8 @@ # Makefile -- For the Direct Rendering Manager module (drm) # Created: Mon Jan 4 09:26:53 1999 by faith@precisioninsight.com -# Revised: Sun Feb 13 23:15:59 2000 by kevin@precisioninsight.com # # Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. +# Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a @@ -24,19 +24,35 @@ # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # -# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.linux,v 1.6 2000/02/23 04:47:25 martin Exp $ -# +# +# ***** NOTE NOTE NOTE NOTE NOTE ***** +# To override the automatic Linux source tree determination, pass the +# pathname for the kernel that you want to compile on the command line, +# like this: +# make TREE=/usr/my-kernel-tree/include +# +# +# ***** NOTE NOTE NOTE NOTE NOTE ***** +# Because some distributions patch 2.2.x kernels to make kill_fasync have +# three parameters, this script tries to determine, via the examination of +# header files, if your kernel has been patched. 
If this detection is +# incorrect, you can override the value on the command line, like this: +# make PARAMS=2 +# or +# make PARAMS=3 .SUFFIXES: # *** Setup -MODS= gamma.o tdfx.o +# **** End of SMP/MODVERSIONS detection + +MODS= gamma.o tdfx.o r128.o LIBS= libdrm.a PROGS= drmstat DRMOBJS= init.o memory.o proc.o auth.o context.o drawable.o bufs.o \ - lists.o lock.o ioctl.o fops.o vm.o dma.o + lists.o lock.o ioctl.o fops.o vm.o dma.o ctxbitmap.o DRMHEADERS= drm.h drmP.h GAMMAOBJS= gamma_drv.o gamma_dma.o @@ -45,9 +61,14 @@ GAMMAHEADERS= gamma_drv.h $(DRMHEADERS) TDFXOBJS= tdfx_drv.o tdfx_context.o TDFXHEADERS= tdfx_drv.h $(DRMHEADERS) +R128OBJS= r128_drv.o r128_dma.o r128_bufs.o r128_context.o +R128HEADERS= r128_drv.h r128_drm.h $(DRMHEADERS) + PROGOBJS= drmstat.po xf86drm.po xf86drmHash.po xf86drmRandom.po sigio.po PROGHEADERS= xf86drm.h $(DRMHEADERS) +INC= /usr/include + CFLAGS= -O2 $(WARNINGS) WARNINGS= -Wall -Wwrite-strings -Wpointer-arith -Wcast-align \ -Wstrict-prototypes -Wshadow -Wnested-externs \ @@ -90,11 +111,6 @@ endif endif endif -# To override this determination, pass the path on the make command line: -# make TREE=/usr/my-kernel-tree/include -# or hardcode a value for TREE here: -# TREE:=/usr/include - ifeq ($(TREE),0) all:; @echo Error: Could not locate kernel tree in $A $B $C else @@ -102,7 +118,30 @@ SMP := $(shell gcc -E -nostdinc -I$(TREE) picker.c 2>/dev/null \ | grep -s 'SMP = ' | cut -d' ' -f3) MODVERSIONS := $(shell gcc -E -I $(TREE) picker.c 2>/dev/null \ | grep -s 'MODVERSIONS = ' | cut -d' ' -f3) -all::;@echo KERNEL HEADERS IN $(TREE): SMP=${SMP} MODVERSIONS=${MODVERSIONS} +AGP := $(shell gcc -E -nostdinc -I$(TREE) picker.c 2>/dev/null \ + | grep -s 'AGP = ' | cut -d' ' -f3) +PARAMS := $(shell if fgrep kill_fasync $(TREE)/linux/fs.h \ + | fgrep -q band; then echo 3; else echo 2; fi) +ifeq ($(AGP),0) +AGP := $(shell gcc -E -nostdinc -I$(TREE) picker.c 2>/dev/null \ + | grep -s 'AGP_MODULE = ' | cut -d' ' -f3) +endif + +ifeq ($(AGP),1) +MODCFLAGS += -DDRM_AGP +DRMOBJS += agpsupport.o +MODS += mga.o i810.o + +MGAOBJS= mga_drv.o mga_dma.o mga_bufs.o mga_state.o mga_context.o +MGAHEADERS= mga_drv.h $(DRMHEADERS) + +I810OBJS= i810_drv.o i810_dma.o i810_bufs.o i810_context.o +I810HEADERS= i810_drv.h $(DRMHEADERS) +endif + +all::;@echo === KERNEL HEADERS IN $(TREE) +all::;@echo === SMP=${SMP} MODVERSIONS=${MODVERSIONS} AGP=${AGP} +all::;@echo === kill_fasync has $(PARAMS) parameters all:: $(LIBS) $(MODS) $(PROGS) endif @@ -115,6 +154,9 @@ endif ifeq ($(MODVERSIONS),1) MODCFLAGS += -DMODVERSIONS -include $(TREE)/linux/modversions.h endif +ifeq ($(PARAMS),3) +MODCFLAGS += -DKILLFASYNCHASTHREEPARAMETERS +endif # **** End of configuration @@ -128,6 +170,17 @@ gamma.o: $(GAMMAOBJS) $(LIBS) tdfx.o: $(TDFXOBJS) $(LIBS) $(LD) -r $^ -o $@ +r128.o: $(R128OBJS) $(LIBS) + $(LD) -r $^ -o $@ + +ifeq ($(AGP),1) +mga.o: $(MGAOBJS) $(LIBS) + $(LD) -r $^ -o $@ + +i810.o: $(I810OBJS) $(LIBS) + $(LD) -r $^ -o $@ +endif + drmstat: $(PROGOBJS) $(CC) $(PRGCFLAGS) $^ $(PRGLIBS) -o $@ @@ -140,7 +193,7 @@ ChangeLog: # .o files are used for modules %.o: %.c - $(CC) $(MODCFLAGS) -c $< -o $@ + $(CC) $(MODCFLAGS) -I$(TREE) -c $< -o $@ %.po: %.c $(CC) $(PRGCFLAGS) -DDRM_USE_MALLOC -c $< -o $@ @@ -149,8 +202,12 @@ ChangeLog: $(DRMOBJS): $(DRMHEADERS) $(GAMMAOBJS): $(GAMMAHEADERS) $(TDFXOBJS): $(TDFXHEADERS) +$(R128OBJS): $(R128HEADERS) +ifeq ($(AGP),1) +$(MGAOBJS): $(MGAHEADERS) +$(I810OBJS): $(I810HEADERS) +endif $(PROGOBJS): $(PROGHEADERS) clean: - rm -f *.o *.po *~ core $(PROGS) - + rm -f *.o *.a *.po 
*~ core $(PROGS) diff --git a/linux/agpsupport.c b/linux/agpsupport.c index fb58154d..c89c3e25 100644 --- a/linux/agpsupport.c +++ b/linux/agpsupport.c @@ -2,6 +2,7 @@ * Created: Mon Dec 13 09:56:45 1999 by faith@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -23,9 +24,7 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * Author: Rickard E. (Rik) Faith <faith@precisioninsight.com> - * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/agpsupport.c,v 1.1 2000/02/11 17:26:02 dawes Exp $ + * Author: Rickard E. (Rik) Faith <faith@valinux.com> * */ @@ -159,6 +158,8 @@ int drm_agp_alloc(struct inode *inode, struct file *filp, unsigned int cmd, -EFAULT); if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS))) return -ENOMEM; + + memset(entry, 0, sizeof(*entry)); pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE; type = (u32) request.type; @@ -237,6 +238,8 @@ int drm_agp_bind(struct inode *inode, struct file *filp, unsigned int cmd, page = (request.offset + PAGE_SIZE - 1) / PAGE_SIZE; if ((retcode = drm_bind_agp(entry->memory, page))) return retcode; entry->bound = dev->agp->base + (page << PAGE_SHIFT); + DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n", + dev->agp->base, entry->bound); return 0; } @@ -254,8 +257,10 @@ int drm_agp_free(struct inode *inode, struct file *filp, unsigned int cmd, if (!(entry = drm_agp_lookup_entry(dev, request.handle))) return -EINVAL; if (entry->bound) drm_unbind_agp(entry->memory); - entry->prev->next = entry->next; - entry->next->prev = entry->prev; + + if (entry->prev) entry->prev->next = entry->next; + else dev->agp->memory = entry->next; + if (entry->next) entry->next->prev = entry->prev; drm_free_agp(entry->memory, entry->pages); drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); return 0; @@ -269,15 +274,12 @@ drm_agp_head_t *drm_agp_init(void) for (fill = &drm_agp_fill[0]; fill->name; fill++) { char *n = (char *)fill->name; -#if 0 - *fill->f = (drm_agp_func_u)get_module_symbol(NULL, n); -#endif *fill->f = (drm_agp_func_u)get_module_symbol(NULL, n); - printk("%s resolves to 0x%08lx\n", n, (*fill->f).address); + DRM_DEBUG("%s resolves to 0x%08lx\n", n, (*fill->f).address); if (!(*fill->f).address) agp_available = 0; } - printk("agp_available = %d\n", agp_available); + DRM_DEBUG("agp_available = %d\n", agp_available); if (agp_available) { if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS))) diff --git a/linux/auth.c b/linux/auth.c index 0e3b1c56..9f81c539 100644 --- a/linux/auth.c +++ b/linux/auth.c @@ -1,8 +1,8 @@ /* auth.c -- IOCTLs for authentication -*- linux-c -*- * Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com - * Revised: Fri Aug 20 11:31:48 1999 by faith@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -23,8 +23,9 @@ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. - * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/auth.c,v 1.2 2000/02/23 04:47:25 martin Exp $ + * + * Authors: + * Rickard E. 
(Rik) Faith <faith@valinux.com> * */ @@ -44,7 +45,6 @@ static drm_file_t *drm_find_file(drm_device_t *dev, drm_magic_t magic) down(&dev->struct_sem); for (pt = dev->magiclist[hash].head; pt; pt = pt->next) { - if (pt->priv->authenticated) continue; if (pt->magic == magic) { retval = pt->priv; break; diff --git a/linux/bufs.c b/linux/bufs.c index 85244c7d..011e4241 100644 --- a/linux/bufs.c +++ b/linux/bufs.c @@ -1,8 +1,8 @@ /* bufs.c -- IOCTLs to manage buffers -*- linux-c -*- * Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com - * Revised: Mon Feb 14 00:14:11 2000 by kevin@precisioninsight.com * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,11 +24,13 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/bufs.c,v 1.5 2000/02/23 04:47:25 martin Exp $ + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> * */ #define __NO_VERSION__ +#include <linux/config.h> #include "drmP.h" #include "linux/un.h" @@ -102,6 +104,11 @@ int drm_addmap(struct inode *inode, struct file *filp, unsigned int cmd, dev->lock.hw_lock = map->handle; /* Pointer to lock */ } break; +#ifdef DRM_AGP + case _DRM_AGP: + map->offset = map->offset + dev->agp->base; + break; +#endif default: drm_free(map, sizeof(*map), DRM_MEM_MAPS); return -EINVAL; @@ -172,7 +179,7 @@ int drm_addbufs(struct inode *inode, struct file *filp, unsigned int cmd, if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; if (dev->queue_count) return -EBUSY; /* Not while in use */ - alignment = (request.flags & DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) :size; + alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size):size; page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; total = PAGE_SIZE << page_order; diff --git a/linux/context.c b/linux/context.c index fdf8fd9f..ca491094 100644 --- a/linux/context.c +++ b/linux/context.c @@ -1,8 +1,8 @@ /* context.c -- IOCTLs for contexts and DMA queues -*- linux-c -*- * Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com - * Revised: Fri Aug 20 11:32:09 1999 by faith@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,7 +24,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/context.c,v 1.2 2000/02/23 04:47:26 martin Exp $ + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> * */ diff --git a/linux/ctxbitmap.c b/linux/ctxbitmap.c index f67209d4..61550597 100644 --- a/linux/ctxbitmap.c +++ b/linux/ctxbitmap.c @@ -1,7 +1,8 @@ /* ctxbitmap.c -- Context bitmap management -*- linux-c -*- * Created: Thu Jan 6 03:56:42 2000 by jhartmann@precisioninsight.com * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a @@ -23,9 +24,7 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * Author: Jeff Hartmann <jhartmann@precisioninsight.com> - * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/ctxbitmap.c,v 1.1 2000/02/11 17:26:02 dawes Exp $ + * Author: Jeff Hartmann <jhartmann@valinux.com> * */ @@ -53,7 +52,7 @@ int drm_ctxbitmap_next(drm_device_t *dev) bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP); if (bit < DRM_MAX_CTXBITMAP) { set_bit(bit, dev->ctx_bitmap); - printk("drm_ctxbitmap_next bit : %d\n", bit); + DRM_DEBUG("drm_ctxbitmap_next bit : %d\n", bit); return bit; } return -1; @@ -64,15 +63,15 @@ int drm_ctxbitmap_init(drm_device_t *dev) int i; int temp; - dev->ctx_bitmap = (unsigned long *) drm_alloc(PAGE_SIZE * 4, + dev->ctx_bitmap = (unsigned long *) drm_alloc(PAGE_SIZE, DRM_MEM_CTXBITMAP); if(dev->ctx_bitmap == NULL) { return -ENOMEM; } - memset((void *) dev->ctx_bitmap, 0, PAGE_SIZE * 4); + memset((void *) dev->ctx_bitmap, 0, PAGE_SIZE); for(i = 0; i < DRM_RESERVED_CONTEXTS; i++) { temp = drm_ctxbitmap_next(dev); - printk("drm_ctxbitmap_init : %d\n", temp); + DRM_DEBUG("drm_ctxbitmap_init : %d\n", temp); } return 0; @@ -80,7 +79,7 @@ int drm_ctxbitmap_init(drm_device_t *dev) void drm_ctxbitmap_cleanup(drm_device_t *dev) { - drm_free((void *)dev->ctx_bitmap, PAGE_SIZE * 4, + drm_free((void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP); } diff --git a/linux/dma.c b/linux/dma.c index 8291e52e..ac2d1bc5 100644 --- a/linux/dma.c +++ b/linux/dma.c @@ -1,8 +1,8 @@ /* dma.c -- DMA IOCTL and function support -*- linux-c -*- * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com - * Revised: Sun Feb 13 23:19:45 2000 by kevin@precisioninsight.com * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,7 +24,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/dma.c,v 1.5 2000/02/23 04:47:26 martin Exp $ + * Authors: + * Rickard E. 
(Rik) Faith <faith@valinuxa.com> * */ @@ -63,15 +64,24 @@ void drm_dma_takedown(drm_device_t *dev) dma->bufs[i].page_order, DRM_MEM_DMA); } - drm_free(dma->bufs[i].buflist, - dma->buf_count - * sizeof(*dma->bufs[0].buflist), - DRM_MEM_BUFS); drm_free(dma->bufs[i].seglist, - dma->buf_count + dma->bufs[i].seg_count * sizeof(*dma->bufs[0].seglist), DRM_MEM_SEGS); - drm_freelist_destroy(&dma->bufs[i].freelist); + } + if(dma->bufs[i].buf_count) { + for(j = 0; j < dma->bufs[i].buf_count; j++) { + if(dma->bufs[i].buflist[j].dev_private) { + drm_free(dma->bufs[i].buflist[j].dev_private, + dma->bufs[i].buflist[j].dev_priv_size, + DRM_MEM_BUFS); + } + } + drm_free(dma->bufs[i].buflist, + dma->bufs[i].buf_count * + sizeof(*dma->bufs[0].buflist), + DRM_MEM_BUFS); + drm_freelist_destroy(&dma->bufs[i].freelist); } } diff --git a/linux/drawable.c b/linux/drawable.c index c7acecb9..03839f5b 100644 --- a/linux/drawable.c +++ b/linux/drawable.c @@ -1,8 +1,8 @@ /* drawable.c -- IOCTLs for drawables -*- linux-c -*- * Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com - * Revised: Fri Aug 20 09:27:03 1999 by faith@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,7 +24,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drawable.c,v 1.2 2000/02/23 04:47:26 martin Exp $ + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> * */ diff --git a/linux/drm.h b/linux/drm.h index 7b8e8826..4affcc41 100644 --- a/linux/drm.h +++ b/linux/drm.h @@ -1,8 +1,8 @@ /* drm.h -- Header for Direct Rendering Manager -*- linux-c -*- * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com - * Revised: Mon Feb 14 00:15:23 2000 by kevin@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,7 +24,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.5 2000/02/23 04:47:26 martin Exp $ + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> * * Acknowledgements: * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg. 
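(Editor's note, not part of the imported patch.) The ctxbitmap.c helpers added above (drm_ctxbitmap_init, drm_ctxbitmap_next, drm_ctxbitmap_free, drm_ctxbitmap_cleanup) give each device a page-sized bitmap of context handles. A hedged, hypothetical sketch of how a driver's context add/remove ioctl handlers would use them; the example_* names are invented for illustration, and locking plus the drm_ctx_t copy-in/out are elided.

/* Hypothetical driver-side sketch of the ctxbitmap API introduced above.
 * The example_* function names are not part of the import. */
static int example_addctx(drm_device_t *dev, drm_ctx_t *ctx)
{
	ctx->handle = drm_ctxbitmap_next(dev);	/* reserve a free handle */
	if (ctx->handle == -1) {
		DRM_DEBUG("Not enough free contexts.\n");
		return -ENOMEM;
	}
	return 0;
}

static int example_rmctx(drm_device_t *dev, drm_ctx_t *ctx)
{
	if (ctx->handle != DRM_KERNEL_CONTEXT)
		drm_ctxbitmap_free(dev, ctx->handle);	/* release the handle */
	return 0;
}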
@@ -61,6 +62,20 @@ typedef unsigned int drm_context_t; typedef unsigned int drm_drawable_t; typedef unsigned int drm_magic_t; +/* Warning: If you change this structure, make sure you change + * XF86DRIClipRectRec in the server as well */ + +typedef struct drm_clip_rect { + unsigned short x1; + unsigned short y1; + unsigned short x2; + unsigned short y2; +} drm_clip_rect_t; + +/* Seperate include files for the i810/mga/r128 specific structures */ +#include "mga_drm.h" +#include "i810_drm.h" +#include "r128_drm.h" typedef struct drm_version { int version_major; /* Major version */ @@ -101,7 +116,8 @@ typedef struct drm_control { typedef enum drm_map_type { _DRM_FRAME_BUFFER = 0, /* WC (no caching), no core dump */ _DRM_REGISTERS = 1, /* no caching, no core dump */ - _DRM_SHM = 2 /* shared, cached */ + _DRM_SHM = 2, /* shared, cached */ + _DRM_AGP = 3 /* AGP/GART */ } drm_map_type_t; typedef enum drm_map_flags { @@ -165,8 +181,11 @@ typedef struct drm_buf_desc { int low_mark; /* Low water mark */ int high_mark; /* High water mark */ enum { - DRM_PAGE_ALIGN = 0x01 /* Align on page boundaries for DMA */ + _DRM_PAGE_ALIGN = 0x01, /* Align on page boundaries for DMA */ + _DRM_AGP_BUFFER = 0x02 /* Buffer is in agp space */ } flags; + unsigned long agp_start; /* Start address of where the agp buffers + * are in the agp aperture */ } drm_buf_desc_t; typedef struct drm_buf_info { @@ -237,6 +256,38 @@ typedef struct drm_irq_busid { int funcnum; } drm_irq_busid_t; +typedef struct drm_agp_mode { + unsigned long mode; +} drm_agp_mode_t; + + /* For drm_agp_alloc -- allocated a buffer */ +typedef struct drm_agp_buffer { + unsigned long size; /* In bytes -- will round to page boundary */ + unsigned long handle; /* Used for BIND/UNBIND ioctls */ + unsigned long type; /* Type of memory to allocate */ + unsigned long physical; /* Physical used by i810 */ +} drm_agp_buffer_t; + + /* For drm_agp_bind */ +typedef struct drm_agp_binding { + unsigned long handle; /* From drm_agp_buffer */ + unsigned long offset; /* In bytes -- will round to page boundary */ +} drm_agp_binding_t; + +typedef struct drm_agp_info { + int agp_version_major; + int agp_version_minor; + unsigned long mode; + unsigned long aperture_base; /* physical address */ + unsigned long aperture_size; /* bytes */ + unsigned long memory_allowed; /* bytes */ + unsigned long memory_used; + + /* PCI information */ + unsigned short id_vendor; + unsigned short id_device; +} drm_agp_info_t; + #define DRM_IOCTL_BASE 'd' #define DRM_IOCTL_NR(n) _IOC_NR(n) #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) @@ -247,7 +298,7 @@ typedef struct drm_irq_busid { #define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t) #define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t) -#define DRM_IOCTL_GET_MAGIC DRM_IOW( 0x02, drm_auth_t) +#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t) #define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t) #define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t) @@ -276,4 +327,39 @@ typedef struct drm_irq_busid { #define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t) #define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t) +#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30) +#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31) +#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t) +#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t) +#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t) +#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t) +#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t) +#define DRM_IOCTL_AGP_UNBIND 
DRM_IOW( 0x37, drm_agp_binding_t) + +/* Mga specific ioctls */ +#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t) +#define DRM_IOCTL_MGA_SWAP DRM_IOW( 0x41, drm_mga_swap_t) +#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x42, drm_mga_clear_t) +#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x43, drm_mga_iload_t) +#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x44, drm_mga_vertex_t) +#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x45, drm_lock_t ) +#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t) + +/* I810 specific ioctls */ +#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t) +#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t) +#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t) +#define DRM_IOCTL_I810_FLUSH DRM_IO ( 0x43) +#define DRM_IOCTL_I810_GETAGE DRM_IO ( 0x44) +#define DRM_IOCTL_I810_GETBUF DRM_IOWR(0x45, drm_i810_dma_t) +#define DRM_IOCTL_I810_SWAP DRM_IO ( 0x46) + +/* Rage 128 specific ioctls */ +#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t) +#define DRM_IOCTL_R128_RESET DRM_IO( 0x41) +#define DRM_IOCTL_R128_FLUSH DRM_IO( 0x42) +#define DRM_IOCTL_R128_IDLE DRM_IO( 0x43) +#define DRM_IOCTL_R128_PACKET DRM_IOW( 0x44, drm_r128_packet_t) +#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x45, drm_r128_vertex_t) + #endif diff --git a/linux/drmP.h b/linux/drmP.h index 312fba36..9ad83bde 100644 --- a/linux/drmP.h +++ b/linux/drmP.h @@ -1,8 +1,8 @@ /* drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*- * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com - * Revised: Sun Feb 13 23:34:30 2000 by kevin@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,7 +24,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmP.h,v 1.6 2000/02/23 04:47:27 martin Exp $ + * Authors: + * Rickard E. 
(Rik) Faith <faith@valinux.com> * */ @@ -32,6 +33,7 @@ #define _DRM_P_H_ #ifdef __KERNEL__ +#include <linux/config.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/miscdevice.h> @@ -48,8 +50,12 @@ #ifdef CONFIG_MTRR #include <asm/mtrr.h> #endif +#ifdef DRM_AGP +#include <linux/types.h> +#include <linux/agp_backend.h> +#endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,1,0) -#include <asm/spinlock.h> +#include <linux/tqueue.h> #include <linux/poll.h> #endif #include "drm.h" @@ -69,21 +75,27 @@ #define DRM_FLAG_DEBUG 0x01 #define DRM_FLAG_NOCTX 0x02 -#define DRM_MEM_DMA 0 -#define DRM_MEM_SAREA 1 -#define DRM_MEM_DRIVER 2 -#define DRM_MEM_MAGIC 3 -#define DRM_MEM_IOCTLS 4 -#define DRM_MEM_MAPS 5 -#define DRM_MEM_VMAS 6 -#define DRM_MEM_BUFS 7 -#define DRM_MEM_SEGS 8 -#define DRM_MEM_PAGES 9 -#define DRM_MEM_FILES 10 -#define DRM_MEM_QUEUES 11 -#define DRM_MEM_CMDS 12 -#define DRM_MEM_MAPPINGS 13 -#define DRM_MEM_BUFLISTS 14 +#define DRM_MEM_DMA 0 +#define DRM_MEM_SAREA 1 +#define DRM_MEM_DRIVER 2 +#define DRM_MEM_MAGIC 3 +#define DRM_MEM_IOCTLS 4 +#define DRM_MEM_MAPS 5 +#define DRM_MEM_VMAS 6 +#define DRM_MEM_BUFS 7 +#define DRM_MEM_SEGS 8 +#define DRM_MEM_PAGES 9 +#define DRM_MEM_FILES 10 +#define DRM_MEM_QUEUES 11 +#define DRM_MEM_CMDS 12 +#define DRM_MEM_MAPPINGS 13 +#define DRM_MEM_BUFLISTS 14 +#define DRM_MEM_AGPLISTS 15 +#define DRM_MEM_TOTALAGP 16 +#define DRM_MEM_BOUNDAGP 17 +#define DRM_MEM_CTXBITMAP 18 + +#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) /* Backward compatibility section */ /* _PAGE_WT changed to _PAGE_PWT in 2.2.6 */ @@ -118,7 +130,6 @@ typedef struct wait_queue *wait_queue_head_t; #endif /* Generic cmpxchg added in 2.3.x */ -#if CPU != 386 #ifndef __HAVE_ARCH_CMPXCHG /* Include this here so that driver can be used with older kernels. */ @@ -153,10 +164,6 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \ (unsigned long)(n),sizeof(*(ptr)))) #endif -#else - /* Compiling for a 386 proper... */ -#error DRI not supported on Intel 80386 -#endif /* Macros to make printk easier */ #define DRM_ERROR(fmt, arg...) 
\ @@ -218,8 +225,8 @@ typedef struct drm_magic_entry { } drm_magic_entry_t; typedef struct drm_magic_head { - struct drm_magic_entry *head; - struct drm_magic_entry *tail; + struct drm_magic_entry *head; + struct drm_magic_entry *tail; } drm_magic_head_t; typedef struct drm_vma_entry { @@ -235,6 +242,7 @@ typedef struct drm_buf { int used; /* Amount of buffer in use (for DMA) */ unsigned long offset; /* Byte offset (used internally) */ void *address; /* Address of buffer */ + unsigned long bus_address; /* Bus address of buffer */ struct drm_buf *next; /* Kernel-only: used for free list */ __volatile__ int waiting; /* On kernel DMA queue */ __volatile__ int pending; /* On hardware DMA queue */ @@ -250,12 +258,16 @@ typedef struct drm_buf { DRM_LIST_PRIO = 4, DRM_LIST_RECLAIM = 5 } list; /* Which list we're on */ + #if DRM_DMA_HISTOGRAM cycles_t time_queued; /* Queued to kernel DMA queue */ cycles_t time_dispatched; /* Dispatched to hardware */ cycles_t time_completed; /* Completed by hardware */ cycles_t time_freed; /* Back on freelist */ #endif + + int dev_priv_size; /* Size of buffer private stoarge */ + void *dev_private; /* Per-buffer private storage */ } drm_buf_t; #if DRM_DMA_HISTOGRAM @@ -376,6 +388,9 @@ typedef struct drm_device_dma { int page_count; unsigned long *pagelist; unsigned long byte_count; + enum { + _DRM_DMA_USE_AGP = 0x01 + } flags; /* DMA support */ drm_buf_t *this_buffer; /* Buffer being sent */ @@ -384,6 +399,41 @@ typedef struct drm_device_dma { wait_queue_head_t waiting; /* Processes waiting on free bufs */ } drm_device_dma_t; +#ifdef DRM_AGP +typedef struct drm_agp_mem { + unsigned long handle; + agp_memory *memory; + unsigned long bound; /* address */ + int pages; + struct drm_agp_mem *prev; + struct drm_agp_mem *next; +} drm_agp_mem_t; + +typedef struct drm_agp_head { + agp_kern_info agp_info; + const char *chipset; + drm_agp_mem_t *memory; + unsigned long mode; + int enabled; + int acquired; + unsigned long base; + int agp_mtrr; +} drm_agp_head_t; + +typedef struct { + void (*free_memory)(agp_memory *); + agp_memory *(*allocate_memory)(size_t, u32); + int (*bind_memory)(agp_memory *, off_t); + int (*unbind_memory)(agp_memory *); + void (*enable)(u32); + int (*acquire)(void); + void (*release)(void); + void (*copy_info)(agp_kern_info *); +} drm_agp_func_t; + +extern drm_agp_func_t drm_agp; +#endif + typedef struct drm_device { const char *name; /* Simple driver name */ char *unique; /* Unique identifier: e.g., busid */ @@ -462,6 +512,12 @@ typedef struct drm_device { struct fasync_struct *buf_async;/* Processes waiting for SIGIO */ wait_queue_head_t buf_readers; /* Processes waiting to read */ wait_queue_head_t buf_writers; /* Processes waiting to ctx switch */ + +#ifdef DRM_AGP + drm_agp_head_t *agp; +#endif + unsigned long *ctx_bitmap; + void *dev_private; } drm_device_t; @@ -470,6 +526,7 @@ typedef struct drm_device { /* Misc. 
support (init.c) */ extern int drm_flags; extern void drm_parse_options(char *s); +extern int drm_cpu_valid(void); /* Device support (fops.c) */ @@ -533,6 +590,14 @@ extern void drm_free_pages(unsigned long address, int order, extern void *drm_ioremap(unsigned long offset, unsigned long size); extern void drm_ioremapfree(void *pt, unsigned long size); +#ifdef DRM_AGP +extern agp_memory *drm_alloc_agp(int pages, u32 type); +extern int drm_free_agp(agp_memory *handle, int pages); +extern int drm_bind_agp(agp_memory *handle, unsigned int start); +extern int drm_unbind_agp(agp_memory *handle); +#endif + + /* Buffer management support (bufs.c) */ extern int drm_order(unsigned long size); extern int drm_addmap(struct inode *inode, struct file *filp, @@ -642,5 +707,32 @@ extern int drm_flush_unblock(drm_device_t *dev, int context, drm_lock_flags_t flags); extern int drm_flush_block_and_flush(drm_device_t *dev, int context, drm_lock_flags_t flags); + + /* Context Bitmap support (ctxbitmap.c) */ +extern int drm_ctxbitmap_init(drm_device_t *dev); +extern void drm_ctxbitmap_cleanup(drm_device_t *dev); +extern int drm_ctxbitmap_next(drm_device_t *dev); +extern void drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle); + +#ifdef DRM_AGP + /* AGP/GART support (agpsupport.c) */ +extern drm_agp_head_t *drm_agp_init(void); +extern int drm_agp_acquire(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_agp_release(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_agp_enable(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_agp_info(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_agp_alloc(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_agp_free(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_agp_unbind(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_agp_bind(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +#endif #endif #endif diff --git a/linux/fops.c b/linux/fops.c index 2a064e5d..4ade7aa1 100644 --- a/linux/fops.c +++ b/linux/fops.c @@ -1,8 +1,8 @@ /* fops.c -- File operations for DRM -*- linux-c -*- * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com - * Revised: Fri Dec 3 10:26:26 1999 by faith@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,7 +24,9 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/fops.c,v 1.6 2000/02/23 04:47:27 martin Exp $ + * Authors: + * Rickard E. 
(Rik) Faith <faith@valinux.com> + * Daryll Strauss <daryll@valinux.com> * */ @@ -40,6 +42,7 @@ int drm_open_helper(struct inode *inode, struct file *filp, drm_device_t *dev) drm_file_t *priv; if (filp->f_flags & O_EXCL) return -EBUSY; /* No exclusive opens */ + if (!drm_cpu_valid()) return -EINVAL; DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor); @@ -211,10 +214,15 @@ int drm_write_string(drm_device_t *dev, const char *s) send -= count; } -#if LINUX_VERSION_CODE < 0x020315 +#if LINUX_VERSION_CODE < 0x020315 && !defined(KILLFASYNCHASTHREEPARAMETERS) + /* The extra parameter to kill_fasync was added in 2.3.21, and is + _not_ present in _stock_ 2.2.14 and 2.2.15. However, some + distributions patch 2.2.x kernels to add this parameter. The + Makefile.linux attempts to detect this addition and defines + KILLFASYNCHASTHREEPARAMETERS if three parameters are found. */ if (dev->buf_async) kill_fasync(dev->buf_async, SIGIO); #else - /* Parameter added in 2.3.21 */ + /* Parameter added in 2.3.21 */ if (dev->buf_async) kill_fasync(dev->buf_async, SIGIO, POLL_IN); #endif DRM_DEBUG("waking\n"); diff --git a/linux/gamma_dma.c b/linux/gamma_dma.c index eec0f859..a99f24ca 100644 --- a/linux/gamma_dma.c +++ b/linux/gamma_dma.c @@ -1,8 +1,8 @@ /* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*- * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com - * Revised: Thu Sep 16 12:55:37 1999 by faith@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,7 +24,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/gamma_dma.c,v 1.2 2000/02/23 04:47:28 martin Exp $ + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> * */ @@ -87,13 +88,31 @@ static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address, GAMMA_WRITE(GAMMA_DMACOUNT, length / 4); } -static inline void gamma_dma_quiescent(drm_device_t *dev) +static inline void gamma_dma_quiescent_single(drm_device_t *dev) { while (GAMMA_READ(GAMMA_DMACOUNT)) ; while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) ; + + GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10); + GAMMA_WRITE(GAMMA_SYNC, 0); + + do { + while (!GAMMA_READ(GAMMA_OUTFIFOWORDS)) + ; + } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG); +} + +static inline void gamma_dma_quiescent_dual(drm_device_t *dev) +{ + while (GAMMA_READ(GAMMA_DMACOUNT)) + ; + while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) + ; + GAMMA_WRITE(GAMMA_BROADCASTMASK, 3); + GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10); GAMMA_WRITE(GAMMA_SYNC, 0); @@ -103,7 +122,6 @@ static inline void gamma_dma_quiescent(drm_device_t *dev) ; } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG); - /* Read from second MX */ do { while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000)) @@ -788,8 +806,13 @@ int gamma_lock(struct inode *inode, struct file *filp, unsigned int cmd, if (!ret) { if (lock.flags & _DRM_LOCK_READY) gamma_dma_ready(dev); - if (lock.flags & _DRM_LOCK_QUIESCENT) - gamma_dma_quiescent(dev); + if (lock.flags & _DRM_LOCK_QUIESCENT) { + if (gamma_found() == 1) { + gamma_dma_quiescent_single(dev); + } else { + gamma_dma_quiescent_dual(dev); + } + } } DRM_DEBUG("%d %s\n", lock.context, ret ? 
"interrupted" : "has lock"); diff --git a/linux/gamma_drv.c b/linux/gamma_drv.c index ce970835..987c903d 100644 --- a/linux/gamma_drv.c +++ b/linux/gamma_drv.c @@ -1,8 +1,8 @@ /* gamma.c -- 3dlabs GMX 2000 driver -*- linux-c -*- * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com - * Revised: Tue Oct 12 08:51:36 1999 by faith@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,22 +24,32 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/gamma_drv.c,v 1.4 2000/02/23 04:47:28 martin Exp $ + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> * */ +#include <linux/config.h> #define EXPORT_SYMTAB #include "drmP.h" #include "gamma_drv.h" +#include <linux/pci.h> EXPORT_SYMBOL(gamma_init); EXPORT_SYMBOL(gamma_cleanup); +#ifndef PCI_DEVICE_ID_3DLABS_GAMMA +#define PCI_DEVICE_ID_3DLABS_GAMMA 0x0008 +#endif +#ifndef PCI_DEVICE_ID_3DLABS_MX +#define PCI_DEVICE_ID_3DLABS_MX 0x0006 +#endif + #define GAMMA_NAME "gamma" #define GAMMA_DESC "3dlabs GMX 2000" -#define GAMMA_DATE "19990830" -#define GAMMA_MAJOR 0 +#define GAMMA_DATE "20000606" +#define GAMMA_MAJOR 1 #define GAMMA_MINOR 0 -#define GAMMA_PATCHLEVEL 5 +#define GAMMA_PATCHLEVEL 0 static drm_device_t gamma_device; @@ -98,10 +108,13 @@ static drm_ioctl_desc_t gamma_ioctls[] = { int init_module(void); void cleanup_module(void); static char *gamma = NULL; +static int devices = 0; MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas."); MODULE_DESCRIPTION("3dlabs GMX 2000"); MODULE_PARM(gamma, "s"); +MODULE_PARM(devices, "i"); +MODULE_PARM_DESC(devices, "devices=x, where x is the number of MX chips on your card\n"); /* init_module is called when insmod is used to load the module */ @@ -121,7 +134,7 @@ void cleanup_module(void) #ifndef MODULE /* gamma_setup is called by the kernel to parse command-line options passed * via the boot-loader (e.g., LILO). It calls the insmod option routine, - * drm_parse_drm. + * drm_parse_options. * * This is not currently supported, since it requires changes to * linux/init/main.c. */ @@ -271,6 +284,12 @@ static int gamma_takedown(drm_device_t *dev) - PAGE_SHIFT, DRM_MEM_SAREA); break; +#ifdef DRM_AGP + case _DRM_AGP: + /* Do nothing here, because this is all + handled in the AGP/GART driver. 
*/ + break; +#endif } drm_free(map, sizeof(*map), DRM_MEM_MAPS); } @@ -310,6 +329,34 @@ static int gamma_takedown(drm_device_t *dev) return 0; } +int gamma_found(void) +{ + return devices; +} + +int gamma_find_devices(void) +{ + struct pci_dev *d = NULL, *one = NULL, *two = NULL; + + d = pci_find_device(PCI_VENDOR_ID_3DLABS,PCI_DEVICE_ID_3DLABS_GAMMA,d); + if (!d) return 0; + + one = pci_find_device(PCI_VENDOR_ID_3DLABS,PCI_DEVICE_ID_3DLABS_MX,d); + if (!one) return 0; + + /* Make sure it's on the same card, if not - no MX's found */ + if (PCI_SLOT(d->devfn) != PCI_SLOT(one->devfn)) return 0; + + two = pci_find_device(PCI_VENDOR_ID_3DLABS,PCI_DEVICE_ID_3DLABS_MX,one); + if (!two) return 1; + + /* Make sure it's on the same card, if not - only 1 MX found */ + if (PCI_SLOT(d->devfn) != PCI_SLOT(two->devfn)) return 1; + + /* Two MX's found - we don't currently support more than 2 */ + return 2; +} + /* gamma_init is called via init_module at module load time, or via * linux/init/main.c (this is not currently supported). */ @@ -327,6 +374,8 @@ int gamma_init(void) #ifdef MODULE drm_parse_options(gamma); #endif + devices = gamma_find_devices(); + if (devices == 0) return -1; if ((retcode = misc_register(&gamma_misc))) { DRM_ERROR("Cannot register \"%s\"\n", GAMMA_NAME); @@ -338,13 +387,14 @@ int gamma_init(void) drm_mem_init(); drm_proc_init(dev); - DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", + DRM_INFO("Initialized %s %d.%d.%d %s on minor %d with %d MX devices\n", GAMMA_NAME, GAMMA_MAJOR, GAMMA_MINOR, GAMMA_PATCHLEVEL, GAMMA_DATE, - gamma_misc.minor); + gamma_misc.minor, + devices); return 0; } diff --git a/linux/gamma_drv.h b/linux/gamma_drv.h index d78a13eb..d7e70f7f 100644 --- a/linux/gamma_drv.h +++ b/linux/gamma_drv.h @@ -1,8 +1,8 @@ /* gamma_drv.h -- Private header for 3dlabs GMX 2000 driver -*- linux-c -*- * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com - * Revised: Fri Aug 20 09:24:27 1999 by faith@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,7 +24,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/gamma_drv.h,v 1.2 2000/02/23 04:47:28 martin Exp $ + * Authors: + * Rickard E. (Rik) Faith <faith@precisioninsight.com> * */ @@ -53,5 +54,7 @@ extern int gamma_irq_install(drm_device_t *dev, int irq); extern int gamma_irq_uninstall(drm_device_t *dev); extern int gamma_control(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); +extern int gamma_find_devices(void); +extern int gamma_found(void); #endif diff --git a/linux/i810_bufs.c b/linux/i810_bufs.c index 49455b43..fa1f84dc 100644 --- a/linux/i810_bufs.c +++ b/linux/i810_bufs.c @@ -2,6 +2,7 @@ * Created: Thu Jan 6 01:47:26 2000 by jhartmann@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -23,161 +24,35 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com> - * Jeff Hartmann <jhartmann@precisioninsight.com> + * Authors: Rickard E. 
(Rik) Faith <faith@valinux.com> + * Jeff Hartmann <jhartmann@valinux.com> * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_bufs.c,v 1.1 2000/02/11 17:26:04 dawes Exp $ - * */ #define __NO_VERSION__ #include "drmP.h" +#include "i810_drv.h" #include "linux/un.h" int i810_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - drm_device_dma_t *dma = dev->dma; - drm_buf_desc_t request; - drm_buf_entry_t *entry; - drm_buf_t *buf; - unsigned long offset; - unsigned long agp_offset; - int count; - int order; - int size; - int alignment; - int page_order; - int total; - int byte_count; - int i; - - if (!dma) return -EINVAL; - - copy_from_user_ret(&request, - (drm_buf_desc_t *)arg, - sizeof(request), - -EFAULT); - - count = request.count; - order = drm_order(request.size); - size = 1 << order; - agp_offset = request.agp_start; - alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) :size; - page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; - total = PAGE_SIZE << page_order; - byte_count = 0; - - if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; - if (dev->queue_count) return -EBUSY; /* Not while in use */ - spin_lock(&dev->count_lock); - if (dev->buf_use) { - spin_unlock(&dev->count_lock); - return -EBUSY; - } - atomic_inc(&dev->buf_alloc); - spin_unlock(&dev->count_lock); - - down(&dev->struct_sem); - entry = &dma->bufs[order]; - if (entry->buf_count) { - up(&dev->struct_sem); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; /* May only call once for each order */ - } - - entry->buflist = drm_alloc(count * sizeof(*entry->buflist), - DRM_MEM_BUFS); - if (!entry->buflist) { - up(&dev->struct_sem); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - memset(entry->buflist, 0, count * sizeof(*entry->buflist)); - - entry->buf_size = size; - entry->page_order = page_order; - - while(entry->buf_count < count) { - for(offset = 0; offset + size <= total && entry->buf_count < count; - offset += alignment, ++entry->buf_count) { - buf = &entry->buflist[entry->buf_count]; - buf->idx = dma->buf_count + entry->buf_count; - buf->total = alignment; - buf->order = order; - buf->used = 0; - buf->offset = agp_offset - dev->agp->base + offset;/* ?? 
*/ - buf->bus_address = agp_offset + offset; - buf->address = agp_offset + offset + dev->agp->base; - buf->next = NULL; - buf->waiting = 0; - buf->pending = 0; - init_waitqueue_head(&buf->dma_wait); - buf->pid = 0; -#if DRM_DMA_HISTOGRAM - buf->time_queued = 0; - buf->time_dispatched = 0; - buf->time_completed = 0; - buf->time_freed = 0; -#endif - DRM_DEBUG("buffer %d @ %p\n", - entry->buf_count, buf->address); - } - byte_count += PAGE_SIZE << page_order; - } - - dma->buflist = drm_realloc(dma->buflist, - dma->buf_count * sizeof(*dma->buflist), - (dma->buf_count + entry->buf_count) - * sizeof(*dma->buflist), - DRM_MEM_BUFS); - for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++) - dma->buflist[i] = &entry->buflist[i - dma->buf_count]; - - dma->buf_count += entry->buf_count; - dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); - - drm_freelist_create(&entry->freelist, entry->buf_count); - for (i = 0; i < entry->buf_count; i++) { - drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]); - } - - up(&dev->struct_sem); - - request.count = entry->buf_count; - request.size = size; - - copy_to_user_ret((drm_buf_desc_t *)arg, - &request, - sizeof(request), - -EFAULT); - - atomic_dec(&dev->buf_alloc); - return 0; -} - -int i810_addbufs_pci(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) -{ - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - drm_device_dma_t *dma = dev->dma; - drm_buf_desc_t request; - int count; - int order; - int size; - int total; - int page_order; - drm_buf_entry_t *entry; - unsigned long page; - drm_buf_t *buf; - int alignment; - unsigned long offset; - int i; - int byte_count; - int page_count; + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_device_dma_t *dma = dev->dma; + drm_buf_desc_t request; + drm_buf_entry_t *entry; + drm_buf_t *buf; + unsigned long offset; + unsigned long agp_offset; + int count; + int order; + int size; + int alignment; + int page_order; + int total; + int byte_count; + int i; if (!dma) return -EINVAL; @@ -186,20 +61,17 @@ int i810_addbufs_pci(struct inode *inode, struct file *filp, unsigned int cmd, sizeof(request), -EFAULT); - count = request.count; - order = drm_order(request.size); - size = 1 << order; - - DRM_DEBUG("count = %d, size = %d (%d), order = %d, queue_count = %d\n", - request.count, request.size, size, order, dev->queue_count); - - if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; - if (dev->queue_count) return -EBUSY; /* Not while in use */ - + count = request.count; + order = drm_order(request.size); + size = 1 << order; + agp_offset = request.agp_start; alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) :size; page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; - total = PAGE_SIZE << page_order; - + total = PAGE_SIZE << page_order; + byte_count = 0; + + if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; + if (dev->queue_count) return -EBUSY; /* Not while in use */ spin_lock(&dev->count_lock); if (dev->buf_use) { spin_unlock(&dev->count_lock); @@ -207,15 +79,15 @@ int i810_addbufs_pci(struct inode *inode, struct file *filp, unsigned int cmd, } atomic_inc(&dev->buf_alloc); spin_unlock(&dev->count_lock); - + down(&dev->struct_sem); entry = &dma->bufs[order]; if (entry->buf_count) { up(&dev->struct_sem); atomic_dec(&dev->buf_alloc); - return -ENOMEM; /* May only call once for each order */ + return -ENOMEM; /* May only call once for each order */ } - + entry->buflist = drm_alloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS); if (!entry->buflist) { @@ -224,69 +96,45 @@ int i810_addbufs_pci(struct inode *inode, struct file *filp, unsigned int cmd, return -ENOMEM; } memset(entry->buflist, 0, count * sizeof(*entry->buflist)); - - entry->seglist = drm_alloc(count * sizeof(*entry->seglist), - DRM_MEM_SEGS); - if (!entry->seglist) { - drm_free(entry->buflist, - count * sizeof(*entry->buflist), - DRM_MEM_BUFS); - up(&dev->struct_sem); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - memset(entry->seglist, 0, count * sizeof(*entry->seglist)); - - dma->pagelist = drm_realloc(dma->pagelist, - dma->page_count * sizeof(*dma->pagelist), - (dma->page_count + (count << page_order)) - * sizeof(*dma->pagelist), - DRM_MEM_PAGES); - DRM_DEBUG("pagelist: %d entries\n", - dma->page_count + (count << page_order)); - - - entry->buf_size = size; + + entry->buf_size = size; entry->page_order = page_order; - byte_count = 0; - page_count = 0; - while (entry->buf_count < count) { - if (!(page = drm_alloc_pages(page_order, DRM_MEM_DMA))) break; - entry->seglist[entry->seg_count++] = page; - for (i = 0; i < (1 << page_order); i++) { - DRM_DEBUG("page %d @ 0x%08lx\n", - dma->page_count + page_count, - page + PAGE_SIZE * i); - dma->pagelist[dma->page_count + page_count++] - = page + PAGE_SIZE * i; - } - for (offset = 0; - offset + size <= total && entry->buf_count < count; - offset += alignment, ++entry->buf_count) { - buf = &entry->buflist[entry->buf_count]; - buf->idx = dma->buf_count + entry->buf_count; - buf->total = alignment; - buf->order = order; - buf->used = 0; - buf->offset = (dma->byte_count + byte_count + offset); - buf->address = (void *)(page + offset); - buf->next = NULL; - buf->waiting = 0; - buf->pending = 0; - init_waitqueue_head(&buf->dma_wait); - buf->pid = 0; + offset = 0; + + while(entry->buf_count < count) { + buf = &entry->buflist[entry->buf_count]; + buf->idx = dma->buf_count + entry->buf_count; + buf->total = alignment; + buf->order = order; + buf->used = 0; + buf->offset = offset; + buf->bus_address = dev->agp->base + agp_offset + offset; + buf->address = (void *)(agp_offset + offset + dev->agp->base); + buf->next = NULL; + buf->waiting = 0; + buf->pending = 0; + init_waitqueue_head(&buf->dma_wait); + buf->pid = 0; + + buf->dev_private = drm_alloc(sizeof(drm_i810_buf_priv_t), + DRM_MEM_BUFS); + buf->dev_priv_size = sizeof(drm_i810_buf_priv_t); + memset(buf->dev_private, 0, sizeof(drm_i810_buf_priv_t)); + #if DRM_DMA_HISTOGRAM - buf->time_queued = 0; - buf->time_dispatched = 0; - buf->time_completed = 0; - buf->time_freed = 0; + buf->time_queued = 0; + buf->time_dispatched = 0; + buf->time_completed = 0; + buf->time_freed = 0; #endif - DRM_DEBUG("buffer %d @ %p\n", - entry->buf_count, buf->address); - } + 
offset = offset + alignment; + entry->buf_count++; byte_count += PAGE_SIZE << page_order; + + DRM_DEBUG("buffer %d @ %p\n", + entry->buf_count, buf->address); } - + dma->buflist = drm_realloc(dma->buflist, dma->buf_count * sizeof(*dma->buflist), (dma->buf_count + entry->buf_count) @@ -294,45 +142,43 @@ int i810_addbufs_pci(struct inode *inode, struct file *filp, unsigned int cmd, DRM_MEM_BUFS); for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++) dma->buflist[i] = &entry->buflist[i - dma->buf_count]; - - dma->buf_count += entry->buf_count; - dma->seg_count += entry->seg_count; - dma->page_count += entry->seg_count << page_order; - dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); - + + dma->buf_count += entry->buf_count; + dma->byte_count += byte_count; drm_freelist_create(&entry->freelist, entry->buf_count); for (i = 0; i < entry->buf_count; i++) { drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]); } - + up(&dev->struct_sem); - + request.count = entry->buf_count; request.size = size; - + copy_to_user_ret((drm_buf_desc_t *)arg, &request, sizeof(request), -EFAULT); - + atomic_dec(&dev->buf_alloc); + dma->flags = _DRM_DMA_USE_AGP; return 0; } int i810_addbufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_buf_desc_t request; + drm_buf_desc_t request; - copy_from_user_ret(&request, - (drm_buf_desc_t *)arg, - sizeof(request), - -EFAULT); + copy_from_user_ret(&request, + (drm_buf_desc_t *)arg, + sizeof(request), + -EFAULT); - if(request.flags & _DRM_AGP_BUFFER) - return i810_addbufs_agp(inode, filp, cmd, arg); - else - return i810_addbufs_pci(inode, filp, cmd, arg); + if(request.flags & _DRM_AGP_BUFFER) + return i810_addbufs_agp(inode, filp, cmd, arg); + else + return -EINVAL; } int i810_infobufs(struct inode *inode, struct file *filp, unsigned int cmd, @@ -486,99 +332,3 @@ int i810_freebufs(struct inode *inode, struct file *filp, unsigned int cmd, return 0; } -int i810_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) -{ - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - drm_device_dma_t *dma = dev->dma; - int retcode = 0; - const int zero = 0; - unsigned long virtual; - unsigned long address; - drm_buf_map_t request; - int i; - - if (!dma) return -EINVAL; - - DRM_DEBUG("\n"); - - spin_lock(&dev->count_lock); - if (atomic_read(&dev->buf_alloc)) { - spin_unlock(&dev->count_lock); - return -EBUSY; - } - ++dev->buf_use; /* Can't allocate more after this call */ - spin_unlock(&dev->count_lock); - - copy_from_user_ret(&request, - (drm_buf_map_t *)arg, - sizeof(request), - -EFAULT); - - if (request.count >= dma->buf_count) { - if(dma->flags & _DRM_DMA_USE_AGP) { - /* This is an ugly vicious hack */ - drm_map_t *map = NULL; - for(i = 0; i < dev->map_count; i++) { - map = dev->maplist[i]; - if(map->type == _DRM_AGP) break; - } - if (i >= dev->map_count || !map) { - retcode = -EINVAL; - goto done; - } - - virtual = do_mmap(filp, 0, map->size, PROT_READ|PROT_WRITE, - MAP_SHARED, (unsigned long)map->handle); - } - else { - virtual = do_mmap(filp, 0, dma->byte_count, - PROT_READ|PROT_WRITE, MAP_SHARED, 0); - } - if (virtual > -1024UL) { - /* Real error */ - retcode = (signed long)virtual; - goto done; - } - request.virtual = (void *)virtual; - - for (i = 0; i < dma->buf_count; i++) { - if (copy_to_user(&request.list[i].idx, - &dma->buflist[i]->idx, - sizeof(request.list[0].idx))) { - retcode = -EFAULT; - goto done; - } - if (copy_to_user(&request.list[i].total, - 
&dma->buflist[i]->total, - sizeof(request.list[0].total))) { - retcode = -EFAULT; - goto done; - } - if (copy_to_user(&request.list[i].used, - &zero, - sizeof(zero))) { - retcode = -EFAULT; - goto done; - } - address = virtual + dma->buflist[i]->offset; - if (copy_to_user(&request.list[i].address, - &address, - sizeof(address))) { - retcode = -EFAULT; - goto done; - } - } - } -done: - request.count = dma->buf_count; - DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode); - - copy_to_user_ret((drm_buf_map_t *)arg, - &request, - sizeof(request), - -EFAULT); - - return retcode; -} diff --git a/linux/i810_context.c b/linux/i810_context.c new file mode 100644 index 00000000..689814db --- /dev/null +++ b/linux/i810_context.c @@ -0,0 +1,205 @@ +/* i810_context.c -- IOCTLs for i810 contexts -*- linux-c -*- + * Created: Mon Dec 13 09:51:35 1999 by faith@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Rickard E. (Rik) Faith <faith@valinux.com> + * Jeff Hartmann <jhartmann@valinux.com> + * + */ + +#include <linux/sched.h> + +#define __NO_VERSION__ +#include "drmP.h" +#include "i810_drv.h" + +static int i810_alloc_queue(drm_device_t *dev) +{ + int temp = drm_ctxbitmap_next(dev); + DRM_DEBUG("i810_alloc_queue: %d\n", temp); + return temp; +} + +int i810_context_switch(drm_device_t *dev, int old, int new) +{ + char buf[64]; + + atomic_inc(&dev->total_ctx); + + if (test_and_set_bit(0, &dev->context_flag)) { + DRM_ERROR("Reentering -- FIXME\n"); + return -EBUSY; + } + +#if DRM_DMA_HISTOGRAM + dev->ctx_start = get_cycles(); +#endif + + DRM_DEBUG("Context switch from %d to %d\n", old, new); + + if (new == dev->last_context) { + clear_bit(0, &dev->context_flag); + return 0; + } + + if (drm_flags & DRM_FLAG_NOCTX) { + i810_context_switch_complete(dev, new); + } else { + sprintf(buf, "C %d %d\n", old, new); + drm_write_string(dev, buf); + } + + return 0; +} + +int i810_context_switch_complete(drm_device_t *dev, int new) +{ + dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ + dev->last_switch = jiffies; + + if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("Lock isn't held after context switch\n"); + } + + /* If a context switch is ever initiated + when the kernel holds the lock, release + that lock here. 
*/ +#if DRM_DMA_HISTOGRAM + atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles() + - dev->ctx_start)]); + +#endif + clear_bit(0, &dev->context_flag); + wake_up(&dev->context_wait); + + return 0; +} + +int i810_resctx(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_ctx_res_t res; + drm_ctx_t ctx; + int i; + + DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS); + copy_from_user_ret(&res, (drm_ctx_res_t *)arg, sizeof(res), -EFAULT); + if (res.count >= DRM_RESERVED_CONTEXTS) { + memset(&ctx, 0, sizeof(ctx)); + for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { + ctx.handle = i; + copy_to_user_ret(&res.contexts[i], + &i, + sizeof(i), + -EFAULT); + } + } + res.count = DRM_RESERVED_CONTEXTS; + copy_to_user_ret((drm_ctx_res_t *)arg, &res, sizeof(res), -EFAULT); + return 0; +} + +int i810_addctx(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_ctx_t ctx; + + copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if ((ctx.handle = i810_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) { + /* Skip kernel's context and get a new one. */ + ctx.handle = i810_alloc_queue(dev); + } + if (ctx.handle == -1) { + DRM_DEBUG("Not enough free contexts.\n"); + /* Should this return -EBUSY instead? */ + return -ENOMEM; + } + DRM_DEBUG("%d\n", ctx.handle); + copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT); + return 0; +} + +int i810_modctx(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + /* This does nothing for the i810 */ + return 0; +} + +int i810_getctx(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_ctx_t ctx; + + copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT); + /* This is 0, because we don't hanlde any context flags */ + ctx.flags = 0; + copy_to_user_ret((drm_ctx_t*)arg, &ctx, sizeof(ctx), -EFAULT); + return 0; +} + +int i810_switchctx(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_ctx_t ctx; + + copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + DRM_DEBUG("%d\n", ctx.handle); + return i810_context_switch(dev, dev->last_context, ctx.handle); +} + +int i810_newctx(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_ctx_t ctx; + + copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + DRM_DEBUG("%d\n", ctx.handle); + i810_context_switch_complete(dev, ctx.handle); + + return 0; +} + +int i810_rmctx(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_ctx_t ctx; + + copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + DRM_DEBUG("%d\n", ctx.handle); + if(ctx.handle != DRM_KERNEL_CONTEXT) { + drm_ctxbitmap_free(dev, ctx.handle); + } + + return 0; +} diff --git a/linux/i810_dma.c b/linux/i810_dma.c index 09959b65..96b7a44f 100644 --- a/linux/i810_dma.c +++ b/linux/i810_dma.c @@ -2,6 +2,7 @@ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a @@ -23,10 +24,9 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com> - * Jeff Hartmann <jhartmann@precisioninsight.com> - * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_dma.c,v 1.1 2000/02/11 17:26:04 dawes Exp $ + * Authors: Rickard E. (Rik) Faith <faith@valinux.com> + * Jeff Hartmann <jhartmann@valinux.com> + * Keith Whitwell <keithw@valinux.com> * */ @@ -36,6 +36,18 @@ #include <linux/interrupt.h> /* For task queue support */ +/* in case we don't have a 2.3.99-pre6 kernel or later: */ +#ifndef VM_DONTCOPY +#define VM_DONTCOPY 0 +#endif + +#define I810_BUF_FREE 2 +#define I810_BUF_CLIENT 1 +#define I810_BUF_HARDWARE 0 + +#define I810_BUF_UNMAPPED 0 +#define I810_BUF_MAPPED 1 + #define I810_REG(reg) 2 #define I810_BASE(reg) ((unsigned long) \ dev->maplist[I810_REG(reg)]->handle) @@ -43,544 +55,835 @@ #define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg) #define I810_READ(reg) I810_DEREF(reg) #define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0) - -void i810_dma_init(drm_device_t *dev) +#define I810_DEREF16(reg) *(__volatile__ u16 *)I810_ADDR(reg) +#define I810_READ16(reg) I810_DEREF16(reg) +#define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0) + +#define RING_LOCALS unsigned int outring, ringmask; volatile char *virt; + +#define BEGIN_LP_RING(n) do { \ + if (I810_VERBOSE) \ + DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \ + n, __FUNCTION__); \ + if (dev_priv->ring.space < n*4) \ + i810_wait_ring(dev, n*4); \ + dev_priv->ring.space -= n*4; \ + outring = dev_priv->ring.tail; \ + ringmask = dev_priv->ring.tail_mask; \ + virt = dev_priv->ring.virtual_start; \ +} while (0) + +#define ADVANCE_LP_RING() do { \ + if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \ + dev_priv->ring.tail = outring; \ + I810_WRITE(LP_RING + RING_TAIL, outring); \ +} while(0) + +#define OUT_RING(n) do { \ + if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ + *(volatile unsigned int *)(virt + outring) = n; \ + outring += 4; \ + outring &= ringmask; \ +} while (0); + +static inline void i810_print_status_page(drm_device_t *dev) { - printk(KERN_INFO "i810_dma_init\n"); + drm_device_dma_t *dma = dev->dma; + drm_i810_private_t *dev_priv = dev->dev_private; + u32 *temp = (u32 *)dev_priv->hw_status_page; + int i; + + DRM_DEBUG( "hw_status: Interrupt Status : %x\n", temp[0]); + DRM_DEBUG( "hw_status: LpRing Head ptr : %x\n", temp[1]); + DRM_DEBUG( "hw_status: IRing Head ptr : %x\n", temp[2]); + DRM_DEBUG( "hw_status: Reserved : %x\n", temp[3]); + DRM_DEBUG( "hw_status: Driver Counter : %d\n", temp[5]); + for(i = 6; i < dma->buf_count + 6; i++) { + DRM_DEBUG( "buffer status idx : %d used: %d\n", i - 6, temp[i]); + } } -void i810_dma_cleanup(drm_device_t *dev) +static drm_buf_t *i810_freelist_get(drm_device_t *dev) { - printk(KERN_INFO "i810_dma_cleanup\n"); + drm_device_dma_t *dma = dev->dma; + int i; + int used; + + /* Linear search might not be the best solution */ + + for (i = 0; i < dma->buf_count; i++) { + drm_buf_t *buf = dma->buflist[ i ]; + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + /* In use is already a pointer */ + used = cmpxchg(buf_priv->in_use, I810_BUF_FREE, + I810_BUF_CLIENT); + if(used == I810_BUF_FREE) { + return buf; + } + } + return NULL; } -static inline void i810_dma_dispatch(drm_device_t *dev, unsigned long address, - unsigned 
long length) +/* This should only be called if the buffer is not sent to the hardware + * yet, the hardware updates in use for us once its on the ring buffer. + */ + +static int i810_freelist_put(drm_device_t *dev, drm_buf_t *buf) { - printk(KERN_INFO "i810_dma_dispatch\n"); + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + int used; + + /* In use is already a pointer */ + used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE); + if(used != I810_BUF_CLIENT) { + DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx); + return -EINVAL; + } + + return 0; } -static inline void i810_dma_quiescent(drm_device_t *dev) +static struct file_operations i810_buffer_fops = { + open: i810_open, + flush: drm_flush, + release: i810_release, + ioctl: i810_ioctl, + mmap: i810_mmap_buffers, + read: drm_read, + fasync: drm_fasync, + poll: drm_poll, +}; + +int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) { + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i810_private_t *dev_priv = dev->dev_private; + drm_buf_t *buf = dev_priv->mmap_buffer; + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + + vma->vm_flags |= (VM_IO | VM_DONTCOPY); + vma->vm_file = filp; + + buf_priv->currently_mapped = I810_BUF_MAPPED; + + if (remap_page_range(vma->vm_start, + VM_OFFSET(vma), + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) return -EAGAIN; + return 0; } -static inline void i810_dma_ready(drm_device_t *dev) +static int i810_map_buffer(drm_buf_t *buf, struct file *filp) { - i810_dma_quiescent(dev); - printk(KERN_INFO "i810_dma_ready\n"); + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + drm_i810_private_t *dev_priv = dev->dev_private; + struct file_operations *old_fops; + int retcode = 0; + + if(buf_priv->currently_mapped == I810_BUF_MAPPED) return -EINVAL; + down(¤t->mm->mmap_sem); + old_fops = filp->f_op; + filp->f_op = &i810_buffer_fops; + dev_priv->mmap_buffer = buf; + buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, + PROT_READ|PROT_WRITE, + MAP_SHARED, + buf->bus_address); + dev_priv->mmap_buffer = NULL; + filp->f_op = old_fops; + if ((unsigned long)buf_priv->virtual > -1024UL) { + /* Real error */ + DRM_DEBUG("mmap error\n"); + retcode = (signed int)buf_priv->virtual; + buf_priv->virtual = 0; + } + up(¤t->mm->mmap_sem); + return retcode; } -static inline int i810_dma_is_ready(drm_device_t *dev) +static int i810_unmap_buffer(drm_buf_t *buf) { + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + int retcode = 0; - i810_dma_quiescent(dev); + if(buf_priv->currently_mapped != I810_BUF_MAPPED) return -EINVAL; + down(¤t->mm->mmap_sem); + retcode = do_munmap(current->mm, (unsigned long)buf_priv->virtual, + (size_t) buf->total); + buf_priv->currently_mapped = I810_BUF_UNMAPPED; + buf_priv->virtual = 0; + up(¤t->mm->mmap_sem); - printk(KERN_INFO "i810_dma_is_ready\n"); - return 1; + return retcode; } - -static void i810_dma_service(int irq, void *device, struct pt_regs *regs) +static int i810_dma_get_buffer(drm_device_t *dev, drm_i810_dma_t *d, + struct file *filp) { - drm_device_t *dev = (drm_device_t *)device; - drm_device_dma_t *dma = dev->dma; - - atomic_inc(&dev->total_irq); - if (i810_dma_is_ready(dev)) { - /* Free previous buffer */ - if (test_and_set_bit(0, &dev->dma_flag)) { - atomic_inc(&dma->total_missed_free); - return; - } - if (dma->this_buffer) { - drm_free_buffer(dev, dma->this_buffer); - dma->this_buffer = NULL; - } - clear_bit(0, &dev->dma_flag); - - 
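The rewritten buffer handling above drops the old software DMA scheduler in favour of a per-buffer ownership word: i810_freelist_get() claims a free buffer for a client with cmpxchg(), and i810_freelist_put() hands it back, so no global lock is needed on the fast path. Below is a minimal user-space sketch of that hand-off; the GCC __sync builtin stands in for the kernel's cmpxchg() and the toy struct stands in for drm_buf_t, so names and layout are assumptions, not the driver's API.

#include <stdio.h>

#define BUF_HARDWARE 0
#define BUF_CLIENT   1
#define BUF_FREE     2

struct buf { int idx; int in_use; };

/* Claim the first free buffer, marking it owned by the client. */
static struct buf *freelist_get(struct buf *bufs, int count)
{
    int i;
    for (i = 0; i < count; i++) {
        int old = __sync_val_compare_and_swap(&bufs[i].in_use,
                                              BUF_FREE, BUF_CLIENT);
        if (old == BUF_FREE)
            return &bufs[i];
    }
    return NULL;                      /* every buffer is busy */
}

/* Return a client-owned buffer that was never handed to hardware. */
static int freelist_put(struct buf *b)
{
    int old = __sync_val_compare_and_swap(&b->in_use,
                                          BUF_CLIENT, BUF_FREE);
    return (old == BUF_CLIENT) ? 0 : -1;
}

int main(void)
{
    struct buf bufs[4] = { {0, BUF_FREE}, {1, BUF_FREE},
                           {2, BUF_CLIENT}, {3, BUF_FREE} };
    struct buf *b = freelist_get(bufs, 4);
    printf("claimed buffer %d\n", b ? b->idx : -1);
    if (b)
        freelist_put(b);
    return 0;
}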
/* Dispatch new buffer */ - queue_task(&dev->tq, &tq_immediate); - mark_bh(IMMEDIATE_BH); + drm_file_t *priv = filp->private_data; + drm_buf_t *buf; + drm_i810_buf_priv_t *buf_priv; + int retcode = 0; + + buf = i810_freelist_get(dev); + if (!buf) { + retcode = -ENOMEM; + DRM_DEBUG("%s retcode %d\n", __FUNCTION__, retcode); + goto out_get_buf; + } + + retcode = i810_map_buffer(buf, filp); + if(retcode) { + i810_freelist_put(dev, buf); + DRM_DEBUG("mapbuf failed in %s retcode %d\n", + __FUNCTION__, retcode); + goto out_get_buf; } + buf->pid = priv->pid; + buf_priv = buf->dev_private; + d->granted = 1; + d->request_idx = buf->idx; + d->request_size = buf->total; + d->virtual = buf_priv->virtual; + +out_get_buf: + return retcode; } -/* Only called by i810_dma_schedule. */ -static int i810_do_dma(drm_device_t *dev, int locked) +static unsigned long i810_alloc_page(drm_device_t *dev) { - unsigned long address; - unsigned long length; - drm_buf_t *buf; - int retcode = 0; - drm_device_dma_t *dma = dev->dma; -#if DRM_DMA_HISTOGRAM - cycles_t dma_start, dma_stop; -#endif - - if (test_and_set_bit(0, &dev->dma_flag)) { - atomic_inc(&dma->total_missed_dma); - return -EBUSY; - } + unsigned long address; + + address = __get_free_page(GFP_KERNEL); + if(address == 0UL) + return 0; -#if DRM_DMA_HISTOGRAM - dma_start = get_cycles(); -#endif - - if (!dma->next_buffer) { - DRM_ERROR("No next_buffer\n"); - clear_bit(0, &dev->dma_flag); - return -EINVAL; - } + atomic_inc(&mem_map[MAP_NR((void *) address)].count); + set_bit(PG_locked, &mem_map[MAP_NR((void *) address)].flags); + + return address; +} - buf = dma->next_buffer; - address = (unsigned long)buf->bus_address; - length = buf->used; +static void i810_free_page(drm_device_t *dev, unsigned long page) +{ + if(page == 0UL) + return; + atomic_dec(&mem_map[MAP_NR((void *) page)].count); + clear_bit(PG_locked, &mem_map[MAP_NR((void *) page)].flags); + wake_up(&mem_map[MAP_NR((void *) page)].wait); + free_page(page); + return; +} - DRM_DEBUG("context %d, buffer %d (%ld bytes)\n", - buf->context, buf->idx, length); - - if (buf->list == DRM_LIST_RECLAIM) { - drm_clear_next_buffer(dev); - drm_free_buffer(dev, buf); - clear_bit(0, &dev->dma_flag); - return -EINVAL; - } - - if (!length) { - DRM_ERROR("0 length buffer\n"); - drm_clear_next_buffer(dev); - drm_free_buffer(dev, buf); - clear_bit(0, &dev->dma_flag); - return 0; - } - - if (!i810_dma_is_ready(dev)) { - clear_bit(0, &dev->dma_flag); - return -EBUSY; - } +static int i810_dma_cleanup(drm_device_t *dev) +{ + drm_device_dma_t *dma = dev->dma; - if (buf->while_locked) { - if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { - DRM_ERROR("Dispatching buffer %d from pid %d" - " \"while locked\", but no lock held\n", - buf->idx, buf->pid); + if(dev->dev_private) { + int i; + drm_i810_private_t *dev_priv = + (drm_i810_private_t *) dev->dev_private; + + if(dev_priv->ring.virtual_start) { + drm_ioremapfree((void *) dev_priv->ring.virtual_start, + dev_priv->ring.Size); + } + if(dev_priv->hw_status_page != 0UL) { + i810_free_page(dev, dev_priv->hw_status_page); + /* Need to rewrite hardware status page */ + I810_WRITE(0x02080, 0x1ffff000); } - } else { - if (!locked && !drm_lock_take(&dev->lock.hw_lock->lock, - DRM_KERNEL_CONTEXT)) { - atomic_inc(&dma->total_missed_lock); - clear_bit(0, &dev->dma_flag); - return -EBUSY; + drm_free(dev->dev_private, sizeof(drm_i810_private_t), + DRM_MEM_DRIVER); + dev->dev_private = NULL; + + for (i = 0; i < dma->buf_count; i++) { + drm_buf_t *buf = dma->buflist[ i ]; + drm_i810_buf_priv_t 
*buf_priv = buf->dev_private; + drm_ioremapfree(buf_priv->kernel_virtual, buf->total); } } + return 0; +} - if (dev->last_context != buf->context - && !(dev->queuelist[buf->context]->flags - & _DRM_CONTEXT_PRESERVED)) { - /* PRE: dev->last_context != buf->context */ - if (drm_context_switch(dev, dev->last_context, buf->context)) { - drm_clear_next_buffer(dev); - drm_free_buffer(dev, buf); +static int i810_wait_ring(drm_device_t *dev, int n) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + drm_i810_ring_buffer_t *ring = &(dev_priv->ring); + int iters = 0; + unsigned long end; + unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR; + + end = jiffies + (HZ*3); + while (ring->space < n) { + int i; + + ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR; + ring->space = ring->head - (ring->tail+8); + if (ring->space < 0) ring->space += ring->Size; + + if (ring->head != last_head) + end = jiffies + (HZ*3); + + iters++; + if((signed)(end - jiffies) <= 0) { + DRM_ERROR("space: %d wanted %d\n", ring->space, n); + DRM_ERROR("lockup\n"); + goto out_wait_ring; } - retcode = -EBUSY; - goto cleanup; - - /* POST: we will wait for the context - switch and will dispatch on a later call - when dev->last_context == buf->context. - NOTE WE HOLD THE LOCK THROUGHOUT THIS - TIME! */ - } - - drm_clear_next_buffer(dev); - buf->pending = 1; - buf->waiting = 0; - buf->list = DRM_LIST_PEND; -#if DRM_DMA_HISTOGRAM - buf->time_dispatched = get_cycles(); -#endif - - i810_dma_dispatch(dev, address, length); - drm_free_buffer(dev, dma->this_buffer); - dma->this_buffer = buf; - atomic_add(length, &dma->total_bytes); - atomic_inc(&dma->total_dmas); - - if (!buf->while_locked && !dev->context_flag && !locked) { - if (drm_lock_free(dev, &dev->lock.hw_lock->lock, - DRM_KERNEL_CONTEXT)) { - DRM_ERROR("\n"); - } + for (i = 0 ; i < 2000 ; i++) ; } -cleanup: - clear_bit(0, &dev->dma_flag); - -#if DRM_DMA_HISTOGRAM - dma_stop = get_cycles(); - atomic_inc(&dev->histo.dma[drm_histogram_slot(dma_stop - dma_start)]); -#endif - - return retcode; +out_wait_ring: + return iters; } -static void i810_dma_schedule_timer_wrapper(unsigned long dev) +static void i810_kernel_lost_context(drm_device_t *dev) { - i810_dma_schedule((drm_device_t *)dev, 0); + drm_i810_private_t *dev_priv = dev->dev_private; + drm_i810_ring_buffer_t *ring = &(dev_priv->ring); + + ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR; + ring->tail = I810_READ(LP_RING + RING_TAIL); + ring->space = ring->head - (ring->tail+8); + if (ring->space < 0) ring->space += ring->Size; } -static void i810_dma_schedule_tq_wrapper(void *dev) +static int i810_freelist_init(drm_device_t *dev) { - i810_dma_schedule(dev, 0); + drm_device_dma_t *dma = dev->dma; + drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; + int my_idx = 24; + u32 *hw_status = (u32 *)(dev_priv->hw_status_page + my_idx); + int i; + + if(dma->buf_count > 1019) { + /* Not enough space in the status page for the freelist */ + return -EINVAL; + } + + for (i = 0; i < dma->buf_count; i++) { + drm_buf_t *buf = dma->buflist[ i ]; + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + + buf_priv->in_use = hw_status++; + buf_priv->my_use_idx = my_idx; + my_idx += 4; + + *buf_priv->in_use = I810_BUF_FREE; + + buf_priv->kernel_virtual = drm_ioremap(buf->bus_address, + buf->total); + } + return 0; } -int i810_dma_schedule(drm_device_t *dev, int locked) +static int i810_dma_initialize(drm_device_t *dev, + drm_i810_private_t *dev_priv, + drm_i810_init_t *init) { - int next; - 
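i810_wait_ring() and i810_kernel_lost_context() above both recompute the free space in the low-priority ring as head minus (tail + 8), adding the ring size back on wraparound so a full ring is never mistaken for an empty one. A small stand-alone sketch of that bookkeeping follows; plain ints model the head and tail registers, and the sizes are illustrative only.

#include <stdio.h>

struct ring { int head, tail, size; };

static int ring_space(const struct ring *r)
{
    int space = r->head - (r->tail + 8);   /* keep 8 bytes back */
    if (space < 0)
        space += r->size;                  /* tail is ahead of head: wrapped */
    return space;
}

int main(void)
{
    struct ring r = { .head = 64, .tail = 4032, .size = 4096 };
    printf("free bytes: %d\n", ring_space(&r));   /* 64 - 4040 + 4096 = 120 */
    return 0;
}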
drm_queue_t *q; - drm_buf_t *buf; - int retcode = 0; - int processed = 0; - int missed; - int expire = 20; - drm_device_dma_t *dma = dev->dma; -#if DRM_DMA_HISTOGRAM - cycles_t schedule_start; -#endif + drm_map_t *sarea_map; - if (test_and_set_bit(0, &dev->interrupt_flag)) { - /* Not reentrant */ - atomic_inc(&dma->total_missed_sched); - return -EBUSY; + dev->dev_private = (void *) dev_priv; + memset(dev_priv, 0, sizeof(drm_i810_private_t)); + + if (init->ring_map_idx >= dev->map_count || + init->buffer_map_idx >= dev->map_count) { + i810_dma_cleanup(dev); + DRM_ERROR("ring_map or buffer_map are invalid\n"); + return -EINVAL; + } + + dev_priv->ring_map_idx = init->ring_map_idx; + dev_priv->buffer_map_idx = init->buffer_map_idx; + sarea_map = dev->maplist[0]; + dev_priv->sarea_priv = (drm_i810_sarea_t *) + ((u8 *)sarea_map->handle + + init->sarea_priv_offset); + + atomic_set(&dev_priv->flush_done, 0); + init_waitqueue_head(&dev_priv->flush_queue); + + dev_priv->ring.Start = init->ring_start; + dev_priv->ring.End = init->ring_end; + dev_priv->ring.Size = init->ring_size; + + dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base + + init->ring_start, + init->ring_size); + + dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; + + if (dev_priv->ring.virtual_start == NULL) { + i810_dma_cleanup(dev); + DRM_ERROR("can not ioremap virtual address for" + " ring buffer\n"); + return -ENOMEM; } - missed = atomic_read(&dma->total_missed_sched); -#if DRM_DMA_HISTOGRAM - schedule_start = get_cycles(); -#endif + dev_priv->w = init->w; + dev_priv->h = init->h; + dev_priv->pitch = init->pitch; + dev_priv->back_offset = init->back_offset; + dev_priv->depth_offset = init->depth_offset; -again: - if (dev->context_flag) { - clear_bit(0, &dev->interrupt_flag); - return -EBUSY; + dev_priv->front_di1 = init->front_offset | init->pitch_bits; + dev_priv->back_di1 = init->back_offset | init->pitch_bits; + dev_priv->zi1 = init->depth_offset | init->pitch_bits; + + + /* Program Hardware Status Page */ + dev_priv->hw_status_page = i810_alloc_page(dev); + memset((void *) dev_priv->hw_status_page, 0, PAGE_SIZE); + if(dev_priv->hw_status_page == 0UL) { + i810_dma_cleanup(dev); + DRM_ERROR("Can not allocate hardware status page\n"); + return -ENOMEM; } - if (dma->next_buffer) { - /* Unsent buffer that was previously - selected, but that couldn't be sent - because the lock could not be obtained - or the DMA engine wasn't ready. Try - again. 
*/ - atomic_inc(&dma->total_tried); - if (!(retcode = i810_do_dma(dev, locked))) { - atomic_inc(&dma->total_hit); - ++processed; - } - } else { - do { - next = drm_select_queue(dev, - i810_dma_schedule_timer_wrapper); - if (next >= 0) { - q = dev->queuelist[next]; - buf = drm_waitlist_get(&q->waitlist); - dma->next_buffer = buf; - dma->next_queue = q; - if (buf && buf->list == DRM_LIST_RECLAIM) { - drm_clear_next_buffer(dev); - drm_free_buffer(dev, buf); - } - } - } while (next >= 0 && !dma->next_buffer); - if (dma->next_buffer) { - if (!(retcode = i810_do_dma(dev, locked))) { - ++processed; - } - } + DRM_DEBUG("hw status page @ %lx\n", dev_priv->hw_status_page); + + I810_WRITE(0x02080, virt_to_bus((void *)dev_priv->hw_status_page)); + DRM_DEBUG("Enabled hardware status page\n"); + + /* Now we need to init our freelist */ + if(i810_freelist_init(dev) != 0) { + i810_dma_cleanup(dev); + DRM_ERROR("Not enough space in the status page for" + " the freelist\n"); + return -ENOMEM; } + return 0; +} - if (--expire) { - if (missed != atomic_read(&dma->total_missed_sched)) { - atomic_inc(&dma->total_lost); - if (i810_dma_is_ready(dev)) goto again; - } - if (processed && i810_dma_is_ready(dev)) { - atomic_inc(&dma->total_lost); - processed = 0; - goto again; - } - } +int i810_dma_init(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i810_private_t *dev_priv; + drm_i810_init_t init; + int retcode = 0; - clear_bit(0, &dev->interrupt_flag); + copy_from_user_ret(&init, (drm_i810_init_t *)arg, + sizeof(init), -EFAULT); -#if DRM_DMA_HISTOGRAM - atomic_inc(&dev->histo.schedule[drm_histogram_slot(get_cycles() - - schedule_start)]); -#endif - return retcode; + switch(init.func) { + case I810_INIT_DMA: + dev_priv = drm_alloc(sizeof(drm_i810_private_t), + DRM_MEM_DRIVER); + if(dev_priv == NULL) return -ENOMEM; + retcode = i810_dma_initialize(dev, dev_priv, &init); + break; + case I810_CLEANUP_DMA: + retcode = i810_dma_cleanup(dev); + break; + default: + retcode = -EINVAL; + break; + } + + return retcode; } -static int i810_dma_priority(drm_device_t *dev, drm_dma_t *d) -{ - unsigned long address; - unsigned long length; - int must_free = 0; - int retcode = 0; - int i; - int idx; - drm_buf_t *buf; - drm_buf_t *last_buf = NULL; - drm_device_dma_t *dma = dev->dma; - DECLARE_WAITQUEUE(entry, current); - /* Turn off interrupt handling */ - while (test_and_set_bit(0, &dev->interrupt_flag)) { - schedule(); - if (signal_pending(current)) return -EINTR; - } - if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) { - while (!drm_lock_take(&dev->lock.hw_lock->lock, - DRM_KERNEL_CONTEXT)) { - schedule(); - if (signal_pending(current)) { - clear_bit(0, &dev->interrupt_flag); - return -EINTR; - } - } - ++must_free; + +/* Most efficient way to verify state for the i810 is as it is + * emitted. Non-conformant state is silently dropped. + * + * Use 'volatile' & local var tmp to force the emitted values to be + * identical to the verified ones. 
+ */ +static void i810EmitContextVerified( drm_device_t *dev, + volatile unsigned int *code ) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + int i, j = 0; + unsigned int tmp; + RING_LOCALS; + + BEGIN_LP_RING( I810_CTX_SETUP_SIZE ); + + OUT_RING( GFX_OP_COLOR_FACTOR ); + OUT_RING( code[I810_CTXREG_CF1] ); + + OUT_RING( GFX_OP_STIPPLE ); + OUT_RING( code[I810_CTXREG_ST1] ); + + for ( i = 4 ; i < I810_CTX_SETUP_SIZE ; i++ ) { + tmp = code[i]; + + if ((tmp & (7<<29)) == (3<<29) && + (tmp & (0x1f<<24)) < (0x1d<<24)) + { + OUT_RING( tmp ); + j++; + } } - atomic_inc(&dma->total_prio); - for (i = 0; i < d->send_count; i++) { - idx = d->send_indices[i]; - if (idx < 0 || idx >= dma->buf_count) { - DRM_ERROR("Index %d (of %d max)\n", - d->send_indices[i], dma->buf_count - 1); - continue; - } - buf = dma->buflist[ idx ]; - if (buf->pid != current->pid) { - DRM_ERROR("Process %d using buffer owned by %d\n", - current->pid, buf->pid); - retcode = -EINVAL; - goto cleanup; - } - if (buf->list != DRM_LIST_NONE) { - DRM_ERROR("Process %d using %d's buffer on list %d\n", - current->pid, buf->pid, buf->list); - retcode = -EINVAL; - goto cleanup; - } - /* This isn't a race condition on - buf->list, since our concern is the - buffer reclaim during the time the - process closes the /dev/drm? handle, so - it can't also be doing DMA. */ - buf->list = DRM_LIST_PRIO; - buf->used = d->send_sizes[i]; - buf->context = d->context; - buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED; - address = (unsigned long)buf->address; - length = buf->used; - if (!length) { - DRM_ERROR("0 length buffer\n"); - } - if (buf->pending) { - DRM_ERROR("Sending pending buffer:" - " buffer %d, offset %d\n", - d->send_indices[i], i); - retcode = -EINVAL; - goto cleanup; - } - if (buf->waiting) { - DRM_ERROR("Sending waiting buffer:" - " buffer %d, offset %d\n", - d->send_indices[i], i); - retcode = -EINVAL; - goto cleanup; + if (j & 1) + OUT_RING( 0 ); + + ADVANCE_LP_RING(); +} + +static void i810EmitTexVerified( drm_device_t *dev, + volatile unsigned int *code ) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + int i, j = 0; + unsigned int tmp; + RING_LOCALS; + + BEGIN_LP_RING( I810_TEX_SETUP_SIZE ); + + OUT_RING( GFX_OP_MAP_INFO ); + OUT_RING( code[I810_TEXREG_MI1] ); + OUT_RING( code[I810_TEXREG_MI2] ); + OUT_RING( code[I810_TEXREG_MI3] ); + + for ( i = 4 ; i < I810_TEX_SETUP_SIZE ; i++ ) { + tmp = code[i]; + + if ((tmp & (7<<29)) == (3<<29) && + (tmp & (0x1f<<24)) < (0x1d<<24)) + { + OUT_RING( tmp ); + j++; } - buf->pending = 1; + } - if (dev->last_context != buf->context - && !(dev->queuelist[buf->context]->flags - & _DRM_CONTEXT_PRESERVED)) { - add_wait_queue(&dev->context_wait, &entry); - current->state = TASK_INTERRUPTIBLE; - /* PRE: dev->last_context != buf->context */ - drm_context_switch(dev, dev->last_context, - buf->context); - /* POST: we will wait for the context - switch and will dispatch on a later call - when dev->last_context == buf->context. - NOTE WE HOLD THE LOCK THROUGHOUT THIS - TIME! 
*/ - schedule(); - current->state = TASK_RUNNING; - remove_wait_queue(&dev->context_wait, &entry); - if (signal_pending(current)) { - retcode = -EINTR; - goto cleanup; - } - if (dev->last_context != buf->context) { - DRM_ERROR("Context mismatch: %d %d\n", - dev->last_context, - buf->context); - } - } + if (j & 1) + OUT_RING( 0 ); -#if DRM_DMA_HISTOGRAM - buf->time_queued = get_cycles(); - buf->time_dispatched = buf->time_queued; -#endif - i810_dma_dispatch(dev, address, length); - if (drm_lock_free(dev, &dev->lock.hw_lock->lock, - DRM_KERNEL_CONTEXT)) { - DRM_ERROR("\n"); - } + ADVANCE_LP_RING(); +} - atomic_add(length, &dma->total_bytes); - atomic_inc(&dma->total_dmas); - - if (last_buf) { - drm_free_buffer(dev, last_buf); - } - last_buf = buf; + +/* Need to do some additional checking when setting the dest buffer. + */ +static void i810EmitDestVerified( drm_device_t *dev, + volatile unsigned int *code ) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + unsigned int tmp; + RING_LOCALS; + + BEGIN_LP_RING( I810_DEST_SETUP_SIZE + 2 ); + + tmp = code[I810_DESTREG_DI1]; + if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) { + OUT_RING( CMD_OP_DESTBUFFER_INFO ); + OUT_RING( tmp ); + } else + DRM_DEBUG("bad di1 %x (allow %x or %x)\n", + tmp, dev_priv->front_di1, dev_priv->back_di1); + + /* invarient: + */ + OUT_RING( CMD_OP_Z_BUFFER_INFO ); + OUT_RING( dev_priv->zi1 ); + + OUT_RING( GFX_OP_DESTBUFFER_VARS ); + OUT_RING( code[I810_DESTREG_DV1] ); + + OUT_RING( GFX_OP_DRAWRECT_INFO ); + OUT_RING( code[I810_DESTREG_DR1] ); + OUT_RING( code[I810_DESTREG_DR2] ); + OUT_RING( code[I810_DESTREG_DR3] ); + OUT_RING( code[I810_DESTREG_DR4] ); + OUT_RING( 0 ); + + ADVANCE_LP_RING(); +} + + + +static void i810EmitState( drm_device_t *dev ) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; + unsigned int dirty = sarea_priv->dirty; + + if (dirty & I810_UPLOAD_BUFFERS) { + i810EmitDestVerified( dev, sarea_priv->BufferState ); + sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS; } + if (dirty & I810_UPLOAD_CTX) { + i810EmitContextVerified( dev, sarea_priv->ContextState ); + sarea_priv->dirty &= ~I810_UPLOAD_CTX; + } -cleanup: - if (last_buf) { - i810_dma_ready(dev); - drm_free_buffer(dev, last_buf); + if (dirty & I810_UPLOAD_TEX0) { + i810EmitTexVerified( dev, sarea_priv->TexState[0] ); + sarea_priv->dirty &= ~I810_UPLOAD_TEX0; } - - if (must_free && !dev->context_flag) { - if (drm_lock_free(dev, &dev->lock.hw_lock->lock, - DRM_KERNEL_CONTEXT)) { - DRM_ERROR("\n"); - } + + if (dirty & I810_UPLOAD_TEX1) { + i810EmitTexVerified( dev, sarea_priv->TexState[1] ); + sarea_priv->dirty &= ~I810_UPLOAD_TEX1; } - clear_bit(0, &dev->interrupt_flag); - return retcode; } -static int i810_dma_send_buffers(drm_device_t *dev, drm_dma_t *d) + + +/* need to verify + */ +static void i810_dma_dispatch_clear( drm_device_t *dev, int flags, + unsigned int clear_color, + unsigned int clear_zval ) { - DECLARE_WAITQUEUE(entry, current); - drm_buf_t *last_buf = NULL; - int retcode = 0; - drm_device_dma_t *dma = dev->dma; + drm_i810_private_t *dev_priv = dev->dev_private; + drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; + int nbox = sarea_priv->nbox; + drm_clip_rect_t *pbox = sarea_priv->boxes; + int pitch = dev_priv->pitch; + int cpp = 2; + int i; + RING_LOCALS; + + i810_kernel_lost_context(dev); + + if (nbox > I810_NR_SAREA_CLIPRECTS) + nbox = I810_NR_SAREA_CLIPRECTS; + + for (i = 0 ; i < nbox ; i++, pbox++) { + unsigned int x = pbox->x1; + unsigned int y = 
pbox->y1; + unsigned int width = (pbox->x2 - x) * cpp; + unsigned int height = pbox->y2 - y; + unsigned int start = y * pitch + x * cpp; + + if (pbox->x1 > pbox->x2 || + pbox->y1 > pbox->y2 || + pbox->x2 > dev_priv->w || + pbox->y2 > dev_priv->h) + continue; - if (d->flags & _DRM_DMA_BLOCK) { - last_buf = dma->buflist[d->send_indices[d->send_count-1]]; - add_wait_queue(&last_buf->dma_wait, &entry); - } - - if ((retcode = drm_dma_enqueue(dev, d))) { - if (d->flags & _DRM_DMA_BLOCK) - remove_wait_queue(&last_buf->dma_wait, &entry); - return retcode; - } - - i810_dma_schedule(dev, 0); - - if (d->flags & _DRM_DMA_BLOCK) { - DRM_DEBUG("%d waiting\n", current->pid); - current->state = TASK_INTERRUPTIBLE; - for (;;) { - if (!last_buf->waiting - && !last_buf->pending) - break; /* finished */ - schedule(); - if (signal_pending(current)) { - retcode = -EINTR; /* Can't restart */ - break; - } + if ( flags & I810_FRONT ) { + DRM_DEBUG("clear front\n"); + BEGIN_LP_RING( 6 ); + OUT_RING( BR00_BITBLT_CLIENT | + BR00_OP_COLOR_BLT | 0x3 ); + OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch ); + OUT_RING( (height << 16) | width ); + OUT_RING( start ); + OUT_RING( clear_color ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); } - current->state = TASK_RUNNING; - DRM_DEBUG("%d running\n", current->pid); - remove_wait_queue(&last_buf->dma_wait, &entry); - if (!retcode - || (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) { - if (!waitqueue_active(&last_buf->dma_wait)) { - drm_free_buffer(dev, last_buf); - } + + if ( flags & I810_BACK ) { + DRM_DEBUG("clear back\n"); + BEGIN_LP_RING( 6 ); + OUT_RING( BR00_BITBLT_CLIENT | + BR00_OP_COLOR_BLT | 0x3 ); + OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch ); + OUT_RING( (height << 16) | width ); + OUT_RING( dev_priv->back_offset + start ); + OUT_RING( clear_color ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); } - if (retcode) { - DRM_ERROR("ctx%d w%d p%d c%d i%d l%d %d/%d\n", - d->context, - last_buf->waiting, - last_buf->pending, - DRM_WAITCOUNT(dev, d->context), - last_buf->idx, - last_buf->list, - last_buf->pid, - current->pid); + + if ( flags & I810_DEPTH ) { + DRM_DEBUG("clear depth\n"); + BEGIN_LP_RING( 6 ); + OUT_RING( BR00_BITBLT_CLIENT | + BR00_OP_COLOR_BLT | 0x3 ); + OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch ); + OUT_RING( (height << 16) | width ); + OUT_RING( dev_priv->depth_offset + start ); + OUT_RING( clear_zval ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); } } - return retcode; } -int i810_dma(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +static void i810_dma_dispatch_swap( drm_device_t *dev ) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - drm_device_dma_t *dma = dev->dma; - int retcode = 0; - drm_dma_t d; + drm_i810_private_t *dev_priv = dev->dev_private; + drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; + int nbox = sarea_priv->nbox; + drm_clip_rect_t *pbox = sarea_priv->boxes; + int pitch = dev_priv->pitch; + int cpp = 2; + int ofs = dev_priv->back_offset; + int i; + RING_LOCALS; + + DRM_DEBUG("swapbuffers\n"); + + i810_kernel_lost_context(dev); + + if (nbox > I810_NR_SAREA_CLIPRECTS) + nbox = I810_NR_SAREA_CLIPRECTS; + + for (i = 0 ; i < nbox; i++, pbox++) + { + unsigned int w = pbox->x2 - pbox->x1; + unsigned int h = pbox->y2 - pbox->y1; + unsigned int dst = pbox->x1*cpp + pbox->y1*pitch; + unsigned int start = ofs + dst; + + if (pbox->x1 > pbox->x2 || + pbox->y1 > pbox->y2 || + pbox->x2 > dev_priv->w || + pbox->y2 > dev_priv->h) + continue; + + DRM_DEBUG("dispatch swap 
%d,%d-%d,%d!\n", + pbox[i].x1, pbox[i].y1, + pbox[i].x2, pbox[i].y2); + + BEGIN_LP_RING( 6 ); + OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4 ); + OUT_RING( pitch | (0xCC << 16)); + OUT_RING( (h << 16) | (w * cpp)); + OUT_RING( dst ); + OUT_RING( pitch ); + OUT_RING( start ); + ADVANCE_LP_RING(); + } +} - printk("i810_dma start\n"); - copy_from_user_ret(&d, (drm_dma_t *)arg, sizeof(d), -EFAULT); - DRM_DEBUG("%d %d: %d send, %d req\n", - current->pid, d.context, d.send_count, d.request_count); - if (d.context == DRM_KERNEL_CONTEXT || d.context >= dev->queue_slots) { - DRM_ERROR("Process %d using context %d\n", - current->pid, d.context); - return -EINVAL; +static void i810_dma_dispatch_vertex(drm_device_t *dev, + drm_buf_t *buf, + int discard, + int used) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_clip_rect_t *box = sarea_priv->boxes; + int nbox = sarea_priv->nbox; + unsigned long address = (unsigned long)buf->bus_address; + unsigned long start = address - dev->agp->base; + int i = 0, u; + RING_LOCALS; + + i810_kernel_lost_context(dev); + + if (nbox > I810_NR_SAREA_CLIPRECTS) + nbox = I810_NR_SAREA_CLIPRECTS; + + if (discard) { + u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, + I810_BUF_HARDWARE); + if(u != I810_BUF_CLIENT) { + DRM_DEBUG("xxxx 2\n"); + } } - if (d.send_count < 0 || d.send_count > dma->buf_count) { - DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n", - current->pid, d.send_count, dma->buf_count); - return -EINVAL; + if (used > 4*1024) + used = 0; + + if (sarea_priv->dirty) + i810EmitState( dev ); + + DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n", + address, used, nbox); + + dev_priv->counter++; + DRM_DEBUG( "dispatch counter : %ld\n", dev_priv->counter); + DRM_DEBUG( "i810_dma_dispatch\n"); + DRM_DEBUG( "start : %lx\n", start); + DRM_DEBUG( "used : %d\n", used); + DRM_DEBUG( "start + used - 4 : %ld\n", start + used - 4); + + if (buf_priv->currently_mapped == I810_BUF_MAPPED) { + *(u32 *)buf_priv->virtual = (GFX_OP_PRIMITIVE | + sarea_priv->vertex_prim | + ((used/4)-2)); + + if (used & 4) { + *(u32 *)((u32)buf_priv->virtual + used) = 0; + used += 4; + } + + i810_unmap_buffer(buf); } - if (d.request_count < 0 || d.request_count > dma->buf_count) { - DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", - current->pid, d.request_count, dma->buf_count); - return -EINVAL; + + if (used) { + do { + if (i < nbox) { + BEGIN_LP_RING(4); + OUT_RING( GFX_OP_SCISSOR | SC_UPDATE_SCISSOR | + SC_ENABLE ); + OUT_RING( GFX_OP_SCISSOR_INFO ); + OUT_RING( box[i].x1 | (box[i].y1<<16) ); + OUT_RING( (box[i].x2-1) | ((box[i].y2-1)<<16) ); + ADVANCE_LP_RING(); + } + + BEGIN_LP_RING(4); + OUT_RING( CMD_OP_BATCH_BUFFER ); + OUT_RING( start | BB1_PROTECTED ); + OUT_RING( start + used - 4 ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); + + } while (++i < nbox); } - if (d.send_count) { -#if 0 - if (d.flags & _DRM_DMA_PRIORITY) - retcode = i810_dma_priority(dev, &d); - else - retcode = i810_dma_send_buffers(dev, &d); -#endif - printk("i810_dma priority\n"); - - retcode = i810_dma_priority(dev, &d); + BEGIN_LP_RING(10); + OUT_RING( CMD_STORE_DWORD_IDX ); + OUT_RING( 20 ); + OUT_RING( dev_priv->counter ); + OUT_RING( 0 ); + + if (discard) { + OUT_RING( CMD_STORE_DWORD_IDX ); + OUT_RING( buf_priv->my_use_idx ); + OUT_RING( I810_BUF_FREE ); + OUT_RING( 0 ); } - d.granted_count = 0; + OUT_RING( CMD_REPORT_HEAD ); + OUT_RING( 0 ); + 
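The BEGIN_LP_RING / OUT_RING / ADVANCE_LP_RING macros used throughout these dispatch routines write dwords at a software tail that wraps through a power-of-two mask, and only publish the new tail at ADVANCE time (in the real driver, a write to the LP_RING tail register). A user-space sketch of the same pattern, with an in-memory array standing in for the ring and made-up sizes:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define RING_BYTES 64                   /* must be a power of two */

struct lp_ring {
    uint8_t  virt[RING_BYTES];
    unsigned tail;
    unsigned tail_mask;
};

static void out_ring(struct lp_ring *r, unsigned *outring, uint32_t v)
{
    memcpy(r->virt + *outring, &v, sizeof(v));   /* emit one dword */
    *outring = (*outring + 4) & r->tail_mask;    /* wrap at the ring end */
}

int main(void)
{
    struct lp_ring ring = { .tail = 60, .tail_mask = RING_BYTES - 1 };
    unsigned outring = ring.tail;                /* BEGIN_LP_RING */

    out_ring(&ring, &outring, 0xdeadbeef);       /* lands at offset 60 */
    out_ring(&ring, &outring, 0xcafef00d);       /* wraps to offset 0 */

    ring.tail = outring;                         /* ADVANCE_LP_RING publishes */
    printf("new tail = %u\n", ring.tail);        /* prints 4 */
    return 0;
}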
ADVANCE_LP_RING(); +} - if (!retcode && d.request_count) { - retcode = drm_dma_get_buffers(dev, &d); - } - DRM_DEBUG("%d returning, granted = %d\n", - current->pid, d.granted_count); - copy_to_user_ret((drm_dma_t *)arg, &d, sizeof(d), -EFAULT); +/* Interrupts are only for flushing */ +static void i810_dma_service(int irq, void *device, struct pt_regs *regs) +{ + drm_device_t *dev = (drm_device_t *)device; + u16 temp; + + atomic_inc(&dev->total_irq); + temp = I810_READ16(I810REG_INT_IDENTITY_R); + temp = temp & ~(0x6000); + if(temp != 0) I810_WRITE16(I810REG_INT_IDENTITY_R, + temp); /* Clear all interrupts */ + else + return; + + queue_task(&dev->tq, &tq_immediate); + mark_bh(IMMEDIATE_BH); +} - printk("i810_dma end (granted)\n"); - return retcode; +static void i810_dma_task_queue(void *device) +{ + drm_device_t *dev = (drm_device_t *) device; + drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; + + atomic_set(&dev_priv->flush_done, 1); + wake_up_interruptible(&dev_priv->flush_queue); } int i810_irq_install(drm_device_t *dev, int irq) { int retcode; - + u16 temp; + if (!irq) return -EINVAL; down(&dev->struct_sem); @@ -591,6 +894,7 @@ int i810_irq_install(drm_device_t *dev, int irq) dev->irq = irq; up(&dev->struct_sem); + DRM_DEBUG( "Interrupt Install : %d\n", irq); DRM_DEBUG("%d\n", irq); dev->context_flag = 0; @@ -603,16 +907,25 @@ int i810_irq_install(drm_device_t *dev, int irq) dev->tq.next = NULL; dev->tq.sync = 0; - dev->tq.routine = i810_dma_schedule_tq_wrapper; + dev->tq.routine = i810_dma_task_queue; dev->tq.data = dev; - /* Before installing handler */ - /* TODO */ + temp = I810_READ16(I810REG_HWSTAM); + temp = temp & 0x6000; + I810_WRITE16(I810REG_HWSTAM, temp); + + temp = I810_READ16(I810REG_INT_MASK_R); + temp = temp & 0x6000; + I810_WRITE16(I810REG_INT_MASK_R, temp); /* Unmask interrupts */ + temp = I810_READ16(I810REG_INT_ENABLE_R); + temp = temp & 0x6000; + I810_WRITE16(I810REG_INT_ENABLE_R, temp); /* Disable all interrupts */ + /* Install handler */ if ((retcode = request_irq(dev->irq, i810_dma_service, - 0, + SA_SHIRQ, dev->devname, dev))) { down(&dev->struct_sem); @@ -620,15 +933,21 @@ int i810_irq_install(drm_device_t *dev, int irq) up(&dev->struct_sem); return retcode; } - - /* After installing handler */ - /* TODO */ + temp = I810_READ16(I810REG_INT_ENABLE_R); + temp = temp & 0x6000; + temp = temp | 0x0003; + I810_WRITE16(I810REG_INT_ENABLE_R, + temp); /* Enable bp & user interrupts */ return 0; } int i810_irq_uninstall(drm_device_t *dev) { int irq; + u16 temp; + + +/* return 0; */ down(&dev->struct_sem); irq = dev->irq; @@ -636,16 +955,25 @@ int i810_irq_uninstall(drm_device_t *dev) up(&dev->struct_sem); if (!irq) return -EINVAL; - + + DRM_DEBUG( "Interrupt UnInstall: %d\n", irq); DRM_DEBUG("%d\n", irq); - - /* TODO : Disable interrupts */ + + temp = I810_READ16(I810REG_INT_IDENTITY_R); + temp = temp & ~(0x6000); + if(temp != 0) I810_WRITE16(I810REG_INT_IDENTITY_R, + temp); /* Clear all interrupts */ + + temp = I810_READ16(I810REG_INT_ENABLE_R); + temp = temp & 0x6000; + I810_WRITE16(I810REG_INT_ENABLE_R, + temp); /* Disable all interrupts */ + free_irq(irq, dev); return 0; } - int i810_control(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { @@ -654,8 +982,7 @@ int i810_control(struct inode *inode, struct file *filp, unsigned int cmd, drm_control_t ctl; int retcode; - printk(KERN_INFO "i810_control\n"); - i810_dma_init(dev); + DRM_DEBUG( "i810_control\n"); copy_from_user_ret(&ctl, (drm_control_t *)arg, sizeof(ctl), 
-EFAULT); @@ -674,20 +1001,161 @@ int i810_control(struct inode *inode, struct file *filp, unsigned int cmd, return 0; } +static inline void i810_dma_emit_flush(drm_device_t *dev) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + i810_kernel_lost_context(dev); + + BEGIN_LP_RING(2); + OUT_RING( CMD_REPORT_HEAD ); + OUT_RING( GFX_OP_USER_INTERRUPT ); + ADVANCE_LP_RING(); + +/* i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */ +/* atomic_set(&dev_priv->flush_done, 1); */ +/* wake_up_interruptible(&dev_priv->flush_queue); */ +} + +static inline void i810_dma_quiescent_emit(drm_device_t *dev) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + i810_kernel_lost_context(dev); + + BEGIN_LP_RING(4); + OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE ); + OUT_RING( CMD_REPORT_HEAD ); + OUT_RING( 0 ); + OUT_RING( GFX_OP_USER_INTERRUPT ); + ADVANCE_LP_RING(); + +/* i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */ +/* atomic_set(&dev_priv->flush_done, 1); */ +/* wake_up_interruptible(&dev_priv->flush_queue); */ +} + +static void i810_dma_quiescent(drm_device_t *dev) +{ + DECLARE_WAITQUEUE(entry, current); + drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; + unsigned long end; + + if(dev_priv == NULL) { + return; + } + atomic_set(&dev_priv->flush_done, 0); + current->state = TASK_INTERRUPTIBLE; + add_wait_queue(&dev_priv->flush_queue, &entry); + end = jiffies + (HZ*3); + + for (;;) { + i810_dma_quiescent_emit(dev); + if (atomic_read(&dev_priv->flush_done) == 1) break; + if((signed)(end - jiffies) <= 0) { + DRM_ERROR("lockup\n"); + break; + } + schedule_timeout(HZ*3); + if (signal_pending(current)) { + break; + } + } + + current->state = TASK_RUNNING; + remove_wait_queue(&dev_priv->flush_queue, &entry); + + return; +} + +static int i810_flush_queue(drm_device_t *dev) +{ + DECLARE_WAITQUEUE(entry, current); + drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; + drm_device_dma_t *dma = dev->dma; + unsigned long end; + int i, ret = 0; + + if(dev_priv == NULL) { + return 0; + } + atomic_set(&dev_priv->flush_done, 0); + current->state = TASK_INTERRUPTIBLE; + add_wait_queue(&dev_priv->flush_queue, &entry); + end = jiffies + (HZ*3); + for (;;) { + i810_dma_emit_flush(dev); + if (atomic_read(&dev_priv->flush_done) == 1) break; + if((signed)(end - jiffies) <= 0) { + DRM_ERROR("lockup\n"); + break; + } + schedule_timeout(HZ*3); + if (signal_pending(current)) { + ret = -EINTR; /* Can't restart */ + break; + } + } + + current->state = TASK_RUNNING; + remove_wait_queue(&dev_priv->flush_queue, &entry); + + + for (i = 0; i < dma->buf_count; i++) { + drm_buf_t *buf = dma->buflist[ i ]; + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + + int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE, + I810_BUF_FREE); + + if (used == I810_BUF_HARDWARE) + DRM_DEBUG("reclaimed from HARDWARE\n"); + if (used == I810_BUF_CLIENT) + DRM_DEBUG("still on client HARDWARE\n"); + } + + return ret; +} + +/* Must be called with the lock held */ +void i810_reclaim_buffers(drm_device_t *dev, pid_t pid) +{ + drm_device_dma_t *dma = dev->dma; + int i; + + if (!dma) return; + if (!dev->dev_private) return; + if (!dma->buflist) return; + + i810_flush_queue(dev); + + for (i = 0; i < dma->buf_count; i++) { + drm_buf_t *buf = dma->buflist[ i ]; + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + + if (buf->pid == pid && buf_priv) { + int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, + I810_BUF_FREE); + + if (used == I810_BUF_CLIENT) + 
DRM_DEBUG("reclaimed from client\n"); + if(buf_priv->currently_mapped == I810_BUF_MAPPED) + buf_priv->currently_mapped = I810_BUF_UNMAPPED; + } + } +} + int i810_lock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; + DECLARE_WAITQUEUE(entry, current); int ret = 0; drm_lock_t lock; - drm_queue_t *q; -#if DRM_DMA_HISTOGRAM - cycles_t start; - - dev->lck_start = start = get_cycles(); -#endif copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); @@ -696,26 +1164,18 @@ int i810_lock(struct inode *inode, struct file *filp, unsigned int cmd, current->pid, lock.context); return -EINVAL; } + + DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", + lock.context, current->pid, dev->lock.hw_lock->lock, + lock.flags); - if (lock.context < 0 || lock.context >= dev->queue_count) { + if (lock.context < 0) { return -EINVAL; } - q = dev->queuelist[lock.context]; - - ret = drm_flush_block_and_flush(dev, lock.context, lock.flags); + /* Only one queue: + */ if (!ret) { - if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) - != lock.context) { - long j = jiffies - dev->lock.lock_time; - - if (j > 0 && j <= DRM_LOCK_SLICE) { - /* Can't take lock if we just had it and - there is contention. */ - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(j); - } - } add_wait_queue(&dev->lock.lock_queue, &entry); for (;;) { if (!dev->lock.hw_lock) { @@ -728,13 +1188,13 @@ int i810_lock(struct inode *inode, struct file *filp, unsigned int cmd, dev->lock.pid = current->pid; dev->lock.lock_time = jiffies; atomic_inc(&dev->total_locks); - atomic_inc(&q->total_locks); break; /* Got lock */ } /* Contention */ atomic_inc(&dev->total_sleeps); current->state = TASK_INTERRUPTIBLE; + DRM_DEBUG("Calling lock schedule\n"); schedule(); if (signal_pending(current)) { ret = -ERESTARTSYS; @@ -744,19 +1204,153 @@ int i810_lock(struct inode *inode, struct file *filp, unsigned int cmd, current->state = TASK_RUNNING; remove_wait_queue(&dev->lock.lock_queue, &entry); } - - drm_flush_unblock(dev, lock.context, lock.flags); /* cleanup phase */ if (!ret) { - if (lock.flags & _DRM_LOCK_READY) - i810_dma_ready(dev); - if (lock.flags & _DRM_LOCK_QUIESCENT) - i810_dma_quiescent(dev); + if (lock.flags & _DRM_LOCK_QUIESCENT) { + DRM_DEBUG("_DRM_LOCK_QUIESCENT\n"); + DRM_DEBUG("fred\n"); + i810_dma_quiescent(dev); + } } + DRM_DEBUG("%d %s\n", lock.context, ret ? 
"interrupted" : "has lock"); + return ret; +} -#if DRM_DMA_HISTOGRAM - atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]); -#endif +int i810_flush_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + + DRM_DEBUG("i810_flush_ioctl\n"); + if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("i810_flush_ioctl called without lock held\n"); + return -EINVAL; + } + + i810_flush_queue(dev); + return 0; +} + + +int i810_dma_vertex(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_device_dma_t *dma = dev->dma; + drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; + u32 *hw_status = (u32 *)dev_priv->hw_status_page; + drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) + dev_priv->sarea_priv; + drm_i810_vertex_t vertex; + + copy_from_user_ret(&vertex, (drm_i810_vertex_t *)arg, sizeof(vertex), + -EFAULT); + + if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("i810_dma_vertex called without lock held\n"); + return -EINVAL; + } + + DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n", + vertex.idx, vertex.used, vertex.discard); + + i810_dma_dispatch_vertex( dev, + dma->buflist[ vertex.idx ], + vertex.discard, vertex.used ); + + atomic_add(vertex.used, &dma->total_bytes); + atomic_inc(&dma->total_dmas); + sarea_priv->last_enqueue = dev_priv->counter-1; + sarea_priv->last_dispatch = (int) hw_status[5]; + + return 0; +} + + + +int i810_clear_bufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i810_clear_t clear; + + copy_from_user_ret(&clear, (drm_i810_clear_t *)arg, sizeof(clear), + -EFAULT); + + if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("i810_clear_bufs called without lock held\n"); + return -EINVAL; + } + + i810_dma_dispatch_clear( dev, clear.flags, + clear.clear_color, + clear.clear_depth ); + return 0; +} + +int i810_swap_bufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + + DRM_DEBUG("i810_swap_bufs\n"); + + if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("i810_swap_buf called without lock held\n"); + return -EINVAL; + } + + i810_dma_dispatch_swap( dev ); + return 0; +} + +int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; + u32 *hw_status = (u32 *)dev_priv->hw_status_page; + drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) + dev_priv->sarea_priv; + + sarea_priv->last_dispatch = (int) hw_status[5]; + return 0; +} + +int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + int retcode = 0; + drm_i810_dma_t d; + drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; + u32 *hw_status = (u32 *)dev_priv->hw_status_page; + drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) + dev_priv->sarea_priv; + + DRM_DEBUG("getbuf\n"); + copy_from_user_ret(&d, (drm_i810_dma_t *)arg, sizeof(d), -EFAULT); + + if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + 
DRM_ERROR("i810_dma called without lock held\n"); + return -EINVAL; + } - return ret; + d.granted = 0; + + retcode = i810_dma_get_buffer(dev, &d, filp); + + DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", + current->pid, retcode, d.granted); + + copy_to_user_ret((drm_dma_t *)arg, &d, sizeof(d), -EFAULT); + sarea_priv->last_dispatch = (int) hw_status[5]; + + return retcode; } diff --git a/linux/i810_drm.h b/linux/i810_drm.h new file mode 100644 index 00000000..4c8e09f6 --- /dev/null +++ b/linux/i810_drm.h @@ -0,0 +1,188 @@ +#ifndef _I810_DRM_H_ +#define _I810_DRM_H_ + +/* WARNING: These defines must be the same as what the Xserver uses. + * if you change them, you must change the defines in the Xserver. + */ + +#ifndef _I810_DEFINES_ +#define _I810_DEFINES_ + +#define I810_DMA_BUF_ORDER 12 +#define I810_DMA_BUF_SZ (1<<I810_DMA_BUF_ORDER) +#define I810_DMA_BUF_NR 256 +#define I810_NR_SAREA_CLIPRECTS 8 + +/* Each region is a minimum of 64k, and there are at most 64 of them. + */ +#define I810_NR_TEX_REGIONS 64 +#define I810_LOG_MIN_TEX_REGION_SIZE 16 +#endif + +#define I810_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */ +#define I810_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */ +#define I810_UPLOAD_CTX 0x4 +#define I810_UPLOAD_BUFFERS 0x8 +#define I810_UPLOAD_TEX0 0x10 +#define I810_UPLOAD_TEX1 0x20 +#define I810_UPLOAD_CLIPRECTS 0x40 + + +/* Indices into buf.Setup where various bits of state are mirrored per + * context and per buffer. These can be fired at the card as a unit, + * or in a piecewise fashion as required. + */ + +/* Destbuffer state + * - backbuffer linear offset and pitch -- invarient in the current dri + * - zbuffer linear offset and pitch -- also invarient + * - drawing origin in back and depth buffers. + * + * Keep the depth/back buffer state here to acommodate private buffers + * in the future. + */ +#define I810_DESTREG_DI0 0 /* CMD_OP_DESTBUFFER_INFO (2 dwords) */ +#define I810_DESTREG_DI1 1 +#define I810_DESTREG_DV0 2 /* GFX_OP_DESTBUFFER_VARS (2 dwords) */ +#define I810_DESTREG_DV1 3 +#define I810_DESTREG_DR0 4 /* GFX_OP_DRAWRECT_INFO (4 dwords) */ +#define I810_DESTREG_DR1 5 +#define I810_DESTREG_DR2 6 +#define I810_DESTREG_DR3 7 +#define I810_DESTREG_DR4 8 +#define I810_DEST_SETUP_SIZE 10 + +/* Context state + */ +#define I810_CTXREG_CF0 0 /* GFX_OP_COLOR_FACTOR */ +#define I810_CTXREG_CF1 1 +#define I810_CTXREG_ST0 2 /* GFX_OP_STIPPLE */ +#define I810_CTXREG_ST1 3 +#define I810_CTXREG_VF 4 /* GFX_OP_VERTEX_FMT */ +#define I810_CTXREG_MT 5 /* GFX_OP_MAP_TEXELS */ +#define I810_CTXREG_MC0 6 /* GFX_OP_MAP_COLOR_STAGES - stage 0 */ +#define I810_CTXREG_MC1 7 /* GFX_OP_MAP_COLOR_STAGES - stage 1 */ +#define I810_CTXREG_MC2 8 /* GFX_OP_MAP_COLOR_STAGES - stage 2 */ +#define I810_CTXREG_MA0 9 /* GFX_OP_MAP_ALPHA_STAGES - stage 0 */ +#define I810_CTXREG_MA1 10 /* GFX_OP_MAP_ALPHA_STAGES - stage 1 */ +#define I810_CTXREG_MA2 11 /* GFX_OP_MAP_ALPHA_STAGES - stage 2 */ +#define I810_CTXREG_SDM 12 /* GFX_OP_SRC_DEST_MONO */ +#define I810_CTXREG_FOG 13 /* GFX_OP_FOG_COLOR */ +#define I810_CTXREG_B1 14 /* GFX_OP_BOOL_1 */ +#define I810_CTXREG_B2 15 /* GFX_OP_BOOL_2 */ +#define I810_CTXREG_LCS 16 /* GFX_OP_LINEWIDTH_CULL_SHADE_MODE */ +#define I810_CTXREG_PV 17 /* GFX_OP_PV_RULE -- Invarient! 
*/ +#define I810_CTXREG_ZA 18 /* GFX_OP_ZBIAS_ALPHAFUNC */ +#define I810_CTXREG_AA 19 /* GFX_OP_ANTIALIAS */ +#define I810_CTX_SETUP_SIZE 20 + +/* Texture state (per tex unit) + */ +#define I810_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (4 dwords) */ +#define I810_TEXREG_MI1 1 +#define I810_TEXREG_MI2 2 +#define I810_TEXREG_MI3 3 +#define I810_TEXREG_MF 4 /* GFX_OP_MAP_FILTER */ +#define I810_TEXREG_MLC 5 /* GFX_OP_MAP_LOD_CTL */ +#define I810_TEXREG_MLL 6 /* GFX_OP_MAP_LOD_LIMITS */ +#define I810_TEXREG_MCS 7 /* GFX_OP_MAP_COORD_SETS ??? */ +#define I810_TEX_SETUP_SIZE 8 + +#define I810_FRONT 0x1 +#define I810_BACK 0x2 +#define I810_DEPTH 0x4 + + +typedef struct _drm_i810_init { + enum { + I810_INIT_DMA = 0x01, + I810_CLEANUP_DMA = 0x02 + } func; + int ring_map_idx; + int buffer_map_idx; + int sarea_priv_offset; + unsigned int ring_start; + unsigned int ring_end; + unsigned int ring_size; + unsigned int front_offset; + unsigned int back_offset; + unsigned int depth_offset; + unsigned int w; + unsigned int h; + unsigned int pitch; + unsigned int pitch_bits; +} drm_i810_init_t; + +/* Warning: If you change the SAREA structure you must change the Xserver + * structure as well */ + +typedef struct _drm_i810_tex_region { + unsigned char next, prev; /* indices to form a circular LRU */ + unsigned char in_use; /* owned by a client, or free? */ + int age; /* tracked by clients to update local LRU's */ +} drm_i810_tex_region_t; + +typedef struct _drm_i810_sarea { + unsigned int ContextState[I810_CTX_SETUP_SIZE]; + unsigned int BufferState[I810_DEST_SETUP_SIZE]; + unsigned int TexState[2][I810_TEX_SETUP_SIZE]; + unsigned int dirty; + + unsigned int nbox; + drm_clip_rect_t boxes[I810_NR_SAREA_CLIPRECTS]; + + /* Maintain an LRU of contiguous regions of texture space. If + * you think you own a region of texture memory, and it has an + * age different to the one you set, then you are mistaken and + * it has been stolen by another client. If global texAge + * hasn't changed, there is no need to walk the list. + * + * These regions can be used as a proxy for the fine-grained + * texture information of other clients - by maintaining them + * in the same lru which is used to age their own textures, + * clients have an approximate lru for the whole of global + * texture space, and can make informed decisions as to which + * areas to kick out. There is no need to choose whether to + * kick out your own texture or someone else's - simply eject + * them all in LRU order. + */ + + drm_i810_tex_region_t texList[I810_NR_TEX_REGIONS+1]; + /* Last elt is sentinal */ + int texAge; /* last time texture was uploaded */ + int last_enqueue; /* last time a buffer was enqueued */ + int last_dispatch; /* age of the most recently dispatched buffer */ + int last_quiescent; /* */ + int ctxOwner; /* last context to upload state */ + + int vertex_prim; + +} drm_i810_sarea_t; + +typedef struct _drm_i810_clear { + int clear_color; + int clear_depth; + int flags; +} drm_i810_clear_t; + + + +/* These may be placeholders if we have more cliprects than + * I810_NR_SAREA_CLIPRECTS. In that case, the client sets discard to + * false, indicating that the buffer will be dispatched again with a + * new set of cliprects. + */ +typedef struct _drm_i810_vertex { + int idx; /* buffer index */ + int used; /* nr bytes in use */ + int discard; /* client is finished with the buffer? 
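/* Editorial sketch of the texture-region LRU described in the comment
 * above.  Purely illustrative: it assumes the sentinel element at index
 * I810_NR_TEX_REGIONS links the list through its byte-sized next/prev
 * indices and that the sentinel's prev marks the least recently used end;
 * the real clients live in user space and also bump the global texAge
 * when they upload. */
static void example_kick_tex_regions(drm_i810_sarea_t *sarea, int needed)
{
	int sentinel = I810_NR_TEX_REGIONS;		/* last elt is sentinel */
	unsigned char i = sarea->texList[sentinel].prev;

	while (needed-- > 0 && i != sentinel) {
		drm_i810_tex_region_t *r = &sarea->texList[i];

		r->in_use = 0;			/* region may be reused */
		r->age	  = ++sarea->texAge;	/* other clients see the steal */
		i = r->prev;
	}
}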
*/ +} drm_i810_vertex_t; + +typedef struct drm_i810_dma { + void *virtual; + int request_idx; + int request_size; + int granted; +} drm_i810_dma_t; + +#endif /* _I810_DRM_H_ */ diff --git a/linux/i810_drv.c b/linux/i810_drv.c index f33153a3..6f78fbc9 100644 --- a/linux/i810_drv.c +++ b/linux/i810_drv.c @@ -2,6 +2,7 @@ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -23,25 +24,26 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com> - * Jeff Hartmann <jhartmann@precisioninsight.com> - * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_drv.c,v 1.1 2000/02/11 17:26:05 dawes Exp $ + * Authors: Rickard E. (Rik) Faith <faith@valinux.com> + * Jeff Hartmann <jhartmann@valinux.com> * */ +#include <linux/config.h> #define EXPORT_SYMTAB #include "drmP.h" #include "i810_drv.h" + + EXPORT_SYMBOL(i810_init); EXPORT_SYMBOL(i810_cleanup); #define I810_NAME "i810" -#define I810_DESC "Matrox g200/g400" +#define I810_DESC "Intel I810" #define I810_DATE "19991213" -#define I810_MAJOR 0 +#define I810_MAJOR 1 #define I810_MINOR 0 -#define I810_PATCHLEVEL 1 +#define I810_PATCHLEVEL 0 static drm_device_t i810_device; drm_ctx_t i810_res_ctx; @@ -54,6 +56,7 @@ static struct file_operations i810_fops = { mmap: drm_mmap, read: drm_read, fasync: drm_fasync, + poll: drm_poll, }; static struct miscdevice i810_misc = { @@ -77,21 +80,18 @@ static drm_ioctl_desc_t i810_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { i810_addbufs, 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { i810_markbufs, 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { i810_infobufs, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { i810_mapbufs, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { i810_freebufs, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { drm_addctx, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { drm_rmctx, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { drm_modctx, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { drm_getctx, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { drm_switchctx, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { drm_newctx, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { drm_resctx, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { i810_addctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { i810_rmctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { i810_modctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { i810_getctx, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { i810_switchctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { i810_newctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { i810_resctx, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { i810_dma, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { i810_lock, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { i810_unlock, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 }, @@ -104,6 +104,14 @@ static drm_ioctl_desc_t i810_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 }, + + 
[DRM_IOCTL_NR(DRM_IOCTL_I810_INIT)] = { i810_dma_init, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_I810_VERTEX)] = { i810_dma_vertex, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_I810_CLEAR)] = { i810_clear_bufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_I810_FLUSH)] = { i810_flush_ioctl,1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_I810_GETAGE)] = { i810_getage, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_I810_GETBUF)] = { i810_getbuf, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_I810_SWAP)] = { i810_swap_bufs, 1, 0 }, }; #define I810_IOCTL_COUNT DRM_ARRAY_SIZE(i810_ioctls) @@ -121,7 +129,7 @@ MODULE_PARM(i810, "s"); int init_module(void) { - printk("doing i810_init()\n"); + DRM_DEBUG("doing i810_init()\n"); return i810_init(); } @@ -364,7 +372,7 @@ int i810_init(void) #ifdef MODULE drm_parse_options(i810); #endif - printk("doing misc_register\n"); + DRM_DEBUG("doing misc_register\n"); if ((retcode = misc_register(&i810_misc))) { DRM_ERROR("Cannot register \"%s\"\n", I810_NAME); return retcode; @@ -372,13 +380,22 @@ int i810_init(void) dev->device = MKDEV(MISC_MAJOR, i810_misc.minor); dev->name = I810_NAME; - printk("doing mem init\n"); + DRM_DEBUG("doing mem init\n"); drm_mem_init(); - printk("doing proc init\n"); + DRM_DEBUG("doing proc init\n"); drm_proc_init(dev); - printk("doing agp init\n"); + DRM_DEBUG("doing agp init\n"); dev->agp = drm_agp_init(); - printk("doing ctxbitmap init\n"); + if(dev->agp == NULL) { + DRM_INFO("The i810 drm module requires the agpgart module" + " to function correctly\nPlease load the agpgart" + " module before you load the i810 module\n"); + drm_proc_cleanup(); + misc_deregister(&i810_misc); + i810_takedown(dev); + return -ENOMEM; + } + DRM_DEBUG("doing ctxbitmap init\n"); if((retcode = drm_ctxbitmap_init(dev))) { DRM_ERROR("Cannot allocate memory for context bitmap.\n"); drm_proc_cleanup(); @@ -386,10 +403,6 @@ int i810_init(void) i810_takedown(dev); return retcode; } -#if 0 - printk("doing i810_dma_init\n"); - i810_dma_init(dev); -#endif DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", I810_NAME, @@ -417,7 +430,6 @@ void i810_cleanup(void) DRM_INFO("Module unloaded\n"); } drm_ctxbitmap_cleanup(dev); - i810_dma_cleanup(dev); i810_takedown(dev); if (dev->agp) { drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS); @@ -484,24 +496,82 @@ int i810_release(struct inode *inode, struct file *filp) drm_device_t *dev = priv->dev; int retcode = 0; - DRM_DEBUG("open_count = %d\n", dev->open_count); - if (!(retcode = drm_release(inode, filp))) { - MOD_DEC_USE_COUNT; - atomic_inc(&dev->total_close); - spin_lock(&dev->count_lock); - if (!--dev->open_count) { - if (atomic_read(&dev->ioctl_count) || dev->blocked) { - DRM_ERROR("Device busy: %d %d\n", - atomic_read(&dev->ioctl_count), - dev->blocked); - spin_unlock(&dev->count_lock); - return -EBUSY; + DRM_DEBUG("pid = %d, device = 0x%x, open_count = %d\n", + current->pid, dev->device, dev->open_count); + + if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) + && dev->lock.pid == current->pid) { + i810_reclaim_buffers(dev, priv->pid); + DRM_ERROR("Process %d dead, freeing lock for context %d\n", + current->pid, + _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); + drm_lock_free(dev, + &dev->lock.hw_lock->lock, + _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); + + /* FIXME: may require heavy-handed reset of + hardware at this point, possibly + processed via a callback to the X + server. 
*/ + } else if (dev->lock.hw_lock) { + /* The lock is required to reclaim buffers */ + DECLARE_WAITQUEUE(entry, current); + add_wait_queue(&dev->lock.lock_queue, &entry); + for (;;) { + if (!dev->lock.hw_lock) { + /* Device has been unregistered */ + retcode = -EINTR; + break; + } + if (drm_lock_take(&dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT)) { + dev->lock.pid = priv->pid; + dev->lock.lock_time = jiffies; + atomic_inc(&dev->total_locks); + break; /* Got lock */ + } + /* Contention */ + atomic_inc(&dev->total_sleeps); + current->state = TASK_INTERRUPTIBLE; + schedule(); + if (signal_pending(current)) { + retcode = -ERESTARTSYS; + break; } - spin_unlock(&dev->count_lock); - return i810_takedown(dev); } - spin_unlock(&dev->count_lock); + current->state = TASK_RUNNING; + remove_wait_queue(&dev->lock.lock_queue, &entry); + if(!retcode) { + i810_reclaim_buffers(dev, priv->pid); + drm_lock_free(dev, &dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT); + } + } + drm_fasync(-1, filp, 0); + + down(&dev->struct_sem); + if (priv->prev) priv->prev->next = priv->next; + else dev->file_first = priv->next; + if (priv->next) priv->next->prev = priv->prev; + else dev->file_last = priv->prev; + up(&dev->struct_sem); + + drm_free(priv, sizeof(*priv), DRM_MEM_FILES); + MOD_DEC_USE_COUNT; + atomic_inc(&dev->total_close); + spin_lock(&dev->count_lock); + if (!--dev->open_count) { + if (atomic_read(&dev->ioctl_count) || dev->blocked) { + DRM_ERROR("Device busy: %d %d\n", + atomic_read(&dev->ioctl_count), + dev->blocked); + spin_unlock(&dev->count_lock); + return -EBUSY; + } + spin_unlock(&dev->count_lock); + return i810_takedown(dev); } + spin_unlock(&dev->count_lock); return retcode; } @@ -566,8 +636,7 @@ int i810_unlock(struct inode *inode, struct file *filp, unsigned int cmd, atomic_inc(&dev->total_unlocks); if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock)) atomic_inc(&dev->total_contends); - drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT); - i810_dma_schedule(dev, 1); + drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT); if (!dev->context_flag) { if (drm_lock_free(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT)) { diff --git a/linux/i810_drv.h b/linux/i810_drv.h index 0f5f42bb..c387bf72 100644 --- a/linux/i810_drv.h +++ b/linux/i810_drv.h @@ -2,6 +2,7 @@ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -23,15 +24,58 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com> - * Jeff Hartmann <jhartmann@precisioninsight.com> + * Authors: Rickard E. 
(Rik) Faith <faith@valinux.com> + * Jeff Hartmann <jhartmann@valinux.com> * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_drv.h,v 1.1 2000/02/11 17:26:05 dawes Exp $ */ #ifndef _I810_DRV_H_ #define _I810_DRV_H_ +typedef struct drm_i810_buf_priv { + u32 *in_use; + int my_use_idx; + int currently_mapped; + void *virtual; + void *kernel_virtual; + int map_count; + struct vm_area_struct *vma; +} drm_i810_buf_priv_t; + +typedef struct _drm_i810_ring_buffer{ + int tail_mask; + unsigned long Start; + unsigned long End; + unsigned long Size; + u8 *virtual_start; + int head; + int tail; + int space; +} drm_i810_ring_buffer_t; + +typedef struct drm_i810_private { + int ring_map_idx; + int buffer_map_idx; + + drm_i810_ring_buffer_t ring; + drm_i810_sarea_t *sarea_priv; + + unsigned long hw_status_page; + unsigned long counter; + + atomic_t flush_done; + wait_queue_head_t flush_queue; /* Processes waiting until flush */ + drm_buf_t *mmap_buffer; + + + u32 front_di1, back_di1, zi1; + + int back_offset; + int depth_offset; + int w, h; + int pitch; +} drm_i810_private_t; + /* i810_drv.c */ extern int i810_init(void); extern void i810_cleanup(void); @@ -46,16 +90,22 @@ extern int i810_unlock(struct inode *inode, struct file *filp, /* i810_dma.c */ extern int i810_dma_schedule(drm_device_t *dev, int locked); -extern int i810_dma(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +extern int i810_getbuf(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); extern int i810_irq_install(drm_device_t *dev, int irq); extern int i810_irq_uninstall(drm_device_t *dev); extern int i810_control(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int i810_lock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern void i810_dma_init(drm_device_t *dev); -extern void i810_dma_cleanup(drm_device_t *dev); +extern int i810_dma_init(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_flush_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern void i810_reclaim_buffers(drm_device_t *dev, pid_t pid); +extern int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg); +extern int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma); /* i810_bufs.c */ @@ -67,10 +117,108 @@ extern int i810_markbufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int i810_freebufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int i810_mapbufs(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); extern int i810_addmap(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); + /* i810_context.c */ +extern int i810_resctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_addctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_modctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_getctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_switchctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_newctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_rmctx(struct inode 
*inode, struct file *filp, + unsigned int cmd, unsigned long arg); + +extern int i810_context_switch(drm_device_t *dev, int old, int new); +extern int i810_context_switch_complete(drm_device_t *dev, int new); + +#define I810_VERBOSE 0 + + +int i810_dma_vertex(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + +int i810_swap_bufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + +int i810_clear_bufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + +#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) +#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23)) +#define CMD_REPORT_HEAD (7<<23) +#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1) +#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1) + +#define INST_PARSER_CLIENT 0x00000000 +#define INST_OP_FLUSH 0x02000000 +#define INST_FLUSH_MAP_CACHE 0x00000001 + + +#define BB1_START_ADDR_MASK (~0x7) +#define BB1_PROTECTED (1<<0) +#define BB1_UNPROTECTED (0<<0) +#define BB2_END_ADDR_MASK (~0x7) + +#define I810REG_HWSTAM 0x02098 +#define I810REG_INT_IDENTITY_R 0x020a4 +#define I810REG_INT_MASK_R 0x020a8 +#define I810REG_INT_ENABLE_R 0x020a0 + +#define LP_RING 0x2030 +#define HP_RING 0x2040 +#define RING_TAIL 0x00 +#define TAIL_ADDR 0x000FFFF8 +#define RING_HEAD 0x04 +#define HEAD_WRAP_COUNT 0xFFE00000 +#define HEAD_WRAP_ONE 0x00200000 +#define HEAD_ADDR 0x001FFFFC +#define RING_START 0x08 +#define START_ADDR 0x00FFFFF8 +#define RING_LEN 0x0C +#define RING_NR_PAGES 0x000FF000 +#define RING_REPORT_MASK 0x00000006 +#define RING_REPORT_64K 0x00000002 +#define RING_REPORT_128K 0x00000004 +#define RING_NO_REPORT 0x00000000 +#define RING_VALID_MASK 0x00000001 +#define RING_VALID 0x00000001 +#define RING_INVALID 0x00000000 + +#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) +#define SC_UPDATE_SCISSOR (0x1<<1) +#define SC_ENABLE_MASK (0x1<<0) +#define SC_ENABLE (0x1<<0) + +#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) +#define SCI_YMIN_MASK (0xffff<<16) +#define SCI_XMIN_MASK (0xffff<<0) +#define SCI_YMAX_MASK (0xffff<<16) +#define SCI_XMAX_MASK (0xffff<<0) + +#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0) +#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) +#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x2) +#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) +#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) +#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24)) + +#define CMD_OP_Z_BUFFER_INFO ((0x0<<29)|(0x16<<23)) +#define CMD_OP_DESTBUFFER_INFO ((0x0<<29)|(0x15<<23)) + +#define BR00_BITBLT_CLIENT 0x40000000 +#define BR00_OP_COLOR_BLT 0x10000000 +#define BR00_OP_SRC_COPY_BLT 0x10C00000 +#define BR13_SOLID_PATTERN 0x80000000 + + #endif + diff --git a/linux/init.c b/linux/init.c index 25c0aed2..aefc884e 100644 --- a/linux/init.c +++ b/linux/init.c @@ -1,8 +1,8 @@ /* init.c -- Setup/Cleanup for DRM -*- linux-c -*- * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com - * Revised: Fri Aug 20 09:27:02 1999 by faith@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,7 +24,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
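/* Editorial sketch of the usual ring arithmetic behind the
 * drm_i810_ring_buffer_t and RING_HEAD/RING_TAIL registers above: head is
 * read back from the chip (masked with HEAD_ADDR), tail is where the
 * driver writes next, and free space is the distance from tail to head
 * modulo the ring size.  The small 8-byte guard band that keeps tail from
 * catching head is an assumption of this sketch, not something stated in
 * the header. */
static int example_ring_space(drm_i810_ring_buffer_t *ring)
{
	int space = ring->head - (ring->tail + 8);

	if (space < 0)
		space += ring->Size;	/* tail has wrapped past head */
	return space;
}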
* - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/init.c,v 1.2 2000/02/23 04:47:29 martin Exp $ + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> * */ @@ -34,7 +35,7 @@ int drm_flags = 0; /* drm_parse_option parses a single option. See description for - drm_parse_drm for details. */ + drm_parse_options for details. */ static void drm_parse_option(char *s) { @@ -96,3 +97,10 @@ void drm_parse_options(char *s) } } +int drm_cpu_valid(void) +{ +#if defined(__i386__) + if (boot_cpu_data.x86 == 3) return 0; /* No cmpxchg on a 386 */ +#endif + return 1; +} diff --git a/linux/ioctl.c b/linux/ioctl.c index 8edfb437..b246f76e 100644 --- a/linux/ioctl.c +++ b/linux/ioctl.c @@ -1,8 +1,8 @@ /* ioctl.c -- IOCTL processing for DRM -*- linux-c -*- * Created: Fri Jan 8 09:01:26 1999 by faith@precisioninsight.com - * Revised: Fri Aug 20 09:27:02 1999 by faith@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,7 +24,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/ioctl.c,v 1.2 2000/02/23 04:47:29 martin Exp $ + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> * */ diff --git a/linux/lists.c b/linux/lists.c index af8f6150..f62495aa 100644 --- a/linux/lists.c +++ b/linux/lists.c @@ -1,8 +1,8 @@ /* lists.c -- Buffer list handling routines -*- linux-c -*- * Created: Mon Apr 19 20:54:22 1999 by faith@precisioninsight.com - * Revised: Sun Feb 13 23:37:52 2000 by kevin@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,7 +24,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/lists.c,v 1.6 2000/02/23 04:56:42 martin Exp $ + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> * */ diff --git a/linux/lock.c b/linux/lock.c index adf27ab5..55082727 100644 --- a/linux/lock.c +++ b/linux/lock.c @@ -1,8 +1,8 @@ /* lock.c -- IOCTLs for locking -*- linux-c -*- * Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com - * Revised: Sun Feb 13 23:38:25 2000 by kevin@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,7 +24,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/lock.c,v 1.5 2000/02/23 04:47:29 martin Exp $ + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> * */ diff --git a/linux/memory.c b/linux/memory.c index a8a81abd..0e92401b 100644 --- a/linux/memory.c +++ b/linux/memory.c @@ -1,8 +1,8 @@ /* memory.c -- Memory management wrappers for DRM -*- linux-c -*- * Created: Thu Feb 4 14:00:34 1999 by faith@precisioninsight.com - * Revised: Sun Feb 13 23:39:37 2000 by kevin@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. 
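/* Editorial sketch of why drm_cpu_valid() above rejects the 386: the DRM
 * hardware lock is a word shared with user space and is taken by
 * atomically swapping in (context | _DRM_LOCK_HELD), which needs a
 * compare-and-exchange instruction.  This mirrors the general shape of
 * drm_lock_take() but is not its exact body. */
static int example_lock_take(volatile unsigned int *lock, unsigned int context)
{
	unsigned int old, new_val;

	do {
		old = *lock;
		if (old & _DRM_LOCK_HELD)
			return 0;			/* someone else holds it */
		new_val = context | _DRM_LOCK_HELD;
	} while (cmpxchg(lock, old, new_val) != old);

	return 1;					/* lock acquired */
}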
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,7 +24,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/memory.c,v 1.5 2000/02/23 04:47:30 martin Exp $ + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> * */ @@ -44,21 +45,25 @@ static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED; static unsigned long drm_ram_available = 0; /* In pages */ static unsigned long drm_ram_used = 0; static drm_mem_stats_t drm_mem_stats[] = { - [DRM_MEM_DMA] = { "dmabufs" }, - [DRM_MEM_SAREA] = { "sareas" }, - [DRM_MEM_DRIVER] = { "driver" }, - [DRM_MEM_MAGIC] = { "magic" }, - [DRM_MEM_IOCTLS] = { "ioctltab" }, - [DRM_MEM_MAPS] = { "maplist" }, - [DRM_MEM_VMAS] = { "vmalist" }, - [DRM_MEM_BUFS] = { "buflist" }, - [DRM_MEM_SEGS] = { "seglist" }, - [DRM_MEM_PAGES] = { "pagelist" }, - [DRM_MEM_FILES] = { "files" }, - [DRM_MEM_QUEUES] = { "queues" }, - [DRM_MEM_CMDS] = { "commands" }, - [DRM_MEM_MAPPINGS] = { "mappings" }, - [DRM_MEM_BUFLISTS] = { "buflists" }, + [DRM_MEM_DMA] = { "dmabufs" }, + [DRM_MEM_SAREA] = { "sareas" }, + [DRM_MEM_DRIVER] = { "driver" }, + [DRM_MEM_MAGIC] = { "magic" }, + [DRM_MEM_IOCTLS] = { "ioctltab" }, + [DRM_MEM_MAPS] = { "maplist" }, + [DRM_MEM_VMAS] = { "vmalist" }, + [DRM_MEM_BUFS] = { "buflist" }, + [DRM_MEM_SEGS] = { "seglist" }, + [DRM_MEM_PAGES] = { "pagelist" }, + [DRM_MEM_FILES] = { "files" }, + [DRM_MEM_QUEUES] = { "queues" }, + [DRM_MEM_CMDS] = { "commands" }, + [DRM_MEM_MAPPINGS] = { "mappings" }, + [DRM_MEM_BUFLISTS] = { "buflists" }, + [DRM_MEM_AGPLISTS] = { "agplist" }, + [DRM_MEM_TOTALAGP] = { "totalagp" }, + [DRM_MEM_BOUNDAGP] = { "boundagp" }, + [DRM_MEM_CTXBITMAP] = { "ctxbitmap"}, { NULL, 0, } /* Last entry must be null */ }; @@ -324,3 +329,120 @@ void drm_ioremapfree(void *pt, unsigned long size) free_count, alloc_count); } } + +#ifdef DRM_AGP +agp_memory *drm_alloc_agp(int pages, u32 type) +{ + agp_memory *handle; + + if (!pages) { + DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n"); + return NULL; + } + + if (drm_agp.allocate_memory) { + if ((handle = (*drm_agp.allocate_memory)(pages, + type))) { + spin_lock(&drm_mem_lock); + ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count; + drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated + += pages << PAGE_SHIFT; + spin_unlock(&drm_mem_lock); + return handle; + } + } + spin_lock(&drm_mem_lock); + ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count; + spin_unlock(&drm_mem_lock); + return NULL; +} + +int drm_free_agp(agp_memory *handle, int pages) +{ + int alloc_count; + int free_count; + int retval = -EINVAL; + + if (!handle) { + DRM_MEM_ERROR(DRM_MEM_TOTALAGP, + "Attempt to free NULL AGP handle\n"); + return retval;; + } + + if (drm_agp.free_memory) { + (*drm_agp.free_memory)(handle); + spin_lock(&drm_mem_lock); + free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count; + alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count; + drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed + += pages << PAGE_SHIFT; + spin_unlock(&drm_mem_lock); + if (free_count > alloc_count) { + DRM_MEM_ERROR(DRM_MEM_TOTALAGP, + "Excess frees: %d frees, %d allocs\n", + free_count, alloc_count); + } + return 0; + } + return retval; +} + +int drm_bind_agp(agp_memory *handle, unsigned int start) +{ + int retcode = -EINVAL; + + DRM_DEBUG("drm_bind_agp called\n"); + if (!handle) { + 
DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, + "Attempt to bind NULL AGP handle\n"); + return retcode; + } + + DRM_DEBUG("drm_agp.bind_memory : %p\n", drm_agp.bind_memory); + if (drm_agp.bind_memory) { + if (!(retcode = (*drm_agp.bind_memory)(handle, start))) { + spin_lock(&drm_mem_lock); + ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count; + drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated + += handle->page_count << PAGE_SHIFT; + spin_unlock(&drm_mem_lock); + DRM_DEBUG("drm_agp.bind_memory: retcode %d\n", retcode); + return retcode; + } + } + spin_lock(&drm_mem_lock); + ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count; + spin_unlock(&drm_mem_lock); + return retcode; +} + +int drm_unbind_agp(agp_memory *handle) +{ + int alloc_count; + int free_count; + int retcode = -EINVAL; + + if (!handle) { + DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, + "Attempt to unbind NULL AGP handle\n"); + return retcode; + } + + if (drm_agp.unbind_memory) { + int c = handle->page_count; + if ((retcode = (*drm_agp.unbind_memory)(handle))) + return retcode; + spin_lock(&drm_mem_lock); + free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count; + alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count; + drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed += c << PAGE_SHIFT; + spin_unlock(&drm_mem_lock); + if (free_count > alloc_count) { + DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, + "Excess frees: %d frees, %d allocs\n", + free_count, alloc_count); + } + } + return retcode; +} +#endif diff --git a/linux/mga_bufs.c b/linux/mga_bufs.c index 34f1112a..b97eb495 100644 --- a/linux/mga_bufs.c +++ b/linux/mga_bufs.c @@ -2,6 +2,7 @@ * Created: Thu Jan 6 01:47:26 2000 by jhartmann@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -23,179 +24,176 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com> - * Jeff Hartmann <jhartmann@precisioninsight.com> - * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_bufs.c,v 1.1 2000/02/11 17:26:06 dawes Exp $ + * Authors: Rickard E. (Rik) Faith <faith@valinux.com> + * Jeff Hartmann <jhartmann@valinux.com> * */ #define __NO_VERSION__ #include "drmP.h" #include "mga_drv.h" -#include "mga_dma.h" #include "linux/un.h" int mga_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - drm_device_dma_t *dma = dev->dma; - drm_buf_desc_t request; - drm_buf_entry_t *entry; - drm_buf_t *buf; - unsigned long offset; - unsigned long agp_offset; - int count; - int order; - int size; - int alignment; - int page_order; - int total; - int byte_count; - int i; - - if (!dma) return -EINVAL; - - copy_from_user_ret(&request, - (drm_buf_desc_t *)arg, - sizeof(request), - -EFAULT); - - count = request.count; - order = drm_order(request.size); - size = 1 << order; - agp_offset = request.agp_start; - alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) :size; - page_order = order - PAGE_SHIFT > 0 ? 
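/* Editorial sketch of how a driver is expected to use the AGP wrappers
 * above.  The page count and the aperture page offset are assumptions for
 * illustration and error handling is trimmed; each call updates the
 * matching DRM_MEM_TOTALAGP / DRM_MEM_BOUNDAGP counters shown in /proc. */
static void example_agp_use(int pages, unsigned int aperture_page)
{
	agp_memory *handle;

	handle = drm_alloc_agp(pages, 0);	/* type 0: ordinary memory */
	if (!handle)
		return;

	if (drm_bind_agp(handle, aperture_page) == 0) {
		/* ... memory is now visible through the GART ... */
		drm_unbind_agp(handle);
	}
	drm_free_agp(handle, pages);
}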
order - PAGE_SHIFT : 0; - total = PAGE_SIZE << page_order; - byte_count = 0; - - DRM_DEBUG("count: %d\n", count); - DRM_DEBUG("order: %d\n", order); - DRM_DEBUG("size: %d\n", size); - DRM_DEBUG("agp_offset: %d\n", agp_offset); - DRM_DEBUG("alignment: %d\n", alignment); - DRM_DEBUG("page_order: %d\n", page_order); - DRM_DEBUG("total: %d\n", total); - DRM_DEBUG("byte_count: %d\n", byte_count); - - if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; - if (dev->queue_count) return -EBUSY; /* Not while in use */ - spin_lock(&dev->count_lock); - if (dev->buf_use) { - spin_unlock(&dev->count_lock); - return -EBUSY; - } - atomic_inc(&dev->buf_alloc); - spin_unlock(&dev->count_lock); + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_device_dma_t *dma = dev->dma; + drm_buf_desc_t request; + drm_buf_entry_t *entry; + drm_buf_t *buf; + unsigned long offset; + unsigned long agp_offset; + int count; + int order; + int size; + int alignment; + int page_order; + int total; + int byte_count; + int i; + + if (!dma) return -EINVAL; + + copy_from_user_ret(&request, + (drm_buf_desc_t *)arg, + sizeof(request), + -EFAULT); + + count = request.count; + order = drm_order(request.size); + size = 1 << order; + agp_offset = request.agp_start; + alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) :size; + page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; + total = PAGE_SIZE << page_order; + byte_count = 0; + + DRM_DEBUG("count: %d\n", count); + DRM_DEBUG("order: %d\n", order); + DRM_DEBUG("size: %d\n", size); + DRM_DEBUG("agp_offset: %ld\n", agp_offset); + DRM_DEBUG("alignment: %d\n", alignment); + DRM_DEBUG("page_order: %d\n", page_order); + DRM_DEBUG("total: %d\n", total); + DRM_DEBUG("byte_count: %d\n", byte_count); + + if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; + if (dev->queue_count) return -EBUSY; /* Not while in use */ + spin_lock(&dev->count_lock); + if (dev->buf_use) { + spin_unlock(&dev->count_lock); + return -EBUSY; + } + atomic_inc(&dev->buf_alloc); + spin_unlock(&dev->count_lock); - down(&dev->struct_sem); - entry = &dma->bufs[order]; - if (entry->buf_count) { - up(&dev->struct_sem); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; /* May only call once for each order */ - } + down(&dev->struct_sem); + entry = &dma->bufs[order]; + if (entry->buf_count) { + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; /* May only call once for each order */ + } - entry->buflist = drm_alloc(count * sizeof(*entry->buflist), - DRM_MEM_BUFS); - if (!entry->buflist) { - up(&dev->struct_sem); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - memset(entry->buflist, 0, count * sizeof(*entry->buflist)); + entry->buflist = drm_alloc(count * sizeof(*entry->buflist), + DRM_MEM_BUFS); + if (!entry->buflist) { + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + memset(entry->buflist, 0, count * sizeof(*entry->buflist)); - entry->buf_size = size; - entry->page_order = page_order; - offset = 0; + entry->buf_size = size; + entry->page_order = page_order; + offset = 0; - while(entry->buf_count < count) { - buf = &entry->buflist[entry->buf_count]; - buf->idx = dma->buf_count + entry->buf_count; - buf->total = alignment; - buf->order = order; - buf->used = 0; - - DRM_DEBUG("offset : %d\n", offset); - - buf->offset = offset; /* Hrm */ - buf->bus_address = dev->agp->base + agp_offset + offset; - buf->address = (void *)(agp_offset + offset + dev->agp->base); - buf->next = NULL; - buf->waiting = 
0; - buf->pending = 0; - init_waitqueue_head(&buf->dma_wait); - buf->pid = 0; - - buf->dev_private = drm_alloc(sizeof(drm_mga_buf_priv_t), DRM_MEM_BUFS); - buf->dev_priv_size = sizeof(drm_mga_buf_priv_t); + while(entry->buf_count < count) { + buf = &entry->buflist[entry->buf_count]; + buf->idx = dma->buf_count + entry->buf_count; + buf->total = alignment; + buf->order = order; + buf->used = 0; + + DRM_DEBUG("offset : %ld\n", offset); + + buf->offset = offset; /* Hrm */ + buf->bus_address = dev->agp->base + agp_offset + offset; + buf->address = (void *)(agp_offset + offset + dev->agp->base); + buf->next = NULL; + buf->waiting = 0; + buf->pending = 0; + init_waitqueue_head(&buf->dma_wait); + buf->pid = 0; + + buf->dev_private = drm_alloc(sizeof(drm_mga_buf_priv_t), DRM_MEM_BUFS); + buf->dev_priv_size = sizeof(drm_mga_buf_priv_t); #if DRM_DMA_HISTOGRAM - buf->time_queued = 0; - buf->time_dispatched = 0; - buf->time_completed = 0; - buf->time_freed = 0; + buf->time_queued = 0; + buf->time_dispatched = 0; + buf->time_completed = 0; + buf->time_freed = 0; #endif - offset = offset + alignment; - entry->buf_count++; - byte_count += PAGE_SIZE << page_order; + offset = offset + alignment; + entry->buf_count++; + byte_count += PAGE_SIZE << page_order; - DRM_DEBUG("buffer %d @ %p\n", - entry->buf_count, buf->address); - } + DRM_DEBUG("buffer %d @ %p\n", + entry->buf_count, buf->address); + } - dma->buflist = drm_realloc(dma->buflist, - dma->buf_count * sizeof(*dma->buflist), - (dma->buf_count + entry->buf_count) - * sizeof(*dma->buflist), - DRM_MEM_BUFS); - for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++) - dma->buflist[i] = &entry->buflist[i - dma->buf_count]; + dma->buflist = drm_realloc(dma->buflist, + dma->buf_count * sizeof(*dma->buflist), + (dma->buf_count + entry->buf_count) + * sizeof(*dma->buflist), + DRM_MEM_BUFS); + for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++) + dma->buflist[i] = &entry->buflist[i - dma->buf_count]; - dma->buf_count += entry->buf_count; + dma->buf_count += entry->buf_count; - DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); + DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); - dma->byte_count += byte_count; + dma->byte_count += byte_count; - DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); + DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); - drm_freelist_create(&entry->freelist, entry->buf_count); - for (i = 0; i < entry->buf_count; i++) { - drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]); - } + drm_freelist_create(&entry->freelist, entry->buf_count); + for (i = 0; i < entry->buf_count; i++) { + drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]); + } - up(&dev->struct_sem); + up(&dev->struct_sem); - request.count = entry->buf_count; - request.size = size; + request.count = entry->buf_count; + request.size = size; - copy_to_user_ret((drm_buf_desc_t *)arg, - &request, - sizeof(request), - -EFAULT); + copy_to_user_ret((drm_buf_desc_t *)arg, + &request, + sizeof(request), + -EFAULT); - atomic_dec(&dev->buf_alloc); + atomic_dec(&dev->buf_alloc); - DRM_DEBUG("count: %d\n", count); - DRM_DEBUG("order: %d\n", order); - DRM_DEBUG("size: %d\n", size); - DRM_DEBUG("agp_offset: %d\n", agp_offset); - DRM_DEBUG("alignment: %d\n", alignment); - DRM_DEBUG("page_order: %d\n", page_order); - DRM_DEBUG("total: %d\n", total); - DRM_DEBUG("byte_count: %d\n", byte_count); + DRM_DEBUG("count: %d\n", count); + DRM_DEBUG("order: %d\n", order); + DRM_DEBUG("size: %d\n", size); + DRM_DEBUG("agp_offset: %ld\n", 
agp_offset); + DRM_DEBUG("alignment: %d\n", alignment); + DRM_DEBUG("page_order: %d\n", page_order); + DRM_DEBUG("total: %d\n", total); + DRM_DEBUG("byte_count: %d\n", byte_count); - dma->flags = _DRM_DMA_USE_AGP; + dma->flags = _DRM_DMA_USE_AGP; - DRM_DEBUG("dma->flags : %lx\n", dma->flags); + DRM_DEBUG("dma->flags : %x\n", dma->flags); - return 0; + return 0; } int mga_addbufs_pci(struct inode *inode, struct file *filp, unsigned int cmd, @@ -362,17 +360,17 @@ int mga_addbufs_pci(struct inode *inode, struct file *filp, unsigned int cmd, int mga_addbufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_buf_desc_t request; + drm_buf_desc_t request; - copy_from_user_ret(&request, - (drm_buf_desc_t *)arg, - sizeof(request), - -EFAULT); + copy_from_user_ret(&request, + (drm_buf_desc_t *)arg, + sizeof(request), + -EFAULT); - if(request.flags & _DRM_AGP_BUFFER) - return mga_addbufs_agp(inode, filp, cmd, arg); - else - return mga_addbufs_pci(inode, filp, cmd, arg); + if(request.flags & _DRM_AGP_BUFFER) + return mga_addbufs_agp(inode, filp, cmd, arg); + else + return mga_addbufs_pci(inode, filp, cmd, arg); } int mga_infobufs(struct inode *inode, struct file *filp, unsigned int cmd, @@ -546,7 +544,7 @@ int mga_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd, spin_lock(&dev->count_lock); if (atomic_read(&dev->buf_alloc)) { spin_unlock(&dev->count_lock); - DRM_DEBUG("Buzy\n"); + DRM_DEBUG("Busy\n"); return -EBUSY; } ++dev->buf_use; /* Can't allocate more after this call */ @@ -558,79 +556,84 @@ int mga_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd, -EFAULT); DRM_DEBUG("mga_mapbufs\n"); - DRM_DEBUG("dma->flags : %lx\n", dma->flags); + DRM_DEBUG("dma->flags : %x\n", dma->flags); - if (request.count >= dma->buf_count) { - if(dma->flags & _DRM_DMA_USE_AGP) { - drm_mga_private_t *dev_priv = dev->dev_private; - drm_map_t *map = NULL; + if (request.count >= dma->buf_count) { + if(dma->flags & _DRM_DMA_USE_AGP) { + drm_mga_private_t *dev_priv = dev->dev_private; + drm_map_t *map = NULL; - map = dev->maplist[dev_priv->buffer_map_idx]; - if (!map) { - DRM_DEBUG("map is null\n"); - retcode = -EINVAL; - goto done; - } - - DRM_DEBUG("map->offset : %lx\n", map->offset); - DRM_DEBUG("map->size : %lx\n", map->size); - DRM_DEBUG("map->type : %d\n", map->type); - DRM_DEBUG("map->flags : %x\n", map->flags); - DRM_DEBUG("map->handle : %lx\n", map->handle); - DRM_DEBUG("map->mtrr : %d\n", map->mtrr); - - virtual = do_mmap(filp, 0, map->size, PROT_READ|PROT_WRITE, - MAP_SHARED, (unsigned long)map->offset); - } else { - virtual = do_mmap(filp, 0, dma->byte_count, - PROT_READ|PROT_WRITE, MAP_SHARED, 0); - } - if (virtual > -1024UL) { - /* Real error */ - DRM_DEBUG("mmap error\n"); - retcode = (signed long)virtual; - goto done; - } - request.virtual = (void *)virtual; + map = dev->maplist[dev_priv->buffer_map_idx]; + if (!map) { + DRM_DEBUG("map is null\n"); + retcode = -EINVAL; + goto done; + } + + DRM_DEBUG("map->offset : %lx\n", map->offset); + DRM_DEBUG("map->size : %lx\n", map->size); + DRM_DEBUG("map->type : %d\n", map->type); + DRM_DEBUG("map->flags : %x\n", map->flags); + DRM_DEBUG("map->handle : %p\n", map->handle); + DRM_DEBUG("map->mtrr : %d\n", map->mtrr); + down(¤t->mm->mmap_sem); + virtual = do_mmap(filp, 0, map->size, + PROT_READ|PROT_WRITE, + MAP_SHARED, + (unsigned long)map->offset); + up(¤t->mm->mmap_sem); + } else { + down(¤t->mm->mmap_sem); + virtual = do_mmap(filp, 0, dma->byte_count, + PROT_READ|PROT_WRITE, MAP_SHARED, 0); + 
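/* Editorial sketch of the size bucketing used by mga_addbufs_agp() above:
 * drm_order() returns the smallest n with (1 << n) >= size, so every
 * buffer request lands in a power-of-two bucket dma->bufs[n].  Equivalent
 * in effect to the kernel helper, but not its exact text. */
static int example_order(unsigned long size)
{
	int order = 0;
	unsigned long s = 1;

	while (s < size) {
		s <<= 1;
		order++;
	}
	return order;
}
/* e.g. example_order(4096) == 12, giving size = 1 << 12 = 4096 bytes and
 * page_order = 12 - PAGE_SHIFT = 0 on x86. */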
up(¤t->mm->mmap_sem); + } + if (virtual > -1024UL) { + /* Real error */ + DRM_DEBUG("mmap error\n"); + retcode = (signed long)virtual; + goto done; + } + request.virtual = (void *)virtual; - for (i = 0; i < dma->buf_count; i++) { - if (copy_to_user(&request.list[i].idx, - &dma->buflist[i]->idx, - sizeof(request.list[0].idx))) { - retcode = -EFAULT; - goto done; - } - if (copy_to_user(&request.list[i].total, - &dma->buflist[i]->total, - sizeof(request.list[0].total))) { - retcode = -EFAULT; - goto done; - } - if (copy_to_user(&request.list[i].used, - &zero, - sizeof(zero))) { - retcode = -EFAULT; - goto done; - } - address = virtual + dma->buflist[i]->offset; - if (copy_to_user(&request.list[i].address, - &address, - sizeof(address))) { - retcode = -EFAULT; - goto done; - } - } - } - done: - request.count = dma->buf_count; - DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode); + for (i = 0; i < dma->buf_count; i++) { + if (copy_to_user(&request.list[i].idx, + &dma->buflist[i]->idx, + sizeof(request.list[0].idx))) { + retcode = -EFAULT; + goto done; + } + if (copy_to_user(&request.list[i].total, + &dma->buflist[i]->total, + sizeof(request.list[0].total))) { + retcode = -EFAULT; + goto done; + } + if (copy_to_user(&request.list[i].used, + &zero, + sizeof(zero))) { + retcode = -EFAULT; + goto done; + } + address = virtual + dma->buflist[i]->offset; + if (copy_to_user(&request.list[i].address, + &address, + sizeof(address))) { + retcode = -EFAULT; + goto done; + } + } + } + done: + request.count = dma->buf_count; + DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode); - copy_to_user_ret((drm_buf_map_t *)arg, - &request, - sizeof(request), - -EFAULT); + copy_to_user_ret((drm_buf_map_t *)arg, + &request, + sizeof(request), + -EFAULT); - DRM_DEBUG("retcode : %d\n", retcode); + DRM_DEBUG("retcode : %d\n", retcode); - return retcode; + return retcode; } diff --git a/linux/mga_context.c b/linux/mga_context.c index fb0d336f..d0259274 100644 --- a/linux/mga_context.c +++ b/linux/mga_context.c @@ -2,6 +2,7 @@ * Created: Mon Dec 13 09:51:35 1999 by faith@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -23,9 +24,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * Author: Rickard E. (Rik) Faith <faith@precisioninsight.com> - * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_context.c,v 1.1 2000/02/11 17:26:06 dawes Exp $ + * Author: Rickard E. 
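/* Editorial sketch of the user-space side of mga_mapbufs() above: the
 * client issues DRM_IOCTL_MAP_BUFS, the kernel mmap()s the buffer region
 * into the process, and each returned list entry's address already holds
 * request.virtual plus that buffer's offset.  Field names follow the
 * drm_buf_map_t / request.list[i] usage visible above; error handling and
 * headers are omitted. */
static void *example_map_bufs(int fd, drm_buf_pub_t *list, int count)
{
	drm_buf_map_t request;

	request.count = count;
	request.list  = list;
	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &request) < 0)
		return NULL;

	/* list[i].address is now ready to be filled with commands and
	 * handed back to the kernel for dispatch. */
	return request.virtual;
}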
(Rik) Faith <faith@valinux.com> + * Jeff Hartmann <jhartmann@valinux.com> * */ @@ -38,7 +38,7 @@ static int mga_alloc_queue(drm_device_t *dev) { int temp = drm_ctxbitmap_next(dev); - printk("mga_alloc_queue: %d\n", temp); + DRM_DEBUG("mga_alloc_queue: %d\n", temp); return temp; } @@ -57,7 +57,7 @@ int mga_context_switch(drm_device_t *dev, int old, int new) dev->ctx_start = get_cycles(); #endif - printk("Context switch from %d to %d\n", old, new); + DRM_DEBUG("Context switch from %d to %d\n", old, new); if (new == dev->last_context) { clear_bit(0, &dev->context_flag); @@ -104,7 +104,7 @@ int mga_resctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_ctx_t ctx; int i; - printk("%d\n", DRM_RESERVED_CONTEXTS); + DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS); copy_from_user_ret(&res, (drm_ctx_res_t *)arg, sizeof(res), -EFAULT); if (res.count >= DRM_RESERVED_CONTEXTS) { memset(&ctx, 0, sizeof(ctx)); @@ -134,11 +134,11 @@ int mga_addctx(struct inode *inode, struct file *filp, unsigned int cmd, ctx.handle = mga_alloc_queue(dev); } if (ctx.handle == -1) { - printk("Not enough free contexts.\n"); + DRM_DEBUG("Not enough free contexts.\n"); /* Should this return -EBUSY instead? */ return -ENOMEM; } - printk("%d\n", ctx.handle); + DRM_DEBUG("%d\n", ctx.handle); copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT); return 0; } @@ -170,7 +170,7 @@ int mga_switchctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_ctx_t ctx; copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); - printk("%d\n", ctx.handle); + DRM_DEBUG("%d\n", ctx.handle); return mga_context_switch(dev, dev->last_context, ctx.handle); } @@ -182,7 +182,7 @@ int mga_newctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_ctx_t ctx; copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); - printk("%d\n", ctx.handle); + DRM_DEBUG("%d\n", ctx.handle); mga_context_switch_complete(dev, ctx.handle); return 0; @@ -194,51 +194,11 @@ int mga_rmctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; drm_ctx_t ctx; - drm_queue_t *q; - drm_buf_t *buf; copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); - printk("%d\n", ctx.handle); - if(ctx.handle == DRM_KERNEL_CONTEXT) { - q = dev->queuelist[ctx.handle]; - atomic_inc(&q->use_count); - if (atomic_read(&q->use_count) == 1) { - /* No longer in use */ - atomic_dec(&q->use_count); - return -EINVAL; - } - atomic_inc(&q->finalization); /* Mark queue in finalization state */ - atomic_sub(2, &q->use_count); - /* Mark queue as unused (pending finalization) */ - - while (test_and_set_bit(0, &dev->interrupt_flag)) { - printk("Calling schedule from rmctx\n"); - schedule(); - if (signal_pending(current)) { - clear_bit(0, &dev->interrupt_flag); - return -EINTR; - } - } - - /* Remove queued buffers */ - while ((buf = drm_waitlist_get(&q->waitlist))) { - drm_free_buffer(dev, buf); - } - clear_bit(0, &dev->interrupt_flag); - - /* Wakeup blocked processes */ - wake_up_interruptible(&q->read_queue); - wake_up_interruptible(&q->write_queue); - wake_up_interruptible(&q->flush_queue); - - /* Finalization over. Queue is made - available when both use_count and - finalization become 0, which won't - happen until all the waiting processes - stop waiting. 
*/ - atomic_dec(&q->finalization); - } else { - drm_ctxbitmap_free(dev, ctx.handle); + DRM_DEBUG("%d\n", ctx.handle); + if(ctx.handle != DRM_KERNEL_CONTEXT) { + drm_ctxbitmap_free(dev, ctx.handle); } return 0; diff --git a/linux/mga_dma.c b/linux/mga_dma.c index 2e24e5b4..28e8811c 100644 --- a/linux/mga_dma.c +++ b/linux/mga_dma.c @@ -2,6 +2,7 @@ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -23,19 +24,15 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com> - * Jeff Hartmann <jhartmann@precisioninsight.com> - * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_dma.c,v 1.1 2000/02/11 17:26:07 dawes Exp $ + * Authors: Rickard E. (Rik) Faith <faith@valinux.com> + * Jeff Hartmann <jhartmann@valinux.com> + * Keith Whitwell <keithw@valinux.com> * */ #define __NO_VERSION__ #include "drmP.h" #include "mga_drv.h" -#include "mgareg_flags.h" -#include "mga_dma.h" -#include "mga_state.h" #include <linux/interrupt.h> /* For task queue support */ @@ -48,643 +45,578 @@ #define MGA_WRITE(reg,val) do { MGA_DEREF(reg) = val; } while (0) #define PDEA_pagpxfer_enable 0x2 -#define MGA_SYNC_TAG 0x423f4200 - -typedef enum { - TT_GENERAL, - TT_BLIT, - TT_VECTOR, - TT_VERTEX -} transferType_t; +static int mga_flush_queue(drm_device_t *dev); -static void mga_delay(void) +static unsigned long mga_alloc_page(drm_device_t *dev) { - return; + unsigned long address; + + DRM_DEBUG("%s\n", __FUNCTION__); + address = __get_free_page(GFP_KERNEL); + if(address == 0UL) { + return 0; + } + atomic_inc(&mem_map[MAP_NR((void *) address)].count); + set_bit(PG_locked, &mem_map[MAP_NR((void *) address)].flags); + + return address; } -int mga_dma_cleanup(drm_device_t *dev) +static void mga_free_page(drm_device_t *dev, unsigned long page) { - if(dev->dev_private) { - drm_mga_private_t *dev_priv = - (drm_mga_private_t *) dev->dev_private; - - if(dev_priv->ioremap) { - int temp = (dev_priv->warp_ucode_size + - dev_priv->primary_size + - PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE; + DRM_DEBUG("%s\n", __FUNCTION__); - drm_ioremapfree((void *) dev_priv->ioremap, temp); - } - - drm_free(dev->dev_private, sizeof(drm_mga_private_t), - DRM_MEM_DRIVER); - dev->dev_private = NULL; + if(page == 0UL) { + return; } - - return 0; + atomic_dec(&mem_map[MAP_NR((void *) page)].count); + clear_bit(PG_locked, &mem_map[MAP_NR((void *) page)].flags); + wake_up(&mem_map[MAP_NR((void *) page)].wait); + free_page(page); + return; } -static int mga_alloc_kernel_queue(drm_device_t *dev) +static void mga_delay(void) { - drm_queue_t *queue = NULL; - /* Allocate a new queue */ - down(&dev->struct_sem); - - if(dev->queue_count != 0) { - /* Reseting the kernel context here is not - * a race, since it can only happen when that - * queue is empty. 
- */ - queue = dev->queuelist[DRM_KERNEL_CONTEXT]; - printk("Kernel queue already allocated\n"); - } else { - queue = drm_alloc(sizeof(*queue), DRM_MEM_QUEUES); - if(!queue) { - up(&dev->struct_sem); - printk("out of memory\n"); - return -ENOMEM; - } - ++dev->queue_count; - dev->queuelist = drm_alloc(sizeof(*dev->queuelist), - DRM_MEM_QUEUES); - if(!dev->queuelist) { - up(&dev->struct_sem); - drm_free(queue, sizeof(*queue), DRM_MEM_QUEUES); - printk("out of memory\n"); - return -ENOMEM; - } - } - - memset(queue, 0, sizeof(*queue)); - atomic_set(&queue->use_count, 1); - atomic_set(&queue->finalization, 0); - atomic_set(&queue->block_count, 0); - atomic_set(&queue->block_read, 0); - atomic_set(&queue->block_write, 0); - atomic_set(&queue->total_queued, 0); - atomic_set(&queue->total_flushed, 0); - atomic_set(&queue->total_locks, 0); - - init_waitqueue_head(&queue->write_queue); - init_waitqueue_head(&queue->read_queue); - init_waitqueue_head(&queue->flush_queue); - - queue->flags = 0; - - drm_waitlist_create(&queue->waitlist, dev->dma->buf_count); - - dev->queue_slots = 1; - dev->queuelist[DRM_KERNEL_CONTEXT] = queue; - dev->queue_count--; - - up(&dev->struct_sem); - printk("%d (new)\n", dev->queue_count - 1); - return DRM_KERNEL_CONTEXT; + return; } -static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) { - drm_mga_private_t *dev_priv; - drm_map_t *prim_map = NULL; - drm_map_t *sarea_map = NULL; - int temp; +void mga_flush_write_combine(void) +{ + int xchangeDummy; + DRM_DEBUG("%s\n", __FUNCTION__); + __asm__ volatile(" push %%eax ; xchg %%eax, %0 ; pop %%eax" : : "m" (xchangeDummy)); + __asm__ volatile(" push %%eax ; push %%ebx ; push %%ecx ; push %%edx ;" + " movl $0,%%eax ; cpuid ; pop %%edx ; pop %%ecx ; pop %%ebx ;" + " pop %%eax" : /* no outputs */ : /* no inputs */ ); +} - dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); - if(dev_priv == NULL) return -ENOMEM; - dev->dev_private = (void *) dev_priv; +/* These are two age tags that will never be sent to + * the hardware */ +#define MGA_BUF_USED 0xffffffff +#define MGA_BUF_FREE 0 - printk("dev_private\n"); +static int mga_freelist_init(drm_device_t *dev) +{ + drm_device_dma_t *dma = dev->dma; + drm_buf_t *buf; + drm_mga_buf_priv_t *buf_priv; + drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private; + drm_mga_freelist_t *item; + int i; - memset(dev_priv, 0, sizeof(drm_mga_private_t)); - atomic_set(&dev_priv->pending_bufs, 0); + DRM_DEBUG("%s\n", __FUNCTION__); - if((init->reserved_map_idx >= dev->map_count) || - (init->buffer_map_idx >= dev->map_count)) { - mga_dma_cleanup(dev); - printk("reserved_map or buffer_map are invalid\n"); - return -EINVAL; - } + dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); + if(dev_priv->head == NULL) return -ENOMEM; + memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t)); + dev_priv->head->age = MGA_BUF_USED; - if(mga_alloc_kernel_queue(dev) != DRM_KERNEL_CONTEXT) { - mga_dma_cleanup(dev); - DRM_ERROR("Kernel context queue not present\n"); + for (i = 0; i < dma->buf_count; i++) { + buf = dma->buflist[ i ]; + buf_priv = buf->dev_private; + item = drm_alloc(sizeof(drm_mga_freelist_t), + DRM_MEM_DRIVER); + if(item == NULL) return -ENOMEM; + memset(item, 0, sizeof(drm_mga_freelist_t)); + item->age = MGA_BUF_FREE; + item->prev = dev_priv->head; + item->next = dev_priv->head->next; + if(dev_priv->head->next != NULL) + dev_priv->head->next->prev = item; + if(item->next == NULL) dev_priv->tail = item; + item->buf = buf; + buf_priv->my_freelist = 
item; + buf_priv->discard = 0; + buf_priv->dispatched = 0; + dev_priv->head->next = item; } - - dev_priv->reserved_map_idx = init->reserved_map_idx; - dev_priv->buffer_map_idx = init->buffer_map_idx; - sarea_map = dev->maplist[0]; - dev_priv->sarea_priv = (drm_mga_sarea_t *) - ((u8 *)sarea_map->handle + - init->sarea_priv_offset); - printk("sarea_priv\n"); - - /* Scale primary size to the next page */ - dev_priv->primary_size = ((init->primary_size + PAGE_SIZE - 1) / - PAGE_SIZE) * PAGE_SIZE; - dev_priv->warp_ucode_size = init->warp_ucode_size; - dev_priv->chipset = init->chipset; - dev_priv->fbOffset = init->fbOffset; - dev_priv->backOffset = init->backOffset; - dev_priv->depthOffset = init->depthOffset; - dev_priv->textureOffset = init->textureOffset; - dev_priv->textureSize = init->textureSize; - dev_priv->cpp = init->cpp; - dev_priv->sgram = init->sgram; - dev_priv->stride = init->stride; - - dev_priv->frontOrg = init->frontOrg; - dev_priv->backOrg = init->backOrg; - dev_priv->depthOrg = init->depthOrg; - dev_priv->mAccess = init->mAccess; - - printk("memcpy\n"); - memcpy(&dev_priv->WarpIndex, &init->WarpIndex, - sizeof(mgaWarpIndex) * MGA_MAX_WARP_PIPES); - printk("memcpy done\n"); - prim_map = dev->maplist[init->reserved_map_idx]; - dev_priv->prim_phys_head = dev->agp->base + init->reserved_map_agpstart; - temp = init->warp_ucode_size + dev_priv->primary_size; - temp = ((temp + PAGE_SIZE - 1) / - PAGE_SIZE) * PAGE_SIZE; - printk("temp : %x\n", temp); - printk("dev->agp->base: %lx\n", dev->agp->base); - printk("init->reserved_map_agpstart: %x\n", init->reserved_map_agpstart); - - - dev_priv->ioremap = drm_ioremap(dev->agp->base + init->reserved_map_agpstart, - temp); - if(dev_priv->ioremap == NULL) { - printk("Ioremap failed\n"); - mga_dma_cleanup(dev); - return -ENOMEM; - } - - + return 0; +} - dev_priv->prim_head = (u32 *)dev_priv->ioremap; - printk("dev_priv->prim_head : %p\n", dev_priv->prim_head); - dev_priv->current_dma_ptr = dev_priv->prim_head; - dev_priv->prim_num_dwords = 0; - dev_priv->prim_max_dwords = dev_priv->primary_size / 4; - - printk("dma initialization\n"); +static void mga_freelist_cleanup(drm_device_t *dev) +{ + drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private; + drm_mga_freelist_t *item; + drm_mga_freelist_t *prev; - /* Private is now filled in, initialize the hardware */ - { - PRIMLOCALS; - PRIMRESET( dev_priv ); - PRIMGETPTR( dev_priv ); - PRIMOUTREG(MGAREG_DMAPAD, 0); - PRIMOUTREG(MGAREG_DMAPAD, 0); - PRIMOUTREG(MGAREG_DWGSYNC, 0); - PRIMOUTREG(MGAREG_SOFTRAP, 0); - PRIMADVANCE( dev_priv ); + DRM_DEBUG("%s\n", __FUNCTION__); - /* Poll for the first buffer to insure that - * the status register will be correct - */ - printk("phys_head : %lx\n", phys_head); + item = dev_priv->head; + while(item) { + prev = item; + item = item->next; + drm_free(prev, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); + } - MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG); + dev_priv->head = dev_priv->tail = NULL; +} - while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) { - int i; - for(i = 0 ; i < 4096; i++) mga_delay(); +/* Frees dispatch lock */ +static inline void mga_dma_quiescent(drm_device_t *dev) +{ + drm_device_dma_t *dma = dev->dma; + drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private; + drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; + unsigned long end; + int i; + + DRM_DEBUG("%s\n", __FUNCTION__); + end = jiffies + (HZ*3); + while(1) { + if(!test_and_set_bit(MGA_IN_DISPATCH, + &dev_priv->dispatch_status)) { + break; } - - 
MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL); - - MGA_WRITE(MGAREG_PRIMEND, ((phys_head + num_dwords * 4) | - PDEA_pagpxfer_enable)); - - while(MGA_READ(MGAREG_DWGSYNC) == MGA_SYNC_TAG) { - int i; - for(i = 0; i < 4096; i++) mga_delay(); + if((signed)(end - jiffies) <= 0) { + DRM_ERROR("irqs: %d wanted %d\n", + atomic_read(&dev->total_irq), + atomic_read(&dma->total_lost)); + DRM_ERROR("lockup\n"); + goto out_nolock; + } + for (i = 0 ; i < 2000 ; i++) mga_delay(); + } + end = jiffies + (HZ*3); + DRM_DEBUG("quiescent status : %x\n", MGA_READ(MGAREG_STATUS)); + while((MGA_READ(MGAREG_STATUS) & 0x00030001) != 0x00020000) { + if((signed)(end - jiffies) <= 0) { + DRM_ERROR("irqs: %d wanted %d\n", + atomic_read(&dev->total_irq), + atomic_read(&dma->total_lost)); + DRM_ERROR("lockup\n"); + goto out_status; } + for (i = 0 ; i < 2000 ; i++) mga_delay(); + } + sarea_priv->dirty |= MGA_DMA_FLUSH; + +out_status: + clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status); +out_nolock: +} +static void mga_reset_freelist(drm_device_t *dev) +{ + drm_device_dma_t *dma = dev->dma; + drm_buf_t *buf; + drm_mga_buf_priv_t *buf_priv; + int i; + + for (i = 0; i < dma->buf_count; i++) { + buf = dma->buflist[ i ]; + buf_priv = buf->dev_private; + buf_priv->my_freelist->age = MGA_BUF_FREE; } - - printk("dma init was successful\n"); - return 0; } -int mga_dma_init(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +/* Least recently used : + * These operations are not atomic b/c they are protected by the + * hardware lock */ + +drm_buf_t *mga_freelist_get(drm_device_t *dev) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - drm_mga_init_t init; + DECLARE_WAITQUEUE(entry, current); + drm_mga_private_t *dev_priv = + (drm_mga_private_t *) dev->dev_private; + drm_mga_freelist_t *prev; + drm_mga_freelist_t *next; + static int failed = 0; + + DRM_DEBUG("%s : tail->age : %d last_prim_age : %d\n", __FUNCTION__, + dev_priv->tail->age, dev_priv->last_prim_age); - copy_from_user_ret(&init, (drm_mga_init_t *)arg, sizeof(init), -EFAULT); + if(failed >= 1000 && dev_priv->tail->age >= dev_priv->last_prim_age) { + DRM_DEBUG("I'm waiting on the freelist!!! 
%d\n", + dev_priv->last_prim_age); + set_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status); + current->state = TASK_INTERRUPTIBLE; + add_wait_queue(&dev_priv->buf_queue, &entry); + for (;;) { + mga_dma_schedule(dev, 0); + if(!test_bit(MGA_IN_GETBUF, + &dev_priv->dispatch_status)) + break; + atomic_inc(&dev->total_sleeps); + schedule(); + if (signal_pending(current)) { + clear_bit(MGA_IN_GETBUF, + &dev_priv->dispatch_status); + goto failed_getbuf; + } + } + current->state = TASK_RUNNING; + remove_wait_queue(&dev_priv->buf_queue, &entry); + } - switch(init.func) { - case MGA_INIT_DMA: - return mga_dma_initialize(dev, &init); - case MGA_CLEANUP_DMA: - return mga_dma_cleanup(dev); + if(dev_priv->tail->age < dev_priv->last_prim_age) { + prev = dev_priv->tail->prev; + next = dev_priv->tail; + prev->next = NULL; + next->prev = next->next = NULL; + dev_priv->tail = prev; + next->age = MGA_BUF_USED; + failed = 0; + return next->buf; } - return -EINVAL; +failed_getbuf: + failed++; + return NULL; } -#define MGA_ILOAD_CMD (DC_opcod_iload | DC_atype_rpl | \ - DC_linear_linear | DC_bltmod_bfcol | \ - (0xC << DC_bop_SHIFT) | DC_sgnzero_enable | \ - DC_shftzero_enable | DC_clipdis_enable) - -static void __mga_iload_small(drm_device_t *dev, - drm_buf_t *buf, - int use_agp) +int mga_freelist_put(drm_device_t *dev, drm_buf_t *buf) { - drm_mga_private_t *dev_priv = dev->dev_private; + drm_mga_private_t *dev_priv = + (drm_mga_private_t *) dev->dev_private; drm_mga_buf_priv_t *buf_priv = buf->dev_private; - drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; - unsigned long address = (unsigned long)buf->bus_address; - int length = buf->used; - int y1 = buf_priv->boxes[0].y1; - int x1 = buf_priv->boxes[0].x1; - int y2 = buf_priv->boxes[0].y2; - int x2 = buf_priv->boxes[0].x2; - int dstorg = buf_priv->ContextState[MGA_CTXREG_DSTORG]; - int maccess = buf_priv->ContextState[MGA_CTXREG_MACCESS]; - PRIMLOCALS; - - PRIMRESET(dev_priv); - PRIMGETPTR(dev_priv); + drm_mga_freelist_t *prev; + drm_mga_freelist_t *head; + drm_mga_freelist_t *next; + + DRM_DEBUG("%s\n", __FUNCTION__); + + if(buf_priv->my_freelist->age == MGA_BUF_USED) { + /* Discarded buffer, put it on the tail */ + next = buf_priv->my_freelist; + next->age = MGA_BUF_FREE; + prev = dev_priv->tail; + prev->next = next; + next->prev = prev; + next->next = NULL; + dev_priv->tail = next; + DRM_DEBUG("Discarded\n"); + } else { + /* Normally aged buffer, put it on the head + 1, + * as the real head is a sentinal element + */ + next = buf_priv->my_freelist; + head = dev_priv->head; + prev = head->next; + head->next = next; + prev->prev = next; + next->prev = head; + next->next = prev; + } - PRIMOUTREG(MGAREG_DSTORG, dstorg | use_agp); - PRIMOUTREG(MGAREG_MACCESS, maccess); - PRIMOUTREG(MGAREG_PITCH, (1 << 15)); - PRIMOUTREG(MGAREG_YDST, y1 * (x2 - x1)); - PRIMOUTREG(MGAREG_LEN, 1); - PRIMOUTREG(MGAREG_FXBNDRY, ((x2 - x1) * (y2 - y1) - 1) << 16); - PRIMOUTREG(MGAREG_AR0, (x2 - x1) * (y2 - y1) - 1); - PRIMOUTREG(MGAREG_AR3, 0); - PRIMOUTREG(MGAREG_DMAPAD, 0); - PRIMOUTREG(MGAREG_DMAPAD, 0); - PRIMOUTREG(MGAREG_DMAPAD, 0); - PRIMOUTREG(MGAREG_DWGCTL+MGAREG_MGA_EXEC, MGA_ILOAD_CMD); - PRIMOUTREG(MGAREG_DMAPAD, 0); - PRIMOUTREG(MGAREG_DMAPAD, 0); - PRIMOUTREG(MGAREG_SECADDRESS, address | TT_BLIT); - PRIMOUTREG(MGAREG_SECEND, (address + length) | use_agp); - PRIMOUTREG(MGAREG_DMAPAD, 0); - PRIMOUTREG(MGAREG_DMAPAD, 0); - PRIMOUTREG(MGAREG_DWGSYNC, 0); - PRIMOUTREG(MGAREG_SOFTRAP, 0); - PRIMADVANCE(dev_priv); -#if 0 - /* For now we need to set this in the ioctl */ - 
sarea_priv->dirty |= MGASAREA_NEW_CONTEXT; -#endif - MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG); - while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ; - - MGA_WRITE(MGAREG_PRIMADDRESS, dev_priv->prim_phys_head | TT_GENERAL); - MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp); + return 0; } -static void __mga_iload_xy(drm_device_t *dev, - drm_buf_t *buf, - int use_agp) +static int mga_init_primary_bufs(drm_device_t *dev, drm_mga_init_t *init) { - drm_mga_private_t *dev_priv = dev->dev_private; - drm_mga_buf_priv_t *buf_priv = buf->dev_private; - drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; - unsigned long address = (unsigned long)buf->bus_address; - int length = buf->used; - int y1 = buf_priv->boxes[0].y1; - int x1 = buf_priv->boxes[0].x1; - int y2 = buf_priv->boxes[0].y2; - int x2 = buf_priv->boxes[0].x2; - int dstorg = buf_priv->ContextState[MGA_CTXREG_DSTORG]; - int maccess = buf_priv->ContextState[MGA_CTXREG_MACCESS]; - int pitch = buf_priv->ServerState[MGA_2DREG_PITCH]; - int width, height; - int texperdword = 0; - PRIMLOCALS; - - width = (x2 - x1); - height = (y2 - y1); - switch((maccess & 0x00000003)) { - case 0: - texperdword = 4; - break; - case 1: - texperdword = 2; - break; - case 2: - texperdword = 1; - break; - default: - DRM_ERROR("Invalid maccess value passed to __mga_iload_xy\n"); - return; + drm_mga_private_t *dev_priv = dev->dev_private; + drm_mga_prim_buf_t *prim_buffer; + int i, temp, size_of_buf; + int offset = init->reserved_map_agpstart; + + DRM_DEBUG("%s\n", __FUNCTION__); + dev_priv->primary_size = ((init->primary_size + PAGE_SIZE - 1) / + PAGE_SIZE) * PAGE_SIZE; + size_of_buf = dev_priv->primary_size / MGA_NUM_PRIM_BUFS; + dev_priv->warp_ucode_size = init->warp_ucode_size; + dev_priv->prim_bufs = drm_alloc(sizeof(drm_mga_prim_buf_t *) * + (MGA_NUM_PRIM_BUFS + 1), + DRM_MEM_DRIVER); + if(dev_priv->prim_bufs == NULL) { + DRM_ERROR("Unable to allocate memory for prim_buf\n"); + return -ENOMEM; } + memset(dev_priv->prim_bufs, + 0, sizeof(drm_mga_prim_buf_t *) * (MGA_NUM_PRIM_BUFS + 1)); - x2 = x1 + width; - x2 = (x2 + (texperdword - 1)) & ~(texperdword - 1); - x1 = (x1 + (texperdword - 1)) & ~(texperdword - 1); - width = x2 - x1; - - PRIMRESET(dev_priv); - PRIMGETPTR(dev_priv); - PRIMOUTREG(MGAREG_DSTORG, dstorg | use_agp); - PRIMOUTREG(MGAREG_MACCESS, maccess); - PRIMOUTREG(MGAREG_PITCH, pitch); - PRIMOUTREG(MGAREG_YDSTLEN, (y1 << 16) | height); - - PRIMOUTREG(MGAREG_FXBNDRY, ((x1+width-1) << 16) | x1); - PRIMOUTREG(MGAREG_AR0, width * height - 1); - PRIMOUTREG(MGAREG_AR3, 0 ); - PRIMOUTREG(MGAREG_DWGCTL+MGAREG_MGA_EXEC, MGA_ILOAD_CMD); - - PRIMOUTREG(MGAREG_DMAPAD, 0); - PRIMOUTREG(MGAREG_DMAPAD, 0); - PRIMOUTREG(MGAREG_SECADDRESS, address | TT_BLIT); - PRIMOUTREG(MGAREG_SECEND, (address + length) | use_agp); + temp = init->warp_ucode_size + dev_priv->primary_size; + temp = ((temp + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE; - PRIMOUTREG(MGAREG_DMAPAD, 0); - PRIMOUTREG(MGAREG_DMAPAD, 0); - PRIMOUTREG(MGAREG_DWGSYNC, 0); - PRIMOUTREG(MGAREG_SOFTRAP, 0); - PRIMADVANCE(dev_priv); -#if 0 - /* For now we need to set this in the ioctl */ - sarea_priv->dirty |= MGASAREA_NEW_CONTEXT; -#endif - MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG); - while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ; - - MGA_WRITE(MGAREG_PRIMADDRESS, dev_priv->prim_phys_head | TT_GENERAL); - MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp); + dev_priv->ioremap = drm_ioremap(dev->agp->base + offset, + temp); + if(dev_priv->ioremap == NULL) { + DRM_DEBUG("Ioremap failed\n"); 
+ return -ENOMEM; + } + init_waitqueue_head(&dev_priv->wait_queue); + + for(i = 0; i < MGA_NUM_PRIM_BUFS; i++) { + prim_buffer = drm_alloc(sizeof(drm_mga_prim_buf_t), + DRM_MEM_DRIVER); + if(prim_buffer == NULL) return -ENOMEM; + memset(prim_buffer, 0, sizeof(drm_mga_prim_buf_t)); + prim_buffer->phys_head = offset + dev->agp->base; + prim_buffer->current_dma_ptr = + prim_buffer->head = + (u32 *) (dev_priv->ioremap + + offset - + init->reserved_map_agpstart); + prim_buffer->num_dwords = 0; + prim_buffer->max_dwords = size_of_buf / sizeof(u32); + prim_buffer->max_dwords -= 5; /* Leave room for the softrap */ + prim_buffer->sec_used = 0; + prim_buffer->idx = i; + prim_buffer->prim_age = i + 1; + offset = offset + size_of_buf; + dev_priv->prim_bufs[i] = prim_buffer; + } + dev_priv->current_prim_idx = 0; + dev_priv->next_prim = + dev_priv->last_prim = + dev_priv->current_prim = + dev_priv->prim_bufs[0]; + dev_priv->next_prim_age = 2; + dev_priv->last_prim_age = 1; + set_bit(MGA_BUF_IN_USE, &dev_priv->current_prim->buffer_status); + return 0; } -static void mga_dma_dispatch_iload(drm_device_t *dev, drm_buf_t *buf) +void mga_fire_primary(drm_device_t *dev, drm_mga_prim_buf_t *prim) { - drm_mga_buf_priv_t *buf_priv = buf->dev_private; - - int use_agp = PDEA_pagpxfer_enable; - int x1 = buf_priv->boxes[0].x1; - int x2 = buf_priv->boxes[0].x2; + drm_mga_private_t *dev_priv = dev->dev_private; + drm_device_dma_t *dma = dev->dma; + drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; + int use_agp = PDEA_pagpxfer_enable; + unsigned long end; + int i; + int next_idx; + PRIMLOCALS; + + DRM_DEBUG("%s\n", __FUNCTION__); + dev_priv->last_prim = prim; - if((x2 - x1) < 32) { - printk("using iload small\n"); - __mga_iload_small(dev, buf, use_agp); + /* We never check for overflow, b/c there is always room */ + PRIMPTR(prim); + if(num_dwords <= 0) { + DRM_DEBUG("num_dwords == 0 when dispatched\n"); + goto out_prim_wait; + } + PRIMOUTREG( MGAREG_DMAPAD, 0); + PRIMOUTREG( MGAREG_DMAPAD, 0); + PRIMOUTREG( MGAREG_DMAPAD, 0); + PRIMOUTREG( MGAREG_SOFTRAP, 0); + PRIMFINISH(prim); + + end = jiffies + (HZ*3); + if(sarea_priv->dirty & MGA_DMA_FLUSH) { + DRM_DEBUG("Dma top flush\n"); + while((MGA_READ(MGAREG_STATUS) & 0x00030001) != 0x00020000) { + if((signed)(end - jiffies) <= 0) { + DRM_ERROR("irqs: %d wanted %d\n", + atomic_read(&dev->total_irq), + atomic_read(&dma->total_lost)); + DRM_ERROR("lockup in fire primary " + "(Dma Top Flush)\n"); + goto out_prim_wait; + } + + for (i = 0 ; i < 4096 ; i++) mga_delay(); + } + sarea_priv->dirty &= ~(MGA_DMA_FLUSH); } else { - printk("using iload xy\n"); - __mga_iload_xy(dev, buf, use_agp); - } -} - -static void mga_dma_dispatch_vertex(drm_device_t *dev, drm_buf_t *buf) -{ - drm_mga_private_t *dev_priv = dev->dev_private; - drm_mga_buf_priv_t *buf_priv = buf->dev_private; - unsigned long address = (unsigned long)buf->bus_address; - int length = buf->used; - int use_agp = PDEA_pagpxfer_enable; - int i, count; - PRIMLOCALS; - - PRIMRESET(dev_priv); - - count = buf_priv->nbox; - if (count == 0) - count = 1; - - mgaEmitState( dev_priv, buf_priv ); - - for (i = 0 ; i < count ; i++) { - if (i < buf_priv->nbox) - mgaEmitClipRect( dev_priv, &buf_priv->boxes[i] ); - - PRIMGETPTR(dev_priv); - PRIMOUTREG( MGAREG_DMAPAD, 0); - PRIMOUTREG( MGAREG_DMAPAD, 0); - PRIMOUTREG( MGAREG_SECADDRESS, address | TT_VERTEX); - PRIMOUTREG( MGAREG_SECEND, (address + length) | use_agp); - - PRIMOUTREG( MGAREG_DMAPAD, 0); - PRIMOUTREG( MGAREG_DMAPAD, 0); - PRIMOUTREG( MGAREG_DWGSYNC, 0); - PRIMOUTREG( 
MGAREG_SOFTRAP, 0); - PRIMADVANCE(dev_priv); + DRM_DEBUG("Status wait\n"); + while((MGA_READ(MGAREG_STATUS) & 0x00020001) != 0x00020000) { + if((signed)(end - jiffies) <= 0) { + DRM_ERROR("irqs: %d wanted %d\n", + atomic_read(&dev->total_irq), + atomic_read(&dma->total_lost)); + DRM_ERROR("lockup in fire primary " + "(Status Wait)\n"); + goto out_prim_wait; + } + + for (i = 0 ; i < 4096 ; i++) mga_delay(); + } } - PRIMGETPTR( dev_priv ); - - MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG); - while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ; - - MGA_WRITE(MGAREG_PRIMADDRESS, dev_priv->prim_phys_head | TT_GENERAL); - MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp); + mga_flush_write_combine(); + atomic_inc(&dev_priv->pending_bufs); + MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL); + MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp); + prim->num_dwords = 0; + sarea_priv->last_enqueue = prim->prim_age; + + next_idx = prim->idx + 1; + if(next_idx >= MGA_NUM_PRIM_BUFS) + next_idx = 0; + + dev_priv->next_prim = dev_priv->prim_bufs[next_idx]; + return; + + out_prim_wait: + prim->num_dwords = 0; + prim->sec_used = 0; + clear_bit(MGA_BUF_IN_USE, &prim->buffer_status); + wake_up_interruptible(&dev_priv->wait_queue); + clear_bit(MGA_BUF_SWAP_PENDING, &prim->buffer_status); + clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status); } - -/* Used internally for the small buffers generated from client state - * information. - */ -static void mga_dma_dispatch_general(drm_device_t *dev, drm_buf_t *buf) +int mga_advance_primary(drm_device_t *dev) { + DECLARE_WAITQUEUE(entry, current); drm_mga_private_t *dev_priv = dev->dev_private; - unsigned long address = (unsigned long)buf->bus_address; - int length = buf->used; - int use_agp = PDEA_pagpxfer_enable; - PRIMLOCALS; - - PRIMRESET(dev_priv); - PRIMGETPTR(dev_priv); - - PRIMOUTREG( MGAREG_DMAPAD, 0); - PRIMOUTREG( MGAREG_DMAPAD, 0); - PRIMOUTREG( MGAREG_SECADDRESS, address | TT_GENERAL); - PRIMOUTREG( MGAREG_SECEND, (address + length) | use_agp); - - PRIMOUTREG( MGAREG_DMAPAD, 0); - PRIMOUTREG( MGAREG_DMAPAD, 0); - PRIMOUTREG( MGAREG_DWGSYNC, 0); - PRIMOUTREG( MGAREG_SOFTRAP, 0); - PRIMADVANCE(dev_priv); + drm_mga_prim_buf_t *prim_buffer; + drm_device_dma_t *dma = dev->dma; + int next_prim_idx; + int ret = 0; + + /* This needs to reset the primary buffer if available, + * we should collect stats on how many times it bites + * it's tail */ + DRM_DEBUG("%s\n", __FUNCTION__); + + next_prim_idx = dev_priv->current_prim_idx + 1; + if(next_prim_idx >= MGA_NUM_PRIM_BUFS) + next_prim_idx = 0; + prim_buffer = dev_priv->prim_bufs[next_prim_idx]; + set_bit(MGA_IN_WAIT, &dev_priv->dispatch_status); + + /* In use is cleared in interrupt handler */ + + if(test_and_set_bit(MGA_BUF_IN_USE, &prim_buffer->buffer_status)) { + add_wait_queue(&dev_priv->wait_queue, &entry); + current->state = TASK_INTERRUPTIBLE; + + for (;;) { + mga_dma_schedule(dev, 0); + if(!test_and_set_bit(MGA_BUF_IN_USE, + &prim_buffer->buffer_status)) + break; + atomic_inc(&dev->total_sleeps); + atomic_inc(&dma->total_missed_sched); + schedule(); + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + } + current->state = TASK_RUNNING; + remove_wait_queue(&dev_priv->wait_queue, &entry); + if(ret) return ret; + } + clear_bit(MGA_IN_WAIT, &dev_priv->dispatch_status); + + /* This primary buffer is now free to use */ + prim_buffer->current_dma_ptr = prim_buffer->head; + prim_buffer->num_dwords = 0; + prim_buffer->sec_used = 0; + prim_buffer->prim_age = 
dev_priv->next_prim_age++; + if(prim_buffer->prim_age == 0 || prim_buffer->prim_age == 0xffffffff) { + mga_flush_queue(dev); + mga_dma_quiescent(dev); + mga_reset_freelist(dev); + prim_buffer->prim_age = (dev_priv->next_prim_age += 2); + } - MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG); - while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ; + /* Reset all buffer status stuff */ + clear_bit(MGA_BUF_NEEDS_OVERFLOW, &prim_buffer->buffer_status); + clear_bit(MGA_BUF_FORCE_FIRE, &prim_buffer->buffer_status); + clear_bit(MGA_BUF_SWAP_PENDING, &prim_buffer->buffer_status); - MGA_WRITE(MGAREG_PRIMADDRESS, dev_priv->prim_phys_head | TT_GENERAL); - MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp); + dev_priv->current_prim = prim_buffer; + dev_priv->current_prim_idx = next_prim_idx; + return 0; } -/* Frees dispatch lock */ -static inline void mga_dma_quiescent(drm_device_t *dev) +/* More dynamic performance decisions */ +static inline int mga_decide_to_fire(drm_device_t *dev) { drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private; + drm_device_dma_t *dma = dev->dma; - while(1) { - atomic_inc(&dev_priv->dispatch_lock); - if(atomic_read(&dev_priv->dispatch_lock) == 1) { - break; - } else { - atomic_dec(&dev_priv->dispatch_lock); - } - } - while((MGA_READ(MGAREG_STATUS) & 0x00020001) != 0x00020000) ; -#if 0 - MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG); -#endif - while(MGA_READ(MGAREG_DWGSYNC) == MGA_SYNC_TAG) ; - MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG); - while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ; - atomic_dec(&dev_priv->dispatch_lock); -} + DRM_DEBUG("%s\n", __FUNCTION__); -/* Keeps dispatch lock held */ + if(test_bit(MGA_BUF_FORCE_FIRE, &dev_priv->next_prim->buffer_status)) { + atomic_inc(&dma->total_prio); + return 1; + } -static inline int mga_dma_is_ready(drm_device_t *dev) -{ - drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private; - - atomic_inc(&dev_priv->dispatch_lock); - if(atomic_read(&dev_priv->dispatch_lock) == 1) { - /* We got the lock */ - return 1; - } else { - atomic_dec(&dev_priv->dispatch_lock); - return 0; + if (test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status) && + dev_priv->next_prim->num_dwords) { + atomic_inc(&dma->total_prio); + return 1; } -} -static inline int mga_dma_is_ready_no_hold(drm_device_t *dev) -{ - drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private; + if (test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) && + dev_priv->next_prim->num_dwords) { + atomic_inc(&dma->total_prio); + return 1; + } - atomic_inc(&dev_priv->dispatch_lock); - if(atomic_read(&dev_priv->dispatch_lock) == 1) { - /* We got the lock, but free it */ - atomic_dec(&dev_priv->dispatch_lock); - return 1; - } else { - atomic_dec(&dev_priv->dispatch_lock); - return 0; + if(atomic_read(&dev_priv->pending_bufs) <= MGA_NUM_PRIM_BUFS - 1) { + if(test_bit(MGA_BUF_SWAP_PENDING, + &dev_priv->next_prim->buffer_status)) { + atomic_inc(&dma->total_dmas); + return 1; + } } -} -static void mga_dma_service(int irq, void *device, struct pt_regs *regs) -{ - drm_device_t *dev = (drm_device_t *)device; - drm_device_dma_t *dma = dev->dma; - drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private; - - atomic_dec(&dev_priv->dispatch_lock); - atomic_inc(&dev->total_irq); - MGA_WRITE(MGAREG_ICLEAR, 0xfa7); - - /* Free previous buffer */ - if (test_and_set_bit(0, &dev->dma_flag)) { - atomic_inc(&dma->total_missed_free); - return; - } - if (dma->this_buffer) { - drm_free_buffer(dev, dma->this_buffer); - dma->this_buffer = NULL; + 
if(atomic_read(&dev_priv->pending_bufs) <= MGA_NUM_PRIM_BUFS / 2) { + if(dev_priv->next_prim->sec_used >= MGA_DMA_BUF_NR / 8) { + atomic_inc(&dma->total_hit); + return 1; + } } - clear_bit(0, &dev->dma_flag); - /* Dispatch new buffer */ - queue_task(&dev->tq, &tq_immediate); - mark_bh(IMMEDIATE_BH); + if(atomic_read(&dev_priv->pending_bufs) >= MGA_NUM_PRIM_BUFS / 2) { + if(dev_priv->next_prim->sec_used >= MGA_DMA_BUF_NR / 4) { + atomic_inc(&dma->total_missed_free); + return 1; + } + } + atomic_inc(&dma->total_tried); + return 0; } -/* Only called by mga_dma_schedule. */ -static int mga_do_dma(drm_device_t *dev, int locked) +int mga_dma_schedule(drm_device_t *dev, int locked) { - drm_buf_t *buf; - int retcode = 0; - drm_device_dma_t *dma = dev->dma; drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private; - drm_mga_buf_priv_t *buf_priv; + drm_device_dma_t *dma = dev->dma; + int retval = 0; - printk("mga_do_dma\n"); - if (test_and_set_bit(0, &dev->dma_flag)) { + if (test_and_set_bit(0, &dev->dma_flag)) { atomic_inc(&dma->total_missed_dma); - return -EBUSY; - } - - if (!dma->next_buffer) { - DRM_ERROR("No next_buffer\n"); - clear_bit(0, &dev->dma_flag); - return -EINVAL; - } - - buf = dma->next_buffer; - - printk("context %d, buffer %d\n", buf->context, buf->idx); - - if (buf->list == DRM_LIST_RECLAIM) { - drm_clear_next_buffer(dev); - drm_free_buffer(dev, buf); - clear_bit(0, &dev->dma_flag); - return -EINVAL; + retval = -EBUSY; + goto sch_out_wakeup; } + + DRM_DEBUG("%s\n", __FUNCTION__); - if (!buf->used) { - DRM_ERROR("0 length buffer\n"); - drm_clear_next_buffer(dev); - drm_free_buffer(dev, buf); - clear_bit(0, &dev->dma_flag); - return 0; - } - - if (mga_dma_is_ready(dev) == 0) { - clear_bit(0, &dev->dma_flag); - return -EBUSY; + if(test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) || + test_bit(MGA_IN_WAIT, &dev_priv->dispatch_status) || + test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status)) { + locked = 1; } - /* Always hold the hardware lock while dispatching. 
- */ - if (!locked && !drm_lock_take(&dev->lock.hw_lock->lock, - DRM_KERNEL_CONTEXT)) { - atomic_inc(&dma->total_missed_lock); - clear_bit(0, &dev->dma_flag); - atomic_dec(&dev_priv->dispatch_lock); - return -EBUSY; + if (!locked && + !drm_lock_take(&dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT)) { + atomic_inc(&dma->total_missed_lock); + clear_bit(0, &dev->dma_flag); + DRM_DEBUG("Not locked\n"); + retval = -EBUSY; + goto sch_out_wakeup; } - - dma->next_queue = dev->queuelist[DRM_KERNEL_CONTEXT]; - drm_clear_next_buffer(dev); - buf->pending = 1; - buf->waiting = 0; - buf->list = DRM_LIST_PEND; - - buf_priv = buf->dev_private; - - printk("dispatch!\n"); - switch (buf_priv->dma_type) { - case MGA_DMA_GENERAL: - mga_dma_dispatch_general(dev, buf); - break; - case MGA_DMA_VERTEX: - mga_dma_dispatch_vertex(dev, buf); - break; -/* case MGA_DMA_SETUP: */ -/* mga_dma_dispatch_setup(dev, address, length); */ -/* break; */ - case MGA_DMA_ILOAD: - mga_dma_dispatch_iload(dev, buf); - break; - default: - printk("bad buffer type %x in dispatch\n", buf_priv->dma_type); - break; + DRM_DEBUG("I'm locked\n"); + + if(!test_and_set_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status)) { + /* Fire dma buffer */ + if(mga_decide_to_fire(dev)) { + DRM_DEBUG("idx :%d\n", dev_priv->next_prim->idx); + clear_bit(MGA_BUF_FORCE_FIRE, + &dev_priv->next_prim->buffer_status); + if(dev_priv->current_prim == dev_priv->next_prim) { + /* Schedule overflow for a later time */ + set_bit(MGA_BUF_NEEDS_OVERFLOW, + &dev_priv->next_prim->buffer_status); + } + mga_fire_primary(dev, dev_priv->next_prim); + } else { + clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status); + } + } else { + DRM_DEBUG("I can't get the dispatch lock\n"); } - atomic_dec(&dev_priv->pending_bufs); - - drm_free_buffer(dev, dma->this_buffer); - dma->this_buffer = buf; - - atomic_add(buf->used, &dma->total_bytes); - atomic_inc(&dma->total_dmas); - + if (!locked) { if (drm_lock_free(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT)) { @@ -692,90 +624,242 @@ static int mga_do_dma(drm_device_t *dev, int locked) } } - clear_bit(0, &dev->dma_flag); - - if(!atomic_read(&dev_priv->pending_bufs)) { - wake_up_interruptible(&dev->queuelist[DRM_KERNEL_CONTEXT]->flush_queue); - } - -#if 0 - wake_up_interruptible(&dev->lock.lock_queue); -#endif - - /* We hold the dispatch lock until the interrupt handler - * frees it - */ - return retcode; +sch_out_wakeup: + if(test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) && + atomic_read(&dev_priv->pending_bufs) == 0) { + /* Everything has been processed by the hardware */ + clear_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status); + wake_up_interruptible(&dev_priv->flush_queue); + } + + if(test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status) && + dev_priv->tail->age < dev_priv->last_prim_age) { + clear_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status); + DRM_DEBUG("Waking up buf queue\n"); + wake_up_interruptible(&dev_priv->buf_queue); + } else if (test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status)) { + DRM_DEBUG("Not waking buf_queue on %d %d\n", + atomic_read(&dev->total_irq), + dev_priv->last_prim_age); + } + + clear_bit(0, &dev->dma_flag); + return retval; } -static void mga_dma_schedule_timer_wrapper(unsigned long dev) +static void mga_dma_service(int irq, void *device, struct pt_regs *regs) { - mga_dma_schedule((drm_device_t *)dev, 0); + drm_device_t *dev = (drm_device_t *)device; + drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private; + drm_mga_prim_buf_t *last_prim_buffer; + + DRM_DEBUG("%s\n", __FUNCTION__); + 
atomic_inc(&dev->total_irq); + if((MGA_READ(MGAREG_STATUS) & 0x00000001) != 0x00000001) return; + MGA_WRITE(MGAREG_ICLEAR, 0x00000001); + last_prim_buffer = dev_priv->last_prim; + last_prim_buffer->num_dwords = 0; + last_prim_buffer->sec_used = 0; + dev_priv->sarea_priv->last_dispatch = + dev_priv->last_prim_age = last_prim_buffer->prim_age; + clear_bit(MGA_BUF_IN_USE, &last_prim_buffer->buffer_status); + wake_up_interruptible(&dev_priv->wait_queue); + clear_bit(MGA_BUF_SWAP_PENDING, &last_prim_buffer->buffer_status); + clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status); + atomic_dec(&dev_priv->pending_bufs); + queue_task(&dev->tq, &tq_immediate); + mark_bh(IMMEDIATE_BH); } -static void mga_dma_schedule_tq_wrapper(void *dev) +static void mga_dma_task_queue(void *device) { - mga_dma_schedule(dev, 0); + DRM_DEBUG("%s\n", __FUNCTION__); + mga_dma_schedule((drm_device_t *)device, 0); } -int mga_dma_schedule(drm_device_t *dev, int locked) +int mga_dma_cleanup(drm_device_t *dev) { - drm_queue_t *q; - drm_buf_t *buf; - int retcode = 0; - int processed = 0; - int missed; - int expire = 20; - drm_device_dma_t *dma = dev->dma; - - printk("mga_dma_schedule\n"); - - if (test_and_set_bit(0, &dev->interrupt_flag)) { - /* Not reentrant */ - atomic_inc(&dma->total_missed_sched); - return -EBUSY; - } - missed = atomic_read(&dma->total_missed_sched); + DRM_DEBUG("%s\n", __FUNCTION__); -again: - /* There is only one queue: - */ - if (!dma->next_buffer && DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) { - q = dev->queuelist[DRM_KERNEL_CONTEXT]; - buf = drm_waitlist_get(&q->waitlist); - dma->next_buffer = buf; - dma->next_queue = q; - if (buf && buf->list == DRM_LIST_RECLAIM) { - drm_clear_next_buffer(dev); - drm_free_buffer(dev, buf); + if(dev->dev_private) { + drm_mga_private_t *dev_priv = + (drm_mga_private_t *) dev->dev_private; + + if(dev_priv->ioremap) { + int temp = (dev_priv->warp_ucode_size + + dev_priv->primary_size + + PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE; + + drm_ioremapfree((void *) dev_priv->ioremap, temp); + } + if(dev_priv->status_page != NULL) { + iounmap(dev_priv->status_page); } + if(dev_priv->real_status_page != 0UL) { + mga_free_page(dev, dev_priv->real_status_page); + } + if(dev_priv->prim_bufs != NULL) { + int i; + for(i = 0; i < MGA_NUM_PRIM_BUFS; i++) { + if(dev_priv->prim_bufs[i] != NULL) { + drm_free(dev_priv->prim_bufs[i], + sizeof(drm_mga_prim_buf_t), + DRM_MEM_DRIVER); + } + } + drm_free(dev_priv->prim_bufs, sizeof(void *) * + (MGA_NUM_PRIM_BUFS + 1), + DRM_MEM_DRIVER); + } + if(dev_priv->head != NULL) { + mga_freelist_cleanup(dev); + } + + + drm_free(dev->dev_private, sizeof(drm_mga_private_t), + DRM_MEM_DRIVER); + dev->dev_private = NULL; } - if (dma->next_buffer) { - if (!(retcode = mga_do_dma(dev, locked))) - ++processed; + return 0; +} + +static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) { + drm_mga_private_t *dev_priv; + drm_map_t *sarea_map = NULL; + int i; + + DRM_DEBUG("%s\n", __FUNCTION__); + + dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); + if(dev_priv == NULL) return -ENOMEM; + dev->dev_private = (void *) dev_priv; + + memset(dev_priv, 0, sizeof(drm_mga_private_t)); + + if((init->reserved_map_idx >= dev->map_count) || + (init->buffer_map_idx >= dev->map_count)) { + mga_dma_cleanup(dev); + DRM_DEBUG("reserved_map or buffer_map are invalid\n"); + return -EINVAL; } + + dev_priv->reserved_map_idx = init->reserved_map_idx; + dev_priv->buffer_map_idx = init->buffer_map_idx; + sarea_map = dev->maplist[0]; + dev_priv->sarea_priv = 
(drm_mga_sarea_t *) + ((u8 *)sarea_map->handle + + init->sarea_priv_offset); - /* Try again if we succesfully dispatched a buffer, or if someone - * tried to schedule while we were working. - */ - if (--expire) { - if (missed != atomic_read(&dma->total_missed_sched)) { - atomic_inc(&dma->total_lost); - if (mga_dma_is_ready_no_hold(dev)) - goto again; - } + /* Scale primary size to the next page */ + dev_priv->chipset = init->chipset; + dev_priv->frontOffset = init->frontOffset; + dev_priv->backOffset = init->backOffset; + dev_priv->depthOffset = init->depthOffset; + dev_priv->textureOffset = init->textureOffset; + dev_priv->textureSize = init->textureSize; + dev_priv->cpp = init->cpp; + dev_priv->sgram = init->sgram; + dev_priv->stride = init->stride; - if (processed && mga_dma_is_ready_no_hold(dev)) { - atomic_inc(&dma->total_lost); - processed = 0; - goto again; - } + dev_priv->mAccess = init->mAccess; + init_waitqueue_head(&dev_priv->flush_queue); + init_waitqueue_head(&dev_priv->buf_queue); + dev_priv->WarpPipe = -1; + + DRM_DEBUG("chipset: %d ucode_size: %d backOffset: %x depthOffset: %x\n", + dev_priv->chipset, dev_priv->warp_ucode_size, + dev_priv->backOffset, dev_priv->depthOffset); + DRM_DEBUG("cpp: %d sgram: %d stride: %d maccess: %x\n", + dev_priv->cpp, dev_priv->sgram, dev_priv->stride, + dev_priv->mAccess); + + memcpy(&dev_priv->WarpIndex, &init->WarpIndex, + sizeof(drm_mga_warp_index_t) * MGA_MAX_WARP_PIPES); + + for (i = 0 ; i < MGA_MAX_WARP_PIPES ; i++) + DRM_DEBUG("warp pipe %d: installed: %d phys: %lx size: %x\n", + i, + dev_priv->WarpIndex[i].installed, + dev_priv->WarpIndex[i].phys_addr, + dev_priv->WarpIndex[i].size); + + if(mga_init_primary_bufs(dev, init) != 0) { + DRM_ERROR("Can not initialize primary buffers\n"); + mga_dma_cleanup(dev); + return -ENOMEM; } - - clear_bit(0, &dev->interrupt_flag); - - return retcode; + dev_priv->real_status_page = mga_alloc_page(dev); + if(dev_priv->real_status_page == 0UL) { + mga_dma_cleanup(dev); + DRM_ERROR("Can not allocate status page\n"); + return -ENOMEM; + } + + dev_priv->status_page = + ioremap_nocache(virt_to_bus((void *)dev_priv->real_status_page), + PAGE_SIZE); + + if(dev_priv->status_page == NULL) { + mga_dma_cleanup(dev); + DRM_ERROR("Can not remap status page\n"); + return -ENOMEM; + } + + /* Write status page when secend or softrap occurs */ + MGA_WRITE(MGAREG_PRIMPTR, + virt_to_bus((void *)dev_priv->real_status_page) | 0x00000003); + + + /* Private is now filled in, initialize the hardware */ + { + PRIMLOCALS; + PRIMGETPTR( dev_priv ); + + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DWGSYNC, 0x0100); + PRIMOUTREG(MGAREG_SOFTRAP, 0); + /* Poll for the first buffer to insure that + * the status register will be correct + */ + + mga_flush_write_combine(); + MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL); + + MGA_WRITE(MGAREG_PRIMEND, ((phys_head + num_dwords * 4) | + PDEA_pagpxfer_enable)); + + while(MGA_READ(MGAREG_DWGSYNC) != 0x0100) ; + } + + if(mga_freelist_init(dev) != 0) { + DRM_ERROR("Could not initialize freelist\n"); + mga_dma_cleanup(dev); + return -ENOMEM; + } + return 0; +} + +int mga_dma_init(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_mga_init_t init; + + DRM_DEBUG("%s\n", __FUNCTION__); + + copy_from_user_ret(&init, (drm_mga_init_t *)arg, sizeof(init), -EFAULT); + + switch(init.func) { + case MGA_INIT_DMA: + return mga_dma_initialize(dev, 
&init); + case MGA_CLEANUP_DMA: + return mga_dma_cleanup(dev); + } + + return -EINVAL; } int mga_irq_install(drm_device_t *dev, int irq) @@ -792,7 +876,7 @@ int mga_irq_install(drm_device_t *dev, int irq) dev->irq = irq; up(&dev->struct_sem); - printk("install irq handler %d\n", irq); + DRM_DEBUG("install irq handler %d\n", irq); dev->context_flag = 0; dev->interrupt_flag = 0; @@ -802,17 +886,15 @@ int mga_irq_install(drm_device_t *dev, int irq) dev->dma->this_buffer = NULL; dev->tq.next = NULL; dev->tq.sync = 0; - dev->tq.routine = mga_dma_schedule_tq_wrapper; + dev->tq.routine = mga_dma_task_queue; dev->tq.data = dev; /* Before installing handler */ - MGA_WRITE(MGAREG_ICLEAR, 0xfa7); MGA_WRITE(MGAREG_IEN, 0); - /* Install handler */ if ((retcode = request_irq(dev->irq, mga_dma_service, - 0, + SA_SHIRQ, dev->devname, dev))) { down(&dev->struct_sem); @@ -820,11 +902,9 @@ int mga_irq_install(drm_device_t *dev, int irq) up(&dev->struct_sem); return retcode; } - /* After installing handler */ - MGA_WRITE(MGAREG_ICLEAR, 0xfa7); + MGA_WRITE(MGAREG_ICLEAR, 0x00000001); MGA_WRITE(MGAREG_IEN, 0x00000001); - return 0; } @@ -838,19 +918,13 @@ int mga_irq_uninstall(drm_device_t *dev) up(&dev->struct_sem); if (!irq) return -EINVAL; - - printk("remove irq handler %d\n", irq); - - MGA_WRITE(MGAREG_ICLEAR, 0xfa7); + DRM_DEBUG("remove irq handler %d\n", irq); + MGA_WRITE(MGAREG_ICLEAR, 0x00000001); MGA_WRITE(MGAREG_IEN, 0); - MGA_WRITE(MGAREG_ICLEAR, 0xfa7); - free_irq(irq, dev); - return 0; } - int mga_control(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { @@ -859,7 +933,9 @@ int mga_control(struct inode *inode, struct file *filp, unsigned int cmd, drm_control_t ctl; copy_from_user_ret(&ctl, (drm_control_t *)arg, sizeof(ctl), -EFAULT); - + + DRM_DEBUG("%s\n", __FUNCTION__); + switch (ctl.func) { case DRM_INST_HANDLER: return mga_irq_install(dev, ctl.irq); @@ -870,36 +946,69 @@ int mga_control(struct inode *inode, struct file *filp, unsigned int cmd, } } -int mga_flush_queue(drm_device_t *dev) +static int mga_flush_queue(drm_device_t *dev) { DECLARE_WAITQUEUE(entry, current); - drm_queue_t *q = dev->queuelist[DRM_KERNEL_CONTEXT]; - drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private; + drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private; int ret = 0; - - printk("mga_flush_queue\n"); - if(atomic_read(&dev_priv->pending_bufs) != 0) { - current->state = TASK_INTERRUPTIBLE; - add_wait_queue(&q->flush_queue, &entry); - for (;;) { - if (!atomic_read(&dev_priv->pending_bufs)) break; - printk("Calling schedule from flush_queue : %d\n", - atomic_read(&dev_priv->pending_bufs)); - mga_dma_schedule(dev, 1); - schedule(); - if (signal_pending(current)) { - ret = -EINTR; /* Can't restart */ - break; - } - } - printk("Exited out of schedule from flush_queue\n"); - current->state = TASK_RUNNING; - remove_wait_queue(&q->flush_queue, &entry); + + DRM_DEBUG("%s\n", __FUNCTION__); + + if(dev_priv == NULL) { + return 0; } + if(dev_priv->next_prim->num_dwords != 0) { + current->state = TASK_INTERRUPTIBLE; + add_wait_queue(&dev_priv->flush_queue, &entry); + set_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status); + mga_dma_schedule(dev, 0); + for (;;) { + if (!test_bit(MGA_IN_FLUSH, + &dev_priv->dispatch_status)) + break; + atomic_inc(&dev->total_sleeps); + schedule(); + if (signal_pending(current)) { + ret = -EINTR; /* Can't restart */ + clear_bit(MGA_IN_FLUSH, + &dev_priv->dispatch_status); + break; + } + } + current->state = TASK_RUNNING; + 
remove_wait_queue(&dev_priv->flush_queue, &entry); + } return ret; } +/* Must be called with the lock held */ +void mga_reclaim_buffers(drm_device_t *dev, pid_t pid) +{ + drm_device_dma_t *dma = dev->dma; + int i; + + if (!dma) return; + if(dev->dev_private == NULL) return; + if(dma->buflist == NULL) return; + + DRM_DEBUG("%s\n", __FUNCTION__); + mga_flush_queue(dev); + + for (i = 0; i < dma->buf_count; i++) { + drm_buf_t *buf = dma->buflist[ i ]; + drm_mga_buf_priv_t *buf_priv = buf->dev_private; + + /* Only buffers that need to get reclaimed ever + * get set to free + */ + if (buf->pid == pid && buf_priv) { + if(buf_priv->my_freelist->age == MGA_BUF_USED) + buf_priv->my_freelist->age = MGA_BUF_FREE; + } + } +} + int mga_lock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { @@ -909,6 +1018,7 @@ int mga_lock(struct inode *inode, struct file *filp, unsigned int cmd, int ret = 0; drm_lock_t lock; + DRM_DEBUG("%s\n", __FUNCTION__); copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); if (lock.context == DRM_KERNEL_CONTEXT) { @@ -916,16 +1026,15 @@ int mga_lock(struct inode *inode, struct file *filp, unsigned int cmd, current->pid, lock.context); return -EINVAL; } - - printk("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", - lock.context, current->pid, dev->lock.hw_lock->lock, - lock.flags); - + + DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", + lock.context, current->pid, dev->lock.hw_lock->lock, + lock.flags); if (lock.context < 0) { return -EINVAL; } - + /* Only one queue: */ @@ -948,8 +1057,6 @@ int mga_lock(struct inode *inode, struct file *filp, unsigned int cmd, /* Contention */ atomic_inc(&dev->total_sleeps); current->state = TASK_INTERRUPTIBLE; - current->policy |= SCHED_YIELD; - printk("Calling lock schedule\n"); schedule(); if (signal_pending(current)) { ret = -ERESTARTSYS; @@ -962,17 +1069,47 @@ int mga_lock(struct inode *inode, struct file *filp, unsigned int cmd, if (!ret) { if (lock.flags & _DRM_LOCK_QUIESCENT) { - printk("_DRM_LOCK_QUIESCENT\n"); - ret = mga_flush_queue(dev); - if(ret != 0) { - drm_lock_free(dev, &dev->lock.hw_lock->lock, - lock.context); - } else { - mga_dma_quiescent(dev); - } + DRM_DEBUG("_DRM_LOCK_QUIESCENT\n"); + mga_flush_queue(dev); + mga_dma_quiescent(dev); } } - printk("%d %s\n", lock.context, ret ? "interrupted" : "has lock"); + + DRM_DEBUG("%d %s\n", lock.context, ret ? 
"interrupted" : "has lock"); return ret; } +int mga_flush_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_lock_t lock; + drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private; + + DRM_DEBUG("%s\n", __FUNCTION__); + copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); + + if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("mga_flush_ioctl called without lock held\n"); + return -EINVAL; + } + + if(lock.flags & _DRM_LOCK_FLUSH || lock.flags & _DRM_LOCK_FLUSH_ALL) { + drm_mga_prim_buf_t *temp_buf; + + temp_buf = dev_priv->current_prim; + + if(temp_buf && temp_buf->num_dwords) { + set_bit(MGA_BUF_FORCE_FIRE, &temp_buf->buffer_status); + mga_advance_primary(dev); + } + mga_dma_schedule(dev, 1); + } + if(lock.flags & _DRM_LOCK_QUIESCENT) { + mga_flush_queue(dev); + mga_dma_quiescent(dev); + } + + return 0; +} diff --git a/linux/mga_drm.h b/linux/mga_drm.h new file mode 100644 index 00000000..e75e91a4 --- /dev/null +++ b/linux/mga_drm.h @@ -0,0 +1,269 @@ +/* mga_drm.h -- Public header for the Matrox g200/g400 driver -*- linux-c -*- + * Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Jeff Hartmann <jhartmann@valinux.com> + * Keith Whitwell <keithw@valinux.com> + * + */ + +#ifndef _MGA_DRM_H_ +#define _MGA_DRM_H_ + +/* WARNING: If you change any of these defines, make sure to change the + * defines in the Xserver file (xf86drmMga.h) + */ +#ifndef _MGA_DEFINES_ +#define _MGA_DEFINES_ + +#define MGA_F 0x1 /* fog */ +#define MGA_A 0x2 /* alpha */ +#define MGA_S 0x4 /* specular */ +#define MGA_T2 0x8 /* multitexture */ + +#define MGA_WARP_TGZ 0 +#define MGA_WARP_TGZF (MGA_F) +#define MGA_WARP_TGZA (MGA_A) +#define MGA_WARP_TGZAF (MGA_F|MGA_A) +#define MGA_WARP_TGZS (MGA_S) +#define MGA_WARP_TGZSF (MGA_S|MGA_F) +#define MGA_WARP_TGZSA (MGA_S|MGA_A) +#define MGA_WARP_TGZSAF (MGA_S|MGA_F|MGA_A) +#define MGA_WARP_T2GZ (MGA_T2) +#define MGA_WARP_T2GZF (MGA_T2|MGA_F) +#define MGA_WARP_T2GZA (MGA_T2|MGA_A) +#define MGA_WARP_T2GZAF (MGA_T2|MGA_A|MGA_F) +#define MGA_WARP_T2GZS (MGA_T2|MGA_S) +#define MGA_WARP_T2GZSF (MGA_T2|MGA_S|MGA_F) +#define MGA_WARP_T2GZSA (MGA_T2|MGA_S|MGA_A) +#define MGA_WARP_T2GZSAF (MGA_T2|MGA_S|MGA_F|MGA_A) + +#define MGA_MAX_G400_PIPES 16 +#define MGA_MAX_G200_PIPES 8 /* no multitex */ +#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES + +#define MGA_CARD_TYPE_G200 1 +#define MGA_CARD_TYPE_G400 2 + +#define MGA_FRONT 0x1 +#define MGA_BACK 0x2 +#define MGA_DEPTH 0x4 + +/* 3d state excluding texture units: + */ +#define MGA_CTXREG_DSTORG 0 /* validated */ +#define MGA_CTXREG_MACCESS 1 +#define MGA_CTXREG_PLNWT 2 +#define MGA_CTXREG_DWGCTL 3 +#define MGA_CTXREG_ALPHACTRL 4 +#define MGA_CTXREG_FOGCOLOR 5 +#define MGA_CTXREG_WFLAG 6 +#define MGA_CTXREG_TDUAL0 7 +#define MGA_CTXREG_TDUAL1 8 +#define MGA_CTXREG_FCOL 9 +#define MGA_CTX_SETUP_SIZE 10 + +/* 2d state + */ +#define MGA_2DREG_PITCH 0 +#define MGA_2D_SETUP_SIZE 1 + +/* Each texture unit has a state: + */ +#define MGA_TEXREG_CTL 0 +#define MGA_TEXREG_CTL2 1 +#define MGA_TEXREG_FILTER 2 +#define MGA_TEXREG_BORDERCOL 3 +#define MGA_TEXREG_ORG 4 /* validated */ +#define MGA_TEXREG_ORG1 5 +#define MGA_TEXREG_ORG2 6 +#define MGA_TEXREG_ORG3 7 +#define MGA_TEXREG_ORG4 8 +#define MGA_TEXREG_WIDTH 9 +#define MGA_TEXREG_HEIGHT 10 +#define MGA_TEX_SETUP_SIZE 11 + +/* What needs to be changed for the current vertex dma buffer? + */ +#define MGA_UPLOAD_CTX 0x1 +#define MGA_UPLOAD_TEX0 0x2 +#define MGA_UPLOAD_TEX1 0x4 +#define MGA_UPLOAD_PIPE 0x8 +#define MGA_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */ +#define MGA_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */ +#define MGA_UPLOAD_2D 0x40 +#define MGA_WAIT_AGE 0x80 /* handled client-side */ +#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */ +#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock + quiescent */ + +/* 32 buffers of 64k each, total 2 meg. + */ +#define MGA_DMA_BUF_ORDER 16 +#define MGA_DMA_BUF_SZ (1<<MGA_DMA_BUF_ORDER) +#define MGA_DMA_BUF_NR 31 + +/* Keep these small for testing. + */ +#define MGA_NR_SAREA_CLIPRECTS 8 + +/* 2 heaps (1 for card, 1 for agp), each divided into upto 128 + * regions, subject to a minimum region size of (1<<16) == 64k. + * + * Clients may subdivide regions internally, but when sharing between + * clients, the region size is the minimum granularity. 
+ */ + +#define MGA_CARD_HEAP 0 +#define MGA_AGP_HEAP 1 +#define MGA_NR_TEX_HEAPS 2 +#define MGA_NR_TEX_REGIONS 16 +#define MGA_LOG_MIN_TEX_REGION_SIZE 16 +#endif + +typedef struct _drm_mga_warp_index { + int installed; + unsigned long phys_addr; + int size; +} drm_mga_warp_index_t; + +typedef struct drm_mga_init { + enum { + MGA_INIT_DMA = 0x01, + MGA_CLEANUP_DMA = 0x02 + } func; + int reserved_map_agpstart; + int reserved_map_idx; + int buffer_map_idx; + int sarea_priv_offset; + int primary_size; + int warp_ucode_size; + unsigned int frontOffset; + unsigned int backOffset; + unsigned int depthOffset; + unsigned int textureOffset; + unsigned int textureSize; + unsigned int agpTextureOffset; + unsigned int agpTextureSize; + unsigned int cpp; + unsigned int stride; + int sgram; + int chipset; + drm_mga_warp_index_t WarpIndex[MGA_MAX_WARP_PIPES]; + unsigned int mAccess; +} drm_mga_init_t; + +/* Warning: if you change the sarea structure, you must change the Xserver + * structures as well */ + +typedef struct _drm_mga_tex_region { + unsigned char next, prev; + unsigned char in_use; + unsigned int age; +} drm_mga_tex_region_t; + +typedef struct _drm_mga_sarea { + /* The channel for communication of state information to the kernel + * on firing a vertex dma buffer. + */ + unsigned int ContextState[MGA_CTX_SETUP_SIZE]; + unsigned int ServerState[MGA_2D_SETUP_SIZE]; + unsigned int TexState[2][MGA_TEX_SETUP_SIZE]; + unsigned int WarpPipe; + unsigned int dirty; + + unsigned int nbox; + drm_clip_rect_t boxes[MGA_NR_SAREA_CLIPRECTS]; + + + /* Information about the most recently used 3d drawable. The + * client fills in the req_* fields, the server fills in the + * exported_ fields and puts the cliprects into boxes, above. + * + * The client clears the exported_drawable field before + * clobbering the boxes data. + */ + unsigned int req_drawable; /* the X drawable id */ + unsigned int req_draw_buffer; /* MGA_FRONT or MGA_BACK */ + + unsigned int exported_drawable; + unsigned int exported_index; + unsigned int exported_stamp; + unsigned int exported_buffers; + unsigned int exported_nfront; + unsigned int exported_nback; + int exported_back_x, exported_front_x, exported_w; + int exported_back_y, exported_front_y, exported_h; + drm_clip_rect_t exported_boxes[MGA_NR_SAREA_CLIPRECTS]; + + /* Counters for aging textures and for client-side throttling. + */ + unsigned int last_enqueue; /* last time a buffer was enqueued */ + unsigned int last_dispatch; /* age of the most recently dispatched buffer */ + unsigned int last_quiescent; /* */ + + + /* LRU lists for texture memory in agp space and on the card + */ + drm_mga_tex_region_t texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS+1]; + unsigned int texAge[MGA_NR_TEX_HEAPS]; + + /* Mechanism to validate card state. + */ + int ctxOwner; +} drm_mga_sarea_t; + +/* Device specific ioctls: + */ +typedef struct _drm_mga_clear { + unsigned int clear_color; + unsigned int clear_depth; + unsigned int flags; +} drm_mga_clear_t; + +typedef struct _drm_mga_swap { + int dummy; +} drm_mga_swap_t; + +typedef struct _drm_mga_iload { + int idx; + int length; + unsigned int destOrg; +} drm_mga_iload_t; + +typedef struct _drm_mga_vertex { + int idx; /* buffer to queue */ + int used; /* bytes in use */ + int discard; /* client finished with buffer? */ +} drm_mga_vertex_t; + +typedef struct _drm_mga_indices { + int idx; /* buffer to queue */ + unsigned int start; + unsigned int end; + int discard; /* client finished with buffer? 
*/ +} drm_mga_indices_t; + +#endif diff --git a/linux/mga_drv.c b/linux/mga_drv.c index 8bc25617..e77d827b 100644 --- a/linux/mga_drv.c +++ b/linux/mga_drv.c @@ -2,6 +2,7 @@ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -23,13 +24,13 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com> - * Jeff Hartmann <jhartmann@precisioninsight.com> + * Authors: Rickard E. (Rik) Faith <faith@valinux.com> + * Jeff Hartmann <jhartmann@valinux.com> * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_drv.c,v 1.1 2000/02/11 17:26:07 dawes Exp $ * */ +#include <linux/config.h> #define EXPORT_SYMTAB #include "drmP.h" #include "mga_drv.h" @@ -39,9 +40,9 @@ EXPORT_SYMBOL(mga_cleanup); #define MGA_NAME "mga" #define MGA_DESC "Matrox g200/g400" #define MGA_DATE "19991213" -#define MGA_MAJOR 0 +#define MGA_MAJOR 1 #define MGA_MINOR 0 -#define MGA_PATCHLEVEL 1 +#define MGA_PATCHLEVEL 0 static drm_device_t mga_device; drm_ctx_t mga_res_ctx; @@ -54,6 +55,7 @@ static struct file_operations mga_fops = { mmap: drm_mmap, read: drm_read, fasync: drm_fasync, + poll: drm_poll, }; static struct miscdevice mga_misc = { @@ -105,9 +107,12 @@ static drm_ioctl_desc_t mga_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_MGA_INIT)] = { mga_dma_init, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_MGA_SWAP)] = { mga_clear_bufs, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_MGA_CLEAR)] = { mga_swap_bufs, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)] = { mga_iload, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MGA_SWAP)] = { mga_swap_bufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_MGA_CLEAR)] = { mga_clear_bufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)] = { mga_iload, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_MGA_VERTEX)] = { mga_vertex, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_MGA_FLUSH)] = { mga_flush_ioctl, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_MGA_INDICES)] = { mga_indices, 1, 0 }, }; #define MGA_IOCTL_COUNT DRM_ARRAY_SIZE(mga_ioctls) @@ -380,6 +385,21 @@ int mga_init(void) drm_proc_init(dev); DRM_DEBUG("doing agp init\n"); dev->agp = drm_agp_init(); + if(dev->agp == NULL) { + DRM_INFO("The mga drm module requires the agpgart module" + " to function correctly\nPlease load the agpgart" + " module before you load the mga module\n"); + drm_proc_cleanup(); + misc_deregister(&mga_misc); + mga_takedown(dev); + return -ENOMEM; + } +#ifdef CONFIG_MTRR + dev->agp->agp_mtrr = mtrr_add(dev->agp->agp_info.aper_base, + dev->agp->agp_info.aper_size * 1024 * 1024, + MTRR_TYPE_WRCOMB, + 1); +#endif DRM_DEBUG("doing ctxbitmap init\n"); if((retcode = drm_ctxbitmap_init(dev))) { DRM_ERROR("Cannot allocate memory for context bitmap.\n"); @@ -416,6 +436,16 @@ void mga_cleanup(void) } drm_ctxbitmap_cleanup(dev); mga_dma_cleanup(dev); +#ifdef CONFIG_MTRR + if(dev->agp && dev->agp->agp_mtrr) { + int retval; + retval = mtrr_del(dev->agp->agp_mtrr, + dev->agp->agp_info.aper_base, + dev->agp->agp_info.aper_size * 1024*1024); + DRM_DEBUG("mtrr_del = %d\n", retval); + } +#endif + mga_takedown(dev); if (dev->agp) { drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS); @@ -482,27 +512,86 @@ int 
mga_release(struct inode *inode, struct file *filp) drm_device_t *dev = priv->dev; int retcode = 0; - DRM_DEBUG("open_count = %d\n", dev->open_count); - if (!(retcode = drm_release(inode, filp))) { - MOD_DEC_USE_COUNT; - atomic_inc(&dev->total_close); - spin_lock(&dev->count_lock); - if (!--dev->open_count) { - if (atomic_read(&dev->ioctl_count) || dev->blocked) { - DRM_ERROR("Device busy: %d %d\n", - atomic_read(&dev->ioctl_count), - dev->blocked); - spin_unlock(&dev->count_lock); - return -EBUSY; + DRM_DEBUG("pid = %d, device = 0x%x, open_count = %d\n", + current->pid, dev->device, dev->open_count); + + if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) + && dev->lock.pid == current->pid) { + mga_reclaim_buffers(dev, priv->pid); + DRM_ERROR("Process %d dead, freeing lock for context %d\n", + current->pid, + _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); + drm_lock_free(dev, + &dev->lock.hw_lock->lock, + _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); + + /* FIXME: may require heavy-handed reset of + hardware at this point, possibly + processed via a callback to the X + server. */ + } else if (dev->lock.hw_lock) { + /* The lock is required to reclaim buffers */ + DECLARE_WAITQUEUE(entry, current); + add_wait_queue(&dev->lock.lock_queue, &entry); + for (;;) { + if (!dev->lock.hw_lock) { + /* Device has been unregistered */ + retcode = -EINTR; + break; + } + if (drm_lock_take(&dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT)) { + dev->lock.pid = priv->pid; + dev->lock.lock_time = jiffies; + atomic_inc(&dev->total_locks); + break; /* Got lock */ + } + /* Contention */ + atomic_inc(&dev->total_sleeps); + current->state = TASK_INTERRUPTIBLE; + schedule(); + if (signal_pending(current)) { + retcode = -ERESTARTSYS; + break; } - spin_unlock(&dev->count_lock); - return mga_takedown(dev); } - spin_unlock(&dev->count_lock); + current->state = TASK_RUNNING; + remove_wait_queue(&dev->lock.lock_queue, &entry); + if(!retcode) { + mga_reclaim_buffers(dev, priv->pid); + drm_lock_free(dev, &dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT); + } } + drm_fasync(-1, filp, 0); + + down(&dev->struct_sem); + if (priv->prev) priv->prev->next = priv->next; + else dev->file_first = priv->next; + if (priv->next) priv->next->prev = priv->prev; + else dev->file_last = priv->prev; + up(&dev->struct_sem); + + drm_free(priv, sizeof(*priv), DRM_MEM_FILES); + MOD_DEC_USE_COUNT; + atomic_inc(&dev->total_close); + spin_lock(&dev->count_lock); + if (!--dev->open_count) { + if (atomic_read(&dev->ioctl_count) || dev->blocked) { + DRM_ERROR("Device busy: %d %d\n", + atomic_read(&dev->ioctl_count), + dev->blocked); + spin_unlock(&dev->count_lock); + return -EBUSY; + } + spin_unlock(&dev->count_lock); + return mga_takedown(dev); + } + spin_unlock(&dev->count_lock); return retcode; } + /* drm_ioctl is called whenever a process performs an ioctl on /dev/drm. */ int mga_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, diff --git a/linux/mga_drv.h b/linux/mga_drv.h index bc7808b0..f217acb9 100644 --- a/linux/mga_drv.h +++ b/linux/mga_drv.h @@ -2,6 +2,7 @@ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -23,51 +24,81 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * Authors: Rickard E. 
(Rik) Faith <faith@precisioninsight.com> - * Jeff Hartmann <jhartmann@precisioninsight.com> + * Authors: Rickard E. (Rik) Faith <faith@valinux.com> + * Jeff Hartmann <jhartmann@valinux.com> * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_drv.h,v 1.1 2000/02/11 17:26:08 dawes Exp $ */ #ifndef _MGA_DRV_H_ #define _MGA_DRV_H_ -#include "mga_drm_public.h" + +#define MGA_BUF_IN_USE 0 +#define MGA_BUF_SWAP_PENDING 1 +#define MGA_BUF_FORCE_FIRE 2 +#define MGA_BUF_NEEDS_OVERFLOW 3 + +typedef struct { + u32 buffer_status; + unsigned int num_dwords; + unsigned int max_dwords; + u32 *current_dma_ptr; + u32 *head; + u32 phys_head; + unsigned int prim_age; + int sec_used; + int idx; +} drm_mga_prim_buf_t; + +typedef struct _drm_mga_freelist { + unsigned int age; + drm_buf_t *buf; + struct _drm_mga_freelist *next; + struct _drm_mga_freelist *prev; +} drm_mga_freelist_t; + +#define MGA_IN_DISPATCH 0 +#define MGA_IN_FLUSH 1 +#define MGA_IN_WAIT 2 +#define MGA_IN_GETBUF 3 typedef struct _drm_mga_private { + u32 dispatch_status; + unsigned int next_prim_age; + __volatile__ unsigned int last_prim_age; int reserved_map_idx; int buffer_map_idx; drm_mga_sarea_t *sarea_priv; int primary_size; int warp_ucode_size; int chipset; - int fbOffset; - int backOffset; - int depthOffset; - int textureOffset; - int textureSize; + unsigned int frontOffset; + unsigned int backOffset; + unsigned int depthOffset; + unsigned int textureOffset; + unsigned int textureSize; int cpp; - int stride; + unsigned int stride; int sgram; int use_agp; - mgaWarpIndex WarpIndex[MGA_MAX_G400_PIPES]; - __volatile__ unsigned long softrap_age; - atomic_t dispatch_lock; + drm_mga_warp_index_t WarpIndex[MGA_MAX_G400_PIPES]; + unsigned int WarpPipe; atomic_t pending_bufs; - void *ioremap; - u32 *prim_head; - u32 *current_dma_ptr; - u32 prim_phys_head; - int prim_num_dwords; - int prim_max_dwords; - - + void *status_page; + unsigned long real_status_page; + u8 *ioremap; + drm_mga_prim_buf_t **prim_bufs; + drm_mga_prim_buf_t *next_prim; + drm_mga_prim_buf_t *last_prim; + drm_mga_prim_buf_t *current_prim; + int current_prim_idx; + drm_mga_freelist_t *head; + drm_mga_freelist_t *tail; + wait_queue_head_t flush_queue; /* Processes waiting until flush */ + wait_queue_head_t wait_queue; /* Processes waiting until interrupt */ + wait_queue_head_t buf_queue; /* Processes waiting for a free buf */ /* Some validated register values: */ - u32 frontOrg; - u32 backOrg; - u32 depthOrg; u32 mAccess; - } drm_mga_private_t; /* mga_drv.c */ @@ -92,16 +123,19 @@ extern int mga_control(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int mga_lock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -#if 0 -extern void mga_dma_init(drm_device_t *dev); -extern void mga_dma_cleanup(drm_device_t *dev); -#endif +/* mga_dma_init does init and release */ extern int mga_dma_init(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int mga_dma_cleanup(drm_device_t *dev); - -/* mga_dma_init does init and release */ +extern int mga_flush_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern void mga_flush_write_combine(void); +extern unsigned int mga_create_sync_tag(drm_device_t *dev); +extern drm_buf_t *mga_freelist_get(drm_device_t *dev); +extern int mga_freelist_put(drm_device_t *dev, drm_buf_t *buf); +extern int mga_advance_primary(drm_device_t *dev); +extern void mga_reclaim_buffers(drm_device_t *dev, pid_t pid); 
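The additions to mga_drv.h above replace the single software-managed primary buffer with a ring of drm_mga_prim_buf_t buffers and an age-tagged freelist (drm_mga_freelist_t) of secondary DMA buffers, managed through the new mga_freelist_get(), mga_freelist_put() and mga_advance_primary() entry points. The fragment below is only a minimal sketch of how such an age-tagged freelist behaves, not the driver code itself: every type and function name in it (fake_dev, fake_buf, freelist_node, freelist_get, freelist_put) is invented for illustration. The real driver additionally keeps a pointer from each DMA buffer back to its freelist node (my_freelist in drm_mga_buf_priv_t) and sleeps on buf_queue when no buffer has aged out yet.

/* Minimal model of an age-tagged freelist (illustrative names only).
 * A secondary buffer is stamped with the age of the primary buffer it
 * was queued behind; it may be handed out again only after the
 * hardware's retired-age counter has passed that stamp.
 */
#include <stddef.h>

struct fake_buf { int idx; };

struct freelist_node {
	unsigned int age;              /* prim age stamped when queued      */
	struct fake_buf *buf;
	struct freelist_node *next;
};

struct fake_dev {
	unsigned int next_prim_age;            /* age of the primary buffer
	                                          currently being filled    */
	volatile unsigned int last_prim_age;   /* last age the hardware has
	                                          retired                   */
	struct freelist_node *head;            /* oldest entry first        */
};

static struct fake_buf *freelist_get(struct fake_dev *dev)
{
	struct freelist_node *n = dev->head;

	/* The oldest entry sits at the head; it is only safe to reuse
	 * once the primary buffer it was queued behind has retired. */
	if (n == NULL || n->age > dev->last_prim_age)
		return NULL;           /* caller would sleep on buf_queue */

	dev->head = n->next;
	n->next = NULL;
	return n->buf;
}

static void freelist_put(struct fake_dev *dev, struct freelist_node *n)
{
	struct freelist_node **pp = &dev->head;

	/* Stamp with the current primary buffer's age, then append at
	 * the tail so the list stays ordered oldest-to-newest. */
	n->age = dev->next_prim_age;
	n->next = NULL;
	while (*pp != NULL)
		pp = &(*pp)->next;
	*pp = n;
}

The appeal of the scheme is that the fast-path reuse test is a single comparison against a counter that tracks how far the hardware has progressed (last_prim_age in this sketch), so the CPU only has to block when every buffer is still younger than the last retired primary buffer.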
/* mga_bufs.c */ @@ -124,6 +158,10 @@ extern int mga_swap_bufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int mga_iload(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); +extern int mga_vertex(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int mga_indices(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); /* mga_context.c */ extern int mga_resctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); @@ -144,13 +182,124 @@ extern int mga_context_switch(drm_device_t *dev, int old, int new); extern int mga_context_switch_complete(drm_device_t *dev, int new); +typedef enum { + TT_GENERAL, + TT_BLIT, + TT_VECTOR, + TT_VERTEX +} transferType_t; + +typedef struct { + drm_mga_freelist_t *my_freelist; + int discard; + int dispatched; +} drm_mga_buf_priv_t; + +#define DWGREG0 0x1c00 +#define DWGREG0_END 0x1dff +#define DWGREG1 0x2c00 +#define DWGREG1_END 0x2dff + +#define ISREG0(r) (r >= DWGREG0 && r <= DWGREG0_END) +#define ADRINDEX0(r) (u8)((r - DWGREG0) >> 2) +#define ADRINDEX1(r) (u8)(((r - DWGREG1) >> 2) | 0x80) +#define ADRINDEX(r) (ISREG0(r) ? ADRINDEX0(r) : ADRINDEX1(r)) + +#define MGA_VERBOSE 0 +#define MGA_NUM_PRIM_BUFS 8 + +#define PRIMLOCALS u8 tempIndex[4]; u32 *dma_ptr; u32 phys_head; \ + int outcount, num_dwords + +#define PRIM_OVERFLOW(dev, dev_priv, length) do { \ + drm_mga_prim_buf_t *tmp_buf = \ + dev_priv->prim_bufs[dev_priv->current_prim_idx]; \ + if( test_bit(MGA_BUF_NEEDS_OVERFLOW, \ + &tmp_buf->buffer_status)) { \ + mga_advance_primary(dev); \ + mga_dma_schedule(dev, 1); \ + } else if( tmp_buf->max_dwords - tmp_buf->num_dwords < length ||\ + tmp_buf->sec_used > MGA_DMA_BUF_NR/2) { \ + set_bit(MGA_BUF_FORCE_FIRE, &tmp_buf->buffer_status); \ + mga_advance_primary(dev); \ + mga_dma_schedule(dev, 1); \ + } \ +} while(0) + +#define PRIMGETPTR(dev_priv) do { \ + drm_mga_prim_buf_t *tmp_buf = \ + dev_priv->prim_bufs[dev_priv->current_prim_idx]; \ + if(MGA_VERBOSE) \ + DRM_DEBUG("PRIMGETPTR in %s\n", __FUNCTION__); \ + dma_ptr = tmp_buf->current_dma_ptr; \ + num_dwords = tmp_buf->num_dwords; \ + phys_head = tmp_buf->phys_head; \ + outcount = 0; \ +} while(0) + +#define PRIMPTR(prim_buf) do { \ + if(MGA_VERBOSE) \ + DRM_DEBUG("PRIMPTR in %s\n", __FUNCTION__); \ + dma_ptr = prim_buf->current_dma_ptr; \ + num_dwords = prim_buf->num_dwords; \ + phys_head = prim_buf->phys_head; \ + outcount = 0; \ +} while(0) + +#define PRIMFINISH(prim_buf) do { \ + if (MGA_VERBOSE) { \ + DRM_DEBUG( "PRIMFINISH in %s\n", __FUNCTION__); \ + if (outcount & 3) \ + DRM_DEBUG(" --- truncation\n"); \ + } \ + prim_buf->num_dwords = num_dwords; \ + prim_buf->current_dma_ptr = dma_ptr; \ +} while(0) + +#define PRIMADVANCE(dev_priv) do { \ +drm_mga_prim_buf_t *tmp_buf = \ + dev_priv->prim_bufs[dev_priv->current_prim_idx]; \ + if (MGA_VERBOSE) { \ + DRM_DEBUG("PRIMADVANCE in %s\n", __FUNCTION__); \ + if (outcount & 3) \ + DRM_DEBUG(" --- truncation\n"); \ + } \ + tmp_buf->num_dwords = num_dwords; \ + tmp_buf->current_dma_ptr = dma_ptr; \ +} while (0) + +#define PRIMUPDATE(dev_priv) do { \ + drm_mga_prim_buf_t *tmp_buf = \ + dev_priv->prim_bufs[dev_priv->current_prim_idx]; \ + tmp_buf->sec_used++; \ +} while (0) + +#define AGEBUF(dev_priv, buf_priv) do { \ + drm_mga_prim_buf_t *tmp_buf = \ + dev_priv->prim_bufs[dev_priv->current_prim_idx]; \ + buf_priv->my_freelist->age = tmp_buf->prim_age; \ +} while (0) + + +#define PRIMOUTREG(reg, val) do { \ + 
tempIndex[outcount]=ADRINDEX(reg); \ + dma_ptr[1+outcount] = val; \ + if (MGA_VERBOSE) \ + DRM_DEBUG(" PRIMOUT %d: 0x%x -- 0x%x\n", \ + num_dwords + 1 + outcount, ADRINDEX(reg), val); \ + if( ++outcount == 4) { \ + outcount = 0; \ + dma_ptr[0] = *(u32 *)tempIndex; \ + dma_ptr+=5; \ + num_dwords += 5; \ + } \ +}while (0) + +/* A reduced set of the mga registers. + */ #define MGAREG_MGA_EXEC 0x0100 -#define MGAREG_AGP_PLL 0x1e4c #define MGAREG_ALPHACTRL 0x2c7c -#define MGAREG_ALPHASTART 0x2c70 -#define MGAREG_ALPHAXINC 0x2c74 -#define MGAREG_ALPHAYINC 0x2c78 #define MGAREG_AR0 0x1c60 #define MGAREG_AR1 0x1c64 #define MGAREG_AR2 0x1c68 @@ -158,39 +307,16 @@ extern int mga_context_switch_complete(drm_device_t *dev, int new); #define MGAREG_AR4 0x1c70 #define MGAREG_AR5 0x1c74 #define MGAREG_AR6 0x1c78 -#define MGAREG_BCOL 0x1c20 #define MGAREG_CXBNDRY 0x1c80 #define MGAREG_CXLEFT 0x1ca0 #define MGAREG_CXRIGHT 0x1ca4 #define MGAREG_DMAPAD 0x1c54 -#define MGAREG_DR0_Z32LSB 0x2c50 -#define MGAREG_DR0_Z32MSB 0x2c54 -#define MGAREG_DR2_Z32LSB 0x2c60 -#define MGAREG_DR2_Z32MSB 0x2c64 -#define MGAREG_DR3_Z32LSB 0x2c68 -#define MGAREG_DR3_Z32MSB 0x2c6c -#define MGAREG_DR0 0x1cc0 -#define MGAREG_DR2 0x1cc8 -#define MGAREG_DR3 0x1ccc -#define MGAREG_DR4 0x1cd0 -#define MGAREG_DR6 0x1cd8 -#define MGAREG_DR7 0x1cdc -#define MGAREG_DR8 0x1ce0 -#define MGAREG_DR10 0x1ce8 -#define MGAREG_DR11 0x1cec -#define MGAREG_DR12 0x1cf0 -#define MGAREG_DR14 0x1cf8 -#define MGAREG_DR15 0x1cfc #define MGAREG_DSTORG 0x2cb8 -#define MGAREG_DWG_INDIR_WT 0x1e80 #define MGAREG_DWGCTL 0x1c00 #define MGAREG_DWGSYNC 0x2c4c #define MGAREG_FCOL 0x1c24 #define MGAREG_FIFOSTATUS 0x1e10 #define MGAREG_FOGCOL 0x1cf4 -#define MGAREG_FOGSTART 0x1cc4 -#define MGAREG_FOGXINC 0x1cd4 -#define MGAREG_FOGYINC 0x1ce4 #define MGAREG_FXBNDRY 0x1c84 #define MGAREG_FXLEFT 0x1ca8 #define MGAREG_FXRIGHT 0x1cac @@ -198,44 +324,22 @@ extern int mga_context_switch_complete(drm_device_t *dev, int new); #define MGAREG_IEN 0x1e1c #define MGAREG_LEN 0x1c5c #define MGAREG_MACCESS 0x1c04 -#define MGAREG_MCTLWTST 0x1c08 -#define MGAREG_MEMRDBK 0x1e44 -#define MGAREG_OPMODE 0x1e54 -#define MGAREG_PAT0 0x1c10 -#define MGAREG_PAT1 0x1c14 #define MGAREG_PITCH 0x1c8c #define MGAREG_PLNWT 0x1c1c #define MGAREG_PRIMADDRESS 0x1e58 #define MGAREG_PRIMEND 0x1e5c #define MGAREG_PRIMPTR 0x1e50 -#define MGAREG_RST 0x1e40 #define MGAREG_SECADDRESS 0x2c40 #define MGAREG_SECEND 0x2c44 #define MGAREG_SETUPADDRESS 0x2cd0 #define MGAREG_SETUPEND 0x2cd4 -#define MGAREG_SGN 0x1c58 -#define MGAREG_SHIFT 0x1c50 #define MGAREG_SOFTRAP 0x2c48 -#define MGAREG_SPECBSTART 0x2c98 -#define MGAREG_SPECBXINC 0x2c9c -#define MGAREG_SPECBYINC 0x2ca0 -#define MGAREG_SPECGSTART 0x2c8c -#define MGAREG_SPECGXINC 0x2c90 -#define MGAREG_SPECGYINC 0x2c94 -#define MGAREG_SPECRSTART 0x2c80 -#define MGAREG_SPECRXINC 0x2c84 -#define MGAREG_SPECRYINC 0x2c88 -#define MGAREG_SRC0 0x1c30 -#define MGAREG_SRC1 0x1c34 -#define MGAREG_SRC2 0x1c38 -#define MGAREG_SRC3 0x1c3c #define MGAREG_SRCORG 0x2cb4 #define MGAREG_STATUS 0x1e14 #define MGAREG_STENCIL 0x2cc8 #define MGAREG_STENCILCTL 0x2ccc #define MGAREG_TDUALSTAGE0 0x2cf8 #define MGAREG_TDUALSTAGE1 0x2cfc -#define MGAREG_TEST0 0x1e48 #define MGAREG_TEXBORDERCOL 0x2c5c #define MGAREG_TEXCTL 0x2c30 #define MGAREG_TEXCTL2 0x2c3c @@ -249,18 +353,6 @@ extern int mga_context_switch_complete(drm_device_t *dev, int new); #define MGAREG_TEXTRANS 0x2c34 #define MGAREG_TEXTRANSHIGH 0x2c38 #define MGAREG_TEXWIDTH 0x2c28 -#define MGAREG_TMR0 0x2c00 -#define MGAREG_TMR1 
0x2c04 -#define MGAREG_TMR2 0x2c08 -#define MGAREG_TMR3 0x2c0c -#define MGAREG_TMR4 0x2c10 -#define MGAREG_TMR5 0x2c14 -#define MGAREG_TMR6 0x2c18 -#define MGAREG_TMR7 0x2c1c -#define MGAREG_TMR8 0x2c20 -#define MGAREG_VBIADDR0 0x3e08 -#define MGAREG_VBIADDR1 0x3e0c -#define MGAREG_VCOUNT 0x1e20 #define MGAREG_WACCEPTSEQ 0x1dd4 #define MGAREG_WCODEADDR 0x1e6c #define MGAREG_WFLAG 0x1dc4 @@ -270,18 +362,8 @@ extern int mga_context_switch_complete(drm_device_t *dev, int new); #define MGAREG_WGETMSB 0x1dc8 #define MGAREG_WIADDR 0x1dc0 #define MGAREG_WIADDR2 0x1dd8 -#define MGAREG_WIADDRNB 0x1e60 -#define MGAREG_WIADDRNB1 0x1e04 -#define MGAREG_WIADDRNB2 0x1e00 -#define MGAREG_WIMEMADDR 0x1e68 -#define MGAREG_WIMEMDATA 0x2000 -#define MGAREG_WIMEMDATA1 0x2100 #define MGAREG_WMISC 0x1e70 -#define MGAREG_WR 0x2d00 #define MGAREG_WVRTXSZ 0x1dcc -#define MGAREG_XDST 0x1cb0 -#define MGAREG_XYEND 0x1c44 -#define MGAREG_XYSTRT 0x1c40 #define MGAREG_YBOT 0x1c9c #define MGAREG_YDST 0x1c90 #define MGAREG_YDSTLEN 0x1c88 @@ -289,4 +371,77 @@ extern int mga_context_switch_complete(drm_device_t *dev, int new); #define MGAREG_YTOP 0x1c98 #define MGAREG_ZORG 0x1c0c +#define PDEA_pagpxfer_enable 0x2 + +#define WIA_wmode_suspend 0x0 +#define WIA_wmode_start 0x3 +#define WIA_wagp_agp 0x4 + +#define DC_opcod_line_open 0x0 +#define DC_opcod_autoline_open 0x1 +#define DC_opcod_line_close 0x2 +#define DC_opcod_autoline_close 0x3 +#define DC_opcod_trap 0x4 +#define DC_opcod_texture_trap 0x6 +#define DC_opcod_bitblt 0x8 +#define DC_opcod_iload 0x9 +#define DC_atype_rpl 0x0 +#define DC_atype_rstr 0x10 +#define DC_atype_zi 0x30 +#define DC_atype_blk 0x40 +#define DC_atype_i 0x70 +#define DC_linear_xy 0x0 +#define DC_linear_linear 0x80 +#define DC_zmode_nozcmp 0x0 +#define DC_zmode_ze 0x200 +#define DC_zmode_zne 0x300 +#define DC_zmode_zlt 0x400 +#define DC_zmode_zlte 0x500 +#define DC_zmode_zgt 0x600 +#define DC_zmode_zgte 0x700 +#define DC_solid_disable 0x0 +#define DC_solid_enable 0x800 +#define DC_arzero_disable 0x0 +#define DC_arzero_enable 0x1000 +#define DC_sgnzero_disable 0x0 +#define DC_sgnzero_enable 0x2000 +#define DC_shftzero_disable 0x0 +#define DC_shftzero_enable 0x4000 +#define DC_bop_SHIFT 16 +#define DC_trans_SHIFT 20 +#define DC_bltmod_bmonolef 0x0 +#define DC_bltmod_bmonowf 0x8000000 +#define DC_bltmod_bplan 0x2000000 +#define DC_bltmod_bfcol 0x4000000 +#define DC_bltmod_bu32bgr 0x6000000 +#define DC_bltmod_bu32rgb 0xe000000 +#define DC_bltmod_bu24bgr 0x16000000 +#define DC_bltmod_bu24rgb 0x1e000000 +#define DC_pattern_disable 0x0 +#define DC_pattern_enable 0x20000000 +#define DC_transc_disable 0x0 +#define DC_transc_enable 0x40000000 +#define DC_clipdis_disable 0x0 +#define DC_clipdis_enable 0x80000000 + +#define SETADD_mode_vertlist 0x0 + + +#define MGA_CLEAR_CMD (DC_opcod_trap | DC_arzero_enable | \ + DC_sgnzero_enable | DC_shftzero_enable | \ + (0xC << DC_bop_SHIFT) | DC_clipdis_enable | \ + DC_solid_enable | DC_transc_enable) + + +#define MGA_COPY_CMD (DC_opcod_bitblt | DC_atype_rpl | DC_linear_xy | \ + DC_solid_disable | DC_arzero_disable | \ + DC_sgnzero_enable | DC_shftzero_enable | \ + (0xC << DC_bop_SHIFT) | DC_bltmod_bfcol | \ + DC_pattern_disable | DC_transc_disable | \ + DC_clipdis_enable) \ + +#define MGA_FLUSH_CMD (DC_opcod_texture_trap | (0xF << DC_trans_SHIFT) |\ + DC_arzero_enable | DC_sgnzero_enable | \ + DC_atype_i) + #endif diff --git a/linux/mga_state.c b/linux/mga_state.c index d09881ba..723ccc53 100644 --- a/linux/mga_state.c +++ b/linux/mga_state.c @@ -2,6 +2,7 @@ * Created: 
Thu Jan 27 02:53:43 2000 by jhartmann@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -23,340 +24,1044 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * Authors: Jeff Hartmann <jhartmann@precisioninsight.com> - * Keith Whitwell <keithw@precisioninsight.com> - * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_state.c,v 1.1 2000/02/11 17:26:08 dawes Exp $ + * Authors: Jeff Hartmann <jhartmann@valinux.com> + * Keith Whitwell <keithw@valinux.com> * */ - + #define __NO_VERSION__ #include "drmP.h" #include "mga_drv.h" -#include "mgareg_flags.h" -#include "mga_dma.h" -#include "mga_state.h" #include "drm.h" -void mgaEmitClipRect( drm_mga_private_t *dev_priv, xf86drmClipRectRec *box ) +static void mgaEmitClipRect(drm_mga_private_t * dev_priv, + drm_clip_rect_t * box) { + drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; + unsigned int *regs = sarea_priv->ContextState; PRIMLOCALS; + DRM_DEBUG("%s\n", __FUNCTION__); - PRIMGETPTR( dev_priv ); + /* This takes 10 dwords */ + PRIMGETPTR(dev_priv); - /* The G400 seems to have an issue with the second WARP not - * stalling clipper register writes. This bothers me, but the only - * way I could get it to never clip the last triangle under any - * circumstances is by inserting TWO dwgsync commands. - */ - if (dev_priv->chipset == MGA_CARD_TYPE_G400) { - PRIMOUTREG( MGAREG_DWGSYNC, 0 ); - PRIMOUTREG( MGAREG_DWGSYNC, 0 ); - } + /* Force reset of dwgctl (eliminates clip disable) */ +#if 0 + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DWGSYNC, 0); + PRIMOUTREG(MGAREG_DWGSYNC, 0); + PRIMOUTREG(MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL]); +#else + PRIMOUTREG(MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL]); + PRIMOUTREG(MGAREG_LEN + MGAREG_MGA_EXEC, 0x80000000); + PRIMOUTREG(MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL]); + PRIMOUTREG(MGAREG_LEN + MGAREG_MGA_EXEC, 0x80000000); +#endif - PRIMOUTREG( MGAREG_CXBNDRY, ((box->x2)<<16)|(box->x1) ); - PRIMOUTREG( MGAREG_YTOP, box->y1 * dev_priv->stride ); - PRIMOUTREG( MGAREG_YBOT, box->y2 * dev_priv->stride ); - PRIMADVANCE( dev_priv ); -} + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_CXBNDRY, ((box->x2) << 16) | (box->x1)); + PRIMOUTREG(MGAREG_YTOP, box->y1 * dev_priv->stride / 2); + PRIMOUTREG(MGAREG_YBOT, box->y2 * dev_priv->stride / 2); + PRIMADVANCE(dev_priv); +} -static void mgaEmitContext(drm_mga_private_t *dev_priv, - drm_mga_buf_priv_t *buf_priv) +static void mgaEmitContext(drm_mga_private_t * dev_priv) { - unsigned int *regs = buf_priv->ContextState; + drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; + unsigned int *regs = sarea_priv->ContextState; PRIMLOCALS; - - PRIMGETPTR( dev_priv ); - PRIMOUTREG( MGAREG_DSTORG, regs[MGA_CTXREG_DSTORG] ); - PRIMOUTREG( MGAREG_MACCESS, regs[MGA_CTXREG_MACCESS] ); - PRIMOUTREG( MGAREG_PLNWT, regs[MGA_CTXREG_PLNWT] ); - PRIMOUTREG( MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL] ); - PRIMOUTREG( MGAREG_ALPHACTRL, regs[MGA_CTXREG_ALPHACTRL] ); - PRIMOUTREG( MGAREG_FOGCOL, regs[MGA_CTXREG_FOGCOLOR] ); - PRIMOUTREG( MGAREG_WFLAG, regs[MGA_CTXREG_WFLAG] ); - - if (dev_priv->chipset == MGA_CARD_TYPE_G400) { - PRIMOUTREG( MGAREG_WFLAG1, regs[MGA_CTXREG_WFLAG] ); - PRIMOUTREG( MGAREG_TDUALSTAGE0, regs[MGA_CTXREG_TDUAL0] ); - PRIMOUTREG( MGAREG_TDUALSTAGE1, regs[MGA_CTXREG_TDUAL1] ); - } - - 
PRIMADVANCE( dev_priv ); + DRM_DEBUG("%s\n", __FUNCTION__); + + /* This takes a max of 15 dwords */ + PRIMGETPTR(dev_priv); + + PRIMOUTREG(MGAREG_DSTORG, regs[MGA_CTXREG_DSTORG]); + PRIMOUTREG(MGAREG_MACCESS, regs[MGA_CTXREG_MACCESS]); + PRIMOUTREG(MGAREG_PLNWT, regs[MGA_CTXREG_PLNWT]); + PRIMOUTREG(MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL]); + + PRIMOUTREG(MGAREG_ALPHACTRL, regs[MGA_CTXREG_ALPHACTRL]); + PRIMOUTREG(MGAREG_FOGCOL, regs[MGA_CTXREG_FOGCOLOR]); + PRIMOUTREG(MGAREG_WFLAG, regs[MGA_CTXREG_WFLAG]); + PRIMOUTREG(MGAREG_ZORG, dev_priv->depthOffset); /* invarient */ + + if (dev_priv->chipset == MGA_CARD_TYPE_G400) { + PRIMOUTREG(MGAREG_WFLAG1, regs[MGA_CTXREG_WFLAG]); + PRIMOUTREG(MGAREG_TDUALSTAGE0, regs[MGA_CTXREG_TDUAL0]); + PRIMOUTREG(MGAREG_TDUALSTAGE1, regs[MGA_CTXREG_TDUAL1]); + PRIMOUTREG(MGAREG_FCOL, regs[MGA_CTXREG_FCOL]); + } else { + PRIMOUTREG(MGAREG_FCOL, regs[MGA_CTXREG_FCOL]); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + } + + PRIMADVANCE(dev_priv); } -static void mgaG200EmitTex( drm_mga_private_t *dev_priv, - drm_mga_buf_priv_t *buf_priv ) +static void mgaG200EmitTex(drm_mga_private_t * dev_priv) { - unsigned int *regs = buf_priv->TexState[0]; + drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; + unsigned int *regs = sarea_priv->TexState[0]; PRIMLOCALS; + DRM_DEBUG("%s\n", __FUNCTION__); + + PRIMGETPTR(dev_priv); + + /* This takes 20 dwords */ + + PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2]); + PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL]); + PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER]); + PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL]); + + PRIMOUTREG(MGAREG_TEXORG, regs[MGA_TEXREG_ORG]); + PRIMOUTREG(MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1]); + PRIMOUTREG(MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2]); + PRIMOUTREG(MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3]); + + PRIMOUTREG(MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4]); + PRIMOUTREG(MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH]); + PRIMOUTREG(MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT]); + PRIMOUTREG(0x2d00 + 24 * 4, regs[MGA_TEXREG_WIDTH]); - PRIMGETPTR( dev_priv ); - PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] ); - PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL] ); - PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER] ); - PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL] ); - PRIMOUTREG(MGAREG_TEXORG, regs[MGA_TEXREG_ORG] ); - PRIMOUTREG(MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1] ); - PRIMOUTREG(MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2] ); - PRIMOUTREG(MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3] ); - PRIMOUTREG(MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4] ); - PRIMOUTREG(MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH] ); - PRIMOUTREG(MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT] ); - - PRIMOUTREG(0x2d00 + 24*4, regs[MGA_TEXREG_WIDTH] ); - PRIMOUTREG(0x2d00 + 34*4, regs[MGA_TEXREG_HEIGHT] ); - - PRIMADVANCE( dev_priv ); + PRIMOUTREG(0x2d00 + 34 * 4, regs[MGA_TEXREG_HEIGHT]); + PRIMOUTREG(MGAREG_TEXTRANS, 0xffff); + PRIMOUTREG(MGAREG_TEXTRANSHIGH, 0xffff); + PRIMOUTREG(MGAREG_DMAPAD, 0); + + PRIMADVANCE(dev_priv); } -static void mgaG400EmitTex0( drm_mga_private_t *dev_priv, - drm_mga_buf_priv_t *buf_priv ) +static void mgaG400EmitTex0(drm_mga_private_t * dev_priv) { - unsigned int *regs = buf_priv->TexState[0]; - int multitex = buf_priv->WarpPipe & MGA_T2; - + drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; + unsigned int *regs = sarea_priv->TexState[0]; + int multitex = sarea_priv->WarpPipe & MGA_T2; PRIMLOCALS; - PRIMGETPTR( dev_priv ); - - PRIMOUTREG(MGAREG_TEXCTL2, 
regs[MGA_TEXREG_CTL2] ); - PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL] ); - PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER] ); - PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL] ); - PRIMOUTREG(MGAREG_TEXORG, regs[MGA_TEXREG_ORG] ); - PRIMOUTREG(MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1] ); - PRIMOUTREG(MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2] ); - PRIMOUTREG(MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3] ); - PRIMOUTREG(MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4] ); - PRIMOUTREG(MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH] ); - PRIMOUTREG(MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT] ); - - PRIMOUTREG(0x2d00 + 49*4, 0); - PRIMOUTREG(0x2d00 + 57*4, 0); - PRIMOUTREG(0x2d00 + 53*4, 0); - PRIMOUTREG(0x2d00 + 61*4, 0); + DRM_DEBUG("%s\n", __FUNCTION__); + + PRIMGETPTR(dev_priv); + + /* This takes a max of 30 dwords */ + + PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] | 0x00008000); + PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL]); + PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER]); + PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL]); + + PRIMOUTREG(MGAREG_TEXORG, regs[MGA_TEXREG_ORG]); + PRIMOUTREG(MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1]); + PRIMOUTREG(MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2]); + PRIMOUTREG(MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3]); + + PRIMOUTREG(MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4]); + PRIMOUTREG(MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH]); + PRIMOUTREG(MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT]); + PRIMOUTREG(0x2d00 + 49 * 4, 0); + + PRIMOUTREG(0x2d00 + 57 * 4, 0); + PRIMOUTREG(0x2d00 + 53 * 4, 0); + PRIMOUTREG(0x2d00 + 61 * 4, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); if (!multitex) { - PRIMOUTREG(0x2d00 + 52*4, 0x40 ); - PRIMOUTREG(0x2d00 + 60*4, 0x40 ); - } + PRIMOUTREG(0x2d00 + 52 * 4, 0x40); + PRIMOUTREG(0x2d00 + 60 * 4, 0x40); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + } - PRIMOUTREG(0x2d00 + 54*4, regs[MGA_TEXREG_WIDTH] | 0x40 ); - PRIMOUTREG(0x2d00 + 62*4, regs[MGA_TEXREG_HEIGHT] | 0x40 ); + PRIMOUTREG(0x2d00 + 54 * 4, regs[MGA_TEXREG_WIDTH] | 0x40); + PRIMOUTREG(0x2d00 + 62 * 4, regs[MGA_TEXREG_HEIGHT] | 0x40); + PRIMOUTREG(MGAREG_TEXTRANS, 0xffff); + PRIMOUTREG(MGAREG_TEXTRANSHIGH, 0xffff); - PRIMADVANCE( dev_priv ); + PRIMADVANCE(dev_priv); } -#define TMC_map1_enable 0x80000000 - +#define TMC_map1_enable 0x80000000 -static void mgaG400EmitTex1( drm_mga_private_t *dev_priv, - drm_mga_buf_priv_t *buf_priv ) +static void mgaG400EmitTex1(drm_mga_private_t * dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int *regs = sarea_priv->TexState[1]; - PRIMLOCALS; + DRM_DEBUG("%s\n", __FUNCTION__); + PRIMGETPTR(dev_priv); - PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] | TMC_map1_enable); - PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL] ); - PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER] ); - PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL] ); - PRIMOUTREG(MGAREG_TEXORG, regs[MGA_TEXREG_ORG] ); - PRIMOUTREG(MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1] ); - PRIMOUTREG(MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2] ); - PRIMOUTREG(MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3] ); - PRIMOUTREG(MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4] ); - PRIMOUTREG(MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH] ); - PRIMOUTREG(MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT] ); - - PRIMOUTREG(0x2d00 + 49*4, 0); - PRIMOUTREG(0x2d00 + 57*4, 0); - PRIMOUTREG(0x2d00 + 53*4, 0); - PRIMOUTREG(0x2d00 + 61*4, 0); - - PRIMOUTREG(0x2d00 + 52*4, regs[MGA_TEXREG_WIDTH] | 0x40 ); - PRIMOUTREG(0x2d00 + 60*4, regs[MGA_TEXREG_HEIGHT] | 0x40 ); - - PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] ); 
- - PRIMADVANCE( dev_priv ); -} + /* This takes 25 dwords */ + + PRIMOUTREG(MGAREG_TEXCTL2, + regs[MGA_TEXREG_CTL2] | TMC_map1_enable | 0x00008000); + PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL]); + PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER]); + PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL]); + + PRIMOUTREG(MGAREG_TEXORG, regs[MGA_TEXREG_ORG]); + PRIMOUTREG(MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1]); + PRIMOUTREG(MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2]); + PRIMOUTREG(MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3]); + + PRIMOUTREG(MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4]); + PRIMOUTREG(MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH]); + PRIMOUTREG(MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT]); + PRIMOUTREG(0x2d00 + 49 * 4, 0); + PRIMOUTREG(0x2d00 + 57 * 4, 0); + PRIMOUTREG(0x2d00 + 53 * 4, 0); + PRIMOUTREG(0x2d00 + 61 * 4, 0); + PRIMOUTREG(0x2d00 + 52 * 4, regs[MGA_TEXREG_WIDTH] | 0x40); -static void mgaG400EmitPipe(drm_mga_private_t *dev_priv, - drm_mga_buf_priv_t *buf_priv) + PRIMOUTREG(0x2d00 + 60 * 4, regs[MGA_TEXREG_HEIGHT] | 0x40); + PRIMOUTREG(MGAREG_TEXTRANS, 0xffff); + PRIMOUTREG(MGAREG_TEXTRANSHIGH, 0xffff); + PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] | 0x00008000); + + PRIMADVANCE(dev_priv); +} + +#define EMIT_PIPE 50 +static void mgaG400EmitPipe(drm_mga_private_t * dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int pipe = sarea_priv->WarpPipe; float fParam = 12800.0f; PRIMLOCALS; - + DRM_DEBUG("%s\n", __FUNCTION__); + PRIMGETPTR(dev_priv); - PRIMOUTREG(MGAREG_WIADDR2, WIA_wmode_suspend); - + + /* This takes 50 dwords */ + /* Establish vertex size. */ + PRIMOUTREG(MGAREG_WIADDR2, WIA_wmode_suspend); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + if (pipe & MGA_T2) { PRIMOUTREG(MGAREG_WVRTXSZ, 0x00001e09); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + + PRIMOUTREG(MGAREG_WACCEPTSEQ, 0); + PRIMOUTREG(MGAREG_WACCEPTSEQ, 0); + PRIMOUTREG(MGAREG_WACCEPTSEQ, 0); PRIMOUTREG(MGAREG_WACCEPTSEQ, 0x1e000000); } else { + if (dev_priv->WarpPipe & MGA_T2) { + /* Flush the WARP pipe */ + PRIMOUTREG(MGAREG_YDST, 0); + PRIMOUTREG(MGAREG_FXLEFT, 0); + PRIMOUTREG(MGAREG_FXRIGHT, 1); + PRIMOUTREG(MGAREG_DWGCTL, MGA_FLUSH_CMD); + + PRIMOUTREG(MGAREG_LEN + MGAREG_MGA_EXEC, 1); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DWGSYNC, 0x7000); + PRIMOUTREG(MGAREG_DMAPAD, 0); + + PRIMOUTREG(MGAREG_TEXCTL2, 0 | 0x00008000); + PRIMOUTREG(MGAREG_LEN + MGAREG_MGA_EXEC, 0); + PRIMOUTREG(MGAREG_TEXCTL2, 0x80 | 0x00008000); + PRIMOUTREG(MGAREG_LEN + MGAREG_MGA_EXEC, 0); + } + PRIMOUTREG(MGAREG_WVRTXSZ, 0x00001807); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + + PRIMOUTREG(MGAREG_WACCEPTSEQ, 0); + PRIMOUTREG(MGAREG_WACCEPTSEQ, 0); + PRIMOUTREG(MGAREG_WACCEPTSEQ, 0); PRIMOUTREG(MGAREG_WACCEPTSEQ, 0x18000000); } - + PRIMOUTREG(MGAREG_WFLAG, 0); PRIMOUTREG(MGAREG_WFLAG1, 0); - - PRIMOUTREG(0x2d00 + 56*4, *((u32 *)(&fParam))); - PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(0x2d00 + 56 * 4, *((u32 *) (&fParam))); PRIMOUTREG(MGAREG_DMAPAD, 0); - - PRIMOUTREG(0x2d00 + 49*4, 0); /* Tex stage 0 */ - PRIMOUTREG(0x2d00 + 57*4, 0); /* Tex stage 0 */ - PRIMOUTREG(0x2d00 + 53*4, 0); /* Tex stage 1 */ - PRIMOUTREG(0x2d00 + 61*4, 0); /* Tex stage 1 */ - - PRIMOUTREG(0x2d00 + 54*4, 0x40); /* Tex stage 0 : w */ - PRIMOUTREG(0x2d00 + 62*4, 0x40); /* Tex stage 0 : h */ - PRIMOUTREG(0x2d00 + 52*4, 0x40); /* Tex stage 1 : w */ - PRIMOUTREG(0x2d00 + 
60*4, 0x40); /* Tex stage 1 : h */ - + + PRIMOUTREG(0x2d00 + 49 * 4, 0); /* Tex stage 0 */ + PRIMOUTREG(0x2d00 + 57 * 4, 0); /* Tex stage 0 */ + PRIMOUTREG(0x2d00 + 53 * 4, 0); /* Tex stage 1 */ + PRIMOUTREG(0x2d00 + 61 * 4, 0); /* Tex stage 1 */ + + PRIMOUTREG(0x2d00 + 54 * 4, 0x40); /* Tex stage 0 : w */ + PRIMOUTREG(0x2d00 + 62 * 4, 0x40); /* Tex stage 0 : h */ + PRIMOUTREG(0x2d00 + 52 * 4, 0x40); /* Tex stage 1 : w */ + PRIMOUTREG(0x2d00 + 60 * 4, 0x40); /* Tex stage 1 : h */ + /* Dma pading required due to hw bug */ PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff); PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff); PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff); - PRIMOUTREG(MGAREG_WIADDR2, (dev_priv->WarpIndex[pipe].phys_addr | - WIA_wmode_start | WIA_wagp_agp)); + PRIMOUTREG(MGAREG_WIADDR2, + (u32) (dev_priv->WarpIndex[pipe]. + phys_addr | WIA_wmode_start | WIA_wagp_agp)); PRIMADVANCE(dev_priv); } -static void mgaG200EmitPipe( drm_mga_private_t *dev_priv, - drm_mga_buf_priv_t *buf_priv ) +static void mgaG200EmitPipe(drm_mga_private_t * dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int pipe = sarea_priv->WarpPipe; PRIMLOCALS; + DRM_DEBUG("%s\n", __FUNCTION__); PRIMGETPTR(dev_priv); + + /* This takes 15 dwords */ + PRIMOUTREG(MGAREG_WIADDR, WIA_wmode_suspend); PRIMOUTREG(MGAREG_WVRTXSZ, 7); PRIMOUTREG(MGAREG_WFLAG, 0); - PRIMOUTREG(0x2d00 + 24*4, 0); /* tex w/h */ - - PRIMOUTREG(0x2d00 + 25*4, 0x100); - PRIMOUTREG(0x2d00 + 34*4, 0); /* tex w/h */ - PRIMOUTREG(0x2d00 + 42*4, 0xFFFF); - PRIMOUTREG(0x2d00 + 60*4, 0xFFFF); - + PRIMOUTREG(0x2d00 + 24 * 4, 0); /* tex w/h */ + + PRIMOUTREG(0x2d00 + 25 * 4, 0x100); + PRIMOUTREG(0x2d00 + 34 * 4, 0); /* tex w/h */ + PRIMOUTREG(0x2d00 + 42 * 4, 0xFFFF); + PRIMOUTREG(0x2d00 + 60 * 4, 0xFFFF); + /* Dma pading required due to hw bug */ PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff); PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff); PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff); - PRIMOUTREG(MGAREG_WIADDR, (dev_priv->WarpIndex[pipe].phys_addr | - WIA_wmode_start | WIA_wagp_agp)); + PRIMOUTREG(MGAREG_WIADDR, + (u32) (dev_priv->WarpIndex[pipe]. 
+ phys_addr | WIA_wmode_start | WIA_wagp_agp)); - PRIMADVANCE(dev_priv); + PRIMADVANCE( dev_priv ); } -void mgaEmitState( drm_mga_private_t *dev_priv, drm_mga_buf_priv_t *buf_priv ) +static void mgaEmitState(drm_mga_private_t * dev_priv) { - unsigned int dirty = buf_priv->dirty; + drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; + unsigned int dirty = sarea_priv->dirty; + DRM_DEBUG("%s\n", __FUNCTION__); + + if (dev_priv->chipset == MGA_CARD_TYPE_G400) { + int multitex = sarea_priv->WarpPipe & MGA_T2; + + if (sarea_priv->WarpPipe != dev_priv->WarpPipe) { + mgaG400EmitPipe(dev_priv); + dev_priv->WarpPipe = sarea_priv->WarpPipe; + } - if (dev_priv->chipset == MGA_CARD_TYPE_G400) { - if (dirty & MGASAREA_NEW_CONTEXT) - mgaEmitContext( dev_priv, buf_priv ); + if (dirty & MGA_UPLOAD_CTX) { + mgaEmitContext(dev_priv); + sarea_priv->dirty &= ~MGA_UPLOAD_CTX; + } - if (dirty & MGASAREA_NEW_TEX1) - mgaG400EmitTex1( dev_priv, buf_priv ); - - if (dirty & MGASAREA_NEW_TEX0) - mgaG400EmitTex0( dev_priv, buf_priv ); + if (dirty & MGA_UPLOAD_TEX0) { + mgaG400EmitTex0(dev_priv); + sarea_priv->dirty &= ~MGA_UPLOAD_TEX0; + } - if (dirty & MGASAREA_NEW_PIPE) - mgaG400EmitPipe( dev_priv, buf_priv ); + if ((dirty & MGA_UPLOAD_TEX1) && multitex) { + mgaG400EmitTex1(dev_priv); + sarea_priv->dirty &= ~MGA_UPLOAD_TEX1; + } } else { - if (dirty & MGASAREA_NEW_CONTEXT) - mgaEmitContext( dev_priv, buf_priv ); + if (sarea_priv->WarpPipe != dev_priv->WarpPipe) { + mgaG200EmitPipe(dev_priv); + dev_priv->WarpPipe = sarea_priv->WarpPipe; + } - if (dirty & MGASAREA_NEW_TEX0) - mgaG200EmitTex( dev_priv, buf_priv ); + if (dirty & MGA_UPLOAD_CTX) { + mgaEmitContext(dev_priv); + sarea_priv->dirty &= ~MGA_UPLOAD_CTX; + } - if (dirty & MGASAREA_NEW_PIPE) - mgaG200EmitPipe( dev_priv, buf_priv ); - } + if (dirty & MGA_UPLOAD_TEX0) { + mgaG200EmitTex(dev_priv); + sarea_priv->dirty &= ~MGA_UPLOAD_TEX0; + } + } } - /* Disallow all write destinations except the front and backbuffer. */ -static int mgaCopyContext(drm_mga_private_t *dev_priv, - drm_mga_buf_priv_t *buf_priv) +static int mgaVerifyContext(drm_mga_private_t * dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int *regs = sarea_priv->ContextState; - - if (regs[MGA_CTXREG_DSTORG] != dev_priv->frontOrg && - regs[MGA_CTXREG_DSTORG] != dev_priv->backOrg) + + DRM_DEBUG("%s\n", __FUNCTION__); + + if (regs[MGA_CTXREG_DSTORG] != dev_priv->frontOffset && + regs[MGA_CTXREG_DSTORG] != dev_priv->backOffset) { + DRM_DEBUG("BAD DSTORG: %x (front %x, back %x)\n\n", + regs[MGA_CTXREG_DSTORG], dev_priv->frontOffset, + dev_priv->backOffset); + regs[MGA_CTXREG_DSTORG] = 0; return -1; + } - memcpy(buf_priv->ContextState, sarea_priv->ContextState, - sizeof(buf_priv->ContextState)); return 0; } - /* Disallow texture reads from PCI space. 
*/ -static int mgaCopyTex(drm_mga_private_t *dev_priv, - drm_mga_buf_priv_t *buf_priv, - int unit) +static int mgaVerifyTex(drm_mga_private_t * dev_priv, int unit) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; - if ((sarea_priv->TexState[unit][MGA_TEXREG_ORG] & 0x3) == 0x1) - return -1; + DRM_DEBUG("%s\n", __FUNCTION__); - memcpy(buf_priv->TexState[unit], sarea_priv->TexState[unit], - sizeof(buf_priv->TexState[0])); + if ((sarea_priv->TexState[unit][MGA_TEXREG_ORG] & 0x3) == 0x1) { + DRM_DEBUG("BAD TEXREG_ORG: %x, unit %d\n", + sarea_priv->TexState[unit][MGA_TEXREG_ORG], + unit); + sarea_priv->TexState[unit][MGA_TEXREG_ORG] = 0; + return -1; + } return 0; } - -int mgaCopyAndVerifyState( drm_mga_private_t *dev_priv, - drm_mga_buf_priv_t *buf_priv ) +static int mgaVerifyState(drm_mga_private_t * dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; - unsigned int dirty = sarea_priv->dirty ; + unsigned int dirty = sarea_priv->dirty; int rv = 0; - buf_priv->dirty = sarea_priv->dirty; - buf_priv->WarpPipe = sarea_priv->WarpPipe; - - if (dirty & MGASAREA_NEW_CONTEXT) - rv |= mgaCopyContext( dev_priv, buf_priv ); - - if (dirty & MGASAREA_NEW_TEX0) - rv |= mgaCopyTex( dev_priv, buf_priv, 0 ); - - if (dev_priv->chipset == MGA_CARD_TYPE_G400) - { - if (dirty & MGASAREA_NEW_TEX1) - rv |= mgaCopyTex( dev_priv, buf_priv, 1 ); - - if (dirty & MGASAREA_NEW_PIPE) - rv |= (buf_priv->WarpPipe > MGA_MAX_G400_PIPES); - } - else - { - if (dirty & MGASAREA_NEW_PIPE) - rv |= (buf_priv->WarpPipe > MGA_MAX_G200_PIPES); - } + DRM_DEBUG("%s\n", __FUNCTION__); + + if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) + sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; + + if (dirty & MGA_UPLOAD_CTX) + rv |= mgaVerifyContext(dev_priv); + + if (dirty & MGA_UPLOAD_TEX0) + rv |= mgaVerifyTex(dev_priv, 0); + + if (dev_priv->chipset == MGA_CARD_TYPE_G400) { + if (dirty & MGA_UPLOAD_TEX1) + rv |= mgaVerifyTex(dev_priv, 1); + + if (dirty & MGA_UPLOAD_PIPE) + rv |= (sarea_priv->WarpPipe > MGA_MAX_G400_PIPES); + } else { + if (dirty & MGA_UPLOAD_PIPE) + rv |= (sarea_priv->WarpPipe > MGA_MAX_G200_PIPES); + } return rv == 0; } +static int mgaVerifyIload(drm_mga_private_t * dev_priv, + unsigned long bus_address, + unsigned int dstOrg, int length) +{ + DRM_DEBUG("%s\n", __FUNCTION__); + + if (dstOrg < dev_priv->textureOffset || + dstOrg + length > + (dev_priv->textureOffset + dev_priv->textureSize)) { + return -EINVAL; + } + if (length % 64) { + return -EINVAL; + } + return 0; +} + +/* This copies a 64 byte aligned agp region to the frambuffer + * with a standard blit, the ioctl needs to do checking */ + +static void mga_dma_dispatch_tex_blit(drm_device_t * dev, + unsigned long bus_address, + int length, unsigned int destOrg) +{ + drm_mga_private_t *dev_priv = dev->dev_private; + int use_agp = PDEA_pagpxfer_enable | 0x00000001; + u16 y2; + PRIMLOCALS; + DRM_DEBUG("%s\n", __FUNCTION__); + + y2 = length / 64; + + PRIM_OVERFLOW(dev, dev_priv, 30); + PRIMGETPTR(dev_priv); + + PRIMOUTREG(MGAREG_DSTORG, destOrg); + PRIMOUTREG(MGAREG_MACCESS, 0x00000000); + DRM_DEBUG("srcorg : %lx\n", bus_address | use_agp); + PRIMOUTREG(MGAREG_SRCORG, (u32) bus_address | use_agp); + PRIMOUTREG(MGAREG_AR5, 64); + + PRIMOUTREG(MGAREG_PITCH, 64); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DWGCTL, MGA_COPY_CMD); + + PRIMOUTREG(MGAREG_AR0, 63); + PRIMOUTREG(MGAREG_AR3, 0); + PRIMOUTREG(MGAREG_FXBNDRY, (63 << 16)); + PRIMOUTREG(MGAREG_YDSTLEN + MGAREG_MGA_EXEC, y2); + + PRIMOUTREG(MGAREG_SRCORG, 0); + 
PRIMOUTREG(MGAREG_PITCH, dev_priv->stride / dev_priv->cpp); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMADVANCE(dev_priv); +} + +static void mga_dma_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf) +{ + drm_mga_private_t *dev_priv = dev->dev_private; + drm_mga_buf_priv_t *buf_priv = buf->dev_private; + drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; + unsigned long address = (unsigned long) buf->bus_address; + int length = buf->used; + int use_agp = PDEA_pagpxfer_enable; + int i = 0; + int primary_needed; + PRIMLOCALS; + DRM_DEBUG("%s\n", __FUNCTION__); + + DRM_DEBUG("dispatch vertex %d addr 0x%lx, " + "length 0x%x nbox %d dirty %x\n", + buf->idx, address, length, + sarea_priv->nbox, sarea_priv->dirty); + + DRM_DEBUG("used : %d, total : %d\n", buf->used, buf->total); + + if (buf->used) { + /* WARNING: if you change any of the state functions verify + * these numbers (Overestimating this doesn't hurt). + */ + buf_priv->dispatched = 1; + primary_needed = (50 + 15 + 15 + 30 + 25 + + 10 + 15 * MGA_NR_SAREA_CLIPRECTS); + PRIM_OVERFLOW(dev, dev_priv, primary_needed); + mgaEmitState(dev_priv); + do { + if (i < sarea_priv->nbox) { + DRM_DEBUG("idx %d Emit box %d/%d:" + "%d,%d - %d,%d\n", + buf->idx, + i, sarea_priv->nbox, + sarea_priv->boxes[i].x1, + sarea_priv->boxes[i].y1, + sarea_priv->boxes[i].x2, + sarea_priv->boxes[i].y2); + + mgaEmitClipRect(dev_priv, + &sarea_priv->boxes[i]); + } + + PRIMGETPTR(dev_priv); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_SECADDRESS, + ((u32) address) | TT_VERTEX); + PRIMOUTREG(MGAREG_SECEND, + (((u32) (address + length)) | use_agp)); + PRIMADVANCE(dev_priv); + } while (++i < sarea_priv->nbox); + } + if (buf_priv->discard) { + if (buf_priv->dispatched == 1) + AGEBUF(dev_priv, buf_priv); + buf_priv->dispatched = 0; + mga_freelist_put(dev, buf); + } + + +} + + +static void mga_dma_dispatch_indices(drm_device_t * dev, + drm_buf_t * buf, + unsigned int start, unsigned int end) +{ + drm_mga_private_t *dev_priv = dev->dev_private; + drm_mga_buf_priv_t *buf_priv = buf->dev_private; + drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; + unsigned int address = (unsigned int) buf->bus_address; + int use_agp = PDEA_pagpxfer_enable; + int i = 0; + int primary_needed; + PRIMLOCALS; + DRM_DEBUG("%s\n", __FUNCTION__); + + DRM_DEBUG("dispatch indices %d addr 0x%x, " + "start 0x%x end 0x%x nbox %d dirty %x\n", + buf->idx, address, start, end, + sarea_priv->nbox, sarea_priv->dirty); + + if (start != end) { + /* WARNING: if you change any of the state functions verify + * these numbers (Overestimating this doesn't hurt). 
+ */ + buf_priv->dispatched = 1; + primary_needed = (50 + 15 + 15 + 30 + 25 + + 10 + 15 * MGA_NR_SAREA_CLIPRECTS); + PRIM_OVERFLOW(dev, dev_priv, primary_needed); + mgaEmitState(dev_priv); + + do { + if (i < sarea_priv->nbox) { + DRM_DEBUG("idx %d Emit box %d/%d:" + "%d,%d - %d,%d\n", + buf->idx, + i, sarea_priv->nbox, + sarea_priv->boxes[i].x1, + sarea_priv->boxes[i].y1, + sarea_priv->boxes[i].x2, + sarea_priv->boxes[i].y2); + + mgaEmitClipRect(dev_priv, + &sarea_priv->boxes[i]); + } + + PRIMGETPTR(dev_priv); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_SETUPADDRESS, + ((address + start) | + SETADD_mode_vertlist)); + PRIMOUTREG(MGAREG_SETUPEND, + ((address + end) | use_agp)); + PRIMADVANCE(dev_priv); + } while (++i < sarea_priv->nbox); + } + if (buf_priv->discard) { + if (buf_priv->dispatched == 1) + AGEBUF(dev_priv, buf_priv); + buf_priv->dispatched = 0; + mga_freelist_put(dev, buf); + } +} + + +static void mga_dma_dispatch_clear(drm_device_t * dev, int flags, + unsigned int clear_color, + unsigned int clear_zval) +{ + drm_mga_private_t *dev_priv = dev->dev_private; + drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; + unsigned int *regs = sarea_priv->ContextState; + int nbox = sarea_priv->nbox; + drm_clip_rect_t *pbox = sarea_priv->boxes; + unsigned int cmd; + int i; + int primary_needed; + PRIMLOCALS; + DRM_DEBUG("%s\n", __FUNCTION__); + + if (dev_priv->sgram) + cmd = MGA_CLEAR_CMD | DC_atype_blk; + else + cmd = MGA_CLEAR_CMD | DC_atype_rstr; + + primary_needed = nbox * 70; + if (primary_needed == 0) + primary_needed = 70; + PRIM_OVERFLOW(dev, dev_priv, primary_needed); + PRIMGETPTR(dev_priv); + + for (i = 0; i < nbox; i++) { + unsigned int height = pbox[i].y2 - pbox[i].y1; + + DRM_DEBUG("dispatch clear %d,%d-%d,%d flags %x!\n", + pbox[i].x1, pbox[i].y1, pbox[i].x2, + pbox[i].y2, flags); + + if (flags & MGA_FRONT) { + DRM_DEBUG("clear front\n"); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_YDSTLEN, + (pbox[i].y1 << 16) | height); + PRIMOUTREG(MGAREG_FXBNDRY, + (pbox[i].x2 << 16) | pbox[i].x1); + + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_FCOL, clear_color); + PRIMOUTREG(MGAREG_DSTORG, dev_priv->frontOffset); + PRIMOUTREG(MGAREG_DWGCTL + MGAREG_MGA_EXEC, cmd); + } + + if (flags & MGA_BACK) { + DRM_DEBUG("clear back\n"); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_YDSTLEN, + (pbox[i].y1 << 16) | height); + PRIMOUTREG(MGAREG_FXBNDRY, + (pbox[i].x2 << 16) | pbox[i].x1); + + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_FCOL, clear_color); + PRIMOUTREG(MGAREG_DSTORG, dev_priv->backOffset); + PRIMOUTREG(MGAREG_DWGCTL + MGAREG_MGA_EXEC, cmd); + } + + if (flags & MGA_DEPTH) { + DRM_DEBUG("clear depth\n"); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_YDSTLEN, + (pbox[i].y1 << 16) | height); + PRIMOUTREG(MGAREG_FXBNDRY, + (pbox[i].x2 << 16) | pbox[i].x1); + + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_FCOL, clear_zval); + PRIMOUTREG(MGAREG_DSTORG, dev_priv->depthOffset); + PRIMOUTREG(MGAREG_DWGCTL + MGAREG_MGA_EXEC, cmd); + } + } + + /* Force reset of DWGCTL */ + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL]); + PRIMADVANCE(dev_priv); +} + +static void mga_dma_dispatch_swap(drm_device_t * dev) +{ + drm_mga_private_t *dev_priv = dev->dev_private; + drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; + unsigned int *regs = 
sarea_priv->ContextState; + int nbox = sarea_priv->nbox; + drm_clip_rect_t *pbox = sarea_priv->boxes; + int i; + int primary_needed; + PRIMLOCALS; + DRM_DEBUG("%s\n", __FUNCTION__); + + primary_needed = nbox * 5; + primary_needed += 65; + PRIM_OVERFLOW(dev, dev_priv, primary_needed); + PRIMGETPTR(dev_priv); + + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DWGSYNC, 0x7100); + PRIMOUTREG(MGAREG_DWGSYNC, 0x7000); + + PRIMOUTREG(MGAREG_DSTORG, dev_priv->frontOffset); + PRIMOUTREG(MGAREG_MACCESS, dev_priv->mAccess); + PRIMOUTREG(MGAREG_SRCORG, dev_priv->backOffset); + PRIMOUTREG(MGAREG_AR5, dev_priv->stride / 2); + + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DWGCTL, MGA_COPY_CMD); + + for (i = 0; i < nbox; i++) { + unsigned int h = pbox[i].y2 - pbox[i].y1; + unsigned int start = pbox[i].y1 * dev_priv->stride / 2; + + DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n", + pbox[i].x1, pbox[i].y1, pbox[i].x2, pbox[i].y2); + + PRIMOUTREG(MGAREG_AR0, start + pbox[i].x2 - 1); + PRIMOUTREG(MGAREG_AR3, start + pbox[i].x1); + PRIMOUTREG(MGAREG_FXBNDRY, + pbox[i].x1 | ((pbox[i].x2 - 1) << 16)); + PRIMOUTREG(MGAREG_YDSTLEN + MGAREG_MGA_EXEC, + (pbox[i].y1 << 16) | h); + } + + /* Force reset of DWGCTL */ + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_SRCORG, 0); + PRIMOUTREG(MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL]); + + PRIMADVANCE(dev_priv); +} + +int mga_clear_bufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_mga_private_t *dev_priv = + (drm_mga_private_t *) dev->dev_private; + drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_mga_clear_t clear; + + copy_from_user_ret(&clear, (drm_mga_clear_t *) arg, sizeof(clear), + -EFAULT); + DRM_DEBUG("%s\n", __FUNCTION__); + + if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("mga_clear_bufs called without lock held\n"); + return -EINVAL; + } + + if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) + sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; + + /* Make sure we restore the 3D state next time. + */ + dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CTX; + mga_dma_dispatch_clear(dev, clear.flags, + clear.clear_color, clear.clear_depth); + PRIMUPDATE(dev_priv); + mga_flush_write_combine(); + mga_dma_schedule(dev, 1); + return 0; +} + +int mga_swap_bufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_mga_private_t *dev_priv = + (drm_mga_private_t *) dev->dev_private; + drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; + DRM_DEBUG("%s\n", __FUNCTION__); + + if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("mga_swap_bufs called without lock held\n"); + return -EINVAL; + } + + if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) + sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; + + /* Make sure we restore the 3D state next time. 
+ */ + dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CTX; + mga_dma_dispatch_swap(dev); + PRIMUPDATE(dev_priv); + set_bit(MGA_BUF_SWAP_PENDING, + &dev_priv->current_prim->buffer_status); + mga_flush_write_combine(); + mga_dma_schedule(dev, 1); + return 0; +} + +int mga_iload(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_device_dma_t *dma = dev->dma; + drm_mga_private_t *dev_priv = + (drm_mga_private_t *) dev->dev_private; + drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_buf_t *buf; + drm_mga_buf_priv_t *buf_priv; + drm_mga_iload_t iload; + unsigned long bus_address; + DRM_DEBUG("%s\n", __FUNCTION__); + + DRM_DEBUG("Starting Iload\n"); + copy_from_user_ret(&iload, (drm_mga_iload_t *) arg, sizeof(iload), + -EFAULT); + + if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("mga_iload called without lock held\n"); + return -EINVAL; + } + + buf = dma->buflist[iload.idx]; + buf_priv = buf->dev_private; + bus_address = buf->bus_address; + DRM_DEBUG("bus_address %lx, length %d, destorg : %x\n", + bus_address, iload.length, iload.destOrg); + + if (mgaVerifyIload(dev_priv, + bus_address, iload.destOrg, iload.length)) { + mga_freelist_put(dev, buf); + return -EINVAL; + } + + sarea_priv->dirty |= MGA_UPLOAD_CTX; + + mga_dma_dispatch_tex_blit(dev, bus_address, iload.length, + iload.destOrg); + AGEBUF(dev_priv, buf_priv); + buf_priv->discard = 1; + mga_freelist_put(dev, buf); + mga_flush_write_combine(); + mga_dma_schedule(dev, 1); + return 0; +} + +int mga_vertex(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_mga_private_t *dev_priv = + (drm_mga_private_t *) dev->dev_private; + drm_device_dma_t *dma = dev->dma; + drm_buf_t *buf; + drm_mga_buf_priv_t *buf_priv; + drm_mga_vertex_t vertex; + DRM_DEBUG("%s\n", __FUNCTION__); + + copy_from_user_ret(&vertex, (drm_mga_vertex_t *) arg, + sizeof(vertex), -EFAULT); + + if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("mga_vertex called without lock held\n"); + return -EINVAL; + } + + DRM_DEBUG("mga_vertex\n"); + + buf = dma->buflist[vertex.idx]; + buf_priv = buf->dev_private; + + buf->used = vertex.used; + buf_priv->discard = vertex.discard; + + if (!mgaVerifyState(dev_priv)) { + if (vertex.discard) { + if (buf_priv->dispatched == 1) + AGEBUF(dev_priv, buf_priv); + buf_priv->dispatched = 0; + mga_freelist_put(dev, buf); + } + DRM_DEBUG("bad state\n"); + return -EINVAL; + } + + mga_dma_dispatch_vertex(dev, buf); + + PRIMUPDATE(dev_priv); + mga_flush_write_combine(); + mga_dma_schedule(dev, 1); + return 0; +} + + +int mga_indices(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_mga_private_t *dev_priv = + (drm_mga_private_t *) dev->dev_private; + drm_device_dma_t *dma = dev->dma; + drm_buf_t *buf; + drm_mga_buf_priv_t *buf_priv; + drm_mga_indices_t indices; + DRM_DEBUG("%s\n", __FUNCTION__); + + copy_from_user_ret(&indices, (drm_mga_indices_t *) arg, + sizeof(indices), -EFAULT); + + if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("mga_indices called without lock held\n"); + return -EINVAL; + } + + DRM_DEBUG("mga_indices\n"); + + buf = dma->buflist[indices.idx]; + buf_priv = buf->dev_private; + + buf_priv->discard = indices.discard; + + if (!mgaVerifyState(dev_priv)) { + if 
(indices.discard) { + if (buf_priv->dispatched == 1) + AGEBUF(dev_priv, buf_priv); + buf_priv->dispatched = 0; + mga_freelist_put(dev, buf); + } + return -EINVAL; + } + + mga_dma_dispatch_indices(dev, buf, indices.start, indices.end); + + PRIMUPDATE(dev_priv); + mga_flush_write_combine(); + mga_dma_schedule(dev, 1); + return 0; +} + + + +static int mga_dma_get_buffers(drm_device_t * dev, drm_dma_t * d) +{ + int i; + drm_buf_t *buf; + DRM_DEBUG("%s\n", __FUNCTION__); + + for (i = d->granted_count; i < d->request_count; i++) { + buf = mga_freelist_get(dev); + if (!buf) + break; + buf->pid = current->pid; + copy_to_user_ret(&d->request_indices[i], + &buf->idx, sizeof(buf->idx), -EFAULT); + copy_to_user_ret(&d->request_sizes[i], + &buf->total, sizeof(buf->total), -EFAULT); + ++d->granted_count; + } + return 0; +} + +int mga_dma(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_device_dma_t *dma = dev->dma; + int retcode = 0; + drm_dma_t d; + DRM_DEBUG("%s\n", __FUNCTION__); + + copy_from_user_ret(&d, (drm_dma_t *) arg, sizeof(d), -EFAULT); + DRM_DEBUG("%d %d: %d send, %d req\n", + current->pid, d.context, d.send_count, d.request_count); + + if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("mga_dma called without lock held\n"); + return -EINVAL; + } + + /* Please don't send us buffers. + */ + if (d.send_count != 0) { + DRM_ERROR + ("Process %d trying to send %d buffers via drmDMA\n", + current->pid, d.send_count); + return -EINVAL; + } + + /* We'll send you buffers. + */ + if (d.request_count < 0 || d.request_count > dma->buf_count) { + DRM_ERROR + ("Process %d trying to get %d buffers (of %d max)\n", + current->pid, d.request_count, dma->buf_count); + return -EINVAL; + } + d.granted_count = 0; + + if (d.request_count) { + retcode = mga_dma_get_buffers(dev, &d); + } + + DRM_DEBUG("%d returning, granted = %d\n", + current->pid, d.granted_count); + copy_to_user_ret((drm_dma_t *) arg, &d, sizeof(d), -EFAULT); + return retcode; +} diff --git a/linux/picker.c b/linux/picker.c index ecdb2c15..0bd8bfd5 100644 --- a/linux/picker.c +++ b/linux/picker.c @@ -9,6 +9,16 @@ #define CONFIG_MODVERSIONS 0 #endif +#ifndef CONFIG_AGP_MODULE +#define CONFIG_AGP_MODULE 0 +#endif + +#ifndef CONFIG_AGP +#define CONFIG_AGP 0 +#endif + SMP = CONFIG_SMP MODVERSIONS = CONFIG_MODVERSIONS +AGP = CONFIG_AGP +AGP_MODULE = CONFIG_AGP_MODULE RELEASE = UTS_RELEASE diff --git a/linux/proc.c b/linux/proc.c index 54aba58c..ba6dee00 100644 --- a/linux/proc.c +++ b/linux/proc.c @@ -1,8 +1,8 @@ /* proc.c -- /proc support for DRM -*- linux-c -*- * Created: Mon Jan 11 09:48:47 1999 by faith@precisioninsight.com - * Revised: Sun Feb 13 23:41:04 2000 by kevin@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -23,9 +23,9 @@ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. - * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/proc.c,v 1.6 2000/02/23 04:47:30 martin Exp $ * + * Authors: + * Rickard E. 
(Rik) Faith <faith@valinux.com> */ #define __NO_VERSION__ @@ -164,7 +164,10 @@ static int _drm_vm_info(char *buf, char **start, off_t offset, int len, { drm_device_t *dev = (drm_device_t *)data; drm_map_t *map; - const char *types[] = { "FB", "REG", "SHM" }; + /* Hardcoded from _DRM_FRAME_BUFFER, + _DRM_REGISTERS, _DRM_SHM, and + _DRM_AGP. */ + const char *types[] = { "FB", "REG", "SHM", "AGP" }; const char *type; int i; @@ -175,7 +178,7 @@ static int _drm_vm_info(char *buf, char **start, off_t offset, int len, "address mtrr\n\n"); for (i = 0; i < dev->map_count; i++) { map = dev->maplist[i]; - if (map->type < 0 || map->type > 2) type = "??"; + if (map->type < 0 || map->type > 3) type = "??"; else type = types[map->type]; DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ", i, @@ -397,6 +400,7 @@ static int _drm_vma_info(char *buf, char **start, off_t offset, int len, pgprot & _PAGE_GLOBAL ? 'g' : 'l' ); #endif DRM_PROC_PRINT("\n"); +#if 0 for (i = vma->vm_start; i < vma->vm_end; i += PAGE_SIZE) { pgd = pgd_offset(vma->vm_mm, i); pmd = pmd_offset(pgd, i); @@ -417,6 +421,7 @@ static int _drm_vma_info(char *buf, char **start, off_t offset, int len, DRM_PROC_PRINT(" 0x%08lx\n", i); } } +#endif } return len; diff --git a/linux/r128_bufs.c b/linux/r128_bufs.c new file mode 100644 index 00000000..e8ff4df4 --- /dev/null +++ b/linux/r128_bufs.c @@ -0,0 +1,308 @@ +/* r128_bufs.c -- IOCTLs to manage buffers -*- linux-c -*- + * Created: Wed Apr 12 16:19:08 2000 by kevin@precisioninsight.com + * + * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Kevin E. Martin <martin@valinux.com> + * Rickard E. 
(Rik) Faith <faith@valinux.com> + * Jeff Hartmann <jhartmann@valinux.com> + * + */ + +#define __NO_VERSION__ +#include "drmP.h" +#include "r128_drv.h" +#include "linux/un.h" + + +#ifdef DRM_AGP +int r128_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_device_dma_t *dma = dev->dma; + drm_buf_desc_t request; + drm_buf_entry_t *entry; + drm_buf_t *buf; + unsigned long offset; + unsigned long agp_offset; + int count; + int order; + int size; + int alignment; + int page_order; + int total; + int byte_count; + int i; + + if (!dma) return -EINVAL; + + copy_from_user_ret(&request, + (drm_buf_desc_t *)arg, + sizeof(request), + -EFAULT); + + count = request.count; + order = drm_order(request.size); + size = 1 << order; + + alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size):size; + page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; + total = PAGE_SIZE << page_order; + + byte_count = 0; + agp_offset = dev->agp->base + request.agp_start; + + DRM_DEBUG("count: %d\n", count); + DRM_DEBUG("order: %d\n", order); + DRM_DEBUG("size: %d\n", size); + DRM_DEBUG("agp_offset: %ld\n", agp_offset); + DRM_DEBUG("alignment: %d\n", alignment); + DRM_DEBUG("page_order: %d\n", page_order); + DRM_DEBUG("total: %d\n", total); + + if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; + if (dev->queue_count) return -EBUSY; /* Not while in use */ + + spin_lock(&dev->count_lock); + if (dev->buf_use) { + spin_unlock(&dev->count_lock); + return -EBUSY; + } + atomic_inc(&dev->buf_alloc); + spin_unlock(&dev->count_lock); + + down(&dev->struct_sem); + entry = &dma->bufs[order]; + if (entry->buf_count) { + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; /* May only call once for each order */ + } + + entry->buflist = drm_alloc(count * sizeof(*entry->buflist), + DRM_MEM_BUFS); + if (!entry->buflist) { + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + memset(entry->buflist, 0, count * sizeof(*entry->buflist)); + + entry->buf_size = size; + entry->page_order = page_order; + offset = 0; + + for (offset = 0; + entry->buf_count < count; + offset += alignment, ++entry->buf_count) { + buf = &entry->buflist[entry->buf_count]; + buf->idx = dma->buf_count + entry->buf_count; + buf->total = alignment; + buf->order = order; + buf->used = 0; + buf->offset = (dma->byte_count + offset); + buf->address = (void *)(agp_offset + offset); + buf->next = NULL; + buf->waiting = 0; + buf->pending = 0; + init_waitqueue_head(&buf->dma_wait); + buf->pid = 0; + + buf->dev_priv_size = sizeof(drm_r128_buf_priv_t); + buf->dev_private = drm_alloc(sizeof(drm_r128_buf_priv_t), + DRM_MEM_BUFS); + memset(buf->dev_private, 0, buf->dev_priv_size); + +#if DRM_DMA_HISTOGRAM + buf->time_queued = 0; + buf->time_dispatched = 0; + buf->time_completed = 0; + buf->time_freed = 0; +#endif + + byte_count += PAGE_SIZE << page_order; + + DRM_DEBUG("buffer %d @ %p\n", + entry->buf_count, buf->address); + } + + DRM_DEBUG("byte_count: %d\n", byte_count); + + dma->buflist = drm_realloc(dma->buflist, + dma->buf_count * sizeof(*dma->buflist), + (dma->buf_count + entry->buf_count) + * sizeof(*dma->buflist), + DRM_MEM_BUFS); + for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++) + dma->buflist[i] = &entry->buflist[i - dma->buf_count]; + + dma->buf_count += entry->buf_count; + dma->byte_count += byte_count; + + drm_freelist_create(&entry->freelist, 
entry->buf_count); + for (i = 0; i < entry->buf_count; i++) { + drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]); + } + + up(&dev->struct_sem); + + request.count = entry->buf_count; + request.size = size; + + copy_to_user_ret((drm_buf_desc_t *)arg, + &request, + sizeof(request), + -EFAULT); + + dma->flags = _DRM_DMA_USE_AGP; + + atomic_dec(&dev->buf_alloc); + return 0; +} +#endif + +int r128_addbufs(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_r128_private_t *dev_priv = dev->dev_private; + drm_buf_desc_t request; + + if (!dev_priv || dev_priv->is_pci) return -EINVAL; + + copy_from_user_ret(&request, + (drm_buf_desc_t *)arg, + sizeof(request), + -EFAULT); + +#ifdef DRM_AGP + if (request.flags & _DRM_AGP_BUFFER) + return r128_addbufs_agp(inode, filp, cmd, arg); + else +#endif + return -EINVAL; +} + +int r128_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_r128_private_t *dev_priv = dev->dev_private; + drm_device_dma_t *dma = dev->dma; + int retcode = 0; + const int zero = 0; + unsigned long virtual; + unsigned long address; + drm_buf_map_t request; + int i; + + if (!dma || !dev_priv || dev_priv->is_pci) return -EINVAL; + + DRM_DEBUG("\n"); + + spin_lock(&dev->count_lock); + if (atomic_read(&dev->buf_alloc)) { + spin_unlock(&dev->count_lock); + return -EBUSY; + } + ++dev->buf_use; /* Can't allocate more after this call */ + spin_unlock(&dev->count_lock); + + copy_from_user_ret(&request, + (drm_buf_map_t *)arg, + sizeof(request), + -EFAULT); + + if (request.count >= dma->buf_count) { + if (dma->flags & _DRM_DMA_USE_AGP) { + drm_map_t *map; + + map = dev_priv->agp_vertbufs; + if (!map) { + retcode = -EINVAL; + goto done; + } + + down(&current->mm->mmap_sem); + virtual = do_mmap(filp, 0, map->size, + PROT_READ|PROT_WRITE, + MAP_SHARED, + (unsigned long)map->offset); + up(&current->mm->mmap_sem); + } else { + down(&current->mm->mmap_sem); + virtual = do_mmap(filp, 0, dma->byte_count, + PROT_READ|PROT_WRITE, MAP_SHARED, 0); + up(&current->mm->mmap_sem); + } + if (virtual > -1024UL) { + /* Real error */ + retcode = (signed long)virtual; + goto done; + } + request.virtual = (void *)virtual; + + for (i = 0; i < dma->buf_count; i++) { + if (copy_to_user(&request.list[i].idx, + &dma->buflist[i]->idx, + sizeof(request.list[0].idx))) { + retcode = -EFAULT; + goto done; + } + if (copy_to_user(&request.list[i].total, + &dma->buflist[i]->total, + sizeof(request.list[0].total))) { + retcode = -EFAULT; + goto done; + } + if (copy_to_user(&request.list[i].used, + &zero, + sizeof(zero))) { + retcode = -EFAULT; + goto done; + } + address = virtual + dma->buflist[i]->offset; + if (copy_to_user(&request.list[i].address, + &address, + sizeof(address))) { + retcode = -EFAULT; + goto done; + } + } + } + done: + request.count = dma->buf_count; + DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode); + + copy_to_user_ret((drm_buf_map_t *)arg, + &request, + sizeof(request), + -EFAULT); + + return retcode; +} diff --git a/linux/r128_context.c b/linux/r128_context.c new file mode 100644 index 00000000..f11453ba --- /dev/null +++ b/linux/r128_context.c @@ -0,0 +1,213 @@ +/* r128_context.c -- IOCTLs for r128 contexts -*- linux-c -*- + * Created: Mon Dec 13 09:51:35 1999 by faith@precisioninsight.com + * + * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. 
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Author: Rickard E. (Rik) Faith <faith@valinux.com> + * + */ + +#include <linux/sched.h> + +#define __NO_VERSION__ +#include "drmP.h" +#include "r128_drv.h" + +extern drm_ctx_t r128_res_ctx; + +static int r128_alloc_queue(drm_device_t *dev) +{ +#if 0 + static int context = 0; +#endif + + return drm_ctxbitmap_next(dev); +} + +int r128_context_switch(drm_device_t *dev, int old, int new) +{ + char buf[64]; + + atomic_inc(&dev->total_ctx); + + if (test_and_set_bit(0, &dev->context_flag)) { + DRM_ERROR("Reentering -- FIXME\n"); + return -EBUSY; + } + +#if DRM_DMA_HISTOGRAM + dev->ctx_start = get_cycles(); +#endif + + DRM_DEBUG("Context switch from %d to %d\n", old, new); + + if (new == dev->last_context) { + clear_bit(0, &dev->context_flag); + return 0; + } + + if (drm_flags & DRM_FLAG_NOCTX) { + r128_context_switch_complete(dev, new); + } else { + sprintf(buf, "C %d %d\n", old, new); + drm_write_string(dev, buf); + } + + return 0; +} + +int r128_context_switch_complete(drm_device_t *dev, int new) +{ + dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ + dev->last_switch = jiffies; + + if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("Lock isn't held after context switch\n"); + } + + /* If a context switch is ever initiated + when the kernel holds the lock, release + that lock here. 
*/ +#if DRM_DMA_HISTOGRAM + atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles() + - dev->ctx_start)]); + +#endif + clear_bit(0, &dev->context_flag); + wake_up(&dev->context_wait); + + return 0; +} + + +int r128_resctx(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_ctx_res_t res; + drm_ctx_t ctx; + int i; + + DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS); + copy_from_user_ret(&res, (drm_ctx_res_t *)arg, sizeof(res), -EFAULT); + if (res.count >= DRM_RESERVED_CONTEXTS) { + memset(&ctx, 0, sizeof(ctx)); + for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { + ctx.handle = i; + copy_to_user_ret(&res.contexts[i], + &i, + sizeof(i), + -EFAULT); + } + } + res.count = DRM_RESERVED_CONTEXTS; + copy_to_user_ret((drm_ctx_res_t *)arg, &res, sizeof(res), -EFAULT); + return 0; +} + + +int r128_addctx(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_ctx_t ctx; + + copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if ((ctx.handle = r128_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) { + /* Skip kernel's context and get a new one. */ + ctx.handle = r128_alloc_queue(dev); + } + DRM_DEBUG("%d\n", ctx.handle); + if (ctx.handle == -1) { + DRM_DEBUG("Not enough free contexts.\n"); + /* Should this return -EBUSY instead? */ + return -ENOMEM; + } + + copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT); + return 0; +} + +int r128_modctx(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_ctx_t ctx; + + copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT); + if (ctx.flags==_DRM_CONTEXT_PRESERVED) + r128_res_ctx.handle=ctx.handle; + return 0; +} + +int r128_getctx(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_ctx_t ctx; + + copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT); + /* This is 0, because we don't hanlde any context flags */ + ctx.flags = 0; + copy_to_user_ret((drm_ctx_t*)arg, &ctx, sizeof(ctx), -EFAULT); + return 0; +} + +int r128_switchctx(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_ctx_t ctx; + + copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + DRM_DEBUG("%d\n", ctx.handle); + return r128_context_switch(dev, dev->last_context, ctx.handle); +} + +int r128_newctx(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_ctx_t ctx; + + copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + DRM_DEBUG("%d\n", ctx.handle); + r128_context_switch_complete(dev, ctx.handle); + + return 0; +} + +int r128_rmctx(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_ctx_t ctx; + + copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + DRM_DEBUG("%d\n", ctx.handle); + drm_ctxbitmap_free(dev, ctx.handle); + + return 0; +} diff --git a/linux/r128_dma.c b/linux/r128_dma.c new file mode 100644 index 00000000..16f79c1f --- /dev/null +++ b/linux/r128_dma.c @@ -0,0 +1,922 @@ +/* r128_drv.c -- ATI Rage 128 driver -*- linux-c -*- + * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com + * + * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. 
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Kevin E. Martin <martin@valinux.com> + * + */ + +#define __NO_VERSION__ +#include "drmP.h" +#include "r128_drv.h" + +#include <linux/interrupt.h> /* For task queue support */ +#include <linux/delay.h> + + + +#define DO_REMAP(_m) (_m)->handle = drm_ioremap((_m)->offset, (_m)->size) + +#define DO_REMAPFREE(_m) \ + do { \ + if ((_m)->handle && (_m)->size) \ + drm_ioremapfree((_m)->handle, (_m)->size); \ + } while (0) + +#define DO_FIND_MAP(_m, _o) \ + do { \ + int _i; \ + for (_i = 0; _i < dev->map_count; _i++) { \ + if (dev->maplist[_i]->offset == _o) { \ + _m = dev->maplist[_i]; \ + break; \ + } \ + } \ + } while (0) + + +#define R128_MAX_VBUF_AGE 0x10000000 +#define R128_VB_AGE_REG R128_GUI_SCRATCH_REG0 + +int R128_READ_PLL(drm_device_t *dev, int addr) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + + R128_WRITE8(R128_CLOCK_CNTL_INDEX, addr & 0x1f); + return R128_READ(R128_CLOCK_CNTL_DATA); +} + +static void r128_flush_write_combine(void) +{ + int xchangeDummy; + + __asm__ volatile("push %%eax ;" + "xchg %%eax, %0 ;" + "pop %%eax" : : "m" (xchangeDummy)); + __asm__ volatile("push %%eax ;" + "push %%ebx ;" + "push %%ecx ;" + "push %%edx ;" + "movl $0,%%eax ;" + "cpuid ;" + "pop %%edx ;" + "pop %%ecx ;" + "pop %%ebx ;" + "pop %%eax" : /* no outputs */ : /* no inputs */ ); +} + +static void r128_status(drm_device_t *dev) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + + printk("GUI_STAT = 0x%08x\n", + (unsigned int)R128_READ(R128_GUI_STAT)); + printk("PM4_STAT = 0x%08x\n", + (unsigned int)R128_READ(R128_PM4_STAT)); + printk("PM4_BUFFER_DL_WPTR = 0x%08x\n", + (unsigned int)R128_READ(R128_PM4_BUFFER_DL_WPTR)); + printk("PM4_BUFFER_DL_RPTR = 0x%08x\n", + (unsigned int)R128_READ(R128_PM4_BUFFER_DL_RPTR)); +} + +static int r128_do_cleanup_cce(drm_device_t *dev) +{ + if (dev->dev_private) { + drm_r128_private_t *dev_priv = dev->dev_private; + + if (!dev_priv->is_pci) { + DO_REMAPFREE(dev_priv->agp_ring); + DO_REMAPFREE(dev_priv->agp_read_ptr); + DO_REMAPFREE(dev_priv->agp_vertbufs); + DO_REMAPFREE(dev_priv->agp_indbufs); + DO_REMAPFREE(dev_priv->agp_textures); + } + + drm_free(dev->dev_private, sizeof(drm_r128_private_t), + DRM_MEM_DRIVER); + dev->dev_private = NULL; + } + + return 0; +} + +static int r128_do_init_cce(drm_device_t *dev, drm_r128_init_t *init) +{ + 
drm_r128_private_t *dev_priv; + int i; + + dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER); + if (dev_priv == NULL) return -ENOMEM; + dev->dev_private = (void *)dev_priv; + + memset(dev_priv, 0, sizeof(drm_r128_private_t)); + + dev_priv->is_pci = init->is_pci; + + dev_priv->usec_timeout = init->usec_timeout; + if (dev_priv->usec_timeout < 1 || + dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT) { + drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); + dev->dev_private = NULL; + return -EINVAL; + } + + dev_priv->cce_mode = init->cce_mode; + dev_priv->cce_fifo_size = init->cce_fifo_size; + dev_priv->cce_is_bm_mode = + ((init->cce_mode == R128_PM4_192BM) || + (init->cce_mode == R128_PM4_128BM_64INDBM) || + (init->cce_mode == R128_PM4_64BM_128INDBM) || + (init->cce_mode == R128_PM4_64BM_64VCBM_64INDBM)); + dev_priv->cce_secure = init->cce_secure; + + if (dev_priv->cce_is_bm_mode && dev_priv->is_pci) { + drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); + dev->dev_private = NULL; + return -EINVAL; + } + + for (i = 0; i < dev->map_count; i++) { + if (dev->maplist[i]->type == _DRM_SHM) { + dev_priv->sarea = dev->maplist[i]; + break; + } + } + + DO_FIND_MAP(dev_priv->fb, init->fb_offset); + if (!dev_priv->is_pci) { + DO_FIND_MAP(dev_priv->agp_ring, init->agp_ring_offset); + DO_FIND_MAP(dev_priv->agp_read_ptr, init->agp_read_ptr_offset); + DO_FIND_MAP(dev_priv->agp_vertbufs, init->agp_vertbufs_offset); + DO_FIND_MAP(dev_priv->agp_indbufs, init->agp_indbufs_offset); + DO_FIND_MAP(dev_priv->agp_textures, init->agp_textures_offset); + } + DO_FIND_MAP(dev_priv->mmio, init->mmio_offset); + + dev_priv->sarea_priv = + (drm_r128_sarea_t *)((u8 *)dev_priv->sarea->handle + + init->sarea_priv_offset); + + if (!dev_priv->is_pci) { + DO_REMAP(dev_priv->agp_ring); + DO_REMAP(dev_priv->agp_read_ptr); + DO_REMAP(dev_priv->agp_vertbufs); +#if 0 + DO_REMAP(dev_priv->agp_indirectbufs); + DO_REMAP(dev_priv->agp_textures); +#endif + + dev_priv->ring_size = init->ring_size; + dev_priv->ring_sizel2qw = drm_order(init->ring_size/8); + dev_priv->ring_entries = init->ring_size/sizeof(u32); + dev_priv->ring_read_ptr = ((__volatile__ u32 *) + dev_priv->agp_read_ptr->handle); + dev_priv->ring_start = (u32 *)dev_priv->agp_ring->handle; + dev_priv->ring_end = ((u32 *)dev_priv->agp_ring->handle + + dev_priv->ring_entries); + } + + dev_priv->submit_age = 0; + R128_WRITE(R128_VB_AGE_REG, dev_priv->submit_age); + + return 0; +} + +int r128_init_cce(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_r128_init_t init; + + copy_from_user_ret(&init, (drm_r128_init_t *)arg, sizeof(init), + -EFAULT); + + switch (init.func) { + case R128_INIT_CCE: + return r128_do_init_cce(dev, &init); + case R128_CLEANUP_CCE: + return r128_do_cleanup_cce(dev); + } + + return -EINVAL; +} + +static void r128_mark_vertbufs_done(drm_device_t *dev) +{ + drm_device_dma_t *dma = dev->dma; + int i; + + for (i = 0; i < dma->buf_count; i++) { + drm_buf_t *buf = dma->buflist[i]; + drm_r128_buf_priv_t *buf_priv = buf->dev_private; + buf_priv->age = 0; + } +} + +static int r128_do_pixcache_flush(drm_device_t *dev) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + u32 tmp; + int i; + + tmp = R128_READ(R128_PC_NGUI_CTLSTAT) | R128_PC_FLUSH_ALL; + R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp); + + for (i = 0; i < dev_priv->usec_timeout; i++) { + if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY)) + return 0; + udelay(1); + } + + return 
-EBUSY; +} + +static int r128_do_wait_for_fifo(drm_device_t *dev, int entries) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + int i; + + for (i = 0; i < dev_priv->usec_timeout; i++) { + int slots = R128_READ(R128_GUI_STAT) & R128_GUI_FIFOCNT_MASK; + if (slots >= entries) return 0; + udelay(1); + } + return -EBUSY; +} + +static int r128_do_wait_for_idle(drm_device_t *dev) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + int i, ret; + + if (!(ret = r128_do_wait_for_fifo(dev, 64))) return ret; + + for (i = 0; i < dev_priv->usec_timeout; i++) { + if (!(R128_READ(R128_GUI_STAT) & R128_GUI_ACTIVE)) { + (void)r128_do_pixcache_flush(dev); + return 0; + } + udelay(1); + } + return -EBUSY; +} + +int r128_do_engine_reset(drm_device_t *dev) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + u32 clock_cntl_index, mclk_cntl, gen_reset_cntl; + + (void)r128_do_pixcache_flush(dev); + + clock_cntl_index = R128_READ(R128_CLOCK_CNTL_INDEX); + mclk_cntl = R128_READ_PLL(dev, R128_MCLK_CNTL); + + R128_WRITE_PLL(R128_MCLK_CNTL, + mclk_cntl | R128_FORCE_GCP | R128_FORCE_PIPE3D_CP); + + gen_reset_cntl = R128_READ(R128_GEN_RESET_CNTL); + + R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl | R128_SOFT_RESET_GUI); + (void)R128_READ(R128_GEN_RESET_CNTL); + R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl & ~R128_SOFT_RESET_GUI); + (void)R128_READ(R128_GEN_RESET_CNTL); + + R128_WRITE_PLL(R128_MCLK_CNTL, mclk_cntl); + R128_WRITE(R128_CLOCK_CNTL_INDEX, clock_cntl_index); + R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl); + + /* For CCE ring buffer only */ + if (dev_priv->cce_is_bm_mode) { + R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0); + R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0); + *dev_priv->ring_read_ptr = 0; + dev_priv->sarea_priv->ring_write = 0; + } + + /* Reset the CCE mode */ + (void)r128_do_wait_for_idle(dev); + R128_WRITE(R128_PM4_BUFFER_CNTL, + dev_priv->cce_mode | dev_priv->ring_sizel2qw); + (void)R128_READ(R128_PM4_BUFFER_ADDR); /* as per the sample code */ + R128_WRITE(R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN); + + r128_mark_vertbufs_done(dev); + return 0; +} + +int r128_eng_reset(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + + if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || + dev->lock.pid != current->pid) { + DRM_ERROR("r128_eng_reset called without holding the lock\n"); + return -EINVAL; + } + + return r128_do_engine_reset(dev); +} + +static int r128_do_engine_flush(drm_device_t *dev) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + u32 tmp; + + tmp = R128_READ(R128_PM4_BUFFER_DL_WPTR); + R128_WRITE(R128_PM4_BUFFER_DL_WPTR, tmp | R128_PM4_BUFFER_DL_DONE); + + return 0; +} + +int r128_eng_flush(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + + if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || + dev->lock.pid != current->pid) { + DRM_ERROR("r128_eng_flush called without holding the lock\n"); + return -EINVAL; + } + + return r128_do_engine_flush(dev); +} + +static int r128_do_cce_wait_for_fifo(drm_device_t *dev, int entries) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + int i; + + for (i = 0; i < dev_priv->usec_timeout; i++) { + int slots = R128_READ(R128_PM4_STAT) & R128_PM4_FIFOCNT_MASK; + if (slots >= entries) return 0; + udelay(1); + } + return -EBUSY; +} + +int r128_do_cce_wait_for_idle(drm_device_t *dev) +{ + drm_r128_private_t *dev_priv = 
dev->dev_private; + int i; + + if (dev_priv->cce_is_bm_mode) { + for (i = 0; i < dev_priv->usec_timeout; i++) { + if (*dev_priv->ring_read_ptr == dev_priv->sarea_priv->ring_write) { + int pm4stat = R128_READ(R128_PM4_STAT); + if ((pm4stat & R128_PM4_FIFOCNT_MASK) >= dev_priv->cce_fifo_size && + !(pm4stat & (R128_PM4_BUSY | R128_PM4_GUI_ACTIVE))) { + return r128_do_pixcache_flush(dev); + } + } + udelay(1); + } + return -EBUSY; + } else { + int ret = r128_do_cce_wait_for_fifo(dev, dev_priv->cce_fifo_size); + if (ret < 0) return ret; + + for (i = 0; i < dev_priv->usec_timeout; i++) { + int pm4stat = R128_READ(R128_PM4_STAT); + if (!(pm4stat & (R128_PM4_BUSY | R128_PM4_GUI_ACTIVE))) { + return r128_do_pixcache_flush(dev); + } + udelay(1); + } + return -EBUSY; + } +} + +int r128_cce_idle(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + + if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || + dev->lock.pid != current->pid) { + DRM_ERROR("r128_wait_idle called without holding the lock\n"); + return -EINVAL; + } + + return r128_do_cce_wait_for_idle(dev); +} + +static int r128_submit_packets_ring_secure(drm_device_t *dev, + u32 *commands, int *count) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + int write = dev_priv->sarea_priv->ring_write; + int *write_ptr = dev_priv->ring_start + write; + int c = *count; + u32 tmp = 0; + int psize = 0; + int writing = 1; + int timeout; + + while (c > 0) { + tmp = *commands++; + if (!psize) { + writing = 1; + + if ((tmp & R128_CCE_PACKET_MASK) == R128_CCE_PACKET0) { + if ((tmp & R128_CCE_PACKET0_REG_MASK) <= (0x1004 >> 2)) { + if ((tmp & R128_CCE_PACKET0_REG_MASK) != + (R128_PM4_VC_FPU_SETUP >> 2)) { + writing = 0; + } + } + psize = ((tmp & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2; + } else if ((tmp & R128_CCE_PACKET_MASK) == R128_CCE_PACKET1) { + if ((tmp & R128_CCE_PACKET1_REG0_MASK) <= (0x1004 >> 2)) { + if ((tmp & R128_CCE_PACKET1_REG0_MASK) != + (R128_PM4_VC_FPU_SETUP >> 2)) { + writing = 0; + } + } else if ((tmp & R128_CCE_PACKET1_REG1_MASK) <= + (0x1004 << 9)) { + if ((tmp & R128_CCE_PACKET1_REG1_MASK) != + (R128_PM4_VC_FPU_SETUP << 9)) { + writing = 0; + } + } + psize = 3; + } else { + psize = ((tmp & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2; + } + } + psize--; + + if (writing) { + write++; + *write_ptr++ = tmp; + } + if (write >= dev_priv->ring_entries) { + write = 0; + write_ptr = dev_priv->ring_start; + } + timeout = 0; + while (write == *dev_priv->ring_read_ptr) { + (void)R128_READ(R128_PM4_BUFFER_DL_RPTR); + if (timeout++ >= dev_priv->usec_timeout) + return -EBUSY; + udelay(1); + } + c--; + } + + if (write < 32) + memcpy(dev_priv->ring_end, + dev_priv->ring_start, + write * sizeof(u32)); + + /* Make sure WC cache has been flushed */ + r128_flush_write_combine(); + + dev_priv->sarea_priv->ring_write = write; + R128_WRITE(R128_PM4_BUFFER_DL_WPTR, write); + + *count = 0; + + return 0; +} + +static int r128_submit_packets_pio_secure(drm_device_t *dev, + u32 *commands, int *count) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + u32 tmp = 0; + int psize = 0; + int writing = 1; + int addr = R128_PM4_FIFO_DATA_EVEN; + int ret; + + while (*count > 0) { + tmp = *commands++; + if (!psize) { + writing = 1; + + if ((tmp & R128_CCE_PACKET_MASK) == R128_CCE_PACKET0) { + if ((tmp & R128_CCE_PACKET0_REG_MASK) <= (0x1004 >> 2)) { + if ((tmp & R128_CCE_PACKET0_REG_MASK) != + (R128_PM4_VC_FPU_SETUP >> 2)) { + writing = 0; + } + } + psize = ((tmp & 
R128_CCE_PACKET_COUNT_MASK) >> 16) + 2; + } else if ((tmp & R128_CCE_PACKET_MASK) == R128_CCE_PACKET1) { + if ((tmp & R128_CCE_PACKET1_REG0_MASK) <= (0x1004 >> 2)) { + if ((tmp & R128_CCE_PACKET1_REG0_MASK) != + (R128_PM4_VC_FPU_SETUP >> 2)) { + writing = 0; + } + } else if ((tmp & R128_CCE_PACKET1_REG1_MASK) <= + (0x1004 << 9)) { + if ((tmp & R128_CCE_PACKET1_REG1_MASK) != + (R128_PM4_VC_FPU_SETUP << 9)) { + writing = 0; + } + } + psize = 3; + } else { + psize = ((tmp & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2; + } + } + psize--; + + if (writing) { + if ((ret = r128_do_cce_wait_for_fifo(dev, 1)) < 0) + return ret; + R128_WRITE(addr, tmp); + addr ^= 0x0004; + } + + *count -= 1; + } + + if (addr == R128_PM4_FIFO_DATA_ODD) { + if ((ret = r128_do_cce_wait_for_fifo(dev, 1)) < 0) return ret; + R128_WRITE(addr, R128_CCE_PACKET2); + } + + return 0; +} + +static int r128_submit_packets_ring(drm_device_t *dev, + u32 *commands, int *count) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + int write = dev_priv->sarea_priv->ring_write; + int *write_ptr = dev_priv->ring_start + write; + int c = *count; + int timeout; + + while (c > 0) { + write++; + *write_ptr++ = *commands++; + if (write >= dev_priv->ring_entries) { + write = 0; + write_ptr = dev_priv->ring_start; + } + timeout = 0; + while (write == *dev_priv->ring_read_ptr) { + (void)R128_READ(R128_PM4_BUFFER_DL_RPTR); + if (timeout++ >= dev_priv->usec_timeout) + return -EBUSY; + udelay(1); + } + c--; + } + + if (write < 32) + memcpy(dev_priv->ring_end, + dev_priv->ring_start, + write * sizeof(u32)); + + /* Make sure WC cache has been flushed */ + r128_flush_write_combine(); + + dev_priv->sarea_priv->ring_write = write; + R128_WRITE(R128_PM4_BUFFER_DL_WPTR, write); + + *count = 0; + + return 0; +} + +static int r128_submit_packets_pio(drm_device_t *dev, + u32 *commands, int *count) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + int ret; + + while (*count > 1) { + if ((ret = r128_do_cce_wait_for_fifo(dev, 2)) < 0) return ret; + R128_WRITE(R128_PM4_FIFO_DATA_EVEN, *commands++); + R128_WRITE(R128_PM4_FIFO_DATA_ODD, *commands++); + *count -= 2; + } + + if (*count) { + if ((ret = r128_do_cce_wait_for_fifo(dev, 2)) < 0) return ret; + R128_WRITE(R128_PM4_FIFO_DATA_EVEN, *commands++); + R128_WRITE(R128_PM4_FIFO_DATA_ODD, R128_CCE_PACKET2); + *count = 0; + } + + return 0; +} + +static int r128_do_submit_packets(drm_device_t *dev, u32 *buffer, int count) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + int c = count; + int ret; + + if (dev_priv->cce_is_bm_mode) { + int left = 0; + + if (c >= dev_priv->ring_entries) { + c = dev_priv->ring_entries-1; + left = count - c; + } + + /* Since this is only used by the kernel we can use the + insecure ring buffer submit packet routine */ + ret = r128_submit_packets_ring(dev, buffer, &c); + + c += left; + } else { + /* Since this is only used by the kernel we can use the + insecure PIO submit packet routine */ + ret = r128_submit_packets_pio(dev, buffer, &c); + } + + if (ret < 0) return ret; + else return c; +} + +int r128_submit_pkt(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_r128_private_t *dev_priv = dev->dev_private; + drm_r128_packet_t packet; + u32 *buffer; + int c; + int size; + int ret = 0; + + if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || + dev->lock.pid != current->pid) { + DRM_ERROR("r128_submit_pkt called without holding the lock\n"); + return -EINVAL; + } + + 
copy_from_user_ret(&packet, (drm_r128_packet_t *)arg, sizeof(packet), + -EFAULT); + + c = packet.count; + size = c * sizeof(*buffer); + + if (dev_priv->cce_is_bm_mode) { + int left = 0; + + if (c >= dev_priv->ring_entries) { + c = dev_priv->ring_entries-1; + size = c * sizeof(*buffer); + left = packet.count - c; + } + + if ((buffer = kmalloc(size, 0)) == NULL) return -ENOMEM; + copy_from_user_ret(buffer, packet.buffer, size, -EFAULT); + + if (dev_priv->cce_secure) + ret = r128_submit_packets_ring_secure(dev, buffer, &c); + else + ret = r128_submit_packets_ring(dev, buffer, &c); + + c += left; + } else { + if ((buffer = kmalloc(size, 0)) == NULL) return -ENOMEM; + copy_from_user_ret(buffer, packet.buffer, size, -EFAULT); + + if (dev_priv->cce_secure) + ret = r128_submit_packets_pio_secure(dev, buffer, &c); + else + ret = r128_submit_packets_pio(dev, buffer, &c); + } + + kfree(buffer); + + packet.count = c; + copy_to_user_ret((drm_r128_packet_t *)arg, &packet, sizeof(packet), + -EFAULT); + + if (ret) return ret; + else if (c > 0) return -EAGAIN; + + return 0; +} + +static int r128_send_vertbufs(drm_device_t *dev, drm_r128_vertex_t *v) +{ + drm_device_dma_t *dma = dev->dma; + drm_r128_private_t *dev_priv = dev->dev_private; + drm_r128_buf_priv_t *buf_priv; + drm_buf_t *buf; + int i, ret; + u32 cce[2]; + + /* Make sure we have valid data */ + for (i = 0; i < v->send_count; i++) { + int idx = v->send_indices[i]; + + if (idx < 0 || idx >= dma->buf_count) { + DRM_ERROR("Index %d (of %d max)\n", + idx, dma->buf_count - 1); + return -EINVAL; + } + buf = dma->buflist[idx]; + if (buf->pid != current->pid) { + DRM_ERROR("Process %d using buffer owned by %d\n", + current->pid, buf->pid); + return -EINVAL; + } + if (buf->pending) { + DRM_ERROR("Sending pending buffer:" + " buffer %d, offset %d\n", + v->send_indices[i], i); + return -EINVAL; + } + } + + /* Wait for idle, if we've wrapped to make sure that all pending + buffers have been processed */ + if (dev_priv->submit_age == R128_MAX_VBUF_AGE) { + if ((ret = r128_do_cce_wait_for_idle(dev)) < 0) return ret; + dev_priv->submit_age = 0; + r128_mark_vertbufs_done(dev); + } + + /* Make sure WC cache has been flushed (if in PIO mode) */ + if (!dev_priv->cce_is_bm_mode) r128_flush_write_combine(); + + /* FIXME: Add support for sending vertex buffer to the CCE here + instead of in client code. The v->prim holds the primitive + type that should be drawn. Loop over the list buffers in + send_indices[] and submit a packet for each VB. + + This will require us to loop over the clip rects here as + well, which implies that we extend the kernel driver to allow + cliprects to be stored here. Note that the cliprects could + possibly come from the X server instead of the client, but + this will require additional changes to the DRI to allow for + this optimization. */ + + /* Submit a CCE packet that writes submit_age to R128_VB_AGE_REG */ + cce[0] = R128CCE0(R128_CCE_PACKET0, R128_VB_AGE_REG, 0); + cce[1] = dev_priv->submit_age; + if ((ret = r128_do_submit_packets(dev, cce, 2)) < 0) { + /* Until we add support for sending VBs to the CCE in + this routine, we can recover from this error. After + we add that support, we won't be able to easily + recover, so we will probably have to implement + another mechanism for handling timeouts from packets + submitted directly by the kernel. 
*/ + return ret; + } + + /* Now that the submit packet request has succeeded, we can mark + the buffers as pending */ + for (i = 0; i < v->send_count; i++) { + buf = dma->buflist[v->send_indices[i]]; + buf->pending = 1; + + buf_priv = buf->dev_private; + buf_priv->age = dev_priv->submit_age; + } + + dev_priv->submit_age++; + + return 0; +} + +static drm_buf_t *r128_freelist_get(drm_device_t *dev) +{ + drm_device_dma_t *dma = dev->dma; + drm_r128_private_t *dev_priv = dev->dev_private; + drm_r128_buf_priv_t *buf_priv; + drm_buf_t *buf; + int i, t; + + /* FIXME: Optimize -- use freelist code */ + + for (i = 0; i < dma->buf_count; i++) { + buf = dma->buflist[i]; + buf_priv = buf->dev_private; + if (buf->pid == 0) return buf; + } + + for (t = 0; t < dev_priv->usec_timeout; t++) { + u32 done_age = R128_READ(R128_VB_AGE_REG); + + for (i = 0; i < dma->buf_count; i++) { + buf = dma->buflist[i]; + buf_priv = buf->dev_private; + if (buf->pending && buf_priv->age <= done_age) { + /* The buffer has been processed, so it + can now be used */ + buf->pending = 0; + return buf; + } + } + udelay(1); + } + + r128_status(dev); + return NULL; +} + + +static int r128_get_vertbufs(drm_device_t *dev, drm_r128_vertex_t *v) +{ + drm_buf_t *buf; + int i; + + for (i = v->granted_count; i < v->request_count; i++) { + buf = r128_freelist_get(dev); + if (!buf) break; + buf->pid = current->pid; + copy_to_user_ret(&v->request_indices[i], + &buf->idx, + sizeof(buf->idx), + -EFAULT); + copy_to_user_ret(&v->request_sizes[i], + &buf->total, + sizeof(buf->total), + -EFAULT); + ++v->granted_count; + } + return 0; +} + +int r128_vertex_buf(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_r128_private_t *dev_priv = dev->dev_private; + drm_device_dma_t *dma = dev->dma; + int retcode = 0; + drm_r128_vertex_t v; + + if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || + dev->lock.pid != current->pid) { + DRM_ERROR("r128_vertex_buf called without holding the lock\n"); + return -EINVAL; + } + + if (!dev_priv || dev_priv->is_pci) { + DRM_ERROR("r128_vertex_buf called with a PCI card\n"); + return -EINVAL; + } + + copy_from_user_ret(&v, (drm_r128_vertex_t *)arg, sizeof(v), -EFAULT); + DRM_DEBUG("%d: %d send, %d req\n", + current->pid, v.send_count, v.request_count); + + if (v.send_count < 0 || v.send_count > dma->buf_count) { + DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n", + current->pid, v.send_count, dma->buf_count); + return -EINVAL; + } + if (v.request_count < 0 || v.request_count > dma->buf_count) { + DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", + current->pid, v.request_count, dma->buf_count); + return -EINVAL; + } + + if (v.send_count) { + retcode = r128_send_vertbufs(dev, &v); + } + + v.granted_count = 0; + + if (!retcode && v.request_count) { + retcode = r128_get_vertbufs(dev, &v); + } + + DRM_DEBUG("%d returning, granted = %d\n", + current->pid, v.granted_count); + copy_to_user_ret((drm_r128_vertex_t *)arg, &v, sizeof(v), -EFAULT); + + return retcode; +} diff --git a/linux/r128_drv.c b/linux/r128_drv.c new file mode 100644 index 00000000..8b669888 --- /dev/null +++ b/linux/r128_drv.c @@ -0,0 +1,737 @@ +/* r128_drv.c -- ATI Rage 128 driver -*- linux-c -*- + * Created: Mon Dec 13 09:47:27 1999 by faith@precisioninsight.com + * + * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Rickard E. (Rik) Faith <faith@valinux.com> + * Kevin E. Martin <martin@valinux.com> + * + */ + +#include <linux/config.h> +#define EXPORT_SYMTAB +#include "drmP.h" +#include "r128_drv.h" +EXPORT_SYMBOL(r128_init); +EXPORT_SYMBOL(r128_cleanup); + +#define R128_NAME "r128" +#define R128_DESC "r128" +#define R128_DATE "20000607" +#define R128_MAJOR 1 +#define R128_MINOR 0 +#define R128_PATCHLEVEL 0 + +static drm_device_t r128_device; +drm_ctx_t r128_res_ctx; + +static struct file_operations r128_fops = { + open: r128_open, + flush: drm_flush, + release: r128_release, + ioctl: r128_ioctl, + mmap: drm_mmap, + read: drm_read, + fasync: drm_fasync, + poll: drm_poll, +}; + +static struct miscdevice r128_misc = { + minor: MISC_DYNAMIC_MINOR, + name: R128_NAME, + fops: &r128_fops, +}; + +static drm_ioctl_desc_t r128_ioctls[] = { + [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { r128_version, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 }, + + [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { r128_addbufs, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { r128_mapbufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 }, + + [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { r128_addctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { r128_rmctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { r128_modctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { r128_getctx, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { r128_switchctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { r128_newctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { r128_resctx, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { r128_lock, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { r128_unlock, 1, 0 }, + 
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 }, + +#ifdef DRM_AGP + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 }, +#endif + + [DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_init_cce, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)] = { r128_eng_reset, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_FLUSH)] = { r128_eng_flush, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_PACKET)] = { r128_submit_pkt, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_IDLE)] = { r128_cce_idle, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)] = { r128_vertex_buf, 1, 0 }, +}; +#define R128_IOCTL_COUNT DRM_ARRAY_SIZE(r128_ioctls) + +#ifdef MODULE +int init_module(void); +void cleanup_module(void); +static char *r128 = NULL; + +MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas."); +MODULE_DESCRIPTION("r128"); +MODULE_PARM(r128, "s"); + +/* init_module is called when insmod is used to load the module */ + +int init_module(void) +{ + return r128_init(); +} + +/* cleanup_module is called when rmmod is used to unload the module */ + +void cleanup_module(void) +{ + r128_cleanup(); +} +#endif + +#ifndef MODULE +/* r128_setup is called by the kernel to parse command-line options passed + * via the boot-loader (e.g., LILO). It calls the insmod option routine, + * drm_parse_drm. + * + * This is not currently supported, since it requires changes to + * linux/init/main.c. */ + + +void __init r128_setup(char *str, int *ints) +{ + if (ints[0] != 0) { + DRM_ERROR("Illegal command line format, ignored\n"); + return; + } + drm_parse_options(str); +} +#endif + +static int r128_setup(drm_device_t *dev) +{ + int i; + + atomic_set(&dev->ioctl_count, 0); + atomic_set(&dev->vma_count, 0); + dev->buf_use = 0; + atomic_set(&dev->buf_alloc, 0); + + drm_dma_setup(dev); + + atomic_set(&dev->total_open, 0); + atomic_set(&dev->total_close, 0); + atomic_set(&dev->total_ioctl, 0); + atomic_set(&dev->total_irq, 0); + atomic_set(&dev->total_ctx, 0); + atomic_set(&dev->total_locks, 0); + atomic_set(&dev->total_unlocks, 0); + atomic_set(&dev->total_contends, 0); + atomic_set(&dev->total_sleeps, 0); + + for (i = 0; i < DRM_HASH_SIZE; i++) { + dev->magiclist[i].head = NULL; + dev->magiclist[i].tail = NULL; + } + dev->maplist = NULL; + dev->map_count = 0; + dev->vmalist = NULL; + dev->lock.hw_lock = NULL; + init_waitqueue_head(&dev->lock.lock_queue); + dev->queue_count = 0; + dev->queue_reserved = 0; + dev->queue_slots = 0; + dev->queuelist = NULL; + dev->irq = 0; + dev->context_flag = 0; + dev->interrupt_flag = 0; + dev->dma_flag = 0; + dev->last_context = 0; + dev->last_switch = 0; + dev->last_checked = 0; + init_timer(&dev->timer); + init_waitqueue_head(&dev->context_wait); + + dev->ctx_start = 0; + dev->lck_start = 0; + + dev->buf_rp = dev->buf; + dev->buf_wp = dev->buf; + dev->buf_end = dev->buf + DRM_BSZ; + dev->buf_async = NULL; + init_waitqueue_head(&dev->buf_readers); + init_waitqueue_head(&dev->buf_writers); + + r128_res_ctx.handle=-1; + + DRM_DEBUG("\n"); + + /* The kernel's context could be created here, but is now created + in drm_dma_enqueue. 
This is more resource-efficient for + hardware that does not do DMA, but may mean that + drm_select_queue fails between the time the interrupt is + initialized and the time the queues are initialized. */ + + return 0; +} + + +static int r128_takedown(drm_device_t *dev) +{ + int i; + drm_magic_entry_t *pt, *next; + drm_map_t *map; + drm_vma_entry_t *vma, *vma_next; + + DRM_DEBUG("\n"); + + down(&dev->struct_sem); + del_timer(&dev->timer); + + if (dev->devname) { + drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER); + dev->devname = NULL; + } + + if (dev->unique) { + drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER); + dev->unique = NULL; + dev->unique_len = 0; + } + /* Clear pid list */ + for (i = 0; i < DRM_HASH_SIZE; i++) { + for (pt = dev->magiclist[i].head; pt; pt = next) { + next = pt->next; + drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); + } + dev->magiclist[i].head = dev->magiclist[i].tail = NULL; + } + +#ifdef DRM_AGP + /* Clear AGP information */ + if (dev->agp) { + drm_agp_mem_t *entry; + drm_agp_mem_t *nexte; + + /* Remove AGP resources, but leave dev->agp + intact until r128_cleanup is called. */ + for (entry = dev->agp->memory; entry; entry = nexte) { + nexte = entry->next; + if (entry->bound) drm_unbind_agp(entry->memory); + drm_free_agp(entry->memory, entry->pages); + drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); + } + dev->agp->memory = NULL; + + if (dev->agp->acquired && drm_agp.release) + (*drm_agp.release)(); + + dev->agp->acquired = 0; + dev->agp->enabled = 0; + } +#endif + + /* Clear vma list (only built for debugging) */ + if (dev->vmalist) { + for (vma = dev->vmalist; vma; vma = vma_next) { + vma_next = vma->next; + drm_free(vma, sizeof(*vma), DRM_MEM_VMAS); + } + dev->vmalist = NULL; + } + + /* Clear map area and mtrr information */ + if (dev->maplist) { + for (i = 0; i < dev->map_count; i++) { + map = dev->maplist[i]; + switch (map->type) { + case _DRM_REGISTERS: + case _DRM_FRAME_BUFFER: +#ifdef CONFIG_MTRR + if (map->mtrr >= 0) { + int retcode; + retcode = mtrr_del(map->mtrr, + map->offset, + map->size); + DRM_DEBUG("mtrr_del = %d\n", retcode); + } +#endif + drm_ioremapfree(map->handle, map->size); + break; + case _DRM_SHM: + drm_free_pages((unsigned long)map->handle, + drm_order(map->size) + - PAGE_SHIFT, + DRM_MEM_SAREA); + break; + case _DRM_AGP: + /* Do nothing here, because this is all + handled in the AGP/GART driver. */ + break; + } + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + } + drm_free(dev->maplist, + dev->map_count * sizeof(*dev->maplist), + DRM_MEM_MAPS); + dev->maplist = NULL; + dev->map_count = 0; + } + + drm_dma_takedown(dev); + + dev->queue_count = 0; + if (dev->lock.hw_lock) { + dev->lock.hw_lock = NULL; /* SHM removed */ + dev->lock.pid = 0; + wake_up_interruptible(&dev->lock.lock_queue); + } + up(&dev->struct_sem); + + return 0; +} + +/* r128_init is called via init_module at module load time, or via + * linux/init/main.c (this is not currently supported). 
*/ + +int r128_init(void) +{ + int retcode; + drm_device_t *dev = &r128_device; + + DRM_DEBUG("\n"); + + memset((void *)dev, 0, sizeof(*dev)); + dev->count_lock = SPIN_LOCK_UNLOCKED; + sema_init(&dev->struct_sem, 1); + +#ifdef MODULE + drm_parse_options(r128); +#endif + + if ((retcode = misc_register(&r128_misc))) { + DRM_ERROR("Cannot register \"%s\"\n", R128_NAME); + return retcode; + } + dev->device = MKDEV(MISC_MAJOR, r128_misc.minor); + dev->name = R128_NAME; + + drm_mem_init(); + drm_proc_init(dev); + +#ifdef DRM_AGP + dev->agp = drm_agp_init(); + +#ifdef CONFIG_MTRR + dev->agp->agp_mtrr = mtrr_add(dev->agp->agp_info.aper_base, + dev->agp->agp_info.aper_size*1024*1024, + MTRR_TYPE_WRCOMB, + 1); +#endif +#endif + + if((retcode = drm_ctxbitmap_init(dev))) { + DRM_ERROR("Cannot allocate memory for context bitmap.\n"); + drm_proc_cleanup(); + misc_deregister(&r128_misc); + r128_takedown(dev); + return retcode; + } + + DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", + R128_NAME, + R128_MAJOR, + R128_MINOR, + R128_PATCHLEVEL, + R128_DATE, + r128_misc.minor); + + return 0; +} + +/* r128_cleanup is called via cleanup_module at module unload time. */ + +void r128_cleanup(void) +{ + drm_device_t *dev = &r128_device; + + DRM_DEBUG("\n"); + + drm_proc_cleanup(); + if (misc_deregister(&r128_misc)) { + DRM_ERROR("Cannot unload module\n"); + } else { + DRM_INFO("Module unloaded\n"); + } + drm_ctxbitmap_cleanup(dev); + r128_takedown(dev); +#ifdef DRM_AGP + if (dev->agp) { + /* FIXME -- free other information, too */ + drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS); + dev->agp = NULL; + } +#endif +} + +int r128_version(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_version_t version; + int len; + + copy_from_user_ret(&version, + (drm_version_t *)arg, + sizeof(version), + -EFAULT); + +#define DRM_COPY(name,value) \ + len = strlen(value); \ + if (len > name##_len) len = name##_len; \ + name##_len = strlen(value); \ + if (len && name) { \ + copy_to_user_ret(name, value, len, -EFAULT); \ + } + + version.version_major = R128_MAJOR; + version.version_minor = R128_MINOR; + version.version_patchlevel = R128_PATCHLEVEL; + + DRM_COPY(version.name, R128_NAME); + DRM_COPY(version.date, R128_DATE); + DRM_COPY(version.desc, R128_DESC); + + copy_to_user_ret((drm_version_t *)arg, + &version, + sizeof(version), + -EFAULT); + return 0; +} + +int r128_open(struct inode *inode, struct file *filp) +{ + drm_device_t *dev = &r128_device; + int retcode = 0; + + DRM_DEBUG("open_count = %d\n", dev->open_count); + if (!(retcode = drm_open_helper(inode, filp, dev))) { + MOD_INC_USE_COUNT; + atomic_inc(&dev->total_open); + spin_lock(&dev->count_lock); + if (!dev->open_count++) { + spin_unlock(&dev->count_lock); + return r128_setup(dev); + } + spin_unlock(&dev->count_lock); + } + return retcode; +} + +int r128_release(struct inode *inode, struct file *filp) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + int retcode = 0; + + DRM_DEBUG("open_count = %d\n", dev->open_count); + if (!(retcode = drm_release(inode, filp))) { + MOD_DEC_USE_COUNT; + atomic_inc(&dev->total_close); + spin_lock(&dev->count_lock); + if (!--dev->open_count) { + if (atomic_read(&dev->ioctl_count) || dev->blocked) { + DRM_ERROR("Device busy: %d %d\n", + atomic_read(&dev->ioctl_count), + dev->blocked); + spin_unlock(&dev->count_lock); + return -EBUSY; + } + spin_unlock(&dev->count_lock); + return r128_takedown(dev); + } + spin_unlock(&dev->count_lock); + } + return retcode; +} 
+ +/* r128_ioctl is called whenever a process performs an ioctl on /dev/drm. */ + +int r128_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int nr = DRM_IOCTL_NR(cmd); + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + int retcode = 0; + drm_ioctl_desc_t *ioctl; + drm_ioctl_t *func; + + atomic_inc(&dev->ioctl_count); + atomic_inc(&dev->total_ioctl); + ++priv->ioctl_count; + + DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n", + current->pid, cmd, nr, dev->device, priv->authenticated); + + if (nr >= R128_IOCTL_COUNT) { + retcode = -EINVAL; + } else { + ioctl = &r128_ioctls[nr]; + func = ioctl->func; + + if (!func) { + DRM_DEBUG("no function\n"); + retcode = -EINVAL; + } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN)) + || (ioctl->auth_needed && !priv->authenticated)) { + retcode = -EACCES; + } else { + retcode = (func)(inode, filp, cmd, arg); + } + } + + atomic_dec(&dev->ioctl_count); + return retcode; +} + +int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + DECLARE_WAITQUEUE(entry, current); + int ret = 0; + drm_lock_t lock; +#if DRM_DMA_HISTOGRAM + cycles_t start; + + dev->lck_start = start = get_cycles(); +#endif + + copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); + + if (lock.context == DRM_KERNEL_CONTEXT) { + DRM_ERROR("Process %d using kernel context %d\n", + current->pid, lock.context); + return -EINVAL; + } + + DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", + lock.context, current->pid, dev->lock.hw_lock->lock, + lock.flags); + +#if 0 + /* dev->queue_count == 0 right now for + r128. FIXME? */ + if (lock.context < 0 || lock.context >= dev->queue_count) + return -EINVAL; +#endif + + if (!ret) { +#if 0 + if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) + != lock.context) { + long j = jiffies - dev->lock.lock_time; + + if (lock.context == r128_res_ctx.handle && + j >= 0 && j < DRM_LOCK_SLICE) { + /* Can't take lock if we just had it and + there is contention. 
*/ + DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d jiffies=%d\n", + lock.context, current->pid, j, + dev->lock.lock_time, jiffies); + current->state = TASK_INTERRUPTIBLE; + current->policy |= SCHED_YIELD; + schedule_timeout(DRM_LOCK_SLICE-j); + DRM_DEBUG("jiffies=%d\n", jiffies); + } + } +#endif + add_wait_queue(&dev->lock.lock_queue, &entry); + for (;;) { + if (!dev->lock.hw_lock) { + /* Device has been unregistered */ + ret = -EINTR; + break; + } + if (drm_lock_take(&dev->lock.hw_lock->lock, + lock.context)) { + dev->lock.pid = current->pid; + dev->lock.lock_time = jiffies; + atomic_inc(&dev->total_locks); + break; /* Got lock */ + } + + /* Contention */ + atomic_inc(&dev->total_sleeps); + current->state = TASK_INTERRUPTIBLE; +#if 1 + current->policy |= SCHED_YIELD; +#endif + schedule(); + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + } + current->state = TASK_RUNNING; + remove_wait_queue(&dev->lock.lock_queue, &entry); + } + +#if 0 + if (!ret && dev->last_context != lock.context && + lock.context != r128_res_ctx.handle && + dev->last_context != r128_res_ctx.handle) { + add_wait_queue(&dev->context_wait, &entry); + current->state = TASK_INTERRUPTIBLE; + /* PRE: dev->last_context != lock.context */ + r128_context_switch(dev, dev->last_context, lock.context); + /* POST: we will wait for the context + switch and will dispatch on a later call + when dev->last_context == lock.context + NOTE WE HOLD THE LOCK THROUGHOUT THIS + TIME! */ + current->policy |= SCHED_YIELD; + schedule(); + current->state = TASK_RUNNING; + remove_wait_queue(&dev->context_wait, &entry); + if (signal_pending(current)) { + ret = -EINTR; + } else if (dev->last_context != lock.context) { + DRM_ERROR("Context mismatch: %d %d\n", + dev->last_context, lock.context); + } + } +#endif + + if (!ret) { + if (lock.flags & _DRM_LOCK_READY) { + /* Wait for space in DMA/FIFO */ + } + if (lock.flags & _DRM_LOCK_QUIESCENT) { + /* Make hardware quiescent */ +#if 0 + r128_quiescent(dev); +#endif + } + } + +#if 0 + DRM_ERROR("pid = %5d, old counter = %5ld\n", + current->pid, current->counter); +#endif + if (lock.context != r128_res_ctx.handle) { + current->counter = 5; + current->priority = DEF_PRIORITY/4; + } +#if 0 + while (current->counter > 25) + current->counter >>= 1; /* decrease time slice */ + DRM_ERROR("pid = %5d, new counter = %5ld\n", + current->pid, current->counter); +#endif + DRM_DEBUG("%d %s\n", lock.context, ret ? 
"interrupted" : "has lock"); + +#if DRM_DMA_HISTOGRAM + atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]); +#endif + + return ret; +} + + +int r128_unlock(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_lock_t lock; + + copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); + + if (lock.context == DRM_KERNEL_CONTEXT) { + DRM_ERROR("Process %d using kernel context %d\n", + current->pid, lock.context); + return -EINVAL; + } + + DRM_DEBUG("%d frees lock (%d holds)\n", + lock.context, + _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); + atomic_inc(&dev->total_unlocks); + if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock)) + atomic_inc(&dev->total_contends); + drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT); + /* FIXME: Try to send data to card here */ + if (!dev->context_flag) { + if (drm_lock_free(dev, &dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT)) { + DRM_ERROR("\n"); + } + } + +#if 0 + current->policy |= SCHED_YIELD; + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(1000); +#endif + + if (lock.context != r128_res_ctx.handle) { + current->counter = 5; + current->priority = DEF_PRIORITY; + } +#if 0 + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(10); +#endif + + return 0; +} diff --git a/linux/r128_drv.h b/linux/r128_drv.h new file mode 100644 index 00000000..81390bb8 --- /dev/null +++ b/linux/r128_drv.h @@ -0,0 +1,226 @@ +/* r128_drv.h -- Private header for r128 driver -*- linux-c -*- + * Created: Mon Dec 13 09:51:11 1999 by faith@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Rickard E. (Rik) Faith <faith@valinux.com> + * Kevin E. 
Martin <martin@valinux.com> + * + */ + +#ifndef _R128_DRV_H_ +#define _R128_DRV_H_ + +typedef struct drm_r128_private { + int is_pci; + + int cce_mode; + int cce_fifo_size; + int cce_is_bm_mode; + int cce_secure; + + drm_r128_sarea_t *sarea_priv; + + __volatile__ u32 *ring_read_ptr; + + u32 *ring_start; + u32 *ring_end; + int ring_size; + int ring_sizel2qw; + int ring_entries; + + int submit_age; + + int usec_timeout; + + drm_map_t *sarea; + drm_map_t *fb; + drm_map_t *agp_ring; + drm_map_t *agp_read_ptr; + drm_map_t *agp_vertbufs; + drm_map_t *agp_indbufs; + drm_map_t *agp_textures; + drm_map_t *mmio; +} drm_r128_private_t; + +typedef struct drm_r128_buf_priv { + u32 age; +} drm_r128_buf_priv_t; + + /* r128_drv.c */ +extern int r128_init(void); +extern void r128_cleanup(void); +extern int r128_version(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int r128_open(struct inode *inode, struct file *filp); +extern int r128_release(struct inode *inode, struct file *filp); +extern int r128_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int r128_lock(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int r128_unlock(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + + /* r128_dma.c */ +extern int r128_init_cce(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int r128_eng_reset(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int r128_eng_flush(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int r128_submit_pkt(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int r128_cce_idle(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int r128_vertex_buf(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + + /* r128_bufs.c */ +extern int r128_addbufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int r128_mapbufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + + /* r128_context.c */ +extern int r128_resctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int r128_addctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int r128_modctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int r128_getctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int r128_switchctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int r128_newctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int r128_rmctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + +extern int r128_context_switch(drm_device_t *dev, int old, int new); +extern int r128_context_switch_complete(drm_device_t *dev, int new); + + +/* Register definitions, register access macros and drmAddMap constants + * for Rage 128 kernel driver. 
+ */ + +#define R128_PC_NGUI_CTLSTAT 0x0184 +# define R128_PC_FLUSH_ALL 0x00ff +# define R128_PC_BUSY (1 << 31) + +#define R128_CLOCK_CNTL_INDEX 0x0008 +#define R128_CLOCK_CNTL_DATA 0x000c +# define R128_PLL_WR_EN (1 << 7) + +#define R128_MCLK_CNTL 0x000f +# define R128_FORCE_GCP (1 << 16) +# define R128_FORCE_PIPE3D_CP (1 << 17) +# define R128_FORCE_RCP (1 << 18) + +#define R128_GEN_RESET_CNTL 0x00f0 +# define R128_SOFT_RESET_GUI (1 << 0) + +#define R128_PM4_BUFFER_CNTL 0x0704 +# define R128_PM4_NONPM4 (0 << 28) +# define R128_PM4_192PIO (1 << 28) +# define R128_PM4_192BM (2 << 28) +# define R128_PM4_128PIO_64INDBM (3 << 28) +# define R128_PM4_128BM_64INDBM (4 << 28) +# define R128_PM4_64PIO_128INDBM (5 << 28) +# define R128_PM4_64BM_128INDBM (6 << 28) +# define R128_PM4_64PIO_64VCBM_64INDBM (7 << 28) +# define R128_PM4_64BM_64VCBM_64INDBM (8 << 28) +# define R128_PM4_64PIO_64VCPIO_64INDPIO (15 << 28) + + +#define R128_PM4_BUFFER_DL_RPTR 0x0710 +#define R128_PM4_BUFFER_DL_WPTR 0x0714 +# define R128_PM4_BUFFER_DL_DONE (1 << 31) + +#define R128_PM4_VC_FPU_SETUP 0x071c + +#define R128_PM4_STAT 0x07b8 +# define R128_PM4_FIFOCNT_MASK 0x0fff +# define R128_PM4_BUSY (1 << 16) +# define R128_PM4_GUI_ACTIVE (1 << 31) + +#define R128_PM4_BUFFER_ADDR 0x07f0 +#define R128_PM4_MICRO_CNTL 0x07fc +# define R128_PM4_MICRO_FREERUN (1 << 30) + +#define R128_PM4_FIFO_DATA_EVEN 0x1000 +#define R128_PM4_FIFO_DATA_ODD 0x1004 + +#define R128_GUI_SCRATCH_REG0 0x15e0 +#define R128_GUI_SCRATCH_REG1 0x15e4 +#define R128_GUI_SCRATCH_REG2 0x15e8 +#define R128_GUI_SCRATCH_REG3 0x15ec +#define R128_GUI_SCRATCH_REG4 0x15f0 +#define R128_GUI_SCRATCH_REG5 0x15f4 + +#define R128_GUI_STAT 0x1740 +# define R128_GUI_FIFOCNT_MASK 0x0fff +# define R128_GUI_ACTIVE (1 << 31) + + +/* CCE command packets */ +#define R128_CCE_PACKET0 0x00000000 +#define R128_CCE_PACKET1 0x40000000 +#define R128_CCE_PACKET2 0x80000000 +# define R128_CCE_PACKET_MASK 0xC0000000 +# define R128_CCE_PACKET_COUNT_MASK 0x3fff0000 +# define R128_CCE_PACKET0_REG_MASK 0x000007ff +# define R128_CCE_PACKET1_REG0_MASK 0x000007ff +# define R128_CCE_PACKET1_REG1_MASK 0x003ff800 + + +#define R128_MAX_USEC_TIMEOUT 100000 /* 100 ms */ + + +#define R128_BASE(reg) ((u32)(dev_priv->mmio->handle)) +#define R128_ADDR(reg) (R128_BASE(reg) + reg) + +#define R128_DEREF(reg) *(__volatile__ int *)R128_ADDR(reg) +#define R128_READ(reg) R128_DEREF(reg) +#define R128_WRITE(reg,val) do { R128_DEREF(reg) = val; } while (0) + +#define R128_DEREF8(reg) *(__volatile__ char *)R128_ADDR(reg) +#define R128_READ8(reg) R128_DEREF8(reg) +#define R128_WRITE8(reg,val) do { R128_DEREF8(reg) = val; } while (0) + +#define R128_WRITE_PLL(addr,val) \ +do { \ + R128_WRITE8(R128_CLOCK_CNTL_INDEX, ((addr) & 0x1f) | R128_PLL_WR_EN); \ + R128_WRITE(R128_CLOCK_CNTL_DATA, (val)); \ +} while (0) + +extern int R128_READ_PLL(drm_device_t *dev, int addr); + +#define R128CCE0(p,r,n) ((p) | ((n) << 16) | ((r) >> 2)) +#define R128CCE1(p,r1,r2) ((p) | (((r2) >> 2) << 11) | ((r1) >> 2)) +#define R128CCE2(p) ((p)) +#define R128CCE3(p,n) ((p) | ((n) << 16)) + +#endif diff --git a/linux/tdfx_context.c b/linux/tdfx_context.c index 842d6f5d..c8d6e50e 100644 --- a/linux/tdfx_context.c +++ b/linux/tdfx_context.c @@ -1,8 +1,8 @@ /* tdfx_context.c -- IOCTLs for tdfx contexts -*- linux-c -*- * Created: Thu Oct 7 10:50:22 1999 by faith@precisioninsight.com - * Revised: Sat Oct 9 23:39:56 1999 by faith@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. 
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,8 +24,10 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/tdfx_context.c,v 1.2 2000/02/23 04:47:30 martin Exp $ - * + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> + * Daryll Strauss <daryll@valinux.com> + * */ #include <linux/sched.h> @@ -38,9 +40,7 @@ extern drm_ctx_t tdfx_res_ctx; static int tdfx_alloc_queue(drm_device_t *dev) { - static int context = 0; - - return ++context; /* Should this reuse contexts in the future? */ + return drm_ctxbitmap_next(dev); } int tdfx_context_switch(drm_device_t *dev, int old, int new) @@ -137,6 +137,12 @@ int tdfx_addctx(struct inode *inode, struct file *filp, unsigned int cmd, ctx.handle = tdfx_alloc_queue(dev); } DRM_DEBUG("%d\n", ctx.handle); + if (ctx.handle == -1) { + DRM_DEBUG("Not enough free contexts.\n"); + /* Should this return -EBUSY instead? */ + return -ENOMEM; + } + copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT); return 0; } @@ -193,13 +199,13 @@ int tdfx_newctx(struct inode *inode, struct file *filp, unsigned int cmd, int tdfx_rmctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; drm_ctx_t ctx; copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); DRM_DEBUG("%d\n", ctx.handle); - /* This is currently a noop because we - don't reuse context values. Perhaps we - should? */ - + drm_ctxbitmap_free(dev, ctx.handle); + return 0; } diff --git a/linux/tdfx_drv.c b/linux/tdfx_drv.c index 57c1c719..582832b5 100644 --- a/linux/tdfx_drv.c +++ b/linux/tdfx_drv.c @@ -1,8 +1,8 @@ /* tdfx.c -- tdfx driver -*- linux-c -*- * Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com - * Revised: Tue Oct 12 08:51:35 1999 by faith@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,10 +24,13 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/tdfx_drv.c,v 1.3 2000/02/23 04:47:31 martin Exp $ + * Authors: + * Rickard E. 
(Rik) Faith <faith@valinux.com> + * Daryll Strauss <daryll@valinux.com> * */ +#include <linux/config.h> #define EXPORT_SYMTAB #include "drmP.h" #include "tdfx_drv.h" @@ -37,9 +40,9 @@ EXPORT_SYMBOL(tdfx_cleanup); #define TDFX_NAME "tdfx" #define TDFX_DESC "tdfx" #define TDFX_DATE "19991009" -#define TDFX_MAJOR 0 +#define TDFX_MAJOR 1 #define TDFX_MINOR 0 -#define TDFX_PATCHLEVEL 1 +#define TDFX_PATCHLEVEL 0 static drm_device_t tdfx_device; drm_ctx_t tdfx_res_ctx; @@ -85,6 +88,16 @@ static drm_ioctl_desc_t tdfx_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { tdfx_lock, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { tdfx_unlock, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 }, +#ifdef DRM_AGP + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire, 1, 1}, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release, 1, 1}, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable, 1, 1}, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info, 1, 1}, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc, 1, 1}, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free, 1, 1}, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_unbind, 1, 1}, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_bind, 1, 1}, +#endif }; #define TDFX_IOCTL_COUNT DRM_ARRAY_SIZE(tdfx_ioctls) @@ -228,7 +241,24 @@ static int tdfx_takedown(drm_device_t *dev) } dev->magiclist[i].head = dev->magiclist[i].tail = NULL; } - +#ifdef DRM_AGP + /* Clear AGP information */ + if (dev->agp) { + drm_agp_mem_t *temp; + drm_agp_mem_t *temp_next; + + temp = dev->agp->memory; + while(temp != NULL) { + temp_next = temp->next; + drm_free_agp(temp->memory, temp->pages); + drm_free(temp, sizeof(*temp), DRM_MEM_AGPLISTS); + temp = temp_next; + } + if(dev->agp->acquired) (*drm_agp.release)(); + drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS); + dev->agp = NULL; + } +#endif /* Clear vma list (only built for debugging) */ if (dev->vmalist) { for (vma = dev->vmalist; vma; vma = vma_next) { @@ -262,6 +292,10 @@ static int tdfx_takedown(drm_device_t *dev) - PAGE_SHIFT, DRM_MEM_SAREA); break; + case _DRM_AGP: + /* Do nothing here, because this is all + handled in the AGP/GART driver. */ + break; } drm_free(map, sizeof(*map), DRM_MEM_MAPS); } @@ -309,6 +343,16 @@ int tdfx_init(void) drm_mem_init(); drm_proc_init(dev); +#ifdef DRM_AGP + dev->agp = drm_agp_init(); +#endif + if((retcode = drm_ctxbitmap_init(dev))) { + DRM_ERROR("Cannot allocate memory for context bitmap.\n"); + drm_proc_cleanup(); + misc_deregister(&tdfx_misc); + tdfx_takedown(dev); + return retcode; + } DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", TDFX_NAME, @@ -335,6 +379,7 @@ void tdfx_cleanup(void) } else { DRM_INFO("Module unloaded\n"); } + drm_ctxbitmap_cleanup(dev); tdfx_takedown(dev); } diff --git a/linux/tdfx_drv.h b/linux/tdfx_drv.h index 3866010a..879e6d37 100644 --- a/linux/tdfx_drv.h +++ b/linux/tdfx_drv.h @@ -1,8 +1,8 @@ /* tdfx_drv.h -- Private header for tdfx driver -*- linux-c -*- * Created: Thu Oct 7 10:40:04 1999 by faith@precisioninsight.com - * Revised: Sat Oct 9 23:38:19 1999 by faith@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,7 +24,9 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/tdfx_drv.h,v 1.2 2000/02/23 04:47:31 martin Exp $ + * Authors: + * Rickard E. (Rik) Faith <faith@precisioninsight.com> + * Daryll Strauss <daryll@precisioninsight.com> * */ @@ -1,8 +1,8 @@ /* vm.c -- Memory mapping for DRM -*- linux-c -*- * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com - * Revised: Mon Feb 14 00:16:45 2000 by kevin@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,7 +24,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/vm.c,v 1.5 2000/02/23 04:47:31 martin Exp $ + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> * */ @@ -246,13 +247,26 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma) /* Check for valid size. */ if (map->size != vma->vm_end - vma->vm_start) return -EINVAL; + if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) { + vma->vm_flags &= VM_MAYWRITE; +#if defined(__i386__) + pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; +#else + /* Ye gads this is ugly. With more thought + we could move this up higher and use + `protection_map' instead. */ + vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect( + __pte(pgprot_val(vma->vm_page_prot))))); +#endif + } switch (map->type) { case _DRM_FRAME_BUFFER: case _DRM_REGISTERS: + case _DRM_AGP: if (VM_OFFSET(vma) >= __pa(high_memory)) { #if defined(__i386__) - if (boot_cpu_data.x86 > 3) { + if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) { pgprot_val(vma->vm_page_prot) |= _PAGE_PCD; pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT; } @@ -264,6 +278,10 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_end - vma->vm_start, vma->vm_page_prot)) return -EAGAIN; + DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx," + " offset = 0x%lx\n", + map->type, + vma->vm_start, vma->vm_end, VM_OFFSET(vma)); vma->vm_ops = &drm_vm_ops; break; case _DRM_SHM: @@ -276,19 +294,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma) return -EINVAL; /* This should never happen. */ } vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */ - if (map->flags & _DRM_READ_ONLY) { -#if defined(__i386__) - pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; -#else - /* Ye gads this is ugly. With more thought - we could move this up higher and use - `protection_map' instead. */ - vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect( - __pte(pgprot_val(vma->vm_page_prot))))); -#endif - } - #if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */ /* In Linux 2.2.3 and above, this is handled in do_mmap() in mm/mmap.c. */ diff --git a/shared-core/drm.h b/shared-core/drm.h index 7b8e8826..4affcc41 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -1,8 +1,8 @@ /* drm.h -- Header for Direct Rendering Manager -*- linux-c -*- * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com - * Revised: Mon Feb 14 00:15:23 2000 by kevin@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,7 +24,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.5 2000/02/23 04:47:26 martin Exp $ + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> * * Acknowledgements: * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg. @@ -61,6 +62,20 @@ typedef unsigned int drm_context_t; typedef unsigned int drm_drawable_t; typedef unsigned int drm_magic_t; +/* Warning: If you change this structure, make sure you change + * XF86DRIClipRectRec in the server as well */ + +typedef struct drm_clip_rect { + unsigned short x1; + unsigned short y1; + unsigned short x2; + unsigned short y2; +} drm_clip_rect_t; + +/* Seperate include files for the i810/mga/r128 specific structures */ +#include "mga_drm.h" +#include "i810_drm.h" +#include "r128_drm.h" typedef struct drm_version { int version_major; /* Major version */ @@ -101,7 +116,8 @@ typedef struct drm_control { typedef enum drm_map_type { _DRM_FRAME_BUFFER = 0, /* WC (no caching), no core dump */ _DRM_REGISTERS = 1, /* no caching, no core dump */ - _DRM_SHM = 2 /* shared, cached */ + _DRM_SHM = 2, /* shared, cached */ + _DRM_AGP = 3 /* AGP/GART */ } drm_map_type_t; typedef enum drm_map_flags { @@ -165,8 +181,11 @@ typedef struct drm_buf_desc { int low_mark; /* Low water mark */ int high_mark; /* High water mark */ enum { - DRM_PAGE_ALIGN = 0x01 /* Align on page boundaries for DMA */ + _DRM_PAGE_ALIGN = 0x01, /* Align on page boundaries for DMA */ + _DRM_AGP_BUFFER = 0x02 /* Buffer is in agp space */ } flags; + unsigned long agp_start; /* Start address of where the agp buffers + * are in the agp aperture */ } drm_buf_desc_t; typedef struct drm_buf_info { @@ -237,6 +256,38 @@ typedef struct drm_irq_busid { int funcnum; } drm_irq_busid_t; +typedef struct drm_agp_mode { + unsigned long mode; +} drm_agp_mode_t; + + /* For drm_agp_alloc -- allocated a buffer */ +typedef struct drm_agp_buffer { + unsigned long size; /* In bytes -- will round to page boundary */ + unsigned long handle; /* Used for BIND/UNBIND ioctls */ + unsigned long type; /* Type of memory to allocate */ + unsigned long physical; /* Physical used by i810 */ +} drm_agp_buffer_t; + + /* For drm_agp_bind */ +typedef struct drm_agp_binding { + unsigned long handle; /* From drm_agp_buffer */ + unsigned long offset; /* In bytes -- will round to page boundary */ +} drm_agp_binding_t; + +typedef struct drm_agp_info { + int agp_version_major; + int agp_version_minor; + unsigned long mode; + unsigned long aperture_base; /* physical address */ + unsigned long aperture_size; /* bytes */ + unsigned long memory_allowed; /* bytes */ + unsigned long memory_used; + + /* PCI information */ + unsigned short id_vendor; + unsigned short id_device; +} drm_agp_info_t; + #define DRM_IOCTL_BASE 'd' #define DRM_IOCTL_NR(n) _IOC_NR(n) #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) @@ -247,7 +298,7 @@ typedef struct drm_irq_busid { #define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t) #define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t) -#define DRM_IOCTL_GET_MAGIC DRM_IOW( 0x02, drm_auth_t) +#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t) #define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t) #define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t) @@ -276,4 +327,39 @@ typedef struct drm_irq_busid { #define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t) #define 
DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t) +#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30) +#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31) +#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t) +#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t) +#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t) +#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t) +#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t) +#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t) + +/* Mga specific ioctls */ +#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t) +#define DRM_IOCTL_MGA_SWAP DRM_IOW( 0x41, drm_mga_swap_t) +#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x42, drm_mga_clear_t) +#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x43, drm_mga_iload_t) +#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x44, drm_mga_vertex_t) +#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x45, drm_lock_t ) +#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t) + +/* I810 specific ioctls */ +#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t) +#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t) +#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t) +#define DRM_IOCTL_I810_FLUSH DRM_IO ( 0x43) +#define DRM_IOCTL_I810_GETAGE DRM_IO ( 0x44) +#define DRM_IOCTL_I810_GETBUF DRM_IOWR(0x45, drm_i810_dma_t) +#define DRM_IOCTL_I810_SWAP DRM_IO ( 0x46) + +/* Rage 128 specific ioctls */ +#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t) +#define DRM_IOCTL_R128_RESET DRM_IO( 0x41) +#define DRM_IOCTL_R128_FLUSH DRM_IO( 0x42) +#define DRM_IOCTL_R128_IDLE DRM_IO( 0x43) +#define DRM_IOCTL_R128_PACKET DRM_IOW( 0x44, drm_r128_packet_t) +#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x45, drm_r128_vertex_t) + #endif diff --git a/shared/drm.h b/shared/drm.h index 7b8e8826..4affcc41 100644 --- a/shared/drm.h +++ b/shared/drm.h @@ -1,8 +1,8 @@ /* drm.h -- Header for Direct Rendering Manager -*- linux-c -*- * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com - * Revised: Mon Feb 14 00:15:23 2000 by kevin@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,7 +24,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.5 2000/02/23 04:47:26 martin Exp $ + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> * * Acknowledgements: * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg. 
@@ -61,6 +62,20 @@ typedef unsigned int drm_context_t; typedef unsigned int drm_drawable_t; typedef unsigned int drm_magic_t; +/* Warning: If you change this structure, make sure you change + * XF86DRIClipRectRec in the server as well */ + +typedef struct drm_clip_rect { + unsigned short x1; + unsigned short y1; + unsigned short x2; + unsigned short y2; +} drm_clip_rect_t; + +/* Seperate include files for the i810/mga/r128 specific structures */ +#include "mga_drm.h" +#include "i810_drm.h" +#include "r128_drm.h" typedef struct drm_version { int version_major; /* Major version */ @@ -101,7 +116,8 @@ typedef struct drm_control { typedef enum drm_map_type { _DRM_FRAME_BUFFER = 0, /* WC (no caching), no core dump */ _DRM_REGISTERS = 1, /* no caching, no core dump */ - _DRM_SHM = 2 /* shared, cached */ + _DRM_SHM = 2, /* shared, cached */ + _DRM_AGP = 3 /* AGP/GART */ } drm_map_type_t; typedef enum drm_map_flags { @@ -165,8 +181,11 @@ typedef struct drm_buf_desc { int low_mark; /* Low water mark */ int high_mark; /* High water mark */ enum { - DRM_PAGE_ALIGN = 0x01 /* Align on page boundaries for DMA */ + _DRM_PAGE_ALIGN = 0x01, /* Align on page boundaries for DMA */ + _DRM_AGP_BUFFER = 0x02 /* Buffer is in agp space */ } flags; + unsigned long agp_start; /* Start address of where the agp buffers + * are in the agp aperture */ } drm_buf_desc_t; typedef struct drm_buf_info { @@ -237,6 +256,38 @@ typedef struct drm_irq_busid { int funcnum; } drm_irq_busid_t; +typedef struct drm_agp_mode { + unsigned long mode; +} drm_agp_mode_t; + + /* For drm_agp_alloc -- allocated a buffer */ +typedef struct drm_agp_buffer { + unsigned long size; /* In bytes -- will round to page boundary */ + unsigned long handle; /* Used for BIND/UNBIND ioctls */ + unsigned long type; /* Type of memory to allocate */ + unsigned long physical; /* Physical used by i810 */ +} drm_agp_buffer_t; + + /* For drm_agp_bind */ +typedef struct drm_agp_binding { + unsigned long handle; /* From drm_agp_buffer */ + unsigned long offset; /* In bytes -- will round to page boundary */ +} drm_agp_binding_t; + +typedef struct drm_agp_info { + int agp_version_major; + int agp_version_minor; + unsigned long mode; + unsigned long aperture_base; /* physical address */ + unsigned long aperture_size; /* bytes */ + unsigned long memory_allowed; /* bytes */ + unsigned long memory_used; + + /* PCI information */ + unsigned short id_vendor; + unsigned short id_device; +} drm_agp_info_t; + #define DRM_IOCTL_BASE 'd' #define DRM_IOCTL_NR(n) _IOC_NR(n) #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) @@ -247,7 +298,7 @@ typedef struct drm_irq_busid { #define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t) #define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t) -#define DRM_IOCTL_GET_MAGIC DRM_IOW( 0x02, drm_auth_t) +#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t) #define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t) #define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t) @@ -276,4 +327,39 @@ typedef struct drm_irq_busid { #define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t) #define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t) +#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30) +#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31) +#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t) +#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t) +#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t) +#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t) +#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t) +#define DRM_IOCTL_AGP_UNBIND 
DRM_IOW( 0x37, drm_agp_binding_t) + +/* Mga specific ioctls */ +#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t) +#define DRM_IOCTL_MGA_SWAP DRM_IOW( 0x41, drm_mga_swap_t) +#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x42, drm_mga_clear_t) +#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x43, drm_mga_iload_t) +#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x44, drm_mga_vertex_t) +#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x45, drm_lock_t ) +#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t) + +/* I810 specific ioctls */ +#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t) +#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t) +#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t) +#define DRM_IOCTL_I810_FLUSH DRM_IO ( 0x43) +#define DRM_IOCTL_I810_GETAGE DRM_IO ( 0x44) +#define DRM_IOCTL_I810_GETBUF DRM_IOWR(0x45, drm_i810_dma_t) +#define DRM_IOCTL_I810_SWAP DRM_IO ( 0x46) + +/* Rage 128 specific ioctls */ +#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t) +#define DRM_IOCTL_R128_RESET DRM_IO( 0x41) +#define DRM_IOCTL_R128_FLUSH DRM_IO( 0x42) +#define DRM_IOCTL_R128_IDLE DRM_IO( 0x43) +#define DRM_IOCTL_R128_PACKET DRM_IOW( 0x44, drm_r128_packet_t) +#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x45, drm_r128_vertex_t) + #endif diff --git a/tests/drmstat.c b/tests/drmstat.c index 8fe6f707..8c691c49 100644 --- a/tests/drmstat.c +++ b/tests/drmstat.c @@ -1,8 +1,8 @@ /* drmstat.c -- DRM device status and testing program * Created: Tue Jan 5 08:19:24 1999 by faith@precisioninsight.com - * Revised: Sun Feb 13 23:35:00 2000 by kevin@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,7 +24,7 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmstat.c,v 1.6 2000/02/23 04:47:27 martin Exp $ + * Authors: Rickard E. (Rik) Faith <faith@valinux.com> * */ @@ -187,7 +187,7 @@ int main(int argc, char **argv) case 'b': count = strtoul(optarg, &pt, 0); size = strtoul(pt+1, NULL, 0); - if ((r = drmAddBufs(fd, count, size, 0)) < 0) { + if ((r = drmAddBufs(fd, count, size, 0, 65536)) < 0) { drmError(r, argv[0]); return 1; } |
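
A few stand-alone sketches of the interfaces this import introduces follow; they are editorial illustrations, not part of the patch. First, the register-access macros at the bottom of the new linux/r128_drv.h: R128_WRITE_PLL is the usual indexed-register idiom, where an 8-bit write to R128_CLOCK_CNTL_INDEX selects a PLL register (with R128_PLL_WR_EN set to arm the write) and a following 32-bit write to R128_CLOCK_CNTL_DATA supplies the value. The sketch below replays that sequence against a plain byte array standing in for the MMIO aperture; the fake aperture, the helper names, and the PLL address 0x0a are illustrative only.

/* pll_write_sketch.c -- user-space illustration of the indexed PLL write
 * sequence behind R128_WRITE_PLL; a byte array stands in for the MMIO
 * aperture that dev_priv->mmio->handle points at in the real driver. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define R128_CLOCK_CNTL_INDEX  0x0008
#define R128_CLOCK_CNTL_DATA   0x000c
#define R128_PLL_WR_EN         (1 << 7)

static uint8_t fake_mmio[0x4000];              /* pretend register aperture */

static void write8(uint32_t reg, uint8_t val)  { fake_mmio[reg] = val; }

static void write32(uint32_t reg, uint32_t val)
{
        memcpy(&fake_mmio[reg], &val, sizeof(val));
}

/* Mirrors R128_WRITE_PLL: select the PLL register, then write the data. */
static void write_pll(int addr, uint32_t val)
{
        write8(R128_CLOCK_CNTL_INDEX, (addr & 0x1f) | R128_PLL_WR_EN);
        write32(R128_CLOCK_CNTL_DATA, val);
}

int main(void)
{
        uint32_t data;

        write_pll(0x0a, 0x12345678);           /* 0x0a: arbitrary PLL reg */
        memcpy(&data, &fake_mmio[R128_CLOCK_CNTL_DATA], sizeof(data));
        printf("index byte 0x%02x, data word 0x%08x\n",
               fake_mmio[R128_CLOCK_CNTL_INDEX], data);
        return 0;
}

R128_READ_PLL presumably performs the mirror-image read (write the index, then read CLOCK_CNTL_DATA); the header only exports its prototype, so it is left out of the sketch.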
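
Likewise, the R128CCE0..R128CCE3 helpers in the same header pack a CCE command-packet header out of a packet type, a register offset (stored divided by four), and a count field in bits 16..29. The arithmetic is self-contained, so it can be checked in isolation; whether the hardware expects the count as N or N-1 dwords is a CCE convention the header does not spell out, so the count passed below is only illustrative.

/* cce_header_sketch.c -- the CCE packet-header macros from r128_drv.h,
 * exercised stand-alone.  Register and packet values are copied from the
 * header hunk above; the dword count is illustrative. */
#include <stdio.h>

#define R128_CCE_PACKET0        0x00000000
#define R128_CCE_PACKET2        0x80000000
#define R128_PM4_VC_FPU_SETUP   0x071c

#define R128CCE0(p,r,n)  ((p) | ((n) << 16) | ((r) >> 2))
#define R128CCE2(p)      ((p))

int main(void)
{
        unsigned int hdr = R128CCE0(R128_CCE_PACKET0,
                                    R128_PM4_VC_FPU_SETUP, 3);

        printf("PACKET0 header for VC_FPU_SETUP: 0x%08x\n", hdr);
        printf("PACKET2 (nop) header:            0x%08x\n",
               R128CCE2(R128_CCE_PACKET2));
        return 0;
}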
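
On the tdfx side, tdfx_alloc_queue() now takes its handle from drm_ctxbitmap_next() and tdfx_rmctx() gives it back with drm_ctxbitmap_free(), instead of handing out an ever-growing counter that was never reused. The DRM core's bitmap code is not part of this patch, so the following is only a guess at the general technique -- first-clear-bit allocation over a fixed-size bitmap, returning -1 when full, which matches the new -1 check in tdfx_addctx(). The function names and the 64-context limit are made up for the sketch.

/* ctxbitmap_sketch.c -- user-space sketch of a bitmap-backed context
 * allocator in the spirit of drm_ctxbitmap_next()/drm_ctxbitmap_free().
 * Names and the CTX_MAX limit are illustrative, not the DRM core's. */
#include <stdio.h>
#include <stdint.h>

#define CTX_MAX 64

static uint32_t ctx_bitmap[CTX_MAX / 32];

/* Return the lowest free handle, or -1 when every context is in use. */
static int ctxbitmap_next(void)
{
        int i;

        for (i = 0; i < CTX_MAX; i++) {
                if (!(ctx_bitmap[i / 32] & (1u << (i % 32)))) {
                        ctx_bitmap[i / 32] |= 1u << (i % 32);
                        return i;
                }
        }
        return -1;
}

static void ctxbitmap_free(int handle)
{
        if (handle >= 0 && handle < CTX_MAX)
                ctx_bitmap[handle / 32] &= ~(1u << (handle % 32));
}

int main(void)
{
        int a = ctxbitmap_next();               /* 0 */
        int b = ctxbitmap_next();               /* 1 */

        ctxbitmap_free(a);
        printf("freed handle comes back: %d\n", ctxbitmap_next()); /* 0 */
        printf("then the next new one:   %d\n", ctxbitmap_next()); /* 2 */
        ctxbitmap_free(b);
        return 0;
}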
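
The tdfx_ioctls[] table in tdfx_drv.c -- now grown by eight AGP entries -- is the standard DRM dispatch pattern: an array indexed by DRM_IOCTL_NR(cmd) whose entries carry a handler plus "needs authentication" and "root only" flags. A minimal stand-alone analogue of that pattern is sketched below; the descriptor shape follows the { handler, auth_needed, root_only } initialisers above, but the struct name, the dummy handlers, and the dispatcher are assumptions for illustration.

/* ioctl_table_sketch.c -- stand-alone illustration of the DRM-style
 * ioctl dispatch table used in tdfx_drv.c.  Everything here except the
 * { handler, auth_needed, root_only } shape is invented for the sketch. */
#include <stdio.h>
#include <errno.h>

typedef int (*ioctl_func_t)(int fd, void *arg);

typedef struct ioctl_desc {
        ioctl_func_t func;
        int          auth_needed;
        int          root_only;
} ioctl_desc_t;

static int demo_version(int fd, void *arg) { (void)fd; (void)arg; return 0; }
static int demo_lock(int fd, void *arg)    { (void)fd; (void)arg; return 0; }

static ioctl_desc_t ioctls[] = {
        [0] = { demo_version, 0, 0 },   /* anyone may query the version  */
        [1] = { demo_lock,    1, 0 },   /* needs an authenticated client */
};
#define IOCTL_COUNT (sizeof(ioctls) / sizeof(ioctls[0]))

static int dispatch(unsigned int nr, int fd, void *arg,
                    int authenticated, int is_root)
{
        ioctl_desc_t *d;

        if (nr >= IOCTL_COUNT || !ioctls[nr].func)
                return -EINVAL;
        d = &ioctls[nr];
        if ((d->auth_needed && !authenticated) || (d->root_only && !is_root))
                return -EACCES;
        return d->func(fd, arg);
}

int main(void)
{
        printf("version as guest:   %d\n", dispatch(0, -1, NULL, 0, 0));
        printf("lock as guest:      %d\n", dispatch(1, -1, NULL, 0, 0));
        printf("lock authenticated: %d\n", dispatch(1, -1, NULL, 1, 0));
        return 0;
}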
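
Finally, the new DRM_IOCTL_AGP_* requests added to drm.h (in both the shared-core and shared copies) let a privileged, authenticated client -- in practice the X server -- drive the AGP/GART layer directly. Below is a hedged sketch of the acquire/info/enable/release sequence: the structure layouts and request numbers are re-declared from the header hunks above, while the /dev/dri/card0 path and the idea of echoing back the reported mode are assumptions about how a client of this era might use the interface.

/* agp_ioctl_sketch.c -- sketch of driving the new AGP ioctls from user
 * space.  Struct layouts and request numbers mirror the drm.h hunks
 * above; the device path and the chosen mode are assumptions, and the
 * program needs root plus a DRM-capable kernel to do anything real. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

typedef struct drm_agp_mode {
        unsigned long mode;
} drm_agp_mode_t;

typedef struct drm_agp_info {
        int agp_version_major;
        int agp_version_minor;
        unsigned long mode;
        unsigned long aperture_base;    /* physical address */
        unsigned long aperture_size;    /* bytes */
        unsigned long memory_allowed;   /* bytes */
        unsigned long memory_used;
        unsigned short id_vendor;       /* PCI information */
        unsigned short id_device;
} drm_agp_info_t;

#define DRM_IOCTL_BASE        'd'
#define DRM_IOCTL_AGP_ACQUIRE _IO(DRM_IOCTL_BASE, 0x30)
#define DRM_IOCTL_AGP_RELEASE _IO(DRM_IOCTL_BASE, 0x31)
#define DRM_IOCTL_AGP_ENABLE  _IOW(DRM_IOCTL_BASE, 0x32, drm_agp_mode_t)
#define DRM_IOCTL_AGP_INFO    _IOR(DRM_IOCTL_BASE, 0x33, drm_agp_info_t)

int main(void)
{
        const char *dev = "/dev/dri/card0";     /* assumed device node */
        drm_agp_info_t info;
        drm_agp_mode_t mode;
        int fd = open(dev, O_RDWR);

        if (fd < 0) {
                perror(dev);
                return 1;
        }
        if (ioctl(fd, DRM_IOCTL_AGP_ACQUIRE) == 0 &&
            ioctl(fd, DRM_IOCTL_AGP_INFO, &info) == 0) {
                printf("AGP %d.%d, aperture %lu MB\n",
                       info.agp_version_major, info.agp_version_minor,
                       info.aperture_size >> 20);
                mode.mode = info.mode;          /* re-use the reported mode */
                ioctl(fd, DRM_IOCTL_AGP_ENABLE, &mode);
                ioctl(fd, DRM_IOCTL_AGP_RELEASE);
        }
        close(fd);
        return 0;
}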