summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--linux-core/mach64_drv.c664
-rw-r--r--linux/mach64_bufs.c491
-rw-r--r--linux/mach64_context.c211
-rw-r--r--linux/mach64_drv.c664
4 files changed, 2030 insertions, 0 deletions
diff --git a/linux-core/mach64_drv.c b/linux-core/mach64_drv.c
new file mode 100644
index 00000000..14b7fb19
--- /dev/null
+++ b/linux-core/mach64_drv.c
@@ -0,0 +1,664 @@
+/* mach64_drv.c -- mach64 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#include <linux/config.h>
+#include "drmP.h"
+#include "mach64_drv.h"
+
+/* Driver identification reported to user space via the DRM version
+ * ioctl (see mach64_version below).
+ */
+#define MACH64_NAME "mach64"
+#define MACH64_DESC "ATI Mach64 (Rage Pro)"
+#define MACH64_DATE "20000908"
+#define MACH64_MAJOR 1
+#define MACH64_MINOR 0
+#define MACH64_PATCHLEVEL 0
+
+/* The single, statically allocated device instance this driver manages. */
+static drm_device_t mach64_device;
+/* Context bookkeeping shared with the context ioctls (non-static:
+ * referenced from mach64_context.c).
+ */
+drm_ctx_t mach64_res_ctx;
+
+/* File operations for the misc device node.  Uses gcc's old "label:"
+ * initializer syntax (pre-C99 designated initializers), matching the
+ * kernel style of this era.  Generic DRM helpers handle everything
+ * except open/release/ioctl, which need the mach64-specific device.
+ */
+static struct file_operations mach64_fops = {
+#if LINUX_VERSION_CODE >= 0x020400
+	/* This started being used during 2.4.0-test */
+	owner: THIS_MODULE,
+#endif
+	open: mach64_open,
+	flush: drm_flush,
+	release: mach64_release,
+	ioctl: mach64_ioctl,
+	mmap: drm_mmap,
+	read: drm_read,
+	fasync: drm_fasync,
+	poll: drm_poll,
+};
+
+/* Misc character-device registration record; the minor number is
+ * assigned dynamically by misc_register() in mach64_init().
+ */
+static struct miscdevice mach64_misc = {
+	minor: MISC_DYNAMIC_MINOR,
+	name: MACH64_NAME,
+	fops: &mach64_fops,
+};
+
+/* Ioctl dispatch table, indexed by ioctl number.  Each entry carries
+ * the handler plus two permission flags that mach64_ioctl() enforces:
+ * one requires an authenticated client, the other CAP_SYS_ADMIN
+ * (field order is defined by drm_ioctl_desc_t in drmP.h -- confirm
+ * there before relying on the 0/1 positions below).
+ */
+static drm_ioctl_desc_t mach64_ioctls[] = {
+	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { mach64_version, 0, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
+
+	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { mach64_control, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { mach64_addbufs, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { mach64_markbufs, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { mach64_infobufs, 1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { mach64_freebufs, 1, 0 },
+
+	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { mach64_addctx, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { mach64_rmctx, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { mach64_modctx, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { mach64_getctx, 1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { mach64_switchctx, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { mach64_newctx, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { mach64_resctx, 1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
+
+	[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { mach64_lock, 1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { mach64_unlock, 1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
+
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
+
+	/* mach64-specific ioctls: DMA init, vertex submission, clears,
+	 * flushes, buffer queries, page flips and blits.
+	 */
+	[DRM_IOCTL_NR(DRM_IOCTL_MACH64_INIT)] = { mach64_dma_init, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_MACH64_VERTEX)] = { mach64_dma_vertex, 1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_MACH64_CLEAR)] = { mach64_clear_bufs, 1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_MACH64_FLUSH)] = { mach64_flush_ioctl, 1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_MACH64_GETAGE)] = { mach64_getage, 1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_MACH64_GETBUF)] = { mach64_getbuf, 1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_MACH64_SWAP)] = { mach64_swap_bufs, 1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_MACH64_COPY)] = { mach64_copybuf, 1, 0 },
+};
+
+/* Number of slots in the dispatch table; used to bounds-check nr. */
+#define MACH64_IOCTL_COUNT DRM_ARRAY_SIZE( mach64_ioctls )
+
+#ifdef MODULE
+/* Option string passed as mach64=... on insmod; parsed by
+ * drm_parse_options() in mach64_init().
+ */
+static char *mach64 = NULL;
+#endif
+
+MODULE_AUTHOR( "Gareth Hughes <gareth@valinux.com>" );
+MODULE_DESCRIPTION( "ATI Mach64 (Rage Pro)" );
+MODULE_PARM( mach64, "s" );
+
+#ifndef MODULE
+/* mach64_options is called by the kernel to parse command-line options
+ * passed via the boot-loader (e.g., LILO). It calls the insmod option
+ * routine, drm_parse_drm.
+ */
+
+static int __init mach64_options( char *str )
+{
+	drm_parse_options( str );
+	/* Return 1 to tell the kernel the option was consumed. */
+	return 1;
+}
+
+/* Hook the "mach64=" kernel command-line parameter (built-in only). */
+__setup( "mach64=", mach64_options );
+#endif
+
+/* Per-open-cycle initialization: resets counters, statistics, the
+ * magic hash, map/vma/queue lists, the lock state, the context timer
+ * and the log ring buffer, and sets up DMA bookkeeping.  Called from
+ * mach64_open() when open_count goes 0 -> 1; undone by
+ * mach64_takedown().  Always returns 0.
+ */
+static int mach64_setup( drm_device_t *dev )
+{
+	int i;
+
+	atomic_set( &dev->ioctl_count, 0 );
+	atomic_set( &dev->vma_count, 0 );
+	dev->buf_use = 0;
+	atomic_set( &dev->buf_alloc, 0 );
+
+	drm_dma_setup( dev );
+
+	/* Usage statistics, exported via /proc (see drm_proc_init). */
+	atomic_set( &dev->total_open, 0 );
+	atomic_set( &dev->total_close, 0 );
+	atomic_set( &dev->total_ioctl, 0 );
+	atomic_set( &dev->total_irq, 0 );
+	atomic_set( &dev->total_ctx, 0 );
+	atomic_set( &dev->total_locks, 0 );
+	atomic_set( &dev->total_unlocks, 0 );
+	atomic_set( &dev->total_contends, 0 );
+	atomic_set( &dev->total_sleeps, 0 );
+
+	/* Empty the authentication-magic hash buckets. */
+	for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
+		dev->magiclist[i].head = NULL;
+		dev->magiclist[i].tail = NULL;
+	}
+	dev->maplist = NULL;
+	dev->map_count = 0;
+	dev->vmalist = NULL;
+	dev->lock.hw_lock = NULL;
+	init_waitqueue_head( &dev->lock.lock_queue );
+	dev->queue_count = 0;
+	dev->queue_reserved = 0;
+	dev->queue_slots = 0;
+	dev->queuelist = NULL;
+	dev->irq = 0;
+	dev->context_flag = 0;
+	dev->interrupt_flag = 0;
+	dev->dma_flag = 0;
+	dev->last_context = 0;
+	dev->last_switch = 0;
+	dev->last_checked = 0;
+	init_timer( &dev->timer );
+	init_waitqueue_head( &dev->context_wait );
+#if DRM_DMA_HISTO
+	memset( &dev->histo, 0, sizeof(dev->histo) );
+#endif
+	dev->ctx_start = 0;
+	dev->lck_start = 0;
+
+	/* Reset the DRM log ring buffer (read via /dev/drm read()). */
+	dev->buf_rp = dev->buf;
+	dev->buf_wp = dev->buf;
+	dev->buf_end = dev->buf + DRM_BSZ;
+	dev->buf_async = NULL;
+	init_waitqueue_head( &dev->buf_readers );
+	init_waitqueue_head( &dev->buf_writers );
+
+	DRM_DEBUG( "\n" );
+
+	/* The kernel's context could be created here, but is now created
+	 * in drm_dma_enqueue.	This is more resource-efficient for
+	 * hardware that does not do DMA, but may mean that
+	 * drm_select_queue fails between the time the interrupt is
+	 * initialized and the time the queues are initialized.
+	 */
+
+	return 0;
+}
+
+
+/* Release all per-device resources acquired by mach64_setup() and the
+ * ioctl paths: the magic (authentication) hash, AGP memory list, vma
+ * list, register/framebuffer/SHM mappings, DMA queues and buffers.
+ * Called when the last file handle closes and again at module unload.
+ * NOTE: dev->agp itself is deliberately left allocated here; it is
+ * freed by mach64_cleanup().  Always returns 0.
+ */
+static int mach64_takedown( drm_device_t *dev )
+{
+	drm_magic_entry_t *pt, *next;
+	drm_map_t *map;
+	drm_vma_entry_t *vma, *vma_next;
+	int i;
+
+	DRM_DEBUG( "\n" );
+
+	if ( dev->irq )
+		mach64_irq_uninstall( dev );
+
+	down( &dev->struct_sem );
+	del_timer( &dev->timer );
+
+	if ( dev->devname ) {
+		drm_free( dev->devname, strlen( dev->devname ) + 1,
+			  DRM_MEM_DRIVER );
+		dev->devname = NULL;
+	}
+
+	if ( dev->unique ) {
+		drm_free( dev->unique, strlen( dev->unique ) + 1,
+			  DRM_MEM_DRIVER );
+		dev->unique = NULL;
+		dev->unique_len = 0;
+	}
+
+	/* Clear pid list */
+	for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
+		for ( pt = dev->magiclist[i].head ; pt ; pt = next ) {
+			next = pt->next;
+			drm_free( pt, sizeof(*pt), DRM_MEM_MAGIC );
+		}
+		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
+	}
+
+	/* Clear AGP information */
+	if ( dev->agp ) {
+		drm_agp_mem_t *entry;
+		drm_agp_mem_t *nexte;
+
+		/* Remove AGP resources, but leave dev->agp intact until
+		 * mach64_cleanup is called.
+		 */
+		for ( entry = dev->agp->memory ; entry ; entry = nexte ) {
+			nexte = entry->next;
+			if ( entry->bound )
+				drm_unbind_agp( entry->memory );
+			drm_free_agp( entry->memory, entry->pages );
+			drm_free( entry, sizeof(*entry), DRM_MEM_AGPLISTS );
+		}
+		dev->agp->memory = NULL;
+
+		if ( dev->agp->acquired && drm_agp.release )
+			(*drm_agp.release)();
+
+		dev->agp->acquired = 0;
+		dev->agp->enabled = 0;
+	}
+
+	/* Clear vma list (only built for debugging) */
+	if ( dev->vmalist ) {
+		for ( vma = dev->vmalist ; vma ; vma = vma_next ) {
+			vma_next = vma->next;
+			drm_free( vma, sizeof(*vma), DRM_MEM_VMAS );
+		}
+		dev->vmalist = NULL;
+	}
+
+	/* Clear map area and mtrr information */
+	if ( dev->maplist ) {
+		for ( i = 0 ; i < dev->map_count ; i++ ) {
+			map = dev->maplist[i];
+			switch ( map->type ) {
+			case _DRM_REGISTERS:
+			case _DRM_FRAME_BUFFER:
+#ifdef CONFIG_MTRR
+				if ( map->mtrr >= 0 ) {
+					int retcode;
+					retcode = mtrr_del( map->mtrr,
+							    map->offset,
+							    map->size );
+					DRM_DEBUG( "mtrr_del=%d\n", retcode );
+				}
+#endif
+				drm_ioremapfree( map->handle, map->size );
+				break;
+			case _DRM_SHM:
+				drm_free_pages( (unsigned long)map->handle,
+						drm_order( map->size )
+						- PAGE_SHIFT,
+						DRM_MEM_SAREA );
+				break;
+			case _DRM_AGP:
+				/* AGP memory was freed with the list above. */
+				break;
+			}
+			drm_free( map, sizeof(*map), DRM_MEM_MAPS );
+		}
+		drm_free( dev->maplist,
+			  dev->map_count * sizeof(*dev->maplist),
+			  DRM_MEM_MAPS );
+		dev->maplist = NULL;
+		dev->map_count = 0;
+	}
+
+	if ( dev->queuelist ) {
+		for ( i = 0 ; i < dev->queue_count ; i++ ) {
+			/* BUGFIX: test the entry for NULL before using
+			 * it -- the old code destroyed the waitlist
+			 * through a possibly-NULL pointer before the
+			 * NULL check.
+			 */
+			if ( dev->queuelist[i] ) {
+				drm_waitlist_destroy( &dev->queuelist[i]->waitlist );
+				drm_free( dev->queuelist[i],
+					  sizeof(*dev->queuelist[0]),
+					  DRM_MEM_QUEUES );
+				dev->queuelist[i] = NULL;
+			}
+		}
+		drm_free( dev->queuelist,
+			  dev->queue_slots * sizeof(*dev->queuelist),
+			  DRM_MEM_QUEUES );
+		dev->queuelist = NULL;
+	}
+
+	drm_dma_takedown( dev );
+
+	dev->queue_count = 0;
+	if ( dev->lock.hw_lock ) {
+		dev->lock.hw_lock = NULL; /* SHM removed */
+		dev->lock.pid = 0;
+		wake_up_interruptible( &dev->lock.lock_queue );
+	}
+	up( &dev->struct_sem );
+
+	return 0;
+}
+
+/* mach64_init is called via init_module at module load time, or via
+ * linux/init/main.c (this is not currently supported). */
+
+static int mach64_init( void )
+{
+	int retcode;
+	drm_device_t *dev = &mach64_device;
+
+	DRM_DEBUG( "\n" );
+
+	memset( (void *)dev, 0, sizeof(*dev) );
+	dev->count_lock = SPIN_LOCK_UNLOCKED;
+	sema_init( &dev->struct_sem, 1 );
+
+#ifdef MODULE
+	/* Parse the "mach64=..." insmod option string. */
+	drm_parse_options( mach64 );
+#endif
+	DRM_DEBUG( "doing misc_register\n" );
+	retcode = misc_register( &mach64_misc );
+	if ( retcode ) {
+		DRM_ERROR( "Cannot register \"%s\"\n", MACH64_NAME );
+		return retcode;
+	}
+	dev->device = MKDEV( MISC_MAJOR, mach64_misc.minor );
+	dev->name = MACH64_NAME;
+
+	DRM_DEBUG( "doing mem init\n" );
+	drm_mem_init();
+	DRM_DEBUG( "doing proc init\n" );
+	drm_proc_init( dev );
+	DRM_DEBUG( "doing agp init\n" );
+	/* AGP is mandatory for this driver: bail out (undoing proc and
+	 * misc registration) if the agpgart module is not loaded.
+	 */
+	dev->agp = drm_agp_init();
+	if ( dev->agp == NULL ) {
+		DRM_INFO( "The mach64 drm module requires the agpgart module"
+			  " to function correctly\nPlease load the agpgart"
+			  " module before you load the mach64 module\n" );
+		drm_proc_cleanup();
+		misc_deregister( &mach64_misc );
+		mach64_takedown( dev );
+		return -ENOMEM;
+	}
+	DRM_DEBUG( "doing ctxbitmap init\n" );
+	retcode = drm_ctxbitmap_init( dev );
+	if ( retcode ) {
+		DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
+		drm_proc_cleanup();
+		misc_deregister( &mach64_misc );
+		mach64_takedown( dev );
+		return retcode;
+	}
+
+	DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d\n",
+		  MACH64_NAME,
+		  MACH64_MAJOR,
+		  MACH64_MINOR,
+		  MACH64_PATCHLEVEL,
+		  MACH64_DATE,
+		  mach64_misc.minor );
+
+	return 0;
+}
+
+/* mach64_cleanup is called via cleanup_module at module unload time. */
+
+static void mach64_cleanup( void )
+{
+	drm_device_t *dev = &mach64_device;
+
+	DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+	drm_proc_cleanup();
+	if ( misc_deregister( &mach64_misc ) ) {
+		DRM_ERROR( "Cannot unload module\n" );
+	} else {
+		DRM_INFO( "Module unloaded\n" );
+	}
+	drm_ctxbitmap_cleanup( dev );
+	mach64_takedown( dev );
+	/* mach64_takedown() leaves dev->agp allocated; free it here. */
+	if ( dev->agp ) {
+		drm_agp_uninit();
+		drm_free( dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS );
+		dev->agp = NULL;
+	}
+}
+
+
+/* Register the module entry/exit points. */
+module_init( mach64_init );
+module_exit( mach64_cleanup );
+
+
+/* DRM_IOCTL_VERSION handler: report the driver's version numbers and
+ * copy the name/date/description strings to user space.  For each
+ * string, at most the user-supplied buffer length is copied, but the
+ * full string length is reported back so callers can re-query with a
+ * larger buffer.  Returns 0 or -EFAULT.
+ */
+int mach64_version( struct inode *inode, struct file *filp,
+		    unsigned int cmd, unsigned long arg )
+{
+	drm_version_t version;
+	int len;
+
+	if ( copy_from_user( &version, (drm_version_t *)arg,
+			     sizeof(version) ) )
+		return -EFAULT;
+
+/* NOTE: multi-statement macro, not do{}while(0)-wrapped -- only safe
+ * when expanded as a bare statement, as it is below.
+ */
+#define DRM_COPY( name, value ) \
+	len = strlen( value ); \
+	if ( len > name##_len ) len = name##_len; \
+	name##_len = strlen( value ); \
+	if ( len && name ) { \
+		if ( copy_to_user( name, value, len ) ) \
+			return -EFAULT; \
+	}
+
+	version.version_major = MACH64_MAJOR;
+	version.version_minor = MACH64_MINOR;
+	version.version_patchlevel = MACH64_PATCHLEVEL;
+
+	DRM_COPY( version.name, MACH64_NAME );
+	DRM_COPY( version.date, MACH64_DATE );
+	DRM_COPY( version.desc, MACH64_DESC );
+
+	if ( copy_to_user( (drm_version_t *)arg, &version, sizeof(version) ) )
+		return -EFAULT;
+	return 0;
+}
+
+/* Open handler for the DRM device node.  Delegates per-file setup to
+ * drm_open_helper(); on the first concurrent open (open_count 0 -> 1)
+ * it also runs full device initialization via mach64_setup().
+ */
+int mach64_open( struct inode *inode, struct file *filp )
+{
+	drm_device_t *dev = &mach64_device;
+	int retcode = 0;
+
+	DRM_DEBUG( "open_count = %d\n", dev->open_count );
+	retcode = drm_open_helper( inode, filp, dev );
+	if ( !retcode ) {
+#if LINUX_VERSION_CODE < 0x020333
+		MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
+#endif
+		atomic_inc( &dev->total_open );
+		spin_lock( &dev->count_lock );
+		if ( !dev->open_count++ ) {
+			/* First opener initializes the device state. */
+			spin_unlock( &dev->count_lock );
+			return mach64_setup( dev );
+		}
+		spin_unlock( &dev->count_lock );
+	}
+	return retcode;
+}
+
+/* Release (close) handler.  Reclaims any DMA buffers owned by the
+ * closing process -- taking the hardware lock first if the process
+ * does not already hold it -- unlinks the per-file private data, and
+ * tears the whole device down when the last handle closes.
+ */
+int mach64_release( struct inode *inode, struct file *filp )
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev;
+	int retcode = 0;
+
+	lock_kernel();
+	dev = priv->dev;
+	DRM_DEBUG( "pid = %d, device = 0x%x, open_count = %d\n",
+		   current->pid, dev->device, dev->open_count );
+
+	/* Case 1: the dying process holds the hardware lock -- reclaim
+	 * its buffers and force-free the lock.
+	 */
+	if ( dev->lock.hw_lock && _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock )
+	     && dev->lock.pid == current->pid ) {
+		mach64_reclaim_buffers( dev, priv->pid );
+		DRM_ERROR( "Process %d dead, freeing lock for context %d\n",
+			   current->pid,
+			   _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
+		drm_lock_free( dev,
+			       &dev->lock.hw_lock->lock,
+			       _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
+
+		/* FIXME: may require heavy-handed reset of hardware at this
+		 * point, possibly processed via a callback to the X server.
+		 */
+	} else if ( dev->lock.hw_lock ) {
+		/* The lock is required to reclaim buffers */
+		DECLARE_WAITQUEUE( entry, current );
+		add_wait_queue( &dev->lock.lock_queue, &entry );
+		while ( 1 ) {
+			if ( !dev->lock.hw_lock ) {
+				/* Device has been unregistered */
+				retcode = -EINTR;
+				break;
+			}
+			if ( drm_lock_take( &dev->lock.hw_lock->lock,
+					    DRM_KERNEL_CONTEXT ) ) {
+				dev->lock.pid	    = priv->pid;
+				dev->lock.lock_time = jiffies;
+				atomic_inc( &dev->total_locks );
+				break;	/* Got lock */
+			}
+			/* Contention */
+			atomic_inc( &dev->total_sleeps );
+			current->state = TASK_INTERRUPTIBLE;
+			schedule();
+			if ( signal_pending( current ) ) {
+				retcode = -ERESTARTSYS;
+				break;
+			}
+		}
+		current->state = TASK_RUNNING;
+		remove_wait_queue( &dev->lock.lock_queue, &entry );
+		if ( !retcode ) {
+			mach64_reclaim_buffers( dev, priv->pid );
+			drm_lock_free( dev, &dev->lock.hw_lock->lock,
+				       DRM_KERNEL_CONTEXT );
+		}
+	}
+	drm_fasync( -1, filp, 0 );
+
+	/* Unlink this file's private data from the device list. */
+	down( &dev->struct_sem );
+	if ( priv->prev ) {
+		priv->prev->next = priv->next;
+	} else {
+		dev->file_first	 = priv->next;
+	}
+	if ( priv->next ) {
+		priv->next->prev = priv->prev;
+	} else {
+		dev->file_last	 = priv->prev;
+	}
+	up( &dev->struct_sem );
+
+	drm_free( priv, sizeof(*priv), DRM_MEM_FILES );
+#if LINUX_VERSION_CODE < 0x020333
+	MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
+#endif
+	atomic_inc( &dev->total_close );
+	spin_lock( &dev->count_lock );
+	/* Last close: refuse if ioctls are still in flight, otherwise
+	 * tear the device down.
+	 */
+	if ( !--dev->open_count ) {
+		if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
+			DRM_ERROR( "Device busy: %d %d\n",
+				   atomic_read( &dev->ioctl_count ),
+				   dev->blocked );
+			spin_unlock( &dev->count_lock );
+			unlock_kernel();
+			return -EBUSY;
+		}
+		spin_unlock( &dev->count_lock );
+		unlock_kernel();
+		return mach64_takedown( dev );
+	}
+	spin_unlock( &dev->count_lock );
+	unlock_kernel();
+	return retcode;
+}
+
+/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm. */
+
+/* Central ioctl dispatcher: bounds-checks the ioctl number, enforces
+ * the per-entry root-only and authentication flags from
+ * mach64_ioctls[], and calls the handler.  ioctl_count brackets the
+ * call so mach64_release() can detect in-flight ioctls.
+ */
+int mach64_ioctl( struct inode *inode, struct file *filp,
+		  unsigned int cmd, unsigned long arg )
+{
+	int nr = DRM_IOCTL_NR( cmd );
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->dev;
+	int retcode = 0;
+	drm_ioctl_desc_t *ioctl;
+	drm_ioctl_t *func;
+
+	atomic_inc( &dev->ioctl_count );
+	atomic_inc( &dev->total_ioctl );
+	++priv->ioctl_count;
+
+	DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%x, auth=%d\n",
+		   current->pid, cmd, nr, dev->device, priv->authenticated );
+
+	if ( nr >= MACH64_IOCTL_COUNT ) {
+		retcode = -EINVAL;
+	} else {
+		ioctl = &mach64_ioctls[nr];
+		func = ioctl->func;
+
+		if ( !func ) {
+			/* Hole in the table: no handler registered. */
+			DRM_DEBUG( "no function\n" );
+			retcode = -EINVAL;
+		} else if ( ( ioctl->root_only &&
+			      !capable( CAP_SYS_ADMIN ) ) ||
+			    ( ioctl->auth_needed &&
+			      !priv->authenticated ) ) {
+			retcode = -EACCES;
+		} else {
+			retcode = (func)( inode, filp, cmd, arg );
+		}
+	}
+
+	atomic_dec( &dev->ioctl_count );
+	return retcode;
+}
+
+/* DRM_IOCTL_UNLOCK handler: release the hardware lock held by the
+ * caller's context.  The lock is first transferred to the kernel
+ * context, then freed unless a context switch is pending
+ * (dev->context_flag set).  Rejects attempts to unlock the kernel
+ * context directly.
+ */
+int mach64_unlock( struct inode *inode, struct file *filp,
+		   unsigned int cmd, unsigned long arg )
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->dev;
+	drm_lock_t lock;
+
+	if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
+		return -EFAULT;
+
+	if ( lock.context == DRM_KERNEL_CONTEXT ) {
+		DRM_ERROR( "Process %d using kernel context %d\n",
+			   current->pid, lock.context );
+		return -EINVAL;
+	}
+
+	DRM_DEBUG( "%d frees lock (%d holds)\n",
+		   lock.context,
+		   _DRM_LOCKING_CONTEXT( dev->lock.hw_lock->lock ) );
+	atomic_inc( &dev->total_unlocks );
+	if ( _DRM_LOCK_IS_CONT( dev->lock.hw_lock->lock ) )
+		atomic_inc( &dev->total_contends );
+	/* Hand the lock to the kernel context before freeing it. */
+	drm_lock_transfer( dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT );
+	if ( !dev->context_flag ) {
+		if ( drm_lock_free( dev, &dev->lock.hw_lock->lock,
+				    DRM_KERNEL_CONTEXT ) ) {
+			DRM_ERROR( "\n" );
+		}
+	}
+#if DRM_DMA_HISTOGRAM
+	atomic_inc( &dev->histo.lhld[drm_histogram_slot(get_cycles()
+							- dev->lck_start)] );
+#endif
+
+	return 0;
+}
diff --git a/linux/mach64_bufs.c b/linux/mach64_bufs.c
new file mode 100644
index 00000000..d907481a
--- /dev/null
+++ b/linux/mach64_bufs.c
@@ -0,0 +1,491 @@
+/* mach64_bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
+ * Created: Thu Jan 6 01:47:26 2000 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "mach64_drv.h"
+#include "linux/un.h"
+
+/* Allocate DMA buffers out of AGP space for one size order: carves
+ * `count` buffers of 2^order bytes out of the AGP aperture starting
+ * at request.agp_start and appends them to dev->dma.  May only be
+ * called once per order, and not while the device's buffers are
+ * already in use (buf_use set or queues active).
+ */
+int mach64_addbufs_agp( struct inode *inode, struct file *filp,
+			unsigned int cmd, unsigned long arg )
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->dev;
+	drm_device_dma_t *dma = dev->dma;
+	drm_buf_desc_t request;
+	drm_buf_entry_t *entry;
+	drm_buf_t *buf;
+	unsigned long offset;
+	unsigned long agp_offset;
+	int count;
+	int order;
+	int size;
+	int alignment;
+	int page_order;
+	int total;
+	int byte_count;
+	int i;
+
+	if ( !dma )
+		return -EINVAL;
+
+	if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
+			     sizeof(request) ) )
+		return -EFAULT;
+
+	count = request.count;
+	order = drm_order( request.size );
+	size = 1 << order;
+	agp_offset = request.agp_start;
+	alignment = ( request.flags & _DRM_PAGE_ALIGN ) ?
+		PAGE_ALIGN(size) : size;
+	page_order = ( order - PAGE_SHIFT > 0 ) ? order - PAGE_SHIFT : 0;
+	total = PAGE_SIZE << page_order;
+	byte_count = 0;
+
+	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
+		return -EINVAL;
+	if ( dev->queue_count )
+		return -EBUSY; /* Not while in use */
+
+	/* buf_use != 0 means the buffer map has been handed out; no
+	 * further allocation is allowed after that point.
+	 */
+	spin_lock( &dev->count_lock );
+	if ( dev->buf_use ) {
+		spin_unlock( &dev->count_lock );
+		return -EBUSY;
+	}
+	atomic_inc( &dev->buf_alloc );
+	spin_unlock( &dev->count_lock );
+
+	down( &dev->struct_sem );
+	entry = &dma->bufs[order];
+	if ( entry->buf_count ) {
+		up( &dev->struct_sem );
+		atomic_dec( &dev->buf_alloc );
+		return -ENOMEM; /* May only call once for each order */
+	}
+
+	entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
+				    DRM_MEM_BUFS );
+	if ( !entry->buflist ) {
+		up( &dev->struct_sem );
+		atomic_dec( &dev->buf_alloc );
+		return -ENOMEM;
+	}
+	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
+
+	entry->buf_size = size;
+	entry->page_order = page_order;
+	offset = 0;
+
+	while ( entry->buf_count < count ) {
+		buf = &entry->buflist[entry->buf_count];
+		buf->idx = dma->buf_count + entry->buf_count;
+		buf->total = alignment;
+		buf->order = order;
+		buf->used = 0;
+		buf->offset = offset;
+		buf->bus_address = dev->agp->base + agp_offset + offset;
+		buf->address = (void *)(agp_offset + offset + dev->agp->base);
+		buf->next = NULL;
+		buf->waiting = 0;
+		buf->pending = 0;
+		init_waitqueue_head( &buf->dma_wait );
+		buf->pid = 0;
+
+		buf->dev_private = drm_alloc( sizeof(drm_mach64_buf_priv_t),
+					      DRM_MEM_BUFS );
+		if ( !buf->dev_private ) {
+			/* BUGFIX: this allocation was previously used
+			 * unchecked, so a failure led to memset()
+			 * dereferencing NULL.  Roll back everything
+			 * built so far and fail cleanly.
+			 */
+			for ( i = 0 ; i < entry->buf_count ; i++ ) {
+				drm_free( entry->buflist[i].dev_private,
+					  sizeof(drm_mach64_buf_priv_t),
+					  DRM_MEM_BUFS );
+			}
+			drm_free( entry->buflist,
+				  count * sizeof(*entry->buflist),
+				  DRM_MEM_BUFS );
+			entry->buflist = NULL;
+			entry->buf_count = 0;
+			up( &dev->struct_sem );
+			atomic_dec( &dev->buf_alloc );
+			return -ENOMEM;
+		}
+		buf->dev_priv_size = sizeof(drm_mach64_buf_priv_t);
+		memset( buf->dev_private, 0, sizeof(drm_mach64_buf_priv_t) );
+
+#if DRM_DMA_HISTOGRAM
+		buf->time_queued = 0;
+		buf->time_dispatched = 0;
+		buf->time_completed = 0;
+		buf->time_freed = 0;
+#endif
+		offset = offset + alignment;
+		entry->buf_count++;
+		byte_count += PAGE_SIZE << page_order;
+
+		DRM_DEBUG( "buffer %d @ %p\n",
+			   entry->buf_count, buf->address );
+	}
+
+	/* Append the new buffers to the device-wide buffer list. */
+	dma->buflist = drm_realloc( dma->buflist,
+				    dma->buf_count * sizeof(*dma->buflist),
+				    (dma->buf_count + entry->buf_count)
+				    * sizeof(*dma->buflist),
+				    DRM_MEM_BUFS );
+	for ( i = 0 ; i < entry->buf_count ; i++ ) {
+		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+	}
+
+	dma->buf_count += entry->buf_count;
+	dma->byte_count += byte_count;
+	drm_freelist_create( &entry->freelist, entry->buf_count );
+	for ( i = 0 ; i < entry->buf_count ; i++ ) {
+		drm_freelist_put( dev, &entry->freelist, &entry->buflist[i] );
+	}
+
+	up( &dev->struct_sem );
+
+	request.count = entry->buf_count;
+	request.size = size;
+
+	if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
+		return -EFAULT;
+
+	atomic_dec( &dev->buf_alloc );
+	dma->flags = _DRM_DMA_USE_AGP;
+	return 0;
+}
+
+/* Allocate DMA buffers from system (PCI) memory for one size order:
+ * allocates pages of 2^page_order, carves buffers of 2^order bytes
+ * out of each page, and appends them to dev->dma.  If a page
+ * allocation fails mid-way, the loop breaks and the (smaller) actual
+ * count is reported back to the caller in request.count.
+ * NOTE(review): the drm_realloc() results for pagelist/buflist are
+ * used unchecked -- confirm against the generic drm_bufs.c behaviour.
+ */
+int mach64_addbufs_pci(struct inode *inode, struct file *filp,
+		       unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->dev;
+	drm_device_dma_t *dma = dev->dma;
+	drm_buf_desc_t request;
+	int count;
+	int order;
+	int size;
+	int total;
+	int page_order;
+	drm_buf_entry_t *entry;
+	unsigned long page;
+	drm_buf_t *buf;
+	int alignment;
+	unsigned long offset;
+	int i;
+	int byte_count;
+	int page_count;
+
+	if (!dma) return -EINVAL;
+
+	copy_from_user_ret(&request,
+			   (drm_buf_desc_t *)arg,
+			   sizeof(request),
+			   -EFAULT);
+
+	count = request.count;
+	order = drm_order(request.size);
+	size = 1 << order;
+
+	DRM_DEBUG("count = %d, size = %d (%d), order = %d, queue_count = %d\n",
+		  request.count, request.size, size, order, dev->queue_count);
+
+	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
+	if (dev->queue_count) return -EBUSY; /* Not while in use */
+
+	alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) :size;
+	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+	total = PAGE_SIZE << page_order;
+
+	/* Refuse new allocations once the buffer map is in use. */
+	spin_lock(&dev->count_lock);
+	if (dev->buf_use) {
+		spin_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
+	atomic_inc(&dev->buf_alloc);
+	spin_unlock(&dev->count_lock);
+
+	down(&dev->struct_sem);
+	entry = &dma->bufs[order];
+	if (entry->buf_count) {
+		up(&dev->struct_sem);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM; /* May only call once for each order */
+	}
+
+	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+				   DRM_MEM_BUFS);
+	if (!entry->buflist) {
+		up(&dev->struct_sem);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
+				   DRM_MEM_SEGS);
+	if (!entry->seglist) {
+		drm_free(entry->buflist,
+			 count * sizeof(*entry->buflist),
+			 DRM_MEM_BUFS);
+		up(&dev->struct_sem);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	memset(entry->seglist, 0, count * sizeof(*entry->seglist));
+
+	/* Grow the device page list to make room for the new pages. */
+	dma->pagelist = drm_realloc(dma->pagelist,
+				    dma->page_count * sizeof(*dma->pagelist),
+				    (dma->page_count + (count << page_order))
+				    * sizeof(*dma->pagelist),
+				    DRM_MEM_PAGES);
+	DRM_DEBUG("pagelist: %d entries\n",
+		  dma->page_count + (count << page_order));
+
+
+	entry->buf_size = size;
+	entry->page_order = page_order;
+	byte_count = 0;
+	page_count = 0;
+	while (entry->buf_count < count) {
+		/* Page allocation failure: stop and return what we got. */
+		if (!(page = drm_alloc_pages(page_order, DRM_MEM_DMA))) break;
+		entry->seglist[entry->seg_count++] = page;
+		for (i = 0; i < (1 << page_order); i++) {
+			DRM_DEBUG("page %d @ 0x%08lx\n",
+				  dma->page_count + page_count,
+				  page + PAGE_SIZE * i);
+			dma->pagelist[dma->page_count + page_count++]
+				= page + PAGE_SIZE * i;
+		}
+		/* Carve as many aligned buffers as fit in this page run. */
+		for (offset = 0;
+		     offset + size <= total && entry->buf_count < count;
+		     offset += alignment, ++entry->buf_count) {
+			buf = &entry->buflist[entry->buf_count];
+			buf->idx = dma->buf_count + entry->buf_count;
+			buf->total = alignment;
+			buf->order = order;
+			buf->used = 0;
+			buf->offset = (dma->byte_count + byte_count + offset);
+			buf->address = (void *)(page + offset);
+			buf->next = NULL;
+			buf->waiting = 0;
+			buf->pending = 0;
+			init_waitqueue_head(&buf->dma_wait);
+			buf->pid = 0;
+#if DRM_DMA_HISTOGRAM
+			buf->time_queued = 0;
+			buf->time_dispatched = 0;
+			buf->time_completed = 0;
+			buf->time_freed = 0;
+#endif
+			DRM_DEBUG("buffer %d @ %p\n",
+				  entry->buf_count, buf->address);
+		}
+		byte_count += PAGE_SIZE << page_order;
+	}
+
+	dma->buflist = drm_realloc(dma->buflist,
+				   dma->buf_count * sizeof(*dma->buflist),
+				   (dma->buf_count + entry->buf_count)
+				   * sizeof(*dma->buflist),
+				   DRM_MEM_BUFS);
+	for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
+		dma->buflist[i] = &entry->buflist[i - dma->buf_count];
+
+	dma->buf_count += entry->buf_count;
+	dma->seg_count += entry->seg_count;
+	dma->page_count += entry->seg_count << page_order;
+	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
+
+	drm_freelist_create(&entry->freelist, entry->buf_count);
+	for (i = 0; i < entry->buf_count; i++) {
+		drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
+	}
+
+	up(&dev->struct_sem);
+
+	/* Report the actual (possibly reduced) count back to the caller. */
+	request.count = entry->buf_count;
+	request.size = size;
+
+	if (copy_to_user((drm_buf_desc_t *)arg, &request, sizeof(request)))
+		return -EFAULT;
+
+	atomic_dec(&dev->buf_alloc);
+	return 0;
+}
+
+/* DRM_IOCTL_ADD_BUFS handler: peek at the request flags to decide
+ * whether the buffers live in AGP space or system (PCI) memory, then
+ * hand the untouched ioctl arguments to the matching allocator.
+ */
+int mach64_addbufs( struct inode *inode, struct file *filp,
+		    unsigned int cmd, unsigned long arg )
+{
+	drm_buf_desc_t request;
+
+	if ( copy_from_user( &request,
+			     (drm_buf_desc_t *)arg,
+			     sizeof(request) ) )
+		return -EFAULT;
+
+	if ( request.flags & _DRM_AGP_BUFFER )
+		return mach64_addbufs_agp( inode, filp, cmd, arg );
+
+	return mach64_addbufs_pci( inode, filp, cmd, arg );
+}
+
+/* DRM_IOCTL_INFO_BUFS handler: report, per size order, the buffer
+ * count, size, and freelist watermarks.  Incrementing dev->buf_use
+ * here permanently disables further buffer allocation for this open
+ * cycle (checked by the addbufs paths).  If the user-supplied list
+ * is too small, only the total count is returned so the caller can
+ * retry with a bigger list.
+ */
+int mach64_infobufs( struct inode *inode, struct file *filp,
+		     unsigned int cmd, unsigned long arg )
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->dev;
+	drm_device_dma_t *dma = dev->dma;
+	drm_buf_info_t request;
+	int i;
+	int count;
+
+	if ( !dma )
+		return -EINVAL;
+
+	/* Refuse while an allocation is in flight. */
+	spin_lock( &dev->count_lock );
+	if ( atomic_read( &dev->buf_alloc ) ) {
+		spin_unlock( &dev->count_lock );
+		return -EBUSY;
+	}
+	dev->buf_use++;		/* Can't allocate more after this call */
+	spin_unlock( &dev->count_lock );
+
+	if ( copy_from_user( &request,
+			     (drm_buf_info_t *)arg,
+			     sizeof(request) ) )
+		return -EFAULT;
+
+	/* First pass: count the non-empty size orders. */
+	for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
+		if ( dma->bufs[i].buf_count ) count++;
+	}
+	DRM_DEBUG( "%s: count = %d\n", __FUNCTION__, count );
+
+	/* Second pass: fill the user's list if it is big enough. */
+	if ( request.count >= count ) {
+		for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
+			if ( dma->bufs[i].buf_count ) {
+				if ( copy_to_user( &request.list[count].count,
+						   &dma->bufs[i].buf_count,
+						   sizeof(dma->bufs[0].buf_count) ) )
+					return -EFAULT;
+				if ( copy_to_user( &request.list[count].size,
+						   &dma->bufs[i].buf_size,
+						   sizeof(dma->bufs[0].buf_size) ) )
+					return -EFAULT;
+				if ( copy_to_user( &request.list[count].low_mark,
+						   &dma->bufs[i].freelist.low_mark,
+						   sizeof(dma->bufs[0].freelist.low_mark) ) )
+					return -EFAULT;
+				if ( copy_to_user( &request.list[count].high_mark,
+						   &dma->bufs[i].freelist.high_mark,
+						   sizeof(dma->bufs[0].freelist.high_mark) ) )
+					return -EFAULT;
+				DRM_DEBUG( "%s: %d %d %d %d %d\n",
+					   __FUNCTION__, i,
+					   dma->bufs[i].buf_count,
+					   dma->bufs[i].buf_size,
+					   dma->bufs[i].freelist.low_mark,
+					   dma->bufs[i].freelist.high_mark );
+				count++;
+			}
+		}
+	}
+	request.count = count;
+
+	if ( copy_to_user( (drm_buf_info_t *)arg, &request, sizeof(request) ) )
+		return -EFAULT;
+
+	return 0;
+}
+
+/* DRM_IOCTL_MARK_BUFS handler: set the freelist low/high watermarks
+ * for one buffer size class.  The request's size selects the order
+ * (log2 bucket); both marks must lie within [0, buf_count] for that
+ * bucket.
+ */
+int mach64_markbufs( struct inode *inode, struct file *filp,
+		     unsigned int cmd, unsigned long arg )
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->dev;
+	drm_device_dma_t *dma = dev->dma;
+	drm_buf_desc_t request;
+	drm_buf_entry_t *entry;
+	int order;
+
+	if ( !dma )
+		return -EINVAL;
+
+	if ( copy_from_user( &request,
+			     (drm_buf_desc_t *)arg,
+			     sizeof(request) ) )
+		return -EFAULT;
+
+	DRM_DEBUG( "%s: %d, %d, %d\n",
+		   __FUNCTION__, request.size,
+		   request.low_mark, request.high_mark );
+
+	order = drm_order( request.size );
+	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
+		return -EINVAL;
+
+	entry = &dma->bufs[order];
+	if ( request.low_mark < 0 || request.low_mark > entry->buf_count ||
+	     request.high_mark < 0 || request.high_mark > entry->buf_count )
+		return -EINVAL;
+
+	entry->freelist.low_mark = request.low_mark;
+	entry->freelist.high_mark = request.high_mark;
+
+	return 0;
+}
+
+/* DRM_IOCTL_FREE_BUFS handler: return a list of buffers (by index) to
+ * the free pool.  Each index is range-checked and the buffer must be
+ * owned by the calling process.
+ */
+int mach64_freebufs( struct inode *inode, struct file *filp,
+		     unsigned int cmd, unsigned long arg )
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->dev;
+	drm_device_dma_t *dma = dev->dma;
+	drm_buf_free_t request;
+	int i;
+	int idx;
+	drm_buf_t *buf;
+
+	if ( !dma )
+		return -EINVAL;
+
+	if ( copy_from_user( &request,
+			     (drm_buf_free_t *)arg,
+			     sizeof(request) ) )
+		return -EFAULT;
+
+	DRM_DEBUG( "%s: count = %d\n", __FUNCTION__, request.count );
+	for ( i = 0 ; i < request.count ; i++ ) {
+		/* Each index is fetched from user space individually. */
+		if ( copy_from_user( &idx,
+				     &request.list[i],
+				     sizeof(idx) ) )
+			return -EFAULT;
+		if ( idx < 0 || idx >= dma->buf_count ) {
+			DRM_ERROR( "Index %d (of %d max)\n",
+				   idx, dma->buf_count - 1 );
+			return -EINVAL;
+		}
+		buf = dma->buflist[idx];
+		/* Only the owning process may free a buffer. */
+		if ( buf->pid != current->pid ) {
+			DRM_ERROR( "Process %d freeing buffer owned by %d\n",
+				   current->pid, buf->pid );
+			return -EINVAL;
+		}
+		drm_free_buffer( dev, buf );
+	}
+
+	return 0;
+}
diff --git a/linux/mach64_context.c b/linux/mach64_context.c
new file mode 100644
index 00000000..ec3580d2
--- /dev/null
+++ b/linux/mach64_context.c
@@ -0,0 +1,211 @@
+/* mach64_context.c -- IOCTLs for mach64 contexts -*- linux-c -*-
+ * Created: Mon Dec 13 09:51:35 1999 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 2000 Gareth Hughes
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "mach64_drv.h"
+
+static int mach64_alloc_queue(drm_device_t *dev)
+{
+ int temp = drm_ctxbitmap_next(dev);
+ DRM_DEBUG("mach64_alloc_queue: %d\n", temp);
+ return temp;
+}
+
/* Begin a switch from context `old` to context `new`.  Returns 0 on
 * success (including the no-op case where `new` is already current)
 * and -EBUSY if another switch is already in flight.
 *
 * The switch is not completed here unless DRM_FLAG_NOCTX is set:
 * otherwise a "C old new" string is queued on the device (read via
 * drm_read by the lock holder, typically the X server) and the switch
 * finishes later through mach64_context_switch_complete().
 */
int mach64_context_switch(drm_device_t *dev, int old, int new)
{
	char buf[64];

	atomic_inc(&dev->total_ctx);

	/* Bit 0 of context_flag serializes switches; reentry is a bug. */
	if (test_and_set_bit(0, &dev->context_flag)) {
		DRM_ERROR("Reentering -- FIXME\n");
		return -EBUSY;
	}

#if DRM_DMA_HISTOGRAM
	dev->ctx_start = get_cycles();
#endif

	DRM_DEBUG("Context switch from %d to %d\n", old, new);

	/* Already current: drop the in-progress bit and succeed. */
	if (new == dev->last_context) {
		clear_bit(0, &dev->context_flag);
		return 0;
	}

	if (drm_flags & DRM_FLAG_NOCTX) {
		mach64_context_switch_complete(dev, new);
	} else {
		/* Ask user space to perform the hardware switch. */
		sprintf(buf, "C %d %d\n", old, new);
		drm_write_string(dev, buf);
	}

	return 0;
}
+
/* Finish a context switch started by mach64_context_switch(): record
 * `new` as the current context, clear the switch-in-progress bit and
 * wake any sleepers on context_wait.  Always returns 0.
 */
int mach64_context_switch_complete(drm_device_t *dev, int new)
{
	dev->last_context = new;  /* PRE/POST: This is the _only_ writer. */
	dev->last_switch = jiffies;

	/* The hardware lock is expected to be held across the switch. */
	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
		DRM_ERROR("Lock isn't held after context switch\n");
	}

	/* If a context switch is ever initiated
	   when the kernel holds the lock, release
	   that lock here. */
#if DRM_DMA_HISTOGRAM
	atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles()
						      - dev->ctx_start)]);

#endif
	clear_bit(0, &dev->context_flag);
	wake_up(&dev->context_wait);

	return 0;
}
+
+int mach64_resctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_ctx_res_t res;
+ drm_ctx_t ctx;
+ int i;
+
+ DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
+ if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res)))
+ return -EFAULT;
+ if (res.count >= DRM_RESERVED_CONTEXTS) {
+ memset(&ctx, 0, sizeof(ctx));
+ for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+ ctx.handle = i;
+ if (copy_to_user(&res.contexts[i], &i, sizeof(i)))
+ return -EFAULT;
+ }
+ }
+ res.count = DRM_RESERVED_CONTEXTS;
+ if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res)))
+ return -EFAULT;
+ return 0;
+}
+
+int mach64_addctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ return -EFAULT;
+ if ((ctx.handle = mach64_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
+ /* Skip kernel's context and get a new one. */
+ ctx.handle = mach64_alloc_queue(dev);
+ }
+ if (ctx.handle == -1) {
+ DRM_DEBUG("Not enough free contexts.\n");
+ /* Should this return -EBUSY instead? */
+ return -ENOMEM;
+ }
+ DRM_DEBUG("%d\n", ctx.handle);
+ if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
+ return -EFAULT;
+ return 0;
+}
+
/* DRM_IOCTL_MOD_CTX: modify per-context state.  The mach64 driver
 * keeps no such state, so this is a deliberate no-op kept only to
 * satisfy the ioctl table.
 */
int mach64_modctx(struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg)
{
	/* This does nothing for the mach64 */
	return 0;
}
+
/* DRM_IOCTL_GET_CTX: report a context's flags.  mach64 supports no
 * context flags, so flags is always reported as 0.
 */
int mach64_getctx(struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg)
{
	drm_ctx_t ctx;

	if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx)))
		return -EFAULT;
	/* This is 0, because we don't handle any context flags */
	ctx.flags = 0;
	if (copy_to_user((drm_ctx_t*)arg, &ctx, sizeof(ctx)))
		return -EFAULT;
	return 0;
}
+
+int mach64_switchctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ return -EFAULT;
+ DRM_DEBUG("%d\n", ctx.handle);
+ return mach64_context_switch(dev, dev->last_context, ctx.handle);
+}
+
+int mach64_newctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ return -EFAULT;
+ DRM_DEBUG("%d\n", ctx.handle);
+ mach64_context_switch_complete(dev, ctx.handle);
+
+ return 0;
+}
+
+int mach64_rmctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ return -EFAULT;
+ DRM_DEBUG("%d\n", ctx.handle);
+ if(ctx.handle != DRM_KERNEL_CONTEXT) {
+ drm_ctxbitmap_free(dev, ctx.handle);
+ }
+
+ return 0;
+}
diff --git a/linux/mach64_drv.c b/linux/mach64_drv.c
new file mode 100644
index 00000000..14b7fb19
--- /dev/null
+++ b/linux/mach64_drv.c
@@ -0,0 +1,664 @@
+/* mach64_drv.c -- mach64 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#include <linux/config.h>
+#include "drmP.h"
+#include "mach64_drv.h"
+
+#define MACH64_NAME "mach64"
+#define MACH64_DESC "ATI Mach64 (Rage Pro)"
+#define MACH64_DATE "20000908"
+#define MACH64_MAJOR 1
+#define MACH64_MINOR 0
+#define MACH64_PATCHLEVEL 0
+
+static drm_device_t mach64_device;
+drm_ctx_t mach64_res_ctx;
+
/* File operations for the mach64 device node.  Written with the GNU C
 * "label:" initializer syntax used by 2.2/2.4-era kernels.  Generic
 * DRM handlers cover most entries; open, release and ioctl need
 * driver-specific processing.
 */
static struct file_operations mach64_fops = {
#if LINUX_VERSION_CODE >= 0x020400
	/* This started being used during 2.4.0-test */
	owner: THIS_MODULE,
#endif
	open: mach64_open,
	flush: drm_flush,
	release: mach64_release,
	ioctl: mach64_ioctl,
	mmap: drm_mmap,
	read: drm_read,
	fasync: drm_fasync,
	poll: drm_poll,
};
+
/* Misc-device registration record: requests a dynamically assigned
 * minor under the misc major, registered in mach64_init().
 */
static struct miscdevice mach64_misc = {
	minor: MISC_DYNAMIC_MINOR,
	name: MACH64_NAME,
	fops: &mach64_fops,
};
+
/* Ioctl dispatch table, indexed by ioctl number.  mach64_ioctl()
 * rejects calls when the entry's auth_needed flag is set and the
 * caller is unauthenticated, or when root_only is set and the caller
 * lacks CAP_SYS_ADMIN.
 * NOTE(review): the order of the two flag fields ({func, auth_needed,
 * root_only}) is assumed from drm_ioctl_desc_t — confirm in drmP.h.
 */
static drm_ioctl_desc_t mach64_ioctls[] = {
	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { mach64_version, 0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { mach64_control, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { mach64_addbufs, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { mach64_markbufs, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { mach64_infobufs, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { mach64_freebufs, 1, 0 },

	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { mach64_addctx, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { mach64_rmctx, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { mach64_modctx, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { mach64_getctx, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { mach64_switchctx, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { mach64_newctx, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { mach64_resctx, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { mach64_lock, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { mach64_unlock, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },

	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },

	/* Driver-private mach64 ioctls. */
	[DRM_IOCTL_NR(DRM_IOCTL_MACH64_INIT)] = { mach64_dma_init, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MACH64_VERTEX)] = { mach64_dma_vertex, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_MACH64_CLEAR)] = { mach64_clear_bufs, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_MACH64_FLUSH)] = { mach64_flush_ioctl, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_MACH64_GETAGE)] = { mach64_getage, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_MACH64_GETBUF)] = { mach64_getbuf, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_MACH64_SWAP)] = { mach64_swap_bufs, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_MACH64_COPY)] = { mach64_copybuf, 1, 0 },
};
+
#define MACH64_IOCTL_COUNT DRM_ARRAY_SIZE( mach64_ioctls )

#ifdef MODULE
/* Option string supplied as "mach64=..." at insmod time; handed to
 * drm_parse_options() in mach64_init().
 */
static char *mach64 = NULL;
#endif

MODULE_AUTHOR( "Gareth Hughes <gareth@valinux.com>" );
MODULE_DESCRIPTION( "ATI Mach64 (Rage Pro)" );
MODULE_PARM( mach64, "s" );
+
#ifndef MODULE
/* mach64_options is called by the kernel to parse command-line options
 * passed via the boot-loader (e.g., LILO).  It calls the insmod option
 * routine, drm_parse_options.  Returning 1 tells the kernel the
 * option was consumed.
 */

static int __init mach64_options( char *str )
{
	drm_parse_options( str );
	return 1;
}

__setup( "mach64=", mach64_options );
#endif
+
+static int mach64_setup( drm_device_t *dev )
+{
+ int i;
+
+ atomic_set( &dev->ioctl_count, 0 );
+ atomic_set( &dev->vma_count, 0 );
+ dev->buf_use = 0;
+ atomic_set( &dev->buf_alloc, 0 );
+
+ drm_dma_setup( dev );
+
+ atomic_set( &dev->total_open, 0 );
+ atomic_set( &dev->total_close, 0 );
+ atomic_set( &dev->total_ioctl, 0 );
+ atomic_set( &dev->total_irq, 0 );
+ atomic_set( &dev->total_ctx, 0 );
+ atomic_set( &dev->total_locks, 0 );
+ atomic_set( &dev->total_unlocks, 0 );
+ atomic_set( &dev->total_contends, 0 );
+ atomic_set( &dev->total_sleeps, 0 );
+
+ for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
+ dev->magiclist[i].head = NULL;
+ dev->magiclist[i].tail = NULL;
+ }
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ dev->vmalist = NULL;
+ dev->lock.hw_lock = NULL;
+ init_waitqueue_head( &dev->lock.lock_queue );
+ dev->queue_count = 0;
+ dev->queue_reserved = 0;
+ dev->queue_slots = 0;
+ dev->queuelist = NULL;
+ dev->irq = 0;
+ dev->context_flag = 0;
+ dev->interrupt_flag = 0;
+ dev->dma_flag = 0;
+ dev->last_context = 0;
+ dev->last_switch = 0;
+ dev->last_checked = 0;
+ init_timer( &dev->timer );
+ init_waitqueue_head( &dev->context_wait );
+#if DRM_DMA_HISTO
+ memset( &dev->histo, 0, sizeof(dev->histo) );
+#endif
+ dev->ctx_start = 0;
+ dev->lck_start = 0;
+
+ dev->buf_rp = dev->buf;
+ dev->buf_wp = dev->buf;
+ dev->buf_end = dev->buf + DRM_BSZ;
+ dev->buf_async = NULL;
+ init_waitqueue_head( &dev->buf_readers );
+ init_waitqueue_head( &dev->buf_writers );
+
+ DRM_DEBUG( "\n" );
+
+ /* The kernel's context could be created here, but is now created
+ * in drm_dma_enqueue. This is more resource-efficient for
+ * hardware that does not do DMA, but may mean that
+ * drm_select_queue fails between the time the interrupt is
+ * initialized and the time the queues are initialized.
+ */
+
+ return 0;
+}
+
+
+static int mach64_takedown( drm_device_t *dev )
+{
+ drm_magic_entry_t *pt, *next;
+ drm_map_t *map;
+ drm_vma_entry_t *vma, *vma_next;
+ int i;
+
+ DRM_DEBUG( "\n" );
+
+ if ( dev->irq )
+ mach64_irq_uninstall( dev );
+
+ down( &dev->struct_sem );
+ del_timer( &dev->timer );
+
+ if ( dev->devname ) {
+ drm_free( dev->devname, strlen( dev->devname ) + 1,
+ DRM_MEM_DRIVER );
+ dev->devname = NULL;
+ }
+
+ if ( dev->unique ) {
+ drm_free( dev->unique, strlen( dev->unique ) + 1,
+ DRM_MEM_DRIVER );
+ dev->unique = NULL;
+ dev->unique_len = 0;
+ }
+
+ /* Clear pid list */
+ for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
+ for ( pt = dev->magiclist[i].head ; pt ; pt = next ) {
+ next = pt->next;
+ drm_free( pt, sizeof(*pt), DRM_MEM_MAGIC );
+ }
+ dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
+ }
+
+ /* Clear AGP information */
+ if ( dev->agp ) {
+ drm_agp_mem_t *entry;
+ drm_agp_mem_t *nexte;
+
+ /* Remove AGP resources, but leave dev->agp intact until
+ * mach64_cleanup is called.
+ */
+ for ( entry = dev->agp->memory ; entry ; entry = nexte ) {
+ nexte = entry->next;
+ if ( entry->bound )
+ drm_unbind_agp( entry->memory );
+ drm_free_agp( entry->memory, entry->pages );
+ drm_free( entry, sizeof(*entry), DRM_MEM_AGPLISTS );
+ }
+ dev->agp->memory = NULL;
+
+ if ( dev->agp->acquired && drm_agp.release )
+ (*drm_agp.release)();
+
+ dev->agp->acquired = 0;
+ dev->agp->enabled = 0;
+ }
+
+ /* Clear vma list (only built for debugging) */
+ if ( dev->vmalist ) {
+ for ( vma = dev->vmalist ; vma ; vma = vma_next ) {
+ vma_next = vma->next;
+ drm_free( vma, sizeof(*vma), DRM_MEM_VMAS );
+ }
+ dev->vmalist = NULL;
+ }
+
+ /* Clear map area and mtrr information */
+ if ( dev->maplist ) {
+ for ( i = 0 ; i < dev->map_count ; i++ ) {
+ map = dev->maplist[i];
+ switch ( map->type ) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+#ifdef CONFIG_MTRR
+ if ( map->mtrr >= 0 ) {
+ int retcode;
+ retcode = mtrr_del( map->mtrr,
+ map->offset,
+ map->size );
+ DRM_DEBUG( "mtrr_del=%d\n", retcode );
+ }
+#endif
+ drm_ioremapfree( map->handle, map->size );
+ break;
+ case _DRM_SHM:
+ drm_free_pages( (unsigned long)map->handle,
+ drm_order( map->size )
+ - PAGE_SHIFT,
+ DRM_MEM_SAREA );
+ break;
+ case _DRM_AGP:
+ break;
+ }
+ drm_free( map, sizeof(*map), DRM_MEM_MAPS );
+ }
+ drm_free( dev->maplist,
+ dev->map_count * sizeof(*dev->maplist),
+ DRM_MEM_MAPS );
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ }
+
+ if ( dev->queuelist ) {
+ for ( i = 0 ; i < dev->queue_count ; i++ ) {
+ drm_waitlist_destroy( &dev->queuelist[i]->waitlist );
+ if ( dev->queuelist[i] ) {
+ drm_free( dev->queuelist[i],
+ sizeof(*dev->queuelist[0]),
+ DRM_MEM_QUEUES );
+ dev->queuelist[i] = NULL;
+ }
+ }
+ drm_free( dev->queuelist,
+ dev->queue_slots * sizeof(*dev->queuelist),
+ DRM_MEM_QUEUES );
+ dev->queuelist = NULL;
+ }
+
+ drm_dma_takedown( dev );
+
+ dev->queue_count = 0;
+ if ( dev->lock.hw_lock ) {
+ dev->lock.hw_lock = NULL; /* SHM removed */
+ dev->lock.pid = 0;
+ wake_up_interruptible( &dev->lock.lock_queue );
+ }
+ up( &dev->struct_sem );
+
+ return 0;
+}
+
+/* mach64_init is called via init_module at module load time, or via
+ * linux/init/main.c (this is not currently supported). */
+
static int mach64_init( void )
{
	int retcode;
	drm_device_t *dev = &mach64_device;

	DRM_DEBUG( "\n" );

	memset( (void *)dev, 0, sizeof(*dev) );
	dev->count_lock = SPIN_LOCK_UNLOCKED;
	sema_init( &dev->struct_sem, 1 );

#ifdef MODULE
	/* Parse any "mach64=..." option string given at insmod time. */
	drm_parse_options( mach64 );
#endif
	DRM_DEBUG( "doing misc_register\n" );
	retcode = misc_register( &mach64_misc );
	if ( retcode ) {
		DRM_ERROR( "Cannot register \"%s\"\n", MACH64_NAME );
		return retcode;
	}
	dev->device = MKDEV( MISC_MAJOR, mach64_misc.minor );
	dev->name = MACH64_NAME;

	DRM_DEBUG( "doing mem init\n" );
	drm_mem_init();
	DRM_DEBUG( "doing proc init\n" );
	drm_proc_init( dev );
	DRM_DEBUG( "doing agp init\n" );
	dev->agp = drm_agp_init();
	/* AGP is mandatory for this driver: on failure, unwind
	 * everything registered so far before reporting the error.
	 */
	if ( dev->agp == NULL ) {
		DRM_INFO( "The mach64 drm module requires the agpgart module"
			  " to function correctly\nPlease load the agpgart"
			  " module before you load the mach64 module\n" );
		drm_proc_cleanup();
		misc_deregister( &mach64_misc );
		mach64_takedown( dev );
		return -ENOMEM;
	}
	DRM_DEBUG( "doing ctxbitmap init\n" );
	retcode = drm_ctxbitmap_init( dev );
	if ( retcode ) {
		DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
		drm_proc_cleanup();
		misc_deregister( &mach64_misc );
		mach64_takedown( dev );
		return retcode;
	}

	DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d\n",
		  MACH64_NAME,
		  MACH64_MAJOR,
		  MACH64_MINOR,
		  MACH64_PATCHLEVEL,
		  MACH64_DATE,
		  mach64_misc.minor );

	return 0;
}
+
+/* mach64_cleanup is called via cleanup_module at module unload time. */
+
static void mach64_cleanup( void )
{
	drm_device_t *dev = &mach64_device;

	DRM_DEBUG( "%s\n", __FUNCTION__ );

	drm_proc_cleanup();
	if ( misc_deregister( &mach64_misc ) ) {
		DRM_ERROR( "Cannot unload module\n" );
	} else {
		DRM_INFO( "Module unloaded\n" );
	}
	drm_ctxbitmap_cleanup( dev );
	mach64_takedown( dev );
	/* mach64_takedown() leaves dev->agp allocated; free it here. */
	if ( dev->agp ) {
		drm_agp_uninit();
		drm_free( dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS );
		dev->agp = NULL;
	}
}
+
+
+module_init( mach64_init );
+module_exit( mach64_cleanup );
+
+
+int mach64_version( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_version_t version;
+ int len;
+
+ if ( copy_from_user( &version, (drm_version_t *)arg,
+ sizeof(version) ) )
+ return -EFAULT;
+
+#define DRM_COPY( name, value ) \
+ len = strlen( value ); \
+ if ( len > name##_len ) len = name##_len; \
+ name##_len = strlen( value ); \
+ if ( len && name ) { \
+ if ( copy_to_user( name, value, len ) ) \
+ return -EFAULT; \
+ }
+
+ version.version_major = MACH64_MAJOR;
+ version.version_minor = MACH64_MINOR;
+ version.version_patchlevel = MACH64_PATCHLEVEL;
+
+ DRM_COPY( version.name, MACH64_NAME );
+ DRM_COPY( version.date, MACH64_DATE );
+ DRM_COPY( version.desc, MACH64_DESC );
+
+ if ( copy_to_user( (drm_version_t *)arg, &version, sizeof(version) ) )
+ return -EFAULT;
+ return 0;
+}
+
/* Open handler: defers to the generic DRM open helper, then runs the
 * one-time device setup on the first successful open.
 */
int mach64_open( struct inode *inode, struct file *filp )
{
	drm_device_t *dev = &mach64_device;
	int retcode = 0;

	DRM_DEBUG( "open_count = %d\n", dev->open_count );
	retcode = drm_open_helper( inode, filp, dev );
	if ( !retcode ) {
#if LINUX_VERSION_CODE < 0x020333
		MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
		atomic_inc( &dev->total_open );
		spin_lock( &dev->count_lock );
		/* First opener initializes the device state. */
		if ( !dev->open_count++ ) {
			spin_unlock( &dev->count_lock );
			return mach64_setup( dev );
		}
		spin_unlock( &dev->count_lock );
	}
	return retcode;
}
+
/* Release (close) handler.
 *
 * If the closing process holds the hardware lock, its buffers are
 * reclaimed and the lock freed directly.  Otherwise the lock must
 * first be taken as DRM_KERNEL_CONTEXT (possibly sleeping on
 * lock_queue) before buffers can be reclaimed.  The per-file private
 * data is then unlinked and freed, and the device torn down when the
 * last open handle goes away.
 */
int mach64_release( struct inode *inode, struct file *filp )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	int retcode = 0;

	lock_kernel();
	dev = priv->dev;
	DRM_DEBUG( "pid = %d, device = 0x%x, open_count = %d\n",
		   current->pid, dev->device, dev->open_count );

	/* Case 1: this process dies while holding the hardware lock. */
	if ( dev->lock.hw_lock && _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock )
	     && dev->lock.pid == current->pid ) {
		mach64_reclaim_buffers( dev, priv->pid );
		DRM_ERROR( "Process %d dead, freeing lock for context %d\n",
			   current->pid,
			   _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
		drm_lock_free( dev,
			       &dev->lock.hw_lock->lock,
			       _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );

		/* FIXME: may require heavy-handed reset of hardware at this
		 * point, possibly processed via a callback to the X server.
		 */
	} else if ( dev->lock.hw_lock ) {
		/* The lock is required to reclaim buffers */
		DECLARE_WAITQUEUE( entry, current );
		add_wait_queue( &dev->lock.lock_queue, &entry );
		while ( 1 ) {
			if ( !dev->lock.hw_lock ) {
				/* Device has been unregistered */
				retcode = -EINTR;
				break;
			}
			if ( drm_lock_take( &dev->lock.hw_lock->lock,
					    DRM_KERNEL_CONTEXT ) ) {
				dev->lock.pid = priv->pid;
				dev->lock.lock_time = jiffies;
				atomic_inc( &dev->total_locks );
				break; /* Got lock */
			}
			/* Contention */
			atomic_inc( &dev->total_sleeps );
			current->state = TASK_INTERRUPTIBLE;
			schedule();
			if ( signal_pending( current ) ) {
				retcode = -ERESTARTSYS;
				break;
			}
		}
		current->state = TASK_RUNNING;
		remove_wait_queue( &dev->lock.lock_queue, &entry );
		if ( !retcode ) {
			mach64_reclaim_buffers( dev, priv->pid );
			drm_lock_free( dev, &dev->lock.hw_lock->lock,
				       DRM_KERNEL_CONTEXT );
		}
	}
	drm_fasync( -1, filp, 0 );

	/* Unlink this file's drm_file_t from the device's open list. */
	down( &dev->struct_sem );
	if ( priv->prev ) {
		priv->prev->next = priv->next;
	} else {
		dev->file_first = priv->next;
	}
	if ( priv->next ) {
		priv->next->prev = priv->prev;
	} else {
		dev->file_last = priv->prev;
	}
	up( &dev->struct_sem );

	drm_free( priv, sizeof(*priv), DRM_MEM_FILES );
#if LINUX_VERSION_CODE < 0x020333
	MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
	atomic_inc( &dev->total_close );
	spin_lock( &dev->count_lock );
	if ( !--dev->open_count ) {
		/* Last close: refuse teardown while ioctls are in flight. */
		if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
			DRM_ERROR( "Device busy: %d %d\n",
				   atomic_read( &dev->ioctl_count ),
				   dev->blocked );
			spin_unlock( &dev->count_lock );
			unlock_kernel();
			return -EBUSY;
		}
		spin_unlock( &dev->count_lock );
		unlock_kernel();
		return mach64_takedown( dev );
	}
	spin_unlock( &dev->count_lock );
	unlock_kernel();
	return retcode;
}
+
/* mach64_ioctl is called whenever a process performs an ioctl on
 * /dev/drm.  It looks the command up in mach64_ioctls[], enforces the
 * entry's root_only/auth_needed flags, and dispatches to its handler.
 */

int mach64_ioctl( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	int nr = DRM_IOCTL_NR( cmd );
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	int retcode = 0;
	drm_ioctl_desc_t *ioctl;
	drm_ioctl_t *func;

	/* ioctl_count lets mach64_release() detect in-flight ioctls. */
	atomic_inc( &dev->ioctl_count );
	atomic_inc( &dev->total_ioctl );
	++priv->ioctl_count;

	DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%x, auth=%d\n",
		   current->pid, cmd, nr, dev->device, priv->authenticated );

	if ( nr >= MACH64_IOCTL_COUNT ) {
		retcode = -EINVAL;
	} else {
		ioctl = &mach64_ioctls[nr];
		func = ioctl->func;

		if ( !func ) {
			DRM_DEBUG( "no function\n" );
			retcode = -EINVAL;
		} else if ( ( ioctl->root_only &&
			      !capable( CAP_SYS_ADMIN ) ) ||
			    ( ioctl->auth_needed &&
			      !priv->authenticated ) ) {
			retcode = -EACCES;
		} else {
			retcode = (func)( inode, filp, cmd, arg );
		}
	}

	atomic_dec( &dev->ioctl_count );
	return retcode;
}
+
/* DRM_IOCTL_UNLOCK: release the hardware lock held by a client
 * context.  The lock is transferred to DRM_KERNEL_CONTEXT first and
 * then freed — unless a context switch is in flight (context_flag
 * set), in which case the kernel keeps it until the switch completes.
 */
int mach64_unlock( struct inode *inode, struct file *filp,
		   unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_lock_t lock;

	if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
		return -EFAULT;

	/* User space may never unlock on behalf of the kernel context. */
	if ( lock.context == DRM_KERNEL_CONTEXT ) {
		DRM_ERROR( "Process %d using kernel context %d\n",
			   current->pid, lock.context );
		return -EINVAL;
	}

	DRM_DEBUG( "%d frees lock (%d holds)\n",
		   lock.context,
		   _DRM_LOCKING_CONTEXT( dev->lock.hw_lock->lock ) );
	atomic_inc( &dev->total_unlocks );
	if ( _DRM_LOCK_IS_CONT( dev->lock.hw_lock->lock ) )
		atomic_inc( &dev->total_contends );
	drm_lock_transfer( dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT );
	/* Hold the lock for the kernel while a switch is pending. */
	if ( !dev->context_flag ) {
		if ( drm_lock_free( dev, &dev->lock.hw_lock->lock,
				    DRM_KERNEL_CONTEXT ) ) {
			DRM_ERROR( "\n" );
		}
	}
#if DRM_DMA_HISTOGRAM
	atomic_inc( &dev->histo.lhld[drm_histogram_slot(get_cycles()
							- dev->lck_start)] );
#endif

	return 0;
}