author     Jeff Hartmann <jhartmann@valinux.com>   2000-02-09 17:19:36 +0000
committer  Jeff Hartmann <jhartmann@valinux.com>   2000-02-09 17:19:36 +0000
commit     20a9e9338751c9610854f61e769bba859e354a34 (patch)
tree       03b01cdf559b1ed7b026c00ab25b9673b486298f
parent     3169aa74b737ffd11e62407cf3eba741b52797c9 (diff)
Commit for keith
-rw-r--r--  linux-core/mga_drv.c  |  16
-rw-r--r--  linux/Makefile.linux  |   3
-rw-r--r--  linux/context.c       |   1
-rw-r--r--  linux/ctxbitmap.c     |   7
-rw-r--r--  linux/dma.c           |   1
-rw-r--r--  linux/lists.c         |   1
-rw-r--r--  linux/lock.c          |   3
-rw-r--r--  linux/mga_clear.c     |  17
-rw-r--r--  linux/mga_context.c   | 245
-rw-r--r--  linux/mga_dma.c       | 239
-rw-r--r--  linux/mga_dma.h       |   2
-rw-r--r--  linux/mga_drv.c       |  16
-rw-r--r--  linux/mga_drv.h       |  19
13 files changed, 486 insertions, 84 deletions
diff --git a/linux-core/mga_drv.c b/linux-core/mga_drv.c
index 583afe77..edd60148 100644
--- a/linux-core/mga_drv.c
+++ b/linux-core/mga_drv.c
@@ -80,13 +80,13 @@ static drm_ioctl_desc_t mga_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { mga_mapbufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { mga_freebufs, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { drm_addctx, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { drm_rmctx, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { drm_modctx, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { drm_getctx, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { drm_switchctx, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { drm_newctx, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { drm_resctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { mga_addctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { mga_rmctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { mga_modctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { mga_getctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { mga_switchctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { mga_newctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { mga_resctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
@@ -571,6 +571,6 @@ int mga_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
-
+
return 0;
}
diff --git a/linux/Makefile.linux b/linux/Makefile.linux
index c0ab34ab..5cc5334f 100644
--- a/linux/Makefile.linux
+++ b/linux/Makefile.linux
@@ -71,7 +71,8 @@ GAMMAHEADERS= gamma_drv.h $(DRMHEADERS)
TDFXOBJS= tdfx_drv.o tdfx_context.o
TDFXHEADERS= tdfx_drv.h $(DRMHEADERS)
-MGAOBJS= mga_drv.o mga_dma.o mga_bufs.o mga_state.o mga_clear.o
+MGAOBJS= mga_drv.o mga_dma.o mga_bufs.o mga_state.o mga_clear.o \
+ mga_context.o
MGAHEADERS= mga_drv.h mga_drm_public.h $(DRMHEADERS)
R128OBJS= r128_drv.o r128_context.o
diff --git a/linux/context.c b/linux/context.c
index 998401d1..9edfeae9 100644
--- a/linux/context.c
+++ b/linux/context.c
@@ -281,6 +281,7 @@ int drm_rmctx(struct inode *inode, struct file *filp, unsigned int cmd,
finalization) */
while (test_and_set_bit(0, &dev->interrupt_flag)) {
+ printk("Calling schedule from rmctx\n");
schedule();
if (signal_pending(current)) {
clear_bit(0, &dev->interrupt_flag);
diff --git a/linux/ctxbitmap.c b/linux/ctxbitmap.c
index ad23331c..262a4f22 100644
--- a/linux/ctxbitmap.c
+++ b/linux/ctxbitmap.c
@@ -41,7 +41,7 @@ void drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle)
return;
}
failed:
- DRM_DEBUG("Attempt to free invalid context handle: %d\n",
+ DRM_ERROR("Attempt to free invalid context handle: %d\n",
ctx_handle);
return;
}
@@ -53,6 +53,7 @@ int drm_ctxbitmap_next(drm_device_t *dev)
bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
if (bit < DRM_MAX_CTXBITMAP) {
set_bit(bit, dev->ctx_bitmap);
+ printk("drm_ctxbitmap_next bit : %d\n", bit);
return bit;
}
return -1;
@@ -61,6 +62,7 @@ int drm_ctxbitmap_next(drm_device_t *dev)
int drm_ctxbitmap_init(drm_device_t *dev)
{
int i;
+ int temp;
dev->ctx_bitmap = (unsigned long *) drm_alloc(PAGE_SIZE * 4,
DRM_MEM_CTXBITMAP);
@@ -69,7 +71,8 @@ int drm_ctxbitmap_init(drm_device_t *dev)
}
memset((void *) dev->ctx_bitmap, 0, PAGE_SIZE * 4);
for(i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
- drm_ctxbitmap_next(dev);
+ temp = drm_ctxbitmap_next(dev);
+ printk("drm_ctxbitmap_init : %d\n", temp);
}
return 0;
diff --git a/linux/dma.c b/linux/dma.c
index d796e679..099311c2 100644
--- a/linux/dma.c
+++ b/linux/dma.c
@@ -402,6 +402,7 @@ int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *d)
atomic_inc(&q->block_count);
for (;;) {
if (!atomic_read(&q->block_write)) break;
+ printk("Calling schedule from dma_enqueue\n");
schedule();
if (signal_pending(current)) {
atomic_dec(&q->use_count);
diff --git a/linux/lists.c b/linux/lists.c
index 6917527b..17c9759a 100644
--- a/linux/lists.c
+++ b/linux/lists.c
@@ -230,6 +230,7 @@ drm_buf_t *drm_freelist_get(drm_freelist_t *bl, int block)
for (;;) {
if (!atomic_read(&bl->wfh)
&& (buf = drm_freelist_try(bl))) break;
+ printk("calling schedule from freelist_get\n");
schedule();
if (signal_pending(current)) break;
}
diff --git a/linux/lock.c b/linux/lock.c
index b1c38e28..3f7eaff6 100644
--- a/linux/lock.c
+++ b/linux/lock.c
@@ -133,12 +133,15 @@ int drm_flush_queue(drm_device_t *dev, int context)
atomic_inc(&q->block_count);
for (;;) {
if (!DRM_BUFCOUNT(&q->waitlist)) break;
+ printk("Calling schedule from flush_queue : %d\n",
+ DRM_BUFCOUNT(&q->waitlist));
schedule();
if (signal_pending(current)) {
ret = -EINTR; /* Can't restart */
break;
}
}
+ printk("Exited out of schedule from flush_queue\n");
atomic_dec(&q->block_count);
current->state = TASK_RUNNING;
remove_wait_queue(&q->flush_queue, &entry);
diff --git a/linux/mga_clear.c b/linux/mga_clear.c
index e9eef908..cb2e9d02 100644
--- a/linux/mga_clear.c
+++ b/linux/mga_clear.c
@@ -139,7 +139,9 @@ static int mgaClearBuffers(drm_device_t *dev,
d.request_sizes = NULL;
d.granted_count = 0;
- drm_dma_enqueue(dev, &d);
+ atomic_inc(&dev_priv->pending_bufs);
+ if((drm_dma_enqueue(dev, &d)) != 0)
+ atomic_dec(&dev_priv->pending_bufs);
mga_dma_schedule(dev, 1);
return 0;
}
@@ -208,8 +210,10 @@ int mgaSwapBuffers(drm_device_t *dev, int flags)
d.request_indices = NULL;
d.request_sizes = NULL;
d.granted_count = 0;
-
- drm_dma_enqueue(dev, &d);
+
+ atomic_inc(&dev_priv->pending_bufs);
+ if((drm_dma_enqueue(dev, &d)) != 0)
+ atomic_dec(&dev_priv->pending_bufs);
mga_dma_schedule(dev, 1);
return 0;
}
@@ -265,7 +269,9 @@ static int mgaIload(drm_device_t *dev, drm_mga_iload_t *args)
d.request_sizes = NULL;
d.granted_count = 0;
- drm_dma_enqueue(dev, &d);
+ atomic_inc(&dev_priv->pending_bufs);
+ if((drm_dma_enqueue(dev, &d)) != 0)
+ atomic_dec(&dev_priv->pending_bufs);
mga_dma_schedule(dev, 1);
return 0;
@@ -390,7 +396,10 @@ int mga_dma(struct inode *inode, struct file *filp, unsigned int cmd,
*/
mgaCopyAndVerifyState( dev_priv, buf_priv );
+ atomic_inc(&dev_priv->pending_bufs);
retcode = drm_dma_enqueue(dev, &d);
+ if(retcode != 0)
+ atomic_dec(&dev_priv->pending_bufs);
mga_dma_schedule(dev, 1);
}
diff --git a/linux/mga_context.c b/linux/mga_context.c
new file mode 100644
index 00000000..4e793213
--- /dev/null
+++ b/linux/mga_context.c
@@ -0,0 +1,245 @@
+/* mga_context.c -- IOCTLs for mga contexts -*- linux-c -*-
+ * Created: Mon Dec 13 09:51:35 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Rickard E. (Rik) Faith <faith@precisioninsight.com>
+ *
+ * $XFree86$
+ *
+ */
+
+#include <linux/sched.h>
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "mga_drv.h"
+
+static int mga_alloc_queue(drm_device_t *dev)
+{
+ int temp = drm_ctxbitmap_next(dev);
+ printk("mga_alloc_queue: %d\n", temp);
+ return temp;
+}
+
+int mga_context_switch(drm_device_t *dev, int old, int new)
+{
+ char buf[64];
+
+ atomic_inc(&dev->total_ctx);
+
+ if (test_and_set_bit(0, &dev->context_flag)) {
+ DRM_ERROR("Reentering -- FIXME\n");
+ return -EBUSY;
+ }
+
+#if DRM_DMA_HISTOGRAM
+ dev->ctx_start = get_cycles();
+#endif
+
+ printk("Context switch from %d to %d\n", old, new);
+
+ if (new == dev->last_context) {
+ clear_bit(0, &dev->context_flag);
+ return 0;
+ }
+
+ if (drm_flags & DRM_FLAG_NOCTX) {
+ mga_context_switch_complete(dev, new);
+ } else {
+ sprintf(buf, "C %d %d\n", old, new);
+ drm_write_string(dev, buf);
+ }
+
+ return 0;
+}
+
+int mga_context_switch_complete(drm_device_t *dev, int new)
+{
+ dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
+ dev->last_switch = jiffies;
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("Lock isn't held after context switch\n");
+ }
+
+ /* If a context switch is ever initiated
+ when the kernel holds the lock, release
+ that lock here. */
+#if DRM_DMA_HISTOGRAM
+ atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles()
+ - dev->ctx_start)]);
+
+#endif
+ clear_bit(0, &dev->context_flag);
+ wake_up(&dev->context_wait);
+
+ return 0;
+}
+
+int mga_resctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_ctx_res_t res;
+ drm_ctx_t ctx;
+ int i;
+
+ printk("%d\n", DRM_RESERVED_CONTEXTS);
+ copy_from_user_ret(&res, (drm_ctx_res_t *)arg, sizeof(res), -EFAULT);
+ if (res.count >= DRM_RESERVED_CONTEXTS) {
+ memset(&ctx, 0, sizeof(ctx));
+ for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+ ctx.handle = i;
+ copy_to_user_ret(&res.contexts[i],
+ &i,
+ sizeof(i),
+ -EFAULT);
+ }
+ }
+ res.count = DRM_RESERVED_CONTEXTS;
+ copy_to_user_ret((drm_ctx_res_t *)arg, &res, sizeof(res), -EFAULT);
+ return 0;
+}
+
+int mga_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
+ if ((ctx.handle = mga_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
+ /* Skip kernel's context and get a new one. */
+ ctx.handle = mga_alloc_queue(dev);
+ }
+ if (ctx.handle == -1) {
+ printk("Not enough free contexts.\n");
+ /* Should this return -EBUSY instead? */
+ return -ENOMEM;
+ }
+ printk("%d\n", ctx.handle);
+ copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT);
+ return 0;
+}
+
+int mga_modctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ /* This does nothing for the mga */
+ return 0;
+}
+
+int mga_getctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_ctx_t ctx;
+
+ copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT);
+ /* This is 0, because we don't handle any context flags */
+ ctx.flags = 0;
+ copy_to_user_ret((drm_ctx_t*)arg, &ctx, sizeof(ctx), -EFAULT);
+ return 0;
+}
+
+int mga_switchctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
+ printk("%d\n", ctx.handle);
+ return mga_context_switch(dev, dev->last_context, ctx.handle);
+}
+
+int mga_newctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
+ printk("%d\n", ctx.handle);
+ mga_context_switch_complete(dev, ctx.handle);
+
+ return 0;
+}
+
+int mga_rmctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+ drm_queue_t *q;
+ drm_buf_t *buf;
+
+ copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
+ printk("%d\n", ctx.handle);
+ if(ctx.handle == DRM_KERNEL_CONTEXT) {
+ q = dev->queuelist[ctx.handle];
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) == 1) {
+ /* No longer in use */
+ atomic_dec(&q->use_count);
+ return -EINVAL;
+ }
+ atomic_inc(&q->finalization); /* Mark queue in finalization state */
+ atomic_sub(2, &q->use_count);
+ /* Mark queue as unused (pending finalization) */
+
+ while (test_and_set_bit(0, &dev->interrupt_flag)) {
+ printk("Calling schedule from rmctx\n");
+ schedule();
+ if (signal_pending(current)) {
+ clear_bit(0, &dev->interrupt_flag);
+ return -EINTR;
+ }
+ }
+
+ /* Remove queued buffers */
+ while ((buf = drm_waitlist_get(&q->waitlist))) {
+ drm_free_buffer(dev, buf);
+ }
+ clear_bit(0, &dev->interrupt_flag);
+
+ /* Wakeup blocked processes */
+ wake_up_interruptible(&q->read_queue);
+ wake_up_interruptible(&q->write_queue);
+ wake_up_interruptible(&q->flush_queue);
+
+ /* Finalization over. Queue is made
+ available when both use_count and
+ finalization become 0, which won't
+ happen until all the waiting processes
+ stop waiting. */
+ atomic_dec(&q->finalization);
+ } else {
+ drm_ctxbitmap_free(dev, ctx.handle);
+ }
+
+ return 0;
+}
diff --git a/linux/mga_dma.c b/linux/mga_dma.c
index 1464576f..f4732fe6 100644
--- a/linux/mga_dma.c
+++ b/linux/mga_dma.c
@@ -82,12 +82,67 @@ int mga_dma_cleanup(drm_device_t *dev)
dev->dev_private = NULL;
}
- /* NOT DONE: Free the dma buffer dev privates. These will
- * disappear with Jeff's work, anyway.
- */
return 0;
}
+static int mga_alloc_kernel_queue(drm_device_t *dev)
+{
+ drm_queue_t *queue = NULL;
+ /* Allocate a new queue */
+ down(&dev->struct_sem);
+
+ if(dev->queue_count != 0) {
+ /* Resetting the kernel context here is not
+ * a race, since it can only happen when that
+ * queue is empty.
+ */
+ queue = dev->queuelist[DRM_KERNEL_CONTEXT];
+ printk("Kernel queue already allocated\n");
+ } else {
+ queue = drm_alloc(sizeof(*queue), DRM_MEM_QUEUES);
+ if(!queue) {
+ up(&dev->struct_sem);
+ printk("out of memory\n");
+ return -ENOMEM;
+ }
+ ++dev->queue_count;
+ dev->queuelist = drm_alloc(sizeof(*dev->queuelist),
+ DRM_MEM_QUEUES);
+ if(!dev->queuelist) {
+ up(&dev->struct_sem);
+ drm_free(queue, sizeof(*queue), DRM_MEM_QUEUES);
+ printk("out of memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ memset(queue, 0, sizeof(*queue));
+ atomic_set(&queue->use_count, 1);
+ atomic_set(&queue->finalization, 0);
+ atomic_set(&queue->block_count, 0);
+ atomic_set(&queue->block_read, 0);
+ atomic_set(&queue->block_write, 0);
+ atomic_set(&queue->total_queued, 0);
+ atomic_set(&queue->total_flushed, 0);
+ atomic_set(&queue->total_locks, 0);
+
+ init_waitqueue_head(&queue->write_queue);
+ init_waitqueue_head(&queue->read_queue);
+ init_waitqueue_head(&queue->flush_queue);
+
+ queue->flags = 0;
+
+ drm_waitlist_create(&queue->waitlist, dev->dma->buf_count);
+
+ dev->queue_slots = 1;
+ dev->queuelist[DRM_KERNEL_CONTEXT] = queue;
+ dev->queue_count--;
+
+ up(&dev->struct_sem);
+ printk("%d (new)\n", dev->queue_count - 1);
+ return DRM_KERNEL_CONTEXT;
+}
+
static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) {
drm_mga_private_t *dev_priv;
drm_map_t *prim_map = NULL;
@@ -99,15 +154,22 @@ static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) {
if(dev_priv == NULL) return -ENOMEM;
dev->dev_private = (void *) dev_priv;
- DRM_DEBUG("dev_private\n");
+ printk("dev_private\n");
memset(dev_priv, 0, sizeof(drm_mga_private_t));
+ atomic_set(&dev_priv->pending_bufs, 0);
+
if((init->reserved_map_idx >= dev->map_count) ||
(init->buffer_map_idx >= dev->map_count)) {
mga_dma_cleanup(dev);
- DRM_DEBUG("reserved_map or buffer_map are invalid\n");
+ printk("reserved_map or buffer_map are invalid\n");
return -EINVAL;
}
+
+ if(mga_alloc_kernel_queue(dev) != DRM_KERNEL_CONTEXT) {
+ mga_dma_cleanup(dev);
+ DRM_ERROR("Kernel context queue not present\n");
+ }
dev_priv->reserved_map_idx = init->reserved_map_idx;
dev_priv->buffer_map_idx = init->buffer_map_idx;
@@ -115,7 +177,7 @@ static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) {
dev_priv->sarea_priv = (drm_mga_sarea_t *)
((u8 *)sarea_map->handle +
init->sarea_priv_offset);
- DRM_DEBUG("sarea_priv\n");
+ printk("sarea_priv\n");
/* Scale primary size to the next page */
dev_priv->primary_size = ((init->primary_size + PAGE_SIZE - 1) /
@@ -137,24 +199,24 @@ static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) {
dev_priv->mAccess = init->mAccess;
- DRM_DEBUG("memcpy\n");
+ printk("memcpy\n");
memcpy(&dev_priv->WarpIndex, &init->WarpIndex,
sizeof(mgaWarpIndex) * MGA_MAX_WARP_PIPES);
- DRM_DEBUG("memcpy done\n");
+ printk("memcpy done\n");
prim_map = dev->maplist[init->reserved_map_idx];
dev_priv->prim_phys_head = dev->agp->base + init->reserved_map_agpstart;
temp = init->warp_ucode_size + dev_priv->primary_size;
temp = ((temp + PAGE_SIZE - 1) /
PAGE_SIZE) * PAGE_SIZE;
- DRM_DEBUG("temp : %x\n", temp);
- DRM_DEBUG("dev->agp->base: %lx\n", dev->agp->base);
- DRM_DEBUG("init->reserved_map_agpstart: %x\n", init->reserved_map_agpstart);
+ printk("temp : %x\n", temp);
+ printk("dev->agp->base: %lx\n", dev->agp->base);
+ printk("init->reserved_map_agpstart: %x\n", init->reserved_map_agpstart);
dev_priv->ioremap = drm_ioremap(dev->agp->base + init->reserved_map_agpstart,
temp);
if(dev_priv->ioremap == NULL) {
- DRM_DEBUG("Ioremap failed\n");
+ printk("Ioremap failed\n");
mga_dma_cleanup(dev);
return -ENOMEM;
}
@@ -162,12 +224,12 @@ static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) {
dev_priv->prim_head = (u32 *)dev_priv->ioremap;
- DRM_DEBUG("dev_priv->prim_head : %p\n", dev_priv->prim_head);
+ printk("dev_priv->prim_head : %p\n", dev_priv->prim_head);
dev_priv->current_dma_ptr = dev_priv->prim_head;
dev_priv->prim_num_dwords = 0;
dev_priv->prim_max_dwords = dev_priv->primary_size / 4;
- DRM_DEBUG("dma initialization\n");
+ printk("dma initialization\n");
/* Private is now filled in, initialize the hardware */
{
@@ -183,7 +245,7 @@ static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) {
/* Poll for the first buffer to insure that
* the status register will be correct
*/
- DRM_DEBUG("phys_head : %lx\n", phys_head);
+ printk("phys_head : %lx\n", phys_head);
MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
@@ -204,7 +266,7 @@ static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) {
}
- DRM_DEBUG("dma init was successful\n");
+ printk("dma init was successful\n");
return 0;
}
@@ -238,6 +300,7 @@ static void __mga_iload_small(drm_device_t *dev,
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned long address = (unsigned long)buf->bus_address;
int length = buf->used;
int y1 = buf_priv->boxes[0].y1;
@@ -289,6 +352,7 @@ static void __mga_iload_xy(drm_device_t *dev,
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned long address = (unsigned long)buf->bus_address;
int length = buf->used;
int y1 = buf_priv->boxes[0].y1;
@@ -366,10 +430,10 @@ static void mga_dma_dispatch_iload(drm_device_t *dev, drm_buf_t *buf)
int x2 = buf_priv->boxes[0].x2;
if((x2 - x1) < 32) {
- DRM_DEBUG("using iload small\n");
+ printk("using iload small\n");
__mga_iload_small(dev, buf, use_agp);
} else {
- DRM_DEBUG("using iload xy\n");
+ printk("using iload xy\n");
__mga_iload_xy(dev, buf, use_agp);
}
}
@@ -465,8 +529,12 @@ static inline void mga_dma_quiescent(drm_device_t *dev)
}
}
while((MGA_READ(MGAREG_STATUS) & 0x00020001) != 0x00020000) ;
- MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
- while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ;
+#if 0
+ MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
+#endif
+ while(MGA_READ(MGAREG_DWGSYNC) == MGA_SYNC_TAG) ;
+ MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
+ while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ;
atomic_dec(&dev_priv->dispatch_lock);
}
@@ -537,6 +605,7 @@ static int mga_do_dma(drm_device_t *dev, int locked)
drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
drm_mga_buf_priv_t *buf_priv;
+ printk("mga_do_dma\n");
if (test_and_set_bit(0, &dev->dma_flag)) {
atomic_inc(&dma->total_missed_dma);
return -EBUSY;
@@ -550,7 +619,7 @@ static int mga_do_dma(drm_device_t *dev, int locked)
buf = dma->next_buffer;
- DRM_DEBUG("context %d, buffer %d\n", buf->context, buf->idx);
+ printk("context %d, buffer %d\n", buf->context, buf->idx);
if (buf->list == DRM_LIST_RECLAIM) {
drm_clear_next_buffer(dev);
@@ -581,8 +650,8 @@ static int mga_do_dma(drm_device_t *dev, int locked)
atomic_dec(&dev_priv->dispatch_lock);
return -EBUSY;
}
-
+ dma->next_queue = dev->queuelist[DRM_KERNEL_CONTEXT];
drm_clear_next_buffer(dev);
buf->pending = 1;
buf->waiting = 0;
@@ -590,6 +659,7 @@ static int mga_do_dma(drm_device_t *dev, int locked)
buf_priv = buf->dev_private;
+ printk("dispatch!\n");
switch (buf_priv->dma_type) {
case MGA_DMA_GENERAL:
mga_dma_dispatch_general(dev, buf);
@@ -604,9 +674,10 @@ static int mga_do_dma(drm_device_t *dev, int locked)
mga_dma_dispatch_iload(dev, buf);
break;
default:
- DRM_DEBUG("bad buffer type %x in dispatch\n", buf_priv->dma_type);
+ printk("bad buffer type %x in dispatch\n", buf_priv->dma_type);
break;
}
+ atomic_dec(&dev_priv->pending_bufs);
drm_free_buffer(dev, dma->this_buffer);
dma->this_buffer = buf;
@@ -622,19 +693,25 @@ static int mga_do_dma(drm_device_t *dev, int locked)
}
clear_bit(0, &dev->dma_flag);
-
+
+ if(!atomic_read(&dev_priv->pending_bufs)) {
+ wake_up_interruptible(&dev->queuelist[DRM_KERNEL_CONTEXT]->flush_queue);
+ }
+
+#if 0
+ wake_up_interruptible(&dev->lock.lock_queue);
+#endif
+
/* We hold the dispatch lock until the interrupt handler
* frees it
*/
return retcode;
}
-/*
static void mga_dma_schedule_timer_wrapper(unsigned long dev)
{
mga_dma_schedule((drm_device_t *)dev, 0);
}
-*/
static void mga_dma_schedule_tq_wrapper(void *dev)
{
@@ -651,6 +728,8 @@ int mga_dma_schedule(drm_device_t *dev, int locked)
int expire = 20;
drm_device_dma_t *dma = dev->dma;
+ printk("mga_dma_schedule\n");
+
if (test_and_set_bit(0, &dev->interrupt_flag)) {
/* Not reentrant */
atomic_inc(&dma->total_missed_sched);
@@ -658,7 +737,6 @@ int mga_dma_schedule(drm_device_t *dev, int locked)
}
missed = atomic_read(&dma->total_missed_sched);
-
again:
/* There is only one queue:
*/
@@ -696,11 +774,10 @@ again:
}
clear_bit(0, &dev->interrupt_flag);
+
return retcode;
}
-
-
int mga_irq_install(drm_device_t *dev, int irq)
{
int retcode;
@@ -715,7 +792,7 @@ int mga_irq_install(drm_device_t *dev, int irq)
dev->irq = irq;
up(&dev->struct_sem);
- DRM_DEBUG("install irq handler %d\n", irq);
+ printk("install irq handler %d\n", irq);
dev->context_flag = 0;
dev->interrupt_flag = 0;
@@ -762,7 +839,7 @@ int mga_irq_uninstall(drm_device_t *dev)
if (!irq) return -EINVAL;
- DRM_DEBUG("remove irq handler %d\n", irq);
+ printk("remove irq handler %d\n", irq);
MGA_WRITE(MGAREG_ICLEAR, 0xfa7);
MGA_WRITE(MGAREG_IEN, 0);
@@ -793,6 +870,36 @@ int mga_control(struct inode *inode, struct file *filp, unsigned int cmd,
}
}
+int mga_flush_queue(drm_device_t *dev)
+{
+ DECLARE_WAITQUEUE(entry, current);
+ drm_queue_t *q = dev->queuelist[DRM_KERNEL_CONTEXT];
+ drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
+ int ret = 0;
+
+ printk("mga_flush_queue\n");
+ if(atomic_read(&dev_priv->pending_bufs) != 0) {
+ current->state = TASK_INTERRUPTIBLE;
+ add_wait_queue(&q->flush_queue, &entry);
+ for (;;) {
+ if (!atomic_read(&dev_priv->pending_bufs)) break;
+ printk("Calling schedule from flush_queue : %d\n",
+ atomic_read(&dev_priv->pending_bufs));
+ mga_dma_schedule(dev, 1);
+ schedule();
+ if (signal_pending(current)) {
+ ret = -EINTR; /* Can't restart */
+ break;
+ }
+ }
+ printk("Exited out of schedule from flush_queue\n");
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&q->flush_queue, &entry);
+ }
+
+ return ret;
+}
+
int mga_lock(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
@@ -810,50 +917,62 @@ int mga_lock(struct inode *inode, struct file *filp, unsigned int cmd,
return -EINVAL;
}
- DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+ printk("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock.context, current->pid, dev->lock.hw_lock->lock,
lock.flags);
- if (lock.context < 0 || lock.context >= dev->queue_count) {
+ if (lock.context < 0) {
return -EINVAL;
}
-
- for (;;) {
- if (!dev->lock.hw_lock) { /* Device has been unregistered */
- ret = -EINTR;
- break;
- }
+ /* Only one queue:
+ */
- if (drm_lock_take(&dev->lock.hw_lock->lock, lock.context)) {
- dev->lock.pid = current->pid;
- dev->lock.lock_time = jiffies;
- atomic_inc(&dev->total_locks);
- break; /* Got lock */
- }
+ if (!ret) {
+ add_wait_queue(&dev->lock.lock_queue, &entry);
+ for (;;) {
+ if (!dev->lock.hw_lock) {
+ /* Device has been unregistered */
+ ret = -EINTR;
+ break;
+ }
+ if (drm_lock_take(&dev->lock.hw_lock->lock,
+ lock.context)) {
+ dev->lock.pid = current->pid;
+ dev->lock.lock_time = jiffies;
+ atomic_inc(&dev->total_locks);
+ break; /* Got lock */
+ }
- /* Contention */
- atomic_inc(&dev->total_sleeps);
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- if (signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
+ /* Contention */
+ atomic_inc(&dev->total_sleeps);
+ current->state = TASK_INTERRUPTIBLE;
+ current->policy |= SCHED_YIELD;
+ printk("Calling lock schedule\n");
+ schedule();
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
}
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&dev->lock.lock_queue, &entry);
}
-
- current->state = TASK_RUNNING;
- remove_wait_queue(&dev->lock.lock_queue, &entry);
if (!ret) {
if (lock.flags & _DRM_LOCK_QUIESCENT) {
- DRM_DEBUG("_DRM_LOCK_QUIESCENT\n");
- drm_flush_queue(dev, DRM_KERNEL_CONTEXT);
- drm_flush_unblock_queue(dev, DRM_KERNEL_CONTEXT);
- mga_dma_quiescent(dev);
+ printk("_DRM_LOCK_QUIESCENT\n");
+ ret = mga_flush_queue(dev);
+ if(ret != 0) {
+ drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ lock.context);
+ } else {
+ mga_dma_quiescent(dev);
+ }
}
}
- DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
+ printk("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
return ret;
}
+
diff --git a/linux/mga_dma.h b/linux/mga_dma.h
index 958756a4..7f1c5795 100644
--- a/linux/mga_dma.h
+++ b/linux/mga_dma.h
@@ -67,7 +67,7 @@ typedef struct {
-#define VERBO 1
+#define VERBO 0
/* Primary buffer versions of above -- pretty similar really.
diff --git a/linux/mga_drv.c b/linux/mga_drv.c
index 583afe77..edd60148 100644
--- a/linux/mga_drv.c
+++ b/linux/mga_drv.c
@@ -80,13 +80,13 @@ static drm_ioctl_desc_t mga_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { mga_mapbufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { mga_freebufs, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { drm_addctx, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { drm_rmctx, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { drm_modctx, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { drm_getctx, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { drm_switchctx, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { drm_newctx, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { drm_resctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { mga_addctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { mga_rmctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { mga_modctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { mga_getctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { mga_switchctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { mga_newctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { mga_resctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
@@ -571,6 +571,6 @@ int mga_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
-
+
return 0;
}
diff --git a/linux/mga_drv.h b/linux/mga_drv.h
index 3dd50827..ef299978 100644
--- a/linux/mga_drv.h
+++ b/linux/mga_drv.h
@@ -52,6 +52,7 @@ typedef struct _drm_mga_private {
mgaWarpIndex WarpIndex[MGA_MAX_G400_PIPES];
__volatile__ unsigned long softrap_age;
atomic_t dispatch_lock;
+ atomic_t pending_bufs;
void *ioremap;
u32 *prim_head;
u32 *current_dma_ptr;
@@ -123,6 +124,24 @@ extern int mga_swap_bufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int mga_iload(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
+ /* mga_context.c */
+extern int mga_resctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_addctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_modctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_getctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_switchctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_newctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int mga_rmctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+extern int mga_context_switch(drm_device_t *dev, int old, int new);
+extern int mga_context_switch_complete(drm_device_t *dev, int new);