Diffstat (limited to 'shared')
-rw-r--r--  shared/drm.h            21
-rw-r--r--  shared/mga_drv.h         6
-rw-r--r--  shared/r128.h            2
-rw-r--r--  shared/radeon.h         35
-rw-r--r--  shared/radeon_cp.c      41
-rw-r--r--  shared/radeon_drm.h      2
-rw-r--r--  shared/radeon_drv.h     17
-rw-r--r--  shared/radeon_irq.c     20
-rw-r--r--  shared/radeon_mem.c     12
-rw-r--r--  shared/radeon_state.c  207
10 files changed, 194 insertions(+), 169 deletions(-)
diff --git a/shared/drm.h b/shared/drm.h
index f26d4442..d1d66943 100644
--- a/shared/drm.h
+++ b/shared/drm.h
@@ -346,17 +346,30 @@ typedef struct drm_irq_busid {
} drm_irq_busid_t;
typedef enum {
- _DRM_VBLANK_ABSOLUTE = 0x0, /* Wait for specific vblank sequence number */
- _DRM_VBLANK_RELATIVE = 0x1 /* Wait for given number of vblanks */
+ _DRM_VBLANK_ABSOLUTE = 0x0, /* Wait for specific vblank sequence number */
+ _DRM_VBLANK_RELATIVE = 0x1, /* Wait for given number of vblanks */
+ _DRM_VBLANK_SIGNAL = 0x40000000 /* Send signal instead of blocking */
} drm_vblank_seq_type_t;
-typedef struct drm_radeon_vbl_wait {
+#define _DRM_VBLANK_FLAGS_MASK _DRM_VBLANK_SIGNAL
+
+struct drm_wait_vblank_request {
+ drm_vblank_seq_type_t type;
+ unsigned int sequence;
+ unsigned long signal;
+};
+
+struct drm_wait_vblank_reply {
drm_vblank_seq_type_t type;
unsigned int sequence;
long tval_sec;
long tval_usec;
-} drm_wait_vblank_t;
+};
+typedef union drm_wait_vblank {
+ struct drm_wait_vblank_request request;
+ struct drm_wait_vblank_reply reply;
+} drm_wait_vblank_t;
typedef struct drm_agp_mode {
unsigned long mode;
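
The request/reply split above lets one ioctl argument carry parameters in and results out, and _DRM_VBLANK_SIGNAL turns the blocking wait into a queued signal. A minimal userspace sketch of the blocking form, assuming the usual DRM_IOCTL_WAIT_VBLANK ioctl number and an already-open DRM file descriptor:

    #include <sys/ioctl.h>
    #include <stdio.h>
    #include "drm.h"

    /* Sketch: block until the next vblank, then print the reply. */
    int wait_one_vblank(int drm_fd)
    {
            drm_wait_vblank_t vbl;

            vbl.request.type = _DRM_VBLANK_RELATIVE;  /* relative to "now"   */
            vbl.request.sequence = 1;                 /* wait for one vblank */

            if (ioctl(drm_fd, DRM_IOCTL_WAIT_VBLANK, &vbl) != 0)
                    return -1;

            /* On return the union holds the reply: the sequence we woke on
             * plus a timestamp taken in the interrupt handler.
             */
            printf("vblank %u at %ld.%06ld\n", vbl.reply.sequence,
                   vbl.reply.tval_sec, vbl.reply.tval_usec);
            return 0;
    }
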
diff --git a/shared/mga_drv.h b/shared/mga_drv.h
index d7f85af5..0e650b42 100644
--- a/shared/mga_drv.h
+++ b/shared/mga_drv.h
@@ -142,12 +142,12 @@ extern int mga_warp_init( drm_mga_private_t *dev_priv );
#define MGA_READ( reg ) (_MGA_READ((u32 *)MGA_ADDR(reg)))
#define MGA_READ8( reg ) (_MGA_READ((u8 *)MGA_ADDR(reg)))
-#define MGA_WRITE( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF( reg ) = val; } while (0)
-#define MGA_WRITE8( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8( reg ) = val; } while (0)
+#define MGA_WRITE( reg, val ) do { DRM_WRITEMEMORYBARRIER(dev_priv->mmio); MGA_DEREF( reg ) = val; } while (0)
+#define MGA_WRITE8( reg, val ) do { DRM_WRITEMEMORYBARRIER(dev_priv->mmio); MGA_DEREF8( reg ) = val; } while (0)
static inline u32 _MGA_READ(u32 *addr)
{
- DRM_READMEMORYBARRIER();
+ DRM_READMEMORYBARRIER(dev_priv->mmio);
return *(volatile u32 *)addr;
}
#else
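
Passing the register map into the barrier lets each platform flush the right aperture, but the ordering the macros enforce is the generic doorbell pattern: earlier memory stores must be visible before the MMIO write that makes the chip act on them. A rough standalone sketch of that pattern, using the Linux wmb() barrier and u32 typedef rather than the DRM wrappers, with illustrative names:

    /* Illustrative only: order a buffer fill before the doorbell write. */
    static inline void post_command(u32 *dma_buf, u32 cmd,
                                    volatile u32 *doorbell)
    {
            dma_buf[0] = cmd;   /* data the engine will fetch via DMA  */
            wmb();              /* buffer write must land first        */
            *doorbell = 1;      /* MMIO write that starts the engine   */
    }
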
diff --git a/shared/r128.h b/shared/r128.h
index cdf18ffb..29b26cd2 100644
--- a/shared/r128.h
+++ b/shared/r128.h
@@ -71,7 +71,7 @@
[DRM_IOCTL_NR(DRM_IOCTL_R128_DEPTH)] = { r128_cce_depth, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_R128_STIPPLE)] = { r128_cce_stipple, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_R128_INDIRECT)] = { r128_cce_indirect, 1, 1 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_R128_GETPARAM)] = { r128_getparam, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_R128_GETPARAM)] = { r128_getparam, 1, 0 },
/* Driver customization:
*/
diff --git a/shared/radeon.h b/shared/radeon.h
index fe71687a..c36accb4 100644
--- a/shared/radeon.h
+++ b/shared/radeon.h
@@ -51,7 +51,7 @@
#define DRIVER_DATE "20020828"
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 7
+#define DRIVER_MINOR 8
#define DRIVER_PATCHLEVEL 0
/* Interface history:
@@ -77,6 +77,7 @@
* and R200_PP_CUBIC_OFFSET_F1_[0..5].
* Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and
* R200_EMIT_PP_CUBIC_OFFSETS_[0..5]. (brian)
+ * 1.8 - Remove need to call cleanup ioctls on last client exit (keith)
*/
#define DRIVER_IOCTLS \
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, \
@@ -105,11 +106,6 @@
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_IRQ_WAIT)] = { radeon_irq_wait, 1, 0 },
-#define USE_IRQS 1
-#if USE_IRQS
-#define __HAVE_DMA_IRQ 1
-#define __HAVE_VBL_IRQ 1
-#define __HAVE_SHARED_IRQ 1
/* When a client dies:
* - Check for and clean up flipped page state
@@ -117,35 +113,36 @@
*
* DRM infrastructure takes care of reclaiming dma buffers.
*/
-#define DRIVER_PRERELEASE() do { \
+#define DRIVER_PRERELEASE() \
+do { \
if ( dev->dev_private ) { \
drm_radeon_private_t *dev_priv = dev->dev_private; \
if ( dev_priv->page_flipping ) { \
radeon_do_cleanup_pageflip( dev ); \
} \
radeon_mem_release( dev_priv->agp_heap ); \
+ radeon_mem_release( dev_priv->fb_heap ); \
} \
} while (0)
-/* On unloading the module:
- * - Free memory heap structure
- * - Remove mappings made at startup and free dev_private.
+/* When the last client dies, shut down the CP and free dev->dev_priv.
*/
-#define DRIVER_PRETAKEDOWN() do { \
- if ( dev->dev_private ) { \
- drm_radeon_private_t *dev_priv = dev->dev_private; \
- radeon_mem_takedown( &(dev_priv->agp_heap) ); \
- radeon_do_cleanup_cp( dev ); \
- } \
+#define __HAVE_RELEASE 1
+#define DRIVER_RELEASE() \
+do { \
+ DRM(reclaim_buffers)( dev, priv->pid ); \
+ if ( dev->open_count == 1) \
+ radeon_do_release( dev ); \
} while (0)
-#else
-#define __HAVE_DMA_IRQ 0
-#endif
+
/* DMA customization:
*/
#define __HAVE_DMA 1
+#define __HAVE_DMA_IRQ 1
+#define __HAVE_VBL_IRQ 1
+#define __HAVE_SHARED_IRQ 1
/* Buffer customization:
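
The two hooks above split teardown into a per-client step and a last-client step: DRIVER_PRERELEASE runs on every close to undo page flipping and release that client's heap allocations, while DRIVER_RELEASE reclaims DMA buffers and, on the final close, calls radeon_do_release() to shut the CP down. A simplified sketch of the close path they plug into (the surrounding function is illustrative; the real call sites live in the generic DRM release code):

    /* Illustrative close path; not the actual DRM core code. */
    static int drm_close_sketch(drm_device_t *dev, drm_file_t *priv)
    {
            DRIVER_PRERELEASE();    /* every close: unflip pages, free this
                                     * client's heap blocks */
    #if __HAVE_RELEASE
            DRIVER_RELEASE();       /* reclaim buffers; on the last open it
                                     * also stops the CP and frees
                                     * dev->dev_private via radeon_do_release() */
    #endif
            dev->open_count--;
            return 0;
    }
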
diff --git a/shared/radeon_cp.c b/shared/radeon_cp.c
index d004b681..89c9eab1 100644
--- a/shared/radeon_cp.c
+++ b/shared/radeon_cp.c
@@ -1355,6 +1355,9 @@ int radeon_cp_stop( DRM_IOCTL_ARGS )
DRM_COPY_FROM_USER_IOCTL( stop, (drm_radeon_cp_stop_t *)data, sizeof(stop) );
+ if (!dev_priv->cp_running)
+ return 0;
+
/* Flush any pending CP commands. This ensures any outstanding
 * commands are executed by the engine before we turn it off.
*/
@@ -1382,6 +1385,39 @@ int radeon_cp_stop( DRM_IOCTL_ARGS )
return 0;
}
+
+void radeon_do_release( drm_device_t *dev )
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ if (dev_priv) {
+ if (dev_priv->cp_running) {
+ /* Stop the cp */
+ while ((ret = radeon_do_cp_idle( dev_priv )) != 0) {
+ DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
+#ifdef __linux__
+ schedule();
+#else
+ tsleep(&ret, PZERO, "rdnrel", 1);
+#endif
+ }
+ radeon_do_cp_stop( dev_priv );
+ radeon_do_engine_reset( dev );
+ }
+
+ /* Disable *all* interrupts */
+ RADEON_WRITE( RADEON_GEN_INT_CNTL, 0 );
+
+ /* Free memory heap structures */
+ radeon_mem_takedown( &(dev_priv->agp_heap) );
+ radeon_mem_takedown( &(dev_priv->fb_heap) );
+
+ /* deallocate kernel resources */
+ radeon_do_cleanup_cp( dev );
+ }
+}
+
/* Just reset the CP ring. Called as part of an X Server engine reset.
*/
int radeon_cp_reset( DRM_IOCTL_ARGS )
@@ -1413,9 +1449,6 @@ int radeon_cp_idle( DRM_IOCTL_ARGS )
LOCK_TEST_WITH_RETURN( dev );
-/* if (dev->irq) */
-/* radeon_emit_and_wait_irq( dev ); */
-
return radeon_do_cp_idle( dev_priv );
}
@@ -1498,7 +1531,7 @@ drm_buf_t *radeon_freelist_get( drm_device_t *dev )
}
}
- DRM_ERROR( "returning NULL!\n" );
+ DRM_DEBUG( "returning NULL!\n" );
return NULL;
}
#if 0
diff --git a/shared/radeon_drm.h b/shared/radeon_drm.h
index 014a96c3..3ab57309 100644
--- a/shared/radeon_drm.h
+++ b/shared/radeon_drm.h
@@ -382,7 +382,7 @@ typedef struct {
#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( 0x4c, drm_radeon_stipple_t)
#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t)
#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t)
-#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( 0x4f, drm_radeon_vertex_t)
+#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( 0x4f, drm_radeon_vertex2_t)
#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( 0x50, drm_radeon_cmd_buffer_t)
#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(0x51, drm_radeon_getparam_t)
#define DRM_IOCTL_RADEON_FLIP DRM_IO( 0x52)
diff --git a/shared/radeon_drv.h b/shared/radeon_drv.h
index 882c77fd..502ba89b 100644
--- a/shared/radeon_drv.h
+++ b/shared/radeon_drv.h
@@ -194,6 +194,7 @@ extern int radeon_emit_and_wait_irq(drm_device_t *dev);
extern int radeon_wait_irq(drm_device_t *dev, int swi_nr);
extern int radeon_emit_irq(drm_device_t *dev);
+extern void radeon_do_release(drm_device_t *dev);
/* Flags for stats.boxes
*/
@@ -821,13 +822,6 @@ do { \
* Ring control
*/
-#if defined(__powerpc__)
-#define radeon_flush_write_combine() (void) GET_RING_HEAD( &dev_priv->ring )
-#else
-#define radeon_flush_write_combine() DRM_WRITEMEMORYBARRIER( dev_priv->ring_rtpr )
-#endif
-
-
#define RADEON_VERBOSE 0
#define RING_LOCALS int write, _nr; unsigned int mask; u32 *ring;
@@ -861,8 +855,13 @@ do { \
dev_priv->ring.tail = write; \
} while (0)
-#define COMMIT_RING() do { \
- RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail ); \
+#define COMMIT_RING() do { \
+ /* Flush writes to ring */ \
+ DRM_READMEMORYBARRIER(dev_priv->mmio); \
+ GET_RING_HEAD( &dev_priv->ring ); \
+ RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail ); \
+ /* read from PCI bus to ensure correct posting */ \
+ RADEON_READ( RADEON_CP_RB_RPTR ); \
} while (0)
#define OUT_RING( x ) do { \
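
The new COMMIT_RING() combines two well-known MMIO idioms: a barrier plus a ring-head read to order the ring stores ahead of the WPTR update, and a read-back of a device register so the posted WPTR write is actually pushed out over PCI before the macro returns. The posting flush on its own looks like this (illustrative register pointers, not the driver's macros; u32 as in the kernel):

    /* Illustrative: PCI bridges may buffer ("post") MMIO writes; a read on
     * the same path cannot complete until those writes reach the device.
     */
    static inline void mmio_write_flushed(volatile u32 *reg, u32 val,
                                          volatile u32 *readback)
    {
            *reg = val;         /* may sit in a posting buffer ...       */
            (void)*readback;    /* ... until this read forces it through */
    }
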
diff --git a/shared/radeon_irq.c b/shared/radeon_irq.c
index 54702bee..596706bf 100644
--- a/shared/radeon_irq.c
+++ b/shared/radeon_irq.c
@@ -61,7 +61,11 @@ void DRM(dma_service)( DRM_IRQ_ARGS )
(drm_radeon_private_t *)dev->dev_private;
u32 stat;
- stat = RADEON_READ(RADEON_GEN_INT_STATUS);
+ /* Only consider the bits we're interested in - others could be used
+ * outside the DRM
+ */
+ stat = RADEON_READ(RADEON_GEN_INT_STATUS)
+ & (RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT);
if (!stat)
return;
@@ -70,23 +74,21 @@ void DRM(dma_service)( DRM_IRQ_ARGS )
DRM_WAKEUP( &dev_priv->swi_queue );
}
-#if __HAVE_VBL_IRQ
/* VBLANK interrupt */
if (stat & RADEON_CRTC_VBLANK_STAT) {
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
+ DRM(vbl_send_signals)( dev );
}
-#endif
- /* Acknowledge all the bits in GEN_INT_STATUS -- seem to get
- * more than we asked for...
- */
+ /* Acknowledge interrupts we handle */
RADEON_WRITE(RADEON_GEN_INT_STATUS, stat);
}
static __inline__ void radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv)
{
- u32 tmp = RADEON_READ( RADEON_GEN_INT_STATUS );
+ u32 tmp = RADEON_READ( RADEON_GEN_INT_STATUS )
+ & (RADEON_SW_INT_TEST_ACK | RADEON_CRTC_VBLANK_STAT);
if (tmp)
RADEON_WRITE( RADEON_GEN_INT_STATUS, tmp );
}
@@ -138,7 +140,6 @@ int radeon_emit_and_wait_irq(drm_device_t *dev)
}
-#if __HAVE_VBL_IRQ
int DRM(vblank_wait)(drm_device_t *dev, unsigned int *sequence)
{
drm_radeon_private_t *dev_priv =
@@ -161,13 +162,12 @@ int DRM(vblank_wait)(drm_device_t *dev, unsigned int *sequence)
*/
DRM_WAIT_ON( ret, dev->vbl_queue, 3*DRM_HZ,
( ( ( cur_vblank = atomic_read(&dev->vbl_received ) )
- + ~*sequence + 1 ) <= (1<<23) ) );
+ - *sequence ) <= (1<<23) ) );
*sequence = cur_vblank;
return ret;
}
-#endif
/* Needs the lock as it touches the ring.
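
The rewritten wait condition in DRM(vblank_wait) is a wraparound-safe "target reached" test: with unsigned arithmetic, cur_vblank - *sequence becomes a small number (at most 2^23 here) once the target has been passed, even if the 32-bit counter wrapped in between. For example, a target of 0xfffffff0 and a current count of 0x10 give an unsigned difference of 0x20, so the wait completes. The same test in isolation:

    /* True once `cur` has reached or passed `target`, tolerating 32-bit
     * wraparound; 1<<23 is the slack the driver uses.
     */
    static int vblank_passed(unsigned int cur, unsigned int target)
    {
            return (cur - target) <= (1u << 23);
    }
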
diff --git a/shared/radeon_mem.c b/shared/radeon_mem.c
index 50b6ee38..7ca10753 100644
--- a/shared/radeon_mem.c
+++ b/shared/radeon_mem.c
@@ -130,18 +130,6 @@ static void free_block( struct mem_block *p )
}
}
-#if 0
-static void print_heap( struct mem_block *heap )
-{
- struct mem_block *p;
-
- for (p = heap->next ; p != heap ; p = p->next)
- DRM_DEBUG("0x%x..0x%x (0x%x) -- owner %d\n",
- p->start, p->start + p->size,
- p->size, p->pid);
-}
-#endif
-
/* Initialize. How to check for an uninitialized heap?
*/
static int init_heap(struct mem_block **heap, int start, int size)
diff --git a/shared/radeon_state.c b/shared/radeon_state.c
index 7b480a7e..d9bc948c 100644
--- a/shared/radeon_state.c
+++ b/shared/radeon_state.c
@@ -1073,20 +1073,31 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
u32 *buffer;
const u8 *data;
int size, dwords, tex_width, blit_width;
- u32 y, height;
- int ret = 0, i;
+ u32 height;
+ int i;
RING_LOCALS;
dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;
- /* FIXME: Be smarter about this...
+ /* Flush the pixel cache. This ensures no pixel data gets mixed
+ * up with the texture data from the host data blit, otherwise
+ * part of the texture image may be corrupted.
+ */
+ BEGIN_RING( 4 );
+ RADEON_FLUSH_CACHE();
+ RADEON_WAIT_UNTIL_IDLE();
+ ADVANCE_RING();
+
+#ifdef __BIG_ENDIAN
+ /* The Mesa texture functions provide the data in little endian as the
+ * chip wants it, but we need to compensate for the fact that the CP
+ * ring gets byte-swapped
*/
- buf = radeon_freelist_get( dev );
- if ( !buf ) return DRM_ERR(EAGAIN);
+ BEGIN_RING( 2 );
+ OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
+ ADVANCE_RING();
+#endif
- DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
- tex->offset >> 10, tex->pitch, tex->format,
- image->x, image->y, image->width, image->height );
/* The compiler won't optimize away a division by a variable,
* even if the only legal values are powers of two. Thus, we'll
@@ -1120,127 +1131,111 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
return DRM_ERR(EINVAL);
}
- DRM_DEBUG( " tex=%dx%d blit=%d\n",
- tex_width, tex->height, blit_width );
+ DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width );
- /* Flush the pixel cache. This ensures no pixel data gets mixed
- * up with the texture data from the host data blit, otherwise
- * part of the texture image may be corrupted.
- */
- BEGIN_RING( 4 );
-
- RADEON_FLUSH_CACHE();
- RADEON_WAIT_UNTIL_IDLE();
-
- ADVANCE_RING();
-
-#ifdef __BIG_ENDIAN
- /* The Mesa texture functions provide the data in little endian as the
- * chip wants it, but we need to compensate for the fact that the CP
- * ring gets byte-swapped
- */
- BEGIN_RING( 2 );
- OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
- ADVANCE_RING();
-#endif
-
- /* Make a copy of the parameters in case we have to update them
- * for a multi-pass texture blit.
- */
- y = image->y;
- height = image->height;
- data = (const u8 *)image->data;
-
- size = height * blit_width;
-
- if ( size > RADEON_MAX_TEXTURE_SIZE ) {
- /* Texture image is too large, do a multipass upload */
- ret = DRM_ERR(EAGAIN);
+ do {
+ DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
+ tex->offset >> 10, tex->pitch, tex->format,
+ image->x, image->y, image->width, image->height );
- /* Adjust the blit size to fit the indirect buffer */
- height = RADEON_MAX_TEXTURE_SIZE / blit_width;
+ /* Make a copy of some parameters in case we have to
+ * update them for a multi-pass texture blit.
+ */
+ height = image->height;
+ data = (const u8 *)image->data;
+
size = height * blit_width;
- /* Update the input parameters for next time */
- image->y += height;
- image->height -= height;
- image->data = (const char *)image->data + size;
+ if ( size > RADEON_MAX_TEXTURE_SIZE ) {
+ height = RADEON_MAX_TEXTURE_SIZE / blit_width;
+ size = height * blit_width;
+ } else if ( size < 4 && size > 0 ) {
+ size = 4;
+ } else if ( size == 0 ) {
+ return 0;
+ }
- if ( DRM_COPY_TO_USER( tex->image, image, sizeof(*image) ) ) {
- DRM_ERROR( "EFAULT on tex->image\n" );
- return DRM_ERR(EFAULT);
+ buf = radeon_freelist_get( dev );
+ if ( 0 && !buf ) {
+ radeon_do_cp_idle( dev_priv );
+ buf = radeon_freelist_get( dev );
+ }
+ if ( !buf ) {
+ DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
+ DRM_COPY_TO_USER( tex->image, image, sizeof(*image) );
+ return DRM_ERR(EAGAIN);
}
- } else if ( size < 4 && size > 0 ) {
- size = 4;
- }
- dwords = size / 4;
- /* Dispatch the indirect buffer.
- */
- buffer = (u32 *)((char *)dev_priv->buffers->handle + buf->offset);
-
- buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
- buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
- RADEON_GMC_BRUSH_NONE |
- (format << 8) |
- RADEON_GMC_SRC_DATATYPE_COLOR |
- RADEON_ROP3_S |
- RADEON_DP_SRC_SOURCE_HOST_DATA |
- RADEON_GMC_CLR_CMP_CNTL_DIS |
- RADEON_GMC_WR_MSK_DIS);
-
- buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
- buffer[3] = 0xffffffff;
- buffer[4] = 0xffffffff;
- buffer[5] = (y << 16) | image->x;
- buffer[6] = (height << 16) | image->width;
- buffer[7] = dwords;
-
- buffer += 8;
-
- if ( tex_width >= 32 ) {
- /* Texture image width is larger than the minimum, so we
- * can upload it directly.
+ /* Dispatch the indirect buffer.
*/
- if ( DRM_COPY_FROM_USER( buffer, data, dwords * sizeof(u32) ) ) {
- DRM_ERROR( "EFAULT on data, %d dwords\n", dwords );
- return DRM_ERR(EFAULT);
- }
- } else {
- /* Texture image width is less than the minimum, so we
- * need to pad out each image scanline to the minimum
- * width.
- */
- for ( i = 0 ; i < tex->height ; i++ ) {
- if ( DRM_COPY_FROM_USER( buffer, data, tex_width ) ) {
- DRM_ERROR( "EFAULT on pad, %d bytes\n",
- tex_width );
+ buffer = (u32*)((char*)dev_priv->buffers->handle + buf->offset);
+ dwords = size / 4;
+ buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
+ buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_BRUSH_NONE |
+ (format << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_S |
+ RADEON_DP_SRC_SOURCE_HOST_DATA |
+ RADEON_GMC_CLR_CMP_CNTL_DIS |
+ RADEON_GMC_WR_MSK_DIS);
+
+ buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
+ buffer[3] = 0xffffffff;
+ buffer[4] = 0xffffffff;
+ buffer[5] = (image->y << 16) | image->x;
+ buffer[6] = (height << 16) | image->width;
+ buffer[7] = dwords;
+ buffer += 8;
+
+ if ( tex_width >= 32 ) {
+ /* Texture image width is larger than the minimum, so we
+ * can upload it directly.
+ */
+ if ( DRM_COPY_FROM_USER( buffer, data,
+ dwords * sizeof(u32) ) ) {
+ DRM_ERROR( "EFAULT on data, %d dwords\n",
+ dwords );
return DRM_ERR(EFAULT);
}
- buffer += 8;
- data += tex_width;
+ } else {
+ /* Texture image width is less than the minimum, so we
+ * need to pad out each image scanline to the minimum
+ * width.
+ */
+ for ( i = 0 ; i < tex->height ; i++ ) {
+ if ( DRM_COPY_FROM_USER( buffer, data,
+ tex_width ) ) {
+ DRM_ERROR( "EFAULT on pad, %d bytes\n",
+ tex_width );
+ return DRM_ERR(EFAULT);
+ }
+ buffer += 8;
+ data += tex_width;
+ }
}
- }
- buf->pid = DRM_CURRENTPID;
- buf->used = (dwords + 8) * sizeof(u32);
+ buf->pid = DRM_CURRENTPID;
+ buf->used = (dwords + 8) * sizeof(u32);
+ radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
+ radeon_cp_discard_buffer( dev, buf );
- radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
- radeon_cp_discard_buffer( dev, buf );
+ /* Update the input parameters for next time */
+ image->y += height;
+ image->height -= height;
+ (const u8 *)image->data += size;
+ } while (image->height > 0);
/* Flush the pixel cache after the blit completes. This ensures
* the texture data is written out to memory before rendering
* continues.
*/
BEGIN_RING( 4 );
-
RADEON_FLUSH_CACHE();
RADEON_WAIT_UNTIL_2D_IDLE();
-
ADVANCE_RING();
-
- return ret;
+ return 0;
}
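
With the do/while loop above, oversized uploads no longer bounce back to userspace with EAGAIN for each pass; instead each iteration takes as many whole scanlines as fit in RADEON_MAX_TEXTURE_SIZE, dispatches one indirect buffer, and advances image->y, image->height and image->data before looping. A small worked sketch of the per-pass split (standalone and illustrative; the limit is whatever RADEON_MAX_TEXTURE_SIZE works out to on a given build):

    /* Illustrative: how the loop above chops an image into passes. */
    static unsigned int upload_passes(unsigned int height,
                                      unsigned int blit_width,
                                      unsigned int max_size)
    {
            unsigned int lines_per_pass = max_size / blit_width;

            /* e.g. 512 lines of 2048-byte rows with a 64 KB limit:
             * 65536 / 2048 = 32 lines per pass, so 16 passes.
             */
            return (height + lines_per_pass - 1) / lines_per_pass;
    }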