author | Nathan Hand <nathanh@manu.com.au> | 2001-01-08 15:34:25 +0000
---|---|---
committer | Nathan Hand <nathanh@manu.com.au> | 2001-01-08 15:34:25 +0000
commit | be15c03e7a02f585908d9b851bec91ad74b76576 (patch) |
tree | 025718ecb95f0b8acba945ebdb9f517144a71fb4 |
parent | 98b5d4d26f4e97613afccfe8cb0bb05ad96bc960 (diff) |
merge from trunk (tdfx-3-0-0-20010108, tdfx-3-0-0-branch)
34 files changed, 9843 insertions, 1735 deletions
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index b1b8d9768..9fe0038fc 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -6,7 +6,8 @@ # drm.o is a fake target -- it is never built # The real targets are in the module-list O_TARGET := drm.o -module-list := gamma.o tdfx.o r128.o ffb.o mga.o i810.o + +module-list := gamma.o tdfx.o r128.o radeon.o ffb.o mga.o i810.o export-objs := $(patsubst %.o,%_drv.o,$(module-list)) # libs-objs are included in every module so that radical changes to the @@ -25,6 +26,11 @@ export-objs := $(patsubst %.o,%_drv.o,$(module-list)) # memory waste (in the dual-head case) for greatly improved long-term # maintainability. # +# NOTE: lib-objs will be eliminated in future versions, thereby +# eliminating the need to compile the .o files into every module, but +# for now we still need them. +# + lib-objs := init.o memory.o proc.o auth.o context.o drawable.o bufs.o lib-objs += lists.o lock.o ioctl.o fops.o vm.o dma.o ctxbitmap.o @@ -32,57 +38,71 @@ ifeq ($(CONFIG_AGP),y) lib-objs += agpsupport.o else ifeq ($(CONFIG_AGP),m) - lib-objs += agpsupport.o + lib-objs += agpsupport.o endif endif -gamma-objs := $(lib-objs) gamma_drv.o gamma_dma.o -tdfx-objs := $(lib-objs) tdfx_drv.o tdfx_context.o -r128-objs := $(lib-objs) r128_drv.o r128_dma.o r128_context.o r128_bufs.o -ffb-objs := $(lib-objs) ffb_drv.o ffb_context.o -mga-objs := $(lib-objs) mga_drv.o mga_dma.o mga_context.o mga_bufs.o \ - mga_state.o -i810-objs := $(lib-objs) i810_drv.o i810_dma.o i810_context.o i810_bufs.o - -obj-$(CONFIG_DRM_GAMMA) += gamma.o $(gamma-objs) -obj-$(CONFIG_DRM_TDFX) += tdfx.o $(tdfx-objs) -obj-$(CONFIG_DRM_R128) += r128.o $(r128-objs) -obj-$(CONFIG_DRM_FFB) += ffb.o $(ffb-objs) - -ifneq ($CONFIG_AGP),) -obj-$(CONFIG_DRM_MGA) += mga.o $(mga-objs) -obj-$(CONFIG_DRM_I810) += i810.o $(i810-objs) +gamma-objs := gamma_drv.o gamma_dma.o +tdfx-objs := tdfx_drv.o tdfx_context.o +r128-objs := r128_drv.o r128_cce.o r128_context.o r128_bufs.o \ + r128_state.o +radeon-objs := radeon_drv.o radeon_cp.o radeon_context.o radeon_bufs.o \ + radeon_state.o +ffb-objs := ffb_drv.o ffb_context.o +mga-objs := mga_drv.o mga_dma.o mga_context.o mga_bufs.o \ + mga_state.o +i810-objs := i810_drv.o i810_dma.o i810_context.o i810_bufs.o + +obj-$(CONFIG_DRM_GAMMA) += gamma.o +obj-$(CONFIG_DRM_TDFX) += tdfx.o +obj-$(CONFIG_DRM_R128) += r128.o +obj-$(CONFIG_DRM_RADEON) += radeon.o +obj-$(CONFIG_DRM_FFB) += ffb.o +obj-$(CONFIG_DRM_MGA) += mga.o +obj-$(CONFIG_DRM_I810) += i810.o + + +# When linking into the kernel, link the library just once. +# If making modules, we include the library into each module + +lib-objs-mod := $(patsubst %.o,%-mod.o,$(lib-objs)) + +ifdef MAKING_MODULES + lib = drmlib-mod.a +else + obj-y += drmlib.a endif -# Take module names out of obj-y and int-m +include $(TOPDIR)/Rules.make -obj-y := $(filter-out $(module-list), $(obj-y)) -int-m := $(filter-out $(module-list), $(obj-m)) +$(patsubst %.o,%.c,$(lib-objs-mod)): + @ln -sf $(subst -mod,,$@) $@ -# Translate to Rules.make lists. 
+drmlib-mod.a: $(lib-objs-mod) + rm -f $@ + $(AR) $(EXTRA_ARFLAGS) rcs $@ $(lib-objs-mod) -O_OBJS := $(filter-out $(export-objs), $(obj-y)) -OX_OBJS := $(filter $(export-objs), $(obj-y)) -M_OBJS := $(sort $(filter $(module-list), $(obj-m))) -MI_OBJS := $(sort $(filter-out $(export-objs), $(int-m))) -MIX_OBJS := $(sort $(filter $(export-objs), $(int-m))) +drmlib.a: $(lib-objs) + rm -f $@ + $(AR) $(EXTRA_ARFLAGS) rcs $@ $(lib-objs) -include $(TOPDIR)/Rules.make +gamma.o: $(gamma-objs) $(lib) + $(LD) -r -o $@ $(gamma-objs) $(lib) -gamma.o: $(gamma-objs) - $(LD) -r -o $@ $(gamma-objs) +tdfx.o: $(tdfx-objs) $(lib) + $(LD) -r -o $@ $(tdfx-objs) $(lib) -tdfx.o: $(tdfx-objs) - $(LD) -r -o $@ $(tdfx-objs) +mga.o: $(mga-objs) $(lib) + $(LD) -r -o $@ $(mga-objs) $(lib) -mga.o: $(mga-objs) - $(LD) -r -o $@ $(mga-objs) +i810.o: $(i810-objs) $(lib) + $(LD) -r -o $@ $(i810-objs) $(lib) -i810.o: $(i810-objs) - $(LD) -r -o $@ $(i810-objs) +r128.o: $(r128-objs) $(lib) + $(LD) -r -o $@ $(r128-objs) $(lib) -r128.o: $(r128-objs) - $(LD) -r -o $@ $(r128-objs) +radeon.o: $(radeon-objs) $(lib) + $(LD) -r -o $@ $(radeon-objs) $(lib) -ffb.o: $(ffb-objs) - $(LD) -r -o $@ $(ffb-objs) +ffb.o: $(ffb-objs) $(lib) + $(LD) -r -o $@ $(ffb-objs) $(lib) diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c index 26fe0c1c1..aa824a79c 100644 --- a/linux-core/i810_dma.c +++ b/linux-core/i810_dma.c @@ -924,7 +924,7 @@ int i810_irq_install(drm_device_t *dev, int irq) dev->dma->next_queue = NULL; dev->dma->this_buffer = NULL; - dev->tq.next = NULL; + INIT_LIST_HEAD(&dev->tq.list); dev->tq.sync = 0; dev->tq.routine = i810_dma_task_queue; dev->tq.data = dev; diff --git a/linux-core/mga_drv.c b/linux-core/mga_drv.c index 883eb75ca..d1c39e999 100644 --- a/linux-core/mga_drv.c +++ b/linux-core/mga_drv.c @@ -38,7 +38,7 @@ #define MGA_DESC "Matrox G200/G400" #define MGA_DATE "20000928" #define MGA_MAJOR 2 -#define MGA_MINOR 0 +#define MGA_MINOR 1 #define MGA_PATCHLEVEL 1 static drm_device_t mga_device; @@ -114,6 +114,7 @@ static drm_ioctl_desc_t mga_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_MGA_VERTEX)] = { mga_vertex, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_MGA_FLUSH)] = { mga_flush_ioctl, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_MGA_INDICES)] = { mga_indices, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_MGA_BLIT)] = { mga_blit, 1, 0 }, }; #define MGA_IOCTL_COUNT DRM_ARRAY_SIZE(mga_ioctls) @@ -502,7 +503,7 @@ int mga_release(struct inode *inode, struct file *filp) if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && dev->lock.pid == current->pid) { mga_reclaim_buffers(dev, priv->pid); - DRM_INFO("Process %d dead (ctx %d, d_s = 0x%02x)\n", + DRM_INFO("Process %d dead (ctx %d, d_s = 0x%02lx)\n", current->pid, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock), dev->dev_private ? diff --git a/linux-core/r128_drv.c b/linux-core/r128_drv.c index 969ada93f..cf2589251 100644 --- a/linux-core/r128_drv.c +++ b/linux-core/r128_drv.c @@ -24,8 +24,10 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * Authors: Rickard E. (Rik) Faith <faith@valinux.com> - * Kevin E. Martin <martin@valinux.com> + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> + * Kevin E. 
Martin <martin@valinux.com> + * Gareth Hughes <gareth@valinux.com> * */ @@ -33,15 +35,15 @@ #include "drmP.h" #include "r128_drv.h" -#define R128_NAME "r128" -#define R128_DESC "ATI Rage 128" -#define R128_DATE "20000928" -#define R128_MAJOR 1 -#define R128_MINOR 0 -#define R128_PATCHLEVEL 0 +#define R128_NAME "r128" +#define R128_DESC "ATI Rage 128" +#define R128_DATE "20010101" +#define R128_MAJOR 2 +#define R128_MINOR 1 +#define R128_PATCHLEVEL 4 -static drm_device_t r128_device; -drm_ctx_t r128_res_ctx; +static drm_device_t r128_device; +drm_ctx_t r128_res_ctx; static struct file_operations r128_fops = { #if LINUX_VERSION_CODE >= 0x020400 @@ -65,52 +67,63 @@ static struct miscdevice r128_misc = { }; static drm_ioctl_desc_t r128_ioctls[] = { - [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { r128_version, 0, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 }, - - [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { r128_addbufs, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { r128_mapbufs, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 }, - - [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { r128_addctx, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { r128_rmctx, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { r128_modctx, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { r128_getctx, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { r128_switchctx, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { r128_newctx, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { r128_resctx, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { r128_lock, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { r128_unlock, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { r128_version, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 }, + + [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { r128_addbufs, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { r128_mapbufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 }, + + [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { r128_addctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { r128_rmctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { r128_modctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { r128_getctx, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { 
r128_switchctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { r128_newctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { r128_resctx, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { r128_cce_buffers, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { r128_lock, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { r128_unlock, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 }, #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 }, #endif - [DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_init_cce, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)] = { r128_eng_reset, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_R128_FLUSH)] = { r128_eng_flush, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_R128_PACKET)] = { r128_submit_pkt, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_R128_IDLE)] = { r128_cce_idle, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)] = { r128_vertex_buf, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_cce_init, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_START)] = { r128_cce_start, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_STOP)] = { r128_cce_stop, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_RESET)] = { r128_cce_reset, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_IDLE)] = { r128_cce_idle, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)] = { r128_engine_reset, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_FULLSCREEN)]= { r128_fullscreen, 1, 0 }, + + [DRM_IOCTL_NR(DRM_IOCTL_R128_SWAP)] = { r128_cce_swap, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_CLEAR)] = { r128_cce_clear, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)] = { r128_cce_vertex, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_INDICES)] = { r128_cce_indices, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_BLIT)] = { r128_cce_blit, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_DEPTH)] = { r128_cce_depth, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_STIPPLE)] = { r128_cce_stipple, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_INDIRECT)] = { r128_cce_indirect, 1, 1 }, }; #define R128_IOCTL_COUNT DRM_ARRAY_SIZE(r128_ioctls) @@ -349,12 +362,12 @@ static int __init r128_init(void) #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) dev->agp = drm_agp_init(); - if (dev->agp == NULL) { - DRM_ERROR("Cannot initialize agpgart module.\n"); - drm_proc_cleanup(); - misc_deregister(&r128_misc); - r128_takedown(dev); - return -ENOMEM; + if (dev->agp == NULL) { + DRM_ERROR("Cannot initialize agpgart module.\n"); + drm_proc_cleanup(); + misc_deregister(&r128_misc); + 
r128_takedown(dev); + return -ENOMEM; } #ifdef CONFIG_MTRR @@ -413,8 +426,8 @@ module_init(r128_init); module_exit(r128_cleanup); -int r128_version(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +int r128_version(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) { drm_version_t version; int len; @@ -424,13 +437,13 @@ int r128_version(struct inode *inode, struct file *filp, unsigned int cmd, sizeof(version))) return -EFAULT; -#define DRM_COPY(name,value) \ - len = strlen(value); \ - if (len > name##_len) len = name##_len; \ - name##_len = strlen(value); \ - if (len && name) { \ - if (copy_to_user(name, value, len)) \ - return -EFAULT; \ +#define DRM_COPY(name,value) \ + len = strlen(value); \ + if (len > name##_len) len = name##_len; \ + name##_len = strlen(value); \ + if (len && name) { \ + if (copy_to_user(name, value, len)) \ + return -EFAULT; \ } version.version_major = R128_MAJOR; @@ -478,7 +491,17 @@ int r128_release(struct inode *inode, struct file *filp) lock_kernel(); dev = priv->dev; + DRM_DEBUG("open_count = %d\n", dev->open_count); + + /* Force the cleanup of page flipping when required */ + if ( dev->dev_private ) { + drm_r128_private_t *dev_priv = dev->dev_private; + if ( dev_priv->page_flipping ) { + r128_do_cleanup_pageflip( dev ); + } + } + if (!(retcode = drm_release(inode, filp))) { #if LINUX_VERSION_CODE < 0x020333 MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */ @@ -506,9 +529,8 @@ int r128_release(struct inode *inode, struct file *filp) } /* r128_ioctl is called whenever a process performs an ioctl on /dev/drm. */ - -int r128_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +int r128_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) { int nr = DRM_IOCTL_NR(cmd); drm_file_t *priv = filp->private_data; @@ -534,19 +556,25 @@ int r128_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, DRM_DEBUG("no function\n"); retcode = -EINVAL; } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN)) - || (ioctl->auth_needed && !priv->authenticated)) { + || (ioctl->auth_needed && !priv->authenticated)) { retcode = -EACCES; } else { retcode = (func)(inode, filp, cmd, arg); } } +#if 0 + if ( retcode ) { + DRM_INFO( "%s 0x%x ret = %d\n", __FUNCTION__, nr, retcode ); + } +#endif + atomic_dec(&dev->ioctl_count); return retcode; } -int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +int r128_lock(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; @@ -572,33 +600,10 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd, lock.context, current->pid, dev->lock.hw_lock->lock, lock.flags); -#if 0 - /* dev->queue_count == 0 right now for - r128. FIXME? */ - if (lock.context < 0 || lock.context >= dev->queue_count) + if (lock.context < 0) return -EINVAL; -#endif if (!ret) { -#if 0 - if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) - != lock.context) { - long j = jiffies - dev->lock.lock_time; - - if (lock.context == r128_res_ctx.handle && - j >= 0 && j < DRM_LOCK_SLICE) { - /* Can't take lock if we just had it and - there is contention. 
*/ - DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d jiffies=%d\n", - lock.context, current->pid, j, - dev->lock.lock_time, jiffies); - current->state = TASK_INTERRUPTIBLE; - current->policy |= SCHED_YIELD; - schedule_timeout(DRM_LOCK_SLICE-j); - DRM_DEBUG("jiffies=%d\n", jiffies); - } - } -#endif add_wait_queue(&dev->lock.lock_queue, &entry); for (;;) { current->state = TASK_INTERRUPTIBLE; @@ -617,9 +622,6 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd, /* Contention */ atomic_inc(&dev->total_sleeps); -#if 1 - current->policy |= SCHED_YIELD; -#endif schedule(); if (signal_pending(current)) { ret = -ERESTARTSYS; @@ -630,32 +632,6 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd, remove_wait_queue(&dev->lock.lock_queue, &entry); } -#if 0 - if (!ret && dev->last_context != lock.context && - lock.context != r128_res_ctx.handle && - dev->last_context != r128_res_ctx.handle) { - add_wait_queue(&dev->context_wait, &entry); - current->state = TASK_INTERRUPTIBLE; - /* PRE: dev->last_context != lock.context */ - r128_context_switch(dev, dev->last_context, lock.context); - /* POST: we will wait for the context - switch and will dispatch on a later call - when dev->last_context == lock.context - NOTE WE HOLD THE LOCK THROUGHOUT THIS - TIME! */ - current->policy |= SCHED_YIELD; - schedule(); - current->state = TASK_RUNNING; - remove_wait_queue(&dev->context_wait, &entry); - if (signal_pending(current)) { - ret = -EINTR; - } else if (dev->last_context != lock.context) { - DRM_ERROR("Context mismatch: %d %d\n", - dev->last_context, lock.context); - } - } -#endif - if (!ret) { sigemptyset(&dev->sigmask); sigaddset(&dev->sigmask, SIGSTOP); @@ -670,6 +646,7 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd, } if (lock.flags & _DRM_LOCK_QUIESCENT) { /* Make hardware quiescent */ + DRM_DEBUG( "not quiescent!\n" ); #if 0 r128_quiescent(dev); #endif @@ -692,8 +669,8 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd, } -int r128_unlock(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +int r128_unlock(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; diff --git a/linux-core/radeon_drv.c b/linux-core/radeon_drv.c new file mode 100644 index 000000000..0113ed97c --- /dev/null +++ b/linux-core/radeon_drv.c @@ -0,0 +1,702 @@ +/* radeon_drv.c -- ATI Radeon driver -*- linux-c -*- + * + * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Fremont, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Kevin E. Martin <martin@valinux.com> + * Rickard E. (Rik) Faith <faith@valinux.com> + * + */ + +#include <linux/config.h> +#include "drmP.h" +#include "radeon_drv.h" + +#define RADEON_NAME "radeon" +#define RADEON_DESC "ATI Radeon" +#define RADEON_DATE "20010105" +#define RADEON_MAJOR 1 +#define RADEON_MINOR 0 +#define RADEON_PATCHLEVEL 0 + +static drm_device_t radeon_device; +drm_ctx_t radeon_res_ctx; + +static struct file_operations radeon_fops = { +#if LINUX_VERSION_CODE >= 0x020400 + /* This started being used during 2.4.0-test */ + owner: THIS_MODULE, +#endif + open: radeon_open, + flush: drm_flush, + release: radeon_release, + ioctl: radeon_ioctl, + mmap: drm_mmap, + read: drm_read, + fasync: drm_fasync, + poll: drm_poll, +}; + +static struct miscdevice radeon_misc = { + minor: MISC_DYNAMIC_MINOR, + name: RADEON_NAME, + fops: &radeon_fops, +}; + +static drm_ioctl_desc_t radeon_ioctls[] = { + [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { radeon_version, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 }, + + [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { radeon_addbufs, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { radeon_mapbufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 }, + + [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { radeon_addctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { radeon_rmctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { radeon_modctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { radeon_getctx, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { radeon_switchctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { radeon_newctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { radeon_resctx, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { radeon_lock, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { radeon_unlock, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 }, + +#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 }, +#endif + + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_INIT)] = { 
radeon_cp_init, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_START)] = { radeon_cp_start, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_STOP)] = { radeon_cp_stop, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_RESET)] = { radeon_cp_reset, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_IDLE)] = { radeon_cp_idle, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_RESET)] = { radeon_engine_reset, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FULLSCREEN)] = { radeon_fullscreen, 1, 0 }, + + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_SWAP)] = { radeon_cp_swap, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CLEAR)] = { radeon_cp_clear, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_VERTEX)] = { radeon_cp_vertex, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDICES)] = { radeon_cp_indices, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_BLIT)] = { radeon_cp_blit, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_STIPPLE)] = { radeon_cp_stipple, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDIRECT)]= { radeon_cp_indirect,1, 1 }, +}; +#define RADEON_IOCTL_COUNT DRM_ARRAY_SIZE(radeon_ioctls) + +#ifdef MODULE +static char *radeon = NULL; +#endif + +MODULE_AUTHOR("VA Linux Systems, Inc."); +MODULE_DESCRIPTION("radeon"); +MODULE_PARM(radeon, "s"); + +#ifndef MODULE +/* radeon_options is called by the kernel to parse command-line options + * passed via the boot-loader (e.g., LILO). It calls the insmod option + * routine, drm_parse_drm. + */ + +static int __init radeon_options(char *str) +{ + drm_parse_options(str); + return 1; +} + +__setup("radeon=", radeon_options); +#endif + +static int radeon_setup(drm_device_t *dev) +{ + int i; + + atomic_set(&dev->ioctl_count, 0); + atomic_set(&dev->vma_count, 0); + dev->buf_use = 0; + atomic_set(&dev->buf_alloc, 0); + + drm_dma_setup(dev); + + atomic_set(&dev->total_open, 0); + atomic_set(&dev->total_close, 0); + atomic_set(&dev->total_ioctl, 0); + atomic_set(&dev->total_irq, 0); + atomic_set(&dev->total_ctx, 0); + atomic_set(&dev->total_locks, 0); + atomic_set(&dev->total_unlocks, 0); + atomic_set(&dev->total_contends, 0); + atomic_set(&dev->total_sleeps, 0); + + for (i = 0; i < DRM_HASH_SIZE; i++) { + dev->magiclist[i].head = NULL; + dev->magiclist[i].tail = NULL; + } + dev->maplist = NULL; + dev->map_count = 0; + dev->vmalist = NULL; + dev->lock.hw_lock = NULL; + init_waitqueue_head(&dev->lock.lock_queue); + dev->queue_count = 0; + dev->queue_reserved = 0; + dev->queue_slots = 0; + dev->queuelist = NULL; + dev->irq = 0; + dev->context_flag = 0; + dev->interrupt_flag = 0; + dev->dma_flag = 0; + dev->last_context = 0; + dev->last_switch = 0; + dev->last_checked = 0; + init_timer(&dev->timer); + init_waitqueue_head(&dev->context_wait); + + dev->ctx_start = 0; + dev->lck_start = 0; + + dev->buf_rp = dev->buf; + dev->buf_wp = dev->buf; + dev->buf_end = dev->buf + DRM_BSZ; + dev->buf_async = NULL; + init_waitqueue_head(&dev->buf_readers); + init_waitqueue_head(&dev->buf_writers); + + radeon_res_ctx.handle = -1; + + DRM_DEBUG("\n"); + + /* The kernel's context could be created here, but is now created + in drm_dma_enqueue. This is more resource-efficient for + hardware that does not do DMA, but may mean that + drm_select_queue fails between the time the interrupt is + initialized and the time the queues are initialized. 
*/ + + return 0; +} + + +static int radeon_takedown(drm_device_t *dev) +{ + int i; + drm_magic_entry_t *pt, *next; + drm_map_t *map; + drm_vma_entry_t *vma, *vma_next; + + DRM_DEBUG("\n"); + + down(&dev->struct_sem); + del_timer(&dev->timer); + + if (dev->devname) { + drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER); + dev->devname = NULL; + } + + if (dev->unique) { + drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER); + dev->unique = NULL; + dev->unique_len = 0; + } + /* Clear pid list */ + for (i = 0; i < DRM_HASH_SIZE; i++) { + for (pt = dev->magiclist[i].head; pt; pt = next) { + next = pt->next; + drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); + } + dev->magiclist[i].head = dev->magiclist[i].tail = NULL; + } + +#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) + /* Clear AGP information */ + if (dev->agp) { + drm_agp_mem_t *entry; + drm_agp_mem_t *nexte; + + /* Remove AGP resources, but leave dev->agp + intact until radeon_cleanup is called. */ + for (entry = dev->agp->memory; entry; entry = nexte) { + nexte = entry->next; + if (entry->bound) drm_unbind_agp(entry->memory); + drm_free_agp(entry->memory, entry->pages); + drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); + } + dev->agp->memory = NULL; + + if (dev->agp->acquired) _drm_agp_release(); + + dev->agp->acquired = 0; + dev->agp->enabled = 0; + } +#endif + + /* Clear vma list (only built for debugging) */ + if (dev->vmalist) { + for (vma = dev->vmalist; vma; vma = vma_next) { + vma_next = vma->next; + drm_free(vma, sizeof(*vma), DRM_MEM_VMAS); + } + dev->vmalist = NULL; + } + + /* Clear map area and mtrr information */ + if (dev->maplist) { + for (i = 0; i < dev->map_count; i++) { + map = dev->maplist[i]; + switch (map->type) { + case _DRM_REGISTERS: + case _DRM_FRAME_BUFFER: +#ifdef CONFIG_MTRR + if (map->mtrr >= 0) { + int retcode; + retcode = mtrr_del(map->mtrr, + map->offset, + map->size); + DRM_DEBUG("mtrr_del = %d\n", retcode); + } +#endif + drm_ioremapfree(map->handle, map->size); + break; + case _DRM_SHM: + drm_free_pages((unsigned long)map->handle, + drm_order(map->size) + - PAGE_SHIFT, + DRM_MEM_SAREA); + break; + case _DRM_AGP: + /* Do nothing here, because this is all + handled in the AGP/GART driver. */ + break; + } + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + } + drm_free(dev->maplist, + dev->map_count * sizeof(*dev->maplist), + DRM_MEM_MAPS); + dev->maplist = NULL; + dev->map_count = 0; + } + + drm_dma_takedown(dev); + + dev->queue_count = 0; + if (dev->lock.hw_lock) { + dev->lock.hw_lock = NULL; /* SHM removed */ + dev->lock.pid = 0; + wake_up_interruptible(&dev->lock.lock_queue); + } + up(&dev->struct_sem); + + return 0; +} + +/* radeon_init is called via init_module at module load time, or via + * linux/init/main.c (this is not currently supported). 
*/ + +static int __init radeon_init(void) +{ + int retcode; + drm_device_t *dev = &radeon_device; + + DRM_DEBUG("\n"); + + memset((void *)dev, 0, sizeof(*dev)); + dev->count_lock = SPIN_LOCK_UNLOCKED; + sema_init(&dev->struct_sem, 1); + +#ifdef MODULE + drm_parse_options(radeon); +#endif + + if ((retcode = misc_register(&radeon_misc))) { + DRM_ERROR("Cannot register \"%s\"\n", RADEON_NAME); + return retcode; + } + dev->device = MKDEV(MISC_MAJOR, radeon_misc.minor); + dev->name = RADEON_NAME; + + drm_mem_init(); + drm_proc_init(dev); + +#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) + dev->agp = drm_agp_init(); + if (dev->agp == NULL) { + DRM_ERROR("Cannot initialize agpgart module.\n"); + drm_proc_cleanup(); + misc_deregister(&radeon_misc); + radeon_takedown(dev); + return -ENOMEM; + } + +#ifdef CONFIG_MTRR + dev->agp->agp_mtrr = mtrr_add(dev->agp->agp_info.aper_base, + dev->agp->agp_info.aper_size*1024*1024, + MTRR_TYPE_WRCOMB, + 1); +#endif +#endif + + if((retcode = drm_ctxbitmap_init(dev))) { + DRM_ERROR("Cannot allocate memory for context bitmap.\n"); + drm_proc_cleanup(); + misc_deregister(&radeon_misc); + radeon_takedown(dev); + return retcode; + } + + DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", + RADEON_NAME, + RADEON_MAJOR, + RADEON_MINOR, + RADEON_PATCHLEVEL, + RADEON_DATE, + radeon_misc.minor); + + return 0; +} + +/* radeon_cleanup is called via cleanup_module at module unload time. */ + +static void __exit radeon_cleanup(void) +{ + drm_device_t *dev = &radeon_device; + + DRM_DEBUG("\n"); + + drm_proc_cleanup(); + if (misc_deregister(&radeon_misc)) { + DRM_ERROR("Cannot unload module\n"); + } else { + DRM_INFO("Module unloaded\n"); + } + drm_ctxbitmap_cleanup(dev); + radeon_takedown(dev); +#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) + if (dev->agp) { + drm_agp_uninit(); + drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS); + dev->agp = NULL; + } +#endif +} + +module_init(radeon_init); +module_exit(radeon_cleanup); + + +int radeon_version(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_version_t version; + int len; + + if (copy_from_user(&version, + (drm_version_t *)arg, + sizeof(version))) + return -EFAULT; + +#define DRM_COPY(name,value) \ + len = strlen(value); \ + if (len > name##_len) len = name##_len; \ + name##_len = strlen(value); \ + if (len && name) { \ + if (copy_to_user(name, value, len)) \ + return -EFAULT; \ + } + + version.version_major = RADEON_MAJOR; + version.version_minor = RADEON_MINOR; + version.version_patchlevel = RADEON_PATCHLEVEL; + + DRM_COPY(version.name, RADEON_NAME); + DRM_COPY(version.date, RADEON_DATE); + DRM_COPY(version.desc, RADEON_DESC); + + if (copy_to_user((drm_version_t *)arg, + &version, + sizeof(version))) + return -EFAULT; + return 0; +} + +int radeon_open(struct inode *inode, struct file *filp) +{ + drm_device_t *dev = &radeon_device; + int retcode = 0; + + DRM_DEBUG("open_count = %d\n", dev->open_count); + if (!(retcode = drm_open_helper(inode, filp, dev))) { +#if LINUX_VERSION_CODE < 0x020333 + MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */ +#endif + atomic_inc(&dev->total_open); + spin_lock(&dev->count_lock); + if (!dev->open_count++) { + spin_unlock(&dev->count_lock); + return radeon_setup(dev); + } + spin_unlock(&dev->count_lock); + } + + return retcode; +} + +int radeon_release(struct inode *inode, struct file *filp) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev; + int retcode = 0; + + lock_kernel(); + dev = priv->dev; + + 
DRM_DEBUG("open_count = %d\n", dev->open_count); + + /* Force the cleanup of page flipping when required */ + if ( dev->dev_private ) { + drm_radeon_private_t *dev_priv = dev->dev_private; + if ( dev_priv->page_flipping ) { + radeon_do_cleanup_pageflip( dev ); + } + } + + if (!(retcode = drm_release(inode, filp))) { +#if LINUX_VERSION_CODE < 0x020333 + MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */ +#endif + atomic_inc(&dev->total_close); + spin_lock(&dev->count_lock); + if (!--dev->open_count) { + if (atomic_read(&dev->ioctl_count) || dev->blocked) { + DRM_ERROR("Device busy: %d %d\n", + atomic_read(&dev->ioctl_count), + dev->blocked); + spin_unlock(&dev->count_lock); + unlock_kernel(); + return -EBUSY; + } + spin_unlock(&dev->count_lock); + unlock_kernel(); + return radeon_takedown(dev); + } + spin_unlock(&dev->count_lock); + } + + unlock_kernel(); + return retcode; +} + +/* radeon_ioctl is called whenever a process performs an ioctl on /dev/drm. */ + +int radeon_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int nr = DRM_IOCTL_NR(cmd); + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + int retcode = 0; + drm_ioctl_desc_t *ioctl; + drm_ioctl_t *func; + + atomic_inc(&dev->ioctl_count); + atomic_inc(&dev->total_ioctl); + ++priv->ioctl_count; + + DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n", + current->pid, cmd, nr, dev->device, priv->authenticated); + + if (nr >= RADEON_IOCTL_COUNT) { + retcode = -EINVAL; + } else { + ioctl = &radeon_ioctls[nr]; + func = ioctl->func; + + if (!func) { + DRM_DEBUG("no function\n"); + retcode = -EINVAL; + } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN)) + || (ioctl->auth_needed && !priv->authenticated)) { + retcode = -EACCES; + } else { + retcode = (func)(inode, filp, cmd, arg); + } + } + + atomic_dec(&dev->ioctl_count); + return retcode; +} + +int radeon_lock(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + DECLARE_WAITQUEUE(entry, current); + int ret = 0; + drm_lock_t lock; +#if DRM_DMA_HISTOGRAM + cycles_t start; + + dev->lck_start = start = get_cycles(); +#endif + + if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock))) + return -EFAULT; + + if (lock.context == DRM_KERNEL_CONTEXT) { + DRM_ERROR("Process %d using kernel context %d\n", + current->pid, lock.context); + return -EINVAL; + } + + DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", + lock.context, current->pid, dev->lock.hw_lock->lock, + lock.flags); + + if (lock.context < 0 /* || lock.context >= dev->queue_count */) + return -EINVAL; + + if (!ret) { + add_wait_queue(&dev->lock.lock_queue, &entry); + for (;;) { + current->state = TASK_INTERRUPTIBLE; + if (!dev->lock.hw_lock) { + /* Device has been unregistered */ + ret = -EINTR; + break; + } + if (drm_lock_take(&dev->lock.hw_lock->lock, + lock.context)) { + dev->lock.pid = current->pid; + dev->lock.lock_time = jiffies; + atomic_inc(&dev->total_locks); + break; /* Got lock */ + } + + /* Contention */ + atomic_inc(&dev->total_sleeps); + schedule(); + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + } + current->state = TASK_RUNNING; + remove_wait_queue(&dev->lock.lock_queue, &entry); + } + + if (!ret) { + sigemptyset(&dev->sigmask); + sigaddset(&dev->sigmask, SIGSTOP); + sigaddset(&dev->sigmask, SIGTSTP); + sigaddset(&dev->sigmask, SIGTTIN); + sigaddset(&dev->sigmask, SIGTTOU); + 
dev->sigdata.context = lock.context; + dev->sigdata.lock = dev->lock.hw_lock; + block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); + if (lock.flags & _DRM_LOCK_READY) { + /* Wait for space in DMA/FIFO */ + } + if (lock.flags & _DRM_LOCK_QUIESCENT) { + /* Make hardware quiescent */ + DRM_DEBUG("not quiescent!\n"); +#if 0 + radeon_quiescent(dev); +#endif + } + } + +#if LINUX_VERSION_CODE < 0x020400 + if (lock.context != radeon_res_ctx.handle) { + current->counter = 5; + current->priority = DEF_PRIORITY/4; + } +#endif + DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock"); + +#if DRM_DMA_HISTOGRAM + atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]); +#endif + + return ret; +} + + +int radeon_unlock(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_lock_t lock; + + if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock))) + return -EFAULT; + + if (lock.context == DRM_KERNEL_CONTEXT) { + DRM_ERROR("Process %d using kernel context %d\n", + current->pid, lock.context); + return -EINVAL; + } + + DRM_DEBUG("%d frees lock (%d holds)\n", + lock.context, + _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); + atomic_inc(&dev->total_unlocks); + if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock)) + atomic_inc(&dev->total_contends); + drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT); + /* FIXME: Try to send data to card here */ + if (!dev->context_flag) { + if (drm_lock_free(dev, &dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT)) { + DRM_ERROR("\n"); + } + } + +#if LINUX_VERSION_CODE < 0x020400 + if (lock.context != radeon_res_ctx.handle) { + current->counter = 5; + current->priority = DEF_PRIORITY; + } +#endif + unblock_all_signals(); + return 0; +} diff --git a/linux/Makefile.kernel b/linux/Makefile.kernel index b1b8d9768..9fe0038fc 100644 --- a/linux/Makefile.kernel +++ b/linux/Makefile.kernel @@ -6,7 +6,8 @@ # drm.o is a fake target -- it is never built # The real targets are in the module-list O_TARGET := drm.o -module-list := gamma.o tdfx.o r128.o ffb.o mga.o i810.o + +module-list := gamma.o tdfx.o r128.o radeon.o ffb.o mga.o i810.o export-objs := $(patsubst %.o,%_drv.o,$(module-list)) # libs-objs are included in every module so that radical changes to the @@ -25,6 +26,11 @@ export-objs := $(patsubst %.o,%_drv.o,$(module-list)) # memory waste (in the dual-head case) for greatly improved long-term # maintainability. # +# NOTE: lib-objs will be eliminated in future versions, thereby +# eliminating the need to compile the .o files into every module, but +# for now we still need them. 
+# + lib-objs := init.o memory.o proc.o auth.o context.o drawable.o bufs.o lib-objs += lists.o lock.o ioctl.o fops.o vm.o dma.o ctxbitmap.o @@ -32,57 +38,71 @@ ifeq ($(CONFIG_AGP),y) lib-objs += agpsupport.o else ifeq ($(CONFIG_AGP),m) - lib-objs += agpsupport.o + lib-objs += agpsupport.o endif endif -gamma-objs := $(lib-objs) gamma_drv.o gamma_dma.o -tdfx-objs := $(lib-objs) tdfx_drv.o tdfx_context.o -r128-objs := $(lib-objs) r128_drv.o r128_dma.o r128_context.o r128_bufs.o -ffb-objs := $(lib-objs) ffb_drv.o ffb_context.o -mga-objs := $(lib-objs) mga_drv.o mga_dma.o mga_context.o mga_bufs.o \ - mga_state.o -i810-objs := $(lib-objs) i810_drv.o i810_dma.o i810_context.o i810_bufs.o - -obj-$(CONFIG_DRM_GAMMA) += gamma.o $(gamma-objs) -obj-$(CONFIG_DRM_TDFX) += tdfx.o $(tdfx-objs) -obj-$(CONFIG_DRM_R128) += r128.o $(r128-objs) -obj-$(CONFIG_DRM_FFB) += ffb.o $(ffb-objs) - -ifneq ($CONFIG_AGP),) -obj-$(CONFIG_DRM_MGA) += mga.o $(mga-objs) -obj-$(CONFIG_DRM_I810) += i810.o $(i810-objs) +gamma-objs := gamma_drv.o gamma_dma.o +tdfx-objs := tdfx_drv.o tdfx_context.o +r128-objs := r128_drv.o r128_cce.o r128_context.o r128_bufs.o \ + r128_state.o +radeon-objs := radeon_drv.o radeon_cp.o radeon_context.o radeon_bufs.o \ + radeon_state.o +ffb-objs := ffb_drv.o ffb_context.o +mga-objs := mga_drv.o mga_dma.o mga_context.o mga_bufs.o \ + mga_state.o +i810-objs := i810_drv.o i810_dma.o i810_context.o i810_bufs.o + +obj-$(CONFIG_DRM_GAMMA) += gamma.o +obj-$(CONFIG_DRM_TDFX) += tdfx.o +obj-$(CONFIG_DRM_R128) += r128.o +obj-$(CONFIG_DRM_RADEON) += radeon.o +obj-$(CONFIG_DRM_FFB) += ffb.o +obj-$(CONFIG_DRM_MGA) += mga.o +obj-$(CONFIG_DRM_I810) += i810.o + + +# When linking into the kernel, link the library just once. +# If making modules, we include the library into each module + +lib-objs-mod := $(patsubst %.o,%-mod.o,$(lib-objs)) + +ifdef MAKING_MODULES + lib = drmlib-mod.a +else + obj-y += drmlib.a endif -# Take module names out of obj-y and int-m +include $(TOPDIR)/Rules.make -obj-y := $(filter-out $(module-list), $(obj-y)) -int-m := $(filter-out $(module-list), $(obj-m)) +$(patsubst %.o,%.c,$(lib-objs-mod)): + @ln -sf $(subst -mod,,$@) $@ -# Translate to Rules.make lists. 
+drmlib-mod.a: $(lib-objs-mod) + rm -f $@ + $(AR) $(EXTRA_ARFLAGS) rcs $@ $(lib-objs-mod) -O_OBJS := $(filter-out $(export-objs), $(obj-y)) -OX_OBJS := $(filter $(export-objs), $(obj-y)) -M_OBJS := $(sort $(filter $(module-list), $(obj-m))) -MI_OBJS := $(sort $(filter-out $(export-objs), $(int-m))) -MIX_OBJS := $(sort $(filter $(export-objs), $(int-m))) +drmlib.a: $(lib-objs) + rm -f $@ + $(AR) $(EXTRA_ARFLAGS) rcs $@ $(lib-objs) -include $(TOPDIR)/Rules.make +gamma.o: $(gamma-objs) $(lib) + $(LD) -r -o $@ $(gamma-objs) $(lib) -gamma.o: $(gamma-objs) - $(LD) -r -o $@ $(gamma-objs) +tdfx.o: $(tdfx-objs) $(lib) + $(LD) -r -o $@ $(tdfx-objs) $(lib) -tdfx.o: $(tdfx-objs) - $(LD) -r -o $@ $(tdfx-objs) +mga.o: $(mga-objs) $(lib) + $(LD) -r -o $@ $(mga-objs) $(lib) -mga.o: $(mga-objs) - $(LD) -r -o $@ $(mga-objs) +i810.o: $(i810-objs) $(lib) + $(LD) -r -o $@ $(i810-objs) $(lib) -i810.o: $(i810-objs) - $(LD) -r -o $@ $(i810-objs) +r128.o: $(r128-objs) $(lib) + $(LD) -r -o $@ $(r128-objs) $(lib) -r128.o: $(r128-objs) - $(LD) -r -o $@ $(r128-objs) +radeon.o: $(radeon-objs) $(lib) + $(LD) -r -o $@ $(radeon-objs) $(lib) -ffb.o: $(ffb-objs) - $(LD) -r -o $@ $(ffb-objs) +ffb.o: $(ffb-objs) $(lib) + $(LD) -r -o $@ $(ffb-objs) $(lib) diff --git a/linux/Makefile.linux b/linux/Makefile.linux index 6968276b0..862bb6c3f 100644 --- a/linux/Makefile.linux +++ b/linux/Makefile.linux @@ -11,11 +11,11 @@ # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: -# +# # The above copyright notice and this permission notice (including the next # paragraph) shall be included in all copies or substantial portions of the # Software. -# +# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL @@ -47,7 +47,7 @@ # **** End of SMP/MODVERSIONS detection -MODS= gamma.o tdfx.o r128.o +MODS= gamma.o tdfx.o r128.o radeon.o LIBS= libdrm.a DRMOBJS= init.o memory.o proc.o auth.o context.o drawable.o bufs.o \ @@ -60,9 +60,6 @@ GAMMAHEADERS= gamma_drv.h $(DRMHEADERS) TDFXOBJS= tdfx_drv.o tdfx_context.o TDFXHEADERS= tdfx_drv.h $(DRMHEADERS) -R128OBJS= r128_drv.o r128_dma.o r128_bufs.o r128_context.o -R128HEADERS= r128_drv.h r128_drm.h $(DRMHEADERS) - INC= /usr/include CFLAGS= -O2 $(WARNINGS) @@ -138,11 +135,18 @@ MODS += i810.o endif -MGAOBJS= mga_drv.o mga_dma.o mga_bufs.o mga_state.o mga_context.o +MGAOBJS= mga_drv.o mga_dma.o mga_bufs.o mga_context.o mga_state.o MGAHEADERS= mga_drv.h $(DRMHEADERS) I810OBJS= i810_drv.o i810_dma.o i810_bufs.o i810_context.o I810HEADERS= i810_drv.h $(DRMHEADERS) + +R128OBJS= r128_drv.o r128_cce.o r128_bufs.o r128_context.o r128_state.o +R128HEADERS= r128_drv.h r128_drm.h $(DRMHEADERS) + +RADEONOBJS= radeon_drv.o radeon_cp.o radeon_bufs.o radeon_context.o \ + radeon_state.o +RADEONHEADERS= radeon_drv.h radeon_drm.h $(DRMHEADERS) endif ifeq ($(SIS),1) @@ -156,6 +160,7 @@ MODS += sis.o SISOBJS= sis_drv.o sis_context.o sis_ds.o sis_mm.o SISHEADERS= sis_drv.h sis_ds.h sis_drm.h $(DRMHEADERS) +MODCFLAGS += -DCONFIG_DRM_SIS endif all::;@echo === KERNEL HEADERS IN $(TREE) @@ -205,11 +210,6 @@ tdfx_drv.o: tdfx_drv.c tdfx.o: $(TDFXOBJS) $(LIBS) $(LD) -r $^ -o $@ -r128_drv.o: r128_drv.c - $(CC) $(MODCFLAGS) -DEXPORT_SYMTAB -I$(TREE) -c $< -o $@ -r128.o: $(R128OBJS) $(LIBS) - $(LD) -r $^ -o $@ - sis.o: $(SISOBJS) $(LIBS) $(LD) -r $^ -o $@ @@ -223,6 +223,16 @@ i810_drv.o: i810_drv.c $(CC) $(MODCFLAGS) -DEXPORT_SYMTAB -I$(TREE) -c $< -o $@ i810.o: $(I810OBJS) $(LIBS) $(LD) -r $^ -o $@ + +r128_drv.o: r128_drv.c + $(CC) $(MODCFLAGS) -DEXPORT_SYMTAB -I$(TREE) -c $< -o $@ +r128.o: $(R128OBJS) $(LIBS) + $(LD) -r $^ -o $@ + +radeon_drv.o: radeon_drv.c + $(CC) $(MODCFLAGS) -DEXPORT_SYMTAB -I$(TREE) -c $< -o $@ +radeon.o: $(RADEONOBJS) $(LIBS) + $(LD) -r $^ -o $@ endif .PHONY: ChangeLog @@ -241,10 +251,11 @@ ChangeLog: $(DRMOBJS): $(DRMHEADERS) $(GAMMAOBJS): $(GAMMAHEADERS) $(TDFXOBJS): $(TDFXHEADERS) -$(R128OBJS): $(R128HEADERS) ifeq ($(AGP),1) $(MGAOBJS): $(MGAHEADERS) $(I810OBJS): $(I810HEADERS) +$(R128OBJS): $(R128HEADERS) +$(RADEONOBJS): $(RADEONHEADERS) endif clean: diff --git a/linux/compat-pre24.h b/linux/compat-pre24.h index 77a2bee1c..f6dae008d 100644 --- a/linux/compat-pre24.h +++ b/linux/compat-pre24.h @@ -42,4 +42,9 @@ #define __exit #endif + /* This is a hack that only works for + this code base -- because we always + call this with dev->tq.* */ +#define INIT_LIST_HEAD(pointer) dev->tq.next = NULL + #endif diff --git a/linux/drm.h b/linux/drm.h index 57032d6e2..dc3d262d4 100644 --- a/linux/drm.h +++ b/linux/drm.h @@ -11,11 +11,11 @@ * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL @@ -23,7 +23,7 @@ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. - * + * * Authors: * Rickard E. (Rik) Faith <faith@valinux.com> * @@ -82,7 +82,10 @@ typedef struct drm_clip_rect { #include "mga_drm.h" #include "i810_drm.h" #include "r128_drm.h" +#include "radeon_drm.h" +#ifdef CONFIG_DRM_SIS #include "sis_drm.h" +#endif typedef struct drm_version { int version_major; /* Major version */ @@ -295,89 +298,117 @@ typedef struct drm_agp_info { unsigned short id_device; } drm_agp_info_t; -#define DRM_IOCTL_BASE 'd' -#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) -#define DRM_IOR(nr,size) _IOR(DRM_IOCTL_BASE,nr,size) -#define DRM_IOW(nr,size) _IOW(DRM_IOCTL_BASE,nr,size) -#define DRM_IOWR(nr,size) _IOWR(DRM_IOCTL_BASE,nr,size) - - -#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t) -#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t) -#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t) -#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t) - -#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t) -#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t) -#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, drm_block_t) -#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, drm_block_t) -#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, drm_control_t) -#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, drm_map_t) -#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, drm_buf_desc_t) -#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, drm_buf_desc_t) -#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm_buf_info_t) -#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t) -#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t) - -#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t) -#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t) -#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t) -#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, drm_ctx_t) -#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, drm_ctx_t) -#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, drm_ctx_t) -#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, drm_ctx_res_t) -#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, drm_draw_t) -#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, drm_draw_t) -#define DRM_IOCTL_DMA DRM_IOWR(0x29, drm_dma_t) -#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, drm_lock_t) -#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t) -#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t) - -#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30) -#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31) -#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t) -#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t) -#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t) -#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t) -#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t) -#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t) +#define DRM_IOCTL_BASE 'd' +#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) +#define DRM_IOR(nr,size) _IOR(DRM_IOCTL_BASE,nr,size) +#define DRM_IOW(nr,size) _IOW(DRM_IOCTL_BASE,nr,size) +#define DRM_IOWR(nr,size) _IOWR(DRM_IOCTL_BASE,nr,size) + + +#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t) +#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t) +#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t) +#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t) + +#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t) +#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t) +#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, drm_block_t) +#define 
DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, drm_block_t) +#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, drm_control_t) +#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, drm_map_t) +#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, drm_buf_desc_t) +#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, drm_buf_desc_t) +#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm_buf_info_t) +#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t) +#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t) + +#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t) +#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t) +#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t) +#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, drm_ctx_t) +#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, drm_ctx_t) +#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, drm_ctx_t) +#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, drm_ctx_res_t) +#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, drm_draw_t) +#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, drm_draw_t) +#define DRM_IOCTL_DMA DRM_IOWR(0x29, drm_dma_t) +#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, drm_lock_t) +#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t) +#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t) + +#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30) +#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31) +#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t) +#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t) +#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t) +#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t) +#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t) +#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t) /* Mga specific ioctls */ -#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t) -#define DRM_IOCTL_MGA_SWAP DRM_IOW( 0x41, drm_mga_swap_t) -#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x42, drm_mga_clear_t) -#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x43, drm_mga_iload_t) -#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x44, drm_mga_vertex_t) -#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x45, drm_lock_t ) -#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t) +#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t) +#define DRM_IOCTL_MGA_SWAP DRM_IOW( 0x41, drm_mga_swap_t) +#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x42, drm_mga_clear_t) +#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x43, drm_mga_iload_t) +#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x44, drm_mga_vertex_t) +#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x45, drm_lock_t ) +#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t) +#define DRM_IOCTL_MGA_BLIT DRM_IOW( 0x47, drm_mga_blit_t) /* I810 specific ioctls */ -#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t) -#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t) -#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t) -#define DRM_IOCTL_I810_FLUSH DRM_IO ( 0x43) -#define DRM_IOCTL_I810_GETAGE DRM_IO ( 0x44) -#define DRM_IOCTL_I810_GETBUF DRM_IOWR(0x45, drm_i810_dma_t) -#define DRM_IOCTL_I810_SWAP DRM_IO ( 0x46) -#define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t) -#define DRM_IOCTL_I810_DOCOPY DRM_IO ( 0x48) +#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t) +#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t) +#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t) +#define DRM_IOCTL_I810_FLUSH DRM_IO( 0x43) +#define DRM_IOCTL_I810_GETAGE DRM_IO( 0x44) +#define DRM_IOCTL_I810_GETBUF DRM_IOWR(0x45, drm_i810_dma_t) +#define DRM_IOCTL_I810_SWAP DRM_IO( 0x46) +#define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t) +#define DRM_IOCTL_I810_DOCOPY DRM_IO( 0x48) /* 
Rage 128 specific ioctls */ -#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t) -#define DRM_IOCTL_R128_RESET DRM_IO( 0x41) -#define DRM_IOCTL_R128_FLUSH DRM_IO( 0x42) -#define DRM_IOCTL_R128_IDLE DRM_IO( 0x43) -#define DRM_IOCTL_R128_PACKET DRM_IOW( 0x44, drm_r128_packet_t) -#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x45, drm_r128_vertex_t) - +#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t) +#define DRM_IOCTL_R128_CCE_START DRM_IO( 0x41) +#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( 0x42, drm_r128_cce_stop_t) +#define DRM_IOCTL_R128_CCE_RESET DRM_IO( 0x43) +#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( 0x44) +#define DRM_IOCTL_R128_RESET DRM_IO( 0x46) +#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( 0x47, drm_r128_fullscreen_t) +#define DRM_IOCTL_R128_SWAP DRM_IO( 0x48) +#define DRM_IOCTL_R128_CLEAR DRM_IOW( 0x49, drm_r128_clear_t) +#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x4a, drm_r128_vertex_t) +#define DRM_IOCTL_R128_INDICES DRM_IOW( 0x4b, drm_r128_indices_t) +#define DRM_IOCTL_R128_BLIT DRM_IOW( 0x4c, drm_r128_blit_t) +#define DRM_IOCTL_R128_DEPTH DRM_IOW( 0x4d, drm_r128_depth_t) +#define DRM_IOCTL_R128_STIPPLE DRM_IOW( 0x4e, drm_r128_stipple_t) +#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(0x4f, drm_r128_indirect_t) + +/* Radeon specific ioctls */ +#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( 0x40, drm_radeon_init_t) +#define DRM_IOCTL_RADEON_CP_START DRM_IO( 0x41) +#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( 0x42, drm_radeon_cp_stop_t) +#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( 0x43) +#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( 0x44) +#define DRM_IOCTL_RADEON_RESET DRM_IO( 0x45) +#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( 0x46, drm_radeon_fullscreen_t) +#define DRM_IOCTL_RADEON_SWAP DRM_IO( 0x47) +#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( 0x48, drm_radeon_clear_t) +#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( 0x49, drm_radeon_vertex_t) +#define DRM_IOCTL_RADEON_INDICES DRM_IOW( 0x4a, drm_radeon_indices_t) +#define DRM_IOCTL_RADEON_BLIT DRM_IOW( 0x4b, drm_radeon_blit_t) +#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( 0x4c, drm_radeon_stipple_t) +#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t) + +#ifdef CONFIG_DRM_SIS /* SiS specific ioctls */ -#define SIS_IOCTL_FB_ALLOC DRM_IOWR( 0x44, drm_sis_mem_t) -#define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t) -#define SIS_IOCTL_AGP_INIT DRM_IOWR( 0x53, drm_sis_agp_t) -#define SIS_IOCTL_AGP_ALLOC DRM_IOWR( 0x54, drm_sis_mem_t) -#define SIS_IOCTL_AGP_FREE DRM_IOW( 0x55, drm_sis_mem_t) -#define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t) -#define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49) -#define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50) +#define SIS_IOCTL_FB_ALLOC DRM_IOWR(0x44, drm_sis_mem_t) +#define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t) +#define SIS_IOCTL_AGP_INIT DRM_IOWR(0x53, drm_sis_agp_t) +#define SIS_IOCTL_AGP_ALLOC DRM_IOWR(0x54, drm_sis_mem_t) +#define SIS_IOCTL_AGP_FREE DRM_IOW( 0x55, drm_sis_mem_t) +#define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t) +#define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49) +#define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50) +#endif #endif diff --git a/linux/gamma_dma.c b/linux/gamma_dma.c index 4ab67281c..4854c56d5 100644 --- a/linux/gamma_dma.c +++ b/linux/gamma_dma.c @@ -651,7 +651,7 @@ int gamma_irq_install(drm_device_t *dev, int irq) dev->dma->next_queue = NULL; dev->dma->this_buffer = NULL; - dev->tq.next = NULL; + INIT_LIST_HEAD(&dev->tq.list); dev->tq.sync = 0; dev->tq.routine = gamma_dma_schedule_tq_wrapper; dev->tq.data = dev; diff --git a/linux/i810_dma.c b/linux/i810_dma.c index 26fe0c1c1..aa824a79c 
100644 --- a/linux/i810_dma.c +++ b/linux/i810_dma.c @@ -924,7 +924,7 @@ int i810_irq_install(drm_device_t *dev, int irq) dev->dma->next_queue = NULL; dev->dma->this_buffer = NULL; - dev->tq.next = NULL; + INIT_LIST_HEAD(&dev->tq.list); dev->tq.sync = 0; dev->tq.routine = i810_dma_task_queue; dev->tq.data = dev; diff --git a/linux/mga_dma.c b/linux/mga_dma.c index 6adffb8a9..356376ca9 100644 --- a/linux/mga_dma.c +++ b/linux/mga_dma.c @@ -143,7 +143,7 @@ static inline void mga_dma_quiescent(drm_device_t *dev) unsigned long end; int i; - DRM_DEBUG("dispatch_status = 0x%02x\n", dev_priv->dispatch_status); + DRM_DEBUG("dispatch_status = 0x%02lx\n", dev_priv->dispatch_status); end = jiffies + (HZ*3); while(1) { if(!test_and_set_bit(MGA_IN_DISPATCH, @@ -154,7 +154,7 @@ static inline void mga_dma_quiescent(drm_device_t *dev) DRM_ERROR("irqs: %d wanted %d\n", atomic_read(&dev->total_irq), atomic_read(&dma->total_lost)); - DRM_ERROR("lockup: dispatch_status = 0x%02x," + DRM_ERROR("lockup: dispatch_status = 0x%02lx," " jiffies = %lu, end = %lu\n", dev_priv->dispatch_status, jiffies, end); return; @@ -177,7 +177,7 @@ static inline void mga_dma_quiescent(drm_device_t *dev) sarea_priv->dirty |= MGA_DMA_FLUSH; clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status); - DRM_DEBUG("exit, dispatch_status = 0x%02x\n", + DRM_DEBUG("exit, dispatch_status = 0x%02lx\n", dev_priv->dispatch_status); } @@ -818,7 +818,7 @@ int mga_irq_install(drm_device_t *dev, int irq) dev->dma->next_buffer = NULL; dev->dma->next_queue = NULL; dev->dma->this_buffer = NULL; - dev->tq.next = NULL; + INIT_LIST_HEAD(&dev->tq.list); dev->tq.sync = 0; dev->tq.routine = mga_dma_task_queue; dev->tq.data = dev; diff --git a/linux/mga_drm.h b/linux/mga_drm.h index 52cf77665..538133937 100644 --- a/linux/mga_drm.h +++ b/linux/mga_drm.h @@ -271,4 +271,14 @@ typedef struct _drm_mga_indices { int discard; /* client finished with buffer? */ } drm_mga_indices_t; +typedef struct _drm_mga_blit { + unsigned int planemask; + unsigned int source; + unsigned int dest; + int delta_sx, delta_sy; + int delta_dx, delta_dy; + int height, ydir; /* flip image vertically */ + int source_pitch, dest_pitch; +} drm_mga_blit_t; + #endif diff --git a/linux/mga_drv.c b/linux/mga_drv.c index 883eb75ca..d1c39e999 100644 --- a/linux/mga_drv.c +++ b/linux/mga_drv.c @@ -38,7 +38,7 @@ #define MGA_DESC "Matrox G200/G400" #define MGA_DATE "20000928" #define MGA_MAJOR 2 -#define MGA_MINOR 0 +#define MGA_MINOR 1 #define MGA_PATCHLEVEL 1 static drm_device_t mga_device; @@ -114,6 +114,7 @@ static drm_ioctl_desc_t mga_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_MGA_VERTEX)] = { mga_vertex, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_MGA_FLUSH)] = { mga_flush_ioctl, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_MGA_INDICES)] = { mga_indices, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_MGA_BLIT)] = { mga_blit, 1, 0 }, }; #define MGA_IOCTL_COUNT DRM_ARRAY_SIZE(mga_ioctls) @@ -502,7 +503,7 @@ int mga_release(struct inode *inode, struct file *filp) if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && dev->lock.pid == current->pid) { mga_reclaim_buffers(dev, priv->pid); - DRM_INFO("Process %d dead (ctx %d, d_s = 0x%02x)\n", + DRM_INFO("Process %d dead (ctx %d, d_s = 0x%02lx)\n", current->pid, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock), dev->dev_private ? 
diff --git a/linux/mga_drv.h b/linux/mga_drv.h index e37cc2ef4..d7bf63263 100644 --- a/linux/mga_drv.h +++ b/linux/mga_drv.h @@ -38,7 +38,7 @@ #define MGA_BUF_NEEDS_OVERFLOW 3 typedef struct { - u32 buffer_status; + long buffer_status; /* long req'd for set_bit() --RR */ int num_dwords; int max_dwords; u32 *current_dma_ptr; @@ -62,7 +62,7 @@ typedef struct _drm_mga_freelist { #define MGA_IN_GETBUF 3 typedef struct _drm_mga_private { - u32 dispatch_status; + long dispatch_status; /* long req'd for set_bit() --RR */ unsigned int next_prim_age; __volatile__ unsigned int last_prim_age; int reserved_map_idx; @@ -160,6 +160,8 @@ extern int mga_vertex(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int mga_indices(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); +extern int mga_blit(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); /* mga_context.c */ extern int mga_resctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); @@ -339,6 +341,7 @@ drm_mga_prim_buf_t *tmp_buf = \ #define MGAREG_SECEND 0x2c44 #define MGAREG_SETUPADDRESS 0x2cd0 #define MGAREG_SETUPEND 0x2cd4 +#define MGAREG_SGN 0x1c58 #define MGAREG_SOFTRAP 0x2c48 #define MGAREG_SRCORG 0x2cb4 #define MGAREG_STATUS 0x1e14 @@ -497,6 +500,12 @@ drm_mga_prim_buf_t *tmp_buf = \ #define DC_clipdis_enable 0x80000000 +#define SO_srcacc_pci 0x0 +#define SO_srcacc_agp 0x2 +#define SO_srcmap_fb 0x0 +#define SO_srcmap_sys 0x1 + + #define SETADD_mode_vertlist 0x0 diff --git a/linux/mga_state.c b/linux/mga_state.c index 6ac3d6059..bba8fa35b 100644 --- a/linux/mga_state.c +++ b/linux/mga_state.c @@ -491,6 +491,8 @@ static void mga_dma_dispatch_tex_blit(drm_device_t * dev, int length, unsigned int destOrg) { drm_mga_private_t *dev_priv = dev->dev_private; + drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; + unsigned int *regs = sarea_priv->ContextState; int use_agp = PDEA_pagpxfer_enable | 0x00000001; u16 y2; PRIMLOCALS; @@ -505,7 +507,7 @@ static void mga_dma_dispatch_tex_blit(drm_device_t * dev, PRIMOUTREG(MGAREG_AR5, 64); PRIMOUTREG(MGAREG_PITCH, 64); - PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_PLNWT, ~0); PRIMOUTREG(MGAREG_DMAPAD, 0); PRIMOUTREG(MGAREG_DWGCTL, MGA_COPY_CMD); @@ -514,7 +516,7 @@ static void mga_dma_dispatch_tex_blit(drm_device_t * dev, PRIMOUTREG(MGAREG_FXBNDRY, (63 << 16)); PRIMOUTREG(MGAREG_YDSTLEN + MGAREG_MGA_EXEC, y2); - PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_PLNWT, regs[MGA_CTXREG_PLNWT]); PRIMOUTREG(MGAREG_SRCORG, 0); PRIMOUTREG(MGAREG_PITCH, dev_priv->stride / dev_priv->cpp); PRIMOUTREG(MGAREG_DWGSYNC, 0x7000); @@ -691,7 +693,7 @@ static void mga_dma_dispatch_clear(drm_device_t * dev, int flags, /* Force reset of DWGCTL */ PRIMOUTREG(MGAREG_DMAPAD, 0); PRIMOUTREG(MGAREG_DMAPAD, 0); - PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_PLNWT, regs[MGA_CTXREG_PLNWT]); PRIMOUTREG(MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL]); PRIMADVANCE(dev_priv); } @@ -722,7 +724,7 @@ static void mga_dma_dispatch_swap(drm_device_t * dev) PRIMOUTREG(MGAREG_DMAPAD, 0); PRIMOUTREG(MGAREG_DMAPAD, 0); - PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_PLNWT, ~0); PRIMOUTREG(MGAREG_DWGCTL, MGA_COPY_CMD); for (i = 0; i < nbox; i++) { @@ -739,13 +741,141 @@ static void mga_dma_dispatch_swap(drm_device_t * dev) /* Force reset of DWGCTL */ PRIMOUTREG(MGAREG_DMAPAD, 0); - PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_PLNWT, regs[MGA_CTXREG_PLNWT]); PRIMOUTREG(MGAREG_SRCORG, 0); PRIMOUTREG(MGAREG_DWGCTL, 
regs[MGA_CTXREG_DWGCTL]); PRIMADVANCE(dev_priv); } +/* #define BLIT_LEFT 1 */ +/* #define BLIT_UP 4 */ + +static void mga_dma_dispatch_blit(drm_device_t * dev, + unsigned int planemask, + unsigned int source, + unsigned int dest, + int delta_sx, int delta_sy, + int delta_dx, int delta_dy, + int source_pitch, + int dest_pitch, + int height, + int ydir) +{ + drm_mga_private_t *dev_priv = dev->dev_private; + drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; + unsigned int *regs = sarea_priv->ContextState; + int nbox = sarea_priv->nbox; + drm_clip_rect_t *pbox = sarea_priv->boxes; + int pixel_stride = dev_priv->stride / dev_priv->cpp; + u32 scandir = 0, i; + + PRIMLOCALS; + + PRIM_OVERFLOW(dev, dev_priv, (MGA_NR_SAREA_CLIPRECTS * 5) + 20); + + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_DWGSYNC, 0x7100); + PRIMOUTREG(MGAREG_DWGSYNC, 0x7000); + + PRIMOUTREG(MGAREG_DWGCTL, MGA_COPY_CMD); + PRIMOUTREG(MGAREG_PLNWT, planemask); + PRIMOUTREG(MGAREG_SRCORG, source); + PRIMOUTREG(MGAREG_DSTORG, dest); + + PRIMOUTREG(MGAREG_SGN, scandir); + PRIMOUTREG(MGAREG_MACCESS, dev_priv->mAccess); + PRIMOUTREG(MGAREG_AR5, ydir * source_pitch); + PRIMOUTREG(MGAREG_PITCH, dest_pitch); + + for (i = 0; i < nbox; i++) { + int srcx = pbox[i].x1 + delta_sx; + int srcy = pbox[i].y1 + delta_sy; + int dstx = pbox[i].x1 + delta_dx; + int dsty = pbox[i].y1 + delta_dy; + int h = pbox[i].y2 - pbox[i].y1; + int w = pbox[i].x2 - pbox[i].x1 - 1; + int start; + + if (ydir == -1) { + srcy = height - srcy - 1; + } + + start = srcy * source_pitch + srcx; + + PRIMOUTREG(MGAREG_AR0, start + w); + PRIMOUTREG(MGAREG_AR3, start); + PRIMOUTREG(MGAREG_FXBNDRY, ((dstx+w) << 16) | (dstx & 0xffff)); + PRIMOUTREG(MGAREG_YDSTLEN + MGAREG_MGA_EXEC, (dsty << 16) | h); + } + + /* Do something to flush AGP? + */ + + /* Force reset of DWGCTL */ + PRIMOUTREG(MGAREG_DMAPAD, 0); + PRIMOUTREG(MGAREG_PLNWT, regs[MGA_CTXREG_PLNWT]); + PRIMOUTREG(MGAREG_PITCH, pixel_stride); + PRIMOUTREG(MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL]); + + PRIMADVANCE(dev_priv); +} + + +int mga_blit(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_mga_private_t *dev_priv = + (drm_mga_private_t *) dev->dev_private; + drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_mga_blit_t blit; + + if (copy_from_user(&blit, (drm_mga_blit_t *) arg, sizeof(blit))) + return -EFAULT; + + DRM_DEBUG("%s\n", __FUNCTION__); + + if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("mga_blit_bufs called without lock held\n"); + return -EINVAL; + } + + if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) + sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; + + /* Make sure we restore the 3D state next time. 
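+	 * The blit leaves SRCORG, DSTORG and the other drawing registers changed, so the full context state must be re-sent before the next 3D primitive.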
+ */ + dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CTX; + + if ((blit.source & 0x3) != (SO_srcmap_sys|SO_srcacc_pci) && + (blit.dest & 0x3) != (SO_srcmap_sys|SO_srcacc_pci)) + { + mga_dma_dispatch_blit(dev, + blit.planemask, + blit.source, + blit.dest, + blit.delta_sx, blit.delta_sy, + blit.delta_dx, blit.delta_dy, + blit.source_pitch, + blit.dest_pitch, + blit.height, + blit.ydir); + } + + + PRIMUPDATE(dev_priv); + +#ifdef __i386__ + mga_flush_write_combine(); +#endif + mga_dma_schedule(dev, 1); + return 0; +} + + int mga_clear_bufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { diff --git a/linux/r128_bufs.c b/linux/r128_bufs.c index 7e76441ee..a060749e3 100644 --- a/linux/r128_bufs.c +++ b/linux/r128_bufs.c @@ -11,11 +11,11 @@ * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL @@ -23,11 +23,11 @@ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. - * + * * Authors: Kevin E. Martin <martin@valinux.com> * Rickard E. (Rik) Faith <faith@valinux.com> * Jeff Hartmann <jhartmann@valinux.com> - * + * */ #define __NO_VERSION__ @@ -94,7 +94,7 @@ int r128_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd, } atomic_inc(&dev->buf_alloc); spin_unlock(&dev->count_lock); - + down(&dev->struct_sem); entry = &dma->bufs[order]; if (entry->buf_count) { @@ -102,7 +102,7 @@ int r128_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd, atomic_dec(&dev->buf_alloc); return -ENOMEM; /* May only call once for each order */ } - + entry->buflist = drm_alloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS); if (!entry->buflist) { @@ -111,7 +111,7 @@ int r128_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd, return -ENOMEM; } memset(entry->buflist, 0, count * sizeof(*entry->buflist)); - + entry->buf_size = size; entry->page_order = page_order; offset = 0; @@ -243,16 +243,16 @@ int r128_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd, if (dma->flags & _DRM_DMA_USE_AGP) { drm_map_t *map; - map = dev_priv->agp_vertbufs; + map = dev_priv->buffers; if (!map) { retcode = -EINVAL; goto done; } down(&current->mm->mmap_sem); - virtual = do_mmap(filp, 0, map->size, + virtual = do_mmap(filp, 0, map->size, PROT_READ|PROT_WRITE, - MAP_SHARED, + MAP_SHARED, (unsigned long)map->offset); up(&current->mm->mmap_sem); } else { diff --git a/linux/r128_cce.c b/linux/r128_cce.c new file mode 100644 index 000000000..6df2b45ad --- /dev/null +++ b/linux/r128_cce.c @@ -0,0 +1,970 @@ +/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*- + * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com + * + * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Gareth Hughes <gareth@valinux.com> + * + */ + +#define __NO_VERSION__ +#include "drmP.h" +#include "r128_drv.h" + +#include <linux/interrupt.h> /* For task queue support */ +#include <linux/delay.h> + +#define R128_FIFO_DEBUG 0 + + +/* CCE microcode (from ATI) */ +static u32 r128_cce_microcode[] = { + 0, 276838400, 0, 268449792, 2, 142, 2, 145, 0, 1076765731, 0, + 1617039951, 0, 774592877, 0, 1987540286, 0, 2307490946U, 0, + 599558925, 0, 589505315, 0, 596487092, 0, 589505315, 1, + 11544576, 1, 206848, 1, 311296, 1, 198656, 2, 912273422, 11, + 262144, 0, 0, 1, 33559837, 1, 7438, 1, 14809, 1, 6615, 12, 28, + 1, 6614, 12, 28, 2, 23, 11, 18874368, 0, 16790922, 1, 409600, 9, + 30, 1, 147854772, 16, 420483072, 3, 8192, 0, 10240, 1, 198656, + 1, 15630, 1, 51200, 10, 34858, 9, 42, 1, 33559823, 2, 10276, 1, + 15717, 1, 15718, 2, 43, 1, 15936948, 1, 570480831, 1, 14715071, + 12, 322123831, 1, 33953125, 12, 55, 1, 33559908, 1, 15718, 2, + 46, 4, 2099258, 1, 526336, 1, 442623, 4, 4194365, 1, 509952, 1, + 459007, 3, 0, 12, 92, 2, 46, 12, 176, 1, 15734, 1, 206848, 1, + 18432, 1, 133120, 1, 100670734, 1, 149504, 1, 165888, 1, + 15975928, 1, 1048576, 6, 3145806, 1, 15715, 16, 2150645232U, 2, + 268449859, 2, 10307, 12, 176, 1, 15734, 1, 15735, 1, 15630, 1, + 15631, 1, 5253120, 6, 3145810, 16, 2150645232U, 1, 15864, 2, 82, + 1, 343310, 1, 1064207, 2, 3145813, 1, 15728, 1, 7817, 1, 15729, + 3, 15730, 12, 92, 2, 98, 1, 16168, 1, 16167, 1, 16002, 1, 16008, + 1, 15974, 1, 15975, 1, 15990, 1, 15976, 1, 15977, 1, 15980, 0, + 15981, 1, 10240, 1, 5253120, 1, 15720, 1, 198656, 6, 110, 1, + 180224, 1, 103824738, 2, 112, 2, 3145839, 0, 536885440, 1, + 114880, 14, 125, 12, 206975, 1, 33559995, 12, 198784, 0, + 33570236, 1, 15803, 0, 15804, 3, 294912, 1, 294912, 3, 442370, + 1, 11544576, 0, 811612160, 1, 12593152, 1, 11536384, 1, + 14024704, 7, 310382726, 0, 10240, 1, 14796, 1, 14797, 1, 14793, + 1, 14794, 0, 14795, 1, 268679168, 1, 9437184, 1, 268449792, 1, + 198656, 1, 9452827, 1, 1075854602, 1, 1075854603, 1, 557056, 1, + 114880, 14, 159, 12, 198784, 1, 1109409213, 12, 198783, 1, + 1107312059, 12, 198784, 1, 1109409212, 2, 162, 1, 1075854781, 1, + 1073757627, 1, 1075854780, 1, 540672, 1, 10485760, 6, 3145894, + 16, 274741248, 9, 168, 3, 4194304, 3, 4209949, 0, 0, 0, 256, 14, + 174, 1, 114857, 1, 33560007, 12, 176, 0, 10240, 1, 114858, 1, + 33560018, 1, 114857, 3, 33560007, 1, 16008, 
1, 114874, 1, + 33560360, 1, 114875, 1, 33560154, 0, 15963, 0, 256, 0, 4096, 1, + 409611, 9, 188, 0, 10240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +}; + + +#define DO_IOREMAP(_m) (_m)->handle = drm_ioremap((_m)->offset, (_m)->size) + +#define DO_IOREMAPFREE(_m) \ + do { \ + if ((_m)->handle && (_m)->size) \ + drm_ioremapfree((_m)->handle, (_m)->size); \ + } while (0) + +#define DO_FIND_MAP(_m, _o) \ + do { \ + int _i; \ + for (_i = 0; _i < dev->map_count; _i++) { \ + if (dev->maplist[_i]->offset == _o) { \ + _m = dev->maplist[_i]; \ + break; \ + } \ + } \ + } while (0) + + +int R128_READ_PLL(drm_device_t *dev, int addr) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + + R128_WRITE8(R128_CLOCK_CNTL_INDEX, addr & 0x1f); + return R128_READ(R128_CLOCK_CNTL_DATA); +} + +#if R128_FIFO_DEBUG +static void r128_status( drm_r128_private_t *dev_priv ) +{ + printk( "GUI_STAT = 0x%08x\n", + (unsigned int)R128_READ( R128_GUI_STAT ) ); + printk( "PM4_STAT = 0x%08x\n", + (unsigned int)R128_READ( R128_PM4_STAT ) ); + printk( "PM4_BUFFER_DL_WPTR = 0x%08x\n", + (unsigned int)R128_READ( R128_PM4_BUFFER_DL_WPTR ) ); + printk( "PM4_BUFFER_DL_RPTR = 0x%08x\n", + (unsigned int)R128_READ( R128_PM4_BUFFER_DL_RPTR ) ); + printk( "PM4_MICRO_CNTL = 0x%08x\n", + (unsigned int)R128_READ( R128_PM4_MICRO_CNTL ) ); + printk( "PM4_BUFFER_CNTL = 0x%08x\n", + (unsigned int)R128_READ( R128_PM4_BUFFER_CNTL ) ); +} +#endif + + +/* ================================================================ + * Engine, FIFO control + */ + +static int r128_do_pixcache_flush( drm_r128_private_t *dev_priv ) +{ + u32 tmp; + int i; + + tmp = R128_READ( R128_PC_NGUI_CTLSTAT ) | R128_PC_FLUSH_ALL; + R128_WRITE( R128_PC_NGUI_CTLSTAT, tmp ); + + for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { + if ( !(R128_READ( R128_PC_NGUI_CTLSTAT ) & R128_PC_BUSY) ) { + return 0; + } + udelay( 1 ); + } + + DRM_ERROR( "%s failed!\n", __FUNCTION__ ); + return -EBUSY; +} + +static int r128_do_wait_for_fifo( drm_r128_private_t *dev_priv, int entries ) +{ + int i; + + for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { + int slots = R128_READ( R128_GUI_STAT ) & R128_GUI_FIFOCNT_MASK; + if ( slots >= entries ) return 0; + udelay( 1 ); + } + + DRM_ERROR( "%s failed!\n", __FUNCTION__ ); + return -EBUSY; +} + +static int r128_do_wait_for_idle( drm_r128_private_t *dev_priv ) +{ + int i, ret; + + ret = r128_do_wait_for_fifo( dev_priv, 64 ); + if ( ret < 0 ) return ret; + + for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { + if ( !(R128_READ( R128_GUI_STAT ) & R128_GUI_ACTIVE) ) { + r128_do_pixcache_flush( dev_priv ); + return 0; + } + udelay( 1 ); + } + + DRM_ERROR( "%s failed!\n", __FUNCTION__ ); + return -EBUSY; +} + + +/* ================================================================ + * CCE control, initialization + */ + +/* Load the microcode for the CCE */ +static void r128_cce_load_microcode( drm_r128_private_t *dev_priv ) +{ + int i; + + r128_do_wait_for_idle( dev_priv ); + + R128_WRITE( R128_PM4_MICROCODE_ADDR, 0 ); + for ( i = 0 ; i < 256 ; i++ ) { + R128_WRITE( R128_PM4_MICROCODE_DATAH, + r128_cce_microcode[i * 2] ); + R128_WRITE( R128_PM4_MICROCODE_DATAL, + r128_cce_microcode[i * 2 + 1] ); + } +} 
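As an aside, a minimal standalone sketch of the ring free-space arithmetic that r128_wait_ring() relies on further down in this file; the helper name and plain integer types here are illustrative only, not driver code:

static int ring_space_example( unsigned int head, unsigned int tail, int size )
{
        /* head is the engine's read index, tail is the driver's write index,
         * size is the ring size in the same units as the two indices.
         */
        int space = (int)head - (int)tail;

        if ( space <= 0 )
                space += size;  /* the writer has wrapped past the reader */

        return space;           /* e.g. head = 10, tail = 500, size = 1024 -> 534 */
}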
+ +/* Flush any pending commands to the CCE. This should only be used just + * prior to a wait for idle, as it informs the engine that the command + * stream is ending. + */ +static void r128_do_cce_flush( drm_r128_private_t *dev_priv ) +{ + u32 tmp; + + tmp = R128_READ( R128_PM4_BUFFER_DL_WPTR ) | R128_PM4_BUFFER_DL_DONE; + R128_WRITE( R128_PM4_BUFFER_DL_WPTR, tmp ); +} + +/* Wait for the CCE to go idle. + */ +static int r128_do_cce_idle( drm_r128_private_t *dev_priv ) +{ + int i; + + for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { + if ( *dev_priv->ring.head == dev_priv->ring.tail ) { + int pm4stat = R128_READ( R128_PM4_STAT ); + if ( ( (pm4stat & R128_PM4_FIFOCNT_MASK) >= + dev_priv->cce_fifo_size ) && + !(pm4stat & (R128_PM4_BUSY | + R128_PM4_GUI_ACTIVE)) ) { + return r128_do_pixcache_flush( dev_priv ); + } + } + udelay( 1 ); + } + +#if R128_FIFO_DEBUG + DRM_ERROR( "failed!\n" ); + r128_status( dev_priv ); +#endif + return -EBUSY; +} + +/* Start the Concurrent Command Engine. + */ +static void r128_do_cce_start( drm_r128_private_t *dev_priv ) +{ + r128_do_wait_for_idle( dev_priv ); + + R128_WRITE( R128_PM4_BUFFER_CNTL, + dev_priv->cce_mode | dev_priv->ring.size_l2qw ); + R128_READ( R128_PM4_BUFFER_ADDR ); /* as per the sample code */ + R128_WRITE( R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN ); + + dev_priv->cce_running = 1; +} + +/* Reset the Concurrent Command Engine. This will not flush any pending + * commands, so you must wait for the CCE command stream to complete + * before calling this routine. + */ +static void r128_do_cce_reset( drm_r128_private_t *dev_priv ) +{ + R128_WRITE( R128_PM4_BUFFER_DL_WPTR, 0 ); + R128_WRITE( R128_PM4_BUFFER_DL_RPTR, 0 ); + *dev_priv->ring.head = 0; + dev_priv->ring.tail = 0; +} + +/* Stop the Concurrent Command Engine. This will not flush any pending + * commands, so you must flush the command stream and wait for the CCE + * to go idle before calling this routine. + */ +static void r128_do_cce_stop( drm_r128_private_t *dev_priv ) +{ + R128_WRITE( R128_PM4_MICRO_CNTL, 0 ); + R128_WRITE( R128_PM4_BUFFER_CNTL, R128_PM4_NONPM4 ); + + dev_priv->cce_running = 0; +} + +/* Reset the engine. This will stop the CCE if it is running. + */ +static int r128_do_engine_reset( drm_device_t *dev ) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + u32 clock_cntl_index, mclk_cntl, gen_reset_cntl; + + r128_do_pixcache_flush( dev_priv ); + + clock_cntl_index = R128_READ( R128_CLOCK_CNTL_INDEX ); + mclk_cntl = R128_READ_PLL( dev, R128_MCLK_CNTL ); + + R128_WRITE_PLL( R128_MCLK_CNTL, + mclk_cntl | R128_FORCE_GCP | R128_FORCE_PIPE3D_CP ); + + gen_reset_cntl = R128_READ( R128_GEN_RESET_CNTL ); + + /* Taken from the sample code - do not change */ + R128_WRITE( R128_GEN_RESET_CNTL, + gen_reset_cntl | R128_SOFT_RESET_GUI ); + R128_READ( R128_GEN_RESET_CNTL ); + R128_WRITE( R128_GEN_RESET_CNTL, + gen_reset_cntl & ~R128_SOFT_RESET_GUI ); + R128_READ( R128_GEN_RESET_CNTL ); + + R128_WRITE_PLL( R128_MCLK_CNTL, mclk_cntl ); + R128_WRITE( R128_CLOCK_CNTL_INDEX, clock_cntl_index ); + R128_WRITE( R128_GEN_RESET_CNTL, gen_reset_cntl ); + + /* Reset the CCE ring */ + r128_do_cce_reset( dev_priv ); + + /* The CCE is no longer running after an engine reset */ + dev_priv->cce_running = 0; + + /* Reset any pending vertex, indirect buffers */ + r128_freelist_reset( dev ); + + return 0; +} + +static void r128_cce_init_ring_buffer( drm_device_t *dev ) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + u32 ring_start; + u32 tmp; + + /* The manual (p. 
2) says this address is in "VM space". This + * means it's an offset from the start of AGP space. + */ + ring_start = dev_priv->cce_ring->offset - dev->agp->base; + R128_WRITE( R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET ); + + R128_WRITE( R128_PM4_BUFFER_DL_WPTR, 0 ); + R128_WRITE( R128_PM4_BUFFER_DL_RPTR, 0 ); + + /* DL_RPTR_ADDR is a physical address in AGP space. */ + *dev_priv->ring.head = 0; + R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR, + dev_priv->ring_rptr->offset ); + + /* Set watermark control */ + R128_WRITE( R128_PM4_BUFFER_WM_CNTL, + ((R128_WATERMARK_L/4) << R128_WMA_SHIFT) + | ((R128_WATERMARK_M/4) << R128_WMB_SHIFT) + | ((R128_WATERMARK_N/4) << R128_WMC_SHIFT) + | ((R128_WATERMARK_K/64) << R128_WB_WM_SHIFT) ); + + /* Force read. Why? Because it's in the examples... */ + R128_READ( R128_PM4_BUFFER_ADDR ); + + /* Turn on bus mastering */ + tmp = R128_READ( R128_BUS_CNTL ) & ~R128_BUS_MASTER_DIS; + R128_WRITE( R128_BUS_CNTL, tmp ); +} + +static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init ) +{ + drm_r128_private_t *dev_priv; + int i; + + dev_priv = drm_alloc( sizeof(drm_r128_private_t), DRM_MEM_DRIVER ); + if ( dev_priv == NULL ) + return -ENOMEM; + dev->dev_private = (void *)dev_priv; + + memset( dev_priv, 0, sizeof(drm_r128_private_t) ); + + dev_priv->is_pci = init->is_pci; + + /* GH: We don't support PCI cards until PCI GART is implemented. + * Fail here so we can remove all checks for PCI cards around + * the CCE ring code. + */ + if ( dev_priv->is_pci ) { + drm_free( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER ); + dev->dev_private = NULL; + return -EINVAL; + } + + dev_priv->usec_timeout = init->usec_timeout; + if ( dev_priv->usec_timeout < 1 || + dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT ) { + drm_free( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER ); + dev->dev_private = NULL; + return -EINVAL; + } + + dev_priv->cce_mode = init->cce_mode; + + /* GH: Simple idle check. + */ + atomic_set( &dev_priv->idle_count, 0 ); + + /* We don't support anything other than bus-mastering ring mode, + * but the ring can be in either AGP or PCI space for the ring + * read pointer. 
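+	 * Any other value of init->cce_mode is rejected below with -EINVAL.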
+ */ + if ( ( init->cce_mode != R128_PM4_192BM ) && + ( init->cce_mode != R128_PM4_128BM_64INDBM ) && + ( init->cce_mode != R128_PM4_64BM_128INDBM ) && + ( init->cce_mode != R128_PM4_64BM_64VCBM_64INDBM ) ) { + drm_free( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER ); + dev->dev_private = NULL; + return -EINVAL; + } + + switch ( init->cce_mode ) { + case R128_PM4_NONPM4: + dev_priv->cce_fifo_size = 0; + break; + case R128_PM4_192PIO: + case R128_PM4_192BM: + dev_priv->cce_fifo_size = 192; + break; + case R128_PM4_128PIO_64INDBM: + case R128_PM4_128BM_64INDBM: + dev_priv->cce_fifo_size = 128; + break; + case R128_PM4_64PIO_128INDBM: + case R128_PM4_64BM_128INDBM: + case R128_PM4_64PIO_64VCBM_64INDBM: + case R128_PM4_64BM_64VCBM_64INDBM: + case R128_PM4_64PIO_64VCPIO_64INDPIO: + dev_priv->cce_fifo_size = 64; + break; + } + + dev_priv->fb_bpp = init->fb_bpp; + dev_priv->front_offset = init->front_offset; + dev_priv->front_pitch = init->front_pitch; + dev_priv->back_offset = init->back_offset; + dev_priv->back_pitch = init->back_pitch; + + dev_priv->depth_bpp = init->depth_bpp; + dev_priv->depth_offset = init->depth_offset; + dev_priv->depth_pitch = init->depth_pitch; + dev_priv->span_offset = init->span_offset; + + dev_priv->front_pitch_offset_c = (((dev_priv->front_pitch/8) << 21) | + (dev_priv->front_offset >> 5)); + dev_priv->back_pitch_offset_c = (((dev_priv->back_pitch/8) << 21) | + (dev_priv->back_offset >> 5)); + dev_priv->depth_pitch_offset_c = (((dev_priv->depth_pitch/8) << 21) | + (dev_priv->depth_offset >> 5) | + R128_DST_TILE); + dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch/8) << 21) | + (dev_priv->span_offset >> 5)); + + /* FIXME: We want multiple shared areas, including one shared + * only by the X Server and kernel module. + */ + for ( i = 0 ; i < dev->map_count ; i++ ) { + if ( dev->maplist[i]->type == _DRM_SHM ) { + dev_priv->sarea = dev->maplist[i]; + break; + } + } + + DO_FIND_MAP( dev_priv->fb, init->fb_offset ); + DO_FIND_MAP( dev_priv->mmio, init->mmio_offset ); + DO_FIND_MAP( dev_priv->cce_ring, init->ring_offset ); + DO_FIND_MAP( dev_priv->ring_rptr, init->ring_rptr_offset ); + DO_FIND_MAP( dev_priv->buffers, init->buffers_offset ); + + if ( !dev_priv->is_pci ) { + DO_FIND_MAP( dev_priv->agp_textures, + init->agp_textures_offset ); + } + + dev_priv->sarea_priv = + (drm_r128_sarea_t *)((u8 *)dev_priv->sarea->handle + + init->sarea_priv_offset); + + DO_IOREMAP( dev_priv->cce_ring ); + DO_IOREMAP( dev_priv->ring_rptr ); + DO_IOREMAP( dev_priv->buffers ); +#if 0 + if ( !dev_priv->is_pci ) { + DO_IOREMAP( dev_priv->agp_textures ); + } +#endif + + dev_priv->ring.head = ((__volatile__ u32 *) + dev_priv->ring_rptr->handle); + + dev_priv->ring.start = (u32 *)dev_priv->cce_ring->handle; + dev_priv->ring.end = ((u32 *)dev_priv->cce_ring->handle + + init->ring_size / sizeof(u32)); + dev_priv->ring.size = init->ring_size; + dev_priv->ring.size_l2qw = drm_order( init->ring_size / 8 ); + + dev_priv->ring.tail_mask = + (dev_priv->ring.size / sizeof(u32)) - 1; + + dev_priv->sarea_priv->last_frame = 0; + R128_WRITE( R128_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame ); + + dev_priv->sarea_priv->last_dispatch = 0; + R128_WRITE( R128_LAST_DISPATCH_REG, + dev_priv->sarea_priv->last_dispatch ); + + r128_cce_init_ring_buffer( dev ); + r128_cce_load_microcode( dev_priv ); + r128_do_engine_reset( dev ); + + return 0; +} + +static int r128_do_cleanup_cce( drm_device_t *dev ) +{ + if ( dev->dev_private ) { + drm_r128_private_t *dev_priv = dev->dev_private; + + DO_IOREMAPFREE( 
dev_priv->cce_ring ); + DO_IOREMAPFREE( dev_priv->ring_rptr ); + DO_IOREMAPFREE( dev_priv->buffers ); +#if 0 + if ( !dev_priv->is_pci ) { + DO_IOREMAPFREE( dev_priv->agp_textures ); + } +#endif + + drm_free( dev->dev_private, sizeof(drm_r128_private_t), + DRM_MEM_DRIVER ); + dev->dev_private = NULL; + } + + return 0; +} + +int r128_cce_init( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_r128_init_t init; + + if ( copy_from_user( &init, (drm_r128_init_t *)arg, sizeof(init) ) ) + return -EFAULT; + + switch ( init.func ) { + case R128_INIT_CCE: + return r128_do_init_cce( dev, &init ); + case R128_CLEANUP_CCE: + return r128_do_cleanup_cce( dev ); + } + + return -EINVAL; +} + +int r128_cce_start( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_r128_private_t *dev_priv = dev->dev_private; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + if ( dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4 ) { + DRM_DEBUG( "%s while CCE running\n", __FUNCTION__ ); + return 0; + } + + r128_do_cce_start( dev_priv ); + + return 0; +} + +/* Stop the CCE. The engine must have been idled before calling this + * routine. + */ +int r128_cce_stop( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_r128_private_t *dev_priv = dev->dev_private; + drm_r128_cce_stop_t stop; + int ret; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + + if ( copy_from_user( &stop, (drm_r128_init_t *)arg, sizeof(stop) ) ) + return -EFAULT; + + /* Flush any pending CCE commands. This ensures any outstanding + * commands are executed by the engine before we turn it off. + */ + if ( stop.flush ) { + r128_do_cce_flush( dev_priv ); + } + + /* If we fail to make the engine go idle, we return an error + * code so that the DRM ioctl wrapper can try again. + */ + if ( stop.idle ) { + ret = r128_do_cce_idle( dev_priv ); + if ( ret < 0 ) return ret; + } + + /* Finally, we can turn off the CCE. If the engine isn't idle, + * we will get some dropped triangles as they won't be fully + * rendered before the CCE is shut down. + */ + r128_do_cce_stop( dev_priv ); + + /* Reset the engine */ + r128_do_engine_reset( dev ); + + return 0; +} + +/* Just reset the CCE ring. Called as part of an X Server engine reset.
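+ * This only clears the ring read and write pointers; it does not flush or wait for commands still pending in the engine.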
+ */ +int r128_cce_reset( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_r128_private_t *dev_priv = dev->dev_private; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + if ( !dev_priv ) { + DRM_DEBUG( "%s called before init done\n", __FUNCTION__ ); + return -EINVAL; + } + + r128_do_cce_reset( dev_priv ); + + /* The CCE is no longer running after an engine reset */ + dev_priv->cce_running = 0; + + return 0; +} + +int r128_cce_idle( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_r128_private_t *dev_priv = dev->dev_private; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + + if ( dev_priv->cce_running ) { + r128_do_cce_flush( dev_priv ); + } + + return r128_do_cce_idle( dev_priv ); +} + +int r128_engine_reset( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + + return r128_do_engine_reset( dev ); +} + + +/* ================================================================ + * Fullscreen mode + */ + +static int r128_do_init_pageflip( drm_device_t *dev ) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + dev_priv->crtc_offset = R128_READ( R128_CRTC_OFFSET ); + dev_priv->crtc_offset_cntl = R128_READ( R128_CRTC_OFFSET_CNTL ); + + R128_WRITE( R128_CRTC_OFFSET, dev_priv->front_offset ); + R128_WRITE( R128_CRTC_OFFSET_CNTL, + dev_priv->crtc_offset_cntl | R128_CRTC_OFFSET_FLIP_CNTL ); + + dev_priv->page_flipping = 1; + dev_priv->current_page = 0; + + return 0; +} + +int r128_do_cleanup_pageflip( drm_device_t *dev ) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + R128_WRITE( R128_CRTC_OFFSET, dev_priv->crtc_offset ); + R128_WRITE( R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl ); + + dev_priv->page_flipping = 0; + dev_priv->current_page = 0; + + return 0; +} + +int r128_fullscreen( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_r128_fullscreen_t fs; + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + + if ( copy_from_user( &fs, (drm_r128_fullscreen_t *)arg, sizeof(fs) ) ) + return -EFAULT; + + switch ( fs.func ) { + case R128_INIT_FULLSCREEN: + return r128_do_init_pageflip( dev ); + case R128_CLEANUP_FULLSCREEN: + return r128_do_cleanup_pageflip( dev ); + } + + return -EINVAL; +} + + +/* ================================================================ + * Freelist management + */ +#define R128_BUFFER_USED 0xffffffff +#define R128_BUFFER_FREE 0 + +#if 0 +static int r128_freelist_init( drm_device_t 
*dev ) +{ + drm_device_dma_t *dma = dev->dma; + drm_r128_private_t *dev_priv = dev->dev_private; + drm_buf_t *buf; + drm_r128_buf_priv_t *buf_priv; + drm_r128_freelist_t *entry; + int i; + + dev_priv->head = drm_alloc( sizeof(drm_r128_freelist_t), + DRM_MEM_DRIVER ); + if ( dev_priv->head == NULL ) + return -ENOMEM; + + memset( dev_priv->head, 0, sizeof(drm_r128_freelist_t) ); + dev_priv->head->age = R128_BUFFER_USED; + + for ( i = 0 ; i < dma->buf_count ; i++ ) { + buf = dma->buflist[i]; + buf_priv = buf->dev_private; + + entry = drm_alloc( sizeof(drm_r128_freelist_t), + DRM_MEM_DRIVER ); + if ( !entry ) return -ENOMEM; + + entry->age = R128_BUFFER_FREE; + entry->buf = buf; + entry->prev = dev_priv->head; + entry->next = dev_priv->head->next; + if ( !entry->next ) + dev_priv->tail = entry; + + buf_priv->discard = 0; + buf_priv->dispatched = 0; + buf_priv->list_entry = entry; + + dev_priv->head->next = entry; + + if ( dev_priv->head->next ) + dev_priv->head->next->prev = entry; + } + + return 0; + +} +#endif + +drm_buf_t *r128_freelist_get( drm_device_t *dev ) +{ + drm_device_dma_t *dma = dev->dma; + drm_r128_private_t *dev_priv = dev->dev_private; + drm_r128_buf_priv_t *buf_priv; + drm_buf_t *buf; + int i, t; + + /* FIXME: Optimize -- use freelist code */ + + for ( i = 0 ; i < dma->buf_count ; i++ ) { + buf = dma->buflist[i]; + buf_priv = buf->dev_private; + if ( buf->pid == 0 ) + return buf; + } + + for ( t = 0 ; t < dev_priv->usec_timeout ; t++ ) { + u32 done_age = R128_READ( R128_LAST_DISPATCH_REG ); + + for ( i = 0 ; i < dma->buf_count ; i++ ) { + buf = dma->buflist[i]; + buf_priv = buf->dev_private; + if ( buf->pending && buf_priv->age <= done_age ) { + /* The buffer has been processed, so it + * can now be used. + */ + buf->pending = 0; + return buf; + } + } + udelay( 1 ); + } + + DRM_ERROR( "returning NULL!\n" ); + return NULL; +} + +void r128_freelist_reset( drm_device_t *dev ) +{ + drm_device_dma_t *dma = dev->dma; + int i; + + for ( i = 0 ; i < dma->buf_count ; i++ ) { + drm_buf_t *buf = dma->buflist[i]; + drm_r128_buf_priv_t *buf_priv = buf->dev_private; + buf_priv->age = 0; + } +} + + +/* ================================================================ + * CCE command submission + */ + +int r128_wait_ring( drm_r128_private_t *dev_priv, int n ) +{ + drm_r128_ring_buffer_t *ring = &dev_priv->ring; + int i; + + for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { + ring->space = *ring->head - ring->tail; + if ( ring->space <= 0 ) + ring->space += ring->size; + + if ( ring->space >= n ) + return 0; + + udelay( 1 ); + } + + /* FIXME: This is being ignored... 
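+	 * (presumably the -EBUSY return below, which callers do not yet act on)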
*/ + DRM_ERROR( "failed!\n" ); + return -EBUSY; +} + +void r128_update_ring_snapshot( drm_r128_private_t *dev_priv ) +{ + drm_r128_ring_buffer_t *ring = &dev_priv->ring; + + ring->space = *ring->head - ring->tail; +#if R128_PERFORMANCE_BOXES + if ( ring->space == 0 ) + atomic_inc( &dev_priv->idle_count ); +#endif + if ( ring->space <= 0 ) + ring->space += ring->size; +} + +static int r128_cce_get_buffers( drm_device_t *dev, drm_dma_t *d ) +{ + int i; + drm_buf_t *buf; + + for ( i = d->granted_count ; i < d->request_count ; i++ ) { + buf = r128_freelist_get( dev ); + if ( !buf ) return -EAGAIN; + + buf->pid = current->pid; + + if ( copy_to_user( &d->request_indices[i], &buf->idx, + sizeof(buf->idx) ) ) + return -EFAULT; + if ( copy_to_user( &d->request_sizes[i], &buf->total, + sizeof(buf->total) ) ) + return -EFAULT; + + d->granted_count++; + } + return 0; +} + +int r128_cce_buffers( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_device_dma_t *dma = dev->dma; + int ret = 0; + drm_dma_t d; + + if ( copy_from_user( &d, (drm_dma_t *) arg, sizeof(d) ) ) + return -EFAULT; + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + + /* Please don't send us buffers. + */ + if ( d.send_count != 0 ) { + DRM_ERROR( "Process %d trying to send %d buffers via drmDMA\n", + current->pid, d.send_count ); + return -EINVAL; + } + + /* We'll send you buffers. + */ + if ( d.request_count < 0 || d.request_count > dma->buf_count ) { + DRM_ERROR( "Process %d trying to get %d buffers (of %d max)\n", + current->pid, d.request_count, dma->buf_count ); + return -EINVAL; + } + + d.granted_count = 0; + + if ( d.request_count ) { + ret = r128_cce_get_buffers( dev, &d ); + } + + if ( copy_to_user( (drm_dma_t *) arg, &d, sizeof(d) ) ) + return -EFAULT; + + return ret; +} diff --git a/linux/r128_context.c b/linux/r128_context.c index 9cadadbaf..0741e7745 100644 --- a/linux/r128_context.c +++ b/linux/r128_context.c @@ -11,11 +11,11 @@ * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL @@ -23,7 +23,7 @@ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. - * + * * Author: Rickard E. 
(Rik) Faith <faith@valinux.com> * */ @@ -53,21 +53,21 @@ int r128_context_switch(drm_device_t *dev, int old, int new) #if DRM_DMA_HISTOGRAM dev->ctx_start = get_cycles(); #endif - + DRM_DEBUG("Context switch from %d to %d\n", old, new); if (new == dev->last_context) { clear_bit(0, &dev->context_flag); return 0; } - + if (drm_flags & DRM_FLAG_NOCTX) { r128_context_switch_complete(dev, new); } else { sprintf(buf, "C %d %d\n", old, new); drm_write_string(dev, buf); } - + return 0; } @@ -75,7 +75,7 @@ int r128_context_switch_complete(drm_device_t *dev, int new) { dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ dev->last_switch = jiffies; - + if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { DRM_ERROR("Lock isn't held after context switch\n"); } @@ -86,11 +86,11 @@ int r128_context_switch_complete(drm_device_t *dev, int new) #if DRM_DMA_HISTOGRAM atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles() - dev->ctx_start)]); - + #endif clear_bit(0, &dev->context_flag); wake_up(&dev->context_wait); - + return 0; } diff --git a/linux/r128_dma.c b/linux/r128_dma.c deleted file mode 100644 index bcba67826..000000000 --- a/linux/r128_dma.c +++ /dev/null @@ -1,909 +0,0 @@ -/* r128_drv.c -- ATI Rage 128 driver -*- linux-c -*- - * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com - * - * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - * Authors: Kevin E. 
Martin <martin@valinux.com> - * - */ - -#define __NO_VERSION__ -#include "drmP.h" -#include "r128_drv.h" - -#include <linux/interrupt.h> /* For task queue support */ -#include <linux/delay.h> - - - -#define DO_REMAP(_m) (_m)->handle = drm_ioremap((_m)->offset, (_m)->size) - -#define DO_REMAPFREE(_m) \ - do { \ - if ((_m)->handle && (_m)->size) \ - drm_ioremapfree((_m)->handle, (_m)->size); \ - } while (0) - -#define DO_FIND_MAP(_m, _o) \ - do { \ - int _i; \ - for (_i = 0; _i < dev->map_count; _i++) { \ - if (dev->maplist[_i]->offset == _o) { \ - _m = dev->maplist[_i]; \ - break; \ - } \ - } \ - } while (0) - - -#define R128_MAX_VBUF_AGE 0x10000000 -#define R128_VB_AGE_REG R128_GUI_SCRATCH_REG0 - -int R128_READ_PLL(drm_device_t *dev, int addr) -{ - drm_r128_private_t *dev_priv = dev->dev_private; - - R128_WRITE8(R128_CLOCK_CNTL_INDEX, addr & 0x1f); - return R128_READ(R128_CLOCK_CNTL_DATA); -} - -#define r128_flush_write_combine() mb() - - -static void r128_status(drm_device_t *dev) -{ - drm_r128_private_t *dev_priv = dev->dev_private; - - printk("GUI_STAT = 0x%08x\n", - (unsigned int)R128_READ(R128_GUI_STAT)); - printk("PM4_STAT = 0x%08x\n", - (unsigned int)R128_READ(R128_PM4_STAT)); - printk("PM4_BUFFER_DL_WPTR = 0x%08x\n", - (unsigned int)R128_READ(R128_PM4_BUFFER_DL_WPTR)); - printk("PM4_BUFFER_DL_RPTR = 0x%08x\n", - (unsigned int)R128_READ(R128_PM4_BUFFER_DL_RPTR)); -} - -static int r128_do_cleanup_cce(drm_device_t *dev) -{ - if (dev->dev_private) { - drm_r128_private_t *dev_priv = dev->dev_private; - - if (!dev_priv->is_pci) { - DO_REMAPFREE(dev_priv->agp_ring); - DO_REMAPFREE(dev_priv->agp_read_ptr); - DO_REMAPFREE(dev_priv->agp_vertbufs); - DO_REMAPFREE(dev_priv->agp_indbufs); - DO_REMAPFREE(dev_priv->agp_textures); - } - - drm_free(dev->dev_private, sizeof(drm_r128_private_t), - DRM_MEM_DRIVER); - dev->dev_private = NULL; - } - - return 0; -} - -static int r128_do_init_cce(drm_device_t *dev, drm_r128_init_t *init) -{ - drm_r128_private_t *dev_priv; - int i; - - dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER); - if (dev_priv == NULL) return -ENOMEM; - dev->dev_private = (void *)dev_priv; - - memset(dev_priv, 0, sizeof(drm_r128_private_t)); - - dev_priv->is_pci = init->is_pci; - - dev_priv->usec_timeout = init->usec_timeout; - if (dev_priv->usec_timeout < 1 || - dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT) { - drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); - dev->dev_private = NULL; - return -EINVAL; - } - - dev_priv->cce_mode = init->cce_mode; - dev_priv->cce_fifo_size = init->cce_fifo_size; - dev_priv->cce_is_bm_mode = - ((init->cce_mode == R128_PM4_192BM) || - (init->cce_mode == R128_PM4_128BM_64INDBM) || - (init->cce_mode == R128_PM4_64BM_128INDBM) || - (init->cce_mode == R128_PM4_64BM_64VCBM_64INDBM)); - dev_priv->cce_secure = init->cce_secure; - - if (dev_priv->cce_is_bm_mode && dev_priv->is_pci) { - drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); - dev->dev_private = NULL; - return -EINVAL; - } - - for (i = 0; i < dev->map_count; i++) { - if (dev->maplist[i]->type == _DRM_SHM) { - dev_priv->sarea = dev->maplist[i]; - break; - } - } - - DO_FIND_MAP(dev_priv->fb, init->fb_offset); - if (!dev_priv->is_pci) { - DO_FIND_MAP(dev_priv->agp_ring, init->agp_ring_offset); - DO_FIND_MAP(dev_priv->agp_read_ptr, init->agp_read_ptr_offset); - DO_FIND_MAP(dev_priv->agp_vertbufs, init->agp_vertbufs_offset); - DO_FIND_MAP(dev_priv->agp_indbufs, init->agp_indbufs_offset); - DO_FIND_MAP(dev_priv->agp_textures, init->agp_textures_offset); - } - 
DO_FIND_MAP(dev_priv->mmio, init->mmio_offset); - - dev_priv->sarea_priv = - (drm_r128_sarea_t *)((u8 *)dev_priv->sarea->handle + - init->sarea_priv_offset); - - if (!dev_priv->is_pci) { - DO_REMAP(dev_priv->agp_ring); - DO_REMAP(dev_priv->agp_read_ptr); - DO_REMAP(dev_priv->agp_vertbufs); -#if 0 - DO_REMAP(dev_priv->agp_indirectbufs); - DO_REMAP(dev_priv->agp_textures); -#endif - - dev_priv->ring_size = init->ring_size; - dev_priv->ring_sizel2qw = drm_order(init->ring_size/8); - dev_priv->ring_entries = init->ring_size/sizeof(u32); - dev_priv->ring_read_ptr = ((__volatile__ u32 *) - dev_priv->agp_read_ptr->handle); - dev_priv->ring_start = (u32 *)dev_priv->agp_ring->handle; - dev_priv->ring_end = ((u32 *)dev_priv->agp_ring->handle - + dev_priv->ring_entries); - } - - dev_priv->submit_age = 0; - R128_WRITE(R128_VB_AGE_REG, dev_priv->submit_age); - - return 0; -} - -int r128_init_cce(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) -{ - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - drm_r128_init_t init; - - if (copy_from_user(&init, (drm_r128_init_t *)arg, sizeof(init))) - return -EFAULT; - - switch (init.func) { - case R128_INIT_CCE: - return r128_do_init_cce(dev, &init); - case R128_CLEANUP_CCE: - return r128_do_cleanup_cce(dev); - } - - return -EINVAL; -} - -static void r128_mark_vertbufs_done(drm_device_t *dev) -{ - drm_device_dma_t *dma = dev->dma; - int i; - - for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; - drm_r128_buf_priv_t *buf_priv = buf->dev_private; - buf_priv->age = 0; - } -} - -static int r128_do_pixcache_flush(drm_device_t *dev) -{ - drm_r128_private_t *dev_priv = dev->dev_private; - u32 tmp; - int i; - - tmp = R128_READ(R128_PC_NGUI_CTLSTAT) | R128_PC_FLUSH_ALL; - R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp); - - for (i = 0; i < dev_priv->usec_timeout; i++) { - if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY)) - return 0; - udelay(1); - } - - return -EBUSY; -} - -static int r128_do_wait_for_fifo(drm_device_t *dev, int entries) -{ - drm_r128_private_t *dev_priv = dev->dev_private; - int i; - - for (i = 0; i < dev_priv->usec_timeout; i++) { - int slots = R128_READ(R128_GUI_STAT) & R128_GUI_FIFOCNT_MASK; - if (slots >= entries) return 0; - udelay(1); - } - return -EBUSY; -} - -static int r128_do_wait_for_idle(drm_device_t *dev) -{ - drm_r128_private_t *dev_priv = dev->dev_private; - int i, ret; - - if (!(ret = r128_do_wait_for_fifo(dev, 64))) return ret; - - for (i = 0; i < dev_priv->usec_timeout; i++) { - if (!(R128_READ(R128_GUI_STAT) & R128_GUI_ACTIVE)) { - (void)r128_do_pixcache_flush(dev); - return 0; - } - udelay(1); - } - return -EBUSY; -} - -int r128_do_engine_reset(drm_device_t *dev) -{ - drm_r128_private_t *dev_priv = dev->dev_private; - u32 clock_cntl_index, mclk_cntl, gen_reset_cntl; - - (void)r128_do_pixcache_flush(dev); - - clock_cntl_index = R128_READ(R128_CLOCK_CNTL_INDEX); - mclk_cntl = R128_READ_PLL(dev, R128_MCLK_CNTL); - - R128_WRITE_PLL(R128_MCLK_CNTL, - mclk_cntl | R128_FORCE_GCP | R128_FORCE_PIPE3D_CP); - - gen_reset_cntl = R128_READ(R128_GEN_RESET_CNTL); - - R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl | R128_SOFT_RESET_GUI); - (void)R128_READ(R128_GEN_RESET_CNTL); - R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl & ~R128_SOFT_RESET_GUI); - (void)R128_READ(R128_GEN_RESET_CNTL); - - R128_WRITE_PLL(R128_MCLK_CNTL, mclk_cntl); - R128_WRITE(R128_CLOCK_CNTL_INDEX, clock_cntl_index); - R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl); - - /* For CCE ring buffer only */ - if 
(dev_priv->cce_is_bm_mode) { - R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0); - R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0); - *dev_priv->ring_read_ptr = 0; - dev_priv->sarea_priv->ring_write = 0; - } - - /* Reset the CCE mode */ - (void)r128_do_wait_for_idle(dev); - R128_WRITE(R128_PM4_BUFFER_CNTL, - dev_priv->cce_mode | dev_priv->ring_sizel2qw); - (void)R128_READ(R128_PM4_BUFFER_ADDR); /* as per the sample code */ - R128_WRITE(R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN); - - r128_mark_vertbufs_done(dev); - return 0; -} - -int r128_eng_reset(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) -{ - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - - if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || - dev->lock.pid != current->pid) { - DRM_ERROR("r128_eng_reset called without holding the lock\n"); - return -EINVAL; - } - - return r128_do_engine_reset(dev); -} - -static int r128_do_engine_flush(drm_device_t *dev) -{ - drm_r128_private_t *dev_priv = dev->dev_private; - u32 tmp; - - tmp = R128_READ(R128_PM4_BUFFER_DL_WPTR); - R128_WRITE(R128_PM4_BUFFER_DL_WPTR, tmp | R128_PM4_BUFFER_DL_DONE); - - return 0; -} - -int r128_eng_flush(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) -{ - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - - if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || - dev->lock.pid != current->pid) { - DRM_ERROR("r128_eng_flush called without holding the lock\n"); - return -EINVAL; - } - - return r128_do_engine_flush(dev); -} - -static int r128_do_cce_wait_for_fifo(drm_device_t *dev, int entries) -{ - drm_r128_private_t *dev_priv = dev->dev_private; - int i; - - for (i = 0; i < dev_priv->usec_timeout; i++) { - int slots = R128_READ(R128_PM4_STAT) & R128_PM4_FIFOCNT_MASK; - if (slots >= entries) return 0; - udelay(1); - } - return -EBUSY; -} - -int r128_do_cce_wait_for_idle(drm_device_t *dev) -{ - drm_r128_private_t *dev_priv = dev->dev_private; - int i; - - if (dev_priv->cce_is_bm_mode) { - for (i = 0; i < dev_priv->usec_timeout; i++) { - if (*dev_priv->ring_read_ptr == dev_priv->sarea_priv->ring_write) { - int pm4stat = R128_READ(R128_PM4_STAT); - if ((pm4stat & R128_PM4_FIFOCNT_MASK) >= dev_priv->cce_fifo_size && - !(pm4stat & (R128_PM4_BUSY | R128_PM4_GUI_ACTIVE))) { - return r128_do_pixcache_flush(dev); - } - } - udelay(1); - } - return -EBUSY; - } else { - int ret = r128_do_cce_wait_for_fifo(dev, dev_priv->cce_fifo_size); - if (ret < 0) return ret; - - for (i = 0; i < dev_priv->usec_timeout; i++) { - int pm4stat = R128_READ(R128_PM4_STAT); - if (!(pm4stat & (R128_PM4_BUSY | R128_PM4_GUI_ACTIVE))) { - return r128_do_pixcache_flush(dev); - } - udelay(1); - } - return -EBUSY; - } -} - -int r128_cce_idle(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) -{ - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - - if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || - dev->lock.pid != current->pid) { - DRM_ERROR("r128_wait_idle called without holding the lock\n"); - return -EINVAL; - } - - return r128_do_cce_wait_for_idle(dev); -} - -static int r128_submit_packets_ring_secure(drm_device_t *dev, - u32 *commands, int *count) -{ - drm_r128_private_t *dev_priv = dev->dev_private; - int write = dev_priv->sarea_priv->ring_write; - int *write_ptr = dev_priv->ring_start + write; - int c = *count; - u32 tmp = 0; - int psize = 0; - int writing = 1; - int timeout; - - while (c > 0) { - tmp = *commands++; - if (!psize) { - writing = 1; - - if 
((tmp & R128_CCE_PACKET_MASK) == R128_CCE_PACKET0) { - if ((tmp & R128_CCE_PACKET0_REG_MASK) <= (0x1004 >> 2)) { - if ((tmp & R128_CCE_PACKET0_REG_MASK) != - (R128_PM4_VC_FPU_SETUP >> 2)) { - writing = 0; - } - } - psize = ((tmp & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2; - } else if ((tmp & R128_CCE_PACKET_MASK) == R128_CCE_PACKET1) { - if ((tmp & R128_CCE_PACKET1_REG0_MASK) <= (0x1004 >> 2)) { - if ((tmp & R128_CCE_PACKET1_REG0_MASK) != - (R128_PM4_VC_FPU_SETUP >> 2)) { - writing = 0; - } - } else if ((tmp & R128_CCE_PACKET1_REG1_MASK) <= - (0x1004 << 9)) { - if ((tmp & R128_CCE_PACKET1_REG1_MASK) != - (R128_PM4_VC_FPU_SETUP << 9)) { - writing = 0; - } - } - psize = 3; - } else { - psize = ((tmp & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2; - } - } - psize--; - - if (writing) { - write++; - *write_ptr++ = tmp; - } - if (write >= dev_priv->ring_entries) { - write = 0; - write_ptr = dev_priv->ring_start; - } - timeout = 0; - while (write == *dev_priv->ring_read_ptr) { - (void)R128_READ(R128_PM4_BUFFER_DL_RPTR); - if (timeout++ >= dev_priv->usec_timeout) - return -EBUSY; - udelay(1); - } - c--; - } - - if (write < 32) - memcpy(dev_priv->ring_end, - dev_priv->ring_start, - write * sizeof(u32)); - - /* Make sure WC cache has been flushed */ - r128_flush_write_combine(); - - dev_priv->sarea_priv->ring_write = write; - R128_WRITE(R128_PM4_BUFFER_DL_WPTR, write); - - *count = 0; - - return 0; -} - -static int r128_submit_packets_pio_secure(drm_device_t *dev, - u32 *commands, int *count) -{ - drm_r128_private_t *dev_priv = dev->dev_private; - u32 tmp = 0; - int psize = 0; - int writing = 1; - int addr = R128_PM4_FIFO_DATA_EVEN; - int ret; - - while (*count > 0) { - tmp = *commands++; - if (!psize) { - writing = 1; - - if ((tmp & R128_CCE_PACKET_MASK) == R128_CCE_PACKET0) { - if ((tmp & R128_CCE_PACKET0_REG_MASK) <= (0x1004 >> 2)) { - if ((tmp & R128_CCE_PACKET0_REG_MASK) != - (R128_PM4_VC_FPU_SETUP >> 2)) { - writing = 0; - } - } - psize = ((tmp & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2; - } else if ((tmp & R128_CCE_PACKET_MASK) == R128_CCE_PACKET1) { - if ((tmp & R128_CCE_PACKET1_REG0_MASK) <= (0x1004 >> 2)) { - if ((tmp & R128_CCE_PACKET1_REG0_MASK) != - (R128_PM4_VC_FPU_SETUP >> 2)) { - writing = 0; - } - } else if ((tmp & R128_CCE_PACKET1_REG1_MASK) <= - (0x1004 << 9)) { - if ((tmp & R128_CCE_PACKET1_REG1_MASK) != - (R128_PM4_VC_FPU_SETUP << 9)) { - writing = 0; - } - } - psize = 3; - } else { - psize = ((tmp & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2; - } - } - psize--; - - if (writing) { - if ((ret = r128_do_cce_wait_for_fifo(dev, 1)) < 0) - return ret; - R128_WRITE(addr, tmp); - addr ^= 0x0004; - } - - *count -= 1; - } - - if (addr == R128_PM4_FIFO_DATA_ODD) { - if ((ret = r128_do_cce_wait_for_fifo(dev, 1)) < 0) return ret; - R128_WRITE(addr, R128_CCE_PACKET2); - } - - return 0; -} - -static int r128_submit_packets_ring(drm_device_t *dev, - u32 *commands, int *count) -{ - drm_r128_private_t *dev_priv = dev->dev_private; - int write = dev_priv->sarea_priv->ring_write; - int *write_ptr = dev_priv->ring_start + write; - int c = *count; - int timeout; - - while (c > 0) { - write++; - *write_ptr++ = *commands++; - if (write >= dev_priv->ring_entries) { - write = 0; - write_ptr = dev_priv->ring_start; - } - timeout = 0; - while (write == *dev_priv->ring_read_ptr) { - (void)R128_READ(R128_PM4_BUFFER_DL_RPTR); - if (timeout++ >= dev_priv->usec_timeout) - return -EBUSY; - udelay(1); - } - c--; - } - - if (write < 32) - memcpy(dev_priv->ring_end, - dev_priv->ring_start, - write * sizeof(u32)); - - /* Make 
sure WC cache has been flushed */ - r128_flush_write_combine(); - - dev_priv->sarea_priv->ring_write = write; - R128_WRITE(R128_PM4_BUFFER_DL_WPTR, write); - - *count = 0; - - return 0; -} - -static int r128_submit_packets_pio(drm_device_t *dev, - u32 *commands, int *count) -{ - drm_r128_private_t *dev_priv = dev->dev_private; - int ret; - - while (*count > 1) { - if ((ret = r128_do_cce_wait_for_fifo(dev, 2)) < 0) return ret; - R128_WRITE(R128_PM4_FIFO_DATA_EVEN, *commands++); - R128_WRITE(R128_PM4_FIFO_DATA_ODD, *commands++); - *count -= 2; - } - - if (*count) { - if ((ret = r128_do_cce_wait_for_fifo(dev, 2)) < 0) return ret; - R128_WRITE(R128_PM4_FIFO_DATA_EVEN, *commands++); - R128_WRITE(R128_PM4_FIFO_DATA_ODD, R128_CCE_PACKET2); - *count = 0; - } - - return 0; -} - -static int r128_do_submit_packets(drm_device_t *dev, u32 *buffer, int count) -{ - drm_r128_private_t *dev_priv = dev->dev_private; - int c = count; - int ret; - - if (dev_priv->cce_is_bm_mode) { - int left = 0; - - if (c >= dev_priv->ring_entries) { - c = dev_priv->ring_entries-1; - left = count - c; - } - - /* Since this is only used by the kernel we can use the - insecure ring buffer submit packet routine */ - ret = r128_submit_packets_ring(dev, buffer, &c); - - c += left; - } else { - /* Since this is only used by the kernel we can use the - insecure PIO submit packet routine */ - ret = r128_submit_packets_pio(dev, buffer, &c); - } - - if (ret < 0) return ret; - else return c; -} - -int r128_submit_pkt(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) -{ - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - drm_r128_private_t *dev_priv = dev->dev_private; - drm_r128_packet_t packet; - u32 *buffer; - int c; - int size; - int ret = 0; - - if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || - dev->lock.pid != current->pid) { - DRM_ERROR("r128_submit_pkt called without holding the lock\n"); - return -EINVAL; - } - - if (copy_from_user(&packet, (drm_r128_packet_t *)arg, sizeof(packet))) - return -EFAULT; - - c = packet.count; - size = c * sizeof(*buffer); - - if (dev_priv->cce_is_bm_mode) { - int left = 0; - - if (c >= dev_priv->ring_entries) { - c = dev_priv->ring_entries-1; - size = c * sizeof(*buffer); - left = packet.count - c; - } - - if ((buffer = kmalloc(size, 0)) == NULL) return -ENOMEM; - if (copy_from_user(buffer, packet.buffer, size)) - return -EFAULT; - - if (dev_priv->cce_secure) - ret = r128_submit_packets_ring_secure(dev, buffer, &c); - else - ret = r128_submit_packets_ring(dev, buffer, &c); - - c += left; - } else { - if ((buffer = kmalloc(size, 0)) == NULL) return -ENOMEM; - if (copy_from_user(buffer, packet.buffer, size)) - return -EFAULT; - - if (dev_priv->cce_secure) - ret = r128_submit_packets_pio_secure(dev, buffer, &c); - else - ret = r128_submit_packets_pio(dev, buffer, &c); - } - - kfree(buffer); - - packet.count = c; - if (copy_to_user((drm_r128_packet_t *)arg, &packet, sizeof(packet))) - return -EFAULT; - - if (ret) return ret; - else if (c > 0) return -EAGAIN; - - return 0; -} - -static int r128_send_vertbufs(drm_device_t *dev, drm_r128_vertex_t *v) -{ - drm_device_dma_t *dma = dev->dma; - drm_r128_private_t *dev_priv = dev->dev_private; - drm_r128_buf_priv_t *buf_priv; - drm_buf_t *buf; - int i, ret; - u32 cce[2]; - - /* Make sure we have valid data */ - for (i = 0; i < v->send_count; i++) { - int idx = v->send_indices[i]; - - if (idx < 0 || idx >= dma->buf_count) { - DRM_ERROR("Index %d (of %d max)\n", - idx, dma->buf_count - 1); - return -EINVAL; 
- } - buf = dma->buflist[idx]; - if (buf->pid != current->pid) { - DRM_ERROR("Process %d using buffer owned by %d\n", - current->pid, buf->pid); - return -EINVAL; - } - if (buf->pending) { - DRM_ERROR("Sending pending buffer:" - " buffer %d, offset %d\n", - v->send_indices[i], i); - return -EINVAL; - } - } - - /* Wait for idle, if we've wrapped to make sure that all pending - buffers have been processed */ - if (dev_priv->submit_age == R128_MAX_VBUF_AGE) { - if ((ret = r128_do_cce_wait_for_idle(dev)) < 0) return ret; - dev_priv->submit_age = 0; - r128_mark_vertbufs_done(dev); - } - - /* Make sure WC cache has been flushed (if in PIO mode) */ - if (!dev_priv->cce_is_bm_mode) r128_flush_write_combine(); - - /* FIXME: Add support for sending vertex buffer to the CCE here - instead of in client code. The v->prim holds the primitive - type that should be drawn. Loop over the list buffers in - send_indices[] and submit a packet for each VB. - - This will require us to loop over the clip rects here as - well, which implies that we extend the kernel driver to allow - cliprects to be stored here. Note that the cliprects could - possibly come from the X server instead of the client, but - this will require additional changes to the DRI to allow for - this optimization. */ - - /* Submit a CCE packet that writes submit_age to R128_VB_AGE_REG */ - cce[0] = R128CCE0(R128_CCE_PACKET0, R128_VB_AGE_REG, 0); - cce[1] = dev_priv->submit_age; - if ((ret = r128_do_submit_packets(dev, cce, 2)) < 0) { - /* Until we add support for sending VBs to the CCE in - this routine, we can recover from this error. After - we add that support, we won't be able to easily - recover, so we will probably have to implement - another mechanism for handling timeouts from packets - submitted directly by the kernel. 
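The submit_age scheme described in the comment above deserves a concrete illustration: every vertex buffer is stamped with the current submit_age when it is queued, a trailing CCE packet writes that same value to R128_VB_AGE_REG, and r128_freelist_get() treats any pending buffer whose stamp is at or below the value read back from that register as free again. A minimal self-contained sketch of the reclaim test follows; the struct and helper names are illustrative rather than part of the driver, and the register read is abstracted into a plain parameter.

/* Sketch of the age-based reclaim used by r128_send_vertbufs() and
 * r128_freelist_get() in this diff.  done_age stands in for the value
 * read back from R128_VB_AGE_REG. */
struct sketch_buf {
        int pending;            /* queued to the CCE, not yet retired */
        unsigned int age;       /* submit_age stamped when it was queued */
};

static struct sketch_buf *sketch_reclaim(struct sketch_buf *bufs, int count,
                                         unsigned int done_age)
{
        int i;

        for (i = 0; i < count; i++) {
                /* A pending buffer may be reused once the engine has written
                 * an age at least as large as the buffer's stamp. */
                if (bufs[i].pending && bufs[i].age <= done_age) {
                        bufs[i].pending = 0;
                        return &bufs[i];
                }
        }
        return NULL;    /* nothing retired yet; the caller polls again */
}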
*/ - return ret; - } - - /* Now that the submit packet request has succeeded, we can mark - the buffers as pending */ - for (i = 0; i < v->send_count; i++) { - buf = dma->buflist[v->send_indices[i]]; - buf->pending = 1; - - buf_priv = buf->dev_private; - buf_priv->age = dev_priv->submit_age; - } - - dev_priv->submit_age++; - - return 0; -} - -static drm_buf_t *r128_freelist_get(drm_device_t *dev) -{ - drm_device_dma_t *dma = dev->dma; - drm_r128_private_t *dev_priv = dev->dev_private; - drm_r128_buf_priv_t *buf_priv; - drm_buf_t *buf; - int i, t; - - /* FIXME: Optimize -- use freelist code */ - - for (i = 0; i < dma->buf_count; i++) { - buf = dma->buflist[i]; - buf_priv = buf->dev_private; - if (buf->pid == 0) return buf; - } - - for (t = 0; t < dev_priv->usec_timeout; t++) { - u32 done_age = R128_READ(R128_VB_AGE_REG); - - for (i = 0; i < dma->buf_count; i++) { - buf = dma->buflist[i]; - buf_priv = buf->dev_private; - if (buf->pending && buf_priv->age <= done_age) { - /* The buffer has been processed, so it - can now be used */ - buf->pending = 0; - return buf; - } - } - udelay(1); - } - - r128_status(dev); - return NULL; -} - - -static int r128_get_vertbufs(drm_device_t *dev, drm_r128_vertex_t *v) -{ - drm_buf_t *buf; - int i; - - for (i = v->granted_count; i < v->request_count; i++) { - buf = r128_freelist_get(dev); - if (!buf) break; - buf->pid = current->pid; - if (copy_to_user(&v->request_indices[i], - &buf->idx, - sizeof(buf->idx)) || - copy_to_user(&v->request_sizes[i], - &buf->total, - sizeof(buf->total))) - return -EFAULT; - ++v->granted_count; - } - return 0; -} - -int r128_vertex_buf(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) -{ - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - drm_r128_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; - int retcode = 0; - drm_r128_vertex_t v; - - if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || - dev->lock.pid != current->pid) { - DRM_ERROR("r128_vertex_buf called without holding the lock\n"); - return -EINVAL; - } - - if (!dev_priv || dev_priv->is_pci) { - DRM_ERROR("r128_vertex_buf called with a PCI card\n"); - return -EINVAL; - } - - if (copy_from_user(&v, (drm_r128_vertex_t *)arg, sizeof(v))) - return -EFAULT; - DRM_DEBUG("%d: %d send, %d req\n", - current->pid, v.send_count, v.request_count); - - if (v.send_count < 0 || v.send_count > dma->buf_count) { - DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n", - current->pid, v.send_count, dma->buf_count); - return -EINVAL; - } - if (v.request_count < 0 || v.request_count > dma->buf_count) { - DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", - current->pid, v.request_count, dma->buf_count); - return -EINVAL; - } - - if (v.send_count) { - retcode = r128_send_vertbufs(dev, &v); - } - - v.granted_count = 0; - - if (!retcode && v.request_count) { - retcode = r128_get_vertbufs(dev, &v); - } - - DRM_DEBUG("%d returning, granted = %d\n", - current->pid, v.granted_count); - if (copy_to_user((drm_r128_vertex_t *)arg, &v, sizeof(v))) - return -EFAULT; - - return retcode; -} diff --git a/linux/r128_drm.h b/linux/r128_drm.h index 8a4842cf3..94dba1ed9 100644 --- a/linux/r128_drm.h +++ b/linux/r128_drm.h @@ -11,11 +11,11 @@ * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission 
notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL @@ -24,88 +24,252 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * Authors: Kevin E. Martin <martin@valinux.com> + * Authors: + * Kevin E. Martin <martin@valinux.com> + * Gareth Hughes <gareth@valinux.com> * */ -#ifndef _R128_DRM_H_ -#define _R128_DRM_H_ +#ifndef __R128_DRM_H__ +#define __R128_DRM_H__ + +/* WARNING: If you change any of these defines, make sure to change the + * defines in the X server file (r128_sarea.h) + */ +#ifndef __R128_SAREA_DEFINES__ +#define __R128_SAREA_DEFINES__ + +/* What needs to be changed for the current vertex buffer? + */ +#define R128_UPLOAD_CONTEXT 0x001 +#define R128_UPLOAD_SETUP 0x002 +#define R128_UPLOAD_TEX0 0x004 +#define R128_UPLOAD_TEX1 0x008 +#define R128_UPLOAD_TEX0IMAGES 0x010 +#define R128_UPLOAD_TEX1IMAGES 0x020 +#define R128_UPLOAD_CORE 0x040 +#define R128_UPLOAD_MASKS 0x080 +#define R128_UPLOAD_WINDOW 0x100 +#define R128_UPLOAD_CLIPRECTS 0x200 /* handled client-side */ +#define R128_REQUIRE_QUIESCENCE 0x400 +#define R128_UPLOAD_ALL 0x7ff + +#define R128_FRONT 0x1 +#define R128_BACK 0x2 +#define R128_DEPTH 0x4 + +/* Primitive types + */ +#define R128_POINTS 0x1 +#define R128_LINES 0x2 +#define R128_LINE_STRIP 0x3 +#define R128_TRIANGLES 0x4 +#define R128_TRIANGLE_FAN 0x5 +#define R128_TRIANGLE_STRIP 0x6 + +/* Vertex/indirect buffer size + */ +#define R128_BUFFER_SIZE 16384 + +/* Byte offsets for indirect buffer data + */ +#define R128_INDEX_PRIM_OFFSET 20 +#define R128_HOSTDATA_BLIT_OFFSET 32 + +/* Keep these small for testing. + */ +#define R128_NR_SAREA_CLIPRECTS 12 + +/* There are 2 heaps (local/AGP). Each region within a heap is a + * minimum of 64k, and there are at most 64 of them per heap. 
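To make the 64k granularity mentioned above concrete: a heap is carved into at most R128_NR_TEX_REGIONS fixed-size slots, and an upload at a given byte offset occupies a contiguous run of them. A rough sketch of the index arithmetic, assuming a power-of-two region size given by its log2; the helper is illustrative only, since the real bookkeeping lives in the client-side texture managers, outside this diff.

/* Sketch: which fixed-size texture regions does the byte range
 * [offset, offset + size) cover?  log2_gran plays the role of
 * R128_LOG_TEX_GRANULARITY and size is assumed to be non-zero. */
static void sketch_texture_regions(unsigned int offset, unsigned int size,
                                   unsigned int log2_gran,
                                   int *first, int *last)
{
        *first = offset >> log2_gran;
        *last  = (offset + size - 1) >> log2_gran;
        /* Every region in [*first, *last] must have its age refreshed so
         * the LRU eviction logic does not discard a texture that is still
         * in use. */
}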
+ */ +#define R128_LOCAL_TEX_HEAP 0 +#define R128_AGP_TEX_HEAP 1 +#define R128_NR_TEX_HEAPS 2 +#define R128_NR_TEX_REGIONS 64 +#define R128_LOG_TEX_GRANULARITY 16 + +#define R128_NR_CONTEXT_REGS 12 + +#define R128_MAX_TEXTURE_LEVELS 11 +#define R128_MAX_TEXTURE_UNITS 2 + +#endif /* __R128_SAREA_DEFINES__ */ + +typedef struct { + /* Context state - can be written in one large chunk */ + unsigned int dst_pitch_offset_c; + unsigned int dp_gui_master_cntl_c; + unsigned int sc_top_left_c; + unsigned int sc_bottom_right_c; + unsigned int z_offset_c; + unsigned int z_pitch_c; + unsigned int z_sten_cntl_c; + unsigned int tex_cntl_c; + unsigned int misc_3d_state_cntl_reg; + unsigned int texture_clr_cmp_clr_c; + unsigned int texture_clr_cmp_msk_c; + unsigned int fog_color_c; + + /* Texture state */ + unsigned int tex_size_pitch_c; + unsigned int constant_color_c; + + /* Setup state */ + unsigned int pm4_vc_fpu_setup; + unsigned int setup_cntl; + + /* Mask state */ + unsigned int dp_write_mask; + unsigned int sten_ref_mask_c; + unsigned int plane_3d_mask_c; + + /* Window state */ + unsigned int window_xy_offset; + + /* Core state */ + unsigned int scale_3d_cntl; +} drm_r128_context_regs_t; + +/* Setup registers for each texture unit + */ +typedef struct { + unsigned int tex_cntl; + unsigned int tex_combine_cntl; + unsigned int tex_size_pitch; + unsigned int tex_offset[R128_MAX_TEXTURE_LEVELS]; + unsigned int tex_border_color; +} drm_r128_texture_regs_t; + + +typedef struct drm_tex_region { + unsigned char next, prev; + unsigned char in_use; + int age; +} drm_tex_region_t; + +typedef struct drm_r128_sarea { + /* The channel for communication of state information to the kernel + * on firing a vertex buffer. + */ + drm_r128_context_regs_t context_state; + drm_r128_texture_regs_t tex_state[R128_MAX_TEXTURE_UNITS]; + unsigned int dirty; + unsigned int vertsize; + unsigned int vc_format; + + /* The current cliprects, or a subset thereof. + */ + drm_clip_rect_t boxes[R128_NR_SAREA_CLIPRECTS]; + unsigned int nbox; + + /* Counters for client-side throttling of rendering clients. 
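The dirty word declared in this shared structure is the handshake between client and kernel: the 3D client ORs in the R128_UPLOAD_* bits for whatever state it has changed, and r128_emit_state() in the new r128_state.c later in this diff emits each flagged group to the ring and clears its bit. A minimal sketch of that consumer loop, with the per-group emit calls collapsed into a single stub so the example stands alone; the names prefixed with SKETCH_ are illustrative stand-ins.

/* Sketch of the dirty-bit consumer pattern used by r128_emit_state().
 * emit_group() stands in for r128_emit_context(), r128_emit_setup() and
 * friends; the flag values mirror the R128_UPLOAD_* defines above. */
#define SKETCH_UPLOAD_CONTEXT   0x001
#define SKETCH_UPLOAD_SETUP     0x002
#define SKETCH_UPLOAD_MASKS     0x080

static void sketch_emit_state(unsigned int *dirty,
                              void (*emit_group)(unsigned int bit))
{
        static const unsigned int groups[] = {
                SKETCH_UPLOAD_CONTEXT, SKETCH_UPLOAD_SETUP, SKETCH_UPLOAD_MASKS,
        };
        unsigned int i;

        for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++) {
                if (*dirty & groups[i]) {
                        emit_group(groups[i]); /* write that state block to the ring */
                        *dirty &= ~groups[i];  /* acknowledge it back to the client */
                }
        }
}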
+ */ + unsigned int last_frame; + unsigned int last_dispatch; + + drm_tex_region_t tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS+1]; + int tex_age[R128_NR_TEX_HEAPS]; + int ctx_owner; +} drm_r128_sarea_t; + /* WARNING: If you change any of these defines, make sure to change the * defines in the Xserver file (xf86drmR128.h) */ typedef struct drm_r128_init { - enum { + enum { R128_INIT_CCE = 0x01, R128_CLEANUP_CCE = 0x02 } func; int sarea_priv_offset; int is_pci; int cce_mode; - int cce_fifo_size; int cce_secure; int ring_size; int usec_timeout; - int fb_offset; - int agp_ring_offset; - int agp_read_ptr_offset; - int agp_vertbufs_offset; - int agp_indbufs_offset; - int agp_textures_offset; - int mmio_offset; + unsigned int fb_bpp; + unsigned int front_offset, front_pitch; + unsigned int back_offset, back_pitch; + unsigned int depth_bpp; + unsigned int depth_offset, depth_pitch; + unsigned int span_offset; + + unsigned int fb_offset; + unsigned int mmio_offset; + unsigned int ring_offset; + unsigned int ring_rptr_offset; + unsigned int buffers_offset; + unsigned int agp_textures_offset; } drm_r128_init_t; -typedef struct drm_r128_packet { - unsigned int *buffer; - int count; - int flags; -} drm_r128_packet_t; - -typedef enum drm_r128_prim { - _DRM_R128_PRIM_NONE = 0x0001, - _DRM_R128_PRIM_POINT = 0x0002, - _DRM_R128_PRIM_LINE = 0x0004, - _DRM_R128_PRIM_POLY_LINE = 0x0008, - _DRM_R128_PRIM_TRI_LIST = 0x0010, - _DRM_R128_PRIM_TRI_FAN = 0x0020, - _DRM_R128_PRIM_TRI_STRIP = 0x0040, - _DRM_R128_PRIM_TRI_TYPE2 = 0x0080 -} drm_r128_prim_t; +typedef struct drm_r128_cce_stop { + int flush; + int idle; +} drm_r128_cce_stop_t; + +typedef struct drm_r128_fullscreen { + enum { + R128_INIT_FULLSCREEN = 0x01, + R128_CLEANUP_FULLSCREEN = 0x02 + } func; +} drm_r128_fullscreen_t; + +typedef struct drm_r128_clear { + unsigned int flags; + int x, y, w, h; + unsigned int clear_color; + unsigned int clear_depth; +} drm_r128_clear_t; typedef struct drm_r128_vertex { - /* Indices here refer to the offset into - buflist in drm_buf_get_t. */ - int send_count; /* Number of buffers to send */ - int *send_indices; /* List of handles to buffers */ - int *send_sizes; /* Lengths of data to send */ - drm_r128_prim_t prim; /* Primitive type */ - int request_count; /* Number of buffers requested */ - int *request_indices; /* Buffer information */ - int *request_sizes; - int granted_count; /* Number of buffers granted */ + int prim; + int idx; /* Index of vertex buffer */ + int count; /* Number of vertices in buffer */ + int discard; /* Client finished with buffer? */ } drm_r128_vertex_t; -/* WARNING: If you change any of these defines, make sure to change the - * defines in the Xserver file (r128_sarea.h) - */ -#define R128_LOCAL_TEX_HEAP 0 -#define R128_AGP_TEX_HEAP 1 -#define R128_NR_TEX_HEAPS 2 -#define R128_NR_TEX_REGIONS 64 -#define R128_LOG_TEX_GRANULARITY 16 +typedef struct drm_r128_indices { + int prim; + int idx; + int start; + int end; + int discard; /* Client finished with buffer? 
*/ +} drm_r128_indices_t; -typedef struct drm_tex_region { - unsigned char next, prev; - unsigned char in_use; - int age; -} drm_tex_region_t; +typedef struct drm_r128_blit { + int idx; + int pitch; + int offset; + int format; + unsigned short x, y; + unsigned short width, height; +} drm_r128_blit_t; -typedef struct drm_r128_sarea { - drm_tex_region_t tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS+1]; - int tex_age[R128_NR_TEX_HEAPS]; - int ctx_owner; - int ring_write; -} drm_r128_sarea_t; +typedef struct drm_r128_depth { + enum { + R128_WRITE_SPAN = 0x01, + R128_WRITE_PIXELS = 0x02, + R128_READ_SPAN = 0x03, + R128_READ_PIXELS = 0x04 + } func; + int n; + int *x; + int *y; + unsigned int *buffer; + unsigned char *mask; +} drm_r128_depth_t; + +typedef struct drm_r128_stipple { + unsigned int *mask; +} drm_r128_stipple_t; + +typedef struct drm_r128_indirect { + int idx; + int start; + int end; + int discard; +} drm_r128_indirect_t; #endif diff --git a/linux/r128_drv.c b/linux/r128_drv.c index 969ada93f..cf2589251 100644 --- a/linux/r128_drv.c +++ b/linux/r128_drv.c @@ -24,8 +24,10 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * Authors: Rickard E. (Rik) Faith <faith@valinux.com> - * Kevin E. Martin <martin@valinux.com> + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> + * Kevin E. Martin <martin@valinux.com> + * Gareth Hughes <gareth@valinux.com> * */ @@ -33,15 +35,15 @@ #include "drmP.h" #include "r128_drv.h" -#define R128_NAME "r128" -#define R128_DESC "ATI Rage 128" -#define R128_DATE "20000928" -#define R128_MAJOR 1 -#define R128_MINOR 0 -#define R128_PATCHLEVEL 0 +#define R128_NAME "r128" +#define R128_DESC "ATI Rage 128" +#define R128_DATE "20010101" +#define R128_MAJOR 2 +#define R128_MINOR 1 +#define R128_PATCHLEVEL 4 -static drm_device_t r128_device; -drm_ctx_t r128_res_ctx; +static drm_device_t r128_device; +drm_ctx_t r128_res_ctx; static struct file_operations r128_fops = { #if LINUX_VERSION_CODE >= 0x020400 @@ -65,52 +67,63 @@ static struct miscdevice r128_misc = { }; static drm_ioctl_desc_t r128_ioctls[] = { - [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { r128_version, 0, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 }, - - [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { r128_addbufs, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { r128_mapbufs, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 }, - - [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { r128_addctx, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { r128_rmctx, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { r128_modctx, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { r128_getctx, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { r128_switchctx, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { r128_newctx, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { r128_resctx, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { 
drm_rmdraw, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { r128_lock, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { r128_unlock, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { r128_version, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 }, + + [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { r128_addbufs, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { r128_mapbufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 }, + + [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { r128_addctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { r128_rmctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { r128_modctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { r128_getctx, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { r128_switchctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { r128_newctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { r128_resctx, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { r128_cce_buffers, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { r128_lock, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { r128_unlock, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 }, #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 }, #endif - [DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_init_cce, 1, 1 }, - [DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)] = { r128_eng_reset, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_R128_FLUSH)] = { r128_eng_flush, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_R128_PACKET)] = { r128_submit_pkt, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_R128_IDLE)] = { r128_cce_idle, 1, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)] = { r128_vertex_buf, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_cce_init, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_START)] = { r128_cce_start, 1, 1 }, + 
[DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_STOP)] = { r128_cce_stop, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_RESET)] = { r128_cce_reset, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_IDLE)] = { r128_cce_idle, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)] = { r128_engine_reset, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_FULLSCREEN)]= { r128_fullscreen, 1, 0 }, + + [DRM_IOCTL_NR(DRM_IOCTL_R128_SWAP)] = { r128_cce_swap, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_CLEAR)] = { r128_cce_clear, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)] = { r128_cce_vertex, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_INDICES)] = { r128_cce_indices, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_BLIT)] = { r128_cce_blit, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_DEPTH)] = { r128_cce_depth, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_STIPPLE)] = { r128_cce_stipple, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_R128_INDIRECT)] = { r128_cce_indirect, 1, 1 }, }; #define R128_IOCTL_COUNT DRM_ARRAY_SIZE(r128_ioctls) @@ -349,12 +362,12 @@ static int __init r128_init(void) #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) dev->agp = drm_agp_init(); - if (dev->agp == NULL) { - DRM_ERROR("Cannot initialize agpgart module.\n"); - drm_proc_cleanup(); - misc_deregister(&r128_misc); - r128_takedown(dev); - return -ENOMEM; + if (dev->agp == NULL) { + DRM_ERROR("Cannot initialize agpgart module.\n"); + drm_proc_cleanup(); + misc_deregister(&r128_misc); + r128_takedown(dev); + return -ENOMEM; } #ifdef CONFIG_MTRR @@ -413,8 +426,8 @@ module_init(r128_init); module_exit(r128_cleanup); -int r128_version(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +int r128_version(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) { drm_version_t version; int len; @@ -424,13 +437,13 @@ int r128_version(struct inode *inode, struct file *filp, unsigned int cmd, sizeof(version))) return -EFAULT; -#define DRM_COPY(name,value) \ - len = strlen(value); \ - if (len > name##_len) len = name##_len; \ - name##_len = strlen(value); \ - if (len && name) { \ - if (copy_to_user(name, value, len)) \ - return -EFAULT; \ +#define DRM_COPY(name,value) \ + len = strlen(value); \ + if (len > name##_len) len = name##_len; \ + name##_len = strlen(value); \ + if (len && name) { \ + if (copy_to_user(name, value, len)) \ + return -EFAULT; \ } version.version_major = R128_MAJOR; @@ -478,7 +491,17 @@ int r128_release(struct inode *inode, struct file *filp) lock_kernel(); dev = priv->dev; + DRM_DEBUG("open_count = %d\n", dev->open_count); + + /* Force the cleanup of page flipping when required */ + if ( dev->dev_private ) { + drm_r128_private_t *dev_priv = dev->dev_private; + if ( dev_priv->page_flipping ) { + r128_do_cleanup_pageflip( dev ); + } + } + if (!(retcode = drm_release(inode, filp))) { #if LINUX_VERSION_CODE < 0x020333 MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */ @@ -506,9 +529,8 @@ int r128_release(struct inode *inode, struct file *filp) } /* r128_ioctl is called whenever a process performs an ioctl on /dev/drm. 
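Each entry in the ioctl table above pairs a handler with two small flags, and r128_ioctl() below refuses the call with -EACCES when the root-only flag is set for a caller without CAP_SYS_ADMIN or the auth flag is set for an unauthenticated file handle. A stripped-down sketch of that gatekeeping, with the descriptor reduced to plain ints and the capability and authentication tests passed in as booleans; the real drm_ioctl_desc_t field names and ordering are defined in drmP.h, outside this diff.

/* Sketch of the permission gate in r128_ioctl(): -EINVAL for a missing
 * handler, -EACCES when the caller lacks the required privilege. */
#include <errno.h>
#include <stddef.h>

struct sketch_ioctl_desc {
        int (*func)(void *arg);
        int auth_needed;        /* caller must have authenticated */
        int root_only;          /* caller must have CAP_SYS_ADMIN */
};

static int sketch_dispatch(const struct sketch_ioctl_desc *d, void *arg,
                           int caller_is_admin, int caller_authenticated)
{
        if (d->func == NULL)
                return -EINVAL;
        if ((d->root_only && !caller_is_admin) ||
            (d->auth_needed && !caller_authenticated))
                return -EACCES;
        return d->func(arg);
}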
*/ - -int r128_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +int r128_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) { int nr = DRM_IOCTL_NR(cmd); drm_file_t *priv = filp->private_data; @@ -534,19 +556,25 @@ int r128_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, DRM_DEBUG("no function\n"); retcode = -EINVAL; } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN)) - || (ioctl->auth_needed && !priv->authenticated)) { + || (ioctl->auth_needed && !priv->authenticated)) { retcode = -EACCES; } else { retcode = (func)(inode, filp, cmd, arg); } } +#if 0 + if ( retcode ) { + DRM_INFO( "%s 0x%x ret = %d\n", __FUNCTION__, nr, retcode ); + } +#endif + atomic_dec(&dev->ioctl_count); return retcode; } -int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +int r128_lock(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; @@ -572,33 +600,10 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd, lock.context, current->pid, dev->lock.hw_lock->lock, lock.flags); -#if 0 - /* dev->queue_count == 0 right now for - r128. FIXME? */ - if (lock.context < 0 || lock.context >= dev->queue_count) + if (lock.context < 0) return -EINVAL; -#endif if (!ret) { -#if 0 - if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) - != lock.context) { - long j = jiffies - dev->lock.lock_time; - - if (lock.context == r128_res_ctx.handle && - j >= 0 && j < DRM_LOCK_SLICE) { - /* Can't take lock if we just had it and - there is contention. */ - DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d jiffies=%d\n", - lock.context, current->pid, j, - dev->lock.lock_time, jiffies); - current->state = TASK_INTERRUPTIBLE; - current->policy |= SCHED_YIELD; - schedule_timeout(DRM_LOCK_SLICE-j); - DRM_DEBUG("jiffies=%d\n", jiffies); - } - } -#endif add_wait_queue(&dev->lock.lock_queue, &entry); for (;;) { current->state = TASK_INTERRUPTIBLE; @@ -617,9 +622,6 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd, /* Contention */ atomic_inc(&dev->total_sleeps); -#if 1 - current->policy |= SCHED_YIELD; -#endif schedule(); if (signal_pending(current)) { ret = -ERESTARTSYS; @@ -630,32 +632,6 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd, remove_wait_queue(&dev->lock.lock_queue, &entry); } -#if 0 - if (!ret && dev->last_context != lock.context && - lock.context != r128_res_ctx.handle && - dev->last_context != r128_res_ctx.handle) { - add_wait_queue(&dev->context_wait, &entry); - current->state = TASK_INTERRUPTIBLE; - /* PRE: dev->last_context != lock.context */ - r128_context_switch(dev, dev->last_context, lock.context); - /* POST: we will wait for the context - switch and will dispatch on a later call - when dev->last_context == lock.context - NOTE WE HOLD THE LOCK THROUGHOUT THIS - TIME! 
*/ - current->policy |= SCHED_YIELD; - schedule(); - current->state = TASK_RUNNING; - remove_wait_queue(&dev->context_wait, &entry); - if (signal_pending(current)) { - ret = -EINTR; - } else if (dev->last_context != lock.context) { - DRM_ERROR("Context mismatch: %d %d\n", - dev->last_context, lock.context); - } - } -#endif - if (!ret) { sigemptyset(&dev->sigmask); sigaddset(&dev->sigmask, SIGSTOP); @@ -670,6 +646,7 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd, } if (lock.flags & _DRM_LOCK_QUIESCENT) { /* Make hardware quiescent */ + DRM_DEBUG( "not quiescent!\n" ); #if 0 r128_quiescent(dev); #endif @@ -692,8 +669,8 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd, } -int r128_unlock(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +int r128_unlock(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; diff --git a/linux/r128_drv.h b/linux/r128_drv.h index 63b98c727..08ef5dc92 100644 --- a/linux/r128_drv.h +++ b/linux/r128_drv.h @@ -24,75 +24,144 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * Authors: Rickard E. (Rik) Faith <faith@valinux.com> - * Kevin E. Martin <martin@valinux.com> + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> + * Kevin E. Martin <martin@valinux.com> + * Gareth Hughes <gareth@valinux.com> * */ -#ifndef _R128_DRV_H_ -#define _R128_DRV_H_ +#ifndef __R128_DRV_H__ +#define __R128_DRV_H__ -typedef struct drm_r128_private { - int is_pci; - - int cce_mode; - int cce_fifo_size; - int cce_is_bm_mode; - int cce_secure; - - drm_r128_sarea_t *sarea_priv; - - __volatile__ u32 *ring_read_ptr; +typedef struct drm_r128_freelist { + unsigned int age; + drm_buf_t *buf; + struct drm_r128_freelist *next; + struct drm_r128_freelist *prev; +} drm_r128_freelist_t; - u32 *ring_start; - u32 *ring_end; - int ring_size; - int ring_sizel2qw; - int ring_entries; +typedef struct drm_r128_ring_buffer { + u32 *start; + u32 *end; + int size; + int size_l2qw; - int submit_age; + volatile u32 *head; + u32 tail; + u32 tail_mask; + int space; +} drm_r128_ring_buffer_t; - int usec_timeout; +typedef struct drm_r128_private { + drm_r128_ring_buffer_t ring; + drm_r128_sarea_t *sarea_priv; - drm_map_t *sarea; - drm_map_t *fb; - drm_map_t *agp_ring; - drm_map_t *agp_read_ptr; - drm_map_t *agp_vertbufs; - drm_map_t *agp_indbufs; - drm_map_t *agp_textures; - drm_map_t *mmio; + int cce_mode; + int cce_fifo_size; + int cce_running; + + drm_r128_freelist_t *head; + drm_r128_freelist_t *tail; + + int usec_timeout; + int is_pci; + + atomic_t idle_count; + + int page_flipping; + int current_page; + u32 crtc_offset; + u32 crtc_offset_cntl; + + unsigned int fb_bpp; + unsigned int front_offset; + unsigned int front_pitch; + unsigned int back_offset; + unsigned int back_pitch; + + unsigned int depth_bpp; + unsigned int depth_offset; + unsigned int depth_pitch; + unsigned int span_offset; + + u32 front_pitch_offset_c; + u32 back_pitch_offset_c; + u32 depth_pitch_offset_c; + u32 span_pitch_offset_c; + + drm_map_t *sarea; + drm_map_t *fb; + drm_map_t *mmio; + drm_map_t *cce_ring; + drm_map_t *ring_rptr; + drm_map_t *buffers; + drm_map_t *agp_textures; } drm_r128_private_t; typedef struct drm_r128_buf_priv { - u32 age; + u32 age; + int prim; + int discard; + int dispatched; + drm_r128_freelist_t *list_entry; } drm_r128_buf_priv_t; /* r128_drv.c */ -extern int 
r128_version(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int r128_open(struct inode *inode, struct file *filp); -extern int r128_release(struct inode *inode, struct file *filp); -extern int r128_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int r128_lock(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int r128_unlock(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); - - /* r128_dma.c */ -extern int r128_init_cce(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int r128_eng_reset(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int r128_eng_flush(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int r128_submit_pkt(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int r128_cce_idle(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int r128_vertex_buf(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +extern int r128_version( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int r128_open( struct inode *inode, struct file *filp ); +extern int r128_release( struct inode *inode, struct file *filp ); +extern int r128_ioctl( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int r128_lock( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int r128_unlock( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); + + /* r128_cce.c */ +extern int r128_cce_init( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int r128_cce_start( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int r128_cce_stop( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int r128_cce_reset( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int r128_cce_idle( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int r128_engine_reset( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int r128_fullscreen( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int r128_cce_buffers( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); + +extern void r128_freelist_reset( drm_device_t *dev ); +extern drm_buf_t *r128_freelist_get( drm_device_t *dev ); + +extern int r128_wait_ring( drm_r128_private_t *dev_priv, int n ); +extern void r128_update_ring_snapshot( drm_r128_private_t *dev_priv ); + +extern int r128_do_cleanup_pageflip( drm_device_t *dev ); + + /* r128_state.c */ +extern int r128_cce_clear( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int r128_cce_swap( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int r128_cce_vertex( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int r128_cce_indices( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int r128_cce_blit( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int 
r128_cce_depth( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int r128_cce_stipple( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int r128_cce_indirect( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); /* r128_bufs.c */ extern int r128_addbufs(struct inode *inode, struct file *filp, @@ -124,77 +193,206 @@ extern int r128_context_switch_complete(drm_device_t *dev, int new); * for Rage 128 kernel driver. */ -#define R128_PC_NGUI_CTLSTAT 0x0184 -# define R128_PC_FLUSH_ALL 0x00ff -# define R128_PC_BUSY (1 << 31) - -#define R128_CLOCK_CNTL_INDEX 0x0008 -#define R128_CLOCK_CNTL_DATA 0x000c -# define R128_PLL_WR_EN (1 << 7) - -#define R128_MCLK_CNTL 0x000f -# define R128_FORCE_GCP (1 << 16) -# define R128_FORCE_PIPE3D_CP (1 << 17) -# define R128_FORCE_RCP (1 << 18) - -#define R128_GEN_RESET_CNTL 0x00f0 -# define R128_SOFT_RESET_GUI (1 << 0) - -#define R128_PM4_BUFFER_CNTL 0x0704 -# define R128_PM4_NONPM4 (0 << 28) -# define R128_PM4_192PIO (1 << 28) -# define R128_PM4_192BM (2 << 28) -# define R128_PM4_128PIO_64INDBM (3 << 28) -# define R128_PM4_128BM_64INDBM (4 << 28) -# define R128_PM4_64PIO_128INDBM (5 << 28) -# define R128_PM4_64BM_128INDBM (6 << 28) -# define R128_PM4_64PIO_64VCBM_64INDBM (7 << 28) -# define R128_PM4_64BM_64VCBM_64INDBM (8 << 28) -# define R128_PM4_64PIO_64VCPIO_64INDPIO (15 << 28) - - -#define R128_PM4_BUFFER_DL_RPTR 0x0710 -#define R128_PM4_BUFFER_DL_WPTR 0x0714 -# define R128_PM4_BUFFER_DL_DONE (1 << 31) - -#define R128_PM4_VC_FPU_SETUP 0x071c - -#define R128_PM4_STAT 0x07b8 -# define R128_PM4_FIFOCNT_MASK 0x0fff -# define R128_PM4_BUSY (1 << 16) -# define R128_PM4_GUI_ACTIVE (1 << 31) - -#define R128_PM4_BUFFER_ADDR 0x07f0 -#define R128_PM4_MICRO_CNTL 0x07fc -# define R128_PM4_MICRO_FREERUN (1 << 30) - -#define R128_PM4_FIFO_DATA_EVEN 0x1000 -#define R128_PM4_FIFO_DATA_ODD 0x1004 - -#define R128_GUI_SCRATCH_REG0 0x15e0 -#define R128_GUI_SCRATCH_REG1 0x15e4 -#define R128_GUI_SCRATCH_REG2 0x15e8 -#define R128_GUI_SCRATCH_REG3 0x15ec -#define R128_GUI_SCRATCH_REG4 0x15f0 -#define R128_GUI_SCRATCH_REG5 0x15f4 - -#define R128_GUI_STAT 0x1740 -# define R128_GUI_FIFOCNT_MASK 0x0fff -# define R128_GUI_ACTIVE (1 << 31) - +#define R128_AUX_SC_CNTL 0x1660 +# define R128_AUX1_SC_EN (1 << 0) +# define R128_AUX1_SC_MODE_OR (0 << 1) +# define R128_AUX1_SC_MODE_NAND (1 << 1) +# define R128_AUX2_SC_EN (1 << 2) +# define R128_AUX2_SC_MODE_OR (0 << 3) +# define R128_AUX2_SC_MODE_NAND (1 << 3) +# define R128_AUX3_SC_EN (1 << 4) +# define R128_AUX3_SC_MODE_OR (0 << 5) +# define R128_AUX3_SC_MODE_NAND (1 << 5) +#define R128_AUX1_SC_LEFT 0x1664 +#define R128_AUX1_SC_RIGHT 0x1668 +#define R128_AUX1_SC_TOP 0x166c +#define R128_AUX1_SC_BOTTOM 0x1670 +#define R128_AUX2_SC_LEFT 0x1674 +#define R128_AUX2_SC_RIGHT 0x1678 +#define R128_AUX2_SC_TOP 0x167c +#define R128_AUX2_SC_BOTTOM 0x1680 +#define R128_AUX3_SC_LEFT 0x1684 +#define R128_AUX3_SC_RIGHT 0x1688 +#define R128_AUX3_SC_TOP 0x168c +#define R128_AUX3_SC_BOTTOM 0x1690 + +#define R128_BRUSH_DATA0 0x1480 +#define R128_BUS_CNTL 0x0030 +# define R128_BUS_MASTER_DIS (1 << 6) + +#define R128_CLOCK_CNTL_INDEX 0x0008 +#define R128_CLOCK_CNTL_DATA 0x000c +# define R128_PLL_WR_EN (1 << 7) +#define R128_CONSTANT_COLOR_C 0x1d34 +#define R128_CRTC_OFFSET 0x0224 +#define R128_CRTC_OFFSET_CNTL 0x0228 +# define R128_CRTC_OFFSET_FLIP_CNTL (1 << 16) + +#define R128_DP_GUI_MASTER_CNTL 0x146c +# define R128_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0) +# 
define R128_GMC_DST_PITCH_OFFSET_CNTL (1 << 1) +# define R128_GMC_BRUSH_SOLID_COLOR (13 << 4) +# define R128_GMC_BRUSH_NONE (15 << 4) +# define R128_GMC_DST_16BPP (4 << 8) +# define R128_GMC_DST_24BPP (5 << 8) +# define R128_GMC_DST_32BPP (6 << 8) +# define R128_GMC_DST_DATATYPE_SHIFT 8 +# define R128_GMC_SRC_DATATYPE_COLOR (3 << 12) +# define R128_DP_SRC_SOURCE_MEMORY (2 << 24) +# define R128_DP_SRC_SOURCE_HOST_DATA (3 << 24) +# define R128_GMC_CLR_CMP_CNTL_DIS (1 << 28) +# define R128_GMC_AUX_CLIP_DIS (1 << 29) +# define R128_GMC_WR_MSK_DIS (1 << 30) +# define R128_ROP3_S 0x00cc0000 +# define R128_ROP3_P 0x00f00000 +#define R128_DP_WRITE_MASK 0x16cc +#define R128_DST_PITCH_OFFSET_C 0x1c80 +# define R128_DST_TILE (1 << 31) + +#define R128_GEN_RESET_CNTL 0x00f0 +# define R128_SOFT_RESET_GUI (1 << 0) + +#define R128_GUI_SCRATCH_REG0 0x15e0 +#define R128_GUI_SCRATCH_REG1 0x15e4 +#define R128_GUI_SCRATCH_REG2 0x15e8 +#define R128_GUI_SCRATCH_REG3 0x15ec +#define R128_GUI_SCRATCH_REG4 0x15f0 +#define R128_GUI_SCRATCH_REG5 0x15f4 + +#define R128_GUI_STAT 0x1740 +# define R128_GUI_FIFOCNT_MASK 0x0fff +# define R128_GUI_ACTIVE (1 << 31) + +#define R128_MCLK_CNTL 0x000f +# define R128_FORCE_GCP (1 << 16) +# define R128_FORCE_PIPE3D_CP (1 << 17) +# define R128_FORCE_RCP (1 << 18) + +#define R128_PC_GUI_CTLSTAT 0x1748 +#define R128_PC_NGUI_CTLSTAT 0x0184 +# define R128_PC_FLUSH_GUI (3 << 0) +# define R128_PC_RI_GUI (1 << 2) +# define R128_PC_FLUSH_ALL 0x00ff +# define R128_PC_BUSY (1 << 31) + +#define R128_PRIM_TEX_CNTL_C 0x1cb0 + +#define R128_SCALE_3D_CNTL 0x1a00 +#define R128_SEC_TEX_CNTL_C 0x1d00 +#define R128_SEC_TEXTURE_BORDER_COLOR_C 0x1d3c +#define R128_SETUP_CNTL 0x1bc4 +#define R128_STEN_REF_MASK_C 0x1d40 + +#define R128_TEX_CNTL_C 0x1c9c +# define R128_TEX_CACHE_FLUSH (1 << 23) + +#define R128_WINDOW_XY_OFFSET 0x1bcc + + +/* CCE registers + */ +#define R128_PM4_BUFFER_OFFSET 0x0700 +#define R128_PM4_BUFFER_CNTL 0x0704 +# define R128_PM4_MASK (15 << 28) +# define R128_PM4_NONPM4 (0 << 28) +# define R128_PM4_192PIO (1 << 28) +# define R128_PM4_192BM (2 << 28) +# define R128_PM4_128PIO_64INDBM (3 << 28) +# define R128_PM4_128BM_64INDBM (4 << 28) +# define R128_PM4_64PIO_128INDBM (5 << 28) +# define R128_PM4_64BM_128INDBM (6 << 28) +# define R128_PM4_64PIO_64VCBM_64INDBM (7 << 28) +# define R128_PM4_64BM_64VCBM_64INDBM (8 << 28) +# define R128_PM4_64PIO_64VCPIO_64INDPIO (15 << 28) + +#define R128_PM4_BUFFER_WM_CNTL 0x0708 +# define R128_WMA_SHIFT 0 +# define R128_WMB_SHIFT 8 +# define R128_WMC_SHIFT 16 +# define R128_WB_WM_SHIFT 24 + +#define R128_PM4_BUFFER_DL_RPTR_ADDR 0x070c +#define R128_PM4_BUFFER_DL_RPTR 0x0710 +#define R128_PM4_BUFFER_DL_WPTR 0x0714 +# define R128_PM4_BUFFER_DL_DONE (1 << 31) + +#define R128_PM4_VC_FPU_SETUP 0x071c + +#define R128_PM4_IW_INDOFF 0x0738 +#define R128_PM4_IW_INDSIZE 0x073c + +#define R128_PM4_STAT 0x07b8 +# define R128_PM4_FIFOCNT_MASK 0x0fff +# define R128_PM4_BUSY (1 << 16) +# define R128_PM4_GUI_ACTIVE (1 << 31) + +#define R128_PM4_MICROCODE_ADDR 0x07d4 +#define R128_PM4_MICROCODE_RADDR 0x07d8 +#define R128_PM4_MICROCODE_DATAH 0x07dc +#define R128_PM4_MICROCODE_DATAL 0x07e0 + +#define R128_PM4_BUFFER_ADDR 0x07f0 +#define R128_PM4_MICRO_CNTL 0x07fc +# define R128_PM4_MICRO_FREERUN (1 << 30) + +#define R128_PM4_FIFO_DATA_EVEN 0x1000 +#define R128_PM4_FIFO_DATA_ODD 0x1004 + + +/* CCE command packets + */ +#define R128_CCE_PACKET0 0x00000000 +#define R128_CCE_PACKET1 0x40000000 +#define R128_CCE_PACKET2 0x80000000 +#define R128_CCE_PACKET3 0xC0000000 +# 
define R128_CNTL_HOSTDATA_BLT 0x00009400 +# define R128_CNTL_PAINT_MULTI 0x00009A00 +# define R128_CNTL_BITBLT_MULTI 0x00009B00 +# define R128_3D_RNDR_GEN_INDX_PRIM 0x00002300 + +#define R128_CCE_PACKET_MASK 0xC0000000 +#define R128_CCE_PACKET_COUNT_MASK 0x3fff0000 +#define R128_CCE_PACKET0_REG_MASK 0x000007ff +#define R128_CCE_PACKET1_REG0_MASK 0x000007ff +#define R128_CCE_PACKET1_REG1_MASK 0x003ff800 + +#define R128_CCE_VC_CNTL_PRIM_TYPE_NONE 0x00000000 +#define R128_CCE_VC_CNTL_PRIM_TYPE_POINT 0x00000001 +#define R128_CCE_VC_CNTL_PRIM_TYPE_LINE 0x00000002 +#define R128_CCE_VC_CNTL_PRIM_TYPE_POLY_LINE 0x00000003 +#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_LIST 0x00000004 +#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_FAN 0x00000005 +#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_STRIP 0x00000006 +#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2 0x00000007 +#define R128_CCE_VC_CNTL_PRIM_WALK_IND 0x00000010 +#define R128_CCE_VC_CNTL_PRIM_WALK_LIST 0x00000020 +#define R128_CCE_VC_CNTL_PRIM_WALK_RING 0x00000030 +#define R128_CCE_VC_CNTL_NUM_SHIFT 16 + +#define R128_DATATYPE_CI8 2 +#define R128_DATATYPE_ARGB1555 3 +#define R128_DATATYPE_RGB565 4 +#define R128_DATATYPE_RGB888 5 +#define R128_DATATYPE_ARGB8888 6 +#define R128_DATATYPE_RGB332 7 +#define R128_DATATYPE_RGB8 9 +#define R128_DATATYPE_ARGB4444 15 + +/* Constants */ +#define R128_AGP_OFFSET 0x02000000 + +#define R128_WATERMARK_L 16 +#define R128_WATERMARK_M 8 +#define R128_WATERMARK_N 8 +#define R128_WATERMARK_K 128 -/* CCE command packets */ -#define R128_CCE_PACKET0 0x00000000 -#define R128_CCE_PACKET1 0x40000000 -#define R128_CCE_PACKET2 0x80000000 -# define R128_CCE_PACKET_MASK 0xC0000000 -# define R128_CCE_PACKET_COUNT_MASK 0x3fff0000 -# define R128_CCE_PACKET0_REG_MASK 0x000007ff -# define R128_CCE_PACKET1_REG0_MASK 0x000007ff -# define R128_CCE_PACKET1_REG1_MASK 0x003ff800 +#define R128_MAX_USEC_TIMEOUT 100000 /* 100 ms */ +#define R128_LAST_FRAME_REG R128_GUI_SCRATCH_REG0 +#define R128_LAST_DISPATCH_REG R128_GUI_SCRATCH_REG1 +#define R128_MAX_VB_AGE 0xffffffff -#define R128_MAX_USEC_TIMEOUT 100000 /* 100 ms */ +#define R128_MAX_VB_VERTS (0xffff) #define R128_BASE(reg) ((u32)(dev_priv->mmio->handle)) @@ -221,4 +419,58 @@ extern int R128_READ_PLL(drm_device_t *dev, int addr); #define R128CCE2(p) ((p)) #define R128CCE3(p,n) ((p) | ((n) << 16)) -#endif + + + +#define CCE_PACKET0( reg, n ) (R128_CCE_PACKET0 | \ + ((n) << 16) | ((reg) >> 2)) +#define CCE_PACKET1( reg0, reg1 ) (R128_CCE_PACKET1 | \ + (((reg1) >> 2) << 11) | ((reg0) >> 2)) +#define CCE_PACKET2() (R128_CCE_PACKET2) +#define CCE_PACKET3( pkt, n ) (R128_CCE_PACKET3 | \ + (pkt) | ((n) << 16)) + + +#define r128_flush_write_combine() mb() + + +#define R128_VERBOSE 0 + +#define RING_LOCALS int write; unsigned int tail_mask; volatile u32 *ring; + +#define BEGIN_RING( n ) do { \ + if ( R128_VERBOSE ) { \ + DRM_INFO( "BEGIN_RING( %d ) in %s\n", \ + (n), __FUNCTION__ ); \ + } \ + if ( dev_priv->ring.space < (n) * sizeof(u32) ) { \ + r128_wait_ring( dev_priv, (n) * sizeof(u32) ); \ + } \ + dev_priv->ring.space -= (n) * sizeof(u32); \ + ring = dev_priv->ring.start; \ + write = dev_priv->ring.tail; \ + tail_mask = dev_priv->ring.tail_mask; \ +} while (0) + +#define ADVANCE_RING() do { \ + if ( R128_VERBOSE ) { \ + DRM_INFO( "ADVANCE_RING() tail=0x%06x wr=0x%06x\n", \ + write, dev_priv->ring.tail ); \ + } \ + r128_flush_write_combine(); \ + dev_priv->ring.tail = write; \ + R128_WRITE( R128_PM4_BUFFER_DL_WPTR, write ); \ +} while (0) + +#define OUT_RING( x ) do { \ + if ( R128_VERBOSE ) { \ + DRM_INFO( " 
OUT_RING( 0x%08x ) at 0x%x\n", \ + (unsigned int)(x), write ); \ + } \ + ring[write++] = (x); \ + write &= tail_mask; \ +} while (0) + +#define R128_PERFORMANCE_BOXES 0 + +#endif /* __R128_DRV_H__ */ diff --git a/linux/r128_state.c b/linux/r128_state.c new file mode 100644 index 000000000..ba0039293 --- /dev/null +++ b/linux/r128_state.c @@ -0,0 +1,1676 @@ +/* r128_state.c -- State support for r128 -*- linux-c -*- + * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com + * + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Gareth Hughes <gareth@valinux.com> + * + */ + +#define __NO_VERSION__ +#include "drmP.h" +#include "r128_drv.h" +#include "drm.h" + + +/* ================================================================ + * CCE hardware state programming functions + */ + +static void r128_emit_clip_rects( drm_r128_private_t *dev_priv, + drm_clip_rect_t *boxes, int count ) +{ + u32 aux_sc_cntl = 0x00000000; + RING_LOCALS; + DRM_DEBUG( " %s\n", __FUNCTION__ ); + + BEGIN_RING( 17 ); + + if ( count >= 1 ) { + OUT_RING( CCE_PACKET0( R128_AUX1_SC_LEFT, 3 ) ); + OUT_RING( boxes[0].x1 ); + OUT_RING( boxes[0].x2 - 1 ); + OUT_RING( boxes[0].y1 ); + OUT_RING( boxes[0].y2 - 1 ); + + aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR); + } + if ( count >= 2 ) { + OUT_RING( CCE_PACKET0( R128_AUX2_SC_LEFT, 3 ) ); + OUT_RING( boxes[1].x1 ); + OUT_RING( boxes[1].x2 - 1 ); + OUT_RING( boxes[1].y1 ); + OUT_RING( boxes[1].y2 - 1 ); + + aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR); + } + if ( count >= 3 ) { + OUT_RING( CCE_PACKET0( R128_AUX3_SC_LEFT, 3 ) ); + OUT_RING( boxes[2].x1 ); + OUT_RING( boxes[2].x2 - 1 ); + OUT_RING( boxes[2].y1 ); + OUT_RING( boxes[2].y2 - 1 ); + + aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR); + } + + OUT_RING( CCE_PACKET0( R128_AUX_SC_CNTL, 0 ) ); + OUT_RING( aux_sc_cntl ); + + ADVANCE_RING(); +} + +static inline void r128_emit_core( drm_r128_private_t *dev_priv ) +{ + drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_r128_context_regs_t *ctx = &sarea_priv->context_state; + RING_LOCALS; + DRM_DEBUG( " %s\n", __FUNCTION__ ); + + BEGIN_RING( 2 ); + + OUT_RING( CCE_PACKET0( R128_SCALE_3D_CNTL, 0 ) ); + OUT_RING( ctx->scale_3d_cntl ); + + ADVANCE_RING(); +} + +static inline void r128_emit_context( drm_r128_private_t *dev_priv ) +{ + drm_r128_sarea_t *sarea_priv = 
dev_priv->sarea_priv; + drm_r128_context_regs_t *ctx = &sarea_priv->context_state; + RING_LOCALS; + DRM_DEBUG( " %s\n", __FUNCTION__ ); + + BEGIN_RING( 13 ); + + OUT_RING( CCE_PACKET0( R128_DST_PITCH_OFFSET_C, 11 ) ); + OUT_RING( ctx->dst_pitch_offset_c ); + OUT_RING( ctx->dp_gui_master_cntl_c ); + OUT_RING( ctx->sc_top_left_c ); + OUT_RING( ctx->sc_bottom_right_c ); + OUT_RING( ctx->z_offset_c ); + OUT_RING( ctx->z_pitch_c ); + OUT_RING( ctx->z_sten_cntl_c ); + OUT_RING( ctx->tex_cntl_c ); + OUT_RING( ctx->misc_3d_state_cntl_reg ); + OUT_RING( ctx->texture_clr_cmp_clr_c ); + OUT_RING( ctx->texture_clr_cmp_msk_c ); + OUT_RING( ctx->fog_color_c ); + + ADVANCE_RING(); +} + +static inline void r128_emit_setup( drm_r128_private_t *dev_priv ) +{ + drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_r128_context_regs_t *ctx = &sarea_priv->context_state; + RING_LOCALS; + DRM_DEBUG( " %s\n", __FUNCTION__ ); + + BEGIN_RING( 3 ); + + OUT_RING( CCE_PACKET1( R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP ) ); + OUT_RING( ctx->setup_cntl ); + OUT_RING( ctx->pm4_vc_fpu_setup ); + + ADVANCE_RING(); +} + +static inline void r128_emit_masks( drm_r128_private_t *dev_priv ) +{ + drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_r128_context_regs_t *ctx = &sarea_priv->context_state; + RING_LOCALS; + DRM_DEBUG( " %s\n", __FUNCTION__ ); + + BEGIN_RING( 5 ); + + OUT_RING( CCE_PACKET0( R128_DP_WRITE_MASK, 0 ) ); + OUT_RING( ctx->dp_write_mask ); + + OUT_RING( CCE_PACKET0( R128_STEN_REF_MASK_C, 1 ) ); + OUT_RING( ctx->sten_ref_mask_c ); + OUT_RING( ctx->plane_3d_mask_c ); + + ADVANCE_RING(); +} + +static inline void r128_emit_window( drm_r128_private_t *dev_priv ) +{ + drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_r128_context_regs_t *ctx = &sarea_priv->context_state; + RING_LOCALS; + DRM_DEBUG( " %s\n", __FUNCTION__ ); + + BEGIN_RING( 2 ); + + OUT_RING( CCE_PACKET0( R128_WINDOW_XY_OFFSET, 0 ) ); + OUT_RING( ctx->window_xy_offset ); + + ADVANCE_RING(); +} + +static inline void r128_emit_tex0( drm_r128_private_t *dev_priv ) +{ + drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_r128_context_regs_t *ctx = &sarea_priv->context_state; + drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0]; + int i; + RING_LOCALS; + DRM_DEBUG( " %s\n", __FUNCTION__ ); + + BEGIN_RING( 7 + R128_MAX_TEXTURE_LEVELS ); + + OUT_RING( CCE_PACKET0( R128_PRIM_TEX_CNTL_C, + 2 + R128_MAX_TEXTURE_LEVELS ) ); + OUT_RING( tex->tex_cntl ); + OUT_RING( tex->tex_combine_cntl ); + OUT_RING( ctx->tex_size_pitch_c ); + for ( i = 0 ; i < R128_MAX_TEXTURE_LEVELS ; i++ ) { + OUT_RING( tex->tex_offset[i] ); + } + + OUT_RING( CCE_PACKET0( R128_CONSTANT_COLOR_C, 1 ) ); + OUT_RING( ctx->constant_color_c ); + OUT_RING( tex->tex_border_color ); + + ADVANCE_RING(); +} + +static inline void r128_emit_tex1( drm_r128_private_t *dev_priv ) +{ + drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1]; + int i; + RING_LOCALS; + DRM_DEBUG( " %s\n", __FUNCTION__ ); + + BEGIN_RING( 5 + R128_MAX_TEXTURE_LEVELS ); + + OUT_RING( CCE_PACKET0( R128_SEC_TEX_CNTL_C, + 1 + R128_MAX_TEXTURE_LEVELS ) ); + OUT_RING( tex->tex_cntl ); + OUT_RING( tex->tex_combine_cntl ); + for ( i = 0 ; i < R128_MAX_TEXTURE_LEVELS ; i++ ) { + OUT_RING( tex->tex_offset[i] ); + } + + OUT_RING( CCE_PACKET0( R128_SEC_TEXTURE_BORDER_COLOR_C, 0 ) ); + OUT_RING( tex->tex_border_color ); + + ADVANCE_RING(); +} + +static inline void r128_emit_state( drm_r128_private_t *dev_priv ) +{ + drm_r128_sarea_t *sarea_priv = 
dev_priv->sarea_priv; + unsigned int dirty = sarea_priv->dirty; + + DRM_DEBUG( "%s: dirty=0x%08x\n", __FUNCTION__, dirty ); + + if ( dirty & R128_UPLOAD_CORE ) { + r128_emit_core( dev_priv ); + sarea_priv->dirty &= ~R128_UPLOAD_CORE; + } + + if ( dirty & R128_UPLOAD_CONTEXT ) { + r128_emit_context( dev_priv ); + sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT; + } + + if ( dirty & R128_UPLOAD_SETUP ) { + r128_emit_setup( dev_priv ); + sarea_priv->dirty &= ~R128_UPLOAD_SETUP; + } + + if ( dirty & R128_UPLOAD_MASKS ) { + r128_emit_masks( dev_priv ); + sarea_priv->dirty &= ~R128_UPLOAD_MASKS; + } + + if ( dirty & R128_UPLOAD_WINDOW ) { + r128_emit_window( dev_priv ); + sarea_priv->dirty &= ~R128_UPLOAD_WINDOW; + } + + if ( dirty & R128_UPLOAD_TEX0 ) { + r128_emit_tex0( dev_priv ); + sarea_priv->dirty &= ~R128_UPLOAD_TEX0; + } + + if ( dirty & R128_UPLOAD_TEX1 ) { + r128_emit_tex1( dev_priv ); + sarea_priv->dirty &= ~R128_UPLOAD_TEX1; + } + + /* Turn off the texture cache flushing */ + sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH; + + sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE; +} + + +#if R128_PERFORMANCE_BOXES +/* ================================================================ + * Performance monitoring functions + */ + +static void r128_clear_box( drm_r128_private_t *dev_priv, + int x, int y, int w, int h, + int r, int g, int b ) +{ + u32 pitch, offset; + u32 fb_bpp, color; + RING_LOCALS; + + switch ( dev_priv->fb_bpp ) { + case 16: + fb_bpp = R128_GMC_DST_16BPP; + color = (((r & 0xf8) << 8) | + ((g & 0xfc) << 3) | + ((b & 0xf8) >> 3)); + break; + case 24: + fb_bpp = R128_GMC_DST_24BPP; + color = ((r << 16) | (g << 8) | b); + break; + case 32: + fb_bpp = R128_GMC_DST_32BPP; + color = (((0xff) << 24) | (r << 16) | (g << 8) | b); + break; + default: + return; + } + + offset = dev_priv->back_offset; + pitch = dev_priv->back_pitch >> 3; + + BEGIN_RING( 6 ); + + OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) ); + OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL | + R128_GMC_BRUSH_SOLID_COLOR | + fb_bpp | + R128_GMC_SRC_DATATYPE_COLOR | + R128_ROP3_P | + R128_GMC_CLR_CMP_CNTL_DIS | + R128_GMC_AUX_CLIP_DIS ); + + OUT_RING( (pitch << 21) | (offset >> 5) ); + OUT_RING( color ); + + OUT_RING( (x << 16) | y ); + OUT_RING( (w << 16) | h ); + + ADVANCE_RING(); +} + +static void r128_cce_performance_boxes( drm_r128_private_t *dev_priv ) +{ + if ( atomic_read( &dev_priv->idle_count ) == 0 ) { + r128_clear_box( dev_priv, 64, 4, 8, 8, 0, 255, 0 ); + } else { + atomic_set( &dev_priv->idle_count, 0 ); + } +} + +#endif + + +/* ================================================================ + * CCE command dispatch functions + */ + +static void r128_print_dirty( const char *msg, unsigned int flags ) +{ + DRM_INFO( "%s: (0x%x) %s%s%s%s%s%s%s%s%s\n", + msg, + flags, + (flags & R128_UPLOAD_CORE) ? "core, " : "", + (flags & R128_UPLOAD_CONTEXT) ? "context, " : "", + (flags & R128_UPLOAD_SETUP) ? "setup, " : "", + (flags & R128_UPLOAD_TEX0) ? "tex0, " : "", + (flags & R128_UPLOAD_TEX1) ? "tex1, " : "", + (flags & R128_UPLOAD_MASKS) ? "masks, " : "", + (flags & R128_UPLOAD_WINDOW) ? "window, " : "", + (flags & R128_UPLOAD_CLIPRECTS) ? "cliprects, " : "", + (flags & R128_REQUIRE_QUIESCENCE) ? 
"quiescence, " : "" ); +} + +static void r128_cce_dispatch_clear( drm_device_t *dev, + unsigned int flags, + int cx, int cy, int cw, int ch, + unsigned int clear_color, + unsigned int clear_depth ) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; + int nbox = sarea_priv->nbox; + drm_clip_rect_t *pbox = sarea_priv->boxes; + u32 fb_bpp, depth_bpp; + int i; + RING_LOCALS; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + r128_update_ring_snapshot( dev_priv ); + + switch ( dev_priv->fb_bpp ) { + case 16: + fb_bpp = R128_GMC_DST_16BPP; + break; + case 32: + fb_bpp = R128_GMC_DST_32BPP; + break; + default: + return; + } + switch ( dev_priv->depth_bpp ) { + case 16: + depth_bpp = R128_GMC_DST_16BPP; + break; + case 24: + case 32: + depth_bpp = R128_GMC_DST_32BPP; + break; + default: + return; + } + + if ( dev_priv->page_flipping && dev_priv->current_page == 1) { + unsigned int tmp = flags; + + flags &= ~(R128_FRONT | R128_BACK); + if ( tmp & R128_FRONT ) flags |= R128_BACK; + if ( tmp & R128_BACK ) flags |= R128_FRONT; + } + + for ( i = 0 ; i < nbox ; i++ ) { + int x = pbox[i].x1; + int y = pbox[i].y1; + int w = pbox[i].x2 - x; + int h = pbox[i].y2 - y; + + DRM_DEBUG( "dispatch clear %d,%d-%d,%d flags 0x%x\n", + pbox[i].x1, pbox[i].y1, pbox[i].x2, + pbox[i].y2, flags ); + + if ( flags & (R128_FRONT | R128_BACK) ) { + BEGIN_RING( 2 ); + + OUT_RING( CCE_PACKET0( R128_DP_WRITE_MASK, 0 ) ); + OUT_RING( sarea_priv->context_state.plane_3d_mask_c ); + + ADVANCE_RING(); + } + + if ( flags & R128_FRONT ) { + BEGIN_RING( 6 ); + + OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) ); + OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL | + R128_GMC_BRUSH_SOLID_COLOR | + fb_bpp | + R128_GMC_SRC_DATATYPE_COLOR | + R128_ROP3_P | + R128_GMC_CLR_CMP_CNTL_DIS | + R128_GMC_AUX_CLIP_DIS ); + + OUT_RING( dev_priv->front_pitch_offset_c ); + OUT_RING( clear_color ); + + OUT_RING( (x << 16) | y ); + OUT_RING( (w << 16) | h ); + + ADVANCE_RING(); + } + + if ( flags & R128_BACK ) { + BEGIN_RING( 6 ); + + OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) ); + OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL | + R128_GMC_BRUSH_SOLID_COLOR | + fb_bpp | + R128_GMC_SRC_DATATYPE_COLOR | + R128_ROP3_P | + R128_GMC_CLR_CMP_CNTL_DIS | + R128_GMC_AUX_CLIP_DIS ); + + OUT_RING( dev_priv->back_pitch_offset_c ); + OUT_RING( clear_color ); + + OUT_RING( (x << 16) | y ); + OUT_RING( (w << 16) | h ); + + ADVANCE_RING(); + } + + if ( flags & R128_DEPTH ) { + BEGIN_RING( 6 ); + + OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) ); + OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL | + R128_GMC_BRUSH_SOLID_COLOR | + depth_bpp | + R128_GMC_SRC_DATATYPE_COLOR | + R128_ROP3_P | + R128_GMC_CLR_CMP_CNTL_DIS | + R128_GMC_AUX_CLIP_DIS | + R128_GMC_WR_MSK_DIS ); + + OUT_RING( dev_priv->depth_pitch_offset_c ); + OUT_RING( clear_depth ); + + OUT_RING( (x << 16) | y ); + OUT_RING( (w << 16) | h ); + + ADVANCE_RING(); + } + } +} + +static void r128_cce_dispatch_swap( drm_device_t *dev ) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; + int nbox = sarea_priv->nbox; + drm_clip_rect_t *pbox = sarea_priv->boxes; + u32 fb_bpp; + int i; + RING_LOCALS; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + r128_update_ring_snapshot( dev_priv ); + +#if R128_PERFORMANCE_BOXES + /* Do some trivial performance monitoring... 
+ */ + r128_cce_performance_boxes( dev_priv ); +#endif + + switch ( dev_priv->fb_bpp ) { + case 16: + fb_bpp = R128_GMC_DST_16BPP; + break; + case 32: + default: + fb_bpp = R128_GMC_DST_32BPP; + break; + } + + for ( i = 0 ; i < nbox ; i++ ) { + int x = pbox[i].x1; + int y = pbox[i].y1; + int w = pbox[i].x2 - x; + int h = pbox[i].y2 - y; + + BEGIN_RING( 7 ); + + OUT_RING( CCE_PACKET3( R128_CNTL_BITBLT_MULTI, 5 ) ); + OUT_RING( R128_GMC_SRC_PITCH_OFFSET_CNTL | + R128_GMC_DST_PITCH_OFFSET_CNTL | + R128_GMC_BRUSH_NONE | + fb_bpp | + R128_GMC_SRC_DATATYPE_COLOR | + R128_ROP3_S | + R128_DP_SRC_SOURCE_MEMORY | + R128_GMC_CLR_CMP_CNTL_DIS | + R128_GMC_AUX_CLIP_DIS | + R128_GMC_WR_MSK_DIS ); + + OUT_RING( dev_priv->back_pitch_offset_c ); + OUT_RING( dev_priv->front_pitch_offset_c ); + + OUT_RING( (x << 16) | y ); + OUT_RING( (x << 16) | y ); + OUT_RING( (w << 16) | h ); + + ADVANCE_RING(); + } + + /* Increment the frame counter. The client-side 3D driver must + * throttle the framerate by waiting for this value before + * performing the swapbuffer ioctl. + */ + dev_priv->sarea_priv->last_frame++; + + BEGIN_RING( 2 ); + + OUT_RING( CCE_PACKET0( R128_LAST_FRAME_REG, 0 ) ); + OUT_RING( dev_priv->sarea_priv->last_frame ); + + ADVANCE_RING(); +} + +static void r128_cce_dispatch_flip( drm_device_t *dev ) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + DRM_DEBUG( "%s: page=%d\n", __FUNCTION__, dev_priv->current_page ); + + r128_update_ring_snapshot( dev_priv ); + +#if R128_PERFORMANCE_BOXES + /* Do some trivial performance monitoring... + */ + r128_cce_performance_boxes( dev_priv ); +#endif + + BEGIN_RING( 2 ); + + OUT_RING( CCE_PACKET0( R128_CRTC_OFFSET, 0 ) ); + + if ( dev_priv->current_page == 0 ) { + OUT_RING( dev_priv->back_offset ); + dev_priv->current_page = 1; + } else { + OUT_RING( dev_priv->front_offset ); + dev_priv->current_page = 0; + } + + ADVANCE_RING(); + + /* Increment the frame counter. The client-side 3D driver must + * throttle the framerate by waiting for this value before + * performing the swapbuffer ioctl. 
+ */ + dev_priv->sarea_priv->last_frame++; + + BEGIN_RING( 2 ); + + OUT_RING( CCE_PACKET0( R128_LAST_FRAME_REG, 0 ) ); + OUT_RING( dev_priv->sarea_priv->last_frame ); + + ADVANCE_RING(); +} + +static void r128_cce_dispatch_vertex( drm_device_t *dev, + drm_buf_t *buf ) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + drm_r128_buf_priv_t *buf_priv = buf->dev_private; + drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; + int format = sarea_priv->vc_format; + int offset = dev_priv->buffers->offset + buf->offset - dev->agp->base; + int size = buf->used; + int prim = buf_priv->prim; + int i = 0; + RING_LOCALS; + DRM_DEBUG( "%s: buf=%d nbox=%d\n", + __FUNCTION__, buf->idx, sarea_priv->nbox ); + + r128_update_ring_snapshot( dev_priv ); + + if ( 0 ) + r128_print_dirty( "dispatch_vertex", sarea_priv->dirty ); + + if ( buf->used ) { + buf_priv->dispatched = 1; + + if ( sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS ) { + r128_emit_state( dev_priv ); + } + + do { + /* Emit the next set of up to three cliprects */ + if ( i < sarea_priv->nbox ) { + r128_emit_clip_rects( dev_priv, + &sarea_priv->boxes[i], + sarea_priv->nbox - i ); + } + + /* Emit the vertex buffer rendering commands */ + BEGIN_RING( 5 ); + + OUT_RING( CCE_PACKET3( R128_3D_RNDR_GEN_INDX_PRIM, 3 ) ); + OUT_RING( offset ); + OUT_RING( size ); + OUT_RING( format ); + OUT_RING( prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST | + (size << R128_CCE_VC_CNTL_NUM_SHIFT) ); + + ADVANCE_RING(); + + i += 3; + } while ( i < sarea_priv->nbox ); + } + + if ( buf_priv->discard ) { + buf_priv->age = dev_priv->sarea_priv->last_dispatch; + + /* Emit the vertex buffer age */ + BEGIN_RING( 2 ); + + OUT_RING( CCE_PACKET0( R128_LAST_DISPATCH_REG, 0 ) ); + OUT_RING( buf_priv->age ); + + ADVANCE_RING(); + + buf->pending = 1; + buf->used = 0; + /* FIXME: Check dispatched field */ + buf_priv->dispatched = 0; + } + + dev_priv->sarea_priv->last_dispatch++; + +#if 0 + if ( dev_priv->submit_age == R128_MAX_VB_AGE ) { + ret = r128_do_cce_idle( dev_priv ); + if ( ret < 0 ) return ret; + dev_priv->submit_age = 0; + r128_freelist_reset( dev ); + } +#endif + + sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS; + sarea_priv->nbox = 0; +} + + + + +static void r128_cce_dispatch_indirect( drm_device_t *dev, + drm_buf_t *buf, + int start, int end ) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + drm_r128_buf_priv_t *buf_priv = buf->dev_private; + RING_LOCALS; + DRM_DEBUG( "indirect: buf=%d s=0x%x e=0x%x\n", + buf->idx, start, end ); + + r128_update_ring_snapshot( dev_priv ); + + if ( start != end ) { + int offset = (dev_priv->buffers->offset - dev->agp->base + + buf->offset + start); + int dwords = (end - start + 3) / sizeof(u32); + + /* Indirect buffer data must be an even number of + * dwords, so if we've been given an odd number we must + * pad the data with a Type-2 CCE packet. 
+ */ + if ( dwords & 1 ) { + u32 *data = (u32 *) + ((char *)dev_priv->buffers->handle + + buf->offset + start); + data[dwords++] = R128_CCE_PACKET2; + } + + buf_priv->dispatched = 1; + + /* Fire off the indirect buffer */ + BEGIN_RING( 3 ); + + OUT_RING( CCE_PACKET0( R128_PM4_IW_INDOFF, 1 ) ); + OUT_RING( offset ); + OUT_RING( dwords ); + + ADVANCE_RING(); + } + + if ( buf_priv->discard ) { + buf_priv->age = dev_priv->sarea_priv->last_dispatch; + + /* Emit the indirect buffer age */ + BEGIN_RING( 2 ); + + OUT_RING( CCE_PACKET0( R128_LAST_DISPATCH_REG, 0 ) ); + OUT_RING( buf_priv->age ); + + ADVANCE_RING(); + + buf->pending = 1; + buf->used = 0; + /* FIXME: Check dispatched field */ + buf_priv->dispatched = 0; + } + + dev_priv->sarea_priv->last_dispatch++; + +#if 0 + if ( dev_priv->submit_age == R128_MAX_VB_AGE ) { + ret = r128_do_cce_idle( dev_priv ); + if ( ret < 0 ) return ret; + dev_priv->submit_age = 0; + r128_freelist_reset( dev ); + } +#endif +} + +static void r128_cce_dispatch_indices( drm_device_t *dev, + drm_buf_t *buf, + int start, int end, + int count ) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + drm_r128_buf_priv_t *buf_priv = buf->dev_private; + drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; + int format = sarea_priv->vc_format; + int offset = dev_priv->buffers->offset - dev->agp->base; + int prim = buf_priv->prim; + u32 *data; + int dwords; + int i = 0; + RING_LOCALS; + DRM_DEBUG( "indices: s=%d e=%d c=%d\n", start, end, count ); + + r128_update_ring_snapshot( dev_priv ); + + if ( 0 ) + r128_print_dirty( "dispatch_indices", sarea_priv->dirty ); + + if ( start != end ) { + buf_priv->dispatched = 1; + + if ( sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS ) { + r128_emit_state( dev_priv ); + } + + dwords = (end - start + 3) / sizeof(u32); + + data = (u32 *)((char *)dev_priv->buffers->handle + + buf->offset + start); + + data[0] = CCE_PACKET3( R128_3D_RNDR_GEN_INDX_PRIM, dwords-2 ); + + data[1] = offset; + data[2] = R128_MAX_VB_VERTS; + data[3] = format; + data[4] = (prim | R128_CCE_VC_CNTL_PRIM_WALK_IND | + (count << 16)); + + if ( count & 0x1 ) { + data[dwords-1] &= 0x0000ffff; + } + + do { + /* Emit the next set of up to three cliprects */ + if ( i < sarea_priv->nbox ) { + r128_emit_clip_rects( dev_priv, + &sarea_priv->boxes[i], + sarea_priv->nbox - i ); + } + + r128_cce_dispatch_indirect( dev, buf, start, end ); + + i += 3; + } while ( i < sarea_priv->nbox ); + } + + if ( buf_priv->discard ) { + buf_priv->age = dev_priv->sarea_priv->last_dispatch; + + /* Emit the vertex buffer age */ + BEGIN_RING( 2 ); + + OUT_RING( CCE_PACKET0( R128_LAST_DISPATCH_REG, 0 ) ); + OUT_RING( buf_priv->age ); + + ADVANCE_RING(); + + buf->pending = 1; + /* FIXME: Check dispatched field */ + buf_priv->dispatched = 0; + } + + dev_priv->sarea_priv->last_dispatch++; + +#if 0 + if ( dev_priv->submit_age == R128_MAX_VB_AGE ) { + ret = r128_do_cce_idle( dev_priv ); + if ( ret < 0 ) return ret; + dev_priv->submit_age = 0; + r128_freelist_reset( dev ); + } +#endif + + sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS; + sarea_priv->nbox = 0; +} + +static int r128_cce_dispatch_blit( drm_device_t *dev, + drm_r128_blit_t *blit ) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + drm_device_dma_t *dma = dev->dma; + drm_buf_t *buf; + drm_r128_buf_priv_t *buf_priv; + u32 *data; + int dword_shift, dwords; + RING_LOCALS; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + r128_update_ring_snapshot( dev_priv ); + + /* The compiler won't optimize away a division by a variable, + * even if the only legal values are 
powers of two. Thus, we'll + * use a shift instead. + */ + switch ( blit->format ) { + case R128_DATATYPE_ARGB8888: + dword_shift = 0; + break; + case R128_DATATYPE_ARGB1555: + case R128_DATATYPE_RGB565: + case R128_DATATYPE_ARGB4444: + dword_shift = 1; + break; + case R128_DATATYPE_CI8: + case R128_DATATYPE_RGB8: + dword_shift = 2; + break; + default: + DRM_ERROR( "invalid blit format %d\n", blit->format ); + return -EINVAL; + } + + /* Flush the pixel cache, and mark the contents as Read Invalid. + * This ensures no pixel data gets mixed up with the texture + * data from the host data blit, otherwise part of the texture + * image may be corrupted. + */ + BEGIN_RING( 2 ); + + OUT_RING( CCE_PACKET0( R128_PC_GUI_CTLSTAT, 0 ) ); + OUT_RING( R128_PC_RI_GUI | R128_PC_FLUSH_GUI ); + + ADVANCE_RING(); + + /* Dispatch the indirect buffer. + */ + buf = dma->buflist[blit->idx]; + buf_priv = buf->dev_private; + + if ( buf->pid != current->pid ) { + DRM_ERROR( "process %d using buffer owned by %d\n", + current->pid, buf->pid ); + return -EINVAL; + } + if ( buf->pending ) { + DRM_ERROR( "sending pending buffer %d\n", blit->idx ); + return -EINVAL; + } + + buf_priv->discard = 1; + + dwords = (blit->width * blit->height) >> dword_shift; + + data = (u32 *)((char *)dev_priv->buffers->handle + buf->offset); + + data[0] = CCE_PACKET3( R128_CNTL_HOSTDATA_BLT, dwords + 6 ); + data[1] = (R128_GMC_DST_PITCH_OFFSET_CNTL | + R128_GMC_BRUSH_NONE | + (blit->format << 8) | + R128_GMC_SRC_DATATYPE_COLOR | + R128_ROP3_S | + R128_DP_SRC_SOURCE_HOST_DATA | + R128_GMC_CLR_CMP_CNTL_DIS | + R128_GMC_AUX_CLIP_DIS | + R128_GMC_WR_MSK_DIS); + + data[2] = (blit->pitch << 21) | (blit->offset >> 5); + data[3] = 0xffffffff; + data[4] = 0xffffffff; + data[5] = (blit->y << 16) | blit->x; + data[6] = (blit->height << 16) | blit->width; + data[7] = dwords; + + buf->used = (dwords + 8) * sizeof(u32); + + r128_cce_dispatch_indirect( dev, buf, 0, buf->used ); + + /* Flush the pixel cache after the blit completes. This ensures + * the texture data is written out to memory before rendering + * continues. + */ + BEGIN_RING( 2 ); + + OUT_RING( CCE_PACKET0( R128_PC_GUI_CTLSTAT, 0 ) ); + OUT_RING( R128_PC_FLUSH_GUI ); + + ADVANCE_RING(); + + return 0; +} + + +/* ================================================================ + * Tiled depth buffer management + * + * FIXME: These should all set the destination write mask for when we + * have hardware stencil support. 
+ */ + +static int r128_cce_dispatch_write_span( drm_device_t *dev, + drm_r128_depth_t *depth ) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + int count, x, y; + u32 *buffer; + u8 *mask; + u32 depth_bpp; + int i; + RING_LOCALS; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + r128_update_ring_snapshot( dev_priv ); + + switch ( dev_priv->depth_bpp ) { + case 16: + depth_bpp = R128_GMC_DST_16BPP; + break; + case 24: + case 32: + depth_bpp = R128_GMC_DST_32BPP; + break; + default: + return -EINVAL; + } + + count = depth->n; + if ( copy_from_user( &x, depth->x, sizeof(x) ) ) { + return -EFAULT; + } + if ( copy_from_user( &y, depth->y, sizeof(y) ) ) { + return -EFAULT; + } + + buffer = kmalloc( depth->n * sizeof(u32), 0 ); + if ( buffer == NULL ) + return -ENOMEM; + if ( copy_from_user( buffer, depth->buffer, + depth->n * sizeof(u32) ) ) { + kfree( buffer ); + return -EFAULT; + } + + if ( depth->mask ) { + mask = kmalloc( depth->n * sizeof(u8), 0 ); + if ( mask == NULL ) { + kfree( buffer ); + return -ENOMEM; + } + if ( copy_from_user( mask, depth->mask, + depth->n * sizeof(u8) ) ) { + kfree( buffer ); + kfree( mask ); + return -EFAULT; + } + + for ( i = 0 ; i < count ; i++, x++ ) { + if ( mask[i] ) { + BEGIN_RING( 6 ); + + OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) ); + OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL | + R128_GMC_BRUSH_SOLID_COLOR | + depth_bpp | + R128_GMC_SRC_DATATYPE_COLOR | + R128_ROP3_P | + R128_GMC_CLR_CMP_CNTL_DIS | + R128_GMC_WR_MSK_DIS ); + + OUT_RING( dev_priv->depth_pitch_offset_c ); + OUT_RING( buffer[i] ); + + OUT_RING( (x << 16) | y ); + OUT_RING( (1 << 16) | 1 ); + + ADVANCE_RING(); + } + } + + kfree( mask ); + } else { + for ( i = 0 ; i < count ; i++, x++ ) { + BEGIN_RING( 6 ); + + OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) ); + OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL | + R128_GMC_BRUSH_SOLID_COLOR | + depth_bpp | + R128_GMC_SRC_DATATYPE_COLOR | + R128_ROP3_P | + R128_GMC_CLR_CMP_CNTL_DIS | + R128_GMC_WR_MSK_DIS ); + + OUT_RING( dev_priv->depth_pitch_offset_c ); + OUT_RING( buffer[i] ); + + OUT_RING( (x << 16) | y ); + OUT_RING( (1 << 16) | 1 ); + + ADVANCE_RING(); + } + } + + kfree( buffer ); + + return 0; +} + +static int r128_cce_dispatch_write_pixels( drm_device_t *dev, + drm_r128_depth_t *depth ) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + int count, *x, *y; + u32 *buffer; + u8 *mask; + u32 depth_bpp; + int i; + RING_LOCALS; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + r128_update_ring_snapshot( dev_priv ); + + switch ( dev_priv->depth_bpp ) { + case 16: + depth_bpp = R128_GMC_DST_16BPP; + break; + case 24: + case 32: + depth_bpp = R128_GMC_DST_32BPP; + break; + default: + return -EINVAL; + } + + count = depth->n; + + x = kmalloc( count * sizeof(*x), 0 ); + if ( x == NULL ) { + return -ENOMEM; + } + y = kmalloc( count * sizeof(*y), 0 ); + if ( y == NULL ) { + kfree( x ); + return -ENOMEM; + } + if ( copy_from_user( x, depth->x, count * sizeof(int) ) ) { + kfree( x ); + kfree( y ); + return -EFAULT; + } + if ( copy_from_user( y, depth->y, count * sizeof(int) ) ) { + kfree( x ); + kfree( y ); + return -EFAULT; + } + + buffer = kmalloc( depth->n * sizeof(u32), 0 ); + if ( buffer == NULL ) { + kfree( x ); + kfree( y ); + return -ENOMEM; + } + if ( copy_from_user( buffer, depth->buffer, + depth->n * sizeof(u32) ) ) { + kfree( x ); + kfree( y ); + kfree( buffer ); + return -EFAULT; + } + + if ( depth->mask ) { + mask = kmalloc( depth->n * sizeof(u8), 0 ); + if ( mask == NULL ) { + kfree( x ); + kfree( y ); + kfree( buffer ); + return -ENOMEM; + 
} + if ( copy_from_user( mask, depth->mask, + depth->n * sizeof(u8) ) ) { + kfree( x ); + kfree( y ); + kfree( buffer ); + kfree( mask ); + return -EFAULT; + } + + for ( i = 0 ; i < count ; i++ ) { + if ( mask[i] ) { + BEGIN_RING( 6 ); + + OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) ); + OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL | + R128_GMC_BRUSH_SOLID_COLOR | + depth_bpp | + R128_GMC_SRC_DATATYPE_COLOR | + R128_ROP3_P | + R128_GMC_CLR_CMP_CNTL_DIS | + R128_GMC_WR_MSK_DIS ); + + OUT_RING( dev_priv->depth_pitch_offset_c ); + OUT_RING( buffer[i] ); + + OUT_RING( (x[i] << 16) | y[i] ); + OUT_RING( (1 << 16) | 1 ); + + ADVANCE_RING(); + } + } + + kfree( mask ); + } else { + for ( i = 0 ; i < count ; i++ ) { + BEGIN_RING( 6 ); + + OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) ); + OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL | + R128_GMC_BRUSH_SOLID_COLOR | + depth_bpp | + R128_GMC_SRC_DATATYPE_COLOR | + R128_ROP3_P | + R128_GMC_CLR_CMP_CNTL_DIS | + R128_GMC_WR_MSK_DIS ); + + OUT_RING( dev_priv->depth_pitch_offset_c ); + OUT_RING( buffer[i] ); + + OUT_RING( (x[i] << 16) | y[i] ); + OUT_RING( (1 << 16) | 1 ); + + ADVANCE_RING(); + } + } + + kfree( x ); + kfree( y ); + kfree( buffer ); + + return 0; +} + +static int r128_cce_dispatch_read_span( drm_device_t *dev, + drm_r128_depth_t *depth ) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + int count, x, y; + u32 depth_bpp; + RING_LOCALS; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + r128_update_ring_snapshot( dev_priv ); + + switch ( dev_priv->depth_bpp ) { + case 16: + depth_bpp = R128_GMC_DST_16BPP; + break; + case 24: + case 32: + depth_bpp = R128_GMC_DST_32BPP; + break; + default: + return -EINVAL; + } + + count = depth->n; + if ( copy_from_user( &x, depth->x, sizeof(x) ) ) { + return -EFAULT; + } + if ( copy_from_user( &y, depth->y, sizeof(y) ) ) { + return -EFAULT; + } + + BEGIN_RING( 7 ); + + OUT_RING( CCE_PACKET3( R128_CNTL_BITBLT_MULTI, 5 ) ); + OUT_RING( R128_GMC_SRC_PITCH_OFFSET_CNTL | + R128_GMC_DST_PITCH_OFFSET_CNTL | + R128_GMC_BRUSH_NONE | + depth_bpp | + R128_GMC_SRC_DATATYPE_COLOR | + R128_ROP3_S | + R128_DP_SRC_SOURCE_MEMORY | + R128_GMC_CLR_CMP_CNTL_DIS | + R128_GMC_WR_MSK_DIS ); + + OUT_RING( dev_priv->depth_pitch_offset_c ); + OUT_RING( dev_priv->span_pitch_offset_c ); + + OUT_RING( (x << 16) | y ); + OUT_RING( (0 << 16) | 0 ); + OUT_RING( (count << 16) | 1 ); + + ADVANCE_RING(); + + return 0; +} + +static int r128_cce_dispatch_read_pixels( drm_device_t *dev, + drm_r128_depth_t *depth ) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + int count, *x, *y; + u32 depth_bpp; + int i; + RING_LOCALS; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + r128_update_ring_snapshot( dev_priv ); + + switch ( dev_priv->depth_bpp ) { + case 16: + depth_bpp = R128_GMC_DST_16BPP; + break; + case 24: + case 32: + depth_bpp = R128_GMC_DST_32BPP; + break; + default: + return -EINVAL; + } + + count = depth->n; + if ( count > dev_priv->depth_pitch ) { + count = dev_priv->depth_pitch; + } + + x = kmalloc( count * sizeof(*x), 0 ); + if ( x == NULL ) { + return -ENOMEM; + } + y = kmalloc( count * sizeof(*y), 0 ); + if ( y == NULL ) { + kfree( x ); + return -ENOMEM; + } + if ( copy_from_user( x, depth->x, count * sizeof(int) ) ) { + kfree( x ); + kfree( y ); + return -EFAULT; + } + if ( copy_from_user( y, depth->y, count * sizeof(int) ) ) { + kfree( x ); + kfree( y ); + return -EFAULT; + } + + for ( i = 0 ; i < count ; i++ ) { + BEGIN_RING( 7 ); + + OUT_RING( CCE_PACKET3( R128_CNTL_BITBLT_MULTI, 5 ) ); + OUT_RING( R128_GMC_SRC_PITCH_OFFSET_CNTL | + 
R128_GMC_DST_PITCH_OFFSET_CNTL | + R128_GMC_BRUSH_NONE | + depth_bpp | + R128_GMC_SRC_DATATYPE_COLOR | + R128_ROP3_S | + R128_DP_SRC_SOURCE_MEMORY | + R128_GMC_CLR_CMP_CNTL_DIS | + R128_GMC_WR_MSK_DIS ); + + OUT_RING( dev_priv->depth_pitch_offset_c ); + OUT_RING( dev_priv->span_pitch_offset_c ); + + OUT_RING( (x[i] << 16) | y[i] ); + OUT_RING( (i << 16) | 0 ); + OUT_RING( (1 << 16) | 1 ); + + ADVANCE_RING(); + } + + kfree( x ); + kfree( y ); + + return 0; +} + + +/* ================================================================ + * Polygon stipple + */ + +static void r128_cce_dispatch_stipple( drm_device_t *dev, u32 *stipple ) +{ + drm_r128_private_t *dev_priv = dev->dev_private; + int i; + RING_LOCALS; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + r128_update_ring_snapshot( dev_priv ); + + BEGIN_RING( 33 ); + + OUT_RING( CCE_PACKET0( R128_BRUSH_DATA0, 31 ) ); + for ( i = 0 ; i < 32 ; i++ ) { + OUT_RING( stipple[i] ); + } + + ADVANCE_RING(); +} + + +/* ================================================================ + * IOCTL functions + */ + +int r128_cce_clear( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_r128_private_t *dev_priv = dev->dev_private; + drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_r128_clear_t clear; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "r128_cce_clear called without lock held\n" ); + return -EINVAL; + } + + if ( copy_from_user( &clear, (drm_r128_clear_t *) arg, + sizeof(clear) ) ) + return -EFAULT; + + if ( sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS ) + sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS; + + r128_cce_dispatch_clear( dev, clear.flags, + clear.x, clear.y, clear.w, clear.h, + clear.clear_color, clear.clear_depth ); + + /* Make sure we restore the 3D state next time. 
+ */ + dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS; + + return 0; +} + +int r128_cce_swap( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_r128_private_t *dev_priv = dev->dev_private; + drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "r128_cce_swap called without lock held\n" ); + return -EINVAL; + } + + if ( sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS ) + sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS; + + if ( !dev_priv->page_flipping ) { + r128_cce_dispatch_swap( dev ); + dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT | + R128_UPLOAD_MASKS); + } else { + r128_cce_dispatch_flip( dev ); + } + + return 0; +} + +int r128_cce_vertex( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_r128_private_t *dev_priv = dev->dev_private; + drm_device_dma_t *dma = dev->dma; + drm_buf_t *buf; + drm_r128_buf_priv_t *buf_priv; + drm_r128_vertex_t vertex; + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + if ( !dev_priv || dev_priv->is_pci ) { + DRM_ERROR( "%s called with a PCI card\n", __FUNCTION__ ); + return -EINVAL; + } + + if ( copy_from_user( &vertex, (drm_r128_vertex_t *)arg, + sizeof(vertex) ) ) + return -EFAULT; + + DRM_DEBUG( "%s: pid=%d index=%d count=%d discard=%d\n", + __FUNCTION__, current->pid, + vertex.idx, vertex.count, vertex.discard ); + + if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) { + DRM_ERROR( "buffer index %d (of %d max)\n", + vertex.idx, dma->buf_count - 1 ); + return -EINVAL; + } + if ( vertex.prim < 0 || + vertex.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2 ) { + DRM_ERROR( "buffer prim %d\n", vertex.prim ); + return -EINVAL; + } + + buf = dma->buflist[vertex.idx]; + buf_priv = buf->dev_private; + + if ( buf->pid != current->pid ) { + DRM_ERROR( "process %d using buffer owned by %d\n", + current->pid, buf->pid ); + return -EINVAL; + } + if ( buf->pending ) { + DRM_ERROR( "sending pending buffer %d\n", vertex.idx ); + return -EINVAL; + } + + buf->used = vertex.count; + buf_priv->prim = vertex.prim; + buf_priv->discard = vertex.discard; + + r128_cce_dispatch_vertex( dev, buf ); + + return 0; +} + +int r128_cce_indices( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_r128_private_t *dev_priv = dev->dev_private; + drm_device_dma_t *dma = dev->dma; + drm_buf_t *buf; + drm_r128_buf_priv_t *buf_priv; + drm_r128_indices_t elts; + int count; + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + if ( !dev_priv || dev_priv->is_pci ) { + DRM_ERROR( "%s called with a PCI card\n", __FUNCTION__ ); + return -EINVAL; + } + + if ( copy_from_user( &elts, (drm_r128_indices_t *)arg, + sizeof(elts) ) ) + return -EFAULT; + + DRM_DEBUG( "%s: pid=%d buf=%d s=%d e=%d d=%d\n", + __FUNCTION__, current->pid, + elts.idx, elts.start, elts.end, elts.discard ); + + if ( elts.idx < 0 || elts.idx >= dma->buf_count ) { + DRM_ERROR( "buffer index %d 
(of %d max)\n", + elts.idx, dma->buf_count - 1 ); + return -EINVAL; + } + if ( elts.prim < 0 || + elts.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2 ) { + DRM_ERROR( "buffer prim %d\n", elts.prim ); + return -EINVAL; + } + + buf = dma->buflist[elts.idx]; + buf_priv = buf->dev_private; + + if ( buf->pid != current->pid ) { + DRM_ERROR( "process %d using buffer owned by %d\n", + current->pid, buf->pid ); + return -EINVAL; + } + if ( buf->pending ) { + DRM_ERROR( "sending pending buffer %d\n", elts.idx ); + return -EINVAL; + } + + count = (elts.end - elts.start) / sizeof(u16); + elts.start -= R128_INDEX_PRIM_OFFSET; + + if ( elts.start & 0x7 ) { + DRM_ERROR( "misaligned buffer 0x%x\n", elts.start ); + return -EINVAL; + } + if ( elts.start < buf->used ) { + DRM_ERROR( "no header 0x%x - 0x%x\n", elts.start, buf->used ); + return -EINVAL; + } + + buf->used = elts.end; + buf_priv->prim = elts.prim; + buf_priv->discard = elts.discard; + + r128_cce_dispatch_indices( dev, buf, elts.start, elts.end, count ); + + return 0; +} + +int r128_cce_blit( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_device_dma_t *dma = dev->dma; + drm_r128_blit_t blit; + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + + if ( copy_from_user( &blit, (drm_r128_blit_t *)arg, + sizeof(blit) ) ) + return -EFAULT; + + DRM_DEBUG( "%s: pid=%d index=%d\n", + __FUNCTION__, current->pid, blit.idx ); + + if ( blit.idx < 0 || blit.idx >= dma->buf_count ) { + DRM_ERROR( "buffer index %d (of %d max)\n", + blit.idx, dma->buf_count - 1 ); + return -EINVAL; + } + + return r128_cce_dispatch_blit( dev, &blit ); +} + +int r128_cce_depth( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_r128_depth_t depth; + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + + if ( copy_from_user( &depth, (drm_r128_depth_t *)arg, + sizeof(depth) ) ) + return -EFAULT; + + switch ( depth.func ) { + case R128_WRITE_SPAN: + return r128_cce_dispatch_write_span( dev, &depth ); + case R128_WRITE_PIXELS: + return r128_cce_dispatch_write_pixels( dev, &depth ); + case R128_READ_SPAN: + return r128_cce_dispatch_read_span( dev, &depth ); + case R128_READ_PIXELS: + return r128_cce_dispatch_read_pixels( dev, &depth ); + } + + return -EINVAL; +} + +int r128_cce_stipple( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_r128_stipple_t stipple; + u32 mask[32]; + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + + if ( copy_from_user( &stipple, (drm_r128_stipple_t *)arg, + sizeof(stipple) ) ) + return -EFAULT; + + if ( copy_from_user( &mask, stipple.mask, + 32 * sizeof(u32) ) ) + return -EFAULT; + + r128_cce_dispatch_stipple( dev, mask ); + + return 0; +} + +int r128_cce_indirect( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + + if ( !_DRM_LOCK_IS_HELD( 
dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + + /* Indirect buffer firing is not supported at this time. + */ + return -EINVAL; +} diff --git a/linux/radeon_bufs.c b/linux/radeon_bufs.c new file mode 100644 index 000000000..9a3093eb1 --- /dev/null +++ b/linux/radeon_bufs.c @@ -0,0 +1,298 @@ +/* radeon_bufs.c -- IOCTLs to manage buffers -*- linux-c -*- + * + * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Fremont, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Kevin E. Martin <martin@valinux.com> + * Rickard E. (Rik) Faith <faith@valinux.com> + * Jeff Hartmann <jhartmann@valinux.com> + * + */ + +#define __NO_VERSION__ +#include <linux/config.h> +#include "drmP.h" +#include "radeon_drv.h" +#include "linux/un.h" + + +#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) +int radeon_addbufs_agp(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_device_dma_t *dma = dev->dma; + drm_buf_desc_t request; + drm_buf_entry_t *entry; + drm_buf_t *buf; + unsigned long offset; + unsigned long agp_offset; + int count; + int order; + int size; + int alignment; + int page_order; + int total; + int byte_count; + int i; + + if (!dma) return -EINVAL; + + if (copy_from_user(&request, (drm_buf_desc_t *)arg, sizeof(request))) + return -EFAULT; + + count = request.count; + order = drm_order(request.size); + size = 1 << order; + + alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size):size; + page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; + total = PAGE_SIZE << page_order; + + byte_count = 0; + agp_offset = dev->agp->base + request.agp_start; + + DRM_DEBUG("count: %d\n", count); + DRM_DEBUG("order: %d\n", order); + DRM_DEBUG("size: %d\n", size); + DRM_DEBUG("agp_offset: %ld\n", agp_offset); + DRM_DEBUG("alignment: %d\n", alignment); + DRM_DEBUG("page_order: %d\n", page_order); + DRM_DEBUG("total: %d\n", total); + + if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; + if (dev->queue_count) return -EBUSY; /* Not while in use */ + + spin_lock(&dev->count_lock); + if (dev->buf_use) { + spin_unlock(&dev->count_lock); + return -EBUSY; + } + atomic_inc(&dev->buf_alloc); + spin_unlock(&dev->count_lock); + + down(&dev->struct_sem); + entry = &dma->bufs[order]; + if (entry->buf_count) { + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; /* May only call once for each order */ + } + + entry->buflist = drm_alloc(count * sizeof(*entry->buflist), + DRM_MEM_BUFS); + if (!entry->buflist) { + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + memset(entry->buflist, 0, count * sizeof(*entry->buflist)); + + entry->buf_size = size; + entry->page_order = page_order; + offset = 0; + + for (offset = 0; + entry->buf_count < count; + offset += alignment, ++entry->buf_count) { + buf = &entry->buflist[entry->buf_count]; + buf->idx = dma->buf_count + entry->buf_count; + buf->total = alignment; + buf->order = order; + buf->used = 0; + buf->offset = (dma->byte_count + offset); + buf->address = (void *)(agp_offset + offset); + buf->next = NULL; + buf->waiting = 0; + buf->pending = 0; + init_waitqueue_head(&buf->dma_wait); + buf->pid = 0; + + buf->dev_priv_size = sizeof(drm_radeon_buf_priv_t); + buf->dev_private = drm_alloc(sizeof(drm_radeon_buf_priv_t), + DRM_MEM_BUFS); + memset(buf->dev_private, 0, buf->dev_priv_size); + +#if DRM_DMA_HISTOGRAM + buf->time_queued = 0; + buf->time_dispatched = 0; + buf->time_completed = 0; + buf->time_freed = 0; +#endif + + byte_count += PAGE_SIZE << page_order; + + DRM_DEBUG("buffer %d @ %p\n", + entry->buf_count, buf->address); + } + + DRM_DEBUG("byte_count: %d\n", byte_count); + + dma->buflist = drm_realloc(dma->buflist, + dma->buf_count * sizeof(*dma->buflist), + (dma->buf_count + entry->buf_count) + * sizeof(*dma->buflist), + DRM_MEM_BUFS); + for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++) + dma->buflist[i] = &entry->buflist[i - dma->buf_count]; + + dma->buf_count += entry->buf_count; + dma->byte_count += byte_count; + + drm_freelist_create(&entry->freelist, entry->buf_count); + for (i = 0; i < entry->buf_count; i++) { + drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]); + } + + up(&dev->struct_sem); + + request.count = entry->buf_count; + request.size = size; + + if (copy_to_user((drm_buf_desc_t *)arg, &request, sizeof(request))) + return -EFAULT; + + dma->flags = _DRM_DMA_USE_AGP; + + atomic_dec(&dev->buf_alloc); + return 0; +} +#endif + +int radeon_addbufs(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_buf_desc_t request; + + if (!dev_priv || dev_priv->is_pci) return -EINVAL; + + if (copy_from_user(&request, (drm_buf_desc_t *)arg, sizeof(request))) + return -EFAULT; + +#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) + if (request.flags & _DRM_AGP_BUFFER) + return radeon_addbufs_agp(inode, filp, cmd, arg); + else +#endif + 
return -EINVAL; +} + +int radeon_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_device_dma_t *dma = dev->dma; + int retcode = 0; + const int zero = 0; + unsigned long virtual; + unsigned long address; + drm_buf_map_t request; + int i; + + if (!dma || !dev_priv || dev_priv->is_pci) return -EINVAL; + + DRM_DEBUG("\n"); + + spin_lock(&dev->count_lock); + if (atomic_read(&dev->buf_alloc)) { + spin_unlock(&dev->count_lock); + return -EBUSY; + } + ++dev->buf_use; /* Can't allocate more after this call */ + spin_unlock(&dev->count_lock); + + if (copy_from_user(&request, (drm_buf_map_t *)arg, sizeof(request))) + return -EFAULT; + + if (request.count >= dma->buf_count) { + if (dma->flags & _DRM_DMA_USE_AGP) { + drm_map_t *map; + + map = dev_priv->buffers; + if (!map) { + retcode = -EINVAL; + goto done; + } + + down(¤t->mm->mmap_sem); + virtual = do_mmap(filp, 0, map->size, + PROT_READ|PROT_WRITE, + MAP_SHARED, + (unsigned long)map->offset); + up(¤t->mm->mmap_sem); + } else { + down(¤t->mm->mmap_sem); + virtual = do_mmap(filp, 0, dma->byte_count, + PROT_READ|PROT_WRITE, MAP_SHARED, 0); + up(¤t->mm->mmap_sem); + } + if (virtual > -1024UL) { + /* Real error */ + retcode = (signed long)virtual; + goto done; + } + request.virtual = (void *)virtual; + + for (i = 0; i < dma->buf_count; i++) { + if (copy_to_user(&request.list[i].idx, + &dma->buflist[i]->idx, + sizeof(request.list[0].idx))) { + retcode = -EFAULT; + goto done; + } + if (copy_to_user(&request.list[i].total, + &dma->buflist[i]->total, + sizeof(request.list[0].total))) { + retcode = -EFAULT; + goto done; + } + if (copy_to_user(&request.list[i].used, + &zero, + sizeof(zero))) { + retcode = -EFAULT; + goto done; + } + address = virtual + dma->buflist[i]->offset; + if (copy_to_user(&request.list[i].address, + &address, + sizeof(address))) { + retcode = -EFAULT; + goto done; + } + } + } + done: + request.count = dma->buf_count; + DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode); + + if (copy_to_user((drm_buf_map_t *)arg, &request, sizeof(request))) + return -EFAULT; + + return retcode; +} diff --git a/linux/radeon_context.c b/linux/radeon_context.c new file mode 100644 index 000000000..e428dc22c --- /dev/null +++ b/linux/radeon_context.c @@ -0,0 +1,215 @@ +/* radeon_context.c -- IOCTLs for Radeon contexts -*- linux-c -*- + * + * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Fremont, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Author: Kevin E. Martin <martin@valinux.com> + * Rickard E. (Rik) Faith <faith@valinux.com> + * + */ + +#define __NO_VERSION__ +#include "drmP.h" +#include "radeon_drv.h" + +extern drm_ctx_t radeon_res_ctx; + +static int radeon_alloc_queue(drm_device_t *dev) +{ + return drm_ctxbitmap_next(dev); +} + +int radeon_context_switch(drm_device_t *dev, int old, int new) +{ + char buf[64]; + + atomic_inc(&dev->total_ctx); + + if (test_and_set_bit(0, &dev->context_flag)) { + DRM_ERROR("Reentering -- FIXME\n"); + return -EBUSY; + } + +#if DRM_DMA_HISTOGRAM + dev->ctx_start = get_cycles(); +#endif + + DRM_DEBUG("Context switch from %d to %d\n", old, new); + + if (new == dev->last_context) { + clear_bit(0, &dev->context_flag); + return 0; + } + + if (drm_flags & DRM_FLAG_NOCTX) { + radeon_context_switch_complete(dev, new); + } else { + sprintf(buf, "C %d %d\n", old, new); + drm_write_string(dev, buf); + } + + return 0; +} + +int radeon_context_switch_complete(drm_device_t *dev, int new) +{ + dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ + dev->last_switch = jiffies; + + if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("Lock isn't held after context switch\n"); + } + + /* If a context switch is ever initiated + when the kernel holds the lock, release + that lock here. */ +#if DRM_DMA_HISTOGRAM + atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles() + - dev->ctx_start)]); + +#endif + clear_bit(0, &dev->context_flag); + wake_up(&dev->context_wait); + + return 0; +} + + +int radeon_resctx(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_ctx_res_t res; + drm_ctx_t ctx; + int i; + + DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS); + if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res))) + return -EFAULT; + if (res.count >= DRM_RESERVED_CONTEXTS) { + memset(&ctx, 0, sizeof(ctx)); + for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { + ctx.handle = i; + if (copy_to_user(&res.contexts[i], &i, sizeof(i))) + return -EFAULT; + } + } + res.count = DRM_RESERVED_CONTEXTS; + if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res))) + return -EFAULT; + return 0; +} + + +int radeon_addctx(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_ctx_t ctx; + + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; + if ((ctx.handle = radeon_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) { + /* Skip kernel's context and get a new one. */ + ctx.handle = radeon_alloc_queue(dev); + } + DRM_DEBUG("%d\n", ctx.handle); + if (ctx.handle == -1) { + DRM_DEBUG("Not enough free contexts.\n"); + /* Should this return -EBUSY instead? 
*/ + return -ENOMEM; + } + + if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx))) + return -EFAULT; + return 0; +} + +int radeon_modctx(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_ctx_t ctx; + + if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx))) + return -EFAULT; + if (ctx.flags==_DRM_CONTEXT_PRESERVED) + radeon_res_ctx.handle=ctx.handle; + return 0; +} + +int radeon_getctx(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_ctx_t ctx; + + if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx))) + return -EFAULT; + /* This is 0, because we don't handle any context flags */ + ctx.flags = 0; + if (copy_to_user((drm_ctx_t*)arg, &ctx, sizeof(ctx))) + return -EFAULT; + return 0; +} + +int radeon_switchctx(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_ctx_t ctx; + + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; + DRM_DEBUG("%d\n", ctx.handle); + return radeon_context_switch(dev, dev->last_context, ctx.handle); +} + +int radeon_newctx(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_ctx_t ctx; + + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; + DRM_DEBUG("%d\n", ctx.handle); + radeon_context_switch_complete(dev, ctx.handle); + + return 0; +} + +int radeon_rmctx(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_ctx_t ctx; + + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; + DRM_DEBUG("%d\n", ctx.handle); + drm_ctxbitmap_free(dev, ctx.handle); + + return 0; +} diff --git a/linux/radeon_cp.c b/linux/radeon_cp.c new file mode 100644 index 000000000..5d662bc08 --- /dev/null +++ b/linux/radeon_cp.c @@ -0,0 +1,1314 @@ +/* radeon_cp.c -- CP support for Radeon -*- linux-c -*- + * + * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Fremont, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Kevin E. 
Martin <martin@valinux.com> + * Gareth Hughes <gareth@valinux.com> + * + */ + +#define __NO_VERSION__ +#include "drmP.h" +#include "radeon_drv.h" + +#include <linux/interrupt.h> /* For task queue support */ +#include <linux/delay.h> + +#define RADEON_FIFO_DEBUG 0 + + +/* CP microcode (from ATI) */ +static u32 radeon_cp_microcode[][2] = { + { 0x21007000, 0000000000 }, + { 0x20007000, 0000000000 }, + { 0x000000b4, 0x00000004 }, + { 0x000000b8, 0x00000004 }, + { 0x6f5b4d4c, 0000000000 }, + { 0x4c4c427f, 0000000000 }, + { 0x5b568a92, 0000000000 }, + { 0x4ca09c6d, 0000000000 }, + { 0xad4c4c4c, 0000000000 }, + { 0x4ce1af3d, 0000000000 }, + { 0xd8afafaf, 0000000000 }, + { 0xd64c4cdc, 0000000000 }, + { 0x4cd10d10, 0000000000 }, + { 0x000f0000, 0x00000016 }, + { 0x362f242d, 0000000000 }, + { 0x00000012, 0x00000004 }, + { 0x000f0000, 0x00000016 }, + { 0x362f282d, 0000000000 }, + { 0x000380e7, 0x00000002 }, + { 0x04002c97, 0x00000002 }, + { 0x000f0001, 0x00000016 }, + { 0x333a3730, 0000000000 }, + { 0x000077ef, 0x00000002 }, + { 0x00061000, 0x00000002 }, + { 0x00000021, 0x0000001a }, + { 0x00004000, 0x0000001e }, + { 0x00061000, 0x00000002 }, + { 0x00000021, 0x0000001a }, + { 0x00004000, 0x0000001e }, + { 0x00061000, 0x00000002 }, + { 0x00000021, 0x0000001a }, + { 0x00004000, 0x0000001e }, + { 0x00000017, 0x00000004 }, + { 0x0003802b, 0x00000002 }, + { 0x040067e0, 0x00000002 }, + { 0x00000017, 0x00000004 }, + { 0x000077e0, 0x00000002 }, + { 0x00065000, 0x00000002 }, + { 0x000037e1, 0x00000002 }, + { 0x040067e1, 0x00000006 }, + { 0x000077e0, 0x00000002 }, + { 0x000077e1, 0x00000002 }, + { 0x000077e1, 0x00000006 }, + { 0xffffffff, 0000000000 }, + { 0x10000000, 0000000000 }, + { 0x0003802b, 0x00000002 }, + { 0x040067e0, 0x00000006 }, + { 0x00007675, 0x00000002 }, + { 0x00007676, 0x00000002 }, + { 0x00007677, 0x00000002 }, + { 0x00007678, 0x00000006 }, + { 0x0003802c, 0x00000002 }, + { 0x04002676, 0x00000002 }, + { 0x00007677, 0x00000002 }, + { 0x00007678, 0x00000006 }, + { 0x0000002f, 0x00000018 }, + { 0x0000002f, 0x00000018 }, + { 0000000000, 0x00000006 }, + { 0x00000030, 0x00000018 }, + { 0x00000030, 0x00000018 }, + { 0000000000, 0x00000006 }, + { 0x01605000, 0x00000002 }, + { 0x00065000, 0x00000002 }, + { 0x00098000, 0x00000002 }, + { 0x00061000, 0x00000002 }, + { 0x64c0603e, 0x00000004 }, + { 0x000380e6, 0x00000002 }, + { 0x040025c5, 0x00000002 }, + { 0x00080000, 0x00000016 }, + { 0000000000, 0000000000 }, + { 0x0400251d, 0x00000002 }, + { 0x00007580, 0x00000002 }, + { 0x00067581, 0x00000002 }, + { 0x04002580, 0x00000002 }, + { 0x00067581, 0x00000002 }, + { 0x00000049, 0x00000004 }, + { 0x00005000, 0000000000 }, + { 0x000380e6, 0x00000002 }, + { 0x040025c5, 0x00000002 }, + { 0x00061000, 0x00000002 }, + { 0x0000750e, 0x00000002 }, + { 0x00019000, 0x00000002 }, + { 0x00011055, 0x00000014 }, + { 0x00000055, 0x00000012 }, + { 0x0400250f, 0x00000002 }, + { 0x0000504f, 0x00000004 }, + { 0x000380e6, 0x00000002 }, + { 0x040025c5, 0x00000002 }, + { 0x00007565, 0x00000002 }, + { 0x00007566, 0x00000002 }, + { 0x00000058, 0x00000004 }, + { 0x000380e6, 0x00000002 }, + { 0x040025c5, 0x00000002 }, + { 0x01e655b4, 0x00000002 }, + { 0x4401b0e4, 0x00000002 }, + { 0x01c110e4, 0x00000002 }, + { 0x26667066, 0x00000018 }, + { 0x040c2565, 0x00000002 }, + { 0x00000066, 0x00000018 }, + { 0x04002564, 0x00000002 }, + { 0x00007566, 0x00000002 }, + { 0x0000005d, 0x00000004 }, + { 0x00401069, 0x00000008 }, + { 0x00101000, 0x00000002 }, + { 0x000d80ff, 0x00000002 }, + { 0x0080006c, 0x00000008 }, + { 0x000f9000, 0x00000002 }, + { 
0x000e00ff, 0x00000002 }, + { 0000000000, 0x00000006 }, + { 0x0000008f, 0x00000018 }, + { 0x0000005b, 0x00000004 }, + { 0x000380e6, 0x00000002 }, + { 0x040025c5, 0x00000002 }, + { 0x00007576, 0x00000002 }, + { 0x00065000, 0x00000002 }, + { 0x00009000, 0x00000002 }, + { 0x00041000, 0x00000002 }, + { 0x0c00350e, 0x00000002 }, + { 0x00049000, 0x00000002 }, + { 0x00051000, 0x00000002 }, + { 0x01e785f8, 0x00000002 }, + { 0x00200000, 0x00000002 }, + { 0x0060007e, 0x0000000c }, + { 0x00007563, 0x00000002 }, + { 0x006075f0, 0x00000021 }, + { 0x20007073, 0x00000004 }, + { 0x00005073, 0x00000004 }, + { 0x000380e6, 0x00000002 }, + { 0x040025c5, 0x00000002 }, + { 0x00007576, 0x00000002 }, + { 0x00007577, 0x00000002 }, + { 0x0000750e, 0x00000002 }, + { 0x0000750f, 0x00000002 }, + { 0x00a05000, 0x00000002 }, + { 0x00600083, 0x0000000c }, + { 0x006075f0, 0x00000021 }, + { 0x000075f8, 0x00000002 }, + { 0x00000083, 0x00000004 }, + { 0x000a750e, 0x00000002 }, + { 0x000380e6, 0x00000002 }, + { 0x040025c5, 0x00000002 }, + { 0x0020750f, 0x00000002 }, + { 0x00600086, 0x00000004 }, + { 0x00007570, 0x00000002 }, + { 0x00007571, 0x00000002 }, + { 0x00007572, 0x00000006 }, + { 0x000380e6, 0x00000002 }, + { 0x040025c5, 0x00000002 }, + { 0x00005000, 0x00000002 }, + { 0x00a05000, 0x00000002 }, + { 0x00007568, 0x00000002 }, + { 0x00061000, 0x00000002 }, + { 0x00000095, 0x0000000c }, + { 0x00058000, 0x00000002 }, + { 0x0c607562, 0x00000002 }, + { 0x00000097, 0x00000004 }, + { 0x000380e6, 0x00000002 }, + { 0x040025c5, 0x00000002 }, + { 0x00600096, 0x00000004 }, + { 0x400070e5, 0000000000 }, + { 0x000380e6, 0x00000002 }, + { 0x040025c5, 0x00000002 }, + { 0x000380e5, 0x00000002 }, + { 0x000000a8, 0x0000001c }, + { 0x000650aa, 0x00000018 }, + { 0x040025bb, 0x00000002 }, + { 0x000610ab, 0x00000018 }, + { 0x040075bc, 0000000000 }, + { 0x000075bb, 0x00000002 }, + { 0x000075bc, 0000000000 }, + { 0x00090000, 0x00000006 }, + { 0x00090000, 0x00000002 }, + { 0x000d8002, 0x00000006 }, + { 0x00007832, 0x00000002 }, + { 0x00005000, 0x00000002 }, + { 0x000380e7, 0x00000002 }, + { 0x04002c97, 0x00000002 }, + { 0x00007820, 0x00000002 }, + { 0x00007821, 0x00000002 }, + { 0x00007800, 0000000000 }, + { 0x01200000, 0x00000002 }, + { 0x20077000, 0x00000002 }, + { 0x01200000, 0x00000002 }, + { 0x20007000, 0x00000002 }, + { 0x00061000, 0x00000002 }, + { 0x0120751b, 0x00000002 }, + { 0x8040750a, 0x00000002 }, + { 0x8040750b, 0x00000002 }, + { 0x00110000, 0x00000002 }, + { 0x000380e5, 0x00000002 }, + { 0x000000c6, 0x0000001c }, + { 0x000610ab, 0x00000018 }, + { 0x844075bd, 0x00000002 }, + { 0x000610aa, 0x00000018 }, + { 0x840075bb, 0x00000002 }, + { 0x000610ab, 0x00000018 }, + { 0x844075bc, 0x00000002 }, + { 0x000000c9, 0x00000004 }, + { 0x804075bd, 0x00000002 }, + { 0x800075bb, 0x00000002 }, + { 0x804075bc, 0x00000002 }, + { 0x00108000, 0x00000002 }, + { 0x01400000, 0x00000002 }, + { 0x006000cd, 0x0000000c }, + { 0x20c07000, 0x00000020 }, + { 0x000000cf, 0x00000012 }, + { 0x00800000, 0x00000006 }, + { 0x0080751d, 0x00000006 }, + { 0000000000, 0000000000 }, + { 0x0000775c, 0x00000002 }, + { 0x00a05000, 0x00000002 }, + { 0x00661000, 0x00000002 }, + { 0x0460275d, 0x00000020 }, + { 0x00004000, 0000000000 }, + { 0x01e00830, 0x00000002 }, + { 0x21007000, 0000000000 }, + { 0x6464614d, 0000000000 }, + { 0x69687420, 0000000000 }, + { 0x00000073, 0000000000 }, + { 0000000000, 0000000000 }, + { 0x00005000, 0x00000002 }, + { 0x000380d0, 0x00000002 }, + { 0x040025e0, 0x00000002 }, + { 0x000075e1, 0000000000 }, + { 0x00000001, 0000000000 }, + { 0x000380e0, 
0x00000002 }, + { 0x04002394, 0x00000002 }, + { 0x00005000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0x00000008, 0000000000 }, + { 0x00000004, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, +}; + + +#define DO_IOREMAP(_m) (_m)->handle = drm_ioremap((_m)->offset, (_m)->size) + +#define DO_IOREMAPFREE(_m) \ + do { \ + if ((_m)->handle && (_m)->size) \ + drm_ioremapfree((_m)->handle, (_m)->size); \ + } while (0) + +#define DO_FIND_MAP(_m, _o) \ + do { \ + int _i; \ + for (_i = 0; _i < dev->map_count; _i++) { \ + if (dev->maplist[_i]->offset == _o) { \ + _m = dev->maplist[_i]; \ + break; \ + } \ + } \ + } while (0) + + +int RADEON_READ_PLL(drm_device_t *dev, int addr) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + + RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f); + return RADEON_READ(RADEON_CLOCK_CNTL_DATA); +} + +#if RADEON_FIFO_DEBUG +static void radeon_status( drm_radeon_private_t *dev_priv ) +{ + printk( "%s:\n", __FUNCTION__ ); + printk( "RBBM_STATUS = 0x%08x\n", + (unsigned int)RADEON_READ( RADEON_RBBM_STATUS ) ); + printk( "CP_RB_RTPR = 0x%08x\n", + (unsigned int)RADEON_READ( RADEON_CP_RB_RPTR ) ); + printk( "CP_RB_WTPR = 0x%08x\n", + (unsigned int)RADEON_READ( RADEON_CP_RB_WPTR ) ); +} +#endif + + +/* ================================================================ + * Engine, FIFO control + */ + +static int radeon_do_pixcache_flush( drm_radeon_private_t *dev_priv ) +{ + u32 tmp; + int i; + + tmp = RADEON_READ( RADEON_RB2D_DSTCACHE_CTLSTAT ); + tmp |= RADEON_RB2D_DC_FLUSH_ALL; + RADEON_WRITE( RADEON_RB2D_DSTCACHE_CTLSTAT, tmp ); + + for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { + if ( !(RADEON_READ( RADEON_RB2D_DSTCACHE_CTLSTAT ) + & RADEON_RB2D_DC_BUSY) ) { + return 0; + } + udelay( 1 ); + } + +#if RADEON_FIFO_DEBUG + DRM_ERROR( "failed!\n" ); + radeon_status( dev_priv ); +#endif + return -EBUSY; +} + +static int radeon_do_wait_for_fifo( drm_radeon_private_t *dev_priv, + int entries ) +{ + int i; + + for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { + int slots = ( RADEON_READ( RADEON_RBBM_STATUS ) + & RADEON_RBBM_FIFOCNT_MASK ); + if ( slots >= entries ) return 0; + udelay( 1 ); + } + +#if RADEON_FIFO_DEBUG + DRM_ERROR( "failed!\n" ); + radeon_status( dev_priv ); +#endif + return -EBUSY; +} + +static int radeon_do_wait_for_idle( drm_radeon_private_t *dev_priv ) +{ + int i, ret; + + ret = radeon_do_wait_for_fifo( dev_priv, 64 ); + if ( ret < 0 ) return ret; + + for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { + if ( !(RADEON_READ( RADEON_RBBM_STATUS ) + & RADEON_RBBM_ACTIVE) ) { + radeon_do_pixcache_flush( dev_priv ); + return 0; + } + udelay( 1 ); + } + +#if RADEON_FIFO_DEBUG + DRM_ERROR( "failed!\n" ); + radeon_status( dev_priv ); +#endif + return -EBUSY; +} + + +/* ================================================================ + * CP control, 
initialization + */ + +/* Load the microcode for the CP */ +static void radeon_cp_load_microcode( drm_radeon_private_t *dev_priv ) +{ + int i; + + radeon_do_wait_for_idle( dev_priv ); + + RADEON_WRITE( RADEON_CP_ME_RAM_ADDR, 0 ); + for ( i = 0 ; i < 256 ; i++ ) { + RADEON_WRITE( RADEON_CP_ME_RAM_DATAH, + radeon_cp_microcode[i][1] ); + RADEON_WRITE( RADEON_CP_ME_RAM_DATAL, + radeon_cp_microcode[i][0] ); + } +} + +/* Flush any pending commands to the CP. This should only be used just + * prior to a wait for idle, as it informs the engine that the command + * stream is ending. + */ +static void radeon_do_cp_flush( drm_radeon_private_t *dev_priv ) +{ +#if 0 + u32 tmp; + + tmp = RADEON_READ( RADEON_CP_RB_WPTR ) | (1 << 31); + RADEON_WRITE( RADEON_CP_RB_WPTR, tmp ); +#endif +} + +/* Wait for the CP to go idle. + */ +int radeon_do_cp_idle( drm_radeon_private_t *dev_priv ) +{ + RING_LOCALS; + + BEGIN_RING( 6 ); + + RADEON_PURGE_CACHE(); + RADEON_PURGE_ZCACHE(); + RADEON_WAIT_UNTIL_IDLE(); + + ADVANCE_RING(); + + return radeon_do_wait_for_idle( dev_priv ); +} + +/* Start the Command Processor. + */ +static void radeon_do_cp_start( drm_radeon_private_t *dev_priv ) +{ + RING_LOCALS; + + radeon_do_wait_for_idle( dev_priv ); + + RADEON_WRITE( RADEON_CP_CSQ_CNTL, dev_priv->cp_mode ); + + dev_priv->cp_running = 1; + + BEGIN_RING( 6 ); + + RADEON_PURGE_CACHE(); + RADEON_PURGE_ZCACHE(); + RADEON_WAIT_UNTIL_IDLE(); + + ADVANCE_RING(); +} + +/* Reset the Command Processor. This will not flush any pending + * commands, so you must wait for the CP command stream to complete + * before calling this routine. + */ +static void radeon_do_cp_reset( drm_radeon_private_t *dev_priv ) +{ + u32 cur_read_ptr; + + cur_read_ptr = RADEON_READ( RADEON_CP_RB_RPTR ); + RADEON_WRITE( RADEON_CP_RB_WPTR, cur_read_ptr ); + *dev_priv->ring.head = cur_read_ptr; + dev_priv->ring.tail = cur_read_ptr; +} + +/* Stop the Command Processor. This will not flush any pending + * commands, so you must flush the command stream and wait for the CP + * to go idle before calling this routine. + */ +static void radeon_do_cp_stop( drm_radeon_private_t *dev_priv ) +{ + RADEON_WRITE( RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS ); + + dev_priv->cp_running = 0; +} + +/* Reset the engine. This will stop the CP if it is running. + */ +static int radeon_do_engine_reset( drm_device_t *dev ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + u32 clock_cntl_index, mclk_cntl, rbbm_soft_reset; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + radeon_do_pixcache_flush( dev_priv ); + + clock_cntl_index = RADEON_READ( RADEON_CLOCK_CNTL_INDEX ); + mclk_cntl = RADEON_READ_PLL( dev, RADEON_MCLK_CNTL ); + + /* FIXME: remove magic number here and in radeon ddx driver!!! 
*/ + RADEON_WRITE_PLL( RADEON_MCLK_CNTL, mclk_cntl | 0x003f00000 ); + + rbbm_soft_reset = RADEON_READ( RADEON_RBBM_SOFT_RESET ); + + RADEON_WRITE( RADEON_RBBM_SOFT_RESET, ( rbbm_soft_reset | + RADEON_SOFT_RESET_CP | + RADEON_SOFT_RESET_HI | + RADEON_SOFT_RESET_SE | + RADEON_SOFT_RESET_RE | + RADEON_SOFT_RESET_PP | + RADEON_SOFT_RESET_E2 | + RADEON_SOFT_RESET_RB | + RADEON_SOFT_RESET_HDP ) ); + RADEON_READ( RADEON_RBBM_SOFT_RESET ); + RADEON_WRITE( RADEON_RBBM_SOFT_RESET, ( rbbm_soft_reset & + ~( RADEON_SOFT_RESET_CP | + RADEON_SOFT_RESET_HI | + RADEON_SOFT_RESET_SE | + RADEON_SOFT_RESET_RE | + RADEON_SOFT_RESET_PP | + RADEON_SOFT_RESET_E2 | + RADEON_SOFT_RESET_RB | + RADEON_SOFT_RESET_HDP ) ) ); + RADEON_READ( RADEON_RBBM_SOFT_RESET ); + + + RADEON_WRITE_PLL( RADEON_MCLK_CNTL, mclk_cntl ); + RADEON_WRITE( RADEON_CLOCK_CNTL_INDEX, clock_cntl_index ); + RADEON_WRITE( RADEON_RBBM_SOFT_RESET, rbbm_soft_reset ); + + /* Reset the CP ring */ + radeon_do_cp_reset( dev_priv ); + + /* The CP is no longer running after an engine reset */ + dev_priv->cp_running = 0; + + /* Reset any pending vertex, indirect buffers */ + radeon_freelist_reset( dev ); + + return 0; +} + +static void radeon_cp_init_ring_buffer( drm_device_t *dev ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + u32 ring_start, cur_read_ptr; + u32 tmp; + + /* Initialize the memory controller */ + RADEON_WRITE( RADEON_MC_FB_LOCATION, + (dev_priv->agp_vm_start - 1) & 0xffff0000 ); + RADEON_WRITE( RADEON_MC_AGP_LOCATION, + (((dev_priv->agp_vm_start - 1 + + dev_priv->agp_size) & 0xffff0000) | + (dev_priv->agp_vm_start >> 16)) ); + + ring_start = (dev_priv->cp_ring->offset + - dev->agp->base + + dev_priv->agp_vm_start); + + RADEON_WRITE( RADEON_CP_RB_BASE, ring_start ); + + /* Set the write pointer delay */ + RADEON_WRITE( RADEON_CP_RB_WPTR_DELAY, 0 ); + + /* Initialize the ring buffer's read and write pointers */ + cur_read_ptr = RADEON_READ( RADEON_CP_RB_RPTR ); + RADEON_WRITE( RADEON_CP_RB_WPTR, cur_read_ptr ); + *dev_priv->ring.head = cur_read_ptr; + dev_priv->ring.tail = cur_read_ptr; + + RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, dev_priv->ring_rptr->offset ); + + /* Set ring buffer size */ + RADEON_WRITE( RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw ); + + radeon_do_wait_for_idle( dev_priv ); + + /* Turn off PCI GART */ + tmp = RADEON_READ( RADEON_AIC_CNTL ) & ~RADEON_PCIGART_TRANSLATE_EN; + RADEON_WRITE( RADEON_AIC_CNTL, tmp ); + + /* Turn on bus mastering */ + tmp = RADEON_READ( RADEON_BUS_CNTL ) & ~RADEON_BUS_MASTER_DIS; + RADEON_WRITE( RADEON_BUS_CNTL, tmp ); + + /* Sync everything up */ + RADEON_WRITE( RADEON_ISYNC_CNTL, + (RADEON_ISYNC_ANY2D_IDLE3D | + RADEON_ISYNC_ANY3D_IDLE2D | + RADEON_ISYNC_WAIT_IDLEGUI | + RADEON_ISYNC_CPSCRATCH_IDLEGUI) ); +} + +static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init ) +{ + drm_radeon_private_t *dev_priv; + int i; + + dev_priv = drm_alloc( sizeof(drm_radeon_private_t), DRM_MEM_DRIVER ); + if ( dev_priv == NULL ) + return -ENOMEM; + dev->dev_private = (void *)dev_priv; + + memset( dev_priv, 0, sizeof(drm_radeon_private_t) ); + + dev_priv->is_pci = init->is_pci; + + /* We don't support PCI cards until PCI GART is implemented. + * Fail here so we can remove all checks for PCI cards around + * the CP ring code. 
+ */ + if ( dev_priv->is_pci ) { + drm_free( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER ); + dev->dev_private = NULL; + return -EINVAL; + } + + dev_priv->usec_timeout = init->usec_timeout; + if ( dev_priv->usec_timeout < 1 || + dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT ) { + drm_free( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER ); + dev->dev_private = NULL; + return -EINVAL; + } + + dev_priv->cp_mode = init->cp_mode; + + /* Simple idle check. + */ + atomic_set( &dev_priv->idle_count, 0 ); + + /* We don't support anything other than bus-mastering ring mode, + * but the ring can be in either AGP or PCI space for the ring + * read pointer. + */ + if ( ( init->cp_mode != RADEON_CSQ_PRIBM_INDDIS ) && + ( init->cp_mode != RADEON_CSQ_PRIBM_INDBM ) ) { + drm_free( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER ); + dev->dev_private = NULL; + return -EINVAL; + } + + switch ( init->fb_bpp ) { + case 16: + dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565; + break; + case 32: + default: + dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888; + break; + } + dev_priv->front_offset = init->front_offset; + dev_priv->front_pitch = init->front_pitch; + dev_priv->back_offset = init->back_offset; + dev_priv->back_pitch = init->back_pitch; + + switch ( init->depth_bpp ) { + case 16: + dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z; + break; + case 32: + default: + dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z; + break; + } + dev_priv->depth_offset = init->depth_offset; + dev_priv->depth_pitch = init->depth_pitch; + + dev_priv->front_pitch_offset = (((dev_priv->front_pitch/64) << 22) | + (dev_priv->front_offset >> 10)); + dev_priv->back_pitch_offset = (((dev_priv->back_pitch/64) << 22) | + (dev_priv->back_offset >> 10)); + dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch/64) << 22) | + (dev_priv->depth_offset >> 10)); + + /* Hardware state for depth clears. Remove this if/when we no + * longer clear the depth buffer with a 3D rectangle. Hard-code + * all values to prevent unwanted 3D state from slipping through + * and screwing with the clear operation. + */ + dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE | + RADEON_Z_ENABLE | + (dev_priv->color_fmt << 10) | + RADEON_ZBLOCK16); + + dev_priv->depth_clear.rb3d_zstencilcntl = (dev_priv->depth_fmt | + RADEON_Z_TEST_ALWAYS | + RADEON_STENCIL_TEST_ALWAYS | + RADEON_STENCIL_S_FAIL_KEEP | + RADEON_STENCIL_ZPASS_KEEP | + RADEON_STENCIL_ZFAIL_KEEP | + RADEON_Z_WRITE_ENABLE); + + dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW | + RADEON_BFACE_SOLID | + RADEON_FFACE_SOLID | + RADEON_FLAT_SHADE_VTX_LAST | + + RADEON_DIFFUSE_SHADE_FLAT | + RADEON_ALPHA_SHADE_FLAT | + RADEON_SPECULAR_SHADE_FLAT | + RADEON_FOG_SHADE_FLAT | + + RADEON_VTX_PIX_CENTER_OGL | + RADEON_ROUND_MODE_TRUNC | + RADEON_ROUND_PREC_8TH_PIX); + + /* FIXME: We want multiple shared areas, including one shared + * only by the X Server and kernel module. 
+ */ + for ( i = 0 ; i < dev->map_count ; i++ ) { + if ( dev->maplist[i]->type == _DRM_SHM ) { + dev_priv->sarea = dev->maplist[i]; + break; + } + } + + DO_FIND_MAP( dev_priv->fb, init->fb_offset ); + DO_FIND_MAP( dev_priv->mmio, init->mmio_offset ); + DO_FIND_MAP( dev_priv->cp_ring, init->ring_offset ); + DO_FIND_MAP( dev_priv->ring_rptr, init->ring_rptr_offset ); + DO_FIND_MAP( dev_priv->buffers, init->buffers_offset ); + + if ( !dev_priv->is_pci ) { + DO_FIND_MAP( dev_priv->agp_textures, + init->agp_textures_offset ); + } + + dev_priv->sarea_priv = + (drm_radeon_sarea_t *)((u8 *)dev_priv->sarea->handle + + init->sarea_priv_offset); + + DO_IOREMAP( dev_priv->cp_ring ); + DO_IOREMAP( dev_priv->ring_rptr ); + DO_IOREMAP( dev_priv->buffers ); +#if 0 + if ( !dev_priv->is_pci ) { + DO_IOREMAP( dev_priv->agp_textures ); + } +#endif + + dev_priv->agp_size = init->agp_size; + dev_priv->agp_vm_start = RADEON_READ( RADEON_CONFIG_APER_SIZE ); + dev_priv->agp_buffers_offset = (dev_priv->buffers->offset + - dev->agp->base + + dev_priv->agp_vm_start); + + dev_priv->ring.head = ((__volatile__ u32 *) + dev_priv->ring_rptr->handle); + + dev_priv->ring.start = (u32 *)dev_priv->cp_ring->handle; + dev_priv->ring.end = ((u32 *)dev_priv->cp_ring->handle + + init->ring_size / sizeof(u32)); + dev_priv->ring.size = init->ring_size; + dev_priv->ring.size_l2qw = drm_order( init->ring_size / 8 ); + + dev_priv->ring.tail_mask = + (dev_priv->ring.size / sizeof(u32)) - 1; + +#if 0 + /* Initialize the scratch register pointer. This will cause + * the scratch register values to be written out to memory + * whenever they are updated. + * FIXME: This doesn't quite work yet, so we're disabling it + * for the release. + */ + RADEON_WRITE( RADEON_SCRATCH_ADDR, (dev_priv->ring_rptr->offset + + RADEON_SCRATCH_REG_OFFSET) ); + RADEON_WRITE( RADEON_SCRATCH_UMSK, 0x7 ); +#endif + + dev_priv->scratch = ((__volatile__ u32 *) + dev_priv->ring_rptr->handle + + (RADEON_SCRATCH_REG_OFFSET / sizeof(u32))); + + dev_priv->sarea_priv->last_frame = 0; + RADEON_WRITE( RADEON_LAST_FRAME_REG, + dev_priv->sarea_priv->last_frame ); + + dev_priv->sarea_priv->last_dispatch = 0; + RADEON_WRITE( RADEON_LAST_DISPATCH_REG, + dev_priv->sarea_priv->last_dispatch ); + + dev_priv->sarea_priv->last_clear = 0; + RADEON_WRITE( RADEON_LAST_CLEAR_REG, + dev_priv->sarea_priv->last_clear ); + + radeon_cp_load_microcode( dev_priv ); + radeon_cp_init_ring_buffer( dev ); + radeon_do_engine_reset( dev ); + +#if ROTATE_BUFS + dev_priv->last_buf = 0; +#endif + + return 0; +} + +static int radeon_do_cleanup_cp( drm_device_t *dev ) +{ + if ( dev->dev_private ) { + drm_radeon_private_t *dev_priv = dev->dev_private; + + DO_IOREMAPFREE( dev_priv->cp_ring ); + DO_IOREMAPFREE( dev_priv->ring_rptr ); + DO_IOREMAPFREE( dev_priv->buffers ); +#if 0 + if ( !dev_priv->is_pci ) { + DO_IOREMAPFREE( dev_priv->agp_textures ); + } +#endif + + drm_free( dev->dev_private, sizeof(drm_radeon_private_t), + DRM_MEM_DRIVER ); + dev->dev_private = NULL; + } + + return 0; +} + +int radeon_cp_init( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_init_t init; + + if ( copy_from_user( &init, (drm_radeon_init_t *)arg, sizeof(init) ) ) + return -EFAULT; + + switch ( init.func ) { + case RADEON_INIT_CP: + return radeon_do_init_cp( dev, &init ); + case RADEON_CLEANUP_CP: + return radeon_do_cleanup_cp( dev ); + } + + return -EINVAL; +} + +int radeon_cp_start( struct inode *inode, 
struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + if ( dev_priv->cp_running ) { + DRM_DEBUG( "%s while CP running\n", __FUNCTION__ ); + return 0; + } + if ( dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS ) { + DRM_DEBUG( "%s called with bogus CP mode (%d)\n", + __FUNCTION__, dev_priv->cp_mode ); + return 0; + } + + radeon_do_cp_start( dev_priv ); + + return 0; +} + +/* Stop the CP. The engine must have been idled before calling this + * routine. + */ +int radeon_cp_stop( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_cp_stop_t stop; + int ret; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + + if ( copy_from_user( &stop, (drm_radeon_cp_stop_t *)arg, sizeof(stop) ) ) + return -EFAULT; + + /* Flush any pending CP commands. This ensures any outstanding + * commands are executed by the engine before we turn it off. + */ + if ( stop.flush ) { + radeon_do_cp_flush( dev_priv ); + } + + /* If we fail to make the engine go idle, we return an error + * code so that the DRM ioctl wrapper can try again. + */ + if ( stop.idle ) { + ret = radeon_do_cp_idle( dev_priv ); + if ( ret < 0 ) return ret; + } + + /* Finally, we can turn off the CP. If the engine isn't idle, + * we will get some dropped triangles as they won't be fully + * rendered before the CP is shut down. + */ + radeon_do_cp_stop( dev_priv ); + + /* Reset the engine */ + radeon_do_engine_reset( dev ); + + return 0; +} + +/* Just reset the CP ring. Called as part of an X Server engine reset. 
+ */ +int radeon_cp_reset( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + if ( !dev_priv ) { + DRM_DEBUG( "%s called before init done\n", __FUNCTION__ ); + return -EINVAL; + } + + radeon_do_cp_reset( dev_priv ); + + /* The CP is no longer running after an engine reset */ + dev_priv->cp_running = 0; + + return 0; +} + +int radeon_cp_idle( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + + return radeon_do_cp_idle( dev_priv ); +} + +int radeon_engine_reset( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + + return radeon_do_engine_reset( dev ); +} + + +/* ================================================================ + * Fullscreen mode + */ + +static int radeon_do_init_pageflip( drm_device_t *dev ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + dev_priv->crtc_offset = RADEON_READ( RADEON_CRTC_OFFSET ); + dev_priv->crtc_offset_cntl = RADEON_READ( RADEON_CRTC_OFFSET_CNTL ); + + RADEON_WRITE( RADEON_CRTC_OFFSET, dev_priv->front_offset ); + RADEON_WRITE( RADEON_CRTC_OFFSET_CNTL, + dev_priv->crtc_offset_cntl | + RADEON_CRTC_OFFSET_FLIP_CNTL ); + + dev_priv->page_flipping = 1; + dev_priv->current_page = 0; + + return 0; +} + +int radeon_do_cleanup_pageflip( drm_device_t *dev ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + RADEON_WRITE( RADEON_CRTC_OFFSET, dev_priv->crtc_offset ); + RADEON_WRITE( RADEON_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl ); + + dev_priv->page_flipping = 0; + dev_priv->current_page = 0; + + return 0; +} + +int radeon_fullscreen( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_fullscreen_t fs; + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + + if ( copy_from_user( &fs, (drm_radeon_fullscreen_t *)arg, + sizeof(fs) ) ) + return -EFAULT; + + switch ( fs.func ) { + case RADEON_INIT_FULLSCREEN: + return radeon_do_init_pageflip( dev ); + case RADEON_CLEANUP_FULLSCREEN: + return radeon_do_cleanup_pageflip( dev ); + } + + return -EINVAL; +} + + +/* ================================================================ + * Freelist management + */ +#define RADEON_BUFFER_USED 0xffffffff +#define RADEON_BUFFER_FREE 0 + +#if 0 +static int radeon_freelist_init( drm_device_t *dev ) 
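/* Note (descriptive, not part of the patch): this #if 0'd initializer
 * appears to build the doubly-linked freelist (dev_priv->head/tail, one
 * drm_radeon_freelist_t per DMA buffer, aged RADEON_BUFFER_FREE or
 * RADEON_BUFFER_USED) that the FIXME in radeon_freelist_get() refers to.
 * Until it is enabled, buffers are recycled by the simpler ROTATE_BUFS
 * scan over dma->buflist below.
 */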
+{ + drm_device_dma_t *dma = dev->dma; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_buf_t *buf; + drm_radeon_buf_priv_t *buf_priv; + drm_radeon_freelist_t *entry; + int i; + + dev_priv->head = drm_alloc( sizeof(drm_radeon_freelist_t), + DRM_MEM_DRIVER ); + if ( dev_priv->head == NULL ) + return -ENOMEM; + + memset( dev_priv->head, 0, sizeof(drm_radeon_freelist_t) ); + dev_priv->head->age = RADEON_BUFFER_USED; + + for ( i = 0 ; i < dma->buf_count ; i++ ) { + buf = dma->buflist[i]; + buf_priv = buf->dev_private; + + entry = drm_alloc( sizeof(drm_radeon_freelist_t), + DRM_MEM_DRIVER ); + if ( !entry ) return -ENOMEM; + + entry->age = RADEON_BUFFER_FREE; + entry->buf = buf; + entry->prev = dev_priv->head; + entry->next = dev_priv->head->next; + if ( !entry->next ) + dev_priv->tail = entry; + + buf_priv->discard = 0; + buf_priv->dispatched = 0; + buf_priv->list_entry = entry; + + dev_priv->head->next = entry; + + if ( dev_priv->head->next ) + dev_priv->head->next->prev = entry; + } + + return 0; + +} +#endif + +drm_buf_t *radeon_freelist_get( drm_device_t *dev ) +{ + drm_device_dma_t *dma = dev->dma; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_buf_priv_t *buf_priv; + drm_buf_t *buf; + int i, t; +#if ROTATE_BUFS + int start; +#endif + + /* FIXME: Optimize -- use freelist code */ + + for ( i = 0 ; i < dma->buf_count ; i++ ) { + buf = dma->buflist[i]; + buf_priv = buf->dev_private; + if ( buf->pid == 0 ) { + DRM_DEBUG( " ret buf=%d last=%d pid=0\n", + buf->idx, dev_priv->last_buf ); + return buf; + } + DRM_DEBUG( " skipping buf=%d pid=%d\n", + buf->idx, buf->pid ); + } + +#if ROTATE_BUFS + if ( ++dev_priv->last_buf >= dma->buf_count ) + dev_priv->last_buf = 0; + start = dev_priv->last_buf; +#endif + for ( t = 0 ; t < dev_priv->usec_timeout ; t++ ) { +#if 0 + /* FIXME: Disable this for now */ + u32 done_age = dev_priv->scratch[RADEON_LAST_DISPATCH]; +#else + u32 done_age = RADEON_READ( RADEON_LAST_DISPATCH_REG ); +#endif +#if ROTATE_BUFS + for ( i = start ; i < dma->buf_count ; i++ ) { +#else + for ( i = 0 ; i < dma->buf_count ; i++ ) { +#endif + buf = dma->buflist[i]; + buf_priv = buf->dev_private; + if ( buf->pending && buf_priv->age <= done_age ) { + /* The buffer has been processed, so it + * can now be used. + */ + buf->pending = 0; + DRM_DEBUG( " ret buf=%d last=%d age=%d done=%d\n", buf->idx, dev_priv->last_buf, buf_priv->age, done_age ); + return buf; + } + DRM_DEBUG( " skipping buf=%d age=%d done=%d\n", + buf->idx, buf_priv->age, + done_age ); +#if ROTATE_BUFS + start = 0; +#endif + } + udelay( 1 ); + } + + DRM_ERROR( "returning NULL!\n" ); + return NULL; +} + +void radeon_freelist_reset( drm_device_t *dev ) +{ + drm_device_dma_t *dma = dev->dma; +#if ROTATE_BUFS + drm_radeon_private_t *dev_priv = dev->dev_private; +#endif + int i; + +#if ROTATE_BUFS + dev_priv->last_buf = 0; +#endif + for ( i = 0 ; i < dma->buf_count ; i++ ) { + drm_buf_t *buf = dma->buflist[i]; + drm_radeon_buf_priv_t *buf_priv = buf->dev_private; + buf_priv->age = 0; + } +} + + +/* ================================================================ + * CP command submission + */ + +int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n ) +{ + drm_radeon_ring_buffer_t *ring = &dev_priv->ring; + int i; + + for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { + ring->space = *ring->head - ring->tail; + if ( ring->space <= 0 ) + ring->space += ring->size; + + if ( ring->space >= n ) + return 0; + + udelay( 1 ); + } + + /* FIXME: This return value is ignored in the BEGIN_RING macro! 
*/ + DRM_ERROR( "failed!\n" ); + return -EBUSY; +} + +void radeon_update_ring_snapshot( drm_radeon_private_t *dev_priv ) +{ + drm_radeon_ring_buffer_t *ring = &dev_priv->ring; + + ring->space = *ring->head - ring->tail; + if ( ring->space == 0 ) + atomic_inc( &dev_priv->idle_count ); + if ( ring->space <= 0 ) + ring->space += ring->size; +} + +static int radeon_cp_get_buffers( drm_device_t *dev, drm_dma_t *d ) +{ + int i; + drm_buf_t *buf; + + for ( i = d->granted_count ; i < d->request_count ; i++ ) { + buf = radeon_freelist_get( dev ); + if ( !buf ) return -EAGAIN; + + buf->pid = current->pid; + + if ( copy_to_user( &d->request_indices[i], &buf->idx, + sizeof(buf->idx) ) ) + return -EFAULT; + if ( copy_to_user( &d->request_sizes[i], &buf->total, + sizeof(buf->total) ) ) + return -EFAULT; + + d->granted_count++; + } + return 0; +} + +int radeon_cp_buffers( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_device_dma_t *dma = dev->dma; + int ret = 0; + drm_dma_t d; + + if ( copy_from_user( &d, (drm_dma_t *) arg, sizeof(d) ) ) + return -EFAULT; + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + + /* Please don't send us buffers. + */ + if ( d.send_count != 0 ) { + DRM_ERROR( "Process %d trying to send %d buffers via drmDMA\n", + current->pid, d.send_count ); + return -EINVAL; + } + + /* We'll send you buffers. + */ + if ( d.request_count < 0 || d.request_count > dma->buf_count ) { + DRM_ERROR( "Process %d trying to get %d buffers (of %d max)\n", + current->pid, d.request_count, dma->buf_count ); + return -EINVAL; + } + + d.granted_count = 0; + + if ( d.request_count ) { + ret = radeon_cp_get_buffers( dev, &d ); + } + + if ( copy_to_user( (drm_dma_t *) arg, &d, sizeof(d) ) ) + return -EFAULT; + + return ret; +} diff --git a/linux/radeon_drm.h b/linux/radeon_drm.h new file mode 100644 index 000000000..c5f9f66d1 --- /dev/null +++ b/linux/radeon_drm.h @@ -0,0 +1,325 @@ +/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*- + * + * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Fremont, California. + * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Kevin E. 
Martin <martin@valinux.com> + * Gareth Hughes <gareth@valinux.com> + * + */ + +#ifndef __RADEON_DRM_H__ +#define __RADEON_DRM_H__ + +/* WARNING: If you change any of these defines, make sure to change the + * defines in the X server file (radeon_sarea.h) + */ +#ifndef __RADEON_SAREA_DEFINES__ +#define __RADEON_SAREA_DEFINES__ + +/* What needs to be changed for the current vertex buffer? + */ +#define RADEON_UPLOAD_CONTEXT 0x00000001 +#define RADEON_UPLOAD_VERTFMT 0x00000002 +#define RADEON_UPLOAD_LINE 0x00000004 +#define RADEON_UPLOAD_BUMPMAP 0x00000008 +#define RADEON_UPLOAD_MASKS 0x00000010 +#define RADEON_UPLOAD_VIEWPORT 0x00000020 +#define RADEON_UPLOAD_SETUP 0x00000040 +#define RADEON_UPLOAD_TCL 0x00000080 +#define RADEON_UPLOAD_MISC 0x00000100 +#define RADEON_UPLOAD_TEX0 0x00000200 +#define RADEON_UPLOAD_TEX1 0x00000400 +#define RADEON_UPLOAD_TEX2 0x00000800 +#define RADEON_UPLOAD_TEX0IMAGES 0x00001000 +#define RADEON_UPLOAD_TEX1IMAGES 0x00002000 +#define RADEON_UPLOAD_TEX2IMAGES 0x00004000 +#define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */ +#define RADEON_REQUIRE_QUIESCENCE 0x00010000 +#define RADEON_UPLOAD_ALL 0x0001ffff + +#define RADEON_FRONT 0x1 +#define RADEON_BACK 0x2 +#define RADEON_DEPTH 0x4 + +/* Primitive types + */ +#define RADEON_POINTS 0x1 +#define RADEON_LINES 0x2 +#define RADEON_LINE_STRIP 0x3 +#define RADEON_TRIANGLES 0x4 +#define RADEON_TRIANGLE_FAN 0x5 +#define RADEON_TRIANGLE_STRIP 0x6 + +/* Vertex/indirect buffer size + */ +#define RADEON_BUFFER_SIZE 16384 + +/* Byte offsets for indirect buffer data + */ +#define RADEON_INDEX_PRIM_OFFSET 20 +#define RADEON_HOSTDATA_BLIT_OFFSET 32 + +#define RADEON_SCRATCH_REG_OFFSET 32 + +/* Keep these small for testing + */ +#define RADEON_NR_SAREA_CLIPRECTS 12 + +/* There are 2 heaps (local/AGP). Each region within a heap is a + * minimum of 64k, and there are at most 64 of them per heap. 
+ */ +#define RADEON_LOCAL_TEX_HEAP 0 +#define RADEON_AGP_TEX_HEAP 1 +#define RADEON_NR_TEX_HEAPS 2 +#define RADEON_NR_TEX_REGIONS 64 +#define RADEON_LOG_TEX_GRANULARITY 16 + +#define RADEON_MAX_TEXTURE_LEVELS 11 +#define RADEON_MAX_TEXTURE_UNITS 3 + +#endif /* __RADEON_SAREA_DEFINES__ */ + +typedef struct { + unsigned int red; + unsigned int green; + unsigned int blue; + unsigned int alpha; +} radeon_color_regs_t; + +typedef struct { + /* Context state */ + unsigned int pp_misc; /* 0x1c14 */ + unsigned int pp_fog_color; + unsigned int re_solid_color; + unsigned int rb3d_blendcntl; + unsigned int rb3d_depthoffset; + unsigned int rb3d_depthpitch; + unsigned int rb3d_zstencilcntl; + + unsigned int pp_cntl; /* 0x1c38 */ + unsigned int rb3d_cntl; + unsigned int rb3d_coloroffset; + unsigned int re_width_height; + unsigned int rb3d_colorpitch; + unsigned int se_cntl; + + /* Vertex format state */ + unsigned int se_coord_fmt; /* 0x1c50 */ + + /* Line state */ + unsigned int re_line_pattern; /* 0x1cd0 */ + unsigned int re_line_state; + + unsigned int se_line_width; /* 0x1db8 */ + + /* Bumpmap state */ + unsigned int pp_lum_matrix; /* 0x1d00 */ + + unsigned int pp_rot_matrix_0; /* 0x1d58 */ + unsigned int pp_rot_matrix_1; + + /* Mask state */ + unsigned int rb3d_stencilrefmask; /* 0x1d7c */ + unsigned int rb3d_ropcntl; + unsigned int rb3d_planemask; + + /* Viewport state */ + unsigned int se_vport_xscale; /* 0x1d98 */ + unsigned int se_vport_xoffset; + unsigned int se_vport_yscale; + unsigned int se_vport_yoffset; + unsigned int se_vport_zscale; + unsigned int se_vport_zoffset; + + /* Setup state */ + unsigned int se_cntl_status; /* 0x2140 */ + +#ifdef TCL_ENABLE + /* TCL state */ + radeon_color_regs_t se_tcl_material_emmissive; /* 0x2210 */ + radeon_color_regs_t se_tcl_material_ambient; + radeon_color_regs_t se_tcl_material_diffuse; + radeon_color_regs_t se_tcl_material_specular; + unsigned int se_tcl_shininess; + unsigned int se_tcl_output_vtx_fmt; + unsigned int se_tcl_output_vtx_sel; + unsigned int se_tcl_matrix_select_0; + unsigned int se_tcl_matrix_select_1; + unsigned int se_tcl_ucp_vert_blend_ctl; + unsigned int se_tcl_texture_proc_ctl; + unsigned int se_tcl_light_model_ctl; + unsigned int se_tcl_per_light_ctl[4]; +#endif + + /* Misc state */ + unsigned int re_top_left; /* 0x26c0 */ + unsigned int re_misc; +} drm_radeon_context_regs_t; + +/* Setup registers for each texture unit + */ +typedef struct { + unsigned int pp_txfilter; + unsigned int pp_txformat; + unsigned int pp_txoffset; + unsigned int pp_txcblend; + unsigned int pp_txablend; + unsigned int pp_tfactor; + + unsigned int pp_border_color; + +#ifdef CUBIC_ENABLE + unsigned int pp_cubic_faces; + unsigned int pp_cubic_offset[5]; +#endif +} drm_radeon_texture_regs_t; + +typedef struct { + unsigned char next, prev; + unsigned char in_use; + int age; +} drm_radeon_tex_region_t; + +typedef struct { + /* The channel for communication of state information to the kernel + * on firing a vertex buffer. + */ + drm_radeon_context_regs_t context_state; + drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS]; + unsigned int dirty; + unsigned int vertsize; + unsigned int vc_format; + + /* The current cliprects, or a subset thereof. + */ + drm_clip_rect_t boxes[RADEON_NR_SAREA_CLIPRECTS]; + unsigned int nbox; + + /* Counters for client-side throttling of rendering clients. 
+ */ + unsigned int last_frame; + unsigned int last_dispatch; + unsigned int last_clear; + + drm_radeon_tex_region_t tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS+1]; + int tex_age[RADEON_NR_TEX_HEAPS]; + int ctx_owner; +} drm_radeon_sarea_t; + + +/* WARNING: If you change any of these defines, make sure to change the + * defines in the Xserver file (xf86drmRadeon.h) + */ +typedef struct drm_radeon_init { + enum { + RADEON_INIT_CP = 0x01, + RADEON_CLEANUP_CP = 0x02 + } func; + int sarea_priv_offset; + int is_pci; + int cp_mode; + int agp_size; + int ring_size; + int usec_timeout; + + unsigned int fb_bpp; + unsigned int front_offset, front_pitch; + unsigned int back_offset, back_pitch; + unsigned int depth_bpp; + unsigned int depth_offset, depth_pitch; + + unsigned int fb_offset; + unsigned int mmio_offset; + unsigned int ring_offset; + unsigned int ring_rptr_offset; + unsigned int buffers_offset; + unsigned int agp_textures_offset; +} drm_radeon_init_t; + +typedef struct drm_radeon_cp_stop { + int flush; + int idle; +} drm_radeon_cp_stop_t; + +typedef struct drm_radeon_fullscreen { + enum { + RADEON_INIT_FULLSCREEN = 0x01, + RADEON_CLEANUP_FULLSCREEN = 0x02 + } func; +} drm_radeon_fullscreen_t; + +#define CLEAR_X1 0 +#define CLEAR_Y1 1 +#define CLEAR_X2 2 +#define CLEAR_Y2 3 +#define CLEAR_DEPTH 4 + +typedef struct drm_radeon_clear { + unsigned int flags; + int x, y, w, h; + unsigned int clear_color; + unsigned int clear_depth; + union { + float f[5]; + unsigned int ui[5]; + } rect; +} drm_radeon_clear_t; + +typedef struct drm_radeon_vertex { + int prim; + int idx; /* Index of vertex buffer */ + int count; /* Number of vertices in buffer */ + int discard; /* Client finished with buffer? */ +} drm_radeon_vertex_t; + +typedef struct drm_radeon_indices { + int prim; + int idx; + int start; + int end; + int discard; /* Client finished with buffer? */ +} drm_radeon_indices_t; + +typedef struct drm_radeon_blit { + int idx; + int pitch; + int offset; + int format; + unsigned short x, y; + unsigned short width, height; +} drm_radeon_blit_t; + +typedef struct drm_radeon_stipple { + unsigned int *mask; +} drm_radeon_stipple_t; + +typedef struct drm_radeon_indirect { + int idx; + int start; + int end; + int discard; +} drm_radeon_indirect_t; + +#endif diff --git a/linux/radeon_drv.c b/linux/radeon_drv.c new file mode 100644 index 000000000..0113ed97c --- /dev/null +++ b/linux/radeon_drv.c @@ -0,0 +1,702 @@ +/* radeon_drv.c -- ATI Radeon driver -*- linux-c -*- + * + * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Fremont, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Kevin E. Martin <martin@valinux.com> + * Rickard E. (Rik) Faith <faith@valinux.com> + * + */ + +#include <linux/config.h> +#include "drmP.h" +#include "radeon_drv.h" + +#define RADEON_NAME "radeon" +#define RADEON_DESC "ATI Radeon" +#define RADEON_DATE "20010105" +#define RADEON_MAJOR 1 +#define RADEON_MINOR 0 +#define RADEON_PATCHLEVEL 0 + +static drm_device_t radeon_device; +drm_ctx_t radeon_res_ctx; + +static struct file_operations radeon_fops = { +#if LINUX_VERSION_CODE >= 0x020400 + /* This started being used during 2.4.0-test */ + owner: THIS_MODULE, +#endif + open: radeon_open, + flush: drm_flush, + release: radeon_release, + ioctl: radeon_ioctl, + mmap: drm_mmap, + read: drm_read, + fasync: drm_fasync, + poll: drm_poll, +}; + +static struct miscdevice radeon_misc = { + minor: MISC_DYNAMIC_MINOR, + name: RADEON_NAME, + fops: &radeon_fops, +}; + +static drm_ioctl_desc_t radeon_ioctls[] = { + [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { radeon_version, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 }, + + [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { radeon_addbufs, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { radeon_mapbufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 }, + + [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { radeon_addctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { radeon_rmctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { radeon_modctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { radeon_getctx, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { radeon_switchctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { radeon_newctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { radeon_resctx, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { radeon_lock, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { radeon_unlock, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 }, + +#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 }, +#endif + + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_INIT)] = { 
radeon_cp_init, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_START)] = { radeon_cp_start, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_STOP)] = { radeon_cp_stop, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_RESET)] = { radeon_cp_reset, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_IDLE)] = { radeon_cp_idle, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_RESET)] = { radeon_engine_reset, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FULLSCREEN)] = { radeon_fullscreen, 1, 0 }, + + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_SWAP)] = { radeon_cp_swap, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CLEAR)] = { radeon_cp_clear, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_VERTEX)] = { radeon_cp_vertex, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDICES)] = { radeon_cp_indices, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_BLIT)] = { radeon_cp_blit, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_STIPPLE)] = { radeon_cp_stipple, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDIRECT)]= { radeon_cp_indirect,1, 1 }, +}; +#define RADEON_IOCTL_COUNT DRM_ARRAY_SIZE(radeon_ioctls) + +#ifdef MODULE +static char *radeon = NULL; +#endif + +MODULE_AUTHOR("VA Linux Systems, Inc."); +MODULE_DESCRIPTION("radeon"); +MODULE_PARM(radeon, "s"); + +#ifndef MODULE +/* radeon_options is called by the kernel to parse command-line options + * passed via the boot-loader (e.g., LILO). It calls the insmod option + * routine, drm_parse_drm. + */ + +static int __init radeon_options(char *str) +{ + drm_parse_options(str); + return 1; +} + +__setup("radeon=", radeon_options); +#endif + +static int radeon_setup(drm_device_t *dev) +{ + int i; + + atomic_set(&dev->ioctl_count, 0); + atomic_set(&dev->vma_count, 0); + dev->buf_use = 0; + atomic_set(&dev->buf_alloc, 0); + + drm_dma_setup(dev); + + atomic_set(&dev->total_open, 0); + atomic_set(&dev->total_close, 0); + atomic_set(&dev->total_ioctl, 0); + atomic_set(&dev->total_irq, 0); + atomic_set(&dev->total_ctx, 0); + atomic_set(&dev->total_locks, 0); + atomic_set(&dev->total_unlocks, 0); + atomic_set(&dev->total_contends, 0); + atomic_set(&dev->total_sleeps, 0); + + for (i = 0; i < DRM_HASH_SIZE; i++) { + dev->magiclist[i].head = NULL; + dev->magiclist[i].tail = NULL; + } + dev->maplist = NULL; + dev->map_count = 0; + dev->vmalist = NULL; + dev->lock.hw_lock = NULL; + init_waitqueue_head(&dev->lock.lock_queue); + dev->queue_count = 0; + dev->queue_reserved = 0; + dev->queue_slots = 0; + dev->queuelist = NULL; + dev->irq = 0; + dev->context_flag = 0; + dev->interrupt_flag = 0; + dev->dma_flag = 0; + dev->last_context = 0; + dev->last_switch = 0; + dev->last_checked = 0; + init_timer(&dev->timer); + init_waitqueue_head(&dev->context_wait); + + dev->ctx_start = 0; + dev->lck_start = 0; + + dev->buf_rp = dev->buf; + dev->buf_wp = dev->buf; + dev->buf_end = dev->buf + DRM_BSZ; + dev->buf_async = NULL; + init_waitqueue_head(&dev->buf_readers); + init_waitqueue_head(&dev->buf_writers); + + radeon_res_ctx.handle = -1; + + DRM_DEBUG("\n"); + + /* The kernel's context could be created here, but is now created + in drm_dma_enqueue. This is more resource-efficient for + hardware that does not do DMA, but may mean that + drm_select_queue fails between the time the interrupt is + initialized and the time the queues are initialized. 
*/ + + return 0; +} + + +static int radeon_takedown(drm_device_t *dev) +{ + int i; + drm_magic_entry_t *pt, *next; + drm_map_t *map; + drm_vma_entry_t *vma, *vma_next; + + DRM_DEBUG("\n"); + + down(&dev->struct_sem); + del_timer(&dev->timer); + + if (dev->devname) { + drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER); + dev->devname = NULL; + } + + if (dev->unique) { + drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER); + dev->unique = NULL; + dev->unique_len = 0; + } + /* Clear pid list */ + for (i = 0; i < DRM_HASH_SIZE; i++) { + for (pt = dev->magiclist[i].head; pt; pt = next) { + next = pt->next; + drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); + } + dev->magiclist[i].head = dev->magiclist[i].tail = NULL; + } + +#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) + /* Clear AGP information */ + if (dev->agp) { + drm_agp_mem_t *entry; + drm_agp_mem_t *nexte; + + /* Remove AGP resources, but leave dev->agp + intact until radeon_cleanup is called. */ + for (entry = dev->agp->memory; entry; entry = nexte) { + nexte = entry->next; + if (entry->bound) drm_unbind_agp(entry->memory); + drm_free_agp(entry->memory, entry->pages); + drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); + } + dev->agp->memory = NULL; + + if (dev->agp->acquired) _drm_agp_release(); + + dev->agp->acquired = 0; + dev->agp->enabled = 0; + } +#endif + + /* Clear vma list (only built for debugging) */ + if (dev->vmalist) { + for (vma = dev->vmalist; vma; vma = vma_next) { + vma_next = vma->next; + drm_free(vma, sizeof(*vma), DRM_MEM_VMAS); + } + dev->vmalist = NULL; + } + + /* Clear map area and mtrr information */ + if (dev->maplist) { + for (i = 0; i < dev->map_count; i++) { + map = dev->maplist[i]; + switch (map->type) { + case _DRM_REGISTERS: + case _DRM_FRAME_BUFFER: +#ifdef CONFIG_MTRR + if (map->mtrr >= 0) { + int retcode; + retcode = mtrr_del(map->mtrr, + map->offset, + map->size); + DRM_DEBUG("mtrr_del = %d\n", retcode); + } +#endif + drm_ioremapfree(map->handle, map->size); + break; + case _DRM_SHM: + drm_free_pages((unsigned long)map->handle, + drm_order(map->size) + - PAGE_SHIFT, + DRM_MEM_SAREA); + break; + case _DRM_AGP: + /* Do nothing here, because this is all + handled in the AGP/GART driver. */ + break; + } + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + } + drm_free(dev->maplist, + dev->map_count * sizeof(*dev->maplist), + DRM_MEM_MAPS); + dev->maplist = NULL; + dev->map_count = 0; + } + + drm_dma_takedown(dev); + + dev->queue_count = 0; + if (dev->lock.hw_lock) { + dev->lock.hw_lock = NULL; /* SHM removed */ + dev->lock.pid = 0; + wake_up_interruptible(&dev->lock.lock_queue); + } + up(&dev->struct_sem); + + return 0; +} + +/* radeon_init is called via init_module at module load time, or via + * linux/init/main.c (this is not currently supported). 
*/ + +static int __init radeon_init(void) +{ + int retcode; + drm_device_t *dev = &radeon_device; + + DRM_DEBUG("\n"); + + memset((void *)dev, 0, sizeof(*dev)); + dev->count_lock = SPIN_LOCK_UNLOCKED; + sema_init(&dev->struct_sem, 1); + +#ifdef MODULE + drm_parse_options(radeon); +#endif + + if ((retcode = misc_register(&radeon_misc))) { + DRM_ERROR("Cannot register \"%s\"\n", RADEON_NAME); + return retcode; + } + dev->device = MKDEV(MISC_MAJOR, radeon_misc.minor); + dev->name = RADEON_NAME; + + drm_mem_init(); + drm_proc_init(dev); + +#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) + dev->agp = drm_agp_init(); + if (dev->agp == NULL) { + DRM_ERROR("Cannot initialize agpgart module.\n"); + drm_proc_cleanup(); + misc_deregister(&radeon_misc); + radeon_takedown(dev); + return -ENOMEM; + } + +#ifdef CONFIG_MTRR + dev->agp->agp_mtrr = mtrr_add(dev->agp->agp_info.aper_base, + dev->agp->agp_info.aper_size*1024*1024, + MTRR_TYPE_WRCOMB, + 1); +#endif +#endif + + if((retcode = drm_ctxbitmap_init(dev))) { + DRM_ERROR("Cannot allocate memory for context bitmap.\n"); + drm_proc_cleanup(); + misc_deregister(&radeon_misc); + radeon_takedown(dev); + return retcode; + } + + DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", + RADEON_NAME, + RADEON_MAJOR, + RADEON_MINOR, + RADEON_PATCHLEVEL, + RADEON_DATE, + radeon_misc.minor); + + return 0; +} + +/* radeon_cleanup is called via cleanup_module at module unload time. */ + +static void __exit radeon_cleanup(void) +{ + drm_device_t *dev = &radeon_device; + + DRM_DEBUG("\n"); + + drm_proc_cleanup(); + if (misc_deregister(&radeon_misc)) { + DRM_ERROR("Cannot unload module\n"); + } else { + DRM_INFO("Module unloaded\n"); + } + drm_ctxbitmap_cleanup(dev); + radeon_takedown(dev); +#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) + if (dev->agp) { + drm_agp_uninit(); + drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS); + dev->agp = NULL; + } +#endif +} + +module_init(radeon_init); +module_exit(radeon_cleanup); + + +int radeon_version(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_version_t version; + int len; + + if (copy_from_user(&version, + (drm_version_t *)arg, + sizeof(version))) + return -EFAULT; + +#define DRM_COPY(name,value) \ + len = strlen(value); \ + if (len > name##_len) len = name##_len; \ + name##_len = strlen(value); \ + if (len && name) { \ + if (copy_to_user(name, value, len)) \ + return -EFAULT; \ + } + + version.version_major = RADEON_MAJOR; + version.version_minor = RADEON_MINOR; + version.version_patchlevel = RADEON_PATCHLEVEL; + + DRM_COPY(version.name, RADEON_NAME); + DRM_COPY(version.date, RADEON_DATE); + DRM_COPY(version.desc, RADEON_DESC); + + if (copy_to_user((drm_version_t *)arg, + &version, + sizeof(version))) + return -EFAULT; + return 0; +} + +int radeon_open(struct inode *inode, struct file *filp) +{ + drm_device_t *dev = &radeon_device; + int retcode = 0; + + DRM_DEBUG("open_count = %d\n", dev->open_count); + if (!(retcode = drm_open_helper(inode, filp, dev))) { +#if LINUX_VERSION_CODE < 0x020333 + MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */ +#endif + atomic_inc(&dev->total_open); + spin_lock(&dev->count_lock); + if (!dev->open_count++) { + spin_unlock(&dev->count_lock); + return radeon_setup(dev); + } + spin_unlock(&dev->count_lock); + } + + return retcode; +} + +int radeon_release(struct inode *inode, struct file *filp) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev; + int retcode = 0; + + lock_kernel(); + dev = priv->dev; + + 
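/* Note (descriptive, not part of the patch): the page-flip check a few
 * lines below calls radeon_do_cleanup_pageflip(), which restores the
 * CRTC_OFFSET and CRTC_OFFSET_CNTL values saved in
 * radeon_do_init_pageflip(), so a client exiting while fullscreen page
 * flipping is active does not leave the display scanning out the back
 * buffer.
 */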
DRM_DEBUG("open_count = %d\n", dev->open_count); + + /* Force the cleanup of page flipping when required */ + if ( dev->dev_private ) { + drm_radeon_private_t *dev_priv = dev->dev_private; + if ( dev_priv->page_flipping ) { + radeon_do_cleanup_pageflip( dev ); + } + } + + if (!(retcode = drm_release(inode, filp))) { +#if LINUX_VERSION_CODE < 0x020333 + MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */ +#endif + atomic_inc(&dev->total_close); + spin_lock(&dev->count_lock); + if (!--dev->open_count) { + if (atomic_read(&dev->ioctl_count) || dev->blocked) { + DRM_ERROR("Device busy: %d %d\n", + atomic_read(&dev->ioctl_count), + dev->blocked); + spin_unlock(&dev->count_lock); + unlock_kernel(); + return -EBUSY; + } + spin_unlock(&dev->count_lock); + unlock_kernel(); + return radeon_takedown(dev); + } + spin_unlock(&dev->count_lock); + } + + unlock_kernel(); + return retcode; +} + +/* radeon_ioctl is called whenever a process performs an ioctl on /dev/drm. */ + +int radeon_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int nr = DRM_IOCTL_NR(cmd); + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + int retcode = 0; + drm_ioctl_desc_t *ioctl; + drm_ioctl_t *func; + + atomic_inc(&dev->ioctl_count); + atomic_inc(&dev->total_ioctl); + ++priv->ioctl_count; + + DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n", + current->pid, cmd, nr, dev->device, priv->authenticated); + + if (nr >= RADEON_IOCTL_COUNT) { + retcode = -EINVAL; + } else { + ioctl = &radeon_ioctls[nr]; + func = ioctl->func; + + if (!func) { + DRM_DEBUG("no function\n"); + retcode = -EINVAL; + } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN)) + || (ioctl->auth_needed && !priv->authenticated)) { + retcode = -EACCES; + } else { + retcode = (func)(inode, filp, cmd, arg); + } + } + + atomic_dec(&dev->ioctl_count); + return retcode; +} + +int radeon_lock(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + DECLARE_WAITQUEUE(entry, current); + int ret = 0; + drm_lock_t lock; +#if DRM_DMA_HISTOGRAM + cycles_t start; + + dev->lck_start = start = get_cycles(); +#endif + + if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock))) + return -EFAULT; + + if (lock.context == DRM_KERNEL_CONTEXT) { + DRM_ERROR("Process %d using kernel context %d\n", + current->pid, lock.context); + return -EINVAL; + } + + DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", + lock.context, current->pid, dev->lock.hw_lock->lock, + lock.flags); + + if (lock.context < 0 /* || lock.context >= dev->queue_count */) + return -EINVAL; + + if (!ret) { + add_wait_queue(&dev->lock.lock_queue, &entry); + for (;;) { + current->state = TASK_INTERRUPTIBLE; + if (!dev->lock.hw_lock) { + /* Device has been unregistered */ + ret = -EINTR; + break; + } + if (drm_lock_take(&dev->lock.hw_lock->lock, + lock.context)) { + dev->lock.pid = current->pid; + dev->lock.lock_time = jiffies; + atomic_inc(&dev->total_locks); + break; /* Got lock */ + } + + /* Contention */ + atomic_inc(&dev->total_sleeps); + schedule(); + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + } + current->state = TASK_RUNNING; + remove_wait_queue(&dev->lock.lock_queue, &entry); + } + + if (!ret) { + sigemptyset(&dev->sigmask); + sigaddset(&dev->sigmask, SIGSTOP); + sigaddset(&dev->sigmask, SIGTSTP); + sigaddset(&dev->sigmask, SIGTTIN); + sigaddset(&dev->sigmask, SIGTTOU); + 
dev->sigdata.context = lock.context; + dev->sigdata.lock = dev->lock.hw_lock; + block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); + if (lock.flags & _DRM_LOCK_READY) { + /* Wait for space in DMA/FIFO */ + } + if (lock.flags & _DRM_LOCK_QUIESCENT) { + /* Make hardware quiescent */ + DRM_DEBUG("not quiescent!\n"); +#if 0 + radeon_quiescent(dev); +#endif + } + } + +#if LINUX_VERSION_CODE < 0x020400 + if (lock.context != radeon_res_ctx.handle) { + current->counter = 5; + current->priority = DEF_PRIORITY/4; + } +#endif + DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock"); + +#if DRM_DMA_HISTOGRAM + atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]); +#endif + + return ret; +} + + +int radeon_unlock(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_lock_t lock; + + if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock))) + return -EFAULT; + + if (lock.context == DRM_KERNEL_CONTEXT) { + DRM_ERROR("Process %d using kernel context %d\n", + current->pid, lock.context); + return -EINVAL; + } + + DRM_DEBUG("%d frees lock (%d holds)\n", + lock.context, + _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); + atomic_inc(&dev->total_unlocks); + if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock)) + atomic_inc(&dev->total_contends); + drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT); + /* FIXME: Try to send data to card here */ + if (!dev->context_flag) { + if (drm_lock_free(dev, &dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT)) { + DRM_ERROR("\n"); + } + } + +#if LINUX_VERSION_CODE < 0x020400 + if (lock.context != radeon_res_ctx.handle) { + current->counter = 5; + current->priority = DEF_PRIORITY; + } +#endif + unblock_all_signals(); + return 0; +} diff --git a/linux/radeon_drv.h b/linux/radeon_drv.h new file mode 100644 index 000000000..06b541991 --- /dev/null +++ b/linux/radeon_drv.h @@ -0,0 +1,709 @@ +/* radeon_drv.h -- Private header for radeon driver -*- linux-c -*- + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Fremont, California. + * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> + * Kevin E. 
Martin <martin@valinux.com> + * Gareth Hughes <gareth@valinux.com> + * + */ + +#ifndef __RADEON_DRV_H__ +#define __RADEON_DRV_H__ + +typedef struct drm_radeon_freelist { + unsigned int age; + drm_buf_t *buf; + struct drm_radeon_freelist *next; + struct drm_radeon_freelist *prev; +} drm_radeon_freelist_t; + +typedef struct drm_radeon_ring_buffer { + u32 *start; + u32 *end; + int size; + int size_l2qw; + + volatile u32 *head; + u32 tail; + u32 tail_mask; + int space; +} drm_radeon_ring_buffer_t; + +typedef struct drm_radeon_depth_clear_t { + u32 rb3d_cntl; + u32 rb3d_zstencilcntl; + u32 se_cntl; +} drm_radeon_depth_clear_t; + +typedef struct drm_radeon_private { + drm_radeon_ring_buffer_t ring; + drm_radeon_sarea_t *sarea_priv; + + int agp_size; + u32 agp_vm_start; + u32 agp_buffers_offset; + + int cp_mode; + int cp_running; + + drm_radeon_freelist_t *head; + drm_radeon_freelist_t *tail; +/* FIXME: ROTATE_BUFS is a hask to cycle through bufs until freelist + code is used. Note this hides a problem with the scratch register + (used to keep track of last buffer completed) being written to before + the last buffer has actually completed rendering. */ +#define ROTATE_BUFS 1 +#if ROTATE_BUFS + int last_buf; +#endif + volatile u32 *scratch; + + int usec_timeout; + int is_pci; + + atomic_t idle_count; + + int page_flipping; + int current_page; + u32 crtc_offset; + u32 crtc_offset_cntl; + + unsigned int color_fmt; + unsigned int front_offset; + unsigned int front_pitch; + unsigned int back_offset; + unsigned int back_pitch; + + unsigned int depth_fmt; + unsigned int depth_offset; + unsigned int depth_pitch; + + u32 front_pitch_offset; + u32 back_pitch_offset; + u32 depth_pitch_offset; + + drm_radeon_depth_clear_t depth_clear; + + drm_map_t *sarea; + drm_map_t *fb; + drm_map_t *mmio; + drm_map_t *cp_ring; + drm_map_t *ring_rptr; + drm_map_t *buffers; + drm_map_t *agp_textures; +} drm_radeon_private_t; + +typedef struct drm_radeon_buf_priv { + u32 age; + int prim; + int discard; + int dispatched; + drm_radeon_freelist_t *list_entry; +} drm_radeon_buf_priv_t; + + /* radeon_drv.c */ +extern int radeon_version( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int radeon_open( struct inode *inode, struct file *filp ); +extern int radeon_release( struct inode *inode, struct file *filp ); +extern int radeon_ioctl( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int radeon_lock( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int radeon_unlock( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); + + /* radeon_cp.c */ +extern int radeon_cp_init( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int radeon_cp_start( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int radeon_cp_stop( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int radeon_cp_reset( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int radeon_cp_idle( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int radeon_engine_reset( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int radeon_fullscreen( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int radeon_cp_buffers( struct inode *inode, struct file *filp, + unsigned int 
cmd, unsigned long arg ); + +extern void radeon_freelist_reset( drm_device_t *dev ); +extern drm_buf_t *radeon_freelist_get( drm_device_t *dev ); + +extern int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n ); +extern void radeon_update_ring_snapshot( drm_radeon_private_t *dev_priv ); + +extern int radeon_do_cp_idle( drm_radeon_private_t *dev_priv ); +extern int radeon_do_cleanup_pageflip( drm_device_t *dev ); + + /* radeon_state.c */ +extern int radeon_cp_clear( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int radeon_cp_swap( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int radeon_cp_vertex( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int radeon_cp_indices( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int radeon_cp_blit( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int radeon_cp_stipple( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int radeon_cp_indirect( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); + + /* radeon_bufs.c */ +extern int radeon_addbufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int radeon_mapbufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + + /* radeon_context.c */ +extern int radeon_resctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int radeon_addctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int radeon_modctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int radeon_getctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int radeon_switchctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int radeon_newctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int radeon_rmctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + +extern int radeon_context_switch(drm_device_t *dev, int old, int new); +extern int radeon_context_switch_complete(drm_device_t *dev, int new); + + +/* Register definitions, register access macros and drmAddMap constants + * for Radeon kernel driver. 
+ */ + +#define RADEON_AUX_SCISSOR_CNTL 0x26f0 +# define RADEON_EXCLUSIVE_SCISSOR_0 (1 << 24) +# define RADEON_EXCLUSIVE_SCISSOR_1 (1 << 25) +# define RADEON_EXCLUSIVE_SCISSOR_2 (1 << 26) +# define RADEON_SCISSOR_0_ENABLE (1 << 28) +# define RADEON_SCISSOR_1_ENABLE (1 << 29) +# define RADEON_SCISSOR_2_ENABLE (1 << 30) + +#define RADEON_BUS_CNTL 0x0030 +# define RADEON_BUS_MASTER_DIS (1 << 6) + +#define RADEON_CLOCK_CNTL_DATA 0x000c +# define RADEON_PLL_WR_EN (1 << 7) +#define RADEON_CLOCK_CNTL_INDEX 0x0008 +#define RADEON_CONFIG_APER_SIZE 0x0108 +#define RADEON_CRTC_OFFSET 0x0224 +#define RADEON_CRTC_OFFSET_CNTL 0x0228 +# define RADEON_CRTC_TILE_EN (1 << 15) +# define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16) + +#define RADEON_RB3D_COLORPITCH 0x1c48 +#define RADEON_RB3D_DEPTHCLEARVALUE 0x1c30 +#define RADEON_RB3D_DEPTHXY_OFFSET 0x1c60 + +#define RADEON_DP_GUI_MASTER_CNTL 0x146c +# define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0) +# define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1) +# define RADEON_GMC_BRUSH_SOLID_COLOR (13 << 4) +# define RADEON_GMC_BRUSH_NONE (15 << 4) +# define RADEON_GMC_DST_16BPP (4 << 8) +# define RADEON_GMC_DST_24BPP (5 << 8) +# define RADEON_GMC_DST_32BPP (6 << 8) +# define RADEON_GMC_DST_DATATYPE_SHIFT 8 +# define RADEON_GMC_SRC_DATATYPE_COLOR (3 << 12) +# define RADEON_DP_SRC_SOURCE_MEMORY (2 << 24) +# define RADEON_DP_SRC_SOURCE_HOST_DATA (3 << 24) +# define RADEON_GMC_CLR_CMP_CNTL_DIS (1 << 28) +# define RADEON_GMC_WR_MSK_DIS (1 << 30) +# define RADEON_ROP3_S 0x00cc0000 +# define RADEON_ROP3_P 0x00f00000 +#define RADEON_DP_WRITE_MASK 0x16cc +#define RADEON_DST_PITCH_OFFSET 0x142c +#define RADEON_DST_PITCH_OFFSET_C 0x1c80 +# define RADEON_DST_TILE_LINEAR (0 << 30) +# define RADEON_DST_TILE_MACRO (1 << 30) +# define RADEON_DST_TILE_MICRO (2 << 30) +# define RADEON_DST_TILE_BOTH (3 << 30) + +#define RADEON_SCRATCH_REG0 0x15e0 +#define RADEON_SCRATCH_REG1 0x15e4 +#define RADEON_SCRATCH_REG2 0x15e8 +#define RADEON_SCRATCH_REG3 0x15ec +#define RADEON_SCRATCH_REG4 0x15f0 +#define RADEON_SCRATCH_REG5 0x15f4 +#define RADEON_SCRATCH_UMSK 0x0770 +#define RADEON_SCRATCH_ADDR 0x0774 + +#define RADEON_HOST_PATH_CNTL 0x0130 +# define RADEON_HDP_SOFT_RESET (1 << 26) +# define RADEON_HDP_WC_TIMEOUT_MASK (7 << 28) +# define RADEON_HDP_WC_TIMEOUT_28BCLK (7 << 28) + +#define RADEON_ISYNC_CNTL 0x1724 +# define RADEON_ISYNC_ANY2D_IDLE3D (1 << 0) +# define RADEON_ISYNC_ANY3D_IDLE2D (1 << 1) +# define RADEON_ISYNC_TRIG2D_IDLE3D (1 << 2) +# define RADEON_ISYNC_TRIG3D_IDLE2D (1 << 3) +# define RADEON_ISYNC_WAIT_IDLEGUI (1 << 4) +# define RADEON_ISYNC_CPSCRATCH_IDLEGUI (1 << 5) + +#define RADEON_MC_AGP_LOCATION 0x014c +#define RADEON_MC_FB_LOCATION 0x0148 +#define RADEON_MCLK_CNTL 0x0012 + +#define RADEON_PP_BORDER_COLOR_0 0x1d40 +#define RADEON_PP_BORDER_COLOR_1 0x1d44 +#define RADEON_PP_BORDER_COLOR_2 0x1d48 +#define RADEON_PP_CNTL 0x1c38 +# define RADEON_SCISSOR_ENABLE (1 << 1) +#define RADEON_PP_LUM_MATRIX 0x1d00 +#define RADEON_PP_MISC 0x1c14 +#define RADEON_PP_ROT_MATRIX_0 0x1d58 +#define RADEON_PP_TXFILTER_0 0x1c54 +#define RADEON_PP_TXFILTER_1 0x1c6c +#define RADEON_PP_TXFILTER_2 0x1c84 + +#define RADEON_RB2D_DSTCACHE_CTLSTAT 0x342c +# define RADEON_RB2D_DC_FLUSH (3 << 0) +# define RADEON_RB2D_DC_FREE (3 << 2) +# define RADEON_RB2D_DC_FLUSH_ALL 0xf +# define RADEON_RB2D_DC_BUSY (1 << 31) +#define RADEON_RB3D_CNTL 0x1c3c +# define RADEON_ALPHA_BLEND_ENABLE (1 << 0) +# define RADEON_PLANE_MASK_ENABLE (1 << 1) +# define RADEON_DITHER_ENABLE (1 << 2) +# define RADEON_ROUND_ENABLE (1 << 3) 
+# define RADEON_SCALE_DITHER_ENABLE (1 << 4) +# define RADEON_DITHER_INIT (1 << 5) +# define RADEON_ROP_ENABLE (1 << 6) +# define RADEON_STENCIL_ENABLE (1 << 7) +# define RADEON_Z_ENABLE (1 << 8) +# define RADEON_DEPTH_XZ_OFFEST_ENABLE (1 << 9) +# define RADEON_ZBLOCK8 (0 << 15) +# define RADEON_ZBLOCK16 (1 << 15) +#define RADEON_RB3D_DEPTHOFFSET 0x1c24 +#define RADEON_RB3D_PLANEMASK 0x1d84 +#define RADEON_RB3D_STENCILREFMASK 0x1d7c +#define RADEON_RB3D_ZCACHE_MODE 0x3250 +#define RADEON_RB3D_ZCACHE_CTLSTAT 0x3254 +# define RADEON_RB3D_ZC_FLUSH (1 << 0) +# define RADEON_RB3D_ZC_FREE (1 << 2) +# define RADEON_RB3D_ZC_FLUSH_ALL 0x5 +# define RADEON_RB3D_ZC_BUSY (1 << 31) +#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c +# define RADEON_Z_TEST_MASK (7 << 4) +# define RADEON_Z_TEST_ALWAYS (7 << 4) +# define RADEON_STENCIL_TEST_ALWAYS (7 << 12) +# define RADEON_STENCIL_S_FAIL_KEEP (0 << 16) +# define RADEON_STENCIL_ZPASS_KEEP (0 << 20) +# define RADEON_STENCIL_ZFAIL_KEEP (0 << 20) +# define RADEON_Z_WRITE_ENABLE (1 << 30) +#define RADEON_RBBM_SOFT_RESET 0x00f0 +# define RADEON_SOFT_RESET_CP (1 << 0) +# define RADEON_SOFT_RESET_HI (1 << 1) +# define RADEON_SOFT_RESET_SE (1 << 2) +# define RADEON_SOFT_RESET_RE (1 << 3) +# define RADEON_SOFT_RESET_PP (1 << 4) +# define RADEON_SOFT_RESET_E2 (1 << 5) +# define RADEON_SOFT_RESET_RB (1 << 6) +# define RADEON_SOFT_RESET_HDP (1 << 7) +#define RADEON_RBBM_STATUS 0x0e40 +# define RADEON_RBBM_FIFOCNT_MASK 0x007f +# define RADEON_RBBM_ACTIVE (1 << 31) +#define RADEON_RE_LINE_PATTERN 0x1cd0 +#define RADEON_RE_MISC 0x26c4 +#define RADEON_RE_TOP_LEFT 0x26c0 +#define RADEON_RE_WIDTH_HEIGHT 0x1c44 +#define RADEON_RE_STIPPLE_ADDR 0x1cc8 +#define RADEON_RE_STIPPLE_DATA 0x1ccc + +#define RADEON_SCISSOR_TL_0 0x1cd8 +#define RADEON_SCISSOR_BR_0 0x1cdc +#define RADEON_SCISSOR_TL_1 0x1ce0 +#define RADEON_SCISSOR_BR_1 0x1ce4 +#define RADEON_SCISSOR_TL_2 0x1ce8 +#define RADEON_SCISSOR_BR_2 0x1cec +#define RADEON_SE_COORD_FMT 0x1c50 +#define RADEON_SE_CNTL 0x1c4c +# define RADEON_FFACE_CULL_CW (0 << 0) +# define RADEON_BFACE_SOLID (3 << 1) +# define RADEON_FFACE_SOLID (3 << 3) +# define RADEON_FLAT_SHADE_VTX_LAST (3 << 6) +# define RADEON_DIFFUSE_SHADE_FLAT (1 << 8) +# define RADEON_DIFFUSE_SHADE_GOURAUD (2 << 8) +# define RADEON_ALPHA_SHADE_FLAT (1 << 10) +# define RADEON_ALPHA_SHADE_GOURAUD (2 << 10) +# define RADEON_SPECULAR_SHADE_FLAT (1 << 12) +# define RADEON_SPECULAR_SHADE_GOURAUD (2 << 12) +# define RADEON_FOG_SHADE_FLAT (1 << 14) +# define RADEON_FOG_SHADE_GOURAUD (2 << 14) +# define RADEON_VPORT_XY_XFORM_ENABLE (1 << 24) +# define RADEON_VPORT_Z_XFORM_ENABLE (1 << 25) +# define RADEON_VTX_PIX_CENTER_OGL (1 << 27) +# define RADEON_ROUND_MODE_TRUNC (0 << 28) +# define RADEON_ROUND_PREC_8TH_PIX (1 << 30) +#define RADEON_SE_CNTL_STATUS 0x2140 +#define RADEON_SE_LINE_WIDTH 0x1db8 +#define RADEON_SE_VPORT_XSCALE 0x1d98 +#define RADEON_SURFACE_ACCESS_FLAGS 0x0bf8 +#define RADEON_SURFACE_ACCESS_CLR 0x0bfc +#define RADEON_SURFACE_CNTL 0x0b00 +# define RADEON_SURF_TRANSLATION_DIS (1 << 8) +# define RADEON_NONSURF_AP0_SWP_MASK (3 << 20) +# define RADEON_NONSURF_AP0_SWP_LITTLE (0 << 20) +# define RADEON_NONSURF_AP0_SWP_BIG16 (1 << 20) +# define RADEON_NONSURF_AP0_SWP_BIG32 (2 << 20) +# define RADEON_NONSURF_AP1_SWP_MASK (3 << 22) +# define RADEON_NONSURF_AP1_SWP_LITTLE (0 << 22) +# define RADEON_NONSURF_AP1_SWP_BIG16 (1 << 22) +# define RADEON_NONSURF_AP1_SWP_BIG32 (2 << 22) +#define RADEON_SURFACE0_INFO 0x0b0c +# define RADEON_SURF_PITCHSEL_MASK (0x1ff << 0) +# define 
RADEON_SURF_TILE_MODE_MASK (3 << 16) +# define RADEON_SURF_TILE_MODE_MACRO (0 << 16) +# define RADEON_SURF_TILE_MODE_MICRO (1 << 16) +# define RADEON_SURF_TILE_MODE_32BIT_Z (2 << 16) +# define RADEON_SURF_TILE_MODE_16BIT_Z (3 << 16) +#define RADEON_SURFACE0_LOWER_BOUND 0x0b04 +#define RADEON_SURFACE0_UPPER_BOUND 0x0b08 +#define RADEON_SURFACE1_INFO 0x0b1c +#define RADEON_SURFACE1_LOWER_BOUND 0x0b14 +#define RADEON_SURFACE1_UPPER_BOUND 0x0b18 +#define RADEON_SURFACE2_INFO 0x0b2c +#define RADEON_SURFACE2_LOWER_BOUND 0x0b24 +#define RADEON_SURFACE2_UPPER_BOUND 0x0b28 +#define RADEON_SURFACE3_INFO 0x0b3c +#define RADEON_SURFACE3_LOWER_BOUND 0x0b34 +#define RADEON_SURFACE3_UPPER_BOUND 0x0b38 +#define RADEON_SURFACE4_INFO 0x0b4c +#define RADEON_SURFACE4_LOWER_BOUND 0x0b44 +#define RADEON_SURFACE4_UPPER_BOUND 0x0b48 +#define RADEON_SURFACE5_INFO 0x0b5c +#define RADEON_SURFACE5_LOWER_BOUND 0x0b54 +#define RADEON_SURFACE5_UPPER_BOUND 0x0b58 +#define RADEON_SURFACE6_INFO 0x0b6c +#define RADEON_SURFACE6_LOWER_BOUND 0x0b64 +#define RADEON_SURFACE6_UPPER_BOUND 0x0b68 +#define RADEON_SURFACE7_INFO 0x0b7c +#define RADEON_SURFACE7_LOWER_BOUND 0x0b74 +#define RADEON_SURFACE7_UPPER_BOUND 0x0b78 +#define RADEON_SW_SEMAPHORE 0x013c + +#define RADEON_WAIT_UNTIL 0x1720 +# define RADEON_WAIT_CRTC_PFLIP (1 << 0) +# define RADEON_WAIT_2D_IDLECLEAN (1 << 16) +# define RADEON_WAIT_3D_IDLECLEAN (1 << 17) +# define RADEON_WAIT_HOST_IDLECLEAN (1 << 18) + +#define RADEON_RB3D_ZMASKOFFSET 0x1c34 +#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c +# define RADEON_DEPTH_FORMAT_16BIT_INT_Z (0 << 0) +# define RADEON_DEPTH_FORMAT_24BIT_INT_Z (2 << 0) + + +/* CP registers */ +#define RADEON_CP_ME_RAM_ADDR 0x07d4 +#define RADEON_CP_ME_RAM_RADDR 0x07d8 +#define RADEON_CP_ME_RAM_DATAH 0x07dc +#define RADEON_CP_ME_RAM_DATAL 0x07e0 + +#define RADEON_CP_RB_BASE 0x0700 +#define RADEON_CP_RB_CNTL 0x0704 +#define RADEON_CP_RB_RPTR_ADDR 0x070c +#define RADEON_CP_RB_RPTR 0x0710 +#define RADEON_CP_RB_WPTR 0x0714 + +#define RADEON_CP_RB_WPTR_DELAY 0x0718 +# define RADEON_PRE_WRITE_TIMER_SHIFT 0 +# define RADEON_PRE_WRITE_LIMIT_SHIFT 23 + +#define RADEON_CP_IB_BASE 0x0738 + +#define RADEON_CP_CSQ_CNTL 0x0740 +# define RADEON_CSQ_CNT_PRIMARY_MASK (0xff << 0) +# define RADEON_CSQ_PRIDIS_INDDIS (0 << 28) +# define RADEON_CSQ_PRIPIO_INDDIS (1 << 28) +# define RADEON_CSQ_PRIBM_INDDIS (2 << 28) +# define RADEON_CSQ_PRIPIO_INDBM (3 << 28) +# define RADEON_CSQ_PRIBM_INDBM (4 << 28) +# define RADEON_CSQ_PRIPIO_INDPIO (15 << 28) + +#define RADEON_AIC_CNTL 0x01d0 +# define RADEON_PCIGART_TRANSLATE_EN (1 << 0) + +/* CP command packets */ +#define RADEON_CP_PACKET0 0x00000000 +# define RADEON_ONE_REG_WR (1 << 15) +#define RADEON_CP_PACKET1 0x40000000 +#define RADEON_CP_PACKET2 0x80000000 +#define RADEON_CP_PACKET3 0xC0000000 +# define RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300 +# define RADEON_WAIT_FOR_IDLE 0x00002600 +# define RADEON_3D_DRAW_IMMD 0x00002900 +# define RADEON_3D_CLEAR_ZMASK 0x00003200 +# define RADEON_CNTL_HOSTDATA_BLT 0x00009400 +# define RADEON_CNTL_PAINT_MULTI 0x00009A00 +# define RADEON_CNTL_BITBLT_MULTI 0x00009B00 + +#define RADEON_CP_PACKET_MASK 0xC0000000 +#define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000 +#define RADEON_CP_PACKET0_REG_MASK 0x000007ff +#define RADEON_CP_PACKET1_REG0_MASK 0x000007ff +#define RADEON_CP_PACKET1_REG1_MASK 0x003ff800 + +#define RADEON_VTX_Z_PRESENT (1 << 31) + +#define RADEON_PRIM_TYPE_NONE (0 << 0) +#define RADEON_PRIM_TYPE_POINT (1 << 0) +#define RADEON_PRIM_TYPE_LINE (2 << 0) +#define RADEON_PRIM_TYPE_LINE_STRIP (3 
<< 0) +#define RADEON_PRIM_TYPE_TRI_LIST (4 << 0) +#define RADEON_PRIM_TYPE_TRI_FAN (5 << 0) +#define RADEON_PRIM_TYPE_TRI_STRIP (6 << 0) +#define RADEON_PRIM_TYPE_TRI_TYPE2 (7 << 0) +#define RADEON_PRIM_TYPE_RECT_LIST (8 << 0) +#define RADEON_PRIM_TYPE_3VRT_POINT_LIST (9 << 0) +#define RADEON_PRIM_TYPE_3VRT_LINE_LIST (10 << 0) +#define RADEON_PRIM_WALK_IND (1 << 4) +#define RADEON_PRIM_WALK_LIST (2 << 4) +#define RADEON_PRIM_WALK_RING (3 << 4) +#define RADEON_COLOR_ORDER_BGRA (0 << 6) +#define RADEON_COLOR_ORDER_RGBA (1 << 6) +#define RADEON_MAOS_ENABLE (1 << 7) +#define RADEON_VTX_FMT_R128_MODE (0 << 8) +#define RADEON_VTX_FMT_RADEON_MODE (1 << 8) +#define RADEON_NUM_VERTICES_SHIFT 16 + +#define RADEON_COLOR_FORMAT_CI8 2 +#define RADEON_COLOR_FORMAT_ARGB1555 3 +#define RADEON_COLOR_FORMAT_RGB565 4 +#define RADEON_COLOR_FORMAT_ARGB8888 6 +#define RADEON_COLOR_FORMAT_RGB332 7 +#define RADEON_COLOR_FORMAT_RGB8 9 +#define RADEON_COLOR_FORMAT_ARGB4444 15 + +#define RADEON_TXF_8BPP_I 0 +#define RADEON_TXF_16BPP_AI88 1 +#define RADEON_TXF_8BPP_RGB332 2 +#define RADEON_TXF_16BPP_ARGB1555 3 +#define RADEON_TXF_16BPP_RGB565 4 +#define RADEON_TXF_16BPP_ARGB4444 5 +#define RADEON_TXF_32BPP_ARGB8888 6 +#define RADEON_TXF_32BPP_RGBA8888 7 + +/* Constants */ +#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ + +#define RADEON_LAST_FRAME_REG RADEON_SCRATCH_REG0 +#define RADEON_LAST_DISPATCH_REG RADEON_SCRATCH_REG1 +#define RADEON_LAST_CLEAR_REG RADEON_SCRATCH_REG2 +#define RADEON_LAST_DISPATCH 1 + +#define RADEON_MAX_VB_AGE 0x7fffffff +#define RADEON_MAX_VB_VERTS (0xffff) + + +#define RADEON_BASE(reg) ((u32)(dev_priv->mmio->handle)) +#define RADEON_ADDR(reg) (RADEON_BASE(reg) + reg) + +#define RADEON_DEREF(reg) *(__volatile__ u32 *)RADEON_ADDR(reg) +#define RADEON_READ(reg) RADEON_DEREF(reg) +#define RADEON_WRITE(reg,val) do { RADEON_DEREF(reg) = val; } while (0) + +#define RADEON_DEREF8(reg) *(__volatile__ u8 *)RADEON_ADDR(reg) +#define RADEON_READ8(reg) RADEON_DEREF8(reg) +#define RADEON_WRITE8(reg,val) do { RADEON_DEREF8(reg) = val; } while (0) + +#define RADEON_WRITE_PLL(addr,val) \ +do { \ + RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, \ + ((addr) & 0x1f) | RADEON_PLL_WR_EN); \ + RADEON_WRITE(RADEON_CLOCK_CNTL_DATA, (val)); \ +} while (0) + +extern int RADEON_READ_PLL(drm_device_t *dev, int addr); + + + +#define CP_PACKET0( reg, n ) \ + (RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2)) +#define CP_PACKET0_TABLE( reg, n ) \ + (RADEON_CP_PACKET0 | RADEON_ONE_REG_WR | ((n) << 16) | ((reg) >> 2)) +#define CP_PACKET1( reg0, reg1 ) \ + (RADEON_CP_PACKET1 | (((reg1) >> 2) << 15) | ((reg0) >> 2)) +#define CP_PACKET2() \ + (RADEON_CP_PACKET2) +#define CP_PACKET3( pkt, n ) \ + (RADEON_CP_PACKET3 | (pkt) | ((n) << 16)) + + +/* ================================================================ + * Engine control helper macros + */ + +#define RADEON_WAIT_UNTIL_2D_IDLE() \ +do { \ + OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \ + OUT_RING( (RADEON_WAIT_2D_IDLECLEAN | \ + RADEON_WAIT_HOST_IDLECLEAN) ); \ +} while (0) + +#define RADEON_WAIT_UNTIL_3D_IDLE() \ +do { \ + OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \ + OUT_RING( (RADEON_WAIT_3D_IDLECLEAN | \ + RADEON_WAIT_HOST_IDLECLEAN) ); \ +} while (0) + +#define RADEON_WAIT_UNTIL_IDLE() \ +do { \ + OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \ + OUT_RING( (RADEON_WAIT_2D_IDLECLEAN | \ + RADEON_WAIT_3D_IDLECLEAN | \ + RADEON_WAIT_HOST_IDLECLEAN) ); \ +} while (0) + +#define RADEON_WAIT_UNTIL_PAGE_FLIPPED() \ +do { \ + OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 
) ); \ + OUT_RING( RADEON_WAIT_CRTC_PFLIP ); \ +} while (0) + +#define RADEON_FLUSH_CACHE() \ +do { \ + OUT_RING( CP_PACKET0( RADEON_RB2D_DSTCACHE_CTLSTAT, 0 ) ); \ + OUT_RING( RADEON_RB2D_DC_FLUSH ); \ +} while (0) + +#define RADEON_PURGE_CACHE() \ +do { \ + OUT_RING( CP_PACKET0( RADEON_RB2D_DSTCACHE_CTLSTAT, 0 ) ); \ + OUT_RING( RADEON_RB2D_DC_FLUSH_ALL ); \ +} while (0) + +#define RADEON_FLUSH_ZCACHE() \ +do { \ + OUT_RING( CP_PACKET0( RADEON_RB3D_ZCACHE_CTLSTAT, 0 ) ); \ + OUT_RING( RADEON_RB3D_ZC_FLUSH ); \ +} while (0) + +#define RADEON_PURGE_ZCACHE() \ +do { \ + OUT_RING( CP_PACKET0( RADEON_RB3D_ZCACHE_CTLSTAT, 0 ) ); \ + OUT_RING( RADEON_RB3D_ZC_FLUSH_ALL ); \ +} while (0) + + +/* ================================================================ + * Misc helper macros + */ + +#define VB_AGE_CHECK_WITH_RET( dev_priv ) \ +do { \ + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; \ + if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) { \ + int __ret = radeon_do_cp_idle( dev_priv ); \ + if ( __ret < 0 ) return __ret; \ + sarea_priv->last_dispatch = 0; \ + radeon_freelist_reset( dev ); \ + } \ +} while (0) + +#define RADEON_DISPATCH_AGE( age ) \ +do { \ + OUT_RING( CP_PACKET0( RADEON_LAST_DISPATCH_REG, 0 ) ); \ + OUT_RING( age ); \ +} while (0) + +#define RADEON_FRAME_AGE( age ) \ +do { \ + OUT_RING( CP_PACKET0( RADEON_LAST_FRAME_REG, 0 ) ); \ + OUT_RING( age ); \ +} while (0) + +#define RADEON_CLEAR_AGE( age ) \ +do { \ + OUT_RING( CP_PACKET0( RADEON_LAST_CLEAR_REG, 0 ) ); \ + OUT_RING( age ); \ +} while (0) + + +/* ================================================================ + * Ring control + */ + +#define radeon_flush_write_combine() mb() + + +#define RADEON_VERBOSE 0 + +#define RING_LOCALS int write; unsigned int mask; volatile u32 *ring; + +#define BEGIN_RING( n ) do { \ + if ( RADEON_VERBOSE ) { \ + DRM_INFO( "BEGIN_RING( %d ) in %s\n", \ + n, __FUNCTION__ ); \ + } \ + if ( dev_priv->ring.space < (n) * sizeof(u32) ) { \ + radeon_wait_ring( dev_priv, (n) * sizeof(u32) ); \ + } \ + dev_priv->ring.space -= (n) * sizeof(u32); \ + ring = dev_priv->ring.start; \ + write = dev_priv->ring.tail; \ + mask = dev_priv->ring.tail_mask; \ +} while (0) + +#define ADVANCE_RING() do { \ + if ( RADEON_VERBOSE ) { \ + DRM_INFO( "ADVANCE_RING() tail=0x%06x wr=0x%06x\n", \ + write, dev_priv->ring.tail ); \ + } \ + radeon_flush_write_combine(); \ + dev_priv->ring.tail = write; \ + RADEON_WRITE( RADEON_CP_RB_WPTR, write ); \ +} while (0) + +#define OUT_RING( x ) do { \ + if ( RADEON_VERBOSE ) { \ + DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \ + (unsigned int)(x), write ); \ + } \ + ring[write++] = (x); \ + write &= mask; \ +} while (0) + +#define RADEON_PERFORMANCE_BOXES 0 + +#endif /* __RADEON_DRV_H__ */ diff --git a/linux/radeon_state.c b/linux/radeon_state.c new file mode 100644 index 000000000..7bfefb2ca --- /dev/null +++ b/linux/radeon_state.c @@ -0,0 +1,1447 @@ +/* radeon_state.c -- State support for Radeon -*- linux-c -*- + * + * Copyright 2000 VA Linux Systems, Inc., Fremont, California. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Kevin E. Martin <martin@valinux.com> + * Gareth Hughes <gareth@valinux.com> + * + */ + +#define __NO_VERSION__ +#include "drmP.h" +#include "radeon_drv.h" +#include "drm.h" +#include <linux/delay.h> + + +/* ================================================================ + * CP hardware state programming functions + */ + +static inline void radeon_emit_clip_rect( drm_radeon_private_t *dev_priv, + drm_clip_rect_t *box ) +{ + RING_LOCALS; + + DRM_DEBUG( " box: x1=%d y1=%d x2=%d y2=%d\n", + box->x1, box->y1, box->x2, box->y2 ); + + BEGIN_RING( 4 ); + + OUT_RING( CP_PACKET0( RADEON_RE_TOP_LEFT, 0 ) ); + OUT_RING( (box->y1 << 16) | box->x1 ); + + OUT_RING( CP_PACKET0( RADEON_RE_WIDTH_HEIGHT, 0 ) ); + OUT_RING( ((box->y2 - 1) << 16) | (box->x2 - 1) ); + + ADVANCE_RING(); +} + +static inline void radeon_emit_context( drm_radeon_private_t *dev_priv ) +{ + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; + RING_LOCALS; + DRM_DEBUG( " %s\n", __FUNCTION__ ); + + BEGIN_RING( 14 ); + + OUT_RING( CP_PACKET0( RADEON_PP_MISC, 6 ) ); + OUT_RING( ctx->pp_misc ); + OUT_RING( ctx->pp_fog_color ); + OUT_RING( ctx->re_solid_color ); + OUT_RING( ctx->rb3d_blendcntl ); + OUT_RING( ctx->rb3d_depthoffset ); + OUT_RING( ctx->rb3d_depthpitch ); + OUT_RING( ctx->rb3d_zstencilcntl ); + + OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 2 ) ); + OUT_RING( ctx->pp_cntl ); + OUT_RING( ctx->rb3d_cntl ); + OUT_RING( ctx->rb3d_coloroffset ); + + OUT_RING( CP_PACKET0( RADEON_RB3D_COLORPITCH, 0 ) ); + OUT_RING( ctx->rb3d_colorpitch ); + + ADVANCE_RING(); +} + +static inline void radeon_emit_vertfmt( drm_radeon_private_t *dev_priv ) +{ + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; + RING_LOCALS; + DRM_DEBUG( " %s\n", __FUNCTION__ ); + + BEGIN_RING( 2 ); + + OUT_RING( CP_PACKET0( RADEON_SE_COORD_FMT, 0 ) ); + OUT_RING( ctx->se_coord_fmt ); + + ADVANCE_RING(); +} + +static inline void radeon_emit_line( drm_radeon_private_t *dev_priv ) +{ + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; + RING_LOCALS; + DRM_DEBUG( " %s\n", __FUNCTION__ ); + + BEGIN_RING( 5 ); + + OUT_RING( CP_PACKET0( RADEON_RE_LINE_PATTERN, 1 ) ); + OUT_RING( ctx->re_line_pattern ); + OUT_RING( ctx->re_line_state ); + + OUT_RING( 
CP_PACKET0( RADEON_SE_LINE_WIDTH, 0 ) ); + OUT_RING( ctx->se_line_width ); + + ADVANCE_RING(); +} + +static inline void radeon_emit_bumpmap( drm_radeon_private_t *dev_priv ) +{ + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; + RING_LOCALS; + DRM_DEBUG( " %s\n", __FUNCTION__ ); + + BEGIN_RING( 5 ); + + OUT_RING( CP_PACKET0( RADEON_PP_LUM_MATRIX, 0 ) ); + OUT_RING( ctx->pp_lum_matrix ); + + OUT_RING( CP_PACKET0( RADEON_PP_ROT_MATRIX_0, 1 ) ); + OUT_RING( ctx->pp_rot_matrix_0 ); + OUT_RING( ctx->pp_rot_matrix_1 ); + + ADVANCE_RING(); +} + +static inline void radeon_emit_masks( drm_radeon_private_t *dev_priv ) +{ + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; + RING_LOCALS; + DRM_DEBUG( " %s\n", __FUNCTION__ ); + + BEGIN_RING( 4 ); + + OUT_RING( CP_PACKET0( RADEON_RB3D_STENCILREFMASK, 2 ) ); + OUT_RING( ctx->rb3d_stencilrefmask ); + OUT_RING( ctx->rb3d_ropcntl ); + OUT_RING( ctx->rb3d_planemask ); + + ADVANCE_RING(); +} + +static inline void radeon_emit_viewport( drm_radeon_private_t *dev_priv ) +{ + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; + RING_LOCALS; + DRM_DEBUG( " %s\n", __FUNCTION__ ); + + BEGIN_RING( 7 ); + + OUT_RING( CP_PACKET0( RADEON_SE_VPORT_XSCALE, 5 ) ); + OUT_RING( ctx->se_vport_xscale ); + OUT_RING( ctx->se_vport_xoffset ); + OUT_RING( ctx->se_vport_yscale ); + OUT_RING( ctx->se_vport_yoffset ); + OUT_RING( ctx->se_vport_zscale ); + OUT_RING( ctx->se_vport_zoffset ); + + ADVANCE_RING(); +} + +static inline void radeon_emit_setup( drm_radeon_private_t *dev_priv ) +{ + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; + RING_LOCALS; + DRM_DEBUG( " %s\n", __FUNCTION__ ); + + BEGIN_RING( 4 ); + + OUT_RING( CP_PACKET0( RADEON_SE_CNTL, 0 ) ); + OUT_RING( ctx->se_cntl ); + OUT_RING( CP_PACKET0( RADEON_SE_CNTL_STATUS, 0 ) ); + OUT_RING( ctx->se_cntl_status ); + + ADVANCE_RING(); +} + +static inline void radeon_emit_tcl( drm_radeon_private_t *dev_priv ) +{ +#ifdef TCL_ENABLE + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; + RING_LOCALS; + DRM_DEBUG( " %s\n", __FUNCTION__ ); + + BEGIN_RING( 29 ); + + OUT_RING( CP_PACKET0( RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 27 ) ); + OUT_RING( ctx->se_tcl_material_emmissive.red ); + OUT_RING( ctx->se_tcl_material_emmissive.green ); + OUT_RING( ctx->se_tcl_material_emmissive.blue ); + OUT_RING( ctx->se_tcl_material_emmissive.alpha ); + OUT_RING( ctx->se_tcl_material_ambient.red ); + OUT_RING( ctx->se_tcl_material_ambient.green ); + OUT_RING( ctx->se_tcl_material_ambient.blue ); + OUT_RING( ctx->se_tcl_material_ambient.alpha ); + OUT_RING( ctx->se_tcl_material_diffuse.red ); + OUT_RING( ctx->se_tcl_material_diffuse.green ); + OUT_RING( ctx->se_tcl_material_diffuse.blue ); + OUT_RING( ctx->se_tcl_material_diffuse.alpha ); + OUT_RING( ctx->se_tcl_material_specular.red ); + OUT_RING( ctx->se_tcl_material_specular.green ); + OUT_RING( ctx->se_tcl_material_specular.blue ); + OUT_RING( ctx->se_tcl_material_specular.alpha ); + OUT_RING( ctx->se_tcl_shininess ); + OUT_RING( ctx->se_tcl_output_vtx_fmt ); + OUT_RING( ctx->se_tcl_output_vtx_sel ); + OUT_RING( ctx->se_tcl_matrix_select_0 ); + OUT_RING( ctx->se_tcl_matrix_select_1 ); + OUT_RING( ctx->se_tcl_ucp_vert_blend_ctl ); + OUT_RING( 
ctx->se_tcl_texture_proc_ctl ); + OUT_RING( ctx->se_tcl_light_model_ctl ); + for ( i = 0 ; i < 4 ; i++ ) { + OUT_RING( ctx->se_tcl_per_light_ctl[i] ); + } + + ADVANCE_RING(); +#else + DRM_ERROR( "TCL not enabled!\n" ); +#endif +} + +static inline void radeon_emit_misc( drm_radeon_private_t *dev_priv ) +{ + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; + RING_LOCALS; + DRM_DEBUG( " %s\n", __FUNCTION__ ); + + BEGIN_RING( 2 ); + + OUT_RING( CP_PACKET0( RADEON_RE_MISC, 0 ) ); + OUT_RING( ctx->re_misc ); + + ADVANCE_RING(); +} + +static inline void radeon_emit_tex0( drm_radeon_private_t *dev_priv ) +{ + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_texture_regs_t *tex = &sarea_priv->tex_state[0]; + RING_LOCALS; + DRM_DEBUG( " %s: offset=0x%x\n", __FUNCTION__, tex->pp_txoffset ); + + BEGIN_RING( 9 ); + + OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_0, 5 ) ); + OUT_RING( tex->pp_txfilter ); + OUT_RING( tex->pp_txformat ); + OUT_RING( tex->pp_txoffset ); + OUT_RING( tex->pp_txcblend ); + OUT_RING( tex->pp_txablend ); + OUT_RING( tex->pp_tfactor ); + + OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_0, 0 ) ); + OUT_RING( tex->pp_border_color ); + + ADVANCE_RING(); +} + +static inline void radeon_emit_tex1( drm_radeon_private_t *dev_priv ) +{ + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_texture_regs_t *tex = &sarea_priv->tex_state[1]; + RING_LOCALS; + DRM_DEBUG( " %s: offset=0x%x\n", __FUNCTION__, tex->pp_txoffset ); + + BEGIN_RING( 9 ); + + OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_1, 5 ) ); + OUT_RING( tex->pp_txfilter ); + OUT_RING( tex->pp_txformat ); + OUT_RING( tex->pp_txoffset ); + OUT_RING( tex->pp_txcblend ); + OUT_RING( tex->pp_txablend ); + OUT_RING( tex->pp_tfactor ); + + OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_1, 0 ) ); + OUT_RING( tex->pp_border_color ); + + ADVANCE_RING(); +} + +static inline void radeon_emit_tex2( drm_radeon_private_t *dev_priv ) +{ + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_texture_regs_t *tex = &sarea_priv->tex_state[2]; + RING_LOCALS; + DRM_DEBUG( " %s\n", __FUNCTION__ ); + + BEGIN_RING( 9 ); + + OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_2, 5 ) ); + OUT_RING( tex->pp_txfilter ); + OUT_RING( tex->pp_txformat ); + OUT_RING( tex->pp_txoffset ); + OUT_RING( tex->pp_txcblend ); + OUT_RING( tex->pp_txablend ); + OUT_RING( tex->pp_tfactor ); + + OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_2, 0 ) ); + OUT_RING( tex->pp_border_color ); + + ADVANCE_RING(); +} + +static inline void radeon_emit_state( drm_radeon_private_t *dev_priv ) +{ + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + unsigned int dirty = sarea_priv->dirty; + + DRM_DEBUG( "%s: dirty=0x%08x\n", __FUNCTION__, dirty ); + + if ( dirty & RADEON_UPLOAD_CONTEXT ) { + radeon_emit_context( dev_priv ); + sarea_priv->dirty &= ~RADEON_UPLOAD_CONTEXT; + } + + if ( dirty & RADEON_UPLOAD_VERTFMT ) { + radeon_emit_vertfmt( dev_priv ); + sarea_priv->dirty &= ~RADEON_UPLOAD_VERTFMT; + } + + if ( dirty & RADEON_UPLOAD_LINE ) { + radeon_emit_line( dev_priv ); + sarea_priv->dirty &= ~RADEON_UPLOAD_LINE; + } + + if ( dirty & RADEON_UPLOAD_BUMPMAP ) { + radeon_emit_bumpmap( dev_priv ); + sarea_priv->dirty &= ~RADEON_UPLOAD_BUMPMAP; + } + + if ( dirty & RADEON_UPLOAD_MASKS ) { + radeon_emit_masks( dev_priv ); + sarea_priv->dirty &= ~RADEON_UPLOAD_MASKS; + } + + if ( dirty & RADEON_UPLOAD_VIEWPORT ) { + radeon_emit_viewport( dev_priv ); + sarea_priv->dirty &= ~RADEON_UPLOAD_VIEWPORT; 
+ } + + if ( dirty & RADEON_UPLOAD_SETUP ) { + radeon_emit_setup( dev_priv ); + sarea_priv->dirty &= ~RADEON_UPLOAD_SETUP; + } + + if ( dirty & RADEON_UPLOAD_TCL ) { +#ifdef TCL_ENABLE + radeon_emit_tcl( dev_priv ); +#endif + sarea_priv->dirty &= ~RADEON_UPLOAD_TCL; + } + + if ( dirty & RADEON_UPLOAD_MISC ) { + radeon_emit_misc( dev_priv ); + sarea_priv->dirty &= ~RADEON_UPLOAD_MISC; + } + + if ( dirty & RADEON_UPLOAD_TEX0 ) { + radeon_emit_tex0( dev_priv ); + sarea_priv->dirty &= ~RADEON_UPLOAD_TEX0; + } + + if ( dirty & RADEON_UPLOAD_TEX1 ) { + radeon_emit_tex1( dev_priv ); + sarea_priv->dirty &= ~RADEON_UPLOAD_TEX1; + } + + if ( dirty & RADEON_UPLOAD_TEX2 ) { +#if 0 + radeon_emit_tex2( dev_priv ); +#endif + sarea_priv->dirty &= ~RADEON_UPLOAD_TEX2; + } + + sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | + RADEON_UPLOAD_TEX1IMAGES | + RADEON_UPLOAD_TEX2IMAGES | + RADEON_REQUIRE_QUIESCENCE); +} + + +#if RADEON_PERFORMANCE_BOXES +/* ================================================================ + * Performance monitoring functions + */ + +static void radeon_clear_box( drm_radeon_private_t *dev_priv, + int x, int y, int w, int h, + int r, int g, int b ) +{ + u32 pitch, offset; + u32 color; + RING_LOCALS; + + switch ( dev_priv->color_fmt ) { + case RADEON_COLOR_FORMAT_RGB565: + color = (((r & 0xf8) << 8) | + ((g & 0xfc) << 3) | + ((b & 0xf8) >> 3)); + break; + case RADEON_COLOR_FORMAT_ARGB8888: + default: + color = (((0xff) << 24) | (r << 16) | (g << 8) | b); + break; + } + + offset = dev_priv->back_offset; + pitch = dev_priv->back_pitch >> 3; + + BEGIN_RING( 6 ); + + OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) ); + OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL | + RADEON_GMC_BRUSH_SOLID_COLOR | + (dev_priv->color_fmt << 8) | + RADEON_GMC_SRC_DATATYPE_COLOR | + RADEON_ROP3_P | + RADEON_GMC_CLR_CMP_CNTL_DIS ); + + OUT_RING( (pitch << 22) | (offset >> 5) ); + OUT_RING( color ); + + OUT_RING( (x << 16) | y ); + OUT_RING( (w << 16) | h ); + + ADVANCE_RING(); +} + +static void radeon_cp_performance_boxes( drm_radeon_private_t *dev_priv ) +{ + if ( atomic_read( &dev_priv->idle_count ) == 0 ) { + radeon_clear_box( dev_priv, 64, 4, 8, 8, 0, 255, 0 ); + } else { + atomic_set( &dev_priv->idle_count, 0 ); + } +} + +#endif + + +/* ================================================================ + * CP command dispatch functions + */ + +static void radeon_print_dirty( const char *msg, unsigned int flags ) +{ + DRM_DEBUG( "%s: (0x%x) %s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", + msg, + flags, + (flags & RADEON_UPLOAD_CONTEXT) ? "context, " : "", + (flags & RADEON_UPLOAD_VERTFMT) ? "vertfmt, " : "", + (flags & RADEON_UPLOAD_LINE) ? "line, " : "", + (flags & RADEON_UPLOAD_BUMPMAP) ? "bumpmap, " : "", + (flags & RADEON_UPLOAD_MASKS) ? "masks, " : "", + (flags & RADEON_UPLOAD_VIEWPORT) ? "viewport, " : "", + (flags & RADEON_UPLOAD_SETUP) ? "setup, " : "", + (flags & RADEON_UPLOAD_TCL) ? "tcl, " : "", + (flags & RADEON_UPLOAD_MISC) ? "misc, " : "", + (flags & RADEON_UPLOAD_TEX0) ? "tex0, " : "", + (flags & RADEON_UPLOAD_TEX1) ? "tex1, " : "", + (flags & RADEON_UPLOAD_TEX2) ? "tex2, " : "", + (flags & RADEON_UPLOAD_CLIPRECTS) ? "cliprects, " : "", + (flags & RADEON_REQUIRE_QUIESCENCE) ? 
"quiescence, " : "" ); +} + +static void radeon_cp_dispatch_clear( drm_device_t *dev, + drm_radeon_clear_t *clear ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + int nbox = sarea_priv->nbox; + drm_clip_rect_t *pbox = sarea_priv->boxes; + unsigned int flags = clear->flags; + int i; + RING_LOCALS; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + radeon_update_ring_snapshot( dev_priv ); + + if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) { + unsigned int tmp = flags; + + flags &= ~(RADEON_FRONT | RADEON_BACK); + if ( tmp & RADEON_FRONT ) flags |= RADEON_BACK; + if ( tmp & RADEON_BACK ) flags |= RADEON_FRONT; + } + + for ( i = 0 ; i < nbox ; i++ ) { + int x = pbox[i].x1; + int y = pbox[i].y1; + int w = pbox[i].x2 - x; + int h = pbox[i].y2 - y; + + DRM_DEBUG( "dispatch clear %d,%d-%d,%d flags 0x%x\n", + x, y, w, h, flags ); + + if ( flags & (RADEON_FRONT | RADEON_BACK) ) { + BEGIN_RING( 4 ); + + /* Ensure the 3D stream is idle before doing a + * 2D fill to clear the front or back buffer. + */ + RADEON_WAIT_UNTIL_3D_IDLE(); + + OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) ); + OUT_RING( sarea_priv->context_state.rb3d_planemask ); + + ADVANCE_RING(); + + /* Make sure we restore the 3D state next time. + */ + dev_priv->sarea_priv->dirty |= (RADEON_UPLOAD_CONTEXT | + RADEON_UPLOAD_MASKS); + } + + if ( flags & RADEON_FRONT ) { + BEGIN_RING( 6 ); + + OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) ); + OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL | + RADEON_GMC_BRUSH_SOLID_COLOR | + (dev_priv->color_fmt << 8) | + RADEON_GMC_SRC_DATATYPE_COLOR | + RADEON_ROP3_P | + RADEON_GMC_CLR_CMP_CNTL_DIS ); + + OUT_RING( dev_priv->front_pitch_offset ); + OUT_RING( clear->clear_color ); + + OUT_RING( (x << 16) | y ); + OUT_RING( (w << 16) | h ); + + ADVANCE_RING(); + } + + if ( flags & RADEON_BACK ) { + BEGIN_RING( 6 ); + + OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) ); + OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL | + RADEON_GMC_BRUSH_SOLID_COLOR | + (dev_priv->color_fmt << 8) | + RADEON_GMC_SRC_DATATYPE_COLOR | + RADEON_ROP3_P | + RADEON_GMC_CLR_CMP_CNTL_DIS ); + + OUT_RING( dev_priv->back_pitch_offset ); + OUT_RING( clear->clear_color ); + + OUT_RING( (x << 16) | y ); + OUT_RING( (w << 16) | h ); + + ADVANCE_RING(); + + } + + if ( flags & RADEON_DEPTH ) { + drm_radeon_depth_clear_t *depth_clear = + &dev_priv->depth_clear; + + if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) { + radeon_emit_state( dev_priv ); + } + + /* FIXME: Render a rectangle to clear the depth + * buffer. So much for those "fast Z clears"... 
+ */ + BEGIN_RING( 23 ); + + RADEON_WAIT_UNTIL_2D_IDLE(); + + OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 1 ) ); + OUT_RING( 0x00000000 ); + OUT_RING( depth_clear->rb3d_cntl ); + OUT_RING( CP_PACKET0( RADEON_RB3D_ZSTENCILCNTL, 0 ) ); + OUT_RING( depth_clear->rb3d_zstencilcntl ); + OUT_RING( CP_PACKET0( RADEON_RB3D_PLANEMASK, 0 ) ); + OUT_RING( 0x00000000 ); + OUT_RING( CP_PACKET0( RADEON_SE_CNTL, 0 ) ); + OUT_RING( depth_clear->se_cntl ); + + OUT_RING( CP_PACKET3( RADEON_3D_DRAW_IMMD, 10 ) ); + OUT_RING( RADEON_VTX_Z_PRESENT ); + OUT_RING( (RADEON_PRIM_TYPE_RECT_LIST | + RADEON_PRIM_WALK_RING | + RADEON_MAOS_ENABLE | + RADEON_VTX_FMT_RADEON_MODE | + (3 << RADEON_NUM_VERTICES_SHIFT)) ); + + OUT_RING( clear->rect.ui[CLEAR_X1] ); + OUT_RING( clear->rect.ui[CLEAR_Y1] ); + OUT_RING( clear->rect.ui[CLEAR_DEPTH] ); + + OUT_RING( clear->rect.ui[CLEAR_X1] ); + OUT_RING( clear->rect.ui[CLEAR_Y2] ); + OUT_RING( clear->rect.ui[CLEAR_DEPTH] ); + + OUT_RING( clear->rect.ui[CLEAR_X2] ); + OUT_RING( clear->rect.ui[CLEAR_Y2] ); + OUT_RING( clear->rect.ui[CLEAR_DEPTH] ); + + ADVANCE_RING(); + + /* Make sure we restore the 3D state next time. + */ + dev_priv->sarea_priv->dirty |= (RADEON_UPLOAD_CONTEXT | + RADEON_UPLOAD_SETUP | + RADEON_UPLOAD_MASKS); + } + } + + /* Increment the clear counter. The client-side 3D driver must + * wait on this value before performing the clear ioctl. We + * need this because the card's so damned fast... + */ + dev_priv->sarea_priv->last_clear++; + + BEGIN_RING( 4 ); + + RADEON_CLEAR_AGE( dev_priv->sarea_priv->last_clear ); + RADEON_WAIT_UNTIL_IDLE(); + + ADVANCE_RING(); +} + +static void radeon_cp_dispatch_swap( drm_device_t *dev ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + int nbox = sarea_priv->nbox; + drm_clip_rect_t *pbox = sarea_priv->boxes; + int i; + RING_LOCALS; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + radeon_update_ring_snapshot( dev_priv ); + +#if RADEON_PERFORMANCE_BOXES + /* Do some trivial performance monitoring... + */ + radeon_cp_performance_boxes( dev_priv ); +#endif + + /* Wait for the 3D stream to idle before dispatching the bitblt. + * This will prevent data corruption between the two streams. + */ + BEGIN_RING( 2 ); + + RADEON_WAIT_UNTIL_3D_IDLE(); + + ADVANCE_RING(); + + for ( i = 0 ; i < nbox ; i++ ) { + int x = pbox[i].x1; + int y = pbox[i].y1; + int w = pbox[i].x2 - x; + int h = pbox[i].y2 - y; + + DRM_DEBUG( "dispatch swap %d,%d-%d,%d\n", + x, y, w, h ); + + BEGIN_RING( 7 ); + + OUT_RING( CP_PACKET3( RADEON_CNTL_BITBLT_MULTI, 5 ) ); + OUT_RING( RADEON_GMC_SRC_PITCH_OFFSET_CNTL | + RADEON_GMC_DST_PITCH_OFFSET_CNTL | + RADEON_GMC_BRUSH_NONE | + (dev_priv->color_fmt << 8) | + RADEON_GMC_SRC_DATATYPE_COLOR | + RADEON_ROP3_S | + RADEON_DP_SRC_SOURCE_MEMORY | + RADEON_GMC_CLR_CMP_CNTL_DIS | + RADEON_GMC_WR_MSK_DIS ); + + OUT_RING( dev_priv->back_pitch_offset ); + OUT_RING( dev_priv->front_pitch_offset ); + + OUT_RING( (x << 16) | y ); + OUT_RING( (x << 16) | y ); + OUT_RING( (w << 16) | h ); + + ADVANCE_RING(); + } + + /* Increment the frame counter. The client-side 3D driver must + * throttle the framerate by waiting for this value before + * performing the swapbuffer ioctl. 
+ */ + dev_priv->sarea_priv->last_frame++; + + BEGIN_RING( 4 ); + + RADEON_FRAME_AGE( dev_priv->sarea_priv->last_frame ); + RADEON_WAIT_UNTIL_2D_IDLE(); + + ADVANCE_RING(); +} + +static void radeon_cp_dispatch_flip( drm_device_t *dev ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + DRM_DEBUG( "%s: page=%d\n", __FUNCTION__, dev_priv->current_page ); + + radeon_update_ring_snapshot( dev_priv ); + +#if RADEON_PERFORMANCE_BOXES + /* Do some trivial performance monitoring... + */ + radeon_cp_performance_boxes( dev_priv ); +#endif + + BEGIN_RING( 6 ); + + RADEON_WAIT_UNTIL_3D_IDLE(); + RADEON_WAIT_UNTIL_PAGE_FLIPPED(); + + OUT_RING( CP_PACKET0( RADEON_CRTC_OFFSET, 0 ) ); + + if ( dev_priv->current_page == 0 ) { + OUT_RING( dev_priv->back_offset ); + dev_priv->current_page = 1; + } else { + OUT_RING( dev_priv->front_offset ); + dev_priv->current_page = 0; + } + + ADVANCE_RING(); + + /* Increment the frame counter. The client-side 3D driver must + * throttle the framerate by waiting for this value before + * performing the swapbuffer ioctl. + */ + dev_priv->sarea_priv->last_frame++; + + BEGIN_RING( 2 ); + + RADEON_FRAME_AGE( dev_priv->sarea_priv->last_frame ); + + ADVANCE_RING(); +} + +static void radeon_cp_dispatch_vertex( drm_device_t *dev, + drm_buf_t *buf ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_buf_priv_t *buf_priv = buf->dev_private; + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + int format = sarea_priv->vc_format; + int offset = dev_priv->agp_buffers_offset + buf->offset; + int size = buf->used; + int prim = buf_priv->prim; + int i = 0; + RING_LOCALS; + DRM_DEBUG( "%s: nbox=%d\n", __FUNCTION__, sarea_priv->nbox ); + + radeon_update_ring_snapshot( dev_priv ); + + if ( 1 ) + radeon_print_dirty( "dispatch_vertex", sarea_priv->dirty ); + + if ( buf->used ) { + buf_priv->dispatched = 1; + + if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) { + radeon_emit_state( dev_priv ); + } + + do { + /* Emit the next set of up to three cliprects */ + if ( i < sarea_priv->nbox ) { + radeon_emit_clip_rect( dev_priv, + &sarea_priv->boxes[i] ); + } + + /* Emit the vertex buffer rendering commands */ + BEGIN_RING( 5 ); + + OUT_RING( CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, 3 ) ); + OUT_RING( offset ); + OUT_RING( size ); + OUT_RING( format ); + OUT_RING( prim | RADEON_PRIM_WALK_LIST | + RADEON_COLOR_ORDER_RGBA | + RADEON_VTX_FMT_RADEON_MODE | + (size << RADEON_NUM_VERTICES_SHIFT) ); + + ADVANCE_RING(); + + i++; + } while ( i < sarea_priv->nbox ); + } + + if ( buf_priv->discard ) { + buf_priv->age = dev_priv->sarea_priv->last_dispatch; + + /* Emit the vertex buffer age */ + BEGIN_RING( 2 ); + RADEON_DISPATCH_AGE( buf_priv->age ); + ADVANCE_RING(); + + buf->pending = 1; + buf->used = 0; + /* FIXME: Check dispatched field */ + buf_priv->dispatched = 0; + } + + dev_priv->sarea_priv->last_dispatch++; + + sarea_priv->dirty &= ~RADEON_UPLOAD_CLIPRECTS; + sarea_priv->nbox = 0; +} + + +static void radeon_cp_dispatch_indirect( drm_device_t *dev, + drm_buf_t *buf, + int start, int end ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_buf_priv_t *buf_priv = buf->dev_private; + RING_LOCALS; + DRM_DEBUG( "indirect: buf=%d s=0x%x e=0x%x\n", + buf->idx, start, end ); + + radeon_update_ring_snapshot( dev_priv ); + + if ( start != end ) { + int offset = (dev_priv->agp_buffers_offset + + buf->offset + start); + int dwords = (end - start + 3) / sizeof(u32); + + /* Indirect buffer data must be an even number of + * dwords, so if we've been 
given an odd number we must + * pad the data with a Type-2 CP packet. + */ + if ( dwords & 1 ) { + u32 *data = (u32 *) + ((char *)dev_priv->buffers->handle + + buf->offset + start); + data[dwords++] = RADEON_CP_PACKET2; + } + + buf_priv->dispatched = 1; + + /* Fire off the indirect buffer */ + BEGIN_RING( 3 ); + + OUT_RING( CP_PACKET0( RADEON_CP_IB_BASE, 1 ) ); + OUT_RING( offset ); + OUT_RING( dwords ); + + ADVANCE_RING(); + } + + if ( buf_priv->discard ) { + buf_priv->age = dev_priv->sarea_priv->last_dispatch; + + /* Emit the indirect buffer age */ + BEGIN_RING( 2 ); + RADEON_DISPATCH_AGE( buf_priv->age ); + ADVANCE_RING(); + + buf->pending = 1; + buf->used = 0; + /* FIXME: Check dispatched field */ + buf_priv->dispatched = 0; + } + + dev_priv->sarea_priv->last_dispatch++; +} + +static void radeon_cp_dispatch_indices( drm_device_t *dev, + drm_buf_t *buf, + int start, int end, + int count ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_buf_priv_t *buf_priv = buf->dev_private; + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + int format = sarea_priv->vc_format; + int offset = dev_priv->agp_buffers_offset; + int prim = buf_priv->prim; + u32 *data; + int dwords; + int i = 0; + RING_LOCALS; + DRM_DEBUG( "indices: s=%d e=%d c=%d\n", start, end, count ); + + radeon_update_ring_snapshot( dev_priv ); + + if ( 0 ) + radeon_print_dirty( "dispatch_indices", sarea_priv->dirty ); + + if ( start != end ) { + buf_priv->dispatched = 1; + + if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) { + radeon_emit_state( dev_priv ); + } + + dwords = (end - start + 3) / sizeof(u32); + + data = (u32 *)((char *)dev_priv->buffers->handle + + buf->offset + start); + + data[0] = CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, dwords-2 ); + + data[1] = offset; + data[2] = RADEON_MAX_VB_VERTS; + data[3] = format; + data[4] = (prim | RADEON_PRIM_WALK_IND | + RADEON_COLOR_ORDER_RGBA | + RADEON_VTX_FMT_RADEON_MODE | + (count << RADEON_NUM_VERTICES_SHIFT) ); + + if ( count & 0x1 ) { + data[dwords-1] &= 0x0000ffff; + } + + do { + /* Emit the next set of up to three cliprects */ + if ( i < sarea_priv->nbox ) { + radeon_emit_clip_rect( dev_priv, + &sarea_priv->boxes[i] ); + } + + radeon_cp_dispatch_indirect( dev, buf, start, end ); + + i++; + } while ( i < sarea_priv->nbox ); + } + + if ( buf_priv->discard ) { + buf_priv->age = dev_priv->sarea_priv->last_dispatch; + + /* Emit the vertex buffer age */ + BEGIN_RING( 2 ); + RADEON_DISPATCH_AGE( buf_priv->age ); + ADVANCE_RING(); + + buf->pending = 1; + /* FIXME: Check dispatched field */ + buf_priv->dispatched = 0; + } + + dev_priv->sarea_priv->last_dispatch++; + + sarea_priv->dirty &= ~RADEON_UPLOAD_CLIPRECTS; + sarea_priv->nbox = 0; +} + +static int radeon_cp_dispatch_blit( drm_device_t *dev, + drm_radeon_blit_t *blit ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_device_dma_t *dma = dev->dma; + drm_buf_t *buf; + drm_radeon_buf_priv_t *buf_priv; + u32 format; + u32 *data; + int dword_shift, dwords; + RING_LOCALS; + DRM_DEBUG( "blit: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n", + blit->offset >> 10, blit->pitch, blit->format, + blit->x, blit->y, blit->width, blit->height ); + + radeon_update_ring_snapshot( dev_priv ); + + /* The compiler won't optimize away a division by a variable, + * even if the only legal values are powers of two. Thus, we'll + * use a shift instead. 
+ */ + switch ( blit->format ) { + case RADEON_TXF_32BPP_ARGB8888: + case RADEON_TXF_32BPP_RGBA8888: + format = RADEON_COLOR_FORMAT_ARGB8888; + dword_shift = 0; + break; + case RADEON_TXF_16BPP_AI88: + case RADEON_TXF_16BPP_ARGB1555: + case RADEON_TXF_16BPP_RGB565: + case RADEON_TXF_16BPP_ARGB4444: + format = RADEON_COLOR_FORMAT_RGB565; + dword_shift = 1; + break; + case RADEON_TXF_8BPP_I: + case RADEON_TXF_8BPP_RGB332: + format = RADEON_COLOR_FORMAT_CI8; + dword_shift = 2; + break; + default: + DRM_ERROR( "invalid blit format %d\n", blit->format ); + return -EINVAL; + } + + /* Flush the pixel cache. This ensures no pixel data gets mixed + * up with the texture data from the host data blit, otherwise + * part of the texture image may be corrupted. + */ + BEGIN_RING( 4 ); + + RADEON_FLUSH_CACHE(); + RADEON_WAIT_UNTIL_IDLE(); + + ADVANCE_RING(); + + /* Dispatch the indirect buffer. + */ + buf = dma->buflist[blit->idx]; + buf_priv = buf->dev_private; + + if ( buf->pid != current->pid ) { + DRM_ERROR( "process %d using buffer owned by %d\n", + current->pid, buf->pid ); + return -EINVAL; + } + if ( buf->pending ) { + DRM_ERROR( "sending pending buffer %d\n", blit->idx ); + return -EINVAL; + } + + buf_priv->discard = 1; + + dwords = (blit->width * blit->height) >> dword_shift; + if ( !dwords ) dwords = 1; + + data = (u32 *)((char *)dev_priv->buffers->handle + buf->offset); + + data[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 ); + data[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL | + RADEON_GMC_BRUSH_NONE | + (format << 8) | + RADEON_GMC_SRC_DATATYPE_COLOR | + RADEON_ROP3_S | + RADEON_DP_SRC_SOURCE_HOST_DATA | + RADEON_GMC_CLR_CMP_CNTL_DIS | + RADEON_GMC_WR_MSK_DIS); + + data[2] = (blit->pitch << 22) | (blit->offset >> 10); + data[3] = 0xffffffff; + data[4] = 0xffffffff; + data[5] = (blit->y << 16) | blit->x; + data[6] = (blit->height << 16) | blit->width; + data[7] = dwords; + + buf->used = (dwords + 8) * sizeof(u32); + + radeon_cp_dispatch_indirect( dev, buf, 0, buf->used ); + + /* Flush the pixel cache after the blit completes. This ensures + * the texture data is written out to memory before rendering + * continues. 
+ */ + BEGIN_RING( 4 ); + + RADEON_FLUSH_CACHE(); + RADEON_WAIT_UNTIL_2D_IDLE(); + + ADVANCE_RING(); + + return 0; +} + +static void radeon_cp_dispatch_stipple( drm_device_t *dev, u32 *stipple ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + int i; + RING_LOCALS; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + radeon_update_ring_snapshot( dev_priv ); + + BEGIN_RING( 35 ); + + OUT_RING( CP_PACKET0( RADEON_RE_STIPPLE_ADDR, 0 ) ); + OUT_RING( 0x00000000 ); + + OUT_RING( CP_PACKET0_TABLE( RADEON_RE_STIPPLE_DATA, 31 ) ); + for ( i = 0 ; i < 32 ; i++ ) { + OUT_RING( stipple[i] ); + } + + ADVANCE_RING(); +} + + +/* ================================================================ + * IOCTL functions + */ + +int radeon_cp_clear( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_clear_t clear; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + + if ( copy_from_user( &clear, (drm_radeon_clear_t *) arg, + sizeof(clear) ) ) + return -EFAULT; + + if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS ) + sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; + + radeon_cp_dispatch_clear( dev, &clear ); + + return 0; +} + +int radeon_cp_swap( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + + if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS ) + sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; + + if ( !dev_priv->page_flipping ) { + radeon_cp_dispatch_swap( dev ); + dev_priv->sarea_priv->dirty |= (RADEON_UPLOAD_CONTEXT | + RADEON_UPLOAD_MASKS); + } else { + radeon_cp_dispatch_flip( dev ); + } + + return 0; +} + +int radeon_cp_vertex( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_device_dma_t *dma = dev->dma; + drm_buf_t *buf; + drm_radeon_buf_priv_t *buf_priv; + drm_radeon_vertex_t vertex; + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + if ( !dev_priv || dev_priv->is_pci ) { + DRM_ERROR( "%s called with a PCI card\n", __FUNCTION__ ); + return -EINVAL; + } + + if ( copy_from_user( &vertex, (drm_radeon_vertex_t *)arg, + sizeof(vertex) ) ) + return -EFAULT; + + DRM_DEBUG( "%s: pid=%d index=%d count=%d discard=%d\n", + __FUNCTION__, current->pid, + vertex.idx, vertex.count, vertex.discard ); + + if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) { + DRM_ERROR( "buffer index %d (of %d max)\n", + vertex.idx, dma->buf_count - 1 ); + return -EINVAL; + } + if ( vertex.prim < 0 || + vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) { + DRM_ERROR( "buffer prim %d\n", vertex.prim ); + return -EINVAL; + } + + 
VB_AGE_CHECK_WITH_RET( dev_priv );
+
+ buf = dma->buflist[vertex.idx];
+ buf_priv = buf->dev_private;
+
+ if ( buf->pid != current->pid ) {
+ DRM_ERROR( "process %d using buffer owned by %d\n",
+ current->pid, buf->pid );
+ return -EINVAL;
+ }
+ if ( buf->pending ) {
+ DRM_ERROR( "sending pending buffer %d\n", vertex.idx );
+ return -EINVAL;
+ }
+
+ buf->used = vertex.count;
+ buf_priv->prim = vertex.prim;
+ buf_priv->discard = vertex.discard;
+
+ radeon_cp_dispatch_vertex( dev, buf );
+
+ return 0;
+}
+
+int radeon_cp_indices( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_t *buf;
+ drm_radeon_buf_priv_t *buf_priv;
+ drm_radeon_indices_t elts;
+ int count;
+
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+ if ( !dev_priv || dev_priv->is_pci ) {
+ DRM_ERROR( "%s called with a PCI card\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+
+ if ( copy_from_user( &elts, (drm_radeon_indices_t *)arg,
+ sizeof(elts) ) )
+ return -EFAULT;
+
+ DRM_DEBUG( "%s: pid=%d index=%d start=%d end=%d discard=%d\n",
+ __FUNCTION__, current->pid,
+ elts.idx, elts.start, elts.end, elts.discard );
+
+ if ( elts.idx < 0 || elts.idx >= dma->buf_count ) {
+ DRM_ERROR( "buffer index %d (of %d max)\n",
+ elts.idx, dma->buf_count - 1 );
+ return -EINVAL;
+ }
+ if ( elts.prim < 0 ||
+ elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
+ DRM_ERROR( "buffer prim %d\n", elts.prim );
+ return -EINVAL;
+ }
+
+ VB_AGE_CHECK_WITH_RET( dev_priv );
+
+ buf = dma->buflist[elts.idx];
+ buf_priv = buf->dev_private;
+
+ if ( buf->pid != current->pid ) {
+ DRM_ERROR( "process %d using buffer owned by %d\n",
+ current->pid, buf->pid );
+ return -EINVAL;
+ }
+ if ( buf->pending ) {
+ DRM_ERROR( "sending pending buffer %d\n", elts.idx );
+ return -EINVAL;
+ }
+
+ count = (elts.end - elts.start) / sizeof(u16);
+ elts.start -= RADEON_INDEX_PRIM_OFFSET;
+
+ if ( elts.start & 0x7 ) {
+ DRM_ERROR( "misaligned buffer 0x%x\n", elts.start );
+ return -EINVAL;
+ }
+ if ( elts.start < buf->used ) {
+ DRM_ERROR( "no header 0x%x - 0x%x\n", elts.start, buf->used );
+ return -EINVAL;
+ }
+
+ buf->used = elts.end;
+ buf_priv->prim = elts.prim;
+ buf_priv->discard = elts.discard;
+
+ radeon_cp_dispatch_indices( dev, buf, elts.start, elts.end, count );
+
+ return 0;
+}
+
+int radeon_cp_blit( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ drm_radeon_blit_t blit;
+
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+
+ if ( copy_from_user( &blit, (drm_radeon_blit_t *)arg,
+ sizeof(blit) ) )
+ return -EFAULT;
+
+ DRM_DEBUG( "%s: pid=%d index=%d\n",
+ __FUNCTION__, current->pid, blit.idx );
+
+ if ( blit.idx < 0 || blit.idx >= dma->buf_count ) {
+ DRM_ERROR( "sending %d buffers (of %d max)\n",
+ blit.idx, dma->buf_count );
+ return -EINVAL;
+ }
+
+ VB_AGE_CHECK_WITH_RET( dev_priv );
+
+ return radeon_cp_dispatch_blit( dev, &blit );
+}
+
+int radeon_cp_stipple( struct inode *inode, struct file *filp,
+ unsigned int
cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_stipple_t stipple; + u32 mask[32]; + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + + if ( copy_from_user( &stipple, (drm_radeon_stipple_t *)arg, + sizeof(stipple) ) ) + return -EFAULT; + + if ( copy_from_user( &mask, stipple.mask, + 32 * sizeof(u32) ) ) + return -EFAULT; + + radeon_cp_dispatch_stipple( dev, mask ); + + return 0; +} + +int radeon_cp_indirect( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_device_dma_t *dma = dev->dma; + drm_buf_t *buf; + drm_radeon_buf_priv_t *buf_priv; + drm_radeon_indirect_t indirect; + RING_LOCALS; + + if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || + dev->lock.pid != current->pid ) { + DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); + return -EINVAL; + } + if ( !dev_priv || dev_priv->is_pci ) { + DRM_ERROR( "%s called with a PCI card\n", __FUNCTION__ ); + return -EINVAL; + } + + if ( copy_from_user( &indirect, (drm_radeon_indirect_t *)arg, + sizeof(indirect) ) ) + return -EFAULT; + + DRM_DEBUG( "indirect: idx=%d s=%d e=%d d=%d\n", + indirect.idx, indirect.start, + indirect.end, indirect.discard ); + + if ( indirect.idx < 0 || indirect.idx >= dma->buf_count ) { + DRM_ERROR( "buffer index %d (of %d max)\n", + indirect.idx, dma->buf_count - 1 ); + return -EINVAL; + } + + buf = dma->buflist[indirect.idx]; + buf_priv = buf->dev_private; + + if ( buf->pid != current->pid ) { + DRM_ERROR( "process %d using buffer owned by %d\n", + current->pid, buf->pid ); + return -EINVAL; + } + if ( buf->pending ) { + DRM_ERROR( "sending pending buffer %d\n", indirect.idx ); + return -EINVAL; + } + + if ( indirect.start < buf->used ) { + DRM_ERROR( "reusing indirect: start=0x%x actual=0x%x\n", + indirect.start, buf->used ); + return -EINVAL; + } + + VB_AGE_CHECK_WITH_RET( dev_priv ); + + buf->used = indirect.end; + buf_priv->discard = indirect.discard; + + /* Wait for the 3D stream to idle before the indirect buffer + * containing 2D acceleration commands is processed. + */ + BEGIN_RING( 2 ); + + RADEON_WAIT_UNTIL_3D_IDLE(); + + ADVANCE_RING(); + + /* Dispatch the indirect buffer full of commands from the + * X server. This is insecure and is thus only available to + * privileged clients. 
+	 */
+	radeon_cp_dispatch_indirect( dev, buf, indirect.start, indirect.end );
+
+	return 0;
+}
diff --git a/linux/sis_drm.h b/linux/sis_drm.h
index 73807f317..299143f61 100644
--- a/linux/sis_drm.h
+++ b/linux/sis_drm.h
@@ -2,30 +2,19 @@
 #ifndef _sis_drm_public_h_
 #define _sis_drm_public_h_
 
-typedef struct {
+typedef struct {
 	int context;
 	unsigned int offset;
 	unsigned int size;
 	unsigned int free;
-} drm_sis_mem_t;
+} drm_sis_mem_t;
 
-typedef struct {
+typedef struct {
 	unsigned int offset, size;
-} drm_sis_agp_t;
+} drm_sis_agp_t;
 
-typedef struct {
+typedef struct {
 	unsigned int left, right;
-} drm_sis_flip_t;
-
-#define SIS_IOCTL_FB_ALLOC	DRM_IOWR( 0x44, drm_sis_mem_t)
-#define SIS_IOCTL_FB_FREE	DRM_IOW( 0x45, drm_sis_mem_t)
-
-#define SIS_IOCTL_AGP_INIT	DRM_IOWR( 0x53, drm_sis_agp_t)
-#define SIS_IOCTL_AGP_ALLOC	DRM_IOWR( 0x54, drm_sis_mem_t)
-#define SIS_IOCTL_AGP_FREE	DRM_IOW( 0x55, drm_sis_mem_t)
-
-#define SIS_IOCTL_FLIP		DRM_IOW( 0x48, drm_sis_flip_t)
-#define SIS_IOCTL_FLIP_INIT	DRM_IO( 0x49)
-#define SIS_IOCTL_FLIP_FINAL	DRM_IO( 0x50)
+} drm_sis_flip_t;
 
 #endif
diff --git a/shared-core/drm.h b/shared-core/drm.h
index 57032d6e2..dc3d262d4 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -11,11 +11,11 @@
  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  * and/or sell copies of the Software, and to permit persons to whom the
  * Software is furnished to do so, subject to the following conditions:
- * 
+ *
  * The above copyright notice and this permission notice (including the next
  * paragraph) shall be included in all copies or substantial portions of the
  * Software.
- * 
+ *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
@@ -23,7 +23,7 @@
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
- * 
+ *
  * Authors:
  *    Rickard E. (Rik) Faith <faith@valinux.com>
  *
@@ -82,7 +82,10 @@ typedef struct drm_clip_rect {
 #include "mga_drm.h"
 #include "i810_drm.h"
 #include "r128_drm.h"
+#include "radeon_drm.h"
+#ifdef CONFIG_DRM_SIS
 #include "sis_drm.h"
+#endif
 
 typedef struct drm_version {
 	int version_major;	/* Major version */
@@ -295,89 +298,117 @@ typedef struct drm_agp_info {
 	unsigned short id_device;
 } drm_agp_info_t;
 
-#define DRM_IOCTL_BASE		'd'
-#define DRM_IO(nr)		_IO(DRM_IOCTL_BASE,nr)
-#define DRM_IOR(nr,size)	_IOR(DRM_IOCTL_BASE,nr,size)
-#define DRM_IOW(nr,size)	_IOW(DRM_IOCTL_BASE,nr,size)
-#define DRM_IOWR(nr,size)	_IOWR(DRM_IOCTL_BASE,nr,size)
-
-
-#define DRM_IOCTL_VERSION	DRM_IOWR(0x00, drm_version_t)
-#define DRM_IOCTL_GET_UNIQUE	DRM_IOWR(0x01, drm_unique_t)
-#define DRM_IOCTL_GET_MAGIC	DRM_IOR( 0x02, drm_auth_t)
-#define DRM_IOCTL_IRQ_BUSID	DRM_IOWR(0x03, drm_irq_busid_t)
-
-#define DRM_IOCTL_SET_UNIQUE	DRM_IOW( 0x10, drm_unique_t)
-#define DRM_IOCTL_AUTH_MAGIC	DRM_IOW( 0x11, drm_auth_t)
-#define DRM_IOCTL_BLOCK		DRM_IOWR(0x12, drm_block_t)
-#define DRM_IOCTL_UNBLOCK	DRM_IOWR(0x13, drm_block_t)
-#define DRM_IOCTL_CONTROL	DRM_IOW( 0x14, drm_control_t)
-#define DRM_IOCTL_ADD_MAP	DRM_IOWR(0x15, drm_map_t)
-#define DRM_IOCTL_ADD_BUFS	DRM_IOWR(0x16, drm_buf_desc_t)
-#define DRM_IOCTL_MARK_BUFS	DRM_IOW( 0x17, drm_buf_desc_t)
-#define DRM_IOCTL_INFO_BUFS	DRM_IOWR(0x18, drm_buf_info_t)
-#define DRM_IOCTL_MAP_BUFS	DRM_IOWR(0x19, drm_buf_map_t)
-#define DRM_IOCTL_FREE_BUFS	DRM_IOW( 0x1a, drm_buf_free_t)
-
-#define DRM_IOCTL_ADD_CTX	DRM_IOWR(0x20, drm_ctx_t)
-#define DRM_IOCTL_RM_CTX	DRM_IOWR(0x21, drm_ctx_t)
-#define DRM_IOCTL_MOD_CTX	DRM_IOW( 0x22, drm_ctx_t)
-#define DRM_IOCTL_GET_CTX	DRM_IOWR(0x23, drm_ctx_t)
-#define DRM_IOCTL_SWITCH_CTX	DRM_IOW( 0x24, drm_ctx_t)
-#define DRM_IOCTL_NEW_CTX	DRM_IOW( 0x25, drm_ctx_t)
-#define DRM_IOCTL_RES_CTX	DRM_IOWR(0x26, drm_ctx_res_t)
-#define DRM_IOCTL_ADD_DRAW	DRM_IOWR(0x27, drm_draw_t)
-#define DRM_IOCTL_RM_DRAW	DRM_IOWR(0x28, drm_draw_t)
-#define DRM_IOCTL_DMA		DRM_IOWR(0x29, drm_dma_t)
-#define DRM_IOCTL_LOCK		DRM_IOW( 0x2a, drm_lock_t)
-#define DRM_IOCTL_UNLOCK	DRM_IOW( 0x2b, drm_lock_t)
-#define DRM_IOCTL_FINISH	DRM_IOW( 0x2c, drm_lock_t)
-
-#define DRM_IOCTL_AGP_ACQUIRE	DRM_IO( 0x30)
-#define DRM_IOCTL_AGP_RELEASE	DRM_IO( 0x31)
-#define DRM_IOCTL_AGP_ENABLE	DRM_IOW( 0x32, drm_agp_mode_t)
-#define DRM_IOCTL_AGP_INFO	DRM_IOR( 0x33, drm_agp_info_t)
-#define DRM_IOCTL_AGP_ALLOC	DRM_IOWR(0x34, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_FREE	DRM_IOW( 0x35, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_BIND	DRM_IOW( 0x36, drm_agp_binding_t)
-#define DRM_IOCTL_AGP_UNBIND	DRM_IOW( 0x37, drm_agp_binding_t)
+#define DRM_IOCTL_BASE		'd'
+#define DRM_IO(nr)		_IO(DRM_IOCTL_BASE,nr)
+#define DRM_IOR(nr,size)	_IOR(DRM_IOCTL_BASE,nr,size)
+#define DRM_IOW(nr,size)	_IOW(DRM_IOCTL_BASE,nr,size)
+#define DRM_IOWR(nr,size)	_IOWR(DRM_IOCTL_BASE,nr,size)
+
+
+#define DRM_IOCTL_VERSION	DRM_IOWR(0x00, drm_version_t)
+#define DRM_IOCTL_GET_UNIQUE	DRM_IOWR(0x01, drm_unique_t)
+#define DRM_IOCTL_GET_MAGIC	DRM_IOR( 0x02, drm_auth_t)
+#define DRM_IOCTL_IRQ_BUSID	DRM_IOWR(0x03, drm_irq_busid_t)
+
+#define DRM_IOCTL_SET_UNIQUE	DRM_IOW( 0x10, drm_unique_t)
+#define DRM_IOCTL_AUTH_MAGIC	DRM_IOW( 0x11, drm_auth_t)
+#define DRM_IOCTL_BLOCK		DRM_IOWR(0x12, drm_block_t)
+#define DRM_IOCTL_UNBLOCK	DRM_IOWR(0x13, drm_block_t)
+#define DRM_IOCTL_CONTROL	DRM_IOW( 0x14, drm_control_t)
+#define DRM_IOCTL_ADD_MAP	DRM_IOWR(0x15, drm_map_t)
+#define DRM_IOCTL_ADD_BUFS	DRM_IOWR(0x16, drm_buf_desc_t)
+#define DRM_IOCTL_MARK_BUFS	DRM_IOW( 0x17, drm_buf_desc_t)
+#define DRM_IOCTL_INFO_BUFS	DRM_IOWR(0x18, drm_buf_info_t)
+#define DRM_IOCTL_MAP_BUFS	DRM_IOWR(0x19, drm_buf_map_t)
+#define DRM_IOCTL_FREE_BUFS	DRM_IOW( 0x1a, drm_buf_free_t)
+
+#define DRM_IOCTL_ADD_CTX	DRM_IOWR(0x20, drm_ctx_t)
+#define DRM_IOCTL_RM_CTX	DRM_IOWR(0x21, drm_ctx_t)
+#define DRM_IOCTL_MOD_CTX	DRM_IOW( 0x22, drm_ctx_t)
+#define DRM_IOCTL_GET_CTX	DRM_IOWR(0x23, drm_ctx_t)
+#define DRM_IOCTL_SWITCH_CTX	DRM_IOW( 0x24, drm_ctx_t)
+#define DRM_IOCTL_NEW_CTX	DRM_IOW( 0x25, drm_ctx_t)
+#define DRM_IOCTL_RES_CTX	DRM_IOWR(0x26, drm_ctx_res_t)
+#define DRM_IOCTL_ADD_DRAW	DRM_IOWR(0x27, drm_draw_t)
+#define DRM_IOCTL_RM_DRAW	DRM_IOWR(0x28, drm_draw_t)
+#define DRM_IOCTL_DMA		DRM_IOWR(0x29, drm_dma_t)
+#define DRM_IOCTL_LOCK		DRM_IOW( 0x2a, drm_lock_t)
+#define DRM_IOCTL_UNLOCK	DRM_IOW( 0x2b, drm_lock_t)
+#define DRM_IOCTL_FINISH	DRM_IOW( 0x2c, drm_lock_t)
+
+#define DRM_IOCTL_AGP_ACQUIRE	DRM_IO( 0x30)
+#define DRM_IOCTL_AGP_RELEASE	DRM_IO( 0x31)
+#define DRM_IOCTL_AGP_ENABLE	DRM_IOW( 0x32, drm_agp_mode_t)
+#define DRM_IOCTL_AGP_INFO	DRM_IOR( 0x33, drm_agp_info_t)
+#define DRM_IOCTL_AGP_ALLOC	DRM_IOWR(0x34, drm_agp_buffer_t)
+#define DRM_IOCTL_AGP_FREE	DRM_IOW( 0x35, drm_agp_buffer_t)
+#define DRM_IOCTL_AGP_BIND	DRM_IOW( 0x36, drm_agp_binding_t)
+#define DRM_IOCTL_AGP_UNBIND	DRM_IOW( 0x37, drm_agp_binding_t)
 
 /* Mga specific ioctls */
-#define DRM_IOCTL_MGA_INIT	DRM_IOW( 0x40, drm_mga_init_t)
-#define DRM_IOCTL_MGA_SWAP	DRM_IOW( 0x41, drm_mga_swap_t)
-#define DRM_IOCTL_MGA_CLEAR	DRM_IOW( 0x42, drm_mga_clear_t)
-#define DRM_IOCTL_MGA_ILOAD	DRM_IOW( 0x43, drm_mga_iload_t)
-#define DRM_IOCTL_MGA_VERTEX	DRM_IOW( 0x44, drm_mga_vertex_t)
-#define DRM_IOCTL_MGA_FLUSH	DRM_IOW( 0x45, drm_lock_t )
-#define DRM_IOCTL_MGA_INDICES	DRM_IOW( 0x46, drm_mga_indices_t)
+#define DRM_IOCTL_MGA_INIT	DRM_IOW( 0x40, drm_mga_init_t)
+#define DRM_IOCTL_MGA_SWAP	DRM_IOW( 0x41, drm_mga_swap_t)
+#define DRM_IOCTL_MGA_CLEAR	DRM_IOW( 0x42, drm_mga_clear_t)
+#define DRM_IOCTL_MGA_ILOAD	DRM_IOW( 0x43, drm_mga_iload_t)
+#define DRM_IOCTL_MGA_VERTEX	DRM_IOW( 0x44, drm_mga_vertex_t)
+#define DRM_IOCTL_MGA_FLUSH	DRM_IOW( 0x45, drm_lock_t )
+#define DRM_IOCTL_MGA_INDICES	DRM_IOW( 0x46, drm_mga_indices_t)
+#define DRM_IOCTL_MGA_BLIT	DRM_IOW( 0x47, drm_mga_blit_t)
 
 /* I810 specific ioctls */
-#define DRM_IOCTL_I810_INIT	DRM_IOW( 0x40, drm_i810_init_t)
-#define DRM_IOCTL_I810_VERTEX	DRM_IOW( 0x41, drm_i810_vertex_t)
-#define DRM_IOCTL_I810_CLEAR	DRM_IOW( 0x42, drm_i810_clear_t)
-#define DRM_IOCTL_I810_FLUSH	DRM_IO ( 0x43)
-#define DRM_IOCTL_I810_GETAGE	DRM_IO ( 0x44)
-#define DRM_IOCTL_I810_GETBUF	DRM_IOWR(0x45, drm_i810_dma_t)
-#define DRM_IOCTL_I810_SWAP	DRM_IO ( 0x46)
-#define DRM_IOCTL_I810_COPY	DRM_IOW( 0x47, drm_i810_copy_t)
-#define DRM_IOCTL_I810_DOCOPY	DRM_IO ( 0x48)
+#define DRM_IOCTL_I810_INIT	DRM_IOW( 0x40, drm_i810_init_t)
+#define DRM_IOCTL_I810_VERTEX	DRM_IOW( 0x41, drm_i810_vertex_t)
+#define DRM_IOCTL_I810_CLEAR	DRM_IOW( 0x42, drm_i810_clear_t)
+#define DRM_IOCTL_I810_FLUSH	DRM_IO( 0x43)
+#define DRM_IOCTL_I810_GETAGE	DRM_IO( 0x44)
+#define DRM_IOCTL_I810_GETBUF	DRM_IOWR(0x45, drm_i810_dma_t)
+#define DRM_IOCTL_I810_SWAP	DRM_IO( 0x46)
+#define DRM_IOCTL_I810_COPY	DRM_IOW( 0x47, drm_i810_copy_t)
+#define DRM_IOCTL_I810_DOCOPY	DRM_IO( 0x48)
 
 /* Rage 128 specific ioctls */
-#define DRM_IOCTL_R128_INIT	DRM_IOW( 0x40, drm_r128_init_t)
-#define DRM_IOCTL_R128_RESET	DRM_IO( 0x41)
-#define DRM_IOCTL_R128_FLUSH	DRM_IO( 0x42)
-#define DRM_IOCTL_R128_IDLE	DRM_IO( 0x43)
-#define DRM_IOCTL_R128_PACKET	DRM_IOW( 0x44, drm_r128_packet_t)
-#define DRM_IOCTL_R128_VERTEX	DRM_IOW( 0x45, drm_r128_vertex_t)
-
+#define DRM_IOCTL_R128_INIT		DRM_IOW( 0x40, drm_r128_init_t)
+#define DRM_IOCTL_R128_CCE_START	DRM_IO( 0x41)
+#define DRM_IOCTL_R128_CCE_STOP		DRM_IOW( 0x42, drm_r128_cce_stop_t)
+#define DRM_IOCTL_R128_CCE_RESET	DRM_IO( 0x43)
+#define DRM_IOCTL_R128_CCE_IDLE		DRM_IO( 0x44)
+#define DRM_IOCTL_R128_RESET		DRM_IO( 0x46)
+#define DRM_IOCTL_R128_FULLSCREEN	DRM_IOW( 0x47, drm_r128_fullscreen_t)
+#define DRM_IOCTL_R128_SWAP		DRM_IO( 0x48)
+#define DRM_IOCTL_R128_CLEAR		DRM_IOW( 0x49, drm_r128_clear_t)
+#define DRM_IOCTL_R128_VERTEX		DRM_IOW( 0x4a, drm_r128_vertex_t)
+#define DRM_IOCTL_R128_INDICES		DRM_IOW( 0x4b, drm_r128_indices_t)
+#define DRM_IOCTL_R128_BLIT		DRM_IOW( 0x4c, drm_r128_blit_t)
+#define DRM_IOCTL_R128_DEPTH		DRM_IOW( 0x4d, drm_r128_depth_t)
+#define DRM_IOCTL_R128_STIPPLE		DRM_IOW( 0x4e, drm_r128_stipple_t)
+#define DRM_IOCTL_R128_INDIRECT		DRM_IOWR(0x4f, drm_r128_indirect_t)
+
+/* Radeon specific ioctls */
+#define DRM_IOCTL_RADEON_CP_INIT	DRM_IOW( 0x40, drm_radeon_init_t)
+#define DRM_IOCTL_RADEON_CP_START	DRM_IO( 0x41)
+#define DRM_IOCTL_RADEON_CP_STOP	DRM_IOW( 0x42, drm_radeon_cp_stop_t)
+#define DRM_IOCTL_RADEON_CP_RESET	DRM_IO( 0x43)
+#define DRM_IOCTL_RADEON_CP_IDLE	DRM_IO( 0x44)
+#define DRM_IOCTL_RADEON_RESET		DRM_IO( 0x45)
+#define DRM_IOCTL_RADEON_FULLSCREEN	DRM_IOW( 0x46, drm_radeon_fullscreen_t)
+#define DRM_IOCTL_RADEON_SWAP		DRM_IO( 0x47)
+#define DRM_IOCTL_RADEON_CLEAR		DRM_IOW( 0x48, drm_radeon_clear_t)
+#define DRM_IOCTL_RADEON_VERTEX		DRM_IOW( 0x49, drm_radeon_vertex_t)
+#define DRM_IOCTL_RADEON_INDICES	DRM_IOW( 0x4a, drm_radeon_indices_t)
+#define DRM_IOCTL_RADEON_BLIT		DRM_IOW( 0x4b, drm_radeon_blit_t)
+#define DRM_IOCTL_RADEON_STIPPLE	DRM_IOW( 0x4c, drm_radeon_stipple_t)
+#define DRM_IOCTL_RADEON_INDIRECT	DRM_IOWR(0x4d, drm_radeon_indirect_t)
+
+#ifdef CONFIG_DRM_SIS
 /* SiS specific ioctls */
-#define SIS_IOCTL_FB_ALLOC	DRM_IOWR( 0x44, drm_sis_mem_t)
-#define SIS_IOCTL_FB_FREE	DRM_IOW( 0x45, drm_sis_mem_t)
-#define SIS_IOCTL_AGP_INIT	DRM_IOWR( 0x53, drm_sis_agp_t)
-#define SIS_IOCTL_AGP_ALLOC	DRM_IOWR( 0x54, drm_sis_mem_t)
-#define SIS_IOCTL_AGP_FREE	DRM_IOW( 0x55, drm_sis_mem_t)
-#define SIS_IOCTL_FLIP		DRM_IOW( 0x48, drm_sis_flip_t)
-#define SIS_IOCTL_FLIP_INIT	DRM_IO( 0x49)
-#define SIS_IOCTL_FLIP_FINAL	DRM_IO( 0x50)
+#define SIS_IOCTL_FB_ALLOC	DRM_IOWR(0x44, drm_sis_mem_t)
+#define SIS_IOCTL_FB_FREE	DRM_IOW( 0x45, drm_sis_mem_t)
+#define SIS_IOCTL_AGP_INIT	DRM_IOWR(0x53, drm_sis_agp_t)
+#define SIS_IOCTL_AGP_ALLOC	DRM_IOWR(0x54, drm_sis_mem_t)
+#define SIS_IOCTL_AGP_FREE	DRM_IOW( 0x55, drm_sis_mem_t)
+#define SIS_IOCTL_FLIP		DRM_IOW( 0x48, drm_sis_flip_t)
+#define SIS_IOCTL_FLIP_INIT	DRM_IO( 0x49)
+#define SIS_IOCTL_FLIP_FINAL	DRM_IO( 0x50)
+#endif
 
 #endif
diff --git a/shared/drm.h b/shared/drm.h
index 57032d6e2..dc3d262d4 100644
--- a/shared/drm.h
+++ b/shared/drm.h
@@ -11,11 +11,11 @@
  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  * and/or sell copies of the Software, and to permit persons to whom the
  * Software is furnished to do so, subject to the following conditions:
- * 
+ *
  * The above copyright notice and this permission notice (including the next
  * paragraph) shall be included in all copies or substantial portions of the
  * Software.
- * 
+ *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
@@ -23,7 +23,7 @@
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
- * 
+ *
  * Authors:
  *    Rickard E. (Rik) Faith <faith@valinux.com>
  *
@@ -82,7 +82,10 @@ typedef struct drm_clip_rect {
 #include "mga_drm.h"
 #include "i810_drm.h"
 #include "r128_drm.h"
+#include "radeon_drm.h"
+#ifdef CONFIG_DRM_SIS
 #include "sis_drm.h"
+#endif
 
 typedef struct drm_version {
 	int version_major;	/* Major version */
@@ -295,89 +298,117 @@ typedef struct drm_agp_info {
 	unsigned short id_device;
 } drm_agp_info_t;
 
-#define DRM_IOCTL_BASE		'd'
-#define DRM_IO(nr)		_IO(DRM_IOCTL_BASE,nr)
-#define DRM_IOR(nr,size)	_IOR(DRM_IOCTL_BASE,nr,size)
-#define DRM_IOW(nr,size)	_IOW(DRM_IOCTL_BASE,nr,size)
-#define DRM_IOWR(nr,size)	_IOWR(DRM_IOCTL_BASE,nr,size)
-
-
-#define DRM_IOCTL_VERSION	DRM_IOWR(0x00, drm_version_t)
-#define DRM_IOCTL_GET_UNIQUE	DRM_IOWR(0x01, drm_unique_t)
-#define DRM_IOCTL_GET_MAGIC	DRM_IOR( 0x02, drm_auth_t)
-#define DRM_IOCTL_IRQ_BUSID	DRM_IOWR(0x03, drm_irq_busid_t)
-
-#define DRM_IOCTL_SET_UNIQUE	DRM_IOW( 0x10, drm_unique_t)
-#define DRM_IOCTL_AUTH_MAGIC	DRM_IOW( 0x11, drm_auth_t)
-#define DRM_IOCTL_BLOCK		DRM_IOWR(0x12, drm_block_t)
-#define DRM_IOCTL_UNBLOCK	DRM_IOWR(0x13, drm_block_t)
-#define DRM_IOCTL_CONTROL	DRM_IOW( 0x14, drm_control_t)
-#define DRM_IOCTL_ADD_MAP	DRM_IOWR(0x15, drm_map_t)
-#define DRM_IOCTL_ADD_BUFS	DRM_IOWR(0x16, drm_buf_desc_t)
-#define DRM_IOCTL_MARK_BUFS	DRM_IOW( 0x17, drm_buf_desc_t)
-#define DRM_IOCTL_INFO_BUFS	DRM_IOWR(0x18, drm_buf_info_t)
-#define DRM_IOCTL_MAP_BUFS	DRM_IOWR(0x19, drm_buf_map_t)
-#define DRM_IOCTL_FREE_BUFS	DRM_IOW( 0x1a, drm_buf_free_t)
-
-#define DRM_IOCTL_ADD_CTX	DRM_IOWR(0x20, drm_ctx_t)
-#define DRM_IOCTL_RM_CTX	DRM_IOWR(0x21, drm_ctx_t)
-#define DRM_IOCTL_MOD_CTX	DRM_IOW( 0x22, drm_ctx_t)
-#define DRM_IOCTL_GET_CTX	DRM_IOWR(0x23, drm_ctx_t)
-#define DRM_IOCTL_SWITCH_CTX	DRM_IOW( 0x24, drm_ctx_t)
-#define DRM_IOCTL_NEW_CTX	DRM_IOW( 0x25, drm_ctx_t)
-#define DRM_IOCTL_RES_CTX	DRM_IOWR(0x26, drm_ctx_res_t)
-#define DRM_IOCTL_ADD_DRAW	DRM_IOWR(0x27, drm_draw_t)
-#define DRM_IOCTL_RM_DRAW	DRM_IOWR(0x28, drm_draw_t)
-#define DRM_IOCTL_DMA		DRM_IOWR(0x29, drm_dma_t)
-#define DRM_IOCTL_LOCK		DRM_IOW( 0x2a, drm_lock_t)
-#define DRM_IOCTL_UNLOCK	DRM_IOW( 0x2b, drm_lock_t)
-#define DRM_IOCTL_FINISH	DRM_IOW( 0x2c, drm_lock_t)
-
-#define DRM_IOCTL_AGP_ACQUIRE	DRM_IO( 0x30)
-#define DRM_IOCTL_AGP_RELEASE	DRM_IO( 0x31)
-#define DRM_IOCTL_AGP_ENABLE	DRM_IOW( 0x32, drm_agp_mode_t)
-#define DRM_IOCTL_AGP_INFO	DRM_IOR( 0x33, drm_agp_info_t)
-#define DRM_IOCTL_AGP_ALLOC	DRM_IOWR(0x34, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_FREE	DRM_IOW( 0x35, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_BIND	DRM_IOW( 0x36, drm_agp_binding_t)
-#define DRM_IOCTL_AGP_UNBIND	DRM_IOW( 0x37, drm_agp_binding_t)
+#define DRM_IOCTL_BASE		'd'
+#define DRM_IO(nr)		_IO(DRM_IOCTL_BASE,nr)
+#define DRM_IOR(nr,size)	_IOR(DRM_IOCTL_BASE,nr,size)
+#define DRM_IOW(nr,size)	_IOW(DRM_IOCTL_BASE,nr,size)
+#define DRM_IOWR(nr,size)	_IOWR(DRM_IOCTL_BASE,nr,size)
+
+
+#define DRM_IOCTL_VERSION	DRM_IOWR(0x00, drm_version_t)
+#define DRM_IOCTL_GET_UNIQUE	DRM_IOWR(0x01, drm_unique_t)
+#define DRM_IOCTL_GET_MAGIC	DRM_IOR( 0x02, drm_auth_t)
+#define DRM_IOCTL_IRQ_BUSID	DRM_IOWR(0x03, drm_irq_busid_t)
+
+#define DRM_IOCTL_SET_UNIQUE	DRM_IOW( 0x10, drm_unique_t)
+#define DRM_IOCTL_AUTH_MAGIC	DRM_IOW( 0x11, drm_auth_t)
+#define DRM_IOCTL_BLOCK		DRM_IOWR(0x12, drm_block_t)
+#define DRM_IOCTL_UNBLOCK	DRM_IOWR(0x13, drm_block_t)
+#define DRM_IOCTL_CONTROL	DRM_IOW( 0x14, drm_control_t)
+#define DRM_IOCTL_ADD_MAP	DRM_IOWR(0x15, drm_map_t)
+#define DRM_IOCTL_ADD_BUFS	DRM_IOWR(0x16, drm_buf_desc_t)
+#define DRM_IOCTL_MARK_BUFS	DRM_IOW( 0x17, drm_buf_desc_t)
+#define DRM_IOCTL_INFO_BUFS	DRM_IOWR(0x18, drm_buf_info_t)
+#define DRM_IOCTL_MAP_BUFS	DRM_IOWR(0x19, drm_buf_map_t)
+#define DRM_IOCTL_FREE_BUFS	DRM_IOW( 0x1a, drm_buf_free_t)
+
+#define DRM_IOCTL_ADD_CTX	DRM_IOWR(0x20, drm_ctx_t)
+#define DRM_IOCTL_RM_CTX	DRM_IOWR(0x21, drm_ctx_t)
+#define DRM_IOCTL_MOD_CTX	DRM_IOW( 0x22, drm_ctx_t)
+#define DRM_IOCTL_GET_CTX	DRM_IOWR(0x23, drm_ctx_t)
+#define DRM_IOCTL_SWITCH_CTX	DRM_IOW( 0x24, drm_ctx_t)
+#define DRM_IOCTL_NEW_CTX	DRM_IOW( 0x25, drm_ctx_t)
+#define DRM_IOCTL_RES_CTX	DRM_IOWR(0x26, drm_ctx_res_t)
+#define DRM_IOCTL_ADD_DRAW	DRM_IOWR(0x27, drm_draw_t)
+#define DRM_IOCTL_RM_DRAW	DRM_IOWR(0x28, drm_draw_t)
+#define DRM_IOCTL_DMA		DRM_IOWR(0x29, drm_dma_t)
+#define DRM_IOCTL_LOCK		DRM_IOW( 0x2a, drm_lock_t)
+#define DRM_IOCTL_UNLOCK	DRM_IOW( 0x2b, drm_lock_t)
+#define DRM_IOCTL_FINISH	DRM_IOW( 0x2c, drm_lock_t)
+
+#define DRM_IOCTL_AGP_ACQUIRE	DRM_IO( 0x30)
+#define DRM_IOCTL_AGP_RELEASE	DRM_IO( 0x31)
+#define DRM_IOCTL_AGP_ENABLE	DRM_IOW( 0x32, drm_agp_mode_t)
+#define DRM_IOCTL_AGP_INFO	DRM_IOR( 0x33, drm_agp_info_t)
+#define DRM_IOCTL_AGP_ALLOC	DRM_IOWR(0x34, drm_agp_buffer_t)
+#define DRM_IOCTL_AGP_FREE	DRM_IOW( 0x35, drm_agp_buffer_t)
+#define DRM_IOCTL_AGP_BIND	DRM_IOW( 0x36, drm_agp_binding_t)
+#define DRM_IOCTL_AGP_UNBIND	DRM_IOW( 0x37, drm_agp_binding_t)
 
 /* Mga specific ioctls */
-#define DRM_IOCTL_MGA_INIT	DRM_IOW( 0x40, drm_mga_init_t)
-#define DRM_IOCTL_MGA_SWAP	DRM_IOW( 0x41, drm_mga_swap_t)
-#define DRM_IOCTL_MGA_CLEAR	DRM_IOW( 0x42, drm_mga_clear_t)
-#define DRM_IOCTL_MGA_ILOAD	DRM_IOW( 0x43, drm_mga_iload_t)
-#define DRM_IOCTL_MGA_VERTEX	DRM_IOW( 0x44, drm_mga_vertex_t)
-#define DRM_IOCTL_MGA_FLUSH	DRM_IOW( 0x45, drm_lock_t )
-#define DRM_IOCTL_MGA_INDICES	DRM_IOW( 0x46, drm_mga_indices_t)
+#define DRM_IOCTL_MGA_INIT	DRM_IOW( 0x40, drm_mga_init_t)
+#define DRM_IOCTL_MGA_SWAP	DRM_IOW( 0x41, drm_mga_swap_t)
+#define DRM_IOCTL_MGA_CLEAR	DRM_IOW( 0x42, drm_mga_clear_t)
+#define DRM_IOCTL_MGA_ILOAD	DRM_IOW( 0x43, drm_mga_iload_t)
+#define DRM_IOCTL_MGA_VERTEX	DRM_IOW( 0x44, drm_mga_vertex_t)
+#define DRM_IOCTL_MGA_FLUSH	DRM_IOW( 0x45, drm_lock_t )
+#define DRM_IOCTL_MGA_INDICES	DRM_IOW( 0x46, drm_mga_indices_t)
+#define DRM_IOCTL_MGA_BLIT	DRM_IOW( 0x47, drm_mga_blit_t)
 
 /* I810 specific ioctls */
-#define DRM_IOCTL_I810_INIT	DRM_IOW( 0x40, drm_i810_init_t)
-#define DRM_IOCTL_I810_VERTEX	DRM_IOW( 0x41, drm_i810_vertex_t)
-#define DRM_IOCTL_I810_CLEAR	DRM_IOW( 0x42, drm_i810_clear_t)
-#define DRM_IOCTL_I810_FLUSH	DRM_IO ( 0x43)
-#define DRM_IOCTL_I810_GETAGE	DRM_IO ( 0x44)
-#define DRM_IOCTL_I810_GETBUF	DRM_IOWR(0x45, drm_i810_dma_t)
-#define DRM_IOCTL_I810_SWAP	DRM_IO ( 0x46)
-#define DRM_IOCTL_I810_COPY	DRM_IOW( 0x47, drm_i810_copy_t)
-#define DRM_IOCTL_I810_DOCOPY	DRM_IO ( 0x48)
+#define DRM_IOCTL_I810_INIT	DRM_IOW( 0x40, drm_i810_init_t)
+#define DRM_IOCTL_I810_VERTEX	DRM_IOW( 0x41, drm_i810_vertex_t)
+#define DRM_IOCTL_I810_CLEAR	DRM_IOW( 0x42, drm_i810_clear_t)
+#define DRM_IOCTL_I810_FLUSH	DRM_IO( 0x43)
+#define DRM_IOCTL_I810_GETAGE	DRM_IO( 0x44)
+#define DRM_IOCTL_I810_GETBUF	DRM_IOWR(0x45, drm_i810_dma_t)
+#define DRM_IOCTL_I810_SWAP	DRM_IO( 0x46)
+#define DRM_IOCTL_I810_COPY	DRM_IOW( 0x47, drm_i810_copy_t)
+#define DRM_IOCTL_I810_DOCOPY	DRM_IO( 0x48)
 
 /* Rage 128 specific ioctls */
-#define DRM_IOCTL_R128_INIT	DRM_IOW( 0x40, drm_r128_init_t)
-#define DRM_IOCTL_R128_RESET	DRM_IO( 0x41)
-#define DRM_IOCTL_R128_FLUSH	DRM_IO( 0x42)
-#define DRM_IOCTL_R128_IDLE	DRM_IO( 0x43)
-#define DRM_IOCTL_R128_PACKET	DRM_IOW( 0x44, drm_r128_packet_t)
-#define DRM_IOCTL_R128_VERTEX	DRM_IOW( 0x45, drm_r128_vertex_t)
-
+#define DRM_IOCTL_R128_INIT		DRM_IOW( 0x40, drm_r128_init_t)
+#define DRM_IOCTL_R128_CCE_START	DRM_IO( 0x41)
+#define DRM_IOCTL_R128_CCE_STOP		DRM_IOW( 0x42, drm_r128_cce_stop_t)
+#define DRM_IOCTL_R128_CCE_RESET	DRM_IO( 0x43)
+#define DRM_IOCTL_R128_CCE_IDLE		DRM_IO( 0x44)
+#define DRM_IOCTL_R128_RESET		DRM_IO( 0x46)
+#define DRM_IOCTL_R128_FULLSCREEN	DRM_IOW( 0x47, drm_r128_fullscreen_t)
+#define DRM_IOCTL_R128_SWAP		DRM_IO( 0x48)
+#define DRM_IOCTL_R128_CLEAR		DRM_IOW( 0x49, drm_r128_clear_t)
+#define DRM_IOCTL_R128_VERTEX		DRM_IOW( 0x4a, drm_r128_vertex_t)
+#define DRM_IOCTL_R128_INDICES		DRM_IOW( 0x4b, drm_r128_indices_t)
+#define DRM_IOCTL_R128_BLIT		DRM_IOW( 0x4c, drm_r128_blit_t)
+#define DRM_IOCTL_R128_DEPTH		DRM_IOW( 0x4d, drm_r128_depth_t)
+#define DRM_IOCTL_R128_STIPPLE		DRM_IOW( 0x4e, drm_r128_stipple_t)
+#define DRM_IOCTL_R128_INDIRECT		DRM_IOWR(0x4f, drm_r128_indirect_t)
+
+/* Radeon specific ioctls */
+#define DRM_IOCTL_RADEON_CP_INIT	DRM_IOW( 0x40, drm_radeon_init_t)
+#define DRM_IOCTL_RADEON_CP_START	DRM_IO( 0x41)
+#define DRM_IOCTL_RADEON_CP_STOP	DRM_IOW( 0x42, drm_radeon_cp_stop_t)
+#define DRM_IOCTL_RADEON_CP_RESET	DRM_IO( 0x43)
+#define DRM_IOCTL_RADEON_CP_IDLE	DRM_IO( 0x44)
+#define DRM_IOCTL_RADEON_RESET		DRM_IO( 0x45)
+#define DRM_IOCTL_RADEON_FULLSCREEN	DRM_IOW( 0x46, drm_radeon_fullscreen_t)
+#define DRM_IOCTL_RADEON_SWAP		DRM_IO( 0x47)
+#define DRM_IOCTL_RADEON_CLEAR		DRM_IOW( 0x48, drm_radeon_clear_t)
+#define DRM_IOCTL_RADEON_VERTEX		DRM_IOW( 0x49, drm_radeon_vertex_t)
+#define DRM_IOCTL_RADEON_INDICES	DRM_IOW( 0x4a, drm_radeon_indices_t)
+#define DRM_IOCTL_RADEON_BLIT		DRM_IOW( 0x4b, drm_radeon_blit_t)
+#define DRM_IOCTL_RADEON_STIPPLE	DRM_IOW( 0x4c, drm_radeon_stipple_t)
+#define DRM_IOCTL_RADEON_INDIRECT	DRM_IOWR(0x4d, drm_radeon_indirect_t)
+
+#ifdef CONFIG_DRM_SIS
 /* SiS specific ioctls */
-#define SIS_IOCTL_FB_ALLOC	DRM_IOWR( 0x44, drm_sis_mem_t)
-#define SIS_IOCTL_FB_FREE	DRM_IOW( 0x45, drm_sis_mem_t)
-#define SIS_IOCTL_AGP_INIT	DRM_IOWR( 0x53, drm_sis_agp_t)
-#define SIS_IOCTL_AGP_ALLOC	DRM_IOWR( 0x54, drm_sis_mem_t)
-#define SIS_IOCTL_AGP_FREE	DRM_IOW( 0x55, drm_sis_mem_t)
-#define SIS_IOCTL_FLIP		DRM_IOW( 0x48, drm_sis_flip_t)
-#define SIS_IOCTL_FLIP_INIT	DRM_IO( 0x49)
-#define SIS_IOCTL_FLIP_FINAL	DRM_IO( 0x50)
+#define SIS_IOCTL_FB_ALLOC	DRM_IOWR(0x44, drm_sis_mem_t)
+#define SIS_IOCTL_FB_FREE	DRM_IOW( 0x45, drm_sis_mem_t)
+#define SIS_IOCTL_AGP_INIT	DRM_IOWR(0x53, drm_sis_agp_t)
+#define SIS_IOCTL_AGP_ALLOC	DRM_IOWR(0x54, drm_sis_mem_t)
+#define SIS_IOCTL_AGP_FREE	DRM_IOW( 0x55, drm_sis_mem_t)
+#define SIS_IOCTL_FLIP		DRM_IOW( 0x48, drm_sis_flip_t)
+#define SIS_IOCTL_FLIP_INIT	DRM_IO( 0x49)
+#define SIS_IOCTL_FLIP_FINAL	DRM_IO( 0x50)
+#endif
 
 #endif
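
For readers who want to see how the new radeon entry points are driven from the other side, here is a small user-space sketch (illustrative only, not part of this commit) of submitting a previously filled DMA buffer through DRM_IOCTL_RADEON_VERTEX. It assumes an already-open file descriptor on the card's DRM device node, a buffer index obtained through the generic buffer-mapping ioctls, and that the caller holds the DRM hardware lock, since radeon_cp_vertex() rejects unlocked callers; the helper name and exact field semantics are assumptions, not part of the change.

/* Illustrative sketch only -- not part of this commit. */
#include <stdio.h>
#include <sys/ioctl.h>
#include "drm.h"		/* now pulls in radeon_drm.h, see above */

/* Hand DMA buffer `idx' to the kernel as vertex data.  The fields match
 * what radeon_cp_vertex() reads: idx selects the buffer, count becomes
 * buf->used, prim selects the primitive type, and discard returns the
 * buffer to the kernel once it has been dispatched.
 */
static int radeon_submit_vertices( int fd, int idx, int count, int prim )
{
	drm_radeon_vertex_t vertex;

	vertex.idx     = idx;
	vertex.count   = count;
	vertex.prim    = prim;
	vertex.discard = 1;

	if ( ioctl( fd, DRM_IOCTL_RADEON_VERTEX, &vertex ) < 0 ) {
		perror( "DRM_IOCTL_RADEON_VERTEX" );
		return -1;
	}
	return 0;
}

In practice this call would be made by the DRI client-side 3D driver rather than by applications directly; the sketch is only meant to make the shape of the new ioctl interface concrete.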