author | David Dawes <dawes@xfree86.org> | 2002-02-14 02:00:26 +0000
---|---|---
committer | David Dawes <dawes@xfree86.org> | 2002-02-14 02:00:26 +0000
commit | ab87c5d0d1b5c35006ce8b99a9260e3116c732dc (patch) |
tree | 24d25b8fa4987897180d249b671ac2ab9e8edab1 /linux |
parent | 65d25572deec33b7da13c211bf0aa78c361f535a (diff) |
First pass of mesa-4-0 branch merge into trunk.
Diffstat (limited to 'linux')
-rw-r--r-- | linux/drm.h | 7
-rw-r--r-- | linux/drm_auth.h | 1
-rw-r--r-- | linux/drm_dma.h | 19
-rw-r--r-- | linux/drm_drv.h | 2
-rw-r--r-- | linux/drm_lists.h | 2
-rw-r--r-- | linux/drm_scatter.h | 2
-rw-r--r-- | linux/gamma.h | 36
-rw-r--r-- | linux/gamma_dma.c | 316
-rw-r--r-- | linux/gamma_drm.h | 79
-rw-r--r-- | linux/gamma_drv.c | 12
-rw-r--r-- | linux/gamma_drv.h | 19
-rw-r--r-- | linux/i810.h | 48
-rw-r--r-- | linux/i810_dma.c | 371
-rw-r--r-- | linux/i810_drm.h | 13
-rw-r--r-- | linux/i810_drv.c | 14
-rw-r--r-- | linux/i810_drv.h | 11
-rw-r--r-- | linux/i830_dma.c | 2
-rw-r--r-- | linux/mga_state.c | 8
-rw-r--r-- | linux/radeon_cp.c | 16
-rw-r--r-- | linux/radeon_drm.h | 75
-rw-r--r-- | linux/radeon_drv.c | 14
-rw-r--r-- | linux/radeon_drv.h | 15
-rw-r--r-- | linux/radeon_state.c | 655
23 files changed, 1046 insertions, 691 deletions
diff --git a/linux/drm.h b/linux/drm.h index 0db586fc..79dd5814 100644 --- a/linux/drm.h +++ b/linux/drm.h @@ -106,6 +106,7 @@ typedef struct drm_tex_region { #include "radeon_drm.h" #include "sis_drm.h" #include "i830_drm.h" +#include "gamma_drm.h" typedef struct drm_version { int version_major; /* Major version */ @@ -471,6 +472,7 @@ typedef struct drm_scatter_gather { #define DRM_IOCTL_R128_STIPPLE DRM_IOW( 0x4d, drm_r128_stipple_t) #define DRM_IOCTL_R128_INDIRECT DRM_IOWR(0x4f, drm_r128_indirect_t) #define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( 0x50, drm_r128_fullscreen_t) +#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( 0x51, drm_r128_clear2_t) /* Radeon specific ioctls */ #define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( 0x40, drm_radeon_init_t) @@ -487,6 +489,11 @@ typedef struct drm_scatter_gather { #define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( 0x4c, drm_radeon_stipple_t) #define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t) #define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t) +#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( 0x4f, drm_radeon_vertex_t) + +/* Gamma specific ioctls */ +#define DRM_IOCTL_GAMMA_INIT DRM_IOW( 0x40, drm_gamma_init_t) +#define DRM_IOCTL_GAMMA_COPY DRM_IOW( 0x41, drm_gamma_copy_t) /* SiS specific ioctls */ #define SIS_IOCTL_FB_ALLOC DRM_IOWR(0x44, drm_sis_mem_t) diff --git a/linux/drm_auth.h b/linux/drm_auth.h index 2636e617..8fce6dfc 100644 --- a/linux/drm_auth.h +++ b/linux/drm_auth.h @@ -64,6 +64,7 @@ int DRM(add_magic)(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic) hash = DRM(hash_magic)(magic); entry = DRM(alloc)(sizeof(*entry), DRM_MEM_MAGIC); if (!entry) return -ENOMEM; + memset(entry, 0, sizeof(*entry)); entry->magic = magic; entry->priv = priv; entry->next = NULL; diff --git a/linux/drm_dma.h b/linux/drm_dma.h index 85fa1472..dce376f6 100644 --- a/linux/drm_dma.h +++ b/linux/drm_dma.h @@ -598,6 +598,25 @@ int DRM(control)( struct inode *inode, struct file *filp, } } +#else + +int DRM(control)( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_control_t ctl; + + if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) ) + return -EFAULT; + + switch ( ctl.func ) { + case DRM_INST_HANDLER: + case DRM_UNINST_HANDLER: + return 0; + default: + return -EINVAL; + } +} + #endif /* __HAVE_DMA_IRQ */ #endif /* __HAVE_DMA */ diff --git a/linux/drm_drv.h b/linux/drm_drv.h index bf8d76b8..49862f3f 100644 --- a/linux/drm_drv.h +++ b/linux/drm_drv.h @@ -187,10 +187,8 @@ static drm_ioctl_desc_t DRM(ioctls)[] = { /* The DRM_IOCTL_DMA ioctl should be defined by the driver. 
*/ -#if __HAVE_DMA_IRQ [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { DRM(control), 1, 1 }, #endif -#endif #if __REALLY_HAVE_AGP [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { DRM(agp_acquire), 1, 1 }, diff --git a/linux/drm_lists.h b/linux/drm_lists.h index 3ad487fd..5cd8cd47 100644 --- a/linux/drm_lists.h +++ b/linux/drm_lists.h @@ -42,7 +42,7 @@ int DRM(waitlist_create)(drm_waitlist_t *bl, int count) DRM_MEM_BUFLISTS); if(!bl->bufs) return -ENOMEM; - + memset(bl->bufs, 0, sizeof(*bl->bufs)); bl->count = count; bl->rp = bl->bufs; bl->wp = bl->bufs; diff --git a/linux/drm_scatter.h b/linux/drm_scatter.h index a6b8275f..07e8e4e5 100644 --- a/linux/drm_scatter.h +++ b/linux/drm_scatter.h @@ -97,6 +97,8 @@ int DRM(sg_alloc)( struct inode *inode, struct file *filp, return -ENOMEM; } + memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist)); + entry->busaddr = DRM(alloc)( pages * sizeof(*entry->busaddr), DRM_MEM_PAGES ); if ( !entry->busaddr ) { diff --git a/linux/gamma.h b/linux/gamma.h index 232ed018..a38c3c28 100644 --- a/linux/gamma.h +++ b/linux/gamma.h @@ -41,6 +41,8 @@ /* DMA customization: */ #define __HAVE_DMA 1 +#define __HAVE_AGP 1 +#define __MUST_HAVE_AGP 0 #define __HAVE_OLD_DMA 1 #define __HAVE_PCI_DMA 1 @@ -61,33 +63,61 @@ #define __HAVE_DMA_QUIESCENT 1 #define DRIVER_DMA_QUIESCENT() do { \ /* FIXME ! */ \ - gamma_dma_quiescent_dual(dev); \ + gamma_dma_quiescent_single(dev); \ return 0; \ } while (0) #define __HAVE_DMA_IRQ 1 #define __HAVE_DMA_IRQ_BH 1 + +#if 1 #define DRIVER_PREINSTALL() do { \ drm_gamma_private_t *dev_priv = \ (drm_gamma_private_t *)dev->dev_private;\ - GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000000 ); \ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2); \ + GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000004 ); \ GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 ); \ } while (0) - #define DRIVER_POSTINSTALL() do { \ drm_gamma_private_t *dev_priv = \ (drm_gamma_private_t *)dev->dev_private;\ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2); \ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3); \ GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002001 ); \ GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000008 ); \ GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00039090 ); \ } while (0) +#else +#define DRIVER_POSTINSTALL() do { \ + drm_gamma_private_t *dev_priv = \ + (drm_gamma_private_t *)dev->dev_private;\ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2); \ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2); \ + GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002000 ); \ + GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000004 ); \ +} while (0) + +#define DRIVER_PREINSTALL() do { \ + drm_gamma_private_t *dev_priv = \ + (drm_gamma_private_t *)dev->dev_private;\ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2); \ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2); \ + GAMMA_WRITE( GAMMA_GCOMMANDMODE, GAMMA_QUEUED_DMA_MODE );\ + GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 );\ +} while (0) +#endif #define DRIVER_UNINSTALL() do { \ drm_gamma_private_t *dev_priv = \ (drm_gamma_private_t *)dev->dev_private;\ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2); \ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3); \ GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00000000 ); \ GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000000 ); \ GAMMA_WRITE( GAMMA_GINTENABLE, 0x00000000 ); \ } while (0) +#define DRIVER_AGP_BUFFERS_MAP( dev ) \ + ((drm_gamma_private_t *)((dev)->dev_private))->buffers + #endif /* __GAMMA_H__ */ diff --git a/linux/gamma_dma.c b/linux/gamma_dma.c index 7510e2b5..094f51d6 100644 --- a/linux/gamma_dma.c +++ b/linux/gamma_dma.c @@ -37,28 +37,25 @@ #include <linux/interrupt.h> /* For task queue support */ #include 
<linux/delay.h> - static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address, unsigned long length) { drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; - - GAMMA_WRITE(GAMMA_DMAADDRESS, virt_to_phys((void *)address)); - while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4) - ; + (drm_gamma_private_t *)dev->dev_private; + mb(); + while ( GAMMA_READ(GAMMA_INFIFOSPACE) < 2); + GAMMA_WRITE(GAMMA_DMAADDRESS, address); + while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4); GAMMA_WRITE(GAMMA_DMACOUNT, length / 4); } void gamma_dma_quiescent_single(drm_device_t *dev) { drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; + (drm_gamma_private_t *)dev->dev_private; + while (GAMMA_READ(GAMMA_DMACOUNT)); - while (GAMMA_READ(GAMMA_DMACOUNT)) - ; - while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) - ; + while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2); GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10); GAMMA_WRITE(GAMMA_SYNC, 0); @@ -72,56 +69,50 @@ void gamma_dma_quiescent_single(drm_device_t *dev) void gamma_dma_quiescent_dual(drm_device_t *dev) { drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; + (drm_gamma_private_t *)dev->dev_private; + while (GAMMA_READ(GAMMA_DMACOUNT)); - while (GAMMA_READ(GAMMA_DMACOUNT)) - ; - while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) - ; + while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3); GAMMA_WRITE(GAMMA_BROADCASTMASK, 3); - GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10); GAMMA_WRITE(GAMMA_SYNC, 0); - /* Read from first MX */ + /* Read from first MX */ do { - while (!GAMMA_READ(GAMMA_OUTFIFOWORDS)) - ; + while (!GAMMA_READ(GAMMA_OUTFIFOWORDS)); } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG); - /* Read from second MX */ + /* Read from second MX */ do { - while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000)) - ; + while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000)); } while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG); } void gamma_dma_ready(drm_device_t *dev) { drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; - - while (GAMMA_READ(GAMMA_DMACOUNT)) - ; + (drm_gamma_private_t *)dev->dev_private; + while (GAMMA_READ(GAMMA_DMACOUNT)); } static inline int gamma_dma_is_ready(drm_device_t *dev) { drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; - - return !GAMMA_READ(GAMMA_DMACOUNT); + (drm_gamma_private_t *)dev->dev_private; + return(!GAMMA_READ(GAMMA_DMACOUNT)); } void gamma_dma_service(int irq, void *device, struct pt_regs *regs) { - drm_device_t *dev = (drm_device_t *)device; - drm_device_dma_t *dma = dev->dma; + drm_device_t *dev = (drm_device_t *)device; + drm_device_dma_t *dma = dev->dma; drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; + (drm_gamma_private_t *)dev->dev_private; atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */ + + while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3); GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0x05S */ GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8); GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001); @@ -165,7 +156,9 @@ static int gamma_do_dma(drm_device_t *dev, int locked) } buf = dma->next_buffer; - address = (unsigned long)buf->address; + /* WE NOW ARE ON LOGICAL PAGES!! - using page table setup in dma_init */ + /* So we pass the buffer index value into the physical page offset */ + address = buf->idx << 12; length = buf->used; DRM_DEBUG("context %d, buffer %d (%ld bytes)\n", @@ -232,6 +225,9 @@ static int gamma_do_dma(drm_device_t *dev, int locked) buf->time_dispatched = get_cycles(); #endif + /* WE NOW ARE ON LOGICAL PAGES!!! 
- overriding address */ + address = buf->idx << 12; + gamma_dma_dispatch(dev, address, length); gamma_free_buffer(dev, dma->this_buffer); dma->this_buffer = buf; @@ -582,3 +578,255 @@ int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd, return retcode; } + +/* ============================================================= + * DMA initialization, cleanup + */ + +static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init ) +{ + drm_gamma_private_t *dev_priv; + drm_device_dma_t *dma = dev->dma; + drm_buf_t *buf; + int i; + struct list_head *list; + unsigned int *pgt; + + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + dev_priv = DRM(alloc)( sizeof(drm_gamma_private_t), + DRM_MEM_DRIVER ); + if ( !dev_priv ) + return -ENOMEM; + + dev->dev_private = (void *)dev_priv; + + memset( dev_priv, 0, sizeof(drm_gamma_private_t) ); + + list_for_each(list, &dev->maplist->head) { + drm_map_list_t *r_list = (drm_map_list_t *)list; + if( r_list->map && + r_list->map->type == _DRM_SHM && + r_list->map->flags & _DRM_CONTAINS_LOCK ) { + dev_priv->sarea = r_list->map; + break; + } + } + + DRM_FIND_MAP( dev_priv->mmio0, init->mmio0 ); + DRM_FIND_MAP( dev_priv->mmio1, init->mmio1 ); + DRM_FIND_MAP( dev_priv->mmio2, init->mmio2 ); + DRM_FIND_MAP( dev_priv->mmio3, init->mmio3 ); + + dev_priv->sarea_priv = (drm_gamma_sarea_t *) + ((u8 *)dev_priv->sarea->handle + + init->sarea_priv_offset); + + if (init->pcimode) { + buf = dma->buflist[GLINT_DRI_BUF_COUNT]; + pgt = buf->address; + + for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) { + buf = dma->buflist[i]; + *pgt = virt_to_phys((void*)buf->address) | 0x07; + pgt++; + } + + buf = dma->buflist[GLINT_DRI_BUF_COUNT]; + } else { + DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset ); + + DRM_IOREMAP( dev_priv->buffers ); + + buf = dma->buflist[GLINT_DRI_BUF_COUNT]; + pgt = buf->address; + + for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) { + buf = dma->buflist[i]; + *pgt = (unsigned int)buf->address + 0x07; + pgt++; + } + + buf = dma->buflist[GLINT_DRI_BUF_COUNT]; + + while (GAMMA_READ(GAMMA_INFIFOSPACE) < 1); + GAMMA_WRITE( GAMMA_GDMACONTROL, 0xe); + } + while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2); + GAMMA_WRITE( GAMMA_PAGETABLEADDR, virt_to_phys((void*)buf->address) ); + GAMMA_WRITE( GAMMA_PAGETABLELENGTH, 2 ); + + return 0; +} + +int gamma_do_cleanup_dma( drm_device_t *dev ) +{ + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + if ( dev->dev_private ) { + drm_gamma_private_t *dev_priv = dev->dev_private; + + DRM_IOREMAPFREE( dev_priv->buffers ); + + DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t), + DRM_MEM_DRIVER ); + dev->dev_private = NULL; + } + + return 0; +} + +int gamma_dma_init( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_gamma_init_t init; + + if ( copy_from_user( &init, (drm_gamma_init_t *)arg, sizeof(init) ) ) + return -EFAULT; + + switch ( init.func ) { + case GAMMA_INIT_DMA: + return gamma_do_init_dma( dev, &init ); + case GAMMA_CLEANUP_DMA: + return gamma_do_cleanup_dma( dev ); + } + + return -EINVAL; +} + +static int gamma_do_copy_dma( drm_device_t *dev, drm_gamma_copy_t *copy ) +{ + drm_device_dma_t *dma = dev->dma; + unsigned int *screenbuf; + + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + /* We've DRM_RESTRICTED this DMA buffer */ + + screenbuf = dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ]->address; + +#if 0 + *buffer++ = 0x180; /* Tag (FilterMode) */ + *buffer++ = 0x200; /* Allow FBColor through */ + *buffer++ = 0x53B; /* Tag */ + 
*buffer++ = copy->Pitch; + *buffer++ = 0x53A; /* Tag */ + *buffer++ = copy->SrcAddress; + *buffer++ = 0x539; /* Tag */ + *buffer++ = copy->WidthHeight; /* Initiates transfer */ + *buffer++ = 0x53C; /* Tag - DMAOutputAddress */ + *buffer++ = virt_to_phys((void*)screenbuf); + *buffer++ = 0x53D; /* Tag - DMAOutputCount */ + *buffer++ = copy->Count; /* Reads HostOutFifo BLOCKS until ..*/ + + /* Data now sitting in dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ] */ + /* Now put it back to the screen */ + + *buffer++ = 0x180; /* Tag (FilterMode) */ + *buffer++ = 0x400; /* Allow Sync through */ + *buffer++ = 0x538; /* Tag - DMARectangleReadTarget */ + *buffer++ = 0x155; /* FBSourceData | count */ + *buffer++ = 0x537; /* Tag */ + *buffer++ = copy->Pitch; + *buffer++ = 0x536; /* Tag */ + *buffer++ = copy->DstAddress; + *buffer++ = 0x535; /* Tag */ + *buffer++ = copy->WidthHeight; /* Initiates transfer */ + *buffer++ = 0x530; /* Tag - DMAAddr */ + *buffer++ = virt_to_phys((void*)screenbuf); + *buffer++ = 0x531; + *buffer++ = copy->Count; /* initiates DMA transfer of color data */ +#endif + + /* need to dispatch it now */ + + return 0; +} + +int gamma_dma_copy( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_gamma_copy_t copy; + + if ( copy_from_user( ©, (drm_gamma_copy_t *)arg, sizeof(copy) ) ) + return -EFAULT; + + return gamma_do_copy_dma( dev, © ); +} + +/* ============================================================= + * Per Context SAREA Support + */ + +int gamma_getsareactx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_ctx_priv_map_t request; + drm_map_t *map; + + if (copy_from_user(&request, + (drm_ctx_priv_map_t *)arg, + sizeof(request))) + return -EFAULT; + + down(&dev->struct_sem); + if ((int)request.ctx_id >= dev->max_context) { + up(&dev->struct_sem); + return -EINVAL; + } + + map = dev->context_sareas[request.ctx_id]; + up(&dev->struct_sem); + + request.handle = map->handle; + if (copy_to_user((drm_ctx_priv_map_t *)arg, &request, sizeof(request))) + return -EFAULT; + return 0; +} + +int gamma_setsareactx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_ctx_priv_map_t request; + drm_map_t *map = NULL; + drm_map_list_t *r_list; + struct list_head *list; + + if (copy_from_user(&request, + (drm_ctx_priv_map_t *)arg, + sizeof(request))) + return -EFAULT; + + down(&dev->struct_sem); + r_list = NULL; + list_for_each(list, &dev->maplist->head) { + r_list = (drm_map_list_t *)list; + if(r_list->map && + r_list->map->handle == request.handle) break; + } + if (list == &(dev->maplist->head)) { + up(&dev->struct_sem); + return -EINVAL; + } + map = r_list->map; + up(&dev->struct_sem); + + if (!map) return -EINVAL; + + down(&dev->struct_sem); + if ((int)request.ctx_id >= dev->max_context) { + up(&dev->struct_sem); + return -EINVAL; + } + dev->context_sareas[request.ctx_id] = map; + up(&dev->struct_sem); + return 0; +} diff --git a/linux/gamma_drm.h b/linux/gamma_drm.h new file mode 100644 index 00000000..d06763ae --- /dev/null +++ b/linux/gamma_drm.h @@ -0,0 +1,79 @@ +#ifndef _GAMMA_DRM_H_ +#define _GAMMA_DRM_H_ + +typedef struct _drm_gamma_tex_region { + unsigned char next, prev; /* indices to form a circular LRU */ + unsigned char in_use; /* owned by a client, or free? 
*/ + int age; /* tracked by clients to update local LRU's */ +} drm_gamma_tex_region_t; + +typedef struct { + unsigned int GDeltaMode; + unsigned int GDepthMode; + unsigned int GGeometryMode; + unsigned int GTransformMode; +} drm_gamma_context_regs_t; + +typedef struct _drm_gamma_sarea { + drm_gamma_context_regs_t context_state; + + unsigned int dirty; + + + /* Maintain an LRU of contiguous regions of texture space. If + * you think you own a region of texture memory, and it has an + * age different to the one you set, then you are mistaken and + * it has been stolen by another client. If global texAge + * hasn't changed, there is no need to walk the list. + * + * These regions can be used as a proxy for the fine-grained + * texture information of other clients - by maintaining them + * in the same lru which is used to age their own textures, + * clients have an approximate lru for the whole of global + * texture space, and can make informed decisions as to which + * areas to kick out. There is no need to choose whether to + * kick out your own texture or someone else's - simply eject + * them all in LRU order. + */ + +#define GAMMA_NR_TEX_REGIONS 64 + drm_gamma_tex_region_t texList[GAMMA_NR_TEX_REGIONS+1]; + /* Last elt is sentinal */ + int texAge; /* last time texture was uploaded */ + int last_enqueue; /* last time a buffer was enqueued */ + int last_dispatch; /* age of the most recently dispatched buffer */ + int last_quiescent; /* */ + int ctxOwner; /* last context to upload state */ + + int vertex_prim; +} drm_gamma_sarea_t; + +typedef struct drm_gamma_copy { + unsigned int DMAOutputAddress; + unsigned int DMAOutputCount; + unsigned int DMAReadGLINTSource; + unsigned int DMARectangleWriteAddress; + unsigned int DMARectangleWriteLinePitch; + unsigned int DMARectangleWrite; + unsigned int DMARectangleReadAddress; + unsigned int DMARectangleReadLinePitch; + unsigned int DMARectangleRead; + unsigned int DMARectangleReadTarget; +} drm_gamma_copy_t; + +typedef struct drm_gamma_init { + enum { + GAMMA_INIT_DMA = 0x01, + GAMMA_CLEANUP_DMA = 0x02 + } func; + + int sarea_priv_offset; + int pcimode; + unsigned int mmio0; + unsigned int mmio1; + unsigned int mmio2; + unsigned int mmio3; + unsigned int buffers_offset; +} drm_gamma_init_t; + +#endif /* _GAMMA_DRM_H_ */ diff --git a/linux/gamma_drv.c b/linux/gamma_drv.c index c05465c5..58cea241 100644 --- a/linux/gamma_drv.c +++ b/linux/gamma_drv.c @@ -38,15 +38,19 @@ #define DRIVER_NAME "gamma" #define DRIVER_DESC "3DLabs gamma" -#define DRIVER_DATE "20010216" +#define DRIVER_DATE "20010624" -#define DRIVER_MAJOR 1 +#define DRIVER_MAJOR 2 #define DRIVER_MINOR 0 #define DRIVER_PATCHLEVEL 0 #define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { gamma_dma, 1, 0 } + [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { gamma_dma, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_GAMMA_INIT)] = { gamma_dma_init, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_GAMMA_COPY)] = { gamma_dma_copy, 1, 1 } +#define IOCTL_TABLE_NAME DRM(ioctls) +#define IOCTL_FUNC_NAME DRM(ioctl) #define __HAVE_COUNTERS 5 #define __HAVE_COUNTER6 _DRM_STAT_IRQ @@ -57,6 +61,7 @@ #include "drm_auth.h" +#include "drm_agpsupport.h" #include "drm_bufs.h" #include "drm_context.h" #include "drm_dma.h" @@ -82,7 +87,6 @@ static int __init gamma_options( char *str ) __setup( DRIVER_NAME "=", gamma_options ); #endif - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff --git a/linux/gamma_drv.h b/linux/gamma_drv.h index 68b52070..e7d0c896 100644 --- a/linux/gamma_drv.h +++ b/linux/gamma_drv.h @@ -32,8 +32,9 @@ 
#ifndef _GAMMA_DRV_H_ #define _GAMMA_DRV_H_ - typedef struct drm_gamma_private { + drm_gamma_sarea_t *sarea_priv; + drm_map_t *sarea; drm_map_t *buffers; drm_map_t *mmio0; drm_map_t *mmio1; @@ -51,6 +52,11 @@ do { \ } \ } while (0) + /* gamma_dma.c */ +extern int gamma_dma_init( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int gamma_dma_copy( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); extern void gamma_dma_ready(drm_device_t *dev); extern void gamma_dma_quiescent_single(drm_device_t *dev); @@ -63,6 +69,7 @@ extern int gamma_dma(struct inode *inode, struct file *filp, extern int gamma_find_devices(void); extern int gamma_found(void); +#define GLINT_DRI_BUF_COUNT 256 #define GAMMA_OFF(reg) \ ((reg < 0x1000) \ @@ -78,7 +85,6 @@ extern int gamma_found(void); ((reg < 0x10000) ? dev_priv->mmio1->handle : \ ((reg < 0x11000) ? dev_priv->mmio2->handle : \ dev_priv->mmio3->handle)))) - #define GAMMA_ADDR(reg) (GAMMA_BASE(reg) + GAMMA_OFF(reg)) #define GAMMA_DEREF(reg) *(__volatile__ int *)GAMMA_ADDR(reg) #define GAMMA_READ(reg) GAMMA_DEREF(reg) @@ -91,9 +97,11 @@ extern int gamma_found(void); #define GAMMA_FILTERMODE 0x8c00 #define GAMMA_GCOMMANDINTFLAGS 0x0c50 #define GAMMA_GCOMMANDMODE 0x0c40 +#define GAMMA_QUEUED_DMA_MODE 1<<1 #define GAMMA_GCOMMANDSTATUS 0x0c60 #define GAMMA_GDELAYTIMER 0x0c38 #define GAMMA_GDMACONTROL 0x0060 +#define GAMMA_USE_AGP 1<<1 #define GAMMA_GINTENABLE 0x0808 #define GAMMA_GINTFLAGS 0x0810 #define GAMMA_INFIFOSPACE 0x0018 @@ -101,5 +109,12 @@ extern int gamma_found(void); #define GAMMA_OUTPUTFIFO 0x2000 #define GAMMA_SYNC 0x8c40 #define GAMMA_SYNC_TAG 0x0188 +#define GAMMA_PAGETABLEADDR 0x0C00 +#define GAMMA_PAGETABLELENGTH 0x0C08 + +#define GAMMA_PASSTHROUGH 0x1FE +#define GAMMA_DMAADDRTAG 0x530 +#define GAMMA_DMACOUNTTAG 0x531 +#define GAMMA_COMMANDINTTAG 0x532 #endif diff --git a/linux/i810.h b/linux/i810.h index a5152bc0..64309f59 100644 --- a/linux/i810.h +++ b/linux/i810.h @@ -60,50 +60,10 @@ i810_dma_quiescent( dev ); \ } while (0) -#define __HAVE_DMA_IRQ 1 -#define __HAVE_DMA_IRQ_BH 1 -#define __HAVE_SHARED_IRQ 1 -#define DRIVER_PREINSTALL() do { \ - drm_i810_private_t *dev_priv = \ - (drm_i810_private_t *)dev->dev_private; \ - u16 tmp; \ - tmp = I810_READ16( I810REG_HWSTAM ); \ - tmp = tmp & 0x6000; \ - I810_WRITE16( I810REG_HWSTAM, tmp ); \ - \ - tmp = I810_READ16( I810REG_INT_MASK_R ); \ - tmp = tmp & 0x6000; /* Unmask interrupts */ \ - I810_WRITE16( I810REG_INT_MASK_R, tmp ); \ - tmp = I810_READ16( I810REG_INT_ENABLE_R ); \ - tmp = tmp & 0x6000; /* Disable all interrupts */ \ - I810_WRITE16( I810REG_INT_ENABLE_R, tmp ); \ -} while (0) - -#define DRIVER_POSTINSTALL() do { \ - drm_i810_private_t *dev_priv = \ - (drm_i810_private_t *)dev->dev_private; \ - u16 tmp; \ - tmp = I810_READ16( I810REG_INT_ENABLE_R ); \ - tmp = tmp & 0x6000; \ - tmp = tmp | 0x0003; /* Enable bp & user interrupts */ \ - I810_WRITE16( I810REG_INT_ENABLE_R, tmp ); \ -} while (0) - -#define DRIVER_UNINSTALL() do { \ - drm_i810_private_t *dev_priv = \ - (drm_i810_private_t *)dev->dev_private; \ - u16 tmp; \ - if ( dev_priv ) { \ - tmp = I810_READ16( I810REG_INT_IDENTITY_R ); \ - tmp = tmp & ~(0x6000); /* Clear all interrupts */ \ - if ( tmp != 0 ) \ - I810_WRITE16( I810REG_INT_IDENTITY_R, tmp ); \ - \ - tmp = I810_READ16( I810REG_INT_ENABLE_R ); \ - tmp = tmp & 0x6000; /* Disable all interrupts */ \ - I810_WRITE16( I810REG_INT_ENABLE_R, tmp ); \ - } \ -} while (0) +/* Don't need an irq any more. 
The template code will make sure that + * a noop stub is generated for compatibility. + */ +#define __HAVE_DMA_IRQ 0 /* Buffer customization: */ diff --git a/linux/i810_dma.c b/linux/i810_dma.c index ae0a3616..4f434199 100644 --- a/linux/i810_dma.c +++ b/linux/i810_dma.c @@ -26,7 +26,7 @@ * * Authors: Rickard E. (Rik) Faith <faith@valinux.com> * Jeff Hartmann <jhartmann@valinux.com> - * Keith Whitwell <keithw@valinux.com> + * Keith Whitwell <keith_whitwell@yahoo.com> * */ @@ -36,11 +36,6 @@ #include "i810_drv.h" #include <linux/interrupt.h> /* For task queue support */ -/* in case we don't have a 2.3.99-pre6 kernel or later: */ -#ifndef VM_DONTCOPY -#define VM_DONTCOPY 0 -#endif - #define I810_BUF_FREE 2 #define I810_BUF_CLIENT 1 #define I810_BUF_HARDWARE 0 @@ -50,30 +45,28 @@ #define RING_LOCALS unsigned int outring, ringmask; volatile char *virt; -#define BEGIN_LP_RING(n) do { \ - if (I810_VERBOSE) \ - DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \ - n, __FUNCTION__); \ - if (dev_priv->ring.space < n*4) \ - i810_wait_ring(dev, n*4); \ - dev_priv->ring.space -= n*4; \ - outring = dev_priv->ring.tail; \ - ringmask = dev_priv->ring.tail_mask; \ - virt = dev_priv->ring.virtual_start; \ +#define BEGIN_LP_RING(n) do { \ + if (0) DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", n, __FUNCTION__); \ + if (dev_priv->ring.space < n*4) \ + i810_wait_ring(dev, n*4); \ + dev_priv->ring.space -= n*4; \ + outring = dev_priv->ring.tail; \ + ringmask = dev_priv->ring.tail_mask; \ + virt = dev_priv->ring.virtual_start; \ } while (0) -#define ADVANCE_LP_RING() do { \ - if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \ - dev_priv->ring.tail = outring; \ - I810_WRITE(LP_RING + RING_TAIL, outring); \ +#define ADVANCE_LP_RING() do { \ + if (0) DRM_DEBUG("ADVANCE_LP_RING\n"); \ + dev_priv->ring.tail = outring; \ + I810_WRITE(LP_RING + RING_TAIL, outring); \ } while(0) -#define OUT_RING(n) do { \ - if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ - *(volatile unsigned int *)(virt + outring) = n; \ - outring += 4; \ - outring &= ringmask; \ -} while (0); +#define OUT_RING(n) do { \ + if (0) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ + *(volatile unsigned int *)(virt + outring) = n; \ + outring += 4; \ + outring &= ringmask; \ +} while (0) static inline void i810_print_status_page(drm_device_t *dev) { @@ -182,36 +175,31 @@ static int i810_map_buffer(drm_buf_t *buf, struct file *filp) if(buf_priv->currently_mapped == I810_BUF_MAPPED) return -EINVAL; - if(VM_DONTCOPY != 0) { #if LINUX_VERSION_CODE <= 0x020402 - down( ¤t->mm->mmap_sem ); + down( ¤t->mm->mmap_sem ); #else - down_write( ¤t->mm->mmap_sem ); + down_write( ¤t->mm->mmap_sem ); #endif - old_fops = filp->f_op; - filp->f_op = &i810_buffer_fops; - dev_priv->mmap_buffer = buf; - buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, - PROT_READ|PROT_WRITE, - MAP_SHARED, - buf->bus_address); - dev_priv->mmap_buffer = NULL; - filp->f_op = old_fops; - if ((unsigned long)buf_priv->virtual > -1024UL) { - /* Real error */ - DRM_DEBUG("mmap error\n"); - retcode = (signed int)buf_priv->virtual; - buf_priv->virtual = 0; - } + old_fops = filp->f_op; + filp->f_op = &i810_buffer_fops; + dev_priv->mmap_buffer = buf; + buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, + PROT_READ|PROT_WRITE, + MAP_SHARED, + buf->bus_address); + dev_priv->mmap_buffer = NULL; + filp->f_op = old_fops; + if ((unsigned long)buf_priv->virtual > -1024UL) { + /* Real error */ + DRM_DEBUG("mmap error\n"); + retcode = (signed int)buf_priv->virtual; + buf_priv->virtual = 0; + } #if LINUX_VERSION_CODE 
<= 0x020402 - up( ¤t->mm->mmap_sem ); + up( ¤t->mm->mmap_sem ); #else - up_write( ¤t->mm->mmap_sem ); + up_write( ¤t->mm->mmap_sem ); #endif - } else { - buf_priv->virtual = buf_priv->kernel_virtual; - buf_priv->currently_mapped = I810_BUF_MAPPED; - } return retcode; } @@ -220,23 +208,21 @@ static int i810_unmap_buffer(drm_buf_t *buf) drm_i810_buf_priv_t *buf_priv = buf->dev_private; int retcode = 0; - if(VM_DONTCOPY != 0) { - if(buf_priv->currently_mapped != I810_BUF_MAPPED) - return -EINVAL; + if(buf_priv->currently_mapped != I810_BUF_MAPPED) + return -EINVAL; #if LINUX_VERSION_CODE <= 0x020402 - down( ¤t->mm->mmap_sem ); + down( ¤t->mm->mmap_sem ); #else - down_write( ¤t->mm->mmap_sem ); + down_write( ¤t->mm->mmap_sem ); #endif - retcode = do_munmap(current->mm, - (unsigned long)buf_priv->virtual, - (size_t) buf->total); + retcode = do_munmap(current->mm, + (unsigned long)buf_priv->virtual, + (size_t) buf->total); #if LINUX_VERSION_CODE <= 0x020402 - up( ¤t->mm->mmap_sem ); + up( ¤t->mm->mmap_sem ); #else - up_write( ¤t->mm->mmap_sem ); + up_write( ¤t->mm->mmap_sem ); #endif - } buf_priv->currently_mapped = I810_BUF_UNMAPPED; buf_priv->virtual = 0; @@ -445,9 +431,6 @@ static int i810_dma_initialize(drm_device_t *dev, ((u8 *)dev_priv->sarea_map->handle + init->sarea_priv_offset); - atomic_set(&dev_priv->flush_done, 0); - init_waitqueue_head(&dev_priv->flush_queue); - dev_priv->ring.Start = init->ring_start; dev_priv->ring.End = init->ring_end; dev_priv->ring.Size = init->ring_size; @@ -540,16 +523,12 @@ int i810_dma_init(struct inode *inode, struct file *filp, /* Most efficient way to verify state for the i810 is as it is * emitted. Non-conformant state is silently dropped. - * - * Use 'volatile' & local var tmp to force the emitted values to be - * identical to the verified ones. 
*/ static void i810EmitContextVerified( drm_device_t *dev, - volatile unsigned int *code ) + unsigned int *code ) { drm_i810_private_t *dev_priv = dev->dev_private; int i, j = 0; - unsigned int tmp; RING_LOCALS; BEGIN_LP_RING( I810_CTX_SETUP_SIZE ); @@ -561,14 +540,13 @@ static void i810EmitContextVerified( drm_device_t *dev, OUT_RING( code[I810_CTXREG_ST1] ); for ( i = 4 ; i < I810_CTX_SETUP_SIZE ; i++ ) { - tmp = code[i]; - - if ((tmp & (7<<29)) == (3<<29) && - (tmp & (0x1f<<24)) < (0x1d<<24)) + if ((code[i] & (7<<29)) == (3<<29) && + (code[i] & (0x1f<<24)) < (0x1d<<24)) { - OUT_RING( tmp ); + OUT_RING( code[i] ); j++; } + else printk("constext state dropped!!!\n"); } if (j & 1) @@ -582,7 +560,6 @@ static void i810EmitTexVerified( drm_device_t *dev, { drm_i810_private_t *dev_priv = dev->dev_private; int i, j = 0; - unsigned int tmp; RING_LOCALS; BEGIN_LP_RING( I810_TEX_SETUP_SIZE ); @@ -593,14 +570,14 @@ static void i810EmitTexVerified( drm_device_t *dev, OUT_RING( code[I810_TEXREG_MI3] ); for ( i = 4 ; i < I810_TEX_SETUP_SIZE ; i++ ) { - tmp = code[i]; - if ((tmp & (7<<29)) == (3<<29) && - (tmp & (0x1f<<24)) < (0x1d<<24)) + if ((code[i] & (7<<29)) == (3<<29) && + (code[i] & (0x1f<<24)) < (0x1d<<24)) { - OUT_RING( tmp ); + OUT_RING( code[i] ); j++; } + else printk("texture state dropped!!!\n"); } if (j & 1) @@ -625,9 +602,9 @@ static void i810EmitDestVerified( drm_device_t *dev, if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) { OUT_RING( CMD_OP_DESTBUFFER_INFO ); OUT_RING( tmp ); - } else - DRM_DEBUG("bad di1 %x (allow %x or %x)\n", - tmp, dev_priv->front_di1, dev_priv->back_di1); + } + else + printk("buffer state dropped\n"); /* invarient: */ @@ -712,7 +689,6 @@ static void i810_dma_dispatch_clear( drm_device_t *dev, int flags, continue; if ( flags & I810_FRONT ) { - DRM_DEBUG("clear front\n"); BEGIN_LP_RING( 6 ); OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3 ); @@ -725,7 +701,6 @@ static void i810_dma_dispatch_clear( drm_device_t *dev, int flags, } if ( flags & I810_BACK ) { - DRM_DEBUG("clear back\n"); BEGIN_LP_RING( 6 ); OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3 ); @@ -738,7 +713,6 @@ static void i810_dma_dispatch_clear( drm_device_t *dev, int flags, } if ( flags & I810_DEPTH ) { - DRM_DEBUG("clear depth\n"); BEGIN_LP_RING( 6 ); OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3 ); @@ -764,8 +738,6 @@ static void i810_dma_dispatch_swap( drm_device_t *dev ) int i; RING_LOCALS; - DRM_DEBUG("swapbuffers\n"); - i810_kernel_lost_context(dev); if (nbox > I810_NR_SAREA_CLIPRECTS) @@ -784,10 +756,6 @@ static void i810_dma_dispatch_swap( drm_device_t *dev ) pbox->y2 > dev_priv->h) continue; - DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n", - pbox[i].x1, pbox[i].y1, - pbox[i].x2, pbox[i].y2); - BEGIN_LP_RING( 6 ); OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4 ); OUT_RING( pitch | (0xCC << 16)); @@ -812,7 +780,7 @@ static void i810_dma_dispatch_vertex(drm_device_t *dev, int nbox = sarea_priv->nbox; unsigned long address = (unsigned long)buf->bus_address; unsigned long start = address - dev->agp->base; - int i = 0, u; + int i = 0; RING_LOCALS; i810_kernel_lost_context(dev); @@ -820,33 +788,16 @@ static void i810_dma_dispatch_vertex(drm_device_t *dev, if (nbox > I810_NR_SAREA_CLIPRECTS) nbox = I810_NR_SAREA_CLIPRECTS; - if (discard) { - u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, - I810_BUF_HARDWARE); - if(u != I810_BUF_CLIENT) { - DRM_DEBUG("xxxx 2\n"); - } - } - if (used > 4*1024) used = 0; if (sarea_priv->dirty) i810EmitState( dev ); - 
DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n", - address, used, nbox); - - dev_priv->counter++; - DRM_DEBUG( "dispatch counter : %ld\n", dev_priv->counter); - DRM_DEBUG( "i810_dma_dispatch\n"); - DRM_DEBUG( "start : %lx\n", start); - DRM_DEBUG( "used : %d\n", used); - DRM_DEBUG( "start + used - 4 : %ld\n", start + used - 4); - if (buf_priv->currently_mapped == I810_BUF_MAPPED) { - *(u32 *)buf_priv->virtual = (GFX_OP_PRIMITIVE | - sarea_priv->vertex_prim | + unsigned int prim = (sarea_priv->vertex_prim & PR_MASK); + + *(u32 *)buf_priv->virtual = (GFX_OP_PRIMITIVE | prim | ((used/4)-2)); if (used & 4) { @@ -879,154 +830,62 @@ static void i810_dma_dispatch_vertex(drm_device_t *dev, } while (++i < nbox); } - BEGIN_LP_RING(10); - OUT_RING( CMD_STORE_DWORD_IDX ); - OUT_RING( 20 ); - OUT_RING( dev_priv->counter ); - OUT_RING( 0 ); - if (discard) { + dev_priv->counter++; + + (void) cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, + I810_BUF_HARDWARE); + + BEGIN_LP_RING(8); + OUT_RING( CMD_STORE_DWORD_IDX ); + OUT_RING( 20 ); + OUT_RING( dev_priv->counter ); OUT_RING( CMD_STORE_DWORD_IDX ); OUT_RING( buf_priv->my_use_idx ); OUT_RING( I810_BUF_FREE ); + OUT_RING( CMD_REPORT_HEAD ); OUT_RING( 0 ); + ADVANCE_LP_RING(); } - - OUT_RING( CMD_REPORT_HEAD ); - OUT_RING( 0 ); - ADVANCE_LP_RING(); -} - - -/* Interrupts are only for flushing */ -void i810_dma_service(int irq, void *device, struct pt_regs *regs) -{ - drm_device_t *dev = (drm_device_t *)device; - drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; - u16 temp; - - atomic_inc(&dev->counts[_DRM_STAT_IRQ]); - temp = I810_READ16(I810REG_INT_IDENTITY_R); - temp = temp & ~(0x6000); - if(temp != 0) I810_WRITE16(I810REG_INT_IDENTITY_R, - temp); /* Clear all interrupts */ - else - return; - - queue_task(&dev->tq, &tq_immediate); - mark_bh(IMMEDIATE_BH); } -void i810_dma_immediate_bh(void *device) -{ - drm_device_t *dev = (drm_device_t *) device; - drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; - - atomic_set(&dev_priv->flush_done, 1); - wake_up_interruptible(&dev_priv->flush_queue); -} - -static inline void i810_dma_emit_flush(drm_device_t *dev) -{ - drm_i810_private_t *dev_priv = dev->dev_private; - RING_LOCALS; - - i810_kernel_lost_context(dev); - - BEGIN_LP_RING(2); - OUT_RING( CMD_REPORT_HEAD ); - OUT_RING( GFX_OP_USER_INTERRUPT ); - ADVANCE_LP_RING(); - -/* i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */ -/* atomic_set(&dev_priv->flush_done, 1); */ -/* wake_up_interruptible(&dev_priv->flush_queue); */ -} -static inline void i810_dma_quiescent_emit(drm_device_t *dev) +void i810_dma_quiescent(drm_device_t *dev) { drm_i810_private_t *dev_priv = dev->dev_private; RING_LOCALS; +/* printk("%s\n", __FUNCTION__); */ + i810_kernel_lost_context(dev); BEGIN_LP_RING(4); OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE ); OUT_RING( CMD_REPORT_HEAD ); OUT_RING( 0 ); - OUT_RING( GFX_OP_USER_INTERRUPT ); + OUT_RING( 0 ); ADVANCE_LP_RING(); -/* i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */ -/* atomic_set(&dev_priv->flush_done, 1); */ -/* wake_up_interruptible(&dev_priv->flush_queue); */ -} - -void i810_dma_quiescent(drm_device_t *dev) -{ - DECLARE_WAITQUEUE(entry, current); - drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; - unsigned long end; - - if(dev_priv == NULL) { - return; - } - atomic_set(&dev_priv->flush_done, 0); - add_wait_queue(&dev_priv->flush_queue, &entry); - end = jiffies + (HZ*3); - - for (;;) { - current->state = TASK_INTERRUPTIBLE; - 
i810_dma_quiescent_emit(dev); - if (atomic_read(&dev_priv->flush_done) == 1) break; - if((signed)(end - jiffies) <= 0) { - DRM_ERROR("lockup\n"); - break; - } - schedule_timeout(HZ*3); - if (signal_pending(current)) { - break; - } - } - - current->state = TASK_RUNNING; - remove_wait_queue(&dev_priv->flush_queue, &entry); - - return; + i810_wait_ring( dev, dev_priv->ring.Size - 8 ); } static int i810_flush_queue(drm_device_t *dev) { - DECLARE_WAITQUEUE(entry, current); - drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; + drm_i810_private_t *dev_priv = dev->dev_private; drm_device_dma_t *dma = dev->dma; - unsigned long end; int i, ret = 0; + RING_LOCALS; + +/* printk("%s\n", __FUNCTION__); */ - if(dev_priv == NULL) { - return 0; - } - atomic_set(&dev_priv->flush_done, 0); - add_wait_queue(&dev_priv->flush_queue, &entry); - end = jiffies + (HZ*3); - for (;;) { - current->state = TASK_INTERRUPTIBLE; - i810_dma_emit_flush(dev); - if (atomic_read(&dev_priv->flush_done) == 1) break; - if((signed)(end - jiffies) <= 0) { - DRM_ERROR("lockup\n"); - break; - } - schedule_timeout(HZ*3); - if (signal_pending(current)) { - ret = -EINTR; /* Can't restart */ - break; - } - } + i810_kernel_lost_context(dev); - current->state = TASK_RUNNING; - remove_wait_queue(&dev_priv->flush_queue, &entry); + BEGIN_LP_RING(2); + OUT_RING( CMD_REPORT_HEAD ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); + i810_wait_ring( dev, dev_priv->ring.Size - 8 ); for (i = 0; i < dma->buf_count; i++) { drm_buf_t *buf = dma->buflist[ i ]; @@ -1038,7 +897,7 @@ static int i810_flush_queue(drm_device_t *dev) if (used == I810_BUF_HARDWARE) DRM_DEBUG("reclaimed from HARDWARE\n"); if (used == I810_BUF_CLIENT) - DRM_DEBUG("still on client HARDWARE\n"); + DRM_DEBUG("still on client\n"); } return ret; @@ -1078,7 +937,6 @@ int i810_flush_ioctl(struct inode *inode, struct file *filp, drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; - DRM_DEBUG("i810_flush_ioctl\n"); if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { DRM_ERROR("i810_flush_ioctl called without lock held\n"); return -EINVAL; @@ -1109,9 +967,6 @@ int i810_dma_vertex(struct inode *inode, struct file *filp, return -EINVAL; } - DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n", - vertex.idx, vertex.used, vertex.discard); - if(vertex.idx < 0 || vertex.idx > dma->buf_count) return -EINVAL; i810_dma_dispatch_vertex( dev, @@ -1160,8 +1015,6 @@ int i810_swap_bufs(struct inode *inode, struct file *filp, drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; - DRM_DEBUG("i810_swap_bufs\n"); - if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { DRM_ERROR("i810_swap_buf called without lock held\n"); return -EINVAL; @@ -1197,7 +1050,6 @@ int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) dev_priv->sarea_priv; - DRM_DEBUG("getbuf\n"); if (copy_from_user(&d, (drm_i810_dma_t *)arg, sizeof(d))) return -EFAULT; @@ -1210,9 +1062,6 @@ int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, retcode = i810_dma_get_buffer(dev, &d, filp); - DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", - current->pid, retcode, d.granted); - if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d))) return -EFAULT; sarea_priv->last_dispatch = (int) hw_status[5]; @@ -1220,47 +1069,19 @@ int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, return retcode; } -int i810_copybuf(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +int 
i810_copybuf(struct inode *inode, + struct file *filp, + unsigned int cmd, + unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - drm_i810_copy_t d; - drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; - u32 *hw_status = (u32 *)dev_priv->hw_status_page; - drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) - dev_priv->sarea_priv; - drm_buf_t *buf; - drm_i810_buf_priv_t *buf_priv; - drm_device_dma_t *dma = dev->dma; - - if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { - DRM_ERROR("i810_dma called without lock held\n"); - return -EINVAL; - } - - if (copy_from_user(&d, (drm_i810_copy_t *)arg, sizeof(d))) - return -EFAULT; - - if(d.idx < 0 || d.idx > dma->buf_count) return -EINVAL; - buf = dma->buflist[ d.idx ]; - buf_priv = buf->dev_private; - if (buf_priv->currently_mapped != I810_BUF_MAPPED) return -EPERM; - - if(d.used < 0 || d.used > buf->total) return -EINVAL; - - if (copy_from_user(buf_priv->virtual, d.address, d.used)) - return -EFAULT; - - sarea_priv->last_dispatch = (int) hw_status[5]; - + /* Never copy - 2.4.x doesn't need it */ return 0; } int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - if(VM_DONTCOPY == 0) return 1; + /* Never copy - 2.4.x doesn't need it */ return 0; } diff --git a/linux/i810_drm.h b/linux/i810_drm.h index 8413293b..bff61637 100644 --- a/linux/i810_drm.h +++ b/linux/i810_drm.h @@ -88,6 +88,8 @@ #define I810_TEXREG_MCS 7 /* GFX_OP_MAP_COORD_SETS ??? */ #define I810_TEX_SETUP_SIZE 8 +/* Flags for clear ioctl + */ #define I810_FRONT 0x1 #define I810_BACK 0x2 #define I810_DEPTH 0x4 @@ -191,6 +193,17 @@ typedef struct _drm_i810_copy_t { void *address; /* Address to copy from */ } drm_i810_copy_t; +#define PR_TRIANGLES (0x0<<18) +#define PR_TRISTRIP_0 (0x1<<18) +#define PR_TRISTRIP_1 (0x2<<18) +#define PR_TRIFAN (0x3<<18) +#define PR_POLYGON (0x4<<18) +#define PR_LINES (0x5<<18) +#define PR_LINESTRIP (0x6<<18) +#define PR_RECTS (0x7<<18) +#define PR_MASK (0x7<<18) + + typedef struct drm_i810_dma { void *virtual; int request_idx; diff --git a/linux/i810_drv.c b/linux/i810_drv.c index 559b2617..f792e378 100644 --- a/linux/i810_drv.c +++ b/linux/i810_drv.c @@ -39,11 +39,19 @@ #define DRIVER_NAME "i810" #define DRIVER_DESC "Intel i810" -#define DRIVER_DATE "20010920" +#define DRIVER_DATE "20020211" +/* Interface history + * + * 1.1 - XFree86 4.1 + * 1.2 - XvMC interfaces + * - XFree86 4.2 + * 1.2.1 - Disable copying code (leave stub ioctls for backwards compatibility) + * - Remove requirement for interrupt (leave stubs again) + */ #define DRIVER_MAJOR 1 -#define DRIVER_MINOR 2 -#define DRIVER_PATCHLEVEL 0 +#define DRIVER_MINOR 2 +#define DRIVER_PATCHLEVEL 1 #define DRIVER_IOCTLS \ [DRM_IOCTL_NR(DRM_IOCTL_I810_INIT)] = { i810_dma_init, 1, 1 }, \ diff --git a/linux/i810_drv.h b/linux/i810_drv.h index a27384d4..99165cf8 100644 --- a/linux/i810_drv.h +++ b/linux/i810_drv.h @@ -64,8 +64,6 @@ typedef struct drm_i810_private { unsigned long hw_status_page; unsigned long counter; - atomic_t flush_done; - wait_queue_head_t flush_queue; /* Processes waiting until flush */ drm_buf_t *mmap_buffer; @@ -77,6 +75,7 @@ typedef struct drm_i810_private { int overlay_physical; int w, h; int pitch; + } drm_i810_private_t; /* i810_dma.c */ @@ -91,8 +90,13 @@ extern void i810_reclaim_buffers(drm_device_t *dev, pid_t pid); extern int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int i810_mmap_buffers(struct file *filp, struct 
vm_area_struct *vma); + +/* Obsolete: + */ extern int i810_copybuf(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); +/* Obsolete: + */ extern int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); @@ -110,9 +114,6 @@ extern int i810_dma_mc(struct inode *inode, struct file *filp, extern void i810_dma_quiescent(drm_device_t *dev); -#define I810_VERBOSE 0 - - int i810_dma_vertex(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); diff --git a/linux/i830_dma.c b/linux/i830_dma.c index 661987fb..69b1c8a3 100644 --- a/linux/i830_dma.c +++ b/linux/i830_dma.c @@ -91,7 +91,7 @@ do { \ *(volatile unsigned int *)(virt + outring) = n; \ outring += 4; \ outring &= ringmask; \ -} while (0); +} while (0) static inline void i830_print_status_page(drm_device_t *dev) { diff --git a/linux/mga_state.c b/linux/mga_state.c index 41b2e9a1..16919514 100644 --- a/linux/mga_state.c +++ b/linux/mga_state.c @@ -163,6 +163,9 @@ static inline void mga_g400_emit_tex0( drm_mga_private_t *dev_priv ) drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0]; DMA_LOCALS; +/* printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */ +/* tex->texctl, tex->texctl2); */ + BEGIN_DMA( 6 ); DMA_BLOCK( MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC, @@ -204,6 +207,9 @@ static inline void mga_g400_emit_tex1( drm_mga_private_t *dev_priv ) drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1]; DMA_LOCALS; +/* printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */ +/* tex->texctl, tex->texctl2); */ + BEGIN_DMA( 5 ); DMA_BLOCK( MGA_TEXCTL2, (tex->texctl2 | @@ -272,6 +278,8 @@ static inline void mga_g400_emit_pipe( drm_mga_private_t *dev_priv ) unsigned int pipe = sarea_priv->warp_pipe; DMA_LOCALS; +/* printk("mga_g400_emit_pipe %x\n", pipe); */ + BEGIN_DMA( 10 ); DMA_BLOCK( MGA_WIADDR2, MGA_WMODE_SUSPEND, diff --git a/linux/radeon_cp.c b/linux/radeon_cp.c index 904c8b77..0acaca8e 100644 --- a/linux/radeon_cp.c +++ b/linux/radeon_cp.c @@ -744,17 +744,17 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init ) * and screwing with the clear operation. */ dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE | - RADEON_Z_ENABLE | (dev_priv->color_fmt << 10) | RADEON_ZBLOCK16); - dev_priv->depth_clear.rb3d_zstencilcntl = (dev_priv->depth_fmt | - RADEON_Z_TEST_ALWAYS | - RADEON_STENCIL_TEST_ALWAYS | - RADEON_STENCIL_S_FAIL_KEEP | - RADEON_STENCIL_ZPASS_KEEP | - RADEON_STENCIL_ZFAIL_KEEP | - RADEON_Z_WRITE_ENABLE); + dev_priv->depth_clear.rb3d_zstencilcntl = + (dev_priv->depth_fmt | + RADEON_Z_TEST_ALWAYS | + RADEON_STENCIL_TEST_ALWAYS | + RADEON_STENCIL_S_FAIL_REPLACE | + RADEON_STENCIL_ZPASS_REPLACE | + RADEON_STENCIL_ZFAIL_REPLACE | + RADEON_Z_WRITE_ENABLE); dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW | RADEON_BFACE_SOLID | diff --git a/linux/radeon_drm.h b/linux/radeon_drm.h index 4ae387ff..81e76b19 100644 --- a/linux/radeon_drm.h +++ b/linux/radeon_drm.h @@ -26,6 +26,7 @@ * Authors: * Kevin E. 
Martin <martin@valinux.com> * Gareth Hughes <gareth@valinux.com> + * Keith Whitwell <keith_whitwell@yahoo.com> */ #ifndef __RADEON_DRM_H__ @@ -56,11 +57,14 @@ #define RADEON_UPLOAD_TEX2IMAGES 0x00004000 #define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */ #define RADEON_REQUIRE_QUIESCENCE 0x00010000 -#define RADEON_UPLOAD_ALL 0x0001ffff +#define RADEON_UPLOAD_ZBIAS 0x00020000 /* version 1.2 and newer */ +#define RADEON_UPLOAD_ALL 0x0002ffff +#define RADEON_UPLOAD_CONTEXT_ALL 0x000201ff #define RADEON_FRONT 0x1 #define RADEON_BACK 0x2 #define RADEON_DEPTH 0x4 +#define RADEON_STENCIL 0x8 /* Primitive types */ @@ -82,8 +86,6 @@ #define RADEON_SCRATCH_REG_OFFSET 32 -/* Keep these small for testing - */ #define RADEON_NR_SAREA_CLIPRECTS 12 /* There are 2 heaps (local/AGP). Each region within a heap is a @@ -95,7 +97,7 @@ #define RADEON_NR_TEX_REGIONS 64 #define RADEON_LOG_TEX_GRANULARITY 16 -#define RADEON_MAX_TEXTURE_LEVELS 11 +#define RADEON_MAX_TEXTURE_LEVELS 12 #define RADEON_MAX_TEXTURE_UNITS 3 #endif /* __RADEON_SAREA_DEFINES__ */ @@ -155,28 +157,18 @@ typedef struct { /* Setup state */ unsigned int se_cntl_status; /* 0x2140 */ -#ifdef TCL_ENABLE - /* TCL state */ - radeon_color_regs_t se_tcl_material_emmissive; /* 0x2210 */ - radeon_color_regs_t se_tcl_material_ambient; - radeon_color_regs_t se_tcl_material_diffuse; - radeon_color_regs_t se_tcl_material_specular; - unsigned int se_tcl_shininess; - unsigned int se_tcl_output_vtx_fmt; - unsigned int se_tcl_output_vtx_sel; - unsigned int se_tcl_matrix_select_0; - unsigned int se_tcl_matrix_select_1; - unsigned int se_tcl_ucp_vert_blend_ctl; - unsigned int se_tcl_texture_proc_ctl; - unsigned int se_tcl_light_model_ctl; - unsigned int se_tcl_per_light_ctl[4]; -#endif - /* Misc state */ unsigned int re_top_left; /* 0x26c0 */ unsigned int re_misc; } drm_radeon_context_regs_t; +typedef struct { + /* Zbias state */ + unsigned int se_zbias_factor; /* 0x1dac */ + unsigned int se_zbias_constant; +} drm_radeon_context2_regs_t; + + /* Setup registers for each texture unit */ typedef struct { @@ -186,15 +178,28 @@ typedef struct { unsigned int pp_txcblend; unsigned int pp_txablend; unsigned int pp_tfactor; - unsigned int pp_border_color; - -#ifdef CUBIC_ENABLE - unsigned int pp_cubic_faces; - unsigned int pp_cubic_offset[5]; -#endif } drm_radeon_texture_regs_t; +/* Space is crucial; there is some redunancy here: + */ +typedef struct { + unsigned int start; + unsigned int finish; + unsigned int prim:8; + unsigned int stateidx:8; + unsigned int numverts:16; /* overloaded as offset/64 for elt prims */ + unsigned int vc_format; /* vertex format */ +} drm_radeon_prim_t; + +typedef struct { + drm_radeon_context_regs_t context; + drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS]; + drm_radeon_context2_regs_t context2; + unsigned int dirty; +} drm_radeon_state_t; + + typedef struct { unsigned char next, prev; unsigned char in_use; @@ -202,8 +207,9 @@ typedef struct { } drm_radeon_tex_region_t; typedef struct { - /* The channel for communication of state information to the kernel - * on firing a vertex buffer. + /* The channel for communication of state information to the + * kernel on firing a vertex buffer with either of the + * obsoleted vertex/index ioctls. 
*/ drm_radeon_context_regs_t context_state; drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS]; @@ -285,7 +291,7 @@ typedef struct drm_radeon_clear { unsigned int clear_color; unsigned int clear_depth; unsigned int color_mask; - unsigned int depth_mask; + unsigned int depth_mask; /* misnamed field: should be stencil */ drm_radeon_clear_rect_t *depth_boxes; } drm_radeon_clear_t; @@ -296,6 +302,15 @@ typedef struct drm_radeon_vertex { int discard; /* Client finished with buffer? */ } drm_radeon_vertex_t; +typedef struct drm_radeon_vertex2 { + int idx; /* Index of vertex buffer */ + int discard; /* Client finished with buffer? */ + int nr_states; + drm_radeon_state_t *state; + int nr_prims; + drm_radeon_prim_t *prim; +} drm_radeon_vertex2_t; + typedef struct drm_radeon_indices { int prim; int idx; diff --git a/linux/radeon_drv.c b/linux/radeon_drv.c index 49599bc8..847c71c9 100644 --- a/linux/radeon_drv.c +++ b/linux/radeon_drv.c @@ -40,9 +40,16 @@ #define DRIVER_DATE "20010405" #define DRIVER_MAJOR 1 -#define DRIVER_MINOR 1 -#define DRIVER_PATCHLEVEL 1 +#define DRIVER_MINOR 2 +#define DRIVER_PATCHLEVEL 0 +/* Interface history: + * + * 1.1 - ?? + * 1.2 - Add vertex2 ioctl (keith) + * - Add stencil capability to clear ioctl (gareth, keith) + * - Increase MAX_TEXTURE_LEVELS (brian) + */ #define DRIVER_IOCTLS \ [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, \ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_INIT)] = { radeon_cp_init, 1, 1 }, \ @@ -58,7 +65,8 @@ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDICES)] = { radeon_cp_indices, 1, 0 }, \ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_TEXTURE)] = { radeon_cp_texture, 1, 0 }, \ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_STIPPLE)] = { radeon_cp_stipple, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDIRECT)] = { radeon_cp_indirect, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDIRECT)] = { radeon_cp_indirect, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_VERTEX2)] = { radeon_cp_vertex2, 1, 0 }, #if 0 diff --git a/linux/radeon_drv.h b/linux/radeon_drv.h index 5afea7d1..abbf7179 100644 --- a/linux/radeon_drv.h +++ b/linux/radeon_drv.h @@ -176,6 +176,8 @@ extern int radeon_cp_stipple( struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg ); extern int radeon_cp_indirect( struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg ); +extern int radeon_cp_vertex2( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); /* Register definitions, register access macros and drmAddMap constants @@ -306,9 +308,9 @@ extern int radeon_cp_indirect( struct inode *inode, struct file *filp, # define RADEON_Z_TEST_MASK (7 << 4) # define RADEON_Z_TEST_ALWAYS (7 << 4) # define RADEON_STENCIL_TEST_ALWAYS (7 << 12) -# define RADEON_STENCIL_S_FAIL_KEEP (0 << 16) -# define RADEON_STENCIL_ZPASS_KEEP (0 << 20) -# define RADEON_STENCIL_ZFAIL_KEEP (0 << 20) +# define RADEON_STENCIL_S_FAIL_REPLACE (2 << 16) +# define RADEON_STENCIL_ZPASS_REPLACE (2 << 20) +# define RADEON_STENCIL_ZFAIL_REPLACE (2 << 24) # define RADEON_Z_WRITE_ENABLE (1 << 30) #define RADEON_RBBM_SOFT_RESET 0x00f0 # define RADEON_SOFT_RESET_CP (1 << 0) @@ -357,6 +359,7 @@ extern int radeon_cp_indirect( struct inode *inode, struct file *filp, #define RADEON_SE_CNTL_STATUS 0x2140 #define RADEON_SE_LINE_WIDTH 0x1db8 #define RADEON_SE_VPORT_XSCALE 0x1d98 +#define RADEON_SE_ZBIAS_FACTOR 0x1db0 #define RADEON_SURFACE_ACCESS_FLAGS 0x0bf8 #define RADEON_SURFACE_ACCESS_CLR 0x0bfc #define RADEON_SURFACE_CNTL 0x0b00 @@ -482,6 +485,7 @@ extern int radeon_cp_indirect( struct inode *inode, 
struct file *filp, #define RADEON_PRIM_TYPE_RECT_LIST (8 << 0) #define RADEON_PRIM_TYPE_3VRT_POINT_LIST (9 << 0) #define RADEON_PRIM_TYPE_3VRT_LINE_LIST (10 << 0) +#define RADEON_PRIM_TYPE_MASK 0xf #define RADEON_PRIM_WALK_IND (1 << 4) #define RADEON_PRIM_WALK_LIST (2 << 4) #define RADEON_PRIM_WALK_RING (3 << 4) @@ -734,6 +738,11 @@ do { \ write &= mask; \ } while (0) +#define OUT_RING_REG( reg, val ) do { \ + OUT_RING( CP_PACKET0( reg, 0 ) ); \ + OUT_RING( val ); \ +} while (0) + #define RADEON_PERFORMANCE_BOXES 0 #endif /* __RADEON_DRV_H__ */ diff --git a/linux/radeon_state.c b/linux/radeon_state.c index 9322292a..5efa447a 100644 --- a/linux/radeon_state.c +++ b/linux/radeon_state.c @@ -58,10 +58,9 @@ static inline void radeon_emit_clip_rect( drm_radeon_private_t *dev_priv, ADVANCE_RING(); } -static inline void radeon_emit_context( drm_radeon_private_t *dev_priv ) +static inline void radeon_emit_context( drm_radeon_private_t *dev_priv, + drm_radeon_context_regs_t *ctx ) { - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; RING_LOCALS; DRM_DEBUG( " %s\n", __FUNCTION__ ); @@ -87,10 +86,9 @@ static inline void radeon_emit_context( drm_radeon_private_t *dev_priv ) ADVANCE_RING(); } -static inline void radeon_emit_vertfmt( drm_radeon_private_t *dev_priv ) +static inline void radeon_emit_vertfmt( drm_radeon_private_t *dev_priv, + drm_radeon_context_regs_t *ctx ) { - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; RING_LOCALS; DRM_DEBUG( " %s\n", __FUNCTION__ ); @@ -102,12 +100,14 @@ static inline void radeon_emit_vertfmt( drm_radeon_private_t *dev_priv ) ADVANCE_RING(); } -static inline void radeon_emit_line( drm_radeon_private_t *dev_priv ) +static inline void radeon_emit_line( drm_radeon_private_t *dev_priv, + drm_radeon_context_regs_t *ctx ) { - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); +/* printk( " %s %x %x %x\n", __FUNCTION__, */ +/* ctx->re_line_pattern, */ +/* ctx->re_line_state, */ +/* ctx->se_line_width); */ BEGIN_RING( 5 ); @@ -121,10 +121,9 @@ static inline void radeon_emit_line( drm_radeon_private_t *dev_priv ) ADVANCE_RING(); } -static inline void radeon_emit_bumpmap( drm_radeon_private_t *dev_priv ) +static inline void radeon_emit_bumpmap( drm_radeon_private_t *dev_priv, + drm_radeon_context_regs_t *ctx ) { - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; RING_LOCALS; DRM_DEBUG( " %s\n", __FUNCTION__ ); @@ -140,10 +139,9 @@ static inline void radeon_emit_bumpmap( drm_radeon_private_t *dev_priv ) ADVANCE_RING(); } -static inline void radeon_emit_masks( drm_radeon_private_t *dev_priv ) +static inline void radeon_emit_masks( drm_radeon_private_t *dev_priv, + drm_radeon_context_regs_t *ctx ) { - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; RING_LOCALS; DRM_DEBUG( " %s\n", __FUNCTION__ ); @@ -157,10 +155,9 @@ static inline void radeon_emit_masks( drm_radeon_private_t *dev_priv ) ADVANCE_RING(); } -static inline void radeon_emit_viewport( drm_radeon_private_t *dev_priv ) +static inline void radeon_emit_viewport( drm_radeon_private_t *dev_priv, + drm_radeon_context_regs_t *ctx ) { - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = 
diff --git a/linux/radeon_state.c b/linux/radeon_state.c
index 9322292a..5efa447a 100644
--- a/linux/radeon_state.c
+++ b/linux/radeon_state.c
@@ -58,10 +58,9 @@ static inline void radeon_emit_clip_rect( drm_radeon_private_t *dev_priv,
 	ADVANCE_RING();
 }
 
-static inline void radeon_emit_context( drm_radeon_private_t *dev_priv )
+static inline void radeon_emit_context( drm_radeon_private_t *dev_priv,
+					drm_radeon_context_regs_t *ctx )
 {
-	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	drm_radeon_context_regs_t *ctx = &sarea_priv->context_state;
 	RING_LOCALS;
 	DRM_DEBUG( " %s\n", __FUNCTION__ );
 
@@ -87,10 +86,9 @@ static inline void radeon_emit_context( drm_radeon_private_t *dev_priv )
 	ADVANCE_RING();
 }
 
-static inline void radeon_emit_vertfmt( drm_radeon_private_t *dev_priv )
+static inline void radeon_emit_vertfmt( drm_radeon_private_t *dev_priv,
+					drm_radeon_context_regs_t *ctx )
 {
-	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	drm_radeon_context_regs_t *ctx = &sarea_priv->context_state;
 	RING_LOCALS;
 	DRM_DEBUG( " %s\n", __FUNCTION__ );
 
@@ -102,12 +100,14 @@ static inline void radeon_emit_vertfmt( drm_radeon_private_t *dev_priv )
 	ADVANCE_RING();
 }
 
-static inline void radeon_emit_line( drm_radeon_private_t *dev_priv )
+static inline void radeon_emit_line( drm_radeon_private_t *dev_priv,
+				     drm_radeon_context_regs_t *ctx )
 {
-	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	drm_radeon_context_regs_t *ctx = &sarea_priv->context_state;
 	RING_LOCALS;
-	DRM_DEBUG( " %s\n", __FUNCTION__ );
+/* printk( " %s %x %x %x\n", __FUNCTION__, */
+/* 	ctx->re_line_pattern, */
+/* 	ctx->re_line_state, */
+/* 	ctx->se_line_width); */
 
 	BEGIN_RING( 5 );
 
@@ -121,10 +121,9 @@ static inline void radeon_emit_line( drm_radeon_private_t *dev_priv )
 	ADVANCE_RING();
 }
 
-static inline void radeon_emit_bumpmap( drm_radeon_private_t *dev_priv )
+static inline void radeon_emit_bumpmap( drm_radeon_private_t *dev_priv,
+					drm_radeon_context_regs_t *ctx )
 {
-	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	drm_radeon_context_regs_t *ctx = &sarea_priv->context_state;
 	RING_LOCALS;
 	DRM_DEBUG( " %s\n", __FUNCTION__ );
 
@@ -140,10 +139,9 @@ static inline void radeon_emit_bumpmap( drm_radeon_private_t *dev_priv )
 	ADVANCE_RING();
 }
 
-static inline void radeon_emit_masks( drm_radeon_private_t *dev_priv )
+static inline void radeon_emit_masks( drm_radeon_private_t *dev_priv,
+				      drm_radeon_context_regs_t *ctx )
 {
-	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	drm_radeon_context_regs_t *ctx = &sarea_priv->context_state;
 	RING_LOCALS;
 	DRM_DEBUG( " %s\n", __FUNCTION__ );
 
@@ -157,10 +155,9 @@ static inline void radeon_emit_masks( drm_radeon_private_t *dev_priv )
 	ADVANCE_RING();
 }
 
-static inline void radeon_emit_viewport( drm_radeon_private_t *dev_priv )
+static inline void radeon_emit_viewport( drm_radeon_private_t *dev_priv,
+					 drm_radeon_context_regs_t *ctx )
 {
-	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	drm_radeon_context_regs_t *ctx = &sarea_priv->context_state;
 	RING_LOCALS;
 	DRM_DEBUG( " %s\n", __FUNCTION__ );
 
@@ -173,14 +170,12 @@ static inline void radeon_emit_viewport( drm_radeon_private_t *dev_priv )
 	OUT_RING( ctx->se_vport_yoffset );
 	OUT_RING( ctx->se_vport_zscale );
 	OUT_RING( ctx->se_vport_zoffset );
-
 	ADVANCE_RING();
 }
 
-static inline void radeon_emit_setup( drm_radeon_private_t *dev_priv )
+static inline void radeon_emit_setup( drm_radeon_private_t *dev_priv,
+				      drm_radeon_context_regs_t *ctx )
 {
-	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	drm_radeon_context_regs_t *ctx = &sarea_priv->context_state;
 	RING_LOCALS;
 	DRM_DEBUG( " %s\n", __FUNCTION__ );
 
@@ -194,55 +189,10 @@ static inline void radeon_emit_setup( drm_radeon_private_t *dev_priv )
 	ADVANCE_RING();
 }
 
-static inline void radeon_emit_tcl( drm_radeon_private_t *dev_priv )
-{
-#ifdef TCL_ENABLE
-	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	drm_radeon_context_regs_t *ctx = &sarea_priv->context_state;
-	RING_LOCALS;
-	DRM_DEBUG( " %s\n", __FUNCTION__ );
-
-	BEGIN_RING( 29 );
-
-	OUT_RING( CP_PACKET0( RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 27 ) );
-	OUT_RING( ctx->se_tcl_material_emmissive.red );
-	OUT_RING( ctx->se_tcl_material_emmissive.green );
-	OUT_RING( ctx->se_tcl_material_emmissive.blue );
-	OUT_RING( ctx->se_tcl_material_emmissive.alpha );
-	OUT_RING( ctx->se_tcl_material_ambient.red );
-	OUT_RING( ctx->se_tcl_material_ambient.green );
-	OUT_RING( ctx->se_tcl_material_ambient.blue );
-	OUT_RING( ctx->se_tcl_material_ambient.alpha );
-	OUT_RING( ctx->se_tcl_material_diffuse.red );
-	OUT_RING( ctx->se_tcl_material_diffuse.green );
-	OUT_RING( ctx->se_tcl_material_diffuse.blue );
-	OUT_RING( ctx->se_tcl_material_diffuse.alpha );
-	OUT_RING( ctx->se_tcl_material_specular.red );
-	OUT_RING( ctx->se_tcl_material_specular.green );
-	OUT_RING( ctx->se_tcl_material_specular.blue );
-	OUT_RING( ctx->se_tcl_material_specular.alpha );
-	OUT_RING( ctx->se_tcl_shininess );
-	OUT_RING( ctx->se_tcl_output_vtx_fmt );
-	OUT_RING( ctx->se_tcl_output_vtx_sel );
-	OUT_RING( ctx->se_tcl_matrix_select_0 );
-	OUT_RING( ctx->se_tcl_matrix_select_1 );
-	OUT_RING( ctx->se_tcl_ucp_vert_blend_ctl );
-	OUT_RING( ctx->se_tcl_texture_proc_ctl );
-	OUT_RING( ctx->se_tcl_light_model_ctl );
-	for ( i = 0 ; i < 4 ; i++ ) {
-		OUT_RING( ctx->se_tcl_per_light_ctl[i] );
-	}
-
-	ADVANCE_RING();
-#else
-	DRM_ERROR( "TCL not enabled!\n" );
-#endif
-}
-
-static inline void radeon_emit_misc( drm_radeon_private_t *dev_priv )
+static inline void radeon_emit_misc( drm_radeon_private_t *dev_priv,
+				     drm_radeon_context_regs_t *ctx )
 {
-	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	drm_radeon_context_regs_t *ctx = &sarea_priv->context_state;
 	RING_LOCALS;
 	DRM_DEBUG( " %s\n", __FUNCTION__ );
 
@@ -254,10 +204,9 @@ static inline void radeon_emit_misc( drm_radeon_private_t *dev_priv )
 	ADVANCE_RING();
 }
 
-static inline void radeon_emit_tex0( drm_radeon_private_t *dev_priv )
+static inline void radeon_emit_tex0( drm_radeon_private_t *dev_priv,
+				     drm_radeon_texture_regs_t *tex )
 {
-	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	drm_radeon_texture_regs_t *tex = &sarea_priv->tex_state[0];
 	RING_LOCALS;
 	DRM_DEBUG( " %s: offset=0x%x\n", __FUNCTION__, tex->pp_txoffset );
 
@@ -277,10 +226,9 @@ static inline void radeon_emit_tex0( drm_radeon_private_t *dev_priv )
 	ADVANCE_RING();
 }
 
-static inline void radeon_emit_tex1( drm_radeon_private_t *dev_priv )
+static inline void radeon_emit_tex1( drm_radeon_private_t *dev_priv,
+				     drm_radeon_texture_regs_t *tex )
 {
-	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	drm_radeon_texture_regs_t *tex = &sarea_priv->tex_state[1];
 	RING_LOCALS;
 	DRM_DEBUG( " %s: offset=0x%x\n", __FUNCTION__, tex->pp_txoffset );
 
@@ -300,10 +248,9 @@ static inline void radeon_emit_tex1( drm_radeon_private_t *dev_priv )
 	ADVANCE_RING();
 }
 
-static inline void radeon_emit_tex2( drm_radeon_private_t *dev_priv )
+static inline void radeon_emit_tex2( drm_radeon_private_t *dev_priv,
+				     drm_radeon_texture_regs_t *tex )
 {
-	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	drm_radeon_texture_regs_t *tex = &sarea_priv->tex_state[2];
 	RING_LOCALS;
 	DRM_DEBUG( " %s\n", __FUNCTION__ );
 
@@ -323,83 +270,106 @@ static inline void radeon_emit_tex2( drm_radeon_private_t *dev_priv )
 	ADVANCE_RING();
 }
 
-static inline void radeon_emit_state( drm_radeon_private_t *dev_priv )
+#if 0
+static void radeon_print_dirty( const char *msg, unsigned int flags )
 {
-	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	unsigned int dirty = sarea_priv->dirty;
+	DRM_DEBUG( "%s: (0x%x) %s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+		   msg,
+		   flags,
+		   (flags & RADEON_UPLOAD_CONTEXT) ? "context, " : "",
+		   (flags & RADEON_UPLOAD_VERTFMT) ? "vertfmt, " : "",
+		   (flags & RADEON_UPLOAD_LINE) ? "line, " : "",
+		   (flags & RADEON_UPLOAD_BUMPMAP) ? "bumpmap, " : "",
+		   (flags & RADEON_UPLOAD_MASKS) ? "masks, " : "",
+		   (flags & RADEON_UPLOAD_VIEWPORT) ? "viewport, " : "",
+		   (flags & RADEON_UPLOAD_SETUP) ? "setup, " : "",
+		   (flags & RADEON_UPLOAD_MISC) ? "misc, " : "",
+		   (flags & RADEON_UPLOAD_TEX0) ? "tex0, " : "",
+		   (flags & RADEON_UPLOAD_TEX1) ? "tex1, " : "",
+		   (flags & RADEON_UPLOAD_TEX2) ? "tex2, " : "",
+		   (flags & RADEON_UPLOAD_CLIPRECTS) ? "cliprects, " : "",
+		   (flags & RADEON_REQUIRE_QUIESCENCE) ? "quiescence, " : "" );
+}
+#endif
+static inline void radeon_emit_state( drm_radeon_private_t *dev_priv,
+				      drm_radeon_context_regs_t *ctx,
+				      drm_radeon_texture_regs_t *tex,
+				      unsigned int dirty )
+{
 	DRM_DEBUG( "%s: dirty=0x%08x\n", __FUNCTION__, dirty );
 
 	if ( dirty & RADEON_UPLOAD_CONTEXT ) {
-		radeon_emit_context( dev_priv );
-		sarea_priv->dirty &= ~RADEON_UPLOAD_CONTEXT;
+		radeon_emit_context( dev_priv, ctx );
 	}
 
 	if ( dirty & RADEON_UPLOAD_VERTFMT ) {
-		radeon_emit_vertfmt( dev_priv );
-		sarea_priv->dirty &= ~RADEON_UPLOAD_VERTFMT;
+		radeon_emit_vertfmt( dev_priv, ctx );
 	}
 
 	if ( dirty & RADEON_UPLOAD_LINE ) {
-		radeon_emit_line( dev_priv );
-		sarea_priv->dirty &= ~RADEON_UPLOAD_LINE;
+		radeon_emit_line( dev_priv, ctx );
 	}
 
 	if ( dirty & RADEON_UPLOAD_BUMPMAP ) {
-		radeon_emit_bumpmap( dev_priv );
-		sarea_priv->dirty &= ~RADEON_UPLOAD_BUMPMAP;
+		radeon_emit_bumpmap( dev_priv, ctx );
 	}
 
 	if ( dirty & RADEON_UPLOAD_MASKS ) {
-		radeon_emit_masks( dev_priv );
-		sarea_priv->dirty &= ~RADEON_UPLOAD_MASKS;
+		radeon_emit_masks( dev_priv, ctx );
 	}
 
 	if ( dirty & RADEON_UPLOAD_VIEWPORT ) {
-		radeon_emit_viewport( dev_priv );
-		sarea_priv->dirty &= ~RADEON_UPLOAD_VIEWPORT;
+		radeon_emit_viewport( dev_priv, ctx );
 	}
 
 	if ( dirty & RADEON_UPLOAD_SETUP ) {
-		radeon_emit_setup( dev_priv );
-		sarea_priv->dirty &= ~RADEON_UPLOAD_SETUP;
-	}
-
-	if ( dirty & RADEON_UPLOAD_TCL ) {
-#ifdef TCL_ENABLE
-		radeon_emit_tcl( dev_priv );
-#endif
-		sarea_priv->dirty &= ~RADEON_UPLOAD_TCL;
+		radeon_emit_setup( dev_priv, ctx );
 	}
 
 	if ( dirty & RADEON_UPLOAD_MISC ) {
-		radeon_emit_misc( dev_priv );
-		sarea_priv->dirty &= ~RADEON_UPLOAD_MISC;
+		radeon_emit_misc( dev_priv, ctx );
 	}
 
 	if ( dirty & RADEON_UPLOAD_TEX0 ) {
-		radeon_emit_tex0( dev_priv );
-		sarea_priv->dirty &= ~RADEON_UPLOAD_TEX0;
+		radeon_emit_tex0( dev_priv, &tex[0] );
 	}
 
 	if ( dirty & RADEON_UPLOAD_TEX1 ) {
-		radeon_emit_tex1( dev_priv );
-		sarea_priv->dirty &= ~RADEON_UPLOAD_TEX1;
+		radeon_emit_tex1( dev_priv, &tex[1] );
 	}
 
 	if ( dirty & RADEON_UPLOAD_TEX2 ) {
-#if 0
-		radeon_emit_tex2( dev_priv );
-#endif
-		sarea_priv->dirty &= ~RADEON_UPLOAD_TEX2;
+		radeon_emit_tex2( dev_priv, &tex[2] );
 	}
+}
+
-	sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
-			       RADEON_UPLOAD_TEX1IMAGES |
-			       RADEON_UPLOAD_TEX2IMAGES |
-			       RADEON_REQUIRE_QUIESCENCE);
+
+static inline void radeon_emit_zbias( drm_radeon_private_t *dev_priv,
+				      drm_radeon_context2_regs_t *ctx )
+{
+	RING_LOCALS;
+/* printk( " %s %x %x\n", __FUNCTION__, */
+/* 	ctx->se_zbias_factor, */
+/* 	ctx->se_zbias_constant ); */
+
+	BEGIN_RING( 3 );
+	OUT_RING( CP_PACKET0( RADEON_SE_ZBIAS_FACTOR, 1 ) );
+	OUT_RING( ctx->se_zbias_factor );
+	OUT_RING( ctx->se_zbias_constant );
+	ADVANCE_RING();
 }
+static inline void radeon_emit_state2( drm_radeon_private_t *dev_priv,
+				       drm_radeon_state_t *state )
+{
+	if (state->dirty & RADEON_UPLOAD_ZBIAS)
+		radeon_emit_zbias( dev_priv, &state->context2 );
+
+	radeon_emit_state( dev_priv, &state->context,
+			   state->tex, state->dirty );
+}
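radeon_emit_state2() consumes a client-supplied drm_radeon_state_t rather than the SAREA copy, so per-prim state (including the new zbias block) can travel through the vertex2 ioctl further below. A hedged userspace-side sketch of filling such a block, assuming the struct layout implied by the code above (the context2/se_zbias_* fields and RADEON_UPLOAD_ZBIAS come from this diff; the register values themselves are placeholders chosen by the client, typically raw hardware bit patterns):

        #include <string.h>
        #include "radeon_drm.h"   /* assumed to provide drm_radeon_state_t */

        /* Illustrative only: prepare a state block that updates just the
         * depth-bias registers when referenced by a prim's stateidx. */
        static void setup_zbias_state(drm_radeon_state_t *st,
                                      unsigned int zbias_factor,
                                      unsigned int zbias_constant)
        {
                memset(st, 0, sizeof(*st));
                st->context2.se_zbias_factor   = zbias_factor;
                st->context2.se_zbias_constant = zbias_constant;
                st->dirty = RADEON_UPLOAD_ZBIAS;  /* only the zbias block is emitted */
        }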
 
 #if RADEON_PERFORMANCE_BOXES
 /* ================================================================
@@ -464,39 +434,20 @@ static void radeon_cp_performance_boxes( drm_radeon_private_t *dev_priv )
  * CP command dispatch functions
  */
 
-static void radeon_print_dirty( const char *msg, unsigned int flags )
-{
-	DRM_DEBUG( "%s: (0x%x) %s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-		   msg,
-		   flags,
-		   (flags & RADEON_UPLOAD_CONTEXT) ? "context, " : "",
-		   (flags & RADEON_UPLOAD_VERTFMT) ? "vertfmt, " : "",
-		   (flags & RADEON_UPLOAD_LINE) ? "line, " : "",
-		   (flags & RADEON_UPLOAD_BUMPMAP) ? "bumpmap, " : "",
-		   (flags & RADEON_UPLOAD_MASKS) ? "masks, " : "",
-		   (flags & RADEON_UPLOAD_VIEWPORT) ? "viewport, " : "",
-		   (flags & RADEON_UPLOAD_SETUP) ? "setup, " : "",
-		   (flags & RADEON_UPLOAD_TCL) ? "tcl, " : "",
-		   (flags & RADEON_UPLOAD_MISC) ? "misc, " : "",
-		   (flags & RADEON_UPLOAD_TEX0) ? "tex0, " : "",
-		   (flags & RADEON_UPLOAD_TEX1) ? "tex1, " : "",
-		   (flags & RADEON_UPLOAD_TEX2) ? "tex2, " : "",
-		   (flags & RADEON_UPLOAD_CLIPRECTS) ? "cliprects, " : "",
-		   (flags & RADEON_REQUIRE_QUIESCENCE) ? "quiescence, " : "" );
-}
-
 static void radeon_cp_dispatch_clear( drm_device_t *dev,
 				      drm_radeon_clear_t *clear,
 				      drm_radeon_clear_rect_t *depth_boxes )
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
 	int nbox = sarea_priv->nbox;
 	drm_clip_rect_t *pbox = sarea_priv->boxes;
 	unsigned int flags = clear->flags;
+	u32 rb3d_cntl = 0, rb3d_stencilrefmask= 0;
 	int i;
 	RING_LOCALS;
-	DRM_DEBUG( "%s\n", __FUNCTION__ );
+	DRM_DEBUG( __FUNCTION__": flags = 0x%x\n", flags );
 
 	if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) {
 		unsigned int tmp = flags;
@@ -506,6 +457,28 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
 		if ( tmp & RADEON_BACK )  flags |= RADEON_FRONT;
 	}
 
+	/* We have to clear the depth and/or stencil buffers by
+	 * rendering a quad into just those buffers.  Thus, we have to
+	 * make sure the 3D engine is configured correctly.
+	 */
+	if ( flags & (RADEON_DEPTH | RADEON_STENCIL) ) {
+		rb3d_cntl = depth_clear->rb3d_cntl;
+
+		if ( flags & RADEON_DEPTH ) {
+			rb3d_cntl |= RADEON_Z_ENABLE;
+		} else {
+			rb3d_cntl &= ~RADEON_Z_ENABLE;
+		}
+
+		if ( flags & RADEON_STENCIL ) {
+			rb3d_cntl |= RADEON_STENCIL_ENABLE;
+			rb3d_stencilrefmask = clear->depth_mask; /* misnamed field */
+		} else {
+			rb3d_cntl &= ~RADEON_STENCIL_ENABLE;
+			rb3d_stencilrefmask = 0x00000000;
+		}
+	}
+
 	for ( i = 0 ; i < nbox ; i++ ) {
 		int x = pbox[i].x1;
 		int y = pbox[i].y1;
@@ -530,8 +503,7 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
 
 		/* Make sure we restore the 3D state next time.
 		 */
-		dev_priv->sarea_priv->dirty |= (RADEON_UPLOAD_CONTEXT |
-						RADEON_UPLOAD_MASKS);
+		dev_priv->sarea_priv->ctx_owner = 0;
 	}
 
 	if ( flags & RADEON_FRONT ) {
@@ -572,33 +544,29 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
 			OUT_RING( (w << 16) | h );
 			ADVANCE_RING();
-		}
-		if ( flags & RADEON_DEPTH ) {
-			drm_radeon_depth_clear_t *depth_clear =
-			   &dev_priv->depth_clear;
+		if ( flags & (RADEON_DEPTH | RADEON_STENCIL) ) {
-			if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
-				radeon_emit_state( dev_priv );
-			}
+			radeon_emit_clip_rect( dev_priv,
+					       &sarea_priv->boxes[i] );
-			/* FIXME: Render a rectangle to clear the depth
-			 * buffer.  So much for those "fast Z clears"...
-			 */
-			BEGIN_RING( 23 );
+			BEGIN_RING( 25 );
 
 			RADEON_WAIT_UNTIL_2D_IDLE();
 
 			OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 1 ) );
 			OUT_RING( 0x00000000 );
-			OUT_RING( depth_clear->rb3d_cntl );
-			OUT_RING( CP_PACKET0( RADEON_RB3D_ZSTENCILCNTL, 0 ) );
-			OUT_RING( depth_clear->rb3d_zstencilcntl );
-			OUT_RING( CP_PACKET0( RADEON_RB3D_PLANEMASK, 0 ) );
-			OUT_RING( 0x00000000 );
-			OUT_RING( CP_PACKET0( RADEON_SE_CNTL, 0 ) );
-			OUT_RING( depth_clear->se_cntl );
+			OUT_RING( rb3d_cntl );
+
+			OUT_RING_REG( RADEON_RB3D_ZSTENCILCNTL,
+				      depth_clear->rb3d_zstencilcntl );
+			OUT_RING_REG( RADEON_RB3D_STENCILREFMASK,
+				      rb3d_stencilrefmask );
+			OUT_RING_REG( RADEON_RB3D_PLANEMASK,
+				      0x00000000 );
+			OUT_RING_REG( RADEON_SE_CNTL,
+				      depth_clear->se_cntl );
 
 			OUT_RING( CP_PACKET3( RADEON_3D_DRAW_IMMD, 10 ) );
 			OUT_RING( RADEON_VTX_Z_PRESENT );
@@ -608,6 +576,13 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
 				  RADEON_VTX_FMT_RADEON_MODE |
 				  (3 << RADEON_NUM_VERTICES_SHIFT)) );
 
+/* printk( "depth box %d: %x %x %x %x\n", */
+/* 	i, */
+/* 	depth_boxes[i].ui[CLEAR_X1], */
+/* 	depth_boxes[i].ui[CLEAR_Y1], */
+/* 	depth_boxes[i].ui[CLEAR_X2], */
+/* 	depth_boxes[i].ui[CLEAR_Y2]); */
+
 			OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
 			OUT_RING( depth_boxes[i].ui[CLEAR_Y1] );
 			OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
@@ -624,9 +599,7 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
 
 		/* Make sure we restore the 3D state next time.
 		 */
-		dev_priv->sarea_priv->dirty |= (RADEON_UPLOAD_CONTEXT |
-						RADEON_UPLOAD_SETUP |
-						RADEON_UPLOAD_MASKS);
+		dev_priv->sarea_priv->ctx_owner = 0;
 	}
 }
 
@@ -757,76 +730,72 @@ static void radeon_cp_dispatch_flip( drm_device_t *dev )
 	ADVANCE_RING();
 }
 
+
 static void radeon_cp_dispatch_vertex( drm_device_t *dev,
-				       drm_buf_t *buf )
+				       drm_buf_t *buf,
+				       drm_radeon_prim_t *prim )
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
-	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
 	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	int format = sarea_priv->vc_format;
-	int offset = dev_priv->agp_buffers_offset + buf->offset;
-	int size = buf->used;
-	int prim = buf_priv->prim;
+	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+	int offset = dev_priv->agp_buffers_offset + buf->offset + prim->start;
+	int numverts = (int)prim->numverts;
 	int i = 0;
 	RING_LOCALS;
-	DRM_DEBUG( "%s: nbox=%d\n", __FUNCTION__, sarea_priv->nbox );
-	if ( 0 )
-		radeon_print_dirty( "dispatch_vertex", sarea_priv->dirty );
+	DRM_DEBUG( __FUNCTION__": nbox=%d %d..%d prim %x nvert %d\n",
+		   sarea_priv->nbox, prim->start, prim->finish,
+		   prim->prim, numverts );
-	if ( buf->used ) {
-		buf_priv->dispatched = 1;
+	buf_priv->dispatched = 1;
-		if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
-			radeon_emit_state( dev_priv );
+	do {
+		/* Emit the next cliprect */
+		if ( i < sarea_priv->nbox ) {
+			radeon_emit_clip_rect( dev_priv,
+					       &sarea_priv->boxes[i] );
 		}
-		do {
-			/* Emit the next set of up to three cliprects */
-			if ( i < sarea_priv->nbox ) {
-				radeon_emit_clip_rect( dev_priv,
-						       &sarea_priv->boxes[i] );
-			}
+		/* Emit the vertex buffer rendering commands */
+		BEGIN_RING( 5 );
-			/* Emit the vertex buffer rendering commands */
-			BEGIN_RING( 5 );
+		OUT_RING( CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, 3 ) );
+		OUT_RING( offset );
+		OUT_RING( numverts );
+		OUT_RING( prim->vc_format );
+		OUT_RING( prim->prim | RADEON_PRIM_WALK_LIST |
+			  RADEON_COLOR_ORDER_RGBA |
+			  RADEON_VTX_FMT_RADEON_MODE |
+			  (numverts << RADEON_NUM_VERTICES_SHIFT) );
-			OUT_RING( CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, 3 ) );
-			OUT_RING( offset );
-			OUT_RING( size );
-			OUT_RING( format );
-			OUT_RING( prim | RADEON_PRIM_WALK_LIST |
-				  RADEON_COLOR_ORDER_RGBA |
-				  RADEON_VTX_FMT_RADEON_MODE |
-				  (size << RADEON_NUM_VERTICES_SHIFT) );
+		ADVANCE_RING();
-			ADVANCE_RING();
+		i++;
+	} while ( i < sarea_priv->nbox );
-			i++;
-		} while ( i < sarea_priv->nbox );
-	}
+	dev_priv->sarea_priv->last_dispatch++;
+}
-	if ( buf_priv->discard ) {
-		buf_priv->age = dev_priv->sarea_priv->last_dispatch;
-		/* Emit the vertex buffer age */
-		BEGIN_RING( 2 );
-		RADEON_DISPATCH_AGE( buf_priv->age );
-		ADVANCE_RING();
+static void radeon_cp_discard_buffer( drm_device_t *dev, drm_buf_t *buf )
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+	RING_LOCALS;
-		buf->pending = 1;
-		buf->used = 0;
-		/* FIXME: Check dispatched field */
-		buf_priv->dispatched = 0;
-	}
+	buf_priv->age = dev_priv->sarea_priv->last_dispatch;
-	dev_priv->sarea_priv->last_dispatch++;
+	/* Emit the vertex buffer age */
+	BEGIN_RING( 2 );
+	RADEON_DISPATCH_AGE( buf_priv->age );
+	ADVANCE_RING();
-	sarea_priv->dirty &= ~RADEON_UPLOAD_CLIPRECTS;
-	sarea_priv->nbox = 0;
+	buf->pending = 1;
+	buf->used = 0;
+	/* FIXME: Check dispatched field */
+	buf_priv->dispatched = 0;
 }
-
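With the prim-based dispatch above, several primitives can share one DMA buffer; each one is addressed at agp_buffers_offset + buf->offset + prim->start and sized by prim->numverts instead of buf->used. A purely illustrative, self-contained arithmetic sketch (all numbers here are hypothetical, not taken from the driver):

        #include <stdio.h>

        int main(void)
        {
                unsigned int agp_buffers_offset = 0x00800000; /* hypothetical GART base of the buffer pool */
                unsigned int buf_offset         = 0x00010000; /* hypothetical offset of this drm_buf_t */
                unsigned int vertex_size        = 32;         /* hypothetical packed vertex size in bytes */

                /* Two prims packed back to back in the same buffer. */
                unsigned int start[2]    = { 0, 48 * vertex_size };
                unsigned int numverts[2] = { 48, 30 };
                int i;

                for (i = 0; i < 2; i++)
                        printf("prim %d: GART offset 0x%08x, %u vertices\n",
                               i, agp_buffers_offset + buf_offset + start[i],
                               numverts[i]);
                return 0;
        }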
 static void radeon_cp_dispatch_indirect( drm_device_t *dev,
					  drm_buf_t *buf,
					  int start, int end )
@@ -865,66 +834,47 @@ static void radeon_cp_dispatch_indirect( drm_device_t *dev,
 		ADVANCE_RING();
 	}
 
-	if ( buf_priv->discard ) {
-		buf_priv->age = dev_priv->sarea_priv->last_dispatch;
-
-		/* Emit the indirect buffer age */
-		BEGIN_RING( 2 );
-		RADEON_DISPATCH_AGE( buf_priv->age );
-		ADVANCE_RING();
-
-		buf->pending = 1;
-		buf->used = 0;
-		/* FIXME: Check dispatched field */
-		buf_priv->dispatched = 0;
-	}
-
 	dev_priv->sarea_priv->last_dispatch++;
 }
 
 static void radeon_cp_dispatch_indices( drm_device_t *dev,
-					drm_buf_t *buf,
-					int start, int end,
-					int count )
+					drm_buf_t *elt_buf,
+					drm_radeon_prim_t *prim )
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
-	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+	drm_radeon_buf_priv_t *buf_priv = elt_buf->dev_private;
 	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	int format = sarea_priv->vc_format;
-	int offset = dev_priv->agp_buffers_offset;
-	int prim = buf_priv->prim;
+	int offset = dev_priv->agp_buffers_offset + prim->numverts * 64;
 	u32 *data;
 	int dwords;
 	int i = 0;
-	RING_LOCALS;
-	DRM_DEBUG( "indices: s=%d e=%d c=%d\n", start, end, count );
+	int start = prim->start + RADEON_INDEX_PRIM_OFFSET;
+	int count = (prim->finish - start) / sizeof(u16);
-	if ( 0 )
-		radeon_print_dirty( "dispatch_indices", sarea_priv->dirty );
+	DRM_DEBUG( "indices: start=%x/%x end=%x count=%d nv %d offset %x\n",
+		   prim->start, start, prim->finish,
+		   count, prim->numverts, offset );
-	if ( start != end ) {
+	if ( start < prim->finish ) {
 		buf_priv->dispatched = 1;
 
-		if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
-			radeon_emit_state( dev_priv );
-		}
-
-		dwords = (end - start + 3) / sizeof(u32);
+		dwords = (prim->finish - prim->start + 3) / sizeof(u32);
-		data = (u32 *)((char *)dev_priv->buffers->handle
-			       + buf->offset + start);
+		data = (u32 *)((char *)dev_priv->buffers->handle +
+			       elt_buf->offset + prim->start);
 		data[0] = CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, dwords-2 );
-
 		data[1] = offset;
 		data[2] = RADEON_MAX_VB_VERTS;
-		data[3] = format;
-		data[4] = (prim | RADEON_PRIM_WALK_IND |
+		data[3] = prim->vc_format;
+		data[4] = (prim->prim |
+			   RADEON_PRIM_WALK_IND |
 			   RADEON_COLOR_ORDER_RGBA |
 			   RADEON_VTX_FMT_RADEON_MODE |
 			   (count << RADEON_NUM_VERTICES_SHIFT) );
 		if ( count & 0x1 ) {
+			/* unnecessary? */
 			data[dwords-1] &= 0x0000ffff;
 		}
@@ -935,29 +885,15 @@ static void radeon_cp_dispatch_indices( drm_device_t *dev,
 						       &sarea_priv->boxes[i] );
 			}
-			radeon_cp_dispatch_indirect( dev, buf, start, end );
+			radeon_cp_dispatch_indirect( dev, elt_buf,
+						     prim->start,
+						     prim->finish );
 			i++;
 		} while ( i < sarea_priv->nbox );
 	}
-	if ( buf_priv->discard ) {
-		buf_priv->age = dev_priv->sarea_priv->last_dispatch;
-
-		/* Emit the vertex buffer age */
-		BEGIN_RING( 2 );
-		RADEON_DISPATCH_AGE( buf_priv->age );
-		ADVANCE_RING();
-
-		buf->pending = 1;
-		/* FIXME: Check dispatched field */
-		buf_priv->dispatched = 0;
-	}
-
-	dev_priv->sarea_priv->last_dispatch++;
-
-	sarea_priv->dirty &= ~RADEON_UPLOAD_CLIPRECTS;
-	sarea_priv->nbox = 0;
+	sarea_priv->last_dispatch++;
 }
 
 #define RADEON_MAX_TEXTURE_SIZE (RADEON_BUFFER_SIZE - 8 * sizeof(u32))
@@ -1116,6 +1052,7 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
 	buf_priv->discard = 1;
 
 	radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
+	radeon_cp_discard_buffer( dev, buf );
 
 	/* Flush the pixel cache after the blit completes.  This ensures
 	 * the texture data is written out to memory before rendering
@@ -1182,6 +1119,20 @@ int radeon_cp_clear( struct inode *inode, struct file *filp,
 			     sarea_priv->nbox * sizeof(depth_boxes[0]) ) )
 		return -EFAULT;
 
+	/* Needed for depth clears via triangles???
+	 */
+	if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
+		radeon_emit_state( dev_priv,
+				   &sarea_priv->context_state,
+				   sarea_priv->tex_state,
+				   sarea_priv->dirty );
+
+		sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
+				       RADEON_UPLOAD_TEX1IMAGES |
+				       RADEON_UPLOAD_TEX2IMAGES |
+				       RADEON_REQUIRE_QUIESCENCE);
+	}
+
 	radeon_cp_dispatch_clear( dev, &clear, depth_boxes );
 
 	return 0;
@@ -1205,8 +1156,7 @@ int radeon_cp_swap( struct inode *inode, struct file *filp,
 	if ( !dev_priv->page_flipping ) {
 		radeon_cp_dispatch_swap( dev );
-		dev_priv->sarea_priv->dirty |= (RADEON_UPLOAD_CONTEXT |
-						RADEON_UPLOAD_MASKS);
+		dev_priv->sarea_priv->ctx_owner = 0;
 	} else {
 		radeon_cp_dispatch_flip( dev );
 	}
@@ -1220,10 +1170,12 @@ int radeon_cp_vertex( struct inode *inode, struct file *filp,
 	drm_file_t *priv = filp->private_data;
 	drm_device_t *dev = priv->dev;
 	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
 	drm_device_dma_t *dma = dev->dma;
 	drm_buf_t *buf;
 	drm_radeon_buf_priv_t *buf_priv;
 	drm_radeon_vertex_t vertex;
+	drm_radeon_prim_t prim;
 
 	LOCK_TEST_WITH_RETURN( dev );
 
@@ -1267,11 +1219,33 @@ int radeon_cp_vertex( struct inode *inode, struct file *filp,
 		return -EINVAL;
 	}
 
-	buf->used = vertex.count;
-	buf_priv->prim = vertex.prim;
-	buf_priv->discard = vertex.discard;
+	buf->used = vertex.count; /* not used? */
+
+	if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
+		radeon_emit_state( dev_priv,
+				   &sarea_priv->context_state,
+				   sarea_priv->tex_state,
+				   sarea_priv->dirty );
+
+		sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
+				       RADEON_UPLOAD_TEX1IMAGES |
+				       RADEON_UPLOAD_TEX2IMAGES |
+				       RADEON_REQUIRE_QUIESCENCE);
+	}
-	radeon_cp_dispatch_vertex( dev, buf );
+	/* Build up a prim_t record:
+	 */
+	prim.start = 0;
+	prim.finish = vertex.count; /* unused */
+	prim.prim = vertex.prim;
+	prim.stateidx = 0xff; /* unused */
+	prim.numverts = vertex.count;
+	prim.vc_format = dev_priv->sarea_priv->vc_format;
+
+	radeon_cp_dispatch_vertex( dev, buf, &prim );
+	if (vertex.discard) {
+		radeon_cp_discard_buffer( dev, buf );
+	}
 
 	return 0;
 }
@@ -1282,10 +1256,12 @@ int radeon_cp_indices( struct inode *inode, struct file *filp,
 	drm_file_t *priv = filp->private_data;
 	drm_device_t *dev = priv->dev;
 	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
 	drm_device_dma_t *dma = dev->dma;
 	drm_buf_t *buf;
 	drm_radeon_buf_priv_t *buf_priv;
 	drm_radeon_indices_t elts;
+	drm_radeon_prim_t prim;
 	int count;
 
 	LOCK_TEST_WITH_RETURN( dev );
 
@@ -1343,10 +1319,33 @@ int radeon_cp_indices( struct inode *inode, struct file *filp,
 	}
 
 	buf->used = elts.end;
-	buf_priv->prim = elts.prim;
-	buf_priv->discard = elts.discard;
-	radeon_cp_dispatch_indices( dev, buf, elts.start, elts.end, count );
+	if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
+		radeon_emit_state( dev_priv,
+				   &sarea_priv->context_state,
+				   sarea_priv->tex_state,
+				   sarea_priv->dirty );
+
+		sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
+				       RADEON_UPLOAD_TEX1IMAGES |
+				       RADEON_UPLOAD_TEX2IMAGES |
+				       RADEON_REQUIRE_QUIESCENCE);
+	}
+
+
+	/* Build up a prim_t record:
+	 */
+	prim.start = elts.start;
+	prim.finish = elts.end; /* unused */
+	prim.prim = elts.prim;
+	prim.stateidx = 0xff; /* unused */
+	prim.numverts = count;
+	prim.vc_format = dev_priv->sarea_priv->vc_format;
+
+	radeon_cp_dispatch_indices( dev, buf, &prim );
+	if (elts.discard) {
+		radeon_cp_discard_buffer( dev, buf );
+	}
 
 	return 0;
 }
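The classic vertex and indices ioctls keep their user-visible structures; the kernel now just wraps each call in a drm_radeon_prim_t as shown above. A hedged sketch of the unchanged old-style submission, assuming the radeon_drm.h definitions and an open, authenticated DRM fd that holds the hardware lock (the helper name and its parameters are illustrative):

        #include <string.h>
        #include <sys/ioctl.h>
        #include "drm.h"
        #include "radeon_drm.h"   /* assumed to provide drm_radeon_vertex_t */

        /* Old-style single-prim submission; format/state still come from the SAREA. */
        static int submit_classic(int fd, int idx, int nverts, int hwprim)
        {
                drm_radeon_vertex_t v;

                memset(&v, 0, sizeof(v));
                v.prim = hwprim;    /* e.g. one of the RADEON_PRIM_TYPE_* codes */
                v.idx = idx;        /* DMA buffer index */
                v.count = nverts;   /* number of vertices in the buffer */
                v.discard = 1;      /* kernel emits the buffer age and releases it */

                return ioctl(fd, DRM_IOCTL_RADEON_VERTEX, &v);
        }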
@@ -1478,6 +1477,116 @@ int radeon_cp_indirect( struct inode *inode, struct file *filp,
 	 * privileged clients.
 	 */
 	radeon_cp_dispatch_indirect( dev, buf, indirect.start, indirect.end );
+	if (indirect.discard) {
+		radeon_cp_discard_buffer( dev, buf );
+	}
+
+
+	return 0;
+}
+
+int radeon_cp_vertex2( struct inode *inode, struct file *filp,
+		       unsigned int cmd, unsigned long arg )
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->dev;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_device_dma_t *dma = dev->dma;
+	drm_buf_t *buf;
+	drm_radeon_buf_priv_t *buf_priv;
+	drm_radeon_vertex2_t vertex;
+	int i;
+	unsigned char laststate;
+
+	LOCK_TEST_WITH_RETURN( dev );
+
+	if ( !dev_priv ) {
+		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
+		return -EINVAL;
+	}
+
+	if ( copy_from_user( &vertex, (drm_radeon_vertex_t *)arg,
+			     sizeof(vertex) ) )
+		return -EFAULT;
+
+	DRM_DEBUG( __FUNCTION__": pid=%d index=%d discard=%d\n",
+		   current->pid, vertex.idx, vertex.discard );
+
+	if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) {
+		DRM_ERROR( "buffer index %d (of %d max)\n",
+			   vertex.idx, dma->buf_count - 1 );
+		return -EINVAL;
+	}
+
+	RING_SPACE_TEST_WITH_RETURN( dev_priv );
+	VB_AGE_TEST_WITH_RETURN( dev_priv );
+
+	buf = dma->buflist[vertex.idx];
+	buf_priv = buf->dev_private;
+
+	if ( buf->pid != current->pid ) {
+		DRM_ERROR( "process %d using buffer owned by %d\n",
+			   current->pid, buf->pid );
+		return -EINVAL;
+	}
+
+	if ( buf->pending ) {
+		DRM_ERROR( "sending pending buffer %d\n", vertex.idx );
+		return -EINVAL;
+	}
+
+	for (laststate = 0xff, i = 0 ; i < vertex.nr_prims ; i++) {
+		drm_radeon_prim_t prim;
+
+		if ( copy_from_user( &prim, &vertex.prim[i], sizeof(prim) ) )
+			return -EFAULT;
+
+/* printk( "prim %d vfmt %x hwprim %x start %d finish %d\n", */
+/* 	i, prim.vc_format, prim.prim, */
+/* 	prim.start, prim.finish ); */
+
+		if ( (prim.prim & RADEON_PRIM_TYPE_MASK) >
+		     RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
+			DRM_ERROR( "buffer prim %d\n", prim.prim );
+			return -EINVAL;
+		}
+
+		if ( prim.stateidx != laststate ) {
+			drm_radeon_state_t state;
+
+			if ( copy_from_user( &state,
+					     &vertex.state[prim.stateidx],
+					     sizeof(state) ) )
+				return -EFAULT;
+
+/* printk("emit state %d (%p) dirty %x\n", */
+/* 	prim.stateidx, */
+/* 	&vertex.state[prim.stateidx], */
+/* 	state.dirty); */
+
+			radeon_emit_state2( dev_priv, &state );
+
+			laststate = prim.stateidx;
+		}
+
+		if ( prim.finish <= prim.start )
+			continue;
+
+		if ( prim.start & 0x7 ) {
+			DRM_ERROR( "misaligned buffer 0x%x\n", prim.start );
+			return -EINVAL;
+		}
+
+		if ( prim.prim & RADEON_PRIM_WALK_IND ) {
+			radeon_cp_dispatch_indices( dev, buf, &prim );
+		} else {
+			radeon_cp_dispatch_vertex( dev, buf, &prim );
+		}
+	}
+
+	if ( vertex.discard ) {
+		radeon_cp_discard_buffer( dev, buf );
+	}
 
 	return 0;
 }
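The new vertex2 ioctl lets a client batch several prims, each referencing one of its state blocks, against a single DMA buffer. A hedged userspace sketch of a minimal single-prim, single-state submission, assuming the updated drm.h/radeon_drm.h headers are on the include path, fd is an open and authenticated DRM fd holding the hardware lock, and idx is a vertex buffer index obtained via DRM_IOCTL_DMA (not shown); the helper name and the 32-byte vertex size are illustrative assumptions, not part of this change:

        #include <string.h>
        #include <sys/ioctl.h>
        #include "drm.h"
        #include "radeon_drm.h"   /* assumed to provide the structs and ioctl numbers */

        /* Submit one non-indexed prim already written at the start of DMA
         * buffer `idx`, using a single caller-filled state block. */
        static int submit_one_prim(int fd, int idx, int nverts,
                                   unsigned int vc_format, unsigned int hwprim,
                                   drm_radeon_state_t *state)
        {
                drm_radeon_prim_t prim;
                drm_radeon_vertex2_t v;

                memset(&prim, 0, sizeof(prim));
                prim.start = 0;               /* byte offset of the first vertex; must be 8-byte aligned */
                prim.finish = nverts * 32;    /* must be > start; hypothetical 32-byte vertices */
                prim.prim = hwprim;           /* a RADEON_PRIM_TYPE_* code; RADEON_PRIM_WALK_IND selects the indexed path */
                prim.numverts = nverts;
                prim.vc_format = vc_format;
                prim.stateidx = 0;            /* index into the state array below */

                memset(&v, 0, sizeof(v));
                v.idx = idx;
                v.discard = 1;                /* kernel emits the buffer age and releases the buffer */
                v.nr_states = 1;
                v.state = state;              /* state->dirty selects which blocks get emitted */
                v.nr_prims = 1;
                v.prim = &prim;

                return ioctl(fd, DRM_IOCTL_RADEON_VERTEX2, &v);
        }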