author     Kristian Høgsberg <krh@redhat.com>    2008-08-01 13:35:56 -0400
committer  Kristian Høgsberg <krh@redhat.com>    2008-08-01 13:35:56 -0400
commit     086716c8e2516dd71e94ebda03e20943188a5e5e (patch)
tree       860462e19e868694f0e8838bc7418cc69fab0391 /shared-core
parent     5052e966ec7fe5146c2d73b90482003619add5da (diff)
parent     ccbaad52f79162a77d98d0dde00681b1dbf14165 (diff)
Merge commit 'origin/drm-gem' into modesetting-gem
Conflicts:
	linux-core/Makefile.kernel
	linux-core/drmP.h
	linux-core/drm_mm.c
	linux-core/drm_stub.c
	linux-core/i915_gem.c
	linux-core/i915_opregion.c
	shared-core/i915_dma.c
	shared-core/i915_drv.h
	shared-core/i915_irq.c
Diffstat (limited to 'shared-core')
-rw-r--r--   shared-core/i915_dma.c   |  61
-rw-r--r--   shared-core/i915_drm.h   |  65
-rw-r--r--   shared-core/i915_drv.h   | 276
-rw-r--r--   shared-core/i915_init.c  | 107
-rw-r--r--   shared-core/i915_irq.c   |   7
5 files changed, 254 insertions, 262 deletions
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index 8d09828a..1fdc5e17 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -41,12 +41,12 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_ring_buffer *ring = &(dev_priv->ring);
 	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-	u32 acthd_reg = IS_I965G(dev) ? I965REG_ACTHD : I915REG_ACTHD;
+	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
 	u32 last_acthd = I915_READ(acthd_reg);
 	u32 acthd;
 	int i;
 
-	for (i = 0; i < 10000; i++) {
+	for (i = 0; i < 100000; i++) {
 		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
 		acthd = I915_READ(acthd_reg);
 		ring->space = ring->head - (ring->tail + 8);
@@ -132,19 +132,6 @@ int i915_dma_cleanup(struct drm_device * dev)
 		dev_priv->ring.Size = 0;
 	}
 
-	if (dev_priv->status_page_dmah) {
-		drm_pci_free(dev, dev_priv->status_page_dmah);
-		dev_priv->status_page_dmah = NULL;
-		/* Need to rewrite hardware status page */
-		I915_WRITE(0x02080, 0x1ffff000);
-	}
-
-	if (dev_priv->hws_agpoffset) {
-		dev_priv->hws_agpoffset = 0;
-		drm_core_ioremapfree(&dev_priv->hws_map, dev);
-		I915_WRITE(0x02080, 0x1ffff000);
-	}
-
 	return 0;
 }
 
@@ -266,25 +253,6 @@ static int i915_initialize(struct drm_device * dev,
 	 */
 	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
 
-	/* Program Hardware Status Page */
-	if (!I915_NEED_GFX_HWS(dev)) {
-		dev_priv->status_page_dmah =
-			drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
-
-		if (!dev_priv->status_page_dmah) {
-			i915_dma_cleanup(dev);
-			DRM_ERROR("Can not allocate hardware status page\n");
-			return -ENOMEM;
-		}
-		dev_priv->hws_vaddr = dev_priv->status_page_dmah->vaddr;
-		dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
-
-		memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
-
-		I915_WRITE(0x02080, dev_priv->dma_status_page);
-	}
-	DRM_DEBUG("Enabled hardware status page\n");
-
 #ifdef I915_HAVE_BUFFER
 	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
 		mutex_init(&dev_priv->cmdbuf_mutex);
@@ -320,14 +288,14 @@ static int i915_dma_resume(struct drm_device * dev)
 	}
 
 	/* Program Hardware Status Page */
-	if (!dev_priv->hws_vaddr) {
+	if (!dev_priv->hw_status_page) {
 		DRM_ERROR("Can not find hardware status page\n");
 		return -EINVAL;
 	}
-	DRM_DEBUG("hw status page @ %p\n", dev_priv->hws_vaddr);
+	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
 
-	if (dev_priv->hws_agpoffset != 0)
-		I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
+	if (dev_priv->status_gfx_addr != 0)
+		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
 	else
 		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 	DRM_DEBUG("Enabled hardware status page\n");
@@ -896,6 +864,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_CHIPSET_ID:
 		value = dev->pci_device;
 		break;
+	case I915_PARAM_HAS_GEM:
+		value = 1;
+		break;
 	default:
 		DRM_ERROR("Unknown parameter %d\n", param->param);
 		return -EINVAL;
@@ -1033,7 +1004,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 
 	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
 
-	dev_priv->hws_agpoffset = hws->addr & (0x1ffff<<12);
+	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
 
 	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
 	dev_priv->hws_map.size = 4*1024;
@@ -1044,17 +1015,17 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 	drm_core_ioremap(&dev_priv->hws_map, dev);
 	if (dev_priv->hws_map.handle == NULL) {
 		i915_dma_cleanup(dev);
-		dev_priv->hws_agpoffset = 0;
+		dev_priv->status_gfx_addr = 0;
 		DRM_ERROR("can not ioremap virtual address for"
 				" G33 hw status page\n");
 		return -ENOMEM;
 	}
-	dev_priv->hws_vaddr = dev_priv->hws_map.handle;
+	dev_priv->hw_status_page = dev_priv->hws_map.handle;
 
-	memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
-	I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
-	DRM_DEBUG("load hws at %p\n", dev_priv->hws_vaddr);
+	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
 
 	return 0;
 }
@@ -1095,6 +1066,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h
index 8ba71687..53087b57 100644
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -190,6 +190,8 @@ typedef struct drm_i915_sarea {
 #define DRM_I915_GEM_MMAP	0x1e
 #define DRM_I915_GEM_SET_DOMAIN	0x1f
 #define DRM_I915_GEM_SW_FINISH	0x20
+#define DRM_I915_GEM_SET_TILING	0x21
+#define DRM_I915_GEM_GET_TILING	0x22
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -223,6 +225,8 @@ typedef struct drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
 #define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
 #define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
+#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
+#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
 
 /* Asynchronous page flipping:
  */
@@ -276,6 +280,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_ALLOW_BATCHBUFFER     2
 #define I915_PARAM_LAST_DISPATCH         3
 #define I915_PARAM_CHIPSET_ID            4
+#define I915_PARAM_HAS_GEM               5
 
 typedef struct drm_i915_getparam {
 	int param;
@@ -655,4 +660,64 @@ struct drm_i915_gem_busy {
 	uint32_t busy;
 };
 
+#define I915_TILING_NONE	0
+#define I915_TILING_X		1
+#define I915_TILING_Y		2
+
+#define I915_BIT_6_SWIZZLE_NONE		0
+#define I915_BIT_6_SWIZZLE_9		1
+#define I915_BIT_6_SWIZZLE_9_10		2
+#define I915_BIT_6_SWIZZLE_9_11		3
+#define I915_BIT_6_SWIZZLE_9_10_11	4
+/* Not seen by userland */
+#define I915_BIT_6_SWIZZLE_UNKNOWN	5
+
+struct drm_i915_gem_set_tiling {
+	/** Handle of the buffer to have its tiling state updated */
+	uint32_t handle;
+
+	/**
+	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+	 * I915_TILING_Y).
+	 *
+	 * This value is to be set on request, and will be updated by the
+	 * kernel on successful return with the actual chosen tiling layout.
+	 *
+	 * The tiling mode may be demoted to I915_TILING_NONE when the system
+	 * has bit 6 swizzling that can't be managed correctly by GEM.
+	 *
+	 * Buffer contents become undefined when changing tiling_mode.
+	 */
+	uint32_t tiling_mode;
+
+	/**
+	 * Stride in bytes for the object when in I915_TILING_X or
+	 * I915_TILING_Y.
+	 */
+	uint32_t stride;
+
+	/**
+	 * Returned address bit 6 swizzling required for CPU access through
+	 * mmap mapping.
+	 */
+	uint32_t swizzle_mode;
+};
+
+struct drm_i915_gem_get_tiling {
+	/** Handle of the buffer to get tiling state for. */
+	uint32_t handle;
+
+	/**
+	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+	 * I915_TILING_Y).
+	 */
+	uint32_t tiling_mode;
+
+	/**
+	 * Returned address bit 6 swizzling required for CPU access through
+	 * mmap mapping.
+	 */
+	uint32_t swizzle_mode;
+};
+
 #endif /* _I915_DRM_H_ */
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index a0f73298..2d186558 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -39,7 +39,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20080611"
+#define DRIVER_DATE		"20080730"
 
 #if defined(__linux__)
 #define I915_HAVE_FENCE
@@ -63,7 +63,7 @@
  */
 #define DRIVER_MAJOR		1
 #if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER)
-#define DRIVER_MINOR		14
+#define DRIVER_MINOR		13
 #else
 #define DRIVER_MINOR		6
 #endif
@@ -79,6 +79,14 @@ enum pipe {
 struct drm_i915_validate_buffer;
 #endif
 
+#define WATCH_COHERENCY	0
+#define WATCH_BUF	0
+#define WATCH_EXEC	0
+#define WATCH_LRU	0
+#define WATCH_RELOC	0
+#define WATCH_INACTIVE	0
+#define WATCH_PWRITE	0
+
 struct drm_i915_ring_buffer {
 	int tail_mask;
 	unsigned long Size;
@@ -140,12 +148,12 @@ struct drm_i915_private {
 	struct drm_i915_ring_buffer ring;
 
 	struct drm_dma_handle *status_page_dmah;
+	void *hw_status_page;
 	dma_addr_t dma_status_page;
 	uint32_t counter;
-	uint32_t hws_agpoffset;
+	uint32_t status_gfx_addr;
 	drm_local_map_t hws_map;
-	void *hws_vaddr;
-	struct drm_memrange_node *hws;
+	struct drm_gem_object *hws_obj;
 
 	unsigned int cpp;
@@ -166,7 +174,7 @@ struct drm_i915_private {
 
 	bool cursor_needs_physical;
 
-	struct drm_memrange vram;
+	struct drm_mm vram;
 
 #ifdef I915_HAVE_FENCE
 	uint32_t flush_sequence;
@@ -204,83 +212,6 @@ struct drm_i915_private {
 	int lvds_vbt:1;
 	int int_crt_support:1;
 
-	struct {
-		struct drm_memrange gtt_space;
-
-		/**
-		 * List of objects currently involved in rendering from the
-		 * ringbuffer.
-		 *
-		 * A reference is held on the buffer while on this list.
-		 */
-		struct list_head active_list;
-
-		/**
-		 * List of objects which are not in the ringbuffer but which
-		 * still have a write_domain which needs to be flushed before
-		 * unbinding.
-		 *
-		 * A reference is held on the buffer while on this list.
-		 */
-		struct list_head flushing_list;
-
-		/**
-		 * LRU list of objects which are not in the ringbuffer and
-		 * are ready to unbind, but are still in the GTT.
-		 *
-		 * A reference is not held on the buffer while on this list,
-		 * as merely being GTT-bound shouldn't prevent its being
-		 * freed, and we'll pull it off the list in the free path.
-		 */
-		struct list_head inactive_list;
-
-		/**
-		 * List of breadcrumbs associated with GPU requests currently
-		 * outstanding.
-		 */
-		struct list_head request_list;
-
-		/**
-		 * We leave the user IRQ off as much as possible,
-		 * but this means that requests will finish and never
-		 * be retired once the system goes idle. Set a timer to
-		 * fire periodically while the ring is running. When it
-		 * fires, go retire requests.
-		 */
-		struct delayed_work retire_work;
-
-		uint32_t next_gem_seqno;
-
-		/**
-		 * Waiting sequence number, if any
-		 */
-		uint32_t waiting_gem_seqno;
-
-		/**
-		 * Last seq seen at irq time
-		 */
-		uint32_t irq_gem_seqno;
-
-		/**
-		 * Flag if the X Server, and thus DRM, is not currently in
-		 * control of the device.
-		 *
-		 * This is set between LeaveVT and EnterVT. It needs to be
-		 * replaced with a semaphore. It also needs to be
-		 * transitioned away from for kernel modesetting.
-		 */
-		int suspended;
-
-		/**
-		 * Flag if the hardware appears to be wedged.
-		 *
-		 * This is set when attempts to idle the device timeout.
-		 * It prevents command submission from occuring and makes
-		 * every pending request fail
-		 */
-		int wedged;
-	} mm;
-
 	struct work_struct user_interrupt_task;
 
 #ifdef __linux__
@@ -375,7 +306,89 @@ struct drm_i915_private {
 	u8 saveDACMASK;
 	u8 saveDACDATA[256*3]; /* 256 3-byte colors */
 	u8 saveCR[37];
-};
+
+	struct {
+		struct drm_mm gtt_space;
+
+		/**
+		 * List of objects currently involved in rendering from the
+		 * ringbuffer.
+		 *
+		 * A reference is held on the buffer while on this list.
+		 */
+		struct list_head active_list;
+
+		/**
+		 * List of objects which are not in the ringbuffer but which
+		 * still have a write_domain which needs to be flushed before
+		 * unbinding.
+		 *
+		 * A reference is held on the buffer while on this list.
+		 */
+		struct list_head flushing_list;
+
+		/**
+		 * LRU list of objects which are not in the ringbuffer and
+		 * are ready to unbind, but are still in the GTT.
+		 *
+		 * A reference is not held on the buffer while on this list,
+		 * as merely being GTT-bound shouldn't prevent its being
+		 * freed, and we'll pull it off the list in the free path.
+		 */
+		struct list_head inactive_list;
+
+		/**
+		 * List of breadcrumbs associated with GPU requests currently
+		 * outstanding.
+		 */
+		struct list_head request_list;
+
+		/**
+		 * We leave the user IRQ off as much as possible,
+		 * but this means that requests will finish and never
+		 * be retired once the system goes idle. Set a timer to
+		 * fire periodically while the ring is running. When it
+		 * fires, go retire requests.
+		 */
+		struct delayed_work retire_work;
+
+		uint32_t next_gem_seqno;
+
+		/**
+		 * Waiting sequence number, if any
+		 */
+		uint32_t waiting_gem_seqno;
+
+		/**
+		 * Last seq seen at irq time
+		 */
+		uint32_t irq_gem_seqno;
+
+		/**
+		 * Flag if the X Server, and thus DRM, is not currently in
+		 * control of the device.
+		 *
+		 * This is set between LeaveVT and EnterVT. It needs to be
+		 * replaced with a semaphore. It also needs to be
+		 * transitioned away from for kernel modesetting.
+		 */
+		int suspended;
+
+		/**
+		 * Flag if the hardware appears to be wedged.
+		 *
+		 * This is set when attempts to idle the device timeout.
+		 * It prevents command submission from occuring and makes
+		 * every pending request fail
+		 */
+		int wedged;
+
+		/** Bit 6 swizzling required for X tiling */
+		uint32_t bit_6_swizzle_x;
+		/** Bit 6 swizzling required for Y tiling */
+		uint32_t bit_6_swizzle_y;
+	} mm;
+} drm_i915_private_t;
 
 struct drm_i915_file_private {
 	struct {
@@ -396,7 +409,7 @@ struct drm_i915_gem_object {
 	struct drm_gem_object *obj;
 
 	/** Current space allocated to this object in the GTT, if any. */
-	struct drm_memrange_node *gtt_space;
+	struct drm_mm_node *gtt_space;
 
 	/** This object's place on the active/flushing/inactive lists */
 	struct list_head list;
@@ -434,6 +447,15 @@ struct drm_i915_gem_object {
 
 	/** Breadcrumb of last rendering to the buffer. */
 	uint32_t last_rendering_seqno;
+
+	/** Current tiling mode for the object. */
+	uint32_t tiling_mode;
+
+	/**
+	 * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
+	 * GEM_DOMAIN_CPU is not in the object's read domain.
+	 */
+	uint8_t *page_cpu_valid;
 };
 
 /**
@@ -588,6 +610,11 @@ int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv);
 int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv);
+int i915_gem_set_tiling(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_get_tiling(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+void i915_gem_load(struct drm_device *dev);
 int i915_gem_proc_init(struct drm_minor *minor);
 void i915_gem_proc_cleanup(struct drm_minor *minor);
 int i915_gem_init_object(struct drm_gem_object *obj);
@@ -602,10 +629,27 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end);
 void i915_gem_retire_work_handler(struct work_struct *work);
+void i915_gem_clflush_object(struct drm_gem_object *obj);
 #endif
 
 extern unsigned int i915_fbpercrtc;
 
+/* i915_gem_tiling.c */
+void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+
+/* i915_gem_debug.c */
+void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+			  const char *where, uint32_t mark);
+#if WATCH_INACTIVE
+void i915_verify_inactive(struct drm_device *dev, char *file, int line);
+#else
+#define i915_verify_inactive(dev,file,line)
+#endif
+void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
+void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+			  const char *where, uint32_t mark);
+void i915_dump_lru(struct drm_device *dev, const char *where);
+
 #ifdef __linux__
 /* i915_opregion.c */
 extern int intel_opregion_init(struct drm_device *dev);
@@ -686,6 +730,39 @@ void i915_ring_validate(struct drm_device *dev, const char *func, int line);
 
 extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
+#define BREADCRUMB_BITS 31
+#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
+
+#define READ_BREADCRUMB(dev_priv)  (((volatile u32 *)(dev_priv->hw_status_page))[5])
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 4: ring 0 head pointer
+ * 5: ring 1 head pointer (915-class)
+ * 6: ring 2 head pointer (915-class)
+ *
+ * The area from dword 0x10 to 0x3ff is available for driver usage.
+ */
+#define READ_HWSP(dev_priv, reg)  (((volatile u32 *)(dev_priv->hw_status_page))[reg])
+#define I915_GEM_HWS_INDEX	0x10
+
+/* MCH MMIO space */
+/** 915-945 and GM965 MCH register controlling DRAM channel access */
+#define DCC			0x200
+#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL		(0 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC	(1 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED	(2 << 0)
+#define DCC_ADDRESSING_MODE_MASK			(3 << 0)
+#define DCC_CHANNEL_XOR_DISABLE				(1 << 10)
+
+/** 965 MCH register controlling DRAM channel configuration */
+#define CHDECMISC		0x111
+#define CHDECMISC_FLEXMEMORY		(1 << 1)
+
 /*
  * The Bridge device's PCI config space has information about the
  * fb aperture size and the amount of pre-reserved memory.
@@ -793,27 +870,6 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define MI_BATCH_NON_SECURE_I965	(1<<8)
 #define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
 
-#define BREADCRUMB_BITS 31
-#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
-
-#define READ_BREADCRUMB(dev_priv)  (((volatile u32 *)(dev_priv->hws_vaddr))[5])
-
-/**
- * Reads a dword out of the status page, which is written to from the command
- * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
- * MI_STORE_DATA_IMM.
- *
- * The following dwords have a reserved meaning:
- * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
- * 4: ring 0 head pointer
- * 5: ring 1 head pointer (915-class)
- * 6: ring 2 head pointer (915-class)
- *
- * The area from dword 0x10 to 0x3ff is available for driver usage.
- */
-#define READ_HWSP(dev_priv, reg)  (((volatile u32 *)(dev_priv->hws_vaddr))[reg])
-#define I915_GEM_HWS_INDEX	0x10
-
 /*
  * 3D instructions used by the kernel
  */
@@ -837,6 +893,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define GFX_OP_STIPPLE		((0x3<<29)|(0x1d<<24)|(0x83<<16))
 #define GFX_OP_MAP_INFO		((0x3<<29)|(0x1d<<24)|0x4)
 #define GFX_OP_DESTBUFFER_VARS	((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DESTBUFFER_INFO	((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
 #define GFX_OP_DRAWRECT_INFO	((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
 #define GFX_OP_DRAWRECT_INFO_I965	((0x7900<<16)|0x2)
 #define SRC_COPY_BLT_CMD	((2<<29)|(0x43<<22)|4)
@@ -880,8 +937,10 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define PRB1_HEAD	0x02044 /* 915+ only */
 #define PRB1_START	0x02048 /* 915+ only */
 #define PRB1_CTL	0x0204c /* 915+ only */
-#define I965REG_ACTHD	0x02074
+#define ACTHD_I965	0x02074
 #define HWS_PGA		0x02080
+#define HWS_ADDRESS_MASK	0xfffff000
+#define HWS_START_ADDRESS_SHIFT	4
 #define IPEIR		0x02088
 #define NOPID		0x02094
 #define HWSTAM		0x02098
@@ -911,7 +970,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define EMR		0x020b4
 #define ESR		0x020b8
 #define INSTPM		0x020c0
-#define I915REG_ACTHD	0x020C8
+#define ACTHD		0x020c8
 #define FW_BLC		0x020d8
 #define FW_BLC_SELF	0x020e0 /* 915+ only */
 #define MI_ARB_STATE	0x020e4 /* 915+ only */
@@ -961,7 +1020,6 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 /*
  * GPIO regs
  */
-
 #define GPIOA			0x5010
 #define GPIOB			0x5014
 #define GPIOC			0x5018
diff --git a/shared-core/i915_init.c b/shared-core/i915_init.c
index 009d447b..4f2d3a4f 100644
--- a/shared-core/i915_init.c
+++ b/shared-core/i915_init.c
@@ -100,92 +100,6 @@ int i915_probe_agp(struct pci_dev *pdev, unsigned long *aperture_size,
 	return 0;
 }
 
-static int i915_init_hwstatus(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_memrange_node *free_space;
-	int ret = 0;
-
-	/* Program Hardware Status Page */
-	if (!IS_G33(dev)) {
-		dev_priv->status_page_dmah =
-			drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
-
-		if (!dev_priv->status_page_dmah) {
-			DRM_ERROR("Can not allocate hardware status page\n");
-			ret = -ENOMEM;
-			goto out;
-		}
-		dev_priv->hws_vaddr = dev_priv->status_page_dmah->vaddr;
-		dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
-
-		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
-	} else {
-		free_space = drm_memrange_search_free(&dev_priv->vram,
-						      PAGE_SIZE,
-						      PAGE_SIZE, 0);
-		if (!free_space) {
-			DRM_ERROR("No free vram available, aborting\n");
-			ret = -ENOMEM;
-			goto out;
-		}
-
-		dev_priv->hws = drm_memrange_get_block(free_space, PAGE_SIZE,
-						       PAGE_SIZE);
-		if (!dev_priv->hws) {
-			DRM_ERROR("Unable to allocate or pin hw status page\n");
-			ret = -EINVAL;
-			goto out;
-		}
-
-		dev_priv->hws_agpoffset = dev_priv->hws->start;
-		dev_priv->hws_map.offset = dev->agp->base +
-			dev_priv->hws->start;
-		dev_priv->hws_map.size = PAGE_SIZE;
-		dev_priv->hws_map.type= 0;
-		dev_priv->hws_map.flags= 0;
-		dev_priv->hws_map.mtrr = 0;
-
-		drm_core_ioremap(&dev_priv->hws_map, dev);
-		if (dev_priv->hws_map.handle == NULL) {
-			dev_priv->hws_agpoffset = 0;
-			DRM_ERROR("can not ioremap virtual addr for"
-					"G33 hw status page\n");
-			ret = -ENOMEM;
-			goto out_free;
-		}
-		dev_priv->hws_vaddr = dev_priv->hws_map.handle;
-		I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
-	}
-
-	memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
-
-	DRM_DEBUG("Enabled hardware status page\n");
-
-	return 0;
-
-out_free:
-	/* free hws */
-out:
-	return ret;
-}
-
-static void i915_cleanup_hwstatus(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!IS_G33(dev)) {
-		if (dev_priv->status_page_dmah)
-			drm_pci_free(dev, dev_priv->status_page_dmah);
-	} else {
-		if (dev_priv->hws_map.handle)
-			drm_core_ioremapfree(&dev_priv->hws_map, dev);
-		if (dev_priv->hws)
-			drm_memrange_put_block(dev_priv->hws);
-	}
-	I915_WRITE(HWS_PGA, 0x1ffff000);
-}
-
 static int i915_load_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -195,7 +109,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	i915_probe_agp(dev->pdev, &agp_size, &prealloc_size);
 
 	/* Basic memrange allocator for stolen space (aka vram) */
-	drm_memrange_init(&dev_priv->vram, 0, prealloc_size);
+	drm_mm_init(&dev_priv->vram, 0, prealloc_size);
 	/* Let GEM Manage from end of prealloc space to end of aperture */
 	i915_gem_do_init(dev, prealloc_size, agp_size);
 	if (ret)
 		goto out;
 
-	ret = i915_init_hwstatus(dev);
-	if (ret)
-		goto destroy_ringbuffer;
-
 	/* Allow hardware batchbuffers unless told otherwise.
 	 */
 	dev_priv->allow_batchbuffer = 1;
 
@@ -217,7 +127,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (dev_priv->wq == 0) {
 		DRM_DEBUG("Error\n");
 		ret = -EINVAL;
-		goto destroy_hws;
+		goto destroy_ringbuffer;
 	}
 
 	ret = intel_init_bios(dev);
@@ -247,8 +157,6 @@ modeset_cleanup:
 	intel_modeset_cleanup(dev);
 destroy_wq:
 	destroy_workqueue(dev_priv->wq);
-destroy_hws:
-	i915_cleanup_hwstatus(dev);
 destroy_ringbuffer:
 	i915_gem_cleanup_ringbuffer(dev);
 out:
@@ -324,13 +232,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto free_priv;
 	}
 
-	INIT_LIST_HEAD(&dev_priv->mm.active_list);
-	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
-	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
-	INIT_LIST_HEAD(&dev_priv->mm.request_list);
-	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
-			  i915_gem_retire_work_handler);
-	dev_priv->mm.next_gem_seqno = 1;
+	i915_gem_load(dev);
 
 #ifdef __linux__
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
@@ -445,13 +347,12 @@ int i915_driver_unload(struct drm_device *dev)
 		dev_priv->sarea_bo = NULL;
 	}
 #endif
-	i915_cleanup_hwstatus(dev);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		mutex_lock(&dev->struct_mutex);
 		i915_gem_cleanup_ringbuffer(dev);
 		mutex_unlock(&dev->struct_mutex);
-		drm_memrange_takedown(&dev_priv->vram);
+		drm_mm_takedown(&dev_priv->vram);
 		i915_gem_lastclose(dev);
 	}
 
diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c
index b4004a8f..00570e11 100644
--- a/shared-core/i915_irq.c
+++ b/shared-core/i915_irq.c
@@ -502,6 +502,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 		I915_WRITE(IMR, dev_priv->irq_mask_reg);
 		(void) I915_READ(IMR);
 	}
+	return IRQ_NONE;
 }
 
 /*
@@ -768,7 +769,6 @@ int i915_enable_vblank(struct drm_device *dev, int plane)
 			      PIPE_VBLANK_INTERRUPT_STATUS);
 		I915_WRITE(pipestat_reg, pipestat);
 	}
-
 	DRM_SPINLOCK(&dev_priv->user_irq_lock);
 	i915_enable_irq(dev_priv, mask_reg);
 	DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
@@ -889,11 +889,6 @@ void i915_enable_interrupt (struct drm_device *dev)
 	opregion_enable_asle(dev);
#endif
#endif
-
-	I915_WRITE(IMR, dev_priv->irq_mask_reg);
-	I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
-	(void) I915_READ (IER);
-
 	dev_priv->irq_enabled = 1;
 }
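
The i915_drm.h hunks above add two GEM ioctls (DRM_IOCTL_I915_GEM_SET_TILING / DRM_IOCTL_I915_GEM_GET_TILING) and a new getparam value, I915_PARAM_HAS_GEM. The sketch below is illustrative only and is not part of this commit: it shows roughly how an authenticated userspace client might probe for GEM support and request X tiling on a buffer, assuming the patched i915_drm.h is on the include path and that `handle` is a GEM object handle obtained elsewhere (object creation is not part of this diff). Real code would normally go through libdrm rather than raw ioctl(2), since libdrm also restarts ioctls interrupted by signals.

/*
 * Illustrative sketch only -- not part of the commit.  Probe for GEM via
 * I915_PARAM_HAS_GEM, request X tiling on an existing GEM object with the
 * new SET_TILING ioctl, then read the result back with GET_TILING.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include "i915_drm.h"		/* the header patched above */

/* fd: open, authenticated DRM file descriptor; handle: existing GEM handle;
 * stride: pitch in bytes the caller intends to use for the X-tiled view.
 */
static int probe_and_set_x_tiling(int fd, uint32_t handle, uint32_t stride)
{
	struct drm_i915_getparam gp;
	struct drm_i915_gem_set_tiling set;
	struct drm_i915_gem_get_tiling get;
	int has_gem = 0;

	/* New in this merge: lets userspace detect GEM before relying on it. */
	gp.param = I915_PARAM_HAS_GEM;
	gp.value = &has_gem;
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0 || !has_gem)
		return -1;

	/* The kernel may demote tiling_mode to I915_TILING_NONE if it cannot
	 * manage the platform's bit-6 swizzling; it writes the mode actually
	 * chosen (and the required swizzle) back into the struct.
	 */
	set.handle = handle;
	set.tiling_mode = I915_TILING_X;
	set.stride = stride;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set) != 0)
		return -1;

	get.handle = handle;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get) != 0)
		return -1;

	printf("tiling_mode=%u swizzle_mode=%u\n",
	       get.tiling_mode, get.swizzle_mode);
	return 0;
}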