author    Alan Hourihane <alanh@tungstengraphics.com>  2008-02-18 22:35:46 +0000
committer Alan Hourihane <alanh@tungstengraphics.com>  2008-02-18 22:35:46 +0000
commit    f24ed2ad6c66e50268fd175146a1661ae4bbd350 (patch)
tree      df804321f182607e8183df3375a16807ff42ba85
parent    2b1c9cd696049d23845870329d2b61a5873f7b13 (diff)
parent    5d8c754bc2c720d70bbdeca6b294660105717a62 (diff)
Merge branch 'master' of git+ssh://git.freedesktop.org/git/mesa/drm into modesetting-101
Conflicts:
	linux-core/i915_fence.c
	linux-core/via_fence.c
	shared-core/i915_dma.c
	shared-core/i915_drv.h
	shared-core/i915_irq.c
-rw-r--r--  linux-core/drmP.h            |   2
-rw-r--r--  linux-core/drm_bo.c          |  34
-rw-r--r--  linux-core/drm_fence.c       | 392
-rw-r--r--  linux-core/drm_irq.c         |  29
-rw-r--r--  linux-core/drm_objects.h     |  76
-rw-r--r--  linux-core/i915_drv.c        |  51
-rw-r--r--  linux-core/i915_fence.c      | 246
-rw-r--r--  linux-core/nouveau_buffer.c  |   3
-rw-r--r--  linux-core/nouveau_fence.c   |  32
-rw-r--r--  linux-core/via_fence.c       | 116
-rw-r--r--  linux-core/xgi_drv.c         |  11
-rw-r--r--  linux-core/xgi_fence.c       |  55
-rw-r--r--  shared-core/drm_pciids.txt   |   2
-rw-r--r--  shared-core/i915_dma.c       | 123
-rw-r--r--  shared-core/i915_drm.h       |   9
-rw-r--r--  shared-core/i915_drv.h       |  33
-rw-r--r--  shared-core/mach64_irq.c     |  45
-rw-r--r--  shared-core/nouveau_fifo.c   |   1
-rw-r--r--  shared-core/nouveau_mem.c    |   4
-rw-r--r--  shared-core/nouveau_reg.h    |  25
-rw-r--r--  shared-core/nouveau_state.c  |   1
-rw-r--r--  shared-core/nv20_graph.c     |  45
-rw-r--r--  shared-core/nv40_fb.c        |   7
-rw-r--r--  shared-core/nv40_graph.c     |   4
-rw-r--r--  shared-core/radeon_cp.c      |  86
-rw-r--r--  shared-core/radeon_drv.h     |  42
-rw-r--r--  shared-core/via_drv.c        |  14
-rw-r--r--  shared-core/via_drv.h        |  11
-rw-r--r--  shared-core/via_map.c        |   3
29 files changed, 943 insertions(+), 559 deletions(-)
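
The core API change running through this merge: drm_fence_object_signaled() loses its poke_flush argument, and explicit flushing moves into the driver's new poll()/flush()/needed_flush() hooks. A before/after sketch of the caller side, using calls taken from the drm_bo.c hunks below:

    /* before: a non-zero third argument asked the core to poke the
     * driver's flush mechanism while checking */
    if (drm_fence_object_signaled(bo->fence, bo->fence_type, 0))
            drm_fence_usage_deref_unlocked(&bo->fence);

    /* after: two arguments only; when the fence is not yet signaled,
     * drm_fence_object_signaled() invokes driver->poll() itself */
    if (drm_fence_object_signaled(bo->fence, bo->fence_type))
            drm_fence_usage_deref_unlocked(&bo->fence);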
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index ab17ba0e..297d8d60 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -878,6 +878,8 @@ struct drm_device {
u32 *last_vblank; /* protected by dev->vbl_lock, used */
/* for wraparound handling */
u32 *vblank_offset; /* used to track how many vblanks */
+ int *vblank_enabled; /* so we don't call enable more than
+ once per disable */
u32 *vblank_premodeset; /* were lost during modeset */
struct timer_list vblank_disable_timer;
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 6b3d4e14..1f3c2d2c 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -287,7 +287,7 @@ int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
DRM_ASSERT_LOCKED(&bo->mutex);
if (bo->fence) {
- if (drm_fence_object_signaled(bo->fence, bo->fence_type, 0)) {
+ if (drm_fence_object_signaled(bo->fence, bo->fence_type)) {
drm_fence_usage_deref_unlocked(&bo->fence);
return 0;
}
@@ -354,7 +354,7 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
if (bo->fence && drm_fence_object_signaled(bo->fence,
- bo->fence_type, 0))
+ bo->fence_type))
drm_fence_usage_deref_unlocked(&bo->fence);
if (bo->fence && remove_all)
@@ -559,7 +559,7 @@ void drm_putback_buffer_objects(struct drm_device *dev)
list_del_init(&entry->lru);
DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
- DRM_WAKEUP(&entry->event_queue);
+ wake_up_all(&entry->event_queue);
/*
* FIXME: Might want to put back on head of list
@@ -659,7 +659,7 @@ int drm_fence_buffer_objects(struct drm_device *dev,
entry->fence_type = entry->new_fence_type;
DRM_FLAG_MASKED(entry->priv_flags, 0,
_DRM_BO_FLAG_UNFENCED);
- DRM_WAKEUP(&entry->event_queue);
+ wake_up_all(&entry->event_queue);
drm_bo_add_to_lru(entry);
}
mutex_unlock(&entry->mutex);
@@ -1031,7 +1031,7 @@ static int drm_bo_quick_busy(struct drm_buffer_object *bo)
BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
if (fence) {
- if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
drm_fence_usage_deref_unlocked(&bo->fence);
return 0;
}
@@ -1051,12 +1051,12 @@ static int drm_bo_busy(struct drm_buffer_object *bo)
BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
if (fence) {
- if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
drm_fence_usage_deref_unlocked(&bo->fence);
return 0;
}
drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
- if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
drm_fence_usage_deref_unlocked(&bo->fence);
return 0;
}
@@ -1248,7 +1248,7 @@ static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
mutex_unlock(&dev->struct_mutex);
if (ret) {
if (atomic_add_negative(-1, &bo->mapped))
- DRM_WAKEUP(&bo->event_queue);
+ wake_up_all(&bo->event_queue);
} else
drm_bo_fill_rep_arg(bo, rep);
@@ -1305,7 +1305,7 @@ static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
BUG_ON(action != _DRM_REF_TYPE1);
if (atomic_add_negative(-1, &bo->mapped))
- DRM_WAKEUP(&bo->event_queue);
+ wake_up_all(&bo->event_queue);
}
/*
@@ -1363,7 +1363,7 @@ out_unlock:
}
drm_bo_add_to_lru(bo);
if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
- DRM_WAKEUP(&bo->event_queue);
+ wake_up_all(&bo->event_queue);
DRM_FLAG_MASKED(bo->priv_flags, 0,
_DRM_BO_FLAG_UNFENCED);
}
@@ -1441,13 +1441,21 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
* We're switching command submission mechanism,
* or cannot simply rely on the hardware serializing for us.
*
- * Wait for buffer idle.
+ * Insert a driver-dependent barrier or wait for buffer idle.
*/
if ((fence_class != bo->fence_class) ||
((ftype ^ bo->fence_type) & bo->fence_type)) {
- ret = drm_bo_wait(bo, 0, 0, no_wait);
+ ret = -EINVAL;
+ if (driver->command_stream_barrier) {
+ ret = driver->command_stream_barrier(bo,
+ fence_class,
+ ftype,
+ no_wait);
+ }
+ if (ret)
+ ret = drm_bo_wait(bo, 0, 0, no_wait);
if (ret)
return ret;
@@ -1538,7 +1546,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
} else {
drm_bo_add_to_lru(bo);
if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
- DRM_WAKEUP(&bo->event_queue);
+ wake_up_all(&bo->event_queue);
DRM_FLAG_MASKED(bo->priv_flags, 0,
_DRM_BO_FLAG_UNFENCED);
}
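
The validation path above now tries a driver-supplied command-stream barrier first and only falls back to a blocking drm_bo_wait(). A minimal sketch of such a hook for a hypothetical driver (the foo_* names are illustrative, not part of this patch):

    /* Hypothetical barrier: order all prior rendering of @bo before the
     * commands of the current validation. Any error return (or a NULL
     * hook) makes the core fall back to forcing the buffer idle. */
    static int foo_command_stream_barrier(struct drm_buffer_object *bo,
                                          uint32_t new_fence_class,
                                          uint32_t new_fence_type,
                                          int no_wait)
    {
            if (no_wait)
                    return -EBUSY;  /* caller asked us not to sleep */
            return foo_emit_pipeline_flush(bo->dev);  /* hypothetical */
    }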
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
index b2c4e9c9..9d80327f 100644
--- a/linux-core/drm_fence.c
+++ b/linux-core/drm_fence.c
@@ -30,6 +30,57 @@
#include "drmP.h"
+
+/*
+ * Convenience function to be called by fence::wait methods that
+ * need polling.
+ */
+
+int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
+ int interruptible, uint32_t mask,
+ unsigned long end_jiffies)
+{
+ struct drm_device *dev = fence->dev;
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
+ uint32_t count = 0;
+ int ret;
+
+ DECLARE_WAITQUEUE(entry, current);
+ add_wait_queue(&fc->fence_queue, &entry);
+
+ ret = 0;
+
+ for (;;) {
+ __set_current_state((interruptible) ?
+ TASK_INTERRUPTIBLE :
+ TASK_UNINTERRUPTIBLE);
+ if (drm_fence_object_signaled(fence, mask))
+ break;
+ if (time_after_eq(jiffies, end_jiffies)) {
+ ret = -EBUSY;
+ break;
+ }
+ if (lazy)
+ schedule_timeout(1);
+ else if ((++count & 0x0F) == 0){
+ __set_current_state(TASK_RUNNING);
+ schedule();
+ __set_current_state((interruptible) ?
+ TASK_INTERRUPTIBLE :
+ TASK_UNINTERRUPTIBLE);
+ }
+ if (interruptible && signal_pending(current)) {
+ ret = -EAGAIN;
+ break;
+ }
+ }
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&fc->fence_queue, &entry);
+ return ret;
+}
+EXPORT_SYMBOL(drm_fence_wait_polling);
+
/*
* Typically called by the IRQ handler.
*/
@@ -39,27 +90,14 @@ void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
{
int wake = 0;
uint32_t diff;
- uint32_t relevant;
+ uint32_t relevant_type;
+ uint32_t new_type;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
struct drm_fence_driver *driver = dev->driver->fence_driver;
struct list_head *head;
struct drm_fence_object *fence, *next;
int found = 0;
- int is_exe = (type & DRM_FENCE_TYPE_EXE);
- int ge_last_exe;
-
-
- diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
-
- if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff)
- fc->pending_exe_flush = 0;
-
- diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
- ge_last_exe = diff < driver->wrap_diff;
-
- if (is_exe && ge_last_exe)
- fc->last_exe_flush = sequence;
if (list_empty(&fc->ring))
return;
@@ -72,7 +110,7 @@ void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
}
}
- fc->pending_flush &= ~type;
+ fc->waiting_types &= ~type;
head = (found) ? &fence->ring : &fc->ring;
list_for_each_entry_safe_reverse(fence, next, head, ring) {
@@ -81,64 +119,60 @@ void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
if (error) {
fence->error = error;
- fence->signaled = fence->type;
- fence->submitted_flush = fence->type;
- fence->flush_mask = fence->type;
+ fence->signaled_types = fence->type;
list_del_init(&fence->ring);
wake = 1;
break;
}
- if (is_exe)
- type |= fence->native_type;
+ if (type & DRM_FENCE_TYPE_EXE)
+ type |= fence->native_types;
- relevant = type & fence->type;
+ relevant_type = type & fence->type;
+ new_type = (fence->signaled_types | relevant_type) ^
+ fence->signaled_types;
- if ((fence->signaled | relevant) != fence->signaled) {
- fence->signaled |= relevant;
- fence->flush_mask |= relevant;
- fence->submitted_flush |= relevant;
+ if (new_type) {
+ fence->signaled_types |= new_type;
DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
- fence->base.hash.key, fence->signaled);
- wake = 1;
- }
+ fence->base.hash.key, fence->signaled_types);
+
+ if (driver->needed_flush)
+ fc->pending_flush |= driver->needed_flush(fence);
- relevant = fence->flush_mask &
- ~(fence->submitted_flush | fence->signaled);
+ if (new_type & fence->waiting_types)
+ wake = 1;
+ }
- fc->pending_flush |= relevant;
- fence->submitted_flush |= relevant;
+ fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
- if (!(fence->type & ~fence->signaled)) {
+ if (!(fence->type & ~fence->signaled_types)) {
DRM_DEBUG("Fence completely signaled 0x%08lx\n",
fence->base.hash.key);
list_del_init(&fence->ring);
}
-
}
/*
- * Reinstate lost flush flags.
+ * Reinstate lost waiting types.
*/
- if ((fc->pending_flush & type) != type) {
+ if ((fc->waiting_types & type) != type) {
head = head->prev;
list_for_each_entry(fence, head, ring) {
if (&fence->ring == &fc->ring)
break;
- diff = (fc->last_exe_flush - fence->sequence) &
+ diff = (fc->highest_waiting_sequence - fence->sequence) &
driver->sequence_mask;
if (diff > driver->wrap_diff)
break;
-
- relevant = fence->submitted_flush & ~fence->signaled;
- fc->pending_flush |= relevant;
+
+ fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
}
}
- if (wake) {
- DRM_WAKEUP(&fc->fence_queue);
- }
+ if (wake)
+ wake_up_all(&fc->fence_queue);
}
EXPORT_SYMBOL(drm_fence_handler);
@@ -219,41 +253,28 @@ static void drm_fence_object_destroy(struct drm_file *priv,
drm_fence_usage_deref_locked(&fence);
}
-int drm_fence_object_signaled(struct drm_fence_object *fence,
- uint32_t mask, int poke_flush)
+int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask)
{
unsigned long flags;
int signaled;
struct drm_device *dev = fence->dev;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_driver *driver = dev->driver->fence_driver;
-
- if (poke_flush)
- driver->poke_flush(dev, fence->fence_class);
+
+ mask &= fence->type;
read_lock_irqsave(&fm->lock, flags);
- signaled =
- (fence->type & mask & fence->signaled) == (fence->type & mask);
+ signaled = (mask & fence->signaled_types) == mask;
read_unlock_irqrestore(&fm->lock, flags);
-
+ if (!signaled && driver->poll) {
+ write_lock_irqsave(&fm->lock, flags);
+ driver->poll(dev, fence->fence_class, mask);
+ signaled = (mask & fence->signaled_types) == mask;
+ write_unlock_irqrestore(&fm->lock, flags);
+ }
return signaled;
}
EXPORT_SYMBOL(drm_fence_object_signaled);
-static void drm_fence_flush_exe(struct drm_fence_class_manager *fc,
- struct drm_fence_driver *driver,
- uint32_t sequence)
-{
- uint32_t diff;
-
- if (!fc->pending_exe_flush) {
- fc->exe_flush_sequence = sequence;
- fc->pending_exe_flush = 1;
- } else {
- diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
- if (diff < driver->wrap_diff)
- fc->exe_flush_sequence = sequence;
- }
-}
int drm_fence_object_flush(struct drm_fence_object *fence,
uint32_t type)
@@ -262,7 +283,10 @@ int drm_fence_object_flush(struct drm_fence_object *fence,
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
struct drm_fence_driver *driver = dev->driver->fence_driver;
- unsigned long flags;
+ unsigned long irq_flags;
+ uint32_t saved_pending_flush;
+ uint32_t diff;
+ int call_flush;
if (type & ~fence->type) {
DRM_ERROR("Flush trying to extend fence type, "
@@ -270,24 +294,36 @@ int drm_fence_object_flush(struct drm_fence_object *fence,
return -EINVAL;
}
- write_lock_irqsave(&fm->lock, flags);
- fence->flush_mask |= type;
- if ((fence->submitted_flush & fence->signaled)
- == fence->submitted_flush) {
- if ((fence->type & DRM_FENCE_TYPE_EXE) &&
- !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
- drm_fence_flush_exe(fc, driver, fence->sequence);
- fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
- } else {
- fc->pending_flush |= (fence->flush_mask &
- ~fence->submitted_flush);
- fence->submitted_flush = fence->flush_mask;
- }
- }
- write_unlock_irqrestore(&fm->lock, flags);
- driver->poke_flush(dev, fence->fence_class);
+ write_lock_irqsave(&fm->lock, irq_flags);
+ fence->waiting_types |= type;
+ fc->waiting_types |= fence->waiting_types;
+ diff = (fence->sequence - fc->highest_waiting_sequence) &
+ driver->sequence_mask;
+
+ if (diff < driver->wrap_diff)
+ fc->highest_waiting_sequence = fence->sequence;
+
+ /*
+ * fence->waiting_types has changed. Determine whether
+ * we need to initiate some kind of flush as a result of this.
+ */
+
+ saved_pending_flush = fc->pending_flush;
+ if (driver->needed_flush)
+ fc->pending_flush |= driver->needed_flush(fence);
+
+ if (driver->poll)
+ driver->poll(dev, fence->fence_class, fence->waiting_types);
+
+ call_flush = fc->pending_flush;
+ write_unlock_irqrestore(&fm->lock, irq_flags);
+
+ if (call_flush && driver->flush)
+ driver->flush(dev, fence->fence_class);
+
return 0;
}
+EXPORT_SYMBOL(drm_fence_object_flush);
/*
* Make sure old fence objects are signaled before their fence sequences are
@@ -299,90 +335,52 @@ void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
{
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
- struct drm_fence_driver *driver = dev->driver->fence_driver;
- uint32_t old_sequence;
- unsigned long flags;
struct drm_fence_object *fence;
+ unsigned long irq_flags;
+ struct drm_fence_driver *driver = dev->driver->fence_driver;
+ int call_flush;
+
uint32_t diff;
- write_lock_irqsave(&fm->lock, flags);
- old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
- diff = (old_sequence - fc->last_exe_flush) & driver->sequence_mask;
+ write_lock_irqsave(&fm->lock, irq_flags);
- if ((diff < driver->wrap_diff) && !fc->pending_exe_flush) {
- fc->pending_exe_flush = 1;
- fc->exe_flush_sequence = sequence - (driver->flush_diff / 2);
- }
- write_unlock_irqrestore(&fm->lock, flags);
+ list_for_each_entry_reverse(fence, &fc->ring, ring) {
+ diff = (sequence - fence->sequence) & driver->sequence_mask;
+ if (diff <= driver->flush_diff)
+ break;
+
+ fence->waiting_types = fence->type;
+ fc->waiting_types |= fence->type;
- mutex_lock(&dev->struct_mutex);
- read_lock_irqsave(&fm->lock, flags);
+ if (driver->needed_flush)
+ fc->pending_flush |= driver->needed_flush(fence);
+ }
+
+ if (driver->poll)
+ driver->poll(dev, fence_class, fc->waiting_types);
- if (list_empty(&fc->ring)) {
- read_unlock_irqrestore(&fm->lock, flags);
- mutex_unlock(&dev->struct_mutex);
- return;
- }
- fence = drm_fence_reference_locked(list_entry(fc->ring.next, struct drm_fence_object, ring));
- mutex_unlock(&dev->struct_mutex);
- diff = (old_sequence - fence->sequence) & driver->sequence_mask;
- read_unlock_irqrestore(&fm->lock, flags);
- if (diff < driver->wrap_diff)
- drm_fence_object_flush(fence, fence->type);
- drm_fence_usage_deref_unlocked(&fence);
-}
-EXPORT_SYMBOL(drm_fence_flush_old);
+ call_flush = fc->pending_flush;
+ write_unlock_irqrestore(&fm->lock, irq_flags);
-static int drm_fence_lazy_wait(struct drm_fence_object *fence,
- int ignore_signals,
- uint32_t mask)
-{
- struct drm_device *dev = fence->dev;
- struct drm_fence_manager *fm = &dev->fm;
- struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
- int signaled;
- unsigned long _end = jiffies + 3*DRM_HZ;
- int ret = 0;
+ if (call_flush && driver->flush)
+ driver->flush(dev, fence->fence_class);
+
+ /*
+ * FIXME: Should we implement a wait here for really old fences?
+ */
- do {
- DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
- (signaled = drm_fence_object_signaled(fence, mask, 1)));
- if (signaled)
- return 0;
- if (time_after_eq(jiffies, _end))
- break;
- } while (ret == -EINTR && ignore_signals);
- if (drm_fence_object_signaled(fence, mask, 0))
- return 0;
- if (time_after_eq(jiffies, _end))
- ret = -EBUSY;
- if (ret) {
- if (ret == -EBUSY) {
- DRM_ERROR("Fence timeout. "
- "GPU lockup or fence driver was "
- "taken down. %d 0x%08x 0x%02x 0x%02x 0x%02x\n",
- fence->fence_class,
- fence->sequence,
- fence->type,
- mask,
- fence->signaled);
- DRM_ERROR("Pending exe flush %d 0x%08x\n",
- fc->pending_exe_flush,
- fc->exe_flush_sequence);
- }
- return ((ret == -EINTR) ? -EAGAIN : ret);
- }
- return 0;
}
+EXPORT_SYMBOL(drm_fence_flush_old);
int drm_fence_object_wait(struct drm_fence_object *fence,
int lazy, int ignore_signals, uint32_t mask)
{
struct drm_device *dev = fence->dev;
struct drm_fence_driver *driver = dev->driver->fence_driver;
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
int ret = 0;
- unsigned long _end;
- int signaled;
+ unsigned long _end = 3 * DRM_HZ;
if (mask & ~fence->type) {
DRM_ERROR("Wait trying to extend fence type"
@@ -391,58 +389,39 @@ int drm_fence_object_wait(struct drm_fence_object *fence,
return -EINVAL;
}
- if (drm_fence_object_signaled(fence, mask, 0))
- return 0;
+ if (driver->wait)
+ return driver->wait(fence, lazy, !ignore_signals, mask);
- _end = jiffies + 3 * DRM_HZ;
drm_fence_object_flush(fence, mask);
+ if (driver->has_irq(dev, fence->fence_class, mask)) {
+ if (!ignore_signals)
+ ret = wait_event_interruptible_timeout
+ (fc->fence_queue,
+ drm_fence_object_signaled(fence, mask),
+ 3 * DRM_HZ);
+ else
+ ret = wait_event_timeout
+ (fc->fence_queue,
+ drm_fence_object_signaled(fence, mask),
+ 3 * DRM_HZ);
+
+ if (unlikely(ret == -ERESTARTSYS))
+ return -EAGAIN;
+
+ if (unlikely(ret == 0))
+ return -EBUSY;
- if (lazy && driver->lazy_capable) {
-
- ret = drm_fence_lazy_wait(fence, ignore_signals, mask);
- if (ret)
- return ret;
-
- } else {
-
- if (driver->has_irq(dev, fence->fence_class,
- DRM_FENCE_TYPE_EXE)) {
- ret = drm_fence_lazy_wait(fence, ignore_signals,
- DRM_FENCE_TYPE_EXE);
- if (ret)
- return ret;
- }
-
- if (driver->has_irq(dev, fence->fence_class,
- mask & ~DRM_FENCE_TYPE_EXE)) {
- ret = drm_fence_lazy_wait(fence, ignore_signals,
- mask);
- if (ret)
- return ret;
- }
- }
- if (drm_fence_object_signaled(fence, mask, 0))
return 0;
+ }
- /*
- * Avoid kernel-space busy-waits.
- */
- if (!ignore_signals)
- return -EAGAIN;
-
- do {
- schedule();
- signaled = drm_fence_object_signaled(fence, mask, 1);
- } while (!signaled && !time_after_eq(jiffies, _end));
-
- if (!signaled)
- return -EBUSY;
-
- return 0;
+ return drm_fence_wait_polling(fence, lazy, !ignore_signals, mask,
+ _end);
}
EXPORT_SYMBOL(drm_fence_object_wait);
+
+
int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
uint32_t fence_class, uint32_t type)
{
@@ -452,26 +431,26 @@ int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
unsigned long flags;
uint32_t sequence;
- uint32_t native_type;
+ uint32_t native_types;
int ret;
drm_fence_unring(dev, &fence->ring);
ret = driver->emit(dev, fence_class, fence_flags, &sequence,
- &native_type);
+ &native_types);
if (ret)
return ret;
write_lock_irqsave(&fm->lock, flags);
fence->fence_class = fence_class;
fence->type = type;
- fence->flush_mask = 0x00;
- fence->submitted_flush = 0x00;
- fence->signaled = 0x00;
+ fence->waiting_types = 0;
+ fence->signaled_types = 0;
fence->sequence = sequence;
- fence->native_type = native_type;
+ fence->native_types = native_types;
if (list_empty(&fc->ring))
- fc->last_exe_flush = sequence - 1;
+ fc->highest_waiting_sequence = sequence - 1;
list_add_tail(&fence->ring, &fc->ring);
+ fc->latest_queued_sequence = sequence;
write_unlock_irqrestore(&fm->lock, flags);
return 0;
}
@@ -500,9 +479,8 @@ static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
INIT_LIST_HEAD(&fence->base.list);
fence->fence_class = fence_class;
fence->type = type;
- fence->flush_mask = 0;
- fence->submitted_flush = 0;
- fence->signaled = 0;
+ fence->signaled_types = 0;
+ fence->waiting_types = 0;
fence->sequence = 0;
fence->dev = dev;
write_unlock_irqrestore(&fm->lock, flags);
@@ -577,8 +555,8 @@ void drm_fence_manager_init(struct drm_device *dev)
for (i = 0; i < fm->num_classes; ++i) {
fence_class = &fm->fence_class[i];
+ memset(fence_class, 0, sizeof(*fence_class));
INIT_LIST_HEAD(&fence_class->ring);
- fence_class->pending_flush = 0;
DRM_INIT_WAITQUEUE(&fence_class->fence_queue);
}
@@ -598,7 +576,7 @@ void drm_fence_fill_arg(struct drm_fence_object *fence,
arg->handle = fence->base.hash.key;
arg->fence_class = fence->fence_class;
arg->type = fence->type;
- arg->signaled = fence->signaled;
+ arg->signaled = fence->signaled_types;
arg->error = fence->error;
arg->sequence = fence->sequence;
read_unlock_irqrestore(&fm->lock, irq_flags);
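
drm_fence_wait_polling() added above is the building block for driver wait() methods that must poll for some fence types. A condensed sketch of the expected pattern (the full in-tree example is i915_fence_wait() later in this patch; interruptible handling is elided here):

    /* Sketch of a driver ::wait hook: flush, sleep on the fence queue for
     * the IRQ-capable part of the mask, then poll for the remainder. */
    static int foo_fence_wait(struct drm_fence_object *fence, int lazy,
                              int interruptible, uint32_t mask)
    {
            struct drm_fence_class_manager *fc =
                    &fence->dev->fm.fence_class[fence->fence_class];
            unsigned long end = jiffies + 3 * DRM_HZ;
            int ret;

            drm_fence_object_flush(fence, mask);
            ret = wait_event_timeout(fc->fence_queue,
                    drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE),
                    3 * DRM_HZ);
            if (ret == 0)
                    return -EBUSY;
            return drm_fence_wait_polling(fence, lazy, interruptible,
                                          mask, end);
    }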
diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c
index d88269a4..cb279bcd 100644
--- a/linux-core/drm_irq.c
+++ b/linux-core/drm_irq.c
@@ -74,11 +74,18 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
static void vblank_disable_fn(unsigned long arg)
{
struct drm_device *dev = (struct drm_device *)arg;
+ unsigned long irqflags;
int i;
- for (i = 0; i < dev->num_crtcs; i++)
- if (atomic_read(&dev->vblank_refcount[i]) == 0)
+ for (i = 0; i < dev->num_crtcs; i++) {
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
+ dev->vblank_enabled[i]) {
dev->driver->disable_vblank(dev, i);
+ dev->vblank_enabled[i] = 0;
+ }
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ }
}
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
@@ -111,6 +118,11 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
if (!dev->vblank_refcount)
goto err;
+ dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
+ DRM_MEM_DRIVER);
+ if (!dev->vblank_enabled)
+ goto err;
+
dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
if (!dev->last_vblank)
goto err;
@@ -143,6 +155,8 @@ err:
DRM_MEM_DRIVER);
drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
num_crtcs, DRM_MEM_DRIVER);
+ drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) * num_crtcs,
+ DRM_MEM_DRIVER);
drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * num_crtcs,
DRM_MEM_DRIVER);
drm_free(dev->vblank_premodeset, sizeof(*dev->vblank_premodeset) *
@@ -357,14 +371,20 @@ EXPORT_SYMBOL(drm_update_vblank_count);
*/
int drm_vblank_get(struct drm_device *dev, int crtc)
{
+ unsigned long irqflags;
int ret = 0;
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
- if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
+ !dev->vblank_enabled[crtc]) {
ret = dev->driver->enable_vblank(dev, crtc);
if (ret)
atomic_dec(&dev->vblank_refcount[crtc]);
+ else
+ dev->vblank_enabled[crtc] = 1;
}
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
return ret;
}
@@ -382,8 +402,7 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
{
/* Last user schedules interrupt disable */
if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
- mod_timer(&dev->vblank_disable_timer,
- round_jiffies_relative(DRM_HZ));
+ mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
}
EXPORT_SYMBOL(drm_vblank_put);
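
dev->vblank_enabled[] pairs with the existing refcount so that enable_vblank() fires only on a real 0->1 transition and the deferred disable in vblank_disable_fn() never runs while a reference is held. Typical driver-side usage, sketched:

    /* Hold a vblank reference across a wait: get() enables the interrupt
     * on the 0->1 transition; put() arms the disable timer, now deferred
     * by 5*DRM_HZ instead of the old rounded one-second delay. */
    ret = drm_vblank_get(dev, crtc);
    if (ret)
            return ret;
    /* ... sleep until the target vblank count is reached ... */
    drm_vblank_put(dev, crtc);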
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index 53fad3af..7b585c3e 100644
--- a/linux-core/drm_objects.h
+++ b/linux-core/drm_objects.h
@@ -147,12 +147,11 @@ struct drm_fence_object {
struct list_head ring;
int fence_class;
- uint32_t native_type;
+ uint32_t native_types;
uint32_t type;
- uint32_t signaled;
+ uint32_t signaled_types;
uint32_t sequence;
- uint32_t flush_mask;
- uint32_t submitted_flush;
+ uint32_t waiting_types;
uint32_t error;
};
@@ -161,10 +160,10 @@ struct drm_fence_object {
struct drm_fence_class_manager {
struct list_head ring;
uint32_t pending_flush;
+ uint32_t waiting_types;
wait_queue_head_t fence_queue;
- int pending_exe_flush;
- uint32_t last_exe_flush;
- uint32_t exe_flush_sequence;
+ uint32_t highest_waiting_sequence;
+ uint32_t latest_queued_sequence;
};
struct drm_fence_manager {
@@ -176,19 +175,49 @@ struct drm_fence_manager {
};
struct drm_fence_driver {
+ unsigned long *waiting_jiffies;
uint32_t num_classes;
uint32_t wrap_diff;
uint32_t flush_diff;
uint32_t sequence_mask;
- int lazy_capable;
+
+ /*
+ * Driver implemented functions:
+ * has_irq() : 1 if the hardware can update the indicated type_flags using an
+ * irq handler. 0 if polling is required.
+ *
+ * emit() : Emit a sequence number to the command stream.
+ * Return the sequence number.
+ *
+ * flush() : Make sure the flags indicated in fc->pending_flush will eventually
+ * signal for fc->highest_waiting_sequence and all preceding sequences.
+ * Acknowledge by clearing the flags fc->pending_flush.
+ *
+ * poll() : Call drm_fence_handler with any new information.
+ *
+ * needed_flush() : Given the current state of the fence->type flags and previously
+ * executed or queued flushes, return the type_flags that need flushing.
+ *
+ * wait(): Wait for the "mask" flags to signal on a given fence, performing
+ * whatever's necessary to make this happen.
+ */
+
int (*has_irq) (struct drm_device *dev, uint32_t fence_class,
uint32_t flags);
int (*emit) (struct drm_device *dev, uint32_t fence_class,
uint32_t flags, uint32_t *breadcrumb,
uint32_t *native_type);
- void (*poke_flush) (struct drm_device *dev, uint32_t fence_class);
+ void (*flush) (struct drm_device *dev, uint32_t fence_class);
+ void (*poll) (struct drm_device *dev, uint32_t fence_class,
+ uint32_t types);
+ uint32_t (*needed_flush) (struct drm_fence_object *fence);
+ int (*wait) (struct drm_fence_object *fence, int lazy,
+ int interruptible, uint32_t mask);
};
+extern int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
+ int interruptible, uint32_t mask,
+ unsigned long end_jiffies);
extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
uint32_t sequence, uint32_t type,
uint32_t error);
@@ -199,7 +228,7 @@ extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
extern int drm_fence_object_flush(struct drm_fence_object *fence,
uint32_t type);
extern int drm_fence_object_signaled(struct drm_fence_object *fence,
- uint32_t type, int flush);
+ uint32_t type);
extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence);
extern void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence);
extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);
@@ -575,6 +604,33 @@ struct drm_bo_driver {
* ttm_cache_flush
*/
void (*ttm_cache_flush)(struct drm_ttm *ttm);
+
+ /*
+ * command_stream_barrier
+ *
+ * @dev: The drm device.
+ *
+ * @bo: The buffer object to validate.
+ *
+ * @new_fence_class: The new fence class for the buffer object.
+ *
+ * @new_fence_type: The new fence type for the buffer object.
+ *
+ * @no_wait: whether this should give up and return -EBUSY
+ * if this operation would require sleeping
+ *
+ * Insert a command stream barrier that makes sure that the
+ * buffer is idle once the commands associated with the
+ * current validation are starting to execute. If an error
+ * condition is returned, or the function pointer is NULL,
+ * the drm core will force buffer idle
+ * during validation.
+ */
+
+ int (*command_stream_barrier) (struct drm_buffer_object *bo,
+ uint32_t new_fence_class,
+ uint32_t new_fence_type,
+ int no_wait);
};
/*
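
Taken together, the new hooks replace poke_flush and the lazy_capable flag. A minimal sketch of a polling-only driver that leaves every optional hook NULL, as the via and xgi drivers later in this patch do (the foo_* names are illustrative):

    static struct drm_fence_driver foo_fence_driver = {
            .num_classes   = 1,
            .wrap_diff     = (1 << 30),
            .flush_diff    = (1 << 29),
            .sequence_mask = 0xffffffffU,
            .has_irq       = foo_fence_has_irq,   /* returns 0: poll only */
            .emit          = foo_fence_emit_sequence,
            .flush         = NULL,   /* no flush mechanism to kick */
            .poll          = foo_fence_poll,
            .needed_flush  = NULL,   /* nothing ever needs flushing */
            .wait          = NULL,   /* use the core wait path */
    };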
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index 1f841642..64c805f5 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -40,17 +40,9 @@ static struct pci_device_id pciidlist[] = {
};
#ifdef I915_HAVE_FENCE
-static struct drm_fence_driver i915_fence_driver = {
- .num_classes = 1,
- .wrap_diff = (1U << (BREADCRUMB_BITS - 1)),
- .flush_diff = (1U << (BREADCRUMB_BITS - 2)),
- .sequence_mask = BREADCRUMB_MASK,
- .lazy_capable = 1,
- .emit = i915_fence_emit_sequence,
- .poke_flush = i915_poke_flush,
- .has_irq = i915_fence_has_irq,
-};
+extern struct drm_fence_driver i915_fence_driver;
#endif
+
#ifdef I915_HAVE_BUFFER
static uint32_t i915_mem_prios[] = {DRM_BO_MEM_VRAM, DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
@@ -68,6 +60,7 @@ static struct drm_bo_driver i915_bo_driver = {
.evict_flags = i915_evict_flags,
.move = i915_move,
.ttm_cache_flush = i915_flush_ttm,
+ .command_stream_barrier = NULL,
};
#endif
@@ -193,6 +186,7 @@ static void i915_save_vga(struct drm_device *dev)
dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
inb(st01);
outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
+ inb(st01);
/* Graphics controller registers */
for (i = 0; i < 9; i++)
@@ -258,6 +252,7 @@ static void i915_restore_vga(struct drm_device *dev)
i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
inb(st01); /* switch back to index mode */
outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
+ inb(st01);
/* VGA color palette registers */
outb(dev_priv->saveDACMASK, VGA_DACMASK);
@@ -309,6 +304,7 @@ static int i915_suspend(struct drm_device *dev)
dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
}
i915_save_palette(dev, PIPE_A);
+ dev_priv->savePIPEASTAT = I915_READ(I915REG_PIPEASTAT);
/* Pipe & plane B info */
dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
@@ -336,6 +332,7 @@ static int i915_suspend(struct drm_device *dev)
dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
}
i915_save_palette(dev, PIPE_B);
+ dev_priv->savePIPEBSTAT = I915_READ(I915REG_PIPEBSTAT);
/* CRT state */
dev_priv->saveADPA = I915_READ(ADPA);
@@ -362,12 +359,26 @@ static int i915_suspend(struct drm_device *dev)
dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ /* Interrupt state */
+ dev_priv->saveIIR = I915_READ(I915REG_INT_IDENTITY_R);
+ dev_priv->saveIER = I915_READ(I915REG_INT_ENABLE_R);
+ dev_priv->saveIMR = I915_READ(I915REG_INT_MASK_R);
+
/* VGA state */
dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0);
dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1);
dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV);
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+ /* Clock gating state */
+ dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
+
+ /* Cache mode state */
+ dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+
+ /* Memory Arbitration state */
+ dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+
/* Scratch space */
for (i = 0; i < 16; i++) {
dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2));
@@ -433,9 +444,7 @@ static int i915_resume(struct drm_device *dev)
I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
}
- if ((dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) &&
- (dev_priv->saveDPLL_A & DPLL_VGA_MODE_DIS))
- I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
+ I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
@@ -477,10 +486,9 @@ static int i915_resume(struct drm_device *dev)
I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
}
- if ((dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) &&
- (dev_priv->saveDPLL_B & DPLL_VGA_MODE_DIS))
- I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
- i915_restore_palette(dev, PIPE_A);
+ I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
+
+ i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
I915_WRITE(DSPBBASE, I915_READ(DSPBBASE));
@@ -518,6 +526,15 @@ static int i915_resume(struct drm_device *dev)
I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
udelay(150);
+ /* Clock gating state */
+ I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
+
+ /* Cache mode state */
+ I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+
+ /* Memory arbitration state */
+ I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
+
for (i = 0; i < 16; i++) {
I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]);
I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]);
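
The OR with 0xffff0000 on restore is worth a note: CACHE_MODE_0 and MI_ARB_STATE are masked registers on i915-class hardware, where the high 16 bits of a write select which of the low 16 bits actually change. Setting the whole mask makes the restore unconditional:

    /* Masked-register write: bit (n + 16) must be set for value bit n to
     * take effect, so OR-ing in 0xffff0000 rewrites all 16 value bits
     * from the saved copy. */
    I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);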
diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c
index 21d032b0..de64a4f2 100644
--- a/linux-core/i915_fence.c
+++ b/linux-core/i915_fence.c
@@ -35,60 +35,14 @@
#include "i915_drv.h"
/*
- * Implements an intel sync flush operation.
+ * Initiate a sync flush if it's not already pending.
*/
-static void i915_perform_flush(struct drm_device *dev)
+static inline void i915_initiate_rwflush(struct drm_i915_private *dev_priv,
+ struct drm_fence_class_manager *fc)
{
- struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
- struct drm_fence_manager *fm = &dev->fm;
- struct drm_fence_class_manager *fc = &fm->fence_class[0];
- struct drm_fence_driver *driver = dev->driver->fence_driver;
- uint32_t flush_flags = 0;
- uint32_t flush_sequence = 0;
- uint32_t i_status;
- uint32_t diff;
- uint32_t sequence;
- int rwflush;
-
- if (!dev_priv)
- return;
-
- if (fc->pending_exe_flush) {
- sequence = READ_BREADCRUMB(dev_priv);
-
- /*
- * First update fences with the current breadcrumb.
- */
-
- diff = (sequence - fc->last_exe_flush) & BREADCRUMB_MASK;
- if (diff < driver->wrap_diff && diff != 0) {
- drm_fence_handler(dev, 0, sequence,
- DRM_FENCE_TYPE_EXE, 0);
- }
-
- if (dev_priv->fence_irq_on && !fc->pending_exe_flush) {
- i915_user_irq_off(dev_priv);
- dev_priv->fence_irq_on = 0;
- } else if (!dev_priv->fence_irq_on && fc->pending_exe_flush) {
- i915_user_irq_on(dev_priv);
- dev_priv->fence_irq_on = 1;
- }
- }
-
- if (dev_priv->flush_pending) {
- i_status = READ_HWSP(dev_priv, 0);
- if ((i_status & (1 << 12)) !=
- (dev_priv->saved_flush_status & (1 << 12))) {
- flush_flags = dev_priv->flush_flags;
- flush_sequence = dev_priv->flush_sequence;
- dev_priv->flush_pending = 0;
- drm_fence_handler(dev, 0, flush_sequence, flush_flags, 0);
- }
- }
-
- rwflush = fc->pending_flush & DRM_I915_FENCE_TYPE_RW;
- if (rwflush && !dev_priv->flush_pending) {
+ if ((fc->pending_flush & DRM_I915_FENCE_TYPE_RW) &&
+ !dev_priv->flush_pending) {
dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
dev_priv->flush_flags = fc->pending_flush;
dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
@@ -96,36 +50,105 @@ static void i915_perform_flush(struct drm_device *dev)
dev_priv->flush_pending = 1;
fc->pending_flush &= ~DRM_I915_FENCE_TYPE_RW;
}
+}
+
+static inline void i915_report_rwflush(struct drm_device *dev,
+ struct drm_i915_private *dev_priv)
+{
+ if (unlikely(dev_priv->flush_pending)) {
+
+ uint32_t flush_flags;
+ uint32_t i_status;
+ uint32_t flush_sequence;
- if (dev_priv->flush_pending) {
i_status = READ_HWSP(dev_priv, 0);
if ((i_status & (1 << 12)) !=
(dev_priv->saved_flush_status & (1 << 12))) {
flush_flags = dev_priv->flush_flags;
flush_sequence = dev_priv->flush_sequence;
dev_priv->flush_pending = 0;
- drm_fence_handler(dev, 0, flush_sequence, flush_flags, 0);
+ drm_fence_handler(dev, 0, flush_sequence,
+ flush_flags, 0);
}
}
+}
+
+static void i915_fence_flush(struct drm_device *dev,
+ uint32_t fence_class)
+{
+ struct drm_i915_private *dev_priv =
+ (struct drm_i915_private *) dev->dev_private;
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_class_manager *fc = &fm->fence_class[0];
+ unsigned long irq_flags;
+ if (unlikely(!dev_priv))
+ return;
+
+ write_lock_irqsave(&fm->lock, irq_flags);
+ i915_initiate_rwflush(dev_priv, fc);
+ write_unlock_irqrestore(&fm->lock, irq_flags);
}
-void i915_poke_flush(struct drm_device *dev, uint32_t class)
+
+static void i915_fence_poll(struct drm_device *dev, uint32_t fence_class,
+ uint32_t waiting_types)
{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_fence_manager *fm = &dev->fm;
- unsigned long flags;
+ struct drm_fence_class_manager *fc = &fm->fence_class[0];
+ uint32_t sequence;
+
+ if (unlikely(!dev_priv))
+ return;
+
+ /*
+ * First, report any executed sync flush:
+ */
+
+ i915_report_rwflush(dev, dev_priv);
+
+ /*
+ * Report a new breadcrumb and adjust IRQs.
+ */
+
+ if (waiting_types & DRM_FENCE_TYPE_EXE) {
+
+ sequence = READ_BREADCRUMB(dev_priv);
+ drm_fence_handler(dev, 0, sequence,
+ DRM_FENCE_TYPE_EXE, 0);
+
+ if (dev_priv->fence_irq_on &&
+ !(fc->waiting_types & DRM_FENCE_TYPE_EXE)) {
+ i915_user_irq_off(dev_priv);
+ dev_priv->fence_irq_on = 0;
+ } else if (!dev_priv->fence_irq_on &&
+ (fc->waiting_types & DRM_FENCE_TYPE_EXE)) {
+ i915_user_irq_on(dev_priv);
+ dev_priv->fence_irq_on = 1;
+ }
+ }
+
+ /*
+ * There may be new RW flushes pending. Start them.
+ */
+
+ i915_initiate_rwflush(dev_priv, fc);
+
+ /*
+ * And possibly, but unlikely, they finish immediately.
+ */
+
+ i915_report_rwflush(dev, dev_priv);
- write_lock_irqsave(&fm->lock, flags);
- i915_perform_flush(dev);
- write_unlock_irqrestore(&fm->lock, flags);
}
-int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,
+static int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,
uint32_t flags, uint32_t *sequence,
uint32_t *native_type)
{
- struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
- if (!dev_priv)
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ if (unlikely(!dev_priv))
return -EINVAL;
i915_emit_irq(dev);
@@ -140,20 +163,109 @@ int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,
void i915_fence_handler(struct drm_device *dev)
{
struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_class_manager *fc = &fm->fence_class[0];
write_lock(&fm->lock);
- i915_perform_flush(dev);
+ i915_fence_poll(dev, 0, fc->waiting_types);
write_unlock(&fm->lock);
}
-int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags)
+/*
+ * We need a separate wait function since we need to poll for
+ * sync flushes.
+ */
+
+static int i915_fence_wait(struct drm_fence_object *fence,
+ int lazy, int interruptible, uint32_t mask)
{
+ struct drm_device *dev = fence->dev;
+ drm_i915_private_t *dev_priv = (struct drm_i915_private *) dev->dev_private;
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_class_manager *fc = &fm->fence_class[0];
+ int ret;
+ unsigned long _end = jiffies + 3 * DRM_HZ;
+
+ drm_fence_object_flush(fence, mask);
+ if (likely(interruptible))
+ ret = wait_event_interruptible_timeout
+ (fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE),
+ 3 * DRM_HZ);
+ else
+ ret = wait_event_timeout
+ (fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE),
+ 3 * DRM_HZ);
+
+ if (unlikely(ret == -ERESTARTSYS))
+ return -EAGAIN;
+
+ if (unlikely(ret == 0))
+ return -EBUSY;
+
+ if (likely(mask == DRM_FENCE_TYPE_EXE ||
+ drm_fence_object_signaled(fence, mask)))
+ return 0;
+
/*
- * We have an irq that tells us when we have a new breadcrumb.
+ * Remove this code snippet when fixed. HWSTAM doesn't let
+ * flush info through...
*/
- if (class == 0 && flags == DRM_FENCE_TYPE_EXE)
- return 1;
+ if (unlikely(dev_priv && !dev_priv->irq_enabled)) {
+ unsigned long irq_flags;
- return 0;
+ DRM_ERROR("X server disabled IRQs before releasing frame buffer.\n");
+ msleep(100);
+ dev_priv->flush_pending = 0;
+ write_lock_irqsave(&fm->lock, irq_flags);
+ drm_fence_handler(dev, fence->fence_class,
+ fence->sequence, fence->type, 0);
+ write_unlock_irqrestore(&fm->lock, irq_flags);
+ }
+
+ /*
+ * Poll for sync flush completion.
+ */
+
+ return drm_fence_wait_polling(fence, lazy, interruptible, mask, _end);
}
+
+static uint32_t i915_fence_needed_flush(struct drm_fence_object *fence)
+{
+ uint32_t flush_flags = fence->waiting_types &
+ ~(DRM_FENCE_TYPE_EXE | fence->signaled_types);
+
+ if (likely(flush_flags == 0 ||
+ ((flush_flags & ~fence->native_types) == 0) ||
+ (fence->signaled_types != DRM_FENCE_TYPE_EXE)))
+ return 0;
+ else {
+ struct drm_device *dev = fence->dev;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+ struct drm_fence_driver *driver = dev->driver->fence_driver;
+
+ if (unlikely(!dev_priv))
+ return 0;
+
+ if (dev_priv->flush_pending) {
+ uint32_t diff = (dev_priv->flush_sequence - fence->sequence) &
+ driver->sequence_mask;
+
+ if (diff < driver->wrap_diff)
+ return 0;
+ }
+ }
+ return flush_flags;
+}
+
+struct drm_fence_driver i915_fence_driver = {
+ .num_classes = 1,
+ .wrap_diff = (1U << (BREADCRUMB_BITS - 1)),
+ .flush_diff = (1U << (BREADCRUMB_BITS - 2)),
+ .sequence_mask = BREADCRUMB_MASK,
+ .has_irq = NULL,
+ .emit = i915_fence_emit_sequence,
+ .flush = i915_fence_flush,
+ .poll = i915_fence_poll,
+ .needed_flush = i915_fence_needed_flush,
+ .wait = i915_fence_wait,
+};
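
Both i915_fence_needed_flush() above and the core handler rely on the same wrap-safe sequence arithmetic: subtract modulo sequence_mask and compare against wrap_diff. Factored out as a sketch for clarity (foo_seq_after_eq is illustrative, not part of this patch):

    /* Wrap-safe "a was emitted at or after b" test used throughout this
     * patch; correct even when the hardware breadcrumb counter wraps. */
    static inline int foo_seq_after_eq(uint32_t a, uint32_t b,
                                       const struct drm_fence_driver *driver)
    {
            uint32_t diff = (a - b) & driver->sequence_mask;
            return diff < driver->wrap_diff;
    }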
diff --git a/linux-core/nouveau_buffer.c b/linux-core/nouveau_buffer.c
index a652bb1d..11549317 100644
--- a/linux-core/nouveau_buffer.c
+++ b/linux-core/nouveau_buffer.c
@@ -294,5 +294,6 @@ struct drm_bo_driver nouveau_bo_driver = {
.init_mem_type = nouveau_bo_init_mem_type,
.evict_flags = nouveau_bo_evict_flags,
.move = nouveau_bo_move,
- .ttm_cache_flush= nouveau_bo_flush_ttm
+ .ttm_cache_flush= nouveau_bo_flush_ttm,
+ .command_stream_barrier = NULL
};
diff --git a/linux-core/nouveau_fence.c b/linux-core/nouveau_fence.c
index 4e624a7a..59dcf7d0 100644
--- a/linux-core/nouveau_fence.c
+++ b/linux-core/nouveau_fence.c
@@ -75,7 +75,7 @@ nouveau_fence_emit(struct drm_device *dev, uint32_t class, uint32_t flags,
}
static void
-nouveau_fence_perform_flush(struct drm_device *dev, uint32_t class)
+nouveau_fence_poll(struct drm_device *dev, uint32_t class, uint32_t waiting_types)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_fence_class_manager *fc = &dev->fm.fence_class[class];
@@ -83,42 +83,26 @@ nouveau_fence_perform_flush(struct drm_device *dev, uint32_t class)
uint32_t pending_types = 0;
DRM_DEBUG("class=%d\n", class);
-
- pending_types = fc->pending_flush |
- ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
- DRM_DEBUG("pending: 0x%08x 0x%08x\n", pending_types,
- fc->pending_flush);
+ DRM_DEBUG("pending: 0x%08x 0x%08x\n", waiting_types, fc->waiting_types);
if (pending_types) {
uint32_t sequence = NV_READ(chan->ref_cnt);
DRM_DEBUG("got 0x%08x\n", sequence);
- drm_fence_handler(dev, class, sequence, pending_types, 0);
+ drm_fence_handler(dev, class, sequence, waiting_types, 0);
}
}
-static void
-nouveau_fence_poke_flush(struct drm_device *dev, uint32_t class)
-{
- struct drm_fence_manager *fm = &dev->fm;
- unsigned long flags;
-
- DRM_DEBUG("class=%d\n", class);
-
- write_lock_irqsave(&fm->lock, flags);
- nouveau_fence_perform_flush(dev, class);
- write_unlock_irqrestore(&fm->lock, flags);
-}
-
void
nouveau_fence_handler(struct drm_device *dev, int channel)
{
struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_class_manager *fc = &fm->fence_class[channel];
DRM_DEBUG("class=%d\n", channel);
write_lock(&fm->lock);
- nouveau_fence_perform_flush(dev, channel);
+ nouveau_fence_poll(dev, channel, fc->waiting_types);
write_unlock(&fm->lock);
}
@@ -127,8 +111,10 @@ struct drm_fence_driver nouveau_fence_driver = {
.wrap_diff = (1 << 30),
.flush_diff = (1 << 29),
.sequence_mask = 0xffffffffU,
- .lazy_capable = 1,
.has_irq = nouveau_fence_has_irq,
.emit = nouveau_fence_emit,
- .poke_flush = nouveau_fence_poke_flush
+ .flush = NULL,
+ .poll = nouveau_fence_poll,
+ .needed_flush = NULL,
+ .wait = NULL
};
diff --git a/linux-core/via_fence.c b/linux-core/via_fence.c
index b853df5c..20df4779 100644
--- a/linux-core/via_fence.c
+++ b/linux-core/via_fence.c
@@ -38,27 +38,21 @@
* DRM_VIA_FENCE_TYPE_ACCEL guarantees that all 2D & 3D rendering is complete.
*/
-
-static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class)
+static void via_fence_poll(struct drm_device *dev, uint32_t class,
+ uint32_t waiting_types)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- struct drm_fence_class_manager *fc = &dev->fm.fence_class[class];
- uint32_t pending_flush_types = 0;
uint32_t signaled_flush_types = 0;
uint32_t status;
if (class != 0)
- return 0;
+ return;
- if (!dev_priv)
- return 0;
+ if (unlikely(!dev_priv))
+ return;
spin_lock(&dev_priv->fence_lock);
-
- pending_flush_types = fc->pending_flush |
- ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
-
- if (pending_flush_types) {
+ if (waiting_types) {
/*
* Take the idlelock. This guarantees that the next time a client tries
@@ -77,7 +71,7 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class)
* Check if AGP command reader is idle.
*/
- if (pending_flush_types & DRM_FENCE_TYPE_EXE)
+ if (waiting_types & DRM_FENCE_TYPE_EXE)
if (VIA_READ(0x41C) & 0x80000000)
signaled_flush_types |= DRM_FENCE_TYPE_EXE;
@@ -85,7 +79,7 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class)
* Check VRAM command queue empty and 2D + 3D engines idle.
*/
- if (pending_flush_types & DRM_VIA_FENCE_TYPE_ACCEL) {
+ if (waiting_types & DRM_VIA_FENCE_TYPE_ACCEL) {
status = VIA_READ(VIA_REG_STATUS);
if ((status & VIA_VR_QUEUE_BUSY) &&
!(status & (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY | VIA_3D_ENG_BUSY)))
@@ -93,9 +87,9 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class)
}
if (signaled_flush_types) {
- pending_flush_types &= ~signaled_flush_types;
- if (!pending_flush_types && dev_priv->have_idlelock) {
- drm_idlelock_release(&dev->primary->master->lock);
+ waiting_types &= ~signaled_flush_types;
+ if (!waiting_types && dev_priv->have_idlelock) {
+ drm_idlelock_release(&dev->lock);
dev_priv->have_idlelock = 0;
}
drm_fence_handler(dev, 0, dev_priv->emit_0_sequence,
@@ -105,8 +99,7 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class)
spin_unlock(&dev_priv->fence_lock);
- return fc->pending_flush |
- ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
+ return;
}
@@ -114,8 +107,8 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class)
* Emit a fence sequence.
*/
-int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags,
- uint32_t * sequence, uint32_t * native_type)
+static int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags,
+ uint32_t * sequence, uint32_t * native_type)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
int ret = 0;
@@ -150,36 +143,6 @@ int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t fl
}
/**
- * Manual poll (from the fence manager).
- */
-
-void via_poke_flush(struct drm_device * dev, uint32_t class)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- struct drm_fence_manager *fm = &dev->fm;
- unsigned long flags;
- uint32_t pending_flush;
-
- if (!dev_priv)
- return ;
-
- write_lock_irqsave(&fm->lock, flags);
- pending_flush = via_perform_flush(dev, class);
- if (pending_flush)
- pending_flush = via_perform_flush(dev, class);
- write_unlock_irqrestore(&fm->lock, flags);
-
- /*
- * Kick the timer if there are more flushes pending.
- */
-
- if (pending_flush && !timer_pending(&dev_priv->fence_timer)) {
- dev_priv->fence_timer.expires = jiffies + 1;
- add_timer(&dev_priv->fence_timer);
- }
-}
-
-/**
* No irq fence expirations implemented yet.
* Although both the HQV engines and PCI dmablit engines signal
* idle with an IRQ, we haven't implemented this yet.
@@ -187,45 +150,20 @@ void via_poke_flush(struct drm_device * dev, uint32_t class)
* unless the caller wanting to wait for a fence object has indicated a lazy wait.
*/
-int via_fence_has_irq(struct drm_device * dev, uint32_t class,
- uint32_t flags)
+static int via_fence_has_irq(struct drm_device * dev, uint32_t class,
+ uint32_t flags)
{
return 0;
}
-/**
- * Regularly call the flush function. This enables lazy waits, so we can
- * set lazy_capable. Lazy waits don't really care when the fence expires,
- * so a timer tick delay should be fine.
- */
-
-void via_fence_timer(unsigned long data)
-{
- struct drm_device *dev = (struct drm_device *) data;
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- struct drm_fence_manager *fm = &dev->fm;
- uint32_t pending_flush;
- struct drm_fence_class_manager *fc = &dev->fm.fence_class[0];
-
- if (!dev_priv)
- return;
- if (!fm->initialized)
- goto out_unlock;
-
- via_poke_flush(dev, 0);
- pending_flush = fc->pending_flush |
- ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
-
- /*
- * disable timer if there are no more flushes pending.
- */
-
- if (!pending_flush && timer_pending(&dev_priv->fence_timer)) {
- BUG_ON(dev_priv->have_idlelock);
- del_timer(&dev_priv->fence_timer);
- }
- return;
-out_unlock:
- return;
-
-}
+struct drm_fence_driver via_fence_driver = {
+ .num_classes = 1,
+ .wrap_diff = (1 << 30),
+ .flush_diff = (1 << 20),
+ .sequence_mask = 0xffffffffU,
+ .has_irq = via_fence_has_irq,
+ .emit = via_fence_emit_sequence,
+ .poll = via_fence_poll,
+ .needed_flush = NULL,
+ .wait = NULL
+};
diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c
index 4f0b4ed0..f0225f89 100644
--- a/linux-core/xgi_drv.c
+++ b/linux-core/xgi_drv.c
@@ -37,16 +37,7 @@ static struct pci_device_id pciidlist[] = {
xgi_PCI_IDS
};
-static struct drm_fence_driver xgi_fence_driver = {
- .num_classes = 1,
- .wrap_diff = BEGIN_BEGIN_IDENTIFICATION_MASK,
- .flush_diff = BEGIN_BEGIN_IDENTIFICATION_MASK - 1,
- .sequence_mask = BEGIN_BEGIN_IDENTIFICATION_MASK,
- .lazy_capable = 1,
- .emit = xgi_fence_emit_sequence,
- .poke_flush = xgi_poke_flush,
- .has_irq = xgi_fence_has_irq
-};
+extern struct drm_fence_driver xgi_fence_driver;
int xgi_bootstrap(struct drm_device *, void *, struct drm_file *);
diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c
index 9a75581a..63ed29ee 100644
--- a/linux-core/xgi_fence.c
+++ b/linux-core/xgi_fence.c
@@ -30,44 +30,37 @@
#include "xgi_misc.h"
#include "xgi_cmdlist.h"
-static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class)
+static void xgi_fence_poll(struct drm_device * dev, uint32_t class,
+ uint32_t waiting_types)
{
struct xgi_info * info = dev->dev_private;
- struct drm_fence_class_manager * fc = &dev->fm.fence_class[class];
- uint32_t pending_flush_types = 0;
- uint32_t signaled_flush_types = 0;
+ uint32_t signaled_types = 0;
if ((info == NULL) || (class != 0))
- return 0;
+ return;
DRM_SPINLOCK(&info->fence_lock);
- pending_flush_types = fc->pending_flush |
- ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
-
- if (pending_flush_types) {
- if (pending_flush_types & DRM_FENCE_TYPE_EXE) {
+ if (waiting_types) {
+ if (waiting_types & DRM_FENCE_TYPE_EXE) {
const u32 begin_id = le32_to_cpu(DRM_READ32(info->mmio_map,
0x2820))
& BEGIN_BEGIN_IDENTIFICATION_MASK;
if (begin_id != info->complete_sequence) {
info->complete_sequence = begin_id;
- signaled_flush_types |= DRM_FENCE_TYPE_EXE;
+ signaled_types |= DRM_FENCE_TYPE_EXE;
}
}
- if (signaled_flush_types) {
+ if (signaled_types) {
drm_fence_handler(dev, 0, info->complete_sequence,
- signaled_flush_types, 0);
+ signaled_types, 0);
}
}
DRM_SPINUNLOCK(&info->fence_lock);
-
- return fc->pending_flush |
- ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
}
@@ -98,25 +91,13 @@ int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class,
}
-void xgi_poke_flush(struct drm_device * dev, uint32_t class)
-{
- struct drm_fence_manager * fm = &dev->fm;
- unsigned long flags;
-
-
- write_lock_irqsave(&fm->lock, flags);
- xgi_do_flush(dev, class);
- write_unlock_irqrestore(&fm->lock, flags);
-}
-
-
void xgi_fence_handler(struct drm_device * dev)
{
struct drm_fence_manager * fm = &dev->fm;
-
+ struct drm_fence_class_manager *fc = &fm->fence_class[0];
write_lock(&fm->lock);
- xgi_do_flush(dev, 0);
+ xgi_fence_poll(dev, 0, fc->waiting_types);
write_unlock(&fm->lock);
}
@@ -125,3 +106,17 @@ int xgi_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags)
{
return ((class == 0) && (flags == DRM_FENCE_TYPE_EXE)) ? 1 : 0;
}
+
+struct drm_fence_driver xgi_fence_driver = {
+ .num_classes = 1,
+ .wrap_diff = BEGIN_BEGIN_IDENTIFICATION_MASK,
+ .flush_diff = BEGIN_BEGIN_IDENTIFICATION_MASK - 1,
+ .sequence_mask = BEGIN_BEGIN_IDENTIFICATION_MASK,
+ .has_irq = xgi_fence_has_irq,
+ .emit = xgi_fence_emit_sequence,
+ .flush = NULL,
+ .poll = xgi_fence_poll,
+ .needed_flush = NULL,
+ .wait = NULL
+};
+
diff --git a/shared-core/drm_pciids.txt b/shared-core/drm_pciids.txt
index 7ef34dfd..c895d7f7 100644
--- a/shared-core/drm_pciids.txt
+++ b/shared-core/drm_pciids.txt
@@ -83,6 +83,7 @@
0x1002 0x5460 CHIP_RV380|RADEON_IS_MOBILITY "ATI Radeon Mobility X300 M22"
0x1002 0x5462 CHIP_RV380|RADEON_IS_MOBILITY "ATI Radeon Mobility X600 SE M24C"
0x1002 0x5464 CHIP_RV380|RADEON_IS_MOBILITY "ATI FireGL M22 GL 5464"
+0x1002 0x5657 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X550XTX"
0x1002 0x5548 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800"
0x1002 0x5549 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 Pro"
0x1002 0x554A CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 XT PE"
@@ -236,6 +237,7 @@
0x1002 0x7297 CHIP_RV560|RADEON_NEW_MEMMAP "ATI RV560"
0x1002 0x7834 CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP "ATI Radeon RS350 9000/9100 IGP"
0x1002 0x7835 CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon RS350 Mobility IGP"
+0x1002 0x791e CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART "ATI Radeon RS690 X1250 IGP"
[r128]
0x1002 0x4c45 0 "ATI Rage 128 Mobility LE (PCI)"
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index 0a3d82a0..3d489231 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -85,7 +85,70 @@ int i915_dma_cleanup(struct drm_device * dev)
return 0;
}
-static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+
+#define DRI2_SAREA_BLOCK_TYPE(b) ((b) >> 16)
+#define DRI2_SAREA_BLOCK_SIZE(b) ((b) & 0xffff)
+#define DRI2_SAREA_BLOCK_NEXT(p) \
+ ((void *) ((unsigned char *) (p) + \
+ DRI2_SAREA_BLOCK_SIZE(*(unsigned int *) p)))
+
+#define DRI2_SAREA_BLOCK_END 0x0000
+#define DRI2_SAREA_BLOCK_LOCK 0x0001
+#define DRI2_SAREA_BLOCK_EVENT_BUFFER 0x0002
+
+static int
+setup_dri2_sarea(struct drm_device * dev,
+ struct drm_file *file_priv,
+ drm_i915_init_t * init)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+ unsigned int *p, *end, *next;
+
+ mutex_lock(&dev->struct_mutex);
+ dev_priv->sarea_bo =
+ drm_lookup_buffer_object(file_priv,
+ init->sarea_handle, 1);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (!dev_priv->sarea_bo) {
+ DRM_ERROR("did not find sarea bo\n");
+ return -EINVAL;
+ }
+
+ ret = drm_bo_kmap(dev_priv->sarea_bo, 0,
+ dev_priv->sarea_bo->num_pages,
+ &dev_priv->sarea_kmap);
+ if (ret) {
+ DRM_ERROR("could not map sarea bo\n");
+ return ret;
+ }
+
+ p = dev_priv->sarea_kmap.virtual;
+ end = (void *) p + (dev_priv->sarea_bo->num_pages << PAGE_SHIFT);
+ while (p < end && DRI2_SAREA_BLOCK_TYPE(*p) != DRI2_SAREA_BLOCK_END) {
+ switch (DRI2_SAREA_BLOCK_TYPE(*p)) {
+ case DRI2_SAREA_BLOCK_LOCK:
+ dev->lock.hw_lock = (void *) (p + 1);
+ dev->sigdata.lock = dev->lock.hw_lock;
+ break;
+ }
+ next = DRI2_SAREA_BLOCK_NEXT(p);
+ if (next <= p || end < next) {
+ DRM_ERROR("malformed dri2 sarea: next is %p should be within %p-%p\n",
+ next, p, end);
+ return -EINVAL;
+ }
+ p = next;
+ }
+
+ return 0;
+}
+
+
+static int i915_initialize(struct drm_device * dev,
+ struct drm_file *file_priv,
+ drm_i915_init_t * init)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -163,6 +226,17 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
#ifdef I915_HAVE_BUFFER
mutex_init(&dev_priv->cmdbuf_mutex);
#endif
+
+ if (init->func == I915_INIT_DMA2) {
+ ret = setup_dri2_sarea(dev, file_priv, init);
+ if (ret) {
+ i915_dma_cleanup(dev);
+ DRM_ERROR("could not set up dri2 sarea\n");
+ return ret;
+ }
+ }
+
+
return 0;
}
@@ -207,7 +281,8 @@ static int i915_dma_init(struct drm_device *dev, void *data,
switch (init->func) {
case I915_INIT_DMA:
- retcode = i915_initialize(dev, init);
+ case I915_INIT_DMA2:
+ retcode = i915_initialize(dev, file_priv, init);
break;
case I915_CLEANUP_DMA:
retcode = i915_dma_cleanup(dev);
@@ -458,7 +533,8 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
- drm_fence_flush_old(dev, 0, dev_priv->counter);
+ if (unlikely((dev_priv->counter & 0xFF) == 0))
+ drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
return 0;
}
@@ -512,7 +588,8 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
- drm_fence_flush_old(dev, 0, dev_priv->counter);
+ if (unlikely((dev_priv->counter & 0xFF) == 0))
+ drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
return 0;
}
@@ -587,7 +664,7 @@ void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
- if (!sync)
+ if (unlikely(!sync && ((dev_priv->counter & 0xFF) == 0)))
drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
}
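
The three hunks above share one change: drm_fence_flush_old() used to
run on every breadcrumb and is now throttled to once per 256 emitted
sequence numbers via the low-byte mask. A throw-away demonstration of
the predicate, not driver code:

	#include <stdio.h>

	int main(void)
	{
		unsigned int counter, flushes = 0;

		for (counter = 1; counter <= 1024; counter++)
			if ((counter & 0xFF) == 0)	/* true at 256, 512, ... */
				flushes++;

		printf("%u flushes in 1024 dispatches\n", flushes);	/* prints 4 */
		return 0;
	}
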
@@ -1071,7 +1148,8 @@ static int i915_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto out_err0;
- sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+ if (sarea_priv)
+ sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
/* fence */
ret = drm_fence_buffer_objects(dev, NULL, fence_arg->flags,
@@ -1085,7 +1163,7 @@ static int i915_execbuffer(struct drm_device *dev, void *data,
fence_arg->handle = fence->base.hash.key;
fence_arg->fence_class = fence->fence_class;
fence_arg->type = fence->type;
- fence_arg->signaled = fence->signaled;
+ fence_arg->signaled = fence->signaled_types;
}
}
drm_fence_usage_deref_unlocked(&fence);
@@ -1171,6 +1249,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_LAST_DISPATCH:
value = READ_BREADCRUMB(dev_priv);
break;
+ case I915_PARAM_CHIPSET_ID:
+ value = dev->pci_device;
+ break;
default:
DRM_ERROR("Unknown parameter %d\n", param->param);
return -EINVAL;
@@ -1309,6 +1390,34 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
return 0;
}
+#if 0 /* FIXME DRI2 */
+void i915_driver_lastclose(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (drm_getsarea(dev) && dev_priv->sarea_priv)
+ i915_do_cleanup_pageflip(dev);
+ if (dev_priv->agp_heap)
+ i915_mem_takedown(&(dev_priv->agp_heap));
+
+ if (dev_priv->sarea_kmap.virtual) {
+ drm_bo_kunmap(&dev_priv->sarea_kmap);
+ dev_priv->sarea_kmap.virtual = NULL;
+ dev->lock.hw_lock = NULL;
+ dev->sigdata.lock = NULL;
+ }
+
+ if (dev_priv->sarea_bo) {
+ mutex_lock(&dev->struct_mutex);
+ drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
+ mutex_unlock(&dev->struct_mutex);
+ dev_priv->sarea_bo = NULL;
+ }
+
+ i915_dma_cleanup(dev);
+}
+#endif
+
struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h
index d48d7665..6067fa02 100644
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -43,7 +43,12 @@ typedef struct drm_i915_init {
enum {
I915_INIT_DMA = 0x01,
I915_CLEANUP_DMA = 0x02,
- I915_RESUME_DMA = 0x03
+ I915_RESUME_DMA = 0x03,
+
+	/* Since this struct isn't versioned, just use a new
+	 * 'func' code to indicate the presence of dri2 sarea
+	 * info. */
+ I915_INIT_DMA2 = 0x04
} func;
unsigned int mmio_offset;
int sarea_priv_offset;
@@ -61,6 +66,7 @@ typedef struct drm_i915_init {
unsigned int depth_pitch;
unsigned int cpp;
unsigned int chipset;
+ unsigned int sarea_handle;
} drm_i915_init_t;
typedef struct drm_i915_sarea {
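
The only addition I915_INIT_DMA2 makes to the init struct is
sarea_handle, the buffer-object handle of the block-structured DRI2
sarea. A hedged userspace sketch (the helper name is hypothetical;
assumes <sys/ioctl.h>, <string.h> and this header):

	static int i915_init_dri2_dma(int fd, unsigned int sarea_bo_handle)
	{
		drm_i915_init_t init;

		memset(&init, 0, sizeof(init));
		init.func = I915_INIT_DMA2;
		init.sarea_handle = sarea_bo_handle;
		/* ring, mmio and framebuffer fields set as for I915_INIT_DMA */
		return ioctl(fd, DRM_IOCTL_I915_INIT, &init);
	}
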
@@ -232,6 +238,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_IRQ_ACTIVE 1
#define I915_PARAM_ALLOW_BATCHBUFFER 2
#define I915_PARAM_LAST_DISPATCH 3
+#define I915_PARAM_CHIPSET_ID 4
typedef struct drm_i915_getparam {
int param;
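
I915_PARAM_CHIPSET_ID lets a DRI2 client discover the PCI device ID
without asking the X server: the getparam ioctl fills *value with
dev->pci_device. A sketch of the query (helper name hypothetical;
assumes <sys/ioctl.h> and this header):

	static int i915_get_chipset_id(int fd)
	{
		drm_i915_getparam_t gp;
		int id = 0;

		gp.param = I915_PARAM_CHIPSET_ID;
		gp.value = &id;
		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) < 0)
			return -1;	/* older kernel: parameter unknown */
		return id;
	}
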
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index 1432806d..f6c0005d 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -164,7 +164,11 @@ struct drm_i915_private {
bool panel_wants_dither;
struct drm_display_mode *panel_fixed_mode;
- /* Register state */
+ /* DRI2 sarea */
+ struct drm_buffer_object *sarea_bo;
+ struct drm_bo_kmap_obj sarea_kmap;
+
+ /* Register state */
u8 saveLBB;
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
@@ -183,6 +187,7 @@ struct drm_i915_private {
u32 saveVBLANK_A;
u32 saveVSYNC_A;
u32 saveBCLRPAT_A;
+ u32 savePIPEASTAT;
u32 saveDSPASTRIDE;
u32 saveDSPASIZE;
u32 saveDSPAPOS;
@@ -203,6 +208,7 @@ struct drm_i915_private {
u32 saveVBLANK_B;
u32 saveVSYNC_B;
u32 saveBCLRPAT_B;
+ u32 savePIPEBSTAT;
u32 saveDSPBSTRIDE;
u32 saveDSPBSIZE;
u32 saveDSPBPOS;
@@ -231,12 +237,18 @@ struct drm_i915_private {
u32 saveFBC_LL_BASE;
u32 saveFBC_CONTROL;
u32 saveFBC_CONTROL2;
+ u32 saveIER;
+ u32 saveIIR;
+ u32 saveIMR;
+ u32 saveCACHE_MODE_0;
+ u32 saveDSPCLK_GATE_D;
+ u32 saveMI_ARB_STATE;
u32 saveSWF0[16];
u32 saveSWF1[16];
u32 saveSWF2[3];
u8 saveMSR;
u8 saveSR[8];
- u8 saveGR[24];
+ u8 saveGR[25];
u8 saveAR_INDEX;
u8 saveAR[20];
u8 saveDACMASK;
@@ -312,15 +324,9 @@ extern void i915_mem_release(struct drm_device * dev,
struct mem_block *heap);
#ifdef I915_HAVE_FENCE
/* i915_fence.c */
-
-
extern void i915_fence_handler(struct drm_device *dev);
-extern int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,
- uint32_t flags,
- uint32_t *sequence,
- uint32_t *native_type);
-extern void i915_poke_flush(struct drm_device *dev, uint32_t class);
-extern int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags);
+extern void i915_invalidate_reported_sequence(struct drm_device *dev);
+
#endif
#ifdef I915_HAVE_BUFFER
@@ -685,6 +691,10 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
*/
#define DMA_FADD_S 0x20d4
+/* Memory Interface Arbitration State
+ */
+#define MI_ARB_STATE 0x20e4
+
/* Cache mode 0 reg.
* - Manipulating render cache behaviour is central
* to the concept of zone rendering, tuning this reg can help avoid
@@ -695,6 +705,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
* bit of interest either set or cleared. EG: (BIT<<16) | BIT to set.
*/
#define Cache_Mode_0 0x2120
+#define CACHE_MODE_0 0x2120
#define CM0_MASK_SHIFT 16
#define CM0_IZ_OPT_DISABLE (1<<6)
#define CM0_ZR_OPT_DISABLE (1<<5)
@@ -891,6 +902,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
/** P1 value is 2 greater than this field */
# define VGA0_PD_P1_MASK (0x1f << 0)
+#define DSPCLK_GATE_D 0x6200
+
/* I830 CRTC registers */
#define HTOTAL_A 0x60000
#define HBLANK_A 0x60004
diff --git a/shared-core/mach64_irq.c b/shared-core/mach64_irq.c
index 2d522a6c..57879e8d 100644
--- a/shared-core/mach64_irq.c
+++ b/shared-core/mach64_irq.c
@@ -71,11 +71,10 @@ irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS)
u32 mach64_get_vblank_counter(struct drm_device * dev, int crtc)
{
const drm_mach64_private_t *const dev_priv = dev->dev_private;
-
- if (crtc != 0) {
+
+ if (crtc != 0)
return 0;
- }
-
+
return atomic_read(&dev_priv->vbl_received);
}
@@ -83,14 +82,15 @@ int mach64_enable_vblank(struct drm_device * dev, int crtc)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL);
-
+
if (crtc != 0) {
- DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", crtc);
- return 0;
+ DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+ crtc);
+ return -EINVAL;
}
-
+
DRM_DEBUG("before enable vblank CRTC_INT_CTNL: 0x%08x\n", status);
-
+
/* Turn on VBLANK interrupt */
MACH64_WRITE(MACH64_CRTC_INT_CNTL, MACH64_READ(MACH64_CRTC_INT_CNTL)
| MACH64_CRTC_VBLANK_INT_EN);
@@ -98,12 +98,31 @@ int mach64_enable_vblank(struct drm_device * dev, int crtc)
return 0;
}
-
void mach64_disable_vblank(struct drm_device * dev, int crtc)
{
+ if (crtc != 0) {
+ DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
+ crtc);
+ return;
+ }
+
+ /*
+ * FIXME: implement proper interrupt disable by using the vblank
+ * counter register (if available).
+ */
+}
+
+static void mach64_disable_vblank_local(struct drm_device * dev, int crtc)
+{
drm_mach64_private_t *dev_priv = dev->dev_private;
u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL);
+ if (crtc != 0) {
+ DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
+ crtc);
+ return;
+ }
+
DRM_DEBUG("before disable vblank CRTC_INT_CTNL: 0x%08x\n", status);
/* Disable and clear VBLANK interrupt */
@@ -111,8 +130,6 @@ void mach64_disable_vblank(struct drm_device * dev, int crtc)
| MACH64_CRTC_VBLANK_INT);
}
-/* drm_dma.h hooks
-*/
void mach64_driver_irq_preinstall(struct drm_device * dev)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
@@ -121,7 +138,7 @@ void mach64_driver_irq_preinstall(struct drm_device * dev)
DRM_DEBUG("before install CRTC_INT_CTNL: 0x%08x\n", status);
- mach64_disable_vblank(dev,0);
+ mach64_disable_vblank_local(dev, 0);
}
int mach64_driver_irq_postinstall(struct drm_device * dev)
@@ -135,7 +152,7 @@ void mach64_driver_irq_uninstall(struct drm_device * dev)
if (!dev_priv)
return;
- mach64_disable_vblank(dev, 0);
+ mach64_disable_vblank_local(dev, 0);
DRM_DEBUG("after uninstall CRTC_INT_CTNL: 0x%08x\n",
MACH64_READ(MACH64_CRTC_INT_CNTL));
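
The mach64 change splits vblank disabling in two: mach64_disable_vblank,
the hook the DRM core calls, is now a validating no-op (presumably
because masking the interrupt would also stop the interrupt-driven
vblank counter, as the FIXME notes), while mach64_disable_vblank_local
really masks the interrupt and is used only around IRQ installation and
removal. The masking write at the end of the local variant is truncated
in the hunk above; it roughly follows the usual clear-enable/ack-pending
pattern (a reconstruction, not verbatim from the patch):

	MACH64_WRITE(MACH64_CRTC_INT_CNTL,
		     (status & ~MACH64_CRTC_VBLANK_INT_EN)
		     | MACH64_CRTC_VBLANK_INT);
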
diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c
index d00f1938..0daf9ac4 100644
--- a/shared-core/nouveau_fifo.c
+++ b/shared-core/nouveau_fifo.c
@@ -64,7 +64,6 @@ static int nouveau_fifo_instmem_configure(struct drm_device *dev)
switch(dev_priv->card_type)
{
- case NV_50:
case NV_40:
switch (dev_priv->chipset) {
case 0x47:
diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c
index f4e641b9..3d376aed 100644
--- a/shared-core/nouveau_mem.c
+++ b/shared-core/nouveau_mem.c
@@ -393,7 +393,7 @@ nouveau_mem_init_ttm(struct drm_device *dev)
}
/* GART */
-#ifndef __powerpc__
+#if !defined(__powerpc__) && !defined(__ia64__)
if (drm_device_is_agp(dev) && dev->agp) {
if ((ret = nouveau_mem_init_agp(dev, 1)))
DRM_ERROR("Error initialising AGP: %d\n", ret);
@@ -462,7 +462,7 @@ int nouveau_mem_init(struct drm_device *dev)
dev_priv->fb_nomap_heap=NULL;
}
-#ifndef __powerpc__
+#if !defined(__powerpc__) && !defined(__ia64__)
/* Init AGP / NV50 PCIEGART */
if (drm_device_is_agp(dev) && dev->agp) {
if ((ret = nouveau_mem_init_agp(dev, 0)))
diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h
index a2506146..2f7d77cf 100644
--- a/shared-core/nouveau_reg.h
+++ b/shared-core/nouveau_reg.h
@@ -138,6 +138,7 @@
#define NV40_PFB_TLIMIT(i) (0x00100604 + (i*16))
#define NV40_PFB_TSIZE(i) (0x00100608 + (i*16))
#define NV40_PFB_TSTATUS(i) (0x0010060C + (i*16))
+#define NV40_PFB_UNK_800 0x00100800
#define NV04_PGRAPH_DEBUG_0 0x00400080
#define NV04_PGRAPH_DEBUG_1 0x00400084
@@ -334,19 +335,19 @@
#define NV04_PGRAPH_BLEND 0x00400824
#define NV04_PGRAPH_STORED_FMT 0x00400830
#define NV04_PGRAPH_PATT_COLORRAM 0x00400900
-#define NV40_PGRAPH_TILE0(i) 0x00400900
-#define NV40_PGRAPH_TLIMIT0(i) 0x00400904
-#define NV40_PGRAPH_TSIZE0(i) 0x00400908
-#define NV40_PGRAPH_TSTATUS0(i) 0x0040090C
+#define NV40_PGRAPH_TILE0(i) (0x00400900 + (i*16))
+#define NV40_PGRAPH_TLIMIT0(i) (0x00400904 + (i*16))
+#define NV40_PGRAPH_TSIZE0(i) (0x00400908 + (i*16))
+#define NV40_PGRAPH_TSTATUS0(i) (0x0040090C + (i*16))
#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16))
#define NV04_PGRAPH_U_RAM 0x00400D00
-#define NV47_PGRAPH_TILE0(i) 0x00400D00
-#define NV47_PGRAPH_TLIMIT0(i) 0x00400D04
-#define NV47_PGRAPH_TSIZE0(i) 0x00400D08
-#define NV47_PGRAPH_TSTATUS0(i) 0x00400D0C
+#define NV47_PGRAPH_TILE0(i) (0x00400D00 + (i*16))
+#define NV47_PGRAPH_TLIMIT0(i) (0x00400D04 + (i*16))
+#define NV47_PGRAPH_TSIZE0(i) (0x00400D08 + (i*16))
+#define NV47_PGRAPH_TSTATUS0(i) (0x00400D0C + (i*16))
#define NV04_PGRAPH_V_RAM 0x00400D40
#define NV04_PGRAPH_W_RAM 0x00400D80
#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
@@ -394,10 +395,10 @@
#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098
#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C
#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0
-#define NV40_PGRAPH_TILE1(i) 0x00406900
-#define NV40_PGRAPH_TLIMIT1(i) 0x00406904
-#define NV40_PGRAPH_TSIZE1(i) 0x00406908
-#define NV40_PGRAPH_TSTATUS1(i) 0x0040690C
+#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16))
+#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16))
+#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16))
+#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16))
/* It's a guess that this works on NV03. Confirmed on NV04, though */
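
Before this fix the NV40 and NV47 tile-region macros ignored their
index argument, so every region aliased the first register of its
bank; the (i*16) stride matches the PFB and NV10 PGRAPH layouts above.
A quick standalone sanity check (illustrative only):

	#include <assert.h>

	#define NV40_PGRAPH_TILE0(i)	(0x00400900 + (i*16))
	#define NV40_PGRAPH_TLIMIT0(i)	(0x00400904 + (i*16))

	int main(void)
	{
		assert(NV40_PGRAPH_TILE0(0) == 0x00400900);
		assert(NV40_PGRAPH_TILE0(2) == 0x00400920);	/* was 0x00400900 */
		assert(NV40_PGRAPH_TLIMIT0(2) == 0x00400924);
		return 0;
	}
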
diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c
index 7086a0ab..12162167 100644
--- a/shared-core/nouveau_state.c
+++ b/shared-core/nouveau_state.c
@@ -212,6 +212,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.save_context = nv10_fifo_save_context;
break;
case 0x40:
+ case 0x60:
engine->instmem.init = nv04_instmem_init;
engine->instmem.takedown= nv04_instmem_takedown;
engine->instmem.populate = nv04_instmem_populate;
diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c
index 37a147b5..ad73ea91 100644
--- a/shared-core/nv20_graph.c
+++ b/shared-core/nv20_graph.c
@@ -804,7 +804,7 @@ void nv20_graph_takedown(struct drm_device *dev)
int nv30_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t vramsz, tmp;
+// uint32_t vramsz, tmp;
int ret, i;
NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
@@ -834,6 +834,7 @@ int nv30_graph_init(struct drm_device *dev)
NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00008000);
NV_WRITE(NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
NV_WRITE(0x400B80, 0x1003d888);
+ NV_WRITE(0x400B84, 0x0c000000);
NV_WRITE(0x400098, 0x00000000);
NV_WRITE(0x40009C, 0x0005ad00);
NV_WRITE(0x400B88, 0x62ff00ff); // suspiciously like PGRAPH_DEBUG_2
@@ -843,30 +844,47 @@ int nv30_graph_init(struct drm_device *dev)
NV_WRITE(0x400ba0, 0x002f8685);
NV_WRITE(0x400ba4, 0x00231f3f);
NV_WRITE(0x4008a4, 0x40000020);
- NV_WRITE(0x400B84, 0x0c000000);
- NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x62ff0f7f);
+
+ if (dev_priv->chipset == 0x34) {
+ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00200201);
+ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
+ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000008);
+ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000032);
+ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E00004);
+ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000002);
+ }
+
NV_WRITE(0x4000c0, 0x00000016);
/* copy tile info from PFB */
- for (i=0; i<NV10_PFB_TILE__SIZE; i++) {
- NV_WRITE(NV10_PGRAPH_TILE(i), NV_READ(NV10_PFB_TILE(i)));
- NV_WRITE(NV10_PGRAPH_TLIMIT(i), NV_READ(NV10_PFB_TLIMIT(i)));
- NV_WRITE(NV10_PGRAPH_TSIZE(i), NV_READ(NV10_PFB_TSIZE(i)));
- NV_WRITE(NV10_PGRAPH_TSTATUS(i), NV_READ(NV10_PFB_TSTATUS(i)));
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
+ NV_WRITE(0x00400904 + i*0x10, NV_READ(NV10_PFB_TLIMIT(i)));
+ /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
+ NV_WRITE(0x00400908 + i*0x10, NV_READ(NV10_PFB_TSIZE(i)));
+ /* which is NV40_PGRAPH_TSIZE0(i) ?? */
+ NV_WRITE(0x00400900 + i*0x10, NV_READ(NV10_PFB_TILE(i)));
+ /* which is NV40_PGRAPH_TILE0(i) ?? */
}
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000100);
NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF);
+ NV_WRITE(0x0040075c , 0x00000001);
NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001);
/* begin RAM config */
- vramsz = drm_get_resource_len(dev, 0) - 1;
+// vramsz = drm_get_resource_len(dev, 0) - 1;
NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0));
NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1));
- NV_WRITE(0x400750, 0x00EA0000);
- NV_WRITE(0x400754, NV_READ(NV04_PFB_CFG0));
- NV_WRITE(0x400750, 0x00EA0004);
- NV_WRITE(0x400754, NV_READ(NV04_PFB_CFG1));
+ if (dev_priv->chipset != 0x34) {
+ NV_WRITE(0x400750, 0x00EA0000);
+ NV_WRITE(0x400754, NV_READ(NV04_PFB_CFG0));
+ NV_WRITE(0x400750, 0x00EA0004);
+ NV_WRITE(0x400754, NV_READ(NV04_PFB_CFG1));
+ }
+
+#if 0
NV_WRITE(0x400820, 0);
NV_WRITE(0x400824, 0);
NV_WRITE(0x400864, vramsz-1);
@@ -885,6 +903,7 @@ int nv30_graph_init(struct drm_device *dev)
NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
+#endif
return 0;
}
diff --git a/shared-core/nv40_fb.c b/shared-core/nv40_fb.c
index ceae8079..ae784cb8 100644
--- a/shared-core/nv40_fb.c
+++ b/shared-core/nv40_fb.c
@@ -11,6 +11,13 @@ nv40_fb_init(struct drm_device *dev)
int num_tiles;
int i;
+	/* This is strictly an NV4x register (unknown on NV5x). The blob
+	 * sets it to all kinds of values (0x52802 has been seen, and some
+	 * cards even get set back to 0x1), which messes up our setup.
+	 * The blob never reads this register back, so I'm pretty sure
+	 * forcing it to 0x1 is safe for all cards. What it does is unknown. */
+ NV_WRITE(NV40_PFB_UNK_800, 0x1);
+
switch (dev_priv->chipset) {
case 0x40:
case 0x45:
diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c
index fdf51519..6ef02bf9 100644
--- a/shared-core/nv40_graph.c
+++ b/shared-core/nv40_graph.c
@@ -1511,6 +1511,7 @@ nv40_graph_create_context(struct nouveau_channel *chan)
ctx_init = nv4b_graph_context_init;
break;
case 0x4c:
+ case 0x67:
ctx_size = NV4C_GRCTX_SIZE;
ctx_init = nv4c_graph_context_init;
break;
@@ -2007,7 +2008,8 @@ nv40_graph_init(struct drm_device *dev)
case 0x49: ctx_voodoo = nv49_4b_ctx_voodoo; break;
case 0x4a: ctx_voodoo = nv4a_ctx_voodoo; break;
case 0x4b: ctx_voodoo = nv49_4b_ctx_voodoo; break;
- case 0x4c: ctx_voodoo = nv4c_ctx_voodoo; break;
+ case 0x4c:
+ case 0x67: ctx_voodoo = nv4c_ctx_voodoo; break;
case 0x4e: ctx_voodoo = nv4e_ctx_voodoo; break;
default:
DRM_ERROR("Unknown ctx_voodoo for chipset 0x%02x\n",
diff --git a/shared-core/radeon_cp.c b/shared-core/radeon_cp.c
index f0e55105..f0eda664 100644
--- a/shared-core/radeon_cp.c
+++ b/shared-core/radeon_cp.c
@@ -825,11 +825,19 @@ static u32 RADEON_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
return ret;
}
+static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+ RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK));
+ return RADEON_READ(RS690_MC_DATA);
+}
+
u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
{
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
return RADEON_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+ return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
return RADEON_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
else
@@ -840,6 +848,8 @@ static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
{
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
RADEON_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+ RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
RADEON_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
else
@@ -850,6 +860,8 @@ static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_lo
{
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
RADEON_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+ RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
RADEON_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
else
@@ -1362,6 +1374,74 @@ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
}
}
+/* Enable or disable RS690 GART on the chip */
+static void radeon_set_rs690gart(drm_radeon_private_t * dev_priv, int on)
+{
+ u32 temp;
+
+ if (on) {
+ DRM_DEBUG("programming rs690 gart %08X %08lX %08X\n",
+ dev_priv->gart_vm_start,
+ (long)dev_priv->gart_info.bus_addr,
+ dev_priv->gart_size);
+
+ temp = RS690_READ_MCIND(dev_priv, RS690_MC_MISC_CNTL);
+ RS690_WRITE_MCIND(RS690_MC_MISC_CNTL, 0x5000);
+
+ RS690_WRITE_MCIND(RS690_MC_AGP_SIZE,
+ RS690_MC_GART_EN | RS690_MC_AGP_SIZE_32MB);
+
+ temp = RS690_READ_MCIND(dev_priv, RS690_MC_GART_FEATURE_ID);
+ RS690_WRITE_MCIND(RS690_MC_GART_FEATURE_ID, 0x42040800);
+
+ RS690_WRITE_MCIND(RS690_MC_GART_BASE,
+ dev_priv->gart_info.bus_addr);
+
+ temp = RS690_READ_MCIND(dev_priv, RS690_MC_AGP_MODE_CONTROL);
+ RS690_WRITE_MCIND(RS690_MC_AGP_MODE_CONTROL, 0x01400000);
+
+ RS690_WRITE_MCIND(RS690_MC_AGP_BASE,
+ (unsigned int)dev_priv->gart_vm_start);
+
+ dev_priv->gart_size = 32*1024*1024;
+ temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) &
+ 0xffff0000) | (dev_priv->gart_vm_start >> 16));
+
+ RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, temp);
+
+ temp = RS690_READ_MCIND(dev_priv, RS690_MC_AGP_SIZE);
+ RS690_WRITE_MCIND(RS690_MC_AGP_SIZE,
+ RS690_MC_GART_EN | RS690_MC_AGP_SIZE_32MB);
+
+ do
+ {
+ temp = RS690_READ_MCIND(dev_priv, RS690_MC_GART_CACHE_CNTL);
+ if ((temp & RS690_MC_GART_CLEAR_STATUS) ==
+ RS690_MC_GART_CLEAR_DONE)
+ break;
+ DRM_UDELAY(1);
+ } while(1);
+
+ RS690_WRITE_MCIND(RS690_MC_GART_CACHE_CNTL,
+ RS690_MC_GART_CC_CLEAR);
+ do
+ {
+ temp = RS690_READ_MCIND(dev_priv, RS690_MC_GART_CACHE_CNTL);
+ if ((temp & RS690_MC_GART_CLEAR_STATUS) ==
+ RS690_MC_GART_CLEAR_DONE)
+ break;
+ DRM_UDELAY(1);
+ } while(1);
+
+ RS690_WRITE_MCIND(RS690_MC_GART_CACHE_CNTL,
+ RS690_MC_GART_CC_NO_CHANGE);
+ }
+ else
+ {
+ RS690_WRITE_MCIND(RS690_MC_AGP_SIZE, RS690_MC_GART_DIS);
+ }
+}
+
static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
{
u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
@@ -1396,6 +1476,12 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
{
u32 tmp;
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+ {
+ radeon_set_rs690gart(dev_priv, on);
+ return;
+ }
+
if (dev_priv->flags & RADEON_IS_IGPGART) {
radeon_set_igpgart(dev_priv, on);
return;
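
RS690 memory-controller registers live behind an index/data pair
(RS690_MC_INDEX at 0x78, RS690_MC_DATA at 0x7c) rather than in the flat
MMIO space: a read programs the index and reads the data register; a
write additionally sets RS690_MC_INDEX_WR_EN and finishes with a
RS690_MC_INDEX_WR_ACK write. Condensed from the RS690_READ_MCIND helper
and the RS690_WRITE_MCIND macro added by this patch (an illustrative
restatement, not new code):

	static u32 rs690_mc_read(drm_radeon_private_t *dev_priv, int addr)
	{
		RADEON_WRITE(RS690_MC_INDEX, addr & RS690_MC_INDEX_MASK);
		return RADEON_READ(RS690_MC_DATA);
	}

	static void rs690_mc_write(drm_radeon_private_t *dev_priv,
				   int addr, u32 val)
	{
		RADEON_WRITE(RS690_MC_INDEX,
			     RS690_MC_INDEX_WR_EN | (addr & RS690_MC_INDEX_MASK));
		RADEON_WRITE(RS690_MC_DATA, val);
		RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);
	}

The two identical poll loops in radeon_set_rs690gart() wait for
RS690_MC_GART_CLEAR_STATUS to read back as RS690_MC_GART_CLEAR_DONE
before and after requesting the GART cache clear.
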
diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h
index 63877008..0c503257 100644
--- a/shared-core/radeon_drv.h
+++ b/shared-core/radeon_drv.h
@@ -129,6 +129,7 @@ enum radeon_family {
CHIP_R420,
CHIP_RV410,
CHIP_RS400,
+ CHIP_RS690,
CHIP_RV515,
CHIP_R520,
CHIP_RV530,
@@ -503,6 +504,36 @@ extern int radeon_move(struct drm_buffer_object * bo,
#define RADEON_IGPGART_ENABLE 0x38
#define RADEON_IGPGART_UNK_39 0x39
+#define RS690_MC_INDEX 0x78
+# define RS690_MC_INDEX_MASK 0x1ff
+# define RS690_MC_INDEX_WR_EN (1 << 9)
+# define RS690_MC_INDEX_WR_ACK 0x7f
+#define RS690_MC_DATA 0x7c
+
+#define RS690_MC_MISC_CNTL 0x18
+#define RS690_MC_GART_FEATURE_ID 0x2b
+#define RS690_MC_GART_BASE 0x2c
+#define RS690_MC_GART_CACHE_CNTL 0x2e
+# define RS690_MC_GART_CC_NO_CHANGE 0x0
+# define RS690_MC_GART_CC_CLEAR 0x1
+# define RS690_MC_GART_CLEAR_STATUS (1 << 1)
+# define RS690_MC_GART_CLEAR_DONE (0 << 1)
+# define RS690_MC_GART_CLEAR_PENDING (1 << 1)
+#define RS690_MC_AGP_SIZE 0x38
+# define RS690_MC_GART_DIS 0x0
+# define RS690_MC_GART_EN 0x1
+# define RS690_MC_AGP_SIZE_32MB (0 << 1)
+# define RS690_MC_AGP_SIZE_64MB (1 << 1)
+# define RS690_MC_AGP_SIZE_128MB (2 << 1)
+# define RS690_MC_AGP_SIZE_256MB (3 << 1)
+# define RS690_MC_AGP_SIZE_512MB (4 << 1)
+# define RS690_MC_AGP_SIZE_1GB (5 << 1)
+# define RS690_MC_AGP_SIZE_2GB (6 << 1)
+#define RS690_MC_AGP_MODE_CONTROL 0x39
+#define RS690_MC_FB_LOCATION 0x100
+#define RS690_MC_AGP_LOCATION 0x101
+#define RS690_MC_AGP_BASE 0x102
+
#define R520_MC_IND_INDEX 0x70
#define R520_MC_IND_WR_EN (1<<24)
#define R520_MC_IND_DATA 0x74
@@ -1114,8 +1145,8 @@ extern int radeon_move(struct drm_buffer_object * bo,
#define RADEON_PCIGART_TABLE_SIZE (32*1024)
-#define RADEON_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
-#define RADEON_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) )
+#define RADEON_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
+#define RADEON_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) )
#define RADEON_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) )
#define RADEON_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) )
@@ -1148,6 +1179,13 @@ do { \
RADEON_WRITE(R520_MC_IND_INDEX, 0); \
} while (0)
+#define RS690_WRITE_MCIND( addr, val ) \
+do { \
+ RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK)); \
+ RADEON_WRITE(RS690_MC_DATA, val); \
+ RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); \
+} while (0)
+
#define CP_PACKET0( reg, n ) \
(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
#define CP_PACKET0_TABLE( reg, n ) \
diff --git a/shared-core/via_drv.c b/shared-core/via_drv.c
index a802e4ae..dd632c3d 100644
--- a/shared-core/via_drv.c
+++ b/shared-core/via_drv.c
@@ -40,17 +40,9 @@ static struct pci_device_id pciidlist[] = {
#ifdef VIA_HAVE_FENCE
-static struct drm_fence_driver via_fence_driver = {
- .num_classes = 1,
- .wrap_diff = (1 << 30),
- .flush_diff = (1 << 20),
- .sequence_mask = 0xffffffffU,
- .lazy_capable = 1,
- .emit = via_fence_emit_sequence,
- .poke_flush = via_poke_flush,
- .has_irq = via_fence_has_irq,
-};
+extern struct drm_fence_driver via_fence_driver;
#endif
+
#ifdef VIA_HAVE_BUFFER
/**
@@ -76,6 +68,8 @@ static struct drm_bo_driver via_bo_driver = {
.init_mem_type = via_init_mem_type,
.evict_flags = via_evict_flags,
.move = NULL,
+ .ttm_cache_flush = NULL,
+ .command_stream_barrier = NULL
};
#endif
diff --git a/shared-core/via_drv.h b/shared-core/via_drv.h
index 8dd4a727..941a2d77 100644
--- a/shared-core/via_drv.h
+++ b/shared-core/via_drv.h
@@ -196,17 +196,6 @@ extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq
extern void via_init_dmablit(struct drm_device *dev);
#endif
-#ifdef VIA_HAVE_FENCE
-extern void via_fence_timer(unsigned long data);
-extern void via_poke_flush(struct drm_device * dev, uint32_t class);
-extern int via_fence_emit_sequence(struct drm_device * dev, uint32_t class,
- uint32_t flags,
- uint32_t * sequence,
- uint32_t * native_type);
-extern int via_fence_has_irq(struct drm_device * dev, uint32_t class,
- uint32_t flags);
-#endif
-
#ifdef VIA_HAVE_BUFFER
extern struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device *dev);
extern int via_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
diff --git a/shared-core/via_map.c b/shared-core/via_map.c
index 11bfa551..54934367 100644
--- a/shared-core/via_map.c
+++ b/shared-core/via_map.c
@@ -69,9 +69,6 @@ static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
dev_priv->emit_0_sequence = 0;
dev_priv->have_idlelock = 0;
spin_lock_init(&dev_priv->fence_lock);
- init_timer(&dev_priv->fence_timer);
- dev_priv->fence_timer.function = &via_fence_timer;
- dev_priv->fence_timer.data = (unsigned long) dev;
#endif /* VIA_HAVE_FENCE */
dev->dev_private = (void *)dev_priv;
#ifdef VIA_HAVE_BUFFER