diff options
Diffstat (limited to 'linux-core')
-rw-r--r--  linux-core/drmP.h           |   2
-rw-r--r--  linux-core/drm_agpsupport.c |   1
-rw-r--r--  linux-core/drm_bo.c         | 136
-rw-r--r--  linux-core/drm_compat.c     |   7
-rw-r--r--  linux-core/drm_fence.c      |  15
-rw-r--r--  linux-core/drm_ttm.c        |  63
-rw-r--r--  linux-core/drm_ttm.h        |   3
-rw-r--r--  linux-core/i915_fence.c     |  17
8 files changed, 172 insertions, 72 deletions
diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 7de7422b..da14bdfd 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -801,7 +801,7 @@ typedef struct drm_buffer_manager{ struct list_head unfenced; struct list_head ddestroy; struct list_head other; - struct timer_list timer; + struct work_struct wq; uint32_t fence_flags; } drm_buffer_manager_t; diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index e7226f1f..60ebc567 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -630,6 +630,7 @@ static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) { } agp_free_memory(mem); } + agp_priv->mem = NULL; } diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 68af5c31..74722b1b 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -98,10 +98,7 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo) drm_fence_object_flush(dev, bo->fence, bo->fence_flags); list_add_tail(&bo->ddestroy, &bm->ddestroy); - if (!timer_pending(&bm->timer)) { - bm->timer.expires = jiffies + 1; - add_timer(&bm->timer); - } + schedule_delayed_work(&bm->wq, 2); return; } else { @@ -109,15 +106,14 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo) bo->fence = NULL; } } - /* * Take away from lru lists. 
*/ - list_del(&bo->head); + list_del_init(&bo->head); if (bo->tt) { - drm_unbind_ttm_region(bo->ttm_region); + drm_unbind_ttm_region(bo->ttm_region); drm_mm_put_block(&bm->tt_manager, bo->tt); bo->tt = NULL; } @@ -152,7 +148,9 @@ static void drm_bo_delayed_delete(drm_device_t * dev) entry->fence = NULL; } if (!entry->fence) { - DRM_DEBUG("Destroying delayed buffer object\n"); +#ifdef BODEBUG + DRM_ERROR("Destroying delayed buffer object\n"); +#endif list_del(&entry->ddestroy); drm_bo_destroy_locked(dev, entry); } @@ -161,16 +159,18 @@ static void drm_bo_delayed_delete(drm_device_t * dev) mutex_unlock(&dev->struct_mutex); } -static void drm_bo_delayed_timer(unsigned long data) +static void drm_bo_delayed_workqueue(void *data) { drm_device_t *dev = (drm_device_t *) data; drm_buffer_manager_t *bm = &dev->bm; +#ifdef BODEBUG + DRM_ERROR("Delayed delete Worker\n"); +#endif drm_bo_delayed_delete(dev); - mutex_lock(&dev->struct_mutex); - if (!list_empty(&bm->ddestroy) && !timer_pending(&bm->timer)) { - bm->timer.expires = jiffies + 1; - add_timer(&bm->timer); + mutex_lock(&dev->struct_mutex); + if (!list_empty(&bm->ddestroy)) { + schedule_delayed_work(&bm->wq, 2); } mutex_unlock(&dev->struct_mutex); } @@ -220,14 +220,29 @@ int drm_fence_buffer_objects(drm_file_t * priv, mutex_lock(&dev->struct_mutex); + if (!list) + list = &bm->unfenced; + list_for_each_entry(entry, list, head) { BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED)); fence_flags |= entry->fence_flags; count++; } - if (!count) + if (!count) { + DRM_ERROR("No buffers to fence\n"); + ret = -EINVAL; goto out; + } + + /* + * Transfer to a local list before we release the dev->struct_mutex; + * This is so we don't get any new unfenced objects while fencing + * these. 
+ */ + + list_add_tail(&f_list, list); + list_del_init(list); if (fence) { if ((fence_flags & fence->type) != fence_flags) { @@ -237,20 +252,13 @@ int drm_fence_buffer_objects(drm_file_t * priv, goto out; } } else { + mutex_unlock(&dev->struct_mutex); ret = drm_fence_object_create(dev, fence_flags, 1, &fence); + mutex_lock(&dev->struct_mutex); if (ret) goto out; } - /* - * Transfer to a private list before we release the dev->struct_mutex; - * This is so we don't get any new unfenced objects while fencing - * these. - */ - - f_list = *list; - INIT_LIST_HEAD(list); - count = 0; l = f_list.next; while (l != &f_list) { @@ -259,7 +267,7 @@ int drm_fence_buffer_objects(drm_file_t * priv, mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); mutex_lock(&dev->struct_mutex); - + list_del_init(l); if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) { count++; if (entry->fence) @@ -268,7 +276,6 @@ int drm_fence_buffer_objects(drm_file_t * priv, DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); DRM_WAKEUP(&entry->event_queue); - list_del_init(&entry->head); if (entry->flags & DRM_BO_FLAG_NO_EVICT) list_add_tail(&entry->head, &bm->other); else if (entry->flags & DRM_BO_FLAG_MEM_TT) @@ -277,12 +284,19 @@ int drm_fence_buffer_objects(drm_file_t * priv, list_add_tail(&entry->head, &bm->vram_lru); else list_add_tail(&entry->head, &bm->other); + } else { +#ifdef BODEBUG + DRM_ERROR("Huh? 
Fenced object on unfenced list\n"); +#endif } mutex_unlock(&entry->mutex); drm_bo_usage_deref_locked(dev, entry); l = f_list.next; } atomic_add(count, &fence->usage); +#ifdef BODEBUG + DRM_ERROR("Fenced %d buffers\n", count); +#endif out: mutex_unlock(&dev->struct_mutex); *used_fence = fence; @@ -303,7 +317,6 @@ static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, drm_fence_object_t *fence = bo->fence; int ret; - BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (fence) { drm_device_t *dev = bo->dev; if (drm_fence_object_signaled(fence, bo->fence_flags)) { @@ -424,6 +437,7 @@ int drm_bo_alloc_space(drm_buffer_object_t * buf, int tt, int no_wait) } else { buf->vram = node; } + buf->offset = node->start * PAGE_SIZE; return 0; } @@ -431,6 +445,7 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; + drm_ttm_backend_t *be; int ret; BUG_ON(bo->tt); @@ -450,7 +465,8 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait) if (ret) return ret; - if (bo->ttm_region->be->needs_cache_adjust(bo->ttm_region->be)) + be = bo->ttm_region->be; + if (be->needs_cache_adjust(be)) bo->flags &= ~DRM_BO_FLAG_CACHED; bo->flags &= ~DRM_BO_MASK_MEM; bo->flags |= DRM_BO_FLAG_MEM_TT; @@ -458,7 +474,7 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait) if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) { ret = dev->driver->bo_driver->invalidate_caches(dev, bo->flags); if (ret) - DRM_ERROR("Warning: Could not flush read caches\n"); + DRM_ERROR("Could not flush read caches\n"); } DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_EVICTED); @@ -776,12 +792,13 @@ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo, */ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle, - uint32_t map_flags, int no_wait, + uint32_t map_flags, unsigned hint, drm_bo_arg_reply_t * rep) { drm_buffer_object_t *bo; drm_device_t *dev = priv->head->dev; int 
ret = 0; + int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; mutex_lock(&dev->struct_mutex); bo = drm_lookup_buffer_object(priv, handle, 1); @@ -791,9 +808,11 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle, return -EINVAL; mutex_lock(&bo->mutex); - ret = drm_bo_wait_unfenced(bo, no_wait, 0); - if (ret) - goto out; + if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) { + ret = drm_bo_wait_unfenced(bo, no_wait, 0); + if (ret) + goto out; + } /* * If this returns true, we are currently unmapped. @@ -979,7 +998,11 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, * Check whether we need to move buffer. */ - if (flag_diff & DRM_BO_MASK_MEM) { + if ((bo->type != drm_bo_type_fake) && (flag_diff & DRM_BO_MASK_MEM)) { + if (bo->type == drm_bo_type_user) { + DRM_ERROR("User buffers are not implemented yet.\n"); + return -EINVAL; + } ret = drm_bo_move_buffer(bo, new_flags, no_wait); if (ret) return ret; @@ -1151,7 +1174,7 @@ static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo, bo->ttm_object = to; ttm = drm_ttm_from_object(to); ret = drm_create_ttm_region(ttm, bo->buffer_start >> PAGE_SHIFT, - bo->num_pages,1, + bo->num_pages, 0, /* bo->mask & DRM_BO_FLAG_BIND_CACHED,*/ &bo->ttm_region); @@ -1177,9 +1200,11 @@ int drm_buffer_object_create(drm_file_t * priv, int ret = 0; uint32_t new_flags; unsigned long num_pages; - + drm_bo_delayed_delete(dev); - if (buffer_start & ~PAGE_MASK) { + + if ((buffer_start & ~PAGE_MASK) && + (type != drm_bo_type_fake)) { DRM_ERROR("Invalid buffer object start.\n"); return -EINVAL; } @@ -1206,24 +1231,24 @@ int drm_buffer_object_create(drm_file_t * priv, bo->dev = dev; bo->type = type; bo->num_pages = num_pages; - bo->buffer_start = buffer_start; + if (bo->type == drm_bo_type_fake) { + bo->offset = buffer_start; + bo->buffer_start = 0; + } else { + bo->buffer_start = buffer_start; + } bo->priv_flags = 0; bo->flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; ret = drm_bo_new_flags(dev, bo->flags, 
mask, hint, 1, &new_flags, &bo->mask); - DRM_ERROR("New flags: 0x%08x\n", new_flags); if (ret) goto out_err; ret = drm_bo_add_ttm(priv, bo, ttm_handle); if (ret) goto out_err; -#if 1 ret = drm_buffer_object_validate(bo, new_flags, 0, hint & DRM_BO_HINT_DONT_BLOCK); -#else - bo->flags = new_flags; -#endif if (ret) goto out_err; @@ -1268,7 +1293,7 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.req; + drm_bo_arg_request_t *req = &arg.d.req; drm_bo_arg_reply_t rep; unsigned long next; drm_user_object_t *uo; @@ -1321,8 +1346,7 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) case drm_bo_map: rep.ret = drm_buffer_object_map(priv, req->handle, req->mask, - req->hint & - DRM_BO_HINT_DONT_BLOCK, + req->hint, &rep); break; case drm_bo_destroy: @@ -1394,10 +1418,9 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) return -EAGAIN; arg.handled = 1; - arg.rep = rep; + arg.d.rep = rep; DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); data = next; - } while (data); return 0; } @@ -1409,17 +1432,22 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) static void drm_bo_force_clean(drm_device_t * dev) { drm_buffer_manager_t *bm = &dev->bm; - - drm_buffer_object_t *entry, *next; + struct list_head *l; + drm_buffer_object_t *entry; int nice_mode = 1; int ret = 0; - list_for_each_entry_safe(entry, next, &bm->ddestroy, ddestroy) { + l = bm->ddestroy.next; + while(l != &bm->ddestroy) { + entry = list_entry(l, drm_buffer_object_t, ddestroy); + list_del(l); if (entry->fence) { if (nice_mode) { unsigned long _end = jiffies + 3 * DRM_HZ; do { + mutex_unlock(&dev->struct_mutex); ret = drm_bo_wait(entry, 0, 1, 0); + mutex_lock(&dev->struct_mutex); } while ((ret == -EINTR) && !time_after_eq(jiffies, _end)); } else { @@ -1436,8 +1464,8 @@ static void drm_bo_force_clean(drm_device_t * dev) } DRM_DEBUG("Destroying delayed buffer object\n"); - list_del(&entry->ddestroy); drm_bo_destroy_locked(dev, entry); + l = bm->ddestroy.next; } } @@ -1541,11 +1569,9 @@ int 
drm_mm_init_ioctl(DRM_IOCTL_ARGS) INIT_LIST_HEAD(&bm->ddestroy); INIT_LIST_HEAD(&bm->other); - init_timer(&bm->timer); - bm->timer.function = &drm_bo_delayed_timer; - bm->timer.data = (unsigned long)dev; - + INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev); bm->initialized = 1; + break; case mm_takedown: if (drm_bo_clean_mm(dev)) { diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 86bae306..162e4656 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -59,9 +59,14 @@ static inline void change_pte_range(struct mm_struct *mm, pmd_t * pmd, do { if (pte_present(*pte)) { pte_t ptent; - ptent = *pte; ptep_get_and_clear(mm, addr, pte); + ptent = *pte; lazy_mmu_prot_update(ptent); + } else { + ptep_get_and_clear(mm, addr, pte); + } + if (!pte_none(*pte)) { + DRM_ERROR("Ugh. Pte was presen\n"); } } while (pte++, addr += PAGE_SIZE, addr != end); pte_unmap(pte - 1); diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index fd43d8bc..eaaf7f40 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -111,6 +111,10 @@ void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type) relevant = type & fence->type; if ((fence->signaled | relevant) != fence->signaled) { fence->signaled |= relevant; +#ifdef BODEBUG + DRM_ERROR("Fence 0x%08lx signaled 0x%08x\n", + fence->base.hash.key, fence->signaled); +#endif fence->submitted_flush |= relevant; wake = 1; } @@ -130,6 +134,10 @@ void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type) */ if (!(fence->type & ~fence->signaled)) { +#ifdef BODEBUG + DRM_ERROR("Fence completely signaled 0x%08lx\n", + fence->base.hash.key); +#endif fence_list = &fence->ring; for (i = 0; i < driver->no_types; ++i) { if (fm->fence_types[i] == fence_list) @@ -172,6 +180,10 @@ void drm_fence_usage_deref_locked(drm_device_t * dev, { if (atomic_dec_and_test(&fence->usage)) { drm_fence_unring(dev, &fence->ring); +#ifdef BODEBUG + DRM_ERROR("Destroyed a fence object 
0x%08lx\n", + fence->base.hash.key); +#endif kmem_cache_free(drm_cache.fence_object, fence); } } @@ -430,6 +442,9 @@ int drm_fence_add_user_object(drm_file_t *priv, drm_fence_object_t *fence, return ret; fence->base.type = drm_fence_type; fence->base.remove = &drm_fence_object_destroy; +#ifdef BODEBUG + DRM_ERROR("Fence 0x%08lx created\n", fence->base.hash.key); +#endif return 0; } EXPORT_SYMBOL(drm_fence_add_user_object); diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index 26133f9c..a83d6401 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -43,6 +43,38 @@ typedef struct drm_val_action { } drm_val_action_t; /* + * Use kmalloc if possible. Otherwise fall back to vmalloc. + */ + + +static void *ttm_alloc(unsigned long size, int type, int *do_vmalloc) +{ + void *ret = NULL; + + *do_vmalloc = 0; + if (size <= 4*PAGE_SIZE) { + ret = drm_alloc(size, type); + } + if (!ret) { + *do_vmalloc = 1; + ret = vmalloc(size); + } + return ret; +} + +static void ttm_free(void *pointer, unsigned long size, int type, + int do_vfree) +{ + if (!do_vfree) { + drm_free(pointer, size, type); + }else { + vfree(pointer); + } +} + + + +/* * We may be manipulating other processes page tables, so for each TTM, keep track of * which mm_structs are currently mapping the ttm so that we can take the appropriate * locks when we modify their page tables. 
A typical application is when we evict another @@ -161,6 +193,7 @@ static int unmap_vma_pages(drm_ttm_t * ttm, unsigned long page_offset, list_for_each(list, &ttm->vma_list->head) { drm_ttm_vma_list_t *entry = list_entry(list, drm_ttm_vma_list_t, head); + drm_clear_vma(entry->vma, entry->vma->vm_start + (page_offset << PAGE_SHIFT), @@ -205,7 +238,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm) return -EBUSY; } - DRM_ERROR("Destroying a ttm\n"); + DRM_DEBUG("Destroying a ttm\n"); if (ttm->be_list) { list_for_each_safe(list, next, &ttm->be_list->head) { drm_ttm_backend_list_t *entry = @@ -231,12 +264,13 @@ int drm_destroy_ttm(drm_ttm_t * ttm) } } global_flush_tlb(); - vfree(ttm->pages); + ttm_free(ttm->pages, ttm->num_pages*sizeof(*ttm->pages), + DRM_MEM_TTM, ttm->pages_vmalloc); ttm->pages = NULL; } if (ttm->page_flags) { - vfree(ttm->page_flags); + ttm_free(ttm->page_flags, ttm->num_pages*sizeof(*ttm->page_flags), DRM_MEM_TTM, ttm->pf_vmalloc); ttm->page_flags = NULL; } @@ -280,7 +314,8 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size) ttm->destroy = 0; ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; - ttm->page_flags = vmalloc(ttm->num_pages * sizeof(*ttm->page_flags)); + ttm->page_flags = ttm_alloc(ttm->num_pages * sizeof(*ttm->page_flags), + DRM_MEM_TTM, &ttm->pf_vmalloc); if (!ttm->page_flags) { drm_destroy_ttm(ttm); DRM_ERROR("Failed allocating page_flags table\n"); @@ -288,7 +323,8 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size) } memset(ttm->page_flags, 0, ttm->num_pages * sizeof(*ttm->page_flags)); - ttm->pages = vmalloc(ttm->num_pages * sizeof(*ttm->pages)); + ttm->pages = ttm_alloc(ttm->num_pages * sizeof(*ttm->pages), + DRM_MEM_TTM, &ttm->pages_vmalloc); if (!ttm->pages) { drm_destroy_ttm(ttm); DRM_ERROR("Failed allocating page table\n"); @@ -483,12 +519,13 @@ void drm_destroy_ttm_region(drm_ttm_backend_list_t * entry) uint32_t *cur_page_flags; int i; - DRM_ERROR("Destroying a TTM region\n"); + 
DRM_DEBUG("Destroying a TTM region\n"); list_del_init(&entry->head); drm_unbind_ttm_region(entry); if (be) { be->clear(entry->be); +#if 0 /* Hmm, Isn't this done in unbind? */ if (be->needs_cache_adjust(be)) { int ret = drm_ttm_lock_mmap_sem(ttm); drm_ttm_lock_mm(ttm, 0, 1); @@ -500,6 +537,7 @@ void drm_destroy_ttm_region(drm_ttm_backend_list_t * entry) if (!ret) drm_ttm_unlock_mm(ttm, 1, 0); } +#endif be->destroy(be); } cur_page_flags = ttm->page_flags + entry->page_offset; @@ -609,6 +647,12 @@ int drm_bind_ttm_region(drm_ttm_backend_list_t * region, ret = drm_ttm_lock_mmap_sem(ttm); if (ret) return ret; + + drm_ttm_lock_mm(ttm, 0, 1); + unmap_vma_pages(ttm, region->page_offset, + region->num_pages); + drm_ttm_unlock_mm(ttm, 0, 1); + drm_set_caching(ttm, region->page_offset, region->num_pages, DRM_TTM_PAGE_UNCACHED, 1); } else { @@ -676,7 +720,9 @@ void drm_user_destroy_region(drm_ttm_backend_list_t * entry) page_cache_release(*cur_page); cur_page++; } - vfree(entry->anon_pages); + ttm_free(entry->anon_pages, + sizeof(*entry->anon_pages)*entry->anon_locked, + DRM_MEM_TTM, entry->pages_vmalloc); } be->destroy(be); @@ -721,7 +767,8 @@ int drm_user_create_region(drm_device_t * dev, unsigned long start, int len, return -EFAULT; } - tmp->anon_pages = vmalloc(sizeof(*(tmp->anon_pages)) * len); + tmp->anon_pages = ttm_alloc(sizeof(*(tmp->anon_pages)) * len, + DRM_MEM_TTM, &tmp->pages_vmalloc); if (!tmp->anon_pages) { drm_user_destroy_region(tmp); diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h index d647578c..5c65e747 100644 --- a/linux-core/drm_ttm.h +++ b/linux-core/drm_ttm.h @@ -70,6 +70,7 @@ typedef struct drm_ttm_backend_list { drm_file_t *anon_owner; struct page **anon_pages; int anon_locked; + int pages_vmalloc; enum { ttm_bound, ttm_evicted, @@ -99,6 +100,8 @@ typedef struct drm_ttm { atomic_t vma_count; int mmap_sem_locked; int destroy; + int pages_vmalloc; + int pf_vmalloc; } drm_ttm_t; typedef struct drm_ttm_object { diff --git 
a/linux-core/i915_fence.c b/linux-core/i915_fence.c index 80ef3ab2..20e12d6a 100644 --- a/linux-core/i915_fence.c +++ b/linux-core/i915_fence.c @@ -55,13 +55,18 @@ static void i915_perform_flush(drm_device_t * dev) diff = sequence - fm->last_exe_flush; if (diff < driver->wrap_diff && diff != 0) { drm_fence_handler(dev, sequence, DRM_FENCE_EXE); - diff = sequence - fm->exe_flush_sequence; - if (diff < driver->wrap_diff) { - fm->pending_exe_flush = 0; + } + + diff = sequence - fm->exe_flush_sequence; + if (diff < driver->wrap_diff) { + fm->pending_exe_flush = 0; + if (dev_priv->fence_irq_on) { i915_user_irq_off(dev_priv); - } else { - i915_user_irq_on(dev_priv); + dev_priv->fence_irq_on = 0; } + } else if (!dev_priv->fence_irq_on) { + i915_user_irq_on(dev_priv); + dev_priv->fence_irq_on = 1; } } if (dev_priv->flush_pending) { @@ -82,8 +87,6 @@ static void i915_perform_flush(drm_device_t * dev) dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv); dev_priv->flush_flags = fm->pending_flush; dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0); - DRM_ERROR("Saved flush status is 0x%08x\n", - dev_priv->saved_flush_status); I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21)); dev_priv->flush_pending = 1; fm->pending_flush = 0; |