From 3024f23c6551e219b0236041a8205bf1bc60ed94 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 31 Jan 2007 14:50:57 +0100 Subject: memory manager: Make device driver aware of different memory types. Memory types are either fixed (on-card or pre-bound AGP) or not fixed (dynamically bound) to an aperture. They also carry information about: 1) Whether they can be mapped cached. 2) Whether they are at all mappable. 3) Whether they need an ioremap to be accessible from kernel space. In this way VRAM memory and, for example, pre-bound AGP appear identical to the memory manager. This also makes support for unmappable VRAM simple to implement. --- linux-core/drm_bo.c | 87 ++++++++++++++++++++++++++++++++--------------------- 1 file changed, 52 insertions(+), 35 deletions(-) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 2b960c75..b72e9912 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -74,8 +74,10 @@ static void drm_bo_add_to_lru(drm_buffer_object_t * bo, drm_buffer_manager_t * bm) { struct list_head *list; - bo->mem_type = 0; + drm_mem_type_manager_t *man; + bo->mem_type = 0; + switch(bo->flags & DRM_BO_MASK_MEM) { case DRM_BO_FLAG_MEM_TT: bo->mem_type = DRM_BO_MEM_TT; @@ -89,8 +91,10 @@ static void drm_bo_add_to_lru(drm_buffer_object_t * bo, default: BUG_ON(1); } + + man = &bm->man[bo->mem_type]; list = (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? - &bm->pinned[bo->mem_type] : &bm->lru[bo->mem_type]; + &man->pinned : &man->lru; list_add_tail(&bo->lru, list); return; } @@ -543,7 +547,8 @@ int drm_bo_alloc_space(drm_buffer_object_t * bo, unsigned mem_type, drm_mm_node_t *node; drm_buffer_manager_t *bm = &dev->bm; drm_buffer_object_t *entry; - drm_mm_t *mm = &bm->manager[mem_type]; + drm_mem_type_manager_t *man = &bm->man[mem_type]; + drm_mm_t *mm = &man->manager; struct list_head *lru; unsigned long size = bo->num_pages; int ret; @@ -554,7 +559,7 @@ int drm_bo_alloc_space(drm_buffer_object_t * bo, unsigned mem_type, if (node) break; - lru = &bm->lru[mem_type]; + lru = &man->lru; if (lru->next == lru) break; @@ -638,7 +643,6 @@ static int drm_bo_new_flags(drm_device_t * dev, { uint32_t new_flags = 0; uint32_t new_props; - drm_bo_driver_t *driver = dev->driver->bo_driver; drm_buffer_manager_t *bm = &dev->bm; unsigned i; @@ -647,7 +651,7 @@ static int drm_bo_new_flags(drm_device_t * dev, */ for (i = 0; i < DRM_BO_MEM_TYPES; ++i) { - if (!bm->use_type[i]) + if (!bm->man[i].use_type) new_mask &= ~drm_bo_type_flags(i); } @@ -659,14 +663,18 @@ static int drm_bo_new_flags(drm_device_t * dev, } if (new_mask & DRM_BO_FLAG_BIND_CACHED) { if (((new_mask & DRM_BO_FLAG_MEM_TT) && - !driver->cached[DRM_BO_MEM_TT]) && - ((new_mask & DRM_BO_FLAG_MEM_VRAM) - && !driver->cached[DRM_BO_MEM_VRAM])) { + !(bm->man[DRM_BO_MEM_TT].flags & + _DRM_FLAG_MEMTYPE_CACHED) && + ((new_mask & DRM_BO_FLAG_MEM_VRAM) + && !(bm->man[DRM_BO_MEM_VRAM].flags & + _DRM_FLAG_MEMTYPE_CACHED)))) { new_mask &= ~DRM_BO_FLAG_BIND_CACHED; } else { - if (!driver->cached[DRM_BO_MEM_TT]) + if (!(bm->man[DRM_BO_MEM_TT].flags & + _DRM_FLAG_MEMTYPE_CACHED)) new_flags &= DRM_BO_FLAG_MEM_TT; - if (!driver->cached[DRM_BO_MEM_VRAM]) + if (!(bm->man[DRM_BO_MEM_VRAM].flags & + _DRM_FLAG_MEMTYPE_CACHED)) new_flags &= DRM_BO_FLAG_MEM_VRAM; } } @@ -1735,6 +1743,8 @@ static int drm_bo_force_list_clean(drm_device_t * dev, int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) { drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man = &bm->man[mem_type]; + 
drm_mem_type_manager_t *local_man = &bm->man[DRM_BO_MEM_LOCAL]; int ret = -EINVAL; if (mem_type >= DRM_BO_MEM_TYPES) { @@ -1742,13 +1752,13 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) return ret; } - if (!bm->has_type[mem_type]) { + if (!man->has_type) { DRM_ERROR("Trying to take down uninitialized " "memory manager type\n"); return ret; } - bm->use_type[mem_type] = 0; - bm->has_type[mem_type] = 0; + man->use_type = 0; + man->has_type = 0; ret = 0; if (mem_type > 0) { @@ -1763,15 +1773,12 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) * Throw out evicted no-move buffers. */ - drm_bo_force_list_clean(dev, &bm->pinned[DRM_BO_MEM_LOCAL], - mem_type, 1, 0); - drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 1, - 0); - drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 1, - 0); + drm_bo_force_list_clean(dev, &local_man->pinned, mem_type, 1, 0); + drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0); + drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0); - if (drm_mm_clean(&bm->manager[mem_type])) { - drm_mm_takedown(&bm->manager[mem_type]); + if (drm_mm_clean(&man->manager)) { + drm_mm_takedown(&man->manager); } else { ret = -EBUSY; } @@ -1784,6 +1791,7 @@ static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) { int ret; drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man = &bm->man[mem_type]; if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) { DRM_ERROR("Illegal memory manager memory type %u,\n", mem_type); @@ -1793,11 +1801,11 @@ static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1); if (ret) return ret; - ret = drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 0, 1); + ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1); if (ret) return ret; ret = - drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 0, 1); + drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1); return ret; } @@ -1807,32 +1815,39 @@ static int drm_bo_init_mm(drm_device_t * dev, { drm_buffer_manager_t *bm = &dev->bm; int ret = -EINVAL; + drm_mem_type_manager_t *man; if (type >= DRM_BO_MEM_TYPES) { DRM_ERROR("Illegal memory type %d\n", type); return ret; } - if (bm->has_type[type]) { + + man = &bm->man[type]; + if (man->has_type) { DRM_ERROR("Memory manager already initialized for type %d\n", type); return ret; } + ret = dev->driver->bo_driver->init_mem_type(dev, type, man); + if (ret) + return ret; + ret = 0; if (type != DRM_BO_MEM_LOCAL) { if (!p_size) { DRM_ERROR("Zero size memory manager type %d\n", type); return ret; } - ret = drm_mm_init(&bm->manager[type], p_offset, p_size); + ret = drm_mm_init(&man->manager, p_offset, p_size); if (ret) return ret; } - bm->has_type[type] = 1; - bm->use_type[type] = 1; + man->has_type = 1; + man->use_type = 1; - INIT_LIST_HEAD(&bm->lru[type]); - INIT_LIST_HEAD(&bm->pinned[type]); + INIT_LIST_HEAD(&man->lru); + INIT_LIST_HEAD(&man->pinned); return 0; } @@ -1847,6 +1862,7 @@ int drm_bo_driver_finish(drm_device_t * dev) drm_buffer_manager_t *bm = &dev->bm; int ret = 0; unsigned i = DRM_BO_MEM_TYPES; + drm_mem_type_manager_t *man; mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); @@ -1856,14 +1872,15 @@ int drm_bo_driver_finish(drm_device_t * dev) bm->initialized = 0; while (i--) { - if (bm->has_type[i]) { - bm->use_type[i] = 0; + man = &bm->man[i]; + if (man->has_type) { + man->use_type = 0; if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) { ret = -EBUSY; DRM_ERROR("DRM 
memory manager type %d " "is not clean.\n", i); } - bm->has_type[i] = 0; + man->has_type = 0; } } mutex_unlock(&dev->struct_mutex); @@ -1875,10 +1892,10 @@ int drm_bo_driver_finish(drm_device_t * dev) if (list_empty(&bm->ddestroy)) { DRM_DEBUG("Delayed destroy list was clean\n"); } - if (list_empty(&bm->lru[0])) { + if (list_empty(&bm->man[0].lru)) { DRM_DEBUG("Swap list was clean\n"); } - if (list_empty(&bm->pinned[0])) { + if (list_empty(&bm->man[0].pinned)) { DRM_DEBUG("NO_MOVE list was clean\n"); } if (list_empty(&bm->unfenced)) { -- cgit v1.2.3 From c269d560e4d71448cfc9c2ea51eee3d5feafaad4 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 2 Feb 2007 14:47:44 +0100 Subject: Make vm handle buffer objects instead of ttm objects. Remove ttm objects. Make vm aware of PCI memory type buffer objects. (Only works for pre 2.6.16 kernels for now). --- linux-core/drm_bo.c | 291 +++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 244 insertions(+), 47 deletions(-) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index b72e9912..8fe5e8ef 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -32,30 +32,30 @@ #include "drmP.h" /* - * Buffer object locking policy: - * Lock dev->struct_mutex; - * Increase usage - * Unlock dev->struct_mutex; - * Lock buffer->mutex; - * Do whatever you want; - * Unlock buffer->mutex; - * Decrease usage. Call destruction if zero. + * Locking may look a bit complicated but isn't really: * - * User object visibility ups usage just once, since it has its own - * refcounting. + * The buffer usage atomic_t needs to be protected by dev->struct_mutex + * when there is a chance that it can be zero before or after the operation. + * + * dev->struct_mutex also protects all lists and list heads. Hash tables and hash + * heads. + * + * bo->mutex protects the buffer object itself excluding the usage field. + * bo->mutex does also protect the buffer list heads, so to manipulate those, we need + * both the bo->mutex and the dev->struct_mutex. * - * Destruction: - * lock dev->struct_mutex; - * Verify that usage is zero. Otherwise unlock and continue. - * Destroy object. - * unlock dev->struct_mutex; + * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit + * complicated. When dev->struct_mutex is released to grab bo->mutex, the list + * traversal will, in general, need to be restarted. * - * Mutex and spinlock locking orders: - * 1.) Buffer mutex - * 2.) Refer to ttm locking orders. */ + + static void drm_bo_destroy_locked(drm_buffer_object_t *bo); +static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo); +static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo); +static void drm_bo_unmap_virtual(drm_buffer_object_t *bo); #define DRM_FLAG_MASKED(_old, _new, _mask) {\ (_old) ^= (((_old) ^ (_new)) & (_mask)); \ @@ -110,6 +110,7 @@ static int drm_move_tt_to_local(drm_buffer_object_t * bo, int evict, int ret; if (bo->mm_node) { + drm_bo_unmap_virtual(bo); mutex_lock(&dev->struct_mutex); if (evict) ret = drm_evict_ttm(bo->ttm); @@ -278,12 +279,9 @@ static void drm_bo_destroy_locked(drm_buffer_object_t *bo) DRM_ERROR("Couldn't unbind TTM region while destroying a buffer. " "Bad. 
Continuing anyway\n"); } + drm_destroy_ttm(bo->ttm); + bo->ttm = NULL; } - - if (bo->ttm_object) { - drm_ttm_object_deref_locked(dev, bo->ttm_object); - } - atomic_dec(&bm->count); drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ); @@ -362,7 +360,7 @@ static void drm_bo_delayed_workqueue(struct work_struct *work) mutex_unlock(&dev->struct_mutex); } -static void drm_bo_usage_deref_locked(drm_buffer_object_t * bo) +void drm_bo_usage_deref_locked(drm_buffer_object_t * bo) { if (atomic_dec_and_test(&bo->usage)) { drm_bo_destroy_locked(bo); @@ -371,8 +369,11 @@ static void drm_bo_usage_deref_locked(drm_buffer_object_t * bo) static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo) { - drm_bo_usage_deref_locked(drm_user_object_entry(uo, drm_buffer_object_t, - base)); + drm_buffer_object_t *bo = + drm_user_object_entry(uo, drm_buffer_object_t, base); + + drm_bo_takedown_vm_locked(bo); + drm_bo_usage_deref_locked(bo); } static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo) @@ -608,6 +609,7 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait) DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->mm_node->start); + drm_bo_unmap_virtual(bo); mutex_lock(&dev->struct_mutex); ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED, bo->mm_node->start); @@ -927,13 +929,7 @@ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo, rep->flags = bo->flags; rep->size = bo->num_pages * PAGE_SIZE; rep->offset = bo->offset; - - if (bo->ttm_object) { - rep->arg_handle = bo->ttm_object->map_list.user_token; - } else { - rep->arg_handle = 0; - } - + rep->arg_handle = bo->map_list.user_token; rep->mask = bo->mask; rep->buffer_start = bo->buffer_start; rep->fence_flags = bo->fence_type; @@ -1322,19 +1318,21 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle, static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo) { drm_device_t *dev = bo->dev; - drm_ttm_object_t *to = NULL; int ret = 0; - uint32_t ttm_flags = 0; - bo->ttm_object = NULL; bo->ttm = NULL; + bo->map_list.user_token = 0ULL; switch (bo->type) { case drm_bo_type_dc: mutex_lock(&dev->struct_mutex); - ret = drm_ttm_object_create(dev, bo->num_pages * PAGE_SIZE, - ttm_flags, &to); + ret = drm_bo_setup_vm_locked(bo); mutex_unlock(&dev->struct_mutex); + if (ret) + break; + bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT); + if (!bo->ttm) + ret = -ENOMEM; break; case drm_bo_type_user: case drm_bo_type_fake: @@ -1345,14 +1343,6 @@ static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo) break; } - if (ret) { - return ret; - } - - if (to) { - bo->ttm_object = to; - bo->ttm = drm_ttm_from_object(to); - } return ret; } @@ -1384,7 +1374,6 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo, bo->mm_node = NULL; bo->ttm = NULL; - bo->ttm_object = NULL; bo->fence = NULL; bo->flags = 0; @@ -2023,3 +2012,211 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return 0; } + +/* + * buffer object vm functions. + */ + +/** + * \c Get the PCI offset for the buffer object memory. + * + * \param bo The buffer object. + * \param bus_base On return the base of the PCI region + * \param bus_offset On return the byte offset into the PCI region + * \param bus_size On return the byte size of the buffer object or zero if + * the buffer object memory is not accessible through a PCI region. + * \return Failure indication. + * + * Returns -EINVAL if the buffer object is currently not mappable. + * Otherwise returns zero. 
Call bo->mutex locked. + */ + +int drm_bo_pci_offset(const drm_buffer_object_t *bo, + unsigned long *bus_base, + unsigned long *bus_offset, + unsigned long *bus_size) +{ + drm_device_t *dev = bo->dev; + drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man = &bm->man[bo->mem_type]; + + *bus_size = 0; + + if (bo->type != drm_bo_type_dc) + return -EINVAL; + + if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)) + return -EINVAL; + + if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { + drm_ttm_t *ttm = bo->ttm; + + if (!bo->ttm) { + return -EINVAL; + } + + drm_ttm_fixup_caching(ttm); + + if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED)) + return 0; + if (ttm->be->flags & DRM_BE_FLAG_CMA) + return 0; + + *bus_base = ttm->be->aperture_base; + } else { + *bus_base = man->io_offset; + } + + *bus_offset = bo->mm_node->start << PAGE_SHIFT; + *bus_size = bo->num_pages << PAGE_SHIFT; + + return 0; +} + +/** + * \c Return a kernel virtual address to the buffer object PCI memory. + * + * \param bo The buffer object. + * \return Failure indication. + * + * Returns -EINVAL if the buffer object is currently not mappable. + * Returns -ENOMEM if the ioremap operation failed. + * Otherwise returns zero. + * + * After a successfull call, bo->iomap contains the virtual address, or NULL + * if the buffer object content is not accessible through PCI space. + * Call bo->mutex locked. + */ + +int drm_bo_ioremap(drm_buffer_object_t *bo) +{ + drm_device_t *dev = bo->dev; + drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man = &bm->man[bo->mem_type]; + unsigned long bus_offset; + unsigned long bus_size; + unsigned long bus_base; + int ret; + + BUG_ON(bo->iomap); + + ret = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size); + if (ret || bus_size == 0) + return ret; + + if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) + bo->iomap = (void *) (((u8 *)man->io_addr) + bus_offset); + else { + bo->iomap = ioremap_nocache(bus_base + bus_offset, bus_size); + if (bo->iomap) + return -ENOMEM; + } + + return 0; +} + +/** + * \c Unmap mapping obtained using drm_bo_ioremap + * + * \param bo The buffer object. + * + * Call bo->mutex locked. + */ + +void drm_bo_iounmap(drm_buffer_object_t *bo) +{ + drm_device_t *dev = bo->dev; + drm_buffer_manager_t *bm; + drm_mem_type_manager_t *man; + + + bm = &dev->bm; + man = &bm->man[bo->mem_type]; + + if (bo->iomap && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) + iounmap(bo->iomap); + + bo->iomap = NULL; +} + +/** + * \c Kill all user-space virtual mappings of this buffer object. + * + * \param bo The buffer object. + * + * Call bo->mutex locked. 
+ */ + +void drm_bo_unmap_virtual(drm_buffer_object_t *bo) +{ + drm_device_t *dev = bo->dev; + loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT; + loff_t holelen = ((loff_t) bo->num_pages) << PAGE_SHIFT; + + unmap_mapping_range(dev->dev_mapping, offset, holelen, 1); +} + +static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo) +{ + drm_map_list_t *list = &bo->map_list; + drm_local_map_t *map; + drm_device_t *dev = bo->dev; + + if (list->user_token) { + drm_ht_remove_item(&dev->map_hash, &list->hash); + list->user_token = 0; + } + if (list->file_offset_node) { + drm_mm_put_block(list->file_offset_node); + list->file_offset_node = NULL; + } + + map = list->map; + if (!map) + return; + + drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ); + list->map = NULL; + list->user_token = 0ULL; + drm_bo_usage_deref_locked(bo); +} + +static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo) +{ + drm_map_list_t *list = &bo->map_list; + drm_local_map_t *map; + drm_device_t *dev = bo->dev; + + list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ); + if (!list->map) + return -ENOMEM; + + map = list->map; + map->offset = 0; + map->type = _DRM_TTM; + map->flags = _DRM_REMOVABLE; + map->size = bo->num_pages * PAGE_SIZE; + atomic_inc(&bo->usage); + map->handle = (void *) bo; + + list->file_offset_node = drm_mm_search_free(&dev->offset_manager, + bo->num_pages, 0, 0); + + if (!list->file_offset_node) { + drm_bo_takedown_vm_locked(bo); + return -ENOMEM; + } + + list->file_offset_node = drm_mm_get_block(list->file_offset_node, + bo->num_pages, 0); + + list->hash.key = list->file_offset_node->start; + if (drm_ht_insert_item(&dev->map_hash, &list->hash)) { + drm_bo_takedown_vm_locked(bo); + return -ENOMEM; + } + + list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT; + + return 0; +} -- cgit v1.2.3 From 63f2abd721c40f1cddae555c79b4ab4c55aae006 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 2 Feb 2007 19:49:11 +0100 Subject: Make also later kernels work with buffer object vm and clean up some function names. 
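For later kernels the buffer-object vm support mostly means guarding the old explicit vma bookkeeping with DRM_ODD_MM_COMPAT; a condensed sketch of the pattern the hunks below apply around ttm unbinding (simplified, with struct_mutex handling elided; helper names are taken from the diff itself):

    #ifdef DRM_ODD_MM_COMPAT                /* pre-2.6.16: fix up vmas by hand */
            ret = drm_bo_lock_kmm(bo);
            if (!ret) {
                    drm_bo_unmap_virtual(bo);
                    drm_bo_finish_unmap(bo);
                    drm_bo_unlock_kmm(bo);
            }
    #else                                   /* later kernels: zapping user mappings is enough */
            drm_bo_unmap_virtual(bo);
    #endif
            drm_ttm_unbind(bo->ttm);        /* renamed from drm_unbind_ttm() */
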
--- linux-core/drm_bo.c | 106 ++++++++++++++++++++++++++++++---------------------- 1 file changed, 62 insertions(+), 44 deletions(-) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 8fe5e8ef..9a27a4b5 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -107,23 +107,31 @@ static int drm_move_tt_to_local(drm_buffer_object_t * bo, int evict, int force_no_move) { drm_device_t *dev = bo->dev; - int ret; + int ret = 0; if (bo->mm_node) { - drm_bo_unmap_virtual(bo); +#ifdef DRM_ODD_MM_COMPAT mutex_lock(&dev->struct_mutex); - if (evict) - ret = drm_evict_ttm(bo->ttm); - else - ret = drm_unbind_ttm(bo->ttm); - + ret = drm_bo_lock_kmm(bo); if (ret) { mutex_unlock(&dev->struct_mutex); if (ret == -EAGAIN) schedule(); return ret; } + drm_bo_unmap_virtual(bo); + drm_bo_finish_unmap(bo); + drm_bo_unlock_kmm(bo); +#else + drm_bo_unmap_virtual(bo); + mutex_lock(&dev->struct_mutex); +#endif + if (evict) + drm_ttm_evict(bo->ttm); + else + drm_ttm_unbind(bo->ttm); + bo->mem_type = DRM_BO_MEM_LOCAL; if (!(bo->flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) { drm_mm_put_block(bo->mm_node); bo->mm_node = NULL; @@ -262,23 +270,13 @@ static void drm_bo_destroy_locked(drm_buffer_object_t *bo) if (list_empty(&bo->lru) && bo->mm_node == NULL && atomic_read(&bo->usage) == 0) { BUG_ON(bo->fence != NULL); - if (bo->ttm) { - unsigned long _end = jiffies + DRM_HZ; - int ret; - - do { - ret = drm_unbind_ttm(bo->ttm); - if (ret == -EAGAIN) { - mutex_unlock(&dev->struct_mutex); - schedule(); - mutex_lock(&dev->struct_mutex); - } - } while (ret == -EAGAIN && !time_after_eq(jiffies, _end)); +#ifdef DRM_ODD_MM_COMPAT + BUG_ON(!list_empty(&bo->vma_list)); + BUG_ON(!list_empty(&bo->p_mm_list)); +#endif - if (ret) { - DRM_ERROR("Couldn't unbind TTM region while destroying a buffer. " - "Bad. 
Continuing anyway\n"); - } + if (bo->ttm) { + drm_ttm_unbind(bo->ttm); drm_destroy_ttm(bo->ttm); bo->ttm = NULL; } @@ -597,8 +595,7 @@ int drm_bo_alloc_space(drm_buffer_object_t * bo, unsigned mem_type, static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait) { drm_device_t *dev = bo->dev; - drm_ttm_backend_t *be; - int ret; + int ret = 0; if (!(bo->mm_node && (bo->flags & DRM_BO_FLAG_NO_MOVE))) { BUG_ON(bo->mm_node); @@ -608,26 +605,41 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait) } DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->mm_node->start); - - drm_bo_unmap_virtual(bo); + +#ifdef DRM_ODD_MM_COMPAT mutex_lock(&dev->struct_mutex); + ret = drm_bo_lock_kmm(bo); + if (ret) { + mutex_unlock(&dev->struct_mutex); + goto out_put_unlock; + } +#endif + drm_bo_unmap_virtual(bo); ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED, bo->mm_node->start); + if (ret) { - drm_mm_put_block(bo->mm_node); - bo->mm_node = NULL; +#ifdef DRM_ODD_MM_COMPAT + drm_bo_unlock_kmm(bo); + mutex_unlock(&dev->struct_mutex); +#endif + goto out_put_unlock; } - mutex_unlock(&dev->struct_mutex); + + if (!(bo->flags & DRM_BO_FLAG_BIND_CACHED)) + bo->flags &= DRM_BO_FLAG_CACHED; + bo->flags &= ~DRM_BO_MASK_MEM; + bo->flags |= DRM_BO_FLAG_MEM_TT; + bo->mem_type = DRM_BO_MEM_TT; +#ifdef DRM_ODD_MM_COMPAT + ret = drm_bo_remap_bound(bo); if (ret) { - return ret; + /* FIXME */ } - - be = bo->ttm->be; - if (be->needs_ub_cache_adjust(be)) - bo->flags &= ~DRM_BO_FLAG_CACHED; - bo->flags &= ~DRM_BO_MASK_MEM; - bo->flags |= DRM_BO_FLAG_MEM_TT; + drm_bo_unlock_kmm(bo); + mutex_unlock(&dev->struct_mutex); +#endif if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) { ret = dev->driver->bo_driver->invalidate_caches(dev, bo->flags); @@ -637,6 +649,13 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait) DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_EVICTED); return 0; + +out_put_unlock: + mutex_lock(&dev->struct_mutex); + drm_mm_put_block(bo->mm_node); + bo->mm_node = NULL; + mutex_unlock(&dev->struct_mutex); + return ret; } static int drm_bo_new_flags(drm_device_t * dev, @@ -1120,7 +1139,6 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags, } else { drm_move_tt_to_local(bo, 0, force_no_move); } - return 0; } @@ -1213,13 +1231,12 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, list_add_tail(&bo->lru, &bm->unfenced); mutex_unlock(&dev->struct_mutex); } else { - mutex_lock(&dev->struct_mutex); list_del_init(&bo->lru); drm_bo_add_to_lru(bo, bm); mutex_unlock(&dev->struct_mutex); } - + bo->flags = new_flags; return 0; } @@ -1427,6 +1444,10 @@ int drm_buffer_object_create(drm_file_t * priv, DRM_INIT_WAITQUEUE(&bo->event_queue); INIT_LIST_HEAD(&bo->lru); INIT_LIST_HEAD(&bo->ddestroy); +#ifdef DRM_ODD_MM_COMPAT + INIT_LIST_HEAD(&bo->p_mm_list); + INIT_LIST_HEAD(&bo->vma_list); +#endif bo->dev = dev; bo->type = type; bo->num_pages = num_pages; @@ -2041,7 +2062,6 @@ int drm_bo_pci_offset(const drm_buffer_object_t *bo, drm_mem_type_manager_t *man = &bm->man[bo->mem_type]; *bus_size = 0; - if (bo->type != drm_bo_type_dc) return -EINVAL; @@ -2057,11 +2077,10 @@ int drm_bo_pci_offset(const drm_buffer_object_t *bo, drm_ttm_fixup_caching(ttm); - if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED)) + if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED)) return 0; if (ttm->be->flags & DRM_BE_FLAG_CMA) return 0; - *bus_base = ttm->be->aperture_base; } else { *bus_base = man->io_offset; @@ -2069,7 +2088,6 @@ int drm_bo_pci_offset(const drm_buffer_object_t 
*bo, *bus_offset = bo->mm_node->start << PAGE_SHIFT; *bus_size = bo->num_pages << PAGE_SHIFT; - return 0; } -- cgit v1.2.3 From 609e3b037526021d20c7cc18b7fed1152206dc68 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 6 Feb 2007 14:20:33 +0100 Subject: Implement a policy for selecting memory types. --- linux-core/drm_bo.c | 212 +++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 167 insertions(+), 45 deletions(-) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 9a27a4b5..fa659d04 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -56,6 +56,8 @@ static void drm_bo_destroy_locked(drm_buffer_object_t *bo); static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo); static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo); static void drm_bo_unmap_virtual(drm_buffer_object_t *bo); +static int drm_bo_mem_space(drm_device_t *dev, drm_bo_mem_reg_t *mem, + int no_wait); #define DRM_FLAG_MASKED(_old, _new, _mask) {\ (_old) ^= (((_old) ^ (_new)) & (_mask)); \ @@ -497,6 +499,7 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, int ret = 0; drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; + drm_bo_mem_reg_t evict_mem; /* * Someone might have modified the buffer before we took the buffer mutex. @@ -509,22 +512,39 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, ret = drm_bo_wait(bo, 0, 0, no_wait); - if (ret) { - if (ret != -EAGAIN) - DRM_ERROR("Failed to expire fence before " - "buffer eviction.\n"); + if (ret && ret != -EAGAIN) { + DRM_ERROR("Failed to expire fence before " + "buffer eviction.\n"); goto out; } - if (mem_type == DRM_BO_MEM_TT) { - ret = drm_move_tt_to_local(bo, 1, force_no_move); - if (ret) - goto out; - mutex_lock(&dev->struct_mutex); - list_del_init(&bo->lru); - drm_bo_add_to_lru(bo, bm); - mutex_unlock(&dev->struct_mutex); + evict_mem.num_pages = bo->num_pages; + evict_mem.page_alignment = bo->page_alignment; + evict_mem.size = evict_mem.num_pages << PAGE_SHIFT; + evict_mem.mask = dev->driver->bo_driver->evict_flags(dev, mem_type); + + ret = drm_bo_mem_space(dev, &evict_mem, no_wait); + + if (ret && ret != -EAGAIN) { + DRM_ERROR("Failed to find memory space for " + "buffer eviction.\n"); + goto out; + } + + if ((mem_type != DRM_BO_MEM_TT) && + (evict_mem.mem_type != DRM_BO_MEM_LOCAL)) { + ret = -EINVAL; + DRM_ERROR("Unsupported memory types for eviction.\n"); + goto out; } + + ret = drm_move_tt_to_local(bo, 1, force_no_move); + if (ret) + goto out; + mutex_lock(&dev->struct_mutex); + list_del_init(&bo->lru); + drm_bo_add_to_lru(bo, bm); + mutex_unlock(&dev->struct_mutex); if (ret) goto out; @@ -535,26 +555,25 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, return ret; } -/* - * bo->mutex locked. 
- */ -int drm_bo_alloc_space(drm_buffer_object_t * bo, unsigned mem_type, - int no_wait) + +static int drm_bo_mem_force_space(drm_device_t *dev, + drm_bo_mem_reg_t *mem, + uint32_t mem_type, + int no_wait) { - drm_device_t *dev = bo->dev; drm_mm_node_t *node; drm_buffer_manager_t *bm = &dev->bm; drm_buffer_object_t *entry; drm_mem_type_manager_t *man = &bm->man[mem_type]; - drm_mm_t *mm = &man->manager; struct list_head *lru; - unsigned long size = bo->num_pages; + unsigned long num_pages = mem->num_pages; int ret; mutex_lock(&dev->struct_mutex); do { - node = drm_mm_search_free(mm, size, bo->page_alignment, 1); + node = drm_mm_search_free(&man->manager, num_pages, + mem->page_alignment, 1); if (node) break; @@ -563,11 +582,11 @@ int drm_bo_alloc_space(drm_buffer_object_t * bo, unsigned mem_type, break; entry = list_entry(lru->next, drm_buffer_object_t, lru); - atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); - BUG_ON(bo->flags & DRM_BO_FLAG_NO_MOVE); + BUG_ON(entry->flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); + ret = drm_bo_evict(entry, mem_type, no_wait, 0); mutex_unlock(&entry->mutex); drm_bo_usage_deref_unlocked(entry); @@ -577,34 +596,108 @@ int drm_bo_alloc_space(drm_buffer_object_t * bo, unsigned mem_type, } while (1); if (!node) { - DRM_ERROR("Out of videoram / aperture space\n"); mutex_unlock(&dev->struct_mutex); return -ENOMEM; } - node = drm_mm_get_block(node, size, bo->page_alignment); + node = drm_mm_get_block(node, num_pages, mem->page_alignment); mutex_unlock(&dev->struct_mutex); - BUG_ON(!node); - node->private = (void *)bo; - - bo->mm_node = node; - bo->offset = node->start * PAGE_SIZE; + mem->mm_node = node; + mem->mem_type = mem_type; + mem->flags = drm_bo_type_flags(mem_type); return 0; } -static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait) + +static int drm_bo_mem_space(drm_device_t *dev, + drm_bo_mem_reg_t *mem, + int no_wait) { - drm_device_t *dev = bo->dev; - int ret = 0; + drm_buffer_manager_t *bm= &dev->bm; + drm_mem_type_manager_t *man; - if (!(bo->mm_node && (bo->flags & DRM_BO_FLAG_NO_MOVE))) { - BUG_ON(bo->mm_node); - ret = drm_bo_alloc_space(bo, DRM_BO_MEM_TT, no_wait); - if (ret) - return ret; + uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; + const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; + uint32_t i; + uint32_t mem_type = DRM_BO_MEM_LOCAL; + int type_found = 0; + int type_ok = 0; + int has_eagain = 0; + drm_mm_node_t *node = NULL; + int ret; + + for (i=0; imask ; + if (!type_ok) + continue; + + if (mem_type == DRM_BO_MEM_LOCAL) + break; + + man = &bm->man[mem_type]; + mutex_lock(&dev->struct_mutex); + if (man->has_type && man->use_type) { + type_found = 1; + node = drm_mm_search_free(&man->manager, mem->num_pages, + mem->page_alignment, 1); + if (node) + node = drm_mm_get_block(node, mem->num_pages, + mem->page_alignment); + } + mutex_unlock(&dev->struct_mutex); + if (node) + break; } + + if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) { + mem->mm_node = node; + mem->mem_type = mem_type; + mem->flags = drm_bo_type_flags(mem_type); + return 0; + } + + if (!type_found) { + DRM_ERROR("Requested memory types are not supported\n"); + return -EINVAL; + } + + num_prios = dev->driver->bo_driver->num_mem_busy_prio; + prios = dev->driver->bo_driver->mem_busy_prio; + + for (i=0; imask)) + continue; + + man = &bm->man[mem_type]; + ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait); + + if (ret == 0) + return 0; + + if (ret == -EAGAIN) + has_eagain = 
1; + } + + ret = (has_eagain) ? -EAGAIN : -ENOMEM; + return ret; +} + - DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->mm_node->start); + + +static int drm_move_local_to_tt(drm_buffer_object_t * bo, + drm_bo_mem_reg_t * mem, + int no_wait) +{ + drm_device_t *dev = bo->dev; + int ret = 0; + + bo->mm_node = mem->mm_node; + + DRM_DEBUG("Flipping in to AGP 0x%08lx 0x%08lx\n", + bo->mm_node->start, bo->mm_node->size); #ifdef DRM_ODD_MM_COMPAT mutex_lock(&dev->struct_mutex); @@ -631,6 +724,7 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait) bo->flags &= ~DRM_BO_MASK_MEM; bo->flags |= DRM_BO_FLAG_MEM_TT; bo->mem_type = DRM_BO_MEM_TT; + bo->offset = bo->mm_node->start << PAGE_SHIFT; #ifdef DRM_ODD_MM_COMPAT ret = drm_bo_remap_bound(bo); @@ -1103,14 +1197,18 @@ static void drm_buffer_user_object_unmap(drm_file_t * priv, * bo->mutex locked. */ -static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags, +static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, int no_wait, int force_no_move) { + drm_device_t *dev = bo->dev; + drm_buffer_manager_t *bm = &dev->bm; int ret = 0; + drm_bo_mem_reg_t mem; /* * Flush outstanding fences. */ + drm_bo_busy(bo); /* @@ -1126,16 +1224,38 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags, */ ret = drm_bo_wait(bo, 0, 0, no_wait); + if (ret) + return ret; - if (ret == -EINTR) - return -EAGAIN; + + mem.num_pages = bo->num_pages; + mem.size = mem.num_pages << PAGE_SHIFT; + mem.mask = new_mem_flags; + mem.page_alignment = bo->page_alignment; + + mutex_lock(&bm->evict_mutex); + mutex_lock(&dev->struct_mutex); + list_del(&bo->lru); + list_add_tail(&bo->lru,&bm->unfenced); + DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, _DRM_BO_FLAG_UNFENCED); + mutex_unlock(&dev->struct_mutex); + + ret = drm_bo_mem_space(dev, &mem, no_wait); + mutex_unlock(&bm->evict_mutex); + if (ret) return ret; - if (new_flags & DRM_BO_FLAG_MEM_TT) { - ret = drm_move_local_to_tt(bo, no_wait); - if (ret) + if (mem.mem_type == DRM_BO_MEM_TT) { + ret = drm_move_local_to_tt(bo, &mem, no_wait); + if (ret) { + mutex_lock(&dev->struct_mutex); + list_del_init(&bo->lru); + drm_bo_add_to_lru(bo, bm); + mutex_unlock(&dev->struct_mutex); + DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); return ret; + } } else { drm_move_tt_to_local(bo, 0, force_no_move); } @@ -1231,6 +1351,8 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, list_add_tail(&bo->lru, &bm->unfenced); mutex_unlock(&dev->struct_mutex); } else { + DRM_FLAG_MASKED(bo->priv_flags, 0, + _DRM_BO_FLAG_UNFENCED); mutex_lock(&dev->struct_mutex); list_del_init(&bo->lru); drm_bo_add_to_lru(bo, bm); -- cgit v1.2.3 From 40ce53dfde11f84d7bf8db5db93fb73715b2e96e Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 6 Feb 2007 15:56:43 +0100 Subject: Implement a drm_mem_reg_t substructure in the buffer object type. 
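For orientation, a rough sketch of what the new substructure carries, reconstructed from the bo->mem.* accesses in the hunks below (field order and exact types are assumptions here; the authoritative definition lives in the DRM headers, not in this file):

    /* Sketch only: fields inferred from the uses below. */
    typedef struct drm_bo_mem_reg {
            drm_mm_node_t *mm_node;         /* node in the managed aperture, NULL when unallocated */
            unsigned long size;             /* byte size, num_pages << PAGE_SHIFT */
            unsigned long num_pages;        /* buffer size in pages */
            uint32_t page_alignment;        /* required mm_node alignment, in pages */
            uint32_t mem_type;              /* DRM_BO_MEM_LOCAL / _TT / _VRAM */
            uint32_t flags;                 /* current placement flags */
            uint32_t mask;                  /* requested placement flags */
    } drm_bo_mem_reg_t;

Validation, eviction and the vm helpers below all switch from bo->flags, bo->num_pages and bo->mm_node to the corresponding bo->mem fields.
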
--- linux-core/drm_bo.c | 173 ++++++++++++++++++++++++++-------------------------- 1 file changed, 88 insertions(+), 85 deletions(-) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index fa659d04..8a6b49dc 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -78,24 +78,24 @@ static void drm_bo_add_to_lru(drm_buffer_object_t * bo, struct list_head *list; drm_mem_type_manager_t *man; - bo->mem_type = 0; + bo->mem.mem_type = 0; - switch(bo->flags & DRM_BO_MASK_MEM) { + switch(bo->mem.flags & DRM_BO_MASK_MEM) { case DRM_BO_FLAG_MEM_TT: - bo->mem_type = DRM_BO_MEM_TT; + bo->mem.mem_type = DRM_BO_MEM_TT; break; case DRM_BO_FLAG_MEM_VRAM: - bo->mem_type = DRM_BO_MEM_VRAM; + bo->mem.mem_type = DRM_BO_MEM_VRAM; break; case DRM_BO_FLAG_MEM_LOCAL: - bo->mem_type = DRM_BO_MEM_LOCAL; + bo->mem.mem_type = DRM_BO_MEM_LOCAL; break; default: BUG_ON(1); } - man = &bm->man[bo->mem_type]; - list = (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? + man = &bm->man[bo->mem.mem_type]; + list = (bo->mem.flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? &man->pinned : &man->lru; list_add_tail(&bo->lru, list); return; @@ -111,7 +111,7 @@ static int drm_move_tt_to_local(drm_buffer_object_t * bo, int evict, drm_device_t *dev = bo->dev; int ret = 0; - if (bo->mm_node) { + if (bo->mem.mm_node) { #ifdef DRM_ODD_MM_COMPAT mutex_lock(&dev->struct_mutex); ret = drm_bo_lock_kmm(bo); @@ -133,16 +133,16 @@ static int drm_move_tt_to_local(drm_buffer_object_t * bo, int evict, else drm_ttm_unbind(bo->ttm); - bo->mem_type = DRM_BO_MEM_LOCAL; - if (!(bo->flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) { - drm_mm_put_block(bo->mm_node); - bo->mm_node = NULL; + bo->mem.mem_type = DRM_BO_MEM_LOCAL; + if (!(bo->mem.flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) { + drm_mm_put_block(bo->mem.mm_node); + bo->mem.mm_node = NULL; } mutex_unlock(&dev->struct_mutex); } - bo->flags &= ~DRM_BO_FLAG_MEM_TT; - bo->flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; + bo->mem.flags &= ~DRM_BO_FLAG_MEM_TT; + bo->mem.flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; return 0; } @@ -235,9 +235,9 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all) if (!bo->fence) { list_del_init(&bo->lru); - if (bo->mm_node) { - drm_mm_put_block(bo->mm_node); - bo->mm_node = NULL; + if (bo->mem.mm_node) { + drm_mm_put_block(bo->mem.mm_node); + bo->mem.mm_node = NULL; } list_del_init(&bo->ddestroy); mutex_unlock(&bo->mutex); @@ -269,7 +269,7 @@ static void drm_bo_destroy_locked(drm_buffer_object_t *bo) drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; - if (list_empty(&bo->lru) && bo->mm_node == NULL && atomic_read(&bo->usage) == 0) { + if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && atomic_read(&bo->usage) == 0) { BUG_ON(bo->fence != NULL); #ifdef DRM_ODD_MM_COMPAT @@ -507,7 +507,7 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) goto out; - if (!(bo->flags & drm_bo_type_flags(mem_type))) + if (!(bo->mem.flags & drm_bo_type_flags(mem_type))) goto out; ret = drm_bo_wait(bo, 0, 0, no_wait); @@ -518,9 +518,7 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, goto out; } - evict_mem.num_pages = bo->num_pages; - evict_mem.page_alignment = bo->page_alignment; - evict_mem.size = evict_mem.num_pages << PAGE_SHIFT; + evict_mem = bo->mem; evict_mem.mask = dev->driver->bo_driver->evict_flags(dev, mem_type); ret = drm_bo_mem_space(dev, &evict_mem, no_wait); @@ -585,7 +583,7 @@ 
static int drm_bo_mem_force_space(drm_device_t *dev, atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); - BUG_ON(entry->flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); + BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); ret = drm_bo_evict(entry, mem_type, no_wait, 0); mutex_unlock(&entry->mutex); @@ -694,10 +692,10 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, drm_device_t *dev = bo->dev; int ret = 0; - bo->mm_node = mem->mm_node; + bo->mem.mm_node = mem->mm_node; DRM_DEBUG("Flipping in to AGP 0x%08lx 0x%08lx\n", - bo->mm_node->start, bo->mm_node->size); + bo->mem.mm_node->start, bo->mem.mm_node->size); #ifdef DRM_ODD_MM_COMPAT mutex_lock(&dev->struct_mutex); @@ -708,8 +706,8 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, } #endif drm_bo_unmap_virtual(bo); - ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED, - bo->mm_node->start); + ret = drm_bind_ttm(bo->ttm, bo->mem.flags & DRM_BO_FLAG_BIND_CACHED, + bo->mem.mm_node->start); if (ret) { #ifdef DRM_ODD_MM_COMPAT @@ -719,12 +717,12 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, goto out_put_unlock; } - if (!(bo->flags & DRM_BO_FLAG_BIND_CACHED)) - bo->flags &= DRM_BO_FLAG_CACHED; - bo->flags &= ~DRM_BO_MASK_MEM; - bo->flags |= DRM_BO_FLAG_MEM_TT; - bo->mem_type = DRM_BO_MEM_TT; - bo->offset = bo->mm_node->start << PAGE_SHIFT; + if (!(bo->mem.flags & DRM_BO_FLAG_BIND_CACHED)) + bo->mem.flags &= DRM_BO_FLAG_CACHED; + bo->mem.flags &= ~DRM_BO_MASK_MEM; + bo->mem.flags |= DRM_BO_FLAG_MEM_TT; + bo->mem.mem_type = DRM_BO_MEM_TT; + bo->offset = bo->mem.mm_node->start << PAGE_SHIFT; #ifdef DRM_ODD_MM_COMPAT ret = drm_bo_remap_bound(bo); @@ -736,7 +734,7 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, #endif if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) { - ret = dev->driver->bo_driver->invalidate_caches(dev, bo->flags); + ret = dev->driver->bo_driver->invalidate_caches(dev, bo->mem.flags); if (ret) DRM_ERROR("Could not flush read caches\n"); } @@ -746,8 +744,8 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, out_put_unlock: mutex_lock(&dev->struct_mutex); - drm_mm_put_block(bo->mm_node); - bo->mm_node = NULL; + drm_mm_put_block(bo->mem.mm_node); + bo->mem.mm_node = NULL; mutex_unlock(&dev->struct_mutex); return ret; } @@ -948,7 +946,7 @@ static int drm_bo_read_cached(drm_buffer_object_t * bo) int ret = 0; BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); - if (bo->mm_node) + if (bo->mem.mm_node) ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0); return ret; } @@ -1039,15 +1037,15 @@ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo, drm_bo_arg_reply_t * rep) { rep->handle = bo->base.hash.key; - rep->flags = bo->flags; - rep->size = bo->num_pages * PAGE_SIZE; + rep->flags = bo->mem.flags; + rep->size = bo->mem.num_pages * PAGE_SIZE; rep->offset = bo->offset; rep->arg_handle = bo->map_list.user_token; - rep->mask = bo->mask; + rep->mask = bo->mem.mask; rep->buffer_start = bo->buffer_start; rep->fence_flags = bo->fence_type; rep->rep_flags = 0; - rep->page_alignment = bo->page_alignment; + rep->page_alignment = bo->mem.page_alignment; if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) { DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY, @@ -1105,14 +1103,14 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle, } if ((map_flags & DRM_BO_FLAG_READ) && - (bo->flags & DRM_BO_FLAG_READ_CACHED) && - (!(bo->flags & DRM_BO_FLAG_CACHED))) { + (bo->mem.flags & 
DRM_BO_FLAG_READ_CACHED) && + (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) { drm_bo_read_cached(bo); } break; } else if ((map_flags & DRM_BO_FLAG_READ) && - (bo->flags & DRM_BO_FLAG_READ_CACHED) && - (!(bo->flags & DRM_BO_FLAG_CACHED))) { + (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) && + (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) { /* * We are already mapped with different flags. @@ -1228,18 +1226,23 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, return ret; - mem.num_pages = bo->num_pages; + mem.num_pages = bo->mem.num_pages; mem.size = mem.num_pages << PAGE_SHIFT; mem.mask = new_mem_flags; - mem.page_alignment = bo->page_alignment; + mem.page_alignment = bo->mem.page_alignment; mutex_lock(&bm->evict_mutex); mutex_lock(&dev->struct_mutex); list_del(&bo->lru); list_add_tail(&bo->lru,&bm->unfenced); - DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, _DRM_BO_FLAG_UNFENCED); + DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, + _DRM_BO_FLAG_UNFENCED); mutex_unlock(&dev->struct_mutex); + /* + * Determine where to move the buffer. + */ + ret = drm_bo_mem_space(dev, &mem, no_wait); mutex_unlock(&bm->evict_mutex); @@ -1272,7 +1275,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; - uint32_t flag_diff = (new_flags ^ bo->flags); + uint32_t flag_diff = (new_flags ^ bo->mem.flags); drm_bo_driver_t *driver = dev->driver->bo_driver; int ret; @@ -1282,7 +1285,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, return -EINVAL; } - DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", new_flags, bo->flags); + DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", new_flags, bo->mem.flags); ret = driver->fence_type(new_flags, &bo->fence_class, &bo->fence_type); if (ret) { DRM_ERROR("Driver did not support given buffer permissions\n"); @@ -1294,8 +1297,8 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, */ if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) && - !(bo->flags & DRM_BO_FLAG_MEM_LOCAL)) { - if (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { + !(bo->mem.flags & DRM_BO_FLAG_MEM_LOCAL)) { + if (bo->mem.flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { DRM_ERROR("Cannot change caching policy of " "pinned buffer.\n"); return -EINVAL; @@ -1307,8 +1310,8 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, return ret; } } - DRM_MASK_VAL(bo->flags, DRM_BO_FLAG_BIND_CACHED, new_flags); - flag_diff = (new_flags ^ bo->flags); + DRM_MASK_VAL(bo->mem.flags, DRM_BO_FLAG_BIND_CACHED, new_flags); + flag_diff = (new_flags ^ bo->mem.flags); /* * Check whether we dropped no_move policy, and in that case, @@ -1318,9 +1321,9 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, if ((flag_diff & DRM_BO_FLAG_NO_MOVE) && !(new_flags & DRM_BO_FLAG_NO_MOVE)) { mutex_lock(&dev->struct_mutex); - if (bo->mm_node) { - drm_mm_put_block(bo->mm_node); - bo->mm_node = NULL; + if (bo->mem.mm_node) { + drm_mm_put_block(bo->mem.mm_node); + bo->mem.mm_node = NULL; } mutex_unlock(&dev->struct_mutex); } @@ -1359,7 +1362,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, mutex_unlock(&dev->struct_mutex); } - bo->flags = new_flags; + bo->mem.flags = new_flags; return 0; } @@ -1384,9 +1387,9 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, if (ret) goto out; - ret = drm_bo_new_flags(dev, bo->flags, - (flags & mask) | (bo->mask & ~mask), hint, - 0, &new_flags, &bo->mask); + ret = drm_bo_new_flags(dev, 
bo->mem.flags, + (flags & mask) | (bo->mem.mask & ~mask), hint, + 0, &new_flags, &bo->mem.mask); if (ret) goto out; @@ -1469,7 +1472,7 @@ static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo) mutex_unlock(&dev->struct_mutex); if (ret) break; - bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT); + bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT); if (!bo->ttm) ret = -ENOMEM; break; @@ -1511,12 +1514,12 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo, INIT_LIST_HEAD(&fbo->lru); list_splice_init(&bo->lru, &fbo->lru); - bo->mm_node = NULL; + bo->mem.mm_node = NULL; bo->ttm = NULL; bo->fence = NULL; - bo->flags = 0; + bo->mem.flags = 0; - fbo->mm_node->private = (void *)fbo; + fbo->mem.mm_node->private = (void *)fbo; atomic_set(&fbo->usage, 1); atomic_inc(&bm->count); mutex_unlock(&dev->struct_mutex); @@ -1572,9 +1575,9 @@ int drm_buffer_object_create(drm_file_t * priv, #endif bo->dev = dev; bo->type = type; - bo->num_pages = num_pages; - bo->mm_node = NULL; - bo->page_alignment = page_alignment; + bo->mem.num_pages = num_pages; + bo->mem.mm_node = NULL; + bo->mem.page_alignment = page_alignment; if (bo->type == drm_bo_type_fake) { bo->offset = buffer_start; bo->buffer_start = 0; @@ -1582,10 +1585,10 @@ int drm_buffer_object_create(drm_file_t * priv, bo->buffer_start = buffer_start; } bo->priv_flags = 0; - bo->flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; + bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; atomic_inc(&bm->count); - ret = drm_bo_new_flags(dev, bo->flags, mask, hint, - 1, &new_flags, &bo->mask); + ret = drm_bo_new_flags(dev, bo->mem.flags, mask, hint, + 1, &new_flags, &bo->mem.mask); if (ret) goto out_err; ret = drm_bo_add_ttm(priv, bo); @@ -1800,7 +1803,7 @@ static int drm_bo_force_list_clean(drm_device_t * dev, drm_bo_usage_deref_locked(entry); goto retry; } - if (entry->mm_node) { + if (entry->mem.mm_node) { clean = 0; /* @@ -1836,14 +1839,14 @@ static int drm_bo_force_list_clean(drm_device_t * dev, 0); if (force_no_move) { - DRM_MASK_VAL(entry->flags, DRM_BO_FLAG_NO_MOVE, + DRM_MASK_VAL(entry->mem.flags, DRM_BO_FLAG_NO_MOVE, 0); } - if (entry->flags & DRM_BO_FLAG_NO_EVICT) { + if (entry->mem.flags & DRM_BO_FLAG_NO_EVICT) { DRM_ERROR("A DRM_BO_NO_EVICT buffer present at " "cleanup. 
Removing flag and evicting.\n"); - entry->flags &= ~DRM_BO_FLAG_NO_EVICT; - entry->mask &= ~DRM_BO_FLAG_NO_EVICT; + entry->mem.flags &= ~DRM_BO_FLAG_NO_EVICT; + entry->mem.mask &= ~DRM_BO_FLAG_NO_EVICT; } ret = drm_bo_evict(entry, mem_type, 1, force_no_move); @@ -2181,7 +2184,7 @@ int drm_bo_pci_offset(const drm_buffer_object_t *bo, { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[bo->mem_type]; + drm_mem_type_manager_t *man = &bm->man[bo->mem.mem_type]; *bus_size = 0; if (bo->type != drm_bo_type_dc) @@ -2208,8 +2211,8 @@ int drm_bo_pci_offset(const drm_buffer_object_t *bo, *bus_base = man->io_offset; } - *bus_offset = bo->mm_node->start << PAGE_SHIFT; - *bus_size = bo->num_pages << PAGE_SHIFT; + *bus_offset = bo->mem.mm_node->start << PAGE_SHIFT; + *bus_size = bo->mem.num_pages << PAGE_SHIFT; return 0; } @@ -2232,7 +2235,7 @@ int drm_bo_ioremap(drm_buffer_object_t *bo) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[bo->mem_type]; + drm_mem_type_manager_t *man = &bm->man[bo->mem.mem_type]; unsigned long bus_offset; unsigned long bus_size; unsigned long bus_base; @@ -2271,7 +2274,7 @@ void drm_bo_iounmap(drm_buffer_object_t *bo) bm = &dev->bm; - man = &bm->man[bo->mem_type]; + man = &bm->man[bo->mem.mem_type]; if (bo->iomap && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) iounmap(bo->iomap); @@ -2291,7 +2294,7 @@ void drm_bo_unmap_virtual(drm_buffer_object_t *bo) { drm_device_t *dev = bo->dev; loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT; - loff_t holelen = ((loff_t) bo->num_pages) << PAGE_SHIFT; + loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT; unmap_mapping_range(dev->dev_mapping, offset, holelen, 1); } @@ -2335,12 +2338,12 @@ static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo) map->offset = 0; map->type = _DRM_TTM; map->flags = _DRM_REMOVABLE; - map->size = bo->num_pages * PAGE_SIZE; + map->size = bo->mem.num_pages * PAGE_SIZE; atomic_inc(&bo->usage); map->handle = (void *) bo; list->file_offset_node = drm_mm_search_free(&dev->offset_manager, - bo->num_pages, 0, 0); + bo->mem.num_pages, 0, 0); if (!list->file_offset_node) { drm_bo_takedown_vm_locked(bo); @@ -2348,7 +2351,7 @@ static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo) } list->file_offset_node = drm_mm_get_block(list->file_offset_node, - bo->num_pages, 0); + bo->mem.num_pages, 0); list->hash.key = list->file_offset_node->start; if (drm_ht_insert_item(&dev->map_hash, &list->hash)) { -- cgit v1.2.3 From 71b9e876f99db219fcbf4e3ab977b64b068cc2b4 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 6 Feb 2007 16:59:45 +0100 Subject: Simplify pci map vs no pci map choice. --- linux-core/drm_bo.c | 58 +++++++++++++++++++++++++++++------------------------ 1 file changed, 32 insertions(+), 26 deletions(-) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 8a6b49dc..16c89f61 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -718,7 +718,7 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, } if (!(bo->mem.flags & DRM_BO_FLAG_BIND_CACHED)) - bo->mem.flags &= DRM_BO_FLAG_CACHED; + bo->mem.flags &= ~DRM_BO_FLAG_CACHED; bo->mem.flags &= ~DRM_BO_MASK_MEM; bo->mem.flags |= DRM_BO_FLAG_MEM_TT; bo->mem.mem_type = DRM_BO_MEM_TT; @@ -2163,6 +2163,26 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) * buffer object vm functions. 
*/ +int drm_mem_reg_is_pci(drm_device_t *dev, drm_bo_mem_reg_t *mem) +{ + drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + + if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { + if (mem->mem_type == DRM_BO_MEM_LOCAL) + return 0; + + if (man->flags & _DRM_FLAG_MEMTYPE_CMA) + return 0; + + if ((mem->mask & DRM_BO_FLAG_BIND_CACHED) && + (man->flags & _DRM_FLAG_MEMTYPE_CACHED)) + return 0; + } + return 1; +} +EXPORT_SYMBOL(drm_mem_reg_is_pci); + /** * \c Get the PCI offset for the buffer object memory. * @@ -2174,48 +2194,32 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) * \return Failure indication. * * Returns -EINVAL if the buffer object is currently not mappable. - * Otherwise returns zero. Call bo->mutex locked. + * Otherwise returns zero. */ -int drm_bo_pci_offset(const drm_buffer_object_t *bo, +int drm_bo_pci_offset(drm_device_t *dev, + drm_bo_mem_reg_t *mem, unsigned long *bus_base, unsigned long *bus_offset, unsigned long *bus_size) { - drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[bo->mem.mem_type]; + drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; *bus_size = 0; - if (bo->type != drm_bo_type_dc) - return -EINVAL; - if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)) return -EINVAL; - - if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { - drm_ttm_t *ttm = bo->ttm; - if (!bo->ttm) { - return -EINVAL; - } - - drm_ttm_fixup_caching(ttm); - - if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED)) - return 0; - if (ttm->be->flags & DRM_BE_FLAG_CMA) - return 0; - *bus_base = ttm->be->aperture_base; - } else { + if (drm_mem_reg_is_pci(dev, mem)) { + *bus_offset = mem->mm_node->start << PAGE_SHIFT; + *bus_size = mem->num_pages << PAGE_SHIFT; *bus_base = man->io_offset; } - *bus_offset = bo->mem.mm_node->start << PAGE_SHIFT; - *bus_size = bo->mem.num_pages << PAGE_SHIFT; return 0; } + /** * \c Return a kernel virtual address to the buffer object PCI memory. * @@ -2231,7 +2235,8 @@ int drm_bo_pci_offset(const drm_buffer_object_t *bo, * Call bo->mutex locked. */ -int drm_bo_ioremap(drm_buffer_object_t *bo) +#if 0 +int drm_mem_reg_ioremap(drm_bo_mem_reg_t *mem) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; @@ -2281,6 +2286,7 @@ void drm_bo_iounmap(drm_buffer_object_t *bo) bo->iomap = NULL; } +#endif /** * \c Kill all user-space virtual mappings of this buffer object. -- cgit v1.2.3 From af24465b2eddfcc5296edc830ea5ed86065a4abd Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 7 Feb 2007 12:52:23 +0100 Subject: Fix a stray unlock_kernel() in drm_vm.c Add a file for memory move helpers, drm_bo_move.c Implement generic memory move. Cached, no_move and unmapped memory temporarily broken. 
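The generic move picks a helper based on whether either memory type is fixed; a simplified sketch of the dispatch added in drm_bo_handle_move_mem below (not the patch itself):

    /* Bindable <-> bindable memory goes through the ttm helper;
     * anything involving fixed (e.g. VRAM-like) memory is handed to
     * the driver's move hook. */
    if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
        !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED))
            ret = drm_bo_move_ttm(dev, bo->ttm, evict, no_wait, &bo->mem, mem);
    else if (dev->driver->bo_driver->move)
            ret = dev->driver->bo_driver->move(dev, bo->ttm, evict, no_wait,
                                               &bo->mem, mem);
    else
            ret = -EINVAL;  /* generic memcpy fallback still disabled (#if 0) */
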
--- linux-core/drm_bo.c | 353 ++++++++++++++++++++++++---------------------------- 1 file changed, 162 insertions(+), 191 deletions(-) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 16c89f61..3f1e891d 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -78,22 +78,6 @@ static void drm_bo_add_to_lru(drm_buffer_object_t * bo, struct list_head *list; drm_mem_type_manager_t *man; - bo->mem.mem_type = 0; - - switch(bo->mem.flags & DRM_BO_MASK_MEM) { - case DRM_BO_FLAG_MEM_TT: - bo->mem.mem_type = DRM_BO_MEM_TT; - break; - case DRM_BO_FLAG_MEM_VRAM: - bo->mem.mem_type = DRM_BO_MEM_VRAM; - break; - case DRM_BO_FLAG_MEM_LOCAL: - bo->mem.mem_type = DRM_BO_MEM_LOCAL; - break; - default: - BUG_ON(1); - } - man = &bm->man[bo->mem.mem_type]; list = (bo->mem.flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? &man->pinned : &man->lru; @@ -101,53 +85,136 @@ static void drm_bo_add_to_lru(drm_buffer_object_t * bo, return; } +static int drm_bo_vm_pre_move(drm_buffer_object_t *bo, + int old_is_pci) +{ +#ifdef DRM_ODD_MM_COMPAT + int ret; + + ret = drm_bo_lock_kmm(bo); + if (ret) { + if (ret == -EAGAIN) + schedule(); + return ret; + } + drm_bo_unmap_virtual(bo); + if (old_is_pci) + drm_bo_finish_unmap(bo); +#else + drm_bo_unmap_virtual(bo); +#endif + return 0; +} + +static void drm_bo_vm_post_move(drm_buffer_object_t *bo) +{ +#ifdef DRM_ODD_MM_COMPAT + int ret; + + ret = drm_bo_remap_bound(bo); + if (ret) { + DRM_ERROR("Failed to remap a bound buffer object.\n" + "\tThis might cause a sigbus later.\n"); + } + drm_bo_unlock_kmm(bo); +#endif +} + /* - * bo locked. + * Call bo->mutex locked. */ -static int drm_move_tt_to_local(drm_buffer_object_t * bo, int evict, - int force_no_move) +static int drm_bo_add_ttm(drm_buffer_object_t * bo) { drm_device_t *dev = bo->dev; int ret = 0; - if (bo->mem.mm_node) { -#ifdef DRM_ODD_MM_COMPAT - mutex_lock(&dev->struct_mutex); - ret = drm_bo_lock_kmm(bo); - if (ret) { - mutex_unlock(&dev->struct_mutex); - if (ret == -EAGAIN) - schedule(); - return ret; - } - drm_bo_unmap_virtual(bo); - drm_bo_finish_unmap(bo); - drm_bo_unlock_kmm(bo); -#else - drm_bo_unmap_virtual(bo); - mutex_lock(&dev->struct_mutex); + bo->ttm = NULL; + + switch (bo->type) { + case drm_bo_type_dc: + bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT); + if (!bo->ttm) + ret = -ENOMEM; + break; + case drm_bo_type_user: + case drm_bo_type_fake: + break; + default: + DRM_ERROR("Illegal buffer object type\n"); + ret = -EINVAL; + break; + } + + return ret; +} + + +static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, + drm_bo_mem_reg_t *mem, + int evict, + int no_wait) +{ + drm_device_t *dev = bo->dev; + drm_buffer_manager_t *bm = &dev->bm; + int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem); + int new_is_pci = drm_mem_reg_is_pci(dev, mem); + drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type]; + drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type]; + int ret = 0; + + + if (old_is_pci || new_is_pci) + ret = drm_bo_vm_pre_move(bo, old_is_pci); + if (ret) + return ret; + + if ((!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) || + !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) && + (bo->ttm == NULL)) + ret = drm_bo_add_ttm(bo); + if (ret) + return ret; + + if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && + !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { + ret = drm_bo_move_ttm(dev, bo->ttm, evict, no_wait, + &bo->mem, mem); + } else if (dev->driver->bo_driver->move) { + ret = dev->driver->bo_driver->move(dev, bo->ttm, evict, + 
no_wait, &bo->mem, mem); + } else { + ret = -EINVAL; + DRM_ERROR("Unsupported function\n"); +#if 0 + ret = drm_bo_move_memcpy(dev, bo->ttm, evict, no_wait, + &bo->mem, mem); + ret = 0; #endif - if (evict) - drm_ttm_evict(bo->ttm); - else - drm_ttm_unbind(bo->ttm); + } - bo->mem.mem_type = DRM_BO_MEM_LOCAL; - if (!(bo->mem.flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) { - drm_mm_put_block(bo->mem.mm_node); - bo->mem.mm_node = NULL; - } - mutex_unlock(&dev->struct_mutex); + if (old_is_pci || new_is_pci) + drm_bo_vm_post_move(bo); + + if (ret) + return ret; + + if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) { + ret = dev->driver->bo_driver->invalidate_caches(dev, bo->mem.flags); + if (ret) + DRM_ERROR("Can not flush read caches\n"); } + + DRM_FLAG_MASKED(bo->priv_flags, + (evict) ? _DRM_BO_FLAG_EVICTED : 0, + _DRM_BO_FLAG_EVICTED); - bo->mem.flags &= ~DRM_BO_FLAG_MEM_TT; - bo->mem.flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; + if (bo->mem.mm_node) + bo->offset = bo->mem.mm_node->start << PAGE_SHIFT; return 0; } - /* * Call bo->mutex locked. * Wait until the buffer is idle. @@ -503,12 +570,11 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, /* * Someone might have modified the buffer before we took the buffer mutex. - */ +< */ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) goto out; - if (!(bo->mem.flags & drm_bo_type_flags(mem_type))) - goto out; + if (bo->mem.mem_type != mem_type) ret = drm_bo_wait(bo, 0, 0, no_wait); @@ -520,36 +586,36 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, evict_mem = bo->mem; evict_mem.mask = dev->driver->bo_driver->evict_flags(dev, mem_type); - ret = drm_bo_mem_space(dev, &evict_mem, no_wait); - if (ret && ret != -EAGAIN) { - DRM_ERROR("Failed to find memory space for " - "buffer eviction.\n"); + if (ret) { + if (ret != -EAGAIN) + DRM_ERROR("Failed to find memory space for " + "buffer eviction.\n"); goto out; } - if ((mem_type != DRM_BO_MEM_TT) && - (evict_mem.mem_type != DRM_BO_MEM_LOCAL)) { - ret = -EINVAL; - DRM_ERROR("Unsupported memory types for eviction.\n"); + ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait); + + if (ret) { + if (ret != -EAGAIN) + DRM_ERROR("Buffer eviction failed\n"); goto out; } - - ret = drm_move_tt_to_local(bo, 1, force_no_move); - if (ret) - goto out; + mutex_lock(&dev->struct_mutex); - list_del_init(&bo->lru); + if (evict_mem.mm_node) { + drm_mm_put_block(evict_mem.mm_node); + evict_mem.mm_node = NULL; + } + list_del(&bo->lru); drm_bo_add_to_lru(bo, bm); mutex_unlock(&dev->struct_mutex); - if (ret) - goto out; - DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED, _DRM_BO_FLAG_EVICTED); - out: + +out: return ret; } @@ -682,74 +748,6 @@ static int drm_bo_mem_space(drm_device_t *dev, return ret; } - - - -static int drm_move_local_to_tt(drm_buffer_object_t * bo, - drm_bo_mem_reg_t * mem, - int no_wait) -{ - drm_device_t *dev = bo->dev; - int ret = 0; - - bo->mem.mm_node = mem->mm_node; - - DRM_DEBUG("Flipping in to AGP 0x%08lx 0x%08lx\n", - bo->mem.mm_node->start, bo->mem.mm_node->size); - -#ifdef DRM_ODD_MM_COMPAT - mutex_lock(&dev->struct_mutex); - ret = drm_bo_lock_kmm(bo); - if (ret) { - mutex_unlock(&dev->struct_mutex); - goto out_put_unlock; - } -#endif - drm_bo_unmap_virtual(bo); - ret = drm_bind_ttm(bo->ttm, bo->mem.flags & DRM_BO_FLAG_BIND_CACHED, - bo->mem.mm_node->start); - - if (ret) { -#ifdef DRM_ODD_MM_COMPAT - drm_bo_unlock_kmm(bo); - mutex_unlock(&dev->struct_mutex); -#endif - goto out_put_unlock; - } - - if (!(bo->mem.flags & DRM_BO_FLAG_BIND_CACHED)) - 
bo->mem.flags &= ~DRM_BO_FLAG_CACHED; - bo->mem.flags &= ~DRM_BO_MASK_MEM; - bo->mem.flags |= DRM_BO_FLAG_MEM_TT; - bo->mem.mem_type = DRM_BO_MEM_TT; - bo->offset = bo->mem.mm_node->start << PAGE_SHIFT; - -#ifdef DRM_ODD_MM_COMPAT - ret = drm_bo_remap_bound(bo); - if (ret) { - /* FIXME */ - } - drm_bo_unlock_kmm(bo); - mutex_unlock(&dev->struct_mutex); -#endif - - if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) { - ret = dev->driver->bo_driver->invalidate_caches(dev, bo->mem.flags); - if (ret) - DRM_ERROR("Could not flush read caches\n"); - } - DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_EVICTED); - - return 0; - -out_put_unlock: - mutex_lock(&dev->struct_mutex); - drm_mm_put_block(bo->mem.mm_node); - bo->mem.mm_node = NULL; - mutex_unlock(&dev->struct_mutex); - return ret; -} - static int drm_bo_new_flags(drm_device_t * dev, uint32_t flags, uint32_t new_mask, uint32_t hint, int init, uint32_t * n_flags, uint32_t * n_mask) @@ -1249,19 +1247,21 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, if (ret) return ret; - if (mem.mem_type == DRM_BO_MEM_TT) { - ret = drm_move_local_to_tt(bo, &mem, no_wait); - if (ret) { - mutex_lock(&dev->struct_mutex); - list_del_init(&bo->lru); - drm_bo_add_to_lru(bo, bm); - mutex_unlock(&dev->struct_mutex); - DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - return ret; + ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait); + + if (ret) { + mutex_lock(&dev->struct_mutex); + if (mem.mm_node) { + drm_mm_put_block(mem.mm_node); + mem.mm_node = NULL; } - } else { - drm_move_tt_to_local(bo, 0, force_no_move); + DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); + list_del_init(&bo->lru); + drm_bo_add_to_lru(bo, bm); + mutex_unlock(&dev->struct_mutex); + return ret; } + return 0; } @@ -1280,11 +1280,6 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, int ret; - if (new_flags & DRM_BO_FLAG_MEM_VRAM) { - DRM_ERROR("Vram support not implemented yet\n"); - return -EINVAL; - } - DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", new_flags, bo->mem.flags); ret = driver->fence_type(new_flags, &bo->fence_class, &bo->fence_type); if (ret) { @@ -1341,6 +1336,13 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, } } + + if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) { + ret = drm_bo_add_ttm(bo); + if (ret) + return ret; + } + if (move_unfenced) { /* @@ -1453,41 +1455,6 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle, return ret; } -/* - * Call bo->mutex locked. - */ - -static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo) -{ - drm_device_t *dev = bo->dev; - int ret = 0; - - bo->ttm = NULL; - bo->map_list.user_token = 0ULL; - - switch (bo->type) { - case drm_bo_type_dc: - mutex_lock(&dev->struct_mutex); - ret = drm_bo_setup_vm_locked(bo); - mutex_unlock(&dev->struct_mutex); - if (ret) - break; - bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT); - if (!bo->ttm) - ret = -ENOMEM; - break; - case drm_bo_type_user: - case drm_bo_type_fake: - break; - default: - DRM_ERROR("Illegal buffer object type\n"); - ret = -EINVAL; - break; - } - - return ret; -} - /* * Transfer a buffer object's memory and LRU status to a newly * created object. 
User-space references remains with the old @@ -1591,10 +1558,14 @@ int drm_buffer_object_create(drm_file_t * priv, 1, &new_flags, &bo->mem.mask); if (ret) goto out_err; - ret = drm_bo_add_ttm(priv, bo); - if (ret) - goto out_err; - + + if (bo->type == drm_bo_type_dc) { + mutex_lock(&dev->struct_mutex); + ret = drm_bo_setup_vm_locked(bo); + mutex_unlock(&dev->struct_mutex); + if (ret) + goto out_err; + } ret = drm_buffer_object_validate(bo, new_flags, 0, hint & DRM_BO_HINT_DONT_BLOCK); if (ret) -- cgit v1.2.3 From c1fbd8a56653b91af57a408bbcf20a760a2bd8c8 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 7 Feb 2007 17:25:13 +0100 Subject: Checkpoint commit. Flag handling and memory type selection cleanup. glxgears won't start. --- linux-core/drm_bo.c | 282 +++++++++++++++++++--------------------------------- 1 file changed, 104 insertions(+), 178 deletions(-) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 3f1e891d..64abb118 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -59,10 +59,6 @@ static void drm_bo_unmap_virtual(drm_buffer_object_t *bo); static int drm_bo_mem_space(drm_device_t *dev, drm_bo_mem_reg_t *mem, int no_wait); -#define DRM_FLAG_MASKED(_old, _new, _mask) {\ -(_old) ^= (((_old) ^ (_new)) & (_mask)); \ -} - static inline uint32_t drm_bo_type_flags(unsigned type) { return (1 << (24 + type)); @@ -570,7 +566,7 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, /* * Someone might have modified the buffer before we took the buffer mutex. -< */ + */ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) goto out; @@ -668,11 +664,34 @@ static int drm_bo_mem_force_space(drm_device_t *dev, mutex_unlock(&dev->struct_mutex); mem->mm_node = node; mem->mem_type = mem_type; - mem->flags = drm_bo_type_flags(mem_type); return 0; } +static int drm_bo_mt_compatible(drm_mem_type_manager_t *man, + uint32_t mem_type, + uint32_t mask, + uint32_t *res_mask) +{ + uint32_t cur_flags = drm_bo_type_flags(mem_type); + + if (man->flags & _DRM_FLAG_MEMTYPE_CACHED) + cur_flags |= DRM_BO_FLAG_CACHED; + if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE) + cur_flags |= DRM_BO_FLAG_MAPPABLE; + if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT) + DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED); + + if (!(mask & DRM_BO_FLAG_FORCE_CACHING)) + DRM_FLAG_MASKED(mask, cur_flags, DRM_BO_FLAG_CACHED); + if (!(mask & DRM_BO_FLAG_FORCE_MAPPABLE)) + DRM_FLAG_MASKED(mask, cur_flags, DRM_BO_FLAG_MAPPABLE); + + *res_mask = mask; + return ((cur_flags & mask & DRM_BO_MASK_MEMTYPE) == cur_flags); +} + + static int drm_bo_mem_space(drm_device_t *dev, drm_bo_mem_reg_t *mem, int no_wait) @@ -684,6 +703,7 @@ static int drm_bo_mem_space(drm_device_t *dev, const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; uint32_t i; uint32_t mem_type = DRM_BO_MEM_LOCAL; + uint32_t cur_flags; int type_found = 0; int type_ok = 0; int has_eagain = 0; @@ -692,14 +712,17 @@ static int drm_bo_mem_space(drm_device_t *dev, for (i=0; imask ; + man = &bm->man[mem_type]; + + type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask, + &cur_flags); + if (!type_ok) continue; if (mem_type == DRM_BO_MEM_LOCAL) break; - man = &bm->man[mem_type]; mutex_lock(&dev->struct_mutex); if (man->has_type && man->use_type) { type_found = 1; @@ -717,28 +740,30 @@ static int drm_bo_mem_space(drm_device_t *dev, if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) { mem->mm_node = node; mem->mem_type = mem_type; - mem->flags = drm_bo_type_flags(mem_type); + mem->flags = cur_flags; return 0; } - 
if (!type_found) { - DRM_ERROR("Requested memory types are not supported\n"); + if (!type_found) return -EINVAL; - } - + num_prios = dev->driver->bo_driver->num_mem_busy_prio; prios = dev->driver->bo_driver->mem_busy_prio; for (i=0; imask)) + man = &bm->man[mem_type]; + + if (!drm_bo_mt_compatible(man, mem_type, mem->mask, + &cur_flags)) continue; - man = &bm->man[mem_type]; ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait); - if (ret == 0) + if (ret == 0) { + mem->flags = cur_flags; return 0; + } if (ret == -EAGAIN) has_eagain = 1; @@ -748,23 +773,10 @@ static int drm_bo_mem_space(drm_device_t *dev, return ret; } -static int drm_bo_new_flags(drm_device_t * dev, - uint32_t flags, uint32_t new_mask, uint32_t hint, - int init, uint32_t * n_flags, uint32_t * n_mask) +static int drm_bo_new_mask(drm_buffer_object_t *bo, + uint32_t new_mask, uint32_t hint) { - uint32_t new_flags = 0; uint32_t new_props; - drm_buffer_manager_t *bm = &dev->bm; - unsigned i; - - /* - * First adjust the mask to take away nonexistant memory types. - */ - - for (i = 0; i < DRM_BO_MEM_TYPES; ++i) { - if (!bm->man[i].use_type) - new_mask &= ~drm_bo_type_flags(i); - } if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { DRM_ERROR @@ -772,67 +784,7 @@ static int drm_bo_new_flags(drm_device_t * dev, "processes\n"); return -EPERM; } - if (new_mask & DRM_BO_FLAG_BIND_CACHED) { - if (((new_mask & DRM_BO_FLAG_MEM_TT) && - !(bm->man[DRM_BO_MEM_TT].flags & - _DRM_FLAG_MEMTYPE_CACHED) && - ((new_mask & DRM_BO_FLAG_MEM_VRAM) - && !(bm->man[DRM_BO_MEM_VRAM].flags & - _DRM_FLAG_MEMTYPE_CACHED)))) { - new_mask &= ~DRM_BO_FLAG_BIND_CACHED; - } else { - if (!(bm->man[DRM_BO_MEM_TT].flags & - _DRM_FLAG_MEMTYPE_CACHED)) - new_flags &= DRM_BO_FLAG_MEM_TT; - if (!(bm->man[DRM_BO_MEM_VRAM].flags & - _DRM_FLAG_MEMTYPE_CACHED)) - new_flags &= DRM_BO_FLAG_MEM_VRAM; - } - } - - if ((new_mask & DRM_BO_FLAG_READ_CACHED) && - !(new_mask & DRM_BO_FLAG_BIND_CACHED)) { - if ((new_mask & DRM_BO_FLAG_NO_EVICT) && - !(new_mask & DRM_BO_FLAG_MEM_LOCAL)) { - DRM_ERROR - ("Cannot read cached from a pinned VRAM / TT buffer\n"); - return -EINVAL; - } - } - - /* - * Determine new memory location: - */ - - if (!(flags & new_mask & DRM_BO_MASK_MEM) || init) { - new_flags = new_mask & DRM_BO_MASK_MEM; - - if (!new_flags) { - DRM_ERROR("Invalid buffer object memory flags\n"); - return -EINVAL; - } - - if (new_flags & DRM_BO_FLAG_MEM_LOCAL) { - if ((hint & DRM_BO_HINT_AVOID_LOCAL) && - new_flags & (DRM_BO_FLAG_MEM_VRAM | - DRM_BO_FLAG_MEM_TT)) { - new_flags &= ~DRM_BO_FLAG_MEM_LOCAL; - } else { - new_flags = DRM_BO_FLAG_MEM_LOCAL; - } - } - if (new_flags & DRM_BO_FLAG_MEM_TT) { - if ((new_mask & DRM_BO_FLAG_PREFER_VRAM) && - new_flags & DRM_BO_FLAG_MEM_VRAM) { - new_flags = DRM_BO_FLAG_MEM_VRAM; - } else { - new_flags = DRM_BO_FLAG_MEM_TT; - } - } - } else { - new_flags = flags & DRM_BO_MASK_MEM; - } new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_READ); @@ -842,22 +794,11 @@ static int drm_bo_new_flags(drm_device_t * dev, return -EINVAL; } - new_flags |= new_mask & ~DRM_BO_MASK_MEM; - - if (((flags ^ new_flags) & DRM_BO_FLAG_BIND_CACHED) && - (new_flags & DRM_BO_FLAG_NO_EVICT) && - (flags & (DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM))) { - if (!(flags & DRM_BO_FLAG_CACHED)) { - DRM_ERROR - ("Cannot change caching policy of pinned buffer\n"); - return -EINVAL; - } else { - new_flags &= ~DRM_BO_FLAG_CACHED; - } - } + /* + * FIXME: Check what can be done about pinned buffers here. 
+ */ - *n_flags = new_flags; - *n_mask = new_mask; + bo->mem.mask = new_mask; return 0; } @@ -1200,7 +1141,6 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, drm_buffer_manager_t *bm = &dev->bm; int ret = 0; drm_bo_mem_reg_t mem; - /* * Flush outstanding fences. */ @@ -1232,7 +1172,7 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, mutex_lock(&bm->evict_mutex); mutex_lock(&dev->struct_mutex); list_del(&bo->lru); - list_add_tail(&bo->lru,&bm->unfenced); + list_add_tail(&bo->lru, &bm->unfenced); DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, _DRM_BO_FLAG_UNFENCED); mutex_unlock(&dev->struct_mutex); @@ -1240,7 +1180,6 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, /* * Determine where to move the buffer. */ - ret = drm_bo_mem_space(dev, &mem, no_wait); mutex_unlock(&bm->evict_mutex); @@ -1250,85 +1189,76 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait); if (ret) { - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev->struct_mutex); if (mem.mm_node) { drm_mm_put_block(mem.mm_node); mem.mm_node = NULL; } DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - list_del_init(&bo->lru); + list_del(&bo->lru); drm_bo_add_to_lru(bo, bm); - mutex_unlock(&dev->struct_mutex); - return ret; + mutex_unlock(&dev->struct_mutex); } - return 0; + return ret; } + +static int drm_bo_mem_compat(drm_bo_mem_reg_t *mem) +{ + uint32_t + flag_diff = (mem->mask ^ mem->flags); + + if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0) + return 0; + if ((flag_diff & DRM_BO_FLAG_CACHED) && + (mem->mask & DRM_BO_FLAG_FORCE_CACHING)) + return 0; + if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && + (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)) + return 0; + return 1; +} + /* * bo locked. */ static int drm_buffer_object_validate(drm_buffer_object_t * bo, - uint32_t new_flags, int move_unfenced, int no_wait) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; - uint32_t flag_diff = (new_flags ^ bo->mem.flags); + uint32_t flag_diff = (bo->mem.mask ^ bo->mem.flags); drm_bo_driver_t *driver = dev->driver->bo_driver; int ret; - DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", new_flags, bo->mem.flags); - ret = driver->fence_type(new_flags, &bo->fence_class, &bo->fence_type); + DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask, + bo->mem.flags); + ret = driver->fence_type(bo->mem.mask, &bo->fence_class, &bo->fence_type); if (ret) { DRM_ERROR("Driver did not support given buffer permissions\n"); return ret; } - /* - * Move out if we need to change caching policy. - */ - - if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) && - !(bo->mem.flags & DRM_BO_FLAG_MEM_LOCAL)) { - if (bo->mem.flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { - DRM_ERROR("Cannot change caching policy of " - "pinned buffer.\n"); - return -EINVAL; - } - ret = drm_bo_move_buffer(bo, DRM_BO_FLAG_MEM_LOCAL, no_wait, 0); - if (ret) { - if (ret != -EAGAIN) - DRM_ERROR("Failed moving buffer.\n"); - return ret; - } - } - DRM_MASK_VAL(bo->mem.flags, DRM_BO_FLAG_BIND_CACHED, new_flags); - flag_diff = (new_flags ^ bo->mem.flags); - /* * Check whether we dropped no_move policy, and in that case, - * release reserved manager regions. + * release reserved manager regions, if we're evicted. 
*/ if ((flag_diff & DRM_BO_FLAG_NO_MOVE) && - !(new_flags & DRM_BO_FLAG_NO_MOVE)) { - mutex_lock(&dev->struct_mutex); - if (bo->mem.mm_node) { - drm_mm_put_block(bo->mem.mm_node); - bo->mem.mm_node = NULL; - } - mutex_unlock(&dev->struct_mutex); + !(bo->mem.mask & DRM_BO_FLAG_NO_MOVE)) { + /* FIXME */ } /* * Check whether we need to move buffer. */ - if ((bo->type != drm_bo_type_fake) && (flag_diff & DRM_BO_MASK_MEM)) { - ret = drm_bo_move_buffer(bo, new_flags, no_wait, 1); + if (!drm_bo_mem_compat(&bo->mem)) { + ret = drm_bo_move_buffer(bo, bo->mem.mask & DRM_BO_MASK_MEMTYPE, + no_wait, 1); if (ret) { if (ret != -EAGAIN) DRM_ERROR("Failed moving buffer.\n"); @@ -1336,6 +1266,9 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, } } + /* + * We might need to add a TTM. + */ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) { ret = drm_bo_add_ttm(bo); @@ -1344,11 +1277,6 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, } if (move_unfenced) { - - /* - * Place on unfenced list. - */ - DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, _DRM_BO_FLAG_UNFENCED); mutex_lock(&dev->struct_mutex); @@ -1356,15 +1284,19 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, list_add_tail(&bo->lru, &bm->unfenced); mutex_unlock(&dev->struct_mutex); } else { - DRM_FLAG_MASKED(bo->priv_flags, 0, - _DRM_BO_FLAG_UNFENCED); mutex_lock(&dev->struct_mutex); - list_del_init(&bo->lru); + list_del(&bo->lru); + if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) { + DRM_FLAG_MASKED(bo->priv_flags, 0, + _DRM_BO_FLAG_UNFENCED); + DRM_WAKEUP(& bo->event_queue); + } drm_bo_add_to_lru(bo, bm); mutex_unlock(&dev->struct_mutex); - } - - bo->mem.flags = new_flags; + } + + DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE); + return 0; } @@ -1373,10 +1305,8 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, drm_bo_arg_reply_t * rep) { drm_buffer_object_t *bo; - drm_device_t *dev = priv->head->dev; int ret; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; - uint32_t new_flags; bo = drm_lookup_buffer_object(priv, handle, 1); if (!bo) { @@ -1389,16 +1319,13 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, if (ret) goto out; - ret = drm_bo_new_flags(dev, bo->mem.flags, - (flags & mask) | (bo->mem.mask & ~mask), hint, - 0, &new_flags, &bo->mem.mask); - + DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask); + ret = drm_bo_new_mask(bo, flags, hint); if (ret) goto out; ret = - drm_buffer_object_validate(bo, new_flags, - !(hint & DRM_BO_HINT_DONT_FENCE), + drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE), no_wait); drm_bo_fill_rep_arg(bo, rep); @@ -1495,6 +1422,8 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo, *new_obj = fbo; return 0; } + + int drm_buffer_object_create(drm_file_t * priv, @@ -1510,7 +1439,6 @@ int drm_buffer_object_create(drm_file_t * priv, drm_buffer_manager_t *bm = &dev->bm; drm_buffer_object_t *bo; int ret = 0; - uint32_t new_flags; unsigned long num_pages; if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) { @@ -1553,9 +1481,10 @@ int drm_buffer_object_create(drm_file_t * priv, } bo->priv_flags = 0; bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; + bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; atomic_inc(&bm->count); - ret = drm_bo_new_flags(dev, bo->mem.flags, mask, hint, - 1, &new_flags, &bo->mem.mask); + ret = drm_bo_new_mask(bo, mask, hint); + if (ret) goto out_err; @@ -1566,7 +1495,7 @@ int drm_buffer_object_create(drm_file_t * priv, if (ret) 
goto out_err; } - ret = drm_buffer_object_validate(bo, new_flags, 0, + ret = drm_buffer_object_validate(bo, 0, hint & DRM_BO_HINT_DONT_BLOCK); if (ret) goto out_err; @@ -1806,12 +1735,10 @@ static int drm_bo_force_list_clean(drm_device_t * dev, entry->fence = NULL; } - DRM_MASK_VAL(entry->priv_flags, _DRM_BO_FLAG_UNFENCED, - 0); + DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); if (force_no_move) { - DRM_MASK_VAL(entry->mem.flags, DRM_BO_FLAG_NO_MOVE, - 0); + DRM_FLAG_MASKED(entry->mem.flags, 0, DRM_BO_FLAG_NO_MOVE); } if (entry->mem.flags & DRM_BO_FLAG_NO_EVICT) { DRM_ERROR("A DRM_BO_NO_EVICT buffer present at " @@ -2146,8 +2073,7 @@ int drm_mem_reg_is_pci(drm_device_t *dev, drm_bo_mem_reg_t *mem) if (man->flags & _DRM_FLAG_MEMTYPE_CMA) return 0; - if ((mem->mask & DRM_BO_FLAG_BIND_CACHED) && - (man->flags & _DRM_FLAG_MEMTYPE_CACHED)) + if (mem->flags & DRM_BO_FLAG_CACHED) return 0; } return 1; -- cgit v1.2.3 From 09984ad77bdeca0e9d87b1fe2be1489205fda297 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 8 Feb 2007 11:55:24 +0100 Subject: Update memory compatibility tests. Now only pinned buffers are broken. --- linux-core/drm_bo.c | 89 ++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 78 insertions(+), 11 deletions(-) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 64abb118..f4147be2 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -129,7 +129,7 @@ static int drm_bo_add_ttm(drm_buffer_object_t * bo) switch (bo->type) { case drm_bo_type_dc: - bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT); + bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT); if (!bo->ttm) ret = -ENOMEM; break; @@ -173,10 +173,10 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, return ret; if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && - !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { + !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { ret = drm_bo_move_ttm(dev, bo->ttm, evict, no_wait, &bo->mem, mem); - } else if (dev->driver->bo_driver->move) { + } else if (dev->driver->bo_driver->move) { ret = dev->driver->bo_driver->move(dev, bo->ttm, evict, no_wait, &bo->mem, mem); } else { @@ -345,6 +345,7 @@ static void drm_bo_destroy_locked(drm_buffer_object_t *bo) drm_destroy_ttm(bo->ttm); bo->ttm = NULL; } + atomic_dec(&bm->count); drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ); @@ -571,6 +572,7 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) goto out; if (bo->mem.mem_type != mem_type) + goto out; ret = drm_bo_wait(bo, 0, 0, no_wait); @@ -580,6 +582,9 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, goto out; } + if (bo->type != drm_bo_type_dc) + goto out1; + evict_mem = bo->mem; evict_mem.mask = dev->driver->bo_driver->evict_flags(dev, mem_type); ret = drm_bo_mem_space(dev, &evict_mem, no_wait); @@ -599,6 +604,7 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, goto out; } +out1: mutex_lock(&dev->struct_mutex); if (evict_mem.mm_node) { drm_mm_put_block(evict_mem.mm_node); @@ -674,6 +680,7 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t *man, uint32_t *res_mask) { uint32_t cur_flags = drm_bo_type_flags(mem_type); + uint32_t flag_diff; if (man->flags & _DRM_FLAG_MEMTYPE_CACHED) cur_flags |= DRM_BO_FLAG_CACHED; @@ -682,13 +689,21 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t *man, if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT) DRM_FLAG_MASKED(cur_flags, mask, 
DRM_BO_FLAG_CACHED); - if (!(mask & DRM_BO_FLAG_FORCE_CACHING)) - DRM_FLAG_MASKED(mask, cur_flags, DRM_BO_FLAG_CACHED); - if (!(mask & DRM_BO_FLAG_FORCE_MAPPABLE)) - DRM_FLAG_MASKED(mask, cur_flags, DRM_BO_FLAG_MAPPABLE); - - *res_mask = mask; - return ((cur_flags & mask & DRM_BO_MASK_MEMTYPE) == cur_flags); + if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0) { + return 0; + } + flag_diff = (mask ^ cur_flags); + if ((flag_diff & DRM_BO_FLAG_CACHED) && + (mask & DRM_BO_FLAG_FORCE_CACHING)) { + return 0; + } + if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && + (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) { + return 0; + } + + *res_mask = cur_flags; + return 1; } @@ -778,6 +793,16 @@ static int drm_bo_new_mask(drm_buffer_object_t *bo, { uint32_t new_props; + if (bo->type == drm_bo_type_user) { + DRM_ERROR("User buffers are not supported yet\n"); + return -EINVAL; + } + if (bo->type == drm_bo_type_fake && + !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) { + DRM_ERROR("Fake buffers must be pinned.\n"); + return -EINVAL; + } + if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { DRM_ERROR ("DRM_BO_FLAG_NO_EVICT is only available to priviliged " @@ -785,7 +810,6 @@ static int drm_bo_new_mask(drm_buffer_object_t *bo, return -EPERM; } - new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_READ); @@ -1220,6 +1244,43 @@ static int drm_bo_mem_compat(drm_bo_mem_reg_t *mem) return 1; } +static int drm_bo_check_fake(drm_device_t *dev, drm_bo_mem_reg_t *mem) +{ + drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man; + uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; + const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; + uint32_t i; + int type_ok = 0; + uint32_t mem_type = 0; + uint32_t cur_flags; + + if (drm_bo_mem_compat(mem)) + return 0; + + BUG_ON(mem->mm_node); + + for (i=0; iman[mem_type]; + type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask, + &cur_flags); + if (type_ok) + break; + } + + if (type_ok) { + mem->mm_node = NULL; + mem->mem_type = mem_type; + mem->flags = cur_flags; + DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE); + return 0; + } + + DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask); + return -EINVAL; +} + /* * bo locked. */ @@ -1242,6 +1303,12 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, return ret; } + if (bo->type == drm_bo_type_fake) { + ret = drm_bo_check_fake(dev, &bo->mem); + if (ret) + return ret; + } + /* * Check whether we dropped no_move policy, and in that case, * release reserved manager regions, if we're evicted. -- cgit v1.2.3 From 1257907fa9a24de7aa95485e1b3ab509fdc4d4e6 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 8 Feb 2007 13:29:08 +0100 Subject: Simplify external ttm page allocation. Implement a memcpy fallback for copying between buffers. 
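The memcpy fallback enabled here amounts to copying the buffer's backing pages from the old placement into the new one with the CPU whenever no accelerated path is available. A minimal, compilable sketch of that idea follows; it assumes both placements are already mapped and page-sized granularity, and the names (fallback_move_pages, SKETCH_PAGE_SIZE) are illustrative stand-ins, not the kernel's drm_bo_move_memcpy.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096u	/* illustrative page size, an assumption */

/*
 * Copy num_pages pages from an already-mapped source placement to an
 * already-mapped destination placement, one page at a time, the way a
 * CPU fallback move would map and copy each page in turn.
 */
static void fallback_move_pages(void *dst, const void *src, size_t num_pages)
{
	size_t i;

	for (i = 0; i < num_pages; ++i)
		memcpy((uint8_t *)dst + i * SKETCH_PAGE_SIZE,
		       (const uint8_t *)src + i * SKETCH_PAGE_SIZE,
		       SKETCH_PAGE_SIZE);
}
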
--- linux-core/drm_bo.c | 73 ----------------------------------------------------- 1 file changed, 73 deletions(-) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index f4147be2..845db3fe 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -180,13 +180,8 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, ret = dev->driver->bo_driver->move(dev, bo->ttm, evict, no_wait, &bo->mem, mem); } else { - ret = -EINVAL; - DRM_ERROR("Unsupported function\n"); -#if 0 ret = drm_bo_move_memcpy(dev, bo->ttm, evict, no_wait, &bo->mem, mem); - ret = 0; -#endif } if (old_is_pci || new_is_pci) @@ -2184,74 +2179,6 @@ int drm_bo_pci_offset(drm_device_t *dev, } -/** - * \c Return a kernel virtual address to the buffer object PCI memory. - * - * \param bo The buffer object. - * \return Failure indication. - * - * Returns -EINVAL if the buffer object is currently not mappable. - * Returns -ENOMEM if the ioremap operation failed. - * Otherwise returns zero. - * - * After a successfull call, bo->iomap contains the virtual address, or NULL - * if the buffer object content is not accessible through PCI space. - * Call bo->mutex locked. - */ - -#if 0 -int drm_mem_reg_ioremap(drm_bo_mem_reg_t *mem) -{ - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[bo->mem.mem_type]; - unsigned long bus_offset; - unsigned long bus_size; - unsigned long bus_base; - int ret; - - BUG_ON(bo->iomap); - - ret = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size); - if (ret || bus_size == 0) - return ret; - - if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) - bo->iomap = (void *) (((u8 *)man->io_addr) + bus_offset); - else { - bo->iomap = ioremap_nocache(bus_base + bus_offset, bus_size); - if (bo->iomap) - return -ENOMEM; - } - - return 0; -} - -/** - * \c Unmap mapping obtained using drm_bo_ioremap - * - * \param bo The buffer object. - * - * Call bo->mutex locked. - */ - -void drm_bo_iounmap(drm_buffer_object_t *bo) -{ - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm; - drm_mem_type_manager_t *man; - - - bm = &dev->bm; - man = &bm->man[bo->mem.mem_type]; - - if (bo->iomap && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) - iounmap(bo->iomap); - - bo->iomap = NULL; -} -#endif - /** * \c Kill all user-space virtual mappings of this buffer object. * -- cgit v1.2.3 From e4b2da440699f581a8779ea8cb9e99e4c903e6a7 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 8 Feb 2007 16:21:38 +0100 Subject: A minor function interface change and some memcpy bugfixing. Hooray!! it sort of works with a fixed AGP area as faked VRAM. 
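The interface change referred to above passes the buffer object itself to every move back-end instead of a (dev, ttm, old_mem, new_mem) argument list, so the TTM rebind, the driver's accelerated move and the CPU fallback all share one signature and one dispatch site. The following compilable sketch mirrors the ordering visible in the diff below, but all names (sketch_*, is_fixed, driver_move) are assumed stand-ins rather than the real DRM symbols.

#include <stddef.h>

struct sketch_mem_reg {
	unsigned int mem_type;	/* which aperture / memory type */
	int is_fixed;		/* fixed (VRAM-like) rather than dynamically bound */
};

struct sketch_bo {
	struct sketch_mem_reg mem;	/* current placement */
	int (*driver_move)(struct sketch_bo *bo, int evict, int no_wait,
			   struct sketch_mem_reg *new_mem);
};

/* Stubs standing in for the real move back-ends. */
static int sketch_move_ttm(struct sketch_bo *bo, int evict, int no_wait,
			   struct sketch_mem_reg *new_mem)
{
	(void)bo; (void)evict; (void)no_wait; (void)new_mem;
	return 0;
}

static int sketch_move_memcpy(struct sketch_bo *bo, int evict, int no_wait,
			      struct sketch_mem_reg *new_mem)
{
	(void)bo; (void)evict; (void)no_wait; (void)new_mem;
	return 0;
}

/*
 * Prefer a TTM rebind when both placements are dynamically bound, then a
 * driver-provided accelerated move, and fall back to a CPU copy.
 */
static int sketch_handle_move(struct sketch_bo *bo, int evict, int no_wait,
			      struct sketch_mem_reg *new_mem)
{
	if (!bo->mem.is_fixed && !new_mem->is_fixed)
		return sketch_move_ttm(bo, evict, no_wait, new_mem);
	if (bo->driver_move)
		return bo->driver_move(bo, evict, no_wait, new_mem);
	return sketch_move_memcpy(bo, evict, no_wait, new_mem);
}

Taking the object as the single handle keeps the callers from threading device, ttm and old-placement pointers through every path, which is what makes the one-line dispatch above possible.
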
--- linux-core/drm_bo.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 845db3fe..728afb41 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -124,7 +124,6 @@ static int drm_bo_add_ttm(drm_buffer_object_t * bo) { drm_device_t *dev = bo->dev; int ret = 0; - bo->ttm = NULL; switch (bo->type) { @@ -174,14 +173,11 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { - ret = drm_bo_move_ttm(dev, bo->ttm, evict, no_wait, - &bo->mem, mem); + ret = drm_bo_move_ttm(bo, evict, no_wait, mem); } else if (dev->driver->bo_driver->move) { - ret = dev->driver->bo_driver->move(dev, bo->ttm, evict, - no_wait, &bo->mem, mem); + ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem); } else { - ret = drm_bo_move_memcpy(dev, bo->ttm, evict, no_wait, - &bo->mem, mem); + ret = drm_bo_move_memcpy(bo, evict, no_wait, mem); } if (old_is_pci || new_is_pci) -- cgit v1.2.3 From a0ed808d05a7965366e329a6e8f4e538350b9c23 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 8 Feb 2007 19:06:39 +0100 Subject: Don't create a ttm just to copy from. --- linux-core/drm_bo.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 728afb41..e3ecaf45 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -164,8 +164,7 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, if (ret) return ret; - if ((!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) || - !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) && + if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) ret = drm_bo_add_ttm(bo); if (ret) -- cgit v1.2.3 From b2bcbf874b0f26ca0c490fb0453bef64ce6d9dd7 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 8 Feb 2007 21:28:33 +0100 Subject: Add an accelerated buffer copy cleanup helper. Export helper functions and make some important buffer-object functions non-static. Add an i915 accelerated blit buffer move for pci memory buffers. --- linux-core/drm_bo.c | 62 ++++++++--------------------------------------------- 1 file changed, 9 insertions(+), 53 deletions(-) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index e3ecaf45..67e7d37f 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -56,8 +56,6 @@ static void drm_bo_destroy_locked(drm_buffer_object_t *bo); static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo); static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo); static void drm_bo_unmap_virtual(drm_buffer_object_t *bo); -static int drm_bo_mem_space(drm_device_t *dev, drm_bo_mem_reg_t *mem, - int no_wait); static inline uint32_t drm_bo_type_flags(unsigned type) { @@ -68,8 +66,8 @@ static inline uint32_t drm_bo_type_flags(unsigned type) * bo locked. dev->struct_mutex locked. */ -static void drm_bo_add_to_lru(drm_buffer_object_t * bo, - drm_buffer_manager_t * bm) +void drm_bo_add_to_lru(drm_buffer_object_t * bo, + drm_buffer_manager_t * bm) { struct list_head *list; drm_mem_type_manager_t *man; @@ -206,8 +204,8 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, * Wait until the buffer is idle. 
*/ -static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, - int no_wait) +int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, + int no_wait) { drm_fence_object_t *fence = bo->fence; @@ -697,9 +695,9 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t *man, } -static int drm_bo_mem_space(drm_device_t *dev, - drm_bo_mem_reg_t *mem, - int no_wait) +int drm_bo_mem_space(drm_device_t *dev, + drm_bo_mem_reg_t *mem, + int no_wait) { drm_buffer_manager_t *bm= &dev->bm; drm_mem_type_manager_t *man; @@ -777,6 +775,8 @@ static int drm_bo_mem_space(drm_device_t *dev, ret = (has_eagain) ? -EAGAIN : -ENOMEM; return ret; } +EXPORT_SYMBOL(drm_bo_mem_space); + static int drm_bo_new_mask(drm_buffer_object_t *bo, uint32_t new_mask, uint32_t hint) @@ -1439,50 +1439,6 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle, return ret; } -/* - * Transfer a buffer object's memory and LRU status to a newly - * created object. User-space references remains with the old - * object. Call bo->mutex locked. - */ - -int drm_buffer_object_transfer(drm_buffer_object_t *bo, - drm_buffer_object_t **new_obj) -{ - drm_buffer_object_t *fbo; - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; - - fbo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ); - if (!fbo) - return -ENOMEM; - - *fbo = *bo; - mutex_init(&fbo->mutex); - mutex_lock(&fbo->mutex); - mutex_lock(&dev->struct_mutex); - - INIT_LIST_HEAD(&fbo->ddestroy); - INIT_LIST_HEAD(&fbo->lru); - list_splice_init(&bo->lru, &fbo->lru); - - bo->mem.mm_node = NULL; - bo->ttm = NULL; - bo->fence = NULL; - bo->mem.flags = 0; - - fbo->mem.mm_node->private = (void *)fbo; - atomic_set(&fbo->usage, 1); - atomic_inc(&bm->count); - mutex_unlock(&dev->struct_mutex); - mutex_unlock(&fbo->mutex); - - *new_obj = fbo; - return 0; -} - - - - int drm_buffer_object_create(drm_file_t * priv, unsigned long size, drm_bo_type_t type, -- cgit v1.2.3 From 6a49d9a8abd9f168211017c2d585d0d64e89c530 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 9 Feb 2007 00:02:02 +0100 Subject: Fix evict_mutex locking range. Implement unmappable buffers. (fault moves them to mappable when needed). Various bugfixes. --- linux-core/drm_bo.c | 31 +++++++++++++------------------ 1 file changed, 13 insertions(+), 18 deletions(-) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 67e7d37f..3e0d05d2 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1149,7 +1149,7 @@ static void drm_buffer_user_object_unmap(drm_file_t * priv, */ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, - int no_wait, int force_no_move) + int no_wait, int force_no_move, int move_unfenced) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; @@ -1161,14 +1161,6 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, drm_bo_busy(bo); - /* - * Make sure we're not mapped. - */ - - ret = drm_bo_wait_unmapped(bo, no_wait); - if (ret) - return ret; - /* * Wait for outstanding fences. */ @@ -1195,15 +1187,15 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, * Determine where to move the buffer. 
*/ ret = drm_bo_mem_space(dev, &mem, no_wait); - mutex_unlock(&bm->evict_mutex); if (ret) - return ret; + goto out_unlock; ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait); - if (ret) { - mutex_lock(&dev->struct_mutex); + out_unlock: + if (ret || !move_unfenced) { + mutex_lock(&dev->struct_mutex); if (mem.mm_node) { drm_mm_put_block(mem.mm_node); mem.mm_node = NULL; @@ -1214,6 +1206,7 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, mutex_unlock(&dev->struct_mutex); } + mutex_unlock(&bm->evict_mutex); return ret; } @@ -1293,6 +1286,10 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, return ret; } + ret = drm_bo_wait_unmapped(bo, no_wait); + if (ret) + return ret; + if (bo->type == drm_bo_type_fake) { ret = drm_bo_check_fake(dev, &bo->mem); if (ret) @@ -1315,7 +1312,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, if (!drm_bo_mem_compat(&bo->mem)) { ret = drm_bo_move_buffer(bo, bo->mem.mask & DRM_BO_MASK_MEMTYPE, - no_wait, 1); + no_wait, 1, move_unfenced); if (ret) { if (ret != -EAGAIN) DRM_ERROR("Failed moving buffer.\n"); @@ -1728,11 +1725,9 @@ static int drm_bo_force_list_clean(drm_device_t * dev, unsigned long _end = jiffies + 3 * DRM_HZ; do { ret = drm_bo_wait(entry, 0, 1, 0); - if (ret && allow_errors) { - if (ret == -EINTR) - ret = -EAGAIN; + if (ret && allow_errors) goto out_err; - } + } while (ret && !time_after_eq(jiffies, _end)); if (entry->fence) { -- cgit v1.2.3 From 99acdaee482fc8a2fc6718317e2f546401e93739 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 9 Feb 2007 00:07:29 +0100 Subject: Fix copyright statements. --- linux-core/drm_bo.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 3e0d05d2..c8e1e2b6 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -10,6 +10,10 @@ * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, @@ -18,11 +22,6 @@ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. 
- * * **************************************************************************/ /* -- cgit v1.2.3 From d32b21e016c371b8676f42da5fc3aeded039a6c8 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 9 Feb 2007 00:11:53 +0100 Subject: Remove some code that should have gone in commit 6a49d9a8abd9f168211017c2d585d0d64e89c530 --- linux-core/drm_bo.c | 19 ------------------- 1 file changed, 19 deletions(-) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index c8e1e2b6..3339d5a8 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1329,25 +1329,6 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, return ret; } - if (move_unfenced) { - DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, - _DRM_BO_FLAG_UNFENCED); - mutex_lock(&dev->struct_mutex); - list_del(&bo->lru); - list_add_tail(&bo->lru, &bm->unfenced); - mutex_unlock(&dev->struct_mutex); - } else { - mutex_lock(&dev->struct_mutex); - list_del(&bo->lru); - if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) { - DRM_FLAG_MASKED(bo->priv_flags, 0, - _DRM_BO_FLAG_UNFENCED); - DRM_WAKEUP(& bo->event_queue); - } - drm_bo_add_to_lru(bo, bm); - mutex_unlock(&dev->struct_mutex); - } - DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE); return 0; -- cgit v1.2.3 From 57df3980724d3da446c4576b3fadcd89c5da414e Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 9 Feb 2007 12:43:18 +0100 Subject: Reinstate some LRU handling. --- linux-core/drm_bo.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 3339d5a8..8d2b544e 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1145,6 +1145,7 @@ static void drm_buffer_user_object_unmap(drm_file_t * priv, /* * bo->mutex locked. + * Note that new_mem_flags are NOT transferred to the bo->mem.mask. */ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, @@ -1200,6 +1201,7 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, mem.mm_node = NULL; } DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); + DRM_WAKEUP(&bo->event_queue); list_del(&bo->lru); drm_bo_add_to_lru(bo, bm); mutex_unlock(&dev->struct_mutex); @@ -1329,6 +1331,25 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, return ret; } + /* + * Adjust lru to be sure. + */ + + mutex_lock(&dev->struct_mutex); + list_del(&bo->lru); + if (move_unfenced) { + list_add_tail(&bo->lru, &bm->unfenced); + DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, + _DRM_BO_FLAG_UNFENCED); + } else { + drm_bo_add_to_lru(bo, bm); + if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) { + DRM_WAKEUP(&bo->event_queue); + DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); + } + } + mutex_unlock(&dev->struct_mutex); + DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE); return 0; -- cgit v1.2.3 From 85ee2a8d044cd4d8de4894a794151af9471648e3 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sat, 10 Feb 2007 12:06:36 +0100 Subject: Various bugfixes. 
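The LRU handling reinstated in the previous commit boils down to: after validation, requeue the object on the unfenced list while a fence is still outstanding, otherwise put it back on its normal LRU, clear the unfenced flag and wake anyone sleeping on the event queue. Below is a small userspace analogue of the flag-and-wakeup part only, using a pthread mutex and condition variable in place of dev->struct_mutex and bo->event_queue; list handling is omitted and all names are illustrative assumptions.

#include <pthread.h>

/*
 * The "unfenced" flag plays the role of _DRM_BO_FLAG_UNFENCED, the mutex
 * the role of dev->struct_mutex and the condition variable the role of
 * bo->event_queue.  Illustrative sketch only.
 */
struct sketch_bo {
	int unfenced;
	pthread_mutex_t lock;
	pthread_cond_t event;
};

static void sketch_requeue(struct sketch_bo *bo, int move_unfenced)
{
	pthread_mutex_lock(&bo->lock);
	if (move_unfenced) {
		/* Park the object on the unfenced list until it is fenced. */
		bo->unfenced = 1;
	} else {
		/* Back onto its normal LRU (list handling omitted here);
		 * if it was marked unfenced, clear the flag and wake anyone
		 * waiting for that transition. */
		if (bo->unfenced) {
			bo->unfenced = 0;
			pthread_cond_broadcast(&bo->event);
		}
	}
	pthread_mutex_unlock(&bo->lock);
}
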
--- linux-core/drm_bo.c | 79 +++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 62 insertions(+), 17 deletions(-) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 8d2b544e..41f4e002 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -85,11 +85,8 @@ static int drm_bo_vm_pre_move(drm_buffer_object_t *bo, int ret; ret = drm_bo_lock_kmm(bo); - if (ret) { - if (ret == -EAGAIN) - schedule(); + if (ret) return ret; - } drm_bo_unmap_virtual(bo); if (old_is_pci) drm_bo_finish_unmap(bo); @@ -142,6 +139,8 @@ static int drm_bo_add_ttm(drm_buffer_object_t * bo) } + + static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, drm_bo_mem_reg_t *mem, int evict, @@ -155,33 +154,63 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type]; int ret = 0; - + if (old_is_pci || new_is_pci) ret = drm_bo_vm_pre_move(bo, old_is_pci); if (ret) return ret; + /* + * Create and bind a ttm if required. + */ + if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && - (bo->ttm == NULL)) + (bo->ttm == NULL)) { ret = drm_bo_add_ttm(bo); - if (ret) - return ret; + if (ret) + goto out_err; - if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && + if (mem->mem_type != DRM_BO_MEM_LOCAL) { + ret = drm_bind_ttm(bo->ttm, new_man->flags & + DRM_BO_FLAG_CACHED, + mem->mm_node->start); + if (ret) + goto out_err; + } + } + + if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) { + + drm_bo_mem_reg_t *old_mem = &bo->mem; + uint32_t save_flags = old_mem->flags; + uint32_t save_mask = old_mem->mask; + + *old_mem = *mem; + mem->mm_node = NULL; + old_mem->mask = save_mask; + DRM_FLAG_MASKED(save_flags, mem->flags, + DRM_BO_MASK_MEMTYPE); + + } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { + ret = drm_bo_move_ttm(bo, evict, no_wait, mem); + } else if (dev->driver->bo_driver->move) { ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem); + } else { + ret = drm_bo_move_memcpy(bo, evict, no_wait, mem); + } + if (ret) + goto out_err; + if (old_is_pci || new_is_pci) drm_bo_vm_post_move(bo); - if (ret) - return ret; - if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) { ret = dev->driver->bo_driver->invalidate_caches(dev, bo->mem.flags); if (ret) @@ -196,6 +225,19 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, bo->offset = bo->mem.mm_node->start << PAGE_SHIFT; return 0; + +out_err: + if (old_is_pci || new_is_pci) + drm_bo_vm_post_move(bo); + + new_man = &bm->man[bo->mem.mem_type]; + if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) { + drm_ttm_unbind(bo->ttm); + drm_destroy_ttm(bo->ttm); + bo->ttm = NULL; + } + + return ret; } /* @@ -269,7 +311,7 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all) bm->nice_mode = 0; DRM_ERROR("Detected GPU lockup or " "fence driver was taken down. " - "Evicting waiting buffers.\n"); + "Evicting buffer.\n"); } if (bo->fence) { drm_fence_usage_deref_unlocked(dev, bo->fence); @@ -1148,8 +1190,8 @@ static void drm_buffer_user_object_unmap(drm_file_t * priv, * Note that new_mem_flags are NOT transferred to the bo->mem.mask. 
*/ -static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, - int no_wait, int force_no_move, int move_unfenced) +int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, + int no_wait, int force_no_move, int move_unfenced) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; @@ -1387,6 +1429,7 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, out: mutex_unlock(&bo->mutex); + drm_bo_usage_deref_unlocked(bo); return ret; } @@ -1481,6 +1524,7 @@ int drm_buffer_object_create(drm_file_t * priv, #endif bo->dev = dev; bo->type = type; + bo->mem.mem_type = DRM_BO_MEM_LOCAL; bo->mem.num_pages = num_pages; bo->mem.mm_node = NULL; bo->mem.page_alignment = page_alignment; @@ -1491,8 +1535,8 @@ int drm_buffer_object_create(drm_file_t * priv, bo->buffer_start = buffer_start; } bo->priv_flags = 0; - bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; - bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; + bo->mem.flags = 0; + bo->mem.mask = 0; atomic_inc(&bm->count); ret = drm_bo_new_mask(bo, mask, hint); @@ -1517,6 +1561,7 @@ int drm_buffer_object_create(drm_file_t * priv, out_err: mutex_unlock(&bo->mutex); + drm_bo_usage_deref_unlocked(bo); return ret; } -- cgit v1.2.3 From f02f83ee08a2bb87700544a9b67f475532e84af4 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 12 Feb 2007 17:47:57 +0100 Subject: Cleanup and fix support for pinned buffers. --- linux-core/drm_bo.c | 369 ++++++++++++++++++++++++++++++---------------------- 1 file changed, 212 insertions(+), 157 deletions(-) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 41f4e002..814175cd 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -65,19 +65,26 @@ static inline uint32_t drm_bo_type_flags(unsigned type) * bo locked. dev->struct_mutex locked. */ -void drm_bo_add_to_lru(drm_buffer_object_t * bo, - drm_buffer_manager_t * bm) +void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo) { - struct list_head *list; drm_mem_type_manager_t *man; - man = &bm->man[bo->mem.mem_type]; - list = (bo->mem.flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? - &man->pinned : &man->lru; - list_add_tail(&bo->lru, list); - return; + man = &bo->dev->bm.man[bo->pinned_mem_type]; + list_add_tail(&bo->pinned_lru, &man->pinned); } +void drm_bo_add_to_lru(drm_buffer_object_t * bo) +{ + drm_mem_type_manager_t *man; + + if (bo->mem.mm_node != bo->pinned_node) { + man = &bo->dev->bm.man[bo->mem.mem_type]; + list_add_tail(&bo->lru, &man->lru); + } else + INIT_LIST_HEAD(&bo->lru); +} + + static int drm_bo_vm_pre_move(drm_buffer_object_t *bo, int old_is_pci) { @@ -275,6 +282,39 @@ int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, return 0; } +static int drm_bo_expire_fence(drm_buffer_object_t *bo, + int allow_errors) +{ + drm_device_t *dev = bo->dev; + drm_buffer_manager_t *bm = &dev->bm; + + if (bo->fence) { + if (bm->nice_mode) { + unsigned long _end = jiffies + 3 * DRM_HZ; + int ret; + do { + ret = drm_bo_wait(bo, 0, 1, 0); + if (ret && allow_errors) + return ret; + + } while (ret && !time_after_eq(jiffies, _end)); + + if (bo->fence) { + bm->nice_mode = 0; + DRM_ERROR("Detected GPU lockup or " + "fence driver was taken down. " + "Evicting buffer.\n"); + } + } + if (bo->fence) { + drm_fence_usage_deref_unlocked(dev, bo->fence); + bo->fence = NULL; + } + } + return 0; +} + + /* * Call dev->struct_mutex locked. 
* Attempts to remove all private references to a buffer by expiring its @@ -299,26 +339,9 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all) bo->fence = NULL; } - if (bo->fence && remove_all) { - if (bm->nice_mode) { - unsigned long _end = jiffies + 3 * DRM_HZ; - int ret; - do { - ret = drm_bo_wait(bo, 0, 1, 0); - } while (ret && !time_after_eq(jiffies, _end)); + if (bo->fence && remove_all) + (void) drm_bo_expire_fence(bo, 0); - if (bo->fence) { - bm->nice_mode = 0; - DRM_ERROR("Detected GPU lockup or " - "fence driver was taken down. " - "Evicting buffer.\n"); - } - if (bo->fence) { - drm_fence_usage_deref_unlocked(dev, bo->fence); - bo->fence = NULL; - } - } - } mutex_lock(&dev->struct_mutex); if (!atomic_dec_and_test(&bo->usage)) { @@ -331,6 +354,11 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all) drm_mm_put_block(bo->mem.mm_node); bo->mem.mm_node = NULL; } + list_del_init(&bo->pinned_lru); + if (bo->pinned_node) { + drm_mm_put_block(bo->pinned_node); + bo->pinned_node = NULL; + } list_del_init(&bo->ddestroy); mutex_unlock(&bo->mutex); drm_bo_destroy_locked(bo); @@ -361,7 +389,10 @@ static void drm_bo_destroy_locked(drm_buffer_object_t *bo) drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; - if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && atomic_read(&bo->usage) == 0) { + if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && + list_empty(&bo->pinned_lru) && bo->pinned_node == NULL && + list_empty(&bo->ddestroy) && + atomic_read(&bo->usage) == 0) { BUG_ON(bo->fence != NULL); #ifdef DRM_ODD_MM_COMPAT @@ -404,9 +435,10 @@ static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all) drm_buffer_object_t *entry, *nentry; struct list_head *list, *next; + list_for_each_safe(list, next, &bm->ddestroy) { entry = list_entry(list, drm_buffer_object_t, ddestroy); - + nentry = NULL; if (next != &bm->ddestroy) { nentry = list_entry(next, drm_buffer_object_t, @@ -420,7 +452,6 @@ static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all) atomic_dec(&nentry->usage); } } - } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) @@ -566,7 +597,7 @@ int drm_fence_buffer_objects(drm_file_t * priv, DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); DRM_WAKEUP(&entry->event_queue); - drm_bo_add_to_lru(entry, bm); + drm_bo_add_to_lru(entry); } mutex_unlock(&entry->mutex); drm_bo_usage_deref_locked(entry); @@ -587,11 +618,10 @@ EXPORT_SYMBOL(drm_fence_buffer_objects); */ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, - int no_wait, int force_no_move) + int no_wait) { int ret = 0; drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; drm_bo_mem_reg_t evict_mem; /* @@ -611,12 +641,21 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, goto out; } - if (bo->type != drm_bo_type_dc) + + evict_mem = bo->mem; + evict_mem.mm_node = NULL; + + if (bo->type == drm_bo_type_fake) { + bo->mem.mem_type = DRM_BO_MEM_LOCAL; + bo->mem.mm_node = NULL; + bo->pinned_mem_type = DRM_BO_MEM_LOCAL; + bo->pinned_node = NULL; goto out1; + } evict_mem = bo->mem; evict_mem.mask = dev->driver->bo_driver->evict_flags(dev, mem_type); - ret = drm_bo_mem_space(dev, &evict_mem, no_wait); + ret = drm_bo_mem_space(bo, &evict_mem, no_wait); if (ret) { if (ret != -EAGAIN) @@ -625,6 +664,9 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, goto out; } + if (bo->pinned_node) + DRM_ERROR("Evicting pinned buffer\n"); + ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait); if 
(ret) { @@ -640,7 +682,7 @@ out1: evict_mem.mm_node = NULL; } list_del(&bo->lru); - drm_bo_add_to_lru(bo, bm); + drm_bo_add_to_lru(bo); mutex_unlock(&dev->struct_mutex); DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED, @@ -682,7 +724,7 @@ static int drm_bo_mem_force_space(drm_device_t *dev, mutex_lock(&entry->mutex); BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); - ret = drm_bo_evict(entry, mem_type, no_wait, 0); + ret = drm_bo_evict(entry, mem_type, no_wait); mutex_unlock(&entry->mutex); drm_bo_usage_deref_unlocked(entry); if (ret) @@ -736,10 +778,11 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t *man, } -int drm_bo_mem_space(drm_device_t *dev, +int drm_bo_mem_space(drm_buffer_object_t *bo, drm_bo_mem_reg_t *mem, int no_wait) { + drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm= &dev->bm; drm_mem_type_manager_t *man; @@ -767,6 +810,13 @@ int drm_bo_mem_space(drm_device_t *dev, if (mem_type == DRM_BO_MEM_LOCAL) break; + if ((mem_type == bo->pinned_mem_type) && + (bo->pinned_node != NULL)) { + DRM_ERROR("Choosing pinned region\n"); + node = bo->pinned_node; + break; + } + mutex_lock(&dev->struct_mutex); if (man->has_type && man->use_type) { type_found = 1; @@ -849,10 +899,6 @@ static int drm_bo_new_mask(drm_buffer_object_t *bo, return -EINVAL; } - /* - * FIXME: Check what can be done about pinned buffers here. - */ - bo->mem.mask = new_mask; return 0; } @@ -941,7 +987,7 @@ static int drm_bo_read_cached(drm_buffer_object_t * bo) BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (bo->mem.mm_node) - ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0); + ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1); return ret; } @@ -1191,7 +1237,7 @@ static void drm_buffer_user_object_unmap(drm_file_t * priv, */ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, - int no_wait, int force_no_move, int move_unfenced) + int no_wait, int move_unfenced) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; @@ -1228,7 +1274,7 @@ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, /* * Determine where to move the buffer. */ - ret = drm_bo_mem_space(dev, &mem, no_wait); + ret = drm_bo_mem_space(bo, &mem, no_wait); if (ret) goto out_unlock; @@ -1245,7 +1291,7 @@ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); DRM_WAKEUP(&bo->event_queue); list_del(&bo->lru); - drm_bo_add_to_lru(bo, bm); + drm_bo_add_to_lru(bo); mutex_unlock(&dev->struct_mutex); } @@ -1316,9 +1362,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; - uint32_t flag_diff = (bo->mem.mask ^ bo->mem.flags); drm_bo_driver_t *driver = dev->driver->bo_driver; - int ret; DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask, @@ -1339,23 +1383,13 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, return ret; } - /* - * Check whether we dropped no_move policy, and in that case, - * release reserved manager regions, if we're evicted. - */ - - if ((flag_diff & DRM_BO_FLAG_NO_MOVE) && - !(bo->mem.mask & DRM_BO_FLAG_NO_MOVE)) { - /* FIXME */ - } - /* * Check whether we need to move buffer. 
*/ if (!drm_bo_mem_compat(&bo->mem)) { ret = drm_bo_move_buffer(bo, bo->mem.mask & DRM_BO_MASK_MEMTYPE, - no_wait, 1, move_unfenced); + no_wait, move_unfenced); if (ret) { if (ret != -EAGAIN) DRM_ERROR("Failed moving buffer.\n"); @@ -1363,6 +1397,34 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, } } + /* + * Pinned buffers. + */ + + if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { + + bo->pinned_mem_type = bo->mem.mem_type; + mutex_lock(&dev->struct_mutex); + list_del_init(&bo->pinned_lru); + drm_bo_add_to_pinned_lru(bo); + + if (bo->pinned_node != bo->mem.mm_node) { + drm_mm_put_block(bo->pinned_node); + bo->pinned_node = bo->mem.mm_node; + } + + mutex_unlock(&dev->struct_mutex); + + } else if (bo->pinned_node != NULL) { + + mutex_lock(&dev->struct_mutex); + drm_mm_put_block(bo->pinned_node); + list_del_init(&bo->pinned_lru); + bo->pinned_node = NULL; + mutex_unlock(&dev->struct_mutex); + + } + /* * We might need to add a TTM. */ @@ -1372,9 +1434,10 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, if (ret) return ret; } + DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE); /* - * Adjust lru to be sure. + * Finally, adjust lru to be sure. */ mutex_lock(&dev->struct_mutex); @@ -1384,7 +1447,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, _DRM_BO_FLAG_UNFENCED); } else { - drm_bo_add_to_lru(bo, bm); + drm_bo_add_to_lru(bo); if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) { DRM_WAKEUP(&bo->event_queue); DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); @@ -1392,7 +1455,6 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, } mutex_unlock(&dev->struct_mutex); - DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE); return 0; } @@ -1517,6 +1579,7 @@ int drm_buffer_object_create(drm_file_t * priv, atomic_set(&bo->mapped, -1); DRM_INIT_WAITQUEUE(&bo->event_queue); INIT_LIST_HEAD(&bo->lru); + INIT_LIST_HEAD(&bo->pinned_lru); INIT_LIST_HEAD(&bo->ddestroy); #ifdef DRM_ODD_MM_COMPAT INIT_LIST_HEAD(&bo->p_mm_list); @@ -1729,6 +1792,65 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) return 0; } +static int drm_bo_leave_list(drm_buffer_object_t *bo, + uint32_t mem_type, + int free_pinned, + int allow_errors) +{ + drm_device_t *dev = bo->dev; + int ret = 0; + + atomic_inc(&bo->usage); + mutex_unlock(&dev->struct_mutex); + mutex_lock(&bo->mutex); + + ret = drm_bo_expire_fence(bo, allow_errors); + if (ret) + goto out; + + DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); + + if (free_pinned) { + DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE); + mutex_lock(&dev->struct_mutex); + list_del_init(&bo->pinned_lru); + if (bo->pinned_node == bo->mem.mm_node) + bo->pinned_node = NULL; + if (bo->pinned_node != NULL) { + mutex_lock(&dev->struct_mutex); + drm_mm_put_block(bo->pinned_node); + bo->pinned_node = NULL; + mutex_unlock(&dev->struct_mutex); + } + mutex_unlock(&dev->struct_mutex); + } + + if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) { + DRM_ERROR("A DRM_BO_NO_EVICT buffer present at " + "cleanup. Removing flag and evicting.\n"); + bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT; + bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT; + } + + ret = drm_bo_evict(bo, mem_type, 0); + + if (ret){ + if (allow_errors){ + goto out; + } else { + ret = 0; + DRM_ERROR("Cleanup eviction failed\n"); + } + } + +out: + mutex_unlock(&bo->mutex); + mutex_lock(&dev->struct_mutex); + drm_bo_usage_deref_locked(bo); + return ret; +} + + /* * dev->struct_sem locked. 
*/ @@ -1736,102 +1858,39 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) static int drm_bo_force_list_clean(drm_device_t * dev, struct list_head *head, unsigned mem_type, - int force_no_move, int allow_errors) + int free_pinned, + int allow_errors, + int pinned_list) { - drm_buffer_manager_t *bm = &dev->bm; - struct list_head *list, *next, *prev; + struct list_head *list; drm_buffer_object_t *entry; int ret; - int clean; - - retry: - clean = 1; - list_for_each_safe(list, next, head) { - prev = list->prev; - entry = list_entry(list, drm_buffer_object_t, lru); - atomic_inc(&entry->usage); - mutex_unlock(&dev->struct_mutex); - mutex_lock(&entry->mutex); - mutex_lock(&dev->struct_mutex); - - if (prev != list->prev || next != list->next) { - mutex_unlock(&entry->mutex); - drm_bo_usage_deref_locked(entry); - goto retry; - } - if (entry->mem.mm_node) { - clean = 0; - - /* - * Expire the fence. - */ - - mutex_unlock(&dev->struct_mutex); - if (entry->fence && bm->nice_mode) { - unsigned long _end = jiffies + 3 * DRM_HZ; - do { - ret = drm_bo_wait(entry, 0, 1, 0); - if (ret && allow_errors) - goto out_err; - - } while (ret && !time_after_eq(jiffies, _end)); - - if (entry->fence) { - bm->nice_mode = 0; - DRM_ERROR("Detected GPU hang or " - "fence manager was taken down. " - "Evicting waiting buffers\n"); - } - } - if (entry->fence) { - drm_fence_usage_deref_unlocked(dev, - entry->fence); - entry->fence = NULL; - } - - DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - - if (force_no_move) { - DRM_FLAG_MASKED(entry->mem.flags, 0, DRM_BO_FLAG_NO_MOVE); - } - if (entry->mem.flags & DRM_BO_FLAG_NO_EVICT) { - DRM_ERROR("A DRM_BO_NO_EVICT buffer present at " - "cleanup. Removing flag and evicting.\n"); - entry->mem.flags &= ~DRM_BO_FLAG_NO_EVICT; - entry->mem.mask &= ~DRM_BO_FLAG_NO_EVICT; - } - ret = drm_bo_evict(entry, mem_type, 1, force_no_move); - if (ret) { - if (allow_errors) { - goto out_err; - } else { - DRM_ERROR("Aargh. Eviction failed.\n"); - } - } - mutex_lock(&dev->struct_mutex); - } - mutex_unlock(&entry->mutex); - drm_bo_usage_deref_locked(entry); - if (prev != list->prev || next != list->next) { - goto retry; - } + list = head->next; + while(list != head) { + if (pinned_list) + entry = list_entry(list, drm_buffer_object_t, + pinned_lru); + else + entry = list_entry(list, drm_buffer_object_t, + lru); + + ret = drm_bo_leave_list(entry, mem_type, free_pinned, + allow_errors); + + if (ret) + return ret; + + list = head->next; } - if (!clean) - goto retry; return 0; - out_err: - mutex_unlock(&entry->mutex); - drm_bo_usage_deref_unlocked(entry); - mutex_lock(&dev->struct_mutex); - return ret; + } int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) { drm_buffer_manager_t *bm = &dev->bm; drm_mem_type_manager_t *man = &bm->man[mem_type]; - drm_mem_type_manager_t *local_man = &bm->man[DRM_BO_MEM_LOCAL]; int ret = -EINVAL; if (mem_type >= DRM_BO_MEM_TYPES) { @@ -1854,15 +1913,10 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) * Throw out unfenced buffers. */ - drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0); + drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0, 0); - /* - * Throw out evicted no-move buffers. 
- */ - - drm_bo_force_list_clean(dev, &local_man->pinned, mem_type, 1, 0); - drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0); - drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0); + drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0); + drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1); if (drm_mm_clean(&man->manager)) { drm_mm_takedown(&man->manager); @@ -1885,14 +1939,14 @@ static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) return -EINVAL; } - ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1); + ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1, 0); if (ret) return ret; - ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1); + ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 1); if (ret) return ret; ret = - drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1); + drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1); return ret; } @@ -1971,6 +2025,7 @@ int drm_bo_driver_finish(drm_device_t * dev) } } mutex_unlock(&dev->struct_mutex); + if (!cancel_delayed_work(&bm->wq)) { flush_scheduled_work(); } -- cgit v1.2.3 From b0c5339ed69c6ff08b7817f870e895aae2ef04c7 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 12 Feb 2007 20:32:03 +0100 Subject: More bugfixes. --- linux-core/drm_bo.c | 58 ++++++++++++++++++++++++++++------------------------- 1 file changed, 31 insertions(+), 27 deletions(-) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 814175cd..48cb5ef4 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -692,8 +692,6 @@ out: return ret; } - - static int drm_bo_mem_force_space(drm_device_t *dev, drm_bo_mem_reg_t *mem, uint32_t mem_type, @@ -744,7 +742,6 @@ static int drm_bo_mem_force_space(drm_device_t *dev, return 0; } - static int drm_bo_mt_compatible(drm_mem_type_manager_t *man, uint32_t mem_type, uint32_t mask, @@ -777,7 +774,6 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t *man, return 1; } - int drm_bo_mem_space(drm_buffer_object_t *bo, drm_bo_mem_reg_t *mem, int no_wait) @@ -1817,10 +1813,8 @@ static int drm_bo_leave_list(drm_buffer_object_t *bo, if (bo->pinned_node == bo->mem.mm_node) bo->pinned_node = NULL; if (bo->pinned_node != NULL) { - mutex_lock(&dev->struct_mutex); drm_mm_put_block(bo->pinned_node); bo->pinned_node = NULL; - mutex_unlock(&dev->struct_mutex); } mutex_unlock(&dev->struct_mutex); } @@ -1831,8 +1825,9 @@ static int drm_bo_leave_list(drm_buffer_object_t *bo, bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT; bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT; } - - ret = drm_bo_evict(bo, mem_type, 0); + + if (bo->mem.mem_type == mem_type) + ret = drm_bo_evict(bo, mem_type, 0); if (ret){ if (allow_errors){ @@ -1862,29 +1857,40 @@ static int drm_bo_force_list_clean(drm_device_t * dev, int allow_errors, int pinned_list) { - struct list_head *list; + struct list_head *list, *next; drm_buffer_object_t *entry; int ret; + int do_retry; - list = head->next; - while(list != head) { + /* + * We need to + * restart if a node disappears from under us. + * Nodes cannot be added since the hardware lock is needed + * For this operation. 
+ */ + +retry: + list_for_each_safe(list, next, head) { if (pinned_list) entry = list_entry(list, drm_buffer_object_t, pinned_lru); else entry = list_entry(list, drm_buffer_object_t, lru); - + atomic_inc(&entry->usage); ret = drm_bo_leave_list(entry, mem_type, free_pinned, allow_errors); - + + do_retry = list->next != next; + drm_bo_usage_deref_locked(entry); + if (ret) return ret; - - list = head->next; + + if (do_retry) + goto retry; } - return 0; - + return 0; } int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) @@ -1909,12 +1915,7 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) ret = 0; if (mem_type > 0) { - /* - * Throw out unfenced buffers. - */ - drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0, 0); - drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0); drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1); @@ -1928,6 +1929,12 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) return ret; } +/** + *Evict all buffers of a particular mem_type, but leave memory manager + *regions for NO_MOVE buffers intact. New buffers cannot be added at this + *point since we have the hardware lock. + */ + static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) { int ret; @@ -1942,11 +1949,8 @@ static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1, 0); if (ret) return ret; - ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 1); - if (ret) - return ret; - ret = - drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1); + ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0); + return ret; } -- cgit v1.2.3 From 398913dc0e632c71e3095a7d50dae911aed18884 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 12 Feb 2007 20:34:50 +0100 Subject: Lindent. 
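The drm_bo_force_list_clean() rewrite in the patch above drains a list whose entries can vanish whenever the struct_mutex is dropped: it saves the next pointer, processes the current entry, and restarts from the head if the saved pointer no longer matches. A minimal userspace sketch of that retry pattern follows; node_t, leave_list() and the list helpers are invented stand-ins, not the kernel's list.h or the driver's real locking and reference counting.

#include <stdio.h>

typedef struct node {
	struct node *prev, *next;
	int id;
} node_t;

static void list_init(node_t *h) { h->prev = h->next = h; }

static void list_add_tail(node_t *n, node_t *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del(node_t *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = n->next = n;
}

/* Stand-in for drm_bo_leave_list(): evicting a buffer removes it from
 * its LRU list, so the caller's saved next pointer may become stale. */
static int leave_list(node_t *n)
{
	printf("evicting node %d\n", n->id);
	list_del(n);
	return 0;
}

static int force_list_clean(node_t *head)
{
	node_t *list, *next;
	int ret, do_retry;

retry:
	for (list = head->next, next = list->next; list != head;
	     list = next, next = list->next) {
		ret = leave_list(list);

		/* Same test as the patch: if the links changed while the
		 * entry was being processed, start over from the head. */
		do_retry = (list->next != next);
		if (ret)
			return ret;
		if (do_retry)
			goto retry;
	}
	return 0;
}

int main(void)
{
	node_t head, n[3];
	int i;

	list_init(&head);
	for (i = 0; i < 3; ++i) {
		n[i].id = i;
		list_add_tail(&n[i], &head);
	}
	return force_list_clean(&head);
}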
--- linux-core/drm_bo.c | 304 +++++++++++++++++++++++----------------------------- 1 file changed, 137 insertions(+), 167 deletions(-) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 48cb5ef4..ed089096 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -49,12 +49,10 @@ * */ - - -static void drm_bo_destroy_locked(drm_buffer_object_t *bo); -static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo); -static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo); -static void drm_bo_unmap_virtual(drm_buffer_object_t *bo); +static void drm_bo_destroy_locked(drm_buffer_object_t * bo); +static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo); +static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo); +static void drm_bo_unmap_virtual(drm_buffer_object_t * bo); static inline uint32_t drm_bo_type_flags(unsigned type) { @@ -80,19 +78,17 @@ void drm_bo_add_to_lru(drm_buffer_object_t * bo) if (bo->mem.mm_node != bo->pinned_node) { man = &bo->dev->bm.man[bo->mem.mem_type]; list_add_tail(&bo->lru, &man->lru); - } else + } else INIT_LIST_HEAD(&bo->lru); } - -static int drm_bo_vm_pre_move(drm_buffer_object_t *bo, - int old_is_pci) +static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci) { #ifdef DRM_ODD_MM_COMPAT int ret; ret = drm_bo_lock_kmm(bo); - if (ret) + if (ret) return ret; drm_bo_unmap_virtual(bo); if (old_is_pci) @@ -103,11 +99,11 @@ static int drm_bo_vm_pre_move(drm_buffer_object_t *bo, return 0; } -static void drm_bo_vm_post_move(drm_buffer_object_t *bo) +static void drm_bo_vm_post_move(drm_buffer_object_t * bo) { #ifdef DRM_ODD_MM_COMPAT int ret; - + ret = drm_bo_remap_bound(bo); if (ret) { DRM_ERROR("Failed to remap a bound buffer object.\n" @@ -129,7 +125,7 @@ static int drm_bo_add_ttm(drm_buffer_object_t * bo) switch (bo->type) { case drm_bo_type_dc: - bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT); + bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT); if (!bo->ttm) ret = -ENOMEM; break; @@ -145,13 +141,9 @@ static int drm_bo_add_ttm(drm_buffer_object_t * bo) return ret; } - - - -static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, - drm_bo_mem_reg_t *mem, - int evict, - int no_wait) +static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, + drm_bo_mem_reg_t * mem, + int evict, int no_wait) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; @@ -161,7 +153,6 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type]; int ret = 0; - if (old_is_pci || new_is_pci) ret = drm_bo_vm_pre_move(bo, old_is_pci); if (ret) @@ -171,8 +162,7 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, * Create and bind a ttm if required. 
*/ - if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && - (bo->ttm == NULL)) { + if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) { ret = drm_bo_add_ttm(bo); if (ret) goto out_err; @@ -185,7 +175,7 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, goto out_err; } } - + if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) { drm_bo_mem_reg_t *old_mem = &bo->mem; @@ -195,15 +185,14 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, *old_mem = *mem; mem->mm_node = NULL; old_mem->mask = save_mask; - DRM_FLAG_MASKED(save_flags, mem->flags, - DRM_BO_MASK_MEMTYPE); + DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE); } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && - !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { + !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { ret = drm_bo_move_ttm(bo, evict, no_wait, mem); - } else if (dev->driver->bo_driver->move) { + } else if (dev->driver->bo_driver->move) { ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem); } else { @@ -217,13 +206,15 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, if (old_is_pci || new_is_pci) drm_bo_vm_post_move(bo); - + if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) { - ret = dev->driver->bo_driver->invalidate_caches(dev, bo->mem.flags); + ret = + dev->driver->bo_driver->invalidate_caches(dev, + bo->mem.flags); if (ret) DRM_ERROR("Can not flush read caches\n"); } - + DRM_FLAG_MASKED(bo->priv_flags, (evict) ? _DRM_BO_FLAG_EVICTED : 0, _DRM_BO_FLAG_EVICTED); @@ -233,10 +224,10 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, return 0; -out_err: + out_err: if (old_is_pci || new_is_pci) drm_bo_vm_post_move(bo); - + new_man = &bm->man[bo->mem.mem_type]; if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) { drm_ttm_unbind(bo->ttm); @@ -282,8 +273,7 @@ int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, return 0; } -static int drm_bo_expire_fence(drm_buffer_object_t *bo, - int allow_errors) +static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; @@ -312,8 +302,7 @@ static int drm_bo_expire_fence(drm_buffer_object_t *bo, } } return 0; -} - +} /* * Call dev->struct_mutex locked. @@ -321,8 +310,7 @@ static int drm_bo_expire_fence(drm_buffer_object_t *bo, * fence object and removing from lru lists and memory managers. */ - -static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all) +static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; @@ -333,14 +321,13 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all) DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - if (bo->fence && drm_fence_object_signaled(bo->fence, - bo->fence_type)) { + if (bo->fence && drm_fence_object_signaled(bo->fence, bo->fence_type)) { drm_fence_usage_deref_locked(dev, bo->fence); bo->fence = NULL; } - if (bo->fence && remove_all) - (void) drm_bo_expire_fence(bo, 0); + if (bo->fence && remove_all) + (void)drm_bo_expire_fence(bo, 0); mutex_lock(&dev->struct_mutex); @@ -369,30 +356,27 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all) drm_fence_object_flush(dev, bo->fence, bo->fence_type); list_add_tail(&bo->ddestroy, &bm->ddestroy); schedule_delayed_work(&bm->wq, - ((DRM_HZ / 100) < - 1) ? 1 : DRM_HZ / 100); + ((DRM_HZ / 100) < 1) ? 
1 : DRM_HZ / 100); } -out: + out: mutex_unlock(&bo->mutex); return; } - /* * Verify that refcount is 0 and that there are no internal references * to the buffer object. Then destroy it. */ -static void drm_bo_destroy_locked(drm_buffer_object_t *bo) +static void drm_bo_destroy_locked(drm_buffer_object_t * bo) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; - if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && + if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && list_empty(&bo->pinned_lru) && bo->pinned_node == NULL && - list_empty(&bo->ddestroy) && - atomic_read(&bo->usage) == 0) { + list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) { BUG_ON(bo->fence != NULL); #ifdef DRM_ODD_MM_COMPAT @@ -423,7 +407,6 @@ static void drm_bo_destroy_locked(drm_buffer_object_t *bo) return; } - /* * Call dev->struct_mutex locked. */ @@ -435,10 +418,9 @@ static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all) drm_buffer_object_t *entry, *nentry; struct list_head *list, *next; - list_for_each_safe(list, next, &bm->ddestroy) { entry = list_entry(list, drm_buffer_object_t, ddestroy); - + nentry = NULL; if (next != &bm->ddestroy) { nentry = list_entry(next, drm_buffer_object_t, @@ -464,11 +446,11 @@ static void drm_bo_delayed_workqueue(struct work_struct *work) drm_device_t *dev = (drm_device_t *) data; drm_buffer_manager_t *bm = &dev->bm; #else - drm_buffer_manager_t *bm = container_of(work, drm_buffer_manager_t, wq.work); + drm_buffer_manager_t *bm = + container_of(work, drm_buffer_manager_t, wq.work); drm_device_t *dev = container_of(bm, drm_device_t, bm); #endif - DRM_DEBUG("Delayed delete Worker\n"); mutex_lock(&dev->struct_mutex); @@ -494,10 +476,10 @@ void drm_bo_usage_deref_locked(drm_buffer_object_t * bo) static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo) { drm_buffer_object_t *bo = - drm_user_object_entry(uo, drm_buffer_object_t, base); + drm_user_object_entry(uo, drm_buffer_object_t, base); drm_bo_takedown_vm_locked(bo); - drm_bo_usage_deref_locked(bo); + drm_bo_usage_deref_locked(bo); } static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo) @@ -641,7 +623,6 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, goto out; } - evict_mem = bo->mem; evict_mem.mm_node = NULL; @@ -663,19 +644,19 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, "buffer eviction.\n"); goto out; } - + if (bo->pinned_node) - DRM_ERROR("Evicting pinned buffer\n"); + DRM_ERROR("Evicting pinned buffer\n"); ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait); - + if (ret) { if (ret != -EAGAIN) DRM_ERROR("Buffer eviction failed\n"); goto out; } - -out1: + + out1: mutex_lock(&dev->struct_mutex); if (evict_mem.mm_node) { drm_mm_put_block(evict_mem.mm_node); @@ -687,15 +668,14 @@ out1: DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED, _DRM_BO_FLAG_EVICTED); - -out: + + out: return ret; } -static int drm_bo_mem_force_space(drm_device_t *dev, - drm_bo_mem_reg_t *mem, - uint32_t mem_type, - int no_wait) +static int drm_bo_mem_force_space(drm_device_t * dev, + drm_bo_mem_reg_t * mem, + uint32_t mem_type, int no_wait) { drm_mm_node_t *node; drm_buffer_manager_t *bm = &dev->bm; @@ -707,7 +687,7 @@ static int drm_bo_mem_force_space(drm_device_t *dev, mutex_lock(&dev->struct_mutex); do { - node = drm_mm_search_free(&man->manager, num_pages, + node = drm_mm_search_free(&man->manager, num_pages, mem->page_alignment, 1); if (node) break; @@ -720,7 +700,8 @@ static int drm_bo_mem_force_space(drm_device_t *dev, 
atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); - BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); + BUG_ON(entry->mem. + flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); ret = drm_bo_evict(entry, mem_type, no_wait); mutex_unlock(&entry->mutex); @@ -742,10 +723,9 @@ static int drm_bo_mem_force_space(drm_device_t *dev, return 0; } -static int drm_bo_mt_compatible(drm_mem_type_manager_t *man, +static int drm_bo_mt_compatible(drm_mem_type_manager_t * man, uint32_t mem_type, - uint32_t mask, - uint32_t *res_mask) + uint32_t mask, uint32_t * res_mask) { uint32_t cur_flags = drm_bo_type_flags(mem_type); uint32_t flag_diff; @@ -773,14 +753,13 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t *man, *res_mask = cur_flags; return 1; } - -int drm_bo_mem_space(drm_buffer_object_t *bo, - drm_bo_mem_reg_t *mem, - int no_wait) + +int drm_bo_mem_space(drm_buffer_object_t * bo, + drm_bo_mem_reg_t * mem, int no_wait) { drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm= &dev->bm; - drm_mem_type_manager_t *man; + drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man; uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; @@ -793,12 +772,12 @@ int drm_bo_mem_space(drm_buffer_object_t *bo, drm_mm_node_t *node = NULL; int ret; - for (i=0; iman[mem_type]; - type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask, - &cur_flags); + type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask, + &cur_flags); if (!type_ok) continue; @@ -808,7 +787,7 @@ int drm_bo_mem_space(drm_buffer_object_t *bo, if ((mem_type == bo->pinned_mem_type) && (bo->pinned_node != NULL)) { - DRM_ERROR("Choosing pinned region\n"); + DRM_ERROR("Choosing pinned region\n"); node = bo->pinned_node; break; } @@ -816,17 +795,17 @@ int drm_bo_mem_space(drm_buffer_object_t *bo, mutex_lock(&dev->struct_mutex); if (man->has_type && man->use_type) { type_found = 1; - node = drm_mm_search_free(&man->manager, mem->num_pages, + node = drm_mm_search_free(&man->manager, mem->num_pages, mem->page_alignment, 1); - if (node) - node = drm_mm_get_block(node, mem->num_pages, + if (node) + node = drm_mm_get_block(node, mem->num_pages, mem->page_alignment); } mutex_unlock(&dev->struct_mutex); if (node) break; } - + if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) { mem->mm_node = node; mem->mem_type = mem_type; @@ -834,27 +813,26 @@ int drm_bo_mem_space(drm_buffer_object_t *bo, return 0; } - if (!type_found) + if (!type_found) return -EINVAL; - + num_prios = dev->driver->bo_driver->num_mem_busy_prio; prios = dev->driver->bo_driver->mem_busy_prio; - for (i=0; iman[mem_type]; - if (!drm_bo_mt_compatible(man, mem_type, mem->mask, - &cur_flags)) + if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags)) continue; - + ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait); - + if (ret == 0) { mem->flags = cur_flags; return 0; } - + if (ret == -EAGAIN) has_eagain = 1; } @@ -862,10 +840,10 @@ int drm_bo_mem_space(drm_buffer_object_t *bo, ret = (has_eagain) ? 
-EAGAIN : -ENOMEM; return ret; } -EXPORT_SYMBOL(drm_bo_mem_space); +EXPORT_SYMBOL(drm_bo_mem_space); -static int drm_bo_new_mask(drm_buffer_object_t *bo, +static int drm_bo_new_mask(drm_buffer_object_t * bo, uint32_t new_mask, uint32_t hint) { uint32_t new_props; @@ -1253,7 +1231,6 @@ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, if (ret) return ret; - mem.num_pages = bo->mem.num_pages; mem.size = mem.num_pages << PAGE_SHIFT; mem.mask = new_mem_flags; @@ -1263,7 +1240,7 @@ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, mutex_lock(&dev->struct_mutex); list_del(&bo->lru); list_add_tail(&bo->lru, &bm->unfenced); - DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, + DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, _DRM_BO_FLAG_UNFENCED); mutex_unlock(&dev->struct_mutex); @@ -1271,13 +1248,13 @@ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, * Determine where to move the buffer. */ ret = drm_bo_mem_space(bo, &mem, no_wait); - + if (ret) goto out_unlock; ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait); - out_unlock: + out_unlock: if (ret || !move_unfenced) { mutex_lock(&dev->struct_mutex); if (mem.mm_node) { @@ -1288,18 +1265,16 @@ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, DRM_WAKEUP(&bo->event_queue); list_del(&bo->lru); drm_bo_add_to_lru(bo); - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->struct_mutex); } mutex_unlock(&bm->evict_mutex); return ret; } - -static int drm_bo_mem_compat(drm_bo_mem_reg_t *mem) +static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem) { - uint32_t - flag_diff = (mem->mask ^ mem->flags); + uint32_t flag_diff = (mem->mask ^ mem->flags); if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0) return 0; @@ -1311,11 +1286,11 @@ static int drm_bo_mem_compat(drm_bo_mem_reg_t *mem) return 0; return 1; } - -static int drm_bo_check_fake(drm_device_t *dev, drm_bo_mem_reg_t *mem) + +static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem) { drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man; + drm_mem_type_manager_t *man; uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; uint32_t i; @@ -1328,11 +1303,11 @@ static int drm_bo_check_fake(drm_device_t *dev, drm_bo_mem_reg_t *mem) BUG_ON(mem->mm_node); - for (i=0; iman[mem_type]; - type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask, - &cur_flags); + type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask, + &cur_flags); if (type_ok) break; } @@ -1348,7 +1323,7 @@ static int drm_bo_check_fake(drm_device_t *dev, drm_bo_mem_reg_t *mem) DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask); return -EINVAL; } - + /* * bo locked. 
*/ @@ -1361,9 +1336,10 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, drm_bo_driver_t *driver = dev->driver->bo_driver; int ret; - DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask, + DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask, bo->mem.flags); - ret = driver->fence_type(bo->mem.mask, &bo->fence_class, &bo->fence_type); + ret = + driver->fence_type(bo->mem.mask, &bo->fence_class, &bo->fence_type); if (ret) { DRM_ERROR("Driver did not support given buffer permissions\n"); return ret; @@ -1384,7 +1360,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, */ if (!drm_bo_mem_compat(&bo->mem)) { - ret = drm_bo_move_buffer(bo, bo->mem.mask & DRM_BO_MASK_MEMTYPE, + ret = drm_bo_move_buffer(bo, bo->mem.mask & DRM_BO_MASK_MEMTYPE, no_wait, move_unfenced); if (ret) { if (ret != -EAGAIN) @@ -1427,7 +1403,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) { ret = drm_bo_add_ttm(bo); - if (ret) + if (ret) return ret; } DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE); @@ -1435,23 +1411,23 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, /* * Finally, adjust lru to be sure. */ - + mutex_lock(&dev->struct_mutex); list_del(&bo->lru); if (move_unfenced) { list_add_tail(&bo->lru, &bm->unfenced); - DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, + DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, _DRM_BO_FLAG_UNFENCED); } else { drm_bo_add_to_lru(bo); if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) { DRM_WAKEUP(&bo->event_queue); - DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); + DRM_FLAG_MASKED(bo->priv_flags, 0, + _DRM_BO_FLAG_UNFENCED); } } mutex_unlock(&dev->struct_mutex); - return 0; } @@ -1601,7 +1577,7 @@ int drm_buffer_object_create(drm_file_t * priv, if (ret) goto out_err; - + if (bo->type == drm_bo_type_dc) { mutex_lock(&dev->struct_mutex); ret = drm_bo_setup_vm_locked(bo); @@ -1609,8 +1585,7 @@ int drm_buffer_object_create(drm_file_t * priv, if (ret) goto out_err; } - ret = drm_buffer_object_validate(bo, 0, - hint & DRM_BO_HINT_DONT_BLOCK); + ret = drm_buffer_object_validate(bo, 0, hint & DRM_BO_HINT_DONT_BLOCK); if (ret) goto out_err; @@ -1788,10 +1763,9 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) return 0; } -static int drm_bo_leave_list(drm_buffer_object_t *bo, +static int drm_bo_leave_list(drm_buffer_object_t * bo, uint32_t mem_type, - int free_pinned, - int allow_errors) + int free_pinned, int allow_errors) { drm_device_t *dev = bo->dev; int ret = 0; @@ -1805,7 +1779,7 @@ static int drm_bo_leave_list(drm_buffer_object_t *bo, goto out; DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - + if (free_pinned) { DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE); mutex_lock(&dev->struct_mutex); @@ -1815,7 +1789,7 @@ static int drm_bo_leave_list(drm_buffer_object_t *bo, if (bo->pinned_node != NULL) { drm_mm_put_block(bo->pinned_node); bo->pinned_node = NULL; - } + } mutex_unlock(&dev->struct_mutex); } @@ -1825,26 +1799,25 @@ static int drm_bo_leave_list(drm_buffer_object_t *bo, bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT; bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT; } - + if (bo->mem.mem_type == mem_type) ret = drm_bo_evict(bo, mem_type, 0); - if (ret){ - if (allow_errors){ + if (ret) { + if (allow_errors) { goto out; } else { ret = 0; DRM_ERROR("Cleanup eviction failed\n"); } } - -out: + + out: mutex_unlock(&bo->mutex); mutex_lock(&dev->struct_mutex); drm_bo_usage_deref_locked(bo); return ret; } - /* * 
dev->struct_sem locked. @@ -1853,9 +1826,8 @@ out: static int drm_bo_force_list_clean(drm_device_t * dev, struct list_head *head, unsigned mem_type, - int free_pinned, - int allow_errors, - int pinned_list) + int free_pinned, + int allow_errors, int pinned_list) { struct list_head *list, *next; drm_buffer_object_t *entry; @@ -1869,16 +1841,15 @@ static int drm_bo_force_list_clean(drm_device_t * dev, * For this operation. */ -retry: + retry: list_for_each_safe(list, next, head) { if (pinned_list) - entry = list_entry(list, drm_buffer_object_t, + entry = list_entry(list, drm_buffer_object_t, pinned_lru); else - entry = list_entry(list, drm_buffer_object_t, - lru); + entry = list_entry(list, drm_buffer_object_t, lru); atomic_inc(&entry->usage); - ret = drm_bo_leave_list(entry, mem_type, free_pinned, + ret = drm_bo_leave_list(entry, mem_type, free_pinned, allow_errors); do_retry = list->next != next; @@ -1890,7 +1861,7 @@ retry: if (do_retry) goto retry; } - return 0; + return 0; } int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) @@ -1966,7 +1937,7 @@ static int drm_bo_init_mm(drm_device_t * dev, DRM_ERROR("Illegal memory type %d\n", type); return ret; } - + man = &bm->man[type]; if (man->has_type) { DRM_ERROR("Memory manager already initialized for type %d\n", @@ -1975,7 +1946,7 @@ static int drm_bo_init_mm(drm_device_t * dev, } ret = dev->driver->bo_driver->init_mem_type(dev, type, man); - if (ret) + if (ret) return ret; ret = 0; @@ -2174,15 +2145,15 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) * buffer object vm functions. */ -int drm_mem_reg_is_pci(drm_device_t *dev, drm_bo_mem_reg_t *mem) +int drm_mem_reg_is_pci(drm_device_t * dev, drm_bo_mem_reg_t * mem) { drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { if (mem->mem_type == DRM_BO_MEM_LOCAL) return 0; - + if (man->flags & _DRM_FLAG_MEMTYPE_CMA) return 0; @@ -2191,6 +2162,7 @@ int drm_mem_reg_is_pci(drm_device_t *dev, drm_bo_mem_reg_t *mem) } return 1; } + EXPORT_SYMBOL(drm_mem_reg_is_pci); /** @@ -2207,17 +2179,16 @@ EXPORT_SYMBOL(drm_mem_reg_is_pci); * Otherwise returns zero. */ -int drm_bo_pci_offset(drm_device_t *dev, - drm_bo_mem_reg_t *mem, +int drm_bo_pci_offset(drm_device_t * dev, + drm_bo_mem_reg_t * mem, unsigned long *bus_base, - unsigned long *bus_offset, - unsigned long *bus_size) + unsigned long *bus_offset, unsigned long *bus_size) { drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; *bus_size = 0; - if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)) + if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)) return -EINVAL; if (drm_mem_reg_is_pci(dev, mem)) { @@ -2229,7 +2200,6 @@ int drm_bo_pci_offset(drm_device_t *dev, return 0; } - /** * \c Kill all user-space virtual mappings of this buffer object. * @@ -2238,7 +2208,7 @@ int drm_bo_pci_offset(drm_device_t *dev, * Call bo->mutex locked. 
*/ -void drm_bo_unmap_virtual(drm_buffer_object_t *bo) +void drm_bo_unmap_virtual(drm_buffer_object_t * bo) { drm_device_t *dev = bo->dev; loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT; @@ -2247,12 +2217,12 @@ void drm_bo_unmap_virtual(drm_buffer_object_t *bo) unmap_mapping_range(dev->dev_mapping, offset, holelen, 1); } -static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo) +static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) { drm_map_list_t *list = &bo->map_list; drm_local_map_t *map; drm_device_t *dev = bo->dev; - + if (list->user_token) { drm_ht_remove_item(&dev->map_hash, &list->hash); list->user_token = 0; @@ -2272,12 +2242,12 @@ static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo) drm_bo_usage_deref_locked(bo); } -static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo) +static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo) { drm_map_list_t *list = &bo->map_list; drm_local_map_t *map; drm_device_t *dev = bo->dev; - + list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ); if (!list->map) return -ENOMEM; @@ -2288,8 +2258,8 @@ static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo) map->flags = _DRM_REMOVABLE; map->size = bo->mem.num_pages * PAGE_SIZE; atomic_inc(&bo->usage); - map->handle = (void *) bo; - + map->handle = (void *)bo; + list->file_offset_node = drm_mm_search_free(&dev->offset_manager, bo->mem.num_pages, 0, 0); @@ -2306,7 +2276,7 @@ static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo) drm_bo_takedown_vm_locked(bo); return -ENOMEM; } - + list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT; return 0; -- cgit v1.2.3 From 9efdae317ce01cea95f75855b175243ae858fde4 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 13 Feb 2007 20:05:32 +0100 Subject: More bugfixes. Fixed memory, pinned buffers and unmappable memory now seems fully functional. 
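The hunks below also tighten the placement checks in drm_bo_mt_compatible(): a caching or mappability mismatch only rules a memory type out when the buffer explicitly asks for, or forces, that property. A standalone sketch of that decision logic, with invented flag values and a simplified LOCAL test standing in for the real mem_type parameter:

#include <stdint.h>
#include <stdio.h>

#define BO_FLAG_MEM_LOCAL      (1u << 0)
#define BO_FLAG_MEM_TT         (1u << 1)
#define BO_FLAG_MEM_VRAM       (1u << 2)
#define BO_MASK_MEM            (BO_FLAG_MEM_LOCAL | BO_FLAG_MEM_TT | BO_FLAG_MEM_VRAM)
#define BO_FLAG_CACHED         (1u << 3)
#define BO_FLAG_MAPPABLE       (1u << 4)
#define BO_FLAG_FORCE_CACHING  (1u << 5)
#define BO_FLAG_FORCE_MAPPABLE (1u << 6)

/* cur_flags: what the memory type offers; mask: what the buffer asks for.
 * Returns 1 if the memory type is usable, 0 otherwise. */
static int mt_compatible(uint32_t cur_flags, uint32_t mask, uint32_t *res_mask)
{
	uint32_t flag_diff;

	/* The buffer must accept this memory type at all. */
	if ((cur_flags & mask & BO_MASK_MEM) == 0)
		return 0;

	/* System memory is always cached and mappable; accept it as-is. */
	if (cur_flags & BO_FLAG_MEM_LOCAL) {
		*res_mask = cur_flags;
		return 1;
	}

	flag_diff = mask ^ cur_flags;

	/* A caching mismatch only disqualifies the type if the buffer
	 * explicitly wants uncached memory or forces its caching policy. */
	if ((flag_diff & BO_FLAG_CACHED) &&
	    (!(mask & BO_FLAG_CACHED) || (mask & BO_FLAG_FORCE_CACHING)))
		return 0;

	/* Likewise, only reject on mappability if the buffer insists. */
	if ((flag_diff & BO_FLAG_MAPPABLE) &&
	    ((mask & BO_FLAG_MAPPABLE) || (mask & BO_FLAG_FORCE_MAPPABLE)))
		return 0;

	*res_mask = cur_flags;
	return 1;
}

int main(void)
{
	uint32_t res = 0;

	/* Unmappable VRAM vs. a buffer that demands MAPPABLE: rejected. */
	printf("%d\n", mt_compatible(BO_FLAG_MEM_VRAM,
				     BO_FLAG_MEM_VRAM | BO_FLAG_MAPPABLE, &res));
	/* The same VRAM for a buffer that does not care: accepted. */
	printf("%d\n", mt_compatible(BO_FLAG_MEM_VRAM, BO_FLAG_MEM_VRAM, &res));
	return 0;
}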
--- linux-core/drm_bo.c | 188 ++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 132 insertions(+), 56 deletions(-) (limited to 'linux-core/drm_bo.c') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index ed089096..e593258b 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -75,11 +75,12 @@ void drm_bo_add_to_lru(drm_buffer_object_t * bo) { drm_mem_type_manager_t *man; - if (bo->mem.mm_node != bo->pinned_node) { + if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) { man = &bo->dev->bm.man[bo->mem.mem_type]; list_add_tail(&bo->lru, &man->lru); - } else + } else { INIT_LIST_HEAD(&bo->lru); + } } static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci) @@ -339,6 +340,8 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) list_del_init(&bo->lru); if (bo->mem.mm_node) { drm_mm_put_block(bo->mem.mm_node); + if (bo->pinned_node == bo->mem.mm_node) + bo->pinned_node = NULL; bo->mem.mm_node = NULL; } list_del_init(&bo->pinned_lru); @@ -377,7 +380,11 @@ static void drm_bo_destroy_locked(drm_buffer_object_t * bo) if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && list_empty(&bo->pinned_lru) && bo->pinned_node == NULL && list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) { - BUG_ON(bo->fence != NULL); + if (bo->fence != NULL) { + DRM_ERROR("Fence was non-zero.\n"); + drm_bo_cleanup_refs(bo, 0); + return; + } #ifdef DRM_ODD_MM_COMPAT BUG_ON(!list_empty(&bo->vma_list)); @@ -565,6 +572,7 @@ int drm_fence_buffer_objects(drm_file_t * priv, count = 0; l = f_list.next; while (l != &f_list) { + prefetch(l->next); entry = list_entry(l, drm_buffer_object_t, lru); atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); @@ -629,8 +637,6 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, if (bo->type == drm_bo_type_fake) { bo->mem.mem_type = DRM_BO_MEM_LOCAL; bo->mem.mm_node = NULL; - bo->pinned_mem_type = DRM_BO_MEM_LOCAL; - bo->pinned_node = NULL; goto out1; } @@ -641,13 +647,10 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, if (ret) { if (ret != -EAGAIN) DRM_ERROR("Failed to find memory space for " - "buffer eviction.\n"); + "buffer 0x%p eviction.\n", bo); goto out; } - if (bo->pinned_node) - DRM_ERROR("Evicting pinned buffer\n"); - ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait); if (ret) { @@ -659,7 +662,8 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, out1: mutex_lock(&dev->struct_mutex); if (evict_mem.mm_node) { - drm_mm_put_block(evict_mem.mm_node); + if (evict_mem.mm_node != bo->pinned_node) + drm_mm_put_block(evict_mem.mm_node); evict_mem.mm_node = NULL; } list_del(&bo->lru); @@ -700,8 +704,7 @@ static int drm_bo_mem_force_space(drm_device_t * dev, atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); - BUG_ON(entry->mem. 
- flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); + BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); ret = drm_bo_evict(entry, mem_type, no_wait); mutex_unlock(&entry->mutex); @@ -737,18 +740,24 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t * man, if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT) DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED); - if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0) { + if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0) return 0; + + if (mem_type == DRM_BO_MEM_LOCAL) { + *res_mask = cur_flags; + return 1; } + flag_diff = (mask ^ cur_flags); if ((flag_diff & DRM_BO_FLAG_CACHED) && - (mask & DRM_BO_FLAG_FORCE_CACHING)) { + (!(mask & DRM_BO_FLAG_CACHED) || + (mask & DRM_BO_FLAG_FORCE_CACHING))) return 0; - } + if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && - (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) { + ((mask & DRM_BO_FLAG_MAPPABLE) || + (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) ) return 0; - } *res_mask = cur_flags; return 1; @@ -772,6 +781,7 @@ int drm_bo_mem_space(drm_buffer_object_t * bo, drm_mm_node_t *node = NULL; int ret; + mem->mm_node = NULL; for (i = 0; i < num_prios; ++i) { mem_type = prios[i]; man = &bm->man[mem_type]; @@ -787,7 +797,6 @@ int drm_bo_mem_space(drm_buffer_object_t * bo, if ((mem_type == bo->pinned_mem_type) && (bo->pinned_node != NULL)) { - DRM_ERROR("Choosing pinned region\n"); node = bo->pinned_node; break; } @@ -1248,17 +1257,17 @@ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, * Determine where to move the buffer. */ ret = drm_bo_mem_space(bo, &mem, no_wait); - if (ret) goto out_unlock; ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait); - out_unlock: + out_unlock: if (ret || !move_unfenced) { mutex_lock(&dev->struct_mutex); if (mem.mm_node) { - drm_mm_put_block(mem.mm_node); + if (mem.mm_node != bo->pinned_node) + drm_mm_put_block(mem.mm_node); mem.mm_node = NULL; } DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); @@ -1279,10 +1288,13 @@ static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem) if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0) return 0; if ((flag_diff & DRM_BO_FLAG_CACHED) && - (mem->mask & DRM_BO_FLAG_FORCE_CACHING)) - return 0; + (!(mem->mask & DRM_BO_FLAG_CACHED) || + (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) { + return 0; + } if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && - (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)) + ((mem->mask & DRM_BO_FLAG_MAPPABLE) || + (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE))) return 0; return 1; } @@ -1360,8 +1372,8 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, */ if (!drm_bo_mem_compat(&bo->mem)) { - ret = drm_bo_move_buffer(bo, bo->mem.mask & DRM_BO_MASK_MEMTYPE, - no_wait, move_unfenced); + ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait, + move_unfenced); if (ret) { if (ret != -EAGAIN) DRM_ERROR("Failed moving buffer.\n"); @@ -1374,14 +1386,14 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, */ if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { - bo->pinned_mem_type = bo->mem.mem_type; mutex_lock(&dev->struct_mutex); list_del_init(&bo->pinned_lru); drm_bo_add_to_pinned_lru(bo); if (bo->pinned_node != bo->mem.mm_node) { - drm_mm_put_block(bo->pinned_node); + if (bo->pinned_node != NULL) + drm_mm_put_block(bo->pinned_node); bo->pinned_node = bo->mem.mm_node; } @@ -1763,6 +1775,39 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) return 0; } +/** + *Clean the unfenced list and put on regular LRU. + *This is part of the memory manager cleanup and should only be + *called with the DRI lock held. 
+ *Call dev->struct_sem locked. + */ + +static void drm_bo_clean_unfenced(drm_device_t *dev) +{ + drm_buffer_manager_t *bm = &dev->bm; + struct list_head *head, *list; + drm_buffer_object_t *entry; + + head = &bm->unfenced; + + list = head->next; + while(list != head) { + prefetch(list->next); + entry = list_entry(list, drm_buffer_object_t, lru); + + atomic_inc(&entry->usage); + mutex_unlock(&dev->struct_mutex); + mutex_lock(&entry->mutex); + mutex_lock(&dev->struct_mutex); + + list_del(&entry->lru); + DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); + drm_bo_add_to_lru(entry); + mutex_unlock(&entry->mutex); + list = head->next; + } +} + static int drm_bo_leave_list(drm_buffer_object_t * bo, uint32_t mem_type, int free_pinned, int allow_errors) @@ -1770,16 +1815,12 @@ static int drm_bo_leave_list(drm_buffer_object_t * bo, drm_device_t *dev = bo->dev; int ret = 0; - atomic_inc(&bo->usage); - mutex_unlock(&dev->struct_mutex); mutex_lock(&bo->mutex); ret = drm_bo_expire_fence(bo, allow_errors); if (ret) goto out; - DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - if (free_pinned) { DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE); mutex_lock(&dev->struct_mutex); @@ -1814,52 +1855,86 @@ static int drm_bo_leave_list(drm_buffer_object_t * bo, out: mutex_unlock(&bo->mutex); - mutex_lock(&dev->struct_mutex); - drm_bo_usage_deref_locked(bo); return ret; } + +static drm_buffer_object_t *drm_bo_entry(struct list_head *list, + int pinned_list) +{ + if (pinned_list) + return list_entry(list, drm_buffer_object_t, pinned_lru); + else + return list_entry(list, drm_buffer_object_t, lru); +} + /* - * dev->struct_sem locked. + * dev->struct_mutex locked. */ static int drm_bo_force_list_clean(drm_device_t * dev, struct list_head *head, unsigned mem_type, int free_pinned, - int allow_errors, int pinned_list) + int allow_errors, + int pinned_list) { - struct list_head *list, *next; - drm_buffer_object_t *entry; + struct list_head *list, *next, *prev; + drm_buffer_object_t *entry, *nentry; int ret; - int do_retry; + int do_restart; /* - * We need to - * restart if a node disappears from under us. - * Nodes cannot be added since the hardware lock is needed - * For this operation. + * The list traversal is a bit odd here, because an item may + * disappear from the list when we release the struct_mutex or + * when we decrease the usage count. Also we're not guaranteed + * to drain pinned lists, so we can't always restart. */ - retry: +restart: + nentry = NULL; list_for_each_safe(list, next, head) { - if (pinned_list) - entry = list_entry(list, drm_buffer_object_t, - pinned_lru); - else - entry = list_entry(list, drm_buffer_object_t, lru); + prev = list->prev; + + entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list); atomic_inc(&entry->usage); + if (nentry) { + atomic_dec(&nentry->usage); + nentry = NULL; + } + + /* + * Protect the next item from destruction, so we can check + * its list pointers later on. + */ + + if (next != head) { + nentry = drm_bo_entry(next, pinned_list); + atomic_inc(&nentry->usage); + } + mutex_unlock(&dev->struct_mutex); + ret = drm_bo_leave_list(entry, mem_type, free_pinned, allow_errors); + mutex_lock(&dev->struct_mutex); - do_retry = list->next != next; drm_bo_usage_deref_locked(entry); - if (ret) return ret; - if (do_retry) - goto retry; + /* + * Has the next item disappeared from the list? 
+ */ + + do_restart = ((next->prev != list) && (next->prev != prev)); + + if (nentry != NULL && do_restart) { + drm_bo_usage_deref_locked(nentry); + nentry = NULL; + } + + if (do_restart) + goto restart; } return 0; } @@ -1886,7 +1961,7 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) ret = 0; if (mem_type > 0) { - drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0, 0); + drm_bo_clean_unfenced(dev); drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0); drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1); @@ -1917,10 +1992,11 @@ static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) return -EINVAL; } - ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1, 0); + drm_bo_clean_unfenced(dev); + ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0); if (ret) return ret; - ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0); + ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1); return ret; } -- cgit v1.2.3
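The final version of drm_bo_force_list_clean() above refines the earlier retry test: the next entry is reference-protected while the mutex is dropped, and traversal only restarts when that entry's prev pointer no longer points at either the current entry or its old predecessor. A userspace sketch of just that pointer test, again with invented helpers and no real locking or reference counting:

#include <stdio.h>

typedef struct node {
	struct node *prev, *next;
	int id;
} node_t;

static void list_init(node_t *h) { h->prev = h->next = h; }

static void list_add_tail(node_t *n, node_t *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del(node_t *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = n->next = n;
}

/* Stand-in for drm_bo_leave_list(): removes the entry, and for
 * even-numbered nodes also drops its successor, mimicking other
 * buffers disappearing while locks are dropped. */
static void process(node_t *n, node_t *head)
{
	printf("cleaning node %d\n", n->id);
	if ((n->id % 2) == 0 && n->next != head)
		list_del(n->next);
	list_del(n);
}

static void force_list_clean(node_t *head)
{
	node_t *list, *next, *prev;
	int do_restart;

restart:
	for (list = head->next, next = list->next; list != head;
	     list = next, next = list->next) {
		prev = list->prev;

		process(list, head);	/* may reshuffle the list */

		/*
		 * 'next' is still trustworthy if it either still follows
		 * 'list' (nothing moved) or now follows our old 'prev'
		 * (only 'list' itself was removed).  Anything else means
		 * the list changed under us: start over from the head.
		 */
		do_restart = (next->prev != list) && (next->prev != prev);
		if (do_restart)
			goto restart;
	}
}

int main(void)
{
	node_t head, n[4];
	int i;

	list_init(&head);
	for (i = 0; i < 4; ++i) {
		n[i].id = i;
		list_add_tail(&n[i], &head);
	}
	force_list_clean(&head);
	return 0;
}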