author		Jerome Glisse <jglisse@redhat.com>	2009-12-08 15:33:32 +0100
committer	Dave Airlie <airlied@redhat.com>	2009-12-10 15:09:02 +1000
commit		ca262a9998d46196750bb19a9dc4bd465b170ff7 (patch)
tree		b128691e5c57f6305c5752ac5c1b09e6aedfb650 /drivers/gpu/drm/ttm
parent		a2e68e92d384d37c8cc6bb7206d43b1eb9bc3f08 (diff)
drm/ttm: Rework validation & memory space allocation (V3)
This change allows a driver to pass a sorted list of memory placements, from most preferred to least preferred. To avoid a long function prototype, a structure is used to gather memory placement information such as range restrictions (for when a buffer must live in a given range). The range restriction is determined by fpfn & lpfn, the first and last page numbers between which allocation may happen. If those fields are set to 0, ttm assumes the buffer can be placed anywhere in the address space (this avoids putting a burden on drivers to always set those fields properly).

This patch also factors out a few functions, such as evicting the first entry of the lru list and getting a memory space, which avoids code duplication.

V2: Change the API to use placement flags and an array instead of packing the placement order into a quadword.
V3: Make sure we set the appropriate mem.placement flag when validating or allocating memory space.

[Pending further review by Thomas Hellstrom, but okay from preliminary review so far.]

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
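As an illustration, here is a minimal driver-side sketch of the new interface. The struct ttm_placement fields and the ttm_buffer_object_validate() signature are taken from this patch; the TTM_PL_FLAG_VRAM/TTM_PL_FLAG_TT flag names and the example function are assumptions for illustration only:

	/* Hedged sketch: prefer VRAM, fall back to TT, restricted to pages 0-255. */
	static int example_pin_to_range(struct ttm_buffer_object *bo)
	{
		/* sorted from most preferred to least preferred placement */
		uint32_t flags[] = {
			TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING,	/* assumed flag name */
			TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING,	/* assumed flag name */
		};
		struct ttm_placement placement = {
			.fpfn = 0,		/* first allowed page number */
			.lpfn = 256,		/* last allowed page; 0 = no restriction */
			.placement = flags,
			.num_placement = 2,
			.busy_placement = flags,
			.num_busy_placement = 2,
		};

		/* caller must hold the bo reservation, as before this patch */
		return ttm_buffer_object_validate(bo, &placement, true, false);
	}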
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c	463
1 file changed, 221 insertions, 242 deletions
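The driver evict_flags() hook changes shape to match: it no longer returns a packed placement word but fills in a struct ttm_placement (see the ttm_bo_evict() hunk below). A hedged before/after sketch, using TTM_PL_FLAG_SYSTEM as the fallback just as the removed code did; the function names are illustrative, not part of this diff:

	/* Before: the driver returned placement flags as a single word. */
	static uint32_t evict_flags_old(struct ttm_buffer_object *bo)
	{
		return TTM_PL_FLAG_SYSTEM;
	}

	/* After: the callback fills a sorted placement list instead. */
	static void evict_flags_new(struct ttm_buffer_object *bo,
				    struct ttm_placement *placement)
	{
		static uint32_t system_flag = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;

		placement->fpfn = 0;
		placement->lpfn = 0;	/* 0 = anywhere in the address space */
		placement->placement = &system_flag;
		placement->num_placement = 1;
		placement->busy_placement = &system_flag;
		placement->num_busy_placement = 1;
	}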
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index e13fd23f333..60d8179a8bc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,6 +27,14 @@
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
+/* Notes:
+ *
+ * We store a bo pointer in the drm_mm_node struct so we know which bo
+ * owns a specific node. There is no protection on the pointer, so to
+ * make sure things don't go berserk you have to access this pointer
+ * while holding the global lru lock, and make sure you reset the
+ * pointer to NULL anytime you free a node.
+ */
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
@@ -247,7 +255,6 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
/*
* Call bo->mutex locked.
*/
-
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -329,14 +336,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
}
if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-
- struct ttm_mem_reg *old_mem = &bo->mem;
- uint32_t save_flags = old_mem->placement;
-
- *old_mem = *mem;
+ bo->mem = *mem;
mem->mm_node = NULL;
- ttm_flag_masked(&save_flags, mem->placement,
- TTM_PL_MASK_MEMTYPE);
goto moved;
}
@@ -419,6 +420,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
kref_put(&bo->list_kref, ttm_bo_ref_bug);
}
if (bo->mem.mm_node) {
+ bo->mem.mm_node->private = NULL;
drm_mm_put_block(bo->mem.mm_node);
bo->mem.mm_node = NULL;
}
@@ -555,17 +557,14 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
}
EXPORT_SYMBOL(ttm_bo_unref);
-static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
- bool interruptible, bool no_wait)
+static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+ bool no_wait)
{
- int ret = 0;
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
struct ttm_mem_reg evict_mem;
- uint32_t proposed_placement;
-
- if (bo->mem.mem_type != mem_type)
- goto out;
+ struct ttm_placement placement;
+ int ret = 0;
spin_lock(&bo->lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait);
@@ -585,14 +584,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
- proposed_placement = bdev->driver->evict_flags(bo);
-
- ret = ttm_bo_mem_space(bo, proposed_placement,
- &evict_mem, interruptible, no_wait);
- if (unlikely(ret != 0 && ret != -ERESTART))
- ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
- &evict_mem, interruptible, no_wait);
-
+ bdev->driver->evict_flags(bo, &placement);
+ ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
+ no_wait);
if (ret) {
if (ret != -ERESTART)
printk(KERN_ERR TTM_PFX
@@ -606,95 +600,117 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
if (ret) {
if (ret != -ERESTART)
printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
+ spin_lock(&glob->lru_lock);
+ if (evict_mem.mm_node) {
+ evict_mem.mm_node->private = NULL;
+ drm_mm_put_block(evict_mem.mm_node);
+ evict_mem.mm_node = NULL;
+ }
+ spin_unlock(&glob->lru_lock);
goto out;
}
+ bo->evicted = true;
+out:
+ return ret;
+}
+
+static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+ uint32_t mem_type,
+ bool interruptible, bool no_wait)
+{
+ struct ttm_bo_global *glob = bdev->glob;
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ struct ttm_buffer_object *bo;
+ int ret, put_count = 0;
spin_lock(&glob->lru_lock);
- if (evict_mem.mm_node) {
- drm_mm_put_block(evict_mem.mm_node);
- evict_mem.mm_node = NULL;
- }
+ bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
+ kref_get(&bo->list_kref);
+ ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
+ if (likely(ret == 0))
+ put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
- bo->evicted = true;
-out:
+ if (unlikely(ret != 0))
+ return ret;
+ while (put_count--)
+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ret = ttm_bo_evict(bo, interruptible, no_wait);
+ ttm_bo_unreserve(bo);
+ kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
+static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
+ struct ttm_mem_type_manager *man,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ struct drm_mm_node **node)
+{
+ struct ttm_bo_global *glob = bo->glob;
+ unsigned long lpfn;
+ int ret;
+
+ lpfn = placement->lpfn;
+ if (!lpfn)
+ lpfn = man->size;
+ *node = NULL;
+ do {
+ ret = drm_mm_pre_get(&man->manager);
+ if (unlikely(ret))
+ return ret;
+
+ spin_lock(&glob->lru_lock);
+ *node = drm_mm_search_free_in_range(&man->manager,
+ mem->num_pages, mem->page_alignment,
+ placement->fpfn, lpfn, 1);
+ if (unlikely(*node == NULL)) {
+ spin_unlock(&glob->lru_lock);
+ return 0;
+ }
+ *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
+ mem->page_alignment,
+ placement->fpfn,
+ lpfn);
+ spin_unlock(&glob->lru_lock);
+ } while (*node == NULL);
+ return 0;
+}
+
/**
* Repeatedly evict memory from the LRU for @mem_type until we create enough
* space, or we've evicted everything and there isn't enough space.
*/
-static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem,
- uint32_t mem_type,
- bool interruptible, bool no_wait)
+static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
+ uint32_t mem_type,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ bool interruptible, bool no_wait)
{
+ struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bdev->glob;
- struct drm_mm_node *node;
- struct ttm_buffer_object *entry;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct list_head *lru;
- unsigned long num_pages = mem->num_pages;
- int put_count = 0;
+ struct drm_mm_node *node;
int ret;
-retry_pre_get:
- ret = drm_mm_pre_get(&man->manager);
- if (unlikely(ret != 0))
- return ret;
-
- spin_lock(&glob->lru_lock);
do {
- node = drm_mm_search_free(&man->manager, num_pages,
- mem->page_alignment, 1);
+ ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
+ if (unlikely(ret != 0))
+ return ret;
if (node)
break;
-
- lru = &man->lru;
- if (list_empty(lru))
+ spin_lock(&glob->lru_lock);
+ if (list_empty(&man->lru)) {
+ spin_unlock(&glob->lru_lock);
break;
-
- entry = list_first_entry(lru, struct ttm_buffer_object, lru);
- kref_get(&entry->list_kref);
-
- ret =
- ttm_bo_reserve_locked(entry, interruptible, no_wait,
- false, 0);
-
- if (likely(ret == 0))
- put_count = ttm_bo_del_from_lru(entry);
-
+ }
spin_unlock(&glob->lru_lock);
-
+ ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
+ no_wait);
if (unlikely(ret != 0))
return ret;
-
- while (put_count--)
- kref_put(&entry->list_kref, ttm_bo_ref_bug);
-
- ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
-
- ttm_bo_unreserve(entry);
-
- kref_put(&entry->list_kref, ttm_bo_release_list);
- if (ret)
- return ret;
-
- spin_lock(&glob->lru_lock);
} while (1);
-
- if (!node) {
- spin_unlock(&glob->lru_lock);
+ if (node == NULL)
return -ENOMEM;
- }
-
- node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
- if (unlikely(!node)) {
- spin_unlock(&glob->lru_lock);
- goto retry_pre_get;
- }
-
- spin_unlock(&glob->lru_lock);
mem->mm_node = node;
mem->mem_type = mem_type;
return 0;
@@ -725,7 +741,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
return result;
}
-
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
bool disallow_fixed,
uint32_t mem_type,
@@ -749,6 +764,18 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
return true;
}
+static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+{
+ int i;
+
+ for (i = 0; i <= TTM_PL_PRIV5; i++)
+ if (flags & (1 << i)) {
+ *mem_type = i;
+ return 0;
+ }
+ return -EINVAL;
+}
+
/**
* Creates space for memory region @mem according to its type.
*
@@ -758,66 +785,55 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
* space.
*/
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
- uint32_t proposed_placement,
- struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait)
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ bool interruptible, bool no_wait)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_global *glob = bo->glob;
struct ttm_mem_type_manager *man;
-
- uint32_t num_prios = bdev->driver->num_mem_type_prio;
- const uint32_t *prios = bdev->driver->mem_type_prio;
- uint32_t i;
uint32_t mem_type = TTM_PL_SYSTEM;
uint32_t cur_flags = 0;
bool type_found = false;
bool type_ok = false;
bool has_eagain = false;
struct drm_mm_node *node = NULL;
- int ret;
+ int i, ret;
mem->mm_node = NULL;
- for (i = 0; i < num_prios; ++i) {
- mem_type = prios[i];
+ for (i = 0; i < placement->num_placement; ++i) {
+ ret = ttm_mem_type_from_flags(placement->placement[i],
+ &mem_type);
+ if (ret)
+ return ret;
man = &bdev->man[mem_type];
type_ok = ttm_bo_mt_compatible(man,
- bo->type == ttm_bo_type_user,
- mem_type, proposed_placement,
- &cur_flags);
+ bo->type == ttm_bo_type_user,
+ mem_type,
+ placement->placement[i],
+ &cur_flags);
if (!type_ok)
continue;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
+ /*
+ * Apply the access and other non-mapping-related flag bits from
+ * the memory placement flags to the current flags.
+ */
+ ttm_flag_masked(&cur_flags, placement->placement[i],
+ ~TTM_PL_MASK_MEMTYPE);
if (mem_type == TTM_PL_SYSTEM)
break;
if (man->has_type && man->use_type) {
type_found = true;
- do {
- ret = drm_mm_pre_get(&man->manager);
- if (unlikely(ret))
- return ret;
-
- spin_lock(&glob->lru_lock);
- node = drm_mm_search_free(&man->manager,
- mem->num_pages,
- mem->page_alignment,
- 1);
- if (unlikely(!node)) {
- spin_unlock(&glob->lru_lock);
- break;
- }
- node = drm_mm_get_block_atomic(node,
- mem->num_pages,
- mem->
- page_alignment);
- spin_unlock(&glob->lru_lock);
- } while (!node);
+ ret = ttm_bo_man_get_node(bo, man, placement, mem,
+ &node);
+ if (unlikely(ret))
+ return ret;
}
if (node)
break;
@@ -827,43 +843,48 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
mem->mm_node = node;
mem->mem_type = mem_type;
mem->placement = cur_flags;
+ if (node)
+ node->private = bo;
return 0;
}
if (!type_found)
return -EINVAL;
- num_prios = bdev->driver->num_mem_busy_prio;
- prios = bdev->driver->mem_busy_prio;
-
- for (i = 0; i < num_prios; ++i) {
- mem_type = prios[i];
+ for (i = 0; i < placement->num_busy_placement; ++i) {
+ ret = ttm_mem_type_from_flags(placement->busy_placement[i],
+ &mem_type);
+ if (ret)
+ return ret;
man = &bdev->man[mem_type];
-
if (!man->has_type)
continue;
-
if (!ttm_bo_mt_compatible(man,
- bo->type == ttm_bo_type_user,
- mem_type,
- proposed_placement, &cur_flags))
+ bo->type == ttm_bo_type_user,
+ mem_type,
+ placement->busy_placement[i],
+ &cur_flags))
continue;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
+ /*
+ * Apply the access and other non-mapping-related flag bits from
+ * the memory placement flags to the current flags.
+ */
+ ttm_flag_masked(&cur_flags, placement->busy_placement[i],
+ ~TTM_PL_MASK_MEMTYPE);
- ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
- interruptible, no_wait);
-
+ ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+ interruptible, no_wait);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
+ mem->mm_node->private = bo;
return 0;
}
-
if (ret == -ERESTART)
has_eagain = true;
}
-
ret = (has_eagain) ? -ERESTART : -ENOMEM;
return ret;
}
@@ -886,8 +907,8 @@ int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
}
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
- uint32_t proposed_placement,
- bool interruptible, bool no_wait)
+ struct ttm_placement *placement,
+ bool interruptible, bool no_wait)
{
struct ttm_bo_global *glob = bo->glob;
int ret = 0;
@@ -900,101 +921,82 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* Have the driver move function wait for idle when necessary,
* instead of doing it here.
*/
-
spin_lock(&bo->lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait);
spin_unlock(&bo->lock);
-
if (ret)
return ret;
-
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
-
/*
* Determine where to move the buffer.
*/
-
- ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
- interruptible, no_wait);
+ ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
if (ret)
goto out_unlock;
-
ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
-
out_unlock:
if (ret && mem.mm_node) {
spin_lock(&glob->lru_lock);
+ mem.mm_node->private = NULL;
drm_mm_put_block(mem.mm_node);
spin_unlock(&glob->lru_lock);
}
return ret;
}
-static int ttm_bo_mem_compat(uint32_t proposed_placement,
+static int ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
- if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
- return 0;
- if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
- return 0;
-
- return 1;
+ int i;
+
+ for (i = 0; i < placement->num_placement; i++) {
+ if ((placement->placement[i] & mem->placement &
+ TTM_PL_MASK_CACHING) &&
+ (placement->placement[i] & mem->placement &
+ TTM_PL_MASK_MEM))
+ return i;
+ }
+ return -1;
}
int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
- uint32_t proposed_placement,
- bool interruptible, bool no_wait)
+ struct ttm_placement *placement,
+ bool interruptible, bool no_wait)
{
int ret;
BUG_ON(!atomic_read(&bo->reserved));
- bo->proposed_placement = proposed_placement;
-
- TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
- (unsigned long)proposed_placement,
- (unsigned long)bo->mem.placement);
-
+ /* Check that range is valid */
+ if (placement->lpfn || placement->fpfn)
+ if (placement->fpfn > placement->lpfn ||
+ (placement->lpfn - placement->fpfn) < bo->num_pages)
+ return -EINVAL;
/*
* Check whether we need to move buffer.
*/
-
- if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
- ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
- interruptible, no_wait);
- if (ret) {
- if (ret != -ERESTART)
- printk(KERN_ERR TTM_PFX
- "Failed moving buffer. "
- "Proposed placement 0x%08x\n",
- bo->proposed_placement);
- if (ret == -ENOMEM)
- printk(KERN_ERR TTM_PFX
- "Out of aperture space or "
- "DRM memory quota.\n");
+ ret = ttm_bo_mem_compat(placement, &bo->mem);
+ if (ret < 0) {
+ ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+ if (ret)
return ret;
- }
+ } else {
+ /*
+ * Apply the access and other non-mapping-related flag bits from
+ * the compatible memory placement flags to the active flags.
+ */
+ ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+ ~TTM_PL_MASK_MEMTYPE);
}
-
/*
* We might need to add a TTM.
*/
-
if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
ret = ttm_bo_add_ttm(bo, true);
if (ret)
return ret;
}
- /*
- * Validation has succeeded, move the access and other
- * non-mapping-related flag bits from the proposed flags to
- * the active flags
- */
-
- ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
- ~TTM_PL_MASK_MEMTYPE);
-
return 0;
}
EXPORT_SYMBOL(ttm_buffer_object_validate);
@@ -1042,8 +1044,10 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
size_t acc_size,
void (*destroy) (struct ttm_buffer_object *))
{
- int ret = 0;
+ int i, c, ret = 0;
unsigned long num_pages;
+ uint32_t placements[8];
+ struct ttm_placement placement;
size += buffer_start & ~PAGE_MASK;
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -1100,7 +1104,16 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
goto out_err;
}
- ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
+ placement.fpfn = 0;
+ placement.lpfn = 0;
+ for (i = 0, c = 0; i <= TTM_PL_PRIV5; i++)
+ if (flags & (1 << i))
+ placements[c++] = (flags & ~TTM_PL_MASK_MEM) | (1 << i);
+ placement.placement = placements;
+ placement.num_placement = c;
+ placement.busy_placement = placements;
+ placement.num_busy_placement = c;
+ ret = ttm_buffer_object_validate(bo, &placement, interruptible, false);
if (ret)
goto out_err;
@@ -1135,8 +1148,8 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo;
- int ret;
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+ int ret;
size_t acc_size =
ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
@@ -1161,66 +1174,32 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
return ret;
}
-static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
- uint32_t mem_type, bool allow_errors)
-{
- int ret;
-
- spin_lock(&bo->lock);
- ret = ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bo->lock);
-
- if (ret && allow_errors)
- goto out;
-
- if (bo->mem.mem_type == mem_type)
- ret = ttm_bo_evict(bo, mem_type, false, false);
-
- if (ret) {
- if (allow_errors) {
- goto out;
- } else {
- ret = 0;
- printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
- }
- }
-
-out:
- return ret;
-}
-
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
- struct list_head *head,
- unsigned mem_type, bool allow_errors)
+ unsigned mem_type, bool allow_errors)
{
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob;
- struct ttm_buffer_object *entry;
int ret;
- int put_count;
/*
* Can't use standard list traversal since we're unlocking.
*/
spin_lock(&glob->lru_lock);
-
- while (!list_empty(head)) {
- entry = list_first_entry(head, struct ttm_buffer_object, lru);
- kref_get(&entry->list_kref);
- ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
- put_count = ttm_bo_del_from_lru(entry);
+ while (!list_empty(&man->lru)) {
spin_unlock(&glob->lru_lock);
- while (put_count--)
- kref_put(&entry->list_kref, ttm_bo_ref_bug);
- BUG_ON(ret);
- ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
- ttm_bo_unreserve(entry);
- kref_put(&entry->list_kref, ttm_bo_release_list);
+ ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+ if (ret) {
+ if (allow_errors) {
+ return ret;
+ } else {
+ printk(KERN_ERR TTM_PFX
+ "Cleanup eviction failed\n");
+ }
+ }
spin_lock(&glob->lru_lock);
}
-
spin_unlock(&glob->lru_lock);
-
return 0;
}
@@ -1247,7 +1226,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
ret = 0;
if (mem_type > 0) {
- ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
+ ttm_bo_force_list_clean(bdev, mem_type, false);
spin_lock(&glob->lru_lock);
if (drm_mm_clean(&man->manager))
@@ -1280,12 +1259,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
return 0;
}
- return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
+ return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
- unsigned long p_offset, unsigned long p_size)
+ unsigned long p_size)
{
int ret = -EINVAL;
struct ttm_mem_type_manager *man;
@@ -1315,7 +1294,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
type);
return ret;
}
- ret = drm_mm_init(&man->manager, p_offset, p_size);
+ ret = drm_mm_init(&man->manager, 0, p_size);
if (ret)
return ret;
}
@@ -1464,7 +1443,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
* Initialize the system memory buffer type.
* Other types need to be driver / IOCTL initialized.
*/
- ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
+ ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
if (unlikely(ret != 0))
goto out_no_sys;
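Driver-facing note: ttm_bo_init_mm() also loses its p_offset argument here, since the managed range now always starts at offset 0. A hypothetical VRAM call site would change like so (TTM_PL_VRAM and vram_pages are illustrative; only the dropped offset argument is from this diff):

	/* before */
	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, 0, vram_pages);
	/* after */
	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_pages);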