Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--   drivers/gpu/drm/ttm/Makefile           |    3
-rw-r--r--   drivers/gpu/drm/ttm/ttm_bo.c           |  545
-rw-r--r--   drivers/gpu/drm/ttm/ttm_bo_util.c      |    1
-rw-r--r--   drivers/gpu/drm/ttm/ttm_bo_vm.c        |    7
-rw-r--r--   drivers/gpu/drm/ttm/ttm_execbuf_util.c |  117
-rw-r--r--   drivers/gpu/drm/ttm/ttm_lock.c         |  311
-rw-r--r--   drivers/gpu/drm/ttm/ttm_memory.c       |   16
-rw-r--r--   drivers/gpu/drm/ttm/ttm_object.c       |  452
-rw-r--r--   drivers/gpu/drm/ttm/ttm_tt.c           |    1
9 files changed, 1179 insertions, 274 deletions
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b0a9de7a57c..1e138f5bae0 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -3,6 +3,7 @@
ccflags-y := -Iinclude/drm
ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
- ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o
+ ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
+ ttm_object.o ttm_lock.o ttm_execbuf_util.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 87c06252d46..a835b6fe42a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,6 +27,14 @@
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
+/* Notes:
+ *
+ * We store a bo pointer in the drm_mm_node struct so we know which bo
+ * owns a specific node. There is no protection on the pointer, so to
+ * keep things sane you must only access it while holding the global
+ * LRU lock, and be sure to reset it to NULL whenever you free a node.
+ */
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
@@ -51,6 +59,60 @@ static struct attribute ttm_bo_count = {
.mode = S_IRUGO
};
+static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+{
+ int i;
+
+ for (i = 0; i <= TTM_PL_PRIV5; i++)
+ if (flags & (1 << i)) {
+ *mem_type = i;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void ttm_mem_type_manager_debug(struct ttm_bo_global *glob,
+ struct ttm_mem_type_manager *man)
+{
+ printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
+ printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
+ printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
+ printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
+ printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
+ printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
+ printk(KERN_ERR TTM_PFX " size: %ld\n", (unsigned long)man->size);
+ printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
+ man->available_caching);
+ printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
+ man->default_caching);
+ spin_lock(&glob->lru_lock);
+ drm_mm_debug_table(&man->manager, TTM_PFX);
+ spin_unlock(&glob->lru_lock);
+}
+
+static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_global *glob = bo->glob;
+ struct ttm_mem_type_manager *man;
+ int i, ret, mem_type;
+
+ printk(KERN_ERR TTM_PFX "No space for %p (%ld pages, %ldK, %ldM)\n",
+ bo, bo->mem.num_pages, bo->mem.size >> 10,
+ bo->mem.size >> 20);
+ for (i = 0; i < placement->num_placement; i++) {
+ ret = ttm_mem_type_from_flags(placement->placement[i],
+ &mem_type);
+ if (ret)
+ return;
+ man = &bdev->man[mem_type];
+ printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
+ i, placement->placement[i], mem_type);
+ ttm_mem_type_manager_debug(glob, man);
+ }
+}
+
static ssize_t ttm_bo_global_show(struct kobject *kobj,
struct attribute *attr,
char *buffer)
@@ -117,7 +179,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
ret = wait_event_interruptible(bo->event_queue,
atomic_read(&bo->reserved) == 0);
if (unlikely(ret != 0))
- return -ERESTART;
+ return ret;
} else {
wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
}
@@ -247,7 +309,6 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
/*
* Call bo->mutex locked.
*/
-
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -275,9 +336,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
page_flags | TTM_PAGE_FLAG_USER,
glob->dummy_read_page);
- if (unlikely(bo->ttm == NULL))
+ if (unlikely(bo->ttm == NULL)) {
ret = -ENOMEM;
- break;
+ break;
+ }
ret = ttm_tt_set_user(bo->ttm, current,
bo->buffer_start, bo->num_pages);
@@ -328,14 +390,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
}
if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-
- struct ttm_mem_reg *old_mem = &bo->mem;
- uint32_t save_flags = old_mem->placement;
-
- *old_mem = *mem;
+ bo->mem = *mem;
mem->mm_node = NULL;
- ttm_flag_masked(&save_flags, mem->placement,
- TTM_PL_MASK_MEMTYPE);
goto moved;
}
@@ -418,6 +474,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
kref_put(&bo->list_kref, ttm_bo_ref_bug);
}
if (bo->mem.mm_node) {
+ bo->mem.mm_node->private = NULL;
drm_mm_put_block(bo->mem.mm_node);
bo->mem.mm_node = NULL;
}
@@ -554,24 +611,21 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
}
EXPORT_SYMBOL(ttm_bo_unref);
-static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
- bool interruptible, bool no_wait)
+static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+ bool no_wait)
{
- int ret = 0;
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
struct ttm_mem_reg evict_mem;
- uint32_t proposed_placement;
-
- if (bo->mem.mem_type != mem_type)
- goto out;
+ struct ttm_placement placement;
+ int ret = 0;
spin_lock(&bo->lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait);
spin_unlock(&bo->lock);
if (unlikely(ret != 0)) {
- if (ret != -ERESTART) {
+ if (ret != -ERESTARTSYS) {
printk(KERN_ERR TTM_PFX
"Failed to expire sync object before "
"buffer eviction.\n");
@@ -584,116 +638,139 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
- proposed_placement = bdev->driver->evict_flags(bo);
-
- ret = ttm_bo_mem_space(bo, proposed_placement,
- &evict_mem, interruptible, no_wait);
- if (unlikely(ret != 0 && ret != -ERESTART))
- ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
- &evict_mem, interruptible, no_wait);
-
+ placement.fpfn = 0;
+ placement.lpfn = 0;
+ placement.num_placement = 0;
+ placement.num_busy_placement = 0;
+ bdev->driver->evict_flags(bo, &placement);
+ ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
+ no_wait);
if (ret) {
- if (ret != -ERESTART)
+ if (ret != -ERESTARTSYS) {
printk(KERN_ERR TTM_PFX
"Failed to find memory space for "
"buffer 0x%p eviction.\n", bo);
+ ttm_bo_mem_space_debug(bo, &placement);
+ }
goto out;
}
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
no_wait);
if (ret) {
- if (ret != -ERESTART)
+ if (ret != -ERESTARTSYS)
printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
+ spin_lock(&glob->lru_lock);
+ if (evict_mem.mm_node) {
+ evict_mem.mm_node->private = NULL;
+ drm_mm_put_block(evict_mem.mm_node);
+ evict_mem.mm_node = NULL;
+ }
+ spin_unlock(&glob->lru_lock);
goto out;
}
+ bo->evicted = true;
+out:
+ return ret;
+}
+
+static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+ uint32_t mem_type,
+ bool interruptible, bool no_wait)
+{
+ struct ttm_bo_global *glob = bdev->glob;
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ struct ttm_buffer_object *bo;
+ int ret, put_count = 0;
spin_lock(&glob->lru_lock);
- if (evict_mem.mm_node) {
- drm_mm_put_block(evict_mem.mm_node);
- evict_mem.mm_node = NULL;
- }
+ bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
+ kref_get(&bo->list_kref);
+ ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
+ if (likely(ret == 0))
+ put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
- bo->evicted = true;
-out:
+ if (unlikely(ret != 0))
+ return ret;
+ while (put_count--)
+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ret = ttm_bo_evict(bo, interruptible, no_wait);
+ ttm_bo_unreserve(bo);
+ kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
+static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
+ struct ttm_mem_type_manager *man,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ struct drm_mm_node **node)
+{
+ struct ttm_bo_global *glob = bo->glob;
+ unsigned long lpfn;
+ int ret;
+
+ lpfn = placement->lpfn;
+ if (!lpfn)
+ lpfn = man->size;
+ *node = NULL;
+ do {
+ ret = drm_mm_pre_get(&man->manager);
+ if (unlikely(ret))
+ return ret;
+
+ spin_lock(&glob->lru_lock);
+ *node = drm_mm_search_free_in_range(&man->manager,
+ mem->num_pages, mem->page_alignment,
+ placement->fpfn, lpfn, 1);
+ if (unlikely(*node == NULL)) {
+ spin_unlock(&glob->lru_lock);
+ return 0;
+ }
+ *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
+ mem->page_alignment,
+ placement->fpfn,
+ lpfn);
+ spin_unlock(&glob->lru_lock);
+ } while (*node == NULL);
+ return 0;
+}
+
/**
* Repeatedly evict memory from the LRU for @mem_type until we create enough
* space, or we've evicted everything and there isn't enough space.
*/
-static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem,
- uint32_t mem_type,
- bool interruptible, bool no_wait)
+static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
+ uint32_t mem_type,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ bool interruptible, bool no_wait)
{
+ struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bdev->glob;
- struct drm_mm_node *node;
- struct ttm_buffer_object *entry;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct list_head *lru;
- unsigned long num_pages = mem->num_pages;
- int put_count = 0;
+ struct drm_mm_node *node;
int ret;
-retry_pre_get:
- ret = drm_mm_pre_get(&man->manager);
- if (unlikely(ret != 0))
- return ret;
-
- spin_lock(&glob->lru_lock);
do {
- node = drm_mm_search_free(&man->manager, num_pages,
- mem->page_alignment, 1);
+ ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
+ if (unlikely(ret != 0))
+ return ret;
if (node)
break;
-
- lru = &man->lru;
- if (list_empty(lru))
+ spin_lock(&glob->lru_lock);
+ if (list_empty(&man->lru)) {
+ spin_unlock(&glob->lru_lock);
break;
-
- entry = list_first_entry(lru, struct ttm_buffer_object, lru);
- kref_get(&entry->list_kref);
-
- ret =
- ttm_bo_reserve_locked(entry, interruptible, no_wait,
- false, 0);
-
- if (likely(ret == 0))
- put_count = ttm_bo_del_from_lru(entry);
-
+ }
spin_unlock(&glob->lru_lock);
-
+ ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
+ no_wait);
if (unlikely(ret != 0))
return ret;
-
- while (put_count--)
- kref_put(&entry->list_kref, ttm_bo_ref_bug);
-
- ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
-
- ttm_bo_unreserve(entry);
-
- kref_put(&entry->list_kref, ttm_bo_release_list);
- if (ret)
- return ret;
-
- spin_lock(&glob->lru_lock);
} while (1);
-
- if (!node) {
- spin_unlock(&glob->lru_lock);
+ if (node == NULL)
return -ENOMEM;
- }
-
- node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
- if (unlikely(!node)) {
- spin_unlock(&glob->lru_lock);
- goto retry_pre_get;
- }
-
- spin_unlock(&glob->lru_lock);
mem->mm_node = node;
mem->mem_type = mem_type;
return 0;
@@ -724,7 +801,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
return result;
}
-
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
bool disallow_fixed,
uint32_t mem_type,
@@ -757,66 +833,55 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
* space.
*/
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
- uint32_t proposed_placement,
- struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait)
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ bool interruptible, bool no_wait)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_global *glob = bo->glob;
struct ttm_mem_type_manager *man;
-
- uint32_t num_prios = bdev->driver->num_mem_type_prio;
- const uint32_t *prios = bdev->driver->mem_type_prio;
- uint32_t i;
uint32_t mem_type = TTM_PL_SYSTEM;
uint32_t cur_flags = 0;
bool type_found = false;
bool type_ok = false;
- bool has_eagain = false;
+ bool has_erestartsys = false;
struct drm_mm_node *node = NULL;
- int ret;
+ int i, ret;
mem->mm_node = NULL;
- for (i = 0; i < num_prios; ++i) {
- mem_type = prios[i];
+ for (i = 0; i < placement->num_placement; ++i) {
+ ret = ttm_mem_type_from_flags(placement->placement[i],
+ &mem_type);
+ if (ret)
+ return ret;
man = &bdev->man[mem_type];
type_ok = ttm_bo_mt_compatible(man,
- bo->type == ttm_bo_type_user,
- mem_type, proposed_placement,
- &cur_flags);
+ bo->type == ttm_bo_type_user,
+ mem_type,
+ placement->placement[i],
+ &cur_flags);
if (!type_ok)
continue;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
+ /*
+ * Fold the access and other non-mapping-related flag bits from
+ * the memory placement flags into the current flags.
+ */
+ ttm_flag_masked(&cur_flags, placement->placement[i],
+ ~TTM_PL_MASK_MEMTYPE);
if (mem_type == TTM_PL_SYSTEM)
break;
if (man->has_type && man->use_type) {
type_found = true;
- do {
- ret = drm_mm_pre_get(&man->manager);
- if (unlikely(ret))
- return ret;
-
- spin_lock(&glob->lru_lock);
- node = drm_mm_search_free(&man->manager,
- mem->num_pages,
- mem->page_alignment,
- 1);
- if (unlikely(!node)) {
- spin_unlock(&glob->lru_lock);
- break;
- }
- node = drm_mm_get_block_atomic(node,
- mem->num_pages,
- mem->
- page_alignment);
- spin_unlock(&glob->lru_lock);
- } while (!node);
+ ret = ttm_bo_man_get_node(bo, man, placement, mem,
+ &node);
+ if (unlikely(ret))
+ return ret;
}
if (node)
break;
@@ -826,67 +891,65 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
mem->mm_node = node;
mem->mem_type = mem_type;
mem->placement = cur_flags;
+ if (node)
+ node->private = bo;
return 0;
}
if (!type_found)
return -EINVAL;
- num_prios = bdev->driver->num_mem_busy_prio;
- prios = bdev->driver->mem_busy_prio;
-
- for (i = 0; i < num_prios; ++i) {
- mem_type = prios[i];
+ for (i = 0; i < placement->num_busy_placement; ++i) {
+ ret = ttm_mem_type_from_flags(placement->busy_placement[i],
+ &mem_type);
+ if (ret)
+ return ret;
man = &bdev->man[mem_type];
-
if (!man->has_type)
continue;
-
if (!ttm_bo_mt_compatible(man,
- bo->type == ttm_bo_type_user,
- mem_type,
- proposed_placement, &cur_flags))
+ bo->type == ttm_bo_type_user,
+ mem_type,
+ placement->busy_placement[i],
+ &cur_flags))
continue;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
+ /*
+ * Fold the access and other non-mapping-related flag bits from
+ * the memory placement flags into the current flags.
+ */
+ ttm_flag_masked(&cur_flags, placement->busy_placement[i],
+ ~TTM_PL_MASK_MEMTYPE);
- ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
- interruptible, no_wait);
-
+ ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+ interruptible, no_wait);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
+ mem->mm_node->private = bo;
return 0;
}
-
- if (ret == -ERESTART)
- has_eagain = true;
+ if (ret == -ERESTARTSYS)
+ has_erestartsys = true;
}
-
- ret = (has_eagain) ? -ERESTART : -ENOMEM;
+ ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
- int ret = 0;
-
if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
return -EBUSY;
- ret = wait_event_interruptible(bo->event_queue,
- atomic_read(&bo->cpu_writers) == 0);
-
- if (ret == -ERESTARTSYS)
- ret = -ERESTART;
-
- return ret;
+ return wait_event_interruptible(bo->event_queue,
+ atomic_read(&bo->cpu_writers) == 0);
}
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
- uint32_t proposed_placement,
- bool interruptible, bool no_wait)
+ struct ttm_placement *placement,
+ bool interruptible, bool no_wait)
{
struct ttm_bo_global *glob = bo->glob;
int ret = 0;
@@ -899,101 +962,82 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* Have the driver move function wait for idle when necessary,
* instead of doing it here.
*/
-
spin_lock(&bo->lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait);
spin_unlock(&bo->lock);
-
if (ret)
return ret;
-
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
-
/*
* Determine where to move the buffer.
*/
-
- ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
- interruptible, no_wait);
+ ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
if (ret)
goto out_unlock;
-
ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
-
out_unlock:
if (ret && mem.mm_node) {
spin_lock(&glob->lru_lock);
+ mem.mm_node->private = NULL;
drm_mm_put_block(mem.mm_node);
spin_unlock(&glob->lru_lock);
}
return ret;
}
-static int ttm_bo_mem_compat(uint32_t proposed_placement,
+static int ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
- if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
- return 0;
- if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
- return 0;
-
- return 1;
+ int i;
+
+ for (i = 0; i < placement->num_placement; i++) {
+ if ((placement->placement[i] & mem->placement &
+ TTM_PL_MASK_CACHING) &&
+ (placement->placement[i] & mem->placement &
+ TTM_PL_MASK_MEM))
+ return i;
+ }
+ return -1;
}
int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
- uint32_t proposed_placement,
- bool interruptible, bool no_wait)
+ struct ttm_placement *placement,
+ bool interruptible, bool no_wait)
{
int ret;
BUG_ON(!atomic_read(&bo->reserved));
- bo->proposed_placement = proposed_placement;
-
- TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
- (unsigned long)proposed_placement,
- (unsigned long)bo->mem.placement);
-
+ /* Check that range is valid */
+ if (placement->lpfn || placement->fpfn)
+ if (placement->fpfn > placement->lpfn ||
+ (placement->lpfn - placement->fpfn) < bo->num_pages)
+ return -EINVAL;
/*
* Check whether we need to move buffer.
*/
-
- if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
- ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
- interruptible, no_wait);
- if (ret) {
- if (ret != -ERESTART)
- printk(KERN_ERR TTM_PFX
- "Failed moving buffer. "
- "Proposed placement 0x%08x\n",
- bo->proposed_placement);
- if (ret == -ENOMEM)
- printk(KERN_ERR TTM_PFX
- "Out of aperture space or "
- "DRM memory quota.\n");
+ ret = ttm_bo_mem_compat(placement, &bo->mem);
+ if (ret < 0) {
+ ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+ if (ret)
return ret;
- }
+ } else {
+ /*
+ * Fold the access and other non-mapping-related flag bits from
+ * the compatible memory placement flags into the active flags.
+ */
+ ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+ ~TTM_PL_MASK_MEMTYPE);
}
-
/*
* We might need to add a TTM.
*/
-
if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
ret = ttm_bo_add_ttm(bo, true);
if (ret)
return ret;
}
- /*
- * Validation has succeeded, move the access and other
- * non-mapping-related flag bits from the proposed flags to
- * the active flags
- */
-
- ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
- ~TTM_PL_MASK_MEMTYPE);
-
return 0;
}
EXPORT_SYMBOL(ttm_buffer_object_validate);
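With the flags argument replaced by struct ttm_placement, a driver-side caller now builds the placement list itself. A minimal sketch, assuming a VRAM-capable driver; the function name and the VRAM/WC flag choice are illustrative, not from this patch:

    /* Hypothetical caller; bo must already be reserved, as the BUG_ON
     * in ttm_buffer_object_validate() requires. */
    static int example_validate_vram(struct ttm_buffer_object *bo)
    {
    	uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC;
    	struct ttm_placement placement = {
    		.fpfn = 0,
    		.lpfn = 0,		/* 0 means no upper page-frame limit */
    		.num_placement = 1,
    		.placement = &flags,
    		.num_busy_placement = 1,
    		.busy_placement = &flags,
    	};

    	return ttm_buffer_object_validate(bo, &placement, true, false);
    }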
@@ -1041,8 +1085,10 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
size_t acc_size,
void (*destroy) (struct ttm_buffer_object *))
{
- int ret = 0;
+ int i, c, ret = 0;
unsigned long num_pages;
+ uint32_t placements[8];
+ struct ttm_placement placement;
size += buffer_start & ~PAGE_MASK;
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -1099,7 +1145,16 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
goto out_err;
}
- ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
+ placement.fpfn = 0;
+ placement.lpfn = 0;
+ for (i = 0, c = 0; i <= TTM_PL_PRIV5; i++)
+ if (flags & (1 << i))
+ placements[c++] = (flags & ~TTM_PL_MASK_MEM) | (1 << i);
+ placement.placement = placements;
+ placement.num_placement = c;
+ placement.busy_placement = placements;
+ placement.num_busy_placement = c;
+ ret = ttm_buffer_object_validate(bo, &placement, interruptible, false);
if (ret)
goto out_err;
@@ -1134,8 +1189,8 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo;
- int ret;
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+ int ret;
size_t acc_size =
ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
@@ -1160,66 +1215,32 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
return ret;
}
-static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
- uint32_t mem_type, bool allow_errors)
-{
- int ret;
-
- spin_lock(&bo->lock);
- ret = ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bo->lock);
-
- if (ret && allow_errors)
- goto out;
-
- if (bo->mem.mem_type == mem_type)
- ret = ttm_bo_evict(bo, mem_type, false, false);
-
- if (ret) {
- if (allow_errors) {
- goto out;
- } else {
- ret = 0;
- printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
- }
- }
-
-out:
- return ret;
-}
-
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
- struct list_head *head,
- unsigned mem_type, bool allow_errors)
+ unsigned mem_type, bool allow_errors)
{
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob;
- struct ttm_buffer_object *entry;
int ret;
- int put_count;
/*
* Can't use standard list traversal since we're unlocking.
*/
spin_lock(&glob->lru_lock);
-
- while (!list_empty(head)) {
- entry = list_first_entry(head, struct ttm_buffer_object, lru);
- kref_get(&entry->list_kref);
- ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
- put_count = ttm_bo_del_from_lru(entry);
+ while (!list_empty(&man->lru)) {
spin_unlock(&glob->lru_lock);
- while (put_count--)
- kref_put(&entry->list_kref, ttm_bo_ref_bug);
- BUG_ON(ret);
- ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
- ttm_bo_unreserve(entry);
- kref_put(&entry->list_kref, ttm_bo_release_list);
+ ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+ if (ret) {
+ if (allow_errors) {
+ return ret;
+ } else {
+ printk(KERN_ERR TTM_PFX
+ "Cleanup eviction failed\n");
+ }
+ }
spin_lock(&glob->lru_lock);
}
-
spin_unlock(&glob->lru_lock);
-
return 0;
}
@@ -1246,7 +1267,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
ret = 0;
if (mem_type > 0) {
- ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
+ ttm_bo_force_list_clean(bdev, mem_type, false);
spin_lock(&glob->lru_lock);
if (drm_mm_clean(&man->manager))
@@ -1279,12 +1300,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
return 0;
}
- return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
+ return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
- unsigned long p_offset, unsigned long p_size)
+ unsigned long p_size)
{
int ret = -EINVAL;
struct ttm_mem_type_manager *man;
@@ -1314,7 +1335,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
type);
return ret;
}
- ret = drm_mm_init(&man->manager, p_offset, p_size);
+ ret = drm_mm_init(&man->manager, 0, p_size);
if (ret)
return ret;
}
@@ -1463,7 +1484,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
* Initialize the system memory buffer type.
* Other types need to be driver / IOCTL initialized.
*/
- ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
+ ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
if (unlikely(ret != 0))
goto out_no_sys;
@@ -1693,7 +1714,7 @@ int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
ret = wait_event_interruptible
(bo->event_queue, atomic_read(&bo->reserved) == 0);
if (unlikely(ret != 0))
- return -ERESTART;
+ return ret;
} else {
wait_event(bo->event_queue,
atomic_read(&bo->reserved) == 0);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 61c5572d2b9..2ecf7d0c64f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -369,6 +369,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
#endif
return tmp;
}
+EXPORT_SYMBOL(ttm_io_prot);
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
unsigned long bus_base,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 1c040d04033..609a85a4d85 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -114,7 +114,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = ttm_bo_wait(bo, false, true, false);
spin_unlock(&bo->lock);
if (unlikely(ret != 0)) {
- retval = (ret != -ERESTART) ?
+ retval = (ret != -ERESTARTSYS) ?
VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
goto out_unlock;
}
@@ -349,9 +349,6 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
switch (ret) {
case 0:
break;
- case -ERESTART:
- ret = -EINTR;
- goto out_unref;
case -EBUSY:
ret = -EAGAIN;
goto out_unref;
@@ -421,8 +418,6 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
switch (ret) {
case 0:
break;
- case -ERESTART:
- return -EINTR;
case -EBUSY:
return -EAGAIN;
default:
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
new file mode 100644
index 00000000000..c285c2902d1
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -0,0 +1,117 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "ttm/ttm_execbuf_util.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+void ttm_eu_backoff_reservation(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+ if (!entry->reserved)
+ continue;
+
+ entry->reserved = false;
+ ttm_bo_unreserve(bo);
+ }
+}
+EXPORT_SYMBOL(ttm_eu_backoff_reservation);
+
+/*
+ * Reserve buffers for validation.
+ *
+ * If a buffer in the list is marked for CPU access, we back off and
+ * wait for that buffer to become free for GPU access.
+ *
+ * If a buffer is reserved for another validation, the validator with
+ * the highest validation sequence backs off and waits for that buffer
+ * to become unreserved. This prevents deadlocks when validating multiple
+ * buffers in different orders.
+ */
+
+int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
+{
+ struct ttm_validate_buffer *entry;
+ int ret;
+
+retry:
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+
+ entry->reserved = false;
+ ret = ttm_bo_reserve(bo, true, false, true, val_seq);
+ if (ret != 0) {
+ ttm_eu_backoff_reservation(list);
+ if (ret == -EAGAIN) {
+ ret = ttm_bo_wait_unreserved(bo, true);
+ if (unlikely(ret != 0))
+ return ret;
+ goto retry;
+ } else
+ return ret;
+ }
+
+ entry->reserved = true;
+ if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+ ttm_eu_backoff_reservation(list);
+ ret = ttm_bo_wait_cpu(bo, false);
+ if (ret)
+ return ret;
+ goto retry;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ttm_eu_reserve_buffers);
+
+void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+ struct ttm_bo_driver *driver = bo->bdev->driver;
+ void *old_sync_obj;
+
+ spin_lock(&bo->lock);
+ old_sync_obj = bo->sync_obj;
+ bo->sync_obj = driver->sync_obj_ref(sync_obj);
+ bo->sync_obj_arg = entry->new_sync_obj_arg;
+ spin_unlock(&bo->lock);
+ ttm_bo_unreserve(bo);
+ entry->reserved = false;
+ if (old_sync_obj)
+ driver->sync_obj_unref(&old_sync_obj);
+ }
+}
+EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
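A sketch of how a driver's execbuf path might use these helpers, assuming the driver has already built a list of struct ttm_validate_buffer entries; all names here are hypothetical:

    /* Hypothetical submission path using the execbuf utils. */
    static int example_submit(struct list_head *val_list, uint32_t val_seq,
                              void *fence)
    {
    	int ret;

    	/* deadlock-safe reservation of every buffer on the list */
    	ret = ttm_eu_reserve_buffers(val_list, val_seq);
    	if (unlikely(ret != 0))
    		return ret;

    	/*
    	 * Validate each entry->bo into its placement here; on failure,
    	 * call ttm_eu_backoff_reservation(val_list) and bail out.
    	 */

    	/* ... emit GPU commands referencing the buffers ... */

    	/* attach the fence to every buffer and drop the reservations */
    	ttm_eu_fence_buffer_objects(val_list, fence);
    	return 0;
    }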
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
new file mode 100644
index 00000000000..f619ebcaa4e
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -0,0 +1,311 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "ttm/ttm_lock.h"
+#include "ttm/ttm_module.h"
+#include <asm/atomic.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+#define TTM_WRITE_LOCK_PENDING (1 << 0)
+#define TTM_VT_LOCK_PENDING (1 << 1)
+#define TTM_SUSPEND_LOCK_PENDING (1 << 2)
+#define TTM_VT_LOCK (1 << 3)
+#define TTM_SUSPEND_LOCK (1 << 4)
+
+void ttm_lock_init(struct ttm_lock *lock)
+{
+ spin_lock_init(&lock->lock);
+ init_waitqueue_head(&lock->queue);
+ lock->rw = 0;
+ lock->flags = 0;
+ lock->kill_takers = false;
+ lock->signal = SIGKILL;
+}
+EXPORT_SYMBOL(ttm_lock_init);
+
+void ttm_read_unlock(struct ttm_lock *lock)
+{
+ spin_lock(&lock->lock);
+ if (--lock->rw == 0)
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+}
+EXPORT_SYMBOL(ttm_read_unlock);
+
+static bool __ttm_read_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ spin_lock(&lock->lock);
+ if (unlikely(lock->kill_takers)) {
+ send_sig(lock->signal, current, 0);
+ spin_unlock(&lock->lock);
+ return false;
+ }
+ if (lock->rw >= 0 && lock->flags == 0) {
+ ++lock->rw;
+ locked = true;
+ }
+ spin_unlock(&lock->lock);
+ return locked;
+}
+
+int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
+{
+ int ret = 0;
+
+ if (interruptible)
+ ret = wait_event_interruptible(lock->queue,
+ __ttm_read_lock(lock));
+ else
+ wait_event(lock->queue, __ttm_read_lock(lock));
+ return ret;
+}
+EXPORT_SYMBOL(ttm_read_lock);
+
+static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
+{
+ bool block = true;
+
+ *locked = false;
+
+ spin_lock(&lock->lock);
+ if (unlikely(lock->kill_takers)) {
+ send_sig(lock->signal, current, 0);
+ spin_unlock(&lock->lock);
+ return false;
+ }
+ if (lock->rw >= 0 && lock->flags == 0) {
+ ++lock->rw;
+ block = false;
+ *locked = true;
+ } else if (lock->flags == 0) {
+ block = false;
+ }
+ spin_unlock(&lock->lock);
+
+ return !block;
+}
+
+int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
+{
+ int ret = 0;
+ bool locked;
+
+ if (interruptible)
+ ret = wait_event_interruptible
+ (lock->queue, __ttm_read_trylock(lock, &locked));
+ else
+ wait_event(lock->queue, __ttm_read_trylock(lock, &locked));
+
+ if (unlikely(ret != 0)) {
+ BUG_ON(locked);
+ return ret;
+ }
+
+ return (locked) ? 0 : -EBUSY;
+}
+
+void ttm_write_unlock(struct ttm_lock *lock)
+{
+ spin_lock(&lock->lock);
+ lock->rw = 0;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+}
+EXPORT_SYMBOL(ttm_write_unlock);
+
+static bool __ttm_write_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ spin_lock(&lock->lock);
+ if (unlikely(lock->kill_takers)) {
+ send_sig(lock->signal, current, 0);
+ spin_unlock(&lock->lock);
+ return false;
+ }
+ if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
+ lock->rw = -1;
+ lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+ locked = true;
+ } else {
+ lock->flags |= TTM_WRITE_LOCK_PENDING;
+ }
+ spin_unlock(&lock->lock);
+ return locked;
+}
+
+int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
+{
+ int ret = 0;
+
+ if (interruptible) {
+ ret = wait_event_interruptible(lock->queue,
+ __ttm_write_lock(lock));
+ if (unlikely(ret != 0)) {
+ spin_lock(&lock->lock);
+ lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+ }
+ } else
+ wait_event(lock->queue, __ttm_write_lock(lock));
+
+ return ret;
+}
+EXPORT_SYMBOL(ttm_write_lock);
+
+void ttm_write_lock_downgrade(struct ttm_lock *lock)
+{
+ spin_lock(&lock->lock);
+ lock->rw = 1;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+}
+
+static int __ttm_vt_unlock(struct ttm_lock *lock)
+{
+ int ret = 0;
+
+ spin_lock(&lock->lock);
+ if (unlikely(!(lock->flags & TTM_VT_LOCK)))
+ ret = -EINVAL;
+ lock->flags &= ~TTM_VT_LOCK;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+ printk(KERN_INFO TTM_PFX "vt unlock.\n");
+
+ return ret;
+}
+
+static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
+ int ret;
+
+ *p_base = NULL;
+ ret = __ttm_vt_unlock(lock);
+ BUG_ON(ret != 0);
+}
+
+static bool __ttm_vt_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ spin_lock(&lock->lock);
+ if (lock->rw == 0) {
+ lock->flags &= ~TTM_VT_LOCK_PENDING;
+ lock->flags |= TTM_VT_LOCK;
+ locked = true;
+ } else {
+ lock->flags |= TTM_VT_LOCK_PENDING;
+ }
+ spin_unlock(&lock->lock);
+ return locked;
+}
+
+int ttm_vt_lock(struct ttm_lock *lock,
+ bool interruptible,
+ struct ttm_object_file *tfile)
+{
+ int ret = 0;
+
+ if (interruptible) {
+ ret = wait_event_interruptible(lock->queue,
+ __ttm_vt_lock(lock));
+ if (unlikely(ret != 0)) {
+ spin_lock(&lock->lock);
+ lock->flags &= ~TTM_VT_LOCK_PENDING;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+ return ret;
+ }
+ } else
+ wait_event(lock->queue, __ttm_vt_lock(lock));
+
+ /*
+ * Add a base-object, the destructor of which will
+ * make sure the lock is released if the client dies
+ * while holding it.
+ */
+
+ ret = ttm_base_object_init(tfile, &lock->base, false,
+ ttm_lock_type, &ttm_vt_lock_remove, NULL);
+ if (ret)
+ (void)__ttm_vt_unlock(lock);
+ else {
+ lock->vt_holder = tfile;
+ printk(KERN_INFO TTM_PFX "vt lock.\n");
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ttm_vt_lock);
+
+int ttm_vt_unlock(struct ttm_lock *lock)
+{
+ return ttm_ref_object_base_unref(lock->vt_holder,
+ lock->base.hash.key, TTM_REF_USAGE);
+}
+EXPORT_SYMBOL(ttm_vt_unlock);
+
+void ttm_suspend_unlock(struct ttm_lock *lock)
+{
+ spin_lock(&lock->lock);
+ lock->flags &= ~TTM_SUSPEND_LOCK;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+}
+
+static bool __ttm_suspend_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ spin_lock(&lock->lock);
+ if (lock->rw == 0) {
+ lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
+ lock->flags |= TTM_SUSPEND_LOCK;
+ locked = true;
+ } else {
+ lock->flags |= TTM_SUSPEND_LOCK_PENDING;
+ }
+ spin_unlock(&lock->lock);
+ return locked;
+}
+
+void ttm_suspend_lock(struct ttm_lock *lock)
+{
+ wait_event(lock->queue, __ttm_suspend_lock(lock));
+}
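For context, a sketch of the intended read-side usage around command submission; the wrapper function is hypothetical, but the calls match the API above:

    /* Hypothetical command-submission wrapper. */
    static int example_submit_locked(struct ttm_lock *lock)
    {
    	int ret;

    	ret = ttm_read_lock(lock, true);	/* interruptible wait */
    	if (unlikely(ret != 0))
    		return ret;			/* typically -ERESTARTSYS */

    	/*
    	 * Submit commands here. A pending write, VT or suspend lock
    	 * blocks new readers, and the writer proceeds only once
    	 * lock->rw has dropped back to zero.
    	 */

    	ttm_read_unlock(lock);
    	return 0;
    }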
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 072c281a6bb..f5245c02b8f 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -274,16 +274,17 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
const struct sysinfo *si)
{
- struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+ struct ttm_mem_zone *zone;
uint64_t mem;
int ret;
- if (unlikely(!zone))
- return -ENOMEM;
-
if (si->totalhigh == 0)
return 0;
+ zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+ if (unlikely(!zone))
+ return -ENOMEM;
+
mem = si->totalram;
mem *= si->mem_unit;
@@ -322,8 +323,10 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
* No special dma32 zone needed.
*/
- if (mem <= ((uint64_t) 1ULL << 32))
+ if (mem <= ((uint64_t) 1ULL << 32)) {
+ kfree(zone);
return 0;
+ }
/*
* Limit max dma32 memory to 4GB for now
@@ -460,6 +463,7 @@ void ttm_mem_global_free(struct ttm_mem_global *glob,
{
return ttm_mem_global_free_zone(glob, NULL, amount);
}
+EXPORT_SYMBOL(ttm_mem_global_free);
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
@@ -533,6 +537,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
interruptible);
}
+EXPORT_SYMBOL(ttm_mem_global_alloc);
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
struct page *page,
@@ -588,3 +593,4 @@ size_t ttm_round_pot(size_t size)
}
return 0;
}
+EXPORT_SYMBOL(ttm_round_pot);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
new file mode 100644
index 00000000000..1099abac824
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -0,0 +1,452 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/** @file ttm_object.c
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+/**
+ * struct ttm_object_file
+ *
+ * @tdev: Pointer to the ttm_object_device.
+ *
+ * @lock: Lock that protects the ref_list list and the
+ * ref_hash hash tables.
+ *
+ * @ref_list: List of ttm_ref_objects to be destroyed at
+ * file release.
+ *
+ * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
+ * for fast lookup of ref objects given a base object.
+ */
+
+#include "ttm/ttm_object.h"
+#include "ttm/ttm_module.h"
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <asm/atomic.h>
+
+struct ttm_object_file {
+ struct ttm_object_device *tdev;
+ rwlock_t lock;
+ struct list_head ref_list;
+ struct drm_open_hash ref_hash[TTM_REF_NUM];
+ struct kref refcount;
+};
+
+/**
+ * struct ttm_object_device
+ *
+ * @object_lock: lock that protects the object_hash hash table.
+ *
+ * @object_hash: hash table for fast lookup of object global names.
+ *
+ * @object_count: Per device object count.
+ *
+ * This is the per-device data structure needed for ttm object management.
+ */
+
+struct ttm_object_device {
+ rwlock_t object_lock;
+ struct drm_open_hash object_hash;
+ atomic_t object_count;
+ struct ttm_mem_global *mem_glob;
+};
+
+/**
+ * struct ttm_ref_object
+ *
+ * @hash: Hash entry for the per-file object reference hash.
+ *
+ * @head: List entry for the per-file list of ref-objects.
+ *
+ * @kref: Ref count.
+ *
+ * @obj: Base object this ref object is referencing.
+ *
+ * @ref_type: Type of ref object.
+ *
+ * This is similar to an idr object, but it also has a hash table entry
+ * that allows lookup with a pointer to the referenced object as a key. In
+ * that way, one can easily detect whether a base object is referenced by
+ * a particular ttm_object_file. It also carries a ref count to avoid creating
+ * multiple ref objects if a ttm_object_file references the same base
+ * object more than once.
+ */
+
+struct ttm_ref_object {
+ struct drm_hash_item hash;
+ struct list_head head;
+ struct kref kref;
+ struct ttm_base_object *obj;
+ enum ttm_ref_type ref_type;
+ struct ttm_object_file *tfile;
+};
+
+static inline struct ttm_object_file *
+ttm_object_file_ref(struct ttm_object_file *tfile)
+{
+ kref_get(&tfile->refcount);
+ return tfile;
+}
+
+static void ttm_object_file_destroy(struct kref *kref)
+{
+ struct ttm_object_file *tfile =
+ container_of(kref, struct ttm_object_file, refcount);
+
+ kfree(tfile);
+}
+
+
+static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
+{
+ struct ttm_object_file *tfile = *p_tfile;
+
+ *p_tfile = NULL;
+ kref_put(&tfile->refcount, ttm_object_file_destroy);
+}
+
+
+int ttm_base_object_init(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ bool shareable,
+ enum ttm_object_type object_type,
+ void (*refcount_release) (struct ttm_base_object **),
+ void (*ref_obj_release) (struct ttm_base_object *,
+ enum ttm_ref_type ref_type))
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ int ret;
+
+ base->shareable = shareable;
+ base->tfile = ttm_object_file_ref(tfile);
+ base->refcount_release = refcount_release;
+ base->ref_obj_release = ref_obj_release;
+ base->object_type = object_type;
+ write_lock(&tdev->object_lock);
+ kref_init(&base->refcount);
+ ret = drm_ht_just_insert_please(&tdev->object_hash,
+ &base->hash,
+ (unsigned long)base, 31, 0, 0);
+ write_unlock(&tdev->object_lock);
+ if (unlikely(ret != 0))
+ goto out_err0;
+
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ if (unlikely(ret != 0))
+ goto out_err1;
+
+ ttm_base_object_unref(&base);
+
+ return 0;
+out_err1:
+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+out_err0:
+ return ret;
+}
+EXPORT_SYMBOL(ttm_base_object_init);
+
+static void ttm_release_base(struct kref *kref)
+{
+ struct ttm_base_object *base =
+ container_of(kref, struct ttm_base_object, refcount);
+ struct ttm_object_device *tdev = base->tfile->tdev;
+
+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+ write_unlock(&tdev->object_lock);
+ if (base->refcount_release) {
+ ttm_object_file_unref(&base->tfile);
+ base->refcount_release(&base);
+ }
+ write_lock(&tdev->object_lock);
+}
+
+void ttm_base_object_unref(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct ttm_object_device *tdev = base->tfile->tdev;
+
+ *p_base = NULL;
+
+ /*
+ * Need to take the lock here to avoid racing with
+ * users trying to look up the object.
+ */
+
+ write_lock(&tdev->object_lock);
+ (void)kref_put(&base->refcount, &ttm_release_base);
+ write_unlock(&tdev->object_lock);
+}
+EXPORT_SYMBOL(ttm_base_object_unref);
+
+struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
+ uint32_t key)
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ struct ttm_base_object *base;
+ struct drm_hash_item *hash;
+ int ret;
+
+ read_lock(&tdev->object_lock);
+ ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
+
+ if (likely(ret == 0)) {
+ base = drm_hash_entry(hash, struct ttm_base_object, hash);
+ kref_get(&base->refcount);
+ }
+ read_unlock(&tdev->object_lock);
+
+ if (unlikely(ret != 0))
+ return NULL;
+
+ if (tfile != base->tfile && !base->shareable) {
+ printk(KERN_ERR TTM_PFX
+ "Attempted access of non-shareable object.\n");
+ ttm_base_object_unref(&base);
+ return NULL;
+ }
+
+ return base;
+}
+EXPORT_SYMBOL(ttm_base_object_lookup);
+
+int ttm_ref_object_add(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ enum ttm_ref_type ref_type, bool *existed)
+{
+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+ struct ttm_ref_object *ref;
+ struct drm_hash_item *hash;
+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+ int ret = -EINVAL;
+
+ if (existed != NULL)
+ *existed = true;
+
+ while (ret == -EINVAL) {
+ read_lock(&tfile->lock);
+ ret = drm_ht_find_item(ht, base->hash.key, &hash);
+
+ if (ret == 0) {
+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+ kref_get(&ref->kref);
+ read_unlock(&tfile->lock);
+ break;
+ }
+
+ read_unlock(&tfile->lock);
+ ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
+ false, false);
+ if (unlikely(ret != 0))
+ return ret;
+ ref = kmalloc(sizeof(*ref), GFP_KERNEL);
+ if (unlikely(ref == NULL)) {
+ ttm_mem_global_free(mem_glob, sizeof(*ref));
+ return -ENOMEM;
+ }
+
+ ref->hash.key = base->hash.key;
+ ref->obj = base;
+ ref->tfile = tfile;
+ ref->ref_type = ref_type;
+ kref_init(&ref->kref);
+
+ write_lock(&tfile->lock);
+ ret = drm_ht_insert_item(ht, &ref->hash);
+
+ if (likely(ret == 0)) {
+ list_add_tail(&ref->head, &tfile->ref_list);
+ kref_get(&base->refcount);
+ write_unlock(&tfile->lock);
+ if (existed != NULL)
+ *existed = false;
+ break;
+ }
+
+ write_unlock(&tfile->lock);
+ BUG_ON(ret != -EINVAL);
+
+ ttm_mem_global_free(mem_glob, sizeof(*ref));
+ kfree(ref);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ttm_ref_object_add);
+
+static void ttm_ref_object_release(struct kref *kref)
+{
+ struct ttm_ref_object *ref =
+ container_of(kref, struct ttm_ref_object, kref);
+ struct ttm_base_object *base = ref->obj;
+ struct ttm_object_file *tfile = ref->tfile;
+ struct drm_open_hash *ht;
+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+
+ ht = &tfile->ref_hash[ref->ref_type];
+ (void)drm_ht_remove_item(ht, &ref->hash);
+ list_del(&ref->head);
+ write_unlock(&tfile->lock);
+
+ if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
+ base->ref_obj_release(base, ref->ref_type);
+
+ ttm_base_object_unref(&ref->obj);
+ ttm_mem_global_free(mem_glob, sizeof(*ref));
+ kfree(ref);
+ write_lock(&tfile->lock);
+}
+
+int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+ unsigned long key, enum ttm_ref_type ref_type)
+{
+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+ struct ttm_ref_object *ref;
+ struct drm_hash_item *hash;
+ int ret;
+
+ write_lock(&tfile->lock);
+ ret = drm_ht_find_item(ht, key, &hash);
+ if (unlikely(ret != 0)) {
+ write_unlock(&tfile->lock);
+ return -EINVAL;
+ }
+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+ kref_put(&ref->kref, ttm_ref_object_release);
+ write_unlock(&tfile->lock);
+ return 0;
+}
+EXPORT_SYMBOL(ttm_ref_object_base_unref);
+
+void ttm_object_file_release(struct ttm_object_file **p_tfile)
+{
+ struct ttm_ref_object *ref;
+ struct list_head *list;
+ unsigned int i;
+ struct ttm_object_file *tfile = *p_tfile;
+
+ *p_tfile = NULL;
+ write_lock(&tfile->lock);
+
+ /*
+ * Since we release the lock within the loop, we have to
+ * restart it from the beginning each time.
+ */
+
+ while (!list_empty(&tfile->ref_list)) {
+ list = tfile->ref_list.next;
+ ref = list_entry(list, struct ttm_ref_object, head);
+ ttm_ref_object_release(&ref->kref);
+ }
+
+ for (i = 0; i < TTM_REF_NUM; ++i)
+ drm_ht_remove(&tfile->ref_hash[i]);
+
+ write_unlock(&tfile->lock);
+ ttm_object_file_unref(&tfile);
+}
+EXPORT_SYMBOL(ttm_object_file_release);
+
+struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
+ unsigned int hash_order)
+{
+ struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
+ unsigned int i;
+ unsigned int j = 0;
+ int ret;
+
+ if (unlikely(tfile == NULL))
+ return NULL;
+
+ rwlock_init(&tfile->lock);
+ tfile->tdev = tdev;
+ kref_init(&tfile->refcount);
+ INIT_LIST_HEAD(&tfile->ref_list);
+
+ for (i = 0; i < TTM_REF_NUM; ++i) {
+ ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
+ if (ret) {
+ j = i;
+ goto out_err;
+ }
+ }
+
+ return tfile;
+out_err:
+ for (i = 0; i < j; ++i)
+ drm_ht_remove(&tfile->ref_hash[i]);
+
+ kfree(tfile);
+
+ return NULL;
+}
+EXPORT_SYMBOL(ttm_object_file_init);
+
+struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
+ *mem_glob,
+ unsigned int hash_order)
+{
+ struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
+ int ret;
+
+ if (unlikely(tdev == NULL))
+ return NULL;
+
+ tdev->mem_glob = mem_glob;
+ rwlock_init(&tdev->object_lock);
+ atomic_set(&tdev->object_count, 0);
+ ret = drm_ht_create(&tdev->object_hash, hash_order);
+
+ if (likely(ret == 0))
+ return tdev;
+
+ kfree(tdev);
+ return NULL;
+}
+EXPORT_SYMBOL(ttm_object_device_init);
+
+void ttm_object_device_release(struct ttm_object_device **p_tdev)
+{
+ struct ttm_object_device *tdev = *p_tdev;
+
+ *p_tdev = NULL;
+
+ write_lock(&tdev->object_lock);
+ drm_ht_remove(&tdev->object_hash);
+ write_unlock(&tdev->object_lock);
+
+ kfree(tdev);
+}
+EXPORT_SYMBOL(ttm_object_device_release);
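To show the intended lifecycle, a hedged sketch of a driver embedding a base object and exporting a handle to userspace; example_obj, example_release() and example_export() are hypothetical, and ttm_driver_type0 is assumed to be one of the driver-reserved types in ttm_object.h:

    /* Hypothetical driver object wrapping a ttm_base_object. */
    struct example_obj {
    	struct ttm_base_object base;
    	/* driver payload ... */
    };

    static void example_release(struct ttm_base_object **p_base)
    {
    	struct example_obj *obj =
    		container_of(*p_base, struct example_obj, base);

    	*p_base = NULL;
    	kfree(obj);
    }

    static int example_export(struct ttm_object_file *tfile,
                              struct example_obj *obj, uint32_t *handle)
    {
    	int ret;

    	/* Registers the object and adds a TTM_REF_USAGE ref for tfile;
    	 * that ref is what now keeps the object alive. */
    	ret = ttm_base_object_init(tfile, &obj->base, true,
    				   ttm_driver_type0, &example_release, NULL);
    	if (unlikely(ret != 0))
    		return ret;

    	*handle = obj->base.hash.key;
    	return 0;
    }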
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 7bcb89f39ce..9c2b1cc5dba 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -192,6 +192,7 @@ int ttm_tt_populate(struct ttm_tt *ttm)
ttm->state = tt_unbound;
return 0;
}
+EXPORT_SYMBOL(ttm_tt_populate);
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,