-rw-r--r--  libdrm/xf86drm.c          306
-rw-r--r--  libdrm/xf86mm.h            21
-rw-r--r--  linux-core/drm_bo.c       566
-rw-r--r--  linux-core/drm_compat.c     3
-rw-r--r--  linux-core/drm_drv.c       37
-rw-r--r--  linux-core/drm_fence.c    361
-rw-r--r--  linux-core/drm_objects.h   33
-rw-r--r--  linux-core/i915_buffer.c    5
-rw-r--r--  linux-core/via_buffer.c     5
-rw-r--r--  shared-core/drm.h         237
-rw-r--r--  shared-core/i915_drv.h      4
-rw-r--r--  shared-core/via_drv.h       4
12 files changed, 1036 insertions(+), 546 deletions(-)
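
Taken together, the changes below split the multiplexed DRM_IOCTL_FENCE (selected by an op field) and DRM_IOCTL_BUFOBJ (request/reply union) ioctls into one ioctl per operation, and widen buffer-object flags and masks to 64 bits. A minimal sketch of the userspace-visible change on the fence path, assuming the drm_fence_arg_t layout and the DRM_IOCTL_FENCE_CREATE number introduced by this patch; the helper name fence_create_raw is purely illustrative:

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"   /* shared-core/drm.h from this tree */

/* Illustrative helper, not part of the patch. */
static int fence_create_raw(int fd, unsigned int *handle)
{
    drm_fence_arg_t arg;

    memset(&arg, 0, sizeof(arg));
    arg.type = DRM_FENCE_TYPE_EXE;

    /* Old interface: arg.op = drm_fence_create; ioctl(fd, DRM_IOCTL_FENCE, &arg); */
    if (ioctl(fd, DRM_IOCTL_FENCE_CREATE, &arg))
        return -errno;

    /* The kernel handler (drm_fence_create_ioctl) fills handle/class/type/signaled. */
    *handle = arg.handle;
    return 0;
}
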
diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c
index 93185512..52a6d92f 100644
--- a/libdrm/xf86drm.c
+++ b/libdrm/xf86drm.c
@@ -2355,8 +2355,7 @@ int drmFenceCreate(int fd, unsigned flags, int class, unsigned type,
arg.flags = flags;
arg.type = type;
arg.class = class;
- arg.op = drm_fence_create;
- if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+ if (ioctl(fd, DRM_IOCTL_FENCE_CREATE, &arg))
return -errno;
fence->handle = arg.handle;
fence->class = arg.class;
@@ -2378,8 +2377,8 @@ int drmFenceBuffers(int fd, unsigned flags, drmFence *fence)
memset(&arg, 0, sizeof(arg));
arg.flags = flags;
- arg.op = drm_fence_buffers;
- if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+
+ if (ioctl(fd, DRM_IOCTL_FENCE_BUFFERS, &arg))
return -errno;
fence->handle = arg.handle;
fence->class = arg.class;
@@ -2395,8 +2394,8 @@ int drmFenceDestroy(int fd, const drmFence *fence)
memset(&arg, 0, sizeof(arg));
arg.handle = fence->handle;
- arg.op = drm_fence_destroy;
- if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+
+ if (ioctl(fd, DRM_IOCTL_FENCE_DESTROY, &arg))
return -errno;
return 0;
}
@@ -2407,8 +2406,8 @@ int drmFenceReference(int fd, unsigned handle, drmFence *fence)
memset(&arg, 0, sizeof(arg));
arg.handle = handle;
- arg.op = drm_fence_reference;
- if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+
+ if (ioctl(fd, DRM_IOCTL_FENCE_REFERENCE, &arg))
return -errno;
fence->handle = arg.handle;
fence->class = arg.class;
@@ -2424,8 +2423,8 @@ int drmFenceUnreference(int fd, const drmFence *fence)
memset(&arg, 0, sizeof(arg));
arg.handle = fence->handle;
- arg.op = drm_fence_unreference;
- if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+
+ if (ioctl(fd, DRM_IOCTL_FENCE_UNREFERENCE, &arg))
return -errno;
return 0;
}
@@ -2437,8 +2436,8 @@ int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type)
memset(&arg, 0, sizeof(arg));
arg.handle = fence->handle;
arg.type = flush_type;
- arg.op = drm_fence_flush;
- if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+
+ if (ioctl(fd, DRM_IOCTL_FENCE_FLUSH, &arg))
return -errno;
fence->class = arg.class;
fence->type = arg.type;
@@ -2452,8 +2451,8 @@ int drmFenceUpdate(int fd, drmFence *fence)
memset(&arg, 0, sizeof(arg));
arg.handle = fence->handle;
- arg.op = drm_fence_signaled;
- if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+
+ if (ioctl(fd, DRM_IOCTL_FENCE_SIGNALED, &arg))
return -errno;
fence->class = arg.class;
fence->type = arg.type;
@@ -2492,8 +2491,8 @@ int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type)
arg.flags = flags;
arg.handle = fence->handle;
arg.type = emit_type;
- arg.op = drm_fence_emit;
- if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+
+ if (ioctl(fd, DRM_IOCTL_FENCE_EMIT, &arg))
return -errno;
fence->class = arg.class;
fence->type = arg.type;
@@ -2526,9 +2525,9 @@ int drmFenceWait(int fd, unsigned flags, drmFence *fence, unsigned flush_type)
arg.handle = fence->handle;
arg.type = flush_type;
arg.flags = flags;
- arg.op = drm_fence_wait;
+
do {
- ret = ioctl(fd, DRM_IOCTL_FENCE, &arg);
+ ret = ioctl(fd, DRM_IOCTL_FENCE_WAIT, &arg);
} while (ret != 0 && errno == EAGAIN);
if (ret)
@@ -2678,7 +2677,7 @@ int drmBOCreateList(int numTarget, drmBOList *list)
return drmAdjustListNodes(list);
}
-static void drmBOCopyReply(const drm_bo_arg_reply_t *rep, drmBO *buf)
+static void drmBOCopyReply(const struct drm_bo_info_rep *rep, drmBO *buf)
{
buf->handle = rep->handle;
buf->flags = rep->flags;
@@ -2690,16 +2689,21 @@ static void drmBOCopyReply(const drm_bo_arg_reply_t *rep, drmBO *buf)
buf->fenceFlags = rep->fence_flags;
buf->replyFlags = rep->rep_flags;
buf->pageAlignment = rep->page_alignment;
+ buf->tileInfo = rep->tile_info;
+ buf->hwTileStride = rep->hw_tile_stride;
+ buf->desiredTileStride = rep->desired_tile_stride;
}
-int drmBOCreate(int fd, unsigned long start, unsigned long size,
- unsigned pageAlignment, void *user_buffer, drm_bo_type_t type,
- unsigned mask,
+
+
+int drmBOCreate(int fd, unsigned long start, unsigned long size,
+ unsigned pageAlignment, void *user_buffer, drm_bo_type_t type,
+ drm_u64_t mask,
unsigned hint, drmBO *buf)
{
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t *rep = &arg.d.rep;
+ struct drm_bo_create_arg arg;
+ struct drm_bo_create_req *req = &arg.d.req;
+ struct drm_bo_info_rep *rep = &arg.d.rep;
int ret;
memset(buf, 0, sizeof(*buf));
@@ -2726,21 +2730,13 @@ int drmBOCreate(int fd, unsigned long start, unsigned long size,
default:
return -EINVAL;
}
- req->op = drm_bo_create;
do {
- ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
+ ret = ioctl(fd, DRM_IOCTL_BO_CREATE, &arg);
} while (ret != 0 && errno == EAGAIN);
if (ret)
return -errno;
- if (!arg.handled) {
- return -EFAULT;
- }
- if (rep->ret) {
- fprintf(stderr, "Error %d\n", rep->ret);
- return rep->ret;
- }
drmBOCopyReply(rep, buf);
buf->mapVirtual = NULL;
@@ -2751,9 +2747,7 @@ int drmBOCreate(int fd, unsigned long start, unsigned long size,
int drmBODestroy(int fd, drmBO *buf)
{
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t *rep = &arg.d.rep;
+ struct drm_bo_handle_arg arg;
if (buf->mapVirtual && (buf->type != drm_bo_type_fake)) {
(void) drmUnmap(buf->mapVirtual, buf->start + buf->size);
@@ -2762,40 +2756,26 @@ int drmBODestroy(int fd, drmBO *buf)
}
memset(&arg, 0, sizeof(arg));
- req->handle = buf->handle;
- req->op = drm_bo_destroy;
+ arg.handle = buf->handle;
- if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg))
+ if (ioctl(fd, DRM_IOCTL_BO_DESTROY, &arg))
return -errno;
- if (!arg.handled) {
- return -EFAULT;
- }
- if (rep->ret) {
- return rep->ret;
- }
buf->handle = 0;
return 0;
}
-
+
int drmBOReference(int fd, unsigned handle, drmBO *buf)
{
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t *rep = &arg.d.rep;
+ struct drm_bo_reference_info_arg arg;
+ struct drm_bo_handle_arg *req = &arg.d.req;
+ struct drm_bo_info_rep *rep = &arg.d.rep;
memset(&arg, 0, sizeof(arg));
req->handle = handle;
- req->op = drm_bo_reference;
- if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg))
+ if (ioctl(fd, DRM_IOCTL_BO_REFERENCE, &arg))
return -errno;
- if (!arg.handled) {
- return -EFAULT;
- }
- if (rep->ret) {
- return rep->ret;
- }
drmBOCopyReply(rep, buf);
buf->type = drm_bo_type_dc;
@@ -2808,9 +2788,7 @@ int drmBOReference(int fd, unsigned handle, drmBO *buf)
int drmBOUnReference(int fd, drmBO *buf)
{
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t *rep = &arg.d.rep;
+ struct drm_bo_handle_arg arg;
if (buf->mapVirtual && (buf->type != drm_bo_type_fake)) {
(void) munmap(buf->mapVirtual, buf->start + buf->size);
@@ -2819,22 +2797,16 @@ int drmBOUnReference(int fd, drmBO *buf)
}
memset(&arg, 0, sizeof(arg));
- req->handle = buf->handle;
- req->op = drm_bo_unreference;
+ arg.handle = buf->handle;
- if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg))
+ if (ioctl(fd, DRM_IOCTL_BO_UNREFERENCE, &arg))
return -errno;
- if (!arg.handled) {
- return -EFAULT;
- }
- if (rep->ret) {
- return rep->ret;
- }
buf->handle = 0;
return 0;
}
+
/*
* Flags can be DRM_BO_FLAG_READ, DRM_BO_FLAG_WRITE or'ed together
* Hint currently be DRM_BO_HINT_DONT_BLOCK, which makes the
@@ -2844,9 +2816,9 @@ int drmBOUnReference(int fd, drmBO *buf)
int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
void **address)
{
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t *rep = &arg.d.rep;
+ struct drm_bo_map_wait_idle_arg arg;
+ struct drm_bo_info_req *req = &arg.d.req;
+ struct drm_bo_info_rep *rep = &arg.d.rep;
int ret = 0;
/*
@@ -2871,7 +2843,6 @@ int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
req->handle = buf->handle;
req->mask = mapFlags;
req->hint = mapHint;
- req->op = drm_bo_map;
/*
* May hang if the buffer object is busy.
@@ -2879,15 +2850,11 @@ int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
*/
do {
- ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
+ ret = ioctl(fd, DRM_IOCTL_BO_MAP, &arg);
} while (ret != 0 && errno == EAGAIN);
if (ret)
return -errno;
- if (!arg.handled)
- return -EFAULT;
- if (rep->ret)
- return rep->ret;
drmBOCopyReply(rep, buf);
buf->mapFlags = mapFlags;
@@ -2897,44 +2864,39 @@ int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
return 0;
}
+
int drmBOUnmap(int fd, drmBO *buf)
{
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t *rep = &arg.d.rep;
+ struct drm_bo_handle_arg arg;
memset(&arg, 0, sizeof(arg));
- req->handle = buf->handle;
- req->op = drm_bo_unmap;
+ arg.handle = buf->handle;
- if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg)) {
+ if (ioctl(fd, DRM_IOCTL_BO_UNMAP, &arg)) {
return -errno;
}
- if (!arg.handled)
- return -EFAULT;
- if (rep->ret)
- return rep->ret;
-
return 0;
}
-
-int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask,
+
+int drmBOValidate(int fd, drmBO *buf,
+ drm_u64_t flags, drm_u64_t mask,
unsigned hint)
{
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t *rep = &arg.d.rep;
+ struct drm_bo_op_arg arg;
+ struct drm_bo_op_req *req = &arg.d.req;
+ struct drm_bo_arg_rep *rep = &arg.d.rep;
int ret = 0;
memset(&arg, 0, sizeof(arg));
- req->handle = buf->handle;
- req->mask = flags;
- req->hint = hint;
- req->arg_handle = mask; /* Encode mask in the arg_handle field :/ */
+ req->bo_req.handle = buf->handle;
+ req->bo_req.flags = flags;
+ req->bo_req.mask = mask;
+ req->bo_req.hint = hint;
+ req->bo_req.fence_class = 0; /* Backwards compatibility. */
req->op = drm_bo_validate;
do{
- ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
+ ret = ioctl(fd, DRM_IOCTL_BO_OP, &arg);
} while (ret && errno == EAGAIN);
if (ret)
@@ -2944,26 +2906,25 @@ int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask,
if (rep->ret)
return rep->ret;
- drmBOCopyReply(rep, buf);
+ drmBOCopyReply(&rep->bo_info, buf);
return 0;
}
int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle)
{
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t *rep = &arg.d.rep;
+ struct drm_bo_op_arg arg;
+ struct drm_bo_op_req *req = &arg.d.req;
+ struct drm_bo_arg_rep *rep = &arg.d.rep;
int ret = 0;
memset(&arg, 0, sizeof(arg));
- req->handle = buf->handle;
- req->mask = flags;
+ req->bo_req.handle = buf->handle;
+ req->bo_req.flags = flags;
req->arg_handle = fenceHandle;
req->op = drm_bo_fence;
- ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
-
+ ret = ioctl(fd, DRM_IOCTL_BO_OP, &arg);
if (ret)
return -errno;
if (!arg.handled)
@@ -2975,51 +2936,42 @@ int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle)
int drmBOInfo(int fd, drmBO *buf)
{
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t *rep = &arg.d.rep;
+ struct drm_bo_reference_info_arg arg;
+ struct drm_bo_handle_arg *req = &arg.d.req;
+ struct drm_bo_info_rep *rep = &arg.d.rep;
int ret = 0;
memset(&arg, 0, sizeof(arg));
req->handle = buf->handle;
- req->op = drm_bo_info;
-
- ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
+ ret = ioctl(fd, DRM_IOCTL_BO_INFO, &arg);
if (ret)
return -errno;
- if (!arg.handled)
- return -EFAULT;
- if (rep->ret)
- return rep->ret;
+
drmBOCopyReply(rep, buf);
return 0;
}
int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint)
{
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t *rep = &arg.d.rep;
+ struct drm_bo_map_wait_idle_arg arg;
+ struct drm_bo_info_req *req = &arg.d.req;
+ struct drm_bo_info_rep *rep = &arg.d.rep;
int ret = 0;
if ((buf->flags & DRM_BO_FLAG_SHAREABLE) ||
(buf->replyFlags & DRM_BO_REP_BUSY)) {
memset(&arg, 0, sizeof(arg));
req->handle = buf->handle;
- req->op = drm_bo_wait_idle;
req->hint = hint;
do {
- ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
+ ret = ioctl(fd, DRM_IOCTL_BO_WAIT_IDLE, &arg);
} while (ret && errno == EAGAIN);
if (ret)
return -errno;
- if (!arg.handled)
- return -EFAULT;
- if (rep->ret)
- return rep->ret;
+
drmBOCopyReply(rep, buf);
}
return 0;
@@ -3041,7 +2993,6 @@ int drmBOBusy(int fd, drmBO *buf, int *busy)
}
}
-
int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags,
unsigned mask,
int *newItem)
@@ -3103,9 +3054,9 @@ int drmBOValidateList(int fd, drmBOList *list)
{
drmBONode *node;
drmMMListHead *l;
- drm_bo_arg_t *arg, *first;
- drm_bo_arg_request_t *req;
- drm_bo_arg_reply_t *rep;
+ struct drm_bo_op_arg *arg, *first;
+ struct drm_bo_op_req *req;
+ struct drm_bo_arg_rep *rep;
drm_u64_t *prevNext = NULL;
drmBO *buf;
int ret;
@@ -3113,63 +3064,63 @@ int drmBOValidateList(int fd, drmBOList *list)
first = NULL;
for (l = list->list.next; l != &list->list; l = l->next) {
- node = DRMLISTENTRY(drmBONode, l, head);
+ node = DRMLISTENTRY(drmBONode, l, head);
- arg = &node->bo_arg;
- req = &arg->d.req;
+ arg = &node->bo_arg;
+ req = &arg->d.req;
- if (!first)
- first = arg;
+ if (!first)
+ first = arg;
if (prevNext)
*prevNext = (unsigned long) arg;
memset(arg, 0, sizeof(*arg));
prevNext = &arg->next;
- req->handle = node->buf->handle;
+ req->bo_req.handle = node->buf->handle;
req->op = drm_bo_validate;
- req->mask = node->arg0;
- req->hint = 0;
- req->arg_handle = node->arg1;
+ req->bo_req.flags = node->arg0;
+ req->bo_req.hint = 0;
+ req->bo_req.mask = node->arg1;
+ req->bo_req.fence_class = 0; /* Backwards compat. */
}
-
- if (!first)
+
+ if (!first)
return 0;
- do {
- ret = ioctl(fd, DRM_IOCTL_BUFOBJ, first);
+ do{
+ ret = ioctl(fd, DRM_IOCTL_BO_OP, first);
} while (ret && errno == EAGAIN);
if (ret)
return -errno;
-
+
for (l = list->list.next; l != &list->list; l = l->next) {
node = DRMLISTENTRY(drmBONode, l, head);
arg = &node->bo_arg;
rep = &arg->d.rep;
-
+
if (!arg->handled) {
drmMsg("Unhandled request\n");
return -EFAULT;
}
if (rep->ret)
return rep->ret;
-
+
buf = node->buf;
- drmBOCopyReply(rep, buf);
+ drmBOCopyReply(&rep->bo_info, buf);
}
-
+
return 0;
}
-
int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle)
{
drmBONode *node;
drmMMListHead *l;
- drm_bo_arg_t *arg, *first;
- drm_bo_arg_request_t *req;
- drm_bo_arg_reply_t *rep;
+ struct drm_bo_op_arg *arg, *first;
+ struct drm_bo_op_req *req;
+ struct drm_bo_arg_rep *rep;
drm_u64_t *prevNext = NULL;
drmBO *buf;
unsigned fence_flags;
@@ -3178,8 +3129,8 @@ int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle)
first = NULL;
for (l = list->list.next; l != &list->list; l = l->next) {
- node = DRMLISTENTRY(drmBONode, l, head);
-
+ node = DRMLISTENTRY(drmBONode, l, head);
+
arg = &node->bo_arg;
req = &arg->d.req;
@@ -3191,29 +3142,31 @@ int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle)
memset(arg, 0, sizeof(*arg));
prevNext = &arg->next;
- req->handle = node->buf->handle;
+ req->bo_req.handle = node->buf->handle;
req->op = drm_bo_fence;
- req->mask = node->arg0;
+ req->bo_req.mask = node->arg0;
req->arg_handle = fenceHandle;
}
if (!first)
return 0;
- ret = ioctl(fd, DRM_IOCTL_BUFOBJ, first);
+ ret = ioctl(fd, DRM_IOCTL_BO_OP, first);
if (ret)
return -errno;
for (l = list->list.next; l != &list->list; l = l->next) {
node = DRMLISTENTRY(drmBONode, l, head);
+
arg = &node->bo_arg;
rep = &arg->d.rep;
+
if (!arg->handled)
return -EFAULT;
if (rep->ret)
return rep->ret;
- drmBOCopyReply(rep, node->buf);
+ drmBOCopyReply(&rep->bo_info, node->buf);
}
return 0;
@@ -3222,13 +3175,16 @@ int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle)
int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
unsigned memType)
{
- drm_mm_init_arg_t arg;
+ struct drm_mm_init_arg arg;
memset(&arg, 0, sizeof(arg));
- arg.req.op = mm_init;
- arg.req.p_offset = pOffset;
- arg.req.p_size = pSize;
- arg.req.mem_type = memType;
+
+ arg.magic = DRM_BO_INIT_MAGIC;
+ arg.major = DRM_BO_INIT_MAJOR;
+ arg.minor = DRM_BO_INIT_MINOR;
+ arg.p_offset = pOffset;
+ arg.p_size = pSize;
+ arg.mem_type = memType;
if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
return -errno;
@@ -3237,28 +3193,26 @@ int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
int drmMMTakedown(int fd, unsigned memType)
{
- drm_mm_init_arg_t arg;
+ struct drm_mm_type_arg arg;
memset(&arg, 0, sizeof(arg));
- arg.req.op = mm_takedown;
- arg.req.mem_type = memType;
+ arg.mem_type = memType;
- if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
+ if (ioctl(fd, DRM_IOCTL_MM_TAKEDOWN, &arg))
return -errno;
return 0;
}
int drmMMLock(int fd, unsigned memType)
{
- drm_mm_init_arg_t arg;
+ struct drm_mm_type_arg arg;
int ret;
memset(&arg, 0, sizeof(arg));
- arg.req.op = mm_lock;
- arg.req.mem_type = memType;
+ arg.mem_type = memType;
do{
- ret = ioctl(fd, DRM_IOCTL_MM_INIT, &arg);
+ ret = ioctl(fd, DRM_IOCTL_MM_LOCK, &arg);
} while (ret && errno == EAGAIN);
return (ret) ? -errno : 0;
@@ -3266,15 +3220,15 @@ int drmMMLock(int fd, unsigned memType)
int drmMMUnlock(int fd, unsigned memType)
{
- drm_mm_init_arg_t arg;
+ struct drm_mm_type_arg arg;
int ret;
memset(&arg, 0, sizeof(arg));
- arg.req.op = mm_unlock;
- arg.req.mem_type = memType;
+
+ arg.mem_type = memType;
do{
- ret = ioctl(fd, DRM_IOCTL_MM_INIT, &arg);
+ ret = ioctl(fd, DRM_IOCTL_MM_UNLOCK, &arg);
} while (ret && errno == EAGAIN);
return (ret) ? -errno : 0;
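
The rewritten wrappers above keep their existing fence signatures while dispatching to the new per-operation ioctls. A usage sketch under that assumption (error paths abbreviated; the helper name fence_roundtrip is illustrative only, and DRM_FENCE_TYPE_EXE / DRM_FENCE_FLAG_WAIT_LAZY are the values from shared-core/drm.h):

#include "xf86drm.h"   /* drmFence* wrappers shown above */
#include "xf86mm.h"

/* Illustrative only: create, emit and wait on a fence via the split ioctls. */
static int fence_roundtrip(int fd)
{
    drmFence fence;
    int ret;

    ret = drmFenceCreate(fd, 0, 0, DRM_FENCE_TYPE_EXE, &fence);   /* DRM_IOCTL_FENCE_CREATE */
    if (ret)
        return ret;

    ret = drmFenceEmit(fd, 0, &fence, DRM_FENCE_TYPE_EXE);        /* DRM_IOCTL_FENCE_EMIT */
    if (!ret)
        ret = drmFenceWait(fd, DRM_FENCE_FLAG_WAIT_LAZY,          /* DRM_IOCTL_FENCE_WAIT */
                           &fence, fence.type);

    drmFenceUnreference(fd, &fence);                              /* DRM_IOCTL_FENCE_UNREFERENCE */
    return ret;
}
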
diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h
index b3822d4d..61978bc9 100644
--- a/libdrm/xf86mm.h
+++ b/libdrm/xf86mm.h
@@ -108,8 +108,8 @@ typedef struct _drmBO
drm_bo_type_t type;
unsigned handle;
drm_u64_t mapHandle;
- unsigned flags;
- unsigned mask;
+ drm_u64_t flags;
+ drm_u64_t mask;
unsigned mapFlags;
unsigned long size;
unsigned long offset;
@@ -117,6 +117,9 @@ typedef struct _drmBO
unsigned replyFlags;
unsigned fenceFlags;
unsigned pageAlignment;
+ unsigned tileInfo;
+ unsigned hwTileStride;
+ unsigned desiredTileStride;
void *virtual;
void *mapVirtual;
int mapCount;
@@ -127,7 +130,7 @@ typedef struct _drmBONode
{
drmMMListHead head;
drmBO *buf;
- drm_bo_arg_t bo_arg;
+ struct drm_bo_op_arg bo_arg;
unsigned long arg0;
unsigned long arg1;
} drmBONode;
@@ -176,8 +179,8 @@ extern int drmBOCreateList(int numTarget, drmBOList *list);
*/
extern int drmBOCreate(int fd, unsigned long start, unsigned long size,
- unsigned pageAlignment,void *user_buffer,
- drm_bo_type_t type, unsigned mask,
+ unsigned pageAlignment,void *user_buffer,
+ drm_bo_type_t type, drm_u64_t mask,
unsigned hint, drmBO *buf);
extern int drmBODestroy(int fd, drmBO *buf);
extern int drmBOReference(int fd, unsigned handle, drmBO *buf);
@@ -185,14 +188,14 @@ extern int drmBOUnReference(int fd, drmBO *buf);
extern int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
void **address);
extern int drmBOUnmap(int fd, drmBO *buf);
-extern int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask,
- unsigned hint);
+extern int drmBOValidate(int fd, drmBO *buf, drm_u64_t flags,
+ drm_u64_t mask, unsigned hint);
+
extern int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle);
extern int drmBOInfo(int fd, drmBO *buf);
extern int drmBOBusy(int fd, drmBO *buf, int *busy);
-
-extern int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags,
+extern int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags,
unsigned mask,
int *newItem);
extern int drmBOValidateList(int fd, drmBOList *list);
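
With the prototypes above now taking drm_u64_t flags and masks, a buffer-object round trip looks roughly like the following. This is a sketch only: the helper name bo_roundtrip, the 64 KiB size and the TT placement are arbitrary, and the flag, mask and hint values are the ones defined in shared-core/drm.h by this patch:

#include <string.h>
#include "xf86drm.h"
#include "xf86mm.h"

/* Illustrative only: create, validate, map, unmap and destroy a buffer object. */
static int bo_roundtrip(int fd)
{
    drmBO bo;
    void *virtual = NULL;
    int ret;

    /* mask is now 64-bit, so access and memory-type bits combine in one value. */
    drm_u64_t mask = DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
                     DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_MEM_TT;

    ret = drmBOCreate(fd, 0, 64 * 1024, 0, NULL, drm_bo_type_dc,   /* DRM_IOCTL_BO_CREATE */
                      mask, DRM_BO_HINT_DONT_BLOCK, &bo);
    if (ret)
        return ret;

    /* Issues DRM_IOCTL_BO_OP with a drm_bo_validate sub-op. */
    ret = drmBOValidate(fd, &bo, DRM_BO_FLAG_MEM_TT, DRM_BO_MASK_MEM, 0);
    if (ret)
        goto out;

    /* DRM_IOCTL_BO_MAP / DRM_IOCTL_BO_UNMAP replace the old multiplexed calls. */
    ret = drmBOMap(fd, &bo, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0, &virtual);
    if (ret)
        goto out;
    memset(virtual, 0, bo.size);
    drmBOUnmap(fd, &bo);

out:
    drmBODestroy(fd, &bo);                                         /* DRM_IOCTL_BO_DESTROY */
    return ret;
}
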
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index ab257825..2ce3dbca 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -202,8 +202,8 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t * bo,
if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
drm_bo_mem_reg_t *old_mem = &bo->mem;
- uint32_t save_flags = old_mem->flags;
- uint32_t save_mask = old_mem->mask;
+ uint64_t save_flags = old_mem->flags;
+ uint64_t save_mask = old_mem->mask;
*old_mem = *mem;
mem->mm_node = NULL;
@@ -884,7 +884,7 @@ int drm_bo_mem_space(drm_buffer_object_t * bo,
EXPORT_SYMBOL(drm_bo_mem_space);
static int drm_bo_new_mask(drm_buffer_object_t * bo,
- uint32_t new_mask, uint32_t hint)
+ uint64_t new_mask, uint32_t hint)
{
uint32_t new_props;
@@ -1076,7 +1076,7 @@ static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
*/
static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
- drm_bo_arg_reply_t * rep)
+ struct drm_bo_info_rep *rep)
{
rep->handle = bo->base.hash.key;
rep->flags = bo->mem.flags;
@@ -1104,7 +1104,7 @@ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
uint32_t map_flags, unsigned hint,
- drm_bo_arg_reply_t * rep)
+ struct drm_bo_info_rep *rep)
{
drm_buffer_object_t *bo;
drm_device_t *dev = priv->head->dev;
@@ -1351,7 +1351,8 @@ static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem)
return 0;
}
- DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask);
+ DRM_ERROR("Illegal fake buffer flags 0x%016llx\n",
+ (unsigned long long) mem->mask);
return -EINVAL;
}
@@ -1360,22 +1361,45 @@ static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem)
*/
static int drm_buffer_object_validate(drm_buffer_object_t * bo,
+ uint32_t fence_class,
int move_unfenced, int no_wait)
{
drm_device_t *dev = bo->dev;
drm_buffer_manager_t *bm = &dev->bm;
drm_bo_driver_t *driver = dev->driver->bo_driver;
+ uint32_t ftype;
int ret;
- DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask,
- bo->mem.flags);
- ret =
- driver->fence_type(bo, &bo->fence_class, &bo->fence_type);
+ DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
+ (unsigned long long) bo->mem.mask,
+ (unsigned long long) bo->mem.flags);
+
+ ret = driver->fence_type(bo, &ftype);
+
if (ret) {
DRM_ERROR("Driver did not support given buffer permissions\n");
return ret;
}
+ /*
+ * We're switching command submission mechanism,
+ * or cannot simply rely on the hardware serializing for us.
+ *
+ * Wait for buffer idle.
+ */
+
+ if ((fence_class != bo->fence_class) ||
+ ((ftype ^ bo->fence_type) & bo->fence_type)) {
+
+ ret = drm_bo_wait(bo, 0, 0, no_wait);
+
+ if (ret)
+ return ret;
+
+ }
+
+ bo->fence_class = fence_class;
+ bo->fence_type = ftype;
ret = drm_bo_wait_unmapped(bo, no_wait);
if (ret)
return ret;
@@ -1465,9 +1489,11 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo,
return 0;
}
-static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
- uint32_t flags, uint32_t mask, uint32_t hint,
- drm_bo_arg_reply_t * rep)
+static int drm_bo_handle_validate(drm_file_t * priv,
+ uint32_t handle,
+ uint32_t fence_class,
+ uint64_t flags, uint64_t mask, uint32_t hint,
+ struct drm_bo_info_rep *rep)
{
struct drm_device *dev = priv->head->dev;
drm_buffer_object_t *bo;
@@ -1493,7 +1519,8 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
goto out;
ret =
- drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE),
+ drm_buffer_object_validate(bo, fence_class,
+ !(hint & DRM_BO_HINT_DONT_FENCE),
no_wait);
drm_bo_fill_rep_arg(bo, rep);
@@ -1505,8 +1532,8 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
return ret;
}
-static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
- drm_bo_arg_reply_t * rep)
+static int drm_bo_handle_info(drm_file_t *priv, uint32_t handle,
+ struct drm_bo_info_rep *rep)
{
struct drm_device *dev = priv->head->dev;
drm_buffer_object_t *bo;
@@ -1527,8 +1554,9 @@ static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
return 0;
}
-static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
- uint32_t hint, drm_bo_arg_reply_t * rep)
+static int drm_bo_handle_wait(drm_file_t *priv, uint32_t handle,
+ uint32_t hint,
+ struct drm_bo_info_rep *rep)
{
struct drm_device *dev = priv->head->dev;
drm_buffer_object_t *bo;
@@ -1562,7 +1590,7 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
int drm_buffer_object_create(drm_device_t *dev,
unsigned long size,
drm_bo_type_t type,
- uint32_t mask,
+ uint64_t mask,
uint32_t hint,
uint32_t page_alignment,
unsigned long buffer_start,
@@ -1614,8 +1642,8 @@ int drm_buffer_object_create(drm_device_t *dev,
bo->buffer_start = buffer_start;
}
bo->priv_flags = 0;
- bo->mem.flags = 0;
- bo->mem.mask = 0;
+ bo->mem.flags = 0ULL;
+ bo->mem.mask = 0ULL;
atomic_inc(&bm->count);
ret = drm_bo_new_mask(bo, mask, hint);
@@ -1629,7 +1657,7 @@ int drm_buffer_object_create(drm_device_t *dev,
if (ret)
goto out_err;
}
- ret = drm_buffer_object_validate(bo, 0, hint & DRM_BO_HINT_DONT_BLOCK);
+ ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
if (ret)
goto out_err;
@@ -1671,15 +1699,14 @@ static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
return 0;
}
-int drm_bo_ioctl(DRM_IOCTL_ARGS)
+int drm_bo_op_ioctl(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t rep;
+ struct drm_bo_op_arg arg;
+ struct drm_bo_op_req *req = &arg.d.req;
+ struct drm_bo_info_rep rep;
unsigned long next;
- drm_user_object_t *uo;
- drm_buffer_object_t *entry;
+ int ret;
if (!dev->bm.initialized) {
DRM_ERROR("Buffer object manager is not initialized.\n");
@@ -1694,97 +1721,29 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
continue;
}
- rep.ret = 0;
+ ret = 0;
switch (req->op) {
- case drm_bo_create:
- rep.ret = drm_bo_lock_test(dev, filp);
- if (rep.ret)
- break;
- rep.ret =
- drm_buffer_object_create(priv->head->dev,
- req->size,
- req->type,
- req->mask,
- req->hint,
- req->page_alignment,
- req->buffer_start, &entry);
- if (rep.ret)
- break;
-
- rep.ret =
- drm_bo_add_user_object(priv, entry,
- req->
- mask &
- DRM_BO_FLAG_SHAREABLE);
- if (rep.ret)
- drm_bo_usage_deref_unlocked(&entry);
-
- if (rep.ret)
- break;
-
- mutex_lock(&entry->mutex);
- drm_bo_fill_rep_arg(entry, &rep);
- mutex_unlock(&entry->mutex);
- break;
- case drm_bo_unmap:
- rep.ret = drm_buffer_object_unmap(priv, req->handle);
- break;
- case drm_bo_map:
- rep.ret = drm_buffer_object_map(priv, req->handle,
- req->mask,
- req->hint, &rep);
- break;
- case drm_bo_destroy:
- mutex_lock(&dev->struct_mutex);
- uo = drm_lookup_user_object(priv, req->handle);
- if (!uo || (uo->type != drm_buffer_type)
- || uo->owner != priv) {
- mutex_unlock(&dev->struct_mutex);
- rep.ret = -EINVAL;
- break;
- }
- rep.ret = drm_remove_user_object(priv, uo);
- mutex_unlock(&dev->struct_mutex);
- break;
- case drm_bo_reference:
- rep.ret = drm_user_object_ref(priv, req->handle,
- drm_buffer_type, &uo);
- if (rep.ret)
- break;
-
- rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
- break;
- case drm_bo_unreference:
- rep.ret = drm_user_object_unref(priv, req->handle,
- drm_buffer_type);
- break;
case drm_bo_validate:
- rep.ret = drm_bo_lock_test(dev, filp);
-
- if (rep.ret)
+ ret = drm_bo_lock_test(dev, filp);
+ if (ret)
break;
- rep.ret =
- drm_bo_handle_validate(priv, req->handle, req->mask,
- req->arg_handle, req->hint,
- &rep);
+ ret = drm_bo_handle_validate(priv, req->bo_req.handle,
+ req->bo_req.fence_class,
+ req->bo_req.flags,
+ req->bo_req.mask,
+ req->bo_req.hint,
+ &rep);
break;
case drm_bo_fence:
- rep.ret = drm_bo_lock_test(dev, filp);
- if (rep.ret)
- break;
- /**/ break;
- case drm_bo_info:
- rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
- break;
- case drm_bo_wait_idle:
- rep.ret = drm_bo_handle_wait(priv, req->handle,
- req->hint, &rep);
+ ret = -EINVAL;
+ DRM_ERROR("Function is not implemented yet.\n");
break;
case drm_bo_ref_fence:
- rep.ret = -EINVAL;
+ ret = -EINVAL;
DRM_ERROR("Function is not implemented yet.\n");
+ break;
default:
- rep.ret = -EINVAL;
+ ret = -EINVAL;
}
next = arg.next;
@@ -1792,17 +1751,221 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
* A signal interrupted us. Make sure the ioctl is restartable.
*/
- if (rep.ret == -EAGAIN)
+ if (ret == -EAGAIN)
return -EAGAIN;
arg.handled = 1;
- arg.d.rep = rep;
+ arg.d.rep.ret = ret;
+ arg.d.rep.bo_info = rep;
DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
data = next;
} while (data);
return 0;
}
+int drm_bo_create_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_bo_create_arg arg;
+ struct drm_bo_create_req *req = &arg.d.req;
+ struct drm_bo_info_rep *rep = &arg.d.rep;
+ drm_buffer_object_t *entry;
+ int ret = 0;
+
+ if (!dev->bm.initialized) {
+ DRM_ERROR("Buffer object manager is not initialized.\n");
+ return -EINVAL;
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+
+ ret = drm_bo_lock_test(dev, filp);
+ if (ret)
+ goto out;
+
+ ret = drm_buffer_object_create(priv->head->dev,
+ req->size, req->type, req->mask,
+ req->hint, req->page_alignment,
+ req->buffer_start, &entry);
+ if (ret)
+ goto out;
+
+ ret = drm_bo_add_user_object(priv, entry,
+ req->mask & DRM_BO_FLAG_SHAREABLE);
+ if (ret) {
+ drm_bo_usage_deref_unlocked(&entry);
+ goto out;
+ }
+
+ mutex_lock(&entry->mutex);
+ drm_bo_fill_rep_arg(entry, rep);
+ mutex_unlock(&entry->mutex);
+
+ DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+out:
+ return ret;
+}
+
+
+int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_bo_handle_arg arg;
+ drm_user_object_t *uo;
+ int ret = 0;
+
+ if (!dev->bm.initialized) {
+ DRM_ERROR("Buffer object manager is not initialized.\n");
+ return -EINVAL;
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+
+ mutex_lock(&dev->struct_mutex);
+ uo = drm_lookup_user_object(priv, arg.handle);
+ if (!uo || (uo->type != drm_buffer_type) || uo->owner != priv) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+ ret = drm_remove_user_object(priv, uo);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+int drm_bo_map_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_bo_map_wait_idle_arg arg;
+ struct drm_bo_info_req *req = &arg.d.req;
+ struct drm_bo_info_rep *rep = &arg.d.rep;
+ int ret;
+ if (!dev->bm.initialized) {
+ DRM_ERROR("Buffer object manager is not initialized.\n");
+ return -EINVAL;
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+
+ ret = drm_buffer_object_map(priv, req->handle, req->mask,
+ req->hint, rep);
+ if (ret)
+ return ret;
+
+ DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+ return 0;
+}
+
+int drm_bo_unmap_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_bo_handle_arg arg;
+ int ret;
+ if (!dev->bm.initialized) {
+ DRM_ERROR("Buffer object manager is not initialized.\n");
+ return -EINVAL;
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+
+ ret = drm_buffer_object_unmap(priv, arg.handle);
+ return ret;
+}
+
+
+int drm_bo_reference_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_bo_reference_info_arg arg;
+ struct drm_bo_handle_arg *req = &arg.d.req;
+ struct drm_bo_info_rep *rep = &arg.d.rep;
+ drm_user_object_t *uo;
+ int ret;
+
+ if (!dev->bm.initialized) {
+ DRM_ERROR("Buffer object manager is not initialized.\n");
+ return -EINVAL;
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+
+ ret = drm_user_object_ref(priv, req->handle,
+ drm_buffer_type, &uo);
+ if (ret)
+ return ret;
+
+ ret = drm_bo_handle_info(priv, req->handle, rep);
+ if (ret)
+ return ret;
+
+ DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+ return 0;
+}
+
+int drm_bo_unreference_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_bo_handle_arg arg;
+ int ret = 0;
+
+ if (!dev->bm.initialized) {
+ DRM_ERROR("Buffer object manager is not initialized.\n");
+ return -EINVAL;
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+
+ ret = drm_user_object_unref(priv, arg.handle, drm_buffer_type);
+ return ret;
+}
+
+int drm_bo_info_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_bo_reference_info_arg arg;
+ struct drm_bo_handle_arg *req = &arg.d.req;
+ struct drm_bo_info_rep *rep = &arg.d.rep;
+ int ret;
+
+ if (!dev->bm.initialized) {
+ DRM_ERROR("Buffer object manager is not initialized.\n");
+ return -EINVAL;
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+
+ ret = drm_bo_handle_info(priv, req->handle, rep);
+ if (ret)
+ return ret;
+ DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+ return 0;
+}
+
+int drm_bo_wait_idle_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_bo_map_wait_idle_arg arg;
+ struct drm_bo_info_req *req = &arg.d.req;
+ struct drm_bo_info_rep *rep = &arg.d.rep;
+ int ret;
+ if (!dev->bm.initialized) {
+ DRM_ERROR("Buffer object manager is not initialized.\n");
+ return -EINVAL;
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+
+ ret = drm_bo_handle_wait(priv, req->handle,
+ req->hint, rep);
+ if (ret)
+ return ret;
+
+ DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+ return 0;
+}
+
+
+
/**
*Clean the unfenced list and put on regular LRU.
*This is part of the memory manager cleanup and should only be
@@ -2175,11 +2338,67 @@ EXPORT_SYMBOL(drm_bo_driver_init);
int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
+ struct drm_mm_init_arg arg;
+ drm_buffer_manager_t *bm = &dev->bm;
+ drm_bo_driver_t *driver = dev->driver->bo_driver;
+ int ret;
- int ret = 0;
- drm_mm_init_arg_t arg;
+ if (!driver) {
+ DRM_ERROR("Buffer objects are not supported by this driver\n");
+ return -EINVAL;
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+ ret = -EINVAL;
+ if (arg.magic != DRM_BO_INIT_MAGIC) {
+ DRM_ERROR("You are using an old libdrm that is not compatible with\n"
+ "\tthe kernel DRM module. Please upgrade your libdrm.\n");
+ return -EINVAL;
+ }
+ if (arg.major != DRM_BO_INIT_MAJOR) {
+ DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
+ "\tversion don't match. Got %d, expected %d,\n",
+ arg.major, DRM_BO_INIT_MAJOR);
+ return -EINVAL;
+ }
+ if (arg.minor > DRM_BO_INIT_MINOR) {
+ DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n"
+ "\tlibdrm buffer object interface version is %d.%d.\n"
+ "\tkernel DRM buffer object interface version is %d.%d\n",
+ arg.major, arg.minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR);
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->bm.init_mutex);
+ mutex_lock(&dev->struct_mutex);
+ if (!bm->initialized) {
+ DRM_ERROR("DRM memory manager was not initialized.\n");
+ goto out;
+ }
+ if (arg.mem_type == 0) {
+ DRM_ERROR("System memory buffers already initialized.\n");
+ goto out;
+ }
+ ret = drm_bo_init_mm(dev, arg.mem_type,
+ arg.p_offset, arg.p_size);
+
+out:
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->bm.init_mutex);
+ if (ret)
+ return ret;
+
+ DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+ return 0;
+}
+
+int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_mm_type_arg arg;
drm_buffer_manager_t *bm = &dev->bm;
drm_bo_driver_t *driver = dev->driver->bo_driver;
+ int ret;
if (!driver) {
DRM_ERROR("Buffer objects are not supported by this driver\n");
@@ -2188,59 +2407,78 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
- switch (arg.req.op) {
- case mm_init:
- ret = -EINVAL;
- mutex_lock(&dev->bm.init_mutex);
- mutex_lock(&dev->struct_mutex);
- if (!bm->initialized) {
- DRM_ERROR("DRM memory manager was not initialized.\n");
- break;
- }
- if (arg.req.mem_type == 0) {
- DRM_ERROR
- ("System memory buffers already initialized.\n");
- break;
- }
- ret = drm_bo_init_mm(dev, arg.req.mem_type,
- arg.req.p_offset, arg.req.p_size);
- break;
- case mm_takedown:
- LOCK_TEST_WITH_RETURN(dev, filp);
- mutex_lock(&dev->bm.init_mutex);
- mutex_lock(&dev->struct_mutex);
- ret = -EINVAL;
- if (!bm->initialized) {
- DRM_ERROR("DRM memory manager was not initialized\n");
- break;
- }
- if (arg.req.mem_type == 0) {
- DRM_ERROR("No takedown for System memory buffers.\n");
- break;
- }
- ret = 0;
- if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
- DRM_ERROR("Memory manager type %d not clean. "
- "Delaying takedown\n", arg.req.mem_type);
- }
- break;
- case mm_lock:
- LOCK_TEST_WITH_RETURN(dev, filp);
- mutex_lock(&dev->bm.init_mutex);
- mutex_lock(&dev->struct_mutex);
- ret = drm_bo_lock_mm(dev, arg.req.mem_type);
- break;
- case mm_unlock:
- LOCK_TEST_WITH_RETURN(dev, filp);
- mutex_lock(&dev->bm.init_mutex);
- mutex_lock(&dev->struct_mutex);
- ret = 0;
- break;
- default:
- DRM_ERROR("Function not implemented yet\n");
+ LOCK_TEST_WITH_RETURN(dev, filp);
+ mutex_lock(&dev->bm.init_mutex);
+ mutex_lock(&dev->struct_mutex);
+ ret = -EINVAL;
+ if (!bm->initialized) {
+ DRM_ERROR("DRM memory manager was not initialized\n");
+ goto out;
+ }
+ if (arg.mem_type == 0) {
+ DRM_ERROR("No takedown for System memory buffers.\n");
+ goto out;
+ }
+ ret = 0;
+ if (drm_bo_clean_mm(dev, arg.mem_type)) {
+ DRM_ERROR("Memory manager type %d not clean. "
+ "Delaying takedown\n", arg.mem_type);
+ }
+out:
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->bm.init_mutex);
+ if (ret)
+ return ret;
+
+ DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+ return 0;
+}
+
+int drm_mm_lock_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_mm_type_arg arg;
+ drm_bo_driver_t *driver = dev->driver->bo_driver;
+ int ret;
+
+ if (!driver) {
+ DRM_ERROR("Buffer objects are not supported by this driver\n");
+ return -EINVAL;
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+
+ LOCK_TEST_WITH_RETURN(dev, filp);
+ mutex_lock(&dev->bm.init_mutex);
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_bo_lock_mm(dev, arg.mem_type);
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->bm.init_mutex);
+ if (ret)
+ return ret;
+
+ DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+ return 0;
+}
+
+int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_mm_type_arg arg;
+ drm_bo_driver_t *driver = dev->driver->bo_driver;
+ int ret;
+
+ if (!driver) {
+ DRM_ERROR("Buffer objects are not supported by this driver\n");
return -EINVAL;
}
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+ LOCK_TEST_WITH_RETURN(dev, filp);
+ mutex_lock(&dev->bm.init_mutex);
+ mutex_lock(&dev->struct_mutex);
+ ret = 0;
+
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->bm.init_mutex);
if (ret)
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index 08d20d06..5d1d62fa 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -196,7 +196,8 @@ static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
return ret;
}
-static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
+
+static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
struct fault_data *data)
{
unsigned long address = data->address;
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 0d446a12..30200ca4 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -117,12 +117,43 @@ static drm_ioctl_desc_t drm_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_FENCE)] = {drm_fence_ioctl, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH},
+
+ // [DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH},
+
+ [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+
+
[DRM_IOCTL_NR(DRM_IOCTL_MM_INIT)] = {drm_mm_init_ioctl,
DRM_AUTH },
+ [DRM_IOCTL_NR(DRM_IOCTL_MM_TAKEDOWN)] = {drm_mm_takedown_ioctl,
+ DRM_AUTH },
+ [DRM_IOCTL_NR(DRM_IOCTL_MM_LOCK)] = {drm_mm_lock_ioctl,
+ DRM_AUTH },
+ [DRM_IOCTL_NR(DRM_IOCTL_MM_UNLOCK)] = {drm_mm_unlock_ioctl,
+ DRM_AUTH },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_FENCE_CREATE)] = {drm_fence_create_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_FENCE_DESTROY)] = {drm_fence_destroy_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_FENCE_REFERENCE)] = {drm_fence_reference_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_FENCE_UNREFERENCE)] = {drm_fence_unreference_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_FENCE_SIGNALED)] = {drm_fence_signaled_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_FENCE_FLUSH)] = {drm_fence_flush_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_FENCE_WAIT)] = {drm_fence_wait_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_FENCE_EMIT)] = {drm_fence_emit_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_FENCE_BUFFERS)] = {drm_fence_buffers_ioctl, DRM_AUTH},
+
+ [DRM_IOCTL_NR(DRM_IOCTL_BO_CREATE)] = {drm_bo_create_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_BO_DESTROY)] = {drm_bo_destroy_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_BO_MAP)] = {drm_bo_map_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_BO_UNMAP)] = {drm_bo_unmap_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_BO_REFERENCE)] = {drm_bo_reference_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_BO_UNREFERENCE)] = {drm_bo_unreference_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_BO_OP)] = {drm_bo_op_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_BO_INFO)] = {drm_bo_info_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_BO_WAIT_IDLE)] = {drm_bo_wait_idle_ioctl, DRM_AUTH},
+
+
- [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
index 5215feb6..b6f14249 100644
--- a/linux-core/drm_fence.c
+++ b/linux-core/drm_fence.c
@@ -565,14 +565,13 @@ drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
return fence;
}
-int drm_fence_ioctl(DRM_IOCTL_ARGS)
+int drm_fence_create_ioctl(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
int ret;
drm_fence_manager_t *fm = &dev->fm;
drm_fence_arg_t arg;
drm_fence_object_t *fence;
- drm_user_object_t *uo;
unsigned long flags;
ret = 0;
@@ -582,92 +581,288 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
}
DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
- switch (arg.op) {
- case drm_fence_create:
- if (arg.flags & DRM_FENCE_FLAG_EMIT)
- LOCK_TEST_WITH_RETURN(dev, filp);
- ret = drm_fence_object_create(dev, arg.class,
- arg.type, arg.flags, &fence);
- if (ret)
- return ret;
- ret = drm_fence_add_user_object(priv, fence,
- arg.flags &
- DRM_FENCE_FLAG_SHAREABLE);
- if (ret) {
- drm_fence_usage_deref_unlocked(&fence);
- return ret;
- }
- arg.handle = fence->base.hash.key;
+ if (arg.flags & DRM_FENCE_FLAG_EMIT)
+ LOCK_TEST_WITH_RETURN(dev, filp);
+ ret = drm_fence_object_create(dev, arg.class,
+ arg.type, arg.flags, &fence);
+ if (ret)
+ return ret;
+ ret = drm_fence_add_user_object(priv, fence,
+ arg.flags &
+ DRM_FENCE_FLAG_SHAREABLE);
+ if (ret) {
+ drm_fence_usage_deref_unlocked(&fence);
+ return ret;
+ }
+
+ /*
+ * usage > 0. No need to lock dev->struct_mutex;
+ */
+
+ atomic_inc(&fence->usage);
+ arg.handle = fence->base.hash.key;
- break;
- case drm_fence_destroy:
- mutex_lock(&dev->struct_mutex);
- uo = drm_lookup_user_object(priv, arg.handle);
- if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
- ret = drm_remove_user_object(priv, uo);
+ read_lock_irqsave(&fm->lock, flags);
+ arg.class = fence->class;
+ arg.type = fence->type;
+ arg.signaled = fence->signaled;
+ read_unlock_irqrestore(&fm->lock, flags);
+ drm_fence_usage_deref_unlocked(&fence);
+
+ DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+ return ret;
+}
+
+int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ int ret;
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_arg_t arg;
+ drm_user_object_t *uo;
+ ret = 0;
+
+ if (!fm->initialized) {
+ DRM_ERROR("The DRM driver does not support fencing.\n");
+ return -EINVAL;
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+
+ mutex_lock(&dev->struct_mutex);
+ uo = drm_lookup_user_object(priv, arg.handle);
+ if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+ ret = drm_remove_user_object(priv, uo);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+
+int drm_fence_reference_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ int ret;
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_arg_t arg;
+ drm_fence_object_t *fence;
+ drm_user_object_t *uo;
+ unsigned long flags;
+ ret = 0;
+
+ if (!fm->initialized) {
+ DRM_ERROR("The DRM driver does not support fencing.\n");
+ return -EINVAL;
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+ ret = drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
+ if (ret)
return ret;
- case drm_fence_reference:
- ret =
- drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
- if (ret)
- return ret;
- fence = drm_lookup_fence_object(priv, arg.handle);
- break;
- case drm_fence_unreference:
- ret = drm_user_object_unref(priv, arg.handle, drm_fence_type);
- return ret;
- case drm_fence_signaled:
- fence = drm_lookup_fence_object(priv, arg.handle);
- if (!fence)
- return -EINVAL;
- break;
- case drm_fence_flush:
- fence = drm_lookup_fence_object(priv, arg.handle);
- if (!fence)
- return -EINVAL;
- ret = drm_fence_object_flush(fence, arg.type);
- break;
- case drm_fence_wait:
- fence = drm_lookup_fence_object(priv, arg.handle);
- if (!fence)
- return -EINVAL;
- ret =
- drm_fence_object_wait(fence,
- arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
- 0, arg.type);
- break;
- case drm_fence_emit:
- LOCK_TEST_WITH_RETURN(dev, filp);
- fence = drm_lookup_fence_object(priv, arg.handle);
- if (!fence)
- return -EINVAL;
- ret = drm_fence_object_emit(fence, arg.flags, arg.class,
- arg.type);
- break;
- case drm_fence_buffers:
- if (!dev->bm.initialized) {
- DRM_ERROR("Buffer object manager is not initialized\n");
- return -EINVAL;
- }
- LOCK_TEST_WITH_RETURN(dev, filp);
- ret = drm_fence_buffer_objects(priv, NULL, arg.flags,
- NULL, &fence);
- if (ret)
- return ret;
- ret = drm_fence_add_user_object(priv, fence,
- arg.flags &
- DRM_FENCE_FLAG_SHAREABLE);
- if (ret)
- return ret;
- arg.handle = fence->base.hash.key;
- break;
- default:
+ fence = drm_lookup_fence_object(priv, arg.handle);
+
+ read_lock_irqsave(&fm->lock, flags);
+ arg.class = fence->class;
+ arg.type = fence->type;
+ arg.signaled = fence->signaled;
+ read_unlock_irqrestore(&fm->lock, flags);
+ drm_fence_usage_deref_unlocked(&fence);
+
+ DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+ return ret;
+}
+
+
+int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ int ret;
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_arg_t arg;
+ ret = 0;
+
+ if (!fm->initialized) {
+ DRM_ERROR("The DRM driver does not support fencing.\n");
+ return -EINVAL;
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+ return drm_user_object_unref(priv, arg.handle, drm_fence_type);
+}
+
+int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ int ret;
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_arg_t arg;
+ drm_fence_object_t *fence;
+ unsigned long flags;
+ ret = 0;
+
+ if (!fm->initialized) {
+ DRM_ERROR("The DRM driver does not support fencing.\n");
+ return -EINVAL;
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+
+ fence = drm_lookup_fence_object(priv, arg.handle);
+ if (!fence)
+ return -EINVAL;
+
+ read_lock_irqsave(&fm->lock, flags);
+ arg.class = fence->class;
+ arg.type = fence->type;
+ arg.signaled = fence->signaled;
+ read_unlock_irqrestore(&fm->lock, flags);
+ drm_fence_usage_deref_unlocked(&fence);
+
+ DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+ return ret;
+}
+
+int drm_fence_flush_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ int ret;
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_arg_t arg;
+ drm_fence_object_t *fence;
+ unsigned long flags;
+ ret = 0;
+
+ if (!fm->initialized) {
+ DRM_ERROR("The DRM driver does not support fencing.\n");
return -EINVAL;
}
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+
+ fence = drm_lookup_fence_object(priv, arg.handle);
+ if (!fence)
+ return -EINVAL;
+ ret = drm_fence_object_flush(fence, arg.type);
+
+ read_lock_irqsave(&fm->lock, flags);
+ arg.class = fence->class;
+ arg.type = fence->type;
+ arg.signaled = fence->signaled;
+ read_unlock_irqrestore(&fm->lock, flags);
+ drm_fence_usage_deref_unlocked(&fence);
+
+ DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+ return ret;
+}
+
+
+int drm_fence_wait_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ int ret;
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_arg_t arg;
+ drm_fence_object_t *fence;
+ unsigned long flags;
+ ret = 0;
+
+ if (!fm->initialized) {
+ DRM_ERROR("The DRM driver does not support fencing.\n");
+ return -EINVAL;
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+
+ fence = drm_lookup_fence_object(priv, arg.handle);
+ if (!fence)
+ return -EINVAL;
+ ret = drm_fence_object_wait(fence,
+ arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
+ 0, arg.type);
+
+ read_lock_irqsave(&fm->lock, flags);
+ arg.class = fence->class;
+ arg.type = fence->type;
+ arg.signaled = fence->signaled;
+ read_unlock_irqrestore(&fm->lock, flags);
+ drm_fence_usage_deref_unlocked(&fence);
+
+ DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+ return ret;
+}
+
+
+int drm_fence_emit_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ int ret;
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_arg_t arg;
+ drm_fence_object_t *fence;
+ unsigned long flags;
+ ret = 0;
+
+ if (!fm->initialized) {
+ DRM_ERROR("The DRM driver does not support fencing.\n");
+ return -EINVAL;
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+
+ LOCK_TEST_WITH_RETURN(dev, filp);
+ fence = drm_lookup_fence_object(priv, arg.handle);
+ if (!fence)
+ return -EINVAL;
+ ret = drm_fence_object_emit(fence, arg.flags, arg.class,
+ arg.type);
+
+ read_lock_irqsave(&fm->lock, flags);
+ arg.class = fence->class;
+ arg.type = fence->type;
+ arg.signaled = fence->signaled;
+ read_unlock_irqrestore(&fm->lock, flags);
+ drm_fence_usage_deref_unlocked(&fence);
+
+ DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+ return ret;
+}
+
+int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ int ret;
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_arg_t arg;
+ drm_fence_object_t *fence;
+ unsigned long flags;
+ ret = 0;
+
+ if (!fm->initialized) {
+ DRM_ERROR("The DRM driver does not support fencing.\n");
+ return -EINVAL;
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+
+ if (!dev->bm.initialized) {
+ DRM_ERROR("Buffer object manager is not initialized\n");
+ return -EINVAL;
+ }
+ LOCK_TEST_WITH_RETURN(dev, filp);
+ ret = drm_fence_buffer_objects(priv, NULL, arg.flags,
+ NULL, &fence);
+ if (ret)
+ return ret;
+ ret = drm_fence_add_user_object(priv, fence,
+ arg.flags &
+ DRM_FENCE_FLAG_SHAREABLE);
+ if (ret)
+ return ret;
+ atomic_inc(&fence->usage);
+ arg.handle = fence->base.hash.key;
+
read_lock_irqsave(&fm->lock, flags);
arg.class = fence->class;
arg.type = fence->type;
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index f82d6628..8b65f90a 100644
--- a/linux-core/drm_objects.h
+++ b/linux-core/drm_objects.h
@@ -212,8 +212,16 @@ extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
drm_fence_object_t ** c_fence);
extern int drm_fence_add_user_object(drm_file_t * priv,
drm_fence_object_t * fence, int shareable);
-extern int drm_fence_ioctl(DRM_IOCTL_ARGS);
+extern int drm_fence_create_ioctl(DRM_IOCTL_ARGS);
+extern int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS);
+extern int drm_fence_reference_ioctl(DRM_IOCTL_ARGS);
+extern int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS);
+extern int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS);
+extern int drm_fence_flush_ioctl(DRM_IOCTL_ARGS);
+extern int drm_fence_wait_ioctl(DRM_IOCTL_ARGS);
+extern int drm_fence_emit_ioctl(DRM_IOCTL_ARGS);
+extern int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS);
/**************************************************
*TTMs
*/
@@ -314,8 +322,8 @@ typedef struct drm_bo_mem_reg {
unsigned long num_pages;
uint32_t page_alignment;
uint32_t mem_type;
- uint32_t flags;
- uint32_t mask;
+ uint64_t flags;
+ uint64_t mask;
} drm_bo_mem_reg_t;
typedef struct drm_buffer_object {
@@ -416,8 +424,8 @@ typedef struct drm_bo_driver {
uint32_t num_mem_busy_prio;
drm_ttm_backend_t *(*create_ttm_backend_entry)
(struct drm_device * dev);
- int (*fence_type) (struct drm_buffer_object *bo, uint32_t * class, uint32_t * type);
- int (*invalidate_caches) (struct drm_device * dev, uint32_t flags);
+ int (*fence_type) (struct drm_buffer_object *bo, uint32_t * type);
+ int (*invalidate_caches) (struct drm_device * dev, uint64_t flags);
int (*init_mem_type) (struct drm_device * dev, uint32_t type,
drm_mem_type_manager_t * man);
uint32_t(*evict_mask) (struct drm_buffer_object *bo);
@@ -429,8 +437,21 @@ typedef struct drm_bo_driver {
* buffer objects (drm_bo.c)
*/
-extern int drm_bo_ioctl(DRM_IOCTL_ARGS);
+extern int drm_bo_create_ioctl(DRM_IOCTL_ARGS);
+extern int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS);
+extern int drm_bo_map_ioctl(DRM_IOCTL_ARGS);
+extern int drm_bo_unmap_ioctl(DRM_IOCTL_ARGS);
+extern int drm_bo_reference_ioctl(DRM_IOCTL_ARGS);
+extern int drm_bo_unreference_ioctl(DRM_IOCTL_ARGS);
+extern int drm_bo_wait_idle_ioctl(DRM_IOCTL_ARGS);
+extern int drm_bo_info_ioctl(DRM_IOCTL_ARGS);
+extern int drm_bo_op_ioctl(DRM_IOCTL_ARGS);
+
+
extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mm_lock_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS);
extern int drm_bo_driver_finish(struct drm_device *dev);
extern int drm_bo_driver_init(struct drm_device *dev);
extern int drm_bo_pci_offset(struct drm_device *dev,
diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c
index 8589f467..2850fb94 100644
--- a/linux-core/i915_buffer.c
+++ b/linux-core/i915_buffer.c
@@ -38,9 +38,8 @@ drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev)
return drm_agp_init_ttm(dev);
}
-int i915_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type)
+int i915_fence_types(drm_buffer_object_t *bo, uint32_t * type)
{
- *class = 0;
if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
*type = 3;
else
@@ -48,7 +47,7 @@ int i915_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type)
return 0;
}
-int i915_invalidate_caches(drm_device_t * dev, uint32_t flags)
+int i915_invalidate_caches(drm_device_t * dev, uint64_t flags)
{
/*
* FIXME: Only emit once per batchbuffer submission.
diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c
index ebc8c371..86883998 100644
--- a/linux-core/via_buffer.c
+++ b/linux-core/via_buffer.c
@@ -37,14 +37,13 @@ drm_ttm_backend_t *via_create_ttm_backend_entry(drm_device_t * dev)
return drm_agp_init_ttm(dev);
}
-int via_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type)
+int via_fence_types(drm_buffer_object_t *bo, uint32_t * type)
{
- *class = 0;
*type = 3;
return 0;
}
-int via_invalidate_caches(drm_device_t * dev, uint32_t flags)
+int via_invalidate_caches(drm_device_t * dev, uint64_t flags)
{
/*
* FIXME: Invalidate texture caches here.
diff --git a/shared-core/drm.h b/shared-core/drm.h
index b4195419..e017c023 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -671,23 +671,13 @@ typedef struct drm_set_version {
#define DRM_FENCE_TYPE_EXE 0x00000001
typedef struct drm_fence_arg {
- unsigned handle;
- int class;
- unsigned type;
- unsigned flags;
- unsigned signaled;
- unsigned expand_pad[4]; /*Future expansion */
- enum {
- drm_fence_create,
- drm_fence_destroy,
- drm_fence_reference,
- drm_fence_unreference,
- drm_fence_signaled,
- drm_fence_flush,
- drm_fence_wait,
- drm_fence_emit,
- drm_fence_buffers
- } op;
+ unsigned int handle;
+ unsigned int class;
+ unsigned int type;
+ unsigned int flags;
+ unsigned int signaled;
+ unsigned int pad64;
+ drm_u64_t expand_pad[3]; /*Future expansion */
} drm_fence_arg_t;
/* Buffer permissions, referring to how the GPU uses the buffers.
@@ -696,9 +686,9 @@ typedef struct drm_fence_arg {
* a command (batch-) buffer is exe. Can be or-ed together.
*/
-#define DRM_BO_FLAG_READ 0x00000001
-#define DRM_BO_FLAG_WRITE 0x00000002
-#define DRM_BO_FLAG_EXE 0x00000004
+#define DRM_BO_FLAG_READ (1ULL << 0)
+#define DRM_BO_FLAG_WRITE (1ULL << 1)
+#define DRM_BO_FLAG_EXE (1ULL << 2)
/*
* Status flags. Can be read to determine the actual state of a buffer.
@@ -711,25 +701,25 @@ typedef struct drm_fence_arg {
* or lock.
* Flags: Acknowledge
*/
-#define DRM_BO_FLAG_NO_EVICT 0x00000010
+#define DRM_BO_FLAG_NO_EVICT (1ULL << 4)
/*
* Mask: Require that the buffer is placed in mappable memory when validated.
* If not set the buffer may or may not be in mappable memory when validated.
* Flags: If set, the buffer is in mappable memory.
*/
-#define DRM_BO_FLAG_MAPPABLE 0x00000020
+#define DRM_BO_FLAG_MAPPABLE (1ULL << 5)
/* Mask: The buffer should be shareable with other processes.
* Flags: The buffer is shareable with other processes.
*/
-#define DRM_BO_FLAG_SHAREABLE 0x00000040
+#define DRM_BO_FLAG_SHAREABLE (1ULL << 6)
/* Mask: If set, place the buffer in cache-coherent memory if available.
* If clear, never place the buffer in cache coherent memory if validated.
* Flags: The buffer is currently in cache-coherent memory.
*/
-#define DRM_BO_FLAG_CACHED 0x00000080
+#define DRM_BO_FLAG_CACHED (1ULL << 7)
/* Mask: Make sure that every time this buffer is validated,
* it ends up on the same location provided that the memory mask is the same.
@@ -738,23 +728,23 @@ typedef struct drm_fence_arg {
* part of buffer manager shutdown or locking.
* Flags: Acknowledge.
*/
-#define DRM_BO_FLAG_NO_MOVE 0x00000100
+#define DRM_BO_FLAG_NO_MOVE (1ULL << 8)
/* Mask: Make sure the buffer is in cached memory when mapped for reading.
* Flags: Acknowledge.
*/
-#define DRM_BO_FLAG_READ_CACHED 0x00080000
+#define DRM_BO_FLAG_READ_CACHED (1ULL << 19)
/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
* Flags: Acknowledge.
*/
-#define DRM_BO_FLAG_FORCE_CACHING 0x00002000
+#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13)
/*
* Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
* Flags: Acknowledge.
*/
-#define DRM_BO_FLAG_FORCE_MAPPABLE 0x00004000
+#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
/*
* Memory type flags that can be or'ed together in the mask, but only
@@ -762,21 +752,25 @@ typedef struct drm_fence_arg {
*/
/* System memory */
-#define DRM_BO_FLAG_MEM_LOCAL 0x01000000
+#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24)
/* Translation table memory */
-#define DRM_BO_FLAG_MEM_TT 0x02000000
+#define DRM_BO_FLAG_MEM_TT (1ULL << 25)
/* Vram memory */
-#define DRM_BO_FLAG_MEM_VRAM 0x04000000
+#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26)
/* Up to the driver to define. */
-#define DRM_BO_FLAG_MEM_PRIV0 0x08000000
-#define DRM_BO_FLAG_MEM_PRIV1 0x10000000
-#define DRM_BO_FLAG_MEM_PRIV2 0x20000000
-#define DRM_BO_FLAG_MEM_PRIV3 0x40000000
-#define DRM_BO_FLAG_MEM_PRIV4 0x80000000
+#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27)
+#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28)
+#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29)
+#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30)
+#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31)
+/* We can add more of these now with a 64-bit flag type */
/* Memory flag mask */
-#define DRM_BO_MASK_MEM 0xFF000000
-#define DRM_BO_MASK_MEMTYPE 0xFF0000A0
+#define DRM_BO_MASK_MEM 0x00000000FF000000ULL
+#define DRM_BO_MASK_MEMTYPE 0x00000000FF0000A0ULL
+
+/* Driver-private flags */
+#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL
/* Don't block on validate and map */
#define DRM_BO_HINT_DONT_BLOCK 0x00000002
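
Because the buffer flags and masks are now 64-bit quantities, any code storing or testing them must keep the full width; a truncation to 32 bits loses exactly the new driver-private range. A small illustration (the specific driver bit chosen below is hypothetical):

#include <stdint.h>

static uint64_t example_build_mask(void)
{
	uint64_t mask = DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
			DRM_BO_FLAG_MEM_TT;
	mask |= 1ULL << 48;	/* hypothetical driver-private flag, inside DRM_BO_MASK_DRIVER */
	return mask;
}

static int example_has_driver_bits(uint64_t flags)
{
	/* A (uint32_t)flags truncation would always make this return 0,
	 * since DRM_BO_MASK_DRIVER covers bits 48-63 only. */
	return (flags & DRM_BO_MASK_DRIVER) != 0;
}
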
@@ -785,6 +779,10 @@ typedef struct drm_fence_arg {
#define DRM_BO_HINT_WAIT_LAZY 0x00000008
#define DRM_BO_HINT_ALLOW_UNFENCED_MAP 0x00000010
+#define DRM_BO_INIT_MAGIC 0xfe769812
+#define DRM_BO_INIT_MAJOR 0
+#define DRM_BO_INIT_MINOR 1
+
typedef enum {
drm_bo_type_dc,
@@ -793,32 +791,34 @@ typedef enum {
drm_bo_type_kernel, /* for initial kernel allocations */
}drm_bo_type_t;
+struct drm_bo_info_req {
+ drm_u64_t mask;
+ drm_u64_t flags;
+ unsigned int handle;
+ unsigned int hint;
+ unsigned int fence_class;
+ unsigned int pad64;
+};
-typedef struct drm_bo_arg_request {
- unsigned handle; /* User space handle */
- unsigned mask;
- unsigned hint;
+struct drm_bo_create_req {
+ drm_u64_t mask;
drm_u64_t size;
- drm_bo_type_t type;
- unsigned arg_handle;
drm_u64_t buffer_start;
- unsigned page_alignment;
- unsigned expand_pad[4]; /*Future expansion */
+ unsigned int hint;
+ unsigned int page_alignment;
+ drm_bo_type_t type;
+ unsigned int pad64;
+};
+
+struct drm_bo_op_req {
enum {
- drm_bo_create,
drm_bo_validate,
- drm_bo_map,
- drm_bo_unmap,
drm_bo_fence,
- drm_bo_destroy,
- drm_bo_reference,
- drm_bo_unreference,
- drm_bo_info,
- drm_bo_wait_idle,
- drm_bo_ref_fence
+ drm_bo_ref_fence,
} op;
-} drm_bo_arg_request_t;
-
+ unsigned int arg_handle;
+ struct drm_bo_info_req bo_req;
+};
/*
* Reply flags
@@ -826,30 +826,64 @@ typedef struct drm_bo_arg_request {
#define DRM_BO_REP_BUSY 0x00000001
-typedef struct drm_bo_arg_reply {
- int ret;
- unsigned handle;
- unsigned flags;
+struct drm_bo_info_rep {
+ drm_u64_t flags;
+ drm_u64_t mask;
drm_u64_t size;
drm_u64_t offset;
drm_u64_t arg_handle;
- unsigned mask;
drm_u64_t buffer_start;
- unsigned fence_flags;
- unsigned rep_flags;
- unsigned page_alignment;
- unsigned expand_pad[4]; /*Future expansion */
-}drm_bo_arg_reply_t;
+ unsigned int handle;
+ unsigned int fence_flags;
+ unsigned int rep_flags;
+ unsigned int page_alignment;
+ unsigned int desired_tile_stride;
+ unsigned int hw_tile_stride;
+ unsigned int tile_info;
+ unsigned int pad64;
+ drm_u64_t expand_pad[4]; /*Future expansion */
+};
+struct drm_bo_arg_rep {
+ struct drm_bo_info_rep bo_info;
+ int ret;
+ unsigned int pad64;
+};
-typedef struct drm_bo_arg{
- int handled;
+struct drm_bo_create_arg {
+ union {
+ struct drm_bo_create_req req;
+ struct drm_bo_info_rep rep;
+ } d;
+};
+
+struct drm_bo_handle_arg {
+ unsigned int handle;
+};
+
+struct drm_bo_reference_info_arg {
+ union {
+ struct drm_bo_handle_arg req;
+ struct drm_bo_info_rep rep;
+ } d;
+};
+
+struct drm_bo_map_wait_idle_arg {
+ union {
+ struct drm_bo_info_req req;
+ struct drm_bo_info_rep rep;
+ } d;
+};
+
+struct drm_bo_op_arg {
drm_u64_t next;
union {
- drm_bo_arg_request_t req;
- drm_bo_arg_reply_t rep;
+ struct drm_bo_op_req req;
+ struct drm_bo_arg_rep rep;
} d;
-} drm_bo_arg_t;
+ int handled;
+ unsigned int pad64;
+};
#define DRM_BO_MEM_LOCAL 0
#define DRM_BO_MEM_TT 1
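
The single drm_bo_arg_t request/reply pair is thus split into purpose-built argument structs, each a fixed-size in/out union. A sketch of how user space might create a buffer through the new path; the wrapper itself is not part of the patch, only the struct members and the ioctl number come from it:

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>

static int example_bo_create(int fd, drm_u64_t size, drm_u64_t mask,
			     unsigned int *handle_out)
{
	struct drm_bo_create_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.d.req.size = size;
	arg.d.req.mask = mask;		/* e.g. DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT */
	arg.d.req.type = drm_bo_type_dc;

	if (ioctl(fd, DRM_IOCTL_BO_CREATE, &arg))
		return -errno;

	/* On success the kernel has overwritten the union with d.rep. */
	*handle_out = arg.d.rep.handle;
	return 0;
}
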
@@ -862,24 +896,17 @@ typedef struct drm_bo_arg{
#define DRM_BO_MEM_TYPES 8 /* For now. */
-typedef union drm_mm_init_arg{
- struct {
- enum {
- mm_init,
- mm_takedown,
- mm_query,
- mm_lock,
- mm_unlock
- } op;
- drm_u64_t p_offset;
- drm_u64_t p_size;
- unsigned mem_type;
- unsigned expand_pad[8]; /*Future expansion */
- } req;
- struct {
- drm_handle_t mm_sarea;
- unsigned expand_pad[8]; /*Future expansion */
- } rep;
+typedef struct drm_mm_type_arg {
+ unsigned int mem_type;
+} drm_mm_type_arg_t;
+
+typedef struct drm_mm_init_arg {
+ unsigned int magic;
+ unsigned int major;
+ unsigned int minor;
+ unsigned int mem_type;
+ drm_u64_t p_offset;
+ drm_u64_t p_size;
} drm_mm_init_arg_t;
/**
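
The old mm_init union with its own op enum is likewise replaced by two flat structs: drm_mm_type_arg for takedown/lock/unlock and a versioned drm_mm_init_arg carrying the DRM_BO_INIT_MAGIC handshake. A sketch of initializing the TT memory type with it; the wrapper is hypothetical, and treating p_offset/p_size as page counts is an assumption:

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>

static int example_mm_init_tt(int fd, drm_u64_t p_offset, drm_u64_t p_size)
{
	drm_mm_init_arg_t arg;

	memset(&arg, 0, sizeof(arg));
	arg.magic = DRM_BO_INIT_MAGIC;	/* presumably checked by the kernel against its own version */
	arg.major = DRM_BO_INIT_MAJOR;
	arg.minor = DRM_BO_INIT_MINOR;
	arg.mem_type = DRM_BO_MEM_TT;
	arg.p_offset = p_offset;
	arg.p_size = p_size;

	return ioctl(fd, DRM_IOCTL_MM_INIT, &arg) ? -errno : 0;
}
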
@@ -947,12 +974,34 @@ typedef union drm_mm_init_arg{
#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, drm_wait_vblank_t)
-#define DRM_IOCTL_FENCE DRM_IOWR(0x3b, drm_fence_arg_t)
-#define DRM_IOCTL_BUFOBJ DRM_IOWR(0x3d, drm_bo_arg_t)
-#define DRM_IOCTL_MM_INIT DRM_IOWR(0x3e, drm_mm_init_arg_t)
-
#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, drm_update_draw_t)
+#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, drm_mm_init_arg_t)
+#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, drm_mm_type_arg_t)
+#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, drm_mm_type_arg_t)
+#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, drm_mm_type_arg_t)
+
+#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, drm_fence_arg_t)
+#define DRM_IOCTL_FENCE_DESTROY DRM_IOWR(0xc5, drm_fence_arg_t)
+#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, drm_fence_arg_t)
+#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, drm_fence_arg_t)
+#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, drm_fence_arg_t)
+#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, drm_fence_arg_t)
+#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, drm_fence_arg_t)
+#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, drm_fence_arg_t)
+#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, drm_fence_arg_t)
+
+#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg)
+#define DRM_IOCTL_BO_DESTROY DRM_IOWR(0xce, struct drm_bo_handle_arg)
+#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
+#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg)
+#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
+#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg)
+#define DRM_IOCTL_BO_OP DRM_IOWR(0xd3, struct drm_bo_op_arg)
+#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
+#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
+
+
/*@}*/
/**
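
The multiplexed DRM_IOCTL_FENCE and DRM_IOCTL_BUFOBJ entries (and the old mm_init union behind DRM_IOCTL_MM_INIT) give way to one ioctl number per operation in the 0xc0-0xd5 range, so the simple calls shrink to a handle-only argument. A sketch of unreferencing a buffer object; the wrapper is illustrative, not part of the patch:

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>

static int example_bo_unreference(int fd, unsigned int handle)
{
	struct drm_bo_handle_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;

	return ioctl(fd, DRM_IOCTL_BO_UNREFERENCE, &arg) ? -errno : 0;
}
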
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index e0432996..e19d372a 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -198,8 +198,8 @@ extern int i915_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags)
#ifdef I915_HAVE_BUFFER
/* i915_buffer.c */
extern drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev);
-extern int i915_fence_types(drm_buffer_object_t *bo, uint32_t *class, uint32_t *type);
-extern int i915_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags);
+extern int i915_fence_types(drm_buffer_object_t *bo, uint32_t *type);
+extern int i915_invalidate_caches(drm_device_t *dev, uint64_t buffer_flags);
extern int i915_init_mem_type(drm_device_t *dev, uint32_t type,
drm_mem_type_manager_t *man);
extern uint32_t i915_evict_mask(drm_buffer_object_t *bo);
diff --git a/shared-core/via_drv.h b/shared-core/via_drv.h
index baafbbff..b6dbf6c1 100644
--- a/shared-core/via_drv.h
+++ b/shared-core/via_drv.h
@@ -205,8 +205,8 @@ extern int via_fence_has_irq(struct drm_device * dev, uint32_t class,
#ifdef VIA_HAVE_BUFFER
extern drm_ttm_backend_t *via_create_ttm_backend_entry(drm_device_t *dev);
-extern int via_fence_types(drm_buffer_object_t *bo, uint32_t *class, uint32_t *type);
-extern int via_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags);
+extern int via_fence_types(drm_buffer_object_t *bo, uint32_t *type);
+extern int via_invalidate_caches(drm_device_t *dev, uint64_t buffer_flags);
extern int via_init_mem_type(drm_device_t *dev, uint32_t type,
drm_mem_type_manager_t *man);
extern uint32_t via_evict_mask(drm_buffer_object_t *bo);