Diffstat (limited to 'arch')
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/file.c | 154 |
1 files changed, 125 insertions, 29 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 514a1d50803..b4d38cb65f1 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -354,27 +354,54 @@ static int spufs_pipe_open(struct inode *inode, struct file *file)
 	return nonseekable_open(inode, file);
 }
 
+/*
+ * Read as many bytes from the mailbox as possible, until
+ * one of the conditions becomes true:
+ *
+ * - no more data available in the mailbox
+ * - end of the user provided buffer
+ * - end of the mapped area
+ */
 static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
 			size_t len, loff_t *pos)
 {
 	struct spu_context *ctx = file->private_data;
-	u32 mbox_data;
-	int ret;
+	u32 mbox_data, __user *udata;
+	ssize_t count;
 
 	if (len < 4)
 		return -EINVAL;
 
+	if (!access_ok(VERIFY_WRITE, buf, len))
+		return -EFAULT;
+
+	udata = (void __user *)buf;
+
 	spu_acquire(ctx);
-	ret = ctx->ops->mbox_read(ctx, &mbox_data);
+	for (count = 0; (count + 4) <= len; count += 4, udata++) {
+		int ret;
+		ret = ctx->ops->mbox_read(ctx, &mbox_data);
+		if (ret == 0)
+			break;
+
+		/*
+		 * at the end of the mapped area, we can fault
+		 * but still need to return the data we have
+		 * read successfully so far.
+		 */
+		ret = __put_user(mbox_data, udata);
+		if (ret) {
+			if (!count)
+				count = -EFAULT;
+			break;
+		}
+	}
 	spu_release(ctx);
 
-	if (!ret)
-		return -EAGAIN;
-
-	if (copy_to_user(buf, &mbox_data, sizeof mbox_data))
-		return -EFAULT;
+	if (!count)
+		count = -EAGAIN;
 
-	return 4;
+	return count;
 }
 
 static struct file_operations spufs_mbox_fops = {
@@ -430,36 +457,70 @@ void spufs_ibox_callback(struct spu *spu)
 	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
 }
 
+/*
+ * Read as many bytes from the interrupt mailbox as possible, until
+ * one of the conditions becomes true:
+ *
+ * - no more data available in the mailbox
+ * - end of the user provided buffer
+ * - end of the mapped area
+ *
+ * If the file is opened without O_NONBLOCK, we wait here until
+ * any data is available, but return when we have been able to
+ * read something.
+ */
 static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
 			size_t len, loff_t *pos)
 {
 	struct spu_context *ctx = file->private_data;
-	u32 ibox_data;
-	ssize_t ret;
+	u32 ibox_data, __user *udata;
+	ssize_t count;
 
 	if (len < 4)
 		return -EINVAL;
 
+	if (!access_ok(VERIFY_WRITE, buf, len))
+		return -EFAULT;
+
+	udata = (void __user *)buf;
+
 	spu_acquire(ctx);
 
-	ret = 0;
+	/* wait only for the first element */
+	count = 0;
 	if (file->f_flags & O_NONBLOCK) {
 		if (!spu_ibox_read(ctx, &ibox_data))
-			ret = -EAGAIN;
+			count = -EAGAIN;
 	} else {
-		ret = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
+		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
 	}
+	if (count)
+		goto out;
 
-	spu_release(ctx);
+	/* if we can't write at all, return -EFAULT */
+	count = __put_user(ibox_data, udata);
+	if (count)
+		goto out;
 
-	if (ret)
-		return ret;
+	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
+		int ret;
+		ret = ctx->ops->ibox_read(ctx, &ibox_data);
+		if (ret == 0)
+			break;
+		/*
+		 * at the end of the mapped area, we can fault
+		 * but still need to return the data we have
+		 * read successfully so far.
+		 */
+		ret = __put_user(ibox_data, udata);
+		if (ret)
+			break;
+	}
 
-	ret = 4;
-	if (copy_to_user(buf, &ibox_data, sizeof ibox_data))
-		ret = -EFAULT;
+out:
+	spu_release(ctx);
 
-	return ret;
+	return count;
 }
 
 static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
@@ -532,32 +593,67 @@ void spufs_wbox_callback(struct spu *spu)
 	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
 }
 
+/*
+ * Write as many bytes to the interrupt mailbox as possible, until
+ * one of the conditions becomes true:
+ *
+ * - the mailbox is full
+ * - end of the user provided buffer
+ * - end of the mapped area
+ *
+ * If the file is opened without O_NONBLOCK, we wait here until
+ * space is available, but return when we have been able to
+ * write something.
+ */
 static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
 			size_t len, loff_t *pos)
 {
 	struct spu_context *ctx = file->private_data;
-	u32 wbox_data;
-	int ret;
+	u32 wbox_data, __user *udata;
+	ssize_t count;
 
 	if (len < 4)
 		return -EINVAL;
 
-	if (copy_from_user(&wbox_data, buf, sizeof wbox_data))
+	udata = (void __user *)buf;
+	if (!access_ok(VERIFY_READ, buf, len))
+		return -EFAULT;
+
+	if (__get_user(wbox_data, udata))
 		return -EFAULT;
 
 	spu_acquire(ctx);
 
-	ret = 0;
+	/*
+	 * make sure we can at least write one element, by waiting
+	 * in case of !O_NONBLOCK
+	 */
+	count = 0;
 	if (file->f_flags & O_NONBLOCK) {
 		if (!spu_wbox_write(ctx, wbox_data))
-			ret = -EAGAIN;
+			count = -EAGAIN;
 	} else {
-		ret = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
+		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
 	}
 
-	spu_release(ctx);
+	if (count)
+		goto out;
 
-	return ret ? ret : sizeof wbox_data;
+	/* write as much as possible */
+	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
+		int ret;
+		ret = __get_user(wbox_data, udata);
+		if (ret)
+			break;
+
+		ret = spu_wbox_write(ctx, wbox_data);
+		if (ret == 0)
+			break;
+	}
+
+out:
+	spu_release(ctx);
	return count;
 }
 
 static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
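For reference, a minimal userspace sketch of the read semantics introduced by this patch. The context path /spu/myctx and the 16-word buffer are illustrative assumptions only, not part of the patch (spufs contexts are normally created with spu_create() under the spufs mount). With the change above, a single read() on the ibox file blocks only until the first 32-bit word is available and then returns any further words that are already queued, instead of exactly one word per call:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t words[16];	/* room for up to 16 mailbox words */
	ssize_t n, i;
	int fd;

	/* illustrative path; a real program would use the context
	 * directory obtained from spu_create() */
	fd = open("/spu/myctx/ibox", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* blocks for the first word, then drains whatever else is queued */
	n = read(fd, words, sizeof(words));
	if (n < 0) {
		perror("read");
		close(fd);
		return 1;
	}

	for (i = 0; i < n / 4; i++)
		printf("ibox word %zd: 0x%08x\n", i, (unsigned int)words[i]);

	close(fd);
	return 0;
}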