Diffstat (limited to 'block')

 -rw-r--r--  block/Kconfig            |   17
 -rw-r--r--  block/Makefile           |    1
 -rw-r--r--  block/as-iosched.c       |    3
 -rw-r--r--  block/bsg.c              | 1095
 -rw-r--r--  block/cfq-iosched.c      |  100
 -rw-r--r--  block/deadline-iosched.c |    3
 -rw-r--r--  block/elevator.c         |   16
 -rw-r--r--  block/genhd.c            |   20
 -rw-r--r--  block/ll_rw_blk.c        |   33
 -rw-r--r--  block/scsi_ioctl.c       |  166

 10 files changed, 1315 insertions(+), 139 deletions(-)
diff --git a/block/Kconfig b/block/Kconfig
index a50f4811164..0768741d681 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -1,7 +1,7 @@
#
# Block layer core configuration
#
-config BLOCK
+menuconfig BLOCK
bool "Enable the block layer" if EMBEDDED
default y
help
@@ -49,6 +49,19 @@ config LSF
If unsure, say Y.
-endif
+endif # BLOCK
+
+config BLK_DEV_BSG
+ bool "Block layer SG support v4 (EXPERIMENTAL)"
+ depends on (SCSI=y) && EXPERIMENTAL
+ ---help---
+ Saying Y here will enable generic SG (SCSI generic) v4 support
+ for any block device.
+
+ Unlike SG v3 (aka block/scsi_ioctl.c and drivers/scsi/sg.c), SG v4
+ can handle complicated SCSI commands: tagged variable length cdbs
+ with bidirectional data transfers and generic request/response
+ protocols (e.g. Task Management Functions and SMP in Serial
+ Attached SCSI).
source block/Kconfig.iosched
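
For illustration, a minimal user-space sketch of the synchronous path this
option enables: fill in a struct sg_io_v4 (layout from <linux/bsg.h>) and
issue the SG_IO ioctl against a bsg node. The device node name and the
INQUIRY details are assumptions for the example, not part of this patch.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <scsi/sg.h>		/* SG_IO */
	#include <linux/bsg.h>		/* struct sg_io_v4 */

	int main(void)
	{
		unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY */
		unsigned char resp[96], sense[32];
		struct sg_io_v4 hdr;
		int fd = open("/dev/bsg/0:0:0:0", O_RDWR);	/* example node name */

		if (fd < 0)
			return 1;

		memset(&hdr, 0, sizeof(hdr));
		hdr.guard = 'Q';			/* marks a v4 header */
		hdr.protocol = BSG_PROTOCOL_SCSI;
		hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
		hdr.request_len = sizeof(cdb);
		hdr.request = (unsigned long) cdb;
		hdr.max_response_len = sizeof(sense);
		hdr.response = (unsigned long) sense;
		hdr.din_xfer_len = sizeof(resp);	/* data-in transfer */
		hdr.din_xferp = (unsigned long) resp;
		hdr.timeout = 10000;			/* milliseconds */

		if (ioctl(fd, SG_IO, &hdr) < 0)
			return 1;

		printf("device status %u, din_resid %d\n",
		       hdr.device_status, hdr.din_resid);
		close(fd);
		return 0;
	}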
diff --git a/block/Makefile b/block/Makefile
index 4b84d0d5947..959feeb253b 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_BLOCK) := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o
+obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
obj-$(CONFIG_IOSCHED_AS) += as-iosched.o
obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 109e91b91ff..3e316dd7252 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1322,10 +1322,9 @@ static void *as_init_queue(request_queue_t *q)
{
struct as_data *ad;
- ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
+ ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node);
if (!ad)
return NULL;
- memset(ad, 0, sizeof(*ad));
ad->q = q; /* Identify what queue the data belongs to */
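
The same mechanical conversion recurs in several files of this patch: a
node-aware allocation followed by a memset() collapses into a single zeroed
allocation. Shown side by side:

	/* before: allocate, then zero by hand */
	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
	memset(ad, 0, sizeof(*ad));

	/* after: __GFP_ZERO makes the allocator return zeroed memory */
	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node);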
diff --git a/block/bsg.c b/block/bsg.c
new file mode 100644
index 00000000000..f2992e72b84
--- /dev/null
+++ b/block/bsg.c
@@ -0,0 +1,1095 @@
+/*
+ * bsg.c - block layer implementation of the sg v4 interface
+ *
+ * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
+ * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License version 2. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ */
+/*
+ * TODO
+ * - Should this get merged, block/scsi_ioctl.c will be migrated into
+ * this file. To keep maintenance down, it's easier to have them
+ * separated right now.
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/file.h>
+#include <linux/blkdev.h>
+#include <linux/poll.h>
+#include <linux/cdev.h>
+#include <linux/percpu.h>
+#include <linux/uio.h>
+#include <linux/bsg.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/sg.h>
+
+#define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver"
+#define BSG_VERSION "0.4"
+
+struct bsg_device {
+ request_queue_t *queue;
+ spinlock_t lock;
+ struct list_head busy_list;
+ struct list_head done_list;
+ struct hlist_node dev_list;
+ atomic_t ref_count;
+ int minor;
+ int queued_cmds;
+ int done_cmds;
+ wait_queue_head_t wq_done;
+ wait_queue_head_t wq_free;
+ char name[BUS_ID_SIZE];
+ int max_queue;
+ unsigned long flags;
+};
+
+enum {
+ BSG_F_BLOCK = 1,
+ BSG_F_WRITE_PERM = 2,
+};
+
+#define BSG_DEFAULT_CMDS 64
+#define BSG_MAX_DEVS 32768
+
+#undef BSG_DEBUG
+
+#ifdef BSG_DEBUG
+#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
+#else
+#define dprintk(fmt, args...)
+#endif
+
+static DEFINE_MUTEX(bsg_mutex);
+static int bsg_device_nr, bsg_minor_idx;
+
+#define BSG_LIST_ARRAY_SIZE 8
+static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];
+
+static struct class *bsg_class;
+static LIST_HEAD(bsg_class_list);
+static int bsg_major;
+
+static struct kmem_cache *bsg_cmd_cachep;
+
+/*
+ * our internal command type
+ */
+struct bsg_command {
+ struct bsg_device *bd;
+ struct list_head list;
+ struct request *rq;
+ struct bio *bio;
+ struct bio *bidi_bio;
+ int err;
+ struct sg_io_v4 hdr;
+ struct sg_io_v4 __user *uhdr;
+ char sense[SCSI_SENSE_BUFFERSIZE];
+};
+
+static void bsg_free_command(struct bsg_command *bc)
+{
+ struct bsg_device *bd = bc->bd;
+ unsigned long flags;
+
+ kmem_cache_free(bsg_cmd_cachep, bc);
+
+ spin_lock_irqsave(&bd->lock, flags);
+ bd->queued_cmds--;
+ spin_unlock_irqrestore(&bd->lock, flags);
+
+ wake_up(&bd->wq_free);
+}
+
+static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
+{
+ struct bsg_command *bc = ERR_PTR(-EINVAL);
+
+ spin_lock_irq(&bd->lock);
+
+ if (bd->queued_cmds >= bd->max_queue)
+ goto out;
+
+ bd->queued_cmds++;
+ spin_unlock_irq(&bd->lock);
+
+ bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
+ if (unlikely(!bc)) {
+ spin_lock_irq(&bd->lock);
+ bd->queued_cmds--;
+ bc = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ bc->bd = bd;
+ INIT_LIST_HEAD(&bc->list);
+ dprintk("%s: returning free cmd %p\n", bd->name, bc);
+ return bc;
+out:
+ spin_unlock_irq(&bd->lock);
+ return bc;
+}
+
+static inline struct hlist_head *bsg_dev_idx_hash(int index)
+{
+ return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
+}
+
+static int bsg_io_schedule(struct bsg_device *bd)
+{
+ DEFINE_WAIT(wait);
+ int ret = 0;
+
+ spin_lock_irq(&bd->lock);
+
+ BUG_ON(bd->done_cmds > bd->queued_cmds);
+
+ /*
+ * -ENOSPC or -ENODATA? I'm going for -ENODATA, meaning "I have no
+ * work to do", even though we return -ENOSPC after this same test
+ * during bsg_write() -- there, it means our buffer can't have more
+ * bsg_commands added to it, thus has no space left.
+ */
+ if (bd->done_cmds == bd->queued_cmds) {
+ ret = -ENODATA;
+ goto unlock;
+ }
+
+ if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock_irq(&bd->lock);
+ io_schedule();
+ finish_wait(&bd->wq_done, &wait);
+
+ return ret;
+unlock:
+ spin_unlock_irq(&bd->lock);
+ return ret;
+}
+
+static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
+ struct sg_io_v4 *hdr, int has_write_perm)
+{
+ memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
+
+ if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
+ hdr->request_len))
+ return -EFAULT;
+
+ if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
+ if (blk_verify_command(rq->cmd, has_write_perm))
+ return -EPERM;
+ } else if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ /*
+ * fill in request structure
+ */
+ rq->cmd_len = hdr->request_len;
+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
+
+ rq->timeout = (hdr->timeout * HZ) / 1000;
+ if (!rq->timeout)
+ rq->timeout = q->sg_timeout;
+ if (!rq->timeout)
+ rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+
+ return 0;
+}
+
+/*
+ * Check if sg_io_v4 from user is allowed and valid
+ */
+static int
+bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
+{
+ int ret = 0;
+
+ if (hdr->guard != 'Q')
+ return -EINVAL;
+ if (hdr->request_len > BLK_MAX_CDB)
+ return -EINVAL;
+ if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
+ hdr->din_xfer_len > (q->max_sectors << 9))
+ return -EIO;
+
+ switch (hdr->protocol) {
+ case BSG_PROTOCOL_SCSI:
+ switch (hdr->subprotocol) {
+ case BSG_SUB_PROTOCOL_SCSI_CMD:
+ case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ *rw = hdr->dout_xfer_len ? WRITE : READ;
+ return ret;
+}
+
+/*
+ * map sg_io_v4 to a request.
+ */
+static struct request *
+bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
+{
+ request_queue_t *q = bd->queue;
+ struct request *rq, *next_rq = NULL;
+ int ret, rw;
+ unsigned int dxfer_len;
+ void *dxferp = NULL;
+
+ dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
+ hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
+ hdr->din_xfer_len);
+
+ ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /*
+ * map scatter-gather elements separately and chain them to the request
+ */
+ rq = blk_get_request(q, rw, GFP_KERNEL);
+ if (!rq)
+ return ERR_PTR(-ENOMEM);
+ ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
+ &bd->flags));
+ if (ret)
+ goto out;
+
+ if (rw == WRITE && hdr->din_xfer_len) {
+ if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ next_rq = blk_get_request(q, READ, GFP_KERNEL);
+ if (!next_rq) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ rq->next_rq = next_rq;
+
+ dxferp = (void*)(unsigned long)hdr->din_xferp;
+ ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
+ if (ret)
+ goto out;
+ }
+
+ if (hdr->dout_xfer_len) {
+ dxfer_len = hdr->dout_xfer_len;
+ dxferp = (void*)(unsigned long)hdr->dout_xferp;
+ } else if (hdr->din_xfer_len) {
+ dxfer_len = hdr->din_xfer_len;
+ dxferp = (void*)(unsigned long)hdr->din_xferp;
+ } else
+ dxfer_len = 0;
+
+ if (dxfer_len) {
+ ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
+ if (ret)
+ goto out;
+ }
+ return rq;
+out:
+ blk_put_request(rq);
+ if (next_rq) {
+ blk_rq_unmap_user(next_rq->bio);
+ blk_put_request(next_rq);
+ }
+ return ERR_PTR(ret);
+}
+
+/*
+ * async completion call-back from the block layer, when scsi/ide/whatever
+ * calls end_that_request_last() on a request
+ */
+static void bsg_rq_end_io(struct request *rq, int uptodate)
+{
+ struct bsg_command *bc = rq->end_io_data;
+ struct bsg_device *bd = bc->bd;
+ unsigned long flags;
+
+ dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
+ bd->name, rq, bc, bc->bio, uptodate);
+
+ bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
+
+ spin_lock_irqsave(&bd->lock, flags);
+ list_move_tail(&bc->list, &bd->done_list);
+ bd->done_cmds++;
+ spin_unlock_irqrestore(&bd->lock, flags);
+
+ wake_up(&bd->wq_done);
+}
+
+/*
+ * do final setup of a 'bc' and submit the matching 'rq' to the block
+ * layer for io
+ */
+static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
+ struct bsg_command *bc, struct request *rq)
+{
+ rq->sense = bc->sense;
+ rq->sense_len = 0;
+
+ /*
+ * add bc command to busy queue and submit rq for io
+ */
+ bc->rq = rq;
+ bc->bio = rq->bio;
+ if (rq->next_rq)
+ bc->bidi_bio = rq->next_rq->bio;
+ bc->hdr.duration = jiffies;
+ spin_lock_irq(&bd->lock);
+ list_add_tail(&bc->list, &bd->busy_list);
+ spin_unlock_irq(&bd->lock);
+
+ dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);
+
+ rq->end_io_data = bc;
+ blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
+}
+
+static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
+{
+ struct bsg_command *bc = NULL;
+
+ spin_lock_irq(&bd->lock);
+ if (bd->done_cmds) {
+ bc = list_entry(bd->done_list.next, struct bsg_command, list);
+ list_del(&bc->list);
+ bd->done_cmds--;
+ }
+ spin_unlock_irq(&bd->lock);
+
+ return bc;
+}
+
+/*
+ * Get a finished command from the done list
+ */
+static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
+{
+ struct bsg_command *bc;
+ int ret;
+
+ do {
+ bc = bsg_next_done_cmd(bd);
+ if (bc)
+ break;
+
+ if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
+ bc = ERR_PTR(-EAGAIN);
+ break;
+ }
+
+ ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
+ if (ret) {
+ bc = ERR_PTR(-ERESTARTSYS);
+ break;
+ }
+ } while (1);
+
+ dprintk("%s: returning done %p\n", bd->name, bc);
+
+ return bc;
+}
+
+static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
+ struct bio *bio, struct bio *bidi_bio)
+{
+ int ret = 0;
+
+ dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
+ /*
+ * fill in all the output members
+ */
+ hdr->device_status = status_byte(rq->errors);
+ hdr->transport_status = host_byte(rq->errors);
+ hdr->driver_status = driver_byte(rq->errors);
+ hdr->info = 0;
+ if (hdr->device_status || hdr->transport_status || hdr->driver_status)
+ hdr->info |= SG_INFO_CHECK;
+ hdr->din_resid = rq->data_len;
+ hdr->response_len = 0;
+
+ if (rq->sense_len && hdr->response) {
+ int len = min_t(unsigned int, hdr->max_response_len,
+ rq->sense_len);
+
+ ret = copy_to_user((void*)(unsigned long)hdr->response,
+ rq->sense, len);
+ if (!ret)
+ hdr->response_len = len;
+ else
+ ret = -EFAULT;
+ }
+
+ if (rq->next_rq) {
+ blk_rq_unmap_user(bidi_bio);
+ blk_put_request(rq->next_rq);
+ }
+
+ blk_rq_unmap_user(bio);
+ blk_put_request(rq);
+
+ return ret;
+}
+
+static int bsg_complete_all_commands(struct bsg_device *bd)
+{
+ struct bsg_command *bc;
+ int ret, tret;
+
+ dprintk("%s: entered\n", bd->name);
+
+ set_bit(BSG_F_BLOCK, &bd->flags);
+
+ /*
+ * wait for all commands to complete
+ */
+ ret = 0;
+ do {
+ ret = bsg_io_schedule(bd);
+ /*
+ * look for -ENODATA specifically -- we'll sometimes get
+ * -ERESTARTSYS when we've taken a signal, but we can't
+ * return until we're done freeing the queue, so ignore
+ * it. The signal will get handled when we're done freeing
+ * the bsg_device.
+ */
+ } while (ret != -ENODATA);
+
+ /*
+ * discard done commands
+ */
+ ret = 0;
+ do {
+ spin_lock_irq(&bd->lock);
+ if (!bd->queued_cmds) {
+ spin_unlock_irq(&bd->lock);
+ break;
+ }
+ spin_unlock_irq(&bd->lock);
+
+ bc = bsg_get_done_cmd(bd);
+ if (IS_ERR(bc))
+ break;
+
+ tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
+ bc->bidi_bio);
+ if (!ret)
+ ret = tret;
+
+ bsg_free_command(bc);
+ } while (1);
+
+ return ret;
+}
+
+static int
+__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
+ const struct iovec *iov, ssize_t *bytes_read)
+{
+ struct bsg_command *bc;
+ int nr_commands, ret;
+
+ if (count % sizeof(struct sg_io_v4))
+ return -EINVAL;
+
+ ret = 0;
+ nr_commands = count / sizeof(struct sg_io_v4);
+ while (nr_commands) {
+ bc = bsg_get_done_cmd(bd);
+ if (IS_ERR(bc)) {
+ ret = PTR_ERR(bc);
+ break;
+ }
+
+ /*
+ * this is the only case where we need to copy data back
+ * after completing the request; do it here, since the
+ * async completion handler cannot do it for us
+ */
+ ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
+ bc->bidi_bio);
+
+ if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
+ ret = -EFAULT;
+
+ bsg_free_command(bc);
+
+ if (ret)
+ break;
+
+ buf += sizeof(struct sg_io_v4);
+ *bytes_read += sizeof(struct sg_io_v4);
+ nr_commands--;
+ }
+
+ return ret;
+}
+
+static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
+{
+ if (file->f_flags & O_NONBLOCK)
+ clear_bit(BSG_F_BLOCK, &bd->flags);
+ else
+ set_bit(BSG_F_BLOCK, &bd->flags);
+}
+
+static inline void bsg_set_write_perm(struct bsg_device *bd, struct file *file)
+{
+ if (file->f_mode & FMODE_WRITE)
+ set_bit(BSG_F_WRITE_PERM, &bd->flags);
+ else
+ clear_bit(BSG_F_WRITE_PERM, &bd->flags);
+}
+
+/*
+ * Check if the error is a "real" error that we should return.
+ */
+static inline int err_block_err(int ret)
+{
+ if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
+ return 1;
+
+ return 0;
+}
+
+static ssize_t
+bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ struct bsg_device *bd = file->private_data;
+ int ret;
+ ssize_t bytes_read;
+
+ dprintk("%s: read %Zd bytes\n", bd->name, count);
+
+ bsg_set_block(bd, file);
+ bytes_read = 0;
+ ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
+ *ppos = bytes_read;
+
+ if (!bytes_read || err_block_err(ret))
+ bytes_read = ret;
+
+ return bytes_read;
+}
+
+static int __bsg_write(struct bsg_device *bd, const char __user *buf,
+ size_t count, ssize_t *bytes_written)
+{
+ struct bsg_command *bc;
+ struct request *rq;
+ int ret, nr_commands;
+
+ if (count % sizeof(struct sg_io_v4))
+ return -EINVAL;
+
+ nr_commands = count / sizeof(struct sg_io_v4);
+ rq = NULL;
+ bc = NULL;
+ ret = 0;
+ while (nr_commands) {
+ request_queue_t *q = bd->queue;
+
+ bc = bsg_alloc_command(bd);
+ if (IS_ERR(bc)) {
+ ret = PTR_ERR(bc);
+ bc = NULL;
+ break;
+ }
+
+ bc->uhdr = (struct sg_io_v4 __user *) buf;
+ if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ /*
+ * get a request, fill in the blanks, and add to request queue
+ */
+ rq = bsg_map_hdr(bd, &bc->hdr);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ rq = NULL;
+ break;
+ }
+
+ bsg_add_command(bd, q, bc, rq);
+ bc = NULL;
+ rq = NULL;
+ nr_commands--;
+ buf += sizeof(struct sg_io_v4);
+ *bytes_written += sizeof(struct sg_io_v4);
+ }
+
+ if (bc)
+ bsg_free_command(bc);
+
+ return ret;
+}
+
+static ssize_t
+bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct bsg_device *bd = file->private_data;
+ ssize_t bytes_written;
+ int ret;
+
+ dprintk("%s: write %Zd bytes\n", bd->name, count);
+
+ bsg_set_block(bd, file);
+ bsg_set_write_perm(bd, file);
+
+ bytes_written = 0;
+ ret = __bsg_write(bd, buf, count, &bytes_written);
+ *ppos = bytes_written;
+
+ /*
+ * return bytes written on non-fatal errors
+ */
+ if (!bytes_written || err_block_err(ret))
+ bytes_written = ret;
+
+ dprintk("%s: returning %Zd\n", bd->name, bytes_written);
+ return bytes_written;
+}
+
+static struct bsg_device *bsg_alloc_device(void)
+{
+ struct bsg_device *bd;
+
+ bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
+ if (unlikely(!bd))
+ return NULL;
+
+ spin_lock_init(&bd->lock);
+
+ bd->max_queue = BSG_DEFAULT_CMDS;
+
+ INIT_LIST_HEAD(&bd->busy_list);
+ INIT_LIST_HEAD(&bd->done_list);
+ INIT_HLIST_NODE(&bd->dev_list);
+
+ init_waitqueue_head(&bd->wq_free);
+ init_waitqueue_head(&bd->wq_done);
+ return bd;
+}
+
+static int bsg_put_device(struct bsg_device *bd)
+{
+ int ret = 0;
+
+ mutex_lock(&bsg_mutex);
+
+ if (!atomic_dec_and_test(&bd->ref_count))
+ goto out;
+
+ dprintk("%s: tearing down\n", bd->name);
+
+ /*
+ * close can always block
+ */
+ set_bit(BSG_F_BLOCK, &bd->flags);
+
+ /*
+ * error reporting is best-effort from here on; it's the responsibility
+ * of the app to properly reap commands before close() if it wants
+ * fool-proof error detection
+ */
+ ret = bsg_complete_all_commands(bd);
+
+ blk_put_queue(bd->queue);
+ hlist_del(&bd->dev_list);
+ kfree(bd);
+out:
+ mutex_unlock(&bsg_mutex);
+ return ret;
+}
+
+static struct bsg_device *bsg_add_device(struct inode *inode,
+ struct request_queue *rq,
+ struct file *file)
+{
+ struct bsg_device *bd;
+#ifdef BSG_DEBUG
+ unsigned char buf[32];
+#endif
+
+ bd = bsg_alloc_device();
+ if (!bd)
+ return ERR_PTR(-ENOMEM);
+
+ bd->queue = rq;
+ kobject_get(&rq->kobj);
+ bsg_set_block(bd, file);
+
+ atomic_set(&bd->ref_count, 1);
+ bd->minor = iminor(inode);
+ mutex_lock(&bsg_mutex);
+ hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(bd->minor));
+
+ strncpy(bd->name, rq->bsg_dev.class_dev->class_id, sizeof(bd->name) - 1);
+ dprintk("bound to <%s>, max queue %d\n",
+ format_dev_t(buf, inode->i_rdev), bd->max_queue);
+
+ mutex_unlock(&bsg_mutex);
+ return bd;
+}
+
+static struct bsg_device *__bsg_get_device(int minor)
+{
+ struct bsg_device *bd = NULL;
+ struct hlist_node *entry;
+
+ mutex_lock(&bsg_mutex);
+
+ hlist_for_each(entry, bsg_dev_idx_hash(minor)) {
+ bd = hlist_entry(entry, struct bsg_device, dev_list);
+ if (bd->minor == minor) {
+ atomic_inc(&bd->ref_count);
+ break;
+ }
+
+ bd = NULL;
+ }
+
+ mutex_unlock(&bsg_mutex);
+ return bd;
+}
+
+static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
+{
+ struct bsg_device *bd = __bsg_get_device(iminor(inode));
+ struct bsg_class_device *bcd, *__bcd;
+
+ if (bd)
+ return bd;
+
+ /*
+ * find the class device
+ */
+ bcd = NULL;
+ mutex_lock(&bsg_mutex);
+ list_for_each_entry(__bcd, &bsg_class_list, list) {
+ if (__bcd->minor == iminor(inode)) {
+ bcd = __bcd;
+ break;
+ }
+ }
+ mutex_unlock(&bsg_mutex);
+
+ if (!bcd)
+ return ERR_PTR(-ENODEV);
+
+ return bsg_add_device(inode, bcd->queue, file);
+}
+
+static int bsg_open(struct inode *inode, struct file *file)
+{
+ struct bsg_device *bd = bsg_get_device(inode, file);
+
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
+
+ file->private_data = bd;
+ return 0;
+}
+
+static int bsg_release(struct inode *inode, struct file *file)
+{
+ struct bsg_device *bd = file->private_data;
+
+ file->private_data = NULL;
+ return bsg_put_device(bd);
+}
+
+static unsigned int bsg_poll(struct file *file, poll_table *wait)
+{
+ struct bsg_device *bd = file->private_data;
+ unsigned int mask = 0;
+
+ poll_wait(file, &bd->wq_done, wait);
+ poll_wait(file, &bd->wq_free, wait);
+
+ spin_lock_irq(&bd->lock);
+ if (!list_empty(&bd->done_list))
+ mask |= POLLIN | POLLRDNORM;
+ if (bd->queued_cmds < bd->max_queue)
+ mask |= POLLOUT;
+ spin_unlock_irq(&bd->lock);
+
+ return mask;
+}
+
+static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct bsg_device *bd = file->private_data;
+ int __user *uarg = (int __user *) arg;
+
+ switch (cmd) {
+ /*
+ * our own ioctls
+ */
+ case SG_GET_COMMAND_Q:
+ return put_user(bd->max_queue, uarg);
+ case SG_SET_COMMAND_Q: {
+ int queue;
+
+ if (get_user(queue, uarg))
+ return -EFAULT;
+ if (queue < 1)
+ return -EINVAL;
+
+ spin_lock_irq(&bd->lock);
+ bd->max_queue = queue;
+ spin_unlock_irq(&bd->lock);
+ return 0;
+ }
+
+ /*
+ * SCSI/sg ioctls
+ */
+ case SG_GET_VERSION_NUM:
+ case SCSI_IOCTL_GET_IDLUN:
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ case SG_SET_TIMEOUT:
+ case SG_GET_TIMEOUT:
+ case SG_GET_RESERVED_SIZE:
+ case SG_SET_RESERVED_SIZE:
+ case SG_EMULATED_HOST:
+ case SCSI_IOCTL_SEND_COMMAND: {
+ void __user *uarg = (void __user *) arg;
+ return scsi_cmd_ioctl(file, bd->queue, NULL, cmd, uarg);
+ }
+ case SG_IO: {
+ struct request *rq;
+ struct bio *bio, *bidi_bio = NULL;
+ struct sg_io_v4 hdr;
+
+ if (copy_from_user(&hdr, uarg, sizeof(hdr)))
+ return -EFAULT;
+
+ rq = bsg_map_hdr(bd, &hdr);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ bio = rq->bio;
+ if (rq->next_rq)
+ bidi_bio = rq->next_rq->bio;
+ blk_execute_rq(bd->queue, NULL, rq, 0);
+ blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
+
+ if (copy_to_user(uarg, &hdr, sizeof(hdr)))
+ return -EFAULT;
+
+ return 0;
+ }
+ /*
+ * block device ioctls
+ */
+ default:
+#if 0
+ return ioctl_by_bdev(bd->bdev, cmd, arg);
+#else
+ return -ENOTTY;
+#endif
+ }
+}
+
+static struct file_operations bsg_fops = {
+ .read = bsg_read,
+ .write = bsg_write,
+ .poll = bsg_poll,
+ .open = bsg_open,
+ .release = bsg_release,
+ .unlocked_ioctl = bsg_ioctl,
+ .owner = THIS_MODULE,
+};
+
+void bsg_unregister_queue(struct request_queue *q)
+{
+ struct bsg_class_device *bcd = &q->bsg_dev;
+
+ WARN_ON(!bcd->class_dev);
+
+ mutex_lock(&bsg_mutex);
+ sysfs_remove_link(&q->kobj, "bsg");
+ class_device_destroy(bsg_class, MKDEV(bsg_major, bcd->minor));
+ bcd->class_dev = NULL;
+ list_del_init(&bcd->list);
+ bsg_device_nr--;
+ mutex_unlock(&bsg_mutex);
+}
+EXPORT_SYMBOL_GPL(bsg_unregister_queue);
+
+int bsg_register_queue(struct request_queue *q, const char *name)
+{
+ struct bsg_class_device *bcd, *__bcd;
+ dev_t dev;
+ int ret = -EMFILE;
+ struct class_device *class_dev = NULL;
+
+ /*
+ * we need a proper transport to send commands, not a stacked device
+ */
+ if (!q->request_fn)
+ return 0;
+
+ bcd = &q->bsg_dev;
+ memset(bcd, 0, sizeof(*bcd));
+ INIT_LIST_HEAD(&bcd->list);
+
+ mutex_lock(&bsg_mutex);
+ if (bsg_device_nr == BSG_MAX_DEVS) {
+ printk(KERN_ERR "bsg: too many bsg devices\n");
+ goto err;
+ }
+
+retry:
+ list_for_each_entry(__bcd, &bsg_class_list, list) {
+ if (__bcd->minor == bsg_minor_idx) {
+ bsg_minor_idx++;
+ if (bsg_minor_idx == BSG_MAX_DEVS)
+ bsg_minor_idx = 0;
+ goto retry;
+ }
+ }
+
+ bcd->minor = bsg_minor_idx++;
+ if (bsg_minor_idx == BSG_MAX_DEVS)
+ bsg_minor_idx = 0;
+
+ bcd->queue = q;
+ dev = MKDEV(bsg_major, bcd->minor);
+ class_dev = class_device_create(bsg_class, NULL, dev, bcd->dev, "%s", name);
+ if (IS_ERR(class_dev)) {
+ ret = PTR_ERR(class_dev);
+ goto err;
+ }
+ bcd->class_dev = class_dev;
+
+ if (q->kobj.sd) {
+ ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
+ if (ret)
+ goto err;
+ }
+
+ list_add_tail(&bcd->list, &bsg_class_list);
+ bsg_device_nr++;
+
+ mutex_unlock(&bsg_mutex);
+ return 0;
+err:
+ if (class_dev)
+ class_device_destroy(bsg_class, MKDEV(bsg_major, bcd->minor));
+ mutex_unlock(&bsg_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bsg_register_queue);
+
+static int bsg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
+{
+ int ret;
+ struct scsi_device *sdp = to_scsi_device(cl_dev->dev);
+ struct request_queue *rq = sdp->request_queue;
+
+ if (rq->kobj.parent)
+ ret = bsg_register_queue(rq, kobject_name(rq->kobj.parent));
+ else
+ ret = bsg_register_queue(rq, kobject_name(&sdp->sdev_gendev.kobj));
+ return ret;
+}
+
+static void bsg_remove(struct class_device *cl_dev, struct class_interface *cl_intf)
+{
+ bsg_unregister_queue(to_scsi_device(cl_dev->dev)->request_queue);
+}
+
+static struct class_interface bsg_intf = {
+ .add = bsg_add,
+ .remove = bsg_remove,
+};
+
+static struct cdev bsg_cdev = {
+ .kobj = {.name = "bsg", },
+ .owner = THIS_MODULE,
+};
+
+static int __init bsg_init(void)
+{
+ int ret, i;
+ dev_t devid;
+
+ bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
+ sizeof(struct bsg_command), 0, 0, NULL);
+ if (!bsg_cmd_cachep) {
+ printk(KERN_ERR "bsg: failed creating slab cache\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
+ INIT_HLIST_HEAD(&bsg_device_list[i]);
+
+ bsg_class = class_create(THIS_MODULE, "bsg");
+ if (IS_ERR(bsg_class)) {
+ ret = PTR_ERR(bsg_class);
+ goto destroy_kmemcache;
+ }
+
+ ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
+ if (ret)
+ goto destroy_bsg_class;
+
+ bsg_major = MAJOR(devid);
+
+ cdev_init(&bsg_cdev, &bsg_fops);
+ ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
+ if (ret)
+ goto unregister_chrdev;
+
+ ret = scsi_register_interface(&bsg_intf);
+ if (ret)
+ goto remove_cdev;
+
+ printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
+ " loaded (major %d)\n", bsg_major);
+ return 0;
+remove_cdev:
+ printk(KERN_ERR "bsg: failed register scsi interface %d\n", ret);
+ cdev_del(&bsg_cdev);
+unregister_chrdev:
+ unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
+destroy_bsg_class:
+ class_destroy(bsg_class);
+destroy_kmemcache:
+ kmem_cache_destroy(bsg_cmd_cachep);
+ return ret;
+}
+
+MODULE_AUTHOR("Jens Axboe");
+MODULE_DESCRIPTION(BSG_DESCRIPTION);
+MODULE_LICENSE("GPL");
+
+device_initcall(bsg_init);
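
Besides the synchronous SG_IO ioctl, bsg_write() and bsg_read() above
implement a queued mode: each write() of one struct sg_io_v4 submits a
command (bounded by max_queue), and completed headers are reaped with
read(), with poll() signalling readiness. A hedged user-space sketch,
assuming the header was filled in as in the SG_IO example and that fd is
an open bsg node:

	#include <poll.h>
	#include <unistd.h>
	#include <linux/bsg.h>

	static int submit_and_reap(int fd, struct sg_io_v4 *hdr)
	{
		struct pollfd pfd = { .fd = fd, .events = POLLIN };

		/* one struct sg_io_v4 per command; write() queues it */
		if (write(fd, hdr, sizeof(*hdr)) != sizeof(*hdr))
			return -1;

		/* POLLIN is raised once a command reaches the done_list */
		if (poll(&pfd, 1, -1) < 0)
			return -1;

		/* read() hands back the completed header, [o] fields filled in */
		if (read(fd, hdr, sizeof(*hdr)) != sizeof(*hdr))
			return -1;

		return hdr->device_status ? -1 : 0;
	}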
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index baef5fc7cff..d148ccbc36d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -92,6 +92,12 @@ struct cfq_data {
struct cfq_queue *active_queue;
struct cfq_io_context *active_cic;
+ /*
+ * async queue for each priority case
+ */
+ struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
+ struct cfq_queue *async_idle_cfqq;
+
struct timer_list idle_class_timer;
sector_t last_position;
@@ -109,9 +115,6 @@ struct cfq_data {
unsigned int cfq_slice_idle;
struct list_head cic_list;
-
- sector_t new_seek_mean;
- u64 new_seek_total;
};
/*
@@ -151,8 +154,6 @@ struct cfq_queue {
/* various state flags, see below */
unsigned int flags;
-
- sector_t last_request_pos;
};
enum cfqq_state_flags {
@@ -1249,9 +1250,9 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
struct cfq_io_context *cic;
- cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
+ cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
+ cfqd->queue->node);
if (cic) {
- memset(cic, 0, sizeof(*cic));
cic->last_end_request = jiffies;
INIT_LIST_HEAD(&cic->queue_list);
cic->dtor = cfq_free_io_context;
@@ -1351,8 +1352,8 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
}
static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
- gfp_t gfp_mask)
+cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+ struct task_struct *tsk, gfp_t gfp_mask)
{
struct cfq_queue *cfqq, *new_cfqq = NULL;
struct cfq_io_context *cic;
@@ -1374,17 +1375,19 @@ retry:
* free memory.
*/
spin_unlock_irq(cfqd->queue->queue_lock);
- new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
+ new_cfqq = kmem_cache_alloc_node(cfq_pool,
+ gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
+ cfqd->queue->node);
spin_lock_irq(cfqd->queue->queue_lock);
goto retry;
} else {
- cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
+ cfqq = kmem_cache_alloc_node(cfq_pool,
+ gfp_mask | __GFP_ZERO,
+ cfqd->queue->node);
if (!cfqq)
goto out;
}
- memset(cfqq, 0, sizeof(*cfqq));
-
RB_CLEAR_NODE(&cfqq->rb_node);
INIT_LIST_HEAD(&cfqq->fifo);
@@ -1405,12 +1408,55 @@ retry:
if (new_cfqq)
kmem_cache_free(cfq_pool, new_cfqq);
- atomic_inc(&cfqq->ref);
out:
WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
return cfqq;
}
+static struct cfq_queue **
+cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
+{
+ switch (ioprio_class) {
+ case IOPRIO_CLASS_RT:
+ return &cfqd->async_cfqq[0][ioprio];
+ case IOPRIO_CLASS_BE:
+ return &cfqd->async_cfqq[1][ioprio];
+ case IOPRIO_CLASS_IDLE:
+ return &cfqd->async_idle_cfqq;
+ default:
+ BUG();
+ }
+}
+
+static struct cfq_queue *
+cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
+ gfp_t gfp_mask)
+{
+ const int ioprio = task_ioprio(tsk);
+ const int ioprio_class = task_ioprio_class(tsk);
+ struct cfq_queue **async_cfqq = NULL;
+ struct cfq_queue *cfqq = NULL;
+
+ if (!is_sync) {
+ async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
+ cfqq = *async_cfqq;
+ }
+
+ if (!cfqq)
+ cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
+
+ /*
+ * pin the queue now that it's allocated, scheduler exit will prune it
+ */
+ if (!is_sync && !(*async_cfqq)) {
+ atomic_inc(&cfqq->ref);
+ *async_cfqq = cfqq;
+ }
+
+ atomic_inc(&cfqq->ref);
+ return cfqq;
+}
+
/*
* We drop cfq io contexts lazily, so we may find a dead one.
*/
@@ -1570,11 +1616,6 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
else
sdist = cic->last_request_pos - rq->sector;
- if (!cic->seek_samples) {
- cfqd->new_seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
- cfqd->new_seek_mean = cfqd->new_seek_total / 256;
- }
-
/*
* Don't allow the seek distance to get too large from the
* odd fragment, pagein, etc
@@ -1710,7 +1751,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfq_update_idle_window(cfqd, cfqq, cic);
cic->last_request_pos = rq->sector + rq->nr_sectors;
- cfqq->last_request_pos = cic->last_request_pos;
if (cfqq == cfqd->active_queue) {
/*
@@ -2015,6 +2055,20 @@ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
blk_sync_queue(cfqd->queue);
}
+static void cfq_put_async_queues(struct cfq_data *cfqd)
+{
+ int i;
+
+ for (i = 0; i < IOPRIO_BE_NR; i++) {
+ if (cfqd->async_cfqq[0][i])
+ cfq_put_queue(cfqd->async_cfqq[0][i]);
+ if (cfqd->async_cfqq[1][i])
+ cfq_put_queue(cfqd->async_cfqq[1][i]);
+ }
+
+ if (cfqd->async_idle_cfqq)
+ cfq_put_queue(cfqd->async_idle_cfqq);
+}
+
static void cfq_exit_queue(elevator_t *e)
{
struct cfq_data *cfqd = e->elevator_data;
@@ -2035,6 +2089,8 @@ static void cfq_exit_queue(elevator_t *e)
__cfq_exit_single_io_context(cfqd, cic);
}
+ cfq_put_async_queues(cfqd);
+
spin_unlock_irq(q->queue_lock);
cfq_shutdown_timer_wq(cfqd);
@@ -2046,12 +2102,10 @@ static void *cfq_init_queue(request_queue_t *q)
{
struct cfq_data *cfqd;
- cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
+ cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
if (!cfqd)
return NULL;
- memset(cfqd, 0, sizeof(*cfqd));
-
cfqd->service_tree = CFQ_RB_ROOT;
INIT_LIST_HEAD(&cfqd->cic_list);
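
Which of the shared async_cfqq slots a task's asynchronous I/O is accounted
to follows directly from its I/O priority class and level. A user-space
sketch of selecting that priority; the syscall macros below mirror
include/linux/ioprio.h and are written out here as assumptions:

	#include <sys/syscall.h>
	#include <unistd.h>

	#define IOPRIO_CLASS_SHIFT	13
	#define IOPRIO_PRIO_VALUE(class, data) \
		(((class) << IOPRIO_CLASS_SHIFT) | (data))

	enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE,
	       IOPRIO_CLASS_IDLE };
	enum { IOPRIO_WHO_PROCESS = 1 };

	int main(void)
	{
		/* best-effort class, priority 4: async I/O from this process
		 * is now accounted to the shared cfqd->async_cfqq[1][4] */
		return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
			       IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4));
	}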
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 6d673e938d3..87ca02ac84c 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -360,10 +360,9 @@ static void *deadline_init_queue(request_queue_t *q)
{
struct deadline_data *dd;
- dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
+ dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
if (!dd)
return NULL;
- memset(dd, 0, sizeof(*dd));
INIT_LIST_HEAD(&dd->fifo_list[READ]);
INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
diff --git a/block/elevator.c b/block/elevator.c
index ce866eb75f6..d265963d1ed 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -112,12 +112,8 @@ static inline int elv_try_merge(struct request *__rq, struct bio *bio)
static struct elevator_type *elevator_find(const char *name)
{
struct elevator_type *e;
- struct list_head *entry;
-
- list_for_each(entry, &elv_list) {
-
- e = list_entry(entry, struct elevator_type, list);
+ list_for_each_entry(e, &elv_list, list) {
if (!strcmp(e->elevator_name, name))
return e;
}
@@ -181,11 +177,10 @@ static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
elevator_t *eq;
int i;
- eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL, q->node);
+ eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
if (unlikely(!eq))
goto err;
- memset(eq, 0, sizeof(*eq));
eq->ops = &e->ops;
eq->elevator_type = e;
kobject_init(&eq->kobj);
@@ -1116,14 +1111,11 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
{
elevator_t *e = q->elevator;
struct elevator_type *elv = e->elevator_type;
- struct list_head *entry;
+ struct elevator_type *__e;
int len = 0;
spin_lock(&elv_list_lock);
- list_for_each(entry, &elv_list) {
- struct elevator_type *__e;
-
- __e = list_entry(entry, struct elevator_type, list);
+ list_for_each_entry(__e, &elv_list, list) {
if (!strcmp(elv->elevator_name, __e->elevator_name))
len += sprintf(name+len, "[%s] ", elv->elevator_name);
else
diff --git a/block/genhd.c b/block/genhd.c
index 863a8c0623e..3af1e7a378d 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -108,28 +108,24 @@ out:
EXPORT_SYMBOL(register_blkdev);
-/* todo: make void - error printk here */
-int unregister_blkdev(unsigned int major, const char *name)
+void unregister_blkdev(unsigned int major, const char *name)
{
struct blk_major_name **n;
struct blk_major_name *p = NULL;
int index = major_to_index(major);
- int ret = 0;
mutex_lock(&block_subsys_lock);
for (n = &major_names[index]; *n; n = &(*n)->next)
if ((*n)->major == major)
break;
- if (!*n || strcmp((*n)->name, name))
- ret = -EINVAL;
- else {
+ if (!*n || strcmp((*n)->name, name)) {
+ WARN_ON(1);
+ } else {
p = *n;
*n = p->next;
}
mutex_unlock(&block_subsys_lock);
kfree(p);
-
- return ret;
}
EXPORT_SYMBOL(unregister_blkdev);
@@ -726,21 +722,21 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
{
struct gendisk *disk;
- disk = kmalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
+ disk = kmalloc_node(sizeof(struct gendisk),
+ GFP_KERNEL | __GFP_ZERO, node_id);
if (disk) {
- memset(disk, 0, sizeof(struct gendisk));
if (!init_disk_stats(disk)) {
kfree(disk);
return NULL;
}
if (minors > 1) {
int size = (minors - 1) * sizeof(struct hd_struct *);
- disk->part = kmalloc_node(size, GFP_KERNEL, node_id);
+ disk->part = kmalloc_node(size,
+ GFP_KERNEL | __GFP_ZERO, node_id);
if (!disk->part) {
kfree(disk);
return NULL;
}
- memset(disk->part, 0, size);
}
disk->minors = minors;
kobj_set_kset_s(disk,block_subsys);
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 6b5173ac813..66056ca5e63 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -256,6 +256,7 @@ static void rq_init(request_queue_t *q, struct request *rq)
rq->end_io = NULL;
rq->end_io_data = NULL;
rq->completion_data = NULL;
+ rq->next_rq = NULL;
}
/**
@@ -340,6 +341,15 @@ unsigned blk_ordered_req_seq(struct request *rq)
if (rq == &q->post_flush_rq)
return QUEUE_ORDSEQ_POSTFLUSH;
+ /*
+ * !fs requests don't need to follow barrier ordering. Always
+ * put them at the front. This fixes the following deadlock.
+ *
+ * http://thread.gmane.org/gmane.linux.kernel/537473
+ */
+ if (!blk_fs_request(rq))
+ return QUEUE_ORDSEQ_DRAIN;
+
if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
(q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
return QUEUE_ORDSEQ_DRAIN;
@@ -518,8 +528,6 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
{
request_queue_t *q = bio->bi_private;
- struct bio_vec *bvec;
- int i;
/*
* This is dry run, restore bio_sector and size. We'll finish
@@ -531,13 +539,6 @@ static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
if (bio->bi_size)
return 1;
- /* Rewind bvec's */
- bio->bi_idx = 0;
- bio_for_each_segment(bvec, bio, i) {
- bvec->bv_len += bvec->bv_offset;
- bvec->bv_offset = 0;
- }
-
/* Reset bio */
set_bit(BIO_UPTODATE, &bio->bi_flags);
bio->bi_size = q->bi_size;
@@ -1295,9 +1296,9 @@ static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
blk_recount_segments(q, nxt);
if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
- BIOVEC_VIRT_OVERSIZE(bio->bi_hw_front_size + bio->bi_hw_back_size))
+ BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
return 0;
- if (bio->bi_size + nxt->bi_size > q->max_segment_size)
+ if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
return 0;
return 1;
@@ -1828,11 +1829,11 @@ request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
request_queue_t *q;
- q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id);
+ q = kmem_cache_alloc_node(requestq_cachep,
+ gfp_mask | __GFP_ZERO, node_id);
if (!q)
return NULL;
- memset(q, 0, sizeof(*q));
init_timer(&q->unplug_timer);
snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
@@ -3697,13 +3698,13 @@ int __init blk_dev_init(void)
panic("Failed to create kblockd\n");
request_cachep = kmem_cache_create("blkdev_requests",
- sizeof(struct request), 0, SLAB_PANIC, NULL, NULL);
+ sizeof(struct request), 0, SLAB_PANIC, NULL);
requestq_cachep = kmem_cache_create("blkdev_queue",
- sizeof(request_queue_t), 0, SLAB_PANIC, NULL, NULL);
+ sizeof(request_queue_t), 0, SLAB_PANIC, NULL);
iocontext_cachep = kmem_cache_create("blkdev_ioc",
- sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
+ sizeof(struct io_context), 0, SLAB_PANIC, NULL);
for_each_possible_cpu(i)
INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index e83f1dbf7c2..71bdf88884b 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -41,8 +41,6 @@ const unsigned char scsi_command_size[8] =
EXPORT_SYMBOL(scsi_command_size);
-#define BLK_DEFAULT_TIMEOUT (60 * HZ)
-
#include <scsi/sg.h>
static int sg_get_version(int __user *p)
@@ -114,7 +112,7 @@ static int sg_emulated_host(request_queue_t *q, int __user *p)
#define safe_for_read(cmd) [cmd] = CMD_READ_SAFE
#define safe_for_write(cmd) [cmd] = CMD_WRITE_SAFE
-static int verify_command(struct file *file, unsigned char *cmd)
+int blk_verify_command(unsigned char *cmd, int has_write_perm)
{
static unsigned char cmd_type[256] = {
@@ -193,18 +191,11 @@ static int verify_command(struct file *file, unsigned char *cmd)
safe_for_write(GPCMD_SET_STREAMING),
};
unsigned char type = cmd_type[cmd[0]];
- int has_write_perm = 0;
/* Anybody who can open the device can do a read-safe command */
if (type & CMD_READ_SAFE)
return 0;
- /*
- * file can be NULL from ioctl_by_bdev()...
- */
- if (file)
- has_write_perm = file->f_mode & FMODE_WRITE;
-
/* Write-safe commands just require a writable open.. */
if ((type & CMD_WRITE_SAFE) && has_write_perm)
return 0;
@@ -221,25 +212,96 @@ static int verify_command(struct file *file, unsigned char *cmd)
/* Otherwise fail it with an "Operation not permitted" */
return -EPERM;
}
+EXPORT_SYMBOL_GPL(blk_verify_command);
+
+int blk_fill_sghdr_rq(request_queue_t *q, struct request *rq,
+ struct sg_io_hdr *hdr, int has_write_perm)
+{
+ memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
+
+ if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
+ return -EFAULT;
+ if (blk_verify_command(rq->cmd, has_write_perm))
+ return -EPERM;
+
+ /*
+ * fill in request structure
+ */
+ rq->cmd_len = hdr->cmd_len;
+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
+
+ rq->timeout = (hdr->timeout * HZ) / 1000;
+ if (!rq->timeout)
+ rq->timeout = q->sg_timeout;
+ if (!rq->timeout)
+ rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blk_fill_sghdr_rq);
+
+/*
+ * unmap a request that was previously mapped to this sg_io_hdr. handles
+ * both sg and non-sg sg_io_hdr.
+ */
+int blk_unmap_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr)
+{
+ blk_rq_unmap_user(rq->bio);
+ blk_put_request(rq);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blk_unmap_sghdr_rq);
+
+int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
+ struct bio *bio)
+{
+ int r, ret = 0;
+
+ /*
+ * fill in all the output members
+ */
+ hdr->status = rq->errors & 0xff;
+ hdr->masked_status = status_byte(rq->errors);
+ hdr->msg_status = msg_byte(rq->errors);
+ hdr->host_status = host_byte(rq->errors);
+ hdr->driver_status = driver_byte(rq->errors);
+ hdr->info = 0;
+ if (hdr->masked_status || hdr->host_status || hdr->driver_status)
+ hdr->info |= SG_INFO_CHECK;
+ hdr->resid = rq->data_len;
+ hdr->sb_len_wr = 0;
+
+ if (rq->sense_len && hdr->sbp) {
+ int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);
+
+ if (!copy_to_user(hdr->sbp, rq->sense, len))
+ hdr->sb_len_wr = len;
+ else
+ ret = -EFAULT;
+ }
+
+ rq->bio = bio;
+ r = blk_unmap_sghdr_rq(rq, hdr);
+ if (ret)
+ r = ret;
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(blk_complete_sghdr_rq);
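+
+/*
+ * With these helpers exported, a block driver can implement the v3 SG_IO
+ * path without duplicating the sg_io() function that follows. A sketch,
+ * under the assumption that the caller supplies the queue, disk and an
+ * already copied-in header; mapping of the user data buffer with
+ * blk_rq_map_user() is elided:
+ *
+ *	static int example_sg_io(request_queue_t *q, struct gendisk *disk,
+ *				 struct sg_io_hdr *hdr, int has_write_perm)
+ *	{
+ *		char sense[SCSI_SENSE_BUFFERSIZE];
+ *		struct request *rq;
+ *		int ret;
+ *
+ *		rq = blk_get_request(q, hdr->dxfer_direction ==
+ *				     SG_DXFER_TO_DEV ? WRITE : READ,
+ *				     GFP_KERNEL);
+ *		if (!rq)
+ *			return -ENOMEM;
+ *
+ *		ret = blk_fill_sghdr_rq(q, rq, hdr, has_write_perm);
+ *		if (ret) {
+ *			blk_put_request(rq);
+ *			return ret;
+ *		}
+ *
+ *		memset(sense, 0, sizeof(sense));
+ *		rq->sense = sense;
+ *		rq->sense_len = 0;
+ *
+ *		blk_execute_rq(q, disk, rq, 0);
+ *
+ *		// writes the [o] members back, unmaps and frees the request
+ *		return blk_complete_sghdr_rq(rq, hdr, rq->bio);
+ *	}
+ */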
static int sg_io(struct file *file, request_queue_t *q,
struct gendisk *bd_disk, struct sg_io_hdr *hdr)
{
- unsigned long start_time, timeout;
- int writing = 0, ret = 0;
+ unsigned long start_time;
+ int writing = 0, ret = 0, has_write_perm = 0;
struct request *rq;
char sense[SCSI_SENSE_BUFFERSIZE];
- unsigned char cmd[BLK_MAX_CDB];
struct bio *bio;
if (hdr->interface_id != 'S')
return -EINVAL;
if (hdr->cmd_len > BLK_MAX_CDB)
return -EINVAL;
- if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
- return -EFAULT;
- if (verify_command(file, cmd))
- return -EPERM;
if (hdr->dxfer_len > (q->max_hw_sectors << 9))
return -EIO;
@@ -260,25 +322,13 @@ static int sg_io(struct file *file, request_queue_t *q,
if (!rq)
return -ENOMEM;
- /*
- * fill in request structure
- */
- rq->cmd_len = hdr->cmd_len;
- memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
- memcpy(rq->cmd, cmd, hdr->cmd_len);
-
- memset(sense, 0, sizeof(sense));
- rq->sense = sense;
- rq->sense_len = 0;
-
- rq->cmd_type = REQ_TYPE_BLOCK_PC;
+ if (file)
+ has_write_perm = file->f_mode & FMODE_WRITE;
- timeout = msecs_to_jiffies(hdr->timeout);
- rq->timeout = (timeout < INT_MAX) ? timeout : INT_MAX;
- if (!rq->timeout)
- rq->timeout = q->sg_timeout;
- if (!rq->timeout)
- rq->timeout = BLK_DEFAULT_TIMEOUT;
+ if (blk_fill_sghdr_rq(q, rq, hdr, has_write_perm)) {
+ blk_put_request(rq);
+ return -EFAULT;
+ }
if (hdr->iovec_count) {
const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
@@ -306,6 +356,9 @@ static int sg_io(struct file *file, request_queue_t *q,
goto out;
bio = rq->bio;
+ memset(sense, 0, sizeof(sense));
+ rq->sense = sense;
+ rq->sense_len = 0;
rq->retries = 0;
start_time = jiffies;
@@ -316,31 +369,9 @@ static int sg_io(struct file *file, request_queue_t *q,
*/
blk_execute_rq(q, bd_disk, rq, 0);
- /* write to all output members */
- hdr->status = 0xff & rq->errors;
- hdr->masked_status = status_byte(rq->errors);
- hdr->msg_status = msg_byte(rq->errors);
- hdr->host_status = host_byte(rq->errors);
- hdr->driver_status = driver_byte(rq->errors);
- hdr->info = 0;
- if (hdr->masked_status || hdr->host_status || hdr->driver_status)
- hdr->info |= SG_INFO_CHECK;
- hdr->resid = rq->data_len;
hdr->duration = ((jiffies - start_time) * 1000) / HZ;
- hdr->sb_len_wr = 0;
-
- if (rq->sense_len && hdr->sbp) {
- int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);
-
- if (!copy_to_user(hdr->sbp, rq->sense, len))
- hdr->sb_len_wr = len;
- }
- if (blk_rq_unmap_user(bio))
- ret = -EFAULT;
-
- /* may not have succeeded, but output values written to control
- * structure (struct sg_io_hdr). */
+ return blk_complete_sghdr_rq(rq, hdr, bio);
out:
blk_put_request(rq);
return ret;
@@ -405,11 +436,10 @@ int sg_scsi_ioctl(struct file *file, struct request_queue *q,
bytes = max(in_len, out_len);
if (bytes) {
- buffer = kmalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN);
+ buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN);
if (!buffer)
return -ENOMEM;
- memset(buffer, 0, bytes);
}
rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
@@ -427,7 +457,7 @@ int sg_scsi_ioctl(struct file *file, struct request_queue *q,
if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
goto error;
- err = verify_command(file, rq->cmd);
+ err = blk_verify_command(rq->cmd, file->f_mode & FMODE_WRITE);
if (err)
goto error;
@@ -454,7 +484,7 @@ int sg_scsi_ioctl(struct file *file, struct request_queue *q,
rq->retries = 1;
break;
default:
- rq->timeout = BLK_DEFAULT_TIMEOUT;
+ rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
break;
}
@@ -501,7 +531,7 @@ static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int c
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->data = NULL;
rq->data_len = 0;
- rq->timeout = BLK_DEFAULT_TIMEOUT;
+ rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
memset(rq->cmd, 0, sizeof(rq->cmd));
rq->cmd[0] = cmd;
rq->cmd[4] = data;
@@ -517,16 +547,12 @@ static inline int blk_send_start_stop(request_queue_t *q, struct gendisk *bd_dis
return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
}
-int scsi_cmd_ioctl(struct file *file, struct gendisk *bd_disk, unsigned int cmd, void __user *arg)
+int scsi_cmd_ioctl(struct file *file, struct request_queue *q,
+ struct gendisk *bd_disk, unsigned int cmd, void __user *arg)
{
- request_queue_t *q;
int err;
- q = bd_disk->queue;
- if (!q)
- return -ENXIO;
-
- if (blk_get_queue(q))
+ if (!q || blk_get_queue(q))
return -ENXIO;
switch (cmd) {