path: root/block
author    Ingo Molnar <mingo@elte.hu>  2008-10-28 16:26:12 +0100
committer Ingo Molnar <mingo@elte.hu>  2008-10-28 16:26:12 +0100
commit    7a9787e1eba95a166265e6a260cf30af04ef0a99 (patch)
tree      e730a4565e0318140d2fbd2f0415d18a339d7336 /block
parent    41b9eb264c8407655db57b60b4457fe1b2ec9977 (diff)
parent    0173a3265b228da319ceb9c1ec6a5682fd1b2d92 (diff)
Merge commit 'v2.6.28-rc2' into x86/pci-ioapic-boot-irq-quirks
Diffstat (limited to 'block')
-rw-r--r--  block/Makefile           |    4
-rw-r--r--  block/as-iosched.c       |   17
-rw-r--r--  block/blk-barrier.c      |   72
-rw-r--r--  block/blk-core.c         |  660
-rw-r--r--  block/blk-exec.c         |    6
-rw-r--r--  block/blk-integrity.c    |   33
-rw-r--r--  block/blk-map.c          |   72
-rw-r--r--  block/blk-merge.c        |  145
-rw-r--r--  block/blk-settings.c     |   45
-rw-r--r--  block/blk-softirq.c      |  175
-rw-r--r--  block/blk-sysfs.c        |   35
-rw-r--r--  block/blk-tag.c          |   28
-rw-r--r--  block/blk-timeout.c      |  238
-rw-r--r--  block/blk.h              |   49
-rw-r--r--  block/blktrace.c         |   32
-rw-r--r--  block/bsg.c              |   55
-rw-r--r--  block/cfq-iosched.c      |   57
-rw-r--r--  block/cmd-filter.c       |  210
-rw-r--r--  block/compat_ioctl.c     |  176
-rw-r--r--  block/deadline-iosched.c |   40
-rw-r--r--  block/elevator.c         |   56
-rw-r--r--  block/genhd.c            |  963
-rw-r--r--  block/ioctl.c            |  298
-rw-r--r--  block/scsi_ioctl.c       |  119
24 files changed, 2332 insertions, 1253 deletions
diff --git a/block/Makefile b/block/Makefile
index 208000b0750..bfe73049f93 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -4,8 +4,8 @@
obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
- blk-exec.o blk-merge.o ioctl.o genhd.o scsi_ioctl.o \
- cmd-filter.o
+ blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
+ ioctl.o genhd.o scsi_ioctl.o cmd-filter.o
obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 9735acb5b4f..71f0abb219e 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -462,7 +462,7 @@ static void as_antic_stop(struct as_data *ad)
del_timer(&ad->antic_timer);
ad->antic_status = ANTIC_FINISHED;
/* see as_work_handler */
- kblockd_schedule_work(&ad->antic_work);
+ kblockd_schedule_work(ad->q, &ad->antic_work);
}
}
@@ -483,7 +483,7 @@ static void as_antic_timeout(unsigned long data)
aic = ad->io_context->aic;
ad->antic_status = ANTIC_FINISHED;
- kblockd_schedule_work(&ad->antic_work);
+ kblockd_schedule_work(q, &ad->antic_work);
if (aic->ttime_samples == 0) {
/* process anticipated on has exited or timed out*/
@@ -745,6 +745,14 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
*/
static int as_can_anticipate(struct as_data *ad, struct request *rq)
{
+#if 0 /* disable for now, we need to check tag level as well */
+ /*
+ * SSD device without seek penalty, disable idling
+ */
+ if (blk_queue_nonrot(ad->q))
+ return 0;
+#endif
+
if (!ad->io_context)
/*
* Last request submitted was a write
@@ -837,15 +845,14 @@ static void as_completed_request(struct request_queue *q, struct request *rq)
WARN_ON(!list_empty(&rq->queuelist));
if (RQ_STATE(rq) != AS_RQ_REMOVED) {
- printk("rq->state %d\n", RQ_STATE(rq));
- WARN_ON(1);
+ WARN(1, "rq->state %d\n", RQ_STATE(rq));
goto out;
}
if (ad->changed_batch && ad->nr_dispatched == 1) {
ad->current_batch_expires = jiffies +
ad->batch_expire[ad->batch_data_dir];
- kblockd_schedule_work(&ad->antic_work);
+ kblockd_schedule_work(q, &ad->antic_work);
ad->changed_batch = 0;
if (ad->batch_data_dir == REQ_SYNC)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index a09ead19f9c..5c99ff8d2db 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -293,7 +293,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
bio->bi_end_io = bio_end_empty_barrier;
bio->bi_private = &wait;
bio->bi_bdev = bdev;
- submit_bio(1 << BIO_RW_BARRIER, bio);
+ submit_bio(WRITE_BARRIER, bio);
wait_for_completion(&wait);
@@ -315,3 +315,73 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
+
+static void blkdev_discard_end_io(struct bio *bio, int err)
+{
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ }
+
+ bio_put(bio);
+}
+
+/**
+ * blkdev_issue_discard - queue a discard
+ * @bdev: blockdev to issue discard for
+ * @sector: start sector
+ * @nr_sects: number of sectors to discard
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ *
+ * Description:
+ * Issue a discard request for the sectors in question. Does not wait.
+ */
+int blkdev_issue_discard(struct block_device *bdev,
+ sector_t sector, sector_t nr_sects, gfp_t gfp_mask)
+{
+ struct request_queue *q;
+ struct bio *bio;
+ int ret = 0;
+
+ if (bdev->bd_disk == NULL)
+ return -ENXIO;
+
+ q = bdev_get_queue(bdev);
+ if (!q)
+ return -ENXIO;
+
+ if (!q->prepare_discard_fn)
+ return -EOPNOTSUPP;
+
+ while (nr_sects && !ret) {
+ bio = bio_alloc(gfp_mask, 0);
+ if (!bio)
+ return -ENOMEM;
+
+ bio->bi_end_io = blkdev_discard_end_io;
+ bio->bi_bdev = bdev;
+
+ bio->bi_sector = sector;
+
+ if (nr_sects > q->max_hw_sectors) {
+ bio->bi_size = q->max_hw_sectors << 9;
+ nr_sects -= q->max_hw_sectors;
+ sector += q->max_hw_sectors;
+ } else {
+ bio->bi_size = nr_sects << 9;
+ nr_sects = 0;
+ }
+ bio_get(bio);
+ submit_bio(DISCARD_BARRIER, bio);
+
+ /* Check if it failed immediately */
+ if (bio_flagged(bio, BIO_EOPNOTSUPP))
+ ret = -EOPNOTSUPP;
+ else if (!bio_flagged(bio, BIO_UPTODATE))
+ ret = -EIO;
+ bio_put(bio);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_discard);
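
As a rough illustration of the new helper (a sketch only, not part of the patch; example_discard_extent() and its caller are invented names), a filesystem that frees an extent could issue a discard like this:

    #include <linux/blkdev.h>
    #include <linux/fs.h>

    static int example_discard_extent(struct super_block *sb,
                                      sector_t start, sector_t nr_sects)
    {
            int ret;

            /* Fire-and-forget: blkdev_issue_discard() does not wait. */
            ret = blkdev_issue_discard(sb->s_bdev, start, nr_sects, GFP_NOFS);
            if (ret == -EOPNOTSUPP)
                    ret = 0;        /* device lacks discard support; ignore */

            return ret;
    }
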
diff --git a/block/blk-core.c b/block/blk-core.c
index fef79ccb2a1..c3df30cfb3f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -26,8 +26,6 @@
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
-#include <linux/interrupt.h>
-#include <linux/cpu.h>
#include <linux/blktrace_api.h>
#include <linux/fault-inject.h>
@@ -50,27 +48,26 @@ struct kmem_cache *blk_requestq_cachep;
*/
static struct workqueue_struct *kblockd_workqueue;
-static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
-
static void drive_stat_acct(struct request *rq, int new_io)
{
struct hd_struct *part;
int rw = rq_data_dir(rq);
+ int cpu;
if (!blk_fs_request(rq) || !rq->rq_disk)
return;
- part = get_part(rq->rq_disk, rq->sector);
+ cpu = part_stat_lock();
+ part = disk_map_sector_rcu(rq->rq_disk, rq->sector);
+
if (!new_io)
- __all_stat_inc(rq->rq_disk, part, merges[rw], rq->sector);
+ part_stat_inc(cpu, part, merges[rw]);
else {
- disk_round_stats(rq->rq_disk);
- rq->rq_disk->in_flight++;
- if (part) {
- part_round_stats(part);
- part->in_flight++;
- }
+ part_round_stats(cpu, part);
+ part_inc_in_flight(part);
}
+
+ part_stat_unlock();
}
void blk_queue_congestion_threshold(struct request_queue *q)
@@ -113,7 +110,8 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
memset(rq, 0, sizeof(*rq));
INIT_LIST_HEAD(&rq->queuelist);
- INIT_LIST_HEAD(&rq->donelist);
+ INIT_LIST_HEAD(&rq->timeout_list);
+ rq->cpu = -1;
rq->q = q;
rq->sector = rq->hard_sector = (sector_t) -1;
INIT_HLIST_NODE(&rq->hash);
@@ -212,6 +210,24 @@ void blk_plug_device(struct request_queue *q)
}
EXPORT_SYMBOL(blk_plug_device);
+/**
+ * blk_plug_device_unlocked - plug a device without queue lock held
+ * @q: The &struct request_queue to plug
+ *
+ * Description:
+ * Like @blk_plug_device(), but grabs the queue lock and disables
+ * interrupts.
+ **/
+void blk_plug_device_unlocked(struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_plug_device(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(blk_plug_device_unlocked);
+
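
A minimal sketch of how a caller might use the new unlocked variant (hypothetical; example_kick_later() is not from this patch):

    #include <linux/blkdev.h>

    static void example_kick_later(struct request_queue *q)
    {
            /* Safe without q->queue_lock held: the helper takes it itself. */
            blk_plug_device_unlocked(q);
    }
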
/*
* remove the queue from the plugged list, if present. called with
* queue lock held and interrupts disabled.
@@ -241,7 +257,6 @@ void __generic_unplug_device(struct request_queue *q)
q->request_fn(q);
}
-EXPORT_SYMBOL(__generic_unplug_device);
/**
* generic_unplug_device - fire a request queue
@@ -290,7 +305,7 @@ void blk_unplug_timeout(unsigned long data)
blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
q->rq.count[READ] + q->rq.count[WRITE]);
- kblockd_schedule_work(&q->unplug_work);
+ kblockd_schedule_work(q, &q->unplug_work);
}
void blk_unplug(struct request_queue *q)
@@ -307,6 +322,24 @@ void blk_unplug(struct request_queue *q)
}
EXPORT_SYMBOL(blk_unplug);
+static void blk_invoke_request_fn(struct request_queue *q)
+{
+ if (unlikely(blk_queue_stopped(q)))
+ return;
+
+ /*
+ * one level of recursion is ok and is much faster than kicking
+ * the unplug handling
+ */
+ if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+ q->request_fn(q);
+ queue_flag_clear(QUEUE_FLAG_REENTER, q);
+ } else {
+ queue_flag_set(QUEUE_FLAG_PLUGGED, q);
+ kblockd_schedule_work(q, &q->unplug_work);
+ }
+}
+
/**
* blk_start_queue - restart a previously stopped queue
* @q: The &struct request_queue in question
@@ -321,18 +354,7 @@ void blk_start_queue(struct request_queue *q)
WARN_ON(!irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-
- /*
- * one level of recursion is ok and is much faster than kicking
- * the unplug handling
- */
- if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
- q->request_fn(q);
- queue_flag_clear(QUEUE_FLAG_REENTER, q);
- } else {
- blk_plug_device(q);
- kblockd_schedule_work(&q->unplug_work);
- }
+ blk_invoke_request_fn(q);
}
EXPORT_SYMBOL(blk_start_queue);
@@ -379,8 +401,13 @@ void blk_sync_queue(struct request_queue *q)
EXPORT_SYMBOL(blk_sync_queue);
/**
- * blk_run_queue - run a single device queue
+ * __blk_run_queue - run a single device queue
* @q: The queue to run
+ *
+ * Description:
+ * See @blk_run_queue. This variant must be called with the queue lock
+ * held and interrupts disabled.
+ *
*/
void __blk_run_queue(struct request_queue *q)
{
@@ -390,21 +417,20 @@ void __blk_run_queue(struct request_queue *q)
* Only recurse once to avoid overrunning the stack, let the unplug
* handling reinvoke the handler shortly if we already got there.
*/
- if (!elv_queue_empty(q)) {
- if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
- q->request_fn(q);
- queue_flag_clear(QUEUE_FLAG_REENTER, q);
- } else {
- blk_plug_device(q);
- kblockd_schedule_work(&q->unplug_work);
- }
- }
+ if (!elv_queue_empty(q))
+ blk_invoke_request_fn(q);
}
EXPORT_SYMBOL(__blk_run_queue);
/**
* blk_run_queue - run a single device queue
* @q: The queue to run
+ *
+ * Description:
+ * Invoke request handling on this queue, if it has pending work to do.
+ * May be used to restart queueing when a request has completed. Also
+ * See @blk_start_queueing.
+ *
*/
void blk_run_queue(struct request_queue *q)
{
@@ -423,6 +449,14 @@ void blk_put_queue(struct request_queue *q)
void blk_cleanup_queue(struct request_queue *q)
{
+ /*
+ * We know we have process context here, so we can be a little
+ * cautious and ensure that pending block actions on this device
+ * are done before moving on. Going into this function, we should
+ * not have processes doing IO to this device.
+ */
+ blk_sync_queue(q);
+
mutex_lock(&q->sysfs_lock);
queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
mutex_unlock(&q->sysfs_lock);
@@ -478,6 +512,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
}
init_timer(&q->unplug_timer);
+ setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+ INIT_LIST_HEAD(&q->timeout_list);
+ INIT_WORK(&q->unplug_work, blk_unplug_work);
kobject_init(&q->kobj, &blk_queue_ktype);
@@ -513,7 +550,7 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
* request queue; this lock will be taken also from interrupt context, so irq
* disabling is needed for it.
*
- * Function returns a pointer to the initialized request queue, or NULL if
+ * Function returns a pointer to the initialized request queue, or %NULL if
* it didn't succeed.
*
* Note:
@@ -551,7 +588,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
q->request_fn = rfn;
q->prep_rq_fn = NULL;
q->unplug_fn = generic_unplug_device;
- q->queue_flags = (1 << QUEUE_FLAG_CLUSTER);
+ q->queue_flags = (1 << QUEUE_FLAG_CLUSTER |
+ 1 << QUEUE_FLAG_STACKABLE);
q->queue_lock = lock;
blk_queue_segment_boundary(q, 0xffffffff);
@@ -564,6 +602,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
q->sg_reserved_size = INT_MAX;
+ blk_set_cmd_filter_defaults(&q->cmd_filter);
+
/*
* all done
*/
@@ -604,10 +644,6 @@ blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
blk_rq_init(q, rq);
- /*
- * first three bits are identical in rq->cmd_flags and bio->bi_rw,
- * see bio.h and blkdev.h
- */
rq->cmd_flags = rw | REQ_ALLOCED;
if (priv) {
@@ -862,15 +898,18 @@ EXPORT_SYMBOL(blk_get_request);
*
* This is basically a helper to remove the need to know whether a queue
* is plugged or not if someone just wants to initiate dispatch of requests
- * for this queue.
+ * for this queue. Should be used to start queueing on a device outside
+ * of ->request_fn() context. Also see @blk_run_queue.
*
* The queue lock must be held with interrupts disabled.
*/
void blk_start_queueing(struct request_queue *q)
{
- if (!blk_queue_plugged(q))
+ if (!blk_queue_plugged(q)) {
+ if (unlikely(blk_queue_stopped(q)))
+ return;
q->request_fn(q);
- else
+ } else
__generic_unplug_device(q);
}
EXPORT_SYMBOL(blk_start_queueing);
@@ -887,6 +926,8 @@ EXPORT_SYMBOL(blk_start_queueing);
*/
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
+ blk_delete_timer(rq);
+ blk_clear_rq_complete(rq);
blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
if (blk_rq_tagged(rq))
@@ -897,7 +938,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
EXPORT_SYMBOL(blk_requeue_request);
/**
- * blk_insert_request - insert a special request in to a request queue
+ * blk_insert_request - insert a special request into a request queue
* @q: request queue where request should be inserted
* @rq: request to be inserted
* @at_head: insert request at head or tail of queue
@@ -907,8 +948,8 @@ EXPORT_SYMBOL(blk_requeue_request);
* Many block devices need to execute commands asynchronously, so they don't
* block the whole kernel from preemption during request execution. This is
 * accomplished normally by inserting artificial requests tagged as
- * REQ_SPECIAL in to the corresponding request queue, and letting them be
- * scheduled for actual execution by the request queue.
+ * REQ_TYPE_SPECIAL in to the corresponding request queue, and letting them
+ * be scheduled for actual execution by the request queue.
*
* We have the option of inserting the head or the tail of the queue.
* Typically we use the tail for new ioctls and so forth. We use the head
@@ -962,9 +1003,24 @@ static inline void add_request(struct request_queue *q, struct request *req)
__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
}
-/*
- * disk_round_stats() - Round off the performance stats on a struct
- * disk_stats.
+static void part_round_stats_single(int cpu, struct hd_struct *part,
+ unsigned long now)
+{
+ if (now == part->stamp)
+ return;
+
+ if (part->in_flight) {
+ __part_stat_add(cpu, part, time_in_queue,
+ part->in_flight * (now - part->stamp));
+ __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
+ }
+ part->stamp = now;
+}
+
+/**
+ * part_round_stats() - Round off the performance stats on a struct disk_stats.
+ * @cpu: cpu number for stats access
+ * @part: target partition
*
* The average IO queue length and utilisation statistics are maintained
* by observing the current state of the queue length and the amount of
@@ -977,36 +1033,15 @@ static inline void add_request(struct request_queue *q, struct request *req)
* /proc/diskstats. This accounts immediately for all queue usage up to
* the current jiffies and restarts the counters again.
*/
-void disk_round_stats(struct gendisk *disk)
-{
- unsigned long now = jiffies;
-
- if (now == disk->stamp)
- return;
-
- if (disk->in_flight) {
- __disk_stat_add(disk, time_in_queue,
- disk->in_flight * (now - disk->stamp));
- __disk_stat_add(disk, io_ticks, (now - disk->stamp));
- }
- disk->stamp = now;
-}
-EXPORT_SYMBOL_GPL(disk_round_stats);
-
-void part_round_stats(struct hd_struct *part)
+void part_round_stats(int cpu, struct hd_struct *part)
{
unsigned long now = jiffies;
- if (now == part->stamp)
- return;
-
- if (part->in_flight) {
- __part_stat_add(part, time_in_queue,
- part->in_flight * (now - part->stamp));
- __part_stat_add(part, io_ticks, (now - part->stamp));
- }
- part->stamp = now;
+ if (part->partno)
+ part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
+ part_round_stats_single(cpu, part, now);
}
+EXPORT_SYMBOL_GPL(part_round_stats);
/*
* queue lock must be held
@@ -1050,18 +1085,31 @@ EXPORT_SYMBOL(blk_put_request);
void init_request_from_bio(struct request *req, struct bio *bio)
{
+ req->cpu = bio->bi_comp_cpu;
req->cmd_type = REQ_TYPE_FS;
/*
* inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
*/
- if (bio_rw_ahead(bio) || bio_failfast(bio))
- req->cmd_flags |= REQ_FAILFAST;
+ if (bio_rw_ahead(bio))
+ req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER);
+ if (bio_failfast_dev(bio))
+ req->cmd_flags |= REQ_FAILFAST_DEV;
+ if (bio_failfast_transport(bio))
+ req->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+ if (bio_failfast_driver(bio))
+ req->cmd_flags |= REQ_FAILFAST_DRIVER;
/*
* REQ_BARRIER implies no merging, but lets make it explicit
*/
- if (unlikely(bio_barrier(bio)))
+ if (unlikely(bio_discard(bio))) {
+ req->cmd_flags |= REQ_DISCARD;
+ if (bio_barrier(bio))
+ req->cmd_flags |= REQ_SOFTBARRIER;
+ req->q->prepare_discard_fn(req->q, req);
+ } else if (unlikely(bio_barrier(bio)))
req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
if (bio_sync(bio))
@@ -1079,7 +1127,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
static int __make_request(struct request_queue *q, struct bio *bio)
{
struct request *req;
- int el_ret, nr_sectors, barrier, err;
+ int el_ret, nr_sectors, barrier, discard, err;
const unsigned short prio = bio_prio(bio);
const int sync = bio_sync(bio);
int rw_flags;
@@ -1094,7 +1142,14 @@ static int __make_request(struct request_queue *q, struct bio *bio)
blk_queue_bounce(q, &bio);
barrier = bio_barrier(bio);
- if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
+ if (unlikely(barrier) && bio_has_data(bio) &&
+ (q->next_ordered == QUEUE_ORDERED_NONE)) {
+ err = -EOPNOTSUPP;
+ goto end_io;
+ }
+
+ discard = bio_discard(bio);
+ if (unlikely(discard) && !q->prepare_discard_fn) {
err = -EOPNOTSUPP;
goto end_io;
}
@@ -1118,6 +1173,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
req->biotail = bio;
req->nr_sectors = req->hard_nr_sectors += nr_sectors;
req->ioprio = ioprio_best(req->ioprio, prio);
+ if (!blk_rq_cpu_valid(req))
+ req->cpu = bio->bi_comp_cpu;
drive_stat_acct(req, 0);
if (!attempt_back_merge(q, req))
elv_merged_request(q, req, el_ret);
@@ -1145,6 +1202,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
req->sector = req->hard_sector = bio->bi_sector;
req->nr_sectors = req->hard_nr_sectors += nr_sectors;
req->ioprio = ioprio_best(req->ioprio, prio);
+ if (!blk_rq_cpu_valid(req))
+ req->cpu = bio->bi_comp_cpu;
drive_stat_acct(req, 0);
if (!attempt_front_merge(q, req))
elv_merged_request(q, req, el_ret);
@@ -1180,13 +1239,15 @@ get_rq:
init_request_from_bio(req, bio);
spin_lock_irq(q->queue_lock);
+ if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
+ bio_flagged(bio, BIO_CPU_AFFINE))
+ req->cpu = blk_cpu_to_group(smp_processor_id());
if (elv_queue_empty(q))
blk_plug_device(q);
add_request(q, req);
out:
if (sync)
__generic_unplug_device(q);
-
spin_unlock_irq(q->queue_lock);
return 0;
@@ -1240,8 +1301,9 @@ __setup("fail_make_request=", setup_fail_make_request);
static int should_fail_request(struct bio *bio)
{
- if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) ||
- (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail))
+ struct hd_struct *part = bio->bi_bdev->bd_part;
+
+ if (part_to_disk(part)->part0.make_it_fail || part->make_it_fail)
return should_fail(&fail_make_request, bio->bi_size);
return 0;
@@ -1294,7 +1356,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
}
/**
- * generic_make_request: hand a buffer to its device driver for I/O
+ * generic_make_request - hand a buffer to its device driver for I/O
* @bio: The bio describing the location in memory and on the device.
*
* generic_make_request() is used to make I/O requests of block
@@ -1389,7 +1451,8 @@ end_io:
if (bio_check_eod(bio, nr_sectors))
goto end_io;
- if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
+ if ((bio_empty_barrier(bio) && !q->prepare_flush_fn) ||
+ (bio_discard(bio) && !q->prepare_discard_fn)) {
err = -EOPNOTSUPP;
goto end_io;
}
@@ -1451,13 +1514,13 @@ void generic_make_request(struct bio *bio)
EXPORT_SYMBOL(generic_make_request);
/**
- * submit_bio: submit a bio to the block device layer for I/O
+ * submit_bio - submit a bio to the block device layer for I/O
* @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
* @bio: The &struct bio which describes the I/O
*
* submit_bio() is very similar in purpose to generic_make_request(), and
* uses that function to do most of the work. Both are fairly rough
- * interfaces, @bio must be presetup and ready for I/O.
+ * interfaces; @bio must be presetup and ready for I/O.
*
*/
void submit_bio(int rw, struct bio *bio)
@@ -1470,11 +1533,7 @@ void submit_bio(int rw, struct bio *bio)
* If it's a regular read/write or a barrier with data attached,
* go through the normal accounting stuff before submission.
*/
- if (!bio_empty_barrier(bio)) {
-
- BIO_BUG_ON(!bio->bi_size);
- BIO_BUG_ON(!bio->bi_io_vec);
-
+ if (bio_has_data(bio)) {
if (rw & WRITE) {
count_vm_events(PGPGOUT, count);
} else {
@@ -1497,9 +1556,90 @@ void submit_bio(int rw, struct bio *bio)
EXPORT_SYMBOL(submit_bio);
/**
+ * blk_rq_check_limits - Helper function to check a request for the queue limit
+ * @q: the queue
+ * @rq: the request being checked
+ *
+ * Description:
+ * @rq may have been made based on weaker limitations of upper-level queues
+ * in request stacking drivers, and it may violate the limitation of @q.
+ * Since the block layer and the underlying device driver trust @rq
+ * after it is inserted to @q, it should be checked against @q before
+ * the insertion using this generic function.
+ *
+ * This function should also be useful for request stacking drivers
+ * in some cases below, so export this function.
+ * Request stacking drivers like request-based dm may change the queue
+ * limits while requests are in the queue (e.g. dm's table swapping).
+ * Such request stacking drivers should check those requests against
+ * the new queue limits again when they dispatch those requests,
+ * although such checks are also done against the old queue limits
+ * when submitting requests.
+ */
+int blk_rq_check_limits(struct request_queue *q, struct request *rq)
+{
+ if (rq->nr_sectors > q->max_sectors ||
+ rq->data_len > q->max_hw_sectors << 9) {
+ printk(KERN_ERR "%s: over max size limit.\n", __func__);
+ return -EIO;
+ }
+
+ /*
+ * queue's settings related to segment counting like q->bounce_pfn
+ * may differ from that of other stacking queues.
+ * Recalculate it to check the request correctly on this queue's
+ * limitation.
+ */
+ blk_recalc_rq_segments(rq);
+ if (rq->nr_phys_segments > q->max_phys_segments ||
+ rq->nr_phys_segments > q->max_hw_segments) {
+ printk(KERN_ERR "%s: over max segments limit.\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blk_rq_check_limits);
+
+/**
+ * blk_insert_cloned_request - Helper for stacking drivers to submit a request
+ * @q: the queue to submit the request
+ * @rq: the request being queued
+ */
+int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+{
+ unsigned long flags;
+
+ if (blk_rq_check_limits(q, rq))
+ return -EIO;
+
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+ if (rq->rq_disk && rq->rq_disk->part0.make_it_fail &&
+ should_fail(&fail_make_request, blk_rq_bytes(rq)))
+ return -EIO;
+#endif
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ /*
+ * Submitting request must be dequeued before calling this function
+ * because it will be linked to another request_queue
+ */
+ BUG_ON(blk_queued_rq(rq));
+
+ drive_stat_acct(rq, 1);
+ __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
+
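
As a hedged sketch of the intended use by request stacking drivers (the dispatch helper below is invented for illustration; only blk_rq_check_limits() and blk_insert_cloned_request() come from the patch):

    #include <linux/blkdev.h>

    /* A request-based stacking driver hands a remapped clone to the
     * underlying queue, re-validating it against that queue's limits. */
    static int example_dispatch_clone(struct request_queue *lower_q,
                                      struct request *clone)
    {
            if (blk_rq_check_limits(lower_q, clone))
                    return -EIO;

            /* The clone must already be dequeued from any other queue. */
            return blk_insert_cloned_request(lower_q, clone);
    }
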
+/**
* __end_that_request_first - end I/O on a request
* @req: the request being processed
- * @error: 0 for success, < 0 for error
+ * @error: %0 for success, < %0 for error
* @nr_bytes: number of bytes to complete
*
* Description:
@@ -1507,8 +1647,8 @@ EXPORT_SYMBOL(submit_bio);
* for the next range of segments (if any) in the cluster.
*
* Return:
- * 0 - we are done with this request, call end_that_request_last()
- * 1 - still buffers pending for this request
+ * %0 - we are done with this request, call end_that_request_last()
+ * %1 - still buffers pending for this request
**/
static int __end_that_request_first(struct request *req, int error,
int nr_bytes)
@@ -1519,7 +1659,7 @@ static int __end_that_request_first(struct request *req, int error,
blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
/*
- * for a REQ_BLOCK_PC request, we want to carry any eventual
+ * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual
* sense key with us all the way through
*/
if (!blk_pc_request(req))
@@ -1532,11 +1672,14 @@ static int __end_that_request_first(struct request *req, int error,
}
if (blk_fs_request(req) && req->rq_disk) {
- struct hd_struct *part = get_part(req->rq_disk, req->sector);
const int rw = rq_data_dir(req);
+ struct hd_struct *part;
+ int cpu;
- all_stat_add(req->rq_disk, part, sectors[rw],
- nr_bytes >> 9, req->sector);
+ cpu = part_stat_lock();
+ part = disk_map_sector_rcu(req->rq_disk, req->sector);
+ part_stat_add(cpu, part, sectors[rw], nr_bytes >> 9);
+ part_stat_unlock();
}
total_bytes = bio_nbytes = 0;
@@ -1621,88 +1764,14 @@ static int __end_that_request_first(struct request *req, int error,
}
/*
- * splice the completion data to a local structure and hand off to
- * process_completion_queue() to complete the requests
- */
-static void blk_done_softirq(struct softirq_action *h)
-{
- struct list_head *cpu_list, local_list;
-
- local_irq_disable();
- cpu_list = &__get_cpu_var(blk_cpu_done);
- list_replace_init(cpu_list, &local_list);
- local_irq_enable();
-
- while (!list_empty(&local_list)) {
- struct request *rq;
-
- rq = list_entry(local_list.next, struct request, donelist);
- list_del_init(&rq->donelist);
- rq->q->softirq_done_fn(rq);
- }
-}
-
-static int __cpuinit blk_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- /*
- * If a CPU goes away, splice its entries to the current CPU
- * and trigger a run of the softirq
- */
- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
- int cpu = (unsigned long) hcpu;
-
- local_irq_disable();
- list_splice_init(&per_cpu(blk_cpu_done, cpu),
- &__get_cpu_var(blk_cpu_done));
- raise_softirq_irqoff(BLOCK_SOFTIRQ);
- local_irq_enable();
- }
-
- return NOTIFY_OK;
-}
-
-
-static struct notifier_block blk_cpu_notifier __cpuinitdata = {
- .notifier_call = blk_cpu_notify,
-};
-
-/**
- * blk_complete_request - end I/O on a request
- * @req: the request being processed
- *
- * Description:
- * Ends all I/O on a request. It does not handle partial completions,
- * unless the driver actually implements this in its completion callback
- * through requeueing. The actual completion happens out-of-order,
- * through a softirq handler. The user must have registered a completion
- * callback through blk_queue_softirq_done().
- **/
-
-void blk_complete_request(struct request *req)
-{
- struct list_head *cpu_list;
- unsigned long flags;
-
- BUG_ON(!req->q->softirq_done_fn);
-
- local_irq_save(flags);
-
- cpu_list = &__get_cpu_var(blk_cpu_done);
- list_add_tail(&req->donelist, cpu_list);
- raise_softirq_irqoff(BLOCK_SOFTIRQ);
-
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL(blk_complete_request);
-
-/*
* queue lock must be held
*/
static void end_that_request_last(struct request *req, int error)
{
struct gendisk *disk = req->rq_disk;
+ blk_delete_timer(req);
+
if (blk_rq_tagged(req))
blk_queue_end_tag(req->q, req);
@@ -1720,16 +1789,18 @@ static void end_that_request_last(struct request *req, int error)
if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
unsigned long duration = jiffies - req->start_time;
const int rw = rq_data_dir(req);
- struct hd_struct *part = get_part(disk, req->sector);
-
- __all_stat_inc(disk, part, ios[rw], req->sector);
- __all_stat_add(disk, part, ticks[rw], duration, req->sector);
- disk_round_stats(disk);
- disk->in_flight--;
- if (part) {
- part_round_stats(part);
- part->in_flight--;
- }
+ struct hd_struct *part;
+ int cpu;
+
+ cpu = part_stat_lock();
+ part = disk_map_sector_rcu(disk, req->sector);
+
+ part_stat_inc(cpu, part, ios[rw]);
+ part_stat_add(cpu, part, ticks[rw], duration);
+ part_round_stats(cpu, part);
+ part_dec_in_flight(part);
+
+ part_stat_unlock();
}
if (req->end_io)
@@ -1742,17 +1813,6 @@ static void end_that_request_last(struct request *req, int error)
}
}
-static inline void __end_request(struct request *rq, int uptodate,
- unsigned int nr_bytes)
-{
- int error = 0;
-
- if (uptodate <= 0)
- error = uptodate ? uptodate : -EIO;
-
- __blk_end_request(rq, error, nr_bytes);
-}
-
/**
* blk_rq_bytes - Returns bytes left to complete in the entire request
* @rq: the request being processed
@@ -1783,74 +1843,57 @@ unsigned int blk_rq_cur_bytes(struct request *rq)
EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
/**
- * end_queued_request - end all I/O on a queued request
- * @rq: the request being processed
- * @uptodate: error value or 0/1 uptodate flag
- *
- * Description:
- * Ends all I/O on a request, and removes it from the block layer queues.
- * Not suitable for normal IO completion, unless the driver still has
- * the request attached to the block layer.
- *
- **/
-void end_queued_request(struct request *rq, int uptodate)
-{
- __end_request(rq, uptodate, blk_rq_bytes(rq));
-}
-EXPORT_SYMBOL(end_queued_request);
-
-/**
- * end_dequeued_request - end all I/O on a dequeued request
- * @rq: the request being processed
- * @uptodate: error value or 0/1 uptodate flag
- *
- * Description:
- * Ends all I/O on a request. The request must already have been
- * dequeued using blkdev_dequeue_request(), as is normally the case
- * for most drivers.
- *
- **/
-void end_dequeued_request(struct request *rq, int uptodate)
-{
- __end_request(rq, uptodate, blk_rq_bytes(rq));
-}
-EXPORT_SYMBOL(end_dequeued_request);
-
-
-/**
* end_request - end I/O on the current segment of the request
* @req: the request being processed
- * @uptodate: error value or 0/1 uptodate flag
+ * @uptodate: error value or %0/%1 uptodate flag
*
* Description:
* Ends I/O on the current segment of a request. If that is the only
* remaining segment, the request is also completed and freed.
*
- * This is a remnant of how older block drivers handled IO completions.
- * Modern drivers typically end IO on the full request in one go, unless
+ * This is a remnant of how older block drivers handled I/O completions.
+ * Modern drivers typically end I/O on the full request in one go, unless
* they have a residual value to account for. For that case this function
* isn't really useful, unless the residual just happens to be the
* full current segment. In other words, don't use this function in new
- * code. Either use end_request_completely(), or the
- * end_that_request_chunk() (along with end_that_request_last()) for
- * partial completions.
- *
+ * code. Use blk_end_request() or __blk_end_request() to end a request.
**/
void end_request(struct request *req, int uptodate)
{
- __end_request(req, uptodate, req->hard_cur_sectors << 9);
+ int error = 0;
+
+ if (uptodate <= 0)
+ error = uptodate ? uptodate : -EIO;
+
+ __blk_end_request(req, error, req->hard_cur_sectors << 9);
}
EXPORT_SYMBOL(end_request);
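
The completion style the comment above recommends, as a minimal sketch (example_complete_rq() is an invented driver hook, not part of the patch):

    #include <linux/blkdev.h>

    static void example_complete_rq(struct request *rq, int error)
    {
            /* Finish the whole request in one call; no per-segment loop. */
            if (blk_end_request(rq, error, blk_rq_bytes(rq)))
                    printk(KERN_WARNING "block: request not fully completed\n");
    }
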
+static int end_that_request_data(struct request *rq, int error,
+ unsigned int nr_bytes, unsigned int bidi_bytes)
+{
+ if (rq->bio) {
+ if (__end_that_request_first(rq, error, nr_bytes))
+ return 1;
+
+ /* Bidi request must be completed as a whole */
+ if (blk_bidi_rq(rq) &&
+ __end_that_request_first(rq->next_rq, error, bidi_bytes))
+ return 1;
+ }
+
+ return 0;
+}
+
/**
* blk_end_io - Generic end_io function to complete a request.
* @rq: the request being processed
- * @error: 0 for success, < 0 for error
+ * @error: %0 for success, < %0 for error
* @nr_bytes: number of bytes to complete @rq
* @bidi_bytes: number of bytes to complete @rq->next_rq
* @drv_callback: function called between completion of bios in the request
* and completion of the request.
- * If the callback returns non 0, this helper returns without
+ * If the callback returns non %0, this helper returns without
* completion of the request.
*
* Description:
@@ -1858,8 +1901,8 @@ EXPORT_SYMBOL(end_request);
* If @rq has leftover, sets it up for the next range of segments.
*
* Return:
- * 0 - we are done with this request
- * 1 - this request is not freed yet, it still has pending buffers.
+ * %0 - we are done with this request
+ * %1 - this request is not freed yet, it still has pending buffers.
**/
static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
unsigned int bidi_bytes,
@@ -1868,15 +1911,8 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
struct request_queue *q = rq->q;
unsigned long flags = 0UL;
- if (blk_fs_request(rq) || blk_pc_request(rq)) {
- if (__end_that_request_first(rq, error, nr_bytes))
- return 1;
-
- /* Bidi request must be completed as a whole */
- if (blk_bidi_rq(rq) &&
- __end_that_request_first(rq->next_rq, error, bidi_bytes))
- return 1;
- }
+ if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
+ return 1;
/* Special feature for tricky drivers */
if (drv_callback && drv_callback(rq))
@@ -1894,7 +1930,7 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
/**
* blk_end_request - Helper function for drivers to complete the request.
* @rq: the request being processed
- * @error: 0 for success, < 0 for error
+ * @error: %0 for success, < %0 for error
* @nr_bytes: number of bytes to complete
*
* Description:
@@ -1902,8 +1938,8 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
* If @rq has leftover, sets it up for the next range of segments.
*
* Return:
- * 0 - we are done with this request
- * 1 - still buffers pending for this request
+ * %0 - we are done with this request
+ * %1 - still buffers pending for this request
**/
int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
@@ -1914,22 +1950,20 @@ EXPORT_SYMBOL_GPL(blk_end_request);
/**
* __blk_end_request - Helper function for drivers to complete the request.
* @rq: the request being processed
- * @error: 0 for success, < 0 for error
+ * @error: %0 for success, < %0 for error
* @nr_bytes: number of bytes to complete
*
* Description:
* Must be called with queue lock held unlike blk_end_request().
*
* Return:
- * 0 - we are done with this request
- * 1 - still buffers pending for this request
+ * %0 - we are done with this request
+ * %1 - still buffers pending for this request
**/
int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
- if (blk_fs_request(rq) || blk_pc_request(rq)) {
- if (__end_that_request_first(rq, error, nr_bytes))
- return 1;
- }
+ if (rq->bio && __end_that_request_first(rq, error, nr_bytes))
+ return 1;
add_disk_randomness(rq->rq_disk);
@@ -1942,7 +1976,7 @@ EXPORT_SYMBOL_GPL(__blk_end_request);
/**
* blk_end_bidi_request - Helper function for drivers to complete bidi request.
* @rq: the bidi request being processed
- * @error: 0 for success, < 0 for error
+ * @error: %0 for success, < %0 for error
* @nr_bytes: number of bytes to complete @rq
* @bidi_bytes: number of bytes to complete @rq->next_rq
*
@@ -1950,8 +1984,8 @@ EXPORT_SYMBOL_GPL(__blk_end_request);
* Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
*
* Return:
- * 0 - we are done with this request
- * 1 - still buffers pending for this request
+ * %0 - we are done with this request
+ * %1 - still buffers pending for this request
**/
int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
unsigned int bidi_bytes)
@@ -1961,13 +1995,43 @@ int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
EXPORT_SYMBOL_GPL(blk_end_bidi_request);
/**
+ * blk_update_request - Special helper function for request stacking drivers
+ * @rq: the request being processed
+ * @error: %0 for success, < %0 for error
+ * @nr_bytes: number of bytes to complete @rq
+ *
+ * Description:
+ * Ends I/O on a number of bytes attached to @rq, but doesn't complete
+ * the request structure even if @rq doesn't have leftover.
+ * If @rq has leftover, sets it up for the next range of segments.
+ *
+ * This special helper function is only for request stacking drivers
+ * (e.g. request-based dm) so that they can handle partial completion.
+ * Actual device drivers should use blk_end_request instead.
+ */
+void blk_update_request(struct request *rq, int error, unsigned int nr_bytes)
+{
+ if (!end_that_request_data(rq, error, nr_bytes, 0)) {
+ /*
+ * These members are not updated in end_that_request_data()
+ * when all bios are completed.
+ * Update them so that the request stacking driver can find
+ * how many bytes remain in the request later.
+ */
+ rq->nr_sectors = rq->hard_nr_sectors = 0;
+ rq->current_nr_sectors = rq->hard_cur_sectors = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(blk_update_request);
+
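
A hedged sketch of the partial-completion path for a request stacking driver (the helper name and the way the remainder is derived are assumptions, not from the patch):

    #include <linux/blkdev.h>

    static unsigned int example_partial_done(struct request *rq, int error,
                                             unsigned int done_bytes)
    {
            /* Updates the bio chain and counters but never frees the request. */
            blk_update_request(rq, error, done_bytes);

            /* Bytes still outstanding, e.g. for requeueing the remainder. */
            return rq->nr_sectors << 9;
    }
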
+/**
* blk_end_request_callback - Special helper function for tricky drivers
* @rq: the request being processed
- * @error: 0 for success, < 0 for error
+ * @error: %0 for success, < %0 for error
* @nr_bytes: number of bytes to complete
* @drv_callback: function called between completion of bios in the request
* and completion of the request.
- * If the callback returns non 0, this helper returns without
+ * If the callback returns non %0, this helper returns without
* completion of the request.
*
* Description:
@@ -1980,10 +2044,10 @@ EXPORT_SYMBOL_GPL(blk_end_bidi_request);
* Don't use this interface in other places anymore.
*
* Return:
- * 0 - we are done with this request
- * 1 - this request is not freed yet.
- * this request still has pending buffers or
- * the driver doesn't want to finish this request yet.
+ * %0 - we are done with this request
+ * %1 - this request is not freed yet.
+ * this request still has pending buffers or
+ * the driver doesn't want to finish this request yet.
**/
int blk_end_request_callback(struct request *rq, int error,
unsigned int nr_bytes,
@@ -1996,15 +2060,17 @@ EXPORT_SYMBOL_GPL(blk_end_request_callback);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio)
{
- /* first two bits are identical in rq->cmd_flags and bio->bi_rw */
+ /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw, and
+ we want BIO_RW_AHEAD (bit 1) to imply REQ_FAILFAST (bit 1). */
rq->cmd_flags |= (bio->bi_rw & 3);
- rq->nr_phys_segments = bio_phys_segments(q, bio);
- rq->nr_hw_segments = bio_hw_segments(q, bio);
+ if (bio_has_data(bio)) {
+ rq->nr_phys_segments = bio_phys_segments(q, bio);
+ rq->buffer = bio_data(bio);
+ }
rq->current_nr_sectors = bio_cur_sectors(bio);
rq->hard_cur_sectors = rq->current_nr_sectors;
rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
- rq->buffer = bio_data(bio);
rq->data_len = bio->bi_size;
rq->bio = rq->biotail = bio;
@@ -2013,7 +2079,35 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
rq->rq_disk = bio->bi_bdev->bd_disk;
}
-int kblockd_schedule_work(struct work_struct *work)
+/**
+ * blk_lld_busy - Check if underlying low-level drivers of a device are busy
+ * @q : the queue of the device being checked
+ *
+ * Description:
+ * Check if underlying low-level drivers of a device are busy.
+ * If the drivers want to export their busy state, they must set their
+ * own exporting function using blk_queue_lld_busy() first.
+ *
+ * Basically, this function is used only by request stacking drivers
+ * to stop dispatching requests to underlying devices when underlying
+ * devices are busy. This behavior helps more I/O merging on the queue
+ * of the request stacking driver and prevents I/O throughput regression
+ * on burst I/O load.
+ *
+ * Return:
+ * 0 - Not busy (The request stacking driver should dispatch request)
+ * 1 - Busy (The request stacking driver should stop dispatching request)
+ */
+int blk_lld_busy(struct request_queue *q)
+{
+ if (q->lld_busy_fn)
+ return q->lld_busy_fn(q);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blk_lld_busy);
+
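
A sketch of both sides of the interface described above (the example_hba structure and hooks are invented; blk_queue_lld_busy() is the registration helper the comment names):

    #include <linux/blkdev.h>

    struct example_hba {
            unsigned int queued;
            unsigned int depth;
    };

    /* Low-level driver hook: report busy once the HBA queue is full.
     * Registered at init time with blk_queue_lld_busy(q, example_lld_busy). */
    static int example_lld_busy(struct request_queue *q)
    {
            struct example_hba *hba = q->queuedata;

            return hba->queued >= hba->depth;
    }

    /* A stacking driver consults the lower queue before dispatching. */
    static int example_should_dispatch(struct request_queue *lower_q)
    {
            return !blk_lld_busy(lower_q);
    }
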
+int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
{
return queue_work(kblockd_workqueue, work);
}
@@ -2027,8 +2121,6 @@ EXPORT_SYMBOL(kblockd_flush_work);
int __init blk_dev_init(void)
{
- int i;
-
kblockd_workqueue = create_workqueue("kblockd");
if (!kblockd_workqueue)
panic("Failed to create kblockd\n");
@@ -2039,12 +2131,6 @@ int __init blk_dev_init(void)
blk_requestq_cachep = kmem_cache_create("blkdev_queue",
sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
- for_each_possible_cpu(i)
- INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
-
- open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
- register_hotcpu_notifier(&blk_cpu_notifier);
-
return 0;
}
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 9bceff7674f..6af716d1e54 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -16,7 +16,7 @@
/**
* blk_end_sync_rq - executes a completion event on a request
* @rq: request to complete
- * @error: end io status of the request
+ * @error: end I/O status of the request
*/
static void blk_end_sync_rq(struct request *rq, int error)
{
@@ -41,7 +41,7 @@ static void blk_end_sync_rq(struct request *rq, int error)
* @done: I/O completion handler
*
* Description:
- * Insert a fully prepared request at the back of the io scheduler queue
+ * Insert a fully prepared request at the back of the I/O scheduler queue
* for execution. Don't wait for completion.
*/
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
@@ -72,7 +72,7 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
* @at_head: insert request at head or tail of queue
*
* Description:
- * Insert a fully prepared request at the back of the io scheduler queue
+ * Insert a fully prepared request at the back of the I/O scheduler queue
* for execution and wait for completion.
*/
int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 3f1a8478cc3..61a8e2f8fdd 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -108,51 +108,51 @@ new_segment:
EXPORT_SYMBOL(blk_rq_map_integrity_sg);
/**
- * blk_integrity_compare - Compare integrity profile of two block devices
- * @b1: Device to compare
- * @b2: Device to compare
+ * blk_integrity_compare - Compare integrity profile of two disks
+ * @gd1: Disk to compare
+ * @gd2: Disk to compare
*
* Description: Meta-devices like DM and MD need to verify that all
* sub-devices use the same integrity format before advertising to
* upper layers that they can send/receive integrity metadata. This
- * function can be used to check whether two block devices have
+ * function can be used to check whether two gendisk devices have
* compatible integrity formats.
*/
-int blk_integrity_compare(struct block_device *bd1, struct block_device *bd2)
+int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2)
{
- struct blk_integrity *b1 = bd1->bd_disk->integrity;
- struct blk_integrity *b2 = bd2->bd_disk->integrity;
+ struct blk_integrity *b1 = gd1->integrity;
+ struct blk_integrity *b2 = gd2->integrity;
- BUG_ON(bd1->bd_disk == NULL);
- BUG_ON(bd2->bd_disk == NULL);
+ if (!b1 && !b2)
+ return 0;
if (!b1 || !b2)
- return 0;
+ return -1;
if (b1->sector_size != b2->sector_size) {
printk(KERN_ERR "%s: %s/%s sector sz %u != %u\n", __func__,
- bd1->bd_disk->disk_name, bd2->bd_disk->disk_name,
+ gd1->disk_name, gd2->disk_name,
b1->sector_size, b2->sector_size);
return -1;
}
if (b1->tuple_size != b2->tuple_size) {
printk(KERN_ERR "%s: %s/%s tuple sz %u != %u\n", __func__,
- bd1->bd_disk->disk_name, bd2->bd_disk->disk_name,
+ gd1->disk_name, gd2->disk_name,
b1->tuple_size, b2->tuple_size);
return -1;
}
if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) {
printk(KERN_ERR "%s: %s/%s tag sz %u != %u\n", __func__,
- bd1->bd_disk->disk_name, bd2->bd_disk->disk_name,
+ gd1->disk_name, gd2->disk_name,
b1->tag_size, b2->tag_size);
return -1;
}
if (strcmp(b1->name, b2->name)) {
printk(KERN_ERR "%s: %s/%s type %s != %s\n", __func__,
- bd1->bd_disk->disk_name, bd2->bd_disk->disk_name,
+ gd1->disk_name, gd2->disk_name,
b1->name, b2->name);
return -1;
}
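
For example, a meta-device could verify that all members share an integrity profile before advertising support (hedged sketch; the loop helper is invented, only blk_integrity_compare() is from the patch):

    #include <linux/genhd.h>
    #include <linux/blkdev.h>

    static int example_members_integrity_match(struct gendisk **disks, int nr)
    {
            int i;

            /* 0 means compatible (or no integrity on either side). */
            for (i = 1; i < nr; i++)
                    if (blk_integrity_compare(disks[0], disks[i]) < 0)
                            return 0;

            return 1;
    }
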
@@ -331,7 +331,8 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
return -1;
if (kobject_init_and_add(&bi->kobj, &integrity_ktype,
- &disk->dev.kobj, "%s", "integrity")) {
+ &disk_to_dev(disk)->kobj,
+ "%s", "integrity")) {
kmem_cache_free(integrity_cachep, bi);
return -1;
}
@@ -375,7 +376,7 @@ void blk_integrity_unregister(struct gendisk *disk)
kobject_uevent(&bi->kobj, KOBJ_REMOVE);
kobject_del(&bi->kobj);
- kobject_put(&disk->dev.kobj);
kmem_cache_free(integrity_cachep, bi);
+ disk->integrity = NULL;
}
EXPORT_SYMBOL(blk_integrity_unregister);
diff --git a/block/blk-map.c b/block/blk-map.c
index ddd96fb11a7..4849fa36161 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -41,10 +41,10 @@ static int __blk_rq_unmap_user(struct bio *bio)
}
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
- void __user *ubuf, unsigned int len)
+ struct rq_map_data *map_data, void __user *ubuf,
+ unsigned int len, int null_mapped, gfp_t gfp_mask)
{
unsigned long uaddr;
- unsigned int alignment;
struct bio *bio, *orig_bio;
int reading, ret;
@@ -55,15 +55,17 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
* direct dma. else, set up kernel bounce buffers
*/
uaddr = (unsigned long) ubuf;
- alignment = queue_dma_alignment(q) | q->dma_pad_mask;
- if (!(uaddr & alignment) && !(len & alignment))
- bio = bio_map_user(q, NULL, uaddr, len, reading);
+ if (blk_rq_aligned(q, ubuf, len) && !map_data)
+ bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
else
- bio = bio_copy_user(q, uaddr, len, reading);
+ bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
if (IS_ERR(bio))
return PTR_ERR(bio);
+ if (null_mapped)
+ bio->bi_flags |= (1 << BIO_NULL_MAPPED);
+
orig_bio = bio;
blk_queue_bounce(q, &bio);
@@ -85,17 +87,19 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
}
/**
- * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
+ * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
* @q: request queue where request should be inserted
* @rq: request structure to fill
+ * @map_data: pointer to the rq_map_data holding pages (if necessary)
* @ubuf: the user buffer
* @len: length of user data
+ * @gfp_mask: memory allocation flags
*
* Description:
- * Data will be mapped directly for zero copy io, if possible. Otherwise
+ * Data will be mapped directly for zero copy I/O, if possible. Otherwise
* a kernel bounce buffer is used.
*
- * A matching blk_rq_unmap_user() must be issued at the end of io, while
+ * A matching blk_rq_unmap_user() must be issued at the end of I/O, while
* still in process context.
*
* Note: The mapped bio may need to be bounced through blk_queue_bounce()
@@ -105,16 +109,22 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
* unmapping.
*/
int blk_rq_map_user(struct request_queue *q, struct request *rq,
- void __user *ubuf, unsigned long len)
+ struct rq_map_data *map_data, void __user *ubuf,
+ unsigned long len, gfp_t gfp_mask)
{
unsigned long bytes_read = 0;
struct bio *bio = NULL;
- int ret;
+ int ret, null_mapped = 0;
if (len > (q->max_hw_sectors << 9))
return -EINVAL;
- if (!len || !ubuf)
+ if (!len)
return -EINVAL;
+ if (!ubuf) {
+ if (!map_data || rq_data_dir(rq) != READ)
+ return -EINVAL;
+ null_mapped = 1;
+ }
while (bytes_read != len) {
unsigned long map_len, end, start;
@@ -132,7 +142,8 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
if (end - start > BIO_MAX_PAGES)
map_len -= PAGE_SIZE;
- ret = __blk_rq_map_user(q, rq, ubuf, map_len);
+ ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
+ null_mapped, gfp_mask);
if (ret < 0)
goto unmap_rq;
if (!bio)
@@ -154,18 +165,20 @@ unmap_rq:
EXPORT_SYMBOL(blk_rq_map_user);
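
A hedged sketch of an SG_IO-style caller adapted to the widened signature (error handling trimmed; the function name is invented, and passing a NULL rq_map_data and NULL disk is just one possible usage):

    #include <linux/blkdev.h>

    static int example_map_and_submit(struct request_queue *q,
                                      struct request *rq,
                                      void __user *ubuf, unsigned long len)
    {
            int ret;

            /* No preallocated pages, so map_data is NULL here. */
            ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
            if (ret)
                    return ret;

            blk_execute_rq(q, NULL, rq, 0);

            /* Use the original rq->bio; completion may have changed it. */
            return blk_rq_unmap_user(rq->bio);
    }
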
/**
- * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
+ * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
* @q: request queue where request should be inserted
* @rq: request to map data to
+ * @map_data: pointer to the rq_map_data holding pages (if necessary)
* @iov: pointer to the iovec
* @iov_count: number of elements in the iovec
* @len: I/O byte count
+ * @gfp_mask: memory allocation flags
*
* Description:
- * Data will be mapped directly for zero copy io, if possible. Otherwise
+ * Data will be mapped directly for zero copy I/O, if possible. Otherwise
* a kernel bounce buffer is used.
*
- * A matching blk_rq_unmap_user() must be issued at the end of io, while
+ * A matching blk_rq_unmap_user() must be issued at the end of I/O, while
* still in process context.
*
* Note: The mapped bio may need to be bounced through blk_queue_bounce()
@@ -175,7 +188,8 @@ EXPORT_SYMBOL(blk_rq_map_user);
* unmapping.
*/
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
- struct sg_iovec *iov, int iov_count, unsigned int len)
+ struct rq_map_data *map_data, struct sg_iovec *iov,
+ int iov_count, unsigned int len, gfp_t gfp_mask)
{
struct bio *bio;
int i, read = rq_data_dir(rq) == READ;
@@ -193,10 +207,11 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
}
}
- if (unaligned || (q->dma_pad_mask & len))
- bio = bio_copy_user_iov(q, iov, iov_count, read);
+ if (unaligned || (q->dma_pad_mask & len) || map_data)
+ bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
+ gfp_mask);
else
- bio = bio_map_user_iov(q, NULL, iov, iov_count, read);
+ bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);
if (IS_ERR(bio))
return PTR_ERR(bio);
@@ -216,6 +231,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
rq->buffer = rq->data = NULL;
return 0;
}
+EXPORT_SYMBOL(blk_rq_map_user_iov);
/**
* blk_rq_unmap_user - unmap a request with user data
@@ -224,7 +240,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
* Description:
* Unmap a rq previously mapped by blk_rq_map_user(). The caller must
* supply the original rq->bio from the blk_rq_map_user() return, since
- * the io completion may have changed rq->bio.
+ * the I/O completion may have changed rq->bio.
*/
int blk_rq_unmap_user(struct bio *bio)
{
@@ -250,7 +266,7 @@ int blk_rq_unmap_user(struct bio *bio)
EXPORT_SYMBOL(blk_rq_unmap_user);
/**
- * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
+ * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
* @q: request queue where request should be inserted
* @rq: request to fill
* @kbuf: the kernel buffer
@@ -264,26 +280,16 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
unsigned int len, gfp_t gfp_mask)
{
- unsigned long kaddr;
- unsigned int alignment;
int reading = rq_data_dir(rq) == READ;
int do_copy = 0;
struct bio *bio;
- unsigned long stack_mask = ~(THREAD_SIZE - 1);
if (len > (q->max_hw_sectors << 9))
return -EINVAL;
if (!len || !kbuf)
return -EINVAL;
- kaddr = (unsigned long)kbuf;
- alignment = queue_dma_alignment(q) | q->dma_pad_mask;
- do_copy = ((kaddr & alignment) || (len & alignment));
-
- if (!((kaddr & stack_mask) ^
- ((unsigned long)current->stack & stack_mask)))
- do_copy = 1;
-
+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
if (do_copy)
bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
else
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 5efc9e7a68b..8681cd6f991 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -11,7 +11,7 @@
void blk_recalc_rq_sectors(struct request *rq, int nsect)
{
- if (blk_fs_request(rq)) {
+ if (blk_fs_request(rq) || blk_discard_rq(rq)) {
rq->hard_sector += nsect;
rq->hard_nr_sectors -= nsect;
@@ -41,12 +41,9 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
void blk_recalc_rq_segments(struct request *rq)
{
int nr_phys_segs;
- int nr_hw_segs;
unsigned int phys_size;
- unsigned int hw_size;
struct bio_vec *bv, *bvprv = NULL;
int seg_size;
- int hw_seg_size;
int cluster;
struct req_iterator iter;
int high, highprv = 1;
@@ -56,8 +53,8 @@ void blk_recalc_rq_segments(struct request *rq)
return;
cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
- hw_seg_size = seg_size = 0;
- phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
+ seg_size = 0;
+ phys_size = nr_phys_segs = 0;
rq_for_each_segment(bv, rq, iter) {
/*
* the trick here is making sure that a high page is never
@@ -66,7 +63,7 @@ void blk_recalc_rq_segments(struct request *rq)
*/
high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
if (high || highprv)
- goto new_hw_segment;
+ goto new_segment;
if (cluster) {
if (seg_size + bv->bv_len > q->max_segment_size)
goto new_segment;
@@ -74,26 +71,14 @@ void blk_recalc_rq_segments(struct request *rq)
goto new_segment;
if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
goto new_segment;
- if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
- goto new_hw_segment;
seg_size += bv->bv_len;
- hw_seg_size += bv->bv_len;
bvprv = bv;
continue;
}
new_segment:
- if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
- !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
- hw_seg_size += bv->bv_len;
- else {
-new_hw_segment:
- if (nr_hw_segs == 1 &&
- hw_seg_size > rq->bio->bi_hw_front_size)
- rq->bio->bi_hw_front_size = hw_seg_size;
- hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
- nr_hw_segs++;
- }
+ if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
+ rq->bio->bi_seg_front_size = seg_size;
nr_phys_segs++;
bvprv = bv;
@@ -101,13 +86,12 @@ new_hw_segment:
highprv = high;
}
- if (nr_hw_segs == 1 &&
- hw_seg_size > rq->bio->bi_hw_front_size)
- rq->bio->bi_hw_front_size = hw_seg_size;
- if (hw_seg_size > rq->biotail->bi_hw_back_size)
- rq->biotail->bi_hw_back_size = hw_seg_size;
+ if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
+ rq->bio->bi_seg_front_size = seg_size;
+ if (seg_size > rq->biotail->bi_seg_back_size)
+ rq->biotail->bi_seg_back_size = seg_size;
+
rq->nr_phys_segments = nr_phys_segs;
- rq->nr_hw_segments = nr_hw_segs;
}
void blk_recount_segments(struct request_queue *q, struct bio *bio)
@@ -120,7 +104,6 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
blk_recalc_rq_segments(&rq);
bio->bi_next = nxt;
bio->bi_phys_segments = rq.nr_phys_segments;
- bio->bi_hw_segments = rq.nr_hw_segments;
bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);
@@ -131,13 +114,18 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
return 0;
- if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+ if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
+ q->max_segment_size)
return 0;
- if (bio->bi_size + nxt->bi_size > q->max_segment_size)
+
+ if (!bio_has_data(bio))
+ return 1;
+
+ if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
return 0;
/*
- * bio and nxt are contigous in memory, check if the queue allows
+ * bio and nxt are contiguous in memory; check if the queue allows
* these two to be merged into one
*/
if (BIO_SEG_BOUNDARY(q, bio, nxt))
@@ -146,22 +134,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
return 0;
}
-static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
- struct bio *nxt)
-{
- if (!bio_flagged(bio, BIO_SEG_VALID))
- blk_recount_segments(q, bio);
- if (!bio_flagged(nxt, BIO_SEG_VALID))
- blk_recount_segments(q, nxt);
- if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
- BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
- return 0;
- if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
- return 0;
-
- return 1;
-}
-
/*
* map a request to scatterlist, return number of sg entries setup. Caller
* must make sure sg can hold rq->nr_phys_segments entries
@@ -275,10 +247,9 @@ static inline int ll_new_hw_segment(struct request_queue *q,
struct request *req,
struct bio *bio)
{
- int nr_hw_segs = bio_hw_segments(q, bio);
int nr_phys_segs = bio_phys_segments(q, bio);
- if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
+ if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
|| req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge)
@@ -290,7 +261,6 @@ static inline int ll_new_hw_segment(struct request_queue *q,
* This will form the start of a new hw segment. Bump both
* counters.
*/
- req->nr_hw_segments += nr_hw_segs;
req->nr_phys_segments += nr_phys_segs;
return 1;
}
@@ -299,7 +269,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio)
{
unsigned short max_sectors;
- int len;
if (unlikely(blk_pc_request(req)))
max_sectors = q->max_hw_sectors;
@@ -316,19 +285,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
blk_recount_segments(q, req->biotail);
if (!bio_flagged(bio, BIO_SEG_VALID))
blk_recount_segments(q, bio);
- len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
- if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
- && !BIOVEC_VIRT_OVERSIZE(len)) {
- int mergeable = ll_new_mergeable(q, req, bio);
-
- if (mergeable) {
- if (req->nr_hw_segments == 1)
- req->bio->bi_hw_front_size = len;
- if (bio->bi_hw_segments == 1)
- bio->bi_hw_back_size = len;
- }
- return mergeable;
- }
return ll_new_hw_segment(q, req, bio);
}
@@ -337,7 +293,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio)
{
unsigned short max_sectors;
- int len;
if (unlikely(blk_pc_request(req)))
max_sectors = q->max_hw_sectors;
@@ -351,23 +306,10 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
q->last_merge = NULL;
return 0;
}
- len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
if (!bio_flagged(bio, BIO_SEG_VALID))
blk_recount_segments(q, bio);
if (!bio_flagged(req->bio, BIO_SEG_VALID))
blk_recount_segments(q, req->bio);
- if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
- !BIOVEC_VIRT_OVERSIZE(len)) {
- int mergeable = ll_new_mergeable(q, req, bio);
-
- if (mergeable) {
- if (bio->bi_hw_segments == 1)
- bio->bi_hw_front_size = len;
- if (req->nr_hw_segments == 1)
- req->biotail->bi_hw_back_size = len;
- }
- return mergeable;
- }
return ll_new_hw_segment(q, req, bio);
}
@@ -376,7 +318,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
struct request *next)
{
int total_phys_segments;
- int total_hw_segments;
+ unsigned int seg_size =
+ req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
/*
* First check if either of the requests are re-queued
@@ -392,32 +335,22 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
return 0;
total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
- if (blk_phys_contig_segment(q, req->biotail, next->bio))
+ if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
+ if (req->nr_phys_segments == 1)
+ req->bio->bi_seg_front_size = seg_size;
+ if (next->nr_phys_segments == 1)
+ next->biotail->bi_seg_back_size = seg_size;
total_phys_segments--;
+ }
if (total_phys_segments > q->max_phys_segments)
return 0;
- total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
- if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
- int len = req->biotail->bi_hw_back_size +
- next->bio->bi_hw_front_size;
- /*
- * propagate the combined length to the end of the requests
- */
- if (req->nr_hw_segments == 1)
- req->bio->bi_hw_front_size = len;
- if (next->nr_hw_segments == 1)
- next->biotail->bi_hw_back_size = len;
- total_hw_segments--;
- }
-
- if (total_hw_segments > q->max_hw_segments)
+ if (total_phys_segments > q->max_hw_segments)
return 0;
/* Merge is OK... */
req->nr_phys_segments = total_phys_segments;
- req->nr_hw_segments = total_hw_segments;
return 1;
}
@@ -470,17 +403,21 @@ static int attempt_merge(struct request_queue *q, struct request *req,
elv_merge_requests(q, req, next);
if (req->rq_disk) {
- struct hd_struct *part
- = get_part(req->rq_disk, req->sector);
- disk_round_stats(req->rq_disk);
- req->rq_disk->in_flight--;
- if (part) {
- part_round_stats(part);
- part->in_flight--;
- }
+ struct hd_struct *part;
+ int cpu;
+
+ cpu = part_stat_lock();
+ part = disk_map_sector_rcu(req->rq_disk, req->sector);
+
+ part_round_stats(cpu, part);
+ part_dec_in_flight(part);
+
+ part_stat_unlock();
}
req->ioprio = ioprio_best(req->ioprio, next->ioprio);
+ if (blk_rq_cpu_valid(next))
+ req->cpu = next->cpu;
__blk_put_request(q, next);
return 1;
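
The hunks above drop the separate hw-segment bookkeeping, so a request merge is now gated only on the combined physical-segment count against both queue limits. A minimal standalone sketch of that check (names are illustrative stand-ins, not the kernel's):

/*
 * Illustrative sketch only: req_segs/next_segs/contig stand in for
 * req->nr_phys_segments, next->nr_phys_segments and the
 * blk_phys_contig_segment() result used by ll_merge_requests_fn() above.
 */
static int merge_fits_limits(unsigned int req_segs, unsigned int next_segs,
			     int contig, unsigned int max_phys,
			     unsigned int max_hw)
{
	unsigned int total = req_segs + next_segs;

	if (contig)	/* tail of req and head of next share one segment */
		total--;

	return total <= max_phys && total <= max_hw;
}
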
diff --git a/block/blk-settings.c b/block/blk-settings.c
index dfc77012843..41392fbe19f 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -33,6 +33,23 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
EXPORT_SYMBOL(blk_queue_prep_rq);
/**
+ * blk_queue_set_discard - set a discard_sectors function for queue
+ * @q: queue
+ * @dfn: prepare_discard function
+ *
+ * It's possible for a queue to register a discard callback which is used
+ * to transform a discard request into the appropriate type for the
+ * hardware. If none is registered, then discard requests are failed
+ * with %EOPNOTSUPP.
+ *
+ */
+void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn)
+{
+ q->prepare_discard_fn = dfn;
+}
+EXPORT_SYMBOL(blk_queue_set_discard);
+
+/**
* blk_queue_merge_bvec - set a merge_bvec function for queue
* @q: queue
* @mbfn: merge_bvec_fn
@@ -60,6 +77,24 @@ void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
}
EXPORT_SYMBOL(blk_queue_softirq_done);
+void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
+{
+ q->rq_timeout = timeout;
+}
+EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
+
+void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
+{
+ q->rq_timed_out_fn = fn;
+}
+EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
+
+void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
+{
+ q->lld_busy_fn = fn;
+}
+EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
+
/**
* blk_queue_make_request - define an alternate make_request function for a device
* @q: the request queue for the device to be affected
@@ -106,8 +141,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
if (q->unplug_delay == 0)
q->unplug_delay = 1;
- INIT_WORK(&q->unplug_work, blk_unplug_work);
-
q->unplug_timer.function = blk_unplug_timeout;
q->unplug_timer.data = (unsigned long)q;
@@ -127,7 +160,7 @@ EXPORT_SYMBOL(blk_queue_make_request);
* Different hardware can have different requirements as to what pages
* it can do I/O directly to. A low level driver can call
* blk_queue_bounce_limit to have lower memory pages allocated as bounce
- * buffers for doing I/O to pages residing above @page.
+ * buffers for doing I/O to pages residing above @dma_addr.
**/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
{
@@ -212,7 +245,7 @@ EXPORT_SYMBOL(blk_queue_max_phys_segments);
* Description:
* Enables a low level driver to set an upper limit on the number of
* hw data segments in a request. This would be the largest number of
- * address/length pairs the host adapter can actually give as once
+ * address/length pairs the host adapter can actually give at once
* to the device.
**/
void blk_queue_max_hw_segments(struct request_queue *q,
@@ -393,7 +426,7 @@ EXPORT_SYMBOL(blk_queue_segment_boundary);
* @mask: alignment mask
*
* description:
- * set required memory and length aligment for direct dma transactions.
+ * set required memory and length alignment for direct dma transactions.
* this is used when building direct io requests for the queue.
*
**/
@@ -409,7 +442,7 @@ EXPORT_SYMBOL(blk_queue_dma_alignment);
* @mask: alignment mask
*
* description:
- * update required memory and length aligment for direct dma transactions.
+ * update required memory and length alignment for direct dma transactions.
* If the requested alignment is larger than the current alignment, then
* the current queue alignment is updated to the new value, otherwise it
* is left alone. The design of this is to allow multiple objects
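
For context on the blk_queue_set_discard() hook added above, a hedged registration sketch follows. The prepare_discard_fn signature is not visible in this hunk; the int (*)(struct request_queue *, struct request *) form and the mydrv_* names below are assumptions for illustration only.

#include <linux/blkdev.h>

/* Assumed callback signature -- not shown in the hunk above. */
static int mydrv_prepare_discard(struct request_queue *q, struct request *rq)
{
	/* rewrite rq into the device's native trim/unmap command here */
	return 0;
}

static void mydrv_init_queue(struct request_queue *q)
{
	blk_queue_set_discard(q, mydrv_prepare_discard);
}
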
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
new file mode 100644
index 00000000000..e660d26ca65
--- /dev/null
+++ b/block/blk-softirq.c
@@ -0,0 +1,175 @@
+/*
+ * Functions related to softirq rq completions
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+
+#include "blk.h"
+
+static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+
+/*
+ * Softirq action handler - move entries to local list and loop over them
+ * while passing them to the queue registered handler.
+ */
+static void blk_done_softirq(struct softirq_action *h)
+{
+ struct list_head *cpu_list, local_list;
+
+ local_irq_disable();
+ cpu_list = &__get_cpu_var(blk_cpu_done);
+ list_replace_init(cpu_list, &local_list);
+ local_irq_enable();
+
+ while (!list_empty(&local_list)) {
+ struct request *rq;
+
+ rq = list_entry(local_list.next, struct request, csd.list);
+ list_del_init(&rq->csd.list);
+ rq->q->softirq_done_fn(rq);
+ }
+}
+
+#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+static void trigger_softirq(void *data)
+{
+ struct request *rq = data;
+ unsigned long flags;
+ struct list_head *list;
+
+ local_irq_save(flags);
+ list = &__get_cpu_var(blk_cpu_done);
+ list_add_tail(&rq->csd.list, list);
+
+ if (list->next == &rq->csd.list)
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Set up and invoke a run of 'trigger_softirq' on the given cpu.
+ */
+static int raise_blk_irq(int cpu, struct request *rq)
+{
+ if (cpu_online(cpu)) {
+ struct call_single_data *data = &rq->csd;
+
+ data->func = trigger_softirq;
+ data->info = rq;
+ data->flags = 0;
+
+ __smp_call_function_single(cpu, data);
+ return 0;
+ }
+
+ return 1;
+}
+#else /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
+static int raise_blk_irq(int cpu, struct request *rq)
+{
+ return 1;
+}
+#endif
+
+static int __cpuinit blk_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ /*
+ * If a CPU goes away, splice its entries to the current CPU
+ * and trigger a run of the softirq
+ */
+ if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+ int cpu = (unsigned long) hcpu;
+
+ local_irq_disable();
+ list_splice_init(&per_cpu(blk_cpu_done, cpu),
+ &__get_cpu_var(blk_cpu_done));
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ local_irq_enable();
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata blk_cpu_notifier = {
+ .notifier_call = blk_cpu_notify,
+};
+
+void __blk_complete_request(struct request *req)
+{
+ struct request_queue *q = req->q;
+ unsigned long flags;
+ int ccpu, cpu, group_cpu;
+
+ BUG_ON(!q->softirq_done_fn);
+
+ local_irq_save(flags);
+ cpu = smp_processor_id();
+ group_cpu = blk_cpu_to_group(cpu);
+
+ /*
+ * Select completion CPU
+ */
+ if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1)
+ ccpu = req->cpu;
+ else
+ ccpu = cpu;
+
+ if (ccpu == cpu || ccpu == group_cpu) {
+ struct list_head *list;
+do_local:
+ list = &__get_cpu_var(blk_cpu_done);
+ list_add_tail(&req->csd.list, list);
+
+ /*
+ * if the list only contains our just added request,
+ * signal a raise of the softirq. If there are already
+ * entries there, someone already raised the irq but it
+ * hasn't run yet.
+ */
+ if (list->next == &req->csd.list)
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ } else if (raise_blk_irq(ccpu, req))
+ goto do_local;
+
+ local_irq_restore(flags);
+}
+
+/**
+ * blk_complete_request - end I/O on a request
+ * @req: the request being processed
+ *
+ * Description:
+ * Ends all I/O on a request. It does not handle partial completions,
+ * unless the driver actually implements this in its completion callback
+ * through requeueing. The actual completion happens out-of-order,
+ * through a softirq handler. The user must have registered a completion
+ * callback through blk_queue_softirq_done().
+ **/
+void blk_complete_request(struct request *req)
+{
+ if (unlikely(blk_should_fake_timeout(req->q)))
+ return;
+ if (!blk_mark_rq_complete(req))
+ __blk_complete_request(req);
+}
+EXPORT_SYMBOL(blk_complete_request);
+
+__init int blk_softirq_init(void)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+
+ open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
+ register_hotcpu_notifier(&blk_cpu_notifier);
+ return 0;
+}
+subsys_initcall(blk_softirq_init);
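
A usage sketch for the new softirq completion path (the mydrv_* names are hypothetical; the calls themselves appear in the file above): the driver registers its completion callback once, then calls blk_complete_request() from interrupt context, and the real completion work runs later from BLOCK_SOFTIRQ, optionally steered back to the submitting CPU group when QUEUE_FLAG_SAME_COMP is set.

#include <linux/blkdev.h>

/* Runs in softirq context, per the registration below. */
static void mydrv_softirq_done(struct request *rq)
{
	/* finish the request here, e.g. via the usual __blk_end_request path */
}

static void mydrv_setup_completion(struct request_queue *q)
{
	blk_queue_softirq_done(q, mydrv_softirq_done);
}

/* Called from the hardware interrupt handler: defer work to the softirq. */
static void mydrv_complete_from_irq(struct request *rq)
{
	blk_complete_request(rq);
}
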
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 304ec73ab82..21e275d7eed 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -156,6 +156,30 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
return ret;
}
+static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
+{
+ unsigned int set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
+
+ return queue_var_show(set != 0, page);
+}
+
+static ssize_t
+queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
+{
+ ssize_t ret = -EINVAL;
+#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+ unsigned long val;
+
+ ret = queue_var_store(&val, page, count);
+ spin_lock_irq(q->queue_lock);
+ if (val)
+ queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+ else
+ queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
+ spin_unlock_irq(q->queue_lock);
+#endif
+ return ret;
+}
static struct queue_sysfs_entry queue_requests_entry = {
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
@@ -197,6 +221,12 @@ static struct queue_sysfs_entry queue_nomerges_entry = {
.store = queue_nomerges_store,
};
+static struct queue_sysfs_entry queue_rq_affinity_entry = {
+ .attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_rq_affinity_show,
+ .store = queue_rq_affinity_store,
+};
+
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
@@ -205,6 +235,7 @@ static struct attribute *default_attrs[] = {
&queue_iosched_entry.attr,
&queue_hw_sector_size_entry.attr,
&queue_nomerges_entry.attr,
+ &queue_rq_affinity_entry.attr,
NULL,
};
@@ -310,7 +341,7 @@ int blk_register_queue(struct gendisk *disk)
if (!q->request_fn)
return 0;
- ret = kobject_add(&q->kobj, kobject_get(&disk->dev.kobj),
+ ret = kobject_add(&q->kobj, kobject_get(&disk_to_dev(disk)->kobj),
"%s", "queue");
if (ret < 0)
return ret;
@@ -339,6 +370,6 @@ void blk_unregister_queue(struct gendisk *disk)
kobject_uevent(&q->kobj, KOBJ_REMOVE);
kobject_del(&q->kobj);
- kobject_put(&disk->dev.kobj);
+ kobject_put(&disk_to_dev(disk)->kobj);
}
}
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 32667beb03e..c0d419e84ce 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -29,7 +29,7 @@ EXPORT_SYMBOL(blk_queue_find_tag);
* __blk_free_tags - release a given set of tag maintenance info
* @bqt: the tag map to free
*
- * Tries to free the specified @bqt@. Returns true if it was
+ * Tries to free the specified @bqt. Returns true if it was
* actually freed and false if there are still references using it
*/
static int __blk_free_tags(struct blk_queue_tag *bqt)
@@ -38,7 +38,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
retval = atomic_dec_and_test(&bqt->refcnt);
if (retval) {
- BUG_ON(bqt->busy);
+ BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
+ bqt->max_depth);
kfree(bqt->tag_index);
bqt->tag_index = NULL;
@@ -77,7 +78,7 @@ void __blk_queue_free_tags(struct request_queue *q)
* blk_free_tags - release a given set of tag maintenance info
* @bqt: the tag map to free
*
- * For externally managed @bqt@ frees the map. Callers of this
+ * For externally managed @bqt frees the map. Callers of this
* function must guarantee to have released all the queues that
* might have been using this tag map.
*/
@@ -93,7 +94,7 @@ EXPORT_SYMBOL(blk_free_tags);
* @q: the request queue for the device
*
* Notes:
- * This is used to disabled tagged queuing to a device, yet leave
+ * This is used to disable tagged queuing to a device, yet leave
* queue in function.
**/
void blk_queue_free_tags(struct request_queue *q)
@@ -147,7 +148,6 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
if (init_tag_map(q, tags, depth))
goto fail;
- tags->busy = 0;
atomic_set(&tags->refcnt, 1);
return tags;
fail:
@@ -271,7 +271,7 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
* @rq: the request that has completed
*
* Description:
- * Typically called when end_that_request_first() returns 0, meaning
+ * Typically called when end_that_request_first() returns %0, meaning
* all transfers have been done for a request. It's important to call
* this function before end_that_request_last(), as that will put the
* request back on the free list thus corrupting the internal tag list.
@@ -313,7 +313,6 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
* unlock memory barrier semantics.
*/
clear_bit_unlock(tag, bqt->tag_map);
- bqt->busy--;
}
EXPORT_SYMBOL(blk_queue_end_tag);
@@ -338,6 +337,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
struct blk_queue_tag *bqt = q->queue_tags;
+ unsigned max_depth, offset;
int tag;
if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
@@ -351,10 +351,19 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
/*
* Protect against shared tag maps, as we may not have exclusive
* access to the tag map.
+ *
+ * We reserve a few tags just for sync IO, since we don't want
+ * to starve sync IO on behalf of flooding async IO.
*/
+ max_depth = bqt->max_depth;
+ if (rq_is_sync(rq))
+ offset = 0;
+ else
+ offset = max_depth >> 2;
+
do {
- tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
- if (tag >= bqt->max_depth)
+ tag = find_next_zero_bit(bqt->tag_map, max_depth, offset);
+ if (tag >= max_depth)
return 1;
} while (test_and_set_bit_lock(tag, bqt->tag_map));
@@ -368,7 +377,6 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
bqt->tag_index[tag] = rq;
blkdev_dequeue_request(rq);
list_add(&rq->queuelist, &q->tag_busy_list);
- bqt->busy++;
return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
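
The sync-reservation comment in blk_queue_start_tag() above boils down to a starting offset for the bitmap search; a small standalone restatement (not the kernel code):

/*
 * With max_depth = 64, async requests start scanning at tag 16, so tags
 * 0..15 stay reachable only by sync IO even when async IO floods the map.
 */
static unsigned int first_tag_offset(unsigned int max_depth, int is_sync)
{
	return is_sync ? 0 : max_depth >> 2;
}
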
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
new file mode 100644
index 00000000000..972a63f848f
--- /dev/null
+++ b/block/blk-timeout.c
@@ -0,0 +1,238 @@
+/*
+ * Functions related to generic timeout handling of requests.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/fault-inject.h>
+
+#include "blk.h"
+
+#ifdef CONFIG_FAIL_IO_TIMEOUT
+
+static DECLARE_FAULT_ATTR(fail_io_timeout);
+
+static int __init setup_fail_io_timeout(char *str)
+{
+ return setup_fault_attr(&fail_io_timeout, str);
+}
+__setup("fail_io_timeout=", setup_fail_io_timeout);
+
+int blk_should_fake_timeout(struct request_queue *q)
+{
+ if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
+ return 0;
+
+ return should_fail(&fail_io_timeout, 1);
+}
+
+static int __init fail_io_timeout_debugfs(void)
+{
+ return init_fault_attr_dentries(&fail_io_timeout, "fail_io_timeout");
+}
+
+late_initcall(fail_io_timeout_debugfs);
+
+ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);
+
+ return sprintf(buf, "%d\n", set != 0);
+}
+
+ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ int val;
+
+ if (count) {
+ struct request_queue *q = disk->queue;
+ char *p = (char *) buf;
+
+ val = simple_strtoul(p, &p, 10);
+ spin_lock_irq(q->queue_lock);
+ if (val)
+ queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
+ else
+ queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
+ spin_unlock_irq(q->queue_lock);
+ }
+
+ return count;
+}
+
+#endif /* CONFIG_FAIL_IO_TIMEOUT */
+
+/*
+ * blk_delete_timer - Delete/cancel timer for a given request.
+ * @req: request that we are canceling timer for
+ *
+ */
+void blk_delete_timer(struct request *req)
+{
+ struct request_queue *q = req->q;
+
+ /*
+ * Nothing to detach
+ */
+ if (!q->rq_timed_out_fn || !req->deadline)
+ return;
+
+ list_del_init(&req->timeout_list);
+
+ if (list_empty(&q->timeout_list))
+ del_timer(&q->timeout);
+}
+
+static void blk_rq_timed_out(struct request *req)
+{
+ struct request_queue *q = req->q;
+ enum blk_eh_timer_return ret;
+
+ ret = q->rq_timed_out_fn(req);
+ switch (ret) {
+ case BLK_EH_HANDLED:
+ __blk_complete_request(req);
+ break;
+ case BLK_EH_RESET_TIMER:
+ blk_clear_rq_complete(req);
+ blk_add_timer(req);
+ break;
+ case BLK_EH_NOT_HANDLED:
+ /*
+ * LLD handles this for now but in the future
+ * we can send a request msg to abort the command
+ * and we can move more of the generic scsi eh code to
+ * the blk layer.
+ */
+ break;
+ default:
+ printk(KERN_ERR "block: bad eh return: %d\n", ret);
+ break;
+ }
+}
+
+void blk_rq_timed_out_timer(unsigned long data)
+{
+ struct request_queue *q = (struct request_queue *) data;
+ unsigned long flags, uninitialized_var(next), next_set = 0;
+ struct request *rq, *tmp;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
+ if (time_after_eq(jiffies, rq->deadline)) {
+ list_del_init(&rq->timeout_list);
+
+ /*
+ * Check if we raced with end io completion
+ */
+ if (blk_mark_rq_complete(rq))
+ continue;
+ blk_rq_timed_out(rq);
+ }
+ if (!next_set) {
+ next = rq->deadline;
+ next_set = 1;
+ } else if (time_after(next, rq->deadline))
+ next = rq->deadline;
+ }
+
+ if (next_set && !list_empty(&q->timeout_list))
+ mod_timer(&q->timeout, round_jiffies(next));
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/**
+ * blk_abort_request -- Request recovery for the specified command
+ * @req: pointer to the request of interest
+ *
+ * This function requests that the block layer start recovery for the
+ * request by deleting the timer and calling the q's timeout function.
+ * LLDDs who implement their own error recovery MAY ignore the timeout
+ * event if they generated blk_abort_req. Must hold queue lock.
+ */
+void blk_abort_request(struct request *req)
+{
+ if (blk_mark_rq_complete(req))
+ return;
+ blk_delete_timer(req);
+ blk_rq_timed_out(req);
+}
+EXPORT_SYMBOL_GPL(blk_abort_request);
+
+/**
+ * blk_add_timer - Start timeout timer for a single request
+ * @req: request that is about to start running.
+ *
+ * Notes:
+ * Each request has its own timer, and as it is added to the queue, we
+ * set up the timer. When the request completes, we cancel the timer.
+ */
+void blk_add_timer(struct request *req)
+{
+ struct request_queue *q = req->q;
+ unsigned long expiry;
+
+ if (!q->rq_timed_out_fn)
+ return;
+
+ BUG_ON(!list_empty(&req->timeout_list));
+ BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
+
+ if (req->timeout)
+ req->deadline = jiffies + req->timeout;
+ else {
+ req->deadline = jiffies + q->rq_timeout;
+ /*
+ * Some LLDs, like scsi, peek at the timeout to prevent
+ * a command from being retried forever.
+ */
+ req->timeout = q->rq_timeout;
+ }
+ list_add_tail(&req->timeout_list, &q->timeout_list);
+
+ /*
+ * If the timer isn't already pending or this timeout is earlier
+ * than an existing one, modify the timer. Round to next nearest
+ * second.
+ */
+ expiry = round_jiffies(req->deadline);
+
+ /*
+ * We use ->deadline == 0 to detect whether a timer was added or
+ * not, so just increase to next jiffy for that specific case
+ */
+ if (unlikely(!req->deadline))
+ req->deadline = 1;
+
+ if (!timer_pending(&q->timeout) ||
+ time_before(expiry, q->timeout.expires))
+ mod_timer(&q->timeout, expiry);
+}
+
+/**
+ * blk_abort_queue -- Abort all requests on a given queue
+ * @queue: pointer to queue
+ *
+ */
+void blk_abort_queue(struct request_queue *q)
+{
+ unsigned long flags;
+ struct request *rq, *tmp;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ elv_abort_queue(q);
+
+ list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
+ blk_abort_request(rq);
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+}
+EXPORT_SYMBOL_GPL(blk_abort_queue);
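
A hedged driver-side sketch of the timeout machinery added above: the rq_timed_out_fn shape is inferred from blk_rq_timed_out()'s call site (takes the request, returns enum blk_eh_timer_return), and the mydrv_* names are placeholders.

#include <linux/blkdev.h>

static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
{
	/*
	 * Either recover here and report BLK_EH_HANDLED, give the command
	 * more time with BLK_EH_RESET_TIMER, or return BLK_EH_NOT_HANDLED
	 * and let the LLD's own error handling take over.
	 */
	return BLK_EH_RESET_TIMER;
}

static void mydrv_setup_timeouts(struct request_queue *q)
{
	blk_queue_rq_timed_out(q, mydrv_timed_out);
	blk_queue_rq_timeout(q, 30 * HZ);
}
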
diff --git a/block/blk.h b/block/blk.h
index c79f30e1df5..d2e49af90db 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -17,6 +17,43 @@ void __blk_queue_free_tags(struct request_queue *q);
void blk_unplug_work(struct work_struct *work);
void blk_unplug_timeout(unsigned long data);
+void blk_rq_timed_out_timer(unsigned long data);
+void blk_delete_timer(struct request *);
+void blk_add_timer(struct request *);
+void __generic_unplug_device(struct request_queue *);
+
+/*
+ * Internal atomic flags for request handling
+ */
+enum rq_atomic_flags {
+ REQ_ATOM_COMPLETE = 0,
+};
+
+/*
+ * EH timer and IO completion will both attempt to 'grab' the request, make
+ * sure that only one of them succeeds
+ */
+static inline int blk_mark_rq_complete(struct request *rq)
+{
+ return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+}
+
+static inline void blk_clear_rq_complete(struct request *rq)
+{
+ clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+}
+
+#ifdef CONFIG_FAIL_IO_TIMEOUT
+int blk_should_fake_timeout(struct request_queue *);
+ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
+ssize_t part_timeout_store(struct device *, struct device_attribute *,
+ const char *, size_t);
+#else
+static inline int blk_should_fake_timeout(struct request_queue *q)
+{
+ return 0;
+}
+#endif
struct io_context *current_io_context(gfp_t gfp_flags, int node);
@@ -59,4 +96,16 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
#endif /* BLK_DEV_INTEGRITY */
+static inline int blk_cpu_to_group(int cpu)
+{
+#ifdef CONFIG_SCHED_MC
+ cpumask_t mask = cpu_coregroup_map(cpu);
+ return first_cpu(mask);
+#elif defined(CONFIG_SCHED_SMT)
+ return first_cpu(per_cpu(cpu_sibling_map, cpu));
+#else
+ return cpu;
+#endif
+}
+
#endif
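
The REQ_ATOM_COMPLETE helpers above implement a small ownership handshake between the timeout timer and normal completion; a sketch of the pattern follows (these helpers are block-layer internal, so this is an illustration of the mechanism rather than driver API):

/* Completion side: back off if the timeout handler already claimed rq. */
static void complete_side(struct request *rq)
{
	if (blk_mark_rq_complete(rq))
		return;			/* timeout path owns the request */
	__blk_complete_request(rq);
}

/* The timeout side does the mirror image before calling rq_timed_out_fn(). */
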
diff --git a/block/blktrace.c b/block/blktrace.c
index eb9651ccb24..85049a7e7a1 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -111,23 +111,9 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
*/
static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE) };
-/*
- * Bio action bits of interest
- */
-static u32 bio_act[9] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_ACT(BLK_TC_SYNC), 0, BLK_TC_ACT(BLK_TC_AHEAD), 0, 0, 0, BLK_TC_ACT(BLK_TC_META) };
-
-/*
- * More could be added as needed, taking care to increment the decrementer
- * to get correct indexing
- */
-#define trace_barrier_bit(rw) \
- (((rw) & (1 << BIO_RW_BARRIER)) >> (BIO_RW_BARRIER - 0))
-#define trace_sync_bit(rw) \
- (((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
-#define trace_ahead_bit(rw) \
- (((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD))
-#define trace_meta_bit(rw) \
- (((rw) & (1 << BIO_RW_META)) >> (BIO_RW_META - 3))
+/* The ilog2() calls fall out because they're constant */
+#define MASK_TC_BIT(rw, __name) ( (rw & (1 << BIO_RW_ ## __name)) << \
+ (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name) )
/*
* The worker for the various blk_add_trace*() types. Fills out a
@@ -147,10 +133,11 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
return;
what |= ddir_act[rw & WRITE];
- what |= bio_act[trace_barrier_bit(rw)];
- what |= bio_act[trace_sync_bit(rw)];
- what |= bio_act[trace_ahead_bit(rw)];
- what |= bio_act[trace_meta_bit(rw)];
+ what |= MASK_TC_BIT(rw, BARRIER);
+ what |= MASK_TC_BIT(rw, SYNC);
+ what |= MASK_TC_BIT(rw, AHEAD);
+ what |= MASK_TC_BIT(rw, META);
+ what |= MASK_TC_BIT(rw, DISCARD);
pid = tsk->pid;
if (unlikely(act_log_check(bt, what, sector, pid)))
@@ -382,7 +369,8 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
if (!buts->buf_size || !buts->buf_nr)
return -EINVAL;
- strcpy(buts->name, name);
+ strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
+ buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
/*
* some device names have larger paths - convert the slashes
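
The MASK_TC_BIT() macro above relocates a single bio flag bit into the trace action mask with one constant shift; a standalone demonstration of the identity follows (SRC_BIT and DST_BIT are made-up values, not the real BIO_RW_* or BLK_TC_* constants):

#include <assert.h>

#define SRC_BIT 4	/* hypothetical BIO_RW_* bit position */
#define DST_BIT 17	/* hypothetical ilog2(BLK_TC_*) + BLK_TC_SHIFT */

#define RELOCATE(rw)	(((rw) & (1u << SRC_BIT)) << (DST_BIT - SRC_BIT))

int main(void)
{
	assert(RELOCATE(1u << SRC_BIT) == 1u << DST_BIT);
	assert(RELOCATE(0u) == 0u);
	return 0;
}
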
diff --git a/block/bsg.c b/block/bsg.c
index 5fb9b0bdbe8..e8bd2475682 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -45,8 +45,6 @@ struct bsg_device {
char name[BUS_ID_SIZE];
int max_queue;
unsigned long flags;
- struct blk_scsi_cmd_filter *cmd_filter;
- mode_t *f_mode;
};
enum {
@@ -174,7 +172,8 @@ unlock:
}
static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
- struct sg_io_v4 *hdr, struct bsg_device *bd)
+ struct sg_io_v4 *hdr, struct bsg_device *bd,
+ fmode_t has_write_perm)
{
if (hdr->request_len > BLK_MAX_CDB) {
rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
@@ -187,8 +186,7 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
return -EFAULT;
if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
- if (blk_cmd_filter_verify_command(bd->cmd_filter, rq->cmd,
- bd->f_mode))
+ if (blk_verify_command(&q->cmd_filter, rq->cmd, has_write_perm))
return -EPERM;
} else if (!capable(CAP_SYS_RAWIO))
return -EPERM;
@@ -244,7 +242,7 @@ bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
* map sg_io_v4 to a request.
*/
static struct request *
-bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
+bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
{
struct request_queue *q = bd->queue;
struct request *rq, *next_rq = NULL;
@@ -266,7 +264,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
rq = blk_get_request(q, rw, GFP_KERNEL);
if (!rq)
return ERR_PTR(-ENOMEM);
- ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd);
+ ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
if (ret)
goto out;
@@ -285,7 +283,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
next_rq->cmd_type = rq->cmd_type;
dxferp = (void*)(unsigned long)hdr->din_xferp;
- ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
+ ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
+ hdr->din_xfer_len, GFP_KERNEL);
if (ret)
goto out;
}
@@ -300,7 +299,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
dxfer_len = 0;
if (dxfer_len) {
- ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
+ ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
+ GFP_KERNEL);
if (ret)
goto out;
}
@@ -568,25 +568,6 @@ static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
set_bit(BSG_F_BLOCK, &bd->flags);
}
-static void bsg_set_cmd_filter(struct bsg_device *bd,
- struct file *file)
-{
- struct inode *inode;
- struct gendisk *disk;
-
- if (!file)
- return;
-
- inode = file->f_dentry->d_inode;
- if (!inode)
- return;
-
- disk = inode->i_bdev->bd_disk;
-
- bd->cmd_filter = &disk->cmd_filter;
- bd->f_mode = &file->f_mode;
-}
-
/*
* Check if the error is a "real" error that we should return.
*/
@@ -608,7 +589,6 @@ bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
dprintk("%s: read %Zd bytes\n", bd->name, count);
bsg_set_block(bd, file);
- bsg_set_cmd_filter(bd, file);
bytes_read = 0;
ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
@@ -621,7 +601,8 @@ bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
}
static int __bsg_write(struct bsg_device *bd, const char __user *buf,
- size_t count, ssize_t *bytes_written)
+ size_t count, ssize_t *bytes_written,
+ fmode_t has_write_perm)
{
struct bsg_command *bc;
struct request *rq;
@@ -652,7 +633,7 @@ static int __bsg_write(struct bsg_device *bd, const char __user *buf,
/*
* get a request, fill in the blanks, and add to request queue
*/
- rq = bsg_map_hdr(bd, &bc->hdr);
+ rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
rq = NULL;
@@ -683,10 +664,11 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
dprintk("%s: write %Zd bytes\n", bd->name, count);
bsg_set_block(bd, file);
- bsg_set_cmd_filter(bd, file);
bytes_written = 0;
- ret = __bsg_write(bd, buf, count, &bytes_written);
+ ret = __bsg_write(bd, buf, count, &bytes_written,
+ file->f_mode & FMODE_WRITE);
+
*ppos = bytes_written;
/*
@@ -792,7 +774,6 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
bd->queue = rq;
bsg_set_block(bd, file);
- bsg_set_cmd_filter(bd, file);
atomic_set(&bd->ref_count, 1);
mutex_lock(&bsg_mutex);
@@ -933,7 +914,7 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case SG_EMULATED_HOST:
case SCSI_IOCTL_SEND_COMMAND: {
void __user *uarg = (void __user *) arg;
- return scsi_cmd_ioctl(file, bd->queue, NULL, cmd, uarg);
+ return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
}
case SG_IO: {
struct request *rq;
@@ -943,7 +924,7 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (copy_from_user(&hdr, uarg, sizeof(hdr)))
return -EFAULT;
- rq = bsg_map_hdr(bd, &hdr);
+ rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -1044,7 +1025,7 @@ int bsg_register_queue(struct request_queue *q, struct device *parent,
bcd->release = release;
kref_init(&bcd->ref);
dev = MKDEV(bsg_major, bcd->minor);
- class_dev = device_create(bsg_class, parent, dev, "%s", devname);
+ class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
if (IS_ERR(class_dev)) {
ret = PTR_ERR(class_dev);
goto put_dev;
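
The bsg changes above replace the cached per-device filter and f_mode pointer with the queue-wide cmd_filter plus an explicit write-permission flag; a minimal sketch of how a caller now performs the check (the function name is a placeholder):

#include <linux/blkdev.h>

static int mydrv_check_scsi_cmd(struct request_queue *q, unsigned char *cmd,
				fmode_t f_mode)
{
	/* non-zero when the file was opened writable, as bsg_write() passes */
	return blk_verify_command(&q->cmd_filter, cmd, f_mode & FMODE_WRITE);
}
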
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1e2aff812ee..6a062eebbd1 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -39,6 +39,7 @@ static int cfq_slice_idle = HZ / 125;
#define CFQ_MIN_TT (2)
#define CFQ_SLICE_SCALE (5)
+#define CFQ_HW_QUEUE_MIN (5)
#define RQ_CIC(rq) \
((struct cfq_io_context *) (rq)->elevator_private)
@@ -86,7 +87,14 @@ struct cfq_data {
int rq_in_driver;
int sync_flight;
+
+ /*
+ * queue-depth detection
+ */
+ int rq_queued;
int hw_tag;
+ int hw_tag_samples;
+ int rq_in_driver_peak;
/*
* idle window management
@@ -244,7 +252,7 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
if (cfqd->busy_queues) {
cfq_log(cfqd, "schedule dispatch");
- kblockd_schedule_work(&cfqd->unplug_work);
+ kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
}
}
@@ -654,15 +662,6 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
cfqd->rq_in_driver);
- /*
- * If the depth is larger 1, it really could be queueing. But lets
- * make the mark a little higher - idling could still be good for
- * low queueing, and a low queueing number could also just indicate
- * a SCSI mid layer like behaviour where limit+1 is often seen.
- */
- if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
- cfqd->hw_tag = 1;
-
cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
}
@@ -686,6 +685,7 @@ static void cfq_remove_request(struct request *rq)
list_del_init(&rq->queuelist);
cfq_del_rq_rb(rq);
+ cfqq->cfqd->rq_queued--;
if (rq_is_meta(rq)) {
WARN_ON(!cfqq->meta_pending);
cfqq->meta_pending--;
@@ -878,6 +878,14 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
struct cfq_io_context *cic;
unsigned long sl;
+ /*
+ * SSD device without seek penalty, disable idling. But only do so
+ * for devices that support queuing, otherwise we still have a problem
+ * with sync vs async workloads.
+ */
+ if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
+ return;
+
WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
WARN_ON(cfq_cfqq_slice_new(cfqq));
@@ -1833,6 +1841,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
{
struct cfq_io_context *cic = RQ_CIC(rq);
+ cfqd->rq_queued++;
if (rq_is_meta(rq))
cfqq->meta_pending++;
@@ -1880,6 +1889,31 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
cfq_rq_enqueued(cfqd, cfqq, rq);
}
+/*
+ * Update hw_tag based on peak queue depth over 50 samples under
+ * sufficient load.
+ */
+static void cfq_update_hw_tag(struct cfq_data *cfqd)
+{
+ if (cfqd->rq_in_driver > cfqd->rq_in_driver_peak)
+ cfqd->rq_in_driver_peak = cfqd->rq_in_driver;
+
+ if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
+ cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
+ return;
+
+ if (cfqd->hw_tag_samples++ < 50)
+ return;
+
+ if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN)
+ cfqd->hw_tag = 1;
+ else
+ cfqd->hw_tag = 0;
+
+ cfqd->hw_tag_samples = 0;
+ cfqd->rq_in_driver_peak = 0;
+}
+
static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -1890,6 +1924,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
now = jiffies;
cfq_log_cfqq(cfqd, cfqq, "complete");
+ cfq_update_hw_tag(cfqd);
+
WARN_ON(!cfqd->rq_in_driver);
WARN_ON(!cfqq->dispatched);
cfqd->rq_in_driver--;
@@ -2200,6 +2236,7 @@ static void *cfq_init_queue(struct request_queue *q)
cfqd->cfq_slice[1] = cfq_slice_sync;
cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle;
+ cfqd->hw_tag = 1;
return cfqd;
}
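
A standalone restatement of the hw_tag sampling heuristic above (hypothetical struct; the thresholds mirror CFQ_HW_QUEUE_MIN and the 50-sample window, but this is not the kernel code): a device that keeps 5 or more requests in flight at its peak over a busy 50-completion window is treated as command-queuing, which in turn lets cfq_arm_slice_timer() skip idling on non-rotational devices.

struct hw_tag_state { int peak, samples, hw_tag; };

static void sample_depth(struct hw_tag_state *s, int queued, int in_driver)
{
	if (in_driver > s->peak)
		s->peak = in_driver;

	/* ignore samples taken while the queue was not loaded enough */
	if (queued <= 5 && in_driver <= 5)
		return;

	if (s->samples++ < 50)
		return;

	s->hw_tag = (s->peak >= 5);
	s->samples = 0;
	s->peak = 0;
}
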
diff --git a/block/cmd-filter.c b/block/cmd-filter.c
index eec4404fd35..504b275e1b9 100644
--- a/block/cmd-filter.c
+++ b/block/cmd-filter.c
@@ -20,15 +20,14 @@
#include <linux/list.h>
#include <linux/genhd.h>
#include <linux/spinlock.h>
-#include <linux/parser.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <scsi/scsi.h>
#include <linux/cdrom.h>
-int blk_cmd_filter_verify_command(struct blk_scsi_cmd_filter *filter,
- unsigned char *cmd, mode_t *f_mode)
+int blk_verify_command(struct blk_cmd_filter *filter,
+ unsigned char *cmd, fmode_t has_write_perm)
{
/* root can do any command. */
if (capable(CAP_SYS_RAWIO))
@@ -43,34 +42,16 @@ int blk_cmd_filter_verify_command(struct blk_scsi_cmd_filter *filter,
return 0;
/* Write-safe commands require a writable open */
- if (test_bit(cmd[0], filter->write_ok) && (*f_mode & FMODE_WRITE))
+ if (test_bit(cmd[0], filter->write_ok) && has_write_perm)
return 0;
return -EPERM;
}
-EXPORT_SYMBOL(blk_cmd_filter_verify_command);
-
-int blk_verify_command(struct file *file, unsigned char *cmd)
-{
- struct gendisk *disk;
- struct inode *inode;
-
- if (!file)
- return -EINVAL;
-
- inode = file->f_dentry->d_inode;
- if (!inode)
- return -EINVAL;
-
- disk = inode->i_bdev->bd_disk;
-
- return blk_cmd_filter_verify_command(&disk->cmd_filter,
- cmd, &file->f_mode);
-}
EXPORT_SYMBOL(blk_verify_command);
+#if 0
/* and now, the sysfs stuff */
-static ssize_t rcf_cmds_show(struct blk_scsi_cmd_filter *filter, char *page,
+static ssize_t rcf_cmds_show(struct blk_cmd_filter *filter, char *page,
int rw)
{
char *npage = page;
@@ -84,8 +65,7 @@ static ssize_t rcf_cmds_show(struct blk_scsi_cmd_filter *filter, char *page,
for (i = 0; i < BLK_SCSI_MAX_CMDS; i++) {
if (test_bit(i, okbits)) {
- sprintf(npage, "%02x", i);
- npage += 2;
+ npage += sprintf(npage, "0x%02x", i);
if (i < BLK_SCSI_MAX_CMDS - 1)
sprintf(npage++, " ");
}
@@ -97,57 +77,65 @@ static ssize_t rcf_cmds_show(struct blk_scsi_cmd_filter *filter, char *page,
return npage - page;
}
-static ssize_t rcf_readcmds_show(struct blk_scsi_cmd_filter *filter, char *page)
+static ssize_t rcf_readcmds_show(struct blk_cmd_filter *filter, char *page)
{
return rcf_cmds_show(filter, page, READ);
}
-static ssize_t rcf_writecmds_show(struct blk_scsi_cmd_filter *filter,
+static ssize_t rcf_writecmds_show(struct blk_cmd_filter *filter,
char *page)
{
return rcf_cmds_show(filter, page, WRITE);
}
-static ssize_t rcf_cmds_store(struct blk_scsi_cmd_filter *filter,
+static ssize_t rcf_cmds_store(struct blk_cmd_filter *filter,
const char *page, size_t count, int rw)
{
- ssize_t ret = 0;
unsigned long okbits[BLK_SCSI_CMD_PER_LONG], *target_okbits;
- int cmd, status, len;
- substring_t ss;
-
- memset(&okbits, 0, sizeof(okbits));
-
- for (len = strlen(page); len > 0; len -= 3) {
- if (len < 2)
- break;
- ss.from = (char *) page + ret;
- ss.to = (char *) page + ret + 2;
- ret += 3;
- status = match_hex(&ss, &cmd);
+ int cmd, set;
+ char *p, *status;
+
+ if (rw == READ) {
+ memcpy(&okbits, filter->read_ok, sizeof(okbits));
+ target_okbits = filter->read_ok;
+ } else {
+ memcpy(&okbits, filter->write_ok, sizeof(okbits));
+ target_okbits = filter->write_ok;
+ }
+
+ while ((p = strsep((char **)&page, " ")) != NULL) {
+ set = 1;
+
+ if (p[0] == '+') {
+ p++;
+ } else if (p[0] == '-') {
+ set = 0;
+ p++;
+ }
+
+ cmd = simple_strtol(p, &status, 16);
+
/* either of these cases means invalid input, so do nothing. */
- if (status || cmd >= BLK_SCSI_MAX_CMDS)
+ if ((status == p) || cmd >= BLK_SCSI_MAX_CMDS)
return -EINVAL;
- __set_bit(cmd, okbits);
+ if (set)
+ __set_bit(cmd, okbits);
+ else
+ __clear_bit(cmd, okbits);
}
- if (rw == READ)
- target_okbits = filter->read_ok;
- else
- target_okbits = filter->write_ok;
-
- memmove(target_okbits, okbits, sizeof(okbits));
+ memcpy(target_okbits, okbits, sizeof(okbits));
return count;
}
-static ssize_t rcf_readcmds_store(struct blk_scsi_cmd_filter *filter,
+static ssize_t rcf_readcmds_store(struct blk_cmd_filter *filter,
const char *page, size_t count)
{
return rcf_cmds_store(filter, page, count, READ);
}
-static ssize_t rcf_writecmds_store(struct blk_scsi_cmd_filter *filter,
+static ssize_t rcf_writecmds_store(struct blk_cmd_filter *filter,
const char *page, size_t count)
{
return rcf_cmds_store(filter, page, count, WRITE);
@@ -155,8 +143,8 @@ static ssize_t rcf_writecmds_store(struct blk_scsi_cmd_filter *filter,
struct rcf_sysfs_entry {
struct attribute attr;
- ssize_t (*show)(struct blk_scsi_cmd_filter *, char *);
- ssize_t (*store)(struct blk_scsi_cmd_filter *, const char *, size_t);
+ ssize_t (*show)(struct blk_cmd_filter *, char *);
+ ssize_t (*store)(struct blk_cmd_filter *, const char *, size_t);
};
static struct rcf_sysfs_entry rcf_readcmds_entry = {
@@ -183,9 +171,9 @@ static ssize_t
rcf_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct rcf_sysfs_entry *entry = to_rcf(attr);
- struct blk_scsi_cmd_filter *filter;
+ struct blk_cmd_filter *filter;
- filter = container_of(kobj, struct blk_scsi_cmd_filter, kobj);
+ filter = container_of(kobj, struct blk_cmd_filter, kobj);
if (entry->show)
return entry->show(filter, page);
@@ -197,7 +185,7 @@ rcf_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
struct rcf_sysfs_entry *entry = to_rcf(attr);
- struct blk_scsi_cmd_filter *filter;
+ struct blk_cmd_filter *filter;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
@@ -205,7 +193,7 @@ rcf_attr_store(struct kobject *kobj, struct attribute *attr,
if (!entry->store)
return -EINVAL;
- filter = container_of(kobj, struct blk_scsi_cmd_filter, kobj);
+ filter = container_of(kobj, struct blk_cmd_filter, kobj);
return entry->store(filter, page, length);
}
@@ -219,116 +207,26 @@ static struct kobj_type rcf_ktype = {
.default_attrs = default_attrs,
};
-#ifndef MAINTENANCE_IN_CMD
-#define MAINTENANCE_IN_CMD 0xa3
-#endif
-
-static void rcf_set_defaults(struct blk_scsi_cmd_filter *filter)
-{
- /* Basic read-only commands */
- __set_bit(TEST_UNIT_READY, filter->read_ok);
- __set_bit(REQUEST_SENSE, filter->read_ok);
- __set_bit(READ_6, filter->read_ok);
- __set_bit(READ_10, filter->read_ok);
- __set_bit(READ_12, filter->read_ok);
- __set_bit(READ_16, filter->read_ok);
- __set_bit(READ_BUFFER, filter->read_ok);
- __set_bit(READ_DEFECT_DATA, filter->read_ok);
- __set_bit(READ_CAPACITY, filter->read_ok);
- __set_bit(READ_LONG, filter->read_ok);
- __set_bit(INQUIRY, filter->read_ok);
- __set_bit(MODE_SENSE, filter->read_ok);
- __set_bit(MODE_SENSE_10, filter->read_ok);
- __set_bit(LOG_SENSE, filter->read_ok);
- __set_bit(START_STOP, filter->read_ok);
- __set_bit(GPCMD_VERIFY_10, filter->read_ok);
- __set_bit(VERIFY_16, filter->read_ok);
- __set_bit(REPORT_LUNS, filter->read_ok);
- __set_bit(SERVICE_ACTION_IN, filter->read_ok);
- __set_bit(RECEIVE_DIAGNOSTIC, filter->read_ok);
- __set_bit(MAINTENANCE_IN_CMD, filter->read_ok);
- __set_bit(GPCMD_READ_BUFFER_CAPACITY, filter->read_ok);
-
- /* Audio CD commands */
- __set_bit(GPCMD_PLAY_CD, filter->read_ok);
- __set_bit(GPCMD_PLAY_AUDIO_10, filter->read_ok);
- __set_bit(GPCMD_PLAY_AUDIO_MSF, filter->read_ok);
- __set_bit(GPCMD_PLAY_AUDIO_TI, filter->read_ok);
- __set_bit(GPCMD_PAUSE_RESUME, filter->read_ok);
-
- /* CD/DVD data reading */
- __set_bit(GPCMD_READ_CD, filter->read_ok);
- __set_bit(GPCMD_READ_CD_MSF, filter->read_ok);
- __set_bit(GPCMD_READ_DISC_INFO, filter->read_ok);
- __set_bit(GPCMD_READ_CDVD_CAPACITY, filter->read_ok);
- __set_bit(GPCMD_READ_DVD_STRUCTURE, filter->read_ok);
- __set_bit(GPCMD_READ_HEADER, filter->read_ok);
- __set_bit(GPCMD_READ_TRACK_RZONE_INFO, filter->read_ok);
- __set_bit(GPCMD_READ_SUBCHANNEL, filter->read_ok);
- __set_bit(GPCMD_READ_TOC_PMA_ATIP, filter->read_ok);
- __set_bit(GPCMD_REPORT_KEY, filter->read_ok);
- __set_bit(GPCMD_SCAN, filter->read_ok);
- __set_bit(GPCMD_GET_CONFIGURATION, filter->read_ok);
- __set_bit(GPCMD_READ_FORMAT_CAPACITIES, filter->read_ok);
- __set_bit(GPCMD_GET_EVENT_STATUS_NOTIFICATION, filter->read_ok);
- __set_bit(GPCMD_GET_PERFORMANCE, filter->read_ok);
- __set_bit(GPCMD_SEEK, filter->read_ok);
- __set_bit(GPCMD_STOP_PLAY_SCAN, filter->read_ok);
-
- /* Basic writing commands */
- __set_bit(WRITE_6, filter->write_ok);
- __set_bit(WRITE_10, filter->write_ok);
- __set_bit(WRITE_VERIFY, filter->write_ok);
- __set_bit(WRITE_12, filter->write_ok);
- __set_bit(WRITE_VERIFY_12, filter->write_ok);
- __set_bit(WRITE_16, filter->write_ok);
- __set_bit(WRITE_LONG, filter->write_ok);
- __set_bit(WRITE_LONG_2, filter->write_ok);
- __set_bit(ERASE, filter->write_ok);
- __set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
- __set_bit(MODE_SELECT, filter->write_ok);
- __set_bit(LOG_SELECT, filter->write_ok);
- __set_bit(GPCMD_BLANK, filter->write_ok);
- __set_bit(GPCMD_CLOSE_TRACK, filter->write_ok);
- __set_bit(GPCMD_FLUSH_CACHE, filter->write_ok);
- __set_bit(GPCMD_FORMAT_UNIT, filter->write_ok);
- __set_bit(GPCMD_REPAIR_RZONE_TRACK, filter->write_ok);
- __set_bit(GPCMD_RESERVE_RZONE_TRACK, filter->write_ok);
- __set_bit(GPCMD_SEND_DVD_STRUCTURE, filter->write_ok);
- __set_bit(GPCMD_SEND_EVENT, filter->write_ok);
- __set_bit(GPCMD_SEND_KEY, filter->write_ok);
- __set_bit(GPCMD_SEND_OPC, filter->write_ok);
- __set_bit(GPCMD_SEND_CUE_SHEET, filter->write_ok);
- __set_bit(GPCMD_SET_SPEED, filter->write_ok);
- __set_bit(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, filter->write_ok);
- __set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok);
- __set_bit(GPCMD_SET_STREAMING, filter->write_ok);
-}
-
int blk_register_filter(struct gendisk *disk)
{
int ret;
- struct blk_scsi_cmd_filter *filter = &disk->cmd_filter;
- struct kobject *parent = kobject_get(disk->holder_dir->parent);
-
- if (!parent)
- return -ENODEV;
-
- ret = kobject_init_and_add(&filter->kobj, &rcf_ktype, parent,
- "%s", "cmd_filter");
+ struct blk_cmd_filter *filter = &disk->queue->cmd_filter;
+ ret = kobject_init_and_add(&filter->kobj, &rcf_ktype,
+ &disk_to_dev(disk)->kobj,
+ "%s", "cmd_filter");
if (ret < 0)
return ret;
- rcf_set_defaults(filter);
return 0;
}
+EXPORT_SYMBOL(blk_register_filter);
void blk_unregister_filter(struct gendisk *disk)
{
- struct blk_scsi_cmd_filter *filter = &disk->cmd_filter;
+ struct blk_cmd_filter *filter = &disk->queue->cmd_filter;
kobject_put(&filter->kobj);
- kobject_put(disk->holder_dir->parent);
}
-
+EXPORT_SYMBOL(blk_unregister_filter);
+#endif
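
For reference, the reworked rcf_cmds_store() above (currently compiled out under #if 0) accepts space-separated hex opcodes, each optionally prefixed with '+' to allow or '-' to revoke; a userspace-style parsing illustration of that input format:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char buf[] = "+0x28 -0x2a 0x12";
	char *p, *s = buf;

	while ((p = strsep(&s, " ")) != NULL) {
		int set = (*p != '-');

		if (*p == '+' || *p == '-')
			p++;
		printf("opcode 0x%02lx -> %s\n", strtol(p, NULL, 16),
		       set ? "allow" : "revoke");
	}
	return 0;
}
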
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index c23177e4623..3d3e7a46f38 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -71,8 +71,8 @@ static int compat_hdio_getgeo(struct gendisk *disk, struct block_device *bdev,
return ret;
}
-static int compat_hdio_ioctl(struct inode *inode, struct file *file,
- struct gendisk *disk, unsigned int cmd, unsigned long arg)
+static int compat_hdio_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
{
mm_segment_t old_fs = get_fs();
unsigned long kval;
@@ -80,7 +80,7 @@ static int compat_hdio_ioctl(struct inode *inode, struct file *file,
int error;
set_fs(KERNEL_DS);
- error = blkdev_driver_ioctl(inode, file, disk,
+ error = __blkdev_driver_ioctl(bdev, mode,
cmd, (unsigned long)(&kval));
set_fs(old_fs);
@@ -111,8 +111,8 @@ struct compat_cdrom_generic_command {
compat_caddr_t reserved[1];
};
-static int compat_cdrom_read_audio(struct inode *inode, struct file *file,
- struct gendisk *disk, unsigned int cmd, unsigned long arg)
+static int compat_cdrom_read_audio(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
{
struct cdrom_read_audio __user *cdread_audio;
struct compat_cdrom_read_audio __user *cdread_audio32;
@@ -134,12 +134,12 @@ static int compat_cdrom_read_audio(struct inode *inode, struct file *file,
if (put_user(datap, &cdread_audio->buf))
return -EFAULT;
- return blkdev_driver_ioctl(inode, file, disk, cmd,
+ return __blkdev_driver_ioctl(bdev, mode, cmd,
(unsigned long)cdread_audio);
}
-static int compat_cdrom_generic_command(struct inode *inode, struct file *file,
- struct gendisk *disk, unsigned int cmd, unsigned long arg)
+static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
{
struct cdrom_generic_command __user *cgc;
struct compat_cdrom_generic_command __user *cgc32;
@@ -167,7 +167,7 @@ static int compat_cdrom_generic_command(struct inode *inode, struct file *file,
put_user(compat_ptr(data), &cgc->reserved[0]))
return -EFAULT;
- return blkdev_driver_ioctl(inode, file, disk, cmd, (unsigned long)cgc);
+ return __blkdev_driver_ioctl(bdev, mode, cmd, (unsigned long)cgc);
}
struct compat_blkpg_ioctl_arg {
@@ -177,7 +177,7 @@ struct compat_blkpg_ioctl_arg {
compat_caddr_t data;
};
-static int compat_blkpg_ioctl(struct inode *inode, struct file *file,
+static int compat_blkpg_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, struct compat_blkpg_ioctl_arg __user *ua32)
{
struct blkpg_ioctl_arg __user *a = compat_alloc_user_space(sizeof(*a));
@@ -196,7 +196,7 @@ static int compat_blkpg_ioctl(struct inode *inode, struct file *file,
if (err)
return err;
- return blkdev_ioctl(inode, file, cmd, (unsigned long)a);
+ return blkdev_ioctl(bdev, mode, cmd, (unsigned long)a);
}
#define BLKBSZGET_32 _IOR(0x12, 112, int)
@@ -308,8 +308,8 @@ static struct {
#define NR_FD_IOCTL_TRANS ARRAY_SIZE(fd_ioctl_trans_table)
-static int compat_fd_ioctl(struct inode *inode, struct file *file,
- struct gendisk *disk, unsigned int cmd, unsigned long arg)
+static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
{
mm_segment_t old_fs = get_fs();
void *karg = NULL;
@@ -413,7 +413,7 @@ static int compat_fd_ioctl(struct inode *inode, struct file *file,
return -EINVAL;
}
set_fs(KERNEL_DS);
- err = blkdev_driver_ioctl(inode, file, disk, kcmd, (unsigned long)karg);
+ err = __blkdev_driver_ioctl(bdev, mode, kcmd, (unsigned long)karg);
set_fs(old_fs);
if (err)
goto out;
@@ -579,11 +579,9 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
return 0;
}
-static int compat_blkdev_driver_ioctl(struct inode *inode, struct file *file,
- struct gendisk *disk, unsigned cmd, unsigned long arg)
+static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned cmd, unsigned long arg)
{
- int ret;
-
switch (cmd) {
case HDIO_GET_UNMASKINTR:
case HDIO_GET_MULTCOUNT:
@@ -596,7 +594,7 @@ static int compat_blkdev_driver_ioctl(struct inode *inode, struct file *file,
case HDIO_GET_ACOUSTIC:
case HDIO_GET_ADDRESS:
case HDIO_GET_BUSSTATE:
- return compat_hdio_ioctl(inode, file, disk, cmd, arg);
+ return compat_hdio_ioctl(bdev, mode, cmd, arg);
case FDSETPRM32:
case FDDEFPRM32:
case FDGETPRM32:
@@ -606,11 +604,11 @@ static int compat_blkdev_driver_ioctl(struct inode *inode, struct file *file,
case FDPOLLDRVSTAT32:
case FDGETFDCSTAT32:
case FDWERRORGET32:
- return compat_fd_ioctl(inode, file, disk, cmd, arg);
+ return compat_fd_ioctl(bdev, mode, cmd, arg);
case CDROMREADAUDIO:
- return compat_cdrom_read_audio(inode, file, disk, cmd, arg);
+ return compat_cdrom_read_audio(bdev, mode, cmd, arg);
case CDROM_SEND_PACKET:
- return compat_cdrom_generic_command(inode, file, disk, cmd, arg);
+ return compat_cdrom_generic_command(bdev, mode, cmd, arg);
/*
* No handler required for the ones below, we just need to
@@ -679,55 +677,49 @@ static int compat_blkdev_driver_ioctl(struct inode *inode, struct file *file,
case DVD_WRITE_STRUCT:
case DVD_AUTH:
arg = (unsigned long)compat_ptr(arg);
- /* These intepret arg as an unsigned long, not as a pointer,
- * so we must not do compat_ptr() conversion. */
- case HDIO_SET_MULTCOUNT:
- case HDIO_SET_UNMASKINTR:
- case HDIO_SET_KEEPSETTINGS:
- case HDIO_SET_32BIT:
- case HDIO_SET_NOWERR:
- case HDIO_SET_DMA:
- case HDIO_SET_PIO_MODE:
- case HDIO_SET_NICE:
- case HDIO_SET_WCACHE:
- case HDIO_SET_ACOUSTIC:
- case HDIO_SET_BUSSTATE:
- case HDIO_SET_ADDRESS:
- case CDROMEJECT_SW:
- case CDROM_SET_OPTIONS:
- case CDROM_CLEAR_OPTIONS:
- case CDROM_SELECT_SPEED:
- case CDROM_SELECT_DISC:
- case CDROM_MEDIA_CHANGED:
- case CDROM_DRIVE_STATUS:
- case CDROM_LOCKDOOR:
- case CDROM_DEBUG:
break;
default:
/* unknown ioctl number */
return -ENOIOCTLCMD;
}
- if (disk->fops->unlocked_ioctl)
- return disk->fops->unlocked_ioctl(file, cmd, arg);
-
- if (disk->fops->ioctl) {
- lock_kernel();
- ret = disk->fops->ioctl(inode, file, cmd, arg);
- unlock_kernel();
- return ret;
- }
-
- return -ENOTTY;
+ return __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}
-static int compat_blkdev_locked_ioctl(struct inode *inode, struct file *file,
- struct block_device *bdev,
- unsigned cmd, unsigned long arg)
+/* Most of the generic ioctls are handled in the normal fallback path.
+ This assumes the blkdev's low level compat_ioctl always returns
+ ENOIOCTLCMD for unknown ioctls. */
+long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
+ int ret = -ENOIOCTLCMD;
+ struct inode *inode = file->f_mapping->host;
+ struct block_device *bdev = inode->i_bdev;
+ struct gendisk *disk = bdev->bd_disk;
+ fmode_t mode = file->f_mode;
struct backing_dev_info *bdi;
+ loff_t size;
+
+ if (file->f_flags & O_NDELAY)
+ mode |= FMODE_NDELAY_NOW;
switch (cmd) {
+ case HDIO_GETGEO:
+ return compat_hdio_getgeo(disk, bdev, compat_ptr(arg));
+ case BLKFLSBUF:
+ case BLKROSET:
+ case BLKDISCARD:
+ /*
+ * the ones below are implemented in blkdev_locked_ioctl,
+ * but we call blkdev_ioctl, which gets the lock for us
+ */
+ case BLKRRPART:
+ return blkdev_ioctl(bdev, mode, cmd,
+ (unsigned long)compat_ptr(arg));
+ case BLKBSZSET_32:
+ return blkdev_ioctl(bdev, mode, BLKBSZSET,
+ (unsigned long)compat_ptr(arg));
+ case BLKPG:
+ return compat_blkpg_ioctl(bdev, mode, cmd, compat_ptr(arg));
case BLKRAGET:
case BLKFRAGET:
if (!arg)
@@ -753,64 +745,36 @@ static int compat_blkdev_locked_ioctl(struct inode *inode, struct file *file,
bdi = blk_get_backing_dev_info(bdev);
if (bdi == NULL)
return -ENOTTY;
+ lock_kernel();
bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+ unlock_kernel();
return 0;
case BLKGETSIZE:
- if ((bdev->bd_inode->i_size >> 9) > ~0UL)
+ size = bdev->bd_inode->i_size;
+ if ((size >> 9) > ~0UL)
return -EFBIG;
- return compat_put_ulong(arg, bdev->bd_inode->i_size >> 9);
+ return compat_put_ulong(arg, size >> 9);
case BLKGETSIZE64_32:
return compat_put_u64(arg, bdev->bd_inode->i_size);
case BLKTRACESETUP32:
- return compat_blk_trace_setup(bdev, compat_ptr(arg));
+ lock_kernel();
+ ret = compat_blk_trace_setup(bdev, compat_ptr(arg));
+ unlock_kernel();
+ return ret;
case BLKTRACESTART: /* compatible */
case BLKTRACESTOP: /* compatible */
case BLKTRACETEARDOWN: /* compatible */
- return blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
- }
- return -ENOIOCTLCMD;
-}
-
-/* Most of the generic ioctls are handled in the normal fallback path.
- This assumes the blkdev's low level compat_ioctl always returns
- ENOIOCTLCMD for unknown ioctls. */
-long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
-{
- int ret = -ENOIOCTLCMD;
- struct inode *inode = file->f_mapping->host;
- struct block_device *bdev = inode->i_bdev;
- struct gendisk *disk = bdev->bd_disk;
-
- switch (cmd) {
- case HDIO_GETGEO:
- return compat_hdio_getgeo(disk, bdev, compat_ptr(arg));
- case BLKFLSBUF:
- case BLKROSET:
- /*
- * the ones below are implemented in blkdev_locked_ioctl,
- * but we call blkdev_ioctl, which gets the lock for us
- */
- case BLKRRPART:
- return blkdev_ioctl(inode, file, cmd,
- (unsigned long)compat_ptr(arg));
- case BLKBSZSET_32:
- return blkdev_ioctl(inode, file, BLKBSZSET,
- (unsigned long)compat_ptr(arg));
- case BLKPG:
- return compat_blkpg_ioctl(inode, file, cmd, compat_ptr(arg));
- }
-
- lock_kernel();
- ret = compat_blkdev_locked_ioctl(inode, file, bdev, cmd, arg);
- /* FIXME: why do we assume -> compat_ioctl needs the BKL? */
- if (ret == -ENOIOCTLCMD && disk->fops->compat_ioctl)
- ret = disk->fops->compat_ioctl(file, cmd, arg);
- unlock_kernel();
-
- if (ret != -ENOIOCTLCMD)
+ lock_kernel();
+ ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
+ unlock_kernel();
return ret;
-
- return compat_blkdev_driver_ioctl(inode, file, disk, cmd, arg);
+ default:
+ if (disk->fops->compat_ioctl)
+ ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
+ if (ret == -ENOIOCTLCMD)
+ ret = compat_blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ return ret;
+ }
}
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 342448c3d2d..fd311179f44 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -33,7 +33,7 @@ struct deadline_data {
*/
struct rb_root sort_list[2];
struct list_head fifo_list[2];
-
+
/*
* next in sort order. read, write or both are NULL
*/
@@ -53,7 +53,11 @@ struct deadline_data {
static void deadline_move_request(struct deadline_data *, struct request *);
-#define RQ_RB_ROOT(dd, rq) (&(dd)->sort_list[rq_data_dir((rq))])
+static inline struct rb_root *
+deadline_rb_root(struct deadline_data *dd, struct request *rq)
+{
+ return &dd->sort_list[rq_data_dir(rq)];
+}
/*
* get the request after `rq' in sector-sorted order
@@ -72,15 +76,11 @@ deadline_latter_request(struct request *rq)
static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
- struct rb_root *root = RQ_RB_ROOT(dd, rq);
+ struct rb_root *root = deadline_rb_root(dd, rq);
struct request *__alias;
-retry:
- __alias = elv_rb_add(root, rq);
- if (unlikely(__alias)) {
+ while (unlikely(__alias = elv_rb_add(root, rq)))
deadline_move_request(dd, __alias);
- goto retry;
- }
}
static inline void
@@ -91,7 +91,7 @@ deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
if (dd->next_rq[data_dir] == rq)
dd->next_rq[data_dir] = deadline_latter_request(rq);
- elv_rb_del(RQ_RB_ROOT(dd, rq), rq);
+ elv_rb_del(deadline_rb_root(dd, rq), rq);
}
/*
@@ -106,7 +106,7 @@ deadline_add_request(struct request_queue *q, struct request *rq)
deadline_add_rq_rb(dd, rq);
/*
- * set expire time (only used for reads) and add to fifo list
+ * set expire time and add to fifo list
*/
rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
@@ -162,7 +162,7 @@ static void deadline_merged_request(struct request_queue *q,
* if the merge was a front merge, we need to reposition request
*/
if (type == ELEVATOR_FRONT_MERGE) {
- elv_rb_del(RQ_RB_ROOT(dd, req), req);
+ elv_rb_del(deadline_rb_root(dd, req), req);
deadline_add_rq_rb(dd, req);
}
}
@@ -212,7 +212,7 @@ deadline_move_request(struct deadline_data *dd, struct request *rq)
dd->next_rq[WRITE] = NULL;
dd->next_rq[data_dir] = deadline_latter_request(rq);
- dd->last_sector = rq->sector + rq->nr_sectors;
+ dd->last_sector = rq_end_sector(rq);
/*
* take it off the sort and fifo list, move
@@ -222,7 +222,7 @@ deadline_move_request(struct deadline_data *dd, struct request *rq)
}
/*
- * deadline_check_fifo returns 0 if there are no expired reads on the fifo,
+ * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
* 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
*/
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
@@ -258,17 +258,9 @@ static int deadline_dispatch_requests(struct request_queue *q, int force)
else
rq = dd->next_rq[READ];
- if (rq) {
- /* we have a "next request" */
-
- if (dd->last_sector != rq->sector)
- /* end the batch on a non sequential request */
- dd->batching += dd->fifo_batch;
-
- if (dd->batching < dd->fifo_batch)
- /* we are still entitled to batch */
- goto dispatch_request;
- }
+ if (rq && dd->batching < dd->fifo_batch)
+		/* we have a next request and are still entitled to batch */
+ goto dispatch_request;
/*
* at this point we are not running a batch. select the appropriate
diff --git a/block/elevator.c b/block/elevator.c
index ed6f8f32d27..59173a69ebd 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -34,8 +34,9 @@
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
+#include <linux/uaccess.h>
-#include <asm/uaccess.h>
+#include "blk.h"
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
@@ -75,6 +76,12 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
return 0;
/*
+ * Don't merge file system requests and discard requests
+ */
+ if (bio_discard(bio) != bio_discard(rq->bio))
+ return 0;
+
+ /*
* different data direction or already started, don't merge
*/
if (bio_data_dir(bio) != rq_data_dir(rq))
@@ -438,6 +445,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
list_for_each_prev(entry, &q->queue_head) {
struct request *pos = list_entry_rq(entry);
+ if (blk_discard_rq(rq) != blk_discard_rq(pos))
+ break;
if (rq_data_dir(rq) != rq_data_dir(pos))
break;
if (pos->cmd_flags & stop_flags)
@@ -603,11 +612,11 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
* processing.
*/
blk_remove_plug(q);
- q->request_fn(q);
+ blk_start_queueing(q);
break;
case ELEVATOR_INSERT_SORT:
- BUG_ON(!blk_fs_request(rq));
+ BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
rq->cmd_flags |= REQ_SORTED;
q->nr_sorted++;
if (rq_mergeable(rq)) {
@@ -692,7 +701,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
* this request is scheduling boundary, update
* end_sector
*/
- if (blk_fs_request(rq)) {
+ if (blk_fs_request(rq) || blk_discard_rq(rq)) {
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
}
@@ -745,7 +754,7 @@ struct request *elv_next_request(struct request_queue *q)
* not ever see it.
*/
if (blk_empty_barrier(rq)) {
- end_queued_request(rq, 1);
+ __blk_end_request(rq, 0, blk_rq_bytes(rq));
continue;
}
if (!(rq->cmd_flags & REQ_STARTED)) {
@@ -764,6 +773,12 @@ struct request *elv_next_request(struct request_queue *q)
*/
rq->cmd_flags |= REQ_STARTED;
blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+
+ /*
+ * We are now handing the request to the hardware,
+ * add the timeout handler
+ */
+ blk_add_timer(rq);
}
if (!q->boundary_rq || q->boundary_rq == rq) {
@@ -782,7 +797,6 @@ struct request *elv_next_request(struct request_queue *q)
* device can handle
*/
rq->nr_phys_segments++;
- rq->nr_hw_segments++;
}
if (!q->prep_rq_fn)
@@ -805,14 +819,13 @@ struct request *elv_next_request(struct request_queue *q)
* so that we don't add it again
*/
--rq->nr_phys_segments;
- --rq->nr_hw_segments;
}
rq = NULL;
break;
} else if (ret == BLKPREP_KILL) {
rq->cmd_flags |= REQ_QUIET;
- end_queued_request(rq, 0);
+ __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
} else {
printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
break;
@@ -901,6 +914,19 @@ int elv_may_queue(struct request_queue *q, int rw)
return ELV_MQUEUE_MAY;
}
+void elv_abort_queue(struct request_queue *q)
+{
+ struct request *rq;
+
+ while (!list_empty(&q->queue_head)) {
+ rq = list_entry_rq(q->queue_head.next);
+ rq->cmd_flags |= REQ_QUIET;
+ blk_add_trace_rq(q, rq, BLK_TA_ABORT);
+ __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
+ }
+}
+EXPORT_SYMBOL(elv_abort_queue);
+
void elv_completed_request(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@@ -924,7 +950,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
- q->request_fn(q);
+ blk_start_queueing(q);
}
}
}
@@ -1083,8 +1109,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
elv_drain_elevator(q);
while (q->rq.elvpriv) {
- blk_remove_plug(q);
- q->request_fn(q);
+ blk_start_queueing(q);
spin_unlock_irq(q->queue_lock);
msleep(10);
spin_lock_irq(q->queue_lock);
@@ -1140,15 +1165,10 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
size_t count)
{
char elevator_name[ELV_NAME_MAX];
- size_t len;
struct elevator_type *e;
- elevator_name[sizeof(elevator_name) - 1] = '\0';
- strncpy(elevator_name, name, sizeof(elevator_name) - 1);
- len = strlen(elevator_name);
-
- if (len && elevator_name[len - 1] == '\n')
- elevator_name[len - 1] = '\0';
+ strlcpy(elevator_name, name, sizeof(elevator_name));
+ strstrip(elevator_name);
e = elevator_get(elevator_name);
if (!e) {
diff --git a/block/genhd.c b/block/genhd.c
index 9074f384b09..4e5e7493f67 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -10,12 +10,14 @@
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/spinlock.h>
+#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/kobj_map.h>
#include <linux/buffer_head.h>
#include <linux/mutex.h>
+#include <linux/idr.h>
#include "blk.h"
@@ -24,8 +26,194 @@ static DEFINE_MUTEX(block_class_lock);
struct kobject *block_depr;
#endif
+/* for extended dynamic devt allocation, currently only one major is used */
+#define MAX_EXT_DEVT (1 << MINORBITS)
+
+/* For extended devt allocation.  ext_devt_mutex prevents lookup
+ * results from going away underneath its user.
+ */
+static DEFINE_MUTEX(ext_devt_mutex);
+static DEFINE_IDR(ext_devt_idr);
+
static struct device_type disk_type;
+/**
+ * disk_get_part - get partition
+ * @disk: disk to look partition from
+ * @partno: partition number
+ *
+ * Look for partition @partno from @disk. If found, increment
+ * reference count and return it.
+ *
+ * CONTEXT:
+ * Don't care.
+ *
+ * RETURNS:
+ * Pointer to the found partition on success, NULL if not found.
+ */
+struct hd_struct *disk_get_part(struct gendisk *disk, int partno)
+{
+ struct hd_struct *part = NULL;
+ struct disk_part_tbl *ptbl;
+
+ if (unlikely(partno < 0))
+ return NULL;
+
+ rcu_read_lock();
+
+ ptbl = rcu_dereference(disk->part_tbl);
+ if (likely(partno < ptbl->len)) {
+ part = rcu_dereference(ptbl->part[partno]);
+ if (part)
+ get_device(part_to_dev(part));
+ }
+
+ rcu_read_unlock();
+
+ return part;
+}
+EXPORT_SYMBOL_GPL(disk_get_part);
+
+/**
+ * disk_part_iter_init - initialize partition iterator
+ * @piter: iterator to initialize
+ * @disk: disk to iterate over
+ * @flags: DISK_PITER_* flags
+ *
+ * Initialize @piter so that it iterates over partitions of @disk.
+ *
+ * CONTEXT:
+ * Don't care.
+ */
+void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk,
+ unsigned int flags)
+{
+ struct disk_part_tbl *ptbl;
+
+ rcu_read_lock();
+ ptbl = rcu_dereference(disk->part_tbl);
+
+ piter->disk = disk;
+ piter->part = NULL;
+
+ if (flags & DISK_PITER_REVERSE)
+ piter->idx = ptbl->len - 1;
+ else if (flags & DISK_PITER_INCL_PART0)
+ piter->idx = 0;
+ else
+ piter->idx = 1;
+
+ piter->flags = flags;
+
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(disk_part_iter_init);
+
+/**
+ * disk_part_iter_next - proceed iterator to the next partition and return it
+ * @piter: iterator of interest
+ *
+ * Proceed @piter to the next partition and return it.
+ *
+ * CONTEXT:
+ * Don't care.
+ */
+struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
+{
+ struct disk_part_tbl *ptbl;
+ int inc, end;
+
+ /* put the last partition */
+ disk_put_part(piter->part);
+ piter->part = NULL;
+
+ /* get part_tbl */
+ rcu_read_lock();
+ ptbl = rcu_dereference(piter->disk->part_tbl);
+
+ /* determine iteration parameters */
+ if (piter->flags & DISK_PITER_REVERSE) {
+ inc = -1;
+ if (piter->flags & DISK_PITER_INCL_PART0)
+ end = -1;
+ else
+ end = 0;
+ } else {
+ inc = 1;
+ end = ptbl->len;
+ }
+
+ /* iterate to the next partition */
+ for (; piter->idx != end; piter->idx += inc) {
+ struct hd_struct *part;
+
+ part = rcu_dereference(ptbl->part[piter->idx]);
+ if (!part)
+ continue;
+ if (!(piter->flags & DISK_PITER_INCL_EMPTY) && !part->nr_sects)
+ continue;
+
+ get_device(part_to_dev(part));
+ piter->part = part;
+ piter->idx += inc;
+ break;
+ }
+
+ rcu_read_unlock();
+
+ return piter->part;
+}
+EXPORT_SYMBOL_GPL(disk_part_iter_next);
+
+/**
+ * disk_part_iter_exit - finish up partition iteration
+ * @piter: iter of interest
+ *
+ * Called when iteration is over. Cleans up @piter.
+ *
+ * CONTEXT:
+ * Don't care.
+ */
+void disk_part_iter_exit(struct disk_part_iter *piter)
+{
+ disk_put_part(piter->part);
+ piter->part = NULL;
+}
+EXPORT_SYMBOL_GPL(disk_part_iter_exit);
+
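The three helpers above are the whole iteration API: init picks the starting index and direction from the DISK_PITER_* flags, next skips holes and (optionally) empty partitions, and exit drops the last reference. A minimal user-space sketch of that flag-driven walk, with a plain pointer array standing in for the RCU-protected part_tbl and no reference counting (an illustration only, not the kernel API):

#include <stdio.h>

#define PITER_REVERSE		0x1	/* hypothetical stand-ins for the */
#define PITER_INCL_PART0	0x2	/* DISK_PITER_* flags             */
#define PITER_INCL_EMPTY	0x4

struct part { int partno; unsigned long long nr_sects; };

/* walk a partition array the way disk_part_iter_{init,next}() do:
 * flags pick direction and end point; holes and zero-sized
 * partitions are skipped unless explicitly included */
static void iterate(struct part **tbl, int len, unsigned flags)
{
	int inc = (flags & PITER_REVERSE) ? -1 : 1;
	int end = (flags & PITER_REVERSE)
			? ((flags & PITER_INCL_PART0) ? -1 : 0) : len;
	int idx = (flags & PITER_REVERSE) ? len - 1
			: ((flags & PITER_INCL_PART0) ? 0 : 1);

	for (; idx != end; idx += inc) {
		struct part *p = tbl[idx];

		if (!p)
			continue;
		if (!(flags & PITER_INCL_EMPTY) && !p->nr_sects)
			continue;
		printf("part%d: %llu sectors\n", p->partno, p->nr_sects);
	}
}

int main(void)
{
	struct part p0 = { 0, 2048 }, p1 = { 1, 400 }, p2 = { 2, 0 };
	struct part *tbl[] = { &p0, &p1, NULL, &p2 };

	iterate(tbl, 4, PITER_INCL_PART0);			/* part0, part1 */
	iterate(tbl, 4, PITER_REVERSE | PITER_INCL_EMPTY);	/* part2, part1 */
	return 0;
}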
+/**
+ * disk_map_sector_rcu - map sector to partition
+ * @disk: gendisk of interest
+ * @sector: sector to map
+ *
+ * Find out which partition @sector maps to on @disk. This is
+ * primarily used for stats accounting.
+ *
+ * CONTEXT:
+ * RCU read locked. The returned partition pointer is valid only
+ * while preemption is disabled.
+ *
+ * RETURNS:
+ * Found partition on success, part0 is returned if no partition matches
+ */
+struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
+{
+ struct disk_part_tbl *ptbl;
+ int i;
+
+ ptbl = rcu_dereference(disk->part_tbl);
+
+ for (i = 1; i < ptbl->len; i++) {
+ struct hd_struct *part = rcu_dereference(ptbl->part[i]);
+
+ if (part && part->start_sect <= sector &&
+ sector < part->start_sect + part->nr_sects)
+ return part;
+ }
+ return &disk->part0;
+}
+EXPORT_SYMBOL_GPL(disk_map_sector_rcu);
+
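disk_map_sector_rcu() is just a linear scan for the partition whose [start_sect, start_sect + nr_sects) range contains the sector, falling back to part0. The same lookup in a self-contained user-space sketch (simplified types, no RCU):

#include <stdio.h>

struct hd { unsigned long long start_sect, nr_sects; };

/* return the index of the partition containing @sector, or 0 (the
 * whole-disk entry) if nothing matches -- the same fallback as
 * disk_map_sector_rcu() returning &disk->part0 */
static int map_sector(const struct hd *part, int len, unsigned long long sector)
{
	int i;

	for (i = 1; i < len; i++)
		if (part[i].start_sect <= sector &&
		    sector < part[i].start_sect + part[i].nr_sects)
			return i;
	return 0;
}

int main(void)
{
	/* slot 0 is the whole disk, slots 1..2 are partitions */
	struct hd tbl[] = { { 0, 2048 }, { 63, 1000 }, { 1100, 900 } };

	printf("%d %d %d\n",
	       map_sector(tbl, 3, 100),		/* 1 */
	       map_sector(tbl, 3, 1500),	/* 2 */
	       map_sector(tbl, 3, 2047));	/* 0 */
	return 0;
}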
/*
* Can be deleted altogether. Later.
*
@@ -43,14 +231,14 @@ static inline int major_to_index(int major)
}
#ifdef CONFIG_PROC_FS
-void blkdev_show(struct seq_file *f, off_t offset)
+void blkdev_show(struct seq_file *seqf, off_t offset)
{
struct blk_major_name *dp;
if (offset < BLKDEV_MAJOR_HASH_SIZE) {
mutex_lock(&block_class_lock);
for (dp = major_names[offset]; dp; dp = dp->next)
- seq_printf(f, "%3d %s\n", dp->major, dp->name);
+ seq_printf(seqf, "%3d %s\n", dp->major, dp->name);
mutex_unlock(&block_class_lock);
}
}
@@ -136,6 +324,117 @@ EXPORT_SYMBOL(unregister_blkdev);
static struct kobj_map *bdev_map;
+/**
+ * blk_mangle_minor - scatter minor numbers apart
+ * @minor: minor number to mangle
+ *
+ * Scatter consecutively allocated @minor numbers apart if
+ * CONFIG_DEBUG_BLOCK_EXT_DEVT is enabled.  Mangling twice gives the
+ * original value.
+ *
+ * RETURNS:
+ * Mangled value.
+ *
+ * CONTEXT:
+ * Don't care.
+ */
+static int blk_mangle_minor(int minor)
+{
+#ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT
+ int i;
+
+ for (i = 0; i < MINORBITS / 2; i++) {
+ int low = minor & (1 << i);
+ int high = minor & (1 << (MINORBITS - 1 - i));
+ int distance = MINORBITS - 1 - 2 * i;
+
+ minor ^= low | high; /* clear both bits */
+ low <<= distance; /* swap the positions */
+ high >>= distance;
+ minor |= low | high; /* and set */
+ }
+#endif
+ return minor;
+}
+
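The transform only exchanges bit i with bit (MINORBITS - 1 - i), so applying it twice restores the input; that is what lets the same helper map an idr index to a minor and a minor back to an index. A quick user-space check of that property (MINORBITS is assumed to be 20, as in the kernel):

#include <assert.h>
#include <stdio.h>

#define MINORBITS 20

/* same bit swapping as blk_mangle_minor(): exchange bit i with
 * bit (MINORBITS - 1 - i) for the lower half of the bit positions */
static int mangle(int minor)
{
	int i;

	for (i = 0; i < MINORBITS / 2; i++) {
		int low = minor & (1 << i);
		int high = minor & (1 << (MINORBITS - 1 - i));
		int distance = MINORBITS - 1 - 2 * i;

		minor ^= low | high;		/* clear both bits */
		low <<= distance;		/* swap the positions */
		high >>= distance;
		minor |= low | high;		/* and set */
	}
	return minor;
}

int main(void)
{
	int m;

	for (m = 0; m < (1 << MINORBITS); m++)
		assert(mangle(mangle(m)) == m);	/* mangling twice is a no-op */

	/* consecutive minors land far apart */
	printf("%#x %#x %#x\n", mangle(0), mangle(1), mangle(2));
	return 0;
}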
+/**
+ * blk_alloc_devt - allocate a dev_t for a partition
+ * @part: partition to allocate dev_t for
+ * @devt: out parameter for resulting dev_t
+ *
+ * Allocate a dev_t for block device.
+ *
+ * RETURNS:
+ * 0 on success, allocated dev_t is returned in *@devt. -errno on
+ * failure.
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
+{
+ struct gendisk *disk = part_to_disk(part);
+ int idx, rc;
+
+ /* in consecutive minor range? */
+ if (part->partno < disk->minors) {
+ *devt = MKDEV(disk->major, disk->first_minor + part->partno);
+ return 0;
+ }
+
+ /* allocate ext devt */
+ do {
+ if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL))
+ return -ENOMEM;
+ rc = idr_get_new(&ext_devt_idr, part, &idx);
+ } while (rc == -EAGAIN);
+
+ if (rc)
+ return rc;
+
+ if (idx > MAX_EXT_DEVT) {
+ idr_remove(&ext_devt_idr, idx);
+ return -EBUSY;
+ }
+
+ *devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
+ return 0;
+}
+
+/**
+ * blk_free_devt - free a dev_t
+ * @devt: dev_t to free
+ *
+ * Free @devt which was allocated using blk_alloc_devt().
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void blk_free_devt(dev_t devt)
+{
+ might_sleep();
+
+ if (devt == MKDEV(0, 0))
+ return;
+
+ if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
+ mutex_lock(&ext_devt_mutex);
+ idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
+ mutex_unlock(&ext_devt_mutex);
+ }
+}
+
+static char *bdevt_str(dev_t devt, char *buf)
+{
+ if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
+ char tbuf[BDEVT_SIZE];
+ snprintf(tbuf, BDEVT_SIZE, "%02x%02x", MAJOR(devt), MINOR(devt));
+ snprintf(buf, BDEVT_SIZE, "%-9s", tbuf);
+ } else
+ snprintf(buf, BDEVT_SIZE, "%03x:%05x", MAJOR(devt), MINOR(devt));
+
+ return buf;
+}
+
/*
* Register device numbers dev..(dev+range-1)
* range must be nonzero
@@ -157,11 +456,11 @@ void blk_unregister_region(dev_t devt, unsigned long range)
EXPORT_SYMBOL(blk_unregister_region);
-static struct kobject *exact_match(dev_t devt, int *part, void *data)
+static struct kobject *exact_match(dev_t devt, int *partno, void *data)
{
struct gendisk *p = data;
- return &p->dev.kobj;
+ return &disk_to_dev(p)->kobj;
}
static int exact_lock(dev_t devt, void *data)
@@ -179,21 +478,47 @@ static int exact_lock(dev_t devt, void *data)
*
* This function registers the partitioning information in @disk
* with the kernel.
+ *
+ * FIXME: error handling
*/
void add_disk(struct gendisk *disk)
{
struct backing_dev_info *bdi;
+ dev_t devt;
+ int retval;
+
+ /* minors == 0 indicates to use ext devt from part0 and should
+	 * be accompanied by the GENHD_FL_EXT_DEVT flag.  Make sure all
+ * parameters make sense.
+ */
+ WARN_ON(disk->minors && !(disk->major || disk->first_minor));
+ WARN_ON(!disk->minors && !(disk->flags & GENHD_FL_EXT_DEVT));
disk->flags |= GENHD_FL_UP;
- blk_register_region(MKDEV(disk->major, disk->first_minor),
- disk->minors, NULL, exact_match, exact_lock, disk);
+
+ retval = blk_alloc_devt(&disk->part0, &devt);
+ if (retval) {
+ WARN_ON(1);
+ return;
+ }
+ disk_to_dev(disk)->devt = devt;
+
+ /* ->major and ->first_minor aren't supposed to be
+ * dereferenced from here on, but set them just in case.
+ */
+ disk->major = MAJOR(devt);
+ disk->first_minor = MINOR(devt);
+
+ blk_register_region(disk_devt(disk), disk->minors, NULL,
+ exact_match, exact_lock, disk);
register_disk(disk);
blk_register_queue(disk);
- blk_register_filter(disk);
bdi = &disk->queue->backing_dev_info;
- bdi_register_dev(bdi, MKDEV(disk->major, disk->first_minor));
- sysfs_create_link(&disk->dev.kobj, &bdi->dev->kobj, "bdi");
+ bdi_register_dev(bdi, disk_devt(disk));
+ retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj,
+ "bdi");
+ WARN_ON(retval);
}
EXPORT_SYMBOL(add_disk);
@@ -201,28 +526,71 @@ EXPORT_SYMBOL(del_gendisk); /* in partitions/check.c */
void unlink_gendisk(struct gendisk *disk)
{
- blk_unregister_filter(disk);
- sysfs_remove_link(&disk->dev.kobj, "bdi");
+ sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
bdi_unregister(&disk->queue->backing_dev_info);
blk_unregister_queue(disk);
- blk_unregister_region(MKDEV(disk->major, disk->first_minor),
- disk->minors);
+ blk_unregister_region(disk_devt(disk), disk->minors);
}
/**
* get_gendisk - get partitioning information for a given device
- * @dev: device to get partitioning information for
+ * @devt: device to get partitioning information for
+ * @partno: returned partition index
*
* This function gets the structure containing partitioning
- * information for the given device @dev.
+ * information for the given device @devt.
+ */
+struct gendisk *get_gendisk(dev_t devt, int *partno)
+{
+ struct gendisk *disk = NULL;
+
+ if (MAJOR(devt) != BLOCK_EXT_MAJOR) {
+ struct kobject *kobj;
+
+ kobj = kobj_lookup(bdev_map, devt, partno);
+ if (kobj)
+ disk = dev_to_disk(kobj_to_dev(kobj));
+ } else {
+ struct hd_struct *part;
+
+ mutex_lock(&ext_devt_mutex);
+ part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
+ if (part && get_disk(part_to_disk(part))) {
+ *partno = part->partno;
+ disk = part_to_disk(part);
+ }
+ mutex_unlock(&ext_devt_mutex);
+ }
+
+ return disk;
+}
+
+/**
+ * bdget_disk - do bdget() by gendisk and partition number
+ * @disk: gendisk of interest
+ * @partno: partition number
+ *
+ * Find partition @partno from @disk, do bdget() on it.
+ *
+ * CONTEXT:
+ * Don't care.
+ *
+ * RETURNS:
+ * Resulting block_device on success, NULL on failure.
*/
-struct gendisk *get_gendisk(dev_t devt, int *part)
+struct block_device *bdget_disk(struct gendisk *disk, int partno)
{
- struct kobject *kobj = kobj_lookup(bdev_map, devt, part);
- struct device *dev = kobj_to_dev(kobj);
+ struct hd_struct *part;
+ struct block_device *bdev = NULL;
+
+ part = disk_get_part(disk, partno);
+ if (part)
+ bdev = bdget(part_devt(part));
+ disk_put_part(part);
- return kobj ? dev_to_disk(dev) : NULL;
+ return bdev;
}
+EXPORT_SYMBOL(bdget_disk);
/*
* print a full list of all partitions - intended for places where the root
@@ -231,136 +599,157 @@ struct gendisk *get_gendisk(dev_t devt, int *part)
*/
void __init printk_all_partitions(void)
{
+ struct class_dev_iter iter;
struct device *dev;
- struct gendisk *sgp;
- char buf[BDEVNAME_SIZE];
- int n;
- mutex_lock(&block_class_lock);
- /* For each block device... */
- list_for_each_entry(dev, &block_class.devices, node) {
- if (dev->type != &disk_type)
- continue;
- sgp = dev_to_disk(dev);
+ class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
+ while ((dev = class_dev_iter_next(&iter))) {
+ struct gendisk *disk = dev_to_disk(dev);
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+ char name_buf[BDEVNAME_SIZE];
+ char devt_buf[BDEVT_SIZE];
+
/*
- * Don't show empty devices or things that have been surpressed
+ * Don't show empty devices or things that have been
+		 * suppressed
*/
- if (get_capacity(sgp) == 0 ||
- (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
+ if (get_capacity(disk) == 0 ||
+ (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
continue;
/*
- * Note, unlike /proc/partitions, I am showing the numbers in
- * hex - the same format as the root= option takes.
+ * Note, unlike /proc/partitions, I am showing the
+ * numbers in hex - the same format as the root=
+ * option takes.
*/
- printk("%02x%02x %10llu %s",
- sgp->major, sgp->first_minor,
- (unsigned long long)get_capacity(sgp) >> 1,
- disk_name(sgp, 0, buf));
- if (sgp->driverfs_dev != NULL &&
- sgp->driverfs_dev->driver != NULL)
- printk(" driver: %s\n",
- sgp->driverfs_dev->driver->name);
- else
- printk(" (driver?)\n");
-
- /* now show the partitions */
- for (n = 0; n < sgp->minors - 1; ++n) {
- if (sgp->part[n] == NULL)
- continue;
- if (sgp->part[n]->nr_sects == 0)
- continue;
- printk(" %02x%02x %10llu %s\n",
- sgp->major, n + 1 + sgp->first_minor,
- (unsigned long long)sgp->part[n]->nr_sects >> 1,
- disk_name(sgp, n + 1, buf));
+ disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
+ while ((part = disk_part_iter_next(&piter))) {
+ bool is_part0 = part == &disk->part0;
+
+ printk("%s%s %10llu %s", is_part0 ? "" : " ",
+ bdevt_str(part_devt(part), devt_buf),
+ (unsigned long long)part->nr_sects >> 1,
+ disk_name(disk, part->partno, name_buf));
+ if (is_part0) {
+ if (disk->driverfs_dev != NULL &&
+ disk->driverfs_dev->driver != NULL)
+ printk(" driver: %s\n",
+ disk->driverfs_dev->driver->name);
+ else
+ printk(" (driver?)\n");
+ } else
+ printk("\n");
}
+ disk_part_iter_exit(&piter);
}
-
- mutex_unlock(&block_class_lock);
+ class_dev_iter_exit(&iter);
}
#ifdef CONFIG_PROC_FS
/* iterator */
-static void *part_start(struct seq_file *part, loff_t *pos)
+static void *disk_seqf_start(struct seq_file *seqf, loff_t *pos)
{
- loff_t k = *pos;
+ loff_t skip = *pos;
+ struct class_dev_iter *iter;
struct device *dev;
- mutex_lock(&block_class_lock);
- list_for_each_entry(dev, &block_class.devices, node) {
- if (dev->type != &disk_type)
- continue;
- if (!k--)
- return dev_to_disk(dev);
- }
- return NULL;
+ iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+ return ERR_PTR(-ENOMEM);
+
+ seqf->private = iter;
+ class_dev_iter_init(iter, &block_class, NULL, &disk_type);
+ do {
+ dev = class_dev_iter_next(iter);
+ if (!dev)
+ return NULL;
+ } while (skip--);
+
+ return dev_to_disk(dev);
}
-static void *part_next(struct seq_file *part, void *v, loff_t *pos)
+static void *disk_seqf_next(struct seq_file *seqf, void *v, loff_t *pos)
{
- struct gendisk *gp = v;
struct device *dev;
- ++*pos;
- list_for_each_entry(dev, &gp->dev.node, node) {
- if (&dev->node == &block_class.devices)
- return NULL;
- if (dev->type == &disk_type)
- return dev_to_disk(dev);
- }
+
+ (*pos)++;
+ dev = class_dev_iter_next(seqf->private);
+ if (dev)
+ return dev_to_disk(dev);
+
return NULL;
}
-static void part_stop(struct seq_file *part, void *v)
+static void disk_seqf_stop(struct seq_file *seqf, void *v)
{
- mutex_unlock(&block_class_lock);
+ struct class_dev_iter *iter = seqf->private;
+
+ /* stop is called even after start failed :-( */
+ if (iter) {
+ class_dev_iter_exit(iter);
+ kfree(iter);
+ }
+}
+
+static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
+{
+ static void *p;
+
+ p = disk_seqf_start(seqf, pos);
+ if (!IS_ERR(p) && p && !*pos)
+ seq_puts(seqf, "major minor #blocks name\n\n");
+ return p;
}
-static int show_partition(struct seq_file *part, void *v)
+static int show_partition(struct seq_file *seqf, void *v)
{
struct gendisk *sgp = v;
- int n;
+ struct disk_part_iter piter;
+ struct hd_struct *part;
char buf[BDEVNAME_SIZE];
- if (&sgp->dev.node == block_class.devices.next)
- seq_puts(part, "major minor #blocks name\n\n");
-
 	/* Don't show non-partitionable removable devices or empty devices */
- if (!get_capacity(sgp) ||
- (sgp->minors == 1 && (sgp->flags & GENHD_FL_REMOVABLE)))
+ if (!get_capacity(sgp) || (!disk_partitionable(sgp) &&
+ (sgp->flags & GENHD_FL_REMOVABLE)))
return 0;
if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
return 0;
/* show the full disk and all non-0 size partitions of it */
- seq_printf(part, "%4d %4d %10llu %s\n",
- sgp->major, sgp->first_minor,
- (unsigned long long)get_capacity(sgp) >> 1,
- disk_name(sgp, 0, buf));
- for (n = 0; n < sgp->minors - 1; n++) {
- if (!sgp->part[n])
- continue;
- if (sgp->part[n]->nr_sects == 0)
- continue;
- seq_printf(part, "%4d %4d %10llu %s\n",
- sgp->major, n + 1 + sgp->first_minor,
- (unsigned long long)sgp->part[n]->nr_sects >> 1 ,
- disk_name(sgp, n + 1, buf));
- }
+ disk_part_iter_init(&piter, sgp, DISK_PITER_INCL_PART0);
+ while ((part = disk_part_iter_next(&piter)))
+ seq_printf(seqf, "%4d %7d %10llu %s\n",
+ MAJOR(part_devt(part)), MINOR(part_devt(part)),
+ (unsigned long long)part->nr_sects >> 1,
+ disk_name(sgp, part->partno, buf));
+ disk_part_iter_exit(&piter);
return 0;
}
-const struct seq_operations partitions_op = {
- .start = part_start,
- .next = part_next,
- .stop = part_stop,
+static const struct seq_operations partitions_op = {
+ .start = show_partition_start,
+ .next = disk_seqf_next,
+ .stop = disk_seqf_stop,
.show = show_partition
};
+
+static int partitions_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &partitions_op);
+}
+
+static const struct file_operations proc_partitions_operations = {
+ .open = partitions_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
#endif
-static struct kobject *base_probe(dev_t devt, int *part, void *data)
+static struct kobject *base_probe(dev_t devt, int *partno, void *data)
{
if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
/* Make old-style 2.4 aliases work */
@@ -370,7 +759,10 @@ static struct kobject *base_probe(dev_t devt, int *part, void *data)
static int __init genhd_device_init(void)
{
- int error = class_register(&block_class);
+ int error;
+
+ block_class.dev_kobj = sysfs_dev_block_kobj;
+ error = class_register(&block_class);
if (unlikely(error))
return error;
bdev_map = kobj_map_init(base_probe, &block_class_lock);
@@ -393,29 +785,29 @@ static ssize_t disk_range_show(struct device *dev,
return sprintf(buf, "%d\n", disk->minors);
}
-static ssize_t disk_removable_show(struct device *dev,
+static ssize_t disk_ext_range_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
- return sprintf(buf, "%d\n",
- (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
+ return sprintf(buf, "%d\n", disk_max_parts(disk));
}
-static ssize_t disk_ro_show(struct device *dev,
+static ssize_t disk_removable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
- return sprintf(buf, "%d\n", disk->policy ? 1 : 0);
+ return sprintf(buf, "%d\n",
+ (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
}
-static ssize_t disk_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t disk_ro_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
- return sprintf(buf, "%llu\n", (unsigned long long)get_capacity(disk));
+ return sprintf(buf, "%d\n", get_disk_ro(disk) ? 1 : 0);
}
static ssize_t disk_capability_show(struct device *dev,
@@ -426,73 +818,26 @@ static ssize_t disk_capability_show(struct device *dev,
return sprintf(buf, "%x\n", disk->flags);
}
-static ssize_t disk_stat_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct gendisk *disk = dev_to_disk(dev);
-
- preempt_disable();
- disk_round_stats(disk);
- preempt_enable();
- return sprintf(buf,
- "%8lu %8lu %8llu %8u "
- "%8lu %8lu %8llu %8u "
- "%8u %8u %8u"
- "\n",
- disk_stat_read(disk, ios[READ]),
- disk_stat_read(disk, merges[READ]),
- (unsigned long long)disk_stat_read(disk, sectors[READ]),
- jiffies_to_msecs(disk_stat_read(disk, ticks[READ])),
- disk_stat_read(disk, ios[WRITE]),
- disk_stat_read(disk, merges[WRITE]),
- (unsigned long long)disk_stat_read(disk, sectors[WRITE]),
- jiffies_to_msecs(disk_stat_read(disk, ticks[WRITE])),
- disk->in_flight,
- jiffies_to_msecs(disk_stat_read(disk, io_ticks)),
- jiffies_to_msecs(disk_stat_read(disk, time_in_queue)));
-}
-
-#ifdef CONFIG_FAIL_MAKE_REQUEST
-static ssize_t disk_fail_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct gendisk *disk = dev_to_disk(dev);
-
- return sprintf(buf, "%d\n", disk->flags & GENHD_FL_FAIL ? 1 : 0);
-}
-
-static ssize_t disk_fail_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gendisk *disk = dev_to_disk(dev);
- int i;
-
- if (count > 0 && sscanf(buf, "%d", &i) > 0) {
- if (i == 0)
- disk->flags &= ~GENHD_FL_FAIL;
- else
- disk->flags |= GENHD_FL_FAIL;
- }
-
- return count;
-}
-
-#endif
-
static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
+static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL);
static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL);
-static DEVICE_ATTR(size, S_IRUGO, disk_size_show, NULL);
+static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
-static DEVICE_ATTR(stat, S_IRUGO, disk_stat_show, NULL);
+static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
#ifdef CONFIG_FAIL_MAKE_REQUEST
static struct device_attribute dev_attr_fail =
- __ATTR(make-it-fail, S_IRUGO|S_IWUSR, disk_fail_show, disk_fail_store);
+ __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
+#endif
+#ifdef CONFIG_FAIL_IO_TIMEOUT
+static struct device_attribute dev_attr_fail_timeout =
+ __ATTR(io-timeout-fail, S_IRUGO|S_IWUSR, part_timeout_show,
+ part_timeout_store);
#endif
static struct attribute *disk_attrs[] = {
&dev_attr_range.attr,
+ &dev_attr_ext_range.attr,
&dev_attr_removable.attr,
&dev_attr_ro.attr,
&dev_attr_size.attr,
@@ -501,6 +846,9 @@ static struct attribute *disk_attrs[] = {
#ifdef CONFIG_FAIL_MAKE_REQUEST
&dev_attr_fail.attr,
#endif
+#ifdef CONFIG_FAIL_IO_TIMEOUT
+ &dev_attr_fail_timeout.attr,
+#endif
NULL
};
@@ -513,13 +861,87 @@ static struct attribute_group *disk_attr_groups[] = {
NULL
};
+static void disk_free_ptbl_rcu_cb(struct rcu_head *head)
+{
+ struct disk_part_tbl *ptbl =
+ container_of(head, struct disk_part_tbl, rcu_head);
+
+ kfree(ptbl);
+}
+
+/**
+ * disk_replace_part_tbl - replace disk->part_tbl in RCU-safe way
+ * @disk: disk to replace part_tbl for
+ * @new_ptbl: new part_tbl to install
+ *
+ * Replace disk->part_tbl with @new_ptbl in RCU-safe way. The
+ * original ptbl is freed using RCU callback.
+ *
+ * LOCKING:
+ * Matching bd_mutex locked.
+ */
+static void disk_replace_part_tbl(struct gendisk *disk,
+ struct disk_part_tbl *new_ptbl)
+{
+ struct disk_part_tbl *old_ptbl = disk->part_tbl;
+
+ rcu_assign_pointer(disk->part_tbl, new_ptbl);
+ if (old_ptbl)
+ call_rcu(&old_ptbl->rcu_head, disk_free_ptbl_rcu_cb);
+}
+
+/**
+ * disk_expand_part_tbl - expand disk->part_tbl
+ * @disk: disk to expand part_tbl for
+ * @partno: expand such that this partno can fit in
+ *
+ * Expand disk->part_tbl such that @partno can fit in. disk->part_tbl
+ * uses RCU to allow unlocked dereferencing for stats and other stuff.
+ *
+ * LOCKING:
+ * Matching bd_mutex locked, might sleep.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int disk_expand_part_tbl(struct gendisk *disk, int partno)
+{
+ struct disk_part_tbl *old_ptbl = disk->part_tbl;
+ struct disk_part_tbl *new_ptbl;
+ int len = old_ptbl ? old_ptbl->len : 0;
+ int target = partno + 1;
+ size_t size;
+ int i;
+
+ /* disk_max_parts() is zero during initialization, ignore if so */
+ if (disk_max_parts(disk) && target > disk_max_parts(disk))
+ return -EINVAL;
+
+ if (target <= len)
+ return 0;
+
+ size = sizeof(*new_ptbl) + target * sizeof(new_ptbl->part[0]);
+ new_ptbl = kzalloc_node(size, GFP_KERNEL, disk->node_id);
+ if (!new_ptbl)
+ return -ENOMEM;
+
+ INIT_RCU_HEAD(&new_ptbl->rcu_head);
+ new_ptbl->len = target;
+
+ for (i = 0; i < len; i++)
+ rcu_assign_pointer(new_ptbl->part[i], old_ptbl->part[i]);
+
+ disk_replace_part_tbl(disk, new_ptbl);
+ return 0;
+}
+
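disk_expand_part_tbl() follows a copy-grow-publish pattern: allocate a larger table, copy the old slots over, publish the new pointer, and let RCU free the old table once readers are done with it. A user-space sketch of the same pattern, with a plain assignment and an immediate free standing in for rcu_assign_pointer()/call_rcu() (so, unlike the kernel version, it is not safe against concurrent readers):

#include <stdlib.h>
#include <string.h>

struct part;				/* opaque for this sketch */

struct part_tbl {
	int len;
	struct part *part[];		/* flexible array, like disk_part_tbl */
};

/* grow *ptblp so that index @partno is valid: allocate the larger
 * table, copy the existing slots, publish it, release the old one */
static int expand_part_tbl(struct part_tbl **ptblp, int partno)
{
	struct part_tbl *old = *ptblp, *new;
	int len = old ? old->len : 0;
	int target = partno + 1;

	if (target <= len)
		return 0;

	new = calloc(1, sizeof(*new) + target * sizeof(new->part[0]));
	if (!new)
		return -1;

	new->len = target;
	if (old)
		memcpy(new->part, old->part, len * sizeof(old->part[0]));

	*ptblp = new;			/* kernel: rcu_assign_pointer() */
	free(old);			/* kernel: deferred via call_rcu() */
	return 0;
}

int main(void)
{
	struct part_tbl *tbl = NULL;

	expand_part_tbl(&tbl, 0);	/* room for part0 */
	expand_part_tbl(&tbl, 7);	/* grow to eight slots */
	free(tbl);
	return 0;
}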
static void disk_release(struct device *dev)
{
struct gendisk *disk = dev_to_disk(dev);
kfree(disk->random);
- kfree(disk->part);
- free_disk_stats(disk);
+ disk_replace_part_tbl(disk, NULL);
+ free_part_stats(&disk->part0);
kfree(disk);
}
struct class block_class = {
@@ -532,6 +954,7 @@ static struct device_type disk_type = {
.release = disk_release,
};
+#ifdef CONFIG_PROC_FS
/*
* aggregate disk stat collector. Uses the same stats that the sysfs
* entries do, above, but makes them available through one seq_file.
@@ -539,85 +962,31 @@ static struct device_type disk_type = {
* The output looks suspiciously like /proc/partitions with a bunch of
* extra fields.
*/
-
-static void *diskstats_start(struct seq_file *part, loff_t *pos)
-{
- loff_t k = *pos;
- struct device *dev;
-
- mutex_lock(&block_class_lock);
- list_for_each_entry(dev, &block_class.devices, node) {
- if (dev->type != &disk_type)
- continue;
- if (!k--)
- return dev_to_disk(dev);
- }
- return NULL;
-}
-
-static void *diskstats_next(struct seq_file *part, void *v, loff_t *pos)
-{
- struct gendisk *gp = v;
- struct device *dev;
-
- ++*pos;
- list_for_each_entry(dev, &gp->dev.node, node) {
- if (&dev->node == &block_class.devices)
- return NULL;
- if (dev->type == &disk_type)
- return dev_to_disk(dev);
- }
- return NULL;
-}
-
-static void diskstats_stop(struct seq_file *part, void *v)
-{
- mutex_unlock(&block_class_lock);
-}
-
-static int diskstats_show(struct seq_file *s, void *v)
+static int diskstats_show(struct seq_file *seqf, void *v)
{
struct gendisk *gp = v;
+ struct disk_part_iter piter;
+ struct hd_struct *hd;
char buf[BDEVNAME_SIZE];
- int n = 0;
+ int cpu;
/*
- if (&gp->dev.kobj.entry == block_class.devices.next)
- seq_puts(s, "major minor name"
+ if (&disk_to_dev(gp)->kobj.entry == block_class.devices.next)
+ seq_puts(seqf, "major minor name"
" rio rmerge rsect ruse wio wmerge "
"wsect wuse running use aveq"
"\n\n");
*/
- preempt_disable();
- disk_round_stats(gp);
- preempt_enable();
- seq_printf(s, "%4d %4d %s %lu %lu %llu %u %lu %lu %llu %u %u %u %u\n",
- gp->major, n + gp->first_minor, disk_name(gp, n, buf),
- disk_stat_read(gp, ios[0]), disk_stat_read(gp, merges[0]),
- (unsigned long long)disk_stat_read(gp, sectors[0]),
- jiffies_to_msecs(disk_stat_read(gp, ticks[0])),
- disk_stat_read(gp, ios[1]), disk_stat_read(gp, merges[1]),
- (unsigned long long)disk_stat_read(gp, sectors[1]),
- jiffies_to_msecs(disk_stat_read(gp, ticks[1])),
- gp->in_flight,
- jiffies_to_msecs(disk_stat_read(gp, io_ticks)),
- jiffies_to_msecs(disk_stat_read(gp, time_in_queue)));
-
- /* now show all non-0 size partitions of it */
- for (n = 0; n < gp->minors - 1; n++) {
- struct hd_struct *hd = gp->part[n];
-
- if (!hd || !hd->nr_sects)
- continue;
-
- preempt_disable();
- part_round_stats(hd);
- preempt_enable();
- seq_printf(s, "%4d %4d %s %lu %lu %llu "
+ disk_part_iter_init(&piter, gp, DISK_PITER_INCL_PART0);
+ while ((hd = disk_part_iter_next(&piter))) {
+ cpu = part_stat_lock();
+ part_round_stats(cpu, hd);
+ part_stat_unlock();
+ seq_printf(seqf, "%4d %7d %s %lu %lu %llu "
"%u %lu %lu %llu %u %u %u %u\n",
- gp->major, n + gp->first_minor + 1,
- disk_name(gp, n + 1, buf),
+ MAJOR(part_devt(hd)), MINOR(part_devt(hd)),
+ disk_name(gp, hd->partno, buf),
part_stat_read(hd, ios[0]),
part_stat_read(hd, merges[0]),
(unsigned long long)part_stat_read(hd, sectors[0]),
@@ -631,17 +1000,39 @@ static int diskstats_show(struct seq_file *s, void *v)
jiffies_to_msecs(part_stat_read(hd, time_in_queue))
);
}
+ disk_part_iter_exit(&piter);
return 0;
}
-const struct seq_operations diskstats_op = {
- .start = diskstats_start,
- .next = diskstats_next,
- .stop = diskstats_stop,
+static const struct seq_operations diskstats_op = {
+ .start = disk_seqf_start,
+ .next = disk_seqf_next,
+ .stop = disk_seqf_stop,
.show = diskstats_show
};
+static int diskstats_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &diskstats_op);
+}
+
+static const struct file_operations proc_diskstats_operations = {
+ .open = diskstats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int __init proc_genhd_init(void)
+{
+ proc_create("diskstats", 0, NULL, &proc_diskstats_operations);
+ proc_create("partitions", 0, NULL, &proc_partitions_operations);
+ return 0;
+}
+module_init(proc_genhd_init);
+#endif /* CONFIG_PROC_FS */
+
static void media_change_notify_thread(struct work_struct *work)
{
struct gendisk *gd = container_of(work, struct gendisk, async_notify);
@@ -652,7 +1043,7 @@ static void media_change_notify_thread(struct work_struct *work)
 	 * set environment vars to indicate which event this is for
* so that user space will know to go check the media status.
*/
- kobject_uevent_env(&gd->dev.kobj, KOBJ_CHANGE, envp);
+ kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp);
put_device(gd->driverfs_dev);
}
@@ -665,26 +1056,29 @@ void genhd_media_change_notify(struct gendisk *disk)
EXPORT_SYMBOL_GPL(genhd_media_change_notify);
#endif /* 0 */
-dev_t blk_lookup_devt(const char *name, int part)
+dev_t blk_lookup_devt(const char *name, int partno)
{
- struct device *dev;
dev_t devt = MKDEV(0, 0);
+ struct class_dev_iter iter;
+ struct device *dev;
- mutex_lock(&block_class_lock);
- list_for_each_entry(dev, &block_class.devices, node) {
- if (dev->type != &disk_type)
+ class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
+ while ((dev = class_dev_iter_next(&iter))) {
+ struct gendisk *disk = dev_to_disk(dev);
+ struct hd_struct *part;
+
+ if (strcmp(dev->bus_id, name))
continue;
- if (strcmp(dev->bus_id, name) == 0) {
- struct gendisk *disk = dev_to_disk(dev);
- if (part < disk->minors)
- devt = MKDEV(MAJOR(dev->devt),
- MINOR(dev->devt) + part);
+ part = disk_get_part(disk, partno);
+ if (part) {
+ devt = part_devt(part);
+ disk_put_part(part);
break;
}
+ disk_put_part(part);
}
- mutex_unlock(&block_class_lock);
-
+ class_dev_iter_exit(&iter);
return devt;
}
EXPORT_SYMBOL(blk_lookup_devt);
@@ -693,6 +1087,7 @@ struct gendisk *alloc_disk(int minors)
{
return alloc_disk_node(minors, -1);
}
+EXPORT_SYMBOL(alloc_disk);
struct gendisk *alloc_disk_node(int minors, int node_id)
{
@@ -701,32 +1096,28 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
disk = kmalloc_node(sizeof(struct gendisk),
GFP_KERNEL | __GFP_ZERO, node_id);
if (disk) {
- if (!init_disk_stats(disk)) {
+ if (!init_part_stats(&disk->part0)) {
kfree(disk);
return NULL;
}
- if (minors > 1) {
- int size = (minors - 1) * sizeof(struct hd_struct *);
- disk->part = kmalloc_node(size,
- GFP_KERNEL | __GFP_ZERO, node_id);
- if (!disk->part) {
- free_disk_stats(disk);
- kfree(disk);
- return NULL;
- }
+ if (disk_expand_part_tbl(disk, 0)) {
+ free_part_stats(&disk->part0);
+ kfree(disk);
+ return NULL;
}
+ disk->part_tbl->part[0] = &disk->part0;
+
disk->minors = minors;
rand_initialize_disk(disk);
- disk->dev.class = &block_class;
- disk->dev.type = &disk_type;
- device_initialize(&disk->dev);
+ disk_to_dev(disk)->class = &block_class;
+ disk_to_dev(disk)->type = &disk_type;
+ device_initialize(disk_to_dev(disk));
INIT_WORK(&disk->async_notify,
media_change_notify_thread);
+ disk->node_id = node_id;
}
return disk;
}
-
-EXPORT_SYMBOL(alloc_disk);
EXPORT_SYMBOL(alloc_disk_node);
struct kobject *get_disk(struct gendisk *disk)
@@ -739,7 +1130,7 @@ struct kobject *get_disk(struct gendisk *disk)
owner = disk->fops->owner;
if (owner && !try_module_get(owner))
return NULL;
- kobj = kobject_get(&disk->dev.kobj);
+ kobj = kobject_get(&disk_to_dev(disk)->kobj);
if (kobj == NULL) {
module_put(owner);
return NULL;
@@ -753,27 +1144,28 @@ EXPORT_SYMBOL(get_disk);
void put_disk(struct gendisk *disk)
{
if (disk)
- kobject_put(&disk->dev.kobj);
+ kobject_put(&disk_to_dev(disk)->kobj);
}
EXPORT_SYMBOL(put_disk);
void set_device_ro(struct block_device *bdev, int flag)
{
- if (bdev->bd_contains != bdev)
- bdev->bd_part->policy = flag;
- else
- bdev->bd_disk->policy = flag;
+ bdev->bd_part->policy = flag;
}
EXPORT_SYMBOL(set_device_ro);
void set_disk_ro(struct gendisk *disk, int flag)
{
- int i;
- disk->policy = flag;
- for (i = 0; i < disk->minors - 1; i++)
- if (disk->part[i]) disk->part[i]->policy = flag;
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+
+ disk_part_iter_init(&piter, disk,
+ DISK_PITER_INCL_EMPTY | DISK_PITER_INCL_PART0);
+ while ((part = disk_part_iter_next(&piter)))
+ part->policy = flag;
+ disk_part_iter_exit(&piter);
}
EXPORT_SYMBOL(set_disk_ro);
@@ -782,18 +1174,15 @@ int bdev_read_only(struct block_device *bdev)
{
if (!bdev)
return 0;
- else if (bdev->bd_contains != bdev)
- return bdev->bd_part->policy;
- else
- return bdev->bd_disk->policy;
+ return bdev->bd_part->policy;
}
EXPORT_SYMBOL(bdev_read_only);
-int invalidate_partition(struct gendisk *disk, int index)
+int invalidate_partition(struct gendisk *disk, int partno)
{
int res = 0;
- struct block_device *bdev = bdget_disk(disk, index);
+ struct block_device *bdev = bdget_disk(disk, partno);
if (bdev) {
fsync_bdev(bdev);
res = __invalidate_device(bdev);
diff --git a/block/ioctl.c b/block/ioctl.c
index 52d6385216a..c832d639b6e 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -12,11 +12,13 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
{
struct block_device *bdevp;
struct gendisk *disk;
+ struct hd_struct *part;
struct blkpg_ioctl_arg a;
struct blkpg_partition p;
+ struct disk_part_iter piter;
long long start, length;
- int part;
- int i;
+ int partno;
+ int err;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -27,8 +29,8 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
disk = bdev->bd_disk;
if (bdev != bdev->bd_contains)
return -EINVAL;
- part = p.pno;
- if (part <= 0 || part >= disk->minors)
+ partno = p.pno;
+ if (partno <= 0)
return -EINVAL;
switch (a.op) {
case BLKPG_ADD_PARTITION:
@@ -42,36 +44,37 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
|| pstart < 0 || plength < 0)
return -EINVAL;
}
- /* partition number in use? */
+
mutex_lock(&bdev->bd_mutex);
- if (disk->part[part - 1]) {
- mutex_unlock(&bdev->bd_mutex);
- return -EBUSY;
- }
- /* overlap? */
- for (i = 0; i < disk->minors - 1; i++) {
- struct hd_struct *s = disk->part[i];
- if (!s)
- continue;
- if (!(start+length <= s->start_sect ||
- start >= s->start_sect + s->nr_sects)) {
+ /* overlap? */
+ disk_part_iter_init(&piter, disk,
+ DISK_PITER_INCL_EMPTY);
+ while ((part = disk_part_iter_next(&piter))) {
+ if (!(start + length <= part->start_sect ||
+ start >= part->start_sect + part->nr_sects)) {
+ disk_part_iter_exit(&piter);
mutex_unlock(&bdev->bd_mutex);
return -EBUSY;
}
}
+ disk_part_iter_exit(&piter);
+
/* all seems OK */
- add_partition(disk, part, start, length, ADDPART_FLAG_NONE);
+ err = add_partition(disk, partno, start, length,
+ ADDPART_FLAG_NONE);
mutex_unlock(&bdev->bd_mutex);
- return 0;
+ return err;
case BLKPG_DEL_PARTITION:
- if (!disk->part[part-1])
- return -ENXIO;
- if (disk->part[part - 1]->nr_sects == 0)
+ part = disk_get_part(disk, partno);
+ if (!part)
return -ENXIO;
- bdevp = bdget_disk(disk, part);
+
+ bdevp = bdget(part_devt(part));
+ disk_put_part(part);
if (!bdevp)
return -ENOMEM;
+
mutex_lock(&bdevp->bd_mutex);
if (bdevp->bd_openers) {
mutex_unlock(&bdevp->bd_mutex);
@@ -83,7 +86,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
invalidate_bdev(bdevp);
mutex_lock_nested(&bdev->bd_mutex, 1);
- delete_partition(disk, part);
+ delete_partition(disk, partno);
mutex_unlock(&bdev->bd_mutex);
mutex_unlock(&bdevp->bd_mutex);
bdput(bdevp);
@@ -99,7 +102,7 @@ static int blkdev_reread_part(struct block_device *bdev)
struct gendisk *disk = bdev->bd_disk;
int res;
- if (disk->minors == 1 || bdev != bdev->bd_contains)
+ if (!disk_partitionable(disk) || bdev != bdev->bd_contains)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -110,6 +113,69 @@ static int blkdev_reread_part(struct block_device *bdev)
return res;
}
+static void blk_ioc_discard_endio(struct bio *bio, int err)
+{
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ }
+ complete(bio->bi_private);
+}
+
+static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
+ uint64_t len)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+ int ret = 0;
+
+ if (start & 511)
+ return -EINVAL;
+ if (len & 511)
+ return -EINVAL;
+ start >>= 9;
+ len >>= 9;
+
+ if (start + len > (bdev->bd_inode->i_size >> 9))
+ return -EINVAL;
+
+ if (!q->prepare_discard_fn)
+ return -EOPNOTSUPP;
+
+ while (len && !ret) {
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct bio *bio;
+
+ bio = bio_alloc(GFP_KERNEL, 0);
+ if (!bio)
+ return -ENOMEM;
+
+ bio->bi_end_io = blk_ioc_discard_endio;
+ bio->bi_bdev = bdev;
+ bio->bi_private = &wait;
+ bio->bi_sector = start;
+
+ if (len > q->max_hw_sectors) {
+ bio->bi_size = q->max_hw_sectors << 9;
+ len -= q->max_hw_sectors;
+ start += q->max_hw_sectors;
+ } else {
+ bio->bi_size = len << 9;
+ len = 0;
+ }
+ submit_bio(DISCARD_NOBARRIER, bio);
+
+ wait_for_completion(&wait);
+
+ if (bio_flagged(bio, BIO_EOPNOTSUPP))
+ ret = -EOPNOTSUPP;
+ else if (!bio_flagged(bio, BIO_UPTODATE))
+ ret = -EIO;
+ bio_put(bio);
+ }
+ return ret;
+}
+
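blk_ioctl_discard() only validates 512-byte alignment and the device bounds, converts bytes to sectors, and then chops the range into bios of at most max_hw_sectors each. A user-space sketch of that splitting loop, printing the chunks instead of allocating and submitting bios:

#include <stdio.h>
#include <stdint.h>

/* split a byte range into discard chunks the way blk_ioctl_discard()
 * does: reject unaligned input, then cap each piece at @max_sectors */
static int split_discard(uint64_t start, uint64_t len,
			 uint64_t dev_bytes, unsigned max_sectors)
{
	if ((start & 511) || (len & 511))
		return -1;			/* -EINVAL in the kernel */
	start >>= 9;
	len >>= 9;
	if (start + len > (dev_bytes >> 9))
		return -1;

	while (len) {
		uint64_t chunk = len > max_sectors ? max_sectors : len;

		printf("discard sectors %llu..%llu\n",
		       (unsigned long long)start,
		       (unsigned long long)(start + chunk - 1));
		start += chunk;
		len -= chunk;
	}
	return 0;
}

int main(void)
{
	/* 1 MiB starting at 4 KiB on an 8 MiB device, 256-sector bios */
	return split_discard(4096, 1 << 20, 8 << 20, 256);
}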
static int put_ushort(unsigned long arg, unsigned short val)
{
return put_user(val, (unsigned short __user *)arg);
@@ -135,97 +201,41 @@ static int put_u64(unsigned long arg, u64 val)
return put_user(val, (u64 __user *)arg);
}
-static int blkdev_locked_ioctl(struct file *file, struct block_device *bdev,
- unsigned cmd, unsigned long arg)
-{
- struct backing_dev_info *bdi;
- int ret, n;
-
- switch (cmd) {
- case BLKRAGET:
- case BLKFRAGET:
- if (!arg)
- return -EINVAL;
- bdi = blk_get_backing_dev_info(bdev);
- if (bdi == NULL)
- return -ENOTTY;
- return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
- case BLKROGET:
- return put_int(arg, bdev_read_only(bdev) != 0);
- case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */
- return put_int(arg, block_size(bdev));
- case BLKSSZGET: /* get block device hardware sector size */
- return put_int(arg, bdev_hardsect_size(bdev));
- case BLKSECTGET:
- return put_ushort(arg, bdev_get_queue(bdev)->max_sectors);
- case BLKRASET:
- case BLKFRASET:
- if(!capable(CAP_SYS_ADMIN))
- return -EACCES;
- bdi = blk_get_backing_dev_info(bdev);
- if (bdi == NULL)
- return -ENOTTY;
- bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
- return 0;
- case BLKBSZSET:
- /* set the logical block size */
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- if (!arg)
- return -EINVAL;
- if (get_user(n, (int __user *) arg))
- return -EFAULT;
- if (bd_claim(bdev, file) < 0)
- return -EBUSY;
- ret = set_blocksize(bdev, n);
- bd_release(bdev);
- return ret;
- case BLKPG:
- return blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg);
- case BLKRRPART:
- return blkdev_reread_part(bdev);
- case BLKGETSIZE:
- if ((bdev->bd_inode->i_size >> 9) > ~0UL)
- return -EFBIG;
- return put_ulong(arg, bdev->bd_inode->i_size >> 9);
- case BLKGETSIZE64:
- return put_u64(arg, bdev->bd_inode->i_size);
- case BLKTRACESTART:
- case BLKTRACESTOP:
- case BLKTRACESETUP:
- case BLKTRACETEARDOWN:
- return blk_trace_ioctl(bdev, cmd, (char __user *) arg);
- }
- return -ENOIOCTLCMD;
-}
-
-int blkdev_driver_ioctl(struct inode *inode, struct file *file,
- struct gendisk *disk, unsigned cmd, unsigned long arg)
+int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned cmd, unsigned long arg)
{
+ struct gendisk *disk = bdev->bd_disk;
int ret;
- if (disk->fops->unlocked_ioctl)
- return disk->fops->unlocked_ioctl(file, cmd, arg);
- if (disk->fops->ioctl) {
+ if (disk->fops->ioctl)
+ return disk->fops->ioctl(bdev, mode, cmd, arg);
+
+ if (disk->fops->locked_ioctl) {
lock_kernel();
- ret = disk->fops->ioctl(inode, file, cmd, arg);
+ ret = disk->fops->locked_ioctl(bdev, mode, cmd, arg);
unlock_kernel();
return ret;
}
return -ENOTTY;
}
-EXPORT_SYMBOL_GPL(blkdev_driver_ioctl);
+/*
+ * For the record: _GPL here is only because somebody decided to slap it
+ * on the previous export. Sheer idiocy, since it wasn't copyrightable
+ * at all and could be open-coded without any exports by anybody who cares.
+ */
+EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl);
/*
* always keep this in sync with compat_blkdev_ioctl() and
* compat_blkdev_locked_ioctl()
*/
-int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
+int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
unsigned long arg)
{
- struct block_device *bdev = inode->i_bdev;
struct gendisk *disk = bdev->bd_disk;
+ struct backing_dev_info *bdi;
+ loff_t size;
int ret, n;
switch(cmd) {
@@ -233,7 +243,7 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- ret = blkdev_driver_ioctl(inode, file, disk, cmd, arg);
+ ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
/* -EINVAL to handle old uncorrected drivers */
if (ret != -EINVAL && ret != -ENOTTY)
return ret;
@@ -245,7 +255,7 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
return 0;
case BLKROSET:
- ret = blkdev_driver_ioctl(inode, file, disk, cmd, arg);
+ ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
/* -EINVAL to handle old uncorrected drivers */
if (ret != -EINVAL && ret != -ENOTTY)
return ret;
@@ -257,6 +267,19 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
set_device_ro(bdev, n);
unlock_kernel();
return 0;
+
+ case BLKDISCARD: {
+ uint64_t range[2];
+
+ if (!(mode & FMODE_WRITE))
+ return -EBADF;
+
+ if (copy_from_user(range, (void __user *)arg, sizeof(range)))
+ return -EFAULT;
+
+ return blk_ioctl_discard(bdev, range[0], range[1]);
+ }
+
case HDIO_GETGEO: {
struct hd_geometry geo;
@@ -278,14 +301,75 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
return -EFAULT;
return 0;
}
- }
-
- lock_kernel();
- ret = blkdev_locked_ioctl(file, bdev, cmd, arg);
- unlock_kernel();
- if (ret != -ENOIOCTLCMD)
+ case BLKRAGET:
+ case BLKFRAGET:
+ if (!arg)
+ return -EINVAL;
+ bdi = blk_get_backing_dev_info(bdev);
+ if (bdi == NULL)
+ return -ENOTTY;
+ return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
+ case BLKROGET:
+ return put_int(arg, bdev_read_only(bdev) != 0);
+ case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */
+ return put_int(arg, block_size(bdev));
+ case BLKSSZGET: /* get block device hardware sector size */
+ return put_int(arg, bdev_hardsect_size(bdev));
+ case BLKSECTGET:
+ return put_ushort(arg, bdev_get_queue(bdev)->max_sectors);
+ case BLKRASET:
+ case BLKFRASET:
+ if(!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ bdi = blk_get_backing_dev_info(bdev);
+ if (bdi == NULL)
+ return -ENOTTY;
+ lock_kernel();
+ bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+ unlock_kernel();
+ return 0;
+ case BLKBSZSET:
+ /* set the logical block size */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (!arg)
+ return -EINVAL;
+ if (get_user(n, (int __user *) arg))
+ return -EFAULT;
+ if (!(mode & FMODE_EXCL) && bd_claim(bdev, &bdev) < 0)
+ return -EBUSY;
+ ret = set_blocksize(bdev, n);
+ if (!(mode & FMODE_EXCL))
+ bd_release(bdev);
return ret;
-
- return blkdev_driver_ioctl(inode, file, disk, cmd, arg);
+ case BLKPG:
+ lock_kernel();
+ ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg);
+ unlock_kernel();
+ break;
+ case BLKRRPART:
+ lock_kernel();
+ ret = blkdev_reread_part(bdev);
+ unlock_kernel();
+ break;
+ case BLKGETSIZE:
+ size = bdev->bd_inode->i_size;
+ if ((size >> 9) > ~0UL)
+ return -EFBIG;
+ return put_ulong(arg, size >> 9);
+ case BLKGETSIZE64:
+ return put_u64(arg, bdev->bd_inode->i_size);
+ case BLKTRACESTART:
+ case BLKTRACESTOP:
+ case BLKTRACESETUP:
+ case BLKTRACETEARDOWN:
+ lock_kernel();
+ ret = blk_trace_ioctl(bdev, cmd, (char __user *) arg);
+ unlock_kernel();
+ break;
+ default:
+ ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ }
+ return ret;
}
EXPORT_SYMBOL_GPL(blkdev_ioctl);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index c5b9bcfc0a6..5963cf91a3a 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -105,12 +105,96 @@ static int sg_emulated_host(struct request_queue *q, int __user *p)
return put_user(1, p);
}
+void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
+{
+ /* Basic read-only commands */
+ __set_bit(TEST_UNIT_READY, filter->read_ok);
+ __set_bit(REQUEST_SENSE, filter->read_ok);
+ __set_bit(READ_6, filter->read_ok);
+ __set_bit(READ_10, filter->read_ok);
+ __set_bit(READ_12, filter->read_ok);
+ __set_bit(READ_16, filter->read_ok);
+ __set_bit(READ_BUFFER, filter->read_ok);
+ __set_bit(READ_DEFECT_DATA, filter->read_ok);
+ __set_bit(READ_CAPACITY, filter->read_ok);
+ __set_bit(READ_LONG, filter->read_ok);
+ __set_bit(INQUIRY, filter->read_ok);
+ __set_bit(MODE_SENSE, filter->read_ok);
+ __set_bit(MODE_SENSE_10, filter->read_ok);
+ __set_bit(LOG_SENSE, filter->read_ok);
+ __set_bit(START_STOP, filter->read_ok);
+ __set_bit(GPCMD_VERIFY_10, filter->read_ok);
+ __set_bit(VERIFY_16, filter->read_ok);
+ __set_bit(REPORT_LUNS, filter->read_ok);
+ __set_bit(SERVICE_ACTION_IN, filter->read_ok);
+ __set_bit(RECEIVE_DIAGNOSTIC, filter->read_ok);
+ __set_bit(MAINTENANCE_IN, filter->read_ok);
+ __set_bit(GPCMD_READ_BUFFER_CAPACITY, filter->read_ok);
+
+ /* Audio CD commands */
+ __set_bit(GPCMD_PLAY_CD, filter->read_ok);
+ __set_bit(GPCMD_PLAY_AUDIO_10, filter->read_ok);
+ __set_bit(GPCMD_PLAY_AUDIO_MSF, filter->read_ok);
+ __set_bit(GPCMD_PLAY_AUDIO_TI, filter->read_ok);
+ __set_bit(GPCMD_PAUSE_RESUME, filter->read_ok);
+
+ /* CD/DVD data reading */
+ __set_bit(GPCMD_READ_CD, filter->read_ok);
+ __set_bit(GPCMD_READ_CD_MSF, filter->read_ok);
+ __set_bit(GPCMD_READ_DISC_INFO, filter->read_ok);
+ __set_bit(GPCMD_READ_CDVD_CAPACITY, filter->read_ok);
+ __set_bit(GPCMD_READ_DVD_STRUCTURE, filter->read_ok);
+ __set_bit(GPCMD_READ_HEADER, filter->read_ok);
+ __set_bit(GPCMD_READ_TRACK_RZONE_INFO, filter->read_ok);
+ __set_bit(GPCMD_READ_SUBCHANNEL, filter->read_ok);
+ __set_bit(GPCMD_READ_TOC_PMA_ATIP, filter->read_ok);
+ __set_bit(GPCMD_REPORT_KEY, filter->read_ok);
+ __set_bit(GPCMD_SCAN, filter->read_ok);
+ __set_bit(GPCMD_GET_CONFIGURATION, filter->read_ok);
+ __set_bit(GPCMD_READ_FORMAT_CAPACITIES, filter->read_ok);
+ __set_bit(GPCMD_GET_EVENT_STATUS_NOTIFICATION, filter->read_ok);
+ __set_bit(GPCMD_GET_PERFORMANCE, filter->read_ok);
+ __set_bit(GPCMD_SEEK, filter->read_ok);
+ __set_bit(GPCMD_STOP_PLAY_SCAN, filter->read_ok);
+
+ /* Basic writing commands */
+ __set_bit(WRITE_6, filter->write_ok);
+ __set_bit(WRITE_10, filter->write_ok);
+ __set_bit(WRITE_VERIFY, filter->write_ok);
+ __set_bit(WRITE_12, filter->write_ok);
+ __set_bit(WRITE_VERIFY_12, filter->write_ok);
+ __set_bit(WRITE_16, filter->write_ok);
+ __set_bit(WRITE_LONG, filter->write_ok);
+ __set_bit(WRITE_LONG_2, filter->write_ok);
+ __set_bit(ERASE, filter->write_ok);
+ __set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
+ __set_bit(MODE_SELECT, filter->write_ok);
+ __set_bit(LOG_SELECT, filter->write_ok);
+ __set_bit(GPCMD_BLANK, filter->write_ok);
+ __set_bit(GPCMD_CLOSE_TRACK, filter->write_ok);
+ __set_bit(GPCMD_FLUSH_CACHE, filter->write_ok);
+ __set_bit(GPCMD_FORMAT_UNIT, filter->write_ok);
+ __set_bit(GPCMD_REPAIR_RZONE_TRACK, filter->write_ok);
+ __set_bit(GPCMD_RESERVE_RZONE_TRACK, filter->write_ok);
+ __set_bit(GPCMD_SEND_DVD_STRUCTURE, filter->write_ok);
+ __set_bit(GPCMD_SEND_EVENT, filter->write_ok);
+ __set_bit(GPCMD_SEND_KEY, filter->write_ok);
+ __set_bit(GPCMD_SEND_OPC, filter->write_ok);
+ __set_bit(GPCMD_SEND_CUE_SHEET, filter->write_ok);
+ __set_bit(GPCMD_SET_SPEED, filter->write_ok);
+ __set_bit(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, filter->write_ok);
+ __set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok);
+ __set_bit(GPCMD_SET_STREAMING, filter->write_ok);
+ __set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok);
+}
+EXPORT_SYMBOL_GPL(blk_set_cmd_filter_defaults);
+
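The hunks above and below drop the struct file * plumbing in favour of an fmode_t, and route command vetting through the per-queue cmd_filter that blk_set_cmd_filter_defaults() populates. For orientation, a check of that shape could look like the sketch below; the signature mirrors the blk_verify_command() calls in this patch, while the body is an illustrative approximation rather than the actual block/cmd-filter.c implementation.

	/*
	 * Illustrative sketch only: signature taken from the calls in this
	 * patch, body approximated from the read_ok/write_ok semantics above.
	 */
	static int blk_verify_command_sketch(struct blk_cmd_filter *filter,
					     unsigned char *cmd, int has_write_perm)
	{
		/* CAP_SYS_RAWIO bypasses the per-opcode filter entirely */
		if (capable(CAP_SYS_RAWIO))
			return 0;

		/* read-safe opcodes are allowed for any opener */
		if (test_bit(cmd[0], filter->read_ok))
			return 0;

		/* write-side opcodes additionally need a writable open (FMODE_WRITE) */
		if (test_bit(cmd[0], filter->write_ok) && has_write_perm)
			return 0;

		return -EPERM;
	}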
static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
- struct sg_io_hdr *hdr, struct file *file)
+ struct sg_io_hdr *hdr, fmode_t mode)
{
if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
return -EFAULT;
- if (blk_verify_command(file, rq->cmd))
+ if (blk_verify_command(&q->cmd_filter, rq->cmd, mode & FMODE_WRITE))
return -EPERM;
/*
@@ -175,8 +259,8 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
return r;
}
-static int sg_io(struct file *file, struct request_queue *q,
- struct gendisk *bd_disk, struct sg_io_hdr *hdr)
+static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
+ struct sg_io_hdr *hdr, fmode_t mode)
{
unsigned long start_time;
int writing = 0, ret = 0;
@@ -208,7 +292,7 @@ static int sg_io(struct file *file, struct request_queue *q,
if (!rq)
return -ENOMEM;
- if (blk_fill_sghdr_rq(q, rq, hdr, file)) {
+ if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
blk_put_request(rq);
return -EFAULT;
}
@@ -229,11 +313,12 @@ static int sg_io(struct file *file, struct request_queue *q,
goto out;
}
- ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count,
- hdr->dxfer_len);
+ ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
+ hdr->dxfer_len, GFP_KERNEL);
kfree(iov);
} else if (hdr->dxfer_len)
- ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
+ ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
+ GFP_KERNEL);
if (ret)
goto out;
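Both mapping helpers gain two parameters in this series: a struct rq_map_data * (NULL here, so pages are allocated normally) and an explicit gfp_t for those allocations. A minimal hedged example of the updated blk_rq_map_user() call, with hypothetical buf/len names standing in for the caller's buffer:

	/* buf and len are hypothetical caller-side names; the NULL rq_map_data
	 * and GFP_KERNEL match what sg_io() passes in the hunk above. */
	ret = blk_rq_map_user(q, rq, NULL, buf, len, GFP_KERNEL);
	if (ret)
		goto out;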
@@ -294,8 +379,8 @@ out:
* bytes in one int) where the lowest byte is the SCSI status.
*/
#define OMAX_SB_LEN 16 /* For backward compatibility */
-int sg_scsi_ioctl(struct file *file, struct request_queue *q,
- struct gendisk *disk, struct scsi_ioctl_command __user *sic)
+int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
+ struct scsi_ioctl_command __user *sic)
{
struct request *rq;
int err;
@@ -340,7 +425,7 @@ int sg_scsi_ioctl(struct file *file, struct request_queue *q,
if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
goto error;
- err = blk_verify_command(file, rq->cmd);
+ err = blk_verify_command(&q->cmd_filter, rq->cmd, mode & FMODE_WRITE);
if (err)
goto error;
@@ -432,8 +517,8 @@ static inline int blk_send_start_stop(struct request_queue *q,
return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
}
-int scsi_cmd_ioctl(struct file *file, struct request_queue *q,
- struct gendisk *bd_disk, unsigned int cmd, void __user *arg)
+int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mode,
+ unsigned int cmd, void __user *arg)
{
int err;
@@ -474,7 +559,7 @@ int scsi_cmd_ioctl(struct file *file, struct request_queue *q,
err = -EFAULT;
if (copy_from_user(&hdr, arg, sizeof(hdr)))
break;
- err = sg_io(file, q, bd_disk, &hdr);
+ err = sg_io(q, bd_disk, &hdr, mode);
if (err == -EFAULT)
break;
@@ -518,11 +603,11 @@ int scsi_cmd_ioctl(struct file *file, struct request_queue *q,
hdr.sbp = cgc.sense;
if (hdr.sbp)
hdr.mx_sb_len = sizeof(struct request_sense);
- hdr.timeout = cgc.timeout;
+ hdr.timeout = jiffies_to_msecs(cgc.timeout);
hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd;
hdr.cmd_len = sizeof(cgc.cmd);
- err = sg_io(file, q, bd_disk, &hdr);
+ err = sg_io(q, bd_disk, &hdr, mode);
if (err == -EFAULT)
break;
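In the CDROM_SEND_PACKET path the sg_io_hdr is filled from a struct cdrom_generic_command; the timeout carried in cgc.timeout is in jiffies on this path, while sg_io() treats hdr.timeout as milliseconds, hence the explicit conversion in the hunk above. The unit fix in isolation:

	/* cgc.timeout arrives in jiffies; sg_io_hdr.timeout is in milliseconds */
	hdr.timeout = jiffies_to_msecs(cgc.timeout);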
@@ -546,7 +631,7 @@ int scsi_cmd_ioctl(struct file *file, struct request_queue *q,
if (!arg)
break;
- err = sg_scsi_ioctl(file, q, bd_disk, arg);
+ err = sg_scsi_ioctl(q, bd_disk, mode, arg);
break;
case CDROMCLOSETRAY:
err = blk_send_start_stop(q, bd_disk, 0x03);