author      Dave Jones <davej@redhat.com>   2006-12-12 17:41:41 -0500
committer   Dave Jones <davej@redhat.com>   2006-12-12 17:41:41 -0500
commit      c4366889dda8110247be59ca41fddb82951a8c26 (patch)
tree        705c1a996bed8fd48ce94ff33ec9fd00f9b94875 /block
parent      db2fb9db5735cc532fd4fc55e94b9a3c3750378e (diff)
parent      e1036502e5263851259d147771226161e5ccc85a (diff)
Merge ../linus
Conflicts:
drivers/cpufreq/cpufreq.c
Diffstat (limited to 'block')
-rw-r--r--  block/Kconfig             |   6
-rw-r--r--  block/as-iosched.c        |   9
-rw-r--r--  block/blktrace.c          |  53
-rw-r--r--  block/cfq-iosched.c       |  37
-rw-r--r--  block/deadline-iosched.c  |   2
-rw-r--r--  block/elevator.c          |   4
-rw-r--r--  block/genhd.c             |  31
-rw-r--r--  block/ioctl.c             |   6
-rw-r--r--  block/ll_rw_blk.c         | 351
-rw-r--r--  block/noop-iosched.c      |   2
-rw-r--r--  block/scsi_ioctl.c        |  56
11 files changed, 336 insertions(+), 221 deletions(-)
diff --git a/block/Kconfig b/block/Kconfig index 83766a6bdee..a50f4811164 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -19,11 +19,9 @@ config BLOCK if BLOCK -#XXX - it makes sense to enable this only for 32-bit subarch's, not for x86_64 -#for instance. config LBD bool "Support for Large Block Devices" - depends on X86 || (MIPS && 32BIT) || PPC32 || (S390 && !64BIT) || SUPERH || UML + depends on !64BIT help Say Y here if you want to attach large (bigger than 2TB) discs to your machine, or if you want to have a raid or loopback device @@ -44,7 +42,7 @@ config BLK_DEV_IO_TRACE config LSF bool "Support for Large Single Files" - depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML + depends on !64BIT help Say Y here if you want to be able to handle very large files (bigger than 2TB), otherwise say N. diff --git a/block/as-iosched.c b/block/as-iosched.c index 50b95e4c142..5934c4bfd52 100644 --- a/block/as-iosched.c +++ b/block/as-iosched.c @@ -1274,9 +1274,10 @@ static void as_merged_requests(request_queue_t *q, struct request *req, * * FIXME! dispatch queue is not a queue at all! */ -static void as_work_handler(void *data) +static void as_work_handler(struct work_struct *work) { - struct request_queue *q = data; + struct as_data *ad = container_of(work, struct as_data, antic_work); + struct request_queue *q = ad->q; unsigned long flags; spin_lock_irqsave(q->queue_lock, flags); @@ -1317,7 +1318,7 @@ static void as_exit_queue(elevator_t *e) /* * initialize elevator private data (as_data). */ -static void *as_init_queue(request_queue_t *q, elevator_t *e) +static void *as_init_queue(request_queue_t *q) { struct as_data *ad; @@ -1332,7 +1333,7 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e) ad->antic_timer.function = as_antic_timeout; ad->antic_timer.data = (unsigned long)q; init_timer(&ad->antic_timer); - INIT_WORK(&ad->antic_work, as_work_handler, q); + INIT_WORK(&ad->antic_work, as_work_handler); INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]); INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); diff --git a/block/blktrace.c b/block/blktrace.c index 135593c8e45..d3679dd1d22 100644 --- a/block/blktrace.c +++ b/block/blktrace.c @@ -22,32 +22,60 @@ #include <linux/init.h> #include <linux/mutex.h> #include <linux/debugfs.h> +#include <linux/time.h> #include <asm/uaccess.h> static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, }; static unsigned int blktrace_seq __read_mostly = 1; /* - * Send out a notify for this process, if we haven't done so since a trace - * started + * Send out a notify message. 
*/ -static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk) +static void trace_note(struct blk_trace *bt, pid_t pid, int action, + const void *data, size_t len) { struct blk_io_trace *t; - t = relay_reserve(bt->rchan, sizeof(*t) + sizeof(tsk->comm)); + t = relay_reserve(bt->rchan, sizeof(*t) + len); if (t) { + const int cpu = smp_processor_id(); + t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; + t->time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu); t->device = bt->dev; - t->action = BLK_TC_ACT(BLK_TC_NOTIFY); - t->pid = tsk->pid; - t->cpu = smp_processor_id(); - t->pdu_len = sizeof(tsk->comm); - memcpy((void *) t + sizeof(*t), tsk->comm, t->pdu_len); - tsk->btrace_seq = blktrace_seq; + t->action = action; + t->pid = pid; + t->cpu = cpu; + t->pdu_len = len; + memcpy((void *) t + sizeof(*t), data, len); } } +/* + * Send out a notify for this process, if we haven't done so since a trace + * started + */ +static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk) +{ + tsk->btrace_seq = blktrace_seq; + trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm)); +} + +static void trace_note_time(struct blk_trace *bt) +{ + struct timespec now; + unsigned long flags; + u32 words[2]; + + getnstimeofday(&now); + words[0] = now.tv_sec; + words[1] = now.tv_nsec; + + local_irq_save(flags); + trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words)); + local_irq_restore(flags); +} + static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector, pid_t pid) { @@ -366,8 +394,7 @@ err: if (bt) { if (bt->dropped_file) debugfs_remove(bt->dropped_file); - if (bt->sequence) - free_percpu(bt->sequence); + free_percpu(bt->sequence); if (bt->rchan) relay_close(bt->rchan); kfree(bt); @@ -394,6 +421,8 @@ static int blk_trace_startstop(request_queue_t *q, int start) blktrace_seq++; smp_mb(); bt->trace_state = Blktrace_running; + + trace_note_time(bt); ret = 0; } } else { diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index d3d76136f53..78c6b312bd3 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -43,8 +43,8 @@ static int cfq_slice_idle = HZ / 125; #define RQ_CIC(rq) ((struct cfq_io_context*)(rq)->elevator_private) #define RQ_CFQQ(rq) ((rq)->elevator_private2) -static kmem_cache_t *cfq_pool; -static kmem_cache_t *cfq_ioc_pool; +static struct kmem_cache *cfq_pool; +static struct kmem_cache *cfq_ioc_pool; static DEFINE_PER_CPU(unsigned long, ioc_count); static struct completion *ioc_gone; @@ -456,6 +456,9 @@ static void cfq_add_rq_rb(struct request *rq) */ while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL) cfq_dispatch_insert(cfqd->queue, __alias); + + if (!cfq_cfqq_on_rr(cfqq)) + cfq_add_cfqq_rr(cfqd, cfqq); } static inline void @@ -1215,11 +1218,12 @@ static inline void changed_ioprio(struct cfq_io_context *cic) { struct cfq_data *cfqd = cic->key; struct cfq_queue *cfqq; + unsigned long flags; if (unlikely(!cfqd)) return; - spin_lock(cfqd->queue->queue_lock); + spin_lock_irqsave(cfqd->queue->queue_lock, flags); cfqq = cic->cfqq[ASYNC]; if (cfqq) { @@ -1236,7 +1240,7 @@ static inline void changed_ioprio(struct cfq_io_context *cic) if (cfqq) cfq_mark_cfqq_prio_changed(cfqq); - spin_unlock(cfqd->queue->queue_lock); + spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); } static void cfq_ioc_set_ioprio(struct io_context *ioc) @@ -1362,6 +1366,7 @@ cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc, struct rb_node **p; struct rb_node *parent; struct cfq_io_context *__cic; + unsigned long flags; void *k; cic->ioc 
= ioc; @@ -1391,9 +1396,9 @@ restart: rb_link_node(&cic->rb_node, parent, p); rb_insert_color(&cic->rb_node, &ioc->cic_root); - spin_lock_irq(cfqd->queue->queue_lock); + spin_lock_irqsave(cfqd->queue->queue_lock, flags); list_add(&cic->queue_list, &cfqd->cic_list); - spin_unlock_irq(cfqd->queue->queue_lock); + spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); } /* @@ -1459,8 +1464,7 @@ cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic) } static void -cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic, - struct request *rq) +cfq_update_io_seektime(struct cfq_io_context *cic, struct request *rq) { sector_t sdist; u64 total; @@ -1612,7 +1616,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, } cfq_update_io_thinktime(cfqd, cic); - cfq_update_io_seektime(cfqd, cic, rq); + cfq_update_io_seektime(cic, rq); cfq_update_idle_window(cfqd, cfqq, cic); cic->last_queue = jiffies; @@ -1650,9 +1654,6 @@ static void cfq_insert_request(request_queue_t *q, struct request *rq) cfq_add_rq_rb(rq); - if (!cfq_cfqq_on_rr(cfqq)) - cfq_add_cfqq_rr(cfqd, cfqq); - list_add_tail(&rq->queuelist, &cfqq->fifo); cfq_rq_enqueued(cfqd, cfqq, rq); @@ -1768,7 +1769,7 @@ static int cfq_may_queue(request_queue_t *q, int rw) /* * queue lock held here */ -static void cfq_put_request(request_queue_t *q, struct request *rq) +static void cfq_put_request(struct request *rq) { struct cfq_queue *cfqq = RQ_CFQQ(rq); @@ -1839,9 +1840,11 @@ queue_fail: return 1; } -static void cfq_kick_queue(void *data) +static void cfq_kick_queue(struct work_struct *work) { - request_queue_t *q = data; + struct cfq_data *cfqd = + container_of(work, struct cfq_data, unplug_work); + request_queue_t *q = cfqd->queue; unsigned long flags; spin_lock_irqsave(q->queue_lock, flags); @@ -1949,7 +1952,7 @@ static void cfq_exit_queue(elevator_t *e) kfree(cfqd); } -static void *cfq_init_queue(request_queue_t *q, elevator_t *e) +static void *cfq_init_queue(request_queue_t *q) { struct cfq_data *cfqd; int i; @@ -1985,7 +1988,7 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e) cfqd->idle_class_timer.function = cfq_idle_class_timer; cfqd->idle_class_timer.data = (unsigned long) cfqd; - INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q); + INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); cfqd->cfq_quantum = cfq_quantum; cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c index b7c5b34cb7b..6d673e938d3 100644 --- a/block/deadline-iosched.c +++ b/block/deadline-iosched.c @@ -356,7 +356,7 @@ static void deadline_exit_queue(elevator_t *e) /* * initialize elevator private data (deadline_data). 
*/ -static void *deadline_init_queue(request_queue_t *q, elevator_t *e) +static void *deadline_init_queue(request_queue_t *q) { struct deadline_data *dd; diff --git a/block/elevator.c b/block/elevator.c index 8ccd163254b..c0063f345c5 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -129,7 +129,7 @@ static struct elevator_type *elevator_get(const char *name) static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq) { - return eq->ops->elevator_init_fn(q, eq); + return eq->ops->elevator_init_fn(q); } static void elevator_attach(request_queue_t *q, struct elevator_queue *eq, @@ -810,7 +810,7 @@ void elv_put_request(request_queue_t *q, struct request *rq) elevator_t *e = q->elevator; if (e->ops->elevator_put_req_fn) - e->ops->elevator_put_req_fn(q, rq); + e->ops->elevator_put_req_fn(rq); } int elv_may_queue(request_queue_t *q, int rw) diff --git a/block/genhd.c b/block/genhd.c index 653919d50cd..457fdac4c17 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -417,6 +417,34 @@ static struct disk_attribute disk_attr_stat = { .show = disk_stats_read }; +#ifdef CONFIG_FAIL_MAKE_REQUEST + +static ssize_t disk_fail_store(struct gendisk * disk, + const char *buf, size_t count) +{ + int i; + + if (count > 0 && sscanf(buf, "%d", &i) > 0) { + if (i == 0) + disk->flags &= ~GENHD_FL_FAIL; + else + disk->flags |= GENHD_FL_FAIL; + } + + return count; +} +static ssize_t disk_fail_read(struct gendisk * disk, char *page) +{ + return sprintf(page, "%d\n", disk->flags & GENHD_FL_FAIL ? 1 : 0); +} +static struct disk_attribute disk_attr_fail = { + .attr = {.name = "make-it-fail", .mode = S_IRUGO | S_IWUSR }, + .store = disk_fail_store, + .show = disk_fail_read +}; + +#endif + static struct attribute * default_attrs[] = { &disk_attr_uevent.attr, &disk_attr_dev.attr, @@ -424,6 +452,9 @@ static struct attribute * default_attrs[] = { &disk_attr_removable.attr, &disk_attr_size.attr, &disk_attr_stat.attr, +#ifdef CONFIG_FAIL_MAKE_REQUEST + &disk_attr_fail.attr, +#endif NULL, }; diff --git a/block/ioctl.c b/block/ioctl.c index 58aab630dfc..f6962b64660 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -72,7 +72,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user bdevp = bdget_disk(disk, part); if (!bdevp) return -ENOMEM; - mutex_lock_nested(&bdevp->bd_mutex, BD_MUTEX_PARTITION); + mutex_lock(&bdevp->bd_mutex); if (bdevp->bd_openers) { mutex_unlock(&bdevp->bd_mutex); bdput(bdevp); @@ -82,7 +82,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user fsync_bdev(bdevp); invalidate_bdev(bdevp, 0); - mutex_lock_nested(&bdev->bd_mutex, BD_MUTEX_WHOLE); + mutex_lock(&bdev->bd_mutex); delete_partition(disk, part); mutex_unlock(&bdev->bd_mutex); mutex_unlock(&bdevp->bd_mutex); @@ -290,7 +290,7 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd, ENOIOCTLCMD for unknown ioctls. 
*/ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) { - struct block_device *bdev = file->f_dentry->d_inode->i_bdev; + struct block_device *bdev = file->f_path.dentry->d_inode->i_bdev; struct gendisk *disk = bdev->bd_disk; int ret = -ENOIOCTLCMD; if (disk->fops->compat_ioctl) { diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index c847e17e5ca..a541b42c08e 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -25,16 +25,18 @@ #include <linux/slab.h> #include <linux/swap.h> #include <linux/writeback.h> +#include <linux/task_io_accounting_ops.h> #include <linux/interrupt.h> #include <linux/cpu.h> #include <linux/blktrace_api.h> +#include <linux/fault-inject.h> /* * for max sense size */ #include <scsi/scsi_cmnd.h> -static void blk_unplug_work(void *data); +static void blk_unplug_work(struct work_struct *work); static void blk_unplug_timeout(unsigned long data); static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); static void init_request_from_bio(struct request *req, struct bio *bio); @@ -44,22 +46,17 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node); /* * For the allocated request tables */ -static kmem_cache_t *request_cachep; +static struct kmem_cache *request_cachep; /* * For queue allocation */ -static kmem_cache_t *requestq_cachep; +static struct kmem_cache *requestq_cachep; /* * For io context allocations */ -static kmem_cache_t *iocontext_cachep; - -static wait_queue_head_t congestion_wqh[2] = { - __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]), - __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1]) - }; +static struct kmem_cache *iocontext_cachep; /* * Controlling structure to kblockd @@ -112,35 +109,6 @@ static void blk_queue_congestion_threshold(struct request_queue *q) q->nr_congestion_off = nr; } -/* - * A queue has just exitted congestion. Note this in the global counter of - * congested queues, and wake up anyone who was waiting for requests to be - * put back. - */ -static void clear_queue_congested(request_queue_t *q, int rw) -{ - enum bdi_state bit; - wait_queue_head_t *wqh = &congestion_wqh[rw]; - - bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested; - clear_bit(bit, &q->backing_dev_info.state); - smp_mb__after_clear_bit(); - if (waitqueue_active(wqh)) - wake_up(wqh); -} - -/* - * A queue has just entered congestion. Flag that in the queue's VM-visible - * state flags and increment the global gounter of congested queues. - */ -static void set_queue_congested(request_queue_t *q, int rw) -{ - enum bdi_state bit; - - bit = (rw == WRITE) ? 
BDI_write_congested : BDI_read_congested; - set_bit(bit, &q->backing_dev_info.state); -} - /** * blk_get_backing_dev_info - get the address of a queue's backing_dev_info * @bdev: device @@ -159,17 +127,8 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) ret = &q->backing_dev_info; return ret; } - EXPORT_SYMBOL(blk_get_backing_dev_info); -void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data) -{ - q->activity_fn = fn; - q->activity_data = data; -} - -EXPORT_SYMBOL(blk_queue_activity_fn); - /** * blk_queue_prep_rq - set a prepare_request function for queue * @q: queue @@ -263,7 +222,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) if (q->unplug_delay == 0) q->unplug_delay = 1; - INIT_WORK(&q->unplug_work, blk_unplug_work, q); + INIT_WORK(&q->unplug_work, blk_unplug_work); q->unplug_timer.function = blk_unplug_timeout; q->unplug_timer.data = (unsigned long)q; @@ -272,8 +231,6 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) * by default assume old behaviour and bounce for any highmem page */ blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); - - blk_queue_activity_fn(q, NULL, NULL); } EXPORT_SYMBOL(blk_queue_make_request); @@ -1667,9 +1624,9 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi, } } -static void blk_unplug_work(void *data) +static void blk_unplug_work(struct work_struct *work) { - request_queue_t *q = data; + request_queue_t *q = container_of(work, request_queue_t, unplug_work); blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, q->rq.count[READ] + q->rq.count[WRITE]); @@ -2067,7 +2024,7 @@ static void __freed_request(request_queue_t *q, int rw) struct request_list *rl = &q->rq; if (rl->count[rw] < queue_congestion_off_threshold(q)) - clear_queue_congested(q, rw); + blk_clear_queue_congested(q, rw); if (rl->count[rw] + 1 <= q->nr_requests) { if (waitqueue_active(&rl->wait[rw])) @@ -2137,7 +2094,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio, } } } - set_queue_congested(q, rw); + blk_set_queue_congested(q, rw); } /* @@ -2358,6 +2315,84 @@ void blk_insert_request(request_queue_t *q, struct request *rq, EXPORT_SYMBOL(blk_insert_request); +static int __blk_rq_unmap_user(struct bio *bio) +{ + int ret = 0; + + if (bio) { + if (bio_flagged(bio, BIO_USER_MAPPED)) + bio_unmap_user(bio); + else + ret = bio_uncopy_user(bio); + } + + return ret; +} + +static int __blk_rq_map_user(request_queue_t *q, struct request *rq, + void __user *ubuf, unsigned int len) +{ + unsigned long uaddr; + struct bio *bio, *orig_bio; + int reading, ret; + + reading = rq_data_dir(rq) == READ; + + /* + * if alignment requirement is satisfied, map in user pages for + * direct dma. else, set up kernel bounce buffers + */ + uaddr = (unsigned long) ubuf; + if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q))) + bio = bio_map_user(q, NULL, uaddr, len, reading); + else + bio = bio_copy_user(q, uaddr, len, reading); + + if (IS_ERR(bio)) { + return PTR_ERR(bio); + } + + orig_bio = bio; + blk_queue_bounce(q, &bio); + /* + * We link the bounce buffer in and could have to traverse it + * later so we have to get a ref to prevent it from being freed + */ + bio_get(bio); + + /* + * for most (all? don't know of any) queues we could + * skip grabbing the queue lock here. only drivers with + * funky private ->back_merge_fn() function could be + * problematic. 
+ */ + spin_lock_irq(q->queue_lock); + if (!rq->bio) + blk_rq_bio_prep(q, rq, bio); + else if (!q->back_merge_fn(q, rq, bio)) { + ret = -EINVAL; + spin_unlock_irq(q->queue_lock); + goto unmap_bio; + } else { + rq->biotail->bi_next = bio; + rq->biotail = bio; + + rq->nr_sectors += bio_sectors(bio); + rq->hard_nr_sectors = rq->nr_sectors; + rq->data_len += bio->bi_size; + } + spin_unlock_irq(q->queue_lock); + + return bio->bi_size; + +unmap_bio: + /* if it was boucned we must call the end io function */ + bio_endio(bio, bio->bi_size, 0); + __blk_rq_unmap_user(orig_bio); + bio_put(bio); + return ret; +} + /** * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage * @q: request queue where request should be inserted @@ -2379,42 +2414,44 @@ EXPORT_SYMBOL(blk_insert_request); * unmapping. */ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf, - unsigned int len) + unsigned long len) { - unsigned long uaddr; - struct bio *bio; - int reading; + unsigned long bytes_read = 0; + int ret; if (len > (q->max_hw_sectors << 9)) return -EINVAL; if (!len || !ubuf) return -EINVAL; - reading = rq_data_dir(rq) == READ; + while (bytes_read != len) { + unsigned long map_len, end, start; - /* - * if alignment requirement is satisfied, map in user pages for - * direct dma. else, set up kernel bounce buffers - */ - uaddr = (unsigned long) ubuf; - if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q))) - bio = bio_map_user(q, NULL, uaddr, len, reading); - else - bio = bio_copy_user(q, uaddr, len, reading); + map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE); + end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1) + >> PAGE_SHIFT; + start = (unsigned long)ubuf >> PAGE_SHIFT; - if (!IS_ERR(bio)) { - rq->bio = rq->biotail = bio; - blk_rq_bio_prep(q, rq, bio); + /* + * A bad offset could cause us to require BIO_MAX_PAGES + 1 + * pages. If this happens we just lower the requested + * mapping len by a page so that we can fit + */ + if (end - start > BIO_MAX_PAGES) + map_len -= PAGE_SIZE; - rq->buffer = rq->data = NULL; - rq->data_len = len; - return 0; + ret = __blk_rq_map_user(q, rq, ubuf, map_len); + if (ret < 0) + goto unmap_rq; + bytes_read += ret; + ubuf += ret; } - /* - * bio is the err-ptr - */ - return PTR_ERR(bio); + rq->buffer = rq->data = NULL; + return 0; +unmap_rq: + blk_rq_unmap_user(rq); + return ret; } EXPORT_SYMBOL(blk_rq_map_user); @@ -2440,7 +2477,7 @@ EXPORT_SYMBOL(blk_rq_map_user); * unmapping. */ int blk_rq_map_user_iov(request_queue_t *q, struct request *rq, - struct sg_iovec *iov, int iov_count) + struct sg_iovec *iov, int iov_count, unsigned int len) { struct bio *bio; @@ -2454,10 +2491,15 @@ int blk_rq_map_user_iov(request_queue_t *q, struct request *rq, if (IS_ERR(bio)) return PTR_ERR(bio); - rq->bio = rq->biotail = bio; + if (bio->bi_size != len) { + bio_endio(bio, bio->bi_size, 0); + bio_unmap_user(bio); + return -EINVAL; + } + + bio_get(bio); blk_rq_bio_prep(q, rq, bio); rq->buffer = rq->data = NULL; - rq->data_len = bio->bi_size; return 0; } @@ -2465,23 +2507,26 @@ EXPORT_SYMBOL(blk_rq_map_user_iov); /** * blk_rq_unmap_user - unmap a request with user data - * @bio: bio to be unmapped - * @ulen: length of user buffer + * @rq: rq to be unmapped * * Description: - * Unmap a bio previously mapped by blk_rq_map_user(). + * Unmap a rq previously mapped by blk_rq_map_user(). + * rq->bio must be set to the original head of the request. 
*/ -int blk_rq_unmap_user(struct bio *bio, unsigned int ulen) +int blk_rq_unmap_user(struct request *rq) { - int ret = 0; + struct bio *bio, *mapped_bio; - if (bio) { - if (bio_flagged(bio, BIO_USER_MAPPED)) - bio_unmap_user(bio); + while ((bio = rq->bio)) { + if (bio_flagged(bio, BIO_BOUNCED)) + mapped_bio = bio->bi_private; else - ret = bio_uncopy_user(bio); - } + mapped_bio = bio; + __blk_rq_unmap_user(mapped_bio); + rq->bio = bio->bi_next; + bio_put(bio); + } return 0; } @@ -2512,11 +2557,8 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf, if (rq_data_dir(rq) == WRITE) bio->bi_rw |= (1 << BIO_RW); - rq->bio = rq->biotail = bio; blk_rq_bio_prep(q, rq, bio); - rq->buffer = rq->data = NULL; - rq->data_len = len; return 0; } @@ -2645,9 +2687,6 @@ static inline void add_request(request_queue_t * q, struct request * req) { drive_stat_acct(req, req->nr_sectors, 1); - if (q->activity_fn) - q->activity_fn(q->activity_data, rq_data_dir(req)); - /* * elevator indicated where it wants this request to be * inserted at elevator_merge time @@ -2755,41 +2794,6 @@ void blk_end_sync_rq(struct request *rq, int error) } EXPORT_SYMBOL(blk_end_sync_rq); -/** - * blk_congestion_wait - wait for a queue to become uncongested - * @rw: READ or WRITE - * @timeout: timeout in jiffies - * - * Waits for up to @timeout jiffies for a queue (any queue) to exit congestion. - * If no queues are congested then just wait for the next request to be - * returned. - */ -long blk_congestion_wait(int rw, long timeout) -{ - long ret; - DEFINE_WAIT(wait); - wait_queue_head_t *wqh = &congestion_wqh[rw]; - - prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE); - ret = io_schedule_timeout(timeout); - finish_wait(wqh, &wait); - return ret; -} - -EXPORT_SYMBOL(blk_congestion_wait); - -/** - * blk_congestion_end - wake up sleepers on a congestion queue - * @rw: READ or WRITE - */ -void blk_congestion_end(int rw) -{ - wait_queue_head_t *wqh = &congestion_wqh[rw]; - - if (waitqueue_active(wqh)) - wake_up(wqh); -} - /* * Has to be called with the request spinlock acquired */ @@ -3042,6 +3046,42 @@ static void handle_bad_sector(struct bio *bio) set_bit(BIO_EOF, &bio->bi_flags); } +#ifdef CONFIG_FAIL_MAKE_REQUEST + +static DECLARE_FAULT_ATTR(fail_make_request); + +static int __init setup_fail_make_request(char *str) +{ + return setup_fault_attr(&fail_make_request, str); +} +__setup("fail_make_request=", setup_fail_make_request); + +static int should_fail_request(struct bio *bio) +{ + if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) || + (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail)) + return should_fail(&fail_make_request, bio->bi_size); + + return 0; +} + +static int __init fail_make_request_debugfs(void) +{ + return init_fault_attr_dentries(&fail_make_request, + "fail_make_request"); +} + +late_initcall(fail_make_request_debugfs); + +#else /* CONFIG_FAIL_MAKE_REQUEST */ + +static inline int should_fail_request(struct bio *bio) +{ + return 0; +} + +#endif /* CONFIG_FAIL_MAKE_REQUEST */ + /** * generic_make_request: hand a buffer to its device driver for I/O * @bio: The bio describing the location in memory and on the device. @@ -3070,6 +3110,7 @@ void generic_make_request(struct bio *bio) { request_queue_t *q; sector_t maxsector; + sector_t old_sector; int ret, nr_sectors = bio_sectors(bio); dev_t old_dev; @@ -3098,7 +3139,7 @@ void generic_make_request(struct bio *bio) * NOTE: we don't repeat the blk_size check for each new device. * Stacking drivers are expected to know what they are doing. 
*/ - maxsector = -1; + old_sector = -1; old_dev = 0; do { char b[BDEVNAME_SIZE]; @@ -3126,21 +3167,40 @@ end_io: if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) goto end_io; + if (should_fail_request(bio)) + goto end_io; + /* * If this device has partitions, remap block n * of partition p to block n+start(p) of the disk. */ blk_partition_remap(bio); - if (maxsector != -1) + if (old_sector != -1) blk_add_trace_remap(q, bio, old_dev, bio->bi_sector, - maxsector); + old_sector); blk_add_trace_bio(q, bio, BLK_TA_QUEUE); - maxsector = bio->bi_sector; + old_sector = bio->bi_sector; old_dev = bio->bi_bdev->bd_dev; + maxsector = bio->bi_bdev->bd_inode->i_size >> 9; + if (maxsector) { + sector_t sector = bio->bi_sector; + + if (maxsector < nr_sectors || + maxsector - nr_sectors < sector) { + /* + * This may well happen - partitions are not + * checked to make sure they are within the size + * of the whole device. + */ + handle_bad_sector(bio); + goto end_io; + } + } + ret = q->make_request_fn(q, bio); } while (ret); } @@ -3164,10 +3224,12 @@ void submit_bio(int rw, struct bio *bio) BIO_BUG_ON(!bio->bi_size); BIO_BUG_ON(!bio->bi_io_vec); bio->bi_rw |= rw; - if (rw & WRITE) + if (rw & WRITE) { count_vm_events(PGPGOUT, count); - else + } else { + task_io_account_read(bio->bi_size); count_vm_events(PGPGIN, count); + } if (unlikely(block_dump)) { char b[BDEVNAME_SIZE]; @@ -3428,8 +3490,6 @@ static void blk_done_softirq(struct softirq_action *h) } } -#ifdef CONFIG_HOTPLUG_CPU - static int blk_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { @@ -3455,8 +3515,6 @@ static struct notifier_block __devinitdata blk_cpu_notifier = { .notifier_call = blk_cpu_notify, }; -#endif /* CONFIG_HOTPLUG_CPU */ - /** * blk_complete_request - end I/O on a request * @req: the request being processed @@ -3549,6 +3607,7 @@ void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio) rq->hard_cur_sectors = rq->current_nr_sectors; rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio); rq->buffer = bio_data(bio); + rq->data_len = bio->bi_size; rq->bio = rq->biotail = bio; } @@ -3765,14 +3824,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) blk_queue_congestion_threshold(q); if (rl->count[READ] >= queue_congestion_on_threshold(q)) - set_queue_congested(q, READ); + blk_set_queue_congested(q, READ); else if (rl->count[READ] < queue_congestion_off_threshold(q)) - clear_queue_congested(q, READ); + blk_clear_queue_congested(q, READ); if (rl->count[WRITE] >= queue_congestion_on_threshold(q)) - set_queue_congested(q, WRITE); + blk_set_queue_congested(q, WRITE); else if (rl->count[WRITE] < queue_congestion_off_threshold(q)) - clear_queue_congested(q, WRITE); + blk_clear_queue_congested(q, WRITE); if (rl->count[READ] >= q->nr_requests) { blk_set_queue_full(q, READ); diff --git a/block/noop-iosched.c b/block/noop-iosched.c index 79af4317942..1c3de2b9a6b 100644 --- a/block/noop-iosched.c +++ b/block/noop-iosched.c @@ -65,7 +65,7 @@ noop_latter_request(request_queue_t *q, struct request *rq) return list_entry(rq->queuelist.next, struct request, queuelist); } -static void *noop_init_queue(request_queue_t *q, elevator_t *e) +static void *noop_init_queue(request_queue_t *q) { struct noop_data *nd; diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 2dc326421a2..f322b6a441d 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -226,9 +226,9 @@ static int sg_io(struct file *file, request_queue_t *q, unsigned long start_time; int writing = 0, ret 
= 0; struct request *rq; - struct bio *bio; char sense[SCSI_SENSE_BUFFERSIZE]; unsigned char cmd[BLK_MAX_CDB]; + struct bio *bio; if (hdr->interface_id != 'S') return -EINVAL; @@ -246,10 +246,10 @@ static int sg_io(struct file *file, request_queue_t *q, switch (hdr->dxfer_direction) { default: return -EINVAL; - case SG_DXFER_TO_FROM_DEV: case SG_DXFER_TO_DEV: writing = 1; break; + case SG_DXFER_TO_FROM_DEV: case SG_DXFER_FROM_DEV: break; } @@ -258,6 +258,25 @@ static int sg_io(struct file *file, request_queue_t *q, if (!rq) return -ENOMEM; + /* + * fill in request structure + */ + rq->cmd_len = hdr->cmd_len; + memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */ + memcpy(rq->cmd, cmd, hdr->cmd_len); + + memset(sense, 0, sizeof(sense)); + rq->sense = sense; + rq->sense_len = 0; + + rq->cmd_type = REQ_TYPE_BLOCK_PC; + + rq->timeout = jiffies_to_msecs(hdr->timeout); + if (!rq->timeout) + rq->timeout = q->sg_timeout; + if (!rq->timeout) + rq->timeout = BLK_DEFAULT_TIMEOUT; + if (hdr->iovec_count) { const int size = sizeof(struct sg_iovec) * hdr->iovec_count; struct sg_iovec *iov; @@ -274,7 +293,8 @@ static int sg_io(struct file *file, request_queue_t *q, goto out; } - ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count); + ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count, + hdr->dxfer_len); kfree(iov); } else if (hdr->dxfer_len) ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len); @@ -282,34 +302,7 @@ static int sg_io(struct file *file, request_queue_t *q, if (ret) goto out; - /* - * fill in request structure - */ - rq->cmd_len = hdr->cmd_len; - memcpy(rq->cmd, cmd, hdr->cmd_len); - if (sizeof(rq->cmd) != hdr->cmd_len) - memset(rq->cmd + hdr->cmd_len, 0, sizeof(rq->cmd) - hdr->cmd_len); - - memset(sense, 0, sizeof(sense)); - rq->sense = sense; - rq->sense_len = 0; - - rq->cmd_type = REQ_TYPE_BLOCK_PC; bio = rq->bio; - - /* - * bounce this after holding a reference to the original bio, it's - * needed for proper unmapping - */ - if (rq->bio) - blk_queue_bounce(q, &rq->bio); - - rq->timeout = (hdr->timeout * HZ) / 1000; - if (!rq->timeout) - rq->timeout = q->sg_timeout; - if (!rq->timeout) - rq->timeout = BLK_DEFAULT_TIMEOUT; - rq->retries = 0; start_time = jiffies; @@ -340,7 +333,8 @@ static int sg_io(struct file *file, request_queue_t *q, hdr->sb_len_wr = len; } - if (blk_rq_unmap_user(bio, hdr->dxfer_len)) + rq->bio = bio; + if (blk_rq_unmap_user(rq)) ret = -EFAULT; /* may not have succeeded, but output values written to control |
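
The as_work_handler(), cfq_kick_queue() and blk_unplug_work() hunks are all the same 2.6.20 workqueue conversion: handlers now take a struct work_struct * instead of a void *data cookie, INIT_WORK() loses its data argument, and the owning object is recovered with container_of(). A minimal sketch of the pattern, with hypothetical example_* names:

#include <linux/blkdev.h>
#include <linux/workqueue.h>

struct example_data {
	request_queue_t *q;
	struct work_struct work;	/* embedded in the owning object */
};

/* new-style handler: no void *data, the work item itself is passed in */
static void example_work_handler(struct work_struct *work)
{
	struct example_data *ed = container_of(work, struct example_data, work);

	spin_lock_irq(ed->q->queue_lock);
	/* ... kick the queue ... */
	spin_unlock_irq(ed->q->queue_lock);
}

static void example_init(struct example_data *ed, request_queue_t *q)
{
	ed->q = q;
	/* old API: INIT_WORK(&ed->work, example_work_handler, q); */
	INIT_WORK(&ed->work, example_work_handler);
}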
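
The new trace_note_time() in blktrace.c records wall-clock time as two u32 words alongside the sched_clock()-based per-event timestamps, so a trace reader can anchor relative times to real time. A reader-side sketch of decoding that payload (plain userspace C, illustrative names):

#include <stdint.h>
#include <time.h>

/* trace_note_time() stores tv_sec in words[0] and tv_nsec in words[1] */
static void example_decode_blktrace_timestamp(const uint32_t words[2],
					       struct timespec *ts)
{
	ts->tv_sec  = (time_t)words[0];
	ts->tv_nsec = (long)words[1];
}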
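
changed_ioprio() and cfq_cic_link() switch the queue lock from spin_lock()/spin_lock_irq() to spin_lock_irqsave(), the form that is safe regardless of whether the caller already has interrupts disabled. The pattern, as a minimal sketch:

#include <linux/blkdev.h>

static void example_touch_queue_state(request_queue_t *q)
{
	unsigned long flags;

	/* neither assumes nor clobbers the caller's interrupt state */
	spin_lock_irqsave(q->queue_lock, flags);
	/* ... modify state also reachable from interrupt context ... */
	spin_unlock_irqrestore(q->queue_lock, flags);
}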
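
Every I/O scheduler plus elevator.c drops unused arguments from two elevator hooks: elevator_init_fn no longer receives the elevator_t, and elevator_put_req_fn no longer receives the queue. A sketch of the new-style hooks for a hypothetical scheduler (shaped after the noop scheduler; treat it as an illustration, not the exact header):

#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/module.h>
#include <linux/slab.h>

struct example_data {
	struct list_head queue;
};

/* before: static void *example_init_queue(request_queue_t *q, elevator_t *e) */
static void *example_init_queue(request_queue_t *q)
{
	struct example_data *ed;

	ed = kmalloc_node(sizeof(*ed), GFP_KERNEL, q->node);
	if (ed)
		INIT_LIST_HEAD(&ed->queue);
	return ed;
}

/* before: static void example_put_request(request_queue_t *q, struct request *rq) */
static void example_put_request(struct request *rq)
{
	/* release per-request state hung off rq->elevator_private */
}

static struct elevator_type iosched_example = {
	.ops = {
		.elevator_init_fn	= example_init_queue,
		.elevator_put_req_fn	= example_put_request,
	},
	.elevator_name	= "example",
	.elevator_owner	= THIS_MODULE,
};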
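
ll_rw_blk.c drops its private congestion waitqueues, clear/set_queue_congested() and blk_congestion_wait()/blk_congestion_end(), and calls blk_clear_queue_congested()/blk_set_queue_congested() instead. The sketch below assumes the shared replacements track congestion on the queue's backing_dev_info via set_bdi_congested()/clear_bdi_congested(), with a generic congestion_wait() taking over the waiting side; the example_* wrappers only show the intent, not the authoritative implementation:

#include <linux/backing-dev.h>
#include <linux/blkdev.h>

/* roughly what the removed file-local helpers did, rephrased against the
 * queue's backing_dev_info; the shared code also wakes up any waiters */
static inline void example_set_queue_congested(request_queue_t *q, int rw)
{
	set_bdi_congested(&q->backing_dev_info, rw);
}

static inline void example_clear_queue_congested(request_queue_t *q, int rw)
{
	clear_bdi_congested(&q->backing_dev_info, rw);
}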
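
blk_rq_map_user() now maps a user buffer in chunks of at most BIO_MAX_SIZE, chaining the resulting bios onto the request, and blk_rq_unmap_user() takes the request rather than a (bio, length) pair. A caller-side sketch, modelled on what sg_io() does after this merge (hypothetical function, read-only case):

#include <linux/blkdev.h>
#include <linux/string.h>

static int example_pc_read(request_queue_t *q, struct gendisk *disk,
			   unsigned char *cdb, unsigned int cdb_len,
			   void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	/* fill in the request *before* mapping, as sg_io() now does */
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	memset(rq->cmd, 0, BLK_MAX_CDB);
	memcpy(rq->cmd, cdb, cdb_len);
	rq->cmd_len = cdb_len;
	rq->timeout = BLK_DEFAULT_TIMEOUT;

	/* may build and chain several bios for a large buffer */
	ret = blk_rq_map_user(q, rq, ubuf, len);
	if (ret) {
		blk_put_request(rq);
		return ret;
	}

	bio = rq->bio;			/* completion can advance rq->bio */
	blk_execute_rq(q, disk, rq, 0);
	rq->bio = bio;			/* unmap walks from the original head */

	if (blk_rq_unmap_user(rq))
		ret = -EFAULT;
	blk_put_request(rq);
	return ret;
}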
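
The genhd.c and ll_rw_blk.c hunks introduce CONFIG_FAIL_MAKE_REQUEST on top of the fault-injection framework (linux/fault-inject.h). A sketch of wiring the same framework into some other hypothetical path, using only the calls this merge itself relies on (attribute and option names are illustrative):

#include <linux/fault-inject.h>
#include <linux/init.h>

static DECLARE_FAULT_ATTR(fail_example);

/* boot option: fail_example=<interval>,<probability>,<space>,<times> */
static int __init setup_fail_example(char *str)
{
	return setup_fault_attr(&fail_example, str);
}
__setup("fail_example=", setup_fail_example);

/* call on the hot path; nonzero means "inject a failure now" */
static int should_fail_example(size_t bytes)
{
	return should_fail(&fail_example, bytes);
}

static int __init fail_example_debugfs(void)
{
	/* exposes the interval/probability/... knobs under debugfs */
	return init_fault_attr_dentries(&fail_example, "fail_example");
}
late_initcall(fail_example_debugfs);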
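
generic_make_request() now repeats the end-of-device check after blk_partition_remap(), and the comparison is ordered so the sector_t arithmetic cannot wrap. The same test, pulled out as a small sketch with a worked example in the comment:

#include <linux/types.h>

/* nonzero if [sector, sector + nr_sectors) runs past maxsector */
static int example_past_end(sector_t maxsector, sector_t sector,
			    unsigned int nr_sectors)
{
	/* subtracting first avoids overflowing sector_t:
	 * e.g. maxsector = 1000, sector = 996, nr_sectors = 8
	 *      -> 1000 - 8 = 992 < 996, so the bio is rejected */
	return maxsector < nr_sectors || maxsector - nr_sectors < sector;
}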