From 6c5c934153513dc72e2d6464f39e8ef1f27c0a3e Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Fri, 29 Sep 2006 01:59:40 -0700 Subject: [PATCH] ifdef blktrace debugging fields Signed-off-by: Alexey Dobriyan Acked-by: Jens Axboe Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- block/ll_rw_blk.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'block/ll_rw_blk.c') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 9c3a06bcb7b..51dc0edf76e 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -1847,8 +1847,7 @@ static void blk_release_queue(struct kobject *kobj) if (q->queue_tags) __blk_queue_free_tags(q); - if (q->blk_trace) - blk_trace_shutdown(q); + blk_trace_shutdown(q); kmem_cache_free(requestq_cachep, q); } -- cgit v1.2.3 From 4aff5e2333c9a1609662f2091f55c3f6fffdad36 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 10 Aug 2006 08:44:47 +0200 Subject: [PATCH] Split struct request ->flags into two parts Right now ->flags is a bit of a mess: some are request types, and others are just modifiers. Clean this up by splitting it into ->cmd_type and ->cmd_flags. This allows introduction of generic Linux block message types, useful for sending generic Linux commands to block devices. Signed-off-by: Jens Axboe --- block/ll_rw_blk.c | 101 +++++++++++++++++++----------------------------------- 1 file changed, 35 insertions(+), 66 deletions(-) (limited to 'block/ll_rw_blk.c') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 51dc0edf76e..9b91bb70c5e 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -382,8 +382,8 @@ unsigned blk_ordered_req_seq(struct request *rq) if (rq == &q->post_flush_rq) return QUEUE_ORDSEQ_POSTFLUSH; - if ((rq->flags & REQ_ORDERED_COLOR) == - (q->orig_bar_rq->flags & REQ_ORDERED_COLOR)) + if ((rq->cmd_flags & REQ_ORDERED_COLOR) == + (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR)) return QUEUE_ORDSEQ_DRAIN; else return QUEUE_ORDSEQ_DONE; @@ -446,8 +446,8 @@ static void queue_flush(request_queue_t *q, unsigned which) end_io = post_flush_end_io; } + rq->cmd_flags = REQ_HARDBARRIER; rq_init(q, rq); - rq->flags = REQ_HARDBARRIER; rq->elevator_private = NULL; rq->rq_disk = q->bar_rq.rq_disk; rq->rl = NULL; @@ -471,9 +471,11 @@ static inline struct request *start_ordered(request_queue_t *q, blkdev_dequeue_request(rq); q->orig_bar_rq = rq; rq = &q->bar_rq; + rq->cmd_flags = 0; rq_init(q, rq); - rq->flags = bio_data_dir(q->orig_bar_rq->bio); - rq->flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0; + if (bio_data_dir(q->orig_bar_rq->bio) == WRITE) + rq->cmd_flags |= REQ_RW; + rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? 
REQ_FUA : 0; rq->elevator_private = NULL; rq->rl = NULL; init_request_from_bio(rq, q->orig_bar_rq->bio); @@ -1124,7 +1126,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq) } list_del_init(&rq->queuelist); - rq->flags &= ~REQ_QUEUED; + rq->cmd_flags &= ~REQ_QUEUED; rq->tag = -1; if (unlikely(bqt->tag_index[tag] == NULL)) @@ -1160,7 +1162,7 @@ int blk_queue_start_tag(request_queue_t *q, struct request *rq) struct blk_queue_tag *bqt = q->queue_tags; int tag; - if (unlikely((rq->flags & REQ_QUEUED))) { + if (unlikely((rq->cmd_flags & REQ_QUEUED))) { printk(KERN_ERR "%s: request %p for device [%s] already tagged %d", __FUNCTION__, rq, @@ -1174,7 +1176,7 @@ int blk_queue_start_tag(request_queue_t *q, struct request *rq) __set_bit(tag, bqt->tag_map); - rq->flags |= REQ_QUEUED; + rq->cmd_flags |= REQ_QUEUED; rq->tag = tag; bqt->tag_index[tag] = rq; blkdev_dequeue_request(rq); @@ -1210,65 +1212,31 @@ void blk_queue_invalidate_tags(request_queue_t *q) printk(KERN_ERR "%s: bad tag found on list\n", __FUNCTION__); list_del_init(&rq->queuelist); - rq->flags &= ~REQ_QUEUED; + rq->cmd_flags &= ~REQ_QUEUED; } else blk_queue_end_tag(q, rq); - rq->flags &= ~REQ_STARTED; + rq->cmd_flags &= ~REQ_STARTED; __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0); } } EXPORT_SYMBOL(blk_queue_invalidate_tags); -static const char * const rq_flags[] = { - "REQ_RW", - "REQ_FAILFAST", - "REQ_SORTED", - "REQ_SOFTBARRIER", - "REQ_HARDBARRIER", - "REQ_FUA", - "REQ_CMD", - "REQ_NOMERGE", - "REQ_STARTED", - "REQ_DONTPREP", - "REQ_QUEUED", - "REQ_ELVPRIV", - "REQ_PC", - "REQ_BLOCK_PC", - "REQ_SENSE", - "REQ_FAILED", - "REQ_QUIET", - "REQ_SPECIAL", - "REQ_DRIVE_CMD", - "REQ_DRIVE_TASK", - "REQ_DRIVE_TASKFILE", - "REQ_PREEMPT", - "REQ_PM_SUSPEND", - "REQ_PM_RESUME", - "REQ_PM_SHUTDOWN", - "REQ_ORDERED_COLOR", -}; - void blk_dump_rq_flags(struct request *rq, char *msg) { int bit; - printk("%s: dev %s: flags = ", msg, - rq->rq_disk ? rq->rq_disk->disk_name : "?"); - bit = 0; - do { - if (rq->flags & (1 << bit)) - printk("%s ", rq_flags[bit]); - bit++; - } while (bit < __REQ_NR_BITS); + printk("%s: dev %s: type=%x, flags=%x\n", msg, + rq->rq_disk ? 
rq->rq_disk->disk_name : "?", rq->cmd_type, + rq->cmd_flags); printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector, rq->nr_sectors, rq->current_nr_sectors); printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len); - if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) { + if (blk_pc_request(rq)) { printk("cdb: "); for (bit = 0; bit < sizeof(rq->cmd); bit++) printk("%02x ", rq->cmd[bit]); @@ -1441,7 +1409,7 @@ static inline int ll_new_mergeable(request_queue_t *q, int nr_phys_segs = bio_phys_segments(q, bio); if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) { - req->flags |= REQ_NOMERGE; + req->cmd_flags |= REQ_NOMERGE; if (req == q->last_merge) q->last_merge = NULL; return 0; @@ -1464,7 +1432,7 @@ static inline int ll_new_hw_segment(request_queue_t *q, if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) { - req->flags |= REQ_NOMERGE; + req->cmd_flags |= REQ_NOMERGE; if (req == q->last_merge) q->last_merge = NULL; return 0; @@ -1491,7 +1459,7 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req, max_sectors = q->max_sectors; if (req->nr_sectors + bio_sectors(bio) > max_sectors) { - req->flags |= REQ_NOMERGE; + req->cmd_flags |= REQ_NOMERGE; if (req == q->last_merge) q->last_merge = NULL; return 0; @@ -1530,7 +1498,7 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req, if (req->nr_sectors + bio_sectors(bio) > max_sectors) { - req->flags |= REQ_NOMERGE; + req->cmd_flags |= REQ_NOMERGE; if (req == q->last_merge) q->last_merge = NULL; return 0; @@ -2029,7 +1997,7 @@ EXPORT_SYMBOL(blk_get_queue); static inline void blk_free_request(request_queue_t *q, struct request *rq) { - if (rq->flags & REQ_ELVPRIV) + if (rq->cmd_flags & REQ_ELVPRIV) elv_put_request(q, rq); mempool_free(rq, q->rq.rq_pool); } @@ -2044,17 +2012,17 @@ blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, return NULL; /* - * first three bits are identical in rq->flags and bio->bi_rw, + * first three bits are identical in rq->cmd_flags and bio->bi_rw, * see bio.h and blkdev.h */ - rq->flags = rw; + rq->cmd_flags = rw; if (priv) { if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) { mempool_free(rq, q->rq.rq_pool); return NULL; } - rq->flags |= REQ_ELVPRIV; + rq->cmd_flags |= REQ_ELVPRIV; } return rq; @@ -2351,7 +2319,8 @@ void blk_insert_request(request_queue_t *q, struct request *rq, * must not attempt merges on this) and that it acts as a soft * barrier */ - rq->flags |= REQ_SPECIAL | REQ_SOFTBARRIER; + rq->cmd_type = REQ_TYPE_SPECIAL; + rq->cmd_flags |= REQ_SOFTBARRIER; rq->special = data; @@ -2558,7 +2527,7 @@ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk, int where = at_head ? 
ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; rq->rq_disk = bd_disk; - rq->flags |= REQ_NOMERGE; + rq->cmd_flags |= REQ_NOMERGE; rq->end_io = done; WARN_ON(irqs_disabled()); spin_lock_irq(q->queue_lock); @@ -2728,7 +2697,7 @@ void __blk_put_request(request_queue_t *q, struct request *req) */ if (rl) { int rw = rq_data_dir(req); - int priv = req->flags & REQ_ELVPRIV; + int priv = req->cmd_flags & REQ_ELVPRIV; BUG_ON(!list_empty(&req->queuelist)); @@ -2890,22 +2859,22 @@ static inline int attempt_front_merge(request_queue_t *q, struct request *rq) static void init_request_from_bio(struct request *req, struct bio *bio) { - req->flags |= REQ_CMD; + req->cmd_type = REQ_TYPE_FS; /* * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST) */ if (bio_rw_ahead(bio) || bio_failfast(bio)) - req->flags |= REQ_FAILFAST; + req->cmd_flags |= REQ_FAILFAST; /* * REQ_BARRIER implies no merging, but lets make it explicit */ if (unlikely(bio_barrier(bio))) - req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE); + req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE); if (bio_sync(bio)) - req->flags |= REQ_RW_SYNC; + req->cmd_flags |= REQ_RW_SYNC; req->errors = 0; req->hard_sector = req->sector = bio->bi_sector; @@ -3306,7 +3275,7 @@ static int __end_that_request_first(struct request *req, int uptodate, req->errors = 0; if (!uptodate) { - if (blk_fs_request(req) && !(req->flags & REQ_QUIET)) + if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET)) printk("end_request: I/O error, dev %s, sector %llu\n", req->rq_disk ? req->rq_disk->disk_name : "?", (unsigned long long)req->sector); @@ -3569,8 +3538,8 @@ EXPORT_SYMBOL(end_request); void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio) { - /* first two bits are identical in rq->flags and bio->bi_rw */ - rq->flags |= (bio->bi_rw & 3); + /* first two bits are identical in rq->cmd_flags and bio->bi_rw */ + rq->cmd_flags |= (bio->bi_rw & 3); rq->nr_phys_segments = bio_phys_segments(q, bio); rq->nr_hw_segments = bio_hw_segments(q, bio); -- cgit v1.2.3 From 9817064b68fef7e4580c6df1ea597e106b9ff88b Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 28 Jul 2006 09:23:08 +0200 Subject: [PATCH] elevator: move the backmerging logic into the elevator core Right now, every IO scheduler implements its own backmerging (except for noop, which does no merging). That results in duplicated code for essentially the same operation, which is never a good thing. This patch moves the backmerging out of the io schedulers and into the elevator core. We save 1.6kb of text and as a bonus get backmerging for noop as well. Win-win! 
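For context on what moved: the elevator core's back-merge test reduces to a contiguity check, since a bio can only be appended to a pending request if it begins at the sector immediately following the request's last sector. A minimal sketch of that predicate (illustrative only; the function name is hypothetical, and the real core additionally validates candidates with elv_rq_merge_ok() and locates them through a hash keyed on each request's end sector):

static int back_merge_candidate(struct request *rq, struct bio *bio)
{
	/* the bio starts exactly where the request currently ends */
	return rq->sector + rq->nr_sectors == bio->bi_sector;
}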
Signed-off-by: Jens Axboe --- block/ll_rw_blk.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'block/ll_rw_blk.c') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 9b91bb70c5e..9cbf7b550c7 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -281,6 +281,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq) { INIT_LIST_HEAD(&rq->queuelist); INIT_LIST_HEAD(&rq->donelist); + INIT_HLIST_NODE(&rq->hash); rq->errors = 0; rq->rq_status = RQ_ACTIVE; @@ -2700,6 +2701,7 @@ void __blk_put_request(request_queue_t *q, struct request *req) int priv = req->cmd_flags & REQ_ELVPRIV; BUG_ON(!list_empty(&req->queuelist)); + BUG_ON(!hlist_unhashed(&req->hash)); blk_free_request(q, req); freed_request(q, rw, priv); -- cgit v1.2.3 From 2e662b65f05d550b6799ed6bfa9963b82279e6b7 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 13 Jul 2006 11:55:04 +0200 Subject: [PATCH] elevator: abstract out the rbtree sort handling The rbtree sort/lookup/reposition logic is mostly duplicated in cfq/deadline/as, so move it to the elevator core. The io schedulers still provide the actual rb root, as we don't want to impose any sort of specific handling on the schedulers. Introduce the helpers and rb_node in struct request to help migrate the IO schedulers. Signed-off-by: Jens Axboe --- block/ll_rw_blk.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'block/ll_rw_blk.c') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 9cbf7b550c7..d388486e98b 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -281,11 +281,12 @@ static inline void rq_init(request_queue_t *q, struct request *rq) { INIT_LIST_HEAD(&rq->queuelist); INIT_LIST_HEAD(&rq->donelist); - INIT_HLIST_NODE(&rq->hash); rq->errors = 0; rq->rq_status = RQ_ACTIVE; rq->bio = rq->biotail = NULL; + INIT_HLIST_NODE(&rq->hash); + RB_CLEAR_NODE(&rq->rb_node); rq->ioprio = 0; rq->buffer = NULL; rq->ref_count = 1; @@ -2943,7 +2944,7 @@ static int __make_request(request_queue_t *q, struct bio *bio) req->ioprio = ioprio_best(req->ioprio, prio); drive_stat_acct(req, nr_sectors, 0); if (!attempt_back_merge(q, req)) - elv_merged_request(q, req); + elv_merged_request(q, req, el_ret); goto out; case ELEVATOR_FRONT_MERGE: @@ -2970,7 +2971,7 @@ static int __make_request(request_queue_t *q, struct bio *bio) req->ioprio = ioprio_best(req->ioprio, prio); drive_stat_acct(req, nr_sectors, 0); if (!attempt_front_merge(q, req)) - elv_merged_request(q, req); + elv_merged_request(q, req, el_ret); goto out; /* ELV_NO_MERGE: elevator says don't/can't merge. */ -- cgit v1.2.3 From c00895ab2f08df7044e58ee01c38bf0a661ea0eb Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 30 Sep 2006 20:29:12 +0200 Subject: [PATCH] Remove ->waiting member from struct request As the comment in blkdev.h indicates, we can fold it into ->end_io_data usage, as that is really what ->waiting is. Fix up the users of blk_end_sync_rq().
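The mechanics are visible in the blk_execute_rq() hunk below: the caller parks a completion in ->end_io_data, and blk_end_sync_rq() retrieves it when the request finishes. Condensed into a usage sketch (assuming the post-patch API):

struct completion wait;

init_completion(&wait);
rq->end_io_data = &wait;	/* previously rq->waiting = &wait */
blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
wait_for_completion(&wait);	/* blk_end_sync_rq() signals this */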
Signed-off-by: Jens Axboe --- block/ll_rw_blk.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'block/ll_rw_blk.c') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index d388486e98b..3b6aad2affd 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -291,7 +291,6 @@ static inline void rq_init(request_queue_t *q, struct request *rq) rq->buffer = NULL; rq->ref_count = 1; rq->q = q; - rq->waiting = NULL; rq->special = NULL; rq->data_len = 0; rq->data = NULL; @@ -451,6 +450,7 @@ static void queue_flush(request_queue_t *q, unsigned which) rq->cmd_flags = REQ_HARDBARRIER; rq_init(q, rq); rq->elevator_private = NULL; + rq->elevator_private2 = NULL; rq->rq_disk = q->bar_rq.rq_disk; rq->rl = NULL; rq->end_io = end_io; @@ -479,6 +479,7 @@ static inline struct request *start_ordered(request_queue_t *q, rq->cmd_flags |= REQ_RW; rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0; rq->elevator_private = NULL; + rq->elevator_private2 = NULL; rq->rl = NULL; init_request_from_bio(rq, q->orig_bar_rq->bio); rq->end_io = bar_end_io; @@ -2569,10 +2570,9 @@ int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk, rq->sense_len = 0; } - rq->waiting = &wait; + rq->end_io_data = &wait; blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq); wait_for_completion(&wait); - rq->waiting = NULL; if (rq->errors) err = -EIO; @@ -2736,9 +2736,9 @@ EXPORT_SYMBOL(blk_put_request); */ void blk_end_sync_rq(struct request *rq, int error) { - struct completion *waiting = rq->waiting; + struct completion *waiting = rq->end_io_data; - rq->waiting = NULL; + rq->end_io_data = NULL; __blk_put_request(rq->q, rq); /* @@ -2801,7 +2801,7 @@ static int attempt_merge(request_queue_t *q, struct request *req, if (rq_data_dir(req) != rq_data_dir(next) || req->rq_disk != next->rq_disk - || next->waiting || next->special) + || next->special) return 0; /* @@ -2886,7 +2886,6 @@ static void init_request_from_bio(struct request *req, struct bio *bio) req->nr_phys_segments = bio_phys_segments(req->q, bio); req->nr_hw_segments = bio_hw_segments(req->q, bio); req->buffer = bio_data(bio); /* see ->buffer comment above */ - req->waiting = NULL; req->bio = req->biotail = bio; req->ioprio = bio_prio(bio); req->rq_disk = bio->bi_bdev->bd_disk; -- cgit v1.2.3 From 49171e5c6f414d49a061b5c1c84967c2eb569822 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 10 Aug 2006 08:59:11 +0200 Subject: [PATCH] Remove struct request_list from struct request It is always identical to &q->rq, and we only use it for detecting whether this request came out of our mempool or not. So replace it with an additional ->flags bit flag. Signed-off-by: Jens Axboe --- block/ll_rw_blk.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'block/ll_rw_blk.c') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 3b6aad2affd..f7462502bfd 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -452,7 +452,6 @@ static void queue_flush(request_queue_t *q, unsigned which) rq->elevator_private = NULL; rq->elevator_private2 = NULL; rq->rq_disk = q->bar_rq.rq_disk; - rq->rl = NULL; rq->end_io = end_io; q->prepare_flush_fn(q, rq); @@ -480,7 +479,6 @@ static inline struct request *start_ordered(request_queue_t *q, rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? 
REQ_FUA : 0; rq->elevator_private = NULL; rq->elevator_private2 = NULL; - rq->rl = NULL; init_request_from_bio(rq, q->orig_bar_rq->bio); rq->end_io = bar_end_io; @@ -2018,7 +2016,7 @@ blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, * first three bits are identical in rq->cmd_flags and bio->bi_rw, * see bio.h and blkdev.h */ - rq->cmd_flags = rw; + rq->cmd_flags = rw | REQ_ALLOCED; if (priv) { if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) { @@ -2196,7 +2194,6 @@ rq_starved: ioc->nr_batch_requests--; rq_init(q, rq); - rq->rl = rl; blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ); out: @@ -2681,8 +2678,6 @@ EXPORT_SYMBOL_GPL(disk_round_stats); */ void __blk_put_request(request_queue_t *q, struct request *req) { - struct request_list *rl = req->rl; - if (unlikely(!q)) return; if (unlikely(--req->ref_count)) @@ -2691,13 +2686,12 @@ void __blk_put_request(request_queue_t *q, struct request *req) elv_completed_request(q, req); req->rq_status = RQ_INACTIVE; - req->rl = NULL; /* * Request may not have originated from ll_rw_blk. if not, * it didn't come out of our reserved rq pools */ - if (rl) { + if (req->cmd_flags & REQ_ALLOCED) { int rw = rq_data_dir(req); int priv = req->cmd_flags & REQ_ELVPRIV; -- cgit v1.2.3 From cdd6026217c0e4cda2efce1bdc318661bef1f66f Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 28 Jul 2006 09:32:07 +0200 Subject: [PATCH] Remove ->rq_status from struct request After Christoph's SCSI change, the only usages left are RQ_ACTIVE and RQ_INACTIVE. The block layer sets RQ_INACTIVE right before freeing the request, so any check for RQ_INACTIVE in a driver is a bug and indicates use-after-free. So kill/clean the remaining users; the change is straightforward. Signed-off-by: Jens Axboe --- block/ll_rw_blk.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'block/ll_rw_blk.c') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index f7462502bfd..b94a396aa62 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -283,7 +283,6 @@ static inline void rq_init(request_queue_t *q, struct request *rq) INIT_LIST_HEAD(&rq->donelist); rq->errors = 0; - rq->rq_status = RQ_ACTIVE; rq->bio = rq->biotail = NULL; INIT_HLIST_NODE(&rq->hash); RB_CLEAR_NODE(&rq->rb_node); @@ -2685,8 +2684,6 @@ void __blk_put_request(request_queue_t *q, struct request *req) elv_completed_request(q, req); - req->rq_status = RQ_INACTIVE; - /* * Request may not have originated from ll_rw_blk. if not, * it didn't come out of our reserved rq pools -- cgit v1.2.3 From cb78b285c8f9d59b0d4e4f6a54c2977ce1d9b880 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 28 Jul 2006 09:32:57 +0200 Subject: [PATCH] Drop useless bio passing in may_queue/set_request API It's not needed for anything, so kill the bio passing.
Signed-off-by: Jens Axboe --- block/ll_rw_blk.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'block/ll_rw_blk.c') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index b94a396aa62..b1ea941f6dc 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -2003,8 +2003,7 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq) } static inline struct request * -blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, - int priv, gfp_t gfp_mask) +blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask) { struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); @@ -2018,7 +2017,7 @@ blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, rq->cmd_flags = rw | REQ_ALLOCED; if (priv) { - if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) { + if (unlikely(elv_set_request(q, rq, gfp_mask))) { mempool_free(rq, q->rq.rq_pool); return NULL; } @@ -2109,7 +2108,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio, struct io_context *ioc = NULL; int may_queue, priv; - may_queue = elv_may_queue(q, rw, bio); + may_queue = elv_may_queue(q, rw); if (may_queue == ELV_MQUEUE_NO) goto rq_starved; @@ -2157,7 +2156,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio, spin_unlock_irq(q->queue_lock); - rq = blk_alloc_request(q, rw, bio, priv, gfp_mask); + rq = blk_alloc_request(q, rw, priv, gfp_mask); if (unlikely(!rq)) { /* * Allocation failed presumably due to memory. Undo anything -- cgit v1.2.3 From 51da90fcb6acd580e87280eaf4eb1f788021807d Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 18 Jul 2006 04:14:45 +0200 Subject: [PATCH] ll_rw_blk: cleanup __make_request() - Don't assign variables that are only used once. - Kill spin_lock() prefetching, it's opportunistic at best. Signed-off-by: Jens Axboe --- block/ll_rw_blk.c | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) (limited to 'block/ll_rw_blk.c') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index b1ea941f6dc..e25b4cd2dcd 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -2885,17 +2885,11 @@ static void init_request_from_bio(struct request *req, struct bio *bio) static int __make_request(request_queue_t *q, struct bio *bio) { struct request *req; - int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync; - unsigned short prio; - sector_t sector; + int el_ret, nr_sectors, barrier, err; + const unsigned short prio = bio_prio(bio); + const int sync = bio_sync(bio); - sector = bio->bi_sector; nr_sectors = bio_sectors(bio); - cur_nr_sectors = bio_cur_sectors(bio); - prio = bio_prio(bio); - - rw = bio_data_dir(bio); - sync = bio_sync(bio); /* * low level driver can indicate that it wants pages above a @@ -2904,8 +2898,6 @@ static int __make_request(request_queue_t *q, struct bio *bio) */ blk_queue_bounce(q, &bio); - spin_lock_prefetch(q->queue_lock); - barrier = bio_barrier(bio); if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) { err = -EOPNOTSUPP; @@ -2953,9 +2945,9 @@ static int __make_request(request_queue_t *q, struct bio *bio) * not touch req->buffer either... 
*/ req->buffer = bio_data(bio); - req->current_nr_sectors = cur_nr_sectors; - req->hard_cur_sectors = cur_nr_sectors; - req->sector = req->hard_sector = sector; + req->current_nr_sectors = bio_cur_sectors(bio); + req->hard_cur_sectors = req->current_nr_sectors; + req->sector = req->hard_sector = bio->bi_sector; req->nr_sectors = req->hard_nr_sectors += nr_sectors; req->ioprio = ioprio_best(req->ioprio, prio); drive_stat_acct(req, nr_sectors, 0); @@ -2973,7 +2965,7 @@ get_rq: * Grab a free request. This is might sleep but can not fail. * Returns with the queue unlocked. */ - req = get_request_wait(q, rw, bio); + req = get_request_wait(q, bio_data_dir(bio), bio); /* * After dropping the lock and possibly sleeping here, our request -- cgit v1.2.3 From fc46379daf90dce57bf765c81d3b39f55150aac2 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 29 Aug 2006 09:05:44 +0200 Subject: [PATCH] cfq-iosched: kill cfq_exit_lock cfq_exit_lock is protecting two things now: - The per-ioc rbtree of cfq_io_contexts - The per-cfqd linked list of cfq_io_contexts The per-cfqd linked list can be protected by the queue lock, as it is (by definition) per cfqd as the queue lock is. The per-ioc rbtree is mainly used and updated by the process itself only. The only outside use is the io priority changing. If we move the priority changing to not browsing the rbtree, we can remove any locking from the rbtree updates and lookup completely. Let the sys_ioprio syscall just mark processes as having the iopriority changed and lazily update the private cfq io contexts the next time io is queued, and we can remove this locking as well. Signed-off-by: Jens Axboe --- block/ll_rw_blk.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'block/ll_rw_blk.c') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index e25b4cd2dcd..508548b834f 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -3654,7 +3654,7 @@ struct io_context *current_io_context(gfp_t gfp_flags) if (ret) { atomic_set(&ret->refcount, 1); ret->task = current; - ret->set_ioprio = NULL; + ret->ioprio_changed = 0; ret->last_waited = jiffies; /* doesn't matter... */ ret->nr_batch_requests = 0; /* because this is 0 */ ret->aic = NULL; -- cgit v1.2.3 From 1ea25ecb7256978947c258f08a30c878eebe9edb Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 18 Jul 2006 22:24:11 +0200 Subject: [PATCH] Audit block layer inlines Kill a few inlines that bring in too much code to more than one location. This shrinks kernel text by about 300 bytes on 32-bit x86.
Signed-off-by: Jens Axboe --- block/ll_rw_blk.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'block/ll_rw_blk.c') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 508548b834f..4b7b4461e8d 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -277,7 +277,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) EXPORT_SYMBOL(blk_queue_make_request); -static inline void rq_init(request_queue_t *q, struct request *rq) +static void rq_init(request_queue_t *q, struct request *rq) { INIT_LIST_HEAD(&rq->queuelist); INIT_LIST_HEAD(&rq->donelist); @@ -589,8 +589,8 @@ static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error) return 0; } -static inline int ordered_bio_endio(struct request *rq, struct bio *bio, - unsigned int nbytes, int error) +static int ordered_bio_endio(struct request *rq, struct bio *bio, + unsigned int nbytes, int error) { request_queue_t *q = rq->q; bio_end_io_t *endio; @@ -2002,7 +2002,7 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq) mempool_free(rq, q->rq.rq_pool); } -static inline struct request * +static struct request * blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask) { struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); -- cgit v1.2.3 From b5deef901282628d88c784f4c9d2f0583ec3b355 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 19 Jul 2006 23:39:40 +0200 Subject: [PATCH] Make sure all block/io scheduler setups are node aware Some were kmalloc_node(), some were still kmalloc(). Change them all to kmalloc_node(). Signed-off-by: Jens Axboe --- block/ll_rw_blk.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'block/ll_rw_blk.c') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 4b7b4461e8d..c6dfa889206 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -39,6 +39,7 @@ static void blk_unplug_timeout(unsigned long data); static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); static void init_request_from_bio(struct request *req, struct bio *bio); static int __make_request(request_queue_t *q, struct bio *bio); +static struct io_context *current_io_context(gfp_t gfp_flags, int node); /* * For the allocated request tables @@ -2114,7 +2115,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio, if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) { if (rl->count[rw]+1 >= q->nr_requests) { - ioc = current_io_context(GFP_ATOMIC); + ioc = current_io_context(GFP_ATOMIC, q->node); /* * The queue will fill after this allocation, so set * it as full, and mark this process as "batching". @@ -2234,7 +2235,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw, * up to a big batch of them for a small period time. * See ioc_batching, ioc_set_batching */ - ioc = current_io_context(GFP_NOIO); + ioc = current_io_context(GFP_NOIO, q->node); ioc_set_batching(q, ioc); spin_lock_irq(q->queue_lock); @@ -3641,7 +3642,7 @@ void exit_io_context(void) * but since the current task itself holds a reference, the context can be * used in general code, so long as it stays within `current` context. 
*/ -struct io_context *current_io_context(gfp_t gfp_flags) +static struct io_context *current_io_context(gfp_t gfp_flags, int node) { struct task_struct *tsk = current; struct io_context *ret; @@ -3650,7 +3651,7 @@ if (likely(ret)) return ret; - ret = kmem_cache_alloc(iocontext_cachep, gfp_flags); + ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node); if (ret) { atomic_set(&ret->refcount, 1); ret->task = current; @@ -3674,10 +3675,10 @@ * * This is always called in the context of the task which submitted the I/O. */ -struct io_context *get_io_context(gfp_t gfp_flags) +struct io_context *get_io_context(gfp_t gfp_flags, int node) { struct io_context *ret; - ret = current_io_context(gfp_flags); + ret = current_io_context(gfp_flags, node); if (likely(ret)) atomic_inc(&ret->refcount); return ret; -- cgit v1.2.3 From dc72ef4ae35c2016fb594bcc85ce871376682174 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 20 Jul 2006 14:54:05 +0200 Subject: [PATCH] Add blk_start_queueing() helper CFQ implements this on its own now, but it's really block layer knowledge. Tells a device queue to start dispatching requests to the driver, taking care to unplug if needed. Also fixes the issue where as/cfq will invoke a stopped queue, which we really don't want. Signed-off-by: Jens Axboe --- block/ll_rw_blk.c | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) (limited to 'block/ll_rw_blk.c') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index c6dfa889206..346be9ae31f 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -2266,6 +2266,25 @@ struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask) } EXPORT_SYMBOL(blk_get_request); +/** + * blk_start_queueing - initiate dispatch of requests to device + * @q: request queue to kick into gear + * + * This is basically a helper to remove the need to know whether a queue + * is plugged or not if someone just wants to initiate dispatch of requests + * for this queue. + * + * The queue lock must be held with interrupts disabled. + */ +void blk_start_queueing(request_queue_t *q) +{ + if (!blk_queue_plugged(q)) + q->request_fn(q); + else + __generic_unplug_device(q); +} +EXPORT_SYMBOL(blk_start_queueing); + /** * blk_requeue_request - put a request back on queue * @q: request queue where request should be inserted @@ -2333,11 +2352,7 @@ void blk_insert_request(request_queue_t *q, struct request *rq, drive_stat_acct(rq, rq->nr_sectors, 1); __elv_add_request(q, rq, where, 0); - - if (blk_queue_plugged(q)) - __generic_unplug_device(q); - else - q->request_fn(q); + blk_start_queueing(q); spin_unlock_irqrestore(q->queue_lock, flags); } -- cgit v1.2.3 From da20a20f3b5c175648fa797c899dd577e4dacb51 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 21 Jul 2006 20:30:28 +0200 Subject: [PATCH] ll_rw_blk: allow more flexibility for read_ahead_kb store It can make sense to set read-ahead larger than a single request. We should not be enforcing such policy on the user. Additionally, using the BLKRASET ioctl doesn't impose such a restriction. So we now expose identical behaviour through the two interfaces.
Issue also reported by Anton Signed-off-by: Jens Axboe --- block/ll_rw_blk.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'block/ll_rw_blk.c') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 346be9ae31f..e3980ec747c 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -3806,9 +3806,6 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count) ssize_t ret = queue_var_store(&ra_kb, page, count); spin_lock_irq(q->queue_lock); - if (ra_kb > (q->max_sectors >> 1)) - ra_kb = (q->max_sectors >> 1); - q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10); spin_unlock_irq(q->queue_lock); -- cgit v1.2.3 From 5404bc7a87b9949cf61e0174b21f80e73239ab25 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 10 Aug 2006 09:01:02 +0200 Subject: [PATCH] Allow file systems to differentiate between data and meta reads We can use this information for making more intelligent priority decisions, and it will also be useful for blktrace. Signed-off-by: Jens Axboe --- block/ll_rw_blk.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'block/ll_rw_blk.c') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index e3980ec747c..57992ae511c 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -2884,6 +2884,8 @@ static void init_request_from_bio(struct request *req, struct bio *bio) if (bio_sync(bio)) req->cmd_flags |= REQ_RW_SYNC; + if (bio_rw_meta(bio)) + req->cmd_flags |= REQ_RW_META; req->errors = 0; req->hard_sector = req->sector = bio->bi_sector; -- cgit v1.2.3 From 25034d7a83cf77667f3d65822484b305d4be6b25 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 29 Aug 2006 09:15:14 +0200 Subject: [PATCH] exit_io_context: don't disable irqs We don't need to disable irqs to clear current->io_context; it is protected by ->alloc_lock. Even IF it were possible to submit I/O from IRQ on behalf of current, this irq_disable() can't help: current_io_context() will re-instantiate ->io_context after irq_enable(). We don't need task_lock() or local_irq_disable() to clear ioc->task. This can't prevent other CPUs from playing with our io_context anyway. Signed-off-by: Oleg Nesterov Signed-off-by: Jens Axboe --- block/ll_rw_blk.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'block/ll_rw_blk.c') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 57992ae511c..f757ed41321 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -3629,25 +3629,22 @@ EXPORT_SYMBOL(put_io_context); /* Called by the exitting task */ void exit_io_context(void) { - unsigned long flags; struct io_context *ioc; struct cfq_io_context *cic; - local_irq_save(flags); task_lock(current); ioc = current->io_context; current->io_context = NULL; - ioc->task = NULL; task_unlock(current); - local_irq_restore(flags); + ioc->task = NULL; if (ioc->aic && ioc->aic->exit) ioc->aic->exit(ioc->aic); if (ioc->cic_root.rb_node != NULL) { cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node); cic->exit(ioc); } - + put_io_context(ioc); } -- cgit v1.2.3 From 059af497c23492cb1ddcbba11c09dad385960bc0 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 21 Sep 2006 20:37:22 +0200 Subject: [PATCH] blk_queue_start_tag() shared map race fix If we share the tag map between two or more queues, then we cannot use __set_bit() to set the bit. In fact we need to make sure we atomically acquire this tag, so loop using test_and_set_bit() to protect from that.
Noticed by Mike Christie Signed-off-by: Jens Axboe --- block/ll_rw_blk.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'block/ll_rw_blk.c') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index f757ed41321..83425fb3c8d 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -1171,11 +1171,16 @@ int blk_queue_start_tag(request_queue_t *q, struct request *rq) BUG(); } - tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth); - if (tag >= bqt->max_depth) - return 1; + /* + * Protect against shared tag maps, as we may not have exclusive + * access to the tag map. + */ + do { + tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth); + if (tag >= bqt->max_depth) + return 1; - __set_bit(tag, bqt->tag_map); + } while (test_and_set_bit(tag, bqt->tag_map)); rq->cmd_flags |= REQ_QUEUED; rq->tag = tag; -- cgit v1.2.3
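The loop in the final hunk is the classic lock-free bitmap allocation pattern: find a candidate bit, then claim it atomically, retrying if another CPU claimed it first. A hypothetical two-CPU interleaving (not from the patch) shows why the old find_first_zero_bit()/__set_bit() pair was unsafe once tag maps could be shared between queues:

/*
 * CPU0: find_first_zero_bit() returns tag 5
 * CPU1: find_first_zero_bit() also returns tag 5 (bit still clear)
 * CPU0: __set_bit(5, tag_map)   non-atomic store, no ownership check
 * CPU1: __set_bit(5, tag_map)   both queues now issue tag 5
 *
 * With test_and_set_bit(), whichever CPU sets the bit first wins the
 * tag; the loser sees it already set and loops to pick another one.
 */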