Diffstat (limited to 'block')
-rw-r--r--	block/Kconfig       |  4 ++--
-rw-r--r--	block/cfq-iosched.c | 39 ++++++++++++++++++++++++++++++++++++---
-rw-r--r--	block/elevator.c    | 13 +++----------
-rw-r--r--	block/ll_rw_blk.c   | 13 ++-----------
4 files changed, 43 insertions(+), 26 deletions(-)
diff --git a/block/Kconfig b/block/Kconfig
index a50f4811164..285935134bc 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -1,7 +1,7 @@
 #
 # Block layer core configuration
 #
-config BLOCK
+menuconfig BLOCK
        bool "Enable the block layer" if EMBEDDED
        default y
        help
@@ -49,6 +49,6 @@ config LSF
 
          If unsure, say Y.
 
-endif
+endif # BLOCK
 
 source block/Kconfig.iosched
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index baef5fc7cff..e0aa4dad674 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -92,6 +92,8 @@ struct cfq_data {
        struct cfq_queue *active_queue;
        struct cfq_io_context *active_cic;
 
+       struct cfq_queue *async_cfqq[IOPRIO_BE_NR];
+
        struct timer_list idle_class_timer;
 
        sector_t last_position;
@@ -1351,8 +1353,8 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
-             gfp_t gfp_mask)
+cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+                    struct task_struct *tsk, gfp_t gfp_mask)
 {
        struct cfq_queue *cfqq, *new_cfqq = NULL;
        struct cfq_io_context *cic;
@@ -1405,12 +1407,35 @@ retry:
        if (new_cfqq)
                kmem_cache_free(cfq_pool, new_cfqq);
 
-       atomic_inc(&cfqq->ref);
 out:
        WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
        return cfqq;
 }
 
+static struct cfq_queue *
+cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
+             gfp_t gfp_mask)
+{
+       const int ioprio = task_ioprio(tsk);
+       struct cfq_queue *cfqq = NULL;
+
+       if (!is_sync)
+               cfqq = cfqd->async_cfqq[ioprio];
+       if (!cfqq)
+               cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
+
+       /*
+        * pin the queue now that it's allocated, scheduler exit will prune it
+        */
+       if (!is_sync && !cfqd->async_cfqq[ioprio]) {
+               atomic_inc(&cfqq->ref);
+               cfqd->async_cfqq[ioprio] = cfqq;
+       }
+
+       atomic_inc(&cfqq->ref);
+       return cfqq;
+}
+
 /*
  * We drop cfq io contexts lazily, so we may find a dead one.
  */
@@ -2019,6 +2044,7 @@ static void cfq_exit_queue(elevator_t *e)
 {
        struct cfq_data *cfqd = e->elevator_data;
        request_queue_t *q = cfqd->queue;
+       int i;
 
        cfq_shutdown_timer_wq(cfqd);
 
@@ -2035,6 +2061,13 @@ static void cfq_exit_queue(elevator_t *e)
                __cfq_exit_single_io_context(cfqd, cic);
        }
 
+       /*
+        * Put the async queues
+        */
+       for (i = 0; i < IOPRIO_BE_NR; i++)
+               if (cfqd->async_cfqq[i])
+                       cfq_put_queue(cfqd->async_cfqq[i]);
+
        spin_unlock_irq(q->queue_lock);
 
        cfq_shutdown_timer_wq(cfqd);
diff --git a/block/elevator.c b/block/elevator.c
index ce866eb75f6..4769a25d703 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -112,12 +112,8 @@ static inline int elv_try_merge(struct request *__rq, struct bio *bio)
 static struct elevator_type *elevator_find(const char *name)
 {
        struct elevator_type *e;
-       struct list_head *entry;
-
-       list_for_each(entry, &elv_list) {
-
-               e = list_entry(entry, struct elevator_type, list);
 
+       list_for_each_entry(e, &elv_list, list) {
                if (!strcmp(e->elevator_name, name))
                        return e;
        }
@@ -1116,14 +1112,11 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
 {
        elevator_t *e = q->elevator;
        struct elevator_type *elv = e->elevator_type;
-       struct list_head *entry;
+       struct elevator_type *__e;
        int len = 0;
 
        spin_lock(&elv_list_lock);
-       list_for_each(entry, &elv_list) {
-               struct elevator_type *__e;
-
-               __e = list_entry(entry, struct elevator_type, list);
+       list_for_each_entry(__e, &elv_list, list) {
                if (!strcmp(elv->elevator_name, __e->elevator_name))
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                else
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index c99b4635485..ef42bb2b12b 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -527,8 +527,6 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
 static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
 {
        request_queue_t *q = bio->bi_private;
-       struct bio_vec *bvec;
-       int i;
 
        /*
         * This is dry run, restore bio_sector and size. We'll finish
@@ -540,13 +538,6 @@ static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
        if (bio->bi_size)
                return 1;
 
-       /* Rewind bvec's */
-       bio->bi_idx = 0;
-       bio_for_each_segment(bvec, bio, i) {
-               bvec->bv_len += bvec->bv_offset;
-               bvec->bv_offset = 0;
-       }
-
        /* Reset bio */
        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio->bi_size = q->bi_size;
@@ -1304,9 +1295,9 @@ static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
        if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
                blk_recount_segments(q, nxt);
        if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
-           BIOVEC_VIRT_OVERSIZE(bio->bi_hw_front_size + bio->bi_hw_back_size))
+           BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
                return 0;
-       if (bio->bi_size + nxt->bi_size > q->max_segment_size)
+       if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
                return 0;
 
        return 1;
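
Note on the cfq-iosched.c change above: cfq_get_queue() now treats async queues as shared, one per best-effort priority level, cached in cfqd->async_cfqq[]. The cache entry holds its own reference (the "pin"), each caller takes another, and cfq_exit_queue() drops the pinning references. Below is a minimal userspace sketch of that pin-and-share pattern; struct cfqq_demo, the plain-int refcount, and the helper names are illustrative stand-ins for the kernel's atomic_t and cfq structures, not code from the patch.

/* Userspace sketch of the async_cfqq caching pattern: one shared queue
 * per priority level, pinned by an extra reference that only the
 * scheduler-exit path drops. */
#include <stdio.h>
#include <stdlib.h>

#define IOPRIO_BE_NR 8                  /* best-effort priority levels */

struct cfqq_demo {
        int ref;
        int ioprio;
};

static struct cfqq_demo *async_cfqq[IOPRIO_BE_NR];

static struct cfqq_demo *find_alloc_queue(int ioprio)
{
        struct cfqq_demo *q = calloc(1, sizeof(*q));
        q->ioprio = ioprio;
        return q;                       /* returned with ref == 0, as in the patch */
}

static struct cfqq_demo *get_queue(int is_sync, int ioprio)
{
        struct cfqq_demo *q = NULL;

        if (!is_sync)                   /* async queues are shared per prio */
                q = async_cfqq[ioprio];
        if (!q)
                q = find_alloc_queue(ioprio);

        /* pin the shared queue once; the exit path drops this reference */
        if (!is_sync && !async_cfqq[ioprio]) {
                q->ref++;
                async_cfqq[ioprio] = q;
        }

        q->ref++;                       /* reference for this caller */
        return q;
}

static void put_queue(struct cfqq_demo *q)
{
        if (--q->ref == 0)
                free(q);
}

int main(void)
{
        struct cfqq_demo *a = get_queue(0, 4);  /* allocates; ref == 2 */
        struct cfqq_demo *b = get_queue(0, 4);  /* cache hit;  ref == 3 */

        printf("shared: %d, ref: %d\n", a == b, a->ref);
        put_queue(a);
        put_queue(b);

        /* the exit path: drop the pin on every cached async queue */
        for (int i = 0; i < IOPRIO_BE_NR; i++)
                if (async_cfqq[i])
                        put_queue(async_cfqq[i]);
        return 0;
}

Because the pin keeps the refcount above zero for as long as the scheduler lives, a cached queue can never be freed out from under a later cache hit.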
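
The elevator.c hunks are a mechanical conversion from open-coded list_for_each() plus list_entry() to list_for_each_entry(), which performs the container_of() step itself. A self-contained userspace sketch of the two forms, using simplified rewrites of the macros rather than the real <linux/list.h>:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Open-coded form the patch removes: walk raw nodes, then list_entry(). */
#define list_for_each(pos, head) \
        for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

/* Converted form: the macro does the container_of() step for you. */
#define list_for_each_entry(pos, head, member) \
        for ((pos) = container_of((head)->next, __typeof__(*(pos)), member); \
             &(pos)->member != (head); \
             (pos) = container_of((pos)->member.next, __typeof__(*(pos)), member))

struct elevator_type_demo {             /* stand-in for struct elevator_type */
        const char *elevator_name;
        struct list_head list;
};

int main(void)
{
        struct list_head elv_list = { &elv_list, &elv_list };
        struct elevator_type_demo noop = { "noop", { NULL, NULL } };
        struct elevator_type_demo cfq = { "cfq", { NULL, NULL } };
        struct elevator_type_demo *e;

        /* hand-rolled list_add_tail(), twice */
        noop.list.next = &elv_list; noop.list.prev = elv_list.prev;
        elv_list.prev->next = &noop.list; elv_list.prev = &noop.list;
        cfq.list.next = &elv_list; cfq.list.prev = elv_list.prev;
        elv_list.prev->next = &cfq.list; elv_list.prev = &cfq.list;

        /* the converted loop: no explicit list_entry() needed */
        list_for_each_entry(e, &elv_list, list)
                if (!strcmp(e->elevator_name, "cfq"))
                        printf("found %s\n", e->elevator_name);
        return 0;
}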
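
The blk_hw_contig_segment() fix changes which quantities gate the merge: the hardware segment created by joining two bios is bounded by the first bio's back padding plus the second bio's front padding, so that boundary sum, not bio's own front+back sizes or the two payload totals, is what must stay within limits. A hypothetical illustration (the demo struct, MAX_SEGMENT_SIZE constant, and sample numbers are assumptions, and the separate BIOVEC_VIRT_MERGEABLE/BIOVEC_VIRT_OVERSIZE checks are omitted):

#include <stdio.h>

#define MAX_SEGMENT_SIZE 65536          /* stand-in for q->max_segment_size */

struct bio_demo {
        unsigned int bi_size;           /* total payload; not what matters here */
        unsigned int bi_hw_front_size;  /* hw segment size at the head */
        unsigned int bi_hw_back_size;   /* hw segment size at the tail */
};

static int hw_contig_mergeable(const struct bio_demo *bio,
                               const struct bio_demo *nxt)
{
        /* corrected check: only the segment formed across the merge
         * boundary counts; the old code summed bio->bi_size + nxt->bi_size
         * and bio's own front + back padding instead */
        return bio->bi_hw_back_size + nxt->bi_hw_front_size <= MAX_SEGMENT_SIZE;
}

int main(void)
{
        /* two large bios whose boundary segment is still small */
        struct bio_demo a = { 60000, 512, 4096 };
        struct bio_demo b = { 60000, 4096, 512 };

        printf("mergeable: %d\n", hw_contig_mergeable(&a, &b));
        return 0;
}

With the old bi_size-based test, these two 60000-byte bios would be refused even though the segment actually formed at the boundary is only 8192 bytes.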