From c2dea2d1fdbce86942dba0a968c523d8b7858bb5 Mon Sep 17 00:00:00 2001
From: Vasily Tarasov
Date: Fri, 20 Jul 2007 10:06:38 +0200
Subject: cfq: async queue allocation per priority

If we have two processes with different ioprio_class values but the same
ioprio data, their async requests will fall into the same queue. Such
behavior is not expected: it is not right to put real-time requests and
best-effort requests in the same queue.

This patch fixes the problem by introducing additional *cfqq fields in
cfqd, pointing to per-(class, priority) async queues.

Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c    | 56 +++++++++++++++++++++++++++++++++++++++-----------
 include/linux/ioprio.h |  8 ++++++++
 2 files changed, 52 insertions(+), 12 deletions(-)

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9755a3cfad2..bc7190eed10 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -92,7 +92,11 @@ struct cfq_data {
 	struct cfq_queue *active_queue;
 	struct cfq_io_context *active_cic;

-	struct cfq_queue *async_cfqq[IOPRIO_BE_NR];
+	/*
+	 * async queue for each priority case
+	 */
+	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
+	struct cfq_queue *async_idle_cfqq;

 	struct timer_list idle_class_timer;

@@ -1414,24 +1418,44 @@ out:
 	return cfqq;
 }

+static struct cfq_queue **
+cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
+{
+	switch(ioprio_class) {
+	case IOPRIO_CLASS_RT:
+		return &cfqd->async_cfqq[0][ioprio];
+	case IOPRIO_CLASS_BE:
+		return &cfqd->async_cfqq[1][ioprio];
+	case IOPRIO_CLASS_IDLE:
+		return &cfqd->async_idle_cfqq;
+	default:
+		BUG();
+	}
+}
+
 static struct cfq_queue *
 cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
 	      gfp_t gfp_mask)
 {
 	const int ioprio = task_ioprio(tsk);
+	const int ioprio_class = task_ioprio_class(tsk);
+	struct cfq_queue **async_cfqq = NULL;
 	struct cfq_queue *cfqq = NULL;

-	if (!is_sync)
-		cfqq = cfqd->async_cfqq[ioprio];
+	if (!is_sync) {
+		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
+		cfqq = *async_cfqq;
+	}
+
 	if (!cfqq)
 		cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);

 	/*
 	 * pin the queue now that it's allocated, scheduler exit will prune it
 	 */
-	if (!is_sync && !cfqd->async_cfqq[ioprio]) {
+	if (!is_sync && !(*async_cfqq)) {
 		atomic_inc(&cfqq->ref);
-		cfqd->async_cfqq[ioprio] = cfqq;
+		*async_cfqq = cfqq;
 	}

 	atomic_inc(&cfqq->ref);
@@ -2042,11 +2066,24 @@ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 	blk_sync_queue(cfqd->queue);
 }

+static void cfq_put_async_queues(struct cfq_data *cfqd)
+{
+	int i;
+
+	for (i = 0; i < IOPRIO_BE_NR; i++) {
+		if (cfqd->async_cfqq[0][i])
+			cfq_put_queue(cfqd->async_cfqq[0][i]);
+		if (cfqd->async_cfqq[1][i])
+			cfq_put_queue(cfqd->async_cfqq[1][i]);
+		if (cfqd->async_idle_cfqq)
+			cfq_put_queue(cfqd->async_idle_cfqq);
+	}
+}
+
 static void cfq_exit_queue(elevator_t *e)
 {
 	struct cfq_data *cfqd = e->elevator_data;
 	request_queue_t *q = cfqd->queue;
-	int i;

 	cfq_shutdown_timer_wq(cfqd);

@@ -2063,12 +2100,7 @@ static void cfq_exit_queue(elevator_t *e)
 		__cfq_exit_single_io_context(cfqd, cic);
 	}

-	/*
-	 * Put the async queues
-	 */
-	for (i = 0; i < IOPRIO_BE_NR; i++)
-		if (cfqd->async_cfqq[i])
-			cfq_put_queue(cfqd->async_cfqq[i]);
+	cfq_put_async_queues(cfqd);

 	spin_unlock_irq(q->queue_lock);

diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 2eaa142cd06..baf29387cab 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -53,6 +53,14 @@ static inline int task_ioprio(struct task_struct *task)
 	return IOPRIO_NORM;
 }

+static inline int task_ioprio_class(struct task_struct *task)
+{
+	if (ioprio_valid(task->ioprio))
+		return IOPRIO_PRIO_CLASS(task->ioprio);
+
+	return IOPRIO_CLASS_BE;
+}
+
 static inline int task_nice_ioprio(struct task_struct *task)
 {
 	return (task_nice(task) + 20) / 5;
--
cgit v1.2.3


From 8350163a90f6003c9e60e8ebc0e00f654657645f Mon Sep 17 00:00:00 2001
From: Alexey Dobriyan
Date: Fri, 20 Jul 2007 10:07:50 +0200
Subject: cfq: Write-only stuff in CFQ data structures

There are some leftover bits from the task cooperator patch that was
yanked out again. While it will get reintroduced, there is no point in
having this write-only stuff in the tree.

So yank it.

Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 11 -----------
 1 file changed, 11 deletions(-)

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index bc7190eed10..d148ccbc36d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -115,9 +115,6 @@ struct cfq_data {
 	unsigned int cfq_slice_idle;

 	struct list_head cic_list;
-
-	sector_t new_seek_mean;
-	u64 new_seek_total;
 };

 /*
@@ -157,8 +154,6 @@ struct cfq_queue {

 	/* various state flags, see below */
 	unsigned int flags;
-
-	sector_t last_request_pos;
 };

 enum cfqq_state_flags {
@@ -1621,11 +1616,6 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
 	else
 		sdist = cic->last_request_pos - rq->sector;

-	if (!cic->seek_samples) {
-		cfqd->new_seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
-		cfqd->new_seek_mean = cfqd->new_seek_total / 256;
-	}
-
 	/*
 	 * Don't allow the seek distance to get too large from the
 	 * odd fragment, pagein, etc
@@ -1761,7 +1751,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		cfq_update_idle_window(cfqd, cfqq, cic);

 	cic->last_request_pos = rq->sector + rq->nr_sectors;
-	cfqq->last_request_pos = cic->last_request_pos;

 	if (cfqq == cfqd->active_queue) {
 		/*
--
cgit v1.2.3
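
Note: to illustrate the per-(class, priority) indexing scheme that the first
patch introduces, here is a minimal stand-alone user-space sketch. It is not
part of the patches above; the IOPRIO_* constants mirror include/linux/ioprio.h,
while the queue type and the demo in main() are simplified stand-ins.

/*
 * Stand-alone illustration of the async queue indexing added to struct
 * cfq_data: a [class][prio] slot for RT and BE, a single slot for IDLE.
 */
#include <stdio.h>

enum {
	IOPRIO_CLASS_NONE,
	IOPRIO_CLASS_RT,
	IOPRIO_CLASS_BE,
	IOPRIO_CLASS_IDLE,
};

#define IOPRIO_BE_NR	8	/* priority levels within the RT and BE classes */

struct cfq_queue { int id; };	/* simplified stand-in for the kernel struct */

struct cfq_data {
	/* [0][prio] holds RT queues, [1][prio] holds BE queues */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	/* the idle class has no priority levels, so one queue is enough */
	struct cfq_queue *async_idle_cfqq;
};

/* Same shape as cfq_async_queue_prio() in the first patch. */
static struct cfq_queue **
async_queue_slot(struct cfq_data *cfqd, int ioprio_class, int ioprio)
{
	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		return &cfqd->async_cfqq[0][ioprio];
	case IOPRIO_CLASS_BE:
		return &cfqd->async_cfqq[1][ioprio];
	case IOPRIO_CLASS_IDLE:
		return &cfqd->async_idle_cfqq;
	default:
		return NULL;	/* the kernel version calls BUG() here */
	}
}

int main(void)
{
	static struct cfq_data cfqd;	/* zero-initialized */
	struct cfq_queue rt_q = { 1 }, be_q = { 2 };

	/* RT prio 4 and BE prio 4 now occupy distinct slots... */
	*async_queue_slot(&cfqd, IOPRIO_CLASS_RT, 4) = &rt_q;
	*async_queue_slot(&cfqd, IOPRIO_CLASS_BE, 4) = &be_q;

	/* ...whereas the old one-dimensional async_cfqq[ioprio] would have
	 * returned the same queue for both lookups. */
	printf("RT slot: %d, BE slot: %d\n",
	       cfqd.async_cfqq[0][4]->id, cfqd.async_cfqq[1][4]->id);
	return 0;
}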