From d7ed538a02c219119adb20f1dccbf0f8015e53f3 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Tue, 2 Aug 2005 20:08:02 +0200
Subject: [PATCH] cfq-iosched: fix problem with barriers and max_depth == 1

CFQ will currently stall when using write barriers and the default
max_depth setting of 1, since we artificially need a depth of 2 when
pre-pending the first flush.  So never deny the barrier request going
to the device.

This is a regression since 2.6.12; it was found in SUSE testing.

Signed-off-by: Jens Axboe
Signed-off-by: Linus Torvalds
---
 drivers/block/cfq-iosched.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers/block')

diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index de5746e38af..2435a7c99b2 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -1281,6 +1281,7 @@ dispatch:
 		 */
 		if (!cfq_crq_in_driver(crq) &&
 		    !cfq_cfqq_idle_window(cfqq) &&
+		    !blk_barrier_rq(rq) &&
 		    cfqd->rq_in_driver >= cfqd->cfq_max_depth)
 			return NULL;
--
cgit v1.2.3
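To see why the stall occurs: with max_depth == 1, the pre-pended flush
occupies the single allowed slot, and the depth check then refuses the
barrier write that must follow it.  Below is a minimal user-space model
of that check; struct req, may_dispatch(), and the globals are
hypothetical stand-ins for crq/rq, CFQ's dispatch test, and the cfqd
counters named in the diff above -- a sketch, not kernel code.

/*
 * Minimal user-space model of the CFQ depth check, assuming a write
 * barrier is issued as a pre-pended flush plus the write itself.
 */
#include <stdbool.h>
#include <stdio.h>

struct req {
	bool is_barrier;
};

static int rq_in_driver;		/* requests already at the device */
static const int max_depth = 1;		/* CFQ's default cfq_max_depth    */

static bool may_dispatch(const struct req *rq)
{
	/* The patched check: barrier requests bypass the depth limit. */
	if (!rq->is_barrier && rq_in_driver >= max_depth)
		return false;
	return true;
}

int main(void)
{
	struct req flush = { .is_barrier = true };	/* pre-pended flush  */
	struct req write = { .is_barrier = true };	/* the barrier write */

	if (may_dispatch(&flush))
		rq_in_driver++;		/* flush now holds the only slot */

	/*
	 * Without the !is_barrier exemption this would print "no"
	 * forever: the write could never follow its flush, which is
	 * the post-2.6.12 stall the one-line change above fixes.
	 */
	printf("barrier write dispatched: %s\n",
	       may_dispatch(&write) ? "yes" : "no");
	return 0;
}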
From ba02508248e90a9d696aebd18b48a3290235b53c Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Fri, 5 Aug 2005 13:28:11 -0700
Subject: [PATCH] blk: fix tag shrinking (revive real_max_depth)

My patch in commit fa72b903f75e4f0f0b2c2feed093005167da4023 incorrectly
removed blk_queue_tag->real_max_depth.

The original resize implementation was incorrect in the following
points:

 * the actual allocation size of tag_index was shorter than
   real_max_depth, but was assumed to be of the same size, possibly
   causing memory access beyond the allocated area.

 * bits in tag_map between max_depth and real_max_depth were
   initialized to 1's, making those tags permanently reserved.

In an attempt to fix the above two bugs, I removed the allocation
optimization in init_tag_map along with real_max_depth; the tag
map/index were allocated and freed immediately during resize.
Unfortunately, I wasn't considering that the tag map/index can be
resized dynamically while tags beyond new_depth are still active.
This led to accessing a freed area after shrinking the tags, and to
the following bug report thread on linux-scsi:

http://marc.theaimsgroup.com/?l=linux-scsi&m=112319898111885&w=2

To fix the problem, I've revived real_max_depth without the allocation
optimization in init_tag_map, and Andrew Vasquez confirmed that the
problem was fixed.  As Jens is not going to be available for a week,
he asked me to make sure that this patch reaches you.

http://marc.theaimsgroup.com/?l=linux-scsi&m=112325778530886&w=2

Also, a comment was added noting that real_max_depth is needed for
dynamic shrinking.

Signed-off-by: Tejun Heo
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 drivers/block/ll_rw_blk.c | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)

(limited to 'drivers/block')

diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 692a5fced76..3c818544475 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -719,7 +719,7 @@ struct request *blk_queue_find_tag(request_queue_t *q, int tag)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
 
-	if (unlikely(bqt == NULL || tag >= bqt->max_depth))
+	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
 		return NULL;
 
 	return bqt->tag_index[tag];
@@ -798,6 +798,7 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 	memset(tag_index, 0, depth * sizeof(struct request *));
 	memset(tag_map, 0, nr_ulongs * sizeof(unsigned long));
 
+	tags->real_max_depth = depth;
 	tags->max_depth = depth;
 	tags->tag_index = tag_index;
 	tags->tag_map = tag_map;
@@ -871,12 +872,23 @@ int blk_queue_resize_tags(request_queue_t *q, int new_depth)
 	if (!bqt)
 		return -ENXIO;
 
+	/*
+	 * if we already have large enough real_max_depth.  just
+	 * adjust max_depth.  *NOTE* as requests with tag value
+	 * between new_depth and real_max_depth can be in-flight, tag
+	 * map can not be shrunk blindly here.
+	 */
+	if (new_depth <= bqt->real_max_depth) {
+		bqt->max_depth = new_depth;
+		return 0;
+	}
+
 	/*
 	 * save the old state info, so we can copy it back
 	 */
 	tag_index = bqt->tag_index;
 	tag_map = bqt->tag_map;
-	max_depth = bqt->max_depth;
+	max_depth = bqt->real_max_depth;
 
 	if (init_tag_map(q, bqt, new_depth))
 		return -ENOMEM;
@@ -913,7 +925,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
 
 	BUG_ON(tag == -1);
 
-	if (unlikely(tag >= bqt->max_depth))
+	if (unlikely(tag >= bqt->real_max_depth))
 		/*
 		 * This can happen after tag depth has been reduced.
 		 * FIXME: how about a warning or info message here?
--
cgit v1.2.3
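The rule this patch restores can be modeled in isolation: shrinking only
lowers the soft limit (max_depth) while the backing arrays keep their
allocated size (real_max_depth), because tags between new_depth and
real_max_depth may still be in flight.  The sketch below mirrors that
logic in user space; struct tag_state and resize_tags() are illustrative
names, not the kernel's blk_queue_tag / blk_queue_resize_tags API.

/*
 * User-space sketch of the shrink-vs-grow rule revived by the patch.
 * Error handling is reduced to a -1 return for brevity.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tag_state {
	int max_depth;		/* new tags are handed out below this      */
	int real_max_depth;	/* actual size of the backing array        */
	void **tag_index;	/* tag -> request lookup table             */
};

static int resize_tags(struct tag_state *ts, int new_depth)
{
	void **new_index;

	/*
	 * Shrinking (or a no-op): tags in [new_depth, real_max_depth)
	 * may still be in flight, so the array is never freed here;
	 * only the soft limit moves.
	 */
	if (new_depth <= ts->real_max_depth) {
		ts->max_depth = new_depth;
		return 0;
	}

	/* Growing: allocate a bigger table and copy the old entries. */
	new_index = calloc(new_depth, sizeof(void *));
	if (!new_index)
		return -1;
	if (ts->tag_index) {
		memcpy(new_index, ts->tag_index,
		       ts->real_max_depth * sizeof(void *));
		free(ts->tag_index);
	}
	ts->tag_index = new_index;
	ts->max_depth = new_depth;
	ts->real_max_depth = new_depth;
	return 0;
}

int main(void)
{
	struct tag_state ts = { 0, 0, NULL };

	resize_tags(&ts, 64);	/* initial allocation                   */
	resize_tags(&ts, 16);	/* shrink: table stays 64 entries deep  */
	printf("max_depth=%d real_max_depth=%d\n",
	       ts.max_depth, ts.real_max_depth);
	free(ts.tag_index);
	return 0;
}

This also shows why the lookup paths in the diff (blk_queue_find_tag,
blk_queue_end_tag) now bounds-check against real_max_depth rather than
max_depth: after the shrink above, in-flight tags 16..63 must still
resolve and complete normally.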