Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--  block/ll_rw_blk.c | 23
1 file changed, 17 insertions, 6 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index a15845c164f..ed39313c408 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1075,12 +1075,6 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 		 */
 		return;
 
-	if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
-		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
-		       __FUNCTION__, tag);
-		return;
-	}
-
 	list_del_init(&rq->queuelist);
 	rq->cmd_flags &= ~REQ_QUEUED;
 	rq->tag = -1;
@@ -1090,6 +1084,19 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 		       __FUNCTION__, tag);
 
 	bqt->tag_index[tag] = NULL;
+
+	/*
+	 * We use test_and_clear_bit's memory ordering properties here.
+	 * The tag_map bit acts as a lock for tag_index[bit], so we need
+	 * a barrier before clearing the bit (precisely: release semantics).
+	 * Could use clear_bit_unlock when it is merged.
+	 */
+	if (unlikely(!test_and_clear_bit(tag, bqt->tag_map))) {
+		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
+		       __FUNCTION__, tag);
+		return;
+	}
+
 	bqt->busy--;
 }
 
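The new comment in blk_queue_end_tag describes the release half of a lock protocol: the store to bqt->tag_index[tag] has to become visible before the tag bit is cleared, because whoever next wins that bit will read the slot. A rough userspace analogue in C11 atomics (a sketch only, not the kernel API; tag_map, tag_index and end_tag_sketch are illustrative names) looks like this:

#include <stdatomic.h>
#include <stddef.h>

static atomic_ulong tag_map;	/* bit i acts as a lock for tag_index[i] */
static void *tag_index[64];

/*
 * Release side, mirroring the reordered blk_queue_end_tag: the slot is
 * cleared first, then the bit is dropped with release ordering so the
 * store cannot sink below it.  The kernel's test_and_clear_bit implies
 * a full barrier, which is stronger than the minimum required.
 */
static void end_tag_sketch(int tag)
{
	tag_index[tag] = NULL;
	atomic_fetch_and_explicit(&tag_map, ~(1UL << tag),
				  memory_order_release);
}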
@@ -1136,6 +1143,10 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 		return 1;
 	} while (test_and_set_bit(tag, bqt->tag_map));
+	/*
+	 * We rely on test_and_set_bit providing lock memory ordering semantics
+	 * (could use test_and_set_bit_lock when it is merged).
+	 */
 
 	rq->cmd_flags |= REQ_QUEUED;
 	rq->tag = tag;
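blk_queue_start_tag holds the matching acquire half: the winning 0 -> 1 flip of the bit must be ordered before any later stores to the slot it protects. Continuing the same hypothetical C11 sketch (start_tag_sketch is an illustrative name, not a kernel function):

#include <stdatomic.h>
#include <stddef.h>

static atomic_ulong tag_map;	/* bit i acts as a lock for tag_index[i] */
static void *tag_index[64];

/*
 * Acquire side, mirroring blk_queue_start_tag: a successful 0 -> 1 flip
 * of the bit takes ownership of the slot, and acquire ordering keeps
 * the later store to tag_index[tag] from being observed ahead of it.
 * test_and_set_bit's full barrier is again stronger than needed.
 */
static int start_tag_sketch(void *payload)
{
	for (int tag = 0; tag < 64; tag++) {
		unsigned long bit = 1UL << tag;

		if (!(atomic_fetch_or_explicit(&tag_map, bit,
					       memory_order_acquire) & bit)) {
			tag_index[tag] = payload;	/* slot is now ours */
			return tag;
		}
	}
	return -1;	/* all tags busy */
}

Pairing release on the clear with acquire on the set is what makes the bit behave as a lock word; it is the same pairing provided by the clear_bit_unlock()/test_and_set_bit_lock() primitives that both comments anticipate.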