author    | James Bottomley <James.Bottomley@HansenPartnership.com> | 2009-05-17 18:55:18 +0300
committer | Jens Axboe <jens.axboe@oracle.com>                      | 2009-05-19 12:14:55 +0200
commit    | 3a5a39276d2a32b05b1ee25b384516805b17cf87 (patch)
tree      | 8a52a4a158ed341b7c3049826c549c2a210386e3 /block
parent    | b2858d7d1639c04ca3c54988d76c5f7300b76f1c (diff)
block: allow blk_rq_map_kern to append to requests
Use blk_rq_append_bio() internally instead of blk_rq_bio_prep() so that
blk_rq_map_kern() can be called multiple times to map multiple buffers.

This is part of the effort to un-export blk_rq_append_bio().
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Boaz Harrosh <bharrosh@panasas.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-map.c | 12
1 file changed, 10 insertions, 2 deletions
diff --git a/block/blk-map.c b/block/blk-map.c
index 56082bea450..caa05a66774 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -282,7 +282,8 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
  *
  * Description:
  *    Data will be mapped directly if possible. Otherwise a bounce
- *    buffer is used.
+ *    buffer is used. Can be called multple times to append multple
+ *    buffers.
  */
 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		    unsigned int len, gfp_t gfp_mask)
@@ -290,6 +291,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	int reading = rq_data_dir(rq) == READ;
 	int do_copy = 0;
 	struct bio *bio;
+	int ret;
 
 	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
@@ -311,7 +313,13 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	if (do_copy)
 		rq->cmd_flags |= REQ_COPY_USER;
 
-	blk_rq_bio_prep(q, rq, bio);
+	ret = blk_rq_append_bio(q, rq, bio);
+	if (unlikely(ret)) {
+		/* request is too big */
+		bio_put(bio);
+		return ret;
+	}
+
 	blk_queue_bounce(q, &rq->bio);
 	rq->buffer = NULL;
 	return 0;
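A minimal sketch of the usage this patch enables, assuming the 2009-era block
API shown in the diff above; the helper map_two_buffers() and its buffer
arguments are hypothetical, only blk_rq_map_kern()'s signature comes from the
patch:

/*
 * Illustrative sketch, not part of this commit: with this change a
 * driver can map several kernel buffers into a single request by
 * calling blk_rq_map_kern() repeatedly.
 */
static int map_two_buffers(struct request_queue *q, struct request *rq,
			   void *buf1, unsigned int len1,
			   void *buf2, unsigned int len2)
{
	int ret;

	/* First call starts the request's bio list. */
	ret = blk_rq_map_kern(q, rq, buf1, len1, GFP_KERNEL);
	if (ret)
		return ret;

	/*
	 * Second call appends another bio to the same request instead
	 * of replacing the first mapping; it fails if the request
	 * would exceed the queue's size limits.
	 */
	return blk_rq_map_kern(q, rq, buf2, len2, GFP_KERNEL);
}

Before this patch, the second call would have overwritten the request's bio
via blk_rq_bio_prep(); with blk_rq_append_bio() it is merged into the same
request, and the new error path drops the bio and returns the error when the
request grows too big.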