Diffstat (limited to 'block')
-rw-r--r--	block/Kconfig	11
-rw-r--r--	block/blk-core.c	19
-rw-r--r--	block/blk-integrity.c	1
-rw-r--r--	block/blk-settings.c	84
4 files changed, 67 insertions, 48 deletions
diff --git a/block/Kconfig b/block/Kconfig
index 95a86adc33a..9be0b56eaee 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -48,9 +48,9 @@ config LBDAF
 	  If unsure, say Y.
 
 config BLK_DEV_BSG
-	bool "Block layer SG support v4 (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
-	---help---
+	bool "Block layer SG support v4"
+	default y
+	help
 	  Saying Y here will enable generic SG (SCSI generic) v4
 	  support for any block device.
 
@@ -60,7 +60,10 @@ config BLK_DEV_BSG
 	  protocols (e.g. Task Management Functions and SMP in Serial
 	  Attached SCSI).
 
-	  If unsure, say N.
+	  This option is required by recent UDEV versions to properly
+	  access device serial numbers, etc.
+
+	  If unsure, say Y.
 
 config BLK_DEV_INTEGRITY
 	bool "Block layer data integrity support"
diff --git a/block/blk-core.c b/block/blk-core.c
index 4b45435c6ea..e3299a77a0d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -575,13 +575,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 		return NULL;
 	}
 
-	/*
-	 * if caller didn't supply a lock, they get per-queue locking with
-	 * our embedded lock
-	 */
-	if (!lock)
-		lock = &q->__queue_lock;
-
 	q->request_fn		= rfn;
 	q->prep_rq_fn		= NULL;
 	q->unplug_fn		= generic_unplug_device;
@@ -2143,7 +2136,7 @@ bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
 	return blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
-EXPORT_SYMBOL_GPL(blk_end_request);
+EXPORT_SYMBOL(blk_end_request);
 
 /**
  * blk_end_request_all - Helper function for drives to finish the request.
@@ -2164,7 +2157,7 @@ void blk_end_request_all(struct request *rq, int error)
 	pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
 	BUG_ON(pending);
 }
-EXPORT_SYMBOL_GPL(blk_end_request_all);
+EXPORT_SYMBOL(blk_end_request_all);
 
 /**
  * blk_end_request_cur - Helper function to finish the current request chunk.
@@ -2182,7 +2175,7 @@ bool blk_end_request_cur(struct request *rq, int error)
 {
 	return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
 }
-EXPORT_SYMBOL_GPL(blk_end_request_cur);
+EXPORT_SYMBOL(blk_end_request_cur);
 
 /**
  * __blk_end_request - Helper function for drivers to complete the request.
@@ -2201,7 +2194,7 @@ bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
 	return __blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
-EXPORT_SYMBOL_GPL(__blk_end_request);
+EXPORT_SYMBOL(__blk_end_request);
 
 /**
  * __blk_end_request_all - Helper function for drives to finish the request.
@@ -2222,7 +2215,7 @@ void __blk_end_request_all(struct request *rq, int error)
 	pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
 	BUG_ON(pending);
 }
-EXPORT_SYMBOL_GPL(__blk_end_request_all);
+EXPORT_SYMBOL(__blk_end_request_all);
 
 /**
  * __blk_end_request_cur - Helper function to finish the current request chunk.
@@ -2241,7 +2234,7 @@ bool __blk_end_request_cur(struct request *rq, int error)
 {
 	return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
 }
-EXPORT_SYMBOL_GPL(__blk_end_request_cur);
+EXPORT_SYMBOL(__blk_end_request_cur);
 
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		     struct bio *bio)
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 73e28d35568..15c630813b1 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -379,6 +379,7 @@ void blk_integrity_unregister(struct gendisk *disk)
 
 	kobject_uevent(&bi->kobj, KOBJ_REMOVE);
 	kobject_del(&bi->kobj);
+	kobject_put(&bi->kobj);
 	kmem_cache_free(integrity_cachep, bi);
 	disk->integrity = NULL;
 }
diff --git a/block/blk-settings.c b/block/blk-settings.c
index bd582a7f531..476d8706507 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -7,6 +7,7 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
+#include <linux/gcd.h>
 
 #include "blk.h"
 
@@ -165,6 +166,13 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	blk_set_default_limits(&q->limits);
 
 	/*
+	 * If the caller didn't supply a lock, fall back to our embedded
+	 * per-queue locks
+	 */
+	if (!q->queue_lock)
+		q->queue_lock = &q->__queue_lock;
+
+	/*
 	 * by default assume old behaviour and bounce for any highmem page
 	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
@@ -377,8 +385,8 @@ void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
 EXPORT_SYMBOL(blk_queue_alignment_offset);
 
 /**
- * blk_queue_io_min - set minimum request size for the queue
- * @q:	the request queue for the device
+ * blk_limits_io_min - set minimum request size for a device
+ * @limits: the queue limits
  * @min:  smallest I/O size in bytes
  *
  * Description:
@@ -387,15 +395,35 @@ EXPORT_SYMBOL(blk_queue_alignment_offset);
  *   smallest I/O the device can perform without incurring a performance
  *   penalty.
  */
-void blk_queue_io_min(struct request_queue *q, unsigned int min)
+void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
 {
-	q->limits.io_min = min;
+	limits->io_min = min;
 
-	if (q->limits.io_min < q->limits.logical_block_size)
-		q->limits.io_min = q->limits.logical_block_size;
+	if (limits->io_min < limits->logical_block_size)
+		limits->io_min = limits->logical_block_size;
 
-	if (q->limits.io_min < q->limits.physical_block_size)
-		q->limits.io_min = q->limits.physical_block_size;
+	if (limits->io_min < limits->physical_block_size)
+		limits->io_min = limits->physical_block_size;
+}
+EXPORT_SYMBOL(blk_limits_io_min);
+
+/**
+ * blk_queue_io_min - set minimum request size for the queue
+ * @q:	the request queue for the device
+ * @min:  smallest I/O size in bytes
+ *
+ * Description:
+ *   Storage devices may report a granularity or preferred minimum I/O
+ *   size which is the smallest request the device can perform without
+ *   incurring a performance penalty.  For disk drives this is often the
+ *   physical block size.  For RAID arrays it is often the stripe chunk
+ *   size.  A properly aligned multiple of minimum_io_size is the
+ *   preferred request size for workloads where a high number of I/O
+ *   operations is desired.
+ */
+void blk_queue_io_min(struct request_queue *q, unsigned int min)
+{
+	blk_limits_io_min(&q->limits, min);
 }
 EXPORT_SYMBOL(blk_queue_io_min);
 
@@ -405,8 +433,12 @@ EXPORT_SYMBOL(blk_queue_io_min);
  * @opt:  optimal request size in bytes
  *
  * Description:
- *   Drivers can call this function to set the preferred I/O request
- *   size for devices that report such a value.
+ *   Storage devices may report an optimal I/O size, which is the
+ *   device's preferred unit for sustained I/O.  This is rarely reported
+ *   for disk drives.  For RAID arrays it is usually the stripe width or
+ *   the internal track size.  A properly aligned multiple of
+ *   optimal_io_size is the preferred request size for workloads where
+ *   sustained throughput is desired.
  */
 void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
 {
@@ -426,27 +458,7 @@ EXPORT_SYMBOL(blk_queue_io_opt);
 **/
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
-	/* zero is "infinity" */
-	t->limits.max_sectors = min_not_zero(queue_max_sectors(t),
-					     queue_max_sectors(b));
-
-	t->limits.max_hw_sectors = min_not_zero(queue_max_hw_sectors(t),
-						queue_max_hw_sectors(b));
-
-	t->limits.seg_boundary_mask = min_not_zero(queue_segment_boundary(t),
-						   queue_segment_boundary(b));
-
-	t->limits.max_phys_segments = min_not_zero(queue_max_phys_segments(t),
-						   queue_max_phys_segments(b));
-
-	t->limits.max_hw_segments = min_not_zero(queue_max_hw_segments(t),
-						 queue_max_hw_segments(b));
-
-	t->limits.max_segment_size = min_not_zero(queue_max_segment_size(t),
-						  queue_max_segment_size(b));
-
-	t->limits.logical_block_size = max(queue_logical_block_size(t),
-					   queue_logical_block_size(b));
+	blk_stack_limits(&t->limits, &b->limits, 0);
 
 	if (!t->queue_lock)
 		WARN_ON_ONCE(1);
@@ -516,6 +528,16 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		return -1;
 	}
 
+	/* Find lcm() of optimal I/O size */
+	if (t->io_opt && b->io_opt)
+		t->io_opt = (t->io_opt * b->io_opt) / gcd(t->io_opt, b->io_opt);
+	else if (b->io_opt)
+		t->io_opt = b->io_opt;
+
+	/* Verify that optimal I/O size is a multiple of io_min */
+	if (t->io_min && t->io_opt % t->io_min)
+		return -1;
+
 	return 0;
 }
 EXPORT_SYMBOL(blk_stack_limits);
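The blk_limits_io_min() split above exists so that code working on a bare struct queue_limits (stacking drivers such as MD/DM assemble their limits before a request_queue is involved) can reuse the clamping logic, with blk_queue_io_min() reduced to a thin wrapper. Below is a minimal standalone sketch of that clamping behaviour; the field and function names mirror the kernel's struct queue_limits but this is ordinary userspace C for illustration, not kernel code.

/*
 * Illustration only: mirrors the clamping in blk_limits_io_min()
 * above using look-alike names, compiled as ordinary C.
 */
#include <stdio.h>

struct limits {
	unsigned int logical_block_size;
	unsigned int physical_block_size;
	unsigned int io_min;
};

static void limits_io_min(struct limits *limits, unsigned int min)
{
	limits->io_min = min;

	/* io_min may never drop below the logical block size... */
	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	/* ...nor below the physical block size. */
	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}

int main(void)
{
	/* A 512e disk: 512-byte logical, 4096-byte physical sectors. */
	struct limits lim = {
		.logical_block_size  = 512,
		.physical_block_size = 4096,
	};

	limits_io_min(&lim, 512);
	printf("io_min = %u\n", lim.io_min);	/* prints 4096 */
	return 0;
}

With a 512-byte-logical, 4096-byte-physical drive, requesting an io_min of 512 yields 4096: the minimum I/O size can never be smaller than what the hardware can address efficiently.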
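The final blk_stack_limits() hunk merges the optimal I/O sizes of stacked devices by taking their least common multiple, lcm(a, b) = a * b / gcd(a, b), which is why linux/gcd.h is now included, and then fails the stacking if the result is not a multiple of the combined io_min. Below is a self-contained userspace sketch of the same arithmetic with hypothetical example sizes; gcd() here stands in for the kernel's linux/gcd.h helper.

/*
 * Illustration only: the io_opt stacking arithmetic added to
 * blk_stack_limits() above, reproduced as ordinary C.
 */
#include <stdio.h>

static unsigned int gcd(unsigned int a, unsigned int b)
{
	/* Euclid's algorithm */
	while (b) {
		unsigned int r = a % b;
		a = b;
		b = r;
	}
	return a;
}

struct limits {
	unsigned int io_min;	/* bytes */
	unsigned int io_opt;	/* bytes */
};

/* Returns 0 on success, -1 if the stacked io_opt is misaligned. */
static int stack_io_opt(struct limits *t, const struct limits *b)
{
	/*
	 * Find lcm() of optimal I/O size.  Note that t->io_opt * b->io_opt
	 * can overflow unsigned int for very large sizes, just as in the
	 * kernel hunk.
	 */
	if (t->io_opt && b->io_opt)
		t->io_opt = (t->io_opt * b->io_opt) / gcd(t->io_opt, b->io_opt);
	else if (b->io_opt)
		t->io_opt = b->io_opt;

	/* Verify that optimal I/O size is a multiple of io_min */
	if (t->io_min && t->io_opt % t->io_min)
		return -1;

	return 0;
}

int main(void)
{
	struct limits top    = { .io_min = 4096, .io_opt = 65536 };	/* 64 KiB */
	struct limits bottom = { .io_min = 4096, .io_opt = 49152 };	/* 48 KiB */

	if (stack_io_opt(&top, &bottom))
		printf("stacking rejected: io_opt not a multiple of io_min\n");
	else
		printf("stacked io_opt = %u bytes\n", top.io_opt);	/* 196608 */
	return 0;
}

For these example values, lcm(65536, 49152) = 196608 bytes (192 KiB): the smallest request size that is simultaneously a whole number of both devices' preferred units, and still a multiple of the 4096-byte io_min, so the stacking succeeds.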