Diffstat (limited to 'block')
-rw-r--r--  block/genhd.c       48
-rw-r--r--  block/ll_rw_blk.c   40
-rw-r--r--  block/scsi_ioctl.c   2
3 files changed, 56 insertions, 34 deletions
diff --git a/block/genhd.c b/block/genhd.c
index f04609d553b..f1ed83f3f08 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -358,7 +358,7 @@ static struct sysfs_ops disk_sysfs_ops = {
static ssize_t disk_uevent_store(struct gendisk * disk,
const char *buf, size_t count)
{
- kobject_hotplug(&disk->kobj, KOBJ_ADD);
+ kobject_uevent(&disk->kobj, KOBJ_ADD);
return count;
}
static ssize_t disk_dev_read(struct gendisk * disk, char *page)
@@ -455,14 +455,14 @@ static struct kobj_type ktype_block = {
extern struct kobj_type ktype_part;
-static int block_hotplug_filter(struct kset *kset, struct kobject *kobj)
+static int block_uevent_filter(struct kset *kset, struct kobject *kobj)
{
struct kobj_type *ktype = get_ktype(kobj);
return ((ktype == &ktype_block) || (ktype == &ktype_part));
}
-static int block_hotplug(struct kset *kset, struct kobject *kobj, char **envp,
+static int block_uevent(struct kset *kset, struct kobject *kobj, char **envp,
int num_envp, char *buffer, int buffer_size)
{
struct kobj_type *ktype = get_ktype(kobj);
@@ -474,40 +474,40 @@ static int block_hotplug(struct kset *kset, struct kobject *kobj, char **envp,
if (ktype == &ktype_block) {
disk = container_of(kobj, struct gendisk, kobj);
- add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size,
- &length, "MINOR=%u", disk->first_minor);
+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
+ &length, "MINOR=%u", disk->first_minor);
} else if (ktype == &ktype_part) {
disk = container_of(kobj->parent, struct gendisk, kobj);
part = container_of(kobj, struct hd_struct, kobj);
- add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size,
- &length, "MINOR=%u",
- disk->first_minor + part->partno);
+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
+ &length, "MINOR=%u",
+ disk->first_minor + part->partno);
} else
return 0;
- add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size, &length,
- "MAJOR=%u", disk->major);
+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
+ "MAJOR=%u", disk->major);
/* add physical device, backing this device */
physdev = disk->driverfs_dev;
if (physdev) {
char *path = kobject_get_path(&physdev->kobj, GFP_KERNEL);
- add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size,
- &length, "PHYSDEVPATH=%s", path);
+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
+ &length, "PHYSDEVPATH=%s", path);
kfree(path);
if (physdev->bus)
- add_hotplug_env_var(envp, num_envp, &i,
- buffer, buffer_size, &length,
- "PHYSDEVBUS=%s",
- physdev->bus->name);
+ add_uevent_var(envp, num_envp, &i,
+ buffer, buffer_size, &length,
+ "PHYSDEVBUS=%s",
+ physdev->bus->name);
if (physdev->driver)
- add_hotplug_env_var(envp, num_envp, &i,
- buffer, buffer_size, &length,
- "PHYSDEVDRIVER=%s",
- physdev->driver->name);
+ add_uevent_var(envp, num_envp, &i,
+ buffer, buffer_size, &length,
+ "PHYSDEVDRIVER=%s",
+ physdev->driver->name);
}
/* terminate, set to next free slot, shrink available space */
@@ -520,13 +520,13 @@ static int block_hotplug(struct kset *kset, struct kobject *kobj, char **envp,
return 0;
}
-static struct kset_hotplug_ops block_hotplug_ops = {
- .filter = block_hotplug_filter,
- .hotplug = block_hotplug,
+static struct kset_uevent_ops block_uevent_ops = {
+ .filter = block_uevent_filter,
+ .uevent = block_uevent,
};
/* declare block_subsys. */
-static decl_subsys(block, &ktype_block, &block_hotplug_ops);
+static decl_subsys(block, &ktype_block, &block_uevent_ops);
/*
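The genhd.c portion above is the mechanical hotplug-to-uevent rename: kobject_hotplug() becomes kobject_uevent(), add_hotplug_env_var() becomes add_uevent_var(), and kset_hotplug_ops becomes kset_uevent_ops, with unchanged semantics. As a reference point, here is a minimal sketch of the renamed interface using the same callback signatures and add_uevent_var() usage shown in the hunks above; the "example_*" names are hypothetical and not part of this change.

/*
 * Illustrative sketch only; signatures follow the block_uevent_filter()
 * and block_uevent() callbacks in the diff above.
 */
static int example_uevent_filter(struct kset *kset, struct kobject *kobj)
{
	/* Emit uevents only for objects of the block ktype. */
	return get_ktype(kobj) == &ktype_block;
}

static int example_uevent(struct kset *kset, struct kobject *kobj,
			  char **envp, int num_envp,
			  char *buffer, int buffer_size)
{
	int i = 0, length = 0;

	/* Add one environment variable to the event, in the same way the
	 * MAJOR=/MINOR= variables are added in block_uevent() above. */
	add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
		       &length, "EXAMPLE=%s", "block");

	/* Terminate the environment list. */
	envp[i] = NULL;
	return 0;
}

static struct kset_uevent_ops example_uevent_ops = {
	.filter	= example_uevent_filter,
	.uevent	= example_uevent,
};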
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 99c9ca6d599..d4beb9a89ee 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -239,7 +239,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
q->backing_dev_info.state = 0;
q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
- blk_queue_max_sectors(q, MAX_SECTORS);
+ blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
blk_queue_hardsect_size(q, 512);
blk_queue_dma_alignment(q, 511);
blk_queue_congestion_threshold(q);
@@ -555,7 +555,12 @@ void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
}
- q->max_sectors = q->max_hw_sectors = max_sectors;
+ if (BLK_DEF_MAX_SECTORS > max_sectors)
+ q->max_hw_sectors = q->max_sectors = max_sectors;
+ else {
+ q->max_sectors = BLK_DEF_MAX_SECTORS;
+ q->max_hw_sectors = max_sectors;
+ }
}
EXPORT_SYMBOL(blk_queue_max_sectors);
@@ -657,8 +662,8 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
{
/* zero is "infinity" */
- t->max_sectors = t->max_hw_sectors =
- min_not_zero(t->max_sectors,b->max_sectors);
+ t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
+ t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
@@ -1293,9 +1298,15 @@ static inline int ll_new_hw_segment(request_queue_t *q,
static int ll_back_merge_fn(request_queue_t *q, struct request *req,
struct bio *bio)
{
+ unsigned short max_sectors;
int len;
- if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+ if (unlikely(blk_pc_request(req)))
+ max_sectors = q->max_hw_sectors;
+ else
+ max_sectors = q->max_sectors;
+
+ if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
req->flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
@@ -1325,9 +1336,16 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
static int ll_front_merge_fn(request_queue_t *q, struct request *req,
struct bio *bio)
{
+ unsigned short max_sectors;
int len;
- if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+ if (unlikely(blk_pc_request(req)))
+ max_sectors = q->max_hw_sectors;
+ else
+ max_sectors = q->max_sectors;
+
+
+ if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
req->flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
@@ -2144,7 +2162,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
struct bio *bio;
int reading;
- if (len > (q->max_sectors << 9))
+ if (len > (q->max_hw_sectors << 9))
return -EINVAL;
if (!len || !ubuf)
return -EINVAL;
@@ -2259,7 +2277,7 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
{
struct bio *bio;
- if (len > (q->max_sectors << 9))
+ if (len > (q->max_hw_sectors << 9))
return -EINVAL;
if (!len || !kbuf)
return -EINVAL;
@@ -2306,6 +2324,8 @@ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
generic_unplug_device(q);
}
+EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
+
/**
* blk_execute_rq - insert a request into queue for execution
* @q: queue to insert the request in
@@ -2444,7 +2464,7 @@ void disk_round_stats(struct gendisk *disk)
/*
* queue lock must be held
*/
-static void __blk_put_request(request_queue_t *q, struct request *req)
+void __blk_put_request(request_queue_t *q, struct request *req)
{
struct request_list *rl = req->rl;
@@ -2473,6 +2493,8 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
}
}
+EXPORT_SYMBOL_GPL(__blk_put_request);
+
void blk_put_request(struct request *req)
{
unsigned long flags;
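The ll_rw_blk.c changes above split the single queue limit into two: q->max_sectors is the soft cap applied to normal file system requests (bounded by BLK_DEF_MAX_SECTORS), while q->max_hw_sectors records what the hardware can actually accept and is used for passthrough (REQ_BLOCK_PC) requests and for blk_rq_map_user()/blk_rq_map_kern(). The sketch below restates that logic with comments; the "example_*" names are hypothetical, and BLK_DEF_MAX_SECTORS and blk_pc_request() are assumed to be the definitions from this kernel's <linux/blkdev.h>.

/* Mirrors the new body of blk_queue_max_sectors() above. */
void example_set_max_sectors(request_queue_t *q, unsigned short max_sectors)
{
	if (BLK_DEF_MAX_SECTORS > max_sectors) {
		/* Small hardware limit: both caps follow the hardware. */
		q->max_hw_sectors = q->max_sectors = max_sectors;
	} else {
		/* Large hardware limit: fs requests stay capped at
		 * BLK_DEF_MAX_SECTORS, while max_hw_sectors keeps the
		 * real hardware limit for passthrough users. */
		q->max_sectors = BLK_DEF_MAX_SECTORS;
		q->max_hw_sectors = max_sectors;
	}
}

/* Merging then picks the limit by request type, as ll_back_merge_fn()
 * and ll_front_merge_fn() now do above. */
static unsigned short example_limit_for(request_queue_t *q,
					struct request *req)
{
	return blk_pc_request(req) ? q->max_hw_sectors : q->max_sectors;
}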
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 6e7db2e79f4..1d8852f7bbf 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -233,7 +233,7 @@ static int sg_io(struct file *file, request_queue_t *q,
if (verify_command(file, cmd))
return -EPERM;
- if (hdr->dxfer_len > (q->max_sectors << 9))
+ if (hdr->dxfer_len > (q->max_hw_sectors << 9))
return -EIO;
if (hdr->dxfer_len)