Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-table.c   |  8
-rw-r--r--  drivers/md/dm.c         | 10
-rw-r--r--  drivers/md/faulty.c     |  2
-rw-r--r--  drivers/md/linear.c     | 14
-rw-r--r--  drivers/md/md.c         |  2
-rw-r--r--  drivers/md/multipath.c  | 12
-rw-r--r--  drivers/md/raid0.c      | 14
-rw-r--r--  drivers/md/raid1.c      | 12
-rw-r--r--  drivers/md/raid10.c     | 14
-rw-r--r--  drivers/md/raid5.c      | 18
10 files changed, 53 insertions(+), 53 deletions(-)
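
Every hunk below makes the same mechanical substitution: the block layer's request_queue_t typedef is replaced by the explicit struct request_queue it aliased, with no functional change in these md/dm paths. For context, a minimal sketch of the typedef being retired, assuming the pre-change definition in include/linux/blkdev.h:

    struct request_queue;                           /* defined in full in blkdev.h */
    typedef struct request_queue request_queue_t;   /* the alias each hunk below removes a use of */

Because the typedef was a pure alias, spelling out the struct type is a rename only, which is why the diffstat shows equal insertions and deletions in every file; it also follows the kernel coding-style preference against typedefs for struct types.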
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 2fc199b0016..2bcde5798b5 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -526,7 +526,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
{
- request_queue_t *q = bdev_get_queue(bdev);
+ struct request_queue *q = bdev_get_queue(bdev);
struct io_restrictions *rs = &ti->limits;
/*
@@ -979,7 +979,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
devices = dm_table_get_devices(t);
for (d = devices->next; d != devices; d = d->next) {
struct dm_dev *dd = list_entry(d, struct dm_dev, list);
- request_queue_t *q = bdev_get_queue(dd->bdev);
+ struct request_queue *q = bdev_get_queue(dd->bdev);
r |= bdi_congested(&q->backing_dev_info, bdi_bits);
}
@@ -992,7 +992,7 @@ void dm_table_unplug_all(struct dm_table *t)
for (d = devices->next; d != devices; d = d->next) {
struct dm_dev *dd = list_entry(d, struct dm_dev, list);
- request_queue_t *q = bdev_get_queue(dd->bdev);
+ struct request_queue *q = bdev_get_queue(dd->bdev);
if (q->unplug_fn)
q->unplug_fn(q);
@@ -1011,7 +1011,7 @@ int dm_table_flush_all(struct dm_table *t)
for (d = devices->next; d != devices; d = d->next) {
struct dm_dev *dd = list_entry(d, struct dm_dev, list);
- request_queue_t *q = bdev_get_queue(dd->bdev);
+ struct request_queue *q = bdev_get_queue(dd->bdev);
int err;
if (!q->issue_flush_fn)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 846614e676c..141ff9fa296 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -80,7 +80,7 @@ struct mapped_device {
unsigned long flags;
- request_queue_t *queue;
+ struct request_queue *queue;
struct gendisk *disk;
char name[16];
@@ -792,7 +792,7 @@ static void __split_bio(struct mapped_device *md, struct bio *bio)
* The request function that just remaps the bio built up by
* dm_merge_bvec.
*/
-static int dm_request(request_queue_t *q, struct bio *bio)
+static int dm_request(struct request_queue *q, struct bio *bio)
{
int r;
int rw = bio_data_dir(bio);
@@ -844,7 +844,7 @@ static int dm_request(request_queue_t *q, struct bio *bio)
return 0;
}
-static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
+static int dm_flush_all(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
struct mapped_device *md = q->queuedata;
@@ -859,7 +859,7 @@ static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
return ret;
}
-static void dm_unplug_all(request_queue_t *q)
+static void dm_unplug_all(struct request_queue *q)
{
struct mapped_device *md = q->queuedata;
struct dm_table *map = dm_get_table(md);
@@ -1110,7 +1110,7 @@ static void __set_size(struct mapped_device *md, sector_t size)
static int __bind(struct mapped_device *md, struct dm_table *t)
{
- request_queue_t *q = md->queue;
+ struct request_queue *q = md->queue;
sector_t size;
size = dm_table_get_size(t);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 4ebd0f2a75e..cb059cf14c2 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -167,7 +167,7 @@ static void add_sector(conf_t *conf, sector_t start, int mode)
conf->nfaults = n+1;
}
-static int make_request(request_queue_t *q, struct bio *bio)
+static int make_request(struct request_queue *q, struct bio *bio)
{
mddev_t *mddev = q->queuedata;
conf_t *conf = (conf_t*)mddev->private;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 19274108319..17f795c3e0a 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -55,7 +55,7 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
*
* Return amount of bytes we can take at this offset
*/
-static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int linear_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
{
mddev_t *mddev = q->queuedata;
dev_info_t *dev0;
@@ -79,20 +79,20 @@ static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio
return maxsectors << 9;
}
-static void linear_unplug(request_queue_t *q)
+static void linear_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
linear_conf_t *conf = mddev_to_conf(mddev);
int i;
for (i=0; i < mddev->raid_disks; i++) {
- request_queue_t *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
+ struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
if (r_queue->unplug_fn)
r_queue->unplug_fn(r_queue);
}
}
-static int linear_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int linear_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
@@ -101,7 +101,7 @@ static int linear_issue_flush(request_queue_t *q, struct gendisk *disk,
for (i=0; i < mddev->raid_disks && ret == 0; i++) {
struct block_device *bdev = conf->disks[i].rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
@@ -118,7 +118,7 @@ static int linear_congested(void *data, int bits)
int i, ret = 0;
for (i = 0; i < mddev->raid_disks && !ret ; i++) {
- request_queue_t *q = bdev_get_queue(conf->disks[i].rdev->bdev);
+ struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
}
return ret;
@@ -330,7 +330,7 @@ static int linear_stop (mddev_t *mddev)
return 0;
}
-static int linear_make_request (request_queue_t *q, struct bio *bio)
+static int linear_make_request (struct request_queue *q, struct bio *bio)
{
const int rw = bio_data_dir(bio);
mddev_t *mddev = q->queuedata;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 65ddc887dfd..f883b7e37f3 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -211,7 +211,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
)
-static int md_fail_request (request_queue_t *q, struct bio *bio)
+static int md_fail_request (struct request_queue *q, struct bio *bio)
{
bio_io_error(bio, bio->bi_size);
return 0;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 14da37fee37..1e2af43a73b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -125,7 +125,7 @@ static void unplug_slaves(mddev_t *mddev)
mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)
&& atomic_read(&rdev->nr_pending)) {
- request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+ struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
@@ -140,13 +140,13 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_unlock();
}
-static void multipath_unplug(request_queue_t *q)
+static void multipath_unplug(struct request_queue *q)
{
unplug_slaves(q->queuedata);
}
-static int multipath_make_request (request_queue_t *q, struct bio * bio)
+static int multipath_make_request (struct request_queue *q, struct bio * bio)
{
mddev_t *mddev = q->queuedata;
multipath_conf_t *conf = mddev_to_conf(mddev);
@@ -199,7 +199,7 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
seq_printf (seq, "]");
}
-static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int multipath_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
@@ -211,7 +211,7 @@ static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
@@ -238,7 +238,7 @@ static int multipath_congested(void *data, int bits)
for (i = 0; i < mddev->raid_disks ; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
- request_queue_t *q = bdev_get_queue(rdev->bdev);
+ struct request_queue *q = bdev_get_queue(rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
/* Just like multipath_map, we just check the
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 2c404f73a37..b8216bc6db4 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -25,7 +25,7 @@
#define MD_DRIVER
#define MD_PERSONALITY
-static void raid0_unplug(request_queue_t *q)
+static void raid0_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
raid0_conf_t *conf = mddev_to_conf(mddev);
@@ -33,14 +33,14 @@ static void raid0_unplug(request_queue_t *q)
int i;
for (i=0; i<mddev->raid_disks; i++) {
- request_queue_t *r_queue = bdev_get_queue(devlist[i]->bdev);
+ struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
if (r_queue->unplug_fn)
r_queue->unplug_fn(r_queue);
}
}
-static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid0_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
@@ -50,7 +50,7 @@ static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk,
for (i=0; i<mddev->raid_disks && ret == 0; i++) {
struct block_device *bdev = devlist[i]->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
@@ -68,7 +68,7 @@ static int raid0_congested(void *data, int bits)
int i, ret = 0;
for (i = 0; i < mddev->raid_disks && !ret ; i++) {
- request_queue_t *q = bdev_get_queue(devlist[i]->bdev);
+ struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
}
@@ -268,7 +268,7 @@ static int create_strip_zones (mddev_t *mddev)
*
* Return amount of bytes we can accept at this offset
*/
-static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int raid0_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
{
mddev_t *mddev = q->queuedata;
sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
@@ -408,7 +408,7 @@ static int raid0_stop (mddev_t *mddev)
return 0;
}
-static int raid0_make_request (request_queue_t *q, struct bio *bio)
+static int raid0_make_request (struct request_queue *q, struct bio *bio)
{
mddev_t *mddev = q->queuedata;
unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 00c78b77b13..650991bddd8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -552,7 +552,7 @@ static void unplug_slaves(mddev_t *mddev)
for (i=0; i<mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
- request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+ struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
@@ -567,7 +567,7 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_unlock();
}
-static void raid1_unplug(request_queue_t *q)
+static void raid1_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
@@ -575,7 +575,7 @@ static void raid1_unplug(request_queue_t *q)
md_wakeup_thread(mddev->thread);
}
-static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid1_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
@@ -587,7 +587,7 @@ static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
@@ -615,7 +615,7 @@ static int raid1_congested(void *data, int bits)
for (i = 0; i < mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
- request_queue_t *q = bdev_get_queue(rdev->bdev);
+ struct request_queue *q = bdev_get_queue(rdev->bdev);
/* Note the '|| 1' - when read_balance prefers
* non-congested targets, it can be removed
@@ -765,7 +765,7 @@ do_sync_io:
return NULL;
}
-static int make_request(request_queue_t *q, struct bio * bio)
+static int make_request(struct request_queue *q, struct bio * bio)
{
mddev_t *mddev = q->queuedata;
conf_t *conf = mddev_to_conf(mddev);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a95ada1cfac..f730a144baf 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -453,7 +453,7 @@ static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
* If near_copies == raid_disk, there are no striping issues,
* but in that case, the function isn't called at all.
*/
-static int raid10_mergeable_bvec(request_queue_t *q, struct bio *bio,
+static int raid10_mergeable_bvec(struct request_queue *q, struct bio *bio,
struct bio_vec *bio_vec)
{
mddev_t *mddev = q->queuedata;
@@ -595,7 +595,7 @@ static void unplug_slaves(mddev_t *mddev)
for (i=0; i<mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
- request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+ struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
@@ -610,7 +610,7 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_unlock();
}
-static void raid10_unplug(request_queue_t *q)
+static void raid10_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
@@ -618,7 +618,7 @@ static void raid10_unplug(request_queue_t *q)
md_wakeup_thread(mddev->thread);
}
-static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid10_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
@@ -630,7 +630,7 @@ static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
@@ -658,7 +658,7 @@ static int raid10_congested(void *data, int bits)
for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
- request_queue_t *q = bdev_get_queue(rdev->bdev);
+ struct request_queue *q = bdev_get_queue(rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
}
@@ -772,7 +772,7 @@ static void unfreeze_array(conf_t *conf)
spin_unlock_irq(&conf->resync_lock);
}
-static int make_request(request_queue_t *q, struct bio * bio)
+static int make_request(struct request_queue *q, struct bio * bio)
{
mddev_t *mddev = q->queuedata;
conf_t *conf = mddev_to_conf(mddev);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d90ee145eff..2aff4be35dc 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -289,7 +289,7 @@ static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, in
}
static void unplug_slaves(mddev_t *mddev);
-static void raid5_unplug_device(request_queue_t *q);
+static void raid5_unplug_device(struct request_queue *q);
static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
int pd_idx, int noblock)
@@ -3182,7 +3182,7 @@ static void unplug_slaves(mddev_t *mddev)
for (i=0; i<mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
- request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+ struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
@@ -3197,7 +3197,7 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_unlock();
}
-static void raid5_unplug_device(request_queue_t *q)
+static void raid5_unplug_device(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3216,7 +3216,7 @@ static void raid5_unplug_device(request_queue_t *q)
unplug_slaves(mddev);
}
-static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
@@ -3228,7 +3228,7 @@ static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
@@ -3267,7 +3267,7 @@ static int raid5_congested(void *data, int bits)
/* We want read requests to align with chunks where possible,
* but write requests don't need to.
*/
-static int raid5_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
{
mddev_t *mddev = q->queuedata;
sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
@@ -3377,7 +3377,7 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
static int bio_fits_rdev(struct bio *bi)
{
- request_queue_t *q = bdev_get_queue(bi->bi_bdev);
+ struct request_queue *q = bdev_get_queue(bi->bi_bdev);
if ((bi->bi_size>>9) > q->max_sectors)
return 0;
@@ -3396,7 +3396,7 @@ static int bio_fits_rdev(struct bio *bi)
}
-static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
+static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
{
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3466,7 +3466,7 @@ static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
}
-static int make_request(request_queue_t *q, struct bio * bi)
+static int make_request(struct request_queue *q, struct bio * bi)
{
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev);
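
The make_request, unplug, issue_flush and mergeable_bvec converters above all keep the prototypes the block layer expects from a bio-based driver; only the spelling of the queue type changes. As a reference point, a minimal, hedged sketch of how such a handler is registered against the now explicitly named struct request_queue, assuming the 2.6.22-era bio API; example_make_request and example_init are hypothetical names, not part of this patch:

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    /* Hypothetical bio-based handler with the same prototype as the converted
     * md/dm make_request functions above. */
    static int example_make_request(struct request_queue *q, struct bio *bio)
    {
            void *private = q->queuedata;   /* per-device data, as with mddev_t / mapped_device */

            (void)private;
            /* A real driver would remap or queue the bio here; this sketch just
             * fails it, mirroring md_fail_request() in the md.c hunk above. */
            bio_io_error(bio, bio->bi_size);
            return 0;
    }

    static struct request_queue *example_init(void *private)
    {
            struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

            if (!q)
                    return NULL;
            q->queuedata = private;
            blk_queue_make_request(q, example_make_request);
            return q;
    }

The sketch deliberately mirrors the pattern visible in dm.c and md.c: the queue carries the driver's private data in queuedata, and the handler completes or remaps the bio itself rather than going through a request-based elevator.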