Diffstat (limited to 'drivers/md')
-rw-r--r-- drivers/md/bitmap.c   | 24
-rw-r--r-- drivers/md/dm-ioctl.c |  2
-rw-r--r-- drivers/md/md.c       |  2
-rw-r--r-- drivers/md/raid5.c    | 42
4 files changed, 63 insertions(+), 7 deletions(-)
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 11108165e26..5554adaa58f 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -666,7 +666,7 @@ static void bitmap_file_put(struct bitmap *bitmap)
if (file) {
struct inode *inode = file->f_path.dentry->d_inode;
- invalidate_inode_pages(inode->i_mapping);
+ invalidate_mapping_pages(inode->i_mapping, 0, -1);
fput(file);
}
}
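
The old invalidate_inode_pages() call is replaced by invalidate_mapping_pages(), which takes an explicit page-index range; passing 0 and -1 (the latter wrapping to the largest pgoff_t) covers the whole mapping, preserving the old behaviour. A minimal sketch of the new call; drop_cached_pages() is a hypothetical wrapper, not part of this patch:

    #include <linux/fs.h>

    /* Invalidate every clean cached page for this mapping; pages that
     * are dirty, locked, or under writeback are left alone. */
    static void drop_cached_pages(struct address_space *mapping)
    {
            invalidate_mapping_pages(mapping, 0, (pgoff_t)-1);
    }
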
@@ -1160,6 +1160,22 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
return 0;
}
+ if (unlikely((*bmc & COUNTER_MAX) == COUNTER_MAX)) {
+ DEFINE_WAIT(__wait);
+ /* note that it is safe to do the prepare_to_wait
+ * after the test as long as we do it before dropping
+ * the spinlock.
+ */
+ prepare_to_wait(&bitmap->overflow_wait, &__wait,
+ TASK_UNINTERRUPTIBLE);
+ spin_unlock_irq(&bitmap->lock);
+ bitmap->mddev->queue
+ ->unplug_fn(bitmap->mddev->queue);
+ schedule();
+ finish_wait(&bitmap->overflow_wait, &__wait);
+ continue;
+ }
+
switch(*bmc) {
case 0:
bitmap_file_set_bit(bitmap, offset);
@@ -1169,7 +1185,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
case 1:
*bmc = 2;
}
- BUG_ON((*bmc & COUNTER_MAX) == COUNTER_MAX);
+
(*bmc)++;
spin_unlock_irq(&bitmap->lock);
@@ -1207,6 +1223,9 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
if (!success && ! (*bmc & NEEDED_MASK))
*bmc |= NEEDED_MASK;
+ if ((*bmc & COUNTER_MAX) == COUNTER_MAX)
+ wake_up(&bitmap->overflow_wait);
+
(*bmc)--;
if (*bmc <= 2) {
set_page_attr(bitmap,
@@ -1431,6 +1450,7 @@ int bitmap_create(mddev_t *mddev)
spin_lock_init(&bitmap->lock);
atomic_set(&bitmap->pending_writes, 0);
init_waitqueue_head(&bitmap->write_wait);
+ init_waitqueue_head(&bitmap->overflow_wait);
bitmap->mddev = mddev;
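
Taken together, the bitmap.c hunks replace a BUG_ON() on counter saturation with a sleep: bitmap_startwrite() now waits on the new overflow_wait queue whenever a per-chunk write counter sits at COUNTER_MAX, first unplugging the device queue so in-flight writes can complete and drain the counter, and bitmap_endwrite() wakes the queue before decrementing a saturated counter. A minimal sketch of the same wait/wake pattern under assumed names (struct my_dev and a generic slot counter, not the md bitmap types):

    #include <linux/wait.h>
    #include <linux/spinlock.h>
    #include <linux/sched.h>

    struct my_dev {
            spinlock_t lock;
            unsigned int count;             /* saturating counter, like *bmc */
            wait_queue_head_t overflow_wait;
    };

    static void my_get_slot(struct my_dev *d, unsigned int max)
    {
            spin_lock_irq(&d->lock);
            while (d->count == max) {
                    DEFINE_WAIT(w);
                    /* Safe to prepare_to_wait() after the test because we
                     * still hold the lock: a waker cannot slip in between
                     * the test and the enqueue. */
                    prepare_to_wait(&d->overflow_wait, &w,
                                    TASK_UNINTERRUPTIBLE);
                    spin_unlock_irq(&d->lock);
                    schedule();
                    finish_wait(&d->overflow_wait, &w);
                    spin_lock_irq(&d->lock);  /* re-test under the lock */
            }
            d->count++;
            spin_unlock_irq(&d->lock);
    }

    static void my_put_slot(struct my_dev *d, unsigned int max)
    {
            spin_lock_irq(&d->lock);
            if (d->count == max)            /* a writer may be sleeping */
                    wake_up(&d->overflow_wait);
            d->count--;                     /* sleeper re-tests after unlock */
            spin_unlock_irq(&d->lock);
    }
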
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index cd6a184536a..b441d82c338 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1473,7 +1473,7 @@ static int ctl_ioctl(struct inode *inode, struct file *file,
return r;
}
-static struct file_operations _ctl_fops = {
+static const struct file_operations _ctl_fops = {
.ioctl = ctl_ioctl,
.owner = THIS_MODULE,
};
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e8807ea5377..e85fa75a791 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4920,7 +4920,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
return mask;
}
-static struct file_operations md_seq_fops = {
+static const struct file_operations md_seq_fops = {
.owner = THIS_MODULE,
.open = md_seq_open,
.read = seq_read,
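
The dm-ioctl.c and md.c changes are the same treewide cleanup: marking a file_operations table const lets the compiler place it in read-only data, so the function pointers cannot be overwritten at run time. The shape of the change, with hypothetical names:

    #include <linux/fs.h>
    #include <linux/module.h>

    static int my_open(struct inode *inode, struct file *file)
    {
            return 0;
    }

    /* const: the table lands in .rodata and cannot be patched at run time */
    static const struct file_operations my_fops = {
            .owner = THIS_MODULE,
            .open  = my_open,
    };
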
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 467c16982d0..11c3d7bfa79 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2620,7 +2620,7 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
}
bi = conf->retry_read_aligned_list;
if(bi) {
- conf->retry_read_aligned = bi->bi_next;
+ conf->retry_read_aligned_list = bi->bi_next;
bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* biased count of active stripes */
bi->bi_hw_segments = 0; /* count of processed stripes */
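
The one-line remove_bio_from_retry() fix above corrects a pop from the wrong head: the old code read the head from retry_read_aligned_list but wrote the new head to retry_read_aligned, so the list never advanced and the same bio was handed out repeatedly. A self-contained userspace model of the corrected pop:

    #include <stddef.h>
    #include <stdio.h>

    struct bio { int id; struct bio *bi_next; };

    static struct bio *retry_list;            /* plays retry_read_aligned_list */

    static struct bio *pop(void)
    {
            struct bio *bi = retry_list;
            if (bi) {
                    retry_list = bi->bi_next; /* update the head we read from */
                    bi->bi_next = NULL;
            }
            return bi;
    }

    int main(void)
    {
            struct bio b2 = { 2, NULL }, b1 = { 1, &b2 };
            struct bio *bi;

            retry_list = &b1;
            while ((bi = pop()) != NULL)      /* loops forever with the old bug */
                    printf("popped %d\n", bi->id);
            return 0;
    }
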
@@ -2669,6 +2669,27 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
return 0;
}
+static int bio_fits_rdev(struct bio *bi)
+{
+ request_queue_t *q = bdev_get_queue(bi->bi_bdev);
+
+ if ((bi->bi_size>>9) > q->max_sectors)
+ return 0;
+ blk_recount_segments(q, bi);
+ if (bi->bi_phys_segments > q->max_phys_segments ||
+ bi->bi_hw_segments > q->max_hw_segments)
+ return 0;
+
+ if (q->merge_bvec_fn)
+ /* it's too hard to apply the merge_bvec_fn at this stage,
+ * just give up
+ */
+ return 0;
+
+ return 1;
+}
+
+
static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
{
mddev_t *mddev = q->queuedata;
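
bio_fits_rdev() is a conservative gate for the aligned-read fast path: once a bio has been re-targeted at a member disk, its size and recounted segment totals must respect that disk's queue limits, and any queue with a merge_bvec_fn is simply refused. A userspace model of the same checks (types and limit values are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct limits {
            unsigned int max_sectors, max_phys_segments, max_hw_segments;
            bool has_merge_bvec_fn;
    };

    struct request_sketch {
            unsigned int sectors, phys_segments, hw_segments;
    };

    static bool fits(const struct request_sketch *r, const struct limits *l)
    {
            if (r->sectors > l->max_sectors)
                    return false;
            if (r->phys_segments > l->max_phys_segments ||
                r->hw_segments > l->max_hw_segments)
                    return false;
            /* honouring a merge_bvec_fn here is too hard; give up */
            if (l->has_merge_bvec_fn)
                    return false;
            return true;
    }

    int main(void)
    {
            struct limits l = { 255, 128, 128, false };
            struct request_sketch r = { 8, 1, 1 };

            printf("%s\n", fits(&r, &l) ? "fast path" : "fall back");
            return 0;
    }
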
@@ -2715,6 +2736,13 @@ static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
align_bi->bi_sector += rdev->data_offset;
+ if (!bio_fits_rdev(align_bi)) {
+ /* too big in some way */
+ bio_put(align_bi);
+ rdev_dec_pending(rdev, mddev);
+ return 0;
+ }
+
spin_lock_irq(&conf->device_lock);
wait_event_lock_irq(conf->wait_for_stripe,
conf->quiesce == 0,
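
The chunk_aligned_read() hunk wires that gate in: if the re-targeted bio no longer fits, the clone is dropped with bio_put() and the rdev reference taken earlier is released with rdev_dec_pending() before returning 0, which sends the original bio down the normal stripe path. A small model of that undo-then-fall-back shape (names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct disk { int pending; };

    static void get_disk(struct disk *d) { d->pending++; }
    static void put_disk(struct disk *d) { d->pending--; }

    /* Returns true if the fast path handled the request. */
    static bool try_fast_path(struct disk *d, bool fits_after_retarget)
    {
            get_disk(d);                    /* reference for the fast path */
            if (!fits_after_retarget) {
                    put_disk(d);            /* undo before falling back */
                    return false;
            }
            /* ... submit the aligned request; in the kernel code the
             * reference is dropped on I/O completion instead ... */
            put_disk(d);
            return true;
    }

    int main(void)
    {
            struct disk d = { 0 };

            if (!try_fast_path(&d, false))
                    printf("normal path, pending=%d\n", d.pending); /* 0 */
            return 0;
    }
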
@@ -3107,7 +3135,9 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
for (; logical_sector < last_sector;
- logical_sector += STRIPE_SECTORS, scnt++) {
+ logical_sector += STRIPE_SECTORS,
+ sector += STRIPE_SECTORS,
+ scnt++) {
if (scnt < raid_bio->bi_hw_segments)
/* already done this stripe */
@@ -3123,7 +3153,13 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
}
set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
- add_stripe_bio(sh, raid_bio, dd_idx, 0);
+ if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
+ release_stripe(sh);
+ raid_bio->bi_hw_segments = scnt;
+ conf->retry_read_aligned = raid_bio;
+ return handled;
+ }
+
handle_stripe(sh, NULL);
release_stripe(sh);
handled++;
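
The final hunk makes retry_aligned_read() robust when add_stripe_bio() cannot attach the read to a stripe: progress is recorded in bi_hw_segments (the count of stripes already processed) and the bio is parked on conf->retry_read_aligned so a later pass resumes exactly where this one stopped. A userspace model of that resume-from-cursor pattern:

    #include <stdbool.h>
    #include <stdio.h>

    enum { NUNITS = 6 };

    struct work {
            int done;               /* plays bi_hw_segments: units finished */
    };

    static struct work *retry_slot; /* plays conf->retry_read_aligned */
    static bool unit3_busy = true;  /* unit 3 fails on the first pass only */

    static bool handle_unit(int i)
    {
            if (i == 3 && unit3_busy) {
                    unit3_busy = false;
                    return false;
            }
            printf("handled unit %d\n", i);
            return true;
    }

    static int process(struct work *w)
    {
            int handled = 0;

            for (int i = 0; i < NUNITS; i++) {
                    if (i < w->done)
                            continue;        /* done on an earlier pass */
                    if (!handle_unit(i)) {
                            w->done = i;     /* record the cursor */
                            retry_slot = w;  /* park for a later retry */
                            return handled;
                    }
                    handled++;
            }
            return handled;
    }

    int main(void)
    {
            struct work w = { 0 };

            process(&w);                     /* stops at unit 3 */
            if (retry_slot) {
                    struct work *again = retry_slot;
                    retry_slot = NULL;
                    process(again);          /* resumes at unit 3 */
            }
            return 0;
    }
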