path: root/drivers/md
Diffstat (limited to 'drivers/md')
-rw-r--r--   drivers/md/dm-crypt.c           19
-rw-r--r--   drivers/md/dm-emc.c              6
-rw-r--r--   drivers/md/dm-hw-handler.c       2
-rw-r--r--   drivers/md/dm-mpath.c           32
-rw-r--r--   drivers/md/dm-path-selector.c    2
-rw-r--r--   drivers/md/dm-table.c            2
-rw-r--r--   drivers/md/dm-zero.c             4
-rw-r--r--   drivers/md/dm.c                 96
-rw-r--r--   drivers/md/md.c                 14
-rw-r--r--   drivers/md/multipath.c           5
-rw-r--r--   drivers/md/raid1.c               2
-rw-r--r--   drivers/md/raid10.c              2
-rw-r--r--   drivers/md/raid5.c               2
-rw-r--r--   drivers/md/raid6main.c           2
14 files changed, 105 insertions, 85 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 77619a56e2b..0dd6c2b5391 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -331,25 +331,19 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
struct bio *bio;
unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
int gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
- unsigned long flags = current->flags;
unsigned int i;
/*
- * Tell VM to act less aggressively and fail earlier.
- * This is not necessary but increases throughput.
+ * Use __GFP_NOMEMALLOC to tell the VM to act less aggressively and
+ * to fail earlier. This is not necessary but increases throughput.
* FIXME: Is this really intelligent?
*/
- current->flags &= ~PF_MEMALLOC;
-
if (base_bio)
- bio = bio_clone(base_bio, GFP_NOIO);
+ bio = bio_clone(base_bio, GFP_NOIO|__GFP_NOMEMALLOC);
else
- bio = bio_alloc(GFP_NOIO, nr_iovecs);
- if (!bio) {
- if (flags & PF_MEMALLOC)
- current->flags |= PF_MEMALLOC;
+ bio = bio_alloc(GFP_NOIO|__GFP_NOMEMALLOC, nr_iovecs);
+ if (!bio)
return NULL;
- }
/* if the last bio was not complete, continue where that one ended */
bio->bi_idx = *bio_vec_idx;
@@ -386,9 +380,6 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
size -= bv->bv_len;
}
- if (flags & PF_MEMALLOC)
- current->flags |= PF_MEMALLOC;
-
if (!bio->bi_size) {
bio_put(bio);
return NULL;
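
For reference, a minimal sketch of the allocation style the dm-crypt hunk above switches to; it is illustrative only (reduced error handling, placeholder context), not code from the patch itself:

	/*
	 * __GFP_NOMEMALLOC asks the allocator not to dip into the emergency
	 * reserves and to fail early, instead of clearing PF_MEMALLOC on the
	 * current task around the allocation and restoring it afterwards.
	 */
	bio = bio_alloc(GFP_NOIO | __GFP_NOMEMALLOC, nr_iovecs);
	if (!bio)
		return NULL;	/* give up early; the caller copes with a NULL buffer */
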
diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c
index 70065866459..c7067674dcb 100644
--- a/drivers/md/dm-emc.c
+++ b/drivers/md/dm-emc.c
@@ -223,8 +223,10 @@ static struct emc_handler *alloc_emc_handler(void)
{
struct emc_handler *h = kmalloc(sizeof(*h), GFP_KERNEL);
- if (h)
+ if (h) {
+ memset(h, 0, sizeof(*h));
spin_lock_init(&h->lock);
+ }
return h;
}
@@ -259,8 +261,6 @@ static int emc_create(struct hw_handler *hwh, unsigned argc, char **argv)
if (!h)
return -ENOMEM;
- memset(h, 0, sizeof(*h));
-
hwh->context = h;
if ((h->short_trespass = short_trespass))
diff --git a/drivers/md/dm-hw-handler.c b/drivers/md/dm-hw-handler.c
index ae63772e44c..4cc0010e015 100644
--- a/drivers/md/dm-hw-handler.c
+++ b/drivers/md/dm-hw-handler.c
@@ -23,7 +23,7 @@ struct hwh_internal {
static LIST_HEAD(_hw_handlers);
static DECLARE_RWSEM(_hwh_lock);
-struct hwh_internal *__find_hw_handler_type(const char *name)
+static struct hwh_internal *__find_hw_handler_type(const char *name)
{
struct hwh_internal *hwhi;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 43763a0bd09..1e97b3c12bd 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -101,6 +101,7 @@ typedef int (*action_fn) (struct pgpath *pgpath);
static kmem_cache_t *_mpio_cache;
+struct workqueue_struct *kmultipathd;
static void process_queued_ios(void *data);
static void trigger_event(void *data);
@@ -308,7 +309,7 @@ static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio,
bio_list_add(&m->queued_ios, bio);
m->queue_size++;
if (m->pg_init_required || !m->queue_io)
- schedule_work(&m->process_queued_ios);
+ queue_work(kmultipathd, &m->process_queued_ios);
pgpath = NULL;
r = 0;
} else if (!pgpath)
@@ -334,7 +335,7 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path)
m->queue_if_no_path = queue_if_no_path;
if (!m->queue_if_no_path)
- schedule_work(&m->process_queued_ios);
+ queue_work(kmultipathd, &m->process_queued_ios);
spin_unlock_irqrestore(&m->lock, flags);
@@ -800,7 +801,7 @@ static int fail_path(struct pgpath *pgpath)
if (pgpath == m->current_pgpath)
m->current_pgpath = NULL;
- schedule_work(&m->trigger_event);
+ queue_work(kmultipathd, &m->trigger_event);
out:
spin_unlock_irqrestore(&m->lock, flags);
@@ -837,9 +838,9 @@ static int reinstate_path(struct pgpath *pgpath)
m->current_pgpath = NULL;
if (!m->nr_valid_paths++)
- schedule_work(&m->process_queued_ios);
+ queue_work(kmultipathd, &m->process_queued_ios);
- schedule_work(&m->trigger_event);
+ queue_work(kmultipathd, &m->trigger_event);
out:
spin_unlock_irqrestore(&m->lock, flags);
@@ -883,7 +884,7 @@ static void bypass_pg(struct multipath *m, struct priority_group *pg,
spin_unlock_irqrestore(&m->lock, flags);
- schedule_work(&m->trigger_event);
+ queue_work(kmultipathd, &m->trigger_event);
}
/*
@@ -913,7 +914,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
}
spin_unlock_irqrestore(&m->lock, flags);
- schedule_work(&m->trigger_event);
+ queue_work(kmultipathd, &m->trigger_event);
return 0;
}
@@ -968,7 +969,7 @@ void dm_pg_init_complete(struct path *path, unsigned err_flags)
m->current_pgpath = NULL;
m->current_pg = NULL;
}
- schedule_work(&m->process_queued_ios);
+ queue_work(kmultipathd, &m->process_queued_ios);
spin_unlock_irqrestore(&m->lock, flags);
}
@@ -1018,7 +1019,7 @@ static int do_end_io(struct multipath *m, struct bio *bio,
bio_list_add(&m->queued_ios, bio);
m->queue_size++;
if (!m->queue_io)
- schedule_work(&m->process_queued_ios);
+ queue_work(kmultipathd, &m->process_queued_ios);
spin_unlock(&m->lock);
return 1; /* io not complete */
@@ -1057,7 +1058,7 @@ static void multipath_presuspend(struct dm_target *ti)
spin_lock_irqsave(&m->lock, flags);
m->suspended = 1;
if (m->queue_if_no_path)
- schedule_work(&m->process_queued_ios);
+ queue_work(kmultipathd, &m->process_queued_ios);
spin_unlock_irqrestore(&m->lock, flags);
}
@@ -1274,6 +1275,15 @@ static int __init dm_multipath_init(void)
return -EINVAL;
}
+ kmultipathd = create_workqueue("kmpathd");
+ if (!kmultipathd) {
+ DMERR("%s: failed to create workqueue kmpathd",
+ multipath_target.name);
+ dm_unregister_target(&multipath_target);
+ kmem_cache_destroy(_mpio_cache);
+ return -ENOMEM;
+ }
+
DMINFO("dm-multipath version %u.%u.%u loaded",
multipath_target.version[0], multipath_target.version[1],
multipath_target.version[2]);
@@ -1285,6 +1295,8 @@ static void __exit dm_multipath_exit(void)
{
int r;
+ destroy_workqueue(kmultipathd);
+
r = dm_unregister_target(&multipath_target);
if (r < 0)
DMERR("%s: target unregister failed %d",
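
A rough standalone sketch of the private-workqueue lifecycle the dm-mpath changes adopt in place of schedule_work(); the identifiers below are illustrative, not taken from the patch:

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;

	static int __init example_init(void)
	{
		example_wq = create_workqueue("example_wq");	/* dedicated worker threads */
		if (!example_wq)
			return -ENOMEM;		/* unwind any earlier registrations here */
		return 0;
	}

	static void example_kick(struct work_struct *w)
	{
		/* queue on the private queue instead of schedule_work(w) */
		queue_work(example_wq, w);
	}

	static void __exit example_exit(void)
	{
		destroy_workqueue(example_wq);	/* flushes queued work before freeing */
	}
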
diff --git a/drivers/md/dm-path-selector.c b/drivers/md/dm-path-selector.c
index ac5c4bbec6c..a28c1c2b4ef 100644
--- a/drivers/md/dm-path-selector.c
+++ b/drivers/md/dm-path-selector.c
@@ -26,7 +26,7 @@ struct ps_internal {
static LIST_HEAD(_path_selectors);
static DECLARE_RWSEM(_ps_lock);
-struct ps_internal *__find_path_selector_type(const char *name)
+static struct ps_internal *__find_path_selector_type(const char *name)
{
struct ps_internal *psi;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index ee175d4906c..18e9b9953fc 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -242,7 +242,7 @@ static void free_devices(struct list_head *devices)
}
}
-void table_destroy(struct dm_table *t)
+static void table_destroy(struct dm_table *t)
{
unsigned int i;
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index 7febc2cac73..51c0639b248 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -55,7 +55,7 @@ static struct target_type zero_target = {
.map = zero_map,
};
-int __init dm_zero_init(void)
+static int __init dm_zero_init(void)
{
int r = dm_register_target(&zero_target);
@@ -65,7 +65,7 @@ int __init dm_zero_init(void)
return r;
}
-void __exit dm_zero_exit(void)
+static void __exit dm_zero_exit(void)
{
int r = dm_unregister_target(&zero_target);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 243ff6884e8..f6b03957efc 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -97,6 +97,7 @@ struct mapped_device {
* freeze/thaw support require holding onto a super block
*/
struct super_block *frozen_sb;
+ struct block_device *frozen_bdev;
};
#define MIN_IOS 256
@@ -990,44 +991,50 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table)
*/
static int __lock_fs(struct mapped_device *md)
{
- struct block_device *bdev;
+ int error = -ENOMEM;
if (test_and_set_bit(DMF_FS_LOCKED, &md->flags))
return 0;
- bdev = bdget_disk(md->disk, 0);
- if (!bdev) {
+ md->frozen_bdev = bdget_disk(md->disk, 0);
+ if (!md->frozen_bdev) {
DMWARN("bdget failed in __lock_fs");
- return -ENOMEM;
+ goto out;
}
WARN_ON(md->frozen_sb);
- md->frozen_sb = freeze_bdev(bdev);
+
+ md->frozen_sb = freeze_bdev(md->frozen_bdev);
+ if (IS_ERR(md->frozen_sb)) {
+ error = PTR_ERR(md->frozen_sb);
+ goto out_bdput;
+ }
+
/* don't bdput right now, we don't want the bdev
* to go away while it is locked. We'll bdput
* in __unlock_fs
*/
return 0;
+
+out_bdput:
+ bdput(md->frozen_bdev);
+ md->frozen_sb = NULL;
+ md->frozen_bdev = NULL;
+out:
+ clear_bit(DMF_FS_LOCKED, &md->flags);
+ return error;
}
-static int __unlock_fs(struct mapped_device *md)
+static void __unlock_fs(struct mapped_device *md)
{
- struct block_device *bdev;
-
if (!test_and_clear_bit(DMF_FS_LOCKED, &md->flags))
- return 0;
+ return;
- bdev = bdget_disk(md->disk, 0);
- if (!bdev) {
- DMWARN("bdget failed in __unlock_fs");
- return -ENOMEM;
- }
+ thaw_bdev(md->frozen_bdev, md->frozen_sb);
+ bdput(md->frozen_bdev);
- thaw_bdev(bdev, md->frozen_sb);
md->frozen_sb = NULL;
- bdput(bdev);
- bdput(bdev);
- return 0;
+ md->frozen_bdev = NULL;
}
/*
@@ -1041,37 +1048,37 @@ int dm_suspend(struct mapped_device *md)
{
struct dm_table *map;
DECLARE_WAITQUEUE(wait, current);
+ int error = -EINVAL;
/* Flush I/O to the device. */
down_read(&md->lock);
- if (test_bit(DMF_BLOCK_IO, &md->flags)) {
- up_read(&md->lock);
- return -EINVAL;
- }
+ if (test_bit(DMF_BLOCK_IO, &md->flags))
+ goto out_read_unlock;
+
+ error = __lock_fs(md);
+ if (error)
+ goto out_read_unlock;
map = dm_get_table(md);
if (map)
dm_table_presuspend_targets(map);
- __lock_fs(md);
up_read(&md->lock);
/*
- * First we set the BLOCK_IO flag so no more ios will be
- * mapped.
+ * First we set the BLOCK_IO flag so no more ios will be mapped.
+ *
+ * If the flag is already set we know another thread is trying to
+ * suspend as well, so we leave the fs locked for this thread.
*/
+ error = -EINVAL;
down_write(&md->lock);
- if (test_bit(DMF_BLOCK_IO, &md->flags)) {
- /*
- * If we get here we know another thread is
- * trying to suspend as well, so we leave the fs
- * locked for this thread.
- */
- up_write(&md->lock);
- return -EINVAL;
+ if (test_and_set_bit(DMF_BLOCK_IO, &md->flags)) {
+ if (map)
+ dm_table_put(map);
+ goto out_write_unlock;
}
- set_bit(DMF_BLOCK_IO, &md->flags);
add_wait_queue(&md->wait, &wait);
up_write(&md->lock);
@@ -1099,12 +1106,9 @@ int dm_suspend(struct mapped_device *md)
remove_wait_queue(&md->wait, &wait);
/* were we interrupted ? */
- if (atomic_read(&md->pending)) {
- __unlock_fs(md);
- clear_bit(DMF_BLOCK_IO, &md->flags);
- up_write(&md->lock);
- return -EINTR;
- }
+ error = -EINTR;
+ if (atomic_read(&md->pending))
+ goto out_unfreeze;
set_bit(DMF_SUSPENDED, &md->flags);
@@ -1115,6 +1119,18 @@ int dm_suspend(struct mapped_device *md)
up_write(&md->lock);
return 0;
+
+out_unfreeze:
+ /* FIXME Undo dm_table_presuspend_targets */
+ __unlock_fs(md);
+ clear_bit(DMF_BLOCK_IO, &md->flags);
+out_write_unlock:
+ up_write(&md->lock);
+ return error;
+
+out_read_unlock:
+ up_read(&md->lock);
+ return error;
}
int dm_resume(struct mapped_device *md)
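
A condensed sketch of the freeze/thaw pairing the dm.c hunks move to, with the DMF_FS_LOCKED bookkeeping omitted; variable names are abbreviated and this is not the patch's exact code:

	bdev = bdget_disk(disk, 0);		/* hold a reference while the fs is frozen */
	if (!bdev)
		return -ENOMEM;
	sb = freeze_bdev(bdev);
	if (IS_ERR(sb)) {			/* freeze_bdev() can fail; propagate the error */
		bdput(bdev);
		return PTR_ERR(sb);
	}
	/* ... device suspended here ... */
	thaw_bdev(bdev, sb);
	bdput(bdev);				/* drop the reference taken above */
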
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 97af857d8a8..d899204d374 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -957,7 +957,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
}
-struct super_type super_types[] = {
+static struct super_type super_types[] = {
[0] = {
.name = "0.90.0",
.owner = THIS_MODULE,
@@ -2740,7 +2740,7 @@ static struct block_device_operations md_fops =
.revalidate_disk= md_revalidate,
};
-int md_thread(void * arg)
+static int md_thread(void * arg)
{
mdk_thread_t *thread = arg;
@@ -3232,7 +3232,7 @@ void md_handle_safemode(mddev_t *mddev)
}
-DECLARE_WAIT_QUEUE_HEAD(resync_wait);
+static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
#define SYNC_MARKS 10
#define SYNC_MARK_STEP (3*HZ)
@@ -3575,8 +3575,8 @@ void md_check_recovery(mddev_t *mddev)
}
}
-int md_notify_reboot(struct notifier_block *this,
- unsigned long code, void *x)
+static int md_notify_reboot(struct notifier_block *this,
+ unsigned long code, void *x)
{
struct list_head *tmp;
mddev_t *mddev;
@@ -3599,7 +3599,7 @@ int md_notify_reboot(struct notifier_block *this,
return NOTIFY_DONE;
}
-struct notifier_block md_notifier = {
+static struct notifier_block md_notifier = {
.notifier_call = md_notify_reboot,
.next = NULL,
.priority = INT_MAX, /* before any real devices */
@@ -3616,7 +3616,7 @@ static void md_geninit(void)
p->proc_fops = &md_seq_fops;
}
-int __init md_init(void)
+static int __init md_init(void)
{
int minor;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index c9b134cd153..4e4bfde3db5 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -103,7 +103,8 @@ static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
mempool_free(mp_bh, conf->pool);
}
-int multipath_end_request(struct bio *bio, unsigned int bytes_done, int error)
+static int multipath_end_request(struct bio *bio, unsigned int bytes_done,
+ int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
@@ -355,7 +356,7 @@ static int multipath_remove_disk(mddev_t *mddev, int number)
goto abort;
}
p->rdev = NULL;
- synchronize_kernel();
+ synchronize_rcu();
if (atomic_read(&rdev->nr_pending)) {
/* lost the race, try later */
err = -EBUSY;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a389394b52f..83380b5d659 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -797,7 +797,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
goto abort;
}
p->rdev = NULL;
- synchronize_kernel();
+ synchronize_rcu();
if (atomic_read(&rdev->nr_pending)) {
/* lost the race, try later */
err = -EBUSY;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b100bfe4fdc..e9dc2876a62 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -977,7 +977,7 @@ static int raid10_remove_disk(mddev_t *mddev, int number)
goto abort;
}
p->rdev = NULL;
- synchronize_kernel();
+ synchronize_rcu();
if (atomic_read(&rdev->nr_pending)) {
/* lost the race, try later */
err = -EBUSY;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 52c3a81c4aa..e96e2a10a9c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1873,7 +1873,7 @@ static int raid5_remove_disk(mddev_t *mddev, int number)
goto abort;
}
p->rdev = NULL;
- synchronize_kernel();
+ synchronize_rcu();
if (atomic_read(&rdev->nr_pending)) {
/* lost the race, try later */
err = -EBUSY;
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index 7e30ab29691..8a33f351e09 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -2038,7 +2038,7 @@ static int raid6_remove_disk(mddev_t *mddev, int number)
goto abort;
}
p->rdev = NULL;
- synchronize_kernel();
+ synchronize_rcu();
if (atomic_read(&rdev->nr_pending)) {
/* lost the race, try later */
err = -EBUSY;
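
A minimal sketch of the RCU-based removal pattern these raid/multipath hunks rely on; the hunks simply replace synchronize_kernel() with its RCU-specific successor synchronize_rcu(). The field names match the context lines above, but the snippet is illustrative only:

	p->rdev = NULL;				/* unpublish the pointer */
	synchronize_rcu();			/* wait out readers that may still see it */
	if (atomic_read(&rdev->nr_pending)) {
		/* lost the race: a reader took a reference, so put the pointer back */
		p->rdev = rdev;
		return -EBUSY;
	}
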