Diffstat (limited to 'drivers/md')
 drivers/md/bitmap.c                    |  5
 drivers/md/dm-exception-store.c        | 13
 drivers/md/dm-exception-store.h        |  4
 drivers/md/dm-ioctl.c                  |  2
 drivers/md/dm-log-userspace-base.c     | 39
 drivers/md/dm-log-userspace-transfer.c |  8
 drivers/md/dm-log-userspace-transfer.h |  2
 drivers/md/dm-mpath.c                  | 42
 drivers/md/dm-raid1.c                  | 10
 drivers/md/dm-snap-persistent.c        | 88
 drivers/md/dm-snap.c                   | 23
 drivers/md/dm-stripe.c                 | 15
 drivers/md/dm-table.c                  | 51
 drivers/md/dm.c                        | 47
 drivers/md/linear.c                    |  5
 drivers/md/md.c                        | 29
 drivers/md/md.h                        |  3
 drivers/md/multipath.c                 | 11
 drivers/md/raid0.c                     | 10
 drivers/md/raid1.c                     | 29
 drivers/md/raid10.c                    | 18
 drivers/md/raid5.c                     | 20
 22 files changed, 323 insertions(+), 151 deletions(-)
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 3319c2fec28..6986b0059d2 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -108,6 +108,8 @@ static void bitmap_free_page(struct bitmap *bitmap, unsigned char *page)
  * allocated while we're using it
  */
 static int bitmap_checkpage(struct bitmap *bitmap, unsigned long page, int create)
+__releases(bitmap->lock)
+__acquires(bitmap->lock)
 {
 	unsigned char *mappage;
@@ -325,7 +327,6 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
 	return 0;

 bad_alignment:
-	rcu_read_unlock();
 	return -EINVAL;
 }

@@ -1207,6 +1208,8 @@ void bitmap_daemon_work(struct bitmap *bitmap)
 static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
 					    sector_t offset, int *blocks,
 					    int create)
+__releases(bitmap->lock)
+__acquires(bitmap->lock)
 {
 	/* If 'create', we might release the lock and reclaim it.
 	 * The lock must have been taken with interrupts enabled.
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 3710ff88fc1..556acff3952 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -171,6 +171,14 @@ static int set_chunk_size(struct dm_exception_store *store,
 	 */
 	chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9);

+	return dm_exception_store_set_chunk_size(store, chunk_size_ulong,
+						 error);
+}
+
+int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
+				      unsigned long chunk_size_ulong,
+				      char **error)
+{
 	/* Check chunk_size is a power of 2 */
 	if (!is_power_of_2(chunk_size_ulong)) {
 		*error = "Chunk size is not a power of 2";
@@ -183,6 +191,11 @@ static int set_chunk_size(struct dm_exception_store *store,
 		return -EINVAL;
 	}

+	if (chunk_size_ulong > INT_MAX >> SECTOR_SHIFT) {
+		*error = "Chunk size is too high";
+		return -EINVAL;
+	}
+
 	store->chunk_size = chunk_size_ulong;
 	store->chunk_mask = chunk_size_ulong - 1;
 	store->chunk_shift = ffs(chunk_size_ulong) - 1;
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 2442c8c0789..812c71872ba 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -168,6 +168,10 @@ static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
 int dm_exception_store_type_register(struct dm_exception_store_type *type);
 int dm_exception_store_type_unregister(struct dm_exception_store_type *type);

+int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
+				      unsigned long chunk_size_ulong,
+				      char **error);
+
 int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
 			      unsigned *args_used,
 			      struct dm_exception_store **store);
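
The new helper's checks are simple enough to exercise outside the kernel. A minimal userspace sketch, assuming SECTOR_SHIFT is 9 and with is_power_of_2() as a local stand-in for the kernel's <linux/log2.h> helper; the PAGE_SIZE rounding stays in set_chunk_size() and only the validation moved into the shared helper:

#include <limits.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

/* Local stand-in for the kernel's is_power_of_2() in <linux/log2.h>. */
static int is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/*
 * The two checks dm_exception_store_set_chunk_size() performs: the chunk
 * size (in 512-byte sectors) must be a power of two, and converted to
 * bytes it must still fit in an int.
 */
static int validate_chunk_size(unsigned long chunk_size, const char **error)
{
	if (!is_power_of_2(chunk_size)) {
		*error = "Chunk size is not a power of 2";
		return -1;
	}
	if (chunk_size > INT_MAX >> SECTOR_SHIFT) {
		*error = "Chunk size is too high";
		return -1;
	}
	return 0;
}

int main(void)
{
	const char *err = NULL;

	printf("%d\n", validate_chunk_size(16, &err));		 /* 0: ok */
	printf("%d (%s)\n", validate_chunk_size(24, &err), err); /* -1 */
	return 0;
}
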
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 7f77f18fcaf..a6794293158 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1532,7 +1532,7 @@ static const struct file_operations _ctl_fops = {
 static struct miscdevice _dm_misc = {
 	.minor		= MISC_DYNAMIC_MINOR,
 	.name		= DM_NAME,
-	.devnode	= "mapper/control",
+	.nodename	= "mapper/control",
 	.fops		= &_ctl_fops
 };

diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index e69b9656099..652bd33109e 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -21,6 +21,7 @@ struct log_c {
 	struct dm_target *ti;
 	uint32_t region_size;
 	region_t region_count;
+	uint64_t luid;
 	char uuid[DM_UUID_LEN];

 	char *usr_argv_str;
@@ -63,7 +64,7 @@ static int userspace_do_request(struct log_c *lc, const char *uuid,
 	 * restored.
 	 */
retry:
-	r = dm_consult_userspace(uuid, request_type, data,
+	r = dm_consult_userspace(uuid, lc->luid, request_type, data,
 				 data_size, rdata, rdata_size);

 	if (r != -ESRCH)
@@ -74,14 +75,15 @@ retry:
 		set_current_state(TASK_INTERRUPTIBLE);
 		schedule_timeout(2*HZ);
 		DMWARN("Attempting to contact userspace log server...");
-		r = dm_consult_userspace(uuid, DM_ULOG_CTR, lc->usr_argv_str,
+		r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR,
+					 lc->usr_argv_str,
 					 strlen(lc->usr_argv_str) + 1,
 					 NULL, NULL);
 		if (!r)
 			break;
 	}
 	DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete");
-	r = dm_consult_userspace(uuid, DM_ULOG_RESUME, NULL,
+	r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL,
 				 0, NULL, NULL);
 	if (!r)
 		goto retry;
@@ -111,10 +113,9 @@ static int build_constructor_string(struct dm_target *ti,
 		return -ENOMEM;
 	}

-	for (i = 0, str_size = 0; i < argc; i++)
-		str_size += sprintf(str + str_size, "%s ", argv[i]);
-	str_size += sprintf(str + str_size, "%llu",
-			    (unsigned long long)ti->len);
+	str_size = sprintf(str, "%llu", (unsigned long long)ti->len);
+	for (i = 0; i < argc; i++)
+		str_size += sprintf(str + str_size, " %s", argv[i]);

 	*ctr_str = str;
 	return str_size;
@@ -154,6 +155,9 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 		return -ENOMEM;
 	}

+	/* The ptr value is sufficient for local unique id */
+	lc->luid = (uint64_t)lc;
+
 	lc->ti = ti;

 	if (strlen(argv[0]) > (DM_UUID_LEN - 1)) {
@@ -173,7 +177,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 	}

 	/* Send table string */
-	r = dm_consult_userspace(lc->uuid, DM_ULOG_CTR,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
 				 ctr_str, str_size, NULL, NULL);

 	if (r == -ESRCH) {
@@ -183,7 +187,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,

 	/* Since the region size does not change, get it now */
 	rdata_size = sizeof(rdata);
-	r = dm_consult_userspace(lc->uuid, DM_ULOG_GET_REGION_SIZE,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
 				 NULL, 0, (char *)&rdata, &rdata_size);
 	if (r) {
@@ -212,7 +216,7 @@ static void userspace_dtr(struct dm_dirty_log *log)
 	int r;
 	struct log_c *lc = log->context;

-	r = dm_consult_userspace(lc->uuid, DM_ULOG_DTR,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
 				 NULL, 0,
 				 NULL, NULL);

@@ -227,7 +231,7 @@ static int userspace_presuspend(struct dm_dirty_log *log)
 	int r;
 	struct log_c *lc = log->context;

-	r = dm_consult_userspace(lc->uuid, DM_ULOG_PRESUSPEND,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND,
 				 NULL, 0,
 				 NULL, NULL);

@@ -239,7 +243,7 @@ static int userspace_postsuspend(struct dm_dirty_log *log)
 	int r;
 	struct log_c *lc = log->context;

-	r = dm_consult_userspace(lc->uuid, DM_ULOG_POSTSUSPEND,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
 				 NULL, 0,
 				 NULL, NULL);

@@ -252,7 +256,7 @@ static int userspace_resume(struct dm_dirty_log *log)
 	struct log_c *lc = log->context;

 	lc->in_sync_hint = 0;
-	r = dm_consult_userspace(lc->uuid, DM_ULOG_RESUME,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME,
 				 NULL, 0,
 				 NULL, NULL);

@@ -561,6 +565,7 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
 			    char *result, unsigned maxlen)
 {
 	int r = 0;
+	char *table_args;
 	size_t sz = (size_t)maxlen;
 	struct log_c *lc = log->context;

@@ -577,8 +582,12 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
 		break;
 	case STATUSTYPE_TABLE:
 		sz = 0;
-		DMEMIT("%s %u %s %s", log->type->name, lc->usr_argc + 1,
-		       lc->uuid, lc->usr_argv_str);
+		table_args = strchr(lc->usr_argv_str, ' ');
+		BUG_ON(!table_args); /* There will always be a ' ' */
+		table_args++;
+
+		DMEMIT("%s %u %s %s ", log->type->name, lc->usr_argc,
+		       lc->uuid, table_args);
 		break;
 	}
 	return (r) ? 0 : (int)sz;
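
The reordering in build_constructor_string() is what makes the new userspace_status() trick work: with the device length emitted first, stripping everything up to the first space recovers exactly the user-supplied arguments. A rough userspace sketch of the new layout, with a fixed-size buffer and illustrative argument values in place of the kernel's exact sizing:

#include <stdio.h>
#include <stdlib.h>

/* New constructor-string layout: "<ti->len> <arg0> <arg1> ...". */
static int build_ctr_string(unsigned long long len, int argc, char **argv,
			    char **ctr_str)
{
	int i, str_size;
	char *str = malloc(1024);	/* simplification; kernel sizes this exactly */

	if (!str)
		return -1;

	/* Device length first, then each user-supplied argument */
	str_size = sprintf(str, "%llu", len);
	for (i = 0; i < argc; i++)
		str_size += sprintf(str + str_size, " %s", argv[i]);

	*ctr_str = str;
	return str_size;
}

int main(void)
{
	char *args[] = { "uuid-1234", "clustered_disk" };	/* hypothetical args */
	char *ctr_str;

	build_ctr_string(204800, 2, args, &ctr_str);
	printf("%s\n", ctr_str);	/* 204800 uuid-1234 clustered_disk */
	free(ctr_str);
	return 0;
}
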
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 0ca1ee768a1..ba0edad2d04 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -108,7 +108,7 @@ static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr)
 			*(pkg->data_size) = 0;
 		} else if (tfr->data_size > *(pkg->data_size)) {
 			DMERR("Insufficient space to receive package [%u] "
-			      "(%u vs %lu)", tfr->request_type,
+			      "(%u vs %zu)", tfr->request_type,
 			      tfr->data_size, *(pkg->data_size));

 			*(pkg->data_size) = 0;
@@ -147,7 +147,8 @@ static void cn_ulog_callback(void *data)

 /**
  * dm_consult_userspace
- * @uuid: log's uuid (must be DM_UUID_LEN in size)
+ * @uuid: log's universal unique identifier (must be DM_UUID_LEN in size)
+ * @luid: log's local unique identifier
  * @request_type: found in include/linux/dm-log-userspace.h
  * @data: data to tx to the server
  * @data_size: size of data in bytes
@@ -163,7 +164,7 @@ static void cn_ulog_callback(void *data)
  *
  * Returns: 0 on success, -EXXX on failure
  **/
-int dm_consult_userspace(const char *uuid, int request_type,
+int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
 			 char *data, size_t data_size,
 			 char *rdata, size_t *rdata_size)
 {
@@ -190,6 +191,7 @@ resend:
 	memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size);
 	memcpy(tfr->uuid, uuid, DM_UUID_LEN);
+	tfr->luid = luid;
 	tfr->seq = dm_ulog_seq++;

 	/*
diff --git a/drivers/md/dm-log-userspace-transfer.h b/drivers/md/dm-log-userspace-transfer.h
index c26d8e4e271..04ee874f915 100644
--- a/drivers/md/dm-log-userspace-transfer.h
+++ b/drivers/md/dm-log-userspace-transfer.h
@@ -11,7 +11,7 @@
 int dm_ulog_tfr_init(void);
 void dm_ulog_tfr_exit(void);
-int dm_consult_userspace(const char *uuid, int request_type,
+int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
 			 char *data, size_t data_size,
 			 char *rdata, size_t *rdata_size);
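
The luid exists because a UUID alone cannot distinguish two live instances of the same log (for example while a table reload briefly keeps old and new instances alive together). Any value unique for the object's lifetime works, so the patch reuses the log_c address. A sketch of the idea, with the cast made well-defined via uintptr_t where the kernel casts straight to uint64_t:

#include <stdint.h>
#include <stdio.h>

struct log_instance {
	uint64_t luid;
	char uuid[129];
};

static void init_instance(struct log_instance *lc)
{
	/* The object's own address is a ready-made local unique id */
	lc->luid = (uint64_t)(uintptr_t)lc;
}

int main(void)
{
	struct log_instance a, b;

	init_instance(&a);
	init_instance(&b);
	/* Same uuid, different luid - userspace can tell them apart */
	printf("a.luid=%llx b.luid=%llx\n",
	       (unsigned long long)a.luid, (unsigned long long)b.luid);
	return 0;
}
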
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 6f0d90d4a54..32d0b878ecc 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -64,6 +64,7 @@ struct multipath {
 	spinlock_t lock;

 	const char *hw_handler_name;
+	char *hw_handler_params;
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
 	unsigned pg_init_required;	/* pg_init needs calling? */
@@ -219,6 +220,7 @@ static void free_multipath(struct multipath *m)
 	}

 	kfree(m->hw_handler_name);
+	kfree(m->hw_handler_params);
 	mempool_destroy(m->mpio_pool);
 	kfree(m);
 }
@@ -615,6 +617,17 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
 			dm_put_device(ti, p->path.dev);
 			goto bad;
 		}
+
+		if (m->hw_handler_params) {
+			r = scsi_dh_set_params(q, m->hw_handler_params);
+			if (r < 0) {
+				ti->error = "unable to set hardware "
+							"handler parameters";
+				scsi_dh_detach(q);
+				dm_put_device(ti, p->path.dev);
+				goto bad;
+			}
+		}
 	}

 	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
@@ -705,6 +718,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
 static int parse_hw_handler(struct arg_set *as, struct multipath *m)
 {
 	unsigned hw_argc;
+	int ret;
 	struct dm_target *ti = m->ti;

 	static struct param _params[] = {
@@ -726,17 +740,33 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
 	request_module("scsi_dh_%s", m->hw_handler_name);
 	if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
 		ti->error = "unknown hardware handler type";
-		kfree(m->hw_handler_name);
-		m->hw_handler_name = NULL;
-		return -EINVAL;
+		ret = -EINVAL;
+		goto fail;
 	}

-	if (hw_argc > 1)
-		DMWARN("Ignoring user-specified arguments for "
-		       "hardware handler \"%s\"", m->hw_handler_name);
+	if (hw_argc > 1) {
+		char *p;
+		int i, j, len = 4;
+
+		for (i = 0; i <= hw_argc - 2; i++)
+			len += strlen(as->argv[i]) + 1;
+		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
+		if (!p) {
+			ti->error = "memory allocation failed";
+			ret = -ENOMEM;
+			goto fail;
+		}
+		j = sprintf(p, "%d", hw_argc - 1);
+		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
+			j = sprintf(p, "%s", as->argv[i]);
+	}
 	consume(as, hw_argc - 1);

 	return 0;
+fail:
+	kfree(m->hw_handler_name);
+	m->hw_handler_name = NULL;
+	return ret;
 }

 static int parse_features(struct arg_set *as, struct multipath *m)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9726577cde4..cc9dc79b078 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -648,7 +648,13 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
 	 */
 	dm_rh_inc_pending(ms->rh, &sync);
 	dm_rh_inc_pending(ms->rh, &nosync);
-	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0;
+
+	/*
+	 * If the flush fails on a previous call and succeeds here,
+	 * we must not reset the log_failure variable. We need
+	 * userspace interaction to do that.
+	 */
+	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

 	/*
 	 * Dispatch io.
@@ -1123,7 +1129,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
 	if (error == -EOPNOTSUPP)
 		goto out;

-	if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
+	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
 		goto out;

 	if (unlikely(error)) {
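
The dm-raid1 hunk is small but carries a real behavioural point: an error must latch until userspace clears it, even if a later flush succeeds. A sketch of the pattern with illustrative names and a stubbed flush that fails once:

#include <stdbool.h>
#include <stdio.h>

/* Stub standing in for dm_rh_flush(): fails on the first call only. */
static int do_flush(void)
{
	static int calls;

	return calls++ == 0;
}

struct mirror_state {
	bool log_failure;
};

static void record_flush_result(struct mirror_state *ms)
{
	/* Latch: set on failure, otherwise keep the previous value */
	ms->log_failure = do_flush() ? true : ms->log_failure;
}

int main(void)
{
	struct mirror_state ms = { false };

	record_flush_result(&ms);	/* flush fails: latched */
	record_flush_result(&ms);	/* flush succeeds: stays latched */
	printf("log_failure=%d\n", ms.log_failure);	/* 1 */
	return 0;
}
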
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 6e3fe4f1493..d5b2e08750d 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -106,6 +106,13 @@ struct pstore {
 	void *zero_area;

 	/*
+	 * An area used for header. The header can be written
+	 * concurrently with metadata (when invalidating the snapshot),
+	 * so it needs a separate buffer.
+	 */
+	void *header_area;
+
+	/*
 	 * Used to keep track of which metadata area the data in
 	 * 'chunk' refers to.
 	 */
@@ -148,16 +155,27 @@ static int alloc_area(struct pstore *ps)
 	 */
 	ps->area = vmalloc(len);
 	if (!ps->area)
-		return r;
+		goto err_area;

 	ps->zero_area = vmalloc(len);
-	if (!ps->zero_area) {
-		vfree(ps->area);
-		return r;
-	}
+	if (!ps->zero_area)
+		goto err_zero_area;
 	memset(ps->zero_area, 0, len);

+	ps->header_area = vmalloc(len);
+	if (!ps->header_area)
+		goto err_header_area;
+
 	return 0;
+
+err_header_area:
+	vfree(ps->zero_area);
+
+err_zero_area:
+	vfree(ps->area);
+
+err_area:
+	return r;
 }

 static void free_area(struct pstore *ps)
@@ -169,6 +187,10 @@ static void free_area(struct pstore *ps)
 	if (ps->zero_area)
 		vfree(ps->zero_area);
 	ps->zero_area = NULL;
+
+	if (ps->header_area)
+		vfree(ps->header_area);
+	ps->header_area = NULL;
 }

 struct mdata_req {
@@ -188,7 +210,8 @@ static void do_metadata(struct work_struct *work)
 /*
  * Read or write a chunk aligned and sized block of data from a device.
  */
-static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
+static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
+		    int metadata)
 {
 	struct dm_io_region where = {
 		.bdev = ps->store->cow->bdev,
@@ -198,7 +221,7 @@ static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
 	struct dm_io_request io_req = {
 		.bi_rw = rw,
 		.mem.type = DM_IO_VMA,
-		.mem.ptr.vma = ps->area,
+		.mem.ptr.vma = area,
 		.client = ps->io_client,
 		.notify.fn = NULL,
 	};
@@ -240,7 +263,7 @@ static int area_io(struct pstore *ps, int rw)

 	chunk = area_location(ps, ps->current_area);

-	r = chunk_io(ps, chunk, rw, 0);
+	r = chunk_io(ps, ps->area, chunk, rw, 0);
 	if (r)
 		return r;

@@ -254,20 +277,7 @@ static void zero_memory_area(struct pstore *ps)

 static int zero_disk_area(struct pstore *ps, chunk_t area)
 {
-	struct dm_io_region where = {
-		.bdev = ps->store->cow->bdev,
-		.sector = ps->store->chunk_size * area_location(ps, area),
-		.count = ps->store->chunk_size,
-	};
-	struct dm_io_request io_req = {
-		.bi_rw = WRITE,
-		.mem.type = DM_IO_VMA,
-		.mem.ptr.vma = ps->zero_area,
-		.client = ps->io_client,
-		.notify.fn = NULL,
-	};
-
-	return dm_io(&io_req, 1, &where, NULL);
+	return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
 }

 static int read_header(struct pstore *ps, int *new_snapshot)
@@ -276,6 +286,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 	struct disk_header *dh;
 	chunk_t chunk_size;
 	int chunk_size_supplied = 1;
+	char *chunk_err;

 	/*
 	 * Use default chunk size (or hardsect_size, if larger) if none supplied
@@ -297,11 +308,11 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 	if (r)
 		return r;

-	r = chunk_io(ps, 0, READ, 1);
+	r = chunk_io(ps, ps->header_area, 0, READ, 1);
 	if (r)
 		goto bad;

-	dh = (struct disk_header *) ps->area;
+	dh = ps->header_area;

 	if (le32_to_cpu(dh->magic) == 0) {
 		*new_snapshot = 1;
@@ -319,20 +330,25 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 	ps->version = le32_to_cpu(dh->version);
 	chunk_size = le32_to_cpu(dh->chunk_size);

-	if (!chunk_size_supplied || ps->store->chunk_size == chunk_size)
+	if (ps->store->chunk_size == chunk_size)
 		return 0;

-	DMWARN("chunk size %llu in device metadata overrides "
-	       "table chunk size of %llu.",
-	       (unsigned long long)chunk_size,
-	       (unsigned long long)ps->store->chunk_size);
+	if (chunk_size_supplied)
+		DMWARN("chunk size %llu in device metadata overrides "
+		       "table chunk size of %llu.",
+		       (unsigned long long)chunk_size,
+		       (unsigned long long)ps->store->chunk_size);

 	/* We had a bogus chunk_size. Fix stuff up. */
 	free_area(ps);
-	ps->store->chunk_size = chunk_size;
-	ps->store->chunk_mask = chunk_size - 1;
-	ps->store->chunk_shift = ffs(chunk_size) - 1;
+	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
+					      &chunk_err);
+	if (r) {
+		DMERR("invalid on-disk chunk size %llu: %s.",
+		      (unsigned long long)chunk_size, chunk_err);
+		return r;
+	}

 	r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
 				ps->io_client);
@@ -351,15 +367,15 @@ static int write_header(struct pstore *ps)
 {
 	struct disk_header *dh;

-	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
+	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

-	dh = (struct disk_header *) ps->area;
+	dh = ps->header_area;
 	dh->magic = cpu_to_le32(SNAP_MAGIC);
 	dh->valid = cpu_to_le32(ps->valid);
 	dh->version = cpu_to_le32(ps->version);
 	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

-	return chunk_io(ps, 0, WRITE, 1);
+	return chunk_io(ps, ps->header_area, 0, WRITE, 1);
 }

 /*
@@ -679,6 +695,8 @@ static int persistent_ctr(struct dm_exception_store *store,
 	ps->valid = 1;
 	ps->version = SNAPSHOT_DISK_VERSION;
 	ps->area = NULL;
+	ps->zero_area = NULL;
+	ps->header_area = NULL;
 	ps->next_free = 2; /* skipping the header and first area */
 	ps->current_committed = 0;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index d573165cd2b..57f1bf7f3b7 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1176,6 +1176,15 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
 	return 0;
 }

+static int snapshot_iterate_devices(struct dm_target *ti,
+				    iterate_devices_callout_fn fn, void *data)
+{
+	struct dm_snapshot *snap = ti->private;
+
+	return fn(ti, snap->origin, 0, ti->len, data);
+}
+
+
 /*-----------------------------------------------------------------
  * Origin methods
  *---------------------------------------------------------------*/
@@ -1410,20 +1419,29 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result,
 	return 0;
 }

+static int origin_iterate_devices(struct dm_target *ti,
+				  iterate_devices_callout_fn fn, void *data)
+{
+	struct dm_dev *dev = ti->private;
+
+	return fn(ti, dev, 0, ti->len, data);
+}
+
 static struct target_type origin_target = {
 	.name    = "snapshot-origin",
-	.version = {1, 6, 0},
+	.version = {1, 7, 0},
 	.module  = THIS_MODULE,
 	.ctr     = origin_ctr,
 	.dtr     = origin_dtr,
 	.map     = origin_map,
 	.resume  = origin_resume,
 	.status  = origin_status,
+	.iterate_devices = origin_iterate_devices,
 };

 static struct target_type snapshot_target = {
 	.name    = "snapshot",
-	.version = {1, 6, 0},
+	.version = {1, 7, 0},
 	.module  = THIS_MODULE,
 	.ctr     = snapshot_ctr,
 	.dtr     = snapshot_dtr,
@@ -1431,6 +1449,7 @@ static struct target_type snapshot_target = {
 	.end_io  = snapshot_end_io,
 	.resume  = snapshot_resume,
 	.status  = snapshot_status,
+	.iterate_devices = snapshot_iterate_devices,
 };

 static int __init dm_snapshot_init(void)
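
Both new snapshot hooks follow the same shape: the target hands a caller-supplied callback each underlying device together with the area it uses. A simplified sketch of that callback pattern, with plain structs standing in for the dm types:

#include <stddef.h>
#include <stdio.h>

struct device { const char *name; };
struct target { struct device *dev; unsigned long long len; };

typedef int (*iterate_fn)(struct target *t, struct device *dev,
			  unsigned long long start, unsigned long long len,
			  void *data);

/* A single-device target just forwards the callback once */
static int target_iterate_devices(struct target *t, iterate_fn fn, void *data)
{
	return fn(t, t->dev, 0, t->len, data);
}

/* Example callback: print the area being used */
static int print_area(struct target *t, struct device *dev,
		      unsigned long long start, unsigned long long len,
		      void *data)
{
	(void)t; (void)data;
	printf("%s: %llu+%llu\n", dev->name, start, len);
	return 0;
}

int main(void)
{
	struct device d = { "cow" };
	struct target t = { &d, 2048 };

	return target_iterate_devices(&t, print_area, NULL);
}
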
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 4e0e5937e42..e0efc1adcaf 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -285,7 +285,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
 	if (!error)
 		return 0; /* I/O complete */

-	if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
+	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
 		return error;

 	if (error == -EOPNOTSUPP)
@@ -329,9 +329,19 @@ static int stripe_iterate_devices(struct dm_target *ti,
 	return ret;
 }

+static void stripe_io_hints(struct dm_target *ti,
+			    struct queue_limits *limits)
+{
+	struct stripe_c *sc = ti->private;
+	unsigned chunk_size = (sc->chunk_mask + 1) << 9;
+
+	blk_limits_io_min(limits, chunk_size);
+	blk_limits_io_opt(limits, chunk_size * sc->stripes);
+}
+
 static struct target_type stripe_target = {
 	.name   = "striped",
-	.version = {1, 2, 0},
+	.version = {1, 3, 0},
 	.module = THIS_MODULE,
 	.ctr    = stripe_ctr,
 	.dtr    = stripe_dtr,
@@ -339,6 +349,7 @@ static struct target_type stripe_target = {
 	.end_io = stripe_end_io,
 	.status = stripe_status,
 	.iterate_devices = stripe_iterate_devices,
+	.io_hints = stripe_io_hints,
 };

 int __init dm_stripe_init(void)
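
The arithmetic in stripe_io_hints() is worth spelling out: chunk_mask is the chunk size in sectors minus one, so (chunk_mask + 1) << 9 recovers the chunk size in bytes; minimum I/O is one chunk and optimal I/O is a full stripe. A worked example assuming 64KiB chunks across 4 stripes:

#include <stdio.h>

int main(void)
{
	unsigned chunk_mask = 127;	/* 128 sectors per chunk */
	unsigned stripes = 4;

	unsigned io_min = (chunk_mask + 1) << 9;	/* one chunk: 65536 bytes */
	unsigned io_opt = io_min * stripes;		/* full stripe: 262144 bytes */

	printf("io_min=%u io_opt=%u\n", io_min, io_opt);
	return 0;
}
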
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index d952b344191..1a6cb3c7822 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -343,10 +343,10 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
 }

 /*
- * If possible, this checks an area of a destination device is valid.
+ * If possible, this checks an area of a destination device is invalid.
 */
-static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
-				sector_t start, sector_t len, void *data)
+static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
+				  sector_t start, sector_t len, void *data)
 {
 	struct queue_limits *limits = data;
 	struct block_device *bdev = dev->bdev;
@@ -357,36 +357,40 @@ static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
 	char b[BDEVNAME_SIZE];

 	if (!dev_size)
-		return 1;
+		return 0;

 	if ((start >= dev_size) || (start + len > dev_size)) {
-		DMWARN("%s: %s too small for target",
-		       dm_device_name(ti->table->md), bdevname(bdev, b));
-		return 0;
+		DMWARN("%s: %s too small for target: "
+		       "start=%llu, len=%llu, dev_size=%llu",
+		       dm_device_name(ti->table->md), bdevname(bdev, b),
+		       (unsigned long long)start,
+		       (unsigned long long)len,
+		       (unsigned long long)dev_size);
+		return 1;
 	}

 	if (logical_block_size_sectors <= 1)
-		return 1;
+		return 0;

 	if (start & (logical_block_size_sectors - 1)) {
 		DMWARN("%s: start=%llu not aligned to h/w "
-		       "logical block size %hu of %s",
+		       "logical block size %u of %s",
 		       dm_device_name(ti->table->md),
 		       (unsigned long long)start,
 		       limits->logical_block_size, bdevname(bdev, b));
-		return 0;
+		return 1;
 	}

 	if (len & (logical_block_size_sectors - 1)) {
 		DMWARN("%s: len=%llu not aligned to h/w "
-		       "logical block size %hu of %s",
+		       "logical block size %u of %s",
 		       dm_device_name(ti->table->md),
 		       (unsigned long long)len,
 		       limits->logical_block_size, bdevname(bdev, b));
-		return 0;
+		return 1;
 	}

-	return 1;
+	return 0;
 }

 /*
@@ -496,8 +500,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 	}

 	if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
-		DMWARN("%s: target device %s is misaligned",
-		       dm_device_name(ti->table->md), bdevname(bdev, b));
+		DMWARN("%s: target device %s is misaligned: "
+		       "physical_block_size=%u, logical_block_size=%u, "
+		       "alignment_offset=%u, start=%llu",
+		       dm_device_name(ti->table->md), bdevname(bdev, b),
+		       q->limits.physical_block_size,
+		       q->limits.logical_block_size,
+		       q->limits.alignment_offset,
+		       (unsigned long long) start << 9);

 	/*
 	 * Check if merge fn is supported.
@@ -698,7 +709,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
 	if (remaining) {
 		DMWARN("%s: table line %u (start sect %llu len %llu) "
-		       "not aligned to h/w logical block size %hu",
+		       "not aligned to h/w logical block size %u",
 		       dm_device_name(table->md), i,
 		       (unsigned long long) ti->begin,
 		       (unsigned long long) ti->len,
@@ -996,12 +1007,16 @@ int dm_calculate_queue_limits(struct dm_table *table,
 		ti->type->iterate_devices(ti, dm_set_device_limits,
 					  &ti_limits);

+		/* Set I/O hints portion of queue limits */
+		if (ti->type->io_hints)
+			ti->type->io_hints(ti, &ti_limits);
+
 		/*
 		 * Check each device area is consistent with the target's
 		 * overall queue limits.
 		 */
-		if (!ti->type->iterate_devices(ti, device_area_is_valid,
-					       &ti_limits))
+		if (ti->type->iterate_devices(ti, device_area_is_invalid,
+					      &ti_limits))
 			return -EINVAL;

 combine_limits:
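
The alignment tests in device_area_is_invalid() rely on the logical block size in sectors being a power of two, so a bitwise AND with size-1 detects misalignment. A tiny standalone demonstration:

#include <stdio.h>

static int is_misaligned(unsigned long long x, unsigned block_sectors)
{
	/* Non-zero low bits mean x is not a multiple of block_sectors */
	return (x & (block_sectors - 1)) != 0;
}

int main(void)
{
	/* 4KiB logical blocks = 8 sectors */
	printf("%d\n", is_misaligned(24, 8));	/* 0: aligned */
	printf("%d\n", is_misaligned(13, 8));	/* 1: misaligned */
	return 0;
}
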
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8a311ea0d44..376f1ab48a2 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -130,7 +130,7 @@ struct mapped_device {
 	/*
 	 * A list of ios that arrived while we were suspended.
 	 */
-	atomic_t pending;
+	atomic_t pending[2];
 	wait_queue_head_t wait;
 	struct work_struct work;
 	struct bio_list deferred;
@@ -453,13 +453,14 @@ static void start_io_acct(struct dm_io *io)
 {
 	struct mapped_device *md = io->md;
 	int cpu;
+	int rw = bio_data_dir(io->bio);

 	io->start_time = jiffies;

 	cpu = part_stat_lock();
 	part_round_stats(cpu, &dm_disk(md)->part0);
 	part_stat_unlock();
-	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
+	dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
 }

 static void end_io_acct(struct dm_io *io)
@@ -479,8 +480,9 @@ static void end_io_acct(struct dm_io *io)
 	 * After this is decremented the bio must not be touched if it is
 	 * a barrier.
 	 */
-	dm_disk(md)->part0.in_flight = pending =
-		atomic_dec_return(&md->pending);
+	dm_disk(md)->part0.in_flight[rw] = pending =
+		atomic_dec_return(&md->pending[rw]);
+	pending += atomic_read(&md->pending[rw^0x1]);

 	/* nudge anyone waiting on suspend queue */
 	if (!pending)
@@ -586,7 +588,7 @@ static void dec_pending(struct dm_io *io, int error)
 			 */
 			spin_lock_irqsave(&md->deferred_lock, flags);
 			if (__noflush_suspending(md)) {
-				if (!bio_barrier(io->bio))
+				if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
 					bio_list_add_head(&md->deferred, io->bio);
 			} else
@@ -598,7 +600,7 @@ static void dec_pending(struct dm_io *io, int error)
 		io_error = io->error;
 		bio = io->bio;

-		if (bio_barrier(bio)) {
+		if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
 			/*
 			 * There can be just one barrier request so we use
 			 * a per-device variable for error reporting.
@@ -738,16 +740,22 @@ static void rq_completed(struct mapped_device *md, int run_queue)
 	dm_put(md);
 }

+static void free_rq_clone(struct request *clone)
+{
+	struct dm_rq_target_io *tio = clone->end_io_data;
+
+	blk_rq_unprep_clone(clone);
+	free_rq_tio(tio);
+}
+
 static void dm_unprep_request(struct request *rq)
 {
 	struct request *clone = rq->special;
-	struct dm_rq_target_io *tio = clone->end_io_data;

 	rq->special = NULL;
 	rq->cmd_flags &= ~REQ_DONTPREP;

-	blk_rq_unprep_clone(clone);
-	free_rq_tio(tio);
+	free_rq_clone(clone);
 }

 /*
@@ -825,8 +833,7 @@ static void dm_end_request(struct request *clone, int error)
 		rq->sense_len = clone->sense_len;
 	}

-	BUG_ON(clone->bio);
-	free_rq_tio(tio);
+	free_rq_clone(clone);

 	blk_end_request_all(rq, error);

@@ -1204,7 +1211,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)

 	ci.map = dm_get_table(md);
 	if (unlikely(!ci.map)) {
-		if (!bio_barrier(bio))
+		if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
 			bio_io_error(bio);
 		else
 			if (!md->barrier_error)
@@ -1316,7 +1323,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
 	 * we have to queue this io for later.
 	 */
 	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
-	    unlikely(bio_barrier(bio))) {
+	    unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		up_read(&md->io_lock);

 		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -1339,7 +1346,7 @@ static int dm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct mapped_device *md = q->queuedata;

-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
@@ -1709,7 +1716,7 @@ out:
 	return r;
 }

-static struct block_device_operations dm_blk_dops;
+static const struct block_device_operations dm_blk_dops;

 static void dm_wq_work(struct work_struct *work);

@@ -1780,7 +1787,8 @@ static struct mapped_device *alloc_dev(int minor)
 	if (!md->disk)
 		goto bad_disk;

-	atomic_set(&md->pending, 0);
+	atomic_set(&md->pending[0], 0);
+	atomic_set(&md->pending[1], 0);
 	init_waitqueue_head(&md->wait);
 	INIT_WORK(&md->work, dm_wq_work);
 	init_waitqueue_head(&md->eventq);
@@ -2083,7 +2091,8 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
 				break;
 			}
 			spin_unlock_irqrestore(q->queue_lock, flags);
-		} else if (!atomic_read(&md->pending))
+		} else if (!atomic_read(&md->pending[0]) &&
+			   !atomic_read(&md->pending[1]))
 			break;

 		if (interruptible == TASK_INTERRUPTIBLE &&
@@ -2159,7 +2168,7 @@ static void dm_wq_work(struct work_struct *work)
 		if (dm_request_based(md))
 			generic_make_request(c);
 		else {
-			if (bio_barrier(c))
+			if (bio_rw_flagged(c, BIO_RW_BARRIER))
 				process_barrier(md, c);
 			else
 				__split_and_process_bio(md, c);
@@ -2654,7 +2663,7 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
 	kfree(pools);
 }

-static struct block_device_operations dm_blk_dops = {
+static const struct block_device_operations dm_blk_dops = {
 	.open = dm_blk_open,
 	.release = dm_blk_close,
 	.ioctl = dm_blk_ioctl,
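
dm.c now keeps one in-flight counter per direction, indexed by the bio's data direction (0 = read, 1 = write); "nothing pending" means both counters are zero. A sketch using C11 atomics in place of the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

struct accounting {
	atomic_int pending[2];
};

static void start_io(struct accounting *a, int rw)
{
	atomic_fetch_add(&a->pending[rw], 1);
}

static int end_io(struct accounting *a, int rw)
{
	/* Remaining I/O in this direction plus the other one */
	int left = atomic_fetch_sub(&a->pending[rw], 1) - 1;

	return left + atomic_load(&a->pending[rw ^ 1]);
}

int main(void)
{
	struct accounting a = { { 0, 0 } };

	start_io(&a, 0);	/* one read */
	start_io(&a, 1);	/* one write */
	printf("pending after the read completes: %d\n", end_io(&a, 0));
	return 0;
}
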
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 5fe39c2a3d2..1ceceb334d5 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -108,6 +108,9 @@ static int linear_congested(void *data, int bits)
 	linear_conf_t *conf;
 	int i, ret = 0;

+	if (mddev_congested(mddev, bits))
+		return 1;
+
 	rcu_read_lock();
 	conf = rcu_dereference(mddev->private);

@@ -288,7 +291,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
 	sector_t start_sector;
 	int cpu;

-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9dd872000ce..26ba42a7912 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -138,7 +138,7 @@ static ctl_table raid_root_table[] = {
 	{ .ctl_name = 0 }
 };

-static struct block_device_operations md_fops;
+static const struct block_device_operations md_fops;

 static int start_readonly;

@@ -262,6 +262,12 @@ static void mddev_resume(mddev_t *mddev)
 	mddev->pers->quiesce(mddev, 0);
 }

+int mddev_congested(mddev_t *mddev, int bits)
+{
+	return mddev->suspended;
+}
+EXPORT_SYMBOL(mddev_congested);
+
 static inline mddev_t *mddev_get(mddev_t *mddev)
 {
@@ -4218,7 +4224,7 @@ static int do_md_run(mddev_t * mddev)
 	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
 	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
-						"%s_resync");
+						"resync");
 	if (!mddev->sync_thread) {
 		printk(KERN_ERR "%s: could not start resync"
 			" thread...\n",
@@ -4575,10 +4581,10 @@ static int get_version(void __user * arg)
 static int get_array_info(mddev_t * mddev, void __user * arg)
 {
 	mdu_array_info_t info;
-	int nr,working,active,failed,spare;
+	int nr,working,insync,failed,spare;
 	mdk_rdev_t *rdev;

-	nr=working=active=failed=spare=0;
+	nr=working=insync=failed=spare=0;
 	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		nr++;
 		if (test_bit(Faulty, &rdev->flags))
@@ -4586,7 +4592,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
 		else {
 			working++;
 			if (test_bit(In_sync, &rdev->flags))
-				active++;
+				insync++;
 			else
 				spare++;
 		}
@@ -4611,7 +4617,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
 		info.state = (1<<MD_SB_CLEAN);
 	if (mddev->bitmap && mddev->bitmap_offset)
 		info.state = (1<<MD_SB_BITMAP_PRESENT);
-	info.active_disks  = active;
+	info.active_disks  = insync;
 	info.working_disks = working;
 	info.failed_disks  = failed;
 	info.spare_disks   = spare;
@@ -4721,7 +4727,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
 		if (!list_empty(&mddev->disks)) {
 			mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
 							mdk_rdev_t, same_set);
-			int err = super_types[mddev->major_version]
+			err = super_types[mddev->major_version]
 				.load_super(rdev, rdev0, mddev->minor_version);
 			if (err < 0) {
 				printk(KERN_WARNING
@@ -5556,7 +5562,7 @@ static int md_revalidate(struct gendisk *disk)
 	mddev->changed = 0;
 	return 0;
 }
-static struct block_device_operations md_fops =
+static const struct block_device_operations md_fops =
 {
 	.owner		= THIS_MODULE,
 	.open		= md_open,
@@ -5631,7 +5637,10 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
 	thread->run = run;
 	thread->mddev = mddev;
 	thread->timeout = MAX_SCHEDULE_TIMEOUT;
-	thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
+	thread->tsk = kthread_run(md_thread, thread,
+				  "%s_%s",
+				  mdname(thread->mddev),
+				  name ?: mddev->pers->name);
 	if (IS_ERR(thread->tsk)) {
 		kfree(thread);
 		return NULL;
@@ -6745,7 +6754,7 @@ void md_check_recovery(mddev_t *mddev)
 			}
 			mddev->sync_thread = md_register_thread(md_do_sync, mddev,
-								"%s_resync");
+								"resync");
 			if (!mddev->sync_thread) {
 				printk(KERN_ERR "%s: could not start resync"
 					" thread...\n",
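
md_register_thread() now builds the thread name itself from a bare suffix, falling back to the personality name when callers pass NULL; callers no longer supply a format string. A sketch of the naming rule, with snprintf standing in for kthread_run()'s printf-style name argument:

#include <stdio.h>

static void thread_name(char *buf, size_t len, const char *mdname,
			const char *name, const char *pers_name)
{
	/* "%s_%s" with the personality name as fallback suffix */
	snprintf(buf, len, "%s_%s", mdname, name ? name : pers_name);
}

int main(void)
{
	char buf[32];

	thread_name(buf, sizeof(buf), "md0", "resync", "raid5");
	printf("%s\n", buf);	/* md0_resync */
	thread_name(buf, sizeof(buf), "md0", NULL, "raid10");
	printf("%s\n", buf);	/* md0_raid10 */
	return 0;
}
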
diff --git a/drivers/md/md.h b/drivers/md/md.h
index f8fc188bc76..f184b69ef33 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -201,7 +201,7 @@ struct mddev_s
 	 *	INTR:     resync needs to be aborted for some reason
 	 *	DONE:     thread is done and is waiting to be reaped
 	 *	REQUEST:  user-space has requested a sync (used with SYNC)
-	 *	CHECK:    user-space request for for check-only, no repair
+	 *	CHECK:    user-space request for check-only, no repair
 	 *	RESHAPE:  A reshape is happening
 	 *
 	 *	If neither SYNC or RESHAPE are set, then it is a recovery.
@@ -430,6 +430,7 @@ extern void md_write_end(mddev_t *mddev);
 extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
 extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);

+extern int mddev_congested(mddev_t *mddev, int bits);
 extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 			   sector_t sector, int size, struct page *page);
 extern void md_super_wait(mddev_t *mddev);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 7140909f666..ee7646f974a 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -90,7 +90,7 @@ static void multipath_end_request(struct bio *bio, int error)

 	if (uptodate)
 		multipath_end_bh_io(mp_bh, 0);
-	else if (!bio_rw_ahead(bio)) {
+	else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) {
 		/*
 		 * oops, IO error:
 		 */
@@ -144,7 +144,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
 	const int rw = bio_data_dir(bio);
 	int cpu;

-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
@@ -198,6 +198,9 @@ static int multipath_congested(void *data, int bits)
 	multipath_conf_t *conf = mddev->private;
 	int i, ret = 0;

+	if (mddev_congested(mddev, bits))
+		return 1;
+
 	rcu_read_lock();
 	for (i = 0; i < mddev->raid_disks ; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
@@ -493,7 +496,7 @@ static int multipath_run (mddev_t *mddev)
 	}
 	mddev->degraded = conf->raid_disks - conf->working_disks;

-	conf->pool = mempool_create_kzalloc_pool(NR_RESERVED_BUFS,
+	conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
 						 sizeof(struct multipath_bh));
 	if (conf->pool == NULL) {
 		printk(KERN_ERR
@@ -503,7 +506,7 @@ static int multipath_run (mddev_t *mddev)
 	}

 	{
-		mddev->thread = md_register_thread(multipathd, mddev, "%s_multipath");
+		mddev->thread = md_register_thread(multipathd, mddev, NULL);
 		if (!mddev->thread) {
 			printk(KERN_ERR "multipath: couldn't allocate thread"
 				" for %s\n", mdname(mddev));
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 898e2bdfee4..d3a4ce06015 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -44,6 +44,9 @@ static int raid0_congested(void *data, int bits)
 	mdk_rdev_t **devlist = conf->devlist;
 	int i, ret = 0;

+	if (mddev_congested(mddev, bits))
+		return 1;
+
 	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
 		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

@@ -86,7 +89,7 @@ static void dump_zones(mddev_t *mddev)

 static int create_strip_zones(mddev_t *mddev)
 {
-	int i, c, j, err;
+	int i, c, err;
 	sector_t curr_zone_end, sectors;
 	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev, **dev;
 	struct strip_zone *zone;
@@ -198,6 +201,8 @@ static int create_strip_zones(mddev_t *mddev)
 	/* now do the other zones */
 	for (i = 1; i < conf->nr_strip_zones; i++)
 	{
+		int j;
+
 		zone = conf->strip_zone + i;
 		dev = conf->devlist + i * mddev->raid_disks;

@@ -207,7 +212,6 @@ static int create_strip_zones(mddev_t *mddev)
 		c = 0;

 		for (j=0; j<cnt; j++) {
-			char b[BDEVNAME_SIZE];
 			rdev = conf->devlist[j];
 			printk(KERN_INFO "raid0: checking %s ...",
 				bdevname(rdev->bdev, b));
@@ -448,7 +452,7 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
 	const int rw = bio_data_dir(bio);
 	int cpu;

-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
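
Every personality's congestion callback gains the same guard clause: a suspended array reports itself congested before any member device is consulted. A sketch of the pattern with simplified types and a stubbed per-disk check:

#include <stdbool.h>
#include <stdio.h>

struct array {
	bool suspended;
	int ndisks;
};

/* Stub for the real per-device backlog check */
static bool disk_congested(struct array *a, int i)
{
	(void)a; (void)i;
	return false;
}

static int array_congested(struct array *a, int bits)
{
	(void)bits;	/* read/write congestion bits, unused in the sketch */

	if (a->suspended)	/* the new mddev_congested() early return */
		return 1;

	for (int i = 0; i < a->ndisks; i++)
		if (disk_congested(a, i))
			return 1;
	return 0;
}

int main(void)
{
	struct array a = { .suspended = true, .ndisks = 2 };

	printf("%d\n", array_congested(&a, 0));	/* 1: suspended */
	return 0;
}
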
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 8726fd7ebce..d1b9bd5fd4f 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -576,6 +576,9 @@ static int raid1_congested(void *data, int bits)
 	conf_t *conf = mddev->private;
 	int i, ret = 0;

+	if (mddev_congested(mddev, bits))
+		return 1;
+
 	rcu_read_lock();
 	for (i = 0; i < mddev->raid_disks; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
@@ -782,8 +785,9 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	struct bio_list bl;
 	struct page **behind_pages = NULL;
 	const int rw = bio_data_dir(bio);
-	const int do_sync = bio_sync(bio);
-	int cpu, do_barriers;
+	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	int cpu;
+	bool do_barriers;
 	mdk_rdev_t *blocked_rdev;

 	/*
@@ -797,7 +801,8 @@ static int make_request(struct request_queue *q, struct bio * bio)

 	md_write_start(mddev, bio); /* wait on superblock update early */

-	if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
+	if (unlikely(!mddev->barriers_work &&
+		     bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		if (rw == WRITE)
 			md_write_end(mddev);
 		bio_endio(bio, -EOPNOTSUPP);
@@ -849,7 +854,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
 		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
 		read_bio->bi_bdev = mirror->rdev->bdev;
 		read_bio->bi_end_io = raid1_end_read_request;
-		read_bio->bi_rw = READ | do_sync;
+		read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
 		read_bio->bi_private = r1_bio;

 		generic_make_request(read_bio);
@@ -925,7 +930,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	atomic_set(&r1_bio->remaining, 0);
 	atomic_set(&r1_bio->behind_remaining, 0);

-	do_barriers = bio_barrier(bio);
+	do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER);
 	if (do_barriers)
 		set_bit(R1BIO_Barrier, &r1_bio->state);

@@ -941,7 +946,8 @@ static int make_request(struct request_queue *q, struct bio * bio)
 		mbio->bi_sector	= r1_bio->sector + conf->mirrors[i].rdev->data_offset;
 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
 		mbio->bi_end_io	= raid1_end_write_request;
-		mbio->bi_rw = WRITE | do_barriers | do_sync;
+		mbio->bi_rw = WRITE | (do_barriers << BIO_RW_BARRIER) |
+			(do_sync << BIO_RW_SYNCIO);
 		mbio->bi_private = r1_bio;

 		if (behind_pages) {
@@ -1600,7 +1606,7 @@ static void raid1d(mddev_t *mddev)
 			 * We already have a nr_pending reference on these rdevs.
 			 */
 			int i;
-			const int do_sync = bio_sync(r1_bio->master_bio);
+			const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
 			clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
 			clear_bit(R1BIO_Barrier, &r1_bio->state);
 			for (i=0; i < conf->raid_disks; i++)
@@ -1621,7 +1627,8 @@ static void raid1d(mddev_t *mddev)
 						conf->mirrors[i].rdev->data_offset;
 					bio->bi_bdev = conf->mirrors[i].rdev->bdev;
 					bio->bi_end_io = raid1_end_write_request;
-					bio->bi_rw = WRITE | do_sync;
+					bio->bi_rw = WRITE |
+						(do_sync << BIO_RW_SYNCIO);
 					bio->bi_private = r1_bio;
 					r1_bio->bios[i] = bio;
 					generic_make_request(bio);
@@ -1654,7 +1661,7 @@ static void raid1d(mddev_t *mddev)
 				       (unsigned long long)r1_bio->sector);
 				raid_end_bio_io(r1_bio);
 			} else {
-				const int do_sync = bio_sync(r1_bio->master_bio);
+				const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
 				r1_bio->bios[r1_bio->read_disk] =
 					mddev->ro ? IO_BLOCKED : NULL;
 				r1_bio->read_disk = disk;
@@ -1670,7 +1677,7 @@ static void raid1d(mddev_t *mddev)
 				bio->bi_sector = r1_bio->sector + rdev->data_offset;
 				bio->bi_bdev = rdev->bdev;
 				bio->bi_end_io = raid1_end_read_request;
-				bio->bi_rw = READ | do_sync;
+				bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
 				bio->bi_private = r1_bio;
 				unplug = 1;
 				generic_make_request(bio);
@@ -2045,7 +2052,7 @@ static int run(mddev_t *mddev)
 	}
 	conf->last_used = j;

-	mddev->thread = md_register_thread(raid1d, mddev, "%s_raid1");
+	mddev->thread = md_register_thread(raid1d, mddev, NULL);
 	if (!mddev->thread) {
 		printk(KERN_ERR
 			"raid1: couldn't allocate thread for %s\n",
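
The bio_sync()/bio_barrier() wrappers are replaced throughout by one generic test, bio_rw_flagged(bio, flag); since the result is now a bool (0 or 1), code that propagates the flag to a new request shifts it back into bit position. A sketch with illustrative bit numbers, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

enum { RW_SYNCIO = 4, RW_BARRIER = 5 };	/* hypothetical bit positions */

static bool rw_flagged(unsigned long rw, unsigned bit)
{
	return (rw & (1UL << bit)) != 0;
}

int main(void)
{
	unsigned long master = 1UL << RW_SYNCIO;
	bool do_sync = rw_flagged(master, RW_SYNCIO);

	/* Rebuild flags for a clone: shift the bool back into place */
	unsigned long clone = (unsigned long)do_sync << RW_SYNCIO;

	printf("do_sync=%d clone=%#lx\n", do_sync, clone);
	return 0;
}
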
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3d9020cf6f6..51c4c5c4d87 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -631,6 +631,8 @@ static int raid10_congested(void *data, int bits)
 	conf_t *conf = mddev->private;
 	int i, ret = 0;

+	if (mddev_congested(mddev, bits))
+		return 1;
 	rcu_read_lock();
 	for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
@@ -796,12 +798,12 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	int i;
 	int chunk_sects = conf->chunk_mask + 1;
 	const int rw = bio_data_dir(bio);
-	const int do_sync = bio_sync(bio);
+	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
 	struct bio_list bl;
 	unsigned long flags;
 	mdk_rdev_t *blocked_rdev;

-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
@@ -882,7 +884,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
 			mirror->rdev->data_offset;
 		read_bio->bi_bdev = mirror->rdev->bdev;
 		read_bio->bi_end_io = raid10_end_read_request;
-		read_bio->bi_rw = READ | do_sync;
+		read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
 		read_bio->bi_private = r10_bio;

 		generic_make_request(read_bio);
@@ -950,7 +952,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
 			conf->mirrors[d].rdev->data_offset;
 		mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
 		mbio->bi_end_io	= raid10_end_write_request;
-		mbio->bi_rw = WRITE | do_sync;
+		mbio->bi_rw = WRITE | (do_sync << BIO_RW_SYNCIO);
 		mbio->bi_private = r10_bio;

 		atomic_inc(&r10_bio->remaining);
@@ -1610,7 +1612,7 @@ static void raid10d(mddev_t *mddev)
 				raid_end_bio_io(r10_bio);
 				bio_put(bio);
 			} else {
-				const int do_sync = bio_sync(r10_bio->master_bio);
+				const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
 				bio_put(bio);
 				rdev = conf->mirrors[mirror].rdev;
 				if (printk_ratelimit())
@@ -1623,7 +1625,7 @@ static void raid10d(mddev_t *mddev)
 				bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
 					+ rdev->data_offset;
 				bio->bi_bdev = rdev->bdev;
-				bio->bi_rw = READ | do_sync;
+				bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
 				bio->bi_private = r10_bio;
 				bio->bi_end_io = raid10_end_read_request;
 				unplug = 1;
@@ -1773,7 +1775,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
 	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
 		/* recovery... the complicated one */
-		int i, j, k;
+		int j, k;
 		r10_bio = NULL;

 		for (i=0 ; i<conf->raid_disks; i++)
@@ -2188,7 +2190,7 @@ static int run(mddev_t *mddev)
 	}

-	mddev->thread = md_register_thread(raid10d, mddev, "%s_raid10");
+	mddev->thread = md_register_thread(raid10d, mddev, NULL);
 	if (!mddev->thread) {
 		printk(KERN_ERR
 		       "raid10: couldn't allocate thread for %s\n",
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 1898eda6072..94829804ab7 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2924,7 +2924,8 @@ static bool handle_stripe5(struct stripe_head *sh)
 	rcu_read_lock();
 	for (i=disks; i--; ) {
 		mdk_rdev_t *rdev;
-		struct r5dev *dev = &sh->dev[i];
+
+		dev = &sh->dev[i];
 		clear_bit(R5_Insync, &dev->flags);

 		pr_debug("check %d: state 0x%lx toread %p read %p write %p "
@@ -3548,6 +3549,9 @@ static int raid5_congested(void *data, int bits)
 	/* No difference between reads and writes.  Just check
 	 * how busy the stripe_cache is
 	 */
+
+	if (mddev_congested(mddev, bits))
+		return 1;
 	if (conf->inactive_blocked)
 		return 1;
 	if (conf->quiesce)
@@ -3823,7 +3827,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
 	const int rw = bio_data_dir(bi);
 	int cpu, remaining;

-	if (unlikely(bio_barrier(bi))) {
+	if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
 		bio_endio(bi, -EOPNOTSUPP);
 		return 0;
 	}
@@ -4097,7 +4101,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
 	INIT_LIST_HEAD(&stripes);
 	for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
 		int j;
-		int skipped = 0;
+		int skipped_disk = 0;
 		sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
 		set_bit(STRIPE_EXPANDING, &sh->state);
 		atomic_inc(&conf->reshape_stripes);
@@ -4113,14 +4117,14 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
 				continue;
 			s = compute_blocknr(sh, j, 0);
 			if (s < raid5_size(mddev, 0, 0)) {
-				skipped = 1;
+				skipped_disk = 1;
 				continue;
 			}
 			memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
 			set_bit(R5_Expanded, &sh->dev[j].flags);
 			set_bit(R5_UPTODATE, &sh->dev[j].flags);
 		}
-		if (!skipped) {
+		if (!skipped_disk) {
 			set_bit(STRIPE_EXPAND_READY, &sh->state);
 			set_bit(STRIPE_HANDLE, &sh->state);
 		}
@@ -4798,7 +4802,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
 	printk(KERN_INFO "raid5: allocated %dkB for %s\n",
 		memory, mdname(mddev));

-	conf->thread = md_register_thread(raid5d, mddev, "%s_raid5");
+	conf->thread = md_register_thread(raid5d, mddev, NULL);
 	if (!conf->thread) {
 		printk(KERN_ERR
 		       "raid5: couldn't allocate thread for %s\n",
@@ -4964,7 +4968,7 @@ static int run(mddev_t *mddev)
 		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
 		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
 		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
-							"%s_reshape");
+							"reshape");
 	}

 	/* read-ahead size must cover two whole stripes, which is
@@ -5382,7 +5386,7 @@ static int raid5_start_reshape(mddev_t *mddev)
 	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
 	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
 	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
-						"%s_reshape");
+						"reshape");
 	if (!mddev->sync_thread) {
 		mddev->recovery = 0;
 		spin_lock_irq(&conf->device_lock);