Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bcache/closure.c |  4
-rw-r--r--  drivers/md/dm-core.h        |  1
-rw-r--r--  drivers/md/dm-crypt.c       |  5
-rw-r--r--  drivers/md/dm-ioctl.c       | 37
-rw-r--r--  drivers/md/dm-raid.c        | 13
-rw-r--r--  drivers/md/dm.c             | 10
-rw-r--r--  drivers/md/md.c             | 72
-rw-r--r--  drivers/md/md.h             |  1
-rw-r--r--  drivers/md/raid5.c          |  7
9 files changed, 96 insertions, 54 deletions
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 7d5286b05036..1841d0359bac 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(closure_put);
void __closure_wake_up(struct closure_waitlist *wait_list)
{
struct llist_node *list;
- struct closure *cl;
+ struct closure *cl, *t;
struct llist_node *reverse = NULL;
list = llist_del_all(&wait_list->list);
@@ -73,7 +73,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
reverse = llist_reverse_order(list);
/* Then do the wakeups */
- llist_for_each_entry(cl, reverse, list) {
+ llist_for_each_entry_safe(cl, t, reverse, list) {
closure_set_waiting(cl, 0);
closure_sub(cl, CLOSURE_WAITING + 1);
}
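
The switch to llist_for_each_entry_safe() matters because closure_sub() can drop the last reference and free the closure inside the loop body; the unsafe iterator would then dereference the freed node to find its successor, while the safe variant caches the next pointer in the extra cursor (t) before the body runs. A minimal user-space sketch of the same idea, with hypothetical names rather than the kernel llist API:

#include <stdlib.h>

struct node {
	struct node *next;
};

/* Walk a singly linked list whose nodes the callback may free.
 * The successor is cached before visit() runs, which is exactly what
 * the extra cursor in llist_for_each_entry_safe() provides. */
static void walk_destructive(struct node *head, void (*visit)(struct node *))
{
	struct node *cur = head, *next;

	while (cur) {
		next = cur->next;   /* cache before visit() may free cur */
		visit(cur);
		cur = next;
	}
}

/* e.g. visit == free: every node is released, yet the walk stays valid */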
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 24eddbdf2ab4..203144762f36 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -149,5 +149,6 @@ static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen
extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
+void dm_issue_global_event(void);
#endif
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index a55ffd4f5933..96ab46512e1f 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2466,6 +2466,7 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
kfree(cipher_api);
return ret;
}
+ kfree(cipher_api);
return 0;
bad_mem:
@@ -2584,6 +2585,10 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
ti->error = "Invalid feature value for sector_size";
return -EINVAL;
}
+ if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
+ ti->error = "Device size is not multiple of sector_size feature";
+ return -EINVAL;
+ }
cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
} else if (!strcasecmp(opt_string, "iv_large_sectors"))
set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
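
The added dm-crypt check rejects a target whose length is not a whole number of crypto sectors. ti->len counts 512-byte sectors, so cc->sector_size >> SECTOR_SHIFT is the number of 512-byte sectors per crypto sector, and because sector_size is a power of two the remainder test collapses to a bit mask: with sector_size = 4096 there are 8 sectors per block, so a 1000-sector device passes (1000 & 7 == 0) while a 1001-sector device is refused. A small user-space sketch of the same test, with hypothetical names:

#include <stdbool.h>
#include <stdint.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors, as in the kernel */

/* len_512: target length in 512-byte sectors
 * sector_size: crypto sector size in bytes, a power of two >= 512 */
static bool len_fits_sector_size(uint64_t len_512, uint32_t sector_size)
{
	uint32_t per_block = sector_size >> SECTOR_SHIFT;

	/* power-of-two multiple test: x % n == 0  <=>  (x & (n - 1)) == 0 */
	return (len_512 & (per_block - 1)) == 0;
}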
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 8756a6850431..e52676fa9832 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -477,9 +477,13 @@ static int remove_all(struct file *filp, struct dm_ioctl *param, size_t param_si
* Round up the ptr to an 8-byte boundary.
*/
#define ALIGN_MASK 7
+static inline size_t align_val(size_t val)
+{
+ return (val + ALIGN_MASK) & ~ALIGN_MASK;
+}
static inline void *align_ptr(void *ptr)
{
- return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK);
+ return (void *)align_val((size_t)ptr);
}
/*
@@ -505,7 +509,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
struct hash_cell *hc;
size_t len, needed = 0;
struct gendisk *disk;
- struct dm_name_list *nl, *old_nl = NULL;
+ struct dm_name_list *orig_nl, *nl, *old_nl = NULL;
uint32_t *event_nr;
down_write(&_hash_lock);
@@ -516,17 +520,15 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
*/
for (i = 0; i < NUM_BUCKETS; i++) {
list_for_each_entry (hc, _name_buckets + i, name_list) {
- needed += sizeof(struct dm_name_list);
- needed += strlen(hc->name) + 1;
- needed += ALIGN_MASK;
- needed += (sizeof(uint32_t) + ALIGN_MASK) & ~ALIGN_MASK;
+ needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1);
+ needed += align_val(sizeof(uint32_t));
}
}
/*
* Grab our output buffer.
*/
- nl = get_result_buffer(param, param_size, &len);
+ nl = orig_nl = get_result_buffer(param, param_size, &len);
if (len < needed) {
param->flags |= DM_BUFFER_FULL_FLAG;
goto out;
@@ -549,11 +551,16 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
strcpy(nl->name, hc->name);
old_nl = nl;
- event_nr = align_ptr(((void *) (nl + 1)) + strlen(hc->name) + 1);
+ event_nr = align_ptr(nl->name + strlen(hc->name) + 1);
*event_nr = dm_get_event_nr(hc->md);
nl = align_ptr(event_nr + 1);
}
}
+ /*
+ * If mismatch happens, security may be compromised due to buffer
+ * overflow, so it's better to crash.
+ */
+ BUG_ON((char *)nl - (char *)orig_nl != needed);
out:
up_write(&_hash_lock);
@@ -1621,7 +1628,8 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
* which has a variable size, is not used by the function processing
* the ioctl.
*/
-#define IOCTL_FLAGS_NO_PARAMS 1
+#define IOCTL_FLAGS_NO_PARAMS 1
+#define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT 2
/*-----------------------------------------------------------------
* Implementation of open/close/ioctl on the special char
@@ -1635,12 +1643,12 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
ioctl_fn fn;
} _ioctls[] = {
{DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */
- {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS, remove_all},
+ {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, remove_all},
{DM_LIST_DEVICES_CMD, 0, list_devices},
- {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_create},
- {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_remove},
- {DM_DEV_RENAME_CMD, 0, dev_rename},
+ {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_create},
+ {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_remove},
+ {DM_DEV_RENAME_CMD, IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_rename},
{DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend},
{DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status},
{DM_DEV_WAIT_CMD, 0, dev_wait},
@@ -1869,6 +1877,9 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us
unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS))
DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd);
+ if (!r && ioctl_flags & IOCTL_FLAGS_ISSUE_GLOBAL_EVENT)
+ dm_issue_global_event();
+
/*
* Copy the results back to userland.
*/
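
align_val() rounds a byte count up to the next multiple of 8 (for example align_val(13) = (13 + 7) & ~7 = 16), and list_devices() now applies the same rounding both when it sizes the output buffer and when it lays entries out via align_ptr(), which is what makes the final BUG_ON() comparison meaningful. A hedged sketch of the per-entry sizing arithmetic, with a hypothetical header parameter standing in for offsetof(struct dm_name_list, name):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define ALIGN_MASK 7

static inline size_t align_val(size_t val)
{
	return (val + ALIGN_MASK) & ~(size_t)ALIGN_MASK;
}

/* One entry is the header plus NUL-terminated name, rounded to 8 bytes,
 * followed by an 8-byte-aligned 32-bit event counter. */
static size_t entry_size(size_t header, const char *name)
{
	return align_val(header + strlen(name) + 1) + align_val(sizeof(uint32_t));
}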
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 5bfe285ea9d1..2245d06d2045 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3238,7 +3238,7 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
return DM_MAPIO_REQUEUE;
- mddev->pers->make_request(mddev, bio);
+ md_handle_request(mddev, bio);
return DM_MAPIO_SUBMITTED;
}
@@ -3297,11 +3297,10 @@ static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev,
static sector_t rs_get_progress(struct raid_set *rs,
sector_t resync_max_sectors, bool *array_in_sync)
{
- sector_t r, recovery_cp, curr_resync_completed;
+ sector_t r, curr_resync_completed;
struct mddev *mddev = &rs->md;
curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp;
- recovery_cp = mddev->recovery_cp;
*array_in_sync = false;
if (rs_is_raid0(rs)) {
@@ -3330,9 +3329,11 @@ static sector_t rs_get_progress(struct raid_set *rs,
} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
r = curr_resync_completed;
else
- r = recovery_cp;
+ r = mddev->recovery_cp;
- if (r == MaxSector) {
+ if ((r == MaxSector) ||
+ (test_bit(MD_RECOVERY_DONE, &mddev->recovery) &&
+ (mddev->curr_resync_completed == resync_max_sectors))) {
/*
* Sync complete.
*/
@@ -3892,7 +3893,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 12, 1},
+ .version = {1, 13, 0},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6e54145969c5..4be85324f44d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -52,6 +52,12 @@ static struct workqueue_struct *deferred_remove_workqueue;
atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
+void dm_issue_global_event(void)
+{
+ atomic_inc(&dm_global_event_nr);
+ wake_up(&dm_global_eventq);
+}
+
/*
* One of these is allocated per bio.
*/
@@ -1865,9 +1871,8 @@ static void event_callback(void *context)
dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
atomic_inc(&md->event_nr);
- atomic_inc(&dm_global_event_nr);
wake_up(&md->eventq);
- wake_up(&dm_global_eventq);
+ dm_issue_global_event();
}
/*
@@ -2283,6 +2288,7 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
}
map = __bind(md, table, &limits);
+ dm_issue_global_event();
out:
mutex_unlock(&md->suspend_lock);
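
dm_issue_global_event() pairs a monotonically increasing counter (dm_global_event_nr) with a wake-up on dm_global_eventq; a listener samples the counter, sleeps on the queue, and rechecks after each wake-up. A user-space analogue of that counter-and-wait pattern (pthreads, hypothetical names, not the DM interface itself):

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t evt_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  evt_cond = PTHREAD_COND_INITIALIZER;
static uint64_t evt_nr;

/* analogue of dm_issue_global_event(): bump the counter, wake all waiters */
static void issue_global_event(void)
{
	pthread_mutex_lock(&evt_lock);
	evt_nr++;
	pthread_cond_broadcast(&evt_cond);
	pthread_mutex_unlock(&evt_lock);
}

/* analogue of a listener: sleep until the counter moves past "seen" */
static uint64_t wait_for_event(uint64_t seen)
{
	pthread_mutex_lock(&evt_lock);
	while (evt_nr == seen)
		pthread_cond_wait(&evt_cond, &evt_lock);
	seen = evt_nr;
	pthread_mutex_unlock(&evt_lock);
	return seen;
}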
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 08fcaebc61bd..0ff1bbf6c90e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -266,6 +266,37 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
* call has finished, the bio has been linked into some internal structure
* and so is visible to ->quiesce(), so we don't need the refcount any more.
*/
+void md_handle_request(struct mddev *mddev, struct bio *bio)
+{
+check_suspended:
+ rcu_read_lock();
+ if (mddev->suspended) {
+ DEFINE_WAIT(__wait);
+ for (;;) {
+ prepare_to_wait(&mddev->sb_wait, &__wait,
+ TASK_UNINTERRUPTIBLE);
+ if (!mddev->suspended)
+ break;
+ rcu_read_unlock();
+ schedule();
+ rcu_read_lock();
+ }
+ finish_wait(&mddev->sb_wait, &__wait);
+ }
+ atomic_inc(&mddev->active_io);
+ rcu_read_unlock();
+
+ if (!mddev->pers->make_request(mddev, bio)) {
+ atomic_dec(&mddev->active_io);
+ wake_up(&mddev->sb_wait);
+ goto check_suspended;
+ }
+
+ if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
+ wake_up(&mddev->sb_wait);
+}
+EXPORT_SYMBOL(md_handle_request);
+
static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
{
const int rw = bio_data_dir(bio);
@@ -285,23 +316,6 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
bio_endio(bio);
return BLK_QC_T_NONE;
}
-check_suspended:
- rcu_read_lock();
- if (mddev->suspended) {
- DEFINE_WAIT(__wait);
- for (;;) {
- prepare_to_wait(&mddev->sb_wait, &__wait,
- TASK_UNINTERRUPTIBLE);
- if (!mddev->suspended)
- break;
- rcu_read_unlock();
- schedule();
- rcu_read_lock();
- }
- finish_wait(&mddev->sb_wait, &__wait);
- }
- atomic_inc(&mddev->active_io);
- rcu_read_unlock();
/*
* save the sectors now since our bio can
@@ -310,20 +324,14 @@ check_suspended:
sectors = bio_sectors(bio);
/* bio could be mergeable after passing to underlayer */
bio->bi_opf &= ~REQ_NOMERGE;
- if (!mddev->pers->make_request(mddev, bio)) {
- atomic_dec(&mddev->active_io);
- wake_up(&mddev->sb_wait);
- goto check_suspended;
- }
+
+ md_handle_request(mddev, bio);
cpu = part_stat_lock();
part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
part_stat_unlock();
- if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
- wake_up(&mddev->sb_wait);
-
return BLK_QC_T_NONE;
}
@@ -439,16 +447,22 @@ static void md_submit_flush_data(struct work_struct *ws)
struct mddev *mddev = container_of(ws, struct mddev, flush_work);
struct bio *bio = mddev->flush_bio;
+ /*
+ * must reset flush_bio before calling into md_handle_request to avoid a
+ * deadlock, because other bios passed md_handle_request suspend check
+ * could wait for this and below md_handle_request could wait for those
+ * bios because of suspend check
+ */
+ mddev->flush_bio = NULL;
+ wake_up(&mddev->sb_wait);
+
if (bio->bi_iter.bi_size == 0)
/* an empty barrier - all done */
bio_endio(bio);
else {
bio->bi_opf &= ~REQ_PREFLUSH;
- mddev->pers->make_request(mddev, bio);
+ md_handle_request(mddev, bio);
}
-
- mddev->flush_bio = NULL;
- wake_up(&mddev->sb_wait);
}
void md_flush_request(struct mddev *mddev, struct bio *bio)
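
md_handle_request() acts as a suspend gate around ->make_request(): it sleeps on sb_wait while the array is suspended, holds an active_io reference across the call, retries from the top if the personality rejects the bio because a suspend raced in, and wakes any suspender once active_io drops to zero. A user-space analogue of that gate (pthreads, hypothetical names; the retry loop is omitted):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t gate_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  gate_cond = PTHREAD_COND_INITIALIZER;
static bool suspended;
static int  active_io;

/* analogue of the entry path: block while suspended, then count
 * ourselves as in flight before issuing the request */
static void io_enter(void)
{
	pthread_mutex_lock(&gate_lock);
	while (suspended)
		pthread_cond_wait(&gate_cond, &gate_lock);
	active_io++;
	pthread_mutex_unlock(&gate_lock);
}

/* analogue of the exit path: drop the in-flight count and wake a
 * suspender that is waiting for active_io to reach zero */
static void io_exit(void)
{
	pthread_mutex_lock(&gate_lock);
	if (--active_io == 0)
		pthread_cond_broadcast(&gate_cond);
	pthread_mutex_unlock(&gate_lock);
}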
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 561d22b9a9a8..d8287d3cd1bf 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -692,6 +692,7 @@ extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
extern void md_rdev_clear(struct md_rdev *rdev);
+extern void md_handle_request(struct mddev *mddev, struct bio *bio);
extern void mddev_suspend(struct mddev *mddev);
extern void mddev_resume(struct mddev *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 076409455b60..928e24a07133 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6575,14 +6575,17 @@ static ssize_t
raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
{
struct r5conf *conf;
- unsigned long new;
+ unsigned int new;
int err;
struct r5worker_group *new_groups, *old_groups;
int group_cnt, worker_cnt_per_group;
if (len >= PAGE_SIZE)
return -EINVAL;
- if (kstrtoul(page, 10, &new))
+ if (kstrtouint(page, 10, &new))
+ return -EINVAL;
+ /* 8192 should be big enough */
+ if (new > 8192)
return -EINVAL;
err = mddev_lock(mddev);
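
The raid5 change parses group_thread_cnt with kstrtouint() and caps it at 8192, so a huge value written through sysfs can no longer inflate the worker-group allocation behind it. A small user-space sketch of the same parse-and-bound step, with hypothetical names:

#include <errno.h>
#include <stdlib.h>

#define MAX_GROUP_THREADS 8192	/* same cap the patch applies */

/* Parse a decimal worker count and reject anything out of range;
 * stands in for the kstrtouint() + bound check in the patch. */
static int parse_group_thread_cnt(const char *s, unsigned int *out)
{
	char *end;
	unsigned long v;

	errno = 0;
	v = strtoul(s, &end, 10);
	if (errno || end == s || (*end != '\0' && *end != '\n'))
		return -EINVAL;
	if (v > MAX_GROUP_THREADS)
		return -EINVAL;
	*out = (unsigned int)v;
	return 0;
}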