Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r--  drivers/md/raid1.c  82
1 file changed, 26 insertions(+), 56 deletions(-)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f3f3e40dc9d8..6df398e3a008 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -37,13 +37,12 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
-#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid1.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
#define UNSUPPORTED_MDDEV_FLAGS \
((1L << MD_HAS_JOURNAL) | \
@@ -810,11 +809,15 @@ static void flush_pending_writes(struct r1conf *conf)
spin_lock_irq(&conf->device_lock);
if (conf->pending_bio_list.head) {
+ struct blk_plug plug;
struct bio *bio;
+
bio = bio_list_get(&conf->pending_bio_list);
conf->pending_count = 0;
spin_unlock_irq(&conf->device_lock);
+ blk_start_plug(&plug);
flush_bio_list(conf, bio);
+ blk_finish_plug(&plug);
} else
spin_unlock_irq(&conf->device_lock);
}
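
The plug added here batches the bios that flush_bio_list() submits, so the block layer can merge adjacent requests before they reach the member devices. A minimal sketch of the same plugging pattern, with a hypothetical submit_batch() standing in for flush_bio_list():

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Bios submitted between blk_start_plug() and blk_finish_plug() are
 * held on the current task and dispatched as a batch, giving the
 * block layer a chance to merge adjacent requests. submit_batch()
 * is a hypothetical stand-in for flush_bio_list().
 */
static void submit_batch(struct bio_list *list)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)) != NULL)
		submit_bio(bio);
	blk_finish_plug(&plug);
}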
@@ -990,14 +993,6 @@ static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
_wait_barrier(conf, idx);
}
-static void wait_all_barriers(struct r1conf *conf)
-{
- int idx;
-
- for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
- _wait_barrier(conf, idx);
-}
-
static void _allow_barrier(struct r1conf *conf, int idx)
{
atomic_dec(&conf->nr_pending[idx]);
@@ -1011,14 +1006,6 @@ static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
_allow_barrier(conf, idx);
}
-static void allow_all_barriers(struct r1conf *conf)
-{
- int idx;
-
- for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
- _allow_barrier(conf, idx);
-}
-
/* conf->resync_lock should be held */
static int get_unqueued_pending(struct r1conf *conf)
{
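
The dropped wait_all_barriers()/allow_all_barriers() helpers were the only callers that touched every barrier bucket; the normal I/O paths map each request to a single bucket. raid1.c derives the bucket from the sector roughly like this (BARRIER_UNIT_SECTOR_BITS is 17, i.e. 64MB barrier units):

#include <linux/hash.h>

/*
 * Sector-to-bucket mapping, as in raid1's sector_to_idx(): the array
 * is divided into 64MB barrier units and the unit number is hashed
 * into BARRIER_BUCKETS_NR buckets, so I/O to unrelated regions does
 * not serialize on a single barrier counter.
 */
static inline int sector_to_idx(sector_t sector)
{
	return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
			 BARRIER_BUCKETS_NR_BITS);
}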
@@ -1303,42 +1290,28 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
int first_clone;
int max_sectors;
- /*
- * Register the new request and wait if the reconstruction
- * thread has put up a bar for new requests.
- * Continue immediately if no resync is active currently.
- */
-
-
- if ((bio_end_sector(bio) > mddev->suspend_lo &&
- bio->bi_iter.bi_sector < mddev->suspend_hi) ||
- (mddev_is_clustered(mddev) &&
+ if (mddev_is_clustered(mddev) &&
md_cluster_ops->area_resyncing(mddev, WRITE,
- bio->bi_iter.bi_sector, bio_end_sector(bio)))) {
+ bio->bi_iter.bi_sector, bio_end_sector(bio))) {
- /*
- * As the suspend_* range is controlled by userspace, we want
- * an interruptible wait.
- */
DEFINE_WAIT(w);
for (;;) {
- sigset_t full, old;
prepare_to_wait(&conf->wait_barrier,
- &w, TASK_INTERRUPTIBLE);
- if (bio_end_sector(bio) <= mddev->suspend_lo ||
- bio->bi_iter.bi_sector >= mddev->suspend_hi ||
- (mddev_is_clustered(mddev) &&
- !md_cluster_ops->area_resyncing(mddev, WRITE,
- bio->bi_iter.bi_sector,
- bio_end_sector(bio))))
+ &w, TASK_IDLE);
+ if (!md_cluster_ops->area_resyncing(mddev, WRITE,
+ bio->bi_iter.bi_sector,
+ bio_end_sector(bio)))
break;
- sigfillset(&full);
- sigprocmask(SIG_BLOCK, &full, &old);
schedule();
- sigprocmask(SIG_SETMASK, &old, NULL);
}
finish_wait(&conf->wait_barrier, &w);
}
+
+ /*
+ * Register the new request and wait if the reconstruction
+ * thread has put up a bar for new requests.
+ * Continue immediately if no resync is active currently.
+ */
wait_barrier(conf, bio->bi_iter.bi_sector);
r1_bio = alloc_r1bio(mddev, bio);
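
TASK_IDLE is TASK_UNINTERRUPTIBLE | TASK_NOLOAD: the sleeper ignores signals, so the sigfillset()/sigprocmask() dance around schedule() becomes unnecessary, and the sleep is not counted toward the load average. The resulting wait loop in general form, with a hypothetical predicate standing in for the area_resyncing() check:

#include <linux/sched.h>
#include <linux/wait.h>

/*
 * General form of the wait loop above. With TASK_IDLE no signal
 * blocking is needed around schedule(), and the sleeping task does
 * not inflate loadavg. cond() is a hypothetical predicate standing
 * in for the area_resyncing() check.
 */
static void wait_idle_until(wait_queue_head_t *wq,
			    bool (*cond)(void *), void *arg)
{
	DEFINE_WAIT(w);

	for (;;) {
		prepare_to_wait(wq, &w, TASK_IDLE);
		if (cond(arg))
			break;
		schedule();
	}
	finish_wait(wq, &w);
}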
@@ -1654,8 +1627,12 @@ static void print_conf(struct r1conf *conf)
static void close_sync(struct r1conf *conf)
{
- wait_all_barriers(conf);
- allow_all_barriers(conf);
+ int idx;
+
+ for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
+ _wait_barrier(conf, idx);
+ _allow_barrier(conf, idx);
+ }
mempool_destroy(conf->r1buf_pool);
conf->r1buf_pool = NULL;
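
Pairing _wait_barrier() with _allow_barrier() one bucket at a time matters: raising nr_pending on every bucket at once, as wait_all_barriers() did, could deadlock against freeze_array(), which waits for the unqueued pending count to drain while _wait_barrier() in turn blocks on the frozen array. For reference, freeze_array()'s wait, paraphrased from raid1.c:

/*
 * Paraphrased from raid1's freeze_array(): the array counts as frozen
 * once every pending request has completed or been queued for retry.
 * get_unqueued_pending() sums nr_pending minus nr_queued across all
 * barrier buckets (plus nr_sync_pending); 'extra' accounts for
 * requests the caller itself still holds.
 */
spin_lock_irq(&conf->resync_lock);
conf->array_frozen = 1;
wait_event_lock_irq_cmd(conf->wait_barrier,
			get_unqueued_pending(conf) == extra,
			conf->resync_lock,
			flush_pending_writes(conf));
spin_unlock_irq(&conf->resync_lock);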
@@ -3277,21 +3254,14 @@ static int raid1_reshape(struct mddev *mddev)
return 0;
}
-static void raid1_quiesce(struct mddev *mddev, int state)
+static void raid1_quiesce(struct mddev *mddev, int quiesce)
{
struct r1conf *conf = mddev->private;
- switch(state) {
- case 2: /* wake for suspend */
- wake_up(&conf->wait_barrier);
- break;
- case 1:
+ if (quiesce)
freeze_array(conf, 0);
- break;
- case 0:
+ else
unfreeze_array(conf);
- break;
- }
}
static void *raid1_takeover(struct mddev *mddev)
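
With the "wake for suspend" state 2 gone (the suspend_lo/suspend_hi wait now lives in md core, which is also why the first hunk drops <linux/sched/signal.h>), ->quiesce() reduces to an on/off toggle. A heavily simplified sketch of the caller side, loosely modeled on mddev_suspend()/mddev_resume() in md.c; locking, refcounting and I/O draining are omitted:

/*
 * Loosely modeled on md.c's mddev_suspend()/mddev_resume(). A
 * personality's ->quiesce() now only has to handle 1 (drain and
 * block I/O) and 0 (resume).
 */
static void suspend_sketch(struct mddev *mddev)
{
	mddev->suspended = 1;		/* hypothetical simplification */
	mddev->pers->quiesce(mddev, 1);
}

static void resume_sketch(struct mddev *mddev)
{
	mddev->pers->quiesce(mddev, 0);
	mddev->suspended = 0;
}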