author		David Sterba <dsterba@suse.com>		2019-02-12 16:51:18 +0100
committer	David Sterba <dsterba@suse.com>		2019-02-25 14:13:38 +0100
commit		c8352942745e260d68dfdfdfbecd276c2aac3277
tree		d733d79c6447923c9ddb1ec48f372aa164b19fdd /fs/btrfs
parent		btrfs: scrub: convert scrub_workers_refcnt to refcount_t
btrfs: scrub: add assertions for worker pointers
The scrub worker pointers are not NULL iff the scrub is running, so reset
them back once the last reference is dropped. Add assertions to the initial
phase of scrub to verify that.

Reviewed-by: Anand Jain <anand.jain@oracle.com>
Signed-off-by: David Sterba <dsterba@suse.com>
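As a standalone illustration of the invariant the patch asserts, here is a
minimal userspace C sketch (not kernel code; pool, pool_get() and pool_put()
are hypothetical names, with malloc()/free() standing in for
btrfs_alloc_workqueue()/btrfs_destroy_workqueue()). A shared pointer guarded
by a mutex and a reference count is non-NULL iff the count is non-zero: the
first get asserts NULL before allocating, and the last put resets the
pointer back to NULL.

/*
 * Minimal userspace sketch of the invariant: a shared worker pointer
 * is non-NULL iff its refcount is non-zero. All names are hypothetical,
 * not taken from btrfs.
 */
#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int refcnt;	/* protected by lock */
static void *pool;		/* invariant: non-NULL iff refcnt > 0 */

static void pool_get(void)
{
	pthread_mutex_lock(&lock);
	if (refcnt == 0) {
		/* first user: pointer must have been reset by the last put */
		assert(pool == NULL);
		pool = malloc(64);	/* stand-in for btrfs_alloc_workqueue() */
	}
	refcnt++;
	pthread_mutex_unlock(&lock);
}

static void pool_put(void)
{
	void *to_free = NULL;

	pthread_mutex_lock(&lock);
	if (--refcnt == 0) {
		/* last user: snapshot and reset so the next get sees NULL */
		to_free = pool;
		pool = NULL;
	}
	pthread_mutex_unlock(&lock);
	free(to_free);	/* destroy outside the lock, as btrfs_scrub_dev() does */
}

int main(void)
{
	pool_get();
	pool_put();
	assert(pool == NULL);
	return 0;
}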
Diffstat (limited to 'fs/btrfs')
-rw-r--r--	fs/btrfs/ctree.h	4
-rw-r--r--	fs/btrfs/scrub.c	7
2 files changed, 11 insertions, 0 deletions
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 7efa1edb30cd..f92f97304e69 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1075,6 +1075,10 @@ struct btrfs_fs_info {
 	atomic_t scrubs_paused;
 	atomic_t scrub_cancel_req;
 	wait_queue_head_t scrub_pause_wait;
+	/*
+	 * The worker pointers are NULL iff the refcount is 0, ie. scrub is not
+	 * running.
+	 */
 	refcount_t scrub_workers_refcnt;
 	struct btrfs_workqueue *scrub_workers;
 	struct btrfs_workqueue *scrub_wr_completion_workers;
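A note on ASSERT(): it is btrfs's own debug assertion, compiled out unless
CONFIG_BTRFS_ASSERT is set, so the checks added below cost nothing in
production builds. Its ctree.h definition at the time read roughly as follows
(paraphrased from that era's source; the exact helper signature may differ):

#ifdef CONFIG_BTRFS_ASSERT
/* report the failed expression and crash, debug builds only */
static inline void assfail(char *expr, char *file, int line)
{
	pr_err("assertion failed: %s, file: %s, line: %d\n",
	       expr, file, line);
	BUG();
}

#define ASSERT(expr)	\
	(likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
#else
#define ASSERT(expr)	((void)0)
#endif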
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index d20150d68a90..669bedfec4a9 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3744,17 +3744,20 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
 	lockdep_assert_held(&fs_info->scrub_lock);
 	if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
+		ASSERT(fs_info->scrub_workers == NULL);
 		fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
 				flags, is_dev_replace ? 1 : max_active, 4);
 		if (!fs_info->scrub_workers)
 			goto fail_scrub_workers;
+		ASSERT(fs_info->scrub_wr_completion_workers == NULL);
 		fs_info->scrub_wr_completion_workers =
 			btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
 					      max_active, 2);
 		if (!fs_info->scrub_wr_completion_workers)
 			goto fail_scrub_wr_completion_workers;
+		ASSERT(fs_info->scrub_parity_workers == NULL);
 		fs_info->scrub_parity_workers =
 			btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
 					      max_active, 2);
@@ -3934,6 +3937,10 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 		scrub_workers = fs_info->scrub_workers;
 		scrub_wr_comp = fs_info->scrub_wr_completion_workers;
 		scrub_parity = fs_info->scrub_parity_workers;
+
+		fs_info->scrub_workers = NULL;
+		fs_info->scrub_wr_completion_workers = NULL;
+		fs_info->scrub_parity_workers = NULL;
 	}
 	mutex_unlock(&fs_info->scrub_lock);
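For context beyond the hunk: the locals snapshotted above (scrub_workers,
scrub_wr_comp, scrub_parity) exist so that the workqueues can be torn down
after scrub_lock is dropped, keeping the potentially slow teardown out of the
critical section. Immediately after the unlock, btrfs_scrub_dev() continues
roughly as follows (quoted from memory of that era's source, not part of this
diff):

	btrfs_destroy_workqueue(scrub_workers);
	btrfs_destroy_workqueue(scrub_wr_comp);
	btrfs_destroy_workqueue(scrub_parity);

btrfs_destroy_workqueue() returns early on a NULL pointer, which covers the
path where the refcount did not reach zero and the locals kept their NULL
initializers.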