path: root/fs/btrfs/async-thread.c
author		Omar Sandoval <osandov@fb.com>	2019-09-16 11:30:58 -0700
committer	David Sterba <dsterba@suse.com>	2019-11-18 12:46:49 +0100
commit		c9eb55db8439057165f106164622c146cdd59468 (patch)
tree		baf390f2f39d5262b7bfaa43df57bc30945dd7ad /fs/btrfs/async-thread.c
parent		btrfs: get rid of unique workqueue helper functions (diff)
btrfs: get rid of pointless wtag variable in async-thread.c
Commit ac0c7cf8be00 ("btrfs: fix crash when tracepoint arguments are
freed by wq callbacks") added a void pointer, wtag, which is passed into
trace_btrfs_all_work_done() instead of the freed work item. This is
silly for a few reasons:

1. The freed work item still has the same address.
2. work is still in scope after it's freed, so assigning wtag doesn't
   stop anyone from using it.
3. The tracepoint has always taken a void * argument, so assigning wtag
   doesn't actually make things any more type-safe. (Note that the
   original bug in commit bc074524e123 ("btrfs: prefix fsid to all trace
   events") was that the void * was implicitly cast when it was passed
   to btrfs_work_owner() in the trace point itself.)

Instead, let's add some clearer warnings as comments.

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Omar Sandoval <osandov@fb.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
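[Editor's illustration: a minimal, self-contained C sketch of the reasoning
above. It is not btrfs code; work_item and trace_all_work_done() are
hypothetical stand-ins for the real structures and for
trace_btrfs_all_work_done(). It shows that aliasing a freed pointer into a
second variable changes nothing: both variables hold the same address, and a
void * parameter accepts either one.]

	#include <stdio.h>
	#include <stdlib.h>

	struct work_item { int id; };

	/* Stand-in for trace_btrfs_all_work_done(): because the parameter
	 * is a void *, any pointer is accepted, and the tracepoint can
	 * only treat it as an opaque tag. */
	static void trace_all_work_done(const void *tag)
	{
		printf("work done: tag=%p\n", tag);
	}

	int main(void)
	{
		struct work_item *work = malloc(sizeof(*work));
		void *wtag = work;	/* the alias this patch removes */

		free(work);

		/* 1. The freed item still has the same address, so wtag
		 *    and work are indistinguishable. */
		/* 2. work is still in scope, so nothing stops this call: */
		trace_all_work_done(work);
		/* ...which is byte-for-byte the same call as: */
		trace_all_work_done(wtag);

		/* 3. Either call is safe only because the pointer is never
		 *    dereferenced, which is exactly what the new comments
		 *    in the diff below spell out. */
		return 0;
	}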
Diffstat (limited to 'fs/btrfs/async-thread.c')
-rw-r--r--	fs/btrfs/async-thread.c	21
1 file changed, 8 insertions(+), 13 deletions(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 3f3110975f88..b97ae1b03417 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -226,7 +226,6 @@ static void run_ordered_work(struct __btrfs_workqueue *wq,
struct btrfs_work *work;
spinlock_t *lock = &wq->list_lock;
unsigned long flags;
- void *wtag;
bool free_self = false;
while (1) {
@@ -281,21 +280,19 @@ static void run_ordered_work(struct __btrfs_workqueue *wq,
} else {
/*
* We don't want to call the ordered free functions with
- * the lock held though. Save the work as tag for the
- * trace event, because the callback could free the
- * structure.
+ * the lock held.
*/
- wtag = work;
work->ordered_free(work);
- trace_btrfs_all_work_done(wq->fs_info, wtag);
+ /* NB: work must not be dereferenced past this point. */
+ trace_btrfs_all_work_done(wq->fs_info, work);
}
}
spin_unlock_irqrestore(lock, flags);
if (free_self) {
- wtag = self;
self->ordered_free(self);
- trace_btrfs_all_work_done(wq->fs_info, wtag);
+ /* NB: self must not be dereferenced past this point. */
+ trace_btrfs_all_work_done(wq->fs_info, self);
}
}
@@ -304,7 +301,6 @@ static void btrfs_work_helper(struct work_struct *normal_work)
struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
normal_work);
struct __btrfs_workqueue *wq;
- void *wtag;
int need_order = 0;
/*
@@ -318,8 +314,6 @@ static void btrfs_work_helper(struct work_struct *normal_work)
if (work->ordered_func)
need_order = 1;
wq = work->wq;
- /* Safe for tracepoints in case work gets freed by the callback */
- wtag = work;
trace_btrfs_work_sched(work);
thresh_exec_hook(wq);
@@ -327,9 +321,10 @@ static void btrfs_work_helper(struct work_struct *normal_work)
if (need_order) {
set_bit(WORK_DONE_BIT, &work->flags);
run_ordered_work(wq, work);
+ } else {
+ /* NB: work must not be dereferenced past this point. */
+ trace_btrfs_all_work_done(wq->fs_info, work);
}
- if (!need_order)
- trace_btrfs_all_work_done(wq->fs_info, wtag);
}
void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
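
[Editor's illustration: the pattern the patch settles on can be read out of
the diff: anything still needed after a callback that may free the work item
(here, wq and thus wq->fs_info) is saved up front, and the pointer itself is
afterwards passed around only as an opaque value. A minimal sketch of that
shape, with hypothetical names standing in for the btrfs internals:]

	#include <stdlib.h>

	struct workqueue { const void *fs_info; };

	struct work {
		struct workqueue *wq;
		void (*ordered_free)(struct work *self);
	};

	/* Stand-in for trace_btrfs_all_work_done(): takes the second
	 * argument purely as an opaque tag and never dereferences it. */
	static void trace_all_done(const void *fs_info, const void *tag)
	{
		(void)fs_info;
		(void)tag;
	}

	static void work_helper(struct work *work)
	{
		/* Save everything we still need *before* the free
		 * callback runs. */
		struct workqueue *wq = work->wq;

		work->ordered_free(work);
		/* NB: work must not be dereferenced past this point. */
		trace_all_done(wq->fs_info, work);
	}

	static void free_work(struct work *self)
	{
		free(self);
	}

	int main(void)
	{
		struct workqueue wq = { .fs_info = "fs" };
		struct work *w = malloc(sizeof(*w));

		w->wq = &wq;
		w->ordered_free = free_work;
		work_helper(w);
		return 0;
	}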