about summary refs log tree commit diff stats
path: root/drivers/md/bcache/writeback.c
diff options
context:
space:
mode:
author: Coly Li <colyli@suse.de> 2020-03-22 14:03:05 +0800
committer: Jens Axboe <axboe@kernel.dk> 2020-03-22 10:06:57 -0600
commiteb9b6666d6ca6f3d9f218fa23ec6135eee1ac3a7 (patch)
tree638296204ff802aa6d66222c4e528ea1b7208479 /drivers/md/bcache/writeback.c
parentbcache: optimize barrier usage for Rmw atomic bitops (diff)
downloadlinux-dev-eb9b6666d6ca6f3d9f218fa23ec6135eee1ac3a7.tar.xz
linux-dev-eb9b6666d6ca6f3d9f218fa23ec6135eee1ac3a7.zip
bcache: optimize barrier usage for atomic operations
The idea of this patch is from Davidlohr Bueso, he posts a patch for bcache to optimize barrier usage for read-modify-write atomic bitops. Indeed such optimization can also apply on other locations where smp_mb() is used before or after an atomic operation. This patch replaces smp_mb() with smp_mb__before_atomic() or smp_mb__after_atomic() in btree.c and writeback.c, where it is used to synchronize memory cache just earlier on other cores. Although the locations are not on hot code path, it is always not bad to make things a little better. Signed-off-by: Coly Li <colyli@suse.de> Cc: Davidlohr Bueso <dave@stgolabs.net> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/md/bcache/writeback.c')
-rw-r--r--drivers/md/bcache/writeback.c6
1 files changed, 3 insertions, 3 deletions
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 72ba6d015786..3f7641fb28d5 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -854,7 +854,7 @@ static int bch_dirty_init_thread(void *arg)
else {
atomic_set(&state->enough, 1);
/* Update state->enough earlier */
- smp_mb();
+ smp_mb__after_atomic();
goto out;
}
skip_nr--;
@@ -873,7 +873,7 @@ static int bch_dirty_init_thread(void *arg)
out:
/* In order to wake up state->wait in time */
- smp_mb();
+ smp_mb__before_atomic();
if (atomic_dec_and_test(&state->started))
wake_up(&state->wait);
@@ -932,7 +932,7 @@ void bch_sectors_dirty_init(struct bcache_device *d)
for (i = 0; i < state->total_threads; i++) {
/* Fetch latest state->enough earlier */
- smp_mb();
+ smp_mb__before_atomic();
if (atomic_read(&state->enough))
break;