author		Mikulas Patocka <mpatocka@redhat.com>	2021-06-25 15:33:02 -0400
committer	Mike Snitzer <snitzer@redhat.com>	2021-06-25 16:04:01 -0400
commit		95b88f4d71cb953e02206be3c757083601391a0f (patch)
tree		5330af07d1fdf5a5c2edb48d07fc377b704c532f /drivers/md
parent		dm io tracker: factor out IO tracker (diff)
dm writecache: pause writeback if cache full and origin being written directly
Implementation reuses dm_io_tracker, which until now was only used by dm-cache, to track whether any writes were issued directly to the origin (because the cache is full) within the last second. If so, writeback is paused for a second.

This change improves performance when the cache is full and IO is issued directly to the origin device (rather than through the cache).

Depends-on: d53f1fafec9d ("dm writecache: do direct write if the cache is full")
Suggested-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-writecache.c	21
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 2eb7d7bcdfb1..d70342c9003a 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -15,6 +15,8 @@
#include <linux/dax.h>
#include <linux/pfn_t.h>
#include <linux/libnvdimm.h>
+#include <linux/delay.h>
+#include "dm-io-tracker.h"
#define DM_MSG_PREFIX "writecache"
@@ -183,6 +185,8 @@ struct dm_writecache {
struct work_struct writeback_work;
struct work_struct flush_work;
+ struct dm_io_tracker iot;
+
struct dm_io_client *dm_io;
raw_spinlock_t endio_list_lock;
@@ -1466,6 +1470,10 @@ bio_copy:
}
unlock_remap_origin:
+ if (bio_data_dir(bio) != READ) {
+ dm_iot_io_begin(&wc->iot, 1);
+ bio->bi_private = (void *)2;
+ }
bio_set_dev(bio, wc->dev->bdev);
wc_unlock(wc);
return DM_MAPIO_REMAPPED;
@@ -1496,11 +1504,13 @@ static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t
{
struct dm_writecache *wc = ti->private;
- if (bio->bi_private != NULL) {
+ if (bio->bi_private == (void *)1) {
int dir = bio_data_dir(bio);
if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
wake_up(&wc->bio_in_progress_wait[dir]);
+ } else if (bio->bi_private == (void *)2) {
+ dm_iot_io_end(&wc->iot, 1);
}
return 0;
}
@@ -1827,6 +1837,13 @@ static void writecache_writeback(struct work_struct *work)
dm_kcopyd_client_flush(wc->dm_kcopyd);
}
+ if (!wc->writeback_all && !dm_suspended(wc->ti)) {
+ while (!dm_iot_idle_for(&wc->iot, HZ)) {
+ cond_resched();
+ msleep(1000);
+ }
+ }
+
wc_lock(wc);
restart:
if (writecache_has_error(wc)) {
@@ -2140,6 +2157,8 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
INIT_WORK(&wc->writeback_work, writecache_writeback);
INIT_WORK(&wc->flush_work, writecache_flush_work);
+ dm_iot_init(&wc->iot);
+
raw_spin_lock_init(&wc->endio_list_lock);
INIT_LIST_HEAD(&wc->endio_list);
wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio");
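For readers unfamiliar with the io-tracker pattern the patch builds on, below is a minimal userspace sketch of the same idea. The names (struct io_tracker, iot_io_begin, iot_io_end, iot_idle_for) are simplified stand-ins, not the kernel's dm-io-tracker API, and one-second granularity via time()/sleep() stands in for jiffies and msleep(); only the shape of the logic follows the diff above: count writes that go straight to the origin, record when the last one completed, and have the writeback path poll until the origin has been idle for a full second.

/*
 * Illustrative sketch only (assumption: simplified model, not the kernel's
 * dm-io-tracker implementation). Tracks in-flight writes to the origin and
 * the time the last one completed; writeback waits for a quiet second.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct io_tracker {
	unsigned long in_flight;	/* origin writes currently outstanding */
	time_t last_end;		/* when the last origin write completed */
};

static void iot_init(struct io_tracker *t)
{
	t->in_flight = 0;
	t->last_end = time(NULL);
}

static void iot_io_begin(struct io_tracker *t, unsigned long n)
{
	t->in_flight += n;
}

static void iot_io_end(struct io_tracker *t, unsigned long n)
{
	t->in_flight -= n;
	t->last_end = time(NULL);
}

/* True if nothing is in flight and nothing completed within the last @secs. */
static bool iot_idle_for(struct io_tracker *t, time_t secs)
{
	return t->in_flight == 0 && (time(NULL) - t->last_end) >= secs;
}

int main(void)
{
	struct io_tracker iot;

	iot_init(&iot);

	/* A write bypasses the full cache and goes straight to the origin... */
	iot_io_begin(&iot, 1);
	/* ...and later completes. */
	iot_io_end(&iot, 1);

	/* Writeback side: pause until the origin has been quiet for a second. */
	while (!iot_idle_for(&iot, 1)) {
		printf("origin busy, pausing writeback\n");
		sleep(1);
	}
	printf("origin idle, resuming writeback\n");
	return 0;
}

In the actual patch, the (void *)2 tag stored in bi_private for bios remapped to the origin is what lets writecache_end_io tell which completions should call dm_iot_io_end() on the tracker, as opposed to the pre-existing (void *)1 bios counted in bio_in_progress.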