author     Joe Thornber <ejt@redhat.com>       2017-11-08 05:56:11 -0500
committer  Mike Snitzer <snitzer@redhat.com>   2017-11-10 15:45:01 -0500
commit     1e72a8e809f030bd4e318a49c497ee38e47e82c1 (patch)
tree       8cac372c0bc3793760bda1f6f54e612974042217 /drivers/md
parent     dm raid: fix panic when attempting to force a raid to sync (diff)
dm cache policy smq: handle races with queuing background_work
The background_tracker holds a set of promotions/demotions that the cache policy wishes the core target to implement. When adding a new operation to the tracker it is possible that an operation on the same block is already present (but in practice this doesn't appear to be happening). Catch these situations and do the appropriate cleanup.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
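As a quick illustration of the failure mode the message describes, here is a minimal user-space sketch (plain C, not the dm-cache code) of a tracker that holds at most one pending operation per block and therefore rejects a second queue attempt for the same block. The names block_tracker, tracker_queue and TRACKER_SIZE are hypothetical stand-ins, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

#define TRACKER_SIZE 64		/* hypothetical: number of blocks tracked, keyed directly */

struct block_tracker {
	bool pending[TRACKER_SIZE];	/* at most one pending op per block */
};

/* Returns 0 if the op was queued, -1 if work for this block is already tracked. */
static int tracker_queue(struct block_tracker *bt, unsigned block)
{
	if (bt->pending[block])
		return -1;	/* the race the patch handles: duplicate work for one block */
	bt->pending[block] = true;
	return 0;
}

int main(void)
{
	struct block_tracker bt = { { false } };

	printf("first queue attempt:  %d\n", tracker_queue(&bt, 7));	/* 0: accepted */
	printf("second queue attempt: %d\n", tracker_queue(&bt, 7));	/* -1: already present */
	return 0;
}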
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-cache-policy-smq.c | 17
1 file changed, 14 insertions, 3 deletions
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index e5eb9c9b4bc8..42e5c4b59889 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1174,12 +1174,16 @@ static void queue_writeback(struct smq_policy *mq)
 		work.cblock = infer_cblock(mq, e);
 
 		r = btracker_queue(mq->bg_work, &work, NULL);
-		WARN_ON_ONCE(r); // FIXME: finish, I think we have to get rid of this race.
+		if (r) {
+			clear_pending(mq, e);
+			q_push_front(&mq->dirty, e);
+		}
 	}
 }
 
 static void queue_demotion(struct smq_policy *mq)
 {
+	int r;
 	struct policy_work work;
 	struct entry *e;
 
@@ -1199,12 +1203,17 @@ static void queue_demotion(struct smq_policy *mq)
 	work.op = POLICY_DEMOTE;
 	work.oblock = e->oblock;
 	work.cblock = infer_cblock(mq, e);
-	btracker_queue(mq->bg_work, &work, NULL);
+	r = btracker_queue(mq->bg_work, &work, NULL);
+	if (r) {
+		clear_pending(mq, e);
+		q_push_front(&mq->clean, e);
+	}
 }
 
 static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
 			    struct policy_work **workp)
 {
+	int r;
 	struct entry *e;
 	struct policy_work work;
 
@@ -1234,7 +1243,9 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
 	work.op = POLICY_PROMOTE;
 	work.oblock = oblock;
 	work.cblock = infer_cblock(mq, e);
-	btracker_queue(mq->bg_work, &work, workp);
+	r = btracker_queue(mq->bg_work, &work, workp);
+	if (r)
+		free_entry(&mq->cache_alloc, e);
 }
 
 /*----------------------------------------------------------------*/
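The three hunks above apply the same pattern in queue_writeback(), queue_demotion() and queue_promotion(): do the policy-side bookkeeping, attempt btracker_queue(), and undo the bookkeeping (clear_pending()/q_push_front(), or free_entry() for the freshly allocated promotion entry) when the tracker refuses the work. Below is a minimal user-space sketch of that caller-side rollback pattern, assuming nothing about the kernel APIs; stub_queue, policy_state and queue_demotion_like are invented names for illustration, not dm-cache functions.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stub: always reports that work for this block is already
 * tracked, so the example exercises the rollback path added by the patch. */
static int stub_queue(unsigned block)
{
	(void)block;
	return -1;
}

/* Minimal stand-in for the policy-side state that must be undone. */
struct policy_state {
	bool entry_pending;	/* analogous to the entry's pending flag */
	bool on_clean_queue;	/* analogous to the entry sitting on mq->clean */
};

static void queue_demotion_like(struct policy_state *s, unsigned block)
{
	int r;

	/* Bookkeeping done before queuing, as in the real queue_demotion():
	 * pull the entry off its queue and mark it pending. */
	s->on_clean_queue = false;
	s->entry_pending = true;

	r = stub_queue(block);
	if (r) {
		/* Rollback analogous to clear_pending() + q_push_front(). */
		s->entry_pending = false;
		s->on_clean_queue = true;
		printf("block %u: tracker refused the work, entry restored\n", block);
		return;
	}

	printf("block %u: demotion queued\n", block);
}

int main(void)
{
	struct policy_state s = { .entry_pending = false, .on_clean_queue = true };

	queue_demotion_like(&s, 3);
	return 0;
}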