author    Dennis Zhou <dennis@kernel.org>    2018-12-05 12:10:27 -0500
committer Jens Axboe <axboe@kernel.dk>       2018-12-07 22:26:36 -0700
commit    b978962ad4f7f9c06e5aa07b2a9b22f6d600456c
tree      2b32f74f61953f497e29cd1a90460bbd89e3296b /include/linux/blk-cgroup.h
parent    blkcg: fix ref count issue with bio_blkcg() using task_css
blkcg: update blkg_lookup_create() to do locking
To know when to create a blkg, the general pattern is to do a
blkg_lookup() and, if that fails, lock and do the lookup again, and if
that still fails, finally create. It doesn't make much sense for
everyone who wants to do creation to write this themselves.

This changes blkg_lookup_create() to do the locking and implement this
pattern. The old blkg_lookup_create() is renamed to
__blkg_lookup_create(). If a call site wants to do its own error
handling or already owns the queue lock, it can use
__blkg_lookup_create(). This will be used in upcoming patches.

Signed-off-by: Dennis Zhou <dennis@kernel.org>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Acked-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Liu Bo <bo.liu@linux.alibaba.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
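The new wrapper itself lives in block/blk-cgroup.c and is therefore
outside this diffstat. A minimal sketch of the pattern the message
describes (the irqsave lock flavor and the exact body are assumptions
here, not the verbatim patch):

struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	/* fast path: lockless lookup (caller holds the RCU read lock) */
	struct blkcg_gq *blkg = blkg_lookup(blkcg, q);

	if (unlikely(!blkg)) {
		unsigned long flags;

		/*
		 * Slow path: take the queue lock, look up again, and
		 * create the blkg if it still doesn't exist.
		 */
		spin_lock_irqsave(&q->queue_lock, flags);
		blkg = __blkg_lookup_create(blkcg, q);
		spin_unlock_irqrestore(&q->queue_lock, flags);
	}

	return blkg;
}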
Diffstat (limited to 'include/linux/blk-cgroup.h')
 include/linux/blk-cgroup.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index f619307171a6..b3b1a8187d23 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -181,6 +181,8 @@ extern struct cgroup_subsys_state * const blkcg_root_css;
 
 struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 				      struct request_queue *q, bool update_hint);
+struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+				      struct request_queue *q);
 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 				    struct request_queue *q);
 int blkcg_init_queue(struct request_queue *q);
@@ -799,7 +801,7 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 	blkg = blkg_lookup(blkcg, q);
 	if (unlikely(!blkg)) {
 		spin_lock_irq(&q->queue_lock);
-		blkg = blkg_lookup_create(blkcg, q);
+		blkg = __blkg_lookup_create(blkcg, q);
 		if (IS_ERR(blkg))
 			blkg = NULL;
 		spin_unlock_irq(&q->queue_lock);
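For contrast with the hunk above, where the caller already owns the
queue lock and so switches to the double-underscore variant, a call
site that does not hold the lock can now lean on the wrapper directly.
A hypothetical fragment for illustration (the surrounding context is
invented, not part of this patch):

	/* resolve or create the blkg without owning q->queue_lock */
	rcu_read_lock();
	blkg = blkg_lookup_create(blkcg, q);	/* locks internally on a miss */
	rcu_read_unlock();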