From 03d8e11142a893ad322285d3c8a08e88b570cda1 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Fri, 13 Apr 2012 13:11:32 -0700
Subject: blkcg: add request_queue->root_blkg

With per-queue policy activation, root blkg creation will be moved to
blkcg core. Add q->root_blkg in preparation. For blk-throtl, this
replaces throtl_data->root_tg; however, cfq needs to keep
cfqd->root_group for !CONFIG_CFQ_GROUP_IOSCHED.

This is to prepare for per-queue policy activation and doesn't cause
any functional difference.

Signed-off-by: Tejun Heo
Cc: Vivek Goyal
Signed-off-by: Jens Axboe
---
 block/blk-throttle.c | 16 ++++++++++------
 block/cfq-iosched.c  |  4 +++-
 2 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 6f1bfdf9a1b7..8c520fad6885 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -97,7 +97,6 @@ struct throtl_data
 	/* service tree for active throtl groups */
 	struct throtl_rb_root tg_service_tree;
 
-	struct throtl_grp *root_tg;
 	struct request_queue *queue;
 
 	/* Total Number of queued bios on READ and WRITE lists */
@@ -131,6 +130,11 @@ static inline struct blkio_group *tg_to_blkg(struct throtl_grp *tg)
 	return pdata_to_blkg(tg);
 }
 
+static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
+{
+	return blkg_to_tg(td->queue->root_blkg);
+}
+
 enum tg_state_flags {
 	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
 };
@@ -261,7 +265,7 @@ throtl_grp *throtl_lookup_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
 	 * Avoid lookup in this case
 	 */
 	if (blkcg == &blkio_root_cgroup)
-		return td->root_tg;
+		return td_root_tg(td);
 
 	return blkg_to_tg(blkg_lookup(blkcg, td->queue));
 }
@@ -277,7 +281,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
	 * Avoid lookup in this case
	 */
	if (blkcg == &blkio_root_cgroup) {
-		tg = td->root_tg;
+		tg = td_root_tg(td);
	} else {
		struct blkio_group *blkg;
 
@@ -287,7 +291,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
		if (!IS_ERR(blkg))
			tg = blkg_to_tg(blkg);
		else if (!blk_queue_dead(q))
-			tg = td->root_tg;
+			tg = td_root_tg(td);
	}
	return tg;
 }
@@ -1245,12 +1249,12 @@ int blk_throtl_init(struct request_queue *q)
 
 	blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
 	if (!IS_ERR(blkg))
-		td->root_tg = blkg_to_tg(blkg);
+		q->root_blkg = blkg;
 
 	spin_unlock_irq(q->queue_lock);
 	rcu_read_unlock();
 
-	if (!td->root_tg) {
+	if (!q->root_blkg) {
 		kfree(td);
 		return -ENOMEM;
 	}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index de95f9a2acf8..86440e04f3ee 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3964,8 +3964,10 @@ static int cfq_init_queue(struct request_queue *q)
 	spin_lock_irq(q->queue_lock);
 
 	blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
-	if (!IS_ERR(blkg))
+	if (!IS_ERR(blkg)) {
+		q->root_blkg = blkg;
 		cfqd->root_group = blkg_to_cfqg(blkg);
+	}
 
 	spin_unlock_irq(q->queue_lock);
 	rcu_read_unlock();
--
cgit v1.2.3-59-g8ed1b
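
Note (not part of the patch above): the sketch below is a minimal, self-contained C model of the pointer relationships this change introduces. The struct layouts and the pdata back-pointer are simplified stand-ins invented for illustration -- the real blkio_group, throtl_grp and request_queue carry far more state, and the kernel's blkg_to_tg()/pdata_to_blkg() are container_of-style conversions -- but it shows why the td_root_tg() helper can replace the cached throtl_data->root_tg once the queue itself records its root blkg.

/*
 * Simplified userspace model of the q->root_blkg change -- NOT the real
 * kernel definitions.  Names mirror the patch; the layouts below are
 * invented so the example compiles and runs as a plain C program.
 */
#include <stdio.h>

struct blkio_group {
	void *pdata;			/* policy private data (the throtl_grp) */
};

struct request_queue {
	struct blkio_group *root_blkg;	/* the new field added by this patch */
};

struct throtl_grp {
	struct blkio_group blkg;	/* embedded group, set up at creation */
	unsigned int has_limits;	/* stand-in for real throttle state */
};

struct throtl_data {
	struct request_queue *queue;
	/* note: no cached root_tg pointer any more */
};

/* simplified stand-in for blkg_to_tg(): pdata points back at the tg */
static struct throtl_grp *blkg_to_tg(struct blkio_group *blkg)
{
	return blkg->pdata;
}

/* the helper the patch introduces: resolve the root tg via the queue */
static struct throtl_grp *td_root_tg(struct throtl_data *td)
{
	return blkg_to_tg(td->queue->root_blkg);
}

int main(void)
{
	struct throtl_grp root_tg = { .has_limits = 0 };
	struct request_queue q = { 0 };
	struct throtl_data td = { .queue = &q };

	root_tg.blkg.pdata = &root_tg;	/* done by group creation in the kernel */
	q.root_blkg = &root_tg.blkg;	/* what blk_throtl_init() now does */

	/* every former td->root_tg user resolves through the queue instead */
	printf("root tg resolved: %p\n", (void *)td_root_tg(&td));
	return 0;
}

As the changelog says, the effect is preparatory only: once blkcg core creates the root blkg per queue, blk-throttle can always recover its root group from td->queue->root_blkg instead of being handed a cached pointer, while cfq still keeps cfqd->root_group for the !CONFIG_CFQ_GROUP_IOSCHED case where no blkg exists.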