Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--  block/blk-cgroup.c | 215
1 file changed, 146 insertions(+), 69 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 0ac817b750db..9f97da52d006 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -9,27 +9,33 @@
*
* Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
* Nauman Rafique <nauman@google.com>
+ *
+ * For policy-specific per-blkcg data:
+ * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
+ * Arianna Avanzini <avanzini.arianna@gmail.com>
*/
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
+#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
-#include "blk-cgroup.h"
+#include <linux/blk-cgroup.h>
#include "blk.h"
#define MAX_KEY_LEN 100
static DEFINE_MUTEX(blkcg_pol_mutex);
-struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT,
- .cfq_leaf_weight = 2 * CFQ_WEIGHT_DEFAULT, };
+struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);
+struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
+
static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
static bool blkcg_policy_enabled(struct request_queue *q,
@@ -179,6 +185,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
struct blkcg_gq *new_blkg)
{
struct blkcg_gq *blkg;
+ struct bdi_writeback_congested *wb_congested;
int i, ret;
WARN_ON_ONCE(!rcu_read_lock_held());
@@ -190,22 +197,30 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
goto err_free_blkg;
}
+ wb_congested = wb_congested_get_create(&q->backing_dev_info,
+ blkcg->css.id, GFP_ATOMIC);
+ if (!wb_congested) {
+ ret = -ENOMEM;
+ goto err_put_css;
+ }
+
/* allocate */
if (!new_blkg) {
new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
if (unlikely(!new_blkg)) {
ret = -ENOMEM;
- goto err_put_css;
+ goto err_put_congested;
}
}
blkg = new_blkg;
+ blkg->wb_congested = wb_congested;
/* link parent */
if (blkcg_parent(blkcg)) {
blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
if (WARN_ON_ONCE(!blkg->parent)) {
ret = -EINVAL;
- goto err_put_css;
+ goto err_put_congested;
}
blkg_get(blkg->parent);
}
@@ -235,18 +250,15 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
blkg->online = true;
spin_unlock(&blkcg->lock);
- if (!ret) {
- if (blkcg == &blkcg_root) {
- q->root_blkg = blkg;
- q->root_rl.blkg = blkg;
- }
+ if (!ret)
return blkg;
- }
/* @blkg failed to be fully initialized, use the usual release path */
blkg_put(blkg);
return ERR_PTR(ret);
+err_put_congested:
+ wb_congested_put(wb_congested);
err_put_css:
css_put(&blkcg->css);
err_free_blkg:
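The reworked error path above follows the usual kernel unwind idiom: one label per acquired resource, released in reverse order of acquisition, so taking the new wb_congested reference costs exactly one extra label. A minimal standalone sketch of the shape, with hypothetical stand-ins (acquire_css, acquire_congested and friends are not blk-cgroup functions):

#include <stddef.h>

extern int acquire_css(void);		/* stands in for the css reference */
extern void *acquire_congested(void);	/* stands in for wb_congested_get_create() */
extern void *alloc_group(void);		/* stands in for blkg_alloc() */
extern void release_congested(void *);	/* stands in for wb_congested_put() */
extern void release_css(void);		/* stands in for css_put() */

void *create_group(void)
{
	void *congested, *grp;

	if (acquire_css())
		return NULL;

	congested = acquire_congested();
	if (!congested)
		goto err_put_css;

	grp = alloc_group();
	if (!grp)
		goto err_put_congested;	/* the one new label this patch adds */

	return grp;

err_put_congested:
	release_congested(congested);	/* release in reverse acquisition order */
err_put_css:
	release_css();
	return NULL;
}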
@@ -340,15 +352,6 @@ static void blkg_destroy(struct blkcg_gq *blkg)
rcu_assign_pointer(blkcg->blkg_hint, NULL);
/*
- * If root blkg is destroyed. Just clear the pointer since root_rl
- * does not take reference on root blkg.
- */
- if (blkcg == &blkcg_root) {
- blkg->q->root_blkg = NULL;
- blkg->q->root_rl.blkg = NULL;
- }
-
- /*
* Put the reference taken at the time of creation so that when all
* queues are gone, group can be destroyed.
*/
@@ -402,6 +405,8 @@ void __blkg_release_rcu(struct rcu_head *rcu_head)
if (blkg->parent)
blkg_put(blkg->parent);
+ wb_congested_put(blkg->wb_congested);
+
blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);
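The put added here balances the wb_congested_get_create() taken in blkg_create(): a blkg pins its wb_congested for its entire lifetime and releases it only from the RCU release path, after dropping the parent reference. A simplified ownership sketch (types and helpers abbreviated, not the real definitions):

struct congested;
struct group {
	struct group *parent;
	struct congested *wb_congested;	/* ref held from creation to release */
};

extern void group_put(struct group *);
extern void congested_put(struct congested *);
extern void group_free(struct group *);

static void group_release(struct group *g)
{
	if (g->parent)
		group_put(g->parent);	/* drop the parent ref first */
	congested_put(g->wb_congested);	/* then the wb_congested ref */
	group_free(g);			/* finally free the group itself */
}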
@@ -809,6 +814,8 @@ static void blkcg_css_offline(struct cgroup_subsys_state *css)
}
spin_unlock_irq(&blkcg->lock);
+
+ wb_blkcg_offline(blkcg);
}
static void blkcg_css_free(struct cgroup_subsys_state *css)
@@ -823,6 +830,8 @@ static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct blkcg *blkcg;
+ struct cgroup_subsys_state *ret;
+ int i;
if (!parent_css) {
blkcg = &blkcg_root;
@@ -830,17 +839,51 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
}
blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
- if (!blkcg)
- return ERR_PTR(-ENOMEM);
+ if (!blkcg) {
+ ret = ERR_PTR(-ENOMEM);
+ goto free_blkcg;
+ }
+
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+ struct blkcg_policy_data *cpd;
+
+ /*
+ * If the policy hasn't been attached yet, there is nothing to do
+ * here: its per-cgroup data will be allocated when the policy is
+ * activated on a queue. Otherwise, check whether the policy
+ * requires any specific per-cgroup data: if it does, allocate
+ * and initialize it.
+ */
+ if (!pol || !pol->cpd_size)
+ continue;
+
+ BUG_ON(blkcg->pd[i]);
+ cpd = kzalloc(pol->cpd_size, GFP_KERNEL);
+ if (!cpd) {
+ ret = ERR_PTR(-ENOMEM);
+ goto free_pd_blkcg;
+ }
+ blkcg->pd[i] = cpd;
+ cpd->plid = i;
+ pol->cpd_init_fn(blkcg);
+ }
- blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
- blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
done:
spin_lock_init(&blkcg->lock);
INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
INIT_HLIST_HEAD(&blkcg->blkg_list);
-
+#ifdef CONFIG_CGROUP_WRITEBACK
+ INIT_LIST_HEAD(&blkcg->cgwb_list);
+#endif
return &blkcg->css;
+
+free_pd_blkcg:
+ for (i--; i >= 0; i--)
+ kfree(blkcg->pd[i]);
+
+free_blkcg:
+ kfree(blkcg);
+ return ret;
}
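The free_pd_blkcg unwind above walks back only over the slots the loop already filled, starting from i - 1; slots that were skipped stay NULL from kzalloc(), so freeing them is a harmless no-op. A standalone userspace sketch of the pattern, with MAX_SLOTS playing the role of BLKCG_MAX_POLS (illustrative names, not the kernel API):

#include <stdlib.h>

#define MAX_SLOTS 6	/* illustrative, like BLKCG_MAX_POLS */

int alloc_slots(void *slots[MAX_SLOTS], const size_t sizes[MAX_SLOTS])
{
	int i;

	for (i = 0; i < MAX_SLOTS; i++) {
		slots[i] = NULL;
		if (!sizes[i])		/* like !pol || !pol->cpd_size: skip */
			continue;
		slots[i] = calloc(1, sizes[i]);
		if (!slots[i])
			goto free_partial;
	}
	return 0;

free_partial:
	/* Unwind only what this loop managed to allocate: start at i - 1. */
	for (i--; i >= 0; i--)
		free(slots[i]);	/* free(NULL) is a no-op for skipped slots */
	return -1;
}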
/**
@@ -855,9 +898,45 @@ done:
*/
int blkcg_init_queue(struct request_queue *q)
{
- might_sleep();
+ struct blkcg_gq *new_blkg, *blkg;
+ bool preloaded;
+ int ret;
+
+ new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
+ if (!new_blkg)
+ return -ENOMEM;
+
+ preloaded = !radix_tree_preload(GFP_KERNEL);
+
+ /*
+ * Make sure the root blkg exists. As @q is bypassing at this
+ * point, blkg_lookup_create() can't be used. Open code the
+ * insertion.
+ */
+ rcu_read_lock();
+ spin_lock_irq(q->queue_lock);
+ blkg = blkg_create(&blkcg_root, q, new_blkg);
+ spin_unlock_irq(q->queue_lock);
+ rcu_read_unlock();
+
+ if (preloaded)
+ radix_tree_preload_end();
+
+ if (IS_ERR(blkg)) {
+ kfree(new_blkg);
+ return PTR_ERR(blkg);
+ }
+
+ q->root_blkg = blkg;
+ q->root_rl.blkg = blkg;

- return blk_throtl_init(q);
+ ret = blk_throtl_init(q);
+ if (ret) {
+ spin_lock_irq(q->queue_lock);
+ blkg_destroy_all(q);
+ spin_unlock_irq(q->queue_lock);
+ }
+ return ret;
}
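The preload dance exists because blkg_create() inserts into blkcg->blkg_tree under q->queue_lock, where only GFP_ATOMIC allocations are possible; radix_tree_preload(GFP_KERNEL) stocks per-CPU tree nodes while sleeping is still allowed and disables preemption until radix_tree_preload_end(). A minimal sketch of the general pattern, assuming kernel context and hypothetical root/lock arguments:

#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static int insert_preloaded(struct radix_tree_root *root, unsigned long index,
			    void *item, spinlock_t *lock)
{
	int ret;

	/* May sleep: preallocates per-CPU nodes, then disables preemption. */
	ret = radix_tree_preload(GFP_KERNEL);
	if (ret)
		return ret;

	spin_lock(lock);
	/* Cannot fail with -ENOMEM now; -EEXIST is still possible. */
	ret = radix_tree_insert(root, index, item);
	spin_unlock(lock);

	radix_tree_preload_end();	/* re-enables preemption */
	return ret;
}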
/**
@@ -958,52 +1037,26 @@ int blkcg_activate_policy(struct request_queue *q,
const struct blkcg_policy *pol)
{
LIST_HEAD(pds);
- struct blkcg_gq *blkg, *new_blkg;
- struct blkg_policy_data *pd, *n;
+ LIST_HEAD(cpds);
+ struct blkcg_gq *blkg;
+ struct blkg_policy_data *pd, *nd;
+ struct blkcg_policy_data *cpd, *cnd;
int cnt = 0, ret;
- bool preloaded;
if (blkcg_policy_enabled(q, pol))
return 0;
- /* preallocations for root blkg */
- new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
- if (!new_blkg)
- return -ENOMEM;
-
+ /* count and allocate policy_data for all existing blkgs */
blk_queue_bypass_start(q);
-
- preloaded = !radix_tree_preload(GFP_KERNEL);
-
- /*
- * Make sure the root blkg exists and count the existing blkgs. As
- * @q is bypassing at this point, blkg_lookup_create() can't be
- * used. Open code it.
- */
spin_lock_irq(q->queue_lock);
-
- rcu_read_lock();
- blkg = __blkg_lookup(&blkcg_root, q, false);
- if (blkg)
- blkg_free(new_blkg);
- else
- blkg = blkg_create(&blkcg_root, q, new_blkg);
- rcu_read_unlock();
-
- if (preloaded)
- radix_tree_preload_end();
-
- if (IS_ERR(blkg)) {
- ret = PTR_ERR(blkg);
- goto out_unlock;
- }
-
list_for_each_entry(blkg, &q->blkg_list, q_node)
cnt++;
-
spin_unlock_irq(q->queue_lock);
- /* allocate policy_data for all existing blkgs */
+ /*
+ * Allocate per-blkg and per-blkcg policy data
+ * for all existing blkgs.
+ */
while (cnt--) {
pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
if (!pd) {
@@ -1011,26 +1064,50 @@ int blkcg_activate_policy(struct request_queue *q,
goto out_free;
}
list_add_tail(&pd->alloc_node, &pds);
+
+ if (!pol->cpd_size)
+ continue;
+ cpd = kzalloc_node(pol->cpd_size, GFP_KERNEL, q->node);
+ if (!cpd) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ list_add_tail(&cpd->alloc_node, &cpds);
}
/*
- * Install the allocated pds. With @q bypassing, no new blkg
+ * Install the allocated pds and cpds. With @q bypassing, no new blkg
* should have been created while the queue lock was dropped.
*/
spin_lock_irq(q->queue_lock);
list_for_each_entry(blkg, &q->blkg_list, q_node) {
- if (WARN_ON(list_empty(&pds))) {
+ if (WARN_ON(list_empty(&pds)) ||
+ WARN_ON(pol->cpd_size && list_empty(&cpds))) {
/* umm... this shouldn't happen, just abort */
ret = -ENOMEM;
goto out_unlock;
}
+ cpd = list_first_entry(&cpds, struct blkcg_policy_data,
+ alloc_node);
+ list_del_init(&cpd->alloc_node);
pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
list_del_init(&pd->alloc_node);
/* grab blkcg lock too while installing @pd on @blkg */
spin_lock(&blkg->blkcg->lock);
+ if (!pol->cpd_size)
+ goto no_cpd;
+ if (!blkg->blkcg->pd[pol->plid]) {
+ /* Per-policy per-blkcg data */
+ blkg->blkcg->pd[pol->plid] = cpd;
+ cpd->plid = pol->plid;
+ pol->cpd_init_fn(blkg->blkcg);
+ } else { /* this blkcg already has its cpd; free the one we popped */
+ kfree(cpd);
+ }
+no_cpd:
blkg->pd[pol->plid] = pd;
pd->blkg = blkg;
pd->plid = pol->plid;
@@ -1045,8 +1122,10 @@ out_unlock:
spin_unlock_irq(q->queue_lock);
out_free:
blk_queue_bypass_end(q);
- list_for_each_entry_safe(pd, n, &pds, alloc_node)
+ list_for_each_entry_safe(pd, nd, &pds, alloc_node)
kfree(pd);
+ list_for_each_entry_safe(cpd, cnd, &cpds, alloc_node)
+ kfree(cpd);
return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
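Activation is a two-phase scheme: count the blkgs and do all GFP_KERNEL allocations outside the queue lock (bypass mode keeps the blkg list stable in between), then pop from the prebuilt lists while installing under the lock, where sleeping is forbidden. Condensed to its skeleton below, with the cpd side and most cleanup trimmed; this is a sketch of the flow above, not the complete function:

/* Phase 1: count under the lock, then allocate while we may sleep. */
spin_lock_irq(q->queue_lock);
list_for_each_entry(blkg, &q->blkg_list, q_node)
	cnt++;
spin_unlock_irq(q->queue_lock);

while (cnt--) {
	pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
	if (!pd)
		goto out_free;
	list_add_tail(&pd->alloc_node, &pds);
}

/* Phase 2: reacquire the lock and install from the prebuilt list. */
spin_lock_irq(q->queue_lock);
list_for_each_entry(blkg, &q->blkg_list, q_node) {
	pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
	list_del_init(&pd->alloc_node);
	blkg->pd[pol->plid] = pd;
}
spin_unlock_irq(q->queue_lock);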
@@ -1072,10 +1151,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
__clear_bit(pol->plid, q->blkcg_pols);
- /* if no policy is left, no need for blkgs - shoot them down */
- if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
- blkg_destroy_all(q);
-
list_for_each_entry(blkg, &q->blkg_list, q_node) {
/* grab blkcg lock too while removing @pd from @blkg */
spin_lock(&blkg->blkcg->lock);
@@ -1087,6 +1162,8 @@ void blkcg_deactivate_policy(struct request_queue *q,
kfree(blkg->pd[pol->plid]);
blkg->pd[pol->plid] = NULL;
+ kfree(blkg->blkcg->pd[pol->plid]);
+ blkg->blkcg->pd[pol->plid] = NULL;
spin_unlock(&blkg->blkcg->lock);
}
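One subtlety in this loop: several blkgs on @q can belong to the same blkcg, so blkg->blkcg->pd[pol->plid] may be visited more than once. Storing NULL immediately after the first kfree() makes every later visit a kfree(NULL), which is a safe no-op. Illustrated with hypothetical names (cgrp, plid):

/* Two blkgs sharing one blkcg: the second visit must not double-free. */
kfree(cgrp->pd[plid]);	/* first blkg frees the shared per-blkcg data */
cgrp->pd[plid] = NULL;	/* ... so when the loop reaches the second blkg, */
kfree(cgrp->pd[plid]);	/* this is kfree(NULL): a harmless no-op */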