blkcg: let blkio_group point to blkio_cgroup directly
Currently, blkg points to the associated blkcg via its css_id, which
unnecessarily complicates dereferencing blkcg. Let blkg hold a
reference to the associated blkcg, point to it directly, and disable
css_id on blkio_subsys.
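
The new ownership rule is, roughly (an illustrative sketch of the
pattern only, not the exact hunks below):

	/* creation: pin the blkcg the new blkg will point to */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);
	new_blkg->blkcg = blkcg;	/* was: new_blkg->blkcg_id = css_id(&blkcg->css) */

	/* final put of the blkg (throtl / cfq): drop that pin */
	css_put(&blkg->blkcg->css);
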
This change requires splitting blkiocg_destroy() into
blkiocg_pre_destroy() and blkiocg_destroy() so that all blkgs can be
destroyed and the blkcg references they hold dropped during cgroup
removal.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 7694236..d42d826 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -37,6 +37,7 @@
struct cgroup_taskset *);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
struct cgroup_taskset *);
+static int blkiocg_pre_destroy(struct cgroup_subsys *, struct cgroup *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
@@ -51,10 +52,10 @@
.create = blkiocg_create,
.can_attach = blkiocg_can_attach,
.attach = blkiocg_attach,
+ .pre_destroy = blkiocg_pre_destroy,
.destroy = blkiocg_destroy,
.populate = blkiocg_populate,
.subsys_id = blkio_subsys_id,
- .use_id = 1,
.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
@@ -442,6 +443,7 @@
if (blkg)
return blkg;
+ /* blkg holds a reference to blkcg */
if (!css_tryget(&blkcg->css))
return ERR_PTR(-EINVAL);
@@ -463,15 +465,16 @@
spin_lock_init(&new_blkg->stats_lock);
rcu_assign_pointer(new_blkg->q, q);
- new_blkg->blkcg_id = css_id(&blkcg->css);
+ new_blkg->blkcg = blkcg;
new_blkg->plid = plid;
cgroup_path(blkcg->css.cgroup, new_blkg->path,
sizeof(new_blkg->path));
+ } else {
+ css_put(&blkcg->css);
}
rcu_read_lock();
spin_lock_irq(q->queue_lock);
- css_put(&blkcg->css);
/* did bypass get turned on inbetween? */
if (unlikely(blk_queue_bypass(q)) && !for_root) {
@@ -500,6 +503,7 @@
if (new_blkg) {
free_percpu(new_blkg->stats_cpu);
kfree(new_blkg);
+ css_put(&blkcg->css);
}
return blkg;
}
@@ -508,7 +512,6 @@
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
hlist_del_init_rcu(&blkg->blkcg_node);
- blkg->blkcg_id = 0;
}
/*
@@ -517,24 +520,17 @@
*/
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
- struct blkio_cgroup *blkcg;
+ struct blkio_cgroup *blkcg = blkg->blkcg;
unsigned long flags;
- struct cgroup_subsys_state *css;
int ret = 1;
- rcu_read_lock();
- css = css_lookup(&blkio_subsys, blkg->blkcg_id);
- if (css) {
- blkcg = container_of(css, struct blkio_cgroup, css);
- spin_lock_irqsave(&blkcg->lock, flags);
- if (!hlist_unhashed(&blkg->blkcg_node)) {
- __blkiocg_del_blkio_group(blkg);
- ret = 0;
- }
- spin_unlock_irqrestore(&blkcg->lock, flags);
+ spin_lock_irqsave(&blkcg->lock, flags);
+ if (!hlist_unhashed(&blkg->blkcg_node)) {
+ __blkiocg_del_blkio_group(blkg);
+ ret = 0;
}
+ spin_unlock_irqrestore(&blkcg->lock, flags);
- rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
@@ -1387,7 +1383,8 @@
ARRAY_SIZE(blkio_files));
}
-static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
+static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
+ struct cgroup *cgroup)
{
struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
unsigned long flags;
@@ -1396,6 +1393,7 @@
struct blkio_policy_type *blkiop;
rcu_read_lock();
+
do {
spin_lock_irqsave(&blkcg->lock, flags);
@@ -1425,8 +1423,15 @@
spin_unlock(&blkio_list_lock);
} while (1);
- free_css_id(&blkio_subsys, &blkcg->css);
rcu_read_unlock();
+
+ return 0;
+}
+
+static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
+{
+ struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
+
if (blkcg != &blkio_root_cgroup)
kfree(blkcg);
}
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 7ebecf6..ca1fc63 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -163,7 +163,7 @@
/* Pointer to the associated request_queue, RCU protected */
struct request_queue __rcu *q;
struct hlist_node blkcg_node;
- unsigned short blkcg_id;
+ struct blkio_cgroup *blkcg;
/* Store cgroup path */
char path[128];
/* policy which owns this blk group */
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 52a4293..fe6a442 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -169,6 +169,9 @@
if (!atomic_dec_and_test(&tg->ref))
return;
+ /* release the extra blkcg reference this blkg has been holding */
+ css_put(&tg->blkg.blkcg->css);
+
/*
* A group is freed in rcu manner. But having an rcu lock does not
* mean that one can access all the fields of blkg and assume these
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f67d109..9ef86fb 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1133,6 +1133,10 @@
cfqg->ref--;
if (cfqg->ref)
return;
+
+ /* release the extra blkcg reference this blkg has been holding */
+ css_put(&cfqg->blkg.blkcg->css);
+
for_each_cfqg_st(cfqg, i, j, st)
BUG_ON(!RB_EMPTY_ROOT(&st->rb));
free_percpu(cfqg->blkg.stats_cpu);