author    Tejun Heo <tj@kernel.org>	2015-08-18 14:55:09 -0700
committer Jens Axboe <axboe@fb.com>	2015-08-18 15:49:16 -0700
commit    4c55f4f9ad3001ac1fefdd8d8ca7641d18558e23
tree      66be4a0f4881ba200b7e31e9f0efac8912ab0f21
parent    bc915e61cde25d0b429f536cec9e83039bf23504
blkcg: restructure blkg_policy_data allocation in blkcg_activate_policy()
When a policy gets activated, it needs to allocate and install its
policy data on all existing blkg's (blkcg_gq's). Because blkg
iteration is protected by a spinlock, it currently counts the total
number of blkg's in the system, allocates the matching number of
policy data on a list and installs them during a single iteration.

This can be simplified by using speculative GFP_NOWAIT allocations
while iterating and falling back to a preallocated policy data on
failure. If the preallocated one has already been consumed, it
releases the lock, preallocates with GFP_KERNEL and then restarts the
iteration. This can be a bit more expensive than before but policy
activation is a very cold path and shouldn't matter.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@fb.com>
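The allocation pattern the message describes is not kernel-specific. Below is
a minimal userspace C sketch of the same idea, not the patched kernel code: a
pthread mutex stands in for q->queue_lock, calloc() for the sleeping
GFP_KERNEL allocation, and a failable helper for GFP_NOWAIT. All identifiers
in it (node, install_data, try_alloc_nowait) are hypothetical.

/*
 * Sketch of "speculative allocation under a lock with a preallocated
 * fallback".  Illustrative only; none of these names are kernel APIs.
 */
#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
	void *data;		/* per-node payload, NULL until installed */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for a GFP_NOWAIT allocation: may fail under memory pressure */
static void *try_alloc_nowait(size_t size)
{
	return calloc(1, size);
}

static int install_data(struct node *head, size_t size)
{
	void *prealloc = NULL;
	struct node *n;

retry:
	if (!prealloc) {
		/* the "GFP_KERNEL" refill, done outside the lock */
		prealloc = calloc(1, size);
		if (!prealloc)
			return -1;
	}

	pthread_mutex_lock(&list_lock);
	for (n = head; n; n = n->next) {
		void *data;

		if (n->data)	/* already installed on an earlier pass */
			continue;

		data = try_alloc_nowait(size);
		if (!data) {	/* fall back to the preallocated spare */
			data = prealloc;
			prealloc = NULL;
		}
		if (!data) {	/* spare consumed too: refill and restart */
			pthread_mutex_unlock(&list_lock);
			goto retry;
		}
		n->data = data;
	}
	pthread_mutex_unlock(&list_lock);

	free(prealloc);		/* unused spare, if any */
	return 0;
}

The key property is that the lock is only dropped after the spare has been
consumed, so every refill-and-restart round trip installs data on at least
one more node and the loop is guaranteed to terminate.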
-rw-r--r--  block/blk-cgroup.c          | 55
-rw-r--r--  include/linux/blk-cgroup.h  |  3
2 files changed, 21 insertions(+), 37 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index f91a4e09e0c9..9e9b0df339ee 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1047,65 +1047,52 @@ EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);
 int blkcg_activate_policy(struct request_queue *q,
 			  const struct blkcg_policy *pol)
 {
-	LIST_HEAD(pds);
+	struct blkg_policy_data *pd_prealloc = NULL;
 	struct blkcg_gq *blkg;
-	struct blkg_policy_data *pd, *nd;
-	int cnt = 0, ret;
 
 	if (blkcg_policy_enabled(q, pol))
 		return 0;
 
-	/* count and allocate policy_data for all existing blkgs */
 	blk_queue_bypass_start(q);
-	spin_lock_irq(q->queue_lock);
-	list_for_each_entry(blkg, &q->blkg_list, q_node)
-		cnt++;
-	spin_unlock_irq(q->queue_lock);
-
-	/* allocate per-blkg policy data for all existing blkgs */
-	while (cnt--) {
-		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
-		if (!pd) {
+pd_prealloc:
+	if (!pd_prealloc) {
+		pd_prealloc = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
+		if (!pd_prealloc) {
 			ret = -ENOMEM;
-			goto out_free;
+			goto out_bypass_end;
 		}
-		list_add_tail(&pd->alloc_node, &pds);
 	}
 
-	/*
-	 * Install the allocated pds and cpds. With @q bypassing, no new blkg
-	 * should have been created while the queue lock was dropped.
-	 */
 	spin_lock_irq(q->queue_lock);
 
 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
-		if (WARN_ON(list_empty(&pds))) {
-			/* umm... this shouldn't happen, just abort */
-			ret = -ENOMEM;
-			goto out_unlock;
-		}
-		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
-		list_del_init(&pd->alloc_node);
+		struct blkg_policy_data *pd;
 
-		/* grab blkcg lock too while installing @pd on @blkg */
-		spin_lock(&blkg->blkcg->lock);
+		if (blkg->pd[pol->plid])
+			continue;
+
+		pd = kzalloc_node(pol->pd_size, GFP_NOWAIT, q->node);
+		if (!pd)
+			swap(pd, pd_prealloc);
+		if (!pd) {
+			spin_unlock_irq(q->queue_lock);
+			goto pd_prealloc;
+		}
 
 		blkg->pd[pol->plid] = pd;
 		pd->blkg = blkg;
 		pd->plid = pol->plid;
 		pol->pd_init_fn(blkg);
-
-		spin_unlock(&blkg->blkcg->lock);
 	}
 
 	__set_bit(pol->plid, q->blkcg_pols);
 	ret = 0;
-out_unlock:
+
 	spin_unlock_irq(q->queue_lock);
-out_free:
+out_bypass_end:
 	blk_queue_bypass_end(q);
-	list_for_each_entry_safe(pd, nd, &pds, alloc_node)
-		kfree(pd);
+	kfree(pd_prealloc);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(blkcg_activate_policy);
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 9711fc277c02..db822880242a 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -80,9 +80,6 @@ struct blkg_policy_data {
 	/* the blkg and policy id this per-policy data belongs to */
 	struct blkcg_gq *blkg;
 	int plid;
-
-	/* used during policy activation */
-	struct list_head alloc_node;
 };
 
 /*