author    Paul E. McKenney <paulmck@kernel.org>    2021-11-08 10:51:13 -0800
committer Paul E. McKenney <paulmck@kernel.org>    2021-12-07 16:26:57 -0800
commit    7a30871b6a27de1a1f418c7fd2c5dde9a46bfd16
tree      c0b13f98b4f8ea2e44dbac97dae52c09d57222bb /kernel/rcu
parent    cafafd67765b21334086b3fb8963ad9c5866c03d
rcu-tasks: Introduce ->percpu_enqueue_shift for dynamic queue selection
This commit introduces a ->percpu_enqueue_shift field to the rcu_tasks
structure, and uses it to shift down the CPU number in order to select a
rcu_tasks_percpu structure.  This field is currently set to a sufficiently
large shift count to always select the CPU-0 instance of the
rcu_tasks_percpu structure, and later commits will adjust this.

Reported-by: Martin Lau <kafai@fb.com>
Cc: Neeraj Upadhyay <neeraj.iitr10@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
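To make the selection arithmetic concrete, here is a minimal userspace
sketch (not part of the patch; NR_CPUS and ilog2_demo() are stand-ins for
the kernel's CONFIG_NR_CPUS and ilog2(), and a power-of-two CPU count is
assumed) showing why a shift of ilog2() of the CPU count funnels every CPU
to queue index 0:

    #include <stdio.h>

    #define NR_CPUS 8 /* assumed power-of-two CPU count for this demo */

    /* Stand-in for the kernel's ilog2(): floor(log2(v)). */
    static int ilog2_demo(unsigned int v)
    {
            int log = -1;

            while (v) {
                    v >>= 1;
                    log++;
            }
            return log;
    }

    int main(void)
    {
            int shift = ilog2_demo(NR_CPUS); /* 3 when NR_CPUS == 8 */
            int cpu;

            /* Mirrors per_cpu_ptr(rtp->rtpcpu, cpu >> shift) below. */
            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    printf("cpu %d -> queue %d\n", cpu, cpu >> shift);
            return 0;
    }

Every CPU prints "queue 0", matching the commit's stated intent of always
selecting the CPU-0 rcu_tasks_percpu instance; a smaller shift would
instead spread enqueues across multiple per-CPU queues.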
Diffstat (limited to 'kernel/rcu')
-rw-r--r--  kernel/rcu/tasks.h | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 30048db7aa49..2a5fe3e04b36 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -52,6 +52,7 @@ struct rcu_tasks_percpu {
  * @postgp_func: This flavor's post-grace-period function (optional).
  * @call_func: This flavor's call_rcu()-equivalent function.
  * @rtpcpu: This flavor's rcu_tasks_percpu structure.
+ * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
  * @name: This flavor's textual name.
  * @kname: This flavor's kthread name.
  */
@@ -75,6 +76,7 @@ struct rcu_tasks {
         postgp_func_t postgp_func;
         call_rcu_func_t call_func;
         struct rcu_tasks_percpu __percpu *rtpcpu;
+        int percpu_enqueue_shift;
         char *name;
         char *kname;
 };
@@ -91,6 +93,7 @@ static struct rcu_tasks rt_name = \
         .call_func = call, \
         .rtpcpu = &rt_name ## __percpu, \
         .name = n, \
+        .percpu_enqueue_shift = ilog2(CONFIG_NR_CPUS), \
         .kname = #rt_name, \
 }
@@ -169,6 +172,7 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
         unsigned long flags;

         raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
+        rtp->percpu_enqueue_shift = ilog2(nr_cpu_ids);
         for_each_possible_cpu(cpu) {
                 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
@@ -195,7 +199,8 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
         rhp->next = NULL;
         rhp->func = func;
         local_irq_save(flags);
-        rtpcp = per_cpu_ptr(rtp->rtpcpu, 0 /* smp_processor_id() */);
+        rtpcp = per_cpu_ptr(rtp->rtpcpu,
+                            smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift));
         raw_spin_lock(&rtpcp->cbs_pcpu_lock);
         if (!rtpcp->cbs_tail) {
                 raw_spin_unlock(&rtpcp->cbs_pcpu_lock); // irqs remain disabled.