author		Paul E. McKenney <paul.mckenney@linaro.org>	2012-10-11 16:18:09 -0700
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-11-08 11:50:13 -0800
commit		a30489c5228fba6f16b4c740a0292879ef13371e (patch)
tree		2a28a2d6180f8315a8c1cee12cdb415a90b167f0 /kernel/rcutree.c
parent		40694d6644d5cca28531707559466122eb212d8b (diff)
rcu: Instrument synchronize_rcu_expedited() for debugfs tracing
This commit adds the counters to rcu_state and updates them in
synchronize_rcu_expedited() to provide the data needed for debugfs
tracing.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
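
The counters themselves live in struct rcu_state, which is declared in kernel/rcutree.h and is therefore outside the diffstat below. A minimal sketch of the added fields, inferred from the atomic_long_inc() calls in this patch; the ordering and the per-field comments are illustrative, not taken from the commit:

	/* Sketch only: the field names match the atomic_long_inc() calls in the
	 * diff below; their placement in struct rcu_state and these comments
	 * are assumed, not copied from the commit. */
	atomic_long_t expedited_wrap;		/* # near-wraps of the ticket counters. */
	atomic_long_t expedited_tryfail;	/* # failed try_stop_cpus() attempts. */
	atomic_long_t expedited_workdone1;	/* # times others did our work (first check). */
	atomic_long_t expedited_workdone2;	/* # times others did our work (recheck). */
	atomic_long_t expedited_normal;		/* # fallbacks to synchronize_sched(). */
	atomic_long_t expedited_stoppedcpus;	/* # successful try_stop_cpus() calls. */
	atomic_long_t expedited_done_tries;	/* # attempts to advance ->expedited_done. */
	atomic_long_t expedited_done_lost;	/* # times beaten to ->expedited_done. */
	atomic_long_t expedited_done_exit;	/* # exits from the ->expedited_done loop. */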
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	18
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 3c72e5e5528c..b966d56ebb51 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -2321,6 +2321,7 @@ void synchronize_sched_expedited(void)
 			 (ulong)atomic_long_read(&rsp->expedited_done) +
 			 ULONG_MAX / 8)) {
 		synchronize_sched();
+		atomic_long_inc(&rsp->expedited_wrap);
 		return;
 	}
 
@@ -2341,11 +2342,14 @@ void synchronize_sched_expedited(void)
 			     synchronize_sched_expedited_cpu_stop,
 			     NULL) == -EAGAIN) {
 		put_online_cpus();
+		atomic_long_inc(&rsp->expedited_tryfail);
 
 		/* Check to see if someone else did our work for us. */
 		s = atomic_long_read(&rsp->expedited_done);
 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
-			smp_mb(); /* ensure test happens before caller kfree */
+			/* ensure test happens before caller kfree */
+			smp_mb__before_atomic_inc(); /* ^^^ */
+			atomic_long_inc(&rsp->expedited_workdone1);
 			return;
 		}
 
@@ -2354,13 +2358,16 @@ void synchronize_sched_expedited(void)
 			udelay(trycount * num_online_cpus());
 		} else {
 			synchronize_sched();
+			atomic_long_inc(&rsp->expedited_normal);
 			return;
 		}
 
 		/* Recheck to see if someone else did our work for us. */
 		s = atomic_long_read(&rsp->expedited_done);
 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
-			smp_mb(); /* ensure test happens before caller kfree */
+			/* ensure test happens before caller kfree */
+			smp_mb__before_atomic_inc(); /* ^^^ */
+			atomic_long_inc(&rsp->expedited_workdone2);
 			return;
 		}
 
@@ -2375,6 +2382,7 @@ void synchronize_sched_expedited(void)
 		snap = atomic_long_read(&rsp->expedited_start);
 		smp_mb(); /* ensure read is before try_stop_cpus(). */
 	}
+	atomic_long_inc(&rsp->expedited_stoppedcpus);
 
 	/*
 	 * Everyone up to our most recent fetch is covered by our grace
@@ -2383,12 +2391,16 @@ void synchronize_sched_expedited(void)
 	 * than we did already did their update.
 	 */
 	do {
+		atomic_long_inc(&rsp->expedited_done_tries);
 		s = atomic_long_read(&rsp->expedited_done);
 		if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
-			smp_mb(); /* ensure test happens before caller kfree */
+			/* ensure test happens before caller kfree */
+			smp_mb__before_atomic_inc(); /* ^^^ */
+			atomic_long_inc(&rsp->expedited_done_lost);
 			break;
 		}
 	} while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
+	atomic_long_inc(&rsp->expedited_done_exit);
 
 	put_online_cpus();
 }
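
The debugfs consumer of these counters is not part of this patch; it would live in RCU's tracing code (presumably kernel/rcutree_trace.c) in a follow-up change. A hypothetical sketch of such a seq_file read-out follows; the function name, output format, and use of rcu_sched_state are assumptions, and only the counter fields come from this commit:

/* Hypothetical debugfs show function; not part of this commit. */
static int show_rcuexp(struct seq_file *m, void *v)
{
	struct rcu_state *rsp = &rcu_sched_state;	/* assumed RCU flavor */

	/* Dump one counter per event class instrumented above. */
	seq_printf(m, "wrap=%lu tryfail=%lu wd1=%lu wd2=%lu normal=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
		   atomic_long_read(&rsp->expedited_wrap),
		   atomic_long_read(&rsp->expedited_tryfail),
		   atomic_long_read(&rsp->expedited_workdone1),
		   atomic_long_read(&rsp->expedited_workdone2),
		   atomic_long_read(&rsp->expedited_normal),
		   atomic_long_read(&rsp->expedited_stoppedcpus),
		   atomic_long_read(&rsp->expedited_done_tries),
		   atomic_long_read(&rsp->expedited_done_lost),
		   atomic_long_read(&rsp->expedited_done_exit));
	return 0;
}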