Diffstat (limited to 'include/linux')
-rw-r--r-- | include/linux/rcupdate.h | 83
-rw-r--r-- | include/linux/rcutiny.h  | 10
-rw-r--r-- | include/linux/rcutree.h  | 19
-rw-r--r-- | include/linux/sched.h    |  3
-rw-r--r-- | include/linux/srcu.h     | 15
5 files changed, 117 insertions, 13 deletions
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 81c04f4348ec..937217425c47 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -190,6 +190,33 @@ extern void rcu_idle_exit(void);
 extern void rcu_irq_enter(void);
 extern void rcu_irq_exit(void);
 
+/**
+ * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
+ * @a: Code that RCU needs to pay attention to.
+ *
+ * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
+ * in the inner idle loop, that is, between the rcu_idle_enter() and
+ * the rcu_idle_exit() -- RCU will happily ignore any such read-side
+ * critical sections.  However, things like powertop need tracepoints
+ * in the inner idle loop.
+ *
+ * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
+ * will tell RCU that it needs to pay attention, invoke its argument
+ * (in this example, a call to the do_something_with_RCU() function),
+ * and then tell RCU to go back to ignoring this CPU.  It is permissible
+ * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
+ * quite limited.  If deeper nesting is required, it will be necessary
+ * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
+ *
+ * This macro may be used from process-level code only.
+ */
+#define RCU_NONIDLE(a) \
+        do { \
+                rcu_idle_exit(); \
+                do { a; } while (0); \
+                rcu_idle_enter(); \
+        } while (0)
+
 /*
  * Infrastructure to implement the synchronize_() primitives in
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
@@ -226,6 +253,15 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
 }
 #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
+#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
+bool rcu_lockdep_current_cpu_online(void);
+#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
+static inline bool rcu_lockdep_current_cpu_online(void)
+{
+        return 1;
+}
+#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 #ifdef CONFIG_PROVE_RCU
@@ -239,13 +275,11 @@ static inline int rcu_is_cpu_idle(void)
 
 static inline void rcu_lock_acquire(struct lockdep_map *map)
 {
-        WARN_ON_ONCE(rcu_is_cpu_idle());
         lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
 }
 
 static inline void rcu_lock_release(struct lockdep_map *map)
 {
-        WARN_ON_ONCE(rcu_is_cpu_idle());
         lock_release(map, 1, _THIS_IP_);
 }
 
@@ -270,6 +304,9 @@ extern int debug_lockdep_rcu_enabled(void);
  * occur in the same context, for example, it is illegal to invoke
  * rcu_read_unlock() in process context if the matching rcu_read_lock()
  * was invoked from within an irq handler.
+ *
+ * Note that rcu_read_lock() is disallowed if the CPU is either idle or
+ * offline from an RCU perspective, so check for those as well.
  */
 static inline int rcu_read_lock_held(void)
 {
@@ -277,6 +314,8 @@ static inline int rcu_read_lock_held(void)
                 return 1;
         if (rcu_is_cpu_idle())
                 return 0;
+        if (!rcu_lockdep_current_cpu_online())
+                return 0;
         return lock_is_held(&rcu_lock_map);
 }
 
@@ -313,6 +352,9 @@ extern int rcu_read_lock_bh_held(void);
  * notice an extended quiescent state to other CPUs that started a grace
  * period. Otherwise we would delay any grace period as long as we run in
  * the idle task.
+ *
+ * Similarly, we avoid claiming an SRCU read lock held if the current
+ * CPU is offline.
  */
 #ifdef CONFIG_PREEMPT_COUNT
 static inline int rcu_read_lock_sched_held(void)
@@ -323,6 +365,8 @@ static inline int rcu_read_lock_sched_held(void)
                 return 1;
         if (rcu_is_cpu_idle())
                 return 0;
+        if (!rcu_lockdep_current_cpu_online())
+                return 0;
         if (debug_locks)
                 lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
         return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
@@ -381,8 +425,22 @@ extern int rcu_my_thread_group_empty(void);
                 } \
         } while (0)
 
+#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
+static inline void rcu_preempt_sleep_check(void)
+{
+        rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
+                           "Illegal context switch in RCU read-side "
+                           "critical section");
+}
+#else /* #ifdef CONFIG_PROVE_RCU */
+static inline void rcu_preempt_sleep_check(void)
+{
+}
+#endif /* #else #ifdef CONFIG_PROVE_RCU */
+
 #define rcu_sleep_check() \
         do { \
+                rcu_preempt_sleep_check(); \
                 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \
                                    "Illegal context switch in RCU-bh" \
                                    " read-side critical section"); \
@@ -470,6 +528,13 @@ extern int rcu_my_thread_group_empty(void);
  * NULL.  Although rcu_access_pointer() may also be used in cases where
  * update-side locks prevent the value of the pointer from changing, you
  * should instead use rcu_dereference_protected() for this use case.
+ *
+ * It is also permissible to use rcu_access_pointer() when read-side
+ * access to the pointer was removed at least one grace period ago, as
+ * is the case in the context of the RCU callback that is freeing up
+ * the data, or after a synchronize_rcu() returns.  This can be useful
+ * when tearing down multi-linked structures after a grace period
+ * has elapsed.
  */
 #define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
@@ -659,6 +724,8 @@ static inline void rcu_read_lock(void)
         __rcu_read_lock();
         __acquire(RCU);
         rcu_lock_acquire(&rcu_lock_map);
+        rcu_lockdep_assert(!rcu_is_cpu_idle(),
+                           "rcu_read_lock() used illegally while idle");
 }
 
 /*
@@ -678,6 +745,8 @@ static inline void rcu_read_lock(void)
  */
 static inline void rcu_read_unlock(void)
 {
+        rcu_lockdep_assert(!rcu_is_cpu_idle(),
+                           "rcu_read_unlock() used illegally while idle");
         rcu_lock_release(&rcu_lock_map);
         __release(RCU);
         __rcu_read_unlock();
 }
@@ -705,6 +774,8 @@ static inline void rcu_read_lock_bh(void)
         local_bh_disable();
         __acquire(RCU_BH);
         rcu_lock_acquire(&rcu_bh_lock_map);
+        rcu_lockdep_assert(!rcu_is_cpu_idle(),
+                           "rcu_read_lock_bh() used illegally while idle");
 }
 
 /*
@@ -714,6 +785,8 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
+        rcu_lockdep_assert(!rcu_is_cpu_idle(),
+                           "rcu_read_unlock_bh() used illegally while idle");
         rcu_lock_release(&rcu_bh_lock_map);
         __release(RCU_BH);
         local_bh_enable();
@@ -737,6 +810,8 @@ static inline void rcu_read_lock_sched(void)
         preempt_disable();
         __acquire(RCU_SCHED);
         rcu_lock_acquire(&rcu_sched_lock_map);
+        rcu_lockdep_assert(!rcu_is_cpu_idle(),
+                           "rcu_read_lock_sched() used illegally while idle");
 }
 
 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -753,6 +828,8 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
  */
 static inline void rcu_read_unlock_sched(void)
 {
+        rcu_lockdep_assert(!rcu_is_cpu_idle(),
+                           "rcu_read_unlock_sched() used illegally while idle");
         rcu_lock_release(&rcu_sched_lock_map);
         __release(RCU_SCHED);
         preempt_enable();
@@ -841,7 +918,7 @@ void __kfree_rcu(struct rcu_head *head, unsigned long offset)
         /* See the kfree_rcu() header comment. */
         BUILD_BUG_ON(!__is_kfree_rcu_offset(offset));
 
-        call_rcu(head, (rcu_callback)offset);
+        kfree_call_rcu(head, (rcu_callback)offset);
 }
 
 /**
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 00b7a5e493d2..e93df77176d1 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,13 +27,9 @@
 #include <linux/cache.h>
 
-#ifdef CONFIG_RCU_BOOST
 static inline void rcu_init(void)
 {
 }
-#else /* #ifdef CONFIG_RCU_BOOST */
-void rcu_init(void);
-#endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 static inline void rcu_barrier_bh(void)
 {
@@ -83,6 +79,12 @@ static inline void synchronize_sched_expedited(void)
         synchronize_sched();
 }
 
+static inline void kfree_call_rcu(struct rcu_head *head,
+                                  void (*func)(struct rcu_head *rcu))
+{
+        call_rcu(head, func);
+}
+
 #ifdef CONFIG_TINY_RCU
 
 static inline void rcu_preempt_note_context_switch(void)
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 67458468f1a8..e8ee5dd0854c 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -61,6 +61,24 @@ extern void synchronize_rcu_bh(void);
 extern void synchronize_sched_expedited(void);
 extern void synchronize_rcu_expedited(void);
 
+void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
+
+/**
+ * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
+ *
+ * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
+ * approach to force the grace period to end quickly.  This consumes
+ * significant time on all CPUs and is unfriendly to real-time workloads,
+ * and is thus not recommended for any sort of common-case code.  In fact,
+ * if you are using synchronize_rcu_bh_expedited() in a loop, please
+ * restructure your code to batch your updates, and then use a single
+ * synchronize_rcu_bh() instead.
+ *
+ * Note that it is illegal to call this function while holding any lock
+ * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
+ * to call this function from a CPU-hotplug notifier.  Failing to observe
+ * these restrictions will result in deadlock.
+ */
 static inline void synchronize_rcu_bh_expedited(void)
 {
         synchronize_sched_expedited();
@@ -83,6 +101,7 @@ extern void rcu_sched_force_quiescent_state(void);
 /* A context switch is a grace period for RCU-sched and RCU-bh. */
 static inline int rcu_blocking_is_gp(void)
 {
+        might_sleep();  /* Check for RCU read-side critical section. */
         return num_online_cpus() == 1;
 }
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0657368bd78f..f58889b8a608 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1863,8 +1863,7 @@ extern void task_clear_jobctl_pending(struct task_struct *task,
 #ifdef CONFIG_PREEMPT_RCU
 
 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
-#define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */
-#define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. */
+#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index e1b005918bbb..d3d5fa54f25e 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -99,15 +99,18 @@ long srcu_batches_completed(struct srcu_struct *sp);
  * power mode.  This way we can notice an extended quiescent state to
  * other CPUs that started a grace period. Otherwise we would delay any
  * grace period as long as we run in the idle task.
+ *
+ * Similarly, we avoid claiming an SRCU read lock held if the current
+ * CPU is offline.
  */
 static inline int srcu_read_lock_held(struct srcu_struct *sp)
 {
-        if (rcu_is_cpu_idle())
-                return 0;
-
         if (!debug_lockdep_rcu_enabled())
                 return 1;
-
+        if (rcu_is_cpu_idle())
+                return 0;
+        if (!rcu_lockdep_current_cpu_online())
+                return 0;
         return lock_is_held(&sp->dep_map);
 }
 
@@ -169,6 +172,8 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
         int retval = __srcu_read_lock(sp);
 
         rcu_lock_acquire(&(sp)->dep_map);
+        rcu_lockdep_assert(!rcu_is_cpu_idle(),
+                           "srcu_read_lock() used illegally while idle");
         return retval;
 }
 
@@ -182,6 +187,8 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
  */
 static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp)
 {
+        rcu_lockdep_assert(!rcu_is_cpu_idle(),
+                           "srcu_read_unlock() used illegally while idle");
         rcu_lock_release(&(sp)->dep_map);
         __srcu_read_unlock(sp, idx);
 }
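
The RCU_NONIDLE() kernel-doc added above is aimed at things like tracepoints in the inner idle loop. A minimal sketch of the intended usage pattern; the idle-loop helper and the trace_example_idle_event() tracepoint are hypothetical names invented for illustration, only RCU_NONIDLE() itself comes from this patch:

#include <linux/rcupdate.h>

/* Hypothetical inner-idle-loop helper, for illustration only. */
static void example_idle_body(int state, int cpu)
{
        /*
         * Between rcu_idle_enter() and rcu_idle_exit(), RCU is ignoring
         * this CPU, so a tracepoint (which may contain RCU read-side
         * critical sections) must be wrapped:
         */
        RCU_NONIDLE(trace_example_idle_event(state, cpu));
}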
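
The new rcu_access_pointer() paragraph permits plain pointer access once readers can no longer reach the data. A sketch of the multi-linked teardown case it describes, assuming a made-up singly linked struct foo list headed by foo_head and an updater that already owns the update side:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        struct foo __rcu *next;
        int payload;
};

static struct foo __rcu *foo_head;

static void foo_teardown_all(void)
{
        struct foo *p, *q;

        p = rcu_dereference_protected(foo_head, 1);  /* updater owns the list */
        RCU_INIT_POINTER(foo_head, NULL);
        synchronize_rcu();  /* wait for pre-existing readers to finish */

        /* No readers remain, so rcu_access_pointer() suffices for traversal. */
        while (p) {
                q = rcu_access_pointer(p->next);
                kfree(p);
                p = q;
        }
}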
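
__kfree_rcu() now calls the new kfree_call_rcu(), which rcutree.h declares and rcutiny.h maps straight to call_rcu(). From a caller's perspective nothing changes; a hedged sketch of typical kfree_rcu() usage, with struct foo and foo_global assumed for illustration and the update-side lock held by the caller:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        int data;
        struct rcu_head rcu;
};

static struct foo __rcu *foo_global;

static void foo_replace(struct foo *newp)
{
        struct foo *oldp;

        oldp = rcu_dereference_protected(foo_global, 1);
        rcu_assign_pointer(foo_global, newp);
        if (oldp)
                kfree_rcu(oldp, rcu);  /* freed after a grace period elapses */
}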
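
The srcu_read_lock()/srcu_read_unlock() changes above only add debug assertions; the reader-side usage pattern is unchanged. A brief sketch, with example_srcu and the setup function as illustrative names:

#include <linux/srcu.h>

static struct srcu_struct example_srcu;

static int example_setup(void)
{
        return init_srcu_struct(&example_srcu);  /* one-time initialization, 0 on success */
}

static void example_reader(void)
{
        int idx;

        idx = srcu_read_lock(&example_srcu);
        /* ... dereference SRCU-protected pointers; sleeping is permitted here ... */
        srcu_read_unlock(&example_srcu, idx);
}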