author     Tejun Heo <tj@kernel.org>  2014-06-28 08:10:14 -0400
committer  Tejun Heo <tj@kernel.org>  2014-06-28 08:10:14 -0400
commit     2d7227828e1475c7b272e55bd70c4cec8eea219a (patch)
tree       068171c424acc2390b1e6ecf514182c82cc5811d /include/linux
parent     9a1049da9bd2cd83fe11d46433e603c193aa9c71 (diff)
percpu-refcount: implement percpu_ref_reinit() and percpu_ref_is_zero()
Now that explicit invocation of percpu_ref_exit() is necessary to free the percpu counter, we can implement percpu_ref_reinit() which reinitializes a released percpu_ref. This can be used to implement a scalable gating switch which can be drained and then re-opened without worrying about memory allocation failures.

percpu_ref_is_zero() is added to be used in a sanity check in percpu_ref_exit(). As this function will be useful for other purposes too, make it a public interface.

v2: Use smp_read_barrier_depends() instead of smp_load_acquire(). We only need a data dependency barrier, and smp_load_acquire() is stronger and heavier on some archs. Spotted by Lai Jiangshan.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Kent Overstreet <kmo@daterainc.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
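As a rough illustration of the gating-switch pattern the message describes, the sketch below drains a ref and later re-opens it with no allocation on the re-open path. It is a minimal sketch, not part of this patch: the q_ref/q_drained names, the q_gate_* helpers, and the completion-based drain are all illustrative assumptions.

/*
 * Sketch only: q_ref, q_drained, and the q_gate_* helpers are
 * assumptions for illustration; they do not come from this patch.
 */
#include <linux/percpu-refcount.h>
#include <linux/completion.h>

static struct percpu_ref q_ref;
static DECLARE_COMPLETION(q_drained);

static void q_ref_release(struct percpu_ref *ref)
{
	complete(&q_drained);		/* last reference dropped */
}

static int q_gate_open(void)
{
	/* the only step that can fail with -ENOMEM */
	return percpu_ref_init(&q_ref, q_ref_release);
}

static void q_gate_drain(void)
{
	percpu_ref_kill(&q_ref);	/* reject new users, drop initial ref */
	wait_for_completion(&q_drained);
	WARN_ON(!percpu_ref_is_zero(&q_ref));
}

static void q_gate_reopen(void)
{
	reinit_completion(&q_drained);
	percpu_ref_reinit(&q_ref);	/* reuses the percpu counter; cannot fail */
}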
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/percpu-refcount.h  19
1 file changed, 19 insertions(+), 0 deletions(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 0ddd2839ca84..3dfbf237cd8f 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -67,6 +67,7 @@ struct percpu_ref {
int __must_check percpu_ref_init(struct percpu_ref *ref,
percpu_ref_func_t *release);
+void percpu_ref_reinit(struct percpu_ref *ref);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill);
@@ -99,6 +100,9 @@ static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
{
unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);
+ /* paired with smp_store_release() in percpu_ref_reinit() */
+ smp_read_barrier_depends();
+
if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
return false;
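The smp_store_release() this barrier pairs with lives in the lib/percpu-refcount.c half of the patch, which is outside this include/linux diffstat. A minimal sketch of that reinit path, inferred from the comment above (the pcpu_count_ptr() helper and PCPU_COUNT_BIAS are taken from the surrounding code of that era), is:

/* Sketch of the lib/percpu-refcount.c side; not part of this diffstat. */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
	int cpu;

	BUG_ON(!pcpu_count);
	WARN_ON(!percpu_ref_is_zero(ref));

	/* restore the bias carried by the initial reference */
	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);

	/* zero the percpu counters before they become reachable again */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(pcpu_count, cpu) = 0;

	/*
	 * Clearing PCPU_REF_DEAD with a release store guarantees that a
	 * reader whose smp_read_barrier_depends() observes the cleared
	 * flag also observes the zeroed percpu counters.
	 */
	smp_store_release(&ref->pcpu_count_ptr,
			  ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
}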
@@ -206,4 +210,19 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
rcu_read_unlock_sched();
}
+/**
+ * percpu_ref_is_zero - test whether a percpu refcount reached zero
+ * @ref: percpu_ref to test
+ *
+ * Returns %true if @ref reached zero.
+ */
+static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
+{
+ unsigned __percpu *pcpu_count;
+
+ if (__pcpu_ref_alive(ref, &pcpu_count))
+ return false;
+ return !atomic_read(&ref->count);
+}
+
#endif
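Since percpu_ref_is_zero() is now a public interface, callers can also use it for their own teardown assertions, along these lines (q_ref continues the hypothetical example above):

	/* hypothetical teardown; q_ref is from the sketch above */
	WARN_ON_ONCE(!percpu_ref_is_zero(&q_ref));
	percpu_ref_exit(&q_ref);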