author	Tejun Heo <tj@kernel.org>	2015-05-13 16:35:16 -0400
committer	Tejun Heo <tj@kernel.org>	2015-05-26 20:35:00 -0400
commit	7d7efec368d537226142cbe559f45797f18672f9 (patch)
tree	8b391dada2b97b66f9fbd4315b397516f13199de /include
parent	8ab456ac3697dbd1d3eae5d5817dba941faf89ee (diff)
sched, cgroup: reorganize threadgroup locking
threadgroup_change_begin/end() are used to mark the beginning and end of threadgroup modifying operations to allow code paths which require a threadgroup to stay stable across blocking operations to synchronize against those sections using threadgroup_lock/unlock().

It's currently implemented as a general mechanism in sched.h using a per-signal_struct rwsem; however, this never grew non-cgroup use cases and becomes a noop if !CONFIG_CGROUPS. It turns out that cgroups is going to be better served by a different synchronization scheme, and it's a bit silly to keep cgroup-specific details as a general mechanism. What's general here is identifying the places where threadgroups are modified.

This patch restructures threadgroup locking so that threadgroup_change_begin/end() become a place where subsystems which need to synchronize against threadgroup changes can hook into. cgroup_threadgroup_change_begin/end(), which operate on the per-signal_struct rwsem, are created, and threadgroup_lock/unlock() are moved to cgroup.c and made static.

This is pure reorganization which doesn't cause any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
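Since the diff below is limited to 'include', the kernel/cgroup.c side of the move is not shown here. Going by the description above, the new out-of-line hooks plausibly look like the following sketch, which keeps the per-signal_struct rwsem and turns threadgroup_lock/unlock() into static write-side wrappers (a sketch inferred from the commit message, not the verbatim patch):

	/* Sketch of the kernel/cgroup.c counterpart (not part of this diff). */
	void cgroup_threadgroup_change_begin(struct task_struct *tsk)
	{
		down_read(&tsk->signal->group_rwsem);
	}

	void cgroup_threadgroup_change_end(struct task_struct *tsk)
	{
		up_read(&tsk->signal->group_rwsem);
	}

	/* threadgroup_lock/unlock() move here from sched.h and become static. */
	static void threadgroup_lock(struct task_struct *tsk)
	{
		down_write(&tsk->signal->group_rwsem);
	}

	static void threadgroup_unlock(struct task_struct *tsk)
	{
		up_write(&tsk->signal->group_rwsem);
	}

The read side taken in cgroup_threadgroup_change_begin() pairs with the write side in threadgroup_lock(), so a writer observes no threadgroup changes in flight.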
Diffstat (limited to 'include')
-rw-r--r--	include/linux/cgroup-defs.h	10
-rw-r--r--	include/linux/sched.h	53
2 files changed, 27 insertions(+), 36 deletions(-)
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 55f3120fb952..1b8c93806dbd 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -14,6 +14,7 @@
 #include <linux/mutex.h>
 #include <linux/rcupdate.h>
 #include <linux/percpu-refcount.h>
+#include <linux/percpu-rwsem.h>
 #include <linux/workqueue.h>
 
 #ifdef CONFIG_CGROUPS
@@ -460,5 +461,14 @@ struct cgroup_subsys {
 	unsigned int depends_on;
 };
 
+void cgroup_threadgroup_change_begin(struct task_struct *tsk);
+void cgroup_threadgroup_change_end(struct task_struct *tsk);
+
+#else	/* CONFIG_CGROUPS */
+
+static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {}
+static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
+
 #endif	/* CONFIG_CGROUPS */
+
 #endif	/* _LINUX_CGROUP_DEFS_H */
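For context on how these markers are used: the sched.h comment removed below notes that the fork and exit paths call threadgroup_change_{begin|end}() explicitly. An illustrative, hypothetical caller follows; the real kernel/fork.c call sites are more involved:

	/* Hypothetical sketch of a fork-path caller; not actual kernel/fork.c code. */
	static void add_thread_to_group_sketch(struct task_struct *leader)
	{
		threadgroup_change_begin(leader);	/* may sleep */
		/* ... link the new thread into leader's threadgroup ... */
		threadgroup_change_end(leader);
	}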
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8222ae40ecb0..5ee290003470 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -58,6 +58,7 @@ struct sched_param {
 #include <linux/uidgid.h>
 #include <linux/gfp.h>
 #include <linux/magic.h>
+#include <linux/cgroup-defs.h>
 
 #include <asm/processor.h>
 
@@ -2648,53 +2649,33 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
 }
 
-#ifdef CONFIG_CGROUPS
-static inline void threadgroup_change_begin(struct task_struct *tsk)
-{
-	down_read(&tsk->signal->group_rwsem);
-}
-static inline void threadgroup_change_end(struct task_struct *tsk)
-{
-	up_read(&tsk->signal->group_rwsem);
-}
-
 /**
- * threadgroup_lock - lock threadgroup
- * @tsk: member task of the threadgroup to lock
- *
- * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
- * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
- * change ->group_leader/pid.  This is useful for cases where the threadgroup
- * needs to stay stable across blockable operations.
+ * threadgroup_change_begin - mark the beginning of changes to a threadgroup
+ * @tsk: task causing the changes
  *
- * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
- * synchronization.  While held, no new task will be added to threadgroup
- * and no existing live task will have its PF_EXITING set.
- *
- * de_thread() does threadgroup_change_{begin|end}() when a non-leader
- * sub-thread becomes a new leader.
+ * All operations which modify a threadgroup - a new thread joining the
+ * group, death of a member thread (the assertion of PF_EXITING) and
+ * exec(2) dethreading the process and replacing the leader - are wrapped
+ * by threadgroup_change_{begin|end}().  This is to provide a place which
+ * subsystems needing threadgroup stability can hook into for
+ * synchronization.
  */
-static inline void threadgroup_lock(struct task_struct *tsk)
+static inline void threadgroup_change_begin(struct task_struct *tsk)
 {
-	down_write(&tsk->signal->group_rwsem);
+	might_sleep();
+	cgroup_threadgroup_change_begin(tsk);
 }
 
 /**
- * threadgroup_unlock - unlock threadgroup
- * @tsk: member task of the threadgroup to unlock
+ * threadgroup_change_end - mark the end of changes to a threadgroup
+ * @tsk: task causing the changes
  *
- * Reverse threadgroup_lock().
+ * See threadgroup_change_begin().
  */
-static inline void threadgroup_unlock(struct task_struct *tsk)
+static inline void threadgroup_change_end(struct task_struct *tsk)
 {
-	up_write(&tsk->signal->group_rwsem);
+	cgroup_threadgroup_change_end(tsk);
 }
-#else
-static inline void threadgroup_change_begin(struct task_struct *tsk) {}
-static inline void threadgroup_change_end(struct task_struct *tsk) {}
-static inline void threadgroup_lock(struct task_struct *tsk) {}
-static inline void threadgroup_unlock(struct task_struct *tsk) {}
-#endif
 
 #ifndef __HAVE_THREAD_FUNCTIONS
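The might_sleep() added in threadgroup_change_begin() above makes explicit that the hook may block (taking the rwsem read side can sleep), so the markers are only valid in process context. Per the removed comment, exec's de_thread() brackets the leader change the same way; a hedged sketch of that pattern, not the actual fs/exec.c code:

	/* Hypothetical sketch of the de_thread() usage pattern; not fs/exec.c code. */
	static void dethread_sketch(struct task_struct *tsk)
	{
		threadgroup_change_begin(tsk);
		/* ... a non-leader sub-thread becomes the new group leader ... */
		threadgroup_change_end(tsk);
	}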