Diffstat (limited to 'kernel/signal.c')
-rw-r--r--  kernel/signal.c  59
1 file changed, 48 insertions(+), 11 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index dca53515ae3f..f7c6ffcbd044 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -408,7 +408,8 @@ void task_join_group_stop(struct task_struct *task)
* appropriate lock must be held to stop the target task from exiting
*/
static struct sigqueue *
-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
+ int override_rlimit, const unsigned int sigqueue_flags)
{
struct sigqueue *q = NULL;
struct user_struct *user;
@@ -430,7 +431,16 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
rcu_read_unlock();
if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
- q = kmem_cache_alloc(sigqueue_cachep, flags);
+ /*
+ * Preallocation does not hold sighand::siglock so it can't
+ * use the cache. The lockless caching requires that only
+ * one consumer and only one producer run at a time.
+ */
+ q = READ_ONCE(t->sigqueue_cache);
+ if (!q || sigqueue_flags)
+ q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
+ else
+ WRITE_ONCE(t->sigqueue_cache, NULL);
} else {
print_dropped_signal(sig);
}
@@ -440,20 +450,51 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
free_uid(user);
} else {
INIT_LIST_HEAD(&q->list);
- q->flags = 0;
+ q->flags = sigqueue_flags;
q->user = user;
}
return q;
}
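The comment above documents the consumer half of a lockless one-slot cache: take the cached entry with READ_ONCE(), clear the slot with WRITE_ONCE(), and fall back to the slab allocator when the slot is empty or the caller is preallocating. A minimal userspace analogue of that fast path, with C11 relaxed atomics standing in for the kernel's READ_ONCE()/WRITE_ONCE(); the names slot, obj and obj_alloc are illustrative and not from this patch:

#include <stdatomic.h>
#include <stdlib.h>

struct obj { int payload; };

/* One cache slot per task; as the kernel comment states, only one
 * consumer and one producer run at a time (serialized by
 * sighand::siglock in the real code). */
static _Atomic(struct obj *) slot;

static struct obj *obj_alloc(int prealloc)
{
	struct obj *o = atomic_load_explicit(&slot, memory_order_relaxed);

	/* Preallocation bypasses the slot, mirroring sigqueue_flags != 0. */
	if (!o || prealloc)
		return malloc(sizeof(struct obj));	/* slab fallback */

	atomic_store_explicit(&slot, NULL, memory_order_relaxed);
	return o;
}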
+void exit_task_sigqueue_cache(struct task_struct *tsk)
+{
+ /* Race free because @tsk is mopped up */
+ struct sigqueue *q = tsk->sigqueue_cache;
+
+ if (q) {
+ tsk->sigqueue_cache = NULL;
+ /*
+ * Hand it back to the cache as the task might
+ * be self reaping which would leak the object.
+ */
+ kmem_cache_free(sigqueue_cachep, q);
+ }
+}
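Draining the slot at exit matters because a self-reaping task is torn down by itself: if the cached entry stayed in place, nobody would ever hand it back to the slab. The call site of exit_task_sigqueue_cache() is outside this diff (presumably the task-reaping path). Continuing the illustrative sketch above, a teardown counterpart could look like:

/* Teardown for the sketch; safe with plain semantics once the owner
 * can no longer race with the alloc/free paths, just as the kernel
 * version uses plain accesses on the mopped-up task. */
static void obj_cache_exit(void)
{
	struct obj *o = atomic_load_explicit(&slot, memory_order_relaxed);

	if (o) {
		atomic_store_explicit(&slot, NULL, memory_order_relaxed);
		free(o);
	}
}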
+
+static void sigqueue_cache_or_free(struct sigqueue *q)
+{
+ /*
+ * Cache one sigqueue per task. This pairs with the consumer side
+ * in __sigqueue_alloc() and needs READ/WRITE_ONCE() to prevent the
+ * compiler from store tearing and to tell KCSAN that the data race
+ * is intentional when run without holding current->sighand->siglock,
+ * which is fine as current obviously cannot run __sigqueue_free()
+ * concurrently.
+ */
+ if (!READ_ONCE(current->sigqueue_cache))
+ WRITE_ONCE(current->sigqueue_cache, q);
+ else
+ kmem_cache_free(sigqueue_cachep, q);
+}
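This is the producer half that pairs with the consumer in __sigqueue_alloc(): keep at most one entry per task and free everything else. READ_ONCE()/WRITE_ONCE() prevent store tearing and mark the unlocked access as an intentional, benign data race for KCSAN. Continuing the sketch, the producer side under the same single-producer/single-consumer rule:

/* Keep at most one object cached; a second free while the slot is
 * occupied goes straight back to the allocator. */
static void obj_cache_or_free(struct obj *o)
{
	if (!atomic_load_explicit(&slot, memory_order_relaxed))
		atomic_store_explicit(&slot, o, memory_order_relaxed);
	else
		free(o);
}

Under that rule the worst case is an object being freed instead of cached; no entry can be lost or handed out twice.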
+
static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
return;
if (atomic_dec_and_test(&q->user->sigpending))
free_uid(q->user);
- kmem_cache_free(sigqueue_cachep, q);
+ sigqueue_cache_or_free(q);
}
void flush_sigqueue(struct sigpending *queue)
@@ -1111,7 +1152,8 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc
else
override_rlimit = 0;
- q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
+ q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);
+
if (q) {
list_add_tail(&q->list, &pending->list);
switch ((unsigned long) info) {
@@ -1822,12 +1864,7 @@ EXPORT_SYMBOL(kill_pid);
*/
struct sigqueue *sigqueue_alloc(void)
{
- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
-
- if (q)
- q->flags |= SIGQUEUE_PREALLOC;
-
- return q;
+ return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}
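Passing SIGQUEUE_PREALLOC into __sigqueue_alloc() sets q->flags in one place and, via the "!q || sigqueue_flags" test, keeps preallocation away from the cache, which is required because preallocation runs without sighand::siglock. Note that in the surrounding code sigqueue_free() clears SIGQUEUE_PREALLOC before calling __sigqueue_free(), so a released preallocated queue can still land in the one-slot cache. Tying the illustrative sketch together (main() is a demonstration, not kernel code):

int main(void)
{
	struct obj *pre = obj_alloc(1);		/* always fresh, like SIGQUEUE_PREALLOC */
	struct obj *fast = obj_alloc(0);	/* slot empty here, so also fresh */

	obj_cache_or_free(fast);	/* first free populates the slot */
	obj_cache_or_free(pre);		/* slot occupied -> plain free */
	obj_cache_exit();		/* drain the slot at teardown */
	return 0;
}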
void sigqueue_free(struct sigqueue *q)