Diffstat (limited to 'kernel/signal.c')
 kernel/signal.c | 59
 1 file changed, 2 insertions(+), 57 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index f1ecd8f0c11d..30a0bee5ff9b 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -431,22 +431,7 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
 	rcu_read_unlock();
 
 	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
-		/*
-		 * Preallocation does not hold sighand::siglock so it can't
-		 * use the cache. The lockless caching requires that only
-		 * one consumer and only one producer run at a time.
-		 *
-		 * For the regular allocation case it is sufficient to
-		 * check @q for NULL because this code can only be called
-		 * if the target task @t has not been reaped yet; which
-		 * means this code can never observe the error pointer which is
-		 * written to @t->sigqueue_cache in exit_task_sigqueue_cache().
-		 */
-		q = READ_ONCE(t->sigqueue_cache);
-		if (!q || sigqueue_flags)
-			q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
-		else
-			WRITE_ONCE(t->sigqueue_cache, NULL);
+		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
 	} else {
 		print_dropped_signal(sig);
 	}
@@ -463,53 +448,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
 	return q;
 }
 
-void exit_task_sigqueue_cache(struct task_struct *tsk)
-{
-	/* Race free because @tsk is mopped up */
-	struct sigqueue *q = tsk->sigqueue_cache;
-
-	if (q) {
-		/*
-		 * Hand it back to the cache as the task might
-		 * be self reaping which would leak the object.
-		 */
-		kmem_cache_free(sigqueue_cachep, q);
-	}
-
-	/*
-	 * Set an error pointer to ensure that @tsk will not cache a
-	 * sigqueue when it is reaping it's child tasks
-	 */
-	tsk->sigqueue_cache = ERR_PTR(-1);
-}
-
-static void sigqueue_cache_or_free(struct sigqueue *q)
-{
-	/*
-	 * Cache one sigqueue per task. This pairs with the consumer side
-	 * in __sigqueue_alloc() and needs READ/WRITE_ONCE() to prevent the
-	 * compiler from store tearing and to tell KCSAN that the data race
-	 * is intentional when run without holding current->sighand->siglock,
-	 * which is fine as current obviously cannot run __sigqueue_free()
-	 * concurrently.
-	 *
-	 * The NULL check is safe even if current has been reaped already,
-	 * in which case exit_task_sigqueue_cache() wrote an error pointer
-	 * into current->sigqueue_cache.
-	 */
-	if (!READ_ONCE(current->sigqueue_cache))
-		WRITE_ONCE(current->sigqueue_cache, q);
-	else
-		kmem_cache_free(sigqueue_cachep, q);
-}
-
 static void __sigqueue_free(struct sigqueue *q)
 {
 	if (q->flags & SIGQUEUE_PREALLOC)
 		return;
 	if (atomic_dec_and_test(&q->user->sigpending))
 		free_uid(q->user);
-	sigqueue_cache_or_free(q);
+	kmem_cache_free(sigqueue_cachep, q);
 }
 
 void flush_sigqueue(struct sigpending *queue)
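The helpers removed above implemented a single-slot, lockless per-task cache: __sigqueue_free() (running as current, the producer) parks at most one sigqueue in current->sigqueue_cache, and the regular __sigqueue_alloc() path (the consumer, serialized by sighand::siglock) takes it back, with READ_ONCE()/WRITE_ONCE() guarding against store tearing. The userspace sketch below is only an illustration of that single-producer/single-consumer pattern, not kernel code: obj, obj_cache, cache_get() and cache_put() are hypothetical names, malloc()/free() stand in for kmem_cache_alloc()/kmem_cache_free(), and READ_ONCE()/WRITE_ONCE() are approximated with plain volatile accesses rather than the kernel's definitions.

/*
 * Illustrative sketch of a single-slot, lockless object cache.
 * Safe only while exactly one producer and one consumer can touch
 * the slot at a time, matching the constraint the removed comments
 * describe.
 */
#include <stdio.h>
#include <stdlib.h>

#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))

struct obj {
	int payload;
};

struct obj_cache {
	struct obj *slot;	/* at most one cached object, or NULL */
};

/* Consumer side: reuse the cached object if present, else allocate. */
static struct obj *cache_get(struct obj_cache *c)
{
	struct obj *o = READ_ONCE(c->slot);

	if (!o)
		return malloc(sizeof(*o));	/* stand-in for kmem_cache_alloc() */
	WRITE_ONCE(c->slot, NULL);
	return o;
}

/* Producer side: stash one object for reuse, free any surplus. */
static void cache_put(struct obj_cache *c, struct obj *o)
{
	if (!READ_ONCE(c->slot))
		WRITE_ONCE(c->slot, o);
	else
		free(o);			/* stand-in for kmem_cache_free() */
}

int main(void)
{
	struct obj_cache cache = { .slot = NULL };
	struct obj *o = cache_get(&cache);

	if (!o)
		return 1;
	o->payload = 42;
	cache_put(&cache, o);	/* slot was empty: object is cached, not freed */
	o = cache_get(&cache);	/* reuses the cached object, no allocation */
	printf("payload %d\n", o->payload);
	free(o);
	return 0;
}

The pattern only holds while a single producer and a single consumer can reach the slot at any time; in the removed kernel code that constraint came from pairing the free path (always current) with the alloc path running under sighand::siglock, as the deleted comments in __sigqueue_alloc() and sigqueue_cache_or_free() spell out.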