diff --git a/features/rt/signals-Allow-RT-tasks-to-cache-one-sigqueue-struct.patch b/features/rt/signals-Allow-RT-tasks-to-cache-one-sigqueue-struct.patch
new file mode 100644
index 00000000..ae43826e
--- /dev/null
+++ b/features/rt/signals-Allow-RT-tasks-to-cache-one-sigqueue-struct.patch
@@ -0,0 +1,216 @@
+From 7f989d7d619da9e6ad512bc325d36b0055fca72c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 3 Jul 2009 08:44:56 -0500
+Subject: [PATCH 188/191] signals: Allow RT tasks to cache one sigqueue struct
+
+Allow realtime tasks to cache one sigqueue in the task struct. This avoids
+an allocation which can cause latencies or can fail.
+Ideally the sigqueue is cached after the first successful delivery and will be
+available for the next signal delivery. This works under the assumption that
+the RT task never has an unprocessed signal while one is about to be queued.
+The caching is not used for SIGQUEUE_PREALLOC because this kind of sigqueue is
+handled differently (and not used for regular signal delivery).
+
+[bigeasy: With a fix from Matt Fleming <matt@codeblueprint.co.uk>]
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/sched.h | 1 +
+ include/linux/signal.h | 1 +
+ kernel/exit.c | 2 +-
+ kernel/fork.c | 1 +
+ kernel/signal.c | 67 +++++++++++++++++++++++++++++++++++++++---
+ 5 files changed, 67 insertions(+), 5 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 8e566b8049aa..635e08a5aa71 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -988,6 +988,7 @@ struct task_struct {
+ /* Signal handlers: */
+ struct signal_struct *signal;
+ struct sighand_struct __rcu *sighand;
++ struct sigqueue *sigqueue_cache;
+ sigset_t blocked;
+ sigset_t real_blocked;
+ /* Restored if set_restore_sigmask() was used: */
+diff --git a/include/linux/signal.h b/include/linux/signal.h
+index 205526c4003a..d47a86790edc 100644
+--- a/include/linux/signal.h
++++ b/include/linux/signal.h
+@@ -265,6 +265,7 @@ static inline void init_sigpending(struct sigpending *sig)
+ }
+
+ extern void flush_sigqueue(struct sigpending *queue);
++extern void flush_task_sigqueue(struct task_struct *tsk);
+
+ /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
+ static inline int valid_signal(unsigned long sig)
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 04029e35e69a..346f7b76ceca 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -152,7 +152,7 @@ static void __exit_signal(struct task_struct *tsk)
+ * Do this under ->siglock, we can race with another thread
+ * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
+ */
+- flush_sigqueue(&tsk->pending);
++ flush_task_sigqueue(tsk);
+ tsk->sighand = NULL;
+ spin_unlock(&sighand->siglock);
+
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 5fdb0a1bbad8..146a019df72a 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -2027,6 +2027,7 @@ static __latent_entropy struct task_struct *copy_process(
+ spin_lock_init(&p->alloc_lock);
+
+ init_sigpending(&p->pending);
++ p->sigqueue_cache = NULL;
+
+ p->utime = p->stime = p->gtime = 0;
+ #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+diff --git a/kernel/signal.c b/kernel/signal.c
+index a5d16084f54c..5baac8c5d2e9 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -20,6 +20,7 @@
+ #include <linux/sched/task.h>
+ #include <linux/sched/task_stack.h>
+ #include <linux/sched/cputime.h>
++#include <linux/sched/rt.h>
+ #include <linux/file.h>
+ #include <linux/fs.h>
+ #include <linux/proc_fs.h>
+@@ -404,13 +405,30 @@ void task_join_group_stop(struct task_struct *task)
+ task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
+ }
+
++static struct sigqueue *sigqueue_from_cache(struct task_struct *t)
++{
++ struct sigqueue *q = t->sigqueue_cache;
++
++ if (q && cmpxchg(&t->sigqueue_cache, q, NULL) == q)
++ return q;
++ return NULL;
++}
++
++static bool sigqueue_add_cache(struct task_struct *t, struct sigqueue *q)
++{
++ if (!t->sigqueue_cache && cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
++ return true;
++ return false;
++}
++
+ /*
+ * allocate a new signal queue record
+ * - this may be called without locks if and only if t == current, otherwise an
+ * appropriate lock must be held to stop the target task from exiting
+ */
+ static struct sigqueue *
+-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
++__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
++ int override_rlimit, bool fromslab)
+ {
+ struct sigqueue *q = NULL;
+ struct user_struct *user;
+@@ -432,7 +450,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
+ rcu_read_unlock();
+
+ if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
+- q = kmem_cache_alloc(sigqueue_cachep, flags);
++ if (!fromslab)
++ q = sigqueue_from_cache(t);
++ if (!q)
++ q = kmem_cache_alloc(sigqueue_cachep, flags);
+ } else {
+ print_dropped_signal(sig);
+ }
+@@ -449,6 +470,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
+ return q;
+ }
+
++static struct sigqueue *
++__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
++ int override_rlimit)
++{
++ return __sigqueue_do_alloc(sig, t, flags, override_rlimit, false);
++}
++
+ static void __sigqueue_free(struct sigqueue *q)
+ {
+ if (q->flags & SIGQUEUE_PREALLOC)
+@@ -458,6 +486,20 @@ static void __sigqueue_free(struct sigqueue *q)
+ kmem_cache_free(sigqueue_cachep, q);
+ }
+
++static void __sigqueue_cache_or_free(struct sigqueue *q)
++{
++ struct user_struct *up;
++
++ if (q->flags & SIGQUEUE_PREALLOC)
++ return;
++
++ up = q->user;
++ if (atomic_dec_and_test(&up->sigpending))
++ free_uid(up);
++ if (!task_is_realtime(current) || !sigqueue_add_cache(current, q))
++ kmem_cache_free(sigqueue_cachep, q);
++}
++
+ void flush_sigqueue(struct sigpending *queue)
+ {
+ struct sigqueue *q;
+@@ -470,6 +512,21 @@ void flush_sigqueue(struct sigpending *queue)
+ }
+ }
+
++/*
++ * Called from __exit_signal. Flush tsk->pending and
++ * tsk->sigqueue_cache
++ */
++void flush_task_sigqueue(struct task_struct *tsk)
++{
++ struct sigqueue *q;
++
++ flush_sigqueue(&tsk->pending);
++
++ q = sigqueue_from_cache(tsk);
++ if (q)
++ kmem_cache_free(sigqueue_cachep, q);
++}
++
+ /*
+ * Flush all pending signals for this kthread.
+ */
+@@ -594,7 +651,7 @@ static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *i
+ (info->si_code == SI_TIMER) &&
+ (info->si_sys_private);
+
+- __sigqueue_free(first);
++ __sigqueue_cache_or_free(first);
+ } else {
+ /*
+ * Ok, it wasn't in the queue. This must be
+@@ -631,6 +688,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *in
+ bool resched_timer = false;
+ int signr;
+
++ WARN_ON_ONCE(tsk != current);
++
+ /* We only dequeue private signals from ourselves, we don't let
+ * signalfd steal them
+ */
+@@ -1835,7 +1894,7 @@ EXPORT_SYMBOL(kill_pid);
+ */
+ struct sigqueue *sigqueue_alloc(void)
+ {
+- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
++ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, true);
+
+ if (q)
+ q->flags |= SIGQUEUE_PREALLOC;
+--
+2.19.1
+
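
The core of the patch is the lock-free single-slot cache implemented by
sigqueue_from_cache() and sigqueue_add_cache(). A minimal userspace model of
that cmpxchg() pattern follows, as a sketch assuming C11 atomics; the
sigqueue_model struct, the global slot standing in for
current->sigqueue_cache, and the cache_get()/cache_put() names are
illustrative only and do not exist in the kernel.

	/*
	 * Illustrative model of the single-slot sigqueue cache above.
	 * Not kernel code: a global atomic slot stands in for
	 * current->sigqueue_cache, and C11 atomics stand in for the
	 * kernel's cmpxchg().
	 */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct sigqueue_model { int sig; };

	/* One cache slot per task; a single global models it here. */
	static _Atomic(struct sigqueue_model *) slot_cache = NULL;

	/*
	 * Models sigqueue_from_cache(): claim the cached entry with a
	 * compare-and-swap so two paths cannot take it twice.
	 */
	static struct sigqueue_model *cache_get(void)
	{
		struct sigqueue_model *q = atomic_load(&slot_cache);

		if (q && atomic_compare_exchange_strong(&slot_cache, &q, NULL))
			return q;
		return NULL;
	}

	/*
	 * Models sigqueue_add_cache(): install the entry only if the slot
	 * is empty; the caller frees it when this returns false.
	 */
	static bool cache_put(struct sigqueue_model *q)
	{
		struct sigqueue_model *expected = NULL;

		return atomic_compare_exchange_strong(&slot_cache, &expected, q);
	}

	int main(void)
	{
		struct sigqueue_model *q = malloc(sizeof(*q));

		/* Delivery path: recycle into the cache instead of freeing. */
		if (!cache_put(q))
			free(q);

		/* Next queueing: reuse the cached entry, no allocator call. */
		q = cache_get();
		printf("reused cached entry: %s\n", q ? "yes" : "no");
		free(q);
		return 0;
	}

Note the design choice the sketch preserves: a failed compare-and-swap in
either direction simply falls back to the allocator (kmem_cache_alloc()/
kmem_cache_free() in the patch), so the cache never takes a lock and never
blocks, which is what the RT signal delivery path needs.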