Diffstat (limited to 'include/linux/sched.h')
 -rw-r--r--  include/linux/sched.h | 207 +++++++++++++++++++++++++++++++----
 1 file changed, 194 insertions(+), 13 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 57cacb2b62c7..0b762e939885 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -26,6 +26,7 @@ struct sched_param {
#include <linux/nodemask.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>
+#include <asm/kmap_types.h>
#include <asm/page.h>
#include <asm/ptrace.h>
@@ -243,10 +244,7 @@ extern char ___assert_task_state[1 - 2*!!(
TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
__TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
-#define task_is_stopped_or_traced(task) \
- ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0 && \
@@ -312,6 +310,11 @@ extern char ___assert_task_state[1 - 2*!!(
#endif
+#define __set_current_state_no_track(state_value) \
+ do { current->state = (state_value); } while (0)
+#define set_current_state_no_track(state_value) \
+ set_mb(current->state, (state_value))
+
/* Task command name length */
#define TASK_COMM_LEN 16
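
The __set_current_state_no_track()/set_current_state_no_track() pair changes current->state exactly like the regular helpers, but without the extra bookkeeping the tracked variants carry (notably the task_state_change recording under CONFIG_DEBUG_ATOMIC_SLEEP, whose field appears further down in this diff). A minimal sketch of the intended style of use, assuming an RT "sleeping spinlock" slowpath as the caller; the function name and the wait-list step are illustrative, not the actual rtmutex code:

/*
 * Illustrative only: park the caller for a sleeping lock without the
 * state-change tracking that set_current_state() performs.
 */
static void demo_sleeping_lock_slowpath(void)
{
	set_current_state_no_track(TASK_UNINTERRUPTIBLE);
	/* ...enqueue on the lock's wait list, drop the wait_lock... */
	schedule();
	__set_current_state_no_track(TASK_RUNNING);
}
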
@@ -1009,8 +1012,18 @@ struct wake_q_head {
struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
extern void wake_q_add(struct wake_q_head *head,
- struct task_struct *task);
-extern void wake_up_q(struct wake_q_head *head);
+ struct task_struct *task);
+extern void __wake_up_q(struct wake_q_head *head, bool sleeper);
+
+static inline void wake_up_q(struct wake_q_head *head)
+{
+ __wake_up_q(head, false);
+}
+
+static inline void wake_up_q_sleeper(struct wake_q_head *head)
+{
+ __wake_up_q(head, true);
+}
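
wake_q grows a "sleeper" flavour here: __wake_up_q() takes a bool, wake_up_q() keeps its old behaviour, and wake_up_q_sleeper() is the variant intended for waiters that went to sleep on an RT sleeping lock (compare the wake_up_lock_sleeper() declaration further down). A hedged usage sketch; the deferred-wakeup pattern is ordinary wake_q usage, but the caller below is illustrative:

/*
 * Illustrative caller: waiters were queued with wake_q_add() while a
 * raw lock was held; wake them after it is dropped, using the sleeper
 * variant when they blocked on a sleeping lock.
 */
static void demo_wake_waiters(struct wake_q_head *wq, bool lock_sleepers)
{
	if (lock_sleepers)
		wake_up_q_sleeper(wq);
	else
		wake_up_q(wq);
}
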
/*
* sched-domains (multiprocessor balancing) declarations:
@@ -1466,6 +1479,7 @@ struct task_struct {
struct thread_info thread_info;
#endif
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
+ volatile long saved_state; /* saved state for "spinlock sleepers" */
void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
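
saved_state is the anchor for RT's "spinlock sleeper" handling: when a task that already set itself TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE has to block on an rtmutex-based spinlock, its original state is stashed in saved_state while it sleeps for the lock, and is restored when wake_up_lock_sleeper() (declared later in this diff) wakes it. A rough sketch of the save/restore idea under ->pi_lock; this is a simplification, not the real schedule()/try_to_wake_up() code:

/* Simplified sketch of the saved_state handshake (not the scheduler code). */
static void demo_block_on_sleeping_lock(struct task_struct *p)
{
	raw_spin_lock_irq(&p->pi_lock);
	p->saved_state = p->state;		/* remember e.g. TASK_INTERRUPTIBLE */
	p->state = TASK_UNINTERRUPTIBLE;	/* now sleeping for the lock */
	raw_spin_unlock_irq(&p->pi_lock);
}

static void demo_wake_lock_sleeper(struct task_struct *p)
{
	raw_spin_lock_irq(&p->pi_lock);
	p->state = p->saved_state;		/* restore the caller's own state */
	p->saved_state = TASK_RUNNING;
	raw_spin_unlock_irq(&p->pi_lock);
	wake_up_process(p);	/* the real code does this inside try_to_wake_up() */
}
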
@@ -1505,6 +1519,12 @@ struct task_struct {
#endif
unsigned int policy;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ int migrate_disable;
+# ifdef CONFIG_SCHED_DEBUG
+ int migrate_disable_atomic;
+# endif
+#endif
int nr_cpus_allowed;
cpumask_t cpus_allowed;
@@ -1643,6 +1663,9 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
+#ifdef CONFIG_PREEMPT_RT_BASE
+ struct task_struct *posix_timer_list;
+#endif
/* process credentials */
const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
@@ -1674,10 +1697,15 @@ struct task_struct {
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
+ struct sigqueue *sigqueue_cache;
sigset_t blocked, real_blocked;
sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
struct sigpending pending;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /* TODO: move me into ->restart_block ? */
+ struct siginfo forced_info;
+#endif
unsigned long sas_ss_sp;
size_t sas_ss_size;
@@ -1906,6 +1934,12 @@ struct task_struct {
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+ u64 preempt_timestamp_hist;
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ long timer_offset;
+#endif
+#endif
#ifdef CONFIG_KCOV
/* Coverage collection mode enabled for this task (0 if disabled). */
enum kcov_mode kcov_mode;
@@ -1931,9 +1965,23 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+ struct rcu_head put_rcu;
+ int softirq_nestcnt;
+ unsigned int softirqs_raised;
+#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
+ int kmap_idx;
+ pte_t kmap_pte[KM_TYPE_NR];
+# endif
+#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+ int xmit_recursion;
+#endif
int pagefault_disabled;
#ifdef CONFIG_MMU
struct task_struct *oom_reaper_list;
@@ -1969,14 +2017,6 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
}
#endif
-/* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-
-static inline int tsk_nr_cpus_allowed(struct task_struct *p)
-{
- return p->nr_cpus_allowed;
-}
-
#define TNF_MIGRATED 0x01
#define TNF_NO_GROUP 0x02
#define TNF_SHARED 0x04
@@ -2192,6 +2232,15 @@ extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+#ifdef CONFIG_PREEMPT_RT_BASE
+extern void __put_task_struct_cb(struct rcu_head *rhp);
+
+static inline void put_task_struct(struct task_struct *t)
+{
+ if (atomic_dec_and_test(&t->usage))
+ call_rcu(&t->put_rcu, __put_task_struct_cb);
+}
+#else
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
@@ -2199,6 +2248,7 @@ static inline void put_task_struct(struct task_struct *t)
if (atomic_dec_and_test(&t->usage))
__put_task_struct(t);
}
+#endif
struct task_struct *task_rcu_dereference(struct task_struct **ptask);
struct task_struct *try_get_task_struct(struct task_struct **ptask);
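
On CONFIG_PREEMPT_RT_BASE the final put of a task_struct is deferred through call_rcu() via the put_rcu head added to task_struct earlier in this diff, instead of calling __put_task_struct() directly from whatever context drops the last reference. A plausible shape for the callback, assuming __put_task_struct() in kernel/fork.c stays the underlying release function; the definition below is a sketch, not the patch's actual code:

/* Sketch of the RCU callback side (the real one lives in kernel/fork.c). */
void __put_task_struct_cb(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);

	__put_task_struct(tsk);
}
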
@@ -2240,6 +2290,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
/*
* Per process flags
*/
+#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
@@ -2408,6 +2459,10 @@ extern void do_set_cpus_allowed(struct task_struct *p,
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
+int migrate_me(void);
+void tell_sched_cpu_down_begin(int cpu);
+void tell_sched_cpu_down_done(int cpu);
+
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
@@ -2420,6 +2475,9 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
return -EINVAL;
return 0;
}
+static inline int migrate_me(void) { return 0; }
+static inline void tell_sched_cpu_down_begin(int cpu) { }
+static inline void tell_sched_cpu_down_done(int cpu) { }
#endif
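
migrate_me() and the tell_sched_cpu_down_begin()/done() pair above exist only on SMP and belong to RT's CPU-hotplug handling: the begin/done calls bracket the offlining of a CPU so the scheduler stops targeting it, and migrate_me() lets the current task move itself off a CPU that is going down (judging from the !SMP stub, a zero return means nothing was migrated). A hedged sketch of the calling pattern; the surrounding functions are illustrative, not the kernel's real hotplug path:

/* Illustrative hotplug-side bracketing (not the real cpu_down() code). */
static int demo_take_cpu_down(int cpu)
{
	tell_sched_cpu_down_begin(cpu);
	/* ...push tasks away, run the usual offline machinery... */
	tell_sched_cpu_down_done(cpu);
	return 0;
}

/* Task-side: try to get off the dying CPU. */
static void demo_leave_dying_cpu(void)
{
	if (!migrate_me())
		pr_debug("task still pinned to this CPU\n");
}
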
#ifdef CONFIG_NO_HZ_COMMON
@@ -2656,6 +2714,7 @@ extern void xtime_update(unsigned long ticks);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
+extern int wake_up_lock_sleeper(struct task_struct * tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
@@ -2864,6 +2923,17 @@ static inline void mmdrop(struct mm_struct *mm)
__mmdrop(mm);
}
+#ifdef CONFIG_PREEMPT_RT_BASE
+extern void __mmdrop_delayed(struct rcu_head *rhp);
+static inline void mmdrop_delayed(struct mm_struct *mm)
+{
+ if (atomic_dec_and_test(&mm->mm_count))
+ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
+}
+#else
+# define mmdrop_delayed(mm) mmdrop(mm)
+#endif
+
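
This mirrors the put_task_struct() change above: on RT the last mmdrop() can happen in a context where freeing the mm immediately is undesirable, so the drop is punted through call_rcu() using an mm_struct delayed_drop head (added by a companion change to mm_types.h, not shown here); without RT, mmdrop_delayed() is plain mmdrop(). A plausible shape for the callback, mirroring the task_struct case; the real definition lives elsewhere (kernel/fork.c in mainline):

/* Sketch of the deferred-drop callback (not the patch's actual code). */
void __mmdrop_delayed(struct rcu_head *rhp)
{
	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);

	__mmdrop(mm);
}
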
static inline bool mmget_not_zero(struct mm_struct *mm)
{
return atomic_inc_not_zero(&mm->mm_users);
@@ -3232,6 +3302,43 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
+#ifdef CONFIG_PREEMPT_LAZY
+static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
+}
+
+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
+}
+
+static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
+}
+
+static inline int need_resched_lazy(void)
+{
+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+}
+
+static inline int need_resched_now(void)
+{
+ return test_thread_flag(TIF_NEED_RESCHED);
+}
+
+#else
+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
+static inline int need_resched_lazy(void) { return 0; }
+
+static inline int need_resched_now(void)
+{
+ return test_thread_flag(TIF_NEED_RESCHED);
+}
+
+#endif
+
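
CONFIG_PREEMPT_LAZY splits "need resched" in two: TIF_NEED_RESCHED still demands preemption as soon as possible, while TIF_NEED_RESCHED_LAZY only asks for a reschedule at the next convenient point, so that e.g. waking a SCHED_OTHER task does not immediately preempt the current one; without the option the lazy helpers compile away. A hedged sketch of one plausible consumer, a return-to-user style check where both flavours are honoured; this is illustrative, not the actual entry or preemption code:

/* Illustrative check on the way back to user space (not the real entry code). */
static void demo_exit_to_user_resched(void)
{
	if (need_resched_now() || need_resched_lazy())
		schedule();
}
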
static inline int restart_syscall(void)
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
@@ -3263,6 +3370,51 @@ static inline int signal_pending_state(long state, struct task_struct *p)
return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
+static inline bool __task_is_stopped_or_traced(struct task_struct *task)
+{
+ if (task->state & (__TASK_STOPPED | __TASK_TRACED))
+ return true;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
+ return true;
+#endif
+ return false;
+}
+
+static inline bool task_is_stopped_or_traced(struct task_struct *task)
+{
+ bool traced_stopped;
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
+ traced_stopped = __task_is_stopped_or_traced(task);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+#else
+ traced_stopped = __task_is_stopped_or_traced(task);
+#endif
+ return traced_stopped;
+}
+
+static inline bool task_is_traced(struct task_struct *task)
+{
+ bool traced = false;
+
+ if (task->state & __TASK_TRACED)
+ return true;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /* in case the task is sleeping on tasklist_lock */
+ raw_spin_lock_irq(&task->pi_lock);
+ if (task->state & __TASK_TRACED)
+ traced = true;
+ else if (task->saved_state & __TASK_TRACED)
+ traced = true;
+ raw_spin_unlock_irq(&task->pi_lock);
+#endif
+ return traced;
+}
+
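
The task_is_traced()/task_is_stopped_or_traced() macros removed near the top of this diff return here as functions, because on RT a stopped or traced task may itself be blocked on a sleeping spinlock, in which case the __TASK_STOPPED/__TASK_TRACED bits sit in saved_state; the helpers therefore check both words under ->pi_lock so they do not race with the sleeper being woken and its state restored. Callers keep their old shape, they just pay for a possible lock round-trip on RT; a minimal illustrative caller:

/* Illustrative caller, e.g. a ptrace-style "has the child stopped?" check. */
static bool demo_child_stopped(struct task_struct *child)
{
	return task_is_stopped_or_traced(child);
}
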
/*
* cond_resched() and cond_resched_lock(): latency reduction via
* explicit rescheduling in places that are safe. The return
@@ -3284,12 +3436,16 @@ extern int __cond_resched_lock(spinlock_t *lock);
__cond_resched_lock(lock); \
})
+#ifndef CONFIG_PREEMPT_RT_FULL
extern int __cond_resched_softirq(void);
#define cond_resched_softirq() ({ \
___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
__cond_resched_softirq(); \
})
+#else
+# define cond_resched_softirq() cond_resched()
+#endif
static inline void cond_resched_rcu(void)
{
@@ -3455,6 +3611,31 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
+static inline int __migrate_disabled(struct task_struct *p)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+ return p->migrate_disable;
+#else
+ return 0;
+#endif
+}
+
+/* Future-safe accessor for struct task_struct's cpus_allowed. */
+static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
+{
+ if (__migrate_disabled(p))
+ return cpumask_of(task_cpu(p));
+
+ return &p->cpus_allowed;
+}
+
+static inline int tsk_nr_cpus_allowed(struct task_struct *p)
+{
+ if (__migrate_disabled(p))
+ return 1;
+ return p->nr_cpus_allowed;
+}
+
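
This is the other half of the earlier removal: tsk_cpus_allowed() and tsk_nr_cpus_allowed() move below the CONFIG_SMP code and become migrate_disable()-aware, so while a task sits inside a migrate_disable() section the rest of the scheduler sees it as runnable only on the CPU it currently occupies. A small hedged sketch of the effect from a load-balancing style caller; the function is illustrative:

/* Illustrative: may @p run on @cpu right now? */
static bool demo_can_run_on(struct task_struct *p, int cpu)
{
	/* Inside migrate_disable() this collapses to task_cpu(p) only. */
	return cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
}
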
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);