Diffstat (limited to 'features/rt/arch-arm64-Add-lazy-preempt-support.patch')
-rw-r--r--	features/rt/arch-arm64-Add-lazy-preempt-support.patch	170
1 file changed, 170 insertions, 0 deletions
diff --git a/features/rt/arch-arm64-Add-lazy-preempt-support.patch b/features/rt/arch-arm64-Add-lazy-preempt-support.patch
new file mode 100644
index 00000000..cc7a56d7
--- /dev/null
+++ b/features/rt/arch-arm64-Add-lazy-preempt-support.patch
@@ -0,0 +1,170 @@
+From f4aa6daf2286c2e06200d7909bb812c7ceb09e69 Mon Sep 17 00:00:00 2001
+From: Anders Roxell <anders.roxell@linaro.org>
+Date: Thu, 14 May 2015 17:52:17 +0200
+Subject: [PATCH 168/191] arch/arm64: Add lazy preempt support
+
+arm64 is missing support for PREEMPT_RT. The main feature which is
+lacking is support for lazy preemption. The arch-specific entry code,
+thread information structure definitions, and associated data tables
+have to be extended to provide this support. Then the Kconfig file has
+to be extended to indicate the support is available, and also to
+indicate that support for full RT preemption is now available.
+
+Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
+---
+ arch/arm64/Kconfig                   |  1 +
+ arch/arm64/include/asm/preempt.h     | 25 ++++++++++++++++++++++++-
+ arch/arm64/include/asm/thread_info.h |  8 +++++++-
+ arch/arm64/kernel/asm-offsets.c      |  1 +
+ arch/arm64/kernel/entry.S            | 13 +++++++++++--
+ arch/arm64/kernel/signal.c           |  2 +-
+ 6 files changed, 45 insertions(+), 5 deletions(-)
+
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 5656e7aacd69..93a4dd767825 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -182,6 +182,7 @@ config ARM64
+ 	select HAVE_PERF_REGS
+ 	select HAVE_PERF_USER_STACK_DUMP
+ 	select HAVE_REGS_AND_STACK_ACCESS_API
++	select HAVE_PREEMPT_LAZY
+ 	select HAVE_FUNCTION_ARG_ACCESS_API
+ 	select HAVE_FUTEX_CMPXCHG if FUTEX
+ 	select MMU_GATHER_RCU_TABLE_FREE
+diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
+index f06a23898540..994f997b1572 100644
+--- a/arch/arm64/include/asm/preempt.h
++++ b/arch/arm64/include/asm/preempt.h
+@@ -70,13 +70,36 @@ static inline bool __preempt_count_dec_and_test(void)
+ 	 * interrupt occurring between the non-atomic READ_ONCE/WRITE_ONCE
+ 	 * pair.
+ 	 */
+-	return !pc || !READ_ONCE(ti->preempt_count);
++	if (!pc || !READ_ONCE(ti->preempt_count))
++		return true;
++#ifdef CONFIG_PREEMPT_LAZY
++	if ((pc & ~PREEMPT_NEED_RESCHED))
++		return false;
++	if (current_thread_info()->preempt_lazy_count)
++		return false;
++	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++#else
++	return false;
++#endif
+ }
+
+ static inline bool should_resched(int preempt_offset)
+ {
++#ifdef CONFIG_PREEMPT_LAZY
++	u64 pc = READ_ONCE(current_thread_info()->preempt_count);
++	if (pc == preempt_offset)
++		return true;
++
++	if ((pc & ~PREEMPT_NEED_RESCHED) != preempt_offset)
++		return false;
++
++	if (current_thread_info()->preempt_lazy_count)
++		return false;
++	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++#else
+ 	u64 pc = READ_ONCE(current_thread_info()->preempt_count);
+ 	return pc == preempt_offset;
++#endif
+ }
+
+ #ifdef CONFIG_PREEMPTION
+diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
+index 9f4e3b266f21..d3fa570c7235 100644
+--- a/arch/arm64/include/asm/thread_info.h
++++ b/arch/arm64/include/asm/thread_info.h
+@@ -26,6 +26,7 @@ struct thread_info {
+ #ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ 	u64			ttbr0;		/* saved TTBR0_EL1 */
+ #endif
++	int			preempt_lazy_count;	/* 0 => preemptable, <0 => bug */
+ 	union {
+ 		u64		preempt_count;	/* 0 => preemptible, <0 => bug */
+ 		struct {
+@@ -65,6 +66,7 @@ void arch_release_task_struct(struct task_struct *tsk);
+ #define TIF_UPROBE		4	/* uprobe breakpoint or singlestep */
+ #define TIF_MTE_ASYNC_FAULT	5	/* MTE Asynchronous Tag Check Fault */
+ #define TIF_NOTIFY_SIGNAL	6	/* signal notifications exist */
++#define TIF_NEED_RESCHED_LAZY	7
+ #define TIF_SYSCALL_TRACE	8	/* syscall trace active */
+ #define TIF_SYSCALL_AUDIT	9	/* syscall auditing */
+ #define TIF_SYSCALL_TRACEPOINT	10	/* syscall tracepoint for ftrace */
+@@ -95,8 +97,10 @@ void arch_release_task_struct(struct task_struct *tsk);
+ #define _TIF_SVE		(1 << TIF_SVE)
+ #define _TIF_MTE_ASYNC_FAULT	(1 << TIF_MTE_ASYNC_FAULT)
+ #define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
++#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)
+
+-#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
++#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \
++				 _TIF_SIGPENDING | \
+ 				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
+ 				 _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \
+ 				 _TIF_NOTIFY_SIGNAL)
+@@ -105,6 +109,8 @@ void arch_release_task_struct(struct task_struct *tsk);
+ 				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
+ 				 _TIF_SYSCALL_EMU)
+
++#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
++
+ #ifdef CONFIG_SHADOW_CALL_STACK
+ #define INIT_SCS							\
+ 	.scs_base	= init_shadow_call_stack,			\
+diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
+index a36e2fc330d4..b94354a3af96 100644
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -30,6 +30,7 @@ int main(void)
+   BLANK();
+   DEFINE(TSK_TI_FLAGS,		offsetof(struct task_struct, thread_info.flags));
+   DEFINE(TSK_TI_PREEMPT,	offsetof(struct task_struct, thread_info.preempt_count));
++  DEFINE(TSK_TI_PREEMPT_LAZY,	offsetof(struct task_struct, thread_info.preempt_lazy_count));
+ #ifdef CONFIG_ARM64_SW_TTBR0_PAN
+   DEFINE(TSK_TI_TTBR0,		offsetof(struct task_struct, thread_info.ttbr0));
+ #endif
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index a31a0a713c85..63e1ad8c2ea8 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -678,9 +678,18 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ 	mrs	x0, daif
+ 	orr	x24, x24, x0
+ alternative_else_nop_endif
+-	cbnz	x24, 1f					// preempt count != 0 || NMI return path
+-	bl	arm64_preempt_schedule_irq		// irq en/disable is done inside
++
++	cbz	x24, 1f					// (need_resched + count) == 0
++	cbnz	w24, 2f					// count != 0
++
++	ldr	w24, [tsk, #TSK_TI_PREEMPT_LAZY]	// get preempt lazy count
++	cbnz	w24, 2f					// preempt lazy count != 0
++
++	ldr	x0, [tsk, #TSK_TI_FLAGS]		// get flags
++	tbz	x0, #TIF_NEED_RESCHED_LAZY, 2f		// needs rescheduling?
+ 1:
++	bl	arm64_preempt_schedule_irq		// irq en/disable is done inside
++2:
+ #endif
+
+ 	mov	x0, sp
+diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
+index 6237486ff6bb..ab411f336c39 100644
+--- a/arch/arm64/kernel/signal.c
++++ b/arch/arm64/kernel/signal.c
+@@ -915,7 +915,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
+ 				 unsigned long thread_flags)
+ {
+ 	do {
+-		if (thread_flags & _TIF_NEED_RESCHED) {
++		if (thread_flags & _TIF_NEED_RESCHED_MASK) {
+ 			/* Unmask Debug and SError for the next task */
+ 			local_daif_restore(DAIF_PROCCTX_NOIRQ);
+
+--
+2.19.1
+
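
The core of the patch is a two-level resched test: a full TIF_NEED_RESCHED request preempts as soon as preempt_count reaches zero, while a TIF_NEED_RESCHED_LAZY request must additionally wait for the new preempt_lazy_count to reach zero. The user-space C sketch below models that decision; it is an illustration, not kernel code — the names thread_info_model and model_should_resched are invented here, and the real arm64 folding of PREEMPT_NEED_RESCHED into the 64-bit preempt_count is simplified to a plain flag bit.

/*
 * Minimal sketch, assuming the semantics described in the commit
 * message above: a lazy resched request is honoured only once both
 * preempt_count and preempt_lazy_count have dropped to zero.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TIF_NEED_RESCHED	(1u << 1)	/* full, immediate request */
#define TIF_NEED_RESCHED_LAZY	(1u << 7)	/* bit number from the patch */

struct thread_info_model {
	uint64_t preempt_count;		/* 0 => preemptible */
	int      preempt_lazy_count;	/* 0 => lazy preemption allowed */
	uint32_t flags;			/* TIF_* bits */
};

/*
 * A full resched request wins as soon as preempt_count is zero; a
 * lazy request additionally requires preempt_lazy_count to be zero,
 * which is what lets non-RT tasks run on with less preemption churn
 * under PREEMPT_RT.
 */
static bool model_should_resched(const struct thread_info_model *ti)
{
	if (ti->flags & TIF_NEED_RESCHED)
		return ti->preempt_count == 0;
	if (ti->preempt_count || ti->preempt_lazy_count)
		return false;
	return ti->flags & TIF_NEED_RESCHED_LAZY;
}

int main(void)
{
	struct thread_info_model ti = {
		.preempt_count = 0,
		.preempt_lazy_count = 1,	/* lazy section still held */
		.flags = TIF_NEED_RESCHED_LAZY,
	};

	printf("lazy count held:     %d\n", model_should_resched(&ti)); /* 0 */
	ti.preempt_lazy_count = 0;
	printf("lazy count released: %d\n", model_should_resched(&ti)); /* 1 */
	return 0;
}

Built with any C99 compiler, the first query prints 0 (the lazy request is held off while preempt_lazy_count is raised) and the second prints 1, mirroring the entry.S path above, which only branches to arm64_preempt_schedule_irq once both counters are zero.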