Diffstat (limited to 'features/rt/powerpc-Add-support-for-lazy-preemption.patch')
-rw-r--r-- | features/rt/powerpc-Add-support-for-lazy-preemption.patch | 215 ++++++++++++++++++++++++++
1 file changed, 215 insertions(+), 0 deletions(-)
diff --git a/features/rt/powerpc-Add-support-for-lazy-preemption.patch b/features/rt/powerpc-Add-support-for-lazy-preemption.patch
new file mode 100644
index 00000000..4cc73824
--- /dev/null
+++ b/features/rt/powerpc-Add-support-for-lazy-preemption.patch
@@ -0,0 +1,215 @@
+From 2525cb41cb38be3c1639aed5deb00823f5a472f4 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 1 Nov 2012 10:14:11 +0100
+Subject: [PATCH 167/191] powerpc: Add support for lazy preemption
+
+Implement the powerpc pieces for lazy preempt.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/powerpc/Kconfig                   |  1 +
+ arch/powerpc/include/asm/thread_info.h |  7 +++++++
+ arch/powerpc/kernel/asm-offsets.c      |  1 +
+ arch/powerpc/kernel/entry_32.S         | 11 +++++++++--
+ arch/powerpc/kernel/exceptions-64e.S   | 16 ++++++++++++----
+ arch/powerpc/kernel/interrupt.c        | 10 +++++++---
+ 6 files changed, 37 insertions(+), 9 deletions(-)
+
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 386ae12d8523..bbee9b2f2bc7 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -231,6 +231,7 @@ config PPC
+ 	select HAVE_HARDLOCKUP_DETECTOR_PERF	if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
+ 	select HAVE_PERF_REGS
+ 	select HAVE_PERF_USER_STACK_DUMP
++	select HAVE_PREEMPT_LAZY
+ 	select MMU_GATHER_RCU_TABLE_FREE
+ 	select MMU_GATHER_PAGE_SIZE
+ 	select HAVE_REGS_AND_STACK_ACCESS_API
+diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
+index 386d576673a1..730ce15944ce 100644
+--- a/arch/powerpc/include/asm/thread_info.h
++++ b/arch/powerpc/include/asm/thread_info.h
+@@ -48,6 +48,8 @@
+ struct thread_info {
+ 	int		preempt_count;		/* 0 => preemptable,
+ 						   <0 => BUG */
++	int		preempt_lazy_count;	/* 0 => preemptable,
++						   <0 => BUG */
+ 	unsigned long	local_flags;		/* private flags for thread */
+ #ifdef CONFIG_LIVEPATCH
+ 	unsigned long *livepatch_sp;
+@@ -94,6 +96,7 @@ void arch_setup_new_exec(void);
+ #define TIF_PATCH_PENDING	6	/* pending live patching update */
+ #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
+ #define TIF_SINGLESTEP		8	/* singlestepping active */
++#define TIF_NEED_RESCHED_LAZY	9	/* lazy rescheduling necessary */
+ #define TIF_SECCOMP		10	/* secure computing */
+ #define TIF_RESTOREALL		11	/* Restore all regs (implies NOERROR) */
+ #define TIF_NOERROR		12	/* Force successful syscall return */
+@@ -109,6 +112,7 @@ void arch_setup_new_exec(void);
+ #define TIF_POLLING_NRFLAG	19	/* true if poll_idle() is polling TIF_NEED_RESCHED */
+ #define TIF_32BIT		20	/* 32 bit binary */
+ 
++
+ /* as above, but as bit values */
+ #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
+ #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
+@@ -120,6 +124,7 @@ void arch_setup_new_exec(void);
+ #define _TIF_PATCH_PENDING	(1<<TIF_PATCH_PENDING)
+ #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
+ #define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
++#define _TIF_NEED_RESCHED_LAZY	(1<<TIF_NEED_RESCHED_LAZY)
+ #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
+ #define _TIF_RESTOREALL		(1<<TIF_RESTOREALL)
+ #define _TIF_NOERROR		(1<<TIF_NOERROR)
+@@ -133,10 +138,12 @@ void arch_setup_new_exec(void);
+ 				 _TIF_SYSCALL_EMU)
+ 
+ #define _TIF_USER_WORK_MASK	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
++				 _TIF_NEED_RESCHED_LAZY | \
+ 				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ 				 _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
+ 				 _TIF_NOTIFY_SIGNAL)
+ #define _TIF_PERSYSCALL_MASK	(_TIF_RESTOREALL|_TIF_NOERROR)
++#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+ 
+ /* Bits in local_flags */
+ /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
+diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
+index f3a662201a9f..1202c9c2e5b5 100644
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -191,6 +191,7 @@ int main(void)
+ 	OFFSET(TI_FLAGS, thread_info, flags);
+ 	OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
+ 	OFFSET(TI_PREEMPT, thread_info, preempt_count);
++	OFFSET(TI_PREEMPT_LAZY, thread_info, preempt_lazy_count);
+ 
+ #ifdef CONFIG_PPC64
+ 	OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size);
+diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
+index 78c430b7f9d9..b778938b4a5b 100644
+--- a/arch/powerpc/kernel/entry_32.S
++++ b/arch/powerpc/kernel/entry_32.S
+@@ -674,7 +674,14 @@ resume_kernel:
+ 	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
+ 	bne	restore_kuap
+ 	andi.	r8,r8,_TIF_NEED_RESCHED
++	bne+	1f
++	lwz	r0,TI_PREEMPT_LAZY(r2)
++	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
++	bne	restore_kuap
++	lwz	r0,TI_FLAGS(r2)
++	andi.	r0,r0,_TIF_NEED_RESCHED_LAZY
+ 	beq+	restore_kuap
++1:
+ 	lwz	r3,_MSR(r1)
+ 	andi.	r0,r3,MSR_EE	/* interrupts off? */
+ 	beq	restore_kuap	/* don't schedule if so */
+@@ -989,7 +996,7 @@ global_dbcr0:
+ #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
+ 
+ do_work:			/* r10 contains MSR_KERNEL here */
+-	andi.	r0,r9,_TIF_NEED_RESCHED
++	andi.	r0,r9,_TIF_NEED_RESCHED_MASK
+ 	beq	do_user_signal
+ 
+ do_resched:			/* r10 contains MSR_KERNEL here */
+@@ -1008,7 +1015,7 @@ recheck:
+ 	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
+ 	mtmsr	r10		/* disable interrupts */
+ 	lwz	r9,TI_FLAGS(r2)
+-	andi.	r0,r9,_TIF_NEED_RESCHED
++	andi.	r0,r9,_TIF_NEED_RESCHED_MASK
+ 	bne-	do_resched
+ 	andi.	r0,r9,_TIF_USER_WORK_MASK
+ 	beq	restore_user
+diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
+index e8eb9992a270..6eb9599a3262 100644
+--- a/arch/powerpc/kernel/exceptions-64e.S
++++ b/arch/powerpc/kernel/exceptions-64e.S
+@@ -1074,7 +1074,7 @@ _GLOBAL(ret_from_except_lite)
+ 	li	r10, -1
+ 	mtspr	SPRN_DBSR,r10
+ 	b	restore
+-1:	andi.	r0,r4,_TIF_NEED_RESCHED
++1:	andi.	r0,r4,_TIF_NEED_RESCHED_MASK
+ 	beq	2f
+ 	bl	restore_interrupts
+ 	SCHEDULE_USER
+@@ -1126,12 +1126,20 @@ resume_kernel:
+ 	bne-	0b
+ 1:
+ 
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ 	/* Check if we need to preempt */
++	lwz	r8,TI_PREEMPT(r9)
++	cmpwi	0,r8,0		/* if non-zero, just restore regs and return */
++	bne	restore
+ 	andi.	r0,r4,_TIF_NEED_RESCHED
++	bne+	check_count
++
++	andi.	r0,r4,_TIF_NEED_RESCHED_LAZY
+ 	beq+	restore
++	lwz	r8,TI_PREEMPT_LAZY(r9)
++
+ 	/* Check that preempt_count() == 0 and interrupts are enabled */
+-	lwz	r8,TI_PREEMPT(r9)
++check_count:
+ 	cmpwi	cr0,r8,0
+ 	bne	restore
+ 	ld	r0,SOFTE(r1)
+@@ -1152,7 +1160,7 @@ resume_kernel:
+ 	 * interrupted after loading SRR0/1.
+ 	 */
+ 	wrteei	0
+-#endif /* CONFIG_PREEMPT */
++#endif /* CONFIG_PREEMPTION */
+ 
+ restore:
+ 	/*
+diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
+index c475a229a42a..d6b69de1284c 100644
+--- a/arch/powerpc/kernel/interrupt.c
++++ b/arch/powerpc/kernel/interrupt.c
+@@ -286,7 +286,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
+ 	ti_flags = READ_ONCE(current_thread_info()->flags);
+ 	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
+ 		local_irq_enable();
+-		if (ti_flags & _TIF_NEED_RESCHED) {
++		if (ti_flags & _TIF_NEED_RESCHED_MASK) {
+ 			schedule();
+ 		} else {
+ 			/*
+@@ -381,7 +381,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
+ 	ti_flags = READ_ONCE(current_thread_info()->flags);
+ 	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
+ 		local_irq_enable(); /* returning to user: may enable */
+-		if (ti_flags & _TIF_NEED_RESCHED) {
++		if (ti_flags & _TIF_NEED_RESCHED_MASK) {
+ 			schedule();
+ 		} else {
+ 			if (ti_flags & _TIF_SIGPENDING)
+@@ -473,11 +473,15 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
+ 	/* Returning to a kernel context with local irqs enabled. */
+ 	WARN_ON_ONCE(!(regs->msr & MSR_EE));
+ again:
+-	if (IS_ENABLED(CONFIG_PREEMPT)) {
++	if (IS_ENABLED(CONFIG_PREEMPTION)) {
+ 		/* Return to preemptible kernel context */
+ 		if (unlikely(current_thread_info()->flags & _TIF_NEED_RESCHED)) {
+ 			if (preempt_count() == 0)
+ 				preempt_schedule_irq();
++		} else if (unlikely(current_thread_info()->flags & _TIF_NEED_RESCHED_LAZY)) {
++			if ((preempt_count() == 0) &&
++			    (current_thread_info()->preempt_lazy_count == 0))
++				preempt_schedule_irq();
+ 		}
+ 	}
+ 
+-- 
+2.19.1
+
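
Note on the logic above: the patch pairs the existing preempt_count with a new per-thread preempt_lazy_count, and pairs TIF_NEED_RESCHED with a new TIF_NEED_RESCHED_LAZY flag. On return to a kernel context, a hard reschedule request preempts whenever preempt_count alone is zero, while a lazy request additionally requires preempt_lazy_count to be zero. Below is a minimal userspace C sketch of that decision as the interrupt.c hunk adds it to interrupt_exit_kernel_prepare(); the harness (the struct definition, should_preempt(), main(), and the TIF_NEED_RESCHED bit value) is hypothetical, with only the field and flag names mirroring the patch.

/*
 * Userspace model of the lazy-preemption check -- not kernel code.
 * Field and flag names follow the patch; everything else is illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define TIF_NEED_RESCHED	2	/* illustrative bit number */
#define TIF_NEED_RESCHED_LAZY	9	/* bit number from the patch */

#define _TIF_NEED_RESCHED	(1UL << TIF_NEED_RESCHED)
#define _TIF_NEED_RESCHED_LAZY	(1UL << TIF_NEED_RESCHED_LAZY)

/* Stand-in for the thread_info fields the patch touches. */
struct thread_info {
	unsigned long flags;
	int preempt_count;	/* 0 => preemptable */
	int preempt_lazy_count;	/* 0 => lazy-preemptable */
};

/* Mirrors the CONFIG_PREEMPTION branch in interrupt_exit_kernel_prepare(). */
static bool should_preempt(const struct thread_info *ti)
{
	if (ti->flags & _TIF_NEED_RESCHED)	/* hard request */
		return ti->preempt_count == 0;
	if (ti->flags & _TIF_NEED_RESCHED_LAZY)	/* lazy request */
		return ti->preempt_count == 0 &&
		       ti->preempt_lazy_count == 0;
	return false;
}

int main(void)
{
	/* Lazy request while a lazy section is held: no preemption. */
	struct thread_info lazy = { _TIF_NEED_RESCHED_LAZY, 0, 1 };
	/* Hard request ignores the lazy counter: preempts. */
	struct thread_info hard = { _TIF_NEED_RESCHED, 0, 1 };

	printf("lazy request, lazy count 1 -> %d\n", should_preempt(&lazy));
	printf("hard request, lazy count 1 -> %d\n", should_preempt(&hard));
	return 0;
}

The asymmetry between the two flags is the point of the series: ordinary wakeups can be signalled with the lazy flag and deferred past short lazy-disabled sections, while urgent (e.g. real-time) reschedule requests keep the usual immediate TIF_NEED_RESCHED behaviour.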