Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                        |  6
-rw-r--r--  arch/powerpc/include/asm/spinlock_types.h   |  4
-rw-r--r--  arch/powerpc/include/asm/stackprotector.h   |  4
-rw-r--r--  arch/powerpc/include/asm/thread_info.h      | 16
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c           |  1
-rw-r--r--  arch/powerpc/kernel/entry_32.S              | 29
-rw-r--r--  arch/powerpc/kernel/entry_64.S              | 26
-rw-r--r--  arch/powerpc/kernel/irq.c                   |  2
-rw-r--r--  arch/powerpc/kernel/misc_32.S               |  2
-rw-r--r--  arch/powerpc/kernel/misc_64.S               |  2
-rw-r--r--  arch/powerpc/kernel/traps.c                 |  1
-rw-r--r--  arch/powerpc/kernel/watchdog.c              |  5
-rw-r--r--  arch/powerpc/kvm/Kconfig                    |  1
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c   |  2
-rw-r--r--  arch/powerpc/platforms/ps3/device-init.c    |  4
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c      | 16
16 files changed, 78 insertions(+), 43 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2890d36eb531..df0cc2dcbcd8 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -105,10 +105,11 @@ config LOCKDEP_SUPPORT
config RWSEM_GENERIC_SPINLOCK
bool
+ default y if PREEMPT_RT_FULL
config RWSEM_XCHGADD_ALGORITHM
bool
- default y
+ default y if !PREEMPT_RT_FULL
config GENERIC_LOCKBREAK
bool
@@ -218,6 +219,7 @@ config PPC
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_PREEMPT_LAZY
select HAVE_RCU_TABLE_FREE if SMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if PPC64 && CPU_LITTLE_ENDIAN
@@ -394,7 +396,7 @@ menu "Kernel options"
config HIGHMEM
bool "High memory support"
- depends on PPC32
+ depends on PPC32 && !PREEMPT_RT_FULL
source "kernel/Kconfig.hz"
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h
index 87adaf13b7e8..7305cb6a53e4 100644
--- a/arch/powerpc/include/asm/spinlock_types.h
+++ b/arch/powerpc/include/asm/spinlock_types.h
@@ -2,10 +2,6 @@
#ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
#define _ASM_POWERPC_SPINLOCK_TYPES_H
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
typedef struct {
volatile unsigned int slock;
} arch_spinlock_t;
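
The guard removal is deliberate, not cleanup: the RT tree splits the lock-type headers so the raw (always-spinning) types can be pulled in without going through linux/spinlock_types.h, which now describes the rtmutex-based sleeping spinlock_t. A sketch of the consumer that makes the direct include necessary (per the RT tree's spinlock_types_raw.h, an assumption here):

    /* sketch: include/linux/spinlock_types_raw.h includes the arch
     * header directly, outside __LINUX_SPINLOCK_TYPES_H */
    #include <asm/spinlock_types.h>

    typedef struct raw_spinlock {
            arch_spinlock_t raw_lock;
    } raw_spinlock_t;
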
diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h
index 1c8460e23583..e764eb4b6c28 100644
--- a/arch/powerpc/include/asm/stackprotector.h
+++ b/arch/powerpc/include/asm/stackprotector.h
@@ -24,7 +24,11 @@ static __always_inline void boot_init_stack_canary(void)
unsigned long canary;
/* Try to get a semi random initial value. */
+#ifdef CONFIG_PREEMPT_RT_FULL
+ canary = (unsigned long)&canary;
+#else
canary = get_random_canary();
+#endif
canary ^= mftb();
canary ^= LINUX_VERSION_CODE;
canary &= CANARY_MASK;
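
The RT fallback exists because boot_init_stack_canary() runs in a context where get_random_long() may take locks that become sleeping locks on RT, so the patch settles for weaker entropy: the canary's own stack address, still mixed with the timebase and version code below. For reference, the helper being bypassed is roughly (a sketch of include/linux/random.h):

    /* sketch: the non-RT canary helper this #ifdef avoids */
    static inline unsigned long get_random_canary(void)
    {
            unsigned long val = get_random_long();

            return val & CANARY_MASK;
    }
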
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 544cac0474cb..f9132398566d 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -38,6 +38,8 @@ struct thread_info {
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable,
<0 => BUG */
+ int preempt_lazy_count; /* 0 => preemptable,
+ <0 => BUG */
unsigned long local_flags; /* private flags for thread */
#ifdef CONFIG_LIVEPATCH
unsigned long *livepatch_sp;
@@ -99,11 +101,12 @@ void arch_setup_new_exec(void);
#define TIF_SINGLESTEP 8 /* singlestepping active */
#define TIF_NOHZ 9 /* in adaptive nohz mode */
#define TIF_SECCOMP 10 /* secure computing */
-#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
-#define TIF_NOERROR 12 /* Force successful syscall return */
+
+#define TIF_NEED_RESCHED_LAZY 11 /* lazy rescheduling necessary */
+#define TIF_SYSCALL_TRACEPOINT 12 /* syscall tracepoint instrumentation */
+
#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
#define TIF_UPROBE 14 /* breakpointed or single-stepping */
-#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
#define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
for stack store? */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
@@ -112,6 +115,9 @@ void arch_setup_new_exec(void);
#endif
#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_32BIT 20 /* 32 bit binary */
+#define TIF_RESTOREALL 21 /* Restore all regs (implies NOERROR) */
+#define TIF_NOERROR 22 /* Force successful syscall return */
+
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -131,6 +137,7 @@ void arch_setup_new_exec(void);
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
#define _TIF_NOHZ (1<<TIF_NOHZ)
+#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY)
#define _TIF_FSCHECK (1<<TIF_FSCHECK)
#define _TIF_SYSCALL_EMU (1<<TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
@@ -140,8 +147,9 @@ void arch_setup_new_exec(void);
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
_TIF_NOTIFY_RESUME | _TIF_UPROBE | \
_TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
- _TIF_FSCHECK)
+ _TIF_FSCHECK | _TIF_NEED_RESCHED_LAZY)
#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
/* Bits in local_flags */
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
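
The renumbering above is driven by instruction encoding: ppc `andi.` only takes a 16-bit unsigned immediate, so any flag tested in the hot entry paths must live in bits 0-15. TIF_NEED_RESCHED_LAZY therefore gets low bit 11 next to TIF_NEED_RESCHED, while TIF_RESTOREALL/TIF_NOERROR move up to bits 21/22, where the slow path tests them with `andis.` against the @h half. The combined check the entry code performs is, in C (helper name is illustrative):

    /* sketch: both resched flags sit in the low half, one andi. tests both */
    #define _TIF_NEED_RESCHED_MASK  (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)

    static inline bool tif_need_resched_any(struct thread_info *ti)
    {
            return ti->flags & _TIF_NEED_RESCHED_MASK;
    }
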
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 9ffc72ded73a..f23224826a84 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -161,6 +161,7 @@ int main(void)
OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
OFFSET(TI_PREEMPT, thread_info, preempt_count);
+ OFFSET(TI_PREEMPT_LAZY, thread_info, preempt_lazy_count);
OFFSET(TI_TASK, thread_info, task);
OFFSET(TI_CPU, thread_info, cpu);
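
TI_PREEMPT_LAZY hands the assembly in entry_32.S/entry_64.S the byte offset of the new thread_info field. For reference, OFFSET() emits the value through an asm marker that kbuild's sed pass turns into a #define in asm-offsets.h; roughly (a sketch of include/linux/kbuild.h):

    /* sketch: how OFFSET() becomes "#define TI_PREEMPT_LAZY <n>" */
    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
    #define OFFSET(sym, str, mem) \
            DEFINE(sym, offsetof(struct str, mem))
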
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index fdd528cdb2ee..1b6c95d49cd2 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -393,7 +393,9 @@ ret_from_syscall:
MTMSRD(r10)
lwz r9,TI_FLAGS(r12)
li r8,-MAX_ERRNO
- andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
+ lis r0,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@h
+ ori r0,r0,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@l
+ and. r0,r9,r0
bne- syscall_exit_work
cmplw 0,r3,r8
blt+ syscall_exit_cont
@@ -511,13 +513,13 @@ syscall_dotrace:
b syscall_dotrace_cont
syscall_exit_work:
- andi. r0,r9,_TIF_RESTOREALL
+ andis. r0,r9,_TIF_RESTOREALL@h
beq+ 0f
REST_NVGPRS(r1)
b 2f
0: cmplw 0,r3,r8
blt+ 1f
- andi. r0,r9,_TIF_NOERROR
+ andis. r0,r9,_TIF_NOERROR@h
bne- 1f
lwz r11,_CCR(r1) /* Load CR */
neg r3,r3
@@ -526,12 +528,12 @@ syscall_exit_work:
1: stw r6,RESULT(r1) /* Save result */
stw r3,GPR3(r1) /* Update return value */
-2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
+2: andis. r0,r9,(_TIF_PERSYSCALL_MASK)@h
beq 4f
/* Clear per-syscall TIF flags if any are set. */
- li r11,_TIF_PERSYSCALL_MASK
+ lis r11,_TIF_PERSYSCALL_MASK@h
addi r12,r12,TI_FLAGS
3: lwarx r8,0,r12
andc r8,r8,r11
@@ -888,7 +890,14 @@ resume_kernel:
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
andi. r8,r8,_TIF_NEED_RESCHED
+ bne+ 1f
+ lwz r0,TI_PREEMPT_LAZY(r9)
+ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
+ bne restore
+ lwz r0,TI_FLAGS(r9)
+ andi. r0,r0,_TIF_NEED_RESCHED_LAZY
beq+ restore
+1:
lwz r3,_MSR(r1)
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
@@ -899,11 +908,11 @@ resume_kernel:
*/
bl trace_hardirqs_off
#endif
-1: bl preempt_schedule_irq
+2: bl preempt_schedule_irq
CURRENT_THREAD_INFO(r9, r1)
lwz r3,TI_FLAGS(r9)
- andi. r0,r3,_TIF_NEED_RESCHED
- bne- 1b
+ andi. r0,r3,_TIF_NEED_RESCHED_MASK
+ bne- 2b
#ifdef CONFIG_TRACE_IRQFLAGS
/* And now, to properly rebalance the above, we tell lockdep they
* are being turned back on, which will happen when we return
@@ -1232,7 +1241,7 @@ global_dbcr0:
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
do_work: /* r10 contains MSR_KERNEL here */
- andi. r0,r9,_TIF_NEED_RESCHED
+ andi. r0,r9,_TIF_NEED_RESCHED_MASK
beq do_user_signal
do_resched: /* r10 contains MSR_KERNEL here */
@@ -1253,7 +1262,7 @@ recheck:
MTMSRD(r10) /* disable interrupts */
CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_FLAGS(r9)
- andi. r0,r9,_TIF_NEED_RESCHED
+ andi. r0,r9,_TIF_NEED_RESCHED_MASK
bne- do_resched
andi. r0,r9,_TIF_USER_WORK_MASK
beq restore_user
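
The resume_kernel changes encode the lazy-preemption policy: a regular NEED_RESCHED preempts whenever preempt_count is zero, while NEED_RESCHED_LAZY additionally backs off when preempt_lazy_count is non-zero. A C rendition of the flow above (a sketch, not generated code):

    if (ti->preempt_count)
            goto restore;                        /* no preemption at all   */
    if (!(ti->flags & _TIF_NEED_RESCHED)) {
            if (ti->preempt_lazy_count)
                    goto restore;                /* inside a lazy section  */
            if (!(ti->flags & _TIF_NEED_RESCHED_LAZY))
                    goto restore;                /* nothing pending        */
    }
    if (regs->msr & MSR_EE)                      /* only if irqs were on   */
            do {
                    preempt_schedule_irq();
            } while (current_thread_info()->flags & _TIF_NEED_RESCHED_MASK);
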
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index a2c168b395d2..abdd089c20a6 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -253,7 +253,9 @@ system_call_exit:
ld r9,TI_FLAGS(r12)
li r11,-MAX_ERRNO
- andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
+ lis r0,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@h
+ ori r0,r0,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@l
+ and. r0,r9,r0
bne- .Lsyscall_exit_work
andi. r0,r8,MSR_FP
@@ -370,25 +372,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
If TIF_NOERROR is set, just save r3 as it is. */
- andi. r0,r9,_TIF_RESTOREALL
+ andis. r0,r9,_TIF_RESTOREALL@h
beq+ 0f
REST_NVGPRS(r1)
b 2f
0: cmpld r3,r11 /* r11 is -MAX_ERRNO */
blt+ 1f
- andi. r0,r9,_TIF_NOERROR
+ andis. r0,r9,_TIF_NOERROR@h
bne- 1f
ld r5,_CCR(r1)
neg r3,r3
oris r5,r5,0x1000 /* Set SO bit in CR */
std r5,_CCR(r1)
1: std r3,GPR3(r1)
-2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
+2: andis. r0,r9,(_TIF_PERSYSCALL_MASK)@h
beq 4f
/* Clear per-syscall TIF flags if any are set. */
- li r11,_TIF_PERSYSCALL_MASK
+ lis r11,(_TIF_PERSYSCALL_MASK)@h
addi r12,r12,TI_FLAGS
3: ldarx r10,0,r12
andc r10,r10,r11
@@ -780,7 +782,7 @@ _GLOBAL(ret_from_except_lite)
bl restore_math
b restore
#endif
-1: andi. r0,r4,_TIF_NEED_RESCHED
+1: andi. r0,r4,_TIF_NEED_RESCHED_MASK
beq 2f
bl restore_interrupts
SCHEDULE_USER
@@ -842,10 +844,18 @@ resume_kernel:
#ifdef CONFIG_PREEMPT
/* Check if we need to preempt */
+ lwz r8,TI_PREEMPT(r9)
+ cmpwi 0,r8,0 /* if non-zero, just restore regs and return */
+ bne restore
andi. r0,r4,_TIF_NEED_RESCHED
+ bne+ check_count
+
+ andi. r0,r4,_TIF_NEED_RESCHED_LAZY
beq+ restore
+ lwz r8,TI_PREEMPT_LAZY(r9)
+
/* Check that preempt_count() == 0 and interrupts are enabled */
- lwz r8,TI_PREEMPT(r9)
+check_count:
cmpwi cr0,r8,0
bne restore
ld r0,SOFTE(r1)
@@ -862,7 +872,7 @@ resume_kernel:
/* Re-test flags and eventually loop */
CURRENT_THREAD_INFO(r9, r1)
ld r4,TI_FLAGS(r9)
- andi. r0,r4,_TIF_NEED_RESCHED
+ andi. r0,r4,_TIF_NEED_RESCHED_MASK
bne 1b
/*
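
Two mechanical notes on the 64-bit syscall exit: the widened flag mask now spans both 16-bit immediate halves, hence the lis/ori/and. sequence replacing a single andi., and clearing the per-syscall flags must be an atomic read-modify-write on ti->flags because other flags can change concurrently. The ldarx/andc/stdcx. loop at label 3 is, in C, roughly:

    /* sketch of the atomic flag clear (label 3 above) */
    unsigned long old, new;

    do {
            old = READ_ONCE(ti->flags);              /* ldarx          */
            new = old & ~_TIF_PERSYSCALL_MASK;       /* andc           */
    } while (cmpxchg(&ti->flags, old, new) != old);  /* stdcx. + retry */
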
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 916ddc4aac44..833d27f85aea 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -766,6 +766,7 @@ void irq_ctx_init(void)
}
}
+#ifndef CONFIG_PREEMPT_RT_FULL
void do_softirq_own_stack(void)
{
struct thread_info *curtp, *irqtp;
@@ -783,6 +784,7 @@ void do_softirq_own_stack(void)
if (irqtp->flags)
set_bits(irqtp->flags, &curtp->flags);
}
+#endif
irq_hw_number_t virq_to_hw(unsigned int virq)
{
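
do_softirq_own_stack() compiles out because the RT tree replaces the softirq core with a threaded model: pending softirqs run in the context of the task that raised them (or in ksoftirqd), so there is no dedicated softirq stack to switch to. A sketch of the RT-side loop (names follow the RT tree's kernel/softirq.c and are an assumption, not part of this diff):

    /* sketch: RT runs pending softirqs in process context */
    static void do_current_softirqs(void)
    {
            while (current->softirqs_raised) {
                    unsigned int i = __ffs(current->softirqs_raised);

                    current->softirqs_raised &= ~(1U << i);
                    handle_softirq(i);   /* preemptible, no stack switch */
            }
    }
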
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 57d2ffb2d45c..479958bf1a48 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -42,6 +42,7 @@
* We store the saved ksp_limit in the unused part
* of the STACK_FRAME_OVERHEAD
*/
+#ifndef CONFIG_PREEMPT_RT_FULL
_GLOBAL(call_do_softirq)
mflr r0
stw r0,4(r1)
@@ -58,6 +59,7 @@ _GLOBAL(call_do_softirq)
stw r10,THREAD+KSP_LIMIT(r2)
mtlr r0
blr
+#endif
/*
* void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 262ba9481781..4935ef9a142e 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -32,6 +32,7 @@
.text
+#ifndef CONFIG_PREEMPT_RT_FULL
_GLOBAL(call_do_softirq)
mflr r0
std r0,16(r1)
@@ -42,6 +43,7 @@ _GLOBAL(call_do_softirq)
ld r0,16(r1)
mtlr r0
blr
+#endif
_GLOBAL(call_do_irq)
mflr r0
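
The call_do_softirq stubs in misc_32.S/misc_64.S sit behind the same #ifndef as their only caller, do_softirq_own_stack() in irq.c, so nothing references them once that function is compiled out. For context, the non-RT caller looks roughly like this (a sketch; the full version also propagates irqtp->flags, as the irq.c hunk above shows):

    void do_softirq_own_stack(void)
    {
            struct thread_info *curtp = current_thread_info();
            struct thread_info *irqtp = softirq_ctx[smp_processor_id()];

            irqtp->task = curtp->task;
            call_do_softirq(irqtp);      /* switch to the softirq stack */
            irqtp->task = NULL;
    }
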
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 7a1de34f38c8..314be8e39d1f 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -174,7 +174,6 @@ extern void panic_flush_kmsg_start(void)
extern void panic_flush_kmsg_end(void)
{
- printk_safe_flush_on_panic();
kmsg_dump(KMSG_DUMP_PANIC);
bust_spinlocks(0);
debug_locks_off();
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index 3c6ab22a0c4e..bf98181c5b30 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -181,11 +181,6 @@ static void watchdog_smp_panic(int cpu, u64 tb)
wd_smp_unlock(&flags);
- printk_safe_flush();
- /*
- * printk_safe_flush() seems to require another print
- * before anything actually goes out to console.
- */
if (sysctl_hardlockup_all_cpu_backtrace)
trigger_allbutself_cpu_backtrace();
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index bfdde04e4905..0bebb00698e8 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -178,6 +178,7 @@ config KVM_E500MC
config KVM_MPIC
bool "KVM in-kernel MPIC emulation"
depends on KVM && E500
+ depends on !PREEMPT_RT_FULL
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQFD
select HAVE_KVM_IRQ_ROUTING
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 9fcccb4490b9..a24010d4f35e 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -141,7 +141,7 @@ void __spu_update_sched_info(struct spu_context *ctx)
* runqueue. The context will be rescheduled on the proper node
* if it is timesliced or preempted.
*/
- cpumask_copy(&ctx->cpus_allowed, &current->cpus_allowed);
+ cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr);
/* Save the current cpu id for spu interrupt routing. */
ctx->last_ran = raw_smp_processor_id();
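
The cpus_allowed to cpus_ptr rename follows the affinity-pointer rework the RT series depends on: readers go through a pointer so that a task inside a migrate_disable() section can be pinned to one CPU temporarily without rewriting its real affinity mask. Sketch of the relevant task_struct fields (an assumption based on that rework, not shown in this diff):

    struct task_struct {
            /* ... */
            const cpumask_t *cpus_ptr;    /* what readers must dereference  */
            cpumask_t        cpus_mask;   /* the real mask; cpus_ptr points */
                                          /* here outside migrate_disable() */
    };
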
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
index e7075aaff1bb..1580464a9d5b 100644
--- a/arch/powerpc/platforms/ps3/device-init.c
+++ b/arch/powerpc/platforms/ps3/device-init.c
@@ -752,8 +752,8 @@ static int ps3_notification_read_write(struct ps3_notification_device *dev,
}
pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op);
- res = wait_event_interruptible(dev->done.wait,
- dev->done.done || kthread_should_stop());
+ res = swait_event_interruptible_exclusive(dev->done.wait,
+ dev->done.done || kthread_should_stop());
if (kthread_should_stop())
res = -EINTR;
if (res) {
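
The swait conversion is forced by another RT change: struct completion is rebuilt on simple waitqueues, whose wake path is short, bounded, and safe under a raw spinlock. dev->done is such a completion, so this open-coded wait on dev->done.wait has to use the swait API. Roughly what changes underneath (a sketch):

    /* sketch: RT's completion carries a simple waitqueue */
    struct completion {
            unsigned int            done;
            struct swait_queue_head wait;   /* was wait_queue_head_t */
    };
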
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 8fc8fe0b9848..14ecedbd8ff1 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -38,6 +38,7 @@
#include <linux/of.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
+#include <linux/locallock.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
@@ -191,6 +192,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
}
static DEFINE_PER_CPU(__be64 *, tce_page);
+static DEFINE_LOCAL_IRQ_LOCK(tcp_page_lock);
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr,
@@ -211,7 +213,8 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
direction, attrs);
}
- local_irq_save(flags); /* to protect tcep and the page behind it */
+ /* to protect tcep and the page behind it */
+ local_lock_irqsave(tcp_page_lock, flags);
tcep = __this_cpu_read(tce_page);
@@ -222,7 +225,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
/* If allocation fails, fall back to the loop implementation */
if (!tcep) {
- local_irq_restore(flags);
+ local_unlock_irqrestore(tcp_page_lock, flags);
return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
direction, attrs);
}
@@ -256,7 +259,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
tcenum += limit;
} while (npages > 0 && !rc);
- local_irq_restore(flags);
+ local_unlock_irqrestore(tcp_page_lock, flags);
if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
ret = (int)rc;
@@ -414,13 +417,14 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
u64 rc = 0;
long l, limit;
- local_irq_disable(); /* to protect tcep and the page behind it */
+ /* to protect tcep and the page behind it */
+ local_lock_irq(tcp_page_lock);
tcep = __this_cpu_read(tce_page);
if (!tcep) {
tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
if (!tcep) {
- local_irq_enable();
+ local_unlock_irq(tcp_page_lock);
return -ENOMEM;
}
__this_cpu_write(tce_page, tcep);
@@ -466,7 +470,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
/* error cleanup: caller will clear whole range */
- local_irq_enable();
+ local_unlock_irq(tcp_page_lock);
return rc;
}
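
On the locallock semantics: for non-RT builds, local_lock_irqsave() collapses to plain local_irq_save(), so this diff changes nothing there; on RT it becomes a per-CPU lock, keeping the TCE-page sections preemptible while still serializing all users of the same lock on a CPU. A sketch of the two expansions (following the RT tree's include/linux/locallock.h, an assumption here):

    /* sketch: what DEFINE_LOCAL_IRQ_LOCK/local_lock_irqsave amount to */
    #ifndef CONFIG_PREEMPT_RT_FULL
    # define local_lock_irqsave(lvar, flags)       local_irq_save(flags)
    # define local_unlock_irqrestore(lvar, flags)  local_irq_restore(flags)
    #else
    /* per-CPU spinlock (rtmutex-based), taken with migration disabled;
     * the section may sleep but stays serialized against same-lock users */
    #endif
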