Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/crypto/chacha-avx512vl-x86_64.S |  4 ++--
-rw-r--r--  arch/x86/kvm/mmu.c                       | 10 ++++++----
-rw-r--r--  arch/x86/um/shared/sysdep/syscalls_64.h  |  5 ++---
-rw-r--r--  arch/x86/xen/smp_pv.c                    |  3 ++-
-rw-r--r--  arch/x86/xen/xen-head.S                  | 18 ++++++++++++++++--
5 files changed, 28 insertions(+), 12 deletions(-)
diff --git a/arch/x86/crypto/chacha-avx512vl-x86_64.S b/arch/x86/crypto/chacha-avx512vl-x86_64.S
index 848f9c75fd4f..596f91e3a774 100644
--- a/arch/x86/crypto/chacha-avx512vl-x86_64.S
+++ b/arch/x86/crypto/chacha-avx512vl-x86_64.S
@@ -172,7 +172,7 @@ ENTRY(chacha_2block_xor_avx512vl)
# xor remaining bytes from partial register into output
mov %rcx,%rax
and $0xf,%rcx
- jz .Ldone8
+ jz .Ldone2
mov %rax,%r9
and $~0xf,%r9
@@ -438,7 +438,7 @@ ENTRY(chacha_4block_xor_avx512vl)
# xor remaining bytes from partial register into output
mov %rcx,%rax
and $0xf,%rcx
- jz .Ldone8
+ jz .Ldone4
mov %rax,%r9
and $~0xf,%r9
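
Note: the two hunks above fix copy-paste branch targets. The 2-block and
4-block ChaCha variants jumped to .Ldone8, a label that belongs to the
8-block routine, whenever no partial 16-byte block remained; each variant
must exit through its own .Ldone2/.Ldone4 label. A minimal C model of the
tail logic being guarded (names hypothetical; the real code operates on
vector registers):

	#include <stddef.h>
	#include <stdint.h>

	/* XOR the tail left over after all whole 16-byte chunks.
	 * When (len & 0xf) == 0 there is no partial block, which is
	 * exactly the case that must branch to the routine's own
	 * .LdoneN label rather than another routine's. */
	static void chacha_xor_tail(uint8_t *dst, const uint8_t *src,
				    const uint8_t *stream, size_t len)
	{
		size_t full = len & ~(size_t)0xf; /* bytes already done */
		size_t i;

		for (i = full; i < len; i++) /* remaining 0..15 bytes */
			dst[i] = src[i] ^ stream[i];
	}
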
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d3877dd713ae..015da62e4ad7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5821,6 +5821,7 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
struct kvm_mmu_page *sp, *node;
int nr_zapped, batch = 0;
+ bool unstable;
restart:
list_for_each_entry_safe_reverse(sp, node,
@@ -5853,11 +5854,12 @@ restart:
goto restart;
}
- if (__kvm_mmu_prepare_zap_page(kvm, sp,
- &kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
- batch += nr_zapped;
+ unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
+ &kvm->arch.zapped_obsolete_pages, &nr_zapped);
+ batch += nr_zapped;
+
+ if (unstable)
goto restart;
- }
}
/*
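
Note: in the mmu.c hunk the helper's return value and the zap count are
decoupled. __kvm_mmu_prepare_zap_page() returning true now means only that
the page list became unstable and the walk must restart; nr_zapped reports
how many pages were actually zapped. Accumulating batch unconditionally
keeps the accounting correct when pages are zapped without destabilizing
the list. A runnable userspace analogue of the pattern (all names below are
hypothetical stand-ins, not KVM APIs):

	#include <stdbool.h>
	#include <stdio.h>

	/* The helper reports two independent facts: how much work it
	 * did (*nr_zapped) and whether it invalidated the iteration. */
	static bool prepare_zap(int page, int *nr_zapped)
	{
		*nr_zapped = 1;		/* work is always done here... */
		return (page % 3) == 0;	/* ...but only sometimes unstable */
	}

	int main(void)
	{
		int batch = 0, nr_zapped;
		int page;

		for (page = 1; page <= 10; page++) {
			bool unstable = prepare_zap(page, &nr_zapped);

			batch += nr_zapped; /* count on the stable path too */
			if (unstable)
				continue; /* stand-in for "goto restart" */
		}
		printf("batch=%d\n", batch); /* 10: no zaps went uncounted */
		return 0;
	}
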
diff --git a/arch/x86/um/shared/sysdep/syscalls_64.h b/arch/x86/um/shared/sysdep/syscalls_64.h
index 8a7d5e1da98e..1e6875b4ffd8 100644
--- a/arch/x86/um/shared/sysdep/syscalls_64.h
+++ b/arch/x86/um/shared/sysdep/syscalls_64.h
@@ -10,13 +10,12 @@
#include <linux/msg.h>
#include <linux/shm.h>
-typedef long syscall_handler_t(void);
+typedef long syscall_handler_t(long, long, long, long, long, long);
extern syscall_handler_t *sys_call_table[];
#define EXECUTE_SYSCALL(syscall, regs) \
- (((long (*)(long, long, long, long, long, long)) \
- (*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
+ (((*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
UPT_SYSCALL_ARG2(&regs->regs), \
UPT_SYSCALL_ARG3(&regs->regs), \
UPT_SYSCALL_ARG4(&regs->regs), \
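
Note: the syscalls_64.h hunk moves the full six-argument prototype into the
syscall_handler_t typedef, so EXECUTE_SYSCALL() no longer casts every table
entry at the call site. Keeping the pointer type identical to the callee's
real signature is the safer idiom for indirect calls (and is what
control-flow-integrity checks compare), though that motivation is an
inference; the diff itself only shows the cleanup. A self-contained sketch
of the resulting dispatch, with a hypothetical one-entry table:

	#include <stdio.h>

	typedef long syscall_handler_t(long, long, long, long, long, long);

	static long sys_demo(long a, long b, long c, long d, long e, long f)
	{
		return a + b + c + d + e + f;
	}

	/* Entries carry the full prototype, so the call needs no cast,
	 * unlike the old per-call "(long (*)(long, ...))" conversion. */
	static syscall_handler_t *demo_call_table[] = { sys_demo };

	int main(void)
	{
		long ret = (*demo_call_table[0])(1, 2, 3, 4, 5, 6);

		printf("ret=%ld\n", ret); /* 21 */
		return 0;
	}
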
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 64e6ec2c32a7..9d9777ded5f7 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -53,6 +53,7 @@ static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_pmu_irq) = { .irq = -1 };
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
+void asm_cpu_bringup_and_idle(void);
static void cpu_bringup(void)
{
@@ -310,7 +311,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
* pointing just below where pt_regs would be if it were a normal
* kernel entry.
*/
- ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
+ ctxt->user_regs.eip = (unsigned long)asm_cpu_bringup_and_idle;
ctxt->flags = VGCF_IN_KERNEL;
ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
ctxt->user_regs.ds = __USER_DS;
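
Note: the smp_pv.c hunk points a new vCPU's initial instruction pointer at
the asm stub declared just above instead of at the C function directly; the
stub itself is added in the xen-head.S hunk below. A simplified, runnable
stand-in (demo_regs and the dummy stub body are hypothetical; the real
context is Xen's vcpu_guest_context and the real stub is assembly):

	#include <stdio.h>

	/* Dummy body for the sketch; the real stub lives in xen-head.S
	 * and forwards to cpu_bringup_and_idle(). */
	void asm_cpu_bringup_and_idle(void) { }

	struct demo_regs { unsigned long eip; };

	/* Mirrors the changed line: the entry point is now the stub. */
	static void point_vcpu_at_stub(struct demo_regs *regs)
	{
		regs->eip = (unsigned long)asm_cpu_bringup_and_idle;
	}

	int main(void)
	{
		struct demo_regs regs;

		point_vcpu_at_stub(&regs);
		printf("eip=%#lx\n", regs.eip);
		return 0;
	}
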
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index c1d8b90aa4e2..142da8547480 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -35,7 +35,11 @@ ENTRY(startup_xen)
rep __ASM_SIZE(stos)
mov %_ASM_SI, xen_start_info
- mov $init_thread_union+THREAD_SIZE, %_ASM_SP
+#ifdef CONFIG_X86_64
+ mov initial_stack(%rip), %rsp
+#else
+ mov initial_stack, %esp
+#endif
#ifdef CONFIG_X86_64
/* Set up %gs.
@@ -51,9 +55,19 @@ ENTRY(startup_xen)
wrmsr
#endif
- jmp xen_start_kernel
+ call xen_start_kernel
END(startup_xen)
__FINIT
+
+#ifdef CONFIG_XEN_PV_SMP
+.pushsection .text
+SYM_CODE_START(asm_cpu_bringup_and_idle)
+ UNWIND_HINT_EMPTY
+
+ call cpu_bringup_and_idle
+SYM_CODE_END(asm_cpu_bringup_and_idle)
+.popsection
+#endif
#endif
.pushsection .text
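
Note: the xen-head.S hunks make both PV entry paths look like ordinary C
call sites. The boot path loads its stack pointer from initial_stack rather
than the hard-coded init_thread_union+THREAD_SIZE, and both paths enter C
via call instead of jmp, so a return address is pushed and the stack arrives
in the state the x86-64 SysV ABI defines at C function entry (%rsp == 8 mod
16). The asm_cpu_bringup_and_idle stub exists so the secondary-CPU entry
carries an UNWIND_HINT_EMPTY annotation for objtool; plausibly the overall
goal is reliable stack unwinding from the Xen PV entry points, though the
diff alone does not say so. A small userspace illustration of the alignment
point (hypothetical demo; relies on the GCC/Clang builtin
__builtin_frame_address):

	#include <stdint.h>
	#include <stdio.h>

	/* With the standard prologue, the frame base is entry %rsp minus
	 * the 8-byte saved %rbp, so it is 16-byte aligned exactly when
	 * the function was entered through a `call` from an aligned
	 * stack. Entering via `jmp` would skew this by 8. */
	int main(void)
	{
		uintptr_t frame = (uintptr_t)__builtin_frame_address(0);

		printf("frame %% 16 = %lu\n", (unsigned long)(frame % 16));
		return 0;
	}
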