path: root/common
Diffstat (limited to 'common')
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0001-x86-paravirt-objtool-Annotate-indirect-calls.patch  129
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0002-x86-module-Detect-and-skip-invalid-relocations.patch  77
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0003-kvm-svm-Setup-MCG_CAP-on-AMD-properly.patch  54
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0004-kvm-nVMX-Disallow-userspace-injected-exceptions-in-g.patch  71
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0005-x86-cpufeatures-Add-Intel-PCONFIG-cpufeature.patch  39
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0006-x86-speculation-objtool-Annotate-indirect-calls-jump.patch  57
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0007-x86-speculation-Remove-Skylake-C2-from-Speculation-C.patch  48
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0008-x86-reboot-Turn-off-KVM-when-halting-a-CPU.patch  62
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0009-x86-KASLR-Fix-kexec-kernel-boot-crash-when-KASLR-ran.patch  79
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0010-kvm-x86-fix-icebp-instruction-handling.patch  88
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0011-bpf-x64-increase-number-of-passes.patch  56
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0012-x86-mm-kaslr-Use-the-_ASM_MUL-macro-for-multiplicati.patch  75
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0013-KVM-X86-Fix-preempt-the-preemption-timer-cancel.patch  93
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0014-KVM-nVMX-Fix-handling-of-lmsw-instruction.patch  63
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0015-KVM-SVM-do-not-zero-out-segment-attributes-if-segmen.patch  95
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0016-KVM-nVMX-Update-vmcs12-guest_linear_address-on-neste.patch  42
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0017-perf-x86-Fix-possible-Spectre-v1-indexing-for-hw_per.patch  62
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0018-perf-x86-cstate-Fix-possible-Spectre-v1-indexing-for.patch  53
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0019-perf-x86-msr-Fix-possible-Spectre-v1-indexing-in-the.patch  65
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0020-perf-x86-Fix-possible-Spectre-v1-indexing-for-x86_pm.patch  59
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0021-x86-amd-don-t-set-X86_BUG_SYSRET_SS_ATTRS-when-runni.patch  66
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0022-x86-nospec-Simplify-alternative_msr_write.patch  71
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0023-x86-bugs-Concentrate-bug-detection-into-a-separate-f.patch  75
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0024-x86-bugs-Concentrate-bug-reporting-into-a-separate-f.patch  92
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0025-x86-bugs-Read-SPEC_CTRL-MSR-during-boot-and-re-use-r.patch  143
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0026-x86-bugs-KVM-Support-the-combination-of-guest-and-ho.patch  137
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0027-x86-bugs-Expose-sys-.-spec_store_bypass.patch  148
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0028-x86-cpufeatures-Add-X86_FEATURE_RDS.patch  36
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0029-x86-bugs-Provide-boot-parameters-for-the-spec_store_.patch  272
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0030-x86-bugs-intel-Set-proper-CPU-features-and-setup-RDS.patch  183
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0031-x86-bugs-Whitelist-allowed-SPEC_CTRL-MSR-values.patch  70
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0032-x86-bugs-AMD-Add-support-to-disable-RDS-on-Fam-15-16.patch  200
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0033-x86-KVM-VMX-Expose-SPEC_CTRL-Bit-2-to-the-guest.patch  120
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0034-x86-speculation-Create-spec-ctrl.h-to-avoid-include-.patch  141
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0035-x86-process-Optimize-TIF-checks-in-__switch_to_xtra.patch  125
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0036-x86-process-Correct-and-optimize-TIF_BLOCKSTEP-switc.patch  84
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0037-x86-process-Optimize-TIF_NOTSC-switch.patch  112
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0038-x86-process-Allow-runtime-control-of-Speculative-Sto.patch  229
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0039-x86-speculation-Add-prctl-for-Speculative-Store-Bypa.patch  222
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0040-nospec-Move-array_index_nospec-parameter-checking-in.patch  92
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0041-nospec-Allow-index-argument-to-have-const-qualified-.patch  68
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0042-nospec-Kill-array_index_nospec_mask_check.patch  85
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0043-nospec-Include-asm-barrier.h-dependency.patch  51
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0044-prctl-Add-speculation-control-prctls.patch  239
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0045-nospec-Allow-getting-setting-on-non-current-task.patch  162
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0046-x86-bugs-Make-boot-modes-__ro_after_init.patch  43
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0047-fs-proc-Report-eip-esp-in-prod-PID-stat-for-coredump.patch  77
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0048-proc-fix-coredump-vs-read-proc-stat-race.patch  105
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0049-proc-Provide-details-on-speculation-flaw-mitigations.patch  64
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0050-prctl-Add-force-disable-speculation.patch  218
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0051-seccomp-fix-the-usage-of-get-put_seccomp_filter-in-s.patch  94
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0052-seccomp-Enable-speculation-flaw-mitigations.patch  64
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0053-seccomp-Use-PR_SPEC_FORCE_DISABLE.patch  33
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0054-seccomp-Add-filter-flag-to-opt-out-of-SSB-mitigation.patch  222
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0055-seccomp-Move-speculation-migitation-control-to-arch-.patch  121
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0056-x86-speculation-Make-seccomp-the-default-mode-for-Sp.patch  166
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0057-x86-bugs-Rename-_RDS-to-_SSBD.patch  405
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0058-x86-bugs-Fix-__ssb_select_mitigation-return-type.patch  35
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0059-x86-bugs-Make-cpu_show_common-static.patch  34
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0060-x86-bugs-Fix-the-parameters-alignment-and-missing-vo.patch  42
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0061-x86-cpu-Make-alternative_msr_write-work-for-32-bit-c.patch  42
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0062-KVM-SVM-Move-spec-control-call-after-restore-of-GS.patch  70
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0063-x86-speculation-Use-synthetic-bits-for-IBRS-IBPB-STI.patch  156
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0064-x86-cpufeatures-Disentangle-MSR_SPEC_CTRL-enumeratio.patch  155
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0065-x86-cpufeatures-Disentangle-SSBD-enumeration.patch  163
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0066-x86-cpu-AMD-Fix-erratum-1076-CPB-bit.patch  55
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0067-x86-cpufeatures-Add-FEATURE_ZEN.patch  48
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0068-x86-speculation-Handle-HT-correctly-on-AMD.patch  240
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0069-x86-bugs-KVM-Extend-speculation-control-for-VIRT_SPE.patch  163
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0070-x86-speculation-Add-virtualized-speculative-store-by.patch  104
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0071-x86-speculation-Rework-speculative_store_bypass_upda.patch  75
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0072-x86-bugs-Unify-x86_spec_ctrl_-set_guest-restore_host.patch  145
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0073-x86-bugs-Expose-x86_spec_ctrl_base-directly.patch  120
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0074-x86-bugs-Remove-x86_spec_ctrl_set.patch  76
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0075-x86-bugs-Rework-spec_ctrl-base-and-mask-logic.patch  95
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0076-x86-speculation-KVM-Implement-support-for-VIRT_SPEC_.patch  84
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0077-KVM-SVM-Implement-VIRT_SPEC_CTRL-support-for-SSBD.patch  241
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0078-x86-bugs-Rename-SSBD_NO-to-SSB_NO.patch  48
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0079-x86-kexec-Avoid-double-free_page-upon-do_kexec_load-.patch  106
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0080-KVM-VMX-Expose-SSBD-properly-to-guests.patch  44
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0081-KVM-x86-Update-cpuid-properly-when-CR4.OSXAVE-or-CR4.patch  63
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0082-kvm-x86-IA32_ARCH_CAPABILITIES-is-always-supported.patch  54
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0083-kvm-x86-fix-KVM_XEN_HVM_CONFIG-ioctl.patch  57
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0084-KVM-VMX-raise-internal-error-for-exception-during-in.patch  90
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0085-KVM-lapic-stop-advertising-DIRECTED_EOI-when-in-kern.patch  56
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0086-objtool-Improve-detection-of-BUG-and-other-dead-ends.patch  217
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0087-objtool-Move-checking-code-to-check.c.patch  2802
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0088-objtool-sync-up-with-the-4.14.47-version-of-objtool.patch  9906
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0089-objtool-x86-Add-several-functions-and-files-to-the-o.patch  316
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0090-x86-xen-Add-unwind-hint-annotations-to-xen_setup_gdt.patch  47
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0091-x86-amd-revert-commit-944e0fc51a89c9827b9.patch  51
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0092-xen-set-cpu-capabilities-from-xen_start_kernel.patch  72
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/0093-x86-amd-don-t-set-X86_BUG_SYSRET_SS_ATTRS-when-runni.patch  65
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.9.21/upstream-backports.scc  93
94 files changed, 22432 insertions, 0 deletions
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0001-x86-paravirt-objtool-Annotate-indirect-calls.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0001-x86-paravirt-objtool-Annotate-indirect-calls.patch
new file mode 100644
index 00000000..fddb3346
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0001-x86-paravirt-objtool-Annotate-indirect-calls.patch
@@ -0,0 +1,129 @@
+From 8b18def6a2da1b716f49fad6744a41c94d31a2c5 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed, 17 Jan 2018 16:58:11 +0100
+Subject: [PATCH 01/93] x86/paravirt, objtool: Annotate indirect calls
+
+commit 3010a0663fd949d122eca0561b06b0a9453f7866 upstream.
+
+Paravirt emits indirect calls which get flagged by objtool retpoline
+checks, annotate it away because all these indirect calls will be
+patched out before we start userspace.
+
+This patching happens through alternative_instructions() ->
+apply_paravirt() -> pv_init_ops.patch() which will eventually end up
+in paravirt_patch_default(). This function _will_ write direct
+alternatives.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/paravirt.h | 16 ++++++++++++----
+ arch/x86/include/asm/paravirt_types.h | 5 ++++-
+ 2 files changed, 16 insertions(+), 5 deletions(-)
+
+diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
+index ce93281..24af8b1 100644
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -6,6 +6,7 @@
+ #ifdef CONFIG_PARAVIRT
+ #include <asm/pgtable_types.h>
+ #include <asm/asm.h>
++#include <asm/nospec-branch.h>
+
+ #include <asm/paravirt_types.h>
+
+@@ -869,23 +870,27 @@ extern void default_banner(void);
+
+ #define INTERRUPT_RETURN \
+ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
+- jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
++ ANNOTATE_RETPOLINE_SAFE; \
++ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret);)
+
+ #define DISABLE_INTERRUPTS(clobbers) \
+ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
+ PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
++ ANNOTATE_RETPOLINE_SAFE; \
+ call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
+ PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+
+ #define ENABLE_INTERRUPTS(clobbers) \
+ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
+ PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
++ ANNOTATE_RETPOLINE_SAFE; \
+ call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
+ PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+
+ #ifdef CONFIG_X86_32
+ #define GET_CR0_INTO_EAX \
+ push %ecx; push %edx; \
++ ANNOTATE_RETPOLINE_SAFE; \
+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
+ pop %edx; pop %ecx
+ #else /* !CONFIG_X86_32 */
+@@ -907,11 +912,13 @@ extern void default_banner(void);
+ */
+ #define SWAPGS \
+ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
+- call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
++ ANNOTATE_RETPOLINE_SAFE; \
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \
+ )
+
+ #define GET_CR2_INTO_RAX \
+- call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
++ ANNOTATE_RETPOLINE_SAFE; \
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);
+
+ #define PARAVIRT_ADJUST_EXCEPTION_FRAME \
+ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
+@@ -921,7 +928,8 @@ extern void default_banner(void);
+ #define USERGS_SYSRET64 \
+ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
+ CLBR_NONE, \
+- jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
++ ANNOTATE_RETPOLINE_SAFE; \
++ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64);)
+ #endif /* CONFIG_X86_32 */
+
+ #endif /* __ASSEMBLY__ */
+diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
+index 0f400c0..04b7971 100644
+--- a/arch/x86/include/asm/paravirt_types.h
++++ b/arch/x86/include/asm/paravirt_types.h
+@@ -42,6 +42,7 @@
+ #include <asm/desc_defs.h>
+ #include <asm/kmap_types.h>
+ #include <asm/pgtable_types.h>
++#include <asm/nospec-branch.h>
+
+ struct page;
+ struct thread_struct;
+@@ -391,7 +392,9 @@ int paravirt_disable_iospace(void);
+ * offset into the paravirt_patch_template structure, and can therefore be
+ * freely converted back into a structure offset.
+ */
+-#define PARAVIRT_CALL "call *%c[paravirt_opptr];"
++#define PARAVIRT_CALL \
++ ANNOTATE_RETPOLINE_SAFE \
++ "call *%c[paravirt_opptr];"
+
+ /*
+ * These macros are intended to wrap calls through one of the paravirt
+--
+2.7.4
+
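For context, the ANNOTATE_RETPOLINE_SAFE macro used by the hunks above is not defined in this patch; it lives in nospec-branch.h and simply records the address of the following indirect call or jump in a discard section that objtool consumes. A minimal sketch of that pattern, assuming the section name and the _ASM_PTR helper (simplified, not part of the patch):

    /* Illustrative sketch only; simplified from nospec-branch.h. */
    #define ANNOTATE_RETPOLINE_SAFE                          \
            "999:\n\t"                                       \
            ".pushsection .discard.retpoline_safe\n\t"       \
            _ASM_PTR " 999b\n\t"                             \
            ".popsection\n\t"

The recorded address tells objtool that this particular indirect branch is intentionally left unretpolined, because paravirt patching rewrites it into a direct call before userspace ever runs.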
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0002-x86-module-Detect-and-skip-invalid-relocations.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0002-x86-module-Detect-and-skip-invalid-relocations.patch
new file mode 100644
index 00000000..3035344f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0002-x86-module-Detect-and-skip-invalid-relocations.patch
@@ -0,0 +1,77 @@
+From 23f4b6492ade30e2f7fc21acfb162e46851cf0f0 Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Fri, 3 Nov 2017 07:58:54 -0500
+Subject: [PATCH 02/93] x86/module: Detect and skip invalid relocations
+
+commit eda9cec4c9a12208a6f69fbe68f72a6311d50032 upstream.
+
+There have been some cases where external tooling (e.g., kpatch-build)
+creates a corrupt relocation which targets the wrong address. This is a
+silent failure which can corrupt memory in unexpected places.
+
+On x86, the bytes of data being overwritten by relocations are always
+initialized to zero beforehand. Use that knowledge to add sanity checks
+to detect such cases before they corrupt memory.
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: jeyu@kernel.org
+Cc: live-patching@vger.kernel.org
+Link: http://lkml.kernel.org/r/37450d6c6225e54db107fba447ce9e56e5f758e9.1509713553.git.jpoimboe@redhat.com
+[ Restructured the messages, as it's unclear whether the relocation or the target is corrupted. ]
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: Matthias Kaehlcke <mka@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/module.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
+index 477ae80..87f30a8 100644
+--- a/arch/x86/kernel/module.c
++++ b/arch/x86/kernel/module.c
+@@ -171,19 +171,27 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
+ case R_X86_64_NONE:
+ break;
+ case R_X86_64_64:
++ if (*(u64 *)loc != 0)
++ goto invalid_relocation;
+ *(u64 *)loc = val;
+ break;
+ case R_X86_64_32:
++ if (*(u32 *)loc != 0)
++ goto invalid_relocation;
+ *(u32 *)loc = val;
+ if (val != *(u32 *)loc)
+ goto overflow;
+ break;
+ case R_X86_64_32S:
++ if (*(s32 *)loc != 0)
++ goto invalid_relocation;
+ *(s32 *)loc = val;
+ if ((s64)val != *(s32 *)loc)
+ goto overflow;
+ break;
+ case R_X86_64_PC32:
++ if (*(u32 *)loc != 0)
++ goto invalid_relocation;
+ val -= (u64)loc;
+ *(u32 *)loc = val;
+ #if 0
+@@ -199,6 +207,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
+ }
+ return 0;
+
++invalid_relocation:
++ pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
++ (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
++ return -ENOEXEC;
++
+ overflow:
+ pr_err("overflow in relocation type %d val %Lx\n",
+ (int)ELF64_R_TYPE(rel[i].r_info), val);
+--
+2.7.4
+
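The check above relies on x86 module relocation targets being zero-initialized before the loader writes them. Reduced to its essence, the idea looks like the following hedged sketch (function name and error handling invented for illustration):

    #include <stdint.h>
    #include <errno.h>

    /* Refuse to apply a relocation whose target bytes are already non-zero,
     * since that indicates the relocation entry points at the wrong address. */
    static int apply_reloc_u32(uint32_t *loc, uint32_t val)
    {
            if (*loc != 0)
                    return -ENOEXEC;   /* corrupt relocation target */
            *loc = val;
            return 0;
    }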
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0003-kvm-svm-Setup-MCG_CAP-on-AMD-properly.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0003-kvm-svm-Setup-MCG_CAP-on-AMD-properly.patch
new file mode 100644
index 00000000..d1b9f3df
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0003-kvm-svm-Setup-MCG_CAP-on-AMD-properly.patch
@@ -0,0 +1,54 @@
+From de05b6da8c54ed0aa2158ad3112ac582c88f0676 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Sun, 26 Mar 2017 23:51:24 +0200
+Subject: [PATCH 03/93] kvm/svm: Setup MCG_CAP on AMD properly
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit 74f169090b6f36b867c9df0454366dd9af6f62d1 ]
+
+MCG_CAP[63:9] bits are reserved on AMD. However, on an AMD guest, this
+MSR returns 0x100010a. More specifically, bit 24 is set, which is simply
+wrong. That bit is MCG_SER_P and is present only on Intel. Thus, clean
+up the reserved bits in order not to confuse guests.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index b82bb66..2d96e30 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -5437,6 +5437,12 @@ static inline void avic_post_state_restore(struct kvm_vcpu *vcpu)
+ avic_handle_ldr_update(vcpu);
+ }
+
++static void svm_setup_mce(struct kvm_vcpu *vcpu)
++{
++ /* [63:9] are reserved. */
++ vcpu->arch.mcg_cap &= 0x1ff;
++}
++
+ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+ .cpu_has_kvm_support = has_svm,
+ .disabled_by_bios = is_disabled,
+@@ -5552,6 +5558,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+ .pmu_ops = &amd_pmu_ops,
+ .deliver_posted_interrupt = svm_deliver_avic_intr,
+ .update_pi_irte = svm_update_pi_irte,
++ .setup_mce = svm_setup_mce,
+ };
+
+ static int __init svm_init(void)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0004-kvm-nVMX-Disallow-userspace-injected-exceptions-in-g.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0004-kvm-nVMX-Disallow-userspace-injected-exceptions-in-g.patch
new file mode 100644
index 00000000..3d7259ab
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0004-kvm-nVMX-Disallow-userspace-injected-exceptions-in-g.patch
@@ -0,0 +1,71 @@
+From 230ca3c5a44c752650e6bac9a4fe0eefc5ff0758 Mon Sep 17 00:00:00 2001
+From: Jim Mattson <jmattson@google.com>
+Date: Wed, 5 Apr 2017 09:14:40 -0700
+Subject: [PATCH 04/93] kvm: nVMX: Disallow userspace-injected exceptions in
+ guest mode
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit 28d06353881939703c34d82a1465136af176c620 ]
+
+The userspace exception injection API and code path are entirely
+unprepared for exceptions that might cause a VM-exit from L2 to L1, so
+the best course of action may be to simply disallow this for now.
+
+1. The API provides no mechanism for userspace to specify the new DR6
+bits for a #DB exception or the new CR2 value for a #PF
+exception. Presumably, userspace is expected to modify these registers
+directly with KVM_SET_SREGS before the next KVM_RUN ioctl. However, in
+the event that L1 intercepts the exception, these registers should not
+be changed. Instead, the new values should be provided in the
+exit_qualification field of vmcs12 (Intel SDM vol 3, section 27.1).
+
+2. In the case of a userspace-injected #DB, inject_pending_event()
+clears DR7.GD before calling vmx_queue_exception(). However, in the
+event that L1 intercepts the exception, this is too early, because
+DR7.GD should not be modified by a #DB that causes a VM-exit directly
+(Intel SDM vol 3, section 27.1).
+
+3. If the injected exception is a #PF, nested_vmx_check_exception()
+doesn't properly check whether or not L1 is interested in the
+associated error code (using the #PF error code mask and match fields
+from vmcs12). It may either return 0 when it should call
+nested_vmx_vmexit() or vice versa.
+
+4. nested_vmx_check_exception() assumes that it is dealing with a
+hardware-generated exception intercept from L2, with some of the
+relevant details (the VM-exit interruption-information and the exit
+qualification) live in vmcs02. For userspace-injected exceptions, this
+is not the case.
+
+5. prepare_vmcs12() assumes that when its exit_intr_info argument
+specifies valid information with a valid error code that it can VMREAD
+the VM-exit interruption error code from vmcs02. For
+userspace-injected exceptions, this is not the case.
+
+Signed-off-by: Jim Mattson <jmattson@google.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 9f0f7e2..b27b93d 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3056,7 +3056,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
+ return -EINVAL;
+
+ if (events->exception.injected &&
+- (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
++ (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR ||
++ is_guest_mode(vcpu)))
+ return -EINVAL;
+
+ process_nmi(vcpu);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0005-x86-cpufeatures-Add-Intel-PCONFIG-cpufeature.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0005-x86-cpufeatures-Add-Intel-PCONFIG-cpufeature.patch
new file mode 100644
index 00000000..1e33e521
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0005-x86-cpufeatures-Add-Intel-PCONFIG-cpufeature.patch
@@ -0,0 +1,39 @@
+From a3032e35007a8178f448e471acb6bc6c972c087a Mon Sep 17 00:00:00 2001
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Mon, 5 Mar 2018 19:25:51 +0300
+Subject: [PATCH 05/93] x86/cpufeatures: Add Intel PCONFIG cpufeature
+
+commit 7958b2246fadf54b7ff820a2a5a2c5ca1554716f upstream.
+
+CPUID.0x7.0x0:EDX[18] indicates whether the Intel CPU supports the PCONFIG instruction.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kai Huang <kai.huang@linux.intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: linux-mm@kvack.org
+Link: http://lkml.kernel.org/r/20180305162610.37510-4-kirill.shutemov@linux.intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index ed7a1d2..a248531 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -302,6 +302,7 @@
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+ #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
+ #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
++#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
+ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+--
+2.7.4
+
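The new flag encodes CPUID leaf 7, sub-leaf 0, EDX bit 18, which is where the 18*32+18 value comes from. As a hedged, stand-alone illustration (not part of the patch, and assuming GCC's cpuid.h), the same bit can be probed from user space like this:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* CPUID.(EAX=7, ECX=0):EDX[18] is the PCONFIG bit. */
            if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) && (edx & (1u << 18)))
                    puts("PCONFIG supported");
            else
                    puts("PCONFIG not supported");
            return 0;
    }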
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0006-x86-speculation-objtool-Annotate-indirect-calls-jump.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0006-x86-speculation-objtool-Annotate-indirect-calls-jump.patch
new file mode 100644
index 00000000..ecb1cdd3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0006-x86-speculation-objtool-Annotate-indirect-calls-jump.patch
@@ -0,0 +1,57 @@
+From b4f699a49be9bbfa6bb5408e7f54c89b9bdc8919 Mon Sep 17 00:00:00 2001
+From: Andy Whitcroft <apw@canonical.com>
+Date: Wed, 14 Mar 2018 11:24:27 +0000
+Subject: [PATCH 06/93] x86/speculation, objtool: Annotate indirect calls/jumps
+ for objtool on 32-bit kernels
+
+commit a14bff131108faf50cc0cf864589fd71ee216c96 upstream.
+
+In the following commit:
+
+ 9e0e3c5130e9 ("x86/speculation, objtool: Annotate indirect calls/jumps for objtool")
+
+... we added annotations for CALL_NOSPEC/JMP_NOSPEC on 64-bit x86 kernels,
+but we did not annotate the 32-bit path.
+
+Annotate it similarly.
+
+Signed-off-by: Andy Whitcroft <apw@canonical.com>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20180314112427.22351-1-apw@canonical.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index d0dabea..f928ad9 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -183,7 +183,10 @@
+ * otherwise we'll run out of registers. We don't care about CET
+ * here, anyway.
+ */
+-# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n", \
++# define CALL_NOSPEC \
++ ALTERNATIVE( \
++ ANNOTATE_RETPOLINE_SAFE \
++ "call *%[thunk_target]\n", \
+ " jmp 904f;\n" \
+ " .align 16\n" \
+ "901: call 903f;\n" \
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0007-x86-speculation-Remove-Skylake-C2-from-Speculation-C.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0007-x86-speculation-Remove-Skylake-C2-from-Speculation-C.patch
new file mode 100644
index 00000000..4da48ef5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0007-x86-speculation-Remove-Skylake-C2-from-Speculation-C.patch
@@ -0,0 +1,48 @@
+From 5516ae4d16ab0ce922de31fec20d5d5e198aa258 Mon Sep 17 00:00:00 2001
+From: Alexander Sergeyev <sergeev917@gmail.com>
+Date: Tue, 13 Mar 2018 22:38:56 +0300
+Subject: [PATCH 07/93] x86/speculation: Remove Skylake C2 from Speculation
+ Control microcode blacklist
+
+commit e3b3121fa8da94cb20f9e0c64ab7981ae47fd085 upstream.
+
+In accordance with Intel's microcode revision guidance from March 6, MCU
+rev 0xc2 is cleared on both Skylake H/S and Skylake Xeon E3 processors
+that share CPUID 506E3.
+
+Signed-off-by: Alexander Sergeyev <sergeev917@gmail.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Jia Zhang <qianyue.zj@alibaba-inc.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Kyle Huey <me@kylehuey.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Link: https://lkml.kernel.org/r/20180313193856.GA8580@localhost.localdomain
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/intel.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 7680425..8fb1d65 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -64,7 +64,7 @@ void check_mpx_erratum(struct cpuinfo_x86 *c)
+ /*
+ * Early microcode releases for the Spectre v2 mitigation were broken.
+ * Information taken from;
+- * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
++ * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
+ * - https://kb.vmware.com/s/article/52345
+ * - Microcode revisions observed in the wild
+ * - Release note from 20180108 microcode release
+@@ -82,7 +82,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
+ { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 },
+ { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
+ { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
+- { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 },
+ { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
+ { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
+ { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 },
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0008-x86-reboot-Turn-off-KVM-when-halting-a-CPU.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0008-x86-reboot-Turn-off-KVM-when-halting-a-CPU.patch
new file mode 100644
index 00000000..1b5231fc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0008-x86-reboot-Turn-off-KVM-when-halting-a-CPU.patch
@@ -0,0 +1,62 @@
+From 7737fc421365d9f2fd328b19fdccf005092d4ec1 Mon Sep 17 00:00:00 2001
+From: Tiantian Feng <fengtiantian@huawei.com>
+Date: Wed, 19 Apr 2017 18:18:39 +0200
+Subject: [PATCH 08/93] x86/reboot: Turn off KVM when halting a CPU
+
+[ Upstream commit fba4f472b33aa81ca1836f57d005455261e9126f ]
+
+A CPU in VMX root mode will ignore INIT signals and will fail to bring
+up the APs after reboot. Therefore, on a panic we disable VMX on all
+CPUs before rebooting or triggering kdump.
+
+Do this when halting the machine as well, in case a firmware-level reboot
+does not perform a cold reset for all processors. Without doing this,
+rebooting the host may hang.
+
+Signed-off-by: Tiantian Feng <fengtiantian@huawei.com>
+Signed-off-by: Xishi Qiu <qiuxishi@huawei.com>
+[ Rewritten commit message. ]
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: kvm@vger.kernel.org
+Link: http://lkml.kernel.org/r/20170419161839.30550-1-pbonzini@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/smp.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
+index c00cb64..420f2dc 100644
+--- a/arch/x86/kernel/smp.c
++++ b/arch/x86/kernel/smp.c
+@@ -33,6 +33,7 @@
+ #include <asm/mce.h>
+ #include <asm/trace/irq_vectors.h>
+ #include <asm/kexec.h>
++#include <asm/virtext.h>
+
+ /*
+ * Some notes on x86 processor bugs affecting SMP operation:
+@@ -162,6 +163,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
+ if (raw_smp_processor_id() == atomic_read(&stopping_cpu))
+ return NMI_HANDLED;
+
++ cpu_emergency_vmxoff();
+ stop_this_cpu(NULL);
+
+ return NMI_HANDLED;
+@@ -174,6 +176,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
+ asmlinkage __visible void smp_reboot_interrupt(void)
+ {
+ ipi_entering_ack_irq();
++ cpu_emergency_vmxoff();
+ stop_this_cpu(NULL);
+ irq_exit();
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0009-x86-KASLR-Fix-kexec-kernel-boot-crash-when-KASLR-ran.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0009-x86-KASLR-Fix-kexec-kernel-boot-crash-when-KASLR-ran.patch
new file mode 100644
index 00000000..1e9973e7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0009-x86-KASLR-Fix-kexec-kernel-boot-crash-when-KASLR-ran.patch
@@ -0,0 +1,79 @@
+From 29fa51519ae0978980c8fc154eba5b244ad7980f Mon Sep 17 00:00:00 2001
+From: Baoquan He <bhe@redhat.com>
+Date: Thu, 27 Apr 2017 15:42:20 +0800
+Subject: [PATCH 09/93] x86/KASLR: Fix kexec kernel boot crash when KASLR
+ randomization fails
+
+[ Upstream commit da63b6b20077469bd6bd96e07991ce145fc4fbc4 ]
+
+Dave found that a kdump kernel with KASLR enabled will reset to the BIOS
+immediately if physical randomization failed to find a new position for
+the kernel. A kernel with the 'nokaslr' option works in this case.
+
+The reason is that KASLR will install a new page table for the identity
+mapping, while it missed building it for the original kernel location
+if KASLR physical randomization fails.
+
+This only happens in the kexec/kdump kernel, because the identity mapping
+has been built for kexec/kdump in the 1st kernel for the whole memory by
+calling init_pgtable(). Here if physical randomization fails, it won't build
+the identity mapping for the original area of the kernel but change to a
+new page table '_pgtable'. Then the kernel will triple fault immediately
+caused by no identity mappings.
+
+The normal kernel won't see this bug, because it comes here via startup_32()
+and CR3 will be set to _pgtable already. In startup_32() the identity
+mapping is built for the 0~4G area. In KASLR we just append to the existing
+area instead of entirely overwriting it for on-demand identity mapping
+building. So the identity mapping for the original area of kernel is still
+there.
+
+To fix it we just switch to the new identity mapping page table when physical
+KASLR succeeds. Otherwise we keep the old page table unchanged just like
+"nokaslr" does.
+
+Signed-off-by: Baoquan He <bhe@redhat.com>
+Signed-off-by: Dave Young <dyoung@redhat.com>
+Acked-by: Kees Cook <keescook@chromium.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Dave Jiang <dave.jiang@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Garnier <thgarnie@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Yinghai Lu <yinghai@kernel.org>
+Link: http://lkml.kernel.org/r/1493278940-5885-1-git-send-email-bhe@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/boot/compressed/kaslr.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
+index a66854d..af42b4d 100644
+--- a/arch/x86/boot/compressed/kaslr.c
++++ b/arch/x86/boot/compressed/kaslr.c
+@@ -463,10 +463,17 @@ void choose_random_location(unsigned long input,
+ add_identity_map(random_addr, output_size);
+ *output = random_addr;
+ }
++
++ /*
++ * This loads the identity mapping page table.
++ * This should only be done if a new physical address
++ * is found for the kernel, otherwise we should keep
++ * the old page table to make it be like the "nokaslr"
++ * case.
++ */
++ finalize_identity_maps();
+ }
+
+- /* This actually loads the identity pagetable on x86_64. */
+- finalize_identity_maps();
+
+ /* Pick random virtual address starting from LOAD_PHYSICAL_ADDR. */
+ if (IS_ENABLED(CONFIG_X86_64))
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0010-kvm-x86-fix-icebp-instruction-handling.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0010-kvm-x86-fix-icebp-instruction-handling.patch
new file mode 100644
index 00000000..aef1109b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0010-kvm-x86-fix-icebp-instruction-handling.patch
@@ -0,0 +1,88 @@
+From 694ba89c4cb4e43ae4cb418ea46b1415f6d31ce7 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Tue, 20 Mar 2018 12:16:59 -0700
+Subject: [PATCH 10/93] kvm/x86: fix icebp instruction handling
+
+commit 32d43cd391bacb5f0814c2624399a5dad3501d09 upstream.
+
+The undocumented 'icebp' instruction (aka 'int1') works pretty much like
+'int3' in the absense of in-circuit probing equipment (except,
+obviously, that it raises #DB instead of raising #BP), and is used by
+some validation test-suites as such.
+
+But Andy Lutomirski noticed that his test suite acted differently in kvm
+than on bare hardware.
+
+The reason is that kvm used an inexact test for the icebp instruction:
+it just assumed that an all-zero VM exit qualification value meant that
+the VM exit was due to icebp.
+
+That is not unlike the guess that do_debug() does for the actual
+exception handling case, but it's purely a heuristic, not an absolute
+rule. do_debug() does it because it wants to ascribe _some_ reasons to
+the #DB that happened, and an empty %dr6 value means that 'icebp' is the
+most likely cause and we have no better information.
+
+But kvm can just do it right, because unlike the do_debug() case, kvm
+actually sees the real reason for the #DB in the VM-exit interruption
+information field.
+
+So instead of relying on an inexact heuristic, just use the actual VM
+exit information that says "it was 'icebp'".
+
+Right now the 'icebp' instruction isn't technically documented by Intel,
+but that will hopefully change. The special "privileged software
+exception" information _is_ actually mentioned in the Intel SDM, even
+though the cause of it isn't enumerated.
+
+Reported-by: Andy Lutomirski <luto@kernel.org>
+Tested-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/vmx.h | 1 +
+ arch/x86/kvm/vmx.c | 9 ++++++++-
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
+index 6899cf1..9cbfbef 100644
+--- a/arch/x86/include/asm/vmx.h
++++ b/arch/x86/include/asm/vmx.h
+@@ -309,6 +309,7 @@ enum vmcs_field {
+ #define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */
+ #define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */
+ #define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */
++#define INTR_TYPE_PRIV_SW_EXCEPTION (5 << 8) /* ICE breakpoint - undocumented */
+ #define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */
+
+ /* GUEST_INTERRUPTIBILITY_INFO flags. */
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 3c3558b..27f505d 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1053,6 +1053,13 @@ static inline bool is_machine_check(u32 intr_info)
+ (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
+ }
+
++/* Undocumented: icebp/int1 */
++static inline bool is_icebp(u32 intr_info)
++{
++ return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
++ == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
++}
++
+ static inline bool cpu_has_vmx_msr_bitmap(void)
+ {
+ return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
+@@ -5708,7 +5715,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
+ (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
+ vcpu->arch.dr6 &= ~15;
+ vcpu->arch.dr6 |= dr6 | DR6_RTM;
+- if (!(dr6 & ~DR6_RESERVED)) /* icebp */
++ if (is_icebp(intr_info))
+ skip_emulated_instruction(vcpu);
+
+ kvm_queue_exception(vcpu, DB_VECTOR);
+--
+2.7.4
+
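The fix works because the VM-exit interruption-information field encodes the event type in bits 10:8 and a valid bit in bit 31, so the exit cause can be identified precisely instead of guessed from an empty exit qualification. A hedged sketch of the decode used by is_icebp(); the mask values are quoted from vmx.h and the Intel SDM, not from this patch:

    /* VM-exit interruption-information layout (sketch). */
    #define INTR_INFO_VECTOR_MASK        0xff         /* bits 7:0  - vector          */
    #define INTR_INFO_INTR_TYPE_MASK     0x700        /* bits 10:8 - event type      */
    #define INTR_INFO_VALID_MASK         0x80000000u  /* bit 31    - field is valid  */
    #define INTR_TYPE_PRIV_SW_EXCEPTION  (5 << 8)     /* icebp/int1, as added above  */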
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0011-bpf-x64-increase-number-of-passes.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0011-bpf-x64-increase-number-of-passes.patch
new file mode 100644
index 00000000..bf2556b8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0011-bpf-x64-increase-number-of-passes.patch
@@ -0,0 +1,56 @@
+From 1909a1513f6d5b9170e40c4fee98bf2cd57b5b55 Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Wed, 7 Mar 2018 22:10:01 +0100
+Subject: [PATCH 11/93] bpf, x64: increase number of passes
+
+commit 6007b080d2e2adb7af22bf29165f0594ea12b34c upstream.
+
+In Cilium some of the main programs we run today are hitting 9 passes
+on x64's JIT compiler, and we've had cases already where we surpassed
+the limit where the JIT then punts the program to the interpreter
+instead, leading to insertion failures due to CONFIG_BPF_JIT_ALWAYS_ON
+or insertion failures due to the prog array owner being JITed but the
+program to insert not (both must have the same JITed/non-JITed property).
+
+One concrete case the program image shrunk from 12,767 bytes down to
+10,288 bytes where the image converged after 16 steps. I've measured
+that this took 340us in the JIT until it converges on my i7-6600U. Thus,
+increase the original limit we had from day one where the JIT covered
+cBPF only back then before we run into the case (as similar with the
+complexity limit) where we trip over this and hit program rejections.
+Also add a cond_resched() into the compilation loop, the JIT process
+runs without any locks and may sleep anyway.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/net/bpf_jit_comp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 1f7ed2e..cd97645 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -1135,7 +1135,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ * may converge on the last pass. In such case do one more
+ * pass to emit the final image
+ */
+- for (pass = 0; pass < 10 || image; pass++) {
++ for (pass = 0; pass < 20 || image; pass++) {
+ proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
+ if (proglen <= 0) {
+ image = NULL;
+@@ -1162,6 +1162,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ }
+ }
+ oldproglen = proglen;
++ cond_resched();
+ }
+
+ if (bpf_jit_enable > 1)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0012-x86-mm-kaslr-Use-the-_ASM_MUL-macro-for-multiplicati.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0012-x86-mm-kaslr-Use-the-_ASM_MUL-macro-for-multiplicati.patch
new file mode 100644
index 00000000..bdb55fda
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0012-x86-mm-kaslr-Use-the-_ASM_MUL-macro-for-multiplicati.patch
@@ -0,0 +1,75 @@
+From 280488ceca9427dd91e5ee449d90f8cf16d8e65c Mon Sep 17 00:00:00 2001
+From: Matthias Kaehlcke <mka@chromium.org>
+Date: Mon, 1 May 2017 15:47:41 -0700
+Subject: [PATCH 12/93] x86/mm/kaslr: Use the _ASM_MUL macro for multiplication
+ to work around Clang incompatibility
+
+[ Upstream commit 121843eb02a6e2fa30aefab64bfe183c97230c75 ]
+
+The constraint "rm" allows the compiler to put mix_const into memory.
+When the input operand is a memory location then MUL needs an operand
+size suffix, since Clang can't infer the multiplication width from the
+operand.
+
+Add and use the _ASM_MUL macro which determines the operand size and
+resolves to the MUL instruction with the corresponding suffix.
+
+This fixes the following error when building with clang:
+
+ CC arch/x86/lib/kaslr.o
+ /tmp/kaslr-dfe1ad.s: Assembler messages:
+ /tmp/kaslr-dfe1ad.s:182: Error: no instruction mnemonic suffix given and no register operands; can't size instruction
+
+Signed-off-by: Matthias Kaehlcke <mka@chromium.org>
+Cc: Grant Grundler <grundler@chromium.org>
+Cc: Greg Hackmann <ghackmann@google.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Michael Davidson <md@google.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20170501224741.133938-1-mka@chromium.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/asm.h | 1 +
+ arch/x86/lib/kaslr.c | 3 ++-
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
+index 7bb29a4..08684b3 100644
+--- a/arch/x86/include/asm/asm.h
++++ b/arch/x86/include/asm/asm.h
+@@ -34,6 +34,7 @@
+ #define _ASM_ADD __ASM_SIZE(add)
+ #define _ASM_SUB __ASM_SIZE(sub)
+ #define _ASM_XADD __ASM_SIZE(xadd)
++#define _ASM_MUL __ASM_SIZE(mul)
+
+ #define _ASM_AX __ASM_REG(ax)
+ #define _ASM_BX __ASM_REG(bx)
+diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
+index 121f59c..0c7fe44 100644
+--- a/arch/x86/lib/kaslr.c
++++ b/arch/x86/lib/kaslr.c
+@@ -5,6 +5,7 @@
+ * kernel starts. This file is included in the compressed kernel and
+ * normally linked in the regular.
+ */
++#include <asm/asm.h>
+ #include <asm/kaslr.h>
+ #include <asm/msr.h>
+ #include <asm/archrandom.h>
+@@ -79,7 +80,7 @@ unsigned long kaslr_get_random_long(const char *purpose)
+ }
+
+ /* Circular multiply for better bit diffusion */
+- asm("mul %3"
++ asm(_ASM_MUL "%3"
+ : "=a" (random), "=d" (raw)
+ : "a" (random), "rm" (mix_const));
+ random += raw;
+--
+2.7.4
+
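For context, __ASM_SIZE() is a pre-existing helper in asm.h that appends the operand-size suffix ("l" on 32-bit, "q" on 64-bit) to the mnemonic, so the assembler no longer has to infer the width when the operand lives in memory. A simplified, hedged sketch of what the new _ASM_MUL builds on (the real kernel macros are more general):

    /* Simplified sketch: stringify the mnemonic with its width suffix so it
     * can be pasted into inline-asm templates, e.g. asm(_ASM_MUL "%3" ...). */
    #ifdef CONFIG_X86_32
    # define __ASM_SIZE(inst)   #inst "l "
    #else
    # define __ASM_SIZE(inst)   #inst "q "
    #endif
    #define _ASM_MUL            __ASM_SIZE(mul)   /* "mull " or "mulq " */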
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0013-KVM-X86-Fix-preempt-the-preemption-timer-cancel.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0013-KVM-X86-Fix-preempt-the-preemption-timer-cancel.patch
new file mode 100644
index 00000000..4331a9f4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0013-KVM-X86-Fix-preempt-the-preemption-timer-cancel.patch
@@ -0,0 +1,93 @@
+From b541de5f53d608796a946a42f5c3251e4dd07522 Mon Sep 17 00:00:00 2001
+From: Wanpeng Li <wanpeng.li@hotmail.com>
+Date: Sat, 20 May 2017 20:32:32 -0700
+Subject: [PATCH 13/93] KVM: X86: Fix preempt the preemption timer cancel
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit 5acc1ca4fb15f00bfa3d4046e35ca381bc25d580 ]
+
+Preemption can occur during cancel preemption timer, and there will be
+inconsistent status in lapic, vmx and vmcs field.
+
+ CPU0 CPU1
+
+ preemption timer vmexit
+ handle_preemption_timer(vCPU0)
+ kvm_lapic_expired_hv_timer
+ vmx_cancel_hv_timer
+ vmx->hv_deadline_tsc = -1
+ vmcs_clear_bits
+ /* hv_timer_in_use still true */
+ sched_out
+ sched_in
+ kvm_arch_vcpu_load
+ vmx_set_hv_timer
+ write vmx->hv_deadline_tsc
+ vmcs_set_bits
+ /* back in kvm_lapic_expired_hv_timer */
+ hv_timer_in_use = false
+ ...
+ vmx_vcpu_run
+ vmx_arm_hv_run
+ write preemption timer deadline
+ spurious preemption timer vmexit
+ handle_preemption_timer(vCPU0)
+ kvm_lapic_expired_hv_timer
+ WARN_ON(!apic->lapic_timer.hv_timer_in_use);
+
+This can be reproduced sporadically during boot of L2 on a
+preemptible L1, causing a splat on L1.
+
+ WARNING: CPU: 3 PID: 1952 at arch/x86/kvm/lapic.c:1529 kvm_lapic_expired_hv_timer+0xb5/0xd0 [kvm]
+ CPU: 3 PID: 1952 Comm: qemu-system-x86 Not tainted 4.12.0-rc1+ #24 RIP: 0010:kvm_lapic_expired_hv_timer+0xb5/0xd0 [kvm]
+ Call Trace:
+ handle_preemption_timer+0xe/0x20 [kvm_intel]
+ vmx_handle_exit+0xc9/0x15f0 [kvm_intel]
+ ? lock_acquire+0xdb/0x250
+ ? lock_acquire+0xdb/0x250
+ ? kvm_arch_vcpu_ioctl_run+0xdf3/0x1ce0 [kvm]
+ kvm_arch_vcpu_ioctl_run+0xe55/0x1ce0 [kvm]
+ kvm_vcpu_ioctl+0x384/0x7b0 [kvm]
+ ? kvm_vcpu_ioctl+0x384/0x7b0 [kvm]
+ ? __fget+0xf3/0x210
+ do_vfs_ioctl+0xa4/0x700
+ ? __fget+0x114/0x210
+ SyS_ioctl+0x79/0x90
+ do_syscall_64+0x8f/0x750
+ ? trace_hardirqs_on_thunk+0x1a/0x1c
+ entry_SYSCALL64_slow_path+0x25/0x25
+
+This patch fixes it by disabling preemption while cancelling
+preemption timer. This way cancel_hv_timer is atomic with
+respect to kvm_arch_vcpu_load.
+
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/lapic.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 3f05c04..650ff4a 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1358,8 +1358,10 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
+
+ static void cancel_hv_tscdeadline(struct kvm_lapic *apic)
+ {
++ preempt_disable();
+ kvm_x86_ops->cancel_hv_timer(apic->vcpu);
+ apic->lapic_timer.hv_timer_in_use = false;
++ preempt_enable();
+ }
+
+ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0014-KVM-nVMX-Fix-handling-of-lmsw-instruction.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0014-KVM-nVMX-Fix-handling-of-lmsw-instruction.patch
new file mode 100644
index 00000000..43b1f38e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0014-KVM-nVMX-Fix-handling-of-lmsw-instruction.patch
@@ -0,0 +1,63 @@
+From 2c5329f428b85d1167abdd3206bdac08a02ae082 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jan=20H=2E=20Sch=C3=B6nherr?= <jschoenh@amazon.de>
+Date: Sat, 20 May 2017 13:22:56 +0200
+Subject: [PATCH 14/93] KVM: nVMX: Fix handling of lmsw instruction
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit e1d39b17e044e8ae819827810d87d809ba5f58c0 ]
+
+The decision whether or not to exit from L2 to L1 on an lmsw instruction is
+based on bogus values: instead of using the information encoded within the
+exit qualification, it uses the data also used for the mov-to-cr
+instruction, which boils down to using whatever is in %eax at that point.
+
+Use the correct values instead.
+
+Without this fix, an L1 may not get notified when a 32-bit Linux L2
+switches its secondary CPUs to protected mode; the L1 is only notified on
+the next modification of CR0. This short time window poses a problem, when
+there is some other reason to exit to L1 in between. Then, L2 will be
+resumed in real mode and chaos ensues.
+
+Signed-off-by: Jan H. Schönherr <jschoenh@amazon.de>
+Reviewed-by: Wanpeng Li <wanpeng.li@hotmail.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 27f505d..8d842d9 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -7910,11 +7910,13 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
+ {
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ int cr = exit_qualification & 15;
+- int reg = (exit_qualification >> 8) & 15;
+- unsigned long val = kvm_register_readl(vcpu, reg);
++ int reg;
++ unsigned long val;
+
+ switch ((exit_qualification >> 4) & 3) {
+ case 0: /* mov to cr */
++ reg = (exit_qualification >> 8) & 15;
++ val = kvm_register_readl(vcpu, reg);
+ switch (cr) {
+ case 0:
+ if (vmcs12->cr0_guest_host_mask &
+@@ -7969,6 +7971,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
+ * lmsw can change bits 1..3 of cr0, and only set bit 0 of
+ * cr0. Other attempted changes are ignored, with no exit.
+ */
++ val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
+ if (vmcs12->cr0_guest_host_mask & 0xe &
+ (val ^ vmcs12->cr0_read_shadow))
+ return true;
+--
+2.7.4
+
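The underlying issue is that a VMX control-register-access exit qualification packs several fields into one word, and only the MOV-to/from-CR forms carry a GPR number; LMSW carries its source operand in the high bits instead. A hedged sketch of that layout, mirroring the shifts used in the hunk above (field positions per the Intel SDM, helper names invented):

    /* VMX CR-access exit-qualification decode (sketch, not part of the patch). */
    static inline int cr_num(unsigned long q)      { return q & 0xf; }            /* bits 3:0  */
    static inline int access_type(unsigned long q) { return (q >> 4) & 0x3; }     /* 0=mov to, 1=mov from, 2=CLTS, 3=LMSW */
    static inline int gpr_num(unsigned long q)     { return (q >> 8) & 0xf; }     /* MOV to/from CR only */
    static inline int lmsw_data(unsigned long q)   { return (q >> 16) & 0xffff; } /* LMSW source operand */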
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0015-KVM-SVM-do-not-zero-out-segment-attributes-if-segmen.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0015-KVM-SVM-do-not-zero-out-segment-attributes-if-segmen.patch
new file mode 100644
index 00000000..913e3fe5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0015-KVM-SVM-do-not-zero-out-segment-attributes-if-segmen.patch
@@ -0,0 +1,95 @@
+From 348032cf73954af79ac077ae0c13d6faa99294af Mon Sep 17 00:00:00 2001
+From: Roman Pen <roman.penyaev@profitbricks.com>
+Date: Thu, 1 Jun 2017 10:55:03 +0200
+Subject: [PATCH 15/93] KVM: SVM: do not zero out segment attributes if segment
+ is unusable or not present
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit d9c1b5431d5f0e07575db785a022bce91051ac1d ]
+
+This is a fix for the problem [1], where VMCB.CPL was set to 0 and interrupt
+was taken on userspace stack. The root cause lies in the specific AMD CPU
+behaviour which manifests itself as unusable segment attributes on SYSRET.
+The corresponding work around for the kernel is the following:
+
+61f01dd941ba ("x86_64, asm: Work around AMD SYSRET SS descriptor attribute issue")
+
+In turn, the virtualization side treated the unusable segment incorrectly and
+restored CPL from SS attributes, which were zeroed out a few lines above.
+
+In the current patch it is only assured that the P bit is cleared in the
+VMCB.save state and that segment attributes are not zeroed out if the segment
+is not present or is unusable, so CPL can be safely restored from the DPL field.
+
+This is only one part of the fix, since QEMU side should be fixed accordingly
+not to zero out attributes on its side. Corresponding patch will follow.
+
+[1] Message id: CAJrWOzD6Xq==b-zYCDdFLgSRMPM-NkNuTSDFEtX=7MreT45i7Q@mail.gmail.com
+
+Signed-off-by: Roman Pen <roman.penyaev@profitbricks.com>
+Signed-off-by: Mikhail Sennikovskii <mikhail.sennikovskii@profitbricks.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm.c | 24 +++++++++++-------------
+ 1 file changed, 11 insertions(+), 13 deletions(-)
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 2d96e30..8551a54 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1876,6 +1876,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
+ */
+ if (var->unusable)
+ var->db = 0;
++ /* This is symmetric with svm_set_segment() */
+ var->dpl = to_svm(vcpu)->vmcb->save.cpl;
+ break;
+ }
+@@ -2021,18 +2022,14 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
+ s->base = var->base;
+ s->limit = var->limit;
+ s->selector = var->selector;
+- if (var->unusable)
+- s->attrib = 0;
+- else {
+- s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
+- s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
+- s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
+- s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
+- s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
+- s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
+- s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
+- s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
+- }
++ s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
++ s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
++ s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
++ s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
++ s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
++ s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
++ s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
++ s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
+
+ /*
+ * This is always accurate, except if SYSRET returned to a segment
+@@ -2041,7 +2038,8 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
+ * would entail passing the CPL to userspace and back.
+ */
+ if (seg == VCPU_SREG_SS)
+- svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
++ /* This is symmetric with svm_get_segment() */
++ svm->vmcb->save.cpl = (var->dpl & 3);
+
+ mark_dirty(svm->vmcb, VMCB_SEG);
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0016-KVM-nVMX-Update-vmcs12-guest_linear_address-on-neste.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0016-KVM-nVMX-Update-vmcs12-guest_linear_address-on-neste.patch
new file mode 100644
index 00000000..cf8424c9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0016-KVM-nVMX-Update-vmcs12-guest_linear_address-on-neste.patch
@@ -0,0 +1,42 @@
+From d79905a595224c714dc8da5df054653c3b958250 Mon Sep 17 00:00:00 2001
+From: Jim Mattson <jmattson@google.com>
+Date: Thu, 1 Jun 2017 12:44:46 -0700
+Subject: [PATCH 16/93] KVM: nVMX: Update vmcs12->guest_linear_address on
+ nested VM-exit
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit d281e13b0bfe745a21061a194e386a949784393f ]
+
+The guest-linear address field is set for VM exits due to attempts to
+execute LMSW with a memory operand and VM exits due to attempts to
+execute INS or OUTS for which the relevant segment is usable,
+regardless of whether or not EPT is in use.
+
+Fixes: 119a9c01a5922 ("KVM: nVMX: pass valid guest linear-address to the L1")
+Signed-off-by: Jim Mattson <jmattson@google.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 8d842d9..273313f 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -10621,8 +10621,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
+ }
+
+- if (nested_cpu_has_ept(vmcs12))
+- vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
++ vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
+
+ if (nested_cpu_has_vid(vmcs12))
+ vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0017-perf-x86-Fix-possible-Spectre-v1-indexing-for-hw_per.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0017-perf-x86-Fix-possible-Spectre-v1-indexing-for-hw_per.patch
new file mode 100644
index 00000000..cb6045b1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0017-perf-x86-Fix-possible-Spectre-v1-indexing-for-hw_per.patch
@@ -0,0 +1,62 @@
+From 1007b2c9e70fe3aaffda12b809da0f3b53642777 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 20 Apr 2018 14:06:29 +0200
+Subject: [PATCH 17/93] perf/x86: Fix possible Spectre-v1 indexing for
+ hw_perf_event cache_*
+
+commit ef9ee4ad38445a30909c48998624861716f2a994 upstream.
+
+> arch/x86/events/core.c:319 set_ext_hw_attr() warn: potential spectre issue 'hw_cache_event_ids[cache_type]' (local cap)
+> arch/x86/events/core.c:319 set_ext_hw_attr() warn: potential spectre issue 'hw_cache_event_ids' (local cap)
+> arch/x86/events/core.c:328 set_ext_hw_attr() warn: potential spectre issue 'hw_cache_extra_regs[cache_type]' (local cap)
+> arch/x86/events/core.c:328 set_ext_hw_attr() warn: potential spectre issue 'hw_cache_extra_regs' (local cap)
+
+Userspace controls @config which contains 3 (byte) fields used for a 3
+dimensional array deref.
+
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: <stable@kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/core.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 38623e2..6b955e3 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -303,17 +303,20 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
+
+ config = attr->config;
+
+- cache_type = (config >> 0) & 0xff;
++ cache_type = (config >> 0) & 0xff;
+ if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+ return -EINVAL;
++ cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX);
+
+ cache_op = (config >> 8) & 0xff;
+ if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+ return -EINVAL;
++ cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX);
+
+ cache_result = (config >> 16) & 0xff;
+ if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return -EINVAL;
++ cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);
+
+ val = hw_cache_event_ids[cache_type][cache_op][cache_result];
+
+--
+2.7.4
+
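Patches 0017 through 0020 all apply the same clamp-after-bounds-check pattern; a small self-contained sketch of that pattern follows (the table, index and bound names here are made up for illustration):

	#include <linux/nospec.h>

	/* Reject out-of-range indices, then clamp what the CPU may use under speculation. */
	static u64 lookup_event_sketch(const u64 *table, unsigned int idx, unsigned int max)
	{
		if (idx >= max)
			return 0;                           /* architectural bounds check          */
		idx = array_index_nospec(idx, max);         /* idx stays < max even speculatively  */
		return table[idx];
	}

array_index_nospec() yields the index unchanged when it is in range and 0 otherwise, using a data dependency rather than a branch the CPU could mispredict.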
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0018-perf-x86-cstate-Fix-possible-Spectre-v1-indexing-for.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0018-perf-x86-cstate-Fix-possible-Spectre-v1-indexing-for.patch
new file mode 100644
index 00000000..40bc2cae
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0018-perf-x86-cstate-Fix-possible-Spectre-v1-indexing-for.patch
@@ -0,0 +1,53 @@
+From 8708c762c727c3c4a8fb6c75fc1d5585f89ece90 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 20 Apr 2018 14:25:48 +0200
+Subject: [PATCH 18/93] perf/x86/cstate: Fix possible Spectre-v1 indexing for
+ pkg_msr
+
+commit a5f81290ce475489fa2551c01a07470c1a4c932e upstream.
+
+> arch/x86/events/intel/cstate.c:307 cstate_pmu_event_init() warn: potential spectre issue 'pkg_msr' (local cap)
+
+Userspace controls @attr, sanitize cfg (attr->config) before using it
+to index an array.
+
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: <stable@kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/intel/cstate.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
+index fec8a46..c6a04c0 100644
+--- a/arch/x86/events/intel/cstate.c
++++ b/arch/x86/events/intel/cstate.c
+@@ -90,6 +90,7 @@
+ #include <linux/module.h>
+ #include <linux/slab.h>
+ #include <linux/perf_event.h>
++#include <linux/nospec.h>
+ #include <asm/cpu_device_id.h>
+ #include <asm/intel-family.h>
+ #include "../perf_event.h"
+@@ -300,6 +301,7 @@ static int cstate_pmu_event_init(struct perf_event *event)
+ } else if (event->pmu == &cstate_pkg_pmu) {
+ if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
+ return -EINVAL;
++ cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
+ if (!pkg_msr[cfg].attr)
+ return -EINVAL;
+ event->hw.event_base = pkg_msr[cfg].msr;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0019-perf-x86-msr-Fix-possible-Spectre-v1-indexing-in-the.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0019-perf-x86-msr-Fix-possible-Spectre-v1-indexing-in-the.patch
new file mode 100644
index 00000000..876e4bd9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0019-perf-x86-msr-Fix-possible-Spectre-v1-indexing-in-the.patch
@@ -0,0 +1,65 @@
+From 2c1bc0d092e3885ee643c9d5755957a1297b5245 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 20 Apr 2018 14:23:36 +0200
+Subject: [PATCH 19/93] perf/x86/msr: Fix possible Spectre-v1 indexing in the
+ MSR driver
+
+commit 06ce6e9b6d6c09d4129c6e24a1314a395d816c10 upstream.
+
+> arch/x86/events/msr.c:178 msr_event_init() warn: potential spectre issue 'msr' (local cap)
+
+Userspace controls @attr, sanitize cfg (attr->config) before using it
+to index an array.
+
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: <stable@kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/msr.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
+index 4bb3ec6..be0b196 100644
+--- a/arch/x86/events/msr.c
++++ b/arch/x86/events/msr.c
+@@ -1,4 +1,5 @@
+ #include <linux/perf_event.h>
++#include <linux/nospec.h>
+ #include <asm/intel-family.h>
+
+ enum perf_msr_id {
+@@ -136,9 +137,6 @@ static int msr_event_init(struct perf_event *event)
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+- if (cfg >= PERF_MSR_EVENT_MAX)
+- return -EINVAL;
+-
+ /* unsupported modes and filters */
+ if (event->attr.exclude_user ||
+ event->attr.exclude_kernel ||
+@@ -149,6 +147,11 @@ static int msr_event_init(struct perf_event *event)
+ event->attr.sample_period) /* no sampling */
+ return -EINVAL;
+
++ if (cfg >= PERF_MSR_EVENT_MAX)
++ return -EINVAL;
++
++ cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX);
++
+ if (!msr[cfg].attr)
+ return -EINVAL;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0020-perf-x86-Fix-possible-Spectre-v1-indexing-for-x86_pm.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0020-perf-x86-Fix-possible-Spectre-v1-indexing-for-x86_pm.patch
new file mode 100644
index 00000000..c4c48d56
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0020-perf-x86-Fix-possible-Spectre-v1-indexing-for-x86_pm.patch
@@ -0,0 +1,59 @@
+From faf22307f64c353212c5c132f45f5e7414cea4bf Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 20 Apr 2018 14:08:58 +0200
+Subject: [PATCH 20/93] perf/x86: Fix possible Spectre-v1 indexing for
+ x86_pmu::event_map()
+
+commit 46b1b577229a091b137831becaa0fae8690ee15a upstream.
+
+> arch/x86/events/intel/cstate.c:307 cstate_pmu_event_init() warn: potential spectre issue 'pkg_msr' (local cap)
+> arch/x86/events/intel/core.c:337 intel_pmu_event_map() warn: potential spectre issue 'intel_perfmon_event_map'
+> arch/x86/events/intel/knc.c:122 knc_pmu_event_map() warn: potential spectre issue 'knc_perfmon_event_map'
+> arch/x86/events/intel/p4.c:722 p4_pmu_event_map() warn: potential spectre issue 'p4_general_events'
+> arch/x86/events/intel/p6.c:116 p6_pmu_event_map() warn: potential spectre issue 'p6_perfmon_event_map'
+> arch/x86/events/amd/core.c:132 amd_pmu_event_map() warn: potential spectre issue 'amd_perfmon_event_map'
+
+Userspace controls @attr, sanitize @attr->config before passing it on
+to x86_pmu::event_map().
+
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: <stable@kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/core.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 6b955e3..d36ada3 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -26,6 +26,7 @@
+ #include <linux/cpu.h>
+ #include <linux/bitops.h>
+ #include <linux/device.h>
++#include <linux/nospec.h>
+
+ #include <asm/apic.h>
+ #include <asm/stacktrace.h>
+@@ -423,6 +424,8 @@ int x86_setup_perfctr(struct perf_event *event)
+ if (attr->config >= x86_pmu.max_events)
+ return -EINVAL;
+
++ attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events);
++
+ /*
+ * The generic map:
+ */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0021-x86-amd-don-t-set-X86_BUG_SYSRET_SS_ATTRS-when-runni.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0021-x86-amd-don-t-set-X86_BUG_SYSRET_SS_ATTRS-when-runni.patch
new file mode 100644
index 00000000..39d81c71
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0021-x86-amd-don-t-set-X86_BUG_SYSRET_SS_ATTRS-when-runni.patch
@@ -0,0 +1,66 @@
+From 5fb8da20577a159d311db9c29e62dbb782529571 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Sun, 20 May 2018 20:51:10 +0100
+Subject: [PATCH 21/93] x86/amd: don't set X86_BUG_SYSRET_SS_ATTRS when running
+ under Xen
+
+commit def9331a12977770cc6132d79f8e6565871e8e38 upstream
+
+When running as Xen pv guest X86_BUG_SYSRET_SS_ATTRS must not be set
+on AMD cpus.
+
+This bug/feature bit is kind of special as it will be used very early
+when switching threads. Setting the bit and clearing it a little bit
+later leaves a critical window where things can go wrong. This time
+window has been enlarged a little by using setup_clear_cpu_cap() instead
+of the hypervisor's set_cpu_features callback. It seems this larger
+window now makes it rather easy to hit the problem.
+
+The proper solution is to never set the bit in case of Xen.
+
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/amd.c | 5 +++--
+ arch/x86/xen/enlighten.c | 4 +---
+ 2 files changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index c375bc6..747f8a2 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -824,8 +824,9 @@ static void init_amd(struct cpuinfo_x86 *c)
+ if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
+ set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
+
+- /* AMD CPUs don't reset SS attributes on SYSRET */
+- set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
++ /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
++ if (!cpu_has(c, X86_FEATURE_XENPV))
++ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+ }
+
+ #ifdef CONFIG_X86_32
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 5226379..8b97c87 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -1968,10 +1968,8 @@ EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
+
+ static void xen_set_cpu_features(struct cpuinfo_x86 *c)
+ {
+- if (xen_pv_domain()) {
+- clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
++ if (xen_pv_domain())
+ set_cpu_cap(c, X86_FEATURE_XENPV);
+- }
+ }
+
+ static void xen_pin_vcpu(int cpu)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0022-x86-nospec-Simplify-alternative_msr_write.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0022-x86-nospec-Simplify-alternative_msr_write.patch
new file mode 100644
index 00000000..aef6dcc5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0022-x86-nospec-Simplify-alternative_msr_write.patch
@@ -0,0 +1,71 @@
+From 0ba8203bd88d5640bd6b062b09d3514d5787161d Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Tue, 1 May 2018 15:55:51 +0200
+Subject: [PATCH 22/93] x86/nospec: Simplify alternative_msr_write()
+
+commit 1aa7a5735a41418d8e01fa7c9565eb2657e2ea3f upstream
+
+The macro is not type safe and I did look for why that "g" constraint for
+the asm doesn't work: it's because the asm is more fundamentally wrong.
+
+It does
+
+ movl %[val], %%eax
+
+but "val" isn't a 32-bit value, so then gcc will pass it in a register,
+and generate code like
+
+ movl %rsi, %eax
+
+and gas will complain about a nonsensical 'mov' instruction (it's moving a
+64-bit register to a 32-bit one).
+
+Passing it through memory will just hide the real bug - gcc still thinks
+the memory location is 64-bit, but the "movl" will only load the first 32
+bits and it all happens to work because x86 is little-endian.
+
+Convert it to a type safe inline function with a little trick which hands
+the feature into the ALTERNATIVE macro.
+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 19 ++++++++++---------
+ 1 file changed, 10 insertions(+), 9 deletions(-)
+
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index f928ad9..870acfc 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -241,15 +241,16 @@ static inline void vmexit_fill_RSB(void)
+ #endif
+ }
+
+-#define alternative_msr_write(_msr, _val, _feature) \
+- asm volatile(ALTERNATIVE("", \
+- "movl %[msr], %%ecx\n\t" \
+- "movl %[val], %%eax\n\t" \
+- "movl $0, %%edx\n\t" \
+- "wrmsr", \
+- _feature) \
+- : : [msr] "i" (_msr), [val] "i" (_val) \
+- : "eax", "ecx", "edx", "memory")
++static __always_inline
++void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
++{
++ asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
++ : : "c" (msr),
++ "a" (val),
++ "d" (val >> 32),
++ [feature] "i" (feature)
++ : "memory");
++}
+
+ static inline void indirect_branch_prediction_barrier(void)
+ {
+--
+2.7.4
+
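As background for the 32-bit/64-bit mix-up described above, a plain-C sketch of the register convention the rewritten helper hands to WRMSR (wrmsr_sketch is a hypothetical name, not part of the patch):

	/* WRMSR expects ECX = MSR index and EDX:EAX = the 64-bit value to write. */
	static inline void wrmsr_sketch(unsigned int msr, u64 val)
	{
		u32 low  = (u32)val;
		u32 high = (u32)(val >> 32);

		asm volatile("wrmsr" : : "c" (msr), "a" (low), "d" (high) : "memory");
	}

With the "a"/"d" constraints the compiler performs the 64-bit split itself, instead of the old macro trying to movl a 64-bit operand into %eax.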
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0023-x86-bugs-Concentrate-bug-detection-into-a-separate-f.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0023-x86-bugs-Concentrate-bug-detection-into-a-separate-f.patch
new file mode 100644
index 00000000..6d18a50c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0023-x86-bugs-Concentrate-bug-detection-into-a-separate-f.patch
@@ -0,0 +1,75 @@
+From c3a018c5b5ae383b51700cd636995916fc8c1f61 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:16 -0400
+Subject: [PATCH 23/93] x86/bugs: Concentrate bug detection into a separate
+ function
+
+commit 4a28bfe3267b68e22c663ac26185aa16c9b879ef upstream
+
+Combine the various logic which goes through all those
+x86_cpu_id matching structures in one function.
+
+Suggested-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/common.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 301bbd1..357c589 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -879,21 +879,27 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
+ {}
+ };
+
+-static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c)
++static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+ u64 ia32_cap = 0;
+
++ if (x86_match_cpu(cpu_no_speculation))
++ return;
++
++ setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
++ setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
++
+ if (x86_match_cpu(cpu_no_meltdown))
+- return false;
++ return;
+
+ if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+ /* Rogue Data Cache Load? No! */
+ if (ia32_cap & ARCH_CAP_RDCL_NO)
+- return false;
++ return;
+
+- return true;
++ setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+ }
+
+ /*
+@@ -942,12 +948,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
+
+ setup_force_cpu_cap(X86_FEATURE_ALWAYS);
+
+- if (!x86_match_cpu(cpu_no_speculation)) {
+- if (cpu_vulnerable_to_meltdown(c))
+- setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+- setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+- setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+- }
++ cpu_set_bug_bits(c);
+
+ fpu__init_system(c);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0024-x86-bugs-Concentrate-bug-reporting-into-a-separate-f.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0024-x86-bugs-Concentrate-bug-reporting-into-a-separate-f.patch
new file mode 100644
index 00000000..b86011cc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0024-x86-bugs-Concentrate-bug-reporting-into-a-separate-f.patch
@@ -0,0 +1,92 @@
+From 2ea1e87e0557d4994d239cf75a12cd624d3c7ef9 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:17 -0400
+Subject: [PATCH 24/93] x86/bugs: Concentrate bug reporting into a separate
+ function
+
+commit d1059518b4789cabe34bb4b714d07e6089c82ca1 upstream
+
+Those SysFS functions have a similar preamble, as such make common
+code to handle them.
+
+Suggested-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 46 ++++++++++++++++++++++++++++++++--------------
+ 1 file changed, 32 insertions(+), 14 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index b8b0b6e..4d9c5fe 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -313,30 +313,48 @@ static void __init spectre_v2_select_mitigation(void)
+ #undef pr_fmt
+
+ #ifdef CONFIG_SYSFS
+-ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
++
++ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
++ char *buf, unsigned int bug)
+ {
+- if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
++ if (!boot_cpu_has_bug(bug))
+ return sprintf(buf, "Not affected\n");
+- if (boot_cpu_has(X86_FEATURE_KAISER))
+- return sprintf(buf, "Mitigation: PTI\n");
++
++ switch (bug) {
++ case X86_BUG_CPU_MELTDOWN:
++ if (boot_cpu_has(X86_FEATURE_KAISER))
++ return sprintf(buf, "Mitigation: PTI\n");
++
++ break;
++
++ case X86_BUG_SPECTRE_V1:
++ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++
++ case X86_BUG_SPECTRE_V2:
++ return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
++ boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
++ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
++ spectre_v2_module_string());
++
++ default:
++ break;
++ }
++
+ return sprintf(buf, "Vulnerable\n");
+ }
+
++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
++}
++
+ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+- if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
+- return sprintf(buf, "Not affected\n");
+- return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++ return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
+ }
+
+ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+- if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+- return sprintf(buf, "Not affected\n");
+-
+- return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+- boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+- boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+- spectre_v2_module_string());
++ return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
+ }
+ #endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0025-x86-bugs-Read-SPEC_CTRL-MSR-during-boot-and-re-use-r.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0025-x86-bugs-Read-SPEC_CTRL-MSR-during-boot-and-re-use-r.patch
new file mode 100644
index 00000000..da25f2fe
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0025-x86-bugs-Read-SPEC_CTRL-MSR-during-boot-and-re-use-r.patch
@@ -0,0 +1,143 @@
+From f35005b1a8b68f66c980652ef5299cb422eb9123 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:18 -0400
+Subject: [PATCH 25/93] x86/bugs: Read SPEC_CTRL MSR during boot and re-use
+ reserved bits
+
+commit 1b86883ccb8d5d9506529d42dbe1a5257cb30b18 upstream
+
+The 336996-Speculative-Execution-Side-Channel-Mitigations.pdf refers to all
+the other bits as reserved. The Intel SDM glossary defines reserved as
+implementation specific - aka unknown.
+
+As such, at bootup this must be taken into account and proper masking for
+the bits in use applied.
+
+A copy of this document is available at
+https://bugzilla.kernel.org/show_bug.cgi?id=199511
+
+[ tglx: Made x86_spec_ctrl_base __ro_after_init ]
+
+Suggested-by: Jon Masters <jcm@redhat.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 24 ++++++++++++++++++++----
+ arch/x86/kernel/cpu/bugs.c | 28 ++++++++++++++++++++++++++++
+ 2 files changed, 48 insertions(+), 4 deletions(-)
+
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 870acfc..9ec3d4d 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -217,6 +217,17 @@ enum spectre_v2_mitigation {
+ SPECTRE_V2_IBRS,
+ };
+
++/*
++ * The Intel specification for the SPEC_CTRL MSR requires that we
++ * preserve any already set reserved bits at boot time (e.g. for
++ * future additions that this kernel is not currently aware of).
++ * We then set any additional mitigation bits that we want
++ * ourselves and always use this as the base for SPEC_CTRL.
++ * We also use this when handling guest entry/exit as below.
++ */
++extern void x86_spec_ctrl_set(u64);
++extern u64 x86_spec_ctrl_get_default(void);
++
+ extern char __indirect_thunk_start[];
+ extern char __indirect_thunk_end[];
+
+@@ -254,8 +265,9 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
+
+ static inline void indirect_branch_prediction_barrier(void)
+ {
+- alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
+- X86_FEATURE_USE_IBPB);
++ u64 val = PRED_CMD_IBPB;
++
++ alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
+ }
+
+ /*
+@@ -266,14 +278,18 @@ static inline void indirect_branch_prediction_barrier(void)
+ */
+ #define firmware_restrict_branch_speculation_start() \
+ do { \
++ u64 val = x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS; \
++ \
+ preempt_disable(); \
+- alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \
++ alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
+ X86_FEATURE_USE_IBRS_FW); \
+ } while (0)
+
+ #define firmware_restrict_branch_speculation_end() \
+ do { \
+- alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \
++ u64 val = x86_spec_ctrl_get_default(); \
++ \
++ alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
+ X86_FEATURE_USE_IBRS_FW); \
+ preempt_enable(); \
+ } while (0)
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 4d9c5fe..6ff972a 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -27,6 +27,12 @@
+
+ static void __init spectre_v2_select_mitigation(void);
+
++/*
++ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
++ * writes to SPEC_CTRL contain whatever reserved bits have been set.
++ */
++static u64 __ro_after_init x86_spec_ctrl_base;
++
+ void __init check_bugs(void)
+ {
+ identify_boot_cpu();
+@@ -36,6 +42,13 @@ void __init check_bugs(void)
+ print_cpu_info(&boot_cpu_data);
+ }
+
++ /*
++ * Read the SPEC_CTRL MSR to account for reserved bits which may
++ * have unknown values.
++ */
++ if (boot_cpu_has(X86_FEATURE_IBRS))
++ rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
++
+ /* Select the proper spectre mitigation before patching alternatives */
+ spectre_v2_select_mitigation();
+
+@@ -94,6 +107,21 @@ static const char *spectre_v2_strings[] = {
+
+ static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+
++void x86_spec_ctrl_set(u64 val)
++{
++ if (val & ~SPEC_CTRL_IBRS)
++ WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
++ else
++ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
++}
++EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
++
++u64 x86_spec_ctrl_get_default(void)
++{
++ return x86_spec_ctrl_base;
++}
++EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
++
+ #ifdef RETPOLINE
+ static bool spectre_v2_bad_module;
+
+--
+2.7.4
+
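Condensed, the behaviour the hunks above introduce looks like this (names are taken from the patch; the call site shown is illustrative only):

	/* At boot, capture whatever reserved SPEC_CTRL bits the firmware left set. */
	if (boot_cpu_has(X86_FEATURE_IBRS))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Later writes OR the requested bits into that base, so reserved bits survive. */
	x86_spec_ctrl_set(SPEC_CTRL_IBRS);	/* writes x86_spec_ctrl_base | SPEC_CTRL_IBRS */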
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0026-x86-bugs-KVM-Support-the-combination-of-guest-and-ho.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0026-x86-bugs-KVM-Support-the-combination-of-guest-and-ho.patch
new file mode 100644
index 00000000..d0e8ddcb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0026-x86-bugs-KVM-Support-the-combination-of-guest-and-ho.patch
@@ -0,0 +1,137 @@
+From d9dc73cbf12047f0d0e171366bfb962b3a592e6f Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:19 -0400
+Subject: [PATCH 26/93] x86/bugs, KVM: Support the combination of guest and
+ host IBRS
+
+commit 5cf687548705412da47c9cec342fd952d71ed3d5 upstream
+
+A guest may modify the SPEC_CTRL MSR from the value used by the
+kernel. Since the kernel doesn't use IBRS, this means a value of zero is
+what is needed in the host.
+
+But the 336996-Speculative-Execution-Side-Channel-Mitigations.pdf refers to
+the other bits as reserved so the kernel should respect the boot time
+SPEC_CTRL value and use that.
+
+This allows dealing with future extensions to the SPEC_CTRL interface if
+any at all.
+
+Note: This uses wrmsrl() instead of native_wrmsrl(). It does not make any
+difference as paravirt will over-write the callq *0xfff.. with the wrmsrl
+assembler code.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 10 ++++++++++
+ arch/x86/kernel/cpu/bugs.c | 18 ++++++++++++++++++
+ arch/x86/kvm/svm.c | 6 ++----
+ arch/x86/kvm/vmx.c | 6 ++----
+ 4 files changed, 32 insertions(+), 8 deletions(-)
+
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 9ec3d4d..d1c2630 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -228,6 +228,16 @@ enum spectre_v2_mitigation {
+ extern void x86_spec_ctrl_set(u64);
+ extern u64 x86_spec_ctrl_get_default(void);
+
++/*
++ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
++ * the guest has, while on VMEXIT we restore the host view. This
++ * would be easier if SPEC_CTRL were architecturally maskable or
++ * shadowable for guests but this is not (currently) the case.
++ * Takes the guest view of SPEC_CTRL MSR as a parameter.
++ */
++extern void x86_spec_ctrl_set_guest(u64);
++extern void x86_spec_ctrl_restore_host(u64);
++
+ extern char __indirect_thunk_start[];
+ extern char __indirect_thunk_end[];
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 6ff972a..f5cad2f 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -122,6 +122,24 @@ u64 x86_spec_ctrl_get_default(void)
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
+
++void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
++{
++ if (!boot_cpu_has(X86_FEATURE_IBRS))
++ return;
++ if (x86_spec_ctrl_base != guest_spec_ctrl)
++ wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
++}
++EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
++
++void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
++{
++ if (!boot_cpu_has(X86_FEATURE_IBRS))
++ return;
++ if (x86_spec_ctrl_base != guest_spec_ctrl)
++ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
++}
++EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
++
+ #ifdef RETPOLINE
+ static bool spectre_v2_bad_module;
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 8551a54..a07579f 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -4905,8 +4905,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+ * is no need to worry about the conditional branch over the wrmsr
+ * being speculatively taken.
+ */
+- if (svm->spec_ctrl)
+- native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
++ x86_spec_ctrl_set_guest(svm->spec_ctrl);
+
+ asm volatile (
+ "push %%" _ASM_BP "; \n\t"
+@@ -5018,8 +5017,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+
+- if (svm->spec_ctrl)
+- native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
++ x86_spec_ctrl_restore_host(svm->spec_ctrl);
+
+ /* Eliminate branch target predictions from guest mode */
+ vmexit_fill_RSB();
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 273313f..c386d13 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8898,8 +8898,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ * is no need to worry about the conditional branch over the wrmsr
+ * being speculatively taken.
+ */
+- if (vmx->spec_ctrl)
+- native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
++ x86_spec_ctrl_set_guest(vmx->spec_ctrl);
+
+ vmx->__launched = vmx->loaded_vmcs->launched;
+ asm(
+@@ -9037,8 +9036,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+
+- if (vmx->spec_ctrl)
+- native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
++ x86_spec_ctrl_restore_host(vmx->spec_ctrl);
+
+ /* Eliminate branch target predictions from guest mode */
+ vmexit_fill_RSB();
+--
+2.7.4
+
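The ordering the KVM hunks establish can be summarised as follows (function and field names are the ones introduced by this and earlier patches; the surrounding vcpu_run bodies are elided):

	x86_spec_ctrl_set_guest(svm->spec_ctrl);	/* before VMRUN: load the guest's view   */
	/* ... guest runs, VMEXIT ... */
	x86_spec_ctrl_restore_host(svm->spec_ctrl);	/* after VMEXIT: restore the host's base */

Both helpers return immediately unless X86_FEATURE_IBRS is set, and they skip the MSR write when the guest value already equals the host's x86_spec_ctrl_base.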
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0027-x86-bugs-Expose-sys-.-spec_store_bypass.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0027-x86-bugs-Expose-sys-.-spec_store_bypass.patch
new file mode 100644
index 00000000..c058dd8f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0027-x86-bugs-Expose-sys-.-spec_store_bypass.patch
@@ -0,0 +1,148 @@
+From a24af5ff013ee664d221b6b4d4933f8317f4facb Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:20 -0400
+Subject: [PATCH 27/93] x86/bugs: Expose /sys/../spec_store_bypass
+
+commit c456442cd3a59eeb1d60293c26cbe2ff2c4e42cf upstream
+
+Add the sysfs file for the new vulnerability. It does not do much except
+show the words 'Vulnerable' for recent x86 cores.
+
+Intel cores prior to family 6 are known not to be vulnerable, and so are
+some Atoms and some Xeon Phi.
+
+It assumes that older Cyrix, Centaur, etc. cores are immune.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu | 1 +
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/kernel/cpu/bugs.c | 5 +++++
+ arch/x86/kernel/cpu/common.c | 23 ++++++++++++++++++++++
+ drivers/base/cpu.c | 8 ++++++++
+ include/linux/cpu.h | 2 ++
+ 6 files changed, 40 insertions(+)
+
+diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
+index dfd56ec..6d75a9c 100644
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -355,6 +355,7 @@ What: /sys/devices/system/cpu/vulnerabilities
+ /sys/devices/system/cpu/vulnerabilities/meltdown
+ /sys/devices/system/cpu/vulnerabilities/spectre_v1
+ /sys/devices/system/cpu/vulnerabilities/spectre_v2
++ /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
+ Date: January 2018
+ Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+ Description: Information about CPU vulnerabilities
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index a248531..a688adb 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -335,5 +335,6 @@
+ #define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
+ #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
+ #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
++#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
+
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index f5cad2f..64e17a9 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -403,4 +403,9 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
+ {
+ return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
+ }
++
++ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
++}
+ #endif
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 357c589..4f1050a 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -879,10 +879,33 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
+ {}
+ };
+
++static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
++ { X86_VENDOR_CENTAUR, 5, },
++ { X86_VENDOR_INTEL, 5, },
++ { X86_VENDOR_NSC, 5, },
++ { X86_VENDOR_ANY, 4, },
++ {}
++};
++
+ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+ u64 ia32_cap = 0;
+
++ if (!x86_match_cpu(cpu_no_spec_store_bypass))
++ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
++
+ if (x86_match_cpu(cpu_no_speculation))
+ return;
+
+diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
+index 56b6c85..cbb1cc6 100644
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -519,14 +519,22 @@ ssize_t __weak cpu_show_spectre_v2(struct device *dev,
+ return sprintf(buf, "Not affected\n");
+ }
+
++ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sprintf(buf, "Not affected\n");
++}
++
+ static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+ static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+ static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
++static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
+
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ &dev_attr_meltdown.attr,
+ &dev_attr_spectre_v1.attr,
+ &dev_attr_spectre_v2.attr,
++ &dev_attr_spec_store_bypass.attr,
+ NULL
+ };
+
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index 2f475ad..917829b 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -50,6 +50,8 @@ extern ssize_t cpu_show_spectre_v1(struct device *dev,
+ struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_spectre_v2(struct device *dev,
+ struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
++ struct device_attribute *attr, char *buf);
+
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
+--
+2.7.4
+
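At this point in the series the new sysfs file only distinguishes "Not affected" from "Vulnerable"; for example, on an affected core:

	$ cat /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
	Vulnerable

Later patches in the series teach cpu_show_common() to report the selected mitigation instead.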
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0028-x86-cpufeatures-Add-X86_FEATURE_RDS.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0028-x86-cpufeatures-Add-X86_FEATURE_RDS.patch
new file mode 100644
index 00000000..19e234c1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0028-x86-cpufeatures-Add-X86_FEATURE_RDS.patch
@@ -0,0 +1,36 @@
+From 516277f549be576a1146ab20f22ab17393a2c53c Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Sat, 28 Apr 2018 22:34:17 +0200
+Subject: [PATCH 28/93] x86/cpufeatures: Add X86_FEATURE_RDS
+
+commit 0cc5fa00b0a88dad140b4e5c2cead9951ad36822 upstream
+
+Add the CPU feature bit CPUID.7.0.EDX[31] which indicates whether the CPU
+supports Reduced Data Speculation.
+
+[ tglx: Split it out from a later patch ]
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index a688adb..0c05c6c 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -306,6 +306,7 @@
+ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
++#define X86_FEATURE_RDS (18*32+31) /* Reduced Data Speculation */
+
+ /*
+ * BUG word(s)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0029-x86-bugs-Provide-boot-parameters-for-the-spec_store_.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0029-x86-bugs-Provide-boot-parameters-for-the-spec_store_.patch
new file mode 100644
index 00000000..15084ab2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0029-x86-bugs-Provide-boot-parameters-for-the-spec_store_.patch
@@ -0,0 +1,272 @@
+From b3c238b8a317093dd74e635d553271f2c56cb8c3 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:21 -0400
+Subject: [PATCH 29/93] x86/bugs: Provide boot parameters for the
+ spec_store_bypass_disable mitigation
+
+commit 24f7fc83b9204d20f878c57cb77d261ae825e033 upstream
+
+Contemporary high performance processors use a common industry-wide
+optimization known as "Speculative Store Bypass" in which loads from
+addresses to which a recent store has occurred may (speculatively) see an
+older value. Intel refers to this feature as "Memory Disambiguation" which
+is part of their "Smart Memory Access" capability.
+
+Memory Disambiguation can expose a cache side-channel attack against such
+speculatively read values. An attacker can create exploit code that allows
+them to read memory outside of a sandbox environment (for example,
+malicious JavaScript in a web page), or to perform more complex attacks
+against code running within the same privilege level, e.g. via the stack.
+
+As a first step to mitigate against such attacks, provide two boot command
+line control knobs:
+
+ nospec_store_bypass_disable
+ spec_store_bypass_disable=[off,auto,on]
+
+By default affected x86 processors will power on with Speculative
+Store Bypass enabled. Hence the provided kernel parameters are written
+from the point of view of whether to enable a mitigation or not.
+The parameters are as follows:
+
+ - auto - Kernel detects whether your CPU model contains an implementation
+ of Speculative Store Bypass and picks the most appropriate
+ mitigation.
+
+ - on - disable Speculative Store Bypass
+ - off - enable Speculative Store Bypass
+
+[ tglx: Reordered the checks so that the whole evaluation is not done
+ when the CPU does not support RDS ]
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/kernel-parameters.txt | 33 +++++++++++
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/include/asm/nospec-branch.h | 6 ++
+ arch/x86/kernel/cpu/bugs.c | 103 +++++++++++++++++++++++++++++++++++
+ 4 files changed, 143 insertions(+)
+
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 4b438e4..348ca9d 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -2686,6 +2686,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ allow data leaks with this option, which is equivalent
+ to spectre_v2=off.
+
++ nospec_store_bypass_disable
++ [HW] Disable all mitigations for the Speculative Store Bypass vulnerability
++
+ noxsave [BUGS=X86] Disables x86 extended register state save
+ and restore using xsave. The kernel will fallback to
+ enabling legacy floating-point and sse state.
+@@ -3962,6 +3965,36 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ Not specifying this option is equivalent to
+ spectre_v2=auto.
+
++ spec_store_bypass_disable=
++ [HW] Control Speculative Store Bypass (SSB) Disable mitigation
++ (Speculative Store Bypass vulnerability)
++
+			Certain CPUs are vulnerable to an exploit against
++ a common industry wide performance optimization known
++ as "Speculative Store Bypass" in which recent stores
++ to the same memory location may not be observed by
++ later loads during speculative execution. The idea
++ is that such stores are unlikely and that they can
++ be detected prior to instruction retirement at the
++ end of a particular speculation execution window.
++
++ In vulnerable processors, the speculatively forwarded
++ store can be used in a cache side channel attack, for
++ example to read memory to which the attacker does not
++ directly have access (e.g. inside sandboxed code).
++
++ This parameter controls whether the Speculative Store
++ Bypass optimization is used.
++
++ on - Unconditionally disable Speculative Store Bypass
++ off - Unconditionally enable Speculative Store Bypass
++ auto - Kernel detects whether the CPU model contains an
++ implementation of Speculative Store Bypass and
++ picks the most appropriate mitigation
++
++ Not specifying this option is equivalent to
++ spec_store_bypass_disable=auto.
++
+ spia_io_base= [HW,MTD]
+ spia_fio_base=
+ spia_pedr=
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 0c05c6c..013f3de 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -204,6 +204,7 @@
+
+ #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+ #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
++#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index d1c2630..7b9eacf 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -238,6 +238,12 @@ extern u64 x86_spec_ctrl_get_default(void);
+ extern void x86_spec_ctrl_set_guest(u64);
+ extern void x86_spec_ctrl_restore_host(u64);
+
++/* The Speculative Store Bypass disable variants */
++enum ssb_mitigation {
++ SPEC_STORE_BYPASS_NONE,
++ SPEC_STORE_BYPASS_DISABLE,
++};
++
+ extern char __indirect_thunk_start[];
+ extern char __indirect_thunk_end[];
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 64e17a9..75146d9 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -26,6 +26,7 @@
+ #include <asm/intel-family.h>
+
+ static void __init spectre_v2_select_mitigation(void);
++static void __init ssb_select_mitigation(void);
+
+ /*
+ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
+@@ -52,6 +53,12 @@ void __init check_bugs(void)
+ /* Select the proper spectre mitigation before patching alternatives */
+ spectre_v2_select_mitigation();
+
++ /*
++ * Select proper mitigation for any exposure to the Speculative Store
++ * Bypass vulnerability.
++ */
++ ssb_select_mitigation();
++
+ #ifdef CONFIG_X86_32
+ /*
+ * Check whether we are able to run this kernel safely on SMP.
+@@ -357,6 +364,99 @@ static void __init spectre_v2_select_mitigation(void)
+ }
+
+ #undef pr_fmt
++#define pr_fmt(fmt) "Speculative Store Bypass: " fmt
++
++static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;
++
++/* The kernel command line selection */
++enum ssb_mitigation_cmd {
++ SPEC_STORE_BYPASS_CMD_NONE,
++ SPEC_STORE_BYPASS_CMD_AUTO,
++ SPEC_STORE_BYPASS_CMD_ON,
++};
++
++static const char *ssb_strings[] = {
++ [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
++ [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled"
++};
++
++static const struct {
++ const char *option;
++ enum ssb_mitigation_cmd cmd;
++} ssb_mitigation_options[] = {
++ { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
++ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
++ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
++};
++
++static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
++{
++ enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
++ char arg[20];
++ int ret, i;
++
++ if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
++ return SPEC_STORE_BYPASS_CMD_NONE;
++ } else {
++ ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
++ arg, sizeof(arg));
++ if (ret < 0)
++ return SPEC_STORE_BYPASS_CMD_AUTO;
++
++ for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
++ if (!match_option(arg, ret, ssb_mitigation_options[i].option))
++ continue;
++
++ cmd = ssb_mitigation_options[i].cmd;
++ break;
++ }
++
++ if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
++ pr_err("unknown option (%s). Switching to AUTO select\n", arg);
++ return SPEC_STORE_BYPASS_CMD_AUTO;
++ }
++ }
++
++ return cmd;
++}
++
++static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
++{
++ enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
++ enum ssb_mitigation_cmd cmd;
++
++ if (!boot_cpu_has(X86_FEATURE_RDS))
++ return mode;
++
++ cmd = ssb_parse_cmdline();
++ if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
++ (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
++ cmd == SPEC_STORE_BYPASS_CMD_AUTO))
++ return mode;
++
++ switch (cmd) {
++ case SPEC_STORE_BYPASS_CMD_AUTO:
++ case SPEC_STORE_BYPASS_CMD_ON:
++ mode = SPEC_STORE_BYPASS_DISABLE;
++ break;
++ case SPEC_STORE_BYPASS_CMD_NONE:
++ break;
++ }
++
++ if (mode != SPEC_STORE_BYPASS_NONE)
++ setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
++ return mode;
++}
++
++static void ssb_select_mitigation()
++{
++ ssb_mode = __ssb_select_mitigation();
++
++ if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
++ pr_info("%s\n", ssb_strings[ssb_mode]);
++}
++
++#undef pr_fmt
+
+ #ifdef CONFIG_SYSFS
+
+@@ -382,6 +482,9 @@ ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+ spectre_v2_module_string());
+
++ case X86_BUG_SPEC_STORE_BYPASS:
++ return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
++
+ default:
+ break;
+ }
+--
+2.7.4
+
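For example, the new knob is passed on the kernel command line from the bootloader; per the mitigation logic above it only takes effect on CPUs that advertise RDS:

	spec_store_bypass_disable=on	# unconditionally disable Speculative Store Bypass
	nospec_store_bypass_disable	# leave Speculative Store Bypass enabled

Omitting both options is equivalent to spec_store_bypass_disable=auto.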
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0030-x86-bugs-intel-Set-proper-CPU-features-and-setup-RDS.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0030-x86-bugs-intel-Set-proper-CPU-features-and-setup-RDS.patch
new file mode 100644
index 00000000..d4c39c90
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0030-x86-bugs-intel-Set-proper-CPU-features-and-setup-RDS.patch
@@ -0,0 +1,183 @@
+From 58645a84abdc201b048cc16d3e1e500884ca452b Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:22 -0400
+Subject: [PATCH 30/93] x86/bugs/intel: Set proper CPU features and setup RDS
+
+commit 772439717dbf703b39990be58d8d4e3e4ad0598a upstream
+
+Intel CPUs expose methods to:
+
+ - Detect whether RDS capability is available via CPUID.7.0.EDX[31],
+
+ - The SPEC_CTRL MSR(0x48), bit 2 set to enable RDS.
+
+ - MSR_IA32_ARCH_CAPABILITIES, Bit(4) no need to enable RDS.
+
+With that in mind, if spec_store_bypass_disable=[auto,on] is selected, set the
+SPEC_CTRL MSR at boot time to enable RDS if the platform requires it.
+
+Note that this does not fix the KVM case, where SPEC_CTRL is exposed to
+guests which can muck with it; see the patch titled:
+ KVM/SVM/VMX/x86/spectre_v2: Support the combination of guest and host IBRS.
+
+And for the firmware case (where IBRS needs to be set), see the patch titled:
+ x86/spectre_v2: Read SPEC_CTRL MSR during boot and re-use reserved bits
+
+[ tglx: Disentangled it from the Intel implementation and kept the call order ]
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/msr-index.h | 6 ++++++
+ arch/x86/kernel/cpu/bugs.c | 30 ++++++++++++++++++++++++++++--
+ arch/x86/kernel/cpu/common.c | 10 ++++++----
+ arch/x86/kernel/cpu/cpu.h | 3 +++
+ arch/x86/kernel/cpu/intel.c | 1 +
+ 5 files changed, 44 insertions(+), 6 deletions(-)
+
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 0e4da8e..9f014c1 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -40,6 +40,7 @@
+ #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
+ #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
+ #define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
++#define SPEC_CTRL_RDS (1 << 2) /* Reduced Data Speculation */
+
+ #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
+ #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
+@@ -61,6 +62,11 @@
+ #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
+ #define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
+ #define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
++#define ARCH_CAP_RDS_NO (1 << 4) /*
++ * Not susceptible to Speculative Store Bypass
++ * attack, so no Reduced Data Speculation control
++ * required.
++ */
+
+ #define MSR_IA32_BBL_CR_CTL 0x00000119
+ #define MSR_IA32_BBL_CR_CTL3 0x0000011e
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 75146d9..7dd16f4 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -116,7 +116,7 @@ static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+
+ void x86_spec_ctrl_set(u64 val)
+ {
+- if (val & ~SPEC_CTRL_IBRS)
++ if (val & ~(SPEC_CTRL_IBRS | SPEC_CTRL_RDS))
+ WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
+ else
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
+@@ -443,8 +443,28 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
+ break;
+ }
+
+- if (mode != SPEC_STORE_BYPASS_NONE)
++ /*
++ * We have three CPU feature flags that are in play here:
++ * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
++ * - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
++ * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
++ */
++ if (mode != SPEC_STORE_BYPASS_NONE) {
+ setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
++ /*
++ * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
++ * a completely different MSR and bit dependent on family.
++ */
++ switch (boot_cpu_data.x86_vendor) {
++ case X86_VENDOR_INTEL:
++ x86_spec_ctrl_base |= SPEC_CTRL_RDS;
++ x86_spec_ctrl_set(SPEC_CTRL_RDS);
++ break;
++ case X86_VENDOR_AMD:
++ break;
++ }
++ }
++
+ return mode;
+ }
+
+@@ -458,6 +478,12 @@ static void ssb_select_mitigation()
+
+ #undef pr_fmt
+
++void x86_spec_ctrl_setup_ap(void)
++{
++ if (boot_cpu_has(X86_FEATURE_IBRS))
++ x86_spec_ctrl_set(x86_spec_ctrl_base & (SPEC_CTRL_IBRS | SPEC_CTRL_RDS));
++}
++
+ #ifdef CONFIG_SYSFS
+
+ ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 4f1050a..ab6b3ad 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -903,7 +903,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+ u64 ia32_cap = 0;
+
+- if (!x86_match_cpu(cpu_no_spec_store_bypass))
++ if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
++ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
++
++ if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
++ !(ia32_cap & ARCH_CAP_RDS_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+ if (x86_match_cpu(cpu_no_speculation))
+@@ -915,9 +919,6 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ if (x86_match_cpu(cpu_no_meltdown))
+ return;
+
+- if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+-
+ /* Rogue Data Cache Load? No! */
+ if (ia32_cap & ARCH_CAP_RDCL_NO)
+ return;
+@@ -1339,6 +1340,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
+ #endif
+ mtrr_ap_init();
+ validate_apic_and_package_id(c);
++ x86_spec_ctrl_setup_ap();
+ }
+
+ struct msr_range {
+diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
+index 2584265..3b19d82 100644
+--- a/arch/x86/kernel/cpu/cpu.h
++++ b/arch/x86/kernel/cpu/cpu.h
+@@ -46,4 +46,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
+
+ extern void get_cpu_cap(struct cpuinfo_x86 *c);
+ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
++
++extern void x86_spec_ctrl_setup_ap(void);
++
+ #endif /* ARCH_X86_CPU_H */
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 8fb1d65..f15aea6 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -154,6 +154,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
+ setup_clear_cpu_cap(X86_FEATURE_STIBP);
+ setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
+ setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
++ setup_clear_cpu_cap(X86_FEATURE_RDS);
+ }
+
+ /*
+--
+2.7.4
+
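As a minimal sketch of the detection method listed above (not part of the patch, and assuming a toolchain whose <cpuid.h> provides __get_cpuid_count), the RDS capability bit CPUID.7.0.EDX[31] can be probed from userspace like this:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
		return 1;	/* CPUID leaf 7 not supported */
	printf("CPUID.7.0.EDX[31] (RDS): %s\n",
	       (edx & (1u << 31)) ? "supported" : "not supported");
	return 0;
}
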
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0031-x86-bugs-Whitelist-allowed-SPEC_CTRL-MSR-values.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0031-x86-bugs-Whitelist-allowed-SPEC_CTRL-MSR-values.patch
new file mode 100644
index 00000000..ff00b421
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0031-x86-bugs-Whitelist-allowed-SPEC_CTRL-MSR-values.patch
@@ -0,0 +1,70 @@
+From 9b78406df0ca3d21903d71f41b64a793dad76cfc Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:23 -0400
+Subject: [PATCH 31/93] x86/bugs: Whitelist allowed SPEC_CTRL MSR values
+
+commit 1115a859f33276fe8afb31c60cf9d8e657872558 upstream
+
+Intel and AMD SPEC_CTRL (0x48) MSR semantics may differ in the
+future (or in fact use different MSRs for the same functionality).
+
+As such, a run-time mechanism is required to whitelist the appropriate MSR
+values.
+
+[ tglx: Made the variable __ro_after_init ]
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 7dd16f4..b92c469 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -34,6 +34,12 @@ static void __init ssb_select_mitigation(void);
+ */
+ static u64 __ro_after_init x86_spec_ctrl_base;
+
++/*
++ * The vendor and possibly platform specific bits which can be modified in
++ * x86_spec_ctrl_base.
++ */
++static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
++
+ void __init check_bugs(void)
+ {
+ identify_boot_cpu();
+@@ -116,7 +122,7 @@ static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+
+ void x86_spec_ctrl_set(u64 val)
+ {
+- if (val & ~(SPEC_CTRL_IBRS | SPEC_CTRL_RDS))
++ if (val & x86_spec_ctrl_mask)
+ WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
+ else
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
+@@ -458,6 +464,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_INTEL:
+ x86_spec_ctrl_base |= SPEC_CTRL_RDS;
++ x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS;
+ x86_spec_ctrl_set(SPEC_CTRL_RDS);
+ break;
+ case X86_VENDOR_AMD:
+@@ -481,7 +488,7 @@ static void ssb_select_mitigation()
+ void x86_spec_ctrl_setup_ap(void)
+ {
+ if (boot_cpu_has(X86_FEATURE_IBRS))
+- x86_spec_ctrl_set(x86_spec_ctrl_base & (SPEC_CTRL_IBRS | SPEC_CTRL_RDS));
++ x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
+ }
+
+ #ifdef CONFIG_SYSFS
+--
+2.7.4
+
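A small self-contained sketch (not part of the patch) of the whitelist arithmetic above: x86_spec_ctrl_mask holds the bits that are still disallowed, so a write is rejected when it sets any bit remaining in the mask. The bit positions mirror the patch (IBRS bit 0, RDS bit 2).

#include <stdio.h>
#include <stdint.h>

#define SPEC_CTRL_IBRS (1ULL << 0)
#define SPEC_CTRL_RDS  (1ULL << 2)

int main(void)
{
	uint64_t mask = ~SPEC_CTRL_IBRS;	/* boot default: only IBRS allowed */

	printf("IBRS allowed: %s\n", (SPEC_CTRL_IBRS & mask) ? "no" : "yes");
	printf("RDS allowed:  %s\n", (SPEC_CTRL_RDS & mask) ? "no" : "yes");

	mask &= ~SPEC_CTRL_RDS;			/* Intel + RDS selected: allow bit 2 too */
	printf("RDS allowed after whitelisting: %s\n",
	       (SPEC_CTRL_RDS & mask) ? "no" : "yes");
	return 0;
}
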
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0032-x86-bugs-AMD-Add-support-to-disable-RDS-on-Fam-15-16.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0032-x86-bugs-AMD-Add-support-to-disable-RDS-on-Fam-15-16.patch
new file mode 100644
index 00000000..a79d655d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0032-x86-bugs-AMD-Add-support-to-disable-RDS-on-Fam-15-16.patch
@@ -0,0 +1,200 @@
+From 5066a8fdb740b1c31a315ea7da3a58c8208b15eb Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Sun, 20 May 2018 20:52:05 +0100
+Subject: [PATCH 32/93] x86/bugs/AMD: Add support to disable RDS on
+ Fam[15,16,17]h if requested
+
+commit 764f3c21588a059cd783c6ba0734d4db2d72822d upstream
+
+AMD does not need the Speculative Store Bypass mitigation to be enabled.
+
+The controls for this are already available and can be set via MSR
+C001_1020. Each family uses a different bit in that MSR for this.
+
+[ tglx: Expose the bit mask via a variable and move the actual MSR fiddling
+ into the bugs code as that's the right thing to do and also required
+ to prepare for dynamic enable/disable ]
+
+Suggested-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/include/asm/nospec-branch.h | 4 ++++
+ arch/x86/kernel/cpu/amd.c | 26 ++++++++++++++++++++++++++
+ arch/x86/kernel/cpu/bugs.c | 27 ++++++++++++++++++++++++++-
+ arch/x86/kernel/cpu/common.c | 4 ++++
+ 5 files changed, 61 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 013f3de..8797069 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -205,6 +205,7 @@
+ #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+ #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+ #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
++#define X86_FEATURE_AMD_RDS (7*32+24) /* "" AMD RDS implementation */
+
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 7b9eacf..3a1541c 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -244,6 +244,10 @@ enum ssb_mitigation {
+ SPEC_STORE_BYPASS_DISABLE,
+ };
+
++/* AMD specific Speculative Store Bypass MSR data */
++extern u64 x86_amd_ls_cfg_base;
++extern u64 x86_amd_ls_cfg_rds_mask;
++
+ extern char __indirect_thunk_start[];
+ extern char __indirect_thunk_end[];
+
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 747f8a2..7551d9ad 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -9,6 +9,7 @@
+ #include <asm/processor.h>
+ #include <asm/apic.h>
+ #include <asm/cpu.h>
++#include <asm/nospec-branch.h>
+ #include <asm/smp.h>
+ #include <asm/pci-direct.h>
+ #include <asm/delay.h>
+@@ -542,6 +543,26 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
+ rdmsrl(MSR_FAM10H_NODE_ID, value);
+ nodes_per_socket = ((value >> 3) & 7) + 1;
+ }
++
++ if (c->x86 >= 0x15 && c->x86 <= 0x17) {
++ unsigned int bit;
++
++ switch (c->x86) {
++ case 0x15: bit = 54; break;
++ case 0x16: bit = 33; break;
++ case 0x17: bit = 10; break;
++ default: return;
++ }
++ /*
++ * Try to cache the base value so further operations can
++ * avoid RMW. If that faults, do not enable RDS.
++ */
++ if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
++ setup_force_cpu_cap(X86_FEATURE_RDS);
++ setup_force_cpu_cap(X86_FEATURE_AMD_RDS);
++ x86_amd_ls_cfg_rds_mask = 1ULL << bit;
++ }
++ }
+ }
+
+ static void early_init_amd(struct cpuinfo_x86 *c)
+@@ -827,6 +848,11 @@ static void init_amd(struct cpuinfo_x86 *c)
+ /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
+ if (!cpu_has(c, X86_FEATURE_XENPV))
+ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
++
++ if (boot_cpu_has(X86_FEATURE_AMD_RDS)) {
++ set_cpu_cap(c, X86_FEATURE_RDS);
++ set_cpu_cap(c, X86_FEATURE_AMD_RDS);
++ }
+ }
+
+ #ifdef CONFIG_X86_32
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index b92c469..b3696cc 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -40,6 +40,13 @@ static u64 __ro_after_init x86_spec_ctrl_base;
+ */
+ static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
+
++/*
++ * AMD specific MSR info for Speculative Store Bypass control.
++ * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
++ */
++u64 __ro_after_init x86_amd_ls_cfg_base;
++u64 __ro_after_init x86_amd_ls_cfg_rds_mask;
++
+ void __init check_bugs(void)
+ {
+ identify_boot_cpu();
+@@ -51,7 +58,8 @@ void __init check_bugs(void)
+
+ /*
+ * Read the SPEC_CTRL MSR to account for reserved bits which may
+- * have unknown values.
++ * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
++ * init code as it is not enumerated and depends on the family.
+ */
+ if (boot_cpu_has(X86_FEATURE_IBRS))
+ rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+@@ -153,6 +161,14 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
+
++static void x86_amd_rds_enable(void)
++{
++ u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;
++
++ if (boot_cpu_has(X86_FEATURE_AMD_RDS))
++ wrmsrl(MSR_AMD64_LS_CFG, msrval);
++}
++
+ #ifdef RETPOLINE
+ static bool spectre_v2_bad_module;
+
+@@ -442,6 +458,11 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
+
+ switch (cmd) {
+ case SPEC_STORE_BYPASS_CMD_AUTO:
++ /*
++ * AMD platforms by default don't need SSB mitigation.
++ */
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
++ break;
+ case SPEC_STORE_BYPASS_CMD_ON:
+ mode = SPEC_STORE_BYPASS_DISABLE;
+ break;
+@@ -468,6 +489,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
+ x86_spec_ctrl_set(SPEC_CTRL_RDS);
+ break;
+ case X86_VENDOR_AMD:
++ x86_amd_rds_enable();
+ break;
+ }
+ }
+@@ -489,6 +511,9 @@ void x86_spec_ctrl_setup_ap(void)
+ {
+ if (boot_cpu_has(X86_FEATURE_IBRS))
+ x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
++
++ if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
++ x86_amd_rds_enable();
+ }
+
+ #ifdef CONFIG_SYSFS
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index ab6b3ad..beb1da8 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -895,6 +895,10 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
+ { X86_VENDOR_CENTAUR, 5, },
+ { X86_VENDOR_INTEL, 5, },
+ { X86_VENDOR_NSC, 5, },
++ { X86_VENDOR_AMD, 0x12, },
++ { X86_VENDOR_AMD, 0x11, },
++ { X86_VENDOR_AMD, 0x10, },
++ { X86_VENDOR_AMD, 0xf, },
+ { X86_VENDOR_ANY, 4, },
+ {}
+ };
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0033-x86-KVM-VMX-Expose-SPEC_CTRL-Bit-2-to-the-guest.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0033-x86-KVM-VMX-Expose-SPEC_CTRL-Bit-2-to-the-guest.patch
new file mode 100644
index 00000000..743d2a90
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0033-x86-KVM-VMX-Expose-SPEC_CTRL-Bit-2-to-the-guest.patch
@@ -0,0 +1,120 @@
+From 8f8f17abbbabcff7ebf353b62bbcfb414f83d77e Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:25 -0400
+Subject: [PATCH 33/93] x86/KVM/VMX: Expose SPEC_CTRL Bit(2) to the guest
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+commit da39556f66f5cfe8f9c989206974f1cb16ca5d7c upstream
+
+Expose the CPUID.7.EDX[31] bit to the guest, and also guard against various
+combinations of SPEC_CTRL MSR values.
+
+The handling of the MSR (to take into account the host value of SPEC_CTRL
+Bit(2)) is taken care of in patch:
+
+ KVM/SVM/VMX/x86/spectre_v2: Support the combination of guest and host IBRS
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+
+[dwmw2: Handle 4.9 guest CPUID differences, rename
+ guest_cpuid_has_ibrs() → guest_cpuid_has_spec_ctrl()]
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/cpuid.c | 2 +-
+ arch/x86/kvm/cpuid.h | 4 ++--
+ arch/x86/kvm/svm.c | 4 ++--
+ arch/x86/kvm/vmx.c | 6 +++---
+ 4 files changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index bcebe84..237e926 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -382,7 +382,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+
+ /* cpuid 7.0.edx*/
+ const u32 kvm_cpuid_7_0_edx_x86_features =
+- F(SPEC_CTRL) | F(ARCH_CAPABILITIES);
++ F(SPEC_CTRL) | F(RDS) | F(ARCH_CAPABILITIES);
+
+ /* all calls to cpuid_count() should be made on the same cpu */
+ get_cpu();
+diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
+index 841e80d..39dd457 100644
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -163,7 +163,7 @@ static inline bool guest_cpuid_has_ibpb(struct kvm_vcpu *vcpu)
+ return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL));
+ }
+
+-static inline bool guest_cpuid_has_ibrs(struct kvm_vcpu *vcpu)
++static inline bool guest_cpuid_has_spec_ctrl(struct kvm_vcpu *vcpu)
+ {
+ struct kvm_cpuid_entry2 *best;
+
+@@ -171,7 +171,7 @@ static inline bool guest_cpuid_has_ibrs(struct kvm_vcpu *vcpu)
+ if (best && (best->ebx & bit(X86_FEATURE_IBRS)))
+ return true;
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+- return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL));
++ return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_RDS)));
+ }
+
+ static inline bool guest_cpuid_has_arch_capabilities(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index a07579f..43736dd 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3540,7 +3540,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+- !guest_cpuid_has_ibrs(vcpu))
++ !guest_cpuid_has_spec_ctrl(vcpu))
+ return 1;
+
+ msr_info->data = svm->spec_ctrl;
+@@ -3631,7 +3631,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr->host_initiated &&
+- !guest_cpuid_has_ibrs(vcpu))
++ !guest_cpuid_has_spec_ctrl(vcpu))
+ return 1;
+
+ /* The STIBP bit doesn't fault even if it's not advertised */
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index c386d13..3210add 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -3017,7 +3017,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+- !guest_cpuid_has_ibrs(vcpu))
++ !guest_cpuid_has_spec_ctrl(vcpu))
+ return 1;
+
+ msr_info->data = to_vmx(vcpu)->spec_ctrl;
+@@ -3129,11 +3129,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+- !guest_cpuid_has_ibrs(vcpu))
++ !guest_cpuid_has_spec_ctrl(vcpu))
+ return 1;
+
+ /* The STIBP bit doesn't fault even if it's not advertised */
+- if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
++ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_RDS))
+ return 1;
+
+ vmx->spec_ctrl = data;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0034-x86-speculation-Create-spec-ctrl.h-to-avoid-include-.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0034-x86-speculation-Create-spec-ctrl.h-to-avoid-include-.patch
new file mode 100644
index 00000000..36224a56
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0034-x86-speculation-Create-spec-ctrl.h-to-avoid-include-.patch
@@ -0,0 +1,141 @@
+From b8380a76b18fa5522368b50c284530fc6e1b1992 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 29 Apr 2018 15:01:37 +0200
+Subject: [PATCH 34/93] x86/speculation: Create spec-ctrl.h to avoid include
+ hell
+
+commit 28a2775217b17208811fa43a9e96bd1fdf417b86 upstream
+
+Having everything in nospec-branch.h creates a hell of dependencies when
+adding the prctl based switching mechanism. Move everything which is not
+required in nospec-branch.h to spec-ctrl.h and fix up the includes in the
+relevant files.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 14 --------------
+ arch/x86/include/asm/spec-ctrl.h | 21 +++++++++++++++++++++
+ arch/x86/kernel/cpu/amd.c | 2 +-
+ arch/x86/kernel/cpu/bugs.c | 2 +-
+ arch/x86/kvm/svm.c | 2 +-
+ arch/x86/kvm/vmx.c | 2 +-
+ 6 files changed, 25 insertions(+), 18 deletions(-)
+ create mode 100644 arch/x86/include/asm/spec-ctrl.h
+
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 3a1541c..1119f14 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -228,26 +228,12 @@ enum spectre_v2_mitigation {
+ extern void x86_spec_ctrl_set(u64);
+ extern u64 x86_spec_ctrl_get_default(void);
+
+-/*
+- * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
+- * the guest has, while on VMEXIT we restore the host view. This
+- * would be easier if SPEC_CTRL were architecturally maskable or
+- * shadowable for guests but this is not (currently) the case.
+- * Takes the guest view of SPEC_CTRL MSR as a parameter.
+- */
+-extern void x86_spec_ctrl_set_guest(u64);
+-extern void x86_spec_ctrl_restore_host(u64);
+-
+ /* The Speculative Store Bypass disable variants */
+ enum ssb_mitigation {
+ SPEC_STORE_BYPASS_NONE,
+ SPEC_STORE_BYPASS_DISABLE,
+ };
+
+-/* AMD specific Speculative Store Bypass MSR data */
+-extern u64 x86_amd_ls_cfg_base;
+-extern u64 x86_amd_ls_cfg_rds_mask;
+-
+ extern char __indirect_thunk_start[];
+ extern char __indirect_thunk_end[];
+
+diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
+new file mode 100644
+index 0000000..3ad6442
+--- /dev/null
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -0,0 +1,21 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_X86_SPECCTRL_H_
++#define _ASM_X86_SPECCTRL_H_
++
++#include <asm/nospec-branch.h>
++
++/*
++ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
++ * the guest has, while on VMEXIT we restore the host view. This
++ * would be easier if SPEC_CTRL were architecturally maskable or
++ * shadowable for guests but this is not (currently) the case.
++ * Takes the guest view of SPEC_CTRL MSR as a parameter.
++ */
++extern void x86_spec_ctrl_set_guest(u64);
++extern void x86_spec_ctrl_restore_host(u64);
++
++/* AMD specific Speculative Store Bypass MSR data */
++extern u64 x86_amd_ls_cfg_base;
++extern u64 x86_amd_ls_cfg_rds_mask;
++
++#endif
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 7551d9ad..a176c81 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -9,7 +9,7 @@
+ #include <asm/processor.h>
+ #include <asm/apic.h>
+ #include <asm/cpu.h>
+-#include <asm/nospec-branch.h>
++#include <asm/spec-ctrl.h>
+ #include <asm/smp.h>
+ #include <asm/pci-direct.h>
+ #include <asm/delay.h>
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index b3696cc..46d01fd 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -12,7 +12,7 @@
+ #include <linux/cpu.h>
+ #include <linux/module.h>
+
+-#include <asm/nospec-branch.h>
++#include <asm/spec-ctrl.h>
+ #include <asm/cmdline.h>
+ #include <asm/bugs.h>
+ #include <asm/processor.h>
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 43736dd..47779f5 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -45,7 +45,7 @@
+ #include <asm/kvm_para.h>
+ #include <asm/irq_remapping.h>
+ #include <asm/microcode.h>
+-#include <asm/nospec-branch.h>
++#include <asm/spec-ctrl.h>
+
+ #include <asm/virtext.h>
+ #include "trace.h"
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 3210add..17199dc 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -50,7 +50,7 @@
+ #include <asm/apic.h>
+ #include <asm/irq_remapping.h>
+ #include <asm/microcode.h>
+-#include <asm/nospec-branch.h>
++#include <asm/spec-ctrl.h>
+
+ #include "trace.h"
+ #include "pmu.h"
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0035-x86-process-Optimize-TIF-checks-in-__switch_to_xtra.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0035-x86-process-Optimize-TIF-checks-in-__switch_to_xtra.patch
new file mode 100644
index 00000000..bcbf8f92
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0035-x86-process-Optimize-TIF-checks-in-__switch_to_xtra.patch
@@ -0,0 +1,125 @@
+From ac5c35e60743b4260df777cc4ac1e877c2999b1d Mon Sep 17 00:00:00 2001
+From: Kyle Huey <me@kylehuey.com>
+Date: Tue, 14 Feb 2017 00:11:02 -0800
+Subject: [PATCH 35/93] x86/process: Optimize TIF checks in __switch_to_xtra()
+
+commit af8b3cd3934ec60f4c2a420d19a9d416554f140b upstream
+
+Help the compiler to avoid reevaluating the thread flags for each checked
+bit by reordering the bit checks and providing an explicit xor for
+evaluation.
+
+With default defconfigs for each arch,
+
+x86_64: arch/x86/kernel/process.o
+text data bss dec hex
+3056 8577 16 11649 2d81 Before
+3024 8577 16 11617 2d61 After
+
+i386: arch/x86/kernel/process.o
+text data bss dec hex
+2957 8673 8 11638 2d76 Before
+2925 8673 8 11606 2d56 After
+
+Originally-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Kyle Huey <khuey@kylehuey.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Link: http://lkml.kernel.org/r/20170214081104.9244-2-khuey@kylehuey.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+[dwmw2: backported to make TIF_RDS handling simpler.
+ No deferred TR reload.]
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/process.c | 54 +++++++++++++++++++++++++++--------------------
+ 1 file changed, 31 insertions(+), 23 deletions(-)
+
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index a55b320..0e1999e 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -192,48 +192,56 @@ int set_tsc_mode(unsigned int val)
+ return 0;
+ }
+
++static inline void switch_to_bitmap(struct tss_struct *tss,
++ struct thread_struct *prev,
++ struct thread_struct *next,
++ unsigned long tifp, unsigned long tifn)
++{
++ if (tifn & _TIF_IO_BITMAP) {
++ /*
++ * Copy the relevant range of the IO bitmap.
++ * Normally this is 128 bytes or less:
++ */
++ memcpy(tss->io_bitmap, next->io_bitmap_ptr,
++ max(prev->io_bitmap_max, next->io_bitmap_max));
++ } else if (tifp & _TIF_IO_BITMAP) {
++ /*
++ * Clear any possible leftover bits:
++ */
++ memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
++ }
++}
++
+ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+ struct tss_struct *tss)
+ {
+ struct thread_struct *prev, *next;
++ unsigned long tifp, tifn;
+
+ prev = &prev_p->thread;
+ next = &next_p->thread;
+
+- if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
+- test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
++ tifn = READ_ONCE(task_thread_info(next_p)->flags);
++ tifp = READ_ONCE(task_thread_info(prev_p)->flags);
++ switch_to_bitmap(tss, prev, next, tifp, tifn);
++
++ propagate_user_return_notify(prev_p, next_p);
++
++ if ((tifp ^ tifn) & _TIF_BLOCKSTEP) {
+ unsigned long debugctl = get_debugctlmsr();
+
+ debugctl &= ~DEBUGCTLMSR_BTF;
+- if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
++ if (tifn & _TIF_BLOCKSTEP)
+ debugctl |= DEBUGCTLMSR_BTF;
+-
+ update_debugctlmsr(debugctl);
+ }
+
+- if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
+- test_tsk_thread_flag(next_p, TIF_NOTSC)) {
+- /* prev and next are different */
+- if (test_tsk_thread_flag(next_p, TIF_NOTSC))
++ if ((tifp ^ tifn) & _TIF_NOTSC) {
++ if (tifn & _TIF_NOTSC)
+ hard_disable_TSC();
+ else
+ hard_enable_TSC();
+ }
+-
+- if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
+- /*
+- * Copy the relevant range of the IO bitmap.
+- * Normally this is 128 bytes or less:
+- */
+- memcpy(tss->io_bitmap, next->io_bitmap_ptr,
+- max(prev->io_bitmap_max, next->io_bitmap_max));
+- } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
+- /*
+- * Clear any possible leftover bits:
+- */
+- memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
+- }
+- propagate_user_return_notify(prev_p, next_p);
+ }
+
+ /*
+--
+2.7.4
+
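A worked illustration (not part of the patch) of the (tifp ^ tifn) pattern introduced above: XOR of the two thread-flag words leaves exactly the bits that changed across the switch, so each feature is touched only when its flag actually differs. The flag positions below are made up for the demonstration.

#include <stdio.h>

#define FLAG_A (1UL << 3)	/* hypothetical TIF-style flags */
#define FLAG_B (1UL << 7)

int main(void)
{
	unsigned long tifp = FLAG_A;		/* previous task: A set       */
	unsigned long tifn = FLAG_A | FLAG_B;	/* next task:     A and B set */
	unsigned long diff = tifp ^ tifn;

	printf("FLAG_A changed: %s\n", (diff & FLAG_A) ? "yes" : "no");	/* no  */
	printf("FLAG_B changed: %s\n", (diff & FLAG_B) ? "yes" : "no");	/* yes */
	return 0;
}
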
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0036-x86-process-Correct-and-optimize-TIF_BLOCKSTEP-switc.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0036-x86-process-Correct-and-optimize-TIF_BLOCKSTEP-switc.patch
new file mode 100644
index 00000000..9fd2ab23
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0036-x86-process-Correct-and-optimize-TIF_BLOCKSTEP-switc.patch
@@ -0,0 +1,84 @@
+From 19f795b97249d2e81ea918644577ab9669704c28 Mon Sep 17 00:00:00 2001
+From: Kyle Huey <me@kylehuey.com>
+Date: Tue, 14 Feb 2017 00:11:03 -0800
+Subject: [PATCH 36/93] x86/process: Correct and optimize TIF_BLOCKSTEP switch
+
+commit b9894a2f5bd18b1691cb6872c9afe32b148d0132 upstream
+
+The debug control MSR is "highly magical" as the blockstep bit can be
+cleared by hardware under poorly documented circumstances.
+
+So a task switch relying on the bit set by the previous task (according to
+the previous tasks thread flags) can trip over this and not update the flag
+for the next task.
+
+To fix this, it is required to handle DEBUGCTLMSR_BTF when either the previous
+or the next or both tasks have the TIF_BLOCKSTEP flag set.
+
+While at it, avoid branching within the TIF_BLOCKSTEP case and evaluating
+boot_cpu_data twice in kernels without CONFIG_X86_DEBUGCTLMSR.
+
+x86_64: arch/x86/kernel/process.o
+text data bss dec hex
+3024 8577 16 11617 2d61 Before
+3008 8577 16 11601 2d51 After
+
+i386: No change
+
+[ tglx: Made the shift value explicit, used a local variable to make the
+code readable and massaged the changelog ]
+
+Originally-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Kyle Huey <khuey@kylehuey.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Link: http://lkml.kernel.org/r/20170214081104.9244-3-khuey@kylehuey.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/msr-index.h | 1 +
+ arch/x86/kernel/process.c | 12 +++++++-----
+ 2 files changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 9f014c1..4027c33 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -141,6 +141,7 @@
+
+ /* DEBUGCTLMSR bits (others vary by model): */
+ #define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
++#define DEBUGCTLMSR_BTF_SHIFT 1
+ #define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */
+ #define DEBUGCTLMSR_TR (1UL << 6)
+ #define DEBUGCTLMSR_BTS (1UL << 7)
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 0e1999e..496eef6 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -227,13 +227,15 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+
+ propagate_user_return_notify(prev_p, next_p);
+
+- if ((tifp ^ tifn) & _TIF_BLOCKSTEP) {
+- unsigned long debugctl = get_debugctlmsr();
++ if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
++ arch_has_block_step()) {
++ unsigned long debugctl, msk;
+
++ rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ debugctl &= ~DEBUGCTLMSR_BTF;
+- if (tifn & _TIF_BLOCKSTEP)
+- debugctl |= DEBUGCTLMSR_BTF;
+- update_debugctlmsr(debugctl);
++ msk = tifn & _TIF_BLOCKSTEP;
++ debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
++ wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ }
+
+ if ((tifp ^ tifn) & _TIF_NOTSC) {
+--
+2.7.4
+
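A minimal sketch (not part of the patch) of the branchless update used above: the TIF_BLOCKSTEP bit is isolated, shifted down to bit 0 and back up to the DEBUGCTLMSR_BTF position, so no conditional is needed. The source bit position below is a placeholder, not the real TIF layout; the destination shift of 1 matches DEBUGCTLMSR_BTF_SHIFT from the patch.

#include <stdio.h>

#define SRC_SHIFT 25UL	/* hypothetical TIF_BLOCKSTEP position */
#define DST_SHIFT 1UL	/* DEBUGCTLMSR_BTF_SHIFT */

int main(void)
{
	unsigned long flags = 1UL << SRC_SHIFT;		/* blockstep requested */
	unsigned long msk = flags & (1UL << SRC_SHIFT);
	unsigned long debugctl = 0;

	debugctl |= (msk >> SRC_SHIFT) << DST_SHIFT;
	printf("debugctl = %#lx\n", debugctl);		/* 0x2, i.e. the BTF bit */
	return 0;
}
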
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0037-x86-process-Optimize-TIF_NOTSC-switch.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0037-x86-process-Optimize-TIF_NOTSC-switch.patch
new file mode 100644
index 00000000..e5a210ab
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0037-x86-process-Optimize-TIF_NOTSC-switch.patch
@@ -0,0 +1,112 @@
+From b72b69b9696975c9279441e4998ceca506280dec Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 14 Feb 2017 00:11:04 -0800
+Subject: [PATCH 37/93] x86/process: Optimize TIF_NOTSC switch
+
+commit 5a920155e388ec22a22e0532fb695b9215c9b34d upstream
+
+Provide and use a toggle helper instead of doing it with a branch.
+
+x86_64: arch/x86/kernel/process.o
+text data bss dec hex
+3008 8577 16 11601 2d51 Before
+2976 8577 16 11569 2d31 After
+
+i386: arch/x86/kernel/process.o
+text data bss dec hex
+2925 8673 8 11606 2d56 Before
+2893 8673 8 11574 2d36 After
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Link: http://lkml.kernel.org/r/20170214081104.9244-4-khuey@kylehuey.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/tlbflush.h | 10 ++++++++++
+ arch/x86/kernel/process.c | 22 ++++------------------
+ 2 files changed, 14 insertions(+), 18 deletions(-)
+
+diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
+index 99185a0..686a58d 100644
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -111,6 +111,16 @@ static inline void cr4_clear_bits(unsigned long mask)
+ }
+ }
+
++static inline void cr4_toggle_bits(unsigned long mask)
++{
++ unsigned long cr4;
++
++ cr4 = this_cpu_read(cpu_tlbstate.cr4);
++ cr4 ^= mask;
++ this_cpu_write(cpu_tlbstate.cr4, cr4);
++ __write_cr4(cr4);
++}
++
+ /* Read the CR4 shadow. */
+ static inline unsigned long cr4_read_shadow(void)
+ {
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 496eef6..b7e3822 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -134,11 +134,6 @@ void flush_thread(void)
+ fpu__clear(&tsk->thread.fpu);
+ }
+
+-static void hard_disable_TSC(void)
+-{
+- cr4_set_bits(X86_CR4_TSD);
+-}
+-
+ void disable_TSC(void)
+ {
+ preempt_disable();
+@@ -147,15 +142,10 @@ void disable_TSC(void)
+ * Must flip the CPU state synchronously with
+ * TIF_NOTSC in the current running context.
+ */
+- hard_disable_TSC();
++ cr4_set_bits(X86_CR4_TSD);
+ preempt_enable();
+ }
+
+-static void hard_enable_TSC(void)
+-{
+- cr4_clear_bits(X86_CR4_TSD);
+-}
+-
+ static void enable_TSC(void)
+ {
+ preempt_disable();
+@@ -164,7 +154,7 @@ static void enable_TSC(void)
+ * Must flip the CPU state synchronously with
+ * TIF_NOTSC in the current running context.
+ */
+- hard_enable_TSC();
++ cr4_clear_bits(X86_CR4_TSD);
+ preempt_enable();
+ }
+
+@@ -238,12 +228,8 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ }
+
+- if ((tifp ^ tifn) & _TIF_NOTSC) {
+- if (tifn & _TIF_NOTSC)
+- hard_disable_TSC();
+- else
+- hard_enable_TSC();
+- }
++ if ((tifp ^ tifn) & _TIF_NOTSC)
++ cr4_toggle_bits(X86_CR4_TSD);
+ }
+
+ /*
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0038-x86-process-Allow-runtime-control-of-Speculative-Sto.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0038-x86-process-Allow-runtime-control-of-Speculative-Sto.patch
new file mode 100644
index 00000000..86badf1b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0038-x86-process-Allow-runtime-control-of-Speculative-Sto.patch
@@ -0,0 +1,229 @@
+From 4cac5cffd142a19a03aceb9037302e10fe04d566 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 29 Apr 2018 15:21:42 +0200
+Subject: [PATCH 38/93] x86/process: Allow runtime control of Speculative Store
+ Bypass
+
+commit 885f82bfbc6fefb6664ea27965c3ab9ac4194b8c upstream
+
+The Speculative Store Bypass vulnerability can be mitigated with the
+Reduced Data Speculation (RDS) feature. To allow finer grained control of
+this eventually expensive mitigation a per task mitigation control is
+required.
+
+Add a new TIF_RDS flag and put it into the group of TIF flags which are
+evaluated for mismatch in switch_to(). If these bits differ in the previous
+and the next task, then the slow path function __switch_to_xtra() is
+invoked. Implement the TIF_RDS dependent mitigation control in the slow
+path.
+
+If the prctl for controlling Speculative Store Bypass is disabled or no
+task uses the prctl then there is no overhead in the switch_to() fast
+path.
+
+Update the KVM-related speculation control functions to take TIF_RDS into
+account as well.
+
+Based on a patch from Tim Chen. Completely rewritten.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/msr-index.h | 3 ++-
+ arch/x86/include/asm/spec-ctrl.h | 17 +++++++++++++++++
+ arch/x86/include/asm/thread_info.h | 6 ++++--
+ arch/x86/kernel/cpu/bugs.c | 26 +++++++++++++++++++++-----
+ arch/x86/kernel/process.c | 22 ++++++++++++++++++++++
+ 5 files changed, 66 insertions(+), 8 deletions(-)
+
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 4027c33..7ad3ed9 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -40,7 +40,8 @@
+ #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
+ #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
+ #define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
+-#define SPEC_CTRL_RDS (1 << 2) /* Reduced Data Speculation */
++#define SPEC_CTRL_RDS_SHIFT 2 /* Reduced Data Speculation bit */
++#define SPEC_CTRL_RDS (1 << SPEC_CTRL_RDS_SHIFT) /* Reduced Data Speculation */
+
+ #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
+ #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
+diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
+index 3ad6442..45ef00a 100644
+--- a/arch/x86/include/asm/spec-ctrl.h
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -2,6 +2,7 @@
+ #ifndef _ASM_X86_SPECCTRL_H_
+ #define _ASM_X86_SPECCTRL_H_
+
++#include <linux/thread_info.h>
+ #include <asm/nospec-branch.h>
+
+ /*
+@@ -18,4 +19,20 @@ extern void x86_spec_ctrl_restore_host(u64);
+ extern u64 x86_amd_ls_cfg_base;
+ extern u64 x86_amd_ls_cfg_rds_mask;
+
++/* The Intel SPEC CTRL MSR base value cache */
++extern u64 x86_spec_ctrl_base;
++
++static inline u64 rds_tif_to_spec_ctrl(u64 tifn)
++{
++ BUILD_BUG_ON(TIF_RDS < SPEC_CTRL_RDS_SHIFT);
++ return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT);
++}
++
++static inline u64 rds_tif_to_amd_ls_cfg(u64 tifn)
++{
++ return (tifn & _TIF_RDS) ? x86_amd_ls_cfg_rds_mask : 0ULL;
++}
++
++extern void speculative_store_bypass_update(void);
++
+ #endif
+diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
+index 89978b9..661afac 100644
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -83,6 +83,7 @@ struct thread_info {
+ #define TIF_SIGPENDING 2 /* signal pending */
+ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+ #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
++#define TIF_RDS 5 /* Reduced data speculation */
+ #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+ #define TIF_SECCOMP 8 /* secure computing */
+@@ -104,8 +105,9 @@ struct thread_info {
+ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+-#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
++#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
++#define _TIF_RDS (1 << TIF_RDS)
+ #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+ #define _TIF_SECCOMP (1 << TIF_SECCOMP)
+@@ -139,7 +141,7 @@ struct thread_info {
+
+ /* flags to check in __switch_to() */
+ #define _TIF_WORK_CTXSW \
+- (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP)
++ (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS)
+
+ #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
+ #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 46d01fd..4f09576 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -32,7 +32,7 @@ static void __init ssb_select_mitigation(void);
+ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
+ * writes to SPEC_CTRL contain whatever reserved bits have been set.
+ */
+-static u64 __ro_after_init x86_spec_ctrl_base;
++u64 __ro_after_init x86_spec_ctrl_base;
+
+ /*
+ * The vendor and possibly platform specific bits which can be modified in
+@@ -139,25 +139,41 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
+
+ u64 x86_spec_ctrl_get_default(void)
+ {
+- return x86_spec_ctrl_base;
++ u64 msrval = x86_spec_ctrl_base;
++
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
++ return msrval;
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
+
+ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
+ {
++ u64 host = x86_spec_ctrl_base;
++
+ if (!boot_cpu_has(X86_FEATURE_IBRS))
+ return;
+- if (x86_spec_ctrl_base != guest_spec_ctrl)
++
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
++
++ if (host != guest_spec_ctrl)
+ wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
+
+ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
+ {
++ u64 host = x86_spec_ctrl_base;
++
+ if (!boot_cpu_has(X86_FEATURE_IBRS))
+ return;
+- if (x86_spec_ctrl_base != guest_spec_ctrl)
+- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
++
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
++
++ if (host != guest_spec_ctrl)
++ wrmsrl(MSR_IA32_SPEC_CTRL, host);
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
+
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index b7e3822..9c48e18 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -33,6 +33,7 @@
+ #include <asm/mce.h>
+ #include <asm/vm86.h>
+ #include <asm/switch_to.h>
++#include <asm/spec-ctrl.h>
+
+ /*
+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+@@ -202,6 +203,24 @@ static inline void switch_to_bitmap(struct tss_struct *tss,
+ }
+ }
+
++static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
++{
++ u64 msr;
++
++ if (static_cpu_has(X86_FEATURE_AMD_RDS)) {
++ msr = x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn);
++ wrmsrl(MSR_AMD64_LS_CFG, msr);
++ } else {
++ msr = x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn);
++ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
++ }
++}
++
++void speculative_store_bypass_update(void)
++{
++ __speculative_store_bypass_update(current_thread_info()->flags);
++}
++
+ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+ struct tss_struct *tss)
+ {
+@@ -230,6 +249,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+
+ if ((tifp ^ tifn) & _TIF_NOTSC)
+ cr4_toggle_bits(X86_CR4_TSD);
++
++ if ((tifp ^ tifn) & _TIF_RDS)
++ __speculative_store_bypass_update(tifn);
+ }
+
+ /*
+--
+2.7.4
+
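A worked example (not part of the patch) of the rds_tif_to_spec_ctrl() arithmetic above, using the values from the patch (TIF_RDS is bit 5, SPEC_CTRL_RDS_SHIFT is 2): the thread-flag bit is shifted right by 5 - 2 = 3 so it lands exactly on the SPEC_CTRL MSR bit.

#include <stdio.h>

#define TIF_RDS			5
#define _TIF_RDS		(1UL << TIF_RDS)
#define SPEC_CTRL_RDS_SHIFT	2
#define SPEC_CTRL_RDS		(1UL << SPEC_CTRL_RDS_SHIFT)

static unsigned long rds_tif_to_spec_ctrl(unsigned long tifn)
{
	return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT);
}

int main(void)
{
	printf("%#lx\n", rds_tif_to_spec_ctrl(_TIF_RDS));	/* 0x4 == SPEC_CTRL_RDS */
	printf("%#lx\n", rds_tif_to_spec_ctrl(0));		/* 0 */
	return 0;
}
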
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0039-x86-speculation-Add-prctl-for-Speculative-Store-Bypa.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0039-x86-speculation-Add-prctl-for-Speculative-Store-Bypa.patch
new file mode 100644
index 00000000..d1cb5dcd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0039-x86-speculation-Add-prctl-for-Speculative-Store-Bypa.patch
@@ -0,0 +1,222 @@
+From 3495e18cce0a77cb974173998dfecbf22c9df984 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 29 Apr 2018 15:26:40 +0200
+Subject: [PATCH 39/93] x86/speculation: Add prctl for Speculative Store Bypass
+ mitigation
+
+commit a73ec77ee17ec556fe7f165d00314cb7c047b1ac upstream
+
+Add prctl based control for Speculative Store Bypass mitigation and make it
+the default mitigation for Intel and AMD.
+
+Andi Kleen provided the following rationale (slightly redacted):
+
+ There are multiple levels of impact of Speculative Store Bypass:
+
+ 1) JITed sandbox.
+ It cannot invoke system calls, but can do PRIME+PROBE and may have call
+ interfaces to other code
+
+ 2) Native code process.
+ No protection inside the process at this level.
+
+ 3) Kernel.
+
+ 4) Between processes.
+
+ The prctl tries to protect against attacks from case (1).
+
+ If the untrusted code can do random system calls then control is already
+ lost in a much worse way. So there needs to be system call protection in
+ some way (using a JIT not allowing them or seccomp). Or rather if the
+ process can subvert its environment somehow to do the prctl it can already
+ execute arbitrary code, which is much worse than SSB.
+
+ To put it differently, the point of the prctl is to not allow JITed code
+ to read data it shouldn't read from its JITed sandbox. If it already has
+ escaped its sandbox then it can already read everything it wants in its
+ address space, and do much worse.
+
+ The ability to control Speculative Store Bypass makes it possible to enable the
+ protection selectively without affecting overall system performance.
+
+Based on an initial patch from Tim Chen. Completely rewritten.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/kernel-parameters.txt | 6 ++-
+ arch/x86/include/asm/nospec-branch.h | 1 +
+ arch/x86/kernel/cpu/bugs.c | 83 +++++++++++++++++++++++++++++++-----
+ 3 files changed, 79 insertions(+), 11 deletions(-)
+
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 348ca9d..80811df 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -3990,7 +3990,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ off - Unconditionally enable Speculative Store Bypass
+ auto - Kernel detects whether the CPU model contains an
+ implementation of Speculative Store Bypass and
+- picks the most appropriate mitigation
++ picks the most appropriate mitigation.
++ prctl - Control Speculative Store Bypass per thread
++ via prctl. Speculative Store Bypass is enabled
++ for a process by default. The state of the control
++ is inherited on fork.
+
+ Not specifying this option is equivalent to
+ spec_store_bypass_disable=auto.
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 1119f14..71ad014 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -232,6 +232,7 @@ extern u64 x86_spec_ctrl_get_default(void);
+ enum ssb_mitigation {
+ SPEC_STORE_BYPASS_NONE,
+ SPEC_STORE_BYPASS_DISABLE,
++ SPEC_STORE_BYPASS_PRCTL,
+ };
+
+ extern char __indirect_thunk_start[];
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 4f09576..b7d9adf 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -11,6 +11,8 @@
+ #include <linux/utsname.h>
+ #include <linux/cpu.h>
+ #include <linux/module.h>
++#include <linux/nospec.h>
++#include <linux/prctl.h>
+
+ #include <asm/spec-ctrl.h>
+ #include <asm/cmdline.h>
+@@ -411,20 +413,23 @@ enum ssb_mitigation_cmd {
+ SPEC_STORE_BYPASS_CMD_NONE,
+ SPEC_STORE_BYPASS_CMD_AUTO,
+ SPEC_STORE_BYPASS_CMD_ON,
++ SPEC_STORE_BYPASS_CMD_PRCTL,
+ };
+
+ static const char *ssb_strings[] = {
+ [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
+- [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled"
++ [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
++ [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl"
+ };
+
+ static const struct {
+ const char *option;
+ enum ssb_mitigation_cmd cmd;
+ } ssb_mitigation_options[] = {
+- { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
+- { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
+- { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
++ { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
++ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
++ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
++ { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
+ };
+
+ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
+@@ -474,14 +479,15 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
+
+ switch (cmd) {
+ case SPEC_STORE_BYPASS_CMD_AUTO:
+- /*
+- * AMD platforms by default don't need SSB mitigation.
+- */
+- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+- break;
++ /* Choose prctl as the default mode */
++ mode = SPEC_STORE_BYPASS_PRCTL;
++ break;
+ case SPEC_STORE_BYPASS_CMD_ON:
+ mode = SPEC_STORE_BYPASS_DISABLE;
+ break;
++ case SPEC_STORE_BYPASS_CMD_PRCTL:
++ mode = SPEC_STORE_BYPASS_PRCTL;
++ break;
+ case SPEC_STORE_BYPASS_CMD_NONE:
+ break;
+ }
+@@ -492,7 +498,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
+ * - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
+ * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
+ */
+- if (mode != SPEC_STORE_BYPASS_NONE) {
++ if (mode == SPEC_STORE_BYPASS_DISABLE) {
+ setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
+ /*
+ * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
+@@ -523,6 +529,63 @@ static void ssb_select_mitigation()
+
+ #undef pr_fmt
+
++static int ssb_prctl_set(unsigned long ctrl)
++{
++ bool rds = !!test_tsk_thread_flag(current, TIF_RDS);
++
++ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL)
++ return -ENXIO;
++
++ if (ctrl == PR_SPEC_ENABLE)
++ clear_tsk_thread_flag(current, TIF_RDS);
++ else
++ set_tsk_thread_flag(current, TIF_RDS);
++
++ if (rds != !!test_tsk_thread_flag(current, TIF_RDS))
++ speculative_store_bypass_update();
++
++ return 0;
++}
++
++static int ssb_prctl_get(void)
++{
++ switch (ssb_mode) {
++ case SPEC_STORE_BYPASS_DISABLE:
++ return PR_SPEC_DISABLE;
++ case SPEC_STORE_BYPASS_PRCTL:
++ if (test_tsk_thread_flag(current, TIF_RDS))
++ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
++ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
++ default:
++ if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
++ return PR_SPEC_ENABLE;
++ return PR_SPEC_NOT_AFFECTED;
++ }
++}
++
++int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
++{
++ if (ctrl != PR_SPEC_ENABLE && ctrl != PR_SPEC_DISABLE)
++ return -ERANGE;
++
++ switch (which) {
++ case PR_SPEC_STORE_BYPASS:
++ return ssb_prctl_set(ctrl);
++ default:
++ return -ENODEV;
++ }
++}
++
++int arch_prctl_spec_ctrl_get(unsigned long which)
++{
++ switch (which) {
++ case PR_SPEC_STORE_BYPASS:
++ return ssb_prctl_get();
++ default:
++ return -ENODEV;
++ }
++}
++
+ void x86_spec_ctrl_setup_ap(void)
+ {
+ if (boot_cpu_has(X86_FEATURE_IBRS))
+--
+2.7.4
+
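A hedged userspace sketch (not part of the patch) of the new per-task interface. The PR_SET/GET_SPECULATION_CTRL request numbers and the PR_SPEC_* values come from the companion generic prctl patch in this series; the fallback definitions below follow the upstream series and are only needed when the installed <linux/prctl.h> predates it.

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_SPECULATION_CTRL
#define PR_GET_SPECULATION_CTRL	52
#define PR_SET_SPECULATION_CTRL	53
#define PR_SPEC_STORE_BYPASS	0
#define PR_SPEC_ENABLE		(1UL << 1)
#define PR_SPEC_DISABLE		(1UL << 2)
#endif

int main(void)
{
	/* Opt this task into the SSB mitigation; only succeeds in prctl mode. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");

	printf("state: %d\n",
	       prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0));
	return 0;
}
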
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0040-nospec-Move-array_index_nospec-parameter-checking-in.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0040-nospec-Move-array_index_nospec-parameter-checking-in.patch
new file mode 100644
index 00000000..973e9188
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0040-nospec-Move-array_index_nospec-parameter-checking-in.patch
@@ -0,0 +1,92 @@
+From a3cb1b4823957921fa7a58e51bc8ee3e880bf1c5 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 5 Feb 2018 14:16:06 +0000
+Subject: [PATCH 40/93] nospec: Move array_index_nospec() parameter checking
+ into separate macro
+
+commit 8fa80c503b484ddc1abbd10c7cb2ab81f3824a50 upstream.
+
+For architectures providing their own implementation of
+array_index_mask_nospec() in asm/barrier.h, attempting to use WARN_ONCE() to
+complain about out-of-range parameters results in a mess
+of mutually-dependent include files.
+
+Rather than unpick the dependencies, simply have the core code in nospec.h
+perform the checking for us.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lkml.kernel.org/r/1517840166-15399-1-git-send-email-will.deacon@arm.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/nospec.h | 36 +++++++++++++++++++++---------------
+ 1 file changed, 21 insertions(+), 15 deletions(-)
+
+diff --git a/include/linux/nospec.h b/include/linux/nospec.h
+index b99bced..fbc98e2 100644
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -20,20 +20,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
+ unsigned long size)
+ {
+ /*
+- * Warn developers about inappropriate array_index_nospec() usage.
+- *
+- * Even if the CPU speculates past the WARN_ONCE branch, the
+- * sign bit of @index is taken into account when generating the
+- * mask.
+- *
+- * This warning is compiled out when the compiler can infer that
+- * @index and @size are less than LONG_MAX.
+- */
+- if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX,
+- "array_index_nospec() limited to range of [0, LONG_MAX]\n"))
+- return 0;
+-
+- /*
+ * Always calculate and emit the mask even if the compiler
+ * thinks the mask is not needed. The compiler does not take
+ * into account the value of @index under speculation.
+@@ -44,6 +30,26 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
+ #endif
+
+ /*
++ * Warn developers about inappropriate array_index_nospec() usage.
++ *
++ * Even if the CPU speculates past the WARN_ONCE branch, the
++ * sign bit of @index is taken into account when generating the
++ * mask.
++ *
++ * This warning is compiled out when the compiler can infer that
++ * @index and @size are less than LONG_MAX.
++ */
++#define array_index_mask_nospec_check(index, size) \
++({ \
++ if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, \
++ "array_index_nospec() limited to range of [0, LONG_MAX]\n")) \
++ _mask = 0; \
++ else \
++ _mask = array_index_mask_nospec(index, size); \
++ _mask; \
++})
++
++/*
+ * array_index_nospec - sanitize an array index after a bounds check
+ *
+ * For a code sequence like:
+@@ -61,7 +67,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
+ ({ \
+ typeof(index) _i = (index); \
+ typeof(size) _s = (size); \
+- unsigned long _mask = array_index_mask_nospec(_i, _s); \
++ unsigned long _mask = array_index_mask_nospec_check(_i, _s); \
+ \
+ BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
+ BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
+--
+2.7.4
+
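The patch above only relocates the range check; as context, here is a minimal
sketch of how a caller is expected to use array_index_nospec() after an
ordinary bounds check. Only array_index_nospec() comes from the patched
header; the function name, the entries[] array and the NR_ENTRIES constant
are hypothetical.

    #include <linux/errno.h>
    #include <linux/nospec.h>

    #define NR_ENTRIES 16                        /* hypothetical table size */
    static struct { int value; } entries[NR_ENTRIES];

    static int hypothetical_read_entry(unsigned long index)
    {
            /* Ordinary bounds check; the branch can still be speculated past. */
            if (index >= NR_ENTRIES)
                    return -EINVAL;

            /*
             * Clamp the index under speculation so a mispredicted branch
             * cannot be used to read entries[] out of bounds.
             */
            index = array_index_nospec(index, NR_ENTRIES);
            return entries[index].value;
    }
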
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0041-nospec-Allow-index-argument-to-have-const-qualified-.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0041-nospec-Allow-index-argument-to-have-const-qualified-.patch
new file mode 100644
index 00000000..48dd7bd7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0041-nospec-Allow-index-argument-to-have-const-qualified-.patch
@@ -0,0 +1,68 @@
+From 0f31ea4b42fd0a593d539e2278b1baa35a31a122 Mon Sep 17 00:00:00 2001
+From: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+Date: Fri, 16 Feb 2018 13:20:48 -0800
+Subject: [PATCH 41/93] nospec: Allow index argument to have const-qualified
+ type
+
+commit b98c6a160a057d5686a8c54c79cc6c8c94a7d0c8 upstream.
+
+The last expression in a statement expression need not be a bare
+variable, quoting gcc docs
+
+ The last thing in the compound statement should be an expression
+ followed by a semicolon; the value of this subexpression serves as the
+ value of the entire construct.
+
+and we already use that in e.g. the min/max macros which end with a
+ternary expression.
+
+This way, we can allow index to have const-qualified type, which will in
+some cases avoid the need for introducing a local copy of index of
+non-const qualified type. That, in turn, can prevent readers not
+familiar with the internals of array_index_nospec from wondering about
+the seemingly redundant extra variable, and I think that's worthwhile
+considering how confusing the whole _nospec business is.
+
+The expression _i&_mask has type unsigned long (since that is the type
+of _mask, and the BUILD_BUG_ONs guarantee that _i will get promoted to
+that), so in order not to change the type of the whole expression, add
+a cast back to typeof(_i).
+
+Signed-off-by: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: linux-arch@vger.kernel.org
+Cc: stable@vger.kernel.org
+Link: http://lkml.kernel.org/r/151881604837.17395.10812767547837568328.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/nospec.h | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/include/linux/nospec.h b/include/linux/nospec.h
+index fbc98e2..132e3f5 100644
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -72,7 +72,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
+ BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
+ BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
+ \
+- _i &= _mask; \
+- _i; \
++ (typeof(_i)) (_i & _mask); \
+ })
+ #endif /* _LINUX_NOSPEC_H */
+--
+2.7.4
+
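A small illustration of what the change above permits (the caller, the
table[] array and its size are made up; only array_index_nospec() is real):
the index handed to array_index_nospec() may now be const-qualified without
introducing a separate non-const local copy.

    #include <linux/errno.h>
    #include <linux/nospec.h>

    extern int table[64];                        /* hypothetical array */

    static int hypothetical_lookup(const unsigned long index)
    {
            if (index >= 64)
                    return -EINVAL;
            /*
             * Compiles although 'index' is const: the macro no longer writes
             * to its typeof(index) temporary, it only masks and casts.
             */
            return table[array_index_nospec(index, 64)];
    }
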
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0042-nospec-Kill-array_index_nospec_mask_check.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0042-nospec-Kill-array_index_nospec_mask_check.patch
new file mode 100644
index 00000000..d74a2ba7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0042-nospec-Kill-array_index_nospec_mask_check.patch
@@ -0,0 +1,85 @@
+From ae4a53f80d78b49ff776956f133cb59344aa10e9 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Fri, 16 Feb 2018 13:20:42 -0800
+Subject: [PATCH 42/93] nospec: Kill array_index_nospec_mask_check()
+
+commit 1d91c1d2c80cb70e2e553845e278b87a960c04da upstream.
+
+There are multiple problems with the dynamic sanity checking in
+array_index_nospec_mask_check():
+
+* It causes unnecessary overhead in the 32-bit case since integer sized
+ @index values will no longer cause the check to be compiled away like
+ in the 64-bit case.
+
+* In the 32-bit case it may trigger with user controllable input when
+  the expectation is that it should only trigger during development of new
+  kernel enabling.
+
+* The macro reuses the input parameter in multiple locations which is
+ broken if someone passes an expression like 'index++' to
+ array_index_nospec().
+
+Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: linux-arch@vger.kernel.org
+Link: http://lkml.kernel.org/r/151881604278.17395.6605847763178076520.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/nospec.h | 22 +---------------------
+ 1 file changed, 1 insertion(+), 21 deletions(-)
+
+diff --git a/include/linux/nospec.h b/include/linux/nospec.h
+index 132e3f5..172a19d 100644
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -30,26 +30,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
+ #endif
+
+ /*
+- * Warn developers about inappropriate array_index_nospec() usage.
+- *
+- * Even if the CPU speculates past the WARN_ONCE branch, the
+- * sign bit of @index is taken into account when generating the
+- * mask.
+- *
+- * This warning is compiled out when the compiler can infer that
+- * @index and @size are less than LONG_MAX.
+- */
+-#define array_index_mask_nospec_check(index, size) \
+-({ \
+- if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, \
+- "array_index_nospec() limited to range of [0, LONG_MAX]\n")) \
+- _mask = 0; \
+- else \
+- _mask = array_index_mask_nospec(index, size); \
+- _mask; \
+-})
+-
+-/*
+ * array_index_nospec - sanitize an array index after a bounds check
+ *
+ * For a code sequence like:
+@@ -67,7 +47,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
+ ({ \
+ typeof(index) _i = (index); \
+ typeof(size) _s = (size); \
+- unsigned long _mask = array_index_mask_nospec_check(_i, _s); \
++ unsigned long _mask = array_index_mask_nospec(_i, _s); \
+ \
+ BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
+ BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0043-nospec-Include-asm-barrier.h-dependency.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0043-nospec-Include-asm-barrier.h-dependency.patch
new file mode 100644
index 00000000..33ce3dd7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0043-nospec-Include-asm-barrier.h-dependency.patch
@@ -0,0 +1,51 @@
+From 3997af07cbe06033b93bffe163982e30f86d4ac7 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Fri, 16 Feb 2018 13:20:54 -0800
+Subject: [PATCH 43/93] nospec: Include <asm/barrier.h> dependency
+
+commit eb6174f6d1be16b19cfa43dac296bfed003ce1a6 upstream.
+
+The nospec.h header expects the per-architecture header file
+<asm/barrier.h> to optionally define array_index_mask_nospec(). Include
+that dependency to prevent inadvertent fallback to the default
+array_index_mask_nospec() implementation.
+
+The default implementation may not provide a full mitigation
+on architectures that perform data value speculation.
+
+Reported-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: linux-arch@vger.kernel.org
+Link: http://lkml.kernel.org/r/151881605404.17395.1341935530792574707.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/nospec.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/include/linux/nospec.h b/include/linux/nospec.h
+index 172a19d..e791ebc 100644
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -5,6 +5,7 @@
+
+ #ifndef _LINUX_NOSPEC_H
+ #define _LINUX_NOSPEC_H
++#include <asm/barrier.h>
+
+ /**
+ * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0044-prctl-Add-speculation-control-prctls.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0044-prctl-Add-speculation-control-prctls.patch
new file mode 100644
index 00000000..1baf848c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0044-prctl-Add-speculation-control-prctls.patch
@@ -0,0 +1,239 @@
+From 93715f38b4419faa4f84a9bb536f11d89c5c7427 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 29 Apr 2018 15:20:11 +0200
+Subject: [PATCH 44/93] prctl: Add speculation control prctls
+
+commit b617cfc858161140d69cc0b5cc211996b557a1c7 upstream
+
+Add two new prctls to control aspects of speculation related vulnerabilities
+and their mitigations to provide finer grained control over performance
+impacting mitigations.
+
+PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
+which is selected with arg2 of prctl(2). The return value uses bits 0-2 with
+the following meaning:
+
+Bit Define Description
+0 PR_SPEC_PRCTL Mitigation can be controlled per task by
+ PR_SET_SPECULATION_CTRL
+1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
+ disabled
+2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
+ enabled
+
+If all bits are 0 the CPU is not affected by the speculation misfeature.
+
+If PR_SPEC_PRCTL is set, then the per task control of the mitigation is
+available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
+misfeature will fail.
+
+PR_SET_SPECULATION_CTRL allows controlling the speculation misfeature, which
+is selected by arg2 of prctl(2) per task. arg3 is used to hand in the
+control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE.
+
+The common return values are:
+
+EINVAL prctl is not implemented by the architecture or the unused prctl()
+ arguments are not 0
+ENODEV arg2 is selecting an unsupported speculation misfeature
+
+PR_SET_SPECULATION_CTRL has these additional return values:
+
+ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor PR_SPEC_DISABLE
+ENXIO prctl control of the selected speculation misfeature is disabled
+
+The first supported controllable speculation misfeature is
+PR_SPEC_STORE_BYPASS. Add the define so this can be shared between
+architectures.
+
+Based on an initial patch from Tim Chen and mostly rewritten.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/spec_ctrl.txt | 86 +++++++++++++++++++++++++++++++++++++++++++++
+ include/linux/nospec.h | 5 +++
+ include/uapi/linux/prctl.h | 11 ++++++
+ kernel/sys.c | 22 ++++++++++++
+ 4 files changed, 124 insertions(+)
+ create mode 100644 Documentation/spec_ctrl.txt
+
+diff --git a/Documentation/spec_ctrl.txt b/Documentation/spec_ctrl.txt
+new file mode 100644
+index 0000000..ddbebcd
+--- /dev/null
++++ b/Documentation/spec_ctrl.txt
+@@ -0,0 +1,86 @@
++===================
++Speculation Control
++===================
++
++Quite a few CPUs have speculation related misfeatures which are in fact
++vulnerabilities causing data leaks in various forms even across privilege
++domains.
++
++The kernel provides mitigation for such vulnerabilities in various
++forms. Some of these mitigations are compile-time configurable and some are
++selected on the kernel command line.
++
++There is also a class of mitigations which are very expensive, but they can
++be restricted to a certain set of processes or tasks in controlled
++environments. The mechanism to control these mitigations is via
++:manpage:`prctl(2)`.
++
++There are two prctl options which are related to this:
++
++ * PR_GET_SPECULATION_CTRL
++
++ * PR_SET_SPECULATION_CTRL
++
++PR_GET_SPECULATION_CTRL
++-----------------------
++
++PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
++which is selected with arg2 of prctl(2). The return value uses bits 0-2 with
++the following meaning:
++
++==== ================ ===================================================
++Bit Define Description
++==== ================ ===================================================
++0 PR_SPEC_PRCTL Mitigation can be controlled per task by
++ PR_SET_SPECULATION_CTRL
++1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
++ disabled
++2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
++ enabled
++==== ================ ===================================================
++
++If all bits are 0 the CPU is not affected by the speculation misfeature.
++
++If PR_SPEC_PRCTL is set, then the per task control of the mitigation is
++available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
++misfeature will fail.
++
++PR_SET_SPECULATION_CTRL
++-----------------------
++PR_SET_SPECULATION_CTRL allows to control the speculation misfeature, which
++is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand
++in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE.
++
++Common error codes
++------------------
++======= =================================================================
++Value Meaning
++======= =================================================================
++EINVAL The prctl is not implemented by the architecture or unused
++ prctl(2) arguments are not 0
++
++ENODEV arg2 is selecting an unsupported speculation misfeature
++======= =================================================================
++
++PR_SET_SPECULATION_CTRL error codes
++-----------------------------------
++======= =================================================================
++Value Meaning
++======= =================================================================
++0 Success
++
++ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
++ PR_SPEC_DISABLE
++
++ENXIO Control of the selected speculation misfeature is not possible.
++ See PR_GET_SPECULATION_CTRL.
++======= =================================================================
++
++Speculation misfeature controls
++-------------------------------
++- PR_SPEC_STORE_BYPASS: Speculative Store Bypass
++
++ Invocations:
++ * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
++ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
++ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
+diff --git a/include/linux/nospec.h b/include/linux/nospec.h
+index e791ebc..700bb8a 100644
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -55,4 +55,9 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
+ \
+ (typeof(_i)) (_i & _mask); \
+ })
++
++/* Speculation control prctl */
++int arch_prctl_spec_ctrl_get(unsigned long which);
++int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl);
++
+ #endif /* _LINUX_NOSPEC_H */
+diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
+index a8d0759..3b316be 100644
+--- a/include/uapi/linux/prctl.h
++++ b/include/uapi/linux/prctl.h
+@@ -197,4 +197,15 @@ struct prctl_mm_map {
+ # define PR_CAP_AMBIENT_LOWER 3
+ # define PR_CAP_AMBIENT_CLEAR_ALL 4
+
++/* Per task speculation control */
++#define PR_GET_SPECULATION_CTRL 52
++#define PR_SET_SPECULATION_CTRL 53
++/* Speculation control variants */
++# define PR_SPEC_STORE_BYPASS 0
++/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
++# define PR_SPEC_NOT_AFFECTED 0
++# define PR_SPEC_PRCTL (1UL << 0)
++# define PR_SPEC_ENABLE (1UL << 1)
++# define PR_SPEC_DISABLE (1UL << 2)
++
+ #endif /* _LINUX_PRCTL_H */
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 89d5be4..312c985 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -53,6 +53,8 @@
+ #include <linux/uidgid.h>
+ #include <linux/cred.h>
+
++#include <linux/nospec.h>
++
+ #include <linux/kmsg_dump.h>
+ /* Move somewhere else to avoid recompiling? */
+ #include <generated/utsrelease.h>
+@@ -2072,6 +2074,16 @@ static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
+ }
+ #endif
+
++int __weak arch_prctl_spec_ctrl_get(unsigned long which)
++{
++ return -EINVAL;
++}
++
++int __weak arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
++{
++ return -EINVAL;
++}
++
+ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
+ unsigned long, arg4, unsigned long, arg5)
+ {
+@@ -2270,6 +2282,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
+ case PR_GET_FP_MODE:
+ error = GET_FP_MODE(me);
+ break;
++ case PR_GET_SPECULATION_CTRL:
++ if (arg3 || arg4 || arg5)
++ return -EINVAL;
++ error = arch_prctl_spec_ctrl_get(arg2);
++ break;
++ case PR_SET_SPECULATION_CTRL:
++ if (arg4 || arg5)
++ return -EINVAL;
++ error = arch_prctl_spec_ctrl_set(arg2, arg3);
++ break;
+ default:
+ error = -EINVAL;
+ break;
+--
+2.7.4
+
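As a rough userspace illustration of the new interface (a sketch, not part of
the patch: it assumes prctl(2) is available via <sys/prctl.h>; the fallback
defines simply copy the values from the uapi header added above):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SPEC_STORE_BYPASS
    # define PR_GET_SPECULATION_CTRL 52
    # define PR_SET_SPECULATION_CTRL 53
    # define PR_SPEC_STORE_BYPASS    0
    # define PR_SPEC_PRCTL           (1UL << 0)
    # define PR_SPEC_ENABLE          (1UL << 1)
    # define PR_SPEC_DISABLE         (1UL << 2)
    #endif

    int main(void)
    {
            /* Query the Speculative Store Bypass control state of this task. */
            int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

            if (state < 0) {
                    perror("PR_GET_SPECULATION_CTRL");  /* e.g. EINVAL or ENODEV */
                    return 1;
            }

            /* If per-task control is available, turn the mitigation on. */
            if (state & PR_SPEC_PRCTL)
                    prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                          PR_SPEC_DISABLE, 0, 0);

            printf("speculation control state: 0x%x\n", state);
            return 0;
    }
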
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0045-nospec-Allow-getting-setting-on-non-current-task.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0045-nospec-Allow-getting-setting-on-non-current-task.patch
new file mode 100644
index 00000000..5c1e6d48
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0045-nospec-Allow-getting-setting-on-non-current-task.patch
@@ -0,0 +1,162 @@
+From e2a9a40a2a4fbebc999eacc678c2af449db5af11 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Tue, 1 May 2018 15:19:04 -0700
+Subject: [PATCH 45/93] nospec: Allow getting/setting on non-current task
+
+commit 7bbf1373e228840bb0295a2ca26d548ef37f448e upstream
+
+Adjust arch_prctl_get/set_spec_ctrl() to operate on tasks other than
+current.
+
+This is needed both for /proc/$pid/status queries and for seccomp (since
+thread-syncing can trigger seccomp in non-current threads).
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 27 ++++++++++++++++-----------
+ include/linux/nospec.h | 7 +++++--
+ kernel/sys.c | 9 +++++----
+ 3 files changed, 26 insertions(+), 17 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index b7d9adf..3760931 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -529,31 +529,35 @@ static void ssb_select_mitigation()
+
+ #undef pr_fmt
+
+-static int ssb_prctl_set(unsigned long ctrl)
++static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+ {
+- bool rds = !!test_tsk_thread_flag(current, TIF_RDS);
++ bool rds = !!test_tsk_thread_flag(task, TIF_RDS);
+
+ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL)
+ return -ENXIO;
+
+ if (ctrl == PR_SPEC_ENABLE)
+- clear_tsk_thread_flag(current, TIF_RDS);
++ clear_tsk_thread_flag(task, TIF_RDS);
+ else
+- set_tsk_thread_flag(current, TIF_RDS);
++ set_tsk_thread_flag(task, TIF_RDS);
+
+- if (rds != !!test_tsk_thread_flag(current, TIF_RDS))
++ /*
++ * If being set on non-current task, delay setting the CPU
++ * mitigation until it is next scheduled.
++ */
++ if (task == current && rds != !!test_tsk_thread_flag(task, TIF_RDS))
+ speculative_store_bypass_update();
+
+ return 0;
+ }
+
+-static int ssb_prctl_get(void)
++static int ssb_prctl_get(struct task_struct *task)
+ {
+ switch (ssb_mode) {
+ case SPEC_STORE_BYPASS_DISABLE:
+ return PR_SPEC_DISABLE;
+ case SPEC_STORE_BYPASS_PRCTL:
+- if (test_tsk_thread_flag(current, TIF_RDS))
++ if (test_tsk_thread_flag(task, TIF_RDS))
+ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+ default:
+@@ -563,24 +567,25 @@ static int ssb_prctl_get(void)
+ }
+ }
+
+-int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
++int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
++ unsigned long ctrl)
+ {
+ if (ctrl != PR_SPEC_ENABLE && ctrl != PR_SPEC_DISABLE)
+ return -ERANGE;
+
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+- return ssb_prctl_set(ctrl);
++ return ssb_prctl_set(task, ctrl);
+ default:
+ return -ENODEV;
+ }
+ }
+
+-int arch_prctl_spec_ctrl_get(unsigned long which)
++int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+ {
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+- return ssb_prctl_get();
++ return ssb_prctl_get(task);
+ default:
+ return -ENODEV;
+ }
+diff --git a/include/linux/nospec.h b/include/linux/nospec.h
+index 700bb8a..a908c95 100644
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -7,6 +7,8 @@
+ #define _LINUX_NOSPEC_H
+ #include <asm/barrier.h>
+
++struct task_struct;
++
+ /**
+ * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
+ * @index: array element index
+@@ -57,7 +59,8 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
+ })
+
+ /* Speculation control prctl */
+-int arch_prctl_spec_ctrl_get(unsigned long which);
+-int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl);
++int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
++int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
++ unsigned long ctrl);
+
+ #endif /* _LINUX_NOSPEC_H */
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 312c985..143cd63 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -2074,12 +2074,13 @@ static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
+ }
+ #endif
+
+-int __weak arch_prctl_spec_ctrl_get(unsigned long which)
++int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
+ {
+ return -EINVAL;
+ }
+
+-int __weak arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
++int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
++ unsigned long ctrl)
+ {
+ return -EINVAL;
+ }
+@@ -2285,12 +2286,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
+ case PR_GET_SPECULATION_CTRL:
+ if (arg3 || arg4 || arg5)
+ return -EINVAL;
+- error = arch_prctl_spec_ctrl_get(arg2);
++ error = arch_prctl_spec_ctrl_get(me, arg2);
+ break;
+ case PR_SET_SPECULATION_CTRL:
+ if (arg4 || arg5)
+ return -EINVAL;
+- error = arch_prctl_spec_ctrl_set(arg2, arg3);
++ error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
+ break;
+ default:
+ error = -EINVAL;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0046-x86-bugs-Make-boot-modes-__ro_after_init.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0046-x86-bugs-Make-boot-modes-__ro_after_init.patch
new file mode 100644
index 00000000..f2e083bc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0046-x86-bugs-Make-boot-modes-__ro_after_init.patch
@@ -0,0 +1,43 @@
+From 6dbf11655572182e63051b8ef4e61a07fb4901c0 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Thu, 3 May 2018 15:03:30 -0700
+Subject: [PATCH 46/93] x86/bugs: Make boot modes __ro_after_init
+
+commit f9544b2b076ca90d887c5ae5d74fab4c21bb7c13 upstream
+
+There's no reason for these to be changed after boot.
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 3760931..65114d2 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -128,7 +128,8 @@ static const char *spectre_v2_strings[] = {
+ #undef pr_fmt
+ #define pr_fmt(fmt) "Spectre V2 : " fmt
+
+-static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
++static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
++ SPECTRE_V2_NONE;
+
+ void x86_spec_ctrl_set(u64 val)
+ {
+@@ -406,7 +407,7 @@ static void __init spectre_v2_select_mitigation(void)
+ #undef pr_fmt
+ #define pr_fmt(fmt) "Speculative Store Bypass: " fmt
+
+-static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;
++static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
+
+ /* The kernel command line selection */
+ enum ssb_mitigation_cmd {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0047-fs-proc-Report-eip-esp-in-prod-PID-stat-for-coredump.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0047-fs-proc-Report-eip-esp-in-prod-PID-stat-for-coredump.patch
new file mode 100644
index 00000000..6f74166c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0047-fs-proc-Report-eip-esp-in-prod-PID-stat-for-coredump.patch
@@ -0,0 +1,77 @@
+From 10f154142e83fdb4e9d107e0f72b01864e69e108 Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Thu, 14 Sep 2017 11:42:17 +0200
+Subject: [PATCH 47/93] fs/proc: Report eip/esp in /prod/PID/stat for
+ coredumping
+
+commit fd7d56270b526ca3ed0c224362e3c64a0f86687a upstream.
+
+Commit 0a1eb2d474ed ("fs/proc: Stop reporting eip and esp in
+/proc/PID/stat") stopped reporting eip/esp because it is
+racy and dangerous for executing tasks. The comment adds:
+
+ As far as I know, there are no user programs that make any
+ material use of these fields, so just get rid of them.
+
+However, existing userspace core-dump-handler applications (for
+example, minicoredumper) are using these fields since they
+provide an excellent cross-platform interface to these valuable
+pointers. So that commit introduced a user space visible
+regression.
+
+Partially revert the change and make the readout possible for
+tasks with the proper permissions and only if the target task
+has the PF_DUMPCORE flag set.
+
+Fixes: 0a1eb2d474ed ("fs/proc: Stop reporting eip and esp in /proc/PID/stat")
+Reported-by: Marco Felsch <marco.felsch@preh.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Andy Lutomirski <luto@kernel.org>
+Cc: Tycho Andersen <tycho.andersen@canonical.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Linux API <linux-api@vger.kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Link: http://lkml.kernel.org/r/87poatfwg6.fsf@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/proc/array.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 81818ad..c932ec4 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -60,6 +60,7 @@
+ #include <linux/tty.h>
+ #include <linux/string.h>
+ #include <linux/mman.h>
++#include <linux/sched.h>
+ #include <linux/proc_fs.h>
+ #include <linux/ioport.h>
+ #include <linux/uaccess.h>
+@@ -416,7 +417,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ * esp and eip are intentionally zeroed out. There is no
+ * non-racy way to read them without freezing the task.
+ * Programs that need reliable values can use ptrace(2).
++ *
++ * The only exception is if the task is core dumping because
++ * a program is not able to use ptrace(2) in that case. It is
++ * safe because the task has stopped executing permanently.
+ */
++ if (permitted && (task->flags & PF_DUMPCORE)) {
++ eip = KSTK_EIP(task);
++ esp = KSTK_ESP(task);
++ }
+ }
+
+ get_task_comm(tcomm, task);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0048-proc-fix-coredump-vs-read-proc-stat-race.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0048-proc-fix-coredump-vs-read-proc-stat-race.patch
new file mode 100644
index 00000000..30c8de50
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0048-proc-fix-coredump-vs-read-proc-stat-race.patch
@@ -0,0 +1,105 @@
+From 476f6e1404b0b16c48ae53249ffb362a16bf376c Mon Sep 17 00:00:00 2001
+From: Alexey Dobriyan <adobriyan@gmail.com>
+Date: Thu, 18 Jan 2018 16:34:05 -0800
+Subject: [PATCH 48/93] proc: fix coredump vs read /proc/*/stat race
+
+commit 8bb2ee192e482c5d500df9f2b1b26a560bd3026f upstream.
+
+do_task_stat() accesses IP and SP of a task without bumping reference
+count of a stack (which became an entity with independent lifetime at
+some point).
+
+Steps to reproduce:
+
+ #include <stdio.h>
+ #include <sys/types.h>
+ #include <sys/stat.h>
+ #include <fcntl.h>
+ #include <sys/time.h>
+ #include <sys/resource.h>
+ #include <unistd.h>
+ #include <sys/wait.h>
+
+ int main(void)
+ {
+ setrlimit(RLIMIT_CORE, &(struct rlimit){});
+
+ while (1) {
+ char buf[64];
+ char buf2[4096];
+ pid_t pid;
+ int fd;
+
+ pid = fork();
+ if (pid == 0) {
+ *(volatile int *)0 = 0;
+ }
+
+ snprintf(buf, sizeof(buf), "/proc/%u/stat", pid);
+ fd = open(buf, O_RDONLY);
+ read(fd, buf2, sizeof(buf2));
+ close(fd);
+
+ waitpid(pid, NULL, 0);
+ }
+ return 0;
+ }
+
+ BUG: unable to handle kernel paging request at 0000000000003fd8
+ IP: do_task_stat+0x8b4/0xaf0
+ PGD 800000003d73e067 P4D 800000003d73e067 PUD 3d558067 PMD 0
+ Oops: 0000 [#1] PREEMPT SMP PTI
+ CPU: 0 PID: 1417 Comm: a.out Not tainted 4.15.0-rc8-dirty #2
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc27 04/01/2014
+ RIP: 0010:do_task_stat+0x8b4/0xaf0
+ Call Trace:
+ proc_single_show+0x43/0x70
+ seq_read+0xe6/0x3b0
+ __vfs_read+0x1e/0x120
+ vfs_read+0x84/0x110
+ SyS_read+0x3d/0xa0
+ entry_SYSCALL_64_fastpath+0x13/0x6c
+ RIP: 0033:0x7f4d7928cba0
+ RSP: 002b:00007ffddb245158 EFLAGS: 00000246
+ Code: 03 b7 a0 01 00 00 4c 8b 4c 24 70 4c 8b 44 24 78 4c 89 74 24 18 e9 91 f9 ff ff f6 45 4d 02 0f 84 fd f7 ff ff 48 8b 45 40 48 89 ef <48> 8b 80 d8 3f 00 00 48 89 44 24 20 e8 9b 97 eb ff 48 89 44 24
+ RIP: do_task_stat+0x8b4/0xaf0 RSP: ffffc90000607cc8
+ CR2: 0000000000003fd8
+
+John Ogness said: for my tests I added an else case to verify that the
+race is hit and correctly mitigated.
+
+Link: http://lkml.kernel.org/r/20180116175054.GA11513@avx2
+Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
+Reported-by: "Kohli, Gaurav" <gkohli@codeaurora.org>
+Tested-by: John Ogness <john.ogness@linutronix.de>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Ingo Molnar <mingo@elte.hu>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/proc/array.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index c932ec4..794b52a 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -423,8 +423,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ * safe because the task has stopped executing permanently.
+ */
+ if (permitted && (task->flags & PF_DUMPCORE)) {
+- eip = KSTK_EIP(task);
+- esp = KSTK_ESP(task);
++ if (try_get_task_stack(task)) {
++ eip = KSTK_EIP(task);
++ esp = KSTK_ESP(task);
++ put_task_stack(task);
++ }
+ }
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0049-proc-Provide-details-on-speculation-flaw-mitigations.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0049-proc-Provide-details-on-speculation-flaw-mitigations.patch
new file mode 100644
index 00000000..4c1c8184
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0049-proc-Provide-details-on-speculation-flaw-mitigations.patch
@@ -0,0 +1,64 @@
+From a59a45de2d39c0e4f789ab2f05dc4b675ebc7914 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Tue, 1 May 2018 15:31:45 -0700
+Subject: [PATCH 49/93] proc: Provide details on speculation flaw mitigations
+
+commit fae1fa0fc6cca8beee3ab8ed71d54f9a78fa3f64 upstream
+
+As done with seccomp and no_new_privs, also show speculation flaw
+mitigation state in /proc/$pid/status.
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/proc/array.c | 24 +++++++++++++++++++++++-
+ 1 file changed, 23 insertions(+), 1 deletion(-)
+
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 794b52a..64f3f20 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -80,6 +80,7 @@
+ #include <linux/delayacct.h>
+ #include <linux/seq_file.h>
+ #include <linux/pid_namespace.h>
++#include <linux/prctl.h>
+ #include <linux/ptrace.h>
+ #include <linux/tracehook.h>
+ #include <linux/string_helpers.h>
+@@ -345,8 +346,29 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
+ {
+ #ifdef CONFIG_SECCOMP
+ seq_put_decimal_ull(m, "Seccomp:\t", p->seccomp.mode);
+- seq_putc(m, '\n');
+ #endif
++ seq_printf(m, "\nSpeculation Store Bypass:\t");
++ switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
++ case -EINVAL:
++ seq_printf(m, "unknown");
++ break;
++ case PR_SPEC_NOT_AFFECTED:
++ seq_printf(m, "not vulnerable");
++ break;
++ case PR_SPEC_PRCTL | PR_SPEC_DISABLE:
++ seq_printf(m, "thread mitigated");
++ break;
++ case PR_SPEC_PRCTL | PR_SPEC_ENABLE:
++ seq_printf(m, "thread vulnerable");
++ break;
++ case PR_SPEC_DISABLE:
++ seq_printf(m, "globally mitigated");
++ break;
++ default:
++ seq_printf(m, "vulnerable");
++ break;
++ }
++ seq_putc(m, '\n');
+ }
+
+ static inline void task_context_switch_counts(struct seq_file *m,
+--
+2.7.4
+
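To observe the new field from userspace, something along these lines would do
(a sketch; show_ssb_state() is a made-up helper, and only the
"Speculation Store Bypass:" line format comes from the patch above):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static void show_ssb_state(int pid)
    {
            char path[64], line[256];
            FILE *f;

            snprintf(path, sizeof(path), "/proc/%d/status", pid);
            f = fopen(path, "r");
            if (!f)
                    return;
            while (fgets(line, sizeof(line), f)) {
                    /* e.g. "Speculation Store Bypass:\tthread vulnerable" */
                    if (strncmp(line, "Speculation", 11) == 0)
                            fputs(line, stdout);
            }
            fclose(f);
    }

    int main(void)
    {
            show_ssb_state(getpid());
            return 0;
    }
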
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0050-prctl-Add-force-disable-speculation.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0050-prctl-Add-force-disable-speculation.patch
new file mode 100644
index 00000000..acdc260b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0050-prctl-Add-force-disable-speculation.patch
@@ -0,0 +1,218 @@
+From 6eca73ee80c5d8b6f8c3d294b3f97b7c8da67791 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 3 May 2018 22:09:15 +0200
+Subject: [PATCH 50/93] prctl: Add force disable speculation
+
+commit 356e4bfff2c5489e016fdb925adbf12a1e3950ee upstream
+
+For certain use cases it is desired to enforce mitigations so they cannot
+be undone afterwards. That's important for loader stubs which want to
+prevent a child from disabling the mitigation again. Will also be used for
+seccomp(). The extra state preserving of the prctl state for SSB is a
+preparatory step for EBPF dymanic speculation control.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/spec_ctrl.txt | 34 +++++++++++++++++++++-------------
+ arch/x86/kernel/cpu/bugs.c | 35 +++++++++++++++++++++++++----------
+ fs/proc/array.c | 3 +++
+ include/linux/sched.h | 9 +++++++++
+ include/uapi/linux/prctl.h | 1 +
+ 5 files changed, 59 insertions(+), 23 deletions(-)
+
+diff --git a/Documentation/spec_ctrl.txt b/Documentation/spec_ctrl.txt
+index ddbebcd..1b3690d 100644
+--- a/Documentation/spec_ctrl.txt
++++ b/Documentation/spec_ctrl.txt
+@@ -25,19 +25,21 @@ PR_GET_SPECULATION_CTRL
+ -----------------------
+
+ PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
+-which is selected with arg2 of prctl(2). The return value uses bits 0-2 with
++which is selected with arg2 of prctl(2). The return value uses bits 0-3 with
+ the following meaning:
+
+-==== ================ ===================================================
+-Bit Define Description
+-==== ================ ===================================================
+-0 PR_SPEC_PRCTL Mitigation can be controlled per task by
+- PR_SET_SPECULATION_CTRL
+-1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
+- disabled
+-2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
+- enabled
+-==== ================ ===================================================
++==== ===================== ===================================================
++Bit Define Description
++==== ===================== ===================================================
++0 PR_SPEC_PRCTL Mitigation can be controlled per task by
++ PR_SET_SPECULATION_CTRL
++1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
++ disabled
++2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
++ enabled
++3 PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
++ subsequent prctl(..., PR_SPEC_ENABLE) will fail.
++==== ===================== ===================================================
+
+ If all bits are 0 the CPU is not affected by the speculation misfeature.
+
+@@ -47,9 +49,11 @@ misfeature will fail.
+
+ PR_SET_SPECULATION_CTRL
+ -----------------------
++
+ PR_SET_SPECULATION_CTRL allows to control the speculation misfeature, which
+ is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand
+-in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE.
++in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE or
++PR_SPEC_FORCE_DISABLE.
+
+ Common error codes
+ ------------------
+@@ -70,10 +74,13 @@ Value Meaning
+ 0 Success
+
+ ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
+- PR_SPEC_DISABLE
++ PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE
+
+ ENXIO Control of the selected speculation misfeature is not possible.
+ See PR_GET_SPECULATION_CTRL.
++
++EPERM Speculation was disabled with PR_SPEC_FORCE_DISABLE and caller
++ tried to enable it again.
+ ======= =================================================================
+
+ Speculation misfeature controls
+@@ -84,3 +91,4 @@ Speculation misfeature controls
+ * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
++ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 65114d2..fdbd8e5 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -532,21 +532,37 @@ static void ssb_select_mitigation()
+
+ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+ {
+- bool rds = !!test_tsk_thread_flag(task, TIF_RDS);
++ bool update;
+
+ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL)
+ return -ENXIO;
+
+- if (ctrl == PR_SPEC_ENABLE)
+- clear_tsk_thread_flag(task, TIF_RDS);
+- else
+- set_tsk_thread_flag(task, TIF_RDS);
++ switch (ctrl) {
++ case PR_SPEC_ENABLE:
++ /* If speculation is force disabled, enable is not allowed */
++ if (task_spec_ssb_force_disable(task))
++ return -EPERM;
++ task_clear_spec_ssb_disable(task);
++ update = test_and_clear_tsk_thread_flag(task, TIF_RDS);
++ break;
++ case PR_SPEC_DISABLE:
++ task_set_spec_ssb_disable(task);
++ update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
++ break;
++ case PR_SPEC_FORCE_DISABLE:
++ task_set_spec_ssb_disable(task);
++ task_set_spec_ssb_force_disable(task);
++ update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
++ break;
++ default:
++ return -ERANGE;
++ }
+
+ /*
+ * If being set on non-current task, delay setting the CPU
+ * mitigation until it is next scheduled.
+ */
+- if (task == current && rds != !!test_tsk_thread_flag(task, TIF_RDS))
++ if (task == current && update)
+ speculative_store_bypass_update();
+
+ return 0;
+@@ -558,7 +574,9 @@ static int ssb_prctl_get(struct task_struct *task)
+ case SPEC_STORE_BYPASS_DISABLE:
+ return PR_SPEC_DISABLE;
+ case SPEC_STORE_BYPASS_PRCTL:
+- if (test_tsk_thread_flag(task, TIF_RDS))
++ if (task_spec_ssb_force_disable(task))
++ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
++ if (task_spec_ssb_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+ default:
+@@ -571,9 +589,6 @@ static int ssb_prctl_get(struct task_struct *task)
+ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+ unsigned long ctrl)
+ {
+- if (ctrl != PR_SPEC_ENABLE && ctrl != PR_SPEC_DISABLE)
+- return -ERANGE;
+-
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+ return ssb_prctl_set(task, ctrl);
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 64f3f20..3e37195 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -355,6 +355,9 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
+ case PR_SPEC_NOT_AFFECTED:
+ seq_printf(m, "not vulnerable");
+ break;
++ case PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE:
++ seq_printf(m, "thread force mitigated");
++ break;
+ case PR_SPEC_PRCTL | PR_SPEC_DISABLE:
+ seq_printf(m, "thread mitigated");
+ break;
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 75d9a57..8e127a3 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2335,6 +2335,8 @@ static inline void memalloc_noio_restore(unsigned int flags)
+ #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
+ #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
+ #define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */
++#define PFA_SPEC_SSB_DISABLE 4 /* Speculative Store Bypass disabled */
++#define PFA_SPEC_SSB_FORCE_DISABLE 5 /* Speculative Store Bypass force disabled*/
+
+
+ #define TASK_PFA_TEST(name, func) \
+@@ -2361,6 +2363,13 @@ TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
+ TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
+ TASK_PFA_SET(LMK_WAITING, lmk_waiting)
+
++TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
++TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
++TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
++
++TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
++TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
++
+ /*
+ * task->jobctl flags
+ */
+diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
+index 3b316be..64776b7 100644
+--- a/include/uapi/linux/prctl.h
++++ b/include/uapi/linux/prctl.h
+@@ -207,5 +207,6 @@ struct prctl_mm_map {
+ # define PR_SPEC_PRCTL (1UL << 0)
+ # define PR_SPEC_ENABLE (1UL << 1)
+ # define PR_SPEC_DISABLE (1UL << 2)
++# define PR_SPEC_FORCE_DISABLE (1UL << 3)
+
+ #endif /* _LINUX_PRCTL_H */
+--
+2.7.4
+
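A hedged sketch of the force-disable semantics described above (not from the
patch; the fallback constants are copied from the uapi header and may already
be provided by installed kernel headers):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_SPECULATION_CTRL
    # define PR_SET_SPECULATION_CTRL 53
    # define PR_SPEC_STORE_BYPASS    0
    # define PR_SPEC_ENABLE          (1UL << 1)
    #endif
    #ifndef PR_SPEC_FORCE_DISABLE
    # define PR_SPEC_FORCE_DISABLE   (1UL << 3)
    #endif

    int main(void)
    {
            /* Irrevocably disable speculative store bypass for this task. */
            prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                  PR_SPEC_FORCE_DISABLE, 0, 0);

            /* A later attempt to re-enable is expected to fail with EPERM. */
            if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                      PR_SPEC_ENABLE, 0, 0) < 0 && errno == EPERM)
                    printf("re-enable rejected as expected (EPERM)\n");
            return 0;
    }
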
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0051-seccomp-fix-the-usage-of-get-put_seccomp_filter-in-s.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0051-seccomp-fix-the-usage-of-get-put_seccomp_filter-in-s.patch
new file mode 100644
index 00000000..7361acf3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0051-seccomp-fix-the-usage-of-get-put_seccomp_filter-in-s.patch
@@ -0,0 +1,94 @@
+From 687c8baff48fb1849f5c2e8fdaeb2ff565f6554b Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov <oleg@redhat.com>
+Date: Wed, 27 Sep 2017 09:25:30 -0600
+Subject: [PATCH 51/93] seccomp: fix the usage of get/put_seccomp_filter() in
+ seccomp_get_filter()
+
+commit 66a733ea6b611aecf0119514d2dddab5f9d6c01e upstream.
+
+As Chris explains, get_seccomp_filter() and put_seccomp_filter() can end
+up using different filters. Once we drop ->siglock it is possible for
+task->seccomp.filter to have been replaced via SECCOMP_FILTER_FLAG_TSYNC.
+
+Fixes: f8e529ed941b ("seccomp, ptrace: add support for dumping seccomp filters")
+Reported-by: Chris Salls <chrissalls5@gmail.com>
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+[tycho: add __get_seccomp_filter vs. open coding refcount_inc()]
+Signed-off-by: Tycho Andersen <tycho@docker.com>
+[kees: tweak commit log]
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/seccomp.c | 23 ++++++++++++++++-------
+ 1 file changed, 16 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index 0db7c8a..af182a6 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -457,14 +457,19 @@ static long seccomp_attach_filter(unsigned int flags,
+ return 0;
+ }
+
++void __get_seccomp_filter(struct seccomp_filter *filter)
++{
++ /* Reference count is bounded by the number of total processes. */
++ atomic_inc(&filter->usage);
++}
++
+ /* get_seccomp_filter - increments the reference count of the filter on @tsk */
+ void get_seccomp_filter(struct task_struct *tsk)
+ {
+ struct seccomp_filter *orig = tsk->seccomp.filter;
+ if (!orig)
+ return;
+- /* Reference count is bounded by the number of total processes. */
+- atomic_inc(&orig->usage);
++ __get_seccomp_filter(orig);
+ }
+
+ static inline void seccomp_filter_free(struct seccomp_filter *filter)
+@@ -475,10 +480,8 @@ static inline void seccomp_filter_free(struct seccomp_filter *filter)
+ }
+ }
+
+-/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
+-void put_seccomp_filter(struct task_struct *tsk)
++static void __put_seccomp_filter(struct seccomp_filter *orig)
+ {
+- struct seccomp_filter *orig = tsk->seccomp.filter;
+ /* Clean up single-reference branches iteratively. */
+ while (orig && atomic_dec_and_test(&orig->usage)) {
+ struct seccomp_filter *freeme = orig;
+@@ -487,6 +490,12 @@ void put_seccomp_filter(struct task_struct *tsk)
+ }
+ }
+
++/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
++void put_seccomp_filter(struct task_struct *tsk)
++{
++ __put_seccomp_filter(tsk->seccomp.filter);
++}
++
+ /**
+ * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
+ * @syscall: syscall number to send to userland
+@@ -892,13 +901,13 @@ long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
+ if (!data)
+ goto out;
+
+- get_seccomp_filter(task);
++ __get_seccomp_filter(filter);
+ spin_unlock_irq(&task->sighand->siglock);
+
+ if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
+ ret = -EFAULT;
+
+- put_seccomp_filter(task);
++ __put_seccomp_filter(filter);
+ return ret;
+
+ out:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0052-seccomp-Enable-speculation-flaw-mitigations.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0052-seccomp-Enable-speculation-flaw-mitigations.patch
new file mode 100644
index 00000000..85ed7f13
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0052-seccomp-Enable-speculation-flaw-mitigations.patch
@@ -0,0 +1,64 @@
+From 6afc277e9b6b9bf8bb4c8c2e4641a021f9d709e2 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Tue, 1 May 2018 15:07:31 -0700
+Subject: [PATCH 52/93] seccomp: Enable speculation flaw mitigations
+
+commit 5c3070890d06ff82eecb808d02d2ca39169533ef upstream
+
+When speculation flaw mitigations are opt-in (via prctl), using seccomp
+will automatically opt in to these protections, since using seccomp
+indicates at least some level of sandboxing is desired.
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/seccomp.c | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index af182a6..1d3078b 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -16,6 +16,8 @@
+ #include <linux/atomic.h>
+ #include <linux/audit.h>
+ #include <linux/compat.h>
++#include <linux/nospec.h>
++#include <linux/prctl.h>
+ #include <linux/sched.h>
+ #include <linux/seccomp.h>
+ #include <linux/slab.h>
+@@ -214,6 +216,19 @@ static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
+ return true;
+ }
+
++/*
++ * If a given speculation mitigation is opt-in (prctl()-controlled),
++ * select it, by disabling speculation (enabling mitigation).
++ */
++static inline void spec_mitigate(struct task_struct *task,
++ unsigned long which)
++{
++ int state = arch_prctl_spec_ctrl_get(task, which);
++
++ if (state > 0 && (state & PR_SPEC_PRCTL))
++ arch_prctl_spec_ctrl_set(task, which, PR_SPEC_DISABLE);
++}
++
+ static inline void seccomp_assign_mode(struct task_struct *task,
+ unsigned long seccomp_mode)
+ {
+@@ -225,6 +240,8 @@ static inline void seccomp_assign_mode(struct task_struct *task,
+ * filter) is set.
+ */
+ smp_mb__before_atomic();
++ /* Assume seccomp processes want speculation flaw mitigation. */
++ spec_mitigate(task, PR_SPEC_STORE_BYPASS);
+ set_tsk_thread_flag(task, TIF_SECCOMP);
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0053-seccomp-Use-PR_SPEC_FORCE_DISABLE.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0053-seccomp-Use-PR_SPEC_FORCE_DISABLE.patch
new file mode 100644
index 00000000..a7a60b69
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0053-seccomp-Use-PR_SPEC_FORCE_DISABLE.patch
@@ -0,0 +1,33 @@
+From 62722a97a6aeb1ebba9b749068ed6e9eaecceb37 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 4 May 2018 09:40:03 +0200
+Subject: [PATCH 53/93] seccomp: Use PR_SPEC_FORCE_DISABLE
+
+commit b849a812f7eb92e96d1c8239b06581b2cfd8b275 upstream
+
+Use PR_SPEC_FORCE_DISABLE in seccomp() because seccomp does not allow
+restrictions to be widened.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/seccomp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index 1d3078b..a0bd6ea 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -226,7 +226,7 @@ static inline void spec_mitigate(struct task_struct *task,
+ int state = arch_prctl_spec_ctrl_get(task, which);
+
+ if (state > 0 && (state & PR_SPEC_PRCTL))
+- arch_prctl_spec_ctrl_set(task, which, PR_SPEC_DISABLE);
++ arch_prctl_spec_ctrl_set(task, which, PR_SPEC_FORCE_DISABLE);
+ }
+
+ static inline void seccomp_assign_mode(struct task_struct *task,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0054-seccomp-Add-filter-flag-to-opt-out-of-SSB-mitigation.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0054-seccomp-Add-filter-flag-to-opt-out-of-SSB-mitigation.patch
new file mode 100644
index 00000000..17012902
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0054-seccomp-Add-filter-flag-to-opt-out-of-SSB-mitigation.patch
@@ -0,0 +1,222 @@
+From ed34265c5f460b645a0669079fbc6ad094c83c96 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Thu, 3 May 2018 14:56:12 -0700
+Subject: [PATCH 54/93] seccomp: Add filter flag to opt-out of SSB mitigation
+
+commit 00a02d0c502a06d15e07b857f8ff921e3e402675 upstream
+
+If a seccomp user is not interested in Speculative Store Bypass mitigation
+by default, it can set the new SECCOMP_FILTER_FLAG_SPEC_ALLOW flag when
+adding filters.
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/seccomp.h | 3 +-
+ include/uapi/linux/seccomp.h | 4 +-
+ kernel/seccomp.c | 19 ++++---
+ tools/testing/selftests/seccomp/seccomp_bpf.c | 78 ++++++++++++++++++++++++++-
+ 4 files changed, 93 insertions(+), 11 deletions(-)
+
+diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
+index ecc296c..50c460a 100644
+--- a/include/linux/seccomp.h
++++ b/include/linux/seccomp.h
+@@ -3,7 +3,8 @@
+
+ #include <uapi/linux/seccomp.h>
+
+-#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC)
++#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
++ SECCOMP_FILTER_FLAG_SPEC_ALLOW)
+
+ #ifdef CONFIG_SECCOMP
+
+diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
+index 0f238a4..e4acb61 100644
+--- a/include/uapi/linux/seccomp.h
++++ b/include/uapi/linux/seccomp.h
+@@ -15,7 +15,9 @@
+ #define SECCOMP_SET_MODE_FILTER 1
+
+ /* Valid flags for SECCOMP_SET_MODE_FILTER */
+-#define SECCOMP_FILTER_FLAG_TSYNC 1
++#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
++/* In v4.14+ SECCOMP_FILTER_FLAG_LOG is (1UL << 1) */
++#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
+
+ /*
+ * All BPF programs must return a 32-bit value.
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index a0bd6ea..62a60e7 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -230,7 +230,8 @@ static inline void spec_mitigate(struct task_struct *task,
+ }
+
+ static inline void seccomp_assign_mode(struct task_struct *task,
+- unsigned long seccomp_mode)
++ unsigned long seccomp_mode,
++ unsigned long flags)
+ {
+ assert_spin_locked(&task->sighand->siglock);
+
+@@ -240,8 +241,9 @@ static inline void seccomp_assign_mode(struct task_struct *task,
+ * filter) is set.
+ */
+ smp_mb__before_atomic();
+- /* Assume seccomp processes want speculation flaw mitigation. */
+- spec_mitigate(task, PR_SPEC_STORE_BYPASS);
++ /* Assume default seccomp processes want spec flaw mitigation. */
++ if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
++ spec_mitigate(task, PR_SPEC_STORE_BYPASS);
+ set_tsk_thread_flag(task, TIF_SECCOMP);
+ }
+
+@@ -309,7 +311,7 @@ static inline pid_t seccomp_can_sync_threads(void)
+ * without dropping the locks.
+ *
+ */
+-static inline void seccomp_sync_threads(void)
++static inline void seccomp_sync_threads(unsigned long flags)
+ {
+ struct task_struct *thread, *caller;
+
+@@ -350,7 +352,8 @@ static inline void seccomp_sync_threads(void)
+ * allow one thread to transition the other.
+ */
+ if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
+- seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
++ seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
++ flags);
+ }
+ }
+
+@@ -469,7 +472,7 @@ static long seccomp_attach_filter(unsigned int flags,
+
+ /* Now that the new filter is in place, synchronize to all threads. */
+ if (flags & SECCOMP_FILTER_FLAG_TSYNC)
+- seccomp_sync_threads();
++ seccomp_sync_threads(flags);
+
+ return 0;
+ }
+@@ -729,7 +732,7 @@ static long seccomp_set_mode_strict(void)
+ #ifdef TIF_NOTSC
+ disable_TSC();
+ #endif
+- seccomp_assign_mode(current, seccomp_mode);
++ seccomp_assign_mode(current, seccomp_mode, 0);
+ ret = 0;
+
+ out:
+@@ -787,7 +790,7 @@ static long seccomp_set_mode_filter(unsigned int flags,
+ /* Do not free the successfully attached filter. */
+ prepared = NULL;
+
+- seccomp_assign_mode(current, seccomp_mode);
++ seccomp_assign_mode(current, seccomp_mode, flags);
+ out:
+ spin_unlock_irq(&current->sighand->siglock);
+ if (flags & SECCOMP_FILTER_FLAG_TSYNC)
+diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
+index 03f1fa4..3362f11 100644
+--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
+@@ -1684,7 +1684,11 @@ TEST_F_SIGNAL(TRACE_syscall, kill_after_ptrace, SIGSYS)
+ #endif
+
+ #ifndef SECCOMP_FILTER_FLAG_TSYNC
+-#define SECCOMP_FILTER_FLAG_TSYNC 1
++#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
++#endif
++
++#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
++#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
+ #endif
+
+ #ifndef seccomp
+@@ -1783,6 +1787,78 @@ TEST(seccomp_syscall_mode_lock)
+ }
+ }
+
++/*
++ * Test detection of known and unknown filter flags. Userspace needs to be able
++ * to check if a filter flag is supported by the current kernel and a good way
++ * of doing that is by attempting to enter filter mode, with the flag bit in
++ * question set, and a NULL pointer for the _args_ parameter. EFAULT indicates
++ * that the flag is valid and EINVAL indicates that the flag is invalid.
++ */
++TEST(detect_seccomp_filter_flags)
++{
++ unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC,
++ SECCOMP_FILTER_FLAG_SPEC_ALLOW };
++ unsigned int flag, all_flags;
++ int i;
++ long ret;
++
++ /* Test detection of known-good filter flags */
++ for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
++ int bits = 0;
++
++ flag = flags[i];
++ /* Make sure the flag is a single bit! */
++ while (flag) {
++ if (flag & 0x1)
++ bits ++;
++ flag >>= 1;
++ }
++ ASSERT_EQ(1, bits);
++ flag = flags[i];
++
++ ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
++ ASSERT_NE(ENOSYS, errno) {
++ TH_LOG("Kernel does not support seccomp syscall!");
++ }
++ EXPECT_EQ(-1, ret);
++ EXPECT_EQ(EFAULT, errno) {
++ TH_LOG("Failed to detect that a known-good filter flag (0x%X) is supported!",
++ flag);
++ }
++
++ all_flags |= flag;
++ }
++
++ /* Test detection of all known-good filter flags */
++ ret = seccomp(SECCOMP_SET_MODE_FILTER, all_flags, NULL);
++ EXPECT_EQ(-1, ret);
++ EXPECT_EQ(EFAULT, errno) {
++ TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
++ all_flags);
++ }
++
++ /* Test detection of an unknown filter flag */
++ flag = -1;
++ ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
++ EXPECT_EQ(-1, ret);
++ EXPECT_EQ(EINVAL, errno) {
++ TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported!",
++ flag);
++ }
++
++ /*
++ * Test detection of an unknown filter flag that may simply need to be
++ * added to this test
++ */
++ flag = flags[ARRAY_SIZE(flags) - 1] << 1;
++ ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
++ EXPECT_EQ(-1, ret);
++ EXPECT_EQ(EINVAL, errno) {
++ TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported! Does a new flag need to be added to this test?",
++ flag);
++ }
++}
++
+ TEST(TSYNC_first)
+ {
+ struct sock_filter filter[] = {
+--
+2.7.4
+
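The selftest added above spells out the probing convention: call SECCOMP_SET_MODE_FILTER with the flag under test and a NULL args pointer, then treat EFAULT as "flag known" and EINVAL as "flag unknown". A minimal userspace sketch of the same probe follows; it is illustrative rather than part of the patch, and assumes kernel headers that define __NR_seccomp and a libc without a seccomp() wrapper.

#include <errno.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SECCOMP_SET_MODE_FILTER
#define SECCOMP_SET_MODE_FILTER 1
#endif
#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
#endif

/* NULL filter pointer: EFAULT means the flag is known, EINVAL means it is not. */
static int seccomp_flag_supported(unsigned long flag)
{
    if (syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, flag, NULL) == 0)
        return 0; /* cannot succeed with a NULL filter; treat as unsupported */
    return errno == EFAULT;
}

int main(void)
{
    printf("SPEC_ALLOW supported: %d\n",
           seccomp_flag_supported(SECCOMP_FILTER_FLAG_SPEC_ALLOW));
    return 0;
}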
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0055-seccomp-Move-speculation-migitation-control-to-arch-.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0055-seccomp-Move-speculation-migitation-control-to-arch-.patch
new file mode 100644
index 00000000..ca98b862
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0055-seccomp-Move-speculation-migitation-control-to-arch-.patch
@@ -0,0 +1,121 @@
+From 2a4ae48837c977605ea36a01ed63fa8638e4c881 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 4 May 2018 15:12:06 +0200
+Subject: [PATCH 55/93] seccomp: Move speculation migitation control to arch
+ code
+
+commit 8bf37d8c067bb7eb8e7c381bdadf9bd89182b6bc upstream
+
+The migitation control is simpler to implement in architecture code as it
+avoids the extra function call to check the mode. Aside of that having an
+explicit seccomp enabled mode in the architecture mitigations would require
+even more workarounds.
+
+Move it into architecture code and provide a weak function in the seccomp
+code. Remove the 'which' argument as this allows the architecture to decide
+which mitigations are relevant for seccomp.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 29 ++++++++++++++++++-----------
+ include/linux/nospec.h | 2 ++
+ kernel/seccomp.c | 15 ++-------------
+ 3 files changed, 22 insertions(+), 24 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index fdbd8e5..131617d 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -568,6 +568,24 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+ return 0;
+ }
+
++int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
++ unsigned long ctrl)
++{
++ switch (which) {
++ case PR_SPEC_STORE_BYPASS:
++ return ssb_prctl_set(task, ctrl);
++ default:
++ return -ENODEV;
++ }
++}
++
++#ifdef CONFIG_SECCOMP
++void arch_seccomp_spec_mitigate(struct task_struct *task)
++{
++ ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
++}
++#endif
++
+ static int ssb_prctl_get(struct task_struct *task)
+ {
+ switch (ssb_mode) {
+@@ -586,17 +604,6 @@ static int ssb_prctl_get(struct task_struct *task)
+ }
+ }
+
+-int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+- unsigned long ctrl)
+-{
+- switch (which) {
+- case PR_SPEC_STORE_BYPASS:
+- return ssb_prctl_set(task, ctrl);
+- default:
+- return -ENODEV;
+- }
+-}
+-
+ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+ {
+ switch (which) {
+diff --git a/include/linux/nospec.h b/include/linux/nospec.h
+index a908c95..0c5ef54 100644
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -62,5 +62,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
+ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
+ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+ unsigned long ctrl);
++/* Speculation control for seccomp enforced mitigation */
++void arch_seccomp_spec_mitigate(struct task_struct *task);
+
+ #endif /* _LINUX_NOSPEC_H */
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index 62a60e7..3975856 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -216,18 +216,7 @@ static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
+ return true;
+ }
+
+-/*
+- * If a given speculation mitigation is opt-in (prctl()-controlled),
+- * select it, by disabling speculation (enabling mitigation).
+- */
+-static inline void spec_mitigate(struct task_struct *task,
+- unsigned long which)
+-{
+- int state = arch_prctl_spec_ctrl_get(task, which);
+-
+- if (state > 0 && (state & PR_SPEC_PRCTL))
+- arch_prctl_spec_ctrl_set(task, which, PR_SPEC_FORCE_DISABLE);
+-}
++void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
+
+ static inline void seccomp_assign_mode(struct task_struct *task,
+ unsigned long seccomp_mode,
+@@ -243,7 +232,7 @@ static inline void seccomp_assign_mode(struct task_struct *task,
+ smp_mb__before_atomic();
+ /* Assume default seccomp processes want spec flaw mitigation. */
+ if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
+- spec_mitigate(task, PR_SPEC_STORE_BYPASS);
++ arch_seccomp_spec_mitigate(task);
+ set_tsk_thread_flag(task, TIF_SECCOMP);
+ }
+
+--
+2.7.4
+
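The move above leans on weak linkage: kernel/seccomp.c keeps an empty __weak default for arch_seccomp_spec_mitigate() and arch/x86/kernel/cpu/bugs.c supplies the strong definition that wins at link time. Below is a self-contained userspace sketch of that pattern, using the GCC attribute the kernel's __weak macro expands to; it is illustrative only and not taken from the patch.

#include <stdio.h>

/* Empty weak default, the analogue of the hook left in kernel/seccomp.c. */
void __attribute__((weak)) arch_seccomp_spec_mitigate(void)
{
    puts("weak default: no architecture mitigation hooked in");
}

int main(void)
{
    /*
     * Linked on its own this prints the message above; linking in another
     * object that defines arch_seccomp_spec_mitigate() replaces it, which
     * is how the strong definition in arch/x86/kernel/cpu/bugs.c displaces
     * the generic default.
     */
    arch_seccomp_spec_mitigate();
    return 0;
}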
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0056-x86-speculation-Make-seccomp-the-default-mode-for-Sp.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0056-x86-speculation-Make-seccomp-the-default-mode-for-Sp.patch
new file mode 100644
index 00000000..21edf610
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0056-x86-speculation-Make-seccomp-the-default-mode-for-Sp.patch
@@ -0,0 +1,166 @@
+From c9379df089e45eab50820798e3e98aee3b1e5adf Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Thu, 3 May 2018 14:37:54 -0700
+Subject: [PATCH 56/93] x86/speculation: Make "seccomp" the default mode for
+ Speculative Store Bypass
+
+commit f21b53b20c754021935ea43364dbf53778eeba32 upstream
+
+Unless explicitly opted out of, anything running under seccomp will have
+SSB mitigations enabled. Choosing the "prctl" mode will disable this.
+
+[ tglx: Adjusted it to the new arch_seccomp_spec_mitigate() mechanism ]
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/kernel-parameters.txt | 26 +++++++++++++++++---------
+ arch/x86/include/asm/nospec-branch.h | 1 +
+ arch/x86/kernel/cpu/bugs.c | 32 +++++++++++++++++++++++---------
+ 3 files changed, 41 insertions(+), 18 deletions(-)
+
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 80811df..2c5df33 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -3986,19 +3986,27 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ This parameter controls whether the Speculative Store
+ Bypass optimization is used.
+
+- on - Unconditionally disable Speculative Store Bypass
+- off - Unconditionally enable Speculative Store Bypass
+- auto - Kernel detects whether the CPU model contains an
+- implementation of Speculative Store Bypass and
+- picks the most appropriate mitigation.
+- prctl - Control Speculative Store Bypass per thread
+- via prctl. Speculative Store Bypass is enabled
+- for a process by default. The state of the control
+- is inherited on fork.
++ on - Unconditionally disable Speculative Store Bypass
++ off - Unconditionally enable Speculative Store Bypass
++ auto - Kernel detects whether the CPU model contains an
++ implementation of Speculative Store Bypass and
++ picks the most appropriate mitigation. If the
++ CPU is not vulnerable, "off" is selected. If the
++ CPU is vulnerable the default mitigation is
++ architecture and Kconfig dependent. See below.
++ prctl - Control Speculative Store Bypass per thread
++ via prctl. Speculative Store Bypass is enabled
++ for a process by default. The state of the control
++ is inherited on fork.
++ seccomp - Same as "prctl" above, but all seccomp threads
++ will disable SSB unless they explicitly opt out.
+
+ Not specifying this option is equivalent to
+ spec_store_bypass_disable=auto.
+
++ Default mitigations:
++ X86: If CONFIG_SECCOMP=y "seccomp", otherwise "prctl"
++
+ spia_io_base= [HW,MTD]
+ spia_fio_base=
+ spia_pedr=
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 71ad014..328ea3c 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -233,6 +233,7 @@ enum ssb_mitigation {
+ SPEC_STORE_BYPASS_NONE,
+ SPEC_STORE_BYPASS_DISABLE,
+ SPEC_STORE_BYPASS_PRCTL,
++ SPEC_STORE_BYPASS_SECCOMP,
+ };
+
+ extern char __indirect_thunk_start[];
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 131617d..9a3bb65 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -415,22 +415,25 @@ enum ssb_mitigation_cmd {
+ SPEC_STORE_BYPASS_CMD_AUTO,
+ SPEC_STORE_BYPASS_CMD_ON,
+ SPEC_STORE_BYPASS_CMD_PRCTL,
++ SPEC_STORE_BYPASS_CMD_SECCOMP,
+ };
+
+ static const char *ssb_strings[] = {
+ [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
+ [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
+- [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl"
++ [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
++ [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
+ };
+
+ static const struct {
+ const char *option;
+ enum ssb_mitigation_cmd cmd;
+ } ssb_mitigation_options[] = {
+- { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
+- { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
+- { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
+- { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
++ { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
++ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
++ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
++ { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
++ { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
+ };
+
+ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
+@@ -480,8 +483,15 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
+
+ switch (cmd) {
+ case SPEC_STORE_BYPASS_CMD_AUTO:
+- /* Choose prctl as the default mode */
+- mode = SPEC_STORE_BYPASS_PRCTL;
++ case SPEC_STORE_BYPASS_CMD_SECCOMP:
++ /*
++ * Choose prctl+seccomp as the default mode if seccomp is
++ * enabled.
++ */
++ if (IS_ENABLED(CONFIG_SECCOMP))
++ mode = SPEC_STORE_BYPASS_SECCOMP;
++ else
++ mode = SPEC_STORE_BYPASS_PRCTL;
+ break;
+ case SPEC_STORE_BYPASS_CMD_ON:
+ mode = SPEC_STORE_BYPASS_DISABLE;
+@@ -529,12 +539,14 @@ static void ssb_select_mitigation()
+ }
+
+ #undef pr_fmt
++#define pr_fmt(fmt) "Speculation prctl: " fmt
+
+ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+ {
+ bool update;
+
+- if (ssb_mode != SPEC_STORE_BYPASS_PRCTL)
++ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
++ ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
+ return -ENXIO;
+
+ switch (ctrl) {
+@@ -582,7 +594,8 @@ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+ #ifdef CONFIG_SECCOMP
+ void arch_seccomp_spec_mitigate(struct task_struct *task)
+ {
+- ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
++ if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
++ ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+ }
+ #endif
+
+@@ -591,6 +604,7 @@ static int ssb_prctl_get(struct task_struct *task)
+ switch (ssb_mode) {
+ case SPEC_STORE_BYPASS_DISABLE:
+ return PR_SPEC_DISABLE;
++ case SPEC_STORE_BYPASS_SECCOMP:
+ case SPEC_STORE_BYPASS_PRCTL:
+ if (task_spec_ssb_force_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+--
+2.7.4
+
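With "seccomp" as the default mode, a task can still manage the mitigation explicitly through the speculation-control prctl() added earlier in this series, which is what the documentation text above refers to. A hedged userspace sketch follows; the fallback #defines mirror include/uapi/linux/prctl.h and are only needed when building against headers that predate them.

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_SPECULATION_CTRL
#define PR_GET_SPECULATION_CTRL 52
#define PR_SET_SPECULATION_CTRL 53
#define PR_SPEC_STORE_BYPASS    0
#define PR_SPEC_PRCTL           (1UL << 0)
#define PR_SPEC_ENABLE          (1UL << 1)
#define PR_SPEC_DISABLE         (1UL << 2)
#define PR_SPEC_FORCE_DISABLE   (1UL << 3)
#endif

int main(void)
{
    long state;

    /* Opt this task into the SSB mitigation (works in prctl and seccomp modes). */
    if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
              PR_SPEC_DISABLE, 0, 0))
        perror("PR_SET_SPECULATION_CTRL");

    state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
    printf("SSB control state: 0x%lx (disabled: %s)\n", state,
           (state > 0 && (state & PR_SPEC_DISABLE)) ? "yes" : "no");
    return 0;
}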
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0057-x86-bugs-Rename-_RDS-to-_SSBD.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0057-x86-bugs-Rename-_RDS-to-_SSBD.patch
new file mode 100644
index 00000000..189588aa
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0057-x86-bugs-Rename-_RDS-to-_SSBD.patch
@@ -0,0 +1,405 @@
+From 4bb9a717246aa3019a3d97904e29c4da0bfc37f9 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 9 May 2018 21:41:38 +0200
+Subject: [PATCH 57/93] x86/bugs: Rename _RDS to _SSBD
+
+commit 9f65fb29374ee37856dbad847b4e121aab72b510 upstream
+
+Intel collateral will reference the SSB mitigation bit in IA32_SPEC_CTL[2]
+as SSBD (Speculative Store Bypass Disable).
+
+Hence changing it.
+
+It is unclear yet what the MSR_IA32_ARCH_CAPABILITIES (0x10a) Bit(4) name
+is going to be. Following the rename it would be SSBD_NO but that rolls out
+to Speculative Store Bypass Disable No.
+
+Also fixed the missing space in X86_FEATURE_AMD_SSBD.
+
+[ tglx: Fixup x86_amd_rds_enable() and rds_tif_to_amd_ls_cfg() as well ]
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 4 ++--
+ arch/x86/include/asm/msr-index.h | 10 +++++-----
+ arch/x86/include/asm/spec-ctrl.h | 12 ++++++------
+ arch/x86/include/asm/thread_info.h | 6 +++---
+ arch/x86/kernel/cpu/amd.c | 14 +++++++-------
+ arch/x86/kernel/cpu/bugs.c | 36 ++++++++++++++++++------------------
+ arch/x86/kernel/cpu/common.c | 2 +-
+ arch/x86/kernel/cpu/intel.c | 2 +-
+ arch/x86/kernel/process.c | 8 ++++----
+ arch/x86/kvm/cpuid.c | 2 +-
+ arch/x86/kvm/cpuid.h | 2 +-
+ arch/x86/kvm/vmx.c | 2 +-
+ 12 files changed, 50 insertions(+), 50 deletions(-)
+
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 8797069..0ed8ea5 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -205,7 +205,7 @@
+ #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+ #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+ #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+-#define X86_FEATURE_AMD_RDS (7*32+24) /* "" AMD RDS implementation */
++#define X86_FEATURE_AMD_SSBD (7*32+24) /* "" AMD SSBD implementation */
+
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+@@ -308,7 +308,7 @@
+ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+-#define X86_FEATURE_RDS (18*32+31) /* Reduced Data Speculation */
++#define X86_FEATURE_SSBD (18*32+31) /* Speculative Store Bypass Disable */
+
+ /*
+ * BUG word(s)
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 7ad3ed9..0145a0b 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -40,8 +40,8 @@
+ #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
+ #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
+ #define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
+-#define SPEC_CTRL_RDS_SHIFT 2 /* Reduced Data Speculation bit */
+-#define SPEC_CTRL_RDS (1 << SPEC_CTRL_RDS_SHIFT) /* Reduced Data Speculation */
++#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
++#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
+
+ #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
+ #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
+@@ -63,10 +63,10 @@
+ #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
+ #define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
+ #define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
+-#define ARCH_CAP_RDS_NO (1 << 4) /*
++#define ARCH_CAP_SSBD_NO (1 << 4) /*
+ * Not susceptible to Speculative Store Bypass
+- * attack, so no Reduced Data Speculation control
+- * required.
++ * attack, so no Speculative Store Bypass
++ * control required.
+ */
+
+ #define MSR_IA32_BBL_CR_CTL 0x00000119
+diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
+index 45ef00a..dc21209 100644
+--- a/arch/x86/include/asm/spec-ctrl.h
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -17,20 +17,20 @@ extern void x86_spec_ctrl_restore_host(u64);
+
+ /* AMD specific Speculative Store Bypass MSR data */
+ extern u64 x86_amd_ls_cfg_base;
+-extern u64 x86_amd_ls_cfg_rds_mask;
++extern u64 x86_amd_ls_cfg_ssbd_mask;
+
+ /* The Intel SPEC CTRL MSR base value cache */
+ extern u64 x86_spec_ctrl_base;
+
+-static inline u64 rds_tif_to_spec_ctrl(u64 tifn)
++static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
+ {
+- BUILD_BUG_ON(TIF_RDS < SPEC_CTRL_RDS_SHIFT);
+- return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT);
++ BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
++ return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+ }
+
+-static inline u64 rds_tif_to_amd_ls_cfg(u64 tifn)
++static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
+ {
+- return (tifn & _TIF_RDS) ? x86_amd_ls_cfg_rds_mask : 0ULL;
++ return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
+ }
+
+ extern void speculative_store_bypass_update(void);
+diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
+index 661afac..2d8788a 100644
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -83,7 +83,7 @@ struct thread_info {
+ #define TIF_SIGPENDING 2 /* signal pending */
+ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+ #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
+-#define TIF_RDS 5 /* Reduced data speculation */
++#define TIF_SSBD 5 /* Reduced data speculation */
+ #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+ #define TIF_SECCOMP 8 /* secure computing */
+@@ -107,7 +107,7 @@ struct thread_info {
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+ #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+-#define _TIF_RDS (1 << TIF_RDS)
++#define _TIF_SSBD (1 << TIF_SSBD)
+ #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+ #define _TIF_SECCOMP (1 << TIF_SECCOMP)
+@@ -141,7 +141,7 @@ struct thread_info {
+
+ /* flags to check in __switch_to() */
+ #define _TIF_WORK_CTXSW \
+- (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS)
++ (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
+
+ #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
+ #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index a176c81..acb2fcc 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -555,12 +555,12 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
+ }
+ /*
+ * Try to cache the base value so further operations can
+- * avoid RMW. If that faults, do not enable RDS.
++ * avoid RMW. If that faults, do not enable SSBD.
+ */
+ if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+- setup_force_cpu_cap(X86_FEATURE_RDS);
+- setup_force_cpu_cap(X86_FEATURE_AMD_RDS);
+- x86_amd_ls_cfg_rds_mask = 1ULL << bit;
++ setup_force_cpu_cap(X86_FEATURE_SSBD);
++ setup_force_cpu_cap(X86_FEATURE_AMD_SSBD);
++ x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
+ }
+ }
+ }
+@@ -849,9 +849,9 @@ static void init_amd(struct cpuinfo_x86 *c)
+ if (!cpu_has(c, X86_FEATURE_XENPV))
+ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+
+- if (boot_cpu_has(X86_FEATURE_AMD_RDS)) {
+- set_cpu_cap(c, X86_FEATURE_RDS);
+- set_cpu_cap(c, X86_FEATURE_AMD_RDS);
++ if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
++ set_cpu_cap(c, X86_FEATURE_SSBD);
++ set_cpu_cap(c, X86_FEATURE_AMD_SSBD);
+ }
+ }
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 9a3bb65..ae6f9ba 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -44,10 +44,10 @@ static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
+
+ /*
+ * AMD specific MSR info for Speculative Store Bypass control.
+- * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
++ * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
+ */
+ u64 __ro_after_init x86_amd_ls_cfg_base;
+-u64 __ro_after_init x86_amd_ls_cfg_rds_mask;
++u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
+
+ void __init check_bugs(void)
+ {
+@@ -145,7 +145,7 @@ u64 x86_spec_ctrl_get_default(void)
+ u64 msrval = x86_spec_ctrl_base;
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+- msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
++ msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+ return msrval;
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
+@@ -158,7 +158,7 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
+ return;
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+- host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
++ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+
+ if (host != guest_spec_ctrl)
+ wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
+@@ -173,18 +173,18 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
+ return;
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+- host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
++ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+
+ if (host != guest_spec_ctrl)
+ wrmsrl(MSR_IA32_SPEC_CTRL, host);
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
+
+-static void x86_amd_rds_enable(void)
++static void x86_amd_ssb_disable(void)
+ {
+- u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;
++ u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
+
+- if (boot_cpu_has(X86_FEATURE_AMD_RDS))
++ if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
+ wrmsrl(MSR_AMD64_LS_CFG, msrval);
+ }
+
+@@ -472,7 +472,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
+ enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
+ enum ssb_mitigation_cmd cmd;
+
+- if (!boot_cpu_has(X86_FEATURE_RDS))
++ if (!boot_cpu_has(X86_FEATURE_SSBD))
+ return mode;
+
+ cmd = ssb_parse_cmdline();
+@@ -506,7 +506,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
+ /*
+ * We have three CPU feature flags that are in play here:
+ * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
+- * - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
++ * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
+ * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
+ */
+ if (mode == SPEC_STORE_BYPASS_DISABLE) {
+@@ -517,12 +517,12 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
+ */
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_INTEL:
+- x86_spec_ctrl_base |= SPEC_CTRL_RDS;
+- x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS;
+- x86_spec_ctrl_set(SPEC_CTRL_RDS);
++ x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
++ x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
++ x86_spec_ctrl_set(SPEC_CTRL_SSBD);
+ break;
+ case X86_VENDOR_AMD:
+- x86_amd_rds_enable();
++ x86_amd_ssb_disable();
+ break;
+ }
+ }
+@@ -555,16 +555,16 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+ if (task_spec_ssb_force_disable(task))
+ return -EPERM;
+ task_clear_spec_ssb_disable(task);
+- update = test_and_clear_tsk_thread_flag(task, TIF_RDS);
++ update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
+ break;
+ case PR_SPEC_DISABLE:
+ task_set_spec_ssb_disable(task);
+- update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
++ update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+ break;
+ case PR_SPEC_FORCE_DISABLE:
+ task_set_spec_ssb_disable(task);
+ task_set_spec_ssb_force_disable(task);
+- update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
++ update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+ break;
+ default:
+ return -ERANGE;
+@@ -634,7 +634,7 @@ void x86_spec_ctrl_setup_ap(void)
+ x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
+
+ if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
+- x86_amd_rds_enable();
++ x86_amd_ssb_disable();
+ }
+
+ #ifdef CONFIG_SYSFS
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index beb1da8..d0dd736 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -911,7 +911,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+ if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
+- !(ia32_cap & ARCH_CAP_RDS_NO))
++ !(ia32_cap & ARCH_CAP_SSBD_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+ if (x86_match_cpu(cpu_no_speculation))
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index f15aea6..047adaa 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -154,7 +154,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
+ setup_clear_cpu_cap(X86_FEATURE_STIBP);
+ setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
+ setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
+- setup_clear_cpu_cap(X86_FEATURE_RDS);
++ setup_clear_cpu_cap(X86_FEATURE_SSBD);
+ }
+
+ /*
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 9c48e18..c344230 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -207,11 +207,11 @@ static __always_inline void __speculative_store_bypass_update(unsigned long tifn
+ {
+ u64 msr;
+
+- if (static_cpu_has(X86_FEATURE_AMD_RDS)) {
+- msr = x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn);
++ if (static_cpu_has(X86_FEATURE_AMD_SSBD)) {
++ msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
+ wrmsrl(MSR_AMD64_LS_CFG, msr);
+ } else {
+- msr = x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn);
++ msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+ }
+ }
+@@ -250,7 +250,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+ if ((tifp ^ tifn) & _TIF_NOTSC)
+ cr4_toggle_bits(X86_CR4_TSD);
+
+- if ((tifp ^ tifn) & _TIF_RDS)
++ if ((tifp ^ tifn) & _TIF_SSBD)
+ __speculative_store_bypass_update(tifn);
+ }
+
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 237e926..db95637 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -382,7 +382,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+
+ /* cpuid 7.0.edx*/
+ const u32 kvm_cpuid_7_0_edx_x86_features =
+- F(SPEC_CTRL) | F(RDS) | F(ARCH_CAPABILITIES);
++ F(SPEC_CTRL) | F(SSBD) | F(ARCH_CAPABILITIES);
+
+ /* all calls to cpuid_count() should be made on the same cpu */
+ get_cpu();
+diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
+index 39dd457..72551c5 100644
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -171,7 +171,7 @@ static inline bool guest_cpuid_has_spec_ctrl(struct kvm_vcpu *vcpu)
+ if (best && (best->ebx & bit(X86_FEATURE_IBRS)))
+ return true;
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+- return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_RDS)));
++ return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_SSBD)));
+ }
+
+ static inline bool guest_cpuid_has_arch_capabilities(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 17199dc..c7df5c4 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -3133,7 +3133,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ return 1;
+
+ /* The STIBP bit doesn't fault even if it's not advertised */
+- if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_RDS))
++ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
+ return 1;
+
+ vmx->spec_ctrl = data;
+--
+2.7.4
+
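The renamed ssbd_tif_to_spec_ctrl() above works because TIF_SSBD (bit 5) sits above SPEC_CTRL_SSBD_SHIFT (bit 2), which the BUILD_BUG_ON guards, so converting the thread-flag word into the MSR bit is a single right shift. A standalone illustration in plain C, with the constants copied from the patch:

#include <stdint.h>
#include <stdio.h>

#define TIF_SSBD             5
#define SPEC_CTRL_SSBD_SHIFT 2
#define _TIF_SSBD            (1UL << TIF_SSBD)
#define SPEC_CTRL_SSBD       (1UL << SPEC_CTRL_SSBD_SHIFT)

static uint64_t ssbd_tif_to_spec_ctrl(uint64_t tifn)
{
    /* Relocate bit 5 (thread flag) down to bit 2 (MSR_IA32_SPEC_CTRL). */
    return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

int main(void)
{
    uint64_t flags = _TIF_SSBD; /* pretend the thread has TIF_SSBD set */

    printf("TIF word 0x%llx -> SPEC_CTRL bits 0x%llx (expected 0x%llx)\n",
           (unsigned long long)flags,
           (unsigned long long)ssbd_tif_to_spec_ctrl(flags),
           (unsigned long long)SPEC_CTRL_SSBD);
    return 0;
}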
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0058-x86-bugs-Fix-__ssb_select_mitigation-return-type.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0058-x86-bugs-Fix-__ssb_select_mitigation-return-type.patch
new file mode 100644
index 00000000..f24bec49
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0058-x86-bugs-Fix-__ssb_select_mitigation-return-type.patch
@@ -0,0 +1,35 @@
+From 1372f3493fdf1eaaeb82c4f3770a38aad5541f3b Mon Sep 17 00:00:00 2001
+From: Jiri Kosina <jkosina@suse.cz>
+Date: Thu, 10 May 2018 22:47:18 +0200
+Subject: [PATCH 58/93] x86/bugs: Fix __ssb_select_mitigation() return type
+
+commit d66d8ff3d21667b41eddbe86b35ab411e40d8c5f upstream
+
+__ssb_select_mitigation() returns one of the members of enum ssb_mitigation,
+not ssb_mitigation_cmd; fix the prototype to reflect that.
+
+Fixes: 24f7fc83b9204 ("x86/bugs: Provide boot parameters for the spec_store_bypass_disable mitigation")
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index ae6f9ba..c7b4d11 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -467,7 +467,7 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
+ return cmd;
+ }
+
+-static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
++static enum ssb_mitigation __init __ssb_select_mitigation(void)
+ {
+ enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
+ enum ssb_mitigation_cmd cmd;
+--
+2.7.4
+
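As a side note on the prototype fix above: plain C silently accepts returning an enumerator of one enum type from a function declared to return another, which is how such mismatches creep in; newer compilers with -Wenum-conversion (and C++ front ends) do warn. A tiny sketch with trimmed copies of the two enums, illustrative only:

enum ssb_mitigation     { SPEC_STORE_BYPASS_NONE, SPEC_STORE_BYPASS_DISABLE };
enum ssb_mitigation_cmd { SPEC_STORE_BYPASS_CMD_NONE, SPEC_STORE_BYPASS_CMD_AUTO };

/* Declared to return the _cmd enum, but the body computes the other one. */
static enum ssb_mitigation_cmd select_mitigation(void)
{
    enum ssb_mitigation mode = SPEC_STORE_BYPASS_DISABLE;

    return mode; /* accepted silently by plain C; -Wenum-conversion warns */
}

int main(void)
{
    return (int)select_mitigation();
}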
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0059-x86-bugs-Make-cpu_show_common-static.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0059-x86-bugs-Make-cpu_show_common-static.patch
new file mode 100644
index 00000000..5dc616b5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0059-x86-bugs-Make-cpu_show_common-static.patch
@@ -0,0 +1,34 @@
+From 3c2ec124e35d5a74d3ed660095591290dc1d549b Mon Sep 17 00:00:00 2001
+From: Jiri Kosina <jkosina@suse.cz>
+Date: Thu, 10 May 2018 22:47:32 +0200
+Subject: [PATCH 59/93] x86/bugs: Make cpu_show_common() static
+
+commit 7bb4d366cba992904bffa4820d24e70a3de93e76 upstream
+
+cpu_show_common() is not used outside of arch/x86/kernel/cpu/bugs.c, so
+make it static.
+
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index c7b4d11..8187642 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -639,7 +639,7 @@ void x86_spec_ctrl_setup_ap(void)
+
+ #ifdef CONFIG_SYSFS
+
+-ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
++static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+ char *buf, unsigned int bug)
+ {
+ if (!boot_cpu_has_bug(bug))
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0060-x86-bugs-Fix-the-parameters-alignment-and-missing-vo.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0060-x86-bugs-Fix-the-parameters-alignment-and-missing-vo.patch
new file mode 100644
index 00000000..ef9f4216
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0060-x86-bugs-Fix-the-parameters-alignment-and-missing-vo.patch
@@ -0,0 +1,42 @@
+From 947d5d98fb1328a22a8b502f8ce6f8e5657a5ec7 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Fri, 11 May 2018 16:50:35 -0400
+Subject: [PATCH 60/93] x86/bugs: Fix the parameters alignment and missing void
+
+commit ffed645e3be0e32f8e9ab068d257aee8d0fe8eec upstream
+
+Fixes: 7bb4d366c ("x86/bugs: Make cpu_show_common() static")
+Fixes: 24f7fc83b ("x86/bugs: Provide boot parameters for the spec_store_bypass_disable mitigation")
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 8187642..4f8c88e 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -530,7 +530,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
+ return mode;
+ }
+
+-static void ssb_select_mitigation()
++static void ssb_select_mitigation(void)
+ {
+ ssb_mode = __ssb_select_mitigation();
+
+@@ -640,7 +640,7 @@ void x86_spec_ctrl_setup_ap(void)
+ #ifdef CONFIG_SYSFS
+
+ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+- char *buf, unsigned int bug)
++ char *buf, unsigned int bug)
+ {
+ if (!boot_cpu_has_bug(bug))
+ return sprintf(buf, "Not affected\n");
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0061-x86-cpu-Make-alternative_msr_write-work-for-32-bit-c.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0061-x86-cpu-Make-alternative_msr_write-work-for-32-bit-c.patch
new file mode 100644
index 00000000..1f830819
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0061-x86-cpu-Make-alternative_msr_write-work-for-32-bit-c.patch
@@ -0,0 +1,42 @@
+From 76eefada90172bd111371bd2669a50eec64a3b0f Mon Sep 17 00:00:00 2001
+From: Jim Mattson <jmattson@google.com>
+Date: Sun, 13 May 2018 17:33:57 -0400
+Subject: [PATCH 61/93] x86/cpu: Make alternative_msr_write work for 32-bit
+ code
+
+commit 5f2b745f5e1304f438f9b2cd03ebc8120b6e0d3b upstream
+
+Cast val and (val >> 32) to (u32), so that they fit in a
+general-purpose register in both 32-bit and 64-bit code.
+
+[ tglx: Made it u32 instead of uintptr_t ]
+
+Fixes: c65732e4f721 ("x86/cpu: Restore CPUID_8000_0008_EBX reload")
+Signed-off-by: Jim Mattson <jmattson@google.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 328ea3c..bc258e6 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -265,8 +265,8 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
+ {
+ asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
+ : : "c" (msr),
+- "a" (val),
+- "d" (val >> 32),
++ "a" ((u32)val),
++ "d" ((u32)(val >> 32)),
+ [feature] "i" (feature)
+ : "memory");
+ }
+--
+2.7.4
+
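The casts above matter because wrmsr consumes the 64-bit value as two 32-bit halves, low word in EAX and high word in EDX, and the "a"/"d" asm constraints need operands that fit a single general-purpose register on 32-bit builds. A plain C illustration of the split follows; no MSR is actually written and the sample value is arbitrary.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t val = 0x1122334455667788ULL; /* arbitrary 64-bit MSR value */
    uint32_t lo  = (uint32_t)val;         /* would go into EAX */
    uint32_t hi  = (uint32_t)(val >> 32); /* would go into EDX */

    printf("wrmsr would receive eax=0x%08x edx=0x%08x\n", lo, hi);
    return 0;
}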
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0062-KVM-SVM-Move-spec-control-call-after-restore-of-GS.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0062-KVM-SVM-Move-spec-control-call-after-restore-of-GS.patch
new file mode 100644
index 00000000..75caec43
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0062-KVM-SVM-Move-spec-control-call-after-restore-of-GS.patch
@@ -0,0 +1,70 @@
+From 21d2555ad333e693fc6859bff2a60b9b24de8d99 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 11 May 2018 15:21:01 +0200
+Subject: [PATCH 62/93] KVM: SVM: Move spec control call after restore of GS
+
+commit 15e6c22fd8e5a42c5ed6d487b7c9fe44c2517765 upstream
+
+svm_vcpu_run() invokes x86_spec_ctrl_restore_host() after VMEXIT, but
+before the host GS is restored. x86_spec_ctrl_restore_host() uses 'current'
+to determine the host SSBD state of the thread. 'current' is GS based, but
+host GS is not yet restored and the access causes a triple fault.
+
+Move the call after the host GS restore.
+
+Fixes: 885f82bfbc6f x86/process: Allow runtime control of Speculative Store Bypass
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 47779f5..9991462 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -4999,6 +4999,18 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+ #endif
+ );
+
++ /* Eliminate branch target predictions from guest mode */
++ vmexit_fill_RSB();
++
++#ifdef CONFIG_X86_64
++ wrmsrl(MSR_GS_BASE, svm->host.gs_base);
++#else
++ loadsegment(fs, svm->host.fs);
++#ifndef CONFIG_X86_32_LAZY_GS
++ loadsegment(gs, svm->host.gs);
++#endif
++#endif
++
+ /*
+ * We do not use IBRS in the kernel. If this vCPU has used the
+ * SPEC_CTRL MSR it may have left it on; save the value and
+@@ -5019,18 +5031,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+
+ x86_spec_ctrl_restore_host(svm->spec_ctrl);
+
+- /* Eliminate branch target predictions from guest mode */
+- vmexit_fill_RSB();
+-
+-#ifdef CONFIG_X86_64
+- wrmsrl(MSR_GS_BASE, svm->host.gs_base);
+-#else
+- loadsegment(fs, svm->host.fs);
+-#ifndef CONFIG_X86_32_LAZY_GS
+- loadsegment(gs, svm->host.gs);
+-#endif
+-#endif
+-
+ reload_tss(vcpu);
+
+ local_irq_disable();
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0063-x86-speculation-Use-synthetic-bits-for-IBRS-IBPB-STI.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0063-x86-speculation-Use-synthetic-bits-for-IBRS-IBPB-STI.patch
new file mode 100644
index 00000000..a004c9a0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0063-x86-speculation-Use-synthetic-bits-for-IBRS-IBPB-STI.patch
@@ -0,0 +1,156 @@
+From 471e61fb50a8b552bf18db27c7ff9808182008dd Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Wed, 2 May 2018 18:15:14 +0200
+Subject: [PATCH 63/93] x86/speculation: Use synthetic bits for IBRS/IBPB/STIBP
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+commit e7c587da125291db39ddf1f49b18e5970adbac17 upstream
+
+Intel and AMD have different CPUID bits hence for those use synthetic bits
+which get set on the respective vendor's in init_speculation_control(). So
+that debacles like what the commit message of
+
+ c65732e4f721 ("x86/cpu: Restore CPUID_8000_0008_EBX reload")
+
+talks about don't happen anymore.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Tested-by: Jörg Otte <jrg.otte@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Link: https://lkml.kernel.org/r/20180504161815.GG9257@pd.tnic
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 12 ++++++++----
+ arch/x86/kernel/cpu/common.c | 14 ++++++++++----
+ arch/x86/kvm/cpuid.c | 10 +++++-----
+ arch/x86/kvm/cpuid.h | 4 ++--
+ 4 files changed, 25 insertions(+), 15 deletions(-)
+
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 0ed8ea5..059437a 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -205,7 +205,10 @@
+ #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+ #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+ #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+-#define X86_FEATURE_AMD_SSBD (7*32+24) /* "" AMD SSBD implementation */
++#define X86_FEATURE_AMD_SSBD ( 7*32+24) /* "" AMD SSBD implementation */
++#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
++#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
++#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
+
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+@@ -263,9 +266,9 @@
+ /* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
+ #define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
+ #define X86_FEATURE_IRPERF (13*32+1) /* Instructions Retired Count */
+-#define X86_FEATURE_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
+-#define X86_FEATURE_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
+-#define X86_FEATURE_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
++#define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
++#define X86_FEATURE_AMD_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
++#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
+
+ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
+ #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
+@@ -301,6 +304,7 @@
+ #define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */
+ #define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */
+
++
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+ #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
+ #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index d0dd736..67bfa3c 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -725,17 +725,23 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
+ * and they also have a different bit for STIBP support. Also,
+ * a hypervisor might have set the individual AMD bits even on
+ * Intel CPUs, for finer-grained selection of what's available.
+- *
+- * We use the AMD bits in 0x8000_0008 EBX as the generic hardware
+- * features, which are visible in /proc/cpuinfo and used by the
+- * kernel. So set those accordingly from the Intel bits.
+ */
+ if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
+ set_cpu_cap(c, X86_FEATURE_IBRS);
+ set_cpu_cap(c, X86_FEATURE_IBPB);
+ }
++
+ if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
+ set_cpu_cap(c, X86_FEATURE_STIBP);
++
++ if (cpu_has(c, X86_FEATURE_AMD_IBRS))
++ set_cpu_cap(c, X86_FEATURE_IBRS);
++
++ if (cpu_has(c, X86_FEATURE_AMD_IBPB))
++ set_cpu_cap(c, X86_FEATURE_IBPB);
++
++ if (cpu_has(c, X86_FEATURE_AMD_STIBP))
++ set_cpu_cap(c, X86_FEATURE_STIBP);
+ }
+
+ void get_cpu_cap(struct cpuinfo_x86 *c)
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index db95637..4ccdfbe 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -357,7 +357,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+
+ /* cpuid 0x80000008.ebx */
+ const u32 kvm_cpuid_8000_0008_ebx_x86_features =
+- F(IBPB) | F(IBRS);
++ F(AMD_IBPB) | F(AMD_IBRS);
+
+ /* cpuid 0xC0000001.edx */
+ const u32 kvm_cpuid_C000_0001_edx_x86_features =
+@@ -619,10 +619,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ entry->eax = g_phys_as | (virt_as << 8);
+ entry->edx = 0;
+ /* IBRS and IBPB aren't necessarily present in hardware cpuid */
+- if (boot_cpu_has(X86_FEATURE_IBPB))
+- entry->ebx |= F(IBPB);
+- if (boot_cpu_has(X86_FEATURE_IBRS))
+- entry->ebx |= F(IBRS);
++ if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
++ entry->ebx |= F(AMD_IBPB);
++ if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
++ entry->ebx |= F(AMD_IBRS);
+ entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
+ cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
+ break;
+diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
+index 72551c5..410070c 100644
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -157,7 +157,7 @@ static inline bool guest_cpuid_has_ibpb(struct kvm_vcpu *vcpu)
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+- if (best && (best->ebx & bit(X86_FEATURE_IBPB)))
++ if (best && (best->ebx & bit(X86_FEATURE_AMD_IBPB)))
+ return true;
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL));
+@@ -168,7 +168,7 @@ static inline bool guest_cpuid_has_spec_ctrl(struct kvm_vcpu *vcpu)
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+- if (best && (best->ebx & bit(X86_FEATURE_IBRS)))
++ if (best && (best->ebx & bit(X86_FEATURE_AMD_IBRS)))
+ return true;
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_SSBD)));
+--
+2.7.4
+
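The synthetic bits above exist because Intel and AMD enumerate the same capabilities in different CPUID leaves. Below is a userspace sketch of the folding that init_speculation_control() performs; it assumes a toolchain whose <cpuid.h> provides __get_cpuid_count, is x86 only, and is illustrative rather than kernel code.

#include <cpuid.h>
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;
    bool ibrs = false, ibpb = false, stibp = false;

    /* Intel enumeration: CPUID.(EAX=7,ECX=0):EDX */
    if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
        ibrs  |= !!(edx & (1u << 26)); /* SPEC_CTRL implies IBRS and IBPB */
        ibpb  |= !!(edx & (1u << 26));
        stibp |= !!(edx & (1u << 27)); /* INTEL_STIBP */
    }

    /* AMD enumeration: CPUID.0x80000008:EBX */
    if (__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx)) {
        ibpb  |= !!(ebx & (1u << 12)); /* AMD_IBPB */
        ibrs  |= !!(ebx & (1u << 14)); /* AMD_IBRS */
        stibp |= !!(ebx & (1u << 15)); /* AMD_STIBP */
    }

    printf("synthetic view: IBRS=%d IBPB=%d STIBP=%d\n", ibrs, ibpb, stibp);
    return 0;
}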
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0064-x86-cpufeatures-Disentangle-MSR_SPEC_CTRL-enumeratio.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0064-x86-cpufeatures-Disentangle-MSR_SPEC_CTRL-enumeratio.patch
new file mode 100644
index 00000000..b84bc768
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0064-x86-cpufeatures-Disentangle-MSR_SPEC_CTRL-enumeratio.patch
@@ -0,0 +1,155 @@
+From 7731d9040d16874cb3fe11f52c4a238ab3fd658d Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 10 May 2018 19:13:18 +0200
+Subject: [PATCH 64/93] x86/cpufeatures: Disentangle MSR_SPEC_CTRL enumeration
+ from IBRS
+
+commit 7eb8956a7fec3c1f0abc2a5517dada99ccc8a961 upstream
+
+The availability of the SPEC_CTRL MSR is enumerated by a CPUID bit on
+Intel and implied by IBRS or STIBP support on AMD. That's just confusing
+and in case an AMD CPU has IBRS not supported because the underlying
+problem has been fixed but has another bit valid in the SPEC_CTRL MSR,
+the thing falls apart.
+
+Add a synthetic feature bit X86_FEATURE_MSR_SPEC_CTRL to denote the
+availability on both Intel and AMD.
+
+While at it replace the boot_cpu_has() checks with static_cpu_has() where
+possible. This prevents late microcode loading from exposing SPEC_CTRL, but
+late loading is already very limited as it does not reevaluate the
+mitigation options and other bits and pieces. Having static_cpu_has() is
+the simplest and least fragile solution.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 2 ++
+ arch/x86/kernel/cpu/bugs.c | 18 +++++++++++-------
+ arch/x86/kernel/cpu/common.c | 9 +++++++--
+ arch/x86/kernel/cpu/intel.c | 1 +
+ 4 files changed, 21 insertions(+), 9 deletions(-)
+
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 059437a..ca0f33f 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -197,6 +197,8 @@
+ #define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+ #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+
++#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
++
+ #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
+
+ /* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 4f8c88e..59649310 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -63,7 +63,7 @@ void __init check_bugs(void)
+ * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
+ * init code as it is not enumerated and depends on the family.
+ */
+- if (boot_cpu_has(X86_FEATURE_IBRS))
++ if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+ /* Select the proper spectre mitigation before patching alternatives */
+@@ -144,7 +144,7 @@ u64 x86_spec_ctrl_get_default(void)
+ {
+ u64 msrval = x86_spec_ctrl_base;
+
+- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
+ msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+ return msrval;
+ }
+@@ -154,10 +154,12 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
+ {
+ u64 host = x86_spec_ctrl_base;
+
+- if (!boot_cpu_has(X86_FEATURE_IBRS))
++ /* Is MSR_SPEC_CTRL implemented ? */
++ if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ return;
+
+- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ /* Intel controls SSB in MSR_SPEC_CTRL */
++ if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
+ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+
+ if (host != guest_spec_ctrl)
+@@ -169,10 +171,12 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
+ {
+ u64 host = x86_spec_ctrl_base;
+
+- if (!boot_cpu_has(X86_FEATURE_IBRS))
++ /* Is MSR_SPEC_CTRL implemented ? */
++ if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ return;
+
+- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ /* Intel controls SSB in MSR_SPEC_CTRL */
++ if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
+ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+
+ if (host != guest_spec_ctrl)
+@@ -630,7 +634,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+
+ void x86_spec_ctrl_setup_ap(void)
+ {
+- if (boot_cpu_has(X86_FEATURE_IBRS))
++ if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
+
+ if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 67bfa3c..04362282 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -729,19 +729,24 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
+ if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
+ set_cpu_cap(c, X86_FEATURE_IBRS);
+ set_cpu_cap(c, X86_FEATURE_IBPB);
++ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+ }
+
+ if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
+ set_cpu_cap(c, X86_FEATURE_STIBP);
+
+- if (cpu_has(c, X86_FEATURE_AMD_IBRS))
++ if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
+ set_cpu_cap(c, X86_FEATURE_IBRS);
++ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
++ }
+
+ if (cpu_has(c, X86_FEATURE_AMD_IBPB))
+ set_cpu_cap(c, X86_FEATURE_IBPB);
+
+- if (cpu_has(c, X86_FEATURE_AMD_STIBP))
++ if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
+ set_cpu_cap(c, X86_FEATURE_STIBP);
++ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
++ }
+ }
+
+ void get_cpu_cap(struct cpuinfo_x86 *c)
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 047adaa..7f495e8 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -153,6 +153,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
+ setup_clear_cpu_cap(X86_FEATURE_IBPB);
+ setup_clear_cpu_cap(X86_FEATURE_STIBP);
+ setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
++ setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
+ setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
+ setup_clear_cpu_cap(X86_FEATURE_SSBD);
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0065-x86-cpufeatures-Disentangle-SSBD-enumeration.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0065-x86-cpufeatures-Disentangle-SSBD-enumeration.patch
new file mode 100644
index 00000000..84d35057
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0065-x86-cpufeatures-Disentangle-SSBD-enumeration.patch
@@ -0,0 +1,163 @@
+From f8a3968ae9a100977e28f434f303fd74a0a8591b Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 10 May 2018 20:21:36 +0200
+Subject: [PATCH 65/93] x86/cpufeatures: Disentangle SSBD enumeration
+
+commit 52817587e706686fcdb27f14c1b000c92f266c96 upstream
+
+The SSBD enumeration is similarly to the other bits magically shared
+between Intel and AMD though the mechanisms are different.
+
+Make X86_FEATURE_SSBD synthetic and set it depending on the vendor specific
+features or family dependent setup.
+
+Change the Intel bit to X86_FEATURE_SPEC_CTRL_SSBD to denote that SSBD is
+controlled via MSR_SPEC_CTRL and fix up the usage sites.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 5 +++--
+ arch/x86/kernel/cpu/amd.c | 7 +------
+ arch/x86/kernel/cpu/bugs.c | 10 +++++-----
+ arch/x86/kernel/cpu/common.c | 3 +++
+ arch/x86/kernel/cpu/intel.c | 1 +
+ arch/x86/kernel/process.c | 2 +-
+ 6 files changed, 14 insertions(+), 14 deletions(-)
+
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index ca0f33f..d071767 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -198,6 +198,7 @@
+ #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+
+ #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
++#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
+
+ #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
+
+@@ -207,7 +208,7 @@
+ #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+ #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+ #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+-#define X86_FEATURE_AMD_SSBD ( 7*32+24) /* "" AMD SSBD implementation */
++#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation */
+ #define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
+ #define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
+ #define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
+@@ -314,7 +315,7 @@
+ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+-#define X86_FEATURE_SSBD (18*32+31) /* Speculative Store Bypass Disable */
++#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
+
+ /*
+ * BUG word(s)
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index acb2fcc..179d572 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -558,8 +558,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
+ * avoid RMW. If that faults, do not enable SSBD.
+ */
+ if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
++ setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
+ setup_force_cpu_cap(X86_FEATURE_SSBD);
+- setup_force_cpu_cap(X86_FEATURE_AMD_SSBD);
+ x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
+ }
+ }
+@@ -848,11 +848,6 @@ static void init_amd(struct cpuinfo_x86 *c)
+ /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
+ if (!cpu_has(c, X86_FEATURE_XENPV))
+ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+-
+- if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
+- set_cpu_cap(c, X86_FEATURE_SSBD);
+- set_cpu_cap(c, X86_FEATURE_AMD_SSBD);
+- }
+ }
+
+ #ifdef CONFIG_X86_32
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 59649310..15a6c58 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -158,8 +158,8 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
+ if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ return;
+
+- /* Intel controls SSB in MSR_SPEC_CTRL */
+- if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
++ /* SSBD controlled in MSR_SPEC_CTRL */
++ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+
+ if (host != guest_spec_ctrl)
+@@ -175,8 +175,8 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
+ if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ return;
+
+- /* Intel controls SSB in MSR_SPEC_CTRL */
+- if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
++ /* SSBD controlled in MSR_SPEC_CTRL */
++ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+
+ if (host != guest_spec_ctrl)
+@@ -188,7 +188,7 @@ static void x86_amd_ssb_disable(void)
+ {
+ u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
+
+- if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
++ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+ wrmsrl(MSR_AMD64_LS_CFG, msrval);
+ }
+
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 04362282..945e841 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -735,6 +735,9 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
+ if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
+ set_cpu_cap(c, X86_FEATURE_STIBP);
+
++ if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
++ set_cpu_cap(c, X86_FEATURE_SSBD);
++
+ if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
+ set_cpu_cap(c, X86_FEATURE_IBRS);
+ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 7f495e8..93781e3 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -156,6 +156,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
+ setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
+ setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
+ setup_clear_cpu_cap(X86_FEATURE_SSBD);
++ setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
+ }
+
+ /*
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index c344230..b3cd08e 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -207,7 +207,7 @@ static __always_inline void __speculative_store_bypass_update(unsigned long tifn
+ {
+ u64 msr;
+
+- if (static_cpu_has(X86_FEATURE_AMD_SSBD)) {
++ if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+ msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
+ wrmsrl(MSR_AMD64_LS_CFG, msr);
+ } else {
+--
+2.7.4
+
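
For illustration, the enumeration split above can be reduced to a tiny vendor-neutral check: the synthetic SSBD flag is derived from whichever vendor-specific capability (SPEC_CTRL-based on Intel, LS_CFG-based on AMD) is present. A minimal standalone sketch in plain C — the struct and field names are made up, only the derivation mirrors the kernel logic:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's vendor-specific capability bits. */
struct cpu_caps {
	bool spec_ctrl_ssbd;   /* Intel: SSBD controlled via MSR_SPEC_CTRL    */
	bool ls_cfg_ssbd;      /* AMD:   SSBD controlled via MSR_AMD64_LS_CFG */
	bool ssbd;             /* synthetic, vendor-neutral flag              */
};

/* The synthetic SSBD bit is set from whichever vendor mechanism exists,
 * mirroring the idea behind init_speculation_control()/bsp_init_amd(). */
static void derive_ssbd(struct cpu_caps *c)
{
	if (c->spec_ctrl_ssbd || c->ls_cfg_ssbd)
		c->ssbd = true;
}

int main(void)
{
	struct cpu_caps c = { .ls_cfg_ssbd = true };

	derive_ssbd(&c);
	printf("SSBD usable: %s\n", c.ssbd ? "yes" : "no");
	return 0;
}
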
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0066-x86-cpu-AMD-Fix-erratum-1076-CPB-bit.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0066-x86-cpu-AMD-Fix-erratum-1076-CPB-bit.patch
new file mode 100644
index 00000000..b9d9a567
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0066-x86-cpu-AMD-Fix-erratum-1076-CPB-bit.patch
@@ -0,0 +1,55 @@
+From b6aa89b4ab638e59beab4c2d264c02dfc887187f Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Thu, 7 Sep 2017 19:08:21 +0200
+Subject: [PATCH 66/93] x86/cpu/AMD: Fix erratum 1076 (CPB bit)
+
+commit f7f3dc00f61261cdc9ccd8b886f21bc4dffd6fd9 upstream
+
+CPUID Fn8000_0007_EDX[CPB] is wrongly 0 on models up to B1. But they do
+support CPB (AMD's Core Performance Boosting cpufreq CPU feature), so fix that.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sherry Hurwitz <sherry.hurwitz@amd.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20170907170821.16021-1-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/amd.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 179d572..21367b5 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -749,6 +749,16 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
+ }
+ }
+
++static void init_amd_zn(struct cpuinfo_x86 *c)
++{
++ /*
++ * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
++ * all up to and including B1.
++ */
++ if (c->x86_model <= 1 && c->x86_stepping <= 1)
++ set_cpu_cap(c, X86_FEATURE_CPB);
++}
++
+ static void init_amd(struct cpuinfo_x86 *c)
+ {
+ u32 dummy;
+@@ -779,6 +789,7 @@ static void init_amd(struct cpuinfo_x86 *c)
+ case 0x10: init_amd_gh(c); break;
+ case 0x12: init_amd_ln(c); break;
+ case 0x15: init_amd_bd(c); break;
++ case 0x17: init_amd_zn(c); break;
+ }
+
+ /* Enable workaround for FXSAVE leak */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0067-x86-cpufeatures-Add-FEATURE_ZEN.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0067-x86-cpufeatures-Add-FEATURE_ZEN.patch
new file mode 100644
index 00000000..4dc85820
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0067-x86-cpufeatures-Add-FEATURE_ZEN.patch
@@ -0,0 +1,48 @@
+From c9b69035094a1cadce0c634ad76ded5a4a033ff6 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 10 May 2018 16:26:00 +0200
+Subject: [PATCH 67/93] x86/cpufeatures: Add FEATURE_ZEN
+
+commit d1035d971829dcf80e8686ccde26f94b0a069472 upstream
+
+Add a ZEN feature bit so family-dependent static_cpu_has() optimizations
+can be built for ZEN.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 2 ++
+ arch/x86/kernel/cpu/amd.c | 1 +
+ 2 files changed, 3 insertions(+)
+
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index d071767..ec87b8c 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -212,6 +212,8 @@
+ #define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
+ #define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
+ #define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
++#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
++
+
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 21367b5..4c2be99 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -751,6 +751,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
+
+ static void init_amd_zn(struct cpuinfo_x86 *c)
+ {
++ set_cpu_cap(c, X86_FEATURE_ZEN);
+ /*
+ * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
+ * all up to and including B1.
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0068-x86-speculation-Handle-HT-correctly-on-AMD.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0068-x86-speculation-Handle-HT-correctly-on-AMD.patch
new file mode 100644
index 00000000..cb74bad4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0068-x86-speculation-Handle-HT-correctly-on-AMD.patch
@@ -0,0 +1,240 @@
+From cbf0028f2c499e981af020c1cdb6bff7d0b4e192 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 9 May 2018 21:53:09 +0200
+Subject: [PATCH 68/93] x86/speculation: Handle HT correctly on AMD
+
+commit 1f50ddb4f4189243c05926b842dc1a0332195f31 upstream
+
+The AMD64_LS_CFG MSR is a per core MSR on Family 17H CPUs. That means when
+hyperthreading is enabled the SSBD bit toggle needs to take both cores into
+account. Otherwise the following situation can happen:
+
+CPU0 CPU1
+
+disable SSB
+ disable SSB
+ enable SSB <- Enables it for the Core, i.e. for CPU0 as well
+
+So after the SSB enable on CPU1 the task on CPU0 runs with SSB enabled
+again.
+
+On Intel the SSBD control is per core as well, but the synchronization
+logic is implemented behind the per thread SPEC_CTRL MSR. It works like
+this:
+
+ CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
+
+i.e. if one of the threads enables a mitigation then this affects both and
+the mitigation is only disabled in the core when both threads have disabled it.
+
+Add the necessary synchronization logic for AMD family 17H. Unfortunately
+that requires a spinlock to serialize the access to the MSR, but the locks
+are only shared between siblings.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/spec-ctrl.h | 6 ++
+ arch/x86/kernel/process.c | 125 +++++++++++++++++++++++++++++++++++++--
+ arch/x86/kernel/smpboot.c | 5 ++
+ 3 files changed, 130 insertions(+), 6 deletions(-)
+
+diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
+index dc21209..0cb49c4 100644
+--- a/arch/x86/include/asm/spec-ctrl.h
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -33,6 +33,12 @@ static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
+ return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
+ }
+
++#ifdef CONFIG_SMP
++extern void speculative_store_bypass_ht_init(void);
++#else
++static inline void speculative_store_bypass_ht_init(void) { }
++#endif
++
+ extern void speculative_store_bypass_update(void);
+
+ #endif
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index b3cd08e..1e9d155 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -203,22 +203,135 @@ static inline void switch_to_bitmap(struct tss_struct *tss,
+ }
+ }
+
+-static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
++#ifdef CONFIG_SMP
++
++struct ssb_state {
++ struct ssb_state *shared_state;
++ raw_spinlock_t lock;
++ unsigned int disable_state;
++ unsigned long local_state;
++};
++
++#define LSTATE_SSB 0
++
++static DEFINE_PER_CPU(struct ssb_state, ssb_state);
++
++void speculative_store_bypass_ht_init(void)
+ {
+- u64 msr;
++ struct ssb_state *st = this_cpu_ptr(&ssb_state);
++ unsigned int this_cpu = smp_processor_id();
++ unsigned int cpu;
++
++ st->local_state = 0;
++
++ /*
++ * Shared state setup happens once on the first bringup
++ * of the CPU. It's not destroyed on CPU hotunplug.
++ */
++ if (st->shared_state)
++ return;
++
++ raw_spin_lock_init(&st->lock);
+
+- if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+- msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
++ /*
++ * Go over HT siblings and check whether one of them has set up the
++ * shared state pointer already.
++ */
++ for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
++ if (cpu == this_cpu)
++ continue;
++
++ if (!per_cpu(ssb_state, cpu).shared_state)
++ continue;
++
++ /* Link it to the state of the sibling: */
++ st->shared_state = per_cpu(ssb_state, cpu).shared_state;
++ return;
++ }
++
++ /*
++ * First HT sibling to come up on the core. Link shared state of
++ * the first HT sibling to itself. The siblings on the same core
++ * which come up later will see the shared state pointer and link
++ * themself to the state of this CPU.
++ */
++ st->shared_state = st;
++}
++
++/*
++ * Logic is: First HT sibling enables SSBD for both siblings in the core
++ * and last sibling to disable it, disables it for the whole core. This how
++ * MSR_SPEC_CTRL works in "hardware":
++ *
++ * CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
++ */
++static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
++{
++ struct ssb_state *st = this_cpu_ptr(&ssb_state);
++ u64 msr = x86_amd_ls_cfg_base;
++
++ if (!static_cpu_has(X86_FEATURE_ZEN)) {
++ msr |= ssbd_tif_to_amd_ls_cfg(tifn);
+ wrmsrl(MSR_AMD64_LS_CFG, msr);
++ return;
++ }
++
++ if (tifn & _TIF_SSBD) {
++ /*
++ * Since this can race with prctl(), block reentry on the
++ * same CPU.
++ */
++ if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
++ return;
++
++ msr |= x86_amd_ls_cfg_ssbd_mask;
++
++ raw_spin_lock(&st->shared_state->lock);
++ /* First sibling enables SSBD: */
++ if (!st->shared_state->disable_state)
++ wrmsrl(MSR_AMD64_LS_CFG, msr);
++ st->shared_state->disable_state++;
++ raw_spin_unlock(&st->shared_state->lock);
+ } else {
+- msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+- wrmsrl(MSR_IA32_SPEC_CTRL, msr);
++ if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
++ return;
++
++ raw_spin_lock(&st->shared_state->lock);
++ st->shared_state->disable_state--;
++ if (!st->shared_state->disable_state)
++ wrmsrl(MSR_AMD64_LS_CFG, msr);
++ raw_spin_unlock(&st->shared_state->lock);
+ }
+ }
++#else
++static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
++{
++ u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
++
++ wrmsrl(MSR_AMD64_LS_CFG, msr);
++}
++#endif
++
++static __always_inline void intel_set_ssb_state(unsigned long tifn)
++{
++ u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
++
++ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
++}
++
++static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
++{
++ if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
++ amd_set_core_ssb_state(tifn);
++ else
++ intel_set_ssb_state(tifn);
++}
+
+ void speculative_store_bypass_update(void)
+ {
++ preempt_disable();
+ __speculative_store_bypass_update(current_thread_info()->flags);
++ preempt_enable();
+ }
+
+ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 36171bc..c898a69 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -75,6 +75,7 @@
+ #include <asm/i8259.h>
+ #include <asm/realmode.h>
+ #include <asm/misc.h>
++#include <asm/spec-ctrl.h>
+
+ /* Number of siblings per CPU package */
+ int smp_num_siblings = 1;
+@@ -237,6 +238,8 @@ static void notrace start_secondary(void *unused)
+ */
+ check_tsc_sync_target();
+
++ speculative_store_bypass_ht_init();
++
+ /*
+ * Lock vector_lock and initialize the vectors on this cpu
+ * before setting the cpu online. We must set it online with
+@@ -1333,6 +1336,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
+ set_mtrr_aps_delayed_init();
+
+ smp_quirk_init_udelay();
++
++ speculative_store_bypass_ht_init();
+ }
+
+ void arch_enable_nonboot_cpus_begin(void)
+--
+2.7.4
+
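
The synchronization described in the changelog boils down to a per-core reference count: the first sibling that wants SSBD writes the disable bit into the shared MSR, and only the last sibling to drop the request writes it back. A minimal standalone sketch of that counting scheme, in plain C with a boolean standing in for the MSR write; the kernel version additionally tracks per-thread local state and serializes with a raw spinlock, both omitted here:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-core MSR_AMD64_LS_CFG SSBD bit. */
static bool core_ssbd_enabled;

struct core_state {
	int disable_count;	/* how many siblings currently want SSBD */
};

/* First sibling to request SSBD turns it on for the core,
 * last sibling to drop the request turns it off again. */
static void sibling_set_ssbd(struct core_state *core, bool want_ssbd)
{
	if (want_ssbd) {
		if (core->disable_count++ == 0)
			core_ssbd_enabled = true;	/* wrmsrl(LS_CFG, base | mask) */
	} else {
		if (--core->disable_count == 0)
			core_ssbd_enabled = false;	/* wrmsrl(LS_CFG, base) */
	}
}

int main(void)
{
	struct core_state core = { 0 };

	sibling_set_ssbd(&core, true);	/* thread 0 disables SSB  */
	sibling_set_ssbd(&core, true);	/* thread 1 disables SSB  */
	sibling_set_ssbd(&core, false);	/* thread 1 re-enables it */
	printf("core SSBD still active: %s\n",
	       core_ssbd_enabled ? "yes" : "no");	/* yes: thread 0 still needs it */
	return 0;
}
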
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0069-x86-bugs-KVM-Extend-speculation-control-for-VIRT_SPE.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0069-x86-bugs-KVM-Extend-speculation-control-for-VIRT_SPE.patch
new file mode 100644
index 00000000..e298d3bc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0069-x86-bugs-KVM-Extend-speculation-control-for-VIRT_SPE.patch
@@ -0,0 +1,163 @@
+From 77aaa77d68bbabee027737671cdc1318e8dfe763 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 9 May 2018 23:01:01 +0200
+Subject: [PATCH 69/93] x86/bugs, KVM: Extend speculation control for
+ VIRT_SPEC_CTRL
+
+commit ccbcd2674472a978b48c91c1fbfb66c0ff959f24 upstream
+
+AMD is proposing a VIRT_SPEC_CTRL MSR to handle the Speculative Store
+Bypass Disable via MSR_AMD64_LS_CFG so that guests do not have to care
+about the bit position of the SSBD bit, which also facilitates migration.
+Also, the sibling coordination on Family 17H CPUs can only be done on
+the host.
+
+Extend x86_spec_ctrl_set_guest() and x86_spec_ctrl_restore_host() with an
+extra argument for the VIRT_SPEC_CTRL MSR.
+
+Hand in 0 from VMX and in SVM add a new virt_spec_ctrl member to the CPU
+data structure which is going to be used in later patches for the actual
+implementation.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/spec-ctrl.h | 9 ++++++---
+ arch/x86/kernel/cpu/bugs.c | 20 ++++++++++++++++++--
+ arch/x86/kvm/svm.c | 11 +++++++++--
+ arch/x86/kvm/vmx.c | 5 +++--
+ 4 files changed, 36 insertions(+), 9 deletions(-)
+
+diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
+index 0cb49c4..6e28740 100644
+--- a/arch/x86/include/asm/spec-ctrl.h
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -10,10 +10,13 @@
+ * the guest has, while on VMEXIT we restore the host view. This
+ * would be easier if SPEC_CTRL were architecturally maskable or
+ * shadowable for guests but this is not (currently) the case.
+- * Takes the guest view of SPEC_CTRL MSR as a parameter.
++ * Takes the guest view of SPEC_CTRL MSR as a parameter and also
++ * the guest's version of VIRT_SPEC_CTRL, if emulated.
+ */
+-extern void x86_spec_ctrl_set_guest(u64);
+-extern void x86_spec_ctrl_restore_host(u64);
++extern void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl,
++ u64 guest_virt_spec_ctrl);
++extern void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl,
++ u64 guest_virt_spec_ctrl);
+
+ /* AMD specific Speculative Store Bypass MSR data */
+ extern u64 x86_amd_ls_cfg_base;
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 15a6c58..d00e246 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -150,7 +150,15 @@ u64 x86_spec_ctrl_get_default(void)
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
+
+-void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
++/**
++ * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
++ * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
++ * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
++ * (may get translated to MSR_AMD64_LS_CFG bits)
++ *
++ * Avoids writing to the MSR if the content/bits are the same
++ */
++void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+ {
+ u64 host = x86_spec_ctrl_base;
+
+@@ -167,7 +175,15 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
+
+-void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
++/**
++ * x86_spec_ctrl_restore_host - Restore host speculation control registers
++ * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
++ * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
++ * (may get translated to MSR_AMD64_LS_CFG bits)
++ *
++ * Avoids writing to the MSR if the content/bits are the same
++ */
++void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+ {
+ u64 host = x86_spec_ctrl_base;
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 9991462..481b106 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -185,6 +185,12 @@ struct vcpu_svm {
+ } host;
+
+ u64 spec_ctrl;
++ /*
++ * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
++ * translated into the appropriate L2_CFG bits on the host to
++ * perform speculative control.
++ */
++ u64 virt_spec_ctrl;
+
+ u32 *msrpm;
+
+@@ -1558,6 +1564,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+ u32 eax = 1;
+
+ svm->spec_ctrl = 0;
++ svm->virt_spec_ctrl = 0;
+
+ if (!init_event) {
+ svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
+@@ -4905,7 +4912,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+ * is no need to worry about the conditional branch over the wrmsr
+ * being speculatively taken.
+ */
+- x86_spec_ctrl_set_guest(svm->spec_ctrl);
++ x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
+
+ asm volatile (
+ "push %%" _ASM_BP "; \n\t"
+@@ -5029,7 +5036,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+
+- x86_spec_ctrl_restore_host(svm->spec_ctrl);
++ x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
+
+ reload_tss(vcpu);
+
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index c7df5c4..55af4b6 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8898,9 +8898,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ * is no need to worry about the conditional branch over the wrmsr
+ * being speculatively taken.
+ */
+- x86_spec_ctrl_set_guest(vmx->spec_ctrl);
++ x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
+
+ vmx->__launched = vmx->loaded_vmcs->launched;
++
+ asm(
+ /* Store host registers */
+ "push %%" _ASM_DX "; push %%" _ASM_BP ";"
+@@ -9036,7 +9037,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+
+- x86_spec_ctrl_restore_host(vmx->spec_ctrl);
++ x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
+
+ /* Eliminate branch target predictions from guest mode */
+ vmexit_fill_RSB();
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0070-x86-speculation-Add-virtualized-speculative-store-by.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0070-x86-speculation-Add-virtualized-speculative-store-by.patch
new file mode 100644
index 00000000..f7f668b1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0070-x86-speculation-Add-virtualized-speculative-store-by.patch
@@ -0,0 +1,104 @@
+From fa6ec76841319858ad2046107420a63feda4a0bb Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Thu, 17 May 2018 17:09:18 +0200
+Subject: [PATCH 70/93] x86/speculation: Add virtualized speculative store
+ bypass disable support
+
+commit 11fb0683493b2da112cd64c9dada221b52463bf7 upstream
+
+Some AMD processors only support a non-architectural means of enabling
+speculative store bypass disable (SSBD). To allow a simplified view of
+this to a guest, an architectural definition has been created through a new
+CPUID bit, 0x80000008_EBX[25], and a new MSR, 0xc001011f. With this, a
+hypervisor can virtualize the existence of this definition and provide an
+architectural method for using SSBD to a guest.
+
+Add the new CPUID feature, the new MSR and update the existing SSBD
+support to use this MSR when present.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/include/asm/msr-index.h | 2 ++
+ arch/x86/kernel/cpu/bugs.c | 4 +++-
+ arch/x86/kernel/process.c | 13 ++++++++++++-
+ 4 files changed, 18 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index ec87b8c..c278f27 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -274,6 +274,7 @@
+ #define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
+ #define X86_FEATURE_AMD_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
+ #define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
++#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
+
+ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
+ #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 0145a0b..ad5d0d8 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -323,6 +323,8 @@
+ #define MSR_AMD64_IBSOPDATA4 0xc001103d
+ #define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
+
++#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
++
+ /* Fam 17h MSRs */
+ #define MSR_F17H_IRPERF 0xc00000e9
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index d00e246..97987b5 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -204,7 +204,9 @@ static void x86_amd_ssb_disable(void)
+ {
+ u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
+
+- if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
++ if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
++ wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
++ else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+ wrmsrl(MSR_AMD64_LS_CFG, msrval);
+ }
+
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 1e9d155..6d9e1ee 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -312,6 +312,15 @@ static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
+ }
+ #endif
+
++static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
++{
++ /*
++ * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
++ * so ssbd_tif_to_spec_ctrl() just works.
++ */
++ wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
++}
++
+ static __always_inline void intel_set_ssb_state(unsigned long tifn)
+ {
+ u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+@@ -321,7 +330,9 @@ static __always_inline void intel_set_ssb_state(unsigned long tifn)
+
+ static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+ {
+- if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
++ if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
++ amd_set_ssb_virt_state(tifn);
++ else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+ amd_set_core_ssb_state(tifn);
+ else
+ intel_set_ssb_state(tifn);
+--
+2.7.4
+
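
The architectural interface added here can be probed from userspace: CPUID leaf 0x80000008 advertises VIRT_SSBD in EBX bit 25, and a guest that sees the bit may toggle SSBD through MSR 0xc001011f instead of the non-architectural LS_CFG mechanism. A small sketch using the GCC/Clang cpuid.h helper (x86 only; the MSR itself is of course only writable in ring 0):

#include <cpuid.h>
#include <stdio.h>

#define VIRT_SSBD_LEAF		0x80000008u
#define VIRT_SSBD_BIT		(1u << 25)	/* CPUID 0x80000008 EBX[25]     */
#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011fu	/* writable only in kernel mode */

int main(void)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

	/* __get_cpuid_count() returns 0 if the leaf is not supported. */
	if (!__get_cpuid_count(VIRT_SSBD_LEAF, 0, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x80000008 not available");
		return 1;
	}

	printf("VIRT_SSBD advertised: %s (SSBD via MSR 0x%x)\n",
	       (ebx & VIRT_SSBD_BIT) ? "yes" : "no", MSR_AMD64_VIRT_SPEC_CTRL);
	return 0;
}
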
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0071-x86-speculation-Rework-speculative_store_bypass_upda.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0071-x86-speculation-Rework-speculative_store_bypass_upda.patch
new file mode 100644
index 00000000..daf64371
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0071-x86-speculation-Rework-speculative_store_bypass_upda.patch
@@ -0,0 +1,75 @@
+From 10bd199ba2af68b40deb854851b3db51bd97531a Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 10 May 2018 20:31:44 +0200
+Subject: [PATCH 71/93] x86/speculation: Rework
+ speculative_store_bypass_update()
+
+commit 0270be3e34efb05a88bc4c422572ece038ef3608 upstream
+
+The upcoming support for the virtual SPEC_CTRL MSR on AMD needs to reuse
+speculative_store_bypass_update() to avoid code duplication. Add an
+argument for supplying a thread info (TIF) value and create a wrapper
+speculative_store_bypass_update_current() which is used at the existing
+call site.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/spec-ctrl.h | 7 ++++++-
+ arch/x86/kernel/cpu/bugs.c | 2 +-
+ arch/x86/kernel/process.c | 4 ++--
+ 3 files changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
+index 6e28740..82b6c5a 100644
+--- a/arch/x86/include/asm/spec-ctrl.h
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -42,6 +42,11 @@ extern void speculative_store_bypass_ht_init(void);
+ static inline void speculative_store_bypass_ht_init(void) { }
+ #endif
+
+-extern void speculative_store_bypass_update(void);
++extern void speculative_store_bypass_update(unsigned long tif);
++
++static inline void speculative_store_bypass_update_current(void)
++{
++ speculative_store_bypass_update(current_thread_info()->flags);
++}
+
+ #endif
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 97987b5..eddbdc8 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -597,7 +597,7 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+ * mitigation until it is next scheduled.
+ */
+ if (task == current && update)
+- speculative_store_bypass_update();
++ speculative_store_bypass_update_current();
+
+ return 0;
+ }
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 6d9e1ee..00a9047 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -338,10 +338,10 @@ static __always_inline void __speculative_store_bypass_update(unsigned long tifn
+ intel_set_ssb_state(tifn);
+ }
+
+-void speculative_store_bypass_update(void)
++void speculative_store_bypass_update(unsigned long tif)
+ {
+ preempt_disable();
+- __speculative_store_bypass_update(current_thread_info()->flags);
++ __speculative_store_bypass_update(tif);
+ preempt_enable();
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0072-x86-bugs-Unify-x86_spec_ctrl_-set_guest-restore_host.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0072-x86-bugs-Unify-x86_spec_ctrl_-set_guest-restore_host.patch
new file mode 100644
index 00000000..e3e0a67d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0072-x86-bugs-Unify-x86_spec_ctrl_-set_guest-restore_host.patch
@@ -0,0 +1,145 @@
+From f30cba1d35ebb9a07ebd54253086280080b366a6 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Sat, 12 May 2018 00:14:51 +0200
+Subject: [PATCH 72/93] x86/bugs: Unify x86_spec_ctrl_{set_guest,restore_host}
+
+commit cc69b34989210f067b2c51d5539b5f96ebcc3a01 upstream
+
+The function bodies are very similar and are going to grow more
+almost-identical code. Add a bool arg to determine whether SPEC_CTRL is
+being set for the guest or restored to the host.
+
+No functional changes.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/spec-ctrl.h | 33 +++++++++++++++++++---
+ arch/x86/kernel/cpu/bugs.c | 60 ++++++++++------------------------------
+ 2 files changed, 44 insertions(+), 49 deletions(-)
+
+diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
+index 82b6c5a..9cecbe5 100644
+--- a/arch/x86/include/asm/spec-ctrl.h
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -13,10 +13,35 @@
+ * Takes the guest view of SPEC_CTRL MSR as a parameter and also
+ * the guest's version of VIRT_SPEC_CTRL, if emulated.
+ */
+-extern void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl,
+- u64 guest_virt_spec_ctrl);
+-extern void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl,
+- u64 guest_virt_spec_ctrl);
++extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest);
++
++/**
++ * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
++ * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
++ * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
++ * (may get translated to MSR_AMD64_LS_CFG bits)
++ *
++ * Avoids writing to the MSR if the content/bits are the same
++ */
++static inline
++void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
++{
++ x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
++}
++
++/**
++ * x86_spec_ctrl_restore_host - Restore host speculation control registers
++ * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
++ * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
++ * (may get translated to MSR_AMD64_LS_CFG bits)
++ *
++ * Avoids writing to the MSR if the content/bits are the same
++ */
++static inline
++void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
++{
++ x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
++}
+
+ /* AMD specific Speculative Store Bypass MSR data */
+ extern u64 x86_amd_ls_cfg_base;
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index eddbdc8..9203150 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -150,55 +150,25 @@ u64 x86_spec_ctrl_get_default(void)
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
+
+-/**
+- * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
+- * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
+- * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
+- * (may get translated to MSR_AMD64_LS_CFG bits)
+- *
+- * Avoids writing to the MSR if the content/bits are the same
+- */
+-void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
++void
++x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
+ {
+- u64 host = x86_spec_ctrl_base;
++ struct thread_info *ti = current_thread_info();
++ u64 msr, host = x86_spec_ctrl_base;
+
+ /* Is MSR_SPEC_CTRL implemented ? */
+- if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+- return;
+-
+- /* SSBD controlled in MSR_SPEC_CTRL */
+- if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+- host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+-
+- if (host != guest_spec_ctrl)
+- wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
+-}
+-EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
+-
+-/**
+- * x86_spec_ctrl_restore_host - Restore host speculation control registers
+- * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
+- * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
+- * (may get translated to MSR_AMD64_LS_CFG bits)
+- *
+- * Avoids writing to the MSR if the content/bits are the same
+- */
+-void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+-{
+- u64 host = x86_spec_ctrl_base;
+-
+- /* Is MSR_SPEC_CTRL implemented ? */
+- if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+- return;
+-
+- /* SSBD controlled in MSR_SPEC_CTRL */
+- if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+- host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+-
+- if (host != guest_spec_ctrl)
+- wrmsrl(MSR_IA32_SPEC_CTRL, host);
++ if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
++ /* SSBD controlled in MSR_SPEC_CTRL */
++ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
++ host |= ssbd_tif_to_spec_ctrl(ti->flags);
++
++ if (host != guest_spec_ctrl) {
++ msr = setguest ? guest_spec_ctrl : host;
++ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
++ }
++ }
+ }
+-EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
++EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
+
+ static void x86_amd_ssb_disable(void)
+ {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0073-x86-bugs-Expose-x86_spec_ctrl_base-directly.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0073-x86-bugs-Expose-x86_spec_ctrl_base-directly.patch
new file mode 100644
index 00000000..49224dbb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0073-x86-bugs-Expose-x86_spec_ctrl_base-directly.patch
@@ -0,0 +1,120 @@
+From 22a75daea25a170892d8c6cbf0b740ef35219cc8 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 12 May 2018 20:49:16 +0200
+Subject: [PATCH 73/93] x86/bugs: Expose x86_spec_ctrl_base directly
+
+commit fa8ac4988249c38476f6ad678a4848a736373403 upstream
+
+x86_spec_ctrl_base is the system wide default value for the SPEC_CTRL MSR.
+x86_spec_ctrl_get_default() returns x86_spec_ctrl_base and was intended to
+prevent modification to that variable. However, the variable is read-only
+after init and already globally visible.
+
+Remove the function and export the variable instead.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 16 +++++-----------
+ arch/x86/include/asm/spec-ctrl.h | 3 ---
+ arch/x86/kernel/cpu/bugs.c | 11 +----------
+ 3 files changed, 6 insertions(+), 24 deletions(-)
+
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index bc258e6..8d9deec 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -217,16 +217,7 @@ enum spectre_v2_mitigation {
+ SPECTRE_V2_IBRS,
+ };
+
+-/*
+- * The Intel specification for the SPEC_CTRL MSR requires that we
+- * preserve any already set reserved bits at boot time (e.g. for
+- * future additions that this kernel is not currently aware of).
+- * We then set any additional mitigation bits that we want
+- * ourselves and always use this as the base for SPEC_CTRL.
+- * We also use this when handling guest entry/exit as below.
+- */
+ extern void x86_spec_ctrl_set(u64);
+-extern u64 x86_spec_ctrl_get_default(void);
+
+ /* The Speculative Store Bypass disable variants */
+ enum ssb_mitigation {
+@@ -278,6 +269,9 @@ static inline void indirect_branch_prediction_barrier(void)
+ alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
+ }
+
++/* The Intel SPEC CTRL MSR base value cache */
++extern u64 x86_spec_ctrl_base;
++
+ /*
+ * With retpoline, we must use IBRS to restrict branch prediction
+ * before calling into firmware.
+@@ -286,7 +280,7 @@ static inline void indirect_branch_prediction_barrier(void)
+ */
+ #define firmware_restrict_branch_speculation_start() \
+ do { \
+- u64 val = x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS; \
++ u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS; \
+ \
+ preempt_disable(); \
+ alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
+@@ -295,7 +289,7 @@ do { \
+
+ #define firmware_restrict_branch_speculation_end() \
+ do { \
+- u64 val = x86_spec_ctrl_get_default(); \
++ u64 val = x86_spec_ctrl_base; \
+ \
+ alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
+ X86_FEATURE_USE_IBRS_FW); \
+diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
+index 9cecbe5..763d497 100644
+--- a/arch/x86/include/asm/spec-ctrl.h
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -47,9 +47,6 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+ extern u64 x86_amd_ls_cfg_base;
+ extern u64 x86_amd_ls_cfg_ssbd_mask;
+
+-/* The Intel SPEC CTRL MSR base value cache */
+-extern u64 x86_spec_ctrl_base;
+-
+ static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
+ {
+ BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 9203150..47b7f4f 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -35,6 +35,7 @@ static void __init ssb_select_mitigation(void);
+ * writes to SPEC_CTRL contain whatever reserved bits have been set.
+ */
+ u64 __ro_after_init x86_spec_ctrl_base;
++EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+
+ /*
+ * The vendor and possibly platform specific bits which can be modified in
+@@ -140,16 +141,6 @@ void x86_spec_ctrl_set(u64 val)
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
+
+-u64 x86_spec_ctrl_get_default(void)
+-{
+- u64 msrval = x86_spec_ctrl_base;
+-
+- if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
+- msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+- return msrval;
+-}
+-EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
+-
+ void
+ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
+ {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0074-x86-bugs-Remove-x86_spec_ctrl_set.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0074-x86-bugs-Remove-x86_spec_ctrl_set.patch
new file mode 100644
index 00000000..40bf45d2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0074-x86-bugs-Remove-x86_spec_ctrl_set.patch
@@ -0,0 +1,76 @@
+From ac97f3ffd444941e88a86ea4cd8033b686ab9170 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 12 May 2018 20:53:14 +0200
+Subject: [PATCH 74/93] x86/bugs: Remove x86_spec_ctrl_set()
+
+commit 4b59bdb569453a60b752b274ca61f009e37f4dae upstream
+
+x86_spec_ctrl_set() is only used in bugs.c and the extra mask checks there
+provide no real value as both call sites can just write x86_spec_ctrl_base
+to MSR_SPEC_CTRL. x86_spec_ctrl_base is valid and does not need any extra
+masking or checking.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 2 --
+ arch/x86/kernel/cpu/bugs.c | 13 ++-----------
+ 2 files changed, 2 insertions(+), 13 deletions(-)
+
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 8d9deec..8b38df9 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -217,8 +217,6 @@ enum spectre_v2_mitigation {
+ SPECTRE_V2_IBRS,
+ };
+
+-extern void x86_spec_ctrl_set(u64);
+-
+ /* The Speculative Store Bypass disable variants */
+ enum ssb_mitigation {
+ SPEC_STORE_BYPASS_NONE,
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 47b7f4f..82a99d0 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -132,15 +132,6 @@ static const char *spectre_v2_strings[] = {
+ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+ SPECTRE_V2_NONE;
+
+-void x86_spec_ctrl_set(u64 val)
+-{
+- if (val & x86_spec_ctrl_mask)
+- WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
+- else
+- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
+-}
+-EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
+-
+ void
+ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
+ {
+@@ -502,7 +493,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
+ case X86_VENDOR_INTEL:
+ x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
+ x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
+- x86_spec_ctrl_set(SPEC_CTRL_SSBD);
++ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ break;
+ case X86_VENDOR_AMD:
+ x86_amd_ssb_disable();
+@@ -614,7 +605,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+ void x86_spec_ctrl_setup_ap(void)
+ {
+ if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+- x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
++ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+ if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
+ x86_amd_ssb_disable();
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0075-x86-bugs-Rework-spec_ctrl-base-and-mask-logic.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0075-x86-bugs-Rework-spec_ctrl-base-and-mask-logic.patch
new file mode 100644
index 00000000..27bd0430
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0075-x86-bugs-Rework-spec_ctrl-base-and-mask-logic.patch
@@ -0,0 +1,95 @@
+From 96c9747df6b51ecfe781ba6c09ded9f406d20093 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 12 May 2018 20:10:00 +0200
+Subject: [PATCH 75/93] x86/bugs: Rework spec_ctrl base and mask logic
+
+commit be6fcb5478e95bb1c91f489121238deb3abca46a upstream
+
+x86_spec_ctrl_mask is intended to mask out bits from an MSR_SPEC_CTRL value
+which are not to be modified. However, the implementation is not really used
+and the bitmask was inverted to make a check easier, which was removed in
+"x86/bugs: Remove x86_spec_ctrl_set()".
+
+Aside from that, it is missing the STIBP bit if it is supported by the
+platform, so if the mask were used in x86_virt_spec_ctrl() then it
+would prevent a guest from setting STIBP.
+
+Add the STIBP bit if supported and use the mask in x86_virt_spec_ctrl() to
+sanitize the value which is supplied by the guest.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 26 +++++++++++++++++++-------
+ 1 file changed, 19 insertions(+), 7 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 82a99d0..2ae3586 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -41,7 +41,7 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+ * The vendor and possibly platform specific bits which can be modified in
+ * x86_spec_ctrl_base.
+ */
+-static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
++static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
+
+ /*
+ * AMD specific MSR info for Speculative Store Bypass control.
+@@ -67,6 +67,10 @@ void __init check_bugs(void)
+ if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
++ /* Allow STIBP in MSR_SPEC_CTRL if supported */
++ if (boot_cpu_has(X86_FEATURE_STIBP))
++ x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
++
+ /* Select the proper spectre mitigation before patching alternatives */
+ spectre_v2_select_mitigation();
+
+@@ -135,18 +139,26 @@ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+ void
+ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
+ {
++ u64 msrval, guestval, hostval = x86_spec_ctrl_base;
+ struct thread_info *ti = current_thread_info();
+- u64 msr, host = x86_spec_ctrl_base;
+
+ /* Is MSR_SPEC_CTRL implemented ? */
+ if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
++ /*
++ * Restrict guest_spec_ctrl to supported values. Clear the
++ * modifiable bits in the host base value and or the
++ * modifiable bits from the guest value.
++ */
++ guestval = hostval & ~x86_spec_ctrl_mask;
++ guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
++
+ /* SSBD controlled in MSR_SPEC_CTRL */
+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+- host |= ssbd_tif_to_spec_ctrl(ti->flags);
++ hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
+
+- if (host != guest_spec_ctrl) {
+- msr = setguest ? guest_spec_ctrl : host;
+- wrmsrl(MSR_IA32_SPEC_CTRL, msr);
++ if (hostval != guestval) {
++ msrval = setguest ? guestval : hostval;
++ wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
+ }
+ }
+ }
+@@ -492,7 +504,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_INTEL:
+ x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
+- x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
++ x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ break;
+ case X86_VENDOR_AMD:
+--
+2.7.4
+
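
The sanitizing step introduced in x86_virt_spec_ctrl() is a plain bit merge: bits outside the modifiable mask keep the host's value, bits inside the mask take the guest's value. A minimal sketch with made-up register values — the bit positions match the SPEC_CTRL layout used above, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>

#define SPEC_CTRL_IBRS	(1ULL << 0)
#define SPEC_CTRL_STIBP	(1ULL << 1)
#define SPEC_CTRL_SSBD	(1ULL << 2)

/* Non-modifiable bits come from the host, modifiable bits from the guest. */
static uint64_t sanitize_guest_spec_ctrl(uint64_t host, uint64_t guest, uint64_t mask)
{
	return (host & ~mask) | (guest & mask);
}

int main(void)
{
	uint64_t mask  = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD;
	uint64_t host  = SPEC_CTRL_SSBD;		/* host default           */
	uint64_t guest = SPEC_CTRL_STIBP | (1ULL << 7);	/* bit 7 is not permitted */

	/* The reserved bit 7 is stripped; only STIBP survives from the guest. */
	printf("effective guest value: 0x%llx\n",
	       (unsigned long long)sanitize_guest_spec_ctrl(host, guest, mask));
	return 0;
}
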
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0076-x86-speculation-KVM-Implement-support-for-VIRT_SPEC_.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0076-x86-speculation-KVM-Implement-support-for-VIRT_SPEC_.patch
new file mode 100644
index 00000000..d7ddca7e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0076-x86-speculation-KVM-Implement-support-for-VIRT_SPEC_.patch
@@ -0,0 +1,84 @@
+From d63bb88a1ae9c702ddf7477b0e96be1fc20f8d28 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 10 May 2018 20:42:48 +0200
+Subject: [PATCH 76/93] x86/speculation, KVM: Implement support for
+ VIRT_SPEC_CTRL/LS_CFG
+
+commit 47c61b3955cf712cadfc25635bf9bc174af030ea upstream
+
+Add the necessary logic for supporting the emulated VIRT_SPEC_CTRL MSR to
+x86_virt_spec_ctrl(). If either X86_FEATURE_LS_CFG_SSBD or
+X86_FEATURE_VIRT_SSBD is set then use the new guest_virt_spec_ctrl
+argument to check whether the state must be modified on the host. The
+update goes through speculative_store_bypass_update() so the ZEN-specific
+sibling coordination can be reused.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/spec-ctrl.h | 6 ++++++
+ arch/x86/kernel/cpu/bugs.c | 30 ++++++++++++++++++++++++++++++
+ 2 files changed, 36 insertions(+)
+
+diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
+index 763d497..ae7c2c5 100644
+--- a/arch/x86/include/asm/spec-ctrl.h
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -53,6 +53,12 @@ static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
+ return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+ }
+
++static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
++{
++ BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
++ return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
++}
++
+ static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
+ {
+ return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 2ae3586..86af9b1 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -161,6 +161,36 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
+ wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
+ }
+ }
++
++ /*
++ * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
++ * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.
++ */
++ if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
++ !static_cpu_has(X86_FEATURE_VIRT_SSBD))
++ return;
++
++ /*
++ * If the host has SSBD mitigation enabled, force it in the host's
++ * virtual MSR value. If its not permanently enabled, evaluate
++ * current's TIF_SSBD thread flag.
++ */
++ if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
++ hostval = SPEC_CTRL_SSBD;
++ else
++ hostval = ssbd_tif_to_spec_ctrl(ti->flags);
++
++ /* Sanitize the guest value */
++ guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
++
++ if (hostval != guestval) {
++ unsigned long tif;
++
++ tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
++ ssbd_spec_ctrl_to_tif(hostval);
++
++ speculative_store_bypass_update(tif);
++ }
+ }
+ EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
+
+--
+2.7.4
+
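
The new ssbd_spec_ctrl_to_tif() helper is the inverse of ssbd_tif_to_spec_ctrl(): both simply shift the single SSBD bit between the SPEC_CTRL layout and the thread-flag layout. A standalone round-trip sketch; the TIF_SSBD position is assumed here purely for illustration and need not match this kernel tree:

#include <stdint.h>
#include <stdio.h>

/* Assumed bit positions, for illustration only. */
#define SPEC_CTRL_SSBD_SHIFT	2
#define SPEC_CTRL_SSBD		(1ULL << SPEC_CTRL_SSBD_SHIFT)
#define TIF_SSBD		5
#define _TIF_SSBD		(1ULL << TIF_SSBD)

/* TIF flag -> SPEC_CTRL bit: shift the bit down to position 2. */
static uint64_t ssbd_tif_to_spec_ctrl(uint64_t tifn)
{
	return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

/* SPEC_CTRL bit -> TIF flag: shift the bit back up. */
static uint64_t ssbd_spec_ctrl_to_tif(uint64_t spec_ctrl)
{
	return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

int main(void)
{
	uint64_t tif = _TIF_SSBD;
	uint64_t spec = ssbd_tif_to_spec_ctrl(tif);

	/* Round-trip: TIF flag -> SPEC_CTRL bit -> TIF flag. */
	printf("spec_ctrl: 0x%llx, back to tif: 0x%llx\n",
	       (unsigned long long)spec,
	       (unsigned long long)ssbd_spec_ctrl_to_tif(spec));
	return 0;
}
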
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0077-KVM-SVM-Implement-VIRT_SPEC_CTRL-support-for-SSBD.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0077-KVM-SVM-Implement-VIRT_SPEC_CTRL-support-for-SSBD.patch
new file mode 100644
index 00000000..de5ae0c2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0077-KVM-SVM-Implement-VIRT_SPEC_CTRL-support-for-SSBD.patch
@@ -0,0 +1,241 @@
+From 708128a64a6b750b63a5f1ca1e943c48023145b9 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Thu, 10 May 2018 22:06:39 +0200
+Subject: [PATCH 77/93] KVM: SVM: Implement VIRT_SPEC_CTRL support for SSBD
+
+commit bc226f07dcd3c9ef0b7f6236fe356ea4a9cb4769 upstream
+
+Expose the new virtualized architectural mechanism, VIRT_SSBD, for using
+speculative store bypass disable (SSBD) under SVM. This will allow guests
+to use SSBD on hardware that uses non-architectural mechanisms for enabling
+SSBD.
+
+[ tglx: Folded the migration fixup from Paolo Bonzini ]
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/kvm_host.h | 2 +-
+ arch/x86/kernel/cpu/common.c | 3 ++-
+ arch/x86/kvm/cpuid.c | 11 +++++++++--
+ arch/x86/kvm/cpuid.h | 9 +++++++++
+ arch/x86/kvm/svm.c | 21 +++++++++++++++++++--
+ arch/x86/kvm/vmx.c | 18 +++++++++++++++---
+ arch/x86/kvm/x86.c | 13 ++++---------
+ 7 files changed, 59 insertions(+), 18 deletions(-)
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 6f6ee68..fd3a854 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -864,7 +864,7 @@ struct kvm_x86_ops {
+ int (*hardware_setup)(void); /* __init */
+ void (*hardware_unsetup)(void); /* __exit */
+ bool (*cpu_has_accelerated_tpr)(void);
+- bool (*cpu_has_high_real_mode_segbase)(void);
++ bool (*has_emulated_msr)(int index);
+ void (*cpuid_update)(struct kvm_vcpu *vcpu);
+
+ int (*vm_init)(struct kvm *kvm);
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 945e841..40fc748 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -735,7 +735,8 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
+ if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
+ set_cpu_cap(c, X86_FEATURE_STIBP);
+
+- if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
++ if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
++ cpu_has(c, X86_FEATURE_VIRT_SSBD))
+ set_cpu_cap(c, X86_FEATURE_SSBD);
+
+ if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 4ccdfbe..4d3269b 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -357,7 +357,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+
+ /* cpuid 0x80000008.ebx */
+ const u32 kvm_cpuid_8000_0008_ebx_x86_features =
+- F(AMD_IBPB) | F(AMD_IBRS);
++ F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
+
+ /* cpuid 0xC0000001.edx */
+ const u32 kvm_cpuid_C000_0001_edx_x86_features =
+@@ -618,13 +618,20 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ g_phys_as = phys_as;
+ entry->eax = g_phys_as | (virt_as << 8);
+ entry->edx = 0;
+- /* IBRS and IBPB aren't necessarily present in hardware cpuid */
++ /*
++ * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
++ * hardware cpuid
++ */
+ if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
+ entry->ebx |= F(AMD_IBPB);
+ if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
+ entry->ebx |= F(AMD_IBRS);
++ if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
++ entry->ebx |= F(VIRT_SSBD);
+ entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
+ cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
++ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
++ entry->ebx |= F(VIRT_SSBD);
+ break;
+ }
+ case 0x80000019:
+diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
+index 410070c..d22695c 100644
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -182,6 +182,15 @@ static inline bool guest_cpuid_has_arch_capabilities(struct kvm_vcpu *vcpu)
+ return best && (best->edx & bit(X86_FEATURE_ARCH_CAPABILITIES));
+ }
+
++static inline bool guest_cpuid_has_virt_ssbd(struct kvm_vcpu *vcpu)
++{
++ struct kvm_cpuid_entry2 *best;
++
++ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
++ return best && (best->ebx & bit(X86_FEATURE_VIRT_SSBD));
++}
++
++
+
+ /*
+ * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 481b106..c60d8fc 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3552,6 +3552,13 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+
+ msr_info->data = svm->spec_ctrl;
+ break;
++ case MSR_AMD64_VIRT_SPEC_CTRL:
++ if (!msr_info->host_initiated &&
++ !guest_cpuid_has_virt_ssbd(vcpu))
++ return 1;
++
++ msr_info->data = svm->virt_spec_ctrl;
++ break;
+ case MSR_IA32_UCODE_REV:
+ msr_info->data = 0x01000065;
+ break;
+@@ -3679,6 +3686,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ break;
+ set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
+ break;
++ case MSR_AMD64_VIRT_SPEC_CTRL:
++ if (!msr->host_initiated &&
++ !guest_cpuid_has_virt_ssbd(vcpu))
++ return 1;
++
++ if (data & ~SPEC_CTRL_SSBD)
++ return 1;
++
++ svm->virt_spec_ctrl = data;
++ break;
+ case MSR_STAR:
+ svm->vmcb->save.star = data;
+ break;
+@@ -5138,7 +5155,7 @@ static bool svm_cpu_has_accelerated_tpr(void)
+ return false;
+ }
+
+-static bool svm_has_high_real_mode_segbase(void)
++static bool svm_has_emulated_msr(int index)
+ {
+ return true;
+ }
+@@ -5455,7 +5472,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+ .hardware_enable = svm_hardware_enable,
+ .hardware_disable = svm_hardware_disable,
+ .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
+- .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
++ .has_emulated_msr = svm_has_emulated_msr,
+
+ .vcpu_create = svm_create_vcpu,
+ .vcpu_free = svm_free_vcpu,
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 55af4b6..7b4739c 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8673,9 +8673,21 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
+ }
+ }
+
+-static bool vmx_has_high_real_mode_segbase(void)
++static bool vmx_has_emulated_msr(int index)
+ {
+- return enable_unrestricted_guest || emulate_invalid_guest_state;
++ switch (index) {
++ case MSR_IA32_SMBASE:
++ /*
++ * We cannot do SMM unless we can run the guest in big
++ * real mode.
++ */
++ return enable_unrestricted_guest || emulate_invalid_guest_state;
++ case MSR_AMD64_VIRT_SPEC_CTRL:
++ /* This is AMD only. */
++ return false;
++ default:
++ return true;
++ }
+ }
+
+ static bool vmx_mpx_supported(void)
+@@ -11304,7 +11316,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
+ .hardware_enable = hardware_enable,
+ .hardware_disable = hardware_disable,
+ .cpu_has_accelerated_tpr = report_flexpriority,
+- .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
++ .has_emulated_msr = vmx_has_emulated_msr,
+
+ .vcpu_create = vmx_create_vcpu,
+ .vcpu_free = vmx_free_vcpu,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index b27b93d..c531231 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1002,6 +1002,7 @@ static u32 emulated_msrs[] = {
+ MSR_IA32_MCG_CTL,
+ MSR_IA32_MCG_EXT_CTL,
+ MSR_IA32_SMBASE,
++ MSR_AMD64_VIRT_SPEC_CTRL,
+ };
+
+ static unsigned num_emulated_msrs;
+@@ -2650,7 +2651,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ * fringe case that is not enabled except via specific settings
+ * of the module parameters.
+ */
+- r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
++ r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
+ break;
+ case KVM_CAP_COALESCED_MMIO:
+ r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+@@ -4201,14 +4202,8 @@ static void kvm_init_msr_list(void)
+ num_msrs_to_save = j;
+
+ for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
+- switch (emulated_msrs[i]) {
+- case MSR_IA32_SMBASE:
+- if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
+- continue;
+- break;
+- default:
+- break;
+- }
++ if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
++ continue;
+
+ if (j < i)
+ emulated_msrs[j] = emulated_msrs[i];
+--
+2.7.4
+
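A minimal guest-side sketch of how the VIRT_SSBD plumbing above is meant to be consumed. The CPUID bit (0x80000008 EBX[25]) and MSR number (0xc001011f) are quoted from memory of cpufeatures.h/msr-index.h rather than from this series, so treat them as assumptions; the code is illustrative, not part of any patch.

#include <stdint.h>
#include <cpuid.h>

#define MSR_AMD64_VIRT_SPEC_CTRL  0xc001011f   /* assumed MSR number */
#define SPEC_CTRL_SSBD            (1u << 2)    /* the only bit svm_set_msr() above accepts */

/* WRMSR needs CPL0; this stands in for a guest-kernel context. */
static inline void wrmsr_raw(uint32_t msr, uint32_t lo, uint32_t hi)
{
	asm volatile("wrmsr" : : "c"(msr), "a"(lo), "d"(hi));
}

static int guest_has_virt_ssbd(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
		return 0;
	return !!(ebx & (1u << 25));	/* VIRT_SSBD (assumed bit position) */
}

static void guest_enable_ssbd(void)
{
	/* writes with anything other than SSBD set are rejected by the hypervisor */
	if (guest_has_virt_ssbd())
		wrmsr_raw(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD, 0);
}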
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0078-x86-bugs-Rename-SSBD_NO-to-SSB_NO.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0078-x86-bugs-Rename-SSBD_NO-to-SSB_NO.patch
new file mode 100644
index 00000000..f2131e66
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0078-x86-bugs-Rename-SSBD_NO-to-SSB_NO.patch
@@ -0,0 +1,48 @@
+From b5380d0ef78780a08140c0b4e8d050752e91104a Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 16 May 2018 23:18:09 -0400
+Subject: [PATCH 78/93] x86/bugs: Rename SSBD_NO to SSB_NO
+
+commit 240da953fcc6a9008c92fae5b1f727ee5ed167ab upstream
+
+The "336996 Speculative Execution Side Channel Mitigations" from
+May defines this as SSB_NO, hence let's sync up.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/msr-index.h | 2 +-
+ arch/x86/kernel/cpu/common.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index ad5d0d8..ca41d8f 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -63,7 +63,7 @@
+ #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
+ #define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
+ #define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
+-#define ARCH_CAP_SSBD_NO (1 << 4) /*
++#define ARCH_CAP_SSB_NO (1 << 4) /*
+ * Not susceptible to Speculative Store Bypass
+ * attack, so no Speculative Store Bypass
+ * control required.
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 40fc748..b0fd028 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -926,7 +926,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+ if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
+- !(ia32_cap & ARCH_CAP_SSBD_NO))
++ !(ia32_cap & ARCH_CAP_SSB_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+ if (x86_match_cpu(cpu_no_speculation))
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0079-x86-kexec-Avoid-double-free_page-upon-do_kexec_load-.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0079-x86-kexec-Avoid-double-free_page-upon-do_kexec_load-.patch
new file mode 100644
index 00000000..b3f12503
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0079-x86-kexec-Avoid-double-free_page-upon-do_kexec_load-.patch
@@ -0,0 +1,106 @@
+From f4e4c29205e3747d4cc2d033e1c46ad9725e9886 Mon Sep 17 00:00:00 2001
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Date: Wed, 9 May 2018 19:42:20 +0900
+Subject: [PATCH 79/93] x86/kexec: Avoid double free_page() upon
+ do_kexec_load() failure
+
+commit a466ef76b815b86748d9870ef2a430af7b39c710 upstream.
+
+>From ff82bedd3e12f0d3353282054ae48c3bd8c72012 Mon Sep 17 00:00:00 2001
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Date: Wed, 9 May 2018 12:12:39 +0900
+Subject: x86/kexec: Avoid double free_page() upon do_kexec_load() failure
+
+syzbot is reporting crashes after memory allocation failure inside
+do_kexec_load() [1]. This is because free_transition_pgtable() is called
+by both init_transition_pgtable() and machine_kexec_cleanup() when memory
+allocation failed inside init_transition_pgtable().
+
+Regarding 32bit code, machine_kexec_free_page_tables() is called by both
+machine_kexec_alloc_page_tables() and machine_kexec_cleanup() when memory
+allocation failed inside machine_kexec_alloc_page_tables().
+
+Fix this by leaving the error handling to machine_kexec_cleanup()
+(and optionally setting NULL after free_page()).
+
+[1] https://syzkaller.appspot.com/bug?id=91e52396168cf2bdd572fe1e1bc0bc645c1c6b40
+
+Fixes: f5deb79679af6eb4 ("x86: kexec: Use one page table in x86_64 machine_kexec")
+Fixes: 92be3d6bdf2cb349 ("kexec/i386: allocate page table pages dynamically")
+Reported-by: syzbot <syzbot+d96f60296ef613fe1d69@syzkaller.appspotmail.com>
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Baoquan He <bhe@redhat.com>
+Cc: thomas.lendacky@amd.com
+Cc: prudo@linux.vnet.ibm.com
+Cc: Huang Ying <ying.huang@intel.com>
+Cc: syzkaller-bugs@googlegroups.com
+Cc: takahiro.akashi@linaro.org
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: akpm@linux-foundation.org
+Cc: dyoung@redhat.com
+Cc: kirill.shutemov@linux.intel.com
+Link: https://lkml.kernel.org/r/201805091942.DGG12448.tMFVFSJFQOOLHO@I-love.SAKURA.ne.jp
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/machine_kexec_32.c | 6 +++++-
+ arch/x86/kernel/machine_kexec_64.c | 4 +++-
+ 2 files changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
+index 469b23d..fd7e993 100644
+--- a/arch/x86/kernel/machine_kexec_32.c
++++ b/arch/x86/kernel/machine_kexec_32.c
+@@ -71,12 +71,17 @@ static void load_segments(void)
+ static void machine_kexec_free_page_tables(struct kimage *image)
+ {
+ free_page((unsigned long)image->arch.pgd);
++ image->arch.pgd = NULL;
+ #ifdef CONFIG_X86_PAE
+ free_page((unsigned long)image->arch.pmd0);
++ image->arch.pmd0 = NULL;
+ free_page((unsigned long)image->arch.pmd1);
++ image->arch.pmd1 = NULL;
+ #endif
+ free_page((unsigned long)image->arch.pte0);
++ image->arch.pte0 = NULL;
+ free_page((unsigned long)image->arch.pte1);
++ image->arch.pte1 = NULL;
+ }
+
+ static int machine_kexec_alloc_page_tables(struct kimage *image)
+@@ -93,7 +98,6 @@ static int machine_kexec_alloc_page_tables(struct kimage *image)
+ !image->arch.pmd0 || !image->arch.pmd1 ||
+ #endif
+ !image->arch.pte0 || !image->arch.pte1) {
+- machine_kexec_free_page_tables(image);
+ return -ENOMEM;
+ }
+ return 0;
+diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
+index 8c1f218..26242cd 100644
+--- a/arch/x86/kernel/machine_kexec_64.c
++++ b/arch/x86/kernel/machine_kexec_64.c
+@@ -37,8 +37,11 @@ static struct kexec_file_ops *kexec_file_loaders[] = {
+ static void free_transition_pgtable(struct kimage *image)
+ {
+ free_page((unsigned long)image->arch.pud);
++ image->arch.pud = NULL;
+ free_page((unsigned long)image->arch.pmd);
++ image->arch.pmd = NULL;
+ free_page((unsigned long)image->arch.pte);
++ image->arch.pte = NULL;
+ }
+
+ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
+@@ -79,7 +82,6 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
+ set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
+ return 0;
+ err:
+- free_transition_pgtable(image);
+ return result;
+ }
+
+--
+2.7.4
+
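The idiom applied above, reduced to a generic userspace sketch (the struct and function names are made up for illustration): the allocation helper stops freeing on failure, the single cleanup path does all the freeing, and pointers are cleared so running that path after a partial allocation, or twice, stays harmless.

#include <stdlib.h>

struct image_tables { void *pgd, *pmd, *pte; };

static void image_free_tables(struct image_tables *img)
{
	free(img->pgd); img->pgd = NULL;
	free(img->pmd); img->pmd = NULL;
	free(img->pte); img->pte = NULL;
}

static int image_alloc_tables(struct image_tables *img)
{
	img->pgd = calloc(1, 4096);
	img->pmd = calloc(1, 4096);
	img->pte = calloc(1, 4096);
	/* do NOT free here on failure; the caller's cleanup owns that */
	return (img->pgd && img->pmd && img->pte) ? 0 : -1;
}

/* Caller pattern: allocate, use on success, and always run the cleanup exactly once. */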
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0080-KVM-VMX-Expose-SSBD-properly-to-guests.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0080-KVM-VMX-Expose-SSBD-properly-to-guests.patch
new file mode 100644
index 00000000..ce234269
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0080-KVM-VMX-Expose-SSBD-properly-to-guests.patch
@@ -0,0 +1,44 @@
+From 546e325d7b773ae3c0df848b95f06206ebc7cd87 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Mon, 21 May 2018 17:54:49 -0400
+Subject: [PATCH 80/93] KVM/VMX: Expose SSBD properly to guests
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+commit 0aa48468d00959c8a37cd3ac727284f4f7359151 upstream.
+
+X86_FEATURE_SSBD is a synthetic CPU feature - that is, its
+bit location has no relevance to the real CPUID 0x7.EBX[31]
+bit position. For that we need the new CPU feature name.
+
+Fixes: 52817587e706 ("x86/cpufeatures: Disentangle SSBD enumeration")
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: kvm@vger.kernel.org
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: stable@vger.kernel.org
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Link: https://lkml.kernel.org/r/20180521215449.26423-2-konrad.wilk@oracle.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/cpuid.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 4d3269b..8510b7b 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -382,7 +382,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+
+ /* cpuid 7.0.edx*/
+ const u32 kvm_cpuid_7_0_edx_x86_features =
+- F(SPEC_CTRL) | F(SSBD) | F(ARCH_CAPABILITIES);
++ F(SPEC_CTRL) | F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);
+
+ /* all calls to cpuid_count() should be made on the same cpu */
+ get_cpu();
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0081-KVM-x86-Update-cpuid-properly-when-CR4.OSXAVE-or-CR4.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0081-KVM-x86-Update-cpuid-properly-when-CR4.OSXAVE-or-CR4.patch
new file mode 100644
index 00000000..f44b77a1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0081-KVM-x86-Update-cpuid-properly-when-CR4.OSXAVE-or-CR4.patch
@@ -0,0 +1,63 @@
+From a41340930388022d17c5acfa7c00edc80fa486f6 Mon Sep 17 00:00:00 2001
+From: Wei Huang <wei@redhat.com>
+Date: Tue, 1 May 2018 09:49:54 -0500
+Subject: [PATCH 81/93] KVM: x86: Update cpuid properly when CR4.OSXAVE or
+ CR4.PKE is changed
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+commit c4d2188206bafa177ea58e9a25b952baa0bf7712 upstream.
+
+The CPUID bits of OSXSAVE (function=0x1) and OSPKE (func=0x7, leaf=0x0)
+allow user apps to detect if the OS has set CR4.OSXSAVE or CR4.PKE. KVM is
+supposed to update these CPUID bits when CR4 is updated. Current KVM
+code doesn't handle some special cases when updates come from emulator.
+Here is one example:
+
+ Step 1: guest boots
+ Step 2: guest OS enables XSAVE ==> CR4.OSXSAVE=1 and CPUID.OSXSAVE=1
+ Step 3: guest hot reboot ==> QEMU resets CR4 to 0, but CPUID.OSXSAVE==1
+ Step 4: guest OS checks CPUID.OSXSAVE, detects 1, then executes xgetbv
+
+Step 4 above will cause an #UD and guest crash because the guest OS hasn't
+turned on OSXSAVE yet. This patch solves the problem by comparing
+old_cr4 with cr4. If the related bits have been changed,
+kvm_update_cpuid() needs to be called.
+
+Signed-off-by: Wei Huang <wei@redhat.com>
+Reviewed-by: Bandan Das <bsd@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index c531231..27e6cf0 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7201,6 +7201,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ {
+ struct msr_data apic_base_msr;
+ int mmu_reset_needed = 0;
++ int cpuid_update_needed = 0;
+ int pending_vec, max_bits, idx;
+ struct desc_ptr dt;
+
+@@ -7232,8 +7233,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ vcpu->arch.cr0 = sregs->cr0;
+
+ mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
++ cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) &
++ (X86_CR4_OSXSAVE | X86_CR4_PKE));
+ kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
+- if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE))
++ if (cpuid_update_needed)
+ kvm_update_cpuid(vcpu);
+
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+--
+2.7.4
+
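What the four steps above look like from inside the guest, as a standalone sketch (illustrative only): XGETBV is only legal once CR4.OSXSAVE is set, and the guest learns about that through CPUID.1:ECX bit 27, so a stale CPUID bit after the hot reboot turns the xgetbv below into a #UD.

#include <stdint.h>
#include <cpuid.h>

static uint64_t read_xcr0_if_enabled(void)
{
	unsigned int eax, ebx, ecx, edx;
	uint32_t lo, hi;

	__get_cpuid(1, &eax, &ebx, &ecx, &edx);
	if (!(ecx & (1u << 27)))	/* CPUID.1:ECX.OSXSAVE */
		return 0;

	/* Faults with #UD if CR4.OSXSAVE is really clear despite the CPUID bit. */
	asm volatile("xgetbv" : "=a"(lo), "=d"(hi) : "c"(0));
	return ((uint64_t)hi << 32) | lo;
}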
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0082-kvm-x86-IA32_ARCH_CAPABILITIES-is-always-supported.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0082-kvm-x86-IA32_ARCH_CAPABILITIES-is-always-supported.patch
new file mode 100644
index 00000000..313f2577
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0082-kvm-x86-IA32_ARCH_CAPABILITIES-is-always-supported.patch
@@ -0,0 +1,54 @@
+From e34ebcda27df86037fd748254208aff7e442ff0b Mon Sep 17 00:00:00 2001
+From: Jim Mattson <jmattson@google.com>
+Date: Wed, 9 May 2018 14:29:35 -0700
+Subject: [PATCH 82/93] kvm: x86: IA32_ARCH_CAPABILITIES is always supported
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+commit 1eaafe91a0df4157521b6417b3dd8430bf5f52f0 upstream.
+
+If there is a possibility that a VM may migrate to a Skylake host,
+then the hypervisor should report IA32_ARCH_CAPABILITIES.RSBA[bit 2]
+as being set (future work, of course). This implies that
+CPUID.(EAX=7,ECX=0):EDX.ARCH_CAPABILITIES[bit 29] should be
+set. Therefore, kvm should report this CPUID bit as being supported
+whether or not the host supports it. Userspace is still free to clear
+the bit if it chooses.
+
+For more information on RSBA, see Intel's white paper, "Retpoline: A
+Branch Target Injection Mitigation" (Document Number 337131-001),
+currently available at https://bugzilla.kernel.org/show_bug.cgi?id=199511.
+
+Since the IA32_ARCH_CAPABILITIES MSR is emulated in kvm, there is no
+dependency on hardware support for this feature.
+
+Signed-off-by: Jim Mattson <jmattson@google.com>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Fixes: 28c1c9fabf48 ("KVM/VMX: Emulate MSR_IA32_ARCH_CAPABILITIES")
+Cc: stable@vger.kernel.org
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/cpuid.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 8510b7b..fbd6c62 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -468,6 +468,11 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ entry->ecx &= ~F(PKU);
+ entry->edx &= kvm_cpuid_7_0_edx_x86_features;
+ cpuid_mask(&entry->edx, CPUID_7_EDX);
++ /*
++ * We emulate ARCH_CAPABILITIES in software even
++ * if the host doesn't support it.
++ */
++ entry->edx |= F(ARCH_CAPABILITIES);
+ } else {
+ entry->ebx = 0;
+ entry->ecx = 0;
+--
+2.7.4
+
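A sketch of how a guest consumes the bit that the hunk above forces on (illustrative; the MSR number matches the msr-index.h hunk earlier in this series, and the RSBA bit position is taken from the commit message):

#include <stdint.h>
#include <cpuid.h>

#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
#define ARCH_CAP_RSBA (1u << 2)	/* "RSB Alternate", per the commit message */

static inline uint64_t rdmsr_raw(uint32_t msr)	/* requires CPL0 */
{
	uint32_t lo, hi;

	asm volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
	return ((uint64_t)hi << 32) | lo;
}

static int guest_has_rsb_alternate(void)
{
	unsigned int eax, ebx, ecx, edx;

	__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx);
	if (!(edx & (1u << 29)))	/* CPUID.(7,0):EDX.ARCH_CAPABILITIES */
		return 0;
	return !!(rdmsr_raw(MSR_IA32_ARCH_CAPABILITIES) & ARCH_CAP_RSBA);
}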
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0083-kvm-x86-fix-KVM_XEN_HVM_CONFIG-ioctl.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0083-kvm-x86-fix-KVM_XEN_HVM_CONFIG-ioctl.patch
new file mode 100644
index 00000000..b4bec832
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0083-kvm-x86-fix-KVM_XEN_HVM_CONFIG-ioctl.patch
@@ -0,0 +1,57 @@
+From 91702980566c39210225154c2a8b1cef41942737 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Thu, 26 Oct 2017 15:45:47 +0200
+Subject: [PATCH 83/93] kvm: x86: fix KVM_XEN_HVM_CONFIG ioctl
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit 51776043afa415435c7e4636204fbe4f7edc4501 ]
+
+This ioctl is obsolete (it was used by Xenner as far as I know) but
+still let's not break it gratuitously... Its handler is copying
+directly into struct kvm. Go through a bounce buffer instead, with
+the added benefit that we can actually do something useful with the
+flags argument---the previous code was exiting with -EINVAL but still
+doing the copy.
+
+This technically is a userspace ABI breakage, but since no one should be
+using the ioctl, it's a good occasion to see if someone actually
+complains.
+
+Cc: kernel-hardening@lists.openwall.com
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 27e6cf0..d7974fc 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4106,13 +4106,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
+ mutex_unlock(&kvm->lock);
+ break;
+ case KVM_XEN_HVM_CONFIG: {
++ struct kvm_xen_hvm_config xhc;
+ r = -EFAULT;
+- if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
+- sizeof(struct kvm_xen_hvm_config)))
++ if (copy_from_user(&xhc, argp, sizeof(xhc)))
+ goto out;
+ r = -EINVAL;
+- if (kvm->arch.xen_hvm_config.flags)
++ if (xhc.flags)
+ goto out;
++ memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc));
+ r = 0;
+ break;
+ }
+--
+2.7.4
+
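The same validate-then-commit shape as the fix above, in a generic kernel-style fragment (the struct and function names are hypothetical, invented for illustration): copy the user buffer into a local bounce struct, reject it while it is still local, and only then copy it into long-lived state.

#include <linux/uaccess.h>	/* copy_from_user */
#include <linux/string.h>	/* memcpy */
#include <linux/errno.h>

struct dev_config { unsigned int flags; unsigned long blob_addr; };
struct dev_state  { struct dev_config config; };

static long dev_set_config(struct dev_state *dev, const void __user *argp)
{
	struct dev_config cfg;

	if (copy_from_user(&cfg, argp, sizeof(cfg)))
		return -EFAULT;
	if (cfg.flags)				/* validate before anything is committed */
		return -EINVAL;

	memcpy(&dev->config, &cfg, sizeof(cfg));	/* commit last */
	return 0;
}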
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0084-KVM-VMX-raise-internal-error-for-exception-during-in.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0084-KVM-VMX-raise-internal-error-for-exception-during-in.patch
new file mode 100644
index 00000000..a2280307
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0084-KVM-VMX-raise-internal-error-for-exception-during-in.patch
@@ -0,0 +1,90 @@
+From 075696ba348a4c1eb20a641157f84f8b81220510 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Fri, 23 Mar 2018 09:34:00 -0700
+Subject: [PATCH 84/93] KVM: VMX: raise internal error for exception during
+ invalid protected mode state
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit add5ff7a216ee545a214013f26d1ef2f44a9c9f8 ]
+
+Exit to userspace with KVM_INTERNAL_ERROR_EMULATION if we encounter
+an exception in Protected Mode while emulating guest due to invalid
+guest state. Unlike Big RM, KVM doesn't support emulating exceptions
+in PM, i.e. PM exceptions are always injected via the VMCS. Because
+we will never do VMRESUME due to emulation_required, the exception is
+never realized and we'll keep emulating the faulting instruction over
+and over until we receive a signal.
+
+Exit to userspace iff there is a pending exception, i.e. don't exit
+simply on a requested event. The purpose of this check and exit is to
+aid in debugging a guest that is in all likelihood already doomed.
+Invalid guest state in PM is extremely limited in normal operation,
+e.g. it generally only occurs for a few instructions early in BIOS,
+and any exception at this time is all but guaranteed to be fatal.
+Non-vectored interrupts, e.g. INIT, SIPI and SMI, can be cleanly
+handled/emulated, while checking for vectored interrupts, e.g. INTR
+and NMI, without hitting false positives would add a fair amount of
+complexity for almost no benefit (getting hit by lightning seems
+more likely than encountering this specific scenario).
+
+Add a WARN_ON_ONCE to vmx_queue_exception() if we try to inject an
+exception via the VMCS and emulation_required is true.
+
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c | 20 ++++++++++++++------
+ 1 file changed, 14 insertions(+), 6 deletions(-)
+
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 7b4739c..9307c0d 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2555,6 +2555,8 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
+ return;
+ }
+
++ WARN_ON_ONCE(vmx->emulation_required);
++
+ if (kvm_exception_is_soft(nr)) {
+ vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
+ vmx->vcpu.arch.event_exit_inst_len);
+@@ -6405,12 +6407,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
+ goto out;
+ }
+
+- if (err != EMULATE_DONE) {
+- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+- vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+- vcpu->run->internal.ndata = 0;
+- return 0;
+- }
++ if (err != EMULATE_DONE)
++ goto emulation_error;
++
++ if (vmx->emulation_required && !vmx->rmode.vm86_active &&
++ vcpu->arch.exception.pending)
++ goto emulation_error;
+
+ if (vcpu->arch.halt_request) {
+ vcpu->arch.halt_request = 0;
+@@ -6426,6 +6428,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
+
+ out:
+ return ret;
++
++emulation_error:
++ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
++ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
++ vcpu->run->internal.ndata = 0;
++ return 0;
+ }
+
+ static int __grow_ple_window(int val)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0085-KVM-lapic-stop-advertising-DIRECTED_EOI-when-in-kern.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0085-KVM-lapic-stop-advertising-DIRECTED_EOI-when-in-kern.patch
new file mode 100644
index 00000000..db300b21
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0085-KVM-lapic-stop-advertising-DIRECTED_EOI-when-in-kern.patch
@@ -0,0 +1,56 @@
+From 2ece92e70fbd29fd14c1add63648b7154521b473 Mon Sep 17 00:00:00 2001
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date: Fri, 9 Feb 2018 14:01:33 +0100
+Subject: [PATCH 85/93] KVM: lapic: stop advertising DIRECTED_EOI when
+ in-kernel IOAPIC is in use
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit 0bcc3fb95b97ac2ca223a5a870287b37f56265ac ]
+
+Devices which use level-triggered interrupts under Windows 2016 with
+Hyper-V role enabled don't work: Windows disables EOI broadcast in SPIV
+unconditionally. Our in-kernel IOAPIC implementation emulates an old IOAPIC
+version which has no EOI register so EOI never happens.
+
+The issue was discovered and discussed a while ago:
+https://www.spinics.net/lists/kvm/msg148098.html
+
+While this is a guest OS bug (it should check that IOAPIC has the required
+capabilities before disabling EOI broadcast) we can workaround it in KVM:
+advertising DIRECTED_EOI with in-kernel IOAPIC makes little sense anyway.
+
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/lapic.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 650ff4a..d99e13d 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -294,8 +294,16 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu)
+ if (!lapic_in_kernel(vcpu))
+ return;
+
++ /*
++ * KVM emulates 82093AA datasheet (with in-kernel IOAPIC implementation)
++ * which doesn't have EOI register; Some buggy OSes (e.g. Windows with
++ * Hyper-V role) disable EOI broadcast in lapic not checking for IOAPIC
++ * version first and level-triggered interrupts never get EOIed in
++ * IOAPIC.
++ */
+ feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
+- if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
++ if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))) &&
++ !ioapic_in_kernel(vcpu->kvm))
+ v |= APIC_LVR_DIRECTED_EOI;
+ kvm_lapic_set_reg(apic, APIC_LVR, v);
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0086-objtool-Improve-detection-of-BUG-and-other-dead-ends.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0086-objtool-Improve-detection-of-BUG-and-other-dead-ends.patch
new file mode 100644
index 00000000..f659e885
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0086-objtool-Improve-detection-of-BUG-and-other-dead-ends.patch
@@ -0,0 +1,217 @@
+From 655125acee5c084743a8bae4ffe2b723856594ce Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Tue, 21 Feb 2017 15:35:32 -0600
+Subject: [PATCH 86/93] objtool: Improve detection of BUG() and other dead ends
+
+commit d1091c7fa3d52ebce4dd3f15d04155b3469b2f90 upstream.
+
+The BUG() macro's use of __builtin_unreachable() via the unreachable()
+macro tells gcc that the instruction is a dead end, and that it's safe
+to assume the current code path will not execute past the previous
+instruction.
+
+On x86, the BUG() macro is implemented with the 'ud2' instruction. When
+objtool's branch analysis sees that instruction, it knows the current
+code path has come to a dead end.
+
+Peter Zijlstra has been working on a patch to change the WARN macros to
+use 'ud2'. That patch will break objtool's assumption that 'ud2' is
+always a dead end.
+
+Generally it's best for objtool to avoid making those kinds of
+assumptions anyway. The more ignorant it is of kernel code internals,
+the better.
+
+So create a more generic way for objtool to detect dead ends by adding
+an annotation to the unreachable() macro. The annotation stores a
+pointer to the end of the unreachable code path in an '__unreachable'
+section. Objtool can read that section to find the dead ends.
+
+Tested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/41a6d33971462ebd944a1c60ad4bf5be86c17b77.1487712920.git.jpoimboe@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/vmlinux.lds.S | 1 +
+ include/linux/compiler-gcc.h | 13 ++++++++-
+ tools/objtool/arch.h | 5 ++--
+ tools/objtool/arch/x86/decode.c | 3 ---
+ tools/objtool/builtin-check.c | 60 ++++++++++++++++++++++++++++++++++++++---
+ 5 files changed, 71 insertions(+), 11 deletions(-)
+
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index c7194e9..4ef267f 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -353,6 +353,7 @@ SECTIONS
+ /DISCARD/ : {
+ *(.eh_frame)
+ *(__func_stack_frame_non_standard)
++ *(__unreachable)
+ }
+ }
+
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 362a1e17..b69d102 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -199,6 +199,17 @@
+ #endif
+ #endif
+
++#ifdef CONFIG_STACK_VALIDATION
++#define annotate_unreachable() ({ \
++ asm("1:\t\n" \
++ ".pushsection __unreachable, \"a\"\t\n" \
++ ".long 1b\t\n" \
++ ".popsection\t\n"); \
++})
++#else
++#define annotate_unreachable()
++#endif
++
+ /*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+@@ -208,7 +219,7 @@
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased. Really, we need to have autoconf for the kernel.
+ */
+-#define unreachable() __builtin_unreachable()
++#define unreachable() annotate_unreachable(); __builtin_unreachable()
+
+ /* Mark a function definition as prohibited from being cloned. */
+ #define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
+diff --git a/tools/objtool/arch.h b/tools/objtool/arch.h
+index f7350fc..a59e061 100644
+--- a/tools/objtool/arch.h
++++ b/tools/objtool/arch.h
+@@ -31,9 +31,8 @@
+ #define INSN_CALL_DYNAMIC 8
+ #define INSN_RETURN 9
+ #define INSN_CONTEXT_SWITCH 10
+-#define INSN_BUG 11
+-#define INSN_NOP 12
+-#define INSN_OTHER 13
++#define INSN_NOP 11
++#define INSN_OTHER 12
+ #define INSN_LAST INSN_OTHER
+
+ int arch_decode_instruction(struct elf *elf, struct section *sec,
+diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
+index 5e0dea2..9fb487f 100644
+--- a/tools/objtool/arch/x86/decode.c
++++ b/tools/objtool/arch/x86/decode.c
+@@ -118,9 +118,6 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
+ op2 == 0x35)
+ /* sysenter, sysret */
+ *type = INSN_CONTEXT_SWITCH;
+- else if (op2 == 0x0b || op2 == 0xb9)
+- /* ud2 */
+- *type = INSN_BUG;
+ else if (op2 == 0x0d || op2 == 0x1f)
+ /* nopl/nopw */
+ *type = INSN_NOP;
+diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
+index 377bff0..ad9eda9 100644
+--- a/tools/objtool/builtin-check.c
++++ b/tools/objtool/builtin-check.c
+@@ -51,7 +51,7 @@ struct instruction {
+ unsigned int len, state;
+ unsigned char type;
+ unsigned long immediate;
+- bool alt_group, visited, ignore_alts;
++ bool alt_group, visited, dead_end, ignore_alts;
+ struct symbol *call_dest;
+ struct instruction *jump_dest;
+ struct list_head alts;
+@@ -330,6 +330,54 @@ static int decode_instructions(struct objtool_file *file)
+ }
+
+ /*
++ * Find all uses of the unreachable() macro, which are code path dead ends.
++ */
++static int add_dead_ends(struct objtool_file *file)
++{
++ struct section *sec;
++ struct rela *rela;
++ struct instruction *insn;
++ bool found;
++
++ sec = find_section_by_name(file->elf, ".rela__unreachable");
++ if (!sec)
++ return 0;
++
++ list_for_each_entry(rela, &sec->rela_list, list) {
++ if (rela->sym->type != STT_SECTION) {
++ WARN("unexpected relocation symbol type in .rela__unreachable");
++ return -1;
++ }
++ insn = find_insn(file, rela->sym->sec, rela->addend);
++ if (insn)
++ insn = list_prev_entry(insn, list);
++ else if (rela->addend == rela->sym->sec->len) {
++ found = false;
++ list_for_each_entry_reverse(insn, &file->insn_list, list) {
++ if (insn->sec == rela->sym->sec) {
++ found = true;
++ break;
++ }
++ }
++
++ if (!found) {
++ WARN("can't find unreachable insn at %s+0x%x",
++ rela->sym->sec->name, rela->addend);
++ return -1;
++ }
++ } else {
++ WARN("can't find unreachable insn at %s+0x%x",
++ rela->sym->sec->name, rela->addend);
++ return -1;
++ }
++
++ insn->dead_end = true;
++ }
++
++ return 0;
++}
++
++/*
+ * Warnings shouldn't be reported for ignored functions.
+ */
+ static void add_ignores(struct objtool_file *file)
+@@ -896,6 +944,10 @@ static int decode_sections(struct objtool_file *file)
+ if (ret)
+ return ret;
+
++ ret = add_dead_ends(file);
++ if (ret)
++ return ret;
++
+ add_ignores(file);
+
+ ret = add_nospec_ignores(file);
+@@ -1094,13 +1146,13 @@ static int validate_branch(struct objtool_file *file,
+
+ return 0;
+
+- case INSN_BUG:
+- return 0;
+-
+ default:
+ break;
+ }
+
++ if (insn->dead_end)
++ return 0;
++
+ insn = next_insn_same_sec(file, insn);
+ if (!insn) {
+ WARN("%s: unexpected end of section", sec->name);
+--
+2.7.4
+
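A standalone illustration of the annotation trick the patch above adds (not kernel code; build with -no-pie or in a kernel-like environment, since the 32-bit absolute reference mirrors the kernel macro): a local label's address is pushed into a custom ELF section, and an offline tool can then walk that section's relocations to learn which text addresses are dead ends, which is exactly what add_dead_ends() does with .rela__unreachable.

#define my_unreachable() do {				\
	asm("1:\n\t"					\
	    ".pushsection __dead_ends, \"a\"\n\t"	\
	    ".long 1b\n\t"				\
	    ".popsection\n\t");				\
	__builtin_unreachable();			\
} while (0)

void my_bug(void)
{
	asm volatile("ud2");	/* traps; control never reaches the next line */
	my_unreachable();	/* this spot's address lands in the __dead_ends section */
}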
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0087-objtool-Move-checking-code-to-check.c.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0087-objtool-Move-checking-code-to-check.c.patch
new file mode 100644
index 00000000..076eb364
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0087-objtool-Move-checking-code-to-check.c.patch
@@ -0,0 +1,2802 @@
+From 1c6b7026213ec74f811957627c80513e75f6fb96 Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Wed, 28 Jun 2017 10:11:05 -0500
+Subject: [PATCH 87/93] objtool: Move checking code to check.c
+
+commit dcc914f44f065ef73685b37e59877a5bb3cb7358 upstream.
+
+In preparation for the new 'objtool undwarf generate' command, which
+will rely on 'objtool check', move the checking code from
+builtin-check.c to check.c where it can be used by other commands.
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Reviewed-by: Jiri Slaby <jslaby@suse.cz>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: live-patching@vger.kernel.org
+Link: http://lkml.kernel.org/r/294c5c695fd73c1a5000bbe5960a7c9bec4ee6b4.1498659915.git.jpoimboe@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+[backported by hand to 4.9, this was a pain... - gregkh]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/objtool/Build | 1 +
+ tools/objtool/builtin-check.c | 1337 +----------------------------------------
+ tools/objtool/check.c | 1327 ++++++++++++++++++++++++++++++++++++++++
+ tools/objtool/check.h | 51 ++
+ 4 files changed, 1392 insertions(+), 1324 deletions(-)
+ create mode 100644 tools/objtool/check.c
+ create mode 100644 tools/objtool/check.h
+
+diff --git a/tools/objtool/Build b/tools/objtool/Build
+index d6cdece..6f2e198 100644
+--- a/tools/objtool/Build
++++ b/tools/objtool/Build
+@@ -1,5 +1,6 @@
+ objtool-y += arch/$(SRCARCH)/
+ objtool-y += builtin-check.o
++objtool-y += check.o
+ objtool-y += elf.o
+ objtool-y += special.o
+ objtool-y += objtool.o
+diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
+index ad9eda9..365c34e 100644
+--- a/tools/objtool/builtin-check.c
++++ b/tools/objtool/builtin-check.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
++ * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+@@ -25,1343 +25,32 @@
+ * For more information, see tools/objtool/Documentation/stack-validation.txt.
+ */
+
+-#include <string.h>
+-#include <stdlib.h>
+ #include <subcmd/parse-options.h>
+-
+ #include "builtin.h"
+-#include "elf.h"
+-#include "special.h"
+-#include "arch.h"
+-#include "warn.h"
+-
+-#include <linux/hashtable.h>
+-
+-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+-
+-#define STATE_FP_SAVED 0x1
+-#define STATE_FP_SETUP 0x2
+-#define STATE_FENTRY 0x4
+-
+-struct instruction {
+- struct list_head list;
+- struct hlist_node hash;
+- struct section *sec;
+- unsigned long offset;
+- unsigned int len, state;
+- unsigned char type;
+- unsigned long immediate;
+- bool alt_group, visited, dead_end, ignore_alts;
+- struct symbol *call_dest;
+- struct instruction *jump_dest;
+- struct list_head alts;
+- struct symbol *func;
+-};
+-
+-struct alternative {
+- struct list_head list;
+- struct instruction *insn;
+-};
+-
+-struct objtool_file {
+- struct elf *elf;
+- struct list_head insn_list;
+- DECLARE_HASHTABLE(insn_hash, 16);
+- struct section *rodata, *whitelist;
+- bool ignore_unreachables, c_file;
+-};
+-
+-const char *objname;
+-static bool nofp;
+-
+-static struct instruction *find_insn(struct objtool_file *file,
+- struct section *sec, unsigned long offset)
+-{
+- struct instruction *insn;
+-
+- hash_for_each_possible(file->insn_hash, insn, hash, offset)
+- if (insn->sec == sec && insn->offset == offset)
+- return insn;
+-
+- return NULL;
+-}
+-
+-static struct instruction *next_insn_same_sec(struct objtool_file *file,
+- struct instruction *insn)
+-{
+- struct instruction *next = list_next_entry(insn, list);
+-
+- if (&next->list == &file->insn_list || next->sec != insn->sec)
+- return NULL;
+-
+- return next;
+-}
+-
+-static bool gcov_enabled(struct objtool_file *file)
+-{
+- struct section *sec;
+- struct symbol *sym;
+-
+- list_for_each_entry(sec, &file->elf->sections, list)
+- list_for_each_entry(sym, &sec->symbol_list, list)
+- if (!strncmp(sym->name, "__gcov_.", 8))
+- return true;
+-
+- return false;
+-}
+-
+-#define for_each_insn(file, insn) \
+- list_for_each_entry(insn, &file->insn_list, list)
+-
+-#define func_for_each_insn(file, func, insn) \
+- for (insn = find_insn(file, func->sec, func->offset); \
+- insn && &insn->list != &file->insn_list && \
+- insn->sec == func->sec && \
+- insn->offset < func->offset + func->len; \
+- insn = list_next_entry(insn, list))
+-
+-#define func_for_each_insn_continue_reverse(file, func, insn) \
+- for (insn = list_prev_entry(insn, list); \
+- &insn->list != &file->insn_list && \
+- insn->sec == func->sec && insn->offset >= func->offset; \
+- insn = list_prev_entry(insn, list))
+-
+-#define sec_for_each_insn_from(file, insn) \
+- for (; insn; insn = next_insn_same_sec(file, insn))
+-
+-
+-/*
+- * Check if the function has been manually whitelisted with the
+- * STACK_FRAME_NON_STANDARD macro, or if it should be automatically whitelisted
+- * due to its use of a context switching instruction.
+- */
+-static bool ignore_func(struct objtool_file *file, struct symbol *func)
+-{
+- struct rela *rela;
+- struct instruction *insn;
+-
+- /* check for STACK_FRAME_NON_STANDARD */
+- if (file->whitelist && file->whitelist->rela)
+- list_for_each_entry(rela, &file->whitelist->rela->rela_list, list) {
+- if (rela->sym->type == STT_SECTION &&
+- rela->sym->sec == func->sec &&
+- rela->addend == func->offset)
+- return true;
+- if (rela->sym->type == STT_FUNC && rela->sym == func)
+- return true;
+- }
+-
+- /* check if it has a context switching instruction */
+- func_for_each_insn(file, func, insn)
+- if (insn->type == INSN_CONTEXT_SWITCH)
+- return true;
+-
+- return false;
+-}
+-
+-/*
+- * This checks to see if the given function is a "noreturn" function.
+- *
+- * For global functions which are outside the scope of this object file, we
+- * have to keep a manual list of them.
+- *
+- * For local functions, we have to detect them manually by simply looking for
+- * the lack of a return instruction.
+- *
+- * Returns:
+- * -1: error
+- * 0: no dead end
+- * 1: dead end
+- */
+-static int __dead_end_function(struct objtool_file *file, struct symbol *func,
+- int recursion)
+-{
+- int i;
+- struct instruction *insn;
+- bool empty = true;
+-
+- /*
+- * Unfortunately these have to be hard coded because the noreturn
+- * attribute isn't provided in ELF data.
+- */
+- static const char * const global_noreturns[] = {
+- "__stack_chk_fail",
+- "panic",
+- "do_exit",
+- "do_task_dead",
+- "__module_put_and_exit",
+- "complete_and_exit",
+- "kvm_spurious_fault",
+- "__reiserfs_panic",
+- "lbug_with_loc"
+- };
+-
+- if (func->bind == STB_WEAK)
+- return 0;
+-
+- if (func->bind == STB_GLOBAL)
+- for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
+- if (!strcmp(func->name, global_noreturns[i]))
+- return 1;
+-
+- if (!func->sec)
+- return 0;
+-
+- func_for_each_insn(file, func, insn) {
+- empty = false;
+-
+- if (insn->type == INSN_RETURN)
+- return 0;
+- }
+-
+- if (empty)
+- return 0;
+-
+- /*
+- * A function can have a sibling call instead of a return. In that
+- * case, the function's dead-end status depends on whether the target
+- * of the sibling call returns.
+- */
+- func_for_each_insn(file, func, insn) {
+- if (insn->sec != func->sec ||
+- insn->offset >= func->offset + func->len)
+- break;
+-
+- if (insn->type == INSN_JUMP_UNCONDITIONAL) {
+- struct instruction *dest = insn->jump_dest;
+- struct symbol *dest_func;
+-
+- if (!dest)
+- /* sibling call to another file */
+- return 0;
+-
+- if (dest->sec != func->sec ||
+- dest->offset < func->offset ||
+- dest->offset >= func->offset + func->len) {
+- /* local sibling call */
+- dest_func = find_symbol_by_offset(dest->sec,
+- dest->offset);
+- if (!dest_func)
+- continue;
+-
+- if (recursion == 5) {
+- WARN_FUNC("infinite recursion (objtool bug!)",
+- dest->sec, dest->offset);
+- return -1;
+- }
+-
+- return __dead_end_function(file, dest_func,
+- recursion + 1);
+- }
+- }
+-
+- if (insn->type == INSN_JUMP_DYNAMIC && list_empty(&insn->alts))
+- /* sibling call */
+- return 0;
+- }
+-
+- return 1;
+-}
+-
+-static int dead_end_function(struct objtool_file *file, struct symbol *func)
+-{
+- return __dead_end_function(file, func, 0);
+-}
+-
+-/*
+- * Call the arch-specific instruction decoder for all the instructions and add
+- * them to the global instruction list.
+- */
+-static int decode_instructions(struct objtool_file *file)
+-{
+- struct section *sec;
+- struct symbol *func;
+- unsigned long offset;
+- struct instruction *insn;
+- int ret;
+-
+- list_for_each_entry(sec, &file->elf->sections, list) {
+-
+- if (!(sec->sh.sh_flags & SHF_EXECINSTR))
+- continue;
+-
+- for (offset = 0; offset < sec->len; offset += insn->len) {
+- insn = malloc(sizeof(*insn));
+- memset(insn, 0, sizeof(*insn));
+-
+- INIT_LIST_HEAD(&insn->alts);
+- insn->sec = sec;
+- insn->offset = offset;
+-
+- ret = arch_decode_instruction(file->elf, sec, offset,
+- sec->len - offset,
+- &insn->len, &insn->type,
+- &insn->immediate);
+- if (ret)
+- return ret;
+-
+- if (!insn->type || insn->type > INSN_LAST) {
+- WARN_FUNC("invalid instruction type %d",
+- insn->sec, insn->offset, insn->type);
+- return -1;
+- }
+-
+- hash_add(file->insn_hash, &insn->hash, insn->offset);
+- list_add_tail(&insn->list, &file->insn_list);
+- }
+-
+- list_for_each_entry(func, &sec->symbol_list, list) {
+- if (func->type != STT_FUNC)
+- continue;
+-
+- if (!find_insn(file, sec, func->offset)) {
+- WARN("%s(): can't find starting instruction",
+- func->name);
+- return -1;
+- }
+-
+- func_for_each_insn(file, func, insn)
+- if (!insn->func)
+- insn->func = func;
+- }
+- }
+-
+- return 0;
+-}
+-
+-/*
+- * Find all uses of the unreachable() macro, which are code path dead ends.
+- */
+-static int add_dead_ends(struct objtool_file *file)
+-{
+- struct section *sec;
+- struct rela *rela;
+- struct instruction *insn;
+- bool found;
+-
+- sec = find_section_by_name(file->elf, ".rela__unreachable");
+- if (!sec)
+- return 0;
+-
+- list_for_each_entry(rela, &sec->rela_list, list) {
+- if (rela->sym->type != STT_SECTION) {
+- WARN("unexpected relocation symbol type in .rela__unreachable");
+- return -1;
+- }
+- insn = find_insn(file, rela->sym->sec, rela->addend);
+- if (insn)
+- insn = list_prev_entry(insn, list);
+- else if (rela->addend == rela->sym->sec->len) {
+- found = false;
+- list_for_each_entry_reverse(insn, &file->insn_list, list) {
+- if (insn->sec == rela->sym->sec) {
+- found = true;
+- break;
+- }
+- }
+-
+- if (!found) {
+- WARN("can't find unreachable insn at %s+0x%x",
+- rela->sym->sec->name, rela->addend);
+- return -1;
+- }
+- } else {
+- WARN("can't find unreachable insn at %s+0x%x",
+- rela->sym->sec->name, rela->addend);
+- return -1;
+- }
+-
+- insn->dead_end = true;
+- }
+-
+- return 0;
+-}
+-
+-/*
+- * Warnings shouldn't be reported for ignored functions.
+- */
+-static void add_ignores(struct objtool_file *file)
+-{
+- struct instruction *insn;
+- struct section *sec;
+- struct symbol *func;
+-
+- list_for_each_entry(sec, &file->elf->sections, list) {
+- list_for_each_entry(func, &sec->symbol_list, list) {
+- if (func->type != STT_FUNC)
+- continue;
+-
+- if (!ignore_func(file, func))
+- continue;
+-
+- func_for_each_insn(file, func, insn)
+- insn->visited = true;
+- }
+- }
+-}
+-
+-/*
+- * FIXME: For now, just ignore any alternatives which add retpolines. This is
+- * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
+- * But it at least allows objtool to understand the control flow *around* the
+- * retpoline.
+- */
+-static int add_nospec_ignores(struct objtool_file *file)
+-{
+- struct section *sec;
+- struct rela *rela;
+- struct instruction *insn;
+-
+- sec = find_section_by_name(file->elf, ".rela.discard.nospec");
+- if (!sec)
+- return 0;
+-
+- list_for_each_entry(rela, &sec->rela_list, list) {
+- if (rela->sym->type != STT_SECTION) {
+- WARN("unexpected relocation symbol type in %s", sec->name);
+- return -1;
+- }
+-
+- insn = find_insn(file, rela->sym->sec, rela->addend);
+- if (!insn) {
+- WARN("bad .discard.nospec entry");
+- return -1;
+- }
+-
+- insn->ignore_alts = true;
+- }
+-
+- return 0;
+-}
+-
+-/*
+- * Find the destination instructions for all jumps.
+- */
+-static int add_jump_destinations(struct objtool_file *file)
+-{
+- struct instruction *insn;
+- struct rela *rela;
+- struct section *dest_sec;
+- unsigned long dest_off;
+-
+- for_each_insn(file, insn) {
+- if (insn->type != INSN_JUMP_CONDITIONAL &&
+- insn->type != INSN_JUMP_UNCONDITIONAL)
+- continue;
+-
+- /* skip ignores */
+- if (insn->visited)
+- continue;
+-
+- rela = find_rela_by_dest_range(insn->sec, insn->offset,
+- insn->len);
+- if (!rela) {
+- dest_sec = insn->sec;
+- dest_off = insn->offset + insn->len + insn->immediate;
+- } else if (rela->sym->type == STT_SECTION) {
+- dest_sec = rela->sym->sec;
+- dest_off = rela->addend + 4;
+- } else if (rela->sym->sec->idx) {
+- dest_sec = rela->sym->sec;
+- dest_off = rela->sym->sym.st_value + rela->addend + 4;
+- } else if (strstr(rela->sym->name, "_indirect_thunk_")) {
+- /*
+- * Retpoline jumps are really dynamic jumps in
+- * disguise, so convert them accordingly.
+- */
+- insn->type = INSN_JUMP_DYNAMIC;
+- continue;
+- } else {
+- /* sibling call */
+- insn->jump_dest = 0;
+- continue;
+- }
+-
+- insn->jump_dest = find_insn(file, dest_sec, dest_off);
+- if (!insn->jump_dest) {
+-
+- /*
+- * This is a special case where an alt instruction
+- * jumps past the end of the section. These are
+- * handled later in handle_group_alt().
+- */
+- if (!strcmp(insn->sec->name, ".altinstr_replacement"))
+- continue;
+-
+- WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
+- insn->sec, insn->offset, dest_sec->name,
+- dest_off);
+- return -1;
+- }
+- }
+-
+- return 0;
+-}
+-
+-/*
+- * Find the destination instructions for all calls.
+- */
+-static int add_call_destinations(struct objtool_file *file)
+-{
+- struct instruction *insn;
+- unsigned long dest_off;
+- struct rela *rela;
+-
+- for_each_insn(file, insn) {
+- if (insn->type != INSN_CALL)
+- continue;
+-
+- rela = find_rela_by_dest_range(insn->sec, insn->offset,
+- insn->len);
+- if (!rela) {
+- dest_off = insn->offset + insn->len + insn->immediate;
+- insn->call_dest = find_symbol_by_offset(insn->sec,
+- dest_off);
+- /*
+- * FIXME: Thanks to retpolines, it's now considered
+- * normal for a function to call within itself. So
+- * disable this warning for now.
+- */
+-#if 0
+- if (!insn->call_dest) {
+- WARN_FUNC("can't find call dest symbol at offset 0x%lx",
+- insn->sec, insn->offset, dest_off);
+- return -1;
+- }
+-#endif
+- } else if (rela->sym->type == STT_SECTION) {
+- insn->call_dest = find_symbol_by_offset(rela->sym->sec,
+- rela->addend+4);
+- if (!insn->call_dest ||
+- insn->call_dest->type != STT_FUNC) {
+- WARN_FUNC("can't find call dest symbol at %s+0x%x",
+- insn->sec, insn->offset,
+- rela->sym->sec->name,
+- rela->addend + 4);
+- return -1;
+- }
+- } else
+- insn->call_dest = rela->sym;
+- }
+-
+- return 0;
+-}
+-
+-/*
+- * The .alternatives section requires some extra special care, over and above
+- * what other special sections require:
+- *
+- * 1. Because alternatives are patched in-place, we need to insert a fake jump
+- * instruction at the end so that validate_branch() skips all the original
+- * replaced instructions when validating the new instruction path.
+- *
+- * 2. An added wrinkle is that the new instruction length might be zero. In
+- * that case the old instructions are replaced with noops. We simulate that
+- * by creating a fake jump as the only new instruction.
+- *
+- * 3. In some cases, the alternative section includes an instruction which
+- * conditionally jumps to the _end_ of the entry. We have to modify these
+- * jumps' destinations to point back to .text rather than the end of the
+- * entry in .altinstr_replacement.
+- *
+- * 4. It has been requested that we don't validate the !POPCNT feature path
+- * which is a "very very small percentage of machines".
+- */
+-static int handle_group_alt(struct objtool_file *file,
+- struct special_alt *special_alt,
+- struct instruction *orig_insn,
+- struct instruction **new_insn)
+-{
+- struct instruction *last_orig_insn, *last_new_insn, *insn, *fake_jump;
+- unsigned long dest_off;
+-
+- last_orig_insn = NULL;
+- insn = orig_insn;
+- sec_for_each_insn_from(file, insn) {
+- if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
+- break;
+-
+- if (special_alt->skip_orig)
+- insn->type = INSN_NOP;
+-
+- insn->alt_group = true;
+- last_orig_insn = insn;
+- }
+-
+- if (!next_insn_same_sec(file, last_orig_insn)) {
+- WARN("%s: don't know how to handle alternatives at end of section",
+- special_alt->orig_sec->name);
+- return -1;
+- }
+-
+- fake_jump = malloc(sizeof(*fake_jump));
+- if (!fake_jump) {
+- WARN("malloc failed");
+- return -1;
+- }
+- memset(fake_jump, 0, sizeof(*fake_jump));
+- INIT_LIST_HEAD(&fake_jump->alts);
+- fake_jump->sec = special_alt->new_sec;
+- fake_jump->offset = -1;
+- fake_jump->type = INSN_JUMP_UNCONDITIONAL;
+- fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
+-
+- if (!special_alt->new_len) {
+- *new_insn = fake_jump;
+- return 0;
+- }
+-
+- last_new_insn = NULL;
+- insn = *new_insn;
+- sec_for_each_insn_from(file, insn) {
+- if (insn->offset >= special_alt->new_off + special_alt->new_len)
+- break;
+-
+- last_new_insn = insn;
+-
+- if (insn->type != INSN_JUMP_CONDITIONAL &&
+- insn->type != INSN_JUMP_UNCONDITIONAL)
+- continue;
+-
+- if (!insn->immediate)
+- continue;
+-
+- dest_off = insn->offset + insn->len + insn->immediate;
+- if (dest_off == special_alt->new_off + special_alt->new_len)
+- insn->jump_dest = fake_jump;
+-
+- if (!insn->jump_dest) {
+- WARN_FUNC("can't find alternative jump destination",
+- insn->sec, insn->offset);
+- return -1;
+- }
+- }
+-
+- if (!last_new_insn) {
+- WARN_FUNC("can't find last new alternative instruction",
+- special_alt->new_sec, special_alt->new_off);
+- return -1;
+- }
+-
+- list_add(&fake_jump->list, &last_new_insn->list);
+-
+- return 0;
+-}
+-
+-/*
+- * A jump table entry can either convert a nop to a jump or a jump to a nop.
+- * If the original instruction is a jump, make the alt entry an effective nop
+- * by just skipping the original instruction.
+- */
+-static int handle_jump_alt(struct objtool_file *file,
+- struct special_alt *special_alt,
+- struct instruction *orig_insn,
+- struct instruction **new_insn)
+-{
+- if (orig_insn->type == INSN_NOP)
+- return 0;
+-
+- if (orig_insn->type != INSN_JUMP_UNCONDITIONAL) {
+- WARN_FUNC("unsupported instruction at jump label",
+- orig_insn->sec, orig_insn->offset);
+- return -1;
+- }
+-
+- *new_insn = list_next_entry(orig_insn, list);
+- return 0;
+-}
+-
+-/*
+- * Read all the special sections which have alternate instructions which can be
+- * patched in or redirected to at runtime. Each instruction having alternate
+- * instruction(s) has them added to its insn->alts list, which will be
+- * traversed in validate_branch().
+- */
+-static int add_special_section_alts(struct objtool_file *file)
+-{
+- struct list_head special_alts;
+- struct instruction *orig_insn, *new_insn;
+- struct special_alt *special_alt, *tmp;
+- struct alternative *alt;
+- int ret;
+-
+- ret = special_get_alts(file->elf, &special_alts);
+- if (ret)
+- return ret;
+-
+- list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
+-
+- orig_insn = find_insn(file, special_alt->orig_sec,
+- special_alt->orig_off);
+- if (!orig_insn) {
+- WARN_FUNC("special: can't find orig instruction",
+- special_alt->orig_sec, special_alt->orig_off);
+- ret = -1;
+- goto out;
+- }
+-
+- /* Ignore retpoline alternatives. */
+- if (orig_insn->ignore_alts)
+- continue;
+-
+- new_insn = NULL;
+- if (!special_alt->group || special_alt->new_len) {
+- new_insn = find_insn(file, special_alt->new_sec,
+- special_alt->new_off);
+- if (!new_insn) {
+- WARN_FUNC("special: can't find new instruction",
+- special_alt->new_sec,
+- special_alt->new_off);
+- ret = -1;
+- goto out;
+- }
+- }
+-
+- if (special_alt->group) {
+- ret = handle_group_alt(file, special_alt, orig_insn,
+- &new_insn);
+- if (ret)
+- goto out;
+- } else if (special_alt->jump_or_nop) {
+- ret = handle_jump_alt(file, special_alt, orig_insn,
+- &new_insn);
+- if (ret)
+- goto out;
+- }
+-
+- alt = malloc(sizeof(*alt));
+- if (!alt) {
+- WARN("malloc failed");
+- ret = -1;
+- goto out;
+- }
+-
+- alt->insn = new_insn;
+- list_add_tail(&alt->list, &orig_insn->alts);
+-
+- list_del(&special_alt->list);
+- free(special_alt);
+- }
+-
+-out:
+- return ret;
+-}
+-
+-static int add_switch_table(struct objtool_file *file, struct symbol *func,
+- struct instruction *insn, struct rela *table,
+- struct rela *next_table)
+-{
+- struct rela *rela = table;
+- struct instruction *alt_insn;
+- struct alternative *alt;
+-
+- list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) {
+- if (rela == next_table)
+- break;
+-
+- if (rela->sym->sec != insn->sec ||
+- rela->addend <= func->offset ||
+- rela->addend >= func->offset + func->len)
+- break;
+-
+- alt_insn = find_insn(file, insn->sec, rela->addend);
+- if (!alt_insn) {
+- WARN("%s: can't find instruction at %s+0x%x",
+- file->rodata->rela->name, insn->sec->name,
+- rela->addend);
+- return -1;
+- }
+-
+- alt = malloc(sizeof(*alt));
+- if (!alt) {
+- WARN("malloc failed");
+- return -1;
+- }
+-
+- alt->insn = alt_insn;
+- list_add_tail(&alt->list, &insn->alts);
+- }
+-
+- return 0;
+-}
+-
+-/*
+- * find_switch_table() - Given a dynamic jump, find the switch jump table in
+- * .rodata associated with it.
+- *
+- * There are 3 basic patterns:
+- *
+- * 1. jmpq *[rodata addr](,%reg,8)
+- *
+- * This is the most common case by far. It jumps to an address in a simple
+- * jump table which is stored in .rodata.
+- *
+- * 2. jmpq *[rodata addr](%rip)
+- *
+- * This is caused by a rare GCC quirk, currently only seen in three driver
+- * functions in the kernel, only with certain obscure non-distro configs.
+- *
+- * As part of an optimization, GCC makes a copy of an existing switch jump
+- * table, modifies it, and then hard-codes the jump (albeit with an indirect
+- * jump) to use a single entry in the table. The rest of the jump table and
+- * some of its jump targets remain as dead code.
+- *
+- * In such a case we can just crudely ignore all unreachable instruction
+- * warnings for the entire object file. Ideally we would just ignore them
+- * for the function, but that would require redesigning the code quite a
+- * bit. And honestly that's just not worth doing: unreachable instruction
+- * warnings are of questionable value anyway, and this is such a rare issue.
+- *
+- * 3. mov [rodata addr],%reg1
+- * ... some instructions ...
+- * jmpq *(%reg1,%reg2,8)
+- *
+- * This is a fairly uncommon pattern which is new for GCC 6. As of this
+- * writing, there are 11 occurrences of it in the allmodconfig kernel.
+- *
+- * TODO: Once we have DWARF CFI and smarter instruction decoding logic,
+- * ensure the same register is used in the mov and jump instructions.
+- */
+-static struct rela *find_switch_table(struct objtool_file *file,
+- struct symbol *func,
+- struct instruction *insn)
+-{
+- struct rela *text_rela, *rodata_rela;
+- struct instruction *orig_insn = insn;
+-
+- text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
+- if (text_rela && text_rela->sym == file->rodata->sym) {
+- /* case 1 */
+- rodata_rela = find_rela_by_dest(file->rodata,
+- text_rela->addend);
+- if (rodata_rela)
+- return rodata_rela;
+-
+- /* case 2 */
+- rodata_rela = find_rela_by_dest(file->rodata,
+- text_rela->addend + 4);
+- if (!rodata_rela)
+- return NULL;
+- file->ignore_unreachables = true;
+- return rodata_rela;
+- }
+-
+- /* case 3 */
+- func_for_each_insn_continue_reverse(file, func, insn) {
+- if (insn->type == INSN_JUMP_DYNAMIC)
+- break;
+-
+- /* allow small jumps within the range */
+- if (insn->type == INSN_JUMP_UNCONDITIONAL &&
+- insn->jump_dest &&
+- (insn->jump_dest->offset <= insn->offset ||
+- insn->jump_dest->offset > orig_insn->offset))
+- break;
+-
+- text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
+- insn->len);
+- if (text_rela && text_rela->sym == file->rodata->sym)
+- return find_rela_by_dest(file->rodata,
+- text_rela->addend);
+- }
+-
+- return NULL;
+-}
+-
+-static int add_func_switch_tables(struct objtool_file *file,
+- struct symbol *func)
+-{
+- struct instruction *insn, *prev_jump = NULL;
+- struct rela *rela, *prev_rela = NULL;
+- int ret;
+-
+- func_for_each_insn(file, func, insn) {
+- if (insn->type != INSN_JUMP_DYNAMIC)
+- continue;
+-
+- rela = find_switch_table(file, func, insn);
+- if (!rela)
+- continue;
+-
+- /*
+- * We found a switch table, but we don't know yet how big it
+- * is. Don't add it until we reach the end of the function or
+- * the beginning of another switch table in the same function.
+- */
+- if (prev_jump) {
+- ret = add_switch_table(file, func, prev_jump, prev_rela,
+- rela);
+- if (ret)
+- return ret;
+- }
+-
+- prev_jump = insn;
+- prev_rela = rela;
+- }
+-
+- if (prev_jump) {
+- ret = add_switch_table(file, func, prev_jump, prev_rela, NULL);
+- if (ret)
+- return ret;
+- }
+-
+- return 0;
+-}
+-
+-/*
+- * For some switch statements, gcc generates a jump table in the .rodata
+- * section which contains a list of addresses within the function to jump to.
+- * This finds these jump tables and adds them to the insn->alts lists.
+- */
+-static int add_switch_table_alts(struct objtool_file *file)
+-{
+- struct section *sec;
+- struct symbol *func;
+- int ret;
+-
+- if (!file->rodata || !file->rodata->rela)
+- return 0;
+-
+- list_for_each_entry(sec, &file->elf->sections, list) {
+- list_for_each_entry(func, &sec->symbol_list, list) {
+- if (func->type != STT_FUNC)
+- continue;
+-
+- ret = add_func_switch_tables(file, func);
+- if (ret)
+- return ret;
+- }
+- }
+-
+- return 0;
+-}
+-
+-static int decode_sections(struct objtool_file *file)
+-{
+- int ret;
++#include "check.h"
+
+- ret = decode_instructions(file);
+- if (ret)
+- return ret;
++bool nofp;
+
+- ret = add_dead_ends(file);
+- if (ret)
+- return ret;
+-
+- add_ignores(file);
+-
+- ret = add_nospec_ignores(file);
+- if (ret)
+- return ret;
+-
+- ret = add_jump_destinations(file);
+- if (ret)
+- return ret;
+-
+- ret = add_call_destinations(file);
+- if (ret)
+- return ret;
+-
+- ret = add_special_section_alts(file);
+- if (ret)
+- return ret;
+-
+- ret = add_switch_table_alts(file);
+- if (ret)
+- return ret;
+-
+- return 0;
+-}
+-
+-static bool is_fentry_call(struct instruction *insn)
+-{
+- if (insn->type == INSN_CALL &&
+- insn->call_dest->type == STT_NOTYPE &&
+- !strcmp(insn->call_dest->name, "__fentry__"))
+- return true;
+-
+- return false;
+-}
+-
+-static bool has_modified_stack_frame(struct instruction *insn)
+-{
+- return (insn->state & STATE_FP_SAVED) ||
+- (insn->state & STATE_FP_SETUP);
+-}
+-
+-static bool has_valid_stack_frame(struct instruction *insn)
+-{
+- return (insn->state & STATE_FP_SAVED) &&
+- (insn->state & STATE_FP_SETUP);
+-}
+-
+-static unsigned int frame_state(unsigned long state)
+-{
+- return (state & (STATE_FP_SAVED | STATE_FP_SETUP));
+-}
+-
+-/*
+- * Follow the branch starting at the given instruction, and recursively follow
+- * any other branches (jumps). Meanwhile, track the frame pointer state at
+- * each instruction and validate all the rules described in
+- * tools/objtool/Documentation/stack-validation.txt.
+- */
+-static int validate_branch(struct objtool_file *file,
+- struct instruction *first, unsigned char first_state)
+-{
+- struct alternative *alt;
+- struct instruction *insn;
+- struct section *sec;
+- struct symbol *func = NULL;
+- unsigned char state;
+- int ret;
+-
+- insn = first;
+- sec = insn->sec;
+- state = first_state;
+-
+- if (insn->alt_group && list_empty(&insn->alts)) {
+- WARN_FUNC("don't know how to handle branch to middle of alternative instruction group",
+- sec, insn->offset);
+- return 1;
+- }
+-
+- while (1) {
+- if (file->c_file && insn->func) {
+- if (func && func != insn->func) {
+- WARN("%s() falls through to next function %s()",
+- func->name, insn->func->name);
+- return 1;
+- }
+-
+- func = insn->func;
+- }
+-
+- if (insn->visited) {
+- if (frame_state(insn->state) != frame_state(state)) {
+- WARN_FUNC("frame pointer state mismatch",
+- sec, insn->offset);
+- return 1;
+- }
+-
+- return 0;
+- }
+-
+- insn->visited = true;
+- insn->state = state;
+-
+- list_for_each_entry(alt, &insn->alts, list) {
+- ret = validate_branch(file, alt->insn, state);
+- if (ret)
+- return 1;
+- }
+-
+- switch (insn->type) {
+-
+- case INSN_FP_SAVE:
+- if (!nofp) {
+- if (state & STATE_FP_SAVED) {
+- WARN_FUNC("duplicate frame pointer save",
+- sec, insn->offset);
+- return 1;
+- }
+- state |= STATE_FP_SAVED;
+- }
+- break;
+-
+- case INSN_FP_SETUP:
+- if (!nofp) {
+- if (state & STATE_FP_SETUP) {
+- WARN_FUNC("duplicate frame pointer setup",
+- sec, insn->offset);
+- return 1;
+- }
+- state |= STATE_FP_SETUP;
+- }
+- break;
+-
+- case INSN_FP_RESTORE:
+- if (!nofp) {
+- if (has_valid_stack_frame(insn))
+- state &= ~STATE_FP_SETUP;
+-
+- state &= ~STATE_FP_SAVED;
+- }
+- break;
+-
+- case INSN_RETURN:
+- if (!nofp && has_modified_stack_frame(insn)) {
+- WARN_FUNC("return without frame pointer restore",
+- sec, insn->offset);
+- return 1;
+- }
+- return 0;
+-
+- case INSN_CALL:
+- if (is_fentry_call(insn)) {
+- state |= STATE_FENTRY;
+- break;
+- }
+-
+- ret = dead_end_function(file, insn->call_dest);
+- if (ret == 1)
+- return 0;
+- if (ret == -1)
+- return 1;
+-
+- /* fallthrough */
+- case INSN_CALL_DYNAMIC:
+- if (!nofp && !has_valid_stack_frame(insn)) {
+- WARN_FUNC("call without frame pointer save/setup",
+- sec, insn->offset);
+- return 1;
+- }
+- break;
+-
+- case INSN_JUMP_CONDITIONAL:
+- case INSN_JUMP_UNCONDITIONAL:
+- if (insn->jump_dest) {
+- ret = validate_branch(file, insn->jump_dest,
+- state);
+- if (ret)
+- return 1;
+- } else if (has_modified_stack_frame(insn)) {
+- WARN_FUNC("sibling call from callable instruction with changed frame pointer",
+- sec, insn->offset);
+- return 1;
+- } /* else it's a sibling call */
+-
+- if (insn->type == INSN_JUMP_UNCONDITIONAL)
+- return 0;
+-
+- break;
+-
+- case INSN_JUMP_DYNAMIC:
+- if (list_empty(&insn->alts) &&
+- has_modified_stack_frame(insn)) {
+- WARN_FUNC("sibling call from callable instruction with changed frame pointer",
+- sec, insn->offset);
+- return 1;
+- }
+-
+- return 0;
+-
+- default:
+- break;
+- }
+-
+- if (insn->dead_end)
+- return 0;
+-
+- insn = next_insn_same_sec(file, insn);
+- if (!insn) {
+- WARN("%s: unexpected end of section", sec->name);
+- return 1;
+- }
+- }
+-
+- return 0;
+-}
+-
+-static bool is_kasan_insn(struct instruction *insn)
+-{
+- return (insn->type == INSN_CALL &&
+- !strcmp(insn->call_dest->name, "__asan_handle_no_return"));
+-}
+-
+-static bool is_ubsan_insn(struct instruction *insn)
+-{
+- return (insn->type == INSN_CALL &&
+- !strcmp(insn->call_dest->name,
+- "__ubsan_handle_builtin_unreachable"));
+-}
+-
+-static bool ignore_unreachable_insn(struct symbol *func,
+- struct instruction *insn)
+-{
+- int i;
+-
+- if (insn->type == INSN_NOP)
+- return true;
+-
+- /*
+- * Check if this (or a subsequent) instruction is related to
+- * CONFIG_UBSAN or CONFIG_KASAN.
+- *
+- * End the search at 5 instructions to avoid going into the weeds.
+- */
+- for (i = 0; i < 5; i++) {
+-
+- if (is_kasan_insn(insn) || is_ubsan_insn(insn))
+- return true;
+-
+- if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest) {
+- insn = insn->jump_dest;
+- continue;
+- }
+-
+- if (insn->offset + insn->len >= func->offset + func->len)
+- break;
+- insn = list_next_entry(insn, list);
+- }
+-
+- return false;
+-}
+-
+-static int validate_functions(struct objtool_file *file)
+-{
+- struct section *sec;
+- struct symbol *func;
+- struct instruction *insn;
+- int ret, warnings = 0;
+-
+- list_for_each_entry(sec, &file->elf->sections, list) {
+- list_for_each_entry(func, &sec->symbol_list, list) {
+- if (func->type != STT_FUNC)
+- continue;
+-
+- insn = find_insn(file, sec, func->offset);
+- if (!insn)
+- continue;
+-
+- ret = validate_branch(file, insn, 0);
+- warnings += ret;
+- }
+- }
+-
+- list_for_each_entry(sec, &file->elf->sections, list) {
+- list_for_each_entry(func, &sec->symbol_list, list) {
+- if (func->type != STT_FUNC)
+- continue;
+-
+- func_for_each_insn(file, func, insn) {
+- if (insn->visited)
+- continue;
+-
+- insn->visited = true;
+-
+- if (file->ignore_unreachables || warnings ||
+- ignore_unreachable_insn(func, insn))
+- continue;
+-
+- /*
+- * gcov produces a lot of unreachable
+- * instructions. If we get an unreachable
+- * warning and the file has gcov enabled, just
+- * ignore it, and all other such warnings for
+- * the file.
+- */
+- if (!file->ignore_unreachables &&
+- gcov_enabled(file)) {
+- file->ignore_unreachables = true;
+- continue;
+- }
+-
+- WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
+- warnings++;
+- }
+- }
+- }
+-
+- return warnings;
+-}
+-
+-static int validate_uncallable_instructions(struct objtool_file *file)
+-{
+- struct instruction *insn;
+- int warnings = 0;
+-
+- for_each_insn(file, insn) {
+- if (!insn->visited && insn->type == INSN_RETURN) {
+-
+- /*
+- * Don't warn about call instructions in unvisited
+- * retpoline alternatives.
+- */
+- if (!strcmp(insn->sec->name, ".altinstr_replacement"))
+- continue;
+-
+- WARN_FUNC("return instruction outside of a callable function",
+- insn->sec, insn->offset);
+- warnings++;
+- }
+- }
+-
+- return warnings;
+-}
+-
+-static void cleanup(struct objtool_file *file)
+-{
+- struct instruction *insn, *tmpinsn;
+- struct alternative *alt, *tmpalt;
+-
+- list_for_each_entry_safe(insn, tmpinsn, &file->insn_list, list) {
+- list_for_each_entry_safe(alt, tmpalt, &insn->alts, list) {
+- list_del(&alt->list);
+- free(alt);
+- }
+- list_del(&insn->list);
+- hash_del(&insn->hash);
+- free(insn);
+- }
+- elf_close(file->elf);
+-}
+-
+-const char * const check_usage[] = {
++static const char * const check_usage[] = {
+ "objtool check [<options>] file.o",
+ NULL,
+ };
+
++const struct option check_options[] = {
++ OPT_BOOLEAN('f', "no-fp", &nofp, "Skip frame pointer validation"),
++ OPT_END(),
++};
++
+ int cmd_check(int argc, const char **argv)
+ {
+- struct objtool_file file;
+- int ret, warnings = 0;
++ const char *objname;
+
+- const struct option options[] = {
+- OPT_BOOLEAN('f', "no-fp", &nofp, "Skip frame pointer validation"),
+- OPT_END(),
+- };
+-
+- argc = parse_options(argc, argv, options, check_usage, 0);
++ argc = parse_options(argc, argv, check_options, check_usage, 0);
+
+ if (argc != 1)
+- usage_with_options(check_usage, options);
++ usage_with_options(check_usage, check_options);
+
+ objname = argv[0];
+
+- file.elf = elf_open(objname);
+- if (!file.elf) {
+- fprintf(stderr, "error reading elf file %s\n", objname);
+- return 1;
+- }
+-
+- INIT_LIST_HEAD(&file.insn_list);
+- hash_init(file.insn_hash);
+- file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard");
+- file.rodata = find_section_by_name(file.elf, ".rodata");
+- file.ignore_unreachables = false;
+- file.c_file = find_section_by_name(file.elf, ".comment");
+-
+- ret = decode_sections(&file);
+- if (ret < 0)
+- goto out;
+- warnings += ret;
+-
+- ret = validate_functions(&file);
+- if (ret < 0)
+- goto out;
+- warnings += ret;
+-
+- ret = validate_uncallable_instructions(&file);
+- if (ret < 0)
+- goto out;
+- warnings += ret;
+-
+-out:
+- cleanup(&file);
+-
+- /* ignore warnings for now until we get all the code cleaned up */
+- if (ret || warnings)
+- return 0;
+- return 0;
++ return check(objname, nofp);
+ }
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+new file mode 100644
+index 0000000..b7a0af5
+--- /dev/null
++++ b/tools/objtool/check.c
+@@ -0,0 +1,1327 @@
++/*
++ * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2
++ * of the License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <string.h>
++#include <stdlib.h>
++
++#include "check.h"
++#include "elf.h"
++#include "special.h"
++#include "arch.h"
++#include "warn.h"
++
++#include <linux/hashtable.h>
++
++#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
++
++#define STATE_FP_SAVED 0x1
++#define STATE_FP_SETUP 0x2
++#define STATE_FENTRY 0x4
++
++struct alternative {
++ struct list_head list;
++ struct instruction *insn;
++};
++
++const char *objname;
++static bool nofp;
++
++static struct instruction *find_insn(struct objtool_file *file,
++ struct section *sec, unsigned long offset)
++{
++ struct instruction *insn;
++
++ hash_for_each_possible(file->insn_hash, insn, hash, offset)
++ if (insn->sec == sec && insn->offset == offset)
++ return insn;
++
++ return NULL;
++}
++
++static struct instruction *next_insn_same_sec(struct objtool_file *file,
++ struct instruction *insn)
++{
++ struct instruction *next = list_next_entry(insn, list);
++
++ if (&next->list == &file->insn_list || next->sec != insn->sec)
++ return NULL;
++
++ return next;
++}
++
++static bool gcov_enabled(struct objtool_file *file)
++{
++ struct section *sec;
++ struct symbol *sym;
++
++ list_for_each_entry(sec, &file->elf->sections, list)
++ list_for_each_entry(sym, &sec->symbol_list, list)
++ if (!strncmp(sym->name, "__gcov_.", 8))
++ return true;
++
++ return false;
++}
++
++#define for_each_insn(file, insn) \
++ list_for_each_entry(insn, &file->insn_list, list)
++
++#define func_for_each_insn(file, func, insn) \
++ for (insn = find_insn(file, func->sec, func->offset); \
++ insn && &insn->list != &file->insn_list && \
++ insn->sec == func->sec && \
++ insn->offset < func->offset + func->len; \
++ insn = list_next_entry(insn, list))
++
++#define func_for_each_insn_continue_reverse(file, func, insn) \
++ for (insn = list_prev_entry(insn, list); \
++ &insn->list != &file->insn_list && \
++ insn->sec == func->sec && insn->offset >= func->offset; \
++ insn = list_prev_entry(insn, list))
++
++#define sec_for_each_insn_from(file, insn) \
++ for (; insn; insn = next_insn_same_sec(file, insn))
++
++
++/*
++ * Check if the function has been manually whitelisted with the
++ * STACK_FRAME_NON_STANDARD macro, or if it should be automatically whitelisted
++ * due to its use of a context switching instruction.
++ */
++static bool ignore_func(struct objtool_file *file, struct symbol *func)
++{
++ struct rela *rela;
++ struct instruction *insn;
++
++ /* check for STACK_FRAME_NON_STANDARD */
++ if (file->whitelist && file->whitelist->rela)
++ list_for_each_entry(rela, &file->whitelist->rela->rela_list, list) {
++ if (rela->sym->type == STT_SECTION &&
++ rela->sym->sec == func->sec &&
++ rela->addend == func->offset)
++ return true;
++ if (rela->sym->type == STT_FUNC && rela->sym == func)
++ return true;
++ }
++
++ /* check if it has a context switching instruction */
++ func_for_each_insn(file, func, insn)
++ if (insn->type == INSN_CONTEXT_SWITCH)
++ return true;
++
++ return false;
++}
++
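For illustration (not taken from the patch), a minimal sketch of the construct the whitelist above matches: a function tagged with STACK_FRAME_NON_STANDARD() from include/linux/frame.h drops a relocation into the .discard.func_stack_frame_non_standard section that ignore_func() walks. The function name and body below are invented for the example.

    /* Hypothetical example: a function whose hand-written asm breaks the
     * frame pointer rules on purpose, so it is whitelisted rather than
     * fixed.  The macro records its address in the
     * .discard.func_stack_frame_non_standard section read above. */
    static int odd_asm_trampoline(void *arg)
    {
            asm volatile("nop" ::: "memory");   /* stand-in for the real asm */
            return 0;
    }
    STACK_FRAME_NON_STANDARD(odd_asm_trampoline);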
++/*
++ * This checks to see if the given function is a "noreturn" function.
++ *
++ * For global functions which are outside the scope of this object file, we
++ * have to keep a manual list of them.
++ *
++ * For local functions, we have to detect them manually by simply looking for
++ * the lack of a return instruction.
++ *
++ * Returns:
++ * -1: error
++ * 0: no dead end
++ * 1: dead end
++ */
++static int __dead_end_function(struct objtool_file *file, struct symbol *func,
++ int recursion)
++{
++ int i;
++ struct instruction *insn;
++ bool empty = true;
++
++ /*
++ * Unfortunately these have to be hard coded because the noreturn
++ * attribute isn't provided in ELF data.
++ */
++ static const char * const global_noreturns[] = {
++ "__stack_chk_fail",
++ "panic",
++ "do_exit",
++ "do_task_dead",
++ "__module_put_and_exit",
++ "complete_and_exit",
++ "kvm_spurious_fault",
++ "__reiserfs_panic",
++ "lbug_with_loc"
++ };
++
++ if (func->bind == STB_WEAK)
++ return 0;
++
++ if (func->bind == STB_GLOBAL)
++ for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
++ if (!strcmp(func->name, global_noreturns[i]))
++ return 1;
++
++ if (!func->sec)
++ return 0;
++
++ func_for_each_insn(file, func, insn) {
++ empty = false;
++
++ if (insn->type == INSN_RETURN)
++ return 0;
++ }
++
++ if (empty)
++ return 0;
++
++ /*
++ * A function can have a sibling call instead of a return. In that
++ * case, the function's dead-end status depends on whether the target
++ * of the sibling call returns.
++ */
++ func_for_each_insn(file, func, insn) {
++ if (insn->sec != func->sec ||
++ insn->offset >= func->offset + func->len)
++ break;
++
++ if (insn->type == INSN_JUMP_UNCONDITIONAL) {
++ struct instruction *dest = insn->jump_dest;
++ struct symbol *dest_func;
++
++ if (!dest)
++ /* sibling call to another file */
++ return 0;
++
++ if (dest->sec != func->sec ||
++ dest->offset < func->offset ||
++ dest->offset >= func->offset + func->len) {
++ /* local sibling call */
++ dest_func = find_symbol_by_offset(dest->sec,
++ dest->offset);
++ if (!dest_func)
++ continue;
++
++ if (recursion == 5) {
++ WARN_FUNC("infinite recursion (objtool bug!)",
++ dest->sec, dest->offset);
++ return -1;
++ }
++
++ return __dead_end_function(file, dest_func,
++ recursion + 1);
++ }
++ }
++
++ if (insn->type == INSN_JUMP_DYNAMIC && list_empty(&insn->alts))
++ /* sibling call */
++ return 0;
++ }
++
++ return 1;
++}
++
++static int dead_end_function(struct objtool_file *file, struct symbol *func)
++{
++ return __dead_end_function(file, func, 0);
++}
++
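As a hedged illustration, invented for this note rather than taken from the kernel: a local function that ends in a call to panic() never gets a return instruction, so the scan above finds no INSN_RETURN and reports the function as a dead end, and validate_branch() then stops following code paths after calls to it.

    /* Hypothetical example: no return instruction is ever emitted here,
     * because panic() is on the global_noreturns list and never returns.
     * __dead_end_function() therefore reports a dead end for this symbol. */
    static void fatal_config_error(const char *what)
    {
            pr_emerg("bad configuration: %s\n", what);
            panic("unrecoverable configuration error");
    }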
++/*
++ * Call the arch-specific instruction decoder for all the instructions and add
++ * them to the global instruction list.
++ */
++static int decode_instructions(struct objtool_file *file)
++{
++ struct section *sec;
++ struct symbol *func;
++ unsigned long offset;
++ struct instruction *insn;
++ int ret;
++
++ list_for_each_entry(sec, &file->elf->sections, list) {
++
++ if (!(sec->sh.sh_flags & SHF_EXECINSTR))
++ continue;
++
++ for (offset = 0; offset < sec->len; offset += insn->len) {
++ insn = malloc(sizeof(*insn));
++ memset(insn, 0, sizeof(*insn));
++
++ INIT_LIST_HEAD(&insn->alts);
++ insn->sec = sec;
++ insn->offset = offset;
++
++ ret = arch_decode_instruction(file->elf, sec, offset,
++ sec->len - offset,
++ &insn->len, &insn->type,
++ &insn->immediate);
++ if (ret)
++ return ret;
++
++ if (!insn->type || insn->type > INSN_LAST) {
++ WARN_FUNC("invalid instruction type %d",
++ insn->sec, insn->offset, insn->type);
++ return -1;
++ }
++
++ hash_add(file->insn_hash, &insn->hash, insn->offset);
++ list_add_tail(&insn->list, &file->insn_list);
++ }
++
++ list_for_each_entry(func, &sec->symbol_list, list) {
++ if (func->type != STT_FUNC)
++ continue;
++
++ if (!find_insn(file, sec, func->offset)) {
++ WARN("%s(): can't find starting instruction",
++ func->name);
++ return -1;
++ }
++
++ func_for_each_insn(file, func, insn)
++ if (!insn->func)
++ insn->func = func;
++ }
++ }
++
++ return 0;
++}
++
++/*
++ * Find all uses of the unreachable() macro, which are code path dead ends.
++ */
++static int add_dead_ends(struct objtool_file *file)
++{
++ struct section *sec;
++ struct rela *rela;
++ struct instruction *insn;
++ bool found;
++
++ sec = find_section_by_name(file->elf, ".rela__unreachable");
++ if (!sec)
++ return 0;
++
++ list_for_each_entry(rela, &sec->rela_list, list) {
++ if (rela->sym->type != STT_SECTION) {
++ WARN("unexpected relocation symbol type in .rela__unreachable");
++ return -1;
++ }
++ insn = find_insn(file, rela->sym->sec, rela->addend);
++ if (insn)
++ insn = list_prev_entry(insn, list);
++ else if (rela->addend == rela->sym->sec->len) {
++ found = false;
++ list_for_each_entry_reverse(insn, &file->insn_list, list) {
++ if (insn->sec == rela->sym->sec) {
++ found = true;
++ break;
++ }
++ }
++
++ if (!found) {
++ WARN("can't find unreachable insn at %s+0x%x",
++ rela->sym->sec->name, rela->addend);
++ return -1;
++ }
++ } else {
++ WARN("can't find unreachable insn at %s+0x%x",
++ rela->sym->sec->name, rela->addend);
++ return -1;
++ }
++
++ insn->dead_end = true;
++ }
++
++ return 0;
++}
++
++/*
++ * Warnings shouldn't be reported for ignored functions.
++ */
++static void add_ignores(struct objtool_file *file)
++{
++ struct instruction *insn;
++ struct section *sec;
++ struct symbol *func;
++
++ list_for_each_entry(sec, &file->elf->sections, list) {
++ list_for_each_entry(func, &sec->symbol_list, list) {
++ if (func->type != STT_FUNC)
++ continue;
++
++ if (!ignore_func(file, func))
++ continue;
++
++ func_for_each_insn(file, func, insn)
++ insn->visited = true;
++ }
++ }
++}
++
++/*
++ * FIXME: For now, just ignore any alternatives which add retpolines. This is
++ * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
++ * But it at least allows objtool to understand the control flow *around* the
++ * retpoline.
++ */
++static int add_nospec_ignores(struct objtool_file *file)
++{
++ struct section *sec;
++ struct rela *rela;
++ struct instruction *insn;
++
++ sec = find_section_by_name(file->elf, ".rela.discard.nospec");
++ if (!sec)
++ return 0;
++
++ list_for_each_entry(rela, &sec->rela_list, list) {
++ if (rela->sym->type != STT_SECTION) {
++ WARN("unexpected relocation symbol type in %s", sec->name);
++ return -1;
++ }
++
++ insn = find_insn(file, rela->sym->sec, rela->addend);
++ if (!insn) {
++ WARN("bad .discard.nospec entry");
++ return -1;
++ }
++
++ insn->ignore_alts = true;
++ }
++
++ return 0;
++}
++
++/*
++ * Find the destination instructions for all jumps.
++ */
++static int add_jump_destinations(struct objtool_file *file)
++{
++ struct instruction *insn;
++ struct rela *rela;
++ struct section *dest_sec;
++ unsigned long dest_off;
++
++ for_each_insn(file, insn) {
++ if (insn->type != INSN_JUMP_CONDITIONAL &&
++ insn->type != INSN_JUMP_UNCONDITIONAL)
++ continue;
++
++ /* skip ignores */
++ if (insn->visited)
++ continue;
++
++ rela = find_rela_by_dest_range(insn->sec, insn->offset,
++ insn->len);
++ if (!rela) {
++ dest_sec = insn->sec;
++ dest_off = insn->offset + insn->len + insn->immediate;
++ } else if (rela->sym->type == STT_SECTION) {
++ dest_sec = rela->sym->sec;
++ dest_off = rela->addend + 4;
++ } else if (rela->sym->sec->idx) {
++ dest_sec = rela->sym->sec;
++ dest_off = rela->sym->sym.st_value + rela->addend + 4;
++ } else if (strstr(rela->sym->name, "_indirect_thunk_")) {
++ /*
++ * Retpoline jumps are really dynamic jumps in
++ * disguise, so convert them accordingly.
++ */
++ insn->type = INSN_JUMP_DYNAMIC;
++ continue;
++ } else {
++ /* sibling call */
++ insn->jump_dest = 0;
++ continue;
++ }
++
++ insn->jump_dest = find_insn(file, dest_sec, dest_off);
++ if (!insn->jump_dest) {
++
++ /*
++ * This is a special case where an alt instruction
++ * jumps past the end of the section. These are
++ * handled later in handle_group_alt().
++ */
++ if (!strcmp(insn->sec->name, ".altinstr_replacement"))
++ continue;
++
++ WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
++ insn->sec, insn->offset, dest_sec->name,
++ dest_off);
++ return -1;
++ }
++ }
++
++ return 0;
++}
++
++/*
++ * Find the destination instructions for all calls.
++ */
++static int add_call_destinations(struct objtool_file *file)
++{
++ struct instruction *insn;
++ unsigned long dest_off;
++ struct rela *rela;
++
++ for_each_insn(file, insn) {
++ if (insn->type != INSN_CALL)
++ continue;
++
++ rela = find_rela_by_dest_range(insn->sec, insn->offset,
++ insn->len);
++ if (!rela) {
++ dest_off = insn->offset + insn->len + insn->immediate;
++ insn->call_dest = find_symbol_by_offset(insn->sec,
++ dest_off);
++ /*
++ * FIXME: Thanks to retpolines, it's now considered
++ * normal for a function to call within itself. So
++ * disable this warning for now.
++ */
++#if 0
++ if (!insn->call_dest) {
++ WARN_FUNC("can't find call dest symbol at offset 0x%lx",
++ insn->sec, insn->offset, dest_off);
++ return -1;
++ }
++#endif
++ } else if (rela->sym->type == STT_SECTION) {
++ insn->call_dest = find_symbol_by_offset(rela->sym->sec,
++ rela->addend+4);
++ if (!insn->call_dest ||
++ insn->call_dest->type != STT_FUNC) {
++ WARN_FUNC("can't find call dest symbol at %s+0x%x",
++ insn->sec, insn->offset,
++ rela->sym->sec->name,
++ rela->addend + 4);
++ return -1;
++ }
++ } else
++ insn->call_dest = rela->sym;
++ }
++
++ return 0;
++}
++
++/*
++ * The .alternatives section requires some extra special care, over and above
++ * what other special sections require:
++ *
++ * 1. Because alternatives are patched in-place, we need to insert a fake jump
++ * instruction at the end so that validate_branch() skips all the original
++ * replaced instructions when validating the new instruction path.
++ *
++ * 2. An added wrinkle is that the new instruction length might be zero. In
++ * that case the old instructions are replaced with noops. We simulate that
++ * by creating a fake jump as the only new instruction.
++ *
++ * 3. In some cases, the alternative section includes an instruction which
++ * conditionally jumps to the _end_ of the entry. We have to modify these
++ * jumps' destinations to point back to .text rather than the end of the
++ * entry in .altinstr_replacement.
++ *
++ * 4. It has been requested that we don't validate the !POPCNT feature path
++ * which is a "very very small percentage of machines".
++ */
++static int handle_group_alt(struct objtool_file *file,
++ struct special_alt *special_alt,
++ struct instruction *orig_insn,
++ struct instruction **new_insn)
++{
++ struct instruction *last_orig_insn, *last_new_insn, *insn, *fake_jump;
++ unsigned long dest_off;
++
++ last_orig_insn = NULL;
++ insn = orig_insn;
++ sec_for_each_insn_from(file, insn) {
++ if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
++ break;
++
++ if (special_alt->skip_orig)
++ insn->type = INSN_NOP;
++
++ insn->alt_group = true;
++ last_orig_insn = insn;
++ }
++
++ if (!next_insn_same_sec(file, last_orig_insn)) {
++ WARN("%s: don't know how to handle alternatives at end of section",
++ special_alt->orig_sec->name);
++ return -1;
++ }
++
++ fake_jump = malloc(sizeof(*fake_jump));
++ if (!fake_jump) {
++ WARN("malloc failed");
++ return -1;
++ }
++ memset(fake_jump, 0, sizeof(*fake_jump));
++ INIT_LIST_HEAD(&fake_jump->alts);
++ fake_jump->sec = special_alt->new_sec;
++ fake_jump->offset = -1;
++ fake_jump->type = INSN_JUMP_UNCONDITIONAL;
++ fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
++
++ if (!special_alt->new_len) {
++ *new_insn = fake_jump;
++ return 0;
++ }
++
++ last_new_insn = NULL;
++ insn = *new_insn;
++ sec_for_each_insn_from(file, insn) {
++ if (insn->offset >= special_alt->new_off + special_alt->new_len)
++ break;
++
++ last_new_insn = insn;
++
++ if (insn->type != INSN_JUMP_CONDITIONAL &&
++ insn->type != INSN_JUMP_UNCONDITIONAL)
++ continue;
++
++ if (!insn->immediate)
++ continue;
++
++ dest_off = insn->offset + insn->len + insn->immediate;
++ if (dest_off == special_alt->new_off + special_alt->new_len)
++ insn->jump_dest = fake_jump;
++
++ if (!insn->jump_dest) {
++ WARN_FUNC("can't find alternative jump destination",
++ insn->sec, insn->offset);
++ return -1;
++ }
++ }
++
++ if (!last_new_insn) {
++ WARN_FUNC("can't find last new alternative instruction",
++ special_alt->new_sec, special_alt->new_off);
++ return -1;
++ }
++
++ list_add(&fake_jump->list, &last_new_insn->list);
++
++ return 0;
++}
++
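For context, a rough sketch of the kind of source construct that produces these alternative groups; the alternative() macro is from arch/x86/include/asm/alternative.h, and the specific instructions and feature bit below are only illustrative.

    /* Hypothetical example: the "old" instruction is patched to the "new"
     * one at boot on CPUs with the given feature bit.  Objtool decodes both
     * sequences, and the fake jump added by handle_group_alt() lets
     * validate_branch() skip the original bytes when it follows the
     * replacement path. */
    static inline void example_barrier(void)
    {
            alternative("lock; addl $0,0(%%rsp)", "mfence", X86_FEATURE_XMM2);
    }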
++/*
++ * A jump table entry can either convert a nop to a jump or a jump to a nop.
++ * If the original instruction is a jump, make the alt entry an effective nop
++ * by just skipping the original instruction.
++ */
++static int handle_jump_alt(struct objtool_file *file,
++ struct special_alt *special_alt,
++ struct instruction *orig_insn,
++ struct instruction **new_insn)
++{
++ if (orig_insn->type == INSN_NOP)
++ return 0;
++
++ if (orig_insn->type != INSN_JUMP_UNCONDITIONAL) {
++ WARN_FUNC("unsupported instruction at jump label",
++ orig_insn->sec, orig_insn->offset);
++ return -1;
++ }
++
++ *new_insn = list_next_entry(orig_insn, list);
++ return 0;
++}
++
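For context, a hedged sketch of the C-level construct behind these jump label entries: a static key site compiles to either a nop or an unconditional jump, and the entry records how to flip between the two, which is what handle_jump_alt() models. The key name and helper below are invented for the example.

    DEFINE_STATIC_KEY_FALSE(example_key);

    void hot_path(void)
    {
            /* Compiled as a nop by default; patched to a jump when the key
             * is enabled.  Objtool understands both the nop and jump forms. */
            if (static_branch_unlikely(&example_key))
                    slow_debug_hook();      /* hypothetical helper */
    }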
++/*
++ * Read all the special sections which have alternate instructions which can be
++ * patched in or redirected to at runtime. Each instruction having alternate
++ * instruction(s) has them added to its insn->alts list, which will be
++ * traversed in validate_branch().
++ */
++static int add_special_section_alts(struct objtool_file *file)
++{
++ struct list_head special_alts;
++ struct instruction *orig_insn, *new_insn;
++ struct special_alt *special_alt, *tmp;
++ struct alternative *alt;
++ int ret;
++
++ ret = special_get_alts(file->elf, &special_alts);
++ if (ret)
++ return ret;
++
++ list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
++ orig_insn = find_insn(file, special_alt->orig_sec,
++ special_alt->orig_off);
++ if (!orig_insn) {
++ WARN_FUNC("special: can't find orig instruction",
++ special_alt->orig_sec, special_alt->orig_off);
++ ret = -1;
++ goto out;
++ }
++
++ /* Ignore retpoline alternatives. */
++ if (orig_insn->ignore_alts)
++ continue;
++
++ new_insn = NULL;
++ if (!special_alt->group || special_alt->new_len) {
++ new_insn = find_insn(file, special_alt->new_sec,
++ special_alt->new_off);
++ if (!new_insn) {
++ WARN_FUNC("special: can't find new instruction",
++ special_alt->new_sec,
++ special_alt->new_off);
++ ret = -1;
++ goto out;
++ }
++ }
++
++ if (special_alt->group) {
++ ret = handle_group_alt(file, special_alt, orig_insn,
++ &new_insn);
++ if (ret)
++ goto out;
++ } else if (special_alt->jump_or_nop) {
++ ret = handle_jump_alt(file, special_alt, orig_insn,
++ &new_insn);
++ if (ret)
++ goto out;
++ }
++
++ alt = malloc(sizeof(*alt));
++ if (!alt) {
++ WARN("malloc failed");
++ ret = -1;
++ goto out;
++ }
++
++ alt->insn = new_insn;
++ list_add_tail(&alt->list, &orig_insn->alts);
++
++ list_del(&special_alt->list);
++ free(special_alt);
++ }
++
++out:
++ return ret;
++}
++
++static int add_switch_table(struct objtool_file *file, struct symbol *func,
++ struct instruction *insn, struct rela *table,
++ struct rela *next_table)
++{
++ struct rela *rela = table;
++ struct instruction *alt_insn;
++ struct alternative *alt;
++
++ list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) {
++ if (rela == next_table)
++ break;
++
++ if (rela->sym->sec != insn->sec ||
++ rela->addend <= func->offset ||
++ rela->addend >= func->offset + func->len)
++ break;
++
++ alt_insn = find_insn(file, insn->sec, rela->addend);
++ if (!alt_insn) {
++ WARN("%s: can't find instruction at %s+0x%x",
++ file->rodata->rela->name, insn->sec->name,
++ rela->addend);
++ return -1;
++ }
++
++ alt = malloc(sizeof(*alt));
++ if (!alt) {
++ WARN("malloc failed");
++ return -1;
++ }
++
++ alt->insn = alt_insn;
++ list_add_tail(&alt->list, &insn->alts);
++ }
++
++ return 0;
++}
++
++/*
++ * find_switch_table() - Given a dynamic jump, find the switch jump table in
++ * .rodata associated with it.
++ *
++ * There are 3 basic patterns:
++ *
++ * 1. jmpq *[rodata addr](,%reg,8)
++ *
++ * This is the most common case by far. It jumps to an address in a simple
++ * jump table which is stored in .rodata.
++ *
++ * 2. jmpq *[rodata addr](%rip)
++ *
++ * This is caused by a rare GCC quirk, currently only seen in three driver
++ * functions in the kernel, only with certain obscure non-distro configs.
++ *
++ * As part of an optimization, GCC makes a copy of an existing switch jump
++ * table, modifies it, and then hard-codes the jump (albeit with an indirect
++ * jump) to use a single entry in the table. The rest of the jump table and
++ * some of its jump targets remain as dead code.
++ *
++ * In such a case we can just crudely ignore all unreachable instruction
++ * warnings for the entire object file. Ideally we would just ignore them
++ * for the function, but that would require redesigning the code quite a
++ * bit. And honestly that's just not worth doing: unreachable instruction
++ * warnings are of questionable value anyway, and this is such a rare issue.
++ *
++ * 3. mov [rodata addr],%reg1
++ * ... some instructions ...
++ * jmpq *(%reg1,%reg2,8)
++ *
++ * This is a fairly uncommon pattern which is new for GCC 6. As of this
++ * writing, there are 11 occurrences of it in the allmodconfig kernel.
++ *
++ * TODO: Once we have DWARF CFI and smarter instruction decoding logic,
++ * ensure the same register is used in the mov and jump instructions.
++ */
++static struct rela *find_switch_table(struct objtool_file *file,
++ struct symbol *func,
++ struct instruction *insn)
++{
++ struct rela *text_rela, *rodata_rela;
++ struct instruction *orig_insn = insn;
++
++ text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
++ if (text_rela && text_rela->sym == file->rodata->sym) {
++ /* case 1 */
++ rodata_rela = find_rela_by_dest(file->rodata,
++ text_rela->addend);
++ if (rodata_rela)
++ return rodata_rela;
++
++ /* case 2 */
++ rodata_rela = find_rela_by_dest(file->rodata,
++ text_rela->addend + 4);
++ if (!rodata_rela)
++ return NULL;
++ file->ignore_unreachables = true;
++ return rodata_rela;
++ }
++
++ /* case 3 */
++ func_for_each_insn_continue_reverse(file, func, insn) {
++ if (insn->type == INSN_JUMP_DYNAMIC)
++ break;
++
++ /* allow small jumps within the range */
++ if (insn->type == INSN_JUMP_UNCONDITIONAL &&
++ insn->jump_dest &&
++ (insn->jump_dest->offset <= insn->offset ||
++ insn->jump_dest->offset > orig_insn->offset))
++ break;
++
++ /* look for a relocation which references .rodata */
++ text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
++ insn->len);
++ if (!text_rela || text_rela->sym != file->rodata->sym)
++ continue;
++
++ /*
++ * Make sure the .rodata address isn't associated with a
++ * symbol. gcc jump tables are anonymous data.
++ */
++ if (find_symbol_containing(file->rodata, text_rela->addend))
++ continue;
++
++ return find_rela_by_dest(file->rodata, text_rela->addend);
++ }
++
++ return NULL;
++}
++
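As an aside, not part of the patch: the kind of C switch that typically produces pattern 1 above. With enough dense cases, GCC usually emits an indirect jump through an anonymous table in .rodata, which is exactly what add_switch_table() then walks entry by entry.

    /* Hypothetical example: a dense switch commonly becomes
     *     jmpq *table(,%reg,8)
     * with "table" living in .rodata and every entry pointing back into
     * this function. */
    int classify(int c)
    {
            switch (c) {
            case 0: return 10;
            case 1: return 11;
            case 2: return 12;
            case 3: return 13;
            case 4: return 14;
            case 5: return 15;
            case 6: return 16;
            default: return -1;
            }
    }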
++static int add_func_switch_tables(struct objtool_file *file,
++ struct symbol *func)
++{
++ struct instruction *insn, *prev_jump = NULL;
++ struct rela *rela, *prev_rela = NULL;
++ int ret;
++
++ func_for_each_insn(file, func, insn) {
++ if (insn->type != INSN_JUMP_DYNAMIC)
++ continue;
++
++ rela = find_switch_table(file, func, insn);
++ if (!rela)
++ continue;
++
++ /*
++ * We found a switch table, but we don't know yet how big it
++ * is. Don't add it until we reach the end of the function or
++ * the beginning of another switch table in the same function.
++ */
++ if (prev_jump) {
++ ret = add_switch_table(file, func, prev_jump, prev_rela,
++ rela);
++ if (ret)
++ return ret;
++ }
++
++ prev_jump = insn;
++ prev_rela = rela;
++ }
++
++ if (prev_jump) {
++ ret = add_switch_table(file, func, prev_jump, prev_rela, NULL);
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++}
++
++/*
++ * For some switch statements, gcc generates a jump table in the .rodata
++ * section which contains a list of addresses within the function to jump to.
++ * This finds these jump tables and adds them to the insn->alts lists.
++ */
++static int add_switch_table_alts(struct objtool_file *file)
++{
++ struct section *sec;
++ struct symbol *func;
++ int ret;
++
++ if (!file->rodata || !file->rodata->rela)
++ return 0;
++
++ list_for_each_entry(sec, &file->elf->sections, list) {
++ list_for_each_entry(func, &sec->symbol_list, list) {
++ if (func->type != STT_FUNC)
++ continue;
++
++ ret = add_func_switch_tables(file, func);
++ if (ret)
++ return ret;
++ }
++ }
++
++ return 0;
++}
++
++static int decode_sections(struct objtool_file *file)
++{
++ int ret;
++
++ ret = decode_instructions(file);
++ if (ret)
++ return ret;
++
++ ret = add_dead_ends(file);
++ if (ret)
++ return ret;
++
++ add_ignores(file);
++
++ ret = add_nospec_ignores(file);
++ if (ret)
++ return ret;
++
++ ret = add_jump_destinations(file);
++ if (ret)
++ return ret;
++
++ ret = add_call_destinations(file);
++ if (ret)
++ return ret;
++
++ ret = add_special_section_alts(file);
++ if (ret)
++ return ret;
++
++ ret = add_switch_table_alts(file);
++ if (ret)
++ return ret;
++
++ return 0;
++}
++
++static bool is_fentry_call(struct instruction *insn)
++{
++ if (insn->type == INSN_CALL &&
++ insn->call_dest->type == STT_NOTYPE &&
++ !strcmp(insn->call_dest->name, "__fentry__"))
++ return true;
++
++ return false;
++}
++
++static bool has_modified_stack_frame(struct instruction *insn)
++{
++ return (insn->state & STATE_FP_SAVED) ||
++ (insn->state & STATE_FP_SETUP);
++}
++
++static bool has_valid_stack_frame(struct instruction *insn)
++{
++ return (insn->state & STATE_FP_SAVED) &&
++ (insn->state & STATE_FP_SETUP);
++}
++
++static unsigned int frame_state(unsigned long state)
++{
++ return (state & (STATE_FP_SAVED | STATE_FP_SETUP));
++}
++
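For readers unfamiliar with the convention being checked, a short sketch (not from the patch) of how the instruction types map onto a CONFIG_FRAME_POINTER prologue and epilogue; exact codegen of course varies.

    /*
     * Typical non-leaf function with frame pointers enabled:
     *
     *     push %rbp           decoded as INSN_FP_SAVE    -> STATE_FP_SAVED
     *     mov  %rsp,%rbp      decoded as INSN_FP_SETUP   -> STATE_FP_SETUP
     *     ...body: calls are allowed only while both bits are set...
     *     pop  %rbp           decoded as INSN_FP_RESTORE -> bits cleared
     *     ret                 decoded as INSN_RETURN; frame must be restored
     */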
++/*
++ * Follow the branch starting at the given instruction, and recursively follow
++ * any other branches (jumps). Meanwhile, track the frame pointer state at
++ * each instruction and validate all the rules described in
++ * tools/objtool/Documentation/stack-validation.txt.
++ */
++static int validate_branch(struct objtool_file *file,
++ struct instruction *first, unsigned char first_state)
++{
++ struct alternative *alt;
++ struct instruction *insn;
++ struct section *sec;
++ struct symbol *func = NULL;
++ unsigned char state;
++ int ret;
++
++ insn = first;
++ sec = insn->sec;
++ state = first_state;
++
++ if (insn->alt_group && list_empty(&insn->alts)) {
++ WARN_FUNC("don't know how to handle branch to middle of alternative instruction group",
++ sec, insn->offset);
++ return 1;
++ }
++
++ while (1) {
++ if (file->c_file && insn->func) {
++ if (func && func != insn->func) {
++ WARN("%s() falls through to next function %s()",
++ func->name, insn->func->name);
++ return 1;
++ }
++
++ func = insn->func;
++ }
++
++ if (insn->visited) {
++ if (frame_state(insn->state) != frame_state(state)) {
++ WARN_FUNC("frame pointer state mismatch",
++ sec, insn->offset);
++ return 1;
++ }
++
++ return 0;
++ }
++
++ insn->visited = true;
++ insn->state = state;
++
++ list_for_each_entry(alt, &insn->alts, list) {
++ ret = validate_branch(file, alt->insn, state);
++ if (ret)
++ return 1;
++ }
++
++ switch (insn->type) {
++
++ case INSN_FP_SAVE:
++ if (!nofp) {
++ if (state & STATE_FP_SAVED) {
++ WARN_FUNC("duplicate frame pointer save",
++ sec, insn->offset);
++ return 1;
++ }
++ state |= STATE_FP_SAVED;
++ }
++ break;
++
++ case INSN_FP_SETUP:
++ if (!nofp) {
++ if (state & STATE_FP_SETUP) {
++ WARN_FUNC("duplicate frame pointer setup",
++ sec, insn->offset);
++ return 1;
++ }
++ state |= STATE_FP_SETUP;
++ }
++ break;
++
++ case INSN_FP_RESTORE:
++ if (!nofp) {
++ if (has_valid_stack_frame(insn))
++ state &= ~STATE_FP_SETUP;
++
++ state &= ~STATE_FP_SAVED;
++ }
++ break;
++
++ case INSN_RETURN:
++ if (!nofp && has_modified_stack_frame(insn)) {
++ WARN_FUNC("return without frame pointer restore",
++ sec, insn->offset);
++ return 1;
++ }
++ return 0;
++
++ case INSN_CALL:
++ if (is_fentry_call(insn)) {
++ state |= STATE_FENTRY;
++ break;
++ }
++
++ ret = dead_end_function(file, insn->call_dest);
++ if (ret == 1)
++ return 0;
++ if (ret == -1)
++ return 1;
++
++ /* fallthrough */
++ case INSN_CALL_DYNAMIC:
++ if (!nofp && !has_valid_stack_frame(insn)) {
++ WARN_FUNC("call without frame pointer save/setup",
++ sec, insn->offset);
++ return 1;
++ }
++ break;
++
++ case INSN_JUMP_CONDITIONAL:
++ case INSN_JUMP_UNCONDITIONAL:
++ if (insn->jump_dest) {
++ ret = validate_branch(file, insn->jump_dest,
++ state);
++ if (ret)
++ return 1;
++ } else if (has_modified_stack_frame(insn)) {
++ WARN_FUNC("sibling call from callable instruction with changed frame pointer",
++ sec, insn->offset);
++ return 1;
++ } /* else it's a sibling call */
++
++ if (insn->type == INSN_JUMP_UNCONDITIONAL)
++ return 0;
++
++ break;
++
++ case INSN_JUMP_DYNAMIC:
++ if (list_empty(&insn->alts) &&
++ has_modified_stack_frame(insn)) {
++ WARN_FUNC("sibling call from callable instruction with changed frame pointer",
++ sec, insn->offset);
++ return 1;
++ }
++
++ return 0;
++
++ default:
++ break;
++ }
++
++ if (insn->dead_end)
++ return 0;
++
++ insn = next_insn_same_sec(file, insn);
++ if (!insn) {
++ WARN("%s: unexpected end of section", sec->name);
++ return 1;
++ }
++ }
++
++ return 0;
++}
++
++static bool is_kasan_insn(struct instruction *insn)
++{
++ return (insn->type == INSN_CALL &&
++ !strcmp(insn->call_dest->name, "__asan_handle_no_return"));
++}
++
++static bool is_ubsan_insn(struct instruction *insn)
++{
++ return (insn->type == INSN_CALL &&
++ !strcmp(insn->call_dest->name,
++ "__ubsan_handle_builtin_unreachable"));
++}
++
++static bool ignore_unreachable_insn(struct symbol *func,
++ struct instruction *insn)
++{
++ int i;
++
++ if (insn->type == INSN_NOP)
++ return true;
++
++ /*
++ * Check if this (or a subsequent) instruction is related to
++ * CONFIG_UBSAN or CONFIG_KASAN.
++ *
++ * End the search at 5 instructions to avoid going into the weeds.
++ */
++ for (i = 0; i < 5; i++) {
++
++ if (is_kasan_insn(insn) || is_ubsan_insn(insn))
++ return true;
++
++ if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest) {
++ insn = insn->jump_dest;
++ continue;
++ }
++
++ if (insn->offset + insn->len >= func->offset + func->len)
++ break;
++ insn = list_next_entry(insn, list);
++ }
++
++ return false;
++}
++
++static int validate_functions(struct objtool_file *file)
++{
++ struct section *sec;
++ struct symbol *func;
++ struct instruction *insn;
++ int ret, warnings = 0;
++
++ list_for_each_entry(sec, &file->elf->sections, list) {
++ list_for_each_entry(func, &sec->symbol_list, list) {
++ if (func->type != STT_FUNC)
++ continue;
++
++ insn = find_insn(file, sec, func->offset);
++ if (!insn)
++ continue;
++
++ ret = validate_branch(file, insn, 0);
++ warnings += ret;
++ }
++ }
++
++ list_for_each_entry(sec, &file->elf->sections, list) {
++ list_for_each_entry(func, &sec->symbol_list, list) {
++ if (func->type != STT_FUNC)
++ continue;
++
++ func_for_each_insn(file, func, insn) {
++ if (insn->visited)
++ continue;
++
++ insn->visited = true;
++
++ if (file->ignore_unreachables || warnings ||
++ ignore_unreachable_insn(func, insn))
++ continue;
++
++ /*
++ * gcov produces a lot of unreachable
++ * instructions. If we get an unreachable
++ * warning and the file has gcov enabled, just
++ * ignore it, and all other such warnings for
++ * the file.
++ */
++ if (!file->ignore_unreachables &&
++ gcov_enabled(file)) {
++ file->ignore_unreachables = true;
++ continue;
++ }
++
++ WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
++ warnings++;
++ }
++ }
++ }
++
++ return warnings;
++}
++
++static int validate_uncallable_instructions(struct objtool_file *file)
++{
++ struct instruction *insn;
++ int warnings = 0;
++
++ for_each_insn(file, insn) {
++ if (!insn->visited && insn->type == INSN_RETURN) {
++
++ /*
++			 * Don't warn about return instructions in unvisited
++ * retpoline alternatives.
++ */
++ if (!strcmp(insn->sec->name, ".altinstr_replacement"))
++ continue;
++
++ WARN_FUNC("return instruction outside of a callable function",
++ insn->sec, insn->offset);
++ warnings++;
++ }
++ }
++
++ return warnings;
++}
++
++static void cleanup(struct objtool_file *file)
++{
++ struct instruction *insn, *tmpinsn;
++ struct alternative *alt, *tmpalt;
++
++ list_for_each_entry_safe(insn, tmpinsn, &file->insn_list, list) {
++ list_for_each_entry_safe(alt, tmpalt, &insn->alts, list) {
++ list_del(&alt->list);
++ free(alt);
++ }
++ list_del(&insn->list);
++ hash_del(&insn->hash);
++ free(insn);
++ }
++ elf_close(file->elf);
++}
++
++int check(const char *_objname, bool _nofp)
++{
++ struct objtool_file file;
++ int ret, warnings = 0;
++
++ objname = _objname;
++ nofp = _nofp;
++
++ file.elf = elf_open(objname);
++ if (!file.elf) {
++ fprintf(stderr, "error reading elf file %s\n", objname);
++ return 1;
++ }
++
++ INIT_LIST_HEAD(&file.insn_list);
++ hash_init(file.insn_hash);
++ file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard");
++ file.rodata = find_section_by_name(file.elf, ".rodata");
++ file.ignore_unreachables = false;
++ file.c_file = find_section_by_name(file.elf, ".comment");
++
++ ret = decode_sections(&file);
++ if (ret < 0)
++ goto out;
++ warnings += ret;
++
++ ret = validate_functions(&file);
++ if (ret < 0)
++ goto out;
++ warnings += ret;
++
++ ret = validate_uncallable_instructions(&file);
++ if (ret < 0)
++ goto out;
++ warnings += ret;
++
++out:
++ cleanup(&file);
++
++ /* ignore warnings for now until we get all the code cleaned up */
++ if (ret || warnings)
++ return 0;
++ return 0;
++}
+diff --git a/tools/objtool/check.h b/tools/objtool/check.h
+new file mode 100644
+index 0000000..aca248a
+--- /dev/null
++++ b/tools/objtool/check.h
+@@ -0,0 +1,51 @@
++/*
++ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2
++ * of the License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _CHECK_H
++#define _CHECK_H
++
++#include <stdbool.h>
++#include "elf.h"
++#include "arch.h"
++#include <linux/hashtable.h>
++
++struct instruction {
++ struct list_head list;
++ struct hlist_node hash;
++ struct section *sec;
++ unsigned long offset;
++ unsigned int len, state;
++ unsigned char type;
++ unsigned long immediate;
++ bool alt_group, visited, dead_end, ignore_alts;
++ struct symbol *call_dest;
++ struct instruction *jump_dest;
++ struct list_head alts;
++ struct symbol *func;
++};
++
++struct objtool_file {
++ struct elf *elf;
++ struct list_head insn_list;
++ DECLARE_HASHTABLE(insn_hash, 16);
++ struct section *rodata, *whitelist;
++ bool ignore_unreachables, c_file;
++};
++
++int check(const char *objname, bool nofp);
++
++#endif /* _CHECK_H */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0088-objtool-sync-up-with-the-4.14.47-version-of-objtool.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0088-objtool-sync-up-with-the-4.14.47-version-of-objtool.patch
new file mode 100644
index 00000000..318297bf
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0088-objtool-sync-up-with-the-4.14.47-version-of-objtool.patch
@@ -0,0 +1,9906 @@
+From 0706298ca42f992d0c1afb93c8d6710d15f88ccb Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Sun, 3 Jun 2018 12:35:15 +0200
+Subject: [PATCH 88/93] objtool: sync up with the 4.14.47 version of objtool
+
+There are pros and cons of dealing with tools in the kernel directory.
+The pros are that development happens fast, and new features can be
+added to the kernel and the tools at the same time. The cons are that,
+when dealing with backported kernel patches, it can be necessary to
+backport parts of the tool changes as well.
+
+For 4.9.y so far, we have backported individual patches. That quickly
+breaks down when there are minor differences between how backports were
+handled, so grabbing 40+ patch long series can be difficult, not
+impossible, but really frustrating to attempt.
+
+To help mitigate this mess, here's a single big patch to sync up the
+objtool logic to the 4.14.47 version of the tool. From this point
+forward (after some other minor header file patches are applied), the
+tool should be in sync and much easier to maintain over time.
+
+This has survived my limited testing, and as the codebase is identical
+to 4.14.47, I'm pretty comfortable dropping this big change in here in
+4.9.y. Hopefully all goes well...
+
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/orc_types.h | 107 ++
+ arch/x86/include/asm/unwind_hints.h | 103 ++
+ tools/objtool/Build | 3 +
+ tools/objtool/Documentation/stack-validation.txt | 195 ++-
+ tools/objtool/Makefile | 35 +-
+ tools/objtool/arch.h | 65 +-
+ tools/objtool/arch/x86/Build | 10 +-
+ tools/objtool/arch/x86/decode.c | 408 +++++-
+ tools/objtool/arch/x86/include/asm/inat.h | 244 ++++
+ tools/objtool/arch/x86/include/asm/inat_types.h | 29 +
+ tools/objtool/arch/x86/include/asm/insn.h | 211 ++++
+ tools/objtool/arch/x86/include/asm/orc_types.h | 107 ++
+ tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk | 392 ------
+ tools/objtool/arch/x86/insn/inat.c | 97 --
+ tools/objtool/arch/x86/insn/inat.h | 234 ----
+ tools/objtool/arch/x86/insn/inat_types.h | 29 -
+ tools/objtool/arch/x86/insn/insn.c | 606 ---------
+ tools/objtool/arch/x86/insn/insn.h | 211 ----
+ tools/objtool/arch/x86/insn/x86-opcode-map.txt | 1063 ----------------
+ tools/objtool/arch/x86/lib/inat.c | 97 ++
+ tools/objtool/arch/x86/lib/insn.c | 606 +++++++++
+ tools/objtool/arch/x86/lib/x86-opcode-map.txt | 1072 ++++++++++++++++
+ tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk | 393 ++++++
+ tools/objtool/builtin-check.c | 9 +-
+ tools/objtool/builtin-orc.c | 68 +
+ tools/objtool/builtin.h | 6 +
+ tools/objtool/cfi.h | 55 +
+ tools/objtool/check.c | 1329 ++++++++++++++++----
+ tools/objtool/check.h | 39 +-
+ tools/objtool/elf.c | 284 ++++-
+ tools/objtool/elf.h | 21 +-
+ tools/objtool/objtool.c | 12 +-
+ tools/objtool/orc.h | 30 +
+ tools/objtool/orc_dump.c | 213 ++++
+ tools/objtool/orc_gen.c | 221 ++++
+ tools/objtool/special.c | 6 +-
+ tools/objtool/sync-check.sh | 29 +
+ tools/objtool/warn.h | 10 +
+ 38 files changed, 5511 insertions(+), 3138 deletions(-)
+ create mode 100644 arch/x86/include/asm/orc_types.h
+ create mode 100644 arch/x86/include/asm/unwind_hints.h
+ create mode 100644 tools/objtool/arch/x86/include/asm/inat.h
+ create mode 100644 tools/objtool/arch/x86/include/asm/inat_types.h
+ create mode 100644 tools/objtool/arch/x86/include/asm/insn.h
+ create mode 100644 tools/objtool/arch/x86/include/asm/orc_types.h
+ delete mode 100644 tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk
+ delete mode 100644 tools/objtool/arch/x86/insn/inat.c
+ delete mode 100644 tools/objtool/arch/x86/insn/inat.h
+ delete mode 100644 tools/objtool/arch/x86/insn/inat_types.h
+ delete mode 100644 tools/objtool/arch/x86/insn/insn.c
+ delete mode 100644 tools/objtool/arch/x86/insn/insn.h
+ delete mode 100644 tools/objtool/arch/x86/insn/x86-opcode-map.txt
+ create mode 100644 tools/objtool/arch/x86/lib/inat.c
+ create mode 100644 tools/objtool/arch/x86/lib/insn.c
+ create mode 100644 tools/objtool/arch/x86/lib/x86-opcode-map.txt
+ create mode 100644 tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
+ create mode 100644 tools/objtool/builtin-orc.c
+ create mode 100644 tools/objtool/cfi.h
+ create mode 100644 tools/objtool/orc.h
+ create mode 100644 tools/objtool/orc_dump.c
+ create mode 100644 tools/objtool/orc_gen.c
+ create mode 100755 tools/objtool/sync-check.sh
+
+diff --git a/arch/x86/include/asm/orc_types.h b/arch/x86/include/asm/orc_types.h
+new file mode 100644
+index 0000000..7dc777a
+--- /dev/null
++++ b/arch/x86/include/asm/orc_types.h
+@@ -0,0 +1,107 @@
++/*
++ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2
++ * of the License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _ORC_TYPES_H
++#define _ORC_TYPES_H
++
++#include <linux/types.h>
++#include <linux/compiler.h>
++
++/*
++ * The ORC_REG_* registers are base registers which are used to find other
++ * registers on the stack.
++ *
++ * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the
++ * address of the previous frame: the caller's SP before it called the current
++ * function.
++ *
++ * ORC_REG_UNDEFINED means the corresponding register's value didn't change in
++ * the current frame.
++ *
++ * The most commonly used base registers are SP and BP -- which the previous SP
++ * is usually based on -- and PREV_SP and UNDEFINED -- which the previous BP is
++ * usually based on.
++ *
++ * The rest of the base registers are needed for special cases like entry code
++ * and GCC realigned stacks.
++ */
++#define ORC_REG_UNDEFINED 0
++#define ORC_REG_PREV_SP 1
++#define ORC_REG_DX 2
++#define ORC_REG_DI 3
++#define ORC_REG_BP 4
++#define ORC_REG_SP 5
++#define ORC_REG_R10 6
++#define ORC_REG_R13 7
++#define ORC_REG_BP_INDIRECT 8
++#define ORC_REG_SP_INDIRECT 9
++#define ORC_REG_MAX 15
++
++/*
++ * ORC_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP (the
++ * caller's SP right before it made the call). Used for all callable
++ * functions, i.e. all C code and all callable asm functions.
++ *
++ * ORC_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset points
++ * to a fully populated pt_regs from a syscall, interrupt, or exception.
++ *
++ * ORC_TYPE_REGS_IRET: Used in entry code to indicate that sp_reg+sp_offset
++ * points to the iret return frame.
++ *
++ * The UNWIND_HINT macros are used only for the unwind_hint struct. They
++ * aren't used in struct orc_entry due to size and complexity constraints.
++ * Objtool converts them to real types when it converts the hints to orc
++ * entries.
++ */
++#define ORC_TYPE_CALL 0
++#define ORC_TYPE_REGS 1
++#define ORC_TYPE_REGS_IRET 2
++#define UNWIND_HINT_TYPE_SAVE 3
++#define UNWIND_HINT_TYPE_RESTORE 4
++
++#ifndef __ASSEMBLY__
++/*
++ * This struct is more or less a vastly simplified version of the DWARF Call
++ * Frame Information standard. It contains only the necessary parts of DWARF
++ * CFI, simplified for ease of access by the in-kernel unwinder. It tells the
++ * unwinder how to find the previous SP and BP (and sometimes entry regs) on
++ * the stack for a given code address. Each instance of the struct corresponds
++ * to one or more code locations.
++ */
++struct orc_entry {
++ s16 sp_offset;
++ s16 bp_offset;
++ unsigned sp_reg:4;
++ unsigned bp_reg:4;
++ unsigned type:2;
++};
++
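As a hedged illustration of how the fields combine, the entry below is what one would expect for the first instruction of an ordinary C function, before anything has been pushed: the caller's SP is the current SP plus the 8-byte return address, and BP is untouched. The variable name is invented.

    static const struct orc_entry example_entry = {
            .sp_reg    = ORC_REG_SP,
            .sp_offset = 8,                     /* CFA = %rsp + 8 */
            .bp_reg    = ORC_REG_UNDEFINED,     /* %rbp not touched yet */
            .type      = ORC_TYPE_CALL,
    };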
++/*
++ * This struct is used by asm and inline asm code to manually annotate the
++ * location of registers on the stack for the ORC unwinder.
++ *
++ * Type can be either ORC_TYPE_* or UNWIND_HINT_TYPE_*.
++ */
++struct unwind_hint {
++ u32 ip;
++ s16 sp_offset;
++ u8 sp_reg;
++ u8 type;
++};
++#endif /* __ASSEMBLY__ */
++
++#endif /* _ORC_TYPES_H */
+diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h
+new file mode 100644
+index 0000000..5e02b11
+--- /dev/null
++++ b/arch/x86/include/asm/unwind_hints.h
+@@ -0,0 +1,103 @@
++#ifndef _ASM_X86_UNWIND_HINTS_H
++#define _ASM_X86_UNWIND_HINTS_H
++
++#include "orc_types.h"
++
++#ifdef __ASSEMBLY__
++
++/*
++ * In asm, there are two kinds of code: normal C-type callable functions and
++ * the rest. The normal callable functions can be called by other code, and
++ * don't do anything unusual with the stack. Such normal callable functions
++ * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this
++ * category. In this case, no special debugging annotations are needed because
++ * objtool can automatically generate the ORC data for the ORC unwinder to read
++ * at runtime.
++ *
++ * Anything which doesn't fall into the above category, such as syscall and
++ * interrupt handlers, tends to not be called directly by other functions, and
++ * often does unusual non-C-function-type things with the stack pointer. Such
++ * code needs to be annotated such that objtool can understand it. The
++ * following CFI hint macros are for this type of code.
++ *
++ * These macros provide hints to objtool about the state of the stack at each
++ * instruction. Objtool starts from the hints and follows the code flow,
++ * making automatic CFI adjustments when it sees pushes and pops, filling out
++ * the debuginfo as necessary. It will also warn if it sees any
++ * inconsistencies.
++ */
++.macro UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=0 type=ORC_TYPE_CALL
++#ifdef CONFIG_STACK_VALIDATION
++.Lunwind_hint_ip_\@:
++ .pushsection .discard.unwind_hints
++ /* struct unwind_hint */
++ .long .Lunwind_hint_ip_\@ - .
++ .short \sp_offset
++ .byte \sp_reg
++ .byte \type
++ .popsection
++#endif
++.endm
++
++.macro UNWIND_HINT_EMPTY
++ UNWIND_HINT sp_reg=ORC_REG_UNDEFINED
++.endm
++
++.macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 iret=0
++ .if \base == %rsp && \indirect
++ .set sp_reg, ORC_REG_SP_INDIRECT
++ .elseif \base == %rsp
++ .set sp_reg, ORC_REG_SP
++ .elseif \base == %rbp
++ .set sp_reg, ORC_REG_BP
++ .elseif \base == %rdi
++ .set sp_reg, ORC_REG_DI
++ .elseif \base == %rdx
++ .set sp_reg, ORC_REG_DX
++ .elseif \base == %r10
++ .set sp_reg, ORC_REG_R10
++ .else
++ .error "UNWIND_HINT_REGS: bad base register"
++ .endif
++
++ .set sp_offset, \offset
++
++ .if \iret
++ .set type, ORC_TYPE_REGS_IRET
++ .elseif \extra == 0
++ .set type, ORC_TYPE_REGS_IRET
++ .set sp_offset, \offset + (16*8)
++ .else
++ .set type, ORC_TYPE_REGS
++ .endif
++
++ UNWIND_HINT sp_reg=sp_reg sp_offset=sp_offset type=type
++.endm
++
++.macro UNWIND_HINT_IRET_REGS base=%rsp offset=0
++ UNWIND_HINT_REGS base=\base offset=\offset iret=1
++.endm
++
++.macro UNWIND_HINT_FUNC sp_offset=8
++ UNWIND_HINT sp_offset=\sp_offset
++.endm
++
++#else /* !__ASSEMBLY__ */
++
++#define UNWIND_HINT(sp_reg, sp_offset, type) \
++ "987: \n\t" \
++ ".pushsection .discard.unwind_hints\n\t" \
++ /* struct unwind_hint */ \
++ ".long 987b - .\n\t" \
++ ".short " __stringify(sp_offset) "\n\t" \
++ ".byte " __stringify(sp_reg) "\n\t" \
++ ".byte " __stringify(type) "\n\t" \
++ ".popsection\n\t"
++
++#define UNWIND_HINT_SAVE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_SAVE)
++
++#define UNWIND_HINT_RESTORE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_RESTORE)
++
++#endif /* __ASSEMBLY__ */
++
++#endif /* _ASM_X86_UNWIND_HINTS_H */
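As a rough illustration of the C-side macros (a hedged sketch, not code from the patch): the hint strings expand to directives that record the struct unwind_hint fields in .discard.unwind_hints, which objtool consumes at build time and then discards; nothing runs at execution time. The include of linux/stringify.h is an assumption for __stringify, and "do_strange_stack_stuff" is a made-up placeholder symbol.

#include <linux/stringify.h>
#include <asm/unwind_hints.h>

static inline void unwind_hint_example(void)
{
	/* Save the unwind state, do something unusual, then restore it. */
	asm volatile(UNWIND_HINT_SAVE
		     "call do_strange_stack_stuff\n\t"	/* placeholder symbol */
		     UNWIND_HINT_RESTORE
		     : : : "memory");
}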
+diff --git a/tools/objtool/Build b/tools/objtool/Build
+index 6f2e198..749becd 100644
+--- a/tools/objtool/Build
++++ b/tools/objtool/Build
+@@ -1,6 +1,9 @@
+ objtool-y += arch/$(SRCARCH)/
+ objtool-y += builtin-check.o
++objtool-y += builtin-orc.o
+ objtool-y += check.o
++objtool-y += orc_gen.o
++objtool-y += orc_dump.o
+ objtool-y += elf.o
+ objtool-y += special.o
+ objtool-y += objtool.o
+diff --git a/tools/objtool/Documentation/stack-validation.txt b/tools/objtool/Documentation/stack-validation.txt
+index 55a60d3..3995735 100644
+--- a/tools/objtool/Documentation/stack-validation.txt
++++ b/tools/objtool/Documentation/stack-validation.txt
+@@ -11,9 +11,6 @@ analyzes every .o file and ensures the validity of its stack metadata.
+ It enforces a set of rules on asm code and C inline assembly code so
+ that stack traces can be reliable.
+
+-Currently it only checks frame pointer usage, but there are plans to add
+-CFI validation for C files and CFI generation for asm files.
+-
+ For each function, it recursively follows all possible code paths and
+ validates the correct frame pointer state at each instruction.
+
+@@ -23,6 +20,10 @@ alternative execution paths to a given instruction (or set of
+ instructions). Similarly, it knows how to follow switch statements, for
+ which gcc sometimes uses jump tables.
+
++(Objtool also has an 'orc generate' subcommand which generates debuginfo
++for the ORC unwinder. See Documentation/x86/orc-unwinder.txt in the
++kernel tree for more details.)
++
+
+ Why do we need stack metadata validation?
+ -----------------------------------------
+@@ -93,62 +94,24 @@ a) More reliable stack traces for frame pointer enabled kernels
+ or at the very end of the function after the stack frame has been
+ destroyed. This is an inherent limitation of frame pointers.
+
+-b) 100% reliable stack traces for DWARF enabled kernels
+-
+- (NOTE: This is not yet implemented)
+-
+- As an alternative to frame pointers, DWARF Call Frame Information
+- (CFI) metadata can be used to walk the stack. Unlike frame pointers,
+- CFI metadata is out of band. So it doesn't affect runtime
+- performance and it can be reliable even when interrupts or exceptions
+- are involved.
+-
+- For C code, gcc automatically generates DWARF CFI metadata. But for
+- asm code, generating CFI is a tedious manual approach which requires
+- manually placed .cfi assembler macros to be scattered throughout the
+- code. It's clumsy and very easy to get wrong, and it makes the real
+- code harder to read.
+-
+- Stacktool will improve this situation in several ways. For code
+- which already has CFI annotations, it will validate them. For code
+- which doesn't have CFI annotations, it will generate them. So an
+- architecture can opt to strip out all the manual .cfi annotations
+- from their asm code and have objtool generate them instead.
++b) ORC (Oops Rewind Capability) unwind table generation
+
+- We might also add a runtime stack validation debug option where we
+- periodically walk the stack from schedule() and/or an NMI to ensure
+- that the stack metadata is sane and that we reach the bottom of the
+- stack.
++ An alternative to frame pointers and DWARF, ORC unwind data can be
++ used to walk the stack. Unlike frame pointers, ORC data is out of
++ band. So it doesn't affect runtime performance and it can be
++ reliable even when interrupts or exceptions are involved.
+
+- So the benefit of objtool here will be that external tooling should
+- always show perfect stack traces. And the same will be true for
+- kernel warning/oops traces if the architecture has a runtime DWARF
+- unwinder.
++ For more details, see Documentation/x86/orc-unwinder.txt.
+
+ c) Higher live patching compatibility rate
+
+- (NOTE: This is not yet implemented)
+-
+- Currently with CONFIG_LIVEPATCH there's a basic live patching
+- framework which is safe for roughly 85-90% of "security" fixes. But
+- patches can't have complex features like function dependency or
+- prototype changes, or data structure changes.
+-
+- There's a strong need to support patches which have the more complex
+- features so that the patch compatibility rate for security fixes can
+- eventually approach something resembling 100%. To achieve that, a
+- "consistency model" is needed, which allows tasks to be safely
+- transitioned from an unpatched state to a patched state.
+-
+- One of the key requirements of the currently proposed livepatch
+- consistency model [*] is that it needs to walk the stack of each
+- sleeping task to determine if it can be transitioned to the patched
+- state. If objtool can ensure that stack traces are reliable, this
+- consistency model can be used and the live patching compatibility
+- rate can be improved significantly.
+-
+- [*] https://lkml.kernel.org/r/cover.1423499826.git.jpoimboe@redhat.com
++ Livepatch has an optional "consistency model", which is needed for
++ more complex patches. In order for the consistency model to work,
++ stack traces need to be reliable (or an unreliable condition needs to
++ be detectable). Objtool makes that possible.
+
++ For more details, see the livepatch documentation in the Linux kernel
++ source tree at Documentation/livepatch/livepatch.txt.
+
+ Rules
+ -----
+@@ -201,80 +164,84 @@ To achieve the validation, objtool enforces the following rules:
+ return normally.
+
+
+-Errors in .S files
+-------------------
++Objtool warnings
++----------------
++
++For asm files, if you're getting an error which doesn't make sense,
++first make sure that the affected code follows the above rules.
+
+-If you're getting an error in a compiled .S file which you don't
+-understand, first make sure that the affected code follows the above
+-rules.
++For C files, the common culprits are inline asm statements and calls to
++"noreturn" functions. See below for more details.
++
++Another possible cause for errors in C code is if the Makefile removes
++-fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options.
+
+ Here are some examples of common warnings reported by objtool, what
+ they mean, and suggestions for how to fix them.
+
+
+-1. asm_file.o: warning: objtool: func()+0x128: call without frame pointer save/setup
++1. file.o: warning: objtool: func()+0x128: call without frame pointer save/setup
+
+ The func() function made a function call without first saving and/or
+- updating the frame pointer.
+-
+- If func() is indeed a callable function, add proper frame pointer
+- logic using the FRAME_BEGIN and FRAME_END macros. Otherwise, remove
+- its ELF function annotation by changing ENDPROC to END.
+-
+- If you're getting this error in a .c file, see the "Errors in .c
+- files" section.
++ updating the frame pointer, and CONFIG_FRAME_POINTER is enabled.
+
++ If the error is for an asm file, and func() is indeed a callable
++ function, add proper frame pointer logic using the FRAME_BEGIN and
++ FRAME_END macros. Otherwise, if it's not a callable function, remove
++ its ELF function annotation by changing ENDPROC to END, and instead
++ use the manual unwind hint macros in asm/unwind_hints.h.
+
+-2. asm_file.o: warning: objtool: .text+0x53: return instruction outside of a callable function
++ If it's a GCC-compiled .c file, the error may be because the function
++ uses an inline asm() statement which has a "call" instruction. An
++ asm() statement with a call instruction must declare the use of the
++ stack pointer in its output operand. On x86_64, this means adding
++ the ASM_CALL_CONSTRAINT as an output constraint:
+
+- A return instruction was detected, but objtool couldn't find a way
+- for a callable function to reach the instruction.
++ asm volatile("call func" : ASM_CALL_CONSTRAINT);
+
+- If the return instruction is inside (or reachable from) a callable
+- function, the function needs to be annotated with the ENTRY/ENDPROC
+- macros.
++ Otherwise the stack frame may not get created before the call.
+
+- If you _really_ need a return instruction outside of a function, and
+- are 100% sure that it won't affect stack traces, you can tell
+- objtool to ignore it. See the "Adding exceptions" section below.
+
++2. file.o: warning: objtool: .text+0x53: unreachable instruction
+
+-3. asm_file.o: warning: objtool: func()+0x9: function has unreachable instruction
++ Objtool couldn't find a code path to reach the instruction.
+
+- The instruction lives inside of a callable function, but there's no
+- possible control flow path from the beginning of the function to the
+- instruction.
++ If the error is for an asm file, and the instruction is inside (or
++ reachable from) a callable function, the function should be annotated
++ with the ENTRY/ENDPROC macros (ENDPROC is the important one).
++ Otherwise, the code should probably be annotated with the unwind hint
++ macros in asm/unwind_hints.h so objtool and the unwinder can know the
++ stack state associated with the code.
+
+- If the instruction is actually needed, and it's actually in a
+- callable function, ensure that its function is properly annotated
+- with ENTRY/ENDPROC.
++ If you're 100% sure the code won't affect stack traces, or if you're
++   just a bad person, you can tell objtool to ignore it. See the
++ "Adding exceptions" section below.
+
+ If it's not actually in a callable function (e.g. kernel entry code),
+ change ENDPROC to END.
+
+
+-4. asm_file.o: warning: objtool: func(): can't find starting instruction
++4. file.o: warning: objtool: func(): can't find starting instruction
+ or
+- asm_file.o: warning: objtool: func()+0x11dd: can't decode instruction
++ file.o: warning: objtool: func()+0x11dd: can't decode instruction
+
+- Did you put data in a text section? If so, that can confuse
++ Does the file have data in a text section? If so, that can confuse
+ objtool's instruction decoder. Move the data to a more appropriate
+ section like .data or .rodata.
+
+
+-5. asm_file.o: warning: objtool: func()+0x6: kernel entry/exit from callable instruction
+-
+- This is a kernel entry/exit instruction like sysenter or sysret.
+- Such instructions aren't allowed in a callable function, and are most
+- likely part of the kernel entry code.
++5. file.o: warning: objtool: func()+0x6: unsupported instruction in callable function
+
+- If the instruction isn't actually in a callable function, change
+- ENDPROC to END.
++ This is a kernel entry/exit instruction like sysenter or iret. Such
++ instructions aren't allowed in a callable function, and are most
++ likely part of the kernel entry code. They should usually not have
++ the callable function annotation (ENDPROC) and should always be
++ annotated with the unwind hint macros in asm/unwind_hints.h.
+
+
+-6. asm_file.o: warning: objtool: func()+0x26: sibling call from callable instruction with changed frame pointer
++6. file.o: warning: objtool: func()+0x26: sibling call from callable instruction with modified stack frame
+
+- This is a dynamic jump or a jump to an undefined symbol. Stacktool
++ This is a dynamic jump or a jump to an undefined symbol. Objtool
+ assumed it's a sibling call and detected that the frame pointer
+ wasn't first restored to its original state.
+
+@@ -282,24 +249,28 @@ they mean, and suggestions for how to fix them.
+ destination code to the local file.
+
+ If the instruction is not actually in a callable function (e.g.
+- kernel entry code), change ENDPROC to END.
++ kernel entry code), change ENDPROC to END and annotate manually with
++ the unwind hint macros in asm/unwind_hints.h.
+
+
+-7. asm_file: warning: objtool: func()+0x5c: frame pointer state mismatch
++7. file: warning: objtool: func()+0x5c: stack state mismatch
+
+ The instruction's frame pointer state is inconsistent, depending on
+ which execution path was taken to reach the instruction.
+
+- Make sure the function pushes and sets up the frame pointer (for
+- x86_64, this means rbp) at the beginning of the function and pops it
+- at the end of the function. Also make sure that no other code in the
+- function touches the frame pointer.
++ Make sure that, when CONFIG_FRAME_POINTER is enabled, the function
++ pushes and sets up the frame pointer (for x86_64, this means rbp) at
++ the beginning of the function and pops it at the end of the function.
++ Also make sure that no other code in the function touches the frame
++ pointer.
+
++ Another possibility is that the code has some asm or inline asm which
++ does some unusual things to the stack or the frame pointer. In such
++ cases it's probably appropriate to use the unwind hint macros in
++ asm/unwind_hints.h.
+
+-Errors in .c files
+-------------------
+
+-1. c_file.o: warning: objtool: funcA() falls through to next function funcB()
++8. file.o: warning: objtool: funcA() falls through to next function funcB()
+
+ This means that funcA() doesn't end with a return instruction or an
+ unconditional jump, and that objtool has determined that the function
+@@ -318,22 +289,6 @@ Errors in .c files
+ might be corrupt due to a gcc bug. For more details, see:
+ https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70646
+
+-2. If you're getting any other objtool error in a compiled .c file, it
+- may be because the file uses an asm() statement which has a "call"
+- instruction. An asm() statement with a call instruction must declare
+- the use of the stack pointer in its output operand. For example, on
+- x86_64:
+-
+- register void *__sp asm("rsp");
+- asm volatile("call func" : "+r" (__sp));
+-
+- Otherwise the stack frame may not get created before the call.
+-
+-3. Another possible cause for errors in C code is if the Makefile removes
+- -fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options.
+-
+-Also see the above section for .S file errors for more information what
+-the individual error messages mean.
+
+ If the error doesn't seem to make sense, it could be a bug in objtool.
+ Feel free to ask the objtool maintainer for help.
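For the ASM_CALL_CONSTRAINT advice above, a slightly fuller sketch (assumptions: the constraint is provided by asm/asm.h as on mainline kernels, and "my_helper" is a placeholder symbol, not something from this patch):

#include <asm/asm.h>	/* ASM_CALL_CONSTRAINT / current_stack_pointer */

static inline void call_with_constraint(void)
{
	/*
	 * Declaring the stack pointer as an output operand tells GCC the
	 * asm uses the stack, so the frame is set up before the call.
	 */
	asm volatile("call my_helper" : ASM_CALL_CONSTRAINT : : "memory");
}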
+diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
+index 041b493..e6acc28 100644
+--- a/tools/objtool/Makefile
++++ b/tools/objtool/Makefile
+@@ -1,3 +1,4 @@
++# SPDX-License-Identifier: GPL-2.0
+ include ../scripts/Makefile.include
+ include ../scripts/Makefile.arch
+
+@@ -6,17 +7,19 @@ ARCH := x86
+ endif
+
+ # always use the host compiler
+-CC = gcc
+-LD = ld
+-AR = ar
++HOSTCC ?= gcc
++HOSTLD ?= ld
++CC = $(HOSTCC)
++LD = $(HOSTLD)
++AR = ar
+
+ ifeq ($(srctree),)
+-srctree := $(patsubst %/,%,$(dir $(shell pwd)))
++srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+ srctree := $(patsubst %/,%,$(dir $(srctree)))
+ endif
+
+ SUBCMD_SRCDIR = $(srctree)/tools/lib/subcmd/
+-LIBSUBCMD_OUTPUT = $(if $(OUTPUT),$(OUTPUT),$(PWD)/)
++LIBSUBCMD_OUTPUT = $(if $(OUTPUT),$(OUTPUT),$(CURDIR)/)
+ LIBSUBCMD = $(LIBSUBCMD_OUTPUT)libsubcmd.a
+
+ OBJTOOL := $(OUTPUT)objtool
+@@ -24,8 +27,11 @@ OBJTOOL_IN := $(OBJTOOL)-in.o
+
+ all: $(OBJTOOL)
+
+-INCLUDES := -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi
+-CFLAGS += -Wall -Werror $(EXTRA_WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
++INCLUDES := -I$(srctree)/tools/include \
++ -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
++ -I$(srctree)/tools/objtool/arch/$(ARCH)/include
++WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
++CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
+ LDFLAGS += -lelf $(LIBSUBCMD)
+
+ # Allow old libelf to be used:
+@@ -39,19 +45,8 @@ include $(srctree)/tools/build/Makefile.include
+ $(OBJTOOL_IN): fixdep FORCE
+ @$(MAKE) $(build)=objtool
+
+-# Busybox's diff doesn't have -I, avoid warning in that case
+-#
+ $(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN)
+- @(diff -I 2>&1 | grep -q 'option requires an argument' && \
+- test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \
+- diff -I'^#include' arch/x86/insn/insn.c ../../arch/x86/lib/insn.c >/dev/null && \
+- diff -I'^#include' arch/x86/insn/inat.c ../../arch/x86/lib/inat.c >/dev/null && \
+- diff arch/x86/insn/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null && \
+- diff arch/x86/insn/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null && \
+- diff -I'^#include' arch/x86/insn/insn.h ../../arch/x86/include/asm/insn.h >/dev/null && \
+- diff -I'^#include' arch/x86/insn/inat.h ../../arch/x86/include/asm/inat.h >/dev/null && \
+- diff -I'^#include' arch/x86/insn/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) \
+- || echo "warning: objtool: x86 instruction decoder differs from kernel" >&2 )) || true
++ @$(CONFIG_SHELL) ./sync-check.sh
+ $(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@
+
+
+@@ -61,7 +56,7 @@ $(LIBSUBCMD): fixdep FORCE
+ clean:
+ $(call QUIET_CLEAN, objtool) $(RM) $(OBJTOOL)
+ $(Q)find $(OUTPUT) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
+- $(Q)$(RM) $(OUTPUT)arch/x86/insn/inat-tables.c $(OUTPUT)fixdep
++ $(Q)$(RM) $(OUTPUT)arch/x86/lib/inat-tables.c $(OUTPUT)fixdep
+
+ FORCE:
+
+diff --git a/tools/objtool/arch.h b/tools/objtool/arch.h
+index a59e061..b0d7dc3 100644
+--- a/tools/objtool/arch.h
++++ b/tools/objtool/arch.h
+@@ -19,25 +19,64 @@
+ #define _ARCH_H
+
+ #include <stdbool.h>
++#include <linux/list.h>
+ #include "elf.h"
++#include "cfi.h"
+
+-#define INSN_FP_SAVE 1
+-#define INSN_FP_SETUP 2
+-#define INSN_FP_RESTORE 3
+-#define INSN_JUMP_CONDITIONAL 4
+-#define INSN_JUMP_UNCONDITIONAL 5
+-#define INSN_JUMP_DYNAMIC 6
+-#define INSN_CALL 7
+-#define INSN_CALL_DYNAMIC 8
+-#define INSN_RETURN 9
+-#define INSN_CONTEXT_SWITCH 10
+-#define INSN_NOP 11
+-#define INSN_OTHER 12
++#define INSN_JUMP_CONDITIONAL 1
++#define INSN_JUMP_UNCONDITIONAL 2
++#define INSN_JUMP_DYNAMIC 3
++#define INSN_CALL 4
++#define INSN_CALL_DYNAMIC 5
++#define INSN_RETURN 6
++#define INSN_CONTEXT_SWITCH 7
++#define INSN_STACK 8
++#define INSN_BUG 9
++#define INSN_NOP 10
++#define INSN_OTHER 11
+ #define INSN_LAST INSN_OTHER
+
++enum op_dest_type {
++ OP_DEST_REG,
++ OP_DEST_REG_INDIRECT,
++ OP_DEST_MEM,
++ OP_DEST_PUSH,
++ OP_DEST_LEAVE,
++};
++
++struct op_dest {
++ enum op_dest_type type;
++ unsigned char reg;
++ int offset;
++};
++
++enum op_src_type {
++ OP_SRC_REG,
++ OP_SRC_REG_INDIRECT,
++ OP_SRC_CONST,
++ OP_SRC_POP,
++ OP_SRC_ADD,
++ OP_SRC_AND,
++};
++
++struct op_src {
++ enum op_src_type type;
++ unsigned char reg;
++ int offset;
++};
++
++struct stack_op {
++ struct op_dest dest;
++ struct op_src src;
++};
++
++void arch_initial_func_cfi_state(struct cfi_state *state);
++
+ int arch_decode_instruction(struct elf *elf, struct section *sec,
+ unsigned long offset, unsigned int maxlen,
+ unsigned int *len, unsigned char *type,
+- unsigned long *displacement);
++ unsigned long *immediate, struct stack_op *op);
++
++bool arch_callee_saved_reg(unsigned char reg);
+
+ #endif /* _ARCH_H */
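As a concrete illustration of these types (a sketch, not code from the patch): a decoded "push %rbp" is reported to the checker as the following stack_op, mirroring the 0x50 ... 0x57 case in the x86 decoder further down.

#include "arch.h"

/* "push %rbp": the source is the BP register, the destination is a push. */
static const struct stack_op push_rbp_op = {
	.src  = { .type = OP_SRC_REG,  .reg = CFI_BP },
	.dest = { .type = OP_DEST_PUSH },
};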
+diff --git a/tools/objtool/arch/x86/Build b/tools/objtool/arch/x86/Build
+index debbdb0..b998412 100644
+--- a/tools/objtool/arch/x86/Build
++++ b/tools/objtool/arch/x86/Build
+@@ -1,12 +1,12 @@
+ objtool-y += decode.o
+
+-inat_tables_script = arch/x86/insn/gen-insn-attr-x86.awk
+-inat_tables_maps = arch/x86/insn/x86-opcode-map.txt
++inat_tables_script = arch/x86/tools/gen-insn-attr-x86.awk
++inat_tables_maps = arch/x86/lib/x86-opcode-map.txt
+
+-$(OUTPUT)arch/x86/insn/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
++$(OUTPUT)arch/x86/lib/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
+ $(call rule_mkdir)
+ $(Q)$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@
+
+-$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/insn/inat-tables.c
++$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/lib/inat-tables.c
+
+-CFLAGS_decode.o += -I$(OUTPUT)arch/x86/insn
++CFLAGS_decode.o += -I$(OUTPUT)arch/x86/lib
+diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
+index 9fb487f..006b6d7 100644
+--- a/tools/objtool/arch/x86/decode.c
++++ b/tools/objtool/arch/x86/decode.c
+@@ -19,14 +19,25 @@
+ #include <stdlib.h>
+
+ #define unlikely(cond) (cond)
+-#include "insn/insn.h"
+-#include "insn/inat.c"
+-#include "insn/insn.c"
++#include <asm/insn.h>
++#include "lib/inat.c"
++#include "lib/insn.c"
+
+ #include "../../elf.h"
+ #include "../../arch.h"
+ #include "../../warn.h"
+
++static unsigned char op_to_cfi_reg[][2] = {
++ {CFI_AX, CFI_R8},
++ {CFI_CX, CFI_R9},
++ {CFI_DX, CFI_R10},
++ {CFI_BX, CFI_R11},
++ {CFI_SP, CFI_R12},
++ {CFI_BP, CFI_R13},
++ {CFI_SI, CFI_R14},
++ {CFI_DI, CFI_R15},
++};
++
+ static int is_x86_64(struct elf *elf)
+ {
+ switch (elf->ehdr.e_machine) {
+@@ -40,24 +51,50 @@ static int is_x86_64(struct elf *elf)
+ }
+ }
+
++bool arch_callee_saved_reg(unsigned char reg)
++{
++ switch (reg) {
++ case CFI_BP:
++ case CFI_BX:
++ case CFI_R12:
++ case CFI_R13:
++ case CFI_R14:
++ case CFI_R15:
++ return true;
++
++ case CFI_AX:
++ case CFI_CX:
++ case CFI_DX:
++ case CFI_SI:
++ case CFI_DI:
++ case CFI_SP:
++ case CFI_R8:
++ case CFI_R9:
++ case CFI_R10:
++ case CFI_R11:
++ case CFI_RA:
++ default:
++ return false;
++ }
++}
++
+ int arch_decode_instruction(struct elf *elf, struct section *sec,
+ unsigned long offset, unsigned int maxlen,
+ unsigned int *len, unsigned char *type,
+- unsigned long *immediate)
++ unsigned long *immediate, struct stack_op *op)
+ {
+ struct insn insn;
+- int x86_64;
+- unsigned char op1, op2, ext;
++ int x86_64, sign;
++ unsigned char op1, op2, rex = 0, rex_b = 0, rex_r = 0, rex_w = 0,
++ rex_x = 0, modrm = 0, modrm_mod = 0, modrm_rm = 0,
++ modrm_reg = 0, sib = 0;
+
+ x86_64 = is_x86_64(elf);
+ if (x86_64 == -1)
+ return -1;
+
+- insn_init(&insn, (void *)(sec->data + offset), maxlen, x86_64);
++ insn_init(&insn, sec->data->d_buf + offset, maxlen, x86_64);
+ insn_get_length(&insn);
+- insn_get_opcode(&insn);
+- insn_get_modrm(&insn);
+- insn_get_immediate(&insn);
+
+ if (!insn_complete(&insn)) {
+ WARN_FUNC("can't decode instruction", sec, offset);
+@@ -73,67 +110,317 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
+ op1 = insn.opcode.bytes[0];
+ op2 = insn.opcode.bytes[1];
+
++ if (insn.rex_prefix.nbytes) {
++ rex = insn.rex_prefix.bytes[0];
++ rex_w = X86_REX_W(rex) >> 3;
++ rex_r = X86_REX_R(rex) >> 2;
++ rex_x = X86_REX_X(rex) >> 1;
++ rex_b = X86_REX_B(rex);
++ }
++
++ if (insn.modrm.nbytes) {
++ modrm = insn.modrm.bytes[0];
++ modrm_mod = X86_MODRM_MOD(modrm);
++ modrm_reg = X86_MODRM_REG(modrm);
++ modrm_rm = X86_MODRM_RM(modrm);
++ }
++
++ if (insn.sib.nbytes)
++ sib = insn.sib.bytes[0];
++
+ switch (op1) {
+- case 0x55:
+- if (!insn.rex_prefix.nbytes)
+- /* push rbp */
+- *type = INSN_FP_SAVE;
++
++ case 0x1:
++ case 0x29:
++ if (rex_w && !rex_b && modrm_mod == 3 && modrm_rm == 4) {
++
++ /* add/sub reg, %rsp */
++ *type = INSN_STACK;
++ op->src.type = OP_SRC_ADD;
++ op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
++ op->dest.type = OP_DEST_REG;
++ op->dest.reg = CFI_SP;
++ }
++ break;
++
++ case 0x50 ... 0x57:
++
++ /* push reg */
++ *type = INSN_STACK;
++ op->src.type = OP_SRC_REG;
++ op->src.reg = op_to_cfi_reg[op1 & 0x7][rex_b];
++ op->dest.type = OP_DEST_PUSH;
++
++ break;
++
++ case 0x58 ... 0x5f:
++
++ /* pop reg */
++ *type = INSN_STACK;
++ op->src.type = OP_SRC_POP;
++ op->dest.type = OP_DEST_REG;
++ op->dest.reg = op_to_cfi_reg[op1 & 0x7][rex_b];
++
+ break;
+
+- case 0x5d:
+- if (!insn.rex_prefix.nbytes)
+- /* pop rbp */
+- *type = INSN_FP_RESTORE;
++ case 0x68:
++ case 0x6a:
++ /* push immediate */
++ *type = INSN_STACK;
++ op->src.type = OP_SRC_CONST;
++ op->dest.type = OP_DEST_PUSH;
+ break;
+
+ case 0x70 ... 0x7f:
+ *type = INSN_JUMP_CONDITIONAL;
+ break;
+
++ case 0x81:
++ case 0x83:
++ if (rex != 0x48)
++ break;
++
++ if (modrm == 0xe4) {
++ /* and imm, %rsp */
++ *type = INSN_STACK;
++ op->src.type = OP_SRC_AND;
++ op->src.reg = CFI_SP;
++ op->src.offset = insn.immediate.value;
++ op->dest.type = OP_DEST_REG;
++ op->dest.reg = CFI_SP;
++ break;
++ }
++
++ if (modrm == 0xc4)
++ sign = 1;
++ else if (modrm == 0xec)
++ sign = -1;
++ else
++ break;
++
++ /* add/sub imm, %rsp */
++ *type = INSN_STACK;
++ op->src.type = OP_SRC_ADD;
++ op->src.reg = CFI_SP;
++ op->src.offset = insn.immediate.value * sign;
++ op->dest.type = OP_DEST_REG;
++ op->dest.reg = CFI_SP;
++ break;
++
+ case 0x89:
+- if (insn.rex_prefix.nbytes == 1 &&
+- insn.rex_prefix.bytes[0] == 0x48 &&
+- insn.modrm.nbytes && insn.modrm.bytes[0] == 0xe5)
+- /* mov rsp, rbp */
+- *type = INSN_FP_SETUP;
++ if (rex_w && !rex_r && modrm_mod == 3 && modrm_reg == 4) {
++
++ /* mov %rsp, reg */
++ *type = INSN_STACK;
++ op->src.type = OP_SRC_REG;
++ op->src.reg = CFI_SP;
++ op->dest.type = OP_DEST_REG;
++ op->dest.reg = op_to_cfi_reg[modrm_rm][rex_b];
++ break;
++ }
++
++ if (rex_w && !rex_b && modrm_mod == 3 && modrm_rm == 4) {
++
++ /* mov reg, %rsp */
++ *type = INSN_STACK;
++ op->src.type = OP_SRC_REG;
++ op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
++ op->dest.type = OP_DEST_REG;
++ op->dest.reg = CFI_SP;
++ break;
++ }
++
++ /* fallthrough */
++ case 0x88:
++ if (!rex_b &&
++ (modrm_mod == 1 || modrm_mod == 2) && modrm_rm == 5) {
++
++ /* mov reg, disp(%rbp) */
++ *type = INSN_STACK;
++ op->src.type = OP_SRC_REG;
++ op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
++ op->dest.type = OP_DEST_REG_INDIRECT;
++ op->dest.reg = CFI_BP;
++ op->dest.offset = insn.displacement.value;
++
++ } else if (rex_w && !rex_b && modrm_rm == 4 && sib == 0x24) {
++
++ /* mov reg, disp(%rsp) */
++ *type = INSN_STACK;
++ op->src.type = OP_SRC_REG;
++ op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
++ op->dest.type = OP_DEST_REG_INDIRECT;
++ op->dest.reg = CFI_SP;
++ op->dest.offset = insn.displacement.value;
++ }
++
++ break;
++
++ case 0x8b:
++ if (rex_w && !rex_b && modrm_mod == 1 && modrm_rm == 5) {
++
++ /* mov disp(%rbp), reg */
++ *type = INSN_STACK;
++ op->src.type = OP_SRC_REG_INDIRECT;
++ op->src.reg = CFI_BP;
++ op->src.offset = insn.displacement.value;
++ op->dest.type = OP_DEST_REG;
++ op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
++
++ } else if (rex_w && !rex_b && sib == 0x24 &&
++ modrm_mod != 3 && modrm_rm == 4) {
++
++ /* mov disp(%rsp), reg */
++ *type = INSN_STACK;
++ op->src.type = OP_SRC_REG_INDIRECT;
++ op->src.reg = CFI_SP;
++ op->src.offset = insn.displacement.value;
++ op->dest.type = OP_DEST_REG;
++ op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
++ }
++
+ break;
+
+ case 0x8d:
+- if (insn.rex_prefix.nbytes &&
+- insn.rex_prefix.bytes[0] == 0x48 &&
+- insn.modrm.nbytes && insn.modrm.bytes[0] == 0x2c &&
+- insn.sib.nbytes && insn.sib.bytes[0] == 0x24)
+- /* lea %(rsp), %rbp */
+- *type = INSN_FP_SETUP;
++ if (sib == 0x24 && rex_w && !rex_b && !rex_x) {
++
++ *type = INSN_STACK;
++ if (!insn.displacement.value) {
++ /* lea (%rsp), reg */
++ op->src.type = OP_SRC_REG;
++ } else {
++ /* lea disp(%rsp), reg */
++ op->src.type = OP_SRC_ADD;
++ op->src.offset = insn.displacement.value;
++ }
++ op->src.reg = CFI_SP;
++ op->dest.type = OP_DEST_REG;
++ op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
++
++ } else if (rex == 0x48 && modrm == 0x65) {
++
++ /* lea disp(%rbp), %rsp */
++ *type = INSN_STACK;
++ op->src.type = OP_SRC_ADD;
++ op->src.reg = CFI_BP;
++ op->src.offset = insn.displacement.value;
++ op->dest.type = OP_DEST_REG;
++ op->dest.reg = CFI_SP;
++
++ } else if (rex == 0x49 && modrm == 0x62 &&
++ insn.displacement.value == -8) {
++
++ /*
++ * lea -0x8(%r10), %rsp
++ *
++ * Restoring rsp back to its original value after a
++ * stack realignment.
++ */
++ *type = INSN_STACK;
++ op->src.type = OP_SRC_ADD;
++ op->src.reg = CFI_R10;
++ op->src.offset = -8;
++ op->dest.type = OP_DEST_REG;
++ op->dest.reg = CFI_SP;
++
++ } else if (rex == 0x49 && modrm == 0x65 &&
++ insn.displacement.value == -16) {
++
++ /*
++ * lea -0x10(%r13), %rsp
++ *
++ * Restoring rsp back to its original value after a
++ * stack realignment.
++ */
++ *type = INSN_STACK;
++ op->src.type = OP_SRC_ADD;
++ op->src.reg = CFI_R13;
++ op->src.offset = -16;
++ op->dest.type = OP_DEST_REG;
++ op->dest.reg = CFI_SP;
++ }
++
++ break;
++
++ case 0x8f:
++ /* pop to mem */
++ *type = INSN_STACK;
++ op->src.type = OP_SRC_POP;
++ op->dest.type = OP_DEST_MEM;
+ break;
+
+ case 0x90:
+ *type = INSN_NOP;
+ break;
+
++ case 0x9c:
++ /* pushf */
++ *type = INSN_STACK;
++ op->src.type = OP_SRC_CONST;
++ op->dest.type = OP_DEST_PUSH;
++ break;
++
++ case 0x9d:
++ /* popf */
++ *type = INSN_STACK;
++ op->src.type = OP_SRC_POP;
++ op->dest.type = OP_DEST_MEM;
++ break;
++
+ case 0x0f:
+- if (op2 >= 0x80 && op2 <= 0x8f)
++
++ if (op2 >= 0x80 && op2 <= 0x8f) {
++
+ *type = INSN_JUMP_CONDITIONAL;
+- else if (op2 == 0x05 || op2 == 0x07 || op2 == 0x34 ||
+- op2 == 0x35)
++
++ } else if (op2 == 0x05 || op2 == 0x07 || op2 == 0x34 ||
++ op2 == 0x35) {
++
+ /* sysenter, sysret */
+ *type = INSN_CONTEXT_SWITCH;
+- else if (op2 == 0x0d || op2 == 0x1f)
++
++ } else if (op2 == 0x0b || op2 == 0xb9) {
++
++ /* ud2 */
++ *type = INSN_BUG;
++
++ } else if (op2 == 0x0d || op2 == 0x1f) {
++
+ /* nopl/nopw */
+ *type = INSN_NOP;
+- else if (op2 == 0x01 && insn.modrm.nbytes &&
+- (insn.modrm.bytes[0] == 0xc2 ||
+- insn.modrm.bytes[0] == 0xd8))
+- /* vmlaunch, vmrun */
+- *type = INSN_CONTEXT_SWITCH;
++
++ } else if (op2 == 0xa0 || op2 == 0xa8) {
++
++ /* push fs/gs */
++ *type = INSN_STACK;
++ op->src.type = OP_SRC_CONST;
++ op->dest.type = OP_DEST_PUSH;
++
++ } else if (op2 == 0xa1 || op2 == 0xa9) {
++
++ /* pop fs/gs */
++ *type = INSN_STACK;
++ op->src.type = OP_SRC_POP;
++ op->dest.type = OP_DEST_MEM;
++ }
+
+ break;
+
+- case 0xc9: /* leave */
+- *type = INSN_FP_RESTORE;
++ case 0xc9:
++ /*
++ * leave
++ *
++ * equivalent to:
++ * mov bp, sp
++ * pop bp
++ */
++ *type = INSN_STACK;
++ op->dest.type = OP_DEST_LEAVE;
++
+ break;
+
+- case 0xe3: /* jecxz/jrcxz */
++ case 0xe3:
++ /* jecxz/jrcxz */
+ *type = INSN_JUMP_CONDITIONAL;
+ break;
+
+@@ -158,14 +445,27 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
+ break;
+
+ case 0xff:
+- ext = X86_MODRM_REG(insn.modrm.bytes[0]);
+- if (ext == 2 || ext == 3)
++ if (modrm_reg == 2 || modrm_reg == 3)
++
+ *type = INSN_CALL_DYNAMIC;
+- else if (ext == 4)
++
++ else if (modrm_reg == 4)
++
+ *type = INSN_JUMP_DYNAMIC;
+- else if (ext == 5) /*jmpf */
++
++ else if (modrm_reg == 5)
++
++ /* jmpf */
+ *type = INSN_CONTEXT_SWITCH;
+
++ else if (modrm_reg == 6) {
++
++ /* push from mem */
++ *type = INSN_STACK;
++ op->src.type = OP_SRC_CONST;
++ op->dest.type = OP_DEST_PUSH;
++ }
++
+ break;
+
+ default:
+@@ -176,3 +476,21 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
+
+ return 0;
+ }
++
++void arch_initial_func_cfi_state(struct cfi_state *state)
++{
++ int i;
++
++ for (i = 0; i < CFI_NUM_REGS; i++) {
++ state->regs[i].base = CFI_UNDEFINED;
++ state->regs[i].offset = 0;
++ }
++
++ /* initial CFA (call frame address) */
++ state->cfa.base = CFI_SP;
++ state->cfa.offset = 8;
++
++ /* initial RA (return address) */
++ state->regs[16].base = CFI_CFA;
++ state->regs[16].offset = -8;
++}
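To spell out what that initial state means (an illustrative sketch, not part of objtool or the patch; the struct and helper names are hypothetical): at a function's first instruction the CALL has already pushed the return address, so the call frame address is SP + 8 and the return address sits at CFA - 8, which is exactly what arch_initial_func_cfi_state() encodes.

#include <stdint.h>

struct toy_regs { uint64_t sp, ip; };

/* Hypothetical single unwind step at a function entry point. */
static void toy_unwind_entry_frame(struct toy_regs *regs,
				   uint64_t (*read_u64)(uint64_t addr))
{
	uint64_t cfa = regs->sp + 8;	/* cfa.base = CFI_SP, cfa.offset = 8 */

	regs->ip = read_u64(cfa - 8);	/* RA slot: regs[16] at CFA - 8 */
	regs->sp = cfa;			/* the caller's SP is the CFA itself */
}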
+diff --git a/tools/objtool/arch/x86/include/asm/inat.h b/tools/objtool/arch/x86/include/asm/inat.h
+new file mode 100644
+index 0000000..1c78580
+--- /dev/null
++++ b/tools/objtool/arch/x86/include/asm/inat.h
+@@ -0,0 +1,244 @@
++#ifndef _ASM_X86_INAT_H
++#define _ASM_X86_INAT_H
++/*
++ * x86 instruction attributes
++ *
++ * Written by Masami Hiramatsu <mhiramat@redhat.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ *
++ */
++#include <asm/inat_types.h>
++
++/*
++ * Internal bits. Don't use bitmasks directly, because these bits are
++ * unstable. You should use checking functions.
++ */
++
++#define INAT_OPCODE_TABLE_SIZE 256
++#define INAT_GROUP_TABLE_SIZE 8
++
++/* Legacy last prefixes */
++#define INAT_PFX_OPNDSZ 1 /* 0x66 */ /* LPFX1 */
++#define INAT_PFX_REPE 2 /* 0xF3 */ /* LPFX2 */
++#define INAT_PFX_REPNE 3 /* 0xF2 */ /* LPFX3 */
++/* Other Legacy prefixes */
++#define INAT_PFX_LOCK 4 /* 0xF0 */
++#define INAT_PFX_CS 5 /* 0x2E */
++#define INAT_PFX_DS 6 /* 0x3E */
++#define INAT_PFX_ES 7 /* 0x26 */
++#define INAT_PFX_FS 8 /* 0x64 */
++#define INAT_PFX_GS 9 /* 0x65 */
++#define INAT_PFX_SS 10 /* 0x36 */
++#define INAT_PFX_ADDRSZ 11 /* 0x67 */
++/* x86-64 REX prefix */
++#define INAT_PFX_REX 12 /* 0x4X */
++/* AVX VEX prefixes */
++#define INAT_PFX_VEX2 13 /* 2-bytes VEX prefix */
++#define INAT_PFX_VEX3 14 /* 3-bytes VEX prefix */
++#define INAT_PFX_EVEX 15 /* EVEX prefix */
++
++#define INAT_LSTPFX_MAX 3
++#define INAT_LGCPFX_MAX 11
++
++/* Immediate size */
++#define INAT_IMM_BYTE 1
++#define INAT_IMM_WORD 2
++#define INAT_IMM_DWORD 3
++#define INAT_IMM_QWORD 4
++#define INAT_IMM_PTR 5
++#define INAT_IMM_VWORD32 6
++#define INAT_IMM_VWORD 7
++
++/* Legacy prefix */
++#define INAT_PFX_OFFS 0
++#define INAT_PFX_BITS 4
++#define INAT_PFX_MAX ((1 << INAT_PFX_BITS) - 1)
++#define INAT_PFX_MASK (INAT_PFX_MAX << INAT_PFX_OFFS)
++/* Escape opcodes */
++#define INAT_ESC_OFFS (INAT_PFX_OFFS + INAT_PFX_BITS)
++#define INAT_ESC_BITS 2
++#define INAT_ESC_MAX ((1 << INAT_ESC_BITS) - 1)
++#define INAT_ESC_MASK (INAT_ESC_MAX << INAT_ESC_OFFS)
++/* Group opcodes (1-16) */
++#define INAT_GRP_OFFS (INAT_ESC_OFFS + INAT_ESC_BITS)
++#define INAT_GRP_BITS 5
++#define INAT_GRP_MAX ((1 << INAT_GRP_BITS) - 1)
++#define INAT_GRP_MASK (INAT_GRP_MAX << INAT_GRP_OFFS)
++/* Immediates */
++#define INAT_IMM_OFFS (INAT_GRP_OFFS + INAT_GRP_BITS)
++#define INAT_IMM_BITS 3
++#define INAT_IMM_MASK (((1 << INAT_IMM_BITS) - 1) << INAT_IMM_OFFS)
++/* Flags */
++#define INAT_FLAG_OFFS (INAT_IMM_OFFS + INAT_IMM_BITS)
++#define INAT_MODRM (1 << (INAT_FLAG_OFFS))
++#define INAT_FORCE64 (1 << (INAT_FLAG_OFFS + 1))
++#define INAT_SCNDIMM (1 << (INAT_FLAG_OFFS + 2))
++#define INAT_MOFFSET (1 << (INAT_FLAG_OFFS + 3))
++#define INAT_VARIANT (1 << (INAT_FLAG_OFFS + 4))
++#define INAT_VEXOK (1 << (INAT_FLAG_OFFS + 5))
++#define INAT_VEXONLY (1 << (INAT_FLAG_OFFS + 6))
++#define INAT_EVEXONLY (1 << (INAT_FLAG_OFFS + 7))
++/* Attribute making macros for attribute tables */
++#define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS)
++#define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS)
++#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM)
++#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS)
++
++/* Identifiers for segment registers */
++#define INAT_SEG_REG_IGNORE 0
++#define INAT_SEG_REG_DEFAULT 1
++#define INAT_SEG_REG_CS 2
++#define INAT_SEG_REG_SS 3
++#define INAT_SEG_REG_DS 4
++#define INAT_SEG_REG_ES 5
++#define INAT_SEG_REG_FS 6
++#define INAT_SEG_REG_GS 7
++
++/* Attribute search APIs */
++extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
++extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
++extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode,
++ int lpfx_id,
++ insn_attr_t esc_attr);
++extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm,
++ int lpfx_id,
++ insn_attr_t esc_attr);
++extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode,
++ insn_byte_t vex_m,
++ insn_byte_t vex_pp);
++
++/* Attribute checking functions */
++static inline int inat_is_legacy_prefix(insn_attr_t attr)
++{
++ attr &= INAT_PFX_MASK;
++ return attr && attr <= INAT_LGCPFX_MAX;
++}
++
++static inline int inat_is_address_size_prefix(insn_attr_t attr)
++{
++ return (attr & INAT_PFX_MASK) == INAT_PFX_ADDRSZ;
++}
++
++static inline int inat_is_operand_size_prefix(insn_attr_t attr)
++{
++ return (attr & INAT_PFX_MASK) == INAT_PFX_OPNDSZ;
++}
++
++static inline int inat_is_rex_prefix(insn_attr_t attr)
++{
++ return (attr & INAT_PFX_MASK) == INAT_PFX_REX;
++}
++
++static inline int inat_last_prefix_id(insn_attr_t attr)
++{
++ if ((attr & INAT_PFX_MASK) > INAT_LSTPFX_MAX)
++ return 0;
++ else
++ return attr & INAT_PFX_MASK;
++}
++
++static inline int inat_is_vex_prefix(insn_attr_t attr)
++{
++ attr &= INAT_PFX_MASK;
++ return attr == INAT_PFX_VEX2 || attr == INAT_PFX_VEX3 ||
++ attr == INAT_PFX_EVEX;
++}
++
++static inline int inat_is_evex_prefix(insn_attr_t attr)
++{
++ return (attr & INAT_PFX_MASK) == INAT_PFX_EVEX;
++}
++
++static inline int inat_is_vex3_prefix(insn_attr_t attr)
++{
++ return (attr & INAT_PFX_MASK) == INAT_PFX_VEX3;
++}
++
++static inline int inat_is_escape(insn_attr_t attr)
++{
++ return attr & INAT_ESC_MASK;
++}
++
++static inline int inat_escape_id(insn_attr_t attr)
++{
++ return (attr & INAT_ESC_MASK) >> INAT_ESC_OFFS;
++}
++
++static inline int inat_is_group(insn_attr_t attr)
++{
++ return attr & INAT_GRP_MASK;
++}
++
++static inline int inat_group_id(insn_attr_t attr)
++{
++ return (attr & INAT_GRP_MASK) >> INAT_GRP_OFFS;
++}
++
++static inline int inat_group_common_attribute(insn_attr_t attr)
++{
++ return attr & ~INAT_GRP_MASK;
++}
++
++static inline int inat_has_immediate(insn_attr_t attr)
++{
++ return attr & INAT_IMM_MASK;
++}
++
++static inline int inat_immediate_size(insn_attr_t attr)
++{
++ return (attr & INAT_IMM_MASK) >> INAT_IMM_OFFS;
++}
++
++static inline int inat_has_modrm(insn_attr_t attr)
++{
++ return attr & INAT_MODRM;
++}
++
++static inline int inat_is_force64(insn_attr_t attr)
++{
++ return attr & INAT_FORCE64;
++}
++
++static inline int inat_has_second_immediate(insn_attr_t attr)
++{
++ return attr & INAT_SCNDIMM;
++}
++
++static inline int inat_has_moffset(insn_attr_t attr)
++{
++ return attr & INAT_MOFFSET;
++}
++
++static inline int inat_has_variant(insn_attr_t attr)
++{
++ return attr & INAT_VARIANT;
++}
++
++static inline int inat_accept_vex(insn_attr_t attr)
++{
++ return attr & INAT_VEXOK;
++}
++
++static inline int inat_must_vex(insn_attr_t attr)
++{
++ return attr & (INAT_VEXONLY | INAT_EVEXONLY);
++}
++
++static inline int inat_must_evex(insn_attr_t attr)
++{
++ return attr & INAT_EVEXONLY;
++}
++#endif
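A tiny self-check of the attribute encoding above (illustrative, not from the patch; it assumes the objtool include path so that <asm/inat.h> resolves to this header): build a group attribute with a byte immediate and read the pieces back with the checker helpers.

#include <assert.h>
#include <asm/inat.h>

int main(void)
{
	insn_attr_t attr = INAT_MAKE_GROUP(5) | INAT_MAKE_IMM(INAT_IMM_BYTE);

	assert(inat_is_group(attr));
	assert(inat_group_id(attr) == 5);
	assert(inat_has_modrm(attr));			/* groups imply ModRM */
	assert(inat_has_immediate(attr));
	assert(inat_immediate_size(attr) == INAT_IMM_BYTE);
	return 0;
}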
+diff --git a/tools/objtool/arch/x86/include/asm/inat_types.h b/tools/objtool/arch/x86/include/asm/inat_types.h
+new file mode 100644
+index 0000000..cb3c20c
+--- /dev/null
++++ b/tools/objtool/arch/x86/include/asm/inat_types.h
+@@ -0,0 +1,29 @@
++#ifndef _ASM_X86_INAT_TYPES_H
++#define _ASM_X86_INAT_TYPES_H
++/*
++ * x86 instruction attributes
++ *
++ * Written by Masami Hiramatsu <mhiramat@redhat.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ *
++ */
++
++/* Instruction attributes */
++typedef unsigned int insn_attr_t;
++typedef unsigned char insn_byte_t;
++typedef signed int insn_value_t;
++
++#endif
+diff --git a/tools/objtool/arch/x86/include/asm/insn.h b/tools/objtool/arch/x86/include/asm/insn.h
+new file mode 100644
+index 0000000..b3e32b0
+--- /dev/null
++++ b/tools/objtool/arch/x86/include/asm/insn.h
+@@ -0,0 +1,211 @@
++#ifndef _ASM_X86_INSN_H
++#define _ASM_X86_INSN_H
++/*
++ * x86 instruction analysis
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * Copyright (C) IBM Corporation, 2009
++ */
++
++/* insn_attr_t is defined in inat.h */
++#include <asm/inat.h>
++
++struct insn_field {
++ union {
++ insn_value_t value;
++ insn_byte_t bytes[4];
++ };
++ /* !0 if we've run insn_get_xxx() for this field */
++ unsigned char got;
++ unsigned char nbytes;
++};
++
++struct insn {
++ struct insn_field prefixes; /*
++ * Prefixes
++ * prefixes.bytes[3]: last prefix
++ */
++ struct insn_field rex_prefix; /* REX prefix */
++ struct insn_field vex_prefix; /* VEX prefix */
++ struct insn_field opcode; /*
++ * opcode.bytes[0]: opcode1
++ * opcode.bytes[1]: opcode2
++ * opcode.bytes[2]: opcode3
++ */
++ struct insn_field modrm;
++ struct insn_field sib;
++ struct insn_field displacement;
++ union {
++ struct insn_field immediate;
++ struct insn_field moffset1; /* for 64bit MOV */
++ struct insn_field immediate1; /* for 64bit imm or off16/32 */
++ };
++ union {
++ struct insn_field moffset2; /* for 64bit MOV */
++ struct insn_field immediate2; /* for 64bit imm or seg16 */
++ };
++
++ insn_attr_t attr;
++ unsigned char opnd_bytes;
++ unsigned char addr_bytes;
++ unsigned char length;
++ unsigned char x86_64;
++
++ const insn_byte_t *kaddr; /* kernel address of insn to analyze */
++ const insn_byte_t *end_kaddr; /* kernel address of last insn in buffer */
++ const insn_byte_t *next_byte;
++};
++
++#define MAX_INSN_SIZE 15
++
++#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
++#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
++#define X86_MODRM_RM(modrm) ((modrm) & 0x07)
++
++#define X86_SIB_SCALE(sib) (((sib) & 0xc0) >> 6)
++#define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3)
++#define X86_SIB_BASE(sib) ((sib) & 0x07)
++
++#define X86_REX_W(rex) ((rex) & 8)
++#define X86_REX_R(rex) ((rex) & 4)
++#define X86_REX_X(rex) ((rex) & 2)
++#define X86_REX_B(rex) ((rex) & 1)
++
++/* VEX bit flags */
++#define X86_VEX_W(vex) ((vex) & 0x80) /* VEX3 Byte2 */
++#define X86_VEX_R(vex) ((vex) & 0x80) /* VEX2/3 Byte1 */
++#define X86_VEX_X(vex) ((vex) & 0x40) /* VEX3 Byte1 */
++#define X86_VEX_B(vex) ((vex) & 0x20) /* VEX3 Byte1 */
++#define X86_VEX_L(vex) ((vex) & 0x04) /* VEX3 Byte2, VEX2 Byte1 */
++/* VEX bit fields */
++#define X86_EVEX_M(vex) ((vex) & 0x03) /* EVEX Byte1 */
++#define X86_VEX3_M(vex) ((vex) & 0x1f) /* VEX3 Byte1 */
++#define X86_VEX2_M 1 /* VEX2.M always 1 */
++#define X86_VEX_V(vex) (((vex) & 0x78) >> 3) /* VEX3 Byte2, VEX2 Byte1 */
++#define X86_VEX_P(vex) ((vex) & 0x03) /* VEX3 Byte2, VEX2 Byte1 */
++#define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */
++
++extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64);
++extern void insn_get_prefixes(struct insn *insn);
++extern void insn_get_opcode(struct insn *insn);
++extern void insn_get_modrm(struct insn *insn);
++extern void insn_get_sib(struct insn *insn);
++extern void insn_get_displacement(struct insn *insn);
++extern void insn_get_immediate(struct insn *insn);
++extern void insn_get_length(struct insn *insn);
++
++/* Attribute will be determined after getting ModRM (for opcode groups) */
++static inline void insn_get_attribute(struct insn *insn)
++{
++ insn_get_modrm(insn);
++}
++
++/* Instruction uses RIP-relative addressing */
++extern int insn_rip_relative(struct insn *insn);
++
++/* Init insn for kernel text */
++static inline void kernel_insn_init(struct insn *insn,
++ const void *kaddr, int buf_len)
++{
++#ifdef CONFIG_X86_64
++ insn_init(insn, kaddr, buf_len, 1);
++#else /* CONFIG_X86_32 */
++ insn_init(insn, kaddr, buf_len, 0);
++#endif
++}
++
++static inline int insn_is_avx(struct insn *insn)
++{
++ if (!insn->prefixes.got)
++ insn_get_prefixes(insn);
++ return (insn->vex_prefix.value != 0);
++}
++
++static inline int insn_is_evex(struct insn *insn)
++{
++ if (!insn->prefixes.got)
++ insn_get_prefixes(insn);
++ return (insn->vex_prefix.nbytes == 4);
++}
++
++/* Ensure this instruction is decoded completely */
++static inline int insn_complete(struct insn *insn)
++{
++ return insn->opcode.got && insn->modrm.got && insn->sib.got &&
++ insn->displacement.got && insn->immediate.got;
++}
++
++static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
++{
++ if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
++ return X86_VEX2_M;
++ else if (insn->vex_prefix.nbytes == 3) /* 3 bytes VEX */
++ return X86_VEX3_M(insn->vex_prefix.bytes[1]);
++ else /* EVEX */
++ return X86_EVEX_M(insn->vex_prefix.bytes[1]);
++}
++
++static inline insn_byte_t insn_vex_p_bits(struct insn *insn)
++{
++ if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
++ return X86_VEX_P(insn->vex_prefix.bytes[1]);
++ else
++ return X86_VEX_P(insn->vex_prefix.bytes[2]);
++}
++
++/* Get the last prefix id from last prefix or VEX prefix */
++static inline int insn_last_prefix_id(struct insn *insn)
++{
++ if (insn_is_avx(insn))
++ return insn_vex_p_bits(insn); /* VEX_p is a SIMD prefix id */
++
++ if (insn->prefixes.bytes[3])
++ return inat_get_last_prefix_id(insn->prefixes.bytes[3]);
++
++ return 0;
++}
++
++/* Offset of each field from kaddr */
++static inline int insn_offset_rex_prefix(struct insn *insn)
++{
++ return insn->prefixes.nbytes;
++}
++static inline int insn_offset_vex_prefix(struct insn *insn)
++{
++ return insn_offset_rex_prefix(insn) + insn->rex_prefix.nbytes;
++}
++static inline int insn_offset_opcode(struct insn *insn)
++{
++ return insn_offset_vex_prefix(insn) + insn->vex_prefix.nbytes;
++}
++static inline int insn_offset_modrm(struct insn *insn)
++{
++ return insn_offset_opcode(insn) + insn->opcode.nbytes;
++}
++static inline int insn_offset_sib(struct insn *insn)
++{
++ return insn_offset_modrm(insn) + insn->modrm.nbytes;
++}
++static inline int insn_offset_displacement(struct insn *insn)
++{
++ return insn_offset_sib(insn) + insn->sib.nbytes;
++}
++static inline int insn_offset_immediate(struct insn *insn)
++{
++ return insn_offset_displacement(insn) + insn->displacement.nbytes;
++}
++
++#endif /* _ASM_X86_INSN_H */
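And a short usage sketch for the decoder interface above (assumptions: it is built inside the objtool tree together with lib/insn.c and lib/inat.c, the same way decode.c uses them, so these declarations resolve and link):

#include <stdio.h>
#include <asm/insn.h>

int main(void)
{
	/* 48 89 e5 = "mov %rsp, %rbp": REX.W prefix, opcode 0x89, ModRM 0xe5 */
	const unsigned char code[] = { 0x48, 0x89, 0xe5 };
	struct insn insn;

	insn_init(&insn, code, sizeof(code), /* x86_64 */ 1);
	insn_get_length(&insn);		/* decodes every field on the way */

	printf("len=%u op=0x%02x modrm.reg=%u modrm.rm=%u rex.w=%u\n",
	       insn.length, insn.opcode.bytes[0],
	       X86_MODRM_REG(insn.modrm.bytes[0]),
	       X86_MODRM_RM(insn.modrm.bytes[0]),
	       X86_REX_W(insn.rex_prefix.bytes[0]) ? 1 : 0);
	return 0;
}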
+diff --git a/tools/objtool/arch/x86/include/asm/orc_types.h b/tools/objtool/arch/x86/include/asm/orc_types.h
+new file mode 100644
+index 0000000..9c9dc57
+--- /dev/null
++++ b/tools/objtool/arch/x86/include/asm/orc_types.h
+@@ -0,0 +1,107 @@
++/*
++ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2
++ * of the License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _ORC_TYPES_H
++#define _ORC_TYPES_H
++
++#include <linux/types.h>
++#include <linux/compiler.h>
++
++/*
++ * The ORC_REG_* registers are base registers which are used to find other
++ * registers on the stack.
++ *
++ * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the
++ * address of the previous frame: the caller's SP before it called the current
++ * function.
++ *
++ * ORC_REG_UNDEFINED means the corresponding register's value didn't change in
++ * the current frame.
++ *
++ * The most commonly used base registers are SP and BP -- which the previous SP
++ * is usually based on -- and PREV_SP and UNDEFINED -- which the previous BP is
++ * usually based on.
++ *
++ * The rest of the base registers are needed for special cases like entry code
++ * and GCC realigned stacks.
++ */
++#define ORC_REG_UNDEFINED 0
++#define ORC_REG_PREV_SP 1
++#define ORC_REG_DX 2
++#define ORC_REG_DI 3
++#define ORC_REG_BP 4
++#define ORC_REG_SP 5
++#define ORC_REG_R10 6
++#define ORC_REG_R13 7
++#define ORC_REG_BP_INDIRECT 8
++#define ORC_REG_SP_INDIRECT 9
++#define ORC_REG_MAX 15
++
++/*
++ * ORC_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP (the
++ * caller's SP right before it made the call). Used for all callable
++ * functions, i.e. all C code and all callable asm functions.
++ *
++ * ORC_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset points
++ * to a fully populated pt_regs from a syscall, interrupt, or exception.
++ *
++ * ORC_TYPE_REGS_IRET: Used in entry code to indicate that sp_reg+sp_offset
++ * points to the iret return frame.
++ *
++ * The UNWIND_HINT macros are used only for the unwind_hint struct. They
++ * aren't used in struct orc_entry due to size and complexity constraints.
++ * Objtool converts them to real types when it converts the hints to orc
++ * entries.
++ */
++#define ORC_TYPE_CALL 0
++#define ORC_TYPE_REGS 1
++#define ORC_TYPE_REGS_IRET 2
++#define UNWIND_HINT_TYPE_SAVE 3
++#define UNWIND_HINT_TYPE_RESTORE 4
++
++#ifndef __ASSEMBLY__
++/*
++ * This struct is more or less a vastly simplified version of the DWARF Call
++ * Frame Information standard. It contains only the necessary parts of DWARF
++ * CFI, simplified for ease of access by the in-kernel unwinder. It tells the
++ * unwinder how to find the previous SP and BP (and sometimes entry regs) on
++ * the stack for a given code address. Each instance of the struct corresponds
++ * to one or more code locations.
++ */
++struct orc_entry {
++ s16 sp_offset;
++ s16 bp_offset;
++ unsigned sp_reg:4;
++ unsigned bp_reg:4;
++ unsigned type:2;
++} __packed;
++
++/*
++ * This struct is used by asm and inline asm code to manually annotate the
++ * location of registers on the stack for the ORC unwinder.
++ *
++ * Type can be either ORC_TYPE_* or UNWIND_HINT_TYPE_*.
++ */
++struct unwind_hint {
++ u32 ip;
++ s16 sp_offset;
++ u8 sp_reg;
++ u8 type;
++};
++#endif /* __ASSEMBLY__ */
++
++#endif /* _ORC_TYPES_H */
+diff --git a/tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk b/tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk
+deleted file mode 100644
+index a3d2c62..0000000
+--- a/tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk
++++ /dev/null
+@@ -1,392 +0,0 @@
+-#!/bin/awk -f
+-# gen-insn-attr-x86.awk: Instruction attribute table generator
+-# Written by Masami Hiramatsu <mhiramat@redhat.com>
+-#
+-# Usage: awk -f gen-insn-attr-x86.awk x86-opcode-map.txt > inat-tables.c
+-
+-# Awk implementation sanity check
+-function check_awk_implement() {
+- if (sprintf("%x", 0) != "0")
+- return "Your awk has a printf-format problem."
+- return ""
+-}
+-
+-# Clear working vars
+-function clear_vars() {
+- delete table
+- delete lptable2
+- delete lptable1
+- delete lptable3
+- eid = -1 # escape id
+- gid = -1 # group id
+- aid = -1 # AVX id
+- tname = ""
+-}
+-
+-BEGIN {
+- # Implementation error checking
+- awkchecked = check_awk_implement()
+- if (awkchecked != "") {
+- print "Error: " awkchecked > "/dev/stderr"
+- print "Please try to use gawk." > "/dev/stderr"
+- exit 1
+- }
+-
+- # Setup generating tables
+- print "/* x86 opcode map generated from x86-opcode-map.txt */"
+- print "/* Do not change this code. */\n"
+- ggid = 1
+- geid = 1
+- gaid = 0
+- delete etable
+- delete gtable
+- delete atable
+-
+- opnd_expr = "^[A-Za-z/]"
+- ext_expr = "^\\("
+- sep_expr = "^\\|$"
+- group_expr = "^Grp[0-9A-Za-z]+"
+-
+- imm_expr = "^[IJAOL][a-z]"
+- imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
+- imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
+- imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)"
+- imm_flag["Id"] = "INAT_MAKE_IMM(INAT_IMM_DWORD)"
+- imm_flag["Iq"] = "INAT_MAKE_IMM(INAT_IMM_QWORD)"
+- imm_flag["Ap"] = "INAT_MAKE_IMM(INAT_IMM_PTR)"
+- imm_flag["Iz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)"
+- imm_flag["Jz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)"
+- imm_flag["Iv"] = "INAT_MAKE_IMM(INAT_IMM_VWORD)"
+- imm_flag["Ob"] = "INAT_MOFFSET"
+- imm_flag["Ov"] = "INAT_MOFFSET"
+- imm_flag["Lx"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
+-
+- modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])"
+- force64_expr = "\\([df]64\\)"
+- rex_expr = "^REX(\\.[XRWB]+)*"
+- fpu_expr = "^ESC" # TODO
+-
+- lprefix1_expr = "\\((66|!F3)\\)"
+- lprefix2_expr = "\\(F3\\)"
+- lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
+- lprefix_expr = "\\((66|F2|F3)\\)"
+- max_lprefix = 4
+-
+- # All opcodes starting with lower-case 'v', 'k' or with (v1) superscript
+- # accepts VEX prefix
+- vexok_opcode_expr = "^[vk].*"
+- vexok_expr = "\\(v1\\)"
+- # All opcodes with (v) superscript supports *only* VEX prefix
+- vexonly_expr = "\\(v\\)"
+- # All opcodes with (ev) superscript supports *only* EVEX prefix
+- evexonly_expr = "\\(ev\\)"
+-
+- prefix_expr = "\\(Prefix\\)"
+- prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ"
+- prefix_num["REPNE"] = "INAT_PFX_REPNE"
+- prefix_num["REP/REPE"] = "INAT_PFX_REPE"
+- prefix_num["XACQUIRE"] = "INAT_PFX_REPNE"
+- prefix_num["XRELEASE"] = "INAT_PFX_REPE"
+- prefix_num["LOCK"] = "INAT_PFX_LOCK"
+- prefix_num["SEG=CS"] = "INAT_PFX_CS"
+- prefix_num["SEG=DS"] = "INAT_PFX_DS"
+- prefix_num["SEG=ES"] = "INAT_PFX_ES"
+- prefix_num["SEG=FS"] = "INAT_PFX_FS"
+- prefix_num["SEG=GS"] = "INAT_PFX_GS"
+- prefix_num["SEG=SS"] = "INAT_PFX_SS"
+- prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ"
+- prefix_num["VEX+1byte"] = "INAT_PFX_VEX2"
+- prefix_num["VEX+2byte"] = "INAT_PFX_VEX3"
+- prefix_num["EVEX"] = "INAT_PFX_EVEX"
+-
+- clear_vars()
+-}
+-
+-function semantic_error(msg) {
+- print "Semantic error at " NR ": " msg > "/dev/stderr"
+- exit 1
+-}
+-
+-function debug(msg) {
+- print "DEBUG: " msg
+-}
+-
+-function array_size(arr, i,c) {
+- c = 0
+- for (i in arr)
+- c++
+- return c
+-}
+-
+-/^Table:/ {
+- print "/* " $0 " */"
+- if (tname != "")
+- semantic_error("Hit Table: before EndTable:.");
+-}
+-
+-/^Referrer:/ {
+- if (NF != 1) {
+- # escape opcode table
+- ref = ""
+- for (i = 2; i <= NF; i++)
+- ref = ref $i
+- eid = escape[ref]
+- tname = sprintf("inat_escape_table_%d", eid)
+- }
+-}
+-
+-/^AVXcode:/ {
+- if (NF != 1) {
+- # AVX/escape opcode table
+- aid = $2
+- if (gaid <= aid)
+- gaid = aid + 1
+- if (tname == "") # AVX only opcode table
+- tname = sprintf("inat_avx_table_%d", $2)
+- }
+- if (aid == -1 && eid == -1) # primary opcode table
+- tname = "inat_primary_table"
+-}
+-
+-/^GrpTable:/ {
+- print "/* " $0 " */"
+- if (!($2 in group))
+- semantic_error("No group: " $2 )
+- gid = group[$2]
+- tname = "inat_group_table_" gid
+-}
+-
+-function print_table(tbl,name,fmt,n)
+-{
+- print "const insn_attr_t " name " = {"
+- for (i = 0; i < n; i++) {
+- id = sprintf(fmt, i)
+- if (tbl[id])
+- print " [" id "] = " tbl[id] ","
+- }
+- print "};"
+-}
+-
+-/^EndTable/ {
+- if (gid != -1) {
+- # print group tables
+- if (array_size(table) != 0) {
+- print_table(table, tname "[INAT_GROUP_TABLE_SIZE]",
+- "0x%x", 8)
+- gtable[gid,0] = tname
+- }
+- if (array_size(lptable1) != 0) {
+- print_table(lptable1, tname "_1[INAT_GROUP_TABLE_SIZE]",
+- "0x%x", 8)
+- gtable[gid,1] = tname "_1"
+- }
+- if (array_size(lptable2) != 0) {
+- print_table(lptable2, tname "_2[INAT_GROUP_TABLE_SIZE]",
+- "0x%x", 8)
+- gtable[gid,2] = tname "_2"
+- }
+- if (array_size(lptable3) != 0) {
+- print_table(lptable3, tname "_3[INAT_GROUP_TABLE_SIZE]",
+- "0x%x", 8)
+- gtable[gid,3] = tname "_3"
+- }
+- } else {
+- # print primary/escaped tables
+- if (array_size(table) != 0) {
+- print_table(table, tname "[INAT_OPCODE_TABLE_SIZE]",
+- "0x%02x", 256)
+- etable[eid,0] = tname
+- if (aid >= 0)
+- atable[aid,0] = tname
+- }
+- if (array_size(lptable1) != 0) {
+- print_table(lptable1,tname "_1[INAT_OPCODE_TABLE_SIZE]",
+- "0x%02x", 256)
+- etable[eid,1] = tname "_1"
+- if (aid >= 0)
+- atable[aid,1] = tname "_1"
+- }
+- if (array_size(lptable2) != 0) {
+- print_table(lptable2,tname "_2[INAT_OPCODE_TABLE_SIZE]",
+- "0x%02x", 256)
+- etable[eid,2] = tname "_2"
+- if (aid >= 0)
+- atable[aid,2] = tname "_2"
+- }
+- if (array_size(lptable3) != 0) {
+- print_table(lptable3,tname "_3[INAT_OPCODE_TABLE_SIZE]",
+- "0x%02x", 256)
+- etable[eid,3] = tname "_3"
+- if (aid >= 0)
+- atable[aid,3] = tname "_3"
+- }
+- }
+- print ""
+- clear_vars()
+-}
+-
+-function add_flags(old,new) {
+- if (old && new)
+- return old " | " new
+- else if (old)
+- return old
+- else
+- return new
+-}
+-
+-# convert operands to flags.
+-function convert_operands(count,opnd, i,j,imm,mod)
+-{
+- imm = null
+- mod = null
+- for (j = 1; j <= count; j++) {
+- i = opnd[j]
+- if (match(i, imm_expr) == 1) {
+- if (!imm_flag[i])
+- semantic_error("Unknown imm opnd: " i)
+- if (imm) {
+- if (i != "Ib")
+- semantic_error("Second IMM error")
+- imm = add_flags(imm, "INAT_SCNDIMM")
+- } else
+- imm = imm_flag[i]
+- } else if (match(i, modrm_expr))
+- mod = "INAT_MODRM"
+- }
+- return add_flags(imm, mod)
+-}
+-
+-/^[0-9a-f]+\:/ {
+- if (NR == 1)
+- next
+- # get index
+- idx = "0x" substr($1, 1, index($1,":") - 1)
+- if (idx in table)
+- semantic_error("Redefine " idx " in " tname)
+-
+- # check if escaped opcode
+- if ("escape" == $2) {
+- if ($3 != "#")
+- semantic_error("No escaped name")
+- ref = ""
+- for (i = 4; i <= NF; i++)
+- ref = ref $i
+- if (ref in escape)
+- semantic_error("Redefine escape (" ref ")")
+- escape[ref] = geid
+- geid++
+- table[idx] = "INAT_MAKE_ESCAPE(" escape[ref] ")"
+- next
+- }
+-
+- variant = null
+- # converts
+- i = 2
+- while (i <= NF) {
+- opcode = $(i++)
+- delete opnds
+- ext = null
+- flags = null
+- opnd = null
+- # parse one opcode
+- if (match($i, opnd_expr)) {
+- opnd = $i
+- count = split($(i++), opnds, ",")
+- flags = convert_operands(count, opnds)
+- }
+- if (match($i, ext_expr))
+- ext = $(i++)
+- if (match($i, sep_expr))
+- i++
+- else if (i < NF)
+- semantic_error($i " is not a separator")
+-
+- # check if group opcode
+- if (match(opcode, group_expr)) {
+- if (!(opcode in group)) {
+- group[opcode] = ggid
+- ggid++
+- }
+- flags = add_flags(flags, "INAT_MAKE_GROUP(" group[opcode] ")")
+- }
+- # check force(or default) 64bit
+- if (match(ext, force64_expr))
+- flags = add_flags(flags, "INAT_FORCE64")
+-
+- # check REX prefix
+- if (match(opcode, rex_expr))
+- flags = add_flags(flags, "INAT_MAKE_PREFIX(INAT_PFX_REX)")
+-
+- # check coprocessor escape : TODO
+- if (match(opcode, fpu_expr))
+- flags = add_flags(flags, "INAT_MODRM")
+-
+- # check VEX codes
+- if (match(ext, evexonly_expr))
+- flags = add_flags(flags, "INAT_VEXOK | INAT_EVEXONLY")
+- else if (match(ext, vexonly_expr))
+- flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY")
+- else if (match(ext, vexok_expr) || match(opcode, vexok_opcode_expr))
+- flags = add_flags(flags, "INAT_VEXOK")
+-
+- # check prefixes
+- if (match(ext, prefix_expr)) {
+- if (!prefix_num[opcode])
+- semantic_error("Unknown prefix: " opcode)
+- flags = add_flags(flags, "INAT_MAKE_PREFIX(" prefix_num[opcode] ")")
+- }
+- if (length(flags) == 0)
+- continue
+- # check if last prefix
+- if (match(ext, lprefix1_expr)) {
+- lptable1[idx] = add_flags(lptable1[idx],flags)
+- variant = "INAT_VARIANT"
+- }
+- if (match(ext, lprefix2_expr)) {
+- lptable2[idx] = add_flags(lptable2[idx],flags)
+- variant = "INAT_VARIANT"
+- }
+- if (match(ext, lprefix3_expr)) {
+- lptable3[idx] = add_flags(lptable3[idx],flags)
+- variant = "INAT_VARIANT"
+- }
+- if (!match(ext, lprefix_expr)){
+- table[idx] = add_flags(table[idx],flags)
+- }
+- }
+- if (variant)
+- table[idx] = add_flags(table[idx],variant)
+-}
+-
+-END {
+- if (awkchecked != "")
+- exit 1
+- # print escape opcode map's array
+- print "/* Escape opcode map array */"
+- print "const insn_attr_t * const inat_escape_tables[INAT_ESC_MAX + 1]" \
+- "[INAT_LSTPFX_MAX + 1] = {"
+- for (i = 0; i < geid; i++)
+- for (j = 0; j < max_lprefix; j++)
+- if (etable[i,j])
+- print " ["i"]["j"] = "etable[i,j]","
+- print "};\n"
+- # print group opcode map's array
+- print "/* Group opcode map array */"
+- print "const insn_attr_t * const inat_group_tables[INAT_GRP_MAX + 1]"\
+- "[INAT_LSTPFX_MAX + 1] = {"
+- for (i = 0; i < ggid; i++)
+- for (j = 0; j < max_lprefix; j++)
+- if (gtable[i,j])
+- print " ["i"]["j"] = "gtable[i,j]","
+- print "};\n"
+- # print AVX opcode map's array
+- print "/* AVX opcode map array */"
+- print "const insn_attr_t * const inat_avx_tables[X86_VEX_M_MAX + 1]"\
+- "[INAT_LSTPFX_MAX + 1] = {"
+- for (i = 0; i < gaid; i++)
+- for (j = 0; j < max_lprefix; j++)
+- if (atable[i,j])
+- print " ["i"]["j"] = "atable[i,j]","
+- print "};"
+-}
+-
+diff --git a/tools/objtool/arch/x86/insn/inat.c b/tools/objtool/arch/x86/insn/inat.c
+deleted file mode 100644
+index e4bf28e..0000000
+--- a/tools/objtool/arch/x86/insn/inat.c
++++ /dev/null
+@@ -1,97 +0,0 @@
+-/*
+- * x86 instruction attribute tables
+- *
+- * Written by Masami Hiramatsu <mhiramat@redhat.com>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+- *
+- */
+-#include "insn.h"
+-
+-/* Attribute tables are generated from opcode map */
+-#include "inat-tables.c"
+-
+-/* Attribute search APIs */
+-insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode)
+-{
+- return inat_primary_table[opcode];
+-}
+-
+-int inat_get_last_prefix_id(insn_byte_t last_pfx)
+-{
+- insn_attr_t lpfx_attr;
+-
+- lpfx_attr = inat_get_opcode_attribute(last_pfx);
+- return inat_last_prefix_id(lpfx_attr);
+-}
+-
+-insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, int lpfx_id,
+- insn_attr_t esc_attr)
+-{
+- const insn_attr_t *table;
+- int n;
+-
+- n = inat_escape_id(esc_attr);
+-
+- table = inat_escape_tables[n][0];
+- if (!table)
+- return 0;
+- if (inat_has_variant(table[opcode]) && lpfx_id) {
+- table = inat_escape_tables[n][lpfx_id];
+- if (!table)
+- return 0;
+- }
+- return table[opcode];
+-}
+-
+-insn_attr_t inat_get_group_attribute(insn_byte_t modrm, int lpfx_id,
+- insn_attr_t grp_attr)
+-{
+- const insn_attr_t *table;
+- int n;
+-
+- n = inat_group_id(grp_attr);
+-
+- table = inat_group_tables[n][0];
+- if (!table)
+- return inat_group_common_attribute(grp_attr);
+- if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && lpfx_id) {
+- table = inat_group_tables[n][lpfx_id];
+- if (!table)
+- return inat_group_common_attribute(grp_attr);
+- }
+- return table[X86_MODRM_REG(modrm)] |
+- inat_group_common_attribute(grp_attr);
+-}
+-
+-insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, insn_byte_t vex_m,
+- insn_byte_t vex_p)
+-{
+- const insn_attr_t *table;
+- if (vex_m > X86_VEX_M_MAX || vex_p > INAT_LSTPFX_MAX)
+- return 0;
+- /* At first, this checks the master table */
+- table = inat_avx_tables[vex_m][0];
+- if (!table)
+- return 0;
+- if (!inat_is_group(table[opcode]) && vex_p) {
+- /* If this is not a group, get attribute directly */
+- table = inat_avx_tables[vex_m][vex_p];
+- if (!table)
+- return 0;
+- }
+- return table[opcode];
+-}
+-
+diff --git a/tools/objtool/arch/x86/insn/inat.h b/tools/objtool/arch/x86/insn/inat.h
+deleted file mode 100644
+index 125ecd2..0000000
+--- a/tools/objtool/arch/x86/insn/inat.h
++++ /dev/null
+@@ -1,234 +0,0 @@
+-#ifndef _ASM_X86_INAT_H
+-#define _ASM_X86_INAT_H
+-/*
+- * x86 instruction attributes
+- *
+- * Written by Masami Hiramatsu <mhiramat@redhat.com>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+- *
+- */
+-#include "inat_types.h"
+-
+-/*
+- * Internal bits. Don't use bitmasks directly, because these bits are
+- * unstable. You should use checking functions.
+- */
+-
+-#define INAT_OPCODE_TABLE_SIZE 256
+-#define INAT_GROUP_TABLE_SIZE 8
+-
+-/* Legacy last prefixes */
+-#define INAT_PFX_OPNDSZ 1 /* 0x66 */ /* LPFX1 */
+-#define INAT_PFX_REPE 2 /* 0xF3 */ /* LPFX2 */
+-#define INAT_PFX_REPNE 3 /* 0xF2 */ /* LPFX3 */
+-/* Other Legacy prefixes */
+-#define INAT_PFX_LOCK 4 /* 0xF0 */
+-#define INAT_PFX_CS 5 /* 0x2E */
+-#define INAT_PFX_DS 6 /* 0x3E */
+-#define INAT_PFX_ES 7 /* 0x26 */
+-#define INAT_PFX_FS 8 /* 0x64 */
+-#define INAT_PFX_GS 9 /* 0x65 */
+-#define INAT_PFX_SS 10 /* 0x36 */
+-#define INAT_PFX_ADDRSZ 11 /* 0x67 */
+-/* x86-64 REX prefix */
+-#define INAT_PFX_REX 12 /* 0x4X */
+-/* AVX VEX prefixes */
+-#define INAT_PFX_VEX2 13 /* 2-bytes VEX prefix */
+-#define INAT_PFX_VEX3 14 /* 3-bytes VEX prefix */
+-#define INAT_PFX_EVEX 15 /* EVEX prefix */
+-
+-#define INAT_LSTPFX_MAX 3
+-#define INAT_LGCPFX_MAX 11
+-
+-/* Immediate size */
+-#define INAT_IMM_BYTE 1
+-#define INAT_IMM_WORD 2
+-#define INAT_IMM_DWORD 3
+-#define INAT_IMM_QWORD 4
+-#define INAT_IMM_PTR 5
+-#define INAT_IMM_VWORD32 6
+-#define INAT_IMM_VWORD 7
+-
+-/* Legacy prefix */
+-#define INAT_PFX_OFFS 0
+-#define INAT_PFX_BITS 4
+-#define INAT_PFX_MAX ((1 << INAT_PFX_BITS) - 1)
+-#define INAT_PFX_MASK (INAT_PFX_MAX << INAT_PFX_OFFS)
+-/* Escape opcodes */
+-#define INAT_ESC_OFFS (INAT_PFX_OFFS + INAT_PFX_BITS)
+-#define INAT_ESC_BITS 2
+-#define INAT_ESC_MAX ((1 << INAT_ESC_BITS) - 1)
+-#define INAT_ESC_MASK (INAT_ESC_MAX << INAT_ESC_OFFS)
+-/* Group opcodes (1-16) */
+-#define INAT_GRP_OFFS (INAT_ESC_OFFS + INAT_ESC_BITS)
+-#define INAT_GRP_BITS 5
+-#define INAT_GRP_MAX ((1 << INAT_GRP_BITS) - 1)
+-#define INAT_GRP_MASK (INAT_GRP_MAX << INAT_GRP_OFFS)
+-/* Immediates */
+-#define INAT_IMM_OFFS (INAT_GRP_OFFS + INAT_GRP_BITS)
+-#define INAT_IMM_BITS 3
+-#define INAT_IMM_MASK (((1 << INAT_IMM_BITS) - 1) << INAT_IMM_OFFS)
+-/* Flags */
+-#define INAT_FLAG_OFFS (INAT_IMM_OFFS + INAT_IMM_BITS)
+-#define INAT_MODRM (1 << (INAT_FLAG_OFFS))
+-#define INAT_FORCE64 (1 << (INAT_FLAG_OFFS + 1))
+-#define INAT_SCNDIMM (1 << (INAT_FLAG_OFFS + 2))
+-#define INAT_MOFFSET (1 << (INAT_FLAG_OFFS + 3))
+-#define INAT_VARIANT (1 << (INAT_FLAG_OFFS + 4))
+-#define INAT_VEXOK (1 << (INAT_FLAG_OFFS + 5))
+-#define INAT_VEXONLY (1 << (INAT_FLAG_OFFS + 6))
+-#define INAT_EVEXONLY (1 << (INAT_FLAG_OFFS + 7))
+-/* Attribute making macros for attribute tables */
+-#define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS)
+-#define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS)
+-#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM)
+-#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS)
+-
+-/* Attribute search APIs */
+-extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
+-extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
+-extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode,
+- int lpfx_id,
+- insn_attr_t esc_attr);
+-extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm,
+- int lpfx_id,
+- insn_attr_t esc_attr);
+-extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode,
+- insn_byte_t vex_m,
+- insn_byte_t vex_pp);
+-
+-/* Attribute checking functions */
+-static inline int inat_is_legacy_prefix(insn_attr_t attr)
+-{
+- attr &= INAT_PFX_MASK;
+- return attr && attr <= INAT_LGCPFX_MAX;
+-}
+-
+-static inline int inat_is_address_size_prefix(insn_attr_t attr)
+-{
+- return (attr & INAT_PFX_MASK) == INAT_PFX_ADDRSZ;
+-}
+-
+-static inline int inat_is_operand_size_prefix(insn_attr_t attr)
+-{
+- return (attr & INAT_PFX_MASK) == INAT_PFX_OPNDSZ;
+-}
+-
+-static inline int inat_is_rex_prefix(insn_attr_t attr)
+-{
+- return (attr & INAT_PFX_MASK) == INAT_PFX_REX;
+-}
+-
+-static inline int inat_last_prefix_id(insn_attr_t attr)
+-{
+- if ((attr & INAT_PFX_MASK) > INAT_LSTPFX_MAX)
+- return 0;
+- else
+- return attr & INAT_PFX_MASK;
+-}
+-
+-static inline int inat_is_vex_prefix(insn_attr_t attr)
+-{
+- attr &= INAT_PFX_MASK;
+- return attr == INAT_PFX_VEX2 || attr == INAT_PFX_VEX3 ||
+- attr == INAT_PFX_EVEX;
+-}
+-
+-static inline int inat_is_evex_prefix(insn_attr_t attr)
+-{
+- return (attr & INAT_PFX_MASK) == INAT_PFX_EVEX;
+-}
+-
+-static inline int inat_is_vex3_prefix(insn_attr_t attr)
+-{
+- return (attr & INAT_PFX_MASK) == INAT_PFX_VEX3;
+-}
+-
+-static inline int inat_is_escape(insn_attr_t attr)
+-{
+- return attr & INAT_ESC_MASK;
+-}
+-
+-static inline int inat_escape_id(insn_attr_t attr)
+-{
+- return (attr & INAT_ESC_MASK) >> INAT_ESC_OFFS;
+-}
+-
+-static inline int inat_is_group(insn_attr_t attr)
+-{
+- return attr & INAT_GRP_MASK;
+-}
+-
+-static inline int inat_group_id(insn_attr_t attr)
+-{
+- return (attr & INAT_GRP_MASK) >> INAT_GRP_OFFS;
+-}
+-
+-static inline int inat_group_common_attribute(insn_attr_t attr)
+-{
+- return attr & ~INAT_GRP_MASK;
+-}
+-
+-static inline int inat_has_immediate(insn_attr_t attr)
+-{
+- return attr & INAT_IMM_MASK;
+-}
+-
+-static inline int inat_immediate_size(insn_attr_t attr)
+-{
+- return (attr & INAT_IMM_MASK) >> INAT_IMM_OFFS;
+-}
+-
+-static inline int inat_has_modrm(insn_attr_t attr)
+-{
+- return attr & INAT_MODRM;
+-}
+-
+-static inline int inat_is_force64(insn_attr_t attr)
+-{
+- return attr & INAT_FORCE64;
+-}
+-
+-static inline int inat_has_second_immediate(insn_attr_t attr)
+-{
+- return attr & INAT_SCNDIMM;
+-}
+-
+-static inline int inat_has_moffset(insn_attr_t attr)
+-{
+- return attr & INAT_MOFFSET;
+-}
+-
+-static inline int inat_has_variant(insn_attr_t attr)
+-{
+- return attr & INAT_VARIANT;
+-}
+-
+-static inline int inat_accept_vex(insn_attr_t attr)
+-{
+- return attr & INAT_VEXOK;
+-}
+-
+-static inline int inat_must_vex(insn_attr_t attr)
+-{
+- return attr & (INAT_VEXONLY | INAT_EVEXONLY);
+-}
+-
+-static inline int inat_must_evex(insn_attr_t attr)
+-{
+- return attr & INAT_EVEXONLY;
+-}
+-#endif
+diff --git a/tools/objtool/arch/x86/insn/inat_types.h b/tools/objtool/arch/x86/insn/inat_types.h
+deleted file mode 100644
+index cb3c20c..0000000
+--- a/tools/objtool/arch/x86/insn/inat_types.h
++++ /dev/null
+@@ -1,29 +0,0 @@
+-#ifndef _ASM_X86_INAT_TYPES_H
+-#define _ASM_X86_INAT_TYPES_H
+-/*
+- * x86 instruction attributes
+- *
+- * Written by Masami Hiramatsu <mhiramat@redhat.com>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+- *
+- */
+-
+-/* Instruction attributes */
+-typedef unsigned int insn_attr_t;
+-typedef unsigned char insn_byte_t;
+-typedef signed int insn_value_t;
+-
+-#endif
+diff --git a/tools/objtool/arch/x86/insn/insn.c b/tools/objtool/arch/x86/insn/insn.c
+deleted file mode 100644
+index ca983e2..0000000
+--- a/tools/objtool/arch/x86/insn/insn.c
++++ /dev/null
+@@ -1,606 +0,0 @@
+-/*
+- * x86 instruction analysis
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+- *
+- * Copyright (C) IBM Corporation, 2002, 2004, 2009
+- */
+-
+-#ifdef __KERNEL__
+-#include <linux/string.h>
+-#else
+-#include <string.h>
+-#endif
+-#include "inat.h"
+-#include "insn.h"
+-
+-/* Verify next sizeof(t) bytes can be on the same instruction */
+-#define validate_next(t, insn, n) \
+- ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
+-
+-#define __get_next(t, insn) \
+- ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
+-
+-#define __peek_nbyte_next(t, insn, n) \
+- ({ t r = *(t*)((insn)->next_byte + n); r; })
+-
+-#define get_next(t, insn) \
+- ({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
+-
+-#define peek_nbyte_next(t, insn, n) \
+- ({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); })
+-
+-#define peek_next(t, insn) peek_nbyte_next(t, insn, 0)
+-
+-/**
+- * insn_init() - initialize struct insn
+- * @insn: &struct insn to be initialized
+- * @kaddr: address (in kernel memory) of instruction (or copy thereof)
+- * @x86_64: !0 for 64-bit kernel or 64-bit app
+- */
+-void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
+-{
+- /*
+- * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
+- * even if the input buffer is long enough to hold them.
+- */
+- if (buf_len > MAX_INSN_SIZE)
+- buf_len = MAX_INSN_SIZE;
+-
+- memset(insn, 0, sizeof(*insn));
+- insn->kaddr = kaddr;
+- insn->end_kaddr = kaddr + buf_len;
+- insn->next_byte = kaddr;
+- insn->x86_64 = x86_64 ? 1 : 0;
+- insn->opnd_bytes = 4;
+- if (x86_64)
+- insn->addr_bytes = 8;
+- else
+- insn->addr_bytes = 4;
+-}
+-
+-/**
+- * insn_get_prefixes - scan x86 instruction prefix bytes
+- * @insn: &struct insn containing instruction
+- *
+- * Populates the @insn->prefixes bitmap, and updates @insn->next_byte
+- * to point to the (first) opcode. No effect if @insn->prefixes.got
+- * is already set.
+- */
+-void insn_get_prefixes(struct insn *insn)
+-{
+- struct insn_field *prefixes = &insn->prefixes;
+- insn_attr_t attr;
+- insn_byte_t b, lb;
+- int i, nb;
+-
+- if (prefixes->got)
+- return;
+-
+- nb = 0;
+- lb = 0;
+- b = peek_next(insn_byte_t, insn);
+- attr = inat_get_opcode_attribute(b);
+- while (inat_is_legacy_prefix(attr)) {
+- /* Skip if same prefix */
+- for (i = 0; i < nb; i++)
+- if (prefixes->bytes[i] == b)
+- goto found;
+- if (nb == 4)
+- /* Invalid instruction */
+- break;
+- prefixes->bytes[nb++] = b;
+- if (inat_is_address_size_prefix(attr)) {
+- /* address size switches 2/4 or 4/8 */
+- if (insn->x86_64)
+- insn->addr_bytes ^= 12;
+- else
+- insn->addr_bytes ^= 6;
+- } else if (inat_is_operand_size_prefix(attr)) {
+- /* oprand size switches 2/4 */
+- insn->opnd_bytes ^= 6;
+- }
+-found:
+- prefixes->nbytes++;
+- insn->next_byte++;
+- lb = b;
+- b = peek_next(insn_byte_t, insn);
+- attr = inat_get_opcode_attribute(b);
+- }
+- /* Set the last prefix */
+- if (lb && lb != insn->prefixes.bytes[3]) {
+- if (unlikely(insn->prefixes.bytes[3])) {
+- /* Swap the last prefix */
+- b = insn->prefixes.bytes[3];
+- for (i = 0; i < nb; i++)
+- if (prefixes->bytes[i] == lb)
+- prefixes->bytes[i] = b;
+- }
+- insn->prefixes.bytes[3] = lb;
+- }
+-
+- /* Decode REX prefix */
+- if (insn->x86_64) {
+- b = peek_next(insn_byte_t, insn);
+- attr = inat_get_opcode_attribute(b);
+- if (inat_is_rex_prefix(attr)) {
+- insn->rex_prefix.value = b;
+- insn->rex_prefix.nbytes = 1;
+- insn->next_byte++;
+- if (X86_REX_W(b))
+- /* REX.W overrides opnd_size */
+- insn->opnd_bytes = 8;
+- }
+- }
+- insn->rex_prefix.got = 1;
+-
+- /* Decode VEX prefix */
+- b = peek_next(insn_byte_t, insn);
+- attr = inat_get_opcode_attribute(b);
+- if (inat_is_vex_prefix(attr)) {
+- insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1);
+- if (!insn->x86_64) {
+- /*
+- * In 32-bits mode, if the [7:6] bits (mod bits of
+- * ModRM) on the second byte are not 11b, it is
+- * LDS or LES or BOUND.
+- */
+- if (X86_MODRM_MOD(b2) != 3)
+- goto vex_end;
+- }
+- insn->vex_prefix.bytes[0] = b;
+- insn->vex_prefix.bytes[1] = b2;
+- if (inat_is_evex_prefix(attr)) {
+- b2 = peek_nbyte_next(insn_byte_t, insn, 2);
+- insn->vex_prefix.bytes[2] = b2;
+- b2 = peek_nbyte_next(insn_byte_t, insn, 3);
+- insn->vex_prefix.bytes[3] = b2;
+- insn->vex_prefix.nbytes = 4;
+- insn->next_byte += 4;
+- if (insn->x86_64 && X86_VEX_W(b2))
+- /* VEX.W overrides opnd_size */
+- insn->opnd_bytes = 8;
+- } else if (inat_is_vex3_prefix(attr)) {
+- b2 = peek_nbyte_next(insn_byte_t, insn, 2);
+- insn->vex_prefix.bytes[2] = b2;
+- insn->vex_prefix.nbytes = 3;
+- insn->next_byte += 3;
+- if (insn->x86_64 && X86_VEX_W(b2))
+- /* VEX.W overrides opnd_size */
+- insn->opnd_bytes = 8;
+- } else {
+- /*
+- * For VEX2, fake VEX3-like byte#2.
+- * Makes it easier to decode vex.W, vex.vvvv,
+- * vex.L and vex.pp. Masking with 0x7f sets vex.W == 0.
+- */
+- insn->vex_prefix.bytes[2] = b2 & 0x7f;
+- insn->vex_prefix.nbytes = 2;
+- insn->next_byte += 2;
+- }
+- }
+-vex_end:
+- insn->vex_prefix.got = 1;
+-
+- prefixes->got = 1;
+-
+-err_out:
+- return;
+-}
+-
+-/**
+- * insn_get_opcode - collect opcode(s)
+- * @insn: &struct insn containing instruction
+- *
+- * Populates @insn->opcode, updates @insn->next_byte to point past the
+- * opcode byte(s), and set @insn->attr (except for groups).
+- * If necessary, first collects any preceding (prefix) bytes.
+- * Sets @insn->opcode.value = opcode1. No effect if @insn->opcode.got
+- * is already 1.
+- */
+-void insn_get_opcode(struct insn *insn)
+-{
+- struct insn_field *opcode = &insn->opcode;
+- insn_byte_t op;
+- int pfx_id;
+- if (opcode->got)
+- return;
+- if (!insn->prefixes.got)
+- insn_get_prefixes(insn);
+-
+- /* Get first opcode */
+- op = get_next(insn_byte_t, insn);
+- opcode->bytes[0] = op;
+- opcode->nbytes = 1;
+-
+- /* Check if there is VEX prefix or not */
+- if (insn_is_avx(insn)) {
+- insn_byte_t m, p;
+- m = insn_vex_m_bits(insn);
+- p = insn_vex_p_bits(insn);
+- insn->attr = inat_get_avx_attribute(op, m, p);
+- if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
+- (!inat_accept_vex(insn->attr) &&
+- !inat_is_group(insn->attr)))
+- insn->attr = 0; /* This instruction is bad */
+- goto end; /* VEX has only 1 byte for opcode */
+- }
+-
+- insn->attr = inat_get_opcode_attribute(op);
+- while (inat_is_escape(insn->attr)) {
+- /* Get escaped opcode */
+- op = get_next(insn_byte_t, insn);
+- opcode->bytes[opcode->nbytes++] = op;
+- pfx_id = insn_last_prefix_id(insn);
+- insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
+- }
+- if (inat_must_vex(insn->attr))
+- insn->attr = 0; /* This instruction is bad */
+-end:
+- opcode->got = 1;
+-
+-err_out:
+- return;
+-}
+-
+-/**
+- * insn_get_modrm - collect ModRM byte, if any
+- * @insn: &struct insn containing instruction
+- *
+- * Populates @insn->modrm and updates @insn->next_byte to point past the
+- * ModRM byte, if any. If necessary, first collects the preceding bytes
+- * (prefixes and opcode(s)). No effect if @insn->modrm.got is already 1.
+- */
+-void insn_get_modrm(struct insn *insn)
+-{
+- struct insn_field *modrm = &insn->modrm;
+- insn_byte_t pfx_id, mod;
+- if (modrm->got)
+- return;
+- if (!insn->opcode.got)
+- insn_get_opcode(insn);
+-
+- if (inat_has_modrm(insn->attr)) {
+- mod = get_next(insn_byte_t, insn);
+- modrm->value = mod;
+- modrm->nbytes = 1;
+- if (inat_is_group(insn->attr)) {
+- pfx_id = insn_last_prefix_id(insn);
+- insn->attr = inat_get_group_attribute(mod, pfx_id,
+- insn->attr);
+- if (insn_is_avx(insn) && !inat_accept_vex(insn->attr))
+- insn->attr = 0; /* This is bad */
+- }
+- }
+-
+- if (insn->x86_64 && inat_is_force64(insn->attr))
+- insn->opnd_bytes = 8;
+- modrm->got = 1;
+-
+-err_out:
+- return;
+-}
+-
+-
+-/**
+- * insn_rip_relative() - Does instruction use RIP-relative addressing mode?
+- * @insn: &struct insn containing instruction
+- *
+- * If necessary, first collects the instruction up to and including the
+- * ModRM byte. No effect if @insn->x86_64 is 0.
+- */
+-int insn_rip_relative(struct insn *insn)
+-{
+- struct insn_field *modrm = &insn->modrm;
+-
+- if (!insn->x86_64)
+- return 0;
+- if (!modrm->got)
+- insn_get_modrm(insn);
+- /*
+- * For rip-relative instructions, the mod field (top 2 bits)
+- * is zero and the r/m field (bottom 3 bits) is 0x5.
+- */
+- return (modrm->nbytes && (modrm->value & 0xc7) == 0x5);
+-}
+-
+-/**
+- * insn_get_sib() - Get the SIB byte of instruction
+- * @insn: &struct insn containing instruction
+- *
+- * If necessary, first collects the instruction up to and including the
+- * ModRM byte.
+- */
+-void insn_get_sib(struct insn *insn)
+-{
+- insn_byte_t modrm;
+-
+- if (insn->sib.got)
+- return;
+- if (!insn->modrm.got)
+- insn_get_modrm(insn);
+- if (insn->modrm.nbytes) {
+- modrm = (insn_byte_t)insn->modrm.value;
+- if (insn->addr_bytes != 2 &&
+- X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) {
+- insn->sib.value = get_next(insn_byte_t, insn);
+- insn->sib.nbytes = 1;
+- }
+- }
+- insn->sib.got = 1;
+-
+-err_out:
+- return;
+-}
+-
+-
+-/**
+- * insn_get_displacement() - Get the displacement of instruction
+- * @insn: &struct insn containing instruction
+- *
+- * If necessary, first collects the instruction up to and including the
+- * SIB byte.
+- * Displacement value is sign-expanded.
+- */
+-void insn_get_displacement(struct insn *insn)
+-{
+- insn_byte_t mod, rm, base;
+-
+- if (insn->displacement.got)
+- return;
+- if (!insn->sib.got)
+- insn_get_sib(insn);
+- if (insn->modrm.nbytes) {
+- /*
+- * Interpreting the modrm byte:
+- * mod = 00 - no displacement fields (exceptions below)
+- * mod = 01 - 1-byte displacement field
+- * mod = 10 - displacement field is 4 bytes, or 2 bytes if
+- * address size = 2 (0x67 prefix in 32-bit mode)
+- * mod = 11 - no memory operand
+- *
+- * If address size = 2...
+- * mod = 00, r/m = 110 - displacement field is 2 bytes
+- *
+- * If address size != 2...
+- * mod != 11, r/m = 100 - SIB byte exists
+- * mod = 00, SIB base = 101 - displacement field is 4 bytes
+- * mod = 00, r/m = 101 - rip-relative addressing, displacement
+- * field is 4 bytes
+- */
+- mod = X86_MODRM_MOD(insn->modrm.value);
+- rm = X86_MODRM_RM(insn->modrm.value);
+- base = X86_SIB_BASE(insn->sib.value);
+- if (mod == 3)
+- goto out;
+- if (mod == 1) {
+- insn->displacement.value = get_next(signed char, insn);
+- insn->displacement.nbytes = 1;
+- } else if (insn->addr_bytes == 2) {
+- if ((mod == 0 && rm == 6) || mod == 2) {
+- insn->displacement.value =
+- get_next(short, insn);
+- insn->displacement.nbytes = 2;
+- }
+- } else {
+- if ((mod == 0 && rm == 5) || mod == 2 ||
+- (mod == 0 && base == 5)) {
+- insn->displacement.value = get_next(int, insn);
+- insn->displacement.nbytes = 4;
+- }
+- }
+- }
+-out:
+- insn->displacement.got = 1;
+-
+-err_out:
+- return;
+-}
+-
+-/* Decode moffset16/32/64. Return 0 if failed */
+-static int __get_moffset(struct insn *insn)
+-{
+- switch (insn->addr_bytes) {
+- case 2:
+- insn->moffset1.value = get_next(short, insn);
+- insn->moffset1.nbytes = 2;
+- break;
+- case 4:
+- insn->moffset1.value = get_next(int, insn);
+- insn->moffset1.nbytes = 4;
+- break;
+- case 8:
+- insn->moffset1.value = get_next(int, insn);
+- insn->moffset1.nbytes = 4;
+- insn->moffset2.value = get_next(int, insn);
+- insn->moffset2.nbytes = 4;
+- break;
+- default: /* opnd_bytes must be modified manually */
+- goto err_out;
+- }
+- insn->moffset1.got = insn->moffset2.got = 1;
+-
+- return 1;
+-
+-err_out:
+- return 0;
+-}
+-
+-/* Decode imm v32(Iz). Return 0 if failed */
+-static int __get_immv32(struct insn *insn)
+-{
+- switch (insn->opnd_bytes) {
+- case 2:
+- insn->immediate.value = get_next(short, insn);
+- insn->immediate.nbytes = 2;
+- break;
+- case 4:
+- case 8:
+- insn->immediate.value = get_next(int, insn);
+- insn->immediate.nbytes = 4;
+- break;
+- default: /* opnd_bytes must be modified manually */
+- goto err_out;
+- }
+-
+- return 1;
+-
+-err_out:
+- return 0;
+-}
+-
+-/* Decode imm v64(Iv/Ov), Return 0 if failed */
+-static int __get_immv(struct insn *insn)
+-{
+- switch (insn->opnd_bytes) {
+- case 2:
+- insn->immediate1.value = get_next(short, insn);
+- insn->immediate1.nbytes = 2;
+- break;
+- case 4:
+- insn->immediate1.value = get_next(int, insn);
+- insn->immediate1.nbytes = 4;
+- break;
+- case 8:
+- insn->immediate1.value = get_next(int, insn);
+- insn->immediate1.nbytes = 4;
+- insn->immediate2.value = get_next(int, insn);
+- insn->immediate2.nbytes = 4;
+- break;
+- default: /* opnd_bytes must be modified manually */
+- goto err_out;
+- }
+- insn->immediate1.got = insn->immediate2.got = 1;
+-
+- return 1;
+-err_out:
+- return 0;
+-}
+-
+-/* Decode ptr16:16/32(Ap) */
+-static int __get_immptr(struct insn *insn)
+-{
+- switch (insn->opnd_bytes) {
+- case 2:
+- insn->immediate1.value = get_next(short, insn);
+- insn->immediate1.nbytes = 2;
+- break;
+- case 4:
+- insn->immediate1.value = get_next(int, insn);
+- insn->immediate1.nbytes = 4;
+- break;
+- case 8:
+- /* ptr16:64 is not exist (no segment) */
+- return 0;
+- default: /* opnd_bytes must be modified manually */
+- goto err_out;
+- }
+- insn->immediate2.value = get_next(unsigned short, insn);
+- insn->immediate2.nbytes = 2;
+- insn->immediate1.got = insn->immediate2.got = 1;
+-
+- return 1;
+-err_out:
+- return 0;
+-}
+-
+-/**
+- * insn_get_immediate() - Get the immediates of instruction
+- * @insn: &struct insn containing instruction
+- *
+- * If necessary, first collects the instruction up to and including the
+- * displacement bytes.
+- * Basically, most of immediates are sign-expanded. Unsigned-value can be
+- * get by bit masking with ((1 << (nbytes * 8)) - 1)
+- */
+-void insn_get_immediate(struct insn *insn)
+-{
+- if (insn->immediate.got)
+- return;
+- if (!insn->displacement.got)
+- insn_get_displacement(insn);
+-
+- if (inat_has_moffset(insn->attr)) {
+- if (!__get_moffset(insn))
+- goto err_out;
+- goto done;
+- }
+-
+- if (!inat_has_immediate(insn->attr))
+- /* no immediates */
+- goto done;
+-
+- switch (inat_immediate_size(insn->attr)) {
+- case INAT_IMM_BYTE:
+- insn->immediate.value = get_next(signed char, insn);
+- insn->immediate.nbytes = 1;
+- break;
+- case INAT_IMM_WORD:
+- insn->immediate.value = get_next(short, insn);
+- insn->immediate.nbytes = 2;
+- break;
+- case INAT_IMM_DWORD:
+- insn->immediate.value = get_next(int, insn);
+- insn->immediate.nbytes = 4;
+- break;
+- case INAT_IMM_QWORD:
+- insn->immediate1.value = get_next(int, insn);
+- insn->immediate1.nbytes = 4;
+- insn->immediate2.value = get_next(int, insn);
+- insn->immediate2.nbytes = 4;
+- break;
+- case INAT_IMM_PTR:
+- if (!__get_immptr(insn))
+- goto err_out;
+- break;
+- case INAT_IMM_VWORD32:
+- if (!__get_immv32(insn))
+- goto err_out;
+- break;
+- case INAT_IMM_VWORD:
+- if (!__get_immv(insn))
+- goto err_out;
+- break;
+- default:
+- /* Here, insn must have an immediate, but failed */
+- goto err_out;
+- }
+- if (inat_has_second_immediate(insn->attr)) {
+- insn->immediate2.value = get_next(signed char, insn);
+- insn->immediate2.nbytes = 1;
+- }
+-done:
+- insn->immediate.got = 1;
+-
+-err_out:
+- return;
+-}
+-
+-/**
+- * insn_get_length() - Get the length of instruction
+- * @insn: &struct insn containing instruction
+- *
+- * If necessary, first collects the instruction up to and including the
+- * immediates bytes.
+- */
+-void insn_get_length(struct insn *insn)
+-{
+- if (insn->length)
+- return;
+- if (!insn->immediate.got)
+- insn_get_immediate(insn);
+- insn->length = (unsigned char)((unsigned long)insn->next_byte
+- - (unsigned long)insn->kaddr);
+-}
+diff --git a/tools/objtool/arch/x86/insn/insn.h b/tools/objtool/arch/x86/insn/insn.h
+deleted file mode 100644
+index e23578c..0000000
+--- a/tools/objtool/arch/x86/insn/insn.h
++++ /dev/null
+@@ -1,211 +0,0 @@
+-#ifndef _ASM_X86_INSN_H
+-#define _ASM_X86_INSN_H
+-/*
+- * x86 instruction analysis
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+- *
+- * Copyright (C) IBM Corporation, 2009
+- */
+-
+-/* insn_attr_t is defined in inat.h */
+-#include "inat.h"
+-
+-struct insn_field {
+- union {
+- insn_value_t value;
+- insn_byte_t bytes[4];
+- };
+- /* !0 if we've run insn_get_xxx() for this field */
+- unsigned char got;
+- unsigned char nbytes;
+-};
+-
+-struct insn {
+- struct insn_field prefixes; /*
+- * Prefixes
+- * prefixes.bytes[3]: last prefix
+- */
+- struct insn_field rex_prefix; /* REX prefix */
+- struct insn_field vex_prefix; /* VEX prefix */
+- struct insn_field opcode; /*
+- * opcode.bytes[0]: opcode1
+- * opcode.bytes[1]: opcode2
+- * opcode.bytes[2]: opcode3
+- */
+- struct insn_field modrm;
+- struct insn_field sib;
+- struct insn_field displacement;
+- union {
+- struct insn_field immediate;
+- struct insn_field moffset1; /* for 64bit MOV */
+- struct insn_field immediate1; /* for 64bit imm or off16/32 */
+- };
+- union {
+- struct insn_field moffset2; /* for 64bit MOV */
+- struct insn_field immediate2; /* for 64bit imm or seg16 */
+- };
+-
+- insn_attr_t attr;
+- unsigned char opnd_bytes;
+- unsigned char addr_bytes;
+- unsigned char length;
+- unsigned char x86_64;
+-
+- const insn_byte_t *kaddr; /* kernel address of insn to analyze */
+- const insn_byte_t *end_kaddr; /* kernel address of last insn in buffer */
+- const insn_byte_t *next_byte;
+-};
+-
+-#define MAX_INSN_SIZE 15
+-
+-#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
+-#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
+-#define X86_MODRM_RM(modrm) ((modrm) & 0x07)
+-
+-#define X86_SIB_SCALE(sib) (((sib) & 0xc0) >> 6)
+-#define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3)
+-#define X86_SIB_BASE(sib) ((sib) & 0x07)
+-
+-#define X86_REX_W(rex) ((rex) & 8)
+-#define X86_REX_R(rex) ((rex) & 4)
+-#define X86_REX_X(rex) ((rex) & 2)
+-#define X86_REX_B(rex) ((rex) & 1)
+-
+-/* VEX bit flags */
+-#define X86_VEX_W(vex) ((vex) & 0x80) /* VEX3 Byte2 */
+-#define X86_VEX_R(vex) ((vex) & 0x80) /* VEX2/3 Byte1 */
+-#define X86_VEX_X(vex) ((vex) & 0x40) /* VEX3 Byte1 */
+-#define X86_VEX_B(vex) ((vex) & 0x20) /* VEX3 Byte1 */
+-#define X86_VEX_L(vex) ((vex) & 0x04) /* VEX3 Byte2, VEX2 Byte1 */
+-/* VEX bit fields */
+-#define X86_EVEX_M(vex) ((vex) & 0x03) /* EVEX Byte1 */
+-#define X86_VEX3_M(vex) ((vex) & 0x1f) /* VEX3 Byte1 */
+-#define X86_VEX2_M 1 /* VEX2.M always 1 */
+-#define X86_VEX_V(vex) (((vex) & 0x78) >> 3) /* VEX3 Byte2, VEX2 Byte1 */
+-#define X86_VEX_P(vex) ((vex) & 0x03) /* VEX3 Byte2, VEX2 Byte1 */
+-#define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */
+-
+-extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64);
+-extern void insn_get_prefixes(struct insn *insn);
+-extern void insn_get_opcode(struct insn *insn);
+-extern void insn_get_modrm(struct insn *insn);
+-extern void insn_get_sib(struct insn *insn);
+-extern void insn_get_displacement(struct insn *insn);
+-extern void insn_get_immediate(struct insn *insn);
+-extern void insn_get_length(struct insn *insn);
+-
+-/* Attribute will be determined after getting ModRM (for opcode groups) */
+-static inline void insn_get_attribute(struct insn *insn)
+-{
+- insn_get_modrm(insn);
+-}
+-
+-/* Instruction uses RIP-relative addressing */
+-extern int insn_rip_relative(struct insn *insn);
+-
+-/* Init insn for kernel text */
+-static inline void kernel_insn_init(struct insn *insn,
+- const void *kaddr, int buf_len)
+-{
+-#ifdef CONFIG_X86_64
+- insn_init(insn, kaddr, buf_len, 1);
+-#else /* CONFIG_X86_32 */
+- insn_init(insn, kaddr, buf_len, 0);
+-#endif
+-}
+-
+-static inline int insn_is_avx(struct insn *insn)
+-{
+- if (!insn->prefixes.got)
+- insn_get_prefixes(insn);
+- return (insn->vex_prefix.value != 0);
+-}
+-
+-static inline int insn_is_evex(struct insn *insn)
+-{
+- if (!insn->prefixes.got)
+- insn_get_prefixes(insn);
+- return (insn->vex_prefix.nbytes == 4);
+-}
+-
+-/* Ensure this instruction is decoded completely */
+-static inline int insn_complete(struct insn *insn)
+-{
+- return insn->opcode.got && insn->modrm.got && insn->sib.got &&
+- insn->displacement.got && insn->immediate.got;
+-}
+-
+-static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
+-{
+- if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
+- return X86_VEX2_M;
+- else if (insn->vex_prefix.nbytes == 3) /* 3 bytes VEX */
+- return X86_VEX3_M(insn->vex_prefix.bytes[1]);
+- else /* EVEX */
+- return X86_EVEX_M(insn->vex_prefix.bytes[1]);
+-}
+-
+-static inline insn_byte_t insn_vex_p_bits(struct insn *insn)
+-{
+- if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
+- return X86_VEX_P(insn->vex_prefix.bytes[1]);
+- else
+- return X86_VEX_P(insn->vex_prefix.bytes[2]);
+-}
+-
+-/* Get the last prefix id from last prefix or VEX prefix */
+-static inline int insn_last_prefix_id(struct insn *insn)
+-{
+- if (insn_is_avx(insn))
+- return insn_vex_p_bits(insn); /* VEX_p is a SIMD prefix id */
+-
+- if (insn->prefixes.bytes[3])
+- return inat_get_last_prefix_id(insn->prefixes.bytes[3]);
+-
+- return 0;
+-}
+-
+-/* Offset of each field from kaddr */
+-static inline int insn_offset_rex_prefix(struct insn *insn)
+-{
+- return insn->prefixes.nbytes;
+-}
+-static inline int insn_offset_vex_prefix(struct insn *insn)
+-{
+- return insn_offset_rex_prefix(insn) + insn->rex_prefix.nbytes;
+-}
+-static inline int insn_offset_opcode(struct insn *insn)
+-{
+- return insn_offset_vex_prefix(insn) + insn->vex_prefix.nbytes;
+-}
+-static inline int insn_offset_modrm(struct insn *insn)
+-{
+- return insn_offset_opcode(insn) + insn->opcode.nbytes;
+-}
+-static inline int insn_offset_sib(struct insn *insn)
+-{
+- return insn_offset_modrm(insn) + insn->modrm.nbytes;
+-}
+-static inline int insn_offset_displacement(struct insn *insn)
+-{
+- return insn_offset_sib(insn) + insn->sib.nbytes;
+-}
+-static inline int insn_offset_immediate(struct insn *insn)
+-{
+- return insn_offset_displacement(insn) + insn->displacement.nbytes;
+-}
+-
+-#endif /* _ASM_X86_INSN_H */
+diff --git a/tools/objtool/arch/x86/insn/x86-opcode-map.txt b/tools/objtool/arch/x86/insn/x86-opcode-map.txt
+deleted file mode 100644
+index 767be7c..0000000
+--- a/tools/objtool/arch/x86/insn/x86-opcode-map.txt
++++ /dev/null
+@@ -1,1063 +0,0 @@
+-# x86 Opcode Maps
+-#
+-# This is (mostly) based on following documentations.
+-# - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2C
+-# (#326018-047US, June 2013)
+-#
+-#<Opcode maps>
+-# Table: table-name
+-# Referrer: escaped-name
+-# AVXcode: avx-code
+-# opcode: mnemonic|GrpXXX [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...]
+-# (or)
+-# opcode: escape # escaped-name
+-# EndTable
+-#
+-# mnemonics that begin with lowercase 'v' accept a VEX or EVEX prefix
+-# mnemonics that begin with lowercase 'k' accept a VEX prefix
+-#
+-#<group maps>
+-# GrpTable: GrpXXX
+-# reg: mnemonic [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...]
+-# EndTable
+-#
+-# AVX Superscripts
+-# (ev): this opcode requires EVEX prefix.
+-# (evo): this opcode is changed by EVEX prefix (EVEX opcode)
+-# (v): this opcode requires VEX prefix.
+-# (v1): this opcode only supports 128bit VEX.
+-#
+-# Last Prefix Superscripts
+-# - (66): the last prefix is 0x66
+-# - (F3): the last prefix is 0xF3
+-# - (F2): the last prefix is 0xF2
+-# - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
+-# - (66&F2): Both 0x66 and 0xF2 prefixes are specified.
+-
+-Table: one byte opcode
+-Referrer:
+-AVXcode:
+-# 0x00 - 0x0f
+-00: ADD Eb,Gb
+-01: ADD Ev,Gv
+-02: ADD Gb,Eb
+-03: ADD Gv,Ev
+-04: ADD AL,Ib
+-05: ADD rAX,Iz
+-06: PUSH ES (i64)
+-07: POP ES (i64)
+-08: OR Eb,Gb
+-09: OR Ev,Gv
+-0a: OR Gb,Eb
+-0b: OR Gv,Ev
+-0c: OR AL,Ib
+-0d: OR rAX,Iz
+-0e: PUSH CS (i64)
+-0f: escape # 2-byte escape
+-# 0x10 - 0x1f
+-10: ADC Eb,Gb
+-11: ADC Ev,Gv
+-12: ADC Gb,Eb
+-13: ADC Gv,Ev
+-14: ADC AL,Ib
+-15: ADC rAX,Iz
+-16: PUSH SS (i64)
+-17: POP SS (i64)
+-18: SBB Eb,Gb
+-19: SBB Ev,Gv
+-1a: SBB Gb,Eb
+-1b: SBB Gv,Ev
+-1c: SBB AL,Ib
+-1d: SBB rAX,Iz
+-1e: PUSH DS (i64)
+-1f: POP DS (i64)
+-# 0x20 - 0x2f
+-20: AND Eb,Gb
+-21: AND Ev,Gv
+-22: AND Gb,Eb
+-23: AND Gv,Ev
+-24: AND AL,Ib
+-25: AND rAx,Iz
+-26: SEG=ES (Prefix)
+-27: DAA (i64)
+-28: SUB Eb,Gb
+-29: SUB Ev,Gv
+-2a: SUB Gb,Eb
+-2b: SUB Gv,Ev
+-2c: SUB AL,Ib
+-2d: SUB rAX,Iz
+-2e: SEG=CS (Prefix)
+-2f: DAS (i64)
+-# 0x30 - 0x3f
+-30: XOR Eb,Gb
+-31: XOR Ev,Gv
+-32: XOR Gb,Eb
+-33: XOR Gv,Ev
+-34: XOR AL,Ib
+-35: XOR rAX,Iz
+-36: SEG=SS (Prefix)
+-37: AAA (i64)
+-38: CMP Eb,Gb
+-39: CMP Ev,Gv
+-3a: CMP Gb,Eb
+-3b: CMP Gv,Ev
+-3c: CMP AL,Ib
+-3d: CMP rAX,Iz
+-3e: SEG=DS (Prefix)
+-3f: AAS (i64)
+-# 0x40 - 0x4f
+-40: INC eAX (i64) | REX (o64)
+-41: INC eCX (i64) | REX.B (o64)
+-42: INC eDX (i64) | REX.X (o64)
+-43: INC eBX (i64) | REX.XB (o64)
+-44: INC eSP (i64) | REX.R (o64)
+-45: INC eBP (i64) | REX.RB (o64)
+-46: INC eSI (i64) | REX.RX (o64)
+-47: INC eDI (i64) | REX.RXB (o64)
+-48: DEC eAX (i64) | REX.W (o64)
+-49: DEC eCX (i64) | REX.WB (o64)
+-4a: DEC eDX (i64) | REX.WX (o64)
+-4b: DEC eBX (i64) | REX.WXB (o64)
+-4c: DEC eSP (i64) | REX.WR (o64)
+-4d: DEC eBP (i64) | REX.WRB (o64)
+-4e: DEC eSI (i64) | REX.WRX (o64)
+-4f: DEC eDI (i64) | REX.WRXB (o64)
+-# 0x50 - 0x5f
+-50: PUSH rAX/r8 (d64)
+-51: PUSH rCX/r9 (d64)
+-52: PUSH rDX/r10 (d64)
+-53: PUSH rBX/r11 (d64)
+-54: PUSH rSP/r12 (d64)
+-55: PUSH rBP/r13 (d64)
+-56: PUSH rSI/r14 (d64)
+-57: PUSH rDI/r15 (d64)
+-58: POP rAX/r8 (d64)
+-59: POP rCX/r9 (d64)
+-5a: POP rDX/r10 (d64)
+-5b: POP rBX/r11 (d64)
+-5c: POP rSP/r12 (d64)
+-5d: POP rBP/r13 (d64)
+-5e: POP rSI/r14 (d64)
+-5f: POP rDI/r15 (d64)
+-# 0x60 - 0x6f
+-60: PUSHA/PUSHAD (i64)
+-61: POPA/POPAD (i64)
+-62: BOUND Gv,Ma (i64) | EVEX (Prefix)
+-63: ARPL Ew,Gw (i64) | MOVSXD Gv,Ev (o64)
+-64: SEG=FS (Prefix)
+-65: SEG=GS (Prefix)
+-66: Operand-Size (Prefix)
+-67: Address-Size (Prefix)
+-68: PUSH Iz (d64)
+-69: IMUL Gv,Ev,Iz
+-6a: PUSH Ib (d64)
+-6b: IMUL Gv,Ev,Ib
+-6c: INS/INSB Yb,DX
+-6d: INS/INSW/INSD Yz,DX
+-6e: OUTS/OUTSB DX,Xb
+-6f: OUTS/OUTSW/OUTSD DX,Xz
+-# 0x70 - 0x7f
+-70: JO Jb
+-71: JNO Jb
+-72: JB/JNAE/JC Jb
+-73: JNB/JAE/JNC Jb
+-74: JZ/JE Jb
+-75: JNZ/JNE Jb
+-76: JBE/JNA Jb
+-77: JNBE/JA Jb
+-78: JS Jb
+-79: JNS Jb
+-7a: JP/JPE Jb
+-7b: JNP/JPO Jb
+-7c: JL/JNGE Jb
+-7d: JNL/JGE Jb
+-7e: JLE/JNG Jb
+-7f: JNLE/JG Jb
+-# 0x80 - 0x8f
+-80: Grp1 Eb,Ib (1A)
+-81: Grp1 Ev,Iz (1A)
+-82: Grp1 Eb,Ib (1A),(i64)
+-83: Grp1 Ev,Ib (1A)
+-84: TEST Eb,Gb
+-85: TEST Ev,Gv
+-86: XCHG Eb,Gb
+-87: XCHG Ev,Gv
+-88: MOV Eb,Gb
+-89: MOV Ev,Gv
+-8a: MOV Gb,Eb
+-8b: MOV Gv,Ev
+-8c: MOV Ev,Sw
+-8d: LEA Gv,M
+-8e: MOV Sw,Ew
+-8f: Grp1A (1A) | POP Ev (d64)
+-# 0x90 - 0x9f
+-90: NOP | PAUSE (F3) | XCHG r8,rAX
+-91: XCHG rCX/r9,rAX
+-92: XCHG rDX/r10,rAX
+-93: XCHG rBX/r11,rAX
+-94: XCHG rSP/r12,rAX
+-95: XCHG rBP/r13,rAX
+-96: XCHG rSI/r14,rAX
+-97: XCHG rDI/r15,rAX
+-98: CBW/CWDE/CDQE
+-99: CWD/CDQ/CQO
+-9a: CALLF Ap (i64)
+-9b: FWAIT/WAIT
+-9c: PUSHF/D/Q Fv (d64)
+-9d: POPF/D/Q Fv (d64)
+-9e: SAHF
+-9f: LAHF
+-# 0xa0 - 0xaf
+-a0: MOV AL,Ob
+-a1: MOV rAX,Ov
+-a2: MOV Ob,AL
+-a3: MOV Ov,rAX
+-a4: MOVS/B Yb,Xb
+-a5: MOVS/W/D/Q Yv,Xv
+-a6: CMPS/B Xb,Yb
+-a7: CMPS/W/D Xv,Yv
+-a8: TEST AL,Ib
+-a9: TEST rAX,Iz
+-aa: STOS/B Yb,AL
+-ab: STOS/W/D/Q Yv,rAX
+-ac: LODS/B AL,Xb
+-ad: LODS/W/D/Q rAX,Xv
+-ae: SCAS/B AL,Yb
+-# Note: The May 2011 Intel manual shows Xv for the second parameter of the
+-# next instruction but Yv is correct
+-af: SCAS/W/D/Q rAX,Yv
+-# 0xb0 - 0xbf
+-b0: MOV AL/R8L,Ib
+-b1: MOV CL/R9L,Ib
+-b2: MOV DL/R10L,Ib
+-b3: MOV BL/R11L,Ib
+-b4: MOV AH/R12L,Ib
+-b5: MOV CH/R13L,Ib
+-b6: MOV DH/R14L,Ib
+-b7: MOV BH/R15L,Ib
+-b8: MOV rAX/r8,Iv
+-b9: MOV rCX/r9,Iv
+-ba: MOV rDX/r10,Iv
+-bb: MOV rBX/r11,Iv
+-bc: MOV rSP/r12,Iv
+-bd: MOV rBP/r13,Iv
+-be: MOV rSI/r14,Iv
+-bf: MOV rDI/r15,Iv
+-# 0xc0 - 0xcf
+-c0: Grp2 Eb,Ib (1A)
+-c1: Grp2 Ev,Ib (1A)
+-c2: RETN Iw (f64)
+-c3: RETN
+-c4: LES Gz,Mp (i64) | VEX+2byte (Prefix)
+-c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix)
+-c6: Grp11A Eb,Ib (1A)
+-c7: Grp11B Ev,Iz (1A)
+-c8: ENTER Iw,Ib
+-c9: LEAVE (d64)
+-ca: RETF Iw
+-cb: RETF
+-cc: INT3
+-cd: INT Ib
+-ce: INTO (i64)
+-cf: IRET/D/Q
+-# 0xd0 - 0xdf
+-d0: Grp2 Eb,1 (1A)
+-d1: Grp2 Ev,1 (1A)
+-d2: Grp2 Eb,CL (1A)
+-d3: Grp2 Ev,CL (1A)
+-d4: AAM Ib (i64)
+-d5: AAD Ib (i64)
+-d6:
+-d7: XLAT/XLATB
+-d8: ESC
+-d9: ESC
+-da: ESC
+-db: ESC
+-dc: ESC
+-dd: ESC
+-de: ESC
+-df: ESC
+-# 0xe0 - 0xef
+-# Note: "forced64" is Intel CPU behavior: they ignore 0x66 prefix
+-# in 64-bit mode. AMD CPUs accept 0x66 prefix, it causes RIP truncation
+-# to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD.
+-e0: LOOPNE/LOOPNZ Jb (f64)
+-e1: LOOPE/LOOPZ Jb (f64)
+-e2: LOOP Jb (f64)
+-e3: JrCXZ Jb (f64)
+-e4: IN AL,Ib
+-e5: IN eAX,Ib
+-e6: OUT Ib,AL
+-e7: OUT Ib,eAX
+-# With 0x66 prefix in 64-bit mode, for AMD CPUs immediate offset
+-# in "near" jumps and calls is 16-bit. For CALL,
+-# push of return address is 16-bit wide, RSP is decremented by 2
+-# but is not truncated to 16 bits, unlike RIP.
+-e8: CALL Jz (f64)
+-e9: JMP-near Jz (f64)
+-ea: JMP-far Ap (i64)
+-eb: JMP-short Jb (f64)
+-ec: IN AL,DX
+-ed: IN eAX,DX
+-ee: OUT DX,AL
+-ef: OUT DX,eAX
+-# 0xf0 - 0xff
+-f0: LOCK (Prefix)
+-f1:
+-f2: REPNE (Prefix) | XACQUIRE (Prefix)
+-f3: REP/REPE (Prefix) | XRELEASE (Prefix)
+-f4: HLT
+-f5: CMC
+-f6: Grp3_1 Eb (1A)
+-f7: Grp3_2 Ev (1A)
+-f8: CLC
+-f9: STC
+-fa: CLI
+-fb: STI
+-fc: CLD
+-fd: STD
+-fe: Grp4 (1A)
+-ff: Grp5 (1A)
+-EndTable
+-
+-Table: 2-byte opcode (0x0f)
+-Referrer: 2-byte escape
+-AVXcode: 1
+-# 0x0f 0x00-0x0f
+-00: Grp6 (1A)
+-01: Grp7 (1A)
+-02: LAR Gv,Ew
+-03: LSL Gv,Ew
+-04:
+-05: SYSCALL (o64)
+-06: CLTS
+-07: SYSRET (o64)
+-08: INVD
+-09: WBINVD
+-0a:
+-0b: UD2 (1B)
+-0c:
+-# AMD's prefetch group. Intel supports prefetchw(/1) only.
+-0d: GrpP
+-0e: FEMMS
+-# 3DNow! uses the last imm byte as opcode extension.
+-0f: 3DNow! Pq,Qq,Ib
+-# 0x0f 0x10-0x1f
+-# NOTE: According to Intel SDM opcode map, vmovups and vmovupd has no operands
+-# but it actually has operands. And also, vmovss and vmovsd only accept 128bit.
+-# MOVSS/MOVSD has too many forms(3) on SDM. This map just shows a typical form.
+-# Many AVX instructions lack v1 superscript, according to Intel AVX-Prgramming
+-# Reference A.1
+-10: vmovups Vps,Wps | vmovupd Vpd,Wpd (66) | vmovss Vx,Hx,Wss (F3),(v1) | vmovsd Vx,Hx,Wsd (F2),(v1)
+-11: vmovups Wps,Vps | vmovupd Wpd,Vpd (66) | vmovss Wss,Hx,Vss (F3),(v1) | vmovsd Wsd,Hx,Vsd (F2),(v1)
+-12: vmovlps Vq,Hq,Mq (v1) | vmovhlps Vq,Hq,Uq (v1) | vmovlpd Vq,Hq,Mq (66),(v1) | vmovsldup Vx,Wx (F3) | vmovddup Vx,Wx (F2)
+-13: vmovlps Mq,Vq (v1) | vmovlpd Mq,Vq (66),(v1)
+-14: vunpcklps Vx,Hx,Wx | vunpcklpd Vx,Hx,Wx (66)
+-15: vunpckhps Vx,Hx,Wx | vunpckhpd Vx,Hx,Wx (66)
+-16: vmovhps Vdq,Hq,Mq (v1) | vmovlhps Vdq,Hq,Uq (v1) | vmovhpd Vdq,Hq,Mq (66),(v1) | vmovshdup Vx,Wx (F3)
+-17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1)
+-18: Grp16 (1A)
+-19:
+-# Intel SDM opcode map does not list MPX instructions. For now using Gv for
+-# bnd registers and Ev for everything else is OK because the instruction
+-# decoder does not use the information except as an indication that there is
+-# a ModR/M byte.
+-1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
+-1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
+-1c:
+-1d:
+-1e:
+-1f: NOP Ev
+-# 0x0f 0x20-0x2f
+-20: MOV Rd,Cd
+-21: MOV Rd,Dd
+-22: MOV Cd,Rd
+-23: MOV Dd,Rd
+-24:
+-25:
+-26:
+-27:
+-28: vmovaps Vps,Wps | vmovapd Vpd,Wpd (66)
+-29: vmovaps Wps,Vps | vmovapd Wpd,Vpd (66)
+-2a: cvtpi2ps Vps,Qpi | cvtpi2pd Vpd,Qpi (66) | vcvtsi2ss Vss,Hss,Ey (F3),(v1) | vcvtsi2sd Vsd,Hsd,Ey (F2),(v1)
+-2b: vmovntps Mps,Vps | vmovntpd Mpd,Vpd (66)
+-2c: cvttps2pi Ppi,Wps | cvttpd2pi Ppi,Wpd (66) | vcvttss2si Gy,Wss (F3),(v1) | vcvttsd2si Gy,Wsd (F2),(v1)
+-2d: cvtps2pi Ppi,Wps | cvtpd2pi Qpi,Wpd (66) | vcvtss2si Gy,Wss (F3),(v1) | vcvtsd2si Gy,Wsd (F2),(v1)
+-2e: vucomiss Vss,Wss (v1) | vucomisd Vsd,Wsd (66),(v1)
+-2f: vcomiss Vss,Wss (v1) | vcomisd Vsd,Wsd (66),(v1)
+-# 0x0f 0x30-0x3f
+-30: WRMSR
+-31: RDTSC
+-32: RDMSR
+-33: RDPMC
+-34: SYSENTER
+-35: SYSEXIT
+-36:
+-37: GETSEC
+-38: escape # 3-byte escape 1
+-39:
+-3a: escape # 3-byte escape 2
+-3b:
+-3c:
+-3d:
+-3e:
+-3f:
+-# 0x0f 0x40-0x4f
+-40: CMOVO Gv,Ev
+-41: CMOVNO Gv,Ev | kandw/q Vk,Hk,Uk | kandb/d Vk,Hk,Uk (66)
+-42: CMOVB/C/NAE Gv,Ev | kandnw/q Vk,Hk,Uk | kandnb/d Vk,Hk,Uk (66)
+-43: CMOVAE/NB/NC Gv,Ev
+-44: CMOVE/Z Gv,Ev | knotw/q Vk,Uk | knotb/d Vk,Uk (66)
+-45: CMOVNE/NZ Gv,Ev | korw/q Vk,Hk,Uk | korb/d Vk,Hk,Uk (66)
+-46: CMOVBE/NA Gv,Ev | kxnorw/q Vk,Hk,Uk | kxnorb/d Vk,Hk,Uk (66)
+-47: CMOVA/NBE Gv,Ev | kxorw/q Vk,Hk,Uk | kxorb/d Vk,Hk,Uk (66)
+-48: CMOVS Gv,Ev
+-49: CMOVNS Gv,Ev
+-4a: CMOVP/PE Gv,Ev | kaddw/q Vk,Hk,Uk | kaddb/d Vk,Hk,Uk (66)
+-4b: CMOVNP/PO Gv,Ev | kunpckbw Vk,Hk,Uk (66) | kunpckwd/dq Vk,Hk,Uk
+-4c: CMOVL/NGE Gv,Ev
+-4d: CMOVNL/GE Gv,Ev
+-4e: CMOVLE/NG Gv,Ev
+-4f: CMOVNLE/G Gv,Ev
+-# 0x0f 0x50-0x5f
+-50: vmovmskps Gy,Ups | vmovmskpd Gy,Upd (66)
+-51: vsqrtps Vps,Wps | vsqrtpd Vpd,Wpd (66) | vsqrtss Vss,Hss,Wss (F3),(v1) | vsqrtsd Vsd,Hsd,Wsd (F2),(v1)
+-52: vrsqrtps Vps,Wps | vrsqrtss Vss,Hss,Wss (F3),(v1)
+-53: vrcpps Vps,Wps | vrcpss Vss,Hss,Wss (F3),(v1)
+-54: vandps Vps,Hps,Wps | vandpd Vpd,Hpd,Wpd (66)
+-55: vandnps Vps,Hps,Wps | vandnpd Vpd,Hpd,Wpd (66)
+-56: vorps Vps,Hps,Wps | vorpd Vpd,Hpd,Wpd (66)
+-57: vxorps Vps,Hps,Wps | vxorpd Vpd,Hpd,Wpd (66)
+-58: vaddps Vps,Hps,Wps | vaddpd Vpd,Hpd,Wpd (66) | vaddss Vss,Hss,Wss (F3),(v1) | vaddsd Vsd,Hsd,Wsd (F2),(v1)
+-59: vmulps Vps,Hps,Wps | vmulpd Vpd,Hpd,Wpd (66) | vmulss Vss,Hss,Wss (F3),(v1) | vmulsd Vsd,Hsd,Wsd (F2),(v1)
+-5a: vcvtps2pd Vpd,Wps | vcvtpd2ps Vps,Wpd (66) | vcvtss2sd Vsd,Hx,Wss (F3),(v1) | vcvtsd2ss Vss,Hx,Wsd (F2),(v1)
+-5b: vcvtdq2ps Vps,Wdq | vcvtqq2ps Vps,Wqq (evo) | vcvtps2dq Vdq,Wps (66) | vcvttps2dq Vdq,Wps (F3)
+-5c: vsubps Vps,Hps,Wps | vsubpd Vpd,Hpd,Wpd (66) | vsubss Vss,Hss,Wss (F3),(v1) | vsubsd Vsd,Hsd,Wsd (F2),(v1)
+-5d: vminps Vps,Hps,Wps | vminpd Vpd,Hpd,Wpd (66) | vminss Vss,Hss,Wss (F3),(v1) | vminsd Vsd,Hsd,Wsd (F2),(v1)
+-5e: vdivps Vps,Hps,Wps | vdivpd Vpd,Hpd,Wpd (66) | vdivss Vss,Hss,Wss (F3),(v1) | vdivsd Vsd,Hsd,Wsd (F2),(v1)
+-5f: vmaxps Vps,Hps,Wps | vmaxpd Vpd,Hpd,Wpd (66) | vmaxss Vss,Hss,Wss (F3),(v1) | vmaxsd Vsd,Hsd,Wsd (F2),(v1)
+-# 0x0f 0x60-0x6f
+-60: punpcklbw Pq,Qd | vpunpcklbw Vx,Hx,Wx (66),(v1)
+-61: punpcklwd Pq,Qd | vpunpcklwd Vx,Hx,Wx (66),(v1)
+-62: punpckldq Pq,Qd | vpunpckldq Vx,Hx,Wx (66),(v1)
+-63: packsswb Pq,Qq | vpacksswb Vx,Hx,Wx (66),(v1)
+-64: pcmpgtb Pq,Qq | vpcmpgtb Vx,Hx,Wx (66),(v1)
+-65: pcmpgtw Pq,Qq | vpcmpgtw Vx,Hx,Wx (66),(v1)
+-66: pcmpgtd Pq,Qq | vpcmpgtd Vx,Hx,Wx (66),(v1)
+-67: packuswb Pq,Qq | vpackuswb Vx,Hx,Wx (66),(v1)
+-68: punpckhbw Pq,Qd | vpunpckhbw Vx,Hx,Wx (66),(v1)
+-69: punpckhwd Pq,Qd | vpunpckhwd Vx,Hx,Wx (66),(v1)
+-6a: punpckhdq Pq,Qd | vpunpckhdq Vx,Hx,Wx (66),(v1)
+-6b: packssdw Pq,Qd | vpackssdw Vx,Hx,Wx (66),(v1)
+-6c: vpunpcklqdq Vx,Hx,Wx (66),(v1)
+-6d: vpunpckhqdq Vx,Hx,Wx (66),(v1)
+-6e: movd/q Pd,Ey | vmovd/q Vy,Ey (66),(v1)
+-6f: movq Pq,Qq | vmovdqa Vx,Wx (66) | vmovdqa32/64 Vx,Wx (66),(evo) | vmovdqu Vx,Wx (F3) | vmovdqu32/64 Vx,Wx (F3),(evo) | vmovdqu8/16 Vx,Wx (F2),(ev)
+-# 0x0f 0x70-0x7f
+-70: pshufw Pq,Qq,Ib | vpshufd Vx,Wx,Ib (66),(v1) | vpshufhw Vx,Wx,Ib (F3),(v1) | vpshuflw Vx,Wx,Ib (F2),(v1)
+-71: Grp12 (1A)
+-72: Grp13 (1A)
+-73: Grp14 (1A)
+-74: pcmpeqb Pq,Qq | vpcmpeqb Vx,Hx,Wx (66),(v1)
+-75: pcmpeqw Pq,Qq | vpcmpeqw Vx,Hx,Wx (66),(v1)
+-76: pcmpeqd Pq,Qq | vpcmpeqd Vx,Hx,Wx (66),(v1)
+-# Note: Remove (v), because vzeroall and vzeroupper becomes emms without VEX.
+-77: emms | vzeroupper | vzeroall
+-78: VMREAD Ey,Gy | vcvttps2udq/pd2udq Vx,Wpd (evo) | vcvttsd2usi Gv,Wx (F2),(ev) | vcvttss2usi Gv,Wx (F3),(ev) | vcvttps2uqq/pd2uqq Vx,Wx (66),(ev)
+-79: VMWRITE Gy,Ey | vcvtps2udq/pd2udq Vx,Wpd (evo) | vcvtsd2usi Gv,Wx (F2),(ev) | vcvtss2usi Gv,Wx (F3),(ev) | vcvtps2uqq/pd2uqq Vx,Wx (66),(ev)
+-7a: vcvtudq2pd/uqq2pd Vpd,Wx (F3),(ev) | vcvtudq2ps/uqq2ps Vpd,Wx (F2),(ev) | vcvttps2qq/pd2qq Vx,Wx (66),(ev)
+-7b: vcvtusi2sd Vpd,Hpd,Ev (F2),(ev) | vcvtusi2ss Vps,Hps,Ev (F3),(ev) | vcvtps2qq/pd2qq Vx,Wx (66),(ev)
+-7c: vhaddpd Vpd,Hpd,Wpd (66) | vhaddps Vps,Hps,Wps (F2)
+-7d: vhsubpd Vpd,Hpd,Wpd (66) | vhsubps Vps,Hps,Wps (F2)
+-7e: movd/q Ey,Pd | vmovd/q Ey,Vy (66),(v1) | vmovq Vq,Wq (F3),(v1)
+-7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqa32/64 Wx,Vx (66),(evo) | vmovdqu Wx,Vx (F3) | vmovdqu32/64 Wx,Vx (F3),(evo) | vmovdqu8/16 Wx,Vx (F2),(ev)
+-# 0x0f 0x80-0x8f
+-# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
+-80: JO Jz (f64)
+-81: JNO Jz (f64)
+-82: JB/JC/JNAE Jz (f64)
+-83: JAE/JNB/JNC Jz (f64)
+-84: JE/JZ Jz (f64)
+-85: JNE/JNZ Jz (f64)
+-86: JBE/JNA Jz (f64)
+-87: JA/JNBE Jz (f64)
+-88: JS Jz (f64)
+-89: JNS Jz (f64)
+-8a: JP/JPE Jz (f64)
+-8b: JNP/JPO Jz (f64)
+-8c: JL/JNGE Jz (f64)
+-8d: JNL/JGE Jz (f64)
+-8e: JLE/JNG Jz (f64)
+-8f: JNLE/JG Jz (f64)
+-# 0x0f 0x90-0x9f
+-90: SETO Eb | kmovw/q Vk,Wk | kmovb/d Vk,Wk (66)
+-91: SETNO Eb | kmovw/q Mv,Vk | kmovb/d Mv,Vk (66)
+-92: SETB/C/NAE Eb | kmovw Vk,Rv | kmovb Vk,Rv (66) | kmovq/d Vk,Rv (F2)
+-93: SETAE/NB/NC Eb | kmovw Gv,Uk | kmovb Gv,Uk (66) | kmovq/d Gv,Uk (F2)
+-94: SETE/Z Eb
+-95: SETNE/NZ Eb
+-96: SETBE/NA Eb
+-97: SETA/NBE Eb
+-98: SETS Eb | kortestw/q Vk,Uk | kortestb/d Vk,Uk (66)
+-99: SETNS Eb | ktestw/q Vk,Uk | ktestb/d Vk,Uk (66)
+-9a: SETP/PE Eb
+-9b: SETNP/PO Eb
+-9c: SETL/NGE Eb
+-9d: SETNL/GE Eb
+-9e: SETLE/NG Eb
+-9f: SETNLE/G Eb
+-# 0x0f 0xa0-0xaf
+-a0: PUSH FS (d64)
+-a1: POP FS (d64)
+-a2: CPUID
+-a3: BT Ev,Gv
+-a4: SHLD Ev,Gv,Ib
+-a5: SHLD Ev,Gv,CL
+-a6: GrpPDLK
+-a7: GrpRNG
+-a8: PUSH GS (d64)
+-a9: POP GS (d64)
+-aa: RSM
+-ab: BTS Ev,Gv
+-ac: SHRD Ev,Gv,Ib
+-ad: SHRD Ev,Gv,CL
+-ae: Grp15 (1A),(1C)
+-af: IMUL Gv,Ev
+-# 0x0f 0xb0-0xbf
+-b0: CMPXCHG Eb,Gb
+-b1: CMPXCHG Ev,Gv
+-b2: LSS Gv,Mp
+-b3: BTR Ev,Gv
+-b4: LFS Gv,Mp
+-b5: LGS Gv,Mp
+-b6: MOVZX Gv,Eb
+-b7: MOVZX Gv,Ew
+-b8: JMPE (!F3) | POPCNT Gv,Ev (F3)
+-b9: Grp10 (1A)
+-ba: Grp8 Ev,Ib (1A)
+-bb: BTC Ev,Gv
+-bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3)
+-bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3)
+-be: MOVSX Gv,Eb
+-bf: MOVSX Gv,Ew
+-# 0x0f 0xc0-0xcf
+-c0: XADD Eb,Gb
+-c1: XADD Ev,Gv
+-c2: vcmpps Vps,Hps,Wps,Ib | vcmppd Vpd,Hpd,Wpd,Ib (66) | vcmpss Vss,Hss,Wss,Ib (F3),(v1) | vcmpsd Vsd,Hsd,Wsd,Ib (F2),(v1)
+-c3: movnti My,Gy
+-c4: pinsrw Pq,Ry/Mw,Ib | vpinsrw Vdq,Hdq,Ry/Mw,Ib (66),(v1)
+-c5: pextrw Gd,Nq,Ib | vpextrw Gd,Udq,Ib (66),(v1)
+-c6: vshufps Vps,Hps,Wps,Ib | vshufpd Vpd,Hpd,Wpd,Ib (66)
+-c7: Grp9 (1A)
+-c8: BSWAP RAX/EAX/R8/R8D
+-c9: BSWAP RCX/ECX/R9/R9D
+-ca: BSWAP RDX/EDX/R10/R10D
+-cb: BSWAP RBX/EBX/R11/R11D
+-cc: BSWAP RSP/ESP/R12/R12D
+-cd: BSWAP RBP/EBP/R13/R13D
+-ce: BSWAP RSI/ESI/R14/R14D
+-cf: BSWAP RDI/EDI/R15/R15D
+-# 0x0f 0xd0-0xdf
+-d0: vaddsubpd Vpd,Hpd,Wpd (66) | vaddsubps Vps,Hps,Wps (F2)
+-d1: psrlw Pq,Qq | vpsrlw Vx,Hx,Wx (66),(v1)
+-d2: psrld Pq,Qq | vpsrld Vx,Hx,Wx (66),(v1)
+-d3: psrlq Pq,Qq | vpsrlq Vx,Hx,Wx (66),(v1)
+-d4: paddq Pq,Qq | vpaddq Vx,Hx,Wx (66),(v1)
+-d5: pmullw Pq,Qq | vpmullw Vx,Hx,Wx (66),(v1)
+-d6: vmovq Wq,Vq (66),(v1) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2)
+-d7: pmovmskb Gd,Nq | vpmovmskb Gd,Ux (66),(v1)
+-d8: psubusb Pq,Qq | vpsubusb Vx,Hx,Wx (66),(v1)
+-d9: psubusw Pq,Qq | vpsubusw Vx,Hx,Wx (66),(v1)
+-da: pminub Pq,Qq | vpminub Vx,Hx,Wx (66),(v1)
+-db: pand Pq,Qq | vpand Vx,Hx,Wx (66),(v1) | vpandd/q Vx,Hx,Wx (66),(evo)
+-dc: paddusb Pq,Qq | vpaddusb Vx,Hx,Wx (66),(v1)
+-dd: paddusw Pq,Qq | vpaddusw Vx,Hx,Wx (66),(v1)
+-de: pmaxub Pq,Qq | vpmaxub Vx,Hx,Wx (66),(v1)
+-df: pandn Pq,Qq | vpandn Vx,Hx,Wx (66),(v1) | vpandnd/q Vx,Hx,Wx (66),(evo)
+-# 0x0f 0xe0-0xef
+-e0: pavgb Pq,Qq | vpavgb Vx,Hx,Wx (66),(v1)
+-e1: psraw Pq,Qq | vpsraw Vx,Hx,Wx (66),(v1)
+-e2: psrad Pq,Qq | vpsrad Vx,Hx,Wx (66),(v1)
+-e3: pavgw Pq,Qq | vpavgw Vx,Hx,Wx (66),(v1)
+-e4: pmulhuw Pq,Qq | vpmulhuw Vx,Hx,Wx (66),(v1)
+-e5: pmulhw Pq,Qq | vpmulhw Vx,Hx,Wx (66),(v1)
+-e6: vcvttpd2dq Vx,Wpd (66) | vcvtdq2pd Vx,Wdq (F3) | vcvtdq2pd/qq2pd Vx,Wdq (F3),(evo) | vcvtpd2dq Vx,Wpd (F2)
+-e7: movntq Mq,Pq | vmovntdq Mx,Vx (66)
+-e8: psubsb Pq,Qq | vpsubsb Vx,Hx,Wx (66),(v1)
+-e9: psubsw Pq,Qq | vpsubsw Vx,Hx,Wx (66),(v1)
+-ea: pminsw Pq,Qq | vpminsw Vx,Hx,Wx (66),(v1)
+-eb: por Pq,Qq | vpor Vx,Hx,Wx (66),(v1) | vpord/q Vx,Hx,Wx (66),(evo)
+-ec: paddsb Pq,Qq | vpaddsb Vx,Hx,Wx (66),(v1)
+-ed: paddsw Pq,Qq | vpaddsw Vx,Hx,Wx (66),(v1)
+-ee: pmaxsw Pq,Qq | vpmaxsw Vx,Hx,Wx (66),(v1)
+-ef: pxor Pq,Qq | vpxor Vx,Hx,Wx (66),(v1) | vpxord/q Vx,Hx,Wx (66),(evo)
+-# 0x0f 0xf0-0xff
+-f0: vlddqu Vx,Mx (F2)
+-f1: psllw Pq,Qq | vpsllw Vx,Hx,Wx (66),(v1)
+-f2: pslld Pq,Qq | vpslld Vx,Hx,Wx (66),(v1)
+-f3: psllq Pq,Qq | vpsllq Vx,Hx,Wx (66),(v1)
+-f4: pmuludq Pq,Qq | vpmuludq Vx,Hx,Wx (66),(v1)
+-f5: pmaddwd Pq,Qq | vpmaddwd Vx,Hx,Wx (66),(v1)
+-f6: psadbw Pq,Qq | vpsadbw Vx,Hx,Wx (66),(v1)
+-f7: maskmovq Pq,Nq | vmaskmovdqu Vx,Ux (66),(v1)
+-f8: psubb Pq,Qq | vpsubb Vx,Hx,Wx (66),(v1)
+-f9: psubw Pq,Qq | vpsubw Vx,Hx,Wx (66),(v1)
+-fa: psubd Pq,Qq | vpsubd Vx,Hx,Wx (66),(v1)
+-fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
+-fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
+-fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
+-fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
+-ff:
+-EndTable
+-
+-Table: 3-byte opcode 1 (0x0f 0x38)
+-Referrer: 3-byte escape 1
+-AVXcode: 2
+-# 0x0f 0x38 0x00-0x0f
+-00: pshufb Pq,Qq | vpshufb Vx,Hx,Wx (66),(v1)
+-01: phaddw Pq,Qq | vphaddw Vx,Hx,Wx (66),(v1)
+-02: phaddd Pq,Qq | vphaddd Vx,Hx,Wx (66),(v1)
+-03: phaddsw Pq,Qq | vphaddsw Vx,Hx,Wx (66),(v1)
+-04: pmaddubsw Pq,Qq | vpmaddubsw Vx,Hx,Wx (66),(v1)
+-05: phsubw Pq,Qq | vphsubw Vx,Hx,Wx (66),(v1)
+-06: phsubd Pq,Qq | vphsubd Vx,Hx,Wx (66),(v1)
+-07: phsubsw Pq,Qq | vphsubsw Vx,Hx,Wx (66),(v1)
+-08: psignb Pq,Qq | vpsignb Vx,Hx,Wx (66),(v1)
+-09: psignw Pq,Qq | vpsignw Vx,Hx,Wx (66),(v1)
+-0a: psignd Pq,Qq | vpsignd Vx,Hx,Wx (66),(v1)
+-0b: pmulhrsw Pq,Qq | vpmulhrsw Vx,Hx,Wx (66),(v1)
+-0c: vpermilps Vx,Hx,Wx (66),(v)
+-0d: vpermilpd Vx,Hx,Wx (66),(v)
+-0e: vtestps Vx,Wx (66),(v)
+-0f: vtestpd Vx,Wx (66),(v)
+-# 0x0f 0x38 0x10-0x1f
+-10: pblendvb Vdq,Wdq (66) | vpsrlvw Vx,Hx,Wx (66),(evo) | vpmovuswb Wx,Vx (F3),(ev)
+-11: vpmovusdb Wx,Vd (F3),(ev) | vpsravw Vx,Hx,Wx (66),(ev)
+-12: vpmovusqb Wx,Vq (F3),(ev) | vpsllvw Vx,Hx,Wx (66),(ev)
+-13: vcvtph2ps Vx,Wx (66),(v) | vpmovusdw Wx,Vd (F3),(ev)
+-14: blendvps Vdq,Wdq (66) | vpmovusqw Wx,Vq (F3),(ev) | vprorvd/q Vx,Hx,Wx (66),(evo)
+-15: blendvpd Vdq,Wdq (66) | vpmovusqd Wx,Vq (F3),(ev) | vprolvd/q Vx,Hx,Wx (66),(evo)
+-16: vpermps Vqq,Hqq,Wqq (66),(v) | vpermps/d Vqq,Hqq,Wqq (66),(evo)
+-17: vptest Vx,Wx (66)
+-18: vbroadcastss Vx,Wd (66),(v)
+-19: vbroadcastsd Vqq,Wq (66),(v) | vbroadcastf32x2 Vqq,Wq (66),(evo)
+-1a: vbroadcastf128 Vqq,Mdq (66),(v) | vbroadcastf32x4/64x2 Vqq,Wq (66),(evo)
+-1b: vbroadcastf32x8/64x4 Vqq,Mdq (66),(ev)
+-1c: pabsb Pq,Qq | vpabsb Vx,Wx (66),(v1)
+-1d: pabsw Pq,Qq | vpabsw Vx,Wx (66),(v1)
+-1e: pabsd Pq,Qq | vpabsd Vx,Wx (66),(v1)
+-1f: vpabsq Vx,Wx (66),(ev)
+-# 0x0f 0x38 0x20-0x2f
+-20: vpmovsxbw Vx,Ux/Mq (66),(v1) | vpmovswb Wx,Vx (F3),(ev)
+-21: vpmovsxbd Vx,Ux/Md (66),(v1) | vpmovsdb Wx,Vd (F3),(ev)
+-22: vpmovsxbq Vx,Ux/Mw (66),(v1) | vpmovsqb Wx,Vq (F3),(ev)
+-23: vpmovsxwd Vx,Ux/Mq (66),(v1) | vpmovsdw Wx,Vd (F3),(ev)
+-24: vpmovsxwq Vx,Ux/Md (66),(v1) | vpmovsqw Wx,Vq (F3),(ev)
+-25: vpmovsxdq Vx,Ux/Mq (66),(v1) | vpmovsqd Wx,Vq (F3),(ev)
+-26: vptestmb/w Vk,Hx,Wx (66),(ev) | vptestnmb/w Vk,Hx,Wx (F3),(ev)
+-27: vptestmd/q Vk,Hx,Wx (66),(ev) | vptestnmd/q Vk,Hx,Wx (F3),(ev)
+-28: vpmuldq Vx,Hx,Wx (66),(v1) | vpmovm2b/w Vx,Uk (F3),(ev)
+-29: vpcmpeqq Vx,Hx,Wx (66),(v1) | vpmovb2m/w2m Vk,Ux (F3),(ev)
+-2a: vmovntdqa Vx,Mx (66),(v1) | vpbroadcastmb2q Vx,Uk (F3),(ev)
+-2b: vpackusdw Vx,Hx,Wx (66),(v1)
+-2c: vmaskmovps Vx,Hx,Mx (66),(v) | vscalefps/d Vx,Hx,Wx (66),(evo)
+-2d: vmaskmovpd Vx,Hx,Mx (66),(v) | vscalefss/d Vx,Hx,Wx (66),(evo)
+-2e: vmaskmovps Mx,Hx,Vx (66),(v)
+-2f: vmaskmovpd Mx,Hx,Vx (66),(v)
+-# 0x0f 0x38 0x30-0x3f
+-30: vpmovzxbw Vx,Ux/Mq (66),(v1) | vpmovwb Wx,Vx (F3),(ev)
+-31: vpmovzxbd Vx,Ux/Md (66),(v1) | vpmovdb Wx,Vd (F3),(ev)
+-32: vpmovzxbq Vx,Ux/Mw (66),(v1) | vpmovqb Wx,Vq (F3),(ev)
+-33: vpmovzxwd Vx,Ux/Mq (66),(v1) | vpmovdw Wx,Vd (F3),(ev)
+-34: vpmovzxwq Vx,Ux/Md (66),(v1) | vpmovqw Wx,Vq (F3),(ev)
+-35: vpmovzxdq Vx,Ux/Mq (66),(v1) | vpmovqd Wx,Vq (F3),(ev)
+-36: vpermd Vqq,Hqq,Wqq (66),(v) | vpermd/q Vqq,Hqq,Wqq (66),(evo)
+-37: vpcmpgtq Vx,Hx,Wx (66),(v1)
+-38: vpminsb Vx,Hx,Wx (66),(v1) | vpmovm2d/q Vx,Uk (F3),(ev)
+-39: vpminsd Vx,Hx,Wx (66),(v1) | vpminsd/q Vx,Hx,Wx (66),(evo) | vpmovd2m/q2m Vk,Ux (F3),(ev)
+-3a: vpminuw Vx,Hx,Wx (66),(v1) | vpbroadcastmw2d Vx,Uk (F3),(ev)
+-3b: vpminud Vx,Hx,Wx (66),(v1) | vpminud/q Vx,Hx,Wx (66),(evo)
+-3c: vpmaxsb Vx,Hx,Wx (66),(v1)
+-3d: vpmaxsd Vx,Hx,Wx (66),(v1) | vpmaxsd/q Vx,Hx,Wx (66),(evo)
+-3e: vpmaxuw Vx,Hx,Wx (66),(v1)
+-3f: vpmaxud Vx,Hx,Wx (66),(v1) | vpmaxud/q Vx,Hx,Wx (66),(evo)
+-# 0x0f 0x38 0x40-0x8f
+-40: vpmulld Vx,Hx,Wx (66),(v1) | vpmulld/q Vx,Hx,Wx (66),(evo)
+-41: vphminposuw Vdq,Wdq (66),(v1)
+-42: vgetexpps/d Vx,Wx (66),(ev)
+-43: vgetexpss/d Vx,Hx,Wx (66),(ev)
+-44: vplzcntd/q Vx,Wx (66),(ev)
+-45: vpsrlvd/q Vx,Hx,Wx (66),(v)
+-46: vpsravd Vx,Hx,Wx (66),(v) | vpsravd/q Vx,Hx,Wx (66),(evo)
+-47: vpsllvd/q Vx,Hx,Wx (66),(v)
+-# Skip 0x48-0x4b
+-4c: vrcp14ps/d Vpd,Wpd (66),(ev)
+-4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
+-4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
+-4f: vrsqrt14ss/d Vsd,Hsd,Wsd (66),(ev)
+-# Skip 0x50-0x57
+-58: vpbroadcastd Vx,Wx (66),(v)
+-59: vpbroadcastq Vx,Wx (66),(v) | vbroadcasti32x2 Vx,Wx (66),(evo)
+-5a: vbroadcasti128 Vqq,Mdq (66),(v) | vbroadcasti32x4/64x2 Vx,Wx (66),(evo)
+-5b: vbroadcasti32x8/64x4 Vqq,Mdq (66),(ev)
+-# Skip 0x5c-0x63
+-64: vpblendmd/q Vx,Hx,Wx (66),(ev)
+-65: vblendmps/d Vx,Hx,Wx (66),(ev)
+-66: vpblendmb/w Vx,Hx,Wx (66),(ev)
+-# Skip 0x67-0x74
+-75: vpermi2b/w Vx,Hx,Wx (66),(ev)
+-76: vpermi2d/q Vx,Hx,Wx (66),(ev)
+-77: vpermi2ps/d Vx,Hx,Wx (66),(ev)
+-78: vpbroadcastb Vx,Wx (66),(v)
+-79: vpbroadcastw Vx,Wx (66),(v)
+-7a: vpbroadcastb Vx,Rv (66),(ev)
+-7b: vpbroadcastw Vx,Rv (66),(ev)
+-7c: vpbroadcastd/q Vx,Rv (66),(ev)
+-7d: vpermt2b/w Vx,Hx,Wx (66),(ev)
+-7e: vpermt2d/q Vx,Hx,Wx (66),(ev)
+-7f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
+-80: INVEPT Gy,Mdq (66)
+-81: INVPID Gy,Mdq (66)
+-82: INVPCID Gy,Mdq (66)
+-83: vpmultishiftqb Vx,Hx,Wx (66),(ev)
+-88: vexpandps/d Vpd,Wpd (66),(ev)
+-89: vpexpandd/q Vx,Wx (66),(ev)
+-8a: vcompressps/d Wx,Vx (66),(ev)
+-8b: vpcompressd/q Wx,Vx (66),(ev)
+-8c: vpmaskmovd/q Vx,Hx,Mx (66),(v)
+-8d: vpermb/w Vx,Hx,Wx (66),(ev)
+-8e: vpmaskmovd/q Mx,Vx,Hx (66),(v)
+-# 0x0f 0x38 0x90-0xbf (FMA)
+-90: vgatherdd/q Vx,Hx,Wx (66),(v) | vpgatherdd/q Vx,Wx (66),(evo)
+-91: vgatherqd/q Vx,Hx,Wx (66),(v) | vpgatherqd/q Vx,Wx (66),(evo)
+-92: vgatherdps/d Vx,Hx,Wx (66),(v)
+-93: vgatherqps/d Vx,Hx,Wx (66),(v)
+-94:
+-95:
+-96: vfmaddsub132ps/d Vx,Hx,Wx (66),(v)
+-97: vfmsubadd132ps/d Vx,Hx,Wx (66),(v)
+-98: vfmadd132ps/d Vx,Hx,Wx (66),(v)
+-99: vfmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
+-9a: vfmsub132ps/d Vx,Hx,Wx (66),(v)
+-9b: vfmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
+-9c: vfnmadd132ps/d Vx,Hx,Wx (66),(v)
+-9d: vfnmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
+-9e: vfnmsub132ps/d Vx,Hx,Wx (66),(v)
+-9f: vfnmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
+-a0: vpscatterdd/q Wx,Vx (66),(ev)
+-a1: vpscatterqd/q Wx,Vx (66),(ev)
+-a2: vscatterdps/d Wx,Vx (66),(ev)
+-a3: vscatterqps/d Wx,Vx (66),(ev)
+-a6: vfmaddsub213ps/d Vx,Hx,Wx (66),(v)
+-a7: vfmsubadd213ps/d Vx,Hx,Wx (66),(v)
+-a8: vfmadd213ps/d Vx,Hx,Wx (66),(v)
+-a9: vfmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
+-aa: vfmsub213ps/d Vx,Hx,Wx (66),(v)
+-ab: vfmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
+-ac: vfnmadd213ps/d Vx,Hx,Wx (66),(v)
+-ad: vfnmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
+-ae: vfnmsub213ps/d Vx,Hx,Wx (66),(v)
+-af: vfnmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
+-b4: vpmadd52luq Vx,Hx,Wx (66),(ev)
+-b5: vpmadd52huq Vx,Hx,Wx (66),(ev)
+-b6: vfmaddsub231ps/d Vx,Hx,Wx (66),(v)
+-b7: vfmsubadd231ps/d Vx,Hx,Wx (66),(v)
+-b8: vfmadd231ps/d Vx,Hx,Wx (66),(v)
+-b9: vfmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
+-ba: vfmsub231ps/d Vx,Hx,Wx (66),(v)
+-bb: vfmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
+-bc: vfnmadd231ps/d Vx,Hx,Wx (66),(v)
+-bd: vfnmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
+-be: vfnmsub231ps/d Vx,Hx,Wx (66),(v)
+-bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
+-# 0x0f 0x38 0xc0-0xff
+-c4: vpconflictd/q Vx,Wx (66),(ev)
+-c6: Grp18 (1A)
+-c7: Grp19 (1A)
+-c8: sha1nexte Vdq,Wdq | vexp2ps/d Vx,Wx (66),(ev)
+-c9: sha1msg1 Vdq,Wdq
+-ca: sha1msg2 Vdq,Wdq | vrcp28ps/d Vx,Wx (66),(ev)
+-cb: sha256rnds2 Vdq,Wdq | vrcp28ss/d Vx,Hx,Wx (66),(ev)
+-cc: sha256msg1 Vdq,Wdq | vrsqrt28ps/d Vx,Wx (66),(ev)
+-cd: sha256msg2 Vdq,Wdq | vrsqrt28ss/d Vx,Hx,Wx (66),(ev)
+-db: VAESIMC Vdq,Wdq (66),(v1)
+-dc: VAESENC Vdq,Hdq,Wdq (66),(v1)
+-dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1)
+-de: VAESDEC Vdq,Hdq,Wdq (66),(v1)
+-df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1)
+-f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) | CRC32 Gd,Eb (66&F2)
+-f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) | CRC32 Gd,Ew (66&F2)
+-f2: ANDN Gy,By,Ey (v)
+-f3: Grp17 (1A)
+-f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
+-f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
+-f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
+-EndTable
+-
+-Table: 3-byte opcode 2 (0x0f 0x3a)
+-Referrer: 3-byte escape 2
+-AVXcode: 3
+-# 0x0f 0x3a 0x00-0xff
+-00: vpermq Vqq,Wqq,Ib (66),(v)
+-01: vpermpd Vqq,Wqq,Ib (66),(v)
+-02: vpblendd Vx,Hx,Wx,Ib (66),(v)
+-03: valignd/q Vx,Hx,Wx,Ib (66),(ev)
+-04: vpermilps Vx,Wx,Ib (66),(v)
+-05: vpermilpd Vx,Wx,Ib (66),(v)
+-06: vperm2f128 Vqq,Hqq,Wqq,Ib (66),(v)
+-07:
+-08: vroundps Vx,Wx,Ib (66) | vrndscaleps Vx,Wx,Ib (66),(evo)
+-09: vroundpd Vx,Wx,Ib (66) | vrndscalepd Vx,Wx,Ib (66),(evo)
+-0a: vroundss Vss,Wss,Ib (66),(v1) | vrndscaless Vx,Hx,Wx,Ib (66),(evo)
+-0b: vroundsd Vsd,Wsd,Ib (66),(v1) | vrndscalesd Vx,Hx,Wx,Ib (66),(evo)
+-0c: vblendps Vx,Hx,Wx,Ib (66)
+-0d: vblendpd Vx,Hx,Wx,Ib (66)
+-0e: vpblendw Vx,Hx,Wx,Ib (66),(v1)
+-0f: palignr Pq,Qq,Ib | vpalignr Vx,Hx,Wx,Ib (66),(v1)
+-14: vpextrb Rd/Mb,Vdq,Ib (66),(v1)
+-15: vpextrw Rd/Mw,Vdq,Ib (66),(v1)
+-16: vpextrd/q Ey,Vdq,Ib (66),(v1)
+-17: vextractps Ed,Vdq,Ib (66),(v1)
+-18: vinsertf128 Vqq,Hqq,Wqq,Ib (66),(v) | vinsertf32x4/64x2 Vqq,Hqq,Wqq,Ib (66),(evo)
+-19: vextractf128 Wdq,Vqq,Ib (66),(v) | vextractf32x4/64x2 Wdq,Vqq,Ib (66),(evo)
+-1a: vinsertf32x8/64x4 Vqq,Hqq,Wqq,Ib (66),(ev)
+-1b: vextractf32x8/64x4 Wdq,Vqq,Ib (66),(ev)
+-1d: vcvtps2ph Wx,Vx,Ib (66),(v)
+-1e: vpcmpud/q Vk,Hd,Wd,Ib (66),(ev)
+-1f: vpcmpd/q Vk,Hd,Wd,Ib (66),(ev)
+-20: vpinsrb Vdq,Hdq,Ry/Mb,Ib (66),(v1)
+-21: vinsertps Vdq,Hdq,Udq/Md,Ib (66),(v1)
+-22: vpinsrd/q Vdq,Hdq,Ey,Ib (66),(v1)
+-23: vshuff32x4/64x2 Vx,Hx,Wx,Ib (66),(ev)
+-25: vpternlogd/q Vx,Hx,Wx,Ib (66),(ev)
+-26: vgetmantps/d Vx,Wx,Ib (66),(ev)
+-27: vgetmantss/d Vx,Hx,Wx,Ib (66),(ev)
+-30: kshiftrb/w Vk,Uk,Ib (66),(v)
+-31: kshiftrd/q Vk,Uk,Ib (66),(v)
+-32: kshiftlb/w Vk,Uk,Ib (66),(v)
+-33: kshiftld/q Vk,Uk,Ib (66),(v)
+-38: vinserti128 Vqq,Hqq,Wqq,Ib (66),(v) | vinserti32x4/64x2 Vqq,Hqq,Wqq,Ib (66),(evo)
+-39: vextracti128 Wdq,Vqq,Ib (66),(v) | vextracti32x4/64x2 Wdq,Vqq,Ib (66),(evo)
+-3a: vinserti32x8/64x4 Vqq,Hqq,Wqq,Ib (66),(ev)
+-3b: vextracti32x8/64x4 Wdq,Vqq,Ib (66),(ev)
+-3e: vpcmpub/w Vk,Hk,Wx,Ib (66),(ev)
+-3f: vpcmpb/w Vk,Hk,Wx,Ib (66),(ev)
+-40: vdpps Vx,Hx,Wx,Ib (66)
+-41: vdppd Vdq,Hdq,Wdq,Ib (66),(v1)
+-42: vmpsadbw Vx,Hx,Wx,Ib (66),(v1) | vdbpsadbw Vx,Hx,Wx,Ib (66),(evo)
+-43: vshufi32x4/64x2 Vx,Hx,Wx,Ib (66),(ev)
+-44: vpclmulqdq Vdq,Hdq,Wdq,Ib (66),(v1)
+-46: vperm2i128 Vqq,Hqq,Wqq,Ib (66),(v)
+-4a: vblendvps Vx,Hx,Wx,Lx (66),(v)
+-4b: vblendvpd Vx,Hx,Wx,Lx (66),(v)
+-4c: vpblendvb Vx,Hx,Wx,Lx (66),(v1)
+-50: vrangeps/d Vx,Hx,Wx,Ib (66),(ev)
+-51: vrangess/d Vx,Hx,Wx,Ib (66),(ev)
+-54: vfixupimmps/d Vx,Hx,Wx,Ib (66),(ev)
+-55: vfixupimmss/d Vx,Hx,Wx,Ib (66),(ev)
+-56: vreduceps/d Vx,Wx,Ib (66),(ev)
+-57: vreducess/d Vx,Hx,Wx,Ib (66),(ev)
+-60: vpcmpestrm Vdq,Wdq,Ib (66),(v1)
+-61: vpcmpestri Vdq,Wdq,Ib (66),(v1)
+-62: vpcmpistrm Vdq,Wdq,Ib (66),(v1)
+-63: vpcmpistri Vdq,Wdq,Ib (66),(v1)
+-66: vfpclassps/d Vk,Wx,Ib (66),(ev)
+-67: vfpclassss/d Vk,Wx,Ib (66),(ev)
+-cc: sha1rnds4 Vdq,Wdq,Ib
+-df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
+-f0: RORX Gy,Ey,Ib (F2),(v)
+-EndTable
+-
+-GrpTable: Grp1
+-0: ADD
+-1: OR
+-2: ADC
+-3: SBB
+-4: AND
+-5: SUB
+-6: XOR
+-7: CMP
+-EndTable
+-
+-GrpTable: Grp1A
+-0: POP
+-EndTable
+-
+-GrpTable: Grp2
+-0: ROL
+-1: ROR
+-2: RCL
+-3: RCR
+-4: SHL/SAL
+-5: SHR
+-6:
+-7: SAR
+-EndTable
+-
+-GrpTable: Grp3_1
+-0: TEST Eb,Ib
+-1:
+-2: NOT Eb
+-3: NEG Eb
+-4: MUL AL,Eb
+-5: IMUL AL,Eb
+-6: DIV AL,Eb
+-7: IDIV AL,Eb
+-EndTable
+-
+-GrpTable: Grp3_2
+-0: TEST Ev,Iz
+-1:
+-2: NOT Ev
+-3: NEG Ev
+-4: MUL rAX,Ev
+-5: IMUL rAX,Ev
+-6: DIV rAX,Ev
+-7: IDIV rAX,Ev
+-EndTable
+-
+-GrpTable: Grp4
+-0: INC Eb
+-1: DEC Eb
+-EndTable
+-
+-GrpTable: Grp5
+-0: INC Ev
+-1: DEC Ev
+-# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
+-2: CALLN Ev (f64)
+-3: CALLF Ep
+-4: JMPN Ev (f64)
+-5: JMPF Mp
+-6: PUSH Ev (d64)
+-7:
+-EndTable
+-
+-GrpTable: Grp6
+-0: SLDT Rv/Mw
+-1: STR Rv/Mw
+-2: LLDT Ew
+-3: LTR Ew
+-4: VERR Ew
+-5: VERW Ew
+-EndTable
+-
+-GrpTable: Grp7
+-0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
+-1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
+-2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
+-3: LIDT Ms
+-4: SMSW Mw/Rv
+-5: rdpkru (110),(11B) | wrpkru (111),(11B)
+-6: LMSW Ew
+-7: INVLPG Mb | SWAPGS (o64),(000),(11B) | RDTSCP (001),(11B)
+-EndTable
+-
+-GrpTable: Grp8
+-4: BT
+-5: BTS
+-6: BTR
+-7: BTC
+-EndTable
+-
+-GrpTable: Grp9
+-1: CMPXCHG8B/16B Mq/Mdq
+-3: xrstors
+-4: xsavec
+-5: xsaves
+-6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B)
+-7: VMPTRST Mq | VMPTRST Mq (F3) | RDSEED Rv (11B)
+-EndTable
+-
+-GrpTable: Grp10
+-EndTable
+-
+-# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
+-GrpTable: Grp11A
+-0: MOV Eb,Ib
+-7: XABORT Ib (000),(11B)
+-EndTable
+-
+-GrpTable: Grp11B
+-0: MOV Eb,Iz
+-7: XBEGIN Jz (000),(11B)
+-EndTable
+-
+-GrpTable: Grp12
+-2: psrlw Nq,Ib (11B) | vpsrlw Hx,Ux,Ib (66),(11B),(v1)
+-4: psraw Nq,Ib (11B) | vpsraw Hx,Ux,Ib (66),(11B),(v1)
+-6: psllw Nq,Ib (11B) | vpsllw Hx,Ux,Ib (66),(11B),(v1)
+-EndTable
+-
+-GrpTable: Grp13
+-0: vprord/q Hx,Wx,Ib (66),(ev)
+-1: vprold/q Hx,Wx,Ib (66),(ev)
+-2: psrld Nq,Ib (11B) | vpsrld Hx,Ux,Ib (66),(11B),(v1)
+-4: psrad Nq,Ib (11B) | vpsrad Hx,Ux,Ib (66),(11B),(v1) | vpsrad/q Hx,Ux,Ib (66),(evo)
+-6: pslld Nq,Ib (11B) | vpslld Hx,Ux,Ib (66),(11B),(v1)
+-EndTable
+-
+-GrpTable: Grp14
+-2: psrlq Nq,Ib (11B) | vpsrlq Hx,Ux,Ib (66),(11B),(v1)
+-3: vpsrldq Hx,Ux,Ib (66),(11B),(v1)
+-6: psllq Nq,Ib (11B) | vpsllq Hx,Ux,Ib (66),(11B),(v1)
+-7: vpslldq Hx,Ux,Ib (66),(11B),(v1)
+-EndTable
+-
+-GrpTable: Grp15
+-0: fxsave | RDFSBASE Ry (F3),(11B)
+-1: fxstor | RDGSBASE Ry (F3),(11B)
+-2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
+-3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
+-4: XSAVE
+-5: XRSTOR | lfence (11B)
+-6: XSAVEOPT | clwb (66) | mfence (11B)
+-7: clflush | clflushopt (66) | sfence (11B)
+-EndTable
+-
+-GrpTable: Grp16
+-0: prefetch NTA
+-1: prefetch T0
+-2: prefetch T1
+-3: prefetch T2
+-EndTable
+-
+-GrpTable: Grp17
+-1: BLSR By,Ey (v)
+-2: BLSMSK By,Ey (v)
+-3: BLSI By,Ey (v)
+-EndTable
+-
+-GrpTable: Grp18
+-1: vgatherpf0dps/d Wx (66),(ev)
+-2: vgatherpf1dps/d Wx (66),(ev)
+-5: vscatterpf0dps/d Wx (66),(ev)
+-6: vscatterpf1dps/d Wx (66),(ev)
+-EndTable
+-
+-GrpTable: Grp19
+-1: vgatherpf0qps/d Wx (66),(ev)
+-2: vgatherpf1qps/d Wx (66),(ev)
+-5: vscatterpf0qps/d Wx (66),(ev)
+-6: vscatterpf1qps/d Wx (66),(ev)
+-EndTable
+-
+-# AMD's Prefetch Group
+-GrpTable: GrpP
+-0: PREFETCH
+-1: PREFETCHW
+-EndTable
+-
+-GrpTable: GrpPDLK
+-0: MONTMUL
+-1: XSHA1
+-2: XSHA2
+-EndTable
+-
+-GrpTable: GrpRNG
+-0: xstore-rng
+-1: xcrypt-ecb
+-2: xcrypt-cbc
+-4: xcrypt-cfb
+-5: xcrypt-ofb
+-EndTable
+diff --git a/tools/objtool/arch/x86/lib/inat.c b/tools/objtool/arch/x86/lib/inat.c
+new file mode 100644
+index 0000000..c1f01a8
+--- /dev/null
++++ b/tools/objtool/arch/x86/lib/inat.c
+@@ -0,0 +1,97 @@
++/*
++ * x86 instruction attribute tables
++ *
++ * Written by Masami Hiramatsu <mhiramat@redhat.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ *
++ */
++#include <asm/insn.h>
++
++/* Attribute tables are generated from opcode map */
++#include "inat-tables.c"
++
++/* Attribute search APIs */
++insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode)
++{
++ return inat_primary_table[opcode];
++}
++
++int inat_get_last_prefix_id(insn_byte_t last_pfx)
++{
++ insn_attr_t lpfx_attr;
++
++ lpfx_attr = inat_get_opcode_attribute(last_pfx);
++ return inat_last_prefix_id(lpfx_attr);
++}
++
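++/*
++ * The escape, group and AVX tables are two-dimensional: the first index
++ * selects the table (escape id, group id or VEX.mmmmm), the second selects
++ * the variant implied by the last legacy prefix (or VEX.pp), i.e.
++ * none/0x66/0xF3/0xF2.
++ */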
++insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, int lpfx_id,
++ insn_attr_t esc_attr)
++{
++ const insn_attr_t *table;
++ int n;
++
++ n = inat_escape_id(esc_attr);
++
++ table = inat_escape_tables[n][0];
++ if (!table)
++ return 0;
++ if (inat_has_variant(table[opcode]) && lpfx_id) {
++ table = inat_escape_tables[n][lpfx_id];
++ if (!table)
++ return 0;
++ }
++ return table[opcode];
++}
++
++insn_attr_t inat_get_group_attribute(insn_byte_t modrm, int lpfx_id,
++ insn_attr_t grp_attr)
++{
++ const insn_attr_t *table;
++ int n;
++
++ n = inat_group_id(grp_attr);
++
++ table = inat_group_tables[n][0];
++ if (!table)
++ return inat_group_common_attribute(grp_attr);
++ if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && lpfx_id) {
++ table = inat_group_tables[n][lpfx_id];
++ if (!table)
++ return inat_group_common_attribute(grp_attr);
++ }
++ return table[X86_MODRM_REG(modrm)] |
++ inat_group_common_attribute(grp_attr);
++}
++
++insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, insn_byte_t vex_m,
++ insn_byte_t vex_p)
++{
++ const insn_attr_t *table;
++ if (vex_m > X86_VEX_M_MAX || vex_p > INAT_LSTPFX_MAX)
++ return 0;
++ /* First, check the master table */
++ table = inat_avx_tables[vex_m][0];
++ if (!table)
++ return 0;
++ if (!inat_is_group(table[opcode]) && vex_p) {
++ /* If this is not a group, get attribute directly */
++ table = inat_avx_tables[vex_m][vex_p];
++ if (!table)
++ return 0;
++ }
++ return table[opcode];
++}
++
+diff --git a/tools/objtool/arch/x86/lib/insn.c b/tools/objtool/arch/x86/lib/insn.c
+new file mode 100644
+index 0000000..1088eb8
+--- /dev/null
++++ b/tools/objtool/arch/x86/lib/insn.c
+@@ -0,0 +1,606 @@
++/*
++ * x86 instruction analysis
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * Copyright (C) IBM Corporation, 2002, 2004, 2009
++ */
++
++#ifdef __KERNEL__
++#include <linux/string.h>
++#else
++#include <string.h>
++#endif
++#include <asm/inat.h>
++#include <asm/insn.h>
++
++/* Verify next sizeof(t) bytes can be on the same instruction */
++#define validate_next(t, insn, n) \
++ ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
++
++#define __get_next(t, insn) \
++ ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
++
++#define __peek_nbyte_next(t, insn, n) \
++ ({ t r = *(t*)((insn)->next_byte + n); r; })
++
++#define get_next(t, insn) \
++ ({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
++
++#define peek_nbyte_next(t, insn, n) \
++ ({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); })
++
++#define peek_next(t, insn) peek_nbyte_next(t, insn, 0)
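++
++/*
++ * Note: get_next()/peek_nbyte_next() bail out with "goto err_out" when the
++ * buffer would be overrun, which is why each insn_get_*() function below
++ * ends with a local err_out: label.
++ */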
++
++/**
++ * insn_init() - initialize struct insn
++ * @insn: &struct insn to be initialized
++ * @kaddr: address (in kernel memory) of instruction (or copy thereof)
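++ * @buf_len: number of valid bytes available at @kaddr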
++ * @x86_64: !0 for 64-bit kernel or 64-bit app
++ */
++void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
++{
++ /*
++ * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
++ * even if the input buffer is long enough to hold them.
++ */
++ if (buf_len > MAX_INSN_SIZE)
++ buf_len = MAX_INSN_SIZE;
++
++ memset(insn, 0, sizeof(*insn));
++ insn->kaddr = kaddr;
++ insn->end_kaddr = kaddr + buf_len;
++ insn->next_byte = kaddr;
++ insn->x86_64 = x86_64 ? 1 : 0;
++ insn->opnd_bytes = 4;
++ if (x86_64)
++ insn->addr_bytes = 8;
++ else
++ insn->addr_bytes = 4;
++}
++
++/**
++ * insn_get_prefixes - scan x86 instruction prefix bytes
++ * @insn: &struct insn containing instruction
++ *
++ * Populates the @insn->prefixes bitmap, and updates @insn->next_byte
++ * to point to the (first) opcode. No effect if @insn->prefixes.got
++ * is already set.
++ */
++void insn_get_prefixes(struct insn *insn)
++{
++ struct insn_field *prefixes = &insn->prefixes;
++ insn_attr_t attr;
++ insn_byte_t b, lb;
++ int i, nb;
++
++ if (prefixes->got)
++ return;
++
++ nb = 0;
++ lb = 0;
++ b = peek_next(insn_byte_t, insn);
++ attr = inat_get_opcode_attribute(b);
++ while (inat_is_legacy_prefix(attr)) {
++ /* Skip if same prefix */
++ for (i = 0; i < nb; i++)
++ if (prefixes->bytes[i] == b)
++ goto found;
++ if (nb == 4)
++ /* Invalid instruction */
++ break;
++ prefixes->bytes[nb++] = b;
++ if (inat_is_address_size_prefix(attr)) {
++ /* address size switches 2/4 or 4/8 */
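++ /* (the XOR below toggles 8 <-> 4 in 64-bit mode, 4 <-> 2 otherwise) */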
++ if (insn->x86_64)
++ insn->addr_bytes ^= 12;
++ else
++ insn->addr_bytes ^= 6;
++ } else if (inat_is_operand_size_prefix(attr)) {
++ /* operand size switches 2/4 */
++ insn->opnd_bytes ^= 6;
++ }
++found:
++ prefixes->nbytes++;
++ insn->next_byte++;
++ lb = b;
++ b = peek_next(insn_byte_t, insn);
++ attr = inat_get_opcode_attribute(b);
++ }
++ /* Set the last prefix */
++ if (lb && lb != insn->prefixes.bytes[3]) {
++ if (unlikely(insn->prefixes.bytes[3])) {
++ /* Swap the last prefix */
++ b = insn->prefixes.bytes[3];
++ for (i = 0; i < nb; i++)
++ if (prefixes->bytes[i] == lb)
++ prefixes->bytes[i] = b;
++ }
++ insn->prefixes.bytes[3] = lb;
++ }
++
++ /* Decode REX prefix */
++ if (insn->x86_64) {
++ b = peek_next(insn_byte_t, insn);
++ attr = inat_get_opcode_attribute(b);
++ if (inat_is_rex_prefix(attr)) {
++ insn->rex_prefix.value = b;
++ insn->rex_prefix.nbytes = 1;
++ insn->next_byte++;
++ if (X86_REX_W(b))
++ /* REX.W overrides opnd_size */
++ insn->opnd_bytes = 8;
++ }
++ }
++ insn->rex_prefix.got = 1;
++
++ /* Decode VEX prefix */
++ b = peek_next(insn_byte_t, insn);
++ attr = inat_get_opcode_attribute(b);
++ if (inat_is_vex_prefix(attr)) {
++ insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1);
++ if (!insn->x86_64) {
++ /*
++ * In 32-bit mode, if the [7:6] bits (mod bits of
++ * ModRM) on the second byte are not 11b, it is
++ * LDS or LES or BOUND.
++ */
++ if (X86_MODRM_MOD(b2) != 3)
++ goto vex_end;
++ }
++ insn->vex_prefix.bytes[0] = b;
++ insn->vex_prefix.bytes[1] = b2;
++ if (inat_is_evex_prefix(attr)) {
++ b2 = peek_nbyte_next(insn_byte_t, insn, 2);
++ insn->vex_prefix.bytes[2] = b2;
++ b2 = peek_nbyte_next(insn_byte_t, insn, 3);
++ insn->vex_prefix.bytes[3] = b2;
++ insn->vex_prefix.nbytes = 4;
++ insn->next_byte += 4;
++ if (insn->x86_64 && X86_VEX_W(b2))
++ /* VEX.W overrides opnd_size */
++ insn->opnd_bytes = 8;
++ } else if (inat_is_vex3_prefix(attr)) {
++ b2 = peek_nbyte_next(insn_byte_t, insn, 2);
++ insn->vex_prefix.bytes[2] = b2;
++ insn->vex_prefix.nbytes = 3;
++ insn->next_byte += 3;
++ if (insn->x86_64 && X86_VEX_W(b2))
++ /* VEX.W overrides opnd_size */
++ insn->opnd_bytes = 8;
++ } else {
++ /*
++ * For VEX2, fake VEX3-like byte#2.
++ * Makes it easier to decode vex.W, vex.vvvv,
++ * vex.L and vex.pp. Masking with 0x7f sets vex.W == 0.
++ */
++ insn->vex_prefix.bytes[2] = b2 & 0x7f;
++ insn->vex_prefix.nbytes = 2;
++ insn->next_byte += 2;
++ }
++ }
++vex_end:
++ insn->vex_prefix.got = 1;
++
++ prefixes->got = 1;
++
++err_out:
++ return;
++}
++
++/**
++ * insn_get_opcode - collect opcode(s)
++ * @insn: &struct insn containing instruction
++ *
++ * Populates @insn->opcode, updates @insn->next_byte to point past the
++ * opcode byte(s), and sets @insn->attr (except for groups).
++ * If necessary, first collects any preceding (prefix) bytes.
++ * Sets @insn->opcode.value = opcode1. No effect if @insn->opcode.got
++ * is already 1.
++ */
++void insn_get_opcode(struct insn *insn)
++{
++ struct insn_field *opcode = &insn->opcode;
++ insn_byte_t op;
++ int pfx_id;
++ if (opcode->got)
++ return;
++ if (!insn->prefixes.got)
++ insn_get_prefixes(insn);
++
++ /* Get first opcode */
++ op = get_next(insn_byte_t, insn);
++ opcode->bytes[0] = op;
++ opcode->nbytes = 1;
++
++ /* Check if there is VEX prefix or not */
++ if (insn_is_avx(insn)) {
++ insn_byte_t m, p;
++ m = insn_vex_m_bits(insn);
++ p = insn_vex_p_bits(insn);
++ insn->attr = inat_get_avx_attribute(op, m, p);
++ if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
++ (!inat_accept_vex(insn->attr) &&
++ !inat_is_group(insn->attr)))
++ insn->attr = 0; /* This instruction is bad */
++ goto end; /* VEX has only 1 byte for opcode */
++ }
++
++ insn->attr = inat_get_opcode_attribute(op);
++ while (inat_is_escape(insn->attr)) {
++ /* Get escaped opcode */
++ op = get_next(insn_byte_t, insn);
++ opcode->bytes[opcode->nbytes++] = op;
++ pfx_id = insn_last_prefix_id(insn);
++ insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
++ }
++ if (inat_must_vex(insn->attr))
++ insn->attr = 0; /* This instruction is bad */
++end:
++ opcode->got = 1;
++
++err_out:
++ return;
++}
++
++/**
++ * insn_get_modrm - collect ModRM byte, if any
++ * @insn: &struct insn containing instruction
++ *
++ * Populates @insn->modrm and updates @insn->next_byte to point past the
++ * ModRM byte, if any. If necessary, first collects the preceding bytes
++ * (prefixes and opcode(s)). No effect if @insn->modrm.got is already 1.
++ */
++void insn_get_modrm(struct insn *insn)
++{
++ struct insn_field *modrm = &insn->modrm;
++ insn_byte_t pfx_id, mod;
++ if (modrm->got)
++ return;
++ if (!insn->opcode.got)
++ insn_get_opcode(insn);
++
++ if (inat_has_modrm(insn->attr)) {
++ mod = get_next(insn_byte_t, insn);
++ modrm->value = mod;
++ modrm->nbytes = 1;
++ if (inat_is_group(insn->attr)) {
++ pfx_id = insn_last_prefix_id(insn);
++ insn->attr = inat_get_group_attribute(mod, pfx_id,
++ insn->attr);
++ if (insn_is_avx(insn) && !inat_accept_vex(insn->attr))
++ insn->attr = 0; /* This is bad */
++ }
++ }
++
++ if (insn->x86_64 && inat_is_force64(insn->attr))
++ insn->opnd_bytes = 8;
++ modrm->got = 1;
++
++err_out:
++ return;
++}
++
++
++/**
++ * insn_rip_relative() - Does instruction use RIP-relative addressing mode?
++ * @insn: &struct insn containing instruction
++ *
++ * If necessary, first collects the instruction up to and including the
++ * ModRM byte. No effect if @insn->x86_64 is 0.
++ */
++int insn_rip_relative(struct insn *insn)
++{
++ struct insn_field *modrm = &insn->modrm;
++
++ if (!insn->x86_64)
++ return 0;
++ if (!modrm->got)
++ insn_get_modrm(insn);
++ /*
++ * For rip-relative instructions, the mod field (top 2 bits)
++ * is zero and the r/m field (bottom 3 bits) is 0x5.
++ */
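++ /* e.g. "8b 05 <disp32>" (mov eax,[rip+disp32]): ModRM 0x05 has mod=00, r/m=101 */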
++ return (modrm->nbytes && (modrm->value & 0xc7) == 0x5);
++}
++
++/**
++ * insn_get_sib() - Get the SIB byte of instruction
++ * @insn: &struct insn containing instruction
++ *
++ * If necessary, first collects the instruction up to and including the
++ * ModRM byte.
++ */
++void insn_get_sib(struct insn *insn)
++{
++ insn_byte_t modrm;
++
++ if (insn->sib.got)
++ return;
++ if (!insn->modrm.got)
++ insn_get_modrm(insn);
++ if (insn->modrm.nbytes) {
++ modrm = (insn_byte_t)insn->modrm.value;
++ if (insn->addr_bytes != 2 &&
++ X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) {
++ insn->sib.value = get_next(insn_byte_t, insn);
++ insn->sib.nbytes = 1;
++ }
++ }
++ insn->sib.got = 1;
++
++err_out:
++ return;
++}
++
++
++/**
++ * insn_get_displacement() - Get the displacement of instruction
++ * @insn: &struct insn containing instruction
++ *
++ * If necessary, first collects the instruction up to and including the
++ * SIB byte.
++ * Displacement value is sign-expanded.
++ */
++void insn_get_displacement(struct insn *insn)
++{
++ insn_byte_t mod, rm, base;
++
++ if (insn->displacement.got)
++ return;
++ if (!insn->sib.got)
++ insn_get_sib(insn);
++ if (insn->modrm.nbytes) {
++ /*
++ * Interpreting the modrm byte:
++ * mod = 00 - no displacement fields (exceptions below)
++ * mod = 01 - 1-byte displacement field
++ * mod = 10 - displacement field is 4 bytes, or 2 bytes if
++ * address size = 2 (0x67 prefix in 32-bit mode)
++ * mod = 11 - no memory operand
++ *
++ * If address size = 2...
++ * mod = 00, r/m = 110 - displacement field is 2 bytes
++ *
++ * If address size != 2...
++ * mod != 11, r/m = 100 - SIB byte exists
++ * mod = 00, SIB base = 101 - displacement field is 4 bytes
++ * mod = 00, r/m = 101 - rip-relative addressing, displacement
++ * field is 4 bytes
++ */
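++ /*
++ * Example: "8b 45 fc" (mov eax,[rbp-4]) has mod=01, so one
++ * sign-extended displacement byte (0xfc == -4) follows ModRM.
++ */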
++ mod = X86_MODRM_MOD(insn->modrm.value);
++ rm = X86_MODRM_RM(insn->modrm.value);
++ base = X86_SIB_BASE(insn->sib.value);
++ if (mod == 3)
++ goto out;
++ if (mod == 1) {
++ insn->displacement.value = get_next(signed char, insn);
++ insn->displacement.nbytes = 1;
++ } else if (insn->addr_bytes == 2) {
++ if ((mod == 0 && rm == 6) || mod == 2) {
++ insn->displacement.value =
++ get_next(short, insn);
++ insn->displacement.nbytes = 2;
++ }
++ } else {
++ if ((mod == 0 && rm == 5) || mod == 2 ||
++ (mod == 0 && base == 5)) {
++ insn->displacement.value = get_next(int, insn);
++ insn->displacement.nbytes = 4;
++ }
++ }
++ }
++out:
++ insn->displacement.got = 1;
++
++err_out:
++ return;
++}
++
++/* Decode moffset16/32/64. Return 0 if failed */
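++/* The 64-bit form is read as two 32-bit halves into moffset1/moffset2. */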
++static int __get_moffset(struct insn *insn)
++{
++ switch (insn->addr_bytes) {
++ case 2:
++ insn->moffset1.value = get_next(short, insn);
++ insn->moffset1.nbytes = 2;
++ break;
++ case 4:
++ insn->moffset1.value = get_next(int, insn);
++ insn->moffset1.nbytes = 4;
++ break;
++ case 8:
++ insn->moffset1.value = get_next(int, insn);
++ insn->moffset1.nbytes = 4;
++ insn->moffset2.value = get_next(int, insn);
++ insn->moffset2.nbytes = 4;
++ break;
++ default: /* opnd_bytes must be modified manually */
++ goto err_out;
++ }
++ insn->moffset1.got = insn->moffset2.got = 1;
++
++ return 1;
++
++err_out:
++ return 0;
++}
++
++/* Decode imm v32(Iz). Return 0 if failed */
++static int __get_immv32(struct insn *insn)
++{
++ switch (insn->opnd_bytes) {
++ case 2:
++ insn->immediate.value = get_next(short, insn);
++ insn->immediate.nbytes = 2;
++ break;
++ case 4:
++ case 8:
++ insn->immediate.value = get_next(int, insn);
++ insn->immediate.nbytes = 4;
++ break;
++ default: /* opnd_bytes must be modified manually */
++ goto err_out;
++ }
++
++ return 1;
++
++err_out:
++ return 0;
++}
++
++/* Decode imm v64(Iv/Ov). Return 0 if failed */
++static int __get_immv(struct insn *insn)
++{
++ switch (insn->opnd_bytes) {
++ case 2:
++ insn->immediate1.value = get_next(short, insn);
++ insn->immediate1.nbytes = 2;
++ break;
++ case 4:
++ insn->immediate1.value = get_next(int, insn);
++ insn->immediate1.nbytes = 4;
++ break;
++ case 8:
++ insn->immediate1.value = get_next(int, insn);
++ insn->immediate1.nbytes = 4;
++ insn->immediate2.value = get_next(int, insn);
++ insn->immediate2.nbytes = 4;
++ break;
++ default: /* opnd_bytes must be modified manually */
++ goto err_out;
++ }
++ insn->immediate1.got = insn->immediate2.got = 1;
++
++ return 1;
++err_out:
++ return 0;
++}
++
++/* Decode ptr16:16/32(Ap) */
++static int __get_immptr(struct insn *insn)
++{
++ switch (insn->opnd_bytes) {
++ case 2:
++ insn->immediate1.value = get_next(short, insn);
++ insn->immediate1.nbytes = 2;
++ break;
++ case 4:
++ insn->immediate1.value = get_next(int, insn);
++ insn->immediate1.nbytes = 4;
++ break;
++ case 8:
++ /* ptr16:64 does not exist (no segment) */
++ return 0;
++ default: /* opnd_bytes must be modified manually */
++ goto err_out;
++ }
++ insn->immediate2.value = get_next(unsigned short, insn);
++ insn->immediate2.nbytes = 2;
++ insn->immediate1.got = insn->immediate2.got = 1;
++
++ return 1;
++err_out:
++ return 0;
++}
++
++/**
++ * insn_get_immediate() - Get the immediates of instruction
++ * @insn: &struct insn containing instruction
++ *
++ * If necessary, first collects the instruction up to and including the
++ * displacement bytes.
++ * Most immediates are sign-expanded. The unsigned value can be obtained
++ * by masking with ((1 << (nbytes * 8)) - 1).
++ */
++void insn_get_immediate(struct insn *insn)
++{
++ if (insn->immediate.got)
++ return;
++ if (!insn->displacement.got)
++ insn_get_displacement(insn);
++
++ if (inat_has_moffset(insn->attr)) {
++ if (!__get_moffset(insn))
++ goto err_out;
++ goto done;
++ }
++
++ if (!inat_has_immediate(insn->attr))
++ /* no immediates */
++ goto done;
++
++ switch (inat_immediate_size(insn->attr)) {
++ case INAT_IMM_BYTE:
++ insn->immediate.value = get_next(signed char, insn);
++ insn->immediate.nbytes = 1;
++ break;
++ case INAT_IMM_WORD:
++ insn->immediate.value = get_next(short, insn);
++ insn->immediate.nbytes = 2;
++ break;
++ case INAT_IMM_DWORD:
++ insn->immediate.value = get_next(int, insn);
++ insn->immediate.nbytes = 4;
++ break;
++ case INAT_IMM_QWORD:
++ insn->immediate1.value = get_next(int, insn);
++ insn->immediate1.nbytes = 4;
++ insn->immediate2.value = get_next(int, insn);
++ insn->immediate2.nbytes = 4;
++ break;
++ case INAT_IMM_PTR:
++ if (!__get_immptr(insn))
++ goto err_out;
++ break;
++ case INAT_IMM_VWORD32:
++ if (!__get_immv32(insn))
++ goto err_out;
++ break;
++ case INAT_IMM_VWORD:
++ if (!__get_immv(insn))
++ goto err_out;
++ break;
++ default:
++ /* Here, insn must have an immediate, but failed */
++ goto err_out;
++ }
++ if (inat_has_second_immediate(insn->attr)) {
++ insn->immediate2.value = get_next(signed char, insn);
++ insn->immediate2.nbytes = 1;
++ }
++done:
++ insn->immediate.got = 1;
++
++err_out:
++ return;
++}
++
++/**
++ * insn_get_length() - Get the length of instruction
++ * @insn: &struct insn containing instruction
++ *
++ * If necessary, first collects the instruction up to and including the
++ * immediates bytes.
++ */
++void insn_get_length(struct insn *insn)
++{
++ if (insn->length)
++ return;
++ if (!insn->immediate.got)
++ insn_get_immediate(insn);
++ insn->length = (unsigned char)((unsigned long)insn->next_byte
++ - (unsigned long)insn->kaddr);
++}
+diff --git a/tools/objtool/arch/x86/lib/x86-opcode-map.txt b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
+new file mode 100644
+index 0000000..aa2270d
+--- /dev/null
++++ b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
+@@ -0,0 +1,1072 @@
++# x86 Opcode Maps
++#
++# This is (mostly) based on following documentations.
++# - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2C
++# (#326018-047US, June 2013)
++#
++#<Opcode maps>
++# Table: table-name
++# Referrer: escaped-name
++# AVXcode: avx-code
++# opcode: mnemonic|GrpXXX [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...]
++# (or)
++# opcode: escape # escaped-name
++# EndTable
++#
++# mnemonics that begin with lowercase 'v' accept a VEX or EVEX prefix
++# mnemonics that begin with lowercase 'k' accept a VEX prefix
++#
++#<group maps>
++# GrpTable: GrpXXX
++# reg: mnemonic [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...]
++# EndTable
++#
++# AVX Superscripts
++# (ev): this opcode requires EVEX prefix.
++# (evo): this opcode is changed by EVEX prefix (EVEX opcode)
++# (v): this opcode requires VEX prefix.
++# (v1): this opcode only supports 128bit VEX.
++#
++# Last Prefix Superscripts
++# - (66): the last prefix is 0x66
++# - (F3): the last prefix is 0xF3
++# - (F2): the last prefix is 0xF2
++# - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
++# - (66&F2): Both 0x66 and 0xF2 prefixes are specified.
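++#
++# For example, the entry for opcode 0x58 in the two-byte map below lists
++# vaddps (no prefix), vaddpd (66), vaddss (F3) and vaddsd (F2): the last
++# legacy prefix (or the VEX.pp field) selects which form is decoded.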
++
++Table: one byte opcode
++Referrer:
++AVXcode:
++# 0x00 - 0x0f
++00: ADD Eb,Gb
++01: ADD Ev,Gv
++02: ADD Gb,Eb
++03: ADD Gv,Ev
++04: ADD AL,Ib
++05: ADD rAX,Iz
++06: PUSH ES (i64)
++07: POP ES (i64)
++08: OR Eb,Gb
++09: OR Ev,Gv
++0a: OR Gb,Eb
++0b: OR Gv,Ev
++0c: OR AL,Ib
++0d: OR rAX,Iz
++0e: PUSH CS (i64)
++0f: escape # 2-byte escape
++# 0x10 - 0x1f
++10: ADC Eb,Gb
++11: ADC Ev,Gv
++12: ADC Gb,Eb
++13: ADC Gv,Ev
++14: ADC AL,Ib
++15: ADC rAX,Iz
++16: PUSH SS (i64)
++17: POP SS (i64)
++18: SBB Eb,Gb
++19: SBB Ev,Gv
++1a: SBB Gb,Eb
++1b: SBB Gv,Ev
++1c: SBB AL,Ib
++1d: SBB rAX,Iz
++1e: PUSH DS (i64)
++1f: POP DS (i64)
++# 0x20 - 0x2f
++20: AND Eb,Gb
++21: AND Ev,Gv
++22: AND Gb,Eb
++23: AND Gv,Ev
++24: AND AL,Ib
++25: AND rAx,Iz
++26: SEG=ES (Prefix)
++27: DAA (i64)
++28: SUB Eb,Gb
++29: SUB Ev,Gv
++2a: SUB Gb,Eb
++2b: SUB Gv,Ev
++2c: SUB AL,Ib
++2d: SUB rAX,Iz
++2e: SEG=CS (Prefix)
++2f: DAS (i64)
++# 0x30 - 0x3f
++30: XOR Eb,Gb
++31: XOR Ev,Gv
++32: XOR Gb,Eb
++33: XOR Gv,Ev
++34: XOR AL,Ib
++35: XOR rAX,Iz
++36: SEG=SS (Prefix)
++37: AAA (i64)
++38: CMP Eb,Gb
++39: CMP Ev,Gv
++3a: CMP Gb,Eb
++3b: CMP Gv,Ev
++3c: CMP AL,Ib
++3d: CMP rAX,Iz
++3e: SEG=DS (Prefix)
++3f: AAS (i64)
++# 0x40 - 0x4f
++40: INC eAX (i64) | REX (o64)
++41: INC eCX (i64) | REX.B (o64)
++42: INC eDX (i64) | REX.X (o64)
++43: INC eBX (i64) | REX.XB (o64)
++44: INC eSP (i64) | REX.R (o64)
++45: INC eBP (i64) | REX.RB (o64)
++46: INC eSI (i64) | REX.RX (o64)
++47: INC eDI (i64) | REX.RXB (o64)
++48: DEC eAX (i64) | REX.W (o64)
++49: DEC eCX (i64) | REX.WB (o64)
++4a: DEC eDX (i64) | REX.WX (o64)
++4b: DEC eBX (i64) | REX.WXB (o64)
++4c: DEC eSP (i64) | REX.WR (o64)
++4d: DEC eBP (i64) | REX.WRB (o64)
++4e: DEC eSI (i64) | REX.WRX (o64)
++4f: DEC eDI (i64) | REX.WRXB (o64)
++# 0x50 - 0x5f
++50: PUSH rAX/r8 (d64)
++51: PUSH rCX/r9 (d64)
++52: PUSH rDX/r10 (d64)
++53: PUSH rBX/r11 (d64)
++54: PUSH rSP/r12 (d64)
++55: PUSH rBP/r13 (d64)
++56: PUSH rSI/r14 (d64)
++57: PUSH rDI/r15 (d64)
++58: POP rAX/r8 (d64)
++59: POP rCX/r9 (d64)
++5a: POP rDX/r10 (d64)
++5b: POP rBX/r11 (d64)
++5c: POP rSP/r12 (d64)
++5d: POP rBP/r13 (d64)
++5e: POP rSI/r14 (d64)
++5f: POP rDI/r15 (d64)
++# 0x60 - 0x6f
++60: PUSHA/PUSHAD (i64)
++61: POPA/POPAD (i64)
++62: BOUND Gv,Ma (i64) | EVEX (Prefix)
++63: ARPL Ew,Gw (i64) | MOVSXD Gv,Ev (o64)
++64: SEG=FS (Prefix)
++65: SEG=GS (Prefix)
++66: Operand-Size (Prefix)
++67: Address-Size (Prefix)
++68: PUSH Iz (d64)
++69: IMUL Gv,Ev,Iz
++6a: PUSH Ib (d64)
++6b: IMUL Gv,Ev,Ib
++6c: INS/INSB Yb,DX
++6d: INS/INSW/INSD Yz,DX
++6e: OUTS/OUTSB DX,Xb
++6f: OUTS/OUTSW/OUTSD DX,Xz
++# 0x70 - 0x7f
++70: JO Jb
++71: JNO Jb
++72: JB/JNAE/JC Jb
++73: JNB/JAE/JNC Jb
++74: JZ/JE Jb
++75: JNZ/JNE Jb
++76: JBE/JNA Jb
++77: JNBE/JA Jb
++78: JS Jb
++79: JNS Jb
++7a: JP/JPE Jb
++7b: JNP/JPO Jb
++7c: JL/JNGE Jb
++7d: JNL/JGE Jb
++7e: JLE/JNG Jb
++7f: JNLE/JG Jb
++# 0x80 - 0x8f
++80: Grp1 Eb,Ib (1A)
++81: Grp1 Ev,Iz (1A)
++82: Grp1 Eb,Ib (1A),(i64)
++83: Grp1 Ev,Ib (1A)
++84: TEST Eb,Gb
++85: TEST Ev,Gv
++86: XCHG Eb,Gb
++87: XCHG Ev,Gv
++88: MOV Eb,Gb
++89: MOV Ev,Gv
++8a: MOV Gb,Eb
++8b: MOV Gv,Ev
++8c: MOV Ev,Sw
++8d: LEA Gv,M
++8e: MOV Sw,Ew
++8f: Grp1A (1A) | POP Ev (d64)
++# 0x90 - 0x9f
++90: NOP | PAUSE (F3) | XCHG r8,rAX
++91: XCHG rCX/r9,rAX
++92: XCHG rDX/r10,rAX
++93: XCHG rBX/r11,rAX
++94: XCHG rSP/r12,rAX
++95: XCHG rBP/r13,rAX
++96: XCHG rSI/r14,rAX
++97: XCHG rDI/r15,rAX
++98: CBW/CWDE/CDQE
++99: CWD/CDQ/CQO
++9a: CALLF Ap (i64)
++9b: FWAIT/WAIT
++9c: PUSHF/D/Q Fv (d64)
++9d: POPF/D/Q Fv (d64)
++9e: SAHF
++9f: LAHF
++# 0xa0 - 0xaf
++a0: MOV AL,Ob
++a1: MOV rAX,Ov
++a2: MOV Ob,AL
++a3: MOV Ov,rAX
++a4: MOVS/B Yb,Xb
++a5: MOVS/W/D/Q Yv,Xv
++a6: CMPS/B Xb,Yb
++a7: CMPS/W/D Xv,Yv
++a8: TEST AL,Ib
++a9: TEST rAX,Iz
++aa: STOS/B Yb,AL
++ab: STOS/W/D/Q Yv,rAX
++ac: LODS/B AL,Xb
++ad: LODS/W/D/Q rAX,Xv
++ae: SCAS/B AL,Yb
++# Note: The May 2011 Intel manual shows Xv for the second parameter of the
++# next instruction but Yv is correct
++af: SCAS/W/D/Q rAX,Yv
++# 0xb0 - 0xbf
++b0: MOV AL/R8L,Ib
++b1: MOV CL/R9L,Ib
++b2: MOV DL/R10L,Ib
++b3: MOV BL/R11L,Ib
++b4: MOV AH/R12L,Ib
++b5: MOV CH/R13L,Ib
++b6: MOV DH/R14L,Ib
++b7: MOV BH/R15L,Ib
++b8: MOV rAX/r8,Iv
++b9: MOV rCX/r9,Iv
++ba: MOV rDX/r10,Iv
++bb: MOV rBX/r11,Iv
++bc: MOV rSP/r12,Iv
++bd: MOV rBP/r13,Iv
++be: MOV rSI/r14,Iv
++bf: MOV rDI/r15,Iv
++# 0xc0 - 0xcf
++c0: Grp2 Eb,Ib (1A)
++c1: Grp2 Ev,Ib (1A)
++c2: RETN Iw (f64)
++c3: RETN
++c4: LES Gz,Mp (i64) | VEX+2byte (Prefix)
++c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix)
++c6: Grp11A Eb,Ib (1A)
++c7: Grp11B Ev,Iz (1A)
++c8: ENTER Iw,Ib
++c9: LEAVE (d64)
++ca: RETF Iw
++cb: RETF
++cc: INT3
++cd: INT Ib
++ce: INTO (i64)
++cf: IRET/D/Q
++# 0xd0 - 0xdf
++d0: Grp2 Eb,1 (1A)
++d1: Grp2 Ev,1 (1A)
++d2: Grp2 Eb,CL (1A)
++d3: Grp2 Ev,CL (1A)
++d4: AAM Ib (i64)
++d5: AAD Ib (i64)
++d6:
++d7: XLAT/XLATB
++d8: ESC
++d9: ESC
++da: ESC
++db: ESC
++dc: ESC
++dd: ESC
++de: ESC
++df: ESC
++# 0xe0 - 0xef
++# Note: "forced64" is Intel CPU behavior: they ignore 0x66 prefix
++# in 64-bit mode. AMD CPUs accept 0x66 prefix, it causes RIP truncation
++# to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD.
++e0: LOOPNE/LOOPNZ Jb (f64)
++e1: LOOPE/LOOPZ Jb (f64)
++e2: LOOP Jb (f64)
++e3: JrCXZ Jb (f64)
++e4: IN AL,Ib
++e5: IN eAX,Ib
++e6: OUT Ib,AL
++e7: OUT Ib,eAX
++# With a 0x66 prefix in 64-bit mode, AMD CPUs treat the immediate offset of
++# "near" jumps and calls as 16-bit. For CALL, the pushed return address is
++# 16 bits wide and RSP is decremented by 2, but RSP, unlike RIP, is not
++# truncated to 16 bits.
++e8: CALL Jz (f64)
++e9: JMP-near Jz (f64)
++ea: JMP-far Ap (i64)
++eb: JMP-short Jb (f64)
++ec: IN AL,DX
++ed: IN eAX,DX
++ee: OUT DX,AL
++ef: OUT DX,eAX
++# 0xf0 - 0xff
++f0: LOCK (Prefix)
++f1:
++f2: REPNE (Prefix) | XACQUIRE (Prefix)
++f3: REP/REPE (Prefix) | XRELEASE (Prefix)
++f4: HLT
++f5: CMC
++f6: Grp3_1 Eb (1A)
++f7: Grp3_2 Ev (1A)
++f8: CLC
++f9: STC
++fa: CLI
++fb: STI
++fc: CLD
++fd: STD
++fe: Grp4 (1A)
++ff: Grp5 (1A)
++EndTable
++
++Table: 2-byte opcode (0x0f)
++Referrer: 2-byte escape
++AVXcode: 1
++# 0x0f 0x00-0x0f
++00: Grp6 (1A)
++01: Grp7 (1A)
++02: LAR Gv,Ew
++03: LSL Gv,Ew
++04:
++05: SYSCALL (o64)
++06: CLTS
++07: SYSRET (o64)
++08: INVD
++09: WBINVD
++0a:
++0b: UD2 (1B)
++0c:
++# AMD's prefetch group. Intel supports prefetchw(/1) only.
++0d: GrpP
++0e: FEMMS
++# 3DNow! uses the last imm byte as opcode extension.
++0f: 3DNow! Pq,Qq,Ib
++# 0x0f 0x10-0x1f
++# NOTE: According to the Intel SDM opcode map, vmovups and vmovupd have no
++# operands, but they actually do. Also, vmovss and vmovsd only accept 128-bit.
++# MOVSS/MOVSD has three forms in the SDM; this map just shows a typical one.
++# Many AVX instructions lack the v1 superscript, according to Intel AVX-Programming
++# Reference A.1
++10: vmovups Vps,Wps | vmovupd Vpd,Wpd (66) | vmovss Vx,Hx,Wss (F3),(v1) | vmovsd Vx,Hx,Wsd (F2),(v1)
++11: vmovups Wps,Vps | vmovupd Wpd,Vpd (66) | vmovss Wss,Hx,Vss (F3),(v1) | vmovsd Wsd,Hx,Vsd (F2),(v1)
++12: vmovlps Vq,Hq,Mq (v1) | vmovhlps Vq,Hq,Uq (v1) | vmovlpd Vq,Hq,Mq (66),(v1) | vmovsldup Vx,Wx (F3) | vmovddup Vx,Wx (F2)
++13: vmovlps Mq,Vq (v1) | vmovlpd Mq,Vq (66),(v1)
++14: vunpcklps Vx,Hx,Wx | vunpcklpd Vx,Hx,Wx (66)
++15: vunpckhps Vx,Hx,Wx | vunpckhpd Vx,Hx,Wx (66)
++16: vmovhps Vdq,Hq,Mq (v1) | vmovlhps Vdq,Hq,Uq (v1) | vmovhpd Vdq,Hq,Mq (66),(v1) | vmovshdup Vx,Wx (F3)
++17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1)
++18: Grp16 (1A)
++19:
++# Intel SDM opcode map does not list MPX instructions. For now using Gv for
++# bnd registers and Ev for everything else is OK because the instruction
++# decoder does not use the information except as an indication that there is
++# a ModR/M byte.
++1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
++1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
++1c:
++1d:
++1e:
++1f: NOP Ev
++# 0x0f 0x20-0x2f
++20: MOV Rd,Cd
++21: MOV Rd,Dd
++22: MOV Cd,Rd
++23: MOV Dd,Rd
++24:
++25:
++26:
++27:
++28: vmovaps Vps,Wps | vmovapd Vpd,Wpd (66)
++29: vmovaps Wps,Vps | vmovapd Wpd,Vpd (66)
++2a: cvtpi2ps Vps,Qpi | cvtpi2pd Vpd,Qpi (66) | vcvtsi2ss Vss,Hss,Ey (F3),(v1) | vcvtsi2sd Vsd,Hsd,Ey (F2),(v1)
++2b: vmovntps Mps,Vps | vmovntpd Mpd,Vpd (66)
++2c: cvttps2pi Ppi,Wps | cvttpd2pi Ppi,Wpd (66) | vcvttss2si Gy,Wss (F3),(v1) | vcvttsd2si Gy,Wsd (F2),(v1)
++2d: cvtps2pi Ppi,Wps | cvtpd2pi Qpi,Wpd (66) | vcvtss2si Gy,Wss (F3),(v1) | vcvtsd2si Gy,Wsd (F2),(v1)
++2e: vucomiss Vss,Wss (v1) | vucomisd Vsd,Wsd (66),(v1)
++2f: vcomiss Vss,Wss (v1) | vcomisd Vsd,Wsd (66),(v1)
++# 0x0f 0x30-0x3f
++30: WRMSR
++31: RDTSC
++32: RDMSR
++33: RDPMC
++34: SYSENTER
++35: SYSEXIT
++36:
++37: GETSEC
++38: escape # 3-byte escape 1
++39:
++3a: escape # 3-byte escape 2
++3b:
++3c:
++3d:
++3e:
++3f:
++# 0x0f 0x40-0x4f
++40: CMOVO Gv,Ev
++41: CMOVNO Gv,Ev | kandw/q Vk,Hk,Uk | kandb/d Vk,Hk,Uk (66)
++42: CMOVB/C/NAE Gv,Ev | kandnw/q Vk,Hk,Uk | kandnb/d Vk,Hk,Uk (66)
++43: CMOVAE/NB/NC Gv,Ev
++44: CMOVE/Z Gv,Ev | knotw/q Vk,Uk | knotb/d Vk,Uk (66)
++45: CMOVNE/NZ Gv,Ev | korw/q Vk,Hk,Uk | korb/d Vk,Hk,Uk (66)
++46: CMOVBE/NA Gv,Ev | kxnorw/q Vk,Hk,Uk | kxnorb/d Vk,Hk,Uk (66)
++47: CMOVA/NBE Gv,Ev | kxorw/q Vk,Hk,Uk | kxorb/d Vk,Hk,Uk (66)
++48: CMOVS Gv,Ev
++49: CMOVNS Gv,Ev
++4a: CMOVP/PE Gv,Ev | kaddw/q Vk,Hk,Uk | kaddb/d Vk,Hk,Uk (66)
++4b: CMOVNP/PO Gv,Ev | kunpckbw Vk,Hk,Uk (66) | kunpckwd/dq Vk,Hk,Uk
++4c: CMOVL/NGE Gv,Ev
++4d: CMOVNL/GE Gv,Ev
++4e: CMOVLE/NG Gv,Ev
++4f: CMOVNLE/G Gv,Ev
++# 0x0f 0x50-0x5f
++50: vmovmskps Gy,Ups | vmovmskpd Gy,Upd (66)
++51: vsqrtps Vps,Wps | vsqrtpd Vpd,Wpd (66) | vsqrtss Vss,Hss,Wss (F3),(v1) | vsqrtsd Vsd,Hsd,Wsd (F2),(v1)
++52: vrsqrtps Vps,Wps | vrsqrtss Vss,Hss,Wss (F3),(v1)
++53: vrcpps Vps,Wps | vrcpss Vss,Hss,Wss (F3),(v1)
++54: vandps Vps,Hps,Wps | vandpd Vpd,Hpd,Wpd (66)
++55: vandnps Vps,Hps,Wps | vandnpd Vpd,Hpd,Wpd (66)
++56: vorps Vps,Hps,Wps | vorpd Vpd,Hpd,Wpd (66)
++57: vxorps Vps,Hps,Wps | vxorpd Vpd,Hpd,Wpd (66)
++58: vaddps Vps,Hps,Wps | vaddpd Vpd,Hpd,Wpd (66) | vaddss Vss,Hss,Wss (F3),(v1) | vaddsd Vsd,Hsd,Wsd (F2),(v1)
++59: vmulps Vps,Hps,Wps | vmulpd Vpd,Hpd,Wpd (66) | vmulss Vss,Hss,Wss (F3),(v1) | vmulsd Vsd,Hsd,Wsd (F2),(v1)
++5a: vcvtps2pd Vpd,Wps | vcvtpd2ps Vps,Wpd (66) | vcvtss2sd Vsd,Hx,Wss (F3),(v1) | vcvtsd2ss Vss,Hx,Wsd (F2),(v1)
++5b: vcvtdq2ps Vps,Wdq | vcvtqq2ps Vps,Wqq (evo) | vcvtps2dq Vdq,Wps (66) | vcvttps2dq Vdq,Wps (F3)
++5c: vsubps Vps,Hps,Wps | vsubpd Vpd,Hpd,Wpd (66) | vsubss Vss,Hss,Wss (F3),(v1) | vsubsd Vsd,Hsd,Wsd (F2),(v1)
++5d: vminps Vps,Hps,Wps | vminpd Vpd,Hpd,Wpd (66) | vminss Vss,Hss,Wss (F3),(v1) | vminsd Vsd,Hsd,Wsd (F2),(v1)
++5e: vdivps Vps,Hps,Wps | vdivpd Vpd,Hpd,Wpd (66) | vdivss Vss,Hss,Wss (F3),(v1) | vdivsd Vsd,Hsd,Wsd (F2),(v1)
++5f: vmaxps Vps,Hps,Wps | vmaxpd Vpd,Hpd,Wpd (66) | vmaxss Vss,Hss,Wss (F3),(v1) | vmaxsd Vsd,Hsd,Wsd (F2),(v1)
++# 0x0f 0x60-0x6f
++60: punpcklbw Pq,Qd | vpunpcklbw Vx,Hx,Wx (66),(v1)
++61: punpcklwd Pq,Qd | vpunpcklwd Vx,Hx,Wx (66),(v1)
++62: punpckldq Pq,Qd | vpunpckldq Vx,Hx,Wx (66),(v1)
++63: packsswb Pq,Qq | vpacksswb Vx,Hx,Wx (66),(v1)
++64: pcmpgtb Pq,Qq | vpcmpgtb Vx,Hx,Wx (66),(v1)
++65: pcmpgtw Pq,Qq | vpcmpgtw Vx,Hx,Wx (66),(v1)
++66: pcmpgtd Pq,Qq | vpcmpgtd Vx,Hx,Wx (66),(v1)
++67: packuswb Pq,Qq | vpackuswb Vx,Hx,Wx (66),(v1)
++68: punpckhbw Pq,Qd | vpunpckhbw Vx,Hx,Wx (66),(v1)
++69: punpckhwd Pq,Qd | vpunpckhwd Vx,Hx,Wx (66),(v1)
++6a: punpckhdq Pq,Qd | vpunpckhdq Vx,Hx,Wx (66),(v1)
++6b: packssdw Pq,Qd | vpackssdw Vx,Hx,Wx (66),(v1)
++6c: vpunpcklqdq Vx,Hx,Wx (66),(v1)
++6d: vpunpckhqdq Vx,Hx,Wx (66),(v1)
++6e: movd/q Pd,Ey | vmovd/q Vy,Ey (66),(v1)
++6f: movq Pq,Qq | vmovdqa Vx,Wx (66) | vmovdqa32/64 Vx,Wx (66),(evo) | vmovdqu Vx,Wx (F3) | vmovdqu32/64 Vx,Wx (F3),(evo) | vmovdqu8/16 Vx,Wx (F2),(ev)
++# 0x0f 0x70-0x7f
++70: pshufw Pq,Qq,Ib | vpshufd Vx,Wx,Ib (66),(v1) | vpshufhw Vx,Wx,Ib (F3),(v1) | vpshuflw Vx,Wx,Ib (F2),(v1)
++71: Grp12 (1A)
++72: Grp13 (1A)
++73: Grp14 (1A)
++74: pcmpeqb Pq,Qq | vpcmpeqb Vx,Hx,Wx (66),(v1)
++75: pcmpeqw Pq,Qq | vpcmpeqw Vx,Hx,Wx (66),(v1)
++76: pcmpeqd Pq,Qq | vpcmpeqd Vx,Hx,Wx (66),(v1)
++# Note: Remove (v), because vzeroall and vzeroupper become emms without VEX.
++77: emms | vzeroupper | vzeroall
++78: VMREAD Ey,Gy | vcvttps2udq/pd2udq Vx,Wpd (evo) | vcvttsd2usi Gv,Wx (F2),(ev) | vcvttss2usi Gv,Wx (F3),(ev) | vcvttps2uqq/pd2uqq Vx,Wx (66),(ev)
++79: VMWRITE Gy,Ey | vcvtps2udq/pd2udq Vx,Wpd (evo) | vcvtsd2usi Gv,Wx (F2),(ev) | vcvtss2usi Gv,Wx (F3),(ev) | vcvtps2uqq/pd2uqq Vx,Wx (66),(ev)
++7a: vcvtudq2pd/uqq2pd Vpd,Wx (F3),(ev) | vcvtudq2ps/uqq2ps Vpd,Wx (F2),(ev) | vcvttps2qq/pd2qq Vx,Wx (66),(ev)
++7b: vcvtusi2sd Vpd,Hpd,Ev (F2),(ev) | vcvtusi2ss Vps,Hps,Ev (F3),(ev) | vcvtps2qq/pd2qq Vx,Wx (66),(ev)
++7c: vhaddpd Vpd,Hpd,Wpd (66) | vhaddps Vps,Hps,Wps (F2)
++7d: vhsubpd Vpd,Hpd,Wpd (66) | vhsubps Vps,Hps,Wps (F2)
++7e: movd/q Ey,Pd | vmovd/q Ey,Vy (66),(v1) | vmovq Vq,Wq (F3),(v1)
++7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqa32/64 Wx,Vx (66),(evo) | vmovdqu Wx,Vx (F3) | vmovdqu32/64 Wx,Vx (F3),(evo) | vmovdqu8/16 Wx,Vx (F2),(ev)
++# 0x0f 0x80-0x8f
++# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
++80: JO Jz (f64)
++81: JNO Jz (f64)
++82: JB/JC/JNAE Jz (f64)
++83: JAE/JNB/JNC Jz (f64)
++84: JE/JZ Jz (f64)
++85: JNE/JNZ Jz (f64)
++86: JBE/JNA Jz (f64)
++87: JA/JNBE Jz (f64)
++88: JS Jz (f64)
++89: JNS Jz (f64)
++8a: JP/JPE Jz (f64)
++8b: JNP/JPO Jz (f64)
++8c: JL/JNGE Jz (f64)
++8d: JNL/JGE Jz (f64)
++8e: JLE/JNG Jz (f64)
++8f: JNLE/JG Jz (f64)
++# 0x0f 0x90-0x9f
++90: SETO Eb | kmovw/q Vk,Wk | kmovb/d Vk,Wk (66)
++91: SETNO Eb | kmovw/q Mv,Vk | kmovb/d Mv,Vk (66)
++92: SETB/C/NAE Eb | kmovw Vk,Rv | kmovb Vk,Rv (66) | kmovq/d Vk,Rv (F2)
++93: SETAE/NB/NC Eb | kmovw Gv,Uk | kmovb Gv,Uk (66) | kmovq/d Gv,Uk (F2)
++94: SETE/Z Eb
++95: SETNE/NZ Eb
++96: SETBE/NA Eb
++97: SETA/NBE Eb
++98: SETS Eb | kortestw/q Vk,Uk | kortestb/d Vk,Uk (66)
++99: SETNS Eb | ktestw/q Vk,Uk | ktestb/d Vk,Uk (66)
++9a: SETP/PE Eb
++9b: SETNP/PO Eb
++9c: SETL/NGE Eb
++9d: SETNL/GE Eb
++9e: SETLE/NG Eb
++9f: SETNLE/G Eb
++# 0x0f 0xa0-0xaf
++a0: PUSH FS (d64)
++a1: POP FS (d64)
++a2: CPUID
++a3: BT Ev,Gv
++a4: SHLD Ev,Gv,Ib
++a5: SHLD Ev,Gv,CL
++a6: GrpPDLK
++a7: GrpRNG
++a8: PUSH GS (d64)
++a9: POP GS (d64)
++aa: RSM
++ab: BTS Ev,Gv
++ac: SHRD Ev,Gv,Ib
++ad: SHRD Ev,Gv,CL
++ae: Grp15 (1A),(1C)
++af: IMUL Gv,Ev
++# 0x0f 0xb0-0xbf
++b0: CMPXCHG Eb,Gb
++b1: CMPXCHG Ev,Gv
++b2: LSS Gv,Mp
++b3: BTR Ev,Gv
++b4: LFS Gv,Mp
++b5: LGS Gv,Mp
++b6: MOVZX Gv,Eb
++b7: MOVZX Gv,Ew
++b8: JMPE (!F3) | POPCNT Gv,Ev (F3)
++b9: Grp10 (1A)
++ba: Grp8 Ev,Ib (1A)
++bb: BTC Ev,Gv
++bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3)
++bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3)
++be: MOVSX Gv,Eb
++bf: MOVSX Gv,Ew
++# 0x0f 0xc0-0xcf
++c0: XADD Eb,Gb
++c1: XADD Ev,Gv
++c2: vcmpps Vps,Hps,Wps,Ib | vcmppd Vpd,Hpd,Wpd,Ib (66) | vcmpss Vss,Hss,Wss,Ib (F3),(v1) | vcmpsd Vsd,Hsd,Wsd,Ib (F2),(v1)
++c3: movnti My,Gy
++c4: pinsrw Pq,Ry/Mw,Ib | vpinsrw Vdq,Hdq,Ry/Mw,Ib (66),(v1)
++c5: pextrw Gd,Nq,Ib | vpextrw Gd,Udq,Ib (66),(v1)
++c6: vshufps Vps,Hps,Wps,Ib | vshufpd Vpd,Hpd,Wpd,Ib (66)
++c7: Grp9 (1A)
++c8: BSWAP RAX/EAX/R8/R8D
++c9: BSWAP RCX/ECX/R9/R9D
++ca: BSWAP RDX/EDX/R10/R10D
++cb: BSWAP RBX/EBX/R11/R11D
++cc: BSWAP RSP/ESP/R12/R12D
++cd: BSWAP RBP/EBP/R13/R13D
++ce: BSWAP RSI/ESI/R14/R14D
++cf: BSWAP RDI/EDI/R15/R15D
++# 0x0f 0xd0-0xdf
++d0: vaddsubpd Vpd,Hpd,Wpd (66) | vaddsubps Vps,Hps,Wps (F2)
++d1: psrlw Pq,Qq | vpsrlw Vx,Hx,Wx (66),(v1)
++d2: psrld Pq,Qq | vpsrld Vx,Hx,Wx (66),(v1)
++d3: psrlq Pq,Qq | vpsrlq Vx,Hx,Wx (66),(v1)
++d4: paddq Pq,Qq | vpaddq Vx,Hx,Wx (66),(v1)
++d5: pmullw Pq,Qq | vpmullw Vx,Hx,Wx (66),(v1)
++d6: vmovq Wq,Vq (66),(v1) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2)
++d7: pmovmskb Gd,Nq | vpmovmskb Gd,Ux (66),(v1)
++d8: psubusb Pq,Qq | vpsubusb Vx,Hx,Wx (66),(v1)
++d9: psubusw Pq,Qq | vpsubusw Vx,Hx,Wx (66),(v1)
++da: pminub Pq,Qq | vpminub Vx,Hx,Wx (66),(v1)
++db: pand Pq,Qq | vpand Vx,Hx,Wx (66),(v1) | vpandd/q Vx,Hx,Wx (66),(evo)
++dc: paddusb Pq,Qq | vpaddusb Vx,Hx,Wx (66),(v1)
++dd: paddusw Pq,Qq | vpaddusw Vx,Hx,Wx (66),(v1)
++de: pmaxub Pq,Qq | vpmaxub Vx,Hx,Wx (66),(v1)
++df: pandn Pq,Qq | vpandn Vx,Hx,Wx (66),(v1) | vpandnd/q Vx,Hx,Wx (66),(evo)
++# 0x0f 0xe0-0xef
++e0: pavgb Pq,Qq | vpavgb Vx,Hx,Wx (66),(v1)
++e1: psraw Pq,Qq | vpsraw Vx,Hx,Wx (66),(v1)
++e2: psrad Pq,Qq | vpsrad Vx,Hx,Wx (66),(v1)
++e3: pavgw Pq,Qq | vpavgw Vx,Hx,Wx (66),(v1)
++e4: pmulhuw Pq,Qq | vpmulhuw Vx,Hx,Wx (66),(v1)
++e5: pmulhw Pq,Qq | vpmulhw Vx,Hx,Wx (66),(v1)
++e6: vcvttpd2dq Vx,Wpd (66) | vcvtdq2pd Vx,Wdq (F3) | vcvtdq2pd/qq2pd Vx,Wdq (F3),(evo) | vcvtpd2dq Vx,Wpd (F2)
++e7: movntq Mq,Pq | vmovntdq Mx,Vx (66)
++e8: psubsb Pq,Qq | vpsubsb Vx,Hx,Wx (66),(v1)
++e9: psubsw Pq,Qq | vpsubsw Vx,Hx,Wx (66),(v1)
++ea: pminsw Pq,Qq | vpminsw Vx,Hx,Wx (66),(v1)
++eb: por Pq,Qq | vpor Vx,Hx,Wx (66),(v1) | vpord/q Vx,Hx,Wx (66),(evo)
++ec: paddsb Pq,Qq | vpaddsb Vx,Hx,Wx (66),(v1)
++ed: paddsw Pq,Qq | vpaddsw Vx,Hx,Wx (66),(v1)
++ee: pmaxsw Pq,Qq | vpmaxsw Vx,Hx,Wx (66),(v1)
++ef: pxor Pq,Qq | vpxor Vx,Hx,Wx (66),(v1) | vpxord/q Vx,Hx,Wx (66),(evo)
++# 0x0f 0xf0-0xff
++f0: vlddqu Vx,Mx (F2)
++f1: psllw Pq,Qq | vpsllw Vx,Hx,Wx (66),(v1)
++f2: pslld Pq,Qq | vpslld Vx,Hx,Wx (66),(v1)
++f3: psllq Pq,Qq | vpsllq Vx,Hx,Wx (66),(v1)
++f4: pmuludq Pq,Qq | vpmuludq Vx,Hx,Wx (66),(v1)
++f5: pmaddwd Pq,Qq | vpmaddwd Vx,Hx,Wx (66),(v1)
++f6: psadbw Pq,Qq | vpsadbw Vx,Hx,Wx (66),(v1)
++f7: maskmovq Pq,Nq | vmaskmovdqu Vx,Ux (66),(v1)
++f8: psubb Pq,Qq | vpsubb Vx,Hx,Wx (66),(v1)
++f9: psubw Pq,Qq | vpsubw Vx,Hx,Wx (66),(v1)
++fa: psubd Pq,Qq | vpsubd Vx,Hx,Wx (66),(v1)
++fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
++fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
++fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
++fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
++ff: UD0
++EndTable
++
++Table: 3-byte opcode 1 (0x0f 0x38)
++Referrer: 3-byte escape 1
++AVXcode: 2
++# 0x0f 0x38 0x00-0x0f
++00: pshufb Pq,Qq | vpshufb Vx,Hx,Wx (66),(v1)
++01: phaddw Pq,Qq | vphaddw Vx,Hx,Wx (66),(v1)
++02: phaddd Pq,Qq | vphaddd Vx,Hx,Wx (66),(v1)
++03: phaddsw Pq,Qq | vphaddsw Vx,Hx,Wx (66),(v1)
++04: pmaddubsw Pq,Qq | vpmaddubsw Vx,Hx,Wx (66),(v1)
++05: phsubw Pq,Qq | vphsubw Vx,Hx,Wx (66),(v1)
++06: phsubd Pq,Qq | vphsubd Vx,Hx,Wx (66),(v1)
++07: phsubsw Pq,Qq | vphsubsw Vx,Hx,Wx (66),(v1)
++08: psignb Pq,Qq | vpsignb Vx,Hx,Wx (66),(v1)
++09: psignw Pq,Qq | vpsignw Vx,Hx,Wx (66),(v1)
++0a: psignd Pq,Qq | vpsignd Vx,Hx,Wx (66),(v1)
++0b: pmulhrsw Pq,Qq | vpmulhrsw Vx,Hx,Wx (66),(v1)
++0c: vpermilps Vx,Hx,Wx (66),(v)
++0d: vpermilpd Vx,Hx,Wx (66),(v)
++0e: vtestps Vx,Wx (66),(v)
++0f: vtestpd Vx,Wx (66),(v)
++# 0x0f 0x38 0x10-0x1f
++10: pblendvb Vdq,Wdq (66) | vpsrlvw Vx,Hx,Wx (66),(evo) | vpmovuswb Wx,Vx (F3),(ev)
++11: vpmovusdb Wx,Vd (F3),(ev) | vpsravw Vx,Hx,Wx (66),(ev)
++12: vpmovusqb Wx,Vq (F3),(ev) | vpsllvw Vx,Hx,Wx (66),(ev)
++13: vcvtph2ps Vx,Wx (66),(v) | vpmovusdw Wx,Vd (F3),(ev)
++14: blendvps Vdq,Wdq (66) | vpmovusqw Wx,Vq (F3),(ev) | vprorvd/q Vx,Hx,Wx (66),(evo)
++15: blendvpd Vdq,Wdq (66) | vpmovusqd Wx,Vq (F3),(ev) | vprolvd/q Vx,Hx,Wx (66),(evo)
++16: vpermps Vqq,Hqq,Wqq (66),(v) | vpermps/d Vqq,Hqq,Wqq (66),(evo)
++17: vptest Vx,Wx (66)
++18: vbroadcastss Vx,Wd (66),(v)
++19: vbroadcastsd Vqq,Wq (66),(v) | vbroadcastf32x2 Vqq,Wq (66),(evo)
++1a: vbroadcastf128 Vqq,Mdq (66),(v) | vbroadcastf32x4/64x2 Vqq,Wq (66),(evo)
++1b: vbroadcastf32x8/64x4 Vqq,Mdq (66),(ev)
++1c: pabsb Pq,Qq | vpabsb Vx,Wx (66),(v1)
++1d: pabsw Pq,Qq | vpabsw Vx,Wx (66),(v1)
++1e: pabsd Pq,Qq | vpabsd Vx,Wx (66),(v1)
++1f: vpabsq Vx,Wx (66),(ev)
++# 0x0f 0x38 0x20-0x2f
++20: vpmovsxbw Vx,Ux/Mq (66),(v1) | vpmovswb Wx,Vx (F3),(ev)
++21: vpmovsxbd Vx,Ux/Md (66),(v1) | vpmovsdb Wx,Vd (F3),(ev)
++22: vpmovsxbq Vx,Ux/Mw (66),(v1) | vpmovsqb Wx,Vq (F3),(ev)
++23: vpmovsxwd Vx,Ux/Mq (66),(v1) | vpmovsdw Wx,Vd (F3),(ev)
++24: vpmovsxwq Vx,Ux/Md (66),(v1) | vpmovsqw Wx,Vq (F3),(ev)
++25: vpmovsxdq Vx,Ux/Mq (66),(v1) | vpmovsqd Wx,Vq (F3),(ev)
++26: vptestmb/w Vk,Hx,Wx (66),(ev) | vptestnmb/w Vk,Hx,Wx (F3),(ev)
++27: vptestmd/q Vk,Hx,Wx (66),(ev) | vptestnmd/q Vk,Hx,Wx (F3),(ev)
++28: vpmuldq Vx,Hx,Wx (66),(v1) | vpmovm2b/w Vx,Uk (F3),(ev)
++29: vpcmpeqq Vx,Hx,Wx (66),(v1) | vpmovb2m/w2m Vk,Ux (F3),(ev)
++2a: vmovntdqa Vx,Mx (66),(v1) | vpbroadcastmb2q Vx,Uk (F3),(ev)
++2b: vpackusdw Vx,Hx,Wx (66),(v1)
++2c: vmaskmovps Vx,Hx,Mx (66),(v) | vscalefps/d Vx,Hx,Wx (66),(evo)
++2d: vmaskmovpd Vx,Hx,Mx (66),(v) | vscalefss/d Vx,Hx,Wx (66),(evo)
++2e: vmaskmovps Mx,Hx,Vx (66),(v)
++2f: vmaskmovpd Mx,Hx,Vx (66),(v)
++# 0x0f 0x38 0x30-0x3f
++30: vpmovzxbw Vx,Ux/Mq (66),(v1) | vpmovwb Wx,Vx (F3),(ev)
++31: vpmovzxbd Vx,Ux/Md (66),(v1) | vpmovdb Wx,Vd (F3),(ev)
++32: vpmovzxbq Vx,Ux/Mw (66),(v1) | vpmovqb Wx,Vq (F3),(ev)
++33: vpmovzxwd Vx,Ux/Mq (66),(v1) | vpmovdw Wx,Vd (F3),(ev)
++34: vpmovzxwq Vx,Ux/Md (66),(v1) | vpmovqw Wx,Vq (F3),(ev)
++35: vpmovzxdq Vx,Ux/Mq (66),(v1) | vpmovqd Wx,Vq (F3),(ev)
++36: vpermd Vqq,Hqq,Wqq (66),(v) | vpermd/q Vqq,Hqq,Wqq (66),(evo)
++37: vpcmpgtq Vx,Hx,Wx (66),(v1)
++38: vpminsb Vx,Hx,Wx (66),(v1) | vpmovm2d/q Vx,Uk (F3),(ev)
++39: vpminsd Vx,Hx,Wx (66),(v1) | vpminsd/q Vx,Hx,Wx (66),(evo) | vpmovd2m/q2m Vk,Ux (F3),(ev)
++3a: vpminuw Vx,Hx,Wx (66),(v1) | vpbroadcastmw2d Vx,Uk (F3),(ev)
++3b: vpminud Vx,Hx,Wx (66),(v1) | vpminud/q Vx,Hx,Wx (66),(evo)
++3c: vpmaxsb Vx,Hx,Wx (66),(v1)
++3d: vpmaxsd Vx,Hx,Wx (66),(v1) | vpmaxsd/q Vx,Hx,Wx (66),(evo)
++3e: vpmaxuw Vx,Hx,Wx (66),(v1)
++3f: vpmaxud Vx,Hx,Wx (66),(v1) | vpmaxud/q Vx,Hx,Wx (66),(evo)
++# 0x0f 0x38 0x40-0x8f
++40: vpmulld Vx,Hx,Wx (66),(v1) | vpmulld/q Vx,Hx,Wx (66),(evo)
++41: vphminposuw Vdq,Wdq (66),(v1)
++42: vgetexpps/d Vx,Wx (66),(ev)
++43: vgetexpss/d Vx,Hx,Wx (66),(ev)
++44: vplzcntd/q Vx,Wx (66),(ev)
++45: vpsrlvd/q Vx,Hx,Wx (66),(v)
++46: vpsravd Vx,Hx,Wx (66),(v) | vpsravd/q Vx,Hx,Wx (66),(evo)
++47: vpsllvd/q Vx,Hx,Wx (66),(v)
++# Skip 0x48-0x4b
++4c: vrcp14ps/d Vpd,Wpd (66),(ev)
++4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
++4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
++4f: vrsqrt14ss/d Vsd,Hsd,Wsd (66),(ev)
++# Skip 0x50-0x57
++58: vpbroadcastd Vx,Wx (66),(v)
++59: vpbroadcastq Vx,Wx (66),(v) | vbroadcasti32x2 Vx,Wx (66),(evo)
++5a: vbroadcasti128 Vqq,Mdq (66),(v) | vbroadcasti32x4/64x2 Vx,Wx (66),(evo)
++5b: vbroadcasti32x8/64x4 Vqq,Mdq (66),(ev)
++# Skip 0x5c-0x63
++64: vpblendmd/q Vx,Hx,Wx (66),(ev)
++65: vblendmps/d Vx,Hx,Wx (66),(ev)
++66: vpblendmb/w Vx,Hx,Wx (66),(ev)
++# Skip 0x67-0x74
++75: vpermi2b/w Vx,Hx,Wx (66),(ev)
++76: vpermi2d/q Vx,Hx,Wx (66),(ev)
++77: vpermi2ps/d Vx,Hx,Wx (66),(ev)
++78: vpbroadcastb Vx,Wx (66),(v)
++79: vpbroadcastw Vx,Wx (66),(v)
++7a: vpbroadcastb Vx,Rv (66),(ev)
++7b: vpbroadcastw Vx,Rv (66),(ev)
++7c: vpbroadcastd/q Vx,Rv (66),(ev)
++7d: vpermt2b/w Vx,Hx,Wx (66),(ev)
++7e: vpermt2d/q Vx,Hx,Wx (66),(ev)
++7f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
++80: INVEPT Gy,Mdq (66)
++81: INVVPID Gy,Mdq (66)
++82: INVPCID Gy,Mdq (66)
++83: vpmultishiftqb Vx,Hx,Wx (66),(ev)
++88: vexpandps/d Vpd,Wpd (66),(ev)
++89: vpexpandd/q Vx,Wx (66),(ev)
++8a: vcompressps/d Wx,Vx (66),(ev)
++8b: vpcompressd/q Wx,Vx (66),(ev)
++8c: vpmaskmovd/q Vx,Hx,Mx (66),(v)
++8d: vpermb/w Vx,Hx,Wx (66),(ev)
++8e: vpmaskmovd/q Mx,Vx,Hx (66),(v)
++# 0x0f 0x38 0x90-0xbf (FMA)
++90: vgatherdd/q Vx,Hx,Wx (66),(v) | vpgatherdd/q Vx,Wx (66),(evo)
++91: vgatherqd/q Vx,Hx,Wx (66),(v) | vpgatherqd/q Vx,Wx (66),(evo)
++92: vgatherdps/d Vx,Hx,Wx (66),(v)
++93: vgatherqps/d Vx,Hx,Wx (66),(v)
++94:
++95:
++96: vfmaddsub132ps/d Vx,Hx,Wx (66),(v)
++97: vfmsubadd132ps/d Vx,Hx,Wx (66),(v)
++98: vfmadd132ps/d Vx,Hx,Wx (66),(v)
++99: vfmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
++9a: vfmsub132ps/d Vx,Hx,Wx (66),(v)
++9b: vfmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
++9c: vfnmadd132ps/d Vx,Hx,Wx (66),(v)
++9d: vfnmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
++9e: vfnmsub132ps/d Vx,Hx,Wx (66),(v)
++9f: vfnmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
++a0: vpscatterdd/q Wx,Vx (66),(ev)
++a1: vpscatterqd/q Wx,Vx (66),(ev)
++a2: vscatterdps/d Wx,Vx (66),(ev)
++a3: vscatterqps/d Wx,Vx (66),(ev)
++a6: vfmaddsub213ps/d Vx,Hx,Wx (66),(v)
++a7: vfmsubadd213ps/d Vx,Hx,Wx (66),(v)
++a8: vfmadd213ps/d Vx,Hx,Wx (66),(v)
++a9: vfmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
++aa: vfmsub213ps/d Vx,Hx,Wx (66),(v)
++ab: vfmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
++ac: vfnmadd213ps/d Vx,Hx,Wx (66),(v)
++ad: vfnmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
++ae: vfnmsub213ps/d Vx,Hx,Wx (66),(v)
++af: vfnmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
++b4: vpmadd52luq Vx,Hx,Wx (66),(ev)
++b5: vpmadd52huq Vx,Hx,Wx (66),(ev)
++b6: vfmaddsub231ps/d Vx,Hx,Wx (66),(v)
++b7: vfmsubadd231ps/d Vx,Hx,Wx (66),(v)
++b8: vfmadd231ps/d Vx,Hx,Wx (66),(v)
++b9: vfmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
++ba: vfmsub231ps/d Vx,Hx,Wx (66),(v)
++bb: vfmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
++bc: vfnmadd231ps/d Vx,Hx,Wx (66),(v)
++bd: vfnmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
++be: vfnmsub231ps/d Vx,Hx,Wx (66),(v)
++bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
++# 0x0f 0x38 0xc0-0xff
++c4: vpconflictd/q Vx,Wx (66),(ev)
++c6: Grp18 (1A)
++c7: Grp19 (1A)
++c8: sha1nexte Vdq,Wdq | vexp2ps/d Vx,Wx (66),(ev)
++c9: sha1msg1 Vdq,Wdq
++ca: sha1msg2 Vdq,Wdq | vrcp28ps/d Vx,Wx (66),(ev)
++cb: sha256rnds2 Vdq,Wdq | vrcp28ss/d Vx,Hx,Wx (66),(ev)
++cc: sha256msg1 Vdq,Wdq | vrsqrt28ps/d Vx,Wx (66),(ev)
++cd: sha256msg2 Vdq,Wdq | vrsqrt28ss/d Vx,Hx,Wx (66),(ev)
++db: VAESIMC Vdq,Wdq (66),(v1)
++dc: VAESENC Vdq,Hdq,Wdq (66),(v1)
++dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1)
++de: VAESDEC Vdq,Hdq,Wdq (66),(v1)
++df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1)
++f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) | CRC32 Gd,Eb (66&F2)
++f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) | CRC32 Gd,Ew (66&F2)
++f2: ANDN Gy,By,Ey (v)
++f3: Grp17 (1A)
++f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
++f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
++f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
++EndTable
++
++Table: 3-byte opcode 2 (0x0f 0x3a)
++Referrer: 3-byte escape 2
++AVXcode: 3
++# 0x0f 0x3a 0x00-0xff
++00: vpermq Vqq,Wqq,Ib (66),(v)
++01: vpermpd Vqq,Wqq,Ib (66),(v)
++02: vpblendd Vx,Hx,Wx,Ib (66),(v)
++03: valignd/q Vx,Hx,Wx,Ib (66),(ev)
++04: vpermilps Vx,Wx,Ib (66),(v)
++05: vpermilpd Vx,Wx,Ib (66),(v)
++06: vperm2f128 Vqq,Hqq,Wqq,Ib (66),(v)
++07:
++08: vroundps Vx,Wx,Ib (66) | vrndscaleps Vx,Wx,Ib (66),(evo)
++09: vroundpd Vx,Wx,Ib (66) | vrndscalepd Vx,Wx,Ib (66),(evo)
++0a: vroundss Vss,Wss,Ib (66),(v1) | vrndscaless Vx,Hx,Wx,Ib (66),(evo)
++0b: vroundsd Vsd,Wsd,Ib (66),(v1) | vrndscalesd Vx,Hx,Wx,Ib (66),(evo)
++0c: vblendps Vx,Hx,Wx,Ib (66)
++0d: vblendpd Vx,Hx,Wx,Ib (66)
++0e: vpblendw Vx,Hx,Wx,Ib (66),(v1)
++0f: palignr Pq,Qq,Ib | vpalignr Vx,Hx,Wx,Ib (66),(v1)
++14: vpextrb Rd/Mb,Vdq,Ib (66),(v1)
++15: vpextrw Rd/Mw,Vdq,Ib (66),(v1)
++16: vpextrd/q Ey,Vdq,Ib (66),(v1)
++17: vextractps Ed,Vdq,Ib (66),(v1)
++18: vinsertf128 Vqq,Hqq,Wqq,Ib (66),(v) | vinsertf32x4/64x2 Vqq,Hqq,Wqq,Ib (66),(evo)
++19: vextractf128 Wdq,Vqq,Ib (66),(v) | vextractf32x4/64x2 Wdq,Vqq,Ib (66),(evo)
++1a: vinsertf32x8/64x4 Vqq,Hqq,Wqq,Ib (66),(ev)
++1b: vextractf32x8/64x4 Wdq,Vqq,Ib (66),(ev)
++1d: vcvtps2ph Wx,Vx,Ib (66),(v)
++1e: vpcmpud/q Vk,Hd,Wd,Ib (66),(ev)
++1f: vpcmpd/q Vk,Hd,Wd,Ib (66),(ev)
++20: vpinsrb Vdq,Hdq,Ry/Mb,Ib (66),(v1)
++21: vinsertps Vdq,Hdq,Udq/Md,Ib (66),(v1)
++22: vpinsrd/q Vdq,Hdq,Ey,Ib (66),(v1)
++23: vshuff32x4/64x2 Vx,Hx,Wx,Ib (66),(ev)
++25: vpternlogd/q Vx,Hx,Wx,Ib (66),(ev)
++26: vgetmantps/d Vx,Wx,Ib (66),(ev)
++27: vgetmantss/d Vx,Hx,Wx,Ib (66),(ev)
++30: kshiftrb/w Vk,Uk,Ib (66),(v)
++31: kshiftrd/q Vk,Uk,Ib (66),(v)
++32: kshiftlb/w Vk,Uk,Ib (66),(v)
++33: kshiftld/q Vk,Uk,Ib (66),(v)
++38: vinserti128 Vqq,Hqq,Wqq,Ib (66),(v) | vinserti32x4/64x2 Vqq,Hqq,Wqq,Ib (66),(evo)
++39: vextracti128 Wdq,Vqq,Ib (66),(v) | vextracti32x4/64x2 Wdq,Vqq,Ib (66),(evo)
++3a: vinserti32x8/64x4 Vqq,Hqq,Wqq,Ib (66),(ev)
++3b: vextracti32x8/64x4 Wdq,Vqq,Ib (66),(ev)
++3e: vpcmpub/w Vk,Hk,Wx,Ib (66),(ev)
++3f: vpcmpb/w Vk,Hk,Wx,Ib (66),(ev)
++40: vdpps Vx,Hx,Wx,Ib (66)
++41: vdppd Vdq,Hdq,Wdq,Ib (66),(v1)
++42: vmpsadbw Vx,Hx,Wx,Ib (66),(v1) | vdbpsadbw Vx,Hx,Wx,Ib (66),(evo)
++43: vshufi32x4/64x2 Vx,Hx,Wx,Ib (66),(ev)
++44: vpclmulqdq Vdq,Hdq,Wdq,Ib (66),(v1)
++46: vperm2i128 Vqq,Hqq,Wqq,Ib (66),(v)
++4a: vblendvps Vx,Hx,Wx,Lx (66),(v)
++4b: vblendvpd Vx,Hx,Wx,Lx (66),(v)
++4c: vpblendvb Vx,Hx,Wx,Lx (66),(v1)
++50: vrangeps/d Vx,Hx,Wx,Ib (66),(ev)
++51: vrangess/d Vx,Hx,Wx,Ib (66),(ev)
++54: vfixupimmps/d Vx,Hx,Wx,Ib (66),(ev)
++55: vfixupimmss/d Vx,Hx,Wx,Ib (66),(ev)
++56: vreduceps/d Vx,Wx,Ib (66),(ev)
++57: vreducess/d Vx,Hx,Wx,Ib (66),(ev)
++60: vpcmpestrm Vdq,Wdq,Ib (66),(v1)
++61: vpcmpestri Vdq,Wdq,Ib (66),(v1)
++62: vpcmpistrm Vdq,Wdq,Ib (66),(v1)
++63: vpcmpistri Vdq,Wdq,Ib (66),(v1)
++66: vfpclassps/d Vk,Wx,Ib (66),(ev)
++67: vfpclassss/d Vk,Wx,Ib (66),(ev)
++cc: sha1rnds4 Vdq,Wdq,Ib
++df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
++f0: RORX Gy,Ey,Ib (F2),(v)
++EndTable
++
++GrpTable: Grp1
++0: ADD
++1: OR
++2: ADC
++3: SBB
++4: AND
++5: SUB
++6: XOR
++7: CMP
++EndTable
++
++GrpTable: Grp1A
++0: POP
++EndTable
++
++GrpTable: Grp2
++0: ROL
++1: ROR
++2: RCL
++3: RCR
++4: SHL/SAL
++5: SHR
++6:
++7: SAR
++EndTable
++
++GrpTable: Grp3_1
++0: TEST Eb,Ib
++1:
++2: NOT Eb
++3: NEG Eb
++4: MUL AL,Eb
++5: IMUL AL,Eb
++6: DIV AL,Eb
++7: IDIV AL,Eb
++EndTable
++
++GrpTable: Grp3_2
++0: TEST Ev,Iz
++1:
++2: NOT Ev
++3: NEG Ev
++4: MUL rAX,Ev
++5: IMUL rAX,Ev
++6: DIV rAX,Ev
++7: IDIV rAX,Ev
++EndTable
++
++GrpTable: Grp4
++0: INC Eb
++1: DEC Eb
++EndTable
++
++GrpTable: Grp5
++0: INC Ev
++1: DEC Ev
++# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
++2: CALLN Ev (f64)
++3: CALLF Ep
++4: JMPN Ev (f64)
++5: JMPF Mp
++6: PUSH Ev (d64)
++7:
++EndTable
++
++GrpTable: Grp6
++0: SLDT Rv/Mw
++1: STR Rv/Mw
++2: LLDT Ew
++3: LTR Ew
++4: VERR Ew
++5: VERW Ew
++EndTable
++
++GrpTable: Grp7
++0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
++1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
++2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
++3: LIDT Ms
++4: SMSW Mw/Rv
++5: rdpkru (110),(11B) | wrpkru (111),(11B)
++6: LMSW Ew
++7: INVLPG Mb | SWAPGS (o64),(000),(11B) | RDTSCP (001),(11B)
++EndTable
++
++GrpTable: Grp8
++4: BT
++5: BTS
++6: BTR
++7: BTC
++EndTable
++
++GrpTable: Grp9
++1: CMPXCHG8B/16B Mq/Mdq
++3: xrstors
++4: xsavec
++5: xsaves
++6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B)
++7: VMPTRST Mq | VMPTRST Mq (F3) | RDSEED Rv (11B)
++EndTable
++
++GrpTable: Grp10
++# all are UD1
++0: UD1
++1: UD1
++2: UD1
++3: UD1
++4: UD1
++5: UD1
++6: UD1
++7: UD1
++EndTable
++
++# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
++GrpTable: Grp11A
++0: MOV Eb,Ib
++7: XABORT Ib (000),(11B)
++EndTable
++
++GrpTable: Grp11B
++0: MOV Eb,Iz
++7: XBEGIN Jz (000),(11B)
++EndTable
++
++GrpTable: Grp12
++2: psrlw Nq,Ib (11B) | vpsrlw Hx,Ux,Ib (66),(11B),(v1)
++4: psraw Nq,Ib (11B) | vpsraw Hx,Ux,Ib (66),(11B),(v1)
++6: psllw Nq,Ib (11B) | vpsllw Hx,Ux,Ib (66),(11B),(v1)
++EndTable
++
++GrpTable: Grp13
++0: vprord/q Hx,Wx,Ib (66),(ev)
++1: vprold/q Hx,Wx,Ib (66),(ev)
++2: psrld Nq,Ib (11B) | vpsrld Hx,Ux,Ib (66),(11B),(v1)
++4: psrad Nq,Ib (11B) | vpsrad Hx,Ux,Ib (66),(11B),(v1) | vpsrad/q Hx,Ux,Ib (66),(evo)
++6: pslld Nq,Ib (11B) | vpslld Hx,Ux,Ib (66),(11B),(v1)
++EndTable
++
++GrpTable: Grp14
++2: psrlq Nq,Ib (11B) | vpsrlq Hx,Ux,Ib (66),(11B),(v1)
++3: vpsrldq Hx,Ux,Ib (66),(11B),(v1)
++6: psllq Nq,Ib (11B) | vpsllq Hx,Ux,Ib (66),(11B),(v1)
++7: vpslldq Hx,Ux,Ib (66),(11B),(v1)
++EndTable
++
++GrpTable: Grp15
++0: fxsave | RDFSBASE Ry (F3),(11B)
++1: fxrstor | RDGSBASE Ry (F3),(11B)
++2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
++3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
++4: XSAVE | ptwrite Ey (F3),(11B)
++5: XRSTOR | lfence (11B)
++6: XSAVEOPT | clwb (66) | mfence (11B)
++7: clflush | clflushopt (66) | sfence (11B)
++EndTable
++
++GrpTable: Grp16
++0: prefetch NTA
++1: prefetch T0
++2: prefetch T1
++3: prefetch T2
++EndTable
++
++GrpTable: Grp17
++1: BLSR By,Ey (v)
++2: BLSMSK By,Ey (v)
++3: BLSI By,Ey (v)
++EndTable
++
++GrpTable: Grp18
++1: vgatherpf0dps/d Wx (66),(ev)
++2: vgatherpf1dps/d Wx (66),(ev)
++5: vscatterpf0dps/d Wx (66),(ev)
++6: vscatterpf1dps/d Wx (66),(ev)
++EndTable
++
++GrpTable: Grp19
++1: vgatherpf0qps/d Wx (66),(ev)
++2: vgatherpf1qps/d Wx (66),(ev)
++5: vscatterpf0qps/d Wx (66),(ev)
++6: vscatterpf1qps/d Wx (66),(ev)
++EndTable
++
++# AMD's Prefetch Group
++GrpTable: GrpP
++0: PREFETCH
++1: PREFETCHW
++EndTable
++
++GrpTable: GrpPDLK
++0: MONTMUL
++1: XSHA1
++2: XSHA2
++EndTable
++
++GrpTable: GrpRNG
++0: xstore-rng
++1: xcrypt-ecb
++2: xcrypt-cbc
++4: xcrypt-cfb
++5: xcrypt-ofb
++EndTable
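
For readers following the map syntax above: each entry is "opcode: mnemonic operands (superscripts)", where operand codes such as Ev or Ib drive ModRM and immediate decoding, and the parenthesised superscripts ((66), (F3), (1A), (v1), (ev), ...) select last-prefix variants, group tables and VEX/EVEX handling. The awk generator added below folds each line into insn_attr_t flags. A hand-written sketch, assuming the kernel's inat.h macros, of roughly what a line like "ba: Grp8 Ev,Ib (1A)" becomes; the array name and group id are illustrative, not the generator's actual choices:

/* Illustrative sketch only -- not generator output. */
const insn_attr_t example_twobyte_entries[INAT_OPCODE_TABLE_SIZE] = {
	/* "ba: Grp8 Ev,Ib (1A)": ModRM operand, byte immediate, group table */
	[0xba] = INAT_MAKE_IMM(INAT_IMM_BYTE) | INAT_MODRM | INAT_MAKE_GROUP(8),
};
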
+diff --git a/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
+new file mode 100644
+index 0000000..b02a36b
+--- /dev/null
++++ b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
+@@ -0,0 +1,393 @@
++#!/bin/awk -f
++# SPDX-License-Identifier: GPL-2.0
++# gen-insn-attr-x86.awk: Instruction attribute table generator
++# Written by Masami Hiramatsu <mhiramat@redhat.com>
++#
++# Usage: awk -f gen-insn-attr-x86.awk x86-opcode-map.txt > inat-tables.c
++
++# Awk implementation sanity check
++function check_awk_implement() {
++ if (sprintf("%x", 0) != "0")
++ return "Your awk has a printf-format problem."
++ return ""
++}
++
++# Clear working vars
++function clear_vars() {
++ delete table
++ delete lptable2
++ delete lptable1
++ delete lptable3
++ eid = -1 # escape id
++ gid = -1 # group id
++ aid = -1 # AVX id
++ tname = ""
++}
++
++BEGIN {
++ # Implementation error checking
++ awkchecked = check_awk_implement()
++ if (awkchecked != "") {
++ print "Error: " awkchecked > "/dev/stderr"
++ print "Please try to use gawk." > "/dev/stderr"
++ exit 1
++ }
++
++ # Setup generating tables
++ print "/* x86 opcode map generated from x86-opcode-map.txt */"
++ print "/* Do not change this code. */\n"
++ ggid = 1
++ geid = 1
++ gaid = 0
++ delete etable
++ delete gtable
++ delete atable
++
++ opnd_expr = "^[A-Za-z/]"
++ ext_expr = "^\\("
++ sep_expr = "^\\|$"
++ group_expr = "^Grp[0-9A-Za-z]+"
++
++ imm_expr = "^[IJAOL][a-z]"
++ imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
++ imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
++ imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)"
++ imm_flag["Id"] = "INAT_MAKE_IMM(INAT_IMM_DWORD)"
++ imm_flag["Iq"] = "INAT_MAKE_IMM(INAT_IMM_QWORD)"
++ imm_flag["Ap"] = "INAT_MAKE_IMM(INAT_IMM_PTR)"
++ imm_flag["Iz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)"
++ imm_flag["Jz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)"
++ imm_flag["Iv"] = "INAT_MAKE_IMM(INAT_IMM_VWORD)"
++ imm_flag["Ob"] = "INAT_MOFFSET"
++ imm_flag["Ov"] = "INAT_MOFFSET"
++ imm_flag["Lx"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
++
++ modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])"
++ force64_expr = "\\([df]64\\)"
++ rex_expr = "^REX(\\.[XRWB]+)*"
++ fpu_expr = "^ESC" # TODO
++
++ lprefix1_expr = "\\((66|!F3)\\)"
++ lprefix2_expr = "\\(F3\\)"
++ lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
++ lprefix_expr = "\\((66|F2|F3)\\)"
++ max_lprefix = 4
++
++ # All opcodes starting with lower-case 'v', 'k' or with (v1) superscript
++	# accept VEX prefix
++ vexok_opcode_expr = "^[vk].*"
++ vexok_expr = "\\(v1\\)"
++	# All opcodes with (v) superscript support *only* VEX prefix
++ vexonly_expr = "\\(v\\)"
++	# All opcodes with (ev) superscript support *only* EVEX prefix
++ evexonly_expr = "\\(ev\\)"
++
++ prefix_expr = "\\(Prefix\\)"
++ prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ"
++ prefix_num["REPNE"] = "INAT_PFX_REPNE"
++ prefix_num["REP/REPE"] = "INAT_PFX_REPE"
++ prefix_num["XACQUIRE"] = "INAT_PFX_REPNE"
++ prefix_num["XRELEASE"] = "INAT_PFX_REPE"
++ prefix_num["LOCK"] = "INAT_PFX_LOCK"
++ prefix_num["SEG=CS"] = "INAT_PFX_CS"
++ prefix_num["SEG=DS"] = "INAT_PFX_DS"
++ prefix_num["SEG=ES"] = "INAT_PFX_ES"
++ prefix_num["SEG=FS"] = "INAT_PFX_FS"
++ prefix_num["SEG=GS"] = "INAT_PFX_GS"
++ prefix_num["SEG=SS"] = "INAT_PFX_SS"
++ prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ"
++ prefix_num["VEX+1byte"] = "INAT_PFX_VEX2"
++ prefix_num["VEX+2byte"] = "INAT_PFX_VEX3"
++ prefix_num["EVEX"] = "INAT_PFX_EVEX"
++
++ clear_vars()
++}
++
++function semantic_error(msg) {
++ print "Semantic error at " NR ": " msg > "/dev/stderr"
++ exit 1
++}
++
++function debug(msg) {
++ print "DEBUG: " msg
++}
++
++function array_size(arr, i,c) {
++ c = 0
++ for (i in arr)
++ c++
++ return c
++}
++
++/^Table:/ {
++ print "/* " $0 " */"
++ if (tname != "")
++ semantic_error("Hit Table: before EndTable:.");
++}
++
++/^Referrer:/ {
++ if (NF != 1) {
++ # escape opcode table
++ ref = ""
++ for (i = 2; i <= NF; i++)
++ ref = ref $i
++ eid = escape[ref]
++ tname = sprintf("inat_escape_table_%d", eid)
++ }
++}
++
++/^AVXcode:/ {
++ if (NF != 1) {
++ # AVX/escape opcode table
++ aid = $2
++ if (gaid <= aid)
++ gaid = aid + 1
++ if (tname == "") # AVX only opcode table
++ tname = sprintf("inat_avx_table_%d", $2)
++ }
++ if (aid == -1 && eid == -1) # primary opcode table
++ tname = "inat_primary_table"
++}
++
++/^GrpTable:/ {
++ print "/* " $0 " */"
++ if (!($2 in group))
++ semantic_error("No group: " $2 )
++ gid = group[$2]
++ tname = "inat_group_table_" gid
++}
++
++function print_table(tbl,name,fmt,n)
++{
++ print "const insn_attr_t " name " = {"
++ for (i = 0; i < n; i++) {
++ id = sprintf(fmt, i)
++ if (tbl[id])
++ print " [" id "] = " tbl[id] ","
++ }
++ print "};"
++}
++
++/^EndTable/ {
++ if (gid != -1) {
++ # print group tables
++ if (array_size(table) != 0) {
++ print_table(table, tname "[INAT_GROUP_TABLE_SIZE]",
++ "0x%x", 8)
++ gtable[gid,0] = tname
++ }
++ if (array_size(lptable1) != 0) {
++ print_table(lptable1, tname "_1[INAT_GROUP_TABLE_SIZE]",
++ "0x%x", 8)
++ gtable[gid,1] = tname "_1"
++ }
++ if (array_size(lptable2) != 0) {
++ print_table(lptable2, tname "_2[INAT_GROUP_TABLE_SIZE]",
++ "0x%x", 8)
++ gtable[gid,2] = tname "_2"
++ }
++ if (array_size(lptable3) != 0) {
++ print_table(lptable3, tname "_3[INAT_GROUP_TABLE_SIZE]",
++ "0x%x", 8)
++ gtable[gid,3] = tname "_3"
++ }
++ } else {
++ # print primary/escaped tables
++ if (array_size(table) != 0) {
++ print_table(table, tname "[INAT_OPCODE_TABLE_SIZE]",
++ "0x%02x", 256)
++ etable[eid,0] = tname
++ if (aid >= 0)
++ atable[aid,0] = tname
++ }
++ if (array_size(lptable1) != 0) {
++ print_table(lptable1,tname "_1[INAT_OPCODE_TABLE_SIZE]",
++ "0x%02x", 256)
++ etable[eid,1] = tname "_1"
++ if (aid >= 0)
++ atable[aid,1] = tname "_1"
++ }
++ if (array_size(lptable2) != 0) {
++ print_table(lptable2,tname "_2[INAT_OPCODE_TABLE_SIZE]",
++ "0x%02x", 256)
++ etable[eid,2] = tname "_2"
++ if (aid >= 0)
++ atable[aid,2] = tname "_2"
++ }
++ if (array_size(lptable3) != 0) {
++ print_table(lptable3,tname "_3[INAT_OPCODE_TABLE_SIZE]",
++ "0x%02x", 256)
++ etable[eid,3] = tname "_3"
++ if (aid >= 0)
++ atable[aid,3] = tname "_3"
++ }
++ }
++ print ""
++ clear_vars()
++}
++
++function add_flags(old,new) {
++ if (old && new)
++ return old " | " new
++ else if (old)
++ return old
++ else
++ return new
++}
++
++# convert operands to flags.
++function convert_operands(count,opnd, i,j,imm,mod)
++{
++ imm = null
++ mod = null
++ for (j = 1; j <= count; j++) {
++ i = opnd[j]
++ if (match(i, imm_expr) == 1) {
++ if (!imm_flag[i])
++ semantic_error("Unknown imm opnd: " i)
++ if (imm) {
++ if (i != "Ib")
++ semantic_error("Second IMM error")
++ imm = add_flags(imm, "INAT_SCNDIMM")
++ } else
++ imm = imm_flag[i]
++ } else if (match(i, modrm_expr))
++ mod = "INAT_MODRM"
++ }
++ return add_flags(imm, mod)
++}
++
++/^[0-9a-f]+\:/ {
++ if (NR == 1)
++ next
++ # get index
++ idx = "0x" substr($1, 1, index($1,":") - 1)
++ if (idx in table)
++ semantic_error("Redefine " idx " in " tname)
++
++ # check if escaped opcode
++ if ("escape" == $2) {
++ if ($3 != "#")
++ semantic_error("No escaped name")
++ ref = ""
++ for (i = 4; i <= NF; i++)
++ ref = ref $i
++ if (ref in escape)
++ semantic_error("Redefine escape (" ref ")")
++ escape[ref] = geid
++ geid++
++ table[idx] = "INAT_MAKE_ESCAPE(" escape[ref] ")"
++ next
++ }
++
++ variant = null
++ # converts
++ i = 2
++ while (i <= NF) {
++ opcode = $(i++)
++ delete opnds
++ ext = null
++ flags = null
++ opnd = null
++ # parse one opcode
++ if (match($i, opnd_expr)) {
++ opnd = $i
++ count = split($(i++), opnds, ",")
++ flags = convert_operands(count, opnds)
++ }
++ if (match($i, ext_expr))
++ ext = $(i++)
++ if (match($i, sep_expr))
++ i++
++ else if (i < NF)
++ semantic_error($i " is not a separator")
++
++ # check if group opcode
++ if (match(opcode, group_expr)) {
++ if (!(opcode in group)) {
++ group[opcode] = ggid
++ ggid++
++ }
++ flags = add_flags(flags, "INAT_MAKE_GROUP(" group[opcode] ")")
++ }
++ # check force(or default) 64bit
++ if (match(ext, force64_expr))
++ flags = add_flags(flags, "INAT_FORCE64")
++
++ # check REX prefix
++ if (match(opcode, rex_expr))
++ flags = add_flags(flags, "INAT_MAKE_PREFIX(INAT_PFX_REX)")
++
++ # check coprocessor escape : TODO
++ if (match(opcode, fpu_expr))
++ flags = add_flags(flags, "INAT_MODRM")
++
++ # check VEX codes
++ if (match(ext, evexonly_expr))
++ flags = add_flags(flags, "INAT_VEXOK | INAT_EVEXONLY")
++ else if (match(ext, vexonly_expr))
++ flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY")
++ else if (match(ext, vexok_expr) || match(opcode, vexok_opcode_expr))
++ flags = add_flags(flags, "INAT_VEXOK")
++
++ # check prefixes
++ if (match(ext, prefix_expr)) {
++ if (!prefix_num[opcode])
++ semantic_error("Unknown prefix: " opcode)
++ flags = add_flags(flags, "INAT_MAKE_PREFIX(" prefix_num[opcode] ")")
++ }
++ if (length(flags) == 0)
++ continue
++ # check if last prefix
++ if (match(ext, lprefix1_expr)) {
++ lptable1[idx] = add_flags(lptable1[idx],flags)
++ variant = "INAT_VARIANT"
++ }
++ if (match(ext, lprefix2_expr)) {
++ lptable2[idx] = add_flags(lptable2[idx],flags)
++ variant = "INAT_VARIANT"
++ }
++ if (match(ext, lprefix3_expr)) {
++ lptable3[idx] = add_flags(lptable3[idx],flags)
++ variant = "INAT_VARIANT"
++ }
++ if (!match(ext, lprefix_expr)){
++ table[idx] = add_flags(table[idx],flags)
++ }
++ }
++ if (variant)
++ table[idx] = add_flags(table[idx],variant)
++}
++
++END {
++ if (awkchecked != "")
++ exit 1
++ # print escape opcode map's array
++ print "/* Escape opcode map array */"
++ print "const insn_attr_t * const inat_escape_tables[INAT_ESC_MAX + 1]" \
++ "[INAT_LSTPFX_MAX + 1] = {"
++ for (i = 0; i < geid; i++)
++ for (j = 0; j < max_lprefix; j++)
++ if (etable[i,j])
++ print " ["i"]["j"] = "etable[i,j]","
++ print "};\n"
++ # print group opcode map's array
++ print "/* Group opcode map array */"
++ print "const insn_attr_t * const inat_group_tables[INAT_GRP_MAX + 1]"\
++ "[INAT_LSTPFX_MAX + 1] = {"
++ for (i = 0; i < ggid; i++)
++ for (j = 0; j < max_lprefix; j++)
++ if (gtable[i,j])
++ print " ["i"]["j"] = "gtable[i,j]","
++ print "};\n"
++ # print AVX opcode map's array
++ print "/* AVX opcode map array */"
++ print "const insn_attr_t * const inat_avx_tables[X86_VEX_M_MAX + 1]"\
++ "[INAT_LSTPFX_MAX + 1] = {"
++ for (i = 0; i < gaid; i++)
++ for (j = 0; j < max_lprefix; j++)
++ if (atable[i,j])
++ print " ["i"]["j"] = "atable[i,j]","
++ print "};"
++}
++
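
To make the generator's output concrete: the EndTable and END handlers above emit one attribute array per opcode or group table, plus escape/group/AVX lookup arrays. A minimal sketch of that output shape, assuming the kernel's inat.h definitions; the entries shown are placeholders rather than real generated values:

/* Sketch of the generated inat-tables.c shape (placeholder values). */
const insn_attr_t inat_escape_table_1[INAT_OPCODE_TABLE_SIZE] = {
	[0x10] = INAT_MODRM,			/* placeholder */
};

const insn_attr_t inat_group_table_8[INAT_GROUP_TABLE_SIZE] = {
	[0x4] = INAT_MODRM,			/* placeholder */
};

/* Escape opcode map array */
const insn_attr_t * const inat_escape_tables[INAT_ESC_MAX + 1][INAT_LSTPFX_MAX + 1] = {
	[1][0] = inat_escape_table_1,
};

/* Group opcode map array */
const insn_attr_t * const inat_group_tables[INAT_GRP_MAX + 1][INAT_LSTPFX_MAX + 1] = {
	[8][0] = inat_group_table_8,
};
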
+diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
+index 365c34e..694abc6 100644
+--- a/tools/objtool/builtin-check.c
++++ b/tools/objtool/builtin-check.c
+@@ -29,7 +29,7 @@
+ #include "builtin.h"
+ #include "check.h"
+
+-bool nofp;
++bool no_fp, no_unreachable, retpoline, module;
+
+ static const char * const check_usage[] = {
+ "objtool check [<options>] file.o",
+@@ -37,7 +37,10 @@ static const char * const check_usage[] = {
+ };
+
+ const struct option check_options[] = {
+- OPT_BOOLEAN('f', "no-fp", &nofp, "Skip frame pointer validation"),
++ OPT_BOOLEAN('f', "no-fp", &no_fp, "Skip frame pointer validation"),
++ OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"),
++ OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"),
++ OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"),
+ OPT_END(),
+ };
+
+@@ -52,5 +55,5 @@ int cmd_check(int argc, const char **argv)
+
+ objname = argv[0];
+
+- return check(objname, nofp);
++ return check(objname, false);
+ }
+diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c
+new file mode 100644
+index 0000000..77ea2b9
+--- /dev/null
++++ b/tools/objtool/builtin-orc.c
+@@ -0,0 +1,68 @@
++/*
++ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2
++ * of the License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, see <http://www.gnu.org/licenses/>.
++ */
++
++/*
++ * objtool orc:
++ *
++ * This command analyzes a .o file and adds .orc_unwind and .orc_unwind_ip
++ * sections to it, which is used by the in-kernel ORC unwinder.
++ *
++ * This command is a superset of "objtool check".
++ */
++
++#include <string.h>
++#include "builtin.h"
++#include "check.h"
++
++
++static const char *orc_usage[] = {
++ "objtool orc generate [<options>] file.o",
++ "objtool orc dump file.o",
++ NULL,
++};
++
++int cmd_orc(int argc, const char **argv)
++{
++ const char *objname;
++
++ argc--; argv++;
++ if (argc <= 0)
++ usage_with_options(orc_usage, check_options);
++
++ if (!strncmp(argv[0], "gen", 3)) {
++ argc = parse_options(argc, argv, check_options, orc_usage, 0);
++ if (argc != 1)
++ usage_with_options(orc_usage, check_options);
++
++ objname = argv[0];
++
++ return check(objname, true);
++ }
++
++ if (!strcmp(argv[0], "dump")) {
++ if (argc != 2)
++ usage_with_options(orc_usage, check_options);
++
++ objname = argv[1];
++
++ return orc_dump(objname);
++ }
++
++ usage_with_options(orc_usage, check_options);
++
++ return 0;
++}
+diff --git a/tools/objtool/builtin.h b/tools/objtool/builtin.h
+index 34d2ba7..28ff40e 100644
+--- a/tools/objtool/builtin.h
++++ b/tools/objtool/builtin.h
+@@ -17,6 +17,12 @@
+ #ifndef _BUILTIN_H
+ #define _BUILTIN_H
+
++#include <subcmd/parse-options.h>
++
++extern const struct option check_options[];
++extern bool no_fp, no_unreachable, retpoline, module;
++
+ extern int cmd_check(int argc, const char **argv);
++extern int cmd_orc(int argc, const char **argv);
+
+ #endif /* _BUILTIN_H */
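
Both commands declared here are presumably hooked into objtool's top-level argument dispatch, which is not part of this diff. A hedged sketch of what such a dispatcher could look like; the struct, table and run_command() helper are hypothetical, only cmd_check()/cmd_orc() come from builtin.h:

/* Illustrative dispatcher sketch -- hypothetical, not taken from this patch. */
#include <string.h>
#include "builtin.h"

struct cmd {
	const char *name;
	int (*fn)(int argc, const char **argv);
};

static const struct cmd cmds[] = {
	{ "check", cmd_check },
	{ "orc",   cmd_orc },
};

static int run_command(int argc, const char **argv)
{
	unsigned int i;

	for (i = 0; i < sizeof(cmds) / sizeof(cmds[0]); i++)
		if (!strcmp(cmds[i].name, argv[0]))
			return cmds[i].fn(argc, argv);

	return -1;	/* unknown subcommand */
}
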
+diff --git a/tools/objtool/cfi.h b/tools/objtool/cfi.h
+new file mode 100644
+index 0000000..2fe883c
+--- /dev/null
++++ b/tools/objtool/cfi.h
+@@ -0,0 +1,55 @@
++/*
++ * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2
++ * of the License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _OBJTOOL_CFI_H
++#define _OBJTOOL_CFI_H
++
++#define CFI_UNDEFINED -1
++#define CFI_CFA -2
++#define CFI_SP_INDIRECT -3
++#define CFI_BP_INDIRECT -4
++
++#define CFI_AX 0
++#define CFI_DX 1
++#define CFI_CX 2
++#define CFI_BX 3
++#define CFI_SI 4
++#define CFI_DI 5
++#define CFI_BP 6
++#define CFI_SP 7
++#define CFI_R8 8
++#define CFI_R9 9
++#define CFI_R10 10
++#define CFI_R11 11
++#define CFI_R12 12
++#define CFI_R13 13
++#define CFI_R14 14
++#define CFI_R15 15
++#define CFI_RA 16
++#define CFI_NUM_REGS 17
++
++struct cfi_reg {
++ int base;
++ int offset;
++};
++
++struct cfi_state {
++ struct cfi_reg cfa;
++ struct cfi_reg regs[CFI_NUM_REGS];
++};
++
++#endif /* _OBJTOOL_CFI_H */
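
A usage note on these definitions: the checker (see clear_insn_state() in the check.c hunk that follows) starts every walk with the CFA and all tracked registers set to CFI_UNDEFINED. A minimal stand-alone sketch of that reset, assuming only this header; the helper name is illustrative:

/* Minimal sketch: reset a cfi_state before tracking a function. */
#include "cfi.h"

static void cfi_state_init(struct cfi_state *cfi)
{
	int i;

	cfi->cfa.base = CFI_UNDEFINED;
	cfi->cfa.offset = 0;
	for (i = 0; i < CFI_NUM_REGS; i++) {
		cfi->regs[i].base = CFI_UNDEFINED;
		cfi->regs[i].offset = 0;
	}
}
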
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index b7a0af5..c8b8b71 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -18,6 +18,7 @@
+ #include <string.h>
+ #include <stdlib.h>
+
++#include "builtin.h"
+ #include "check.h"
+ #include "elf.h"
+ #include "special.h"
+@@ -25,12 +26,7 @@
+ #include "warn.h"
+
+ #include <linux/hashtable.h>
+-
+-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+-
+-#define STATE_FP_SAVED 0x1
+-#define STATE_FP_SETUP 0x2
+-#define STATE_FENTRY 0x4
++#include <linux/kernel.h>
+
+ struct alternative {
+ struct list_head list;
+@@ -38,10 +34,10 @@ struct alternative {
+ };
+
+ const char *objname;
+-static bool nofp;
++struct cfi_state initial_func_cfi;
+
+-static struct instruction *find_insn(struct objtool_file *file,
+- struct section *sec, unsigned long offset)
++struct instruction *find_insn(struct objtool_file *file,
++ struct section *sec, unsigned long offset)
+ {
+ struct instruction *insn;
+
+@@ -57,28 +53,12 @@ static struct instruction *next_insn_same_sec(struct objtool_file *file,
+ {
+ struct instruction *next = list_next_entry(insn, list);
+
+- if (&next->list == &file->insn_list || next->sec != insn->sec)
++ if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
+ return NULL;
+
+ return next;
+ }
+
+-static bool gcov_enabled(struct objtool_file *file)
+-{
+- struct section *sec;
+- struct symbol *sym;
+-
+- list_for_each_entry(sec, &file->elf->sections, list)
+- list_for_each_entry(sym, &sec->symbol_list, list)
+- if (!strncmp(sym->name, "__gcov_.", 8))
+- return true;
+-
+- return false;
+-}
+-
+-#define for_each_insn(file, insn) \
+- list_for_each_entry(insn, &file->insn_list, list)
+-
+ #define func_for_each_insn(file, func, insn) \
+ for (insn = find_insn(file, func->sec, func->offset); \
+ insn && &insn->list != &file->insn_list && \
+@@ -95,6 +75,9 @@ static bool gcov_enabled(struct objtool_file *file)
+ #define sec_for_each_insn_from(file, insn) \
+ for (; insn; insn = next_insn_same_sec(file, insn))
+
++#define sec_for_each_insn_continue(file, insn) \
++ for (insn = next_insn_same_sec(file, insn); insn; \
++ insn = next_insn_same_sec(file, insn))
+
+ /*
+ * Check if the function has been manually whitelisted with the
+@@ -104,7 +87,6 @@ static bool gcov_enabled(struct objtool_file *file)
+ static bool ignore_func(struct objtool_file *file, struct symbol *func)
+ {
+ struct rela *rela;
+- struct instruction *insn;
+
+ /* check for STACK_FRAME_NON_STANDARD */
+ if (file->whitelist && file->whitelist->rela)
+@@ -117,11 +99,6 @@ static bool ignore_func(struct objtool_file *file, struct symbol *func)
+ return true;
+ }
+
+- /* check if it has a context switching instruction */
+- func_for_each_insn(file, func, insn)
+- if (insn->type == INSN_CONTEXT_SWITCH)
+- return true;
+-
+ return false;
+ }
+
+@@ -159,7 +136,8 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
+ "complete_and_exit",
+ "kvm_spurious_fault",
+ "__reiserfs_panic",
+- "lbug_with_loc"
++ "lbug_with_loc",
++ "fortify_panic",
+ };
+
+ if (func->bind == STB_WEAK)
+@@ -234,6 +212,20 @@ static int dead_end_function(struct objtool_file *file, struct symbol *func)
+ return __dead_end_function(file, func, 0);
+ }
+
++static void clear_insn_state(struct insn_state *state)
++{
++ int i;
++
++ memset(state, 0, sizeof(*state));
++ state->cfa.base = CFI_UNDEFINED;
++ for (i = 0; i < CFI_NUM_REGS; i++) {
++ state->regs[i].base = CFI_UNDEFINED;
++ state->vals[i].base = CFI_UNDEFINED;
++ }
++ state->drap_reg = CFI_UNDEFINED;
++ state->drap_offset = -1;
++}
++
+ /*
+ * Call the arch-specific instruction decoder for all the instructions and add
+ * them to the global instruction list.
+@@ -246,30 +238,42 @@ static int decode_instructions(struct objtool_file *file)
+ struct instruction *insn;
+ int ret;
+
+- list_for_each_entry(sec, &file->elf->sections, list) {
++ for_each_sec(file, sec) {
+
+ if (!(sec->sh.sh_flags & SHF_EXECINSTR))
+ continue;
+
++ if (strcmp(sec->name, ".altinstr_replacement") &&
++ strcmp(sec->name, ".altinstr_aux") &&
++ strncmp(sec->name, ".discard.", 9))
++ sec->text = true;
++
+ for (offset = 0; offset < sec->len; offset += insn->len) {
+ insn = malloc(sizeof(*insn));
++ if (!insn) {
++ WARN("malloc failed");
++ return -1;
++ }
+ memset(insn, 0, sizeof(*insn));
+-
+ INIT_LIST_HEAD(&insn->alts);
++ clear_insn_state(&insn->state);
++
+ insn->sec = sec;
+ insn->offset = offset;
+
+ ret = arch_decode_instruction(file->elf, sec, offset,
+ sec->len - offset,
+ &insn->len, &insn->type,
+- &insn->immediate);
++ &insn->immediate,
++ &insn->stack_op);
+ if (ret)
+- return ret;
++ goto err;
+
+ if (!insn->type || insn->type > INSN_LAST) {
+ WARN_FUNC("invalid instruction type %d",
+ insn->sec, insn->offset, insn->type);
+- return -1;
++ ret = -1;
++ goto err;
+ }
+
+ hash_add(file->insn_hash, &insn->hash, insn->offset);
+@@ -293,10 +297,14 @@ static int decode_instructions(struct objtool_file *file)
+ }
+
+ return 0;
++
++err:
++ free(insn);
++ return ret;
+ }
+
+ /*
+- * Find all uses of the unreachable() macro, which are code path dead ends.
++ * Mark "ud2" instructions and manually annotated dead ends.
+ */
+ static int add_dead_ends(struct objtool_file *file)
+ {
+@@ -305,13 +313,24 @@ static int add_dead_ends(struct objtool_file *file)
+ struct instruction *insn;
+ bool found;
+
+- sec = find_section_by_name(file->elf, ".rela__unreachable");
++ /*
++ * By default, "ud2" is a dead end unless otherwise annotated, because
++ * GCC 7 inserts it for certain divide-by-zero cases.
++ */
++ for_each_insn(file, insn)
++ if (insn->type == INSN_BUG)
++ insn->dead_end = true;
++
++ /*
++ * Check for manually annotated dead ends.
++ */
++ sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
+ if (!sec)
+- return 0;
++ goto reachable;
+
+ list_for_each_entry(rela, &sec->rela_list, list) {
+ if (rela->sym->type != STT_SECTION) {
+- WARN("unexpected relocation symbol type in .rela__unreachable");
++ WARN("unexpected relocation symbol type in %s", sec->name);
+ return -1;
+ }
+ insn = find_insn(file, rela->sym->sec, rela->addend);
+@@ -340,6 +359,48 @@ static int add_dead_ends(struct objtool_file *file)
+ insn->dead_end = true;
+ }
+
++reachable:
++ /*
++ * These manually annotated reachable checks are needed for GCC 4.4,
++ * where the Linux unreachable() macro isn't supported. In that case
++ * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
++ * not a dead end.
++ */
++ sec = find_section_by_name(file->elf, ".rela.discard.reachable");
++ if (!sec)
++ return 0;
++
++ list_for_each_entry(rela, &sec->rela_list, list) {
++ if (rela->sym->type != STT_SECTION) {
++ WARN("unexpected relocation symbol type in %s", sec->name);
++ return -1;
++ }
++ insn = find_insn(file, rela->sym->sec, rela->addend);
++ if (insn)
++ insn = list_prev_entry(insn, list);
++ else if (rela->addend == rela->sym->sec->len) {
++ found = false;
++ list_for_each_entry_reverse(insn, &file->insn_list, list) {
++ if (insn->sec == rela->sym->sec) {
++ found = true;
++ break;
++ }
++ }
++
++ if (!found) {
++ WARN("can't find reachable insn at %s+0x%x",
++ rela->sym->sec->name, rela->addend);
++ return -1;
++ }
++ } else {
++ WARN("can't find reachable insn at %s+0x%x",
++ rela->sym->sec->name, rela->addend);
++ return -1;
++ }
++
++ insn->dead_end = false;
++ }
++
+ return 0;
+ }
+
+@@ -352,7 +413,7 @@ static void add_ignores(struct objtool_file *file)
+ struct section *sec;
+ struct symbol *func;
+
+- list_for_each_entry(sec, &file->elf->sections, list) {
++ for_each_sec(file, sec) {
+ list_for_each_entry(func, &sec->symbol_list, list) {
+ if (func->type != STT_FUNC)
+ continue;
+@@ -361,7 +422,7 @@ static void add_ignores(struct objtool_file *file)
+ continue;
+
+ func_for_each_insn(file, func, insn)
+- insn->visited = true;
++ insn->ignore = true;
+ }
+ }
+ }
+@@ -415,8 +476,7 @@ static int add_jump_destinations(struct objtool_file *file)
+ insn->type != INSN_JUMP_UNCONDITIONAL)
+ continue;
+
+- /* skip ignores */
+- if (insn->visited)
++ if (insn->ignore)
+ continue;
+
+ rela = find_rela_by_dest_range(insn->sec, insn->offset,
+@@ -436,6 +496,7 @@ static int add_jump_destinations(struct objtool_file *file)
+ * disguise, so convert them accordingly.
+ */
+ insn->type = INSN_JUMP_DYNAMIC;
++ insn->retpoline_safe = true;
+ continue;
+ } else {
+ /* sibling call */
+@@ -483,18 +544,15 @@ static int add_call_destinations(struct objtool_file *file)
+ dest_off = insn->offset + insn->len + insn->immediate;
+ insn->call_dest = find_symbol_by_offset(insn->sec,
+ dest_off);
+- /*
+- * FIXME: Thanks to retpolines, it's now considered
+- * normal for a function to call within itself. So
+- * disable this warning for now.
+- */
+-#if 0
+- if (!insn->call_dest) {
+- WARN_FUNC("can't find call dest symbol at offset 0x%lx",
+- insn->sec, insn->offset, dest_off);
++
++ if (!insn->call_dest && !insn->ignore) {
++ WARN_FUNC("unsupported intra-function call",
++ insn->sec, insn->offset);
++ if (retpoline)
++ WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE.");
+ return -1;
+ }
+-#endif
++
+ } else if (rela->sym->type == STT_SECTION) {
+ insn->call_dest = find_symbol_by_offset(rela->sym->sec,
+ rela->addend+4);
+@@ -538,7 +596,7 @@ static int handle_group_alt(struct objtool_file *file,
+ struct instruction *orig_insn,
+ struct instruction **new_insn)
+ {
+- struct instruction *last_orig_insn, *last_new_insn, *insn, *fake_jump;
++ struct instruction *last_orig_insn, *last_new_insn, *insn, *fake_jump = NULL;
+ unsigned long dest_off;
+
+ last_orig_insn = NULL;
+@@ -554,25 +612,30 @@ static int handle_group_alt(struct objtool_file *file,
+ last_orig_insn = insn;
+ }
+
+- if (!next_insn_same_sec(file, last_orig_insn)) {
+- WARN("%s: don't know how to handle alternatives at end of section",
+- special_alt->orig_sec->name);
+- return -1;
+- }
+-
+- fake_jump = malloc(sizeof(*fake_jump));
+- if (!fake_jump) {
+- WARN("malloc failed");
+- return -1;
++ if (next_insn_same_sec(file, last_orig_insn)) {
++ fake_jump = malloc(sizeof(*fake_jump));
++ if (!fake_jump) {
++ WARN("malloc failed");
++ return -1;
++ }
++ memset(fake_jump, 0, sizeof(*fake_jump));
++ INIT_LIST_HEAD(&fake_jump->alts);
++ clear_insn_state(&fake_jump->state);
++
++ fake_jump->sec = special_alt->new_sec;
++ fake_jump->offset = -1;
++ fake_jump->type = INSN_JUMP_UNCONDITIONAL;
++ fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
++ fake_jump->ignore = true;
+ }
+- memset(fake_jump, 0, sizeof(*fake_jump));
+- INIT_LIST_HEAD(&fake_jump->alts);
+- fake_jump->sec = special_alt->new_sec;
+- fake_jump->offset = -1;
+- fake_jump->type = INSN_JUMP_UNCONDITIONAL;
+- fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
+
+ if (!special_alt->new_len) {
++ if (!fake_jump) {
++ WARN("%s: empty alternative at end of section",
++ special_alt->orig_sec->name);
++ return -1;
++ }
++
+ *new_insn = fake_jump;
+ return 0;
+ }
+@@ -585,6 +648,8 @@ static int handle_group_alt(struct objtool_file *file,
+
+ last_new_insn = insn;
+
++ insn->ignore = orig_insn->ignore_alts;
++
+ if (insn->type != INSN_JUMP_CONDITIONAL &&
+ insn->type != INSN_JUMP_UNCONDITIONAL)
+ continue;
+@@ -593,8 +658,14 @@ static int handle_group_alt(struct objtool_file *file,
+ continue;
+
+ dest_off = insn->offset + insn->len + insn->immediate;
+- if (dest_off == special_alt->new_off + special_alt->new_len)
++ if (dest_off == special_alt->new_off + special_alt->new_len) {
++ if (!fake_jump) {
++ WARN("%s: alternative jump to end of section",
++ special_alt->orig_sec->name);
++ return -1;
++ }
+ insn->jump_dest = fake_jump;
++ }
+
+ if (!insn->jump_dest) {
+ WARN_FUNC("can't find alternative jump destination",
+@@ -609,7 +680,8 @@ static int handle_group_alt(struct objtool_file *file,
+ return -1;
+ }
+
+- list_add(&fake_jump->list, &last_new_insn->list);
++ if (fake_jump)
++ list_add(&fake_jump->list, &last_new_insn->list);
+
+ return 0;
+ }
+@@ -656,6 +728,7 @@ static int add_special_section_alts(struct objtool_file *file)
+ return ret;
+
+ list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
++
+ orig_insn = find_insn(file, special_alt->orig_sec,
+ special_alt->orig_off);
+ if (!orig_insn) {
+@@ -665,10 +738,6 @@ static int add_special_section_alts(struct objtool_file *file)
+ goto out;
+ }
+
+- /* Ignore retpoline alternatives. */
+- if (orig_insn->ignore_alts)
+- continue;
+-
+ new_insn = NULL;
+ if (!special_alt->group || special_alt->new_len) {
+ new_insn = find_insn(file, special_alt->new_sec,
+@@ -784,8 +853,14 @@ static int add_switch_table(struct objtool_file *file, struct symbol *func,
+ * This is a fairly uncommon pattern which is new for GCC 6. As of this
+ * writing, there are 11 occurrences of it in the allmodconfig kernel.
+ *
++ * As of GCC 7 there are quite a few more of these and the 'in between' code
++ * is significant. Esp. with KASAN enabled some of the code between the mov
++ * and jmpq uses .rodata itself, which can confuse things.
++ *
+ * TODO: Once we have DWARF CFI and smarter instruction decoding logic,
+ * ensure the same register is used in the mov and jump instructions.
++ *
++ * NOTE: RETPOLINE made it harder still to decode dynamic jumps.
+ */
+ static struct rela *find_switch_table(struct objtool_file *file,
+ struct symbol *func,
+@@ -807,12 +882,25 @@ static struct rela *find_switch_table(struct objtool_file *file,
+ text_rela->addend + 4);
+ if (!rodata_rela)
+ return NULL;
++
+ file->ignore_unreachables = true;
+ return rodata_rela;
+ }
+
+ /* case 3 */
+- func_for_each_insn_continue_reverse(file, func, insn) {
++ /*
++	 * Backward search using the @first_jump_src links; these help avoid
++	 * much of the 'in between' code, which could otherwise confuse the
++	 * search.
++ */
++ for (insn = list_prev_entry(insn, list);
++
++ &insn->list != &file->insn_list &&
++ insn->sec == func->sec &&
++ insn->offset >= func->offset;
++
++ insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
++
+ if (insn->type == INSN_JUMP_DYNAMIC)
+ break;
+
+@@ -836,20 +924,42 @@ static struct rela *find_switch_table(struct objtool_file *file,
+ if (find_symbol_containing(file->rodata, text_rela->addend))
+ continue;
+
+- return find_rela_by_dest(file->rodata, text_rela->addend);
++ rodata_rela = find_rela_by_dest(file->rodata, text_rela->addend);
++ if (!rodata_rela)
++ continue;
++
++ return rodata_rela;
+ }
+
+ return NULL;
+ }
+
++
+ static int add_func_switch_tables(struct objtool_file *file,
+ struct symbol *func)
+ {
+- struct instruction *insn, *prev_jump = NULL;
++ struct instruction *insn, *last = NULL, *prev_jump = NULL;
+ struct rela *rela, *prev_rela = NULL;
+ int ret;
+
+ func_for_each_insn(file, func, insn) {
++ if (!last)
++ last = insn;
++
++ /*
++ * Store back-pointers for unconditional forward jumps such
++ * that find_switch_table() can back-track using those and
++ * avoid some potentially confusing code.
++ */
++ if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
++ insn->offset > last->offset &&
++ insn->jump_dest->offset > insn->offset &&
++ !insn->jump_dest->first_jump_src) {
++
++ insn->jump_dest->first_jump_src = insn;
++ last = insn->jump_dest;
++ }
++
+ if (insn->type != INSN_JUMP_DYNAMIC)
+ continue;
+
+@@ -896,7 +1006,7 @@ static int add_switch_table_alts(struct objtool_file *file)
+ if (!file->rodata || !file->rodata->rela)
+ return 0;
+
+- list_for_each_entry(sec, &file->elf->sections, list) {
++ for_each_sec(file, sec) {
+ list_for_each_entry(func, &sec->symbol_list, list) {
+ if (func->type != STT_FUNC)
+ continue;
+@@ -910,6 +1020,134 @@ static int add_switch_table_alts(struct objtool_file *file)
+ return 0;
+ }
+
++static int read_unwind_hints(struct objtool_file *file)
++{
++ struct section *sec, *relasec;
++ struct rela *rela;
++ struct unwind_hint *hint;
++ struct instruction *insn;
++ struct cfi_reg *cfa;
++ int i;
++
++ sec = find_section_by_name(file->elf, ".discard.unwind_hints");
++ if (!sec)
++ return 0;
++
++ relasec = sec->rela;
++ if (!relasec) {
++ WARN("missing .rela.discard.unwind_hints section");
++ return -1;
++ }
++
++ if (sec->len % sizeof(struct unwind_hint)) {
++ WARN("struct unwind_hint size mismatch");
++ return -1;
++ }
++
++ file->hints = true;
++
++ for (i = 0; i < sec->len / sizeof(struct unwind_hint); i++) {
++ hint = (struct unwind_hint *)sec->data->d_buf + i;
++
++ rela = find_rela_by_dest(sec, i * sizeof(*hint));
++ if (!rela) {
++ WARN("can't find rela for unwind_hints[%d]", i);
++ return -1;
++ }
++
++ insn = find_insn(file, rela->sym->sec, rela->addend);
++ if (!insn) {
++ WARN("can't find insn for unwind_hints[%d]", i);
++ return -1;
++ }
++
++ cfa = &insn->state.cfa;
++
++ if (hint->type == UNWIND_HINT_TYPE_SAVE) {
++ insn->save = true;
++ continue;
++
++ } else if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
++ insn->restore = true;
++ insn->hint = true;
++ continue;
++ }
++
++ insn->hint = true;
++
++ switch (hint->sp_reg) {
++ case ORC_REG_UNDEFINED:
++ cfa->base = CFI_UNDEFINED;
++ break;
++ case ORC_REG_SP:
++ cfa->base = CFI_SP;
++ break;
++ case ORC_REG_BP:
++ cfa->base = CFI_BP;
++ break;
++ case ORC_REG_SP_INDIRECT:
++ cfa->base = CFI_SP_INDIRECT;
++ break;
++ case ORC_REG_R10:
++ cfa->base = CFI_R10;
++ break;
++ case ORC_REG_R13:
++ cfa->base = CFI_R13;
++ break;
++ case ORC_REG_DI:
++ cfa->base = CFI_DI;
++ break;
++ case ORC_REG_DX:
++ cfa->base = CFI_DX;
++ break;
++ default:
++ WARN_FUNC("unsupported unwind_hint sp base reg %d",
++ insn->sec, insn->offset, hint->sp_reg);
++ return -1;
++ }
++
++ cfa->offset = hint->sp_offset;
++ insn->state.type = hint->type;
++ }
++
++ return 0;
++}
++
++static int read_retpoline_hints(struct objtool_file *file)
++{
++ struct section *sec;
++ struct instruction *insn;
++ struct rela *rela;
++
++ sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
++ if (!sec)
++ return 0;
++
++ list_for_each_entry(rela, &sec->rela_list, list) {
++ if (rela->sym->type != STT_SECTION) {
++ WARN("unexpected relocation symbol type in %s", sec->name);
++ return -1;
++ }
++
++ insn = find_insn(file, rela->sym->sec, rela->addend);
++ if (!insn) {
++ WARN("bad .discard.retpoline_safe entry");
++ return -1;
++ }
++
++ if (insn->type != INSN_JUMP_DYNAMIC &&
++ insn->type != INSN_CALL_DYNAMIC) {
++ WARN_FUNC("retpoline_safe hint not an indirect jump/call",
++ insn->sec, insn->offset);
++ return -1;
++ }
++
++ insn->retpoline_safe = true;
++ }
++
++ return 0;
++}
++
+ static int decode_sections(struct objtool_file *file)
+ {
+ int ret;
+@@ -932,11 +1170,11 @@ static int decode_sections(struct objtool_file *file)
+ if (ret)
+ return ret;
+
+- ret = add_call_destinations(file);
++ ret = add_special_section_alts(file);
+ if (ret)
+ return ret;
+
+- ret = add_special_section_alts(file);
++ ret = add_call_destinations(file);
+ if (ret)
+ return ret;
+
+@@ -944,6 +1182,14 @@ static int decode_sections(struct objtool_file *file)
+ if (ret)
+ return ret;
+
++ ret = read_unwind_hints(file);
++ if (ret)
++ return ret;
++
++ ret = read_retpoline_hints(file);
++ if (ret)
++ return ret;
++
+ return 0;
+ }
+
+@@ -957,125 +1203,647 @@ static bool is_fentry_call(struct instruction *insn)
+ return false;
+ }
+
+-static bool has_modified_stack_frame(struct instruction *insn)
++static bool has_modified_stack_frame(struct insn_state *state)
++{
++ int i;
++
++ if (state->cfa.base != initial_func_cfi.cfa.base ||
++ state->cfa.offset != initial_func_cfi.cfa.offset ||
++ state->stack_size != initial_func_cfi.cfa.offset ||
++ state->drap)
++ return true;
++
++ for (i = 0; i < CFI_NUM_REGS; i++)
++ if (state->regs[i].base != initial_func_cfi.regs[i].base ||
++ state->regs[i].offset != initial_func_cfi.regs[i].offset)
++ return true;
++
++ return false;
++}
++
++static bool has_valid_stack_frame(struct insn_state *state)
++{
++ if (state->cfa.base == CFI_BP && state->regs[CFI_BP].base == CFI_CFA &&
++ state->regs[CFI_BP].offset == -16)
++ return true;
++
++ if (state->drap && state->regs[CFI_BP].base == CFI_BP)
++ return true;
++
++ return false;
++}
++
++static int update_insn_state_regs(struct instruction *insn, struct insn_state *state)
+ {
+- return (insn->state & STATE_FP_SAVED) ||
+- (insn->state & STATE_FP_SETUP);
++ struct cfi_reg *cfa = &state->cfa;
++ struct stack_op *op = &insn->stack_op;
++
++ if (cfa->base != CFI_SP)
++ return 0;
++
++ /* push */
++ if (op->dest.type == OP_DEST_PUSH)
++ cfa->offset += 8;
++
++ /* pop */
++ if (op->src.type == OP_SRC_POP)
++ cfa->offset -= 8;
++
++ /* add immediate to sp */
++ if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
++ op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
++ cfa->offset -= op->src.offset;
++
++ return 0;
+ }
+
+-static bool has_valid_stack_frame(struct instruction *insn)
++static void save_reg(struct insn_state *state, unsigned char reg, int base,
++ int offset)
+ {
+- return (insn->state & STATE_FP_SAVED) &&
+- (insn->state & STATE_FP_SETUP);
++ if (arch_callee_saved_reg(reg) &&
++ state->regs[reg].base == CFI_UNDEFINED) {
++ state->regs[reg].base = base;
++ state->regs[reg].offset = offset;
++ }
+ }
+
+-static unsigned int frame_state(unsigned long state)
++static void restore_reg(struct insn_state *state, unsigned char reg)
+ {
+- return (state & (STATE_FP_SAVED | STATE_FP_SETUP));
++ state->regs[reg].base = CFI_UNDEFINED;
++ state->regs[reg].offset = 0;
+ }
+
+ /*
+- * Follow the branch starting at the given instruction, and recursively follow
+- * any other branches (jumps). Meanwhile, track the frame pointer state at
+- * each instruction and validate all the rules described in
+- * tools/objtool/Documentation/stack-validation.txt.
++ * A note about DRAP stack alignment:
++ *
++ * GCC has the concept of a DRAP register, which is used to help keep track of
++ * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP
++ * register. The typical DRAP pattern is:
++ *
++ * 4c 8d 54 24 08 lea 0x8(%rsp),%r10
++ * 48 83 e4 c0 and $0xffffffffffffffc0,%rsp
++ * 41 ff 72 f8 pushq -0x8(%r10)
++ * 55 push %rbp
++ * 48 89 e5 mov %rsp,%rbp
++ * (more pushes)
++ * 41 52 push %r10
++ * ...
++ * 41 5a pop %r10
++ * (more pops)
++ * 5d pop %rbp
++ * 49 8d 62 f8 lea -0x8(%r10),%rsp
++ * c3 retq
++ *
++ * There are some variations in the epilogues, like:
++ *
++ * 5b pop %rbx
++ * 41 5a pop %r10
++ * 41 5c pop %r12
++ * 41 5d pop %r13
++ * 41 5e pop %r14
++ * c9 leaveq
++ * 49 8d 62 f8 lea -0x8(%r10),%rsp
++ * c3 retq
++ *
++ * and:
++ *
++ * 4c 8b 55 e8 mov -0x18(%rbp),%r10
++ * 48 8b 5d e0 mov -0x20(%rbp),%rbx
++ * 4c 8b 65 f0 mov -0x10(%rbp),%r12
++ * 4c 8b 6d f8 mov -0x8(%rbp),%r13
++ * c9 leaveq
++ * 49 8d 62 f8 lea -0x8(%r10),%rsp
++ * c3 retq
++ *
++ * Sometimes r13 is used as the DRAP register, in which case it's saved and
++ * restored beforehand:
++ *
++ * 41 55 push %r13
++ * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13
++ * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp
++ * ...
++ * 49 8d 65 f0 lea -0x10(%r13),%rsp
++ * 41 5d pop %r13
++ * c3 retq
+ */
+-static int validate_branch(struct objtool_file *file,
+- struct instruction *first, unsigned char first_state)
++static int update_insn_state(struct instruction *insn, struct insn_state *state)
+ {
+- struct alternative *alt;
+- struct instruction *insn;
+- struct section *sec;
+- struct symbol *func = NULL;
+- unsigned char state;
+- int ret;
++ struct stack_op *op = &insn->stack_op;
++ struct cfi_reg *cfa = &state->cfa;
++ struct cfi_reg *regs = state->regs;
++
++ /* stack operations don't make sense with an undefined CFA */
++ if (cfa->base == CFI_UNDEFINED) {
++ if (insn->func) {
++ WARN_FUNC("undefined stack state", insn->sec, insn->offset);
++ return -1;
++ }
++ return 0;
++ }
+
+- insn = first;
+- sec = insn->sec;
+- state = first_state;
++ if (state->type == ORC_TYPE_REGS || state->type == ORC_TYPE_REGS_IRET)
++ return update_insn_state_regs(insn, state);
+
+- if (insn->alt_group && list_empty(&insn->alts)) {
+- WARN_FUNC("don't know how to handle branch to middle of alternative instruction group",
+- sec, insn->offset);
+- return 1;
+- }
++ switch (op->dest.type) {
+
+- while (1) {
+- if (file->c_file && insn->func) {
+- if (func && func != insn->func) {
+- WARN("%s() falls through to next function %s()",
+- func->name, insn->func->name);
+- return 1;
+- }
++ case OP_DEST_REG:
++ switch (op->src.type) {
+
+- func = insn->func;
+- }
++ case OP_SRC_REG:
++ if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
++ cfa->base == CFI_SP &&
++ regs[CFI_BP].base == CFI_CFA &&
++ regs[CFI_BP].offset == -cfa->offset) {
+
+- if (insn->visited) {
+- if (frame_state(insn->state) != frame_state(state)) {
+- WARN_FUNC("frame pointer state mismatch",
+- sec, insn->offset);
+- return 1;
++ /* mov %rsp, %rbp */
++ cfa->base = op->dest.reg;
++ state->bp_scratch = false;
+ }
+
+- return 0;
++ else if (op->src.reg == CFI_SP &&
++ op->dest.reg == CFI_BP && state->drap) {
++
++ /* drap: mov %rsp, %rbp */
++ regs[CFI_BP].base = CFI_BP;
++ regs[CFI_BP].offset = -state->stack_size;
++ state->bp_scratch = false;
++ }
++
++ else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
++
++ /*
++ * mov %rsp, %reg
++ *
++ * This is needed for the rare case where GCC
++ * does:
++ *
++ * mov %rsp, %rax
++ * ...
++ * mov %rax, %rsp
++ */
++ state->vals[op->dest.reg].base = CFI_CFA;
++ state->vals[op->dest.reg].offset = -state->stack_size;
++ }
++
++ else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
++ cfa->base == CFI_BP) {
++
++ /*
++ * mov %rbp, %rsp
++ *
++ * Restore the original stack pointer (Clang).
++ */
++ state->stack_size = -state->regs[CFI_BP].offset;
++ }
++
++ else if (op->dest.reg == cfa->base) {
++
++ /* mov %reg, %rsp */
++ if (cfa->base == CFI_SP &&
++ state->vals[op->src.reg].base == CFI_CFA) {
++
++ /*
++ * This is needed for the rare case
++ * where GCC does something dumb like:
++ *
++ * lea 0x8(%rsp), %rcx
++ * ...
++ * mov %rcx, %rsp
++ */
++ cfa->offset = -state->vals[op->src.reg].offset;
++ state->stack_size = cfa->offset;
++
++ } else {
++ cfa->base = CFI_UNDEFINED;
++ cfa->offset = 0;
++ }
++ }
++
++ break;
++
++ case OP_SRC_ADD:
++ if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {
++
++ /* add imm, %rsp */
++ state->stack_size -= op->src.offset;
++ if (cfa->base == CFI_SP)
++ cfa->offset -= op->src.offset;
++ break;
++ }
++
++ if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
++
++ /* lea disp(%rbp), %rsp */
++ state->stack_size = -(op->src.offset + regs[CFI_BP].offset);
++ break;
++ }
++
++ if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
++
++ /* drap: lea disp(%rsp), %drap */
++ state->drap_reg = op->dest.reg;
++
++ /*
++ * lea disp(%rsp), %reg
++ *
++ * This is needed for the rare case where GCC
++ * does something dumb like:
++ *
++ * lea 0x8(%rsp), %rcx
++ * ...
++ * mov %rcx, %rsp
++ */
++ state->vals[op->dest.reg].base = CFI_CFA;
++ state->vals[op->dest.reg].offset = \
++ -state->stack_size + op->src.offset;
++
++ break;
++ }
++
++ if (state->drap && op->dest.reg == CFI_SP &&
++ op->src.reg == state->drap_reg) {
++
++ /* drap: lea disp(%drap), %rsp */
++ cfa->base = CFI_SP;
++ cfa->offset = state->stack_size = -op->src.offset;
++ state->drap_reg = CFI_UNDEFINED;
++ state->drap = false;
++ break;
++ }
++
++ if (op->dest.reg == state->cfa.base) {
++ WARN_FUNC("unsupported stack register modification",
++ insn->sec, insn->offset);
++ return -1;
++ }
++
++ break;
++
++ case OP_SRC_AND:
++ if (op->dest.reg != CFI_SP ||
++ (state->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
++ (state->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
++ WARN_FUNC("unsupported stack pointer realignment",
++ insn->sec, insn->offset);
++ return -1;
++ }
++
++ if (state->drap_reg != CFI_UNDEFINED) {
++ /* drap: and imm, %rsp */
++ cfa->base = state->drap_reg;
++ cfa->offset = state->stack_size = 0;
++ state->drap = true;
++ }
++
++ /*
++ * Older versions of GCC (4.8ish) realign the stack
++ * without DRAP, with a frame pointer.
++ */
++
++ break;
++
++ case OP_SRC_POP:
++ if (!state->drap && op->dest.type == OP_DEST_REG &&
++ op->dest.reg == cfa->base) {
++
++ /* pop %rbp */
++ cfa->base = CFI_SP;
++ }
++
++ if (state->drap && cfa->base == CFI_BP_INDIRECT &&
++ op->dest.type == OP_DEST_REG &&
++ op->dest.reg == state->drap_reg &&
++ state->drap_offset == -state->stack_size) {
++
++ /* drap: pop %drap */
++ cfa->base = state->drap_reg;
++ cfa->offset = 0;
++ state->drap_offset = -1;
++
++ } else if (regs[op->dest.reg].offset == -state->stack_size) {
++
++ /* pop %reg */
++ restore_reg(state, op->dest.reg);
++ }
++
++ state->stack_size -= 8;
++ if (cfa->base == CFI_SP)
++ cfa->offset -= 8;
++
++ break;
++
++ case OP_SRC_REG_INDIRECT:
++ if (state->drap && op->src.reg == CFI_BP &&
++ op->src.offset == state->drap_offset) {
++
++ /* drap: mov disp(%rbp), %drap */
++ cfa->base = state->drap_reg;
++ cfa->offset = 0;
++ state->drap_offset = -1;
++ }
++
++ if (state->drap && op->src.reg == CFI_BP &&
++ op->src.offset == regs[op->dest.reg].offset) {
++
++ /* drap: mov disp(%rbp), %reg */
++ restore_reg(state, op->dest.reg);
++
++ } else if (op->src.reg == cfa->base &&
++ op->src.offset == regs[op->dest.reg].offset + cfa->offset) {
++
++ /* mov disp(%rbp), %reg */
++ /* mov disp(%rsp), %reg */
++ restore_reg(state, op->dest.reg);
++ }
++
++ break;
++
++ default:
++ WARN_FUNC("unknown stack-related instruction",
++ insn->sec, insn->offset);
++ return -1;
+ }
+
+- insn->visited = true;
+- insn->state = state;
++ break;
+
+- list_for_each_entry(alt, &insn->alts, list) {
+- ret = validate_branch(file, alt->insn, state);
+- if (ret)
++ case OP_DEST_PUSH:
++ state->stack_size += 8;
++ if (cfa->base == CFI_SP)
++ cfa->offset += 8;
++
++ if (op->src.type != OP_SRC_REG)
++ break;
++
++ if (state->drap) {
++ if (op->src.reg == cfa->base && op->src.reg == state->drap_reg) {
++
++ /* drap: push %drap */
++ cfa->base = CFI_BP_INDIRECT;
++ cfa->offset = -state->stack_size;
++
++ /* save drap so we know when to restore it */
++ state->drap_offset = -state->stack_size;
++
++ } else if (op->src.reg == CFI_BP && cfa->base == state->drap_reg) {
++
++ /* drap: push %rbp */
++ state->stack_size = 0;
++
++ } else if (regs[op->src.reg].base == CFI_UNDEFINED) {
++
++ /* drap: push %reg */
++ save_reg(state, op->src.reg, CFI_BP, -state->stack_size);
++ }
++
++ } else {
++
++ /* push %reg */
++ save_reg(state, op->src.reg, CFI_CFA, -state->stack_size);
++ }
++
++ /* detect when asm code uses rbp as a scratch register */
++ if (!no_fp && insn->func && op->src.reg == CFI_BP &&
++ cfa->base != CFI_BP)
++ state->bp_scratch = true;
++ break;
++
++ case OP_DEST_REG_INDIRECT:
++
++ if (state->drap) {
++ if (op->src.reg == cfa->base && op->src.reg == state->drap_reg) {
++
++ /* drap: mov %drap, disp(%rbp) */
++ cfa->base = CFI_BP_INDIRECT;
++ cfa->offset = op->dest.offset;
++
++ /* save drap offset so we know when to restore it */
++ state->drap_offset = op->dest.offset;
++ }
++
++ else if (regs[op->src.reg].base == CFI_UNDEFINED) {
++
++ /* drap: mov reg, disp(%rbp) */
++ save_reg(state, op->src.reg, CFI_BP, op->dest.offset);
++ }
++
++ } else if (op->dest.reg == cfa->base) {
++
++ /* mov reg, disp(%rbp) */
++ /* mov reg, disp(%rsp) */
++ save_reg(state, op->src.reg, CFI_CFA,
++ op->dest.offset - state->cfa.offset);
++ }
++
++ break;
++
++ case OP_DEST_LEAVE:
++ if ((!state->drap && cfa->base != CFI_BP) ||
++ (state->drap && cfa->base != state->drap_reg)) {
++ WARN_FUNC("leave instruction with modified stack frame",
++ insn->sec, insn->offset);
++ return -1;
++ }
++
++ /* leave (mov %rbp, %rsp; pop %rbp) */
++
++ state->stack_size = -state->regs[CFI_BP].offset - 8;
++ restore_reg(state, CFI_BP);
++
++ if (!state->drap) {
++ cfa->base = CFI_SP;
++ cfa->offset -= 8;
++ }
++
++ break;
++
++ case OP_DEST_MEM:
++ if (op->src.type != OP_SRC_POP) {
++ WARN_FUNC("unknown stack-related memory operation",
++ insn->sec, insn->offset);
++ return -1;
++ }
++
++ /* pop mem */
++ state->stack_size -= 8;
++ if (cfa->base == CFI_SP)
++ cfa->offset -= 8;
++
++ break;
++
++ default:
++ WARN_FUNC("unknown stack-related instruction",
++ insn->sec, insn->offset);
++ return -1;
++ }
++
++ return 0;
++}
++
++static bool insn_state_match(struct instruction *insn, struct insn_state *state)
++{
++ struct insn_state *state1 = &insn->state, *state2 = state;
++ int i;
++
++ if (memcmp(&state1->cfa, &state2->cfa, sizeof(state1->cfa))) {
++ WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
++ insn->sec, insn->offset,
++ state1->cfa.base, state1->cfa.offset,
++ state2->cfa.base, state2->cfa.offset);
++
++ } else if (memcmp(&state1->regs, &state2->regs, sizeof(state1->regs))) {
++ for (i = 0; i < CFI_NUM_REGS; i++) {
++ if (!memcmp(&state1->regs[i], &state2->regs[i],
++ sizeof(struct cfi_reg)))
++ continue;
++
++ WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
++ insn->sec, insn->offset,
++ i, state1->regs[i].base, state1->regs[i].offset,
++ i, state2->regs[i].base, state2->regs[i].offset);
++ break;
++ }
++
++ } else if (state1->type != state2->type) {
++ WARN_FUNC("stack state mismatch: type1=%d type2=%d",
++ insn->sec, insn->offset, state1->type, state2->type);
++
++ } else if (state1->drap != state2->drap ||
++ (state1->drap && state1->drap_reg != state2->drap_reg) ||
++ (state1->drap && state1->drap_offset != state2->drap_offset)) {
++ WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
++ insn->sec, insn->offset,
++ state1->drap, state1->drap_reg, state1->drap_offset,
++ state2->drap, state2->drap_reg, state2->drap_offset);
++
++ } else
++ return true;
++
++ return false;
++}
++
++/*
++ * Follow the branch starting at the given instruction, and recursively follow
++ * any other branches (jumps). Meanwhile, track the frame pointer state at
++ * each instruction and validate all the rules described in
++ * tools/objtool/Documentation/stack-validation.txt.
++ */
++static int validate_branch(struct objtool_file *file, struct instruction *first,
++ struct insn_state state)
++{
++ struct alternative *alt;
++ struct instruction *insn, *next_insn;
++ struct section *sec;
++ struct symbol *func = NULL;
++ int ret;
++
++ insn = first;
++ sec = insn->sec;
++
++ if (insn->alt_group && list_empty(&insn->alts)) {
++ WARN_FUNC("don't know how to handle branch to middle of alternative instruction group",
++ sec, insn->offset);
++ return 1;
++ }
++
++ while (1) {
++ next_insn = next_insn_same_sec(file, insn);
++
++
++ if (file->c_file && func && insn->func && func != insn->func) {
++ WARN("%s() falls through to next function %s()",
++ func->name, insn->func->name);
++ return 1;
++ }
++
++ if (insn->func)
++ func = insn->func;
++
++ if (func && insn->ignore) {
++ WARN_FUNC("BUG: why am I validating an ignored function?",
++ sec, insn->offset);
++ return 1;
++ }
++
++ if (insn->visited) {
++ if (!insn->hint && !insn_state_match(insn, &state))
+ return 1;
++
++ return 0;
+ }
+
+- switch (insn->type) {
++ if (insn->hint) {
++ if (insn->restore) {
++ struct instruction *save_insn, *i;
++
++ i = insn;
++ save_insn = NULL;
++ func_for_each_insn_continue_reverse(file, func, i) {
++ if (i->save) {
++ save_insn = i;
++ break;
++ }
++ }
+
+- case INSN_FP_SAVE:
+- if (!nofp) {
+- if (state & STATE_FP_SAVED) {
+- WARN_FUNC("duplicate frame pointer save",
++ if (!save_insn) {
++ WARN_FUNC("no corresponding CFI save for CFI restore",
+ sec, insn->offset);
+ return 1;
+ }
+- state |= STATE_FP_SAVED;
+- }
+- break;
+
+- case INSN_FP_SETUP:
+- if (!nofp) {
+- if (state & STATE_FP_SETUP) {
+- WARN_FUNC("duplicate frame pointer setup",
++ if (!save_insn->visited) {
++ /*
++ * Oops, no state to copy yet.
++ * Hopefully we can reach this
++ * instruction from another branch
++ * after the save insn has been
++ * visited.
++ */
++ if (insn == first)
++ return 0;
++
++ WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo",
+ sec, insn->offset);
+ return 1;
+ }
+- state |= STATE_FP_SETUP;
++
++ insn->state = save_insn->state;
+ }
+- break;
+
+- case INSN_FP_RESTORE:
+- if (!nofp) {
+- if (has_valid_stack_frame(insn))
+- state &= ~STATE_FP_SETUP;
++ state = insn->state;
++
++ } else
++ insn->state = state;
++
++ insn->visited = true;
+
+- state &= ~STATE_FP_SAVED;
++ if (!insn->ignore_alts) {
++ list_for_each_entry(alt, &insn->alts, list) {
++ ret = validate_branch(file, alt->insn, state);
++ if (ret)
++ return 1;
+ }
+- break;
++ }
++
++ switch (insn->type) {
+
+ case INSN_RETURN:
+- if (!nofp && has_modified_stack_frame(insn)) {
+- WARN_FUNC("return without frame pointer restore",
++ if (func && has_modified_stack_frame(&state)) {
++ WARN_FUNC("return with modified stack frame",
+ sec, insn->offset);
+ return 1;
+ }
++
++ if (state.bp_scratch) {
++ WARN("%s uses BP as a scratch register",
++ insn->func->name);
++ return 1;
++ }
++
+ return 0;
+
+ case INSN_CALL:
+- if (is_fentry_call(insn)) {
+- state |= STATE_FENTRY;
++ if (is_fentry_call(insn))
+ break;
+- }
+
+ ret = dead_end_function(file, insn->call_dest);
+ if (ret == 1)
+@@ -1085,7 +1853,7 @@ static int validate_branch(struct objtool_file *file,
+
+ /* fallthrough */
+ case INSN_CALL_DYNAMIC:
+- if (!nofp && !has_valid_stack_frame(insn)) {
++ if (!no_fp && func && !has_valid_stack_frame(&state)) {
+ WARN_FUNC("call without frame pointer save/setup",
+ sec, insn->offset);
+ return 1;
+@@ -1094,16 +1862,19 @@ static int validate_branch(struct objtool_file *file,
+
+ case INSN_JUMP_CONDITIONAL:
+ case INSN_JUMP_UNCONDITIONAL:
+- if (insn->jump_dest) {
++ if (insn->jump_dest &&
++ (!func || !insn->jump_dest->func ||
++ func == insn->jump_dest->func)) {
+ ret = validate_branch(file, insn->jump_dest,
+ state);
+ if (ret)
+ return 1;
+- } else if (has_modified_stack_frame(insn)) {
+- WARN_FUNC("sibling call from callable instruction with changed frame pointer",
++
++ } else if (func && has_modified_stack_frame(&state)) {
++ WARN_FUNC("sibling call from callable instruction with modified stack frame",
+ sec, insn->offset);
+ return 1;
+- } /* else it's a sibling call */
++ }
+
+ if (insn->type == INSN_JUMP_UNCONDITIONAL)
+ return 0;
+@@ -1111,15 +1882,29 @@ static int validate_branch(struct objtool_file *file,
+ break;
+
+ case INSN_JUMP_DYNAMIC:
+- if (list_empty(&insn->alts) &&
+- has_modified_stack_frame(insn)) {
+- WARN_FUNC("sibling call from callable instruction with changed frame pointer",
++ if (func && list_empty(&insn->alts) &&
++ has_modified_stack_frame(&state)) {
++ WARN_FUNC("sibling call from callable instruction with modified stack frame",
+ sec, insn->offset);
+ return 1;
+ }
+
+ return 0;
+
++ case INSN_CONTEXT_SWITCH:
++ if (func && (!next_insn || !next_insn->hint)) {
++ WARN_FUNC("unsupported instruction in callable function",
++ sec, insn->offset);
++ return 1;
++ }
++ return 0;
++
++ case INSN_STACK:
++ if (update_insn_state(insn, &state))
++ return 1;
++
++ break;
++
+ default:
+ break;
+ }
+@@ -1127,16 +1912,72 @@ static int validate_branch(struct objtool_file *file,
+ if (insn->dead_end)
+ return 0;
+
+- insn = next_insn_same_sec(file, insn);
+- if (!insn) {
++ if (!next_insn) {
++ if (state.cfa.base == CFI_UNDEFINED)
++ return 0;
+ WARN("%s: unexpected end of section", sec->name);
+ return 1;
+ }
++
++ insn = next_insn;
+ }
+
+ return 0;
+ }
+
++static int validate_unwind_hints(struct objtool_file *file)
++{
++ struct instruction *insn;
++ int ret, warnings = 0;
++ struct insn_state state;
++
++ if (!file->hints)
++ return 0;
++
++ clear_insn_state(&state);
++
++ for_each_insn(file, insn) {
++ if (insn->hint && !insn->visited) {
++ ret = validate_branch(file, insn, state);
++ warnings += ret;
++ }
++ }
++
++ return warnings;
++}
++
++static int validate_retpoline(struct objtool_file *file)
++{
++ struct instruction *insn;
++ int warnings = 0;
++
++ for_each_insn(file, insn) {
++ if (insn->type != INSN_JUMP_DYNAMIC &&
++ insn->type != INSN_CALL_DYNAMIC)
++ continue;
++
++ if (insn->retpoline_safe)
++ continue;
++
++ /*
++		 * .init.text code is run before userspace and thus doesn't
++		 * strictly need retpolines, except for modules: they are
++		 * loaded late and very much do need retpolines in their
++		 * .init.text.
++ */
++ if (!strcmp(insn->sec->name, ".init.text") && !module)
++ continue;
++
++ WARN_FUNC("indirect %s found in RETPOLINE build",
++ insn->sec, insn->offset,
++ insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
++
++ warnings++;
++ }
++
++ return warnings;
++}
++
+ static bool is_kasan_insn(struct instruction *insn)
+ {
+ return (insn->type == INSN_CALL &&
+@@ -1150,12 +1991,23 @@ static bool is_ubsan_insn(struct instruction *insn)
+ "__ubsan_handle_builtin_unreachable"));
+ }
+
+-static bool ignore_unreachable_insn(struct symbol *func,
+- struct instruction *insn)
++static bool ignore_unreachable_insn(struct instruction *insn)
+ {
+ int i;
+
+- if (insn->type == INSN_NOP)
++ if (insn->ignore || insn->type == INSN_NOP)
++ return true;
++
++ /*
++ * Ignore any unused exceptions. This can happen when a whitelisted
++ * function has an exception table entry.
++ *
++ * Also ignore alternative replacement instructions. This can happen
++ * when a whitelisted function uses one of the ALTERNATIVE macros.
++ */
++ if (!strcmp(insn->sec->name, ".fixup") ||
++ !strcmp(insn->sec->name, ".altinstr_replacement") ||
++ !strcmp(insn->sec->name, ".altinstr_aux"))
+ return true;
+
+ /*
+@@ -1164,18 +2016,26 @@ static bool ignore_unreachable_insn(struct symbol *func,
+ *
+ * End the search at 5 instructions to avoid going into the weeds.
+ */
++ if (!insn->func)
++ return false;
+ for (i = 0; i < 5; i++) {
+
+ if (is_kasan_insn(insn) || is_ubsan_insn(insn))
+ return true;
+
+- if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest) {
+- insn = insn->jump_dest;
+- continue;
++ if (insn->type == INSN_JUMP_UNCONDITIONAL) {
++ if (insn->jump_dest &&
++ insn->jump_dest->func == insn->func) {
++ insn = insn->jump_dest;
++ continue;
++ }
++
++ break;
+ }
+
+- if (insn->offset + insn->len >= func->offset + func->len)
++ if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
+ break;
++
+ insn = list_next_entry(insn, list);
+ }
+
+@@ -1187,81 +2047,49 @@ static int validate_functions(struct objtool_file *file)
+ struct section *sec;
+ struct symbol *func;
+ struct instruction *insn;
++ struct insn_state state;
+ int ret, warnings = 0;
+
+- list_for_each_entry(sec, &file->elf->sections, list) {
++ clear_insn_state(&state);
++
++ state.cfa = initial_func_cfi.cfa;
++ memcpy(&state.regs, &initial_func_cfi.regs,
++ CFI_NUM_REGS * sizeof(struct cfi_reg));
++ state.stack_size = initial_func_cfi.cfa.offset;
++
++ for_each_sec(file, sec) {
+ list_for_each_entry(func, &sec->symbol_list, list) {
+ if (func->type != STT_FUNC)
+ continue;
+
+ insn = find_insn(file, sec, func->offset);
+- if (!insn)
++ if (!insn || insn->ignore)
+ continue;
+
+- ret = validate_branch(file, insn, 0);
++ ret = validate_branch(file, insn, state);
+ warnings += ret;
+ }
+ }
+
+- list_for_each_entry(sec, &file->elf->sections, list) {
+- list_for_each_entry(func, &sec->symbol_list, list) {
+- if (func->type != STT_FUNC)
+- continue;
+-
+- func_for_each_insn(file, func, insn) {
+- if (insn->visited)
+- continue;
+-
+- insn->visited = true;
+-
+- if (file->ignore_unreachables || warnings ||
+- ignore_unreachable_insn(func, insn))
+- continue;
+-
+- /*
+- * gcov produces a lot of unreachable
+- * instructions. If we get an unreachable
+- * warning and the file has gcov enabled, just
+- * ignore it, and all other such warnings for
+- * the file.
+- */
+- if (!file->ignore_unreachables &&
+- gcov_enabled(file)) {
+- file->ignore_unreachables = true;
+- continue;
+- }
+-
+- WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
+- warnings++;
+- }
+- }
+- }
+-
+ return warnings;
+ }
+
+-static int validate_uncallable_instructions(struct objtool_file *file)
++static int validate_reachable_instructions(struct objtool_file *file)
+ {
+ struct instruction *insn;
+- int warnings = 0;
+
+- for_each_insn(file, insn) {
+- if (!insn->visited && insn->type == INSN_RETURN) {
++ if (file->ignore_unreachables)
++ return 0;
+
+- /*
+- * Don't warn about call instructions in unvisited
+- * retpoline alternatives.
+- */
+- if (!strcmp(insn->sec->name, ".altinstr_replacement"))
+- continue;
++ for_each_insn(file, insn) {
++ if (insn->visited || ignore_unreachable_insn(insn))
++ continue;
+
+- WARN_FUNC("return instruction outside of a callable function",
+- insn->sec, insn->offset);
+- warnings++;
+- }
++ WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
++ return 1;
+ }
+
+- return warnings;
++ return 0;
+ }
+
+ static void cleanup(struct objtool_file *file)
+@@ -1281,42 +2109,73 @@ static void cleanup(struct objtool_file *file)
+ elf_close(file->elf);
+ }
+
+-int check(const char *_objname, bool _nofp)
++int check(const char *_objname, bool orc)
+ {
+ struct objtool_file file;
+ int ret, warnings = 0;
+
+ objname = _objname;
+- nofp = _nofp;
+
+- file.elf = elf_open(objname);
+- if (!file.elf) {
+- fprintf(stderr, "error reading elf file %s\n", objname);
++ file.elf = elf_open(objname, orc ? O_RDWR : O_RDONLY);
++ if (!file.elf)
+ return 1;
+- }
+
+ INIT_LIST_HEAD(&file.insn_list);
+ hash_init(file.insn_hash);
+ file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard");
+ file.rodata = find_section_by_name(file.elf, ".rodata");
+- file.ignore_unreachables = false;
+ file.c_file = find_section_by_name(file.elf, ".comment");
++ file.ignore_unreachables = no_unreachable;
++ file.hints = false;
++
++ arch_initial_func_cfi_state(&initial_func_cfi);
+
+ ret = decode_sections(&file);
+ if (ret < 0)
+ goto out;
+ warnings += ret;
+
++ if (list_empty(&file.insn_list))
++ goto out;
++
++ if (retpoline) {
++ ret = validate_retpoline(&file);
++ if (ret < 0)
++ return ret;
++ warnings += ret;
++ }
++
+ ret = validate_functions(&file);
+ if (ret < 0)
+ goto out;
+ warnings += ret;
+
+- ret = validate_uncallable_instructions(&file);
++ ret = validate_unwind_hints(&file);
+ if (ret < 0)
+ goto out;
+ warnings += ret;
+
++ if (!warnings) {
++ ret = validate_reachable_instructions(&file);
++ if (ret < 0)
++ goto out;
++ warnings += ret;
++ }
++
++ if (orc) {
++ ret = create_orc(&file);
++ if (ret < 0)
++ goto out;
++
++ ret = create_orc_sections(&file);
++ if (ret < 0)
++ goto out;
++
++ ret = elf_write(file.elf);
++ if (ret < 0)
++ goto out;
++ }
++
+ out:
+ cleanup(&file);
+
+diff --git a/tools/objtool/check.h b/tools/objtool/check.h
+index aca248a..c6b68fc 100644
+--- a/tools/objtool/check.h
++++ b/tools/objtool/check.h
+@@ -20,22 +20,40 @@
+
+ #include <stdbool.h>
+ #include "elf.h"
++#include "cfi.h"
+ #include "arch.h"
++#include "orc.h"
+ #include <linux/hashtable.h>
+
++struct insn_state {
++ struct cfi_reg cfa;
++ struct cfi_reg regs[CFI_NUM_REGS];
++ int stack_size;
++ unsigned char type;
++ bool bp_scratch;
++ bool drap;
++ int drap_reg, drap_offset;
++ struct cfi_reg vals[CFI_NUM_REGS];
++};
++
+ struct instruction {
+ struct list_head list;
+ struct hlist_node hash;
+ struct section *sec;
+ unsigned long offset;
+- unsigned int len, state;
++ unsigned int len;
+ unsigned char type;
+ unsigned long immediate;
+- bool alt_group, visited, dead_end, ignore_alts;
++ bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
++ bool retpoline_safe;
+ struct symbol *call_dest;
+ struct instruction *jump_dest;
++ struct instruction *first_jump_src;
+ struct list_head alts;
+ struct symbol *func;
++ struct stack_op stack_op;
++ struct insn_state state;
++ struct orc_entry orc;
+ };
+
+ struct objtool_file {
+@@ -43,9 +61,22 @@ struct objtool_file {
+ struct list_head insn_list;
+ DECLARE_HASHTABLE(insn_hash, 16);
+ struct section *rodata, *whitelist;
+- bool ignore_unreachables, c_file;
++ bool ignore_unreachables, c_file, hints;
+ };
+
+-int check(const char *objname, bool nofp);
++int check(const char *objname, bool orc);
++
++struct instruction *find_insn(struct objtool_file *file,
++ struct section *sec, unsigned long offset);
++
++#define for_each_insn(file, insn) \
++ list_for_each_entry(insn, &file->insn_list, list)
++
++#define sec_for_each_insn(file, sec, insn) \
++ for (insn = find_insn(file, sec, 0); \
++ insn && &insn->list != &file->insn_list && \
++ insn->sec == sec; \
++ insn = list_next_entry(insn, list))
++
+
+ #endif /* _CHECK_H */
+diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
+index 14a74d4..b31b7a6 100644
+--- a/tools/objtool/elf.c
++++ b/tools/objtool/elf.c
+@@ -31,13 +31,6 @@
+ #include "elf.h"
+ #include "warn.h"
+
+-/*
+- * Fallback for systems without this "read, mmaping if possible" cmd.
+- */
+-#ifndef ELF_C_READ_MMAP
+-#define ELF_C_READ_MMAP ELF_C_READ
+-#endif
+-
+ struct section *find_section_by_name(struct elf *elf, const char *name)
+ {
+ struct section *sec;
+@@ -128,12 +121,12 @@ static int read_sections(struct elf *elf)
+ int i;
+
+ if (elf_getshdrnum(elf->elf, &sections_nr)) {
+- perror("elf_getshdrnum");
++ WARN_ELF("elf_getshdrnum");
+ return -1;
+ }
+
+ if (elf_getshdrstrndx(elf->elf, &shstrndx)) {
+- perror("elf_getshdrstrndx");
++ WARN_ELF("elf_getshdrstrndx");
+ return -1;
+ }
+
+@@ -154,37 +147,37 @@ static int read_sections(struct elf *elf)
+
+ s = elf_getscn(elf->elf, i);
+ if (!s) {
+- perror("elf_getscn");
++ WARN_ELF("elf_getscn");
+ return -1;
+ }
+
+ sec->idx = elf_ndxscn(s);
+
+ if (!gelf_getshdr(s, &sec->sh)) {
+- perror("gelf_getshdr");
++ WARN_ELF("gelf_getshdr");
+ return -1;
+ }
+
+ sec->name = elf_strptr(elf->elf, shstrndx, sec->sh.sh_name);
+ if (!sec->name) {
+- perror("elf_strptr");
+- return -1;
+- }
+-
+- sec->elf_data = elf_getdata(s, NULL);
+- if (!sec->elf_data) {
+- perror("elf_getdata");
++ WARN_ELF("elf_strptr");
+ return -1;
+ }
+
+- if (sec->elf_data->d_off != 0 ||
+- sec->elf_data->d_size != sec->sh.sh_size) {
+- WARN("unexpected data attributes for %s", sec->name);
+- return -1;
++ if (sec->sh.sh_size != 0) {
++ sec->data = elf_getdata(s, NULL);
++ if (!sec->data) {
++ WARN_ELF("elf_getdata");
++ return -1;
++ }
++ if (sec->data->d_off != 0 ||
++ sec->data->d_size != sec->sh.sh_size) {
++ WARN("unexpected data attributes for %s",
++ sec->name);
++ return -1;
++ }
+ }
+-
+- sec->data = (unsigned long)sec->elf_data->d_buf;
+- sec->len = sec->elf_data->d_size;
++ sec->len = sec->sh.sh_size;
+ }
+
+ /* sanity check, one more call to elf_nextscn() should return NULL */
+@@ -221,15 +214,15 @@ static int read_symbols(struct elf *elf)
+
+ sym->idx = i;
+
+- if (!gelf_getsym(symtab->elf_data, i, &sym->sym)) {
+- perror("gelf_getsym");
++ if (!gelf_getsym(symtab->data, i, &sym->sym)) {
++ WARN_ELF("gelf_getsym");
+ goto err;
+ }
+
+ sym->name = elf_strptr(elf->elf, symtab->sh.sh_link,
+ sym->sym.st_name);
+ if (!sym->name) {
+- perror("elf_strptr");
++ WARN_ELF("elf_strptr");
+ goto err;
+ }
+
+@@ -311,8 +304,8 @@ static int read_relas(struct elf *elf)
+ }
+ memset(rela, 0, sizeof(*rela));
+
+- if (!gelf_getrela(sec->elf_data, i, &rela->rela)) {
+- perror("gelf_getrela");
++ if (!gelf_getrela(sec->data, i, &rela->rela)) {
++ WARN_ELF("gelf_getrela");
+ return -1;
+ }
+
+@@ -336,9 +329,10 @@ static int read_relas(struct elf *elf)
+ return 0;
+ }
+
+-struct elf *elf_open(const char *name)
++struct elf *elf_open(const char *name, int flags)
+ {
+ struct elf *elf;
++ Elf_Cmd cmd;
+
+ elf_version(EV_CURRENT);
+
+@@ -351,27 +345,28 @@ struct elf *elf_open(const char *name)
+
+ INIT_LIST_HEAD(&elf->sections);
+
+- elf->name = strdup(name);
+- if (!elf->name) {
+- perror("strdup");
+- goto err;
+- }
+-
+- elf->fd = open(name, O_RDONLY);
++ elf->fd = open(name, flags);
+ if (elf->fd == -1) {
+ fprintf(stderr, "objtool: Can't open '%s': %s\n",
+ name, strerror(errno));
+ goto err;
+ }
+
+- elf->elf = elf_begin(elf->fd, ELF_C_READ_MMAP, NULL);
++ if ((flags & O_ACCMODE) == O_RDONLY)
++ cmd = ELF_C_READ_MMAP;
++ else if ((flags & O_ACCMODE) == O_RDWR)
++ cmd = ELF_C_RDWR;
++ else /* O_WRONLY */
++ cmd = ELF_C_WRITE;
++
++ elf->elf = elf_begin(elf->fd, cmd, NULL);
+ if (!elf->elf) {
+- perror("elf_begin");
++ WARN_ELF("elf_begin");
+ goto err;
+ }
+
+ if (!gelf_getehdr(elf->elf, &elf->ehdr)) {
+- perror("gelf_getehdr");
++ WARN_ELF("gelf_getehdr");
+ goto err;
+ }
+
+@@ -391,12 +386,212 @@ struct elf *elf_open(const char *name)
+ return NULL;
+ }
+
++struct section *elf_create_section(struct elf *elf, const char *name,
++ size_t entsize, int nr)
++{
++ struct section *sec, *shstrtab;
++ size_t size = entsize * nr;
++ struct Elf_Scn *s;
++ Elf_Data *data;
++
++ sec = malloc(sizeof(*sec));
++ if (!sec) {
++ perror("malloc");
++ return NULL;
++ }
++ memset(sec, 0, sizeof(*sec));
++
++ INIT_LIST_HEAD(&sec->symbol_list);
++ INIT_LIST_HEAD(&sec->rela_list);
++ hash_init(sec->rela_hash);
++ hash_init(sec->symbol_hash);
++
++ list_add_tail(&sec->list, &elf->sections);
++
++ s = elf_newscn(elf->elf);
++ if (!s) {
++ WARN_ELF("elf_newscn");
++ return NULL;
++ }
++
++ sec->name = strdup(name);
++ if (!sec->name) {
++ perror("strdup");
++ return NULL;
++ }
++
++ sec->idx = elf_ndxscn(s);
++ sec->len = size;
++ sec->changed = true;
++
++ sec->data = elf_newdata(s);
++ if (!sec->data) {
++ WARN_ELF("elf_newdata");
++ return NULL;
++ }
++
++ sec->data->d_size = size;
++ sec->data->d_align = 1;
++
++ if (size) {
++ sec->data->d_buf = malloc(size);
++ if (!sec->data->d_buf) {
++ perror("malloc");
++ return NULL;
++ }
++ memset(sec->data->d_buf, 0, size);
++ }
++
++ if (!gelf_getshdr(s, &sec->sh)) {
++ WARN_ELF("gelf_getshdr");
++ return NULL;
++ }
++
++ sec->sh.sh_size = size;
++ sec->sh.sh_entsize = entsize;
++ sec->sh.sh_type = SHT_PROGBITS;
++ sec->sh.sh_addralign = 1;
++ sec->sh.sh_flags = SHF_ALLOC;
++
++
++ /* Add section name to .shstrtab */
++ shstrtab = find_section_by_name(elf, ".shstrtab");
++ if (!shstrtab) {
++ WARN("can't find .shstrtab section");
++ return NULL;
++ }
++
++ s = elf_getscn(elf->elf, shstrtab->idx);
++ if (!s) {
++ WARN_ELF("elf_getscn");
++ return NULL;
++ }
++
++ data = elf_newdata(s);
++ if (!data) {
++ WARN_ELF("elf_newdata");
++ return NULL;
++ }
++
++ data->d_buf = sec->name;
++ data->d_size = strlen(name) + 1;
++ data->d_align = 1;
++
++ sec->sh.sh_name = shstrtab->len;
++
++ shstrtab->len += strlen(name) + 1;
++ shstrtab->changed = true;
++
++ return sec;
++}
++
++struct section *elf_create_rela_section(struct elf *elf, struct section *base)
++{
++ char *relaname;
++ struct section *sec;
++
++ relaname = malloc(strlen(base->name) + strlen(".rela") + 1);
++ if (!relaname) {
++ perror("malloc");
++ return NULL;
++ }
++ strcpy(relaname, ".rela");
++ strcat(relaname, base->name);
++
++ sec = elf_create_section(elf, relaname, sizeof(GElf_Rela), 0);
++ free(relaname);
++ if (!sec)
++ return NULL;
++
++ base->rela = sec;
++ sec->base = base;
++
++ sec->sh.sh_type = SHT_RELA;
++ sec->sh.sh_addralign = 8;
++ sec->sh.sh_link = find_section_by_name(elf, ".symtab")->idx;
++ sec->sh.sh_info = base->idx;
++ sec->sh.sh_flags = SHF_INFO_LINK;
++
++ return sec;
++}
++
++int elf_rebuild_rela_section(struct section *sec)
++{
++ struct rela *rela;
++ int nr, idx = 0, size;
++ GElf_Rela *relas;
++
++ nr = 0;
++ list_for_each_entry(rela, &sec->rela_list, list)
++ nr++;
++
++ size = nr * sizeof(*relas);
++ relas = malloc(size);
++ if (!relas) {
++ perror("malloc");
++ return -1;
++ }
++
++ sec->data->d_buf = relas;
++ sec->data->d_size = size;
++
++ sec->sh.sh_size = size;
++
++ idx = 0;
++ list_for_each_entry(rela, &sec->rela_list, list) {
++ relas[idx].r_offset = rela->offset;
++ relas[idx].r_addend = rela->addend;
++ relas[idx].r_info = GELF_R_INFO(rela->sym->idx, rela->type);
++ idx++;
++ }
++
++ return 0;
++}
++
++int elf_write(struct elf *elf)
++{
++ struct section *sec;
++ Elf_Scn *s;
++
++ /* Update section headers for changed sections: */
++ list_for_each_entry(sec, &elf->sections, list) {
++ if (sec->changed) {
++ s = elf_getscn(elf->elf, sec->idx);
++ if (!s) {
++ WARN_ELF("elf_getscn");
++ return -1;
++ }
++ if (!gelf_update_shdr(s, &sec->sh)) {
++ WARN_ELF("gelf_update_shdr");
++ return -1;
++ }
++ }
++ }
++
++ /* Make sure the new section header entries get updated properly. */
++ elf_flagelf(elf->elf, ELF_C_SET, ELF_F_DIRTY);
++
++ /* Write all changes to the file. */
++ if (elf_update(elf->elf, ELF_C_WRITE) < 0) {
++ WARN_ELF("elf_update");
++ return -1;
++ }
++
++ return 0;
++}
++
+ void elf_close(struct elf *elf)
+ {
+ struct section *sec, *tmpsec;
+ struct symbol *sym, *tmpsym;
+ struct rela *rela, *tmprela;
+
++ if (elf->elf)
++ elf_end(elf->elf);
++
++ if (elf->fd > 0)
++ close(elf->fd);
++
+ list_for_each_entry_safe(sec, tmpsec, &elf->sections, list) {
+ list_for_each_entry_safe(sym, tmpsym, &sec->symbol_list, list) {
+ list_del(&sym->list);
+@@ -411,11 +606,6 @@ void elf_close(struct elf *elf)
+ list_del(&sec->list);
+ free(sec);
+ }
+- if (elf->name)
+- free(elf->name);
+- if (elf->fd > 0)
+- close(elf->fd);
+- if (elf->elf)
+- elf_end(elf->elf);
++
+ free(elf);
+ }
+diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
+index aa1ff65..440b83b 100644
+--- a/tools/objtool/elf.h
++++ b/tools/objtool/elf.h
+@@ -28,6 +28,13 @@
+ # define elf_getshdrstrndx elf_getshstrndx
+ #endif
+
++/*
++ * Fallback for systems without this "read, mmaping if possible" cmd.
++ */
++#ifndef ELF_C_READ_MMAP
++#define ELF_C_READ_MMAP ELF_C_READ
++#endif
++
+ struct section {
+ struct list_head list;
+ GElf_Shdr sh;
+@@ -37,11 +44,11 @@ struct section {
+ DECLARE_HASHTABLE(rela_hash, 16);
+ struct section *base, *rela;
+ struct symbol *sym;
+- Elf_Data *elf_data;
++ Elf_Data *data;
+ char *name;
+ int idx;
+- unsigned long data;
+ unsigned int len;
++ bool changed, text;
+ };
+
+ struct symbol {
+@@ -76,15 +83,21 @@ struct elf {
+ };
+
+
+-struct elf *elf_open(const char *name);
++struct elf *elf_open(const char *name, int flags);
+ struct section *find_section_by_name(struct elf *elf, const char *name);
+ struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset);
+ struct rela *find_rela_by_dest(struct section *sec, unsigned long offset);
+ struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset,
+ unsigned int len);
+ struct symbol *find_containing_func(struct section *sec, unsigned long offset);
++struct section *elf_create_section(struct elf *elf, const char *name, size_t
++ entsize, int nr);
++struct section *elf_create_rela_section(struct elf *elf, struct section *base);
++int elf_rebuild_rela_section(struct section *sec);
++int elf_write(struct elf *elf);
+ void elf_close(struct elf *elf);
+
+-
++#define for_each_sec(file, sec) \
++ list_for_each_entry(sec, &file->elf->sections, list)
+
+ #endif /* _OBJTOOL_ELF_H */
+diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
+index 46c326d..07f3299 100644
+--- a/tools/objtool/objtool.c
++++ b/tools/objtool/objtool.c
+@@ -31,11 +31,10 @@
+ #include <stdlib.h>
+ #include <subcmd/exec-cmd.h>
+ #include <subcmd/pager.h>
++#include <linux/kernel.h>
+
+ #include "builtin.h"
+
+-#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
+-
+ struct cmd_struct {
+ const char *name;
+ int (*fn)(int, const char **);
+@@ -43,10 +42,11 @@ struct cmd_struct {
+ };
+
+ static const char objtool_usage_string[] =
+- "objtool [OPTIONS] COMMAND [ARGS]";
++ "objtool COMMAND [ARGS]";
+
+ static struct cmd_struct objtool_cmds[] = {
+ {"check", cmd_check, "Perform stack metadata validation on an object file" },
++ {"orc", cmd_orc, "Generate in-place ORC unwind tables for an object file" },
+ };
+
+ bool help;
+@@ -70,7 +70,7 @@ static void cmd_usage(void)
+
+ printf("\n");
+
+- exit(1);
++ exit(129);
+ }
+
+ static void handle_options(int *argc, const char ***argv)
+@@ -86,9 +86,7 @@ static void handle_options(int *argc, const char ***argv)
+ break;
+ } else {
+ fprintf(stderr, "Unknown option: %s\n", cmd);
+- fprintf(stderr, "\n Usage: %s\n",
+- objtool_usage_string);
+- exit(1);
++ cmd_usage();
+ }
+
+ (*argv)++;
+diff --git a/tools/objtool/orc.h b/tools/objtool/orc.h
+new file mode 100644
+index 0000000..b0e92a6
+--- /dev/null
++++ b/tools/objtool/orc.h
+@@ -0,0 +1,30 @@
++/*
++ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2
++ * of the License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _ORC_H
++#define _ORC_H
++
++#include <asm/orc_types.h>
++
++struct objtool_file;
++
++int create_orc(struct objtool_file *file);
++int create_orc_sections(struct objtool_file *file);
++
++int orc_dump(const char *objname);
++
++#endif /* _ORC_H */
+diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
+new file mode 100644
+index 0000000..c334382
+--- /dev/null
++++ b/tools/objtool/orc_dump.c
+@@ -0,0 +1,213 @@
++/*
++ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2
++ * of the License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <unistd.h>
++#include "orc.h"
++#include "warn.h"
++
++static const char *reg_name(unsigned int reg)
++{
++ switch (reg) {
++ case ORC_REG_PREV_SP:
++ return "prevsp";
++ case ORC_REG_DX:
++ return "dx";
++ case ORC_REG_DI:
++ return "di";
++ case ORC_REG_BP:
++ return "bp";
++ case ORC_REG_SP:
++ return "sp";
++ case ORC_REG_R10:
++ return "r10";
++ case ORC_REG_R13:
++ return "r13";
++ case ORC_REG_BP_INDIRECT:
++ return "bp(ind)";
++ case ORC_REG_SP_INDIRECT:
++ return "sp(ind)";
++ default:
++ return "?";
++ }
++}
++
++static const char *orc_type_name(unsigned int type)
++{
++ switch (type) {
++ case ORC_TYPE_CALL:
++ return "call";
++ case ORC_TYPE_REGS:
++ return "regs";
++ case ORC_TYPE_REGS_IRET:
++ return "iret";
++ default:
++ return "?";
++ }
++}
++
++static void print_reg(unsigned int reg, int offset)
++{
++ if (reg == ORC_REG_BP_INDIRECT)
++ printf("(bp%+d)", offset);
++ else if (reg == ORC_REG_SP_INDIRECT)
++ printf("(sp%+d)", offset);
++ else if (reg == ORC_REG_UNDEFINED)
++ printf("(und)");
++ else
++ printf("%s%+d", reg_name(reg), offset);
++}
++
++int orc_dump(const char *_objname)
++{
++ int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0;
++ struct orc_entry *orc = NULL;
++ char *name;
++ size_t nr_sections;
++ Elf64_Addr orc_ip_addr = 0;
++ size_t shstrtab_idx;
++ Elf *elf;
++ Elf_Scn *scn;
++ GElf_Shdr sh;
++ GElf_Rela rela;
++ GElf_Sym sym;
++ Elf_Data *data, *symtab = NULL, *rela_orc_ip = NULL;
++
++
++ objname = _objname;
++
++ elf_version(EV_CURRENT);
++
++ fd = open(objname, O_RDONLY);
++ if (fd == -1) {
++ perror("open");
++ return -1;
++ }
++
++ elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
++ if (!elf) {
++ WARN_ELF("elf_begin");
++ return -1;
++ }
++
++ if (elf_getshdrnum(elf, &nr_sections)) {
++ WARN_ELF("elf_getshdrnum");
++ return -1;
++ }
++
++ if (elf_getshdrstrndx(elf, &shstrtab_idx)) {
++ WARN_ELF("elf_getshdrstrndx");
++ return -1;
++ }
++
++ for (i = 0; i < nr_sections; i++) {
++ scn = elf_getscn(elf, i);
++ if (!scn) {
++ WARN_ELF("elf_getscn");
++ return -1;
++ }
++
++ if (!gelf_getshdr(scn, &sh)) {
++ WARN_ELF("gelf_getshdr");
++ return -1;
++ }
++
++ name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
++ if (!name) {
++ WARN_ELF("elf_strptr");
++ return -1;
++ }
++
++ data = elf_getdata(scn, NULL);
++ if (!data) {
++ WARN_ELF("elf_getdata");
++ return -1;
++ }
++
++ if (!strcmp(name, ".symtab")) {
++ symtab = data;
++ } else if (!strcmp(name, ".orc_unwind")) {
++ orc = data->d_buf;
++ orc_size = sh.sh_size;
++ } else if (!strcmp(name, ".orc_unwind_ip")) {
++ orc_ip = data->d_buf;
++ orc_ip_addr = sh.sh_addr;
++ } else if (!strcmp(name, ".rela.orc_unwind_ip")) {
++ rela_orc_ip = data;
++ }
++ }
++
++ if (!symtab || !orc || !orc_ip)
++ return 0;
++
++ if (orc_size % sizeof(*orc) != 0) {
++ WARN("bad .orc_unwind section size");
++ return -1;
++ }
++
++ nr_entries = orc_size / sizeof(*orc);
++ for (i = 0; i < nr_entries; i++) {
++ if (rela_orc_ip) {
++ if (!gelf_getrela(rela_orc_ip, i, &rela)) {
++ WARN_ELF("gelf_getrela");
++ return -1;
++ }
++
++ if (!gelf_getsym(symtab, GELF_R_SYM(rela.r_info), &sym)) {
++ WARN_ELF("gelf_getsym");
++ return -1;
++ }
++
++ scn = elf_getscn(elf, sym.st_shndx);
++ if (!scn) {
++ WARN_ELF("elf_getscn");
++ return -1;
++ }
++
++ if (!gelf_getshdr(scn, &sh)) {
++ WARN_ELF("gelf_getshdr");
++ return -1;
++ }
++
++ name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
++ if (!name || !*name) {
++ WARN_ELF("elf_strptr");
++ return -1;
++ }
++
++ printf("%s+%llx:", name, (unsigned long long)rela.r_addend);
++
++ } else {
++ printf("%llx:", (unsigned long long)(orc_ip_addr + (i * sizeof(int)) + orc_ip[i]));
++ }
++
++
++ printf(" sp:");
++
++ print_reg(orc[i].sp_reg, orc[i].sp_offset);
++
++ printf(" bp:");
++
++ print_reg(orc[i].bp_reg, orc[i].bp_offset);
++
++ printf(" type:%s\n", orc_type_name(orc[i].type));
++ }
++
++ elf_end(elf);
++ close(fd);
++
++ return 0;
++}
+diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
+new file mode 100644
+index 0000000..18384d9
+--- /dev/null
++++ b/tools/objtool/orc_gen.c
+@@ -0,0 +1,221 @@
++/*
++ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2
++ * of the License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <stdlib.h>
++#include <string.h>
++
++#include "orc.h"
++#include "check.h"
++#include "warn.h"
++
++int create_orc(struct objtool_file *file)
++{
++ struct instruction *insn;
++
++ for_each_insn(file, insn) {
++ struct orc_entry *orc = &insn->orc;
++ struct cfi_reg *cfa = &insn->state.cfa;
++ struct cfi_reg *bp = &insn->state.regs[CFI_BP];
++
++ if (cfa->base == CFI_UNDEFINED) {
++ orc->sp_reg = ORC_REG_UNDEFINED;
++ continue;
++ }
++
++ switch (cfa->base) {
++ case CFI_SP:
++ orc->sp_reg = ORC_REG_SP;
++ break;
++ case CFI_SP_INDIRECT:
++ orc->sp_reg = ORC_REG_SP_INDIRECT;
++ break;
++ case CFI_BP:
++ orc->sp_reg = ORC_REG_BP;
++ break;
++ case CFI_BP_INDIRECT:
++ orc->sp_reg = ORC_REG_BP_INDIRECT;
++ break;
++ case CFI_R10:
++ orc->sp_reg = ORC_REG_R10;
++ break;
++ case CFI_R13:
++ orc->sp_reg = ORC_REG_R13;
++ break;
++ case CFI_DI:
++ orc->sp_reg = ORC_REG_DI;
++ break;
++ case CFI_DX:
++ orc->sp_reg = ORC_REG_DX;
++ break;
++ default:
++ WARN_FUNC("unknown CFA base reg %d",
++ insn->sec, insn->offset, cfa->base);
++ return -1;
++ }
++
++ switch(bp->base) {
++ case CFI_UNDEFINED:
++ orc->bp_reg = ORC_REG_UNDEFINED;
++ break;
++ case CFI_CFA:
++ orc->bp_reg = ORC_REG_PREV_SP;
++ break;
++ case CFI_BP:
++ orc->bp_reg = ORC_REG_BP;
++ break;
++ default:
++ WARN_FUNC("unknown BP base reg %d",
++ insn->sec, insn->offset, bp->base);
++ return -1;
++ }
++
++ orc->sp_offset = cfa->offset;
++ orc->bp_offset = bp->offset;
++ orc->type = insn->state.type;
++ }
++
++ return 0;
++}
++
++static int create_orc_entry(struct section *u_sec, struct section *ip_relasec,
++ unsigned int idx, struct section *insn_sec,
++ unsigned long insn_off, struct orc_entry *o)
++{
++ struct orc_entry *orc;
++ struct rela *rela;
++
++ if (!insn_sec->sym) {
++ WARN("missing symbol for section %s", insn_sec->name);
++ return -1;
++ }
++
++ /* populate ORC data */
++ orc = (struct orc_entry *)u_sec->data->d_buf + idx;
++ memcpy(orc, o, sizeof(*orc));
++
++ /* populate rela for ip */
++ rela = malloc(sizeof(*rela));
++ if (!rela) {
++ perror("malloc");
++ return -1;
++ }
++ memset(rela, 0, sizeof(*rela));
++
++ rela->sym = insn_sec->sym;
++ rela->addend = insn_off;
++ rela->type = R_X86_64_PC32;
++ rela->offset = idx * sizeof(int);
++
++ list_add_tail(&rela->list, &ip_relasec->rela_list);
++ hash_add(ip_relasec->rela_hash, &rela->hash, rela->offset);
++
++ return 0;
++}
++
++int create_orc_sections(struct objtool_file *file)
++{
++ struct instruction *insn, *prev_insn;
++ struct section *sec, *u_sec, *ip_relasec;
++ unsigned int idx;
++
++ struct orc_entry empty = {
++ .sp_reg = ORC_REG_UNDEFINED,
++ .bp_reg = ORC_REG_UNDEFINED,
++ .type = ORC_TYPE_CALL,
++ };
++
++ sec = find_section_by_name(file->elf, ".orc_unwind");
++ if (sec) {
++ WARN("file already has .orc_unwind section, skipping");
++ return -1;
++ }
++
++ /* count the number of needed orcs */
++ idx = 0;
++ for_each_sec(file, sec) {
++ if (!sec->text)
++ continue;
++
++ prev_insn = NULL;
++ sec_for_each_insn(file, sec, insn) {
++ if (!prev_insn ||
++ memcmp(&insn->orc, &prev_insn->orc,
++ sizeof(struct orc_entry))) {
++ idx++;
++ }
++ prev_insn = insn;
++ }
++
++ /* section terminator */
++ if (prev_insn)
++ idx++;
++ }
++ if (!idx)
++ return -1;
++
++
++ /* create .orc_unwind_ip and .rela.orc_unwind_ip sections */
++ sec = elf_create_section(file->elf, ".orc_unwind_ip", sizeof(int), idx);
++ if (!sec)
++ return -1;
++
++ ip_relasec = elf_create_rela_section(file->elf, sec);
++ if (!ip_relasec)
++ return -1;
++
++ /* create .orc_unwind section */
++ u_sec = elf_create_section(file->elf, ".orc_unwind",
++ sizeof(struct orc_entry), idx);
++
++ /* populate sections */
++ idx = 0;
++ for_each_sec(file, sec) {
++ if (!sec->text)
++ continue;
++
++ prev_insn = NULL;
++ sec_for_each_insn(file, sec, insn) {
++ if (!prev_insn || memcmp(&insn->orc, &prev_insn->orc,
++ sizeof(struct orc_entry))) {
++
++ if (create_orc_entry(u_sec, ip_relasec, idx,
++ insn->sec, insn->offset,
++ &insn->orc))
++ return -1;
++
++ idx++;
++ }
++ prev_insn = insn;
++ }
++
++ /* section terminator */
++ if (prev_insn) {
++ if (create_orc_entry(u_sec, ip_relasec, idx,
++ prev_insn->sec,
++ prev_insn->offset + prev_insn->len,
++ &empty))
++ return -1;
++
++ idx++;
++ }
++ }
++
++ if (elf_rebuild_rela_section(ip_relasec))
++ return -1;
++
++ return 0;
++}
+diff --git a/tools/objtool/special.c b/tools/objtool/special.c
+index bff8abb..84f001d 100644
+--- a/tools/objtool/special.c
++++ b/tools/objtool/special.c
+@@ -91,16 +91,16 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
+ alt->jump_or_nop = entry->jump_or_nop;
+
+ if (alt->group) {
+- alt->orig_len = *(unsigned char *)(sec->data + offset +
++ alt->orig_len = *(unsigned char *)(sec->data->d_buf + offset +
+ entry->orig_len);
+- alt->new_len = *(unsigned char *)(sec->data + offset +
++ alt->new_len = *(unsigned char *)(sec->data->d_buf + offset +
+ entry->new_len);
+ }
+
+ if (entry->feature) {
+ unsigned short feature;
+
+- feature = *(unsigned short *)(sec->data + offset +
++ feature = *(unsigned short *)(sec->data->d_buf + offset +
+ entry->feature);
+
+ /*
+diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh
+new file mode 100755
+index 0000000..1470e74
+--- /dev/null
++++ b/tools/objtool/sync-check.sh
+@@ -0,0 +1,29 @@
++#!/bin/sh
++# SPDX-License-Identifier: GPL-2.0
++
++FILES='
++arch/x86/lib/insn.c
++arch/x86/lib/inat.c
++arch/x86/lib/x86-opcode-map.txt
++arch/x86/tools/gen-insn-attr-x86.awk
++arch/x86/include/asm/insn.h
++arch/x86/include/asm/inat.h
++arch/x86/include/asm/inat_types.h
++arch/x86/include/asm/orc_types.h
++'
++
++check()
++{
++ local file=$1
++
++ diff $file ../../$file > /dev/null ||
++ echo "Warning: synced file at 'tools/objtool/$file' differs from latest kernel version at '$file'"
++}
++
++if [ ! -d ../../kernel ] || [ ! -d ../../tools ] || [ ! -d ../objtool ]; then
++ exit 0
++fi
++
++for i in $FILES; do
++ check $i
++done
+diff --git a/tools/objtool/warn.h b/tools/objtool/warn.h
+index ac7e075..afd9f7a 100644
+--- a/tools/objtool/warn.h
++++ b/tools/objtool/warn.h
+@@ -18,6 +18,13 @@
+ #ifndef _WARN_H
+ #define _WARN_H
+
++#include <stdlib.h>
++#include <string.h>
++#include <sys/types.h>
++#include <sys/stat.h>
++#include <fcntl.h>
++#include "elf.h"
++
+ extern const char *objname;
+
+ static inline char *offstr(struct section *sec, unsigned long offset)
+@@ -57,4 +64,7 @@ static inline char *offstr(struct section *sec, unsigned long offset)
+ free(_str); \
+ })
+
++#define WARN_ELF(format, ...) \
++ WARN(format ": %s", ##__VA_ARGS__, elf_errmsg(-1))
++
+ #endif /* _WARN_H */
+--
+2.7.4
+
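The update_insn_state() logic added by the patch above amounts to simple bookkeeping: each push moves the stack pointer down by 8 and, while the CFA is still tracked relative to %rsp, grows the CFA offset by the same amount; once a "mov %rsp, %rbp" is seen the CFA is re-anchored to %rbp and its offset stops moving. The standalone C sketch below models only that bookkeeping for three operations. It is illustrative, not part of the patch; all names (toy_state, toy_push, ...) are hypothetical.

#include <stdio.h>

/* Hypothetical, heavily simplified stand-in for struct insn_state. */
struct toy_state {
	int cfa_is_sp;		/* CFA tracked relative to %rsp (else %rbp) */
	int cfa_offset;		/* CFA = base register + cfa_offset */
	int stack_size;		/* bytes on the stack since function entry */
};

static void toy_push(struct toy_state *s)		/* push %reg */
{
	s->stack_size += 8;
	if (s->cfa_is_sp)
		s->cfa_offset += 8;	/* %rsp moved down, CFA is further away */
}

static void toy_pop(struct toy_state *s)		/* pop %reg */
{
	s->stack_size -= 8;
	if (s->cfa_is_sp)
		s->cfa_offset -= 8;
}

static void toy_mov_rsp_rbp(struct toy_state *s)	/* mov %rsp, %rbp */
{
	s->cfa_is_sp = 0;	/* CFA re-anchored to %rbp, offset stops moving */
}

int main(void)
{
	/* At function entry only the return address is on the stack: CFA = rsp+8. */
	struct toy_state s = { .cfa_is_sp = 1, .cfa_offset = 8, .stack_size = 8 };

	toy_push(&s);		/* push %rbp      -> CFA = rsp+16 */
	toy_mov_rsp_rbp(&s);	/* mov %rsp, %rbp -> CFA = rbp+16 */
	toy_push(&s);		/* push %rbx      -> CFA stays rbp+16 */
	toy_pop(&s);		/* pop %rbx */

	printf("CFA base=%s offset=%d stack_size=%d\n",
	       s.cfa_is_sp ? "sp" : "bp", s.cfa_offset, s.stack_size);
	return 0;
}

Running it prints "CFA base=bp offset=16 stack_size=16", mirroring how the real code keeps the CFA stable against %rbp while pushes and pops continue to change stack_size.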
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0089-objtool-x86-Add-several-functions-and-files-to-the-o.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0089-objtool-x86-Add-several-functions-and-files-to-the-o.patch
new file mode 100644
index 00000000..6bdeb9f8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0089-objtool-x86-Add-several-functions-and-files-to-the-o.patch
@@ -0,0 +1,316 @@
+From 1a39c1b7d192d62e6d4203ea7acfc35eb3317c48 Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Wed, 28 Jun 2017 10:11:06 -0500
+Subject: [PATCH 89/93] objtool, x86: Add several functions and files to the
+ objtool whitelist
+
+commit c207aee48037abca71c669cbec407b9891965c34 upstream.
+
+In preparation for an objtool rewrite which will have broader checks,
+whitelist functions and files which cause problems because they do
+unusual things with the stack.
+
+These whitelists serve as a TODO list for which functions and files
+don't yet have undwarf unwinder coverage. Eventually most of the
+whitelists can be removed in favor of manual CFI hint annotations or
+objtool improvements.
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Jiri Slaby <jslaby@suse.cz>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: live-patching@vger.kernel.org
+Link: http://lkml.kernel.org/r/7f934a5d707a574bda33ea282e9478e627fb1829.1498659915.git.jpoimboe@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/crypto/Makefile | 2 ++
+ arch/x86/crypto/sha1-mb/Makefile | 2 ++
+ arch/x86/crypto/sha256-mb/Makefile | 2 ++
+ arch/x86/kernel/Makefile | 1 +
+ arch/x86/kernel/acpi/Makefile | 2 ++
+ arch/x86/kernel/kprobes/opt.c | 9 ++++++++-
+ arch/x86/kernel/reboot.c | 2 ++
+ arch/x86/kvm/svm.c | 2 ++
+ arch/x86/kvm/vmx.c | 3 +++
+ arch/x86/lib/msr-reg.S | 8 ++++----
+ arch/x86/net/Makefile | 2 ++
+ arch/x86/platform/efi/Makefile | 1 +
+ arch/x86/power/Makefile | 2 ++
+ arch/x86/xen/Makefile | 3 +++
+ kernel/kexec_core.c | 4 +++-
+ 15 files changed, 39 insertions(+), 6 deletions(-)
+
+diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
+index 34b3fa2..9e32d40 100644
+--- a/arch/x86/crypto/Makefile
++++ b/arch/x86/crypto/Makefile
+@@ -2,6 +2,8 @@
+ # Arch-specific CryptoAPI modules.
+ #
+
++OBJECT_FILES_NON_STANDARD := y
++
+ avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no)
+ avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
+ $(comma)4)$(comma)%ymm2,yes,no)
+diff --git a/arch/x86/crypto/sha1-mb/Makefile b/arch/x86/crypto/sha1-mb/Makefile
+index 2f87563..2e14acc 100644
+--- a/arch/x86/crypto/sha1-mb/Makefile
++++ b/arch/x86/crypto/sha1-mb/Makefile
+@@ -2,6 +2,8 @@
+ # Arch-specific CryptoAPI modules.
+ #
+
++OBJECT_FILES_NON_STANDARD := y
++
+ avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
+ $(comma)4)$(comma)%ymm2,yes,no)
+ ifeq ($(avx2_supported),yes)
+diff --git a/arch/x86/crypto/sha256-mb/Makefile b/arch/x86/crypto/sha256-mb/Makefile
+index 41089e7..45b4fca 100644
+--- a/arch/x86/crypto/sha256-mb/Makefile
++++ b/arch/x86/crypto/sha256-mb/Makefile
+@@ -2,6 +2,8 @@
+ # Arch-specific CryptoAPI modules.
+ #
+
++OBJECT_FILES_NON_STANDARD := y
++
+ avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
+ $(comma)4)$(comma)%ymm2,yes,no)
+ ifeq ($(avx2_supported),yes)
+diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
+index 79076d7..4c9c615 100644
+--- a/arch/x86/kernel/Makefile
++++ b/arch/x86/kernel/Makefile
+@@ -29,6 +29,7 @@ OBJECT_FILES_NON_STANDARD_head_$(BITS).o := y
+ OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y
+ OBJECT_FILES_NON_STANDARD_mcount_$(BITS).o := y
+ OBJECT_FILES_NON_STANDARD_test_nx.o := y
++OBJECT_FILES_NON_STANDARD_paravirt_patch_$(BITS).o := y
+
+ # If instrumentation of this dir is enabled, boot hangs during first second.
+ # Probably could be more selective here, but note that files related to irqs,
+diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
+index 26b78d8..85a9e17 100644
+--- a/arch/x86/kernel/acpi/Makefile
++++ b/arch/x86/kernel/acpi/Makefile
+@@ -1,3 +1,5 @@
++OBJECT_FILES_NON_STANDARD_wakeup_$(BITS).o := y
++
+ obj-$(CONFIG_ACPI) += boot.o
+ obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_$(BITS).o
+ obj-$(CONFIG_ACPI_APEI) += apei.o
+diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
+index 90f8cd6..6a7b428 100644
+--- a/arch/x86/kernel/kprobes/opt.c
++++ b/arch/x86/kernel/kprobes/opt.c
+@@ -28,6 +28,7 @@
+ #include <linux/kdebug.h>
+ #include <linux/kallsyms.h>
+ #include <linux/ftrace.h>
++#include <linux/frame.h>
+
+ #include <asm/text-patching.h>
+ #include <asm/cacheflush.h>
+@@ -91,6 +92,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
+ }
+
+ asm (
++ "optprobe_template_func:\n"
+ ".global optprobe_template_entry\n"
+ "optprobe_template_entry:\n"
+ #ifdef CONFIG_X86_64
+@@ -128,7 +130,12 @@ asm (
+ " popf\n"
+ #endif
+ ".global optprobe_template_end\n"
+- "optprobe_template_end:\n");
++ "optprobe_template_end:\n"
++ ".type optprobe_template_func, @function\n"
++ ".size optprobe_template_func, .-optprobe_template_func\n");
++
++void optprobe_template_func(void);
++STACK_FRAME_NON_STANDARD(optprobe_template_func);
+
+ #define TMPL_MOVE_IDX \
+ ((long)&optprobe_template_val - (long)&optprobe_template_entry)
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index e244c19..acca20d 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -9,6 +9,7 @@
+ #include <linux/sched.h>
+ #include <linux/tboot.h>
+ #include <linux/delay.h>
++#include <linux/frame.h>
+ #include <acpi/reboot.h>
+ #include <asm/io.h>
+ #include <asm/apic.h>
+@@ -123,6 +124,7 @@ void __noreturn machine_real_restart(unsigned int type)
+ #ifdef CONFIG_APM_MODULE
+ EXPORT_SYMBOL(machine_real_restart);
+ #endif
++STACK_FRAME_NON_STANDARD(machine_real_restart);
+
+ /*
+ * Some Apple MacBook and MacBookPro's needs reboot=p to be able to reboot
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index c60d8fc..2672102 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -36,6 +36,7 @@
+ #include <linux/slab.h>
+ #include <linux/amd-iommu.h>
+ #include <linux/hashtable.h>
++#include <linux/frame.h>
+
+ #include <asm/apic.h>
+ #include <asm/perf_event.h>
+@@ -5099,6 +5100,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+
+ mark_all_clean(svm->vmcb);
+ }
++STACK_FRAME_NON_STANDARD(svm_vcpu_run);
+
+ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
+ {
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 9307c0d..d39062c 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -33,6 +33,7 @@
+ #include <linux/slab.h>
+ #include <linux/tboot.h>
+ #include <linux/hrtimer.h>
++#include <linux/frame.h>
+ #include <linux/nospec.h>
+ #include "kvm_cache_regs.h"
+ #include "x86.h"
+@@ -8680,6 +8681,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
+ );
+ }
+ }
++STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
+
+ static bool vmx_has_emulated_msr(int index)
+ {
+@@ -9120,6 +9122,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ vmx_recover_nmi_blocking(vmx);
+ vmx_complete_interrupts(vmx);
+ }
++STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
+
+ static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
+ {
+diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
+index c815564..10ffa7e 100644
+--- a/arch/x86/lib/msr-reg.S
++++ b/arch/x86/lib/msr-reg.S
+@@ -13,14 +13,14 @@
+ .macro op_safe_regs op
+ ENTRY(\op\()_safe_regs)
+ pushq %rbx
+- pushq %rbp
++ pushq %r12
+ movq %rdi, %r10 /* Save pointer */
+ xorl %r11d, %r11d /* Return value */
+ movl (%rdi), %eax
+ movl 4(%rdi), %ecx
+ movl 8(%rdi), %edx
+ movl 12(%rdi), %ebx
+- movl 20(%rdi), %ebp
++ movl 20(%rdi), %r12d
+ movl 24(%rdi), %esi
+ movl 28(%rdi), %edi
+ 1: \op
+@@ -29,10 +29,10 @@ ENTRY(\op\()_safe_regs)
+ movl %ecx, 4(%r10)
+ movl %edx, 8(%r10)
+ movl %ebx, 12(%r10)
+- movl %ebp, 20(%r10)
++ movl %r12d, 20(%r10)
+ movl %esi, 24(%r10)
+ movl %edi, 28(%r10)
+- popq %rbp
++ popq %r12
+ popq %rbx
+ ret
+ 3:
+diff --git a/arch/x86/net/Makefile b/arch/x86/net/Makefile
+index 90568c3..fefb4b6 100644
+--- a/arch/x86/net/Makefile
++++ b/arch/x86/net/Makefile
+@@ -1,4 +1,6 @@
+ #
+ # Arch-specific network modules
+ #
++OBJECT_FILES_NON_STANDARD_bpf_jit.o += y
++
+ obj-$(CONFIG_BPF_JIT) += bpf_jit.o bpf_jit_comp.o
+diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile
+index 066619b..7a25502 100644
+--- a/arch/x86/platform/efi/Makefile
++++ b/arch/x86/platform/efi/Makefile
+@@ -1,4 +1,5 @@
+ OBJECT_FILES_NON_STANDARD_efi_thunk_$(BITS).o := y
++OBJECT_FILES_NON_STANDARD_efi_stub_$(BITS).o := y
+
+ obj-$(CONFIG_EFI) += quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o
+ obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o
+diff --git a/arch/x86/power/Makefile b/arch/x86/power/Makefile
+index a6a198c..0504187 100644
+--- a/arch/x86/power/Makefile
++++ b/arch/x86/power/Makefile
+@@ -1,3 +1,5 @@
++OBJECT_FILES_NON_STANDARD_hibernate_asm_$(BITS).o := y
++
+ # __restore_processor_state() restores %gs after S3 resume and so should not
+ # itself be stack-protected
+ nostackp := $(call cc-option, -fno-stack-protector)
+diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
+index e47e527..4a54059 100644
+--- a/arch/x86/xen/Makefile
++++ b/arch/x86/xen/Makefile
+@@ -1,3 +1,6 @@
++OBJECT_FILES_NON_STANDARD_xen-asm_$(BITS).o := y
++OBJECT_FILES_NON_STANDARD_xen-pvh.o := y
++
+ ifdef CONFIG_FUNCTION_TRACER
+ # Do not profile debug and lowlevel utilities
+ CFLAGS_REMOVE_spinlock.o = -pg
+diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
+index 5616755..f5ab72e 100644
+--- a/kernel/kexec_core.c
++++ b/kernel/kexec_core.c
+@@ -38,6 +38,7 @@
+ #include <linux/syscore_ops.h>
+ #include <linux/compiler.h>
+ #include <linux/hugetlb.h>
++#include <linux/frame.h>
+
+ #include <asm/page.h>
+ #include <asm/sections.h>
+@@ -878,7 +879,7 @@ int kexec_load_disabled;
+ * only when panic_cpu holds the current CPU number; this is the only CPU
+ * which processes crash_kexec routines.
+ */
+-void __crash_kexec(struct pt_regs *regs)
++void __noclone __crash_kexec(struct pt_regs *regs)
+ {
+ /* Take the kexec_mutex here to prevent sys_kexec_load
+ * running on one cpu from replacing the crash kernel
+@@ -900,6 +901,7 @@ void __crash_kexec(struct pt_regs *regs)
+ mutex_unlock(&kexec_mutex);
+ }
+ }
++STACK_FRAME_NON_STANDARD(__crash_kexec);
+
+ void crash_kexec(struct pt_regs *regs)
+ {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0090-x86-xen-Add-unwind-hint-annotations-to-xen_setup_gdt.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0090-x86-xen-Add-unwind-hint-annotations-to-xen_setup_gdt.patch
new file mode 100644
index 00000000..3fdd5b1b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0090-x86-xen-Add-unwind-hint-annotations-to-xen_setup_gdt.patch
@@ -0,0 +1,47 @@
+From 666ab0b0ab5ec0645aca319b17218c976d98b950 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Sun, 3 Jun 2018 13:37:03 +0200
+Subject: [PATCH 90/93] x86/xen: Add unwind hint annotations to xen_setup_gdt
+
+Not needed in mainline as this function got rewritten in 4.12
+
+This enables objtool to grok the iret in the middle of a C function.
+
+This matches commit 76846bf3cb09 ("x86/asm: Add unwind hint annotations
+to sync_core()")
+
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/xen/enlighten.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 8b97c87..af79764 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -75,6 +75,7 @@
+ #include <asm/mwait.h>
+ #include <asm/pci_x86.h>
+ #include <asm/cpu.h>
++#include <asm/unwind_hints.h>
+
+ #ifdef CONFIG_ACPI
+ #include <linux/acpi.h>
+@@ -1450,10 +1451,12 @@ static void __ref xen_setup_gdt(int cpu)
+ * GDT. The new GDT has __KERNEL_CS with CS.L = 1
+ * and we are jumping to reload it.
+ */
+- asm volatile ("pushq %0\n"
++ asm volatile (UNWIND_HINT_SAVE
++ "pushq %0\n"
+ "leaq 1f(%%rip),%0\n"
+ "pushq %0\n"
+ "lretq\n"
++ UNWIND_HINT_RESTORE
+ "1:\n"
+ : "=&r" (dummy) : "0" (__KERNEL_CS));
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0091-x86-amd-revert-commit-944e0fc51a89c9827b9.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0091-x86-amd-revert-commit-944e0fc51a89c9827b9.patch
new file mode 100644
index 00000000..3b05904d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0091-x86-amd-revert-commit-944e0fc51a89c9827b9.patch
@@ -0,0 +1,51 @@
+From 0ccf96d6eee2affd66ebde69247397542a03185b Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Wed, 30 May 2018 13:09:56 +0200
+Subject: [PATCH 91/93] x86/amd: revert commit 944e0fc51a89c9827b9
+
+Revert commit 944e0fc51a89c9827b98813d65dc083274777c7f ("x86/amd: don't
+set X86_BUG_SYSRET_SS_ATTRS when running under Xen") as it is lacking
+a prerequisite patch and is making things worse.
+
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/amd.c | 5 ++---
+ arch/x86/xen/enlighten.c | 4 +++-
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 4c2be99..cd0abf8 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -857,9 +857,8 @@ static void init_amd(struct cpuinfo_x86 *c)
+ if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
+ set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
+
+- /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
+- if (!cpu_has(c, X86_FEATURE_XENPV))
+- set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
++ /* AMD CPUs don't reset SS attributes on SYSRET */
++ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+ }
+
+ #ifdef CONFIG_X86_32
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index af79764..c926db0 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -1971,8 +1971,10 @@ EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
+
+ static void xen_set_cpu_features(struct cpuinfo_x86 *c)
+ {
+- if (xen_pv_domain())
++ if (xen_pv_domain()) {
++ clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+ set_cpu_cap(c, X86_FEATURE_XENPV);
++ }
+ }
+
+ static void xen_pin_vcpu(int cpu)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0092-xen-set-cpu-capabilities-from-xen_start_kernel.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0092-xen-set-cpu-capabilities-from-xen_start_kernel.patch
new file mode 100644
index 00000000..7860a717
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0092-xen-set-cpu-capabilities-from-xen_start_kernel.patch
@@ -0,0 +1,72 @@
+From 4f306d0db3c6e46b730cba2b9b59a93b46a0a315 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Wed, 30 May 2018 13:09:57 +0200
+Subject: [PATCH 92/93] xen: set cpu capabilities from xen_start_kernel()
+
+Upstream commit: 0808e80cb760de2733c0527d2090ed2205a1eef8 ("xen: set
+cpu capabilities from xen_start_kernel()")
+
+There is no need to set the same capabilities for each cpu
+individually. This can easily be done for all cpus when starting the
+kernel.
+
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/xen/enlighten.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index c926db0..01120da 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -470,6 +470,14 @@ static void __init xen_init_cpuid_mask(void)
+ cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
+ }
+
++static void __init xen_init_capabilities(void)
++{
++ if (xen_pv_domain()) {
++ setup_clear_cpu_cap(X86_BUG_SYSRET_SS_ATTRS);
++ setup_force_cpu_cap(X86_FEATURE_XENPV);
++ }
++}
++
+ static void xen_set_debugreg(int reg, unsigned long val)
+ {
+ HYPERVISOR_set_debugreg(reg, val);
+@@ -1629,6 +1637,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
+
+ xen_init_irq_ops();
+ xen_init_cpuid_mask();
++ xen_init_capabilities();
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+ /*
+@@ -1969,14 +1978,6 @@ bool xen_hvm_need_lapic(void)
+ }
+ EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
+
+-static void xen_set_cpu_features(struct cpuinfo_x86 *c)
+-{
+- if (xen_pv_domain()) {
+- clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+- set_cpu_cap(c, X86_FEATURE_XENPV);
+- }
+-}
+-
+ static void xen_pin_vcpu(int cpu)
+ {
+ static bool disable_pinning;
+@@ -2023,7 +2024,6 @@ const struct hypervisor_x86 x86_hyper_xen = {
+ .init_platform = xen_hvm_guest_init,
+ #endif
+ .x2apic_available = xen_x2apic_para_available,
+- .set_cpu_features = xen_set_cpu_features,
+ .pin_vcpu = xen_pin_vcpu,
+ };
+ EXPORT_SYMBOL(x86_hyper_xen);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/0093-x86-amd-don-t-set-X86_BUG_SYSRET_SS_ATTRS-when-runni.patch b/common/recipes-kernel/linux/linux-yocto-4.9.21/0093-x86-amd-don-t-set-X86_BUG_SYSRET_SS_ATTRS-when-runni.patch
new file mode 100644
index 00000000..e2416627
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/0093-x86-amd-don-t-set-X86_BUG_SYSRET_SS_ATTRS-when-runni.patch
@@ -0,0 +1,65 @@
+From 7a5ded1f7c88ae162c525486f4be8030fa54fa26 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Wed, 30 May 2018 13:09:58 +0200
+Subject: [PATCH 93/93] x86/amd: don't set X86_BUG_SYSRET_SS_ATTRS when running
+ under Xen
+
+Upstream commit: def9331a12977770cc6132d79f8e6565871e8e38 ("x86/amd:
+don't set X86_BUG_SYSRET_SS_ATTRS when running under Xen")
+
+When running as Xen pv guest X86_BUG_SYSRET_SS_ATTRS must not be set
+on AMD cpus.
+
+This bug/feature bit is kind of special as it will be used very early
+when switching threads. Setting the bit and clearing it a little bit
+later leaves a critical window where things can go wrong. This time
+window has enlarged a little bit by using setup_clear_cpu_cap() instead
+of the hypervisor's set_cpu_features callback. It seems this larger
+window now makes it rather easy to hit the problem.
+
+The proper solution is to never set the bit in case of Xen.
+
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/amd.c | 5 +++--
+ arch/x86/xen/enlighten.c | 4 +---
+ 2 files changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index cd0abf8..4c2be99 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -857,8 +857,9 @@ static void init_amd(struct cpuinfo_x86 *c)
+ if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
+ set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
+
+- /* AMD CPUs don't reset SS attributes on SYSRET */
+- set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
++ /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
++ if (!cpu_has(c, X86_FEATURE_XENPV))
++ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+ }
+
+ #ifdef CONFIG_X86_32
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 01120da..fc63c84 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -472,10 +472,8 @@ static void __init xen_init_cpuid_mask(void)
+
+ static void __init xen_init_capabilities(void)
+ {
+- if (xen_pv_domain()) {
+- setup_clear_cpu_cap(X86_BUG_SYSRET_SS_ATTRS);
++ if (xen_pv_domain())
+ setup_force_cpu_cap(X86_FEATURE_XENPV);
+- }
+ }
+
+ static void xen_set_debugreg(int reg, unsigned long val)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.9.21/upstream-backports.scc b/common/recipes-kernel/linux/linux-yocto-4.9.21/upstream-backports.scc
index 70237be9..5567c30e 100644
--- a/common/recipes-kernel/linux/linux-yocto-4.9.21/upstream-backports.scc
+++ b/common/recipes-kernel/linux/linux-yocto-4.9.21/upstream-backports.scc
@@ -200,3 +200,96 @@ patch 0030-KVM-VMX-clean-up-declaration-of-VPID-EPT-invalidatio.patch
patch 0031-KVM-nVMX-invvpid-handling-improvements.patch
patch 0032-KVM-x86-Remove-indirect-MSR-op-calls-from-SPEC_CTRL.patch
patch 0033-KVM-VMX-Optimize-vmx_vcpu_run-and-svm_vcpu_run-by-ma.patch
+patch 0001-x86-paravirt-objtool-Annotate-indirect-calls.patch
+patch 0002-x86-module-Detect-and-skip-invalid-relocations.patch
+patch 0003-kvm-svm-Setup-MCG_CAP-on-AMD-properly.patch
+patch 0004-kvm-nVMX-Disallow-userspace-injected-exceptions-in-g.patch
+patch 0005-x86-cpufeatures-Add-Intel-PCONFIG-cpufeature.patch
+patch 0006-x86-speculation-objtool-Annotate-indirect-calls-jump.patch
+patch 0007-x86-speculation-Remove-Skylake-C2-from-Speculation-C.patch
+patch 0008-x86-reboot-Turn-off-KVM-when-halting-a-CPU.patch
+patch 0009-x86-KASLR-Fix-kexec-kernel-boot-crash-when-KASLR-ran.patch
+patch 0010-kvm-x86-fix-icebp-instruction-handling.patch
+patch 0011-bpf-x64-increase-number-of-passes.patch
+patch 0012-x86-mm-kaslr-Use-the-_ASM_MUL-macro-for-multiplicati.patch
+patch 0013-KVM-X86-Fix-preempt-the-preemption-timer-cancel.patch
+patch 0014-KVM-nVMX-Fix-handling-of-lmsw-instruction.patch
+patch 0015-KVM-SVM-do-not-zero-out-segment-attributes-if-segmen.patch
+patch 0016-KVM-nVMX-Update-vmcs12-guest_linear_address-on-neste.patch
+patch 0017-perf-x86-Fix-possible-Spectre-v1-indexing-for-hw_per.patch
+patch 0018-perf-x86-cstate-Fix-possible-Spectre-v1-indexing-for.patch
+patch 0019-perf-x86-msr-Fix-possible-Spectre-v1-indexing-in-the.patch
+patch 0020-perf-x86-Fix-possible-Spectre-v1-indexing-for-x86_pm.patch
+patch 0021-x86-amd-don-t-set-X86_BUG_SYSRET_SS_ATTRS-when-runni.patch
+patch 0022-x86-nospec-Simplify-alternative_msr_write.patch
+patch 0023-x86-bugs-Concentrate-bug-detection-into-a-separate-f.patch
+patch 0024-x86-bugs-Concentrate-bug-reporting-into-a-separate-f.patch
+patch 0025-x86-bugs-Read-SPEC_CTRL-MSR-during-boot-and-re-use-r.patch
+patch 0026-x86-bugs-KVM-Support-the-combination-of-guest-and-ho.patch
+patch 0027-x86-bugs-Expose-sys-.-spec_store_bypass.patch
+patch 0028-x86-cpufeatures-Add-X86_FEATURE_RDS.patch
+patch 0029-x86-bugs-Provide-boot-parameters-for-the-spec_store_.patch
+patch 0030-x86-bugs-intel-Set-proper-CPU-features-and-setup-RDS.patch
+patch 0031-x86-bugs-Whitelist-allowed-SPEC_CTRL-MSR-values.patch
+patch 0032-x86-bugs-AMD-Add-support-to-disable-RDS-on-Fam-15-16.patch
+patch 0033-x86-KVM-VMX-Expose-SPEC_CTRL-Bit-2-to-the-guest.patch
+patch 0034-x86-speculation-Create-spec-ctrl.h-to-avoid-include-.patch
+patch 0035-x86-process-Optimize-TIF-checks-in-__switch_to_xtra.patch
+patch 0036-x86-process-Correct-and-optimize-TIF_BLOCKSTEP-switc.patch
+patch 0037-x86-process-Optimize-TIF_NOTSC-switch.patch
+patch 0038-x86-process-Allow-runtime-control-of-Speculative-Sto.patch
+patch 0039-x86-speculation-Add-prctl-for-Speculative-Store-Bypa.patch
+patch 0040-nospec-Move-array_index_nospec-parameter-checking-in.patch
+patch 0041-nospec-Allow-index-argument-to-have-const-qualified-.patch
+patch 0042-nospec-Kill-array_index_nospec_mask_check.patch
+patch 0043-nospec-Include-asm-barrier.h-dependency.patch
+patch 0044-prctl-Add-speculation-control-prctls.patch
+patch 0045-nospec-Allow-getting-setting-on-non-current-task.patch
+patch 0046-x86-bugs-Make-boot-modes-__ro_after_init.patch
+patch 0047-fs-proc-Report-eip-esp-in-prod-PID-stat-for-coredump.patch
+patch 0048-proc-fix-coredump-vs-read-proc-stat-race.patch
+patch 0049-proc-Provide-details-on-speculation-flaw-mitigations.patch
+patch 0050-prctl-Add-force-disable-speculation.patch
+patch 0051-seccomp-fix-the-usage-of-get-put_seccomp_filter-in-s.patch
+patch 0052-seccomp-Enable-speculation-flaw-mitigations.patch
+patch 0053-seccomp-Use-PR_SPEC_FORCE_DISABLE.patch
+patch 0054-seccomp-Add-filter-flag-to-opt-out-of-SSB-mitigation.patch
+patch 0055-seccomp-Move-speculation-migitation-control-to-arch-.patch
+patch 0056-x86-speculation-Make-seccomp-the-default-mode-for-Sp.patch
+patch 0057-x86-bugs-Rename-_RDS-to-_SSBD.patch
+patch 0058-x86-bugs-Fix-__ssb_select_mitigation-return-type.patch
+patch 0059-x86-bugs-Make-cpu_show_common-static.patch
+patch 0060-x86-bugs-Fix-the-parameters-alignment-and-missing-vo.patch
+patch 0061-x86-cpu-Make-alternative_msr_write-work-for-32-bit-c.patch
+patch 0062-KVM-SVM-Move-spec-control-call-after-restore-of-GS.patch
+patch 0063-x86-speculation-Use-synthetic-bits-for-IBRS-IBPB-STI.patch
+patch 0064-x86-cpufeatures-Disentangle-MSR_SPEC_CTRL-enumeratio.patch
+patch 0065-x86-cpufeatures-Disentangle-SSBD-enumeration.patch
+patch 0066-x86-cpu-AMD-Fix-erratum-1076-CPB-bit.patch
+patch 0067-x86-cpufeatures-Add-FEATURE_ZEN.patch
+patch 0068-x86-speculation-Handle-HT-correctly-on-AMD.patch
+patch 0069-x86-bugs-KVM-Extend-speculation-control-for-VIRT_SPE.patch
+patch 0070-x86-speculation-Add-virtualized-speculative-store-by.patch
+patch 0071-x86-speculation-Rework-speculative_store_bypass_upda.patch
+patch 0072-x86-bugs-Unify-x86_spec_ctrl_-set_guest-restore_host.patch
+patch 0073-x86-bugs-Expose-x86_spec_ctrl_base-directly.patch
+patch 0074-x86-bugs-Remove-x86_spec_ctrl_set.patch
+patch 0075-x86-bugs-Rework-spec_ctrl-base-and-mask-logic.patch
+patch 0076-x86-speculation-KVM-Implement-support-for-VIRT_SPEC_.patch
+patch 0077-KVM-SVM-Implement-VIRT_SPEC_CTRL-support-for-SSBD.patch
+patch 0078-x86-bugs-Rename-SSBD_NO-to-SSB_NO.patch
+patch 0079-x86-kexec-Avoid-double-free_page-upon-do_kexec_load-.patch
+patch 0080-KVM-VMX-Expose-SSBD-properly-to-guests.patch
+patch 0081-KVM-x86-Update-cpuid-properly-when-CR4.OSXAVE-or-CR4.patch
+patch 0082-kvm-x86-IA32_ARCH_CAPABILITIES-is-always-supported.patch
+patch 0083-kvm-x86-fix-KVM_XEN_HVM_CONFIG-ioctl.patch
+patch 0084-KVM-VMX-raise-internal-error-for-exception-during-in.patch
+patch 0085-KVM-lapic-stop-advertising-DIRECTED_EOI-when-in-kern.patch
+patch 0086-objtool-Improve-detection-of-BUG-and-other-dead-ends.patch
+patch 0087-objtool-Move-checking-code-to-check.c.patch
+patch 0088-objtool-sync-up-with-the-4.14.47-version-of-objtool.patch
+patch 0089-objtool-x86-Add-several-functions-and-files-to-the-o.patch
+patch 0090-x86-xen-Add-unwind-hint-annotations-to-xen_setup_gdt.patch
+patch 0091-x86-amd-revert-commit-944e0fc51a89c9827b9.patch
+patch 0092-xen-set-cpu-capabilities-from-xen_start_kernel.patch
+patch 0093-x86-amd-don-t-set-X86_BUG_SYSRET_SS_ATTRS-when-runni.patch