Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig | 7
-rw-r--r--  arch/x86/Kconfig.debug | 3
-rw-r--r--  arch/x86/Makefile | 32
-rw-r--r--  arch/x86/boot/compressed/Makefile | 6
-rw-r--r--  arch/x86/boot/compressed/kaslr.c | 7
-rw-r--r--  arch/x86/boot/compressed/misc.c | 4
-rw-r--r--  arch/x86/boot/header.S | 8
-rw-r--r--  arch/x86/configs/i386_defconfig | 92
-rw-r--r--  arch/x86/configs/x86_64_defconfig | 93
-rw-r--r--  arch/x86/crypto/aes_ctrby8_avx-x86_64.S | 15
-rw-r--r--  arch/x86/crypto/aesni-intel_asm.S | 739
-rw-r--r--  arch/x86/crypto/aesni-intel_avx-x86_64.S | 1
-rw-r--r--  arch/x86/crypto/chacha-ssse3-x86_64.S | 16
-rw-r--r--  arch/x86/crypto/chacha_glue.c | 17
-rw-r--r--  arch/x86/crypto/crc32-pclmul_asm.S | 47
-rw-r--r--  arch/x86/crypto/crc32c-pcl-intel-asm_64.S | 7
-rw-r--r--  arch/x86/crypto/curve25519-x86_64.c | 6
-rw-r--r--  arch/x86/crypto/ghash-clmulni-intel_asm.S | 17
-rw-r--r--  arch/x86/entry/calling.h | 40
-rw-r--r--  arch/x86/entry/common.c | 638
-rw-r--r--  arch/x86/entry/entry_32.S | 4
-rw-r--r--  arch/x86/entry/entry_64.S | 143
-rw-r--r--  arch/x86/entry/syscalls/syscall_32.tbl | 1
-rw-r--r--  arch/x86/entry/syscalls/syscall_64.tbl | 1
-rw-r--r--  arch/x86/entry/vdso/vma.c | 5
-rw-r--r--  arch/x86/events/amd/power.c | 4
-rw-r--r--  arch/x86/events/core.c | 28
-rw-r--r--  arch/x86/events/intel/core.c | 127
-rw-r--r--  arch/x86/events/intel/ds.c | 6
-rw-r--r--  arch/x86/events/intel/lbr.c | 733
-rw-r--r--  arch/x86/events/intel/uncore.c | 26
-rw-r--r--  arch/x86/events/intel/uncore.h | 37
-rw-r--r--  arch/x86/events/intel/uncore_snb.c | 80
-rw-r--r--  arch/x86/events/intel/uncore_snbep.c | 208
-rw-r--r--  arch/x86/events/perf_event.h | 125
-rw-r--r--  arch/x86/events/rapl.c | 3
-rw-r--r--  arch/x86/events/zhaoxin/core.c | 2
-rw-r--r--  arch/x86/include/asm/asm.h | 6
-rw-r--r--  arch/x86/include/asm/atomic.h | 2
-rw-r--r--  arch/x86/include/asm/boot.h | 11
-rw-r--r--  arch/x86/include/asm/bug.h | 1
-rw-r--r--  arch/x86/include/asm/cmpxchg_32.h | 2
-rw-r--r--  arch/x86/include/asm/cpufeatures.h | 4
-rw-r--r--  arch/x86/include/asm/div64.h | 14
-rw-r--r--  arch/x86/include/asm/dma-mapping.h | 4
-rw-r--r--  arch/x86/include/asm/efi.h | 20
-rw-r--r--  arch/x86/include/asm/entry-common.h | 76
-rw-r--r--  arch/x86/include/asm/fpu/internal.h | 47
-rw-r--r--  arch/x86/include/asm/fpu/types.h | 27
-rw-r--r--  arch/x86/include/asm/fpu/xstate.h | 36
-rw-r--r--  arch/x86/include/asm/fsgsbase.h | 47
-rw-r--r--  arch/x86/include/asm/idtentry.h | 42
-rw-r--r--  arch/x86/include/asm/inst.h | 170
-rw-r--r--  arch/x86/include/asm/intel-family.h | 7
-rw-r--r--  arch/x86/include/asm/io_apic.h | 1
-rw-r--r--  arch/x86/include/asm/kdebug.h | 5
-rw-r--r--  arch/x86/include/asm/kprobes.h | 2
-rw-r--r--  arch/x86/include/asm/mem_encrypt.h | 5
-rw-r--r--  arch/x86/include/asm/msr-index.h | 26
-rw-r--r--  arch/x86/include/asm/percpu.h | 510
-rw-r--r--  arch/x86/include/asm/perf_event.h | 82
-rw-r--r--  arch/x86/include/asm/pgtable.h | 9
-rw-r--r--  arch/x86/include/asm/pgtable_64.h | 13
-rw-r--r--  arch/x86/include/asm/pgtable_64_types.h | 2
-rw-r--r--  arch/x86/include/asm/processor.h | 70
-rw-r--r--  arch/x86/include/asm/ptrace.h | 5
-rw-r--r--  arch/x86/include/asm/signal.h | 1
-rw-r--r--  arch/x86/include/asm/sparsemem.h | 6
-rw-r--r--  arch/x86/include/asm/special_insns.h | 1
-rw-r--r--  arch/x86/include/asm/stackprotector.h | 12
-rw-r--r--  arch/x86/include/asm/sync_core.h | 72
-rw-r--r--  arch/x86/include/asm/thread_info.h | 5
-rw-r--r--  arch/x86/include/asm/topology.h | 2
-rw-r--r--  arch/x86/include/asm/tsc.h | 3
-rw-r--r--  arch/x86/include/asm/uaccess.h | 5
-rw-r--r--  arch/x86/include/asm/uv/bios.h | 2
-rw-r--r--  arch/x86/include/asm/uv/uv.h | 2
-rw-r--r--  arch/x86/include/asm/uv/uv_bau.h | 118
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h | 36
-rw-r--r--  arch/x86/include/asm/uv/uv_mmrs.h | 712
-rw-r--r--  arch/x86/include/uapi/asm/bootparam.h | 2
-rw-r--r--  arch/x86/include/uapi/asm/hwcap2.h | 3
-rw-r--r--  arch/x86/kernel/alternative.c | 44
-rw-r--r--  arch/x86/kernel/amd_gart_64.c | 2
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 5
-rw-r--r--  arch/x86/kernel/apic/vector.c | 4
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 122
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 19
-rw-r--r--  arch/x86/kernel/cpu/common.c | 22
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mce/core.c | 9
-rw-r--r--  arch/x86/kernel/cpu/mce/dev-mcelog.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mce/inject.c | 2
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c | 2
-rw-r--r--  arch/x86/kernel/dumpstack.c | 23
-rw-r--r--  arch/x86/kernel/fpu/core.c | 39
-rw-r--r--  arch/x86/kernel/fpu/xstate.c | 89
-rw-r--r--  arch/x86/kernel/idt.c | 2
-rw-r--r--  arch/x86/kernel/kexec-bzimage64.c | 9
-rw-r--r--  arch/x86/kernel/kprobes/core.c | 15
-rw-r--r--  arch/x86/kernel/kprobes/opt.c | 38
-rw-r--r--  arch/x86/kernel/kvm.c | 6
-rw-r--r--  arch/x86/kernel/msr.c | 69
-rw-r--r--  arch/x86/kernel/nmi.c | 9
-rw-r--r--  arch/x86/kernel/process.c | 14
-rw-r--r--  arch/x86/kernel/process_32.c | 29
-rw-r--r--  arch/x86/kernel/process_64.c | 190
-rw-r--r--  arch/x86/kernel/ptrace.c | 60
-rw-r--r--  arch/x86/kernel/quirks.c | 10
-rw-r--r--  arch/x86/kernel/signal.c | 3
-rw-r--r--  arch/x86/kernel/smpboot.c | 64
-rw-r--r--  arch/x86/kernel/sys_ia32.c | 3
-rw-r--r--  arch/x86/kernel/traps.c | 41
-rw-r--r--  arch/x86/kernel/unwind_frame.c | 4
-rw-r--r--  arch/x86/kvm/Kconfig | 1
-rw-r--r--  arch/x86/kvm/mmu/mmu.c | 2
-rw-r--r--  arch/x86/kvm/mmu/paging_tmpl.h | 2
-rw-r--r--  arch/x86/kvm/vmx/vmx.c | 13
-rw-r--r--  arch/x86/kvm/x86.c | 17
-rw-r--r--  arch/x86/mm/fault.c | 6
-rw-r--r--  arch/x86/mm/init.c | 3
-rw-r--r--  arch/x86/mm/init_64.c | 59
-rw-r--r--  arch/x86/mm/mem_encrypt.c | 2
-rw-r--r--  arch/x86/mm/numa.c | 18
-rw-r--r--  arch/x86/mm/pat/set_memory.c | 2
-rw-r--r--  arch/x86/platform/efi/efi.c | 16
-rw-r--r--  arch/x86/platform/efi/efi_64.c | 38
-rw-r--r--  arch/x86/platform/efi/quirks.c | 31
-rw-r--r--  arch/x86/platform/uv/bios_uv.c | 173
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c | 243
-rw-r--r--  arch/x86/platform/uv/uv_time.c | 16
-rw-r--r--  arch/x86/xen/enlighten_pv.c | 2
-rw-r--r--  arch/x86/xen/smp_pv.c | 2
-rw-r--r--  arch/x86/xen/time.c | 1
134 files changed, 3346 insertions, 3809 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 883da0abf779..fd03cefabd34 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -115,6 +115,7 @@ config X86
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_VULNERABILITIES
select GENERIC_EARLY_IOREMAP
+ select GENERIC_ENTRY
select GENERIC_FIND_FIRST_BIT
select GENERIC_IOMAP
select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
@@ -161,7 +162,6 @@ config X86
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
select HAVE_CONTEXT_TRACKING if X86_64
- select HAVE_COPY_THREAD_TLS
select HAVE_C_RECORDMCOUNT
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
@@ -188,6 +188,7 @@ config X86
select HAVE_KERNEL_LZMA
select HAVE_KERNEL_LZO
select HAVE_KERNEL_XZ
+ select HAVE_KERNEL_ZSTD
select HAVE_KPROBES
select HAVE_KPROBES_ON_FTRACE
select HAVE_FUNCTION_ERROR_INJECTION
@@ -909,6 +910,7 @@ config DMI
config GART_IOMMU
bool "Old AMD GART IOMMU support"
+ select DMA_OPS
select IOMMU_HELPER
select SWIOTLB
depends on X86_64 && PCI && AMD_NB
@@ -1292,7 +1294,6 @@ config MICROCODE
bool "CPU microcode loading support"
default y
depends on CPU_SUP_AMD || CPU_SUP_INTEL
- select FW_LOADER
help
If you say Y here, you will be able to update the microcode on
Intel and AMD processors. The Intel support is for the IA32 family,
@@ -1314,7 +1315,6 @@ config MICROCODE_INTEL
bool "Intel microcode loading support"
depends on MICROCODE
default MICROCODE
- select FW_LOADER
help
This options enables microcode patch loading support for Intel
processors.
@@ -1326,7 +1326,6 @@ config MICROCODE_INTEL
config MICROCODE_AMD
bool "AMD microcode loading support"
depends on MICROCODE
- select FW_LOADER
help
If you select this option, microcode patch loading support for AMD
processors will be enabled.
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 0dd319e6e5b4..ee1d3c5834c6 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -3,6 +3,9 @@
config TRACE_IRQFLAGS_SUPPORT
def_bool y
+config TRACE_IRQFLAGS_NMI_SUPPORT
+ def_bool y
+
config EARLY_PRINTK_USB
bool
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 00e378de8bc0..1e634d7ee6eb 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -47,10 +47,6 @@ export REALMODE_CFLAGS
# e.g.: obj-y += foo_$(BITS).o
export BITS
-ifdef CONFIG_X86_NEED_RELOCS
- LDFLAGS_vmlinux := --emit-relocs --discard-none
-endif
-
#
# Prevent GCC from generating any FP code by mistake.
#
@@ -177,17 +173,6 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
endif
-KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
-
-#
-# The 64-bit kernel must be aligned to 2MB. Pass -z max-page-size=0x200000 to
-# the linker to force 2MB page size regardless of the default page size used
-# by the linker.
-#
-ifdef CONFIG_X86_64
-KBUILD_LDFLAGS += $(call ld-option, -z max-page-size=0x200000)
-endif
-
# Workaround for a gcc prelease that unfortunately was shipped in a suse release
KBUILD_CFLAGS += -Wno-sign-compare
#
@@ -207,6 +192,23 @@ ifdef CONFIG_RETPOLINE
endif
endif
+KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
+
+ifdef CONFIG_X86_NEED_RELOCS
+LDFLAGS_vmlinux := --emit-relocs --discard-none
+else
+LDFLAGS_vmlinux :=
+endif
+
+#
+# The 64-bit kernel must be aligned to 2MB. Pass -z max-page-size=0x200000 to
+# the linker to force 2MB page size regardless of the default page size used
+# by the linker.
+#
+ifdef CONFIG_X86_64
+LDFLAGS_vmlinux += -z max-page-size=0x200000
+endif
+
archscripts: scripts_basic
$(Q)$(MAKE) $(build)=arch/x86/tools relocs
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 5a828fde7a42..c08714ae76ec 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -26,7 +26,7 @@ OBJECT_FILES_NON_STANDARD := y
KCOV_INSTRUMENT := n
targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
- vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4
+ vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 vmlinux.bin.zst
KBUILD_CFLAGS := -m$(BITS) -O2
KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC)
@@ -42,6 +42,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
KBUILD_CFLAGS += -Wno-pointer-sign
KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+KBUILD_CFLAGS += -D__DISABLE_EXPORTS
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
@@ -145,6 +146,8 @@ $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
$(call if_changed,lzo)
$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE
$(call if_changed,lz4)
+$(obj)/vmlinux.bin.zst: $(vmlinux.bin.all-y) FORCE
+ $(call if_changed,zstd22)
suffix-$(CONFIG_KERNEL_GZIP) := gz
suffix-$(CONFIG_KERNEL_BZIP2) := bz2
@@ -152,6 +155,7 @@ suffix-$(CONFIG_KERNEL_LZMA) := lzma
suffix-$(CONFIG_KERNEL_XZ) := xz
suffix-$(CONFIG_KERNEL_LZO) := lzo
suffix-$(CONFIG_KERNEL_LZ4) := lz4
+suffix-$(CONFIG_KERNEL_ZSTD) := zst
quiet_cmd_mkpiggy = MKPIGGY $@
cmd_mkpiggy = $(obj)/mkpiggy $< > $@
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index d7408af55738..0048269180d5 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -19,13 +19,6 @@
*/
#define BOOT_CTYPE_H
-/*
- * _ctype[] in lib/ctype.c is needed by isspace() of linux/ctype.h.
- * While both lib/ctype.c and lib/cmdline.c will bring EXPORT_SYMBOL
- * which is meaningless and will cause compiling error in some cases.
- */
-#define __DISABLE_EXPORTS
-
#include "misc.h"
#include "error.h"
#include "../string.h"
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 9652d5c2afda..39e592d0e0b4 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -77,6 +77,10 @@ static int lines, cols;
#ifdef CONFIG_KERNEL_LZ4
#include "../../../../lib/decompress_unlz4.c"
#endif
+
+#ifdef CONFIG_KERNEL_ZSTD
+#include "../../../../lib/decompress_unzstd.c"
+#endif
/*
* NOTE: When adding a new decompressor, please update the analysis in
* ../header.S.
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 735ad7f21ab0..6dbd7e9f74c9 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -539,8 +539,14 @@ pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
# the size-dependent part now grows so fast.
#
# extra_bytes = (uncompressed_size >> 8) + 65536
+#
+# ZSTD compressed data grows by at most 3 bytes per 128K, and only has a 22
+# byte fixed overhead but has a maximum block size of 128K, so it needs a
+# larger margin.
+#
+# extra_bytes = (uncompressed_size >> 8) + 131072
-#define ZO_z_extra_bytes ((ZO_z_output_len >> 8) + 65536)
+#define ZO_z_extra_bytes ((ZO_z_output_len >> 8) + 131072)
#if ZO_z_output_len > ZO_z_input_len
# define ZO_z_extract_offset (ZO_z_output_len + ZO_z_extra_bytes - \
ZO_z_input_len)
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 550904591e94..d7577fece9eb 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1,39 +1,29 @@
-# CONFIG_64BIT is not set
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_FHANDLE=y
-CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=18
CONFIG_CGROUPS=y
+CONFIG_CGROUP_SCHED=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_SCHED=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_KPROBES=y
-CONFIG_JUMP_LABEL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_SMP=y
CONFIG_X86_GENERIC=y
CONFIG_HPET_TIMER=y
-CONFIG_SCHED_SMT=y
-CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
-CONFIG_X86_MCE=y
CONFIG_X86_REBOOTFIXUPS=y
-CONFIG_MICROCODE=y
CONFIG_MICROCODE_AMD=y
CONFIG_X86_MSR=y
CONFIG_X86_CPUID=y
@@ -41,28 +31,25 @@ CONFIG_HIGHPTE=y
CONFIG_X86_CHECK_BIOS_CORRUPTION=y
# CONFIG_MTRR_SANITIZER is not set
CONFIG_EFI=y
+CONFIG_EFI_STUB=y
CONFIG_HZ_1000=y
CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
-CONFIG_RANDOMIZE_BASE=y
-CONFIG_RANDOMIZE_MEMORY=y
-# CONFIG_COMPAT_VDSO is not set
CONFIG_HIBERNATION=y
CONFIG_PM_DEBUG=y
CONFIG_PM_TRACE_RTC=y
CONFIG_ACPI_DOCK=y
-CONFIG_CPU_FREQ=y
-# CONFIG_CPU_FREQ_STAT is not set
+CONFIG_ACPI_BGRT=y
CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_X86_ACPI_CPUFREQ=y
-CONFIG_PCI=y
-CONFIG_PCIEPORTBUS=y
-CONFIG_PCI_MSI=y
-CONFIG_PCCARD=y
-CONFIG_YENTA=y
-CONFIG_HOTPLUG_PCI=y
+CONFIG_EFI_VARS=y
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_BINFMT_MISC=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -82,16 +69,12 @@ CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_DIAG is not set
CONFIG_TCP_CONG_ADVANCED=y
# CONFIG_TCP_CONG_BIC is not set
# CONFIG_TCP_CONG_WESTWOOD is not set
# CONFIG_TCP_CONG_HTCP is not set
CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6=y
CONFIG_INET6_AH=y
CONFIG_INET6_ESP=y
CONFIG_NETLABEL=y
@@ -102,6 +85,7 @@ CONFIG_NF_CONNTRACK_FTP=y
CONFIG_NF_CONNTRACK_IRC=y
CONFIG_NF_CONNTRACK_SIP=y
CONFIG_NF_CT_NETLINK=y
+CONFIG_NF_NAT=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
@@ -109,14 +93,11 @@ CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
CONFIG_NETFILTER_XT_MATCH_POLICY=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
-CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_NF_NAT=y
-CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_MANGLE=y
-CONFIG_NF_CONNTRACK_IPV6=y
CONFIG_IP6_NF_IPTABLES=y
CONFIG_IP6_NF_MATCH_IPV6HEADER=y
CONFIG_IP6_NF_FILTER=y
@@ -129,6 +110,12 @@ CONFIG_CFG80211=y
CONFIG_MAC80211=y
CONFIG_MAC80211_LEDS=y
CONFIG_RFKILL=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_MSI=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_PCCARD=y
+CONFIG_YENTA=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_DEBUG_DEVRES=y
@@ -170,15 +157,12 @@ CONFIG_8139TOO=y
# CONFIG_8139TOO_PIO is not set
CONFIG_R8169=y
CONFIG_INPUT_POLLDEV=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TABLET=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_INPUT_MISC=y
-CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_NONSTANDARD=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=32
@@ -187,6 +171,7 @@ CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_NONSTANDARD=y
CONFIG_HW_RANDOM=y
CONFIG_NVRAM=y
CONFIG_HPET=y
@@ -201,19 +186,15 @@ CONFIG_DRM_I915=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
CONFIG_FB_EFI=y
-# CONFIG_LCD_CLASS_DEVICE is not set
CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_SOUND=y
CONFIG_SND=y
+CONFIG_SND_HRTIMER=y
CONFIG_SND_SEQUENCER=y
CONFIG_SND_SEQ_DUMMY=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-CONFIG_SND_SEQUENCER_OSS=y
-CONFIG_SND_HRTIMER=y
CONFIG_SND_HDA_INTEL=y
CONFIG_SND_HDA_HWDEP=y
CONFIG_HIDRAW=y
@@ -234,17 +215,14 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_MON=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_TT_NEWSCHED=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_PRINTER=y
CONFIG_USB_STORAGE=y
-CONFIG_EDAC=y
CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set
CONFIG_DMADEVICES=y
CONFIG_EEEPC_LAPTOP=y
-CONFIG_EFI_VARS=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
@@ -270,27 +248,19 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_PRINTK_TIME=y
-CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHEDSTATS=y
-CONFIG_TIMER_STATS=y
-CONFIG_DEBUG_STACK_USAGE=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
CONFIG_EARLY_PRINTK_DBGP=y
-CONFIG_DEBUG_STACKOVERFLOW=y
-# CONFIG_DEBUG_RODATA_TEST is not set
CONFIG_DEBUG_BOOT_PARAMS=y
-CONFIG_SECURITY=y
-CONFIG_SECURITY_NETWORK=y
-CONFIG_SECURITY_SELINUX=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-CONFIG_SECURITY_SELINUX_DISABLE=y
-CONFIG_CRYPTO_AES_586=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_EFI_STUB=y
-CONFIG_ACPI_BGRT=y
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 614961009075..f85600143747 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -1,36 +1,26 @@
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_FHANDLE=y
-CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=18
CONFIG_CGROUPS=y
+CONFIG_CGROUP_SCHED=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_SCHED=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_KPROBES=y
-CONFIG_JUMP_LABEL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_SMP=y
-CONFIG_NR_CPUS=64
-CONFIG_SCHED_SMT=y
-CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
-CONFIG_X86_MCE=y
-CONFIG_MICROCODE=y
CONFIG_MICROCODE_AMD=y
CONFIG_X86_MSR=y
CONFIG_X86_CPUID=y
@@ -38,30 +28,28 @@ CONFIG_NUMA=y
CONFIG_X86_CHECK_BIOS_CORRUPTION=y
# CONFIG_MTRR_SANITIZER is not set
CONFIG_EFI=y
+CONFIG_EFI_STUB=y
+CONFIG_EFI_MIXED=y
CONFIG_HZ_1000=y
CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
-CONFIG_RANDOMIZE_BASE=y
-CONFIG_RANDOMIZE_MEMORY=y
-# CONFIG_COMPAT_VDSO is not set
CONFIG_HIBERNATION=y
CONFIG_PM_DEBUG=y
CONFIG_PM_TRACE_RTC=y
CONFIG_ACPI_DOCK=y
-CONFIG_CPU_FREQ=y
-# CONFIG_CPU_FREQ_STAT is not set
+CONFIG_ACPI_BGRT=y
CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_X86_ACPI_CPUFREQ=y
-CONFIG_PCI=y
-CONFIG_PCI_MMCONFIG=y
-CONFIG_PCIEPORTBUS=y
-CONFIG_PCCARD=y
-CONFIG_YENTA=y
-CONFIG_HOTPLUG_PCI=y
-CONFIG_BINFMT_MISC=y
CONFIG_IA32_EMULATION=y
+CONFIG_EFI_VARS=y
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_BINFMT_MISC=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -80,16 +68,12 @@ CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_DIAG is not set
CONFIG_TCP_CONG_ADVANCED=y
# CONFIG_TCP_CONG_BIC is not set
# CONFIG_TCP_CONG_WESTWOOD is not set
# CONFIG_TCP_CONG_HTCP is not set
CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6=y
CONFIG_INET6_AH=y
CONFIG_INET6_ESP=y
CONFIG_NETLABEL=y
@@ -100,6 +84,7 @@ CONFIG_NF_CONNTRACK_FTP=y
CONFIG_NF_CONNTRACK_IRC=y
CONFIG_NF_CONNTRACK_SIP=y
CONFIG_NF_CT_NETLINK=y
+CONFIG_NF_NAT=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
@@ -107,14 +92,11 @@ CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
CONFIG_NETFILTER_XT_MATCH_POLICY=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
-CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_NF_NAT=y
-CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_MANGLE=y
-CONFIG_NF_CONNTRACK_IPV6=y
CONFIG_IP6_NF_IPTABLES=y
CONFIG_IP6_NF_MATCH_IPV6HEADER=y
CONFIG_IP6_NF_FILTER=y
@@ -127,6 +109,11 @@ CONFIG_CFG80211=y
CONFIG_MAC80211=y
CONFIG_MAC80211_LEDS=y
CONFIG_RFKILL=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_PCCARD=y
+CONFIG_YENTA=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_DEBUG_DEVRES=y
@@ -163,15 +150,12 @@ CONFIG_FORCEDETH=y
CONFIG_8139TOO=y
CONFIG_R8169=y
CONFIG_INPUT_POLLDEV=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TABLET=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_INPUT_MISC=y
-CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_NONSTANDARD=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=32
@@ -180,6 +164,7 @@ CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_NONSTANDARD=y
CONFIG_HW_RANDOM=y
# CONFIG_HW_RANDOM_INTEL is not set
# CONFIG_HW_RANDOM_AMD is not set
@@ -196,19 +181,15 @@ CONFIG_DRM_I915=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
CONFIG_FB_EFI=y
-# CONFIG_LCD_CLASS_DEVICE is not set
CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_SOUND=y
CONFIG_SND=y
+CONFIG_SND_HRTIMER=y
CONFIG_SND_SEQUENCER=y
CONFIG_SND_SEQ_DUMMY=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-CONFIG_SND_SEQUENCER_OSS=y
-CONFIG_SND_HRTIMER=y
CONFIG_SND_HDA_INTEL=y
CONFIG_SND_HDA_HWDEP=y
CONFIG_HIDRAW=y
@@ -229,12 +210,10 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_MON=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_TT_NEWSCHED=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_PRINTER=y
CONFIG_USB_STORAGE=y
-CONFIG_EDAC=y
CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set
CONFIG_DMADEVICES=y
@@ -242,7 +221,6 @@ CONFIG_EEEPC_LAPTOP=y
CONFIG_AMD_IOMMU=y
CONFIG_INTEL_IOMMU=y
# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
-CONFIG_EFI_VARS=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
@@ -268,27 +246,18 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_STACK_USAGE=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHEDSTATS=y
-CONFIG_TIMER_STATS=y
-CONFIG_DEBUG_STACK_USAGE=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
CONFIG_EARLY_PRINTK_DBGP=y
-CONFIG_DEBUG_STACKOVERFLOW=y
-# CONFIG_DEBUG_RODATA_TEST is not set
CONFIG_DEBUG_BOOT_PARAMS=y
-CONFIG_UNWINDER_ORC=y
-CONFIG_SECURITY=y
-CONFIG_SECURITY_NETWORK=y
-CONFIG_SECURITY_SELINUX=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-CONFIG_SECURITY_SELINUX_DISABLE=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_EFI_STUB=y
-CONFIG_EFI_MIXED=y
-CONFIG_ACPI_BGRT=y
diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
index ec437db1fa54..3f0fc7dd87d7 100644
--- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
+++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
@@ -63,7 +63,6 @@
*/
#include <linux/linkage.h>
-#include <asm/inst.h>
#define VMOVDQ vmovdqu
@@ -127,10 +126,6 @@ ddq_add_8:
/* generate a unique variable for ddq_add_x */
-.macro setddq n
- var_ddq_add = ddq_add_\n
-.endm
-
/* generate a unique variable for xmm register */
.macro setxdata n
var_xdata = %xmm\n
@@ -140,9 +135,7 @@ ddq_add_8:
.macro club name, id
.altmacro
- .if \name == DDQ_DATA
- setddq %\id
- .elseif \name == XDATA
+ .if \name == XDATA
setxdata %\id
.endif
.noaltmacro
@@ -165,9 +158,8 @@ ddq_add_8:
.set i, 1
.rept (by - 1)
- club DDQ_DATA, i
club XDATA, i
- vpaddq var_ddq_add(%rip), xcounter, var_xdata
+ vpaddq (ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata
vptest ddq_low_msk(%rip), var_xdata
jnz 1f
vpaddq ddq_high_add_1(%rip), var_xdata, var_xdata
@@ -180,8 +172,7 @@ ddq_add_8:
vmovdqa 1*16(p_keys), xkeyA
vpxor xkey0, xdata0, xdata0
- club DDQ_DATA, by
- vpaddq var_ddq_add(%rip), xcounter, xcounter
+ vpaddq (ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter
vptest ddq_low_msk(%rip), xcounter
jnz 1f
vpaddq ddq_high_add_1(%rip), xcounter, xcounter
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 54e7d15dbd0d..1852b19a73a0 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -26,7 +26,6 @@
*/
#include <linux/linkage.h>
-#include <asm/inst.h>
#include <asm/frame.h>
#include <asm/nospec-branch.h>
@@ -201,7 +200,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
mov \SUBKEY, %r12
movdqu (%r12), \TMP3
movdqa SHUF_MASK(%rip), \TMP2
- PSHUFB_XMM \TMP2, \TMP3
+ pshufb \TMP2, \TMP3
# precompute HashKey<<1 mod poly from the HashKey (required for GHASH)
@@ -263,10 +262,10 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
movdqu %xmm0, OrigIV(%arg2) # ctx_data.orig_IV = iv
movdqa SHUF_MASK(%rip), %xmm2
- PSHUFB_XMM %xmm2, %xmm0
+ pshufb %xmm2, %xmm0
movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv
- PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7
movdqu HashKey(%arg2), %xmm13
CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \
@@ -347,7 +346,7 @@ _zero_cipher_left_\@:
paddd ONE(%rip), %xmm0 # INCR CNT to get Yn
movdqu %xmm0, CurCount(%arg2)
movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10, %xmm0
+ pshufb %xmm10, %xmm0
ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn)
movdqu %xmm0, PBlockEncKey(%arg2)
@@ -377,7 +376,7 @@ _large_enough_update_\@:
# get the appropriate shuffle mask
movdqu (%r12), %xmm2
# shift right 16-r13 bytes
- PSHUFB_XMM %xmm2, %xmm1
+ pshufb %xmm2, %xmm1
_data_read_\@:
lea ALL_F+16(%rip), %r12
@@ -393,12 +392,12 @@ _data_read_\@:
.ifc \operation, dec
pand %xmm1, %xmm2
movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10 ,%xmm2
+ pshufb %xmm10 ,%xmm2
pxor %xmm2, %xmm8
.else
movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10,%xmm0
+ pshufb %xmm10,%xmm0
pxor %xmm0, %xmm8
.endif
@@ -408,17 +407,17 @@ _data_read_\@:
# GHASH computation for the last <16 byte block
movdqa SHUF_MASK(%rip), %xmm10
# shuffle xmm0 back to output as ciphertext
- PSHUFB_XMM %xmm10, %xmm0
+ pshufb %xmm10, %xmm0
.endif
# Output %r13 bytes
- MOVQ_R64_XMM %xmm0, %rax
+ movq %xmm0, %rax
cmp $8, %r13
jle _less_than_8_bytes_left_\@
mov %rax, (%arg3 , %r11, 1)
add $8, %r11
psrldq $8, %xmm0
- MOVQ_R64_XMM %xmm0, %rax
+ movq %xmm0, %rax
sub $8, %r13
_less_than_8_bytes_left_\@:
mov %al, (%arg3, %r11, 1)
@@ -449,7 +448,7 @@ _partial_done\@:
movd %r12d, %xmm15 # len(A) in %xmm15
mov InLen(%arg2), %r12
shl $3, %r12 # len(C) in bits (*128)
- MOVQ_R64_XMM %r12, %xmm1
+ movq %r12, %xmm1
pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
pxor %xmm1, %xmm15 # %xmm15 = len(A)||len(C)
@@ -457,7 +456,7 @@ _partial_done\@:
GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
# final GHASH computation
movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10, %xmm8
+ pshufb %xmm10, %xmm8
movdqu OrigIV(%arg2), %xmm0 # %xmm0 = Y0
ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Y0)
@@ -470,7 +469,7 @@ _return_T_\@:
cmp $8, %r11
jl _T_4_\@
_T_8_\@:
- MOVQ_R64_XMM %xmm0, %rax
+ movq %xmm0, %rax
mov %rax, (%r10)
add $8, %r10
sub $8, %r11
@@ -518,9 +517,9 @@ _return_T_done_\@:
pshufd $78, \HK, \TMP3
pxor \GH, \TMP2 # TMP2 = a1+a0
pxor \HK, \TMP3 # TMP3 = b1+b0
- PCLMULQDQ 0x11, \HK, \TMP1 # TMP1 = a1*b1
- PCLMULQDQ 0x00, \HK, \GH # GH = a0*b0
- PCLMULQDQ 0x00, \TMP3, \TMP2 # TMP2 = (a0+a1)*(b1+b0)
+ pclmulqdq $0x11, \HK, \TMP1 # TMP1 = a1*b1
+ pclmulqdq $0x00, \HK, \GH # GH = a0*b0
+ pclmulqdq $0x00, \TMP3, \TMP2 # TMP2 = (a0+a1)*(b1+b0)
pxor \GH, \TMP2
pxor \TMP1, \TMP2 # TMP2 = (a0*b0)+(a1*b0)
movdqa \TMP2, \TMP3
@@ -570,7 +569,7 @@ _return_T_done_\@:
cmp $8, \DLEN
jl _read_lt8_\@
mov (\DPTR), %rax
- MOVQ_R64_XMM %rax, \XMMDst
+ movq %rax, \XMMDst
sub $8, \DLEN
jz _done_read_partial_block_\@
xor %eax, %eax
@@ -579,7 +578,7 @@ _read_next_byte_\@:
mov 7(\DPTR, \DLEN, 1), %al
dec \DLEN
jnz _read_next_byte_\@
- MOVQ_R64_XMM %rax, \XMM1
+ movq %rax, \XMM1
pslldq $8, \XMM1
por \XMM1, \XMMDst
jmp _done_read_partial_block_\@
@@ -590,7 +589,7 @@ _read_next_byte_lt8_\@:
mov -1(\DPTR, \DLEN, 1), %al
dec \DLEN
jnz _read_next_byte_lt8_\@
- MOVQ_R64_XMM %rax, \XMMDst
+ movq %rax, \XMMDst
_done_read_partial_block_\@:
.endm
@@ -608,7 +607,7 @@ _done_read_partial_block_\@:
jl _get_AAD_rest\@
_get_AAD_blocks\@:
movdqu (%r10), \TMP7
- PSHUFB_XMM %xmm14, \TMP7 # byte-reflect the AAD data
+ pshufb %xmm14, \TMP7 # byte-reflect the AAD data
pxor \TMP7, \TMP6
GHASH_MUL \TMP6, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
add $16, %r10
@@ -624,7 +623,7 @@ _get_AAD_rest\@:
je _get_AAD_done\@
READ_PARTIAL_BLOCK %r10, %r11, \TMP1, \TMP7
- PSHUFB_XMM %xmm14, \TMP7 # byte-reflect the AAD data
+ pshufb %xmm14, \TMP7 # byte-reflect the AAD data
pxor \TMP6, \TMP7
GHASH_MUL \TMP7, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
movdqu \TMP7, \TMP6
@@ -667,7 +666,7 @@ _data_read_\@: # Finished reading in data
# r16-r13 is the number of bytes in plaintext mod 16)
add %r13, %r12
movdqu (%r12), %xmm2 # get the appropriate shuffle mask
- PSHUFB_XMM %xmm2, %xmm9 # shift right r13 bytes
+ pshufb %xmm2, %xmm9 # shift right r13 bytes
.ifc \operation, dec
movdqa %xmm1, %xmm3
@@ -689,8 +688,8 @@ _no_extra_mask_1_\@:
pand %xmm1, %xmm3
movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10, %xmm3
- PSHUFB_XMM %xmm2, %xmm3
+ pshufb %xmm10, %xmm3
+ pshufb %xmm2, %xmm3
pxor %xmm3, \AAD_HASH
cmp $0, %r10
@@ -724,8 +723,8 @@ _no_extra_mask_2_\@:
pand %xmm1, %xmm9
movdqa SHUF_MASK(%rip), %xmm1
- PSHUFB_XMM %xmm1, %xmm9
- PSHUFB_XMM %xmm2, %xmm9
+ pshufb %xmm1, %xmm9
+ pshufb %xmm2, %xmm9
pxor %xmm9, \AAD_HASH
cmp $0, %r10
@@ -744,8 +743,8 @@ _encode_done_\@:
movdqa SHUF_MASK(%rip), %xmm10
# shuffle xmm9 back to output as ciphertext
- PSHUFB_XMM %xmm10, %xmm9
- PSHUFB_XMM %xmm2, %xmm9
+ pshufb %xmm10, %xmm9
+ pshufb %xmm2, %xmm9
.endif
# output encrypted Bytes
cmp $0, %r10
@@ -759,14 +758,14 @@ _partial_fill_\@:
mov \PLAIN_CYPH_LEN, %r13
_count_set_\@:
movdqa %xmm9, %xmm0
- MOVQ_R64_XMM %xmm0, %rax
+ movq %xmm0, %rax
cmp $8, %r13
jle _less_than_8_bytes_left_\@
mov %rax, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
add $8, \DATA_OFFSET
psrldq $8, %xmm0
- MOVQ_R64_XMM %xmm0, %rax
+ movq %xmm0, %rax
sub $8, %r13
_less_than_8_bytes_left_\@:
movb %al, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
@@ -810,7 +809,7 @@ _partial_block_done_\@:
.else
MOVADQ \XMM0, %xmm\index
.endif
- PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap
+ pshufb %xmm14, %xmm\index # perform a 16 byte swap
pxor \TMP2, %xmm\index
.endr
lea 0x10(%arg1),%r10
@@ -821,7 +820,7 @@ _partial_block_done_\@:
aes_loop_initial_\@:
MOVADQ (%r10),\TMP1
.irpc index, \i_seq
- AESENC \TMP1, %xmm\index
+ aesenc \TMP1, %xmm\index
.endr
add $16,%r10
sub $1,%eax
@@ -829,7 +828,7 @@ aes_loop_initial_\@:
MOVADQ (%r10), \TMP1
.irpc index, \i_seq
- AESENCLAST \TMP1, %xmm\index # Last Round
+ aesenclast \TMP1, %xmm\index # Last Round
.endr
.irpc index, \i_seq
movdqu (%arg4 , %r11, 1), \TMP1
@@ -841,7 +840,7 @@ aes_loop_initial_\@:
.ifc \operation, dec
movdqa \TMP1, %xmm\index
.endif
- PSHUFB_XMM %xmm14, %xmm\index
+ pshufb %xmm14, %xmm\index
# prepare plaintext/ciphertext for GHASH computation
.endr
@@ -876,19 +875,19 @@ aes_loop_initial_\@:
MOVADQ ONE(%RIP),\TMP1
paddd \TMP1, \XMM0 # INCR Y0
MOVADQ \XMM0, \XMM1
- PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
+ pshufb %xmm14, \XMM1 # perform a 16 byte swap
paddd \TMP1, \XMM0 # INCR Y0
MOVADQ \XMM0, \XMM2
- PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap
+ pshufb %xmm14, \XMM2 # perform a 16 byte swap
paddd \TMP1, \XMM0 # INCR Y0
MOVADQ \XMM0, \XMM3
- PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
+ pshufb %xmm14, \XMM3 # perform a 16 byte swap
paddd \TMP1, \XMM0 # INCR Y0
MOVADQ \XMM0, \XMM4
- PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
+ pshufb %xmm14, \XMM4 # perform a 16 byte swap
MOVADQ 0(%arg1),\TMP1
pxor \TMP1, \XMM1
@@ -897,17 +896,17 @@ aes_loop_initial_\@:
pxor \TMP1, \XMM4
.irpc index, 1234 # do 4 rounds
movaps 0x10*\index(%arg1), \TMP1
- AESENC \TMP1, \XMM1
- AESENC \TMP1, \XMM2
- AESENC \TMP1, \XMM3
- AESENC \TMP1, \XMM4
+ aesenc \TMP1, \XMM1
+ aesenc \TMP1, \XMM2
+ aesenc \TMP1, \XMM3
+ aesenc \TMP1, \XMM4
.endr
.irpc index, 56789 # do next 5 rounds
movaps 0x10*\index(%arg1), \TMP1
- AESENC \TMP1, \XMM1
- AESENC \TMP1, \XMM2
- AESENC \TMP1, \XMM3
- AESENC \TMP1, \XMM4
+ aesenc \TMP1, \XMM1
+ aesenc \TMP1, \XMM2
+ aesenc \TMP1, \XMM3
+ aesenc \TMP1, \XMM4
.endr
lea 0xa0(%arg1),%r10
mov keysize,%eax
@@ -918,7 +917,7 @@ aes_loop_initial_\@:
aes_loop_pre_\@:
MOVADQ (%r10),\TMP2
.irpc index, 1234
- AESENC \TMP2, %xmm\index
+ aesenc \TMP2, %xmm\index
.endr
add $16,%r10
sub $1,%eax
@@ -926,10 +925,10 @@ aes_loop_pre_\@:
aes_loop_pre_done\@:
MOVADQ (%r10), \TMP2
- AESENCLAST \TMP2, \XMM1
- AESENCLAST \TMP2, \XMM2
- AESENCLAST \TMP2, \XMM3
- AESENCLAST \TMP2, \XMM4
+ aesenclast \TMP2, \XMM1
+ aesenclast \TMP2, \XMM2
+ aesenclast \TMP2, \XMM3
+ aesenclast \TMP2, \XMM4
movdqu 16*0(%arg4 , %r11 , 1), \TMP1
pxor \TMP1, \XMM1
.ifc \operation, dec
@@ -961,12 +960,12 @@ aes_loop_pre_done\@:
.endif
add $64, %r11
- PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
+ pshufb %xmm14, \XMM1 # perform a 16 byte swap
pxor \XMMDst, \XMM1
# combine GHASHed value with the corresponding ciphertext
- PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap
- PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
- PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
+ pshufb %xmm14, \XMM2 # perform a 16 byte swap
+ pshufb %xmm14, \XMM3 # perform a 16 byte swap
+ pshufb %xmm14, \XMM4 # perform a 16 byte swap
_initial_blocks_done\@:
@@ -978,7 +977,7 @@ _initial_blocks_done\@:
* arg1, %arg3, %arg4 are used as pointers only, not modified
* %r11 is the data offset value
*/
-.macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \
+.macro GHASH_4_ENCRYPT_4_PARALLEL_enc TMP1 TMP2 TMP3 TMP4 TMP5 \
TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM1, \XMM5
@@ -994,7 +993,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pxor \XMM5, \TMP6
paddd ONE(%rip), \XMM0 # INCR CNT
movdqu HashKey_4(%arg2), \TMP5
- PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
+ pclmulqdq $0x11, \TMP5, \TMP4 # TMP4 = a1*b1
movdqa \XMM0, \XMM1
paddd ONE(%rip), \XMM0 # INCR CNT
movdqa \XMM0, \XMM2
@@ -1002,51 +1001,51 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM0, \XMM3
paddd ONE(%rip), \XMM0 # INCR CNT
movdqa \XMM0, \XMM4
- PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap
- PCLMULQDQ 0x00, \TMP5, \XMM5 # XMM5 = a0*b0
- PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap
- PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap
- PSHUFB_XMM %xmm15, \XMM4 # perform a 16 byte swap
+ pshufb %xmm15, \XMM1 # perform a 16 byte swap
+ pclmulqdq $0x00, \TMP5, \XMM5 # XMM5 = a0*b0
+ pshufb %xmm15, \XMM2 # perform a 16 byte swap
+ pshufb %xmm15, \XMM3 # perform a 16 byte swap
+ pshufb %xmm15, \XMM4 # perform a 16 byte swap
pxor (%arg1), \XMM1
pxor (%arg1), \XMM2
pxor (%arg1), \XMM3
pxor (%arg1), \XMM4
movdqu HashKey_4_k(%arg2), \TMP5
- PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
movaps 0x10(%arg1), \TMP1
- AESENC \TMP1, \XMM1 # Round 1
- AESENC \TMP1, \XMM2
- AESENC \TMP1, \XMM3
- AESENC \TMP1, \XMM4
+ aesenc \TMP1, \XMM1 # Round 1
+ aesenc \TMP1, \XMM2
+ aesenc \TMP1, \XMM3
+ aesenc \TMP1, \XMM4
movaps 0x20(%arg1), \TMP1
- AESENC \TMP1, \XMM1 # Round 2
- AESENC \TMP1, \XMM2
- AESENC \TMP1, \XMM3
- AESENC \TMP1, \XMM4
+ aesenc \TMP1, \XMM1 # Round 2
+ aesenc \TMP1, \XMM2
+ aesenc \TMP1, \XMM3
+ aesenc \TMP1, \XMM4
movdqa \XMM6, \TMP1
pshufd $78, \XMM6, \TMP2
pxor \XMM6, \TMP2
movdqu HashKey_3(%arg2), \TMP5
- PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
+ pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
movaps 0x30(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 3
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
- PCLMULQDQ 0x00, \TMP5, \XMM6 # XMM6 = a0*b0
+ aesenc \TMP3, \XMM1 # Round 3
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
+ pclmulqdq $0x00, \TMP5, \XMM6 # XMM6 = a0*b0
movaps 0x40(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 4
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
+ aesenc \TMP3, \XMM1 # Round 4
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
movdqu HashKey_3_k(%arg2), \TMP5
- PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x50(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 5
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
+ aesenc \TMP3, \XMM1 # Round 5
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
pxor \TMP1, \TMP4
# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
pxor \XMM6, \XMM5
@@ -1058,25 +1057,25 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
# Multiply TMP5 * HashKey using karatsuba
- PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
+ pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1
movaps 0x60(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 6
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
- PCLMULQDQ 0x00, \TMP5, \XMM7 # XMM7 = a0*b0
+ aesenc \TMP3, \XMM1 # Round 6
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
+ pclmulqdq $0x00, \TMP5, \XMM7 # XMM7 = a0*b0
movaps 0x70(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 7
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
+ aesenc \TMP3, \XMM1 # Round 7
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
movdqu HashKey_2_k(%arg2), \TMP5
- PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x80(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 8
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
+ aesenc \TMP3, \XMM1 # Round 8
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
pxor \TMP1, \TMP4
# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
pxor \XMM7, \XMM5
@@ -1089,13 +1088,13 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pshufd $78, \XMM8, \TMP2
pxor \XMM8, \TMP2
movdqu HashKey(%arg2), \TMP5
- PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
+ pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1
movaps 0x90(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 9
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
- PCLMULQDQ 0x00, \TMP5, \XMM8 # XMM8 = a0*b0
+ aesenc \TMP3, \XMM1 # Round 9
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
+ pclmulqdq $0x00, \TMP5, \XMM8 # XMM8 = a0*b0
lea 0xa0(%arg1),%r10
mov keysize,%eax
shr $2,%eax # 128->4, 192->6, 256->8
@@ -1105,7 +1104,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
aes_loop_par_enc\@:
MOVADQ (%r10),\TMP3
.irpc index, 1234
- AESENC \TMP3, %xmm\index
+ aesenc \TMP3, %xmm\index
.endr
add $16,%r10
sub $1,%eax
@@ -1113,12 +1112,12 @@ aes_loop_par_enc\@:
aes_loop_par_enc_done\@:
MOVADQ (%r10), \TMP3
- AESENCLAST \TMP3, \XMM1 # Round 10
- AESENCLAST \TMP3, \XMM2
- AESENCLAST \TMP3, \XMM3
- AESENCLAST \TMP3, \XMM4
+ aesenclast \TMP3, \XMM1 # Round 10
+ aesenclast \TMP3, \XMM2
+ aesenclast \TMP3, \XMM3
+ aesenclast \TMP3, \XMM4
movdqu HashKey_k(%arg2), \TMP5
- PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movdqu (%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
movdqu 16(%arg4,%r11,1), \TMP3
@@ -1131,10 +1130,10 @@ aes_loop_par_enc_done\@:
movdqu \XMM2, 16(%arg3,%r11,1) # Write to the ciphertext buffer
movdqu \XMM3, 32(%arg3,%r11,1) # Write to the ciphertext buffer
movdqu \XMM4, 48(%arg3,%r11,1) # Write to the ciphertext buffer
- PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap
- PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap
- PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap
- PSHUFB_XMM %xmm15, \XMM4 # perform a 16 byte swap
+ pshufb %xmm15, \XMM1 # perform a 16 byte swap
+ pshufb %xmm15, \XMM2 # perform a 16 byte swap
+ pshufb %xmm15, \XMM3 # perform a 16 byte swap
+ pshufb %xmm15, \XMM4 # perform a 16 byte swap
pxor \TMP4, \TMP1
pxor \XMM8, \XMM5
@@ -1186,7 +1185,7 @@ aes_loop_par_enc_done\@:
* arg1, %arg3, %arg4 are used as pointers only, not modified
* %r11 is the data offset value
*/
-.macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \
+.macro GHASH_4_ENCRYPT_4_PARALLEL_dec TMP1 TMP2 TMP3 TMP4 TMP5 \
TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM1, \XMM5
@@ -1202,7 +1201,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pxor \XMM5, \TMP6
paddd ONE(%rip), \XMM0 # INCR CNT
movdqu HashKey_4(%arg2), \TMP5
- PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
+ pclmulqdq $0x11, \TMP5, \TMP4 # TMP4 = a1*b1
movdqa \XMM0, \XMM1
paddd ONE(%rip), \XMM0 # INCR CNT
movdqa \XMM0, \XMM2
@@ -1210,51 +1209,51 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM0, \XMM3
paddd ONE(%rip), \XMM0 # INCR CNT
movdqa \XMM0, \XMM4
- PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap
- PCLMULQDQ 0x00, \TMP5, \XMM5 # XMM5 = a0*b0
- PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap
- PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap
- PSHUFB_XMM %xmm15, \XMM4 # perform a 16 byte swap
+ pshufb %xmm15, \XMM1 # perform a 16 byte swap
+ pclmulqdq $0x00, \TMP5, \XMM5 # XMM5 = a0*b0
+ pshufb %xmm15, \XMM2 # perform a 16 byte swap
+ pshufb %xmm15, \XMM3 # perform a 16 byte swap
+ pshufb %xmm15, \XMM4 # perform a 16 byte swap
pxor (%arg1), \XMM1
pxor (%arg1), \XMM2
pxor (%arg1), \XMM3
pxor (%arg1), \XMM4
movdqu HashKey_4_k(%arg2), \TMP5
- PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
movaps 0x10(%arg1), \TMP1
- AESENC \TMP1, \XMM1 # Round 1
- AESENC \TMP1, \XMM2
- AESENC \TMP1, \XMM3
- AESENC \TMP1, \XMM4
+ aesenc \TMP1, \XMM1 # Round 1
+ aesenc \TMP1, \XMM2
+ aesenc \TMP1, \XMM3
+ aesenc \TMP1, \XMM4
movaps 0x20(%arg1), \TMP1
- AESENC \TMP1, \XMM1 # Round 2
- AESENC \TMP1, \XMM2
- AESENC \TMP1, \XMM3
- AESENC \TMP1, \XMM4
+ aesenc \TMP1, \XMM1 # Round 2
+ aesenc \TMP1, \XMM2
+ aesenc \TMP1, \XMM3
+ aesenc \TMP1, \XMM4
movdqa \XMM6, \TMP1
pshufd $78, \XMM6, \TMP2
pxor \XMM6, \TMP2
movdqu HashKey_3(%arg2), \TMP5
- PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
+ pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
movaps 0x30(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 3
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
- PCLMULQDQ 0x00, \TMP5, \XMM6 # XMM6 = a0*b0
+ aesenc \TMP3, \XMM1 # Round 3
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
+ pclmulqdq $0x00, \TMP5, \XMM6 # XMM6 = a0*b0
movaps 0x40(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 4
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
+ aesenc \TMP3, \XMM1 # Round 4
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
movdqu HashKey_3_k(%arg2), \TMP5
- PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x50(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 5
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
+ aesenc \TMP3, \XMM1 # Round 5
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
pxor \TMP1, \TMP4
# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
pxor \XMM6, \XMM5
@@ -1266,25 +1265,25 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
# Multiply TMP5 * HashKey using karatsuba
- PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
+ pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1
movaps 0x60(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 6
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
- PCLMULQDQ 0x00, \TMP5, \XMM7 # XMM7 = a0*b0
+ aesenc \TMP3, \XMM1 # Round 6
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
+ pclmulqdq $0x00, \TMP5, \XMM7 # XMM7 = a0*b0
movaps 0x70(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 7
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
+ aesenc \TMP3, \XMM1 # Round 7
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
movdqu HashKey_2_k(%arg2), \TMP5
- PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x80(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 8
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
+ aesenc \TMP3, \XMM1 # Round 8
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
pxor \TMP1, \TMP4
# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
pxor \XMM7, \XMM5
@@ -1297,13 +1296,13 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pshufd $78, \XMM8, \TMP2
pxor \XMM8, \TMP2
movdqu HashKey(%arg2), \TMP5
- PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
+ pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1
movaps 0x90(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 9
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
- PCLMULQDQ 0x00, \TMP5, \XMM8 # XMM8 = a0*b0
+ aesenc \TMP3, \XMM1 # Round 9
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
+ pclmulqdq $0x00, \TMP5, \XMM8 # XMM8 = a0*b0
lea 0xa0(%arg1),%r10
mov keysize,%eax
shr $2,%eax # 128->4, 192->6, 256->8
@@ -1313,7 +1312,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
aes_loop_par_dec\@:
MOVADQ (%r10),\TMP3
.irpc index, 1234
- AESENC \TMP3, %xmm\index
+ aesenc \TMP3, %xmm\index
.endr
add $16,%r10
sub $1,%eax
@@ -1321,12 +1320,12 @@ aes_loop_par_dec\@:
aes_loop_par_dec_done\@:
MOVADQ (%r10), \TMP3
- AESENCLAST \TMP3, \XMM1 # last round
- AESENCLAST \TMP3, \XMM2
- AESENCLAST \TMP3, \XMM3
- AESENCLAST \TMP3, \XMM4
+ aesenclast \TMP3, \XMM1 # last round
+ aesenclast \TMP3, \XMM2
+ aesenclast \TMP3, \XMM3
+ aesenclast \TMP3, \XMM4
movdqu HashKey_k(%arg2), \TMP5
- PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movdqu (%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
movdqu \XMM1, (%arg3,%r11,1) # Write to plaintext buffer
@@ -1343,10 +1342,10 @@ aes_loop_par_dec_done\@:
pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK
movdqu \XMM4, 48(%arg3,%r11,1) # Write to plaintext buffer
movdqa \TMP3, \XMM4
- PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap
- PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap
- PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap
- PSHUFB_XMM %xmm15, \XMM4 # perform a 16 byte swap
+ pshufb %xmm15, \XMM1 # perform a 16 byte swap
+ pshufb %xmm15, \XMM2 # perform a 16 byte swap
+ pshufb %xmm15, \XMM3 # perform a 16 byte swap
+ pshufb %xmm15, \XMM4 # perform a 16 byte swap
pxor \TMP4, \TMP1
pxor \XMM8, \XMM5
@@ -1402,10 +1401,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
pshufd $78, \XMM1, \TMP2
pxor \XMM1, \TMP2
movdqu HashKey_4(%arg2), \TMP5
- PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1
- PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0
+ pclmulqdq $0x11, \TMP5, \TMP6 # TMP6 = a1*b1
+ pclmulqdq $0x00, \TMP5, \XMM1 # XMM1 = a0*b0
movdqu HashKey_4_k(%arg2), \TMP4
- PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movdqa \XMM1, \XMMDst
movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1
@@ -1415,10 +1414,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
pshufd $78, \XMM2, \TMP2
pxor \XMM2, \TMP2
movdqu HashKey_3(%arg2), \TMP5
- PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
- PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0
+ pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1
+ pclmulqdq $0x00, \TMP5, \XMM2 # XMM2 = a0*b0
movdqu HashKey_3_k(%arg2), \TMP4
- PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
pxor \TMP1, \TMP6
pxor \XMM2, \XMMDst
pxor \TMP2, \XMM1
@@ -1430,10 +1429,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
pshufd $78, \XMM3, \TMP2
pxor \XMM3, \TMP2
movdqu HashKey_2(%arg2), \TMP5
- PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
- PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0
+ pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1
+ pclmulqdq $0x00, \TMP5, \XMM3 # XMM3 = a0*b0
movdqu HashKey_2_k(%arg2), \TMP4
- PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
pxor \TMP1, \TMP6
pxor \XMM3, \XMMDst
pxor \TMP2, \XMM1 # results accumulated in TMP6, XMMDst, XMM1
@@ -1443,10 +1442,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
pshufd $78, \XMM4, \TMP2
pxor \XMM4, \TMP2
movdqu HashKey(%arg2), \TMP5
- PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
- PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0
+ pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1
+ pclmulqdq $0x00, \TMP5, \XMM4 # XMM4 = a0*b0
movdqu HashKey_k(%arg2), \TMP4
- PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
pxor \TMP1, \TMP6
pxor \XMM4, \XMMDst
pxor \XMM1, \TMP2
@@ -1504,13 +1503,13 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
_esb_loop_\@:
MOVADQ (%r10),\TMP1
- AESENC \TMP1,\XMM0
+ aesenc \TMP1,\XMM0
add $16,%r10
sub $1,%eax
jnz _esb_loop_\@
MOVADQ (%r10),\TMP1
- AESENCLAST \TMP1,\XMM0
+ aesenclast \TMP1,\XMM0
.endm
/*****************************************************************************
* void aesni_gcm_dec(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
@@ -1849,72 +1848,72 @@ SYM_FUNC_START(aesni_set_key)
movups 0x10(UKEYP), %xmm2 # other user key
movaps %xmm2, (TKEYP)
add $0x10, TKEYP
- AESKEYGENASSIST 0x1 %xmm2 %xmm1 # round 1
+ aeskeygenassist $0x1, %xmm2, %xmm1 # round 1
call _key_expansion_256a
- AESKEYGENASSIST 0x1 %xmm0 %xmm1
+ aeskeygenassist $0x1, %xmm0, %xmm1
call _key_expansion_256b
- AESKEYGENASSIST 0x2 %xmm2 %xmm1 # round 2
+ aeskeygenassist $0x2, %xmm2, %xmm1 # round 2
call _key_expansion_256a
- AESKEYGENASSIST 0x2 %xmm0 %xmm1
+ aeskeygenassist $0x2, %xmm0, %xmm1
call _key_expansion_256b
- AESKEYGENASSIST 0x4 %xmm2 %xmm1 # round 3
+ aeskeygenassist $0x4, %xmm2, %xmm1 # round 3
call _key_expansion_256a
- AESKEYGENASSIST 0x4 %xmm0 %xmm1
+ aeskeygenassist $0x4, %xmm0, %xmm1
call _key_expansion_256b
- AESKEYGENASSIST 0x8 %xmm2 %xmm1 # round 4
+ aeskeygenassist $0x8, %xmm2, %xmm1 # round 4
call _key_expansion_256a
- AESKEYGENASSIST 0x8 %xmm0 %xmm1
+ aeskeygenassist $0x8, %xmm0, %xmm1
call _key_expansion_256b
- AESKEYGENASSIST 0x10 %xmm2 %xmm1 # round 5
+ aeskeygenassist $0x10, %xmm2, %xmm1 # round 5
call _key_expansion_256a
- AESKEYGENASSIST 0x10 %xmm0 %xmm1
+ aeskeygenassist $0x10, %xmm0, %xmm1
call _key_expansion_256b
- AESKEYGENASSIST 0x20 %xmm2 %xmm1 # round 6
+ aeskeygenassist $0x20, %xmm2, %xmm1 # round 6
call _key_expansion_256a
- AESKEYGENASSIST 0x20 %xmm0 %xmm1
+ aeskeygenassist $0x20, %xmm0, %xmm1
call _key_expansion_256b
- AESKEYGENASSIST 0x40 %xmm2 %xmm1 # round 7
+ aeskeygenassist $0x40, %xmm2, %xmm1 # round 7
call _key_expansion_256a
jmp .Ldec_key
.Lenc_key192:
movq 0x10(UKEYP), %xmm2 # other user key
- AESKEYGENASSIST 0x1 %xmm2 %xmm1 # round 1
+ aeskeygenassist $0x1, %xmm2, %xmm1 # round 1
call _key_expansion_192a
- AESKEYGENASSIST 0x2 %xmm2 %xmm1 # round 2
+ aeskeygenassist $0x2, %xmm2, %xmm1 # round 2
call _key_expansion_192b
- AESKEYGENASSIST 0x4 %xmm2 %xmm1 # round 3
+ aeskeygenassist $0x4, %xmm2, %xmm1 # round 3
call _key_expansion_192a
- AESKEYGENASSIST 0x8 %xmm2 %xmm1 # round 4
+ aeskeygenassist $0x8, %xmm2, %xmm1 # round 4
call _key_expansion_192b
- AESKEYGENASSIST 0x10 %xmm2 %xmm1 # round 5
+ aeskeygenassist $0x10, %xmm2, %xmm1 # round 5
call _key_expansion_192a
- AESKEYGENASSIST 0x20 %xmm2 %xmm1 # round 6
+ aeskeygenassist $0x20, %xmm2, %xmm1 # round 6
call _key_expansion_192b
- AESKEYGENASSIST 0x40 %xmm2 %xmm1 # round 7
+ aeskeygenassist $0x40, %xmm2, %xmm1 # round 7
call _key_expansion_192a
- AESKEYGENASSIST 0x80 %xmm2 %xmm1 # round 8
+ aeskeygenassist $0x80, %xmm2, %xmm1 # round 8
call _key_expansion_192b
jmp .Ldec_key
.Lenc_key128:
- AESKEYGENASSIST 0x1 %xmm0 %xmm1 # round 1
+ aeskeygenassist $0x1, %xmm0, %xmm1 # round 1
call _key_expansion_128
- AESKEYGENASSIST 0x2 %xmm0 %xmm1 # round 2
+ aeskeygenassist $0x2, %xmm0, %xmm1 # round 2
call _key_expansion_128
- AESKEYGENASSIST 0x4 %xmm0 %xmm1 # round 3
+ aeskeygenassist $0x4, %xmm0, %xmm1 # round 3
call _key_expansion_128
- AESKEYGENASSIST 0x8 %xmm0 %xmm1 # round 4
+ aeskeygenassist $0x8, %xmm0, %xmm1 # round 4
call _key_expansion_128
- AESKEYGENASSIST 0x10 %xmm0 %xmm1 # round 5
+ aeskeygenassist $0x10, %xmm0, %xmm1 # round 5
call _key_expansion_128
- AESKEYGENASSIST 0x20 %xmm0 %xmm1 # round 6
+ aeskeygenassist $0x20, %xmm0, %xmm1 # round 6
call _key_expansion_128
- AESKEYGENASSIST 0x40 %xmm0 %xmm1 # round 7
+ aeskeygenassist $0x40, %xmm0, %xmm1 # round 7
call _key_expansion_128
- AESKEYGENASSIST 0x80 %xmm0 %xmm1 # round 8
+ aeskeygenassist $0x80, %xmm0, %xmm1 # round 8
call _key_expansion_128
- AESKEYGENASSIST 0x1b %xmm0 %xmm1 # round 9
+ aeskeygenassist $0x1b, %xmm0, %xmm1 # round 9
call _key_expansion_128
- AESKEYGENASSIST 0x36 %xmm0 %xmm1 # round 10
+ aeskeygenassist $0x36, %xmm0, %xmm1 # round 10
call _key_expansion_128
.Ldec_key:
sub $0x10, TKEYP
@@ -1927,7 +1926,7 @@ SYM_FUNC_START(aesni_set_key)
.align 4
.Ldec_key_loop:
movaps (KEYP), %xmm0
- AESIMC %xmm0 %xmm1
+ aesimc %xmm0, %xmm1
movaps %xmm1, (UKEYP)
add $0x10, KEYP
sub $0x10, UKEYP
@@ -1988,37 +1987,37 @@ SYM_FUNC_START_LOCAL(_aesni_enc1)
je .Lenc192
add $0x20, TKEYP
movaps -0x60(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps -0x50(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
.align 4
.Lenc192:
movaps -0x40(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps -0x30(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
.align 4
.Lenc128:
movaps -0x20(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps -0x10(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps (TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps 0x10(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps 0x20(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps 0x30(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps 0x40(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps 0x50(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps 0x60(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps 0x70(TKEYP), KEY
- AESENCLAST KEY STATE
+ aesenclast KEY, STATE
ret
SYM_FUNC_END(_aesni_enc1)
@@ -2054,79 +2053,79 @@ SYM_FUNC_START_LOCAL(_aesni_enc4)
je .L4enc192
add $0x20, TKEYP
movaps -0x60(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps -0x50(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
#.align 4
.L4enc192:
movaps -0x40(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps -0x30(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
#.align 4
.L4enc128:
movaps -0x20(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps -0x10(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps (TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps 0x10(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps 0x20(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps 0x30(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps 0x40(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps 0x50(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps 0x60(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps 0x70(TKEYP), KEY
- AESENCLAST KEY STATE1 # last round
- AESENCLAST KEY STATE2
- AESENCLAST KEY STATE3
- AESENCLAST KEY STATE4
+ aesenclast KEY, STATE1 # last round
+ aesenclast KEY, STATE2
+ aesenclast KEY, STATE3
+ aesenclast KEY, STATE4
ret
SYM_FUNC_END(_aesni_enc4)
@@ -2178,37 +2177,37 @@ SYM_FUNC_START_LOCAL(_aesni_dec1)
je .Ldec192
add $0x20, TKEYP
movaps -0x60(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps -0x50(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
.align 4
.Ldec192:
movaps -0x40(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps -0x30(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
.align 4
.Ldec128:
movaps -0x20(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps -0x10(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps (TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps 0x10(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps 0x20(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps 0x30(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps 0x40(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps 0x50(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps 0x60(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps 0x70(TKEYP), KEY
- AESDECLAST KEY STATE
+ aesdeclast KEY, STATE
ret
SYM_FUNC_END(_aesni_dec1)
@@ -2244,79 +2243,79 @@ SYM_FUNC_START_LOCAL(_aesni_dec4)
je .L4dec192
add $0x20, TKEYP
movaps -0x60(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps -0x50(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
.align 4
.L4dec192:
movaps -0x40(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps -0x30(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
.align 4
.L4dec128:
movaps -0x20(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps -0x10(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps (TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps 0x10(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps 0x20(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps 0x30(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps 0x40(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps 0x50(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps 0x60(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps 0x70(TKEYP), KEY
- AESDECLAST KEY STATE1 # last round
- AESDECLAST KEY STATE2
- AESDECLAST KEY STATE3
- AESDECLAST KEY STATE4
+ aesdeclast KEY, STATE1 # last round
+ aesdeclast KEY, STATE2
+ aesdeclast KEY, STATE3
+ aesdeclast KEY, STATE4
ret
SYM_FUNC_END(_aesni_dec4)
@@ -2599,10 +2598,10 @@ SYM_FUNC_END(aesni_cbc_dec)
SYM_FUNC_START_LOCAL(_aesni_inc_init)
movaps .Lbswap_mask, BSWAP_MASK
movaps IV, CTR
- PSHUFB_XMM BSWAP_MASK CTR
+ pshufb BSWAP_MASK, CTR
mov $1, TCTR_LOW
- MOVQ_R64_XMM TCTR_LOW INC
- MOVQ_R64_XMM CTR TCTR_LOW
+ movq TCTR_LOW, INC
+ movq CTR, TCTR_LOW
ret
SYM_FUNC_END(_aesni_inc_init)
@@ -2630,7 +2629,7 @@ SYM_FUNC_START_LOCAL(_aesni_inc)
psrldq $8, INC
.Linc_low:
movaps CTR, IV
- PSHUFB_XMM BSWAP_MASK IV
+ pshufb BSWAP_MASK, IV
ret
SYM_FUNC_END(_aesni_inc)
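Editor's note: the hunks above replace the uppercase helper macros from asm/inst.h (AESKEYGENASSIST, AESENC, AESENCLAST, AESIMC, AESDEC, PSHUFB_XMM, MOVQ_R64_XMM) with the bare mnemonics, now that the minimum binutils version can assemble them directly; the operands move to normal AT&T order with an explicit $-prefixed immediate. As a rough user-space analogue of the aesenc/aesenclast pattern, here is a sketch using the corresponding C intrinsics; the data and "round key" are placeholders, not the kernel's key schedule.

/* Sketch: user-space analogue of the aesenc/aesenclast sequence, with dummy data.
 * Build: gcc -O2 -maes -msse2 aes_demo.c   (requires an AES-NI capable CPU)
 * The "round key" below is a placeholder, not a real AES key schedule. */
#include <stdio.h>
#include <stdint.h>
#include <wmmintrin.h>          /* _mm_aesenc_si128, _mm_aesenclast_si128 */

int main(void)
{
        __m128i state = _mm_set1_epi8(0x42);    /* stand-in for STATE */
        __m128i key   = _mm_set1_epi8(0x11);    /* stand-in for KEY   */

        /* "aesenc KEY, STATE": one middle AES round applied to STATE  */
        state = _mm_aesenc_si128(state, key);
        /* "aesenclast KEY, STATE": the final round (no MixColumns)    */
        state = _mm_aesenclast_si128(state, key);

        uint8_t out[16];
        _mm_storeu_si128((__m128i *)out, state);
        for (int i = 0; i < 16; i++)
                printf("%02x", out[i]);
        putchar('\n');
        return 0;
}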
diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
index 0cea33295287..5fee47956f3b 100644
--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -120,7 +120,6 @@
##
#include <linux/linkage.h>
-#include <asm/inst.h>
# constants in mergeable sections, linker can reorder and merge
.section .rodata.cst16.POLY, "aM", @progbits, 16
diff --git a/arch/x86/crypto/chacha-ssse3-x86_64.S b/arch/x86/crypto/chacha-ssse3-x86_64.S
index a38ab2512a6f..ca1788bfee16 100644
--- a/arch/x86/crypto/chacha-ssse3-x86_64.S
+++ b/arch/x86/crypto/chacha-ssse3-x86_64.S
@@ -120,10 +120,10 @@ SYM_FUNC_START(chacha_block_xor_ssse3)
FRAME_BEGIN
# x0..3 = s0..3
- movdqa 0x00(%rdi),%xmm0
- movdqa 0x10(%rdi),%xmm1
- movdqa 0x20(%rdi),%xmm2
- movdqa 0x30(%rdi),%xmm3
+ movdqu 0x00(%rdi),%xmm0
+ movdqu 0x10(%rdi),%xmm1
+ movdqu 0x20(%rdi),%xmm2
+ movdqu 0x30(%rdi),%xmm3
movdqa %xmm0,%xmm8
movdqa %xmm1,%xmm9
movdqa %xmm2,%xmm10
@@ -205,10 +205,10 @@ SYM_FUNC_START(hchacha_block_ssse3)
# %edx: nrounds
FRAME_BEGIN
- movdqa 0x00(%rdi),%xmm0
- movdqa 0x10(%rdi),%xmm1
- movdqa 0x20(%rdi),%xmm2
- movdqa 0x30(%rdi),%xmm3
+ movdqu 0x00(%rdi),%xmm0
+ movdqu 0x10(%rdi),%xmm1
+ movdqu 0x20(%rdi),%xmm2
+ movdqu 0x30(%rdi),%xmm3
mov %edx,%r8d
call chacha_permute
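Editor's note: switching the state loads from movdqa to movdqu drops the requirement that the ChaCha state be 16-byte aligned; the unaligned SSE load accepts the naturally (8-byte) aligned buffer that the C glue now passes. A minimal sketch of the distinction with intrinsics follows; the names and buffer are illustrative, not kernel code.

/* Sketch: why movdqu can read a ChaCha state that is only 8-byte aligned.
 * Build: gcc -O2 -msse2 loadu_demo.c */
#include <stdio.h>
#include <stdint.h>
#include <emmintrin.h>          /* _mm_loadu_si128, _mm_storeu_si128 */

int main(void)
{
        /* Only 8-byte alignment is guaranteed, as for the new glue-code buffer. */
        uint32_t state[16] __attribute__((aligned(8))) =
                { 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574 };

        /* movdqu equivalent: accepts any alignment. */
        __m128i row0 = _mm_loadu_si128((const __m128i *)&state[0]);

        /* The movdqa equivalent (_mm_load_si128) would fault (#GP) on a pointer
         * that is not 16-byte aligned, which is why the asm had to switch.     */

        uint32_t out[4];
        _mm_storeu_si128((__m128i *)out, row0);
        printf("%08x %08x %08x %08x\n", out[0], out[1], out[2], out[3]);
        return 0;
}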
diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
index 22250091cdbe..e67a59130025 100644
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -14,8 +14,6 @@
#include <linux/module.h>
#include <asm/simd.h>
-#define CHACHA_STATE_ALIGN 16
-
asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
unsigned int len, int nrounds);
asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
@@ -124,8 +122,6 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
{
- state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
-
if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable()) {
hchacha_block_generic(state, stream, nrounds);
} else {
@@ -138,8 +134,6 @@ EXPORT_SYMBOL(hchacha_block_arch);
void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
{
- state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
-
chacha_init_generic(state, key, iv);
}
EXPORT_SYMBOL(chacha_init_arch);
@@ -147,8 +141,6 @@ EXPORT_SYMBOL(chacha_init_arch);
void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
int nrounds)
{
- state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
-
if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable() ||
bytes <= CHACHA_BLOCK_SIZE)
return chacha_crypt_generic(state, dst, src, bytes, nrounds);
@@ -170,15 +162,12 @@ EXPORT_SYMBOL(chacha_crypt_arch);
static int chacha_simd_stream_xor(struct skcipher_request *req,
const struct chacha_ctx *ctx, const u8 *iv)
{
- u32 *state, state_buf[16 + 2] __aligned(8);
+ u32 state[CHACHA_STATE_WORDS] __aligned(8);
struct skcipher_walk walk;
int err;
err = skcipher_walk_virt(&walk, req, false);
- BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
- state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
-
chacha_init_generic(state, ctx->key, iv);
while (walk.nbytes > 0) {
@@ -217,12 +206,10 @@ static int xchacha_simd(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- u32 *state, state_buf[16 + 2] __aligned(8);
+ u32 state[CHACHA_STATE_WORDS] __aligned(8);
struct chacha_ctx subctx;
u8 real_iv[16];
- BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
- state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
chacha_init_generic(state, ctx->key, req->iv);
if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) {
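Editor's note: with the assembly tolerating unaligned state, the glue code can delete CHACHA_STATE_ALIGN and the PTR_ALIGN round-up; a plain u32 state[CHACHA_STATE_WORDS] on the stack is enough. Below is a user-space sketch of the over-allocate-and-align idiom being removed, where ALIGN_PTR is a local stand-in for the kernel's PTR_ALIGN (an assumption for illustration only).

/* Sketch of the over-allocate-and-round-up idiom this hunk removes.
 * Build: gcc -O2 align_demo.c */
#include <stdio.h>
#include <stdint.h>

#define ALIGN_PTR(p, a) \
        ((void *)(((uintptr_t)(p) + ((a) - 1)) & ~(uintptr_t)((a) - 1)))

int main(void)
{
        /* Old scheme: two spare words guarantee that a 16-byte-aligned window
         * of 16 state words fits somewhere inside the buffer. */
        uint32_t state_buf[16 + 2] __attribute__((aligned(8)));
        uint32_t *aligned_state = ALIGN_PTR(state_buf, 16);

        /* New scheme: a plain, naturally aligned array is sufficient once the
         * SSSE3 code uses unaligned (movdqu) loads. */
        uint32_t state[16] __attribute__((aligned(8)));

        printf("buf=%p  rounded=%p  plain=%p\n",
               (void *)state_buf, (void *)aligned_state, (void *)state);
        return 0;
}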
diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S
index 9fd28ff65bc2..6e7d4c4d3208 100644
--- a/arch/x86/crypto/crc32-pclmul_asm.S
+++ b/arch/x86/crypto/crc32-pclmul_asm.S
@@ -38,7 +38,6 @@
*/
#include <linux/linkage.h>
-#include <asm/inst.h>
.section .rodata
@@ -129,17 +128,17 @@ loop_64:/* 64 bytes Full cache line folding */
#ifdef __x86_64__
movdqa %xmm4, %xmm8
#endif
- PCLMULQDQ 00, CONSTANT, %xmm1
- PCLMULQDQ 00, CONSTANT, %xmm2
- PCLMULQDQ 00, CONSTANT, %xmm3
+ pclmulqdq $0x00, CONSTANT, %xmm1
+ pclmulqdq $0x00, CONSTANT, %xmm2
+ pclmulqdq $0x00, CONSTANT, %xmm3
#ifdef __x86_64__
- PCLMULQDQ 00, CONSTANT, %xmm4
+ pclmulqdq $0x00, CONSTANT, %xmm4
#endif
- PCLMULQDQ 0x11, CONSTANT, %xmm5
- PCLMULQDQ 0x11, CONSTANT, %xmm6
- PCLMULQDQ 0x11, CONSTANT, %xmm7
+ pclmulqdq $0x11, CONSTANT, %xmm5
+ pclmulqdq $0x11, CONSTANT, %xmm6
+ pclmulqdq $0x11, CONSTANT, %xmm7
#ifdef __x86_64__
- PCLMULQDQ 0x11, CONSTANT, %xmm8
+ pclmulqdq $0x11, CONSTANT, %xmm8
#endif
pxor %xmm5, %xmm1
pxor %xmm6, %xmm2
@@ -149,8 +148,8 @@ loop_64:/* 64 bytes Full cache line folding */
#else
/* xmm8 unsupported for x32 */
movdqa %xmm4, %xmm5
- PCLMULQDQ 00, CONSTANT, %xmm4
- PCLMULQDQ 0x11, CONSTANT, %xmm5
+ pclmulqdq $0x00, CONSTANT, %xmm4
+ pclmulqdq $0x11, CONSTANT, %xmm5
pxor %xmm5, %xmm4
#endif
@@ -172,20 +171,20 @@ less_64:/* Folding cache line into 128bit */
prefetchnta (BUF)
movdqa %xmm1, %xmm5
- PCLMULQDQ 0x00, CONSTANT, %xmm1
- PCLMULQDQ 0x11, CONSTANT, %xmm5
+ pclmulqdq $0x00, CONSTANT, %xmm1
+ pclmulqdq $0x11, CONSTANT, %xmm5
pxor %xmm5, %xmm1
pxor %xmm2, %xmm1
movdqa %xmm1, %xmm5
- PCLMULQDQ 0x00, CONSTANT, %xmm1
- PCLMULQDQ 0x11, CONSTANT, %xmm5
+ pclmulqdq $0x00, CONSTANT, %xmm1
+ pclmulqdq $0x11, CONSTANT, %xmm5
pxor %xmm5, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm5
- PCLMULQDQ 0x00, CONSTANT, %xmm1
- PCLMULQDQ 0x11, CONSTANT, %xmm5
+ pclmulqdq $0x00, CONSTANT, %xmm1
+ pclmulqdq $0x11, CONSTANT, %xmm5
pxor %xmm5, %xmm1
pxor %xmm4, %xmm1
@@ -193,8 +192,8 @@ less_64:/* Folding cache line into 128bit */
jb fold_64
loop_16:/* Folding rest buffer into 128bit */
movdqa %xmm1, %xmm5
- PCLMULQDQ 0x00, CONSTANT, %xmm1
- PCLMULQDQ 0x11, CONSTANT, %xmm5
+ pclmulqdq $0x00, CONSTANT, %xmm1
+ pclmulqdq $0x11, CONSTANT, %xmm5
pxor %xmm5, %xmm1
pxor (BUF), %xmm1
sub $0x10, LEN
@@ -205,7 +204,7 @@ loop_16:/* Folding rest buffer into 128bit */
fold_64:
/* perform the last 64 bit fold, also adds 32 zeroes
* to the input stream */
- PCLMULQDQ 0x01, %xmm1, CONSTANT /* R4 * xmm1.low */
+ pclmulqdq $0x01, %xmm1, CONSTANT /* R4 * xmm1.low */
psrldq $0x08, %xmm1
pxor CONSTANT, %xmm1
@@ -220,7 +219,7 @@ fold_64:
#endif
psrldq $0x04, %xmm2
pand %xmm3, %xmm1
- PCLMULQDQ 0x00, CONSTANT, %xmm1
+ pclmulqdq $0x00, CONSTANT, %xmm1
pxor %xmm2, %xmm1
/* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */
@@ -231,11 +230,11 @@ fold_64:
#endif
movdqa %xmm1, %xmm2
pand %xmm3, %xmm1
- PCLMULQDQ 0x10, CONSTANT, %xmm1
+ pclmulqdq $0x10, CONSTANT, %xmm1
pand %xmm3, %xmm1
- PCLMULQDQ 0x00, CONSTANT, %xmm1
+ pclmulqdq $0x00, CONSTANT, %xmm1
pxor %xmm2, %xmm1
- PEXTRD 0x01, %xmm1, %eax
+ pextrd $0x01, %xmm1, %eax
ret
SYM_FUNC_END(crc32_pclmul_le_16)
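Editor's note: here the PCLMULQDQ and PEXTRD macros become pclmulqdq/pextrd mnemonics, and the bare "00" immediates gain an explicit $0x00. The immediate selects which 64-bit halves are multiplied carry-less (0x00 low x low, 0x11 high x high, 0x01/0x10 mixed), which is what the folding steps rely on. A hedged intrinsic sketch of one fold-style step follows; the constants are arbitrary, not the kernel's CRC folding keys.

/* Sketch: what the pclmulqdq imm8 selects, shown with the PCLMUL intrinsic.
 * Build: gcc -O2 -mpclmul -msse2 clmul_demo.c  (needs a PCLMULQDQ-capable CPU) */
#include <stdio.h>
#include <stdint.h>
#include <wmmintrin.h>          /* _mm_clmulepi64_si128 */

int main(void)
{
        __m128i data = _mm_set_epi64x(0x1122334455667788ULL, 0x99aabbccddeeff00ULL);
        __m128i k    = _mm_set_epi64x(0x0000000000000123ULL, 0x0000000000000456ULL);

        /* imm8 0x00: low 64 bits of data x low 64 bits of k   */
        __m128i lo = _mm_clmulepi64_si128(data, k, 0x00);
        /* imm8 0x11: high 64 bits of data x high 64 bits of k */
        __m128i hi = _mm_clmulepi64_si128(data, k, 0x11);

        /* A folding step combines both 128-bit partial products with XOR. */
        __m128i fold = _mm_xor_si128(lo, hi);

        uint64_t out[2];
        _mm_storeu_si128((__m128i *)out, fold);
        printf("%016llx%016llx\n",
               (unsigned long long)out[1], (unsigned long long)out[0]);
        return 0;
}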
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index 8501ec4532f4..884dc767b051 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -43,7 +43,6 @@
* SOFTWARE.
*/
-#include <asm/inst.h>
#include <linux/linkage.h>
#include <asm/nospec-branch.h>
@@ -170,7 +169,7 @@ continue_block:
## branch into array
lea jump_table(%rip), %bufp
- movzxw (%bufp, %rax, 2), len
+ movzwq (%bufp, %rax, 2), len
lea crc_array(%rip), %bufp
lea (%bufp, len, 1), %bufp
JMP_NOSPEC bufp
@@ -225,10 +224,10 @@ LABEL crc_ %i
subq %rax, tmp # tmp -= rax*24
movq crc_init, %xmm1 # CRC for block 1
- PCLMULQDQ 0x00,%xmm0,%xmm1 # Multiply by K2
+ pclmulqdq $0x00, %xmm0, %xmm1 # Multiply by K2
movq crc1, %xmm2 # CRC for block 2
- PCLMULQDQ 0x10, %xmm0, %xmm2 # Multiply by K1
+ pclmulqdq $0x10, %xmm0, %xmm2 # Multiply by K1
pxor %xmm2,%xmm1
movq %xmm1, %rax
diff --git a/arch/x86/crypto/curve25519-x86_64.c b/arch/x86/crypto/curve25519-x86_64.c
index 8a17621f7d3a..8acbb6584a37 100644
--- a/arch/x86/crypto/curve25519-x86_64.c
+++ b/arch/x86/crypto/curve25519-x86_64.c
@@ -948,10 +948,8 @@ static void store_felem(u64 *b, u64 *f)
{
u64 f30 = f[3U];
u64 top_bit0 = f30 >> (u32)63U;
- u64 carry0;
u64 f31;
u64 top_bit;
- u64 carry;
u64 f0;
u64 f1;
u64 f2;
@@ -970,11 +968,11 @@ static void store_felem(u64 *b, u64 *f)
u64 o2;
u64 o3;
f[3U] = f30 & (u64)0x7fffffffffffffffU;
- carry0 = add_scalar(f, f, (u64)19U * top_bit0);
+ add_scalar(f, f, (u64)19U * top_bit0);
f31 = f[3U];
top_bit = f31 >> (u32)63U;
f[3U] = f31 & (u64)0x7fffffffffffffffU;
- carry = add_scalar(f, f, (u64)19U * top_bit);
+ add_scalar(f, f, (u64)19U * top_bit);
f0 = f[0U];
f1 = f[1U];
f2 = f[2U];
diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
index bb9735fbb865..99ac25e18e09 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
@@ -14,7 +14,6 @@
*/
#include <linux/linkage.h>
-#include <asm/inst.h>
#include <asm/frame.h>
.section .rodata.cst16.bswap_mask, "aM", @progbits, 16
@@ -51,9 +50,9 @@ SYM_FUNC_START_LOCAL(__clmul_gf128mul_ble)
pxor DATA, T2
pxor SHASH, T3
- PCLMULQDQ 0x00 SHASH DATA # DATA = a0 * b0
- PCLMULQDQ 0x11 SHASH T1 # T1 = a1 * b1
- PCLMULQDQ 0x00 T3 T2 # T2 = (a1 + a0) * (b1 + b0)
+ pclmulqdq $0x00, SHASH, DATA # DATA = a0 * b0
+ pclmulqdq $0x11, SHASH, T1 # T1 = a1 * b1
+ pclmulqdq $0x00, T3, T2 # T2 = (a1 + a0) * (b1 + b0)
pxor DATA, T2
pxor T1, T2 # T2 = a0 * b1 + a1 * b0
@@ -95,9 +94,9 @@ SYM_FUNC_START(clmul_ghash_mul)
movups (%rdi), DATA
movups (%rsi), SHASH
movaps .Lbswap_mask, BSWAP
- PSHUFB_XMM BSWAP DATA
+ pshufb BSWAP, DATA
call __clmul_gf128mul_ble
- PSHUFB_XMM BSWAP DATA
+ pshufb BSWAP, DATA
movups DATA, (%rdi)
FRAME_END
ret
@@ -114,18 +113,18 @@ SYM_FUNC_START(clmul_ghash_update)
movaps .Lbswap_mask, BSWAP
movups (%rdi), DATA
movups (%rcx), SHASH
- PSHUFB_XMM BSWAP DATA
+ pshufb BSWAP, DATA
.align 4
.Lupdate_loop:
movups (%rsi), IN1
- PSHUFB_XMM BSWAP IN1
+ pshufb BSWAP, IN1
pxor IN1, DATA
call __clmul_gf128mul_ble
sub $16, %rdx
add $16, %rsi
cmp $16, %rdx
jge .Lupdate_loop
- PSHUFB_XMM BSWAP DATA
+ pshufb BSWAP, DATA
movups DATA, (%rdi)
.Lupdate_just_ret:
FRAME_END
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 4208c1e3f601..98e4d8886f11 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -6,6 +6,7 @@
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
+#include <asm/inst.h>
/*
@@ -341,6 +342,12 @@ For 32-bit we have the following conventions - kernel is built with
#endif
.endm
+.macro SAVE_AND_SET_GSBASE scratch_reg:req save_reg:req
+ rdgsbase \save_reg
+ GET_PERCPU_BASE \scratch_reg
+ wrgsbase \scratch_reg
+.endm
+
#else /* CONFIG_X86_64 */
# undef UNWIND_HINT_IRET_REGS
# define UNWIND_HINT_IRET_REGS
@@ -351,3 +358,36 @@ For 32-bit we have the following conventions - kernel is built with
call stackleak_erase
#endif
.endm
+
+#ifdef CONFIG_SMP
+
+/*
+ * CPU/node NR is loaded from the limit (size) field of a special segment
+ * descriptor entry in GDT.
+ */
+.macro LOAD_CPU_AND_NODE_SEG_LIMIT reg:req
+ movq $__CPUNODE_SEG, \reg
+ lsl \reg, \reg
+.endm
+
+/*
+ * Fetch the per-CPU GSBASE value for this processor and put it in @reg.
+ * We normally use %gs for accessing per-CPU data, but we are setting up
+ * %gs here and obviously can not use %gs itself to access per-CPU data.
+ */
+.macro GET_PERCPU_BASE reg:req
+ ALTERNATIVE \
+ "LOAD_CPU_AND_NODE_SEG_LIMIT \reg", \
+ "RDPID \reg", \
+ X86_FEATURE_RDPID
+ andq $VDSO_CPUNODE_MASK, \reg
+ movq __per_cpu_offset(, \reg, 8), \reg
+.endm
+
+#else
+
+.macro GET_PERCPU_BASE reg:req
+ movq pcpu_unit_offsets(%rip), \reg
+.endm
+
+#endif /* CONFIG_SMP */
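Editor's note: GET_PERCPU_BASE obtains this CPU's per-CPU offset without going through %gs: it reads the CPU number either via RDPID (which returns IA32_TSC_AUX) or, on CPUs without RDPID, from the limit field of the CPUNODE GDT segment via LSL, then masks it with VDSO_CPUNODE_MASK and indexes __per_cpu_offset. A user-space sketch of the same CPU/node decode follows; it assumes RDPID support and the (node << 12) | cpu encoding the kernel programs into TSC_AUX, stated here as assumptions rather than guarantees.

/* Sketch: decode CPU and node the way GET_PERCPU_BASE / the vDSO do.
 * Assumptions (not guarantees): the CPU supports RDPID and the kernel has
 * programmed IA32_TSC_AUX as (node << 12) | cpu. Build: gcc -O2 rdpid_demo.c */
#include <stdio.h>

int main(void)
{
        unsigned long p;

        asm volatile("rdpid %0" : "=r" (p));    /* reads IA32_TSC_AUX */

        unsigned int cpu  = p & 0xfff;          /* low 12 bits: CPU number */
        unsigned int node = p >> 12;            /* remaining bits: node    */

        printf("cpu=%u node=%u\n", cpu, node);
        return 0;
}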
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index f09288431f28..48512c7944e7 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -10,20 +10,13 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
+#include <linux/entry-common.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
-#include <linux/tracehook.h>
-#include <linux/audit.h>
-#include <linux/seccomp.h>
-#include <linux/signal.h>
#include <linux/export.h>
-#include <linux/context_tracking.h>
-#include <linux/user-return-notifier.h>
#include <linux/nospec.h>
-#include <linux/uprobes.h>
-#include <linux/livepatch.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
@@ -42,343 +35,12 @@
#include <asm/syscall.h>
#include <asm/irq_stack.h>
-#define CREATE_TRACE_POINTS
-#include <trace/events/syscalls.h>
-
-/* Check that the stack and regs on entry from user mode are sane. */
-static noinstr void check_user_regs(struct pt_regs *regs)
-{
- if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) {
- /*
- * Make sure that the entry code gave us a sensible EFLAGS
- * register. Native because we want to check the actual CPU
- * state, not the interrupt state as imagined by Xen.
- */
- unsigned long flags = native_save_fl();
- WARN_ON_ONCE(flags & (X86_EFLAGS_AC | X86_EFLAGS_DF |
- X86_EFLAGS_NT));
-
- /* We think we came from user mode. Make sure pt_regs agrees. */
- WARN_ON_ONCE(!user_mode(regs));
-
- /*
- * All entries from user mode (except #DF) should be on the
- * normal thread stack and should have user pt_regs in the
- * correct location.
- */
- WARN_ON_ONCE(!on_thread_stack());
- WARN_ON_ONCE(regs != task_pt_regs(current));
- }
-}
-
-#ifdef CONFIG_CONTEXT_TRACKING
-/**
- * enter_from_user_mode - Establish state when coming from user mode
- *
- * Syscall entry disables interrupts, but user mode is traced as interrupts
- * enabled. Also with NO_HZ_FULL RCU might be idle.
- *
- * 1) Tell lockdep that interrupts are disabled
- * 2) Invoke context tracking if enabled to reactivate RCU
- * 3) Trace interrupts off state
- */
-static noinstr void enter_from_user_mode(void)
-{
- enum ctx_state state = ct_state();
-
- lockdep_hardirqs_off(CALLER_ADDR0);
- user_exit_irqoff();
-
- instrumentation_begin();
- CT_WARN_ON(state != CONTEXT_USER);
- trace_hardirqs_off_finish();
- instrumentation_end();
-}
-#else
-static __always_inline void enter_from_user_mode(void)
-{
- lockdep_hardirqs_off(CALLER_ADDR0);
- instrumentation_begin();
- trace_hardirqs_off_finish();
- instrumentation_end();
-}
-#endif
-
-/**
- * exit_to_user_mode - Fixup state when exiting to user mode
- *
- * Syscall exit enables interrupts, but the kernel state is interrupts
- * disabled when this is invoked. Also tell RCU about it.
- *
- * 1) Trace interrupts on state
- * 2) Invoke context tracking if enabled to adjust RCU state
- * 3) Clear CPU buffers if CPU is affected by MDS and the migitation is on.
- * 4) Tell lockdep that interrupts are enabled
- */
-static __always_inline void exit_to_user_mode(void)
-{
- instrumentation_begin();
- trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare(CALLER_ADDR0);
- instrumentation_end();
-
- user_enter_irqoff();
- mds_user_clear_cpu_buffers();
- lockdep_hardirqs_on(CALLER_ADDR0);
-}
-
-static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
-{
-#ifdef CONFIG_X86_64
- if (arch == AUDIT_ARCH_X86_64) {
- audit_syscall_entry(regs->orig_ax, regs->di,
- regs->si, regs->dx, regs->r10);
- } else
-#endif
- {
- audit_syscall_entry(regs->orig_ax, regs->bx,
- regs->cx, regs->dx, regs->si);
- }
-}
-
-/*
- * Returns the syscall nr to run (which should match regs->orig_ax) or -1
- * to skip the syscall.
- */
-static long syscall_trace_enter(struct pt_regs *regs)
-{
- u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
-
- struct thread_info *ti = current_thread_info();
- unsigned long ret = 0;
- u32 work;
-
- work = READ_ONCE(ti->flags);
-
- if (work & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU)) {
- ret = tracehook_report_syscall_entry(regs);
- if (ret || (work & _TIF_SYSCALL_EMU))
- return -1L;
- }
-
-#ifdef CONFIG_SECCOMP
- /*
- * Do seccomp after ptrace, to catch any tracer changes.
- */
- if (work & _TIF_SECCOMP) {
- struct seccomp_data sd;
-
- sd.arch = arch;
- sd.nr = regs->orig_ax;
- sd.instruction_pointer = regs->ip;
-#ifdef CONFIG_X86_64
- if (arch == AUDIT_ARCH_X86_64) {
- sd.args[0] = regs->di;
- sd.args[1] = regs->si;
- sd.args[2] = regs->dx;
- sd.args[3] = regs->r10;
- sd.args[4] = regs->r8;
- sd.args[5] = regs->r9;
- } else
-#endif
- {
- sd.args[0] = regs->bx;
- sd.args[1] = regs->cx;
- sd.args[2] = regs->dx;
- sd.args[3] = regs->si;
- sd.args[4] = regs->di;
- sd.args[5] = regs->bp;
- }
-
- ret = __secure_computing(&sd);
- if (ret == -1)
- return ret;
- }
-#endif
-
- if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
- trace_sys_enter(regs, regs->orig_ax);
-
- do_audit_syscall_entry(regs, arch);
-
- return ret ?: regs->orig_ax;
-}
-
-#define EXIT_TO_USERMODE_LOOP_FLAGS \
- (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
- _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)
-
-static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
-{
- /*
- * In order to return to user mode, we need to have IRQs off with
- * none of EXIT_TO_USERMODE_LOOP_FLAGS set. Several of these flags
- * can be set at any time on preemptible kernels if we have IRQs on,
- * so we need to loop. Disabling preemption wouldn't help: doing the
- * work to clear some of the flags can sleep.
- */
- while (true) {
- /* We have work to do. */
- local_irq_enable();
-
- if (cached_flags & _TIF_NEED_RESCHED)
- schedule();
-
- if (cached_flags & _TIF_UPROBE)
- uprobe_notify_resume(regs);
-
- if (cached_flags & _TIF_PATCH_PENDING)
- klp_update_patch_state(current);
-
- /* deal with pending signal delivery */
- if (cached_flags & _TIF_SIGPENDING)
- do_signal(regs);
-
- if (cached_flags & _TIF_NOTIFY_RESUME) {
- clear_thread_flag(TIF_NOTIFY_RESUME);
- tracehook_notify_resume(regs);
- rseq_handle_notify_resume(NULL, regs);
- }
-
- if (cached_flags & _TIF_USER_RETURN_NOTIFY)
- fire_user_return_notifiers();
-
- /* Disable IRQs and retry */
- local_irq_disable();
-
- cached_flags = READ_ONCE(current_thread_info()->flags);
-
- if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
- break;
- }
-}
-
-static void __prepare_exit_to_usermode(struct pt_regs *regs)
-{
- struct thread_info *ti = current_thread_info();
- u32 cached_flags;
-
- addr_limit_user_check();
-
- lockdep_assert_irqs_disabled();
- lockdep_sys_exit();
-
- cached_flags = READ_ONCE(ti->flags);
-
- if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
- exit_to_usermode_loop(regs, cached_flags);
-
- /* Reload ti->flags; we may have rescheduled above. */
- cached_flags = READ_ONCE(ti->flags);
-
- if (unlikely(cached_flags & _TIF_IO_BITMAP))
- tss_update_io_bitmap();
-
- fpregs_assert_state_consistent();
- if (unlikely(cached_flags & _TIF_NEED_FPU_LOAD))
- switch_fpu_return();
-
-#ifdef CONFIG_COMPAT
- /*
- * Compat syscalls set TS_COMPAT. Make sure we clear it before
- * returning to user mode. We need to clear it *after* signal
- * handling, because syscall restart has a fixup for compat
- * syscalls. The fixup is exercised by the ptrace_syscall_32
- * selftest.
- *
- * We also need to clear TS_REGS_POKED_I386: the 32-bit tracer
- * special case only applies after poking regs and before the
- * very next return to user mode.
- */
- ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
-#endif
-}
-
-static noinstr void prepare_exit_to_usermode(struct pt_regs *regs)
-{
- instrumentation_begin();
- __prepare_exit_to_usermode(regs);
- instrumentation_end();
- exit_to_user_mode();
-}
-
-#define SYSCALL_EXIT_WORK_FLAGS \
- (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
- _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
-
-static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
-{
- bool step;
-
- audit_syscall_exit(regs);
-
- if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
- trace_sys_exit(regs, regs->ax);
-
- /*
- * If TIF_SYSCALL_EMU is set, we only get here because of
- * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
- * We already reported this syscall instruction in
- * syscall_trace_enter().
- */
- step = unlikely(
- (cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
- == _TIF_SINGLESTEP);
- if (step || cached_flags & _TIF_SYSCALL_TRACE)
- tracehook_report_syscall_exit(regs, step);
-}
-
-static void __syscall_return_slowpath(struct pt_regs *regs)
-{
- struct thread_info *ti = current_thread_info();
- u32 cached_flags = READ_ONCE(ti->flags);
-
- CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
-
- if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
- WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
- local_irq_enable();
-
- rseq_syscall(regs);
-
- /*
- * First do one-time work. If these work items are enabled, we
- * want to run them exactly once per syscall exit with IRQs on.
- */
- if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
- syscall_slow_exit_work(regs, cached_flags);
-
- local_irq_disable();
- __prepare_exit_to_usermode(regs);
-}
-
-/*
- * Called with IRQs on and fully valid regs. Returns with IRQs off in a
- * state such that we can immediately switch to user mode.
- */
-__visible noinstr void syscall_return_slowpath(struct pt_regs *regs)
-{
- instrumentation_begin();
- __syscall_return_slowpath(regs);
- instrumentation_end();
- exit_to_user_mode();
-}
-
#ifdef CONFIG_X86_64
__visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
{
- struct thread_info *ti;
+ nr = syscall_enter_from_user_mode(regs, nr);
- check_user_regs(regs);
-
- enter_from_user_mode();
instrumentation_begin();
-
- local_irq_enable();
- ti = current_thread_info();
- if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
- nr = syscall_trace_enter(regs);
-
if (likely(nr < NR_syscalls)) {
nr = array_index_nospec(nr, NR_syscalls);
regs->ax = sys_call_table[nr](regs);
@@ -390,66 +52,55 @@ __visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
regs->ax = x32_sys_call_table[nr](regs);
#endif
}
- __syscall_return_slowpath(regs);
-
instrumentation_end();
- exit_to_user_mode();
+ syscall_exit_to_user_mode(regs);
}
#endif
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
-/*
- * Does a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL. Does
- * all entry and exit work and returns with IRQs off. This function is
- * extremely hot in workloads that use it, and it's usually called from
- * do_fast_syscall_32, so forcibly inline it to improve performance.
- */
-static void do_syscall_32_irqs_on(struct pt_regs *regs)
+static __always_inline unsigned int syscall_32_enter(struct pt_regs *regs)
{
- struct thread_info *ti = current_thread_info();
unsigned int nr = (unsigned int)regs->orig_ax;
-#ifdef CONFIG_IA32_EMULATION
- ti->status |= TS_COMPAT;
-#endif
-
- if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
- /*
- * Subtlety here: if ptrace pokes something larger than
- * 2^32-1 into orig_ax, this truncates it. This may or
- * may not be necessary, but it matches the old asm
- * behavior.
- */
- nr = syscall_trace_enter(regs);
- }
+ if (IS_ENABLED(CONFIG_IA32_EMULATION))
+ current_thread_info()->status |= TS_COMPAT;
+ /*
+ * Subtlety here: if ptrace pokes something larger than 2^32-1 into
+ * orig_ax, the unsigned int return value truncates it. This may
+ * or may not be necessary, but it matches the old asm behavior.
+ */
+ return (unsigned int)syscall_enter_from_user_mode(regs, nr);
+}
+/*
+ * Invoke a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL.
+ */
+static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs,
+ unsigned int nr)
+{
if (likely(nr < IA32_NR_syscalls)) {
+ instrumentation_begin();
nr = array_index_nospec(nr, IA32_NR_syscalls);
regs->ax = ia32_sys_call_table[nr](regs);
+ instrumentation_end();
}
-
- __syscall_return_slowpath(regs);
}
/* Handles int $0x80 */
__visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
{
- check_user_regs(regs);
+ unsigned int nr = syscall_32_enter(regs);
- enter_from_user_mode();
- instrumentation_begin();
-
- local_irq_enable();
- do_syscall_32_irqs_on(regs);
-
- instrumentation_end();
- exit_to_user_mode();
+ do_syscall_32_irqs_on(regs, nr);
+ syscall_exit_to_user_mode(regs);
}
-static bool __do_fast_syscall_32(struct pt_regs *regs)
+static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
{
+ unsigned int nr = syscall_32_enter(regs);
int res;
+ instrumentation_begin();
/* Fetch EBP from where the vDSO stashed it. */
if (IS_ENABLED(CONFIG_X86_64)) {
/*
@@ -462,17 +113,18 @@ static bool __do_fast_syscall_32(struct pt_regs *regs)
res = get_user(*(u32 *)&regs->bp,
(u32 __user __force *)(unsigned long)(u32)regs->sp);
}
+ instrumentation_end();
if (res) {
/* User code screwed up. */
regs->ax = -EFAULT;
- local_irq_disable();
- __prepare_exit_to_usermode(regs);
+ syscall_exit_to_user_mode(regs);
return false;
}
/* Now this is just like a normal syscall. */
- do_syscall_32_irqs_on(regs);
+ do_syscall_32_irqs_on(regs, nr);
+ syscall_exit_to_user_mode(regs);
return true;
}
@@ -485,9 +137,6 @@ __visible noinstr long do_fast_syscall_32(struct pt_regs *regs)
*/
unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
vdso_image_32.sym_int80_landing_pad;
- bool success;
-
- check_user_regs(regs);
/*
* SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
@@ -496,17 +145,8 @@ __visible noinstr long do_fast_syscall_32(struct pt_regs *regs)
*/
regs->ip = landing_pad;
- enter_from_user_mode();
- instrumentation_begin();
-
- local_irq_enable();
- success = __do_fast_syscall_32(regs);
-
- instrumentation_end();
- exit_to_user_mode();
-
- /* If it failed, keep it simple: use IRET. */
- if (!success)
+ /* Invoke the syscall. If it failed, keep it simple: use IRET. */
+ if (!__do_fast_syscall_32(regs))
return 0;
#ifdef CONFIG_X86_64
@@ -558,197 +198,38 @@ SYSCALL_DEFINE0(ni_syscall)
return -ENOSYS;
}
-/**
- * idtentry_enter_cond_rcu - Handle state tracking on idtentry with conditional
- * RCU handling
- * @regs: Pointer to pt_regs of interrupted context
- *
- * Invokes:
- * - lockdep irqflag state tracking as low level ASM entry disabled
- * interrupts.
- *
- * - Context tracking if the exception hit user mode.
- *
- * - The hardirq tracer to keep the state consistent as low level ASM
- * entry disabled interrupts.
- *
- * For kernel mode entries RCU handling is done conditional. If RCU is
- * watching then the only RCU requirement is to check whether the tick has
- * to be restarted. If RCU is not watching then rcu_irq_enter() has to be
- * invoked on entry and rcu_irq_exit() on exit.
- *
- * Avoiding the rcu_irq_enter/exit() calls is an optimization but also
- * solves the problem of kernel mode pagefaults which can schedule, which
- * is not possible after invoking rcu_irq_enter() without undoing it.
- *
- * For user mode entries enter_from_user_mode() must be invoked to
- * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
- * would not be possible.
- *
- * Returns: True if RCU has been adjusted on a kernel entry
- * False otherwise
- *
- * The return value must be fed into the rcu_exit argument of
- * idtentry_exit_cond_rcu().
- */
-bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
+noinstr bool idtentry_enter_nmi(struct pt_regs *regs)
{
- if (user_mode(regs)) {
- check_user_regs(regs);
- enter_from_user_mode();
- return false;
- }
+ bool irq_state = lockdep_hardirqs_enabled();
- /*
- * If this entry hit the idle task invoke rcu_irq_enter() whether
- * RCU is watching or not.
- *
- * Interupts can nest when the first interrupt invokes softirq
- * processing on return which enables interrupts.
- *
- * Scheduler ticks in the idle task can mark quiescent state and
- * terminate a grace period, if and only if the timer interrupt is
- * not nested into another interrupt.
- *
- * Checking for __rcu_is_watching() here would prevent the nesting
- * interrupt to invoke rcu_irq_enter(). If that nested interrupt is
- * the tick then rcu_flavor_sched_clock_irq() would wrongfully
- * assume that it is the first interupt and eventually claim
- * quiescient state and end grace periods prematurely.
- *
- * Unconditionally invoke rcu_irq_enter() so RCU state stays
- * consistent.
- *
- * TINY_RCU does not support EQS, so let the compiler eliminate
- * this part when enabled.
- */
- if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
- /*
- * If RCU is not watching then the same careful
- * sequence vs. lockdep and tracing is required
- * as in enter_from_user_mode().
- */
- lockdep_hardirqs_off(CALLER_ADDR0);
- rcu_irq_enter();
- instrumentation_begin();
- trace_hardirqs_off_finish();
- instrumentation_end();
-
- return true;
- }
+ __nmi_enter();
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ lockdep_hardirq_enter();
+ rcu_nmi_enter();
- /*
- * If RCU is watching then RCU only wants to check whether it needs
- * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
- * already contains a warning when RCU is not watching, so no point
- * in having another one here.
- */
instrumentation_begin();
- rcu_irq_enter_check_tick();
- /* Use the combo lockdep/tracing function */
- trace_hardirqs_off();
+ trace_hardirqs_off_finish();
+ ftrace_nmi_enter();
instrumentation_end();
- return false;
-}
-
-static void idtentry_exit_cond_resched(struct pt_regs *regs, bool may_sched)
-{
- if (may_sched && !preempt_count()) {
- /* Sanity check RCU and thread stack */
- rcu_irq_exit_check_preempt();
- if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
- WARN_ON_ONCE(!on_thread_stack());
- if (need_resched())
- preempt_schedule_irq();
- }
- /* Covers both tracing and lockdep */
- trace_hardirqs_on();
+ return irq_state;
}
-/**
- * idtentry_exit_cond_rcu - Handle return from exception with conditional RCU
- * handling
- * @regs: Pointer to pt_regs (exception entry regs)
- * @rcu_exit: Invoke rcu_irq_exit() if true
- *
- * Depending on the return target (kernel/user) this runs the necessary
- * preemption and work checks if possible and reguired and returns to
- * the caller with interrupts disabled and no further work pending.
- *
- * This is the last action before returning to the low level ASM code which
- * just needs to return to the appropriate context.
- *
- * Counterpart to idtentry_enter_cond_rcu(). The return value of the entry
- * function must be fed into the @rcu_exit argument.
- */
-void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
+noinstr void idtentry_exit_nmi(struct pt_regs *regs, bool restore)
{
- lockdep_assert_irqs_disabled();
-
- /* Check whether this returns to user mode */
- if (user_mode(regs)) {
- prepare_exit_to_usermode(regs);
- } else if (regs->flags & X86_EFLAGS_IF) {
- /*
- * If RCU was not watching on entry this needs to be done
- * carefully and needs the same ordering of lockdep/tracing
- * and RCU as the return to user mode path.
- */
- if (rcu_exit) {
- instrumentation_begin();
- /* Tell the tracer that IRET will enable interrupts */
- trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare(CALLER_ADDR0);
- instrumentation_end();
- rcu_irq_exit();
- lockdep_hardirqs_on(CALLER_ADDR0);
- return;
- }
-
- instrumentation_begin();
- idtentry_exit_cond_resched(regs, IS_ENABLED(CONFIG_PREEMPTION));
- instrumentation_end();
- } else {
- /*
- * IRQ flags state is correct already. Just tell RCU if it
- * was not watching on entry.
- */
- if (rcu_exit)
- rcu_irq_exit();
+ instrumentation_begin();
+ ftrace_nmi_exit();
+ if (restore) {
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
}
-}
-
-/**
- * idtentry_enter_user - Handle state tracking on idtentry from user mode
- * @regs: Pointer to pt_regs of interrupted context
- *
- * Invokes enter_from_user_mode() to establish the proper context for
- * NOHZ_FULL. Otherwise scheduling on exit would not be possible.
- */
-void noinstr idtentry_enter_user(struct pt_regs *regs)
-{
- check_user_regs(regs);
- enter_from_user_mode();
-}
-
-/**
- * idtentry_exit_user - Handle return from exception to user mode
- * @regs: Pointer to pt_regs (exception entry regs)
- *
- * Runs the necessary preemption and work checks and returns to the caller
- * with interrupts disabled and no further work pending.
- *
- * This is the last action before returning to the low level ASM code which
- * just needs to return to the appropriate context.
- *
- * Counterpart to idtentry_enter_user().
- */
-void noinstr idtentry_exit_user(struct pt_regs *regs)
-{
- lockdep_assert_irqs_disabled();
+ instrumentation_end();
- prepare_exit_to_usermode(regs);
+ rcu_nmi_exit();
+ lockdep_hardirq_exit();
+ if (restore)
+ lockdep_hardirqs_on(CALLER_ADDR0);
+ __nmi_exit();
}
#ifdef CONFIG_XEN_PV
@@ -800,9 +281,10 @@ static void __xen_pv_evtchn_do_upcall(void)
__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
{
struct pt_regs *old_regs;
- bool inhcall, rcu_exit;
+ bool inhcall;
+ irqentry_state_t state;
- rcu_exit = idtentry_enter_cond_rcu(regs);
+ state = irqentry_enter(regs);
old_regs = set_irq_regs(regs);
instrumentation_begin();
@@ -812,13 +294,13 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
set_irq_regs(old_regs);
inhcall = get_and_clear_inhcall();
- if (inhcall && !WARN_ON_ONCE(rcu_exit)) {
+ if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
instrumentation_begin();
- idtentry_exit_cond_resched(regs, true);
+ irqentry_exit_cond_resched();
instrumentation_end();
restore_inhcall(inhcall);
} else {
- idtentry_exit_cond_rcu(regs, rcu_exit);
+ irqentry_exit(regs, state);
}
}
#endif /* CONFIG_XEN_PV */
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 2d0bd5d5f032..29b7d52143e9 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -846,7 +846,7 @@ SYM_CODE_START(ret_from_fork)
2:
/* When we fork, we trace the syscall return in the child, too. */
movl %esp, %eax
- call syscall_return_slowpath
+ call syscall_exit_to_user_mode
jmp .Lsyscall_32_done
/* kernel thread */
@@ -854,7 +854,7 @@ SYM_CODE_START(ret_from_fork)
CALL_NOSPEC ebx
/*
* A kernel thread is allowed to return here after successfully
- * calling do_execve(). Exit to userspace to complete the execve()
+ * calling kernel_execve(). Exit to userspace to complete the execve()
* syscall.
*/
movl $0, PT_EAX(%esp)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index d2a00c97e53f..70dea9337816 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -38,6 +38,7 @@
#include <asm/frame.h>
#include <asm/trapnr.h>
#include <asm/nospec-branch.h>
+#include <asm/fsgsbase.h>
#include <linux/err.h>
#include "calling.h"
@@ -283,7 +284,7 @@ SYM_CODE_START(ret_from_fork)
2:
UNWIND_HINT_REGS
movq %rsp, %rdi
- call syscall_return_slowpath /* returns with IRQs disabled */
+ call syscall_exit_to_user_mode /* returns with IRQs disabled */
jmp swapgs_restore_regs_and_return_to_usermode
1:
@@ -293,7 +294,7 @@ SYM_CODE_START(ret_from_fork)
CALL_NOSPEC rbx
/*
* A kernel thread is allowed to return here after successfully
- * calling do_execve(). Exit to userspace to complete the execve()
+ * calling kernel_execve(). Exit to userspace to complete the execve()
* syscall.
*/
movq $0, RAX(%rsp)
@@ -426,10 +427,7 @@ SYM_CODE_START(\asmsym)
testb $3, CS-ORIG_RAX(%rsp)
jnz .Lfrom_usermode_switch_stack_\@
- /*
- * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
- * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
- */
+ /* paranoid_entry returns GS information for paranoid_exit in EBX. */
call paranoid_entry
UNWIND_HINT_REGS
@@ -458,10 +456,7 @@ SYM_CODE_START(\asmsym)
UNWIND_HINT_IRET_REGS offset=8
ASM_CLAC
- /*
- * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
- * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
- */
+ /* paranoid_entry returns GS information for paranoid_exit in EBX. */
call paranoid_entry
UNWIND_HINT_REGS
@@ -798,24 +793,21 @@ SYM_CODE_END(xen_failsafe_callback)
#endif /* CONFIG_XEN_PV */
/*
- * Save all registers in pt_regs, and switch gs if needed.
- * Use slow, but surefire "are we in kernel?" check.
- * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
+ * Save all registers in pt_regs. Return GSBASE related information
+ * in EBX depending on the availability of the FSGSBASE instructions:
+ *
+ * FSGSBASE R/EBX
+ * N 0 -> SWAPGS on exit
+ * 1 -> no SWAPGS on exit
+ *
+ * Y GSBASE value at entry, must be restored in paranoid_exit
*/
SYM_CODE_START_LOCAL(paranoid_entry)
UNWIND_HINT_FUNC
cld
PUSH_AND_CLEAR_REGS save_ret=1
ENCODE_FRAME_POINTER 8
- movl $1, %ebx
- movl $MSR_GS_BASE, %ecx
- rdmsr
- testl %edx, %edx
- js 1f /* negative -> in kernel */
- SWAPGS
- xorl %ebx, %ebx
-1:
/*
* Always stash CR3 in %r14. This value will be restored,
* verbatim, at exit. Needed if paranoid_entry interrupted
@@ -825,16 +817,60 @@ SYM_CODE_START_LOCAL(paranoid_entry)
* This is also why CS (stashed in the "iret frame" by the
* hardware at entry) can not be used: this may be a return
* to kernel code, but with a user CR3 value.
+ *
+ * Switching CR3 does not depend on kernel GSBASE so it can
+ * be done before switching to the kernel GSBASE. This is
+ * required for FSGSBASE because the kernel GSBASE has to
+ * be retrieved from a kernel internal table.
*/
SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
/*
+ * Handling GSBASE depends on the availability of FSGSBASE.
+ *
+ * Without FSGSBASE the kernel enforces that negative GSBASE
+ * values indicate kernel GSBASE. With FSGSBASE no assumptions
+ * can be made about the GSBASE value when entering from user
+ * space.
+ */
+ ALTERNATIVE "jmp .Lparanoid_entry_checkgs", "", X86_FEATURE_FSGSBASE
+
+ /*
+ * Read the current GSBASE and store it in %rbx unconditionally,
+ * retrieve and set the current CPUs kernel GSBASE. The stored value
+ * has to be restored in paranoid_exit unconditionally.
+ *
+ * The MSR write ensures that no subsequent load is based on a
+ * mispredicted GSBASE. No extra FENCE required.
+ */
+ SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
+ ret
+
+.Lparanoid_entry_checkgs:
+ /* EBX = 1 -> kernel GSBASE active, no restore required */
+ movl $1, %ebx
+ /*
+ * The kernel-enforced convention is a negative GSBASE indicates
+ * a kernel value. No SWAPGS needed on entry and exit.
+ */
+ movl $MSR_GS_BASE, %ecx
+ rdmsr
+ testl %edx, %edx
+ jns .Lparanoid_entry_swapgs
+ ret
+
+.Lparanoid_entry_swapgs:
+ SWAPGS
+
+ /*
* The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
* unconditional CR3 write, even in the PTI case. So do an lfence
* to prevent GS speculation, regardless of whether PTI is enabled.
*/
FENCE_SWAPGS_KERNEL_ENTRY
+ /* EBX = 0 -> SWAPGS required on exit */
+ xorl %ebx, %ebx
ret
SYM_CODE_END(paranoid_entry)
@@ -845,23 +881,45 @@ SYM_CODE_END(paranoid_entry)
*
* We may be returning to very strange contexts (e.g. very early
* in syscall entry), so checking for preemption here would
- * be complicated. Fortunately, we there's no good reason
- * to try to handle preemption here.
+ * be complicated. Fortunately, there's no good reason to try
+ * to handle preemption here.
+ *
+ * R/EBX contains the GSBASE related information depending on the
+ * availability of the FSGSBASE instructions:
+ *
+ * FSGSBASE R/EBX
+ * N 0 -> SWAPGS on exit
+ * 1 -> no SWAPGS on exit
*
- * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
+ * Y User space GSBASE, must be restored unconditionally
*/
SYM_CODE_START_LOCAL(paranoid_exit)
UNWIND_HINT_REGS
- testl %ebx, %ebx /* swapgs needed? */
- jnz .Lparanoid_exit_no_swapgs
- /* Always restore stashed CR3 value (see paranoid_entry) */
- RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
+ /*
+ * The order of operations is important. RESTORE_CR3 requires
+ * kernel GSBASE.
+ *
+ * NB to anyone to try to optimize this code: this code does
+ * not execute at all for exceptions from user mode. Those
+ * exceptions go through error_exit instead.
+ */
+ RESTORE_CR3 scratch_reg=%rax save_reg=%r14
+
+ /* Handle the three GSBASE cases */
+ ALTERNATIVE "jmp .Lparanoid_exit_checkgs", "", X86_FEATURE_FSGSBASE
+
+ /* With FSGSBASE enabled, unconditionally restore GSBASE */
+ wrgsbase %rbx
+ jmp restore_regs_and_return_to_kernel
+
+.Lparanoid_exit_checkgs:
+ /* On non-FSGSBASE systems, conditionally do SWAPGS */
+ testl %ebx, %ebx
+ jnz restore_regs_and_return_to_kernel
+
+ /* We are returning to a context with user GSBASE */
SWAPGS_UNSAFE_STACK
- jmp restore_regs_and_return_to_kernel
-.Lparanoid_exit_no_swapgs:
- /* Always restore stashed CR3 value (see paranoid_entry) */
- RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
- jmp restore_regs_and_return_to_kernel
+ jmp restore_regs_and_return_to_kernel
SYM_CODE_END(paranoid_exit)
/*
@@ -1266,10 +1324,27 @@ end_repeat_nmi:
/* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
- testl %ebx, %ebx /* swapgs needed? */
+ /*
+ * The above invocation of paranoid_entry stored the GSBASE
+ * related information in R/EBX depending on the availability
+ * of FSGSBASE.
+ *
+ * If FSGSBASE is enabled, restore the saved GSBASE value
+ * unconditionally, otherwise take the conditional SWAPGS path.
+ */
+ ALTERNATIVE "jmp nmi_no_fsgsbase", "", X86_FEATURE_FSGSBASE
+
+ wrgsbase %rbx
+ jmp nmi_restore
+
+nmi_no_fsgsbase:
+ /* EBX == 0 -> invoke SWAPGS */
+ testl %ebx, %ebx
jnz nmi_restore
+
nmi_swapgs:
SWAPGS_UNSAFE_STACK
+
nmi_restore:
POP_REGS
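Editor's note: in this rework, paranoid_entry/paranoid_exit and the NMI path carry GSBASE state in %rbx. Without FSGSBASE it stays a 0/1 "SWAPGS needed" flag; with FSGSBASE it holds the GSBASE value captured at entry by rdgsbase (via SAVE_AND_SET_GSBASE) and restored by wrgsbase. The same instruction pair is what the FSGSBASE series exposes to user space; a sketch on the FS base follows, assuming a CPU and kernel with CR4.FSGSBASE enabled (the program takes SIGILL otherwise).

/* Sketch: the save/restore pattern of paranoid_entry/paranoid_exit, shown with
 * the user-space FSGSBASE intrinsics on the FS base.
 * Build: gcc -O2 -mfsgsbase fsgs_demo.c */
#include <stdio.h>
#include <immintrin.h>          /* _readfsbase_u64, _writefsbase_u64 */

int main(void)
{
        unsigned long long saved = _readfsbase_u64();   /* cf. "rdgsbase %rbx" */

        /* ... an interrupted context would run with a different base here ... */

        _writefsbase_u64(saved);                        /* cf. "wrgsbase %rbx" */
        printf("fsbase=%#llx\n", saved);
        return 0;
}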
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index d8f8a1a69ed1..3f0c6546b47c 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -440,6 +440,7 @@
433 i386 fspick sys_fspick
434 i386 pidfd_open sys_pidfd_open
435 i386 clone3 sys_clone3
+436 i386 close_range sys_close_range
437 i386 openat2 sys_openat2
438 i386 pidfd_getfd sys_pidfd_getfd
439 i386 faccessat2 sys_faccessat2
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 78847b32e137..f8637c2c863d 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -357,6 +357,7 @@
433 common fspick sys_fspick
434 common pidfd_open sys_pidfd_open
435 common clone3 sys_clone3
+436 common close_range sys_close_range
437 common openat2 sys_openat2
438 common pidfd_getfd sys_pidfd_getfd
439 common faccessat2 sys_faccessat2
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index ea7c1f0b79df..9185cb1d13b9 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -144,8 +144,7 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
struct mm_struct *mm = task->mm;
struct vm_area_struct *vma;
- if (mmap_write_lock_killable(mm))
- return -EINTR;
+ mmap_read_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
unsigned long size = vma->vm_end - vma->vm_start;
@@ -154,7 +153,7 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
zap_page_range(vma, vma->vm_start, size);
}
- mmap_write_unlock(mm);
+ mmap_read_unlock(mm);
return 0;
}
#else
diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c
index 43b09e9c93a2..16a2369c586e 100644
--- a/arch/x86/events/amd/power.c
+++ b/arch/x86/events/amd/power.c
@@ -13,10 +13,6 @@
#include <asm/cpu_device_id.h>
#include "../perf_event.h"
-#define MSR_F15H_CU_PWR_ACCUMULATOR 0xc001007a
-#define MSR_F15H_CU_MAX_PWR_ACCUMULATOR 0xc001007b
-#define MSR_F15H_PTSC 0xc0010280
-
/* Event code: LSB 8 bits, passed in attr->config any other bit is reserved. */
#define AMD_POWER_EVENT_MASK 0xFFULL
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 4103665c6e03..1cbf57dc2ac8 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -71,10 +71,9 @@ u64 x86_perf_event_update(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
int shift = 64 - x86_pmu.cntval_bits;
u64 prev_raw_count, new_raw_count;
- int idx = hwc->idx;
u64 delta;
- if (idx == INTEL_PMC_IDX_FIXED_BTS)
+ if (unlikely(!hwc->event_base))
return 0;
/*
@@ -359,6 +358,7 @@ void x86_release_hardware(void)
if (atomic_dec_and_mutex_lock(&pmc_refcount, &pmc_reserve_mutex)) {
release_pmc_hardware();
release_ds_buffers();
+ release_lbr_buffers();
mutex_unlock(&pmc_reserve_mutex);
}
}
@@ -1097,22 +1097,31 @@ static inline void x86_assign_hw_event(struct perf_event *event,
struct cpu_hw_events *cpuc, int i)
{
struct hw_perf_event *hwc = &event->hw;
+ int idx;
- hwc->idx = cpuc->assign[i];
+ idx = hwc->idx = cpuc->assign[i];
hwc->last_cpu = smp_processor_id();
hwc->last_tag = ++cpuc->tags[i];
- if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
+ switch (hwc->idx) {
+ case INTEL_PMC_IDX_FIXED_BTS:
+ case INTEL_PMC_IDX_FIXED_VLBR:
hwc->config_base = 0;
hwc->event_base = 0;
- } else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
+ break;
+
+ case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS-1:
hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
- hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
- hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
- } else {
+ hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 +
+ (idx - INTEL_PMC_IDX_FIXED);
+ hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) | 1<<30;
+ break;
+
+ default:
hwc->config_base = x86_pmu_config_addr(hwc->idx);
hwc->event_base = x86_pmu_event_addr(hwc->idx);
hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
+ break;
}
}
@@ -1233,7 +1242,7 @@ int x86_perf_event_set_period(struct perf_event *event)
s64 period = hwc->sample_period;
int ret = 0, idx = hwc->idx;
- if (idx == INTEL_PMC_IDX_FIXED_BTS)
+ if (unlikely(!hwc->event_base))
return 0;
/*
@@ -2363,7 +2372,6 @@ static struct pmu pmu = {
.event_idx = x86_pmu_event_idx,
.sched_task = x86_pmu_sched_task,
- .task_ctx_size = sizeof(struct x86_perf_task_context),
.swap_task_ctx = x86_pmu_swap_task_ctx,
.check_period = x86_pmu_check_period,
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index ca35c8b5ee10..50963472ee85 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2136,8 +2136,35 @@ static inline void intel_pmu_ack_status(u64 ack)
wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
-static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
+static inline bool event_is_checkpointed(struct perf_event *event)
{
+ return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
+}
+
+static inline void intel_set_masks(struct perf_event *event, int idx)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ if (event->attr.exclude_host)
+ __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
+ if (event->attr.exclude_guest)
+ __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
+ if (event_is_checkpointed(event))
+ __set_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
+}
+
+static inline void intel_clear_masks(struct perf_event *event, int idx)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
+ __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
+ __clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
+}
+
+static void intel_pmu_disable_fixed(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
u64 ctrl_val, mask;
@@ -2148,30 +2175,22 @@ static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
wrmsrl(hwc->config_base, ctrl_val);
}
-static inline bool event_is_checkpointed(struct perf_event *event)
-{
- return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
-}
-
static void intel_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
- struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ int idx = hwc->idx;
- if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
+ if (idx < INTEL_PMC_IDX_FIXED) {
+ intel_clear_masks(event, idx);
+ x86_pmu_disable_event(event);
+ } else if (idx < INTEL_PMC_IDX_FIXED_BTS) {
+ intel_clear_masks(event, idx);
+ intel_pmu_disable_fixed(event);
+ } else if (idx == INTEL_PMC_IDX_FIXED_BTS) {
intel_pmu_disable_bts();
intel_pmu_drain_bts_buffer();
- return;
- }
-
- cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
- cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
- cpuc->intel_cp_status &= ~(1ull << hwc->idx);
-
- if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
- intel_pmu_disable_fixed(hwc);
- else
- x86_pmu_disable_event(event);
+ } else if (idx == INTEL_PMC_IDX_FIXED_VLBR)
+ intel_clear_masks(event, idx);
/*
* Needs to be called after x86_pmu_disable_event,
@@ -2238,33 +2257,23 @@ static void intel_pmu_enable_fixed(struct perf_event *event)
static void intel_pmu_enable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
- struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
-
- if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
- if (!__this_cpu_read(cpu_hw_events.enabled))
- return;
-
- intel_pmu_enable_bts(hwc->config);
- return;
- }
-
- if (event->attr.exclude_host)
- cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
- if (event->attr.exclude_guest)
- cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
-
- if (unlikely(event_is_checkpointed(event)))
- cpuc->intel_cp_status |= (1ull << hwc->idx);
+ int idx = hwc->idx;
if (unlikely(event->attr.precise_ip))
intel_pmu_pebs_enable(event);
- if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
+ if (idx < INTEL_PMC_IDX_FIXED) {
+ intel_set_masks(event, idx);
+ __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
+ } else if (idx < INTEL_PMC_IDX_FIXED_BTS) {
+ intel_set_masks(event, idx);
intel_pmu_enable_fixed(event);
- return;
- }
-
- __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
+ } else if (idx == INTEL_PMC_IDX_FIXED_BTS) {
+ if (!__this_cpu_read(cpu_hw_events.enabled))
+ return;
+ intel_pmu_enable_bts(hwc->config);
+ } else if (idx == INTEL_PMC_IDX_FIXED_VLBR)
+ intel_set_masks(event, idx);
}
static void intel_pmu_add_event(struct perf_event *event)
@@ -2614,6 +2623,20 @@ intel_bts_constraints(struct perf_event *event)
return NULL;
}
+/*
+ * Note: matches a fake event, like Fixed2.
+ */
+static struct event_constraint *
+intel_vlbr_constraints(struct perf_event *event)
+{
+ struct event_constraint *c = &vlbr_constraint;
+
+ if (unlikely(constraint_match(c, event->hw.config)))
+ return c;
+
+ return NULL;
+}
+
static int intel_alt_er(int idx, u64 config)
{
int alt_idx = idx;
@@ -2804,6 +2827,10 @@ __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
{
struct event_constraint *c;
+ c = intel_vlbr_constraints(event);
+ if (c)
+ return c;
+
c = intel_bts_constraints(event);
if (c)
return c;
@@ -3951,6 +3978,11 @@ static __initconst const struct x86_pmu core_pmu = {
.cpu_dead = intel_pmu_cpu_dead,
.check_period = intel_pmu_check_period,
+
+ .lbr_reset = intel_pmu_lbr_reset_64,
+ .lbr_read = intel_pmu_lbr_read_64,
+ .lbr_save = intel_pmu_lbr_save,
+ .lbr_restore = intel_pmu_lbr_restore,
};
static __initconst const struct x86_pmu intel_pmu = {
@@ -3996,6 +4028,11 @@ static __initconst const struct x86_pmu intel_pmu = {
.check_period = intel_pmu_check_period,
.aux_output_match = intel_pmu_aux_output_match,
+
+ .lbr_reset = intel_pmu_lbr_reset_64,
+ .lbr_read = intel_pmu_lbr_read_64,
+ .lbr_save = intel_pmu_lbr_save,
+ .lbr_restore = intel_pmu_lbr_restore,
};
static __init void intel_clovertown_quirk(void)
@@ -4622,6 +4659,14 @@ __init int intel_pmu_init(void)
x86_pmu.intel_cap.capabilities = capabilities;
}
+ if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) {
+ x86_pmu.lbr_reset = intel_pmu_lbr_reset_32;
+ x86_pmu.lbr_read = intel_pmu_lbr_read_32;
+ }
+
+ if (boot_cpu_has(X86_FEATURE_ARCH_LBR))
+ intel_pmu_arch_lbr_init();
+
intel_ds_init();
x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index dc43cc124e09..86848c57b55e 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -954,7 +954,7 @@ static void adaptive_pebs_record_size_update(void)
if (pebs_data_cfg & PEBS_DATACFG_XMMS)
sz += sizeof(struct pebs_xmm);
if (pebs_data_cfg & PEBS_DATACFG_LBRS)
- sz += x86_pmu.lbr_nr * sizeof(struct pebs_lbr_entry);
+ sz += x86_pmu.lbr_nr * sizeof(struct lbr_entry);
cpuc->pebs_record_size = sz;
}
@@ -1595,10 +1595,10 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
}
if (format_size & PEBS_DATACFG_LBRS) {
- struct pebs_lbr *lbr = next_record;
+ struct lbr_entry *lbr = next_record;
int num_lbr = ((format_size >> PEBS_DATACFG_LBR_SHIFT)
& 0xff) + 1;
- next_record = next_record + num_lbr*sizeof(struct pebs_lbr_entry);
+ next_record = next_record + num_lbr * sizeof(struct lbr_entry);
if (has_branch_stack(event)) {
intel_pmu_store_pebs_lbrs(lbr);
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 65113b16804a..63f58bdf556c 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -8,17 +8,6 @@
#include "../perf_event.h"
-enum {
- LBR_FORMAT_32 = 0x00,
- LBR_FORMAT_LIP = 0x01,
- LBR_FORMAT_EIP = 0x02,
- LBR_FORMAT_EIP_FLAGS = 0x03,
- LBR_FORMAT_EIP_FLAGS2 = 0x04,
- LBR_FORMAT_INFO = 0x05,
- LBR_FORMAT_TIME = 0x06,
- LBR_FORMAT_MAX_KNOWN = LBR_FORMAT_TIME,
-};
-
static const enum {
LBR_EIP_FLAGS = 1,
LBR_TSX = 2,
@@ -143,8 +132,54 @@ enum {
X86_BR_IRQ |\
X86_BR_INT)
+/*
+ * Intel LBR_CTL bits
+ *
+ * Hardware branch filter for Arch LBR
+ */
+#define ARCH_LBR_KERNEL_BIT 1 /* capture at ring0 */
+#define ARCH_LBR_USER_BIT 2 /* capture at ring > 0 */
+#define ARCH_LBR_CALL_STACK_BIT 3 /* enable call stack */
+#define ARCH_LBR_JCC_BIT 16 /* capture conditional branches */
+#define ARCH_LBR_REL_JMP_BIT 17 /* capture relative jumps */
+#define ARCH_LBR_IND_JMP_BIT 18 /* capture indirect jumps */
+#define ARCH_LBR_REL_CALL_BIT 19 /* capture relative calls */
+#define ARCH_LBR_IND_CALL_BIT 20 /* capture indirect calls */
+#define ARCH_LBR_RETURN_BIT 21 /* capture near returns */
+#define ARCH_LBR_OTHER_BRANCH_BIT 22 /* capture other branches */
+
+#define ARCH_LBR_KERNEL (1ULL << ARCH_LBR_KERNEL_BIT)
+#define ARCH_LBR_USER (1ULL << ARCH_LBR_USER_BIT)
+#define ARCH_LBR_CALL_STACK (1ULL << ARCH_LBR_CALL_STACK_BIT)
+#define ARCH_LBR_JCC (1ULL << ARCH_LBR_JCC_BIT)
+#define ARCH_LBR_REL_JMP (1ULL << ARCH_LBR_REL_JMP_BIT)
+#define ARCH_LBR_IND_JMP (1ULL << ARCH_LBR_IND_JMP_BIT)
+#define ARCH_LBR_REL_CALL (1ULL << ARCH_LBR_REL_CALL_BIT)
+#define ARCH_LBR_IND_CALL (1ULL << ARCH_LBR_IND_CALL_BIT)
+#define ARCH_LBR_RETURN (1ULL << ARCH_LBR_RETURN_BIT)
+#define ARCH_LBR_OTHER_BRANCH (1ULL << ARCH_LBR_OTHER_BRANCH_BIT)
+
+#define ARCH_LBR_ANY \
+ (ARCH_LBR_JCC |\
+ ARCH_LBR_REL_JMP |\
+ ARCH_LBR_IND_JMP |\
+ ARCH_LBR_REL_CALL |\
+ ARCH_LBR_IND_CALL |\
+ ARCH_LBR_RETURN |\
+ ARCH_LBR_OTHER_BRANCH)
+
+#define ARCH_LBR_CTL_MASK 0x7f000e
+
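The writable LBR_CTL bits therefore fall into two clusters, bits 1-3 for the privilege/call-stack controls and bits 16-22 for the branch-type filter, which is where the 0x7f000e mask comes from. A small standalone check of that value, built only from the bit positions defined above:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long long mask = 0;
	int cpl_bits[] = { 1, 2, 3 };			/* KERNEL, USER, CALL_STACK */
	int type_bits[] = { 16, 17, 18, 19, 20, 21, 22 };	/* JCC .. OTHER_BRANCH */

	for (unsigned int i = 0; i < 3; i++)
		mask |= 1ULL << cpl_bits[i];
	for (unsigned int i = 0; i < 7; i++)
		mask |= 1ULL << type_bits[i];

	assert(mask == 0x7f000eULL);
	printf("ARCH_LBR_CTL_MASK = %#llx\n", mask);
	return 0;
}

Note that bit 0 (ARCH_LBR_CTL_LBREN) is deliberately not part of the mask; the enable bit is OR'd in separately when the LBRs are switched on.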
static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
+static __always_inline bool is_lbr_call_stack_bit_set(u64 config)
+{
+ if (static_cpu_has(X86_FEATURE_ARCH_LBR))
+ return !!(config & ARCH_LBR_CALL_STACK);
+
+ return !!(config & LBR_CALL_STACK);
+}
+
/*
* We only support LBR implementations that have FREEZE_LBRS_ON_PMI
* otherwise it becomes near impossible to get a reliable stack.
@@ -168,33 +203,46 @@ static void __intel_pmu_lbr_enable(bool pmi)
*/
if (cpuc->lbr_sel)
lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
- if (!pmi && cpuc->lbr_sel)
+ if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && !pmi && cpuc->lbr_sel)
wrmsrl(MSR_LBR_SELECT, lbr_select);
rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
orig_debugctl = debugctl;
- debugctl |= DEBUGCTLMSR_LBR;
+
+ if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
+ debugctl |= DEBUGCTLMSR_LBR;
/*
* LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
* If FREEZE_LBRS_ON_PMI is set, PMI near call/return instructions
* may cause superfluous increase/decrease of LBR_TOS.
*/
- if (!(lbr_select & LBR_CALL_STACK))
+ if (is_lbr_call_stack_bit_set(lbr_select))
+ debugctl &= ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
+ else
debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
+
if (orig_debugctl != debugctl)
wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+
+ if (static_cpu_has(X86_FEATURE_ARCH_LBR))
+ wrmsrl(MSR_ARCH_LBR_CTL, lbr_select | ARCH_LBR_CTL_LBREN);
}
static void __intel_pmu_lbr_disable(void)
{
u64 debugctl;
+ if (static_cpu_has(X86_FEATURE_ARCH_LBR)) {
+ wrmsrl(MSR_ARCH_LBR_CTL, 0);
+ return;
+ }
+
rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}
-static void intel_pmu_lbr_reset_32(void)
+void intel_pmu_lbr_reset_32(void)
{
int i;
@@ -202,7 +250,7 @@ static void intel_pmu_lbr_reset_32(void)
wrmsrl(x86_pmu.lbr_from + i, 0);
}
-static void intel_pmu_lbr_reset_64(void)
+void intel_pmu_lbr_reset_64(void)
{
int i;
@@ -210,10 +258,16 @@ static void intel_pmu_lbr_reset_64(void)
wrmsrl(x86_pmu.lbr_from + i, 0);
wrmsrl(x86_pmu.lbr_to + i, 0);
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
- wrmsrl(MSR_LBR_INFO_0 + i, 0);
+ wrmsrl(x86_pmu.lbr_info + i, 0);
}
}
+static void intel_pmu_arch_lbr_reset(void)
+{
+	/* Writing to the ARCH_LBR_DEPTH MSR resets all LBR entries to 0. */
+ wrmsrl(MSR_ARCH_LBR_DEPTH, x86_pmu.lbr_nr);
+}
+
void intel_pmu_lbr_reset(void)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -221,10 +275,7 @@ void intel_pmu_lbr_reset(void)
if (!x86_pmu.lbr_nr)
return;
- if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
- intel_pmu_lbr_reset_32();
- else
- intel_pmu_lbr_reset_64();
+ x86_pmu.lbr_reset();
cpuc->last_task_ctx = NULL;
cpuc->last_log_id = 0;
@@ -308,69 +359,97 @@ static u64 lbr_from_signext_quirk_rd(u64 val)
return val;
}
-static inline void wrlbr_from(unsigned int idx, u64 val)
+static __always_inline void wrlbr_from(unsigned int idx, u64 val)
{
val = lbr_from_signext_quirk_wr(val);
wrmsrl(x86_pmu.lbr_from + idx, val);
}
-static inline void wrlbr_to(unsigned int idx, u64 val)
+static __always_inline void wrlbr_to(unsigned int idx, u64 val)
{
wrmsrl(x86_pmu.lbr_to + idx, val);
}
-static inline u64 rdlbr_from(unsigned int idx)
+static __always_inline void wrlbr_info(unsigned int idx, u64 val)
+{
+ wrmsrl(x86_pmu.lbr_info + idx, val);
+}
+
+static __always_inline u64 rdlbr_from(unsigned int idx, struct lbr_entry *lbr)
{
u64 val;
+ if (lbr)
+ return lbr->from;
+
rdmsrl(x86_pmu.lbr_from + idx, val);
return lbr_from_signext_quirk_rd(val);
}
-static inline u64 rdlbr_to(unsigned int idx)
+static __always_inline u64 rdlbr_to(unsigned int idx, struct lbr_entry *lbr)
{
u64 val;
+ if (lbr)
+ return lbr->to;
+
rdmsrl(x86_pmu.lbr_to + idx, val);
return val;
}
-static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
+static __always_inline u64 rdlbr_info(unsigned int idx, struct lbr_entry *lbr)
+{
+ u64 val;
+
+ if (lbr)
+ return lbr->info;
+
+ rdmsrl(x86_pmu.lbr_info + idx, val);
+
+ return val;
+}
+
+static inline void
+wrlbr_all(struct lbr_entry *lbr, unsigned int idx, bool need_info)
+{
+ wrlbr_from(idx, lbr->from);
+ wrlbr_to(idx, lbr->to);
+ if (need_info)
+ wrlbr_info(idx, lbr->info);
+}
+
+static inline bool
+rdlbr_all(struct lbr_entry *lbr, unsigned int idx, bool need_info)
+{
+ u64 from = rdlbr_from(idx, NULL);
+
+ /* Don't read invalid entry */
+ if (!from)
+ return false;
+
+ lbr->from = from;
+ lbr->to = rdlbr_to(idx, NULL);
+ if (need_info)
+ lbr->info = rdlbr_info(idx, NULL);
+
+ return true;
+}
+
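The rdlbr_from/to/info helpers now take an optional struct lbr_entry pointer: with a snapshot (a PEBS record or a saved context) the field comes from memory, with NULL it comes from the MSR. A minimal sketch of that dual-source pattern, with the MSR access replaced by a stub (fake_rdmsr is invented for the example):

#include <stdio.h>
#include <stdint.h>

struct lbr_entry {
	uint64_t from, to, info;
};

/* Stand-in for rdmsrl(); a real kernel build would read the MSR here. */
static uint64_t fake_rdmsr(unsigned int idx)
{
	return 0x1000 + idx;
}

static uint64_t rdlbr_from(unsigned int idx, struct lbr_entry *lbr)
{
	return lbr ? lbr->from : fake_rdmsr(idx);
}

int main(void)
{
	struct lbr_entry snap = { .from = 0xdeadbeef, .to = 0, .info = 0 };

	printf("from MSR:      %#lx\n", (unsigned long)rdlbr_from(2, NULL));
	printf("from snapshot: %#lx\n", (unsigned long)rdlbr_from(2, &snap));
	return 0;
}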
+void intel_pmu_lbr_restore(void *ctx)
{
+ bool need_info = x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct x86_perf_task_context *task_ctx = ctx;
int i;
unsigned lbr_idx, mask;
- u64 tos;
-
- if (task_ctx->lbr_callstack_users == 0 ||
- task_ctx->lbr_stack_state == LBR_NONE) {
- intel_pmu_lbr_reset();
- return;
- }
-
- tos = task_ctx->tos;
- /*
- * Does not restore the LBR registers, if
- * - No one else touched them, and
- * - Did not enter C6
- */
- if ((task_ctx == cpuc->last_task_ctx) &&
- (task_ctx->log_id == cpuc->last_log_id) &&
- rdlbr_from(tos)) {
- task_ctx->lbr_stack_state = LBR_NONE;
- return;
- }
+ u64 tos = task_ctx->tos;
mask = x86_pmu.lbr_nr - 1;
for (i = 0; i < task_ctx->valid_lbrs; i++) {
lbr_idx = (tos - i) & mask;
- wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
- wrlbr_to (lbr_idx, task_ctx->lbr_to[i]);
-
- if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
- wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
+ wrlbr_all(&task_ctx->lbr[i], lbr_idx, need_info);
}
for (; i < x86_pmu.lbr_nr; i++) {
@@ -378,49 +457,149 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
wrlbr_from(lbr_idx, 0);
wrlbr_to(lbr_idx, 0);
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
- wrmsrl(MSR_LBR_INFO_0 + lbr_idx, 0);
+ wrlbr_info(lbr_idx, 0);
}
wrmsrl(x86_pmu.lbr_tos, tos);
- task_ctx->lbr_stack_state = LBR_NONE;
+
+ if (cpuc->lbr_select)
+ wrmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
}
-static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
+static void intel_pmu_arch_lbr_restore(void *ctx)
{
- struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- unsigned lbr_idx, mask;
- u64 tos, from;
+ struct x86_perf_task_context_arch_lbr *task_ctx = ctx;
+ struct lbr_entry *entries = task_ctx->entries;
int i;
- if (task_ctx->lbr_callstack_users == 0) {
- task_ctx->lbr_stack_state = LBR_NONE;
+ /* Fast reset the LBRs before restore if the call stack is not full. */
+ if (!entries[x86_pmu.lbr_nr - 1].from)
+ intel_pmu_arch_lbr_reset();
+
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ if (!entries[i].from)
+ break;
+ wrlbr_all(&entries[i], i, true);
+ }
+}
+
+/*
+ * Restore the Architecture LBR state from the xsave area in the perf
+ * context data for the task via the XRSTORS instruction.
+ */
+static void intel_pmu_arch_lbr_xrstors(void *ctx)
+{
+ struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;
+
+ copy_kernel_to_dynamic_supervisor(&task_ctx->xsave, XFEATURE_MASK_LBR);
+}
+
+static __always_inline bool lbr_is_reset_in_cstate(void *ctx)
+{
+ if (static_cpu_has(X86_FEATURE_ARCH_LBR))
+ return x86_pmu.lbr_deep_c_reset && !rdlbr_from(0, NULL);
+
+ return !rdlbr_from(((struct x86_perf_task_context *)ctx)->tos, NULL);
+}
+
+static void __intel_pmu_lbr_restore(void *ctx)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ if (task_context_opt(ctx)->lbr_callstack_users == 0 ||
+ task_context_opt(ctx)->lbr_stack_state == LBR_NONE) {
+ intel_pmu_lbr_reset();
+ return;
+ }
+
+	/*
+	 * Do not restore the LBR registers if:
+	 * - no one else touched them, and
+	 * - they were not cleared in a deep C-state.
+	 */
+ if ((ctx == cpuc->last_task_ctx) &&
+ (task_context_opt(ctx)->log_id == cpuc->last_log_id) &&
+ !lbr_is_reset_in_cstate(ctx)) {
+ task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
return;
}
+ x86_pmu.lbr_restore(ctx);
+
+ task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
+}
+
+void intel_pmu_lbr_save(void *ctx)
+{
+ bool need_info = x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO;
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct x86_perf_task_context *task_ctx = ctx;
+ unsigned lbr_idx, mask;
+ u64 tos;
+ int i;
+
mask = x86_pmu.lbr_nr - 1;
tos = intel_pmu_lbr_tos();
for (i = 0; i < x86_pmu.lbr_nr; i++) {
lbr_idx = (tos - i) & mask;
- from = rdlbr_from(lbr_idx);
- if (!from)
+ if (!rdlbr_all(&task_ctx->lbr[i], lbr_idx, need_info))
break;
- task_ctx->lbr_from[i] = from;
- task_ctx->lbr_to[i] = rdlbr_to(lbr_idx);
- if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
- rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
task_ctx->valid_lbrs = i;
task_ctx->tos = tos;
- task_ctx->lbr_stack_state = LBR_VALID;
- cpuc->last_task_ctx = task_ctx;
- cpuc->last_log_id = ++task_ctx->log_id;
+ if (cpuc->lbr_select)
+ rdmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
+}
+
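intel_pmu_lbr_save() walks the LBR stack from the top-of-stack (TOS) entry downwards; because lbr_nr is a power of two, (tos - i) & (lbr_nr - 1) wraps around the ring, and the walk stops at the first entry whose 'from' address is zero. A self-contained sketch of that walk over an in-memory ring (all values invented):

#include <stdio.h>
#include <stdint.h>

#define LBR_NR 8	/* must be a power of two for the mask trick */

int main(void)
{
	uint64_t from[LBR_NR] = { 0x10, 0x20, 0x30, 0, 0, 0x60, 0x70, 0x80 };
	unsigned int tos = 2;	/* index of the newest entry */
	unsigned int mask = LBR_NR - 1;

	for (unsigned int i = 0; i < LBR_NR; i++) {
		unsigned int idx = (tos - i) & mask;

		if (!from[idx])		/* invalid entry: stop the walk */
			break;
		printf("entry %u (ring slot %u): from=%#lx\n",
		       i, idx, (unsigned long)from[idx]);
	}
	return 0;
}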
+static void intel_pmu_arch_lbr_save(void *ctx)
+{
+ struct x86_perf_task_context_arch_lbr *task_ctx = ctx;
+ struct lbr_entry *entries = task_ctx->entries;
+ int i;
+
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ if (!rdlbr_all(&entries[i], i, true))
+ break;
+ }
+
+	/* The LBR call stack is not full; a reset is required on restore. */
+ if (i < x86_pmu.lbr_nr)
+ entries[x86_pmu.lbr_nr - 1].from = 0;
+}
+
+/*
+ * Save the Architecture LBR state to the xsave area in the perf
+ * context data for the task via the XSAVES instruction.
+ */
+static void intel_pmu_arch_lbr_xsaves(void *ctx)
+{
+ struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;
+
+ copy_dynamic_supervisor_to_kernel(&task_ctx->xsave, XFEATURE_MASK_LBR);
+}
+
+static void __intel_pmu_lbr_save(void *ctx)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ if (task_context_opt(ctx)->lbr_callstack_users == 0) {
+ task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
+ return;
+ }
+
+ x86_pmu.lbr_save(ctx);
+
+ task_context_opt(ctx)->lbr_stack_state = LBR_VALID;
+
+ cpuc->last_task_ctx = ctx;
+ cpuc->last_log_id = ++task_context_opt(ctx)->log_id;
}
void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
struct perf_event_context *next)
{
- struct x86_perf_task_context *prev_ctx_data, *next_ctx_data;
+ void *prev_ctx_data, *next_ctx_data;
swap(prev->task_ctx_data, next->task_ctx_data);
@@ -436,14 +615,14 @@ void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
if (!prev_ctx_data || !next_ctx_data)
return;
- swap(prev_ctx_data->lbr_callstack_users,
- next_ctx_data->lbr_callstack_users);
+ swap(task_context_opt(prev_ctx_data)->lbr_callstack_users,
+ task_context_opt(next_ctx_data)->lbr_callstack_users);
}
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- struct x86_perf_task_context *task_ctx;
+ void *task_ctx;
if (!cpuc->lbr_users)
return;
@@ -479,18 +658,19 @@ static inline bool branch_user_callstack(unsigned br_sel)
void intel_pmu_lbr_add(struct perf_event *event)
{
+ struct kmem_cache *kmem_cache = event->pmu->task_ctx_cache;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- struct x86_perf_task_context *task_ctx;
if (!x86_pmu.lbr_nr)
return;
+ if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
+ cpuc->lbr_select = 1;
+
cpuc->br_sel = event->hw.branch_reg.reg;
- if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data) {
- task_ctx = event->ctx->task_ctx_data;
- task_ctx->lbr_callstack_users++;
- }
+ if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data)
+ task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users++;
/*
* Request pmu::sched_task() callback, which will fire inside the
@@ -516,21 +696,44 @@ void intel_pmu_lbr_add(struct perf_event *event)
perf_sched_cb_inc(event->ctx->pmu);
if (!cpuc->lbr_users++ && !event->total_time_running)
intel_pmu_lbr_reset();
+
+ if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
+ kmem_cache && !cpuc->lbr_xsave &&
+ (cpuc->lbr_users != cpuc->lbr_pebs_users))
+ cpuc->lbr_xsave = kmem_cache_alloc(kmem_cache, GFP_KERNEL);
+}
+
+void release_lbr_buffers(void)
+{
+ struct kmem_cache *kmem_cache = x86_get_pmu()->task_ctx_cache;
+ struct cpu_hw_events *cpuc;
+ int cpu;
+
+ if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
+ return;
+
+ for_each_possible_cpu(cpu) {
+ cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
+ if (kmem_cache && cpuc->lbr_xsave) {
+ kmem_cache_free(kmem_cache, cpuc->lbr_xsave);
+ cpuc->lbr_xsave = NULL;
+ }
+ }
}
void intel_pmu_lbr_del(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- struct x86_perf_task_context *task_ctx;
if (!x86_pmu.lbr_nr)
return;
if (branch_user_callstack(cpuc->br_sel) &&
- event->ctx->task_ctx_data) {
- task_ctx = event->ctx->task_ctx_data;
- task_ctx->lbr_callstack_users--;
- }
+ event->ctx->task_ctx_data)
+ task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users--;
+
+ if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
+ cpuc->lbr_select = 0;
if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
cpuc->lbr_pebs_users--;
@@ -540,11 +743,19 @@ void intel_pmu_lbr_del(struct perf_event *event)
perf_sched_cb_dec(event->ctx->pmu);
}
+static inline bool vlbr_exclude_host(void)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ return test_bit(INTEL_PMC_IDX_FIXED_VLBR,
+ (unsigned long *)&cpuc->intel_ctrl_guest_mask);
+}
+
void intel_pmu_lbr_enable_all(bool pmi)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- if (cpuc->lbr_users)
+ if (cpuc->lbr_users && !vlbr_exclude_host())
__intel_pmu_lbr_enable(pmi);
}
@@ -552,11 +763,11 @@ void intel_pmu_lbr_disable_all(void)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- if (cpuc->lbr_users)
+ if (cpuc->lbr_users && !vlbr_exclude_host())
__intel_pmu_lbr_disable();
}
-static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
+void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
unsigned long mask = x86_pmu.lbr_nr - 1;
u64 tos = intel_pmu_lbr_tos();
@@ -593,7 +804,7 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
* is the same as the linear address, allowing us to merge the LIP and EIP
* LBR formats.
*/
-static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
+void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
bool need_info = false, call_stack = false;
unsigned long mask = x86_pmu.lbr_nr - 1;
@@ -616,8 +827,8 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
u16 cycles = 0;
int lbr_flags = lbr_desc[lbr_format];
- from = rdlbr_from(lbr_idx);
- to = rdlbr_to(lbr_idx);
+ from = rdlbr_from(lbr_idx, NULL);
+ to = rdlbr_to(lbr_idx, NULL);
/*
* Read LBR call stack entries
@@ -629,7 +840,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
if (lbr_format == LBR_FORMAT_INFO && need_info) {
u64 info;
- rdmsrl(MSR_LBR_INFO_0 + lbr_idx, info);
+ info = rdlbr_info(lbr_idx, NULL);
mis = !!(info & LBR_INFO_MISPRED);
pred = !mis;
in_tx = !!(info & LBR_INFO_IN_TX);
@@ -684,6 +895,93 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
cpuc->lbr_stack.hw_idx = tos;
}
+static __always_inline int get_lbr_br_type(u64 info)
+{
+ if (!static_cpu_has(X86_FEATURE_ARCH_LBR) || !x86_pmu.lbr_br_type)
+ return 0;
+
+ return (info & LBR_INFO_BR_TYPE) >> LBR_INFO_BR_TYPE_OFFSET;
+}
+
+static __always_inline bool get_lbr_mispred(u64 info)
+{
+ if (static_cpu_has(X86_FEATURE_ARCH_LBR) && !x86_pmu.lbr_mispred)
+ return 0;
+
+ return !!(info & LBR_INFO_MISPRED);
+}
+
+static __always_inline bool get_lbr_predicted(u64 info)
+{
+ if (static_cpu_has(X86_FEATURE_ARCH_LBR) && !x86_pmu.lbr_mispred)
+ return 0;
+
+ return !(info & LBR_INFO_MISPRED);
+}
+
+static __always_inline bool get_lbr_cycles(u64 info)
+{
+ if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
+ !(x86_pmu.lbr_timed_lbr && info & LBR_INFO_CYC_CNT_VALID))
+ return 0;
+
+ return info & LBR_INFO_CYCLES;
+}
+
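The get_lbr_*() helpers above pick individual fields out of an LBR_INFO word and return 0 whenever the Arch LBR CPUID bits say the field is not populated. The sketch below decodes one such word; the field positions (MISPRED bit 63, IN_TX bit 62, ABORT bit 61, CYC_CNT_VALID bit 60, BR_TYPE bits 59:56, CYCLES bits 15:0) are quoted from memory rather than from this patch, so treat them as assumptions.

#include <stdio.h>
#include <stdint.h>

#define INFO_MISPRED	(1ULL << 63)
#define INFO_IN_TX	(1ULL << 62)
#define INFO_ABORT	(1ULL << 61)
#define INFO_CYC_VALID	(1ULL << 60)
#define INFO_BR_TYPE(x)	(((x) >> 56) & 0xf)
#define INFO_CYCLES(x)	((x) & 0xffff)

int main(void)
{
	/* A made-up record: mispredicted JCC with a valid 100-cycle count. */
	uint64_t info = INFO_MISPRED | INFO_CYC_VALID | (0ULL << 56) | 100;

	printf("mispred=%d in_tx=%d abort=%d type=%llu cycles=%llu\n",
	       !!(info & INFO_MISPRED), !!(info & INFO_IN_TX),
	       !!(info & INFO_ABORT),
	       (unsigned long long)INFO_BR_TYPE(info),
	       (unsigned long long)INFO_CYCLES(info));
	return 0;
}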
+static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc,
+ struct lbr_entry *entries)
+{
+ struct perf_branch_entry *e;
+ struct lbr_entry *lbr;
+ u64 from, to, info;
+ int i;
+
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ lbr = entries ? &entries[i] : NULL;
+ e = &cpuc->lbr_entries[i];
+
+ from = rdlbr_from(i, lbr);
+ /*
+ * Read LBR entries until invalid entry (0s) is detected.
+ */
+ if (!from)
+ break;
+
+ to = rdlbr_to(i, lbr);
+ info = rdlbr_info(i, lbr);
+
+ e->from = from;
+ e->to = to;
+ e->mispred = get_lbr_mispred(info);
+ e->predicted = get_lbr_predicted(info);
+ e->in_tx = !!(info & LBR_INFO_IN_TX);
+ e->abort = !!(info & LBR_INFO_ABORT);
+ e->cycles = get_lbr_cycles(info);
+ e->type = get_lbr_br_type(info);
+ e->reserved = 0;
+ }
+
+ cpuc->lbr_stack.nr = i;
+}
+
+static void intel_pmu_arch_lbr_read(struct cpu_hw_events *cpuc)
+{
+ intel_pmu_store_lbr(cpuc, NULL);
+}
+
+static void intel_pmu_arch_lbr_read_xsave(struct cpu_hw_events *cpuc)
+{
+ struct x86_perf_task_context_arch_lbr_xsave *xsave = cpuc->lbr_xsave;
+
+ if (!xsave) {
+ intel_pmu_store_lbr(cpuc, NULL);
+ return;
+ }
+ copy_dynamic_supervisor_to_kernel(&xsave->xsave, XFEATURE_MASK_LBR);
+
+ intel_pmu_store_lbr(cpuc, xsave->lbr.entries);
+}
+
void intel_pmu_lbr_read(void)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -694,13 +992,11 @@ void intel_pmu_lbr_read(void)
* This could be smarter and actually check the event,
* but this simple approach seems to work for now.
*/
- if (!cpuc->lbr_users || cpuc->lbr_users == cpuc->lbr_pebs_users)
+ if (!cpuc->lbr_users || vlbr_exclude_host() ||
+ cpuc->lbr_users == cpuc->lbr_pebs_users)
return;
- if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
- intel_pmu_lbr_read_32(cpuc);
- else
- intel_pmu_lbr_read_64(cpuc);
+ x86_pmu.lbr_read(cpuc);
intel_pmu_lbr_filter(cpuc);
}
@@ -800,6 +1096,11 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
reg = &event->hw.branch_reg;
reg->idx = EXTRA_REG_LBR;
+ if (static_cpu_has(X86_FEATURE_ARCH_LBR)) {
+ reg->config = mask;
+ return 0;
+ }
+
/*
* The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
* in suppress mode. So LBR_SELECT should be set to
@@ -1056,6 +1357,27 @@ common_branch_type(int type)
return PERF_BR_UNKNOWN;
}
+enum {
+ ARCH_LBR_BR_TYPE_JCC = 0,
+ ARCH_LBR_BR_TYPE_NEAR_IND_JMP = 1,
+ ARCH_LBR_BR_TYPE_NEAR_REL_JMP = 2,
+ ARCH_LBR_BR_TYPE_NEAR_IND_CALL = 3,
+ ARCH_LBR_BR_TYPE_NEAR_REL_CALL = 4,
+ ARCH_LBR_BR_TYPE_NEAR_RET = 5,
+ ARCH_LBR_BR_TYPE_KNOWN_MAX = ARCH_LBR_BR_TYPE_NEAR_RET,
+
+ ARCH_LBR_BR_TYPE_MAP_MAX = 16,
+};
+
+static const int arch_lbr_br_type_map[ARCH_LBR_BR_TYPE_MAP_MAX] = {
+ [ARCH_LBR_BR_TYPE_JCC] = X86_BR_JCC,
+ [ARCH_LBR_BR_TYPE_NEAR_IND_JMP] = X86_BR_IND_JMP,
+ [ARCH_LBR_BR_TYPE_NEAR_REL_JMP] = X86_BR_JMP,
+ [ARCH_LBR_BR_TYPE_NEAR_IND_CALL] = X86_BR_IND_CALL,
+ [ARCH_LBR_BR_TYPE_NEAR_REL_CALL] = X86_BR_CALL,
+ [ARCH_LBR_BR_TYPE_NEAR_RET] = X86_BR_RET,
+};
+
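Each Arch LBR record carries a small hardware branch-type code; arch_lbr_br_type_map translates it into the driver's software X86_BR_* classification, and the target privilege level is OR'd in separately (see the filter change further down). A toy version of that lookup, using made-up flag values rather than the kernel's real X86_BR_* encoding:

#include <stdio.h>

/* Illustrative software branch classes (not the kernel's exact values). */
#define BR_USER		(1 << 0)
#define BR_KERNEL	(1 << 1)
#define BR_JCC		(1 << 2)
#define BR_IND_JMP	(1 << 3)
#define BR_RET		(1 << 4)

static const int type_map[] = {
	[0] = BR_JCC,		/* ARCH_LBR_BR_TYPE_JCC */
	[1] = BR_IND_JMP,	/* ARCH_LBR_BR_TYPE_NEAR_IND_JMP */
	[5] = BR_RET,		/* ARCH_LBR_BR_TYPE_NEAR_RET */
};

static int classify(int hw_type, int to_is_kernel)
{
	return type_map[hw_type] | (to_is_kernel ? BR_KERNEL : BR_USER);
}

int main(void)
{
	printf("JCC to user space   -> %#x\n", classify(0, 0));
	printf("RET to kernel space -> %#x\n", classify(5, 1));
	return 0;
}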
/*
* implement actual branch filter based on user demand.
* Hardware may not exactly satisfy that request, thus
@@ -1068,7 +1390,7 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
{
u64 from, to;
int br_sel = cpuc->br_sel;
- int i, j, type;
+ int i, j, type, to_plm;
bool compress = false;
/* if sampling all branches, then nothing to filter */
@@ -1080,8 +1402,19 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
from = cpuc->lbr_entries[i].from;
to = cpuc->lbr_entries[i].to;
+ type = cpuc->lbr_entries[i].type;
- type = branch_type(from, to, cpuc->lbr_entries[i].abort);
+ /*
+		 * Parse the branch type recorded in the LBR_x_INFO MSR.
+		 * OTHER_BRANCH decoding is not supported for now; that branch
+		 * type still relies on software decoding.
+ */
+ if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
+ type <= ARCH_LBR_BR_TYPE_KNOWN_MAX) {
+ to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
+ type = arch_lbr_br_type_map[type] | to_plm;
+ } else
+ type = branch_type(from, to, cpuc->lbr_entries[i].abort);
if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
if (cpuc->lbr_entries[i].in_tx)
type |= X86_BR_IN_TX;
@@ -1116,32 +1449,18 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
}
}
-void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr)
+void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- int i;
-
- cpuc->lbr_stack.nr = x86_pmu.lbr_nr;
- /* Cannot get TOS for large PEBS */
- if (cpuc->n_pebs == cpuc->n_large_pebs)
+ /* Cannot get TOS for large PEBS and Arch LBR */
+ if (static_cpu_has(X86_FEATURE_ARCH_LBR) ||
+ (cpuc->n_pebs == cpuc->n_large_pebs))
cpuc->lbr_stack.hw_idx = -1ULL;
else
cpuc->lbr_stack.hw_idx = intel_pmu_lbr_tos();
- for (i = 0; i < x86_pmu.lbr_nr; i++) {
- u64 info = lbr->lbr[i].info;
- struct perf_branch_entry *e = &cpuc->lbr_entries[i];
-
- e->from = lbr->lbr[i].from;
- e->to = lbr->lbr[i].to;
- e->mispred = !!(info & LBR_INFO_MISPRED);
- e->predicted = !(info & LBR_INFO_MISPRED);
- e->in_tx = !!(info & LBR_INFO_IN_TX);
- e->abort = !!(info & LBR_INFO_ABORT);
- e->cycles = info & LBR_INFO_CYCLES;
- e->reserved = 0;
- }
+ intel_pmu_store_lbr(cpuc, lbr);
intel_pmu_lbr_filter(cpuc);
}
@@ -1198,6 +1517,26 @@ static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
[PERF_SAMPLE_BRANCH_CALL_SHIFT] = LBR_REL_CALL,
};
+static int arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
+ [PERF_SAMPLE_BRANCH_ANY_SHIFT] = ARCH_LBR_ANY,
+ [PERF_SAMPLE_BRANCH_USER_SHIFT] = ARCH_LBR_USER,
+ [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = ARCH_LBR_KERNEL,
+ [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
+ [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = ARCH_LBR_RETURN |
+ ARCH_LBR_OTHER_BRANCH,
+ [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = ARCH_LBR_REL_CALL |
+ ARCH_LBR_IND_CALL |
+ ARCH_LBR_OTHER_BRANCH,
+ [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = ARCH_LBR_IND_CALL,
+ [PERF_SAMPLE_BRANCH_COND_SHIFT] = ARCH_LBR_JCC,
+ [PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = ARCH_LBR_REL_CALL |
+ ARCH_LBR_IND_CALL |
+ ARCH_LBR_RETURN |
+ ARCH_LBR_CALL_STACK,
+ [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = ARCH_LBR_IND_JMP,
+ [PERF_SAMPLE_BRANCH_CALL_SHIFT] = ARCH_LBR_REL_CALL,
+};
+
/* core */
void __init intel_pmu_lbr_init_core(void)
{
@@ -1251,9 +1590,17 @@ void __init intel_pmu_lbr_init_snb(void)
*/
}
+static inline struct kmem_cache *
+create_lbr_kmem_cache(size_t size, size_t align)
+{
+ return kmem_cache_create("x86_lbr", size, align, 0, NULL);
+}
+
/* haswell */
void intel_pmu_lbr_init_hsw(void)
{
+ size_t size = sizeof(struct x86_perf_task_context);
+
x86_pmu.lbr_nr = 16;
x86_pmu.lbr_tos = MSR_LBR_TOS;
x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
@@ -1262,6 +1609,8 @@ void intel_pmu_lbr_init_hsw(void)
x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
+ x86_get_pmu()->task_ctx_cache = create_lbr_kmem_cache(size, 0);
+
if (lbr_from_signext_quirk_needed())
static_branch_enable(&lbr_from_quirk_key);
}
@@ -1269,14 +1618,19 @@ void intel_pmu_lbr_init_hsw(void)
/* skylake */
__init void intel_pmu_lbr_init_skl(void)
{
+ size_t size = sizeof(struct x86_perf_task_context);
+
x86_pmu.lbr_nr = 32;
x86_pmu.lbr_tos = MSR_LBR_TOS;
x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
x86_pmu.lbr_to = MSR_LBR_NHM_TO;
+ x86_pmu.lbr_info = MSR_LBR_INFO_0;
x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
+ x86_get_pmu()->task_ctx_cache = create_lbr_kmem_cache(size, 0);
+
/*
* SW branch filter usage:
* - support syscall, sysret capture.
@@ -1343,3 +1697,152 @@ void intel_pmu_lbr_init_knl(void)
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
}
+
+/*
+ * LBR state size is variable based on the max number of registers.
+ * This calculates the expected state size, which should match
+ * what the hardware enumerates for the size of XFEATURE_LBR.
+ */
+static inline unsigned int get_lbr_state_size(void)
+{
+ return sizeof(struct arch_lbr_state) +
+ x86_pmu.lbr_nr * sizeof(struct lbr_entry);
+}
+
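For the XSAVES path the software layout has to match what the CPU writes for XFEATURE_LBR: a fixed arch_lbr_state header followed by one lbr_entry per LBR register. The user-space model below reproduces that size calculation; the five-u64 header and three-u64 entry are my reading of the FPU headers of this era (giving 40 + 32 * 24 = 808 bytes for 32 LBRs) and should be checked against asm/fpu/types.h.

#include <stdio.h>
#include <stdint.h>

/* Mirrors struct lbr_entry / struct arch_lbr_state as assumed here. */
struct lbr_entry {
	uint64_t from, to, info;
};

struct arch_lbr_state {
	uint64_t lbr_ctl;
	uint64_t lbr_depth;
	uint64_t ler_from;
	uint64_t ler_to;
	uint64_t ler_info;
	struct lbr_entry entries[];
};

int main(void)
{
	unsigned int lbr_nr = 32;
	size_t size = sizeof(struct arch_lbr_state) +
		      lbr_nr * sizeof(struct lbr_entry);

	printf("expected XFEATURE_LBR size for %u LBRs: %zu bytes\n",
	       lbr_nr, size);
	return 0;
}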
+static bool is_arch_lbr_xsave_available(void)
+{
+ if (!boot_cpu_has(X86_FEATURE_XSAVES))
+ return false;
+
+ /*
+	 * Check the hardware-enumerated LBR state size against the corresponding
+	 * software structure. Disable LBR XSAVES support if the sizes don't match.
+ */
+ if (WARN_ON(xfeature_size(XFEATURE_LBR) != get_lbr_state_size()))
+ return false;
+
+ return true;
+}
+
+void __init intel_pmu_arch_lbr_init(void)
+{
+ struct pmu *pmu = x86_get_pmu();
+ union cpuid28_eax eax;
+ union cpuid28_ebx ebx;
+ union cpuid28_ecx ecx;
+ unsigned int unused_edx;
+ bool arch_lbr_xsave;
+ size_t size;
+ u64 lbr_nr;
+
+ /* Arch LBR Capabilities */
+ cpuid(28, &eax.full, &ebx.full, &ecx.full, &unused_edx);
+
+ lbr_nr = fls(eax.split.lbr_depth_mask) * 8;
+ if (!lbr_nr)
+ goto clear_arch_lbr;
+
+ /* Apply the max depth of Arch LBR */
+ if (wrmsrl_safe(MSR_ARCH_LBR_DEPTH, lbr_nr))
+ goto clear_arch_lbr;
+
+ x86_pmu.lbr_depth_mask = eax.split.lbr_depth_mask;
+ x86_pmu.lbr_deep_c_reset = eax.split.lbr_deep_c_reset;
+ x86_pmu.lbr_lip = eax.split.lbr_lip;
+ x86_pmu.lbr_cpl = ebx.split.lbr_cpl;
+ x86_pmu.lbr_filter = ebx.split.lbr_filter;
+ x86_pmu.lbr_call_stack = ebx.split.lbr_call_stack;
+ x86_pmu.lbr_mispred = ecx.split.lbr_mispred;
+ x86_pmu.lbr_timed_lbr = ecx.split.lbr_timed_lbr;
+ x86_pmu.lbr_br_type = ecx.split.lbr_br_type;
+ x86_pmu.lbr_nr = lbr_nr;
+
+ arch_lbr_xsave = is_arch_lbr_xsave_available();
+ if (arch_lbr_xsave) {
+ size = sizeof(struct x86_perf_task_context_arch_lbr_xsave) +
+ get_lbr_state_size();
+ pmu->task_ctx_cache = create_lbr_kmem_cache(size,
+ XSAVE_ALIGNMENT);
+ }
+
+ if (!pmu->task_ctx_cache) {
+ arch_lbr_xsave = false;
+
+ size = sizeof(struct x86_perf_task_context_arch_lbr) +
+ lbr_nr * sizeof(struct lbr_entry);
+ pmu->task_ctx_cache = create_lbr_kmem_cache(size, 0);
+ }
+
+ x86_pmu.lbr_from = MSR_ARCH_LBR_FROM_0;
+ x86_pmu.lbr_to = MSR_ARCH_LBR_TO_0;
+ x86_pmu.lbr_info = MSR_ARCH_LBR_INFO_0;
+
+ /* LBR callstack requires both CPL and Branch Filtering support */
+ if (!x86_pmu.lbr_cpl ||
+ !x86_pmu.lbr_filter ||
+ !x86_pmu.lbr_call_stack)
+ arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = LBR_NOT_SUPP;
+
+ if (!x86_pmu.lbr_cpl) {
+ arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_NOT_SUPP;
+ arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_NOT_SUPP;
+ } else if (!x86_pmu.lbr_filter) {
+ arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_NOT_SUPP;
+ arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_NOT_SUPP;
+ arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_NOT_SUPP;
+ arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_NOT_SUPP;
+ arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_NOT_SUPP;
+ arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_NOT_SUPP;
+ arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_CALL_SHIFT] = LBR_NOT_SUPP;
+ }
+
+ x86_pmu.lbr_ctl_mask = ARCH_LBR_CTL_MASK;
+ x86_pmu.lbr_ctl_map = arch_lbr_ctl_map;
+
+ if (!x86_pmu.lbr_cpl && !x86_pmu.lbr_filter)
+ x86_pmu.lbr_ctl_map = NULL;
+
+ x86_pmu.lbr_reset = intel_pmu_arch_lbr_reset;
+ if (arch_lbr_xsave) {
+ x86_pmu.lbr_save = intel_pmu_arch_lbr_xsaves;
+ x86_pmu.lbr_restore = intel_pmu_arch_lbr_xrstors;
+ x86_pmu.lbr_read = intel_pmu_arch_lbr_read_xsave;
+ pr_cont("XSAVE ");
+ } else {
+ x86_pmu.lbr_save = intel_pmu_arch_lbr_save;
+ x86_pmu.lbr_restore = intel_pmu_arch_lbr_restore;
+ x86_pmu.lbr_read = intel_pmu_arch_lbr_read;
+ }
+
+ pr_cont("Architectural LBR, ");
+
+ return;
+
+clear_arch_lbr:
+ clear_cpu_cap(&boot_cpu_data, X86_FEATURE_ARCH_LBR);
+}
+
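CPUID leaf 0x1C reports the supported LBR depths as a bitmask, where a set bit n means a depth of 8 * (n + 1) entries is available; the init code above therefore picks the deepest supported configuration with fls(lbr_depth_mask) * 8 and programs it into MSR_ARCH_LBR_DEPTH. A quick standalone version of that calculation:

#include <stdio.h>

/* Highest set bit, numbered from 1, like the kernel's fls(). */
static int fls_emul(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	/* Example: bits 0..3 set -> depths 8, 16, 24 and 32 supported. */
	unsigned int lbr_depth_mask = 0x0f;
	unsigned int lbr_nr = fls_emul(lbr_depth_mask) * 8;

	printf("depth mask %#x -> max LBR depth %u\n", lbr_depth_mask, lbr_nr);
	return 0;
}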
+/**
+ * x86_perf_get_lbr - get the LBR record information
+ *
+ * @lbr: the caller's memory to store the LBR record information
+ *
+ * Returns: 0 on success; the LBR record information is stored in @lbr
+ */
+int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
+{
+ int lbr_fmt = x86_pmu.intel_cap.lbr_format;
+
+ lbr->nr = x86_pmu.lbr_nr;
+ lbr->from = x86_pmu.lbr_from;
+ lbr->to = x86_pmu.lbr_to;
+ lbr->info = (lbr_fmt == LBR_FORMAT_INFO) ? x86_pmu.lbr_info : 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(x86_perf_get_lbr);
+
+struct event_constraint vlbr_constraint =
+ __EVENT_CONSTRAINT(INTEL_FIXED_VLBR_EVENT, (1ULL << INTEL_PMC_IDX_FIXED_VLBR),
+ FIXED_EVENT_FLAGS, 1, 0, PERF_X86_EVENT_LBR_SELECT);
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index cf76d6631afa..d5c6d3b340c5 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -16,7 +16,7 @@ struct pci_driver *uncore_pci_driver;
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
-static int max_dies;
+int __uncore_max_dies;
/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;
@@ -108,7 +108,7 @@ struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu
* The unsigned check also catches the '-1' return value for non
* existent mappings in the topology map.
*/
- return dieid < max_dies ? pmu->boxes[dieid] : NULL;
+ return dieid < uncore_max_dies() ? pmu->boxes[dieid] : NULL;
}
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
@@ -132,6 +132,9 @@ u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
if (!box->io_addr)
return 0;
+ if (!uncore_mmio_is_valid_offset(box, event->hw.event_base))
+ return 0;
+
return readq(box->io_addr + event->hw.event_base);
}
@@ -843,10 +846,12 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
.read = uncore_pmu_event_read,
.module = THIS_MODULE,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .attr_update = pmu->type->attr_update,
};
} else {
pmu->pmu = *pmu->type->pmu;
pmu->pmu.attr_groups = pmu->type->attr_groups;
+ pmu->pmu.attr_update = pmu->type->attr_update;
}
if (pmu->type->num_boxes == 1) {
@@ -877,7 +882,7 @@ static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
int die;
- for (die = 0; die < max_dies; die++)
+ for (die = 0; die < uncore_max_dies(); die++)
kfree(pmu->boxes[die]);
kfree(pmu->boxes);
}
@@ -887,6 +892,9 @@ static void uncore_type_exit(struct intel_uncore_type *type)
struct intel_uncore_pmu *pmu = type->pmus;
int i;
+ if (type->cleanup_mapping)
+ type->cleanup_mapping(type);
+
if (pmu) {
for (i = 0; i < type->num_boxes; i++, pmu++) {
uncore_pmu_unregister(pmu);
@@ -915,7 +923,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
if (!pmus)
return -ENOMEM;
- size = max_dies * sizeof(struct intel_uncore_box *);
+ size = uncore_max_dies() * sizeof(struct intel_uncore_box *);
for (i = 0; i < type->num_boxes; i++) {
pmus[i].func_id = setid ? i : -1;
@@ -954,6 +962,9 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
type->pmu_group = &uncore_pmu_attr_group;
+ if (type->set_mapping)
+ type->set_mapping(type);
+
return 0;
err:
@@ -1112,7 +1123,7 @@ static int __init uncore_pci_init(void)
size_t size;
int ret;
- size = max_dies * sizeof(struct pci_extra_dev);
+ size = uncore_max_dies() * sizeof(struct pci_extra_dev);
uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
if (!uncore_extra_pci_dev) {
ret = -ENOMEM;
@@ -1514,6 +1525,8 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &skx_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &skl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &skl_uncore_init),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &skl_uncore_init),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &skl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, &icl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_uncore_init),
@@ -1539,7 +1552,8 @@ static int __init intel_uncore_init(void)
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
return -ENODEV;
- max_dies = topology_max_packages() * topology_max_die_per_package();
+ __uncore_max_dies =
+ topology_max_packages() * topology_max_die_per_package();
uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
if (uncore_init->pci_init) {
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index b469ddd45515..105fdc69825e 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -61,6 +61,7 @@ struct intel_uncore_type {
unsigned msr_offset;
unsigned mmio_offset;
};
+ unsigned mmio_map_size;
unsigned num_shared_regs:8;
unsigned single_fixed:1;
unsigned pair_ctr_ctl:1;
@@ -72,7 +73,19 @@ struct intel_uncore_type {
struct uncore_event_desc *event_descs;
struct freerunning_counters *freerunning;
const struct attribute_group *attr_groups[4];
+ const struct attribute_group **attr_update;
struct pmu *pmu; /* for custom pmu ops */
+ /*
+	 * The uncore PMU stores the relevant platform topology configuration
+	 * here to identify which platform component each PMON block of this
+	 * type is supposed to monitor.
+ */
+ u64 *topology;
+ /*
+	 * Optional callbacks for managing the mapping of uncore units to PMONs
+ */
+ int (*set_mapping)(struct intel_uncore_type *type);
+ void (*cleanup_mapping)(struct intel_uncore_type *type);
};
#define pmu_group attr_groups[0]
@@ -169,6 +182,18 @@ int uncore_pcibus_to_physid(struct pci_bus *bus);
ssize_t uncore_event_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf);
+static inline struct intel_uncore_pmu *dev_to_uncore_pmu(struct device *dev)
+{
+ return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu);
+}
+
+#define to_device_attribute(n) container_of(n, struct device_attribute, attr)
+#define to_dev_ext_attribute(n) container_of(n, struct dev_ext_attribute, attr)
+#define attr_to_ext_attr(n) to_dev_ext_attribute(to_device_attribute(n))
+
+extern int __uncore_max_dies;
+#define uncore_max_dies() (__uncore_max_dies)
+
#define INTEL_UNCORE_EVENT_DESC(_name, _config) \
{ \
.attr = __ATTR(_name, 0444, uncore_event_show, NULL), \
@@ -196,6 +221,18 @@ static inline bool uncore_pmc_freerunning(int idx)
return idx == UNCORE_PMC_IDX_FREERUNNING;
}
+static inline bool uncore_mmio_is_valid_offset(struct intel_uncore_box *box,
+ unsigned long offset)
+{
+ if (offset < box->pmu->type->mmio_map_size)
+ return true;
+
+ pr_warn_once("perf uncore: Invalid offset 0x%lx exceeds mapped area of %s.\n",
+ offset, box->pmu->type->name);
+
+ return false;
+}
+
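uncore_mmio_is_valid_offset() bounds-checks every MMIO counter access against the size that was actually ioremap'ed (mmio_map_size) instead of trusting the event's register offset. A user-space sketch of the same fail-closed check over a plain byte array (values invented; the sample read assumes a little-endian host):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>

/* Minimal model of the box: a mapped window and its size. */
struct box {
	const uint8_t *io_addr;
	size_t map_size;
};

static bool offset_is_valid(const struct box *box, size_t offset)
{
	if (offset < box->map_size)
		return true;
	fprintf(stderr, "invalid offset %#zx exceeds mapped area (%#zx)\n",
		offset, box->map_size);
	return false;
}

static uint64_t read_counter(const struct box *box, size_t offset)
{
	uint64_t val;

	if (!box->io_addr || !offset_is_valid(box, offset))
		return 0;	/* mirror the driver: fail closed, return 0 */
	memcpy(&val, box->io_addr + offset, sizeof(val));
	return val;
}

int main(void)
{
	static const uint8_t window[0x100] = { [0x40] = 0x2a };
	struct box box = { .io_addr = window, .map_size = sizeof(window) };

	printf("in range:  %llu\n", (unsigned long long)read_counter(&box, 0x40));
	printf("out range: %llu\n", (unsigned long long)read_counter(&box, 0x2000));
	return 0;
}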
static inline
unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index 3de1065eefc4..cb94ba86efd2 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -42,6 +42,17 @@
#define PCI_DEVICE_ID_INTEL_WHL_UQ_IMC 0x3ed0
#define PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC 0x3e34
#define PCI_DEVICE_ID_INTEL_WHL_UD_IMC 0x3e35
+#define PCI_DEVICE_ID_INTEL_CML_H1_IMC 0x9b44
+#define PCI_DEVICE_ID_INTEL_CML_H2_IMC 0x9b54
+#define PCI_DEVICE_ID_INTEL_CML_H3_IMC 0x9b64
+#define PCI_DEVICE_ID_INTEL_CML_U1_IMC 0x9b51
+#define PCI_DEVICE_ID_INTEL_CML_U2_IMC 0x9b61
+#define PCI_DEVICE_ID_INTEL_CML_U3_IMC 0x9b71
+#define PCI_DEVICE_ID_INTEL_CML_S1_IMC 0x9b33
+#define PCI_DEVICE_ID_INTEL_CML_S2_IMC 0x9b43
+#define PCI_DEVICE_ID_INTEL_CML_S3_IMC 0x9b53
+#define PCI_DEVICE_ID_INTEL_CML_S4_IMC 0x9b63
+#define PCI_DEVICE_ID_INTEL_CML_S5_IMC 0x9b73
#define PCI_DEVICE_ID_INTEL_ICL_U_IMC 0x8a02
#define PCI_DEVICE_ID_INTEL_ICL_U2_IMC 0x8a12
#define PCI_DEVICE_ID_INTEL_TGL_U1_IMC 0x9a02
@@ -415,6 +426,7 @@ static const struct attribute_group snb_uncore_imc_format_group = {
static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
+ struct intel_uncore_type *type = box->pmu->type;
struct pci_dev *pdev = box->pci_dev;
int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
resource_size_t addr;
@@ -430,7 +442,10 @@ static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
addr &= ~(PAGE_SIZE - 1);
- box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
+ box->io_addr = ioremap(addr, type->mmio_map_size);
+ if (!box->io_addr)
+ pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
+
box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}
@@ -586,6 +601,7 @@ static struct intel_uncore_type snb_uncore_imc = {
.num_counters = 2,
.num_boxes = 1,
.num_freerunning_types = SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
+ .mmio_map_size = SNB_UNCORE_PCI_IMC_MAP_SIZE,
.freerunning = snb_uncore_imc_freerunning,
.event_descs = snb_uncore_imc_events,
.format_group = &snb_uncore_imc_format_group,
@@ -771,6 +787,50 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UD_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H1_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H2_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H3_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U1_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U2_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U3_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S1_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S2_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S3_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S4_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S5_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
{ /* end: all zeroes */ },
};
@@ -863,6 +923,17 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
IMC_DEV(WHL_UQ_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U Mobile Quad Core */
IMC_DEV(WHL_4_UQ_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U Mobile Quad Core */
IMC_DEV(WHL_UD_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U Mobile Dual Core */
+ IMC_DEV(CML_H1_IMC, &skl_uncore_pci_driver),
+ IMC_DEV(CML_H2_IMC, &skl_uncore_pci_driver),
+ IMC_DEV(CML_H3_IMC, &skl_uncore_pci_driver),
+ IMC_DEV(CML_U1_IMC, &skl_uncore_pci_driver),
+ IMC_DEV(CML_U2_IMC, &skl_uncore_pci_driver),
+ IMC_DEV(CML_U3_IMC, &skl_uncore_pci_driver),
+ IMC_DEV(CML_S1_IMC, &skl_uncore_pci_driver),
+ IMC_DEV(CML_S2_IMC, &skl_uncore_pci_driver),
+ IMC_DEV(CML_S3_IMC, &skl_uncore_pci_driver),
+ IMC_DEV(CML_S4_IMC, &skl_uncore_pci_driver),
+ IMC_DEV(CML_S5_IMC, &skl_uncore_pci_driver),
IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver), /* 10th Gen Core Mobile */
IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver), /* 10th Gen Core Mobile */
{ /* end marker */ }
@@ -1085,11 +1156,13 @@ static struct pci_dev *tgl_uncore_get_mc_dev(void)
}
#define TGL_UNCORE_MMIO_IMC_MEM_OFFSET 0x10000
+#define TGL_UNCORE_PCI_IMC_MAP_SIZE 0xe000
static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
struct pci_dev *pdev = tgl_uncore_get_mc_dev();
struct intel_uncore_pmu *pmu = box->pmu;
+ struct intel_uncore_type *type = pmu->type;
resource_size_t addr;
u32 mch_bar;
@@ -1112,7 +1185,9 @@ static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
addr |= ((resource_size_t)mch_bar << 32);
#endif
- box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
+ box->io_addr = ioremap(addr, type->mmio_map_size);
+ if (!box->io_addr)
+ pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
}
static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = {
@@ -1138,6 +1213,7 @@ static struct intel_uncore_type tgl_uncore_imc_free_running = {
.num_counters = 3,
.num_boxes = 2,
.num_freerunning_types = TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
+ .mmio_map_size = TGL_UNCORE_PCI_IMC_MAP_SIZE,
.freerunning = tgl_uncore_imc_freerunning,
.ops = &tgl_uncore_imc_freerunning_ops,
.event_descs = tgl_uncore_imc_events,
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 07652fa20ebb..62e88ad919ff 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -273,6 +273,30 @@
#define SKX_CPUNODEID 0xc0
#define SKX_GIDNIDMAP 0xd4
+/*
+ * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
+ * that BIOS programmed. MSR has package scope.
+ * | Bit | Default | Description
+ * | [63] | 00h | VALID - When set, indicates the CPU bus
+ * numbers have been initialized. (RO)
+ * |[62:48]| --- | Reserved
+ * |[47:40]| 00h | BUS_NUM_5 — Return the bus number BIOS assigned
+ * CPUBUSNO(5). (RO)
+ * |[39:32]| 00h | BUS_NUM_4 — Return the bus number BIOS assigned
+ * CPUBUSNO(4). (RO)
+ * |[31:24]| 00h | BUS_NUM_3 — Return the bus number BIOS assigned
+ * CPUBUSNO(3). (RO)
+ * |[23:16]| 00h | BUS_NUM_2 — Return the bus number BIOS assigned
+ * CPUBUSNO(2). (RO)
+ * |[15:8] | 00h | BUS_NUM_1 — Return the bus number BIOS assigned
+ * CPUBUSNO(1). (RO)
+ * | [7:0] | 00h | BUS_NUM_0 — Return the bus number BIOS assigned
+ * CPUBUSNO(0). (RO)
+ */
+#define SKX_MSR_CPU_BUS_NUMBER 0x300
+#define SKX_MSR_CPU_BUS_VALID_BIT (1ULL << 63)
+#define BUS_NUM_STRIDE 8
+
/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID (0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK (0xfULL << 9)
@@ -3612,6 +3636,170 @@ static struct intel_uncore_ops skx_uncore_iio_ops = {
.read_counter = uncore_msr_read_counter,
};
+static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
+{
+ return pmu->type->topology[die] >> (pmu->pmu_idx * BUS_NUM_STRIDE);
+}
+
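skx_iio_stack() treats the cached SKX_MSR_CPU_BUS_NUMBER value as packed 8-bit root-bus numbers, one per IIO stack, and shifts by pmu_idx * BUS_NUM_STRIDE to select the right one. The sketch below unpacks a made-up MSR value the same way:

#include <stdio.h>
#include <stdint.h>

#define BUS_NUM_STRIDE	8

int main(void)
{
	/* Made-up MSR value: valid bit set, CPUBUSNO(0..5) = 0x00, 0x17, 0x3a, 0x5d, 0x80, 0xa3. */
	uint64_t msr = (1ULL << 63) |
		       (0xa3ULL << 40) | (0x80ULL << 32) | (0x5dULL << 24) |
		       (0x3aULL << 16) | (0x17ULL << 8) | 0x00ULL;

	if (!(msr & (1ULL << 63))) {
		printf("bus numbers not initialized by BIOS\n");
		return 1;
	}

	for (int pmu_idx = 0; pmu_idx < 6; pmu_idx++)
		printf("IIO stack %d -> root bus 0x%02x\n", pmu_idx,
		       (unsigned int)((msr >> (pmu_idx * BUS_NUM_STRIDE)) & 0xff));
	return 0;
}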
+static umode_t
+skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
+{
+ struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
+
+ /* Root bus 0x00 is valid only for die 0 AND pmu_idx = 0. */
+ return (!skx_iio_stack(pmu, die) && pmu->pmu_idx) ? 0 : attr->mode;
+}
+
+static ssize_t skx_iio_mapping_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_bus *bus = pci_find_next_bus(NULL);
+ struct intel_uncore_pmu *uncore_pmu = dev_to_uncore_pmu(dev);
+ struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
+ long die = (long)ea->var;
+
+ /*
+	 * The current implementation only handles single-segment configurations,
+	 * so it is safe to take the segment value from the first available root bus.
+ */
+ return sprintf(buf, "%04x:%02x\n", pci_domain_nr(bus),
+ skx_iio_stack(uncore_pmu, die));
+}
+
+static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
+{
+ u64 msr_value;
+
+ if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
+ !(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
+ return -ENXIO;
+
+ *topology = msr_value;
+
+ return 0;
+}
+
+static int die_to_cpu(int die)
+{
+ int res = 0, cpu, current_die;
+ /*
+	 * Use cpus_read_lock() to ensure that a CPU does not go down while
+	 * we are walking cpu_online_mask.
+ */
+ cpus_read_lock();
+ for_each_online_cpu(cpu) {
+ current_die = topology_logical_die_id(cpu);
+ if (current_die == die) {
+ res = cpu;
+ break;
+ }
+ }
+ cpus_read_unlock();
+ return res;
+}
+
+static int skx_iio_get_topology(struct intel_uncore_type *type)
+{
+ int i, ret;
+ struct pci_bus *bus = NULL;
+
+ /*
+	 * Only single-segment environments have been verified; multi-segment
+	 * topologies are disabled for now, except for VMD domains.
+	 * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
+ */
+ while ((bus = pci_find_next_bus(bus))
+ && (!pci_domain_nr(bus) || pci_domain_nr(bus) > 0xffff))
+ ;
+ if (bus)
+ return -EPERM;
+
+ type->topology = kcalloc(uncore_max_dies(), sizeof(u64), GFP_KERNEL);
+ if (!type->topology)
+ return -ENOMEM;
+
+ for (i = 0; i < uncore_max_dies(); i++) {
+ ret = skx_msr_cpu_bus_read(die_to_cpu(i), &type->topology[i]);
+ if (ret) {
+ kfree(type->topology);
+ type->topology = NULL;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static struct attribute_group skx_iio_mapping_group = {
+ .is_visible = skx_iio_mapping_visible,
+};
+
+static const struct attribute_group *skx_iio_attr_update[] = {
+ &skx_iio_mapping_group,
+ NULL,
+};
+
+static int skx_iio_set_mapping(struct intel_uncore_type *type)
+{
+ char buf[64];
+ int ret;
+ long die = -1;
+ struct attribute **attrs = NULL;
+ struct dev_ext_attribute *eas = NULL;
+
+ ret = skx_iio_get_topology(type);
+ if (ret)
+ return ret;
+
+ /* One more for NULL. */
+ attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ goto err;
+
+ eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
+ if (!eas)
+ goto err;
+
+ for (die = 0; die < uncore_max_dies(); die++) {
+ sprintf(buf, "die%ld", die);
+ sysfs_attr_init(&eas[die].attr.attr);
+ eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
+ if (!eas[die].attr.attr.name)
+ goto err;
+ eas[die].attr.attr.mode = 0444;
+ eas[die].attr.show = skx_iio_mapping_show;
+ eas[die].attr.store = NULL;
+ eas[die].var = (void *)die;
+ attrs[die] = &eas[die].attr.attr;
+ }
+ skx_iio_mapping_group.attrs = attrs;
+
+ return 0;
+err:
+ for (; die >= 0; die--)
+ kfree(eas[die].attr.attr.name);
+ kfree(eas);
+ kfree(attrs);
+ kfree(type->topology);
+ type->attr_update = NULL;
+ return -ENOMEM;
+}
+
+static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
+{
+ struct attribute **attr = skx_iio_mapping_group.attrs;
+
+ if (!attr)
+ return;
+
+ for (; *attr; attr++)
+ kfree((*attr)->name);
+ kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
+ kfree(skx_iio_mapping_group.attrs);
+ skx_iio_mapping_group.attrs = NULL;
+ kfree(type->topology);
+}
+
static struct intel_uncore_type skx_uncore_iio = {
.name = "iio",
.num_counters = 4,
@@ -3626,6 +3814,9 @@ static struct intel_uncore_type skx_uncore_iio = {
.constraints = skx_uncore_iio_constraints,
.ops = &skx_uncore_iio_ops,
.format_group = &skx_uncore_iio_format_group,
+ .attr_update = skx_iio_attr_update,
+ .set_mapping = skx_iio_set_mapping,
+ .cleanup_mapping = skx_iio_cleanup_mapping,
};
enum perf_uncore_iio_freerunning_type_id {
@@ -4421,6 +4612,7 @@ static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
unsigned int box_ctl, int mem_offset)
{
struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
+ struct intel_uncore_type *type = box->pmu->type;
resource_size_t addr;
u32 pci_dword;
@@ -4435,9 +4627,11 @@ static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
addr += box_ctl;
- box->io_addr = ioremap(addr, SNR_IMC_MMIO_SIZE);
- if (!box->io_addr)
+ box->io_addr = ioremap(addr, type->mmio_map_size);
+ if (!box->io_addr) {
+ pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
return;
+ }
writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
}
@@ -4480,6 +4674,9 @@ static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
if (!box->io_addr)
return;
+ if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
+ return;
+
writel(hwc->config | SNBEP_PMON_CTL_EN,
box->io_addr + hwc->config_base);
}
@@ -4492,6 +4689,9 @@ static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
if (!box->io_addr)
return;
+ if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
+ return;
+
writel(hwc->config, box->io_addr + hwc->config_base);
}
@@ -4530,6 +4730,7 @@ static struct intel_uncore_type snr_uncore_imc = {
.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
.box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL,
.mmio_offset = SNR_IMC_MMIO_OFFSET,
+ .mmio_map_size = SNR_IMC_MMIO_SIZE,
.ops = &snr_uncore_mmio_ops,
.format_group = &skx_uncore_format_group,
};
@@ -4570,6 +4771,7 @@ static struct intel_uncore_type snr_uncore_imc_free_running = {
.num_counters = 3,
.num_boxes = 1,
.num_freerunning_types = SNR_IMC_FREERUNNING_TYPE_MAX,
+ .mmio_map_size = SNR_IMC_MMIO_SIZE,
.freerunning = snr_imc_freerunning,
.ops = &snr_uncore_imc_freerunning_ops,
.event_descs = snr_uncore_imc_freerunning_events,
@@ -4987,6 +5189,7 @@ static struct intel_uncore_type icx_uncore_imc = {
.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
.box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL,
.mmio_offset = SNR_IMC_MMIO_OFFSET,
+ .mmio_map_size = SNR_IMC_MMIO_SIZE,
.ops = &icx_uncore_mmio_ops,
.format_group = &skx_uncore_format_group,
};
@@ -5044,6 +5247,7 @@ static struct intel_uncore_type icx_uncore_imc_free_running = {
.num_counters = 5,
.num_boxes = 4,
.num_freerunning_types = ICX_IMC_FREERUNNING_TYPE_MAX,
+ .mmio_map_size = SNR_IMC_MMIO_SIZE,
.freerunning = icx_imc_freerunning,
.ops = &icx_uncore_imc_freerunning_ops,
.event_descs = icx_uncore_imc_freerunning_events,
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index e17a3d8a47ed..7b68ab5f19e7 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -78,6 +78,7 @@ static inline bool constraint_match(struct event_constraint *c, u64 ecode)
#define PERF_X86_EVENT_LARGE_PEBS 0x0400 /* use large PEBS */
#define PERF_X86_EVENT_PEBS_VIA_PT 0x0800 /* use PT buffer for PEBS */
#define PERF_X86_EVENT_PAIR 0x1000 /* Large Increment per Cycle */
+#define PERF_X86_EVENT_LBR_SELECT 0x2000 /* Save/Restore MSR_LBR_SELECT */
struct amd_nb {
int nb_id; /* NorthBridge id */
@@ -179,6 +180,17 @@ struct x86_perf_task_context;
#define MAX_LBR_ENTRIES 32
enum {
+ LBR_FORMAT_32 = 0x00,
+ LBR_FORMAT_LIP = 0x01,
+ LBR_FORMAT_EIP = 0x02,
+ LBR_FORMAT_EIP_FLAGS = 0x03,
+ LBR_FORMAT_EIP_FLAGS2 = 0x04,
+ LBR_FORMAT_INFO = 0x05,
+ LBR_FORMAT_TIME = 0x06,
+ LBR_FORMAT_MAX_KNOWN = LBR_FORMAT_TIME,
+};
+
+enum {
X86_PERF_KFREE_SHARED = 0,
X86_PERF_KFREE_EXCL = 1,
X86_PERF_KFREE_MAX
@@ -233,10 +245,15 @@ struct cpu_hw_events {
int lbr_pebs_users;
struct perf_branch_stack lbr_stack;
struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
- struct er_account *lbr_sel;
+ union {
+ struct er_account *lbr_sel;
+ struct er_account *lbr_ctl;
+ };
u64 br_sel;
- struct x86_perf_task_context *last_task_ctx;
+ void *last_task_ctx;
int last_log_id;
+ int lbr_select;
+ void *lbr_xsave;
/*
* Intel host/guest exclude bits
@@ -673,14 +690,38 @@ struct x86_pmu {
/*
* Intel LBR
*/
- unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
- int lbr_nr; /* hardware stack size */
- u64 lbr_sel_mask; /* LBR_SELECT valid bits */
- const int *lbr_sel_map; /* lbr_select mappings */
+ unsigned int lbr_tos, lbr_from, lbr_to,
+ lbr_info, lbr_nr; /* LBR base regs and size */
+ union {
+ u64 lbr_sel_mask; /* LBR_SELECT valid bits */
+ u64 lbr_ctl_mask; /* LBR_CTL valid bits */
+ };
+ union {
+ const int *lbr_sel_map; /* lbr_select mappings */
+ int *lbr_ctl_map; /* LBR_CTL mappings */
+ };
bool lbr_double_abort; /* duplicated lbr aborts */
bool lbr_pt_coexist; /* (LBR|BTS) may coexist with PT */
/*
+ * Intel Architectural LBR CPUID Enumeration
+ */
+ unsigned int lbr_depth_mask:8;
+ unsigned int lbr_deep_c_reset:1;
+ unsigned int lbr_lip:1;
+ unsigned int lbr_cpl:1;
+ unsigned int lbr_filter:1;
+ unsigned int lbr_call_stack:1;
+ unsigned int lbr_mispred:1;
+ unsigned int lbr_timed_lbr:1;
+ unsigned int lbr_br_type:1;
+
+ void (*lbr_reset)(void);
+ void (*lbr_read)(struct cpu_hw_events *cpuc);
+ void (*lbr_save)(void *ctx);
+ void (*lbr_restore)(void *ctx);
+
+ /*
* Intel PT/LBR/BTS are exclusive
*/
atomic_t lbr_exclusive[x86_lbr_exclusive_max];
@@ -718,17 +759,46 @@ struct x86_pmu {
int (*aux_output_match) (struct perf_event *event);
};
-struct x86_perf_task_context {
- u64 lbr_from[MAX_LBR_ENTRIES];
- u64 lbr_to[MAX_LBR_ENTRIES];
- u64 lbr_info[MAX_LBR_ENTRIES];
- int tos;
- int valid_lbrs;
+struct x86_perf_task_context_opt {
int lbr_callstack_users;
int lbr_stack_state;
int log_id;
};
+struct x86_perf_task_context {
+ u64 lbr_sel;
+ int tos;
+ int valid_lbrs;
+ struct x86_perf_task_context_opt opt;
+ struct lbr_entry lbr[MAX_LBR_ENTRIES];
+};
+
+struct x86_perf_task_context_arch_lbr {
+ struct x86_perf_task_context_opt opt;
+ struct lbr_entry entries[];
+};
+
+/*
+ * Add padding to guarantee the 64-byte alignment of the state buffer.
+ *
+ * The structure is dynamically allocated. The size of the LBR state may vary
+ * based on the number of LBR registers.
+ *
+ * Do not put anything after the LBR state.
+ */
+struct x86_perf_task_context_arch_lbr_xsave {
+ struct x86_perf_task_context_opt opt;
+
+ union {
+ struct xregs_state xsave;
+ struct {
+ struct fxregs_state i387;
+ struct xstate_header header;
+ struct arch_lbr_state lbr;
+ } __attribute__ ((packed, aligned (XSAVE_ALIGNMENT)));
+ };
+};
+
#define x86_add_quirk(func_) \
do { \
static struct x86_pmu_quirk __quirk __initdata = { \
@@ -777,6 +847,14 @@ static struct perf_pmu_events_ht_attr event_attr_##v = { \
struct pmu *x86_get_pmu(void);
extern struct x86_pmu x86_pmu __read_mostly;
+static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
+{
+ if (static_cpu_has(X86_FEATURE_ARCH_LBR))
+ return &((struct x86_perf_task_context_arch_lbr *)ctx)->opt;
+
+ return &((struct x86_perf_task_context *)ctx)->opt;
+}
+
static inline bool x86_pmu_has_lbr_callstack(void)
{
return x86_pmu.lbr_sel_map &&
@@ -989,7 +1067,10 @@ void release_ds_buffers(void);
void reserve_ds_buffers(void);
+void release_lbr_buffers(void);
+
extern struct event_constraint bts_constraint;
+extern struct event_constraint vlbr_constraint;
void intel_pmu_enable_bts(u64 config);
@@ -1041,7 +1122,7 @@ void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);
void intel_pmu_auto_reload_read(struct perf_event *event);
-void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr);
+void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);
void intel_ds_init(void);
@@ -1054,6 +1135,10 @@ u64 lbr_from_signext_quirk_wr(u64 val);
void intel_pmu_lbr_reset(void);
+void intel_pmu_lbr_reset_32(void);
+
+void intel_pmu_lbr_reset_64(void);
+
void intel_pmu_lbr_add(struct perf_event *event);
void intel_pmu_lbr_del(struct perf_event *event);
@@ -1064,6 +1149,14 @@ void intel_pmu_lbr_disable_all(void);
void intel_pmu_lbr_read(void);
+void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc);
+
+void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc);
+
+void intel_pmu_lbr_save(void *ctx);
+
+void intel_pmu_lbr_restore(void *ctx);
+
void intel_pmu_lbr_init_core(void);
void intel_pmu_lbr_init_nhm(void);
@@ -1080,6 +1173,8 @@ void intel_pmu_lbr_init_skl(void);
void intel_pmu_lbr_init_knl(void);
+void intel_pmu_arch_lbr_init(void);
+
void intel_pmu_pebs_data_source_nhm(void);
void intel_pmu_pebs_data_source_skl(bool pmem);
@@ -1115,6 +1210,10 @@ static inline void release_ds_buffers(void)
{
}
+static inline void release_lbr_buffers(void)
+{
+}
+
static inline int intel_pmu_init(void)
{
return 0;
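
The task_context_opt() helper introduced in this hunk lets callers reach the shared bookkeeping fields without knowing whether the per-task LBR context uses the legacy layout (opt at the end) or the architectural-LBR layout (opt at the front). A minimal userspace sketch of that dispatch pattern, with made-up names and a plain bool standing in for static_cpu_has(X86_FEATURE_ARCH_LBR):

#include <stdio.h>
#include <stdbool.h>

struct ctx_opt    { int users; int state; };
struct ctx_legacy { long sel; int tos; struct ctx_opt opt; };
struct ctx_arch   { struct ctx_opt opt; /* flexible entries[] would follow */ };

static bool have_arch_lbr;	/* stands in for the CPU feature check */

/* Same idea as task_context_opt(): pick the cast at run time. */
static struct ctx_opt *ctx_opt(void *ctx)
{
	if (have_arch_lbr)
		return &((struct ctx_arch *)ctx)->opt;
	return &((struct ctx_legacy *)ctx)->opt;
}

int main(void)
{
	struct ctx_legacy legacy = { .opt = { .users = 1 } };
	struct ctx_arch arch     = { .opt = { .users = 2 } };

	have_arch_lbr = false;
	printf("legacy users: %d\n", ctx_opt(&legacy)->users);
	have_arch_lbr = true;
	printf("arch users:   %d\n", ctx_opt(&arch)->users);
	return 0;
}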
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index 0f2bf59f4354..68b38820b10e 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -787,7 +787,8 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &model_hsx),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &model_skl),
- X86_MATCH_VENDOR_FAM(AMD, 0x17, &model_amd_fam17h),
+ X86_MATCH_VENDOR_FAM(AMD, 0x17, &model_amd_fam17h),
+ X86_MATCH_VENDOR_FAM(HYGON, 0x18, &model_amd_fam17h),
{},
};
MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);
diff --git a/arch/x86/events/zhaoxin/core.c b/arch/x86/events/zhaoxin/core.c
index 898fa1ae9ceb..e68827e604ad 100644
--- a/arch/x86/events/zhaoxin/core.c
+++ b/arch/x86/events/zhaoxin/core.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Zhoaxin PMU; like Intel Architectural PerfMon-v2
+ * Zhaoxin PMU; like Intel Architectural PerfMon-v2
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 0f63585edf5f..5c15f95b1ba7 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -144,7 +144,7 @@
_ASM_PTR (entry); \
.popsection
-#else
+#else /* ! __ASSEMBLY__ */
# define _EXPAND_EXTABLE_HANDLE(x) #x
# define _ASM_EXTABLE_HANDLE(from, to, handler) \
" .pushsection \"__ex_table\",\"a\"\n" \
@@ -164,9 +164,7 @@
_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
/* For C file, we already have NOKPROBE_SYMBOL macro */
-#endif
-#ifndef __ASSEMBLY__
/*
* This output constraint should be used for any inline asm which has a "call"
* instruction. Otherwise the asm may be inserted before the frame pointer
@@ -175,6 +173,6 @@
*/
register unsigned long current_stack_pointer asm(_ASM_SP);
#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
-#endif
+#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_ASM_H */
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index bf35e476a776..b6cac6e9bb70 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -14,8 +14,6 @@
* resource counting etc..
*/
-#define ATOMIC_INIT(i) { (i) }
-
/**
* arch_atomic_read - read atomic variable
* @v: pointer of type atomic_t
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 680c320363db..9191280d9ea3 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -24,9 +24,16 @@
# error "Invalid value for CONFIG_PHYSICAL_ALIGN"
#endif
-#ifdef CONFIG_KERNEL_BZIP2
+#if defined(CONFIG_KERNEL_BZIP2)
# define BOOT_HEAP_SIZE 0x400000
-#else /* !CONFIG_KERNEL_BZIP2 */
+#elif defined(CONFIG_KERNEL_ZSTD)
+/*
+ * Zstd needs to allocate the ZSTD_DCtx in order to decompress the kernel.
+ * The ZSTD_DCtx is ~160KB, so set the heap size to 192KB because it is a
+ * round number and to allow some slack.
+ */
+# define BOOT_HEAP_SIZE 0x30000
+#else
# define BOOT_HEAP_SIZE 0x10000
#endif
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index 028189575560..297fa12e7e27 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -3,6 +3,7 @@
#define _ASM_X86_BUG_H
#include <linux/stringify.h>
+#include <linux/instrumentation.h>
/*
* Despite that some emulators terminate on UD2, we use it for WARN().
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index 1a2eafca7038..0a7fe0321613 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -3,7 +3,7 @@
#define _ASM_X86_CMPXCHG_32_H
/*
- * Note: if you use set64_bit(), __cmpxchg64(), or their variants, you
+ * Note: if you use set64_bit(), __cmpxchg64(), or their variants,
* you need to test for the feature in boot_cpu_data.
*/
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 02dabc9e77b0..2901d5df4366 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -96,6 +96,7 @@
#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
+/* free ( 3*32+17) */
#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
@@ -107,6 +108,7 @@
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
+/* free ( 3*32+29) */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
@@ -365,7 +367,9 @@
#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */
#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
+#define X86_FEATURE_SERIALIZE (18*32+14) /* SERIALIZE instruction */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
+#define X86_FEATURE_ARCH_LBR (18*32+19) /* Intel ARCH LBR */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */
diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
index 9b8cb50768c2..b8f1dc0761e4 100644
--- a/arch/x86/include/asm/div64.h
+++ b/arch/x86/include/asm/div64.h
@@ -74,16 +74,26 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
#else
# include <asm-generic/div64.h>
-static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 div)
+/*
+ * Will generate an #DE when the result doesn't fit u64, could fix with an
+ * __ex_table[] entry when it becomes an issue.
+ */
+static inline u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div)
{
u64 q;
asm ("mulq %2; divq %3" : "=a" (q)
- : "a" (a), "rm" ((u64)mul), "rm" ((u64)div)
+ : "a" (a), "rm" (mul), "rm" (div)
: "rdx");
return q;
}
+#define mul_u64_u64_div_u64 mul_u64_u64_div_u64
+
+static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 div)
+{
+ return mul_u64_u64_div_u64(a, mul, div);
+}
#define mul_u64_u32_div mul_u64_u32_div
#endif /* CONFIG_X86_32 */
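
For illustration, a userspace sketch (x86-64 only, compiled outside the kernel) of the 64x64 -> 128 -> 64 multiply-then-divide that mul_u64_u64_div_u64() performs above. As the patch comment notes, the CPU raises #DE if the quotient does not fit in 64 bits, so callers must keep div large enough relative to a * mul.

#include <stdint.h>
#include <stdio.h>

static uint64_t mul_div(uint64_t a, uint64_t mul, uint64_t div)
{
	uint64_t q;

	/* rdx:rax = a * mul; then rax = (rdx:rax) / div, rdx = remainder */
	asm("mulq %2; divq %3"
	    : "=a" (q)
	    : "a" (a), "rm" (mul), "rm" (div)
	    : "rdx");
	return q;
}

int main(void)
{
	/* 3 * 10^19 overflows u64 as an intermediate product, but the quotient fits */
	printf("%llu\n",
	       (unsigned long long)mul_div(10000000000000000000ULL, 3, 4));
	return 0;
}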
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 6b15a24930e0..fed67eafcacc 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -3,8 +3,8 @@
#define _ASM_X86_DMA_MAPPING_H
/*
- * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and
- * Documentation/DMA-API.txt for documentation.
+ * IOMMU interface. See Documentation/core-api/dma-api-howto.rst and
+ * Documentation/core-api/dma-api.rst for documentation.
*/
#include <linux/scatterlist.h>
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index e7d2ccfdd507..b9c2667ac46c 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -22,17 +22,7 @@ extern unsigned long efi_fw_vendor, efi_config_table;
*
* This is the main reason why we're doing stable VA mappings for RT
* services.
- *
- * SGI UV1 machines are known to be incompatible with this scheme, so we
- * provide an opt-out for these machines via a DMI quirk that sets the
- * attribute below.
*/
-#define EFI_UV1_MEMMAP EFI_ARCH_1
-
-static inline bool efi_have_uv1_memmap(void)
-{
- return IS_ENABLED(CONFIG_X86_UV) && efi_enabled(EFI_UV1_MEMMAP);
-}
#define EFI32_LOADER_SIGNATURE "EL32"
#define EFI64_LOADER_SIGNATURE "EL64"
@@ -122,9 +112,7 @@ struct efi_scratch {
efi_sync_low_kernel_mappings(); \
kernel_fpu_begin(); \
firmware_restrict_branch_speculation_start(); \
- \
- if (!efi_have_uv1_memmap()) \
- efi_switch_mm(&efi_mm); \
+ efi_switch_mm(&efi_mm); \
})
#define arch_efi_call_virt(p, f, args...) \
@@ -132,9 +120,7 @@ struct efi_scratch {
#define arch_efi_call_virt_teardown() \
({ \
- if (!efi_have_uv1_memmap()) \
- efi_switch_mm(efi_scratch.prev_mm); \
- \
+ efi_switch_mm(efi_scratch.prev_mm); \
firmware_restrict_branch_speculation_end(); \
kernel_fpu_end(); \
})
@@ -176,8 +162,6 @@ extern void efi_delete_dummy_variable(void);
extern void efi_switch_mm(struct mm_struct *mm);
extern void efi_recover_from_page_fault(unsigned long phys_addr);
extern void efi_free_boot_services(void);
-extern pgd_t * __init efi_uv1_memmap_phys_prolog(void);
-extern void __init efi_uv1_memmap_phys_epilog(pgd_t *save_pgd);
/* kexec external ABI */
struct efi_setup_data {
diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
new file mode 100644
index 000000000000..a8f9315b9eae
--- /dev/null
+++ b/arch/x86/include/asm/entry-common.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_X86_ENTRY_COMMON_H
+#define _ASM_X86_ENTRY_COMMON_H
+
+#include <linux/user-return-notifier.h>
+
+#include <asm/nospec-branch.h>
+#include <asm/io_bitmap.h>
+#include <asm/fpu/api.h>
+
+/* Check that the stack and regs on entry from user mode are sane. */
+static __always_inline void arch_check_user_regs(struct pt_regs *regs)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) {
+ /*
+ * Make sure that the entry code gave us a sensible EFLAGS
+ * register. Native because we want to check the actual CPU
+ * state, not the interrupt state as imagined by Xen.
+ */
+ unsigned long flags = native_save_fl();
+ WARN_ON_ONCE(flags & (X86_EFLAGS_AC | X86_EFLAGS_DF |
+ X86_EFLAGS_NT));
+
+ /* We think we came from user mode. Make sure pt_regs agrees. */
+ WARN_ON_ONCE(!user_mode(regs));
+
+ /*
+ * All entries from user mode (except #DF) should be on the
+ * normal thread stack and should have user pt_regs in the
+ * correct location.
+ */
+ WARN_ON_ONCE(!on_thread_stack());
+ WARN_ON_ONCE(regs != task_pt_regs(current));
+ }
+}
+#define arch_check_user_regs arch_check_user_regs
+
+#define ARCH_SYSCALL_EXIT_WORK (_TIF_SINGLESTEP)
+
+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ unsigned long ti_work)
+{
+ if (ti_work & _TIF_USER_RETURN_NOTIFY)
+ fire_user_return_notifiers();
+
+ if (unlikely(ti_work & _TIF_IO_BITMAP))
+ tss_update_io_bitmap();
+
+ fpregs_assert_state_consistent();
+ if (unlikely(ti_work & _TIF_NEED_FPU_LOAD))
+ switch_fpu_return();
+
+#ifdef CONFIG_COMPAT
+ /*
+ * Compat syscalls set TS_COMPAT. Make sure we clear it before
+ * returning to user mode. We need to clear it *after* signal
+ * handling, because syscall restart has a fixup for compat
+ * syscalls. The fixup is exercised by the ptrace_syscall_32
+ * selftest.
+ *
+ * We also need to clear TS_REGS_POKED_I386: the 32-bit tracer
+ * special case only applies after poking regs and before the
+ * very next return to user mode.
+ */
+ current_thread_info()->status &= ~(TS_COMPAT | TS_I386_REGS_POKED);
+#endif
+}
+#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
+
+static __always_inline void arch_exit_to_user_mode(void)
+{
+ mds_user_clear_cpu_buffers();
+}
+#define arch_exit_to_user_mode arch_exit_to_user_mode
+
+#endif
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 845e7481ab77..6b10cdaa7c96 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -274,7 +274,7 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
*/
static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
{
- u64 mask = -1;
+ u64 mask = xfeatures_mask_all;
u32 lmask = mask;
u32 hmask = mask >> 32;
int err;
@@ -320,7 +320,7 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
*/
static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
{
- u64 mask = -1;
+ u64 mask = xfeatures_mask_all;
u32 lmask = mask;
u32 hmask = mask >> 32;
int err;
@@ -356,6 +356,9 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
*/
static inline int copy_xregs_to_user(struct xregs_state __user *buf)
{
+ u64 mask = xfeatures_mask_user();
+ u32 lmask = mask;
+ u32 hmask = mask >> 32;
int err;
/*
@@ -367,7 +370,7 @@ static inline int copy_xregs_to_user(struct xregs_state __user *buf)
return -EFAULT;
stac();
- XSTATE_OP(XSAVE, buf, -1, -1, err);
+ XSTATE_OP(XSAVE, buf, lmask, hmask, err);
clac();
return err;
@@ -408,43 +411,7 @@ static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask)
return err;
}
-/*
- * These must be called with preempt disabled. Returns
- * 'true' if the FPU state is still intact and we can
- * keep registers active.
- *
- * The legacy FNSAVE instruction cleared all FPU state
- * unconditionally, so registers are essentially destroyed.
- * Modern FPU state can be kept in registers, if there are
- * no pending FP exceptions.
- */
-static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
-{
- if (likely(use_xsave())) {
- copy_xregs_to_kernel(&fpu->state.xsave);
-
- /*
- * AVX512 state is tracked here because its use is
- * known to slow the max clock speed of the core.
- */
- if (fpu->state.xsave.header.xfeatures & XFEATURE_MASK_AVX512)
- fpu->avx512_timestamp = jiffies;
- return 1;
- }
-
- if (likely(use_fxsr())) {
- copy_fxregs_to_kernel(fpu);
- return 1;
- }
-
- /*
- * Legacy FPU register saving, FNSAVE always clears FPU registers,
- * so we have to mark them inactive:
- */
- asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));
-
- return 0;
-}
+extern int copy_fpregs_to_fpstate(struct fpu *fpu);
static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
{
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index f098f6cab94b..c87364ea6446 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -114,6 +114,12 @@ enum xfeature {
XFEATURE_Hi16_ZMM,
XFEATURE_PT_UNIMPLEMENTED_SO_FAR,
XFEATURE_PKRU,
+ XFEATURE_RSRVD_COMP_10,
+ XFEATURE_RSRVD_COMP_11,
+ XFEATURE_RSRVD_COMP_12,
+ XFEATURE_RSRVD_COMP_13,
+ XFEATURE_RSRVD_COMP_14,
+ XFEATURE_LBR,
XFEATURE_MAX,
};
@@ -128,6 +134,7 @@ enum xfeature {
#define XFEATURE_MASK_Hi16_ZMM (1 << XFEATURE_Hi16_ZMM)
#define XFEATURE_MASK_PT (1 << XFEATURE_PT_UNIMPLEMENTED_SO_FAR)
#define XFEATURE_MASK_PKRU (1 << XFEATURE_PKRU)
+#define XFEATURE_MASK_LBR (1 << XFEATURE_LBR)
#define XFEATURE_MASK_FPSSE (XFEATURE_MASK_FP | XFEATURE_MASK_SSE)
#define XFEATURE_MASK_AVX512 (XFEATURE_MASK_OPMASK \
@@ -229,6 +236,26 @@ struct pkru_state {
u32 pad;
} __packed;
+/*
+ * State component 15: Architectural LBR configuration state.
+ * The size of Arch LBR state depends on the number of LBRs (lbr_depth).
+ */
+
+struct lbr_entry {
+ u64 from;
+ u64 to;
+ u64 info;
+};
+
+struct arch_lbr_state {
+ u64 lbr_ctl;
+ u64 lbr_depth;
+ u64 ler_from;
+ u64 ler_to;
+ u64 ler_info;
+ struct lbr_entry entries[];
+} __packed;
+
struct xstate_header {
u64 xfeatures;
u64 xcomp_bv;
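
A small size sketch for the flexible arch_lbr_state layout added above: a fixed five-u64 header followed by lbr_depth three-word entries. The structures are copied from the patch; the depth value of 32 is only an example of what CPUID might enumerate.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct lbr_entry { uint64_t from, to, info; };

struct arch_lbr_state {
	uint64_t lbr_ctl;
	uint64_t lbr_depth;
	uint64_t ler_from;
	uint64_t ler_to;
	uint64_t ler_info;
	struct lbr_entry entries[];
} __attribute__((packed));

int main(void)
{
	unsigned int depth = 32;	/* example LBR depth */
	size_t size = sizeof(struct arch_lbr_state) +
		      depth * sizeof(struct lbr_entry);

	/* 40 fixed bytes + 32 * 24 = 808 bytes for this depth */
	printf("arch LBR state for %u entries: %zu bytes\n", depth, size);
	return 0;
}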
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index 422d8369012a..1559554af931 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -21,6 +21,8 @@
#define XSAVE_YMM_SIZE 256
#define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
+#define XSAVE_ALIGNMENT 64
+
/* All currently supported user features */
#define XFEATURE_MASK_USER_SUPPORTED (XFEATURE_MASK_FP | \
XFEATURE_MASK_SSE | \
@@ -36,6 +38,27 @@
#define XFEATURE_MASK_SUPERVISOR_SUPPORTED (0)
/*
+ * A supervisor state component may not always contain valuable information,
+ * and its size may be huge. Saving/restoring such supervisor state components
+ * at each context switch can cause high CPU and space overhead, which should
+ * be avoided. Such supervisor state components should only be saved/restored
+ * on demand. The on-demand dynamic supervisor features are set in this mask.
+ *
+ * Unlike the existing supported supervisor features, a dynamic supervisor
+ * feature does not allocate a buffer in task->fpu, and the corresponding
+ * supervisor state component cannot be saved/restored at each context switch.
+ *
+ * To support a dynamic supervisor feature, a developer should follow the
+ * dos and don'ts as below:
+ * - Do dynamically allocate a buffer for the supervisor state component.
+ * - Do manually invoke the XSAVES/XRSTORS instruction to save/restore the
+ * state component to/from the buffer.
+ * - Don't set the bit corresponding to the dynamic supervisor feature in
+ * IA32_XSS at run time, since it has been set at boot time.
+ */
+#define XFEATURE_MASK_DYNAMIC (XFEATURE_MASK_LBR)
+
+/*
* Unsupported supervisor features. When a supervisor feature in this mask is
* supported in the future, move it to the supported supervisor feature mask.
*/
@@ -43,6 +66,7 @@
/* All supervisor states including supported and unsupported states. */
#define XFEATURE_MASK_SUPERVISOR_ALL (XFEATURE_MASK_SUPERVISOR_SUPPORTED | \
+ XFEATURE_MASK_DYNAMIC | \
XFEATURE_MASK_SUPERVISOR_UNSUPPORTED)
#ifdef CONFIG_X86_64
@@ -63,6 +87,14 @@ static inline u64 xfeatures_mask_user(void)
return xfeatures_mask_all & XFEATURE_MASK_USER_SUPPORTED;
}
+static inline u64 xfeatures_mask_dynamic(void)
+{
+ if (!boot_cpu_has(X86_FEATURE_ARCH_LBR))
+ return XFEATURE_MASK_DYNAMIC & ~XFEATURE_MASK_LBR;
+
+ return XFEATURE_MASK_DYNAMIC;
+}
+
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
extern void __init update_regset_xstate_info(unsigned int size,
@@ -71,11 +103,15 @@ extern void __init update_regset_xstate_info(unsigned int size,
void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr);
const void *get_xsave_field_ptr(int xfeature_nr);
int using_compacted_format(void);
+int xfeature_size(int xfeature_nr);
int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf);
int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf);
void copy_supervisor_to_kernel(struct xregs_state *xsave);
+void copy_dynamic_supervisor_to_kernel(struct xregs_state *xstate, u64 mask);
+void copy_kernel_to_dynamic_supervisor(struct xregs_state *xstate, u64 mask);
+
/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
int validate_user_xstate_header(const struct xstate_header *hdr);
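
The dynamic supervisor state described by XFEATURE_MASK_DYNAMIC above is sized at run time (the kernel side uses xfeature_size() and the copy_*_dynamic_supervisor helpers declared here). As an illustrative userspace sketch, not part of the patch: CPUID leaf 0xD, subleaf n reports the save-area size of XSAVE component n in EAX, and ECX bit 0 indicates a supervisor state managed via IA32_XSS. On CPUs without architectural LBRs, component 15 simply reports size 0.

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(0x0d, 15, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0xD not available");
		return 1;
	}

	printf("XSAVE component 15 (LBR): size %u bytes, %s state\n",
	       eax, (ecx & 1) ? "supervisor (IA32_XSS)" : "user (XCR0)");
	return 0;
}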
diff --git a/arch/x86/include/asm/fsgsbase.h b/arch/x86/include/asm/fsgsbase.h
index bca4c743de77..d552646411a9 100644
--- a/arch/x86/include/asm/fsgsbase.h
+++ b/arch/x86/include/asm/fsgsbase.h
@@ -19,36 +19,65 @@ extern unsigned long x86_gsbase_read_task(struct task_struct *task);
extern void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase);
extern void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase);
-/* Helper functions for reading/writing FS/GS base */
+/* Must be protected by X86_FEATURE_FSGSBASE check. */
-static inline unsigned long x86_fsbase_read_cpu(void)
+static __always_inline unsigned long rdfsbase(void)
{
unsigned long fsbase;
- rdmsrl(MSR_FS_BASE, fsbase);
+ asm volatile("rdfsbase %0" : "=r" (fsbase) :: "memory");
return fsbase;
}
-static inline unsigned long x86_gsbase_read_cpu_inactive(void)
+static __always_inline unsigned long rdgsbase(void)
{
unsigned long gsbase;
- rdmsrl(MSR_KERNEL_GS_BASE, gsbase);
+ asm volatile("rdgsbase %0" : "=r" (gsbase) :: "memory");
return gsbase;
}
-static inline void x86_fsbase_write_cpu(unsigned long fsbase)
+static __always_inline void wrfsbase(unsigned long fsbase)
{
- wrmsrl(MSR_FS_BASE, fsbase);
+ asm volatile("wrfsbase %0" :: "r" (fsbase) : "memory");
}
-static inline void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
+static __always_inline void wrgsbase(unsigned long gsbase)
{
- wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
+ asm volatile("wrgsbase %0" :: "r" (gsbase) : "memory");
}
+#include <asm/cpufeature.h>
+
+/* Helper functions for reading/writing FS/GS base */
+
+static inline unsigned long x86_fsbase_read_cpu(void)
+{
+ unsigned long fsbase;
+
+ if (static_cpu_has(X86_FEATURE_FSGSBASE))
+ fsbase = rdfsbase();
+ else
+ rdmsrl(MSR_FS_BASE, fsbase);
+
+ return fsbase;
+}
+
+static inline void x86_fsbase_write_cpu(unsigned long fsbase)
+{
+ if (static_cpu_has(X86_FEATURE_FSGSBASE))
+ wrfsbase(fsbase);
+ else
+ wrmsrl(MSR_FS_BASE, fsbase);
+}
+
+extern unsigned long x86_gsbase_read_cpu_inactive(void);
+extern void x86_gsbase_write_cpu_inactive(unsigned long gsbase);
+extern unsigned long x86_fsgsbase_read_task(struct task_struct *task,
+ unsigned short selector);
+
#endif /* CONFIG_X86_64 */
#endif /* __ASSEMBLY__ */
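
A userspace sketch of the same feature gate, under the assumption (from the wider FSGSBASE series, not this hunk) that the kernel advertises enablement to user mode through AT_HWCAP2; the HWCAP2_FSGSBASE value below is copied by hand and should be treated as illustrative. Only after that check may user code execute RDFSBASE directly instead of going through arch_prctl().

#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP2_FSGSBASE (1 << 1)	/* assumed to mirror the x86 uapi hwcap2 bit */

int main(void)
{
	unsigned long fsbase;

	if (!(getauxval(AT_HWCAP2) & HWCAP2_FSGSBASE)) {
		puts("FSGSBASE not enabled; fall back to arch_prctl()");
		return 0;
	}

	/* Safe only after the check above, same rule as the kernel helpers */
	asm volatile("rdfsbase %0" : "=r" (fsbase) :: "memory");
	printf("FS base: %#lx\n", fsbase);
	return 0;
}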
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index 80d3b30d3ee3..ff198fc2495e 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -6,15 +6,13 @@
#include <asm/trapnr.h>
#ifndef __ASSEMBLY__
+#include <linux/entry-common.h>
#include <linux/hardirq.h>
#include <asm/irq_stack.h>
-void idtentry_enter_user(struct pt_regs *regs);
-void idtentry_exit_user(struct pt_regs *regs);
-
-bool idtentry_enter_cond_rcu(struct pt_regs *regs);
-void idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit);
+bool idtentry_enter_nmi(struct pt_regs *regs);
+void idtentry_exit_nmi(struct pt_regs *regs, bool irq_state);
/**
* DECLARE_IDTENTRY - Declare functions for simple IDT entry points
@@ -45,8 +43,8 @@ void idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit);
* The macro is written so it acts as function definition. Append the
* body with a pair of curly brackets.
*
- * idtentry_enter() contains common code which has to be invoked before
- * arbitrary code in the body. idtentry_exit() contains common code
+ * irqentry_enter() contains common code which has to be invoked before
+ * arbitrary code in the body. irqentry_exit() contains common code
* which has to run before returning to the low level assembly code.
*/
#define DEFINE_IDTENTRY(func) \
@@ -54,12 +52,12 @@ static __always_inline void __##func(struct pt_regs *regs); \
\
__visible noinstr void func(struct pt_regs *regs) \
{ \
- bool rcu_exit = idtentry_enter_cond_rcu(regs); \
+ irqentry_state_t state = irqentry_enter(regs); \
\
instrumentation_begin(); \
__##func (regs); \
instrumentation_end(); \
- idtentry_exit_cond_rcu(regs, rcu_exit); \
+ irqentry_exit(regs, state); \
} \
\
static __always_inline void __##func(struct pt_regs *regs)
@@ -101,12 +99,12 @@ static __always_inline void __##func(struct pt_regs *regs, \
__visible noinstr void func(struct pt_regs *regs, \
unsigned long error_code) \
{ \
- bool rcu_exit = idtentry_enter_cond_rcu(regs); \
+ irqentry_state_t state = irqentry_enter(regs); \
\
instrumentation_begin(); \
__##func (regs, error_code); \
instrumentation_end(); \
- idtentry_exit_cond_rcu(regs, rcu_exit); \
+ irqentry_exit(regs, state); \
} \
\
static __always_inline void __##func(struct pt_regs *regs, \
@@ -161,7 +159,7 @@ __visible noinstr void func(struct pt_regs *regs)
* body with a pair of curly brackets.
*
* Contrary to DEFINE_IDTENTRY_ERRORCODE() this does not invoke the
- * idtentry_enter/exit() helpers before and after the body invocation. This
+ * irqentry_enter/exit() helpers before and after the body invocation. This
* needs to be done in the body itself if applicable. Use if extra work
* is required before the enter/exit() helpers are invoked.
*/
@@ -187,11 +185,9 @@ __visible noinstr void func(struct pt_regs *regs, unsigned long error_code)
* to the function as error_code argument which needs to be truncated
* to an u8 because the push is sign extending.
*
- * On 64-bit idtentry_enter/exit() are invoked in the ASM entry code before
- * and after switching to the interrupt stack. On 32-bit this happens in C.
- *
* irq_enter/exit_rcu() are invoked before the function body and the
- * KVM L1D flush request is set.
+ * KVM L1D flush request is set. Stack switching to the interrupt stack
+ * has to be done in the function body if necessary.
*/
#define DEFINE_IDTENTRY_IRQ(func) \
static __always_inline void __##func(struct pt_regs *regs, u8 vector); \
@@ -199,7 +195,7 @@ static __always_inline void __##func(struct pt_regs *regs, u8 vector); \
__visible noinstr void func(struct pt_regs *regs, \
unsigned long error_code) \
{ \
- bool rcu_exit = idtentry_enter_cond_rcu(regs); \
+ irqentry_state_t state = irqentry_enter(regs); \
\
instrumentation_begin(); \
irq_enter_rcu(); \
@@ -207,7 +203,7 @@ __visible noinstr void func(struct pt_regs *regs, \
__##func (regs, (u8)error_code); \
irq_exit_rcu(); \
instrumentation_end(); \
- idtentry_exit_cond_rcu(regs, rcu_exit); \
+ irqentry_exit(regs, state); \
} \
\
static __always_inline void __##func(struct pt_regs *regs, u8 vector)
@@ -231,7 +227,7 @@ static __always_inline void __##func(struct pt_regs *regs, u8 vector)
* DEFINE_IDTENTRY_SYSVEC - Emit code for system vector IDT entry points
* @func: Function name of the entry point
*
- * idtentry_enter/exit() and irq_enter/exit_rcu() are invoked before the
+ * irqentry_enter/exit() and irq_enter/exit_rcu() are invoked before the
* function body. KVM L1D flush request is set.
*
* Runs the function on the interrupt stack if the entry hit kernel mode
@@ -241,7 +237,7 @@ static void __##func(struct pt_regs *regs); \
\
__visible noinstr void func(struct pt_regs *regs) \
{ \
- bool rcu_exit = idtentry_enter_cond_rcu(regs); \
+ irqentry_state_t state = irqentry_enter(regs); \
\
instrumentation_begin(); \
irq_enter_rcu(); \
@@ -249,7 +245,7 @@ __visible noinstr void func(struct pt_regs *regs) \
run_on_irqstack_cond(__##func, regs, regs); \
irq_exit_rcu(); \
instrumentation_end(); \
- idtentry_exit_cond_rcu(regs, rcu_exit); \
+ irqentry_exit(regs, state); \
} \
\
static noinline void __##func(struct pt_regs *regs)
@@ -270,7 +266,7 @@ static __always_inline void __##func(struct pt_regs *regs); \
\
__visible noinstr void func(struct pt_regs *regs) \
{ \
- bool rcu_exit = idtentry_enter_cond_rcu(regs); \
+ irqentry_state_t state = irqentry_enter(regs); \
\
instrumentation_begin(); \
__irq_enter_raw(); \
@@ -278,7 +274,7 @@ __visible noinstr void func(struct pt_regs *regs) \
__##func (regs); \
__irq_exit_raw(); \
instrumentation_end(); \
- idtentry_exit_cond_rcu(regs, rcu_exit); \
+ irqentry_exit(regs, state); \
} \
\
static __always_inline void __##func(struct pt_regs *regs)
diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h
index f5a796da07f8..bd7f02480ca1 100644
--- a/arch/x86/include/asm/inst.h
+++ b/arch/x86/include/asm/inst.h
@@ -12,7 +12,6 @@
#define REG_TYPE_R32 0
#define REG_TYPE_R64 1
-#define REG_TYPE_XMM 2
#define REG_TYPE_INVALID 100
.macro R32_NUM opd r32
@@ -123,77 +122,18 @@
#endif
.endm
- .macro XMM_NUM opd xmm
- \opd = REG_NUM_INVALID
- .ifc \xmm,%xmm0
- \opd = 0
- .endif
- .ifc \xmm,%xmm1
- \opd = 1
- .endif
- .ifc \xmm,%xmm2
- \opd = 2
- .endif
- .ifc \xmm,%xmm3
- \opd = 3
- .endif
- .ifc \xmm,%xmm4
- \opd = 4
- .endif
- .ifc \xmm,%xmm5
- \opd = 5
- .endif
- .ifc \xmm,%xmm6
- \opd = 6
- .endif
- .ifc \xmm,%xmm7
- \opd = 7
- .endif
- .ifc \xmm,%xmm8
- \opd = 8
- .endif
- .ifc \xmm,%xmm9
- \opd = 9
- .endif
- .ifc \xmm,%xmm10
- \opd = 10
- .endif
- .ifc \xmm,%xmm11
- \opd = 11
- .endif
- .ifc \xmm,%xmm12
- \opd = 12
- .endif
- .ifc \xmm,%xmm13
- \opd = 13
- .endif
- .ifc \xmm,%xmm14
- \opd = 14
- .endif
- .ifc \xmm,%xmm15
- \opd = 15
- .endif
- .endm
-
.macro REG_TYPE type reg
R32_NUM reg_type_r32 \reg
R64_NUM reg_type_r64 \reg
- XMM_NUM reg_type_xmm \reg
.if reg_type_r64 <> REG_NUM_INVALID
\type = REG_TYPE_R64
.elseif reg_type_r32 <> REG_NUM_INVALID
\type = REG_TYPE_R32
- .elseif reg_type_xmm <> REG_NUM_INVALID
- \type = REG_TYPE_XMM
.else
\type = REG_TYPE_INVALID
.endif
.endm
- .macro PFX_OPD_SIZE
- .byte 0x66
- .endm
-
.macro PFX_REX opd1 opd2 W=0
.if ((\opd1 | \opd2) & 8) || \W
.byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
@@ -204,108 +144,20 @@
.byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
.endm
- .macro PSHUFB_XMM xmm1 xmm2
- XMM_NUM pshufb_opd1 \xmm1
- XMM_NUM pshufb_opd2 \xmm2
- PFX_OPD_SIZE
- PFX_REX pshufb_opd1 pshufb_opd2
- .byte 0x0f, 0x38, 0x00
- MODRM 0xc0 pshufb_opd1 pshufb_opd2
- .endm
-
- .macro PCLMULQDQ imm8 xmm1 xmm2
- XMM_NUM clmul_opd1 \xmm1
- XMM_NUM clmul_opd2 \xmm2
- PFX_OPD_SIZE
- PFX_REX clmul_opd1 clmul_opd2
- .byte 0x0f, 0x3a, 0x44
- MODRM 0xc0 clmul_opd1 clmul_opd2
- .byte \imm8
- .endm
-
- .macro PEXTRD imm8 xmm gpr
- R32_NUM extrd_opd1 \gpr
- XMM_NUM extrd_opd2 \xmm
- PFX_OPD_SIZE
- PFX_REX extrd_opd1 extrd_opd2
- .byte 0x0f, 0x3a, 0x16
- MODRM 0xc0 extrd_opd1 extrd_opd2
- .byte \imm8
- .endm
-
- .macro AESKEYGENASSIST rcon xmm1 xmm2
- XMM_NUM aeskeygen_opd1 \xmm1
- XMM_NUM aeskeygen_opd2 \xmm2
- PFX_OPD_SIZE
- PFX_REX aeskeygen_opd1 aeskeygen_opd2
- .byte 0x0f, 0x3a, 0xdf
- MODRM 0xc0 aeskeygen_opd1 aeskeygen_opd2
- .byte \rcon
- .endm
-
- .macro AESIMC xmm1 xmm2
- XMM_NUM aesimc_opd1 \xmm1
- XMM_NUM aesimc_opd2 \xmm2
- PFX_OPD_SIZE
- PFX_REX aesimc_opd1 aesimc_opd2
- .byte 0x0f, 0x38, 0xdb
- MODRM 0xc0 aesimc_opd1 aesimc_opd2
- .endm
-
- .macro AESENC xmm1 xmm2
- XMM_NUM aesenc_opd1 \xmm1
- XMM_NUM aesenc_opd2 \xmm2
- PFX_OPD_SIZE
- PFX_REX aesenc_opd1 aesenc_opd2
- .byte 0x0f, 0x38, 0xdc
- MODRM 0xc0 aesenc_opd1 aesenc_opd2
- .endm
-
- .macro AESENCLAST xmm1 xmm2
- XMM_NUM aesenclast_opd1 \xmm1
- XMM_NUM aesenclast_opd2 \xmm2
- PFX_OPD_SIZE
- PFX_REX aesenclast_opd1 aesenclast_opd2
- .byte 0x0f, 0x38, 0xdd
- MODRM 0xc0 aesenclast_opd1 aesenclast_opd2
- .endm
-
- .macro AESDEC xmm1 xmm2
- XMM_NUM aesdec_opd1 \xmm1
- XMM_NUM aesdec_opd2 \xmm2
- PFX_OPD_SIZE
- PFX_REX aesdec_opd1 aesdec_opd2
- .byte 0x0f, 0x38, 0xde
- MODRM 0xc0 aesdec_opd1 aesdec_opd2
- .endm
-
- .macro AESDECLAST xmm1 xmm2
- XMM_NUM aesdeclast_opd1 \xmm1
- XMM_NUM aesdeclast_opd2 \xmm2
- PFX_OPD_SIZE
- PFX_REX aesdeclast_opd1 aesdeclast_opd2
- .byte 0x0f, 0x38, 0xdf
- MODRM 0xc0 aesdeclast_opd1 aesdeclast_opd2
- .endm
-
- .macro MOVQ_R64_XMM opd1 opd2
- REG_TYPE movq_r64_xmm_opd1_type \opd1
- .if movq_r64_xmm_opd1_type == REG_TYPE_XMM
- XMM_NUM movq_r64_xmm_opd1 \opd1
- R64_NUM movq_r64_xmm_opd2 \opd2
+.macro RDPID opd
+ REG_TYPE rdpid_opd_type \opd
+ .if rdpid_opd_type == REG_TYPE_R64
+ R64_NUM rdpid_opd \opd
.else
- R64_NUM movq_r64_xmm_opd1 \opd1
- XMM_NUM movq_r64_xmm_opd2 \opd2
+ R32_NUM rdpid_opd \opd
.endif
- PFX_OPD_SIZE
- PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
- .if movq_r64_xmm_opd1_type == REG_TYPE_XMM
- .byte 0x0f, 0x7e
- .else
- .byte 0x0f, 0x6e
+ .byte 0xf3
+ .if rdpid_opd > 7
+ PFX_REX rdpid_opd 0
.endif
- MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
- .endm
+ .byte 0x0f, 0xc7
+ MODRM 0xc0 rdpid_opd 0x7
+.endm
#endif
#endif
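
For a 64-bit destination that needs no REX prefix, the RDPID macro above emits exactly f3 0f c7 /7 with the register number in the ModRM r/m field; for %rax that is f3 0f c7 f8. A hedged userspace sketch that executes the same byte sequence after checking the RDPID CPUID bit (leaf 7, subleaf 0, ECX bit 22). On Linux, IA32_TSC_AUX typically encodes the current CPU in the low bits.

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned long aux;

	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) ||
	    !(ecx & (1u << 22))) {
		puts("RDPID not supported");
		return 0;
	}

	/* .byte 0xf3, 0x0f, 0xc7, 0xf8 == rdpid %rax */
	asm volatile(".byte 0xf3, 0x0f, 0xc7, 0xf8" : "=a" (aux));
	printf("IA32_TSC_AUX: %#lx (cpu %lu)\n", aux, aux & 0xfff);
	return 0;
}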
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index a338a6deb950..5e658ba2654a 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -89,8 +89,15 @@
#define INTEL_FAM6_COMETLAKE 0xA5
#define INTEL_FAM6_COMETLAKE_L 0xA6
+#define INTEL_FAM6_ROCKETLAKE 0xA7
+
#define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F
+/* Hybrid Core/Atom Processors */
+
+#define INTEL_FAM6_LAKEFIELD 0x8A
+#define INTEL_FAM6_ALDERLAKE 0x97
+
/* "Small Core" Processors (Atom) */
#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index fd20a2334885..a1a26f6d3aa4 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -99,7 +99,6 @@ struct IR_IO_APIC_route_entry {
struct irq_alloc_info;
struct ioapic_domain_cfg;
-#define IOAPIC_AUTO -1
#define IOAPIC_EDGE 0
#define IOAPIC_LEVEL 1
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index 247ab14c6309..d1514e70477b 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -36,8 +36,9 @@ extern void die(const char *, struct pt_regs *,long);
void die_addr(const char *str, struct pt_regs *regs, long err, long gp_addr);
extern int __must_check __die(const char *, struct pt_regs *, long);
extern void show_stack_regs(struct pt_regs *regs);
-extern void __show_regs(struct pt_regs *regs, enum show_regs_mode);
-extern void show_iret_regs(struct pt_regs *regs);
+extern void __show_regs(struct pt_regs *regs, enum show_regs_mode,
+ const char *log_lvl);
+extern void show_iret_regs(struct pt_regs *regs, const char *log_lvl);
extern unsigned long oops_begin(void);
extern void oops_end(unsigned long, struct pt_regs *, int signr);
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 073eb7ad2f56..143bc9abe99c 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -66,6 +66,8 @@ struct arch_specific_insn {
*/
bool boostable;
bool if_modifier;
+ /* Number of bytes of text poked */
+ int tp_len;
};
struct arch_optimized_insn {
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 848ce43b9040..5049f6c22683 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -43,9 +43,10 @@ void __init sme_enable(struct boot_params *bp);
int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
+void __init mem_encrypt_free_decrypted_mem(void);
+
/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void);
-void __init mem_encrypt_free_decrypted_mem(void);
bool sme_active(void);
bool sev_active(void);
@@ -77,6 +78,8 @@ early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0;
static inline int __init
early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
+static inline void mem_encrypt_free_decrypted_mem(void) { }
+
#define __bss_decrypted
#endif /* CONFIG_AMD_MEM_ENCRYPT */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index e8370e64a155..2859ee4f39a8 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -149,6 +149,10 @@
#define MSR_LBR_SELECT 0x000001c8
#define MSR_LBR_TOS 0x000001c9
+
+#define MSR_IA32_POWER_CTL 0x000001fc
+#define MSR_IA32_POWER_CTL_BIT_EE 19
+
#define MSR_LBR_NHM_FROM 0x00000680
#define MSR_LBR_NHM_TO 0x000006c0
#define MSR_LBR_CORE_FROM 0x00000040
@@ -158,7 +162,23 @@
#define LBR_INFO_MISPRED BIT_ULL(63)
#define LBR_INFO_IN_TX BIT_ULL(62)
#define LBR_INFO_ABORT BIT_ULL(61)
+#define LBR_INFO_CYC_CNT_VALID BIT_ULL(60)
#define LBR_INFO_CYCLES 0xffff
+#define LBR_INFO_BR_TYPE_OFFSET 56
+#define LBR_INFO_BR_TYPE (0xfull << LBR_INFO_BR_TYPE_OFFSET)
+
+#define MSR_ARCH_LBR_CTL 0x000014ce
+#define ARCH_LBR_CTL_LBREN BIT(0)
+#define ARCH_LBR_CTL_CPL_OFFSET 1
+#define ARCH_LBR_CTL_CPL (0x3ull << ARCH_LBR_CTL_CPL_OFFSET)
+#define ARCH_LBR_CTL_STACK_OFFSET 3
+#define ARCH_LBR_CTL_STACK (0x1ull << ARCH_LBR_CTL_STACK_OFFSET)
+#define ARCH_LBR_CTL_FILTER_OFFSET 16
+#define ARCH_LBR_CTL_FILTER (0x7full << ARCH_LBR_CTL_FILTER_OFFSET)
+#define MSR_ARCH_LBR_DEPTH 0x000014cf
+#define MSR_ARCH_LBR_FROM_0 0x00001500
+#define MSR_ARCH_LBR_TO_0 0x00001600
+#define MSR_ARCH_LBR_INFO_0 0x00001200
#define MSR_IA32_PEBS_ENABLE 0x000003f1
#define MSR_PEBS_DATA_CFG 0x000003f2
@@ -253,8 +273,6 @@
#define MSR_PEBS_FRONTEND 0x000003f7
-#define MSR_IA32_POWER_CTL 0x000001fc
-
#define MSR_IA32_MC0_CTL 0x00000400
#define MSR_IA32_MC0_STATUS 0x00000401
#define MSR_IA32_MC0_ADDR 0x00000402
@@ -418,7 +436,6 @@
#define MSR_AMD64_PATCH_LEVEL 0x0000008b
#define MSR_AMD64_TSC_RATIO 0xc0000104
#define MSR_AMD64_NB_CFG 0xc001001f
-#define MSR_AMD64_CPUID_FN_1 0xc0011004
#define MSR_AMD64_PATCH_LOADER 0xc0010020
#define MSR_AMD_PERF_CTL 0xc0010062
#define MSR_AMD_PERF_STATUS 0xc0010063
@@ -427,6 +444,7 @@
#define MSR_AMD64_OSVW_STATUS 0xc0010141
#define MSR_AMD_PPIN_CTL 0xc00102f0
#define MSR_AMD_PPIN 0xc00102f1
+#define MSR_AMD64_CPUID_FN_1 0xc0011004
#define MSR_AMD64_LS_CFG 0xc0011020
#define MSR_AMD64_DC_CFG 0xc0011022
#define MSR_AMD64_BU_CFG2 0xc001102a
@@ -466,6 +484,8 @@
#define MSR_F16H_DR0_ADDR_MASK 0xc0011027
/* Fam 15h MSRs */
+#define MSR_F15H_CU_PWR_ACCUMULATOR 0xc001007a
+#define MSR_F15H_CU_MAX_PWR_ACCUMULATOR 0xc001007b
#define MSR_F15H_PERF_CTL 0xc0010200
#define MSR_F15H_PERF_CTL0 MSR_F15H_PERF_CTL
#define MSR_F15H_PERF_CTL1 (MSR_F15H_PERF_CTL + 2)
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 2278797c769d..a3c33b79fb86 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -4,33 +4,15 @@
#ifdef CONFIG_X86_64
#define __percpu_seg gs
-#define __percpu_mov_op movq
#else
#define __percpu_seg fs
-#define __percpu_mov_op movl
#endif
#ifdef __ASSEMBLY__
-/*
- * PER_CPU finds an address of a per-cpu variable.
- *
- * Args:
- * var - variable name
- * reg - 32bit register
- *
- * The resulting address is stored in the "reg" argument.
- *
- * Example:
- * PER_CPU(cpu_gdt_descr, %ebx)
- */
#ifdef CONFIG_SMP
-#define PER_CPU(var, reg) \
- __percpu_mov_op %__percpu_seg:this_cpu_off, reg; \
- lea var(reg), reg
#define PER_CPU_VAR(var) %__percpu_seg:var
#else /* ! SMP */
-#define PER_CPU(var, reg) __percpu_mov_op $var, reg
#define PER_CPU_VAR(var) var
#endif /* SMP */
@@ -85,213 +67,108 @@
/* For arch-specific code, we can use direct single-insn ops (they
* don't give an lvalue though). */
-extern void __bad_percpu_size(void);
-
-#define percpu_to_op(qual, op, var, val) \
-do { \
- typedef typeof(var) pto_T__; \
- if (0) { \
- pto_T__ pto_tmp__; \
- pto_tmp__ = (val); \
- (void)pto_tmp__; \
- } \
- switch (sizeof(var)) { \
- case 1: \
- asm qual (op "b %1,"__percpu_arg(0) \
- : "+m" (var) \
- : "qi" ((pto_T__)(val))); \
- break; \
- case 2: \
- asm qual (op "w %1,"__percpu_arg(0) \
- : "+m" (var) \
- : "ri" ((pto_T__)(val))); \
- break; \
- case 4: \
- asm qual (op "l %1,"__percpu_arg(0) \
- : "+m" (var) \
- : "ri" ((pto_T__)(val))); \
- break; \
- case 8: \
- asm qual (op "q %1,"__percpu_arg(0) \
- : "+m" (var) \
- : "re" ((pto_T__)(val))); \
- break; \
- default: __bad_percpu_size(); \
- } \
+
+#define __pcpu_type_1 u8
+#define __pcpu_type_2 u16
+#define __pcpu_type_4 u32
+#define __pcpu_type_8 u64
+
+#define __pcpu_cast_1(val) ((u8)(((unsigned long) val) & 0xff))
+#define __pcpu_cast_2(val) ((u16)(((unsigned long) val) & 0xffff))
+#define __pcpu_cast_4(val) ((u32)(((unsigned long) val) & 0xffffffff))
+#define __pcpu_cast_8(val) ((u64)(val))
+
+#define __pcpu_op1_1(op, dst) op "b " dst
+#define __pcpu_op1_2(op, dst) op "w " dst
+#define __pcpu_op1_4(op, dst) op "l " dst
+#define __pcpu_op1_8(op, dst) op "q " dst
+
+#define __pcpu_op2_1(op, src, dst) op "b " src ", " dst
+#define __pcpu_op2_2(op, src, dst) op "w " src ", " dst
+#define __pcpu_op2_4(op, src, dst) op "l " src ", " dst
+#define __pcpu_op2_8(op, src, dst) op "q " src ", " dst
+
+#define __pcpu_reg_1(mod, x) mod "q" (x)
+#define __pcpu_reg_2(mod, x) mod "r" (x)
+#define __pcpu_reg_4(mod, x) mod "r" (x)
+#define __pcpu_reg_8(mod, x) mod "r" (x)
+
+#define __pcpu_reg_imm_1(x) "qi" (x)
+#define __pcpu_reg_imm_2(x) "ri" (x)
+#define __pcpu_reg_imm_4(x) "ri" (x)
+#define __pcpu_reg_imm_8(x) "re" (x)
+
+#define percpu_to_op(size, qual, op, _var, _val) \
+do { \
+ __pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \
+ if (0) { \
+ typeof(_var) pto_tmp__; \
+ pto_tmp__ = (_val); \
+ (void)pto_tmp__; \
+ } \
+ asm qual(__pcpu_op2_##size(op, "%[val]", __percpu_arg([var])) \
+ : [var] "+m" (_var) \
+ : [val] __pcpu_reg_imm_##size(pto_val__)); \
} while (0)
+#define percpu_unary_op(size, qual, op, _var) \
+({ \
+ asm qual (__pcpu_op1_##size(op, __percpu_arg([var])) \
+ : [var] "+m" (_var)); \
+})
+
/*
* Generate a percpu add to memory instruction and optimize code
* if one is added or subtracted.
*/
-#define percpu_add_op(qual, var, val) \
+#define percpu_add_op(size, qual, var, val) \
do { \
- typedef typeof(var) pao_T__; \
const int pao_ID__ = (__builtin_constant_p(val) && \
((val) == 1 || (val) == -1)) ? \
(int)(val) : 0; \
if (0) { \
- pao_T__ pao_tmp__; \
+ typeof(var) pao_tmp__; \
pao_tmp__ = (val); \
(void)pao_tmp__; \
} \
- switch (sizeof(var)) { \
- case 1: \
- if (pao_ID__ == 1) \
- asm qual ("incb "__percpu_arg(0) : "+m" (var)); \
- else if (pao_ID__ == -1) \
- asm qual ("decb "__percpu_arg(0) : "+m" (var)); \
- else \
- asm qual ("addb %1, "__percpu_arg(0) \
- : "+m" (var) \
- : "qi" ((pao_T__)(val))); \
- break; \
- case 2: \
- if (pao_ID__ == 1) \
- asm qual ("incw "__percpu_arg(0) : "+m" (var)); \
- else if (pao_ID__ == -1) \
- asm qual ("decw "__percpu_arg(0) : "+m" (var)); \
- else \
- asm qual ("addw %1, "__percpu_arg(0) \
- : "+m" (var) \
- : "ri" ((pao_T__)(val))); \
- break; \
- case 4: \
- if (pao_ID__ == 1) \
- asm qual ("incl "__percpu_arg(0) : "+m" (var)); \
- else if (pao_ID__ == -1) \
- asm qual ("decl "__percpu_arg(0) : "+m" (var)); \
- else \
- asm qual ("addl %1, "__percpu_arg(0) \
- : "+m" (var) \
- : "ri" ((pao_T__)(val))); \
- break; \
- case 8: \
- if (pao_ID__ == 1) \
- asm qual ("incq "__percpu_arg(0) : "+m" (var)); \
- else if (pao_ID__ == -1) \
- asm qual ("decq "__percpu_arg(0) : "+m" (var)); \
- else \
- asm qual ("addq %1, "__percpu_arg(0) \
- : "+m" (var) \
- : "re" ((pao_T__)(val))); \
- break; \
- default: __bad_percpu_size(); \
- } \
+ if (pao_ID__ == 1) \
+ percpu_unary_op(size, qual, "inc", var); \
+ else if (pao_ID__ == -1) \
+ percpu_unary_op(size, qual, "dec", var); \
+ else \
+ percpu_to_op(size, qual, "add", var, val); \
} while (0)
-#define percpu_from_op(qual, op, var) \
-({ \
- typeof(var) pfo_ret__; \
- switch (sizeof(var)) { \
- case 1: \
- asm qual (op "b "__percpu_arg(1)",%0" \
- : "=q" (pfo_ret__) \
- : "m" (var)); \
- break; \
- case 2: \
- asm qual (op "w "__percpu_arg(1)",%0" \
- : "=r" (pfo_ret__) \
- : "m" (var)); \
- break; \
- case 4: \
- asm qual (op "l "__percpu_arg(1)",%0" \
- : "=r" (pfo_ret__) \
- : "m" (var)); \
- break; \
- case 8: \
- asm qual (op "q "__percpu_arg(1)",%0" \
- : "=r" (pfo_ret__) \
- : "m" (var)); \
- break; \
- default: __bad_percpu_size(); \
- } \
- pfo_ret__; \
-})
-
-#define percpu_stable_op(op, var) \
-({ \
- typeof(var) pfo_ret__; \
- switch (sizeof(var)) { \
- case 1: \
- asm(op "b "__percpu_arg(P1)",%0" \
- : "=q" (pfo_ret__) \
- : "p" (&(var))); \
- break; \
- case 2: \
- asm(op "w "__percpu_arg(P1)",%0" \
- : "=r" (pfo_ret__) \
- : "p" (&(var))); \
- break; \
- case 4: \
- asm(op "l "__percpu_arg(P1)",%0" \
- : "=r" (pfo_ret__) \
- : "p" (&(var))); \
- break; \
- case 8: \
- asm(op "q "__percpu_arg(P1)",%0" \
- : "=r" (pfo_ret__) \
- : "p" (&(var))); \
- break; \
- default: __bad_percpu_size(); \
- } \
- pfo_ret__; \
+#define percpu_from_op(size, qual, op, _var) \
+({ \
+ __pcpu_type_##size pfo_val__; \
+ asm qual (__pcpu_op2_##size(op, __percpu_arg([var]), "%[val]") \
+ : [val] __pcpu_reg_##size("=", pfo_val__) \
+ : [var] "m" (_var)); \
+ (typeof(_var))(unsigned long) pfo_val__; \
})
-#define percpu_unary_op(qual, op, var) \
-({ \
- switch (sizeof(var)) { \
- case 1: \
- asm qual (op "b "__percpu_arg(0) \
- : "+m" (var)); \
- break; \
- case 2: \
- asm qual (op "w "__percpu_arg(0) \
- : "+m" (var)); \
- break; \
- case 4: \
- asm qual (op "l "__percpu_arg(0) \
- : "+m" (var)); \
- break; \
- case 8: \
- asm qual (op "q "__percpu_arg(0) \
- : "+m" (var)); \
- break; \
- default: __bad_percpu_size(); \
- } \
+#define percpu_stable_op(size, op, _var) \
+({ \
+ __pcpu_type_##size pfo_val__; \
+ asm(__pcpu_op2_##size(op, __percpu_arg(P[var]), "%[val]") \
+ : [val] __pcpu_reg_##size("=", pfo_val__) \
+ : [var] "p" (&(_var))); \
+ (typeof(_var))(unsigned long) pfo_val__; \
})
/*
* Add return operation
*/
-#define percpu_add_return_op(qual, var, val) \
+#define percpu_add_return_op(size, qual, _var, _val) \
({ \
- typeof(var) paro_ret__ = val; \
- switch (sizeof(var)) { \
- case 1: \
- asm qual ("xaddb %0, "__percpu_arg(1) \
- : "+q" (paro_ret__), "+m" (var) \
- : : "memory"); \
- break; \
- case 2: \
- asm qual ("xaddw %0, "__percpu_arg(1) \
- : "+r" (paro_ret__), "+m" (var) \
- : : "memory"); \
- break; \
- case 4: \
- asm qual ("xaddl %0, "__percpu_arg(1) \
- : "+r" (paro_ret__), "+m" (var) \
- : : "memory"); \
- break; \
- case 8: \
- asm qual ("xaddq %0, "__percpu_arg(1) \
- : "+re" (paro_ret__), "+m" (var) \
- : : "memory"); \
- break; \
- default: __bad_percpu_size(); \
- } \
- paro_ret__ += val; \
- paro_ret__; \
+ __pcpu_type_##size paro_tmp__ = __pcpu_cast_##size(_val); \
+ asm qual (__pcpu_op2_##size("xadd", "%[tmp]", \
+ __percpu_arg([var])) \
+ : [tmp] __pcpu_reg_##size("+", paro_tmp__), \
+ [var] "+m" (_var) \
+ : : "memory"); \
+ (typeof(_var))(unsigned long) (paro_tmp__ + _val); \
})
/*
@@ -299,85 +176,38 @@ do { \
* expensive due to the implied lock prefix. The processor cannot prefetch
* cachelines if xchg is used.
*/
-#define percpu_xchg_op(qual, var, nval) \
+#define percpu_xchg_op(size, qual, _var, _nval) \
({ \
- typeof(var) pxo_ret__; \
- typeof(var) pxo_new__ = (nval); \
- switch (sizeof(var)) { \
- case 1: \
- asm qual ("\n\tmov "__percpu_arg(1)",%%al" \
- "\n1:\tcmpxchgb %2, "__percpu_arg(1) \
- "\n\tjnz 1b" \
- : "=&a" (pxo_ret__), "+m" (var) \
- : "q" (pxo_new__) \
- : "memory"); \
- break; \
- case 2: \
- asm qual ("\n\tmov "__percpu_arg(1)",%%ax" \
- "\n1:\tcmpxchgw %2, "__percpu_arg(1) \
- "\n\tjnz 1b" \
- : "=&a" (pxo_ret__), "+m" (var) \
- : "r" (pxo_new__) \
- : "memory"); \
- break; \
- case 4: \
- asm qual ("\n\tmov "__percpu_arg(1)",%%eax" \
- "\n1:\tcmpxchgl %2, "__percpu_arg(1) \
- "\n\tjnz 1b" \
- : "=&a" (pxo_ret__), "+m" (var) \
- : "r" (pxo_new__) \
- : "memory"); \
- break; \
- case 8: \
- asm qual ("\n\tmov "__percpu_arg(1)",%%rax" \
- "\n1:\tcmpxchgq %2, "__percpu_arg(1) \
- "\n\tjnz 1b" \
- : "=&a" (pxo_ret__), "+m" (var) \
- : "r" (pxo_new__) \
- : "memory"); \
- break; \
- default: __bad_percpu_size(); \
- } \
- pxo_ret__; \
+ __pcpu_type_##size pxo_old__; \
+ __pcpu_type_##size pxo_new__ = __pcpu_cast_##size(_nval); \
+ asm qual (__pcpu_op2_##size("mov", __percpu_arg([var]), \
+ "%[oval]") \
+ "\n1:\t" \
+ __pcpu_op2_##size("cmpxchg", "%[nval]", \
+ __percpu_arg([var])) \
+ "\n\tjnz 1b" \
+ : [oval] "=&a" (pxo_old__), \
+ [var] "+m" (_var) \
+ : [nval] __pcpu_reg_##size(, pxo_new__) \
+ : "memory"); \
+ (typeof(_var))(unsigned long) pxo_old__; \
})
/*
* cmpxchg has no such implied lock semantics as a result it is much
* more efficient for cpu local operations.
*/
-#define percpu_cmpxchg_op(qual, var, oval, nval) \
+#define percpu_cmpxchg_op(size, qual, _var, _oval, _nval) \
({ \
- typeof(var) pco_ret__; \
- typeof(var) pco_old__ = (oval); \
- typeof(var) pco_new__ = (nval); \
- switch (sizeof(var)) { \
- case 1: \
- asm qual ("cmpxchgb %2, "__percpu_arg(1) \
- : "=a" (pco_ret__), "+m" (var) \
- : "q" (pco_new__), "0" (pco_old__) \
- : "memory"); \
- break; \
- case 2: \
- asm qual ("cmpxchgw %2, "__percpu_arg(1) \
- : "=a" (pco_ret__), "+m" (var) \
- : "r" (pco_new__), "0" (pco_old__) \
- : "memory"); \
- break; \
- case 4: \
- asm qual ("cmpxchgl %2, "__percpu_arg(1) \
- : "=a" (pco_ret__), "+m" (var) \
- : "r" (pco_new__), "0" (pco_old__) \
- : "memory"); \
- break; \
- case 8: \
- asm qual ("cmpxchgq %2, "__percpu_arg(1) \
- : "=a" (pco_ret__), "+m" (var) \
- : "r" (pco_new__), "0" (pco_old__) \
- : "memory"); \
- break; \
- default: __bad_percpu_size(); \
- } \
- pco_ret__; \
+ __pcpu_type_##size pco_old__ = __pcpu_cast_##size(_oval); \
+ __pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval); \
+ asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]", \
+ __percpu_arg([var])) \
+ : [oval] "+a" (pco_old__), \
+ [var] "+m" (_var) \
+ : [nval] __pcpu_reg_##size(, pco_new__) \
+ : "memory"); \
+ (typeof(_var))(unsigned long) pco_old__; \
})
/*
@@ -389,24 +219,28 @@ do { \
* per-thread variables implemented as per-cpu variables and thus
* stable for the duration of the respective task.
*/
-#define this_cpu_read_stable(var) percpu_stable_op("mov", var)
-
-#define raw_cpu_read_1(pcp) percpu_from_op(, "mov", pcp)
-#define raw_cpu_read_2(pcp) percpu_from_op(, "mov", pcp)
-#define raw_cpu_read_4(pcp) percpu_from_op(, "mov", pcp)
-
-#define raw_cpu_write_1(pcp, val) percpu_to_op(, "mov", (pcp), val)
-#define raw_cpu_write_2(pcp, val) percpu_to_op(, "mov", (pcp), val)
-#define raw_cpu_write_4(pcp, val) percpu_to_op(, "mov", (pcp), val)
-#define raw_cpu_add_1(pcp, val) percpu_add_op(, (pcp), val)
-#define raw_cpu_add_2(pcp, val) percpu_add_op(, (pcp), val)
-#define raw_cpu_add_4(pcp, val) percpu_add_op(, (pcp), val)
-#define raw_cpu_and_1(pcp, val) percpu_to_op(, "and", (pcp), val)
-#define raw_cpu_and_2(pcp, val) percpu_to_op(, "and", (pcp), val)
-#define raw_cpu_and_4(pcp, val) percpu_to_op(, "and", (pcp), val)
-#define raw_cpu_or_1(pcp, val) percpu_to_op(, "or", (pcp), val)
-#define raw_cpu_or_2(pcp, val) percpu_to_op(, "or", (pcp), val)
-#define raw_cpu_or_4(pcp, val) percpu_to_op(, "or", (pcp), val)
+#define this_cpu_read_stable_1(pcp) percpu_stable_op(1, "mov", pcp)
+#define this_cpu_read_stable_2(pcp) percpu_stable_op(2, "mov", pcp)
+#define this_cpu_read_stable_4(pcp) percpu_stable_op(4, "mov", pcp)
+#define this_cpu_read_stable_8(pcp) percpu_stable_op(8, "mov", pcp)
+#define this_cpu_read_stable(pcp) __pcpu_size_call_return(this_cpu_read_stable_, pcp)
+
+#define raw_cpu_read_1(pcp) percpu_from_op(1, , "mov", pcp)
+#define raw_cpu_read_2(pcp) percpu_from_op(2, , "mov", pcp)
+#define raw_cpu_read_4(pcp) percpu_from_op(4, , "mov", pcp)
+
+#define raw_cpu_write_1(pcp, val) percpu_to_op(1, , "mov", (pcp), val)
+#define raw_cpu_write_2(pcp, val) percpu_to_op(2, , "mov", (pcp), val)
+#define raw_cpu_write_4(pcp, val) percpu_to_op(4, , "mov", (pcp), val)
+#define raw_cpu_add_1(pcp, val) percpu_add_op(1, , (pcp), val)
+#define raw_cpu_add_2(pcp, val) percpu_add_op(2, , (pcp), val)
+#define raw_cpu_add_4(pcp, val) percpu_add_op(4, , (pcp), val)
+#define raw_cpu_and_1(pcp, val) percpu_to_op(1, , "and", (pcp), val)
+#define raw_cpu_and_2(pcp, val) percpu_to_op(2, , "and", (pcp), val)
+#define raw_cpu_and_4(pcp, val) percpu_to_op(4, , "and", (pcp), val)
+#define raw_cpu_or_1(pcp, val) percpu_to_op(1, , "or", (pcp), val)
+#define raw_cpu_or_2(pcp, val) percpu_to_op(2, , "or", (pcp), val)
+#define raw_cpu_or_4(pcp, val) percpu_to_op(4, , "or", (pcp), val)
/*
* raw_cpu_xchg() can use a load-store since it is not required to be
@@ -423,38 +257,38 @@ do { \
#define raw_cpu_xchg_2(pcp, val) raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_4(pcp, val) raw_percpu_xchg_op(pcp, val)
-#define this_cpu_read_1(pcp) percpu_from_op(volatile, "mov", pcp)
-#define this_cpu_read_2(pcp) percpu_from_op(volatile, "mov", pcp)
-#define this_cpu_read_4(pcp) percpu_from_op(volatile, "mov", pcp)
-#define this_cpu_write_1(pcp, val) percpu_to_op(volatile, "mov", (pcp), val)
-#define this_cpu_write_2(pcp, val) percpu_to_op(volatile, "mov", (pcp), val)
-#define this_cpu_write_4(pcp, val) percpu_to_op(volatile, "mov", (pcp), val)
-#define this_cpu_add_1(pcp, val) percpu_add_op(volatile, (pcp), val)
-#define this_cpu_add_2(pcp, val) percpu_add_op(volatile, (pcp), val)
-#define this_cpu_add_4(pcp, val) percpu_add_op(volatile, (pcp), val)
-#define this_cpu_and_1(pcp, val) percpu_to_op(volatile, "and", (pcp), val)
-#define this_cpu_and_2(pcp, val) percpu_to_op(volatile, "and", (pcp), val)
-#define this_cpu_and_4(pcp, val) percpu_to_op(volatile, "and", (pcp), val)
-#define this_cpu_or_1(pcp, val) percpu_to_op(volatile, "or", (pcp), val)
-#define this_cpu_or_2(pcp, val) percpu_to_op(volatile, "or", (pcp), val)
-#define this_cpu_or_4(pcp, val) percpu_to_op(volatile, "or", (pcp), val)
-#define this_cpu_xchg_1(pcp, nval) percpu_xchg_op(volatile, pcp, nval)
-#define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(volatile, pcp, nval)
-#define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(volatile, pcp, nval)
-
-#define raw_cpu_add_return_1(pcp, val) percpu_add_return_op(, pcp, val)
-#define raw_cpu_add_return_2(pcp, val) percpu_add_return_op(, pcp, val)
-#define raw_cpu_add_return_4(pcp, val) percpu_add_return_op(, pcp, val)
-#define raw_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(, pcp, oval, nval)
-#define raw_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(, pcp, oval, nval)
-#define raw_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(, pcp, oval, nval)
-
-#define this_cpu_add_return_1(pcp, val) percpu_add_return_op(volatile, pcp, val)
-#define this_cpu_add_return_2(pcp, val) percpu_add_return_op(volatile, pcp, val)
-#define this_cpu_add_return_4(pcp, val) percpu_add_return_op(volatile, pcp, val)
-#define this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(volatile, pcp, oval, nval)
-#define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(volatile, pcp, oval, nval)
-#define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(volatile, pcp, oval, nval)
+#define this_cpu_read_1(pcp) percpu_from_op(1, volatile, "mov", pcp)
+#define this_cpu_read_2(pcp) percpu_from_op(2, volatile, "mov", pcp)
+#define this_cpu_read_4(pcp) percpu_from_op(4, volatile, "mov", pcp)
+#define this_cpu_write_1(pcp, val) percpu_to_op(1, volatile, "mov", (pcp), val)
+#define this_cpu_write_2(pcp, val) percpu_to_op(2, volatile, "mov", (pcp), val)
+#define this_cpu_write_4(pcp, val) percpu_to_op(4, volatile, "mov", (pcp), val)
+#define this_cpu_add_1(pcp, val) percpu_add_op(1, volatile, (pcp), val)
+#define this_cpu_add_2(pcp, val) percpu_add_op(2, volatile, (pcp), val)
+#define this_cpu_add_4(pcp, val) percpu_add_op(4, volatile, (pcp), val)
+#define this_cpu_and_1(pcp, val) percpu_to_op(1, volatile, "and", (pcp), val)
+#define this_cpu_and_2(pcp, val) percpu_to_op(2, volatile, "and", (pcp), val)
+#define this_cpu_and_4(pcp, val) percpu_to_op(4, volatile, "and", (pcp), val)
+#define this_cpu_or_1(pcp, val) percpu_to_op(1, volatile, "or", (pcp), val)
+#define this_cpu_or_2(pcp, val) percpu_to_op(2, volatile, "or", (pcp), val)
+#define this_cpu_or_4(pcp, val) percpu_to_op(4, volatile, "or", (pcp), val)
+#define this_cpu_xchg_1(pcp, nval) percpu_xchg_op(1, volatile, pcp, nval)
+#define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(2, volatile, pcp, nval)
+#define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(4, volatile, pcp, nval)
+
+#define raw_cpu_add_return_1(pcp, val) percpu_add_return_op(1, , pcp, val)
+#define raw_cpu_add_return_2(pcp, val) percpu_add_return_op(2, , pcp, val)
+#define raw_cpu_add_return_4(pcp, val) percpu_add_return_op(4, , pcp, val)
+#define raw_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(1, , pcp, oval, nval)
+#define raw_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(2, , pcp, oval, nval)
+#define raw_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(4, , pcp, oval, nval)
+
+#define this_cpu_add_return_1(pcp, val) percpu_add_return_op(1, volatile, pcp, val)
+#define this_cpu_add_return_2(pcp, val) percpu_add_return_op(2, volatile, pcp, val)
+#define this_cpu_add_return_4(pcp, val) percpu_add_return_op(4, volatile, pcp, val)
+#define this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(1, volatile, pcp, oval, nval)
+#define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(2, volatile, pcp, oval, nval)
+#define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(4, volatile, pcp, oval, nval)
#ifdef CONFIG_X86_CMPXCHG64
#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2) \
@@ -478,23 +312,23 @@ do { \
* 32 bit must fall back to generic operations.
*/
#ifdef CONFIG_X86_64
-#define raw_cpu_read_8(pcp) percpu_from_op(, "mov", pcp)
-#define raw_cpu_write_8(pcp, val) percpu_to_op(, "mov", (pcp), val)
-#define raw_cpu_add_8(pcp, val) percpu_add_op(, (pcp), val)
-#define raw_cpu_and_8(pcp, val) percpu_to_op(, "and", (pcp), val)
-#define raw_cpu_or_8(pcp, val) percpu_to_op(, "or", (pcp), val)
-#define raw_cpu_add_return_8(pcp, val) percpu_add_return_op(, pcp, val)
+#define raw_cpu_read_8(pcp) percpu_from_op(8, , "mov", pcp)
+#define raw_cpu_write_8(pcp, val) percpu_to_op(8, , "mov", (pcp), val)
+#define raw_cpu_add_8(pcp, val) percpu_add_op(8, , (pcp), val)
+#define raw_cpu_and_8(pcp, val) percpu_to_op(8, , "and", (pcp), val)
+#define raw_cpu_or_8(pcp, val) percpu_to_op(8, , "or", (pcp), val)
+#define raw_cpu_add_return_8(pcp, val) percpu_add_return_op(8, , pcp, val)
#define raw_cpu_xchg_8(pcp, nval) raw_percpu_xchg_op(pcp, nval)
-#define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(, pcp, oval, nval)
-
-#define this_cpu_read_8(pcp) percpu_from_op(volatile, "mov", pcp)
-#define this_cpu_write_8(pcp, val) percpu_to_op(volatile, "mov", (pcp), val)
-#define this_cpu_add_8(pcp, val) percpu_add_op(volatile, (pcp), val)
-#define this_cpu_and_8(pcp, val) percpu_to_op(volatile, "and", (pcp), val)
-#define this_cpu_or_8(pcp, val) percpu_to_op(volatile, "or", (pcp), val)
-#define this_cpu_add_return_8(pcp, val) percpu_add_return_op(volatile, pcp, val)
-#define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(volatile, pcp, nval)
-#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(volatile, pcp, oval, nval)
+#define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(8, , pcp, oval, nval)
+
+#define this_cpu_read_8(pcp) percpu_from_op(8, volatile, "mov", pcp)
+#define this_cpu_write_8(pcp, val) percpu_to_op(8, volatile, "mov", (pcp), val)
+#define this_cpu_add_8(pcp, val) percpu_add_op(8, volatile, (pcp), val)
+#define this_cpu_and_8(pcp, val) percpu_to_op(8, volatile, "and", (pcp), val)
+#define this_cpu_or_8(pcp, val) percpu_to_op(8, volatile, "or", (pcp), val)
+#define this_cpu_add_return_8(pcp, val) percpu_add_return_op(8, volatile, pcp, val)
+#define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(8, volatile, pcp, nval)
+#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(8, volatile, pcp, oval, nval)
/*
* Pretty complex macro to generate cmpxchg16 instruction. The instruction
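For context on how the size-suffixed operations above are reached in practice, here is a minimal kernel-style sketch (not part of this patch; the counter name is hypothetical). On x86 the generic this_cpu_*() wrappers dispatch to these per-size macros based on the sizeof() of the per-CPU variable:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned int, demo_counter);	/* hypothetical per-CPU counter */

static void demo_count_event(void)
{
	/* sizeof(unsigned int) == 4, so this expands to this_cpu_add_4(),
	 * i.e. percpu_add_op(4, volatile, ...) with the new size argument. */
	this_cpu_add(demo_counter, 1);

	if (this_cpu_read(demo_counter) > 1000)		/* this_cpu_read_4() */
		this_cpu_write(demo_counter, 0);	/* this_cpu_write_4() */
}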
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index e855e9cf2c37..0c1b13720525 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -142,6 +142,46 @@ union cpuid10_edx {
unsigned int full;
};
+/*
+ * Intel Architectural LBR CPUID detection/enumeration details:
+ */
+union cpuid28_eax {
+ struct {
+ /* Supported LBR depth values */
+ unsigned int lbr_depth_mask:8;
+ unsigned int reserved:22;
+ /* Deep C-state Reset */
+ unsigned int lbr_deep_c_reset:1;
+ /* IP values contain LIP */
+ unsigned int lbr_lip:1;
+ } split;
+ unsigned int full;
+};
+
+union cpuid28_ebx {
+ struct {
+ /* CPL Filtering Supported */
+ unsigned int lbr_cpl:1;
+ /* Branch Filtering Supported */
+ unsigned int lbr_filter:1;
+ /* Call-stack Mode Supported */
+ unsigned int lbr_call_stack:1;
+ } split;
+ unsigned int full;
+};
+
+union cpuid28_ecx {
+ struct {
+ /* Mispredict Bit Supported */
+ unsigned int lbr_mispred:1;
+ /* Timed LBRs Supported */
+ unsigned int lbr_timed_lbr:1;
+ /* Branch Type Field Supported */
+ unsigned int lbr_br_type:1;
+ } split;
+ unsigned int full;
+};
+
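As a rough illustration of how these unions are meant to be consumed, the following sketch (not part of the patch; the helper name is made up) enumerates Architectural LBR support from CPUID leaf 0x1C:

#include <linux/printk.h>
#include <asm/processor.h>	/* cpuid_count() */

static void demo_probe_arch_lbr(void)		/* hypothetical helper */
{
	union cpuid28_eax eax;
	union cpuid28_ebx ebx;
	union cpuid28_ecx ecx;
	unsigned int edx;

	cpuid_count(0x1c, 0, &eax.full, &ebx.full, &ecx.full, &edx);

	/* Each set bit in lbr_depth_mask advertises one supported LBR depth. */
	if (!eax.split.lbr_depth_mask)
		return;

	pr_debug("arch LBR: depth mask %#x call-stack %u timed %u br-type %u\n",
		 eax.split.lbr_depth_mask, ebx.split.lbr_call_stack,
		 ecx.split.lbr_timed_lbr, ecx.split.lbr_br_type);
}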
struct x86_pmu_capability {
int version;
int num_counters_gp;
@@ -192,10 +232,30 @@ struct x86_pmu_capability {
#define GLOBAL_STATUS_UNC_OVF BIT_ULL(61)
#define GLOBAL_STATUS_ASIF BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59)
-#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(58)
+#define GLOBAL_STATUS_LBRS_FROZEN_BIT 58
+#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT)
#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(55)
/*
+ * We model guest LBR event tracing as another fixed-mode PMC like BTS.
+ *
+ * We choose bit 58 because it's used to indicate LBR stack frozen state
+ * for architectural perfmon v4; we also unconditionally mask that bit in
+ * handle_pmi_common(), so it'll never be set in the overflow handling.
+ *
+ * With this fake counter assigned, the guest LBR event user (such as KVM)
+ * can program the LBR registers on its own, and we don't actually do anything
+ * with them in the host context.
+ */
+#define INTEL_PMC_IDX_FIXED_VLBR (GLOBAL_STATUS_LBRS_FROZEN_BIT)
+
+/*
+ * Pseudo-encoding the guest LBR event as event=0x00,umask=0x1b,
+ * since it would claim bit 58 which is effectively Fixed26.
+ */
+#define INTEL_FIXED_VLBR_EVENT 0x1b00
+
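The arithmetic behind the two definitions above can be spelled out in a small self-contained check (plain user-space C, purely illustrative; it assumes the conventional INTEL_PMC_IDX_FIXED base of 32 and the usual event[7:0]/umask[15:8] layout of a raw event code):

#include <assert.h>

#define DEMO_INTEL_PMC_IDX_FIXED	32	/* assumed first fixed-counter index */
#define DEMO_LBRS_FROZEN_BIT		58
#define DEMO_FIXED_VLBR_EVENT		0x1b00

int main(void)
{
	/* Bit 58 is 26 above the fixed-counter base, hence "Fixed26". */
	assert(DEMO_LBRS_FROZEN_BIT - DEMO_INTEL_PMC_IDX_FIXED == 26);

	/* The pseudo-encoding packs event=0x00 into bits 7:0 and
	 * umask=0x1b into bits 15:8 of the raw config value. */
	assert((DEMO_FIXED_VLBR_EVENT & 0xff) == 0x00);
	assert(((DEMO_FIXED_VLBR_EVENT >> 8) & 0xff) == 0x1b);
	return 0;
}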
+/*
* Adaptive PEBS v4
*/
@@ -222,14 +282,6 @@ struct pebs_xmm {
u64 xmm[16*2]; /* two entries for each register */
};
-struct pebs_lbr_entry {
- u64 from, to, info;
-};
-
-struct pebs_lbr {
- struct pebs_lbr_entry lbr[0]; /* Variable length */
-};
-
/*
* IBS cpuid feature detection
*/
@@ -333,6 +385,13 @@ struct perf_guest_switch_msr {
u64 host, guest;
};
+struct x86_pmu_lbr {
+ unsigned int nr;
+ unsigned int from;
+ unsigned int to;
+ unsigned int info;
+};
+
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
@@ -348,12 +407,17 @@ static inline void perf_check_microcode(void) { }
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
+extern int x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
*nr = 0;
return NULL;
}
+static inline int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
+{
+ return -1;
+}
#endif
#ifdef CONFIG_CPU_SUP_INTEL
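A rough sketch of how a hypervisor-side caller might use the new helper; the function name and error handling are hypothetical, and it assumes the non-stub implementation fills the structure and returns 0 on success:

#include <linux/errno.h>
#include <linux/printk.h>
#include <asm/perf_event.h>

static int demo_probe_host_lbr(void)		/* hypothetical caller */
{
	struct x86_pmu_lbr lbr;

	if (x86_perf_get_lbr(&lbr))
		return -ENOENT;			/* host PMU reports no usable LBR */

	pr_info("host LBR: %u entries, FROM/TO/INFO MSR bases %#x/%#x/%#x\n",
		lbr.nr, lbr.from, lbr.to, lbr.info);
	return 0;
}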
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 76aa21e8128d..b836138ce852 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -999,15 +999,12 @@ extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);
-
-
-#ifdef CONFIG_X86_64
-extern pgd_t trampoline_pgd_entry;
-
void __init poking_init(void);
-
unsigned long init_memory_mapping(unsigned long start,
unsigned long end, pgprot_t prot);
+
+#ifdef CONFIG_X86_64
+extern pgd_t trampoline_pgd_entry;
#endif
/* local pte updates need not use xchg for locking */
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 1b68d24dc6a0..56d0399a0cd1 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -168,23 +168,18 @@ static inline void native_pgd_clear(pgd_t *pgd)
native_set_pgd(pgd, native_make_pgd(0));
}
-extern void sync_global_pgds(unsigned long start, unsigned long end);
-
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
-/*
- * Level 4 access.
- */
-#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
+/* PGD - Level 4 access */
-/* PUD - Level3 access */
+/* PUD - Level 3 access */
-/* PMD - Level 2 access */
+/* PMD - Level 2 access */
-/* PTE - Level 1 access. */
+/* PTE - Level 1 access */
/*
* Encode and de-code a swap entry
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 8f63efb2a2cc..52e5f5f2240d 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -159,6 +159,4 @@ extern unsigned int ptrs_per_p4d;
#define PGD_KERNEL_START ((PAGE_SIZE / 2) / sizeof(pgd_t))
-#define ARCH_PAGE_TABLE_SYNC_MASK (pgtable_l5_enabled() ? PGTBL_PGD_MODIFIED : PGTBL_P4D_MODIFIED)
-
#endif /* _ASM_X86_PGTABLE_64_DEFS_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 03b7c4ca425a..97143d87994c 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -457,10 +457,8 @@ static inline unsigned long cpu_kernelmode_gs_base(int cpu)
DECLARE_PER_CPU(unsigned int, irq_count);
extern asmlinkage void ignore_sysret(void);
-#if IS_ENABLED(CONFIG_KVM)
/* Save actual FS/GS selectors and bases to current->thread */
-void save_fsgs_for_kvm(void);
-#endif
+void current_save_fsgs(void);
#else /* X86_64 */
#ifdef CONFIG_STACKPROTECTOR
/*
@@ -575,7 +573,7 @@ native_load_sp0(unsigned long sp0)
this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}
-static inline void native_swapgs(void)
+static __always_inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
asm volatile("swapgs" ::: "memory");
@@ -678,70 +676,6 @@ static inline unsigned int cpuid_edx(unsigned int op)
return edx;
}
-/*
- * This function forces the icache and prefetched instruction stream to
- * catch up with reality in two very specific cases:
- *
- * a) Text was modified using one virtual address and is about to be executed
- * from the same physical page at a different virtual address.
- *
- * b) Text was modified on a different CPU, may subsequently be
- * executed on this CPU, and you want to make sure the new version
- * gets executed. This generally means you're calling this in a IPI.
- *
- * If you're calling this for a different reason, you're probably doing
- * it wrong.
- */
-static inline void sync_core(void)
-{
- /*
- * There are quite a few ways to do this. IRET-to-self is nice
- * because it works on every CPU, at any CPL (so it's compatible
- * with paravirtualization), and it never exits to a hypervisor.
- * The only down sides are that it's a bit slow (it seems to be
- * a bit more than 2x slower than the fastest options) and that
- * it unmasks NMIs. The "push %cs" is needed because, in
- * paravirtual environments, __KERNEL_CS may not be a valid CS
- * value when we do IRET directly.
- *
- * In case NMI unmasking or performance ever becomes a problem,
- * the next best option appears to be MOV-to-CR2 and an
- * unconditional jump. That sequence also works on all CPUs,
- * but it will fault at CPL3 (i.e. Xen PV).
- *
- * CPUID is the conventional way, but it's nasty: it doesn't
- * exist on some 486-like CPUs, and it usually exits to a
- * hypervisor.
- *
- * Like all of Linux's memory ordering operations, this is a
- * compiler barrier as well.
- */
-#ifdef CONFIG_X86_32
- asm volatile (
- "pushfl\n\t"
- "pushl %%cs\n\t"
- "pushl $1f\n\t"
- "iret\n\t"
- "1:"
- : ASM_CALL_CONSTRAINT : : "memory");
-#else
- unsigned int tmp;
-
- asm volatile (
- "mov %%ss, %0\n\t"
- "pushq %q0\n\t"
- "pushq %%rsp\n\t"
- "addq $8, (%%rsp)\n\t"
- "pushfq\n\t"
- "mov %%cs, %0\n\t"
- "pushq %q0\n\t"
- "pushq $1f\n\t"
- "iretq\n\t"
- "1:"
- : "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
-#endif
-}
-
extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void amd_e400_c1e_apic_setup(void);
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 255b2dde2c1b..40aa69d04862 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -209,6 +209,11 @@ static inline void user_stack_pointer_set(struct pt_regs *regs,
regs->sp = val;
}
+static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
+{
+ return !(regs->flags & X86_EFLAGS_IF);
+}
+
/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
index 33d3c88a7225..6fd8410a3910 100644
--- a/arch/x86/include/asm/signal.h
+++ b/arch/x86/include/asm/signal.h
@@ -35,7 +35,6 @@ typedef sigset_t compat_sigset_t;
#endif /* __ASSEMBLY__ */
#include <uapi/asm/signal.h>
#ifndef __ASSEMBLY__
-extern void do_signal(struct pt_regs *regs);
#define __ARCH_HAS_SA_RESTORER
diff --git a/arch/x86/include/asm/sparsemem.h b/arch/x86/include/asm/sparsemem.h
index 199218719a86..6bfc878f6771 100644
--- a/arch/x86/include/asm/sparsemem.h
+++ b/arch/x86/include/asm/sparsemem.h
@@ -10,24 +10,20 @@
* field of the struct page
*
* SECTION_SIZE_BITS 2^n: size of each section
- * MAX_PHYSADDR_BITS 2^n: max size of physical address space
- * MAX_PHYSMEM_BITS 2^n: how much memory we can have in that space
+ * MAX_PHYSMEM_BITS 2^n: max size of physical address space
*
*/
#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_PAE
# define SECTION_SIZE_BITS 29
-# define MAX_PHYSADDR_BITS 36
# define MAX_PHYSMEM_BITS 36
# else
# define SECTION_SIZE_BITS 26
-# define MAX_PHYSADDR_BITS 32
# define MAX_PHYSMEM_BITS 32
# endif
#else /* CONFIG_X86_32 */
# define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */
-# define MAX_PHYSADDR_BITS (pgtable_l5_enabled() ? 52 : 44)
# define MAX_PHYSMEM_BITS (pgtable_l5_enabled() ? 52 : 46)
#endif
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index eb8e781c4353..59a3e13204c3 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -234,7 +234,6 @@ static inline void clwb(volatile void *__p)
#define nop() asm volatile ("nop")
-
#endif /* __KERNEL__ */
#endif /* _ASM_X86_SPECIAL_INSNS_H */
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 9804a7957f4e..7fb482f0f25b 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -90,6 +90,15 @@ static __always_inline void boot_init_stack_canary(void)
#endif
}
+static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
+{
+#ifdef CONFIG_X86_64
+ per_cpu(fixed_percpu_data.stack_canary, cpu) = idle->stack_canary;
+#else
+ per_cpu(stack_canary.canary, cpu) = idle->stack_canary;
+#endif
+}
+
static inline void setup_stack_canary_segment(int cpu)
{
#ifdef CONFIG_X86_32
@@ -119,6 +128,9 @@ static inline void load_stack_canary_segment(void)
static inline void setup_stack_canary_segment(int cpu)
{ }
+static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
+{ }
+
static inline void load_stack_canary_segment(void)
{
#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/sync_core.h b/arch/x86/include/asm/sync_core.h
index c67caafd3381..fdb5b356e59b 100644
--- a/arch/x86/include/asm/sync_core.h
+++ b/arch/x86/include/asm/sync_core.h
@@ -6,6 +6,78 @@
#include <asm/processor.h>
#include <asm/cpufeature.h>
+#ifdef CONFIG_X86_32
+static inline void iret_to_self(void)
+{
+ asm volatile (
+ "pushfl\n\t"
+ "pushl %%cs\n\t"
+ "pushl $1f\n\t"
+ "iret\n\t"
+ "1:"
+ : ASM_CALL_CONSTRAINT : : "memory");
+}
+#else
+static inline void iret_to_self(void)
+{
+ unsigned int tmp;
+
+ asm volatile (
+ "mov %%ss, %0\n\t"
+ "pushq %q0\n\t"
+ "pushq %%rsp\n\t"
+ "addq $8, (%%rsp)\n\t"
+ "pushfq\n\t"
+ "mov %%cs, %0\n\t"
+ "pushq %q0\n\t"
+ "pushq $1f\n\t"
+ "iretq\n\t"
+ "1:"
+ : "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
+}
+#endif /* CONFIG_X86_32 */
+
+/*
+ * This function forces the icache and prefetched instruction stream to
+ * catch up with reality in two very specific cases:
+ *
+ * a) Text was modified using one virtual address and is about to be executed
+ * from the same physical page at a different virtual address.
+ *
+ * b) Text was modified on a different CPU, may subsequently be
+ * executed on this CPU, and you want to make sure the new version
+ * gets executed. This generally means you're calling this in an IPI.
+ *
+ * If you're calling this for a different reason, you're probably doing
+ * it wrong.
+ */
+static inline void sync_core(void)
+{
+ /*
+ * There are quite a few ways to do this. IRET-to-self is nice
+ * because it works on every CPU, at any CPL (so it's compatible
+ * with paravirtualization), and it never exits to a hypervisor.
+ * The only down sides are that it's a bit slow (it seems to be
+ * a bit more than 2x slower than the fastest options) and that
+ * it unmasks NMIs. The "push %cs" is needed because, in
+ * paravirtual environments, __KERNEL_CS may not be a valid CS
+ * value when we do IRET directly.
+ *
+ * In case NMI unmasking or performance ever becomes a problem,
+ * the next best option appears to be MOV-to-CR2 and an
+ * unconditional jump. That sequence also works on all CPUs,
+ * but it will fault at CPL3 (i.e. Xen PV).
+ *
+ * CPUID is the conventional way, but it's nasty: it doesn't
+ * exist on some 486-like CPUs, and it usually exits to a
+ * hypervisor.
+ *
+ * Like all of Linux's memory ordering operations, this is a
+ * compiler barrier as well.
+ */
+ iret_to_self();
+}
+
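The comment above alludes to the cross-modifying-code case; a minimal sketch of that pattern (helper names other than sync_core() and on_each_cpu() are made up) looks like this:

#include <linux/smp.h>
#include <asm/sync_core.h>

static void demo_do_sync_core(void *info)
{
	sync_core();				/* runs on every CPU, in IPI context */
}

static void demo_finish_text_poke(void)
{
	/* ...the instruction bytes have just been rewritten... */
	on_each_cpu(demo_do_sync_core, NULL, 1);	/* wait for all CPUs */
}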
/*
* Ensure that a core serializing instruction is issued before returning
* to user-mode. x86 implements return to user-space through sysexit,
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 8de8ceccb8bc..267701ae3d86 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -133,11 +133,6 @@ struct thread_info {
#define _TIF_X32 (1 << TIF_X32)
#define _TIF_FSCHECK (1 << TIF_FSCHECK)
-/* Work to do before invoking the actual syscall. */
-#define _TIF_WORK_SYSCALL_ENTRY \
- (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
-
/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW_BASE \
(_TIF_NOCPUID | _TIF_NOTSC | _TIF_BLOCKSTEP | \
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 79d8d5496330..f4234575f3fd 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -193,7 +193,7 @@ static inline void sched_clear_itmt_support(void)
}
#endif /* CONFIG_SCHED_MC_PRIO */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_X86_64)
#include <asm/cpufeature.h>
DECLARE_STATIC_KEY_FALSE(arch_scale_freq_key);
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 8a0c25c6bf09..b7b2624fba86 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -7,9 +7,6 @@
#include <asm/processor.h>
-#define NS_SCALE 10 /* 2^10, carefully chosen */
-#define US_SCALE 32 /* 2^32, arbitralrily chosen */
-
/*
* Standard way to access the cycle counter.
*/
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 18dfa07d3ef0..2f3e8f2a958f 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -314,11 +314,14 @@ do { \
#define __get_user_size(x, ptr, size, retval) \
do { \
+ unsigned char x_u8__; \
+ \
retval = 0; \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
- __get_user_asm(x, ptr, retval, "b", "=q"); \
+ __get_user_asm(x_u8__, ptr, retval, "b", "=q"); \
+ (x) = x_u8__; \
break; \
case 2: \
__get_user_asm(x, ptr, retval, "w", "=r"); \
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
index 2fcc3ac12e76..70050d0136c3 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -72,7 +72,7 @@ struct uv_gam_range_entry {
};
#define UV_SYSTAB_SIG "UVST"
-#define UV_SYSTAB_VERSION_1 1 /* UV1/2/3 BIOS version */
+#define UV_SYSTAB_VERSION_1 1 /* UV2/3 BIOS version */
#define UV_SYSTAB_VERSION_UV4 0x400 /* UV4 BIOS base version */
#define UV_SYSTAB_VERSION_UV4_1 0x401 /* + gpa_shift */
#define UV_SYSTAB_VERSION_UV4_2 0x402 /* + TYPE_NVRAM/WINDOW/MBOX */
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index 3db85626048f..e48aea9ba47d 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -4,7 +4,7 @@
#include <asm/tlbflush.h>
-enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
+enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC};
struct cpumask;
struct mm_struct;
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index f1188bd47658..cd24804955d7 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -46,10 +46,7 @@
#define UV_ACT_STATUS_SIZE 2
#define UV_DISTRIBUTION_SIZE 256
#define UV_SW_ACK_NPENDING 8
-#define UV1_NET_ENDPOINT_INTD 0x38
-#define UV2_NET_ENDPOINT_INTD 0x28
-#define UV_NET_ENDPOINT_INTD (is_uv1_hub() ? \
- UV1_NET_ENDPOINT_INTD : UV2_NET_ENDPOINT_INTD)
+#define UV_NET_ENDPOINT_INTD 0x28
#define UV_PAYLOADQ_GNODE_SHIFT 49
#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
#define UV_BAU_BASENAME "sgi_uv/bau_tunables"
@@ -64,14 +61,9 @@
* UV2: Bit 19 selects between
* (0): 10 microsecond timebase and
* (1): 80 microseconds
- * we're using 560us, similar to UV1: 65 units of 10us
+ * we're using 560us
*/
-#define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL)
-#define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL)
-
-#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD (is_uv1_hub() ? \
- UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD : \
- UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD)
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL)
/* assuming UV3 is the same */
#define BAU_MISC_CONTROL_MULT_MASK 3
@@ -148,7 +140,6 @@
#define UV_LB_SUBNODEID 0x10
-/* these two are the same for UV1 and UV2: */
#define UV_SA_SHFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
#define UV_SA_MASK UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK
/* 4 bits of software ack period */
@@ -189,8 +180,7 @@
#define BAU_DESC_QUALIFIER 0x534749
enum uv_bau_version {
- UV_BAU_V1 = 1,
- UV_BAU_V2,
+ UV_BAU_V2 = 2,
UV_BAU_V3,
UV_BAU_V4,
};
@@ -233,12 +223,12 @@ struct bau_local_cpumask {
*/
/**
- * struct uv1_2_3_bau_msg_payload - defines payload for INTD transactions
+ * struct uv2_3_bau_msg_payload - defines payload for INTD transactions
* @address: Signifies a page or all TLB's of the cpu
* @sending_cpu: CPU from which the message originates
* @acknowledge_count: CPUs on the destination Hub that received the interrupt
*/
-struct uv1_2_3_bau_msg_payload {
+struct uv2_3_bau_msg_payload {
u64 address;
u16 sending_cpu;
u16 acknowledge_count;
@@ -260,89 +250,6 @@ struct uv4_bau_msg_payload {
};
/*
- * UV1 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
- * see table 4.2.3.0.1 in broacast_assist spec.
- */
-struct uv1_bau_msg_header {
- unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
- /* bits 5:0 */
- unsigned int base_dest_nasid:15; /* nasid of the first bit */
- /* bits 20:6 */ /* in uvhub map */
- unsigned int command:8; /* message type */
- /* bits 28:21 */
- /* 0x38: SN3net EndPoint Message */
- unsigned int rsvd_1:3; /* must be zero */
- /* bits 31:29 */
- /* int will align on 32 bits */
- unsigned int rsvd_2:9; /* must be zero */
- /* bits 40:32 */
- /* Suppl_A is 56-41 */
- unsigned int sequence:16; /* message sequence number */
- /* bits 56:41 */ /* becomes bytes 16-17 of msg */
- /* Address field (96:57) is
- never used as an address
- (these are address bits
- 42:3) */
-
- unsigned int rsvd_3:1; /* must be zero */
- /* bit 57 */
- /* address bits 27:4 are payload */
- /* these next 24 (58-81) bits become bytes 12-14 of msg */
- /* bits 65:58 land in byte 12 */
- unsigned int replied_to:1; /* sent as 0 by the source to
- byte 12 */
- /* bit 58 */
- unsigned int msg_type:3; /* software type of the
- message */
- /* bits 61:59 */
- unsigned int canceled:1; /* message canceled, resource
- is to be freed*/
- /* bit 62 */
- unsigned int payload_1a:1; /* not currently used */
- /* bit 63 */
- unsigned int payload_1b:2; /* not currently used */
- /* bits 65:64 */
-
- /* bits 73:66 land in byte 13 */
- unsigned int payload_1ca:6; /* not currently used */
- /* bits 71:66 */
- unsigned int payload_1c:2; /* not currently used */
- /* bits 73:72 */
-
- /* bits 81:74 land in byte 14 */
- unsigned int payload_1d:6; /* not currently used */
- /* bits 79:74 */
- unsigned int payload_1e:2; /* not currently used */
- /* bits 81:80 */
-
- unsigned int rsvd_4:7; /* must be zero */
- /* bits 88:82 */
- unsigned int swack_flag:1; /* software acknowledge flag */
- /* bit 89 */
- /* INTD trasactions at
- destination are to wait for
- software acknowledge */
- unsigned int rsvd_5:6; /* must be zero */
- /* bits 95:90 */
- unsigned int rsvd_6:5; /* must be zero */
- /* bits 100:96 */
- unsigned int int_both:1; /* if 1, interrupt both sockets
- on the uvhub */
- /* bit 101*/
- unsigned int fairness:3; /* usually zero */
- /* bits 104:102 */
- unsigned int multilevel:1; /* multi-level multicast
- format */
- /* bit 105 */
- /* 0 for TLB: endpoint multi-unicast messages */
- unsigned int chaining:1; /* next descriptor is part of
- this activation*/
- /* bit 106 */
- unsigned int rsvd_7:21; /* must be zero */
- /* bits 127:107 */
-};
-
-/*
* UV2 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
* see figure 9-2 of harp_sys.pdf
* assuming UV3 is the same
@@ -418,25 +325,14 @@ struct bau_desc {
* message template, consisting of header and payload:
*/
union bau_msg_header {
- struct uv1_bau_msg_header uv1_hdr;
struct uv2_3_bau_msg_header uv2_3_hdr;
} header;
union bau_payload_header {
- struct uv1_2_3_bau_msg_payload uv1_2_3;
+ struct uv2_3_bau_msg_payload uv2_3;
struct uv4_bau_msg_payload uv4;
} payload;
};
-/* UV1:
- * -payload-- ---------header------
- * bytes 0-11 bits 41-56 bits 58-81
- * A B (2) C (3)
- *
- * A/B/C are moved to:
- * A C B
- * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
- * ------------payload queue-----------
- */
/* UV2:
* -payload-- ---------header------
* bytes 0-11 bits 70-78 bits 21-44
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 60ca0afdeaf9..100d66806503 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -224,17 +224,11 @@ static inline struct uv_hub_info_s *uv_cpu_hub_info(int cpu)
* This is a software convention - NOT the hardware revision numbers in
* the hub chip.
*/
-#define UV1_HUB_REVISION_BASE 1
#define UV2_HUB_REVISION_BASE 3
#define UV3_HUB_REVISION_BASE 5
#define UV4_HUB_REVISION_BASE 7
#define UV4A_HUB_REVISION_BASE 8 /* UV4 (fixed) rev 2 */
-static inline int is_uv1_hub(void)
-{
- return is_uv_hubbed(uv(1));
-}
-
static inline int is_uv2_hub(void)
{
return is_uv_hubbed(uv(2));
@@ -265,7 +259,7 @@ static inline int is_uvx_hub(void)
static inline int is_uv_hub(void)
{
- return is_uv1_hub() || is_uvx_hub();
+ return is_uvx_hub();
}
union uvh_apicid {
@@ -292,11 +286,6 @@ union uvh_apicid {
#define UV_PNODE_TO_GNODE(p) ((p) |uv_hub_info->gnode_extra)
#define UV_PNODE_TO_NASID(p) (UV_PNODE_TO_GNODE(p) << 1)
-#define UV1_LOCAL_MMR_BASE 0xf4000000UL
-#define UV1_GLOBAL_MMR32_BASE 0xf8000000UL
-#define UV1_LOCAL_MMR_SIZE (64UL * 1024 * 1024)
-#define UV1_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024)
-
#define UV2_LOCAL_MMR_BASE 0xfa000000UL
#define UV2_GLOBAL_MMR32_BASE 0xfc000000UL
#define UV2_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
@@ -313,25 +302,21 @@ union uvh_apicid {
#define UV4_GLOBAL_MMR32_SIZE (16UL * 1024 * 1024)
#define UV_LOCAL_MMR_BASE ( \
- is_uv1_hub() ? UV1_LOCAL_MMR_BASE : \
is_uv2_hub() ? UV2_LOCAL_MMR_BASE : \
is_uv3_hub() ? UV3_LOCAL_MMR_BASE : \
/*is_uv4_hub*/ UV4_LOCAL_MMR_BASE)
#define UV_GLOBAL_MMR32_BASE ( \
- is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE : \
is_uv2_hub() ? UV2_GLOBAL_MMR32_BASE : \
is_uv3_hub() ? UV3_GLOBAL_MMR32_BASE : \
/*is_uv4_hub*/ UV4_GLOBAL_MMR32_BASE)
#define UV_LOCAL_MMR_SIZE ( \
- is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \
is_uv2_hub() ? UV2_LOCAL_MMR_SIZE : \
is_uv3_hub() ? UV3_LOCAL_MMR_SIZE : \
/*is_uv4_hub*/ UV4_LOCAL_MMR_SIZE)
#define UV_GLOBAL_MMR32_SIZE ( \
- is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE : \
is_uv2_hub() ? UV2_GLOBAL_MMR32_SIZE : \
is_uv3_hub() ? UV3_GLOBAL_MMR32_SIZE : \
/*is_uv4_hub*/ UV4_GLOBAL_MMR32_SIZE)
@@ -352,8 +337,6 @@ union uvh_apicid {
#define UVH_APICID 0x002D0E00L
#define UV_APIC_PNODE_SHIFT 6
-#define UV_APICID_HIBIT_MASK 0xffff0000
-
/* Local Bus from cpu's perspective */
#define LOCAL_BUS_BASE 0x1c00000
#define LOCAL_BUS_SIZE (4 * 1024 * 1024)
@@ -560,15 +543,6 @@ static inline int uv_apicid_to_pnode(int apicid)
return s2pn ? s2pn[pnode - uv_hub_info->min_socket] : pnode;
}
-/* Convert an apicid to the socket number on the blade */
-static inline int uv_apicid_to_socket(int apicid)
-{
- if (is_uv1_hub())
- return (apicid >> (uv_hub_info->apic_pnode_shift - 1)) & 1;
- else
- return 0;
-}
-
/*
* Access global MMRs using the low memory MMR32 space. This region supports
* faster MMR access but not all MMRs are accessible in this space.
@@ -660,7 +634,7 @@ static inline int uv_cpu_blade_processor_id(int cpu)
return uv_cpu_info_per(cpu)->blade_cpu_id;
}
-/* Blade number to Node number (UV1..UV4 is 1:1) */
+/* Blade number to Node number (UV2..UV4 is 1:1) */
static inline int uv_blade_to_node(int blade)
{
return blade;
@@ -674,7 +648,7 @@ static inline int uv_numa_blade_id(void)
/*
* Convert linux node number to the UV blade number.
- * .. Currently for UV1 thru UV4 the node and the blade are identical.
+ * .. Currently for UV2 thru UV4 the node and the blade are identical.
* .. If this changes then you MUST check references to this function!
*/
static inline int uv_node_to_blade_id(int nid)
@@ -682,7 +656,7 @@ static inline int uv_node_to_blade_id(int nid)
return nid;
}
-/* Convert a cpu number to the the UV blade number */
+/* Convert a CPU number to the UV blade number */
static inline int uv_cpu_to_blade_id(int cpu)
{
return uv_node_to_blade_id(cpu_to_node(cpu));
@@ -821,8 +795,6 @@ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
}
}
-extern unsigned int uv_apicid_hibits;
-
/*
* Get the minimum revision number of the hub chips within the partition.
* (See UVx_HUB_REVISION_BASE above for specific values.)
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index 9ee5ed6e8b34..775bf143a072 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -19,7 +19,6 @@
*
* UVH - definitions common to all UV hub types.
* UVXH - definitions common to all UV eXtended hub types (currently 2, 3, 4).
- * UV1H - definitions specific to UV type 1 hub.
* UV2H - definitions specific to UV type 2 hub.
* UV3H - definitions specific to UV type 3 hub.
* UV4H - definitions specific to UV type 4 hub.
@@ -35,19 +34,6 @@
*
* If the MMR exists on all hub types but have different addresses,
* use a conditional operator to define the value at runtime.
- * #define UV1Hxxx a
- * #define UV2Hxxx b
- * #define UV3Hxxx c
- * #define UV4Hxxx d
- * #define UV4AHxxx e
- * #define UVHxxx (is_uv1_hub() ? UV1Hxxx :
- * (is_uv2_hub() ? UV2Hxxx :
- * (is_uv3_hub() ? UV3Hxxx :
- * (is_uv4a_hub() ? UV4AHxxx :
- * UV4Hxxx))
- *
- * If the MMR exists on all hub types > 1 but have different addresses, the
- * variation using "UVX" as the prefix exists.
* #define UV2Hxxx b
* #define UV3Hxxx c
* #define UV4Hxxx d
@@ -61,8 +47,6 @@
* unsigned long v;
* struct uvh_xxx_s { # Common fields only
* } s;
- * struct uv1h_xxx_s { # Full UV1 definition (*)
- * } s1;
* struct uv2h_xxx_s { # Full UV2 definition (*)
* } s2;
* struct uv3h_xxx_s { # Full UV3 definition (*)
@@ -92,7 +76,6 @@
#define UV_MMR_ENABLE (1UL << 63)
-#define UV1_HUB_PART_NUMBER 0x88a5
#define UV2_HUB_PART_NUMBER 0x8eb8
#define UV2_HUB_PART_NUMBER_X 0x1111
#define UV3_HUB_PART_NUMBER 0x9578
@@ -107,12 +90,10 @@ extern unsigned long uv_undefined(char *str);
/* ========================================================================= */
#define UVH_BAU_DATA_BROADCAST 0x61688UL
-#define UV1H_BAU_DATA_BROADCAST_32 0x440
#define UV2H_BAU_DATA_BROADCAST_32 0x440
#define UV3H_BAU_DATA_BROADCAST_32 0x440
#define UV4H_BAU_DATA_BROADCAST_32 0x360
#define UVH_BAU_DATA_BROADCAST_32 ( \
- is_uv1_hub() ? UV1H_BAU_DATA_BROADCAST_32 : \
is_uv2_hub() ? UV2H_BAU_DATA_BROADCAST_32 : \
is_uv3_hub() ? UV3H_BAU_DATA_BROADCAST_32 : \
/*is_uv4_hub*/ UV4H_BAU_DATA_BROADCAST_32)
@@ -134,12 +115,10 @@ union uvh_bau_data_broadcast_u {
/* ========================================================================= */
#define UVH_BAU_DATA_CONFIG 0x61680UL
-#define UV1H_BAU_DATA_CONFIG_32 0x438
#define UV2H_BAU_DATA_CONFIG_32 0x438
#define UV3H_BAU_DATA_CONFIG_32 0x438
#define UV4H_BAU_DATA_CONFIG_32 0x358
#define UVH_BAU_DATA_CONFIG_32 ( \
- is_uv1_hub() ? UV1H_BAU_DATA_CONFIG_32 : \
is_uv2_hub() ? UV2H_BAU_DATA_CONFIG_32 : \
is_uv3_hub() ? UV3H_BAU_DATA_CONFIG_32 : \
/*is_uv4_hub*/ UV4H_BAU_DATA_CONFIG_32)
@@ -189,117 +168,6 @@ union uvh_bau_data_config_u {
#define UVH_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
#define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
-#define UV1H_EVENT_OCCURRED0_GR0_HCERR_SHFT 1
-#define UV1H_EVENT_OCCURRED0_GR1_HCERR_SHFT 2
-#define UV1H_EVENT_OCCURRED0_LH_HCERR_SHFT 3
-#define UV1H_EVENT_OCCURRED0_RH_HCERR_SHFT 4
-#define UV1H_EVENT_OCCURRED0_XN_HCERR_SHFT 5
-#define UV1H_EVENT_OCCURRED0_SI_HCERR_SHFT 6
-#define UV1H_EVENT_OCCURRED0_LB_AOERR0_SHFT 7
-#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8
-#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9
-#define UV1H_EVENT_OCCURRED0_LH_AOERR0_SHFT 10
-#define UV1H_EVENT_OCCURRED0_XN_AOERR0_SHFT 12
-#define UV1H_EVENT_OCCURRED0_SI_AOERR0_SHFT 13
-#define UV1H_EVENT_OCCURRED0_LB_AOERR1_SHFT 14
-#define UV1H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 15
-#define UV1H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 16
-#define UV1H_EVENT_OCCURRED0_LH_AOERR1_SHFT 17
-#define UV1H_EVENT_OCCURRED0_RH_AOERR1_SHFT 18
-#define UV1H_EVENT_OCCURRED0_XN_AOERR1_SHFT 19
-#define UV1H_EVENT_OCCURRED0_SI_AOERR1_SHFT 20
-#define UV1H_EVENT_OCCURRED0_RH_VPI_INT_SHFT 21
-#define UV1H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 22
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 23
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 24
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 25
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 26
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 27
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 28
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 29
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 30
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 31
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 32
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 33
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 34
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 35
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 36
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 37
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 38
-#define UV1H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 39
-#define UV1H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 40
-#define UV1H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 41
-#define UV1H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 42
-#define UV1H_EVENT_OCCURRED0_LTC_INT_SHFT 43
-#define UV1H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 44
-#define UV1H_EVENT_OCCURRED0_IPI_INT_SHFT 45
-#define UV1H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 46
-#define UV1H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 47
-#define UV1H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 48
-#define UV1H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 49
-#define UV1H_EVENT_OCCURRED0_PROFILE_INT_SHFT 50
-#define UV1H_EVENT_OCCURRED0_RTC0_SHFT 51
-#define UV1H_EVENT_OCCURRED0_RTC1_SHFT 52
-#define UV1H_EVENT_OCCURRED0_RTC2_SHFT 53
-#define UV1H_EVENT_OCCURRED0_RTC3_SHFT 54
-#define UV1H_EVENT_OCCURRED0_BAU_DATA_SHFT 55
-#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
-#define UV1H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL
-#define UV1H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL
-#define UV1H_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL
-#define UV1H_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000010UL
-#define UV1H_EVENT_OCCURRED0_XN_HCERR_MASK 0x0000000000000020UL
-#define UV1H_EVENT_OCCURRED0_SI_HCERR_MASK 0x0000000000000040UL
-#define UV1H_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000080UL
-#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL
-#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL
-#define UV1H_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL
-#define UV1H_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL
-#define UV1H_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL
-#define UV1H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL
-#define UV1H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000000008000UL
-#define UV1H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000000010000UL
-#define UV1H_EVENT_OCCURRED0_LH_AOERR1_MASK 0x0000000000020000UL
-#define UV1H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000040000UL
-#define UV1H_EVENT_OCCURRED0_XN_AOERR1_MASK 0x0000000000080000UL
-#define UV1H_EVENT_OCCURRED0_SI_AOERR1_MASK 0x0000000000100000UL
-#define UV1H_EVENT_OCCURRED0_RH_VPI_INT_MASK 0x0000000000200000UL
-#define UV1H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000000800000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000001000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000002000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000004000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000000008000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000000010000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000000020000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000000040000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000000080000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000000100000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000000200000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000000400000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000000800000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000001000000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000002000000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000004000000000UL
-#define UV1H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0000008000000000UL
-#define UV1H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0000010000000000UL
-#define UV1H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0000020000000000UL
-#define UV1H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0000040000000000UL
-#define UV1H_EVENT_OCCURRED0_LTC_INT_MASK 0x0000080000000000UL
-#define UV1H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
-#define UV1H_EVENT_OCCURRED0_IPI_INT_MASK 0x0000200000000000UL
-#define UV1H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0000400000000000UL
-#define UV1H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0000800000000000UL
-#define UV1H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0001000000000000UL
-#define UV1H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0002000000000000UL
-#define UV1H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0004000000000000UL
-#define UV1H_EVENT_OCCURRED0_RTC0_MASK 0x0008000000000000UL
-#define UV1H_EVENT_OCCURRED0_RTC1_MASK 0x0010000000000000UL
-#define UV1H_EVENT_OCCURRED0_RTC2_MASK 0x0020000000000000UL
-#define UV1H_EVENT_OCCURRED0_RTC3_MASK 0x0040000000000000UL
-#define UV1H_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
-#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
-
#define UVXH_EVENT_OCCURRED0_RH_HCERR_SHFT 2
#define UVXH_EVENT_OCCURRED0_LH0_HCERR_SHFT 3
#define UVXH_EVENT_OCCURRED0_LH1_HCERR_SHFT 4
@@ -605,7 +473,6 @@ union uvh_bau_data_config_u {
#define UV4H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x8000000000000000UL
#define UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT ( \
- is_uv1_hub() ? UV1H_EVENT_OCCURRED0_EXTIO_INT0_SHFT : \
is_uv2_hub() ? UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT : \
is_uv3_hub() ? UV3H_EVENT_OCCURRED0_EXTIO_INT0_SHFT : \
/*is_uv4_hub*/ UV4H_EVENT_OCCURRED0_EXTIO_INT0_SHFT)
@@ -718,12 +585,10 @@ union uvh_event_occurred0_u {
/* ========================================================================= */
#define UVH_EXTIO_INT0_BROADCAST 0x61448UL
-#define UV1H_EXTIO_INT0_BROADCAST_32 0x3f0
#define UV2H_EXTIO_INT0_BROADCAST_32 0x3f0
#define UV3H_EXTIO_INT0_BROADCAST_32 0x3f0
#define UV4H_EXTIO_INT0_BROADCAST_32 0x310
#define UVH_EXTIO_INT0_BROADCAST_32 ( \
- is_uv1_hub() ? UV1H_EXTIO_INT0_BROADCAST_32 : \
is_uv2_hub() ? UV2H_EXTIO_INT0_BROADCAST_32 : \
is_uv3_hub() ? UV3H_EXTIO_INT0_BROADCAST_32 : \
/*is_uv4_hub*/ UV4H_EXTIO_INT0_BROADCAST_32)
@@ -821,12 +686,10 @@ union uvh_gr0_tlb_int1_config_u {
/* ========================================================================= */
/* UVH_GR0_TLB_MMR_CONTROL */
/* ========================================================================= */
-#define UV1H_GR0_TLB_MMR_CONTROL 0x401080UL
#define UV2H_GR0_TLB_MMR_CONTROL 0xc01080UL
#define UV3H_GR0_TLB_MMR_CONTROL 0xc01080UL
#define UV4H_GR0_TLB_MMR_CONTROL 0x601080UL
#define UVH_GR0_TLB_MMR_CONTROL ( \
- is_uv1_hub() ? UV1H_GR0_TLB_MMR_CONTROL : \
is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL : \
is_uv3_hub() ? UV3H_GR0_TLB_MMR_CONTROL : \
/*is_uv4_hub*/ UV4H_GR0_TLB_MMR_CONTROL)
@@ -841,29 +704,6 @@ union uvh_gr0_tlb_int1_config_u {
#define UVH_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
#define UVH_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-#define UV1H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UV1H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
-#define UV1H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UV1H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UV1H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UV1H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_SHFT 48
-#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_SHFT 52
-#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBPGSIZE_SHFT 54
-#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_SHFT 56
-#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_SHFT 60
-#define UV1H_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
-#define UV1H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
-#define UV1H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UV1H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UV1H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UV1H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
-#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
-#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBPGSIZE_MASK 0x0040000000000000UL
-#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL
-#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL
-
#define UVXH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
#define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
#define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
@@ -932,17 +772,14 @@ union uvh_gr0_tlb_int1_config_u {
#define UV4H_GR0_TLB_MMR_CONTROL_PAGE_SIZE_MASK 0xf800000000000000UL
#define UVH_GR0_TLB_MMR_CONTROL_INDEX_MASK ( \
- is_uv1_hub() ? UV1H_GR0_TLB_MMR_CONTROL_INDEX_MASK : \
is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL_INDEX_MASK : \
is_uv3_hub() ? UV3H_GR0_TLB_MMR_CONTROL_INDEX_MASK : \
/*is_uv4_hub*/ UV4H_GR0_TLB_MMR_CONTROL_INDEX_MASK)
#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK ( \
- is_uv1_hub() ? UV1H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK : \
is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK : \
is_uv3_hub() ? UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK : \
/*is_uv4_hub*/ UV4H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK)
#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT ( \
- is_uv1_hub() ? UV1H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT : \
is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT : \
is_uv3_hub() ? UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT : \
/*is_uv4_hub*/ UV4H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT)
@@ -961,28 +798,6 @@ union uvh_gr0_tlb_mmr_control_u {
unsigned long rsvd_49_51:3;
unsigned long rsvd_52_63:12;
} s;
- struct uv1h_gr0_tlb_mmr_control_s {
- unsigned long index:12; /* RW */
- unsigned long mem_sel:2; /* RW */
- unsigned long rsvd_14_15:2;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long rsvd_21_29:9;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long rsvd_32_47:16;
- unsigned long mmr_inj_con:1; /* RW */
- unsigned long rsvd_49_51:3;
- unsigned long mmr_inj_tlbram:1; /* RW */
- unsigned long rsvd_53:1;
- unsigned long mmr_inj_tlbpgsize:1; /* RW */
- unsigned long rsvd_55:1;
- unsigned long mmr_inj_tlbrreg:1; /* RW */
- unsigned long rsvd_57_59:3;
- unsigned long mmr_inj_tlblruv:1; /* RW */
- unsigned long rsvd_61_63:3;
- } s1;
struct uvxh_gr0_tlb_mmr_control_s {
unsigned long rsvd_0_15:16;
unsigned long auto_valid_en:1; /* RW */
@@ -1055,27 +870,16 @@ union uvh_gr0_tlb_mmr_control_u {
/* ========================================================================= */
/* UVH_GR0_TLB_MMR_READ_DATA_HI */
/* ========================================================================= */
-#define UV1H_GR0_TLB_MMR_READ_DATA_HI 0x4010a0UL
#define UV2H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
#define UV3H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
#define UV4H_GR0_TLB_MMR_READ_DATA_HI 0x6010a0UL
#define UVH_GR0_TLB_MMR_READ_DATA_HI ( \
- is_uv1_hub() ? UV1H_GR0_TLB_MMR_READ_DATA_HI : \
is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_HI : \
is_uv3_hub() ? UV3H_GR0_TLB_MMR_READ_DATA_HI : \
/*is_uv4_hub*/ UV4H_GR0_TLB_MMR_READ_DATA_HI)
#define UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-#define UV1H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-#define UV1H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
-#define UV1H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
-#define UV1H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
-#define UV1H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
-#define UV1H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
-#define UV1H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
-#define UV1H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
-
#define UVXH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
#define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
@@ -1118,13 +922,6 @@ union uvh_gr0_tlb_mmr_control_u {
union uvh_gr0_tlb_mmr_read_data_hi_u {
unsigned long v;
- struct uv1h_gr0_tlb_mmr_read_data_hi_s {
- unsigned long pfn:41; /* RO */
- unsigned long gaa:2; /* RO */
- unsigned long dirty:1; /* RO */
- unsigned long larger:1; /* RO */
- unsigned long rsvd_45_63:19;
- } s1;
struct uv2h_gr0_tlb_mmr_read_data_hi_s {
unsigned long pfn:41; /* RO */
unsigned long gaa:2; /* RO */
@@ -1156,12 +953,10 @@ union uvh_gr0_tlb_mmr_read_data_hi_u {
/* ========================================================================= */
/* UVH_GR0_TLB_MMR_READ_DATA_LO */
/* ========================================================================= */
-#define UV1H_GR0_TLB_MMR_READ_DATA_LO 0x4010a8UL
#define UV2H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
#define UV3H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
#define UV4H_GR0_TLB_MMR_READ_DATA_LO 0x6010a8UL
#define UVH_GR0_TLB_MMR_READ_DATA_LO ( \
- is_uv1_hub() ? UV1H_GR0_TLB_MMR_READ_DATA_LO : \
is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_LO : \
is_uv3_hub() ? UV3H_GR0_TLB_MMR_READ_DATA_LO : \
/*is_uv4_hub*/ UV4H_GR0_TLB_MMR_READ_DATA_LO)
@@ -1173,13 +968,6 @@ union uvh_gr0_tlb_mmr_read_data_hi_u {
#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
#define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UV1H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UV1H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
#define UVXH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
@@ -1216,11 +1004,6 @@ union uvh_gr0_tlb_mmr_read_data_lo_u {
unsigned long asid:24; /* RO */
unsigned long valid:1; /* RO */
} s;
- struct uv1h_gr0_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
- } s1;
struct uvxh_gr0_tlb_mmr_read_data_lo_s {
unsigned long vpn:39; /* RO */
unsigned long asid:24; /* RO */
@@ -1246,12 +1029,10 @@ union uvh_gr0_tlb_mmr_read_data_lo_u {
/* ========================================================================= */
/* UVH_GR1_TLB_INT0_CONFIG */
/* ========================================================================= */
-#define UV1H_GR1_TLB_INT0_CONFIG 0x61f00UL
#define UV2H_GR1_TLB_INT0_CONFIG 0x61f00UL
#define UV3H_GR1_TLB_INT0_CONFIG 0x61f00UL
#define UV4H_GR1_TLB_INT0_CONFIG 0x62100UL
#define UVH_GR1_TLB_INT0_CONFIG ( \
- is_uv1_hub() ? UV1H_GR1_TLB_INT0_CONFIG : \
is_uv2_hub() ? UV2H_GR1_TLB_INT0_CONFIG : \
is_uv3_hub() ? UV3H_GR1_TLB_INT0_CONFIG : \
/*is_uv4_hub*/ UV4H_GR1_TLB_INT0_CONFIG)
@@ -1293,12 +1074,10 @@ union uvh_gr1_tlb_int0_config_u {
/* ========================================================================= */
/* UVH_GR1_TLB_INT1_CONFIG */
/* ========================================================================= */
-#define UV1H_GR1_TLB_INT1_CONFIG 0x61f40UL
#define UV2H_GR1_TLB_INT1_CONFIG 0x61f40UL
#define UV3H_GR1_TLB_INT1_CONFIG 0x61f40UL
#define UV4H_GR1_TLB_INT1_CONFIG 0x62140UL
#define UVH_GR1_TLB_INT1_CONFIG ( \
- is_uv1_hub() ? UV1H_GR1_TLB_INT1_CONFIG : \
is_uv2_hub() ? UV2H_GR1_TLB_INT1_CONFIG : \
is_uv3_hub() ? UV3H_GR1_TLB_INT1_CONFIG : \
/*is_uv4_hub*/ UV4H_GR1_TLB_INT1_CONFIG)
@@ -1340,12 +1119,10 @@ union uvh_gr1_tlb_int1_config_u {
/* ========================================================================= */
/* UVH_GR1_TLB_MMR_CONTROL */
/* ========================================================================= */
-#define UV1H_GR1_TLB_MMR_CONTROL 0x801080UL
#define UV2H_GR1_TLB_MMR_CONTROL 0x1001080UL
#define UV3H_GR1_TLB_MMR_CONTROL 0x1001080UL
#define UV4H_GR1_TLB_MMR_CONTROL 0x701080UL
#define UVH_GR1_TLB_MMR_CONTROL ( \
- is_uv1_hub() ? UV1H_GR1_TLB_MMR_CONTROL : \
is_uv2_hub() ? UV2H_GR1_TLB_MMR_CONTROL : \
is_uv3_hub() ? UV3H_GR1_TLB_MMR_CONTROL : \
/*is_uv4_hub*/ UV4H_GR1_TLB_MMR_CONTROL)
@@ -1360,29 +1137,6 @@ union uvh_gr1_tlb_int1_config_u {
#define UVH_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
#define UVH_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-#define UV1H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UV1H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
-#define UV1H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UV1H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UV1H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UV1H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_SHFT 48
-#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_SHFT 52
-#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBPGSIZE_SHFT 54
-#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_SHFT 56
-#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_SHFT 60
-#define UV1H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
-#define UV1H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
-#define UV1H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UV1H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UV1H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UV1H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
-#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
-#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBPGSIZE_MASK 0x0040000000000000UL
-#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL
-#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL
-
#define UVXH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
#define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
#define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
@@ -1465,28 +1219,6 @@ union uvh_gr1_tlb_mmr_control_u {
unsigned long rsvd_49_51:3;
unsigned long rsvd_52_63:12;
} s;
- struct uv1h_gr1_tlb_mmr_control_s {
- unsigned long index:12; /* RW */
- unsigned long mem_sel:2; /* RW */
- unsigned long rsvd_14_15:2;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long rsvd_21_29:9;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long rsvd_32_47:16;
- unsigned long mmr_inj_con:1; /* RW */
- unsigned long rsvd_49_51:3;
- unsigned long mmr_inj_tlbram:1; /* RW */
- unsigned long rsvd_53:1;
- unsigned long mmr_inj_tlbpgsize:1; /* RW */
- unsigned long rsvd_55:1;
- unsigned long mmr_inj_tlbrreg:1; /* RW */
- unsigned long rsvd_57_59:3;
- unsigned long mmr_inj_tlblruv:1; /* RW */
- unsigned long rsvd_61_63:3;
- } s1;
struct uvxh_gr1_tlb_mmr_control_s {
unsigned long rsvd_0_15:16;
unsigned long auto_valid_en:1; /* RW */
@@ -1559,27 +1291,16 @@ union uvh_gr1_tlb_mmr_control_u {
/* ========================================================================= */
/* UVH_GR1_TLB_MMR_READ_DATA_HI */
/* ========================================================================= */
-#define UV1H_GR1_TLB_MMR_READ_DATA_HI 0x8010a0UL
#define UV2H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
#define UV3H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
#define UV4H_GR1_TLB_MMR_READ_DATA_HI 0x7010a0UL
#define UVH_GR1_TLB_MMR_READ_DATA_HI ( \
- is_uv1_hub() ? UV1H_GR1_TLB_MMR_READ_DATA_HI : \
is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_HI : \
is_uv3_hub() ? UV3H_GR1_TLB_MMR_READ_DATA_HI : \
/*is_uv4_hub*/ UV4H_GR1_TLB_MMR_READ_DATA_HI)
#define UVH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-#define UV1H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-#define UV1H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
-#define UV1H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
-#define UV1H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
-#define UV1H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
-#define UV1H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
-#define UV1H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
-#define UV1H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
-
#define UVXH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
#define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
@@ -1622,13 +1343,6 @@ union uvh_gr1_tlb_mmr_control_u {
union uvh_gr1_tlb_mmr_read_data_hi_u {
unsigned long v;
- struct uv1h_gr1_tlb_mmr_read_data_hi_s {
- unsigned long pfn:41; /* RO */
- unsigned long gaa:2; /* RO */
- unsigned long dirty:1; /* RO */
- unsigned long larger:1; /* RO */
- unsigned long rsvd_45_63:19;
- } s1;
struct uv2h_gr1_tlb_mmr_read_data_hi_s {
unsigned long pfn:41; /* RO */
unsigned long gaa:2; /* RO */
@@ -1660,12 +1374,10 @@ union uvh_gr1_tlb_mmr_read_data_hi_u {
/* ========================================================================= */
/* UVH_GR1_TLB_MMR_READ_DATA_LO */
/* ========================================================================= */
-#define UV1H_GR1_TLB_MMR_READ_DATA_LO 0x8010a8UL
#define UV2H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
#define UV3H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
#define UV4H_GR1_TLB_MMR_READ_DATA_LO 0x7010a8UL
#define UVH_GR1_TLB_MMR_READ_DATA_LO ( \
- is_uv1_hub() ? UV1H_GR1_TLB_MMR_READ_DATA_LO : \
is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_LO : \
is_uv3_hub() ? UV3H_GR1_TLB_MMR_READ_DATA_LO : \
/*is_uv4_hub*/ UV4H_GR1_TLB_MMR_READ_DATA_LO)
@@ -1677,13 +1389,6 @@ union uvh_gr1_tlb_mmr_read_data_hi_u {
#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
#define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UV1H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UV1H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
#define UVXH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
@@ -1720,11 +1425,6 @@ union uvh_gr1_tlb_mmr_read_data_lo_u {
unsigned long asid:24; /* RO */
unsigned long valid:1; /* RO */
} s;
- struct uv1h_gr1_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
- } s1;
struct uvxh_gr1_tlb_mmr_read_data_lo_s {
unsigned long vpn:39; /* RO */
unsigned long asid:24; /* RO */
@@ -1770,9 +1470,6 @@ union uvh_int_cmpb_u {
#define UVH_INT_CMPC 0x22100UL
-#define UV1H_INT_CMPC_REAL_TIME_CMPC_SHFT 0
-#define UV1H_INT_CMPC_REAL_TIME_CMPC_MASK 0x00ffffffffffffffUL
-
#define UVXH_INT_CMPC_REAL_TIME_CMP_2_SHFT 0
#define UVXH_INT_CMPC_REAL_TIME_CMP_2_MASK 0x00ffffffffffffffUL
@@ -1791,9 +1488,6 @@ union uvh_int_cmpc_u {
#define UVH_INT_CMPD 0x22180UL
-#define UV1H_INT_CMPD_REAL_TIME_CMPD_SHFT 0
-#define UV1H_INT_CMPD_REAL_TIME_CMPD_MASK 0x00ffffffffffffffUL
-
#define UVXH_INT_CMPD_REAL_TIME_CMP_3_SHFT 0
#define UVXH_INT_CMPD_REAL_TIME_CMP_3_MASK 0x00ffffffffffffffUL
@@ -1811,12 +1505,10 @@ union uvh_int_cmpd_u {
/* ========================================================================= */
#define UVH_IPI_INT 0x60500UL
-#define UV1H_IPI_INT_32 0x348
#define UV2H_IPI_INT_32 0x348
#define UV3H_IPI_INT_32 0x348
#define UV4H_IPI_INT_32 0x268
#define UVH_IPI_INT_32 ( \
- is_uv1_hub() ? UV1H_IPI_INT_32 : \
is_uv2_hub() ? UV2H_IPI_INT_32 : \
is_uv3_hub() ? UV3H_IPI_INT_32 : \
/*is_uv4_hub*/ UV4H_IPI_INT_32)
@@ -1849,24 +1541,16 @@ union uvh_ipi_int_u {
/* ========================================================================= */
/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */
/* ========================================================================= */
-#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
#define UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST uv_undefined("UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST")
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST ( \
- is_uv1_hub() ? UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST : \
is_uv2_hub() ? UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST : \
is_uv3_hub() ? UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST : \
/*is_uv4_hub*/ UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST)
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0
-#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
-#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49
-#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
-#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_MASK 0x7ffe000000000000UL
-
-
#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49
#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
@@ -1880,13 +1564,6 @@ union uvh_ipi_int_u {
union uvh_lb_bau_intd_payload_queue_first_u {
unsigned long v;
- struct uv1h_lb_bau_intd_payload_queue_first_s {
- unsigned long rsvd_0_3:4;
- unsigned long address:39; /* RW */
- unsigned long rsvd_43_48:6;
- unsigned long node_id:14; /* RW */
- unsigned long rsvd_63:1;
- } s1;
struct uv2h_lb_bau_intd_payload_queue_first_s {
unsigned long rsvd_0_3:4;
unsigned long address:39; /* RW */
@@ -1906,22 +1583,16 @@ union uvh_lb_bau_intd_payload_queue_first_u {
/* ========================================================================= */
/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */
/* ========================================================================= */
-#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
#define UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST uv_undefined("UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST")
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST ( \
- is_uv1_hub() ? UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST : \
is_uv2_hub() ? UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST : \
is_uv3_hub() ? UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST : \
/*is_uv4_hub*/ UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST)
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8
-#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
-#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
-
-
#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
@@ -1931,11 +1602,6 @@ union uvh_lb_bau_intd_payload_queue_first_u {
union uvh_lb_bau_intd_payload_queue_last_u {
unsigned long v;
- struct uv1h_lb_bau_intd_payload_queue_last_s {
- unsigned long rsvd_0_3:4;
- unsigned long address:39; /* RW */
- unsigned long rsvd_43_63:21;
- } s1;
struct uv2h_lb_bau_intd_payload_queue_last_s {
unsigned long rsvd_0_3:4;
unsigned long address:39; /* RW */
@@ -1951,22 +1617,16 @@ union uvh_lb_bau_intd_payload_queue_last_u {
/* ========================================================================= */
/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */
/* ========================================================================= */
-#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
#define UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL uv_undefined("UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL")
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL ( \
- is_uv1_hub() ? UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL : \
is_uv2_hub() ? UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL : \
is_uv3_hub() ? UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL : \
/*is_uv4_hub*/ UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL)
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0
-#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
-#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
-
-
#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
@@ -1976,11 +1636,6 @@ union uvh_lb_bau_intd_payload_queue_last_u {
union uvh_lb_bau_intd_payload_queue_tail_u {
unsigned long v;
- struct uv1h_lb_bau_intd_payload_queue_tail_s {
- unsigned long rsvd_0_3:4;
- unsigned long address:39; /* RW */
- unsigned long rsvd_43_63:21;
- } s1;
struct uv2h_lb_bau_intd_payload_queue_tail_s {
unsigned long rsvd_0_3:4;
unsigned long address:39; /* RW */
@@ -1996,52 +1651,16 @@ union uvh_lb_bau_intd_payload_queue_tail_u {
/* ========================================================================= */
/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */
/* ========================================================================= */
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
#define UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE uv_undefined("UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE")
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE ( \
- is_uv1_hub() ? UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE : \
is_uv2_hub() ? UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE : \
is_uv3_hub() ? UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE : \
/*is_uv4_hub*/ UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE)
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_SHFT 3
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_SHFT 4
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_SHFT 5
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_SHFT 6
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_SHFT 7
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_SHFT 8
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_SHFT 9
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_SHFT 10
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_SHFT 11
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_SHFT 12
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_SHFT 13
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_SHFT 14
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL
-
-
#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1
#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2
@@ -2111,25 +1730,6 @@ union uvh_lb_bau_intd_payload_queue_tail_u {
union uvh_lb_bau_intd_software_acknowledge_u {
unsigned long v;
- struct uv1h_lb_bau_intd_software_acknowledge_s {
- unsigned long pending_0:1; /* RW, W1C */
- unsigned long pending_1:1; /* RW, W1C */
- unsigned long pending_2:1; /* RW, W1C */
- unsigned long pending_3:1; /* RW, W1C */
- unsigned long pending_4:1; /* RW, W1C */
- unsigned long pending_5:1; /* RW, W1C */
- unsigned long pending_6:1; /* RW, W1C */
- unsigned long pending_7:1; /* RW, W1C */
- unsigned long timeout_0:1; /* RW, W1C */
- unsigned long timeout_1:1; /* RW, W1C */
- unsigned long timeout_2:1; /* RW, W1C */
- unsigned long timeout_3:1; /* RW, W1C */
- unsigned long timeout_4:1; /* RW, W1C */
- unsigned long timeout_5:1; /* RW, W1C */
- unsigned long timeout_6:1; /* RW, W1C */
- unsigned long timeout_7:1; /* RW, W1C */
- unsigned long rsvd_16_63:48;
- } s1;
struct uv2h_lb_bau_intd_software_acknowledge_s {
unsigned long pending_0:1; /* RW */
unsigned long pending_1:1; /* RW */
@@ -2173,12 +1773,10 @@ union uvh_lb_bau_intd_software_acknowledge_u {
/* ========================================================================= */
/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */
/* ========================================================================= */
-#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x320088UL
#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x320088UL
#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x320088UL
#define UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS uv_undefined("UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS")
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS ( \
- is_uv1_hub() ? UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS : \
is_uv2_hub() ? UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS : \
is_uv3_hub() ? UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS : \
/*is_uv4_hub*/ UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS)
@@ -2188,22 +1786,18 @@ union uvh_lb_bau_intd_software_acknowledge_u {
/* ========================================================================= */
/* UVH_LB_BAU_MISC_CONTROL */
/* ========================================================================= */
-#define UV1H_LB_BAU_MISC_CONTROL 0x320170UL
#define UV2H_LB_BAU_MISC_CONTROL 0x320170UL
#define UV3H_LB_BAU_MISC_CONTROL 0x320170UL
#define UV4H_LB_BAU_MISC_CONTROL 0xc8170UL
#define UVH_LB_BAU_MISC_CONTROL ( \
- is_uv1_hub() ? UV1H_LB_BAU_MISC_CONTROL : \
is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL : \
is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL : \
/*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL)
-#define UV1H_LB_BAU_MISC_CONTROL_32 0xa10
#define UV2H_LB_BAU_MISC_CONTROL_32 0xa10
#define UV3H_LB_BAU_MISC_CONTROL_32 0xa10
#define UV4H_LB_BAU_MISC_CONTROL_32 0xa18
#define UVH_LB_BAU_MISC_CONTROL_32 ( \
- is_uv1_hub() ? UV1H_LB_BAU_MISC_CONTROL_32 : \
is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_32 : \
is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_32 : \
/*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_32)
@@ -2237,39 +1831,6 @@ union uvh_lb_bau_intd_software_acknowledge_u {
#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
#define UVH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
-#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
-#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
-#define UV1H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
-#define UV1H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
-#define UV1H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
-#define UV1H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
-#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
-#define UV1H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
-#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
-#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
-#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
-#define UV1H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
-#define UV1H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
-#define UV1H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
-#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
-#define UV1H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
-#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
-#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
-#define UV1H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
-#define UV1H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
-#define UV1H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
-#define UV1H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
-#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
-#define UV1H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
-#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
-#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
-#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
-#define UV1H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
-#define UV1H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
-#define UV1H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
-#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
-#define UV1H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
-
#define UVXH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
#define UVXH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
@@ -2469,28 +2030,24 @@ union uvh_lb_bau_intd_software_acknowledge_u {
#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK \
uv_undefined("UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK")
#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK ( \
- is_uv1_hub() ? UV1H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK : \
is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK : \
is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK : \
/*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK)
#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT \
uv_undefined("UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT")
#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT ( \
- is_uv1_hub() ? UV1H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT : \
is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT : \
is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT : \
/*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT)
#define UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK \
uv_undefined("UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK")
#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK ( \
- is_uv1_hub() ? UV1H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK : \
is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK : \
is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK : \
/*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK)
#define UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT \
uv_undefined("UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT")
#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT ( \
- is_uv1_hub() ? UV1H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT : \
is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT : \
is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT : \
/*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT)
@@ -2515,25 +2072,6 @@ union uvh_lb_bau_misc_control_u {
unsigned long rsvd_29_47:19;
unsigned long fun:16; /* RW */
} s;
- struct uv1h_lb_bau_misc_control_s {
- unsigned long rejection_delay:8; /* RW */
- unsigned long apic_mode:1; /* RW */
- unsigned long force_broadcast:1; /* RW */
- unsigned long force_lock_nop:1; /* RW */
- unsigned long qpi_agent_presence_vector:3; /* RW */
- unsigned long descriptor_fetch_mode:1; /* RW */
- unsigned long enable_intd_soft_ack_mode:1; /* RW */
- unsigned long intd_soft_ack_timeout_period:4; /* RW */
- unsigned long enable_dual_mapping_mode:1; /* RW */
- unsigned long vga_io_port_decode_enable:1; /* RW */
- unsigned long vga_io_port_16_bit_decode:1; /* RW */
- unsigned long suppress_dest_registration:1; /* RW */
- unsigned long programmed_initial_priority:3; /* RW */
- unsigned long use_incoming_priority:1; /* RW */
- unsigned long enable_programmed_initial_priority:1;/* RW */
- unsigned long rsvd_29_47:19;
- unsigned long fun:16; /* RW */
- } s1;
struct uvxh_lb_bau_misc_control_s {
unsigned long rejection_delay:8; /* RW */
unsigned long apic_mode:1; /* RW */
@@ -2648,22 +2186,18 @@ union uvh_lb_bau_misc_control_u {
/* ========================================================================= */
/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */
/* ========================================================================= */
-#define UV1H_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
#define UV2H_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
#define UV3H_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
#define UV4H_LB_BAU_SB_ACTIVATION_CONTROL 0xc8020UL
#define UVH_LB_BAU_SB_ACTIVATION_CONTROL ( \
- is_uv1_hub() ? UV1H_LB_BAU_SB_ACTIVATION_CONTROL : \
is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_CONTROL : \
is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_CONTROL : \
/*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_CONTROL)
-#define UV1H_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
#define UV2H_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
#define UV3H_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
#define UV4H_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9c8
#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 ( \
- is_uv1_hub() ? UV1H_LB_BAU_SB_ACTIVATION_CONTROL_32 : \
is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_CONTROL_32 : \
is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_CONTROL_32 : \
/*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_CONTROL_32)
@@ -2689,22 +2223,18 @@ union uvh_lb_bau_sb_activation_control_u {
/* ========================================================================= */
/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */
/* ========================================================================= */
-#define UV1H_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_0 0xc8030UL
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 ( \
- is_uv1_hub() ? UV1H_LB_BAU_SB_ACTIVATION_STATUS_0 : \
is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_0 : \
is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_0 : \
/*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_0)
-#define UV1H_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9d0
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 ( \
- is_uv1_hub() ? UV1H_LB_BAU_SB_ACTIVATION_STATUS_0_32 : \
is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_0_32 : \
is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_0_32 : \
/*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_0_32)
@@ -2723,22 +2253,18 @@ union uvh_lb_bau_sb_activation_status_0_u {
/* ========================================================================= */
/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */
/* ========================================================================= */
-#define UV1H_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_1 0xc8040UL
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 ( \
- is_uv1_hub() ? UV1H_LB_BAU_SB_ACTIVATION_STATUS_1 : \
is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_1 : \
is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_1 : \
/*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_1)
-#define UV1H_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9d8
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 ( \
- is_uv1_hub() ? UV1H_LB_BAU_SB_ACTIVATION_STATUS_1_32 : \
is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_1_32 : \
is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_1_32 : \
/*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_1_32)
@@ -2757,32 +2283,24 @@ union uvh_lb_bau_sb_activation_status_1_u {
/* ========================================================================= */
/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */
/* ========================================================================= */
-#define UV1H_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE 0xc8010UL
#define UVH_LB_BAU_SB_DESCRIPTOR_BASE ( \
- is_uv1_hub() ? UV1H_LB_BAU_SB_DESCRIPTOR_BASE : \
is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE : \
is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE : \
/*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE)
-#define UV1H_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9c0
#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 ( \
- is_uv1_hub() ? UV1H_LB_BAU_SB_DESCRIPTOR_BASE_32 : \
is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE_32 : \
is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE_32 : \
/*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE_32)
#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12
-#define UV1H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
-#define UV1H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
-#define UV1H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
-
#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
@@ -2800,21 +2318,18 @@ union uvh_lb_bau_sb_activation_status_1_u {
#define UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0xffe0000000000000UL
#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT ( \
- is_uv1_hub() ? UV1H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT : \
is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT : \
is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT : \
is_uv4a_hub() ? UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT : \
/*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT)
#define UVH_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK ( \
- is_uv1_hub() ? UV1H_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK : \
is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK : \
is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK : \
is_uv4a_hub() ? UV4AH_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK : \
/*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK)
#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK ( \
- is_uv1_hub() ? UV1H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK : \
is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK : \
is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK : \
is_uv4a_hub() ? UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK : \
@@ -2824,7 +2339,6 @@ union uvh_lb_bau_sb_activation_status_1_u {
/* UVH_NODE_ID */
/* ========================================================================= */
#define UVH_NODE_ID 0x0UL
-#define UV1H_NODE_ID 0x0UL
#define UV2H_NODE_ID 0x0UL
#define UV3H_NODE_ID 0x0UL
#define UV4H_NODE_ID 0x0UL
@@ -2840,21 +2354,6 @@ union uvh_lb_bau_sb_activation_status_1_u {
#define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
#define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
-#define UV1H_NODE_ID_FORCE1_SHFT 0
-#define UV1H_NODE_ID_MANUFACTURER_SHFT 1
-#define UV1H_NODE_ID_PART_NUMBER_SHFT 12
-#define UV1H_NODE_ID_REVISION_SHFT 28
-#define UV1H_NODE_ID_NODE_ID_SHFT 32
-#define UV1H_NODE_ID_NODES_PER_BIT_SHFT 48
-#define UV1H_NODE_ID_NI_PORT_SHFT 56
-#define UV1H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
-#define UV1H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
-#define UV1H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
-#define UV1H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
-#define UV1H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
-#define UV1H_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
-#define UV1H_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL
-
#define UVXH_NODE_ID_FORCE1_SHFT 0
#define UVXH_NODE_ID_MANUFACTURER_SHFT 1
#define UVXH_NODE_ID_PART_NUMBER_SHFT 12
@@ -2934,18 +2433,6 @@ union uvh_node_id_u {
unsigned long node_id:15; /* RW */
unsigned long rsvd_47_63:17;
} s;
- struct uv1h_node_id_s {
- unsigned long force1:1; /* RO */
- unsigned long manufacturer:11; /* RO */
- unsigned long part_number:16; /* RO */
- unsigned long revision:4; /* RO */
- unsigned long node_id:15; /* RW */
- unsigned long rsvd_47:1;
- unsigned long nodes_per_bit:7; /* RW */
- unsigned long rsvd_55:1;
- unsigned long ni_port:4; /* RO */
- unsigned long rsvd_60_63:4;
- } s1;
struct uvxh_node_id_s {
unsigned long force1:1; /* RO */
unsigned long manufacturer:11; /* RO */
@@ -3001,12 +2488,10 @@ union uvh_node_id_u {
/* ========================================================================= */
#define UVH_NODE_PRESENT_TABLE 0x1400UL
-#define UV1H_NODE_PRESENT_TABLE_DEPTH 16
#define UV2H_NODE_PRESENT_TABLE_DEPTH 16
#define UV3H_NODE_PRESENT_TABLE_DEPTH 16
#define UV4H_NODE_PRESENT_TABLE_DEPTH 4
#define UVH_NODE_PRESENT_TABLE_DEPTH ( \
- is_uv1_hub() ? UV1H_NODE_PRESENT_TABLE_DEPTH : \
is_uv2_hub() ? UV2H_NODE_PRESENT_TABLE_DEPTH : \
is_uv3_hub() ? UV3H_NODE_PRESENT_TABLE_DEPTH : \
/*is_uv4_hub*/ UV4H_NODE_PRESENT_TABLE_DEPTH)
@@ -3025,12 +2510,10 @@ union uvh_node_present_table_u {
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */
/* ========================================================================= */
-#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x4800c8UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR ( \
- is_uv1_hub() ? UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR : \
is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR : \
is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR : \
/*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR)
@@ -3042,13 +2525,6 @@ union uvh_node_present_table_u {
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
-#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
-#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
-#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
-#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
-#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
-
#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
@@ -3088,14 +2564,6 @@ union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
} s;
- struct uv1h_rh_gam_alias210_overlay_config_0_mmr_s {
- unsigned long rsvd_0_23:24;
- unsigned long base:8; /* RW */
- unsigned long rsvd_32_47:16;
- unsigned long m_alias:5; /* RW */
- unsigned long rsvd_53_62:10;
- unsigned long enable:1; /* RW */
- } s1;
struct uvxh_rh_gam_alias210_overlay_config_0_mmr_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
@@ -3133,12 +2601,10 @@ union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */
/* ========================================================================= */
-#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x4800d8UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR ( \
- is_uv1_hub() ? UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR : \
is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR : \
is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR : \
/*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR)
@@ -3150,13 +2616,6 @@ union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
-#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
-#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
-#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
-#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
-#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
-
#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
@@ -3196,14 +2655,6 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
} s;
- struct uv1h_rh_gam_alias210_overlay_config_1_mmr_s {
- unsigned long rsvd_0_23:24;
- unsigned long base:8; /* RW */
- unsigned long rsvd_32_47:16;
- unsigned long m_alias:5; /* RW */
- unsigned long rsvd_53_62:10;
- unsigned long enable:1; /* RW */
- } s1;
struct uvxh_rh_gam_alias210_overlay_config_1_mmr_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
@@ -3241,12 +2692,10 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */
/* ========================================================================= */
-#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x4800e8UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR ( \
- is_uv1_hub() ? UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR : \
is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR : \
is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR : \
/*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR)
@@ -3258,13 +2707,6 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
-#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
-#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
-#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
-#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
-#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
-
#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
@@ -3304,14 +2746,6 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
} s;
- struct uv1h_rh_gam_alias210_overlay_config_2_mmr_s {
- unsigned long rsvd_0_23:24;
- unsigned long base:8; /* RW */
- unsigned long rsvd_32_47:16;
- unsigned long m_alias:5; /* RW */
- unsigned long rsvd_53_62:10;
- unsigned long enable:1; /* RW */
- } s1;
struct uvxh_rh_gam_alias210_overlay_config_2_mmr_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
@@ -3349,12 +2783,10 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
/* ========================================================================= */
-#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x4800d0UL
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR ( \
- is_uv1_hub() ? UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR : \
is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR : \
is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR : \
/*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR)
@@ -3362,9 +2794,6 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
-#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-
#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
@@ -3385,11 +2814,6 @@ union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
} s;
- struct uv1h_rh_gam_alias210_redirect_config_0_mmr_s {
- unsigned long rsvd_0_23:24;
- unsigned long dest_base:22; /* RW */
- unsigned long rsvd_46_63:18;
- } s1;
struct uvxh_rh_gam_alias210_redirect_config_0_mmr_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
@@ -3415,12 +2839,10 @@ union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */
/* ========================================================================= */
-#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x4800e0UL
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR ( \
- is_uv1_hub() ? UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR : \
is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR : \
is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR : \
/*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR)
@@ -3428,9 +2850,6 @@ union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
-#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-
#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
@@ -3451,11 +2870,6 @@ union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
} s;
- struct uv1h_rh_gam_alias210_redirect_config_1_mmr_s {
- unsigned long rsvd_0_23:24;
- unsigned long dest_base:22; /* RW */
- unsigned long rsvd_46_63:18;
- } s1;
struct uvxh_rh_gam_alias210_redirect_config_1_mmr_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
@@ -3481,12 +2895,10 @@ union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */
/* ========================================================================= */
-#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x4800f0UL
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR ( \
- is_uv1_hub() ? UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR : \
is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR : \
is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR : \
/*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR)
@@ -3494,9 +2906,6 @@ union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
-#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-
#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
@@ -3517,11 +2926,6 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
} s;
- struct uv1h_rh_gam_alias210_redirect_config_2_mmr_s {
- unsigned long rsvd_0_23:24;
- unsigned long dest_base:22; /* RW */
- unsigned long rsvd_46_63:18;
- } s1;
struct uvxh_rh_gam_alias210_redirect_config_2_mmr_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
@@ -3547,12 +2951,10 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_CONFIG_MMR */
/* ========================================================================= */
-#define UV1H_RH_GAM_CONFIG_MMR 0x1600000UL
#define UV2H_RH_GAM_CONFIG_MMR 0x1600000UL
#define UV3H_RH_GAM_CONFIG_MMR 0x1600000UL
#define UV4H_RH_GAM_CONFIG_MMR 0x480000UL
#define UVH_RH_GAM_CONFIG_MMR ( \
- is_uv1_hub() ? UV1H_RH_GAM_CONFIG_MMR : \
is_uv2_hub() ? UV2H_RH_GAM_CONFIG_MMR : \
is_uv3_hub() ? UV3H_RH_GAM_CONFIG_MMR : \
/*is_uv4_hub*/ UV4H_RH_GAM_CONFIG_MMR)
@@ -3560,13 +2962,6 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
-#define UV1H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
-#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
-#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12
-#define UV1H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
-#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
-#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
-
#define UVXH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
#define UVXH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
@@ -3591,13 +2986,6 @@ union uvh_rh_gam_config_mmr_u {
unsigned long n_skt:4; /* RW */
unsigned long rsvd_10_63:54;
} s;
- struct uv1h_rh_gam_config_mmr_s {
- unsigned long m_skt:6; /* RW */
- unsigned long n_skt:4; /* RW */
- unsigned long rsvd_10_11:2;
- unsigned long mmiol_cfg:1; /* RW */
- unsigned long rsvd_13_63:51;
- } s1;
struct uvxh_rh_gam_config_mmr_s {
unsigned long rsvd_0_5:6;
unsigned long n_skt:4; /* RW */
@@ -3623,12 +3011,10 @@ union uvh_rh_gam_config_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
/* ========================================================================= */
-#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x480010UL
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR ( \
- is_uv1_hub() ? UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR : \
is_uv2_hub() ? UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR : \
is_uv3_hub() ? UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR : \
/*is_uv4_hub*/ UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR)
@@ -3638,15 +3024,6 @@ union uvh_rh_gam_config_mmr_u {
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
-#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
-#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
-#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
-#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0001000000000000UL
-#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
-#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
@@ -3676,12 +3053,10 @@ union uvh_rh_gam_config_mmr_u {
#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK ( \
- is_uv1_hub() ? UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK : \
is_uv2_hub() ? UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK : \
is_uv3_hub() ? UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK : \
/*is_uv4_hub*/ UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK)
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT ( \
- is_uv1_hub() ? UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT : \
is_uv2_hub() ? UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT : \
is_uv3_hub() ? UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT : \
/*is_uv4_hub*/ UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT)
@@ -3694,16 +3069,6 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
} s;
- struct uv1h_rh_gam_gru_overlay_config_mmr_s {
- unsigned long rsvd_0_27:28;
- unsigned long base:18; /* RW */
- unsigned long rsvd_46_47:2;
- unsigned long gr4:1; /* RW */
- unsigned long rsvd_49_51:3;
- unsigned long n_gru:4; /* RW */
- unsigned long rsvd_56_62:7;
- unsigned long enable:1; /* RW */
- } s1;
struct uvxh_rh_gam_gru_overlay_config_mmr_s {
unsigned long rsvd_0_45:46;
unsigned long rsvd_46_51:6;
@@ -3742,12 +3107,10 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR */
/* ========================================================================= */
-#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR uv_undefined("UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR")
#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR uv_undefined("UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR")
#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR 0x1603000UL
#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR 0x483000UL
#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR ( \
- is_uv1_hub() ? UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR : \
is_uv2_hub() ? UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR : \
is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR : \
/*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR)
@@ -3823,12 +3186,10 @@ union uvh_rh_gam_mmioh_overlay_config0_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR */
/* ========================================================================= */
-#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR uv_undefined("UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR")
#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR uv_undefined("UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR")
#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x1603000UL
#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x484000UL
#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR ( \
- is_uv1_hub() ? UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR : \
is_uv2_hub() ? UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR : \
is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR : \
/*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR)
@@ -3898,27 +3259,15 @@ union uvh_rh_gam_mmioh_overlay_config1_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR */
/* ========================================================================= */
-#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR uv_undefined("UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR")
#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR uv_undefined("UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR")
#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR ( \
- is_uv1_hub() ? UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR : \
is_uv2_hub() ? UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR : \
is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR : \
/*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR)
-#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30
-#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
-#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
-#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003fffc0000000UL
-#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
-#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
-#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-
#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 27
#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
@@ -3931,14 +3280,6 @@ union uvh_rh_gam_mmioh_overlay_config1_mmr_u {
union uvh_rh_gam_mmioh_overlay_config_mmr_u {
unsigned long v;
- struct uv1h_rh_gam_mmioh_overlay_config_mmr_s {
- unsigned long rsvd_0_29:30;
- unsigned long base:16; /* RW */
- unsigned long m_io:6; /* RW */
- unsigned long n_io:4; /* RW */
- unsigned long rsvd_56_62:7;
- unsigned long enable:1; /* RW */
- } s1;
struct uv2h_rh_gam_mmioh_overlay_config_mmr_s {
unsigned long rsvd_0_26:27;
unsigned long base:19; /* RW */
@@ -3952,22 +3293,18 @@ union uvh_rh_gam_mmioh_overlay_config_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR */
/* ========================================================================= */
-#define UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR uv_undefined("UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR")
#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR")
#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR 0x1603800UL
#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR 0x483800UL
#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR ( \
- is_uv1_hub() ? UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR : \
is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR : \
is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR : \
/*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR)
-#define UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH uv_undefined("UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH")
#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH")
#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH 128
#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH 128
#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH ( \
- is_uv1_hub() ? UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH : \
is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH : \
is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH : \
/*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH)
@@ -4005,22 +3342,18 @@ union uvh_rh_gam_mmioh_redirect_config0_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR */
/* ========================================================================= */
-#define UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR uv_undefined("UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR")
#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR")
#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR 0x1604800UL
#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR 0x484800UL
#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR ( \
- is_uv1_hub() ? UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR : \
is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR : \
is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR : \
/*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR)
-#define UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH uv_undefined("UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH")
#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH")
#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH 128
#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH 128
#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH ( \
- is_uv1_hub() ? UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH : \
is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH : \
is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH : \
/*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH)
@@ -4058,12 +3391,10 @@ union uvh_rh_gam_mmioh_redirect_config1_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */
/* ========================================================================= */
-#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
#define UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x480028UL
#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR ( \
- is_uv1_hub() ? UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR : \
is_uv2_hub() ? UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR : \
is_uv3_hub() ? UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR : \
/*is_uv4_hub*/ UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR)
@@ -4073,13 +3404,6 @@ union uvh_rh_gam_mmioh_redirect_config1_mmr_u {
#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
-#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46
-#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL
-#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
@@ -4109,13 +3433,6 @@ union uvh_rh_gam_mmr_overlay_config_mmr_u {
unsigned long rsvd_46_62:17;
unsigned long enable:1; /* RW */
} s;
- struct uv1h_rh_gam_mmr_overlay_config_mmr_s {
- unsigned long rsvd_0_25:26;
- unsigned long base:20; /* RW */
- unsigned long dual_hub:1; /* RW */
- unsigned long rsvd_47_62:16;
- unsigned long enable:1; /* RW */
- } s1;
struct uvxh_rh_gam_mmr_overlay_config_mmr_s {
unsigned long rsvd_0_25:26;
unsigned long base:20; /* RW */
@@ -4145,12 +3462,10 @@ union uvh_rh_gam_mmr_overlay_config_mmr_u {
/* ========================================================================= */
/* UVH_RTC */
/* ========================================================================= */
-#define UV1H_RTC 0x340000UL
#define UV2H_RTC 0x340000UL
#define UV3H_RTC 0x340000UL
#define UV4H_RTC 0xe0000UL
#define UVH_RTC ( \
- is_uv1_hub() ? UV1H_RTC : \
is_uv2_hub() ? UV2H_RTC : \
is_uv3_hub() ? UV3H_RTC : \
/*is_uv4_hub*/ UV4H_RTC)
@@ -4209,22 +3524,18 @@ union uvh_rtc1_int_config_u {
/* ========================================================================= */
/* UVH_SCRATCH5 */
/* ========================================================================= */
-#define UV1H_SCRATCH5 0x2d0200UL
#define UV2H_SCRATCH5 0x2d0200UL
#define UV3H_SCRATCH5 0x2d0200UL
#define UV4H_SCRATCH5 0xb0200UL
#define UVH_SCRATCH5 ( \
- is_uv1_hub() ? UV1H_SCRATCH5 : \
is_uv2_hub() ? UV2H_SCRATCH5 : \
is_uv3_hub() ? UV3H_SCRATCH5 : \
/*is_uv4_hub*/ UV4H_SCRATCH5)
-#define UV1H_SCRATCH5_32 0x778
#define UV2H_SCRATCH5_32 0x778
#define UV3H_SCRATCH5_32 0x778
#define UV4H_SCRATCH5_32 0x798
#define UVH_SCRATCH5_32 ( \
- is_uv1_hub() ? UV1H_SCRATCH5_32 : \
is_uv2_hub() ? UV2H_SCRATCH5_32 : \
is_uv3_hub() ? UV3H_SCRATCH5_32 : \
/*is_uv4_hub*/ UV4H_SCRATCH5_32)
@@ -4243,22 +3554,18 @@ union uvh_scratch5_u {
/* ========================================================================= */
/* UVH_SCRATCH5_ALIAS */
/* ========================================================================= */
-#define UV1H_SCRATCH5_ALIAS 0x2d0208UL
#define UV2H_SCRATCH5_ALIAS 0x2d0208UL
#define UV3H_SCRATCH5_ALIAS 0x2d0208UL
#define UV4H_SCRATCH5_ALIAS 0xb0208UL
#define UVH_SCRATCH5_ALIAS ( \
- is_uv1_hub() ? UV1H_SCRATCH5_ALIAS : \
is_uv2_hub() ? UV2H_SCRATCH5_ALIAS : \
is_uv3_hub() ? UV3H_SCRATCH5_ALIAS : \
/*is_uv4_hub*/ UV4H_SCRATCH5_ALIAS)
-#define UV1H_SCRATCH5_ALIAS_32 0x780
#define UV2H_SCRATCH5_ALIAS_32 0x780
#define UV3H_SCRATCH5_ALIAS_32 0x780
#define UV4H_SCRATCH5_ALIAS_32 0x7a0
#define UVH_SCRATCH5_ALIAS_32 ( \
- is_uv1_hub() ? UV1H_SCRATCH5_ALIAS_32 : \
is_uv2_hub() ? UV2H_SCRATCH5_ALIAS_32 : \
is_uv3_hub() ? UV3H_SCRATCH5_ALIAS_32 : \
/*is_uv4_hub*/ UV4H_SCRATCH5_ALIAS_32)
@@ -4267,12 +3574,10 @@ union uvh_scratch5_u {
/* ========================================================================= */
/* UVH_SCRATCH5_ALIAS_2 */
/* ========================================================================= */
-#define UV1H_SCRATCH5_ALIAS_2 0x2d0210UL
#define UV2H_SCRATCH5_ALIAS_2 0x2d0210UL
#define UV3H_SCRATCH5_ALIAS_2 0x2d0210UL
#define UV4H_SCRATCH5_ALIAS_2 0xb0210UL
#define UVH_SCRATCH5_ALIAS_2 ( \
- is_uv1_hub() ? UV1H_SCRATCH5_ALIAS_2 : \
is_uv2_hub() ? UV2H_SCRATCH5_ALIAS_2 : \
is_uv3_hub() ? UV3H_SCRATCH5_ALIAS_2 : \
/*is_uv4_hub*/ UV4H_SCRATCH5_ALIAS_2)
@@ -4719,23 +4024,6 @@ union uvxh_lb_bau_sb_activation_status_2_u {
};
/* ========================================================================= */
-/* UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK */
-/* ========================================================================= */
-#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK 0x320130UL
-#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_32 0x9f0
-
-#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_SHFT 0
-#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_MASK 0x00000000ffffffffUL
-
-union uv1h_lb_target_physical_apic_id_mask_u {
- unsigned long v;
- struct uv1h_lb_target_physical_apic_id_mask_s {
- unsigned long bit_enables:32; /* RW */
- unsigned long rsvd_32_63:32;
- } s1;
-};
-
-/* ========================================================================= */
/* UV3H_GR0_GAM_GR_CONFIG */
/* ========================================================================= */
#define UV3H_GR0_GAM_GR_CONFIG 0xc00028UL
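
With the UV1 leg dropped, every UVH_* selector left in this header keeps the same shape: a run-time chain over the remaining hub generations that resolves to the per-hub MMR offset. The following is a minimal, self-contained sketch of that idiom built from the UV*H_RTC values in the hunk above; the is_uv*_hub() stubs and the main() driver are illustrative stand-ins, not the kernel's real topology helpers.

	/*
	 * Illustrative sketch only: the is_uv*_hub() stubs stand in for the
	 * kernel's topology tests; the offsets are the UV*H_RTC values from
	 * the diff above.
	 */
	#include <stdio.h>

	static int uv_hub_rev = 3;	/* pretend this system has a UV3 hub */

	static int is_uv2_hub(void) { return uv_hub_rev == 2; }
	static int is_uv3_hub(void) { return uv_hub_rev == 3; }
	/* is_uv4_hub() is implied by the final branch, as in the header */

	#define UV2H_RTC 0x340000UL
	#define UV3H_RTC 0x340000UL
	#define UV4H_RTC 0xe0000UL

	/* Same shape as the UVH_RTC selector once the UV1 branch is gone */
	#define UVH_RTC (				\
		is_uv2_hub() ? UV2H_RTC :		\
		is_uv3_hub() ? UV3H_RTC :		\
		/*is_uv4_hub*/ UV4H_RTC)

	int main(void)
	{
		printf("RTC MMR offset for this hub: 0x%lx\n", UVH_RTC);
		return 0;
	}
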
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 8669c6bdbb84..600a141c8805 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -255,7 +255,7 @@ struct boot_params {
* currently supportd through this PV boot path.
* @X86_SUBARCH_INTEL_MID: Used for Intel MID (Mobile Internet Device) platform
* systems which do not have the PCI legacy interfaces.
- * @X86_SUBARCH_CE4100: Used for Intel CE media processor (CE4100) SoC for
+ * @X86_SUBARCH_CE4100: Used for Intel CE media processor (CE4100) SoC
* for settop boxes and media devices, the use of a subarch for CE4100
* is more of a hack...
*/
diff --git a/arch/x86/include/uapi/asm/hwcap2.h b/arch/x86/include/uapi/asm/hwcap2.h
index 8b2effe6efb8..5fdfcb47000f 100644
--- a/arch/x86/include/uapi/asm/hwcap2.h
+++ b/arch/x86/include/uapi/asm/hwcap2.h
@@ -5,4 +5,7 @@
/* MONITOR/MWAIT enabled in Ring 3 */
#define HWCAP2_RING3MWAIT (1 << 0)
+/* Kernel allows FSGSBASE instructions to be used in Ring 3 */
+#define HWCAP2_FSGSBASE (1 << 1)
+
#endif
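The new HWCAP2_FSGSBASE bit is exported to user space through the ELF auxiliary vector, so a program can probe for it before touching RDFSBASE/WRFSBASE. A minimal check, assuming glibc's getauxval() and defining the bit locally in case the installed uapi headers predate this change:

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_FSGSBASE
#define HWCAP2_FSGSBASE (1 << 1)	/* matches the uapi definition above */
#endif

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	if (hwcap2 & HWCAP2_FSGSBASE)
		puts("FSGSBASE instructions enabled for user space");
	else
		puts("FSGSBASE not available (CPU or kernel support missing)");
	return 0;
}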
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 8fd39ff74a49..c826cddae157 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -3,6 +3,7 @@
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/perf_event.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
@@ -15,6 +16,7 @@
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/bsearch.h>
+#include <linux/sync_core.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
@@ -53,7 +55,7 @@ __setup("noreplace-smp", setup_noreplace_smp);
#define DPRINTK(fmt, args...) \
do { \
if (debug_alternative) \
- printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args); \
+ printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args); \
} while (0)
#define DUMP_BYTES(buf, len, fmt, args...) \
@@ -64,7 +66,7 @@ do { \
if (!(len)) \
break; \
\
- printk(KERN_DEBUG fmt, ##args); \
+ printk(KERN_DEBUG pr_fmt(fmt), ##args); \
for (j = 0; j < (len) - 1; j++) \
printk(KERN_CONT "%02hhx ", buf[j]); \
printk(KERN_CONT "%02hhx\n", buf[j]); \
@@ -1001,6 +1003,7 @@ struct text_poke_loc {
s32 rel32;
u8 opcode;
const u8 text[POKE_MAX_OPCODE_SIZE];
+ u8 old;
};
struct bp_patching_desc {
@@ -1044,7 +1047,7 @@ static __always_inline int patch_cmp(const void *key, const void *elt)
return 0;
}
-int noinstr poke_int3_handler(struct pt_regs *regs)
+noinstr int poke_int3_handler(struct pt_regs *regs)
{
struct bp_patching_desc *desc;
struct text_poke_loc *tp;
@@ -1168,8 +1171,10 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
/*
* First step: add a int3 trap to the address that will be patched.
*/
- for (i = 0; i < nr_entries; i++)
+ for (i = 0; i < nr_entries; i++) {
+ tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
+ }
text_poke_sync();
@@ -1177,14 +1182,45 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
* Second step: update all but the first byte of the patched range.
*/
for (do_sync = 0, i = 0; i < nr_entries; i++) {
+ u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
int len = text_opcode_size(tp[i].opcode);
if (len - INT3_INSN_SIZE > 0) {
+ memcpy(old + INT3_INSN_SIZE,
+ text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
+ len - INT3_INSN_SIZE);
text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
(const char *)tp[i].text + INT3_INSN_SIZE,
len - INT3_INSN_SIZE);
do_sync++;
}
+
+ /*
+ * Emit a perf event to record the text poke, primarily to
+ * support Intel PT decoding which must walk the executable code
+ * to reconstruct the trace. The flow up to here is:
+ * - write INT3 byte
+ * - IPI-SYNC
+ * - write instruction tail
+ * At this point the actual control flow will be through the
+ * INT3 and handler and not hit the old or new instruction.
+ * Intel PT outputs FUP/TIP packets for the INT3, so the flow
+ * can still be decoded. Subsequently:
+ * - emit RECORD_TEXT_POKE with the new instruction
+ * - IPI-SYNC
+ * - write first byte
+ * - IPI-SYNC
+ * So before the text poke event timestamp, the decoder will see
+ * either the old instruction flow or FUP/TIP of INT3. After the
+ * text poke event timestamp, the decoder will see either the
+ * new instruction flow or FUP/TIP of INT3. Thus decoders can
+ * use the timestamp as the point at which to modify the
+ * executable code.
+ * The old instruction is recorded so that the event can be
+ * processed forwards or backwards.
+ */
+ perf_event_text_poke(text_poke_addr(&tp[i]), old, len,
+ tp[i].text, len);
}
if (do_sync) {
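The ordering spelled out in the new comment is easier to follow when collapsed into a single per-site sequence. A hedged, pseudocode-style sketch, with hypothetical write_bytes()/sync_cores() helpers standing in for the kernel's batched text_poke()/text_poke_sync() machinery (the real code processes all sites at each step and shares the IPI-syncs):

/* Hedged sketch only: write_bytes() and sync_cores() are stand-ins, not kernel APIs. */
static void poke_one_site_sketch(u8 *addr, const u8 *old, const u8 *new, int len)
{
	u8 int3 = INT3_INSN_OPCODE;

	write_bytes(addr, &int3, INT3_INSN_SIZE);	/* 1. arm the breakpoint */
	sync_cores();					/* 2. IPI-SYNC */
	write_bytes(addr + INT3_INSN_SIZE,		/* 3. write the tail */
		    new + INT3_INSN_SIZE, len - INT3_INSN_SIZE);
	perf_event_text_poke(addr, old, len, new, len);	/* 4. record old and new bytes */
	sync_cores();					/* 5. IPI-SYNC */
	write_bytes(addr, new, INT3_INSN_SIZE);		/* 6. write the first byte */
	sync_cores();					/* 7. IPI-SYNC */
}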
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index 17cb5b933dcf..e89031e9c847 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -6,7 +6,7 @@
* This allows to use PCI devices that only support 32bit addresses on systems
* with more than 4GB.
*
- * See Documentation/DMA-API-HOWTO.txt for the interface specification.
+ * See Documentation/core-api/dma-api-howto.rst for the interface specification.
*
* Copyright 2002 Andi Kleen, SuSE Labs.
*/
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 81ffcfbfaef2..21325a4a78b9 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2335,8 +2335,13 @@ static int mp_irqdomain_create(int ioapic)
static void ioapic_destroy_irqdomain(int idx)
{
+ struct ioapic_domain_cfg *cfg = &ioapics[idx].irqdomain_cfg;
+ struct fwnode_handle *fn = ioapics[idx].irqdomain->fwnode;
+
if (ioapics[idx].irqdomain) {
irq_domain_remove(ioapics[idx].irqdomain);
+ if (!cfg->dev)
+ irq_domain_free_fwnode(fn);
ioapics[idx].irqdomain = NULL;
}
}
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 7649da2478d8..dae32d948bf2 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -560,6 +560,10 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
* as that can corrupt the affinity move state.
*/
irqd_set_handle_enforce_irqctx(irqd);
+
+ /* Don't invoke affinity setter on deactivated interrupts */
+ irqd_set_affinity_on_activate(irqd);
+
/*
* Legacy vectors are already assigned when the IOAPIC
* takes them over. They stay on the same vector. This is
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 69e70ed0f5e6..0b6eea3f54e6 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -24,8 +24,6 @@
#include <asm/uv/uv.h>
#include <asm/apic.h>
-static DEFINE_PER_CPU(int, x2apic_extra_bits);
-
static enum uv_system_type uv_system_type;
static int uv_hubbed_system;
static int uv_hubless_system;
@@ -40,7 +38,7 @@ static u8 oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
static struct {
unsigned int apicid_shift;
unsigned int apicid_mask;
- unsigned int socketid_shift; /* aka pnode_shift for UV1/2/3 */
+ unsigned int socketid_shift; /* aka pnode_shift for UV2/3 */
unsigned int pnode_mask;
unsigned int gpa_shift;
unsigned int gnode_shift;
@@ -48,8 +46,6 @@ static struct {
static int uv_min_hub_revision_id;
-unsigned int uv_apicid_hibits;
-
static struct apic apic_x2apic_uv_x;
static struct uv_hub_info_s uv_hub_info_node0;
@@ -139,12 +135,8 @@ static void __init uv_tsc_check_sync(void)
/* Accommodate different UV arch BIOSes */
mmr = uv_early_read_mmr(UVH_TSC_SYNC_MMR);
mmr_shift =
- is_uv1_hub() ? 0 :
is_uv2_hub() ? UVH_TSC_SYNC_SHIFT_UV2K : UVH_TSC_SYNC_SHIFT;
- if (mmr_shift)
- sync_state = (mmr >> mmr_shift) & UVH_TSC_SYNC_MASK;
- else
- sync_state = 0;
+ sync_state = (mmr >> mmr_shift) & UVH_TSC_SYNC_MASK;
switch (sync_state) {
case UVH_TSC_SYNC_VALID:
@@ -223,21 +215,6 @@ static void __init early_get_apic_socketid_shift(void)
pr_info("UV: socketid_shift:%d pnode_mask:0x%x\n", uv_cpuid.socketid_shift, uv_cpuid.pnode_mask);
}
-/*
- * Add an extra bit as dictated by bios to the destination apicid of
- * interrupts potentially passing through the UV HUB. This prevents
- * a deadlock between interrupts and IO port operations.
- */
-static void __init uv_set_apicid_hibit(void)
-{
- union uv1h_lb_target_physical_apic_id_mask_u apicid_mask;
-
- if (is_uv1_hub()) {
- apicid_mask.v = uv_early_read_mmr(UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK);
- uv_apicid_hibits = apicid_mask.s1.bit_enables & UV_APICID_HIBIT_MASK;
- }
-}
-
static void __init uv_stringify(int len, char *to, char *from)
{
/* Relies on 'to' being NULL chars so result will be NULL terminated */
@@ -280,36 +257,25 @@ static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id)
/*
* Determine UV arch type.
- * SGI: UV100/1000
* SGI2: UV2000/3000
* SGI3: UV300 (truncated to 4 chars because of different varieties)
* SGI4: UV400 (truncated to 4 chars because of different varieties)
*/
- uv_hub_info->hub_revision =
- !strncmp(oem_id, "SGI4", 4) ? UV4_HUB_REVISION_BASE :
- !strncmp(oem_id, "SGI3", 4) ? UV3_HUB_REVISION_BASE :
- !strcmp(oem_id, "SGI2") ? UV2_HUB_REVISION_BASE :
- !strcmp(oem_id, "SGI") ? UV1_HUB_REVISION_BASE : 0;
-
- if (uv_hub_info->hub_revision == 0)
- goto badbios;
-
- switch (uv_hub_info->hub_revision) {
- case UV4_HUB_REVISION_BASE:
+ if (!strncmp(oem_id, "SGI4", 4)) {
+ uv_hub_info->hub_revision = UV4_HUB_REVISION_BASE;
uv_hubbed_system = 0x11;
- break;
- case UV3_HUB_REVISION_BASE:
+ } else if (!strncmp(oem_id, "SGI3", 4)) {
+ uv_hub_info->hub_revision = UV3_HUB_REVISION_BASE;
uv_hubbed_system = 0x9;
- break;
- case UV2_HUB_REVISION_BASE:
+ } else if (!strcmp(oem_id, "SGI2")) {
+ uv_hub_info->hub_revision = UV2_HUB_REVISION_BASE;
uv_hubbed_system = 0x5;
- break;
- case UV1_HUB_REVISION_BASE:
- uv_hubbed_system = 0x3;
- break;
+ } else {
+ uv_hub_info->hub_revision = 0;
+ goto badbios;
}
pnodeid = early_get_pnodeid();
@@ -323,14 +289,6 @@ static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id)
uv_system_type = UV_X2APIC;
uv_apic = 0;
- } else if (!strcmp(oem_table_id, "UVH")) {
- /* Only UV1 systems: */
- uv_system_type = UV_NON_UNIQUE_APIC;
- x86_platform.legacy.warm_reset = 0;
- __this_cpu_write(x2apic_extra_bits, pnodeid << uvh_apicid.s.pnode_shift);
- uv_set_apicid_hibit();
- uv_apic = 1;
-
} else if (!strcmp(oem_table_id, "UVL")) {
/* Only used for very small systems: */
uv_system_type = UV_LEGACY_APIC;
@@ -347,7 +305,7 @@ static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id)
badbios:
pr_err("UV: OEM_ID:%s OEM_TABLE_ID:%s\n", oem_id, oem_table_id);
- pr_err("Current BIOS not supported, update kernel and/or BIOS\n");
+ pr_err("Current UV Type or BIOS not supported\n");
BUG();
}
@@ -545,7 +503,6 @@ static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
int pnode;
pnode = uv_apicid_to_pnode(phys_apicid);
- phys_apicid |= uv_apicid_hibits;
val = (1UL << UVH_IPI_INT_SEND_SHFT) |
(phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
@@ -576,7 +533,7 @@ static void uv_send_IPI_one(int cpu, int vector)
dmode = dest_Fixed;
val = (1UL << UVH_IPI_INT_SEND_SHFT) |
- ((apicid | uv_apicid_hibits) << UVH_IPI_INT_APIC_ID_SHFT) |
+ (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
(dmode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
(vector << UVH_IPI_INT_VECTOR_SHFT);
@@ -634,22 +591,16 @@ static void uv_init_apic_ldr(void)
static u32 apic_uv_calc_apicid(unsigned int cpu)
{
- return apic_default_calc_apicid(cpu) | uv_apicid_hibits;
+ return apic_default_calc_apicid(cpu);
}
-static unsigned int x2apic_get_apic_id(unsigned long x)
+static unsigned int x2apic_get_apic_id(unsigned long id)
{
- unsigned int id;
-
- WARN_ON(preemptible() && num_online_cpus() > 1);
- id = x | __this_cpu_read(x2apic_extra_bits);
-
return id;
}
static u32 set_apic_id(unsigned int id)
{
- /* CHECKME: Do we need to mask out the xapic extra bits? */
return id;
}
@@ -721,11 +672,6 @@ static struct apic apic_x2apic_uv_x __ro_after_init = {
.safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
};
-static void set_x2apic_extra_bits(int pnode)
-{
- __this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
-}
-
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_LENGTH 3
#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
@@ -920,15 +866,7 @@ static __init void map_mmioh_high(int min_pnode, int max_pnode)
return;
}
- if (is_uv1_hub()) {
- mmr = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
- shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
- mmioh.v = uv_read_local_mmr(mmr);
- enable = !!mmioh.s1.enable;
- base = mmioh.s1.base;
- m_io = mmioh.s1.m_io;
- n_io = mmioh.s1.n_io;
- } else if (is_uv2_hub()) {
+ if (is_uv2_hub()) {
mmr = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
mmioh.v = uv_read_local_mmr(mmr);
@@ -936,16 +874,15 @@ static __init void map_mmioh_high(int min_pnode, int max_pnode)
base = mmioh.s2.base;
m_io = mmioh.s2.m_io;
n_io = mmioh.s2.n_io;
- } else {
- return;
- }
- if (enable) {
- max_pnode &= (1 << n_io) - 1;
- pr_info("UV: base:0x%lx shift:%d N_IO:%d M_IO:%d max_pnode:0x%x\n", base, shift, m_io, n_io, max_pnode);
- map_high("MMIOH", base, shift, m_io, max_pnode, map_uc);
- } else {
- pr_info("UV: MMIOH disabled\n");
+ if (enable) {
+ max_pnode &= (1 << n_io) - 1;
+ pr_info("UV: base:0x%lx shift:%d N_IO:%d M_IO:%d max_pnode:0x%x\n",
+ base, shift, m_io, n_io, max_pnode);
+ map_high("MMIOH", base, shift, m_io, max_pnode, map_uc);
+ } else {
+ pr_info("UV: MMIOH disabled\n");
+ }
}
}
@@ -1081,9 +1018,6 @@ void uv_cpu_init(void)
return;
uv_hub_info->nr_online_cpus++;
-
- if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
- set_x2apic_extra_bits(uv_hub_info->pnode);
}
struct mn {
@@ -1114,9 +1048,6 @@ static void get_mn(struct mn *mnp)
} else if (is_uv2_hub()) {
mnp->m_val = m_n_config.s2.m_skt;
mnp->n_lshift = mnp->m_val == 40 ? 40 : 39;
- } else if (is_uv1_hub()) {
- mnp->m_val = m_n_config.s1.m_skt;
- mnp->n_lshift = mnp->m_val;
}
mnp->m_shift = mnp->m_val ? 64 - mnp->m_val : 0;
}
@@ -1318,7 +1249,7 @@ static void __init build_socket_tables(void)
size_t bytes;
if (!gre) {
- if (is_uv1_hub() || is_uv2_hub() || is_uv3_hub()) {
+ if (is_uv2_hub() || is_uv3_hub()) {
pr_info("UV: No UVsystab socket table, ignoring\n");
return;
}
@@ -1500,8 +1431,7 @@ static void __init uv_system_init_hub(void)
unsigned short min_pnode = 9999, max_pnode = 0;
char *hub = is_uv4_hub() ? "UV400" :
is_uv3_hub() ? "UV300" :
- is_uv2_hub() ? "UV2000/3000" :
- is_uv1_hub() ? "UV100/1000" : NULL;
+ is_uv2_hub() ? "UV2000/3000" : NULL;
if (!hub) {
pr_err("UV: Unknown/unsupported UV hub\n");
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 6c5d0b978065..af31fbfba398 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -544,14 +544,12 @@ static void __init spectre_v1_select_mitigation(void)
* If FSGSBASE is enabled, the user can put a kernel address in
* GS, in which case SMAP provides no protection.
*
- * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
- * FSGSBASE enablement patches have been merged. ]
- *
* If FSGSBASE is disabled, the user can only put a user space
* address in GS. That makes an attack harder, but still
* possible if there's no SMAP protection.
*/
- if (!smap_works_speculatively()) {
+ if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
+ !smap_works_speculatively()) {
/*
* Mitigation can be provided from SWAPGS itself or
* PTI as the CR3 write in the Meltdown mitigation
@@ -764,10 +762,12 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
}
/*
- * If enhanced IBRS is enabled or SMT impossible, STIBP is not
+ * If STIBP is not available, enhanced IBRS is enabled, or SMT is impossible, STIBP is not
* required.
*/
- if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+ if (!boot_cpu_has(X86_FEATURE_STIBP) ||
+ !smt_possible ||
+ spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
return;
/*
@@ -779,12 +779,6 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
mode = SPECTRE_V2_USER_STRICT_PREFERRED;
- /*
- * If STIBP is not available, clear the STIBP mode.
- */
- if (!boot_cpu_has(X86_FEATURE_STIBP))
- mode = SPECTRE_V2_USER_NONE;
-
spectre_v2_user_stibp = mode;
set_mode:
@@ -1271,7 +1265,6 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
* Indirect branch speculation is always disabled in strict
* mode. It can neither be enabled if it was force-disabled
* by a previous prctl call.
-
*/
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 95c090a45b4b..965474d78cef 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -441,6 +441,22 @@ static void __init setup_cr_pinning(void)
static_key_enable(&cr_pinning.key);
}
+static __init int x86_nofsgsbase_setup(char *arg)
+{
+ /* Require an exact match without trailing characters. */
+ if (strlen(arg))
+ return 0;
+
+ /* Do not emit a message if the feature is not present. */
+ if (!boot_cpu_has(X86_FEATURE_FSGSBASE))
+ return 1;
+
+ setup_clear_cpu_cap(X86_FEATURE_FSGSBASE);
+ pr_info("FSGSBASE disabled via kernel command line\n");
+ return 1;
+}
+__setup("nofsgsbase", x86_nofsgsbase_setup);
+
/*
* Protection Keys are not available in 32-bit mode.
*/
@@ -1495,6 +1511,12 @@ static void identify_cpu(struct cpuinfo_x86 *c)
setup_smap(c);
setup_umip(c);
+ /* Enable FSGSBASE instructions if available. */
+ if (cpu_has(c, X86_FEATURE_FSGSBASE)) {
+ cr4_set_bits(X86_CR4_FSGSBASE);
+ elf_hwcap2 |= HWCAP2_FSGSBASE;
+ }
+
/*
* The vendor-specific functions might have changed features.
* Now we do "generic changes."
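Once identify_cpu() has set CR4.FSGSBASE and advertised HWCAP2_FSGSBASE, user space can read and write the FS/GS bases directly instead of going through arch_prctl(). A small sketch using the compiler intrinsics (build with -mfsgsbase); the instructions raise #UD and the process gets SIGILL if the kernel has not enabled them, for example after booting with nofsgsbase, so check the HWCAP2 bit first:

#include <stdio.h>
#include <sys/auxv.h>
#include <immintrin.h>

#ifndef HWCAP2_FSGSBASE
#define HWCAP2_FSGSBASE (1 << 1)
#endif

int main(void)
{
	if (!(getauxval(AT_HWCAP2) & HWCAP2_FSGSBASE)) {
		fprintf(stderr, "kernel has not enabled FSGSBASE\n");
		return 1;
	}

	/* RDFSBASE: read the FS base (normally the TLS pointer) directly. */
	printf("FS base: %#lx\n", (unsigned long)_readfsbase_u64());

	/* GS is unused by the 64-bit user ABI, so it is safe to play with. */
	_writegsbase_u64(0x12345000);
	printf("GS base: %#lx\n", (unsigned long)_readgsbase_u64());
	return 0;
}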
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 0ab48f1cdf84..b6b7b38dff5f 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -1156,6 +1156,8 @@ static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, 1),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, 1),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, 1),
+ X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 1),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, 1),
{}
};
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 14e4b4d17ee5..f43a78bde670 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -42,6 +42,7 @@
#include <linux/export.h>
#include <linux/jump_label.h>
#include <linux/set_memory.h>
+#include <linux/sync_core.h>
#include <linux/task_work.h>
#include <linux/hardirq.h>
@@ -244,6 +245,8 @@ static void __print_mce(struct mce *m)
pr_cont("ADDR %llx ", m->addr);
if (m->misc)
pr_cont("MISC %llx ", m->misc);
+ if (m->ppin)
+ pr_cont("PPIN %llx ", m->ppin);
if (mce_flags.smca) {
if (m->synd)
@@ -1212,7 +1215,7 @@ static void kill_me_maybe(struct callback_head *cb)
* backing the user stack, tracing that reads the user stack will cause
* potentially infinite recursion.
*/
-void noinstr do_machine_check(struct pt_regs *regs)
+noinstr void do_machine_check(struct pt_regs *regs)
{
DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
DECLARE_BITMAP(toclear, MAX_NR_BANKS);
@@ -1927,11 +1930,11 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
static __always_inline void exc_machine_check_user(struct pt_regs *regs)
{
- idtentry_enter_user(regs);
+ irqentry_enter_from_user_mode(regs);
instrumentation_begin();
machine_check_vector(regs);
instrumentation_end();
- idtentry_exit_user(regs);
+ irqentry_exit_to_user_mode(regs);
}
#ifdef CONFIG_X86_64
diff --git a/arch/x86/kernel/cpu/mce/dev-mcelog.c b/arch/x86/kernel/cpu/mce/dev-mcelog.c
index 43c466020ed5..03e51053592a 100644
--- a/arch/x86/kernel/cpu/mce/dev-mcelog.c
+++ b/arch/x86/kernel/cpu/mce/dev-mcelog.c
@@ -345,7 +345,7 @@ static __init int dev_mcelog_init_device(void)
int err;
mce_log_len = max(MCE_LOG_MIN_LEN, num_online_cpus());
- mcelog = kzalloc(sizeof(*mcelog) + mce_log_len * sizeof(struct mce), GFP_KERNEL);
+ mcelog = kzalloc(struct_size(mcelog, entry, mce_log_len), GFP_KERNEL);
if (!mcelog)
return -ENOMEM;
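struct_size() computes the same sizeof(*mcelog) + mce_log_len * sizeof(mcelog->entry[0]) total, but saturates to SIZE_MAX instead of wrapping if the multiplication or addition overflows, so an absurd CPU count cannot silently produce a short allocation. A rough user-space equivalent of the helper's semantics, assuming GCC/Clang overflow builtins:

#include <stddef.h>
#include <stdint.h>

/* Sketch of struct_size(): header plus n trailing elements, saturating to
 * SIZE_MAX on overflow (so the following kzalloc() simply fails). */
static size_t struct_size_demo(size_t header, size_t elem, size_t n)
{
	size_t bytes;

	if (__builtin_mul_overflow(n, elem, &bytes) ||
	    __builtin_add_overflow(bytes, header, &bytes))
		return SIZE_MAX;
	return bytes;
}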
diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
index 0593b192eb8f..7843ab3fde09 100644
--- a/arch/x86/kernel/cpu/mce/inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -511,7 +511,7 @@ static void do_inject(void)
*/
if (inj_type == DFR_INT_INJ) {
i_mce.status |= MCI_STATUS_DEFERRED;
- i_mce.status |= (i_mce.status & ~MCI_STATUS_UC);
+ i_mce.status &= ~MCI_STATUS_UC;
}
/*
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index baec68b7e010..ec6f0415bc6d 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -145,7 +145,6 @@ extern struct builtin_fw __end_builtin_fw[];
bool get_builtin_firmware(struct cpio_data *cd, const char *name)
{
-#ifdef CONFIG_FW_LOADER
struct builtin_fw *b_fw;
for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
@@ -155,7 +154,6 @@ bool get_builtin_firmware(struct cpio_data *cd, const char *name)
return true;
}
}
-#endif
return false;
}
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 7401cc12c3cc..48ce44576947 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -133,15 +133,15 @@ void show_ip(struct pt_regs *regs, const char *loglvl)
show_opcodes(regs, loglvl);
}
-void show_iret_regs(struct pt_regs *regs)
+void show_iret_regs(struct pt_regs *regs, const char *log_lvl)
{
- show_ip(regs, KERN_DEFAULT);
- printk(KERN_DEFAULT "RSP: %04x:%016lx EFLAGS: %08lx", (int)regs->ss,
+ show_ip(regs, log_lvl);
+ printk("%sRSP: %04x:%016lx EFLAGS: %08lx", log_lvl, (int)regs->ss,
regs->sp, regs->flags);
}
static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
- bool partial)
+ bool partial, const char *log_lvl)
{
/*
* These on_stack() checks aren't strictly necessary: the unwind code
@@ -153,7 +153,7 @@ static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
* they can be printed in the right context.
*/
if (!partial && on_stack(info, regs, sizeof(*regs))) {
- __show_regs(regs, SHOW_REGS_SHORT);
+ __show_regs(regs, SHOW_REGS_SHORT, log_lvl);
} else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
IRET_FRAME_SIZE)) {
@@ -162,7 +162,7 @@ static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
* full pt_regs might not have been saved yet. In that case
* just print the iret frame.
*/
- show_iret_regs(regs);
+ show_iret_regs(regs, log_lvl);
}
}
@@ -217,7 +217,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
printk("%s <%s>\n", log_lvl, stack_name);
if (regs)
- show_regs_if_on_stack(&stack_info, regs, partial);
+ show_regs_if_on_stack(&stack_info, regs, partial, log_lvl);
/*
* Scan the stack, printing any text addresses we find. At the
@@ -278,7 +278,7 @@ next:
/* if the frame has entry regs, print them */
regs = unwind_get_entry_regs(&state, &partial);
if (regs)
- show_regs_if_on_stack(&stack_info, regs, partial);
+ show_regs_if_on_stack(&stack_info, regs, partial, log_lvl);
}
if (stack_name)
@@ -352,7 +352,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
oops_exit();
/* Executive summary in case the oops scrolled away */
- __show_regs(&exec_summary_regs, SHOW_REGS_ALL);
+ __show_regs(&exec_summary_regs, SHOW_REGS_ALL, KERN_DEFAULT);
if (!signr)
return;
@@ -444,9 +444,12 @@ void die_addr(const char *str, struct pt_regs *regs, long err, long gp_addr)
void show_regs(struct pt_regs *regs)
{
+ enum show_regs_mode print_kernel_regs;
+
show_regs_print_info(KERN_DEFAULT);
- __show_regs(regs, user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL);
+ print_kernel_regs = user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL;
+ __show_regs(regs, print_kernel_regs, KERN_DEFAULT);
/*
* When in-kernel, we also print out the stack at the time of the fault.
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 15247b96c6ea..eb86a2b831b1 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -82,6 +82,45 @@ bool irq_fpu_usable(void)
}
EXPORT_SYMBOL(irq_fpu_usable);
+/*
+ * These must be called with preempt disabled. Returns
+ * 'true' if the FPU state is still intact and we can
+ * keep registers active.
+ *
+ * The legacy FNSAVE instruction cleared all FPU state
+ * unconditionally, so registers are essentially destroyed.
+ * Modern FPU state can be kept in registers, if there are
+ * no pending FP exceptions.
+ */
+int copy_fpregs_to_fpstate(struct fpu *fpu)
+{
+ if (likely(use_xsave())) {
+ copy_xregs_to_kernel(&fpu->state.xsave);
+
+ /*
+ * AVX512 state is tracked here because its use is
+ * known to slow the max clock speed of the core.
+ */
+ if (fpu->state.xsave.header.xfeatures & XFEATURE_MASK_AVX512)
+ fpu->avx512_timestamp = jiffies;
+ return 1;
+ }
+
+ if (likely(use_fxsr())) {
+ copy_fxregs_to_kernel(fpu);
+ return 1;
+ }
+
+ /*
+ * Legacy FPU register saving, FNSAVE always clears FPU registers,
+ * so we have to mark them inactive:
+ */
+ asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));
+
+ return 0;
+}
+EXPORT_SYMBOL(copy_fpregs_to_fpstate);
+
void kernel_fpu_begin(void)
{
preempt_disable();
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index ad3a2b37927d..be2a68a09d19 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -233,8 +233,10 @@ void fpu__init_cpu_xstate(void)
/*
* MSR_IA32_XSS sets supervisor states managed by XSAVES.
*/
- if (boot_cpu_has(X86_FEATURE_XSAVES))
- wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor());
+ if (boot_cpu_has(X86_FEATURE_XSAVES)) {
+ wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
+ xfeatures_mask_dynamic());
+ }
}
static bool xfeature_enabled(enum xfeature xfeature)
@@ -486,7 +488,7 @@ static int xfeature_uncompacted_offset(int xfeature_nr)
return ebx;
}
-static int xfeature_size(int xfeature_nr)
+int xfeature_size(int xfeature_nr)
{
u32 eax, ebx, ecx, edx;
@@ -598,7 +600,8 @@ static void check_xstate_against_struct(int nr)
*/
if ((nr < XFEATURE_YMM) ||
(nr >= XFEATURE_MAX) ||
- (nr == XFEATURE_PT_UNIMPLEMENTED_SO_FAR)) {
+ (nr == XFEATURE_PT_UNIMPLEMENTED_SO_FAR) ||
+ ((nr >= XFEATURE_RSRVD_COMP_10) && (nr <= XFEATURE_LBR))) {
WARN_ONCE(1, "no structure for xstate: %d\n", nr);
XSTATE_WARN_ON(1);
}
@@ -847,8 +850,10 @@ void fpu__resume_cpu(void)
* Restore IA32_XSS. The same CPUID bit enumerates support
* of XSAVES and MSR_IA32_XSS.
*/
- if (boot_cpu_has(X86_FEATURE_XSAVES))
- wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor());
+ if (boot_cpu_has(X86_FEATURE_XSAVES)) {
+ wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
+ xfeatures_mask_dynamic());
+ }
}
/*
@@ -1356,6 +1361,78 @@ void copy_supervisor_to_kernel(struct xregs_state *xstate)
}
}
+/**
+ * copy_dynamic_supervisor_to_kernel() - Save dynamic supervisor states to
+ * an xsave area
+ * @xstate: A pointer to an xsave area
+ * @mask: Represent the dynamic supervisor features saved into the xsave area
+ *
+ * Only the dynamic supervisor states set in the mask are saved into the xsave
+ * area (see the comment in XFEATURE_MASK_DYNAMIC for the details of the
+ * dynamic supervisor feature). Besides the dynamic supervisor states, the legacy
+ * region and XSAVE header are also saved into the xsave area. The supervisor
+ * features in the XFEATURE_MASK_SUPERVISOR_SUPPORTED and
+ * XFEATURE_MASK_SUPERVISOR_UNSUPPORTED are not saved.
+ *
+ * The xsave area must be 64-byte aligned.
+ */
+void copy_dynamic_supervisor_to_kernel(struct xregs_state *xstate, u64 mask)
+{
+ u64 dynamic_mask = xfeatures_mask_dynamic() & mask;
+ u32 lmask, hmask;
+ int err;
+
+ if (WARN_ON_FPU(!boot_cpu_has(X86_FEATURE_XSAVES)))
+ return;
+
+ if (WARN_ON_FPU(!dynamic_mask))
+ return;
+
+ lmask = dynamic_mask;
+ hmask = dynamic_mask >> 32;
+
+ XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
+
+ /* Should never fault when copying to a kernel buffer */
+ WARN_ON_FPU(err);
+}
+
+/**
+ * copy_kernel_to_dynamic_supervisor() - Restore dynamic supervisor states from
+ * an xsave area
+ * @xstate: A pointer to an xsave area
+ * @mask: Represent the dynamic supervisor features restored from the xsave area
+ *
+ * Only the dynamic supervisor states set in the mask are restored from the
+ * xsave area (see the comment in XFEATURE_MASK_DYNAMIC for the details of
+ * the dynamic supervisor feature). Besides the dynamic supervisor states, the
+ * legacy region and XSAVE header are also restored from the xsave area. The
+ * supervisor features in the XFEATURE_MASK_SUPERVISOR_SUPPORTED and
+ * XFEATURE_MASK_SUPERVISOR_UNSUPPORTED are not restored.
+ *
+ * The xsave area must be 64-byte aligned.
+ */
+void copy_kernel_to_dynamic_supervisor(struct xregs_state *xstate, u64 mask)
+{
+ u64 dynamic_mask = xfeatures_mask_dynamic() & mask;
+ u32 lmask, hmask;
+ int err;
+
+ if (WARN_ON_FPU(!boot_cpu_has(X86_FEATURE_XSAVES)))
+ return;
+
+ if (WARN_ON_FPU(!dynamic_mask))
+ return;
+
+ lmask = dynamic_mask;
+ hmask = dynamic_mask >> 32;
+
+ XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
+
+ /* Should never fault when copying from a kernel buffer */
+ WARN_ON_FPU(err);
+}
+
#ifdef CONFIG_PROC_PID_ARCH_STATUS
/*
* Report the amount of time elapsed in millisecond since last AVX512
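These two helpers are the hook the rest of this series uses for dynamic supervisor state such as architectural LBRs. A hedged sketch of a caller, assuming XFEATURE_MASK_LBR is the mask bit defined for XFEATURE_LBR elsewhere in the series and that the caller provides the required 64-byte-aligned buffer:

/* Hypothetical save area; the 64-byte alignment is required by XSAVES. */
struct lbr_xsave_ctx {
	struct xregs_state xsave __attribute__((aligned(64)));
};

static void lbr_ctx_save(struct lbr_xsave_ctx *ctx)
{
	copy_dynamic_supervisor_to_kernel(&ctx->xsave, XFEATURE_MASK_LBR);
}

static void lbr_ctx_restore(struct lbr_xsave_ctx *ctx)
{
	copy_kernel_to_dynamic_supervisor(&ctx->xsave, XFEATURE_MASK_LBR);
}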
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index 0db21206f2f3..7ecf9babf0cb 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -160,7 +160,7 @@ static const __initconst struct idt_data apic_idts[] = {
/* Must be page-aligned because the real IDT is used in the cpu entry area */
static gate_desc idt_table[IDT_ENTRIES] __page_aligned_bss;
-struct desc_ptr idt_descr __ro_after_init = {
+static struct desc_ptr idt_descr __ro_after_init = {
.size = IDT_TABLE_SIZE - 1,
.address = (unsigned long) idt_table,
};
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index db6578d45157..57c2ecf43134 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -170,15 +170,6 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
if (!current_ei->efi_memmap_size)
return 0;
- /*
- * If 1:1 mapping is not enabled, second kernel can not setup EFI
- * and use EFI run time services. User space will have to pass
- * acpi_rsdp=<addr> on kernel command line to make second kernel boot
- * without efi.
- */
- if (efi_have_uv1_memmap())
- return 0;
-
params->secure_boot = boot_params.secure_boot;
ei->efi_loader_signature = current_ei->efi_loader_signature;
ei->efi_systab = current_ei->efi_systab;
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index ada39ddbc922..fdadc37d72af 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -33,6 +33,7 @@
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/sched/debug.h>
+#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
@@ -472,6 +473,9 @@ static int arch_copy_kprobe(struct kprobe *p)
/* Also, displacement change doesn't affect the first byte */
p->opcode = buf[0];
+ p->ainsn.tp_len = len;
+ perf_event_text_poke(p->ainsn.insn, NULL, 0, buf, len);
+
/* OK, write back the instruction(s) into ROX insn buffer */
text_poke(p->ainsn.insn, buf, len);
@@ -503,12 +507,18 @@ int arch_prepare_kprobe(struct kprobe *p)
void arch_arm_kprobe(struct kprobe *p)
{
- text_poke(p->addr, ((unsigned char []){INT3_INSN_OPCODE}), 1);
+ u8 int3 = INT3_INSN_OPCODE;
+
+ text_poke(p->addr, &int3, 1);
text_poke_sync();
+ perf_event_text_poke(p->addr, &p->opcode, 1, &int3, 1);
}
void arch_disarm_kprobe(struct kprobe *p)
{
+ u8 int3 = INT3_INSN_OPCODE;
+
+ perf_event_text_poke(p->addr, &int3, 1, &p->opcode, 1);
text_poke(p->addr, &p->opcode, 1);
text_poke_sync();
}
@@ -516,6 +526,9 @@ void arch_disarm_kprobe(struct kprobe *p)
void arch_remove_kprobe(struct kprobe *p)
{
if (p->ainsn.insn) {
+ /* Record the perf event before freeing the slot */
+ perf_event_text_poke(p->ainsn.insn, p->ainsn.insn,
+ p->ainsn.tp_len, NULL, 0);
free_insn_slot(p->ainsn.insn, p->ainsn.boostable);
p->ainsn.insn = NULL;
}
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 7af4c61dde52..40f380461e6d 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -6,6 +6,7 @@
* Copyright (C) Hitachi Ltd., 2012
*/
#include <linux/kprobes.h>
+#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -352,8 +353,15 @@ int arch_within_optimized_kprobe(struct optimized_kprobe *op,
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
- if (op->optinsn.insn) {
- free_optinsn_slot(op->optinsn.insn, dirty);
+ u8 *slot = op->optinsn.insn;
+ if (slot) {
+ int len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE;
+
+ /* Record the perf event before freeing the slot */
+ if (dirty)
+ perf_event_text_poke(slot, slot, len, NULL, 0);
+
+ free_optinsn_slot(slot, dirty);
op->optinsn.insn = NULL;
op->optinsn.size = 0;
}
@@ -424,8 +432,15 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
(u8 *)op->kp.addr + op->optinsn.size);
len += JMP32_INSN_SIZE;
+ /*
+ * Note len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE is also
+ * used in __arch_remove_optimized_kprobe().
+ */
+
/* We have to use text_poke() for instruction buffer because it is RO */
+ perf_event_text_poke(slot, NULL, 0, buf, len);
text_poke(slot, buf, len);
+
ret = 0;
out:
kfree(buf);
@@ -477,10 +492,23 @@ void arch_optimize_kprobes(struct list_head *oplist)
*/
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
- arch_arm_kprobe(&op->kp);
- text_poke(op->kp.addr + INT3_INSN_SIZE,
- op->optinsn.copied_insn, DISP32_SIZE);
+ u8 new[JMP32_INSN_SIZE] = { INT3_INSN_OPCODE, };
+ u8 old[JMP32_INSN_SIZE];
+ u8 *addr = op->kp.addr;
+
+ memcpy(old, op->kp.addr, JMP32_INSN_SIZE);
+ memcpy(new + INT3_INSN_SIZE,
+ op->optinsn.copied_insn,
+ JMP32_INSN_SIZE - INT3_INSN_SIZE);
+
+ text_poke(addr, new, INT3_INSN_SIZE);
text_poke_sync();
+ text_poke(addr + INT3_INSN_SIZE,
+ new + INT3_INSN_SIZE,
+ JMP32_INSN_SIZE - INT3_INSN_SIZE);
+ text_poke_sync();
+
+ perf_event_text_poke(op->kp.addr, old, JMP32_INSN_SIZE, new, JMP32_INSN_SIZE);
}
/*
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index df63786e7bfa..233c77d056c9 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -233,7 +233,7 @@ EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);
noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
u32 reason = kvm_read_and_reset_apf_flags();
- bool rcu_exit;
+ irqentry_state_t state;
switch (reason) {
case KVM_PV_REASON_PAGE_NOT_PRESENT:
@@ -243,7 +243,7 @@ noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
return false;
}
- rcu_exit = idtentry_enter_cond_rcu(regs);
+ state = irqentry_enter(regs);
instrumentation_begin();
/*
@@ -264,7 +264,7 @@ noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
}
instrumentation_end();
- idtentry_exit_cond_rcu(regs, rcu_exit);
+ irqentry_exit(regs, state);
return true;
}
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 1547be359d7f..49dcfb85e773 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -42,6 +42,14 @@
static struct class *msr_class;
static enum cpuhp_state cpuhp_msr_state;
+enum allow_write_msrs {
+ MSR_WRITES_ON,
+ MSR_WRITES_OFF,
+ MSR_WRITES_DEFAULT,
+};
+
+static enum allow_write_msrs allow_writes = MSR_WRITES_DEFAULT;
+
static ssize_t msr_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -70,6 +78,24 @@ static ssize_t msr_read(struct file *file, char __user *buf,
return bytes ? bytes : err;
}
+static int filter_write(u32 reg)
+{
+ switch (allow_writes) {
+ case MSR_WRITES_ON: return 0;
+ case MSR_WRITES_OFF: return -EPERM;
+ default: break;
+ }
+
+ if (reg == MSR_IA32_ENERGY_PERF_BIAS)
+ return 0;
+
+ pr_err_ratelimited("Write to unrecognized MSR 0x%x by %s\n"
+ "Please report to x86@kernel.org\n",
+ reg, current->comm);
+
+ return 0;
+}
+
static ssize_t msr_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -84,6 +110,10 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
if (err)
return err;
+ err = filter_write(reg);
+ if (err)
+ return err;
+
if (count % 8)
return -EINVAL; /* Invalid chunk size */
@@ -92,9 +122,13 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
err = -EFAULT;
break;
}
+
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+
err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]);
if (err)
break;
+
tmp += 2;
bytes += 8;
}
@@ -242,6 +276,41 @@ static void __exit msr_exit(void)
}
module_exit(msr_exit)
+static int set_allow_writes(const char *val, const struct kernel_param *cp)
+{
+ /* val is NUL-terminated, see kernfs_fop_write() */
+ char *s = strstrip((char *)val);
+
+ if (!strcmp(s, "on"))
+ allow_writes = MSR_WRITES_ON;
+ else if (!strcmp(s, "off"))
+ allow_writes = MSR_WRITES_OFF;
+ else
+ allow_writes = MSR_WRITES_DEFAULT;
+
+ return 0;
+}
+
+static int get_allow_writes(char *buf, const struct kernel_param *kp)
+{
+ const char *res;
+
+ switch (allow_writes) {
+ case MSR_WRITES_ON: res = "on"; break;
+ case MSR_WRITES_OFF: res = "off"; break;
+ default: res = "default"; break;
+ }
+
+ return sprintf(buf, "%s\n", res);
+}
+
+static const struct kernel_param_ops allow_writes_ops = {
+ .set = set_allow_writes,
+ .get = get_allow_writes
+};
+
+module_param_cb(allow_writes, &allow_writes_ops, NULL, 0600);
+
MODULE_AUTHOR("H. Peter Anvin <hpa@zytor.com>");
MODULE_DESCRIPTION("x86 generic MSR driver");
MODULE_LICENSE("GPL");
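filter_write() sits in the /dev/cpu/<n>/msr write path, so the behavior change is visible to any user-space tool that pokes MSRs: every write now taints the kernel, writes to anything other than MSR_IA32_ENERGY_PERF_BIAS also log a ratelimited warning by default, and allow_writes=off (settable at runtime through /sys/module/msr/parameters/allow_writes) rejects them with -EPERM. A minimal sketch of such a write, which needs root and the msr module loaded; the MSR number is passed as the file offset and the value as an 8-byte chunk:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* 0x1b0 is MSR_IA32_ENERGY_PERF_BIAS, the one MSR the new filter
	 * still treats as recognized in the default mode. */
	uint64_t val = 6;			/* "normal" energy/perf bias */
	int fd = open("/dev/cpu/0/msr", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	if (pwrite(fd, &val, sizeof(val), 0x1b0) != sizeof(val)) {
		perror("pwrite");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}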
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index d7c5e44b26f7..4fc9954a9560 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -330,7 +330,6 @@ static noinstr void default_do_nmi(struct pt_regs *regs)
__this_cpu_write(last_nmi_rip, regs->ip);
instrumentation_begin();
- trace_hardirqs_off_finish();
handled = nmi_handle(NMI_LOCAL, regs);
__this_cpu_add(nmi_stats.normal, handled);
@@ -417,8 +416,6 @@ static noinstr void default_do_nmi(struct pt_regs *regs)
unknown_nmi_error(reason, regs);
out:
- if (regs->flags & X86_EFLAGS_IF)
- trace_hardirqs_on_prepare();
instrumentation_end();
}
@@ -478,6 +475,8 @@ static DEFINE_PER_CPU(unsigned long, nmi_dr7);
DEFINE_IDTENTRY_RAW(exc_nmi)
{
+ bool irq_state;
+
if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id()))
return;
@@ -491,14 +490,14 @@ nmi_restart:
this_cpu_write(nmi_dr7, local_db_save());
- nmi_enter();
+ irq_state = idtentry_enter_nmi(regs);
inc_irq_stat(__nmi_count);
if (!ignore_nmis)
default_do_nmi(regs);
- nmi_exit();
+ idtentry_exit_nmi(regs, irq_state);
local_db_restore(this_cpu_read(nmi_dr7));
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index fe67dbd76e51..994d8393f2f7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -121,8 +121,8 @@ static int set_new_tls(struct task_struct *p, unsigned long tls)
return do_set_thread_area_64(p, ARCH_SET_FS, tls);
}
-int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
- unsigned long arg, struct task_struct *p, unsigned long tls)
+int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
+ struct task_struct *p, unsigned long tls)
{
struct inactive_task_frame *frame;
struct fork_frame *fork_frame;
@@ -140,10 +140,12 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
#ifdef CONFIG_X86_64
- savesegment(gs, p->thread.gsindex);
- p->thread.gsbase = p->thread.gsindex ? 0 : current->thread.gsbase;
- savesegment(fs, p->thread.fsindex);
- p->thread.fsbase = p->thread.fsindex ? 0 : current->thread.fsbase;
+ current_save_fsgs();
+ p->thread.fsindex = current->thread.fsindex;
+ p->thread.fsbase = current->thread.fsbase;
+ p->thread.gsindex = current->thread.gsindex;
+ p->thread.gsbase = current->thread.gsbase;
+
savesegment(es, p->thread.es);
savesegment(ds, p->thread.ds);
#else
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index acfd6d2a0cbf..4f2f54e1281c 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -56,7 +56,8 @@
#include "process.h"
-void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
+void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
+ const char *log_lvl)
{
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
unsigned long d0, d1, d2, d3, d6, d7;
@@ -67,14 +68,14 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
else
savesegment(gs, gs);
- show_ip(regs, KERN_DEFAULT);
+ show_ip(regs, log_lvl);
- printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
- regs->ax, regs->bx, regs->cx, regs->dx);
- printk(KERN_DEFAULT "ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
- regs->si, regs->di, regs->bp, regs->sp);
- printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
- (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, regs->ss, regs->flags);
+ printk("%sEAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
+ log_lvl, regs->ax, regs->bx, regs->cx, regs->dx);
+ printk("%sESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
+ log_lvl, regs->si, regs->di, regs->bp, regs->sp);
+ printk("%sDS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
+ log_lvl, (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, regs->ss, regs->flags);
if (mode != SHOW_REGS_ALL)
return;
@@ -83,8 +84,8 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
cr2 = read_cr2();
cr3 = __read_cr3();
cr4 = __read_cr4();
- printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
- cr0, cr2, cr3, cr4);
+ printk("%sCR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
+ log_lvl, cr0, cr2, cr3, cr4);
get_debugreg(d0, 0);
get_debugreg(d1, 1);
@@ -98,10 +99,10 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
(d6 == DR6_RESERVED) && (d7 == 0x400))
return;
- printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
- d0, d1, d2, d3);
- printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n",
- d6, d7);
+ printk("%sDR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
+ log_lvl, d0, d1, d2, d3);
+ printk("%sDR6: %08lx DR7: %08lx\n",
+ log_lvl, d6, d7);
}
void release_thread(struct task_struct *dead_task)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9a97415b2139..d6f946707270 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -62,30 +62,31 @@
#include "process.h"
/* Prints also some state that isn't saved in the pt_regs */
-void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
+void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
+ const char *log_lvl)
{
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
unsigned long d0, d1, d2, d3, d6, d7;
unsigned int fsindex, gsindex;
unsigned int ds, es;
- show_iret_regs(regs);
+ show_iret_regs(regs, log_lvl);
if (regs->orig_ax != -1)
pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
else
pr_cont("\n");
- printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
- regs->ax, regs->bx, regs->cx);
- printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
- regs->dx, regs->si, regs->di);
- printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
- regs->bp, regs->r8, regs->r9);
- printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
- regs->r10, regs->r11, regs->r12);
- printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
- regs->r13, regs->r14, regs->r15);
+ printk("%sRAX: %016lx RBX: %016lx RCX: %016lx\n",
+ log_lvl, regs->ax, regs->bx, regs->cx);
+ printk("%sRDX: %016lx RSI: %016lx RDI: %016lx\n",
+ log_lvl, regs->dx, regs->si, regs->di);
+ printk("%sRBP: %016lx R08: %016lx R09: %016lx\n",
+ log_lvl, regs->bp, regs->r8, regs->r9);
+ printk("%sR10: %016lx R11: %016lx R12: %016lx\n",
+ log_lvl, regs->r10, regs->r11, regs->r12);
+ printk("%sR13: %016lx R14: %016lx R15: %016lx\n",
+ log_lvl, regs->r13, regs->r14, regs->r15);
if (mode == SHOW_REGS_SHORT)
return;
@@ -93,8 +94,8 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
if (mode == SHOW_REGS_USER) {
rdmsrl(MSR_FS_BASE, fs);
rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
- printk(KERN_DEFAULT "FS: %016lx GS: %016lx\n",
- fs, shadowgs);
+ printk("%sFS: %016lx GS: %016lx\n",
+ log_lvl, fs, shadowgs);
return;
}
@@ -112,12 +113,12 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
cr3 = __read_cr3();
cr4 = __read_cr4();
- printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
- fs, fsindex, gs, gsindex, shadowgs);
- printk(KERN_DEFAULT "CS: %04lx DS: %04x ES: %04x CR0: %016lx\n", regs->cs, ds,
- es, cr0);
- printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
- cr4);
+ printk("%sFS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
+ log_lvl, fs, fsindex, gs, gsindex, shadowgs);
+ printk("%sCS: %04lx DS: %04x ES: %04x CR0: %016lx\n",
+ log_lvl, regs->cs, ds, es, cr0);
+ printk("%sCR2: %016lx CR3: %016lx CR4: %016lx\n",
+ log_lvl, cr2, cr3, cr4);
get_debugreg(d0, 0);
get_debugreg(d1, 1);
@@ -129,14 +130,14 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
/* Only print out debug registers if they are in their non-default state. */
if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
(d6 == DR6_RESERVED) && (d7 == 0x400))) {
- printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
- d0, d1, d2);
- printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
- d3, d6, d7);
+ printk("%sDR0: %016lx DR1: %016lx DR2: %016lx\n",
+ log_lvl, d0, d1, d2);
+ printk("%sDR3: %016lx DR6: %016lx DR7: %016lx\n",
+ log_lvl, d3, d6, d7);
}
if (boot_cpu_has(X86_FEATURE_OSPKE))
- printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
+ printk("%sPKRU: %08x\n", log_lvl, read_pkru());
}
void release_thread(struct task_struct *dead_task)
@@ -150,6 +151,56 @@ enum which_selector {
};
/*
+ * Out of line to be protected from kprobes and tracing. If this would be
+ * traced or probed then any access to a per CPU variable happens with
+ * the wrong GS.
+ *
+ * It is not used on Xen paravirt. When paravirt support is needed, it
+ * needs to be renamed with native_ prefix.
+ */
+static noinstr unsigned long __rdgsbase_inactive(void)
+{
+ unsigned long gsbase;
+
+ lockdep_assert_irqs_disabled();
+
+ if (!static_cpu_has(X86_FEATURE_XENPV)) {
+ native_swapgs();
+ gsbase = rdgsbase();
+ native_swapgs();
+ } else {
+ instrumentation_begin();
+ rdmsrl(MSR_KERNEL_GS_BASE, gsbase);
+ instrumentation_end();
+ }
+
+ return gsbase;
+}
+
+/*
+ * Out of line to be protected from kprobes and tracing. If this would be
+ * traced or probed then any access to a per CPU variable happens with
+ * the wrong GS.
+ *
+ * It is not used on Xen paravirt. When paravirt support is needed, it
+ * needs to be renamed with native_ prefix.
+ */
+static noinstr void __wrgsbase_inactive(unsigned long gsbase)
+{
+ lockdep_assert_irqs_disabled();
+
+ if (!static_cpu_has(X86_FEATURE_XENPV)) {
+ native_swapgs();
+ wrgsbase(gsbase);
+ native_swapgs();
+ } else {
+ instrumentation_begin();
+ wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
+ instrumentation_end();
+ }
+}
+
+/*
* Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
* not available. The goal is to be reasonably fast on non-FSGSBASE systems.
* It's forcibly inlined because it'll generate better code and this function
@@ -198,22 +249,35 @@ static __always_inline void save_fsgs(struct task_struct *task)
{
savesegment(fs, task->thread.fsindex);
savesegment(gs, task->thread.gsindex);
- save_base_legacy(task, task->thread.fsindex, FS);
- save_base_legacy(task, task->thread.gsindex, GS);
+ if (static_cpu_has(X86_FEATURE_FSGSBASE)) {
+ /*
+ * If FSGSBASE is enabled, we can't make any useful guesses
+ * about the base, and user code expects us to save the current
+ * value. Fortunately, reading the base directly is efficient.
+ */
+ task->thread.fsbase = rdfsbase();
+ task->thread.gsbase = __rdgsbase_inactive();
+ } else {
+ save_base_legacy(task, task->thread.fsindex, FS);
+ save_base_legacy(task, task->thread.gsindex, GS);
+ }
}
-#if IS_ENABLED(CONFIG_KVM)
/*
* While a process is running, current->thread.fsbase and current->thread.gsbase
- * may not match the corresponding CPU registers (see save_base_legacy()). KVM
- * wants an efficient way to save and restore FSBASE and GSBASE.
- * When FSGSBASE extensions are enabled, this will have to use RD{FS,GS}BASE.
+ * may not match the corresponding CPU registers (see save_base_legacy()).
*/
-void save_fsgs_for_kvm(void)
+void current_save_fsgs(void)
{
+ unsigned long flags;
+
+ /* Interrupts need to be off for FSGSBASE */
+ local_irq_save(flags);
save_fsgs(current);
+ local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(save_fsgs_for_kvm);
+#if IS_ENABLED(CONFIG_KVM)
+EXPORT_SYMBOL_GPL(current_save_fsgs);
#endif
static __always_inline void loadseg(enum which_selector which,
@@ -278,14 +342,26 @@ static __always_inline void load_seg_legacy(unsigned short prev_index,
static __always_inline void x86_fsgsbase_load(struct thread_struct *prev,
struct thread_struct *next)
{
- load_seg_legacy(prev->fsindex, prev->fsbase,
- next->fsindex, next->fsbase, FS);
- load_seg_legacy(prev->gsindex, prev->gsbase,
- next->gsindex, next->gsbase, GS);
+ if (static_cpu_has(X86_FEATURE_FSGSBASE)) {
+ /* Update the FS and GS selectors if they could have changed. */
+ if (unlikely(prev->fsindex || next->fsindex))
+ loadseg(FS, next->fsindex);
+ if (unlikely(prev->gsindex || next->gsindex))
+ loadseg(GS, next->gsindex);
+
+ /* Update the bases. */
+ wrfsbase(next->fsbase);
+ __wrgsbase_inactive(next->gsbase);
+ } else {
+ load_seg_legacy(prev->fsindex, prev->fsbase,
+ next->fsindex, next->fsbase, FS);
+ load_seg_legacy(prev->gsindex, prev->gsbase,
+ next->gsindex, next->gsbase, GS);
+ }
}
-static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
- unsigned short selector)
+unsigned long x86_fsgsbase_read_task(struct task_struct *task,
+ unsigned short selector)
{
unsigned short idx = selector >> 3;
unsigned long base;
@@ -327,13 +403,44 @@ static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
return base;
}
+unsigned long x86_gsbase_read_cpu_inactive(void)
+{
+ unsigned long gsbase;
+
+ if (static_cpu_has(X86_FEATURE_FSGSBASE)) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ gsbase = __rdgsbase_inactive();
+ local_irq_restore(flags);
+ } else {
+ rdmsrl(MSR_KERNEL_GS_BASE, gsbase);
+ }
+
+ return gsbase;
+}
+
+void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
+{
+ if (static_cpu_has(X86_FEATURE_FSGSBASE)) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __wrgsbase_inactive(gsbase);
+ local_irq_restore(flags);
+ } else {
+ wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
+ }
+}
+
unsigned long x86_fsbase_read_task(struct task_struct *task)
{
unsigned long fsbase;
if (task == current)
fsbase = x86_fsbase_read_cpu();
- else if (task->thread.fsindex == 0)
+ else if (static_cpu_has(X86_FEATURE_FSGSBASE) ||
+ (task->thread.fsindex == 0))
fsbase = task->thread.fsbase;
else
fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex);
@@ -347,7 +454,8 @@ unsigned long x86_gsbase_read_task(struct task_struct *task)
if (task == current)
gsbase = x86_gsbase_read_cpu_inactive();
- else if (task->thread.gsindex == 0)
+ else if (static_cpu_has(X86_FEATURE_FSGSBASE) ||
+ (task->thread.gsindex == 0))
gsbase = task->thread.gsbase;
else
gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex);
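One user-visible consequence of the new save_fsgs()/x86_fsgsbase_load() paths is that a GS base written directly with WRGSBASE survives being scheduled out, since the context switch now saves it with __rdgsbase_inactive() and restores it with __wrgsbase_inactive(). A small user-space round-trip check, assuming the FSGSBASE intrinsics (build with -mfsgsbase) and the HWCAP2 bit from earlier:

#include <assert.h>
#include <sched.h>
#include <stdio.h>
#include <sys/auxv.h>
#include <immintrin.h>

int main(void)
{
	if (!(getauxval(AT_HWCAP2) & (1 << 1)))	/* HWCAP2_FSGSBASE */
		return 1;

	_writegsbase_u64(0xdeadbeef000);	/* GS is unused by the 64-bit user ABI */

	for (int i = 0; i < 1000; i++)		/* give the scheduler a chance */
		sched_yield();

	/* The base must come back intact after any number of context switches. */
	assert(_readgsbase_u64() == 0xdeadbeef000);
	puts("GS base preserved across context switches");
	return 0;
}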
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 44130588987f..3f006489087f 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -281,17 +281,9 @@ static int set_segment_reg(struct task_struct *task,
return -EIO;
/*
- * This function has some ABI oddities.
- *
- * A 32-bit ptracer probably expects that writing FS or GS will change
- * FSBASE or GSBASE respectively. In the absence of FSGSBASE support,
- * this code indeed has that effect. When FSGSBASE is added, this
- * will require a special case.
- *
- * For existing 64-bit ptracers, writing FS or GS *also* currently
- * changes the base if the selector is nonzero the next time the task
- * is run. This behavior may not be needed, and trying to preserve it
- * when FSGSBASE is added would be complicated at best.
+ * Writes to FS and GS will change the stored selector. Whether
+ * this changes the segment base as well depends on whether
+ * FSGSBASE is enabled.
*/
switch (offset) {
@@ -379,25 +371,12 @@ static int putreg(struct task_struct *child,
case offsetof(struct user_regs_struct,fs_base):
if (value >= TASK_SIZE_MAX)
return -EIO;
- /*
- * When changing the FS base, use do_arch_prctl_64()
- * to set the index to zero and to set the base
- * as requested.
- *
- * NB: This behavior is nonsensical and likely needs to
- * change when FSGSBASE support is added.
- */
- if (child->thread.fsbase != value)
- return do_arch_prctl_64(child, ARCH_SET_FS, value);
+ x86_fsbase_write_task(child, value);
return 0;
case offsetof(struct user_regs_struct,gs_base):
- /*
- * Exactly the same here as the %fs handling above.
- */
if (value >= TASK_SIZE_MAX)
return -EIO;
- if (child->thread.gsbase != value)
- return do_arch_prctl_64(child, ARCH_SET_GS, value);
+ x86_gsbase_write_task(child, value);
return 0;
#endif
}
@@ -880,14 +859,39 @@ long arch_ptrace(struct task_struct *child, long request,
static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
struct pt_regs *regs = task_pt_regs(child);
+ int ret;
switch (regno) {
SEG32(cs);
SEG32(ds);
SEG32(es);
- SEG32(fs);
- SEG32(gs);
+
+ /*
+ * A 32-bit ptracer on a 64-bit kernel expects that writing
+ * FS or GS will also update the base. This is needed for
+ * operations like PTRACE_SETREGS to fully restore a saved
+ * CPU state.
+ */
+
+ case offsetof(struct user32, regs.fs):
+ ret = set_segment_reg(child,
+ offsetof(struct user_regs_struct, fs),
+ value);
+ if (ret == 0)
+ child->thread.fsbase =
+ x86_fsgsbase_read_task(child, value);
+ return ret;
+
+ case offsetof(struct user32, regs.gs):
+ ret = set_segment_reg(child,
+ offsetof(struct user_regs_struct, gs),
+ value);
+ if (ret == 0)
+ child->thread.gsbase =
+ x86_fsgsbase_read_task(child, value);
+ return ret;
+
SEG32(ss);
R32(ebx, bx);
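For 64-bit ptracers, fs_base and gs_base stay ordinary slots in struct user_regs_struct, so the new x86_fsbase_write_task()/x86_gsbase_write_task() paths are reached through plain PTRACE_PEEKUSER/PTRACE_POKEUSER. A hedged sketch of a tracer reading and writing a stopped child's FS base (error handling trimmed):

#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* wait for the parent */
		_exit(0);
	}

	waitpid(child, NULL, 0);		/* child is now stopped */

	long fsbase = ptrace(PTRACE_PEEKUSER, child,
			     (void *)offsetof(struct user_regs_struct, fs_base), NULL);
	printf("child FS base: %#lx\n", (unsigned long)fsbase);

	/* Writing it goes through x86_fsbase_write_task() and updates thread.fsbase. */
	ptrace(PTRACE_POKEUSER, child,
	       (void *)offsetof(struct user_regs_struct, fs_base), (void *)fsbase);

	ptrace(PTRACE_DETACH, child, NULL, NULL);
	return 0;
}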
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 896d74cb5081..1b10717c9321 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -95,7 +95,7 @@ static void ich_force_hpet_resume(void)
static void ich_force_enable_hpet(struct pci_dev *dev)
{
u32 val;
- u32 uninitialized_var(rcba);
+ u32 rcba;
int err = 0;
if (hpet_address || force_hpet_address)
@@ -185,7 +185,7 @@ static void hpet_print_force_info(void)
static void old_ich_force_hpet_resume(void)
{
u32 val;
- u32 uninitialized_var(gen_cntl);
+ u32 gen_cntl;
if (!force_hpet_address || !cached_dev)
return;
@@ -207,7 +207,7 @@ static void old_ich_force_hpet_resume(void)
static void old_ich_force_enable_hpet(struct pci_dev *dev)
{
u32 val;
- u32 uninitialized_var(gen_cntl);
+ u32 gen_cntl;
if (hpet_address || force_hpet_address)
return;
@@ -298,7 +298,7 @@ static void vt8237_force_hpet_resume(void)
static void vt8237_force_enable_hpet(struct pci_dev *dev)
{
- u32 uninitialized_var(val);
+ u32 val;
if (hpet_address || force_hpet_address)
return;
@@ -429,7 +429,7 @@ static void nvidia_force_hpet_resume(void)
static void nvidia_force_enable_hpet(struct pci_dev *dev)
{
- u32 uninitialized_var(val);
+ u32 val;
if (hpet_address || force_hpet_address)
return;
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 399f97abee02..d5fa494c2304 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -25,6 +25,7 @@
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>
#include <linux/context_tracking.h>
+#include <linux/entry-common.h>
#include <linux/syscalls.h>
#include <asm/processor.h>
@@ -803,7 +804,7 @@ static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
-void do_signal(struct pt_regs *regs)
+void arch_do_signal(struct pt_regs *regs)
{
struct ksignal ksig;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ffbd9a3d78d8..27aa04a95702 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -51,11 +51,11 @@
#include <linux/err.h>
#include <linux/nmi.h>
#include <linux/tboot.h>
-#include <linux/stackprotector.h>
#include <linux/gfp.h>
#include <linux/cpuidle.h>
#include <linux/numa.h>
#include <linux/pgtable.h>
+#include <linux/overflow.h>
#include <asm/acpi.h>
#include <asm/desc.h>
@@ -80,6 +80,7 @@
#include <asm/cpu_device_id.h>
#include <asm/spec-ctrl.h>
#include <asm/hw_irq.h>
+#include <asm/stackprotector.h>
/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
@@ -259,21 +260,10 @@ static void notrace start_secondary(void *unused)
/* enable local interrupts */
local_irq_enable();
- /* to prevent fake stack check failure in clock setup */
- boot_init_stack_canary();
-
x86_cpuinit.setup_percpu_clockev();
wmb();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
-
- /*
- * Prevent tail call to cpu_startup_entry() because the stack protector
- * guard has been changed a couple of function calls up, in
- * boot_init_stack_canary() and must not be checked before tail calling
- * another function.
- */
- prevent_tail_call_optimization();
}
/**
@@ -1011,6 +1001,7 @@ int common_cpu_up(unsigned int cpu, struct task_struct *idle)
alternatives_enable_smp();
per_cpu(current_task, cpu) = idle;
+ cpu_init_stack_canary(cpu, idle);
/* Initialize the interrupt stack(s) */
ret = irq_init_percpu_irqstack(cpu);
@@ -1777,6 +1768,7 @@ void native_play_dead(void)
#endif
+#ifdef CONFIG_X86_64
/*
* APERF/MPERF frequency ratio computation.
*
@@ -1975,6 +1967,7 @@ static bool core_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
static bool intel_set_max_freq_ratio(void)
{
u64 base_freq, turbo_freq;
+ u64 turbo_ratio;
if (slv_set_max_freq_ratio(&base_freq, &turbo_freq))
goto out;
@@ -2000,15 +1993,23 @@ out:
/*
* Some hypervisors advertise X86_FEATURE_APERFMPERF
* but then fill all MSR's with zeroes.
+ * Some CPUs have turbo boost but don't declare any turbo ratio
+ * in MSR_TURBO_RATIO_LIMIT.
*/
- if (!base_freq) {
- pr_debug("Couldn't determine cpu base frequency, necessary for scale-invariant accounting.\n");
+ if (!base_freq || !turbo_freq) {
+ pr_debug("Couldn't determine cpu base or turbo frequency, necessary for scale-invariant accounting.\n");
return false;
}
- arch_turbo_freq_ratio = div_u64(turbo_freq * SCHED_CAPACITY_SCALE,
- base_freq);
+ turbo_ratio = div_u64(turbo_freq * SCHED_CAPACITY_SCALE, base_freq);
+ if (!turbo_ratio) {
+ pr_debug("Non-zero turbo and base frequencies led to a 0 ratio.\n");
+ return false;
+ }
+
+ arch_turbo_freq_ratio = turbo_ratio;
arch_set_max_freq_ratio(turbo_disabled());
+
return true;
}
@@ -2048,11 +2049,19 @@ static void init_freq_invariance(bool secondary)
}
}
+static void disable_freq_invariance_workfn(struct work_struct *work)
+{
+ static_branch_disable(&arch_scale_freq_key);
+}
+
+static DECLARE_WORK(disable_freq_invariance_work,
+ disable_freq_invariance_workfn);
+
DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
void arch_scale_freq_tick(void)
{
- u64 freq_scale;
+ u64 freq_scale = SCHED_CAPACITY_SCALE;
u64 aperf, mperf;
u64 acnt, mcnt;
@@ -2064,19 +2073,32 @@ void arch_scale_freq_tick(void)
acnt = aperf - this_cpu_read(arch_prev_aperf);
mcnt = mperf - this_cpu_read(arch_prev_mperf);
- if (!mcnt)
- return;
this_cpu_write(arch_prev_aperf, aperf);
this_cpu_write(arch_prev_mperf, mperf);
- acnt <<= 2*SCHED_CAPACITY_SHIFT;
- mcnt *= arch_max_freq_ratio;
+ if (check_shl_overflow(acnt, 2*SCHED_CAPACITY_SHIFT, &acnt))
+ goto error;
+
+ if (check_mul_overflow(mcnt, arch_max_freq_ratio, &mcnt) || !mcnt)
+ goto error;
freq_scale = div64_u64(acnt, mcnt);
+ if (!freq_scale)
+ goto error;
if (freq_scale > SCHED_CAPACITY_SCALE)
freq_scale = SCHED_CAPACITY_SCALE;
this_cpu_write(arch_freq_scale, freq_scale);
+ return;
+
+error:
+ pr_warn("Scheduler frequency invariance went wobbly, disabling!\n");
+ schedule_work(&disable_freq_invariance_work);
+}
+#else
+static inline void init_freq_invariance(bool secondary)
+{
}
+#endif /* CONFIG_X86_64 */
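The new arch_scale_freq_tick() path computes freq_scale = (acnt << 2*SCHED_CAPACITY_SHIFT) / (mcnt * arch_max_freq_ratio), clamps it to SCHED_CAPACITY_SCALE and bails out (disabling frequency invariance) if any step overflows or produces zero. A standalone sketch of that arithmetic, using compiler builtins in place of check_shl_overflow()/check_mul_overflow(); the sample counter values are invented.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define SCHED_CAPACITY_SHIFT 10
#define SCHED_CAPACITY_SCALE (1UL << SCHED_CAPACITY_SHIFT)

/* Mirrors the overflow-checked scale computation; returns false on error,
 * which in the kernel schedules the work item that disables invariance. */
static bool compute_freq_scale(uint64_t acnt, uint64_t mcnt,
			       uint64_t max_freq_ratio, uint64_t *scale)
{
	/* check_shl_overflow() equivalent for acnt << 2*SCHED_CAPACITY_SHIFT */
	if (acnt > (UINT64_MAX >> (2 * SCHED_CAPACITY_SHIFT)))
		return false;
	acnt <<= 2 * SCHED_CAPACITY_SHIFT;

	/* check_mul_overflow() equivalent, plus the !mcnt divide-by-zero check */
	if (__builtin_mul_overflow(mcnt, max_freq_ratio, &mcnt) || !mcnt)
		return false;

	*scale = acnt / mcnt;
	if (!*scale)
		return false;
	if (*scale > SCHED_CAPACITY_SCALE)
		*scale = SCHED_CAPACITY_SCALE;
	return true;
}

int main(void)
{
	uint64_t scale;

	/* Invented tick deltas: APERF advanced ~1.2x MPERF against a
	 * turbo/base ratio of 1536/1024, i.e. ~80% of max turbo. */
	if (compute_freq_scale(1200, 1000, 1536, &scale))
		printf("freq_scale=%llu (of %lu)\n",
		       (unsigned long long)scale, SCHED_CAPACITY_SCALE);
	return 0;
}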
diff --git a/arch/x86/kernel/sys_ia32.c b/arch/x86/kernel/sys_ia32.c
index f8d65c99feb8..720cde885042 100644
--- a/arch/x86/kernel/sys_ia32.c
+++ b/arch/x86/kernel/sys_ia32.c
@@ -251,9 +251,6 @@ COMPAT_SYSCALL_DEFINE5(ia32_clone, unsigned long, clone_flags,
.tls = tls_val,
};
- if (!legacy_clone_args_valid(&args))
- return -EINVAL;
-
return _do_fork(&args);
}
#endif /* CONFIG_IA32_EMULATION */
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index b7cb3e0716f7..438fc554d48d 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -245,7 +245,7 @@ static noinstr bool handle_bug(struct pt_regs *regs)
DEFINE_IDTENTRY_RAW(exc_invalid_op)
{
- bool rcu_exit;
+ irqentry_state_t state;
/*
* We use UD2 as a short encoding for 'CALL __WARN', as such
@@ -255,11 +255,11 @@ DEFINE_IDTENTRY_RAW(exc_invalid_op)
if (!user_mode(regs) && handle_bug(regs))
return;
- rcu_exit = idtentry_enter_cond_rcu(regs);
+ state = irqentry_enter(regs);
instrumentation_begin();
handle_invalid_op(regs);
instrumentation_end();
- idtentry_exit_cond_rcu(regs, rcu_exit);
+ irqentry_exit(regs, state);
}
DEFINE_IDTENTRY(exc_coproc_segment_overrun)
@@ -405,7 +405,7 @@ DEFINE_IDTENTRY_DF(exc_double_fault)
}
#endif
- nmi_enter();
+ idtentry_enter_nmi(regs);
instrumentation_begin();
notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
@@ -638,28 +638,25 @@ DEFINE_IDTENTRY_RAW(exc_int3)
return;
/*
- * idtentry_enter_user() uses static_branch_{,un}likely() and therefore
- * can trigger INT3, hence poke_int3_handler() must be done
- * before. If the entry came from kernel mode, then use nmi_enter()
- * because the INT3 could have been hit in any context including
- * NMI.
+ * irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
+ * and therefore can trigger INT3, hence poke_int3_handler() must
+ * be done before. If the entry came from kernel mode, then use
+ * nmi_enter() because the INT3 could have been hit in any context
+ * including NMI.
*/
if (user_mode(regs)) {
- idtentry_enter_user(regs);
+ irqentry_enter_from_user_mode(regs);
instrumentation_begin();
do_int3_user(regs);
instrumentation_end();
- idtentry_exit_user(regs);
+ irqentry_exit_to_user_mode(regs);
} else {
- nmi_enter();
+ bool irq_state = idtentry_enter_nmi(regs);
instrumentation_begin();
- trace_hardirqs_off_finish();
if (!do_int3(regs))
die("int3", regs, 0);
- if (regs->flags & X86_EFLAGS_IF)
- trace_hardirqs_on_prepare();
instrumentation_end();
- nmi_exit();
+ idtentry_exit_nmi(regs, irq_state);
}
}
@@ -867,9 +864,8 @@ out:
static __always_inline void exc_debug_kernel(struct pt_regs *regs,
unsigned long dr6)
{
- nmi_enter();
+ bool irq_state = idtentry_enter_nmi(regs);
instrumentation_begin();
- trace_hardirqs_off_finish();
/*
* If something gets miswired and we end up here for a user mode
@@ -886,10 +882,8 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
handle_debug(regs, dr6, false);
- if (regs->flags & X86_EFLAGS_IF)
- trace_hardirqs_on_prepare();
instrumentation_end();
- nmi_exit();
+ idtentry_exit_nmi(regs, irq_state);
}
static __always_inline void exc_debug_user(struct pt_regs *regs,
@@ -901,12 +895,13 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,
*/
WARN_ON_ONCE(!user_mode(regs));
- idtentry_enter_user(regs);
+ irqentry_enter_from_user_mode(regs);
instrumentation_begin();
handle_debug(regs, dr6, true);
+
instrumentation_end();
- idtentry_exit_user(regs);
+ irqentry_exit_to_user_mode(regs);
}
#ifdef CONFIG_X86_64
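The traps.c conversions replace the x86-private idtentry_enter_cond_rcu()/exit pair with the generic irqentry_enter()/irqentry_exit(), where whatever state enter() had to establish is returned to the caller and handed back to exit() so it can be undone symmetrically. A minimal standalone model of that pairing; the state field and the "RCU watching" flag are illustrative simplifications, not the real irqentry_state_t.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for irqentry_state_t: remembers what enter() decided. */
struct irqentry_state_model {
	bool exit_rcu;	/* did enter() have to wake RCU? */
};

static int rcu_watching = 1;

static struct irqentry_state_model irqentry_enter_model(void)
{
	struct irqentry_state_model s = { .exit_rcu = false };

	if (!rcu_watching) {		/* e.g. an exception taken from idle */
		rcu_watching = 1;
		s.exit_rcu = true;
	}
	return s;
}

static void irqentry_exit_model(struct irqentry_state_model s)
{
	if (s.exit_rcu)			/* undo exactly what enter() did */
		rcu_watching = 0;
}

int main(void)
{
	rcu_watching = 0;		/* pretend we faulted in from idle */

	struct irqentry_state_model state = irqentry_enter_model();
	printf("handle exception, rcu_watching=%d\n", rcu_watching);
	irqentry_exit_model(state);
	printf("after exit, rcu_watching=%d\n", rcu_watching);
	return 0;
}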
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index 722a85f3b2dd..d7c44b257f7f 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -269,13 +269,13 @@ bool unwind_next_frame(struct unwind_state *state)
/*
* kthreads (other than the boot CPU's idle thread) have some
* partial regs at the end of their stack which were placed
- * there by copy_thread_tls(). But the regs don't have any
+ * there by copy_thread(). But the regs don't have any
* useful information, so we can skip them.
*
* This user_mode() check is slightly broader than a PF_KTHREAD
* check because it also catches the awkward situation where a
* newly forked kthread transitions into a user task by calling
- * do_execve(), which eventually clears PF_KTHREAD.
+ * kernel_execve(), which eventually clears PF_KTHREAD.
*/
if (!user_mode(regs))
goto the_end;
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index b277a2db6267..fbd5bd7a945a 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -42,6 +42,7 @@ config KVM
select HAVE_KVM_MSI
select HAVE_KVM_CPU_RELAX_INTERCEPT
select HAVE_KVM_NO_POLL
+ select KVM_XFER_TO_GUEST_WORK
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_VFIO
select SRCU
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0d178bb8ab8f..cf89328780b1 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1986,7 +1986,7 @@ static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
unsigned long data)
{
u64 *sptep;
- struct rmap_iterator uninitialized_var(iter);
+ struct rmap_iterator iter;
int young = 0;
for_each_rmap_spte(rmap_head, &iter, sptep)
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index bd70ece1ef8b..275564a0ebdb 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -314,7 +314,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
{
int ret;
pt_element_t pte;
- pt_element_t __user *uninitialized_var(ptep_user);
+ pt_element_t __user *ptep_user;
gfn_t table_gfn;
u64 pt_access, pte_access;
unsigned index, accessed_dirty, pte_pkey;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 13745f2a5ecd..559634b59d2a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/tboot.h>
#include <linux/trace_events.h>
+#include <linux/entry-kvm.h>
#include <asm/apic.h>
#include <asm/asm.h>
@@ -1169,7 +1170,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
gs_base = cpu_kernelmode_gs_base(cpu);
if (likely(is_64bit_mm(current->mm))) {
- save_fsgs_for_kvm();
+ current_save_fsgs();
fs_sel = current->thread.fsindex;
gs_sel = current->thread.gsindex;
fs_base = current->thread.fsbase;
@@ -5373,14 +5374,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
}
/*
- * Note, return 1 and not 0, vcpu_run() is responsible for
- * morphing the pending signal into the proper return code.
+ * Note, return 1 and not 0, vcpu_run() will invoke
+ * xfer_to_guest_mode() which will create a proper return
+ * code.
*/
- if (signal_pending(current))
+ if (__xfer_to_guest_mode_work_pending())
return 1;
-
- if (need_resched())
- schedule();
}
return 1;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1e536cdc856f..9f379fe16aa8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -56,6 +56,7 @@
#include <linux/sched/stat.h>
#include <linux/sched/isolation.h>
#include <linux/mem_encrypt.h>
+#include <linux/entry-kvm.h>
#include <trace/events/kvm.h>
@@ -1589,7 +1590,7 @@ EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
{
return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) ||
- need_resched() || signal_pending(current);
+ xfer_to_guest_mode_work_pending();
}
EXPORT_SYMBOL_GPL(kvm_vcpu_exit_request);
@@ -8683,15 +8684,11 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
break;
}
- if (signal_pending(current)) {
- r = -EINTR;
- vcpu->run->exit_reason = KVM_EXIT_INTR;
- ++vcpu->stat.signal_exits;
- break;
- }
- if (need_resched()) {
+ if (__xfer_to_guest_mode_work_pending()) {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
- cond_resched();
+ r = xfer_to_guest_mode_handle_work(vcpu);
+ if (r)
+ return r;
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
}
}
@@ -9934,7 +9931,7 @@ void kvm_arch_sync_events(struct kvm *kvm)
int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
{
int i, r;
- unsigned long hva, uninitialized_var(old_npages);
+ unsigned long hva, old_npages;
struct kvm_memslots *slots = kvm_memslots(kvm);
struct kvm_memory_slot *slot;
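With the KVM hunks above, vcpu_run() no longer open-codes signal and resched checks; it asks the generic entry code whether exit-to-guest work is pending and, if so, drops the SRCU read lock and lets xfer_to_guest_mode_handle_work() resolve it, returning an error to bail out to userspace. The standalone model below shows only that control flow; the flag names and stub return values are mine, not KVM's.

#include <stdio.h>

#define WORK_SIGPENDING   (1u << 0)
#define WORK_NEED_RESCHED (1u << 1)

static unsigned int pending_work = WORK_NEED_RESCHED;

/* Stand-in for __xfer_to_guest_mode_work_pending(). */
static int guest_mode_work_pending(void)
{
	return pending_work != 0;
}

/* Stand-in for xfer_to_guest_mode_handle_work(): returns 0 to re-enter
 * the guest, or a negative error (e.g. for a pending signal) to bail out. */
static int handle_guest_mode_work(void)
{
	if (pending_work & WORK_SIGPENDING)
		return -4;	/* model of -EINTR: let userspace see it */
	pending_work = 0;	/* resched and friends handled here */
	return 0;
}

int main(void)
{
	for (int iteration = 0; iteration < 3; iteration++) {
		printf("run guest, iteration %d\n", iteration);
		if (guest_mode_work_pending()) {
			/* kernel: srcu_read_unlock()/lock() around this call */
			int r = handle_guest_mode_work();
			if (r)
				return r;
		}
	}
	return 0;
}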
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 1ead568c0101..5e5edd2ec893 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1377,7 +1377,7 @@ handle_page_fault(struct pt_regs *regs, unsigned long error_code,
DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
{
unsigned long address = read_cr2();
- bool rcu_exit;
+ irqentry_state_t state;
prefetchw(&current->mm->mmap_lock);
@@ -1412,11 +1412,11 @@ DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
* code reenabled RCU to avoid subsequent wreckage which helps
* debugability.
*/
- rcu_exit = idtentry_enter_cond_rcu(regs);
+ state = irqentry_enter(regs);
instrumentation_begin();
handle_page_fault(regs, error_code, address);
instrumentation_end();
- idtentry_exit_cond_rcu(regs, rcu_exit);
+ irqentry_exit(regs, state);
}
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 001dd7dc829f..c7a47603537f 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -25,6 +25,7 @@
#include <asm/cpufeature.h>
#include <asm/pti.h>
#include <asm/text-patching.h>
+#include <asm/memtype.h>
/*
* We need to define the tracepoints somewhere, and tlb.c
@@ -912,8 +913,6 @@ void free_kernel_image_pages(const char *what, void *begin, void *end)
set_memory_np_noalias(begin_ul, len_pages);
}
-void __weak mem_encrypt_free_decrypted_mem(void) { }
-
void __ref free_initmem(void)
{
e820__reallocate_tables();
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index dbae185511cd..e65b96f381a7 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -209,7 +209,7 @@ static void sync_global_pgds_l4(unsigned long start, unsigned long end)
* When memory was added make sure all the processes MM have
* suitable PGD entries in the local PGD level page.
*/
-void sync_global_pgds(unsigned long start, unsigned long end)
+static void sync_global_pgds(unsigned long start, unsigned long end)
{
if (pgtable_l5_enabled())
sync_global_pgds_l5(start, end);
@@ -217,11 +217,6 @@ void sync_global_pgds(unsigned long start, unsigned long end)
sync_global_pgds_l4(start, end);
}
-void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
-{
- sync_global_pgds(start, end);
-}
-
/*
* NOTE: This function is marked __ref because it calls __init function
* (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
@@ -1238,6 +1233,56 @@ static void __init register_page_bootmem_info(void)
#endif
}
+/*
+ * Pre-allocates page-table pages for the vmalloc area in the kernel page-table.
+ * Only the level which needs to be synchronized between all page-tables is
+ * allocated because the synchronization can be expensive.
+ */
+static void __init preallocate_vmalloc_pages(void)
+{
+ unsigned long addr;
+ const char *lvl;
+
+ for (addr = VMALLOC_START; addr <= VMALLOC_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
+ pgd_t *pgd = pgd_offset_k(addr);
+ p4d_t *p4d;
+ pud_t *pud;
+
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_none(*p4d)) {
+ /* Can only happen with 5-level paging */
+ p4d = p4d_alloc(&init_mm, pgd, addr);
+ if (!p4d) {
+ lvl = "p4d";
+ goto failed;
+ }
+ }
+
+ if (pgtable_l5_enabled())
+ continue;
+
+ pud = pud_offset(p4d, addr);
+ if (pud_none(*pud)) {
+ /* Ends up here only with 4-level paging */
+ pud = pud_alloc(&init_mm, p4d, addr);
+ if (!pud) {
+ lvl = "pud";
+ goto failed;
+ }
+ }
+ }
+
+ return;
+
+failed:
+
+ /*
+ * The pages have to be there now or they will be missing in
+ * process page-tables later.
+ */
+ panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl);
+}
+
void __init mem_init(void)
{
pci_iommu_alloc();
@@ -1261,6 +1306,8 @@ void __init mem_init(void)
if (get_gate_vma(&init_mm))
kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);
+ preallocate_vmalloc_pages();
+
mem_init_print_info(NULL);
}
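The preallocate_vmalloc_pages() loop above visits exactly one address per top-level entry covering the vmalloc range by jumping to the next PGDIR_SIZE boundary each iteration. A small standalone sketch of that ALIGN() stride; the start/end values are made-up placeholders, not the real VMALLOC_START/VMALLOC_END.

#include <stdint.h>
#include <stdio.h>

#define PGDIR_SHIFT 39				/* 4-level x86-64 */
#define PGDIR_SIZE  (1ULL << PGDIR_SHIFT)	/* 512 GiB per PGD entry */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Placeholder bounds spanning a few PGD entries, not real VMALLOC_*. */
	uint64_t start = 5 * PGDIR_SIZE + 0x1234;
	uint64_t end   = 8 * PGDIR_SIZE - 1;

	/* Same stride as preallocate_vmalloc_pages(): one visit per entry,
	 * so only the level shared between all page tables is touched. */
	for (uint64_t addr = start; addr <= end;
	     addr = ALIGN_UP(addr + 1, PGDIR_SIZE))
		printf("would allocate p4d/pud for PGD index %llu\n",
		       (unsigned long long)(addr >> PGDIR_SHIFT));
	return 0;
}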
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 4a781cf99e92..9f1177edc2e7 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -376,7 +376,6 @@ bool force_dma_unencrypted(struct device *dev)
return false;
}
-/* Architecture __weak replacement functions */
void __init mem_encrypt_free_decrypted_mem(void)
{
unsigned long vaddr, vaddr_end, npages;
@@ -401,6 +400,7 @@ void __init mem_encrypt_free_decrypted_mem(void)
free_init_pages("unused decrypted", vaddr, vaddr_end);
}
+/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void)
{
if (!sme_me_mask)
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 8ee952038c80..b05f45e5e8e2 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -543,7 +543,6 @@ static void __init numa_clear_kernel_node_hotplug(void)
static int __init numa_register_memblks(struct numa_meminfo *mi)
{
- unsigned long uninitialized_var(pfn_align);
int i, nid;
/* Account for nodes with cpus and no memory */
@@ -571,15 +570,16 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
* If sections array is gonna be used for pfn -> nid mapping, check
* whether its granularity is fine enough.
*/
-#ifdef NODE_NOT_IN_PAGE_FLAGS
- pfn_align = node_map_pfn_alignment();
- if (pfn_align && pfn_align < PAGES_PER_SECTION) {
- printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
- PFN_PHYS(pfn_align) >> 20,
- PFN_PHYS(PAGES_PER_SECTION) >> 20);
- return -EINVAL;
+ if (IS_ENABLED(NODE_NOT_IN_PAGE_FLAGS)) {
+ unsigned long pfn_align = node_map_pfn_alignment();
+
+ if (pfn_align && pfn_align < PAGES_PER_SECTION) {
+ pr_warn("Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
+ PFN_PHYS(pfn_align) >> 20,
+ PFN_PHYS(PAGES_PER_SECTION) >> 20);
+ return -EINVAL;
+ }
}
-#endif
if (!numa_meminfo_cover_memory(mi))
return -EINVAL;
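The numa.c hunk converts an #ifdef into an IS_ENABLED()-style check so the guarded block is always parsed and type-checked, then dead-code-eliminated when disabled. Below is a simplified standalone reimplementation of the preprocessor trick IS_ENABLED() relies on; it only handles a symbol defined to 1 (the kernel macro also accepts the =m/_MODULE form), and the FEATURE_* names are examples.

#include <stdio.h>

/* Simplified model of the kernel's IS_ENABLED() trick: expands to 1 only
 * when the argument macro is defined to 1, otherwise to 0, while keeping
 * the guarded code visible to the compiler. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define IS_ENABLED_MODEL(option) ___is_defined(option)

#define FEATURE_ON 1
/* FEATURE_OFF intentionally not defined */

int main(void)
{
	if (IS_ENABLED_MODEL(FEATURE_ON))
		printf("FEATURE_ON path compiled in and taken\n");
	if (IS_ENABLED_MODEL(FEATURE_OFF))
		printf("never printed, but still type-checked\n");
	return 0;
}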
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 77e04304a2a7..d1b2a889f035 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -135,7 +135,7 @@ static inline void cpa_inc_2m_checked(void)
static inline void cpa_inc_4k_install(void)
{
- cpa_4k_install++;
+ data_race(cpa_4k_install++);
}
static inline void cpa_inc_lp_sameprot(int level)
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index e966115d105c..f6ea8f1a9d57 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -496,7 +496,7 @@ void __init efi_init(void)
efi_print_memmap();
}
-#if defined(CONFIG_X86_32) || defined(CONFIG_X86_UV)
+#if defined(CONFIG_X86_32)
void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
{
@@ -648,7 +648,7 @@ static inline void *efi_map_next_entry_reverse(void *entry)
*/
static void *efi_map_next_entry(void *entry)
{
- if (!efi_have_uv1_memmap() && efi_enabled(EFI_64BIT)) {
+ if (efi_enabled(EFI_64BIT)) {
/*
* Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
* config table feature requires us to map all entries
@@ -777,11 +777,9 @@ static void __init kexec_enter_virtual_mode(void)
/*
* We don't do virtual mode, since we don't do runtime services, on
- * non-native EFI. With the UV1 memmap, we don't do runtime services in
- * kexec kernel because in the initial boot something else might
- * have been mapped at these virtual addresses.
+ * non-native EFI.
*/
- if (efi_is_mixed() || efi_have_uv1_memmap()) {
+ if (efi_is_mixed()) {
efi_memmap_unmap();
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
return;
@@ -832,12 +830,6 @@ static void __init kexec_enter_virtual_mode(void)
* has the runtime attribute bit set in its memory descriptor into the
* efi_pgd page table.
*
- * The old method which used to update that memory descriptor with the
- * virtual address obtained from ioremap() is still supported when the
- * kernel is booted on SG1 UV1 hardware. Same old method enabled the
- * runtime services to be called without having to thunk back into
- * physical mode for every invocation.
- *
* The new method does a pagetable switch in a preemption-safe manner
* so that we're in a different address space when calling a runtime
* function. For function arguments passing we do copy the PUDs of the
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 8e364c4c6768..413583f904a6 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -74,9 +74,6 @@ int __init efi_alloc_page_tables(void)
pud_t *pud;
gfp_t gfp_mask;
- if (efi_have_uv1_memmap())
- return 0;
-
gfp_mask = GFP_KERNEL | __GFP_ZERO;
efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
if (!efi_pgd)
@@ -115,9 +112,6 @@ void efi_sync_low_kernel_mappings(void)
pud_t *pud_k, *pud_efi;
pgd_t *efi_pgd = efi_mm.pgd;
- if (efi_have_uv1_memmap())
- return;
-
/*
* We can share all PGD entries apart from the one entry that
* covers the EFI runtime mapping space.
@@ -206,9 +200,6 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
unsigned npages;
pgd_t *pgd = efi_mm.pgd;
- if (efi_have_uv1_memmap())
- return 0;
-
/*
* It can happen that the physical address of new_memmap lands in memory
* which is not mapped in the EFI page table. Therefore we need to go
@@ -315,9 +306,6 @@ void __init efi_map_region(efi_memory_desc_t *md)
unsigned long size = md->num_pages << PAGE_SHIFT;
u64 pa = md->phys_addr;
- if (efi_have_uv1_memmap())
- return old_map_region(md);
-
/*
* Make sure the 1:1 mappings are present as a catch-all for b0rked
* firmware which doesn't update all internal pointers after switching
@@ -420,12 +408,6 @@ void __init efi_runtime_update_mappings(void)
{
efi_memory_desc_t *md;
- if (efi_have_uv1_memmap()) {
- if (__supported_pte_mask & _PAGE_NX)
- runtime_code_page_mkexec();
- return;
- }
-
/*
* Use the EFI Memory Attribute Table for mapping permissions if it
* exists, since it is intended to supersede EFI_PROPERTIES_TABLE.
@@ -474,10 +456,7 @@ void __init efi_runtime_update_mappings(void)
void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
- if (efi_have_uv1_memmap())
- ptdump_walk_pgd_level(NULL, &init_mm);
- else
- ptdump_walk_pgd_level(NULL, &efi_mm);
+ ptdump_walk_pgd_level(NULL, &efi_mm);
#endif
}
@@ -849,21 +828,13 @@ efi_set_virtual_address_map(unsigned long memory_map_size,
const efi_system_table_t *systab = (efi_system_table_t *)systab_phys;
efi_status_t status;
unsigned long flags;
- pgd_t *save_pgd = NULL;
if (efi_is_mixed())
return efi_thunk_set_virtual_address_map(memory_map_size,
descriptor_size,
descriptor_version,
virtual_map);
-
- if (efi_have_uv1_memmap()) {
- save_pgd = efi_uv1_memmap_phys_prolog();
- if (!save_pgd)
- return EFI_ABORTED;
- } else {
- efi_switch_mm(&efi_mm);
- }
+ efi_switch_mm(&efi_mm);
kernel_fpu_begin();
@@ -879,10 +850,7 @@ efi_set_virtual_address_map(unsigned long memory_map_size,
/* grab the virtually remapped EFI runtime services table pointer */
efi.runtime = READ_ONCE(systab->runtime);
- if (save_pgd)
- efi_uv1_memmap_phys_epilog(save_pgd);
- else
- efi_switch_mm(efi_scratch.prev_mm);
+ efi_switch_mm(efi_scratch.prev_mm);
return status;
}
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index a5a469cdf5bf..5a40fe411ebd 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -381,14 +381,6 @@ static void __init efi_unmap_pages(efi_memory_desc_t *md)
u64 va = md->virt_addr;
/*
- * To Do: Remove this check after adding functionality to unmap EFI boot
- * services code/data regions from direct mapping area because the UV1
- * memory map maps EFI regions in swapper_pg_dir.
- */
- if (efi_have_uv1_memmap())
- return;
-
- /*
* EFI mixed mode has all RAM mapped to access arguments while making
* EFI runtime calls, hence don't unmap EFI boot services code/data
* regions.
@@ -558,16 +550,6 @@ out:
return ret;
}
-static const struct dmi_system_id sgi_uv1_dmi[] __initconst = {
- { NULL, "SGI UV1",
- { DMI_MATCH(DMI_PRODUCT_NAME, "Stoutland Platform"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
- DMI_MATCH(DMI_BIOS_VENDOR, "SGI.COM"),
- }
- },
- { } /* NULL entry stops DMI scanning */
-};
-
void __init efi_apply_memmap_quirks(void)
{
/*
@@ -579,17 +561,6 @@ void __init efi_apply_memmap_quirks(void)
pr_info("Setup done, disabling due to 32/64-bit mismatch\n");
efi_memmap_unmap();
}
-
- /* UV2+ BIOS has a fix for this issue. UV1 still needs the quirk. */
- if (dmi_check_system(sgi_uv1_dmi)) {
- if (IS_ENABLED(CONFIG_X86_UV)) {
- set_bit(EFI_UV1_MEMMAP, &efi.flags);
- } else {
- pr_warn("EFI runtime disabled, needs CONFIG_X86_UV=y on UV1\n");
- clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
- efi_memmap_unmap();
- }
- }
}
/*
@@ -723,8 +694,6 @@ void efi_recover_from_page_fault(unsigned long phys_addr)
/*
* Make sure that an efi runtime service caused the page fault.
- * "efi_mm" cannot be used to check if the page fault had occurred
- * in the firmware context because the UV1 memmap doesn't use efi_pgd.
*/
if (efi_rts_work.efi_rts_id == EFI_NONE)
return;
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index 4494589a288a..a6e5f2c1805d 100644
--- a/arch/x86/platform/uv/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -30,17 +30,7 @@ static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
*/
return BIOS_STATUS_UNIMPLEMENTED;
- /*
- * If EFI_UV1_MEMMAP is set, we need to fall back to using our old EFI
- * callback method, which uses efi_call() directly, with the kernel page tables:
- */
- if (unlikely(efi_enabled(EFI_UV1_MEMMAP))) {
- kernel_fpu_begin();
- ret = efi_call((void *)__va(tab->function), (u64)which, a1, a2, a3, a4, a5);
- kernel_fpu_end();
- } else {
- ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
- }
+ ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
return ret;
}
@@ -209,164 +199,3 @@ int uv_bios_init(void)
pr_info("UV: UVsystab: Revision:%x\n", uv_systab->revision);
return 0;
}
-
-static void __init early_code_mapping_set_exec(int executable)
-{
- efi_memory_desc_t *md;
-
- if (!(__supported_pte_mask & _PAGE_NX))
- return;
-
- /* Make EFI service code area executable */
- for_each_efi_memory_desc(md) {
- if (md->type == EFI_RUNTIME_SERVICES_CODE ||
- md->type == EFI_BOOT_SERVICES_CODE)
- efi_set_executable(md, executable);
- }
-}
-
-void __init efi_uv1_memmap_phys_epilog(pgd_t *save_pgd)
-{
- /*
- * After the lock is released, the original page table is restored.
- */
- int pgd_idx, i;
- int nr_pgds;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
-
- nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
-
- for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
- pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
- set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
-
- if (!pgd_present(*pgd))
- continue;
-
- for (i = 0; i < PTRS_PER_P4D; i++) {
- p4d = p4d_offset(pgd,
- pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
-
- if (!p4d_present(*p4d))
- continue;
-
- pud = (pud_t *)p4d_page_vaddr(*p4d);
- pud_free(&init_mm, pud);
- }
-
- p4d = (p4d_t *)pgd_page_vaddr(*pgd);
- p4d_free(&init_mm, p4d);
- }
-
- kfree(save_pgd);
-
- __flush_tlb_all();
- early_code_mapping_set_exec(0);
-}
-
-pgd_t * __init efi_uv1_memmap_phys_prolog(void)
-{
- unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
- pgd_t *save_pgd, *pgd_k, *pgd_efi;
- p4d_t *p4d, *p4d_k, *p4d_efi;
- pud_t *pud;
-
- int pgd;
- int n_pgds, i, j;
-
- early_code_mapping_set_exec(1);
-
- n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
- save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
- if (!save_pgd)
- return NULL;
-
- /*
- * Build 1:1 identity mapping for UV1 memmap usage. Note that
- * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
- * it is PUD_SIZE ALIGNED with KASLR enabled. So for a given physical
- * address X, the pud_index(X) != pud_index(__va(X)), we can only copy
- * PUD entry of __va(X) to fill in pud entry of X to build 1:1 mapping.
- * This means here we can only reuse the PMD tables of the direct mapping.
- */
- for (pgd = 0; pgd < n_pgds; pgd++) {
- addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
- vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
- pgd_efi = pgd_offset_k(addr_pgd);
- save_pgd[pgd] = *pgd_efi;
-
- p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
- if (!p4d) {
- pr_err("Failed to allocate p4d table!\n");
- goto out;
- }
-
- for (i = 0; i < PTRS_PER_P4D; i++) {
- addr_p4d = addr_pgd + i * P4D_SIZE;
- p4d_efi = p4d + p4d_index(addr_p4d);
-
- pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
- if (!pud) {
- pr_err("Failed to allocate pud table!\n");
- goto out;
- }
-
- for (j = 0; j < PTRS_PER_PUD; j++) {
- addr_pud = addr_p4d + j * PUD_SIZE;
-
- if (addr_pud > (max_pfn << PAGE_SHIFT))
- break;
-
- vaddr = (unsigned long)__va(addr_pud);
-
- pgd_k = pgd_offset_k(vaddr);
- p4d_k = p4d_offset(pgd_k, vaddr);
- pud[j] = *pud_offset(p4d_k, vaddr);
- }
- }
- pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX;
- }
-
- __flush_tlb_all();
- return save_pgd;
-out:
- efi_uv1_memmap_phys_epilog(save_pgd);
- return NULL;
-}
-
-void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
- u32 type, u64 attribute)
-{
- unsigned long last_map_pfn;
-
- if (type == EFI_MEMORY_MAPPED_IO)
- return ioremap(phys_addr, size);
-
- last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size,
- PAGE_KERNEL);
- if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
- unsigned long top = last_map_pfn << PAGE_SHIFT;
- efi_ioremap(top, size - (top - phys_addr), type, attribute);
- }
-
- if (!(attribute & EFI_MEMORY_WB))
- efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);
-
- return (void __iomem *)__va(phys_addr);
-}
-
-static int __init arch_parse_efi_cmdline(char *str)
-{
- if (!str) {
- pr_warn("need at least one option\n");
- return -EINVAL;
- }
-
- if (!efi_is_mixed() && parse_option_str(str, "old_map"))
- set_bit(EFI_UV1_MEMMAP, &efi.flags);
-
- return 0;
-}
-early_param("efi", arch_parse_efi_cmdline);
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 0ac96ca304c7..62ea907668f8 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -23,18 +23,6 @@
static struct bau_operations ops __ro_after_init;
-/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
-static const int timeout_base_ns[] = {
- 20,
- 160,
- 1280,
- 10240,
- 81920,
- 655360,
- 5242880,
- 167772160
-};
-
static int timeout_us;
static bool nobau = true;
static int nobau_perm;
@@ -510,70 +498,6 @@ static inline void end_uvhub_quiesce(struct bau_control *hmaster)
atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}
-static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
-{
- unsigned long descriptor_status;
-
- descriptor_status = uv_read_local_mmr(mmr_offset);
- descriptor_status >>= right_shift;
- descriptor_status &= UV_ACT_STATUS_MASK;
- return descriptor_status;
-}
-
-/*
- * Wait for completion of a broadcast software ack message
- * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
- */
-static int uv1_wait_completion(struct bau_desc *bau_desc,
- struct bau_control *bcp, long try)
-{
- unsigned long descriptor_status;
- cycles_t ttm;
- u64 mmr_offset = bcp->status_mmr;
- int right_shift = bcp->status_index;
- struct ptc_stats *stat = bcp->statp;
-
- descriptor_status = uv1_read_status(mmr_offset, right_shift);
- /* spin on the status MMR, waiting for it to go idle */
- while ((descriptor_status != DS_IDLE)) {
- /*
- * Our software ack messages may be blocked because
- * there are no swack resources available. As long
- * as none of them has timed out hardware will NACK
- * our message and its state will stay IDLE.
- */
- if (descriptor_status == DS_SOURCE_TIMEOUT) {
- stat->s_stimeout++;
- return FLUSH_GIVEUP;
- } else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
- stat->s_dtimeout++;
- ttm = get_cycles();
-
- /*
- * Our retries may be blocked by all destination
- * swack resources being consumed, and a timeout
- * pending. In that case hardware returns the
- * ERROR that looks like a destination timeout.
- */
- if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
- bcp->conseccompletes = 0;
- return FLUSH_RETRY_PLUGGED;
- }
-
- bcp->conseccompletes = 0;
- return FLUSH_RETRY_TIMEOUT;
- } else {
- /*
- * descriptor_status is still BUSY
- */
- cpu_relax();
- }
- descriptor_status = uv1_read_status(mmr_offset, right_shift);
- }
- bcp->conseccompletes++;
- return FLUSH_COMPLETE;
-}
-
/*
* UV2 could have an extra bit of status in the ACTIVATION_STATUS_2 register.
* But not currently used.
@@ -853,24 +777,6 @@ static void record_send_stats(cycles_t time1, cycles_t time2,
}
/*
- * Because of a uv1 hardware bug only a limited number of concurrent
- * requests can be made.
- */
-static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
-{
- spinlock_t *lock = &hmaster->uvhub_lock;
- atomic_t *v;
-
- v = &hmaster->active_descriptor_count;
- if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
- stat->s_throttles++;
- do {
- cpu_relax();
- } while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
- }
-}
-
-/*
* Handle the completion status of a message send.
*/
static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
@@ -899,50 +805,30 @@ static int uv_flush_send_and_wait(struct cpumask *flush_mask,
{
int seq_number = 0;
int completion_stat = 0;
- int uv1 = 0;
long try = 0;
unsigned long index;
cycles_t time1;
cycles_t time2;
struct ptc_stats *stat = bcp->statp;
struct bau_control *hmaster = bcp->uvhub_master;
- struct uv1_bau_msg_header *uv1_hdr = NULL;
struct uv2_3_bau_msg_header *uv2_3_hdr = NULL;
- if (bcp->uvhub_version == UV_BAU_V1) {
- uv1 = 1;
- uv1_throttle(hmaster, stat);
- }
-
while (hmaster->uvhub_quiesce)
cpu_relax();
time1 = get_cycles();
- if (uv1)
- uv1_hdr = &bau_desc->header.uv1_hdr;
- else
- /* uv2 and uv3 */
- uv2_3_hdr = &bau_desc->header.uv2_3_hdr;
+ uv2_3_hdr = &bau_desc->header.uv2_3_hdr;
do {
if (try == 0) {
- if (uv1)
- uv1_hdr->msg_type = MSG_REGULAR;
- else
- uv2_3_hdr->msg_type = MSG_REGULAR;
+ uv2_3_hdr->msg_type = MSG_REGULAR;
seq_number = bcp->message_number++;
} else {
- if (uv1)
- uv1_hdr->msg_type = MSG_RETRY;
- else
- uv2_3_hdr->msg_type = MSG_RETRY;
+ uv2_3_hdr->msg_type = MSG_RETRY;
stat->s_retry_messages++;
}
- if (uv1)
- uv1_hdr->sequence = seq_number;
- else
- uv2_3_hdr->sequence = seq_number;
+ uv2_3_hdr->sequence = seq_number;
index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
bcp->send_message = get_cycles();
@@ -1162,11 +1048,10 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
address = TLB_FLUSH_ALL;
switch (bcp->uvhub_version) {
- case UV_BAU_V1:
case UV_BAU_V2:
case UV_BAU_V3:
- bau_desc->payload.uv1_2_3.address = address;
- bau_desc->payload.uv1_2_3.sending_cpu = cpu;
+ bau_desc->payload.uv2_3.address = address;
+ bau_desc->payload.uv2_3.sending_cpu = cpu;
break;
case UV_BAU_V4:
bau_desc->payload.uv4.address = address;
@@ -1300,7 +1185,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_uv_bau_message)
if (bcp->uvhub_version == UV_BAU_V2)
process_uv2_message(&msgdesc, bcp);
else
- /* no error workaround for uv1 or uv3 */
+ /* no error workaround for uv3 */
bau_process_message(&msgdesc, bcp, 1);
msg++;
@@ -1350,12 +1235,7 @@ static void __init enable_timeouts(void)
mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
write_mmr_misc_control(pnode, mmr_image);
- /*
- * UV1:
- * Subsequent reversals of the timebase bit (3) cause an
- * immediate timeout of one or all INTD resources as
- * indicated in bits 2:0 (7 causes all of them to timeout).
- */
+
mmr_image |= (1L << SOFTACK_MSHIFT);
if (is_uv2_hub()) {
/* do not touch the legacy mode bit */
@@ -1711,14 +1591,12 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
{
int i;
int cpu;
- int uv1 = 0;
unsigned long gpa;
unsigned long m;
unsigned long n;
size_t dsize;
struct bau_desc *bau_desc;
struct bau_desc *bd2;
- struct uv1_bau_msg_header *uv1_hdr;
struct uv2_3_bau_msg_header *uv2_3_hdr;
struct bau_control *bcp;
@@ -1733,8 +1611,6 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
gpa = uv_gpa(bau_desc);
n = uv_gpa_to_gnode(gpa);
m = ops.bau_gpa_to_offset(gpa);
- if (is_uv1_hub())
- uv1 = 1;
/* the 14-bit pnode */
write_mmr_descriptor_base(pnode,
@@ -1746,37 +1622,15 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
*/
for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
memset(bd2, 0, sizeof(struct bau_desc));
- if (uv1) {
- uv1_hdr = &bd2->header.uv1_hdr;
- uv1_hdr->swack_flag = 1;
- /*
- * The base_dest_nasid set in the message header
- * is the nasid of the first uvhub in the partition.
- * The bit map will indicate destination pnode numbers
- * relative to that base. They may not be consecutive
- * if nasid striding is being used.
- */
- uv1_hdr->base_dest_nasid =
- UV_PNODE_TO_NASID(base_pnode);
- uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
- uv1_hdr->command = UV_NET_ENDPOINT_INTD;
- uv1_hdr->int_both = 1;
- /*
- * all others need to be set to zero:
- * fairness chaining multilevel count replied_to
- */
- } else {
- /*
- * BIOS uses legacy mode, but uv2 and uv3 hardware always
- * uses native mode for selective broadcasts.
- */
- uv2_3_hdr = &bd2->header.uv2_3_hdr;
- uv2_3_hdr->swack_flag = 1;
- uv2_3_hdr->base_dest_nasid =
- UV_PNODE_TO_NASID(base_pnode);
- uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID;
- uv2_3_hdr->command = UV_NET_ENDPOINT_INTD;
- }
+ /*
+ * BIOS uses legacy mode, but uv2 and uv3 hardware always
+ * uses native mode for selective broadcasts.
+ */
+ uv2_3_hdr = &bd2->header.uv2_3_hdr;
+ uv2_3_hdr->swack_flag = 1;
+ uv2_3_hdr->base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
+ uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID;
+ uv2_3_hdr->command = UV_NET_ENDPOINT_INTD;
}
for_each_present_cpu(cpu) {
if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
@@ -1861,7 +1715,7 @@ static void __init init_uvhub(int uvhub, int vector, int base_pnode)
* The below initialization can't be in firmware because the
* messaging IRQ will be determined by the OS.
*/
- apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
+ apicid = uvhub_to_first_apicid(uvhub);
write_mmr_data_config(pnode, ((apicid << 32) | vector));
}
@@ -1874,33 +1728,20 @@ static int calculate_destination_timeout(void)
{
unsigned long mmr_image;
int mult1;
- int mult2;
- int index;
int base;
int ret;
- unsigned long ts_ns;
-
- if (is_uv1_hub()) {
- mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
- mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
- index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
- mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
- mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
- ts_ns = timeout_base_ns[index];
- ts_ns *= (mult1 * mult2);
- ret = ts_ns / 1000;
- } else {
- /* same destination timeout for uv2 and uv3 */
- /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
- mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
- mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
- if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
- base = 80;
- else
- base = 10;
- mult1 = mmr_image & UV2_ACK_MASK;
- ret = mult1 * base;
- }
+
+ /* same destination timeout for uv2 and uv3 */
+ /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
+ mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
+ mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
+ if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
+ base = 80;
+ else
+ base = 10;
+ mult1 = mmr_image & UV2_ACK_MASK;
+ ret = mult1 * base;
+
return ret;
}
@@ -2039,9 +1880,7 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
bcp->cpus_in_socket = sdp->num_cpus;
bcp->socket_master = *smasterp;
bcp->uvhub = bdp->uvhub;
- if (is_uv1_hub())
- bcp->uvhub_version = UV_BAU_V1;
- else if (is_uv2_hub())
+ if (is_uv2_hub())
bcp->uvhub_version = UV_BAU_V2;
else if (is_uv3_hub())
bcp->uvhub_version = UV_BAU_V3;
@@ -2123,7 +1962,7 @@ static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
struct uvhub_desc *uvhub_descs;
unsigned char *uvhub_mask = NULL;
- if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
+ if (is_uv3_hub() || is_uv2_hub())
timeout_us = calculate_destination_timeout();
uvhub_descs = kcalloc(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL);
@@ -2151,17 +1990,6 @@ fail:
return 1;
}
-static const struct bau_operations uv1_bau_ops __initconst = {
- .bau_gpa_to_offset = uv_gpa_to_offset,
- .read_l_sw_ack = read_mmr_sw_ack,
- .read_g_sw_ack = read_gmmr_sw_ack,
- .write_l_sw_ack = write_mmr_sw_ack,
- .write_g_sw_ack = write_gmmr_sw_ack,
- .write_payload_first = write_mmr_payload_first,
- .write_payload_last = write_mmr_payload_last,
- .wait_completion = uv1_wait_completion,
-};
-
static const struct bau_operations uv2_3_bau_ops __initconst = {
.bau_gpa_to_offset = uv_gpa_to_offset,
.read_l_sw_ack = read_mmr_sw_ack,
@@ -2206,8 +2034,6 @@ static int __init uv_bau_init(void)
ops = uv2_3_bau_ops;
else if (is_uv2_hub())
ops = uv2_3_bau_ops;
- else if (is_uv1_hub())
- ops = uv1_bau_ops;
nuvhubs = uv_num_possible_blades();
if (nuvhubs < 2) {
@@ -2228,7 +2054,7 @@ static int __init uv_bau_init(void)
}
/* software timeouts are not supported on UV4 */
- if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
+ if (is_uv3_hub() || is_uv2_hub())
enable_timeouts();
if (init_per_cpu(nuvhubs, uv_base_pnode)) {
@@ -2251,8 +2077,7 @@ static int __init uv_bau_init(void)
val = 1L << 63;
write_gmmr_activation(pnode, val);
mmr = 1; /* should be 1 to broadcast to both sockets */
- if (!is_uv1_hub())
- write_mmr_data_broadcast(pnode, mmr);
+ write_mmr_data_broadcast(pnode, mmr);
}
}
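The surviving UV2/UV3 branch of calculate_destination_timeout() reads the misc-control MMR, picks a 10us or 80us base depending on one field bit and multiplies by a small multiplier field. A standalone sketch of that arithmetic; the bit positions and masks below are invented placeholders for illustration only, not the real UV_SA_*/UV2_ACK_* definitions from the UV MMR headers.

#include <stdint.h>
#include <stdio.h>

/* Made-up field layout, for illustration only. */
#define SA_SHIFT	12
#define SA_MASK		(0xffULL << SA_SHIFT)
#define ACK_UNITS_BIT	3		/* 0: 10us base, 1: 80us base */
#define ACK_MULT_MASK	0x7

static int destination_timeout_us(uint64_t mmr_image)
{
	uint64_t field = (mmr_image & SA_MASK) >> SA_SHIFT;
	int base = (field & (1ULL << ACK_UNITS_BIT)) ? 80 : 10;
	int mult = field & ACK_MULT_MASK;

	return mult * base;
}

int main(void)
{
	uint64_t mmr = (uint64_t)((1 << ACK_UNITS_BIT) | 5) << SA_SHIFT;

	/* 80us base * multiplier 5 = 400us destination timeout */
	printf("timeout = %d us\n", destination_timeout_us(mmr));
	return 0;
}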
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index 7af31b245636..f82a1337a608 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -74,7 +74,6 @@ static void uv_rtc_send_IPI(int cpu)
apicid = cpu_physical_id(cpu);
pnode = uv_apicid_to_pnode(apicid);
- apicid |= uv_apicid_hibits;
val = (1UL << UVH_IPI_INT_SEND_SHFT) |
(apicid << UVH_IPI_INT_APIC_ID_SHFT) |
(X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT);
@@ -85,10 +84,7 @@ static void uv_rtc_send_IPI(int cpu)
/* Check for an RTC interrupt pending */
static int uv_intr_pending(int pnode)
{
- if (is_uv1_hub())
- return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
- UV1H_EVENT_OCCURRED0_RTC1_MASK;
- else if (is_uvx_hub())
+ if (is_uvx_hub())
return uv_read_global_mmr64(pnode, UVXH_EVENT_OCCURRED2) &
UVXH_EVENT_OCCURRED2_RTC_1_MASK;
return 0;
@@ -98,19 +94,15 @@ static int uv_intr_pending(int pnode)
static int uv_setup_intr(int cpu, u64 expires)
{
u64 val;
- unsigned long apicid = cpu_physical_id(cpu) | uv_apicid_hibits;
+ unsigned long apicid = cpu_physical_id(cpu);
int pnode = uv_cpu_to_pnode(cpu);
uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
UVH_RTC1_INT_CONFIG_M_MASK);
uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);
- if (is_uv1_hub())
- uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
- UV1H_EVENT_OCCURRED0_RTC1_MASK);
- else
- uv_write_global_mmr64(pnode, UVXH_EVENT_OCCURRED2_ALIAS,
- UVXH_EVENT_OCCURRED2_RTC_1_MASK);
+ uv_write_global_mmr64(pnode, UVXH_EVENT_OCCURRED2_ALIAS,
+ UVXH_EVENT_OCCURRED2_RTC_1_MASK);
val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index c46b9f2e732f..2aab43a13a8c 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -873,7 +873,7 @@ static void xen_load_sp0(unsigned long sp0)
static void xen_invalidate_io_bitmap(void)
{
struct physdev_set_iobitmap iobitmap = {
- .bitmap = 0,
+ .bitmap = NULL,
.nr_ports = 0,
};
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 171aff1b11f2..9ea598dcc132 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -92,9 +92,7 @@ static void cpu_bringup(void)
asmlinkage __visible void cpu_bringup_and_idle(void)
{
cpu_bringup();
- boot_init_stack_canary();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
- prevent_tail_call_optimization();
}
void xen_smp_intr_free_pv(unsigned int cpu)
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index c8897aad13cd..91f5b330dcc6 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -39,6 +39,7 @@ static unsigned long xen_tsc_khz(void)
struct pvclock_vcpu_time_info *info =
&HYPERVISOR_shared_info->vcpu_info[0].time;
+ setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
return pvclock_tsc_khz(info);
}