diff options
Diffstat (limited to 'arch/arm64')
-rw-r--r-- | arch/arm64/Kconfig | 33 | ||||
-rw-r--r-- | arch/arm64/include/asm/cpucaps.h | 4 | ||||
-rw-r--r-- | arch/arm64/include/asm/cputype.h | 5 | ||||
-rw-r--r-- | arch/arm64/include/asm/irq.h | 9 | ||||
-rw-r--r-- | arch/arm64/include/asm/mmu_context.h | 6 | ||||
-rw-r--r-- | arch/arm64/kernel/cpu_errata.c | 58 | ||||
-rw-r--r-- | arch/arm64/kernel/entry.S | 3 | ||||
-rw-r--r-- | arch/arm64/mm/context.c | 79 |
8 files changed, 195 insertions, 2 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index cf5f1dafcf74..0c2270ebcf30 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -597,6 +597,31 @@ config CAVIUM_ERRATUM_30115 If unsure, say Y. +config CAVIUM_ERRATUM_36890 + bool "Cavium erratum 36890" + default y + help + Enable workaround for erratum 36890. On all ThunderX T88xx and + OcteonTX1 T81/T83 and some OcteonTX2 chips, the "dc zva" instruction + does not work all the time. This happens when there are two VAs + that match up with one PA, including when the two VAs match but have + different ASIDs. The fix is to disable "dc zva" in userspace. + + If unsure, say Y. + +config MRVL_ERRATUM_37119 + bool "Marvell erratum 37119" + default y + help + Enable workaround for erratum 37119. On some OcteonTX2 chips, + using the SSO workslots in userspace can cause problems + as the caches are not invalidated correctly. To work around + this erratum, a local tlbi is placed before the eret. + This TLBI does not need to invalidate anything, so a tlbi against + asid 0 is used. + + If unsure, say Y. + config QCOM_FALKOR_ERRATUM_1003 bool "Falkor E1003: Incorrect translation due to ASID change" default y @@ -682,6 +707,14 @@ config FUJITSU_ERRATUM_010001 endmenu +config MRVL_OCTEONTX_EL0_INTR + bool "Handle interrupts in EL0 via EL3" + default y + help + Handle certain interrupts in EL0 with the help of EL3 firmware to achieve + low latency without breaking task isolation. + Generally implemented and tested on OcteonTx and its successive + generation CPUs. 
choice prompt "Page size" diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index f19fe4b9acc4..782e5403027c 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -52,7 +52,9 @@ #define ARM64_HAS_IRQ_PRIO_MASKING 42 #define ARM64_HAS_DCPODP 43 #define ARM64_WORKAROUND_1463225 44 +#define ARM64_WORKAROUND_CAVIUM_36890 45 +#define ARM64_WORKAROUND_MRVL_37119 46 -#define ARM64_NCAPS 45 +#define ARM64_NCAPS 47 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index b1454d117cd2..8ff341621fd2 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -79,6 +79,9 @@ #define CAVIUM_CPU_PART_THUNDERX_83XX 0x0A3 #define CAVIUM_CPU_PART_THUNDERX2 0x0AF +#define MRVL_CPU_PART_OCTEONTX2_96XX 0x0B2 +#define MRVL_CPU_PART_OCTEONTX2_95XX 0x0B3 + #define BRCM_CPU_PART_VULCAN 0x516 #define QCOM_CPU_PART_FALKOR_V1 0x800 @@ -105,6 +108,8 @@ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) #define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2) +#define MIDR_MRVL_OCTEONTX2_96XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, MRVL_CPU_PART_OCTEONTX2_96XX) +#define MIDR_MRVL_OCTEONTX2_95XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, MRVL_CPU_PART_OCTEONTX2_95XX) #define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN) #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1) #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR) diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h index b2b0c6405eb0..882de50937e9 100644 --- a/arch/arm64/include/asm/irq.h +++ b/arch/arm64/include/asm/irq.h @@ -6,6 +6,15 @@ #include <asm-generic/irq.h> +/* Platforms with multiple SR-IOV capable PCI devices will 
+ * need large number of MSIX vectors, hence keep this number + * fairly high. + */ +#ifdef CONFIG_PCI_MSI +#undef NR_IRQS +#define NR_IRQS 65536 +#endif + struct pt_regs; static inline int nr_legacy_irqs(void) diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 7ed0adb187a8..d6f46d8a72be 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -248,6 +248,12 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, void verify_cpu_asid_bits(void); void post_ttbr_update_workaround(void); +#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR +int lock_context(struct mm_struct *mm, int index); +int unlock_context_by_index(int index); +bool unlock_context_by_mm(struct mm_struct *mm); +#endif + #endif /* !__ASSEMBLY__ */ #endif /* !__ASM_MMU_CONTEXT_H */ diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index ca11ff7bf55e..6959c0e59e66 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -97,6 +97,23 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused) sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0); } +#ifdef CONFIG_CAVIUM_ERRATUM_36890 +static void +cpu_enable_trap_zva_access(const struct arm64_cpu_capabilities *__unused) +{ + /* + * Clear SCTLR_EL2.DZE or SCTLR_EL1.DZE depending + * on if we are in EL2. 
+ */ + if (!is_kernel_in_hyp_mode()) + sysreg_clear_set(sctlr_el1, SCTLR_EL1_DZE, 0); + else + sysreg_clear_set(sctlr_el2, SCTLR_EL1_DZE, 0); + + return; +} +#endif + atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1); #include <asm/mmu_context.h> @@ -658,6 +675,32 @@ static const struct midr_range cavium_erratum_30115_cpus[] = { }; #endif +#ifdef CONFIG_CAVIUM_ERRATUM_36890 +static const struct midr_range cavium_erratum_36890_cpus[] = { + /* Cavium ThunderX, T88 all passes */ + MIDR_ALL_VERSIONS(MIDR_THUNDERX), + /* Cavium ThunderX, T81 all passes */ + MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX), + /* Cavium ThunderX, T83 all passes */ + MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX), + /* Marvell OcteonTX 2, 96xx pass A0, A1, and B0 */ + MIDR_RANGE(MIDR_MRVL_OCTEONTX2_96XX, 0, 0, 1, 0), + /* Marvell OcteonTX 2, 95 pass A0/A1 */ + MIDR_RANGE(MIDR_MRVL_OCTEONTX2_95XX, 0, 0, 0, 1), + {}, +}; +#endif + +#ifdef CONFIG_MRVL_ERRATUM_37119 +static const struct midr_range mrvl_erratum_37119_cpus[] = { + /* Marvell OcteonTX 2, 96xx pass A0, A1, and B0 */ + MIDR_RANGE(MIDR_MRVL_OCTEONTX2_96XX, 0, 0, 1, 0), + /* Marvell OcteonTX 2, 95 pass A0/A1 */ + MIDR_RANGE(MIDR_MRVL_OCTEONTX2_95XX, 0, 0, 0, 1), + {}, +}; +#endif + #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = { { @@ -769,6 +812,21 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus), }, #endif +#ifdef CONFIG_CAVIUM_ERRATUM_36890 + { + .desc = "Cavium erratum 36890", + .capability = ARM64_WORKAROUND_CAVIUM_36890, + ERRATA_MIDR_RANGE_LIST(cavium_erratum_36890_cpus), + .cpu_enable = cpu_enable_trap_zva_access, + }, +#endif +#ifdef CONFIG_MRVL_ERRATUM_37119 + { + .desc = "Marvell erratum 37119", + .capability = ARM64_WORKAROUND_MRVL_37119, + ERRATA_MIDR_RANGE_LIST(mrvl_erratum_37119_cpus), + }, +#endif { .desc = "Mismatched cache type (CTR_EL0)", .capability = ARM64_MISMATCHED_CACHE_TYPE, diff --git 
a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 84a822748c84..4db2bf6a0e84 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -364,6 +364,9 @@ alternative_else_nop_endif ldr lr, [sp, #S_LR] add sp, sp, #S_FRAME_SIZE // restore sp +#ifdef CONFIG_MRVL_ERRATUM_37119 +alternative_insn "tlbi aside1, xzr", "nop", ARM64_WORKAROUND_MRVL_37119 +#endif .if \el == 0 alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index b5e329fde2dd..aa3e43e87dab 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -24,6 +24,13 @@ static unsigned long *asid_map; static DEFINE_PER_CPU(atomic64_t, active_asids); static DEFINE_PER_CPU(u64, reserved_asids); + +#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR +#define LOCKED_ASIDS_COUNT 128 + +static u64 locked_asids[LOCKED_ASIDS_COUNT]; +#endif + static cpumask_t tlb_flush_pending; #define ASID_MASK (~GENMASK(asid_bits - 1, 0)) @@ -100,6 +107,14 @@ static void flush_context(void) per_cpu(reserved_asids, i) = asid; } +#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR + /* Set bits for locked ASIDs. 
*/ + for (i = 0; i < LOCKED_ASIDS_COUNT; i++) { + asid = locked_asids[i]; + if (asid != 0) + __set_bit(asid & ~ASID_MASK, asid_map); + } +#endif /* * Queue a TLB invalidation for each CPU to perform on next * context-switch @@ -107,9 +122,61 @@ static void flush_context(void) cpumask_setall(&tlb_flush_pending); } +#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR +int lock_context(struct mm_struct *mm, int index) +{ + unsigned long flags; + u64 asid; + + if ((index < 0) || (index >= LOCKED_ASIDS_COUNT)) + return -1; + raw_spin_lock_irqsave(&cpu_asid_lock, flags); + asid = atomic64_read(&mm->context.id); + locked_asids[index] = asid; + raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); + return 0; +} +EXPORT_SYMBOL(lock_context); + +int unlock_context_by_index(int index) +{ + unsigned long flags; + + if ((index < 0) || (index >= LOCKED_ASIDS_COUNT)) + return -1; + raw_spin_lock_irqsave(&cpu_asid_lock, flags); + locked_asids[index] = 0; + raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); + return 0; +} +EXPORT_SYMBOL(unlock_context_by_index); + +bool unlock_context_by_mm(struct mm_struct *mm) +{ + int i; + unsigned long flags; + bool hit = false; + u64 asid; + + raw_spin_lock_irqsave(&cpu_asid_lock, flags); + asid = atomic64_read(&mm->context.id); + + for (i = 0; i < LOCKED_ASIDS_COUNT; i++) { + if (locked_asids[i] == asid) { + hit = true; + locked_asids[i] = 0; + } + } + + raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); + + return hit; +} +#endif + static bool check_update_reserved_asid(u64 asid, u64 newasid) { - int cpu; + int i, cpu; bool hit = false; /* @@ -128,6 +195,16 @@ static bool check_update_reserved_asid(u64 asid, u64 newasid) } } +#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR + /* Same mechanism for locked ASIDs */ + for (i = 0; i < LOCKED_ASIDS_COUNT; i++) { + if (locked_asids[i] == asid) { + hit = true; + locked_asids[i] = newasid; + } + } +#endif + return hit; } |