Diffstat (limited to 'arch/riscv/include/asm')
-rw-r--r-- arch/riscv/include/asm/alternative-macros.h | 153
-rw-r--r-- arch/riscv/include/asm/alternative.h | 34
-rw-r--r-- arch/riscv/include/asm/asm-prototypes.h | 2
-rw-r--r-- arch/riscv/include/asm/asm.h | 62
-rw-r--r-- arch/riscv/include/asm/assembler.h | 82
-rw-r--r-- arch/riscv/include/asm/atomic.h | 2
-rw-r--r-- arch/riscv/include/asm/cacheflush.h | 10
-rw-r--r-- arch/riscv/include/asm/cmpxchg.h | 4
-rw-r--r-- arch/riscv/include/asm/cpufeature.h | 23
-rw-r--r-- arch/riscv/include/asm/csr.h | 108
-rw-r--r-- arch/riscv/include/asm/efi.h | 21
-rw-r--r-- arch/riscv/include/asm/elf.h | 10
-rw-r--r-- arch/riscv/include/asm/entry-common.h | 11
-rw-r--r-- arch/riscv/include/asm/errata_list.h | 28
-rw-r--r-- arch/riscv/include/asm/fixmap.h | 8
-rw-r--r-- arch/riscv/include/asm/ftrace.h | 52
-rw-r--r-- arch/riscv/include/asm/hugetlb.h | 43
-rw-r--r-- arch/riscv/include/asm/hwcap.h | 144
-rw-r--r-- arch/riscv/include/asm/hwprobe.h | 13
-rw-r--r-- arch/riscv/include/asm/insn-def.h | 62
-rw-r--r-- arch/riscv/include/asm/insn.h | 381
-rw-r--r-- arch/riscv/include/asm/io.h | 5
-rw-r--r-- arch/riscv/include/asm/irq.h | 4
-rw-r--r-- arch/riscv/include/asm/jump_label.h | 2
-rw-r--r-- arch/riscv/include/asm/kexec.h | 5
-rw-r--r-- arch/riscv/include/asm/kfence.h | 33
-rw-r--r-- arch/riscv/include/asm/kprobes.h | 2
-rw-r--r-- arch/riscv/include/asm/kvm_aia.h | 127
-rw-r--r-- arch/riscv/include/asm/kvm_host.h | 39
-rw-r--r-- arch/riscv/include/asm/kvm_vcpu_pmu.h | 107
-rw-r--r-- arch/riscv/include/asm/kvm_vcpu_sbi.h | 27
-rw-r--r-- arch/riscv/include/asm/module.h | 16
-rw-r--r-- arch/riscv/include/asm/page.h | 53
-rw-r--r-- arch/riscv/include/asm/parse_asm.h | 219
-rw-r--r-- arch/riscv/include/asm/patch.h | 4
-rw-r--r-- arch/riscv/include/asm/perf_event.h | 7
-rw-r--r-- arch/riscv/include/asm/pgalloc.h | 11
-rw-r--r-- arch/riscv/include/asm/pgtable-64.h | 40
-rw-r--r-- arch/riscv/include/asm/pgtable-bits.h | 3
-rw-r--r-- arch/riscv/include/asm/pgtable.h | 90
-rw-r--r-- arch/riscv/include/asm/ptrace.h | 10
-rw-r--r-- arch/riscv/include/asm/sbi.h | 23
-rw-r--r-- arch/riscv/include/asm/semihost.h | 26
-rw-r--r-- arch/riscv/include/asm/set_memory.h | 3
-rw-r--r-- arch/riscv/include/asm/signal.h | 2
-rw-r--r-- arch/riscv/include/asm/smp.h | 52
-rw-r--r-- arch/riscv/include/asm/stackprotector.h | 10
-rw-r--r-- arch/riscv/include/asm/stacktrace.h | 5
-rw-r--r-- arch/riscv/include/asm/string.h | 10
-rw-r--r-- arch/riscv/include/asm/suspend.h | 22
-rw-r--r-- arch/riscv/include/asm/switch_to.h | 3
-rw-r--r-- arch/riscv/include/asm/syscall.h | 25
-rw-r--r-- arch/riscv/include/asm/thread_info.h | 14
-rw-r--r-- arch/riscv/include/asm/tlbflush.h | 2
-rw-r--r-- arch/riscv/include/asm/topology.h | 21
-rw-r--r-- arch/riscv/include/asm/uaccess.h | 2
-rw-r--r-- arch/riscv/include/asm/vdso.h | 6
-rw-r--r-- arch/riscv/include/asm/vdso/data.h | 17
-rw-r--r-- arch/riscv/include/asm/vdso/gettimeofday.h | 8
-rw-r--r-- arch/riscv/include/asm/vdso/processor.h | 28
-rw-r--r-- arch/riscv/include/asm/vmalloc.h | 77
61 files changed, 1823 insertions(+), 590 deletions(-)
diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h
index ec2f3f1b836f..b8c55fb3ab2c 100644
--- a/arch/riscv/include/asm/alternative-macros.h
+++ b/arch/riscv/include/asm/alternative-macros.h
@@ -6,18 +6,18 @@
#ifdef __ASSEMBLY__
-.macro ALT_ENTRY oldptr newptr vendor_id errata_id new_len
- RISCV_PTR \oldptr
- RISCV_PTR \newptr
- REG_ASM \vendor_id
- REG_ASM \new_len
- .word \errata_id
+.macro ALT_ENTRY oldptr newptr vendor_id patch_id new_len
+ .4byte \oldptr - .
+ .4byte \newptr - .
+ .2byte \vendor_id
+ .2byte \new_len
+ .4byte \patch_id
.endm
-.macro ALT_NEW_CONTENT vendor_id, errata_id, enable = 1, new_c : vararg
+.macro ALT_NEW_CONTENT vendor_id, patch_id, enable = 1, new_c
.if \enable
.pushsection .alternative, "a"
- ALT_ENTRY 886b, 888f, \vendor_id, \errata_id, 889f - 888f
+ ALT_ENTRY 886b, 888f, \vendor_id, \patch_id, 889f - 888f
.popsection
.subsection 1
888 :
@@ -33,7 +33,7 @@
.endif
.endm
-.macro __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, enable
+.macro ALTERNATIVE_CFG old_c, new_c, vendor_id, patch_id, enable
886 :
.option push
.option norvc
@@ -41,50 +41,34 @@
\old_c
.option pop
887 :
- ALT_NEW_CONTENT \vendor_id, \errata_id, \enable, \new_c
+ ALT_NEW_CONTENT \vendor_id, \patch_id, \enable, "\new_c"
.endm
-#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
- __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k)
-
-.macro __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \
- new_c_2, vendor_id_2, errata_id_2, enable_2
-886 :
- .option push
- .option norvc
- .option norelax
- \old_c
- .option pop
-887 :
- ALT_NEW_CONTENT \vendor_id_1, \errata_id_1, \enable_1, \new_c_1
- ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2
+.macro ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, patch_id_1, enable_1, \
+ new_c_2, vendor_id_2, patch_id_2, enable_2
+ ALTERNATIVE_CFG "\old_c", "\new_c_1", \vendor_id_1, \patch_id_1, \enable_1
+ ALT_NEW_CONTENT \vendor_id_2, \patch_id_2, \enable_2, "\new_c_2"
.endm
-#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
- CONFIG_k_1, \
- new_c_2, vendor_id_2, errata_id_2, \
- CONFIG_k_2) \
- __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, \
- IS_ENABLED(CONFIG_k_1), \
- new_c_2, vendor_id_2, errata_id_2, \
- IS_ENABLED(CONFIG_k_2)
+#define __ALTERNATIVE_CFG(...) ALTERNATIVE_CFG __VA_ARGS__
+#define __ALTERNATIVE_CFG_2(...) ALTERNATIVE_CFG_2 __VA_ARGS__
#else /* !__ASSEMBLY__ */
#include <asm/asm.h>
#include <linux/stringify.h>
-#define ALT_ENTRY(oldptr, newptr, vendor_id, errata_id, newlen) \
- RISCV_PTR " " oldptr "\n" \
- RISCV_PTR " " newptr "\n" \
- REG_ASM " " vendor_id "\n" \
- REG_ASM " " newlen "\n" \
- ".word " errata_id "\n"
+#define ALT_ENTRY(oldptr, newptr, vendor_id, patch_id, newlen) \
+ ".4byte ((" oldptr ") - .) \n" \
+ ".4byte ((" newptr ") - .) \n" \
+ ".2byte " vendor_id "\n" \
+ ".2byte " newlen "\n" \
+ ".4byte " patch_id "\n"
-#define ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) \
+#define ALT_NEW_CONTENT(vendor_id, patch_id, enable, new_c) \
".if " __stringify(enable) " == 1\n" \
".pushsection .alternative, \"a\"\n" \
- ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(errata_id), "889f - 888f") \
+ ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(patch_id), "889f - 888f") \
".popsection\n" \
".subsection 1\n" \
"888 :\n" \
@@ -99,7 +83,7 @@
".previous\n" \
".endif\n"
-#define __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, enable) \
+#define __ALTERNATIVE_CFG(old_c, new_c, vendor_id, patch_id, enable) \
"886 :\n" \
".option push\n" \
".option norvc\n" \
@@ -107,84 +91,65 @@
old_c "\n" \
".option pop\n" \
"887 :\n" \
- ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c)
+ ALT_NEW_CONTENT(vendor_id, patch_id, enable, new_c)
-#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
- __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k))
-
-#define __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
- enable_1, \
- new_c_2, vendor_id_2, errata_id_2, \
- enable_2) \
- "886 :\n" \
- ".option push\n" \
- ".option norvc\n" \
- ".option norelax\n" \
- old_c "\n" \
- ".option pop\n" \
- "887 :\n" \
- ALT_NEW_CONTENT(vendor_id_1, errata_id_1, enable_1, new_c_1) \
- ALT_NEW_CONTENT(vendor_id_2, errata_id_2, enable_2, new_c_2)
-
-#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
- CONFIG_k_1, \
- new_c_2, vendor_id_2, errata_id_2, \
- CONFIG_k_2) \
- __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
- IS_ENABLED(CONFIG_k_1), \
- new_c_2, vendor_id_2, errata_id_2, \
- IS_ENABLED(CONFIG_k_2))
+#define __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, patch_id_1, enable_1, \
+ new_c_2, vendor_id_2, patch_id_2, enable_2) \
+ __ALTERNATIVE_CFG(old_c, new_c_1, vendor_id_1, patch_id_1, enable_1) \
+ ALT_NEW_CONTENT(vendor_id_2, patch_id_2, enable_2, new_c_2)
#endif /* __ASSEMBLY__ */
+#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, patch_id, CONFIG_k) \
+ __ALTERNATIVE_CFG(old_c, new_c, vendor_id, patch_id, IS_ENABLED(CONFIG_k))
+
+#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, patch_id_1, CONFIG_k_1, \
+ new_c_2, vendor_id_2, patch_id_2, CONFIG_k_2) \
+ __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, patch_id_1, IS_ENABLED(CONFIG_k_1), \
+ new_c_2, vendor_id_2, patch_id_2, IS_ENABLED(CONFIG_k_2))
+
#else /* CONFIG_RISCV_ALTERNATIVE */
#ifdef __ASSEMBLY__
-.macro __ALTERNATIVE_CFG old_c
+.macro ALTERNATIVE_CFG old_c
\old_c
.endm
-#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
- __ALTERNATIVE_CFG old_c
+#define _ALTERNATIVE_CFG(old_c, ...) \
+ ALTERNATIVE_CFG old_c
-#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
- CONFIG_k_1, \
- new_c_2, vendor_id_2, errata_id_2, \
- CONFIG_k_2) \
- __ALTERNATIVE_CFG old_c
+#define _ALTERNATIVE_CFG_2(old_c, ...) \
+ ALTERNATIVE_CFG old_c
#else /* !__ASSEMBLY__ */
-#define __ALTERNATIVE_CFG(old_c) \
+#define __ALTERNATIVE_CFG(old_c) \
old_c "\n"
-#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
+#define _ALTERNATIVE_CFG(old_c, ...) \
__ALTERNATIVE_CFG(old_c)
-#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
- CONFIG_k_1, \
- new_c_2, vendor_id_2, errata_id_2, \
- CONFIG_k_2) \
- __ALTERNATIVE_CFG(old_c)
+#define _ALTERNATIVE_CFG_2(old_c, ...) \
+ __ALTERNATIVE_CFG(old_c)
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_RISCV_ALTERNATIVE */
/*
* Usage:
- * ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k)
+ * ALTERNATIVE(old_content, new_content, vendor_id, patch_id, CONFIG_k)
* in the assembly code. Otherwise,
- * asm(ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k));
+ * asm(ALTERNATIVE(old_content, new_content, vendor_id, patch_id, CONFIG_k));
*
* old_content: The old content which is probably replaced with new content.
* new_content: The new content.
* vendor_id: The CPU vendor ID.
- * errata_id: The errata ID.
- * CONFIG_k: The Kconfig of this errata. When Kconfig is disabled, the old
+ * patch_id: The patch ID (erratum ID or cpufeature ID).
+ * CONFIG_k: The Kconfig of this patch ID. When Kconfig is disabled, the old
 * content will always be executed.
*/
-#define ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k) \
- _ALTERNATIVE_CFG(old_content, new_content, vendor_id, errata_id, CONFIG_k)
+#define ALTERNATIVE(old_content, new_content, vendor_id, patch_id, CONFIG_k) \
+ _ALTERNATIVE_CFG(old_content, new_content, vendor_id, patch_id, CONFIG_k)
/*
* A vendor wants to replace an old_content, but another vendor has used
@@ -193,13 +158,9 @@
* on the following sample code and then replace ALTERNATIVE() with
* ALTERNATIVE_2() to append its customized content.
*/
-#define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, \
- errata_id_1, CONFIG_k_1, \
- new_content_2, vendor_id_2, \
- errata_id_2, CONFIG_k_2) \
- _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, \
- errata_id_1, CONFIG_k_1, \
- new_content_2, vendor_id_2, \
- errata_id_2, CONFIG_k_2)
+#define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, patch_id_1, CONFIG_k_1, \
+ new_content_2, vendor_id_2, patch_id_2, CONFIG_k_2) \
+ _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, patch_id_1, CONFIG_k_1, \
+ new_content_2, vendor_id_2, patch_id_2, CONFIG_k_2)
#endif
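
For illustration, a minimal sketch of a caller of the renamed macro; the
function name and the vendor/patch IDs (0 and 1) below are made up for this
example, not taken from the patch:

	#include <asm/alternative-macros.h>

	/* Run "nop" by default; patch in "fence.i" when the (hypothetical)
	 * patch ID 1 for vendor ID 0 is detected and the named Kconfig
	 * option is enabled. */
	static inline void example_patched_op(void)
	{
		asm volatile(ALTERNATIVE("nop", "fence.i", 0, 1,
					 CONFIG_RISCV_ALTERNATIVE));
	}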
diff --git a/arch/riscv/include/asm/alternative.h b/arch/riscv/include/asm/alternative.h
index 6511dd73e812..6a41537826a7 100644
--- a/arch/riscv/include/asm/alternative.h
+++ b/arch/riscv/include/asm/alternative.h
@@ -6,8 +6,6 @@
#ifndef __ASM_ALTERNATIVE_H
#define __ASM_ALTERNATIVE_H
-#define ERRATA_STRING_LENGTH_MAX 32
-
#include <asm/alternative-macros.h>
#ifndef __ASSEMBLY__
@@ -15,29 +13,37 @@
#ifdef CONFIG_RISCV_ALTERNATIVE
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/hwcap.h>
+#define PATCH_ID_CPUFEATURE_ID(p) lower_16_bits(p)
+#define PATCH_ID_CPUFEATURE_VALUE(p) upper_16_bits(p)
+
#define RISCV_ALTERNATIVES_BOOT 0 /* alternatives applied during regular boot */
#define RISCV_ALTERNATIVES_MODULE 1 /* alternatives applied during module-init */
#define RISCV_ALTERNATIVES_EARLY_BOOT 2 /* alternatives applied before mmu start */
+/* add the relative offset to the address of the offset to get the absolute address */
+#define __ALT_PTR(a, f) ((void *)&(a)->f + (a)->f)
+#define ALT_OLD_PTR(a) __ALT_PTR(a, old_offset)
+#define ALT_ALT_PTR(a) __ALT_PTR(a, alt_offset)
+
+void probe_vendor_features(unsigned int cpu);
void __init apply_boot_alternatives(void);
void __init apply_early_boot_alternatives(void);
void apply_module_alternatives(void *start, size_t length);
+void riscv_alternative_fix_offsets(void *alt_ptr, unsigned int len,
+ int patch_offset);
+
struct alt_entry {
- void *old_ptr; /* address of original instruciton or data */
- void *alt_ptr; /* address of replacement instruction or data */
- unsigned long vendor_id; /* cpu vendor id */
- unsigned long alt_len; /* The replacement size */
- unsigned int errata_id; /* The errata id */
-} __packed;
-
-struct errata_checkfunc_id {
- unsigned long vendor_id;
- bool (*func)(struct alt_entry *alt);
+ s32 old_offset; /* offset relative to original instruction or data */
+ s32 alt_offset; /* offset relative to replacement instruction or data */
+ u16 vendor_id; /* CPU vendor ID */
+ u16 alt_len; /* The replacement size */
+ u32 patch_id; /* The patch ID (erratum ID or cpufeature ID) */
};
void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
@@ -47,11 +53,15 @@ void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid,
unsigned int stage);
+void thead_feature_probe_func(unsigned int cpu, unsigned long archid,
+ unsigned long impid);
+
void riscv_cpufeature_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned int stage);
#else /* CONFIG_RISCV_ALTERNATIVE */
+static inline void probe_vendor_features(unsigned int cpu) { }
static inline void apply_boot_alternatives(void) { }
static inline void apply_early_boot_alternatives(void) { }
static inline void apply_module_alternatives(void *start, size_t length) { }
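
With the absolute pointers replaced by 32-bit self-relative offsets, the
ALT_OLD_PTR()/ALT_ALT_PTR() helpers above decode each entry. A sketch of
walking the table under that assumption (the function name is hypothetical):

	#include <asm/alternative.h>

	static void example_walk(struct alt_entry *begin, struct alt_entry *end)
	{
		struct alt_entry *alt;

		for (alt = begin; alt < end; alt++) {
			void *old = ALT_OLD_PTR(alt); /* &alt->old_offset + alt->old_offset */
			void *rep = ALT_ALT_PTR(alt); /* &alt->alt_offset + alt->alt_offset */

			/* a patch function would copy alt->alt_len bytes
			 * from rep to old when alt->patch_id matches */
			(void)old;
			(void)rep;
		}
	}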
diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
index ef386fcf3939..61ba8ed43d8f 100644
--- a/arch/riscv/include/asm/asm-prototypes.h
+++ b/arch/riscv/include/asm/asm-prototypes.h
@@ -27,5 +27,7 @@ DECLARE_DO_ERROR_INFO(do_trap_break);
asmlinkage unsigned long get_overflow_stack(void);
asmlinkage void handle_bad_stack(struct pt_regs *regs);
+asmlinkage void do_page_fault(struct pt_regs *regs);
+asmlinkage void do_irq(struct pt_regs *regs);
#endif /* _ASM_RISCV_PROTOTYPES_H */
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index 1b471ff73178..114bbadaef41 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -23,6 +23,7 @@
#define REG_L __REG_SEL(ld, lw)
#define REG_S __REG_SEL(sd, sw)
#define REG_SC __REG_SEL(sc.d, sc.w)
+#define REG_AMOSWAP_AQ __REG_SEL(amoswap.d.aq, amoswap.w.aq)
#define REG_ASM __REG_SEL(.dword, .word)
#define SZREG __REG_SEL(8, 4)
#define LGREG __REG_SEL(3, 2)
@@ -68,6 +69,7 @@
#endif
#ifdef __ASSEMBLY__
+#include <asm/asm-offsets.h>
/* Common assembly source macros */
@@ -80,6 +82,66 @@
.endr
.endm
+ /* save all GPs except x1 ~ x5 */
+ .macro save_from_x6_to_x31
+ REG_S x6, PT_T1(sp)
+ REG_S x7, PT_T2(sp)
+ REG_S x8, PT_S0(sp)
+ REG_S x9, PT_S1(sp)
+ REG_S x10, PT_A0(sp)
+ REG_S x11, PT_A1(sp)
+ REG_S x12, PT_A2(sp)
+ REG_S x13, PT_A3(sp)
+ REG_S x14, PT_A4(sp)
+ REG_S x15, PT_A5(sp)
+ REG_S x16, PT_A6(sp)
+ REG_S x17, PT_A7(sp)
+ REG_S x18, PT_S2(sp)
+ REG_S x19, PT_S3(sp)
+ REG_S x20, PT_S4(sp)
+ REG_S x21, PT_S5(sp)
+ REG_S x22, PT_S6(sp)
+ REG_S x23, PT_S7(sp)
+ REG_S x24, PT_S8(sp)
+ REG_S x25, PT_S9(sp)
+ REG_S x26, PT_S10(sp)
+ REG_S x27, PT_S11(sp)
+ REG_S x28, PT_T3(sp)
+ REG_S x29, PT_T4(sp)
+ REG_S x30, PT_T5(sp)
+ REG_S x31, PT_T6(sp)
+ .endm
+
+ /* restore all GPs except x1 ~ x5 */
+ .macro restore_from_x6_to_x31
+ REG_L x6, PT_T1(sp)
+ REG_L x7, PT_T2(sp)
+ REG_L x8, PT_S0(sp)
+ REG_L x9, PT_S1(sp)
+ REG_L x10, PT_A0(sp)
+ REG_L x11, PT_A1(sp)
+ REG_L x12, PT_A2(sp)
+ REG_L x13, PT_A3(sp)
+ REG_L x14, PT_A4(sp)
+ REG_L x15, PT_A5(sp)
+ REG_L x16, PT_A6(sp)
+ REG_L x17, PT_A7(sp)
+ REG_L x18, PT_S2(sp)
+ REG_L x19, PT_S3(sp)
+ REG_L x20, PT_S4(sp)
+ REG_L x21, PT_S5(sp)
+ REG_L x22, PT_S6(sp)
+ REG_L x23, PT_S7(sp)
+ REG_L x24, PT_S8(sp)
+ REG_L x25, PT_S9(sp)
+ REG_L x26, PT_S10(sp)
+ REG_L x27, PT_S11(sp)
+ REG_L x28, PT_T3(sp)
+ REG_L x29, PT_T4(sp)
+ REG_L x30, PT_T5(sp)
+ REG_L x31, PT_T6(sp)
+ .endm
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_ASM_H */
diff --git a/arch/riscv/include/asm/assembler.h b/arch/riscv/include/asm/assembler.h
new file mode 100644
index 000000000000..44b1457d3e95
--- /dev/null
+++ b/arch/riscv/include/asm/assembler.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ *
+ * Author: Jee Heng Sia <jeeheng.sia@starfivetech.com>
+ */
+
+#ifndef __ASSEMBLY__
+#error "Only include this from assembly code"
+#endif
+
+#ifndef __ASM_ASSEMBLER_H
+#define __ASM_ASSEMBLER_H
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/csr.h>
+
+/*
+ * suspend_restore_csrs - restore CSRs
+ */
+ .macro suspend_restore_csrs
+ REG_L t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
+ csrw CSR_EPC, t0
+ REG_L t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
+ csrw CSR_STATUS, t0
+ REG_L t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
+ csrw CSR_TVAL, t0
+ REG_L t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
+ csrw CSR_CAUSE, t0
+ .endm
+
+/*
+ * suspend_restore_regs - Restore registers (except A0 and T0-T6)
+ */
+ .macro suspend_restore_regs
+ REG_L ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
+ REG_L sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
+ REG_L gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
+ REG_L tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
+ REG_L s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
+ REG_L s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
+ REG_L a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
+ REG_L a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
+ REG_L a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
+ REG_L a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
+ REG_L a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
+ REG_L a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
+ REG_L a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
+ REG_L s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
+ REG_L s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
+ REG_L s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
+ REG_L s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
+ REG_L s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
+ REG_L s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
+ REG_L s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
+ REG_L s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
+ REG_L s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
+ REG_L s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
+ .endm
+
+/*
+ * copy_page - copy 1 page (4KB) of data from source to destination
+ * @a0 - destination
+ * @a1 - source
+ */
+ .macro copy_page a0, a1
+ lui a2, 0x1
+ add a2, a2, a0
+1 :
+ REG_L t0, 0(a1)
+ REG_L t1, SZREG(a1)
+
+ REG_S t0, 0(a0)
+ REG_S t1, SZREG(a0)
+
+ addi a0, a0, 2 * SZREG
+ addi a1, a1, 2 * SZREG
+ bne a2, a0, 1b
+ .endm
+
+#endif /* __ASM_ASSEMBLER_H */
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 0dfe9d857a76..bba472928b53 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -261,7 +261,7 @@ c_t arch_atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \
static __always_inline \
c_t arch_atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \
{ \
- return __xchg(&(v->counter), n, size); \
+ return __arch_xchg(&(v->counter), n, size); \
} \
static __always_inline \
c_t arch_atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index f6fbe7042f1c..8091b8bf4883 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -17,6 +17,13 @@ static inline void local_flush_icache_all(void)
static inline void flush_dcache_page(struct page *page)
{
+ /*
+ * HugeTLB pages are always fully mapped, and only the head page has
+ * PG_dcache_clean set (see comments in flush_icache_pte()).
+ */
+ if (PageHuge(page))
+ page = compound_head(page);
+
if (test_bit(PG_dcache_clean, &page->flags))
clear_bit(PG_dcache_clean, &page->flags);
}
@@ -43,7 +50,8 @@ void flush_icache_mm(struct mm_struct *mm, bool local);
#endif /* CONFIG_SMP */
extern unsigned int riscv_cbom_block_size;
-void riscv_init_cbom_blocksize(void);
+extern unsigned int riscv_cboz_block_size;
+void riscv_init_cbo_blocksizes(void);
#ifdef CONFIG_RISCV_DMA_NONCOHERENT
void riscv_noncoherent_supported(void);
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 12debce235e5..2f4726d3cfcc 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -114,7 +114,7 @@
_x_, sizeof(*(ptr))); \
})
-#define __xchg(ptr, new, size) \
+#define __arch_xchg(ptr, new, size) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(new) __new = (new); \
@@ -143,7 +143,7 @@
#define arch_xchg(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr))); \
+ (__typeof__(*(ptr))) __arch_xchg((ptr), _x_, sizeof(*(ptr))); \
})
#define xchg32(ptr, x) \
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
new file mode 100644
index 000000000000..808d5403f2ac
--- /dev/null
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2022-2023 Rivos, Inc
+ */
+
+#ifndef _ASM_CPUFEATURE_H
+#define _ASM_CPUFEATURE_H
+
+/*
+ * These are probed via a device_initcall(), via either the SBI or directly
+ * from the corresponding CSRs.
+ */
+struct riscv_cpuinfo {
+ unsigned long mvendorid;
+ unsigned long marchid;
+ unsigned long mimpid;
+};
+
+DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
+
+DECLARE_PER_CPU(long, misaligned_access_speed);
+
+#endif
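
The riscv_cpuinfo structure is per-CPU data; a sketch of reading one field
for a given CPU, assuming the device_initcall() mentioned above has already
populated it (the function name is hypothetical):

	#include <linux/percpu.h>
	#include <asm/cpufeature.h>

	static unsigned long example_mvendorid(unsigned int cpu)
	{
		struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu);

		return ci->mvendorid;
	}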
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 0e571f6483d9..b6acb7ed115f 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -7,7 +7,7 @@
#define _ASM_RISCV_CSR_H
#include <asm/asm.h>
-#include <linux/const.h>
+#include <linux/bits.h>
/* Status register flags */
#define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
@@ -40,7 +40,6 @@
#define SR_UXL _AC(0x300000000, UL) /* XLEN mask for U-mode */
#define SR_UXL_32 _AC(0x100000000, UL) /* XLEN = 32 for U-mode */
#define SR_UXL_64 _AC(0x200000000, UL) /* XLEN = 64 for U-mode */
-#define SR_UXL_SHIFT 32
#endif
/* SATP flags */
@@ -73,7 +72,10 @@
#define IRQ_S_EXT 9
#define IRQ_VS_EXT 10
#define IRQ_M_EXT 11
+#define IRQ_S_GEXT 12
#define IRQ_PMU_OVF 13
+#define IRQ_LOCAL_MAX (IRQ_PMU_OVF + 1)
+#define IRQ_LOCAL_MASK GENMASK((IRQ_LOCAL_MAX - 1), 0)
/* Exception causes */
#define EXC_INST_MISALIGNED 0
@@ -128,25 +130,25 @@
#define HGATP32_MODE_SHIFT 31
#define HGATP32_VMID_SHIFT 22
-#define HGATP32_VMID_MASK _AC(0x1FC00000, UL)
-#define HGATP32_PPN _AC(0x003FFFFF, UL)
+#define HGATP32_VMID GENMASK(28, 22)
+#define HGATP32_PPN GENMASK(21, 0)
#define HGATP64_MODE_SHIFT 60
#define HGATP64_VMID_SHIFT 44
-#define HGATP64_VMID_MASK _AC(0x03FFF00000000000, UL)
-#define HGATP64_PPN _AC(0x00000FFFFFFFFFFF, UL)
+#define HGATP64_VMID GENMASK(57, 44)
+#define HGATP64_PPN GENMASK(43, 0)
#define HGATP_PAGE_SHIFT 12
#ifdef CONFIG_64BIT
#define HGATP_PPN HGATP64_PPN
#define HGATP_VMID_SHIFT HGATP64_VMID_SHIFT
-#define HGATP_VMID_MASK HGATP64_VMID_MASK
+#define HGATP_VMID HGATP64_VMID
#define HGATP_MODE_SHIFT HGATP64_MODE_SHIFT
#else
#define HGATP_PPN HGATP32_PPN
#define HGATP_VMID_SHIFT HGATP32_VMID_SHIFT
-#define HGATP_VMID_MASK HGATP32_VMID_MASK
+#define HGATP_VMID HGATP32_VMID
#define HGATP_MODE_SHIFT HGATP32_MODE_SHIFT
#endif
@@ -156,6 +158,27 @@
(_AC(1, UL) << IRQ_S_TIMER) | \
(_AC(1, UL) << IRQ_S_EXT))
+/* AIA CSR bits */
+#define TOPI_IID_SHIFT 16
+#define TOPI_IID_MASK GENMASK(11, 0)
+#define TOPI_IPRIO_MASK GENMASK(7, 0)
+#define TOPI_IPRIO_BITS 8
+
+#define TOPEI_ID_SHIFT 16
+#define TOPEI_ID_MASK GENMASK(10, 0)
+#define TOPEI_PRIO_MASK GENMASK(10, 0)
+
+#define ISELECT_IPRIO0 0x30
+#define ISELECT_IPRIO15 0x3f
+#define ISELECT_MASK GENMASK(8, 0)
+
+#define HVICTL_VTI BIT(30)
+#define HVICTL_IID GENMASK(27, 16)
+#define HVICTL_IID_SHIFT 16
+#define HVICTL_DPR BIT(9)
+#define HVICTL_IPRIOM BIT(8)
+#define HVICTL_IPRIO GENMASK(7, 0)
+
/* xENVCFG flags */
#define ENVCFG_STCE (_AC(1, ULL) << 63)
#define ENVCFG_PBMTE (_AC(1, ULL) << 62)
@@ -250,6 +273,18 @@
#define CSR_STIMECMP 0x14D
#define CSR_STIMECMPH 0x15D
+/* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
+#define CSR_SISELECT 0x150
+#define CSR_SIREG 0x151
+
+/* Supervisor-Level Interrupts (AIA) */
+#define CSR_STOPEI 0x15c
+#define CSR_STOPI 0xdb0
+
+/* Supervisor-Level High-Half CSRs (AIA) */
+#define CSR_SIEH 0x114
+#define CSR_SIPH 0x154
+
#define CSR_VSSTATUS 0x200
#define CSR_VSIE 0x204
#define CSR_VSTVEC 0x205
@@ -279,8 +314,32 @@
#define CSR_HGATP 0x680
#define CSR_HGEIP 0xe12
+/* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
+#define CSR_HVIEN 0x608
+#define CSR_HVICTL 0x609
+#define CSR_HVIPRIO1 0x646
+#define CSR_HVIPRIO2 0x647
+
+/* VS-Level Window to Indirectly Accessed Registers (H-extension with AIA) */
+#define CSR_VSISELECT 0x250
+#define CSR_VSIREG 0x251
+
+/* VS-Level Interrupts (H-extension with AIA) */
+#define CSR_VSTOPEI 0x25c
+#define CSR_VSTOPI 0xeb0
+
+/* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
+#define CSR_HIDELEGH 0x613
+#define CSR_HVIENH 0x618
+#define CSR_HVIPH 0x655
+#define CSR_HVIPRIO1H 0x656
+#define CSR_HVIPRIO2H 0x657
+#define CSR_VSIEH 0x214
+#define CSR_VSIPH 0x254
+
#define CSR_MSTATUS 0x300
#define CSR_MISA 0x301
+#define CSR_MIDELEG 0x303
#define CSR_MIE 0x304
#define CSR_MTVEC 0x305
#define CSR_MENVCFG 0x30a
@@ -297,6 +356,25 @@
#define CSR_MIMPID 0xf13
#define CSR_MHARTID 0xf14
+/* Machine-Level Window to Indirectly Accessed Registers (AIA) */
+#define CSR_MISELECT 0x350
+#define CSR_MIREG 0x351
+
+/* Machine-Level Interrupts (AIA) */
+#define CSR_MTOPEI 0x35c
+#define CSR_MTOPI 0xfb0
+
+/* Virtual Interrupts for Supervisor Level (AIA) */
+#define CSR_MVIEN 0x308
+#define CSR_MVIP 0x309
+
+/* Machine-Level High-Half CSRs (AIA) */
+#define CSR_MIDELEGH 0x313
+#define CSR_MIEH 0x314
+#define CSR_MVIENH 0x318
+#define CSR_MVIPH 0x319
+#define CSR_MIPH 0x354
+
#ifdef CONFIG_RISCV_M_MODE
# define CSR_STATUS CSR_MSTATUS
# define CSR_IE CSR_MIE
@@ -307,6 +385,13 @@
# define CSR_TVAL CSR_MTVAL
# define CSR_IP CSR_MIP
+# define CSR_IEH CSR_MIEH
+# define CSR_ISELECT CSR_MISELECT
+# define CSR_IREG CSR_MIREG
+# define CSR_IPH CSR_MIPH
+# define CSR_TOPEI CSR_MTOPEI
+# define CSR_TOPI CSR_MTOPI
+
# define SR_IE SR_MIE
# define SR_PIE SR_MPIE
# define SR_PP SR_MPP
@@ -324,6 +409,13 @@
# define CSR_TVAL CSR_STVAL
# define CSR_IP CSR_SIP
+# define CSR_IEH CSR_SIEH
+# define CSR_ISELECT CSR_SISELECT
+# define CSR_IREG CSR_SIREG
+# define CSR_IPH CSR_SIPH
+# define CSR_TOPEI CSR_STOPEI
+# define CSR_TOPI CSR_STOPI
+
# define SR_IE SR_SIE
# define SR_PIE SR_SPIE
# define SR_PP SR_SPP
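
The TOPI_* definitions above describe the xTOPI CSR layout, where the
interrupt identity sits at bit 16 and the priority occupies the low byte. A
sketch of decoding it with the csr_read() helper this header also provides
(the function name is hypothetical):

	#include <asm/csr.h>

	static unsigned long example_topi_iid(void)
	{
		unsigned long topi = csr_read(CSR_TOPI);

		/* identity of the highest-priority pending interrupt */
		return (topi >> TOPI_IID_SHIFT) & TOPI_IID_MASK;
	}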
diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h
index f74879a8f1ea..29e9a0d84b16 100644
--- a/arch/riscv/include/asm/efi.h
+++ b/arch/riscv/include/asm/efi.h
@@ -10,6 +10,7 @@
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
#ifdef CONFIG_EFI
extern void efi_init(void);
@@ -18,9 +19,12 @@ extern void efi_init(void);
#endif
int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
-int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
+int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md, bool);
-#define arch_efi_call_virt_setup() efi_virtmap_load()
+#define arch_efi_call_virt_setup() ({ \
+ sync_kernel_mappings(efi_mm.pgd); \
+ efi_virtmap_load(); \
+ })
#define arch_efi_call_virt_teardown() efi_virtmap_unload()
#define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE)
@@ -31,13 +35,20 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
return ULONG_MAX;
}
-#define alloc_screen_info(x...) (&screen_info)
-
-static inline void free_screen_info(struct screen_info *si)
+static inline unsigned long efi_get_kimg_min_align(void)
{
+ /*
+	 * RISC-V requires the kernel image to be placed at a 2MB-aligned
+	 * base for 64-bit and a 4MB-aligned base for 32-bit.
+ */
+ return IS_ENABLED(CONFIG_64BIT) ? SZ_2M : SZ_4M;
}
+#define EFI_KIMG_PREFERRED_ADDRESS efi_get_kimg_min_align()
+
void efi_virtmap_load(void);
void efi_virtmap_unload(void);
+unsigned long stext_offset(void);
+
#endif /* _ASM_EFI_H */
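
A sketch of how a loader might use efi_get_kimg_min_align() to round a
candidate base address up to the required boundary; ALIGN() comes from
linux/align.h and the function name is hypothetical:

	#include <linux/align.h>
	#include <asm/efi.h>

	static unsigned long example_place_kernel(unsigned long candidate)
	{
		/* 2MB-aligned on 64-bit, 4MB-aligned on 32-bit */
		return ALIGN(candidate, efi_get_kimg_min_align());
	}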
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
index e7acffdf21d2..30e7d2455960 100644
--- a/arch/riscv/include/asm/elf.h
+++ b/arch/riscv/include/asm/elf.h
@@ -14,6 +14,7 @@
#include <asm/auxvec.h>
#include <asm/byteorder.h>
#include <asm/cacheinfo.h>
+#include <asm/hwcap.h>
/*
* These are used to set parameters in the core dumps.
@@ -59,12 +60,13 @@ extern bool compat_elf_check_arch(Elf32_Ehdr *hdr);
#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
#endif
#endif
+
/*
- * This yields a mask that user programs can use to figure out what
- * instruction set this CPU supports. This could be done in user space,
- * but it's not easy, and we've already done it here.
+ * Provides information on the available set of ISA extensions to userspace,
+ * via a bitmap that corresponds to each single-letter ISA extension. This is
+ * essentially defunct, but will remain for compatibility with userspace.
*/
-#define ELF_HWCAP (elf_hwcap)
+#define ELF_HWCAP (elf_hwcap & ((1UL << RISCV_ISA_EXT_BASE) - 1))
extern unsigned long elf_hwcap;
/*
diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h
new file mode 100644
index 000000000000..6e4dee49d84b
--- /dev/null
+++ b/arch/riscv/include/asm/entry-common.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_ENTRY_COMMON_H
+#define _ASM_RISCV_ENTRY_COMMON_H
+
+#include <asm/stacktrace.h>
+
+void handle_page_fault(struct pt_regs *regs);
+void handle_break(struct pt_regs *regs);
+
+#endif /* _ASM_RISCV_ENTRY_COMMON_H */
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index 19a771085781..fb1a810f3d8c 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -6,6 +6,9 @@
#define ASM_ERRATA_LIST_H
#include <asm/alternative.h>
+#include <asm/csr.h>
+#include <asm/insn-def.h>
+#include <asm/hwcap.h>
#include <asm/vendorid_list.h>
#ifdef CONFIG_ERRATA_SIFIVE
@@ -17,13 +20,10 @@
#ifdef CONFIG_ERRATA_THEAD
#define ERRATA_THEAD_PBMT 0
#define ERRATA_THEAD_CMO 1
-#define ERRATA_THEAD_NUMBER 2
+#define ERRATA_THEAD_PMU 2
+#define ERRATA_THEAD_NUMBER 3
#endif
-#define CPUFEATURE_SVPBMT 0
-#define CPUFEATURE_ZICBOM 1
-#define CPUFEATURE_NUMBER 2
-
#ifdef __ASSEMBLY__
#define ALT_INSN_FAULT(x) \
@@ -53,7 +53,7 @@ asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID, \
#define ALT_SVPBMT(_val, prot) \
asm(ALTERNATIVE_2("li %0, 0\t\nnop", \
"li %0, %1\t\nslli %0,%0,%3", 0, \
- CPUFEATURE_SVPBMT, CONFIG_RISCV_ISA_SVPBMT, \
+ RISCV_ISA_EXT_SVPBMT, CONFIG_RISCV_ISA_SVPBMT, \
"li %0, %2\t\nslli %0,%0,%4", THEAD_VENDOR_ID, \
ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT) \
: "=r"(_val) \
@@ -123,11 +123,11 @@ asm volatile(ALTERNATIVE_2( \
"mv a0, %1\n\t" \
"j 2f\n\t" \
"3:\n\t" \
- "cbo." __stringify(_op) " (a0)\n\t" \
+ CBO_##_op(a0) \
"add a0, a0, %0\n\t" \
"2:\n\t" \
"bltu a0, %2, 3b\n\t" \
- "nop", 0, CPUFEATURE_ZICBOM, CONFIG_RISCV_ISA_ZICBOM, \
+ "nop", 0, RISCV_ISA_EXT_ZICBOM, CONFIG_RISCV_ISA_ZICBOM, \
"mv a0, %1\n\t" \
"j 2f\n\t" \
"3:\n\t" \
@@ -142,6 +142,18 @@ asm volatile(ALTERNATIVE_2( \
"r"((unsigned long)(_start) + (_size)) \
: "a0")
+#define THEAD_C9XX_RV_IRQ_PMU 17
+#define THEAD_C9XX_CSR_SCOUNTEROF 0x5c5
+
+#define ALT_SBI_PMU_OVERFLOW(__ovl) \
+asm volatile(ALTERNATIVE( \
+ "csrr %0, " __stringify(CSR_SSCOUNTOVF), \
+ "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \
+ THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \
+ CONFIG_ERRATA_THEAD_PMU) \
+ : "=r" (__ovl) : \
+ : "memory")
+
#endif /* __ASSEMBLY__ */
#endif
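
ALT_SBI_PMU_OVERFLOW() reads the standard Sscofpmf overflow CSR by default
and is patched to the T-Head C9xx vendor CSR when ERRATA_THEAD_PMU applies. A
sketch of a caller (the function name is hypothetical):

	#include <asm/errata_list.h>

	static unsigned long example_read_overflow(void)
	{
		unsigned long overflow;

		/* csrr from CSR_SSCOUNTOVF, or THEAD_C9XX_CSR_SCOUNTEROF
		 * once the alternative is patched */
		ALT_SBI_PMU_OVERFLOW(overflow);
		return overflow;
	}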
diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
index 5c3e7b97fcc6..0a55099bb734 100644
--- a/arch/riscv/include/asm/fixmap.h
+++ b/arch/riscv/include/asm/fixmap.h
@@ -22,6 +22,14 @@
*/
enum fixed_addresses {
FIX_HOLE,
+ /*
+ * The fdt fixmap mapping must be PMD aligned and will be mapped
+ * using PMD entries in fixmap_pmd in 64-bit and a PGD entry in 32-bit.
+ */
+ FIX_FDT_END,
+ FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
+
+	/* The fixmaps below will be mapped using fixmap_pte */
FIX_PTE,
FIX_PMD,
FIX_PUD,
diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
index 04dad3380041..d47d87c2d7e3 100644
--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -42,6 +42,14 @@ struct dyn_arch_ftrace {
* 2) jalr: setting low-12 offset to ra, jump to ra, and set ra to
* return address (original pc + 4)
*
+ *<ftrace enable>:
+ * 0: auipc t0/ra, 0x?
+ * 4: jalr t0/ra, ?(t0/ra)
+ *
+ *<ftrace disable>:
+ * 0: nop
+ * 4: nop
+ *
* Dynamic ftrace generates probes to call sites, so we must deal with
* both auipc and jalr at the same time.
*/
@@ -52,25 +60,43 @@ struct dyn_arch_ftrace {
#define AUIPC_OFFSET_MASK (0xfffff000)
#define AUIPC_PAD (0x00001000)
#define JALR_SHIFT 20
-#define JALR_BASIC (0x000080e7)
-#define AUIPC_BASIC (0x00000097)
+#define JALR_RA (0x000080e7)
+#define AUIPC_RA (0x00000097)
+#define JALR_T0 (0x000282e7)
+#define AUIPC_T0 (0x00000297)
#define NOP4 (0x00000013)
-#define make_call(caller, callee, call) \
+#define to_jalr_t0(offset) \
+ (((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_T0)
+
+#define to_auipc_t0(offset) \
+ ((offset & JALR_SIGN_MASK) ? \
+ (((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_T0) : \
+ ((offset & AUIPC_OFFSET_MASK) | AUIPC_T0))
+
+#define make_call_t0(caller, callee, call) \
do { \
- call[0] = to_auipc_insn((unsigned int)((unsigned long)callee - \
- (unsigned long)caller)); \
- call[1] = to_jalr_insn((unsigned int)((unsigned long)callee - \
- (unsigned long)caller)); \
+ unsigned int offset = \
+ (unsigned long) callee - (unsigned long) caller; \
+ call[0] = to_auipc_t0(offset); \
+ call[1] = to_jalr_t0(offset); \
} while (0)
-#define to_jalr_insn(offset) \
- (((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_BASIC)
+#define to_jalr_ra(offset) \
+ (((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_RA)
-#define to_auipc_insn(offset) \
+#define to_auipc_ra(offset) \
((offset & JALR_SIGN_MASK) ? \
- (((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_BASIC) : \
- ((offset & AUIPC_OFFSET_MASK) | AUIPC_BASIC))
+ (((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_RA) : \
+ ((offset & AUIPC_OFFSET_MASK) | AUIPC_RA))
+
+#define make_call_ra(caller, callee, call) \
+do { \
+ unsigned int offset = \
+ (unsigned long) callee - (unsigned long) caller; \
+ call[0] = to_auipc_ra(offset); \
+ call[1] = to_jalr_ra(offset); \
+} while (0)
/*
* Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
@@ -83,6 +109,6 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop
#endif
-#endif
+#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* _ASM_RISCV_FTRACE_H */
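
make_call_t0() composes the auipc+jalr pair described above, splitting the
caller-to-callee displacement into an upper 20-bit part (with rounding
compensation when the low part is negative) and a lower 12-bit part. A sketch
with hypothetical site/target addresses:

	#include <asm/ftrace.h>

	static void example_make_call(unsigned long site, unsigned long target)
	{
		unsigned int call[2];

		/* call[0] = auipc t0, hi20; call[1] = jalr t0, lo12(t0) */
		make_call_t0(site, target, call);

		/* a text patcher would now write these 8 bytes at 'site' */
	}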
diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h
index a5c2ca1d1cd8..ce1ebda1a49a 100644
--- a/arch/riscv/include/asm/hugetlb.h
+++ b/arch/riscv/include/asm/hugetlb.h
@@ -2,7 +2,48 @@
#ifndef _ASM_RISCV_HUGETLB_H
#define _ASM_RISCV_HUGETLB_H
-#include <asm-generic/hugetlb.h>
#include <asm/page.h>
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+ clear_bit(PG_dcache_clean, &page->flags);
+}
+#define arch_clear_hugepage_flags arch_clear_hugepage_flags
+
+#ifdef CONFIG_RISCV_ISA_SVNAPOT
+#define __HAVE_ARCH_HUGE_PTE_CLEAR
+void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long sz);
+
+#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
+void set_huge_pte_at(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep, pte_t pte);
+
+#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep);
+
+#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
+pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep);
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
+void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep);
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
+int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty);
+
+#define __HAVE_ARCH_HUGE_PTEP_GET
+pte_t huge_ptep_get(pte_t *ptep);
+
+pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
+#define arch_make_huge_pte arch_make_huge_pte
+
+#endif /*CONFIG_RISCV_ISA_SVNAPOT*/
+
+#include <asm-generic/hugetlb.h>
+
#endif /* _ASM_RISCV_HUGETLB_H */
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index b22525290073..e0c40a4c63d5 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -8,24 +8,11 @@
#ifndef _ASM_RISCV_HWCAP_H
#define _ASM_RISCV_HWCAP_H
+#include <asm/alternative-macros.h>
#include <asm/errno.h>
#include <linux/bits.h>
#include <uapi/asm/hwcap.h>
-#ifndef __ASSEMBLY__
-#include <linux/jump_label.h>
-/*
- * This yields a mask that user programs can use to figure out what
- * instruction set this cpu supports.
- */
-#define ELF_HWCAP (elf_hwcap)
-
-enum {
- CAP_HWCAP = 1,
-};
-
-extern unsigned long elf_hwcap;
-
#define RISCV_ISA_EXT_a ('a' - 'a')
#define RISCV_ISA_EXT_c ('c' - 'a')
#define RISCV_ISA_EXT_d ('d' - 'a')
@@ -37,42 +24,41 @@ extern unsigned long elf_hwcap;
#define RISCV_ISA_EXT_u ('u' - 'a')
/*
- * Increse this to higher value as kernel support more ISA extensions.
+ * These macros represent the logical IDs of each multi-letter RISC-V ISA
+ * extension and are used in the ISA bitmap. The logical IDs start from
+ * RISCV_ISA_EXT_BASE, which allows the 0-25 range to be reserved for single
+ * letter extensions. The maximum, RISCV_ISA_EXT_MAX, is defined in order
+ * to allocate the bitmap and may be increased when necessary.
+ *
+ * New extensions should just be added to the bottom, rather than added
+ * alphabetically, in order to avoid unnecessary shuffling.
*/
-#define RISCV_ISA_EXT_MAX 64
-#define RISCV_ISA_EXT_NAME_LEN_MAX 32
-
-/* The base ID for multi-letter ISA extensions */
-#define RISCV_ISA_EXT_BASE 26
+#define RISCV_ISA_EXT_BASE 26
+
+#define RISCV_ISA_EXT_SSCOFPMF 26
+#define RISCV_ISA_EXT_SSTC 27
+#define RISCV_ISA_EXT_SVINVAL 28
+#define RISCV_ISA_EXT_SVPBMT 29
+#define RISCV_ISA_EXT_ZBB 30
+#define RISCV_ISA_EXT_ZICBOM 31
+#define RISCV_ISA_EXT_ZIHINTPAUSE 32
+#define RISCV_ISA_EXT_SVNAPOT 33
+#define RISCV_ISA_EXT_ZICBOZ 34
+#define RISCV_ISA_EXT_SMAIA 35
+#define RISCV_ISA_EXT_SSAIA 36
+
+#define RISCV_ISA_EXT_MAX 64
+#define RISCV_ISA_EXT_NAME_LEN_MAX 32
+
+#ifdef CONFIG_RISCV_M_MODE
+#define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SMAIA
+#else
+#define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SSAIA
+#endif
-/*
- * This enum represent the logical ID for each multi-letter RISC-V ISA extension.
- * The logical ID should start from RISCV_ISA_EXT_BASE and must not exceed
- * RISCV_ISA_EXT_MAX. 0-25 range is reserved for single letter
- * extensions while all the multi-letter extensions should define the next
- * available logical extension id.
- */
-enum riscv_isa_ext_id {
- RISCV_ISA_EXT_SSCOFPMF = RISCV_ISA_EXT_BASE,
- RISCV_ISA_EXT_SVPBMT,
- RISCV_ISA_EXT_ZICBOM,
- RISCV_ISA_EXT_ZIHINTPAUSE,
- RISCV_ISA_EXT_SSTC,
- RISCV_ISA_EXT_SVINVAL,
- RISCV_ISA_EXT_ID_MAX = RISCV_ISA_EXT_MAX,
-};
+#ifndef __ASSEMBLY__
-/*
- * This enum represents the logical ID for each RISC-V ISA extension static
- * keys. We can use static key to optimize code path if some ISA extensions
- * are available.
- */
-enum riscv_isa_ext_key {
- RISCV_ISA_EXT_KEY_FPU, /* For 'F' and 'D' */
- RISCV_ISA_EXT_KEY_ZIHINTPAUSE,
- RISCV_ISA_EXT_KEY_SVINVAL,
- RISCV_ISA_EXT_KEY_MAX,
-};
+#include <linux/jump_label.h>
struct riscv_isa_ext_data {
/* Name of the extension displayed to userspace via /proc/cpuinfo */
@@ -81,24 +67,6 @@ struct riscv_isa_ext_data {
unsigned int isa_ext_id;
};
-extern struct static_key_false riscv_isa_ext_keys[RISCV_ISA_EXT_KEY_MAX];
-
-static __always_inline int riscv_isa_ext2key(int num)
-{
- switch (num) {
- case RISCV_ISA_EXT_f:
- return RISCV_ISA_EXT_KEY_FPU;
- case RISCV_ISA_EXT_d:
- return RISCV_ISA_EXT_KEY_FPU;
- case RISCV_ISA_EXT_ZIHINTPAUSE:
- return RISCV_ISA_EXT_KEY_ZIHINTPAUSE;
- case RISCV_ISA_EXT_SVINVAL:
- return RISCV_ISA_EXT_KEY_SVINVAL;
- default:
- return -EINVAL;
- }
-}
-
unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext)
@@ -107,6 +75,52 @@ bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
#define riscv_isa_extension_available(isa_bitmap, ext) \
__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
+static __always_inline bool
+riscv_has_extension_likely(const unsigned long ext)
+{
+ compiletime_assert(ext < RISCV_ISA_EXT_MAX,
+ "ext must be < RISCV_ISA_EXT_MAX");
+
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+ asm_volatile_goto(
+ ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1)
+ :
+ : [ext] "i" (ext)
+ :
+ : l_no);
+ } else {
+ if (!__riscv_isa_extension_available(NULL, ext))
+ goto l_no;
+ }
+
+ return true;
+l_no:
+ return false;
+}
+
+static __always_inline bool
+riscv_has_extension_unlikely(const unsigned long ext)
+{
+ compiletime_assert(ext < RISCV_ISA_EXT_MAX,
+ "ext must be < RISCV_ISA_EXT_MAX");
+
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+ asm_volatile_goto(
+ ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1)
+ :
+ : [ext] "i" (ext)
+ :
+ : l_yes);
+ } else {
+ if (__riscv_isa_extension_available(NULL, ext))
+ goto l_yes;
+ }
+
+ return false;
+l_yes:
+ return true;
+}
+
#endif
#endif /* _ASM_RISCV_HWCAP_H */
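
The new helpers compile extension checks down to a single patched branch when
alternatives are available, falling back to a bitmap lookup otherwise. A
sketch guarding a Zbb fast path (the function name is hypothetical):

	#include <linux/types.h>
	#include <asm/hwcap.h>

	static bool example_have_zbb(void)
	{
		/* becomes a plain nop or jump once alternatives are patched */
		return riscv_has_extension_likely(RISCV_ISA_EXT_ZBB);
	}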
diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
new file mode 100644
index 000000000000..78936f4ff513
--- /dev/null
+++ b/arch/riscv/include/asm/hwprobe.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright 2023 Rivos, Inc
+ */
+
+#ifndef _ASM_HWPROBE_H
+#define _ASM_HWPROBE_H
+
+#include <uapi/asm/hwprobe.h>
+
+#define RISCV_HWPROBE_MAX_KEY 5
+
+#endif
diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h
index 16044affa57c..6960beb75f32 100644
--- a/arch/riscv/include/asm/insn-def.h
+++ b/arch/riscv/include/asm/insn-def.h
@@ -12,6 +12,12 @@
#define INSN_R_RD_SHIFT 7
#define INSN_R_OPCODE_SHIFT 0
+#define INSN_I_SIMM12_SHIFT 20
+#define INSN_I_RS1_SHIFT 15
+#define INSN_I_FUNC3_SHIFT 12
+#define INSN_I_RD_SHIFT 7
+#define INSN_I_OPCODE_SHIFT 0
+
#ifdef __ASSEMBLY__
#ifdef CONFIG_AS_HAS_INSN
@@ -20,6 +26,10 @@
.insn r \opcode, \func3, \func7, \rd, \rs1, \rs2
.endm
+ .macro insn_i, opcode, func3, rd, rs1, simm12
+ .insn i \opcode, \func3, \rd, \rs1, \simm12
+ .endm
+
#else
#include <asm/gpr-num.h>
@@ -33,9 +43,18 @@
(.L__gpr_num_\rs2 << INSN_R_RS2_SHIFT))
.endm
+ .macro insn_i, opcode, func3, rd, rs1, simm12
+ .4byte ((\opcode << INSN_I_OPCODE_SHIFT) | \
+ (\func3 << INSN_I_FUNC3_SHIFT) | \
+ (.L__gpr_num_\rd << INSN_I_RD_SHIFT) | \
+ (.L__gpr_num_\rs1 << INSN_I_RS1_SHIFT) | \
+ (\simm12 << INSN_I_SIMM12_SHIFT))
+ .endm
+
#endif
#define __INSN_R(...) insn_r __VA_ARGS__
+#define __INSN_I(...) insn_i __VA_ARGS__
#else /* ! __ASSEMBLY__ */
@@ -44,6 +63,9 @@
#define __INSN_R(opcode, func3, func7, rd, rs1, rs2) \
".insn r " opcode ", " func3 ", " func7 ", " rd ", " rs1 ", " rs2 "\n"
+#define __INSN_I(opcode, func3, rd, rs1, simm12) \
+ ".insn i " opcode ", " func3 ", " rd ", " rs1 ", " simm12 "\n"
+
#else
#include <linux/stringify.h>
@@ -60,14 +82,32 @@
" (.L__gpr_num_\\rs2 << " __stringify(INSN_R_RS2_SHIFT) "))\n" \
" .endm\n"
+#define DEFINE_INSN_I \
+ __DEFINE_ASM_GPR_NUMS \
+" .macro insn_i, opcode, func3, rd, rs1, simm12\n" \
+" .4byte ((\\opcode << " __stringify(INSN_I_OPCODE_SHIFT) ") |" \
+" (\\func3 << " __stringify(INSN_I_FUNC3_SHIFT) ") |" \
+" (.L__gpr_num_\\rd << " __stringify(INSN_I_RD_SHIFT) ") |" \
+" (.L__gpr_num_\\rs1 << " __stringify(INSN_I_RS1_SHIFT) ") |" \
+" (\\simm12 << " __stringify(INSN_I_SIMM12_SHIFT) "))\n" \
+" .endm\n"
+
#define UNDEFINE_INSN_R \
" .purgem insn_r\n"
+#define UNDEFINE_INSN_I \
+" .purgem insn_i\n"
+
#define __INSN_R(opcode, func3, func7, rd, rs1, rs2) \
DEFINE_INSN_R \
"insn_r " opcode ", " func3 ", " func7 ", " rd ", " rs1 ", " rs2 "\n" \
UNDEFINE_INSN_R
+#define __INSN_I(opcode, func3, rd, rs1, simm12) \
+ DEFINE_INSN_I \
+ "insn_i " opcode ", " func3 ", " rd ", " rs1 ", " simm12 "\n" \
+ UNDEFINE_INSN_I
+
#endif
#endif /* ! __ASSEMBLY__ */
@@ -76,9 +116,14 @@
__INSN_R(RV_##opcode, RV_##func3, RV_##func7, \
RV_##rd, RV_##rs1, RV_##rs2)
+#define INSN_I(opcode, func3, rd, rs1, simm12) \
+ __INSN_I(RV_##opcode, RV_##func3, RV_##rd, \
+ RV_##rs1, RV_##simm12)
+
#define RV_OPCODE(v) __ASM_STR(v)
#define RV_FUNC3(v) __ASM_STR(v)
#define RV_FUNC7(v) __ASM_STR(v)
+#define RV_SIMM12(v) __ASM_STR(v)
#define RV_RD(v) __ASM_STR(v)
#define RV_RS1(v) __ASM_STR(v)
#define RV_RS2(v) __ASM_STR(v)
@@ -87,6 +132,7 @@
#define RV___RS1(v) __RV_REG(v)
#define RV___RS2(v) __RV_REG(v)
+#define RV_OPCODE_MISC_MEM RV_OPCODE(15)
#define RV_OPCODE_SYSTEM RV_OPCODE(115)
#define HFENCE_VVMA(vaddr, asid) \
@@ -134,4 +180,20 @@
INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(51), \
__RD(0), RS1(gaddr), RS2(vmid))
+#define CBO_inval(base) \
+ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
+ RS1(base), SIMM12(0))
+
+#define CBO_clean(base) \
+ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
+ RS1(base), SIMM12(1))
+
+#define CBO_flush(base) \
+ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
+ RS1(base), SIMM12(2))
+
+#define CBO_zero(base) \
+ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
+ RS1(base), SIMM12(4))
+
#endif /* __ASM_INSN_DEF_H */
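
The CBO_* helpers emit Zicbom/Zicboz cache-block instructions through .insn
(or a raw .4byte fallback) so they assemble even on toolchains without the
extensions; the operand must already be in the named register. A sketch
issuing one cbo.clean, mirroring the a0-based pattern used in errata_list.h
(the function name is hypothetical):

	#include <asm/insn-def.h>

	static void example_cbo_clean(unsigned long vaddr)
	{
		/* move the address into a0, then execute "cbo.clean (a0)" */
		asm volatile("mv a0, %0\n\t"
			     CBO_clean(a0)
			     : : "r" (vaddr) : "a0", "memory");
	}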
diff --git a/arch/riscv/include/asm/insn.h b/arch/riscv/include/asm/insn.h
new file mode 100644
index 000000000000..8d5c84f2d5ef
--- /dev/null
+++ b/arch/riscv/include/asm/insn.h
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 SiFive
+ */
+
+#ifndef _ASM_RISCV_INSN_H
+#define _ASM_RISCV_INSN_H
+
+#include <linux/bits.h>
+
+#define RV_INSN_FUNCT3_MASK GENMASK(14, 12)
+#define RV_INSN_FUNCT3_OPOFF 12
+#define RV_INSN_OPCODE_MASK GENMASK(6, 0)
+#define RV_INSN_OPCODE_OPOFF 0
+#define RV_INSN_FUNCT12_OPOFF 20
+
+#define RV_ENCODE_FUNCT3(f_) (RVG_FUNCT3_##f_ << RV_INSN_FUNCT3_OPOFF)
+#define RV_ENCODE_FUNCT12(f_) (RVG_FUNCT12_##f_ << RV_INSN_FUNCT12_OPOFF)
+
+/* The bit field of immediate value in I-type instruction */
+#define RV_I_IMM_SIGN_OPOFF 31
+#define RV_I_IMM_11_0_OPOFF 20
+#define RV_I_IMM_SIGN_OFF 12
+#define RV_I_IMM_11_0_OFF 0
+#define RV_I_IMM_11_0_MASK GENMASK(11, 0)
+
+/* The bit field of immediate value in J-type instruction */
+#define RV_J_IMM_SIGN_OPOFF 31
+#define RV_J_IMM_10_1_OPOFF 21
+#define RV_J_IMM_11_OPOFF 20
+#define RV_J_IMM_19_12_OPOFF 12
+#define RV_J_IMM_SIGN_OFF 20
+#define RV_J_IMM_10_1_OFF 1
+#define RV_J_IMM_11_OFF 11
+#define RV_J_IMM_19_12_OFF 12
+#define RV_J_IMM_10_1_MASK GENMASK(9, 0)
+#define RV_J_IMM_11_MASK GENMASK(0, 0)
+#define RV_J_IMM_19_12_MASK GENMASK(7, 0)
+
+/*
+ * U-type IMMs contain the upper 20 bits [31:12] of an immediate with
+ * the rest filled in by zeros, so no shifting required. Similarly,
+ * bit31 contains the signed state, so no sign extension necessary.
+ */
+#define RV_U_IMM_SIGN_OPOFF 31
+#define RV_U_IMM_31_12_OPOFF 0
+#define RV_U_IMM_31_12_MASK GENMASK(31, 12)
+
+/* The bit field of immediate value in B-type instruction */
+#define RV_B_IMM_SIGN_OPOFF 31
+#define RV_B_IMM_10_5_OPOFF 25
+#define RV_B_IMM_4_1_OPOFF 8
+#define RV_B_IMM_11_OPOFF 7
+#define RV_B_IMM_SIGN_OFF 12
+#define RV_B_IMM_10_5_OFF 5
+#define RV_B_IMM_4_1_OFF 1
+#define RV_B_IMM_11_OFF 11
+#define RV_B_IMM_10_5_MASK GENMASK(5, 0)
+#define RV_B_IMM_4_1_MASK GENMASK(3, 0)
+#define RV_B_IMM_11_MASK GENMASK(0, 0)
+
+/* The register offset in RVG instruction */
+#define RVG_RS1_OPOFF 15
+#define RVG_RS2_OPOFF 20
+#define RVG_RD_OPOFF 7
+#define RVG_RD_MASK GENMASK(4, 0)
+
+/* The bit field of immediate value in RVC J instruction */
+#define RVC_J_IMM_SIGN_OPOFF 12
+#define RVC_J_IMM_4_OPOFF 11
+#define RVC_J_IMM_9_8_OPOFF 9
+#define RVC_J_IMM_10_OPOFF 8
+#define RVC_J_IMM_6_OPOFF 7
+#define RVC_J_IMM_7_OPOFF 6
+#define RVC_J_IMM_3_1_OPOFF 3
+#define RVC_J_IMM_5_OPOFF 2
+#define RVC_J_IMM_SIGN_OFF 11
+#define RVC_J_IMM_4_OFF 4
+#define RVC_J_IMM_9_8_OFF 8
+#define RVC_J_IMM_10_OFF 10
+#define RVC_J_IMM_6_OFF 6
+#define RVC_J_IMM_7_OFF 7
+#define RVC_J_IMM_3_1_OFF 1
+#define RVC_J_IMM_5_OFF 5
+#define RVC_J_IMM_4_MASK GENMASK(0, 0)
+#define RVC_J_IMM_9_8_MASK GENMASK(1, 0)
+#define RVC_J_IMM_10_MASK GENMASK(0, 0)
+#define RVC_J_IMM_6_MASK GENMASK(0, 0)
+#define RVC_J_IMM_7_MASK GENMASK(0, 0)
+#define RVC_J_IMM_3_1_MASK GENMASK(2, 0)
+#define RVC_J_IMM_5_MASK GENMASK(0, 0)
+
+/* The bit field of immediate value in RVC B instruction */
+#define RVC_B_IMM_SIGN_OPOFF 12
+#define RVC_B_IMM_4_3_OPOFF 10
+#define RVC_B_IMM_7_6_OPOFF 5
+#define RVC_B_IMM_2_1_OPOFF 3
+#define RVC_B_IMM_5_OPOFF 2
+#define RVC_B_IMM_SIGN_OFF 8
+#define RVC_B_IMM_4_3_OFF 3
+#define RVC_B_IMM_7_6_OFF 6
+#define RVC_B_IMM_2_1_OFF 1
+#define RVC_B_IMM_5_OFF 5
+#define RVC_B_IMM_4_3_MASK GENMASK(1, 0)
+#define RVC_B_IMM_7_6_MASK GENMASK(1, 0)
+#define RVC_B_IMM_2_1_MASK GENMASK(1, 0)
+#define RVC_B_IMM_5_MASK GENMASK(0, 0)
+
+#define RVC_INSN_FUNCT4_MASK GENMASK(15, 12)
+#define RVC_INSN_FUNCT4_OPOFF 12
+#define RVC_INSN_FUNCT3_MASK GENMASK(15, 13)
+#define RVC_INSN_FUNCT3_OPOFF 13
+#define RVC_INSN_J_RS2_MASK GENMASK(6, 2)
+#define RVC_INSN_OPCODE_MASK GENMASK(1, 0)
+#define RVC_ENCODE_FUNCT3(f_) (RVC_FUNCT3_##f_ << RVC_INSN_FUNCT3_OPOFF)
+#define RVC_ENCODE_FUNCT4(f_) (RVC_FUNCT4_##f_ << RVC_INSN_FUNCT4_OPOFF)
+
+/* The register offset in RVC op=C0 instruction */
+#define RVC_C0_RS1_OPOFF 7
+#define RVC_C0_RS2_OPOFF 2
+#define RVC_C0_RD_OPOFF 2
+
+/* The register offset in RVC op=C1 instruction */
+#define RVC_C1_RS1_OPOFF 7
+#define RVC_C1_RS2_OPOFF 2
+#define RVC_C1_RD_OPOFF 7
+
+/* The register offset in RVC op=C2 instruction */
+#define RVC_C2_RS1_OPOFF 7
+#define RVC_C2_RS2_OPOFF 2
+#define RVC_C2_RD_OPOFF 7
+
+/* parts of opcode for RVG*/
+#define RVG_OPCODE_FENCE 0x0f
+#define RVG_OPCODE_AUIPC 0x17
+#define RVG_OPCODE_BRANCH 0x63
+#define RVG_OPCODE_JALR 0x67
+#define RVG_OPCODE_JAL 0x6f
+#define RVG_OPCODE_SYSTEM 0x73
+
+/* parts of opcode for RVC*/
+#define RVC_OPCODE_C0 0x0
+#define RVC_OPCODE_C1 0x1
+#define RVC_OPCODE_C2 0x2
+
+/* parts of funct3 code for I, M, A extension*/
+#define RVG_FUNCT3_JALR 0x0
+#define RVG_FUNCT3_BEQ 0x0
+#define RVG_FUNCT3_BNE 0x1
+#define RVG_FUNCT3_BLT 0x4
+#define RVG_FUNCT3_BGE 0x5
+#define RVG_FUNCT3_BLTU 0x6
+#define RVG_FUNCT3_BGEU 0x7
+
+/* parts of funct3 code for C extension*/
+#define RVC_FUNCT3_C_BEQZ 0x6
+#define RVC_FUNCT3_C_BNEZ 0x7
+#define RVC_FUNCT3_C_J 0x5
+#define RVC_FUNCT3_C_JAL 0x1
+#define RVC_FUNCT4_C_JR 0x8
+#define RVC_FUNCT4_C_JALR 0x9
+#define RVC_FUNCT4_C_EBREAK 0x9
+
+#define RVG_FUNCT12_EBREAK 0x1
+#define RVG_FUNCT12_SRET 0x102
+
+#define RVG_MATCH_AUIPC (RVG_OPCODE_AUIPC)
+#define RVG_MATCH_JALR (RV_ENCODE_FUNCT3(JALR) | RVG_OPCODE_JALR)
+#define RVG_MATCH_JAL (RVG_OPCODE_JAL)
+#define RVG_MATCH_FENCE (RVG_OPCODE_FENCE)
+#define RVG_MATCH_BEQ (RV_ENCODE_FUNCT3(BEQ) | RVG_OPCODE_BRANCH)
+#define RVG_MATCH_BNE (RV_ENCODE_FUNCT3(BNE) | RVG_OPCODE_BRANCH)
+#define RVG_MATCH_BLT (RV_ENCODE_FUNCT3(BLT) | RVG_OPCODE_BRANCH)
+#define RVG_MATCH_BGE (RV_ENCODE_FUNCT3(BGE) | RVG_OPCODE_BRANCH)
+#define RVG_MATCH_BLTU (RV_ENCODE_FUNCT3(BLTU) | RVG_OPCODE_BRANCH)
+#define RVG_MATCH_BGEU (RV_ENCODE_FUNCT3(BGEU) | RVG_OPCODE_BRANCH)
+#define RVG_MATCH_EBREAK (RV_ENCODE_FUNCT12(EBREAK) | RVG_OPCODE_SYSTEM)
+#define RVG_MATCH_SRET (RV_ENCODE_FUNCT12(SRET) | RVG_OPCODE_SYSTEM)
+#define RVC_MATCH_C_BEQZ (RVC_ENCODE_FUNCT3(C_BEQZ) | RVC_OPCODE_C1)
+#define RVC_MATCH_C_BNEZ (RVC_ENCODE_FUNCT3(C_BNEZ) | RVC_OPCODE_C1)
+#define RVC_MATCH_C_J (RVC_ENCODE_FUNCT3(C_J) | RVC_OPCODE_C1)
+#define RVC_MATCH_C_JAL (RVC_ENCODE_FUNCT3(C_JAL) | RVC_OPCODE_C1)
+#define RVC_MATCH_C_JR (RVC_ENCODE_FUNCT4(C_JR) | RVC_OPCODE_C2)
+#define RVC_MATCH_C_JALR (RVC_ENCODE_FUNCT4(C_JALR) | RVC_OPCODE_C2)
+#define RVC_MATCH_C_EBREAK (RVC_ENCODE_FUNCT4(C_EBREAK) | RVC_OPCODE_C2)
+
+#define RVG_MASK_AUIPC (RV_INSN_OPCODE_MASK)
+#define RVG_MASK_JALR (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
+#define RVG_MASK_JAL (RV_INSN_OPCODE_MASK)
+#define RVG_MASK_FENCE (RV_INSN_OPCODE_MASK)
+#define RVC_MASK_C_JALR (RVC_INSN_FUNCT4_MASK | RVC_INSN_J_RS2_MASK | RVC_INSN_OPCODE_MASK)
+#define RVC_MASK_C_JR (RVC_INSN_FUNCT4_MASK | RVC_INSN_J_RS2_MASK | RVC_INSN_OPCODE_MASK)
+#define RVC_MASK_C_JAL (RVC_INSN_FUNCT3_MASK | RVC_INSN_OPCODE_MASK)
+#define RVC_MASK_C_J (RVC_INSN_FUNCT3_MASK | RVC_INSN_OPCODE_MASK)
+#define RVG_MASK_BEQ (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
+#define RVG_MASK_BNE (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
+#define RVG_MASK_BLT (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
+#define RVG_MASK_BGE (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
+#define RVG_MASK_BLTU (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
+#define RVG_MASK_BGEU (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
+#define RVC_MASK_C_BEQZ (RVC_INSN_FUNCT3_MASK | RVC_INSN_OPCODE_MASK)
+#define RVC_MASK_C_BNEZ (RVC_INSN_FUNCT3_MASK | RVC_INSN_OPCODE_MASK)
+#define RVC_MASK_C_EBREAK 0xffff
+#define RVG_MASK_EBREAK 0xffffffff
+#define RVG_MASK_SRET 0xffffffff
+
+#define __INSN_LENGTH_MASK _UL(0x3)
+#define __INSN_LENGTH_GE_32 _UL(0x3)
+#define __INSN_OPCODE_MASK _UL(0x7F)
+#define __INSN_BRANCH_OPCODE _UL(RVG_OPCODE_BRANCH)
+
+#define __RISCV_INSN_FUNCS(name, mask, val) \
+static __always_inline bool riscv_insn_is_##name(u32 code) \
+{ \
+ BUILD_BUG_ON(~(mask) & (val)); \
+ return (code & (mask)) == (val); \
+} \
+
+#if __riscv_xlen == 32
+/* C.JAL is an RV32C-only instruction */
+__RISCV_INSN_FUNCS(c_jal, RVC_MASK_C_JAL, RVC_MATCH_C_JAL)
+#else
+#define riscv_insn_is_c_jal(opcode) 0
+#endif
+__RISCV_INSN_FUNCS(auipc, RVG_MASK_AUIPC, RVG_MATCH_AUIPC)
+__RISCV_INSN_FUNCS(jalr, RVG_MASK_JALR, RVG_MATCH_JALR)
+__RISCV_INSN_FUNCS(jal, RVG_MASK_JAL, RVG_MATCH_JAL)
+__RISCV_INSN_FUNCS(c_jr, RVC_MASK_C_JR, RVC_MATCH_C_JR)
+__RISCV_INSN_FUNCS(c_jalr, RVC_MASK_C_JALR, RVC_MATCH_C_JALR)
+__RISCV_INSN_FUNCS(c_j, RVC_MASK_C_J, RVC_MATCH_C_J)
+__RISCV_INSN_FUNCS(beq, RVG_MASK_BEQ, RVG_MATCH_BEQ)
+__RISCV_INSN_FUNCS(bne, RVG_MASK_BNE, RVG_MATCH_BNE)
+__RISCV_INSN_FUNCS(blt, RVG_MASK_BLT, RVG_MATCH_BLT)
+__RISCV_INSN_FUNCS(bge, RVG_MASK_BGE, RVG_MATCH_BGE)
+__RISCV_INSN_FUNCS(bltu, RVG_MASK_BLTU, RVG_MATCH_BLTU)
+__RISCV_INSN_FUNCS(bgeu, RVG_MASK_BGEU, RVG_MATCH_BGEU)
+__RISCV_INSN_FUNCS(c_beqz, RVC_MASK_C_BEQZ, RVC_MATCH_C_BEQZ)
+__RISCV_INSN_FUNCS(c_bnez, RVC_MASK_C_BNEZ, RVC_MATCH_C_BNEZ)
+__RISCV_INSN_FUNCS(c_ebreak, RVC_MASK_C_EBREAK, RVC_MATCH_C_EBREAK)
+__RISCV_INSN_FUNCS(ebreak, RVG_MASK_EBREAK, RVG_MATCH_EBREAK)
+__RISCV_INSN_FUNCS(sret, RVG_MASK_SRET, RVG_MATCH_SRET)
+__RISCV_INSN_FUNCS(fence, RVG_MASK_FENCE, RVG_MATCH_FENCE);
+
+/* special case to catch _any_ system instruction */
+static __always_inline bool riscv_insn_is_system(u32 code)
+{
+ return (code & RV_INSN_OPCODE_MASK) == RVG_OPCODE_SYSTEM;
+}
+
+/* special case to catch _any_ branch instruction */
+static __always_inline bool riscv_insn_is_branch(u32 code)
+{
+ return (code & RV_INSN_OPCODE_MASK) == RVG_OPCODE_BRANCH;
+}
+
+#define RV_IMM_SIGN(x) (-(((x) >> 31) & 1))
+#define RVC_IMM_SIGN(x) (-(((x) >> 12) & 1))
+#define RV_X(X, s, mask) (((X) >> (s)) & (mask))
+#define RVC_X(X, s, mask) RV_X(X, s, mask)
+
+#define RV_EXTRACT_RD_REG(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, RVG_RD_OPOFF, RVG_RD_MASK)); })
+
+#define RV_EXTRACT_UTYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, RV_U_IMM_31_12_OPOFF, RV_U_IMM_31_12_MASK)); })
+
+#define RV_EXTRACT_JTYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, RV_J_IMM_10_1_OPOFF, RV_J_IMM_10_1_MASK) << RV_J_IMM_10_1_OFF) | \
+ (RV_X(x_, RV_J_IMM_11_OPOFF, RV_J_IMM_11_MASK) << RV_J_IMM_11_OFF) | \
+ (RV_X(x_, RV_J_IMM_19_12_OPOFF, RV_J_IMM_19_12_MASK) << RV_J_IMM_19_12_OFF) | \
+ (RV_IMM_SIGN(x_) << RV_J_IMM_SIGN_OFF); })
+
+#define RV_EXTRACT_ITYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, RV_I_IMM_11_0_OPOFF, RV_I_IMM_11_0_MASK)) | \
+ (RV_IMM_SIGN(x_) << RV_I_IMM_SIGN_OFF); })
+
+#define RV_EXTRACT_BTYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, RV_B_IMM_4_1_OPOFF, RV_B_IMM_4_1_MASK) << RV_B_IMM_4_1_OFF) | \
+ (RV_X(x_, RV_B_IMM_10_5_OPOFF, RV_B_IMM_10_5_MASK) << RV_B_IMM_10_5_OFF) | \
+ (RV_X(x_, RV_B_IMM_11_OPOFF, RV_B_IMM_11_MASK) << RV_B_IMM_11_OFF) | \
+ (RV_IMM_SIGN(x_) << RV_B_IMM_SIGN_OFF); })
+
+#define RVC_EXTRACT_JTYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RVC_X(x_, RVC_J_IMM_3_1_OPOFF, RVC_J_IMM_3_1_MASK) << RVC_J_IMM_3_1_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_4_OPOFF, RVC_J_IMM_4_MASK) << RVC_J_IMM_4_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_5_OPOFF, RVC_J_IMM_5_MASK) << RVC_J_IMM_5_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_6_OPOFF, RVC_J_IMM_6_MASK) << RVC_J_IMM_6_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_7_OPOFF, RVC_J_IMM_7_MASK) << RVC_J_IMM_7_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_9_8_OPOFF, RVC_J_IMM_9_8_MASK) << RVC_J_IMM_9_8_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_10_OPOFF, RVC_J_IMM_10_MASK) << RVC_J_IMM_10_OFF) | \
+ (RVC_IMM_SIGN(x_) << RVC_J_IMM_SIGN_OFF); })
+
+#define RVC_EXTRACT_BTYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RVC_X(x_, RVC_B_IMM_2_1_OPOFF, RVC_B_IMM_2_1_MASK) << RVC_B_IMM_2_1_OFF) | \
+ (RVC_X(x_, RVC_B_IMM_4_3_OPOFF, RVC_B_IMM_4_3_MASK) << RVC_B_IMM_4_3_OFF) | \
+ (RVC_X(x_, RVC_B_IMM_5_OPOFF, RVC_B_IMM_5_MASK) << RVC_B_IMM_5_OFF) | \
+ (RVC_X(x_, RVC_B_IMM_7_6_OPOFF, RVC_B_IMM_7_6_MASK) << RVC_B_IMM_7_6_OFF) | \
+ (RVC_IMM_SIGN(x_) << RVC_B_IMM_SIGN_OFF); })
+
+/*
+ * Get the immediate from a J-type instruction.
+ *
+ * @insn: instruction to process
+ * Return: immediate
+ */
+static inline s32 riscv_insn_extract_jtype_imm(u32 insn)
+{
+ return RV_EXTRACT_JTYPE_IMM(insn);
+}
+
+/*
+ * Update a J-type instruction with an immediate value.
+ *
+ * @insn: pointer to the jtype instruction
+ * @imm: the immediate to insert into the instruction
+ */
+static inline void riscv_insn_insert_jtype_imm(u32 *insn, s32 imm)
+{
+	/* drop the old immediate; all jal immediate bits sit at 31:12 */
+ *insn &= ~GENMASK(31, 12);
+ *insn |= (RV_X(imm, RV_J_IMM_10_1_OFF, RV_J_IMM_10_1_MASK) << RV_J_IMM_10_1_OPOFF) |
+ (RV_X(imm, RV_J_IMM_11_OFF, RV_J_IMM_11_MASK) << RV_J_IMM_11_OPOFF) |
+ (RV_X(imm, RV_J_IMM_19_12_OFF, RV_J_IMM_19_12_MASK) << RV_J_IMM_19_12_OPOFF) |
+ (RV_X(imm, RV_J_IMM_SIGN_OFF, 1) << RV_J_IMM_SIGN_OPOFF);
+}
+
+/*
+ * Put together one immediate from a U-type and I-type instruction pair.
+ *
+ * The U-type contains an upper immediate, i.e. bits [31:12] with bits [11:0]
+ * being zero, while the I-type contains a 12-bit immediate.
+ * Combined, they can encode larger 32-bit values and are used, for example,
+ * in auipc + jalr pairs to allow longer-range jumps.
+ *
+ * @utype_insn: instruction containing the upper immediate
+ * @itype_insn: instruction containing the lower 12-bit immediate
+ * Return: combined immediate
+ */
+static inline s32 riscv_insn_extract_utype_itype_imm(u32 utype_insn, u32 itype_insn)
+{
+ s32 imm;
+
+ imm = RV_EXTRACT_UTYPE_IMM(utype_insn);
+ imm += RV_EXTRACT_ITYPE_IMM(itype_insn);
+
+ return imm;
+}
+
+/*
+ * Update a set of two instructions (U-type + I-type) with an immediate value.
+ *
+ * Used, for example, in auipc+jalr pairs: the U-type instruction contains
+ * a 20-bit upper immediate representing bits [31:12], while the I-type
+ * instruction contains a 12-bit immediate representing bits [11:0].
+ *
+ * This also takes into account that both immediates are treated as
+ * signed values, so if the I-type immediate becomes negative
+ * (BIT(11) set) the U-type part is adjusted to compensate.
+ *
+ * @utype_insn: pointer to the utype instruction of the pair
+ * @itype_insn: pointer to the itype instruction of the pair
+ * @imm: the immediate to insert into the two instructions
+ */
+static inline void riscv_insn_insert_utype_itype_imm(u32 *utype_insn, u32 *itype_insn, s32 imm)
+{
+ /* drop possible old IMM values */
+ *utype_insn &= ~(RV_U_IMM_31_12_MASK);
+ *itype_insn &= ~(RV_I_IMM_11_0_MASK << RV_I_IMM_11_0_OPOFF);
+
+ /* add the adapted IMMs */
+ *utype_insn |= (imm & RV_U_IMM_31_12_MASK) + ((imm & BIT(11)) << 1);
+ *itype_insn |= ((imm & RV_I_IMM_11_0_MASK) << RV_I_IMM_11_0_OPOFF);
+}
+#endif /* _ASM_RISCV_INSN_H */
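Taken together, the riscv_insn_is_*() checkers and the immediate accessors above give in-kernel users (kgdb, kprobes, ftrace) a uniform decode/patch API. A minimal sketch of how a caller might retarget a jal with these helpers; 'pc' and 'new_target' are assumed inputs, not anything from this patch:

/* Sketch only: point an existing jal at a new PC-relative target */
static int retarget_jal(u32 *insn, unsigned long pc, unsigned long new_target)
{
	s32 imm = (s32)(new_target - pc);	/* J-type offsets are PC-relative */

	if (!riscv_insn_is_jal(*insn))
		return -EINVAL;

	/* must fit the roughly +/-1MB range of the 21-bit J-type immediate */
	if (imm >= SZ_1M || imm < -(s32)SZ_1M)
		return -ERANGE;

	riscv_insn_insert_jtype_imm(insn, imm);
	return 0;
}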
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index 92080a227937..42497d487a17 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -135,4 +135,9 @@ __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw())
#include <asm-generic/io.h>
+#ifdef CONFIG_MMU
+#define arch_memremap_wb(addr, size) \
+ ((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL))
+#endif
+
#endif /* _ASM_RISCV_IO_H */
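The new arch_memremap_wb() hook is what lets the generic memremap() code hand out cacheable (write-back) mappings of ordinary memory on MMU kernels. Driver-side usage is unchanged; a minimal sketch, where res_start and res_size are assumed to describe normal (non-I/O) memory:

/* Sketch: map normal memory cacheably via the generic API */
void *buf = memremap(res_start, res_size, MEMREMAP_WB);

if (!buf)
	return -ENOMEM;
/* ... access buf like regular kernel memory ... */
memunmap(buf);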
diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h
index e4c435509983..43b9ebfbd943 100644
--- a/arch/riscv/include/asm/irq.h
+++ b/arch/riscv/include/asm/irq.h
@@ -12,6 +12,10 @@
#include <asm-generic/irq.h>
+void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void));
+
+struct fwnode_handle *riscv_get_intc_hwnode(void);
+
extern void __init init_IRQ(void);
#endif /* _ASM_RISCV_IRQ_H */
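riscv_get_intc_hwnode() gives drivers a stable handle on the per-hart local interrupt controller without walking the devicetree themselves. A hedged sketch of the expected consumer pattern (error handling trimmed, hwirq number assumed):

/* Sketch: look up the INTC irqdomain and map a local interrupt */
struct irq_domain *domain;
int virq;

domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
				  DOMAIN_BUS_ANY);
if (!domain)
	return -ENODEV;

virq = irq_create_mapping(domain, RV_IRQ_SOFT);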
diff --git a/arch/riscv/include/asm/jump_label.h b/arch/riscv/include/asm/jump_label.h
index 6d58bbb5da46..14a5ea8d8ef0 100644
--- a/arch/riscv/include/asm/jump_label.h
+++ b/arch/riscv/include/asm/jump_label.h
@@ -18,6 +18,7 @@ static __always_inline bool arch_static_branch(struct static_key * const key,
const bool branch)
{
asm_volatile_goto(
+ " .align 2 \n\t"
" .option push \n\t"
" .option norelax \n\t"
" .option norvc \n\t"
@@ -39,6 +40,7 @@ static __always_inline bool arch_static_branch_jump(struct static_key * const ke
const bool branch)
{
asm_volatile_goto(
+ " .align 2 \n\t"
" .option push \n\t"
" .option norelax \n\t"
" .option norvc \n\t"
diff --git a/arch/riscv/include/asm/kexec.h b/arch/riscv/include/asm/kexec.h
index eee260e8ab30..2b56769cb530 100644
--- a/arch/riscv/include/asm/kexec.h
+++ b/arch/riscv/include/asm/kexec.h
@@ -39,6 +39,7 @@ crash_setup_regs(struct pt_regs *newregs,
#define ARCH_HAS_KIMAGE_ARCH
struct kimage_arch {
+ void *fdt; /* For CONFIG_KEXEC_FILE */
unsigned long fdt_addr;
};
@@ -62,6 +63,10 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
const Elf_Shdr *relsec,
const Elf_Shdr *symtab);
#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
+
+struct kimage;
+int arch_kimage_file_post_load_cleanup(struct kimage *image);
+#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
#endif
#endif
diff --git a/arch/riscv/include/asm/kfence.h b/arch/riscv/include/asm/kfence.h
index d887a54042aa..0bbffd528096 100644
--- a/arch/riscv/include/asm/kfence.h
+++ b/arch/riscv/include/asm/kfence.h
@@ -8,41 +8,8 @@
#include <asm-generic/pgalloc.h>
#include <asm/pgtable.h>
-static inline int split_pmd_page(unsigned long addr)
-{
- int i;
- unsigned long pfn = PFN_DOWN(__pa((addr & PMD_MASK)));
- pmd_t *pmd = pmd_off_k(addr);
- pte_t *pte = pte_alloc_one_kernel(&init_mm);
-
- if (!pte)
- return -ENOMEM;
-
- for (i = 0; i < PTRS_PER_PTE; i++)
- set_pte(pte + i, pfn_pte(pfn + i, PAGE_KERNEL));
- set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(pte)), PAGE_TABLE));
-
- flush_tlb_kernel_range(addr, addr + PMD_SIZE);
- return 0;
-}
-
static inline bool arch_kfence_init_pool(void)
{
- int ret;
- unsigned long addr;
- pmd_t *pmd;
-
- for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
- addr += PAGE_SIZE) {
- pmd = pmd_off_k(addr);
-
- if (pmd_leaf(*pmd)) {
- ret = split_pmd_page(addr);
- if (ret)
- return false;
- }
- }
-
return true;
}
diff --git a/arch/riscv/include/asm/kprobes.h b/arch/riscv/include/asm/kprobes.h
index 217ef89f22b9..e7882ccb0fd4 100644
--- a/arch/riscv/include/asm/kprobes.h
+++ b/arch/riscv/include/asm/kprobes.h
@@ -40,8 +40,6 @@ void arch_remove_kprobe(struct kprobe *p);
int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr);
bool kprobe_breakpoint_handler(struct pt_regs *regs);
bool kprobe_single_step_handler(struct pt_regs *regs);
-void __kretprobe_trampoline(void);
-void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
#endif /* CONFIG_KPROBES */
#endif /* _ASM_RISCV_KPROBES_H */
diff --git a/arch/riscv/include/asm/kvm_aia.h b/arch/riscv/include/asm/kvm_aia.h
new file mode 100644
index 000000000000..1de0717112e5
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_aia.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ *
+ * Authors:
+ * Anup Patel <apatel@ventanamicro.com>
+ */
+
+#ifndef __KVM_RISCV_AIA_H
+#define __KVM_RISCV_AIA_H
+
+#include <linux/jump_label.h>
+#include <linux/kvm_types.h>
+#include <asm/csr.h>
+
+struct kvm_aia {
+ /* In-kernel irqchip created */
+ bool in_kernel;
+
+ /* In-kernel irqchip initialized */
+ bool initialized;
+};
+
+struct kvm_vcpu_aia_csr {
+ unsigned long vsiselect;
+ unsigned long hviprio1;
+ unsigned long hviprio2;
+ unsigned long vsieh;
+ unsigned long hviph;
+ unsigned long hviprio1h;
+ unsigned long hviprio2h;
+};
+
+struct kvm_vcpu_aia {
+ /* CPU AIA CSR context of Guest VCPU */
+ struct kvm_vcpu_aia_csr guest_csr;
+
+ /* CPU AIA CSR context upon Guest VCPU reset */
+ struct kvm_vcpu_aia_csr guest_reset_csr;
+};
+
+#define kvm_riscv_aia_initialized(k) ((k)->arch.aia.initialized)
+
+#define irqchip_in_kernel(k) ((k)->arch.aia.in_kernel)
+
+DECLARE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
+#define kvm_riscv_aia_available() \
+ static_branch_unlikely(&kvm_riscv_aia_available)
+
+#define KVM_RISCV_AIA_IMSIC_TOPEI (ISELECT_MASK + 1)
+static inline int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu,
+ unsigned long isel,
+ unsigned long *val,
+ unsigned long new_val,
+ unsigned long wr_mask)
+{
+ return 0;
+}
+
+#ifdef CONFIG_32BIT
+void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu);
+#else
+static inline void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
+{
+}
+static inline void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
+{
+}
+#endif
+bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
+
+void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu);
+void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
+ unsigned long reg_num,
+ unsigned long *out_val);
+int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
+ unsigned long reg_num,
+ unsigned long val);
+
+int kvm_riscv_vcpu_aia_rmw_topei(struct kvm_vcpu *vcpu,
+ unsigned int csr_num,
+ unsigned long *val,
+ unsigned long new_val,
+ unsigned long wr_mask);
+int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
+ unsigned long *val, unsigned long new_val,
+ unsigned long wr_mask);
+#define KVM_RISCV_VCPU_AIA_CSR_FUNCS \
+{ .base = CSR_SIREG, .count = 1, .func = kvm_riscv_vcpu_aia_rmw_ireg }, \
+{ .base = CSR_STOPEI, .count = 1, .func = kvm_riscv_vcpu_aia_rmw_topei },
+
+static inline int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
+{
+ return 1;
+}
+
+static inline void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+static inline void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline void kvm_riscv_aia_init_vm(struct kvm *kvm)
+{
+}
+
+static inline void kvm_riscv_aia_destroy_vm(struct kvm *kvm)
+{
+}
+
+void kvm_riscv_aia_enable(void);
+void kvm_riscv_aia_disable(void);
+int kvm_riscv_aia_init(void);
+void kvm_riscv_aia_exit(void);
+
+#endif
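Most of the AIA entry points above are stubs for now; the kvm_riscv_aia_available static key keeps their cost near zero on hardware without the extension. A plausible callee-side pattern (a sketch, not this patch's implementation):

void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
{
	if (!kvm_riscv_aia_available())
		return;	/* patched-out fast path on non-AIA hardware */

	/* ... mirror AIA CSR state into the guest's hvip ... */
}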
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index dbbf43d52623..ee0acccb1d3b 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -13,11 +13,13 @@
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/spinlock.h>
-#include <asm/csr.h>
#include <asm/hwcap.h>
+#include <asm/kvm_aia.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_insn.h>
+#include <asm/kvm_vcpu_sbi.h>
#include <asm/kvm_vcpu_timer.h>
+#include <asm/kvm_vcpu_pmu.h>
#define KVM_MAX_VCPUS 1024
@@ -93,10 +95,9 @@ struct kvm_arch {
/* Guest Timer */
struct kvm_guest_timer timer;
-};
-struct kvm_sbi_context {
- int return_handled;
+ /* AIA Guest/VM context */
+ struct kvm_aia aia;
};
struct kvm_cpu_trap {
@@ -169,6 +170,11 @@ struct kvm_vcpu_arch {
/* ISA feature bits (similar to MISA) */
DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);
+ /* Vendor, Arch, and Implementation details */
+ unsigned long mvendorid;
+ unsigned long marchid;
+ unsigned long mimpid;
+
/* SSCRATCH, STVEC, and SCOUNTEREN of Host */
unsigned long host_sscratch;
unsigned long host_stvec;
@@ -198,8 +204,9 @@ struct kvm_vcpu_arch {
* in irqs_pending. Our approach is modeled around multiple producer
* and single consumer problem where the consumer is the VCPU itself.
*/
- unsigned long irqs_pending;
- unsigned long irqs_pending_mask;
+#define KVM_RISCV_VCPU_NR_IRQS 64
+ DECLARE_BITMAP(irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
+ DECLARE_BITMAP(irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);
/* VCPU Timer */
struct kvm_vcpu_timer timer;
@@ -217,7 +224,10 @@ struct kvm_vcpu_arch {
struct kvm_csr_decode csr_decode;
/* SBI context */
- struct kvm_sbi_context sbi_context;
+ struct kvm_vcpu_sbi_context sbi_context;
+
+ /* AIA VCPU context */
+ struct kvm_vcpu_aia aia_context;
/* Cache pages needed to program page tables with spinlock held */
struct kvm_mmu_memory_cache mmu_page_cache;
@@ -227,9 +237,11 @@ struct kvm_vcpu_arch {
/* Don't run the VCPU (blocked) */
bool pause;
+
+ /* Performance monitoring context */
+ struct kvm_pmu pmu_context;
};
-static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
@@ -296,11 +308,11 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
-void kvm_riscv_gstage_mode_detect(void);
-unsigned long kvm_riscv_gstage_mode(void);
+void __init kvm_riscv_gstage_mode_detect(void);
+unsigned long __init kvm_riscv_gstage_mode(void);
int kvm_riscv_gstage_gpa_bits(void);
-void kvm_riscv_gstage_vmid_detect(void);
+void __init kvm_riscv_gstage_vmid_detect(void);
unsigned long kvm_riscv_gstage_vmid_bits(void);
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
@@ -323,11 +335,8 @@ int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
-bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask);
+bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
-int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);
-
#endif /* __RISCV_KVM_HOST_H__ */
diff --git a/arch/riscv/include/asm/kvm_vcpu_pmu.h b/arch/riscv/include/asm/kvm_vcpu_pmu.h
new file mode 100644
index 000000000000..395518a1664e
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_vcpu_pmu.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Rivos Inc
+ *
+ * Authors:
+ * Atish Patra <atishp@rivosinc.com>
+ */
+
+#ifndef __KVM_VCPU_RISCV_PMU_H
+#define __KVM_VCPU_RISCV_PMU_H
+
+#include <linux/perf/riscv_pmu.h>
+#include <asm/sbi.h>
+
+#ifdef CONFIG_RISCV_PMU_SBI
+#define RISCV_KVM_MAX_FW_CTRS 32
+#define RISCV_KVM_MAX_HW_CTRS 32
+#define RISCV_KVM_MAX_COUNTERS (RISCV_KVM_MAX_HW_CTRS + RISCV_KVM_MAX_FW_CTRS)
+static_assert(RISCV_KVM_MAX_COUNTERS <= 64);
+
+struct kvm_fw_event {
+ /* Current value of the event */
+ unsigned long value;
+
+ /* Event monitoring status */
+ bool started;
+};
+
+/* Per virtual pmu counter data */
+struct kvm_pmc {
+ u8 idx;
+ struct perf_event *perf_event;
+ u64 counter_val;
+ union sbi_pmu_ctr_info cinfo;
+ /* Event monitoring status */
+ bool started;
+ /* Monitoring event ID */
+ unsigned long event_idx;
+};
+
+/* PMU data structure per vcpu */
+struct kvm_pmu {
+ struct kvm_pmc pmc[RISCV_KVM_MAX_COUNTERS];
+ struct kvm_fw_event fw_event[RISCV_KVM_MAX_FW_CTRS];
+	/* Number of virtual firmware counters available */
+	int num_fw_ctrs;
+	/* Number of virtual hardware counters available */
+	int num_hw_ctrs;
+	/* A flag to indicate that pmu initialization is done */
+	bool init_done;
+	/* Bitmap of all the virtual counters in use */
+ DECLARE_BITMAP(pmc_in_use, RISCV_KVM_MAX_COUNTERS);
+};
+
+#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context)
+#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu_context))
+
+#if defined(CONFIG_32BIT)
+#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
+{.base = CSR_CYCLEH, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, \
+{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm },
+#else
+#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
+{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm },
+#endif
+
+int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid);
+int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
+ unsigned long *val, unsigned long new_val,
+ unsigned long wr_mask);
+
+int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
+ struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
+ unsigned long ctr_mask, unsigned long flags, u64 ival,
+ struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
+ unsigned long ctr_mask, unsigned long flags,
+ struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base,
+ unsigned long ctr_mask, unsigned long flags,
+ unsigned long eidx, u64 evtdata,
+ struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
+ struct kvm_vcpu_sbi_return *retdata);
+void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);
+
+#else
+struct kvm_pmu {
+};
+
+#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
+{.base = 0, .count = 0, .func = NULL },
+
+static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
+static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
+{
+ return 0;
+}
+
+static inline void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) {}
+static inline void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) {}
+#endif /* CONFIG_RISCV_PMU_SBI */
+#endif /* !__KVM_VCPU_RISCV_PMU_H */
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
index d4e3e600beef..4278125a38a5 100644
--- a/arch/riscv/include/asm/kvm_vcpu_sbi.h
+++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -14,6 +14,18 @@
#define KVM_SBI_VERSION_MAJOR 1
#define KVM_SBI_VERSION_MINOR 0
+struct kvm_vcpu_sbi_context {
+ int return_handled;
+ bool extension_disabled[KVM_RISCV_SBI_EXT_MAX];
+};
+
+struct kvm_vcpu_sbi_return {
+ unsigned long out_val;
+ unsigned long err_val;
+ struct kvm_cpu_trap *utrap;
+ bool uexit;
+};
+
struct kvm_vcpu_sbi_extension {
unsigned long extid_start;
unsigned long extid_end;
@@ -23,15 +35,24 @@ struct kvm_vcpu_sbi_extension {
* specific error codes.
*/
int (*handler)(struct kvm_vcpu *vcpu, struct kvm_run *run,
- unsigned long *out_val, struct kvm_cpu_trap *utrap,
- bool *exit);
+ struct kvm_vcpu_sbi_return *retdata);
+
+	/* Extension-specific probe function */
+ unsigned long (*probe)(struct kvm_vcpu *vcpu);
};
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
struct kvm_run *run,
u32 type, u64 flags);
-const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid);
+int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg);
+int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg);
+const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
+ struct kvm_vcpu *vcpu, unsigned long extid);
+int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);
#ifdef CONFIG_RISCV_SBI_V01
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
diff --git a/arch/riscv/include/asm/module.h b/arch/riscv/include/asm/module.h
index 76aa96a9fc08..0f3baaa6a9a8 100644
--- a/arch/riscv/include/asm/module.h
+++ b/arch/riscv/include/asm/module.h
@@ -5,6 +5,7 @@
#define _ASM_RISCV_MODULE_H
#include <asm-generic/module.h>
+#include <linux/elf.h>
struct module;
unsigned long module_emit_got_entry(struct module *mod, unsigned long val);
@@ -111,4 +112,19 @@ static inline struct plt_entry *get_plt_entry(unsigned long val,
#endif /* CONFIG_MODULE_SECTIONS */
+static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ const char *name)
+{
+ const Elf_Shdr *s, *se;
+ const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
+ if (strcmp(name, secstrs + s->sh_name) == 0)
+ return s;
+ }
+
+ return NULL;
+}
+
#endif /* _ASM_RISCV_MODULE_H */
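find_section() is a small ELF helper that module-loading code can reuse instead of open-coding the section-header walk. A hedged sketch of the intended use from a module_finalize() implementation; apply_module_alternatives() stands in for whatever consumer processes the section:

/* Sketch: locate and process a named section at module load time */
int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s = find_section(hdr, sechdrs, ".alternative");

	if (s)	/* assumed consumer of the section contents */
		apply_module_alternatives((void *)s->sh_addr, s->sh_size);

	return 0;
}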
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index ac70b0fd9a9a..b55ba20903ec 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -16,11 +16,6 @@
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
-#ifdef CONFIG_64BIT
-#define HUGE_MAX_HSTATE 2
-#else
-#define HUGE_MAX_HSTATE 1
-#endif
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
@@ -49,10 +44,14 @@
#ifndef __ASSEMBLY__
+#ifdef CONFIG_RISCV_ISA_ZICBOZ
+void clear_page(void *page);
+#else
#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
+#endif
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
-#define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
+#define clear_user_page(pgaddr, vaddr, page) clear_page(pgaddr)
#define copy_user_page(vto, vfrom, vaddr, topg) \
memcpy((vto), (vfrom), PAGE_SIZE)
@@ -90,9 +89,16 @@ typedef struct page *pgtable_t;
#define PTE_FMT "%08lx"
#endif
+#ifdef CONFIG_64BIT
+/*
+ * We override this value as its generic definition uses __pa too early in
+ * the boot process (before kernel_map.va_pa_offset is set).
+ */
+#define MIN_MEMBLOCK_ADDR 0
+#endif
+
#ifdef CONFIG_MMU
-extern unsigned long riscv_pfn_base;
-#define ARCH_PFN_OFFSET (riscv_pfn_base)
+#define ARCH_PFN_OFFSET (PFN_DOWN((unsigned long)phys_ram_base))
#else
#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
#endif /* CONFIG_MMU */
@@ -122,21 +128,29 @@ extern phys_addr_t phys_ram_base;
#define is_linear_mapping(x) \
((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < PAGE_OFFSET + KERN_VIRT_SIZE))
+#ifndef CONFIG_DEBUG_VIRTUAL
#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
-#define kernel_mapping_pa_to_va(y) ({ \
- unsigned long _y = y; \
- (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? \
- (void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset) : \
- (void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \
+#else
+void *linear_mapping_pa_to_va(unsigned long x);
+#endif
+#define kernel_mapping_pa_to_va(y) ({ \
+ unsigned long _y = (unsigned long)(y); \
+ (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? \
+ (void *)(_y + kernel_map.va_kernel_xip_pa_offset) : \
+ (void *)(_y + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \
})
#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
+#ifndef CONFIG_DEBUG_VIRTUAL
#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - kernel_map.va_pa_offset)
+#else
+phys_addr_t linear_mapping_va_to_pa(unsigned long x);
+#endif
#define kernel_mapping_va_to_pa(y) ({ \
- unsigned long _y = y; \
- (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \
- ((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : \
- ((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \
+ unsigned long _y = (unsigned long)(y); \
+ (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \
+ (_y - kernel_map.va_kernel_xip_pa_offset) : \
+ (_y - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \
})
#define __va_to_pa_nodebug(x) ({ \
@@ -171,11 +185,6 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
-#ifdef CONFIG_FLATMEM
-#define pfn_valid(pfn) \
- (((pfn) >= ARCH_PFN_OFFSET) && (((pfn) - ARCH_PFN_OFFSET) < max_mapnr))
-#endif
-
#endif /* __ASSEMBLY__ */
#define virt_addr_valid(vaddr) ({ \
diff --git a/arch/riscv/include/asm/parse_asm.h b/arch/riscv/include/asm/parse_asm.h
deleted file mode 100644
index f36368de839f..000000000000
--- a/arch/riscv/include/asm/parse_asm.h
+++ /dev/null
@@ -1,219 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2020 SiFive
- */
-
-#include <linux/bits.h>
-
-/* The bit field of immediate value in I-type instruction */
-#define I_IMM_SIGN_OPOFF 31
-#define I_IMM_11_0_OPOFF 20
-#define I_IMM_SIGN_OFF 12
-#define I_IMM_11_0_OFF 0
-#define I_IMM_11_0_MASK GENMASK(11, 0)
-
-/* The bit field of immediate value in J-type instruction */
-#define J_IMM_SIGN_OPOFF 31
-#define J_IMM_10_1_OPOFF 21
-#define J_IMM_11_OPOFF 20
-#define J_IMM_19_12_OPOFF 12
-#define J_IMM_SIGN_OFF 20
-#define J_IMM_10_1_OFF 1
-#define J_IMM_11_OFF 11
-#define J_IMM_19_12_OFF 12
-#define J_IMM_10_1_MASK GENMASK(9, 0)
-#define J_IMM_11_MASK GENMASK(0, 0)
-#define J_IMM_19_12_MASK GENMASK(7, 0)
-
-/* The bit field of immediate value in B-type instruction */
-#define B_IMM_SIGN_OPOFF 31
-#define B_IMM_10_5_OPOFF 25
-#define B_IMM_4_1_OPOFF 8
-#define B_IMM_11_OPOFF 7
-#define B_IMM_SIGN_OFF 12
-#define B_IMM_10_5_OFF 5
-#define B_IMM_4_1_OFF 1
-#define B_IMM_11_OFF 11
-#define B_IMM_10_5_MASK GENMASK(5, 0)
-#define B_IMM_4_1_MASK GENMASK(3, 0)
-#define B_IMM_11_MASK GENMASK(0, 0)
-
-/* The register offset in RVG instruction */
-#define RVG_RS1_OPOFF 15
-#define RVG_RS2_OPOFF 20
-#define RVG_RD_OPOFF 7
-
-/* The bit field of immediate value in RVC J instruction */
-#define RVC_J_IMM_SIGN_OPOFF 12
-#define RVC_J_IMM_4_OPOFF 11
-#define RVC_J_IMM_9_8_OPOFF 9
-#define RVC_J_IMM_10_OPOFF 8
-#define RVC_J_IMM_6_OPOFF 7
-#define RVC_J_IMM_7_OPOFF 6
-#define RVC_J_IMM_3_1_OPOFF 3
-#define RVC_J_IMM_5_OPOFF 2
-#define RVC_J_IMM_SIGN_OFF 11
-#define RVC_J_IMM_4_OFF 4
-#define RVC_J_IMM_9_8_OFF 8
-#define RVC_J_IMM_10_OFF 10
-#define RVC_J_IMM_6_OFF 6
-#define RVC_J_IMM_7_OFF 7
-#define RVC_J_IMM_3_1_OFF 1
-#define RVC_J_IMM_5_OFF 5
-#define RVC_J_IMM_4_MASK GENMASK(0, 0)
-#define RVC_J_IMM_9_8_MASK GENMASK(1, 0)
-#define RVC_J_IMM_10_MASK GENMASK(0, 0)
-#define RVC_J_IMM_6_MASK GENMASK(0, 0)
-#define RVC_J_IMM_7_MASK GENMASK(0, 0)
-#define RVC_J_IMM_3_1_MASK GENMASK(2, 0)
-#define RVC_J_IMM_5_MASK GENMASK(0, 0)
-
-/* The bit field of immediate value in RVC B instruction */
-#define RVC_B_IMM_SIGN_OPOFF 12
-#define RVC_B_IMM_4_3_OPOFF 10
-#define RVC_B_IMM_7_6_OPOFF 5
-#define RVC_B_IMM_2_1_OPOFF 3
-#define RVC_B_IMM_5_OPOFF 2
-#define RVC_B_IMM_SIGN_OFF 8
-#define RVC_B_IMM_4_3_OFF 3
-#define RVC_B_IMM_7_6_OFF 6
-#define RVC_B_IMM_2_1_OFF 1
-#define RVC_B_IMM_5_OFF 5
-#define RVC_B_IMM_4_3_MASK GENMASK(1, 0)
-#define RVC_B_IMM_7_6_MASK GENMASK(1, 0)
-#define RVC_B_IMM_2_1_MASK GENMASK(1, 0)
-#define RVC_B_IMM_5_MASK GENMASK(0, 0)
-
-/* The register offset in RVC op=C0 instruction */
-#define RVC_C0_RS1_OPOFF 7
-#define RVC_C0_RS2_OPOFF 2
-#define RVC_C0_RD_OPOFF 2
-
-/* The register offset in RVC op=C1 instruction */
-#define RVC_C1_RS1_OPOFF 7
-#define RVC_C1_RS2_OPOFF 2
-#define RVC_C1_RD_OPOFF 7
-
-/* The register offset in RVC op=C2 instruction */
-#define RVC_C2_RS1_OPOFF 7
-#define RVC_C2_RS2_OPOFF 2
-#define RVC_C2_RD_OPOFF 7
-
-/* parts of opcode for RVG*/
-#define OPCODE_BRANCH 0x63
-#define OPCODE_JALR 0x67
-#define OPCODE_JAL 0x6f
-#define OPCODE_SYSTEM 0x73
-
-/* parts of opcode for RVC*/
-#define OPCODE_C_0 0x0
-#define OPCODE_C_1 0x1
-#define OPCODE_C_2 0x2
-
-/* parts of funct3 code for I, M, A extension*/
-#define FUNCT3_JALR 0x0
-#define FUNCT3_BEQ 0x0
-#define FUNCT3_BNE 0x1000
-#define FUNCT3_BLT 0x4000
-#define FUNCT3_BGE 0x5000
-#define FUNCT3_BLTU 0x6000
-#define FUNCT3_BGEU 0x7000
-
-/* parts of funct3 code for C extension*/
-#define FUNCT3_C_BEQZ 0xc000
-#define FUNCT3_C_BNEZ 0xe000
-#define FUNCT3_C_J 0xa000
-#define FUNCT3_C_JAL 0x2000
-#define FUNCT4_C_JR 0x8000
-#define FUNCT4_C_JALR 0xf000
-
-#define FUNCT12_SRET 0x10200000
-
-#define MATCH_JALR (FUNCT3_JALR | OPCODE_JALR)
-#define MATCH_JAL (OPCODE_JAL)
-#define MATCH_BEQ (FUNCT3_BEQ | OPCODE_BRANCH)
-#define MATCH_BNE (FUNCT3_BNE | OPCODE_BRANCH)
-#define MATCH_BLT (FUNCT3_BLT | OPCODE_BRANCH)
-#define MATCH_BGE (FUNCT3_BGE | OPCODE_BRANCH)
-#define MATCH_BLTU (FUNCT3_BLTU | OPCODE_BRANCH)
-#define MATCH_BGEU (FUNCT3_BGEU | OPCODE_BRANCH)
-#define MATCH_SRET (FUNCT12_SRET | OPCODE_SYSTEM)
-#define MATCH_C_BEQZ (FUNCT3_C_BEQZ | OPCODE_C_1)
-#define MATCH_C_BNEZ (FUNCT3_C_BNEZ | OPCODE_C_1)
-#define MATCH_C_J (FUNCT3_C_J | OPCODE_C_1)
-#define MATCH_C_JAL (FUNCT3_C_JAL | OPCODE_C_1)
-#define MATCH_C_JR (FUNCT4_C_JR | OPCODE_C_2)
-#define MATCH_C_JALR (FUNCT4_C_JALR | OPCODE_C_2)
-
-#define MASK_JALR 0x707f
-#define MASK_JAL 0x7f
-#define MASK_C_JALR 0xf07f
-#define MASK_C_JR 0xf07f
-#define MASK_C_JAL 0xe003
-#define MASK_C_J 0xe003
-#define MASK_BEQ 0x707f
-#define MASK_BNE 0x707f
-#define MASK_BLT 0x707f
-#define MASK_BGE 0x707f
-#define MASK_BLTU 0x707f
-#define MASK_BGEU 0x707f
-#define MASK_C_BEQZ 0xe003
-#define MASK_C_BNEZ 0xe003
-#define MASK_SRET 0xffffffff
-
-#define __INSN_LENGTH_MASK _UL(0x3)
-#define __INSN_LENGTH_GE_32 _UL(0x3)
-#define __INSN_OPCODE_MASK _UL(0x7F)
-#define __INSN_BRANCH_OPCODE _UL(OPCODE_BRANCH)
-
-/* Define a series of is_XXX_insn functions to check if the value INSN
- * is an instance of instruction XXX.
- */
-#define DECLARE_INSN(INSN_NAME, INSN_MATCH, INSN_MASK) \
-static inline bool is_ ## INSN_NAME ## _insn(long insn) \
-{ \
- return (insn & (INSN_MASK)) == (INSN_MATCH); \
-}
-
-#define RV_IMM_SIGN(x) (-(((x) >> 31) & 1))
-#define RVC_IMM_SIGN(x) (-(((x) >> 12) & 1))
-#define RV_X(X, s, mask) (((X) >> (s)) & (mask))
-#define RVC_X(X, s, mask) RV_X(X, s, mask)
-
-#define EXTRACT_JTYPE_IMM(x) \
- ({typeof(x) x_ = (x); \
- (RV_X(x_, J_IMM_10_1_OPOFF, J_IMM_10_1_MASK) << J_IMM_10_1_OFF) | \
- (RV_X(x_, J_IMM_11_OPOFF, J_IMM_11_MASK) << J_IMM_11_OFF) | \
- (RV_X(x_, J_IMM_19_12_OPOFF, J_IMM_19_12_MASK) << J_IMM_19_12_OFF) | \
- (RV_IMM_SIGN(x_) << J_IMM_SIGN_OFF); })
-
-#define EXTRACT_ITYPE_IMM(x) \
- ({typeof(x) x_ = (x); \
- (RV_X(x_, I_IMM_11_0_OPOFF, I_IMM_11_0_MASK)) | \
- (RV_IMM_SIGN(x_) << I_IMM_SIGN_OFF); })
-
-#define EXTRACT_BTYPE_IMM(x) \
- ({typeof(x) x_ = (x); \
- (RV_X(x_, B_IMM_4_1_OPOFF, B_IMM_4_1_MASK) << B_IMM_4_1_OFF) | \
- (RV_X(x_, B_IMM_10_5_OPOFF, B_IMM_10_5_MASK) << B_IMM_10_5_OFF) | \
- (RV_X(x_, B_IMM_11_OPOFF, B_IMM_11_MASK) << B_IMM_11_OFF) | \
- (RV_IMM_SIGN(x_) << B_IMM_SIGN_OFF); })
-
-#define EXTRACT_RVC_J_IMM(x) \
- ({typeof(x) x_ = (x); \
- (RVC_X(x_, RVC_J_IMM_3_1_OPOFF, RVC_J_IMM_3_1_MASK) << RVC_J_IMM_3_1_OFF) | \
- (RVC_X(x_, RVC_J_IMM_4_OPOFF, RVC_J_IMM_4_MASK) << RVC_J_IMM_4_OFF) | \
- (RVC_X(x_, RVC_J_IMM_5_OPOFF, RVC_J_IMM_5_MASK) << RVC_J_IMM_5_OFF) | \
- (RVC_X(x_, RVC_J_IMM_6_OPOFF, RVC_J_IMM_6_MASK) << RVC_J_IMM_6_OFF) | \
- (RVC_X(x_, RVC_J_IMM_7_OPOFF, RVC_J_IMM_7_MASK) << RVC_J_IMM_7_OFF) | \
- (RVC_X(x_, RVC_J_IMM_9_8_OPOFF, RVC_J_IMM_9_8_MASK) << RVC_J_IMM_9_8_OFF) | \
- (RVC_X(x_, RVC_J_IMM_10_OPOFF, RVC_J_IMM_10_MASK) << RVC_J_IMM_10_OFF) | \
- (RVC_IMM_SIGN(x_) << RVC_J_IMM_SIGN_OFF); })
-
-#define EXTRACT_RVC_B_IMM(x) \
- ({typeof(x) x_ = (x); \
- (RVC_X(x_, RVC_B_IMM_2_1_OPOFF, RVC_B_IMM_2_1_MASK) << RVC_B_IMM_2_1_OFF) | \
- (RVC_X(x_, RVC_B_IMM_4_3_OPOFF, RVC_B_IMM_4_3_MASK) << RVC_B_IMM_4_3_OFF) | \
- (RVC_X(x_, RVC_B_IMM_5_OPOFF, RVC_B_IMM_5_MASK) << RVC_B_IMM_5_OFF) | \
- (RVC_X(x_, RVC_B_IMM_7_6_OPOFF, RVC_B_IMM_7_6_MASK) << RVC_B_IMM_7_6_OFF) | \
- (RVC_IMM_SIGN(x_) << RVC_B_IMM_SIGN_OFF); })
diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h
index 9a7d7346001e..63c98833d510 100644
--- a/arch/riscv/include/asm/patch.h
+++ b/arch/riscv/include/asm/patch.h
@@ -7,6 +7,8 @@
#define _ASM_RISCV_PATCH_H
int patch_text_nosync(void *addr, const void *insns, size_t len);
-int patch_text(void *addr, u32 insn);
+int patch_text(void *addr, u32 *insns, int ninsns);
+
+extern int riscv_patch_in_stop_machine;
#endif /* _ASM_RISCV_PATCH_H */
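patch_text() now takes an array, so callers can replace an instruction sequence in a single stop_machine() window instead of one word at a time. A minimal sketch; the instruction encodings are assumed to be precomputed:

/* Sketch: swap in a two-instruction sequence atomically */
u32 insns[2] = { first_insn, second_insn };	/* assumed encodings */
int ret;

ret = patch_text(addr, insns, ARRAY_SIZE(insns));
if (ret)
	pr_err("patching failed at %px\n", addr);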
diff --git a/arch/riscv/include/asm/perf_event.h b/arch/riscv/include/asm/perf_event.h
index d42c901f9a97..665bbc9b2f84 100644
--- a/arch/riscv/include/asm/perf_event.h
+++ b/arch/riscv/include/asm/perf_event.h
@@ -10,4 +10,11 @@
#include <linux/perf_event.h>
#define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs
+
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+ (regs)->epc = (__ip); \
+ (regs)->s0 = (unsigned long) __builtin_frame_address(0); \
+ (regs)->sp = current_stack_pointer; \
+ (regs)->status = SR_PP; \
+}
#endif /* _ASM_RISCV_PERF_EVENT_H */
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index 947f23d7b6af..59dc12b5b7e8 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -127,6 +127,13 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
#define __p4d_free_tlb(tlb, p4d, addr) p4d_free((tlb)->mm, p4d)
#endif /* __PAGETABLE_PMD_FOLDED */
+static inline void sync_kernel_mappings(pgd_t *pgd)
+{
+ memcpy(pgd + USER_PTRS_PER_PGD,
+ init_mm.pgd + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
@@ -135,9 +142,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
if (likely(pgd != NULL)) {
memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
/* Copy kernel mappings */
- memcpy(pgd + USER_PTRS_PER_PGD,
- init_mm.pgd + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ sync_kernel_mappings(pgd);
}
return pgd;
}
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index dc42375c2357..7a5097202e15 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -25,7 +25,11 @@ extern bool pgtable_l5_enabled;
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
/* p4d is folded into pgd in case of 4-level page table */
-#define P4D_SHIFT 39
+#define P4D_SHIFT_L3 30
+#define P4D_SHIFT_L4 39
+#define P4D_SHIFT_L5 39
+#define P4D_SHIFT (pgtable_l5_enabled ? P4D_SHIFT_L5 : \
+ (pgtable_l4_enabled ? P4D_SHIFT_L4 : P4D_SHIFT_L3))
#define P4D_SIZE (_AC(1, UL) << P4D_SHIFT)
#define P4D_MASK (~(P4D_SIZE - 1))
@@ -75,6 +79,40 @@ typedef struct {
#define _PAGE_PFN_MASK GENMASK(53, 10)
/*
+ * [63] Svnapot definitions:
+ * 0 Svnapot disabled
+ * 1 Svnapot enabled
+ */
+#define _PAGE_NAPOT_SHIFT 63
+#define _PAGE_NAPOT BIT(_PAGE_NAPOT_SHIFT)
+/*
+ * Only 64KB (order 4) napot ptes are supported.
+ */
+#define NAPOT_CONT_ORDER_BASE 4
+enum napot_cont_order {
+ NAPOT_CONT64KB_ORDER = NAPOT_CONT_ORDER_BASE,
+ NAPOT_ORDER_MAX,
+};
+
+#define for_each_napot_order(order) \
+ for (order = NAPOT_CONT_ORDER_BASE; order < NAPOT_ORDER_MAX; order++)
+#define for_each_napot_order_rev(order) \
+ for (order = NAPOT_ORDER_MAX - 1; \
+ order >= NAPOT_CONT_ORDER_BASE; order--)
+#define napot_cont_order(val) (__builtin_ctzl((val.pte >> _PAGE_PFN_SHIFT) << 1))
+
+#define napot_cont_shift(order) ((order) + PAGE_SHIFT)
+#define napot_cont_size(order) BIT(napot_cont_shift(order))
+#define napot_cont_mask(order) (~(napot_cont_size(order) - 1UL))
+#define napot_pte_num(order) BIT(order)
+
+#ifdef CONFIG_RISCV_ISA_SVNAPOT
+#define HUGE_MAX_HSTATE (2 + (NAPOT_ORDER_MAX - NAPOT_CONT_ORDER_BASE))
+#else
+#define HUGE_MAX_HSTATE 2
+#endif
+
+/*
* [62:61] Svpbmt Memory Type definitions:
*
* 00 - PMA Normal Cacheable, No change to implied PMA memory type
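The NAPOT helpers are all derived from the order, so the only supported case (order 4 with 4KB base pages) works out as follows; a sanity-check sketch, not part of the patch:

/* With PAGE_SHIFT == 12 and NAPOT_CONT64KB_ORDER == 4: */
BUILD_BUG_ON(napot_cont_shift(NAPOT_CONT64KB_ORDER) != 16);	/* 4 + 12 */
BUILD_BUG_ON(napot_cont_size(NAPOT_CONT64KB_ORDER) != SZ_64K);	/* BIT(16) */
BUILD_BUG_ON(napot_pte_num(NAPOT_CONT64KB_ORDER) != 16);	/* 16 contiguous PTEs */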
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
index b9e13a8fe2b7..f896708e8331 100644
--- a/arch/riscv/include/asm/pgtable-bits.h
+++ b/arch/riscv/include/asm/pgtable-bits.h
@@ -27,6 +27,9 @@
*/
#define _PAGE_PROT_NONE _PAGE_GLOBAL
+/* Used for swap PTEs only. */
+#define _PAGE_SWP_EXCLUSIVE _PAGE_ACCESSED
+
#define _PAGE_PFN_SHIFT 10
/*
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 7ec936910a96..75970ee2bda2 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -31,7 +31,7 @@
#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
/*
- * Half of the kernel address space (half of the entries of the page global
+ * Half of the kernel address space (1/4 of the entries of the page global
* directory) is for the direct mapping.
*/
#define KERN_VIRT_SIZE ((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)
@@ -87,9 +87,13 @@
#define FIXADDR_TOP PCI_IO_START
#ifdef CONFIG_64BIT
-#define FIXADDR_SIZE PMD_SIZE
+#define MAX_FDT_SIZE PMD_SIZE
+#define FIX_FDT_SIZE (MAX_FDT_SIZE + SZ_2M)
+#define FIXADDR_SIZE (PMD_SIZE + FIX_FDT_SIZE)
#else
-#define FIXADDR_SIZE PGDIR_SIZE
+#define MAX_FDT_SIZE PGDIR_SIZE
+#define FIX_FDT_SIZE MAX_FDT_SIZE
+#define FIXADDR_SIZE (PGDIR_SIZE + FIX_FDT_SIZE)
#endif
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
@@ -161,8 +165,7 @@ extern struct pt_alloc_ops pt_ops __initdata;
_PAGE_EXEC | _PAGE_WRITE)
#define PAGE_COPY PAGE_READ
-#define PAGE_COPY_EXEC PAGE_EXEC
-#define PAGE_COPY_READ_EXEC PAGE_READ_EXEC
+#define PAGE_COPY_EXEC PAGE_READ_EXEC
#define PAGE_SHARED PAGE_WRITE
#define PAGE_SHARED_EXEC PAGE_WRITE_EXEC
@@ -264,10 +267,47 @@ static inline pte_t pud_pte(pud_t pud)
return __pte(pud_val(pud));
}
+#ifdef CONFIG_RISCV_ISA_SVNAPOT
+
+static __always_inline bool has_svnapot(void)
+{
+ return riscv_has_extension_likely(RISCV_ISA_EXT_SVNAPOT);
+}
+
+static inline unsigned long pte_napot(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_NAPOT;
+}
+
+static inline pte_t pte_mknapot(pte_t pte, unsigned int order)
+{
+ int pos = order - 1 + _PAGE_PFN_SHIFT;
+ unsigned long napot_bit = BIT(pos);
+ unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT);
+
+ return __pte((pte_val(pte) & napot_mask) | napot_bit | _PAGE_NAPOT);
+}
+
+#else
+
+static __always_inline bool has_svnapot(void) { return false; }
+
+static inline unsigned long pte_napot(pte_t pte)
+{
+ return 0;
+}
+
+#endif /* CONFIG_RISCV_ISA_SVNAPOT */
+
/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
- return __page_val_to_pfn(pte_val(pte));
+ unsigned long res = __page_val_to_pfn(pte_val(pte));
+
+ if (has_svnapot() && pte_napot(pte))
+ res = res & (res - 1UL);
+
+ return res;
}
#define pte_page(x) pfn_to_page(pte_pfn(x))
@@ -418,6 +458,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
local_flush_tlb_page(address);
}
+#define __HAVE_ARCH_UPDATE_MMU_TLB
+#define update_mmu_tlb update_mmu_cache
+
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
@@ -600,6 +643,7 @@ static inline int pmd_dirty(pmd_t pmd)
return pte_dirty(pmd_pte(pmd));
}
+#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
return pte_young(pmd_pte(pmd));
@@ -717,19 +761,25 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}
+
+#define pmdp_collapse_flush pmdp_collapse_flush
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
- * Encode and decode a swap entry
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
*
* Format of swap PTE:
* bit 0: _PAGE_PRESENT (zero)
* bit 1 to 3: _PAGE_LEAF (zero)
* bit 5: _PAGE_PROT_NONE (zero)
- * bits 6 to 10: swap type
- * bits 10 to XLEN-1: swap offset
+ * bit 6: exclusive marker
+ * bits 7 to 11: swap type
+ * bits 11 to XLEN-1: swap offset
*/
-#define __SWP_TYPE_SHIFT 6
+#define __SWP_TYPE_SHIFT 7
#define __SWP_TYPE_BITS 5
#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
@@ -740,11 +790,27 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
- { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
+ { (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
+ ((offset) << __SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+static inline int pte_swp_exclusive(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
+}
+
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp) __pmd((swp).val)
@@ -801,8 +867,6 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
#endif /* !CONFIG_MMU */
-#define kern_addr_valid(addr) (1) /* FIXME */
-
extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
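With the exclusive marker squeezed in at bit 6, the swap type field moves up to bits [11:7] and the offset now starts at bit 12. A worked sketch of the round trip under the new layout:

/* Sketch: encode type 3 / offset 0x1234, then flag it exclusive */
swp_entry_t ent = __swp_entry(3, 0x1234);
pte_t pte = __swp_entry_to_pte(ent);

/* __swp_type(ent) == 3 (bits [11:7]), __swp_offset(ent) == 0x1234 (bits [XLEN-1:12]) */

pte = pte_swp_mkexclusive(pte);		/* sets bit 6, the reused _PAGE_ACCESSED */
/* pte_swp_exclusive(pte) is now non-zero; undone via pte_swp_clear_exclusive() */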
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
index 6ecd461129d2..b5b0adcc85c1 100644
--- a/arch/riscv/include/asm/ptrace.h
+++ b/arch/riscv/include/asm/ptrace.h
@@ -53,6 +53,9 @@ struct pt_regs {
unsigned long orig_a0;
};
+#define PTRACE_SYSEMU 0x1f
+#define PTRACE_SYSEMU_SINGLESTEP 0x20
+
#ifdef CONFIG_64BIT
#define REG_FMT "%016lx"
#else
@@ -121,8 +124,6 @@ extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long frame_pointer);
-int do_syscall_trace_enter(struct pt_regs *regs);
-void do_syscall_trace_exit(struct pt_regs *regs);
/**
* regs_get_register() - get register value from its offset
@@ -172,6 +173,11 @@ static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
return 0;
}
+static inline int regs_irqs_disabled(struct pt_regs *regs)
+{
+ return !(regs->status & SR_PIE);
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_PTRACE_H */
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 2a0ef738695e..5b4a1bf5f439 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -169,9 +169,9 @@ enum sbi_pmu_fw_generic_events_t {
SBI_PMU_FW_ILLEGAL_INSN = 4,
SBI_PMU_FW_SET_TIMER = 5,
SBI_PMU_FW_IPI_SENT = 6,
- SBI_PMU_FW_IPI_RECVD = 7,
+ SBI_PMU_FW_IPI_RCVD = 7,
SBI_PMU_FW_FENCE_I_SENT = 8,
- SBI_PMU_FW_FENCE_I_RECVD = 9,
+ SBI_PMU_FW_FENCE_I_RCVD = 9,
SBI_PMU_FW_SFENCE_VMA_SENT = 10,
SBI_PMU_FW_SFENCE_VMA_RCVD = 11,
SBI_PMU_FW_SFENCE_VMA_ASID_SENT = 12,
@@ -215,6 +215,9 @@ enum sbi_pmu_ctr_type {
#define SBI_PMU_EVENT_CACHE_OP_ID_CODE_MASK 0x06
#define SBI_PMU_EVENT_CACHE_RESULT_ID_CODE_MASK 0x01
+#define SBI_PMU_EVENT_CACHE_ID_SHIFT 3
+#define SBI_PMU_EVENT_CACHE_OP_SHIFT 1
+
#define SBI_PMU_EVENT_IDX_INVALID 0xFFFFFFFF
/* Flags defined for config matching function */
@@ -268,8 +271,7 @@ long sbi_get_marchid(void);
long sbi_get_mimpid(void);
void sbi_set_timer(uint64_t stime_value);
void sbi_shutdown(void);
-void sbi_clear_ipi(void);
-int sbi_send_ipi(const struct cpumask *cpu_mask);
+void sbi_send_ipi(unsigned int cpu);
int sbi_remote_fence_i(const struct cpumask *cpu_mask);
int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
unsigned long start,
@@ -293,7 +295,7 @@ int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
unsigned long start,
unsigned long size,
unsigned long asid);
-int sbi_probe_extension(int ext);
+long sbi_probe_extension(int ext);
/* Check if current SBI specification version is 0.1 or not */
static inline int sbi_spec_is_0_1(void)
@@ -327,4 +329,15 @@ int sbi_err_map_linux_errno(int err);
static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1; }
static inline void sbi_init(void) {}
#endif /* CONFIG_RISCV_SBI */
+
+unsigned long riscv_cached_mvendorid(unsigned int cpu_id);
+unsigned long riscv_cached_marchid(unsigned int cpu_id);
+unsigned long riscv_cached_mimpid(unsigned int cpu_id);
+
+#if IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_RISCV_SBI)
+void sbi_ipi_init(void);
+#else
+static inline void sbi_ipi_init(void) { }
+#endif
+
#endif /* _ASM_RISCV_SBI_H */
diff --git a/arch/riscv/include/asm/semihost.h b/arch/riscv/include/asm/semihost.h
new file mode 100644
index 000000000000..557a34938193
--- /dev/null
+++ b/arch/riscv/include/asm/semihost.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 tinylab.org
+ * Author: Bin Meng <bmeng@tinylab.org>
+ */
+
+#ifndef _RISCV_SEMIHOST_H_
+#define _RISCV_SEMIHOST_H_
+
+struct uart_port;
+
+static inline void smh_putc(struct uart_port *port, unsigned char c)
+{
+ asm volatile("addi a1, %0, 0\n"
+ "addi a0, zero, 3\n"
+ ".balign 16\n"
+ ".option push\n"
+ ".option norvc\n"
+ "slli zero, zero, 0x1f\n"
+ "ebreak\n"
+ "srai zero, zero, 0x7\n"
+ ".option pop\n"
+ : : "r" (&c) : "a0", "a1", "memory");
+}
+
+#endif /* _RISCV_SEMIHOST_H_ */
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index a2c14d4b3993..ec11001c3fe0 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -56,4 +56,7 @@ bool kernel_page_present(struct page *page);
#define SECTION_ALIGN L1_CACHE_BYTES
#endif /* CONFIG_STRICT_KERNEL_RWX */
+#define PECOFF_SECTION_ALIGNMENT 0x1000
+#define PECOFF_FILE_ALIGNMENT 0x200
+
#endif /* _ASM_RISCV_SET_MEMORY_H */
diff --git a/arch/riscv/include/asm/signal.h b/arch/riscv/include/asm/signal.h
index 532c29ef0376..956ae0a01bad 100644
--- a/arch/riscv/include/asm/signal.h
+++ b/arch/riscv/include/asm/signal.h
@@ -7,6 +7,6 @@
#include <uapi/asm/ptrace.h>
asmlinkage __visible
-void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
+void do_work_pending(struct pt_regs *regs, unsigned long thread_info_flags);
#endif
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index d3443be7eedc..c4b77017ec58 100644
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -15,12 +15,10 @@
struct seq_file;
extern unsigned long boot_cpu_hartid;
-struct riscv_ipi_ops {
- void (*ipi_inject)(const struct cpumask *target);
- void (*ipi_clear)(void);
-};
-
#ifdef CONFIG_SMP
+
+#include <linux/jump_label.h>
+
/*
* Mapping between linux logical cpu index and hartid.
*/
@@ -33,9 +31,6 @@ void show_ipi_stats(struct seq_file *p, int prec);
/* SMP initialization hook for setup_arch */
void __init setup_smp(void);
-/* Called from C code, this handles an IPI. */
-void handle_IPI(struct pt_regs *regs);
-
/* Hook for the generic smp_call_function_many() routine. */
void arch_send_call_function_ipi_mask(struct cpumask *mask);
@@ -44,11 +39,25 @@ void arch_send_call_function_single_ipi(int cpu);
int riscv_hartid_to_cpuid(unsigned long hartid);
-/* Set custom IPI operations */
-void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops);
+/* Enable IPI for CPU hotplug */
+void riscv_ipi_enable(void);
+
+/* Disable IPI for CPU hotplug */
+void riscv_ipi_disable(void);
-/* Clear IPI for current CPU */
-void riscv_clear_ipi(void);
+/* Check if IPI interrupt numbers are available */
+bool riscv_ipi_have_virq_range(void);
+
+/* Set the IPI interrupt numbers for arch (called by irqchip drivers) */
+void riscv_ipi_set_virq_range(int virq, int nr, bool use_for_rfence);
+
+/* Check if we can use IPIs for remote FENCEs */
+DECLARE_STATIC_KEY_FALSE(riscv_ipi_for_rfence);
+#define riscv_use_ipi_for_rfence() \
+ static_branch_unlikely(&riscv_ipi_for_rfence)
+
+/* Check whether the other CPUs have stopped */
+bool smp_crash_stop_failed(void);
/* Secondary hart entry */
asmlinkage void smp_callin(void);
@@ -82,14 +91,29 @@ static inline unsigned long cpuid_to_hartid_map(int cpu)
return boot_cpu_hartid;
}
-static inline void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops)
+static inline void riscv_ipi_enable(void)
{
}
-static inline void riscv_clear_ipi(void)
+static inline void riscv_ipi_disable(void)
{
}
+static inline bool riscv_ipi_have_virq_range(void)
+{
+ return false;
+}
+
+static inline void riscv_ipi_set_virq_range(int virq, int nr,
+ bool use_for_rfence)
+{
+}
+
+static inline bool riscv_use_ipi_for_rfence(void)
+{
+ return false;
+}
+
#endif /* CONFIG_SMP */
#if defined(CONFIG_HOTPLUG_CPU) && (CONFIG_SMP)
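The virq-range API inverts the old riscv_ipi_ops flow: instead of the arch asking for IPI hooks, irqchip drivers that own per-CPU interrupts hand a contiguous range to the arch layer. A hedged sketch; virq_base and nr are assumed driver state:

/* Sketch: register 'nr' per-CPU IPI virqs starting at virq_base */
if (!riscv_ipi_have_virq_range())
	riscv_ipi_set_virq_range(virq_base, nr, true);
/* 'true' means these IPIs may also back remote-fence operations */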
diff --git a/arch/riscv/include/asm/stackprotector.h b/arch/riscv/include/asm/stackprotector.h
index 09093af46565..43895b90fe3f 100644
--- a/arch/riscv/include/asm/stackprotector.h
+++ b/arch/riscv/include/asm/stackprotector.h
@@ -3,9 +3,6 @@
#ifndef _ASM_RISCV_STACKPROTECTOR_H
#define _ASM_RISCV_STACKPROTECTOR_H
-#include <linux/random.h>
-#include <linux/version.h>
-
extern unsigned long __stack_chk_guard;
/*
@@ -16,12 +13,7 @@ extern unsigned long __stack_chk_guard;
*/
static __always_inline void boot_init_stack_canary(void)
{
- unsigned long canary;
-
- /* Try to get a semi random initial value. */
- get_random_bytes(&canary, sizeof(canary));
- canary ^= LINUX_VERSION_CODE;
- canary &= CANARY_MASK;
+ unsigned long canary = get_random_canary();
current->stack_canary = canary;
if (!IS_ENABLED(CONFIG_STACKPROTECTOR_PER_TASK))
diff --git a/arch/riscv/include/asm/stacktrace.h b/arch/riscv/include/asm/stacktrace.h
index 3450c1912afd..f7e8ef2418b9 100644
--- a/arch/riscv/include/asm/stacktrace.h
+++ b/arch/riscv/include/asm/stacktrace.h
@@ -16,4 +16,9 @@ extern void notrace walk_stackframe(struct task_struct *task, struct pt_regs *re
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
const char *loglvl);
+static inline bool on_thread_stack(void)
+{
+ return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
+}
+
#endif /* _ASM_RISCV_STACKTRACE_H */
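on_thread_stack() relies on the task stack being THREAD_SIZE-aligned: XORing the stack base with the stack pointer leaves bits above THREAD_SIZE set only if the two addresses live in different THREAD_SIZE regions. An equivalent, more explicit formulation (sketch only):

static inline bool on_thread_stack_explicit(void)
{
	unsigned long sp = current_stack_pointer;
	unsigned long base = (unsigned long)current->stack;

	/* same THREAD_SIZE-aligned block <=> identical high bits */
	return (sp & ~(THREAD_SIZE - 1)) == (base & ~(THREAD_SIZE - 1));
}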
diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h
index 909049366555..a96b1fea24fe 100644
--- a/arch/riscv/include/asm/string.h
+++ b/arch/riscv/include/asm/string.h
@@ -18,6 +18,16 @@ extern asmlinkage void *__memcpy(void *, const void *, size_t);
#define __HAVE_ARCH_MEMMOVE
extern asmlinkage void *memmove(void *, const void *, size_t);
extern asmlinkage void *__memmove(void *, const void *, size_t);
+
+#define __HAVE_ARCH_STRCMP
+extern asmlinkage int strcmp(const char *cs, const char *ct);
+
+#define __HAVE_ARCH_STRLEN
+extern asmlinkage __kernel_size_t strlen(const char *);
+
+#define __HAVE_ARCH_STRNCMP
+extern asmlinkage int strncmp(const char *cs, const char *ct, size_t count);
+
/* For those files which don't want to check by kasan. */
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
#define memcpy(dst, src, len) __memcpy(dst, src, len)
diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h
index 8be391c2aecb..02f87867389a 100644
--- a/arch/riscv/include/asm/suspend.h
+++ b/arch/riscv/include/asm/suspend.h
@@ -21,6 +21,11 @@ struct suspend_context {
#endif
};
+/*
+ * Used by the hibernation core and cleared during the resume sequence
+ */
+extern int in_suspend;
+
/* Low-level CPU suspend entry function */
int __cpu_suspend_enter(struct suspend_context *context);
@@ -33,4 +38,21 @@ int cpu_suspend(unsigned long arg,
/* Low-level CPU resume entry function */
int __cpu_resume_enter(unsigned long hartid, unsigned long context);
+/* Used to save and restore the CSRs */
+void suspend_save_csrs(struct suspend_context *context);
+void suspend_restore_csrs(struct suspend_context *context);
+
+/* Low-level API to support hibernation */
+int swsusp_arch_suspend(void);
+int swsusp_arch_resume(void);
+int arch_hibernation_header_save(void *addr, unsigned int max_size);
+int arch_hibernation_header_restore(void *addr);
+int __hibernate_cpu_resume(void);
+
+/* Used to resume on the CPU we hibernated on */
+int hibernate_resume_nonboot_cpu_disable(void);
+
+asmlinkage void hibernate_restore_image(unsigned long resume_satp, unsigned long satp_temp,
+ unsigned long cpu_resume);
+asmlinkage int hibernate_core_restore_code(void);
#endif
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index 11463489fec6..60f8ca01d36e 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -59,7 +59,8 @@ static inline void __switch_to_aux(struct task_struct *prev,
static __always_inline bool has_fpu(void)
{
- return static_branch_likely(&riscv_isa_ext_keys[RISCV_ISA_EXT_KEY_FPU]);
+ return riscv_has_extension_likely(RISCV_ISA_EXT_f) ||
+ riscv_has_extension_likely(RISCV_ISA_EXT_d);
}
#else
static __always_inline bool has_fpu(void) { return false; }
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
index 384a63b86420..0148c6bd9675 100644
--- a/arch/riscv/include/asm/syscall.h
+++ b/arch/riscv/include/asm/syscall.h
@@ -10,6 +10,7 @@
#ifndef _ASM_RISCV_SYSCALL_H
#define _ASM_RISCV_SYSCALL_H
+#include <asm/hwprobe.h>
#include <uapi/linux/audit.h>
#include <linux/sched.h>
#include <linux/err.h>
@@ -74,5 +75,29 @@ static inline int syscall_get_arch(struct task_struct *task)
#endif
}
+typedef long (*syscall_t)(ulong, ulong, ulong, ulong, ulong, ulong, ulong);
+static inline void syscall_handler(struct pt_regs *regs, ulong syscall)
+{
+ syscall_t fn;
+
+#ifdef CONFIG_COMPAT
+ if ((regs->status & SR_UXL) == SR_UXL_32)
+ fn = compat_sys_call_table[syscall];
+ else
+#endif
+ fn = sys_call_table[syscall];
+
+ regs->a0 = fn(regs->orig_a0, regs->a1, regs->a2,
+ regs->a3, regs->a4, regs->a5, regs->a6);
+}
+
+static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
+{
+ return false;
+}
+
asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
+
+asmlinkage long sys_riscv_hwprobe(struct riscv_hwprobe *, size_t, size_t,
+ unsigned long *, unsigned int);
#endif /* _ASM_RISCV_SYSCALL_H */
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 67322f878e0d..e0d202134b44 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -43,6 +43,7 @@
#ifndef __ASSEMBLY__
extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];
+extern unsigned long spin_shadow_stack;
#include <asm/processor.h>
#include <asm/csr.h>
@@ -66,6 +67,7 @@ struct thread_info {
long kernel_sp; /* Kernel stack pointer */
long user_sp; /* User stack pointer */
int cpu;
+ unsigned long syscall_work; /* SYSCALL_WORK_ flags */
};
/*
@@ -88,26 +90,18 @@ struct thread_info {
* - pending work-to-be-done flags are in lowest half-word
* - other flags in upper half-word(s)
*/
-#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
-#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
-#define TIF_SYSCALL_AUDIT 7 /* syscall auditing */
-#define TIF_SECCOMP 8 /* syscall secure computing */
#define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */
#define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */
#define TIF_32BIT 11 /* compat-mode 32bit process */
-#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
-#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
-#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
-#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_UPROBE (1 << TIF_UPROBE)
@@ -115,8 +109,4 @@ struct thread_info {
(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
_TIF_NOTIFY_SIGNAL | _TIF_UPROBE)
-#define _TIF_SYSCALL_WORK \
- (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT | \
- _TIF_SECCOMP)
-
#endif /* _ASM_RISCV_THREAD_INFO_H */
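The deleted TIF_SYSCALL_* bits are not lost: with the syscall_work field added to struct thread_info above, the generic entry code tracks them as SYSCALL_WORK_* flags instead. An illustrative sketch of the consumer side, modeled on kernel/entry/common.c (syscall_trace_enter() is internal there, so the names are for exposition only):

	static long sketch_syscall_enter_work(struct pt_regs *regs, long syscall)
	{
		unsigned long work = READ_ONCE(current_thread_info()->syscall_work);

		/* SYSCALL_WORK_ENTER bundles trace, tracepoint, audit and seccomp */
		if (work & SYSCALL_WORK_ENTER)
			syscall = syscall_trace_enter(regs, syscall, work);

		return syscall;
	}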
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 801019381dea..a09196f8de68 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -12,6 +12,8 @@
#include <asm/errata_list.h>
#ifdef CONFIG_MMU
+extern unsigned long asid_mask;
+
static inline void local_flush_tlb_all(void)
{
__asm__ __volatile__ ("sfence.vma" : : : "memory");
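Exporting asid_mask lets callers build ASID-scoped flushes alongside the global flush shown above. A hedged sketch of such a helper (the kernel's real local_flush_tlb_all_asid() looks much like this; treat the body as illustrative):

	static inline void sketch_flush_tlb_asid(unsigned long asid)
	{
		/* sfence.vma with rs1 = x0 and rs2 = ASID: flush that ASID only */
		__asm__ __volatile__ ("sfence.vma x0, %0"
				      : : "r" (asid & asid_mask)
				      : "memory");
	}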
diff --git a/arch/riscv/include/asm/topology.h b/arch/riscv/include/asm/topology.h
new file mode 100644
index 000000000000..e316ab3b77f3
--- /dev/null
+++ b/arch/riscv/include/asm/topology.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_TOPOLOGY_H
+#define _ASM_RISCV_TOPOLOGY_H
+
+#include <linux/arch_topology.h>
+
+/* Replace task scheduler's default frequency-invariant accounting */
+#define arch_scale_freq_tick topology_scale_freq_tick
+#define arch_set_freq_scale topology_set_freq_scale
+#define arch_scale_freq_capacity topology_get_freq_scale
+#define arch_scale_freq_invariant topology_scale_freq_invariant
+
+/* Replace task scheduler's default cpu-invariant accounting */
+#define arch_scale_cpu_capacity topology_get_cpu_scale
+
+/* Enable topology flag updates */
+#define arch_update_cpu_topology topology_update_cpu_topology
+
+#include <asm-generic/topology.h>
+
+#endif /* _ASM_RISCV_TOPOLOGY_H */
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index 855450bed9f5..ec0cab9fbddd 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -165,7 +165,7 @@ do { \
might_fault(); \
access_ok(__p, sizeof(*__p)) ? \
__get_user((x), __p) : \
- ((x) = 0, -EFAULT); \
+ ((x) = (__force __typeof__(x))0, -EFAULT); \
})
#define __put_user_asm(insn, x, ptr, err) \
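The new (__force __typeof__(x))0 matters when the destination of get_user() is a __user pointer: a bare 0 assignment makes sparse warn about an address-space mismatch, while the cast keeps the fault path type-clean. A small sketch of such a caller, with illustrative names:

	static int sketch_fetch_uptr(void __user **slot, void __user **out)
	{
		void __user *val;
		/* on fault, val becomes (__force void __user *)0, not a bare 0 */
		int err = get_user(val, slot);

		*out = val;
		return err;
	}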
diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
index af981426fe0f..f891478829a5 100644
--- a/arch/riscv/include/asm/vdso.h
+++ b/arch/riscv/include/asm/vdso.h
@@ -10,7 +10,7 @@
/*
* All systems with an MMU have a VDSO, but systems without an MMU don't
- * support shared libraries and therefor don't have one.
+ * support shared libraries and therefore don't have one.
*/
#ifdef CONFIG_MMU
@@ -28,8 +28,12 @@
#define COMPAT_VDSO_SYMBOL(base, name) \
(void __user *)((unsigned long)(base) + compat__vdso_##name##_offset)
+extern char compat_vdso_start[], compat_vdso_end[];
+
#endif /* CONFIG_COMPAT */
+extern char vdso_start[], vdso_end[];
+
#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_MMU */
diff --git a/arch/riscv/include/asm/vdso/data.h b/arch/riscv/include/asm/vdso/data.h
new file mode 100644
index 000000000000..dc2f76f58b76
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/data.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __RISCV_ASM_VDSO_DATA_H
+#define __RISCV_ASM_VDSO_DATA_H
+
+#include <linux/types.h>
+#include <vdso/datapage.h>
+#include <asm/hwprobe.h>
+
+struct arch_vdso_data {
+ /* Stash static answers to the hwprobe queries when all CPUs are selected. */
+ __u64 all_cpu_hwprobe_values[RISCV_HWPROBE_MAX_KEY + 1];
+
+ /* Boolean indicating all CPUs have the same static hwprobe values. */
+ __u8 homogeneous_cpus;
+};
+
+#endif /* __RISCV_ASM_VDSO_DATA_H */
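The two fields above exist so the vDSO can answer hwprobe queries without entering the kernel when every CPU reports identical values. A hedged sketch of the fast path they enable; the function and fallback are illustrative, not the actual vDSO code:

	static bool sketch_hwprobe_fast(const struct arch_vdso_data *avd,
					struct riscv_hwprobe *pair)
	{
		if (!avd->homogeneous_cpus || pair->key > RISCV_HWPROBE_MAX_KEY)
			return false;	/* caller falls back to the real syscall */

		pair->value = avd->all_cpu_hwprobe_values[pair->key];
		return true;
	}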
diff --git a/arch/riscv/include/asm/vdso/gettimeofday.h b/arch/riscv/include/asm/vdso/gettimeofday.h
index 77d9c2f721c4..ba3283cf7acc 100644
--- a/arch/riscv/include/asm/vdso/gettimeofday.h
+++ b/arch/riscv/include/asm/vdso/gettimeofday.h
@@ -9,6 +9,12 @@
#include <asm/csr.h>
#include <uapi/linux/time.h>
+/*
+ * 32-bit land is lacking generic time vsyscalls as well as the legacy 32-bit
+ * time syscalls like gettimeofday. Skip these definitions on 32-bit.
+ */
+#ifdef CONFIG_GENERIC_TIME_VSYSCALL
+
#define VDSO_HAS_CLOCK_GETRES 1
static __always_inline
@@ -60,6 +66,8 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
return ret;
}
+#endif /* CONFIG_GENERIC_TIME_VSYSCALL */
+
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
const struct vdso_data *vd)
{
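__arch_get_hw_counter() itself is unchanged by the hunk above and sits outside the new #ifdef, presumably because even 32-bit still needs the raw counter. Its core is a read of the time CSR; a simplified rv64-only sketch (rv32 must additionally handle the high/low halves):

	static __always_inline u64 sketch_read_time_counter(void)
	{
		u64 cycles;

		/* rdtime is assembler shorthand for csrr rd, time */
		__asm__ __volatile__ ("rdtime %0" : "=r" (cycles));
		return cycles;
	}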
diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
index fa70cfe507aa..14f5d27783b8 100644
--- a/arch/riscv/include/asm/vdso/processor.h
+++ b/arch/riscv/include/asm/vdso/processor.h
@@ -4,30 +4,26 @@
#ifndef __ASSEMBLY__
-#include <linux/jump_label.h>
#include <asm/barrier.h>
-#include <asm/hwcap.h>
static inline void cpu_relax(void)
{
- if (!static_branch_likely(&riscv_isa_ext_keys[RISCV_ISA_EXT_KEY_ZIHINTPAUSE])) {
#ifdef __riscv_muldiv
- int dummy;
- /* In lieu of a halt instruction, induce a long-latency stall. */
- __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+ int dummy;
+ /* In lieu of a halt instruction, induce a long-latency stall. */
+ __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
#endif
- } else {
- /*
- * Reduce instruction retirement.
- * This assumes the PC changes.
- */
-#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
- __asm__ __volatile__ ("pause");
+
+#ifdef __riscv_zihintpause
+ /*
+ * Reduce instruction retirement.
+ * This assumes the PC changes.
+ */
+ __asm__ __volatile__ ("pause");
#else
- /* Encoding of the pause instruction */
- __asm__ __volatile__ (".4byte 0x100000F");
+ /* Encoding of the pause instruction */
+ __asm__ __volatile__ (".4byte 0x100000F");
#endif
- }
barrier();
}
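Why the raw .4byte fallback above is safe on every core: PAUSE is carved out of the FENCE opcode space, so a CPU without Zihintpause executes it as a harmless fence instead of trapping. A field-by-field decode of the constant, following the standard FENCE layout:

	/*
	 * 0x0100000F is "fence w, 0":
	 *   opcode[6:0]   = 0001111  (MISC-MEM)
	 *   funct3[14:12] = 000      (FENCE)
	 *   succ[23:20]   = 0000     (empty successor set)
	 *   pred[27:24]   = 0001     (W only)
	 * A fence with an empty successor set orders nothing, making it
	 * architecturally a hint; Zihintpause redefines exactly this
	 * encoding as PAUSE.
	 */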
diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h
index ff9abc00d139..58d3e447f191 100644
--- a/arch/riscv/include/asm/vmalloc.h
+++ b/arch/riscv/include/asm/vmalloc.h
@@ -1,4 +1,81 @@
#ifndef _ASM_RISCV_VMALLOC_H
#define _ASM_RISCV_VMALLOC_H
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+#define IOREMAP_MAX_ORDER (PUD_SHIFT)
+
+#define arch_vmap_pud_supported arch_vmap_pud_supported
+static inline bool arch_vmap_pud_supported(pgprot_t prot)
+{
+ return true;
+}
+
+#define arch_vmap_pmd_supported arch_vmap_pmd_supported
+static inline bool arch_vmap_pmd_supported(pgprot_t prot)
+{
+ return true;
+}
+
+#ifdef CONFIG_RISCV_ISA_SVNAPOT
+#include <linux/pgtable.h>
+
+#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
+static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
+ u64 pfn, unsigned int max_page_shift)
+{
+ unsigned long map_size = PAGE_SIZE;
+ unsigned long size, order;
+
+ if (!has_svnapot())
+ return map_size;
+
+ for_each_napot_order_rev(order) {
+ if (napot_cont_shift(order) > max_page_shift)
+ continue;
+
+ size = napot_cont_size(order);
+ if (end - addr < size)
+ continue;
+
+ if (!IS_ALIGNED(addr, size))
+ continue;
+
+ if (!IS_ALIGNED(PFN_PHYS(pfn), size))
+ continue;
+
+ map_size = size;
+ break;
+ }
+
+ return map_size;
+}
+
+#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
+static inline int arch_vmap_pte_supported_shift(unsigned long size)
+{
+ int shift = PAGE_SHIFT;
+ unsigned long order;
+
+ if (!has_svnapot())
+ return shift;
+
+ WARN_ON_ONCE(size >= PMD_SIZE);
+
+ for_each_napot_order_rev(order) {
+ if (napot_cont_size(order) > size)
+ continue;
+
+ if (!IS_ALIGNED(size, napot_cont_size(order)))
+ continue;
+
+ shift = napot_cont_shift(order);
+ break;
+ }
+
+ return shift;
+}
+
+#endif /* CONFIG_RISCV_ISA_SVNAPOT */
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#endif /* _ASM_RISCV_VMALLOC_H */
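The NAPOT helpers above pick the largest naturally-aligned power-of-two granule that fits the request. A self-contained toy model of the same selection with concrete numbers, assuming 4K base pages and only Svnapot's 64K (order 4) granule; napot_cont_size()/napot_cont_shift() are folded into constants here:

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define NAPOT_SHIFT	(PAGE_SHIFT + 4)	/* order-4 granule: 64K */
	#define NAPOT_SIZE	(1UL << NAPOT_SHIFT)

	static int pick_shift(unsigned long size)
	{
		if (size >= NAPOT_SIZE && size % NAPOT_SIZE == 0)
			return NAPOT_SHIFT;	/* map 64K NAPOT PTE groups */
		return PAGE_SHIFT;		/* fall back to 4K pages */
	}

	int main(void)
	{
		printf("%d\n", pick_shift(128 * 1024));	/* 128K -> prints 16 */
		printf("%d\n", pick_shift(20 * 1024));	/* 20K  -> prints 12 */
		return 0;
	}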