Diffstat (limited to 'arch/s390/mm/vmem.c'):
 arch/s390/mm/vmem.c | 128 ++++++++++++++++++++++++++++++++++-----------
 1 file changed, 98 insertions(+), 30 deletions(-)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index ee1a97078527..5b22c6e24528 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -5,12 +5,14 @@
#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
+#include <linux/kasan.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
+#include <linux/sort.h>
#include <asm/cacheflush.h>
#include <asm/nospec-branch.h>
#include <asm/pgalloc.h>
@@ -296,10 +298,7 @@ static void try_free_pmd_table(pud_t *pud, unsigned long start)
/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
if (end > VMALLOC_START)
return;
-#ifdef CONFIG_KASAN
- if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
- return;
-#endif
+
pmd = pmd_offset(pud, start);
for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
if (!pmd_none(*pmd))
@@ -371,10 +370,6 @@ static void try_free_pud_table(p4d_t *p4d, unsigned long start)
/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
if (end > VMALLOC_START)
return;
-#ifdef CONFIG_KASAN
- if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
- return;
-#endif
pud = pud_offset(p4d, start);
for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
@@ -425,10 +420,6 @@ static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
if (end > VMALLOC_START)
return;
-#ifdef CONFIG_KASAN
- if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
- return;
-#endif
p4d = p4d_offset(pgd, start);
for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
@@ -657,6 +648,26 @@ void vmem_unmap_4k_page(unsigned long addr)
mutex_unlock(&vmem_mutex);
}
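+/* sort() comparator: order memblock regions by ascending base address. */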
+static int __init memblock_region_cmp(const void *a, const void *b)
+{
+ const struct memblock_region *r1 = a;
+ const struct memblock_region *r2 = b;
+
+ if (r1->base < r2->base)
+ return -1;
+ if (r1->base > r2->base)
+ return 1;
+ return 0;
+}
+
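+/* sort() swap callback: exchange two memblock regions in place. */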
+static void __init memblock_region_swap(void *a, void *b, int size)
+{
+ swap(*(struct memblock_region *)a, *(struct memblock_region *)b);
+}
+
+#ifdef CONFIG_KASAN
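+/* Shorthand for the KASAN shadow address corresponding to address x. */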
+#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
+#endif
/*
* map whole physical memory to virtual memory (identity mapping)
* we reserve enough space in the vmalloc area for vmemmap to hotplug
@@ -664,29 +675,86 @@ void vmem_unmap_4k_page(unsigned long addr)
*/
void __init vmem_map_init(void)
{
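+ /*
+ * Regions which must not get the default RW+NX attributes:
+ * lowcore, kernel text, init text and the amode31 text section.
+ */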
+ struct memblock_region memory_rwx_regions[] = {
+ {
+ .base = 0,
+ .size = sizeof(struct lowcore),
+ .flags = MEMBLOCK_NONE,
+#ifdef CONFIG_NUMA
+ .nid = NUMA_NO_NODE,
+#endif
+ },
+ {
+ .base = __pa(_stext),
+ .size = _etext - _stext,
+ .flags = MEMBLOCK_NONE,
+#ifdef CONFIG_NUMA
+ .nid = NUMA_NO_NODE,
+#endif
+ },
+ {
+ .base = __pa(_sinittext),
+ .size = _einittext - _sinittext,
+ .flags = MEMBLOCK_NONE,
+#ifdef CONFIG_NUMA
+ .nid = NUMA_NO_NODE,
+#endif
+ },
+ {
+ .base = __stext_amode31,
+ .size = __etext_amode31 - __stext_amode31,
+ .flags = MEMBLOCK_NONE,
+#ifdef CONFIG_NUMA
+ .nid = NUMA_NO_NODE,
+#endif
+ },
+ };
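+ /*
+ * Wrap the regions in a memblock_type so that __for_each_mem_range()
+ * can use them as its exclude list.
+ */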
+ struct memblock_type memory_rwx = {
+ .regions = memory_rwx_regions,
+ .cnt = ARRAY_SIZE(memory_rwx_regions),
+ .max = ARRAY_SIZE(memory_rwx_regions),
+ };
phys_addr_t base, end;
u64 i;
- for_each_mem_range(i, &base, &end)
- vmem_add_range(base, end - base);
- __set_memory((unsigned long)_stext,
- (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
- SET_MEMORY_RO | SET_MEMORY_X);
- __set_memory((unsigned long)_etext,
- (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
- SET_MEMORY_RO);
- __set_memory((unsigned long)_sinittext,
- (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
- SET_MEMORY_RO | SET_MEMORY_X);
- __set_memory(__stext_amode31, (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT,
- SET_MEMORY_RO | SET_MEMORY_X);
-
- /* lowcore requires 4k mapping for real addresses / prefixing */
- set_memory_4k(0, LC_PAGES);
+ /*
+ * Set the RW+NX attribute on all memory, except the regions enumerated
+ * in the memory_rwx exclude type. Those regions need different
+ * attributes, which are enforced afterwards.
+ *
+ * __for_each_mem_range() expects both the iterate and the exclude types
+ * to be sorted by base address. The relative locations of _stext and
+ * _sinittext are fixed by the linker script, but the locations of
+ * __stext_amode31 and of the kernel image itself are chosen dynamically
+ * at boot. Thus, sort the exclude type before iterating.
+ */
+ sort(&memory_rwx_regions,
+ ARRAY_SIZE(memory_rwx_regions), sizeof(memory_rwx_regions[0]),
+ memblock_region_cmp, memblock_region_swap);
+ __for_each_mem_range(i, &memblock.memory, &memory_rwx,
+ NUMA_NO_NODE, MEMBLOCK_NONE, &base, &end, NULL) {
+ set_memory_rwnx((unsigned long)__va(base),
+ (end - base) >> PAGE_SHIFT);
+ }
+
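+ /* The KASAN shadow of all memory ranges also gets RW+NX attributes. */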
+#ifdef CONFIG_KASAN
+ for_each_mem_range(i, &base, &end) {
+ set_memory_rwnx(__sha(base),
+ (__sha(end) - __sha(base)) >> PAGE_SHIFT);
+ }
+#endif
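+ /* Now enforce the final attributes: text RO+X, rodata RO. */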
+ set_memory_rox((unsigned long)_stext,
+ (unsigned long)(_etext - _stext) >> PAGE_SHIFT);
+ set_memory_ro((unsigned long)_etext,
+ (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT);
+ set_memory_rox((unsigned long)_sinittext,
+ (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT);
+ set_memory_rox(__stext_amode31,
+ (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT);
/* lowcore must be executable for LPSWE */
- if (!static_key_enabled(&cpu_has_bear))
- set_memory_x(0, 1);
+ if (static_key_enabled(&cpu_has_bear))
+ set_memory_nx(0, 1);
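+ /* The second lowcore page is never executed from, so always set NX. */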
+ set_memory_nx(PAGE_SIZE, 1);
pr_info("Write protected kernel read-only data: %luk\n",
(unsigned long)(__end_rodata - _stext) >> 10);
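
For reference, a minimal self-contained userspace sketch of the cmp/swap
callback pattern used with sort() above. struct region is a stand-in for
struct memblock_region, the base/size values are hypothetical, and a plain
insertion sort stands in for the kernel's sort() from <linux/sort.h>, which
takes the same pair of callbacks:

#include <stdio.h>
#include <string.h>

struct region {
	unsigned long base;
	unsigned long size;
};

/* Order regions by ascending base address, as memblock expects. */
static int region_cmp(const void *a, const void *b)
{
	const struct region *r1 = a;
	const struct region *r2 = b;

	if (r1->base < r2->base)
		return -1;
	if (r1->base > r2->base)
		return 1;
	return 0;
}

/* Exchange two regions in place; mirrors the kernel's swap_func_t. */
static void region_swap(void *a, void *b, int size)
{
	struct region tmp;

	memcpy(&tmp, a, size);
	memcpy(a, b, size);
	memcpy(b, &tmp, size);
}

/* Insertion sort driven by the two callbacks, like the kernel's sort(). */
static void sort_regions(struct region *r, int num,
			 int (*cmp)(const void *, const void *),
			 void (*swp)(void *, void *, int))
{
	int i, j;

	for (i = 1; i < num; i++)
		for (j = i; j > 0 && cmp(&r[j - 1], &r[j]) > 0; j--)
			swp(&r[j - 1], &r[j], sizeof(*r));
}

int main(void)
{
	/* Hypothetical layout: image and amode31 bases chosen at boot. */
	struct region regions[] = {
		{ .base = 0x1000000, .size = 0x100000 },	/* kernel text */
		{ .base = 0x0,       .size = 0x2000 },		/* lowcore */
		{ .base = 0x800000,  .size = 0x10000 },		/* amode31 */
	};
	int i, num = sizeof(regions) / sizeof(regions[0]);

	sort_regions(regions, num, region_cmp, region_swap);
	for (i = 0; i < num; i++)
		printf("base=%#lx size=%#lx\n",
		       regions[i].base, regions[i].size);
	return 0;
}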