Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--	arch/arm64/mm/Makefile	|   1
-rw-r--r--	arch/arm64/mm/cache.S	|  10
-rw-r--r--	arch/arm64/mm/dump.c	| 329
-rw-r--r--	arch/arm64/mm/fault.c	|   2
-rw-r--r--	arch/arm64/mm/init.c	|   2
-rw-r--r--	arch/arm64/mm/ioremap.c	|  93
-rw-r--r--	arch/arm64/mm/mm.h	|   1
-rw-r--r--	arch/arm64/mm/mmap.c	|  12
-rw-r--r--	arch/arm64/mm/mmu.c	| 108
-rw-r--r--	arch/arm64/mm/pgd.c	|  20
10 files changed, 466 insertions, 112 deletions
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index c56179ed2c09..773d37a14039 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -3,3 +3,4 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
ioremap.o mmap.o pgd.o mmu.o \
context.o proc.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_ARM64_PTDUMP) += dump.o
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 23663837acff..2560e1e1562e 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -17,9 +17,12 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative-asm.h>
#include "proc-macros.S"
@@ -138,9 +141,12 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU
add x4, x4, x2
cmp x4, x1
b.lo 1b
-9: // ignore any faulting cache operation
dsb ish
isb
+ mov x0, #0
+ ret
+9:
+ mov x0, #-EFAULT
ret
ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)
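With the hunk above, a faulting user-space cache op no longer succeeds silently: the fault fixup branches to label 9 and the routine returns -EFAULT instead of falling through, while the success path returns 0. A minimal sketch of a caller that propagates this, assuming the routine is callable as a long-returning function (the wrapper name is illustrative, not part of this patch):

	/*
	 * Hypothetical caller, for illustration only: fail a user
	 * cache-maintenance request if any line in the range faults.
	 */
	static int sync_user_icache(unsigned long start, unsigned long end)
	{
		if (__flush_cache_user_range(start, end))
			return -EFAULT;
		return 0;
	}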
@@ -210,7 +216,7 @@ __dma_clean_range:
dcache_line_size x2, x3
sub x3, x2, #1
bic x0, x0, x3
-1: dc cvac, x0 // clean D / U line
+1: alternative_insn "dc cvac, x0", "dc civac, x0", ARM64_WORKAROUND_CLEAN_CACHE
add x0, x0, x2
cmp x0, x1
b.lo 1b
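The alternative_insn change means the default "dc cvac" (clean to point of coherency) is rewritten at boot into "dc civac" (clean and invalidate) on CPUs that declare the ARM64_WORKAROUND_CLEAN_CACHE erratum, via the boot-time alternatives patching code; the fast path carries no runtime branch.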
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
new file mode 100644
index 000000000000..cf33f33333cc
--- /dev/null
+++ b/arch/arm64/mm/dump.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Debug helper to dump the current kernel pagetables of the system
+ * so that we can see what the various memory ranges are set to.
+ *
+ * Derived from x86 and arm implementation:
+ * (C) Copyright 2008 Intel Corporation
+ *
+ * Author: Arjan van de Ven <arjan@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+
+#define LOWEST_ADDR (UL(0xffffffffffffffff) << VA_BITS)
+
+struct addr_marker {
+ unsigned long start_address;
+ const char *name;
+};
+
+enum address_markers_idx {
+ VMALLOC_START_NR = 0,
+ VMALLOC_END_NR,
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ VMEMMAP_START_NR,
+ VMEMMAP_END_NR,
+#endif
+ PCI_START_NR,
+ PCI_END_NR,
+ FIXADDR_START_NR,
+ FIXADDR_END_NR,
+ MODULES_START_NR,
+	MODULES_END_NR,
+ KERNEL_SPACE_NR,
+};
+
+static struct addr_marker address_markers[] = {
+ { VMALLOC_START, "vmalloc() Area" },
+ { VMALLOC_END, "vmalloc() End" },
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ { 0, "vmemmap start" },
+ { 0, "vmemmap end" },
+#endif
+ { (unsigned long) PCI_IOBASE, "PCI I/O start" },
+ { (unsigned long) PCI_IOBASE + SZ_16M, "PCI I/O end" },
+ { FIXADDR_START, "Fixmap start" },
+ { FIXADDR_TOP, "Fixmap end" },
+ { MODULES_VADDR, "Modules start" },
+ { MODULES_END, "Modules end" },
+ { PAGE_OFFSET, "Kernel Mapping" },
+ { -1, NULL },
+};
+
+struct pg_state {
+ struct seq_file *seq;
+ const struct addr_marker *marker;
+ unsigned long start_address;
+ unsigned level;
+ u64 current_prot;
+};
+
+struct prot_bits {
+ u64 mask;
+ u64 val;
+ const char *set;
+ const char *clear;
+};
+
+static const struct prot_bits pte_bits[] = {
+ {
+ .mask = PTE_USER,
+ .val = PTE_USER,
+ .set = "USR",
+ .clear = " ",
+ }, {
+ .mask = PTE_RDONLY,
+ .val = PTE_RDONLY,
+ .set = "ro",
+ .clear = "RW",
+ }, {
+ .mask = PTE_PXN,
+ .val = PTE_PXN,
+ .set = "NX",
+ .clear = "x ",
+ }, {
+ .mask = PTE_SHARED,
+ .val = PTE_SHARED,
+ .set = "SHD",
+ .clear = " ",
+ }, {
+ .mask = PTE_AF,
+ .val = PTE_AF,
+ .set = "AF",
+ .clear = " ",
+ }, {
+ .mask = PTE_NG,
+ .val = PTE_NG,
+ .set = "NG",
+ .clear = " ",
+ }, {
+ .mask = PTE_UXN,
+ .val = PTE_UXN,
+ .set = "UXN",
+ }, {
+ .mask = PTE_ATTRINDX_MASK,
+ .val = PTE_ATTRINDX(MT_DEVICE_nGnRnE),
+ .set = "DEVICE/nGnRnE",
+ }, {
+ .mask = PTE_ATTRINDX_MASK,
+ .val = PTE_ATTRINDX(MT_DEVICE_nGnRE),
+ .set = "DEVICE/nGnRE",
+ }, {
+ .mask = PTE_ATTRINDX_MASK,
+ .val = PTE_ATTRINDX(MT_DEVICE_GRE),
+ .set = "DEVICE/GRE",
+ }, {
+ .mask = PTE_ATTRINDX_MASK,
+ .val = PTE_ATTRINDX(MT_NORMAL_NC),
+ .set = "MEM/NORMAL-NC",
+ }, {
+ .mask = PTE_ATTRINDX_MASK,
+ .val = PTE_ATTRINDX(MT_NORMAL),
+ .set = "MEM/NORMAL",
+ }
+};
+
+struct pg_level {
+ const struct prot_bits *bits;
+ size_t num;
+ u64 mask;
+};
+
+static struct pg_level pg_level[] = {
+ {
+ }, { /* pgd */
+ .bits = pte_bits,
+ .num = ARRAY_SIZE(pte_bits),
+ }, { /* pud */
+ .bits = pte_bits,
+ .num = ARRAY_SIZE(pte_bits),
+ }, { /* pmd */
+ .bits = pte_bits,
+ .num = ARRAY_SIZE(pte_bits),
+ }, { /* pte */
+ .bits = pte_bits,
+ .num = ARRAY_SIZE(pte_bits),
+ },
+};
+
+static void dump_prot(struct pg_state *st, const struct prot_bits *bits,
+ size_t num)
+{
+ unsigned i;
+
+ for (i = 0; i < num; i++, bits++) {
+ const char *s;
+
+ if ((st->current_prot & bits->mask) == bits->val)
+ s = bits->set;
+ else
+ s = bits->clear;
+
+ if (s)
+ seq_printf(st->seq, " %s", s);
+ }
+}
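A worked example with illustrative bit values: a kernel RW, shared, accessed, never-executable Normal mapping matches PTE_PXN, PTE_SHARED, PTE_AF, PTE_UXN and PTE_ATTRINDX(MT_NORMAL), so dump_prot() walks pte_bits in order and prints roughly "    RW NX SHD AF    UXN MEM/NORMAL". Rows that omit .clear (leaving it NULL, as the UXN and attribute-index entries do) print nothing when their bits do not match.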
+
+static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
+ u64 val)
+{
+ static const char units[] = "KMGTPE";
+ u64 prot = val & pg_level[level].mask;
+
+ if (!st->level) {
+ st->level = level;
+ st->current_prot = prot;
+ st->start_address = addr;
+ seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ } else if (prot != st->current_prot || level != st->level ||
+ addr >= st->marker[1].start_address) {
+ const char *unit = units;
+ unsigned long delta;
+
+ if (st->current_prot) {
+ seq_printf(st->seq, "0x%16lx-0x%16lx ",
+ st->start_address, addr);
+
+ delta = (addr - st->start_address) >> 10;
+ while (!(delta & 1023) && unit[1]) {
+ delta >>= 10;
+ unit++;
+ }
+ seq_printf(st->seq, "%9lu%c", delta, *unit);
+ if (pg_level[st->level].bits)
+ dump_prot(st, pg_level[st->level].bits,
+ pg_level[st->level].num);
+ seq_puts(st->seq, "\n");
+ }
+
+ if (addr >= st->marker[1].start_address) {
+ st->marker++;
+ seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ }
+
+ st->start_address = addr;
+ st->current_prot = prot;
+ st->level = level;
+ }
+}
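The unit reduction above turns a byte delta into the largest exact binary unit. A standalone user-space sketch of the same loop (illustrative only, not part of the patch):

	#include <stdio.h>

	/* As in note_page(): start in KiB, then keep dividing by 1024
	 * while the value stays an exact multiple and a larger unit
	 * exists, so a 2MB region prints as "2M" rather than "2048K". */
	static void print_region_size(unsigned long bytes)
	{
		static const char units[] = "KMGTPE";
		const char *unit = units;
		unsigned long delta = bytes >> 10;

		while (!(delta & 1023) && unit[1]) {
			delta >>= 10;
			unit++;
		}
		printf("%9lu%c\n", delta, *unit);
	}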
+
+static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
+{
+ pte_t *pte = pte_offset_kernel(pmd, 0);
+ unsigned long addr;
+ unsigned i;
+
+ for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
+ addr = start + i * PAGE_SIZE;
+ note_page(st, addr, 4, pte_val(*pte));
+ }
+}
+
+static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
+{
+ pmd_t *pmd = pmd_offset(pud, 0);
+ unsigned long addr;
+ unsigned i;
+
+ for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
+ addr = start + i * PMD_SIZE;
+ if (pmd_none(*pmd) || pmd_sect(*pmd) || pmd_bad(*pmd))
+ note_page(st, addr, 3, pmd_val(*pmd));
+ else
+ walk_pte(st, pmd, addr);
+ }
+}
+
+static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
+{
+ pud_t *pud = pud_offset(pgd, 0);
+ unsigned long addr;
+ unsigned i;
+
+ for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
+ addr = start + i * PUD_SIZE;
+ if (pud_none(*pud) || pud_sect(*pud) || pud_bad(*pud))
+ note_page(st, addr, 2, pud_val(*pud));
+ else
+ walk_pmd(st, pud, addr);
+ }
+}
+
+static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long start)
+{
+ pgd_t *pgd = pgd_offset(mm, 0UL);
+ unsigned i;
+ unsigned long addr;
+
+ for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
+ addr = start + i * PGDIR_SIZE;
+ if (pgd_none(*pgd) || pgd_bad(*pgd))
+ note_page(st, addr, 1, pgd_val(*pgd));
+ else
+ walk_pud(st, pgd, addr);
+ }
+}
+
+static int ptdump_show(struct seq_file *m, void *v)
+{
+ struct pg_state st = {
+ .seq = m,
+ .marker = address_markers,
+ };
+
+ walk_pgd(&st, &init_mm, LOWEST_ADDR);
+
+ note_page(&st, 0, 0, 0);
+ return 0;
+}
+
+static int ptdump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ptdump_show, NULL);
+}
+
+static const struct file_operations ptdump_fops = {
+ .open = ptdump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int ptdump_init(void)
+{
+ struct dentry *pe;
+ unsigned i, j;
+
+ for (i = 0; i < ARRAY_SIZE(pg_level); i++)
+ if (pg_level[i].bits)
+ for (j = 0; j < pg_level[i].num; j++)
+ pg_level[i].mask |= pg_level[i].bits[j].mask;
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	address_markers[VMEMMAP_START_NR].start_address =
+				(unsigned long)virt_to_page(PAGE_OFFSET);
+	address_markers[VMEMMAP_END_NR].start_address =
+				(unsigned long)virt_to_page(high_memory);
+#endif
+
+ pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
+ &ptdump_fops);
+ return pe ? 0 : -ENOMEM;
+}
+device_initcall(ptdump_init);
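With CONFIG_ARM64_PTDUMP enabled, the table is read from debugfs. An illustrative sample of the output format implied by the seq_printf calls above (addresses, sizes and attributes are made up and will vary by machine):

	# mount -t debugfs none /sys/kernel/debug
	# cat /sys/kernel/debug/kernel_page_tables
	---[ vmalloc() Area ]---
	0xffffff8000000000-0xffffff8000200000        2M     RW NX SHD AF    UXN MEM/NORMAL
	---[ vmalloc() End ]---
	...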
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 41cb6d3d6075..c11cd27ca8f5 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -380,7 +380,7 @@ static struct fault_info {
{ do_bad, SIGBUS, 0, "level 1 address size fault" },
{ do_bad, SIGBUS, 0, "level 2 address size fault" },
{ do_bad, SIGBUS, 0, "level 3 address size fault" },
- { do_translation_fault, SIGSEGV, SEGV_MAPERR, "input address range fault" },
+ { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 494297c698ca..bac492c12fcc 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -39,6 +39,7 @@
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
+#include <asm/alternative.h>
#include "mm.h"
@@ -325,6 +326,7 @@ void __init mem_init(void)
void free_initmem(void)
{
free_initmem_default(0);
+ free_alternatives_memory();
}
#ifdef CONFIG_BLK_DEV_INITRD
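Here free_alternatives_memory() releases the __alt_instructions section once boot-time patching has been applied, mirroring the rest of the init-memory teardown in free_initmem().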
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index fa324bd5a5c4..cbb99c8f1e04 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -103,97 +103,10 @@ void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
}
EXPORT_SYMBOL(ioremap_cache);
-static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
-static pte_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
-#endif
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
-static pte_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
-#endif
-
-static inline pud_t * __init early_ioremap_pud(unsigned long addr)
-{
- pgd_t *pgd;
-
- pgd = pgd_offset_k(addr);
- BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
-
- return pud_offset(pgd, addr);
-}
-
-static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
-{
- pud_t *pud = early_ioremap_pud(addr);
-
- BUG_ON(pud_none(*pud) || pud_bad(*pud));
-
- return pmd_offset(pud, addr);
-}
-
-static inline pte_t * __init early_ioremap_pte(unsigned long addr)
-{
- pmd_t *pmd = early_ioremap_pmd(addr);
-
- BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));
-
- return pte_offset_kernel(pmd, addr);
-}
-
+/*
+ * Must be called after early_fixmap_init
+ */
void __init early_ioremap_init(void)
{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- unsigned long addr = fix_to_virt(FIX_BTMAP_BEGIN);
-
- pgd = pgd_offset_k(addr);
- pgd_populate(&init_mm, pgd, bm_pud);
- pud = pud_offset(pgd, addr);
- pud_populate(&init_mm, pud, bm_pmd);
- pmd = pmd_offset(pud, addr);
- pmd_populate_kernel(&init_mm, pmd, bm_pte);
-
- /*
- * The boot-ioremap range spans multiple pmds, for which
- * we are not prepared:
- */
- BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
- != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
-
- if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
- WARN_ON(1);
- pr_warn("pmd %p != %p\n",
- pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
- pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
- fix_to_virt(FIX_BTMAP_BEGIN));
- pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
- fix_to_virt(FIX_BTMAP_END));
-
- pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
- pr_warn("FIX_BTMAP_BEGIN: %d\n",
- FIX_BTMAP_BEGIN);
- }
-
early_ioremap_setup();
}
-
-void __init __early_set_fixmap(enum fixed_addresses idx,
- phys_addr_t phys, pgprot_t flags)
-{
- unsigned long addr = __fix_to_virt(idx);
- pte_t *pte;
-
- if (idx >= __end_of_fixed_addresses) {
- BUG();
- return;
- }
-
- pte = early_ioremap_pte(addr);
-
- if (pgprot_val(flags))
- set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
- else {
- pte_clear(&init_mm, addr, pte);
- flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
- }
-}
diff --git a/arch/arm64/mm/mm.h b/arch/arm64/mm/mm.h
index d519f4f50c8c..50c3351df9c7 100644
--- a/arch/arm64/mm/mm.h
+++ b/arch/arm64/mm/mm.h
@@ -1,2 +1 @@
extern void __init bootmem_init(void);
-extern void __init arm64_swiotlb_init(void);
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 1d73662f00ff..54922d1275b8 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -47,22 +47,14 @@ static int mmap_is_legacy(void)
return sysctl_legacy_va_layout;
}
-/*
- * Since get_random_int() returns the same value within a 1 jiffy window, we
- * will almost always get the same randomisation for the stack and mmap
- * region. This will mean the relative distance between stack and mmap will be
- * the same.
- *
- * To avoid this we can shift the randomness by 1 bit.
- */
static unsigned long mmap_rnd(void)
{
unsigned long rnd = 0;
if (current->flags & PF_RANDOMIZE)
- rnd = (long)get_random_int() & (STACK_RND_MASK >> 1);
+ rnd = (long)get_random_int() & STACK_RND_MASK;
- return rnd << (PAGE_SHIFT + 1);
+ return rnd << PAGE_SHIFT;
}
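The deleted comment documents why the hack existed: get_random_int() used to repeat within a 1-jiffy window, so the randomness was shifted by one bit to decouple stack and mmap placement. With that fixed at the source, mmap_rnd() can use the full mask again: assuming 4K pages, where STACK_RND_MASK is 0x3ffff, the same roughly 1GB randomisation window is now covered at single-page rather than two-page granularity.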
static unsigned long mmap_base(void)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6894ef3e6234..6032f3e3056a 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -28,6 +28,7 @@
#include <linux/io.h>
#include <asm/cputype.h>
+#include <asm/fixmap.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
@@ -202,7 +203,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
}
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
- unsigned long end, unsigned long phys,
+ unsigned long end, phys_addr_t phys,
int map_io)
{
pud_t *pud;
@@ -297,11 +298,15 @@ static void __init map_mem(void)
* create_mapping requires puds, pmds and ptes to be allocated from
* memory addressable from the initial direct kernel mapping.
*
- * The initial direct kernel mapping, located at swapper_pg_dir,
- * gives us PUD_SIZE memory starting from PHYS_OFFSET (which must be
- * aligned to 2MB as per Documentation/arm64/booting.txt).
+ * The initial direct kernel mapping, located at swapper_pg_dir, gives
+ * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
+ * PHYS_OFFSET (which must be aligned to 2MB as per
+ * Documentation/arm64/booting.txt).
*/
- limit = PHYS_OFFSET + PUD_SIZE;
+ if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
+ limit = PHYS_OFFSET + PMD_SIZE;
+ else
+ limit = PHYS_OFFSET + PUD_SIZE;
memblock_set_current_limit(limit);
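Concretely, with a 4K granule PUD_SIZE is 1GB and with a 64K granule PMD_SIZE is 512MB, so the early memblock limit becomes PHYS_OFFSET plus whichever span the initial swapper tables can reach through a single next-level table.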
/* map all the memory banks */
@@ -459,3 +464,96 @@ void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
+static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
+#endif
+#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
+#endif
+
+static inline pud_t *fixmap_pud(unsigned long addr)
+{
+ pgd_t *pgd = pgd_offset_k(addr);
+
+ BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
+
+ return pud_offset(pgd, addr);
+}
+
+static inline pmd_t *fixmap_pmd(unsigned long addr)
+{
+ pud_t *pud = fixmap_pud(addr);
+
+ BUG_ON(pud_none(*pud) || pud_bad(*pud));
+
+ return pmd_offset(pud, addr);
+}
+
+static inline pte_t *fixmap_pte(unsigned long addr)
+{
+ pmd_t *pmd = fixmap_pmd(addr);
+
+ BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));
+
+ return pte_offset_kernel(pmd, addr);
+}
+
+void __init early_fixmap_init(void)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ unsigned long addr = FIXADDR_START;
+
+ pgd = pgd_offset_k(addr);
+ pgd_populate(&init_mm, pgd, bm_pud);
+ pud = pud_offset(pgd, addr);
+ pud_populate(&init_mm, pud, bm_pmd);
+ pmd = pmd_offset(pud, addr);
+ pmd_populate_kernel(&init_mm, pmd, bm_pte);
+
+ /*
+ * The boot-ioremap range spans multiple pmds, for which
+	 * we are not prepared:
+ */
+ BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
+ != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
+
+ if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
+ || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
+ WARN_ON(1);
+ pr_warn("pmd %p != %p, %p\n",
+ pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
+ fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
+ pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
+ fix_to_virt(FIX_BTMAP_BEGIN));
+ pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
+ fix_to_virt(FIX_BTMAP_END));
+
+ pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
+ pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
+ }
+}
+
+void __set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t flags)
+{
+ unsigned long addr = __fix_to_virt(idx);
+ pte_t *pte;
+
+ if (idx >= __end_of_fixed_addresses) {
+ BUG();
+ return;
+ }
+
+ pte = fixmap_pte(addr);
+
+ if (pgprot_val(flags)) {
+ set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
+ } else {
+ pte_clear(&init_mm, addr, pte);
+ flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
+ }
+}
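A hedged usage sketch of __set_fixmap() (FIX_EXAMPLE is a hypothetical slot; real callers use an index from enum fixed_addresses):

	/* Map a physical page through a fixmap slot... */
	static void *fixmap_map_example(phys_addr_t phys)
	{
		__set_fixmap(FIX_EXAMPLE, phys & PAGE_MASK, PAGE_KERNEL);
		return (void *)(fix_to_virt(FIX_EXAMPLE) + (phys & ~PAGE_MASK));
	}

	/* ...and tear it down: a zero prot clears the pte and flushes. */
	static void fixmap_unmap_example(void)
	{
		__set_fixmap(FIX_EXAMPLE, 0, __pgprot(0));
	}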
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 62c6101df260..71ca104f97bd 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -30,12 +30,14 @@
#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
+static struct kmem_cache *pgd_cache;
+
pgd_t *pgd_alloc(struct mm_struct *mm)
{
if (PGD_SIZE == PAGE_SIZE)
- return (pgd_t *)get_zeroed_page(GFP_KERNEL);
+ return (pgd_t *)__get_free_page(PGALLOC_GFP);
else
- return kzalloc(PGD_SIZE, GFP_KERNEL);
+ return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -43,5 +45,17 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
if (PGD_SIZE == PAGE_SIZE)
free_page((unsigned long)pgd);
else
- kfree(pgd);
+ kmem_cache_free(pgd_cache, pgd);
+}
+
+static int __init pgd_cache_init(void)
+{
+ /*
+ * Naturally aligned pgds required by the architecture.
+ */
+ if (PGD_SIZE != PAGE_SIZE)
+ pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
+ SLAB_PANIC, NULL);
+ return 0;
}
+core_initcall(pgd_cache_init);
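The point of the cache is natural alignment: kmem_cache_create() with align equal to size guarantees addr % PGD_SIZE == 0, which kzalloc() does not (slab debugging, for one, can skew object placement). A user-space analogue of the invariant, illustrative only:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		size_t pgd_size = 128;	/* illustrative sub-page PGD_SIZE */
		/* aligned_alloc() stands in for kmem_cache_alloc() from a
		 * cache created with align == size. */
		void *pgd = aligned_alloc(pgd_size, pgd_size);

		printf("naturally aligned: %s\n",
		       pgd && (uintptr_t)pgd % pgd_size == 0 ? "yes" : "no");
		free(pgd);
		return 0;
	}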