Diffstat (limited to 'arch/arm/mm')
 arch/arm/mm/fault.c       | 25 ++++++-------------------
 arch/arm/mm/hugetlbpage.c |  1 -
 arch/arm/mm/init.c        |  9 ++-------
 arch/arm/mm/mmu.c         |  6 +-----
 arch/arm/mm/proc-macros.S |  4 +---
 5 files changed, 10 insertions(+), 35 deletions(-)
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index c6550eddfce1..efa402025031 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -202,7 +202,8 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
static vm_fault_t __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
- unsigned int flags, struct task_struct *tsk)
+ unsigned int flags, struct task_struct *tsk,
+ struct pt_regs *regs)
{
struct vm_area_struct *vma;
vm_fault_t fault;
@@ -224,7 +225,7 @@ good_area:
goto out;
}
- return handle_mm_fault(vma, addr & PAGE_MASK, flags);
+ return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
check_stack:
/* Don't allow expansion below FIRST_USER_ADDRESS */
@@ -266,6 +267,8 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
flags |= FAULT_FLAG_WRITE;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
+
/*
* As per x86, we may deadlock here. However, since the kernel only
* validly references user space from well defined areas of the code,
@@ -290,7 +293,7 @@ retry:
#endif
}
- fault = __do_page_fault(mm, addr, fsr, flags, tsk);
+ fault = __do_page_fault(mm, addr, fsr, flags, tsk, regs);
/* If we need to retry but a fatal signal is pending, handle the
* signal first. We do not need to release the mmap_lock because
@@ -302,23 +305,7 @@ retry:
return 0;
}
- /*
- * Major/minor page fault accounting is only done on the
- * initial attempt. If we go through a retry, it is extremely
- * likely that the page will be found in page cache at that point.
- */
-
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_MAJOR) {
- tsk->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
- regs, addr);
- } else {
- tsk->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
- regs, addr);
- }
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
goto retry;
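
Note: the fault.c hunks make two related changes. __do_page_fault() now
receives the trap frame and forwards it to handle_mm_fault(), and the
PERF_COUNT_SW_PAGE_FAULTS event is hoisted above the retry loop so it
fires once per fault rather than once per attempt. The open-coded
major/minor accounting deleted above evidently moves into the generic
handle_mm_fault(), which is why it now takes regs. A minimal sketch of
the deleted logic, assuming the core-mm side performs the equivalent on
the architecture's behalf (the helper name is hypothetical):

#include <linux/mm.h>
#include <linux/perf_event.h>
#include <linux/sched.h>

/* Sketch: charge a completed fault to the current task, bumping the
 * major or minor fault counter and emitting the matching perf
 * software event against the saved registers. */
static void account_fault_sketch(struct pt_regs *regs,
				 unsigned long addr, vm_fault_t fault)
{
	if (fault & VM_FAULT_MAJOR) {
		current->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
			      regs, addr);
	} else {
		current->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
			      regs, addr);
	}
}

Centralizing this in core mm gives every architecture the same
semantics (for instance, counting a retried fault exactly once) instead
of each arch duplicating the block deleted above.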
diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
index a1e5aace897a..dd7a0277c5c0 100644
--- a/arch/arm/mm/hugetlbpage.c
+++ b/arch/arm/mm/hugetlbpage.c
@@ -17,7 +17,6 @@
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
-#include <asm/pgalloc.h>
/*
* On ARM, huge pages are backed by pmd's rather than pte's, so we do a lot
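
Note: the only change to hugetlbpage.c is dropping an <asm/pgalloc.h>
include that nothing in the file still uses. The surviving comment
above is the useful context: ARM huge pages are pmd-backed, so the
helpers in this file test pmd bits rather than walking down to ptes. A
sketch of the kind of test involved, assuming the LPAE convention that
a present pmd without the table bit maps a section directly (names and
details here are illustrative, not copied from the file):

#include <asm/pgtable.h>

/* Sketch: a pmd that is present but has no table bit set maps a
 * section, i.e. a huge page, directly at the pmd level. */
static int pmd_is_huge_sketch(pmd_t pmd)
{
	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}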
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 01e18e43b174..000c1b48e973 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -243,13 +243,8 @@ void __init bootmem_init(void)
(phys_addr_t)max_low_pfn << PAGE_SHIFT);
/*
- * Sparsemem tries to allocate bootmem in memory_present(),
- * so must be done after the fixed reservations
- */
- memblocks_present();
-
- /*
- * sparse_init() needs the bootmem allocator up and running.
+ * sparse_init() tries to allocate memory from memblock, so must be
+ * done after the fixed reservations
*/
sparse_init();
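
Note: memblocks_present() is dropped here, with the rewritten comment
implying its work is now folded into sparse_init(). The one ordering
rule that remains is the one the new comment states: sparse_init()
allocates its section arrays from memblock, so every fixed reservation
must already be in place or sparse_init() could hand out memory the
platform needs. A condensed, hypothetical sketch of that ordering
(addresses and sizes illustrative):

#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sizes.h>

static void __init bootmem_init_sketch(void)
{
	/* 1. fixed reservations: kernel image, DTB, initrd, ... */
	memblock_reserve(0x80008000, SZ_16M);	/* illustrative range */

	/* 2. only now is it safe for sparse_init() to pull the
	 *    mem_section storage out of memblock */
	sparse_init();
}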
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index bcd82614c25d..c36f977b2ccb 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -29,6 +29,7 @@
#include <asm/traps.h>
#include <asm/procinfo.h>
#include <asm/memory.h>
+#include <asm/pgalloc.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -74,8 +75,6 @@ struct cachepolicy {
pteval_t pte;
};
-unsigned long kimage_voffset __ro_after_init;
-
static struct cachepolicy cache_policies[] __initdata = {
{
.policy = "uncached",
@@ -1654,9 +1653,6 @@ void __init paging_init(const struct machine_desc *mdesc)
empty_zero_page = virt_to_page(zero_page);
__flush_dcache_page(NULL, empty_zero_page);
-
- /* Compute the virt/idmap offset, mostly for the sake of KVM */
- kimage_voffset = (unsigned long)&kimage_voffset - virt_to_idmap(&kimage_voffset);
}
void __init early_mm_init(const struct machine_desc *mdesc)
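
Note: kimage_voffset existed, per the deleted comment, mostly for KVM,
which needed to translate between kernel virtual addresses and their
identity-mapped aliases. With no remaining user (presumably because
32-bit ARM KVM host support was removed separately; that is background,
not shown by this diff), both the variable and its initializer go. What
the deleted initializer computed, as a standalone sketch:

#include <linux/init.h>
#include <asm/memory.h>

/* Sketch: the delta between a kernel object's virtual address and its
 * identity-map (physical alias) address. Any kernel object works as
 * the anchor; the deleted code used kimage_voffset itself. */
static unsigned long __init kimage_voffset_sketch(void)
{
	static int anchor;

	return (unsigned long)&anchor - virt_to_idmap(&anchor);
}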
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 60ac7c5999a9..e2c743aa2eb2 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -5,7 +5,6 @@
* VMA_VM_FLAGS
* VM_EXEC
*/
-#include <linux/const.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
@@ -31,8 +30,7 @@
* act_mm - get current->active_mm
*/
.macro act_mm, rd
- bic \rd, sp, #(THREAD_SIZE - 1) & ~63
- bic \rd, \rd, #63
+ get_thread_info \rd
ldr \rd, [\rd, #TI_TASK]
.if (TSK_ACTIVE_MM > IMM12_MASK)
add \rd, \rd, #TSK_ACTIVE_MM & ~IMM12_MASK
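
Note: the old act_mm body open-coded the mask that get_thread_info
already implements. The two bic instructions existed because
THREAD_SIZE - 1 is generally not encodable as a single rotated ARM
immediate, so the mask was split: the first bic clears bit 6 and up via
(THREAD_SIZE - 1) & ~63, and the second clears the low six bits. Using
get_thread_info keeps that knowledge in one place. A C analogue of what
the macro computes, as a sketch (field names follow the 32-bit ARM
thread_info of this era, before THREAD_INFO_IN_TASK):

#include <linux/sched.h>
#include <linux/thread_info.h>

/* Sketch: thread_info sits at the base of the kernel stack, so
 * rounding the stack pointer down to a THREAD_SIZE boundary recovers
 * it; the task and its active_mm hang off that. */
static struct mm_struct *act_mm_sketch(unsigned long sp)
{
	struct thread_info *ti =
		(struct thread_info *)(sp & ~(THREAD_SIZE - 1));

	return ti->task->active_mm;
}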