Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile            6
-rw-r--r--  arch/arm/kernel/entry-armv.S       19
-rw-r--r--  arch/arm/kernel/entry-header.S      9
-rw-r--r--  arch/arm/kernel/ftrace.c           50
-rw-r--r--  arch/arm/kernel/head-common.S       1
-rw-r--r--  arch/arm/kernel/head.S              6
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c   123
-rw-r--r--  arch/arm/kernel/insn.c             19
-rw-r--r--  arch/arm/kernel/module-plts.c      49
-rw-r--r--  arch/arm/kernel/ptrace.c            4
-rw-r--r--  arch/arm/kernel/return_address.c    4
-rw-r--r--  arch/arm/kernel/setup.c            16
-rw-r--r--  arch/arm/kernel/signal.c           14
-rw-r--r--  arch/arm/kernel/stacktrace.c       23
-rw-r--r--  arch/arm/kernel/suspend.c          19
15 files changed, 255 insertions(+), 107 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 50de918252b7..3939ad178405 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -17,10 +17,14 @@ CFLAGS_REMOVE_return_address.o = -pg
# Object file lists.
obj-y := elf.o entry-common.o irq.o opcodes.o \
- process.o ptrace.o reboot.o return_address.o \
+ process.o ptrace.o reboot.o \
setup.o signal.o sigreturn_codes.o \
stacktrace.o sys_arm.o time.o traps.o
+ifneq ($(CONFIG_ARM_UNWIND),y)
+obj-$(CONFIG_FRAME_POINTER) += return_address.o
+endif
+
obj-$(CONFIG_ATAGS) += atags_parse.o
obj-$(CONFIG_ATAGS_PROC) += atags_proc.o
obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index fbc707626b3e..2d8323648256 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -30,6 +30,7 @@
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>
+#include <asm/uaccess-asm.h>
#include "entry-header.S"
#include <asm/entry-macro-multi.S>
@@ -186,15 +187,7 @@ ENDPROC(__und_invalid)
stmia r7, {r2 - r6}
get_thread_info tsk
- ldr r0, [tsk, #TI_ADDR_LIMIT]
- mov r1, #TASK_SIZE
- str r1, [tsk, #TI_ADDR_LIMIT]
- str r0, [sp, #SVC_ADDR_LIMIT]
-
- uaccess_save r0
- .if \uaccess
- uaccess_disable r0
- .endif
+ uaccess_entry tsk, r0, r1, r2, \uaccess
.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -631,11 +624,9 @@ call_fpe:
tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
reteq lr
and r8, r0, #0x00000f00 @ mask out CP number
- THUMB( lsr r8, r8, #8 )
mov r7, #1
- add r6, r10, #TI_USED_CP
- ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[]
- THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[]
+ add r6, r10, r8, lsr #8 @ add used_cp[] array offset first
+ strb r7, [r6, #TI_USED_CP] @ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
@ Test if we need to give access to iWMMXt coprocessors
ldr r5, [r10, #TI_FLAGS]
@@ -644,7 +635,7 @@ call_fpe:
bcs iwmmxt_task_enable
#endif
ARM( add pc, pc, r8, lsr #6 )
- THUMB( lsl r8, r8, #2 )
+ THUMB( lsr r8, r8, #6 )
THUMB( add pc, r8 )
nop
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 62db1c9746cb..7b595f2d4a28 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -6,6 +6,7 @@
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
+#include <asm/uaccess-asm.h>
#include <asm/v7m.h>
@ Bad Abort numbers
@@ -217,9 +218,7 @@
blne trace_hardirqs_off
#endif
.endif
- ldr r1, [sp, #SVC_ADDR_LIMIT]
- uaccess_restore
- str r1, [tsk, #TI_ADDR_LIMIT]
+ uaccess_exit tsk, r0, r1
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode SVC restore
@@ -263,9 +262,7 @@
@ on the stack remains correct).
@
.macro svc_exit_via_fiq
- ldr r1, [sp, #SVC_ADDR_LIMIT]
- uaccess_restore
- str r1, [tsk, #TI_ADDR_LIMIT]
+ uaccess_exit tsk, r0, r1
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode restore
mov r0, sp
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 5617932a83df..1a5edcfb0306 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -96,9 +96,10 @@ int ftrace_arch_code_modify_post_process(void)
return 0;
}
-static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr,
+ bool warn)
{
- return arm_gen_branch_link(pc, addr);
+ return arm_gen_branch_link(pc, addr, warn);
}
static int ftrace_modify_code(unsigned long pc, unsigned long old,
@@ -137,14 +138,14 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
int ret;
pc = (unsigned long)&ftrace_call;
- new = ftrace_call_replace(pc, (unsigned long)func);
+ new = ftrace_call_replace(pc, (unsigned long)func, true);
ret = ftrace_modify_code(pc, 0, new, false);
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
if (!ret) {
pc = (unsigned long)&ftrace_regs_call;
- new = ftrace_call_replace(pc, (unsigned long)func);
+ new = ftrace_call_replace(pc, (unsigned long)func, true);
ret = ftrace_modify_code(pc, 0, new, false);
}
@@ -153,7 +154,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
#ifdef CONFIG_OLD_MCOUNT
if (!ret) {
pc = (unsigned long)&ftrace_call_old;
- new = ftrace_call_replace(pc, (unsigned long)func);
+ new = ftrace_call_replace(pc, (unsigned long)func, true);
ret = ftrace_modify_code(pc, 0, new, false);
}
@@ -166,10 +167,22 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long new, old;
unsigned long ip = rec->ip;
+ unsigned long aaddr = adjust_address(rec, addr);
+ struct module *mod = NULL;
+
+#ifdef CONFIG_ARM_MODULE_PLTS
+ mod = rec->arch.mod;
+#endif
old = ftrace_nop_replace(rec);
- new = ftrace_call_replace(ip, adjust_address(rec, addr));
+ new = ftrace_call_replace(ip, aaddr, !mod);
+#ifdef CONFIG_ARM_MODULE_PLTS
+ if (!new && mod) {
+ aaddr = get_module_plt(mod, ip, aaddr);
+ new = ftrace_call_replace(ip, aaddr, true);
+ }
+#endif
return ftrace_modify_code(rec->ip, old, new, true);
}
@@ -182,9 +195,9 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long new, old;
unsigned long ip = rec->ip;
- old = ftrace_call_replace(ip, adjust_address(rec, old_addr));
+ old = ftrace_call_replace(ip, adjust_address(rec, old_addr), true);
- new = ftrace_call_replace(ip, adjust_address(rec, addr));
+ new = ftrace_call_replace(ip, adjust_address(rec, addr), true);
return ftrace_modify_code(rec->ip, old, new, true);
}
@@ -194,12 +207,29 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
+ unsigned long aaddr = adjust_address(rec, addr);
unsigned long ip = rec->ip;
unsigned long old;
unsigned long new;
int ret;
- old = ftrace_call_replace(ip, adjust_address(rec, addr));
+#ifdef CONFIG_ARM_MODULE_PLTS
+ /* mod is only supplied during module loading */
+ if (!mod)
+ mod = rec->arch.mod;
+ else
+ rec->arch.mod = mod;
+#endif
+
+ old = ftrace_call_replace(ip, aaddr,
+ !IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || !mod);
+#ifdef CONFIG_ARM_MODULE_PLTS
+ if (!old && mod) {
+ aaddr = get_module_plt(mod, ip, aaddr);
+ old = ftrace_call_replace(ip, aaddr, true);
+ }
+#endif
+
new = ftrace_nop_replace(rec);
ret = ftrace_modify_code(ip, old, new, true);
@@ -207,7 +237,7 @@ int ftrace_make_nop(struct module *mod,
if (ret == -EINVAL && addr == MCOUNT_ADDR) {
rec->arch.old_mcount = true;
- old = ftrace_call_replace(ip, adjust_address(rec, addr));
+ old = ftrace_call_replace(ip, adjust_address(rec, addr), true);
new = ftrace_nop_replace(rec);
ret = ftrace_modify_code(ip, old, new, true);
}
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 7e662bdd5cb3..932b2244e709 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -101,6 +101,7 @@ __mmap_switched:
str r2, [r6] @ Save atags pointer
cmp r7, #0
strne r0, [r7] @ Save control register values
+ mov lr, #0
b start_kernel
ENDPROC(__mmap_switched)
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 6b1148cafffd..90add5ded3f1 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -675,11 +675,7 @@ ARM_BE8(rev16 ip, ip)
bcc 1b
bx lr
#else
-#ifdef CONFIG_CPU_ENDIAN_BE8
- moveq r0, #0x00004000 @ set bit 22, mov to mvn instruction
-#else
moveq r0, #0x400000 @ set bit 22, mov to mvn instruction
-#endif
b 2f
1: ldr ip, [r7, r3]
#ifdef CONFIG_CPU_ENDIAN_BE8
@@ -688,7 +684,7 @@ ARM_BE8(rev16 ip, ip)
tst ip, #0x000f0000 @ check the rotation field
orrne ip, ip, r6, lsl #24 @ mask in offset bits 31-24
biceq ip, ip, #0x00004000 @ clear bit 22
- orreq ip, ip, r0 @ mask in offset bits 7-0
+ orreq ip, ip, r0, ror #8 @ mask in offset bits 7-0
#else
bic ip, ip, #0x000000ff
tst ip, #0xf00 @ check the rotation field
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index af2a7f1e3103..a8783964ebde 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -688,26 +688,68 @@ static void disable_single_step(struct perf_event *bp)
arch_install_hw_breakpoint(bp);
}
+/*
+ * Arm32 hardware does not always report a watchpoint hit address that matches
+ * one of the watchpoints set. It can also report an address "near" the
+ * watchpoint if a single instruction accesses both watched and unwatched
+ * addresses. There is no straightforward way, short of disassembling the
+ * offending instruction, to map that address back to the watchpoint. This
+ * function computes the distance of the memory access from the watchpoint as a
+ * heuristic for the likelihood that a given access triggered the watchpoint.
+ *
+ * See this same function in the arm64 platform code, which has the same
+ * problem.
+ *
+ * The function returns the distance of the address from the bytes watched by
+ * the watchpoint. In case of an exact match, it returns 0.
+ */
+static u32 get_distance_from_watchpoint(unsigned long addr, u32 val,
+ struct arch_hw_breakpoint_ctrl *ctrl)
+{
+ u32 wp_low, wp_high;
+ u32 lens, lene;
+
+ lens = __ffs(ctrl->len);
+ lene = __fls(ctrl->len);
+
+ wp_low = val + lens;
+ wp_high = val + lene;
+ if (addr < wp_low)
+ return wp_low - addr;
+ else if (addr > wp_high)
+ return addr - wp_high;
+ else
+ return 0;
+}
+
+static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
+ struct arch_hw_breakpoint *info)
+{
+ return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER;
+}
+
static void watchpoint_handler(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
- int i, access;
- u32 val, ctrl_reg, alignment_mask;
+ int i, access, closest_match = 0;
+ u32 min_dist = -1, dist;
+ u32 val, ctrl_reg;
struct perf_event *wp, **slots;
struct arch_hw_breakpoint *info;
struct arch_hw_breakpoint_ctrl ctrl;
slots = this_cpu_ptr(wp_on_reg);
+ /*
+ * Find all watchpoints that match the reported address. If no exact
+ * match is found, attribute the hit to the closest watchpoint.
+ */
+ rcu_read_lock();
for (i = 0; i < core_num_wrps; ++i) {
- rcu_read_lock();
-
wp = slots[i];
-
if (wp == NULL)
- goto unlock;
+ continue;
- info = counter_arch_bp(wp);
/*
* The DFAR is an unknown value on debug architectures prior
* to 7.1. Since we only allow a single watchpoint on these
@@ -716,50 +758,69 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
*/
if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
BUG_ON(i > 0);
+ info = counter_arch_bp(wp);
info->trigger = wp->attr.bp_addr;
} else {
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
- alignment_mask = 0x7;
- else
- alignment_mask = 0x3;
-
- /* Check if the watchpoint value matches. */
- val = read_wb_reg(ARM_BASE_WVR + i);
- if (val != (addr & ~alignment_mask))
- goto unlock;
-
- /* Possible match, check the byte address select. */
- ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
- decode_ctrl_reg(ctrl_reg, &ctrl);
- if (!((1 << (addr & alignment_mask)) & ctrl.len))
- goto unlock;
-
/* Check that the access type matches. */
if (debug_exception_updates_fsr()) {
access = (fsr & ARM_FSR_ACCESS_MASK) ?
HW_BREAKPOINT_W : HW_BREAKPOINT_R;
if (!(access & hw_breakpoint_type(wp)))
- goto unlock;
+ continue;
}
+ val = read_wb_reg(ARM_BASE_WVR + i);
+ ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
+ decode_ctrl_reg(ctrl_reg, &ctrl);
+ dist = get_distance_from_watchpoint(addr, val, &ctrl);
+ if (dist < min_dist) {
+ min_dist = dist;
+ closest_match = i;
+ }
+ /* Is this an exact match? */
+ if (dist != 0)
+ continue;
+
/* We have a winner. */
+ info = counter_arch_bp(wp);
info->trigger = addr;
}
pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
+
+ /*
+ * If we triggered a user watchpoint from a uaccess routine,
+ * then handle the stepping ourselves since userspace really
+ * can't help us with this.
+ */
+ if (watchpoint_fault_on_uaccess(regs, info))
+ goto step;
+
perf_bp_event(wp, regs);
/*
- * If no overflow handler is present, insert a temporary
- * mismatch breakpoint so we can single-step over the
- * watchpoint trigger.
+ * Defer stepping to the overflow handler if one is installed.
+ * Otherwise, insert a temporary mismatch breakpoint so that
+ * we can single-step over the watchpoint trigger.
*/
+ if (!is_default_overflow_handler(wp))
+ continue;
+step:
+ enable_single_step(wp, instruction_pointer(regs));
+ }
+
+ if (min_dist > 0 && min_dist != -1) {
+ /* No exact match found. */
+ wp = slots[closest_match];
+ info = counter_arch_bp(wp);
+ info->trigger = addr;
+ pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
+ perf_bp_event(wp, regs);
if (is_default_overflow_handler(wp))
enable_single_step(wp, instruction_pointer(regs));
-
-unlock:
- rcu_read_unlock();
}
+
+ rcu_read_unlock();
}
static void watchpoint_single_step_handler(unsigned long pc)
@@ -830,7 +891,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
info->trigger = addr;
pr_debug("breakpoint fired: address = 0x%x\n", addr);
perf_bp_event(bp, regs);
- if (!bp->overflow_handler)
+ if (is_default_overflow_handler(bp))
enable_single_step(bp, addr);
goto unlock;
}
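
The distance heuristic above is easiest to follow with concrete numbers. The standalone userspace sketch below is illustrative only (helper names, addresses and the byte-select value are invented, and GCC-style builtins stand in for the kernel's __ffs/__fls): the byte-address-select mask picks which bytes relative to the WVR value are watched, and the reported fault address is scored by how far it falls outside that span.

#include <stdio.h>
#include <stdint.h>

static uint32_t distance_from_watchpoint(uint32_t addr, uint32_t wvr,
					 uint8_t byte_select)
{
	uint32_t lens = __builtin_ctz(byte_select);	 /* lowest watched byte  */
	uint32_t lene = 31 - __builtin_clz(byte_select); /* highest watched byte */
	uint32_t wp_low = wvr + lens;
	uint32_t wp_high = wvr + lene;

	if (addr < wp_low)
		return wp_low - addr;
	if (addr > wp_high)
		return addr - wp_high;
	return 0;	/* the access falls inside the watched bytes */
}

int main(void)
{
	/* WVR = 0x1000, byte-select 0xf0: bytes 0x1004..0x1007 are watched. */
	printf("%u\n", distance_from_watchpoint(0x1006, 0x1000, 0xf0)); /* 0: exact */
	printf("%u\n", distance_from_watchpoint(0x1000, 0x1000, 0xf0)); /* 4: near  */
	printf("%u\n", distance_from_watchpoint(0x100c, 0x1000, 0xf0)); /* 5: near  */
	return 0;
}

A zero distance means an exact hit; otherwise the handler attributes the trigger to the watchpoint with the smallest distance, as in the loop above.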
diff --git a/arch/arm/kernel/insn.c b/arch/arm/kernel/insn.c
index 2e844b70386b..db0acbb7d7a0 100644
--- a/arch/arm/kernel/insn.c
+++ b/arch/arm/kernel/insn.c
@@ -3,8 +3,9 @@
#include <linux/kernel.h>
#include <asm/opcodes.h>
-static unsigned long
-__arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link)
+static unsigned long __arm_gen_branch_thumb2(unsigned long pc,
+ unsigned long addr, bool link,
+ bool warn)
{
unsigned long s, j1, j2, i1, i2, imm10, imm11;
unsigned long first, second;
@@ -12,7 +13,7 @@ __arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link)
offset = (long)addr - (long)(pc + 4);
if (offset < -16777216 || offset > 16777214) {
- WARN_ON_ONCE(1);
+ WARN_ON_ONCE(warn);
return 0;
}
@@ -33,8 +34,8 @@ __arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link)
return __opcode_thumb32_compose(first, second);
}
-static unsigned long
-__arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link)
+static unsigned long __arm_gen_branch_arm(unsigned long pc, unsigned long addr,
+ bool link, bool warn)
{
unsigned long opcode = 0xea000000;
long offset;
@@ -44,7 +45,7 @@ __arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link)
offset = (long)addr - (long)(pc + 8);
if (unlikely(offset < -33554432 || offset > 33554428)) {
- WARN_ON_ONCE(1);
+ WARN_ON_ONCE(warn);
return 0;
}
@@ -54,10 +55,10 @@ __arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link)
}
unsigned long
-__arm_gen_branch(unsigned long pc, unsigned long addr, bool link)
+__arm_gen_branch(unsigned long pc, unsigned long addr, bool link, bool warn)
{
if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
- return __arm_gen_branch_thumb2(pc, addr, link);
+ return __arm_gen_branch_thumb2(pc, addr, link, warn);
else
- return __arm_gen_branch_arm(pc, addr, link);
+ return __arm_gen_branch_arm(pc, addr, link, warn);
}
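
For reference, the ARM-mode arithmetic performed by __arm_gen_branch_arm() can be reproduced in a few lines of ordinary C. This is a standalone sketch built for userspace with made-up addresses, not the kernel function itself; the point of the patch is only that an out-of-range target now warns when the warn flag is set, so callers (such as the ftrace code above) can treat a zero return as "fall back to a module PLT" rather than as a bug.

#include <stdio.h>
#include <stdbool.h>

static unsigned long gen_branch_arm(unsigned long pc, unsigned long addr,
				    bool link)
{
	unsigned long opcode = 0xea000000;	/* unconditional B */
	long offset;

	if (link)
		opcode |= 1 << 24;		/* set the L bit: BL */

	offset = (long)addr - (long)(pc + 8);	/* ARM-mode pc reads 8 ahead */
	if (offset < -33554432 || offset > 33554428)
		return 0;			/* outside the +/-32 MB range */

	offset = (offset >> 2) & 0x00ffffff;	/* word offset in a 24-bit field */
	return opcode | offset;
}

int main(void)
{
	/* In range: a valid BL encoding is produced (0xeb0003fe here). */
	printf("%08lx\n", gen_branch_arm(0x8000, 0x9000, true));
	/* 64 MB away: returns 0, which callers may resolve via a PLT. */
	printf("%08lx\n", gen_branch_arm(0x8000, 0x8000 + (64UL << 20), true));
	return 0;
}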
diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index 3d0c2e4dda1d..ed0e09cc735f 100644
--- a/arch/arm/kernel/module-plts.c
+++ b/arch/arm/kernel/module-plts.c
@@ -7,6 +7,7 @@
*/
#include <linux/elf.h>
+#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>
@@ -14,10 +15,6 @@
#include <asm/cache.h>
#include <asm/opcodes.h>
-#define PLT_ENT_STRIDE L1_CACHE_BYTES
-#define PLT_ENT_COUNT (PLT_ENT_STRIDE / sizeof(u32))
-#define PLT_ENT_SIZE (sizeof(struct plt_entries) / PLT_ENT_COUNT)
-
#ifdef CONFIG_THUMB2_KERNEL
#define PLT_ENT_LDR __opcode_to_mem_thumb32(0xf8dff000 | \
(PLT_ENT_STRIDE - 4))
@@ -26,9 +23,11 @@
(PLT_ENT_STRIDE - 8))
#endif
-struct plt_entries {
- u32 ldr[PLT_ENT_COUNT];
- u32 lit[PLT_ENT_COUNT];
+static const u32 fixed_plts[] = {
+#ifdef CONFIG_DYNAMIC_FTRACE
+ FTRACE_ADDR,
+ MCOUNT_ADDR,
+#endif
};
static bool in_init(const struct module *mod, unsigned long loc)
@@ -36,14 +35,40 @@ static bool in_init(const struct module *mod, unsigned long loc)
return loc - (u32)mod->init_layout.base < mod->init_layout.size;
}
+static void prealloc_fixed(struct mod_plt_sec *pltsec, struct plt_entries *plt)
+{
+ int i;
+
+ if (!ARRAY_SIZE(fixed_plts) || pltsec->plt_count)
+ return;
+ pltsec->plt_count = ARRAY_SIZE(fixed_plts);
+
+ for (i = 0; i < ARRAY_SIZE(plt->ldr); ++i)
+ plt->ldr[i] = PLT_ENT_LDR;
+
+ BUILD_BUG_ON(sizeof(fixed_plts) > sizeof(plt->lit));
+ memcpy(plt->lit, fixed_plts, sizeof(fixed_plts));
+}
+
u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
{
struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
&mod->arch.init;
+ struct plt_entries *plt;
+ int idx;
+
+ /* cache the address, ELF header is available only during module load */
+ if (!pltsec->plt_ent)
+ pltsec->plt_ent = (struct plt_entries *)pltsec->plt->sh_addr;
+ plt = pltsec->plt_ent;
- struct plt_entries *plt = (struct plt_entries *)pltsec->plt->sh_addr;
- int idx = 0;
+ prealloc_fixed(pltsec, plt);
+
+ for (idx = 0; idx < ARRAY_SIZE(fixed_plts); ++idx)
+ if (plt->lit[idx] == val)
+ return (u32)&plt->ldr[idx];
+ idx = 0;
/*
* Look for an existing entry pointing to 'val'. Given that the
* relocations are sorted, this will be the last entry we allocated.
@@ -191,8 +216,8 @@ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod)
{
- unsigned long core_plts = 0;
- unsigned long init_plts = 0;
+ unsigned long core_plts = ARRAY_SIZE(fixed_plts);
+ unsigned long init_plts = ARRAY_SIZE(fixed_plts);
Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
Elf32_Sym *syms = NULL;
@@ -247,6 +272,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
sizeof(struct plt_entries));
mod->arch.core.plt_count = 0;
+ mod->arch.core.plt_ent = NULL;
mod->arch.init.plt->sh_type = SHT_NOBITS;
mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
@@ -254,6 +280,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
sizeof(struct plt_entries));
mod->arch.init.plt_count = 0;
+ mod->arch.init.plt_ent = NULL;
pr_debug("%s: plt=%x, init.plt=%x\n", __func__,
mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);
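
The preallocated "fixed" PLT slots are simple to model outside the kernel. The sketch below is illustrative only (the entry count, addresses and helper names are invented, and the real struct plt_entries lives in the arch headers): prealloc_fixed() reserves the first literal slots for well-known ftrace targets, and get_module_plt() can then hand back the matching ldr stub without consuming any dynamically counted entry.

#include <stdio.h>
#include <stdint.h>

#define PLT_ENT_COUNT	16	/* simplified; the kernel derives this from L1_CACHE_BYTES */

struct plt_entries {
	uint32_t ldr[PLT_ENT_COUNT];	/* "ldr pc, [pc, #...]" stubs   */
	uint32_t lit[PLT_ENT_COUNT];	/* literal pool: branch targets */
};

static const uint32_t fixed_plts[] = {
	0xc0100000u,	/* stand-in for FTRACE_ADDR */
	0xc0100040u,	/* stand-in for MCOUNT_ADDR */
};

#define NR_FIXED	(sizeof(fixed_plts) / sizeof(fixed_plts[0]))

/* Reserve the first literal slots for the fixed targets. */
static void prealloc_fixed(struct plt_entries *plt)
{
	for (unsigned int i = 0; i < NR_FIXED; i++)
		plt->lit[i] = fixed_plts[i];
}

/* Fast path: reuse a fixed slot if its literal already matches. */
static uint32_t *lookup_fixed_plt(struct plt_entries *plt, uint32_t val)
{
	for (unsigned int i = 0; i < NR_FIXED; i++)
		if (plt->lit[i] == val)
			return &plt->ldr[i];
	return NULL;	/* fall through to the dynamically counted entries */
}

int main(void)
{
	struct plt_entries plt = { { 0 }, { 0 } };

	prealloc_fixed(&plt);
	printf("stub for the MCOUNT stand-in at %p\n",
	       (void *)lookup_fixed_plt(&plt, 0xc0100040u));
	return 0;
}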
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 58e3771e4c5b..368b4b404985 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -228,8 +228,8 @@ static struct undef_hook arm_break_hook = {
};
static struct undef_hook thumb_break_hook = {
- .instr_mask = 0xffff,
- .instr_val = 0xde01,
+ .instr_mask = 0xffffffff,
+ .instr_val = 0x0000de01,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = PSR_T_BIT,
.fn = break_trap,
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index 36ed35073289..f945742dea44 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -10,8 +10,6 @@
*/
#include <linux/export.h>
#include <linux/ftrace.h>
-
-#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
#include <linux/sched.h>
#include <asm/stacktrace.h>
@@ -56,6 +54,4 @@ void *return_address(unsigned int level)
return NULL;
}
-#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
-
EXPORT_SYMBOL_GPL(return_address);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index a6d27284105a..ac4ffd97ae82 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -547,9 +547,11 @@ void notrace cpu_init(void)
* In Thumb-2, msr with an immediate value is not allowed.
*/
#ifdef CONFIG_THUMB2_KERNEL
-#define PLC "r"
+#define PLC_l "l"
+#define PLC_r "r"
#else
-#define PLC "I"
+#define PLC_l "I"
+#define PLC_r "I"
#endif
/*
@@ -571,15 +573,15 @@ void notrace cpu_init(void)
"msr cpsr_c, %9"
:
: "r" (stk),
- PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
"I" (offsetof(struct stack, irq[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
"I" (offsetof(struct stack, abt[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
"I" (offsetof(struct stack, und[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
"I" (offsetof(struct stack, fiq[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
+ PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
: "r14");
#endif
}
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 02e6b6dfffa7..19e4ff507209 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -667,18 +667,20 @@ struct page *get_signal_page(void)
addr = page_address(page);
+ /* Poison the entire page */
+ memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
+ PAGE_SIZE / sizeof(u32));
+
/* Give the signal return code some randomness */
offset = 0x200 + (get_random_int() & 0x7fc);
signal_return_offset = offset;
- /*
- * Copy signal return handlers into the vector page, and
- * set sigreturn to be a pointer to these.
- */
+ /* Copy signal return handlers into the page */
memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
- ptr = (unsigned long)addr + offset;
- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+ /* Flush out all instructions in this page */
+ ptr = (unsigned long)addr;
+ flush_icache_range(ptr, ptr + PAGE_SIZE);
return page;
}
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 65228bf4c6df..31af81d46aae 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -20,6 +20,19 @@
* A simple function epilogue looks like this:
* ldm sp, {fp, sp, pc}
*
+ * When compiled with clang, pc and sp are not pushed. A simple function
+ * prologue looks like this when built with clang:
+ *
+ * stmdb {..., fp, lr}
+ * add fp, sp, #x
+ * sub sp, sp, #y
+ *
+ * A simple function epilogue looks like this when built with clang:
+ *
+ * sub sp, fp, #x
+ * ldm {..., fp, pc}
+ *
+ *
* Note that with framepointer enabled, even the leaf functions have the same
* prologue and epilogue, therefore we can ignore the LR value in this case.
*/
@@ -32,6 +45,15 @@ int notrace unwind_frame(struct stackframe *frame)
low = frame->sp;
high = ALIGN(low, THREAD_SIZE);
+#ifdef CONFIG_CC_IS_CLANG
+ /* check current frame pointer is within bounds */
+ if (fp < low + 4 || fp > high - 4)
+ return -EINVAL;
+
+ frame->sp = frame->fp;
+ frame->fp = *(unsigned long *)(fp);
+ frame->pc = *(unsigned long *)(fp + 4);
+#else
/* check current frame pointer is within bounds */
if (fp < low + 12 || fp > high - 4)
return -EINVAL;
@@ -40,6 +62,7 @@ int notrace unwind_frame(struct stackframe *frame)
frame->fp = *(unsigned long *)(fp - 12);
frame->sp = *(unsigned long *)(fp - 8);
frame->pc = *(unsigned long *)(fp - 4);
+#endif
return 0;
}
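
As a concrete illustration of the new CONFIG_CC_IS_CLANG path, the userspace sketch below fakes two 32-bit stack slots (all values invented) and performs one unwind step over a clang-style frame: fp points at the saved {fp, lr} pair, so the caller's frame pointer is read from [fp], the return address from [fp + 4], and, mirroring the patch, the recorded sp is taken from the current fp.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Two fake 32-bit stack slots: the caller's fp, then the saved lr. */
	uint32_t fake_stack[2] = { 0xc0ffee00u, 0x80123456u };
	uintptr_t fp = (uintptr_t)&fake_stack[0];
	uint32_t next_fp, next_pc;
	uintptr_t next_sp;

	next_sp = fp;			 /* new sp := current fp          */
	next_fp = *(uint32_t *)fp;	 /* [fp]     = caller's fp        */
	next_pc = *(uint32_t *)(fp + 4); /* [fp + 4] = return address     */

	printf("fp=%#lx sp=%#lx pc=%#lx\n",
	       (unsigned long)next_fp, (unsigned long)next_sp,
	       (unsigned long)next_pc);
	return 0;
}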
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index d08099269e35..e126386fb78a 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
@@ -27,12 +28,22 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
return -EINVAL;
/*
+ * Function graph tracer state gets inconsistent when the kernel
+ * calls functions that never return (aka suspend finishers), hence
+ * disable graph tracing during their execution.
+ */
+ pause_graph_tracing();
+
+ /*
* Provide a temporary page table with an identity mapping for
* the MMU-enable code, required for resuming. On successful
* resume (indicated by a zero return code), we need to switch
* back to the correct page tables.
*/
ret = __cpu_suspend(arg, fn, __mpidr);
+
+ unpause_graph_tracing();
+
if (ret == 0) {
cpu_switch_mm(mm->pgd, mm);
local_flush_bp_all();
@@ -46,7 +57,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
u32 __mpidr = cpu_logical_map(smp_processor_id());
- return __cpu_suspend(arg, fn, __mpidr);
+ int ret;
+
+ pause_graph_tracing();
+ ret = __cpu_suspend(arg, fn, __mpidr);
+ unpause_graph_tracing();
+
+ return ret;
}
#define idmap_pgd NULL
#endif