Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/assembler.h     | 29
-rw-r--r--  arch/arm/include/asm/cacheflush.h    | 15
-rw-r--r--  arch/arm/include/asm/glue-cache.h    |  1
-rw-r--r--  arch/arm/include/asm/opcodes-virt.h  | 10
-rw-r--r--  arch/arm/include/asm/processor.h     |  5
-rw-r--r--  arch/arm/include/asm/ptrace.h        |  6
-rw-r--r--  arch/arm/include/asm/system.h        |  1
-rw-r--r--  arch/arm/include/asm/thread_info.h   |  2
-rw-r--r--  arch/arm/include/asm/unistd.h        |  1
-rw-r--r--  arch/arm/include/asm/vfpmacros.h     |  4
-rw-r--r--  arch/arm/include/asm/virt.h          | 69
11 files changed, 133 insertions(+), 10 deletions(-)
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 5c8b3bf4d825..2ef95813fce0 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -22,6 +22,7 @@
#include <asm/ptrace.h>
#include <asm/domain.h>
+#include <asm/opcodes-virt.h>
#define IOMEM(x) (x)
@@ -240,6 +241,34 @@
#endif
/*
+ * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
+ * a scratch register for the macro to overwrite.
+ *
+ * This macro is intended for forcing the CPU into SVC mode at boot time.
+ * You cannot return to the original mode.
+ *
+ * Beware, it also clobbers LR.
+ */
+.macro safe_svcmode_maskall reg:req
+	mrs	\reg, cpsr
+	mov	lr, \reg
+	and	lr, lr, #MODE_MASK
+	cmp	lr, #HYP_MODE
+	orr	\reg, \reg, #PSR_I_BIT | PSR_F_BIT
+	bic	\reg, \reg, #MODE_MASK
+	orr	\reg, \reg, #SVC_MODE
+THUMB(	orr	\reg, \reg, #PSR_T_BIT	)
+ bne 1f
+ orr \reg, \reg, #PSR_A_BIT
+ adr lr, BSYM(2f)
+ msr spsr_cxsf, \reg
+ __MSR_ELR_HYP(14)
+ __ERET
+1: msr cpsr_c, \reg
+2:
+.endm
+
+/*
* STRT/LDRT access macros with ARM and Thumb-2 variants
*/
#ifdef CONFIG_THUMB2_KERNEL
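
The mode check is the heart of safe_svcmode_maskall; here is a minimal C model of what it computes (constants copied from asm/ptrace.h below; the function name is illustrative):

#include <stdint.h>
#include <stdbool.h>

#define MODE_MASK	0x0000001f
#define SVC_MODE	0x00000013
#define HYP_MODE	0x0000001a
#define PSR_I_BIT	0x00000080
#define PSR_F_BIT	0x00000040
#define PSR_A_BIT	0x00000100

/* Compute the CPSR value safe_svcmode_maskall builds, and whether it
 * must leave via an exception return (msr ELR_hyp + eret) instead of
 * a plain msr cpsr_c. */
static uint32_t safe_svcmode_target(uint32_t cpsr, bool *via_eret)
{
	uint32_t t = cpsr | PSR_I_BIT | PSR_F_BIT;	/* mask IRQ and FIQ */

	t = (t & ~MODE_MASK) | SVC_MODE;		/* force SVC mode */
	*via_eret = (cpsr & MODE_MASK) == HYP_MODE;
	if (*via_eret)
		t |= PSR_A_BIT;		/* HYP exit also masks async aborts */
	return t;
}

Dropping out of HYP cannot be done by writing CPSR directly, which is why the macro loads the target into SPSR and ERETs when the cmp/bne pair finds HYP_MODE.
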
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e4448e16046d..e1489c54cd12 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -49,6 +49,13 @@
*
* Unconditionally clean and invalidate the entire cache.
*
+ * flush_kern_louis()
+ *
+ * Flush data cache levels up to the Level of Unification
+ * Inner Shareable (LoUIS) and invalidate the I-cache.
+ * Only needed from ARMv7 onwards; falls back to flush_cache_all()
+ * for all other processor versions.
+ *
* flush_user_all()
*
* Clean and invalidate all user space cache entries
@@ -97,6 +104,7 @@
struct cpu_cache_fns {
void (*flush_icache_all)(void);
void (*flush_kern_all)(void);
+ void (*flush_kern_louis)(void);
void (*flush_user_all)(void);
void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
@@ -119,6 +127,7 @@ extern struct cpu_cache_fns cpu_cache;
#define __cpuc_flush_icache_all cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all cpu_cache.flush_kern_all
+#define __cpuc_flush_kern_louis cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all cpu_cache.flush_user_all
#define __cpuc_flush_user_range cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
@@ -139,6 +148,7 @@ extern struct cpu_cache_fns cpu_cache;
extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
+extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
@@ -204,6 +214,11 @@ static inline void __flush_icache_all(void)
__flush_icache_preferred();
}
+/*
+ * Flush caches up to the Level of Unification Inner Shareable (LoUIS)
+ */
+#define flush_cache_louis() __cpuc_flush_kern_louis()
+
#define flush_cache_all() __cpuc_flush_kern_all()
static inline void vivt_flush_cache_mm(struct mm_struct *mm)
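
A sketch of the intended use, assuming a hypothetical CPU-teardown caller (flush_cache_louis() is the only real API here):

#include <asm/cacheflush.h>

/* Before powering a CPU off, only the cache levels private to it
 * (those below the Level of Unification Inner Shareable) need to be
 * cleaned; levels at or above the LoUIS remain coherent with the
 * other CPUs, so a full flush_cache_all() would be wasted work. */
static void example_cpu_going_down(void)
{
	flush_cache_louis();
}
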
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index 4f8d2c0dc441..cca9f15704ed 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -132,6 +132,7 @@
#ifndef MULTI_CACHE
#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
+#define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_louis)
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
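
The new line follows the existing token-pasting pattern; a standalone sketch of how the __glue() indirection resolves on a single-cache (non-MULTI_CACHE) build, with helper macros mirroring asm/glue.h and a stub standing in for the real v7 routine:

#include <stdio.h>

#define ____glue(name, fn)	name##fn
#define __glue(name, fn)	____glue(name, fn)

#define _CACHE v7	/* a v7-only kernel selects one cache type */

static void v7_flush_kern_cache_louis(void)
{
	puts("flush to LoUIS");
}

#define __cpuc_flush_kern_louis	__glue(_CACHE, _flush_kern_cache_louis)

int main(void)
{
	__cpuc_flush_kern_louis();	/* v7_flush_kern_cache_louis() */
	return 0;
}
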
diff --git a/arch/arm/include/asm/opcodes-virt.h b/arch/arm/include/asm/opcodes-virt.h
index b85665a96f8e..efcfdf92d9d5 100644
--- a/arch/arm/include/asm/opcodes-virt.h
+++ b/arch/arm/include/asm/opcodes-virt.h
@@ -26,4 +26,14 @@
0xF7E08000 | (((imm16) & 0xF000) << 4) | ((imm16) & 0x0FFF) \
)
+#define __ERET __inst_arm_thumb32( \
+ 0xE160006E, \
+ 0xF3DE8F00 \
+)
+
+#define __MSR_ELR_HYP(regnum) __inst_arm_thumb32( \
+ 0xE12EF300 | regnum, \
+ 0xF3808E30 | (regnum << 16) \
+)
+
#endif /* ! __ASM_ARM_OPCODES_VIRT_H */
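
Each macro carries the same instruction twice, as a raw ARM word and a raw Thumb-2 word, so the file assembles even on binutils that predate the virtualization extensions. A quick check of what __MSR_ELR_HYP(14) (r14, i.e. lr, as used by safe_svcmode_maskall above) evaluates to:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned regnum = 14;				/* lr */
	uint32_t arm   = 0xE12EF300 | regnum;		/* 0xE12EF30E */
	uint32_t thumb = 0xF3808E30 | (regnum << 16);	/* 0xF38E8E30 */

	printf("msr ELR_hyp, lr -> ARM %08X, Thumb %08X\n", arm, thumb);
	return 0;
}
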
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 99afa7498260..06e7d509eaac 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -85,11 +85,6 @@ unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier()
#endif
-/*
- * Create a new kernel thread
- */
-extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 355ece523f41..142d6ae41231 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -44,6 +44,7 @@
#define IRQ_MODE 0x00000012
#define SVC_MODE 0x00000013
#define ABT_MODE 0x00000017
+#define HYP_MODE 0x0000001a
#define UND_MODE 0x0000001b
#define SYSTEM_MODE 0x0000001f
#define MODE32_BIT 0x00000010
@@ -254,6 +255,11 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs)
return regs->ARM_sp;
}
+#define current_pt_regs(void) ({ \
+ register unsigned long sp asm ("sp"); \
+ (struct pt_regs *)((sp | (THREAD_SIZE - 1)) - 7) - 1; \
+})
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
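
The pointer arithmetic in current_pt_regs() is dense; a userspace model with ARM's 8K thread stacks (the sp value is hypothetical) shows what each step does:

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192	/* ARM kernel stacks are THREAD_SIZE-aligned */

int main(void)
{
	uintptr_t sp = 0xc1234abc;	/* any address inside the stack */

	/* OR-ing in THREAD_SIZE - 1 jumps to the stack's last byte;
	 * subtracting 7 gives the 8-byte-aligned top. The real macro
	 * then steps back one struct pt_regs to the saved registers. */
	uintptr_t top = (sp | (THREAD_SIZE - 1)) - 7;

	printf("top of stack: %#lx\n", (unsigned long)top);	/* 0xc1235ff8 */
	return 0;
}
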
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 74542c52f9be..368165e33c1c 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -2,7 +2,6 @@
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cmpxchg.h>
-#include <asm/exec.h>
#include <asm/switch_to.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index f71cdab18b87..8477b4c1d39f 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -151,7 +151,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
#define TIF_SYSCALL_TRACEPOINT 10
-#define TIF_POLLING_NRFLAG 16
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 20
@@ -164,7 +163,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
-#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index d9ff5cc3a506..91819ad54424 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -478,6 +478,7 @@
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_SOCKETCALL
#endif
+#define __ARCH_WANT_SYS_EXECVE
/*
* "Conditional" syscalls
diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
index a7aadbd9a6dd..6a6f1e485f41 100644
--- a/arch/arm/include/asm/vfpmacros.h
+++ b/arch/arm/include/asm/vfpmacros.h
@@ -28,7 +28,7 @@
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
tst \tmp, #HWCAP_VFPv3D16
- ldceq p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
+ ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
addne \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
@@ -52,7 +52,7 @@
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
tst \tmp, #HWCAP_VFPv3D16
- stceq p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
+ stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
addne \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
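
The one-letter change is the whole fix: the l suffix sets the coprocessor "long" bit (bit 22), which in VFP load/store encodings is the top bit of the double-register number, so the plain ldceq/stceq forms addressed d0-d15 where the comment promises d16-d31. A C model of the surrounding conditional flow (the HWCAP value and helper are illustrative; the real bit lives in asm/hwcap.h):

#include <stdio.h>
#include <stdint.h>

#define HWCAP_VFPv3D16	(1 << 14)	/* illustrative; see asm/hwcap.h */

/* Stand-in for the ldceql transfer; the post-indexed writeback
 * advances base past the 16 upper doubles (16 * 8 == 32*4 bytes). */
static void load_d16_d31(uintptr_t *base)
{
	*base += 32 * 4;
}

static void vfp_load_upper_bank(unsigned long elf_hwcap, uintptr_t *base)
{
	if (!(elf_hwcap & HWCAP_VFPv3D16))	/* tst -> EQ: have d16-d31 */
		load_d16_d31(base);		/* ldceql path */
	else
		*base += 32 * 4;		/* addne: skip unused space */
}

int main(void)
{
	uintptr_t base = 0;
	vfp_load_upper_bank(0, &base);
	printf("base advanced by %#lx bytes\n", (unsigned long)base);
	return 0;
}
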
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
new file mode 100644
index 000000000000..86164df86cb4
--- /dev/null
+++ b/arch/arm/include/asm/virt.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2012 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef VIRT_H
+#define VIRT_H
+
+#include <asm/ptrace.h>
+
+/*
+ * Flag indicating that the kernel was not entered in the same mode on every
+ * CPU. The zImage loader stashes this value in an SPSR, so we need an
+ * architecturally defined flag bit here (the N flag, as it happens)
+ */
+#define BOOT_CPU_MODE_MISMATCH (1<<31)
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_ARM_VIRT_EXT
+/*
+ * __boot_cpu_mode records what mode the primary CPU was booted in.
+ * A correctly-implemented bootloader must start all CPUs in the same mode:
+ * if it fails to do this, the flag BOOT_CPU_MODE_MISMATCH is set to indicate
+ * that some CPU(s) were booted in a different mode.
+ *
+ * This allows the kernel to flag an error when the secondaries have come up.
+ */
+extern int __boot_cpu_mode;
+
+void __hyp_set_vectors(unsigned long phys_vector_base);
+unsigned long __hyp_get_vectors(void);
+#else
+#define __boot_cpu_mode (SVC_MODE)
+#endif
+
+#ifndef ZIMAGE
+void hyp_mode_check(void);
+
+/* Reports the availability of HYP mode */
+static inline bool is_hyp_mode_available(void)
+{
+ return ((__boot_cpu_mode & MODE_MASK) == HYP_MODE &&
+ !(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH));
+}
+
+/* Check if the bootloader has booted CPUs in different modes */
+static inline bool is_hyp_mode_mismatched(void)
+{
+ return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
+}
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* ! VIRT_H */
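
A standalone sketch of how a boot-time consumer would use the new interface; __boot_cpu_mode is stubbed so the example runs outside the kernel, and its value pretends the loader entered every CPU in HYP:

#include <stdbool.h>
#include <stdio.h>

#define MODE_MASK		0x0000001fU
#define SVC_MODE		0x00000013U
#define HYP_MODE		0x0000001aU
#define BOOT_CPU_MODE_MISMATCH	(1U << 31)

static unsigned int __boot_cpu_mode = HYP_MODE;	/* stub: loader used HYP */

static bool is_hyp_mode_available(void)
{
	return (__boot_cpu_mode & MODE_MASK) == HYP_MODE &&
	       !(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
}

int main(void)
{
	if (is_hyp_mode_available())
		puts("all CPUs entered in HYP: hypervisor support usable");
	else
		puts("no (or inconsistent) HYP: staying SVC-only");
	return 0;
}

Because a mode mismatch sets the N-flag bit in the same variable, the single check above covers both "never had HYP" and "a secondary lost HYP".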