Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/kernel/sys.c          |   2
-rw-r--r--  arch/mips/ath79/setup.c          |   6
-rw-r--r--  arch/mips/mm/mmap.c              |   5
-rw-r--r--  arch/mips/pistachio/Platform     |   1
-rw-r--r--  arch/powerpc/kvm/book3s_xive.c   |   4
-rw-r--r--  arch/powerpc/perf/core-book3s.c  |   6
-rw-r--r--  arch/powerpc/perf/power8-pmu.c   |   3
-rw-r--r--  arch/powerpc/perf/power9-pmu.c   |   3
-rw-r--r--  arch/s390/crypto/aes_s390.c      | 156
-rw-r--r--  arch/s390/crypto/des_s390.c      |   7
-rw-r--r--  arch/s390/mm/fault.c             |   5
-rw-r--r--  arch/sparc/mm/ultra.S            |   4
-rw-r--r--  arch/x86/kernel/ftrace.c         |  49
-rw-r--r--  arch/x86/kernel/ftrace_64.S      |   8
-rw-r--r--  arch/x86/kernel/kprobes/core.c   |  24
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S    |   6
-rw-r--r--  arch/x86/lib/insn-eval.c         |  47
17 files changed, 227 insertions(+), 109 deletions(-)
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
index 750919e083b3..a5e996121709 100644
--- a/arch/arm64/kernel/sys.c
+++ b/arch/arm64/kernel/sys.c
@@ -50,7 +50,7 @@ SYSCALL_DEFINE1(arm64_personality, unsigned int, personality)
*/
asmlinkage long sys_rt_sigreturn_wrapper(void);
#define sys_rt_sigreturn sys_rt_sigreturn_wrapper
-#define sys_personality sys_arm64_personality
+#define __arm64_sys_personality __arm64_sys_arm64_personality
#undef __SYSCALL
#define __SYSCALL(nr, sym) [nr] = sym,
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index c7c31e214813..26a058d58d37 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -183,6 +183,12 @@ const char *get_system_type(void)
return ath79_sys_type;
}
+int get_c0_perfcount_int(void)
+{
+ return ATH79_MISC_IRQ(5);
+}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
+
unsigned int get_c0_compare_int(void)
{
return CP0_LEGACY_COMPARE_IRQ;
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 2f616ebeb7e0..7755a1fad05a 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -203,6 +203,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
int __virt_addr_valid(const volatile void *kaddr)
{
+ unsigned long vaddr = (unsigned long)kaddr;
+
+ if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
+ return 0;
+
return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);
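The hunk above rejects addresses outside the direct map before translating, since virt_to_phys() is only defined for direct-mapped addresses. A minimal standalone sketch of that check, with hypothetical PAGE_OFFSET/MAP_BASE values (the real bounds are platform-specific):

#include <stdbool.h>
#include <stdint.h>

#define PAGE_OFFSET 0x80000000UL  /* hypothetical start of the direct map */
#define MAP_BASE    0xc0000000UL  /* hypothetical start of mapped (vmalloc) space */

/* Bounds-check first: only addresses in [PAGE_OFFSET, MAP_BASE) may be
 * handed to a direct-map virt-to-phys translation. */
static bool addr_in_direct_map(const volatile void *kaddr)
{
	uintptr_t vaddr = (uintptr_t)kaddr;

	return vaddr >= PAGE_OFFSET && vaddr < MAP_BASE;
}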
diff --git a/arch/mips/pistachio/Platform b/arch/mips/pistachio/Platform
index d80cd612df1f..c3592b374ad2 100644
--- a/arch/mips/pistachio/Platform
+++ b/arch/mips/pistachio/Platform
@@ -6,3 +6,4 @@ cflags-$(CONFIG_MACH_PISTACHIO) += \
-I$(srctree)/arch/mips/include/asm/mach-pistachio
load-$(CONFIG_MACH_PISTACHIO) += 0xffffffff80400000
zload-$(CONFIG_MACH_PISTACHIO) += 0xffffffff81000000
+all-$(CONFIG_MACH_PISTACHIO) := uImage.gz
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index f9818d7d3381..ab9ad332477b 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -1718,7 +1718,6 @@ static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
{
xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
xive_native_configure_irq(hw_num, 0, MASKED, 0);
- xive_cleanup_irq_data(xd);
}
static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
@@ -1732,9 +1731,10 @@ static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
continue;
kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
+ xive_cleanup_irq_data(&state->ipi_data);
xive_native_free_irq(state->ipi_number);
- /* Pass-through, cleanup too */
+ /* Pass-through, cleanup too but keep IRQ hw data */
if (state->pt_number)
kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 3f66fcf8ad99..64296d340a02 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1861,6 +1861,7 @@ static int power_pmu_event_init(struct perf_event *event)
int n;
int err;
struct cpu_hw_events *cpuhw;
+ u64 bhrb_filter;
if (!ppmu)
return -ENOENT;
@@ -1966,13 +1967,14 @@ static int power_pmu_event_init(struct perf_event *event)
err = power_check_constraints(cpuhw, events, cflags, n + 1);
if (has_branch_stack(event)) {
- cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
+ bhrb_filter = ppmu->bhrb_filter_map(
event->attr.branch_sample_type);
- if (cpuhw->bhrb_filter == -1) {
+ if (bhrb_filter == -1) {
put_cpu_var(cpu_hw_events);
return -EOPNOTSUPP;
}
+ cpuhw->bhrb_filter = bhrb_filter;
}
put_cpu_var(cpu_hw_events);
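The fix above validates the filter into a local and commits it to the shared per-cpu state only on success, so a rejected event can no longer corrupt cpuhw->bhrb_filter for already-running events. A standalone sketch of the pattern, with hypothetical names:

#include <stdint.h>
#include <errno.h>

struct cpu_state { uint64_t bhrb_filter; };

/* Hypothetical stand-in for ppmu->bhrb_filter_map(); (u64)-1 = unsupported. */
static uint64_t filter_map(uint64_t branch_sample_type)
{
	return branch_sample_type ? 0x40000000ULL : (uint64_t)-1;
}

static int event_init(struct cpu_state *cpuhw, uint64_t branch_sample_type)
{
	uint64_t bhrb_filter = filter_map(branch_sample_type);  /* local first */

	if (bhrb_filter == (uint64_t)-1)
		return -EOPNOTSUPP;           /* cpuhw->bhrb_filter left intact */
	cpuhw->bhrb_filter = bhrb_filter;     /* publish only when valid */
	return 0;
}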
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index d12a2db26353..d10feef93b6b 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -29,6 +29,7 @@ enum {
#define POWER8_MMCRA_IFM1 0x0000000040000000UL
#define POWER8_MMCRA_IFM2 0x0000000080000000UL
#define POWER8_MMCRA_IFM3 0x00000000C0000000UL
+#define POWER8_MMCRA_BHRB_MASK 0x00000000C0000000UL
/*
* Raw event encoding for PowerISA v2.07 (Power8):
@@ -243,6 +244,8 @@ static u64 power8_bhrb_filter_map(u64 branch_sample_type)
static void power8_config_bhrb(u64 pmu_bhrb_filter)
{
+ pmu_bhrb_filter &= POWER8_MMCRA_BHRB_MASK;
+
/* Enable BHRB filter in PMU */
mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 2ca0b33b4efb..88219cbd0d7d 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -100,6 +100,7 @@ enum {
#define POWER9_MMCRA_IFM1 0x0000000040000000UL
#define POWER9_MMCRA_IFM2 0x0000000080000000UL
#define POWER9_MMCRA_IFM3 0x00000000C0000000UL
+#define POWER9_MMCRA_BHRB_MASK 0x00000000C0000000UL
/* Nasty Power9 specific hack */
#define PVR_POWER9_CUMULUS 0x00002000
@@ -325,6 +326,8 @@ static u64 power9_bhrb_filter_map(u64 branch_sample_type)
static void power9_config_bhrb(u64 pmu_bhrb_filter)
{
+ pmu_bhrb_filter &= POWER9_MMCRA_BHRB_MASK;
+
/* Enable BHRB filter in PMU */
mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}
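Both PMU hunks apply the same defensive rule: mask the caller-supplied filter to the architected IFM bits before OR-ing it into MMCRA, so a bogus value cannot flip unrelated control bits. An illustrative sketch, with the register image modeled as a plain integer:

#include <stdint.h>

#define MMCRA_BHRB_MASK 0x00000000C0000000ULL  /* only the IFM filter bits */

static uint64_t config_bhrb(uint64_t mmcra, uint64_t pmu_bhrb_filter)
{
	pmu_bhrb_filter &= MMCRA_BHRB_MASK;  /* drop anything non-architected */
	return mmcra | pmu_bhrb_filter;      /* merge into the register image */
}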
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index ad47abd08630..a510cae939e8 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -27,14 +27,14 @@
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
static u8 *ctrblk;
-static DEFINE_SPINLOCK(ctrblk_lock);
+static DEFINE_MUTEX(ctrblk_lock);
static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
kma_functions;
@@ -698,7 +698,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
unsigned int n, nbytes;
int ret, locked;
- locked = spin_trylock(&ctrblk_lock);
+ locked = mutex_trylock(&ctrblk_lock);
ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
@@ -716,7 +716,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
ret = blkcipher_walk_done(desc, walk, nbytes - n);
}
if (locked)
- spin_unlock(&ctrblk_lock);
+ mutex_unlock(&ctrblk_lock);
/*
* final block may be < AES_BLOCK_SIZE, copy only nbytes
*/
@@ -826,19 +826,45 @@ static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
return 0;
}
-static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
- unsigned int len)
+static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
+ unsigned int len)
{
memset(gw, 0, sizeof(*gw));
gw->walk_bytes_remain = len;
scatterwalk_start(&gw->walk, sg);
}
-static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
+static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
+{
+ struct scatterlist *nextsg;
+
+ gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
+ while (!gw->walk_bytes) {
+ nextsg = sg_next(gw->walk.sg);
+ if (!nextsg)
+ return 0;
+ scatterwalk_start(&gw->walk, nextsg);
+ gw->walk_bytes = scatterwalk_clamp(&gw->walk,
+ gw->walk_bytes_remain);
+ }
+ gw->walk_ptr = scatterwalk_map(&gw->walk);
+ return gw->walk_bytes;
+}
+
+static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
+ unsigned int nbytes)
+{
+ gw->walk_bytes_remain -= nbytes;
+ scatterwalk_unmap(&gw->walk);
+ scatterwalk_advance(&gw->walk, nbytes);
+ scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
+ gw->walk_ptr = NULL;
+}
+
+static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
int n;
- /* minbytesneeded <= AES_BLOCK_SIZE */
if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
gw->ptr = gw->buf;
gw->nbytes = gw->buf_bytes;
@@ -851,13 +877,11 @@ static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
goto out;
}
- gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
- if (!gw->walk_bytes) {
- scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
- gw->walk_bytes = scatterwalk_clamp(&gw->walk,
- gw->walk_bytes_remain);
+ if (!_gcm_sg_clamp_and_map(gw)) {
+ gw->ptr = NULL;
+ gw->nbytes = 0;
+ goto out;
}
- gw->walk_ptr = scatterwalk_map(&gw->walk);
if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
gw->ptr = gw->walk_ptr;
@@ -869,51 +893,90 @@ static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
gw->buf_bytes += n;
- gw->walk_bytes_remain -= n;
- scatterwalk_unmap(&gw->walk);
- scatterwalk_advance(&gw->walk, n);
- scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
-
+ _gcm_sg_unmap_and_advance(gw, n);
if (gw->buf_bytes >= minbytesneeded) {
gw->ptr = gw->buf;
gw->nbytes = gw->buf_bytes;
goto out;
}
-
- gw->walk_bytes = scatterwalk_clamp(&gw->walk,
- gw->walk_bytes_remain);
- if (!gw->walk_bytes) {
- scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
- gw->walk_bytes = scatterwalk_clamp(&gw->walk,
- gw->walk_bytes_remain);
+ if (!_gcm_sg_clamp_and_map(gw)) {
+ gw->ptr = NULL;
+ gw->nbytes = 0;
+ goto out;
}
- gw->walk_ptr = scatterwalk_map(&gw->walk);
}
out:
return gw->nbytes;
}
-static void gcm_sg_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
+static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
- int n;
+ if (gw->walk_bytes_remain == 0) {
+ gw->ptr = NULL;
+ gw->nbytes = 0;
+ goto out;
+ }
+ if (!_gcm_sg_clamp_and_map(gw)) {
+ gw->ptr = NULL;
+ gw->nbytes = 0;
+ goto out;
+ }
+
+ if (gw->walk_bytes >= minbytesneeded) {
+ gw->ptr = gw->walk_ptr;
+ gw->nbytes = gw->walk_bytes;
+ goto out;
+ }
+
+ scatterwalk_unmap(&gw->walk);
+ gw->walk_ptr = NULL;
+
+ gw->ptr = gw->buf;
+ gw->nbytes = sizeof(gw->buf);
+
+out:
+ return gw->nbytes;
+}
+
+static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
+{
if (gw->ptr == NULL)
- return;
+ return 0;
if (gw->ptr == gw->buf) {
- n = gw->buf_bytes - bytesdone;
+ int n = gw->buf_bytes - bytesdone;
if (n > 0) {
memmove(gw->buf, gw->buf + bytesdone, n);
- gw->buf_bytes -= n;
+ gw->buf_bytes = n;
} else
gw->buf_bytes = 0;
- } else {
- gw->walk_bytes_remain -= bytesdone;
- scatterwalk_unmap(&gw->walk);
- scatterwalk_advance(&gw->walk, bytesdone);
- scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
- }
+ } else
+ _gcm_sg_unmap_and_advance(gw, bytesdone);
+
+ return bytesdone;
+}
+
+static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
+{
+ int i, n;
+
+ if (gw->ptr == NULL)
+ return 0;
+
+ if (gw->ptr == gw->buf) {
+ for (i = 0; i < bytesdone; i += n) {
+ if (!_gcm_sg_clamp_and_map(gw))
+ return i;
+ n = min(gw->walk_bytes, bytesdone - i);
+ memcpy(gw->walk_ptr, gw->buf + i, n);
+ _gcm_sg_unmap_and_advance(gw, n);
+ }
+ } else
+ _gcm_sg_unmap_and_advance(gw, bytesdone);
+
+ return bytesdone;
}
static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
@@ -926,7 +989,7 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
unsigned int pclen = req->cryptlen;
int ret = 0;
- unsigned int len, in_bytes, out_bytes,
+ unsigned int n, len, in_bytes, out_bytes,
min_bytes, bytes, aad_bytes, pc_bytes;
struct gcm_sg_walk gw_in, gw_out;
u8 tag[GHASH_DIGEST_SIZE];
@@ -963,14 +1026,14 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
*(u32 *)(param.j0 + ivsize) = 1;
memcpy(param.k, ctx->key, ctx->key_len);
- gcm_sg_walk_start(&gw_in, req->src, len);
- gcm_sg_walk_start(&gw_out, req->dst, len);
+ gcm_walk_start(&gw_in, req->src, len);
+ gcm_walk_start(&gw_out, req->dst, len);
do {
min_bytes = min_t(unsigned int,
aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
- in_bytes = gcm_sg_walk_go(&gw_in, min_bytes);
- out_bytes = gcm_sg_walk_go(&gw_out, min_bytes);
+ in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
+ out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
bytes = min(in_bytes, out_bytes);
if (aadlen + pclen <= bytes) {
@@ -997,8 +1060,11 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
gw_in.ptr + aad_bytes, pc_bytes,
gw_in.ptr, aad_bytes);
- gcm_sg_walk_done(&gw_in, aad_bytes + pc_bytes);
- gcm_sg_walk_done(&gw_out, aad_bytes + pc_bytes);
+ n = aad_bytes + pc_bytes;
+ if (gcm_in_walk_done(&gw_in, n) != n)
+ return -ENOMEM;
+ if (gcm_out_walk_done(&gw_out, n) != n)
+ return -ENOMEM;
aadlen -= aad_bytes;
pclen -= pc_bytes;
} while (aadlen + pclen > 0);
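gcm_out_walk_done() above fans bytes processed in the bounce buffer back out across scatterlist segments and reports how many actually fit, which is why gcm_aes_crypt() now checks the return value. A simplified userspace sketch of that fan-out over an array of segments (a hypothetical struct seg stands in for the scatterlist):

#include <stddef.h>
#include <string.h>

struct seg { unsigned char *ptr; size_t len; };

static size_t copy_out(struct seg *segs, size_t nsegs, size_t *cur,
		       size_t *off, const unsigned char *buf, size_t done)
{
	size_t i = 0, n;

	while (i < done) {
		/* analogue of _gcm_sg_clamp_and_map(): skip exhausted segments */
		while (*cur < nsegs && *off == segs[*cur].len) {
			(*cur)++;
			*off = 0;
		}
		if (*cur == nsegs)
			return i;                /* ran out of output space */
		n = segs[*cur].len - *off;
		if (n > done - i)
			n = done - i;
		memcpy(segs[*cur].ptr + *off, buf + i, n);
		*off += n;                       /* analogue of _gcm_sg_unmap_and_advance() */
		i += n;
	}
	return done;
}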
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 5346b5a80bb6..65bda1178963 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -14,6 +14,7 @@
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/fips.h>
+#include <linux/mutex.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <asm/cpacf.h>
@@ -21,7 +22,7 @@
#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
static u8 *ctrblk;
-static DEFINE_SPINLOCK(ctrblk_lock);
+static DEFINE_MUTEX(ctrblk_lock);
static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
@@ -387,7 +388,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
unsigned int n, nbytes;
int ret, locked;
- locked = spin_trylock(&ctrblk_lock);
+ locked = mutex_trylock(&ctrblk_lock);
ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
@@ -404,7 +405,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
ret = blkcipher_walk_done(desc, walk, nbytes - n);
}
if (locked)
- spin_unlock(&ctrblk_lock);
+ mutex_unlock(&ctrblk_lock);
/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
if (nbytes) {
cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr,
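Both the AES and DES CTR paths convert ctrblk_lock to a mutex because the walk may sleep while the lock is held, which is illegal under a spinlock; the trylock-with-fallback shape is unchanged. A userspace sketch of that shape (a pthread mutex stands in for the kernel mutex, memcpy for cpacf_kmctr()):

#include <pthread.h>
#include <string.h>

static pthread_mutex_t ctrblk_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char ctrblk[4096];        /* shared multi-block counter buffer */

static void ctr_crypt(const unsigned char *in, unsigned char *out, size_t len)
{
	unsigned char fallback[16];        /* single-block private buffer */
	int locked = (pthread_mutex_trylock(&ctrblk_lock) == 0);
	unsigned char *buf = locked ? ctrblk : fallback;
	size_t step = locked ? sizeof(ctrblk) : sizeof(fallback);

	for (size_t off = 0; off < len; off += step) {
		size_t n = len - off < step ? len - off : step;

		memcpy(buf, in + off, n);  /* stand-in for the cipher call */
		memcpy(out + off, buf, n);
	}
	if (locked)
		pthread_mutex_unlock(&ctrblk_lock);
}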
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 4cc3f06b0ab3..5189b76133d4 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -107,7 +107,6 @@ void bust_spinlocks(int yes)
/*
* Find out which address space caused the exception.
- * Access register mode is impossible, ignore space == 3.
*/
static inline enum fault_type get_fault_type(struct pt_regs *regs)
{
@@ -132,6 +131,10 @@ static inline enum fault_type get_fault_type(struct pt_regs *regs)
}
return VDSO_FAULT;
}
+ if (trans_exc_code == 1) {
+ /* access register mode, not used in the kernel */
+ return USER_FAULT;
+ }
/* home space exception -> access via kernel ASCE */
return KERNEL_FAULT;
}
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index d245f89d1395..d220b6848746 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -587,7 +587,7 @@ xcall_flush_tlb_kernel_range: /* 44 insns */
sub %g7, %g1, %g3
srlx %g3, 18, %g2
brnz,pn %g2, 2f
- add %g2, 1, %g2
+ sethi %hi(PAGE_SIZE), %g2
sub %g3, %g2, %g3
or %g1, 0x20, %g1 ! Nucleus
1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
@@ -751,7 +751,7 @@ __cheetah_xcall_flush_tlb_kernel_range: /* 44 insns */
sub %g7, %g1, %g3
srlx %g3, 18, %g2
brnz,pn %g2, 2f
- add %g2, 1, %g2
+ sethi %hi(PAGE_SIZE), %g2
sub %g3, %g2, %g3
or %g1, 0x20, %g1 ! Nucleus
1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 4d2a401c178b..9f033dfd2766 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -752,18 +752,21 @@ union ftrace_op_code_union {
} __attribute__((packed));
};
+#define RET_SIZE 1
+
static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
- unsigned const char *jmp;
unsigned long start_offset;
unsigned long end_offset;
unsigned long op_offset;
unsigned long offset;
+ unsigned long npages;
unsigned long size;
- unsigned long ip;
+ unsigned long retq;
unsigned long *ptr;
void *trampoline;
+ void *ip;
/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
union ftrace_op_code_union op_ptr;
@@ -783,27 +786,28 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
/*
* Allocate enough size to store the ftrace_caller code,
- * the jmp to ftrace_epilogue, as well as the address of
- * the ftrace_ops this trampoline is used for.
+ * the ret, as well as the address of the ftrace_ops this
+ * trampoline is used for.
*/
- trampoline = alloc_tramp(size + MCOUNT_INSN_SIZE + sizeof(void *));
+ trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
if (!trampoline)
return 0;
- *tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *);
+ *tramp_size = size + RET_SIZE + sizeof(void *);
+ npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);
/* Copy ftrace_caller onto the trampoline memory */
ret = probe_kernel_read(trampoline, (void *)start_offset, size);
- if (WARN_ON(ret < 0)) {
- tramp_free(trampoline, *tramp_size);
- return 0;
- }
+ if (WARN_ON(ret < 0))
+ goto fail;
- ip = (unsigned long)trampoline + size;
+ ip = trampoline + size;
- /* The trampoline ends with a jmp to ftrace_epilogue */
- jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_epilogue);
- memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE);
+ /* The trampoline ends with ret(q) */
+ retq = (unsigned long)ftrace_stub;
+ ret = probe_kernel_read(ip, (void *)retq, RET_SIZE);
+ if (WARN_ON(ret < 0))
+ goto fail;
/*
* The address of the ftrace_ops that is used for this trampoline
@@ -813,17 +817,15 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
* the global function_trace_op variable.
*/
- ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE);
+ ptr = (unsigned long *)(trampoline + size + RET_SIZE);
*ptr = (unsigned long)ops;
op_offset -= start_offset;
memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);
/* Are we pointing to the reference? */
- if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) {
- tramp_free(trampoline, *tramp_size);
- return 0;
- }
+ if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
+ goto fail;
/* Load the contents of ptr into the callback parameter */
offset = (unsigned long)ptr;
@@ -837,7 +839,16 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
/* ALLOC_TRAMP flags lets us know we created it */
ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
+ /*
+ * Module allocation needs to be completed by making the page
+ * executable. The page is still writable, which is a security hazard,
+ * but ftrace breaks W^X completely anyway.
+ */
+ set_memory_x((unsigned long)trampoline, npages);
return (unsigned long)trampoline;
+fail:
+ tramp_free(trampoline, *tramp_size);
+ return 0;
}
static unsigned long calc_trampoline_call_offset(bool save_regs)
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 91b2cff4b79a..75f2b36b41a6 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -171,9 +171,6 @@ GLOBAL(ftrace_call)
restore_mcount_regs
/*
- * The copied trampoline must call ftrace_epilogue as it
- * still may need to call the function graph tracer.
- *
* The code up to this label is copied into trampolines so
* think twice before adding any new code or changing the
* layout here.
@@ -185,7 +182,10 @@ GLOBAL(ftrace_graph_call)
jmp ftrace_stub
#endif
-/* This is weak to keep gas from relaxing the jumps */
+/*
+ * This is weak to keep gas from relaxing the jumps.
+ * It is also used to copy the retq for trampolines.
+ */
WEAK(ftrace_stub)
retq
ENDPROC(ftrace_caller)
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 42b74dc99308..9447c7afae80 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -433,8 +433,20 @@ void *alloc_insn_page(void)
void *page;
page = module_alloc(PAGE_SIZE);
- if (page)
- set_memory_ro((unsigned long)page & PAGE_MASK, 1);
+ if (!page)
+ return NULL;
+
+ /*
+ * First make the page read-only, and only then make it executable to
+ * prevent it from being W+X in between.
+ */
+ set_memory_ro((unsigned long)page, 1);
+
+ /*
+ * TODO: Once additional kernel code protection mechanisms are in place,
+ * ensure that the page was not maliciously altered and is still zeroed.
+ */
+ set_memory_x((unsigned long)page, 1);
return page;
}
@@ -442,8 +454,12 @@ void *alloc_insn_page(void)
/* Recover page to RW mode before releasing it */
void free_insn_page(void *page)
{
- set_memory_nx((unsigned long)page & PAGE_MASK, 1);
- set_memory_rw((unsigned long)page & PAGE_MASK, 1);
+ /*
+ * First make the page non-executable, and only then make it writable to
+ * prevent it from being W+X in between.
+ */
+ set_memory_nx((unsigned long)page, 1);
+ set_memory_rw((unsigned long)page, 1);
module_memfree(page);
}
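The ordering in alloc_insn_page()/free_insn_page() above is the general W^X rule: when raising protections go RO before X, when lowering go NX before RW, so the page is never writable and executable at the same time. The same rule in userspace mprotect() terms (illustrative sketch only):

#include <stddef.h>
#include <sys/mman.h>

static void *alloc_exec_page(size_t size)
{
	void *page = mmap(NULL, size, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (page == MAP_FAILED)
		return NULL;
	/* ... fill in code while the page is W but not yet X ... */
	mprotect(page, size, PROT_READ);              /* drop W first */
	mprotect(page, size, PROT_READ | PROT_EXEC);  /* only then add X */
	return page;
}

static void free_exec_page(void *page, size_t size)
{
	mprotect(page, size, PROT_READ);              /* drop X first */
	mprotect(page, size, PROT_READ | PROT_WRITE); /* only then add W */
	munmap(page, size);
}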
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index b17c05044595..ca1076975e0b 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -130,10 +130,10 @@ SECTIONS
*(.text.__x86.indirect_thunk)
__indirect_thunk_end = .;
#endif
- } :text = 0x9090
- /* End of text section */
- _etext = .;
+ /* End of text section */
+ _etext = .;
+ } :text = 0x9090
NOTES :text :note
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index 9119d8e41f1f..87dcba101e56 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -555,7 +555,8 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs,
}
/**
- * get_desc() - Obtain pointer to a segment descriptor
+ * get_desc() - Obtain contents of a segment descriptor
+ * @out: Segment descriptor contents on success
* @sel: Segment selector
*
* Given a segment selector, obtain a pointer to the segment descriptor.
@@ -563,18 +564,18 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs,
*
* Returns:
*
- * Pointer to segment descriptor on success.
- *
- * NULL on error.
+ * True on success, false on failure.
*/
-static struct desc_struct *get_desc(unsigned short sel)
+static bool get_desc(struct desc_struct *out, unsigned short sel)
{
struct desc_ptr gdt_desc = {0, 0};
unsigned long desc_base;
#ifdef CONFIG_MODIFY_LDT_SYSCALL
if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) {
- struct desc_struct *desc = NULL;
+ bool success = false;
struct ldt_struct *ldt;
/* Bits [15:3] contain the index of the desired entry. */
@@ -582,12 +583,14 @@ static struct desc_struct *get_desc(unsigned short sel)
mutex_lock(&current->active_mm->context.lock);
ldt = current->active_mm->context.ldt;
- if (ldt && sel < ldt->nr_entries)
- desc = &ldt->entries[sel];
+ if (ldt && sel < ldt->nr_entries) {
+ *out = ldt->entries[sel];
+ success = true;
+ }
mutex_unlock(&current->active_mm->context.lock);
- return desc;
+ return success;
}
#endif
native_store_gdt(&gdt_desc);
@@ -602,9 +605,10 @@ static struct desc_struct *get_desc(unsigned short sel)
desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK);
if (desc_base > gdt_desc.size)
- return NULL;
+ return false;
- return (struct desc_struct *)(gdt_desc.address + desc_base);
+ *out = *(struct desc_struct *)(gdt_desc.address + desc_base);
+ return true;
}
/**
@@ -626,7 +630,7 @@ static struct desc_struct *get_desc(unsigned short sel)
*/
unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
{
- struct desc_struct *desc;
+ struct desc_struct desc;
short sel;
sel = get_segment_selector(regs, seg_reg_idx);
@@ -664,11 +668,10 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
if (!sel)
return -1L;
- desc = get_desc(sel);
- if (!desc)
+ if (!get_desc(&desc, sel))
return -1L;
- return get_desc_base(desc);
+ return get_desc_base(&desc);
}
/**
@@ -690,7 +693,7 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
*/
static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
{
- struct desc_struct *desc;
+ struct desc_struct desc;
unsigned long limit;
short sel;
@@ -704,8 +707,7 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
if (!sel)
return 0;
- desc = get_desc(sel);
- if (!desc)
+ if (!get_desc(&desc, sel))
return 0;
/*
@@ -714,8 +716,8 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
* not tested when checking the segment limits. In practice,
* this means that the segment ends in (limit << 12) + 0xfff.
*/
- limit = get_desc_limit(desc);
- if (desc->g)
+ limit = get_desc_limit(&desc);
+ if (desc.g)
limit = (limit << 12) + 0xfff;
return limit;
@@ -739,7 +741,7 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
*/
int insn_get_code_seg_params(struct pt_regs *regs)
{
- struct desc_struct *desc;
+ struct desc_struct desc;
short sel;
if (v8086_mode(regs))
@@ -750,8 +752,7 @@ int insn_get_code_seg_params(struct pt_regs *regs)
if (sel < 0)
return sel;
- desc = get_desc(sel);
- if (!desc)
+ if (!get_desc(&desc, sel))
return -EINVAL;
/*
@@ -759,10 +760,10 @@ int insn_get_code_seg_params(struct pt_regs *regs)
* determines whether a segment contains data or code. If this is a data
* segment, return error.
*/
- if (!(desc->type & BIT(3)))
+ if (!(desc.type & BIT(3)))
return -EINVAL;
- switch ((desc->l << 1) | desc->d) {
+ switch ((desc.l << 1) | desc.d) {
case 0: /*
* Legacy mode. CS.L=0, CS.D=0. Address and operand size are
* both 16-bit.
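The get_desc() rework above closes a use-after-free: the old code returned a pointer into the LDT, which another thread can free the moment context.lock is dropped. Copying the descriptor out by value while the lock is held removes that window. A standalone sketch of the idiom (a hypothetical table stands in for the LDT):

#include <pthread.h>
#include <stdbool.h>

struct desc { unsigned long base, limit; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct desc *table;           /* may be freed/replaced concurrently */
static unsigned int nr_entries;

static bool get_entry(struct desc *out, unsigned int idx)
{
	bool success = false;

	pthread_mutex_lock(&table_lock);
	if (table && idx < nr_entries) {
		*out = table[idx];       /* copy by value under the lock */
		success = true;
	}
	pthread_mutex_unlock(&table_lock);
	return success;                  /* *out stays valid after unlock */
}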