Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/Kbuild                   |   1
-rw-r--r--  arch/arm/include/asm/arch_timer.h             |   9
-rw-r--r--  arch/arm/include/asm/barrier.h                |   4
-rw-r--r--  arch/arm/include/asm/cacheflush.h             |  10
-rw-r--r--  arch/arm/include/asm/cpuidle.h                |   1
-rw-r--r--  arch/arm/include/asm/device.h                 |   1
-rw-r--r--  arch/arm/include/asm/dma-mapping.h            |  14
-rw-r--r--  arch/arm/include/asm/firmware.h               |  10
-rw-r--r--  arch/arm/include/asm/fixmap.h                 |  31
-rw-r--r--  arch/arm/include/asm/hardware/coresight.h     | 157
-rw-r--r--  arch/arm/include/asm/hardware/cp14.h          | 542
-rw-r--r--  arch/arm/include/asm/hw_irq.h                 |   1
-rw-r--r--  arch/arm/include/asm/io.h                     |  89
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h            |   5
-rw-r--r--  arch/arm/include/asm/kvm_host.h               |   2
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h                |   6
-rw-r--r--  arch/arm/include/asm/mach/pci.h               |  10
-rw-r--r--  arch/arm/include/asm/mcpm.h                   |  17
-rw-r--r--  arch/arm/include/asm/memory.h                 |   4
-rw-r--r--  arch/arm/include/asm/percpu.h                 |   4
-rw-r--r--  arch/arm/include/asm/perf_event.h             |   2
-rw-r--r--  arch/arm/include/asm/pgalloc.h                |  10
-rw-r--r--  arch/arm/include/asm/pgtable-2level-hwdef.h   |   2
-rw-r--r--  arch/arm/include/asm/pgtable-3level-hwdef.h   |   1
-rw-r--r--  arch/arm/include/asm/pgtable.h                |  62
-rw-r--r--  arch/arm/include/asm/pmu.h                    |  36
-rw-r--r--  arch/arm/include/asm/ptrace.h                 |   5
-rw-r--r--  arch/arm/include/asm/spinlock.h               |   4
-rw-r--r--  arch/arm/include/asm/thread_info.h            |  20
-rw-r--r--  arch/arm/include/asm/vfp.h                    |   5
-rw-r--r--  arch/arm/include/asm/xen/page-coherent.h      |  66
-rw-r--r--  arch/arm/include/asm/xen/page.h               |   4
32 files changed, 848 insertions(+), 287 deletions(-)
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 70cd84eb7fda..fe74c0d1e485 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -7,7 +7,6 @@ generic-y += current.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
-generic-y += hash.h
generic-y += ioctl.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index 92793ba69c40..d4ebf5679f1f 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -78,6 +78,15 @@ static inline u32 arch_timer_get_cntfrq(void)
return val;
}
+static inline u64 arch_counter_get_cntpct(void)
+{
+ u64 cval;
+
+ isb();
+ asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
+ return cval;
+}
+
static inline u64 arch_counter_get_cntvct(void)
{
u64 cval;
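
Note: the new arch_counter_get_cntpct() mirrors arch_counter_get_cntvct() but reads the physical counter (CNTPCT) through the 64-bit MRRC transfer; the leading isb() keeps the read from being speculated ahead of prior instructions. A minimal sketch of a caller, assuming only the accessor added above (the helper name and callback are illustrative, not from the patch):

#include <linux/types.h>
#include <asm/arch_timer.h>

/* Measure how many physical-counter ticks fn() takes. Illustrative only. */
static u64 measure_delta_ticks(void (*fn)(void))
{
	u64 start = arch_counter_get_cntpct();

	fn();
	return arch_counter_get_cntpct() - start;
}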
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index c6a3e73a6e24..d2f81e6b8c1c 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -43,10 +43,14 @@
#define mb() do { dsb(); outer_sync(); } while (0)
#define rmb() dsb()
#define wmb() do { dsb(st); outer_sync(); } while (0)
+#define dma_rmb() dmb(osh)
+#define dma_wmb() dmb(oshst)
#else
#define mb() barrier()
#define rmb() barrier()
#define wmb() barrier()
+#define dma_rmb() barrier()
+#define dma_wmb() barrier()
#endif
#ifndef CONFIG_SMP
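
Note: dma_rmb()/dma_wmb() order CPU accesses to coherent DMA memory against each other with an outer-shareable dmb, avoiding the outer_sync() cost of the full mb()/wmb(). A hedged sketch of the intended pattern; the descriptor layout and DESC_DONE flag are assumptions, not part of this patch:

#include <linux/types.h>
#include <asm/barrier.h>

struct desc { u32 status; u32 len; };	/* hypothetical ring descriptor */
#define DESC_DONE 0x1

static int desc_ready(volatile struct desc *d, u32 *len)
{
	if (!(d->status & DESC_DONE))
		return 0;
	dma_rmb();	/* read status before the payload fields */
	*len = d->len;
	return 1;
}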
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 10e78d00a0bb..2d46862e7bef 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -487,6 +487,16 @@ int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+void set_kernel_text_rw(void);
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_rw(void) { }
+static inline void set_kernel_text_ro(void) { }
+#endif
+
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
void *kaddr, unsigned long len);
+
#endif
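
Note: with CONFIG_DEBUG_RODATA the kernel text becomes read-only after mark_rodata_ro(); code that patches instructions (kprobes, jump labels) brackets the write with the new helpers. A sketch under those assumptions; real callers also have to handle concurrency and typically go through the fixmap poke slots:

#include <asm/cacheflush.h>

static void patch_kernel_insn(u32 *addr, u32 insn)
{
	set_kernel_text_rw();
	*addr = insn;
	set_kernel_text_ro();
	/* keep the I-cache coherent with the modified D-side */
	flush_icache_range((unsigned long)addr, (unsigned long)(addr + 1));
}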
diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
index 2fca60ab513a..af319ac4960c 100644
--- a/arch/arm/include/asm/cpuidle.h
+++ b/arch/arm/include/asm/cpuidle.h
@@ -15,7 +15,6 @@ static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
.exit_latency = 1,\
.target_residency = 1,\
.power_usage = p,\
- .flags = CPUIDLE_FLAG_TIME_VALID,\
.name = "WFI",\
.desc = "ARM WFI",\
}
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index dc662fca9230..4111592f0130 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -17,6 +17,7 @@ struct dev_archdata {
#ifdef CONFIG_ARM_DMA_USE_IOMMU
struct dma_iommu_mapping *mapping;
#endif
+ bool dma_coherent;
};
struct omap_device;
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 85738b200023..b52101d37ec7 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -121,12 +121,18 @@ static inline unsigned long dma_max_pfn(struct device *dev)
}
#define dma_max_pfn(dev) dma_max_pfn(dev)
-static inline int set_arch_dma_coherent_ops(struct device *dev)
+#define arch_setup_dma_ops arch_setup_dma_ops
+extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ struct iommu_ops *iommu, bool coherent);
+
+#define arch_teardown_dma_ops arch_teardown_dma_ops
+extern void arch_teardown_dma_ops(struct device *dev);
+
+/* do not use this function in a driver */
+static inline bool is_device_dma_coherent(struct device *dev)
{
- set_dma_ops(dev, &arm_coherent_dma_ops);
- return 0;
+ return dev->archdata.dma_coherent;
}
-#define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev)
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
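
Note: arch_setup_dma_ops() replaces the set_arch_dma_coherent_ops() hook: bus glue now hands over the DMA window and coherency in one call, and archdata.dma_coherent records the result for is_device_dma_coherent(). A hypothetical caller (the window size and the absence of an IOMMU are placeholder choices):

#include <linux/device.h>
#include <asm/dma-mapping.h>

static void bus_dma_configure(struct device *dev, bool coherent)
{
	/* 4 GiB window at bus address 0, no IOMMU behind this bus */
	arch_setup_dma_ops(dev, 0, 1ULL << 32, NULL, coherent);
}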
diff --git a/arch/arm/include/asm/firmware.h b/arch/arm/include/asm/firmware.h
index 2c9f10df7568..89aefe10d66b 100644
--- a/arch/arm/include/asm/firmware.h
+++ b/arch/arm/include/asm/firmware.h
@@ -28,7 +28,7 @@ struct firmware_ops {
/*
* Enters CPU idle mode
*/
- int (*do_idle)(void);
+ int (*do_idle)(unsigned long mode);
/*
* Sets boot address of specified physical CPU
*/
@@ -41,6 +41,14 @@ struct firmware_ops {
* Initializes L2 cache
*/
int (*l2x0_init)(void);
+ /*
+ * Enter system-wide suspend.
+ */
+ int (*suspend)(void);
+ /*
+ * Restore state of privileged hardware after system-wide suspend.
+ */
+ int (*resume)(void);
};
/* Global pointer for current firmware_ops structure, can't be NULL. */
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 74124b0d0d79..0415eae1df27 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -2,27 +2,24 @@
#define _ASM_FIXMAP_H
#define FIXADDR_START 0xffc00000UL
-#define FIXADDR_TOP 0xffe00000UL
-#define FIXADDR_SIZE (FIXADDR_TOP - FIXADDR_START)
+#define FIXADDR_END 0xfff00000UL
+#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
-#define FIX_KMAP_NR_PTES (FIXADDR_SIZE >> PAGE_SHIFT)
+#include <asm/kmap_types.h>
-#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
+enum fixed_addresses {
+ FIX_KMAP_BEGIN,
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
-extern void __this_fixmap_does_not_exist(void);
+ /* Support writing RO kernel text via kprobes, jump labels, etc. */
+ FIX_TEXT_POKE0,
+ FIX_TEXT_POKE1,
-static inline unsigned long fix_to_virt(const unsigned int idx)
-{
- if (idx >= FIX_KMAP_NR_PTES)
- __this_fixmap_does_not_exist();
- return __fix_to_virt(idx);
-}
+ __end_of_fixed_addresses
+};
-static inline unsigned int virt_to_fix(const unsigned long vaddr)
-{
- BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
- return __virt_to_fix(vaddr);
-}
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
+
+#include <asm-generic/fixmap.h>
#endif
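
Note: fixmap now follows the generic model: the fixed slots are enumerated and asm-generic/fixmap.h derives fix_to_virt()/virt_to_fix(), turning out-of-range indices into compile-time errors instead of the old __this_fixmap_does_not_exist() link failure. The FIX_TEXT_POKE slots give patching code a temporary writable alias of read-only text. A sketch; the PAGE_KERNEL protection and the helper name are assumptions:

#include <asm/fixmap.h>
#include <asm/pgtable.h>

static void *map_for_poke(phys_addr_t phys)
{
	__set_fixmap(FIX_TEXT_POKE0, phys & PAGE_MASK, PAGE_KERNEL);
	return (void *)(fix_to_virt(FIX_TEXT_POKE0) +
			((unsigned long)phys & ~PAGE_MASK));
}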
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
deleted file mode 100644
index ad774f37c47c..000000000000
--- a/arch/arm/include/asm/hardware/coresight.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * linux/arch/arm/include/asm/hardware/coresight.h
- *
- * CoreSight components' registers
- *
- * Copyright (C) 2009 Nokia Corporation.
- * Alexander Shishkin
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_HARDWARE_CORESIGHT_H
-#define __ASM_HARDWARE_CORESIGHT_H
-
-#define TRACER_ACCESSED_BIT 0
-#define TRACER_RUNNING_BIT 1
-#define TRACER_CYCLE_ACC_BIT 2
-#define TRACER_ACCESSED BIT(TRACER_ACCESSED_BIT)
-#define TRACER_RUNNING BIT(TRACER_RUNNING_BIT)
-#define TRACER_CYCLE_ACC BIT(TRACER_CYCLE_ACC_BIT)
-
-#define TRACER_TIMEOUT 10000
-
-#define etm_writel(t, v, x) \
- (writel_relaxed((v), (t)->etm_regs + (x)))
-#define etm_readl(t, x) (readl_relaxed((t)->etm_regs + (x)))
-
-/* CoreSight Management Registers */
-#define CSMR_LOCKACCESS 0xfb0
-#define CSMR_LOCKSTATUS 0xfb4
-#define CSMR_AUTHSTATUS 0xfb8
-#define CSMR_DEVID 0xfc8
-#define CSMR_DEVTYPE 0xfcc
-/* CoreSight Component Registers */
-#define CSCR_CLASS 0xff4
-
-#define CS_LAR_KEY 0xc5acce55
-
-/* ETM control register, "ETM Architecture", 3.3.1 */
-#define ETMR_CTRL 0
-#define ETMCTRL_POWERDOWN 1
-#define ETMCTRL_PROGRAM (1 << 10)
-#define ETMCTRL_PORTSEL (1 << 11)
-#define ETMCTRL_DO_CONTEXTID (3 << 14)
-#define ETMCTRL_PORTMASK1 (7 << 4)
-#define ETMCTRL_PORTMASK2 (1 << 21)
-#define ETMCTRL_PORTMASK (ETMCTRL_PORTMASK1 | ETMCTRL_PORTMASK2)
-#define ETMCTRL_PORTSIZE(x) ((((x) & 7) << 4) | (!!((x) & 8)) << 21)
-#define ETMCTRL_DO_CPRT (1 << 1)
-#define ETMCTRL_DATAMASK (3 << 2)
-#define ETMCTRL_DATA_DO_DATA (1 << 2)
-#define ETMCTRL_DATA_DO_ADDR (1 << 3)
-#define ETMCTRL_DATA_DO_BOTH (ETMCTRL_DATA_DO_DATA | ETMCTRL_DATA_DO_ADDR)
-#define ETMCTRL_BRANCH_OUTPUT (1 << 8)
-#define ETMCTRL_CYCLEACCURATE (1 << 12)
-
-/* ETM configuration code register */
-#define ETMR_CONFCODE (0x04)
-
-/* ETM trace start/stop resource control register */
-#define ETMR_TRACESSCTRL (0x18)
-
-/* ETM trigger event register */
-#define ETMR_TRIGEVT (0x08)
-
-/* address access type register bits, "ETM architecture",
- * table 3-27 */
-/* - access type */
-#define ETMAAT_IFETCH 0
-#define ETMAAT_IEXEC 1
-#define ETMAAT_IEXECPASS 2
-#define ETMAAT_IEXECFAIL 3
-#define ETMAAT_DLOADSTORE 4
-#define ETMAAT_DLOAD 5
-#define ETMAAT_DSTORE 6
-/* - comparison access size */
-#define ETMAAT_JAVA (0 << 3)
-#define ETMAAT_THUMB (1 << 3)
-#define ETMAAT_ARM (3 << 3)
-/* - data value comparison control */
-#define ETMAAT_NOVALCMP (0 << 5)
-#define ETMAAT_VALMATCH (1 << 5)
-#define ETMAAT_VALNOMATCH (3 << 5)
-/* - exact match */
-#define ETMAAT_EXACTMATCH (1 << 7)
-/* - context id comparator control */
-#define ETMAAT_IGNCONTEXTID (0 << 8)
-#define ETMAAT_VALUE1 (1 << 8)
-#define ETMAAT_VALUE2 (2 << 8)
-#define ETMAAT_VALUE3 (3 << 8)
-/* - security level control */
-#define ETMAAT_IGNSECURITY (0 << 10)
-#define ETMAAT_NSONLY (1 << 10)
-#define ETMAAT_SONLY (2 << 10)
-
-#define ETMR_COMP_VAL(x) (0x40 + (x) * 4)
-#define ETMR_COMP_ACC_TYPE(x) (0x80 + (x) * 4)
-
-/* ETM status register, "ETM Architecture", 3.3.2 */
-#define ETMR_STATUS (0x10)
-#define ETMST_OVERFLOW BIT(0)
-#define ETMST_PROGBIT BIT(1)
-#define ETMST_STARTSTOP BIT(2)
-#define ETMST_TRIGGER BIT(3)
-
-#define etm_progbit(t) (etm_readl((t), ETMR_STATUS) & ETMST_PROGBIT)
-#define etm_started(t) (etm_readl((t), ETMR_STATUS) & ETMST_STARTSTOP)
-#define etm_triggered(t) (etm_readl((t), ETMR_STATUS) & ETMST_TRIGGER)
-
-#define ETMR_TRACEENCTRL2 0x1c
-#define ETMR_TRACEENCTRL 0x24
-#define ETMTE_INCLEXCL BIT(24)
-#define ETMR_TRACEENEVT 0x20
-#define ETMCTRL_OPTS (ETMCTRL_DO_CPRT | \
- ETMCTRL_DATA_DO_ADDR | \
- ETMCTRL_BRANCH_OUTPUT | \
- ETMCTRL_DO_CONTEXTID)
-
-/* ETM management registers, "ETM Architecture", 3.5.24 */
-#define ETMMR_OSLAR 0x300
-#define ETMMR_OSLSR 0x304
-#define ETMMR_OSSRR 0x308
-#define ETMMR_PDSR 0x314
-
-/* ETB registers, "CoreSight Components TRM", 9.3 */
-#define ETBR_DEPTH 0x04
-#define ETBR_STATUS 0x0c
-#define ETBR_READMEM 0x10
-#define ETBR_READADDR 0x14
-#define ETBR_WRITEADDR 0x18
-#define ETBR_TRIGGERCOUNT 0x1c
-#define ETBR_CTRL 0x20
-#define ETBR_FORMATTERCTRL 0x304
-#define ETBFF_ENFTC 1
-#define ETBFF_ENFCONT BIT(1)
-#define ETBFF_FONFLIN BIT(4)
-#define ETBFF_MANUAL_FLUSH BIT(6)
-#define ETBFF_TRIGIN BIT(8)
-#define ETBFF_TRIGEVT BIT(9)
-#define ETBFF_TRIGFL BIT(10)
-
-#define etb_writel(t, v, x) \
- (writel_relaxed((v), (t)->etb_regs + (x)))
-#define etb_readl(t, x) (readl_relaxed((t)->etb_regs + (x)))
-
-#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
-#define etm_unlock(t) \
- do { etm_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
-
-#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
-#define etb_unlock(t) \
- do { etb_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
-
-#endif /* __ASM_HARDWARE_CORESIGHT_H */
-
diff --git a/arch/arm/include/asm/hardware/cp14.h b/arch/arm/include/asm/hardware/cp14.h
new file mode 100644
index 000000000000..61576dc58ede
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cp14.h
@@ -0,0 +1,542 @@
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_HARDWARE_CP14_H
+#define __ASM_HARDWARE_CP14_H
+
+#include <linux/types.h>
+
+/* Accessors for CP14 registers */
+#define dbg_read(reg) RCP14_##reg()
+#define dbg_write(val, reg) WCP14_##reg(val)
+#define etm_read(reg) RCP14_##reg()
+#define etm_write(val, reg) WCP14_##reg(val)
+
+/* MRC14 and MCR14 */
+#define MRC14(op1, crn, crm, op2) \
+({ \
+u32 val; \
+asm volatile("mrc p14, "#op1", %0, "#crn", "#crm", "#op2 : "=r" (val)); \
+val; \
+})
+
+#define MCR14(val, op1, crn, crm, op2) \
+({ \
+asm volatile("mcr p14, "#op1", %0, "#crn", "#crm", "#op2 : : "r" (val));\
+})
+
+/*
+ * Debug Registers
+ *
+ * Available only in DBGv7
+ * DBGECR, DBGDSCCR, DBGDSMCR, DBGDRCR
+ *
+ * Available only in DBGv7.1
+ * DBGBXVRm, DBGOSDLR, DBGDEVID2, DBGDEVID1
+ *
+ * Read only
+ * DBGDIDR, DBGDSCRint, DBGDTRRXint, DBGDRAR, DBGOSLSR, DBGOSSRR, DBGPRSR,
+ * DBGPRSR, DBGDSAR, DBGAUTHSTATUS, DBGDEVID2, DBGDEVID1, DBGDEVID
+ *
+ * Write only
+ * DBGDTRTXint, DBGOSLAR
+ */
+#define RCP14_DBGDIDR() MRC14(0, c0, c0, 0)
+#define RCP14_DBGDSCRint() MRC14(0, c0, c1, 0)
+#define RCP14_DBGDTRRXint() MRC14(0, c0, c5, 0)
+#define RCP14_DBGWFAR() MRC14(0, c0, c6, 0)
+#define RCP14_DBGVCR() MRC14(0, c0, c7, 0)
+#define RCP14_DBGECR() MRC14(0, c0, c9, 0)
+#define RCP14_DBGDSCCR() MRC14(0, c0, c10, 0)
+#define RCP14_DBGDSMCR() MRC14(0, c0, c11, 0)
+#define RCP14_DBGDTRRXext() MRC14(0, c0, c0, 2)
+#define RCP14_DBGDSCRext() MRC14(0, c0, c2, 2)
+#define RCP14_DBGDTRTXext() MRC14(0, c0, c3, 2)
+#define RCP14_DBGDRCR() MRC14(0, c0, c4, 2)
+#define RCP14_DBGBVR0() MRC14(0, c0, c0, 4)
+#define RCP14_DBGBVR1() MRC14(0, c0, c1, 4)
+#define RCP14_DBGBVR2() MRC14(0, c0, c2, 4)
+#define RCP14_DBGBVR3() MRC14(0, c0, c3, 4)
+#define RCP14_DBGBVR4() MRC14(0, c0, c4, 4)
+#define RCP14_DBGBVR5() MRC14(0, c0, c5, 4)
+#define RCP14_DBGBVR6() MRC14(0, c0, c6, 4)
+#define RCP14_DBGBVR7() MRC14(0, c0, c7, 4)
+#define RCP14_DBGBVR8() MRC14(0, c0, c8, 4)
+#define RCP14_DBGBVR9() MRC14(0, c0, c9, 4)
+#define RCP14_DBGBVR10() MRC14(0, c0, c10, 4)
+#define RCP14_DBGBVR11() MRC14(0, c0, c11, 4)
+#define RCP14_DBGBVR12() MRC14(0, c0, c12, 4)
+#define RCP14_DBGBVR13() MRC14(0, c0, c13, 4)
+#define RCP14_DBGBVR14() MRC14(0, c0, c14, 4)
+#define RCP14_DBGBVR15() MRC14(0, c0, c15, 4)
+#define RCP14_DBGBCR0() MRC14(0, c0, c0, 5)
+#define RCP14_DBGBCR1() MRC14(0, c0, c1, 5)
+#define RCP14_DBGBCR2() MRC14(0, c0, c2, 5)
+#define RCP14_DBGBCR3() MRC14(0, c0, c3, 5)
+#define RCP14_DBGBCR4() MRC14(0, c0, c4, 5)
+#define RCP14_DBGBCR5() MRC14(0, c0, c5, 5)
+#define RCP14_DBGBCR6() MRC14(0, c0, c6, 5)
+#define RCP14_DBGBCR7() MRC14(0, c0, c7, 5)
+#define RCP14_DBGBCR8() MRC14(0, c0, c8, 5)
+#define RCP14_DBGBCR9() MRC14(0, c0, c9, 5)
+#define RCP14_DBGBCR10() MRC14(0, c0, c10, 5)
+#define RCP14_DBGBCR11() MRC14(0, c0, c11, 5)
+#define RCP14_DBGBCR12() MRC14(0, c0, c12, 5)
+#define RCP14_DBGBCR13() MRC14(0, c0, c13, 5)
+#define RCP14_DBGBCR14() MRC14(0, c0, c14, 5)
+#define RCP14_DBGBCR15() MRC14(0, c0, c15, 5)
+#define RCP14_DBGWVR0() MRC14(0, c0, c0, 6)
+#define RCP14_DBGWVR1() MRC14(0, c0, c1, 6)
+#define RCP14_DBGWVR2() MRC14(0, c0, c2, 6)
+#define RCP14_DBGWVR3() MRC14(0, c0, c3, 6)
+#define RCP14_DBGWVR4() MRC14(0, c0, c4, 6)
+#define RCP14_DBGWVR5() MRC14(0, c0, c5, 6)
+#define RCP14_DBGWVR6() MRC14(0, c0, c6, 6)
+#define RCP14_DBGWVR7() MRC14(0, c0, c7, 6)
+#define RCP14_DBGWVR8() MRC14(0, c0, c8, 6)
+#define RCP14_DBGWVR9() MRC14(0, c0, c9, 6)
+#define RCP14_DBGWVR10() MRC14(0, c0, c10, 6)
+#define RCP14_DBGWVR11() MRC14(0, c0, c11, 6)
+#define RCP14_DBGWVR12() MRC14(0, c0, c12, 6)
+#define RCP14_DBGWVR13() MRC14(0, c0, c13, 6)
+#define RCP14_DBGWVR14() MRC14(0, c0, c14, 6)
+#define RCP14_DBGWVR15() MRC14(0, c0, c15, 6)
+#define RCP14_DBGWCR0() MRC14(0, c0, c0, 7)
+#define RCP14_DBGWCR1() MRC14(0, c0, c1, 7)
+#define RCP14_DBGWCR2() MRC14(0, c0, c2, 7)
+#define RCP14_DBGWCR3() MRC14(0, c0, c3, 7)
+#define RCP14_DBGWCR4() MRC14(0, c0, c4, 7)
+#define RCP14_DBGWCR5() MRC14(0, c0, c5, 7)
+#define RCP14_DBGWCR6() MRC14(0, c0, c6, 7)
+#define RCP14_DBGWCR7() MRC14(0, c0, c7, 7)
+#define RCP14_DBGWCR8() MRC14(0, c0, c8, 7)
+#define RCP14_DBGWCR9() MRC14(0, c0, c9, 7)
+#define RCP14_DBGWCR10() MRC14(0, c0, c10, 7)
+#define RCP14_DBGWCR11() MRC14(0, c0, c11, 7)
+#define RCP14_DBGWCR12() MRC14(0, c0, c12, 7)
+#define RCP14_DBGWCR13() MRC14(0, c0, c13, 7)
+#define RCP14_DBGWCR14() MRC14(0, c0, c14, 7)
+#define RCP14_DBGWCR15() MRC14(0, c0, c15, 7)
+#define RCP14_DBGDRAR() MRC14(0, c1, c0, 0)
+#define RCP14_DBGBXVR0() MRC14(0, c1, c0, 1)
+#define RCP14_DBGBXVR1() MRC14(0, c1, c1, 1)
+#define RCP14_DBGBXVR2() MRC14(0, c1, c2, 1)
+#define RCP14_DBGBXVR3() MRC14(0, c1, c3, 1)
+#define RCP14_DBGBXVR4() MRC14(0, c1, c4, 1)
+#define RCP14_DBGBXVR5() MRC14(0, c1, c5, 1)
+#define RCP14_DBGBXVR6() MRC14(0, c1, c6, 1)
+#define RCP14_DBGBXVR7() MRC14(0, c1, c7, 1)
+#define RCP14_DBGBXVR8() MRC14(0, c1, c8, 1)
+#define RCP14_DBGBXVR9() MRC14(0, c1, c9, 1)
+#define RCP14_DBGBXVR10() MRC14(0, c1, c10, 1)
+#define RCP14_DBGBXVR11() MRC14(0, c1, c11, 1)
+#define RCP14_DBGBXVR12() MRC14(0, c1, c12, 1)
+#define RCP14_DBGBXVR13() MRC14(0, c1, c13, 1)
+#define RCP14_DBGBXVR14() MRC14(0, c1, c14, 1)
+#define RCP14_DBGBXVR15() MRC14(0, c1, c15, 1)
+#define RCP14_DBGOSLSR() MRC14(0, c1, c1, 4)
+#define RCP14_DBGOSSRR() MRC14(0, c1, c2, 4)
+#define RCP14_DBGOSDLR() MRC14(0, c1, c3, 4)
+#define RCP14_DBGPRCR() MRC14(0, c1, c4, 4)
+#define RCP14_DBGPRSR() MRC14(0, c1, c5, 4)
+#define RCP14_DBGDSAR() MRC14(0, c2, c0, 0)
+#define RCP14_DBGITCTRL() MRC14(0, c7, c0, 4)
+#define RCP14_DBGCLAIMSET() MRC14(0, c7, c8, 6)
+#define RCP14_DBGCLAIMCLR() MRC14(0, c7, c9, 6)
+#define RCP14_DBGAUTHSTATUS() MRC14(0, c7, c14, 6)
+#define RCP14_DBGDEVID2() MRC14(0, c7, c0, 7)
+#define RCP14_DBGDEVID1() MRC14(0, c7, c1, 7)
+#define RCP14_DBGDEVID() MRC14(0, c7, c2, 7)
+
+#define WCP14_DBGDTRTXint(val) MCR14(val, 0, c0, c5, 0)
+#define WCP14_DBGWFAR(val) MCR14(val, 0, c0, c6, 0)
+#define WCP14_DBGVCR(val) MCR14(val, 0, c0, c7, 0)
+#define WCP14_DBGECR(val) MCR14(val, 0, c0, c9, 0)
+#define WCP14_DBGDSCCR(val) MCR14(val, 0, c0, c10, 0)
+#define WCP14_DBGDSMCR(val) MCR14(val, 0, c0, c11, 0)
+#define WCP14_DBGDTRRXext(val) MCR14(val, 0, c0, c0, 2)
+#define WCP14_DBGDSCRext(val) MCR14(val, 0, c0, c2, 2)
+#define WCP14_DBGDTRTXext(val) MCR14(val, 0, c0, c3, 2)
+#define WCP14_DBGDRCR(val) MCR14(val, 0, c0, c4, 2)
+#define WCP14_DBGBVR0(val) MCR14(val, 0, c0, c0, 4)
+#define WCP14_DBGBVR1(val) MCR14(val, 0, c0, c1, 4)
+#define WCP14_DBGBVR2(val) MCR14(val, 0, c0, c2, 4)
+#define WCP14_DBGBVR3(val) MCR14(val, 0, c0, c3, 4)
+#define WCP14_DBGBVR4(val) MCR14(val, 0, c0, c4, 4)
+#define WCP14_DBGBVR5(val) MCR14(val, 0, c0, c5, 4)
+#define WCP14_DBGBVR6(val) MCR14(val, 0, c0, c6, 4)
+#define WCP14_DBGBVR7(val) MCR14(val, 0, c0, c7, 4)
+#define WCP14_DBGBVR8(val) MCR14(val, 0, c0, c8, 4)
+#define WCP14_DBGBVR9(val) MCR14(val, 0, c0, c9, 4)
+#define WCP14_DBGBVR10(val) MCR14(val, 0, c0, c10, 4)
+#define WCP14_DBGBVR11(val) MCR14(val, 0, c0, c11, 4)
+#define WCP14_DBGBVR12(val) MCR14(val, 0, c0, c12, 4)
+#define WCP14_DBGBVR13(val) MCR14(val, 0, c0, c13, 4)
+#define WCP14_DBGBVR14(val) MCR14(val, 0, c0, c14, 4)
+#define WCP14_DBGBVR15(val) MCR14(val, 0, c0, c15, 4)
+#define WCP14_DBGBCR0(val) MCR14(val, 0, c0, c0, 5)
+#define WCP14_DBGBCR1(val) MCR14(val, 0, c0, c1, 5)
+#define WCP14_DBGBCR2(val) MCR14(val, 0, c0, c2, 5)
+#define WCP14_DBGBCR3(val) MCR14(val, 0, c0, c3, 5)
+#define WCP14_DBGBCR4(val) MCR14(val, 0, c0, c4, 5)
+#define WCP14_DBGBCR5(val) MCR14(val, 0, c0, c5, 5)
+#define WCP14_DBGBCR6(val) MCR14(val, 0, c0, c6, 5)
+#define WCP14_DBGBCR7(val) MCR14(val, 0, c0, c7, 5)
+#define WCP14_DBGBCR8(val) MCR14(val, 0, c0, c8, 5)
+#define WCP14_DBGBCR9(val) MCR14(val, 0, c0, c9, 5)
+#define WCP14_DBGBCR10(val) MCR14(val, 0, c0, c10, 5)
+#define WCP14_DBGBCR11(val) MCR14(val, 0, c0, c11, 5)
+#define WCP14_DBGBCR12(val) MCR14(val, 0, c0, c12, 5)
+#define WCP14_DBGBCR13(val) MCR14(val, 0, c0, c13, 5)
+#define WCP14_DBGBCR14(val) MCR14(val, 0, c0, c14, 5)
+#define WCP14_DBGBCR15(val) MCR14(val, 0, c0, c15, 5)
+#define WCP14_DBGWVR0(val) MCR14(val, 0, c0, c0, 6)
+#define WCP14_DBGWVR1(val) MCR14(val, 0, c0, c1, 6)
+#define WCP14_DBGWVR2(val) MCR14(val, 0, c0, c2, 6)
+#define WCP14_DBGWVR3(val) MCR14(val, 0, c0, c3, 6)
+#define WCP14_DBGWVR4(val) MCR14(val, 0, c0, c4, 6)
+#define WCP14_DBGWVR5(val) MCR14(val, 0, c0, c5, 6)
+#define WCP14_DBGWVR6(val) MCR14(val, 0, c0, c6, 6)
+#define WCP14_DBGWVR7(val) MCR14(val, 0, c0, c7, 6)
+#define WCP14_DBGWVR8(val) MCR14(val, 0, c0, c8, 6)
+#define WCP14_DBGWVR9(val) MCR14(val, 0, c0, c9, 6)
+#define WCP14_DBGWVR10(val) MCR14(val, 0, c0, c10, 6)
+#define WCP14_DBGWVR11(val) MCR14(val, 0, c0, c11, 6)
+#define WCP14_DBGWVR12(val) MCR14(val, 0, c0, c12, 6)
+#define WCP14_DBGWVR13(val) MCR14(val, 0, c0, c13, 6)
+#define WCP14_DBGWVR14(val) MCR14(val, 0, c0, c14, 6)
+#define WCP14_DBGWVR15(val) MCR14(val, 0, c0, c15, 6)
+#define WCP14_DBGWCR0(val) MCR14(val, 0, c0, c0, 7)
+#define WCP14_DBGWCR1(val) MCR14(val, 0, c0, c1, 7)
+#define WCP14_DBGWCR2(val) MCR14(val, 0, c0, c2, 7)
+#define WCP14_DBGWCR3(val) MCR14(val, 0, c0, c3, 7)
+#define WCP14_DBGWCR4(val) MCR14(val, 0, c0, c4, 7)
+#define WCP14_DBGWCR5(val) MCR14(val, 0, c0, c5, 7)
+#define WCP14_DBGWCR6(val) MCR14(val, 0, c0, c6, 7)
+#define WCP14_DBGWCR7(val) MCR14(val, 0, c0, c7, 7)
+#define WCP14_DBGWCR8(val) MCR14(val, 0, c0, c8, 7)
+#define WCP14_DBGWCR9(val) MCR14(val, 0, c0, c9, 7)
+#define WCP14_DBGWCR10(val) MCR14(val, 0, c0, c10, 7)
+#define WCP14_DBGWCR11(val) MCR14(val, 0, c0, c11, 7)
+#define WCP14_DBGWCR12(val) MCR14(val, 0, c0, c12, 7)
+#define WCP14_DBGWCR13(val) MCR14(val, 0, c0, c13, 7)
+#define WCP14_DBGWCR14(val) MCR14(val, 0, c0, c14, 7)
+#define WCP14_DBGWCR15(val) MCR14(val, 0, c0, c15, 7)
+#define WCP14_DBGBXVR0(val) MCR14(val, 0, c1, c0, 1)
+#define WCP14_DBGBXVR1(val) MCR14(val, 0, c1, c1, 1)
+#define WCP14_DBGBXVR2(val) MCR14(val, 0, c1, c2, 1)
+#define WCP14_DBGBXVR3(val) MCR14(val, 0, c1, c3, 1)
+#define WCP14_DBGBXVR4(val) MCR14(val, 0, c1, c4, 1)
+#define WCP14_DBGBXVR5(val) MCR14(val, 0, c1, c5, 1)
+#define WCP14_DBGBXVR6(val) MCR14(val, 0, c1, c6, 1)
+#define WCP14_DBGBXVR7(val) MCR14(val, 0, c1, c7, 1)
+#define WCP14_DBGBXVR8(val) MCR14(val, 0, c1, c8, 1)
+#define WCP14_DBGBXVR9(val) MCR14(val, 0, c1, c9, 1)
+#define WCP14_DBGBXVR10(val) MCR14(val, 0, c1, c10, 1)
+#define WCP14_DBGBXVR11(val) MCR14(val, 0, c1, c11, 1)
+#define WCP14_DBGBXVR12(val) MCR14(val, 0, c1, c12, 1)
+#define WCP14_DBGBXVR13(val) MCR14(val, 0, c1, c13, 1)
+#define WCP14_DBGBXVR14(val) MCR14(val, 0, c1, c14, 1)
+#define WCP14_DBGBXVR15(val) MCR14(val, 0, c1, c15, 1)
+#define WCP14_DBGOSLAR(val) MCR14(val, 0, c1, c0, 4)
+#define WCP14_DBGOSSRR(val) MCR14(val, 0, c1, c2, 4)
+#define WCP14_DBGOSDLR(val) MCR14(val, 0, c1, c3, 4)
+#define WCP14_DBGPRCR(val) MCR14(val, 0, c1, c4, 4)
+#define WCP14_DBGITCTRL(val) MCR14(val, 0, c7, c0, 4)
+#define WCP14_DBGCLAIMSET(val) MCR14(val, 0, c7, c8, 6)
+#define WCP14_DBGCLAIMCLR(val) MCR14(val, 0, c7, c9, 6)
+
+/*
+ * ETM Registers
+ *
+ * Available only in ETMv3.3, 3.4, 3.5
+ * ETMASICCR, ETMTECR2, ETMFFRR, ETMVDEVR, ETMVDCR1, ETMVDCR2, ETMVDCR3,
+ * ETMDCVRn, ETMDCMRn
+ *
+ * Available only in ETMv3.5 as read only
+ * ETMIDR2
+ *
+ * Available only in ETMv3.5, PFTv1.0, 1.1
+ * ETMTSEVR, ETMVMIDCVR, ETMPDCR
+ *
+ * Read only
+ * ETMCCR, ETMSCR, ETMIDR, ETMCCER, ETMOSLSR
+ * ETMLSR, ETMAUTHSTATUS, ETMDEVID, ETMDEVTYPE, ETMPIDR4, ETMPIDR5, ETMPIDR6,
+ * ETMPIDR7, ETMPIDR0, ETMPIDR1, ETMPIDR2, ETMPIDR2, ETMPIDR3, ETMCIDR0,
+ * ETMCIDR1, ETMCIDR2, ETMCIDR3
+ *
+ * Write only
+ * ETMOSLAR, ETMLAR
+ * Note: ETMCCER[11] controls WO nature of certain regs. Refer ETM arch spec.
+ */
+#define RCP14_ETMCR() MRC14(1, c0, c0, 0)
+#define RCP14_ETMCCR() MRC14(1, c0, c1, 0)
+#define RCP14_ETMTRIGGER() MRC14(1, c0, c2, 0)
+#define RCP14_ETMASICCR() MRC14(1, c0, c3, 0)
+#define RCP14_ETMSR() MRC14(1, c0, c4, 0)
+#define RCP14_ETMSCR() MRC14(1, c0, c5, 0)
+#define RCP14_ETMTSSCR() MRC14(1, c0, c6, 0)
+#define RCP14_ETMTECR2() MRC14(1, c0, c7, 0)
+#define RCP14_ETMTEEVR() MRC14(1, c0, c8, 0)
+#define RCP14_ETMTECR1() MRC14(1, c0, c9, 0)
+#define RCP14_ETMFFRR() MRC14(1, c0, c10, 0)
+#define RCP14_ETMFFLR() MRC14(1, c0, c11, 0)
+#define RCP14_ETMVDEVR() MRC14(1, c0, c12, 0)
+#define RCP14_ETMVDCR1() MRC14(1, c0, c13, 0)
+#define RCP14_ETMVDCR2() MRC14(1, c0, c14, 0)
+#define RCP14_ETMVDCR3() MRC14(1, c0, c15, 0)
+#define RCP14_ETMACVR0() MRC14(1, c0, c0, 1)
+#define RCP14_ETMACVR1() MRC14(1, c0, c1, 1)
+#define RCP14_ETMACVR2() MRC14(1, c0, c2, 1)
+#define RCP14_ETMACVR3() MRC14(1, c0, c3, 1)
+#define RCP14_ETMACVR4() MRC14(1, c0, c4, 1)
+#define RCP14_ETMACVR5() MRC14(1, c0, c5, 1)
+#define RCP14_ETMACVR6() MRC14(1, c0, c6, 1)
+#define RCP14_ETMACVR7() MRC14(1, c0, c7, 1)
+#define RCP14_ETMACVR8() MRC14(1, c0, c8, 1)
+#define RCP14_ETMACVR9() MRC14(1, c0, c9, 1)
+#define RCP14_ETMACVR10() MRC14(1, c0, c10, 1)
+#define RCP14_ETMACVR11() MRC14(1, c0, c11, 1)
+#define RCP14_ETMACVR12() MRC14(1, c0, c12, 1)
+#define RCP14_ETMACVR13() MRC14(1, c0, c13, 1)
+#define RCP14_ETMACVR14() MRC14(1, c0, c14, 1)
+#define RCP14_ETMACVR15() MRC14(1, c0, c15, 1)
+#define RCP14_ETMACTR0() MRC14(1, c0, c0, 2)
+#define RCP14_ETMACTR1() MRC14(1, c0, c1, 2)
+#define RCP14_ETMACTR2() MRC14(1, c0, c2, 2)
+#define RCP14_ETMACTR3() MRC14(1, c0, c3, 2)
+#define RCP14_ETMACTR4() MRC14(1, c0, c4, 2)
+#define RCP14_ETMACTR5() MRC14(1, c0, c5, 2)
+#define RCP14_ETMACTR6() MRC14(1, c0, c6, 2)
+#define RCP14_ETMACTR7() MRC14(1, c0, c7, 2)
+#define RCP14_ETMACTR8() MRC14(1, c0, c8, 2)
+#define RCP14_ETMACTR9() MRC14(1, c0, c9, 2)
+#define RCP14_ETMACTR10() MRC14(1, c0, c10, 2)
+#define RCP14_ETMACTR11() MRC14(1, c0, c11, 2)
+#define RCP14_ETMACTR12() MRC14(1, c0, c12, 2)
+#define RCP14_ETMACTR13() MRC14(1, c0, c13, 2)
+#define RCP14_ETMACTR14() MRC14(1, c0, c14, 2)
+#define RCP14_ETMACTR15() MRC14(1, c0, c15, 2)
+#define RCP14_ETMDCVR0() MRC14(1, c0, c0, 3)
+#define RCP14_ETMDCVR2() MRC14(1, c0, c2, 3)
+#define RCP14_ETMDCVR4() MRC14(1, c0, c4, 3)
+#define RCP14_ETMDCVR6() MRC14(1, c0, c6, 3)
+#define RCP14_ETMDCVR8() MRC14(1, c0, c8, 3)
+#define RCP14_ETMDCVR10() MRC14(1, c0, c10, 3)
+#define RCP14_ETMDCVR12() MRC14(1, c0, c12, 3)
+#define RCP14_ETMDCVR14() MRC14(1, c0, c14, 3)
+#define RCP14_ETMDCMR0() MRC14(1, c0, c0, 4)
+#define RCP14_ETMDCMR2() MRC14(1, c0, c2, 4)
+#define RCP14_ETMDCMR4() MRC14(1, c0, c4, 4)
+#define RCP14_ETMDCMR6() MRC14(1, c0, c6, 4)
+#define RCP14_ETMDCMR8() MRC14(1, c0, c8, 4)
+#define RCP14_ETMDCMR10() MRC14(1, c0, c10, 4)
+#define RCP14_ETMDCMR12() MRC14(1, c0, c12, 4)
+#define RCP14_ETMDCMR14() MRC14(1, c0, c14, 4)
+#define RCP14_ETMCNTRLDVR0() MRC14(1, c0, c0, 5)
+#define RCP14_ETMCNTRLDVR1() MRC14(1, c0, c1, 5)
+#define RCP14_ETMCNTRLDVR2() MRC14(1, c0, c2, 5)
+#define RCP14_ETMCNTRLDVR3() MRC14(1, c0, c3, 5)
+#define RCP14_ETMCNTENR0() MRC14(1, c0, c4, 5)
+#define RCP14_ETMCNTENR1() MRC14(1, c0, c5, 5)
+#define RCP14_ETMCNTENR2() MRC14(1, c0, c6, 5)
+#define RCP14_ETMCNTENR3() MRC14(1, c0, c7, 5)
+#define RCP14_ETMCNTRLDEVR0() MRC14(1, c0, c8, 5)
+#define RCP14_ETMCNTRLDEVR1() MRC14(1, c0, c9, 5)
+#define RCP14_ETMCNTRLDEVR2() MRC14(1, c0, c10, 5)
+#define RCP14_ETMCNTRLDEVR3() MRC14(1, c0, c11, 5)
+#define RCP14_ETMCNTVR0() MRC14(1, c0, c12, 5)
+#define RCP14_ETMCNTVR1() MRC14(1, c0, c13, 5)
+#define RCP14_ETMCNTVR2() MRC14(1, c0, c14, 5)
+#define RCP14_ETMCNTVR3() MRC14(1, c0, c15, 5)
+#define RCP14_ETMSQ12EVR() MRC14(1, c0, c0, 6)
+#define RCP14_ETMSQ21EVR() MRC14(1, c0, c1, 6)
+#define RCP14_ETMSQ23EVR() MRC14(1, c0, c2, 6)
+#define RCP14_ETMSQ31EVR() MRC14(1, c0, c3, 6)
+#define RCP14_ETMSQ32EVR() MRC14(1, c0, c4, 6)
+#define RCP14_ETMSQ13EVR() MRC14(1, c0, c5, 6)
+#define RCP14_ETMSQR() MRC14(1, c0, c7, 6)
+#define RCP14_ETMEXTOUTEVR0() MRC14(1, c0, c8, 6)
+#define RCP14_ETMEXTOUTEVR1() MRC14(1, c0, c9, 6)
+#define RCP14_ETMEXTOUTEVR2() MRC14(1, c0, c10, 6)
+#define RCP14_ETMEXTOUTEVR3() MRC14(1, c0, c11, 6)
+#define RCP14_ETMCIDCVR0() MRC14(1, c0, c12, 6)
+#define RCP14_ETMCIDCVR1() MRC14(1, c0, c13, 6)
+#define RCP14_ETMCIDCVR2() MRC14(1, c0, c14, 6)
+#define RCP14_ETMCIDCMR() MRC14(1, c0, c15, 6)
+#define RCP14_ETMIMPSPEC0() MRC14(1, c0, c0, 7)
+#define RCP14_ETMIMPSPEC1() MRC14(1, c0, c1, 7)
+#define RCP14_ETMIMPSPEC2() MRC14(1, c0, c2, 7)
+#define RCP14_ETMIMPSPEC3() MRC14(1, c0, c3, 7)
+#define RCP14_ETMIMPSPEC4() MRC14(1, c0, c4, 7)
+#define RCP14_ETMIMPSPEC5() MRC14(1, c0, c5, 7)
+#define RCP14_ETMIMPSPEC6() MRC14(1, c0, c6, 7)
+#define RCP14_ETMIMPSPEC7() MRC14(1, c0, c7, 7)
+#define RCP14_ETMSYNCFR() MRC14(1, c0, c8, 7)
+#define RCP14_ETMIDR() MRC14(1, c0, c9, 7)
+#define RCP14_ETMCCER() MRC14(1, c0, c10, 7)
+#define RCP14_ETMEXTINSELR() MRC14(1, c0, c11, 7)
+#define RCP14_ETMTESSEICR() MRC14(1, c0, c12, 7)
+#define RCP14_ETMEIBCR() MRC14(1, c0, c13, 7)
+#define RCP14_ETMTSEVR() MRC14(1, c0, c14, 7)
+#define RCP14_ETMAUXCR() MRC14(1, c0, c15, 7)
+#define RCP14_ETMTRACEIDR() MRC14(1, c1, c0, 0)
+#define RCP14_ETMIDR2() MRC14(1, c1, c2, 0)
+#define RCP14_ETMVMIDCVR() MRC14(1, c1, c0, 1)
+#define RCP14_ETMOSLSR() MRC14(1, c1, c1, 4)
+/* Not available in PFTv1.1 */
+#define RCP14_ETMOSSRR() MRC14(1, c1, c2, 4)
+#define RCP14_ETMPDCR() MRC14(1, c1, c4, 4)
+#define RCP14_ETMPDSR() MRC14(1, c1, c5, 4)
+#define RCP14_ETMITCTRL() MRC14(1, c7, c0, 4)
+#define RCP14_ETMCLAIMSET() MRC14(1, c7, c8, 6)
+#define RCP14_ETMCLAIMCLR() MRC14(1, c7, c9, 6)
+#define RCP14_ETMLSR() MRC14(1, c7, c13, 6)
+#define RCP14_ETMAUTHSTATUS() MRC14(1, c7, c14, 6)
+#define RCP14_ETMDEVID() MRC14(1, c7, c2, 7)
+#define RCP14_ETMDEVTYPE() MRC14(1, c7, c3, 7)
+#define RCP14_ETMPIDR4() MRC14(1, c7, c4, 7)
+#define RCP14_ETMPIDR5() MRC14(1, c7, c5, 7)
+#define RCP14_ETMPIDR6() MRC14(1, c7, c6, 7)
+#define RCP14_ETMPIDR7() MRC14(1, c7, c7, 7)
+#define RCP14_ETMPIDR0() MRC14(1, c7, c8, 7)
+#define RCP14_ETMPIDR1() MRC14(1, c7, c9, 7)
+#define RCP14_ETMPIDR2() MRC14(1, c7, c10, 7)
+#define RCP14_ETMPIDR3() MRC14(1, c7, c11, 7)
+#define RCP14_ETMCIDR0() MRC14(1, c7, c12, 7)
+#define RCP14_ETMCIDR1() MRC14(1, c7, c13, 7)
+#define RCP14_ETMCIDR2() MRC14(1, c7, c14, 7)
+#define RCP14_ETMCIDR3() MRC14(1, c7, c15, 7)
+
+#define WCP14_ETMCR(val) MCR14(val, 1, c0, c0, 0)
+#define WCP14_ETMTRIGGER(val) MCR14(val, 1, c0, c2, 0)
+#define WCP14_ETMASICCR(val) MCR14(val, 1, c0, c3, 0)
+#define WCP14_ETMSR(val) MCR14(val, 1, c0, c4, 0)
+#define WCP14_ETMTSSCR(val) MCR14(val, 1, c0, c6, 0)
+#define WCP14_ETMTECR2(val) MCR14(val, 1, c0, c7, 0)
+#define WCP14_ETMTEEVR(val) MCR14(val, 1, c0, c8, 0)
+#define WCP14_ETMTECR1(val) MCR14(val, 1, c0, c9, 0)
+#define WCP14_ETMFFRR(val) MCR14(val, 1, c0, c10, 0)
+#define WCP14_ETMFFLR(val) MCR14(val, 1, c0, c11, 0)
+#define WCP14_ETMVDEVR(val) MCR14(val, 1, c0, c12, 0)
+#define WCP14_ETMVDCR1(val) MCR14(val, 1, c0, c13, 0)
+#define WCP14_ETMVDCR2(val) MCR14(val, 1, c0, c14, 0)
+#define WCP14_ETMVDCR3(val) MCR14(val, 1, c0, c15, 0)
+#define WCP14_ETMACVR0(val) MCR14(val, 1, c0, c0, 1)
+#define WCP14_ETMACVR1(val) MCR14(val, 1, c0, c1, 1)
+#define WCP14_ETMACVR2(val) MCR14(val, 1, c0, c2, 1)
+#define WCP14_ETMACVR3(val) MCR14(val, 1, c0, c3, 1)
+#define WCP14_ETMACVR4(val) MCR14(val, 1, c0, c4, 1)
+#define WCP14_ETMACVR5(val) MCR14(val, 1, c0, c5, 1)
+#define WCP14_ETMACVR6(val) MCR14(val, 1, c0, c6, 1)
+#define WCP14_ETMACVR7(val) MCR14(val, 1, c0, c7, 1)
+#define WCP14_ETMACVR8(val) MCR14(val, 1, c0, c8, 1)
+#define WCP14_ETMACVR9(val) MCR14(val, 1, c0, c9, 1)
+#define WCP14_ETMACVR10(val) MCR14(val, 1, c0, c10, 1)
+#define WCP14_ETMACVR11(val) MCR14(val, 1, c0, c11, 1)
+#define WCP14_ETMACVR12(val) MCR14(val, 1, c0, c12, 1)
+#define WCP14_ETMACVR13(val) MCR14(val, 1, c0, c13, 1)
+#define WCP14_ETMACVR14(val) MCR14(val, 1, c0, c14, 1)
+#define WCP14_ETMACVR15(val) MCR14(val, 1, c0, c15, 1)
+#define WCP14_ETMACTR0(val) MCR14(val, 1, c0, c0, 2)
+#define WCP14_ETMACTR1(val) MCR14(val, 1, c0, c1, 2)
+#define WCP14_ETMACTR2(val) MCR14(val, 1, c0, c2, 2)
+#define WCP14_ETMACTR3(val) MCR14(val, 1, c0, c3, 2)
+#define WCP14_ETMACTR4(val) MCR14(val, 1, c0, c4, 2)
+#define WCP14_ETMACTR5(val) MCR14(val, 1, c0, c5, 2)
+#define WCP14_ETMACTR6(val) MCR14(val, 1, c0, c6, 2)
+#define WCP14_ETMACTR7(val) MCR14(val, 1, c0, c7, 2)
+#define WCP14_ETMACTR8(val) MCR14(val, 1, c0, c8, 2)
+#define WCP14_ETMACTR9(val) MCR14(val, 1, c0, c9, 2)
+#define WCP14_ETMACTR10(val) MCR14(val, 1, c0, c10, 2)
+#define WCP14_ETMACTR11(val) MCR14(val, 1, c0, c11, 2)
+#define WCP14_ETMACTR12(val) MCR14(val, 1, c0, c12, 2)
+#define WCP14_ETMACTR13(val) MCR14(val, 1, c0, c13, 2)
+#define WCP14_ETMACTR14(val) MCR14(val, 1, c0, c14, 2)
+#define WCP14_ETMACTR15(val) MCR14(val, 1, c0, c15, 2)
+#define WCP14_ETMDCVR0(val) MCR14(val, 1, c0, c0, 3)
+#define WCP14_ETMDCVR2(val) MCR14(val, 1, c0, c2, 3)
+#define WCP14_ETMDCVR4(val) MCR14(val, 1, c0, c4, 3)
+#define WCP14_ETMDCVR6(val) MCR14(val, 1, c0, c6, 3)
+#define WCP14_ETMDCVR8(val) MCR14(val, 1, c0, c8, 3)
+#define WCP14_ETMDCVR10(val) MCR14(val, 1, c0, c10, 3)
+#define WCP14_ETMDCVR12(val) MCR14(val, 1, c0, c12, 3)
+#define WCP14_ETMDCVR14(val) MCR14(val, 1, c0, c14, 3)
+#define WCP14_ETMDCMR0(val) MCR14(val, 1, c0, c0, 4)
+#define WCP14_ETMDCMR2(val) MCR14(val, 1, c0, c2, 4)
+#define WCP14_ETMDCMR4(val) MCR14(val, 1, c0, c4, 4)
+#define WCP14_ETMDCMR6(val) MCR14(val, 1, c0, c6, 4)
+#define WCP14_ETMDCMR8(val) MCR14(val, 1, c0, c8, 4)
+#define WCP14_ETMDCMR10(val) MCR14(val, 1, c0, c10, 4)
+#define WCP14_ETMDCMR12(val) MCR14(val, 1, c0, c12, 4)
+#define WCP14_ETMDCMR14(val) MCR14(val, 1, c0, c14, 4)
+#define WCP14_ETMCNTRLDVR0(val) MCR14(val, 1, c0, c0, 5)
+#define WCP14_ETMCNTRLDVR1(val) MCR14(val, 1, c0, c1, 5)
+#define WCP14_ETMCNTRLDVR2(val) MCR14(val, 1, c0, c2, 5)
+#define WCP14_ETMCNTRLDVR3(val) MCR14(val, 1, c0, c3, 5)
+#define WCP14_ETMCNTENR0(val) MCR14(val, 1, c0, c4, 5)
+#define WCP14_ETMCNTENR1(val) MCR14(val, 1, c0, c5, 5)
+#define WCP14_ETMCNTENR2(val) MCR14(val, 1, c0, c6, 5)
+#define WCP14_ETMCNTENR3(val) MCR14(val, 1, c0, c7, 5)
+#define WCP14_ETMCNTRLDEVR0(val) MCR14(val, 1, c0, c8, 5)
+#define WCP14_ETMCNTRLDEVR1(val) MCR14(val, 1, c0, c9, 5)
+#define WCP14_ETMCNTRLDEVR2(val) MCR14(val, 1, c0, c10, 5)
+#define WCP14_ETMCNTRLDEVR3(val) MCR14(val, 1, c0, c11, 5)
+#define WCP14_ETMCNTVR0(val) MCR14(val, 1, c0, c12, 5)
+#define WCP14_ETMCNTVR1(val) MCR14(val, 1, c0, c13, 5)
+#define WCP14_ETMCNTVR2(val) MCR14(val, 1, c0, c14, 5)
+#define WCP14_ETMCNTVR3(val) MCR14(val, 1, c0, c15, 5)
+#define WCP14_ETMSQ12EVR(val) MCR14(val, 1, c0, c0, 6)
+#define WCP14_ETMSQ21EVR(val) MCR14(val, 1, c0, c1, 6)
+#define WCP14_ETMSQ23EVR(val) MCR14(val, 1, c0, c2, 6)
+#define WCP14_ETMSQ31EVR(val) MCR14(val, 1, c0, c3, 6)
+#define WCP14_ETMSQ32EVR(val) MCR14(val, 1, c0, c4, 6)
+#define WCP14_ETMSQ13EVR(val) MCR14(val, 1, c0, c5, 6)
+#define WCP14_ETMSQR(val) MCR14(val, 1, c0, c7, 6)
+#define WCP14_ETMEXTOUTEVR0(val) MCR14(val, 1, c0, c8, 6)
+#define WCP14_ETMEXTOUTEVR1(val) MCR14(val, 1, c0, c9, 6)
+#define WCP14_ETMEXTOUTEVR2(val) MCR14(val, 1, c0, c10, 6)
+#define WCP14_ETMEXTOUTEVR3(val) MCR14(val, 1, c0, c11, 6)
+#define WCP14_ETMCIDCVR0(val) MCR14(val, 1, c0, c12, 6)
+#define WCP14_ETMCIDCVR1(val) MCR14(val, 1, c0, c13, 6)
+#define WCP14_ETMCIDCVR2(val) MCR14(val, 1, c0, c14, 6)
+#define WCP14_ETMCIDCMR(val) MCR14(val, 1, c0, c15, 6)
+#define WCP14_ETMIMPSPEC0(val) MCR14(val, 1, c0, c0, 7)
+#define WCP14_ETMIMPSPEC1(val) MCR14(val, 1, c0, c1, 7)
+#define WCP14_ETMIMPSPEC2(val) MCR14(val, 1, c0, c2, 7)
+#define WCP14_ETMIMPSPEC3(val) MCR14(val, 1, c0, c3, 7)
+#define WCP14_ETMIMPSPEC4(val) MCR14(val, 1, c0, c4, 7)
+#define WCP14_ETMIMPSPEC5(val) MCR14(val, 1, c0, c5, 7)
+#define WCP14_ETMIMPSPEC6(val) MCR14(val, 1, c0, c6, 7)
+#define WCP14_ETMIMPSPEC7(val) MCR14(val, 1, c0, c7, 7)
+/* Can be read only in ETMv3.4, ETMv3.5 */
+#define WCP14_ETMSYNCFR(val) MCR14(val, 1, c0, c8, 7)
+#define WCP14_ETMEXTINSELR(val) MCR14(val, 1, c0, c11, 7)
+#define WCP14_ETMTESSEICR(val) MCR14(val, 1, c0, c12, 7)
+#define WCP14_ETMEIBCR(val) MCR14(val, 1, c0, c13, 7)
+#define WCP14_ETMTSEVR(val) MCR14(val, 1, c0, c14, 7)
+#define WCP14_ETMAUXCR(val) MCR14(val, 1, c0, c15, 7)
+#define WCP14_ETMTRACEIDR(val) MCR14(val, 1, c1, c0, 0)
+#define WCP14_ETMIDR2(val) MCR14(val, 1, c1, c2, 0)
+#define WCP14_ETMVMIDCVR(val) MCR14(val, 1, c1, c0, 1)
+#define WCP14_ETMOSLAR(val) MCR14(val, 1, c1, c0, 4)
+/* Not available in PFTv1.1 */
+#define WCP14_ETMOSSRR(val) MCR14(val, 1, c1, c2, 4)
+#define WCP14_ETMPDCR(val) MCR14(val, 1, c1, c4, 4)
+#define WCP14_ETMPDSR(val) MCR14(val, 1, c1, c5, 4)
+#define WCP14_ETMITCTRL(val) MCR14(val, 1, c7, c0, 4)
+#define WCP14_ETMCLAIMSET(val) MCR14(val, 1, c7, c8, 6)
+#define WCP14_ETMCLAIMCLR(val) MCR14(val, 1, c7, c9, 6)
+/* Writes to this from CP14 interface are ignored */
+#define WCP14_ETMLAR(val) MCR14(val, 1, c7, c12, 6)
+
+#endif
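
Note: the dbg_read()/dbg_write() and etm_read()/etm_write() wrappers paste the register name into the matching RCP14_/WCP14_ macro, so callers name the register instead of spelling out raw coprocessor operands. A hypothetical use, programming hardware breakpoint 0:

static void program_bp0(unsigned long addr, u32 bcr_val)
{
	u32 didr = dbg_read(DBGDIDR);	/* debug architecture/ID info */

	(void)didr;			/* e.g. check the breakpoint count */
	dbg_write(addr & ~3UL, DBGBVR0);	/* breakpoint value (address) */
	dbg_write(bcr_val, DBGBCR0);		/* breakpoint control/enable */
}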
diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h
index a71b417b1856..af79da40af2a 100644
--- a/arch/arm/include/asm/hw_irq.h
+++ b/arch/arm/include/asm/hw_irq.h
@@ -8,6 +8,7 @@ static inline void ack_bad_irq(int irq)
{
extern unsigned long irq_err_count;
irq_err_count++;
+ pr_crit("unexpected IRQ trap at vector %02x\n", irq);
}
void set_irq_flags(unsigned int irq, unsigned int flags);
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 180567408ee8..db58deb00aa7 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -47,13 +47,13 @@ extern void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set);
* Generic IO read/write. These perform native-endian accesses. Note
* that some architectures will want to re-define __raw_{read,write}w.
*/
-extern void __raw_writesb(void __iomem *addr, const void *data, int bytelen);
-extern void __raw_writesw(void __iomem *addr, const void *data, int wordlen);
-extern void __raw_writesl(void __iomem *addr, const void *data, int longlen);
+void __raw_writesb(volatile void __iomem *addr, const void *data, int bytelen);
+void __raw_writesw(volatile void __iomem *addr, const void *data, int wordlen);
+void __raw_writesl(volatile void __iomem *addr, const void *data, int longlen);
-extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);
-extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
-extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
+void __raw_readsb(const volatile void __iomem *addr, void *data, int bytelen);
+void __raw_readsw(const volatile void __iomem *addr, void *data, int wordlen);
+void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen);
#if __LINUX_ARM_ARCH__ < 6
/*
@@ -69,6 +69,7 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
* writeback addressing modes as these incur a significant performance
* overhead (the address generation must be emulated in software).
*/
+#define __raw_writew __raw_writew
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
asm volatile("strh %1, %0"
@@ -76,6 +77,7 @@ static inline void __raw_writew(u16 val, volatile void __iomem *addr)
: "r" (val));
}
+#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
u16 val;
@@ -86,6 +88,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
}
#endif
+#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
asm volatile("strb %1, %0"
@@ -93,6 +96,7 @@ static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
: "r" (val));
}
+#define __raw_writel __raw_writel
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
asm volatile("str %1, %0"
@@ -100,6 +104,7 @@ static inline void __raw_writel(u32 val, volatile void __iomem *addr)
: "r" (val));
}
+#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
u8 val;
@@ -109,6 +114,7 @@ static inline u8 __raw_readb(const volatile void __iomem *addr)
return val;
}
+#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
u32 val;
@@ -267,20 +273,6 @@ extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
#define insl(p,d,l) __raw_readsl(__io(p),d,l)
#endif
-#define outb_p(val,port) outb((val),(port))
-#define outw_p(val,port) outw((val),(port))
-#define outl_p(val,port) outl((val),(port))
-#define inb_p(port) inb((port))
-#define inw_p(port) inw((port))
-#define inl_p(port) inl((port))
-
-#define outsb_p(port,from,len) outsb(port,from,len)
-#define outsw_p(port,from,len) outsw(port,from,len)
-#define outsl_p(port,from,len) outsl(port,from,len)
-#define insb_p(port,to,len) insb(port,to,len)
-#define insw_p(port,to,len) insw(port,to,len)
-#define insl_p(port,to,len) insl(port,to,len)
-
/*
* String version of IO memory access ops:
*/
@@ -347,40 +339,42 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
#define iounmap __arm_iounmap
/*
- * io{read,write}{8,16,32} macros
+ * io{read,write}{16,32}be() macros
*/
-#ifndef ioread8
-#define ioread8(p) ({ unsigned int __v = __raw_readb(p); __iormb(); __v; })
-#define ioread16(p) ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; })
-#define ioread32(p) ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; })
-
-#define ioread16be(p) ({ unsigned int __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
-#define ioread32be(p) ({ unsigned int __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
-
-#define iowrite8(v,p) ({ __iowmb(); __raw_writeb(v, p); })
-#define iowrite16(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_le16(v), p); })
-#define iowrite32(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_le32(v), p); })
+#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
-#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
-#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
-
-#define ioread8_rep(p,d,c) __raw_readsb(p,d,c)
-#define ioread16_rep(p,d,c) __raw_readsw(p,d,c)
-#define ioread32_rep(p,d,c) __raw_readsl(p,d,c)
-
-#define iowrite8_rep(p,s,c) __raw_writesb(p,s,c)
-#define iowrite16_rep(p,s,c) __raw_writesw(p,s,c)
-#define iowrite32_rep(p,s,c) __raw_writesl(p,s,c)
+#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
+#ifndef ioport_map
+#define ioport_map ioport_map
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
+#endif
+#ifndef ioport_unmap
+#define ioport_unmap ioport_unmap
extern void ioport_unmap(void __iomem *addr);
#endif
struct pci_dev;
+#define pci_iounmap pci_iounmap
extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p) __va(p)
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p) p
+
+#include <asm-generic/io.h>
+
+/*
* can the hardware map this into one segment or not, given no other
* constraints.
*/
@@ -402,17 +396,6 @@ extern int devmem_is_allowed(unsigned long pfn);
#endif
/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p) __va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p) p
-
-/*
* Register ISA memory and port locations for glibc iopl/inb/outb
* emulation.
*/
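
Note: the io.h rework switches ARM to asm-generic/io.h; each "#define __raw_readl __raw_readl" marks an accessor the architecture implements itself, so the generic header compiles out its fallback. Schematically, the generic side looks like this (a paraphrase, not the exact asm-generic source):

#ifndef __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;	/* generic fallback */
}
#endif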
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index b9db269c6e61..66ce17655bb9 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -33,6 +33,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.hcr = HCR_GUEST_MASK;
+}
+
static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
{
return 1;
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 53036e21756b..254e0650e48b 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -150,8 +150,6 @@ struct kvm_vcpu_stat {
u32 halt_wakeup;
};
-int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
- const struct kvm_vcpu_init *init);
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index acb0d5712716..63e0ecc04901 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -52,6 +52,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);
+void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
@@ -161,9 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
}
static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
- unsigned long size)
+ unsigned long size,
+ bool ipa_uncached)
{
- if (!vcpu_has_cache_enabled(vcpu))
+ if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
kvm_flush_dcache_to_poc((void *)hva, size);
/*
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 7fc42784becb..8292b5f81e23 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -22,6 +22,9 @@ struct hw_pci {
#ifdef CONFIG_PCI_DOMAINS
int domain;
#endif
+#ifdef CONFIG_PCI_MSI
+ struct msi_controller *msi_ctrl;
+#endif
struct pci_ops *ops;
int nr_controllers;
void **private_data;
@@ -36,8 +39,6 @@ struct hw_pci {
resource_size_t start,
resource_size_t size,
resource_size_t align);
- void (*add_bus)(struct pci_bus *bus);
- void (*remove_bus)(struct pci_bus *bus);
};
/*
@@ -47,6 +48,9 @@ struct pci_sys_data {
#ifdef CONFIG_PCI_DOMAINS
int domain;
#endif
+#ifdef CONFIG_PCI_MSI
+ struct msi_controller *msi_ctrl;
+#endif
struct list_head node;
int busnr; /* primary bus number */
u64 mem_offset; /* bus->cpu memory mapping offset */
@@ -65,8 +69,6 @@ struct pci_sys_data {
resource_size_t start,
resource_size_t size,
resource_size_t align);
- void (*add_bus)(struct pci_bus *bus);
- void (*remove_bus)(struct pci_bus *bus);
void *private_data; /* platform controller private data */
};
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index d428e386c88e..3446f6a1d9fa 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -219,6 +219,23 @@ void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
int __mcpm_cluster_state(unsigned int cluster);
+/**
+ * mcpm_sync_init - Initialize the cluster synchronization support
+ *
+ * @power_up_setup: platform specific function invoked during very
+ * early CPU/cluster bringup stage.
+ *
+ * This prepares memory used by vlocks and the MCPM state machine used
+ * across CPUs that may have their caches active or inactive. Must be
+ * called only after a successful call to mcpm_platform_register().
+ *
+ * The power_up_setup argument is a pointer to assembly code called when
+ * the MMU and caches are still disabled during boot and no stack space is
+ * available. The affinity level passed to that code corresponds to the
+ * resource that needs to be initialized (e.g. 1 for cluster level, 0 for
+ * CPU level). Proper exclusion mechanisms are already activated at that
+ * point.
+ */
int __init mcpm_sync_init(
void (*power_up_setup)(unsigned int affinity_level));
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index e731018869a7..184def0e1652 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -274,11 +274,13 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
* translation for translating DMA addresses. Use the driver
* DMA support - see dma-mapping.h.
*/
+#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
return __virt_to_phys((unsigned long)(x));
}
+#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
return (void *)__phys_to_virt(x);
@@ -322,11 +324,13 @@ static inline phys_addr_t __virt_to_idmap(unsigned long x)
#endif
#ifdef CONFIG_VIRT_TO_BUS
+#define virt_to_bus virt_to_bus
static inline __deprecated unsigned long virt_to_bus(void *x)
{
return __virt_to_bus((unsigned long)x);
}
+#define bus_to_virt bus_to_virt
static inline __deprecated void *bus_to_virt(unsigned long x)
{
return (void *)__bus_to_virt(x);
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index 209e6504922e..a89b4076cde4 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -30,14 +30,14 @@ static inline void set_my_cpu_offset(unsigned long off)
static inline unsigned long __my_cpu_offset(void)
{
unsigned long off;
- register unsigned long *sp asm ("sp");
/*
* Read TPIDRPRW.
* We want to allow caching the value, so avoid using volatile and
* instead use a fake stack read to hazard against barrier().
*/
- asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));
+ asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off)
+ : "Q" (*(const unsigned long *)current_stack_pointer));
return off;
}
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index c3a83691af8e..d9cf138fd7d4 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -12,7 +12,7 @@
#ifndef __ARM_PERF_EVENT_H__
#define __ARM_PERF_EVENT_H__
-#ifdef CONFIG_HW_PERF_EVENTS
+#ifdef CONFIG_PERF_EVENTS
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 78a779361682..19cfab526d13 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -157,7 +157,15 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
{
- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
+ extern pmdval_t user_pmd_table;
+ pmdval_t prot;
+
+ if (__LINUX_ARM_ARCH__ >= 6 && !IS_ENABLED(CONFIG_ARM_LPAE))
+ prot = user_pmd_table;
+ else
+ prot = _PAGE_USER_TABLE;
+
+ __pmd_populate(pmdp, page_to_phys(ptep), prot);
}
#define pmd_pgtable(pmd) pmd_page(pmd)
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5cfba15cb401..5e68278e953e 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -20,12 +20,14 @@
#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
#define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
#define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
#define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
/*
* - section
*/
+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
#define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index 9fd61c72a33a..f8f1cff62065 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -76,6 +76,7 @@
#define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
#define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
#define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
/*
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 3b30062975b2..d5cac545ba33 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -252,17 +252,57 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
set_pte_ext(ptep, pteval, ext);
}
-#define PTE_BIT_FUNC(fn,op) \
-static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
-
-PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY);
-PTE_BIT_FUNC(mkwrite, &= ~L_PTE_RDONLY);
-PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY);
-PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY);
-PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG);
-PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
-PTE_BIT_FUNC(mkexec, &= ~L_PTE_XN);
-PTE_BIT_FUNC(mknexec, |= L_PTE_XN);
+static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
+{
+ pte_val(pte) &= ~pgprot_val(prot);
+ return pte;
+}
+
+static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
+{
+ pte_val(pte) |= pgprot_val(prot);
+ return pte;
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(L_PTE_DIRTY));
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(L_PTE_DIRTY));
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG));
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(L_PTE_YOUNG));
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(L_PTE_XN));
+}
+
+static inline pte_t pte_mknexec(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(L_PTE_XN));
+}
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
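
Note: replacing the PTE_BIT_FUNC() token-pasting macro with open-coded set_pte_bit()/clear_pte_bit() makes the accessors greppable and lets other code compose them directly. For instance, a hypothetical helper (not in this patch):

static inline pte_t pte_mkro_clean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(L_PTE_DIRTY));
	return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}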
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 0b648c541293..b1596bd59129 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -15,6 +15,8 @@
#include <linux/interrupt.h>
#include <linux/perf_event.h>
+#include <asm/cputype.h>
+
/*
* struct arm_pmu_platdata - ARM PMU platform data
*
@@ -66,19 +68,25 @@ struct pmu_hw_events {
/*
* The events that are active on the PMU for the given index.
*/
- struct perf_event **events;
+ struct perf_event *events[ARMPMU_MAX_HWEVENTS];
/*
* A 1 bit for an index indicates that the counter is being used for
* an event. A 0 means that the counter can be used.
*/
- unsigned long *used_mask;
+ DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
/*
* Hardware lock to serialize accesses to PMU registers. Needed for the
* read/modify/write sequences.
*/
raw_spinlock_t pmu_lock;
+
+ /*
+ * When using percpu IRQs, we need a percpu dev_id. Place it here as we
+ * already have to allocate this struct per cpu.
+ */
+ struct arm_pmu *percpu_pmu;
};
struct arm_pmu {
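Editor's note: events[] and used_mask are now embedded at a fixed ARMPMU_MAX_HWEVENTS size rather than separately allocated, and percpu_pmu gives a percpu-IRQ handler, whose dev_id is this CPU's pmu_hw_events, a way back to the owning arm_pmu. A hedged handler sketch; the function name is illustrative and handle_irq is assumed to be the existing arm_pmu callback:

	/* With percpu IRQs the dev_id is the per-cpu pmu_hw_events, so
	 * percpu_pmu recovers the arm_pmu that owns it. */
	static irqreturn_t my_pmu_dispatch_irq(int irq, void *dev)
	{
		struct pmu_hw_events *hw_events = dev;
		struct arm_pmu *pmu = hw_events->percpu_pmu;

		return pmu->handle_irq(irq, pmu);
	}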
@@ -107,7 +115,8 @@ struct arm_pmu {
struct mutex reserve_mutex;
u64 max_period;
struct platform_device *plat_device;
- struct pmu_hw_events *(*get_hw_events)(void);
+ struct pmu_hw_events __percpu *hw_events;
+ struct notifier_block hotplug_nb;
};
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
@@ -127,6 +136,27 @@ int armpmu_map_event(struct perf_event *event,
[PERF_COUNT_HW_CACHE_RESULT_MAX],
u32 raw_event_mask);
+struct pmu_probe_info {
+ unsigned int cpuid;
+ unsigned int mask;
+ int (*init)(struct arm_pmu *);
+};
+
+#define PMU_PROBE(_cpuid, _mask, _fn) \
+{ \
+ .cpuid = (_cpuid), \
+ .mask = (_mask), \
+ .init = (_fn), \
+}
+
+#define ARM_PMU_PROBE(_cpuid, _fn) \
+ PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)
+
+#define ARM_PMU_XSCALE_MASK ((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)
+
+#define XSCALE_PMU_PROBE(_version, _fn) \
+ PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
+
#endif /* CONFIG_HW_PERF_EVENTS */
#endif /* __ARM_PMU_H__ */
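Editor's note: struct pmu_probe_info matches the running CPU against (cpuid, mask, init) triples; ARM_PMU_PROBE matches on the CPU part number, XSCALE_PMU_PROBE on the Intel implementer code plus the XScale architecture field. A sketch of how a driver might walk such a table; the table contents and cortex_a9_pmu_init() are illustrative, while read_cpuid_id() and ARM_CPU_PART_CORTEX_A9 come from <asm/cputype.h>:

	static const struct pmu_probe_info pmu_probe_table[] = {
		ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, cortex_a9_pmu_init),
		{ /* sentinel: .init == NULL ends the walk */ }
	};

	static int probe_current_pmu(struct arm_pmu *pmu)
	{
		unsigned int cpuid = read_cpuid_id();
		const struct pmu_probe_info *info;

		for (info = pmu_probe_table; info->init != NULL; info++)
			if ((cpuid & info->mask) == info->cpuid)
				return info->init(pmu);

		return -ENODEV;
	}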
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 601264d983fa..51622ba7c4a6 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -154,9 +154,8 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs)
return regs->ARM_sp;
}
-#define current_pt_regs(void) ({ \
- register unsigned long sp asm ("sp"); \
- (struct pt_regs *)((sp | (THREAD_SIZE - 1)) - 7) - 1; \
+#define current_pt_regs(void) ({ (struct pt_regs *) \
+ ((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1; \
})
#endif /* __ASSEMBLY__ */
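Editor's note: the only change here is replacing the per-site register variable with the global current_stack_pointer added in thread_info.h below; the arithmetic is untouched. OR-ing with THREAD_SIZE - 1 rounds sp up to the last byte of its THREAD_SIZE-aligned stack, subtracting 7 lands on the 8-byte-aligned top, and the pointer arithmetic "- 1" steps back over one struct pt_regs. A worked example with an illustrative address and THREAD_SIZE = 8192:

	unsigned long sp   = 0xc6dc1e40;       /* anywhere in the stack      */
	unsigned long last = sp | (8192 - 1);  /* 0xc6dc1fff, last stack byte */
	unsigned long top  = last - 7;         /* 0xc6dc1ff8, 8-byte aligned */
	struct pt_regs *regs = (struct pt_regs *)top - 1;  /* frame below   */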
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index ac4bfae26702..0fa418463f49 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -120,12 +120,12 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
- return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
+ return !arch_spin_value_unlocked(READ_ONCE(*lock));
}
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
- struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
+ struct __raw_tickets tickets = READ_ONCE(lock->tickets);
return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended arch_spin_is_contended
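Editor's note: ACCESS_ONCE() is a volatile scalar cast and is only well-defined for scalar types, while both loads here read aggregates (a whole arch_spinlock_t and a struct __raw_tickets); READ_ONCE() handles aggregates as well, hence the conversion. A minimal illustration of the two cases, assuming a lock pointer in scope:

	struct __raw_tickets t = READ_ONCE(lock->tickets);  /* aggregate */
	u16 next = READ_ONCE(lock->tickets.next);           /* scalar    */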
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index fc44d3761f9e..d890e41f5520 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -44,16 +44,6 @@ struct cpu_context_save {
__u32 extra[2]; /* Xscale 'acc' register, etc */
};
-struct arm_restart_block {
- union {
- /* For user cache flushing */
- struct {
- unsigned long start;
- unsigned long end;
- } cache;
- };
-};
-
/*
* low level task data that entry.S needs immediate access to.
* __switch_to() assumes cpu_context follows immediately after cpu_domain.
@@ -79,7 +69,6 @@ struct thread_info {
unsigned long thumbee_state; /* ThumbEE Handler Base register */
#endif
struct restart_block restart_block;
- struct arm_restart_block arm_restart_block;
};
#define INIT_THREAD_INFO(tsk) \
@@ -101,14 +90,19 @@ struct thread_info {
#define init_stack (init_thread_union.stack)
/*
+ * how to get the current stack pointer in C
+ */
+register unsigned long current_stack_pointer asm ("sp");
+
+/*
* how to get the thread information struct from C
*/
static inline struct thread_info *current_thread_info(void) __attribute_const__;
static inline struct thread_info *current_thread_info(void)
{
- register unsigned long sp asm ("sp");
- return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+ return (struct thread_info *)
+ (current_stack_pointer & ~(THREAD_SIZE - 1));
}
#define thread_saved_pc(tsk) \
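Editor's note: current_stack_pointer is a named register variable, so every use compiles to a read of sp without a per-site asm declaration. Because kernel stacks are THREAD_SIZE-aligned, masking off the low bits yields the stack base where struct thread_info lives, the round-down counterpart of the round-up in current_pt_regs above. Worked example with an illustrative address and THREAD_SIZE = 8192:

	unsigned long sp = 0xc6dc1e40;
	struct thread_info *ti =
		(struct thread_info *)(sp & ~(8192UL - 1));  /* 0xc6dc0000 */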
diff --git a/arch/arm/include/asm/vfp.h b/arch/arm/include/asm/vfp.h
index f4ab34fd4f72..ee5f3084243c 100644
--- a/arch/arm/include/asm/vfp.h
+++ b/arch/arm/include/asm/vfp.h
@@ -22,6 +22,7 @@
#define FPSID_NODOUBLE (1<<20)
#define FPSID_ARCH_BIT (16)
#define FPSID_ARCH_MASK (0xF << FPSID_ARCH_BIT)
+#define FPSID_CPUID_ARCH_MASK (0x7F << FPSID_ARCH_BIT)
#define FPSID_PART_BIT (8)
#define FPSID_PART_MASK (0xFF << FPSID_PART_BIT)
#define FPSID_VARIANT_BIT (4)
@@ -75,6 +76,10 @@
/* MVFR0 bits */
#define MVFR0_A_SIMD_BIT (0)
#define MVFR0_A_SIMD_MASK (0xf << MVFR0_A_SIMD_BIT)
+#define MVFR0_SP_BIT (4)
+#define MVFR0_SP_MASK (0xf << MVFR0_SP_BIT)
+#define MVFR0_DP_BIT (8)
+#define MVFR0_DP_MASK (0xf << MVFR0_DP_BIT)
/* Bit patterns for decoding the packaged operation descriptors */
#define VFPOPDESC_LENGTH_BIT (9)
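Editor's note: MVFR0 is divided into 4-bit fields; the new masks cover single precision at bits [7:4] and double precision at bits [11:8], where a non-zero field means the capability is implemented. A hedged sketch, assuming the fmrx() accessor used by the VFP support code:

	/* vfp_has_double_precision() is illustrative only. */
	static bool vfp_has_double_precision(void)
	{
		u32 mvfr0 = fmrx(MVFR0);

		return ((mvfr0 & MVFR0_DP_MASK) >> MVFR0_DP_BIT) != 0;
	}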
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index e8275ea88e88..efd562412850 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -5,6 +5,18 @@
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
+void __xen_dma_map_page(struct device *hwdev, struct page *page,
+ dma_addr_t dev_addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs);
+void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+void __xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
+void __xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
@@ -20,20 +32,56 @@ static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
}
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ dma_addr_t dev_addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
- __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+ bool local = PFN_DOWN(dev_addr) == page_to_pfn(page);
+ /* Dom0 is mapped 1:1, so if pfn == mfn the page is local; otherwise
+ * it is a foreign page grant-mapped in dom0. If the page is local we
+ * can safely call the native dma_ops function; otherwise we call
+ * the xen specific function. */
+ if (local)
+ __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+ else
+ __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}
-void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ struct dma_attrs *attrs)
+{
+ unsigned long pfn = PFN_DOWN(handle);
+ /* Dom0 is mapped 1:1, so calling pfn_valid on a foreign mfn will
+ * always return false. If the page is local we can safely call the
+ * native dma_ops function; otherwise we call the xen specific
+ * function. */
+ if (pfn_valid(pfn)) {
+ if (__generic_dma_ops(hwdev)->unmap_page)
+ __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+ } else
+ __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
+}
-void xen_dma_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir);
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned long pfn = PFN_DOWN(handle);
+ if (pfn_valid(pfn)) {
+ if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
+ __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+ } else
+ __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
+}
-void xen_dma_sync_single_for_device(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir);
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned long pfn = PFN_DOWN(handle);
+ if (pfn_valid(pfn)) {
+ if (__generic_dma_ops(hwdev)->sync_single_for_device)
+ __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+ } else
+ __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
+}
#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
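Editor's note: all four xen_dma_* wrappers now dispatch inline on the same rule: local (dom0) pages take the native dma_ops path, foreign grant-mapped pages take the out-of-line __xen_dma_* path. The rule reduces to one predicate; a hedged restatement (xen_page_is_local() is illustrative):

	/* Dom0 is mapped 1:1, so a handle whose pfn is valid can only
	 * name a local page; foreign mfns fall outside the pfn range. */
	static inline bool xen_page_is_local(dma_addr_t handle)
	{
		return pfn_valid(PFN_DOWN(handle));
	}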
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 135c24a5ba26..68c739b3fdf4 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -107,4 +107,8 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
#define xen_unmap(cookie) iounmap((cookie))
+bool xen_arch_need_swiotlb(struct device *dev,
+ unsigned long pfn,
+ unsigned long mfn);
+
#endif /* _ASM_ARM_XEN_PAGE_H */
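Editor's note: the new hook lets swiotlb-xen ask the architecture whether a given pfn/mfn pair must be bounce-buffered before DMA. A hedged caller sketch; needs_bounce() is illustrative:

	static bool needs_bounce(struct device *dev, struct page *page,
				 unsigned long mfn)
	{
		return xen_arch_need_swiotlb(dev, page_to_pfn(page), mfn);
	}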