Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig        |   7
-rw-r--r--  arch/arm/mm/cache-l2x0.c   | 443
-rw-r--r--  arch/arm/mm/dma-mapping.c  |   2
-rw-r--r--  arch/arm/mm/hugetlbpage.c  |   6
-rw-r--r--  arch/arm/mm/init.c         |   5
-rw-r--r--  arch/arm/mm/pgd.c          |   4
-rw-r--r--  arch/arm/mm/proc-macros.S  |   2
7 files changed, 239 insertions, 230 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c43c71455566..9b4f29e595a4 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -892,13 +892,6 @@ config CACHE_L2X0
if CACHE_L2X0
-config CACHE_PL310
- bool
- default y if CPU_V7 && !(CPU_V6 || CPU_V6K)
- help
- This option enables optimisations for the PL310 cache
- controller.
-
config PL310_ERRATA_588369
bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
help
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 5e65ca8dea62..c6c7696b8db9 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -1,5 +1,5 @@
/*
- * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
+ * arch/arm/mm/cache-l2x0.c - L210/L220/L310 cache controller support
*
* Copyright (C) 2007 ARM Limited
*
@@ -41,12 +41,14 @@ struct l2c_init_data {
void (*enable)(void __iomem *, u32, unsigned);
void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
void (*save)(void __iomem *);
+ void (*configure)(void __iomem *);
struct outer_cache_fns outer_cache;
};
#define CACHE_LINE_SIZE 32
static void __iomem *l2x0_base;
+static const struct l2c_init_data *l2x0_data;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask; /* Bitmask of active ways */
static u32 l2x0_size;
@@ -106,6 +108,19 @@ static inline void l2c_unlock(void __iomem *base, unsigned num)
}
}
+static void l2c_configure(void __iomem *base)
+{
+ if (outer_cache.configure) {
+ outer_cache.configure(&l2x0_saved_regs);
+ return;
+ }
+
+ if (l2x0_data->configure)
+ l2x0_data->configure(base);
+
+ l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL);
+}
+
/*
* Enable the L2 cache controller. This function must only be
* called when the cache controller is known to be disabled.
@@ -114,7 +129,12 @@ static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
unsigned long flags;
- l2c_write_sec(aux, base, L2X0_AUX_CTRL);
+ /* Do not touch the controller if already enabled. */
+ if (readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)
+ return;
+
+ l2x0_saved_regs.aux_ctrl = aux;
+ l2c_configure(base);
l2c_unlock(base, num_lock);
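With this change l2c_enable() refuses to touch an already-running controller and funnels all register setup through l2c_configure(): a machine-level outer_cache.configure override wins (for platforms whose L2C registers can only be written from the secure world), otherwise the per-variant hook runs, and AUX_CTRL is always written last via l2c_write_sec(). As a sketch of how such a secure-only platform might plug into this path (all names below are hypothetical, not part of this patch):

#include <linux/init.h>
#include <asm/outercache.h>

/* Hypothetical secure-monitor call provided by platform firmware. */
extern void my_plat_smc(unsigned long func, unsigned long reg,
			unsigned long val);
#define MY_SMC_L2C_WRITE	0x102	/* hypothetical function ID */

/* Route L2C register writes through secure firmware so that
 * l2c_write_sec() and l2c_configure() still work when the
 * non-secure side cannot write the controller directly. */
static void my_plat_l2c_write_sec(unsigned long val, unsigned reg)
{
	my_plat_smc(MY_SMC_L2C_WRITE, reg, val);
}

static void __init my_plat_init_early(void)
{
	outer_cache.write_sec = my_plat_l2c_write_sec;
}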
@@ -136,76 +156,14 @@ static void l2c_disable(void)
dsb(st);
}
-#ifdef CONFIG_CACHE_PL310
-static inline void cache_wait(void __iomem *reg, unsigned long mask)
-{
- /* cache operations by line are atomic on PL310 */
-}
-#else
-#define cache_wait l2c_wait_mask
-#endif
-
-static inline void cache_sync(void)
-{
- void __iomem *base = l2x0_base;
-
- writel_relaxed(0, base + sync_reg_offset);
- cache_wait(base + L2X0_CACHE_SYNC, 1);
-}
-
-#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
-static inline void debug_writel(unsigned long val)
-{
- l2c_set_debug(l2x0_base, val);
-}
-#else
-/* Optimised out for non-errata case */
-static inline void debug_writel(unsigned long val)
-{
-}
-#endif
-
-static void l2x0_cache_sync(void)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&l2x0_lock, flags);
- cache_sync();
- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
-}
-
-static void __l2x0_flush_all(void)
-{
- debug_writel(0x03);
- __l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
- cache_sync();
- debug_writel(0x00);
-}
-
-static void l2x0_flush_all(void)
-{
- unsigned long flags;
-
- /* clean all ways */
- raw_spin_lock_irqsave(&l2x0_lock, flags);
- __l2x0_flush_all();
- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
-}
-
-static void l2x0_disable(void)
+static void l2c_save(void __iomem *base)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&l2x0_lock, flags);
- __l2x0_flush_all();
- l2c_write_sec(0, l2x0_base, L2X0_CTRL);
- dsb(st);
- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}
-static void l2c_save(void __iomem *base)
+static void l2c_resume(void)
{
- l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
+ l2c_enable(l2x0_base, l2x0_saved_regs.aux_ctrl, l2x0_data->num_lock);
}
/*
@@ -288,14 +246,6 @@ static void l2c210_sync(void)
__l2c210_cache_sync(l2x0_base);
}
-static void l2c210_resume(void)
-{
- void __iomem *base = l2x0_base;
-
- if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
- l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
-}
-
static const struct l2c_init_data l2c210_data __initconst = {
.type = "L2C-210",
.way_size_0 = SZ_8K,
@@ -309,7 +259,7 @@ static const struct l2c_init_data l2c210_data __initconst = {
.flush_all = l2c210_flush_all,
.disable = l2c_disable,
.sync = l2c210_sync,
- .resume = l2c210_resume,
+ .resume = l2c_resume,
},
};
@@ -466,7 +416,7 @@ static const struct l2c_init_data l2c220_data = {
.flush_all = l2c220_flush_all,
.disable = l2c_disable,
.sync = l2c220_sync,
- .resume = l2c210_resume,
+ .resume = l2c_resume,
},
};
@@ -615,39 +565,29 @@ static void __init l2c310_save(void __iomem *base)
L310_POWER_CTRL);
}
-static void l2c310_resume(void)
+static void l2c310_configure(void __iomem *base)
{
- void __iomem *base = l2x0_base;
+ unsigned revision;
- if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
- unsigned revision;
-
- /* restore pl310 setup */
- writel_relaxed(l2x0_saved_regs.tag_latency,
- base + L310_TAG_LATENCY_CTRL);
- writel_relaxed(l2x0_saved_regs.data_latency,
- base + L310_DATA_LATENCY_CTRL);
- writel_relaxed(l2x0_saved_regs.filter_end,
- base + L310_ADDR_FILTER_END);
- writel_relaxed(l2x0_saved_regs.filter_start,
- base + L310_ADDR_FILTER_START);
-
- revision = readl_relaxed(base + L2X0_CACHE_ID) &
- L2X0_CACHE_ID_RTL_MASK;
-
- if (revision >= L310_CACHE_ID_RTL_R2P0)
- l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
- L310_PREFETCH_CTRL);
- if (revision >= L310_CACHE_ID_RTL_R3P0)
- l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
- L310_POWER_CTRL);
-
- l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
-
- /* Re-enable full-line-of-zeros for Cortex-A9 */
- if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
- set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
- }
+ /* restore pl310 setup */
+ l2c_write_sec(l2x0_saved_regs.tag_latency, base,
+ L310_TAG_LATENCY_CTRL);
+ l2c_write_sec(l2x0_saved_regs.data_latency, base,
+ L310_DATA_LATENCY_CTRL);
+ l2c_write_sec(l2x0_saved_regs.filter_end, base,
+ L310_ADDR_FILTER_END);
+ l2c_write_sec(l2x0_saved_regs.filter_start, base,
+ L310_ADDR_FILTER_START);
+
+ revision = readl_relaxed(base + L2X0_CACHE_ID) &
+ L2X0_CACHE_ID_RTL_MASK;
+
+ if (revision >= L310_CACHE_ID_RTL_R2P0)
+ l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
+ L310_PREFETCH_CTRL);
+ if (revision >= L310_CACHE_ID_RTL_R3P0)
+ l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
+ L310_POWER_CTRL);
}
static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data)
@@ -699,6 +639,23 @@ static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
}
+ /* r3p0 or later has power control register */
+ if (rev >= L310_CACHE_ID_RTL_R3P0)
+ l2x0_saved_regs.pwr_ctrl = L310_DYNAMIC_CLK_GATING_EN |
+ L310_STNDBY_MODE_EN;
+
+ /*
+ * Always enable non-secure access to the lockdown registers -
+ * we write to them as part of the L2C enable sequence so they
+ * need to be accessible.
+ */
+ aux |= L310_AUX_CTRL_NS_LOCKDOWN;
+
+ l2c_enable(base, aux, num_lock);
+
+ /* Read back resulting AUX_CTRL value as it could have been altered. */
+ aux = readl_relaxed(base + L2X0_AUX_CTRL);
+
if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);
@@ -712,23 +669,12 @@ static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
if (rev >= L310_CACHE_ID_RTL_R3P0) {
u32 power_ctrl;
- l2c_write_sec(L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN,
- base, L310_POWER_CTRL);
power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
}
- /*
- * Always enable non-secure access to the lockdown registers -
- * we write to them as part of the L2C enable sequence so they
- * need to be accessible.
- */
- aux |= L310_AUX_CTRL_NS_LOCKDOWN;
-
- l2c_enable(base, aux, num_lock);
-
if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) {
set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
cpu_notifier(l2c310_cpu_enable_flz, 0);
@@ -760,11 +706,11 @@ static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
if (revision >= L310_CACHE_ID_RTL_R3P0 &&
revision < L310_CACHE_ID_RTL_R3P2) {
- u32 val = readl_relaxed(base + L310_PREFETCH_CTRL);
+ u32 val = l2x0_saved_regs.prefetch_ctrl;
/* I don't think bit23 is required here... but iMX6 does so */
if (val & (BIT(30) | BIT(23))) {
val &= ~(BIT(30) | BIT(23));
- l2c_write_sec(val, base, L310_PREFETCH_CTRL);
+ l2x0_saved_regs.prefetch_ctrl = val;
errata[n++] = "752271";
}
}
@@ -800,6 +746,15 @@ static void l2c310_disable(void)
l2c_disable();
}
+static void l2c310_resume(void)
+{
+ l2c_resume();
+
+ /* Re-enable full-line-of-zeros for Cortex-A9 */
+ if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
+ set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
+}
+
static const struct l2c_init_data l2c310_init_fns __initconst = {
.type = "L2C-310",
.way_size_0 = SZ_8K,
@@ -807,6 +762,7 @@ static const struct l2c_init_data l2c310_init_fns __initconst = {
.enable = l2c310_enable,
.fixup = l2c310_fixup,
.save = l2c310_save,
+ .configure = l2c310_configure,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
@@ -818,14 +774,22 @@ static const struct l2c_init_data l2c310_init_fns __initconst = {
},
};
-static void __init __l2c_init(const struct l2c_init_data *data,
- u32 aux_val, u32 aux_mask, u32 cache_id)
+static int __init __l2c_init(const struct l2c_init_data *data,
+ u32 aux_val, u32 aux_mask, u32 cache_id)
{
struct outer_cache_fns fns;
unsigned way_size_bits, ways;
u32 aux, old_aux;
/*
+ * Save the pointer globally so that callbacks which do not receive
+ * context from callers can access the structure.
+ */
+ l2x0_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
+ if (!l2x0_data)
+ return -ENOMEM;
+
+ /*
* Sanity check the aux values. aux_mask is the bits we preserve
* from reading the hardware register, and aux_val is the bits we
* set.
@@ -884,6 +848,7 @@ static void __init __l2c_init(const struct l2c_init_data *data,
fns = data->outer_cache;
fns.write_sec = outer_cache.write_sec;
+ fns.configure = outer_cache.configure;
if (data->fixup)
data->fixup(l2x0_base, cache_id, &fns);
@@ -910,6 +875,8 @@ static void __init __l2c_init(const struct l2c_init_data *data,
data->type, ways, l2x0_size >> 10);
pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
data->type, cache_id, aux);
+
+ return 0;
}
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
@@ -936,6 +903,10 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
break;
}
+ /* Read back current (default) hardware configuration */
+ if (data->save)
+ data->save(l2x0_base);
+
__l2c_init(data, aux_val, aux_mask, cache_id);
}
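The kmemdup() added to __l2c_init() is what makes the shared l2c_resume() safe: the l2c_init_data tables are __initconst, so they are discarded after boot while resume runs much later, and only a heap copy of the descriptor survives. A minimal sketch of that pattern, using a hypothetical ops_table type in place of l2c_init_data:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical descriptor standing in for struct l2c_init_data. */
struct ops_table {
	unsigned num_lock;
	void (*resume)(void);
};

static const struct ops_table *live_ops;

static int __init keep_ops(const struct ops_table *init_ops)
{
	/* init_ops points into .init.rodata and is freed after boot,
	 * so keep a heap copy for callbacks that run at resume time. */
	live_ops = kmemdup(init_ops, sizeof(*init_ops), GFP_KERNEL);
	return live_ops ? 0 : -ENOMEM;
}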
@@ -979,7 +950,7 @@ static int __init l2x0_cache_size_of_parse(const struct device_node *np,
/* All these l2 caches have the same line = block size actually */
if (!line_size) {
if (block_size) {
- /* If linesize if not given, it is equal to blocksize */
+ /* If linesize is not given, it is equal to blocksize */
line_size = block_size;
} else {
/* Fall back to known size */
@@ -1102,7 +1073,7 @@ static const struct l2c_init_data of_l2c210_data __initconst = {
.flush_all = l2c210_flush_all,
.disable = l2c_disable,
.sync = l2c210_sync,
- .resume = l2c210_resume,
+ .resume = l2c_resume,
},
};
@@ -1120,7 +1091,7 @@ static const struct l2c_init_data of_l2c220_data __initconst = {
.flush_all = l2c220_flush_all,
.disable = l2c_disable,
.sync = l2c220_sync,
- .resume = l2c210_resume,
+ .resume = l2c_resume,
},
};
@@ -1131,32 +1102,32 @@ static void __init l2c310_of_parse(const struct device_node *np,
u32 tag[3] = { 0, 0, 0 };
u32 filter[2] = { 0, 0 };
u32 assoc;
+ u32 prefetch;
+ u32 val;
int ret;
of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
if (tag[0] && tag[1] && tag[2])
- writel_relaxed(
+ l2x0_saved_regs.tag_latency =
L310_LATENCY_CTRL_RD(tag[0] - 1) |
L310_LATENCY_CTRL_WR(tag[1] - 1) |
- L310_LATENCY_CTRL_SETUP(tag[2] - 1),
- l2x0_base + L310_TAG_LATENCY_CTRL);
+ L310_LATENCY_CTRL_SETUP(tag[2] - 1);
of_property_read_u32_array(np, "arm,data-latency",
data, ARRAY_SIZE(data));
if (data[0] && data[1] && data[2])
- writel_relaxed(
+ l2x0_saved_regs.data_latency =
L310_LATENCY_CTRL_RD(data[0] - 1) |
L310_LATENCY_CTRL_WR(data[1] - 1) |
- L310_LATENCY_CTRL_SETUP(data[2] - 1),
- l2x0_base + L310_DATA_LATENCY_CTRL);
+ L310_LATENCY_CTRL_SETUP(data[2] - 1);
of_property_read_u32_array(np, "arm,filter-ranges",
filter, ARRAY_SIZE(filter));
if (filter[1]) {
- writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
- l2x0_base + L310_ADDR_FILTER_END);
- writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN,
- l2x0_base + L310_ADDR_FILTER_START);
+ l2x0_saved_regs.filter_end =
+ ALIGN(filter[0] + filter[1], SZ_1M);
+ l2x0_saved_regs.filter_start = (filter[0] & ~(SZ_1M - 1))
+ | L310_ADDR_FILTER_EN;
}
ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
@@ -1178,6 +1149,58 @@ static void __init l2c310_of_parse(const struct device_node *np,
assoc);
break;
}
+
+ prefetch = l2x0_saved_regs.prefetch_ctrl;
+
+ ret = of_property_read_u32(np, "arm,double-linefill", &val);
+ if (ret == 0) {
+ if (val)
+ prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL;
+ else
+ prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
+ } else if (ret != -EINVAL) {
+ pr_err("L2C-310 OF arm,double-linefill property value is missing\n");
+ }
+
+ ret = of_property_read_u32(np, "arm,double-linefill-incr", &val);
+ if (ret == 0) {
+ if (val)
+ prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
+ else
+ prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
+ } else if (ret != -EINVAL) {
+ pr_err("L2C-310 OF arm,double-linefill-incr property value is missing\n");
+ }
+
+ ret = of_property_read_u32(np, "arm,double-linefill-wrap", &val);
+ if (ret == 0) {
+ if (!val)
+ prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
+ else
+ prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
+ } else if (ret != -EINVAL) {
+ pr_err("L2C-310 OF arm,double-linefill-wrap property value is missing\n");
+ }
+
+ ret = of_property_read_u32(np, "arm,prefetch-drop", &val);
+ if (ret == 0) {
+ if (val)
+ prefetch |= L310_PREFETCH_CTRL_PREFETCH_DROP;
+ else
+ prefetch &= ~L310_PREFETCH_CTRL_PREFETCH_DROP;
+ } else if (ret != -EINVAL) {
+ pr_err("L2C-310 OF arm,prefetch-drop property value is missing\n");
+ }
+
+ ret = of_property_read_u32(np, "arm,prefetch-offset", &val);
+ if (ret == 0) {
+ prefetch &= ~L310_PREFETCH_CTRL_OFFSET_MASK;
+ prefetch |= val & L310_PREFETCH_CTRL_OFFSET_MASK;
+ } else if (ret != -EINVAL) {
+ pr_err("L2C-310 OF arm,prefetch-offset property value is missing\n");
+ }
+
+ l2x0_saved_regs.prefetch_ctrl = prefetch;
}
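Each new property read above follows the same tri-state convention: of_property_read_u32() returns 0 when the property is present and valid (the value then sets or clears one prefetch bit), -EINVAL when the property is absent (the hardware default is kept silently), and another code such as -ENODATA when the property exists but carries no usable value, which is the case the pr_err() calls report. A condensed sketch of the pattern, with a hypothetical property name and helper:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/printk.h>

/* "vendor,example-flag" and apply_flag() are hypothetical. */
static void apply_flag(u32 val)
{
	/* set or clear one feature bit according to val */
}

static void parse_example(const struct device_node *np)
{
	u32 val;
	int ret = of_property_read_u32(np, "vendor,example-flag", &val);

	if (ret == 0)
		apply_flag(val);	/* present and valid */
	else if (ret != -EINVAL)
		pr_err("vendor,example-flag present but unreadable (%d)\n",
		       ret);
	/* ret == -EINVAL: property absent, keep the hardware default */
}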
static const struct l2c_init_data of_l2c310_data __initconst = {
@@ -1188,6 +1211,7 @@ static const struct l2c_init_data of_l2c310_data __initconst = {
.enable = l2c310_enable,
.fixup = l2c310_fixup,
.save = l2c310_save,
+ .configure = l2c310_configure,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
@@ -1216,6 +1240,7 @@ static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
.enable = l2c310_enable,
.fixup = l2c310_fixup,
.save = l2c310_save,
+ .configure = l2c310_configure,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
@@ -1231,7 +1256,7 @@ static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
* noninclusive, while the hardware cache range operations use
* inclusive start and end addresses.
*/
-static unsigned long calc_range_end(unsigned long start, unsigned long end)
+static unsigned long aurora_range_end(unsigned long start, unsigned long end)
{
/*
* Limit the number of cache lines processed at once,
@@ -1250,25 +1275,13 @@ static unsigned long calc_range_end(unsigned long start, unsigned long end)
return end;
}
-/*
- * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
- * and range operations only do a TLB lookup on the start address.
- */
static void aurora_pa_range(unsigned long start, unsigned long end,
- unsigned long offset)
+ unsigned long offset)
{
+ void __iomem *base = l2x0_base;
+ unsigned long range_end;
unsigned long flags;
- raw_spin_lock_irqsave(&l2x0_lock, flags);
- writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
- writel_relaxed(end, l2x0_base + offset);
- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
-
- cache_sync();
-}
-
-static void aurora_inv_range(unsigned long start, unsigned long end)
-{
/*
	 * round start and end addresses up to cache line size
*/
@@ -1276,15 +1289,24 @@ static void aurora_inv_range(unsigned long start, unsigned long end)
end = ALIGN(end, CACHE_LINE_SIZE);
/*
- * Invalidate all full cache lines between 'start' and 'end'.
+ * perform operation on all full cache lines between 'start' and 'end'
*/
while (start < end) {
- unsigned long range_end = calc_range_end(start, end);
- aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
- AURORA_INVAL_RANGE_REG);
+ range_end = aurora_range_end(start, end);
+
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ writel_relaxed(start, base + AURORA_RANGE_BASE_ADDR_REG);
+ writel_relaxed(range_end - CACHE_LINE_SIZE, base + offset);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+
+ writel_relaxed(0, base + AURORA_SYNC_REG);
start = range_end;
}
}
+static void aurora_inv_range(unsigned long start, unsigned long end)
+{
+ aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
+}
static void aurora_clean_range(unsigned long start, unsigned long end)
{
@@ -1292,52 +1314,53 @@ static void aurora_clean_range(unsigned long start, unsigned long end)
* If L2 is forced to WT, the L2 will always be clean and we
* don't need to do anything here.
*/
- if (!l2_wt_override) {
- start &= ~(CACHE_LINE_SIZE - 1);
- end = ALIGN(end, CACHE_LINE_SIZE);
- while (start != end) {
- unsigned long range_end = calc_range_end(start, end);
- aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
- AURORA_CLEAN_RANGE_REG);
- start = range_end;
- }
- }
+ if (!l2_wt_override)
+ aurora_pa_range(start, end, AURORA_CLEAN_RANGE_REG);
}
static void aurora_flush_range(unsigned long start, unsigned long end)
{
- start &= ~(CACHE_LINE_SIZE - 1);
- end = ALIGN(end, CACHE_LINE_SIZE);
- while (start != end) {
- unsigned long range_end = calc_range_end(start, end);
- /*
- * If L2 is forced to WT, the L2 will always be clean and we
- * just need to invalidate.
- */
- if (l2_wt_override)
- aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
- AURORA_INVAL_RANGE_REG);
- else
- aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
- AURORA_FLUSH_RANGE_REG);
- start = range_end;
- }
+ if (l2_wt_override)
+ aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
+ else
+ aurora_pa_range(start, end, AURORA_FLUSH_RANGE_REG);
}
-static void aurora_save(void __iomem *base)
+static void aurora_flush_all(void)
{
- l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
- l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
+ void __iomem *base = l2x0_base;
+ unsigned long flags;
+
+ /* clean all ways */
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ __l2c_op_way(base + L2X0_CLEAN_INV_WAY);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+
+ writel_relaxed(0, base + AURORA_SYNC_REG);
}
-static void aurora_resume(void)
+static void aurora_cache_sync(void)
+{
+ writel_relaxed(0, l2x0_base + AURORA_SYNC_REG);
+}
+
+static void aurora_disable(void)
{
void __iomem *base = l2x0_base;
+ unsigned long flags;
- if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
- writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
- writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
- }
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ __l2c_op_way(base + L2X0_CLEAN_INV_WAY);
+ writel_relaxed(0, base + AURORA_SYNC_REG);
+ l2c_write_sec(0, base, L2X0_CTRL);
+ dsb(st);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
+static void aurora_save(void __iomem *base)
+{
+ l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
+ l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}
/*
@@ -1398,10 +1421,10 @@ static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
.inv_range = aurora_inv_range,
.clean_range = aurora_clean_range,
.flush_range = aurora_flush_range,
- .flush_all = l2x0_flush_all,
- .disable = l2x0_disable,
- .sync = l2x0_cache_sync,
- .resume = aurora_resume,
+ .flush_all = aurora_flush_all,
+ .disable = aurora_disable,
+ .sync = aurora_cache_sync,
+ .resume = l2c_resume,
},
};
@@ -1414,7 +1437,7 @@ static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
.fixup = aurora_fixup,
.save = aurora_save,
.outer_cache = {
- .resume = aurora_resume,
+ .resume = l2c_resume,
},
};
@@ -1562,6 +1585,7 @@ static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
.of_parse = l2c310_of_parse,
.enable = l2c310_enable,
.save = l2c310_save,
+ .configure = l2c310_configure,
.outer_cache = {
.inv_range = bcm_inv_range,
.clean_range = bcm_clean_range,
@@ -1583,18 +1607,12 @@ static void __init tauros3_save(void __iomem *base)
readl_relaxed(base + L310_PREFETCH_CTRL);
}
-static void tauros3_resume(void)
+static void tauros3_configure(void __iomem *base)
{
- void __iomem *base = l2x0_base;
-
- if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
- writel_relaxed(l2x0_saved_regs.aux2_ctrl,
- base + TAUROS3_AUX2_CTRL);
- writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
- base + L310_PREFETCH_CTRL);
-
- l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
- }
+ writel_relaxed(l2x0_saved_regs.aux2_ctrl,
+ base + TAUROS3_AUX2_CTRL);
+ writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
+ base + L310_PREFETCH_CTRL);
}
static const struct l2c_init_data of_tauros3_data __initconst = {
@@ -1603,9 +1621,10 @@ static const struct l2c_init_data of_tauros3_data __initconst = {
.num_lock = 8,
.enable = l2c_enable,
.save = tauros3_save,
+ .configure = tauros3_configure,
/* Tauros3 broadcasts L1 cache operations to L2 */
.outer_cache = {
- .resume = tauros3_resume,
+ .resume = l2c_resume,
},
};
@@ -1661,6 +1680,10 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
if (!of_property_read_bool(np, "cache-unified"))
pr_err("L2C: device tree omits to specify unified cache\n");
+ /* Read back current (default) hardware configuration */
+ if (data->save)
+ data->save(l2x0_base);
+
/* L2 configuration can only be changed if the cache is disabled */
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
if (data->of_parse)
@@ -1671,8 +1694,6 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
else
cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
- __l2c_init(data, aux_val, aux_mask, cache_id);
-
- return 0;
+ return __l2c_init(data, aux_val, aux_mask, cache_id);
}
#endif
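Taken together, the cache-l2x0.c changes converge on a single lifecycle: probe snapshots the hardware defaults through ->save(), OF parsing edits only l2x0_saved_regs, and both the first enable and resume replay that saved state through l2c_configure(). A condensed call-flow sketch of the result (names match the patch; this is an outline, not compilable code):

/*
 * Condensed call flow after this refactor:
 *
 *   l2x0_init() / l2x0_of_init()
 *       data->save(l2x0_base);         read back hardware defaults
 *       data->of_parse(...);           edits only l2x0_saved_regs
 *       data->enable(l2x0_base, aux, data->num_lock);
 *
 *   l2c_enable()
 *       if (L2X0_CTRL already has L2X0_CTRL_EN) return;
 *       l2x0_saved_regs.aux_ctrl = aux;
 *       l2c_configure(base);           replays l2x0_saved_regs
 *       unlock ways, invalidate, set L2X0_CTRL_EN
 *
 *   l2c_resume()
 *       l2c_enable(l2x0_base, l2x0_saved_regs.aux_ctrl,
 *                  l2x0_data->num_lock);
 */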
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 903dba064a03..170a116d1b29 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1106,7 +1106,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
int i = 0;
if (array_size <= PAGE_SIZE)
- pages = kzalloc(array_size, gfp);
+ pages = kzalloc(array_size, GFP_KERNEL);
else
pages = vzalloc(array_size);
if (!pages)
diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
index 66781bf34077..c72412415093 100644
--- a/arch/arm/mm/hugetlbpage.c
+++ b/arch/arm/mm/hugetlbpage.c
@@ -36,12 +36,6 @@
* of type casting from pmd_t * to pte_t *.
*/
-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
- int write)
-{
- return ERR_PTR(-EINVAL);
-}
-
int pud_huge(pud_t pud)
{
return 0;
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 2495c8cb47ba..1609b022a72f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -319,10 +319,7 @@ void __init arm_memblock_init(const struct machine_desc *mdesc)
early_init_fdt_scan_reserved_mem();
- /*
- * reserve memory for DMA contigouos allocations,
- * must come from DMA area inside low memory
- */
+ /* reserve memory for DMA contiguous allocations */
dma_contiguous_reserve(arm_dma_limit);
arm_memblock_steal_permitted = false;
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 249379535be2..a3681f11dd9f 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -97,6 +97,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
no_pte:
pmd_free(mm, new_pmd);
+ mm_dec_nr_pmds(mm);
no_pmd:
pud_free(mm, new_pud);
no_pud:
@@ -130,9 +131,11 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
pte = pmd_pgtable(*pmd);
pmd_clear(pmd);
pte_free(mm, pte);
+ atomic_long_dec(&mm->nr_ptes);
no_pmd:
pud_clear(pud);
pmd_free(mm, pmd);
+ mm_dec_nr_pmds(mm);
no_pud:
pgd_clear(pgd);
pud_free(mm, pud);
@@ -152,6 +155,7 @@ no_pgd:
pmd = pmd_offset(pud, 0);
pud_clear(pud);
pmd_free(mm, pmd);
+ mm_dec_nr_pmds(mm);
pgd_clear(pgd);
pud_free(mm, pud);
}
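The added mm_dec_nr_pmds() and atomic_long_dec(&mm->nr_ptes) calls keep the page-table accounting balanced: the generic __pmd_alloc() and __pte_alloc() paths increment these counters, so every pmd_free() or pte_free() on an error or teardown path needs the matching decrement, otherwise the counters leak and the process-exit sanity checks complain. A minimal sketch of the pairing, assuming this kernel generation's counters (mm->nr_ptes as an atomic_long_t plus the mm_dec_nr_pmds() helper):

#include <linux/mm.h>

/* Sketch of the invariant these pgd.c hunks restore (simplified). */
static void teardown_example(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
{
	pte_free(mm, pte);
	atomic_long_dec(&mm->nr_ptes);	/* pairs with __pte_alloc() */

	pmd_free(mm, pmd);
	mm_dec_nr_pmds(mm);		/* pairs with __pmd_alloc() */
}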
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index ba1196c968d8..082b9f2f7e90 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -98,7 +98,7 @@
#endif
#if !defined (CONFIG_ARM_LPAE) && \
(L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\
- L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
+ L_PTE_PRESENT) > L_PTE_SHARED
#error Invalid Linux PTE bit settings
#endif
#endif /* CONFIG_MMU */
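For context on the check edited above: it is a build-time assertion that every software PTE bit stays below L_PTE_SHARED, and with L_PTE_FILE removed the sum shrinks accordingly. The trick is that the arithmetic sum of distinct single bits below a limit can never exceed limit - 1, so the sum only passes the limit when a flag collides with another high bit or strays to or above it. An illustration with hypothetical values (not the kernel's):

/* Illustration only: FLAG_* values are hypothetical. */
#define FLAG_A	(1 << 0)
#define FLAG_B	(1 << 1)
#define FLAG_C	(1 << 2)
#define LIMIT	(1 << 3)

/* 1 + 2 + 4 = 7, which never exceeds LIMIT (8); a flag (re)defined
 * at or above LIMIT pushes the sum past it and trips the #error. */
#if (FLAG_A + FLAG_B + FLAG_C) > LIMIT
#error Invalid flag bit settings
#endif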