Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/genex.S        |  6
-rw-r--r--  arch/mips/kernel/mips-cm.c      | 27
-rw-r--r--  arch/mips/kernel/r2300_fpu.S    |  4
-rw-r--r--  arch/mips/kernel/relocate.c     | 10
-rw-r--r--  arch/mips/kernel/setup.c        | 10
-rw-r--r--  arch/mips/kernel/smp-bmips.c    |  4
-rw-r--r--  arch/mips/kernel/syscall.c      |  9
-rw-r--r--  arch/mips/kernel/time.c         | 65
-rw-r--r--  arch/mips/kernel/topology.c     |  2
-rw-r--r--  arch/mips/kernel/traps.c        |  1
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S  |  3
11 files changed, 108 insertions, 33 deletions
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 37b9383eacd3..cf74a963839f 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -431,20 +431,20 @@ NESTED(nmi_handler, PT_SIZE, sp)
.endm
.macro __build_clear_fpe
+ CLI
+ TRACE_IRQS_OFF
.set push
/* gas fails to assemble cfc1 for some archs (octeon).*/ \
.set mips1
SET_HARDFLOAT
cfc1 a1, fcr31
.set pop
- CLI
- TRACE_IRQS_OFF
.endm
.macro __build_clear_msa_fpe
- _cfcmsa a1, MSA_CSR
CLI
TRACE_IRQS_OFF
+ _cfcmsa a1, MSA_CSR
.endm
.macro __build_clear_ade
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 7f3f136572de..51cfcb44e670 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -123,9 +123,9 @@ static char *cm2_causes[32] = {
"COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07",
"0x08", "0x09", "0x0a", "0x0b",
"0x0c", "0x0d", "0x0e", "0x0f",
- "0x10", "0x11", "0x12", "0x13",
- "0x14", "0x15", "0x16", "INTVN_WR_ERR",
- "INTVN_RD_ERR", "0x19", "0x1a", "0x1b",
+ "0x10", "INTVN_WR_ERR", "INTVN_RD_ERR", "0x13",
+ "0x14", "0x15", "0x16", "0x17",
+ "0x18", "0x19", "0x1a", "0x1b",
"0x1c", "0x1d", "0x1e", "0x1f"
};
@@ -183,8 +183,7 @@ static void mips_cm_probe_l2sync(void)
phys_addr_t addr;
/* L2-only sync was introduced with CM major revision 6 */
- major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR) >>
- __ffs(CM_GCR_REV_MAJOR);
+ major_rev = FIELD_GET(CM_GCR_REV_MAJOR, read_gcr_rev());
if (major_rev < 6)
return;
@@ -267,13 +266,13 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core,
preempt_disable();
if (cm_rev >= CM_REV_CM3) {
- val = core << __ffs(CM3_GCR_Cx_OTHER_CORE);
- val |= vp << __ffs(CM3_GCR_Cx_OTHER_VP);
+ val = FIELD_PREP(CM3_GCR_Cx_OTHER_CORE, core) |
+ FIELD_PREP(CM3_GCR_Cx_OTHER_VP, vp);
if (cm_rev >= CM_REV_CM3_5) {
val |= CM_GCR_Cx_OTHER_CLUSTER_EN;
- val |= cluster << __ffs(CM_GCR_Cx_OTHER_CLUSTER);
- val |= block << __ffs(CM_GCR_Cx_OTHER_BLOCK);
+ val |= FIELD_PREP(CM_GCR_Cx_OTHER_CLUSTER, cluster);
+ val |= FIELD_PREP(CM_GCR_Cx_OTHER_BLOCK, block);
} else {
WARN_ON(cluster != 0);
WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
@@ -303,7 +302,7 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core,
spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
per_cpu(cm_core_lock_flags, curr_core));
- val = core << __ffs(CM_GCR_Cx_OTHER_CORENUM);
+ val = FIELD_PREP(CM_GCR_Cx_OTHER_CORENUM, core);
}
write_gcr_cl_other(val);
@@ -347,8 +346,8 @@ void mips_cm_error_report(void)
cm_other = read_gcr_error_mult();
if (revision < CM_REV_CM3) { /* CM2 */
- cause = cm_error >> __ffs(CM_GCR_ERROR_CAUSE_ERRTYPE);
- ocause = cm_other >> __ffs(CM_GCR_ERROR_MULT_ERR2ND);
+ cause = FIELD_GET(CM_GCR_ERROR_CAUSE_ERRTYPE, cm_error);
+ ocause = FIELD_GET(CM_GCR_ERROR_MULT_ERR2ND, cm_other);
if (!cause)
return;
@@ -390,8 +389,8 @@ void mips_cm_error_report(void)
ulong core_id_bits, vp_id_bits, cmd_bits, cmd_group_bits;
ulong cm3_cca_bits, mcp_bits, cm3_tr_bits, sched_bit;
- cause = cm_error >> __ffs64(CM3_GCR_ERROR_CAUSE_ERRTYPE);
- ocause = cm_other >> __ffs(CM_GCR_ERROR_MULT_ERR2ND);
+ cause = FIELD_GET(CM3_GCR_ERROR_CAUSE_ERRTYPE, cm_error);
+ ocause = FIELD_GET(CM_GCR_ERROR_MULT_ERR2ND, cm_other);
if (!cause)
return;
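
For reference, the FIELD_GET()/FIELD_PREP() conversions above swap open-coded __ffs() shifts for the <linux/bitfield.h> helpers, which derive the shift from the mask itself (FIELD_GET() also masks before shifting, FIELD_PREP() masks after). A minimal standalone sketch of why the two forms are equivalent; the macros below are simplified stand-ins rather than the kernel headers, and the GCR_OTHER_CORE mask value is invented for illustration:

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the <linux/bitfield.h> helpers (illustration only). */
#define FIELD_SHIFT(mask)	__builtin_ctzll(mask)
#define FIELD_PREP(mask, val)	(((uint64_t)(val) << FIELD_SHIFT(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((uint64_t)(reg) & (mask)) >> FIELD_SHIFT(mask))

/* Hypothetical register layout: a "core number" field in bits 13:8. */
#define GCR_OTHER_CORE	0x3f00ULL

int main(void)
{
	uint64_t core = 5;

	/* Old style: shift the value up by the mask's lowest set bit. */
	uint64_t old_val = core << __builtin_ctzll(GCR_OTHER_CORE);
	/* New style: FIELD_PREP() derives the same shift from the mask. */
	uint64_t new_val = FIELD_PREP(GCR_OTHER_CORE, core);

	printf("old=%#llx new=%#llx core read back=%llu\n",
	       (unsigned long long)old_val, (unsigned long long)new_val,
	       (unsigned long long)FIELD_GET(GCR_OTHER_CORE, new_val));
	return 0;
}
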
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index 3062ba66c563..f7ce7b3971a4 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -29,8 +29,8 @@
#define EX2(a,b) \
9: a,##b; \
.section __ex_table,"a"; \
- PTR 9b,bad_stack; \
- PTR 9b+4,bad_stack; \
+ PTR 9b,fault; \
+ PTR 9b+4,fault; \
.previous
.set mips1
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index cbf4cc0b0b6c..934caf204078 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -187,8 +187,14 @@ static int __init relocate_exception_table(long offset)
static inline __init unsigned long rotate_xor(unsigned long hash,
const void *area, size_t size)
{
- size_t i;
- unsigned long *ptr = (unsigned long *)area;
+ const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash));
+ size_t diff, i;
+
+ diff = (void *)ptr - area;
+ if (unlikely(size < diff + sizeof(hash)))
+ return hash;
+
+ size = ALIGN_DOWN(size - diff, sizeof(hash));
for (i = 0; i < size / sizeof(hash); i++) {
/* Rotate by odd number of bits and XOR. */
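
The reworked rotate_xor() prologue only changes behaviour when area is not word aligned: PTR_ALIGN() rounds the start pointer up, the size check bails out if no whole word fits after that rounding, and ALIGN_DOWN() trims the tail so the hash loop never reads past the end of the buffer. A small sketch of that arithmetic, using simplified stand-ins for the kernel's alignment macros:

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel's alignment helpers (illustration only). */
#define ALIGN_DOWN(x, a)	((x) & ~((uintptr_t)(a) - 1))
#define ALIGN_UP(x, a)		ALIGN_DOWN((x) + (a) - 1, a)
#define PTR_ALIGN(p, a)		((const unsigned long *)ALIGN_UP((uintptr_t)(p), (a)))

int main(void)
{
	char buf[64];
	const void *area = buf + 3;		/* deliberately misaligned start */
	size_t size = 21;			/* deliberately ragged length    */

	const unsigned long *ptr = PTR_ALIGN(area, sizeof(unsigned long));
	size_t diff = (const char *)ptr - (const char *)area;

	if (size < diff + sizeof(unsigned long)) {
		puts("buffer too small: nothing word-sized left to hash");
		return 0;
	}

	/* Only whole, aligned unsigned longs are fed to the hash loop. */
	size = ALIGN_DOWN(size - diff, sizeof(unsigned long));
	printf("skip %zu lead-in bytes, hash %zu aligned bytes\n", diff, size);
	return 0;
}
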
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 05ed4ed411c7..abd7ee9e90ab 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -911,7 +911,17 @@ static void __init arch_mem_init(char **cmdline_p)
BOOTMEM_DEFAULT);
#endif
device_tree_init();
+
+ /*
+ * In order to reduce the possibility of a kernel panic when SWIOTLB
+ * fails to get IO TLB memory, allocate as little low memory as possible
+ * before plat_swiotlb_setup(), so make sparse_init() use top-down
+ * allocation.
+ */
+ memblock_set_bottom_up(false);
sparse_init();
+ memblock_set_bottom_up(true);
+
plat_swiotlb_setup();
dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 45fbcbbf2504..d16e6654a655 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -240,6 +240,8 @@ static int bmips_boot_secondary(int cpu, struct task_struct *idle)
*/
static void bmips_init_secondary(void)
{
+ bmips_cpu_setup();
+
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
@@ -572,7 +574,7 @@ asmlinkage void __weak plat_wired_tlb_setup(void)
*/
}
-void __init bmips_cpu_setup(void)
+void bmips_cpu_setup(void)
{
void __iomem __maybe_unused *cbr = BMIPS_GET_CBR();
u32 __maybe_unused cfg;
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 58c6f634b550..d25b8566af98 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -233,12 +233,3 @@ SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op)
{
return -ENOSYS;
}
-
-/*
- * If we ever come here the user sp is bad. Zap the process right away.
- * Due to the bad stack signaling wouldn't work.
- */
-asmlinkage void bad_stack(void)
-{
- do_exit(SIGSEGV);
-}
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index a6ebc8135112..79ebf349aab4 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -22,12 +22,77 @@
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/export.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/div64.h>
#include <asm/time.h>
+#ifdef CONFIG_CPU_FREQ
+
+static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref);
+static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref_freq);
+static unsigned long glb_lpj_ref;
+static unsigned long glb_lpj_ref_freq;
+
+static int cpufreq_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ int cpu;
+ struct cpufreq_freqs *freq = data;
+
+ /*
+ * Skip lpj numbers adjustment if the CPU-freq transition is safe for
+ * the loops delay. (Is this possible?)
+ */
+ if (freq->flags & CPUFREQ_CONST_LOOPS)
+ return NOTIFY_OK;
+
+ /* Save the initial values of the lpjes for future scaling. */
+ if (!glb_lpj_ref) {
+ glb_lpj_ref = boot_cpu_data.udelay_val;
+ glb_lpj_ref_freq = freq->old;
+
+ for_each_online_cpu(cpu) {
+ per_cpu(pcp_lpj_ref, cpu) =
+ cpu_data[cpu].udelay_val;
+ per_cpu(pcp_lpj_ref_freq, cpu) = freq->old;
+ }
+ }
+
+ cpu = freq->cpu;
+ /*
+ * Adjust global lpj variable and per-CPU udelay_val number in
+ * accordance with the new CPU frequency.
+ */
+ if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
+ (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
+ loops_per_jiffy = cpufreq_scale(glb_lpj_ref,
+ glb_lpj_ref_freq,
+ freq->new);
+
+ cpu_data[cpu].udelay_val = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu),
+ per_cpu(pcp_lpj_ref_freq, cpu), freq->new);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_notifier = {
+ .notifier_call = cpufreq_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+ return cpufreq_register_notifier(&cpufreq_notifier,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+
+#endif /* CONFIG_CPU_FREQ */
+
/*
* forward reference
*/
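
The notifier added above rescales loops_per_jiffy and each CPU's udelay_val in proportion to the frequency change, relative to the values captured at the first transition; cpufreq_scale() is essentially a multiply-then-divide. A rough standalone model of that scaling (the reference numbers below are invented for illustration):

#include <stdio.h>

/*
 * Rough model of what the notifier does with cpufreq_scale(): the
 * delay-loop calibration (loops_per_jiffy / udelay_val) is scaled
 * linearly with the clock, relative to the frequency at which the
 * reference value was captured.
 */
static unsigned long scale_lpj(unsigned long ref_lpj,
			       unsigned long ref_khz,
			       unsigned long new_khz)
{
	return (unsigned long)(((unsigned long long)ref_lpj * new_khz) / ref_khz);
}

int main(void)
{
	/* Invented reference numbers: calibration taken at 1.0 GHz. */
	unsigned long ref_lpj = 4980736;
	unsigned long ref_khz = 1000000;

	printf("at  600 MHz: lpj=%lu\n", scale_lpj(ref_lpj, ref_khz,  600000));
	printf("at 1200 MHz: lpj=%lu\n", scale_lpj(ref_lpj, ref_khz, 1200000));
	return 0;
}
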
diff --git a/arch/mips/kernel/topology.c b/arch/mips/kernel/topology.c
index cd3e1f82e1a5..08ad6371fbe0 100644
--- a/arch/mips/kernel/topology.c
+++ b/arch/mips/kernel/topology.c
@@ -20,7 +20,7 @@ static int __init topology_init(void)
for_each_present_cpu(i) {
struct cpu *c = &per_cpu(cpu_devices, i);
- c->hotpluggable = 1;
+ c->hotpluggable = !!i;
ret = register_cpu(c, i);
if (ret)
printk(KERN_WARNING "topology_init: register_cpu %d "
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 583aed906933..4a23d89e251c 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2135,6 +2135,7 @@ static void configure_status(void)
change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
status_set);
+ back_to_back_c0_hazard();
}
unsigned int hwrena;
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 36f2e860ba3e..968c5765020c 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -50,7 +50,7 @@ SECTIONS
/* . = 0xa800000000300000; */
. = 0xffffffff80300000;
#endif
- . = VMLINUX_LOAD_ADDRESS;
+ . = LINKER_LOAD_ADDRESS;
/* read-only */
_text = .; /* Text and read-only data */
.text : {
@@ -93,6 +93,7 @@ SECTIONS
INIT_TASK_DATA(THREAD_SIZE)
NOSAVE_DATA
+ PAGE_ALIGNED_DATA(PAGE_SIZE)
CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
DATA_DATA