Diffstat (limited to 'arch/arm64/mm/context.c'):
 arch/arm64/mm/context.c | 79 ++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 78 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index b5e329fde2dd..aa3e43e87dab 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -24,6 +24,13 @@ static unsigned long *asid_map;
static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
+
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+#define LOCKED_ASIDS_COUNT 128
+
+static u64 locked_asids[LOCKED_ASIDS_COUNT];
+#endif
+
static cpumask_t tlb_flush_pending;
#define ASID_MASK (~GENMASK(asid_bits - 1, 0))
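
For orientation (not from the patch): mm->context.id packs a rollover generation above the hardware ASID, ASID_MASK keeps only the generation bits, and asid & ~ASID_MASK recovers the raw hardware ASID used to index asid_map. A minimal userspace sketch of that split, assuming 16 asid_bits (the kernel probes 8 or 16 at boot):

#include <stdint.h>
#include <stdio.h>

#define ASID_BITS 16   /* assumption for this sketch */
#define ASID_MASK (~(((uint64_t)1 << ASID_BITS) - 1))

int main(void)
{
	/* Generation 3, hardware ASID 0x42, packed like context.id. */
	uint64_t id = ((uint64_t)3 << ASID_BITS) | 0x42;

	printf("generation: %llu\n", (unsigned long long)(id >> ASID_BITS));
	printf("hw ASID:    %#llx\n", (unsigned long long)(id & ~ASID_MASK));
	return 0;
}
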
@@ -100,6 +107,14 @@ static void flush_context(void)
per_cpu(reserved_asids, i) = asid;
}
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+ /* Set bits for locked ASIDs. */
+ for (i = 0; i < LOCKED_ASIDS_COUNT; i++) {
+ asid = locked_asids[i];
+ if (asid != 0)
+ __set_bit(asid & ~ASID_MASK, asid_map);
+ }
+#endif
/*
 * Queue a TLB invalidation for each CPU to perform on next
 * context-switch
 */
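
Context for the hunk above: flush_context() runs on generation rollover, wipes asid_map, and normally re-marks only each CPU's reserved ASID; the added loop also re-marks every locked ASID so new_context() cannot hand one to a different mm. A slot value of 0 means "unused", so ASID 0 itself can never be locked. A compressed userspace analogue (hypothetical, shrunk to a 64-entry ASID space):

#include <stdint.h>

#define NUM_ASIDS 64
#define LOCKED_ASIDS_COUNT 4

static uint64_t asid_map;                 /* bit n set => ASID n in use */
static uint64_t locked_asids[LOCKED_ASIDS_COUNT];

static void rollover(void)
{
	int i;

	asid_map = 0;                     /* wipe all allocations */

	/* ...re-mark each CPU's reserved ASID here, as in the kernel... */

	/* Re-mark locked ASIDs; the low bits select the bitmap index,
	 * mirroring asid & ~ASID_MASK in the hunk above. */
	for (i = 0; i < LOCKED_ASIDS_COUNT; i++)
		if (locked_asids[i])
			asid_map |= (uint64_t)1 << (locked_asids[i] & (NUM_ASIDS - 1));
}
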
@@ -107,9 +122,61 @@ static void flush_context(void)
cpumask_setall(&tlb_flush_pending);
}
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+int lock_context(struct mm_struct *mm, int index)
+{
+ unsigned long flags;
+ u64 asid;
+
+ if ((index < 0) || (index >= LOCKED_ASIDS_COUNT))
+ return -1;
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+ asid = atomic64_read(&mm->context.id);
+ locked_asids[index] = asid;
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(lock_context);
+
+int unlock_context_by_index(int index)
+{
+ unsigned long flags;
+
+ if ((index < 0) || (index >= LOCKED_ASIDS_COUNT))
+ return -1;
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+ locked_asids[index] = 0;
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(unlock_context_by_index);
+
+bool unlock_context_by_mm(struct mm_struct *mm)
+{
+ int i;
+ unsigned long flags;
+ bool hit = false;
+ u64 asid;
+
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+ asid = atomic64_read(&mm->context.id);
+
+ for (i = 0; i < LOCKED_ASIDS_COUNT; i++) {
+ if (locked_asids[i] == asid) {
+ hit = true;
+ locked_asids[i] = 0;
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+
+ return hit;
+}
+#endif
+
static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
- int cpu;
+ int i, cpu;
bool hit = false;
/*
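
A hypothetical caller of the interface added above (not part of this patch): a driver delivering interrupts to EL0 could pin the current process's ASID in a fixed slot while the process is registered, and drop the pin on teardown; unlock_context_by_mm() suits an mm-teardown path, since it clears every slot still pointing at the dying mm. The slot index, function names, and error codes below are illustrative only:

#include <linux/sched.h>
#include <linux/errno.h>

/* Prototypes for the exports above; an out-of-tree caller would need
 * a header providing these (assumed here). */
int lock_context(struct mm_struct *mm, int index);
int unlock_context_by_index(int index);

static int example_register_el0_intr(void)
{
	/* Pin current->mm's ASID in slot 0 so a rollover cannot retire it. */
	if (lock_context(current->mm, 0) < 0)
		return -EINVAL;
	return 0;
}

static void example_unregister_el0_intr(void)
{
	unlock_context_by_index(0);       /* slot 0 becomes reusable */
}
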
@@ -128,6 +195,16 @@ static bool check_update_reserved_asid(u64 asid, u64 newasid)
}
}
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+ /* Same mechanism for locked ASIDs */
+ for (i = 0; i < LOCKED_ASIDS_COUNT; i++) {
+ if (locked_asids[i] == asid) {
+ hit = true;
+ locked_asids[i] = newasid;
+ }
+ }
+#endif
+
return hit;
}
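
Worked example for the hunk above (values hypothetical): check_update_reserved_asid() runs when a task shows up with a stale generation but may keep its hardware ASID; the added loop rewrites any matching locked slot to the new generation, so a pin follows its mm across rollovers exactly as the per-CPU reserved_asids entries do. With 16 asid_bits:

#include <stdint.h>
#include <assert.h>

#define ASID_BITS 16

int main(void)
{
	/* Rollover bumps the generation from 3 to 4; hw ASID 0x42 is kept. */
	uint64_t asid    = ((uint64_t)3 << ASID_BITS) | 0x42;  /* stale id */
	uint64_t newasid = ((uint64_t)4 << ASID_BITS) | 0x42;  /* fresh id */
	uint64_t slot    = asid;        /* locked before the rollover */

	if (slot == asid)               /* the comparison the loop performs */
		slot = newasid;         /* pin now carries the new generation */

	assert((slot & ((1ULL << ASID_BITS) - 1)) == 0x42);
	return 0;
}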