Diffstat (limited to 'kernel/cpu.c')
 kernel/cpu.c | 67 +++++++++++++++++++++++++++++++++++
 1 file changed, 67 insertions(+), 0 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 59e418f192d2..edbb3edc6d83 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -75,6 +75,11 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
.fail = CPUHP_INVALID,
};
+#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PREEMPT_RT_FULL)
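+/*
+ * Per-CPU reader/writer lock: pin_current_cpu() takes it for read to
+ * hold off unplug of the CPU, takedown_cpu() takes it for write to
+ * wait until all pinned tasks have left.
+ */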
+static DEFINE_PER_CPU(struct rt_rw_lock, cpuhp_pin_lock) = \
+ __RWLOCK_RT_INITIALIZER(cpuhp_pin_lock);
+#endif
+
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
@@ -284,6 +289,55 @@ static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU
+/**
+ * pin_current_cpu - Prevent the current cpu from being unplugged
+ */
+void pin_current_cpu(void)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+ struct rt_rw_lock *cpuhp_pin;
+ unsigned int cpu;
+ int ret;
+
+again:
+ cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock);
+ ret = __read_rt_trylock(cpuhp_pin);
+ if (ret) {
+ current->pinned_on_cpu = smp_processor_id();
+ return;
+ }
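+ /*
+  * The trylock failed, so a hotplug writer holds the lock. Note the
+  * current CPU, re-enable preemption and sleep on the lock; the task
+  * may wake up on a different CPU.
+  */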
+ cpu = smp_processor_id();
+ preempt_lazy_enable();
+ preempt_enable();
+
+ __read_rt_lock(cpuhp_pin);
+
+ preempt_disable();
+ preempt_lazy_disable();
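+ /*
+  * If the task migrated while it slept, the read lock pins the wrong
+  * CPU: drop it and retry on the CPU we now run on.
+  */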
+ if (cpu != smp_processor_id()) {
+ __read_rt_unlock(cpuhp_pin);
+ goto again;
+ }
+ current->pinned_on_cpu = cpu;
+#endif
+}
+
+/**
+ * unpin_current_cpu - Allow unplug of current cpu
+ */
+void unpin_current_cpu(void)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+ struct rt_rw_lock *cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock);
+
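+ /*
+  * If the task was migrated while pinned (which should not happen),
+  * unlock the lock of the CPU it was actually pinned on.
+  */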
+ if (WARN_ON(current->pinned_on_cpu != smp_processor_id()))
+ cpuhp_pin = per_cpu_ptr(&cpuhp_pin_lock, current->pinned_on_cpu);
+
+ current->pinned_on_cpu = -1;
+ __read_rt_unlock(cpuhp_pin);
+#endif
+}
+
DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
void cpus_read_lock(void)
@@ -871,6 +925,9 @@ static int take_cpu_down(void *_param)
static int takedown_cpu(unsigned int cpu)
{
+#ifdef CONFIG_PREEMPT_RT_FULL
+ struct rt_rw_lock *cpuhp_pin = per_cpu_ptr(&cpuhp_pin_lock, cpu);
+#endif
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;
@@ -883,11 +940,18 @@ static int takedown_cpu(unsigned int cpu)
*/
irq_lock_sparse();
+#ifdef CONFIG_PREEMPT_RT_FULL
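+ /* Wait until all tasks pinned to the outgoing CPU have unpinned. */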
+ __write_rt_lock(cpuhp_pin);
+#endif
+
/*
* So now all preempt/rcu users must observe !cpu_active().
*/
err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
if (err) {
+#ifdef CONFIG_PREEMPT_RT_FULL
+ __write_rt_unlock(cpuhp_pin);
+#endif
/* CPU refused to die */
irq_unlock_sparse();
/* Unpark the hotplug thread so we can rollback there */
@@ -906,6 +970,9 @@ static int takedown_cpu(unsigned int cpu)
wait_for_ap_thread(st, false);
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
+#ifdef CONFIG_PREEMPT_RT_FULL
+ __write_rt_unlock(cpuhp_pin);
+#endif
/* Interrupts are moved away from the dying cpu, reenable alloc/free */
irq_unlock_sparse();
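
For context, the sketch below shows how a caller would pair these helpers.
It is illustrative only and not part of the patch: the function names and
the elided bookkeeping are hypothetical, assuming the RT tree's
migrate_disable()/migrate_enable() style of use, where pin_current_cpu() is
entered with preemption disabled and the pin is then held across a
preemptible section. (The real call sites also manage lazy preemption,
which the sketch elides.)

	/* Hypothetical caller, not part of this patch. */
	static void my_migrate_disable(void)
	{
		preempt_disable();
		pin_current_cpu();	/* read-lock this CPU's cpuhp_pin_lock */
		/* ... record migrate-disable state (elided) ... */
		preempt_enable();	/* preemptible again, but still pinned */
	}

	static void my_migrate_enable(void)
	{
		preempt_disable();
		/* ... clear migrate-disable state (elided) ... */
		unpin_current_cpu();	/* drop the read lock */
		preempt_enable();
	}

Because the pin is a per-CPU read lock rather than a preemption disable, a
pinned task stays preemptible and may sleep on PREEMPT_RT_FULL while
takedown_cpu() is still held off, which is the point of the patch.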