path: root/debian/patches-rt/0227-cpu-hotplug-Implement-CPU-pinning.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 19 Jul 2017 17:31:20 +0200
Subject: [PATCH 227/351] cpu/hotplug: Implement CPU pinning
Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=9f0de8987ce46748aebdeb23f492c0775194b0bc

A task that has pinned itself with pin_current_cpu() must not have its
CPU brought down underneath it. Add a per-CPU reader/writer lock:
pin_current_cpu() takes the reader side, records the CPU in
task_struct::pinned_on_cpu, and retries against the new CPU's lock if
the task was migrated while sleeping on it; unpin_current_cpu() drops
the lock again. takedown_cpu() takes the writer side around
stop_machine(), so a CPU cannot go away while any task is pinned to it.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/sched.h |  1 +
 kernel/cpu.c          | 38 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 39 insertions(+)
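
The reader side added below follows a lock-and-recheck retry pattern:
try the current CPU's lock without sleeping; if that fails, give up
preemption protection, sleep on the lock, and afterwards verify the
task is still on the CPU whose lock it took, retrying otherwise. As a
rough illustration, here is a user-space sketch of that shape using
POSIX rwlocks. It is a sketch only: all names are hypothetical, and
unlike the kernel code below it has no preempt_disable(), so its fast
path is not actually migration-safe.

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

#define NR_CPUS_SKETCH 64	/* hypothetical CPU count for the sketch */

/* one reader/writer lock per CPU, standing in for cpuhp_pin_lock */
static pthread_rwlock_t pin_lock[NR_CPUS_SKETCH] = {
	[0 ... NR_CPUS_SKETCH - 1] = PTHREAD_RWLOCK_INITIALIZER
};

/* stands in for task_struct::pinned_on_cpu */
static __thread int pinned_on_cpu = -1;

static void pin_current_cpu_sketch(void)
{
	int cpu;

again:
	cpu = sched_getcpu();
	/* fast path: the reader lock is free unless hotplug is in flight */
	if (pthread_rwlock_tryrdlock(&pin_lock[cpu]) == 0) {
		pinned_on_cpu = cpu;
		return;
	}
	/* slow path: sleep until the hotplug writer lets readers back in */
	pthread_rwlock_rdlock(&pin_lock[cpu]);
	/* the task may have migrated while sleeping; retry if so */
	if (cpu != sched_getcpu()) {
		pthread_rwlock_unlock(&pin_lock[cpu]);
		goto again;
	}
	pinned_on_cpu = cpu;
}

static void unpin_current_cpu_sketch(void)
{
	/*
	 * Unlock the lock that was actually taken. The kernel version
	 * additionally WARNs when pinned_on_cpu and the current CPU
	 * disagree, since pinning is meant to prevent exactly that.
	 */
	int cpu = pinned_on_cpu;

	pinned_on_cpu = -1;
	pthread_rwlock_unlock(&pin_lock[cpu]);
}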

diff --git a/include/linux/sched.h b/include/linux/sched.h
index b32eb75dd73b..19eb4838cf03 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -675,6 +675,7 @@ struct task_struct {
 #if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
 	int				migrate_disable;
 	int				migrate_disable_update;
+	int				pinned_on_cpu;
 # ifdef CONFIG_SCHED_DEBUG
 	int				migrate_disable_atomic;
 # endif
diff --git a/kernel/cpu.c b/kernel/cpu.c
index f4c49bbc3fa3..56cb58fce84c 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -78,6 +78,11 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
 	.fail = CPUHP_INVALID,
 };
 
+#ifdef CONFIG_HOTPLUG_CPU
+static DEFINE_PER_CPU(struct rt_rw_lock, cpuhp_pin_lock) = \
+	__RWLOCK_RT_INITIALIZER(cpuhp_pin_lock);
+#endif
+
 #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
 static struct lockdep_map cpuhp_state_up_map =
 	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
@@ -289,7 +294,28 @@ static int cpu_hotplug_disabled;
  */
 void pin_current_cpu(void)
 {
+	struct rt_rw_lock *cpuhp_pin;
+	unsigned int cpu;
+	int ret;
+
+again:
+	cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock);
+	ret = __read_rt_trylock(cpuhp_pin);
+	if (ret) {
+		current->pinned_on_cpu = smp_processor_id();
+		return;
+	}
+	cpu = smp_processor_id();
+	preempt_enable();
+
+	__read_rt_lock(cpuhp_pin);
 
+	preempt_disable();
+	if (cpu != smp_processor_id()) {
+		__read_rt_unlock(cpuhp_pin);
+		goto again;
+	}
+	current->pinned_on_cpu = cpu;
 }
 
 /**
@@ -297,6 +323,13 @@ void pin_current_cpu(void)
  */
 void unpin_current_cpu(void)
 {
+	struct rt_rw_lock *cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock);
+
+	if (WARN_ON(current->pinned_on_cpu != smp_processor_id()))
+		cpuhp_pin = per_cpu_ptr(&cpuhp_pin_lock, current->pinned_on_cpu);
+
+	current->pinned_on_cpu = -1;
+	__read_rt_unlock(cpuhp_pin);
 }
 
 DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
@@ -921,6 +954,7 @@ static int take_cpu_down(void *_param)
 
 static int takedown_cpu(unsigned int cpu)
 {
+	struct rt_rw_lock *cpuhp_pin = per_cpu_ptr(&cpuhp_pin_lock, cpu);
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 	int err;
 
@@ -933,11 +967,14 @@ static int takedown_cpu(unsigned int cpu)
 	 */
 	irq_lock_sparse();
 
+	__write_rt_lock(cpuhp_pin);
+
 	/*
 	 * So now all preempt/rcu users must observe !cpu_active().
 	 */
 	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
 	if (err) {
+		__write_rt_unlock(cpuhp_pin);
 		/* CPU refused to die */
 		irq_unlock_sparse();
 		/* Unpark the hotplug thread so we can rollback there */
@@ -956,6 +993,7 @@ static int takedown_cpu(unsigned int cpu)
 	wait_for_ap_thread(st, false);
 	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
 
+	__write_rt_unlock(cpuhp_pin);
 	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
 	irq_unlock_sparse();
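
For the writer side, takedown_cpu() above takes the per-CPU lock for
writing around stop_machine_cpuslocked(), so the CPU cannot be torn
down while any task still holds a reader pin on it, and newly arriving
pinners sleep until the operation completes (or until the early
__write_rt_unlock() on the rollback path when the CPU refused to die).
In the terms of the sketch after the diffstat, the equivalent would be
roughly:

static void takedown_cpu_sketch(int cpu)
{
	/* blocks until every task pinned to this CPU has unpinned */
	pthread_rwlock_wrlock(&pin_lock[cpu]);
	/* ... take the CPU down; no new pins can be acquired ... */
	pthread_rwlock_unlock(&pin_lock[cpu]);
}

One detail the sketch cannot reproduce: in the kernel, the trylock fast
path runs with preemption disabled, so the task cannot migrate between
smp_processor_id() and __read_rt_trylock(); that guarantee is what
makes the recorded pinned_on_cpu value reliable.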