From: "Paul E. McKenney" <paulmck@linux.ibm.com>
Date: Mon, 29 Oct 2018 11:53:01 +0100
Subject: [PATCH 023/353] EXP rcu: Revert expedited GP parallelization
 cleverness
Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=4fc273a9ba585393a5651eade8a6f01a83743594

(Commit 258ba8e089db23f760139266c232f01bad73f85c from linux-rcu)

This commit reverts commit fcc635436501 ("rcu: Make expedited GPs
handle CPU 0 being offline") and its successors, thus queueing each
rcu_node structure's expedited grace-period initialization work on the
first CPU of that rcu_node structure.

Suggested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/rcu/tree_exp.h | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index fa6ae9ed2e1d..289a26b81fd7 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -472,7 +472,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 				     smp_call_func_t func)
 {
-	int cpu;
 	struct rcu_node *rnp;
 
 	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
@@ -494,13 +493,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 			continue;
 		}
 		INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
-		preempt_disable();
-		cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
-		/* If all offline, queue the work on an unbound CPU. */
-		if (unlikely(cpu > rnp->grphi))
-			cpu = WORK_CPU_UNBOUND;
-		queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
-		preempt_enable();
+		queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work);
 		rnp->exp_need_flush = true;
 	}