author	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-04 10:15:33 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-04 10:15:33 +0000
commit	a097cf30108c0151cb2684afd962bd2678ff97d7 (patch)
tree	de1ac8afc7248eec4b2fb60496b8e9273452d9f7 /kernel/rcu
parent	Adding upstream version 6.1.82. (diff)
Adding upstream version 6.1.85.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/tree.c	3
-rw-r--r--	kernel/rcu/tree_exp.h	25
2 files changed, 22 insertions, 6 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9d7464a90..61f9503a5 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -4465,13 +4465,16 @@ static void __init rcu_start_exp_gp_kworkers(void)
 	rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name);
 	if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
 		pr_err("Failed to create %s!\n", gp_kworker_name);
+		rcu_exp_gp_kworker = NULL;
 		return;
 	}
 
 	rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
 	if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
 		pr_err("Failed to create %s!\n", par_gp_kworker_name);
+		rcu_exp_par_gp_kworker = NULL;
 		kthread_destroy_worker(rcu_exp_gp_kworker);
+		rcu_exp_gp_kworker = NULL;
 		return;
 	}
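The tree.c hunk makes the failure paths of rcu_start_exp_gp_kworkers() leave rcu_exp_gp_kworker and rcu_exp_par_gp_kworker as NULL rather than as ERR_PTR() values or a pointer to an already-destroyed worker, so the readiness helpers added below can simply test the pointers. A minimal userspace sketch of that pattern, with hypothetical names (create_worker(), start_workers()) standing in for the kernel APIs:

#include <stdio.h>
#include <stdlib.h>

struct worker { int id; };

static struct worker *gp_worker;	/* stands in for rcu_exp_gp_kworker */
static struct worker *par_gp_worker;	/* stands in for rcu_exp_par_gp_kworker */

/* Hypothetical stand-in for kthread_create_worker(); may fail. */
static struct worker *create_worker(int id, int fail)
{
	struct worker *w;

	if (fail)
		return NULL;
	w = malloc(sizeof(*w));
	if (w)
		w->id = id;
	return w;
}

/* Mirrors the patched error handling: on any failure, put the globals
 * back to NULL so a later "started?" pointer check stays consistent. */
static void start_workers(int fail_second)
{
	gp_worker = create_worker(1, 0);
	if (!gp_worker)
		return;			/* gp_worker is already NULL */

	par_gp_worker = create_worker(2, fail_second);
	if (!par_gp_worker) {
		free(gp_worker);	/* like kthread_destroy_worker() */
		gp_worker = NULL;	/* like rcu_exp_gp_kworker = NULL */
		return;
	}
}

int main(void)
{
	start_workers(1);
	printf("gp started: %d, par started: %d\n",
	       gp_worker != NULL, par_gp_worker != NULL);
	return 0;
}

With fail_second set, the sketch reports both workers as not started, mirroring how a partially failed setup must not leave one pointer valid and the other dangling.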
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 6d2cbed96..75e8d9652 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -427,7 +427,12 @@ static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp)
 	__sync_rcu_exp_select_node_cpus(rewp);
 }
 
-static inline bool rcu_gp_par_worker_started(void)
+static inline bool rcu_exp_worker_started(void)
+{
+	return !!READ_ONCE(rcu_exp_gp_kworker);
+}
+
+static inline bool rcu_exp_par_worker_started(void)
 {
 	return !!READ_ONCE(rcu_exp_par_gp_kworker);
 }
@@ -477,7 +482,12 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 	__sync_rcu_exp_select_node_cpus(rewp);
 }
 
-static inline bool rcu_gp_par_worker_started(void)
+static inline bool rcu_exp_worker_started(void)
+{
+	return !!READ_ONCE(rcu_gp_wq);
+}
+
+static inline bool rcu_exp_par_worker_started(void)
 {
 	return !!READ_ONCE(rcu_par_gp_wq);
 }
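tree_exp.h carries two variants of these helpers: the kthread_worker-based one above tests the kworker pointers that tree.c now guarantees are either valid or NULL, while the workqueue-based one tests rcu_gp_wq and rcu_par_gp_wq. In both cases the check is a lockless pointer read through READ_ONCE(). A hedged userspace sketch of that "published-once pointer, read locklessly" pattern, using a C11 relaxed atomic load as a stand-in for READ_ONCE() (exp_worker and exp_worker_started() are illustrative names, not kernel symbols):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-in for the kernel pattern: a pointer that is published
 * once during init and then read locklessly. The relaxed atomic load
 * plays the role of READ_ONCE(). */
static _Atomic(void *) exp_worker;

static bool exp_worker_started(void)
{
	return atomic_load_explicit(&exp_worker, memory_order_relaxed) != NULL;
}

int main(void)
{
	static int dummy_worker;	/* pretend worker object */

	printf("before init: %d\n", exp_worker_started());	/* 0 */
	atomic_store_explicit(&exp_worker, (void *)&dummy_worker,
			      memory_order_relaxed);
	printf("after init:  %d\n", exp_worker_started());	/* 1 */
	return 0;
}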
@@ -540,7 +550,7 @@ static void sync_rcu_exp_select_cpus(void)
 		rnp->exp_need_flush = false;
 		if (!READ_ONCE(rnp->expmask))
 			continue; /* Avoid early boot non-existent wq. */
-		if (!rcu_gp_par_worker_started() ||
+		if (!rcu_exp_par_worker_started() ||
 		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
 		    rcu_is_last_leaf_node(rnp)) {
 			/* No worker started yet or last leaf, do direct call. */
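With the rename, sync_rcu_exp_select_cpus() keeps its fallback: if the parallel worker is not available (now including the case where its creation failed), or the scheduler is not fully running, or this is the last leaf node, the per-node CPU selection runs as a direct call instead of being queued. A small sketch of that "use the worker if present, otherwise do the work inline" decision, with made-up names (select_cpus(), do_select_node_cpus()):

#include <stdbool.h>
#include <stdio.h>

static bool par_worker_started;	/* stands in for rcu_exp_par_worker_started() */
static bool scheduler_running;	/* stands in for the rcu_scheduler_active check */

static void do_select_node_cpus(int node)
{
	printf("node %d: selecting CPUs directly\n", node);
}

/* Mirrors the patched decision: hand work to the parallel worker only
 * when one exists and the scheduler is fully up; otherwise call directly. */
static void select_cpus(int node, bool last_leaf)
{
	if (!par_worker_started || !scheduler_running || last_leaf) {
		do_select_node_cpus(node);	/* direct call */
		return;
	}
	printf("node %d: queued to parallel worker\n", node);
}

int main(void)
{
	select_cpus(0, false);		/* no worker yet: direct */
	par_worker_started = true;
	scheduler_running = true;
	select_cpus(1, false);		/* queued */
	select_cpus(2, true);		/* last leaf: direct */
	return 0;
}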
@@ -910,7 +920,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  */
 void synchronize_rcu_expedited(void)
 {
-	bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
+	bool use_worker;
 	unsigned long flags;
 	struct rcu_exp_work rew;
 	struct rcu_node *rnp;
@@ -921,6 +931,9 @@ void synchronize_rcu_expedited(void)
 			 lock_is_held(&rcu_sched_lock_map),
 			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
 
+	use_worker = (rcu_scheduler_active != RCU_SCHEDULER_INIT) &&
+		      rcu_exp_worker_started();
+
 	/* Is the state is such that the call is a grace period? */
 	if (rcu_blocking_is_gp()) {
 		// Note well that this code runs with !PREEMPT && !SMP.
@@ -950,7 +963,7 @@ void synchronize_rcu_expedited(void)
 		return; /* Someone else did our work for us. */
 
 	/* Ensure that load happens before action based on it. */
-	if (unlikely(boottime)) {
+	if (unlikely(!use_worker)) {
 		/* Direct call during scheduler init and early_initcalls(). */
 		rcu_exp_sel_wait_wake(s);
 	} else {
@@ -968,7 +981,7 @@ void synchronize_rcu_expedited(void)
 	/* Let the next expedited grace period start. */
 	mutex_unlock(&rcu_state.exp_mutex);
 
-	if (likely(!boottime))
+	if (likely(use_worker))
 		synchronize_rcu_expedited_destroy_work(&rew);
 }
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
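Taken together, synchronize_rcu_expedited() now keys every hand-off on a single use_worker flag: the worker path is taken only when the scheduler has left the INIT state and rcu_exp_worker_started() confirms the kworker actually exists; otherwise the expedited grace period is driven directly in the caller's context and no work item is queued or destroyed. A hedged, self-contained sketch of that dispatch (scheduler_state, exp_sel_wait_wake(), queue_exp_work() are illustrative stand-ins, not the kernel functions):

#include <stdbool.h>
#include <stdio.h>

static enum { SCHED_INIT, SCHED_RUNNING } scheduler_state = SCHED_INIT;
static bool exp_worker_ok;	/* stands in for rcu_exp_worker_started() */

static void exp_sel_wait_wake(void)
{
	printf("expedited GP driven directly in the caller's context\n");
}

static void queue_exp_work(void)
{
	printf("expedited GP handed off to the kworker\n");
}

/* Mirrors the patched dispatch: the worker is used only when the scheduler
 * is past init AND the worker was successfully created. */
static void synchronize_expedited(void)
{
	bool use_worker = (scheduler_state != SCHED_INIT) && exp_worker_ok;

	if (!use_worker)
		exp_sel_wait_wake();	/* direct call */
	else
		queue_exp_work();	/* later waited on and destroyed */
}

int main(void)
{
	synchronize_expedited();	/* early boot: direct */
	scheduler_state = SCHED_RUNNING;
	synchronize_expedited();	/* running, but kworker failed: direct */
	exp_worker_ok = true;
	synchronize_expedited();	/* normal case: worker path */
	return 0;
}

Run in sequence, the sketch hits the direct path twice (early boot, and a running scheduler whose kworker failed to spawn) before taking the worker path, which is exactly the distinction the old boottime flag could not express.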