path: root/kernel/sched
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-08 17:45:29 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-08 17:45:29 +0000
commit     83506c85f8d4332b3edfdc8f1fd07aa691415350 (patch)
tree       316b9630e093bb3b80e5d6e1c304151b5597901e /kernel/sched
parent     Adding upstream version 5.10.209. (diff)
Adding upstream version 5.10.216. (tag: upstream/5.10.216)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/membarrier.c |  9 +++++++++
-rw-r--r--  kernel/sched/rt.c         | 10 +++++-----
2 files changed, 14 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index cc7cd512e..1b7c3bdba 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -34,6 +34,8 @@
         | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \
         | MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK)
 
+static DEFINE_MUTEX(membarrier_ipi_mutex);
+
 static void ipi_mb(void *info)
 {
         smp_mb();      /* IPIs should be serializing but paranoid. */
@@ -119,6 +121,7 @@ static int membarrier_global_expedited(void)
         if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
                 return -ENOMEM;
 
+        mutex_lock(&membarrier_ipi_mutex);
         cpus_read_lock();
         rcu_read_lock();
         for_each_online_cpu(cpu) {
@@ -165,6 +168,8 @@ static int membarrier_global_expedited(void)
          * rq->curr modification in scheduler.
          */
         smp_mb();      /* exit from system call is not a mb */
+        mutex_unlock(&membarrier_ipi_mutex);
+
         return 0;
 }
 
@@ -208,6 +213,7 @@ static int membarrier_private_expedited(int flags, int cpu_id)
         if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
                 return -ENOMEM;
 
+        mutex_lock(&membarrier_ipi_mutex);
         cpus_read_lock();
 
         if (cpu_id >= 0) {
@@ -280,6 +286,7 @@ out:
          * rq->curr modification in scheduler.
          */
         smp_mb();      /* exit from system call is not a mb */
+        mutex_unlock(&membarrier_ipi_mutex);
         return 0;
 }
 
@@ -321,6 +328,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
          * between threads which are users of @mm has its membarrier state
          * updated.
          */
+        mutex_lock(&membarrier_ipi_mutex);
         cpus_read_lock();
         rcu_read_lock();
         for_each_online_cpu(cpu) {
@@ -337,6 +345,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
 
         free_cpumask_var(tmpmask);
         cpus_read_unlock();
+        mutex_unlock(&membarrier_ipi_mutex);
 
         return 0;
 }
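
Note on the membarrier.c hunks above: the new membarrier_ipi_mutex serializes every path that broadcasts IPIs, so concurrent membarrier() callers now take turns instead of hammering all CPUs with back-to-back IPI storms. A minimal sketch of the pattern, assuming kernel context; broadcast_mb() and the plain smp_call_function() broadcast are illustrative stand-ins, not the actual call sites, which target CPUs via per-runqueue cpumasks:

#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/smp.h>

static DEFINE_MUTEX(membarrier_ipi_mutex);

static void ipi_mb(void *info)
{
        smp_mb();      /* IPIs should be serializing but paranoid. */
}

static void broadcast_mb(void)
{
        /* Only one caller at a time may send the IPI flood. */
        mutex_lock(&membarrier_ipi_mutex);
        cpus_read_lock();                       /* hold off CPU hotplug */
        smp_call_function(ipi_mb, NULL, 1);     /* wait for every CPU to ack */
        cpus_read_unlock();
        mutex_unlock(&membarrier_ipi_mutex);
}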
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index f690f901b..1289991c9 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -8,7 +8,7 @@
 #include "pelt.h"
 
 int sched_rr_timeslice = RR_TIMESLICE;
-int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
+int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;
 /* More than 4 hours if BW_SHIFT equals 20. */
 static const u64 max_rt_runtime = MAX_BW;
 
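
Aside on the initializer hunk above: the old expression divides MSEC_PER_SEC by HZ first, so on configs where HZ does not divide 1000 evenly the integer truncation understates the default timeslice; multiplying first keeps full precision. A standalone userspace demonstration (HZ=300 is just an example of such a config):

#include <stdio.h>

#define MSEC_PER_SEC    1000
#define HZ              300                     /* 1000 % HZ != 0 */
#define RR_TIMESLICE    (100 * HZ / 1000)       /* default 100ms timeslice, in jiffies */

int main(void)
{
        printf("divide first:   %d ms\n", (MSEC_PER_SEC / HZ) * RR_TIMESLICE);  /* 90 */
        printf("multiply first: %d ms\n", (MSEC_PER_SEC * RR_TIMESLICE) / HZ);  /* 100 */
        return 0;
}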
@@ -2727,9 +2727,6 @@ static int sched_rt_global_constraints(void)
 
 static int sched_rt_global_validate(void)
 {
-        if (sysctl_sched_rt_period <= 0)
-                return -EINVAL;
-
         if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
                 ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
                  ((u64)sysctl_sched_rt_runtime *
@@ -2760,7 +2757,7 @@ int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
         old_period = sysctl_sched_rt_period;
         old_runtime = sysctl_sched_rt_runtime;
 
-        ret = proc_dointvec(table, write, buffer, lenp, ppos);
+        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 
         if (!ret && write) {
                 ret = sched_rt_global_validate();
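
Note on the two rt.c hunks above: the explicit sysctl_sched_rt_period <= 0 check can go away because proc_dointvec_minmax() rejects writes outside the [extra1, extra2] bounds of the ctl_table entry with -EINVAL before sched_rt_handler()'s own validation runs. The companion table change lives outside this diffstat (which is limited to kernel/sched), so the following entry is only an assumed sketch of the shape it takes, not the actual hunk:

#include <linux/sysctl.h>

extern unsigned int sysctl_sched_rt_period;
extern int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
                            size_t *lenp, loff_t *ppos);

/* ASSUMED sketch of the sched_rt_period_us entry with minmax bounds. */
static struct ctl_table sched_rt_sketch[] = {
        {
                .procname       = "sched_rt_period_us",
                .data           = &sysctl_sched_rt_period,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = sched_rt_handler,
                .extra1         = SYSCTL_ONE,           /* assumed: period must be >= 1 */
                .extra2         = SYSCTL_INT_MAX,       /* assumed: and fit in an int */
        },
        { }
};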
@@ -2804,6 +2801,9 @@ int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
                 sched_rr_timeslice =
                         sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
                         msecs_to_jiffies(sysctl_sched_rr_timeslice);
+
+                if (sysctl_sched_rr_timeslice <= 0)
+                        sysctl_sched_rr_timeslice = jiffies_to_msecs(RR_TIMESLICE);
         }
         mutex_unlock(&mutex);
 
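
Finally, the sched_rr_handler() hunk: writing a value <= 0 has always reset the internal sched_rr_timeslice to the default, but the sysctl used to read back whatever non-positive value was written; the added lines make a subsequent read report the default in milliseconds instead. A userspace simulation of the new semantics (the _sim helpers and write_rr_timeslice() are hypothetical stand-ins for the kernel's msecs_to_jiffies()/jiffies_to_msecs() and the handler itself):

#include <stdio.h>

#define MSEC_PER_SEC    1000
#define HZ              250
#define RR_TIMESLICE    (100 * HZ / 1000)       /* default: 100ms, in jiffies */

static int sched_rr_timeslice_j;        /* kernel-internal value, in jiffies */
static int sysctl_rr_timeslice_ms;      /* what the sysctl reads back, in ms */

static int msecs_to_jiffies_sim(int ms) { return ms * HZ / MSEC_PER_SEC; }
static int jiffies_to_msecs_sim(int j)  { return j * MSEC_PER_SEC / HZ; }

static void write_rr_timeslice(int ms)
{
        sysctl_rr_timeslice_ms = ms;
        sched_rr_timeslice_j = ms <= 0 ? RR_TIMESLICE : msecs_to_jiffies_sim(ms);
        if (sysctl_rr_timeslice_ms <= 0)        /* the added reset-reporting */
                sysctl_rr_timeslice_ms = jiffies_to_msecs_sim(RR_TIMESLICE);
}

int main(void)
{
        write_rr_timeslice(-1);
        printf("read back after writing -1: %d ms\n", sysctl_rr_timeslice_ms); /* 100 */
        return 0;
}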