Diffstat (limited to 'debian/patches-rt/0014-cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch')
-rw-r--r-- | debian/patches-rt/0014-cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch | 50 |
1 file changed, 50 insertions, 0 deletions
diff --git a/debian/patches-rt/0014-cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch b/debian/patches-rt/0014-cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch
new file mode 100644
index 000000000..6d9213b5b
--- /dev/null
+++ b/debian/patches-rt/0014-cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch
@@ -0,0 +1,50 @@
+From 9cbe69b64cc03234634926f56a219900edb90899 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 3 Jul 2018 18:19:48 +0200
+Subject: [PATCH 014/347] cgroup: use irqsave in cgroup_rstat_flush_locked()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+All callers of cgroup_rstat_flush_locked() acquire cgroup_rstat_lock
+either with spin_lock_irq() or spin_lock_irqsave().
+cgroup_rstat_flush_locked() itself acquires cgroup_rstat_cpu_lock which
+is a raw_spin_lock. This lock is also acquired in cgroup_rstat_updated()
+in IRQ context and therefore requires _irqsave() locking suffix in
+cgroup_rstat_flush_locked().
+Since there is no difference between spin_lock_t and raw_spin_lock_t
+on !RT lockdep does not complain here. On RT lockdep complains because
+the interrupts were not disabled here and a deadlock is possible.
+
+Acquire the raw_spin_lock_t with disabled interrupts.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/cgroup/rstat.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
+index d0ed410b4127..3c949c46c6b3 100644
+--- a/kernel/cgroup/rstat.c
++++ b/kernel/cgroup/rstat.c
+@@ -149,8 +149,9 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
+ 	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
+ 					       cpu);
+ 	struct cgroup *pos = NULL;
++	unsigned long flags;
+ 
+-	raw_spin_lock(cpu_lock);
++	raw_spin_lock_irqsave(cpu_lock, flags);
+ 	while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
+ 		struct cgroup_subsys_state *css;
+ 
+@@ -162,7 +163,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
+ 			css->ss->css_rstat_flush(css, cpu);
+ 		rcu_read_unlock();
+ 	}
+-	raw_spin_unlock(cpu_lock);
++	raw_spin_unlock_irqrestore(cpu_lock, flags);
+ 
+ 	/* if @may_sleep, play nice and yield if necessary */
+ 	if (may_sleep && (need_resched() ||
+-- 
+2.36.1
+
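
The commit message above describes a general locking rule: a raw spinlock that is also taken from IRQ context must be acquired with interrupts disabled in process context, otherwise an interrupt arriving while the lock is held can try to take it again and deadlock. The sketch below is a minimal, hypothetical illustration of that pattern in kernel-style C; it is not taken from the kernel tree, and the names demo_lock, demo_irq_handler() and demo_flush() are invented for this example only.

/*
 * Minimal sketch of the pattern the patch applies: the same raw spinlock
 * is taken from an interrupt handler and from process context, so the
 * process-context side must use the _irqsave variant.
 */
#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_RAW_SPINLOCK(demo_lock);

/* Hypothetical interrupt handler that updates state under demo_lock. */
static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	/* Interrupts are already disabled here, plain lock is fine. */
	raw_spin_lock(&demo_lock);
	/* ... record an update ... */
	raw_spin_unlock(&demo_lock);
	return IRQ_HANDLED;
}

/* Hypothetical process-context flush, mirroring cgroup_rstat_flush_locked(). */
static void demo_flush(void)
{
	unsigned long flags;

	/*
	 * _irqsave keeps demo_irq_handler() from interrupting this CPU
	 * while demo_lock is held; a plain raw_spin_lock() here could
	 * deadlock if the interrupt fired at that point.
	 */
	raw_spin_lock_irqsave(&demo_lock, flags);
	/* ... read and reset the recorded state ... */
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}

In the actual patch, cgroup_rstat_cpu_lock plays the role of demo_lock and cgroup_rstat_updated() is the IRQ-context user, which is why cgroup_rstat_flush_locked() switches to raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore().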