| field | value | date |
|---|---|---|
| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-06 01:02:38 +0000 |
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-06 01:02:38 +0000 |
| commit | 08b74a000942a380fe028845f92cd3a0dee827d5 (patch) | |
| tree | aa78b4e12607c3e1fcce8d5cc42df4330792f118 /debian/patches-rt/0195-seqlock-Prevent-rt-starvation.patch | |
| parent | Adding upstream version 4.19.249. (diff) | |
| download | linux-08b74a000942a380fe028845f92cd3a0dee827d5.tar.xz, linux-08b74a000942a380fe028845f92cd3a0dee827d5.zip | |
Adding debian version 4.19.249-2. (debian/4.19.249-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0195-seqlock-Prevent-rt-starvation.patch')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | debian/patches-rt/0195-seqlock-Prevent-rt-starvation.patch | 195 |

1 file changed, 195 insertions, 0 deletions
diff --git a/debian/patches-rt/0195-seqlock-Prevent-rt-starvation.patch b/debian/patches-rt/0195-seqlock-Prevent-rt-starvation.patch
new file mode 100644
index 000000000..6398461d7
--- /dev/null
+++ b/debian/patches-rt/0195-seqlock-Prevent-rt-starvation.patch
@@ -0,0 +1,195 @@
+From d2adc15afeaebd45ad21834295e100c5627a7fa2 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 22 Feb 2012 12:03:30 +0100
+Subject: [PATCH 195/347] seqlock: Prevent rt starvation
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+If a low prio writer gets preempted while holding the seqlock write
+locked, a high prio reader spins forever on RT.
+
+To prevent this let the reader grab the spinlock, so it blocks and
+eventually boosts the writer. This way the writer can proceed and
+endless spinning is prevented.
+
+For seqcount writers we disable preemption over the update code
+path. Thanks to Al Viro for distangling some VFS code to make that
+possible.
+
+Nicholas Mc Guire:
+- spin_lock+unlock => spin_unlock_wait
+- __write_seqcount_begin => __raw_write_seqcount_begin
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/seqlock.h | 57 ++++++++++++++++++++++++++++++++---------
+ include/net/neighbour.h |  6 ++---
+ 2 files changed, 48 insertions(+), 15 deletions(-)
+
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index a42a29952889..a3768cfce914 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -221,20 +221,30 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
+ 	return __read_seqcount_retry(s, start);
+ }
+ 
+-
+-
+-static inline void raw_write_seqcount_begin(seqcount_t *s)
++static inline void __raw_write_seqcount_begin(seqcount_t *s)
+ {
+ 	s->sequence++;
+ 	smp_wmb();
+ }
+ 
+-static inline void raw_write_seqcount_end(seqcount_t *s)
++static inline void raw_write_seqcount_begin(seqcount_t *s)
++{
++	preempt_disable_rt();
++	__raw_write_seqcount_begin(s);
++}
++
++static inline void __raw_write_seqcount_end(seqcount_t *s)
+ {
+ 	smp_wmb();
+ 	s->sequence++;
+ }
+ 
++static inline void raw_write_seqcount_end(seqcount_t *s)
++{
++	__raw_write_seqcount_end(s);
++	preempt_enable_rt();
++}
++
+ /**
+  * raw_write_seqcount_barrier - do a seq write barrier
+  * @s: pointer to seqcount_t
+@@ -435,10 +445,33 @@ typedef struct {
+ /*
+  * Read side functions for starting and finalizing a read side section.
+  */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ static inline unsigned read_seqbegin(const seqlock_t *sl)
+ {
+ 	return read_seqcount_begin(&sl->seqcount);
+ }
++#else
++/*
++ * Starvation safe read side for RT
++ */
++static inline unsigned read_seqbegin(seqlock_t *sl)
++{
++	unsigned ret;
++
++repeat:
++	ret = READ_ONCE(sl->seqcount.sequence);
++	if (unlikely(ret & 1)) {
++		/*
++		 * Take the lock and let the writer proceed (i.e. evtl
++		 * boost it), otherwise we could loop here forever.
++		 */
++		spin_unlock_wait(&sl->lock);
++		goto repeat;
++	}
++	smp_rmb();
++	return ret;
++}
++#endif
+ 
+ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+ {
+@@ -453,36 +486,36 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+ static inline void write_seqlock(seqlock_t *sl)
+ {
+ 	spin_lock(&sl->lock);
+-	write_seqcount_begin(&sl->seqcount);
++	__raw_write_seqcount_begin(&sl->seqcount);
+ }
+ 
+ static inline void write_sequnlock(seqlock_t *sl)
+ {
+-	write_seqcount_end(&sl->seqcount);
++	__raw_write_seqcount_end(&sl->seqcount);
+ 	spin_unlock(&sl->lock);
+ }
+ 
+ static inline void write_seqlock_bh(seqlock_t *sl)
+ {
+ 	spin_lock_bh(&sl->lock);
+-	write_seqcount_begin(&sl->seqcount);
++	__raw_write_seqcount_begin(&sl->seqcount);
+ }
+ 
+ static inline void write_sequnlock_bh(seqlock_t *sl)
+ {
+-	write_seqcount_end(&sl->seqcount);
++	__raw_write_seqcount_end(&sl->seqcount);
+ 	spin_unlock_bh(&sl->lock);
+ }
+ 
+ static inline void write_seqlock_irq(seqlock_t *sl)
+ {
+ 	spin_lock_irq(&sl->lock);
+-	write_seqcount_begin(&sl->seqcount);
++	__raw_write_seqcount_begin(&sl->seqcount);
+ }
+ 
+ static inline void write_sequnlock_irq(seqlock_t *sl)
+ {
+-	write_seqcount_end(&sl->seqcount);
++	__raw_write_seqcount_end(&sl->seqcount);
+ 	spin_unlock_irq(&sl->lock);
+ }
+ 
+@@ -491,7 +524,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&sl->lock, flags);
+-	write_seqcount_begin(&sl->seqcount);
++	__raw_write_seqcount_begin(&sl->seqcount);
+ 	return flags;
+ }
+ 
+@@ -501,7 +534,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+ static inline void
+ write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+ {
+-	write_seqcount_end(&sl->seqcount);
++	__raw_write_seqcount_end(&sl->seqcount);
+ 	spin_unlock_irqrestore(&sl->lock, flags);
+ }
+ 
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index 5ce035984a4d..1166fc17b757 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -451,7 +451,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
+ }
+ #endif
+ 
+-static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
++static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
+ {
+ 	unsigned int hh_alen = 0;
+ 	unsigned int seq;
+@@ -493,7 +493,7 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
+ 
+ static inline int neigh_output(struct neighbour *n, struct sk_buff *skb)
+ {
+-	const struct hh_cache *hh = &n->hh;
++	struct hh_cache *hh = &n->hh;
+ 
+ 	if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
+ 		return neigh_hh_output(hh, skb);
+@@ -534,7 +534,7 @@ struct neighbour_cb {
+ 
+ #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
+ 
+-static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
++static inline void neigh_ha_snapshot(char *dst, struct neighbour *n,
+ 				     const struct net_device *dev)
+ {
+ 	unsigned int seq;
+-- 
+2.36.1
+
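The commit message above describes the trick: instead of spinning while the sequence count is odd, the RT reader blocks on the writer's lock so the (priority-inheriting) lock can boost a preempted low-priority writer. As an illustration only, here is a user-space sketch of that read-side idea under assumptions: pthreads stands in for the kernel's rtmutex-backed spinlock, all `rt_*` names are invented for this sketch, and the writer-side `preempt_disable_rt()` has no user-space equivalent and is omitted. This is not the kernel implementation from the patch.

```c
/*
 * Illustration only -- NOT the kernel code from the patch above.
 * When the sequence count is odd (writer active), the reader blocks on
 * the writer's mutex instead of spinning, so a priority-inheriting
 * mutex can boost a preempted low-priority writer.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct {
	atomic_uint sequence;   /* even: no writer, odd: write in progress */
	pthread_mutex_t lock;   /* writer lock; use PTHREAD_PRIO_INHERIT on RT */
} rt_seqlock_t;

static void rt_write_seqlock(rt_seqlock_t *sl)
{
	pthread_mutex_lock(&sl->lock);
	atomic_fetch_add(&sl->sequence, 1);     /* sequence becomes odd */
}

static void rt_write_sequnlock(rt_seqlock_t *sl)
{
	atomic_fetch_add(&sl->sequence, 1);     /* sequence becomes even */
	pthread_mutex_unlock(&sl->lock);
}

static unsigned rt_read_seqbegin(rt_seqlock_t *sl)
{
	unsigned ret;

	for (;;) {
		ret = atomic_load(&sl->sequence);
		if (!(ret & 1))
			return ret;             /* no writer, snapshot usable */
		/*
		 * Writer active: block on its lock (letting a PI mutex boost
		 * it) and retry, mirroring the spin_unlock_wait() +
		 * "goto repeat" loop in the patch.
		 */
		pthread_mutex_lock(&sl->lock);
		pthread_mutex_unlock(&sl->lock);
	}
}

static int rt_read_seqretry(rt_seqlock_t *sl, unsigned start)
{
	return atomic_load(&sl->sequence) != start;     /* nonzero = retry */
}

/* Minimal single-threaded usage: snapshot two related values consistently. */
static rt_seqlock_t sl = { .sequence = 0, .lock = PTHREAD_MUTEX_INITIALIZER };
static int a, b;

int main(void)
{
	unsigned seq;
	int x, y;

	rt_write_seqlock(&sl);
	a = 1;
	b = 2;
	rt_write_sequnlock(&sl);

	do {
		seq = rt_read_seqbegin(&sl);
		x = a;
		y = b;
	} while (rt_read_seqretry(&sl, seq));

	printf("snapshot: a=%d b=%d\n", x, y);
	return 0;
}
```

Build with `cc -pthread`. The sequentially consistent atomics keep the sketch simple; the kernel code relies on the finer-grained smp_wmb()/smp_rmb() barriers shown in the patch, and a real multi-threaded port would also need atomic accesses to the protected data to satisfy the C memory model.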