From: Thomas Gleixner
Date: Wed, 22 Feb 2012 12:03:30 +0100
Subject: [PATCH 195/342] seqlock: Prevent rt starvation
Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=8072825488181a21a2f501d13d57d3220abcb7f3

If a low prio writer gets preempted while holding the seqlock write
locked, a high prio reader spins forever on RT.

To prevent this, let the reader grab the spinlock, so it blocks and
eventually boosts the writer. This way the writer can proceed and
endless spinning is prevented.

For seqcount writers we disable preemption over the update code path.
Thanks to Al Viro for disentangling some VFS code to make that possible.

Nicholas Mc Guire:
- spin_lock+unlock => spin_unlock_wait
- __write_seqcount_begin => __raw_write_seqcount_begin

Signed-off-by: Thomas Gleixner
---
 include/linux/seqlock.h | 57 ++++++++++++++++++++++++++++++++---------
 include/net/neighbour.h |  6 ++---
 2 files changed, 48 insertions(+), 15 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index a42a29952889..a3768cfce914 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -221,20 +221,30 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
 	return __read_seqcount_retry(s, start);
 }
 
-
-
-static inline void raw_write_seqcount_begin(seqcount_t *s)
+static inline void __raw_write_seqcount_begin(seqcount_t *s)
 {
 	s->sequence++;
 	smp_wmb();
 }
 
-static inline void raw_write_seqcount_end(seqcount_t *s)
+static inline void raw_write_seqcount_begin(seqcount_t *s)
+{
+	preempt_disable_rt();
+	__raw_write_seqcount_begin(s);
+}
+
+static inline void __raw_write_seqcount_end(seqcount_t *s)
 {
 	smp_wmb();
 	s->sequence++;
 }
 
+static inline void raw_write_seqcount_end(seqcount_t *s)
+{
+	__raw_write_seqcount_end(s);
+	preempt_enable_rt();
+}
+
 /**
  * raw_write_seqcount_barrier - do a seq write barrier
  * @s: pointer to seqcount_t
@@ -435,10 +445,33 @@ typedef struct {
 /*
  * Read side functions for starting and finalizing a read side section.
  */
+#ifndef CONFIG_PREEMPT_RT_FULL
 static inline unsigned read_seqbegin(const seqlock_t *sl)
 {
 	return read_seqcount_begin(&sl->seqcount);
 }
+#else
+/*
+ * Starvation safe read side for RT
+ */
+static inline unsigned read_seqbegin(seqlock_t *sl)
+{
+	unsigned ret;
+
+repeat:
+	ret = READ_ONCE(sl->seqcount.sequence);
+	if (unlikely(ret & 1)) {
+		/*
+		 * Take the lock and let the writer proceed (i.e. evtl
+		 * boost it), otherwise we could loop here forever.
+		 */
+		spin_unlock_wait(&sl->lock);
+		goto repeat;
+	}
+	smp_rmb();
+	return ret;
+}
+#endif
 
 static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 {
@@ -453,36 +486,36 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 static inline void write_seqlock(seqlock_t *sl)
 {
 	spin_lock(&sl->lock);
-	write_seqcount_begin(&sl->seqcount);
+	__raw_write_seqcount_begin(&sl->seqcount);
 }
 
 static inline void write_sequnlock(seqlock_t *sl)
 {
-	write_seqcount_end(&sl->seqcount);
+	__raw_write_seqcount_end(&sl->seqcount);
 	spin_unlock(&sl->lock);
 }
 
 static inline void write_seqlock_bh(seqlock_t *sl)
 {
 	spin_lock_bh(&sl->lock);
-	write_seqcount_begin(&sl->seqcount);
+	__raw_write_seqcount_begin(&sl->seqcount);
 }
 
 static inline void write_sequnlock_bh(seqlock_t *sl)
 {
-	write_seqcount_end(&sl->seqcount);
+	__raw_write_seqcount_end(&sl->seqcount);
 	spin_unlock_bh(&sl->lock);
 }
 
 static inline void write_seqlock_irq(seqlock_t *sl)
 {
 	spin_lock_irq(&sl->lock);
-	write_seqcount_begin(&sl->seqcount);
+	__raw_write_seqcount_begin(&sl->seqcount);
 }
 
 static inline void write_sequnlock_irq(seqlock_t *sl)
 {
-	write_seqcount_end(&sl->seqcount);
+	__raw_write_seqcount_end(&sl->seqcount);
 	spin_unlock_irq(&sl->lock);
 }
 
@@ -491,7 +524,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
 	unsigned long flags;
 
 	spin_lock_irqsave(&sl->lock, flags);
-	write_seqcount_begin(&sl->seqcount);
+	__raw_write_seqcount_begin(&sl->seqcount);
 	return flags;
 }
 
@@ -501,7 +534,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
 static inline void write_sequnlock_irqrestore(seqlock_t *sl,
 					      unsigned long flags)
 {
-	write_seqcount_end(&sl->seqcount);
+	__raw_write_seqcount_end(&sl->seqcount);
 	spin_unlock_irqrestore(&sl->lock, flags);
 }
 
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 5ce035984a4d..1166fc17b757 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -451,7 +451,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
 }
 #endif
 
-static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
+static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
 {
 	unsigned int hh_alen = 0;
 	unsigned int seq;
@@ -493,7 +493,7 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
 
 static inline int neigh_output(struct neighbour *n, struct sk_buff *skb)
 {
-	const struct hh_cache *hh = &n->hh;
+	struct hh_cache *hh = &n->hh;
 
 	if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
 		return neigh_hh_output(hh, skb);
@@ -534,7 +534,7 @@ struct neighbour_cb {
 
 #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
 
-static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
+static inline void neigh_ha_snapshot(char *dst, struct neighbour *n,
 				     const struct net_device *dev)
 {
 	unsigned int seq;
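
For experimenting with the idea outside the kernel, below is a minimal userspace
sketch (not part of the patch) of the starvation-safe read side: instead of
spinning while the sequence count is odd, the reader blocks on the writer's lock,
so a priority-inheriting mutex could boost the writer. All names here
(model_seqlock_t, model_read_begin, and so on) are invented for the sketch; a
pthread mutex stands in for the rtmutex-backed spinlock (pthreads has no
spin_unlock_wait(), so the reader does a lock/unlock pair, like the variant
before the Mc Guire change), C11 fences stand in for smp_wmb()/smp_rmb(), and
the plain accesses to the protected data mirror kernel style rather than strict
C11 data-race rules.

/* Build with: cc -pthread model_seqlock.c */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

typedef struct {
	atomic_uint	sequence;
	pthread_mutex_t	lock;		/* stands in for the rtmutex-backed spinlock */
} model_seqlock_t;

static unsigned model_read_begin(model_seqlock_t *sl)
{
	unsigned ret;

	for (;;) {
		ret = atomic_load_explicit(&sl->sequence, memory_order_relaxed);
		if (!(ret & 1))
			break;		/* even: no writer in progress */
		/*
		 * Odd sequence: a writer is in progress. Block on its lock
		 * (a PI mutex would boost it) instead of spinning, then retry.
		 */
		pthread_mutex_lock(&sl->lock);
		pthread_mutex_unlock(&sl->lock);
	}
	atomic_thread_fence(memory_order_acquire);	/* models smp_rmb() */
	return ret;
}

static int model_read_retry(model_seqlock_t *sl, unsigned start)
{
	atomic_thread_fence(memory_order_acquire);	/* models smp_rmb() */
	return atomic_load_explicit(&sl->sequence, memory_order_relaxed) != start;
}

static void model_write_lock(model_seqlock_t *sl)
{
	pthread_mutex_lock(&sl->lock);
	atomic_fetch_add_explicit(&sl->sequence, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* models smp_wmb() */
}

static void model_write_unlock(model_seqlock_t *sl)
{
	atomic_thread_fence(memory_order_release);	/* models smp_wmb() */
	atomic_fetch_add_explicit(&sl->sequence, 1, memory_order_relaxed);
	pthread_mutex_unlock(&sl->lock);
}

static model_seqlock_t sl = { .lock = PTHREAD_MUTEX_INITIALIZER };
static unsigned long long a, b;		/* protected pair; consistent iff a == b */

static void *writer(void *unused)
{
	(void)unused;
	for (int i = 0; i < 100000; i++) {
		model_write_lock(&sl);
		a++;
		b++;
		model_write_unlock(&sl);
	}
	return NULL;
}

int main(void)
{
	pthread_t w;

	pthread_create(&w, NULL, writer, NULL);
	for (int i = 0; i < 100000; i++) {
		unsigned long long x, y;
		unsigned seq;

		do {
			seq = model_read_begin(&sl);
			x = a;
			y = b;
		} while (model_read_retry(&sl, seq));
		assert(x == y);		/* snapshot is always consistent */
	}
	pthread_join(w, NULL);
	return 0;
}

To see actual boosting in this model the mutex would need PTHREAD_PRIO_INHERIT
(via pthread_mutexattr_setprotocol()); with the default protocol the sketch only
demonstrates the blocking structure, not the priority inheritance that the
rtmutex-based spinlock provides on PREEMPT_RT.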