Diffstat (limited to 'debian/patches-rt/0183-u64_stats-Disable-preemption-on-32bit-UP-SMP-with-RT.patch')
-rw-r--r--  debian/patches-rt/0183-u64_stats-Disable-preemption-on-32bit-UP-SMP-with-RT.patch | 33
1 file changed, 14 insertions(+), 19 deletions(-)
diff --git a/debian/patches-rt/0183-u64_stats-Disable-preemption-on-32bit-UP-SMP-with-RT.patch b/debian/patches-rt/0183-u64_stats-Disable-preemption-on-32bit-UP-SMP-with-RT.patch
index f100e0717..fe690eaf6 100644
--- a/debian/patches-rt/0183-u64_stats-Disable-preemption-on-32bit-UP-SMP-with-RT.patch
+++ b/debian/patches-rt/0183-u64_stats-Disable-preemption-on-32bit-UP-SMP-with-RT.patch
@@ -1,9 +1,9 @@
-From e6ec60749d80fcdde8451a7a544a218f7c5ef393 Mon Sep 17 00:00:00 2001
+From 8b79413c531cd633e6608aef388828e9742996c1 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 17 Aug 2020 12:28:10 +0200
Subject: [PATCH 183/323] u64_stats: Disable preemption on 32bit-UP/SMP with RT
during updates
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.215-rt107.tar.xz

On RT the seqcount_t is required even on UP because the softirq can be
preempted. The IRQ handler is threaded so it is also preemptible.
@@ -18,8 +18,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
include/linux/u64_stats_sync.h | 42 ++++++++++++++++++++++------------
1 file changed, 28 insertions(+), 14 deletions(-)

-diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
-index e81856c0ba13..66eb968a09d4 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -66,7 +66,7 @@
@@ -31,16 +29,16 @@ index e81856c0ba13..66eb968a09d4 100644
seqcount_t seq;
#endif
};
-@@ -115,7 +115,7 @@ static inline void u64_stats_inc(u64_stats_t *p)
+@@ -115,7 +115,7 @@
}
#endif
-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
- #define u64_stats_init(syncp) seqcount_init(&(syncp)->seq)
- #else
- static inline void u64_stats_init(struct u64_stats_sync *syncp)
-@@ -125,15 +125,19 @@ static inline void u64_stats_init(struct u64_stats_sync *syncp)
+ #define u64_stats_init(syncp) \
+ do { \
+ struct u64_stats_sync *__s = (syncp); \
+@@ -129,15 +129,19 @@
static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
@@ -62,7 +60,7 @@ index e81856c0ba13..66eb968a09d4 100644
#endif
}
-@@ -142,8 +146,11 @@ u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
+@@ -146,8 +150,11 @@
{
unsigned long flags = 0;
@@ -76,7 +74,7 @@ index e81856c0ba13..66eb968a09d4 100644
write_seqcount_begin(&syncp->seq);
#endif
return flags;
-@@ -153,15 +160,18 @@ static inline void
+@@ -157,15 +164,18 @@
u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
unsigned long flags)
{
@@ -98,7 +96,7 @@ index e81856c0ba13..66eb968a09d4 100644
return read_seqcount_begin(&syncp->seq);
#else
return 0;
-@@ -170,7 +180,7 @@ static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *
+@@ -174,7 +184,7 @@
static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
@@ -107,7 +105,7 @@ index e81856c0ba13..66eb968a09d4 100644
preempt_disable();
#endif
return __u64_stats_fetch_begin(syncp);
-@@ -179,7 +189,7 @@ static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *sy
+@@ -183,7 +193,7 @@
static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
unsigned int start)
{
@@ -116,7 +114,7 @@ index e81856c0ba13..66eb968a09d4 100644
return read_seqcount_retry(&syncp->seq, start);
#else
return false;
-@@ -189,7 +199,7 @@ static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+@@ -193,7 +203,7 @@
static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
unsigned int start)
{
@@ -125,7 +123,7 @@ index e81856c0ba13..66eb968a09d4 100644
preempt_enable();
#endif
return __u64_stats_fetch_retry(syncp, start);
-@@ -203,7 +213,9 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+@@ -207,7 +217,9 @@
*/
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
@@ -136,7 +134,7 @@ index e81856c0ba13..66eb968a09d4 100644
local_irq_disable();
#endif
return __u64_stats_fetch_begin(syncp);
-@@ -212,7 +224,9 @@ static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync
+@@ -216,7 +228,9 @@
static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
unsigned int start)
{
@@ -147,6 +145,3 @@ index e81856c0ba13..66eb968a09d4 100644
local_irq_enable();
#endif
return __u64_stats_fetch_retry(syncp, start);
---
-2.43.0
-
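
For context on what the refreshed patch does: it widens the #if BITS_PER_LONG == 32 && defined(CONFIG_SMP) guards to also cover CONFIG_PREEMPT_RT, and on RT it replaces local_irq_save()/local_irq_disable() around the seqcount with preempt_disable()/preempt_enable(), since threaded IRQ handlers make preemption the only exclusion the writer needs. The following stand-alone C sketch models the writer/reader protocol those helpers implement; the stats_* names and the user-space atomics are illustrative assumptions, not the kernel's u64_stats API, and it uses seq_cst atomics for simplicity where the kernel relies on finer-grained barriers.

/* Sketch of seqcount-style protection for a 64-bit counter on a 32-bit
 * CPU, where the counter cannot be loaded in one atomic instruction.
 * Illustrative only; names do not match <linux/u64_stats_sync.h>. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct stats_sync {
	atomic_uint seq;	/* even: stable, odd: update in progress */
};

struct stats {
	struct stats_sync sync;
	uint64_t bytes;		/* read as two 32-bit halves on 32-bit CPUs */
};

/* Writer side. In the patched kernel this pair runs under
 * preempt_disable() on RT (instead of disabling interrupts), which is
 * enough because per-CPU writers only race with other writers on the
 * same CPU and IRQ handlers are threaded. */
static void stats_update_begin(struct stats_sync *s)
{
	atomic_fetch_add(&s->seq, 1);	/* seq becomes odd */
}

static void stats_update_end(struct stats_sync *s)
{
	atomic_fetch_add(&s->seq, 1);	/* seq becomes even again */
}

static void stats_add(struct stats *st, uint64_t n)
{
	stats_update_begin(&st->sync);
	st->bytes += n;		/* a torn 64-bit store is hidden by the seq */
	stats_update_end(&st->sync);
}

/* Reader side: retry until one consistent snapshot was observed,
 * mirroring the __u64_stats_fetch_begin()/_retry() loop. */
static uint64_t stats_read(struct stats *st)
{
	unsigned int start;
	uint64_t v;

	do {
		do {
			start = atomic_load(&st->sync.seq);
		} while (start & 1);		/* update in flight, spin */
		v = st->bytes;			/* possibly torn; retry detects it */
	} while (atomic_load(&st->sync.seq) != start);

	return v;
}

int main(void)
{
	struct stats st = { .sync = { .seq = 0 }, .bytes = 0 };

	stats_add(&st, 1500);
	printf("bytes = %llu\n", (unsigned long long)stats_read(&st));
	return 0;
}

On 64-bit kernels none of this machinery is needed: as the surrounding header shows, u64_stats_fetch_begin() returns 0 and the retry helpers return false there, because a 64-bit load is already atomic.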