Diffstat
-rw-r--r-- | debian/patches-rt/0079-mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch | 145
1 file changed, 145 insertions, 0 deletions
diff --git a/debian/patches-rt/0079-mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch b/debian/patches-rt/0079-mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch
new file mode 100644
index 000000000..0c7ea39e8
--- /dev/null
+++ b/debian/patches-rt/0079-mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch
@@ -0,0 +1,145 @@
+From a61a39e63a0af178f57da615a22877b00fee0a58 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:30:13 -0500
+Subject: [PATCH 079/347] mm/vmstat: Protect per cpu variables with preempt
+ disable on RT
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+Disable preemption on -RT for the vmstat code. On vanilla the code runs in
+IRQ-off regions while on -RT it does not. "preempt_disable" ensures that the
+same resources are not updated in parallel due to preemption.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/vmstat.h |  4 ++++
+ mm/vmstat.c            | 12 ++++++++++++
+ 2 files changed, 16 insertions(+)
+
+diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
+index f25cef84b41d..febee8649220 100644
+--- a/include/linux/vmstat.h
++++ b/include/linux/vmstat.h
+@@ -54,7 +54,9 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
+  */
+ static inline void __count_vm_event(enum vm_event_item item)
+ {
++	preempt_disable_rt();
+ 	raw_cpu_inc(vm_event_states.event[item]);
++	preempt_enable_rt();
+ }
+ 
+ static inline void count_vm_event(enum vm_event_item item)
+@@ -64,7 +66,9 @@ static inline void count_vm_event(enum vm_event_item item)
+ 
+ static inline void __count_vm_events(enum vm_event_item item, long delta)
+ {
++	preempt_disable_rt();
+ 	raw_cpu_add(vm_event_states.event[item], delta);
++	preempt_enable_rt();
+ }
+ 
+ static inline void count_vm_events(enum vm_event_item item, long delta)
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index 21e07e71ea2d..ff7c0d08f2b6 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -320,6 +320,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
+ 	long x;
+ 	long t;
+ 
++	preempt_disable_rt();
+ 	x = delta + __this_cpu_read(*p);
+ 
+ 	t = __this_cpu_read(pcp->stat_threshold);
+@@ -329,6 +330,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
+ 		x = 0;
+ 	}
+ 	__this_cpu_write(*p, x);
++	preempt_enable_rt();
+ }
+ EXPORT_SYMBOL(__mod_zone_page_state);
+ 
+@@ -340,6 +342,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
+ 	long x;
+ 	long t;
+ 
++	preempt_disable_rt();
+ 	x = delta + __this_cpu_read(*p);
+ 
+ 	t = __this_cpu_read(pcp->stat_threshold);
+@@ -349,6 +352,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
+ 		x = 0;
+ 	}
+ 	__this_cpu_write(*p, x);
++	preempt_enable_rt();
+ }
+ EXPORT_SYMBOL(__mod_node_page_state);
+ 
+@@ -381,6 +385,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
+ 	s8 __percpu *p = pcp->vm_stat_diff + item;
+ 	s8 v, t;
+ 
++	preempt_disable_rt();
+ 	v = __this_cpu_inc_return(*p);
+ 	t = __this_cpu_read(pcp->stat_threshold);
+ 	if (unlikely(v > t)) {
+@@ -389,6 +394,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
+ 		zone_page_state_add(v + overstep, zone, item);
+ 		__this_cpu_write(*p, -overstep);
+ 	}
++	preempt_enable_rt();
+ }
+ 
+ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+@@ -397,6 +403,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+ 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
+ 	s8 v, t;
+ 
++	preempt_disable_rt();
+ 	v = __this_cpu_inc_return(*p);
+ 	t = __this_cpu_read(pcp->stat_threshold);
+ 	if (unlikely(v > t)) {
+@@ -405,6 +412,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+ 		node_page_state_add(v + overstep, pgdat, item);
+ 		__this_cpu_write(*p, -overstep);
+ 	}
++	preempt_enable_rt();
+ }
+ 
+ void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
+@@ -425,6 +433,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
+ 	s8 __percpu *p = pcp->vm_stat_diff + item;
+ 	s8 v, t;
+ 
++	preempt_disable_rt();
+ 	v = __this_cpu_dec_return(*p);
+ 	t = __this_cpu_read(pcp->stat_threshold);
+ 	if (unlikely(v < - t)) {
+@@ -433,6 +442,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
+ 		zone_page_state_add(v - overstep, zone, item);
+ 		__this_cpu_write(*p, overstep);
+ 	}
++	preempt_enable_rt();
+ }
+ 
+ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+@@ -441,6 +451,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+ 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
+ 	s8 v, t;
+ 
++	preempt_disable_rt();
+ 	v = __this_cpu_dec_return(*p);
+ 	t = __this_cpu_read(pcp->stat_threshold);
+ 	if (unlikely(v < - t)) {
+@@ -449,6 +460,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+ 		node_page_state_add(v - overstep, pgdat, item);
+ 		__this_cpu_write(*p, overstep);
+ 	}
++	preempt_enable_rt();
+ }
+ 
+ void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
+-- 
+2.36.1
+
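
The preempt_disable_rt()/preempt_enable_rt() helpers used throughout this patch are not part of the mainline kernel; they are introduced by an earlier patch in the same RT series. A minimal sketch of how the 4.19-rt series defines them, assuming the series' CONFIG_PREEMPT_RT_FULL switch in include/linux/preempt.h (exact placement may differ in other trees):

/*
 * Sketch of the RT helper macros, assuming the 4.19-rt definitions.
 * On an RT kernel the vmstat counter updates above run in preemptible
 * context, so preemption must be disabled around each non-atomic
 * per-CPU read-modify-write. On a non-RT kernel the callers already
 * run with interrupts off, so the helpers reduce to a compiler barrier.
 */
#ifdef CONFIG_PREEMPT_RT_FULL
# define preempt_disable_rt()	preempt_disable()
# define preempt_enable_rt()	preempt_enable()
#else
# define preempt_disable_rt()	barrier()
# define preempt_enable_rt()	barrier()
#endif

Without this protection on -RT, a task preempted between __this_cpu_read(*p) and __this_cpu_write(*p, x) could be interleaved with another task updating the same per-CPU counter on the same CPU; the preempted task's stale write would then silently discard that update.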