author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-08 03:21:37 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-08 03:21:37 +0000
commit    | 06343b27411344fc542f4f3a643f8441aa35252d (patch)
tree      | 66aa45187c93c350bbdf7e6ae4467a70bf3a8f4c /debian/patches-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch
parent    | Merging upstream version 4.19.260. (diff)
Adding debian version 4.19.260-1. (tag: debian/4.19.260-1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch')
-rw-r--r-- | debian/patches-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch | 16
1 file changed, 6 insertions(+), 10 deletions(-)
diff --git a/debian/patches-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch b/debian/patches-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch
index c62fd4658..0035016fb 100644
--- a/debian/patches-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch
+++ b/debian/patches-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch
@@ -1,9 +1,8 @@
-From 77ff019039696fc4bd8962110c689eb3520386b1 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx@linutronix.de>
 Date: Thu, 21 Jun 2018 17:29:19 +0200
-Subject: [PATCH 075/347] mm/SLUB: delay giving back empty slubs to IRQ enabled
+Subject: [PATCH 075/342] mm/SLUB: delay giving back empty slubs to IRQ enabled
  regions
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=2ea46cd4066413ae217e7307020fa0e4a4cbc223
 
 __free_slab() is invoked with disabled interrupts which increases the
 irq-off time while __free_pages() is doing the work.
@@ -18,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  1 file changed, 69 insertions(+), 5 deletions(-)
 
 diff --git a/mm/slub.c b/mm/slub.c
-index 0e4670add1e9..a5a086ed4493 100644
+index ef6fc4df3ed5..e9abe4aa5801 100644
 --- a/mm/slub.c
 +++ b/mm/slub.c
 @@ -1350,6 +1350,12 @@ static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
@@ -64,7 +63,7 @@ index 0e4670add1e9..a5a086ed4493 100644
 	} else
 		__free_slab(s, page);
 }
-@@ -2256,14 +2278,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+@@ -2257,14 +2279,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		pobjects = oldpage->pobjects;
 		pages = oldpage->pages;
 		if (drain && pobjects > s->cpu_partial) {
@@ -86,7 +85,7 @@ index 0e4670add1e9..a5a086ed4493 100644
 		oldpage = NULL;
 		pobjects = 0;
 		pages = 0;
-@@ -2333,7 +2362,22 @@ static bool has_cpu_slab(int cpu, void *info)
+@@ -2332,7 +2361,22 @@ static bool has_cpu_slab(int cpu, void *info)
 
 static void flush_all(struct kmem_cache *s)
 {
@@ -109,7 +108,7 @@ index 0e4670add1e9..a5a086ed4493 100644
 }
 
 /*
-@@ -2531,8 +2575,10 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
+@@ -2530,8 +2574,10 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
  * already disabled (which is the case for bulk allocation).
  */
 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
@@ -218,6 +217,3 @@ index 0e4670add1e9..a5a086ed4493 100644
 	if (debug_guardpage_minorder())
 		slub_max_order = 0;
 
----
-2.36.1
-
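Background on the patch being refreshed here: it shortens the IRQ-off window by moving the expensive __free_pages() work out of the interrupts-disabled path — empty slabs are only queued while interrupts are off and are given back later, from an IRQ-enabled region. Below is a minimal, self-contained userspace sketch of that queue-now/free-later pattern; struct slab, queue_free() and drain_free_list() are invented names for illustration, not the kernel's API.

```c
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the page backing an empty slab (hypothetical type). */
struct slab {
	struct slab *next;
	int id;
};

/* Slabs collected here while it is unsafe ("IRQs off") to free them. */
static struct slab *delayed_free_list;

/* Cheap O(1) enqueue: the only work done in the IRQ-off region. */
static void queue_free(struct slab *s)
{
	s->next = delayed_free_list;
	delayed_free_list = s;
}

/* The expensive part, run once "interrupts" are enabled again;
 * models the deferred __free_slab()/__free_pages() calls. */
static void drain_free_list(void)
{
	while (delayed_free_list) {
		struct slab *s = delayed_free_list;
		delayed_free_list = s->next;
		printf("freeing slab %d with interrupts enabled\n", s->id);
		free(s);
	}
}

int main(void)
{
	/* Pretend IRQs are disabled here: only queue, never free. */
	for (int i = 0; i < 3; i++) {
		struct slab *s = malloc(sizeof(*s));
		if (!s)
			return 1;
		s->id = i;
		queue_free(s);
	}
	/* IRQs "enabled" again: give the pages back. */
	drain_free_list();
	return 0;
}
```

The real patch does this inside mm/slub.c and additionally has to cope with concurrency (per-CPU state, flush_all()), which the sketch deliberately omits; it only illustrates why splitting the free into a cheap enqueue and a later drain reduces the time spent with interrupts disabled.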