author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-08 03:22:37 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-08 03:26:40 +0000
commit | 08f003891b84f52e49a5bdbc8a589fb052ac9a4e (patch)
tree | b7e426b4a4eb48e9e71188a2812a4c71625c35ac /debian/patches-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
parent | Merging upstream version 4.19.260. (diff)
download | linux-08f003891b84f52e49a5bdbc8a589fb052ac9a4e.tar.xz linux-08f003891b84f52e49a5bdbc8a589fb052ac9a4e.zip
Merging debian version 4.19.260-1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch')
-rw-r--r-- | debian/patches-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch | 16
1 file changed, 6 insertions, 10 deletions
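These numbers can be reproduced locally, assuming a clone of the repository this page is served from; the commit hash and the path are the ones shown above:

# diffstat for just this patch file
git show --stat 08f003891b84f52e49a5bdbc8a589fb052ac9a4e \
    -- debian/patches-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
# full diff for just this patch file
git show 08f003891b84f52e49a5bdbc8a589fb052ac9a4e \
    -- debian/patches-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch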
diff --git a/debian/patches-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch b/debian/patches-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
index 564ebe845..e3bd8de68 100644
--- a/debian/patches-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
+++ b/debian/patches-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
@@ -1,8 +1,7 @@
-From 3e8b37b77db38c83eaff3b8fdcd23e91f8d78613 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner <tglx@linutronix.de>
 Date: Mon, 28 May 2018 15:24:22 +0200
-Subject: [PATCH 074/347] mm/SLxB: change list_lock to raw_spinlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+Subject: [PATCH 074/342] mm/SLxB: change list_lock to raw_spinlock_t
+Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=d8c58de5abafe875e6c797d46f6a9272262d1358
 
 The list_lock is used with used with IRQs off on RT. Make it a raw_spinlock_t
 otherwise the interrupts won't be disabled on -RT. The locking rules remain
@@ -399,7 +398,7 @@ index 0ed7a463f476..12b7da32bcd0 100644
  #ifdef CONFIG_SLAB
  	struct list_head slabs_partial;	/* partial list first, better asm code */
 diff --git a/mm/slub.c b/mm/slub.c
-index 499fb073d1ff..0e4670add1e9 100644
+index 0fefe0ad8f57..ef6fc4df3ed5 100644
 --- a/mm/slub.c
 +++ b/mm/slub.c
 @@ -1182,7 +1182,7 @@ static noinline int free_debug_processing(
@@ … @@
  	if (m == M_FREE) {
  		stat(s, DEACTIVATE_EMPTY);
-@@ -2187,10 +2187,10 @@ static void unfreeze_partials(struct kmem_cache *s,
+@@ -2188,10 +2188,10 @@ static void unfreeze_partials(struct kmem_cache *s,
  			n2 = get_node(s, page_to_nid(page));
  			if (n != n2) {
  				if (n)
@@ … @@
  			}
 
  			do {
-@@ -2219,7 +2219,7 @@ static void unfreeze_partials(struct kmem_cache *s,
+@@ -2220,7 +2220,7 @@ static void unfreeze_partials(struct kmem_cache *s,
  	}
 
  	if (n)
@@ … @@
  	while (discard_page) {
  		page = discard_page;
-@@ -2388,10 +2388,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
+@@ -2387,10 +2387,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
  	unsigned long x = 0;
  	struct page *page;
@@ … @@
  	}
 
  	for (i = 0; i < t.count; i++) {
---
-2.36.1
-
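The hunks above only refresh the quoted patch (its subject numbering, Origin URL, index hashes, and hunk offsets); the underlying RT change converts the SLAB/SLUB per-node list_lock from spinlock_t to raw_spinlock_t, because on PREEMPT_RT a spinlock_t becomes a sleeping lock and spin_lock_irqsave() no longer disables hardware interrupts. The following is a minimal sketch of that conversion pattern, not the kernel code itself: struct example_node and its functions are hypothetical stand-ins for struct kmem_cache_node and SLUB's partial-list handling.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for struct kmem_cache_node: only the lock type changes. */
struct example_node {
	raw_spinlock_t   list_lock;   /* was: spinlock_t list_lock; */
	struct list_head partial;
};

static void example_node_init(struct example_node *n)
{
	raw_spin_lock_init(&n->list_lock);   /* was: spin_lock_init() */
	INIT_LIST_HEAD(&n->partial);
}

/* Unlink one entry from the partial list with interrupts genuinely off. */
static void example_del_partial(struct example_node *n, struct list_head *item)
{
	unsigned long flags;

	/* raw_spin_lock_irqsave() keeps the IRQ-off semantics even on -RT */
	raw_spin_lock_irqsave(&n->list_lock, flags);
	list_del(item);
	raw_spin_unlock_irqrestore(&n->list_lock, flags);
}

The trade-off, and the reason the RT series converts only selected locks, is that raw spinlock critical sections stay non-preemptible on RT, so they are reserved for short, bounded hold times such as these list operations.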