author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:06:00 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:06:00 +0000 |
commit | b15a952c52a6825376d3e7f6c1bf5c886c6d8b74 (patch) | |
tree | 1500f2f8f276908a36d8126cb632c0d6b1276764 /debian/patches-rt/0198-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch | |
parent | Adding upstream version 5.10.209. (diff) | |
download | linux-b15a952c52a6825376d3e7f6c1bf5c886c6d8b74.tar.xz, linux-b15a952c52a6825376d3e7f6c1bf5c886c6d8b74.zip | |
Adding debian version 5.10.209-2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0198-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch')
-rw-r--r-- | debian/patches-rt/0198-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch | 603 |
1 file changed, 603 insertions, 0 deletions
diff --git a/debian/patches-rt/0198-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch b/debian/patches-rt/0198-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
new file mode 100644
index 000000000..903ce7fd5
--- /dev/null
+++ b/debian/patches-rt/0198-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
@@ -0,0 +1,603 @@
+From 24eecb5c4b97d1a359a8b16be1dae2a499720795 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 28 May 2018 15:24:22 +0200
+Subject: [PATCH 198/323] mm/SLxB: change list_lock to raw_spinlock_t
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+The list_lock is used with IRQs off on RT. Make it a raw_spinlock_t
+otherwise the interrupts won't be disabled on -RT. The locking rules remain
+the same on !RT.
+This patch changes it for SLAB and SLUB since both share the same header
+file for struct kmem_cache_node definition.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/slab.c | 90 +++++++++++++++++++++++++++----------------------------
+ mm/slab.h | 2 +-
+ mm/slub.c | 50 +++++++++++++++----------------
+ 3 files changed, 71 insertions(+), 71 deletions(-)
+
+diff --git a/mm/slab.c b/mm/slab.c
+index b2cc2cf7d8a3..677c0651ef66 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -233,7 +233,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
+ parent->shared = NULL;
+ parent->alien = NULL;
+ parent->colour_next = 0;
+- spin_lock_init(&parent->list_lock);
++ raw_spin_lock_init(&parent->list_lock);
+ parent->free_objects = 0;
+ parent->free_touched = 0;
+ }
+@@ -558,9 +558,9 @@ static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
+ page_node = page_to_nid(page);
+ n = get_node(cachep, page_node);
+
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ free_block(cachep, &objp, 1, page_node, &list);
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+
+ slabs_destroy(cachep, &list);
+ }
+@@ -698,7 +698,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
+ struct kmem_cache_node *n = get_node(cachep, node);
+
+ if (ac->avail) {
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ /*
+ * Stuff objects into the remote nodes shared array first.
+ * That way we could avoid the overhead of putting the objects
+@@ -709,7 +709,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
+
+ free_block(cachep, ac->entry, ac->avail, node, list);
+ ac->avail = 0;
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ }
+ }
+
+@@ -782,9 +782,9 @@ static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
+ slabs_destroy(cachep, &list);
+ } else {
+ n = get_node(cachep, page_node);
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ free_block(cachep, &objp, 1, page_node, &list);
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ slabs_destroy(cachep, &list);
+ }
+ return 1;
+@@ -825,10 +825,10 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
+ */
+ n = get_node(cachep, node);
+ if (n) {
+- spin_lock_irq(&n->list_lock);
++ raw_spin_lock_irq(&n->list_lock);
+ n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
+ cachep->num;
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+
+ return 0;
+ }
+@@ -907,7 +907,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
+ goto fail;
+
+ n = get_node(cachep, node);
+- spin_lock_irq(&n->list_lock);
++ raw_spin_lock_irq(&n->list_lock);
+ if (n->shared && force_change) {
+ free_block(cachep, n->shared->entry,
+ n->shared->avail, node, &list);
+@@ -925,7 +925,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
+ new_alien = NULL;
+ }
+
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+ slabs_destroy(cachep, &list);
+
+ /*
+@@ -964,7 +964,7 @@ static void cpuup_canceled(long cpu)
+ if (!n)
+ continue;
+
+- spin_lock_irq(&n->list_lock);
++ raw_spin_lock_irq(&n->list_lock);
+
+ /* Free limit for this kmem_cache_node */
+ n->free_limit -= cachep->batchcount;
+@@ -975,7 +975,7 @@ static void cpuup_canceled(long cpu)
+ nc->avail = 0;
+
+ if (!cpumask_empty(mask)) {
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+ goto free_slab;
+ }
+
+@@ -989,7 +989,7 @@ static void cpuup_canceled(long cpu)
+ alien = n->alien;
+ n->alien = NULL;
+
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+
+ kfree(shared);
+ if (alien) {
+@@ -1173,7 +1173,7 @@ static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *
+ /*
+ * Do not assume that spinlocks can be initialized via memcpy:
+ */
+- spin_lock_init(&ptr->list_lock);
++ raw_spin_lock_init(&ptr->list_lock);
+
+ MAKE_ALL_LISTS(cachep, ptr, nodeid);
+ cachep->node[nodeid] = ptr;
+@@ -1344,11 +1344,11 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
+ for_each_kmem_cache_node(cachep, node, n) {
+ unsigned long total_slabs, free_slabs, free_objs;
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+ total_slabs = n->total_slabs;
+ free_slabs = n->free_slabs;
+ free_objs = n->free_objects;
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+
+ pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
+ node, total_slabs - free_slabs, total_slabs,
+@@ -2105,7 +2105,7 @@ static void check_spinlock_acquired(struct kmem_cache *cachep)
+ {
+ #ifdef CONFIG_SMP
+ check_irq_off();
+- assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
++ assert_raw_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
+ #endif
+ }
+
+@@ -2113,7 +2113,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
+ {
+ #ifdef CONFIG_SMP
+ check_irq_off();
+- assert_spin_locked(&get_node(cachep, node)->list_lock);
++ assert_raw_spin_locked(&get_node(cachep, node)->list_lock);
+ #endif
+ }
+
+@@ -2153,9 +2153,9 @@ static void do_drain(void *arg)
+ check_irq_off();
+ ac = cpu_cache_get(cachep);
+ n = get_node(cachep, node);
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ free_block(cachep, ac->entry, ac->avail, node, &list);
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ ac->avail = 0;
+ slabs_destroy(cachep, &list);
+ }
+@@ -2173,9 +2173,9 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
+ drain_alien_cache(cachep, n->alien);
+
+ for_each_kmem_cache_node(cachep, node, n) {
+- spin_lock_irq(&n->list_lock);
++ raw_spin_lock_irq(&n->list_lock);
+ drain_array_locked(cachep, n->shared, node, true, &list);
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+
+ slabs_destroy(cachep, &list);
+ }
+@@ -2197,10 +2197,10 @@ static int drain_freelist(struct kmem_cache *cache,
+ nr_freed = 0;
+ while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
+
+- spin_lock_irq(&n->list_lock);
++ raw_spin_lock_irq(&n->list_lock);
+ p = n->slabs_free.prev;
+ if (p == &n->slabs_free) {
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+ goto out;
+ }
+
+@@ -2213,7 +2213,7 @@ static int drain_freelist(struct kmem_cache *cache,
+ * to the cache.
+ */
+ n->free_objects -= cache->num;
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+ slab_destroy(cache, page);
+ nr_freed++;
+ }
+@@ -2649,7 +2649,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
+ INIT_LIST_HEAD(&page->slab_list);
+ n = get_node(cachep, page_to_nid(page));
+
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ n->total_slabs++;
+ if (!page->active) {
+ list_add_tail(&page->slab_list, &n->slabs_free);
+@@ -2659,7 +2659,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
+
+ STATS_INC_GROWN(cachep);
+ n->free_objects += cachep->num - page->active;
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+
+ fixup_objfreelist_debug(cachep, &list);
+ }
+@@ -2825,7 +2825,7 @@ static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
+ {
+ struct page *page;
+
+- assert_spin_locked(&n->list_lock);
++ assert_raw_spin_locked(&n->list_lock);
+ page = list_first_entry_or_null(&n->slabs_partial, struct page,
+ slab_list);
+ if (!page) {
+@@ -2852,10 +2852,10 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
+ if (!gfp_pfmemalloc_allowed(flags))
+ return NULL;
+
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ page = get_first_slab(n, true);
+ if (!page) {
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ return NULL;
+ }
+
+@@ -2864,7 +2864,7 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
+
+ fixup_slab_list(cachep, n, page, &list);
+
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ fixup_objfreelist_debug(cachep, &list);
+
+ return obj;
+@@ -2923,7 +2923,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
+ if (!n->free_objects && (!shared || !shared->avail))
+ goto direct_grow;
+
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ shared = READ_ONCE(n->shared);
+
+ /* See if we can refill from the shared array */
+@@ -2947,7 +2947,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
+ must_grow:
+ n->free_objects -= ac->avail;
+ alloc_done:
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ fixup_objfreelist_debug(cachep, &list);
+
+ direct_grow:
+@@ -3172,7 +3172,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
+ BUG_ON(!n);
+
+ check_irq_off();
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ page = get_first_slab(n, false);
+ if (!page)
+ goto must_grow;
+@@ -3190,12 +3190,12 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
+
+ fixup_slab_list(cachep, n, page, &list);
+
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ fixup_objfreelist_debug(cachep, &list);
+ return obj;
+
+ must_grow:
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
+ if (page) {
+ /* This slab isn't counted yet so don't update free_objects */
+@@ -3373,7 +3373,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
+
+ check_irq_off();
+ n = get_node(cachep, node);
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ if (n->shared) {
+ struct array_cache *shared_array = n->shared;
+ int max = shared_array->limit - shared_array->avail;
+@@ -3402,7 +3402,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
+ STATS_SET_FREEABLE(cachep, i);
+ }
+ #endif
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ ac->avail -= batchcount;
+ memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
+ slabs_destroy(cachep, &list);
+@@ -3831,9 +3831,9 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
+
+ node = cpu_to_mem(cpu);
+ n = get_node(cachep, node);
+- spin_lock_irq(&n->list_lock);
++ raw_spin_lock_irq(&n->list_lock);
+ free_block(cachep, ac->entry, ac->avail, node, &list);
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+ slabs_destroy(cachep, &list);
+ }
+ free_percpu(prev);
+@@ -3928,9 +3928,9 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
+ return;
+ }
+
+- spin_lock_irq(&n->list_lock);
++ raw_spin_lock_irq(&n->list_lock);
+ drain_array_locked(cachep, ac, node, false, &list);
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+
+ slabs_destroy(cachep, &list);
+ }
+@@ -4014,7 +4014,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
+
+ for_each_kmem_cache_node(cachep, node, n) {
+ check_irq_on();
+- spin_lock_irq(&n->list_lock);
++ raw_spin_lock_irq(&n->list_lock);
+
+ total_slabs += n->total_slabs;
+ free_slabs += n->free_slabs;
+@@ -4023,7 +4023,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
+ if (n->shared)
+ shared_avail += n->shared->avail;
+
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+ }
+ num_objs = total_slabs * cachep->num;
+ active_slabs = total_slabs - free_slabs;
+diff --git a/mm/slab.h b/mm/slab.h
+index 6952e10cf33b..ed5dd6e9e0cc 100644
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -543,7 +543,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
+ * The slab lists for all objects.
+ */
+ struct kmem_cache_node {
+- spinlock_t list_lock;
++ raw_spinlock_t list_lock;
+
+ #ifdef CONFIG_SLAB
+ struct list_head slabs_partial; /* partial list first, better asm code */
+diff --git a/mm/slub.c b/mm/slub.c
+index b0f637519ac9..863554db3323 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1214,7 +1214,7 @@ static noinline int free_debug_processing(
+ unsigned long flags;
+ int ret = 0;
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+ slab_lock(page);
+
+ if (s->flags & SLAB_CONSISTENCY_CHECKS) {
+@@ -1249,7 +1249,7 @@ static noinline int free_debug_processing(
+ bulk_cnt, cnt);
+
+ slab_unlock(page);
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ if (!ret)
+ slab_fix(s, "Object at 0x%p not freed", object);
+ return ret;
+@@ -1967,7 +1967,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
+ if (!n || !n->nr_partial)
+ return NULL;
+
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
+ void *t;
+
+@@ -1992,7 +1992,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
+ break;
+
+ }
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ return object;
+ }
+
+@@ -2246,7 +2246,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+ * that acquire_slab() will see a slab page that
+ * is frozen
+ */
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ }
+ } else {
+ m = M_FULL;
+@@ -2258,7 +2258,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+ * slabs from diagnostic functions will not see
+ * any frozen slabs.
+ */
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ }
+ #endif
+ }
+@@ -2283,7 +2283,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+ goto redo;
+
+ if (lock)
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+
+ if (m == M_PARTIAL)
+ stat(s, tail);
+@@ -2323,10 +2323,10 @@ static void unfreeze_partials(struct kmem_cache *s,
+ n2 = get_node(s, page_to_nid(page));
+ if (n != n2) {
+ if (n)
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+
+ n = n2;
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ }
+
+ do {
+@@ -2355,7 +2355,7 @@ static void unfreeze_partials(struct kmem_cache *s,
+ }
+
+ if (n)
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+
+ while (discard_page) {
+ page = discard_page;
+@@ -2520,10 +2520,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
+ unsigned long x = 0;
+ struct page *page;
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+ list_for_each_entry(page, &n->partial, slab_list)
+ x += get_count(page);
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ return x;
+ }
+ #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
+@@ -2983,7 +2983,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+
+ do {
+ if (unlikely(n)) {
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ n = NULL;
+ }
+ prior = page->freelist;
+@@ -3015,7 +3015,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+ * Otherwise the list_lock will synchronize with
+ * other processors updating the list of slabs.
+ */
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+
+ }
+ }
+@@ -3057,7 +3057,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+ add_partial(n, page, DEACTIVATE_TO_TAIL);
+ stat(s, FREE_ADD_PARTIAL);
+ }
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ return;
+
+ slab_empty:
+@@ -3072,7 +3072,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+ remove_full(s, n, page);
+ }
+
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ stat(s, FREE_SLAB);
+ discard_slab(s, page);
+ }
+@@ -3479,7 +3479,7 @@ static void
+ init_kmem_cache_node(struct kmem_cache_node *n)
+ {
+ n->nr_partial = 0;
+- spin_lock_init(&n->list_lock);
++ raw_spin_lock_init(&n->list_lock);
+ INIT_LIST_HEAD(&n->partial);
+ #ifdef CONFIG_SLUB_DEBUG
+ atomic_long_set(&n->nr_slabs, 0);
+@@ -3874,7 +3874,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
+ struct page *page, *h;
+
+ BUG_ON(irqs_disabled());
+- spin_lock_irq(&n->list_lock);
++ raw_spin_lock_irq(&n->list_lock);
+ list_for_each_entry_safe(page, h, &n->partial, slab_list) {
+ if (!page->inuse) {
+ remove_partial(n, page);
+@@ -3884,7 +3884,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
+ "Objects remaining in %s on __kmem_cache_shutdown()");
+ }
+ }
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+
+ list_for_each_entry_safe(page, h, &discard, slab_list)
+ discard_slab(s, page);
+@@ -4155,7 +4155,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
+ for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
+ INIT_LIST_HEAD(promote + i);
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+
+ /*
+ * Build lists of slabs to discard or promote.
+@@ -4186,7 +4186,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
+ for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
+ list_splice(promote + i, &n->partial);
+
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+
+ /* Release empty slabs */
+ list_for_each_entry_safe(page, t, &discard, slab_list)
+@@ -4548,7 +4548,7 @@ static int validate_slab_node(struct kmem_cache *s,
+ struct page *page;
+ unsigned long flags;
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+
+ list_for_each_entry(page, &n->partial, slab_list) {
+ validate_slab(s, page);
+@@ -4570,7 +4570,7 @@ static int validate_slab_node(struct kmem_cache *s,
+ s->name, count, atomic_long_read(&n->nr_slabs));
+
+ out:
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ return count;
+ }
+
+@@ -4749,12 +4749,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
+ if (!atomic_long_read(&n->nr_slabs))
+ continue;
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+ list_for_each_entry(page, &n->partial, slab_list)
+ process_slab(&t, s, page, alloc);
+ list_for_each_entry(page, &n->full, slab_list)
+ process_slab(&t, s, page, alloc);
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ }
+
+ for (i = 0; i < t.count; i++) {
+--
+2.43.0
+
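For readers unfamiliar with PREEMPT_RT locking, a short note on why the conversion above is needed: on an RT kernel a plain `spinlock_t` is substituted with a sleeping lock, so it cannot be taken in sections that run with hard interrupts disabled, while `raw_spinlock_t` keeps the classic spinning, interrupt-disabling behaviour on both RT and !RT kernels. The sketch below is a minimal illustration of the conversion pattern the patch applies to `list_lock` throughout mm/slab.c and mm/slub.c; the `demo_node` structure and its helpers are hypothetical stand-ins, not kernel API.

```c
/*
 * Minimal sketch of the spinlock_t -> raw_spinlock_t conversion pattern.
 * demo_node stands in for struct kmem_cache_node; it is not kernel API.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_node {
	raw_spinlock_t   list_lock;	/* was: spinlock_t list_lock; */
	struct list_head partial;
};

static void demo_node_init(struct demo_node *n)
{
	raw_spin_lock_init(&n->list_lock);	/* was: spin_lock_init() */
	INIT_LIST_HEAD(&n->partial);
}

static void demo_add_partial(struct demo_node *n, struct list_head *entry)
{
	unsigned long flags;

	/*
	 * raw_spin_lock_irqsave() really disables interrupts, on RT as
	 * well, which is what the slab list manipulation relies on.
	 */
	raw_spin_lock_irqsave(&n->list_lock, flags);
	list_add(entry, &n->partial);
	raw_spin_unlock_irqrestore(&n->list_lock, flags);
}
```

On a !RT build the raw_* variants behave the same as the plain spinlock calls, which is why the patch description notes that the locking rules remain unchanged on !RT.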