author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-06 01:02:38 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-06 01:02:38 +0000
commit     08b74a000942a380fe028845f92cd3a0dee827d5 (patch)
tree       aa78b4e12607c3e1fcce8d5cc42df4330792f118 /debian/patches-rt/0077-mm-swap-Convert-to-percpu-locked.patch
parent     Adding upstream version 4.19.249. (diff)
Adding debian version 4.19.249-2. (tag: debian/4.19.249-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
-rw-r--r--  debian/patches-rt/0077-mm-swap-Convert-to-percpu-locked.patch  211
1 file changed, 211 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0077-mm-swap-Convert-to-percpu-locked.patch b/debian/patches-rt/0077-mm-swap-Convert-to-percpu-locked.patch
new file mode 100644
index 000000000..a2925871b
--- /dev/null
+++ b/debian/patches-rt/0077-mm-swap-Convert-to-percpu-locked.patch
@@ -0,0 +1,211 @@
+From cba209957a582e2758768ae3c7e5be17d11fc94a Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:29:51 -0500
+Subject: [PATCH 077/347] mm/swap: Convert to percpu locked
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+Replace global locks (get_cpu + local_irq_save) with "local_locks()".
+Currently there is one for "rotate" and one for "swap".
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/swap.h |  2 ++
+ mm/compaction.c      |  6 ++++--
+ mm/page_alloc.c      |  3 ++-
+ mm/swap.c            | 38 ++++++++++++++++++++++----------------
+ 4 files changed, 30 insertions(+), 19 deletions(-)
+
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index ee8f9f554a9e..2ad000e362bd 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -12,6 +12,7 @@
+ #include <linux/fs.h>
+ #include <linux/atomic.h>
+ #include <linux/page-flags.h>
++#include <linux/locallock.h>
+ #include <asm/page.h>
+ 
+ struct notifier_block;
+@@ -331,6 +332,7 @@ extern unsigned long nr_free_pagecache_pages(void);
+ 
+ 
+ /* linux/mm/swap.c */
++DECLARE_LOCAL_IRQ_LOCK(swapvec_lock);
+ extern void lru_cache_add(struct page *);
+ extern void lru_cache_add_anon(struct page *page);
+ extern void lru_cache_add_file(struct page *page);
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 5079ddbec8f9..c40d3a13cbbd 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -1668,10 +1668,12 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
+ 			block_start_pfn(cc->migrate_pfn, cc->order);
+ 
+ 		if (cc->last_migrated_pfn < current_block_start) {
+-			cpu = get_cpu();
++			cpu = get_cpu_light();
++			local_lock_irq(swapvec_lock);
+ 			lru_add_drain_cpu(cpu);
++			local_unlock_irq(swapvec_lock);
+ 			drain_local_pages(zone);
+-			put_cpu();
++			put_cpu_light();
+ 			/* No more flushing until we migrate again */
+ 			cc->last_migrated_pfn = 0;
+ 		}
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 59ea701ecfe0..11c08cfd6dfb 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -7257,8 +7257,9 @@ void __init free_area_init(unsigned long *zones_size)
+ 
+ static int page_alloc_cpu_dead(unsigned int cpu)
+ {
+-
++	local_lock_irq_on(swapvec_lock, cpu);
+ 	lru_add_drain_cpu(cpu);
++	local_unlock_irq_on(swapvec_lock, cpu);
+ 	drain_pages(cpu);
+ 
+ 	/*
+diff --git a/mm/swap.c b/mm/swap.c
+index 45fdbfb6b2a6..92f994b962f0 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -33,6 +33,7 @@
+ #include <linux/memcontrol.h>
+ #include <linux/gfp.h>
+ #include <linux/uio.h>
++#include <linux/locallock.h>
+ #include <linux/hugetlb.h>
+ #include <linux/page_idle.h>
+ 
+@@ -51,6 +52,8 @@ static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
+ #ifdef CONFIG_SMP
+ static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
+ #endif
++static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
++DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
+ 
+ /*
+  * This path almost never happens for VM activity - pages are normally
+@@ -253,11 +256,11 @@ void rotate_reclaimable_page(struct page *page)
+ 		unsigned long flags;
+ 
+ 		get_page(page);
+-		local_irq_save(flags);
++		local_lock_irqsave(rotate_lock, flags);
+ 		pvec = this_cpu_ptr(&lru_rotate_pvecs);
+ 		if (!pagevec_add(pvec, page) || PageCompound(page))
+ 			pagevec_move_tail(pvec);
+-		local_irq_restore(flags);
++		local_unlock_irqrestore(rotate_lock, flags);
+ 	}
+ }
+ 
+@@ -307,12 +310,13 @@ void activate_page(struct page *page)
+ {
+ 	page = compound_head(page);
+ 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+-		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
++		struct pagevec *pvec = &get_locked_var(swapvec_lock,
++						       activate_page_pvecs);
+ 
+ 		get_page(page);
+ 		if (!pagevec_add(pvec, page) || PageCompound(page))
+ 			pagevec_lru_move_fn(pvec, __activate_page, NULL);
+-		put_cpu_var(activate_page_pvecs);
++		put_locked_var(swapvec_lock, activate_page_pvecs);
+ 	}
+ }
+ 
+@@ -334,7 +338,7 @@ void activate_page(struct page *page)
+ 
+ static void __lru_cache_activate_page(struct page *page)
+ {
+-	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
++	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
+ 	int i;
+ 
+ 	/*
+@@ -356,7 +360,7 @@ static void __lru_cache_activate_page(struct page *page)
+ 		}
+ 	}
+ 
+-	put_cpu_var(lru_add_pvec);
++	put_locked_var(swapvec_lock, lru_add_pvec);
+ }
+ 
+ /*
+@@ -398,12 +402,12 @@ EXPORT_SYMBOL(mark_page_accessed);
+ 
+ static void __lru_cache_add(struct page *page)
+ {
+-	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
++	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
+ 
+ 	get_page(page);
+ 	if (!pagevec_add(pvec, page) || PageCompound(page))
+ 		__pagevec_lru_add(pvec);
+-	put_cpu_var(lru_add_pvec);
++	put_locked_var(swapvec_lock, lru_add_pvec);
+ }
+ 
+ /**
+@@ -581,9 +585,9 @@ void lru_add_drain_cpu(int cpu)
+ 		unsigned long flags;
+ 
+ 		/* No harm done if a racing interrupt already did this */
+-		local_irq_save(flags);
++		local_lock_irqsave(rotate_lock, flags);
+ 		pagevec_move_tail(pvec);
+-		local_irq_restore(flags);
++		local_unlock_irqrestore(rotate_lock, flags);
+ 	}
+ 
+ 	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
+@@ -615,11 +619,12 @@ void deactivate_file_page(struct page *page)
+ 		return;
+ 
+ 	if (likely(get_page_unless_zero(page))) {
+-		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
++		struct pagevec *pvec = &get_locked_var(swapvec_lock,
++						       lru_deactivate_file_pvecs);
+ 
+ 		if (!pagevec_add(pvec, page) || PageCompound(page))
+ 			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
+-		put_cpu_var(lru_deactivate_file_pvecs);
++		put_locked_var(swapvec_lock, lru_deactivate_file_pvecs);
+ 	}
+ }
+ 
+@@ -634,19 +639,20 @@ void mark_page_lazyfree(struct page *page)
+ {
+ 	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
+ 	    !PageSwapCache(page) && !PageUnevictable(page)) {
+-		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
++		struct pagevec *pvec = &get_locked_var(swapvec_lock,
++						       lru_lazyfree_pvecs);
+ 
+ 		get_page(page);
+ 		if (!pagevec_add(pvec, page) || PageCompound(page))
+ 			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
+-		put_cpu_var(lru_lazyfree_pvecs);
++		put_locked_var(swapvec_lock, lru_lazyfree_pvecs);
+ 	}
+ }
+ 
+ void lru_add_drain(void)
+ {
+-	lru_add_drain_cpu(get_cpu());
+-	put_cpu();
++	lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
++	local_unlock_cpu(swapvec_lock);
+ }
+ 
+ #ifdef CONFIG_SMP
+-- 
+2.36.1
+
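A note on the pattern for readers not familiar with the RT tree's local locks: get_cpu_var() only disables preemption, and code that must stay preemptible under PREEMPT_RT cannot rely on that, which is why every get_cpu_var()/put_cpu_var() pair above becomes get_locked_var()/put_locked_var() on a named per-CPU lock. Below is a minimal user-space sketch of that idea, assuming POSIX threads; the pagevec struct, the swapvec_lock array, and all helper names here are illustrative stand-ins, not the kernel implementation.

/* Per-"CPU" pagevec guarded by a per-"CPU" lock, modelling the
 * get_locked_var()/put_locked_var() pattern from the patch above.
 * Build with: cc -pthread sketch.c */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NR_CPUS      4
#define PAGEVEC_SIZE 14   /* small batch, in the spirit of the kernel pagevec */

struct pagevec {
	int  nr;
	long pages[PAGEVEC_SIZE];       /* stand-in for struct page pointers */
};

/* Mirrors DEFINE_PER_CPU(struct pagevec, lru_add_pvec) plus
 * DEFINE_LOCAL_IRQ_LOCK(swapvec_lock): one pagevec and one lock per CPU. */
static struct pagevec  lru_add_pvec[NR_CPUS];
static pthread_mutex_t swapvec_lock[NR_CPUS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* get_locked_var() analogue: find this thread's CPU slot and take its lock.
 * The RT kernel version takes a per-CPU lock a task may block on instead of
 * merely disabling preemption, which is the point of the conversion. */
static struct pagevec *get_locked_pvec(int *cpu)
{
	int c = sched_getcpu();

	*cpu = (c < 0) ? 0 : c % NR_CPUS;
	pthread_mutex_lock(&swapvec_lock[*cpu]);
	return &lru_add_pvec[*cpu];
}

/* put_locked_var() analogue. */
static void put_locked_pvec(int cpu)
{
	pthread_mutex_unlock(&swapvec_lock[cpu]);
}

static void flush_pagevec(struct pagevec *pvec)
{
	printf("flushing %d pages to the LRU\n", pvec->nr);
	pvec->nr = 0;
}

/* Shape of __lru_cache_add() after the conversion: batch pages per CPU and
 * flush a full pagevec, all under the per-CPU lock. */
static void lru_cache_add(long page)
{
	int cpu;
	struct pagevec *pvec = get_locked_pvec(&cpu);

	pvec->pages[pvec->nr++] = page;
	if (pvec->nr == PAGEVEC_SIZE)
		flush_pagevec(pvec);
	put_locked_pvec(cpu);
}

int main(void)
{
	for (long i = 0; i < 40; i++)
		lru_cache_add(i);
	return 0;
}

On a stock (non-RT) kernel the locallock primitives compile down to the old preempt/irq-disable operations, so the conversion costs nothing there; under PREEMPT_RT they become per-CPU locks that a task can block on, keeping these code paths preemptible.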