Diffstat
-rw-r--r-- | debian/patches-rt/0203-mm-page_alloc-rt-friendly-per-cpu-pages.patch | 197 |
1 file changed, 197 insertions, 0 deletions
diff --git a/debian/patches-rt/0203-mm-page_alloc-rt-friendly-per-cpu-pages.patch b/debian/patches-rt/0203-mm-page_alloc-rt-friendly-per-cpu-pages.patch
new file mode 100644
index 000000000..8102e89d2
--- /dev/null
+++ b/debian/patches-rt/0203-mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -0,0 +1,197 @@
+From 9f33cce55eb2594c4602653658103381064572d7 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:29:37 -0500
+Subject: [PATCH 203/323] mm: page_alloc: rt-friendly per-cpu pages
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+rt-friendly per-cpu pages: convert the irqs-off per-cpu locking
+method into a preemptible, explicit-per-cpu-locks method.
+
+Contains fixes from:
+	 Peter Zijlstra <a.p.zijlstra@chello.nl>
+	 Thomas Gleixner <tglx@linutronix.de>
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ mm/page_alloc.c | 47 ++++++++++++++++++++++++++++-------------------
+ 1 file changed, 28 insertions(+), 19 deletions(-)
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 508650843235..c5eb7d6844ae 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -61,6 +61,7 @@
+ #include <linux/hugetlb.h>
+ #include <linux/sched/rt.h>
+ #include <linux/sched/mm.h>
++#include <linux/local_lock.h>
+ #include <linux/page_owner.h>
+ #include <linux/kthread.h>
+ #include <linux/memcontrol.h>
+@@ -386,6 +387,13 @@ EXPORT_SYMBOL(nr_node_ids);
+ EXPORT_SYMBOL(nr_online_nodes);
+ #endif
+ 
++struct pa_lock {
++	local_lock_t l;
++};
++static DEFINE_PER_CPU(struct pa_lock, pa_lock) = {
++	.l	= INIT_LOCAL_LOCK(l),
++};
++
+ int page_group_by_mobility_disabled __read_mostly;
+ 
+ #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+@@ -1543,11 +1551,11 @@ static void __free_pages_ok(struct page *page, unsigned int order,
+ 		return;
+ 
+ 	migratetype = get_pfnblock_migratetype(page, pfn);
+-	local_irq_save(flags);
++	local_lock_irqsave(&pa_lock.l, flags);
+ 	__count_vm_events(PGFREE, 1 << order);
+ 	free_one_page(page_zone(page), page, pfn, order, migratetype,
+ 		      fpi_flags);
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(&pa_lock.l, flags);
+ }
+ 
+ void __free_pages_core(struct page *page, unsigned int order)
+@@ -2961,13 +2969,13 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
+ 	int to_drain, batch;
+ 	LIST_HEAD(dst);
+ 
+-	local_irq_save(flags);
++	local_lock_irqsave(&pa_lock.l, flags);
+ 	batch = READ_ONCE(pcp->batch);
+ 	to_drain = min(pcp->count, batch);
+ 	if (to_drain > 0)
+ 		isolate_pcp_pages(to_drain, pcp, &dst);
+ 
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(&pa_lock.l, flags);
+ 
+ 	if (to_drain > 0)
+ 		free_pcppages_bulk(zone, &dst, false);
+@@ -2989,7 +2997,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
+ 	LIST_HEAD(dst);
+ 	int count;
+ 
+-	local_irq_save(flags);
++	local_lock_irqsave(&pa_lock.l, flags);
+ 	pset = per_cpu_ptr(zone->pageset, cpu);
+ 
+ 	pcp = &pset->pcp;
+@@ -2997,7 +3005,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
+ 	if (count)
+ 		isolate_pcp_pages(count, pcp, &dst);
+ 
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(&pa_lock.l, flags);
+ 
+ 	if (count)
+ 		free_pcppages_bulk(zone, &dst, false);
+@@ -3248,9 +3256,9 @@ void free_unref_page(struct page *page)
+ 	if (!free_unref_page_prepare(page, pfn))
+ 		return;
+ 
+-	local_irq_save(flags);
++	local_lock_irqsave(&pa_lock.l, flags);
+ 	free_unref_page_commit(page, pfn, &dst);
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(&pa_lock.l, flags);
+ 	if (!list_empty(&dst))
+ 		free_pcppages_bulk(zone, &dst, false);
+ }
+@@ -3277,7 +3285,7 @@ void free_unref_page_list(struct list_head *list)
+ 		set_page_private(page, pfn);
+ 	}
+ 
+-	local_irq_save(flags);
++	local_lock_irqsave(&pa_lock.l, flags);
+ 	list_for_each_entry_safe(page, next, list, lru) {
+ 		unsigned long pfn = page_private(page);
+ 		enum zone_type type;
+@@ -3292,12 +3300,12 @@ void free_unref_page_list(struct list_head *list)
+ 		 * a large list of pages to free.
+ 		 */
+ 		if (++batch_count == SWAP_CLUSTER_MAX) {
+-			local_irq_restore(flags);
++			local_unlock_irqrestore(&pa_lock.l, flags);
+ 			batch_count = 0;
+-			local_irq_save(flags);
++			local_lock_irqsave(&pa_lock.l, flags);
+ 		}
+ 	}
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(&pa_lock.l, flags);
+ 
+ 	for (i = 0; i < __MAX_NR_ZONES; ) {
+ 		struct page *page;
+@@ -3468,7 +3476,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
+ 	struct page *page;
+ 	unsigned long flags;
+ 
+-	local_irq_save(flags);
++	local_lock_irqsave(&pa_lock.l, flags);
+ 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
+ 	list = &pcp->lists[migratetype];
+ 	page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
+@@ -3476,7 +3484,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
+ 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
+ 		zone_statistics(preferred_zone, zone);
+ 	}
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(&pa_lock.l, flags);
+ 	return page;
+ }
+ 
+@@ -3510,7 +3518,8 @@ struct page *rmqueue(struct zone *preferred_zone,
+ 	 * allocate greater than order-1 page units with __GFP_NOFAIL.
+ 	 */
+ 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
+-	spin_lock_irqsave(&zone->lock, flags);
++	local_lock_irqsave(&pa_lock.l, flags);
++	spin_lock(&zone->lock);
+ 
+ 	do {
+ 		page = NULL;
+@@ -3536,7 +3545,7 @@ struct page *rmqueue(struct zone *preferred_zone,
+ 
+ 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
+ 	zone_statistics(preferred_zone, zone);
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(&pa_lock.l, flags);
+ 
+ out:
+ 	/* Separate test+clear to avoid unnecessary atomics */
+@@ -3549,7 +3558,7 @@ struct page *rmqueue(struct zone *preferred_zone,
+ 	return page;
+ 
+ failed:
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(&pa_lock.l, flags);
+ 	return NULL;
+ }
+ 
+@@ -8892,7 +8901,7 @@ void zone_pcp_reset(struct zone *zone)
+ 	struct per_cpu_pageset *pset;
+ 
+ 	/* avoid races with drain_pages() */
+-	local_irq_save(flags);
++	local_lock_irqsave(&pa_lock.l, flags);
+ 	if (zone->pageset != &boot_pageset) {
+ 		for_each_online_cpu(cpu) {
+ 			pset = per_cpu_ptr(zone->pageset, cpu);
+@@ -8901,7 +8910,7 @@ void zone_pcp_reset(struct zone *zone)
+ 		free_percpu(zone->pageset);
+ 		zone->pageset = &boot_pageset;
+ 	}
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(&pa_lock.l, flags);
+ }
+ 
+ #ifdef CONFIG_MEMORY_HOTREMOVE
+-- 
+2.43.0
+
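The pattern the patch applies throughout mm/page_alloc.c is visible in the hunks above: every local_irq_save()/local_irq_restore() pair that guarded per-CPU pageset state is replaced by local_lock_irqsave()/local_unlock_irqrestore() on a per-CPU local_lock_t (pa_lock.l), and rmqueue() splits spin_lock_irqsave(&zone->lock, flags) into taking pa_lock.l first and then the zone lock with a plain spin_lock(), so the zone lock nests inside the per-CPU lock. On a non-RT kernel the local_lock variants still disable interrupts, so behaviour is unchanged; on PREEMPT_RT they map to a per-CPU sleeping lock, so the sections stay preemptible. The fragment below is a minimal illustrative sketch of that conversion, not code from the patch beyond the pa_lock definition it introduces: critical_section() is a hypothetical placeholder, and the before/after functions exist only for comparison (assumes <linux/local_lock.h>, available since v5.8).

#include <linux/irqflags.h>
#include <linux/local_lock.h>
#include <linux/percpu.h>

/* Per-CPU lock as introduced by the patch: one local_lock_t per CPU,
 * wrapped in a struct so further fields could be added later. */
struct pa_lock {
	local_lock_t l;
};
static DEFINE_PER_CPU(struct pa_lock, pa_lock) = {
	.l = INIT_LOCAL_LOCK(l),
};

/* Hypothetical placeholder for the work done under the lock
 * (e.g. manipulating a per-CPU pcp page list). */
static void critical_section(void)
{
}

/* Before: interrupts hard-disabled, never preemptible, even on RT. */
static void pcp_section_old(void)
{
	unsigned long flags;

	local_irq_save(flags);
	critical_section();
	local_irq_restore(flags);
}

/* After: on !PREEMPT_RT this still disables interrupts (same semantics);
 * on PREEMPT_RT it takes a per-CPU sleeping lock and stays preemptible.
 * The macro resolves the per-CPU address itself, so the per-CPU variable
 * is passed directly, exactly as &pa_lock.l is in the patch. */
static void pcp_section_new(void)
{
	unsigned long flags;

	local_lock_irqsave(&pa_lock.l, flags);
	critical_section();
	local_unlock_irqrestore(&pa_lock.l, flags);
}

The design point is that the lock also documents what is actually protected (the per-CPU pageset data) instead of relying on the implicit "interrupts off on this CPU" rule, which is why lockdep can check the local_lock while the !RT-generated code stays identical to the old irq-save sequence.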