Diffstat (limited to 'debian/patches-rt/0076-mm-page_alloc-rt-friendly-per-cpu-pages.patch')
-rw-r--r-- | debian/patches-rt/0076-mm-page_alloc-rt-friendly-per-cpu-pages.patch | 239
1 file changed, 239 insertions, 0 deletions
diff --git a/debian/patches-rt/0076-mm-page_alloc-rt-friendly-per-cpu-pages.patch b/debian/patches-rt/0076-mm-page_alloc-rt-friendly-per-cpu-pages.patch
new file mode 100644
index 000000000..819fc71a0
--- /dev/null
+++ b/debian/patches-rt/0076-mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -0,0 +1,239 @@
+From 85f14b575e2fadf9ab80ef7b3e5158833d79939e Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:29:37 -0500
+Subject: [PATCH 076/347] mm: page_alloc: rt-friendly per-cpu pages
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+rt-friendly per-cpu pages: convert the irqs-off per-cpu locking
+method into a preemptible, explicit-per-cpu-locks method.
+
+Contains fixes from:
+ Peter Zijlstra <a.p.zijlstra@chello.nl>
+ Thomas Gleixner <tglx@linutronix.de>
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ mm/page_alloc.c | 63 +++++++++++++++++++++++++++++++++----------------
+ 1 file changed, 43 insertions(+), 20 deletions(-)
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index ba6f9b334073..59ea701ecfe0 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -60,6 +60,7 @@
+ #include <linux/hugetlb.h>
+ #include <linux/sched/rt.h>
+ #include <linux/sched/mm.h>
++#include <linux/locallock.h>
+ #include <linux/page_owner.h>
+ #include <linux/kthread.h>
+ #include <linux/memcontrol.h>
+@@ -292,6 +293,18 @@ EXPORT_SYMBOL(nr_node_ids);
+ EXPORT_SYMBOL(nr_online_nodes);
+ #endif
+ 
++static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
++
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define cpu_lock_irqsave(cpu, flags)		\
++	local_lock_irqsave_on(pa_lock, flags, cpu)
++# define cpu_unlock_irqrestore(cpu, flags)	\
++	local_unlock_irqrestore_on(pa_lock, flags, cpu)
++#else
++# define cpu_lock_irqsave(cpu, flags)		local_irq_save(flags)
++# define cpu_unlock_irqrestore(cpu, flags)	local_irq_restore(flags)
++#endif
++
+ int page_group_by_mobility_disabled __read_mostly;
+ 
+ #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+@@ -1302,10 +1315,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
+ 		return;
+ 
+ 	migratetype = get_pfnblock_migratetype(page, pfn);
+-	local_irq_save(flags);
++	local_lock_irqsave(pa_lock, flags);
+ 	__count_vm_events(PGFREE, 1 << order);
+ 	free_one_page(page_zone(page), page, pfn, order, migratetype);
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(pa_lock, flags);
+ }
+ 
+ static void __init __free_pages_boot_core(struct page *page, unsigned int order)
+@@ -2570,13 +2583,13 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
+ 	int to_drain, batch;
+ 	LIST_HEAD(dst);
+ 
+-	local_irq_save(flags);
++	local_lock_irqsave(pa_lock, flags);
+ 	batch = READ_ONCE(pcp->batch);
+ 	to_drain = min(pcp->count, batch);
+ 	if (to_drain > 0)
+ 		isolate_pcp_pages(to_drain, pcp, &dst);
+ 
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(pa_lock, flags);
+ 
+ 	if (to_drain > 0)
+ 		free_pcppages_bulk(zone, &dst, false);
+@@ -2598,7 +2611,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
+ 	LIST_HEAD(dst);
+ 	int count;
+ 
+-	local_irq_save(flags);
++	cpu_lock_irqsave(cpu, flags);
+ 	pset = per_cpu_ptr(zone->pageset, cpu);
+ 
+ 	pcp = &pset->pcp;
+@@ -2606,7 +2619,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
+ 	if (count)
+ 		isolate_pcp_pages(count, pcp, &dst);
+ 
+-	local_irq_restore(flags);
++	cpu_unlock_irqrestore(cpu, flags);
+ 
+ 	if (count)
+ 		free_pcppages_bulk(zone, &dst, false);
+@@ -2644,6 +2657,7 @@ void drain_local_pages(struct zone *zone)
+ 		drain_pages(cpu);
+ }
+ 
++#ifndef CONFIG_PREEMPT_RT_BASE
+ static void drain_local_pages_wq(struct work_struct *work)
+ {
+ 	/*
+@@ -2657,6 +2671,7 @@ static void drain_local_pages_wq(struct work_struct *work)
+ 	drain_local_pages(NULL);
+ 	preempt_enable();
+ }
++#endif
+ 
+ /*
+  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
+@@ -2723,7 +2738,14 @@ void drain_all_pages(struct zone *zone)
+ 		else
+ 			cpumask_clear_cpu(cpu, &cpus_with_pcps);
+ 	}
+-
++#ifdef CONFIG_PREEMPT_RT_BASE
++	for_each_cpu(cpu, &cpus_with_pcps) {
++		if (zone)
++			drain_pages_zone(cpu, zone);
++		else
++			drain_pages(cpu);
++	}
++#else
+ 	for_each_cpu(cpu, &cpus_with_pcps) {
+ 		struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
+ 		INIT_WORK(work, drain_local_pages_wq);
+@@ -2731,6 +2753,7 @@ void drain_all_pages(struct zone *zone)
+ 	}
+ 	for_each_cpu(cpu, &cpus_with_pcps)
+ 		flush_work(per_cpu_ptr(&pcpu_drain, cpu));
++#endif
+ 
+ 	mutex_unlock(&pcpu_drain_mutex);
+ }
+@@ -2850,9 +2873,9 @@ void free_unref_page(struct page *page)
+ 	if (!free_unref_page_prepare(page, pfn))
+ 		return;
+ 
+-	local_irq_save(flags);
++	local_lock_irqsave(pa_lock, flags);
+ 	free_unref_page_commit(page, pfn, &dst);
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(pa_lock, flags);
+ 	if (!list_empty(&dst))
+ 		free_pcppages_bulk(zone, &dst, false);
+ }
+@@ -2879,7 +2902,7 @@ void free_unref_page_list(struct list_head *list)
+ 		set_page_private(page, pfn);
+ 	}
+ 
+-	local_irq_save(flags);
++	local_lock_irqsave(pa_lock, flags);
+ 	list_for_each_entry_safe(page, next, list, lru) {
+ 		unsigned long pfn = page_private(page);
+ 		enum zone_type type;
+@@ -2894,12 +2917,12 @@ void free_unref_page_list(struct list_head *list)
+ 		 * a large list of pages to free.
+ 		 */
+ 		if (++batch_count == SWAP_CLUSTER_MAX) {
+-			local_irq_restore(flags);
++			local_unlock_irqrestore(pa_lock, flags);
+ 			batch_count = 0;
+-			local_irq_save(flags);
++			local_lock_irqsave(pa_lock, flags);
+ 		}
+ 	}
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(pa_lock, flags);
+ 
+ 	for (i = 0; i < __MAX_NR_ZONES; ) {
+ 		struct page *page;
+@@ -3048,7 +3071,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
+ 	struct page *page;
+ 	unsigned long flags;
+ 
+-	local_irq_save(flags);
++	local_lock_irqsave(pa_lock, flags);
+ 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
+ 	list = &pcp->lists[migratetype];
+ 	page = __rmqueue_pcplist(zone, migratetype, pcp, list);
+@@ -3056,7 +3079,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
+ 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
+ 		zone_statistics(preferred_zone, zone);
+ 	}
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(pa_lock, flags);
+ 	return page;
+ }
+ 
+@@ -3083,7 +3106,7 @@ struct page *rmqueue(struct zone *preferred_zone,
+ 	 * allocate greater than order-1 page units with __GFP_NOFAIL.
+ 	 */
+ 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
+-	spin_lock_irqsave(&zone->lock, flags);
++	local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
+ 
+ 	do {
+ 		page = NULL;
+@@ -3103,14 +3126,14 @@ struct page *rmqueue(struct zone *preferred_zone,
+ 
+ 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
+ 	zone_statistics(preferred_zone, zone);
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(pa_lock, flags);
+ 
+ out:
+ 	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
+ 	return page;
+ 
+ failed:
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(pa_lock, flags);
+ 	return NULL;
+ }
+ 
+@@ -8146,7 +8169,7 @@ void zone_pcp_reset(struct zone *zone)
+ 	struct per_cpu_pageset *pset;
+ 
+ 	/* avoid races with drain_pages() */
+-	local_irq_save(flags);
++	local_lock_irqsave(pa_lock, flags);
+ 	if (zone->pageset != &boot_pageset) {
+ 		for_each_online_cpu(cpu) {
+ 			pset = per_cpu_ptr(zone->pageset, cpu);
+@@ -8155,7 +8178,7 @@ void zone_pcp_reset(struct zone *zone)
+ 		free_percpu(zone->pageset);
+ 		zone->pageset = &boot_pageset;
+ 	}
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(pa_lock, flags);
+ }
+ 
+ #ifdef CONFIG_MEMORY_HOTREMOVE
+-- 
+2.36.1
+
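For context on the pattern this patch applies: instead of protecting the per-cpu pagesets by disabling interrupts with local_irq_save()/local_irq_restore(), the code takes a named per-CPU local lock (pa_lock). On non-RT configurations the local lock still maps to plain interrupt disabling, while on PREEMPT_RT_BASE it becomes an explicit, preemptible per-CPU lock; the cpu_lock_irqsave() helpers additionally allow taking the lock on behalf of another CPU, which is why drain_all_pages() can drain remote CPUs directly on RT instead of queueing per-CPU work items. The sketch below is illustrative only; it is not part of the patch, not a buildable module, and the function names pcp_update_old()/pcp_update_new() are invented for the example. It assumes the RT patchset's <linux/locallock.h> API used above.

/*
 * Illustrative sketch of the conversion (assumes the RT patchset's
 * <linux/locallock.h>; function names are hypothetical).
 */
#include <linux/irqflags.h>
#include <linux/locallock.h>

static DEFINE_LOCAL_IRQ_LOCK(pa_lock);

/* Old scheme: rely on hard IRQ disabling to serialize per-cpu state. */
static void pcp_update_old(void)
{
	unsigned long flags;

	local_irq_save(flags);
	/* ... touch this CPU's per-cpu page lists ... */
	local_irq_restore(flags);
}

/*
 * New scheme: take the named per-CPU lock. On !PREEMPT_RT_BASE this
 * maps back to local_irq_save()/local_irq_restore(); on PREEMPT_RT_BASE
 * it acquires pa_lock, keeping the section serialized against other
 * users of the same per-cpu data while remaining preemptible.
 */
static void pcp_update_new(void)
{
	unsigned long flags;

	local_lock_irqsave(pa_lock, flags);
	/* ... touch this CPU's per-cpu page lists ... */
	local_unlock_irqrestore(pa_lock, flags);
}

The trade-off, as the commit message states, is that the critical sections stay explicit and per-CPU, so lockdep coverage and RT preemptibility are gained without changing the buddy allocator's zone->lock discipline.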