author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:38 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:38 +0000
commit    08b74a000942a380fe028845f92cd3a0dee827d5 (patch)
tree      aa78b4e12607c3e1fcce8d5cc42df4330792f118 /debian/patches-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
parent    Adding upstream version 4.19.249. (diff)
download  linux-08b74a000942a380fe028845f92cd3a0dee827d5.tar.xz
          linux-08b74a000942a380fe028845f92cd3a0dee827d5.zip
Adding debian version 4.19.249-2. (tag: debian/4.19.249-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch')
-rw-r--r--  debian/patches-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch  173
1 file changed, 173 insertions, 0 deletions
diff --git a/debian/patches-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/debian/patches-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
new file mode 100644
index 000000000..deadce9a4
--- /dev/null
+++ b/debian/patches-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
@@ -0,0 +1,173 @@
+From a2351869ba46346be8b541abd8f1ca7a81df2537 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 28 May 2018 15:24:20 +0200
+Subject: [PATCH 072/347] Split IRQ-off and zone->lock while freeing pages from
+ PCP list #1
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+Split the IRQ-off section, which covers the PCP-list access, from the
+zone->lock section while freeing pages.
+Introduce isolate_pcp_pages(), which moves the pages from the PCP list
+onto a temporary list; the temporary list is then freed via
+free_pcppages_bulk().
+
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/page_alloc.c | 82 +++++++++++++++++++++++++++++++------------------
+ 1 file changed, 52 insertions(+), 30 deletions(-)
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 9c35403d9646..6763dfc2dde0 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1096,7 +1096,7 @@ static inline void prefetch_buddy(struct page *page)
+ }
+
+ /*
+- * Frees a number of pages from the PCP lists
++ * Frees a number of pages which have been collected from the pcp lists.
+ * Assumes all pages on list are in same zone, and of same order.
+ * count is the number of pages to free.
+ *
+@@ -1107,14 +1107,41 @@ static inline void prefetch_buddy(struct page *page)
+ * pinned" detection logic.
+ */
+ static void free_pcppages_bulk(struct zone *zone, int count,
+- struct per_cpu_pages *pcp)
++ struct list_head *head)
++{
++ bool isolated_pageblocks;
++ struct page *page, *tmp;
++ unsigned long flags;
++
++ spin_lock_irqsave(&zone->lock, flags);
++ isolated_pageblocks = has_isolate_pageblock(zone);
++
++ /*
++ * Use safe version since after __free_one_page(),
++ * page->lru.next will not point to original list.
++ */
++ list_for_each_entry_safe(page, tmp, head, lru) {
++ int mt = get_pcppage_migratetype(page);
++ /* MIGRATE_ISOLATE page should not go to pcplists */
++ VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
++ /* Pageblock could have been isolated meanwhile */
++ if (unlikely(isolated_pageblocks))
++ mt = get_pageblock_migratetype(page);
++
++ __free_one_page(page, page_to_pfn(page), zone, 0, mt);
++ trace_mm_page_pcpu_drain(page, 0, mt);
++ }
++ spin_unlock_irqrestore(&zone->lock, flags);
++}
++
++static void isolate_pcp_pages(int count, struct per_cpu_pages *pcp,
++ struct list_head *dst)
++
+ {
+ int migratetype = 0;
+ int batch_free = 0;
+ int prefetch_nr = 0;
+- bool isolated_pageblocks;
+- struct page *page, *tmp;
+- LIST_HEAD(head);
++ struct page *page;
+
+ /*
+ * Ensure proper count is passed which otherwise would stuck in the
+@@ -1151,7 +1178,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
+ if (bulkfree_pcp_prepare(page))
+ continue;
+
+- list_add_tail(&page->lru, &head);
++ list_add_tail(&page->lru, dst);
+
+ /*
+ * We are going to put the page back to the global
+@@ -1166,26 +1193,6 @@ static void free_pcppages_bulk(struct zone *zone, int count,
+ prefetch_buddy(page);
+ } while (--count && --batch_free && !list_empty(list));
+ }
+-
+- spin_lock(&zone->lock);
+- isolated_pageblocks = has_isolate_pageblock(zone);
+-
+- /*
+- * Use safe version since after __free_one_page(),
+- * page->lru.next will not point to original list.
+- */
+- list_for_each_entry_safe(page, tmp, &head, lru) {
+- int mt = get_pcppage_migratetype(page);
+- /* MIGRATE_ISOLATE page should not go to pcplists */
+- VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
+- /* Pageblock could have been isolated meanwhile */
+- if (unlikely(isolated_pageblocks))
+- mt = get_pageblock_migratetype(page);
+-
+- __free_one_page(page, page_to_pfn(page), zone, 0, mt);
+- trace_mm_page_pcpu_drain(page, 0, mt);
+- }
+- spin_unlock(&zone->lock);
+ }
+
+ static void free_one_page(struct zone *zone,
+@@ -2546,13 +2553,18 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
+ {
+ unsigned long flags;
+ int to_drain, batch;
++ LIST_HEAD(dst);
+
+ local_irq_save(flags);
+ batch = READ_ONCE(pcp->batch);
+ to_drain = min(pcp->count, batch);
+ if (to_drain > 0)
+- free_pcppages_bulk(zone, to_drain, pcp);
++ isolate_pcp_pages(to_drain, pcp, &dst);
++
+ local_irq_restore(flags);
++
++ if (to_drain > 0)
++ free_pcppages_bulk(zone, to_drain, &dst);
+ }
+ #endif
+
+@@ -2568,14 +2580,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
+ unsigned long flags;
+ struct per_cpu_pageset *pset;
+ struct per_cpu_pages *pcp;
++ LIST_HEAD(dst);
++ int count;
+
+ local_irq_save(flags);
+ pset = per_cpu_ptr(zone->pageset, cpu);
+
+ pcp = &pset->pcp;
+- if (pcp->count)
+- free_pcppages_bulk(zone, pcp->count, pcp);
++ count = pcp->count;
++ if (count)
++ isolate_pcp_pages(count, pcp, &dst);
++
+ local_irq_restore(flags);
++
++ if (count)
++ free_pcppages_bulk(zone, count, &dst);
+ }
+
+ /*
+@@ -2797,7 +2816,10 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn)
+ pcp->count++;
+ if (pcp->count >= pcp->high) {
+ unsigned long batch = READ_ONCE(pcp->batch);
+- free_pcppages_bulk(zone, batch, pcp);
++ LIST_HEAD(dst);
++
++ isolate_pcp_pages(batch, pcp, &dst);
++ free_pcppages_bulk(zone, batch, &dst);
+ }
+ }
+
+--
+2.36.1
+
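
For orientation, below is a minimal userspace sketch of the two-phase pattern
this patch applies: detach the pages onto a private list under the short
"IRQ-off" section first, then do the expensive freeing under zone->lock
afterwards. This is an illustration only, not kernel code: the names
(irq_lock, zone_lock, pcp_list, isolate_batch, free_batch) are hypothetical
stand-ins, with pthread mutexes modeling the IRQ-off section and zone->lock.

/* Hypothetical userspace analogue of the split performed by this patch.
 * irq_lock models the IRQ-off section; zone_lock models zone->lock.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int id; };

static pthread_mutex_t irq_lock  = PTHREAD_MUTEX_INITIALIZER; /* "IRQ-off" */
static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER; /* zone->lock */
static struct node *pcp_list;                                 /* per-CPU list */

/* Phase 1 (mirrors isolate_pcp_pages()): under the "IRQ-off" section only,
 * detach up to count nodes onto a private list. Pure pointer work, so this
 * critical section stays short. */
static struct node *isolate_batch(int count)
{
	struct node *dst = NULL;

	pthread_mutex_lock(&irq_lock);
	while (count-- && pcp_list) {
		struct node *n = pcp_list;

		pcp_list = n->next;
		n->next = dst;
		dst = n;
	}
	pthread_mutex_unlock(&irq_lock);
	return dst;
}

/* Phase 2 (mirrors the reworked free_pcppages_bulk()): under zone_lock only,
 * walk the private list and do the expensive per-node work. The "IRQ-off"
 * section is no longer held while this runs. */
static void free_batch(struct node *head)
{
	pthread_mutex_lock(&zone_lock);
	while (head) {
		struct node *next = head->next;

		printf("freeing node %d\n", head->id); /* stands in for __free_one_page() */
		head = next;
	}
	pthread_mutex_unlock(&zone_lock);
}

int main(void)
{
	static struct node nodes[4];

	for (int i = 0; i < 4; i++) {
		nodes[i].id = i;
		nodes[i].next = pcp_list;
		pcp_list = &nodes[i];
	}
	/* drain_zone_pages() analogue: isolate first, free afterwards. */
	free_batch(isolate_batch(4));
	return 0;
}

The point of the split, as in the patch's drain_zone_pages() and
drain_pages_zone() hunks, is that the expensive __free_one_page() loop no
longer runs inside the IRQ-off section: pages are isolated with interrupts
disabled, interrupts are restored, and only then is zone->lock taken to free
the temporary list.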