author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:06:00 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:06:00 +0000
commit     b15a952c52a6825376d3e7f6c1bf5c886c6d8b74
tree       1500f2f8f276908a36d8126cb632c0d6b1276764 /debian/patches-rt/0196-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
parent     Adding upstream version 5.10.209.
Adding debian version 5.10.209-2. (debian/5.10.209-2, debian)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0196-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch')
-rw-r--r--  debian/patches-rt/0196-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch  172
1 file changed, 172 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0196-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/debian/patches-rt/0196-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
new file mode 100644
index 000000000..999298a0e
--- /dev/null
+++ b/debian/patches-rt/0196-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
@@ -0,0 +1,172 @@
+From 1e362e9e053f608c55deb05577371d61d5db1a92 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 28 May 2018 15:24:20 +0200
+Subject: [PATCH 196/323] Split IRQ-off and zone->lock while freeing pages from
+ PCP list #1
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+Split the IRQ-off section, which covers the PCP-list access, from the
+zone->lock section, which covers the actual freeing of the pages.
+Introduce isolate_pcp_pages(), which detaches the pages from the PCP
+list onto a temporary list; the temporary list is then freed via
+free_pcppages_bulk().
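+
+A minimal sketch of the resulting call pattern (illustrative only; the
+count checks and the full details are in the mm/page_alloc.c diff
+below):
+
+	LIST_HEAD(dst);
+
+	/* IRQs off only while detaching pages from the PCP list */
+	local_irq_save(flags);
+	isolate_pcp_pages(count, pcp, &dst);
+	local_irq_restore(flags);
+
+	/* zone->lock (irqsave) is taken inside free_pcppages_bulk() */
+	free_pcppages_bulk(zone, count, &dst);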
+
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/page_alloc.c | 81 +++++++++++++++++++++++++++++++------------------
+ 1 file changed, 51 insertions(+), 30 deletions(-)
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 124ab9324610..0dc0eb767fb6 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1331,7 +1331,7 @@ static inline void prefetch_buddy(struct page *page)
+ }
+
+ /*
+- * Frees a number of pages from the PCP lists
++ * Frees a number of pages which have been collected from the pcp lists.
+ * Assumes all pages on list are in same zone, and of same order.
+ * count is the number of pages to free.
+ *
+@@ -1342,14 +1342,40 @@ static inline void prefetch_buddy(struct page *page)
+ * pinned" detection logic.
+ */
+ static void free_pcppages_bulk(struct zone *zone, int count,
+- struct per_cpu_pages *pcp)
++ struct list_head *head)
++{
++ bool isolated_pageblocks;
++ struct page *page, *tmp;
++ unsigned long flags;
++
++ spin_lock_irqsave(&zone->lock, flags);
++ isolated_pageblocks = has_isolate_pageblock(zone);
++
++ /*
++ * Use safe version since after __free_one_page(),
++ * page->lru.next will not point to original list.
++ */
++ list_for_each_entry_safe(page, tmp, head, lru) {
++ int mt = get_pcppage_migratetype(page);
++ /* MIGRATE_ISOLATE page should not go to pcplists */
++ VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
++ /* Pageblock could have been isolated meanwhile */
++ if (unlikely(isolated_pageblocks))
++ mt = get_pageblock_migratetype(page);
++
++ __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE);
++ trace_mm_page_pcpu_drain(page, 0, mt);
++ }
++ spin_unlock_irqrestore(&zone->lock, flags);
++}
++
++static void isolate_pcp_pages(int count, struct per_cpu_pages *pcp,
++ struct list_head *dst)
+ {
+ int migratetype = 0;
+ int batch_free = 0;
+ int prefetch_nr = 0;
+- bool isolated_pageblocks;
+- struct page *page, *tmp;
+- LIST_HEAD(head);
++ struct page *page;
+
+ /*
+ * Ensure proper count is passed which otherwise would stuck in the
+@@ -1386,7 +1412,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
+ if (bulkfree_pcp_prepare(page))
+ continue;
+
+- list_add_tail(&page->lru, &head);
++ list_add_tail(&page->lru, dst);
+
+ /*
+ * We are going to put the page back to the global
+@@ -1401,26 +1427,6 @@ static void free_pcppages_bulk(struct zone *zone, int count,
+ prefetch_buddy(page);
+ } while (--count && --batch_free && !list_empty(list));
+ }
+-
+- spin_lock(&zone->lock);
+- isolated_pageblocks = has_isolate_pageblock(zone);
+-
+- /*
+- * Use safe version since after __free_one_page(),
+- * page->lru.next will not point to original list.
+- */
+- list_for_each_entry_safe(page, tmp, &head, lru) {
+- int mt = get_pcppage_migratetype(page);
+- /* MIGRATE_ISOLATE page should not go to pcplists */
+- VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
+- /* Pageblock could have been isolated meanwhile */
+- if (unlikely(isolated_pageblocks))
+- mt = get_pageblock_migratetype(page);
+-
+- __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE);
+- trace_mm_page_pcpu_drain(page, 0, mt);
+- }
+- spin_unlock(&zone->lock);
+ }
+
+ static void free_one_page(struct zone *zone,
+@@ -2938,13 +2944,18 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
+ {
+ unsigned long flags;
+ int to_drain, batch;
++ LIST_HEAD(dst);
+
+ local_irq_save(flags);
+ batch = READ_ONCE(pcp->batch);
+ to_drain = min(pcp->count, batch);
+ if (to_drain > 0)
+- free_pcppages_bulk(zone, to_drain, pcp);
++ isolate_pcp_pages(to_drain, pcp, &dst);
++
+ local_irq_restore(flags);
++
++ if (to_drain > 0)
++ free_pcppages_bulk(zone, to_drain, &dst);
+ }
+ #endif
+
+@@ -2960,14 +2971,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
+ unsigned long flags;
+ struct per_cpu_pageset *pset;
+ struct per_cpu_pages *pcp;
++ LIST_HEAD(dst);
++ int count;
+
+ local_irq_save(flags);
+ pset = per_cpu_ptr(zone->pageset, cpu);
+
+ pcp = &pset->pcp;
+- if (pcp->count)
+- free_pcppages_bulk(zone, pcp->count, pcp);
++ count = pcp->count;
++ if (count)
++ isolate_pcp_pages(count, pcp, &dst);
++
+ local_irq_restore(flags);
++
++ if (count)
++ free_pcppages_bulk(zone, count, &dst);
+ }
+
+ /*
+@@ -3196,7 +3214,10 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn)
+ pcp->count++;
+ if (pcp->count >= pcp->high) {
+ unsigned long batch = READ_ONCE(pcp->batch);
+- free_pcppages_bulk(zone, batch, pcp);
++ LIST_HEAD(dst);
++
++ isolate_pcp_pages(batch, pcp, &dst);
++ free_pcppages_bulk(zone, batch, &dst);
+ }
+ }
+
+--
+2.43.0
+