From: Peter Zijlstra
Date: Mon, 28 May 2018 15:24:20 +0200
Subject: [PATCH 072/353] Split IRQ-off and zone->lock while freeing pages from PCP list #1
Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=0073312549bfa62ca583d487d14b2e78056df9c6

Split the IRQ-off section, which covers accessing the PCP list, from the
zone->lock section while freeing pages.
Introduce isolate_pcp_pages(), which separates the pages from the PCP
list onto a temporary list; the temporary list is then freed via
free_pcppages_bulk().

Signed-off-by: Peter Zijlstra
Signed-off-by: Sebastian Andrzej Siewior
---
 mm/page_alloc.c | 82 +++++++++++++++++++++++++++++++------------------
 1 file changed, 52 insertions(+), 30 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1cffd4e1fd8f..7b48a742f235 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1096,7 +1096,7 @@ static inline void prefetch_buddy(struct page *page)
 }
 
 /*
- * Frees a number of pages from the PCP lists
+ * Frees a number of pages which have been collected from the pcp lists.
  * Assumes all pages on list are in same zone, and of same order.
  * count is the number of pages to free.
  *
@@ -1107,14 +1107,41 @@ static inline void prefetch_buddy(struct page *page)
  * pinned" detection logic.
  */
 static void free_pcppages_bulk(struct zone *zone, int count,
-					struct per_cpu_pages *pcp)
+					struct list_head *head)
+{
+	bool isolated_pageblocks;
+	struct page *page, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&zone->lock, flags);
+	isolated_pageblocks = has_isolate_pageblock(zone);
+
+	/*
+	 * Use safe version since after __free_one_page(),
+	 * page->lru.next will not point to original list.
+	 */
+	list_for_each_entry_safe(page, tmp, head, lru) {
+		int mt = get_pcppage_migratetype(page);
+		/* MIGRATE_ISOLATE page should not go to pcplists */
+		VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
+		/* Pageblock could have been isolated meanwhile */
+		if (unlikely(isolated_pageblocks))
+			mt = get_pageblock_migratetype(page);
+
+		__free_one_page(page, page_to_pfn(page), zone, 0, mt);
+		trace_mm_page_pcpu_drain(page, 0, mt);
+	}
+	spin_unlock_irqrestore(&zone->lock, flags);
+}
+
+static void isolate_pcp_pages(int count, struct per_cpu_pages *pcp,
+			      struct list_head *dst)
+
 {
 	int migratetype = 0;
 	int batch_free = 0;
 	int prefetch_nr = 0;
-	bool isolated_pageblocks;
-	struct page *page, *tmp;
-	LIST_HEAD(head);
+	struct page *page;
 
 	/*
 	 * Ensure proper count is passed which otherwise would stuck in the
@@ -1151,7 +1178,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			if (bulkfree_pcp_prepare(page))
 				continue;
 
-			list_add_tail(&page->lru, &head);
+			list_add_tail(&page->lru, dst);
 
 			/*
 			 * We are going to put the page back to the global
@@ -1166,26 +1193,6 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 				prefetch_buddy(page);
 		} while (--count && --batch_free && !list_empty(list));
 	}
-
-	spin_lock(&zone->lock);
-	isolated_pageblocks = has_isolate_pageblock(zone);
-
-	/*
-	 * Use safe version since after __free_one_page(),
-	 * page->lru.next will not point to original list.
-	 */
-	list_for_each_entry_safe(page, tmp, &head, lru) {
-		int mt = get_pcppage_migratetype(page);
-		/* MIGRATE_ISOLATE page should not go to pcplists */
-		VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
-		/* Pageblock could have been isolated meanwhile */
-		if (unlikely(isolated_pageblocks))
-			mt = get_pageblock_migratetype(page);
-
-		__free_one_page(page, page_to_pfn(page), zone, 0, mt);
-		trace_mm_page_pcpu_drain(page, 0, mt);
-	}
-	spin_unlock(&zone->lock);
 }
 
 static void free_one_page(struct zone *zone,
@@ -2546,13 +2553,18 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 {
 	unsigned long flags;
 	int to_drain, batch;
+	LIST_HEAD(dst);
 
 	local_irq_save(flags);
 	batch = READ_ONCE(pcp->batch);
 	to_drain = min(pcp->count, batch);
 	if (to_drain > 0)
-		free_pcppages_bulk(zone, to_drain, pcp);
+		isolate_pcp_pages(to_drain, pcp, &dst);
+
 	local_irq_restore(flags);
+
+	if (to_drain > 0)
+		free_pcppages_bulk(zone, to_drain, &dst);
 }
 #endif
 
@@ -2568,14 +2580,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
 	unsigned long flags;
 	struct per_cpu_pageset *pset;
 	struct per_cpu_pages *pcp;
+	LIST_HEAD(dst);
+	int count;
 
 	local_irq_save(flags);
 	pset = per_cpu_ptr(zone->pageset, cpu);
 
 	pcp = &pset->pcp;
-	if (pcp->count)
-		free_pcppages_bulk(zone, pcp->count, pcp);
+	count = pcp->count;
+	if (count)
+		isolate_pcp_pages(count, pcp, &dst);
+
 	local_irq_restore(flags);
+
+	if (count)
+		free_pcppages_bulk(zone, count, &dst);
 }
 
 /*
@@ -2797,7 +2816,10 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn)
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
 		unsigned long batch = READ_ONCE(pcp->batch);
-		free_pcppages_bulk(zone, batch, pcp);
+		LIST_HEAD(dst);
+
+		isolate_pcp_pages(batch, pcp, &dst);
+		free_pcppages_bulk(zone, batch, &dst);
 	}
 }
 
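
[Note: the change above boils down to a two-phase drain: detach pages from the
per-CPU list while interrupts are off, then hand them back to the buddy
allocator under zone->lock in a separate critical section. The standalone
userspace sketch below only illustrates that pattern under simplified
assumptions: pthread mutexes stand in for local_irq_save()/zone->lock, a
singly linked list stands in for the pcp lists, and the function names mirror
the diff but their signatures are simplified, not the kernel's.]

/*
 * Userspace sketch (simplified analogue, not kernel code) of the split
 * introduced by this patch: isolate entries while "IRQs" are off, then
 * free them under the zone lock, so the two sections never nest.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct page { struct page *next; };

static pthread_mutex_t pcp_lock  = PTHREAD_MUTEX_INITIALIZER; /* stands in for local_irq_save() */
static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for zone->lock */
static struct page *pcp_list; /* stands in for the per-CPU free-page cache */

/* Phase 1: detach up to @count pages onto a private list; only pcp_lock held. */
static struct page *isolate_pcp_pages(int count)
{
	struct page *dst = NULL;

	pthread_mutex_lock(&pcp_lock);
	while (count-- && pcp_list) {
		struct page *page = pcp_list;

		pcp_list = page->next;
		page->next = dst;
		dst = page;
	}
	pthread_mutex_unlock(&pcp_lock);
	return dst;
}

/* Phase 2: give the detached pages back to the allocator; only zone_lock held. */
static void free_pcppages_bulk(struct page *list)
{
	pthread_mutex_lock(&zone_lock);
	while (list) {
		struct page *page = list;

		list = page->next;
		free(page); /* stands in for __free_one_page() */
	}
	pthread_mutex_unlock(&zone_lock);
}

int main(void)
{
	/* Populate a small "pcp" list, then drain it in the two phases. */
	for (int i = 0; i < 8; i++) {
		struct page *page = malloc(sizeof(*page));

		page->next = pcp_list;
		pcp_list = page;
	}
	free_pcppages_bulk(isolate_pcp_pages(8));
	printf("drained\n");
	return 0;
}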