From: Ingo Molnar
Date: Fri, 3 Jul 2009 08:29:51 -0500
Subject: [PATCH 077/353] mm/swap: Convert to percpu locked
Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=f11cbef82fab199491592055c6bd404867e5d463

Replace global locks (get_cpu + local_irq_save) with "local_locks()".
Currently there is one for "rotate" and one for "swap".

Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
---
 include/linux/swap.h |  2 ++
 mm/compaction.c      |  6 ++++--
 mm/page_alloc.c      |  3 ++-
 mm/swap.c            | 38 ++++++++++++++++++++++----------------
 4 files changed, 30 insertions(+), 19 deletions(-)
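Note: for reference, the conversion pattern this patch applies throughout,
shown as a minimal sketch. It assumes the locallock API added earlier in this
series (<linux/locallock.h>); the names example_lock, example_pvecs and
example_add are hypothetical, used only for illustration. On a non-RT kernel
the local lock collapses to preempt_disable()/get_cpu_var() (local_irq_save()
for the irq variants), so behaviour is unchanged; on PREEMPT_RT it becomes a
per-CPU sleeping spinlock, which keeps the section preemptible while still
serializing access to the per-CPU pagevec.

  #include <linux/locallock.h>  /* DEFINE_LOCAL_IRQ_LOCK, get_locked_var() */
  #include <linux/mm.h>         /* get_page() */
  #include <linux/page-flags.h> /* PageCompound() */
  #include <linux/pagevec.h>    /* struct pagevec, pagevec_add() */
  #include <linux/percpu.h>     /* DEFINE_PER_CPU */

  /* One local lock protecting one per-CPU pagevec (hypothetical names). */
  static DEFINE_LOCAL_IRQ_LOCK(example_lock);
  static DEFINE_PER_CPU(struct pagevec, example_pvecs);

  static void example_add(struct page *page)
  {
          struct pagevec *pvec;

          /*
           * Before: pvec = &get_cpu_var(example_pvecs);
           * get_cpu_var() only disables preemption. get_locked_var()
           * additionally takes example_lock and returns this CPU's
           * instance of the variable.
           */
          pvec = &get_locked_var(example_lock, example_pvecs);

          get_page(page);
          if (!pagevec_add(pvec, page) || PageCompound(page))
                  __pagevec_lru_add(pvec);

          /* Before: put_cpu_var(example_pvecs); */
          put_locked_var(example_lock, example_pvecs);
  }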
diff --git a/include/linux/swap.h b/include/linux/swap.h
index ee8f9f554a9e..2ad000e362bd 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -12,6 +12,7 @@
 #include <linux/fs.h>
 #include <linux/atomic.h>
 #include <linux/page-flags.h>
+#include <linux/locallock.h>
 #include <asm/page.h>
 
 struct notifier_block;
@@ -331,6 +332,7 @@ extern unsigned long nr_free_pagecache_pages(void);
 
 
 /* linux/mm/swap.c */
+DECLARE_LOCAL_IRQ_LOCK(swapvec_lock);
 extern void lru_cache_add(struct page *);
 extern void lru_cache_add_anon(struct page *page);
 extern void lru_cache_add_file(struct page *page);
diff --git a/mm/compaction.c b/mm/compaction.c
index 5079ddbec8f9..c40d3a13cbbd 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1668,10 +1668,12 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
 			block_start_pfn(cc->migrate_pfn, cc->order);
 
 		if (cc->last_migrated_pfn < current_block_start) {
-			cpu = get_cpu();
+			cpu = get_cpu_light();
+			local_lock_irq(swapvec_lock);
 			lru_add_drain_cpu(cpu);
+			local_unlock_irq(swapvec_lock);
 			drain_local_pages(zone);
-			put_cpu();
+			put_cpu_light();
 			/* No more flushing until we migrate again */
 			cc->last_migrated_pfn = 0;
 		}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 51f71e7740b2..a34a071048d2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7318,8 +7318,9 @@ void __init free_area_init(unsigned long *zones_size)
 
 static int page_alloc_cpu_dead(unsigned int cpu)
 {
-
+	local_lock_irq_on(swapvec_lock, cpu);
 	lru_add_drain_cpu(cpu);
+	local_unlock_irq_on(swapvec_lock, cpu);
 	drain_pages(cpu);
 
 	/*
diff --git a/mm/swap.c b/mm/swap.c
index 45fdbfb6b2a6..92f994b962f0 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -33,6 +33,7 @@
 #include <linux/memcontrol.h>
 #include <linux/gfp.h>
 #include <linux/uio.h>
+#include <linux/locallock.h>
 #include <linux/hugetlb.h>
 #include <linux/page_idle.h>
 
@@ -51,6 +52,8 @@ static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
 #ifdef CONFIG_SMP
 static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
 #endif
+static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
+DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
 
 /*
  * This path almost never happens for VM activity - pages are normally
@@ -253,11 +256,11 @@ void rotate_reclaimable_page(struct page *page)
 		unsigned long flags;
 
 		get_page(page);
-		local_irq_save(flags);
+		local_lock_irqsave(rotate_lock, flags);
 		pvec = this_cpu_ptr(&lru_rotate_pvecs);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore(rotate_lock, flags);
 	}
 }
 
@@ -307,12 +310,13 @@ void activate_page(struct page *page)
 {
 	page = compound_head(page);
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+		struct pagevec *pvec = &get_locked_var(swapvec_lock,
+						       activate_page_pvecs);
 
 		get_page(page);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, __activate_page, NULL);
-		put_cpu_var(activate_page_pvecs);
+		put_locked_var(swapvec_lock, activate_page_pvecs);
 	}
 }
 
@@ -334,7 +338,7 @@ void activate_page(struct page *page)
 
 static void __lru_cache_activate_page(struct page *page)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
 	int i;
 
 	/*
@@ -356,7 +360,7 @@ static void __lru_cache_activate_page(struct page *page)
 		}
 	}
 
-	put_cpu_var(lru_add_pvec);
+	put_locked_var(swapvec_lock, lru_add_pvec);
 }
 
 /*
@@ -398,12 +402,12 @@ EXPORT_SYMBOL(mark_page_accessed);
 
 static void __lru_cache_add(struct page *page)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
 
 	get_page(page);
 	if (!pagevec_add(pvec, page) || PageCompound(page))
 		__pagevec_lru_add(pvec);
-	put_cpu_var(lru_add_pvec);
+	put_locked_var(swapvec_lock, lru_add_pvec);
 }
 
 /**
@@ -581,9 +585,9 @@ void lru_add_drain_cpu(int cpu)
 		unsigned long flags;
 
 		/* No harm done if a racing interrupt already did this */
-		local_irq_save(flags);
+		local_lock_irqsave(rotate_lock, flags);
 		pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore(rotate_lock, flags);
 	}
 
 	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
@@ -615,11 +619,12 @@ void deactivate_file_page(struct page *page)
 		return;
 
 	if (likely(get_page_unless_zero(page))) {
-		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
+		struct pagevec *pvec = &get_locked_var(swapvec_lock,
+						       lru_deactivate_file_pvecs);
 
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
-		put_cpu_var(lru_deactivate_file_pvecs);
+		put_locked_var(swapvec_lock, lru_deactivate_file_pvecs);
 	}
 }
 
@@ -634,19 +639,20 @@ void mark_page_lazyfree(struct page *page)
 {
 	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
 	    !PageSwapCache(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
+		struct pagevec *pvec = &get_locked_var(swapvec_lock,
+						       lru_lazyfree_pvecs);
 
 		get_page(page);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
-		put_cpu_var(lru_lazyfree_pvecs);
+		put_locked_var(swapvec_lock, lru_lazyfree_pvecs);
 	}
 }
 
 void lru_add_drain(void)
 {
-	lru_add_drain_cpu(get_cpu());
-	put_cpu();
+	lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
+	local_unlock_cpu(swapvec_lock);
}
 
 #ifdef CONFIG_SMP
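
A note on the lock split: the "rotate" pagevec is the only one that can also
be touched from interrupt context (end_page_writeback() may rotate reclaimable
pages from irq), so it gets its own rotate_lock; the remaining pagevecs are
only touched from process context and share swapvec_lock. The lru_add_drain()
hunk above uses local_lock_cpu(), which in the locallock API takes the lock
and returns the current CPU id, i.e. a lock-aware get_cpu(). A sketch of that
pattern, again with the hypothetical example_lock:

  static void example_drain(void)
  {
          /* Take example_lock and pin this CPU, like get_cpu() + lock. */
          int cpu = local_lock_cpu(example_lock);

          lru_add_drain_cpu(cpu);

          /* Drop the lock and unpin, like put_cpu(). */
          local_unlock_cpu(example_lock);
  }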