Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  296
1 file changed, 202 insertions, 94 deletions
diff --git a/mm/memory.c b/mm/memory.c
index ec7b775fb4..296e4f05e8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -123,9 +123,7 @@ static bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
/*
* A number of key systems in x86 including ioremap() rely on the assumption
* that high_memory defines the upper bound on direct map memory, then end
- * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
- * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
- * and ZONE_HIGHMEM.
+ * of ZONE_NORMAL.
*/
void *high_memory;
EXPORT_SYMBOL(high_memory);
@@ -374,6 +372,8 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
* be 0. This will underflow and is okay.
*/
next = mas_find(mas, ceiling - 1);
+ if (unlikely(xa_is_zero(next)))
+ next = NULL;
/*
* Hide vma from rmap and truncate_pagecache before freeing
@@ -395,6 +395,8 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
&& !is_vm_hugetlb_page(next)) {
vma = next;
next = mas_find(mas, ceiling - 1);
+ if (unlikely(xa_is_zero(next)))
+ next = NULL;
if (mm_wr_locked)
vma_start_write(vma);
unlink_anon_vmas(vma);
@@ -706,6 +708,7 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
struct page *page, unsigned long address,
pte_t *ptep)
{
+ struct folio *folio = page_folio(page);
pte_t orig_pte;
pte_t pte;
swp_entry_t entry;
@@ -721,14 +724,15 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
else if (is_writable_device_exclusive_entry(entry))
pte = maybe_mkwrite(pte_mkdirty(pte), vma);
- VM_BUG_ON(pte_write(pte) && !(PageAnon(page) && PageAnonExclusive(page)));
+ VM_BUG_ON_FOLIO(pte_write(pte) && !(folio_test_anon(folio) &&
+ PageAnonExclusive(page)), folio);
/*
* No need to take a page reference as one was already
* created when the swap entry was made.
*/
- if (PageAnon(page))
- page_add_anon_rmap(page, vma, address, RMAP_NONE);
+ if (folio_test_anon(folio))
+ folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE);
else
/*
* Currently device exclusive access only supports anonymous
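The rmap conversions here and throughout the rest of the diff follow one naming scheme: the old page_*_rmap() calls become folio-based helpers whose _pte/_ptes/_pmd suffix states the mapping granularity, and they take both the folio and the exact page being mapped. The single-PTE forms are assumed to be thin wrappers over the batched ones with nr == 1, roughly as sketched below (expected linux/rmap.h shapes, not part of this diff):

    /* Assumed convenience wrappers: one PTE is the nr == 1 case of the batch. */
    #define folio_add_anon_rmap_pte(folio, page, vma, address, flags) \
            folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags)
    #define folio_add_file_rmap_pte(folio, page, vma) \
            folio_add_file_rmap_ptes(folio, page, 1, vma)
    #define folio_remove_rmap_pte(folio, page, vma) \
            folio_remove_rmap_ptes(folio, page, 1, vma)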
@@ -779,6 +783,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
unsigned long vm_flags = dst_vma->vm_flags;
pte_t orig_pte = ptep_get(src_pte);
pte_t pte = orig_pte;
+ struct folio *folio;
struct page *page;
swp_entry_t entry = pte_to_swp_entry(orig_pte);
@@ -823,6 +828,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
}
} else if (is_device_private_entry(entry)) {
page = pfn_swap_entry_to_page(entry);
+ folio = page_folio(page);
/*
* Update rss count even for unaddressable pages, as
@@ -833,10 +839,10 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
* for unaddressable pages, at some point. But for now
* keep things as they are.
*/
- get_page(page);
+ folio_get(folio);
rss[mm_counter(page)]++;
/* Cannot fail as these pages cannot get pinned. */
- BUG_ON(page_try_dup_anon_rmap(page, false, src_vma));
+ folio_try_dup_anon_rmap_pte(folio, page, src_vma);
/*
* We do not preserve soft-dirty information, because so
@@ -950,7 +956,7 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
* future.
*/
folio_get(folio);
- if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) {
+ if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
/* Page may be pinned, we have to copy. */
folio_put(folio);
return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
@@ -959,7 +965,7 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
rss[MM_ANONPAGES]++;
} else if (page) {
folio_get(folio);
- page_dup_file_rmap(page, false);
+ folio_dup_file_rmap_pte(folio, page);
rss[mm_counter_file(page)]++;
}
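The behavioural point behind these two hunks: folio_try_dup_anon_rmap_pte(), like page_try_dup_anon_rmap() before it, returns non-zero when the anon folio may be pinned (e.g. by GUP), and fork() must then give the child its own copy instead of sharing the page; the file-backed folio_dup_file_rmap_pte() has no failure mode. A condensed sketch of the anon path, with the trailing copy_present_page() arguments assumed from the surrounding unchanged code:

    folio_get(folio);
    if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
            /* Folio may be pinned: drop our reference and copy instead. */
            folio_put(folio);
            return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
                                     addr, rss, prealloc, page);
    }
    rss[MM_ANONPAGES]++;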
@@ -988,12 +994,17 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
return 0;
}
-static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm,
- struct vm_area_struct *vma, unsigned long addr)
+static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
+ struct vm_area_struct *vma, unsigned long addr, bool need_zero)
{
struct folio *new_folio;
- new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
+ if (need_zero)
+ new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
+ else
+ new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
+ addr, false);
+
if (!new_folio)
return NULL;
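Only the allocation half of the renamed helper is visible in this hunk. Judging by the mem_cgroup_charge()/folio_throttle_swaprate() calls that later hunks drop from wp_page_copy() and do_cow_fault(), folio_prealloc() presumably keeps the charging tail of the old page_copy_prealloc(), along these lines (reconstruction, not shown in the diff):

    if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
            folio_put(new_folio);
            return NULL;
    }
    folio_throttle_swaprate(new_folio, GFP_KERNEL);

    return new_folio;

That is why the callers below can treat a NULL return as plain OOM without doing their own cgroup accounting.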
@@ -1125,7 +1136,7 @@ again:
} else if (ret == -EBUSY) {
goto out;
} else if (ret == -EAGAIN) {
- prealloc = page_copy_prealloc(src_mm, src_vma, addr);
+ prealloc = folio_prealloc(src_mm, src_vma, addr, false);
if (!prealloc)
return -ENOMEM;
} else if (ret) {
@@ -1423,6 +1434,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
arch_enter_lazy_mmu_mode();
do {
pte_t ptent = ptep_get(pte);
+ struct folio *folio;
struct page *page;
if (pte_none(ptent))
@@ -1448,21 +1460,22 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
continue;
}
+ folio = page_folio(page);
delay_rmap = 0;
- if (!PageAnon(page)) {
+ if (!folio_test_anon(folio)) {
if (pte_dirty(ptent)) {
- set_page_dirty(page);
+ folio_mark_dirty(folio);
if (tlb_delay_rmap(tlb)) {
delay_rmap = 1;
force_flush = 1;
}
}
if (pte_young(ptent) && likely(vma_has_recency(vma)))
- mark_page_accessed(page);
+ folio_mark_accessed(folio);
}
rss[mm_counter(page)]--;
if (!delay_rmap) {
- page_remove_rmap(page, vma, false);
+ folio_remove_rmap_pte(folio, page, vma);
if (unlikely(page_mapcount(page) < 0))
print_bad_pte(vma, addr, ptent, page);
}
@@ -1478,6 +1491,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
if (is_device_private_entry(entry) ||
is_device_exclusive_entry(entry)) {
page = pfn_swap_entry_to_page(entry);
+ folio = page_folio(page);
if (unlikely(!should_zap_page(details, page)))
continue;
/*
@@ -1489,8 +1503,8 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
WARN_ON_ONCE(!vma_is_anonymous(vma));
rss[mm_counter(page)]--;
if (is_device_private_entry(entry))
- page_remove_rmap(page, vma, false);
- put_page(page);
+ folio_remove_rmap_pte(folio, page, vma);
+ folio_put(folio);
} else if (!non_swap_entry(entry)) {
/* Genuine swap entry, hence a private anon page */
if (!should_zap_cows(details))
@@ -1744,7 +1758,8 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
unmap_single_vma(tlb, vma, start, end, &details,
mm_wr_locked);
hugetlb_zap_end(vma, &details);
- } while ((vma = mas_find(mas, tree_end - 1)) != NULL);
+ vma = mas_find(mas, tree_end - 1);
+ } while (vma && likely(!xa_is_zero(vma)));
mmu_notifier_invalidate_range_end(&range);
}
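The xa_is_zero() checks added to free_pgtables() above and to this unmap_vmas() loop exist because the maple tree backing the VMA list can now contain XA_ZERO_ENTRY placeholders, left behind by the munmap path, where real VMAs used to be; a zero entry must be treated as "no more VMAs" rather than dereferenced. The three open-coded checks amount to a helper along these lines (hypothetical, for illustration only):

    /* Hypothetical wrapper: a zero entry from the maple tree ends the walk. */
    static inline struct vm_area_struct *vma_find_skip_zero(struct ma_state *mas,
                                                            unsigned long max)
    {
            struct vm_area_struct *vma = mas_find(mas, max);

            return unlikely(xa_is_zero(vma)) ? NULL : vma;
    }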
@@ -1837,21 +1852,26 @@ pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
static int validate_page_before_insert(struct page *page)
{
- if (PageAnon(page) || PageSlab(page) || page_has_type(page))
+ struct folio *folio = page_folio(page);
+
+ if (folio_test_anon(folio) || folio_test_slab(folio) ||
+ page_has_type(page))
return -EINVAL;
- flush_dcache_page(page);
+ flush_dcache_folio(folio);
return 0;
}
static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
unsigned long addr, struct page *page, pgprot_t prot)
{
+ struct folio *folio = page_folio(page);
+
if (!pte_none(ptep_get(pte)))
return -EBUSY;
/* Ok, finally just insert the thing.. */
- get_page(page);
+ folio_get(folio);
inc_mm_counter(vma->vm_mm, mm_counter_file(page));
- page_add_file_rmap(page, vma, false);
+ folio_add_file_rmap_pte(folio, page, vma);
set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
return 0;
}
@@ -2836,7 +2856,8 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src,
* just copying from the original user address. If that
* fails, we just zero-fill it. Live with it.
*/
- kaddr = kmap_atomic(dst);
+ kaddr = kmap_local_page(dst);
+ pagefault_disable();
uaddr = (void __user *)(addr & PAGE_MASK);
/*
@@ -2904,7 +2925,8 @@ warn:
pte_unlock:
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
- kunmap_atomic(kaddr);
+ pagefault_enable();
+ kunmap_local(kaddr);
flush_dcache_page(dst);
return ret;
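__wp_page_copy_user() copies from the faulting user address with __copy_from_user_inatomic() and relies on that copy never sleeping in a page fault. kmap_atomic() guaranteed this implicitly by disabling page faults (and preemption); kmap_local_page() does neither, so the conversion states the requirement explicitly with pagefault_disable()/pagefault_enable(). The general shape of such conversions (illustrative fragment):

    unsigned long left;

    /* Old pattern: page faults and preemption disabled implicitly. */
    kaddr = kmap_atomic(dst);
    left = __copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE);
    kunmap_atomic(kaddr);

    /* New pattern: the no-fault requirement of the copy is made explicit,
     * and the mapping no longer disables preemption. */
    kaddr = kmap_local_page(dst);
    pagefault_disable();
    left = __copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE);
    pagefault_enable();
    kunmap_local(kaddr);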
@@ -3102,6 +3124,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
int page_copied = 0;
struct mmu_notifier_range range;
vm_fault_t ret;
+ bool pfn_is_zero;
delayacct_wpcopy_start();
@@ -3111,16 +3134,13 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
if (unlikely(ret))
goto out;
- if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
- new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
- if (!new_folio)
- goto oom;
- } else {
+ pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte));
+ new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero);
+ if (!new_folio)
+ goto oom;
+
+ if (!pfn_is_zero) {
int err;
- new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
- vmf->address, false);
- if (!new_folio)
- goto oom;
err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
if (err) {
@@ -3141,10 +3161,6 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
kmsan_copy_page_meta(&new_folio->page, vmf->page);
}
- if (mem_cgroup_charge(new_folio, mm, GFP_KERNEL))
- goto oom_free_new;
- folio_throttle_swaprate(new_folio, GFP_KERNEL);
-
__folio_mark_uptodate(new_folio);
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
@@ -3207,10 +3223,10 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
* threads.
*
* The critical issue is to order this
- * page_remove_rmap with the ptp_clear_flush above.
- * Those stores are ordered by (if nothing else,)
+ * folio_remove_rmap_pte() with the ptp_clear_flush
+ * above. Those stores are ordered by (if nothing else,)
* the barrier present in the atomic_add_negative
- * in page_remove_rmap.
+ * in folio_remove_rmap_pte().
*
* Then the TLB flush in ptep_clear_flush ensures that
* no process can access the old page before the
@@ -3219,7 +3235,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
* mapcount is visible. So transitively, TLBs to
* old page will be flushed before it can be reused.
*/
- page_remove_rmap(vmf->page, vma, false);
+ folio_remove_rmap_pte(old_folio, vmf->page, vma);
}
/* Free the old page.. */
@@ -3243,8 +3259,6 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
delayacct_wpcopy_end();
return 0;
-oom_free_new:
- folio_put(new_folio);
oom:
ret = VM_FAULT_OOM;
out:
@@ -3890,9 +3904,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
folio_add_lru(folio);
- /* To provide entry to swap_readpage() */
+ /* To provide entry to swap_read_folio() */
folio->swap = entry;
- swap_readpage(page, true, NULL);
+ swap_read_folio(folio, true, NULL);
folio->private = NULL;
}
} else {
@@ -3950,15 +3964,18 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
* page->index of !PageKSM() pages would be nonlinear inside the
* anon VMA -- PageKSM() is lost on actual swapout.
*/
- page = ksm_might_need_to_copy(page, vma, vmf->address);
- if (unlikely(!page)) {
+ folio = ksm_might_need_to_copy(folio, vma, vmf->address);
+ if (unlikely(!folio)) {
ret = VM_FAULT_OOM;
+ folio = swapcache;
goto out_page;
- } else if (unlikely(PTR_ERR(page) == -EHWPOISON)) {
+ } else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
ret = VM_FAULT_HWPOISON;
+ folio = swapcache;
goto out_page;
}
- folio = page_folio(page);
+ if (folio != swapcache)
+ page = folio_page(folio, 0);
/*
* If we want to map a page that's in the swapcache writable, we
@@ -4076,10 +4093,11 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
/* ksm created a completely new copy */
if (unlikely(folio != swapcache && swapcache)) {
- page_add_new_anon_rmap(page, vma, vmf->address);
+ folio_add_new_anon_rmap(folio, vma, vmf->address);
folio_add_lru_vma(folio, vma);
} else {
- page_add_anon_rmap(page, vma, vmf->address, rmap_flags);
+ folio_add_anon_rmap_pte(folio, page, vma, vmf->address,
+ rmap_flags);
}
VM_BUG_ON(!folio_test_anon(folio) ||
@@ -4138,6 +4156,84 @@ out_release:
return ret;
}
+static bool pte_range_none(pte_t *pte, int nr_pages)
+{
+ int i;
+
+ for (i = 0; i < nr_pages; i++) {
+ if (!pte_none(ptep_get_lockless(pte + i)))
+ return false;
+ }
+
+ return true;
+}
+
+static struct folio *alloc_anon_folio(struct vm_fault *vmf)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ struct vm_area_struct *vma = vmf->vma;
+ unsigned long orders;
+ struct folio *folio;
+ unsigned long addr;
+ pte_t *pte;
+ gfp_t gfp;
+ int order;
+
+ /*
+ * If uffd is active for the vma we need per-page fault fidelity to
+ * maintain the uffd semantics.
+ */
+ if (unlikely(userfaultfd_armed(vma)))
+ goto fallback;
+
+ /*
+ * Get a list of all the (large) orders below PMD_ORDER that are enabled
+ * for this vma. Then filter out the orders that can't be allocated over
+ * the faulting address and still be fully contained in the vma.
+ */
+ orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true,
+ BIT(PMD_ORDER) - 1);
+ orders = thp_vma_suitable_orders(vma, vmf->address, orders);
+
+ if (!orders)
+ goto fallback;
+
+ pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK);
+ if (!pte)
+ return ERR_PTR(-EAGAIN);
+
+ /*
+ * Find the highest order where the aligned range is completely
+ * pte_none(). Note that all remaining orders will be completely
+ * pte_none().
+ */
+ order = highest_order(orders);
+ while (orders) {
+ addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
+ if (pte_range_none(pte + pte_index(addr), 1 << order))
+ break;
+ order = next_order(&orders, order);
+ }
+
+ pte_unmap(pte);
+
+ /* Try allocating the highest of the remaining orders. */
+ gfp = vma_thp_gfp_mask(vma);
+ while (orders) {
+ addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
+ folio = vma_alloc_folio(gfp, order, vma, addr, true);
+ if (folio) {
+ clear_huge_page(&folio->page, vmf->address, 1 << order);
+ return folio;
+ }
+ order = next_order(&orders, order);
+ }
+
+fallback:
+#endif
+ return vma_alloc_zeroed_movable_folio(vmf->vma, vmf->address);
+}
+
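alloc_anon_folio() turns the anonymous fault path into a multi-size THP allocator: it collects the enabled orders below PMD_ORDER as a bitmap, keeps only those whose naturally aligned range is entirely pte_none(), and allocates the largest order the page allocator will grant, falling back to a single zeroed page. Worked example, assuming 4KiB pages and orders 4 and 2 enabled: for a fault at 0x10007000, highest_order() picks order 4, so the 16 PTEs covering ALIGN_DOWN(0x10007000, 64KiB) = 0x10000000 are probed; if any is populated, next_order() drops to order 2 and the 4 PTEs from ALIGN_DOWN(0x10007000, 16KiB) = 0x10004000 are probed instead. The bitmap helpers are assumed to look roughly like this (from the multi-size THP series, not part of this hunk):

    /* Assumed helpers: bit N of "orders" set means order N is a candidate;
     * iteration walks from the highest set bit downwards. */
    static inline int highest_order(unsigned long orders)
    {
            return fls_long(orders) - 1;
    }

    static inline int next_order(unsigned long *orders, int prev)
    {
            *orders &= ~BIT(prev);
            return highest_order(*orders);
    }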
/*
* We enter with non-exclusive mmap_lock (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
@@ -4147,9 +4243,12 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
{
bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
struct vm_area_struct *vma = vmf->vma;
+ unsigned long addr = vmf->address;
struct folio *folio;
vm_fault_t ret = 0;
+ int nr_pages = 1;
pte_t entry;
+ int i;
/* File mapping without ->vm_ops ? */
if (vma->vm_flags & VM_SHARED)
@@ -4189,10 +4288,16 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
/* Allocate our own private page. */
if (unlikely(anon_vma_prepare(vma)))
goto oom;
- folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
+ /* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */
+ folio = alloc_anon_folio(vmf);
+ if (IS_ERR(folio))
+ return 0;
if (!folio)
goto oom;
+ nr_pages = folio_nr_pages(folio);
+ addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
+
if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
goto oom_free_page;
folio_throttle_swaprate(folio, GFP_KERNEL);
@@ -4209,12 +4314,15 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
if (vma->vm_flags & VM_WRITE)
entry = pte_mkwrite(pte_mkdirty(entry), vma);
- vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
- &vmf->ptl);
+ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
if (!vmf->pte)
goto release;
- if (vmf_pte_changed(vmf)) {
- update_mmu_tlb(vma, vmf->address, vmf->pte);
+ if (nr_pages == 1 && vmf_pte_changed(vmf)) {
+ update_mmu_tlb(vma, addr, vmf->pte);
+ goto release;
+ } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
+ for (i = 0; i < nr_pages; i++)
+ update_mmu_tlb(vma, addr + PAGE_SIZE * i, vmf->pte + i);
goto release;
}
@@ -4229,16 +4337,17 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
return handle_userfault(vmf, VM_UFFD_MISSING);
}
- inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
- folio_add_new_anon_rmap(folio, vma, vmf->address);
+ folio_ref_add(folio, nr_pages - 1);
+ add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
+ folio_add_new_anon_rmap(folio, vma, addr);
folio_add_lru_vma(folio, vma);
setpte:
if (uffd_wp)
entry = pte_mkuffd_wp(entry);
- set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
+ set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages);
/* No need to invalidate - it was non-present before */
- update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
+ update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages);
unlock:
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
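When alloc_anon_folio() returns a large folio, do_anonymous_page() now maps every page of it in one go: addr is the folio-aligned base, folio_ref_add(nr_pages - 1) raises the refcount to one reference per mapped PTE, MM_ANONPAGES grows by nr_pages, and set_ptes() installs nr_pages consecutive PTEs with increasing pfns. Ignoring the uffd-wp bit handled just above, the set_ptes() call behaves like this simplified per-page loop (architectures may batch it differently):

    int i;

    for (i = 0; i < nr_pages; i++) {
            pte_t pte = mk_pte(folio_page(folio, i), vma->vm_page_prot);

            if (vma->vm_flags & VM_WRITE)
                    pte = pte_mkwrite(pte_mkdirty(pte), vma);
            set_pte_at(vma->vm_mm, addr + i * PAGE_SIZE, vmf->pte + i, pte);
    }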
@@ -4260,6 +4369,7 @@ oom:
static vm_fault_t __do_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
+ struct folio *folio;
vm_fault_t ret;
/*
@@ -4288,27 +4398,26 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
VM_FAULT_DONE_COW)))
return ret;
+ folio = page_folio(vmf->page);
if (unlikely(PageHWPoison(vmf->page))) {
- struct page *page = vmf->page;
vm_fault_t poisonret = VM_FAULT_HWPOISON;
if (ret & VM_FAULT_LOCKED) {
- if (page_mapped(page))
- unmap_mapping_pages(page_mapping(page),
- page->index, 1, false);
- /* Retry if a clean page was removed from the cache. */
- if (invalidate_inode_page(page))
+ if (page_mapped(vmf->page))
+ unmap_mapping_folio(folio);
+ /* Retry if a clean folio was removed from the cache. */
+ if (mapping_evict_folio(folio->mapping, folio))
poisonret = VM_FAULT_NOPAGE;
- unlock_page(page);
+ folio_unlock(folio);
}
- put_page(page);
+ folio_put(folio);
vmf->page = NULL;
return poisonret;
}
if (unlikely(!(ret & VM_FAULT_LOCKED)))
- lock_page(vmf->page);
+ folio_lock(folio);
else
- VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
+ VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page);
return ret;
}
@@ -4329,17 +4438,17 @@ static void deposit_prealloc_pte(struct vm_fault *vmf)
vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
{
+ struct folio *folio = page_folio(page);
struct vm_area_struct *vma = vmf->vma;
bool write = vmf->flags & FAULT_FLAG_WRITE;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
pmd_t entry;
vm_fault_t ret = VM_FAULT_FALLBACK;
- if (!transhuge_vma_suitable(vma, haddr))
+ if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
return ret;
- page = compound_head(page);
- if (compound_order(page) != HPAGE_PMD_ORDER)
+ if (page != &folio->page || folio_order(folio) != HPAGE_PMD_ORDER)
return ret;
/*
@@ -4348,7 +4457,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
* check. This kind of THP just can be PTE mapped. Access to
* the corrupted subpage should trigger SIGBUS as expected.
*/
- if (unlikely(PageHasHWPoisoned(page)))
+ if (unlikely(folio_test_has_hwpoisoned(folio)))
return ret;
/*
@@ -4372,7 +4481,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
- page_add_file_rmap(page, vma, true);
+ folio_add_file_rmap_pmd(folio, page, vma);
/*
* deposit and withdraw with pmd lock held
@@ -4435,7 +4544,7 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
folio_add_lru_vma(folio, vma);
} else {
add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
- folio_add_file_rmap_range(folio, page, nr, vma, false);
+ folio_add_file_rmap_ptes(folio, page, nr, vma);
}
set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
@@ -4661,6 +4770,7 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf)
static vm_fault_t do_cow_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
+ struct folio *folio;
vm_fault_t ret;
ret = vmf_can_call_fault(vmf);
@@ -4669,16 +4779,11 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
if (ret)
return ret;
- vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
- if (!vmf->cow_page)
+ folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false);
+ if (!folio)
return VM_FAULT_OOM;
- if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm,
- GFP_KERNEL)) {
- put_page(vmf->cow_page);
- return VM_FAULT_OOM;
- }
- folio_throttle_swaprate(page_folio(vmf->cow_page), GFP_KERNEL);
+ vmf->cow_page = &folio->page;
ret = __do_fault(vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
@@ -4687,7 +4792,7 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
return ret;
copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
- __SetPageUptodate(vmf->cow_page);
+ __folio_mark_uptodate(folio);
ret |= finish_fault(vmf);
unlock_page(vmf->page);
@@ -4696,7 +4801,7 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
goto uncharge_out;
return ret;
uncharge_out:
- put_page(vmf->cow_page);
+ folio_put(folio);
return ret;
}
@@ -5133,7 +5238,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
return VM_FAULT_OOM;
retry_pud:
if (pud_none(*vmf.pud) &&
- hugepage_vma_check(vma, vm_flags, false, true, true)) {
+ thp_vma_allowable_order(vma, vm_flags, false, true, true, PUD_ORDER)) {
ret = create_huge_pud(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
@@ -5167,7 +5272,7 @@ retry_pud:
goto retry_pud;
if (pmd_none(*vmf.pmd) &&
- hugepage_vma_check(vma, vm_flags, false, true, true)) {
+ thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER)) {
ret = create_huge_pmd(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
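hugepage_vma_check() gives way to the order-aware thp_vma_allowable_order(), so the PUD and PMD fault paths now ask specifically whether their own order is enabled rather than whether any THP is allowed at all. The single-order form is assumed to be a one-bit wrapper around the bitmap interface already used by alloc_anon_folio() above, roughly:

    /* Assumed wrapper: test one order by passing a single-bit orders mask. */
    #define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
            thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, \
                                     BIT(order))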
@@ -5732,6 +5837,10 @@ int follow_phys(struct vm_area_struct *vma,
goto out;
pte = ptep_get(ptep);
+ /* Never return PFNs of anon folios in COW mappings. */
+ if (vm_normal_folio(vma, address, pte))
+ goto unlock;
+
if ((flags & FOLL_WRITE) && !pte_write(pte))
goto unlock;
@@ -5870,7 +5979,7 @@ static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
if (bytes > PAGE_SIZE-offset)
bytes = PAGE_SIZE-offset;
- maddr = kmap(page);
+ maddr = kmap_local_page(page);
if (write) {
copy_to_user_page(vma, page, addr,
maddr + offset, buf, bytes);
@@ -5879,8 +5988,7 @@ static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
copy_from_user_page(vma, page, addr,
buf, maddr + offset, bytes);
}
- kunmap(page);
- put_page(page);
+ unmap_and_put_page(page, maddr);
}
len -= bytes;
buf += bytes;
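__access_remote_vm() gets the same kmap_local treatment, but unlike __wp_page_copy_user() it needs no pagefault_disable(): copy_to_user_page()/copy_from_user_page() move data between kernel buffers, so no user access happens under the mapping. The kunmap()/put_page() pair collapses into unmap_and_put_page(), which is expected to be roughly equivalent to:

    /* Roughly what unmap_and_put_page() does: drop the temporary kernel
     * mapping, then the page reference taken when the page was looked up. */
    static inline void unmap_and_put_page(struct page *page, void *addr)
    {
            kunmap_local(addr);
            put_page(page);
    }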