Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c | 94 ++++++++++++++++++-----------------------
1 file changed, 41 insertions(+), 53 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index a8c6f466e3..9dabeb90f7 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2557,16 +2557,44 @@ static struct folio *alloc_misplaced_dst_folio(struct folio *src,
return __folio_alloc_node(gfp, order, nid);
}
-static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
+/*
+ * Prepare for calling migrate_misplaced_folio() by isolating the folio if
+ * permitted. Must be called with the PTL still held.
+ */
+int migrate_misplaced_folio_prepare(struct folio *folio,
+ struct vm_area_struct *vma, int node)
{
int nr_pages = folio_nr_pages(folio);
+ pg_data_t *pgdat = NODE_DATA(node);
+
+ if (folio_is_file_lru(folio)) {
+ /*
+ * Do not migrate file folios that are mapped in multiple
+ * processes with execute permissions as they are probably
+ * shared libraries.
+ *
+ * See folio_likely_mapped_shared() on possible imprecision
+ * when we cannot easily detect if a folio is shared.
+ */
+ if ((vma->vm_flags & VM_EXEC) &&
+ folio_likely_mapped_shared(folio))
+ return -EACCES;
+
+ /*
+ * Do not migrate dirty folios as not all filesystems can move
+ * dirty folios in MIGRATE_ASYNC mode which is a waste of
+ * cycles.
+ */
+ if (folio_test_dirty(folio))
+ return -EAGAIN;
+ }
/* Avoid migrating to a node that is nearly full */
if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
int z;
if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
- return 0;
+ return -EAGAIN;
for (z = pgdat->nr_zones - 1; z >= 0; z--) {
if (managed_zone(pgdat->node_zones + z))
break;
@@ -2577,78 +2605,42 @@ static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
* further.
*/
if (z < 0)
- return 0;
+ return -EAGAIN;
wakeup_kswapd(pgdat->node_zones + z, 0,
folio_order(folio), ZONE_MOVABLE);
- return 0;
+ return -EAGAIN;
}
if (!folio_isolate_lru(folio))
- return 0;
+ return -EAGAIN;
node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
nr_pages);
-
- /*
- * Isolating the folio has taken another reference, so the
- * caller's reference can be safely dropped without the folio
- * disappearing underneath us during migration.
- */
- folio_put(folio);
- return 1;
+ return 0;
}
/*
* Attempt to migrate a misplaced folio to the specified destination
- * node. Caller is expected to have an elevated reference count on
- * the folio that will be dropped by this function before returning.
+ * node. Caller is expected to have isolated the folio by calling
+ * migrate_misplaced_folio_prepare(), which will result in an
+ * elevated reference count on the folio. This function will un-isolate the
+ * folio, dereferencing the folio before returning.
*/
int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
int node)
{
pg_data_t *pgdat = NODE_DATA(node);
- int isolated;
int nr_remaining;
unsigned int nr_succeeded;
LIST_HEAD(migratepages);
- int nr_pages = folio_nr_pages(folio);
-
- /*
- * Don't migrate file folios that are mapped in multiple processes
- * with execute permissions as they are probably shared libraries.
- *
- * See folio_likely_mapped_shared() on possible imprecision when we
- * cannot easily detect if a folio is shared.
- */
- if (folio_likely_mapped_shared(folio) && folio_is_file_lru(folio) &&
- (vma->vm_flags & VM_EXEC))
- goto out;
-
- /*
- * Also do not migrate dirty folios as not all filesystems can move
- * dirty folios in MIGRATE_ASYNC mode which is a waste of cycles.
- */
- if (folio_is_file_lru(folio) && folio_test_dirty(folio))
- goto out;
-
- isolated = numamigrate_isolate_folio(pgdat, folio);
- if (!isolated)
- goto out;
list_add(&folio->lru, &migratepages);
nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
NULL, node, MIGRATE_ASYNC,
MR_NUMA_MISPLACED, &nr_succeeded);
- if (nr_remaining) {
- if (!list_empty(&migratepages)) {
- list_del(&folio->lru);
- node_stat_mod_folio(folio, NR_ISOLATED_ANON +
- folio_is_file_lru(folio), -nr_pages);
- folio_putback_lru(folio);
- }
- isolated = 0;
- }
+ if (nr_remaining && !list_empty(&migratepages))
+ putback_movable_pages(&migratepages);
if (nr_succeeded) {
count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
@@ -2656,11 +2648,7 @@ int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
nr_succeeded);
}
BUG_ON(!list_empty(&migratepages));
- return isolated;
-
-out:
- folio_put(folio);
- return 0;
+ return nr_remaining ? -EAGAIN : 0;
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_NUMA */
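
For context, this patch splits NUMA-hinting migration into two steps: migrate_misplaced_folio_prepare() isolates the folio while the PTL is still held, returning 0 on success and -EACCES or -EAGAIN otherwise, and migrate_misplaced_folio() later consumes the isolation reference, returning 0 on success or -EAGAIN on failure. What follows is a minimal sketch of the intended call pattern, assuming a simplified fault-handler context; the helper name numa_hint_fault_migrate() and its parameter list are illustrative and not taken from mm/memory.c (see do_numa_page() there for the real caller).

#include <linux/migrate.h>
#include <linux/mm.h>

/*
 * Hypothetical caller illustrating the two-step API introduced by
 * this patch. Entered with the PTL held; always releases it.
 */
static int numa_hint_fault_migrate(struct folio *folio,
				   struct vm_area_struct *vma,
				   pte_t *ptep, spinlock_t *ptl,
				   int target_nid)
{
	int err;

	/*
	 * Step 1: the isolation checks must run with the PTL still
	 * held. On success, the isolation holds a folio reference, so
	 * the folio cannot vanish once the PTL is dropped; on failure,
	 * no reference is taken.
	 */
	err = migrate_misplaced_folio_prepare(folio, vma, target_nid);
	pte_unmap_unlock(ptep, ptl);
	if (err)
		return err;

	/*
	 * Step 2: migrate from a sleepable context. This consumes the
	 * isolation reference, un-isolating the folio whether or not
	 * the migration succeeds.
	 */
	return migrate_misplaced_folio(folio, vma, target_nid);
}

Splitting the API this way lets the isolation checks run atomically under the page table lock, while the migrate_pages() call, which may block, happens only after the lock has been released.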