return rc;
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define NR_MAX_BATCHED_MIGRATION HPAGE_PMD_NR
+#else
+#define NR_MAX_BATCHED_MIGRATION 512
+#endif
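
(For reference, a hedged note on the cap: with THP configured, a batch is
limited to one PMD-sized huge page worth of base pages. include/linux/huge_mm.h
defines:

	#define HPAGE_PMD_ORDER	(HPAGE_PMD_SHIFT - PAGE_SHIFT)
	#define HPAGE_PMD_NR	(1 << HPAGE_PMD_ORDER)

On x86-64 with 4KB base pages and 2MB PMD mappings this is
1 << (21 - 12) = 512, so both branches of the #ifdef yield the same cap there.)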
#define NR_MAX_MIGRATE_PAGES_RETRY 10
struct migrate_pages_stats {
return nr_failed;
}
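
(For reference, a sketch of the migrate_pages_stats counters that this patch
now updates through a @stats pointer; the field names are taken from the
accesses in the hunks below, and the in-tree comments may differ:

	struct migrate_pages_stats {
		int nr_succeeded;	/* normal and large folios migrated,
					   in units of base pages */
		int nr_failed_pages;	/* folios that failed to migrate,
					   in units of base pages */
		int nr_thp_succeeded;	/* THP migrated successfully */
		int nr_thp_failed;	/* THP that failed to migrate */
		int nr_thp_split;	/* THP split before migrating */
	};
)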
-/*
- * migrate_pages - migrate the folios specified in a list, to the free folios
- * supplied as the target for the page migration
- *
- * @from: The list of folios to be migrated.
- * @get_new_page: The function used to allocate free folios to be used
- * as the target of the folio migration.
- * @put_new_page: The function used to free target folios if migration
- * fails, or NULL if no special handling is necessary.
- * @private: Private data to be passed on to get_new_page()
- * @mode: The migration mode that specifies the constraints for
- * folio migration, if any.
- * @reason: The reason for folio migration.
- * @ret_succeeded: Set to the number of folios migrated successfully if
- * the caller passes a non-NULL pointer.
- *
- * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
- * are movable any more because the list has become empty or no retryable folios
- * exist any more. It is caller's responsibility to call putback_movable_pages()
- * only if ret != 0.
- *
- * Returns the number of {normal folio, large folio, hugetlb} that were not
- * migrated, or an error code. The number of large folio splits will be
- * considered as the number of non-migrated large folio, no matter how many
- * split folios of the large folio are migrated successfully.
- */
-int migrate_pages(struct list_head *from, new_page_t get_new_page,
+static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
free_page_t put_new_page, unsigned long private,
- enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
+ enum migrate_mode mode, int reason, struct list_head *ret_folios,
+ struct migrate_pages_stats *stats)
{
int retry = 1;
int large_retry = 1;
int thp_retry = 1;
- int nr_failed;
+ int nr_failed = 0;
int nr_retry_pages = 0;
int nr_large_failed = 0;
int pass = 0;
bool is_thp = false;
struct folio *folio, *folio2;
int rc, nr_pages;
- LIST_HEAD(ret_folios);
LIST_HEAD(split_folios);
bool nosplit = (reason == MR_NUMA_MISPLACED);
bool no_split_folio_counting = false;
- struct migrate_pages_stats stats;
-
- trace_mm_migrate_pages_start(mode, reason);
-
- memset(&stats, 0, sizeof(stats));
- rc = migrate_hugetlbs(from, get_new_page, put_new_page, private, mode, reason,
- &stats, &ret_folios);
- if (rc < 0)
- goto out;
- nr_failed = rc;
split_folio_migration:
for (pass = 0;
nr_retry_pages = 0;
list_for_each_entry_safe(folio, folio2, from, lru) {
- /* Retried hugetlb folios will be kept in list */
- if (folio_test_hugetlb(folio)) {
- list_move_tail(&folio->lru, &ret_folios);
- continue;
- }
-
/*
* Large folio statistics are based on the source large
* folio. Capture required information that might get
rc = unmap_and_move(get_new_page, put_new_page,
private, folio, pass > 2, mode,
- reason, &ret_folios);
+ reason, ret_folios);
/*
* The rules are:
* Success: folio will be freed
* -EAGAIN: stay on the from list
* -ENOMEM: stay on the from list
* -ENOSYS: stay on the from list
- * Other errno: put on ret_folios list then splice to
- * from list
+ * Other errno: put on ret_folios list
*/
switch(rc) {
/*
/* Large folio migration is unsupported */
if (is_large) {
nr_large_failed++;
- stats.nr_thp_failed += is_thp;
+ stats->nr_thp_failed += is_thp;
if (!try_split_folio(folio, &split_folios)) {
- stats.nr_thp_split += is_thp;
+ stats->nr_thp_split += is_thp;
break;
}
} else if (!no_split_folio_counting) {
nr_failed++;
}
- stats.nr_failed_pages += nr_pages;
- list_move_tail(&folio->lru, &ret_folios);
+ stats->nr_failed_pages += nr_pages;
+ list_move_tail(&folio->lru, ret_folios);
break;
case -ENOMEM:
/*
*/
if (is_large) {
nr_large_failed++;
- stats.nr_thp_failed += is_thp;
+ stats->nr_thp_failed += is_thp;
/* Large folio NUMA faulting doesn't split to retry. */
if (!nosplit) {
int ret = try_split_folio(folio, &split_folios);
if (!ret) {
- stats.nr_thp_split += is_thp;
+ stats->nr_thp_split += is_thp;
break;
} else if (reason == MR_LONGTERM_PIN &&
ret == -EAGAIN) {
nr_failed++;
}
- stats.nr_failed_pages += nr_pages + nr_retry_pages;
+ stats->nr_failed_pages += nr_pages + nr_retry_pages;
/*
* There might be some split folios of fail-to-migrate large
- * folios left in split_folios list. Move them back to migration
+ * folios left in split_folios list. Move them to ret_folios
* list so that they could be put back to the right list by
* the caller; otherwise the folio refcnt will be leaked.
*/
- list_splice_init(&split_folios, from);
+ list_splice_init(&split_folios, ret_folios);
/* nr_failed isn't updated because its value is not used after this point */
nr_large_failed += large_retry;
- stats.nr_thp_failed += thp_retry;
+ stats->nr_thp_failed += thp_retry;
goto out;
case -EAGAIN:
if (is_large) {
nr_retry_pages += nr_pages;
break;
case MIGRATEPAGE_SUCCESS:
- stats.nr_succeeded += nr_pages;
- stats.nr_thp_succeeded += is_thp;
+ stats->nr_succeeded += nr_pages;
+ stats->nr_thp_succeeded += is_thp;
break;
default:
/*
*/
if (is_large) {
nr_large_failed++;
- stats.nr_thp_failed += is_thp;
+ stats->nr_thp_failed += is_thp;
} else if (!no_split_folio_counting) {
nr_failed++;
}
- stats.nr_failed_pages += nr_pages;
+ stats->nr_failed_pages += nr_pages;
break;
}
}
}
nr_failed += retry;
nr_large_failed += large_retry;
- stats.nr_thp_failed += thp_retry;
- stats.nr_failed_pages += nr_retry_pages;
+ stats->nr_thp_failed += thp_retry;
+ stats->nr_failed_pages += nr_retry_pages;
/*
* Try to migrate split folios of fail-to-migrate large folios, no
* nr_failed counting in this round, since all split folios of a
* Move non-migrated folios (after NR_MAX_MIGRATE_PAGES_RETRY
* retries) to ret_folios to avoid migrating them again.
*/
- list_splice_init(from, &ret_folios);
+ list_splice_init(from, ret_folios);
list_splice_init(&split_folios, from);
no_split_folio_counting = true;
retry = 1;
rc = nr_failed + nr_large_failed;
out:
+ return rc;
+}
+
+/*
+ * migrate_pages - migrate the folios specified in a list to the free folios
+ * supplied as the target for the page migration
+ *
+ * @from: The list of folios to be migrated.
+ * @get_new_page: The function used to allocate free folios to be used
+ * as the target of the folio migration.
+ * @put_new_page: The function used to free target folios if migration
+ * fails, or NULL if no special handling is necessary.
+ * @private: Private data to be passed on to get_new_page()
+ * @mode: The migration mode that specifies the constraints for
+ * folio migration, if any.
+ * @reason: The reason for folio migration.
+ * @ret_succeeded: Set to the number of folios migrated successfully if
+ * the caller passes a non-NULL pointer.
+ *
+ * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
+ * are movable any more, either because the list has become empty or because no
+ * retryable folios remain. It is the caller's responsibility to call
+ * putback_movable_pages() only if ret != 0.
+ *
+ * Returns the number of {normal, large, hugetlb} folios that were not
+ * migrated, or an error code. A large folio that is split counts as one
+ * non-migrated large folio, no matter how many of its split folios are
+ * migrated successfully.
+ */
+int migrate_pages(struct list_head *from, new_page_t get_new_page,
+ free_page_t put_new_page, unsigned long private,
+ enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
+{
+ int rc, rc_gather;
+ int nr_pages;
+ struct folio *folio, *folio2;
+ LIST_HEAD(folios);
+ LIST_HEAD(ret_folios);
+ struct migrate_pages_stats stats;
+
+ trace_mm_migrate_pages_start(mode, reason);
+
+ memset(&stats, 0, sizeof(stats));
+
+ rc_gather = migrate_hugetlbs(from, get_new_page, put_new_page, private,
+ mode, reason, &stats, &ret_folios);
+ if (rc_gather < 0)
+ goto out;
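+
+	/*
+	 * Migrate the remaining folios in batches of at most
+	 * NR_MAX_BATCHED_MIGRATION base pages per migrate_pages_batch() call.
+	 */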
+again:
+ nr_pages = 0;
+ list_for_each_entry_safe(folio, folio2, from, lru) {
+		/* Retried hugetlb folios will be kept in the list */
+ if (folio_test_hugetlb(folio)) {
+ list_move_tail(&folio->lru, &ret_folios);
+ continue;
+ }
+
+ nr_pages += folio_nr_pages(folio);
+ if (nr_pages > NR_MAX_BATCHED_MIGRATION)
+ break;
+ }
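+	/*
+	 * If the scan stopped early, @folio is the first folio that would
+	 * overflow the batch; cut the list just before it so that it goes
+	 * into the next batch instead.
+	 */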
+ if (nr_pages > NR_MAX_BATCHED_MIGRATION)
+ list_cut_before(&folios, from, &folio->lru);
+ else
+ list_splice_init(from, &folios);
+ rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
+ mode, reason, &ret_folios, &stats);
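+	/*
+	 * Folios that migrate_pages_batch() left on the batch list (for
+	 * example, after a fatal -ENOMEM) are collected on ret_folios so
+	 * that the caller can put them back.
+	 */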
+ list_splice_tail_init(&folios, &ret_folios);
+ if (rc < 0) {
+ rc_gather = rc;
+ goto out;
+ }
+ rc_gather += rc;
+ if (!list_empty(from))
+ goto again;
+out:
/*
* Put the permanently failed folios back on the migration list; they
* will be put back to the right list by the caller.
* are migrated successfully.
*/
if (list_empty(from))
- rc = 0;
+ rc_gather = 0;
count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
if (ret_succeeded)
*ret_succeeded = stats.nr_succeeded;
- return rc;
+ return rc_gather;
}
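
(A hedged usage sketch: a typical caller, modeled on migrate_to_node() in
mm/mempolicy.c, pairs migrate_pages() with putback_movable_pages() on any
nonzero return. dest_node and the isolation step are placeholders, not part
of this patch:

	struct migration_target_control mtc = {
		.nid = dest_node,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};
	LIST_HEAD(pagelist);
	int err;

	/* ... isolate the pages to move onto pagelist ... */

	err = migrate_pages(&pagelist, alloc_migration_target, NULL,
			    (unsigned long)&mtc, MIGRATE_SYNC,
			    MR_SYSCALL, NULL);
	if (err)
		putback_movable_pages(&pagelist);

A nonzero return means some folios were left on the list and must be put
back by the caller, matching the kerneldoc above.)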
struct page *alloc_migration_target(struct page *page, unsigned long private)