*/
};
-#ifdef CONFIG_64BIT
-#define SWAP_RA_ORDER_CEILING 5
-#else
-/* Avoid stack overflow, because we need to save part of page table */
-#define SWAP_RA_ORDER_CEILING 3
-#define SWAP_RA_PTE_CACHE_SIZE (1 << SWAP_RA_ORDER_CEILING)
-#endif
-
-struct vma_swap_readahead {
- unsigned short win;
- unsigned short offset;
- unsigned short nr_pte;
-#ifdef CONFIG_64BIT
- pte_t *ptes;
-#else
- pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
-#endif
-};
-
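
The 64-bit variant of this struct kept a live pointer into the page table, while 32-bit kept a stack copy whose size forced the smaller order ceiling of 3 (the "avoid stack overflow" comment above); both go away. What survives is pure window geometry, re-added below after the tail of exit_swap_address_space(): the readahead loop later turns offset back into a starting virtual address instead of carrying saved PTEs. A minimal userspace sketch of that arithmetic (ra_win_base and the page-size constant are illustrative, not from the patch):

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Window geometry only, as in the trimmed struct: size of the
 * window, index of the faulting page within it, number of PTEs. */
struct vma_swap_readahead {
	unsigned short win;
	unsigned short offset;
	unsigned short nr_pte;
};

/* Recover the first virtual address of the window from the fault
 * address, as the reworked swap_vma_readahead() does with
 * "addr = vmf->address - (ra_info.offset * PAGE_SIZE)". */
static unsigned long ra_win_base(unsigned long faddr,
				 const struct vma_swap_readahead *ra)
{
	return faddr - (unsigned long)ra->offset * PAGE_SIZE;
}

int main(void)
{
	struct vma_swap_readahead ra = { .win = 8, .offset = 3, .nr_pte = 8 };

	/* Fault at the 4th page of an 8-page window. */
	assert(ra_win_base(0x7f0000005000UL, &ra) == 0x7f0000002000UL);
	printf("ok\n");
	return 0;
}

Recomputing addresses per page is what allows the loop to map and unmap the page table lazily rather than pinning it across the whole window.
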
swapper_spaces[type] = NULL;
}
+#define SWAP_RA_ORDER_CEILING 5
+
+struct vma_swap_readahead {
+ unsigned short win;
+ unsigned short offset;
+ unsigned short nr_pte;
+};
+
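
With no PTE cache left to overflow a stack, one ceiling now serves every architecture: windows of at most 1 << 5 = 32 pages, still clamped by the vm.page-cluster sysctl in swap_ra_info() below. A userspace stand-in for that cap (max_ra_win is illustrative; the kernel uses min_t directly):

#include <stdio.h>

/* One ceiling for all configurations now that nothing is cached on
 * the stack: readahead windows of at most 1 << 5 = 32 pages, still
 * limited by the vm.page-cluster sysctl value. */
#define SWAP_RA_ORDER_CEILING	5

static unsigned int max_ra_win(unsigned int page_cluster)
{
	unsigned int order = page_cluster < SWAP_RA_ORDER_CEILING ?
			     page_cluster : SWAP_RA_ORDER_CEILING;

	return 1U << order;
}

int main(void)
{
	printf("page_cluster=3 -> %u pages\n", max_ra_win(3));	/* 8 */
	printf("page_cluster=9 -> %u pages\n", max_ra_win(9));	/* 32 */
	return 0;
}
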
static void swap_ra_info(struct vm_fault *vmf,
struct vma_swap_readahead *ra_info)
{
struct vm_area_struct *vma = vmf->vma;
unsigned long ra_val;
unsigned long faddr, pfn, fpfn, lpfn, rpfn;
unsigned long start, end;
- pte_t *pte, *orig_pte;
unsigned int max_win, hits, prev_win, win;
-#ifndef CONFIG_64BIT
- pte_t *tpte;
-#endif
max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
SWAP_RA_ORDER_CEILING);
faddr = vmf->address;
fpfn = PFN_DOWN(faddr);
ra_val = GET_SWAP_RA_VAL(vma);
pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
prev_win = SWAP_RA_WIN(ra_val);
hits = SWAP_RA_HITS(ra_val);
ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
max_win, prev_win);
atomic_long_set(&vma->swap_readahead_info,
SWAP_RA_VAL(faddr, win, 0));
-
if (win == 1)
return;
- /* Copy the PTEs because the page table may be unmapped */
- orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
if (fpfn == pfn + 1) {
lpfn = fpfn;
rpfn = fpfn + win;
} else if (pfn == fpfn + 1) {
lpfn = fpfn - win + 1;
rpfn = fpfn + 1;
} else {
unsigned int left = (win - 1) / 2;

lpfn = fpfn - left;
rpfn = fpfn + win - left;
}
start = max3(lpfn, PFN_DOWN(vma->vm_start),
PFN_DOWN(faddr & PMD_MASK));
end = min3(rpfn, PFN_DOWN(vma->vm_end),
PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
ra_info->nr_pte = end - start;
ra_info->offset = fpfn - start;
- pte -= ra_info->offset;
-#ifdef CONFIG_64BIT
- ra_info->ptes = pte;
-#else
- tpte = ra_info->ptes;
- for (pfn = start; pfn != end; pfn++)
- *tpte++ = *pte++;
-#endif
- pte_unmap(orig_pte);
}
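
For reference, the SWAP_RA_VAL() call above packs the entire per-VMA readahead state into one atomic long: the page-aligned fault address, the window size, and a hit counter. The macros below mirror the SWAP_RA_* definitions earlier in mm/swap_state.c (not visible in this excerpt), with PAGE_SHIFT == 12 assumed so the round trip can run in userspace:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE - 1))

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

int main(void)
{
	unsigned long v = SWAP_RA_VAL(0x7f0000002000UL, 8, 3);

	/* Address, window and hits all survive the round trip. */
	assert(SWAP_RA_ADDR(v) == 0x7f0000002000UL);
	assert(SWAP_RA_WIN(v) == 8);
	assert(SWAP_RA_HITS(v) == 3);
	printf("packed: %#lx\n", v);
	return 0;
}
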
/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
struct vm_fault *vmf)
{
struct blk_plug plug;
struct swap_iocb *splug = NULL;
struct vm_area_struct *vma = vmf->vma;
struct page *page;
- pte_t *pte, pentry;
+ pte_t *pte = NULL, pentry;
+ unsigned long addr;
swp_entry_t entry;
unsigned int i;
bool page_allocated;
struct vma_swap_readahead ra_info = {
.win = 1,
};

swap_ra_info(vmf, &ra_info);
if (ra_info.win == 1)
goto skip;
+ addr = vmf->address - (ra_info.offset * PAGE_SIZE);
+
blk_start_plug(&plug);
- for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
- i++, pte++) {
- pentry = *pte;
+ for (i = 0; i < ra_info.nr_pte; i++, addr += PAGE_SIZE) {
+ if (!pte++) {
+ pte = pte_offset_map(vmf->pmd, addr);
+ if (!pte)
+ break;
+ }
+ pentry = ptep_get_lockless(pte);
if (!is_swap_pte(pentry))
continue;
entry = pte_to_swp_entry(pentry);
if (unlikely(non_swap_entry(entry)))
continue;
+ pte_unmap(pte);
+ pte = NULL;
page = __read_swap_cache_async(entry, gfp_mask, vma,
- vmf->address, &page_allocated);
+ addr, &page_allocated);
if (!page)
continue;
if (page_allocated) {
swap_readpage(page, false, &splug);
if (i != ra_info.offset) {
SetPageReadahead(page);
count_vm_event(SWAP_RA);
}
}
put_page(page);
}
+ if (pte)
+ pte_unmap(pte);
blk_finish_plug(&plug);
swap_read_unplug(splug);
lru_add_drain();
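
The subtle point in the rewritten loop is "if (!pte++)": pte starts NULL, so the first iteration, and every iteration after a pte_unmap(), maps the page table at the current addr, while the iterations in between just advance to the next entry; the mapping is always dropped before __read_swap_cache_async(), which can block. A self-contained model of this lazy map/unmap pattern, with the post-increment unfolded into an explicit else branch (toy_map and toy_unmap are stand-ins, not kernel API):

#include <stdio.h>
#include <stddef.h>

#define NR_PTES 8

/* Fake "page table": nonzero entries stand in for swap PTEs. */
static int table[NR_PTES] = { 0, 1, 0, 1, 1, 0, 0, 1 };

static int *toy_map(unsigned long idx)	/* pte_offset_map() stand-in */
{
	return idx < NR_PTES ? &table[idx] : NULL;	/* may fail */
}

static void toy_unmap(int *pte)		/* pte_unmap() stand-in */
{
	(void)pte;
}

int main(void)
{
	int *pte = NULL;
	unsigned long i;

	for (i = 0; i < NR_PTES; i++) {
		if (!pte)		/* the kernel spells this !pte++ */
			pte = toy_map(i);
		else
			pte++;		/* still mapped: step to next entry */
		if (!pte)
			break;
		if (*pte == 0)		/* nothing to do: keep the mapping */
			continue;
		toy_unmap(pte);		/* drop before work that may sleep */
		pte = NULL;
		printf("queued readahead for entry %lu\n", i);
	}
	if (pte)			/* loop can exit with the table mapped */
		toy_unmap(pte);
	return 0;
}

The trailing "if (pte) pte_unmap(pte);" above corresponds to the case where the last scanned entries were not swap entries, so the loop finishes with the page table still mapped.
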