mm, thp, migrate: handling migration of 64KB hugepages
[platform/kernel/linux-rpi.git] / arch/arm64/mm/huge_memory.c
index 2ef1a21..1073fde 100644
@@ -1087,4 +1087,79 @@ void split_huge_pte_address(struct vm_area_struct *vma, unsigned long address,
 
        __split_huge_pte(vma, pmd, pte, haddr, freeze, page);
 }
+
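+/*
+ * Replace the HPAGE_CONT_PTE_NR page table entries mapping a 64KB
+ * hugepage with migration entries, so that faults on the range wait
+ * until migration completes; the contiguous-PTE counterpart of
+ * set_pmd_migration_entry() for PMD-mapped THP.
+ */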
+void set_huge_pte_migration_entry(struct page_vma_mapped_walk *pvmw,
+               struct page *page)
+{
+       int i;
+       struct vm_area_struct *vma = pvmw->vma;
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long address = pvmw->address;
+       pte_t pteval, *pte;
+       swp_entry_t entry;
+       pte_t pteswp;
+       struct page *_page = page;
+
+       if (!(pvmw->pmd && pvmw->pte))
+               return;
+
+       flush_cache_range(vma, address, address + HPAGE_CONT_PTE_SIZE);
+
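+       /* Convert each PTE mapping the 64KB page into a migration entry. */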
+       for (i = 0, pte = pvmw->pte; i < HPAGE_CONT_PTE_NR; i++, pte++) {
+               pteval = ptep_invalidate(vma, address, pte);
+               if (pte_dirty(pteval))
+                       set_page_dirty(_page);
+               entry = make_migration_entry(page, pte_write(pteval));
+               pteswp = swp_entry_to_pte(entry);
+               if (pte_soft_dirty(pteval))
+                       pteswp = pte_swp_mksoft_dirty(pteswp);
+               set_pte_at(mm, address, pte, pteswp);
+               _page++;
+               address += PAGE_SIZE;
+       }
+
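+       /* Advance the walk state past the range that was just converted. */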
+       pvmw->pte = pte;
+       pvmw->address = address;
+
+       page_remove_rmap(page, true);
+       put_page(page);
+}
+
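+/*
+ * Re-establish the contiguous PTE mapping for a migrated 64KB hugepage
+ * once migration has finished; the contiguous-PTE counterpart of
+ * remove_migration_pmd() for PMD-mapped THP.
+ */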
+void remove_migration_huge_pte(struct page_vma_mapped_walk *pvmw,
+               struct page *new)
+{
+       struct vm_area_struct *vma = pvmw->vma;
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long address = pvmw->address;
+       unsigned long mmun_start = address & HPAGE_CONT_PTE_MASK;
+       pte_t ptee;
+       swp_entry_t entry;
+
+       if (!(pvmw->pmd && pvmw->pte))
+               return;
+
+       entry = pte_to_swp_entry(*pvmw->pte);
+       get_page(new);
+       ptee = pte_mkold(arch_make_huge_pte(new, vma));
+       if (pte_swp_soft_dirty(*pvmw->pte))
+               ptee = pte_mksoft_dirty(ptee);
+       if (is_write_migration_entry(entry))
+               ptee = maybe_mkwrite(ptee, vma);
+
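+       /* Add rmap for the new page before installing the mapping. */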
+       flush_cache_range(vma, mmun_start, mmun_start + HPAGE_CONT_PTE_SIZE);
+       if (PageAnon(new))
+               page_add_anon_rmap(new, vma, mmun_start, true);
+       else
+               page_add_file_rmap(new, true);
+
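+       /* Install a single 64KB contiguous-PTE mapping for the new page. */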
+       arch_set_huge_pte_at(mm, mmun_start, pvmw->pte, ptee, 0);
+       if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
+               mlock_vma_page(new);
+       pvmw->address = address + HPAGE_CONT_PTE_SIZE;
+       pvmw->pte = pvmw->pte + HPAGE_CONT_PTE_NR;
+       update_mmu_cache_pmd(vma, address, pvmw->pmd);
+}
 #endif /* CONFIG_FINEGRAINED_THP */