pte_t old_pte, new_pte = __pte(0);
while (1) {
- old_pte = pte_val(*ptep);
+ old_pte = *ptep;
/*
* wait until _PAGE_BUSY is clear then set it atomically
*/
- if (unlikely(old_pte & _PAGE_BUSY)) {
+ if (unlikely(pte_val(old_pte) & _PAGE_BUSY)) {
cpu_relax();
continue;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* If hugepage and is trans splitting return None */
if (unlikely(hugepage_shift &&
pmd_trans_splitting(pte_pmd(old_pte))))
return __pte(0);
#endif
/* If pte is not present return None */
- if (unlikely(!(old_pte & _PAGE_PRESENT)))
+ if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
return __pte(0);
new_pte = pte_mkyoung(old_pte);
if (writing && pte_write(old_pte))
new_pte = pte_mkdirty(new_pte);
- if (old_pte == __cmpxchg_u64((unsigned long *)ptep, old_pte,
- new_pte))
+ if (pte_val(old_pte) == __cmpxchg_u64((unsigned long *)ptep,
+ pte_val(old_pte),
+ pte_val(new_pte))) {
break;
+ }
}
return new_pte;
}
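
Everything in the hunk above follows from one type change: with STRICT_MM_TYPECHECKS enabled, pte_t is no longer a bare unsigned long but a single-member struct, so masking, comparing, and passing entries to __cmpxchg_u64() all have to go through the accessors. A minimal sketch of the two flavours, following the pattern used by the powerpc page-table headers:

	#ifdef CONFIG_STRICT_MM_TYPECHECKS
	/* Opaque flavour: ==, & and arithmetic on a pte_t no longer compile. */
	typedef struct { unsigned long pte; } pte_t;
	#define pte_val(x)	((x).pte)
	#define __pte(x)	((pte_t) { (x) })
	#else
	/* Default flavour: the accessors compile away entirely. */
	typedef unsigned long pte_t;
	#define pte_val(x)	(x)
	#define __pte(x)	(x)
	#endif

This is why the loop now reads the entry as a pte_t (old_pte = *ptep) and unwraps it with pte_val() only at the points where the raw bits are genuinely needed.
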
do {
SetPageReserved(page);
map_page(vaddr, page_to_phys(page),
- pgprot_noncached(PAGE_KERNEL));
+ pgprot_val(pgprot_noncached(PAGE_KERNEL)));
page++;
vaddr += PAGE_SIZE;
} while (size -= PAGE_SIZE);
unsigned long cam_sz;
cam_sz = calc_cam_sz(ram, virt, phys);
- settlbcam(i, virt, phys, cam_sz, PAGE_KERNEL_X, 0);
+ settlbcam(i, virt, phys, cam_sz, pgprot_val(PAGE_KERNEL_X), 0);
ram -= cam_sz;
amount_mapped += cam_sz;
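
The map_page() and settlbcam() hunks above are the same fix seen from the caller's side: both take their flags as a plain integer type, while PAGE_KERNEL, PAGE_KERNEL_X and pgprot_noncached(PAGE_KERNEL) are pgprot_t, which becomes another opaque struct under STRICT_MM_TYPECHECKS. The matching accessors, same sketch as for pte_t:

	#ifdef CONFIG_STRICT_MM_TYPECHECKS
	typedef struct { unsigned long pgprot; } pgprot_t;
	#define pgprot_val(x)	((x).pgprot)
	#define __pgprot(x)	((pgprot_t) { (x) })
	#else
	typedef unsigned long pgprot_t;
	#define pgprot_val(x)	(x)
	#define __pgprot(x)	(x)
	#endif
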
/*
 * atomically mark the linux large page PMD busy and dirty
*/
do {
- pmd_t pmd = ACCESS_ONCE(*pmdp);
+ pmd_t pmd = READ_ONCE(*pmdp);
old_pmd = pmd_val(pmd);
/* If PMD busy, retry the access */
*shift = 0;
pgdp = pgdir + pgd_index(ea);
- pgd = ACCESS_ONCE(*pgdp);
+ pgd = READ_ONCE(*pgdp);
/*
 * Always operate on the local stack value. This makes sure the
 * value doesn't get updated by a parallel THP split/collapse,
 * page fault or page unmap. The returned pte_t * is still not
 * stable, so the caller must recheck these conditions.
 */
if (pte_end < end)
end = pte_end;
- pte = ACCESS_ONCE(*ptep);
+ pte = READ_ONCE(*ptep);
mask = _PAGE_PRESENT | _PAGE_USER;
if (write)
mask |= _PAGE_RW;
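
The ACCESS_ONCE() to READ_ONCE() conversions in the three hunks above are forced by the same type change, not style. ACCESS_ONCE() reads through a volatile cast of the lvalue:

	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

which is unreliable for aggregate types: GCC 4.6/4.7 could drop the volatile cast on non-scalar types, and ACCESS_ONCE() was later restricted to scalars outright, so it cannot be applied to a struct pte_t at all. READ_ONCE() dispatches on sizeof(), so a struct pte_t wrapping a single unsigned long still gets one volatile load of the full width:

	pte_t pte = READ_ONCE(*ptep);	/* single, non-torn read of the entry */
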
/* Make sure we have the base flags */
if ((flags & _PAGE_PRESENT) == 0)
- flags |= PAGE_KERNEL;
+ flags |= pgprot_val(PAGE_KERNEL);
/* Non-cacheable page cannot be coherent */
if (flags & _PAGE_NO_CACHE)
p = memstart_addr + s;
for (; s < top; s += PAGE_SIZE) {
ktext = ((char *) v >= _stext && (char *) v < etext);
- f = ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL;
+ f = ktext ? pgprot_val(PAGE_KERNEL_TEXT) : pgprot_val(PAGE_KERNEL);
map_page(v, p, f);
#ifdef CONFIG_PPC_STD_MMU_32
if (ktext)
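
The two hunks above repeat the pgprot_val() pattern in the flag-assembly direction: raw bits are extracted from a pgprot_t before being OR-ed into an integer flags variable. A hypothetical round-trip using the accessors sketched earlier:

	unsigned long flags;
	pgprot_t prot;

	flags = pgprot_val(PAGE_KERNEL) | _PAGE_NO_CACHE;	/* struct to raw bits */
	prot = __pgprot(flags);					/* raw bits to struct */
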
#ifdef CONFIG_DEBUG_VM
assert_spin_locked(&mm->page_table_lock);
WARN_ON(!pmd_trans_huge(pmd));
#endif
- trace_hugepage_set_pmd(addr, pmd);
+ trace_hugepage_set_pmd(addr, pmd_val(pmd));
return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
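
trace_hugepage_set_pmd() takes its second argument as a plain unsigned long, so the pmd_t must be unwrapped at the call site. A self-contained illustration, assuming the struct flavour; the prototype below is a stand-in for the real tracepoint generated from include/trace/events/thp.h:

	typedef struct { unsigned long pmd; } pmd_t;
	#define pmd_val(x)	((x).pmd)

	/* Stand-in for the TRACE_EVENT-generated tracepoint. */
	void trace_hugepage_set_pmd(unsigned long addr, unsigned long pmd);

	static void example(unsigned long addr, pmd_t pmd)
	{
		/* trace_hugepage_set_pmd(addr, pmd);  won't compile: struct vs. long */
		trace_hugepage_set_pmd(addr, pmd_val(pmd));
	}
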
continue;
pte = pte_val(*ptep);
if (hugepage_shift)
- trace_hugepage_invalidate(start, pte_val(pte));
+ trace_hugepage_invalidate(start, pte);
if (!(pte & _PAGE_HASHPTE))
continue;
if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)ptep)))
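
The final hunk fixes the inverse mistake: pte was already unwrapped to an unsigned long by pte = pte_val(*ptep), so the extra pte_val() around it only compiled while pte_val() was a no-op. A standalone sketch of the rule (the entry bits are arbitrary):

	#include <stdio.h>

	typedef struct { unsigned long pte; } pte_t;	/* struct flavour */
	#define pte_val(x)	((x).pte)

	int main(void)
	{
		pte_t entry = { 0x105UL };		/* arbitrary illustrative bits */
		unsigned long pte = pte_val(entry);	/* unwrap exactly once */

		/* pte_val(pte) would not compile here: pte is not a pte_t. */
		printf("raw pte: %#lx\n", pte);
		return 0;
	}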