vma->vm_page_prot is read locklessly from the rmap_walk while it may be
updated concurrently. Updating it with WRITE_ONCE and reading it with
READ_ONCE prevents the risk of observing intermediate values.
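To illustrate the pattern the patch applies: the writer computes the final
value into a local variable and publishes it with a single WRITE_ONCE, while
lockless readers take one READ_ONCE snapshot. A minimal sketch, assuming
kernel context (struct vm_area_struct, mk_pte); the two helpers below are
illustrative only and not part of the patch:

	/* Writer side (illustrative helper): compute vm_page_prot fully in
	 * a local variable under mmap_sem, then publish it with one store
	 * so a lockless reader can never see a half-updated value. */
	static void publish_page_prot(struct vm_area_struct *vma,
				      pgprot_t vm_page_prot)
	{
		WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
	}

	/* Reader side (illustrative helper, e.g. the rmap_walk paths):
	 * load vma->vm_page_prot exactly once and build the pte from that
	 * snapshot. */
	static pte_t mk_pte_snapshot(struct page *page,
				     struct vm_area_struct *vma)
	{
		return mk_pte(page, READ_ONCE(vma->vm_page_prot));
	}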
Link: http://lkml.kernel.org/r/1474660305-19222-1-git-send-email-aarcange@redhat.com
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Jan Vorlicek <janvorli@microsoft.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-int vma_wants_writenotify(struct vm_area_struct *vma);
+int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl);
if (soft_dirty)
entry = pte_swp_mksoft_dirty(entry);
} else {
- entry = mk_pte(page + i, vma->vm_page_prot);
+ entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
entry = maybe_mkwrite(entry, vma);
if (!write)
entry = pte_wrprotect(entry);
goto unlock;
get_page(new);
- pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
+ pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
if (pte_swp_soft_dirty(*ptep))
pte = pte_mksoft_dirty(pte);
void vma_set_page_prot(struct vm_area_struct *vma)
{
unsigned long vm_flags = vma->vm_flags;
+ pgprot_t vm_page_prot;
- vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
- if (vma_wants_writenotify(vma)) {
+ vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
+ if (vma_wants_writenotify(vma, vm_page_prot)) {
- vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot,
- vm_flags);
+ vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
+ /* remove_protection_ptes reads vma->vm_page_prot without mmap_sem */
+ WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
* to the private version (using protection_map[] without the
* VM_SHARED bit).
*/
-int vma_wants_writenotify(struct vm_area_struct *vma)
+int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
{
vm_flags_t vm_flags = vma->vm_flags;
const struct vm_operations_struct *vm_ops = vma->vm_ops;
/* The open routine did something to the protections that pgprot_modify
* won't preserve? */
- if (pgprot_val(vma->vm_page_prot) !=
- pgprot_val(vm_pgprot_modify(vma->vm_page_prot, vm_flags)))
+ if (pgprot_val(vm_page_prot) !=
+ pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
return 0;
/* Do we need to track softdirty? */
* held in write mode.
*/
vma->vm_flags = newflags;
- dirty_accountable = vma_wants_writenotify(vma);
+ dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
vma_set_page_prot(vma);
change_protection(vma, start, end, vma->vm_page_prot,