From 8a13897fb0daa8f56821f263f0c63661e1c6acae Mon Sep 17 00:00:00 2001
From: Axel Rasmussen
Date: Fri, 7 Jul 2023 14:55:37 -0700
Subject: [PATCH] mm: userfaultfd: support UFFDIO_POISON for hugetlbfs

The behavior here is the same as it is for anon/shmem. This is done
separately because hugetlb pte marker handling is a bit different.

Link: https://lkml.kernel.org/r/20230707215540.2324998-6-axelrasmussen@google.com
Signed-off-by: Axel Rasmussen
Acked-by: Peter Xu
Cc: Al Viro
Cc: Brian Geffon
Cc: Christian Brauner
Cc: David Hildenbrand
Cc: Gaosheng Cui
Cc: Huang, Ying
Cc: Hugh Dickins
Cc: James Houghton
Cc: Jan Alexander Steffens (heftig)
Cc: Jiaqi Yan
Cc: Jonathan Corbet
Cc: Kefeng Wang
Cc: Liam R. Howlett
Cc: Miaohe Lin
Cc: Mike Kravetz
Cc: Mike Rapoport (IBM)
Cc: Muchun Song
Cc: Nadav Amit
Cc: Naoya Horiguchi
Cc: Ryan Roberts
Cc: Shuah Khan
Cc: Suleiman Souhlal
Cc: Suren Baghdasaryan
Cc: T.J. Alumbaugh
Cc: Yu Zhao
Cc: ZhangPeng
Signed-off-by: Andrew Morton
---
 mm/hugetlb.c     | 19 +++++++++++++++++++
 mm/userfaultfd.c |  3 +--
 2 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ffee297..7b076eb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6262,6 +6262,25 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 	int writable;
 	bool folio_in_pagecache = false;
 
+	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
+		ptl = huge_pte_lock(h, dst_mm, dst_pte);
+
+		/* Don't overwrite any existing PTEs (even markers) */
+		if (!huge_pte_none(huge_ptep_get(dst_pte))) {
+			spin_unlock(ptl);
+			return -EEXIST;
+		}
+
+		_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
+		set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
+
+		/* No need to invalidate - it was non-present before */
+		update_mmu_cache(dst_vma, dst_addr, dst_pte);
+
+		spin_unlock(ptl);
+		return 0;
+	}
+
 	if (is_continue) {
 		ret = -EFAULT;
 		folio = filemap_lock_folio(mapping, idx);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index dd16718..0fc69ef 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -377,8 +377,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
 	 * by THP. Since we can not reliably insert a zero page, this
 	 * feature is not supported.
 	 */
-	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE) ||
-	    uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
+	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
 		mmap_read_unlock(dst_mm);
 		return -EINVAL;
 	}
-- 
2.7.4
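
For reference only (not part of the patch): a minimal userspace sketch of what this change enables, i.e. issuing UFFDIO_POISON against a hugetlbfs-backed registered range. It assumes uapi headers that define UFFDIO_POISON, a uffd that has already gone through the UFFDIO_API handshake and been registered over the mapping with UFFDIO_REGISTER (not shown), and addr/len aligned to the huge page size.

/*
 * Illustrative sketch: poison a huge-page-aligned range via UFFDIO_POISON.
 * The hypothetical helper name and the setup described above are assumptions.
 */
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <sys/ioctl.h>

static int poison_range(int uffd, unsigned long long addr, unsigned long long len)
{
	struct uffdio_poison poison = {
		.range = { .start = addr, .len = len },
		.mode  = 0,	/* or UFFDIO_POISON_MODE_DONTWAKE */
	};

	/*
	 * With this patch, the kernel installs PTE_MARKER_POISONED for the
	 * hugetlb range; later accesses raise SIGBUS, matching the existing
	 * anon/shmem behavior described in the commit message.
	 */
	if (ioctl(uffd, UFFDIO_POISON, &poison) == -1) {
		perror("UFFDIO_POISON");
		return -1;
	}
	return 0;
}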