diff --git a/mm/rmap.c b/mm/rmap.c
index 8632e02..19392e0 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
  *     mapping->invalidate_lock (in filemap_fault)
  *       page->flags PG_locked (lock_page)
  *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
- *           mapping->i_mmap_rwsem
- *             anon_vma->rwsem
- *               mm->page_table_lock or pte_lock
- *                 swap_lock (in swap_duplicate, swap_info_get)
- *                   mmlist_lock (in mmput, drain_mmlist and others)
- *                   mapping->private_lock (in block_dirty_folio)
- *                     folio_lock_memcg move_lock (in block_dirty_folio)
- *                       i_pages lock (widely used)
- *                         lruvec->lru_lock (in folio_lruvec_lock_irq)
- *                   inode->i_lock (in set_page_dirty's __mark_inode_dirty)
- *                   bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
- *                     sb_lock (within inode_lock in fs/fs-writeback.c)
- *                     i_pages lock (widely used, in set_page_dirty,
- *                               in arch-dependent flush_dcache_mmap_lock,
- *                               within bdi.wb->list_lock in __sync_single_inode)
+ *           vma_start_write
+ *             mapping->i_mmap_rwsem
+ *               anon_vma->rwsem
+ *                 mm->page_table_lock or pte_lock
+ *                   swap_lock (in swap_duplicate, swap_info_get)
+ *                     mmlist_lock (in mmput, drain_mmlist and others)
+ *                     mapping->private_lock (in block_dirty_folio)
+ *                       folio_lock_memcg move_lock (in block_dirty_folio)
+ *                         i_pages lock (widely used)
+ *                           lruvec->lru_lock (in folio_lruvec_lock_irq)
+ *                     inode->i_lock (in set_page_dirty's __mark_inode_dirty)
+ *                     bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
+ *                       sb_lock (within inode_lock in fs/fs-writeback.c)
+ *                       i_pages lock (widely used, in set_page_dirty,
+ *                                 in arch-dependent flush_dcache_mmap_lock,
+ *                                 within bdi.wb->list_lock in __sync_single_inode)
  *
  * anon_vma->rwsem,mapping->i_mmap_rwsem   (memory_failure, collect_procs_anon)
  *   ->tasklist_lock
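The first hunk threads the per-VMA lock into the documented hierarchy: vma_start_write() is taken under mmap_lock and ahead of mapping->i_mmap_rwsem and anon_vma->rwsem, so everything below it shifts one indentation level. A minimal sketch of a writer honoring that order (illustrative only, not part of this patch; assumes a file-backed VMA):

	/* Caller holds mmap_lock for write. */
	static void example_vma_update(struct vm_area_struct *vma)
	{
		vma_start_write(vma);		/* per-VMA write lock */
		i_mmap_lock_write(vma->vm_file->f_mapping);	/* mapping->i_mmap_rwsem */
		/* ... modify the VMA and its interval-tree linkage ... */
		i_mmap_unlock_write(vma->vm_file->f_mapping);
		/* the per-VMA write lock is released when mmap_lock is dropped */
	}
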
@@ -641,10 +642,14 @@ void try_to_unmap_flush_dirty(void)
 #define TLB_FLUSH_BATCH_PENDING_LARGE                  \
        (TLB_FLUSH_BATCH_PENDING_MASK / 2)
 
-static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
+static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval)
 {
        struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
-       int batch, nbatch;
+       int batch;
+       bool writable = pte_dirty(pteval);
+
+       if (!pte_accessible(mm, pteval))
+               return;
 
        arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
        tlb_ubc->flush_required = true;
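This hunk moves two decisions out of the callers: the writable hint is now derived from the PTE value itself via pte_dirty(), and PTEs that were never accessible return early before any batching state is touched, since a PTE that cannot be cached in the TLB needs no flush. For reference, a condensed form of the x86 pte_accessible() test this relies on (simplified from arch/x86/include/asm/pgtable.h, not part of this patch):

	static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
	{
		if (pte_flags(a) & _PAGE_PRESENT)
			return true;

		/* non-present, but a racing mprotect/munmap flush is still
		 * pending, so a stale TLB entry may exist */
		if ((pte_flags(a) & _PAGE_PROTNONE) && mm_tlb_flush_pending(mm))
			return true;

		return false;
	}
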
@@ -662,11 +667,8 @@ retry:
                 * overflow.  Reset `pending' and `flushed' to be 1 and 0 if
                 * `pending' becomes large.
                 */
-               nbatch = atomic_cmpxchg(&mm->tlb_flush_batched, batch, 1);
-               if (nbatch != batch) {
-                       batch = nbatch;
+               if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1))
                        goto retry;
-               }
        } else {
                atomic_inc(&mm->tlb_flush_batched);
        }
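The retry loop shrinks because atomic_try_cmpxchg() returns a success flag and, on failure, writes the value it observed back through its second argument, so the manual `batch = nbatch` reload is redundant. A stand-alone C11 analogue of the pattern (illustrative names, not kernel API):

	#include <stdatomic.h>

	static void bump_or_reset(atomic_int *pending, int large)
	{
		int batch = atomic_load(pending);
	retry:
		if (batch > large) {
			/* on failure, 'batch' is refreshed with the value
			 * currently in *pending, so jump back to the test */
			if (!atomic_compare_exchange_strong(pending, &batch, 1))
				goto retry;
		} else {
			atomic_fetch_add(pending, 1);
		}
	}
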
@@ -731,7 +733,7 @@ void flush_tlb_batched_pending(struct mm_struct *mm)
        }
 }
 #else
-static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
+static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval)
 {
 }
 
@@ -1582,7 +1584,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                                 */
                                pteval = ptep_get_and_clear(mm, address, pvmw.pte);
 
-                               set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
+                               set_tlb_ubc_flush_pending(mm, pteval);
                        } else {
                                pteval = ptep_clear_flush(vma, address, pvmw.pte);
                        }
@@ -1963,7 +1965,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                                 */
                                pteval = ptep_get_and_clear(mm, address, pvmw.pte);
 
-                               set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
+                               set_tlb_ubc_flush_pending(mm, pteval);
                        } else {
                                pteval = ptep_clear_flush(vma, address, pvmw.pte);
                        }
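
Both call sites, in try_to_unmap_one() and try_to_migrate_one(), end up with the same shape: clear the PTE, then hand the raw value to the batching helper, which now decides on its own whether a flush needs to be queued at all and whether the batch must be marked writable. Condensed caller shape after this change (surrounding error handling elided):

	if (should_defer_flush(mm, flags)) {
		pteval = ptep_get_and_clear(mm, address, pvmw.pte);
		/* no-op when the old PTE could not be live in any TLB */
		set_tlb_ubc_flush_pending(mm, pteval);
	} else {
		pteval = ptep_clear_flush(vma, address, pvmw.pte);
	}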