From bdeb91881088810ab1d8ae620862c3b4d78f4041 Mon Sep 17 00:00:00 2001
From: Uros Bizjak
Date: Mon, 27 Feb 2023 22:42:28 +0100
Subject: [PATCH] mm/rmap: use atomic_try_cmpxchg in set_tlb_ubc_flush_pending

Use atomic_try_cmpxchg instead of atomic_cmpxchg (*ptr, old, new) == old
in set_tlb_ubc_flush_pending.  The x86 CMPXCHG instruction returns success
in the ZF flag, so this change saves a compare after cmpxchg (and the
related move instruction in front of cmpxchg).  Also, try_cmpxchg
implicitly assigns the old *ptr value to "old" when cmpxchg fails.

No functional change intended.

Link: https://lkml.kernel.org/r/20230227214228.3533299-1-ubizjak@gmail.com
Signed-off-by: Uros Bizjak
Signed-off-by: Andrew Morton
---
 mm/rmap.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/mm/rmap.c b/mm/rmap.c
index 8632e02..1ea2756 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -644,7 +644,7 @@ void try_to_unmap_flush_dirty(void)
 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
 {
 	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
-	int batch, nbatch;
+	int batch;
 
 	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
 	tlb_ubc->flush_required = true;
@@ -662,11 +662,8 @@ retry:
 		 * overflow. Reset `pending' and `flushed' to be 1 and 0 if
 		 * `pending' becomes large.
 		 */
-		nbatch = atomic_cmpxchg(&mm->tlb_flush_batched, batch, 1);
-		if (nbatch != batch) {
-			batch = nbatch;
+		if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1))
 			goto retry;
-		}
 	} else {
 		atomic_inc(&mm->tlb_flush_batched);
 	}
-- 
2.7.4
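
For readers less familiar with the try_cmpxchg convention, the sketch below is not part of the patch; it mimics the retry loop above in userspace with C11 atomics, whose atomic_compare_exchange_strong() has the same contract as the kernel's atomic_try_cmpxchg(): it returns false on failure and writes the current value back into the expected-value argument.  The names counter, bump_or_reset and the limit of 4 are made up for the example.

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-in for mm->tlb_flush_batched. */
static atomic_int counter;

static void bump_or_reset(int limit)
{
	int batch = atomic_load(&counter);

retry:
	if (batch >= limit) {
		/*
		 * Try to reset the counter to 1.  On failure, 'batch'
		 * already holds the value just read by the failed
		 * compare-exchange, so there is no separate reload or
		 * extra compare before retrying.
		 */
		if (!atomic_compare_exchange_strong(&counter, &batch, 1))
			goto retry;
	} else {
		atomic_fetch_add(&counter, 1);
	}
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		bump_or_reset(4);
	printf("counter = %d\n", atomic_load(&counter));
	return 0;
}

This is the reason the patched loop no longer needs nbatch or the explicit comparison: on failure the updated value is already in batch, and on x86 the success test compiles down to checking the ZF flag set by CMPXCHG, as noted in the commit message.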