pcpcntr: remove percpu_counter_sum_all()
author	Dave Chinner <dchinner@redhat.com>
Thu, 16 Mar 2023 00:31:03 +0000 (17:31 -0700)
committer	Darrick J. Wong <djwong@kernel.org>
Sun, 19 Mar 2023 17:02:04 +0000 (10:02 -0700)
percpu_counter_sum_all() is now redundant: the race condition it
was invented to handle is now handled directly by
percpu_counter_sum(), and all users of percpu_counter_sum_all()
have been removed.

Remove it.

This effectively reverts the changes made in f689054aace2
("percpu_counter: add percpu_counter_sum_all interface"), except for
the cpumask iteration fix to percpu_counter_sum() made earlier in
this series.
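
For callers, the end state is that the plain interface is now
sufficient even while CPUs are going offline. A minimal caller-side
sketch (the counter variable is illustrative, not from this series):

	/*
	 * Hypothetical user needing an accurate sum during CPU
	 * hotplug. Before this series this required
	 * percpu_counter_sum_all(); percpu_counter_sum() now also
	 * walks cpu_dying_mask, so it returns the same accurate
	 * result.
	 */
	s64 total = percpu_counter_sum(&fbc);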

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
include/linux/percpu_counter.h
lib/percpu_counter.c

diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 521a733..75b73c8 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -45,7 +45,6 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
                              s32 batch);
 s64 __percpu_counter_sum(struct percpu_counter *fbc);
-s64 percpu_counter_sum_all(struct percpu_counter *fbc);
 int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
 void percpu_counter_sync(struct percpu_counter *fbc);
 
@@ -196,11 +195,6 @@ static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
        return percpu_counter_read(fbc);
 }
 
-static inline s64 percpu_counter_sum_all(struct percpu_counter *fbc)
-{
-       return percpu_counter_read(fbc);
-}
-
 static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
 {
        return true;
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 0e09631..5004463 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -122,23 +122,6 @@ void percpu_counter_sync(struct percpu_counter *fbc)
 }
 EXPORT_SYMBOL(percpu_counter_sync);
 
-static s64 __percpu_counter_sum_mask(struct percpu_counter *fbc,
-                             const struct cpumask *cpu_mask)
-{
-       s64 ret;
-       int cpu;
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&fbc->lock, flags);
-       ret = fbc->count;
-       for_each_cpu_or(cpu, cpu_online_mask, cpu_mask) {
-               s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
-               ret += *pcount;
-       }
-       raw_spin_unlock_irqrestore(&fbc->lock, flags);
-       return ret;
-}
-
 /*
  * Add up all the per-cpu counts, return the result.  This is a more accurate
  * but much slower version of percpu_counter_read_positive().
@@ -153,22 +136,21 @@ static s64 __percpu_counter_sum_mask(struct percpu_counter *fbc,
  */
 s64 __percpu_counter_sum(struct percpu_counter *fbc)
 {
+       s64 ret;
+       int cpu;
+       unsigned long flags;
 
-       return __percpu_counter_sum_mask(fbc, cpu_dying_mask);
+       raw_spin_lock_irqsave(&fbc->lock, flags);
+       ret = fbc->count;
+       for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
+               s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
+               ret += *pcount;
+       }
+       raw_spin_unlock_irqrestore(&fbc->lock, flags);
+       return ret;
 }
 EXPORT_SYMBOL(__percpu_counter_sum);
 
-/*
- * This is slower version of percpu_counter_sum as it traverses all possible
- * cpus. Use this only in the cases where accurate data is needed in the
- * presense of CPUs getting offlined.
- */
-s64 percpu_counter_sum_all(struct percpu_counter *fbc)
-{
-       return __percpu_counter_sum_mask(fbc, cpu_possible_mask);
-}
-EXPORT_SYMBOL(percpu_counter_sum_all);
-
 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
                          struct lock_class_key *key)
 {