lib/ref_tracker: improve printing of leaked-reference statistics
[platform/kernel/linux-starfive.git] / lib / ref_tracker.c
index dc7b14a..cf5609b 100644 (file)
@@ -1,11 +1,16 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
+
+#define pr_fmt(fmt) "ref_tracker: " fmt
+
 #include <linux/export.h>
+#include <linux/list_sort.h>
 #include <linux/ref_tracker.h>
 #include <linux/slab.h>
 #include <linux/stacktrace.h>
 #include <linux/stackdepot.h>
 
 #define REF_TRACKER_STACK_ENTRIES 16
+#define STACK_BUF_SIZE 1024
 
 struct ref_tracker {
        struct list_head        head;   /* anchor into dir->list or dir->quarantine */
@@ -14,6 +19,141 @@ struct ref_tracker {
        depot_stack_handle_t    free_stack_handle;
 };
 
+/*
+ * Snapshot of leak statistics for one ref_tracker_dir, grouped by
+ * allocation stack trace.  Built by ref_tracker_get_stats() and freed
+ * by the caller with kfree().
+ */
+struct ref_tracker_dir_stats {
+       int total;      /* number of live trackers walked on dir->list */
+       int count;      /* number of distinct stacks recorded in stacks[] */
+       struct {
+               depot_stack_handle_t stack_handle;      /* stackdepot handle of the alloc site */
+               unsigned int count;     /* trackers sharing this stack */
+       } stacks[];     /* flexible array, sized by the 'limit' argument */
+};
+
+static struct ref_tracker_dir_stats *
+ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit)
+{
+       struct ref_tracker_dir_stats *stats;
+       struct ref_tracker *tracker;
+
+       stats = kmalloc(struct_size(stats, stacks, limit),
+                       GFP_NOWAIT | __GFP_NOWARN);
+       if (!stats)
+               return ERR_PTR(-ENOMEM);
+       stats->total = 0;
+       stats->count = 0;
+
+       list_for_each_entry(tracker, &dir->list, head) {
+               depot_stack_handle_t stack = tracker->alloc_stack_handle;
+               int i;
+
+               ++stats->total;
+               for (i = 0; i < stats->count; ++i)
+                       if (stats->stacks[i].stack_handle == stack)
+                               break;
+               if (i >= limit)
+                       continue;
+               if (i >= stats->count) {
+                       stats->stacks[i].stack_handle = stack;
+                       stats->stacks[i].count = 0;
+                       ++stats->count;
+               }
+               ++stats->stacks[i].count;
+       }
+
+       return stats;
+}
+
+/*
+ * Minimal output sink for leak reports: when buf is NULL, pr_ostream()
+ * routes each line to pr_err(); otherwise lines are appended to buf.
+ */
+struct ostream {
+       char *buf;      /* destination buffer, or NULL for printk output */
+       int size, used; /* capacity of buf / bytes consumed so far */
+};
+
+/*
+ * Emit one formatted line to @stream.  With a NULL buffer the line goes
+ * to the kernel log via pr_err(); otherwise it is appended with
+ * snprintf() and 'used' advances by at most the remaining space, so a
+ * truncated write clamps instead of overrunning.  pr_fmt() is applied
+ * explicitly on the buffer path so both paths carry the same prefix.
+ * Statement-expression macro: @stream is evaluated exactly once.
+ */
+#define pr_ostream(stream, fmt, args...) \
+({ \
+       struct ostream *_s = (stream); \
+\
+       if (!_s->buf) { \
+               pr_err(fmt, ##args); \
+       } else { \
+               int ret, len = _s->size - _s->used; \
+               ret = snprintf(_s->buf + _s->used, len, pr_fmt(fmt), ##args); \
+               _s->used += min(ret, len); \
+       } \
+})
+
+/*
+ * Report current leaks on @dir to @s, grouped by allocation stack, with
+ * at most @display_limit distinct stacks decoded.  Prints nothing when
+ * the tracker list is empty.  Allocations use GFP_NOWAIT since this may
+ * run in any context; if the stats allocation fails a single error line
+ * is emitted instead of the report.
+ */
+static void
+__ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir,
+                            unsigned int display_limit, struct ostream *s)
+{
+       struct ref_tracker_dir_stats *stats;
+       unsigned int i = 0, skipped;
+       depot_stack_handle_t stack;
+       char *sbuf;
+
+       lockdep_assert_held(&dir->lock);
+
+       if (list_empty(&dir->list))
+               return;
+
+       stats = ref_tracker_get_stats(dir, display_limit);
+       if (IS_ERR(stats)) {
+               pr_ostream(s, "%s@%pK: couldn't get stats, error %pe\n",
+                          dir->name, dir, stats);
+               return;
+       }
+
+       /* sbuf may be NULL on failure; "%s" below then shows "(null)". */
+       sbuf = kmalloc(STACK_BUF_SIZE, GFP_NOWAIT | __GFP_NOWARN);
+
+       /* 'skipped' ends up counting trackers whose stack didn't fit. */
+       for (i = 0, skipped = stats->total; i < stats->count; ++i) {
+               stack = stats->stacks[i].stack_handle;
+               if (sbuf && !stack_depot_snprint(stack, sbuf, STACK_BUF_SIZE, 4))
+                       sbuf[0] = 0;
+               pr_ostream(s, "%s@%pK has %d/%d users at\n%s\n", dir->name, dir,
+                          stats->stacks[i].count, stats->total, sbuf);
+               skipped -= stats->stacks[i].count;
+       }
+
+       if (skipped)
+               pr_ostream(s, "%s@%pK skipped reports about %d/%d users.\n",
+                          dir->name, dir, skipped, stats->total);
+
+       kfree(sbuf);
+
+       kfree(stats);
+}
+
+void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
+                                 unsigned int display_limit)
+{
+       struct ostream os = {};
+
+       __ref_tracker_dir_pr_ostream(dir, display_limit, &os);
+}
+EXPORT_SYMBOL(ref_tracker_dir_print_locked);
+
+void ref_tracker_dir_print(struct ref_tracker_dir *dir,
+                          unsigned int display_limit)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&dir->lock, flags);
+       ref_tracker_dir_print_locked(dir, display_limit);
+       spin_unlock_irqrestore(&dir->lock, flags);
+}
+EXPORT_SYMBOL(ref_tracker_dir_print);
+
+int ref_tracker_dir_snprint(struct ref_tracker_dir *dir, char *buf, size_t size)
+{
+       struct ostream os = { .buf = buf, .size = size };
+       unsigned long flags;
+
+       spin_lock_irqsave(&dir->lock, flags);
+       __ref_tracker_dir_pr_ostream(dir, 16, &os);
+       spin_unlock_irqrestore(&dir->lock, flags);
+
+       return os.used;
+}
+EXPORT_SYMBOL(ref_tracker_dir_snprint);
+
 void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
 {
        struct ref_tracker *tracker, *n;
@@ -27,13 +167,13 @@ void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
                kfree(tracker);
                dir->quarantine_avail++;
        }
-       list_for_each_entry_safe(tracker, n, &dir->list, head) {
-               pr_err("leaked reference.\n");
-               if (tracker->alloc_stack_handle)
-                       stack_depot_print(tracker->alloc_stack_handle);
+       if (!list_empty(&dir->list)) {
+               ref_tracker_dir_print_locked(dir, 16);
                leak = true;
-               list_del(&tracker->head);
-               kfree(tracker);
+               list_for_each_entry_safe(tracker, n, &dir->list, head) {
+                       list_del(&tracker->head);
+                       kfree(tracker);
+               }
        }
        spin_unlock_irqrestore(&dir->lock, flags);
        WARN_ON_ONCE(leak);
@@ -42,28 +182,6 @@ void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
 }
 EXPORT_SYMBOL(ref_tracker_dir_exit);
 
-void ref_tracker_dir_print(struct ref_tracker_dir *dir,
-                          unsigned int display_limit)
-{
-       struct ref_tracker *tracker;
-       unsigned long flags;
-       unsigned int i = 0;
-
-       spin_lock_irqsave(&dir->lock, flags);
-       list_for_each_entry(tracker, &dir->list, head) {
-               if (i < display_limit) {
-                       pr_err("leaked reference.\n");
-                       if (tracker->alloc_stack_handle)
-                               stack_depot_print(tracker->alloc_stack_handle);
-                       i++;
-               } else {
-                       break;
-               }
-       }
-       spin_unlock_irqrestore(&dir->lock, flags);
-}
-EXPORT_SYMBOL(ref_tracker_dir_print);
-
 int ref_tracker_alloc(struct ref_tracker_dir *dir,
                      struct ref_tracker **trackerp,
                      gfp_t gfp)
@@ -71,7 +189,7 @@ int ref_tracker_alloc(struct ref_tracker_dir *dir,
        unsigned long entries[REF_TRACKER_STACK_ENTRIES];
        struct ref_tracker *tracker;
        unsigned int nr_entries;
-       gfp_t gfp_mask = gfp;
+       gfp_t gfp_mask = gfp | __GFP_NOWARN;
        unsigned long flags;
 
        WARN_ON_ONCE(dir->dead);
@@ -119,7 +237,8 @@ int ref_tracker_free(struct ref_tracker_dir *dir,
                return -EEXIST;
        }
        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
-       stack_handle = stack_depot_save(entries, nr_entries, GFP_ATOMIC);
+       stack_handle = stack_depot_save(entries, nr_entries,
+                                       GFP_NOWAIT | __GFP_NOWARN);
 
        spin_lock_irqsave(&dir->lock, flags);
        if (tracker->dead) {