// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
8 struct xstats xfsstats;
10 static int counter_val(struct xfsstats __percpu *stats, int idx)
14 for_each_possible_cpu(cpu)
15 val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));
19 int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
23 uint64_t xs_xstrat_bytes = 0;
24 uint64_t xs_write_bytes = 0;
25 uint64_t xs_read_bytes = 0;
26 uint64_t defer_relog = 0;
28 static const struct xstats_entry {
32 { "extent_alloc", xfsstats_offset(xs_abt_lookup) },
33 { "abt", xfsstats_offset(xs_blk_mapr) },
34 { "blk_map", xfsstats_offset(xs_bmbt_lookup) },
35 { "bmbt", xfsstats_offset(xs_dir_lookup) },
36 { "dir", xfsstats_offset(xs_trans_sync) },
37 { "trans", xfsstats_offset(xs_ig_attempts) },
38 { "ig", xfsstats_offset(xs_log_writes) },
39 { "log", xfsstats_offset(xs_try_logspace)},
40 { "push_ail", xfsstats_offset(xs_xstrat_quick)},
41 { "xstrat", xfsstats_offset(xs_write_calls) },
42 { "rw", xfsstats_offset(xs_attr_get) },
43 { "attr", xfsstats_offset(xs_iflush_count)},
44 { "icluster", xfsstats_offset(vn_active) },
45 { "vnodes", xfsstats_offset(xb_get) },
46 { "buf", xfsstats_offset(xs_abtb_2) },
47 { "abtb2", xfsstats_offset(xs_abtc_2) },
48 { "abtc2", xfsstats_offset(xs_bmbt_2) },
49 { "bmbt2", xfsstats_offset(xs_ibt_2) },
50 { "ibt2", xfsstats_offset(xs_fibt_2) },
51 { "fibt2", xfsstats_offset(xs_rmap_2) },
52 { "rmapbt", xfsstats_offset(xs_refcbt_2) },
53 { "refcntbt", xfsstats_offset(xs_qm_dqreclaims)},
54 /* we print both series of quota information together */
55 { "qm", xfsstats_offset(xs_xstrat_bytes)},
58 /* Loop over all stats groups */
60 for (i = j = 0; i < ARRAY_SIZE(xstats); i++) {
61 len += scnprintf(buf + len, PATH_MAX - len, "%s",
63 /* inner loop does each group */
64 for (; j < xstats[i].endpoint; j++)
65 len += scnprintf(buf + len, PATH_MAX - len, " %u",
66 counter_val(stats, j));
67 len += scnprintf(buf + len, PATH_MAX - len, "\n");
69 /* extra precision counters */
70 for_each_possible_cpu(i) {
71 xs_xstrat_bytes += per_cpu_ptr(stats, i)->s.xs_xstrat_bytes;
72 xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes;
73 xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;
74 defer_relog += per_cpu_ptr(stats, i)->s.defer_relog;
77 len += scnprintf(buf + len, PATH_MAX-len, "xpc %llu %llu %llu\n",
78 xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
79 len += scnprintf(buf + len, PATH_MAX-len, "defer_relog %llu\n",
81 len += scnprintf(buf + len, PATH_MAX-len, "debug %u\n",
91 void xfs_stats_clearall(struct xfsstats __percpu *stats)
96 xfs_notice(NULL, "Clearing xfsstats");
97 for_each_possible_cpu(c) {
99 /* save vn_active, it's a universal truth! */
100 vn_active = per_cpu_ptr(stats, c)->s.vn_active;
101 memset(per_cpu_ptr(stats, c), 0, sizeof(*stats));
102 per_cpu_ptr(stats, c)->s.vn_active = vn_active;
107 #ifdef CONFIG_PROC_FS
108 /* legacy quota interfaces */
109 #ifdef CONFIG_XFS_QUOTA
111 #define XFSSTAT_START_XQMSTAT xfsstats_offset(xs_qm_dqreclaims)
112 #define XFSSTAT_END_XQMSTAT xfsstats_offset(xs_qm_dquot)
114 static int xqm_proc_show(struct seq_file *m, void *v)
116 /* maximum; incore; ratio free to inuse; freelist */
117 seq_printf(m, "%d\t%d\t%d\t%u\n",
118 0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT),
119 0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT + 1));
123 /* legacy quota stats interface no 2 */
124 static int xqmstat_proc_show(struct seq_file *m, void *v)
129 for (j = XFSSTAT_START_XQMSTAT; j < XFSSTAT_END_XQMSTAT; j++)
130 seq_printf(m, " %u", counter_val(xfsstats.xs_stats, j));
#endif /* CONFIG_XFS_QUOTA */
137 xfs_init_procfs(void)
139 if (!proc_mkdir("fs/xfs", NULL))
142 if (!proc_symlink("fs/xfs/stat", NULL,
143 "/sys/fs/xfs/stats/stats"))
146 #ifdef CONFIG_XFS_QUOTA
147 if (!proc_create_single("fs/xfs/xqmstat", 0, NULL, xqmstat_proc_show))
149 if (!proc_create_single("fs/xfs/xqm", 0, NULL, xqm_proc_show))
155 remove_proc_subtree("fs/xfs", NULL);
160 xfs_cleanup_procfs(void)
162 remove_proc_subtree("fs/xfs", NULL);
#endif /* CONFIG_PROC_FS */