// SPDX-License-Identifier: GPL-2.0
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

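/*
 * Per-queue bookkeeping: the RCU-protected list of registered stat
 * callbacks, a lock serializing updates to it, and a count of users
 * that need request accounting (QUEUE_FLAG_STATS) kept enabled.
 */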
struct blk_queue_stats {
        struct list_head callbacks;
        spinlock_t lock;
        int accounting;
};

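/*
 * Reset a stat bucket: min starts at U64_MAX (-1ULL) so the first sample
 * always replaces it; everything else starts at zero.
 */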
void blk_rq_stat_init(struct blk_rq_stat *stat)
{
        stat->min = -1ULL;
        stat->max = stat->nr_samples = stat->mean = 0;
        stat->batch = 0;
}

/*
 * src is a per-cpu stat: its samples are still summed in ->batch and
 * its ->mean hasn't been computed yet.
 */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
        if (!src->nr_samples)
                return;

        dst->min = min(dst->min, src->min);
        dst->max = max(dst->max, src->max);

        dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
                                dst->nr_samples + src->nr_samples);

        dst->nr_samples += src->nr_samples;
}

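/*
 * Record one sample. The value is only accumulated into ->batch here;
 * it is folded into the mean later, by blk_rq_stat_sum().
 */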
void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
        stat->min = min(stat->min, value);
        stat->max = max(stat->max, value);
        stat->batch += value;
        stat->nr_samples++;
}

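/*
 * Called at request completion: compute the device I/O time from
 * ->io_start_time_ns and feed it to every active callback's per-cpu
 * bucket, as selected by the callback's bucket_fn.
 */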
void blk_stat_add(struct request *rq, u64 now)
{
        struct request_queue *q = rq->q;
        struct blk_stat_callback *cb;
        struct blk_rq_stat *stat;
        int bucket, cpu;
        u64 value;

        value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

        blk_throtl_stat_add(rq, value);

        rcu_read_lock();
        cpu = get_cpu();
        list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
                if (!blk_stat_is_active(cb))
                        continue;

                bucket = cb->bucket_fn(rq);
                if (bucket < 0)
                        continue;

                stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
                blk_rq_stat_add(stat, value);
        }
        put_cpu();
        rcu_read_unlock();
}

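/*
 * Timer handler: fold each online CPU's buckets into cb->stat, reset
 * the per-cpu copies, then hand the aggregated window to cb->timer_fn.
 */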
static void blk_stat_timer_fn(struct timer_list *t)
{
        struct blk_stat_callback *cb = from_timer(cb, t, timer);
        unsigned int bucket;
        int cpu;

        for (bucket = 0; bucket < cb->buckets; bucket++)
                blk_rq_stat_init(&cb->stat[bucket]);

        for_each_online_cpu(cpu) {
                struct blk_rq_stat *cpu_stat;

                cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
                for (bucket = 0; bucket < cb->buckets; bucket++) {
                        blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
                        blk_rq_stat_init(&cpu_stat[bucket]);
                }
        }

        cb->timer_fn(cb);
}

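/*
 * Allocate a callback with @buckets aggregate buckets plus a per-cpu
 * copy of each. @bucket_fn maps a request to a bucket index; a negative
 * return means "don't account this request". The callback collects
 * nothing until it is added to a queue and its timer is armed (see the
 * activation helpers in blk-stat.h).
 *
 * Sketch of the usual lifecycle (my_timer_fn/my_bucket_fn are
 * hypothetical; blk-wbt is an in-tree user of this pattern):
 *
 *	cb = blk_stat_alloc_callback(my_timer_fn, my_bucket_fn, 2, data);
 *	if (!cb)
 *		return -ENOMEM;
 *	blk_stat_add_callback(q, cb);
 *	...
 *	blk_stat_remove_callback(q, cb);
 *	blk_stat_free_callback(cb);
 */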
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
                        int (*bucket_fn)(const struct request *),
                        unsigned int buckets, void *data)
{
        struct blk_stat_callback *cb;

        cb = kmalloc(sizeof(*cb), GFP_KERNEL);
        if (!cb)
                return NULL;

        cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
                                 GFP_KERNEL);
        if (!cb->stat) {
                kfree(cb);
                return NULL;
        }
        cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
                                      __alignof__(struct blk_rq_stat));
        if (!cb->cpu_stat) {
                kfree(cb->stat);
                kfree(cb);
                return NULL;
        }

        cb->timer_fn = timer_fn;
        cb->bucket_fn = bucket_fn;
        cb->data = data;
        cb->buckets = buckets;
        timer_setup(&cb->timer, blk_stat_timer_fn, 0);

        return cb;
}

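/*
 * Register @cb on @q: zero its per-cpu buckets for every possible CPU,
 * publish it on the RCU-protected callback list, and switch request
 * stats collection on for the queue.
 */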
void blk_stat_add_callback(struct request_queue *q,
                           struct blk_stat_callback *cb)
{
        unsigned int bucket;
        unsigned long flags;
        int cpu;

        for_each_possible_cpu(cpu) {
                struct blk_rq_stat *cpu_stat;

                cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
                for (bucket = 0; bucket < cb->buckets; bucket++)
                        blk_rq_stat_init(&cpu_stat[bucket]);
        }

        spin_lock_irqsave(&q->stats->lock, flags);
        list_add_tail_rcu(&cb->list, &q->stats->callbacks);
        blk_queue_flag_set(QUEUE_FLAG_STATS, q);
        spin_unlock_irqrestore(&q->stats->lock, flags);
}

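/*
 * Unregister @cb. QUEUE_FLAG_STATS stays set while other callbacks
 * remain or accounting references are held; del_timer_sync() guarantees
 * the timer handler has finished before we return.
 */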
void blk_stat_remove_callback(struct request_queue *q,
                              struct blk_stat_callback *cb)
{
        unsigned long flags;

        spin_lock_irqsave(&q->stats->lock, flags);
        list_del_rcu(&cb->list);
        if (list_empty(&q->stats->callbacks) && !q->stats->accounting)
                blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
        spin_unlock_irqrestore(&q->stats->lock, flags);

        del_timer_sync(&cb->timer);
}

static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
        struct blk_stat_callback *cb;

        cb = container_of(head, struct blk_stat_callback, rcu);
        free_percpu(cb->cpu_stat);
        kfree(cb->stat);
        kfree(cb);
}

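/*
 * Free @cb after an RCU grace period, so a concurrent blk_stat_add()
 * still walking the callback list can't touch freed memory.
 */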
void blk_stat_free_callback(struct blk_stat_callback *cb)
{
        if (cb)
                call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}

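/*
 * Drop one accounting reference; stats are switched off only once the
 * count hits zero and no callbacks remain registered.
 */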
void blk_stat_disable_accounting(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(&q->stats->lock, flags);
        if (!--q->stats->accounting && list_empty(&q->stats->callbacks))
                blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
        spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_disable_accounting);

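/*
 * Take one accounting reference; the first user switches request stats
 * collection on.
 */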
void blk_stat_enable_accounting(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(&q->stats->lock, flags);
        if (!q->stats->accounting++)
                blk_queue_flag_set(QUEUE_FLAG_STATS, q);
        spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);

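/*
 * Allocate and initialize the per-queue stats bookkeeping; returns NULL
 * if the allocation fails.
 */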
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
        struct blk_queue_stats *stats;

        stats = kmalloc(sizeof(*stats), GFP_KERNEL);
        if (!stats)
                return NULL;

        INIT_LIST_HEAD(&stats->callbacks);
        spin_lock_init(&stats->lock);
        stats->accounting = 0;

        return stats;
}

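/*
 * All callbacks must have been removed by now; a non-empty list here
 * means a callback was leaked, hence the WARN_ON().
 */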
void blk_free_queue_stats(struct blk_queue_stats *stats)
{
        if (!stats)
                return;

        WARN_ON(!list_empty(&stats->callbacks));

        kfree(stats);
}

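/*
 * Lazily allocate the poll-stat buckets on first use. GFP_ATOMIC plus
 * cmpxchg() make this safe to call from the I/O path: the loser of a
 * racing allocation frees its copy and returns true (stats are already
 * set up), while the winner registers q->poll_cb and returns false,
 * presumably because no samples can have been collected yet for this
 * caller to use.
 */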
bool blk_stats_alloc_enable(struct request_queue *q)
{
        struct blk_rq_stat *poll_stat;

        poll_stat = kcalloc(BLK_MQ_POLL_STATS_BKTS, sizeof(*poll_stat),
                                GFP_ATOMIC);
        if (!poll_stat)
                return false;

        if (cmpxchg(&q->poll_stat, NULL, poll_stat) != NULL) {
                kfree(poll_stat);
                return true;
        }

        blk_stat_add_callback(q, q->poll_cb);
        return false;
}