bcache: Bkey indexing renaming
[platform/adaptation/renesas_rcar/renesas_kernel.git] drivers/md/bcache/debug.c
/*
 * Assorted bcache debug code
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"

#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>

static struct dentry *debug;

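/*
 * Return a human-readable description of what is wrong with the pointers in
 * @k (bad length, offset out of range, stale pointer, null key, no pointers,
 * zeroed key), or an empty string if the key looks sane.
 */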
const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
{
        unsigned i;

        for (i = 0; i < KEY_PTRS(k); i++)
                if (ptr_available(c, k, i)) {
                        struct cache *ca = PTR_CACHE(c, k, i);
                        size_t bucket = PTR_BUCKET_NR(c, k, i);
                        size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

                        if (KEY_SIZE(k) + r > c->sb.bucket_size)
                                return "bad, length too big";
                        if (bucket < ca->sb.first_bucket)
                                return "bad, short offset";
                        if (bucket >= ca->sb.nbuckets)
                                return "bad, offset past end of device";
                        if (ptr_stale(c, k, i))
                                return "stale";
                }

        if (!bkey_cmp(k, &ZERO_KEY))
                return "bad, null key";
        if (!KEY_PTRS(k))
                return "bad, no pointers";
        if (!KEY_SIZE(k))
                return "zeroed key";
        return "";
}

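/*
 * Format @k as "<inode>:<start> len <size> -> [<dev>:<offset> gen <gen>, ...]"
 * into @buf, appending " dirty" and checksum information when set; returns
 * the number of bytes written (output is capped at @size by scnprintf).
 */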
int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
{
        unsigned i = 0;
        char *out = buf, *end = buf + size;

#define p(...)  (out += scnprintf(out, end - out, __VA_ARGS__))

        p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));

        for (i = 0; i < KEY_PTRS(k); i++) {
                if (i)
                        p(", ");

                if (PTR_DEV(k, i) == PTR_CHECK_DEV)
                        p("check dev");
                else
                        p("%llu:%llu gen %llu", PTR_DEV(k, i),
                          PTR_OFFSET(k, i), PTR_GEN(k, i));
        }

        p("]");

        if (KEY_DIRTY(k))
                p(" dirty");
        if (KEY_CSUM(k))
                p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
#undef p
        return out - buf;
}

#ifdef CONFIG_BCACHE_DEBUG

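/*
 * Print every key in bset @i to the console, along with its bucket numbers,
 * bucket priorities and pointer status, and warn if a key sorts after the
 * key that follows it.
 */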
static void dump_bset(struct btree *b, struct bset *i, unsigned set)
{
        struct bkey *k, *next;
        unsigned j;
        char buf[80];

        for (k = i->start; k < bset_bkey_last(i); k = next) {
                next = bkey_next(k);

                bch_bkey_to_text(buf, sizeof(buf), k);
                printk(KERN_ERR "b %u k %zi/%u: %s", set,
                       (uint64_t *) k - i->d, i->keys, buf);

                for (j = 0; j < KEY_PTRS(k); j++) {
                        size_t n = PTR_BUCKET_NR(b->c, k, j);
                        printk(" bucket %zu", n);

                        if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
                                printk(" prio %i",
                                       PTR_BUCKET(b->c, k, j)->prio);
                }

                printk(" %s\n", bch_ptr_status(b->c, k));

                if (next < bset_bkey_last(i) &&
                    bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0)
                        printk(KERN_ERR "Key skipped backwards\n");
        }
}

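/* Dump every bset in btree node @b to the console, under the console lock. */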
static void bch_dump_bucket(struct btree *b)
{
        unsigned i;

        console_lock();
        for (i = 0; i <= b->nsets; i++)
                dump_bset(b, b->sets[i].data,
                          bset_block_offset(b, b->sets[i].data));
        console_unlock();
}

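/*
 * Iterate over the bsets in the on-disk copy of a btree node, starting at
 * @start and stopping at the end of the node or at the first bset whose
 * sequence number no longer matches @start's (i.e. was never written).
 */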
#define for_each_written_bset(b, start, i)                              \
        for (i = (start);                                               \
             (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
             i->seq == (start)->seq;                                    \
             i = (void *) i + set_blocks(i, b->c) * block_bytes(b->c))

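/*
 * Re-read btree node @b from disk into the verify buffers, re-sort the
 * on-disk keys and compare them against the in-memory copy; on a mismatch,
 * dump both copies (and each written on-disk bset) to the console and panic.
 */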
void bch_btree_verify(struct btree *b)
{
        struct btree *v = b->c->verify_data;
        struct bset *ondisk, *sorted, *inmemory;
        struct bio *bio;

        if (!b->c->verify || !b->c->verify_ondisk)
                return;

        down(&b->io_mutex);
        mutex_lock(&b->c->verify_lock);

        ondisk = b->c->verify_ondisk;
        sorted = b->c->verify_data->sets->data;
        inmemory = b->sets->data;

        bkey_copy(&v->key, &b->key);
        v->written = 0;
        v->level = b->level;

        bio = bch_bbio_alloc(b->c);
        bio->bi_bdev            = PTR_CACHE(b->c, &b->key, 0)->bdev;
        bio->bi_iter.bi_sector  = PTR_OFFSET(&b->key, 0);
        bio->bi_iter.bi_size    = KEY_SIZE(&v->key) << 9;
        bch_bio_map(bio, sorted);

        submit_bio_wait(REQ_META|READ_SYNC, bio);
        bch_bbio_free(bio, b->c);

        memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9);

        bch_btree_node_read_done(v);
        sorted = v->sets->data;

        if (inmemory->keys != sorted->keys ||
            memcmp(inmemory->start,
                   sorted->start,
                   (void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) {
                struct bset *i;
                unsigned j;

                console_lock();

                printk(KERN_ERR "*** in memory:\n");
                dump_bset(b, inmemory, 0);

                printk(KERN_ERR "*** read back in:\n");
                dump_bset(v, sorted, 0);

                for_each_written_bset(b, ondisk, i) {
                        unsigned block = ((void *) i - (void *) ondisk) /
                                block_bytes(b->c);

                        printk(KERN_ERR "*** on disk block %u:\n", block);
                        dump_bset(b, i, block);
                }

                printk(KERN_ERR "*** block %zu not written\n",
                       ((void *) i - (void *) ondisk) / block_bytes(b->c));

                for (j = 0; j < inmemory->keys; j++)
                        if (inmemory->d[j] != sorted->d[j])
                                break;

                printk(KERN_ERR "b->written %u\n", b->written);

                console_unlock();
                panic("verify failed at %u\n", j);
        }

        mutex_unlock(&b->c->verify_lock);
        up(&b->io_mutex);
}

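/*
 * Re-issue a clone of @bio against the same device and range and compare the
 * data that comes back with @bio's pages; flag a cache set error on any
 * mismatch.
 */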
void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
        char name[BDEVNAME_SIZE];
        struct bio *check;
        struct bio_vec bv, *bv2;
        struct bvec_iter iter;
        int i;

        check = bio_clone(bio, GFP_NOIO);
        if (!check)
                return;

        if (bio_alloc_pages(check, GFP_NOIO))
                goto out_put;

        submit_bio_wait(READ_SYNC, check);

        bio_for_each_segment(bv, bio, iter) {
                void *p1 = kmap_atomic(bv.bv_page);
                void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);

                cache_set_err_on(memcmp(p1 + bv.bv_offset,
                                        p2 + bv.bv_offset,
                                        bv.bv_len),
                                 dc->disk.c,
                                 "verify failed at dev %s sector %llu",
                                 bdevname(dc->bdev, name),
                                 (uint64_t) bio->bi_iter.bi_sector);

                kunmap_atomic(p1);
        }

        bio_for_each_segment_all(bv2, check, i)
                __free_page(bv2->bv_page);
out_put:
        bio_put(check);
}

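/* Return the total sectors covered by the keys in a leaf node (0 for internal nodes). */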
int __bch_count_data(struct btree *b)
{
        unsigned ret = 0;
        struct btree_iter iter;
        struct bkey *k;

        if (!b->level)
                for_each_key(b, k, &iter)
                        ret += KEY_SIZE(k);
        return ret;
}

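/*
 * Walk all keys in @b and verify the btree node invariants: keys sorted and
 * non-overlapping in leaf nodes, no duplicates in internal nodes, and no key
 * past the node's own key; on a violation, dump the node and panic.
 */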
void __bch_check_keys(struct btree *b, const char *fmt, ...)
{
        va_list args;
        struct bkey *k, *p = NULL;
        struct btree_iter iter;
        const char *err;

        for_each_key(b, k, &iter) {
                if (!b->level) {
                        err = "Keys out of order";
                        if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
                                goto bug;

                        if (bch_ptr_invalid(b, k))
                                continue;

                        err = "Overlapping keys";
                        if (p && bkey_cmp(p, &START_KEY(k)) > 0)
                                goto bug;
                } else {
                        if (bch_ptr_bad(b, k))
                                continue;

                        err = "Duplicate keys";
                        if (p && !bkey_cmp(p, k))
                                goto bug;
                }
                p = k;
        }

        err = "Key larger than btree node key";
        if (p && bkey_cmp(p, &b->key) > 0)
                goto bug;

        return;
bug:
        bch_dump_bucket(b);

        va_start(args, fmt);
        vprintk(fmt, args);
        va_end(args);

        panic("bcache error: %s:\n", err);
}

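/*
 * Sanity check run as a btree iterator advances: if the key just returned
 * sorts after the next key in the set, dump the node and panic.
 */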
void bch_btree_iter_next_check(struct btree_iter *iter)
{
        struct bkey *k = iter->data->k, *next = bkey_next(k);

        if (next < iter->data->end &&
            bkey_cmp(k, iter->b->level ? next : &START_KEY(next)) > 0) {
                bch_dump_bucket(iter->b);
                panic("Key skipped backwards\n");
        }
}

#endif

#ifdef CONFIG_DEBUG_FS

/* XXX: cache set refcounting */

struct dump_iterator {
        char                    buf[PAGE_SIZE];
        size_t                  bytes;
        struct cache_set        *c;
        struct keybuf           keys;
};

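/* Keybuf predicate that matches every key, so the debugfs dump walks the whole keyspace. */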
static bool dump_pred(struct keybuf *buf, struct bkey *k)
{
        return true;
}

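/*
 * debugfs read: drain the iterator's buffer to userspace, then refill it one
 * formatted key per line by rescanning the keybuf, until @size is exhausted
 * or there are no more keys.
 */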
static ssize_t bch_dump_read(struct file *file, char __user *buf,
                             size_t size, loff_t *ppos)
{
        struct dump_iterator *i = file->private_data;
        ssize_t ret = 0;
        char kbuf[80];

        while (size) {
                struct keybuf_key *w;
                unsigned bytes = min(i->bytes, size);

                int err = copy_to_user(buf, i->buf, bytes);
                if (err)
                        return err;

                ret      += bytes;
                buf      += bytes;
                size     -= bytes;
                i->bytes -= bytes;
                memmove(i->buf, i->buf + bytes, i->bytes);

                if (i->bytes)
                        break;

                w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY, dump_pred);
                if (!w)
                        break;

                bch_bkey_to_text(kbuf, sizeof(kbuf), &w->key);
                i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
                bch_keybuf_del(&i->keys, w);
        }

        return ret;
}

static int bch_dump_open(struct inode *inode, struct file *file)
{
        struct cache_set *c = inode->i_private;
        struct dump_iterator *i;

        i = kzalloc(sizeof(struct dump_iterator), GFP_KERNEL);
        if (!i)
                return -ENOMEM;

        file->private_data = i;
        i->c = c;
        bch_keybuf_init(&i->keys);
        i->keys.last_scanned = KEY(0, 0, 0);

        return 0;
}

static int bch_dump_release(struct inode *inode, struct file *file)
{
        kfree(file->private_data);
        return 0;
}

static const struct file_operations cache_set_debug_ops = {
        .owner          = THIS_MODULE,
        .open           = bch_dump_open,
        .read           = bch_dump_read,
        .release        = bch_dump_release
};

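/*
 * Create the per-cache-set debugfs file ("bcache-<set UUID>", read-only)
 * that dumps the btree keys via cache_set_debug_ops.
 */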
void bch_debug_init_cache_set(struct cache_set *c)
{
        if (!IS_ERR_OR_NULL(debug)) {
                char name[50];
                snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);

                c->debug = debugfs_create_file(name, 0400, debug, c,
                                               &cache_set_debug_ops);
        }
}

#endif

void bch_debug_exit(void)
{
        if (!IS_ERR_OR_NULL(debug))
                debugfs_remove_recursive(debug);
}

int __init bch_debug_init(struct kobject *kobj)
{
        int ret = 0;

        debug = debugfs_create_dir("bcache", NULL);
        return ret;
}