drivers/md/faulty.c
/*
 * faulty.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2004 Neil Brown
 *
 * faulty-device-simulator personality for md
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */


/*
 * The "faulty" personality causes some requests to fail.
 *
 * Possible failure modes are:
 *   reads fail "randomly" but succeed on retry
 *   writes fail "randomly" but succeed on retry
 *   reads for some address fail and then persist until a write
 *   reads for some address fail and then persist irrespective of writes
 *   writes for some address fail and persist
 *   all writes fail
 *
 * Several modes can be active at the same time, but only one can be set
 * at array creation; others can be added later.
 * A mode can be one-shot or recurrent, with the recurrence being
 * once in every N requests.
 * The bottom 5 bits of the "layout" select the mode.  The
 * remaining bits give the period N, or 0 for one-shot.
 *
 * There is an implementation limit on the number of concurrently
 * persisting-faulty blocks.  When a new fault is requested that would
 * exceed the limit, it is ignored.
 * All current faults can be cleared using a layout of "0".
 *
 * Requests are always sent to the device.  If they are to fail,
 * we clone the bio and insert a new b_end_io into the chain.
 */
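/*
 * Illustrative example of the layout encoding (a sketch, not part of
 * the driver's logic): with the constants defined below,
 *   (10 << ModeShift) | ReadTransient   == (10 << 5) | 1 == 321
 * asks for transient read failures recurring once every 10 requests,
 * while
 *   (0 << ModeShift) | WritePersistent  == 2
 * asks for a single one-shot persistent write fault.  ClearFaults and
 * ClearErrors are handled specially in reshape() below: the former
 * empties the table of persistent faults, the latter resets every
 * mode's period and counter.
 */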

#define WriteTransient  0
#define ReadTransient   1
#define WritePersistent 2
#define ReadPersistent  3
#define WriteAll        4 /* doesn't go to device */
#define ReadFixable     5
#define Modes           6

#define ClearErrors     31
#define ClearFaults     30

#define AllPersist      100 /* internal use only */
#define NoPersist       101

#define ModeMask        0x1f
#define ModeShift       5

#define MaxFault        50

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include <linux/seq_file.h>


/*
 * end_io for the cloned bio: copy the clone's size and sector back to
 * the original bio, free the clone, and fail the original with -EIO.
 */
static void faulty_fail(struct bio *bio, int error)
{
        struct bio *b = bio->bi_private;

        b->bi_iter.bi_size = bio->bi_iter.bi_size;
        b->bi_iter.bi_sector = bio->bi_iter.bi_sector;

        bio_put(bio);

        bio_io_error(b);
}

struct faulty_conf {
        int period[Modes];              /* recurrence period per mode, 0 = one-shot */
        atomic_t counters[Modes];       /* requests remaining until the next failure */
        sector_t faults[MaxFault];      /* sectors with persistent faults */
        int     modes[MaxFault];        /* fault mode for each entry in faults[] */
        int nfaults;                    /* number of entries in use */
        struct md_rdev *rdev;           /* the single underlying device */
};

/*
 * Decide whether the current request should fail for @mode: decrement
 * the mode's counter and, when it reaches zero, report a failure and
 * rearm the counter with the configured period (if any).
 */
static int check_mode(struct faulty_conf *conf, int mode)
{
        if (conf->period[mode] == 0 &&
            atomic_read(&conf->counters[mode]) <= 0)
                return 0; /* no failure, no decrement */

        if (atomic_dec_and_test(&conf->counters[mode])) {
                if (conf->period[mode])
                        atomic_set(&conf->counters[mode], conf->period[mode]);
                return 1;
        }
        return 0;
}

/*
 * Check the fault table for a sector in [start, end): return 1 if the
 * recorded fault applies to this request direction.  A write to a
 * ReadFixable sector clears ("fixes") that fault.
 */
static int check_sector(struct faulty_conf *conf, sector_t start, sector_t end, int dir)
{
        int i;

        for (i = 0; i < conf->nfaults; i++)
                if (conf->faults[i] >= start &&
                    conf->faults[i] < end) {
                        /* found it ... */
                        switch (conf->modes[i] * 2 + dir) {
                        case WritePersistent*2+WRITE: return 1;
                        case ReadPersistent*2+READ: return 1;
                        case ReadFixable*2+READ: return 1;
                        case ReadFixable*2+WRITE:
                                conf->modes[i] = NoPersist;
                                return 0;
                        case AllPersist*2+READ:
                        case AllPersist*2+WRITE: return 1;
                        default:
                                return 0;
                        }
                }
        return 0;
}

/*
 * Record a persistent fault at @start, merging with any existing entry
 * for that sector (a read fault plus a write fault becomes AllPersist).
 * Slots marked NoPersist are reused; if the table is full the new
 * fault is silently dropped.
 */
static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
{
        int i;
        int n = conf->nfaults;

        for (i = 0; i < conf->nfaults; i++)
                if (conf->faults[i] == start) {
                        switch (mode) {
                        case NoPersist: conf->modes[i] = mode; return;
                        case WritePersistent:
                                if (conf->modes[i] == ReadPersistent ||
                                    conf->modes[i] == ReadFixable)
                                        conf->modes[i] = AllPersist;
                                else
                                        conf->modes[i] = WritePersistent;
                                return;
                        case ReadPersistent:
                                if (conf->modes[i] == WritePersistent)
                                        conf->modes[i] = AllPersist;
                                else
                                        conf->modes[i] = ReadPersistent;
                                return;
                        case ReadFixable:
                                if (conf->modes[i] == WritePersistent ||
                                    conf->modes[i] == ReadPersistent)
                                        conf->modes[i] = AllPersist;
                                else
                                        conf->modes[i] = ReadFixable;
                                return;
                        }
                } else if (conf->modes[i] == NoPersist)
                        n = i;

        if (n >= MaxFault)
                return;
        conf->faults[n] = start;
        conf->modes[n] = mode;
        if (conf->nfaults == n)
                conf->nfaults = n+1;
}

static void make_request(struct mddev *mddev, struct bio *bio)
{
        struct faulty_conf *conf = mddev->private;
        int failit = 0;

        if (bio_data_dir(bio) == WRITE) {
                /* write request */
                if (atomic_read(&conf->counters[WriteAll])) {
                        /* special case - don't decrement, don't generic_make_request,
                         * just fail immediately
                         */
                        bio_endio(bio, -EIO);
                        return;
                }

                if (check_sector(conf, bio->bi_iter.bi_sector,
                                 bio_end_sector(bio), WRITE))
                        failit = 1;
                if (check_mode(conf, WritePersistent)) {
                        add_sector(conf, bio->bi_iter.bi_sector,
                                   WritePersistent);
                        failit = 1;
                }
                if (check_mode(conf, WriteTransient))
                        failit = 1;
        } else {
                /* read request */
                if (check_sector(conf, bio->bi_iter.bi_sector,
                                 bio_end_sector(bio), READ))
                        failit = 1;
                if (check_mode(conf, ReadTransient))
                        failit = 1;
                if (check_mode(conf, ReadPersistent)) {
                        add_sector(conf, bio->bi_iter.bi_sector,
                                   ReadPersistent);
                        failit = 1;
                }
                if (check_mode(conf, ReadFixable)) {
                        add_sector(conf, bio->bi_iter.bi_sector,
                                   ReadFixable);
                        failit = 1;
                }
        }
        if (failit) {
                struct bio *b = bio_clone_mddev(bio, GFP_NOIO, mddev);

                b->bi_bdev = conf->rdev->bdev;
                b->bi_private = bio;
                b->bi_end_io = faulty_fail;
                bio = b;
        } else
                bio->bi_bdev = conf->rdev->bdev;

        generic_make_request(bio);
}

static void status(struct seq_file *seq, struct mddev *mddev)
{
        struct faulty_conf *conf = mddev->private;
        int n;

        if ((n = atomic_read(&conf->counters[WriteTransient])) != 0)
                seq_printf(seq, " WriteTransient=%d(%d)",
                           n, conf->period[WriteTransient]);

        if ((n = atomic_read(&conf->counters[ReadTransient])) != 0)
                seq_printf(seq, " ReadTransient=%d(%d)",
                           n, conf->period[ReadTransient]);

        if ((n = atomic_read(&conf->counters[WritePersistent])) != 0)
                seq_printf(seq, " WritePersistent=%d(%d)",
                           n, conf->period[WritePersistent]);

        if ((n = atomic_read(&conf->counters[ReadPersistent])) != 0)
                seq_printf(seq, " ReadPersistent=%d(%d)",
                           n, conf->period[ReadPersistent]);

        if ((n = atomic_read(&conf->counters[ReadFixable])) != 0)
                seq_printf(seq, " ReadFixable=%d(%d)",
                           n, conf->period[ReadFixable]);

        if ((n = atomic_read(&conf->counters[WriteAll])) != 0)
                seq_printf(seq, " WriteAll");

        seq_printf(seq, " nfaults=%d", conf->nfaults);
}


static int reshape(struct mddev *mddev)
{
        int mode = mddev->new_layout & ModeMask;
        int count = mddev->new_layout >> ModeShift;
        struct faulty_conf *conf = mddev->private;

        if (mddev->new_layout < 0)
                return 0;

        /* new layout */
        if (mode == ClearFaults)
                conf->nfaults = 0;
        else if (mode == ClearErrors) {
                int i;
                for (i = 0; i < Modes; i++) {
                        conf->period[i] = 0;
                        atomic_set(&conf->counters[i], 0);
                }
        } else if (mode < Modes) {
                conf->period[mode] = count;
                if (!count)
                        count++;
                atomic_set(&conf->counters[mode], count);
        } else
                return -EINVAL;
        mddev->new_layout = -1;
        mddev->layout = -1; /* makes sure further changes come through */
        return 0;
}

static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
        WARN_ONCE(raid_disks,
                  "%s does not support generic reshape\n", __func__);

        if (sectors == 0)
                return mddev->dev_sectors;

        return sectors;
}

static int run(struct mddev *mddev)
{
        struct md_rdev *rdev;
        int i;
        struct faulty_conf *conf;

        if (md_check_no_bitmap(mddev))
                return -EINVAL;

        conf = kmalloc(sizeof(*conf), GFP_KERNEL);
        if (!conf)
                return -ENOMEM;

        for (i = 0; i < Modes; i++) {
                atomic_set(&conf->counters[i], 0);
                conf->period[i] = 0;
        }
        conf->nfaults = 0;

        rdev_for_each(rdev, mddev) {
                conf->rdev = rdev;
                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                  rdev->data_offset << 9);
        }

        md_set_array_sectors(mddev, faulty_size(mddev, 0, 0));
        mddev->private = conf;

        reshape(mddev);

        return 0;
}

static int stop(struct mddev *mddev)
{
        struct faulty_conf *conf = mddev->private;

        kfree(conf);
        mddev->private = NULL;
        return 0;
}

static struct md_personality faulty_personality =
{
        .name           = "faulty",
        .level          = LEVEL_FAULTY,
        .owner          = THIS_MODULE,
        .make_request   = make_request,
        .run            = run,
        .stop           = stop,
        .status         = status,
        .check_reshape  = reshape,
        .size           = faulty_size,
};

static int __init raid_init(void)
{
        return register_md_personality(&faulty_personality);
}

static void raid_exit(void)
{
        unregister_md_personality(&faulty_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fault injection personality for MD");
MODULE_ALIAS("md-personality-10"); /* faulty */
MODULE_ALIAS("md-faulty");
MODULE_ALIAS("md-level--5");