drivers/md/dm-bio-prison-v2.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2017 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison-v2.h"

#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rwsem.h>

/*----------------------------------------------------------------*/

#define MIN_CELLS 1024

struct dm_bio_prison_v2 {
	struct workqueue_struct *wq;

	spinlock_t lock;
	struct rb_root cells;
	mempool_t cell_pool;
};

static struct kmem_cache *_cell_cache;

/*----------------------------------------------------------------*/

/*
 * MIN_CELLS sizes the mempool reserve, i.e. the number of cells guaranteed
 * to be available _concurrently_.  Don't confuse it with the number of
 * distinct keys the prison can track.
 */
struct dm_bio_prison_v2 *dm_bio_prison_create_v2(struct workqueue_struct *wq)
{
	struct dm_bio_prison_v2 *prison = kzalloc(sizeof(*prison), GFP_KERNEL);
	int ret;

	if (!prison)
		return NULL;

	prison->wq = wq;
	spin_lock_init(&prison->lock);

	ret = mempool_init_slab_pool(&prison->cell_pool, MIN_CELLS, _cell_cache);
	if (ret) {
		kfree(prison);
		return NULL;
	}

	prison->cells = RB_ROOT;

	return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create_v2);

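/*
 * Tears down a prison created with dm_bio_prison_create_v2(): releases the
 * cell mempool and frees the prison structure itself.
 */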
void dm_bio_prison_destroy_v2(struct dm_bio_prison_v2 *prison)
{
	mempool_exit(&prison->cell_pool);
	kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy_v2);

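/*
 * Allocate a cell from the prison's mempool.  With a blocking gfp mask the
 * allocation may sleep until a cell is returned to the pool rather than
 * fail.
 */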
struct dm_bio_prison_cell_v2 *dm_bio_prison_alloc_cell_v2(struct dm_bio_prison_v2 *prison, gfp_t gfp)
{
	return mempool_alloc(&prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell_v2);

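/* Return a cell obtained from dm_bio_prison_alloc_cell_v2() to the pool. */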
void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2 *prison,
				struct dm_bio_prison_cell_v2 *cell)
{
	mempool_free(cell, &prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell_v2);

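/* Initialise a pristine cell for @key: no locks held, empty bio list. */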
static void __setup_new_cell(struct dm_cell_key_v2 *key,
			     struct dm_bio_prison_cell_v2 *cell)
{
	memset(cell, 0, sizeof(*cell));
	memcpy(&cell->key, key, sizeof(cell->key));
	bio_list_init(&cell->bios);
}

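/*
 * Cells are ordered by their virtual field, then dev, then block range.
 * Keys whose block ranges overlap compare equal, so any overlapping key
 * resolves to the same cell.
 */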
static int cmp_keys(struct dm_cell_key_v2 *lhs,
		    struct dm_cell_key_v2 *rhs)
{
	if (lhs->virtual < rhs->virtual)
		return -1;

	if (lhs->virtual > rhs->virtual)
		return 1;

	if (lhs->dev < rhs->dev)
		return -1;

	if (lhs->dev > rhs->dev)
		return 1;

	if (lhs->block_end <= rhs->block_begin)
		return -1;

	if (lhs->block_begin >= rhs->block_end)
		return 1;

	return 0;
}

/*
 * Returns true if a matching cell already exists; otherwise links
 * @cell_prealloc into the tree and returns false.  *result points to the
 * cell in either case.
 */
static bool __find_or_insert(struct dm_bio_prison_v2 *prison,
			     struct dm_cell_key_v2 *key,
			     struct dm_bio_prison_cell_v2 *cell_prealloc,
			     struct dm_bio_prison_cell_v2 **result)
{
	int r;
	struct rb_node **new = &prison->cells.rb_node, *parent = NULL;

	while (*new) {
		struct dm_bio_prison_cell_v2 *cell =
			rb_entry(*new, struct dm_bio_prison_cell_v2, node);

		r = cmp_keys(key, &cell->key);

		parent = *new;
		if (r < 0)
			new = &((*new)->rb_left);

		else if (r > 0)
			new = &((*new)->rb_right);

		else {
			*result = cell;
			return true;
		}
	}

	__setup_new_cell(key, cell_prealloc);
	*result = cell_prealloc;
	rb_link_node(&cell_prealloc->node, parent, new);
	rb_insert_color(&cell_prealloc->node, &prison->cells);

	return false;
}

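/*
 * Take a shared reference on the cell for @key, inserting @cell_prealloc if
 * no cell exists yet.  If the cell is exclusively locked at a level >= the
 * requested @lock_level, @inmate is deferred onto the cell's bio list and
 * false is returned; otherwise the shared count is bumped and true is
 * returned.
 */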
static bool __get(struct dm_bio_prison_v2 *prison,
		  struct dm_cell_key_v2 *key,
		  unsigned int lock_level,
		  struct bio *inmate,
		  struct dm_bio_prison_cell_v2 *cell_prealloc,
		  struct dm_bio_prison_cell_v2 **cell)
{
	if (__find_or_insert(prison, key, cell_prealloc, cell)) {
		if ((*cell)->exclusive_lock) {
			if (lock_level <= (*cell)->exclusive_level) {
				bio_list_add(&(*cell)->bios, inmate);
				return false;
			}
		}

		(*cell)->shared_count++;

	} else
		(*cell)->shared_count = 1;

	return true;
}

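/*
 * Locked wrapper around __get().  Returns true if the shared lock was
 * granted; false means @inmate was deferred onto the cell and will be
 * handed back to the exclusive holder via dm_cell_unlock_v2().
 */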
bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_cell_key_v2 *key,
		    unsigned int lock_level,
		    struct bio *inmate,
		    struct dm_bio_prison_cell_v2 *cell_prealloc,
		    struct dm_bio_prison_cell_v2 **cell_result)
{
	bool r;

	spin_lock_irq(&prison->lock);
	r = __get(prison, key, lock_level, inmate, cell_prealloc, cell_result);
	spin_unlock_irq(&prison->lock);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_get_v2);

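/*
 * Drop a shared reference.  When the last holder goes away: if an exclusive
 * holder is waiting to quiesce, its continuation is queued on the prison's
 * workqueue; otherwise the cell is removed from the tree and true is
 * returned so the caller can free it.
 */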
static bool __put(struct dm_bio_prison_v2 *prison,
		  struct dm_bio_prison_cell_v2 *cell)
{
	BUG_ON(!cell->shared_count);
	cell->shared_count--;

	// FIXME: shared locks granted above the lock level could starve this
	if (!cell->shared_count) {
		if (cell->exclusive_lock) {
			if (cell->quiesce_continuation) {
				queue_work(prison->wq, cell->quiesce_continuation);
				cell->quiesce_continuation = NULL;
			}
		} else {
			rb_erase(&cell->node, &prison->cells);
			return true;
		}
	}

	return false;
}

bool dm_cell_put_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_bio_prison_cell_v2 *cell)
{
	bool r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __put(prison, cell);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_put_v2);

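/*
 * Take the exclusive lock on the cell for @key.  Returns -EBUSY if another
 * exclusive holder exists, 1 if the lock was granted but shared holders
 * remain (the caller must quiesce them before proceeding), and 0 if the
 * lock was granted uncontended.
 */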
static int __lock(struct dm_bio_prison_v2 *prison,
		  struct dm_cell_key_v2 *key,
		  unsigned int lock_level,
		  struct dm_bio_prison_cell_v2 *cell_prealloc,
		  struct dm_bio_prison_cell_v2 **cell_result)
{
	struct dm_bio_prison_cell_v2 *cell;

	if (__find_or_insert(prison, key, cell_prealloc, &cell)) {
		if (cell->exclusive_lock)
			return -EBUSY;

		cell->exclusive_lock = true;
		cell->exclusive_level = lock_level;
		*cell_result = cell;

		// FIXME: we don't yet know what level these shared locks
		// were taken at, so have to quiesce them all.
		return cell->shared_count > 0;

	} else {
		cell = cell_prealloc;
		cell->shared_count = 0;
		cell->exclusive_lock = true;
		cell->exclusive_level = lock_level;
		*cell_result = cell;
	}

	return 0;
}

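/*
 * Locked wrapper around __lock(): -EBUSY if the cell is already exclusively
 * locked, 1 if the caller needs to quiesce shared holders first, 0 on an
 * uncontended lock.
 */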
int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_cell_key_v2 *key,
		    unsigned int lock_level,
		    struct dm_bio_prison_cell_v2 *cell_prealloc,
		    struct dm_bio_prison_cell_v2 **cell_result)
{
	int r;

	spin_lock_irq(&prison->lock);
	r = __lock(prison, key, lock_level, cell_prealloc, cell_result);
	spin_unlock_irq(&prison->lock);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_lock_v2);

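/*
 * If no shared holders remain the continuation is queued on the prison's
 * workqueue straight away; otherwise it is stashed on the cell and kicked
 * off by __put() when the last shared reference is dropped.
 */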
static void __quiesce(struct dm_bio_prison_v2 *prison,
		      struct dm_bio_prison_cell_v2 *cell,
		      struct work_struct *continuation)
{
	if (!cell->shared_count)
		queue_work(prison->wq, continuation);
	else
		cell->quiesce_continuation = continuation;
}

void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison,
			struct dm_bio_prison_cell_v2 *cell,
			struct work_struct *continuation)
{
	spin_lock_irq(&prison->lock);
	__quiesce(prison, cell, continuation);
	spin_unlock_irq(&prison->lock);
}
EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2);

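/*
 * Promote an exclusive lock to @new_lock_level.  Returns -EINVAL if the
 * cell is not exclusively locked, 1 if shared holders still need to be
 * quiesced, 0 otherwise.
 */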
static int __promote(struct dm_bio_prison_v2 *prison,
		     struct dm_bio_prison_cell_v2 *cell,
		     unsigned int new_lock_level)
{
	if (!cell->exclusive_lock)
		return -EINVAL;

	cell->exclusive_level = new_lock_level;
	return cell->shared_count > 0;
}

int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
			    struct dm_bio_prison_cell_v2 *cell,
			    unsigned int new_lock_level)
{
	int r;

	spin_lock_irq(&prison->lock);
	r = __promote(prison, cell, new_lock_level);
	spin_unlock_irq(&prison->lock);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_lock_promote_v2);

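/*
 * Drop the exclusive lock, handing any bios deferred on the cell back to
 * the caller via @bios.  Returns true if the cell was removed from the tree
 * (no shared holders remain) and can be freed; false if shared holders
 * still reference it.
 */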
static bool __unlock(struct dm_bio_prison_v2 *prison,
		     struct dm_bio_prison_cell_v2 *cell,
		     struct bio_list *bios)
{
	BUG_ON(!cell->exclusive_lock);

	bio_list_merge(bios, &cell->bios);
	bio_list_init(&cell->bios);

	if (cell->shared_count) {
		cell->exclusive_lock = false;
		return false;
	}

	rb_erase(&cell->node, &prison->cells);
	return true;
}

bool dm_cell_unlock_v2(struct dm_bio_prison_v2 *prison,
		       struct dm_bio_prison_cell_v2 *cell,
		       struct bio_list *bios)
{
	bool r;

	spin_lock_irq(&prison->lock);
	r = __unlock(prison, cell, bios);
	spin_unlock_irq(&prison->lock);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_unlock_v2);

/*----------------------------------------------------------------*/

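/*
 * Module init/exit helpers: create and destroy the slab cache that backs
 * every prison's cell mempool.
 */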
int __init dm_bio_prison_init_v2(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell_v2, 0);
	if (!_cell_cache)
		return -ENOMEM;

	return 0;
}

void dm_bio_prison_exit_v2(void)
{
	kmem_cache_destroy(_cell_cache);
	_cell_cache = NULL;
}