// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2017 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
#include "dm.h"
#include "dm-bio-prison-v2.h"

#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rwsem.h>
/*----------------------------------------------------------------*/

#define MIN_CELLS 1024
struct dm_bio_prison_v2 {
	struct workqueue_struct *wq;

	spinlock_t lock;
	struct rb_root cells;
	mempool_t cell_pool;
};
static struct kmem_cache *_cell_cache;
/*----------------------------------------------------------------*/

/*
 * The cell mempool is sized by MIN_CELLS: the number of cells you want in
 * use _concurrently_.  Don't confuse it with the number of distinct keys.
 */
struct dm_bio_prison_v2 *dm_bio_prison_create_v2(struct workqueue_struct *wq)
{
	struct dm_bio_prison_v2 *prison = kzalloc(sizeof(*prison), GFP_KERNEL);
	int ret;

	if (!prison)
		return NULL;

	prison->wq = wq;
	spin_lock_init(&prison->lock);

	ret = mempool_init_slab_pool(&prison->cell_pool, MIN_CELLS, _cell_cache);
	if (ret) {
		kfree(prison);
		return NULL;
	}

	prison->cells = RB_ROOT;

	return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create_v2);
void dm_bio_prison_destroy_v2(struct dm_bio_prison_v2 *prison)
{
	mempool_exit(&prison->cell_pool);
	kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy_v2);
struct dm_bio_prison_cell_v2 *dm_bio_prison_alloc_cell_v2(struct dm_bio_prison_v2 *prison, gfp_t gfp)
{
	return mempool_alloc(&prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell_v2);
void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2 *prison,
				struct dm_bio_prison_cell_v2 *cell)
{
	mempool_free(cell, &prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell_v2);
static void __setup_new_cell(struct dm_cell_key_v2 *key,
			     struct dm_bio_prison_cell_v2 *cell)
{
	memset(cell, 0, sizeof(*cell));
	memcpy(&cell->key, key, sizeof(cell->key));
	bio_list_init(&cell->bios);
}
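
/*
 * Keys are ordered by virtual flag, then device, then block range.  Two
 * keys whose [block_begin, block_end) ranges overlap on the same device
 * compare equal, so overlapping regions resolve to the same cell.
 */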
static int cmp_keys(struct dm_cell_key_v2 *lhs,
		    struct dm_cell_key_v2 *rhs)
{
	if (lhs->virtual < rhs->virtual)
		return -1;

	if (lhs->virtual > rhs->virtual)
		return 1;

	if (lhs->dev < rhs->dev)
		return -1;

	if (lhs->dev > rhs->dev)
		return 1;

	if (lhs->block_end <= rhs->block_begin)
		return -1;

	if (lhs->block_begin >= rhs->block_end)
		return 1;

	return 0;
}
/*
 * Returns true if node found, otherwise it inserts a new one.
 */
static bool __find_or_insert(struct dm_bio_prison_v2 *prison,
			     struct dm_cell_key_v2 *key,
			     struct dm_bio_prison_cell_v2 *cell_prealloc,
			     struct dm_bio_prison_cell_v2 **result)
{
	int r;
	struct rb_node **new = &prison->cells.rb_node, *parent = NULL;

	while (*new) {
		struct dm_bio_prison_cell_v2 *cell =
			rb_entry(*new, struct dm_bio_prison_cell_v2, node);

		r = cmp_keys(key, &cell->key);

		parent = *new;
		if (r < 0)
			new = &((*new)->rb_left);
		else if (r > 0)
			new = &((*new)->rb_right);
		else {
			*result = cell;
			return true;
		}
	}

	__setup_new_cell(key, cell_prealloc);
	*result = cell_prealloc;
	rb_link_node(&cell_prealloc->node, parent, new);
	rb_insert_color(&cell_prealloc->node, &prison->cells);

	return false;
}
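
/*
 * A shared get at a level above the exclusive holder's level still
 * succeeds; at or below it, the bio is queued on the cell instead.
 */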
static bool __get(struct dm_bio_prison_v2 *prison,
		  struct dm_cell_key_v2 *key,
		  unsigned int lock_level,
		  struct bio *inmate,
		  struct dm_bio_prison_cell_v2 *cell_prealloc,
		  struct dm_bio_prison_cell_v2 **cell)
{
	if (__find_or_insert(prison, key, cell_prealloc, cell)) {
		if ((*cell)->exclusive_lock) {
			if (lock_level <= (*cell)->exclusive_level) {
				bio_list_add(&(*cell)->bios, inmate);
				return false;
			}
		}

		(*cell)->shared_count++;
	} else
		(*cell)->shared_count = 1;

	return true;
}
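
/*
 * Returns true if the shared lock was granted (*cell_result holds the
 * cell); false if the bio was queued on the cell, to be handed back via
 * dm_cell_unlock_v2() when the exclusive holder drops its lock.
 */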
bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_cell_key_v2 *key,
		    unsigned int lock_level,
		    struct bio *inmate,
		    struct dm_bio_prison_cell_v2 *cell_prealloc,
		    struct dm_bio_prison_cell_v2 **cell_result)
{
	bool r;

	spin_lock_irq(&prison->lock);
	r = __get(prison, key, lock_level, inmate, cell_prealloc, cell_result);
	spin_unlock_irq(&prison->lock);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_get_v2);
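
/*
 * A minimal caller-side sketch of the shared path (build_key() and
 * lock_level are illustrative, not part of this API):
 *
 *	struct dm_bio_prison_cell_v2 *prealloc, *cell;
 *
 *	prealloc = dm_bio_prison_alloc_cell_v2(prison, GFP_NOIO);
 *	build_key(block, block + 1, &key);
 *	if (!dm_cell_get_v2(prison, &key, lock_level, bio, prealloc, &cell)) {
 *		// bio queued on the cell; prealloc wasn't consumed
 *		dm_bio_prison_free_cell_v2(prison, prealloc);
 *		return;
 *	}
 *	if (cell != prealloc)	// an existing cell was reused
 *		dm_bio_prison_free_cell_v2(prison, prealloc);
 *	// issue the io; later, drop the shared ref:
 *	if (dm_cell_put_v2(prison, cell))
 *		dm_bio_prison_free_cell_v2(prison, cell);
 */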
static bool __put(struct dm_bio_prison_v2 *prison,
		  struct dm_bio_prison_cell_v2 *cell)
{
	BUG_ON(!cell->shared_count);
	cell->shared_count--;

	// FIXME: shared locks granted above the lock level could starve this
	if (!cell->shared_count) {
		if (cell->exclusive_lock) {
			if (cell->quiesce_continuation) {
				queue_work(prison->wq, cell->quiesce_continuation);
				cell->quiesce_continuation = NULL;
			}
		} else {
			rb_erase(&cell->node, &prison->cells);
			return true;
		}
	}

	return false;
}
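
/*
 * Drops a shared reference.  Returns true if this was the last reference
 * and the cell has been removed from the prison (the caller should then
 * free it); if an exclusive lock is pending, the quiesce continuation is
 * fired instead.
 */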
bool dm_cell_put_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_bio_prison_cell_v2 *cell)
{
	bool r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __put(prison, cell);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_put_v2);
static int __lock(struct dm_bio_prison_v2 *prison,
		  struct dm_cell_key_v2 *key,
		  unsigned int lock_level,
		  struct dm_bio_prison_cell_v2 *cell_prealloc,
		  struct dm_bio_prison_cell_v2 **cell_result)
{
	struct dm_bio_prison_cell_v2 *cell;

	if (__find_or_insert(prison, key, cell_prealloc, &cell)) {
		if (cell->exclusive_lock)
			return -EBUSY;

		cell->exclusive_lock = true;
		cell->exclusive_level = lock_level;
		*cell_result = cell;

		// FIXME: we don't yet know what level these shared locks
		// were taken at, so have to quiesce them all.
		return cell->shared_count > 0;
	} else {
		cell = cell_prealloc;
		cell->shared_count = 0;
		cell->exclusive_lock = true;
		cell->exclusive_level = lock_level;
		*cell_result = cell;
	}

	return 0;
}
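
/*
 * Returns -EBUSY if the cell is already exclusively locked, 0 if the
 * exclusive lock was granted with no shared holders, and 1 if it was
 * granted but shared holders remain: the caller must wait for them via
 * dm_cell_quiesce_v2() before touching the protected region.
 */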
int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_cell_key_v2 *key,
		    unsigned int lock_level,
		    struct dm_bio_prison_cell_v2 *cell_prealloc,
		    struct dm_bio_prison_cell_v2 **cell_result)
{
	int r;

	spin_lock_irq(&prison->lock);
	r = __lock(prison, key, lock_level, cell_prealloc, cell_result);
	spin_unlock_irq(&prison->lock);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_lock_v2);
static void __quiesce(struct dm_bio_prison_v2 *prison,
		      struct dm_bio_prison_cell_v2 *cell,
		      struct work_struct *continuation)
{
	if (!cell->shared_count)
		queue_work(prison->wq, continuation);
	else
		cell->quiesce_continuation = continuation;
}
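
/*
 * The continuation is queued on the prison's workqueue once all shared
 * holders have dropped their references: immediately if shared_count is
 * already zero, otherwise from the final dm_cell_put_v2().
 */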
void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison,
			struct dm_bio_prison_cell_v2 *cell,
			struct work_struct *continuation)
{
	spin_lock_irq(&prison->lock);
	__quiesce(prison, cell, continuation);
	spin_unlock_irq(&prison->lock);
}
EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2);
static int __promote(struct dm_bio_prison_v2 *prison,
		     struct dm_bio_prison_cell_v2 *cell,
		     unsigned int new_lock_level)
{
	if (!cell->exclusive_lock)
		return -EINVAL;

	cell->exclusive_level = new_lock_level;
	return cell->shared_count > 0;
}
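
/*
 * Returns -EINVAL if the cell is not exclusively locked, 0 if the
 * promotion was immediate, and 1 if shared holders remain and must be
 * quiesced again at the new level.
 */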
int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
			    struct dm_bio_prison_cell_v2 *cell,
			    unsigned int new_lock_level)
{
	int r;

	spin_lock_irq(&prison->lock);
	r = __promote(prison, cell, new_lock_level);
	spin_unlock_irq(&prison->lock);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_lock_promote_v2);
static bool __unlock(struct dm_bio_prison_v2 *prison,
		     struct dm_bio_prison_cell_v2 *cell,
		     struct bio_list *bios)
{
	BUG_ON(!cell->exclusive_lock);

	bio_list_merge(bios, &cell->bios);
	bio_list_init(&cell->bios);

	if (cell->shared_count) {
		cell->exclusive_lock = false;
		return false;
	}

	rb_erase(&cell->node, &prison->cells);
	return true;
}
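
/*
 * Any bios that were queued against the exclusive lock are transferred to
 * @bios for the caller to resubmit.  Returns true if the cell is now
 * unused and has been removed from the prison; the caller should free it.
 */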
bool dm_cell_unlock_v2(struct dm_bio_prison_v2 *prison,
		       struct dm_bio_prison_cell_v2 *cell,
		       struct bio_list *bios)
{
	bool r;

	spin_lock_irq(&prison->lock);
	r = __unlock(prison, cell, bios);
	spin_unlock_irq(&prison->lock);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_unlock_v2);
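
/*
 * Exclusive-lock lifecycle, sketched from the caller's side (the
 * continuation work item and lock_level are illustrative):
 *
 *	r = dm_cell_lock_v2(prison, &key, lock_level, prealloc, &cell);
 *	if (r < 0)
 *		return r;		// someone else holds it exclusively
 *	if (r)				// shared holders present
 *		dm_cell_quiesce_v2(prison, cell, &continuation_ws);
 *	// ... continuation runs once shared holders have drained;
 *	// do the exclusive work, then:
 *	if (dm_cell_unlock_v2(prison, cell, &bios))
 *		dm_bio_prison_free_cell_v2(prison, cell);
 *	// resubmit everything queued on &bios
 */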
/*----------------------------------------------------------------*/

int __init dm_bio_prison_init_v2(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell_v2, 0);
	if (!_cell_cache)
		return -ENOMEM;

	return 0;
}
void dm_bio_prison_exit_v2(void)
{
	kmem_cache_destroy(_cell_cache);
	_cell_cache = NULL;
}