/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 */
#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0) {
		shift = ilog2(BITS_PER_LONG);
		/*
		 * If the bitmap is small, shrink the number of bits per word so
		 * we spread over a few cachelines, at least. If less than 4
		 * bits, just forget about it, it's not going to work optimally
		 * anyway.
		 */
		if (depth >= 4) {
			while ((4U << shift) > depth)
				shift--;
		}
	}
	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
	if (!sb->map)
		return -ENOMEM;

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
		spin_lock_init(&sb->map[i].swap_lock);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
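
/*
 * Worked example (illustrative numbers, not from the original source):
 * calling sbitmap_init_node() with depth = 100 and shift = -1 on a 64-bit
 * machine starts at shift = ilog2(64) = 6 and shrinks it while
 * (4 << shift) > 100, stopping at shift = 4, i.e. 16 bits per word.  That
 * gives map_nr = DIV_ROUND_UP(100, 16) = 7 words: the first six hold 16
 * bits each and the last holds the remaining 4.
 */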
void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);
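
/*
 * Note on sbitmap_resize(): the map array itself is not reallocated here,
 * only sb->map_nr and the per-word depths are recomputed, so the new depth
 * has to fit within the map that sbitmap_init_node() originally allocated.
 */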
static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	unsigned int orig_hint = hint;
	int nr;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (orig_hint && hint && wrap) {
				hint = orig_hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}
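
/*
 * Illustrative behaviour of __sbitmap_get_word() (example numbers only):
 * with depth = 8 and hint = 5, the search starts at bit 5.  If bits 5..7
 * are already set and wrap is true, the search restarts once from bit 0;
 * if no zero bit is found at all, -1 is returned.  test_and_set_bit_lock()
 * re-checks the bit atomically, so losing a race to another allocator
 * simply advances the hint and retries.
 */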
/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
{
	unsigned long mask, val;
	unsigned long __maybe_unused flags;
	bool ret = false;

	/* Silence bogus lockdep warning */
#if defined(CONFIG_LOCKDEP)
	local_irq_save(flags);
#endif
	spin_lock(&sb->map[index].swap_lock);

	if (!sb->map[index].cleared)
		goto out_unlock;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	do {
		mask = sb->map[index].cleared;
	} while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask);

	/*
	 * Now clear the masked bits in our free word
	 */
	do {
		val = sb->map[index].word;
	} while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);

	ret = true;
out_unlock:
	spin_unlock(&sb->map[index].swap_lock);
#if defined(CONFIG_LOCKDEP)
	local_irq_restore(flags);
#endif
	return ret;
}
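
/*
 * The deferred-clear scheme above exists so that freeing a bit (via
 * sbitmap_deferred_clear_bit(), see the callers below) only sets a bit in
 * ->cleared instead of writing the allocation-hot ->word on every free; the
 * accumulated cleared bits are folded back into ->word in one batch here,
 * once an allocator actually runs out of free bits in the word.
 */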
static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint, bool round_robin)
{
	int nr;

	do {
		nr = __sbitmap_get_word(&sb->map[index].word,
					sb->map[index].depth, alloc_hint,
					!round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(sb, index))
			break;
	} while (1);

	return nr;
}
int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
						round_robin);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);
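
/*
 * Example (illustrative numbers): with 64-bit words (shift = 6), depth = 128
 * and alloc_hint = 70, SB_NR_TO_INDEX() picks word 1 and, in round-robin
 * mode, SB_NR_TO_BIT() keeps bit offset 6 within that word; otherwise the
 * offset is dropped and the whole word is scanned from bit 0.  If word 1 is
 * exhausted (even after flushing deferred clears), the loop wraps to word 0.
 */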
int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
			unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min(sb->map[index].depth, shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(sb, index))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
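
/*
 * shallow_depth limits how many low-order bits of each word may be used.
 * Illustrative example: with 64-bit words, depth = 128 and shallow_depth = 4,
 * min(map[i].depth, shallow_depth) = 4, so at most 4 bits per word (8 bits
 * across the two words) are available to shallow allocators.
 */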
bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);
bool sbitmap_any_bit_clear(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned long ret;

		ret = find_first_zero_bit(&word->word, word->depth);
		if (ret < word->depth)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);
static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		if (set)
			weight += bitmap_weight(&word->word, word->depth);
		else
			weight += bitmap_weight(&word->cleared, word->depth);
	}
	return weight;
}

static unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true);
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}
void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);
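
/*
 * Example of what sbitmap_show() emits (the values are illustrative):
 *
 *	depth=128
 *	busy=17
 *	cleared=3
 *	bits_per_word=64
 *	map_nr=2
 */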
static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned int word_bits = READ_ONCE(sb->map[i].depth);

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}

	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}
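
/*
 * Worked example (assuming SBQ_WAIT_QUEUES == 8 and SBQ_WAKE_BATCH == 8 as
 * defined in include/linux/sbitmap.h): with depth = 128, shift = 6 and the
 * default min_shallow_depth of UINT_MAX, shallow_depth = 64, the usable
 * depth stays 128, and wake_batch = clamp(128 / 8, 1, 8) = 8.  If a user
 * then sets min_shallow_depth = 8, the usable depth drops to 2 * 8 = 16 and
 * wake_batch becomes clamp(16 / 8, 1, 8) = 2.
 */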
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
	if (ret)
		return ret;

	sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sbq->alloc_hint) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	if (depth && !round_robin) {
		for_each_possible_cpu(i)
			*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
	}

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		free_percpu(sbq->alloc_hint);
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	sbq->round_robin = round_robin;
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
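
/*
 * Minimal usage sketch for the sbitmap_queue API (illustrative only, not
 * part of this file; error handling trimmed):
 *
 *	struct sbitmap_queue sbq;
 *	int tag;
 *
 *	sbitmap_queue_init_node(&sbq, 128, -1, false, GFP_KERNEL, NUMA_NO_NODE);
 *
 *	tag = __sbitmap_queue_get(&sbq);	// -1 if no bit is free
 *	if (tag >= 0) {
 *		// ... use the tag ...
 *		sbitmap_queue_clear(&sbq, tag, raw_smp_processor_id());
 *	}
 *
 *	sbitmap_queue_free(&sbq);
 */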
static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
		 * to ensure that the batch size is updated before the wait
		 * counts.
		 */
		smp_mb__before_atomic();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	unsigned int hint, depth;
	int nr;

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth)
{
	unsigned int hint, depth;
	int nr;

	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
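
/*
 * Note: sbitmap_queue_min_shallow_depth() has to be called before any
 * __sbitmap_queue_get_shallow() caller passes a smaller depth (that is what
 * the WARN_ON_ONCE() above guards against), and it recomputes wake_batch so
 * that batched wakeups still occur when only min_shallow_depth bits per word
 * are usable.
 */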
static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			int o = atomic_read(&sbq->wake_index);

			if (wake_index != o)
				atomic_cmpxchg(&sbq->wake_index, o, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}
static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return false;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

		/*
		 * Pairs with the memory barrier in sbitmap_queue_resize() to
		 * ensure that we see the batch size update before the wait
		 * count is reset.
		 */
		smp_mb__before_atomic();

		/*
		 * For concurrent callers of this, the one that failed the
		 * atomic_cmpxchg() race should call this function again
		 * to wakeup a new batch on a different 'ws'.
		 */
		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
		if (ret == wait_cnt) {
			sbq_index_atomic_inc(&sbq->wake_index);
			wake_up_nr(&ws->wait, wake_batch);
			return false;
		}

		return true;
	}

	return false;
}
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
	while (__sbq_wake_up(sbq))
		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
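
/*
 * Illustrative flow of the batched wakeups above: with wake_batch = 8, each
 * freed bit decrements ws->wait_cnt on the wait queue selected by
 * sbq_wake_ptr().  Only when the count reaches zero does the winner of the
 * atomic_cmpxchg() reset it to wake_batch, advance wake_index and wake up to
 * 8 exclusive waiters, spreading wakeups across the SBQ_WAIT_QUEUES queues.
 */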
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);

	if (likely(!sbq->round_robin && nr < sbq->sb.depth))
		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->accounted) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->accounted = 1;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->accounted) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->accounted = 0;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
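
/*
 * Illustrative waiter loop using the two helpers above (sketch only, loosely
 * modelled on how blk-mq waits for a free tag; the surrounding code is an
 * assumption, not part of this file):
 *
 *	struct sbq_wait wait = { };
 *	struct sbq_wait_state *ws = &sbq->ws[0];
 *	int nr;
 *
 *	init_wait(&wait.wait);
 *	do {
 *		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		nr = __sbitmap_queue_get(sbq);
 *		if (nr >= 0)
 *			break;
 *		io_schedule();
 *	} while (1);
 *	sbitmap_finish_wait(sbq, ws, &wait);
 */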