// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * This code builds two trees of free cluster extents.
 * Trees are sorted by start of extent and by length of extent.
 * NTFS_MAX_WND_EXTENTS defines the maximum number of elements in trees.
 * In the extreme case, the code reads the on-disk bitmap to find free clusters.
 */
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/nls.h>
/*
 * Maximum number of extents in tree.
 */
#define NTFS_MAX_WND_EXTENTS (32u * 1024u)
struct rb_node_key {
	struct rb_node node;
	size_t key;
};

struct e_node {
	struct rb_node_key start; /* Tree sorted by start. */
	struct rb_node_key count; /* Tree sorted by len. */
};
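/*
 * Illustrative sketch, not part of the original driver: one free run of
 * 'len' clusters starting at cluster 'lcn' is cached as a single e_node
 * whose two keys index the same extent in both trees. The helper name is
 * hypothetical.
 */
static inline void wnd_example_fill(struct e_node *e, size_t lcn, size_t len)
{
	e->start.key = lcn;	/* Key for the tree sorted by start. */
	e->count.key = len;	/* Key for the tree sorted by length. */
}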
static int wnd_rescan(struct wnd_bitmap *wnd);
static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw);
static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits);

static struct kmem_cache *ntfs_enode_cachep;
int __init ntfs3_init_bitmap(void)
{
	ntfs_enode_cachep =
		kmem_cache_create("ntfs3_enode_cache", sizeof(struct e_node), 0,
				  SLAB_RECLAIM_ACCOUNT, NULL);
	return ntfs_enode_cachep ? 0 : -ENOMEM;
}
void ntfs3_exit_bitmap(void)
{
	kmem_cache_destroy(ntfs_enode_cachep);
}
static inline u32 wnd_bits(const struct wnd_bitmap *wnd, size_t i)
{
	return i + 1 == wnd->nwnd ? wnd->bits_last : wnd->sb->s_blocksize * 8;
}
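/*
 * Illustrative sketch, not part of the original driver: how the helpers
 * below split a global bit number into a window index and an in-window
 * offset. One window is one filesystem block, i.e. s_blocksize * 8 bits.
 * The helper name is hypothetical.
 */
static inline void wnd_example_locate(const struct wnd_bitmap *wnd, size_t bit,
				      size_t *iw, u32 *wbit)
{
	u32 wbits = 8 * wnd->sb->s_blocksize;

	*iw = bit >> (wnd->sb->s_blocksize_bits + 3); /* bit / wbits */
	*wbit = bit & (wbits - 1);		      /* bit % wbits */
}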
/*
 * wnd_scan - Scan range [wpos, wend) of window @buf.
 *
 * @b_pos and @b_len track the biggest free fragment seen so far.
 *
 * Return: Position of a run of at least @to_alloc free bits, or -1 if
 * not found.
 */
static size_t wnd_scan(const ulong *buf, size_t wbit, u32 wpos, u32 wend,
		       size_t to_alloc, size_t *prev_tail, size_t *b_pos,
		       size_t *b_len)
{
	while (wpos < wend) {
		size_t free_len;
		u32 free_bits, end;
		u32 used = find_next_zero_bit(buf, wend, wpos);

		if (used >= wend) {
			/* Only used bits remain; record the pending tail. */
			if (*b_len < *prev_tail) {
				*b_pos = wbit - *prev_tail;
				*b_len = *prev_tail;
			}

			*prev_tail = 0;
			return -1;
		}

		if (used > wpos) {
			/* A used gap breaks the tail carried over. */
			wpos = used;
			if (*b_len < *prev_tail) {
				*b_pos = wbit - *prev_tail;
				*b_len = *prev_tail;
			}

			*prev_tail = 0;
		}

		/*
		 * Now we have a fragment [wpos, wend) starting with 0.
		 */
		end = wpos + to_alloc - *prev_tail;
		free_bits = find_next_bit(buf, min(end, wend), wpos);

		free_len = *prev_tail + free_bits - wpos;

		if (*b_len < free_len) {
			*b_pos = wbit + wpos - *prev_tail;
			*b_len = free_len;
		}

		if (free_len >= to_alloc)
			return wbit + wpos - *prev_tail;

		if (free_bits >= wend) {
			/* The window ends with zeros; keep them as the tail. */
			*prev_tail += free_bits - wpos;
			break;
		}

		/* Skip the '1' bit; the accumulated tail is broken. */
		wpos = free_bits + 1;
		*prev_tail = 0;
	}

	return -1;
}
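/*
 * Illustrative sketch, not part of the original driver: wnd_scan() over a
 * tiny on-stack bitmap. Bit 3 is used, so the first free run of length 8
 * starts at bit 4 and the call returns 4. All values are hypothetical.
 */
static inline size_t wnd_example_scan(void)
{
	DECLARE_BITMAP(map, 64);
	size_t prev_tail = 0, b_pos = 0, b_len = 0;

	bitmap_zero(map, 64);
	__set_bit(3, map); /* Free runs: [0, 3) and [4, 64). */

	/* Look for 8 contiguous zero bits; window starts at global bit 0. */
	return wnd_scan(map, 0, 0, 64, 8, &prev_tail, &b_pos, &b_len);
}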
/*
 * wnd_close - Frees all resources.
 */
void wnd_close(struct wnd_bitmap *wnd)
{
	struct rb_node *node, *next;

	kfree(wnd->free_bits);
	run_close(&wnd->run);

	node = rb_first(&wnd->start_tree);

	while (node) {
		next = rb_next(node);
		rb_erase(node, &wnd->start_tree);
		kmem_cache_free(ntfs_enode_cachep,
				rb_entry(node, struct e_node, start.node));
		node = next;
	}
}
/*
 * rb_lookup - Find the right-most node with key not greater than @v.
 */
static struct rb_node *rb_lookup(struct rb_root *root, size_t v)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *r = NULL;

	while (*p) {
		struct rb_node_key *k;

		k = rb_entry(*p, struct rb_node_key, node);
		if (v < k->key) {
			p = &(*p)->rb_left;
		} else if (v > k->key) {
			/* Remember the best floor candidate and go right. */
			r = &k->node;
			p = &(*p)->rb_right;
		} else {
			return &k->node;
		}
	}

	return r;
}
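/*
 * Illustrative sketch, not part of the original driver: the typical
 * pattern built on rb_lookup() - find the extent starting at or before
 * 'bit' and test whether it still covers 'bit'. The helper name is
 * hypothetical.
 */
static inline bool wnd_example_covers(struct wnd_bitmap *wnd, size_t bit)
{
	struct rb_node *n = rb_lookup(&wnd->start_tree, bit);
	struct e_node *e;

	if (!n)
		return false;

	e = rb_entry(n, struct e_node, start.node);
	return bit < e->start.key + e->count.key;
}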
/*
 * rb_insert_count - Helper function to insert a node into the 'count' tree.
 *
 * The tree is kept in descending order of count (ties broken by start),
 * so rb_first() is the biggest extent and rb_last() the smallest.
 */
static inline bool rb_insert_count(struct rb_root *root, struct e_node *e)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	size_t e_ckey = e->count.key;
	size_t e_skey = e->start.key;

	while (*p) {
		struct e_node *k =
			rb_entry(parent = *p, struct e_node, count.node);

		if (e_ckey > k->count.key) {
			p = &(*p)->rb_left;
		} else if (e_ckey < k->count.key) {
			p = &(*p)->rb_right;
		} else if (e_skey < k->start.key) {
			p = &(*p)->rb_left;
		} else if (e_skey > k->start.key) {
			p = &(*p)->rb_right;
		} else {
			WARN_ON(1);
			return false;
		}
	}

	rb_link_node(&e->count.node, parent, p);
	rb_insert_color(&e->count.node, root);
	return true;
}
/*
 * rb_insert_start - Helper function to insert a node into the 'start' tree.
 */
static inline bool rb_insert_start(struct rb_root *root, struct e_node *e)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	size_t e_skey = e->start.key;

	while (*p) {
		struct e_node *k;

		parent = *p;

		k = rb_entry(parent, struct e_node, start.node);
		if (e_skey < k->start.key) {
			p = &(*p)->rb_left;
		} else if (e_skey > k->start.key) {
			p = &(*p)->rb_right;
		} else {
			WARN_ON(1);
			return false;
		}
	}

	rb_link_node(&e->start.node, parent, p);
	rb_insert_color(&e->start.node, root);
	return true;
}
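/*
 * Illustrative sketch, not part of the original driver: caching one free
 * extent in both trees, which is the core of wnd_add_free_ext() below.
 * GFP_ATOMIC matches the allocations in this file; the helper name is
 * hypothetical.
 */
static inline bool wnd_example_cache_extent(struct wnd_bitmap *wnd,
					    size_t bit, size_t len)
{
	struct e_node *e = kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);

	if (!e)
		return false;

	e->start.key = bit;
	e->count.key = len;
	rb_insert_start(&wnd->start_tree, e);
	rb_insert_count(&wnd->count_tree, e);
	wnd->count += 1;
	return true;
}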
/*
 * wnd_add_free_ext - Add a new extent of free space.
 * @build: True when building the tree during the initial rescan.
 */
static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
			     bool build)
{
	struct e_node *e, *e0 = NULL;
	size_t ib, end_in = bit + len;
	struct rb_node *n;

	/* Use extent_min to filter out too-short extents. */
	if (wnd->count >= NTFS_MAX_WND_EXTENTS &&
	    len <= wnd->extent_min) {
		wnd->uptodated = -1;
		return;
	}

	/* Try to find extent before 'bit'. */
	n = rb_lookup(&wnd->start_tree, bit);

	if (!n) {
		n = rb_first(&wnd->start_tree);
	} else {
		e = rb_entry(n, struct e_node, start.node);
		n = rb_next(n);
		if (e->start.key + e->count.key == bit) {
			/* The extent ends exactly at 'bit'; merge left. */
			bit = e->start.key;
			len += e->count.key;
			rb_erase(&e->start.node, &wnd->start_tree);
			rb_erase(&e->count.node, &wnd->count_tree);
			wnd->count -= 1;
			e0 = e;
		}
	}

	while (n) {
		size_t next_end;

		e = rb_entry(n, struct e_node, start.node);
		next_end = e->start.key + e->count.key;
		if (e->start.key > end_in)
			break;

		/* The extent touches or overlaps the new run; merge right. */
		n = rb_next(n);
		len += next_end - end_in;
		end_in = next_end;
		rb_erase(&e->start.node, &wnd->start_tree);
		rb_erase(&e->count.node, &wnd->count_tree);
		wnd->count -= 1;

		if (!e0)
			e0 = e;
		else
			kmem_cache_free(ntfs_enode_cachep, e);
	}

	if (wnd->uptodated != 1) {
		/* Check bits before 'bit'. */
		ib = wnd->zone_bit == wnd->zone_end ||
			     bit < wnd->zone_end
			     ? 0
			     : wnd->zone_end;

		while (bit > ib && wnd_is_free_hlp(wnd, bit - 1, 1)) {
			bit -= 1;
			len += 1;
		}

		/* Check bits after 'end_in'. */
		ib = wnd->zone_bit == wnd->zone_end ||
			     end_in > wnd->zone_bit
			     ? wnd->nbits
			     : wnd->zone_bit;

		while (end_in < ib && wnd_is_free_hlp(wnd, end_in, 1)) {
			end_in += 1;
			len += 1;
		}
	}

	/* Insert new fragment. */
	if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
		if (e0)
			kmem_cache_free(ntfs_enode_cachep, e0);

		wnd->uptodated = -1;

		/* Compare with smallest fragment. */
		n = rb_last(&wnd->count_tree);
		e = rb_entry(n, struct e_node, count.node);
		if (len <= e->count.key)
			goto out; /* Do not insert small fragments. */

		if (build) {
			struct e_node *e2;

			n = rb_prev(n);
			e2 = rb_entry(n, struct e_node, count.node);
			/* Smallest fragment will be 'e2->count.key'. */
			wnd->extent_min = e2->count.key;
		}

		/* Replace the smallest fragment with the new one. */
		rb_erase(&e->start.node, &wnd->start_tree);
		rb_erase(&e->count.node, &wnd->count_tree);
		wnd->count -= 1;
	} else {
		e = e0 ? e0 : kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
		if (!e) {
			wnd->uptodated = -1;
			goto out;
		}

		if (build && len <= wnd->extent_min)
			wnd->extent_min = len;
	}

	e->start.key = bit;
	e->count.key = len;
	if (len > wnd->extent_max)
		wnd->extent_max = len;

	rb_insert_start(&wnd->start_tree, e);
	rb_insert_count(&wnd->count_tree, e);
	wnd->count += 1;

out:
	return;
}
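/*
 * Illustrative sketch, not part of the original driver: the two merge
 * tests used in wnd_add_free_ext() above. A cached extent absorbs a new
 * free run when they touch or overlap. The helper names are hypothetical.
 */
static inline bool wnd_example_merge_left(const struct e_node *e, size_t bit)
{
	/* 'e' ends exactly where the new run starts. */
	return e->start.key + e->count.key == bit;
}

static inline bool wnd_example_merge_right(const struct e_node *e,
					   size_t end_in)
{
	/* 'e' starts at or before the end of the new run. */
	return e->start.key <= end_in;
}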
/*
 * wnd_remove_free_ext - Remove a run from the cached free space.
 */
static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
{
	struct rb_node *n, *n3;
	struct e_node *e, *e3;
	size_t end_in = bit + len;
	size_t end3, end, new_key, new_len, max_new_len;

	/* Try to find extent before 'bit'. */
	n = rb_lookup(&wnd->start_tree, bit);
	if (!n)
		return;

	e = rb_entry(n, struct e_node, start.node);
	end = e->start.key + e->count.key;

	new_key = new_len = 0;
	len = e->count.key;

	/* Range [bit, end_in) must be inside 'e' or outside 'e' and 'n'. */
	if (e->start.key > bit)
		;
	else if (end_in <= end) {
		/* Range [bit, end_in) inside 'e'. */
		new_key = end_in;
		new_len = end - end_in;
		len = bit - e->start.key;
	} else if (bit > end) {
		bool bmax = false;

		n3 = rb_next(n);

		while (n3) {
			e3 = rb_entry(n3, struct e_node, start.node);
			if (e3->start.key >= end_in)
				break;

			if (e3->count.key == wnd->extent_max)
				bmax = true;

			end3 = e3->start.key + e3->count.key;
			if (end3 > end_in) {
				/* Keep the tail of 'e3'. */
				e3->start.key = end_in;
				rb_erase(&e3->count.node, &wnd->count_tree);
				e3->count.key = end3 - end_in;
				rb_insert_count(&wnd->count_tree, e3);
				break;
			}

			/* 'e3' is consumed entirely. */
			n3 = rb_next(n3);
			rb_erase(&e3->start.node, &wnd->start_tree);
			rb_erase(&e3->count.node, &wnd->count_tree);
			wnd->count -= 1;
			kmem_cache_free(ntfs_enode_cachep, e3);
		}

		if (!bmax)
			return;

		n3 = rb_first(&wnd->count_tree);
		wnd->extent_max =
			n3 ? rb_entry(n3, struct e_node, count.node)->count.key
			   : 0;
		return;
	}

	if (e->count.key != wnd->extent_max) {
		;
	} else if (rb_prev(&e->count.node)) {
		;
	} else {
		n3 = rb_next(&e->count.node);
		max_new_len = len > new_len ? len : new_len;
		if (!n3) {
			wnd->extent_max = max_new_len;
		} else {
			e3 = rb_entry(n3, struct e_node, count.node);
			wnd->extent_max = max(e3->count.key, max_new_len);
		}
	}

	if (!len) {
		/* The head of 'e' is empty. */
		if (new_len) {
			/* Reuse 'e' for the tail [end_in, end). */
			e->start.key = new_key;
			rb_erase(&e->count.node, &wnd->count_tree);
			e->count.key = new_len;
			rb_insert_count(&wnd->count_tree, e);
		} else {
			/* The whole extent is consumed. */
			rb_erase(&e->start.node, &wnd->start_tree);
			rb_erase(&e->count.node, &wnd->count_tree);
			wnd->count -= 1;
			kmem_cache_free(ntfs_enode_cachep, e);
		}
		goto out;
	}

	/* Shrink 'e' to the head [e->start.key, bit). */
	rb_erase(&e->count.node, &wnd->count_tree);
	e->count.key = len;
	rb_insert_count(&wnd->count_tree, e);

	if (!new_len)
		goto out;

	/* Insert the tail as a new fragment. */
	if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
		wnd->uptodated = -1;

		/* Get minimal extent. */
		e = rb_entry(rb_last(&wnd->count_tree), struct e_node,
			     count.node);
		if (e->count.key > new_len)
			goto out;

		/* Replace minimum. */
		rb_erase(&e->start.node, &wnd->start_tree);
		rb_erase(&e->count.node, &wnd->count_tree);
		wnd->count -= 1;
	} else {
		e = kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
		if (!e) {
			wnd->uptodated = -1;
			goto out;
		}
	}

	e->start.key = new_key;
	e->count.key = new_len;
	rb_insert_start(&wnd->start_tree, e);
	rb_insert_count(&wnd->count_tree, e);
	wnd->count += 1;

out:
	if (!wnd->count && wnd->uptodated != 1)
		wnd_rescan(wnd);
}
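/*
 * Illustrative sketch, not part of the original driver: removing
 * [bit, bit + len) from a cached extent [start, start + count) leaves at
 * most two remainders, which wnd_remove_free_ext() keeps as the shortened
 * head and the re-inserted tail. The helper name is hypothetical.
 */
static inline void wnd_example_split(size_t start, size_t count, size_t bit,
				     size_t len, size_t *head, size_t *tail)
{
	size_t end = start + count, end_in = bit + len;

	*head = bit > start ? bit - start : 0;	 /* [start, bit) */
	*tail = end > end_in ? end - end_in : 0; /* [bit + len, end) */
}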
/*
 * wnd_rescan - Scan the whole bitmap. Used during initialization.
 */
static int wnd_rescan(struct wnd_bitmap *wnd)
{
	int err = 0;
	size_t prev_tail = 0;
	struct super_block *sb = wnd->sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	u64 lbo, len = 0;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 wbits = 8 * sb->s_blocksize;
	u32 used, frb;
	ulong *buf;
	size_t wpos, wbit, iw, vbo;
	struct buffer_head *bh = NULL;
	CLST lcn, clen;

	wnd->uptodated = 0;
	wnd->extent_max = 0;
	wnd->extent_min = MINUS_ONE_T;
	wnd->total_zeroes = 0;

	vbo = 0;

	for (iw = 0; iw < wnd->nwnd; iw++) {
		if (iw + 1 == wnd->nwnd)
			wbits = wnd->bits_last;

		if (wnd->inited) {
			if (!wnd->free_bits[iw]) {
				/* The window is fully used; flush the tail. */
				if (prev_tail) {
					wnd_add_free_ext(wnd,
							 vbo * 8 - prev_tail,
							 prev_tail, true);
					prev_tail = 0;
				}
				goto next_wnd;
			}
			if (wbits == wnd->free_bits[iw]) {
				/* The window is fully free. */
				prev_tail += wbits;
				wnd->total_zeroes += wbits;
				goto next_wnd;
			}
		}

		if (!len) {
			u32 off = vbo & sbi->cluster_mask;

			if (!run_lookup_entry(&wnd->run, vbo >> cluster_bits,
					      &lcn, &clen, NULL)) {
				err = -ENOENT;
				goto out;
			}

			lbo = ((u64)lcn << cluster_bits) + off;
			len = ((u64)clen << cluster_bits) - off;
		}

		bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
		if (!bh) {
			err = -EIO;
			goto out;
		}

		buf = (ulong *)bh->b_data;

		used = __bitmap_weight(buf, wbits);
		if (used < wbits) {
			frb = wbits - used;
			wnd->free_bits[iw] = frb;
			wnd->total_zeroes += frb;
		}

		wpos = 0;
		wbit = vbo * 8;

		if (wbit + wbits > wnd->nbits)
			wbits = wnd->nbits - wbit;

		do {
			used = find_next_zero_bit(buf, wbits, wpos);

			if (used > wpos && prev_tail) {
				wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
						 prev_tail, true);
				prev_tail = 0;
			}

			wpos = used;

			if (wpos >= wbits) {
				/* No free blocks. */
				prev_tail = 0;
				break;
			}

			frb = find_next_bit(buf, wbits, wpos);
			if (frb >= wbits) {
				/* Keep last free block. */
				prev_tail += frb - wpos;
				break;
			}

			wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
					 frb + prev_tail - wpos, true);

			/* Skip free block and first '1'. */
			wpos = frb + 1;
			/* Reset previous tail. */
			prev_tail = 0;
		} while (wpos < wbits);

		put_bh(bh);
		bh = NULL;

next_wnd:
		vbo += blocksize;
		if (len) {
			len -= blocksize;
			lbo += blocksize;
		}
	}

	/* Add last block. */
	if (prev_tail)
		wnd_add_free_ext(wnd, wnd->nbits - prev_tail, prev_tail, true);

	/*
	 * Before the init cycle wnd->uptodated was 0.
	 * If any errors or limits occur during initialization then
	 * wnd->uptodated will be -1.
	 * If 'uptodated' is still 0 then the tree is fully up to date.
	 */
	if (!wnd->uptodated)
		wnd->uptodated = 1;

	if (wnd->zone_bit != wnd->zone_end) {
		size_t zlen = wnd->zone_end - wnd->zone_bit;

		wnd->zone_end = wnd->zone_bit;
		wnd_zone_set(wnd, wnd->zone_bit, zlen);
	}

out:
	return err;
}
int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
{
	int err;
	u32 blocksize = sb->s_blocksize;
	u32 wbits = blocksize * 8;

	init_rwsem(&wnd->rw_lock);

	wnd->sb = sb;
	wnd->nbits = nbits;
	wnd->total_zeroes = nbits;
	wnd->extent_max = MINUS_ONE_T;
	wnd->zone_bit = wnd->zone_end = 0;
	wnd->nwnd = bytes_to_block(sb, bitmap_size(nbits));
	wnd->bits_last = nbits & (wbits - 1);
	if (!wnd->bits_last)
		wnd->bits_last = wbits;

	wnd->free_bits = kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS);
	if (!wnd->free_bits)
		return -ENOMEM;

	err = wnd_rescan(wnd);
	if (err)
		return err;

	wnd->inited = true;

	return 0;
}
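/*
 * Illustrative sketch, not part of the original driver: the expected
 * lifecycle of a wnd_bitmap, assuming the caller has already attached the
 * on-disk bitmap run to wnd->run. The helper name is hypothetical.
 */
static inline int wnd_example_lifecycle(struct wnd_bitmap *wnd,
					struct super_block *sb, size_t nbits)
{
	int err = wnd_init(wnd, sb, nbits);

	if (err)
		return err;

	/* ... allocate and free clusters ... */

	wnd_close(wnd);
	return 0;
}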
/*
 * wnd_map - Call ntfs_bread for requested window.
 */
static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw)
{
	u64 vbo, lbo;
	CLST lcn, clen;
	struct super_block *sb = wnd->sb;
	struct ntfs_sb_info *sbi;
	struct buffer_head *bh;

	sbi = sb->s_fs_info;
	vbo = (u64)iw << sb->s_blocksize_bits;

	if (!run_lookup_entry(&wnd->run, vbo >> sbi->cluster_bits, &lcn, &clen,
			      NULL)) {
		return ERR_PTR(-ENOENT);
	}

	lbo = ((u64)lcn << sbi->cluster_bits) + (vbo & sbi->cluster_mask);

	bh = ntfs_bread(wnd->sb, lbo >> sb->s_blocksize_bits);
	if (!bh)
		return ERR_PTR(-EIO);

	return bh;
}
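/*
 * Illustrative sketch, not part of the original driver: wnd_map() returns
 * ERR_PTR() codes, so callers test with IS_ERR() and drop the reference
 * with put_bh() when done. The helper name is hypothetical.
 */
static inline int wnd_example_peek(struct wnd_bitmap *wnd, size_t iw)
{
	struct buffer_head *bh = wnd_map(wnd, iw);

	if (IS_ERR(bh))
		return PTR_ERR(bh);

	/* ... inspect bh->b_data ... */

	put_bh(bh);
	return 0;
}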
/*
 * wnd_set_free - Mark the range [bit, bit + bits) as free.
 */
int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	int err = 0;
	struct super_block *sb = wnd->sb;
	size_t bits0 = bits;
	u32 wbits = 8 * sb->s_blocksize;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbit = bit & (wbits - 1);
	struct buffer_head *bh;

	while (iw < wnd->nwnd && bits) {
		u32 tail, op;
		ulong *buf;

		if (iw + 1 == wnd->nwnd)
			wbits = wnd->bits_last;

		tail = wbits - wbit;
		op = tail < bits ? tail : bits;

		bh = wnd_map(wnd, iw);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			break;
		}

		lock_buffer(bh);
		buf = (ulong *)bh->b_data;

		__bitmap_clear(buf, wbit, op);

		wnd->free_bits[iw] += op;

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		put_bh(bh);

		wnd->total_zeroes += op;
		bits -= op;
		wbit = 0;
		iw += 1;
	}

	wnd_add_free_ext(wnd, bit, bits0, false);

	return err;
}
/*
 * wnd_set_used - Mark the range [bit, bit + bits) as used.
 */
int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	int err = 0;
	struct super_block *sb = wnd->sb;
	size_t bits0 = bits;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbits = 8 * sb->s_blocksize;
	u32 wbit = bit & (wbits - 1);
	struct buffer_head *bh;

	while (iw < wnd->nwnd && bits) {
		u32 tail, op;
		ulong *buf;

		if (unlikely(iw + 1 == wnd->nwnd))
			wbits = wnd->bits_last;

		tail = wbits - wbit;
		op = tail < bits ? tail : bits;

		bh = wnd_map(wnd, iw);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			break;
		}

		lock_buffer(bh);
		buf = (ulong *)bh->b_data;

		__bitmap_set(buf, wbit, op);
		wnd->free_bits[iw] -= op;

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		put_bh(bh);

		wnd->total_zeroes -= op;
		bits -= op;
		wbit = 0;
		iw += 1;
	}

	if (!RB_EMPTY_ROOT(&wnd->start_tree))
		wnd_remove_free_ext(wnd, bit, bits0);

	return err;
}
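/*
 * Illustrative sketch, not part of the original driver: an
 * allocate-then-roll-back pattern built from the two primitives above.
 * The helper name is hypothetical.
 */
static inline int wnd_example_toggle(struct wnd_bitmap *wnd, size_t lcn,
				     size_t len)
{
	int err = wnd_set_used(wnd, lcn, len);

	if (err)
		return err;

	/* ... if a later step fails, return the clusters: ... */
	return wnd_set_free(wnd, lcn, len);
}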
/*
 * wnd_is_free_hlp
 *
 * Return: True if all clusters [bit, bit+bits) are free (bitmap only).
 */
static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	struct super_block *sb = wnd->sb;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbits = 8 * sb->s_blocksize;
	u32 wbit = bit & (wbits - 1);

	while (iw < wnd->nwnd && bits) {
		u32 tail, op;

		if (unlikely(iw + 1 == wnd->nwnd))
			wbits = wnd->bits_last;

		tail = wbits - wbit;
		op = tail < bits ? tail : bits;

		if (wbits != wnd->free_bits[iw]) {
			/* Window is not all free; check the on-disk bits. */
			bool ret;
			struct buffer_head *bh = wnd_map(wnd, iw);

			if (IS_ERR(bh))
				return false;

			ret = are_bits_clear((ulong *)bh->b_data, wbit, op);

			put_bh(bh);
			if (!ret)
				return false;
		}

		bits -= op;
		wbit = 0;
		iw += 1;
	}

	return true;
}
/*
 * wnd_is_free
 *
 * Return: True if all clusters [bit, bit+bits) are free.
 */
bool wnd_is_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	bool ret;
	struct rb_node *n;
	size_t end;
	struct e_node *e;

	if (RB_EMPTY_ROOT(&wnd->start_tree))
		goto use_wnd;

	n = rb_lookup(&wnd->start_tree, bit);
	if (!n)
		goto use_wnd;

	e = rb_entry(n, struct e_node, start.node);

	end = e->start.key + e->count.key;

	if (bit < end && bit + bits <= end)
		return true;

use_wnd:
	ret = wnd_is_free_hlp(wnd, bit, bits);

	return ret;
}
/*
 * wnd_is_used
 *
 * Return: True if all clusters [bit, bit+bits) are used.
 */
bool wnd_is_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	bool ret = false;
	struct super_block *sb = wnd->sb;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbits = 8 * sb->s_blocksize;
	u32 wbit = bit & (wbits - 1);
	size_t end;
	struct rb_node *n;
	struct e_node *e;

	if (RB_EMPTY_ROOT(&wnd->start_tree))
		goto use_wnd;

	end = bit + bits;
	n = rb_lookup(&wnd->start_tree, end - 1);
	if (!n)
		goto use_wnd;

	e = rb_entry(n, struct e_node, start.node);
	if (e->start.key + e->count.key > bit)
		return false;

use_wnd:
	while (iw < wnd->nwnd && bits) {
		u32 tail, op;

		if (unlikely(iw + 1 == wnd->nwnd))
			wbits = wnd->bits_last;

		tail = wbits - wbit;
		op = tail < bits ? tail : bits;

		if (wnd->free_bits[iw]) {
			/* Window has free bits; check the on-disk bits. */
			bool all_set;
			struct buffer_head *bh = wnd_map(wnd, iw);

			if (IS_ERR(bh))
				goto out;

			all_set = are_bits_set((ulong *)bh->b_data, wbit, op);
			put_bh(bh);
			if (!all_set)
				goto out;
		}

		bits -= op;
		wbit = 0;
		iw += 1;
	}
	ret = true;
out:
	return ret;
}
/*
 * wnd_find - Look for free space.
 *
 * @flags - BITMAP_FIND_XXX flags.
 *
 * Return: Found length, or 0 if not found; *@allocated receives the start.
 */
size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
		size_t flags, size_t *allocated)
{
	struct super_block *sb;
	u32 wbits, wpos, wzbit, wzend;
	size_t fnd, max_alloc, b_len, b_pos;
	size_t iw, prev_tail, nwnd, wbit, ebit, zbit, zend;
	size_t to_alloc0 = to_alloc;
	const struct e_node *e;
	const struct rb_node *pr, *cr;
	u8 log2_bits;
	bool fbits_valid;
	ulong *buf;
	struct buffer_head *bh;

	/* Fast checking for available free space. */
	if (flags & BITMAP_FIND_FULL) {
		size_t zeroes = wnd_zeroes(wnd);

		zeroes -= wnd->zone_end - wnd->zone_bit;
		if (zeroes < to_alloc0)
			goto no_space;

		if (to_alloc0 > wnd->extent_max)
			goto no_space;
	} else {
		if (to_alloc > wnd->extent_max)
			to_alloc = wnd->extent_max;
	}

	if (wnd->zone_bit <= hint && hint < wnd->zone_end)
		hint = wnd->zone_end;

	max_alloc = wnd->nbits;
	b_len = b_pos = 0;

	if (hint >= max_alloc)
		hint = 0;

	if (RB_EMPTY_ROOT(&wnd->start_tree)) {
		if (wnd->uptodated == 1) {
			/* Extents tree is updated -> No free space. */
			goto no_space;
		}

		goto scan_bitmap;
	}

	if (!hint)
		goto allocate_biggest;

	/* Use hint: Enumerate extents by start >= hint. */
	pr = NULL;
	cr = wnd->start_tree.rb_node;

	for (;;) {
		e = rb_entry(cr, struct e_node, start.node);

		if (e->start.key == hint)
			break;

		if (e->start.key < hint) {
			pr = cr;
			cr = cr->rb_right;
			if (!cr)
				break;
			continue;
		}

		cr = cr->rb_left;
		if (!cr) {
			e = pr ? rb_entry(pr, struct e_node, start.node) : NULL;
			break;
		}
	}

	if (!e)
		goto allocate_biggest;

	if (e->start.key + e->count.key > hint) {
		/* We have found an extent that contains 'hint'. */
		size_t len = e->start.key + e->count.key - hint;

		if (len >= to_alloc && hint + to_alloc <= max_alloc) {
			fnd = hint;
			goto found;
		}

		if (!(flags & BITMAP_FIND_FULL)) {
			if (len > to_alloc)
				len = to_alloc;

			if (hint + len <= max_alloc) {
				fnd = hint;
				to_alloc = len;
				goto found;
			}
		}
	}

allocate_biggest:
	/* Allocate from biggest free extent. */
	e = rb_entry(rb_first(&wnd->count_tree), struct e_node, count.node);
	if (e->count.key != wnd->extent_max)
		wnd->extent_max = e->count.key;

	if (e->count.key < max_alloc) {
		if (e->count.key >= to_alloc) {
			;
		} else if (flags & BITMAP_FIND_FULL) {
			if (e->count.key < to_alloc0) {
				/* Biggest free block is less than requested. */
				goto no_space;
			}
			to_alloc = e->count.key;
		} else if (wnd->uptodated != -1) {
			to_alloc = e->count.key;
		} else {
			/* Check if we can use more bits. */
			size_t op, max_check;
			struct rb_root start_tree;

			/* Hide the tree so wnd_is_free() reads the bitmap only. */
			memcpy(&start_tree, &wnd->start_tree,
			       sizeof(struct rb_root));
			memset(&wnd->start_tree, 0, sizeof(struct rb_root));

			max_check = e->start.key + to_alloc;
			if (max_check > max_alloc)
				max_check = max_alloc;
			for (op = e->start.key + e->count.key; op < max_check;
			     op++) {
				if (!wnd_is_free(wnd, op, 1))
					break;
			}
			memcpy(&wnd->start_tree, &start_tree,
			       sizeof(struct rb_root));
			to_alloc = op - e->start.key;
		}

		/* Prepare to return. */
		fnd = e->start.key;
		if (e->start.key + to_alloc > max_alloc)
			to_alloc = max_alloc - e->start.key;
		goto found;
	}

	if (wnd->uptodated == 1) {
		/* Extents tree is updated -> no free space. */
		goto no_space;
	}

	b_len = e->count.key;
	b_pos = e->start.key;

scan_bitmap:
	sb = wnd->sb;
	log2_bits = sb->s_blocksize_bits + 3;

	/* At most two ranges [hint, max_alloc) + [0, hint). */
Again:

	/* TODO: Optimize request for case nbits > wbits. */
	iw = hint >> log2_bits;
	wbits = sb->s_blocksize * 8;
	wpos = hint & (wbits - 1);
	prev_tail = 0;
	fbits_valid = true;

	if (max_alloc == wnd->nbits) {
		nwnd = wnd->nwnd;
	} else {
		size_t t = max_alloc + wbits - 1;

		nwnd = likely(t > max_alloc) ? (t >> log2_bits) : wnd->nwnd;
	}

	/* Enumerate all windows. */
	for (; iw < nwnd; iw++) {
		wbit = iw << log2_bits;

		if (!wnd->free_bits[iw]) {
			if (prev_tail > b_len) {
				b_pos = wbit - prev_tail;
				b_len = prev_tail;
			}

			/* Skip fully used window. */
			prev_tail = 0;
			wpos = 0;
			continue;
		}

		if (unlikely(iw + 1 == nwnd)) {
			if (max_alloc == wnd->nbits) {
				wbits = wnd->bits_last;
			} else {
				size_t t = max_alloc & (wbits - 1);

				if (t) {
					wbits = t;
					fbits_valid = false;
				}
			}
		}

		if (wnd->zone_end > wnd->zone_bit) {
			ebit = wbit + wbits;
			zbit = max(wnd->zone_bit, wbit);
			zend = min(wnd->zone_end, ebit);

			/* Here we have a window [wbit, ebit) and zone [zbit, zend). */
			if (zend <= zbit) {
				/* Zone does not overlap window. */
			} else {
				wzbit = zbit - wbit;
				wzend = zend - wbit;

				/* Zone overlaps window. */
				if (wnd->free_bits[iw] == wzend - wzbit) {
					/* All free bits are inside the zone. */
					prev_tail = 0;
					wpos = 0;
					continue;
				}

				/* Scan two ranges window: [wbit, zbit) and [zend, ebit). */
				bh = wnd_map(wnd, iw);
				if (IS_ERR(bh)) {
					prev_tail = 0;
					wpos = 0;
					continue;
				}

				buf = (ulong *)bh->b_data;

				/* Scan range [wbit, zbit). */
				if (wpos < wzbit) {
					/* Scan range [wpos, zbit). */
					fnd = wnd_scan(buf, wbit, wpos, wzbit,
						       to_alloc, &prev_tail,
						       &b_pos, &b_len);
					if (fnd != MINUS_ONE_T) {
						put_bh(bh);
						goto found;
					}
				}

				prev_tail = 0;

				/* Scan range [zend, ebit). */
				if (wzend < wbits) {
					fnd = wnd_scan(buf, wbit,
						       max(wzend, wpos), wbits,
						       to_alloc, &prev_tail,
						       &b_pos, &b_len);
					if (fnd != MINUS_ONE_T) {
						put_bh(bh);
						goto found;
					}
				}

				put_bh(bh);
				wpos = 0;
				continue;
			}
		}

		/* Current window does not overlap zone. */
		if (!wpos && fbits_valid && wnd->free_bits[iw] == wbits) {
			/* Window is empty. */
			if (prev_tail + wbits >= to_alloc) {
				fnd = wbit + wpos - prev_tail;
				goto found;
			}

			/* Increase 'prev_tail' and process next window. */
			prev_tail += wbits;
			wpos = 0;
			continue;
		}

		bh = wnd_map(wnd, iw);
		if (IS_ERR(bh)) {
			prev_tail = 0;
			wpos = 0;
			continue;
		}

		buf = (ulong *)bh->b_data;

		/* Scan range [wpos, wbits). */
		fnd = wnd_scan(buf, wbit, wpos, wbits, to_alloc, &prev_tail,
			       &b_pos, &b_len);
		put_bh(bh);

		if (fnd != MINUS_ONE_T)
			goto found;

		wpos = 0;
	}

	if (b_len < prev_tail) {
		/* The last fragment. */
		b_len = prev_tail;
		b_pos = max_alloc - prev_tail;
	}

	if (hint) {
		/*
		 * We have scanned range [hint, max_alloc).
		 * Prepare to scan range [0, hint + to_alloc).
		 */
		size_t nextmax = hint + to_alloc;

		if (likely(nextmax >= hint) && nextmax < max_alloc)
			max_alloc = nextmax;
		hint = 0;
		goto Again;
	}

	if (!b_len)
		goto no_space;

	wnd->extent_max = b_len;

	if (flags & BITMAP_FIND_FULL)
		goto no_space;

	fnd = b_pos;
	to_alloc = b_len;

found:
	if (flags & BITMAP_FIND_MARK_AS_USED) {
		/* TODO: Optimize remove extent (pass 'e'?). */
		if (wnd_set_used(wnd, fnd, to_alloc))
			goto no_space;
	} else if (wnd->extent_max != MINUS_ONE_T &&
		   to_alloc > wnd->extent_max) {
		wnd->extent_max = to_alloc;
	}

	*allocated = fnd;
	return to_alloc;

no_space:
	return 0;
}
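/*
 * Illustrative sketch, not part of the original driver: requesting 16
 * contiguous free clusters near 'hint' and marking them used in one call.
 * On success '*lcn' receives the start of the range and the number of
 * clusters is returned; 0 means nothing was found. Callers are expected
 * to hold wnd->rw_lock. The helper name and count are hypothetical.
 */
static inline size_t wnd_example_alloc(struct wnd_bitmap *wnd, size_t hint,
				       size_t *lcn)
{
	return wnd_find(wnd, 16, hint, BITMAP_FIND_MARK_AS_USED, lcn);
}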
/*
 * wnd_extend - Extend the bitmap ($MFT bitmap).
 */
int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
{
	int err;
	struct super_block *sb = wnd->sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	u32 blocksize = sb->s_blocksize;
	u32 wbits = blocksize * 8;
	u32 b0, new_last;
	size_t bits, iw, new_wnd;
	size_t old_bits = wnd->nbits;
	u16 *new_free;

	if (new_bits <= old_bits)
		return -EINVAL;

	/* Align to 8 byte boundary. */
	new_wnd = bytes_to_block(sb, bitmap_size(new_bits));
	new_last = new_bits & (wbits - 1);
	if (!new_last)
		new_last = wbits;

	if (new_wnd != wnd->nwnd) {
		new_free = kmalloc_array(new_wnd, sizeof(u16), GFP_NOFS);
		if (!new_free)
			return -ENOMEM;

		if (new_free != wnd->free_bits)
			memcpy(new_free, wnd->free_bits,
			       wnd->nwnd * sizeof(u16));
		memset(new_free + wnd->nwnd, 0,
		       (new_wnd - wnd->nwnd) * sizeof(u16));
		kfree(wnd->free_bits);
		wnd->free_bits = new_free;
	}

	/* Zero bits [old_bits, new_bits). */
	bits = new_bits - old_bits;
	b0 = old_bits & (wbits - 1);

	for (iw = old_bits >> (sb->s_blocksize_bits + 3); bits; iw += 1) {
		u32 op;
		size_t frb;
		u64 vbo, lbo, bytes;
		struct buffer_head *bh;
		ulong *buf;

		if (iw + 1 == new_wnd)
			wbits = new_last;

		op = b0 + bits > wbits ? wbits - b0 : bits;
		vbo = (u64)iw * blocksize;

		err = ntfs_vbo_to_lbo(sbi, &wnd->run, vbo, &lbo, &bytes);
		if (err)
			break;

		bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
		if (!bh)
			return -EIO;

		lock_buffer(bh);
		buf = (ulong *)bh->b_data;

		__bitmap_clear(buf, b0, blocksize * 8 - b0);
		frb = wbits - __bitmap_weight(buf, wbits);
		wnd->total_zeroes += frb - wnd->free_bits[iw];
		wnd->free_bits[iw] = frb;

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		/* err = sync_dirty_buffer(bh); */
		put_bh(bh);

		b0 = 0;
		bits -= op;
	}

	wnd->nbits = new_bits;
	wnd->nwnd = new_wnd;
	wnd->bits_last = new_last;

	wnd_add_free_ext(wnd, old_bits, new_bits - old_bits, false);

	return 0;
}
void wnd_zone_set(struct wnd_bitmap *wnd, size_t lcn, size_t len)
{
	size_t zlen;

	zlen = wnd->zone_end - wnd->zone_bit;
	if (zlen)
		wnd_add_free_ext(wnd, wnd->zone_bit, zlen, false);

	if (!RB_EMPTY_ROOT(&wnd->start_tree) && len)
		wnd_remove_free_ext(wnd, lcn, len);

	wnd->zone_bit = lcn;
	wnd->zone_end = lcn + len;
}
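/*
 * Illustrative sketch, not part of the original driver: reserving a zone
 * (as done for the MFT zone) excludes [lcn, lcn + len) from normal
 * allocation; passing len == 0 leaves the zone empty, releasing the old
 * reservation back to the free-extent cache. Values are hypothetical.
 */
static inline void wnd_example_reserve_zone(struct wnd_bitmap *wnd)
{
	wnd_zone_set(wnd, 0x4000, 0x1000); /* Reserve [0x4000, 0x5000). */
	wnd_zone_set(wnd, 0, 0);	   /* Release it again. */
}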
int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	u32 wbits = 8 * sb->s_blocksize;
	CLST len = 0, lcn = 0, done = 0;
	CLST minlen = bytes_to_cluster(sbi, range->minlen);
	CLST lcn_from = bytes_to_cluster(sbi, range->start);
	size_t iw = lcn_from >> (sb->s_blocksize_bits + 3);
	u32 wbit = lcn_from & (wbits - 1);
	CLST lcn_to;

	if (range->len == (u64)-1)
		lcn_to = wnd->nbits;
	else
		lcn_to = bytes_to_cluster(sbi, range->start + range->len);

	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);

	/* Iterate over windows, so 'iw' must stay below 'nwnd', not 'nbits'. */
	for (; iw < wnd->nwnd; iw++, wbit = 0) {
		CLST lcn_wnd = iw * wbits;
		struct buffer_head *bh;
		ulong *buf;

		if (lcn_wnd > lcn_to)
			break;

		if (!wnd->free_bits[iw])
			continue;

		if (iw + 1 == wnd->nwnd)
			wbits = wnd->bits_last;

		if (lcn_wnd + wbits > lcn_to)
			wbits = lcn_to - lcn_wnd;

		bh = wnd_map(wnd, iw);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			break;
		}

		buf = (ulong *)bh->b_data;

		for (; wbit < wbits; wbit++) {
			if (!test_bit(wbit, buf)) {
				if (!len)
					lcn = lcn_wnd + wbit;
				len += 1;
				continue;
			}
			if (len >= minlen) {
				err = ntfs_discard(sbi, lcn, len);
				if (err)
					goto out;
				done += len;
			}
			len = 0;
		}
		put_bh(bh);
	}

	/* Process the last fragment. */
	if (len >= minlen) {
		err = ntfs_discard(sbi, lcn, len);
		if (err)
			goto out;
		done += len;
	}

out:
	range->len = (u64)done << sbi->cluster_bits;

	up_read(&wnd->rw_lock);

	return err;
}
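/*
 * Illustrative sketch, not part of the original driver: how an
 * FITRIM-style caller might invoke ntfs_trim_fs(). The range values are
 * hypothetical; len == (u64)-1 means "to the end of the volume", as
 * handled above.
 */
static inline int wnd_example_trim_all(struct ntfs_sb_info *sbi)
{
	struct fstrim_range range = {
		.start = 0,
		.len = (u64)-1,
		.minlen = 0,
	};

	return ntfs_trim_fs(sbi, &range);
}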