netfs, fscache: Prevent Oops in fscache_put_cache()
[platform/kernel/linux-rpi.git] / fs / ntfs3 / bitmap.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *
4  * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5  *
6  * This code builds two trees of free clusters extents.
7  * Trees are sorted by start of extent and by length of extent.
8  * NTFS_MAX_WND_EXTENTS defines the maximum number of elements in trees.
9  * In extreme case code reads on-disk bitmap to find free clusters.
10  *
11  */
12
13 #include <linux/buffer_head.h>
14 #include <linux/fs.h>
15 #include <linux/kernel.h>
16
17 #include "ntfs.h"
18 #include "ntfs_fs.h"
19
20 /*
21  * Maximum number of extents in tree.
22  */
23 #define NTFS_MAX_WND_EXTENTS (32u * 1024u)
24
/*
 * Key-carrying red-black tree node: 'key' is the value the tree is
 * ordered by (either an extent's start bit or its length in bits).
 */
struct rb_node_key {
        struct rb_node node;
        size_t key;
};

/*
 * One cached free-space extent. Every e_node is linked into BOTH trees
 * at once: start.key is the first free bit of the extent, count.key is
 * its length in bits.
 */
struct e_node {
        struct rb_node_key start; /* Tree sorted by start. */
        struct rb_node_key count; /* Tree sorted by len. */
};
34
35 static int wnd_rescan(struct wnd_bitmap *wnd);
36 static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw);
37 static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits);
38
39 static struct kmem_cache *ntfs_enode_cachep;
40
41 int __init ntfs3_init_bitmap(void)
42 {
43         ntfs_enode_cachep = kmem_cache_create("ntfs3_enode_cache",
44                                               sizeof(struct e_node), 0,
45                                               SLAB_RECLAIM_ACCOUNT, NULL);
46         return ntfs_enode_cachep ? 0 : -ENOMEM;
47 }
48
/*
 * ntfs3_exit_bitmap - Destroy the e_node slab cache created by
 * ntfs3_init_bitmap(). kmem_cache_destroy(NULL) is a no-op, so this is
 * safe even if initialization failed.
 */
void ntfs3_exit_bitmap(void)
{
        kmem_cache_destroy(ntfs_enode_cachep);
}
53
/*
 * wnd_scan - Scan one bitmap window for a run of @to_alloc free bits.
 *
 * @buf:       bitmap data of the current window.
 * @wbit:      absolute bit number of the first bit of this window.
 * @wpos:      position inside the window to start scanning from.
 * @wend:      end position (exclusive) inside the window.
 * @to_alloc:  number of contiguous free bits wanted.
 * @prev_tail: in/out - free bits carried over from the end of the
 *             previous window, so runs spanning windows are found.
 * @b_pos:     out - absolute start of the biggest fragment seen so far.
 * @b_len:     out - length of the biggest fragment seen so far.
 *
 * b_pos + b_len - biggest fragment.
 * Scan range [wpos wbits) window @buf.
 *
 * Return: absolute bit of a satisfying run, or -1 (MINUS_ONE_T) if not
 * found in this window.
 */
static size_t wnd_scan(const void *buf, size_t wbit, u32 wpos, u32 wend,
                       size_t to_alloc, size_t *prev_tail, size_t *b_pos,
                       size_t *b_len)
{
        while (wpos < wend) {
                size_t free_len;
                u32 free_bits, end;
                u32 used = find_next_zero_bit_le(buf, wend, wpos);

                if (used >= wend) {
                        /* No zero bit left in this window; the carried
                         * tail ends here - record it if it is the best. */
                        if (*b_len < *prev_tail) {
                                *b_pos = wbit - *prev_tail;
                                *b_len = *prev_tail;
                        }

                        *prev_tail = 0;
                        return -1;
                }

                if (used > wpos) {
                        /* Used bits between wpos and 'used' break the
                         * carried run; flush the tail before moving on. */
                        wpos = used;
                        if (*b_len < *prev_tail) {
                                *b_pos = wbit - *prev_tail;
                                *b_len = *prev_tail;
                        }

                        *prev_tail = 0;
                }

                /*
                 * Now we have a fragment [wpos, wend) starting with 0.
                 */
                end = wpos + to_alloc - *prev_tail;
                free_bits = find_next_bit_le(buf, min(end, wend), wpos);

                free_len = *prev_tail + free_bits - wpos;

                if (*b_len < free_len) {
                        *b_pos = wbit + wpos - *prev_tail;
                        *b_len = free_len;
                }

                if (free_len >= to_alloc)
                        return wbit + wpos - *prev_tail;

                if (free_bits >= wend) {
                        /* Run reaches the window end: carry it into the
                         * next window via prev_tail. */
                        *prev_tail += free_bits - wpos;
                        return -1;
                }

                /* Skip past the set bit that terminated the run. */
                wpos = free_bits + 1;

                *prev_tail = 0;
        }

        return -1;
}
119
120 /*
121  * wnd_close - Frees all resources.
122  */
123 void wnd_close(struct wnd_bitmap *wnd)
124 {
125         struct rb_node *node, *next;
126
127         kfree(wnd->free_bits);
128         wnd->free_bits = NULL;
129         run_close(&wnd->run);
130
131         node = rb_first(&wnd->start_tree);
132
133         while (node) {
134                 next = rb_next(node);
135                 rb_erase(node, &wnd->start_tree);
136                 kmem_cache_free(ntfs_enode_cachep,
137                                 rb_entry(node, struct e_node, start.node));
138                 node = next;
139         }
140 }
141
142 static struct rb_node *rb_lookup(struct rb_root *root, size_t v)
143 {
144         struct rb_node **p = &root->rb_node;
145         struct rb_node *r = NULL;
146
147         while (*p) {
148                 struct rb_node_key *k;
149
150                 k = rb_entry(*p, struct rb_node_key, node);
151                 if (v < k->key) {
152                         p = &(*p)->rb_left;
153                 } else if (v > k->key) {
154                         r = &k->node;
155                         p = &(*p)->rb_right;
156                 } else {
157                         return &k->node;
158                 }
159         }
160
161         return r;
162 }
163
164 /*
165  * rb_insert_count - Helper function to insert special kind of 'count' tree.
166  */
167 static inline bool rb_insert_count(struct rb_root *root, struct e_node *e)
168 {
169         struct rb_node **p = &root->rb_node;
170         struct rb_node *parent = NULL;
171         size_t e_ckey = e->count.key;
172         size_t e_skey = e->start.key;
173
174         while (*p) {
175                 struct e_node *k =
176                         rb_entry(parent = *p, struct e_node, count.node);
177
178                 if (e_ckey > k->count.key) {
179                         p = &(*p)->rb_left;
180                 } else if (e_ckey < k->count.key) {
181                         p = &(*p)->rb_right;
182                 } else if (e_skey < k->start.key) {
183                         p = &(*p)->rb_left;
184                 } else if (e_skey > k->start.key) {
185                         p = &(*p)->rb_right;
186                 } else {
187                         WARN_ON(1);
188                         return false;
189                 }
190         }
191
192         rb_link_node(&e->count.node, parent, p);
193         rb_insert_color(&e->count.node, root);
194         return true;
195 }
196
197 /*
198  * rb_insert_start - Helper function to insert special kind of 'count' tree.
199  */
200 static inline bool rb_insert_start(struct rb_root *root, struct e_node *e)
201 {
202         struct rb_node **p = &root->rb_node;
203         struct rb_node *parent = NULL;
204         size_t e_skey = e->start.key;
205
206         while (*p) {
207                 struct e_node *k;
208
209                 parent = *p;
210
211                 k = rb_entry(parent, struct e_node, start.node);
212                 if (e_skey < k->start.key) {
213                         p = &(*p)->rb_left;
214                 } else if (e_skey > k->start.key) {
215                         p = &(*p)->rb_right;
216                 } else {
217                         WARN_ON(1);
218                         return false;
219                 }
220         }
221
222         rb_link_node(&e->start.node, parent, p);
223         rb_insert_color(&e->start.node, root);
224         return true;
225 }
226
/*
 * wnd_add_free_ext - Adds a new extent of free space.
 * @build:      1 when building tree.
 *
 * When not building, the new range is first merged with any adjacent or
 * overlapping cached extents (and, if the cache is fully up to date is
 * not claimed, grown bit-by-bit via the on-disk bitmap). The resulting
 * extent is then inserted into both trees, evicting the smallest cached
 * extent if the NTFS_MAX_WND_EXTENTS limit is reached.
 */
static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
                             bool build)
{
        struct e_node *e, *e0 = NULL;
        size_t ib, end_in = bit + len;
        struct rb_node *n;

        if (build) {
                /* Use extent_min to filter too short extents. */
                if (wnd->count >= NTFS_MAX_WND_EXTENTS &&
                    len <= wnd->extent_min) {
                        /* Tree is full and this extent is too small to keep. */
                        wnd->uptodated = -1;
                        return;
                }
        } else {
                /* Try to find extent before 'bit'. */
                n = rb_lookup(&wnd->start_tree, bit);

                if (!n) {
                        n = rb_first(&wnd->start_tree);
                } else {
                        e = rb_entry(n, struct e_node, start.node);
                        n = rb_next(n);
                        if (e->start.key + e->count.key == bit) {
                                /* Remove left. */
                                /* Left neighbour touches us: absorb it. */
                                bit = e->start.key;
                                len += e->count.key;
                                rb_erase(&e->start.node, &wnd->start_tree);
                                rb_erase(&e->count.node, &wnd->count_tree);
                                wnd->count -= 1;
                                /* Keep the node for reuse instead of freeing. */
                                e0 = e;
                        }
                }

                /* Absorb every cached extent overlapping or touching on the right. */
                while (n) {
                        size_t next_end;

                        e = rb_entry(n, struct e_node, start.node);
                        next_end = e->start.key + e->count.key;
                        if (e->start.key > end_in)
                                break;

                        /* Remove right. */
                        n = rb_next(n);
                        len += next_end - end_in;
                        end_in = next_end;
                        rb_erase(&e->start.node, &wnd->start_tree);
                        rb_erase(&e->count.node, &wnd->count_tree);
                        wnd->count -= 1;

                        if (!e0)
                                e0 = e;
                        else
                                kmem_cache_free(ntfs_enode_cachep, e);
                }

                if (wnd->uptodated != 1) {
                        /*
                         * Cache may be incomplete: extend the extent
                         * directly from the on-disk bitmap, stopping at
                         * the reserved MFT zone boundary.
                         */
                        /* Check bits before 'bit'. */
                        ib = wnd->zone_bit == wnd->zone_end ||
                                             bit < wnd->zone_end ?
                                     0 :
                                     wnd->zone_end;

                        while (bit > ib && wnd_is_free_hlp(wnd, bit - 1, 1)) {
                                bit -= 1;
                                len += 1;
                        }

                        /* Check bits after 'end_in'. */
                        ib = wnd->zone_bit == wnd->zone_end ||
                                             end_in > wnd->zone_bit ?
                                     wnd->nbits :
                                     wnd->zone_bit;

                        while (end_in < ib && wnd_is_free_hlp(wnd, end_in, 1)) {
                                end_in += 1;
                                len += 1;
                        }
                }
        }
        /* Insert new fragment. */
        if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
                if (e0)
                        kmem_cache_free(ntfs_enode_cachep, e0);

                /* Cache can no longer claim to hold every free extent. */
                wnd->uptodated = -1;

                /* Compare with smallest fragment. */
                n = rb_last(&wnd->count_tree);
                e = rb_entry(n, struct e_node, count.node);
                if (len <= e->count.key)
                        goto out; /* Do not insert small fragments. */

                if (build) {
                        struct e_node *e2;

                        n = rb_prev(n);
                        e2 = rb_entry(n, struct e_node, count.node);
                        /* Smallest fragment will be 'e2->count.key'. */
                        wnd->extent_min = e2->count.key;
                }

                /* Replace smallest fragment by new one. */
                rb_erase(&e->start.node, &wnd->start_tree);
                rb_erase(&e->count.node, &wnd->count_tree);
                wnd->count -= 1;
        } else {
                /* Reuse a node absorbed above, or allocate a fresh one. */
                e = e0 ? e0 : kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
                if (!e) {
                        wnd->uptodated = -1;
                        goto out;
                }

                if (build && len <= wnd->extent_min)
                        wnd->extent_min = len;
        }
        e->start.key = bit;
        e->count.key = len;
        if (len > wnd->extent_max)
                wnd->extent_max = len;

        rb_insert_start(&wnd->start_tree, e);
        rb_insert_count(&wnd->count_tree, e);
        wnd->count += 1;

out:;
}
358
/*
 * wnd_remove_free_ext - Remove a run from the cached free space.
 *
 * Updates both trees so that the bits [bit, bit+len) are no longer
 * recorded as free; the touched extent may be trimmed, split in two, or
 * deleted, and extent_max is recomputed when the largest extent shrinks.
 */
static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
{
        struct rb_node *n, *n3;
        struct e_node *e, *e3;
        size_t end_in = bit + len;
        size_t end3, end, new_key, new_len, max_new_len;

        /* Try to find extent before 'bit'. */
        n = rb_lookup(&wnd->start_tree, bit);

        if (!n)
                return;

        e = rb_entry(n, struct e_node, start.node);
        end = e->start.key + e->count.key;

        /* new_key/new_len describe the right-hand remainder after a split. */
        new_key = new_len = 0;
        /* 'len' is reused: it becomes the surviving left-hand length of 'e'. */
        len = e->count.key;

        /* Range [bit,end_in) must be inside 'e' or outside 'e' and 'n'. */
        if (e->start.key > bit)
                ;
        else if (end_in <= end) {
                /* Range [bit,end_in) inside 'e'. */
                new_key = end_in;
                new_len = end - end_in;
                len = bit - e->start.key;
        } else if (bit > end) {
                /* Range lies beyond 'e': clip/delete the following extents. */
                bool bmax = false;

                n3 = rb_next(n);

                while (n3) {
                        e3 = rb_entry(n3, struct e_node, start.node);
                        if (e3->start.key >= end_in)
                                break;

                        /* Track whether the current maximum is being removed. */
                        if (e3->count.key == wnd->extent_max)
                                bmax = true;

                        end3 = e3->start.key + e3->count.key;
                        if (end3 > end_in) {
                                /* Partial overlap: keep the tail past end_in. */
                                e3->start.key = end_in;
                                rb_erase(&e3->count.node, &wnd->count_tree);
                                e3->count.key = end3 - end_in;
                                rb_insert_count(&wnd->count_tree, e3);
                                break;
                        }

                        /* Fully covered: drop the extent. */
                        n3 = rb_next(n3);
                        rb_erase(&e3->start.node, &wnd->start_tree);
                        rb_erase(&e3->count.node, &wnd->count_tree);
                        wnd->count -= 1;
                        kmem_cache_free(ntfs_enode_cachep, e3);
                }
                if (!bmax)
                        return;
                /* The biggest extent was removed: recompute extent_max. */
                n3 = rb_first(&wnd->count_tree);
                wnd->extent_max =
                        n3 ? rb_entry(n3, struct e_node, count.node)->count.key :
                             0;
                return;
        }

        /*
         * If 'e' is the unique largest extent and is about to shrink,
         * recompute extent_max from the next-largest extent and the
         * surviving pieces of 'e'.
         */
        if (e->count.key != wnd->extent_max) {
                ;
        } else if (rb_prev(&e->count.node)) {
                /* Another extent of the same (max) length exists. */
                ;
        } else {
                n3 = rb_next(&e->count.node);
                max_new_len = max(len, new_len);
                if (!n3) {
                        wnd->extent_max = max_new_len;
                } else {
                        e3 = rb_entry(n3, struct e_node, count.node);
                        wnd->extent_max = max(e3->count.key, max_new_len);
                }
        }

        if (!len) {
                /* Nothing survives on the left. */
                if (new_len) {
                        /* Reuse 'e' for the right-hand remainder. */
                        e->start.key = new_key;
                        rb_erase(&e->count.node, &wnd->count_tree);
                        e->count.key = new_len;
                        rb_insert_count(&wnd->count_tree, e);
                } else {
                        /* Extent completely consumed. */
                        rb_erase(&e->start.node, &wnd->start_tree);
                        rb_erase(&e->count.node, &wnd->count_tree);
                        wnd->count -= 1;
                        kmem_cache_free(ntfs_enode_cachep, e);
                }
                goto out;
        }
        /* Shrink 'e' to the surviving left part. */
        rb_erase(&e->count.node, &wnd->count_tree);
        e->count.key = len;
        rb_insert_count(&wnd->count_tree, e);

        if (!new_len)
                goto out;

        /* A split produced a right-hand remainder: insert it as a new extent. */
        if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
                wnd->uptodated = -1;

                /* Get minimal extent. */
                e = rb_entry(rb_last(&wnd->count_tree), struct e_node,
                             count.node);
                if (e->count.key > new_len)
                        goto out;

                /* Replace minimum. */
                rb_erase(&e->start.node, &wnd->start_tree);
                rb_erase(&e->count.node, &wnd->count_tree);
                wnd->count -= 1;
        } else {
                e = kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
                if (!e)
                        wnd->uptodated = -1;
        }

        if (e) {
                e->start.key = new_key;
                e->count.key = new_len;
                rb_insert_start(&wnd->start_tree, e);
                rb_insert_count(&wnd->count_tree, e);
                wnd->count += 1;
        }

out:
        /* If the cache emptied and is not authoritative, rebuild it. */
        if (!wnd->count && 1 != wnd->uptodated)
                wnd_rescan(wnd);
}
493
/*
 * wnd_rescan - Scan all bitmap. Used while initialization.
 *
 * Walks every bitmap window, recounting free bits per window and feeding
 * every run of zero bits into wnd_add_free_ext(). Runs that span window
 * boundaries are carried across iterations in 'prev_tail'.
 *
 * Return: 0 on success, -ENOENT if the run list lacks a mapping,
 * -EIO on a block-read failure.
 */
static int wnd_rescan(struct wnd_bitmap *wnd)
{
        int err = 0;
        size_t prev_tail = 0;
        struct super_block *sb = wnd->sb;
        struct ntfs_sb_info *sbi = sb->s_fs_info;
        u64 lbo, len = 0;
        u32 blocksize = sb->s_blocksize;
        u8 cluster_bits = sbi->cluster_bits;
        u32 wbits = 8 * sb->s_blocksize;
        u32 used, frb;
        size_t wpos, wbit, iw, vbo;
        struct buffer_head *bh = NULL;
        CLST lcn, clen;

        wnd->uptodated = 0;
        wnd->extent_max = 0;
        wnd->extent_min = MINUS_ONE_T;
        wnd->total_zeroes = 0;

        vbo = 0;

        for (iw = 0; iw < wnd->nwnd; iw++) {
                /* Final window may hold fewer than a full block of bits. */
                if (iw + 1 == wnd->nwnd)
                        wbits = wnd->bits_last;

                if (wnd->inited) {
                        /* Per-window counters are valid: skip trivial windows. */
                        if (!wnd->free_bits[iw]) {
                                /* All ones. */
                                if (prev_tail) {
                                        wnd_add_free_ext(wnd,
                                                         vbo * 8 - prev_tail,
                                                         prev_tail, true);
                                        prev_tail = 0;
                                }
                                goto next_wnd;
                        }
                        if (wbits == wnd->free_bits[iw]) {
                                /* All zeroes. */
                                prev_tail += wbits;
                                wnd->total_zeroes += wbits;
                                goto next_wnd;
                        }
                }

                if (!len) {
                        /* Current run exhausted: look up the next disk extent. */
                        u32 off = vbo & sbi->cluster_mask;

                        if (!run_lookup_entry(&wnd->run, vbo >> cluster_bits,
                                              &lcn, &clen, NULL)) {
                                err = -ENOENT;
                                goto out;
                        }

                        lbo = ((u64)lcn << cluster_bits) + off;
                        len = ((u64)clen << cluster_bits) - off;
                }

                bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
                if (!bh) {
                        err = -EIO;
                        goto out;
                }

                /* Recount free bits for this window from the raw data. */
                used = ntfs_bitmap_weight_le(bh->b_data, wbits);
                if (used < wbits) {
                        frb = wbits - used;
                        wnd->free_bits[iw] = frb;
                        wnd->total_zeroes += frb;
                }

                wpos = 0;
                wbit = vbo * 8;

                /* Clamp to the true bitmap size in the last window. */
                if (wbit + wbits > wnd->nbits)
                        wbits = wnd->nbits - wbit;

                /* Enumerate zero-bit runs inside this window. */
                do {
                        used = find_next_zero_bit_le(bh->b_data, wbits, wpos);

                        if (used > wpos && prev_tail) {
                                /* Set bits terminate the carried-over run. */
                                wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
                                                 prev_tail, true);
                                prev_tail = 0;
                        }

                        wpos = used;

                        if (wpos >= wbits) {
                                /* No free blocks. */
                                prev_tail = 0;
                                break;
                        }

                        frb = find_next_bit_le(bh->b_data, wbits, wpos);
                        if (frb >= wbits) {
                                /* Keep last free block. */
                                prev_tail += frb - wpos;
                                break;
                        }

                        wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
                                         frb + prev_tail - wpos, true);

                        /* Skip free block and first '1'. */
                        wpos = frb + 1;
                        /* Reset previous tail. */
                        prev_tail = 0;
                } while (wpos < wbits);

next_wnd:

                if (bh)
                        put_bh(bh);
                bh = NULL;

                vbo += blocksize;
                if (len) {
                        len -= blocksize;
                        lbo += blocksize;
                }
        }

        /* Add last block. */
        if (prev_tail)
                wnd_add_free_ext(wnd, wnd->nbits - prev_tail, prev_tail, true);

        /*
         * Before init cycle wnd->uptodated was 0.
         * If any errors or limits occurs while initialization then
         * wnd->uptodated will be -1.
         * If 'uptodated' is still 0 then Tree is really updated.
         */
        if (!wnd->uptodated)
                wnd->uptodated = 1;

        if (wnd->zone_bit != wnd->zone_end) {
                /* Re-establish the reserved zone cleared by the rescan. */
                size_t zlen = wnd->zone_end - wnd->zone_bit;

                wnd->zone_end = wnd->zone_bit;
                wnd_zone_set(wnd, wnd->zone_bit, zlen);
        }

out:
        return err;
}
643
/*
 * wnd_init - Initialize a wnd_bitmap over @nbits bits and build the
 * free-extent cache by scanning the on-disk bitmap.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the error
 * from wnd_rescan().
 */
int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
{
        int err;
        u32 blocksize = sb->s_blocksize;
        u32 wbits = blocksize * 8;

        init_rwsem(&wnd->rw_lock);

        wnd->sb = sb;
        wnd->nbits = nbits;
        wnd->total_zeroes = nbits;
        wnd->extent_max = MINUS_ONE_T;
        wnd->zone_bit = wnd->zone_end = 0;
        wnd->nwnd = bytes_to_block(sb, bitmap_size(nbits));
        /* Number of valid bits in the last (possibly partial) window. */
        wnd->bits_last = nbits & (wbits - 1);
        if (!wnd->bits_last)
                wnd->bits_last = wbits;

        /*
         * One u16 free-bit counter per window. kvmalloc_array() may fall
         * back to vmalloc, so this buffer must be released with kvfree().
         */
        wnd->free_bits =
                kvmalloc_array(wnd->nwnd, sizeof(u16), GFP_KERNEL | __GFP_ZERO);

        if (!wnd->free_bits)
                return -ENOMEM;

        err = wnd_rescan(wnd);
        if (err)
                return err;

        /* From now on the per-window counters are trustworthy. */
        wnd->inited = true;

        return 0;
}
676
677 /*
678  * wnd_map - Call sb_bread for requested window.
679  */
680 static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw)
681 {
682         size_t vbo;
683         CLST lcn, clen;
684         struct super_block *sb = wnd->sb;
685         struct ntfs_sb_info *sbi;
686         struct buffer_head *bh;
687         u64 lbo;
688
689         sbi = sb->s_fs_info;
690         vbo = (u64)iw << sb->s_blocksize_bits;
691
692         if (!run_lookup_entry(&wnd->run, vbo >> sbi->cluster_bits, &lcn, &clen,
693                               NULL)) {
694                 return ERR_PTR(-ENOENT);
695         }
696
697         lbo = ((u64)lcn << sbi->cluster_bits) + (vbo & sbi->cluster_mask);
698
699         bh = ntfs_bread(wnd->sb, lbo >> sb->s_blocksize_bits);
700         if (!bh)
701                 return ERR_PTR(-EIO);
702
703         return bh;
704 }
705
706 /*
707  * wnd_set_free - Mark the bits range from bit to bit + bits as free.
708  */
709 int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
710 {
711         int err = 0;
712         struct super_block *sb = wnd->sb;
713         size_t bits0 = bits;
714         u32 wbits = 8 * sb->s_blocksize;
715         size_t iw = bit >> (sb->s_blocksize_bits + 3);
716         u32 wbit = bit & (wbits - 1);
717         struct buffer_head *bh;
718
719         while (iw < wnd->nwnd && bits) {
720                 u32 tail, op;
721
722                 if (iw + 1 == wnd->nwnd)
723                         wbits = wnd->bits_last;
724
725                 tail = wbits - wbit;
726                 op = min_t(u32, tail, bits);
727
728                 bh = wnd_map(wnd, iw);
729                 if (IS_ERR(bh)) {
730                         err = PTR_ERR(bh);
731                         break;
732                 }
733
734                 lock_buffer(bh);
735
736                 ntfs_bitmap_clear_le(bh->b_data, wbit, op);
737
738                 wnd->free_bits[iw] += op;
739
740                 set_buffer_uptodate(bh);
741                 mark_buffer_dirty(bh);
742                 unlock_buffer(bh);
743                 put_bh(bh);
744
745                 wnd->total_zeroes += op;
746                 bits -= op;
747                 wbit = 0;
748                 iw += 1;
749         }
750
751         wnd_add_free_ext(wnd, bit, bits0, false);
752
753         return err;
754 }
755
756 /*
757  * wnd_set_used - Mark the bits range from bit to bit + bits as used.
758  */
759 int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
760 {
761         int err = 0;
762         struct super_block *sb = wnd->sb;
763         size_t bits0 = bits;
764         size_t iw = bit >> (sb->s_blocksize_bits + 3);
765         u32 wbits = 8 * sb->s_blocksize;
766         u32 wbit = bit & (wbits - 1);
767         struct buffer_head *bh;
768
769         while (iw < wnd->nwnd && bits) {
770                 u32 tail, op;
771
772                 if (unlikely(iw + 1 == wnd->nwnd))
773                         wbits = wnd->bits_last;
774
775                 tail = wbits - wbit;
776                 op = min_t(u32, tail, bits);
777
778                 bh = wnd_map(wnd, iw);
779                 if (IS_ERR(bh)) {
780                         err = PTR_ERR(bh);
781                         break;
782                 }
783
784                 lock_buffer(bh);
785
786                 ntfs_bitmap_set_le(bh->b_data, wbit, op);
787                 wnd->free_bits[iw] -= op;
788
789                 set_buffer_uptodate(bh);
790                 mark_buffer_dirty(bh);
791                 unlock_buffer(bh);
792                 put_bh(bh);
793
794                 wnd->total_zeroes -= op;
795                 bits -= op;
796                 wbit = 0;
797                 iw += 1;
798         }
799
800         if (!RB_EMPTY_ROOT(&wnd->start_tree))
801                 wnd_remove_free_ext(wnd, bit, bits0);
802
803         return err;
804 }
805
806 /*
807  * wnd_set_used_safe - Mark the bits range from bit to bit + bits as used.
808  *
809  * Unlikely wnd_set_used/wnd_set_free this function is not full trusted.
810  * It scans every bit in bitmap and marks free bit as used.
811  * @done - how many bits were marked as used.
812  *
813  * NOTE: normally *done should be 0.
814  */
815 int wnd_set_used_safe(struct wnd_bitmap *wnd, size_t bit, size_t bits,
816                       size_t *done)
817 {
818         size_t i, from = 0, len = 0;
819         int err = 0;
820
821         *done = 0;
822         for (i = 0; i < bits; i++) {
823                 if (wnd_is_free(wnd, bit + i, 1)) {
824                         if (!len)
825                                 from = bit + i;
826                         len += 1;
827                 } else if (len) {
828                         err = wnd_set_used(wnd, from, len);
829                         *done += len;
830                         len = 0;
831                         if (err)
832                                 break;
833                 }
834         }
835
836         if (len) {
837                 /* last fragment. */
838                 err = wnd_set_used(wnd, from, len);
839                 *done += len;
840         }
841         return err;
842 }
843
844 /*
845  * wnd_is_free_hlp
846  *
847  * Return: True if all clusters [bit, bit+bits) are free (bitmap only).
848  */
849 static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits)
850 {
851         struct super_block *sb = wnd->sb;
852         size_t iw = bit >> (sb->s_blocksize_bits + 3);
853         u32 wbits = 8 * sb->s_blocksize;
854         u32 wbit = bit & (wbits - 1);
855
856         while (iw < wnd->nwnd && bits) {
857                 u32 tail, op;
858
859                 if (unlikely(iw + 1 == wnd->nwnd))
860                         wbits = wnd->bits_last;
861
862                 tail = wbits - wbit;
863                 op = min_t(u32, tail, bits);
864
865                 if (wbits != wnd->free_bits[iw]) {
866                         bool ret;
867                         struct buffer_head *bh = wnd_map(wnd, iw);
868
869                         if (IS_ERR(bh))
870                                 return false;
871
872                         ret = are_bits_clear(bh->b_data, wbit, op);
873
874                         put_bh(bh);
875                         if (!ret)
876                                 return false;
877                 }
878
879                 bits -= op;
880                 wbit = 0;
881                 iw += 1;
882         }
883
884         return true;
885 }
886
887 /*
888  * wnd_is_free
889  *
890  * Return: True if all clusters [bit, bit+bits) are free.
891  */
892 bool wnd_is_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
893 {
894         bool ret;
895         struct rb_node *n;
896         size_t end;
897         struct e_node *e;
898
899         if (RB_EMPTY_ROOT(&wnd->start_tree))
900                 goto use_wnd;
901
902         n = rb_lookup(&wnd->start_tree, bit);
903         if (!n)
904                 goto use_wnd;
905
906         e = rb_entry(n, struct e_node, start.node);
907
908         end = e->start.key + e->count.key;
909
910         if (bit < end && bit + bits <= end)
911                 return true;
912
913 use_wnd:
914         ret = wnd_is_free_hlp(wnd, bit, bits);
915
916         return ret;
917 }
918
919 /*
920  * wnd_is_used
921  *
922  * Return: True if all clusters [bit, bit+bits) are used.
923  */
924 bool wnd_is_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
925 {
926         bool ret = false;
927         struct super_block *sb = wnd->sb;
928         size_t iw = bit >> (sb->s_blocksize_bits + 3);
929         u32 wbits = 8 * sb->s_blocksize;
930         u32 wbit = bit & (wbits - 1);
931         size_t end;
932         struct rb_node *n;
933         struct e_node *e;
934
935         if (RB_EMPTY_ROOT(&wnd->start_tree))
936                 goto use_wnd;
937
938         end = bit + bits;
939         n = rb_lookup(&wnd->start_tree, end - 1);
940         if (!n)
941                 goto use_wnd;
942
943         e = rb_entry(n, struct e_node, start.node);
944         if (e->start.key + e->count.key > bit)
945                 return false;
946
947 use_wnd:
948         while (iw < wnd->nwnd && bits) {
949                 u32 tail, op;
950
951                 if (unlikely(iw + 1 == wnd->nwnd))
952                         wbits = wnd->bits_last;
953
954                 tail = wbits - wbit;
955                 op = min_t(u32, tail, bits);
956
957                 if (wnd->free_bits[iw]) {
958                         bool ret;
959                         struct buffer_head *bh = wnd_map(wnd, iw);
960
961                         if (IS_ERR(bh))
962                                 goto out;
963
964                         ret = are_bits_set(bh->b_data, wbit, op);
965                         put_bh(bh);
966                         if (!ret)
967                                 goto out;
968                 }
969
970                 bits -= op;
971                 wbit = 0;
972                 iw += 1;
973         }
974         ret = true;
975
976 out:
977         return ret;
978 }
979
980 /*
981  * wnd_find - Look for free space.
982  *
983  * - flags - BITMAP_FIND_XXX flags
984  *
985  * Return: 0 if not found.
986  */
987 size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
988                 size_t flags, size_t *allocated)
989 {
990         struct super_block *sb;
991         u32 wbits, wpos, wzbit, wzend;
992         size_t fnd, max_alloc, b_len, b_pos;
993         size_t iw, prev_tail, nwnd, wbit, ebit, zbit, zend;
994         size_t to_alloc0 = to_alloc;
995         const struct e_node *e;
996         const struct rb_node *pr, *cr;
997         u8 log2_bits;
998         bool fbits_valid;
999         struct buffer_head *bh;
1000
1001         /* Fast checking for available free space. */
1002         if (flags & BITMAP_FIND_FULL) {
1003                 size_t zeroes = wnd_zeroes(wnd);
1004
1005                 zeroes -= wnd->zone_end - wnd->zone_bit;
1006                 if (zeroes < to_alloc0)
1007                         goto no_space;
1008
1009                 if (to_alloc0 > wnd->extent_max)
1010                         goto no_space;
1011         } else {
1012                 if (to_alloc > wnd->extent_max)
1013                         to_alloc = wnd->extent_max;
1014         }
1015
1016         if (wnd->zone_bit <= hint && hint < wnd->zone_end)
1017                 hint = wnd->zone_end;
1018
1019         max_alloc = wnd->nbits;
1020         b_len = b_pos = 0;
1021
1022         if (hint >= max_alloc)
1023                 hint = 0;
1024
1025         if (RB_EMPTY_ROOT(&wnd->start_tree)) {
1026                 if (wnd->uptodated == 1) {
1027                         /* Extents tree is updated -> No free space. */
1028                         goto no_space;
1029                 }
1030                 goto scan_bitmap;
1031         }
1032
1033         e = NULL;
1034         if (!hint)
1035                 goto allocate_biggest;
1036
1037         /* Use hint: Enumerate extents by start >= hint. */
1038         pr = NULL;
1039         cr = wnd->start_tree.rb_node;
1040
1041         for (;;) {
1042                 e = rb_entry(cr, struct e_node, start.node);
1043
1044                 if (e->start.key == hint)
1045                         break;
1046
1047                 if (e->start.key < hint) {
1048                         pr = cr;
1049                         cr = cr->rb_right;
1050                         if (!cr)
1051                                 break;
1052                         continue;
1053                 }
1054
1055                 cr = cr->rb_left;
1056                 if (!cr) {
1057                         e = pr ? rb_entry(pr, struct e_node, start.node) : NULL;
1058                         break;
1059                 }
1060         }
1061
1062         if (!e)
1063                 goto allocate_biggest;
1064
1065         if (e->start.key + e->count.key > hint) {
1066                 /* We have found extension with 'hint' inside. */
1067                 size_t len = e->start.key + e->count.key - hint;
1068
1069                 if (len >= to_alloc && hint + to_alloc <= max_alloc) {
1070                         fnd = hint;
1071                         goto found;
1072                 }
1073
1074                 if (!(flags & BITMAP_FIND_FULL)) {
1075                         if (len > to_alloc)
1076                                 len = to_alloc;
1077
1078                         if (hint + len <= max_alloc) {
1079                                 fnd = hint;
1080                                 to_alloc = len;
1081                                 goto found;
1082                         }
1083                 }
1084         }
1085
1086 allocate_biggest:
1087         /* Allocate from biggest free extent. */
1088         e = rb_entry(rb_first(&wnd->count_tree), struct e_node, count.node);
1089         if (e->count.key != wnd->extent_max)
1090                 wnd->extent_max = e->count.key;
1091
1092         if (e->count.key < max_alloc) {
1093                 if (e->count.key >= to_alloc) {
1094                         ;
1095                 } else if (flags & BITMAP_FIND_FULL) {
1096                         if (e->count.key < to_alloc0) {
1097                                 /* Biggest free block is less then requested. */
1098                                 goto no_space;
1099                         }
1100                         to_alloc = e->count.key;
1101                 } else if (-1 != wnd->uptodated) {
1102                         to_alloc = e->count.key;
1103                 } else {
1104                         /* Check if we can use more bits. */
1105                         size_t op, max_check;
1106                         struct rb_root start_tree;
1107
1108                         memcpy(&start_tree, &wnd->start_tree,
1109                                sizeof(struct rb_root));
1110                         memset(&wnd->start_tree, 0, sizeof(struct rb_root));
1111
1112                         max_check = e->start.key + to_alloc;
1113                         if (max_check > max_alloc)
1114                                 max_check = max_alloc;
1115                         for (op = e->start.key + e->count.key; op < max_check;
1116                              op++) {
1117                                 if (!wnd_is_free(wnd, op, 1))
1118                                         break;
1119                         }
1120                         memcpy(&wnd->start_tree, &start_tree,
1121                                sizeof(struct rb_root));
1122                         to_alloc = op - e->start.key;
1123                 }
1124
1125                 /* Prepare to return. */
1126                 fnd = e->start.key;
1127                 if (e->start.key + to_alloc > max_alloc)
1128                         to_alloc = max_alloc - e->start.key;
1129                 goto found;
1130         }
1131
1132         if (wnd->uptodated == 1) {
1133                 /* Extents tree is updated -> no free space. */
1134                 goto no_space;
1135         }
1136
1137         b_len = e->count.key;
1138         b_pos = e->start.key;
1139
1140 scan_bitmap:
1141         sb = wnd->sb;
1142         log2_bits = sb->s_blocksize_bits + 3;
1143
1144         /* At most two ranges [hint, max_alloc) + [0, hint). */
1145 Again:
1146
1147         /* TODO: Optimize request for case nbits > wbits. */
1148         iw = hint >> log2_bits;
1149         wbits = sb->s_blocksize * 8;
1150         wpos = hint & (wbits - 1);
1151         prev_tail = 0;
1152         fbits_valid = true;
1153
1154         if (max_alloc == wnd->nbits) {
1155                 nwnd = wnd->nwnd;
1156         } else {
1157                 size_t t = max_alloc + wbits - 1;
1158
1159                 nwnd = likely(t > max_alloc) ? (t >> log2_bits) : wnd->nwnd;
1160         }
1161
1162         /* Enumerate all windows. */
1163         for (; iw < nwnd; iw++) {
1164                 wbit = iw << log2_bits;
1165
1166                 if (!wnd->free_bits[iw]) {
1167                         if (prev_tail > b_len) {
1168                                 b_pos = wbit - prev_tail;
1169                                 b_len = prev_tail;
1170                         }
1171
1172                         /* Skip full used window. */
1173                         prev_tail = 0;
1174                         wpos = 0;
1175                         continue;
1176                 }
1177
1178                 if (unlikely(iw + 1 == nwnd)) {
1179                         if (max_alloc == wnd->nbits) {
1180                                 wbits = wnd->bits_last;
1181                         } else {
1182                                 size_t t = max_alloc & (wbits - 1);
1183
1184                                 if (t) {
1185                                         wbits = t;
1186                                         fbits_valid = false;
1187                                 }
1188                         }
1189                 }
1190
1191                 if (wnd->zone_end > wnd->zone_bit) {
1192                         ebit = wbit + wbits;
1193                         zbit = max(wnd->zone_bit, wbit);
1194                         zend = min(wnd->zone_end, ebit);
1195
1196                         /* Here we have a window [wbit, ebit) and zone [zbit, zend). */
1197                         if (zend <= zbit) {
1198                                 /* Zone does not overlap window. */
1199                         } else {
1200                                 wzbit = zbit - wbit;
1201                                 wzend = zend - wbit;
1202
1203                                 /* Zone overlaps window. */
1204                                 if (wnd->free_bits[iw] == wzend - wzbit) {
1205                                         prev_tail = 0;
1206                                         wpos = 0;
1207                                         continue;
1208                                 }
1209
1210                                 /* Scan two ranges window: [wbit, zbit) and [zend, ebit). */
1211                                 bh = wnd_map(wnd, iw);
1212
1213                                 if (IS_ERR(bh)) {
1214                                         /* TODO: Error */
1215                                         prev_tail = 0;
1216                                         wpos = 0;
1217                                         continue;
1218                                 }
1219
1220                                 /* Scan range [wbit, zbit). */
1221                                 if (wpos < wzbit) {
1222                                         /* Scan range [wpos, zbit). */
1223                                         fnd = wnd_scan(bh->b_data, wbit, wpos,
1224                                                        wzbit, to_alloc,
1225                                                        &prev_tail, &b_pos,
1226                                                        &b_len);
1227                                         if (fnd != MINUS_ONE_T) {
1228                                                 put_bh(bh);
1229                                                 goto found;
1230                                         }
1231                                 }
1232
1233                                 prev_tail = 0;
1234
1235                                 /* Scan range [zend, ebit). */
1236                                 if (wzend < wbits) {
1237                                         fnd = wnd_scan(bh->b_data, wbit,
1238                                                        max(wzend, wpos), wbits,
1239                                                        to_alloc, &prev_tail,
1240                                                        &b_pos, &b_len);
1241                                         if (fnd != MINUS_ONE_T) {
1242                                                 put_bh(bh);
1243                                                 goto found;
1244                                         }
1245                                 }
1246
1247                                 wpos = 0;
1248                                 put_bh(bh);
1249                                 continue;
1250                         }
1251                 }
1252
1253                 /* Current window does not overlap zone. */
1254                 if (!wpos && fbits_valid && wnd->free_bits[iw] == wbits) {
1255                         /* Window is empty. */
1256                         if (prev_tail + wbits >= to_alloc) {
1257                                 fnd = wbit + wpos - prev_tail;
1258                                 goto found;
1259                         }
1260
1261                         /* Increase 'prev_tail' and process next window. */
1262                         prev_tail += wbits;
1263                         wpos = 0;
1264                         continue;
1265                 }
1266
1267                 /* Read window. */
1268                 bh = wnd_map(wnd, iw);
1269                 if (IS_ERR(bh)) {
1270                         // TODO: Error.
1271                         prev_tail = 0;
1272                         wpos = 0;
1273                         continue;
1274                 }
1275
1276                 /* Scan range [wpos, eBits). */
1277                 fnd = wnd_scan(bh->b_data, wbit, wpos, wbits, to_alloc,
1278                                &prev_tail, &b_pos, &b_len);
1279                 put_bh(bh);
1280                 if (fnd != MINUS_ONE_T)
1281                         goto found;
1282         }
1283
1284         if (b_len < prev_tail) {
1285                 /* The last fragment. */
1286                 b_len = prev_tail;
1287                 b_pos = max_alloc - prev_tail;
1288         }
1289
1290         if (hint) {
1291                 /*
1292                  * We have scanned range [hint max_alloc).
1293                  * Prepare to scan range [0 hint + to_alloc).
1294                  */
1295                 size_t nextmax = hint + to_alloc;
1296
1297                 if (likely(nextmax >= hint) && nextmax < max_alloc)
1298                         max_alloc = nextmax;
1299                 hint = 0;
1300                 goto Again;
1301         }
1302
1303         if (!b_len)
1304                 goto no_space;
1305
1306         wnd->extent_max = b_len;
1307
1308         if (flags & BITMAP_FIND_FULL)
1309                 goto no_space;
1310
1311         fnd = b_pos;
1312         to_alloc = b_len;
1313
1314 found:
1315         if (flags & BITMAP_FIND_MARK_AS_USED) {
1316                 /* TODO: Optimize remove extent (pass 'e'?). */
1317                 if (wnd_set_used(wnd, fnd, to_alloc))
1318                         goto no_space;
1319         } else if (wnd->extent_max != MINUS_ONE_T &&
1320                    to_alloc > wnd->extent_max) {
1321                 wnd->extent_max = to_alloc;
1322         }
1323
1324         *allocated = fnd;
1325         return to_alloc;
1326
1327 no_space:
1328         return 0;
1329 }
1330
1331 /*
1332  * wnd_extend - Extend bitmap ($MFT bitmap).
1333  */
1334 int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
1335 {
1336         int err;
1337         struct super_block *sb = wnd->sb;
1338         struct ntfs_sb_info *sbi = sb->s_fs_info;
1339         u32 blocksize = sb->s_blocksize;
1340         u32 wbits = blocksize * 8;
1341         u32 b0, new_last;
1342         size_t bits, iw, new_wnd;
1343         size_t old_bits = wnd->nbits;
1344         u16 *new_free;
1345
1346         if (new_bits <= old_bits)
1347                 return -EINVAL;
1348
1349         /* Align to 8 byte boundary. */
1350         new_wnd = bytes_to_block(sb, bitmap_size(new_bits));
1351         new_last = new_bits & (wbits - 1);
1352         if (!new_last)
1353                 new_last = wbits;
1354
1355         if (new_wnd != wnd->nwnd) {
1356                 new_free = kmalloc_array(new_wnd, sizeof(u16), GFP_NOFS);
1357                 if (!new_free)
1358                         return -ENOMEM;
1359
1360                 memcpy(new_free, wnd->free_bits, wnd->nwnd * sizeof(short));
1361                 memset(new_free + wnd->nwnd, 0,
1362                        (new_wnd - wnd->nwnd) * sizeof(short));
1363                 kfree(wnd->free_bits);
1364                 wnd->free_bits = new_free;
1365         }
1366
1367         /* Zero bits [old_bits,new_bits). */
1368         bits = new_bits - old_bits;
1369         b0 = old_bits & (wbits - 1);
1370
1371         for (iw = old_bits >> (sb->s_blocksize_bits + 3); bits; iw += 1) {
1372                 u32 op;
1373                 size_t frb;
1374                 u64 vbo, lbo, bytes;
1375                 struct buffer_head *bh;
1376
1377                 if (iw + 1 == new_wnd)
1378                         wbits = new_last;
1379
1380                 op = b0 + bits > wbits ? wbits - b0 : bits;
1381                 vbo = (u64)iw * blocksize;
1382
1383                 err = ntfs_vbo_to_lbo(sbi, &wnd->run, vbo, &lbo, &bytes);
1384                 if (err)
1385                         break;
1386
1387                 bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
1388                 if (!bh)
1389                         return -EIO;
1390
1391                 lock_buffer(bh);
1392
1393                 ntfs_bitmap_clear_le(bh->b_data, b0, blocksize * 8 - b0);
1394                 frb = wbits - ntfs_bitmap_weight_le(bh->b_data, wbits);
1395                 wnd->total_zeroes += frb - wnd->free_bits[iw];
1396                 wnd->free_bits[iw] = frb;
1397
1398                 set_buffer_uptodate(bh);
1399                 mark_buffer_dirty(bh);
1400                 unlock_buffer(bh);
1401                 /* err = sync_dirty_buffer(bh); */
1402
1403                 b0 = 0;
1404                 bits -= op;
1405         }
1406
1407         wnd->nbits = new_bits;
1408         wnd->nwnd = new_wnd;
1409         wnd->bits_last = new_last;
1410
1411         wnd_add_free_ext(wnd, old_bits, new_bits - old_bits, false);
1412
1413         return 0;
1414 }
1415
1416 void wnd_zone_set(struct wnd_bitmap *wnd, size_t lcn, size_t len)
1417 {
1418         size_t zlen = wnd->zone_end - wnd->zone_bit;
1419
1420         if (zlen)
1421                 wnd_add_free_ext(wnd, wnd->zone_bit, zlen, false);
1422
1423         if (!RB_EMPTY_ROOT(&wnd->start_tree) && len)
1424                 wnd_remove_free_ext(wnd, lcn, len);
1425
1426         wnd->zone_bit = lcn;
1427         wnd->zone_end = lcn + len;
1428 }
1429
1430 int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range)
1431 {
1432         int err = 0;
1433         struct super_block *sb = sbi->sb;
1434         struct wnd_bitmap *wnd = &sbi->used.bitmap;
1435         u32 wbits = 8 * sb->s_blocksize;
1436         CLST len = 0, lcn = 0, done = 0;
1437         CLST minlen = bytes_to_cluster(sbi, range->minlen);
1438         CLST lcn_from = bytes_to_cluster(sbi, range->start);
1439         size_t iw = lcn_from >> (sb->s_blocksize_bits + 3);
1440         u32 wbit = lcn_from & (wbits - 1);
1441         CLST lcn_to;
1442
1443         if (!minlen)
1444                 minlen = 1;
1445
1446         if (range->len == (u64)-1)
1447                 lcn_to = wnd->nbits;
1448         else
1449                 lcn_to = bytes_to_cluster(sbi, range->start + range->len);
1450
1451         down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
1452
1453         for (; iw < wnd->nwnd; iw++, wbit = 0) {
1454                 CLST lcn_wnd = iw * wbits;
1455                 struct buffer_head *bh;
1456
1457                 if (lcn_wnd > lcn_to)
1458                         break;
1459
1460                 if (!wnd->free_bits[iw])
1461                         continue;
1462
1463                 if (iw + 1 == wnd->nwnd)
1464                         wbits = wnd->bits_last;
1465
1466                 if (lcn_wnd + wbits > lcn_to)
1467                         wbits = lcn_to - lcn_wnd;
1468
1469                 bh = wnd_map(wnd, iw);
1470                 if (IS_ERR(bh)) {
1471                         err = PTR_ERR(bh);
1472                         break;
1473                 }
1474
1475                 for (; wbit < wbits; wbit++) {
1476                         if (!test_bit_le(wbit, bh->b_data)) {
1477                                 if (!len)
1478                                         lcn = lcn_wnd + wbit;
1479                                 len += 1;
1480                                 continue;
1481                         }
1482                         if (len >= minlen) {
1483                                 err = ntfs_discard(sbi, lcn, len);
1484                                 if (err)
1485                                         goto out;
1486                                 done += len;
1487                         }
1488                         len = 0;
1489                 }
1490                 put_bh(bh);
1491         }
1492
1493         /* Process the last fragment. */
1494         if (len >= minlen) {
1495                 err = ntfs_discard(sbi, lcn, len);
1496                 if (err)
1497                         goto out;
1498                 done += len;
1499         }
1500
1501 out:
1502         range->len = (u64)done << sbi->cluster_bits;
1503
1504         up_read(&wnd->rw_lock);
1505
1506         return err;
1507 }
1508
1509 #if BITS_PER_LONG == 64
1510 typedef __le64 bitmap_ulong;
1511 #define cpu_to_ul(x) cpu_to_le64(x)
1512 #define ul_to_cpu(x) le64_to_cpu(x)
1513 #else
1514 typedef __le32 bitmap_ulong;
1515 #define cpu_to_ul(x) cpu_to_le32(x)
1516 #define ul_to_cpu(x) le32_to_cpu(x)
1517 #endif
1518
1519 void ntfs_bitmap_set_le(void *map, unsigned int start, int len)
1520 {
1521         bitmap_ulong *p = (bitmap_ulong *)map + BIT_WORD(start);
1522         const unsigned int size = start + len;
1523         int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
1524         bitmap_ulong mask_to_set = cpu_to_ul(BITMAP_FIRST_WORD_MASK(start));
1525
1526         while (len - bits_to_set >= 0) {
1527                 *p |= mask_to_set;
1528                 len -= bits_to_set;
1529                 bits_to_set = BITS_PER_LONG;
1530                 mask_to_set = cpu_to_ul(~0UL);
1531                 p++;
1532         }
1533         if (len) {
1534                 mask_to_set &= cpu_to_ul(BITMAP_LAST_WORD_MASK(size));
1535                 *p |= mask_to_set;
1536         }
1537 }
1538
1539 void ntfs_bitmap_clear_le(void *map, unsigned int start, int len)
1540 {
1541         bitmap_ulong *p = (bitmap_ulong *)map + BIT_WORD(start);
1542         const unsigned int size = start + len;
1543         int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
1544         bitmap_ulong mask_to_clear = cpu_to_ul(BITMAP_FIRST_WORD_MASK(start));
1545
1546         while (len - bits_to_clear >= 0) {
1547                 *p &= ~mask_to_clear;
1548                 len -= bits_to_clear;
1549                 bits_to_clear = BITS_PER_LONG;
1550                 mask_to_clear = cpu_to_ul(~0UL);
1551                 p++;
1552         }
1553         if (len) {
1554                 mask_to_clear &= cpu_to_ul(BITMAP_LAST_WORD_MASK(size));
1555                 *p &= ~mask_to_clear;
1556         }
1557 }
1558
1559 unsigned int ntfs_bitmap_weight_le(const void *bitmap, int bits)
1560 {
1561         const ulong *bmp = bitmap;
1562         unsigned int k, lim = bits / BITS_PER_LONG;
1563         unsigned int w = 0;
1564
1565         for (k = 0; k < lim; k++)
1566                 w += hweight_long(bmp[k]);
1567
1568         if (bits % BITS_PER_LONG) {
1569                 w += hweight_long(ul_to_cpu(((bitmap_ulong *)bitmap)[k]) &
1570                                   BITMAP_LAST_WORD_MASK(bits));
1571         }
1572
1573         return w;
1574 }