// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 */


/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in a few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * An allocation request involves a request for multiple blocks near to
 * the goal (block) value specified.
 *
 * During the initialization phase of the allocator we decide to use the
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever
 * is larger. If the size is less than sbi->s_mb_stream_request we
 * select the group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we have small files closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of a prealloc
 * space do we consume that particular prealloc space. This makes sure that
 * we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to be noted in the case of inode prealloc space is that
 * we don't modify the values associated with the inode prealloc space except
 * pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. This is a per-CPU prealloc list, represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-CPU locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so that
 * we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy.  The information involves the
 * block bitmap and the buddy information, stored in the
 * inode as:
 *
 *  {                        page                        }
 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.  So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks.  So it can hold information regarding groups_per_page
 * groups, which is blocks_per_page/2.
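 *
 * A worked example of that layout math (illustrative): with PAGE_SIZE =
 * 4096 and blocksize = 1024, blocks_per_page = 4 and groups_per_page = 2,
 * so one page carries the bitmap and buddy blocks of two groups; with
 * blocksize = 4096, blocks_per_page = 1 and a group's bitmap and buddy
 * blocks land on two consecutive pages.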
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for the requested count of blocks in the buddy cache. If we are
 * able to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In the case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc.  The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with -O
 * stripe=<value> option the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * If the "mb_optimize_scan" mount option is set, we maintain in-memory group
 * info structures in two data structures:
 *
 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
 *
 *    Locking: sbi->s_mb_largest_free_orders_locks (array of rw locks)
 *
 *    This is an array of lists where the index in the array represents the
 *    largest free order in the buddy bitmap of the participating group infos of
 *    that list. So, there are exactly MB_NUM_ORDERS(sb) (which means total
 *    number of buddy bitmap orders possible) number of lists. Group-infos are
 *    placed in appropriate lists.
 *
 * 2) Average fragment size rb tree (sbi->s_mb_avg_fragment_size_root)
 *
 *    Locking: sbi->s_mb_rb_lock (rwlock)
 *
 *    This is a red black tree consisting of group infos and the tree is sorted
 *    by average fragment sizes (which is calculated as ext4_group_info->bb_free
 *    / ext4_group_info->bb_fragments).
 *
 * When the "mb_optimize_scan" mount option is set, mballoc consults the above
 * data structures to decide the order in which groups are to be traversed for
 * fulfilling an allocation request.
 *
 * At CR = 0, we look for groups which have largest_free_order >= the order
 * of the request. We directly look at the largest free order list in the data
 * structure (1) above where largest_free_order = order of the request. If that
 * list is empty, we look at the remaining lists in increasing order of
 * largest_free_order. This allows us to perform the CR = 0 lookup in O(1) time.
 *
 * At CR = 1, we only consider groups where average fragment size > request
 * size. So, we look up a group which has average fragment size just above or
 * equal to request size using our rb tree (data structure 2) in O(log N) time.
 *
 * If the "mb_optimize_scan" mount option is not set, mballoc traverses groups
 * in linear order, which requires O(N) search time for each CR 0 and CR 1 phase.
 *
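 * As an illustration: a request of 16 clusters (order 4) at CR 0 consults
 * sbi->s_mb_largest_free_orders[4] first, then [5], [6], and so on until a
 * non-empty list yields a good group; at CR 1 it descends the rb tree
 * looking for the smallest average fragment size that still covers the
 * 16 clusters.
 *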
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 * /sys/fs/ext4/<partition>/mb_linear_limit
 *
 * The regular allocator uses the buddy scan only if the request len is a
 * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
 * The value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks in
 * stripe size. This should result in better allocation on RAID setups. If
 * not, we search in the specific group using the bitmap for best extents. The
 * tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent in the found extents. Searching for the blocks starts with
 * the group specified as the goal value in the allocation context via
 * ac_g_ex. Each group is first checked based on the criteria whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups are
 * checked.
 *
 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not
 * get traversed linearly. That may result in subsequent allocations not being
 * close to each other. And so, the underlying device may get filled up in a
 * non-linear fashion. While that may not matter on non-rotational devices, for
 * rotational devices that may result in higher seek times. "mb_linear_limit"
 * tells mballoc how many groups it should search linearly before
 * consulting the above data structures for more efficient lookups. For
 * non-rotational devices, this value defaults to 0 and for rotational devices
 * this is set to MB_DEFAULT_LINEAR_LIMIT.
 *
 * Both prealloc spaces are populated as described above. So for the first
 * request we will hit the buddy cache, which will result in this prealloc
 * space getting filled. The prealloc space is then later used for
 * subsequent requests.
 */

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from the descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within the descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to a
 *    permanent set of inodes: an inode can join and leave a group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 *  to keep it simple, we don't use block numbers, instead we count number of
 *  blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:                       buddy = on-disk + PAs
 *  - new PA:                           buddy += N; PA = N
 *  - use inode PA:                     on-disk += N; PA -= N
 *  - discard inode PA:                 buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:            on-disk += N; PA -= N
 *  - discard locality group PA:        buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation because we can't know the actual used
 *        bits from the PA, only from the on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores,
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while a block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     a bit set and a PA claims the same block, it's OK. IOW, one can set a
 *     bit in the on-disk bitmap if the buddy has the same bit set and/or a PA
 *     covers the corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 *      given (3) we care that PA-=N operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few consequences:
 *  - while a PA is referenced, no discard of it is possible
 *  - a PA stays referenced until its blocks are marked in the on-disk bitmap
 *  - PA changes only after the on-disk bitmap
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as the sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've drained a PA to empty. no need to modify the
 * buddy in this case, but we should care about concurrent init
 *
 */

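/*
 * The accounting identities above can be sanity-checked with a tiny
 * standalone model. The sketch below is purely illustrative (not part of
 * mballoc; all names are hypothetical) and demonstrates that each
 * operation preserves "in-core buddy = on-disk bitmap + PAs" when
 * expressed in block counts, using the locality-group style of discard
 * (buddy -= PA; PA = 0).
 */
#if 0
struct mb_model {
        int buddy;      /* blocks marked used in the in-core buddy */
        int ondisk;     /* blocks marked used in the on-disk bitmap */
        int pa;         /* blocks held by preallocation descriptors */
};

static void model_check(const struct mb_model *m)
{
        BUG_ON(m->buddy != m->ondisk + m->pa);
}

static void model_new_pa(struct mb_model *m, int n)
{
        m->buddy += n;          /* blocks leave the free pool... */
        m->pa += n;             /* ...and are now owned by the new PA */
        model_check(m);
}

static void model_use_pa(struct mb_model *m, int n)
{
        m->ondisk += n;         /* blocks become persistently allocated... */
        m->pa -= n;             /* ...and are no longer preallocated */
        model_check(m);
}

static void model_discard_group_pa(struct mb_model *m)
{
        m->buddy -= m->pa;      /* unused preallocated blocks are freed */
        m->pa = 0;
        model_check(m);
}
#endif
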
/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group        (group)
 *  - object (inode/locality)   (object)
 *  - per-pa lock               (pa)
 *  - cr0 lists lock            (cr0)
 *  - cr1 tree lock             (cr1)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *        pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *        pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *        pa
 *    group
 *        object
 *
 *  - allocation path (ext4_mb_regular_allocator)
 *    group
 *    cr0/cr1
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
        "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
        "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
        "ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
                                        ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
                                                ext4_group_t group);
static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);

static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
                               ext4_group_t group, int cr);

static int ext4_try_to_trim_range(struct super_block *sb,
                struct ext4_buddy *e4b, ext4_grpblk_t start,
                ext4_grpblk_t max, ext4_grpblk_t minblocks);

/*
 * The algorithm using this percpu seq counter goes as follows:
 * 1. We sample the percpu discard_pa_seq counter before trying for block
 *    allocation in ext4_mb_new_blocks().
 * 2. We increment this percpu discard_pa_seq counter when we either allocate
 *    or free these blocks i.e. while marking those blocks as used/free in
 *    mb_mark_used()/mb_free_blocks().
 * 3. We also increment this percpu seq counter when we successfully identify
 *    that the bb_prealloc_list is not empty and hence proceed for discarding
 *    of those PAs inside ext4_mb_discard_group_preallocations().
 *
 * Now to make sure that the regular fast path of block allocation is not
 * affected, as a small optimization we only sample the percpu seq counter
 * on that cpu. Only when the block allocation fails and the freed blocks
 * found were 0 do we sample the percpu seq counter for all cpus using
 * the function ext4_get_discard_pa_seq_sum() below. This happens after making
 * sure that all the PAs on grp->bb_prealloc_list got freed or if it's empty.
 */
static DEFINE_PER_CPU(u64, discard_pa_seq);
static inline u64 ext4_get_discard_pa_seq_sum(void)
{
        int __cpu;
        u64 __seq = 0;

        for_each_possible_cpu(__cpu)
                __seq += per_cpu(discard_pa_seq, __cpu);
        return __seq;
}

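/*
 * Illustrative sketch of the retry decision described above (hypothetical
 * helper, not the real allocator retry loop): the fast path samples only
 * this CPU's counter before allocating, and only on failure is the full
 * sum compared against that sample to decide whether a concurrent free or
 * PA discard makes a retry worthwhile.
 */
#if 0
static bool mb_worth_retrying(u64 seq_sampled_before_alloc)
{
        /*
         * The counters only ever increase, so any difference means
         * blocks were freed or PAs were discarded since we sampled.
         */
        return ext4_get_discard_pa_seq_sum() != seq_sampled_before_alloc;
}
#endif
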
static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
        *bit += ((unsigned long) addr & 7UL) << 3;
        addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
        *bit += ((unsigned long) addr & 3UL) << 3;
        addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
        return addr;
}

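/*
 * A worked example of the correction above (illustrative): on a 64-bit
 * machine, a call with addr = base + 5 and *bit = 2 is rewritten to
 * addr = base and *bit = 2 + 5 * 8 = 42, i.e. the same bit expressed
 * relative to the previous 8-byte boundary, which is the alignment that
 * ext4_test_bit() and friends require on architectures like powerpc.
 */
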
static inline int mb_test_bit(int bit, void *addr)
{
        /*
         * ext4_test_bit on architectures like powerpc
         * needs an unsigned long aligned address
         */
        addr = mb_correct_addr_and_bit(&bit, addr);
        return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
        addr = mb_correct_addr_and_bit(&bit, addr);
        ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
        addr = mb_correct_addr_and_bit(&bit, addr);
        ext4_clear_bit(bit, addr);
}

static inline int mb_test_and_clear_bit(int bit, void *addr)
{
        addr = mb_correct_addr_and_bit(&bit, addr);
        return ext4_test_and_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
        int fix = 0, ret, tmpmax;
        addr = mb_correct_addr_and_bit(&fix, addr);
        tmpmax = max + fix;
        start += fix;

        ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
        if (ret > max)
                return max;
        return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
        int fix = 0, ret, tmpmax;
        addr = mb_correct_addr_and_bit(&fix, addr);
        tmpmax = max + fix;
        start += fix;

        ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
        if (ret > max)
                return max;
        return ret;
}

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
        char *bb;

        BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
        BUG_ON(max == NULL);

        if (order > e4b->bd_blkbits + 1) {
                *max = 0;
                return NULL;
        }

        /* at order 0 we see each particular block */
        if (order == 0) {
                *max = 1 << (e4b->bd_blkbits + 3);
                return e4b->bd_bitmap;
        }

        bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
        *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

        return bb;
}

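/*
 * Illustrative sketch (hypothetical helper, not part of mballoc): walking
 * every order of a loaded buddy with mb_find_buddy(). Order 0 is the block
 * bitmap itself with one bit per cluster; each higher order is a slice of
 * the buddy block located via the precomputed s_mb_offsets[] table, with
 * half as many valid bits as the order below it.
 */
#if 0
static void mb_visit_all_orders(struct ext4_buddy *e4b)
{
        int order, max;
        void *map;

        for (order = 0; order <= e4b->bd_blkbits + 1; order++) {
                map = mb_find_buddy(e4b, order, &max);
                if (map == NULL)
                        break;
                /* 'map' holds 'max' valid bits at this order */
        }
}
#endif
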
#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
                           int first, int count)
{
        int i;
        struct super_block *sb = e4b->bd_sb;

        if (unlikely(e4b->bd_info->bb_bitmap == NULL))
                return;
        assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
        for (i = 0; i < count; i++) {
                if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
                        ext4_fsblk_t blocknr;

                        blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
                        blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
                        ext4_grp_locked_error(sb, e4b->bd_group,
                                              inode ? inode->i_ino : 0,
                                              blocknr,
                                              "freeing block already freed "
                                              "(bit %u)",
                                              first + i);
                        ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
                                        EXT4_GROUP_INFO_BBITMAP_CORRUPT);
                }
                mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
        }
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
        int i;

        if (unlikely(e4b->bd_info->bb_bitmap == NULL))
                return;
        assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
        for (i = 0; i < count; i++) {
                BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
                mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
        }
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
        if (unlikely(e4b->bd_info->bb_bitmap == NULL))
                return;
        if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
                unsigned char *b1, *b2;
                int i;
                b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
                b2 = (unsigned char *) bitmap;
                for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
                        if (b1[i] != b2[i]) {
                                ext4_msg(e4b->bd_sb, KERN_ERR,
                                         "corruption in group %u "
                                         "at byte %u(%u): %x in copy != %x "
                                         "on disk/prealloc",
                                         e4b->bd_group, i, i * 8, b1[i], b2[i]);
                                BUG();
                        }
                }
        }
}

static void mb_group_bb_bitmap_alloc(struct super_block *sb,
                        struct ext4_group_info *grp, ext4_group_t group)
{
        struct buffer_head *bh;

        grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
        if (!grp->bb_bitmap)
                return;

        bh = ext4_read_block_bitmap(sb, group);
        if (IS_ERR_OR_NULL(bh)) {
                kfree(grp->bb_bitmap);
                grp->bb_bitmap = NULL;
                return;
        }

        memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
        put_bh(bh);
}

static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
        kfree(grp->bb_bitmap);
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
                                struct ext4_buddy *e4b, int first, int count)
{
        return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
                                                int first, int count)
{
        return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
        return;
}

static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
                        struct ext4_group_info *grp, ext4_group_t group)
{
        return;
}

static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
        return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)                                         \
do {                                                                    \
        if (!(assert)) {                                                \
                printk(KERN_EMERG                                       \
                        "Assertion failure in %s() at %s:%d: \"%s\"\n", \
                        function, file, line, # assert);                \
                BUG();                                                  \
        }                                                               \
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
                                const char *function, int line)
{
        struct super_block *sb = e4b->bd_sb;
        int order = e4b->bd_blkbits + 1;
        int max;
        int max2;
        int i;
        int j;
        int k;
        int count;
        struct ext4_group_info *grp;
        int fragments = 0;
        int fstart;
        struct list_head *cur;
        void *buddy;
        void *buddy2;

        if (e4b->bd_info->bb_check_counter++ % 10)
                return 0;

        while (order > 1) {
                buddy = mb_find_buddy(e4b, order, &max);
                MB_CHECK_ASSERT(buddy);
                buddy2 = mb_find_buddy(e4b, order - 1, &max2);
                MB_CHECK_ASSERT(buddy2);
                MB_CHECK_ASSERT(buddy != buddy2);
                MB_CHECK_ASSERT(max * 2 == max2);

                count = 0;
                for (i = 0; i < max; i++) {

                        if (mb_test_bit(i, buddy)) {
                                /* only single bit in buddy2 may be 0 */
                                if (!mb_test_bit(i << 1, buddy2)) {
                                        MB_CHECK_ASSERT(
                                                mb_test_bit((i<<1)+1, buddy2));
                                }
                                continue;
                        }

                        /* both bits in buddy2 must be 1 */
                        MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
                        MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

                        for (j = 0; j < (1 << order); j++) {
                                k = (i * (1 << order)) + j;
                                MB_CHECK_ASSERT(
                                        !mb_test_bit(k, e4b->bd_bitmap));
                        }
                        count++;
                }
                MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
                order--;
        }

        fstart = -1;
        buddy = mb_find_buddy(e4b, 0, &max);
        for (i = 0; i < max; i++) {
                if (!mb_test_bit(i, buddy)) {
                        MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
                        if (fstart == -1) {
                                fragments++;
                                fstart = i;
                        }
                        continue;
                }
                fstart = -1;
                /* check used bits only */
                for (j = 0; j < e4b->bd_blkbits + 1; j++) {
                        buddy2 = mb_find_buddy(e4b, j, &max2);
                        k = i >> j;
                        MB_CHECK_ASSERT(k < max2);
                        MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
                }
        }
        MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
        MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

        grp = ext4_get_group_info(sb, e4b->bd_group);
        list_for_each(cur, &grp->bb_prealloc_list) {
                ext4_group_t groupnr;
                struct ext4_prealloc_space *pa;
                pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
                ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
                MB_CHECK_ASSERT(groupnr == e4b->bd_group);
                for (i = 0; i < pa->pa_len; i++)
                        MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
        }
        return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,       \
                                        __FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Divide blocks starting from @first with length @len into
 * smaller chunks with power-of-2 block counts.
 * Clear the bits in the bitmap which the blocks of the chunk(s) cover,
 * then increase bb_counters[] for the corresponding chunk size.
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
                                void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
                                        struct ext4_group_info *grp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_grpblk_t min;
        ext4_grpblk_t max;
        ext4_grpblk_t chunk;
        unsigned int border;

        BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));

        border = 2 << sb->s_blocksize_bits;

        while (len > 0) {
                /* find how many blocks can be covered since this position */
                max = ffs(first | border) - 1;

                /* find how many blocks of power 2 we need to mark */
                min = fls(len) - 1;

                if (max < min)
                        min = max;
                chunk = 1 << min;

                /* mark multiblock chunks only */
                grp->bb_counters[min]++;
                if (min > 0)
                        mb_clear_bit(first >> min,
                                     buddy + sbi->s_mb_offsets[min]);

                len -= chunk;
                first += chunk;
        }
}

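/*
 * A worked example of the splitting loop above (illustrative): a free run
 * of len = 11 clusters starting at first = 5 is cut on alignment and size
 * into power-of-2 chunks: an order-0 chunk at cluster 5, an order-1 chunk
 * covering 6..7, and an order-3 chunk covering 8..15, so bb_counters[0],
 * bb_counters[1] and bb_counters[3] are each incremented once.
 */
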
static void ext4_mb_rb_insert(struct rb_root *root, struct rb_node *new,
                        int (*cmp)(struct rb_node *, struct rb_node *))
{
        struct rb_node **iter = &root->rb_node, *parent = NULL;

        while (*iter) {
                parent = *iter;
                if (cmp(new, *iter) > 0)
                        iter = &((*iter)->rb_left);
                else
                        iter = &((*iter)->rb_right);
        }

        rb_link_node(new, parent, iter);
        rb_insert_color(new, root);
}

static int
ext4_mb_avg_fragment_size_cmp(struct rb_node *rb1, struct rb_node *rb2)
{
        struct ext4_group_info *grp1 = rb_entry(rb1,
                                                struct ext4_group_info,
                                                bb_avg_fragment_size_rb);
        struct ext4_group_info *grp2 = rb_entry(rb2,
                                                struct ext4_group_info,
                                                bb_avg_fragment_size_rb);
        int num_frags_1, num_frags_2;

        num_frags_1 = grp1->bb_fragments ?
                grp1->bb_free / grp1->bb_fragments : 0;
        num_frags_2 = grp2->bb_fragments ?
                grp2->bb_free / grp2->bb_fragments : 0;

        return (num_frags_2 - num_frags_1);
}

/*
 * Reinsert grpinfo into the avg_fragment_size tree with new average
 * fragment size.
 */
static void
mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
                return;

        write_lock(&sbi->s_mb_rb_lock);
        if (!RB_EMPTY_NODE(&grp->bb_avg_fragment_size_rb)) {
                rb_erase(&grp->bb_avg_fragment_size_rb,
                                &sbi->s_mb_avg_fragment_size_root);
                RB_CLEAR_NODE(&grp->bb_avg_fragment_size_rb);
        }

        ext4_mb_rb_insert(&sbi->s_mb_avg_fragment_size_root,
                &grp->bb_avg_fragment_size_rb,
                ext4_mb_avg_fragment_size_cmp);
        write_unlock(&sbi->s_mb_rb_lock);
}

/*
 * Choose next group by traversing largest_free_order lists. Updates *new_cr if
 * cr level needs an update.
 */
static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
                        int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct ext4_group_info *iter, *grp;
        int i;

        if (ac->ac_status == AC_STATUS_FOUND)
                return;

        if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR0_OPTIMIZED))
                atomic_inc(&sbi->s_bal_cr0_bad_suggestions);

        grp = NULL;
        for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
                if (list_empty(&sbi->s_mb_largest_free_orders[i]))
                        continue;
                read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
                if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
                        read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
                        continue;
                }
                grp = NULL;
                list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
                                    bb_largest_free_order_node) {
                        if (sbi->s_mb_stats)
                                atomic64_inc(&sbi->s_bal_cX_groups_considered[0]);
                        if (likely(ext4_mb_good_group(ac, iter->bb_group, 0))) {
                                grp = iter;
                                break;
                        }
                }
                read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
                if (grp)
                        break;
        }

        if (!grp) {
                /* Increment cr and search again */
                *new_cr = 1;
        } else {
                *group = grp->bb_group;
                ac->ac_last_optimal_group = *group;
                ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED;
        }
}

/*
 * Choose next group by traversing average fragment size tree. Updates *new_cr
 * if cr level needs an update. Sets EXT4_MB_SEARCH_NEXT_LINEAR to indicate
 * that the linear search should continue for one iteration since there's lock
 * contention on the rb tree lock.
 */
static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
                int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        int avg_fragment_size, best_so_far;
        struct rb_node *node, *found;
        struct ext4_group_info *grp;

        /*
         * If there is contention on the lock, instead of waiting for the lock
         * to become available, just continue searching linearly. We'll resume
         * our rb tree search later starting at ac->ac_last_optimal_group.
         */
        if (!read_trylock(&sbi->s_mb_rb_lock)) {
                ac->ac_flags |= EXT4_MB_SEARCH_NEXT_LINEAR;
                return;
        }

        if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
                if (sbi->s_mb_stats)
                        atomic_inc(&sbi->s_bal_cr1_bad_suggestions);
                /* We have found something at CR 1 in the past */
                grp = ext4_get_group_info(ac->ac_sb, ac->ac_last_optimal_group);
                for (found = rb_next(&grp->bb_avg_fragment_size_rb); found != NULL;
                     found = rb_next(found)) {
                        grp = rb_entry(found, struct ext4_group_info,
                                       bb_avg_fragment_size_rb);
                        if (sbi->s_mb_stats)
                                atomic64_inc(&sbi->s_bal_cX_groups_considered[1]);
                        if (likely(ext4_mb_good_group(ac, grp->bb_group, 1)))
                                break;
                }
                goto done;
        }

        node = sbi->s_mb_avg_fragment_size_root.rb_node;
        best_so_far = 0;
        found = NULL;

        while (node) {
                grp = rb_entry(node, struct ext4_group_info,
                               bb_avg_fragment_size_rb);
                avg_fragment_size = 0;
                if (ext4_mb_good_group(ac, grp->bb_group, 1)) {
                        avg_fragment_size = grp->bb_fragments ?
                                grp->bb_free / grp->bb_fragments : 0;
                        if (!best_so_far || avg_fragment_size < best_so_far) {
                                best_so_far = avg_fragment_size;
                                found = node;
                        }
                }
                if (avg_fragment_size > ac->ac_g_ex.fe_len)
                        node = node->rb_right;
                else
                        node = node->rb_left;
        }

done:
        if (found) {
                grp = rb_entry(found, struct ext4_group_info,
                               bb_avg_fragment_size_rb);
                *group = grp->bb_group;
                ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED;
        } else {
                *new_cr = 2;
        }

        read_unlock(&sbi->s_mb_rb_lock);
        ac->ac_last_optimal_group = *group;
}

static inline int should_optimize_scan(struct ext4_allocation_context *ac)
{
        if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
                return 0;
        if (ac->ac_criteria >= 2)
                return 0;
        if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
                return 0;
        return 1;
}

/*
 * Return next linear group for allocation. If linear traversal should not be
 * performed, this function just returns the same group
 */
static int
next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups)
{
        if (!should_optimize_scan(ac))
                goto inc_and_return;

        if (ac->ac_groups_linear_remaining) {
                ac->ac_groups_linear_remaining--;
                goto inc_and_return;
        }

        if (ac->ac_flags & EXT4_MB_SEARCH_NEXT_LINEAR) {
                ac->ac_flags &= ~EXT4_MB_SEARCH_NEXT_LINEAR;
                goto inc_and_return;
        }

        return group;
inc_and_return:
        /*
         * Artificially restricted ngroups for non-extent
         * files makes group > ngroups possible on first loop.
         */
        return group + 1 >= ngroups ? 0 : group + 1;
}

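/*
 * Illustrative example: with ngroups = 128, linear traversal advances
 * 126 -> 127 -> 0 (wrapping around at the end); once no further linear
 * scanning is allowed, the group is returned unchanged and the optimized
 * data structures pick the next candidate instead.
 */
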
/*
 * ext4_mb_choose_next_group: choose next group for allocation.
 *
 * @ac        Allocation Context
 * @new_cr    This is an output parameter. If there is no good group
 *            available at the current CR level, this field is updated to
 *            indicate the new cr level that should be used.
 * @group     This is an input / output parameter. As an input it indicates the
 *            next group that the allocator intends to use for allocation. As
 *            output, this field indicates the next group that should be used as
 *            determined by the optimization functions.
 * @ngroups   Total number of groups
 */
static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
                int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
        *new_cr = ac->ac_criteria;

        if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining)
                return;

        if (*new_cr == 0) {
                ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
        } else if (*new_cr == 1) {
                ext4_mb_choose_next_group_cr1(ac, new_cr, group, ngroups);
        } else {
                /*
                 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
                 * bb_free. But until that happens, we should never come here.
                 */
                WARN_ON(1);
        }
}

/*
 * Cache the order of the largest free extent we have available in this block
 * group.
 */
static void
mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int i;

        if (test_opt2(sb, MB_OPTIMIZE_SCAN) && grp->bb_largest_free_order >= 0) {
                write_lock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
                list_del_init(&grp->bb_largest_free_order_node);
                write_unlock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
        }
        grp->bb_largest_free_order = -1; /* uninit */

        for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) {
                if (grp->bb_counters[i] > 0) {
                        grp->bb_largest_free_order = i;
                        break;
                }
        }
        if (test_opt2(sb, MB_OPTIMIZE_SCAN) &&
            grp->bb_largest_free_order >= 0 && grp->bb_free) {
                write_lock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
                list_add_tail(&grp->bb_largest_free_order_node,
                      &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
                write_unlock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
        }
}

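/*
 * Illustrative example: if a group's buddy counters end up as
 * bb_counters[] = { 3, 1, 0, 2, 0, ... }, the largest order with a
 * non-zero count is 3, so the group is linked into
 * sbi->s_mb_largest_free_orders[3] and CR 0 scans for requests of
 * order <= 3 will consider it.
 */
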
static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
                                void *buddy, void *bitmap, ext4_group_t group)
{
        struct ext4_group_info *grp = ext4_get_group_info(sb, group);
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
        ext4_grpblk_t i = 0;
        ext4_grpblk_t first;
        ext4_grpblk_t len;
        unsigned free = 0;
        unsigned fragments = 0;
        unsigned long long period = get_cycles();

        /* initialize buddy from the bitmap which is an aggregation
         * of the on-disk bitmap and preallocations */
        i = mb_find_next_zero_bit(bitmap, max, 0);
        grp->bb_first_free = i;
        while (i < max) {
                fragments++;
                first = i;
                i = mb_find_next_bit(bitmap, max, i);
                len = i - first;
                free += len;
                if (len > 1)
                        ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
                else
                        grp->bb_counters[0]++;
                if (i < max)
                        i = mb_find_next_zero_bit(bitmap, max, i);
        }
        grp->bb_fragments = fragments;

        if (free != grp->bb_free) {
                ext4_grp_locked_error(sb, group, 0, 0,
                                      "block bitmap and bg descriptor "
                                      "inconsistent: %u vs %u free clusters",
                                      free, grp->bb_free);
                /*
                 * If we intend to continue, we consider the group descriptor
                 * corrupt and update bb_free using the bitmap value
                 */
                grp->bb_free = free;
                ext4_mark_group_bitmap_corrupted(sb, group,
                                        EXT4_GROUP_INFO_BBITMAP_CORRUPT);
        }
        mb_set_largest_free_order(sb, grp);

        clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

        period = get_cycles() - period;
        atomic_inc(&sbi->s_mb_buddies_generated);
        atomic64_add(period, &sbi->s_mb_generation_time);
        mb_update_avg_fragment_size(sb, grp);
}

/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. The information involves the
 * block bitmap and the buddy information, stored in
 * the inode as
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
 * So it can hold information regarding groups_per_page
 * groups, which is blocks_per_page/2.
 *
 * Locking note:  This routine takes the block group lock of all groups
 * for this page; do not hold this lock when calling this routine!
 */

static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
{
        ext4_group_t ngroups;
        int blocksize;
        int blocks_per_page;
        int groups_per_page;
        int err = 0;
        int i;
        ext4_group_t first_group, group;
        int first_block;
        struct super_block *sb;
        struct buffer_head *bhs;
        struct buffer_head **bh = NULL;
        struct inode *inode;
        char *data;
        char *bitmap;
        struct ext4_group_info *grinfo;

        inode = page->mapping->host;
        sb = inode->i_sb;
        ngroups = ext4_get_groups_count(sb);
        blocksize = i_blocksize(inode);
        blocks_per_page = PAGE_SIZE / blocksize;

        mb_debug(sb, "init page %lu\n", page->index);

        groups_per_page = blocks_per_page >> 1;
        if (groups_per_page == 0)
                groups_per_page = 1;

        /* allocate buffer_heads to read bitmaps */
        if (groups_per_page > 1) {
                i = sizeof(struct buffer_head *) * groups_per_page;
                bh = kzalloc(i, gfp);
                if (bh == NULL) {
                        err = -ENOMEM;
                        goto out;
                }
        } else
                bh = &bhs;

        first_group = page->index * blocks_per_page / 2;

        /* read all groups the page covers into the cache */
        for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
                if (group >= ngroups)
                        break;

                grinfo = ext4_get_group_info(sb, group);
                /*
                 * If page is uptodate then we came here after online resize
                 * which added some new uninitialized group info structs, so
                 * we must skip all initialized uptodate buddies on the page,
                 * which may be currently in use by an allocating task.
                 */
                if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
                        bh[i] = NULL;
                        continue;
                }
                bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
                if (IS_ERR(bh[i])) {
                        err = PTR_ERR(bh[i]);
                        bh[i] = NULL;
                        goto out;
                }
                mb_debug(sb, "read bitmap for group %u\n", group);
        }

        /* wait for I/O completion */
        for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
                int err2;

                if (!bh[i])
                        continue;
                err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
                if (!err)
                        err = err2;
        }

        first_block = page->index * blocks_per_page;
        for (i = 0; i < blocks_per_page; i++) {
                group = (first_block + i) >> 1;
                if (group >= ngroups)
                        break;

                if (!bh[group - first_group])
                        /* skip initialized uptodate buddy */
                        continue;

                if (!buffer_verified(bh[group - first_group]))
                        /* Skip faulty bitmaps */
                        continue;
                err = 0;

                /*
                 * data carries the information regarding this
                 * particular group in the format specified
                 * above
                 *
                 */
                data = page_address(page) + (i * blocksize);
                bitmap = bh[group - first_group]->b_data;

                /*
                 * We place the buddy block and bitmap block
                 * close together
                 */
                if ((first_block + i) & 1) {
                        /* this is block of buddy */
                        BUG_ON(incore == NULL);
                        mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
                                group, page->index, i * blocksize);
                        trace_ext4_mb_buddy_bitmap_load(sb, group);
                        grinfo = ext4_get_group_info(sb, group);
                        grinfo->bb_fragments = 0;
                        memset(grinfo->bb_counters, 0,
                               sizeof(*grinfo->bb_counters) *
                               (MB_NUM_ORDERS(sb)));
                        /*
                         * incore got set to the group block bitmap below
                         */
                        ext4_lock_group(sb, group);
                        /* init the buddy */
                        memset(data, 0xff, blocksize);
                        ext4_mb_generate_buddy(sb, data, incore, group);
                        ext4_unlock_group(sb, group);
                        incore = NULL;
                } else {
                        /* this is block of bitmap */
                        BUG_ON(incore != NULL);
                        mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
                                group, page->index, i * blocksize);
                        trace_ext4_mb_bitmap_load(sb, group);

                        /* see comments in ext4_mb_put_pa() */
                        ext4_lock_group(sb, group);
                        memcpy(data, bitmap, blocksize);

                        /* mark all preallocated blks used in in-core bitmap */
                        ext4_mb_generate_from_pa(sb, data, group);
                        ext4_mb_generate_from_freelist(sb, data, group);
                        ext4_unlock_group(sb, group);

                        /* set incore so that the buddy information can be
                         * generated using this
                         */
                        incore = data;
                }
        }
        SetPageUptodate(page);

out:
        if (bh) {
                for (i = 0; i < groups_per_page; i++)
                        brelse(bh[i]);
                if (bh != &bhs)
                        kfree(bh);
        }
        return err;
}

1341 /*
1342  * Lock the buddy and bitmap pages. This makes sure that parallel init_group
1343  * calls on the same buddy page don't happen while holding the buddy page lock.
1344  * Return the locked buddy and bitmap pages in the e4b struct. If buddy and
1345  * bitmap are on the same page, e4b->bd_buddy_page is NULL and the return value is 0.
1346  */
1347 static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
1348                 ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
1349 {
1350         struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
1351         int block, pnum, poff;
1352         int blocks_per_page;
1353         struct page *page;
1354
1355         e4b->bd_buddy_page = NULL;
1356         e4b->bd_bitmap_page = NULL;
1357
1358         blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1359         /*
1360          * the buddy cache inode stores the block bitmap
1361          * and buddy information in consecutive blocks.
1362          * So for each group we need two blocks.
1363          */
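        /*
         * Example (illustrative, with assumed numbers, not taken from the
         * code): with a 4K PAGE_SIZE and a 1K blocksize, blocks_per_page
         * is 4, so group 5 keeps its bitmap in logical block 10 and its
         * buddy in block 11 of the buddy cache inode; both land in page
         * 10 / 4 == 2, at offsets 2 and 3 within that page.
         */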
1364         block = group * 2;
1365         pnum = block / blocks_per_page;
1366         poff = block % blocks_per_page;
1367         page = find_or_create_page(inode->i_mapping, pnum, gfp);
1368         if (!page)
1369                 return -ENOMEM;
1370         BUG_ON(page->mapping != inode->i_mapping);
1371         e4b->bd_bitmap_page = page;
1372         e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1373
1374         if (blocks_per_page >= 2) {
1375                 /* buddy and bitmap are on the same page */
1376                 return 0;
1377         }
1378
1379         block++;
1380         pnum = block / blocks_per_page;
1381         page = find_or_create_page(inode->i_mapping, pnum, gfp);
1382         if (!page)
1383                 return -ENOMEM;
1384         BUG_ON(page->mapping != inode->i_mapping);
1385         e4b->bd_buddy_page = page;
1386         return 0;
1387 }
1388
1389 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1390 {
1391         if (e4b->bd_bitmap_page) {
1392                 unlock_page(e4b->bd_bitmap_page);
1393                 put_page(e4b->bd_bitmap_page);
1394         }
1395         if (e4b->bd_buddy_page) {
1396                 unlock_page(e4b->bd_buddy_page);
1397                 put_page(e4b->bd_buddy_page);
1398         }
1399 }
1400
1401 /*
1402  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1403  * block group lock of all groups for this page; do not hold the BG lock when
1404  * calling this routine!
1405  */
1406 static noinline_for_stack
1407 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
1408 {
1410         struct ext4_group_info *this_grp;
1411         struct ext4_buddy e4b;
1412         struct page *page;
1413         int ret = 0;
1414
1415         might_sleep();
1416         mb_debug(sb, "init group %u\n", group);
1417         this_grp = ext4_get_group_info(sb, group);
1418         /*
1419          * This ensures that we don't reinit the buddy cache
1420          * page which maps to the group from which we are already
1421          * allocating. If we are looking at the buddy cache we would
1422          * have taken a reference using ext4_mb_load_buddy and that
1423          * would have pinned the buddy page to the page cache.
1424          * The call to ext4_mb_get_buddy_page_lock will mark the
1425          * page accessed.
1426          */
1427         ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
1428         if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1429                 /*
1430                  * somebody initialized the group
1431                  * return without doing anything
1432                  */
1433                 goto err;
1434         }
1435
1436         page = e4b.bd_bitmap_page;
1437         ret = ext4_mb_init_cache(page, NULL, gfp);
1438         if (ret)
1439                 goto err;
1440         if (!PageUptodate(page)) {
1441                 ret = -EIO;
1442                 goto err;
1443         }
1444
1445         if (e4b.bd_buddy_page == NULL) {
1446                 /*
1447                  * If both the bitmap and buddy are in
1448                  * the same page we don't need to force
1449                  * init the buddy
1450                  */
1451                 ret = 0;
1452                 goto err;
1453         }
1454         /* init buddy cache */
1455         page = e4b.bd_buddy_page;
1456         ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
1457         if (ret)
1458                 goto err;
1459         if (!PageUptodate(page)) {
1460                 ret = -EIO;
1461                 goto err;
1462         }
1463 err:
1464         ext4_mb_put_buddy_page_lock(&e4b);
1465         return ret;
1466 }
1467
1468 /*
1469  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1470  * block group lock of all groups for this page; do not hold the BG lock when
1471  * calling this routine!
1472  */
1473 static noinline_for_stack int
1474 ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1475                        struct ext4_buddy *e4b, gfp_t gfp)
1476 {
1477         int blocks_per_page;
1478         int block;
1479         int pnum;
1480         int poff;
1481         struct page *page;
1482         int ret;
1483         struct ext4_group_info *grp;
1484         struct ext4_sb_info *sbi = EXT4_SB(sb);
1485         struct inode *inode = sbi->s_buddy_cache;
1486
1487         might_sleep();
1488         mb_debug(sb, "load group %u\n", group);
1489
1490         blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1491         grp = ext4_get_group_info(sb, group);
1492
1493         e4b->bd_blkbits = sb->s_blocksize_bits;
1494         e4b->bd_info = grp;
1495         e4b->bd_sb = sb;
1496         e4b->bd_group = group;
1497         e4b->bd_buddy_page = NULL;
1498         e4b->bd_bitmap_page = NULL;
1499
1500         if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1501                 /*
1502                  * we need full data about the group
1503                  * to make a good selection
1504                  */
1505                 ret = ext4_mb_init_group(sb, group, gfp);
1506                 if (ret)
1507                         return ret;
1508         }
1509
1510         /*
1511          * the buddy cache inode stores the block bitmap
1512          * and buddy information in consecutive blocks.
1513          * So for each group we need two blocks.
1514          */
1515         block = group * 2;
1516         pnum = block / blocks_per_page;
1517         poff = block % blocks_per_page;
1518
1519         /* we could use find_or_create_page(), but it locks the page,
1520          * which we'd like to avoid in the fast path ... */
1521         page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1522         if (page == NULL || !PageUptodate(page)) {
1523                 if (page)
1524                         /*
1525                          * drop the page reference and try to
1526                          * get the page with the page lock held.
1527                          * If it is not uptodate, that implies
1528                          * somebody just created the page but
1529                          * has yet to initialize it; locking
1530                          * the page waits for that to finish.
1531                          */
1532                         put_page(page);
1533                 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1534                 if (page) {
1535                         BUG_ON(page->mapping != inode->i_mapping);
1536                         if (!PageUptodate(page)) {
1537                                 ret = ext4_mb_init_cache(page, NULL, gfp);
1538                                 if (ret) {
1539                                         unlock_page(page);
1540                                         goto err;
1541                                 }
1542                                 mb_cmp_bitmaps(e4b, page_address(page) +
1543                                                (poff * sb->s_blocksize));
1544                         }
1545                         unlock_page(page);
1546                 }
1547         }
1548         if (page == NULL) {
1549                 ret = -ENOMEM;
1550                 goto err;
1551         }
1552         if (!PageUptodate(page)) {
1553                 ret = -EIO;
1554                 goto err;
1555         }
1556
1557         /* Pages marked accessed already */
1558         e4b->bd_bitmap_page = page;
1559         e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1560
1561         block++;
1562         pnum = block / blocks_per_page;
1563         poff = block % blocks_per_page;
1564
1565         page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1566         if (page == NULL || !PageUptodate(page)) {
1567                 if (page)
1568                         put_page(page);
1569                 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1570                 if (page) {
1571                         BUG_ON(page->mapping != inode->i_mapping);
1572                         if (!PageUptodate(page)) {
1573                                 ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
1574                                                          gfp);
1575                                 if (ret) {
1576                                         unlock_page(page);
1577                                         goto err;
1578                                 }
1579                         }
1580                         unlock_page(page);
1581                 }
1582         }
1583         if (page == NULL) {
1584                 ret = -ENOMEM;
1585                 goto err;
1586         }
1587         if (!PageUptodate(page)) {
1588                 ret = -EIO;
1589                 goto err;
1590         }
1591
1592         /* Pages marked accessed already */
1593         e4b->bd_buddy_page = page;
1594         e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1595
1596         return 0;
1597
1598 err:
1599         if (page)
1600                 put_page(page);
1601         if (e4b->bd_bitmap_page)
1602                 put_page(e4b->bd_bitmap_page);
1603         if (e4b->bd_buddy_page)
1604                 put_page(e4b->bd_buddy_page);
1605         e4b->bd_buddy = NULL;
1606         e4b->bd_bitmap = NULL;
1607         return ret;
1608 }
1609
1610 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1611                               struct ext4_buddy *e4b)
1612 {
1613         return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
1614 }
1615
1616 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1617 {
1618         if (e4b->bd_bitmap_page)
1619                 put_page(e4b->bd_bitmap_page);
1620         if (e4b->bd_buddy_page)
1621                 put_page(e4b->bd_buddy_page);
1622 }
1623
1624
1625 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1626 {
1627         int order = 1, max;
1628         void *bb;
1629
1630         BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1631         BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1632
1633         while (order <= e4b->bd_blkbits + 1) {
1634                 bb = mb_find_buddy(e4b, order, &max);
1635                 if (!mb_test_bit(block >> order, bb)) {
1636                         /* this block is part of buddy of order 'order' */
1637                         return order;
1638                 }
1639                 order++;
1640         }
1641         return 0;
1642 }
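/*
 * Example (illustrative): in the buddy bitmap a clear bit at order k means
 * the 2^k-cluster chunk is free at that order. If block 20 sits inside a
 * free chunk of order 3, bits 20 >> 1 and 20 >> 2 are still set, but bit
 * 20 >> 3 == 2 is clear, so the function returns 3.
 */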
1643
1644 static void mb_clear_bits(void *bm, int cur, int len)
1645 {
1646         __u32 *addr;
1647
1648         len = cur + len;
1649         while (cur < len) {
1650                 if ((cur & 31) == 0 && (len - cur) >= 32) {
1651                         /* fast path: clear whole word at once */
1652                         addr = bm + (cur >> 3);
1653                         *addr = 0;
1654                         cur += 32;
1655                         continue;
1656                 }
1657                 mb_clear_bit(cur, bm);
1658                 cur++;
1659         }
1660 }
1661
1662 /* clear bits in the given range;
1663  * returns the first zero bit found, if any, -1 otherwise
1664  */
1665 static int mb_test_and_clear_bits(void *bm, int cur, int len)
1666 {
1667         __u32 *addr;
1668         int zero_bit = -1;
1669
1670         len = cur + len;
1671         while (cur < len) {
1672                 if ((cur & 31) == 0 && (len - cur) >= 32) {
1673                         /* fast path: clear whole word at once */
1674                         addr = bm + (cur >> 3);
1675                         if (*addr != (__u32)(-1) && zero_bit == -1)
1676                                 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1677                         *addr = 0;
1678                         cur += 32;
1679                         continue;
1680                 }
1681                 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1682                         zero_bit = cur;
1683                 cur++;
1684         }
1685
1686         return zero_bit;
1687 }
1688
1689 void mb_set_bits(void *bm, int cur, int len)
1690 {
1691         __u32 *addr;
1692
1693         len = cur + len;
1694         while (cur < len) {
1695                 if ((cur & 31) == 0 && (len - cur) >= 32) {
1696                         /* fast path: set whole word at once */
1697                         addr = bm + (cur >> 3);
1698                         *addr = 0xffffffff;
1699                         cur += 32;
1700                         continue;
1701                 }
1702                 mb_set_bit(cur, bm);
1703                 cur++;
1704         }
1705 }
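/*
 * Example (illustrative, hypothetical numbers): mb_set_bits(bm, 30, 40)
 * sets bits 30 and 31 one at a time, covers bits 32..63 with a single
 * 32-bit store at byte offset 32 >> 3 == 4, and finishes bits 64..69
 * individually. The same word-at-a-time fast path is used by
 * mb_clear_bits() and mb_test_and_clear_bits() above.
 */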
1706
1707 static inline int mb_buddy_adjust_border(int *bit, void *bitmap, int side)
1708 {
1709         if (mb_test_bit(*bit + side, bitmap)) {
1710                 mb_clear_bit(*bit, bitmap);
1711                 (*bit) -= side;
1712                 return 1;
1713         } else {
1715                 (*bit) += side;
1716                 mb_set_bit(*bit, bitmap);
1717                 return -1;
1718         }
1719 }
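/*
 * The value returned above is the delta the caller applies to
 * bb_counters[order]: +1 when the neighbour is busy (the border chunk is
 * simply freed at this order), -1 when the neighbour is free (it is
 * consumed so the pair can merge at the next order). See the worked
 * example in mb_buddy_mark_free() below.
 */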
1720
1721 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1722 {
1723         int max;
1724         int order = 1;
1725         void *buddy = mb_find_buddy(e4b, order, &max);
1726
1727         while (buddy) {
1728                 void *buddy2;
1729
1730                 /* Bits in range [first; last] are known to be set since
1731                  * corresponding blocks were allocated. Bits in range
1732                  * (first; last) will stay set because they form buddies on
1733                  * upper layer. We just deal with borders if they don't
1734                  * align with upper layer and then go up.
1735                  * Releasing the entire group amounts to clearing a
1736                  * single bit of the highest order buddy.
1737                  */
1738
1739                 /* Example:
1740                  * ---------------------------------
1741                  * |   1   |   1   |   1   |   1   |
1742                  * ---------------------------------
1743                  * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1744                  * ---------------------------------
1745                  *   0   1   2   3   4   5   6   7
1746                  *      \_____________________/
1747                  *
1748                  * Neither [1] nor [6] is aligned to the layer above.
1749                  * Left neighbour [0] is free, so mark it busy,
1750                  * decrease bb_counters and extend the range to
1751                  * [0; 6].
1752                  * Right neighbour [7] is busy. It can't be coalesced with [6], so
1753                  * mark [6] free, increase bb_counters and shrink range to
1754                  * [0; 5].
1755                  * Then shift range to [0; 2], go up and do the same.
1756                  */
1757
1758
1759                 if (first & 1)
1760                         e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1761                 if (!(last & 1))
1762                         e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1763                 if (first > last)
1764                         break;
1765                 order++;
1766
1767                 if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
1768                         mb_clear_bits(buddy, first, last - first + 1);
1769                         e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1770                         break;
1771                 }
1772                 first >>= 1;
1773                 last >>= 1;
1774                 buddy = buddy2;
1775         }
1776 }
1777
1778 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1779                            int first, int count)
1780 {
1781         int left_is_free = 0;
1782         int right_is_free = 0;
1783         int block;
1784         int last = first + count - 1;
1785         struct super_block *sb = e4b->bd_sb;
1786
1787         if (WARN_ON(count == 0))
1788                 return;
1789         BUG_ON(last >= (sb->s_blocksize << 3));
1790         assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1791         /* Don't bother if the block group is corrupt. */
1792         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1793                 return;
1794
1795         mb_check_buddy(e4b);
1796         mb_free_blocks_double(inode, e4b, first, count);
1797
1798         this_cpu_inc(discard_pa_seq);
1799         e4b->bd_info->bb_free += count;
1800         if (first < e4b->bd_info->bb_first_free)
1801                 e4b->bd_info->bb_first_free = first;
1802
1803         /* access memory sequentially: check left neighbour,
1804          * clear range and then check right neighbour
1805          */
1806         if (first != 0)
1807                 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1808         block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1809         if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1810                 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1811
1812         if (unlikely(block != -1)) {
1813                 struct ext4_sb_info *sbi = EXT4_SB(sb);
1814                 ext4_fsblk_t blocknr;
1815
1816                 blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1817                 blocknr += EXT4_C2B(sbi, block);
1818                 if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
1819                         ext4_grp_locked_error(sb, e4b->bd_group,
1820                                               inode ? inode->i_ino : 0,
1821                                               blocknr,
1822                                               "freeing already freed block (bit %u); block bitmap corrupt.",
1823                                               block);
1824                         ext4_mark_group_bitmap_corrupted(
1825                                 sb, e4b->bd_group,
1826                                 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1827                 }
1828                 goto done;
1829         }
1830
1831         /* let's maintain fragments counter */
1832         if (left_is_free && right_is_free)
1833                 e4b->bd_info->bb_fragments--;
1834         else if (!left_is_free && !right_is_free)
1835                 e4b->bd_info->bb_fragments++;
1836
1837         /* buddy[0] == bd_bitmap is a special case, so handle
1838          * it right away and let mb_buddy_mark_free stay free of
1839          * zero order checks.
1840                  * Check if neighbours are to be coalesced,
1841          * adjust bitmap bb_counters and borders appropriately.
1842          */
1843         if (first & 1) {
1844                 first += !left_is_free;
1845                 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1846         }
1847         if (!(last & 1)) {
1848                 last -= !right_is_free;
1849                 e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1850         }
1851
1852         if (first <= last)
1853                 mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1854
1855 done:
1856         mb_set_largest_free_order(sb, e4b->bd_info);
1857         mb_update_avg_fragment_size(sb, e4b->bd_info);
1858         mb_check_buddy(e4b);
1859 }
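/*
 * Example (illustrative): freeing a range whose left and right neighbours
 * are both free joins three free fragments into one, hence bb_fragments--;
 * freeing with both neighbours busy creates a brand new fragment, hence
 * bb_fragments++; exactly one free neighbour leaves the count unchanged.
 */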
1860
1861 static int mb_find_extent(struct ext4_buddy *e4b, int block,
1862                                 int needed, struct ext4_free_extent *ex)
1863 {
1864         int next = block;
1865         int max, order;
1866         void *buddy;
1867
1868         assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1869         BUG_ON(ex == NULL);
1870
1871         buddy = mb_find_buddy(e4b, 0, &max);
1872         BUG_ON(buddy == NULL);
1873         BUG_ON(block >= max);
1874         if (mb_test_bit(block, buddy)) {
1875                 ex->fe_len = 0;
1876                 ex->fe_start = 0;
1877                 ex->fe_group = 0;
1878                 return 0;
1879         }
1880
1881         /* find actual order */
1882         order = mb_find_order_for_block(e4b, block);
1883         block = block >> order;
1884
1885         ex->fe_len = 1 << order;
1886         ex->fe_start = block << order;
1887         ex->fe_group = e4b->bd_group;
1888
1889         /* calc difference from given start */
1890         next = next - ex->fe_start;
1891         ex->fe_len -= next;
1892         ex->fe_start += next;
1893
1894         while (needed > ex->fe_len &&
1895                mb_find_buddy(e4b, order, &max)) {
1896
1897                 if (block + 1 >= max)
1898                         break;
1899
1900                 next = (block + 1) * (1 << order);
1901                 if (mb_test_bit(next, e4b->bd_bitmap))
1902                         break;
1903
1904                 order = mb_find_order_for_block(e4b, next);
1905
1906                 block = next >> order;
1907                 ex->fe_len += 1 << order;
1908         }
1909
1910         if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
1911                 /* Should never happen! (but apparently sometimes does?!?) */
1912                 WARN_ON(1);
1913                 ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
1914                         "corruption or bug in mb_find_extent "
1915                         "block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
1916                         block, order, needed, ex->fe_group, ex->fe_start,
1917                         ex->fe_len, ex->fe_logical);
1918                 ex->fe_len = 0;
1919                 ex->fe_start = 0;
1920                 ex->fe_group = 0;
1921         }
1922         return ex->fe_len;
1923 }
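/*
 * Example (illustrative): for block == 5 inside a free order-2 chunk
 * [4..7], the code first records fe_start = 4, fe_len = 4, then trims the
 * head so the extent starts at the requested block: fe_start = 5,
 * fe_len = 3. If needed > 3 and block 8 is also free, the loop extends
 * the extent by the order of the free chunk starting at 8, and so on.
 */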
1924
1925 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1926 {
1927         int ord;
1928         int mlen = 0;
1929         int max = 0;
1930         int cur;
1931         int start = ex->fe_start;
1932         int len = ex->fe_len;
1933         unsigned ret = 0;
1934         int len0 = len;
1935         void *buddy;
1936         bool split = false;
1937
1938         BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1939         BUG_ON(e4b->bd_group != ex->fe_group);
1940         assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1941         mb_check_buddy(e4b);
1942         mb_mark_used_double(e4b, start, len);
1943
1944         this_cpu_inc(discard_pa_seq);
1945         e4b->bd_info->bb_free -= len;
1946         if (e4b->bd_info->bb_first_free == start)
1947                 e4b->bd_info->bb_first_free += len;
1948
1949         /* let's maintain fragments counter */
1950         if (start != 0)
1951                 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
1952         if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1953                 max = !mb_test_bit(start + len, e4b->bd_bitmap);
1954         if (mlen && max)
1955                 e4b->bd_info->bb_fragments++;
1956         else if (!mlen && !max)
1957                 e4b->bd_info->bb_fragments--;
1958
1959         /* let's maintain buddy itself */
1960         while (len) {
1961                 if (!split)
1962                         ord = mb_find_order_for_block(e4b, start);
1963
1964                 if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1965                         /* the whole chunk may be allocated at once! */
1966                         mlen = 1 << ord;
1967                         if (!split)
1968                                 buddy = mb_find_buddy(e4b, ord, &max);
1969                         else
1970                                 split = false;
1971                         BUG_ON((start >> ord) >= max);
1972                         mb_set_bit(start >> ord, buddy);
1973                         e4b->bd_info->bb_counters[ord]--;
1974                         start += mlen;
1975                         len -= mlen;
1976                         BUG_ON(len < 0);
1977                         continue;
1978                 }
1979
1980                 /* store for history */
1981                 if (ret == 0)
1982                         ret = len | (ord << 16);
1983
1984                 /* we have to split large buddy */
1985                 BUG_ON(ord <= 0);
1986                 buddy = mb_find_buddy(e4b, ord, &max);
1987                 mb_set_bit(start >> ord, buddy);
1988                 e4b->bd_info->bb_counters[ord]--;
1989
1990                 ord--;
1991                 cur = (start >> ord) & ~1U;
1992                 buddy = mb_find_buddy(e4b, ord, &max);
1993                 mb_clear_bit(cur, buddy);
1994                 mb_clear_bit(cur + 1, buddy);
1995                 e4b->bd_info->bb_counters[ord]++;
1996                 e4b->bd_info->bb_counters[ord]++;
1997                 split = true;
1998         }
1999         mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
2000
2001         mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info);
2002         mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
2003         mb_check_buddy(e4b);
2004
2005         return ret;
2006 }
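/*
 * Example (illustrative): the return value packs the history of the first
 * buddy split as len | (ord << 16). Allocating 3 clusters out of a free
 * order-3 chunk first splits order 3, so ret == 3 | (3 << 16);
 * ext4_mb_use_best_found() later unpacks this into ac_tail (ret & 0xffff)
 * and ac_buddy (ret >> 16). A request that consumes a free chunk exactly
 * never splits and returns 0.
 */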
2007
2008 /*
2009  * Must be called under group lock!
2010  */
2011 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
2012                                         struct ext4_buddy *e4b)
2013 {
2014         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2015         int ret;
2016
2017         BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
2018         BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2019
2020         ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
2021         ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
2022         ret = mb_mark_used(e4b, &ac->ac_b_ex);
2023
2024         /* preallocation can change ac_b_ex, thus we store actually
2025          * allocated blocks for history */
2026         ac->ac_f_ex = ac->ac_b_ex;
2027
2028         ac->ac_status = AC_STATUS_FOUND;
2029         ac->ac_tail = ret & 0xffff;
2030         ac->ac_buddy = ret >> 16;
2031
2032         /*
2033          * take the page reference. We want the page to be pinned
2034          * so that we don't get an ext4_mb_init_cache() call for this
2035          * group until we update the bitmap. That would mean we
2036          * double allocate blocks. The reference is dropped
2037          * in ext4_mb_release_context
2038          */
2039         ac->ac_bitmap_page = e4b->bd_bitmap_page;
2040         get_page(ac->ac_bitmap_page);
2041         ac->ac_buddy_page = e4b->bd_buddy_page;
2042         get_page(ac->ac_buddy_page);
2043         /* store last allocated for subsequent stream allocation */
2044         if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2045                 spin_lock(&sbi->s_md_lock);
2046                 sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
2047                 sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
2048                 spin_unlock(&sbi->s_md_lock);
2049         }
2050         /*
2051          * As we've just preallocated more space than the
2052          * user originally requested, we store the allocated
2053          * space in a special descriptor.
2054          */
2055         if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
2056                 ext4_mb_new_preallocation(ac);
2057
2058 }
2059
2060 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
2061                                         struct ext4_buddy *e4b,
2062                                         int finish_group)
2063 {
2064         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2065         struct ext4_free_extent *bex = &ac->ac_b_ex;
2066         struct ext4_free_extent *gex = &ac->ac_g_ex;
2067         struct ext4_free_extent ex;
2068         int max;
2069
2070         if (ac->ac_status == AC_STATUS_FOUND)
2071                 return;
2072         /*
2073          * We don't want to scan for a whole year
2074          */
2075         if (ac->ac_found > sbi->s_mb_max_to_scan &&
2076                         !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2077                 ac->ac_status = AC_STATUS_BREAK;
2078                 return;
2079         }
2080
2081         /*
2082          * Haven't found good chunk so far, let's continue
2083          */
2084         if (bex->fe_len < gex->fe_len)
2085                 return;
2086
2087         if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
2088                         && bex->fe_group == e4b->bd_group) {
2089                 /* recheck chunk's availability - we don't know
2090                  * when it was found (within this lock-unlock
2091                  * period or not) */
2092                 max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
2093                 if (max >= gex->fe_len) {
2094                         ext4_mb_use_best_found(ac, e4b);
2095                         return;
2096                 }
2097         }
2098 }
2099
2100 /*
2101  * The routine checks whether the found extent is good enough. If it is,
2102  * the extent gets marked used and a flag is set in the context to stop
2103  * scanning. Otherwise, the extent is compared with the previously found
2104  * extent and, if the new one is better, it's stored in the context.
2105  * Later, the best found extent will be used if mballoc can't find a
2106  * good enough extent.
2107  *
2108  * FIXME: real allocation policy is to be designed yet!
2109  */
2110 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
2111                                         struct ext4_free_extent *ex,
2112                                         struct ext4_buddy *e4b)
2113 {
2114         struct ext4_free_extent *bex = &ac->ac_b_ex;
2115         struct ext4_free_extent *gex = &ac->ac_g_ex;
2116
2117         BUG_ON(ex->fe_len <= 0);
2118         BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2119         BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2120         BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
2121
2122         ac->ac_found++;
2123
2124         /*
2125          * The special case - take what you catch first
2126          */
2127         if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2128                 *bex = *ex;
2129                 ext4_mb_use_best_found(ac, e4b);
2130                 return;
2131         }
2132
2133         /*
2134          * Let's check whether the chunk is good enough
2135          */
2136         if (ex->fe_len == gex->fe_len) {
2137                 *bex = *ex;
2138                 ext4_mb_use_best_found(ac, e4b);
2139                 return;
2140         }
2141
2142         /*
2143          * If this is first found extent, just store it in the context
2144          */
2145         if (bex->fe_len == 0) {
2146                 *bex = *ex;
2147                 return;
2148         }
2149
2150         /*
2151          * If new found extent is better, store it in the context
2152          */
2153         if (bex->fe_len < gex->fe_len) {
2154                 /* if the request isn't satisfied, any found extent
2155                  * larger than previous best one is better */
2156                 if (ex->fe_len > bex->fe_len)
2157                         *bex = *ex;
2158         } else if (ex->fe_len > gex->fe_len) {
2159                 /* if the request is satisfied, then we try to find
2160                  * an extent that still satisfies the request, but is
2161                  * smaller than the previous one */
2162                 if (ex->fe_len < bex->fe_len)
2163                         *bex = *ex;
2164         }
2165
2166         ext4_mb_check_limits(ac, e4b, 0);
2167 }
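/*
 * Example (illustrative): with a goal of 16 clusters, finding extents of
 * 12, 20 and 17 in turn leaves bex at 12, then 20 (larger while the goal
 * is still unmet), then 17 (still satisfies the goal but wastes less);
 * an extent of exactly 16 would have been taken on the spot.
 */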
2168
2169 static noinline_for_stack
2170 int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
2171                                         struct ext4_buddy *e4b)
2172 {
2173         struct ext4_free_extent ex = ac->ac_b_ex;
2174         ext4_group_t group = ex.fe_group;
2175         int max;
2176         int err;
2177
2178         BUG_ON(ex.fe_len <= 0);
2179         err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2180         if (err)
2181                 return err;
2182
2183         ext4_lock_group(ac->ac_sb, group);
2184         max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
2185
2186         if (max > 0) {
2187                 ac->ac_b_ex = ex;
2188                 ext4_mb_use_best_found(ac, e4b);
2189         }
2190
2191         ext4_unlock_group(ac->ac_sb, group);
2192         ext4_mb_unload_buddy(e4b);
2193
2194         return 0;
2195 }
2196
2197 static noinline_for_stack
2198 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
2199                                 struct ext4_buddy *e4b)
2200 {
2201         ext4_group_t group = ac->ac_g_ex.fe_group;
2202         int max;
2203         int err;
2204         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2205         struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2206         struct ext4_free_extent ex;
2207
2208         if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
2209                 return 0;
2210         if (grp->bb_free == 0)
2211                 return 0;
2212
2213         err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2214         if (err)
2215                 return err;
2216
2217         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
2218                 ext4_mb_unload_buddy(e4b);
2219                 return 0;
2220         }
2221
2222         ext4_lock_group(ac->ac_sb, group);
2223         max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
2224                              ac->ac_g_ex.fe_len, &ex);
2225         ex.fe_logical = 0xDEADFA11; /* debug value */
2226
2227         if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
2228                 ext4_fsblk_t start;
2229
2230                 start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
2231                         ex.fe_start;
2232                 /* use do_div to get remainder (would be 64-bit modulo) */
2233                 if (do_div(start, sbi->s_stripe) == 0) {
2234                         ac->ac_found++;
2235                         ac->ac_b_ex = ex;
2236                         ext4_mb_use_best_found(ac, e4b);
2237                 }
2238         } else if (max >= ac->ac_g_ex.fe_len) {
2239                 BUG_ON(ex.fe_len <= 0);
2240                 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2241                 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2242                 ac->ac_found++;
2243                 ac->ac_b_ex = ex;
2244                 ext4_mb_use_best_found(ac, e4b);
2245         } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
2246                 /* Sometimes, the caller may want to merge even a small
2247                  * number of blocks into an existing extent */
2248                 BUG_ON(ex.fe_len <= 0);
2249                 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2250                 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2251                 ac->ac_found++;
2252                 ac->ac_b_ex = ex;
2253                 ext4_mb_use_best_found(ac, e4b);
2254         }
2255         ext4_unlock_group(ac->ac_sb, group);
2256         ext4_mb_unload_buddy(e4b);
2257
2258         return 0;
2259 }
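/*
 * Example (illustrative, assumed numbers): with s_stripe == 16 and a
 * group whose first block is 32768, a goal extent at fe_start == 48 maps
 * to physical block 32816; do_div(32816, 16) leaves remainder 0, so the
 * extent is stripe aligned and is accepted.
 */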
2260
2261 /*
2262  * The routine scans buddy structures (not the bitmap!) from the given
2263  * order to the max order, looking for a big enough chunk to satisfy the request
2264  */
2265 static noinline_for_stack
2266 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
2267                                         struct ext4_buddy *e4b)
2268 {
2269         struct super_block *sb = ac->ac_sb;
2270         struct ext4_group_info *grp = e4b->bd_info;
2271         void *buddy;
2272         int i;
2273         int k;
2274         int max;
2275
2276         BUG_ON(ac->ac_2order <= 0);
2277         for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) {
2278                 if (grp->bb_counters[i] == 0)
2279                         continue;
2280
2281                 buddy = mb_find_buddy(e4b, i, &max);
2282                 BUG_ON(buddy == NULL);
2283
2284                 k = mb_find_next_zero_bit(buddy, max, 0);
2285                 if (k >= max) {
2286                         ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
2287                                 "%d free clusters of order %d. But found 0",
2288                                 grp->bb_counters[i], i);
2289                         ext4_mark_group_bitmap_corrupted(ac->ac_sb,
2290                                          e4b->bd_group,
2291                                         EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2292                         break;
2293                 }
2294                 ac->ac_found++;
2295
2296                 ac->ac_b_ex.fe_len = 1 << i;
2297                 ac->ac_b_ex.fe_start = k << i;
2298                 ac->ac_b_ex.fe_group = e4b->bd_group;
2299
2300                 ext4_mb_use_best_found(ac, e4b);
2301
2302                 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);
2303
2304                 if (EXT4_SB(sb)->s_mb_stats)
2305                         atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
2306
2307                 break;
2308         }
2309 }
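/*
 * Example (illustrative): a request for 16 clusters has ac_2order == 4,
 * so the scan starts at the order-4 buddy; the first order i with a
 * non-zero bb_counters[i] is searched for a zero bit k, and the winning
 * extent is simply fe_start = k << i, fe_len = 1 << i.
 */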
2310
2311 /*
2312  * The routine scans the group and measures all found extents.
2313  * In order to optimize scanning, the caller must pass the number of
2314  * free blocks in the group, so the routine can know the upper limit.
2315  */
2316 static noinline_for_stack
2317 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
2318                                         struct ext4_buddy *e4b)
2319 {
2320         struct super_block *sb = ac->ac_sb;
2321         void *bitmap = e4b->bd_bitmap;
2322         struct ext4_free_extent ex;
2323         int i;
2324         int free;
2325
2326         free = e4b->bd_info->bb_free;
2327         if (WARN_ON(free <= 0))
2328                 return;
2329
2330         i = e4b->bd_info->bb_first_free;
2331
2332         while (free && ac->ac_status == AC_STATUS_CONTINUE) {
2333                 i = mb_find_next_zero_bit(bitmap,
2334                                                 EXT4_CLUSTERS_PER_GROUP(sb), i);
2335                 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
2336                         /*
2337                          * If we have a corrupt bitmap, we won't find any
2338                          * free blocks even though group info says we
2339                          * have free blocks
2340                          */
2341                         ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2342                                         "%d free clusters as per "
2343                                         "group info. But bitmap says 0",
2344                                         free);
2345                         ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2346                                         EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2347                         break;
2348                 }
2349
2350                 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
2351                 if (WARN_ON(ex.fe_len <= 0))
2352                         break;
2353                 if (free < ex.fe_len) {
2354                         ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2355                                         "%d free clusters as per "
2356                                         "group info. But got %d blocks",
2357                                         free, ex.fe_len);
2358                         ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2359                                         EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2360                         /*
2361                          * The number of free blocks differs. This mostly
2362                          * indicates that the bitmap is corrupt. So exit
2363                          * without claiming the space.
2364                          */
2365                         break;
2366                 }
2367                 ex.fe_logical = 0xDEADC0DE; /* debug value */
2368                 ext4_mb_measure_extent(ac, &ex, e4b);
2369
2370                 i += ex.fe_len;
2371                 free -= ex.fe_len;
2372         }
2373
2374         ext4_mb_check_limits(ac, e4b, 1);
2375 }
2376
2377 /*
2378  * This is a special case for storage devices like raid5:
2379  * we try to find stripe-aligned chunks for stripe-size-multiple requests
2380  */
2381 static noinline_for_stack
2382 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
2383                                  struct ext4_buddy *e4b)
2384 {
2385         struct super_block *sb = ac->ac_sb;
2386         struct ext4_sb_info *sbi = EXT4_SB(sb);
2387         void *bitmap = e4b->bd_bitmap;
2388         struct ext4_free_extent ex;
2389         ext4_fsblk_t first_group_block;
2390         ext4_fsblk_t a;
2391         ext4_grpblk_t i;
2392         int max;
2393
2394         BUG_ON(sbi->s_stripe == 0);
2395
2396         /* find first stripe-aligned block in group */
2397         first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2398
2399         a = first_group_block + sbi->s_stripe - 1;
2400         do_div(a, sbi->s_stripe);
2401         i = (a * sbi->s_stripe) - first_group_block;
2402
2403         while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2404                 if (!mb_test_bit(i, bitmap)) {
2405                         max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
2406                         if (max >= sbi->s_stripe) {
2407                                 ac->ac_found++;
2408                                 ex.fe_logical = 0xDEADF00D; /* debug value */
2409                                 ac->ac_b_ex = ex;
2410                                 ext4_mb_use_best_found(ac, e4b);
2411                                 break;
2412                         }
2413                 }
2414                 i += sbi->s_stripe;
2415         }
2416 }
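/*
 * Example (illustrative, assumed numbers): with s_stripe == 16 and
 * first_group_block == 1000, a == 1015, do_div() turns it into 63, and
 * i == 63 * 16 - 1000 == 8, i.e. the first stripe-aligned block of this
 * group is at offset 8; the scan then advances in steps of 16 from there.
 */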
2417
2418 /*
2419  * This is also called BEFORE we load the buddy bitmap.
2420  * Returns either true or false, indicating whether the group is
2421  * suitable for the allocation.
2422  */
2423 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
2424                                 ext4_group_t group, int cr)
2425 {
2426         ext4_grpblk_t free, fragments;
2427         int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2428         struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2429
2430         BUG_ON(cr < 0 || cr >= 4);
2431
2432         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2433                 return false;
2434
2435         free = grp->bb_free;
2436         if (free == 0)
2437                 return false;
2438
2439         fragments = grp->bb_fragments;
2440         if (fragments == 0)
2441                 return false;
2442
2443         switch (cr) {
2444         case 0:
2445                 BUG_ON(ac->ac_2order == 0);
2446
2447                 /* Avoid using the first bg of a flexgroup for data files */
2448                 if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2449                     (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2450                     ((group % flex_size) == 0))
2451                         return false;
2452
2453                 if (free < ac->ac_g_ex.fe_len)
2454                         return false;
2455
2456                 if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb))
2457                         return true;
2458
2459                 if (grp->bb_largest_free_order < ac->ac_2order)
2460                         return false;
2461
2462                 return true;
2463         case 1:
2464                 if ((free / fragments) >= ac->ac_g_ex.fe_len)
2465                         return true;
2466                 break;
2467         case 2:
2468                 if (free >= ac->ac_g_ex.fe_len)
2469                         return true;
2470                 break;
2471         case 3:
2472                 return true;
2473         default:
2474                 BUG();
2475         }
2476
2477         return false;
2478 }
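/*
 * Roughly (illustrative summary of the cases above): cr 0 accepts a group
 * only if its buddy has a free chunk of at least the requested order,
 * cr 1 asks for an average fragment at least as large as the request,
 * cr 2 merely requires enough free clusters in total to cover the
 * request, and cr 3 takes anything.
 */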
2479
2480 /*
2481  * This could return negative error code if something goes wrong
2482  * during ext4_mb_init_group(). This should not be called with
2483  * ext4_lock_group() held.
2484  *
2485  * Note: because we are conditionally operating with the group lock in
2486  * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this
2487  * function using __acquire and __release.  This means we need to be
2488  * super careful before messing with the error path handling via "goto
2489  * out"!
2490  */
2491 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
2492                                      ext4_group_t group, int cr)
2493 {
2494         struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2495         struct super_block *sb = ac->ac_sb;
2496         struct ext4_sb_info *sbi = EXT4_SB(sb);
2497         bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
2498         ext4_grpblk_t free;
2499         int ret = 0;
2500
2501         if (sbi->s_mb_stats)
2502                 atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
2503         if (should_lock) {
2504                 ext4_lock_group(sb, group);
2505                 __release(ext4_group_lock_ptr(sb, group));
2506         }
2507         free = grp->bb_free;
2508         if (free == 0)
2509                 goto out;
2510         if (cr <= 2 && free < ac->ac_g_ex.fe_len)
2511                 goto out;
2512         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2513                 goto out;
2514         if (should_lock) {
2515                 __acquire(ext4_group_lock_ptr(sb, group));
2516                 ext4_unlock_group(sb, group);
2517         }
2518
2519         /* We only do this if the grp has never been initialized */
2520         if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
2521                 struct ext4_group_desc *gdp =
2522                         ext4_get_group_desc(sb, group, NULL);
2523                 int ret;
2524
2525                 /* cr=0/1 is a very optimistic search to find large
2526                  * good chunks almost for free.  If buddy data is not
2527                  * ready, then this optimization makes no sense.  But
2528                  * we never skip the first block group in a flex_bg,
2529                  * since this gets used for metadata block allocation,
2530                  * and we want to make sure we locate metadata blocks
2531                  * in the first block group in the flex_bg if possible.
2532                  */
2533                 if (cr < 2 &&
2534                     (!sbi->s_log_groups_per_flex ||
2535                      ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
2536                     !(ext4_has_group_desc_csum(sb) &&
2537                       (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
2538                         return 0;
2539                 ret = ext4_mb_init_group(sb, group, GFP_NOFS);
2540                 if (ret)
2541                         return ret;
2542         }
2543
2544         if (should_lock) {
2545                 ext4_lock_group(sb, group);
2546                 __release(ext4_group_lock_ptr(sb, group));
2547         }
2548         ret = ext4_mb_good_group(ac, group, cr);
2549 out:
2550         if (should_lock) {
2551                 __acquire(ext4_group_lock_ptr(sb, group));
2552                 ext4_unlock_group(sb, group);
2553         }
2554         return ret;
2555 }
2556
2557 /*
2558  * Start prefetching @nr block bitmaps starting at @group.
2559  * Return the next group which needs to be prefetched.
2560  */
2561 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
2562                               unsigned int nr, int *cnt)
2563 {
2564         ext4_group_t ngroups = ext4_get_groups_count(sb);
2565         struct buffer_head *bh;
2566         struct blk_plug plug;
2567
2568         blk_start_plug(&plug);
2569         while (nr-- > 0) {
2570                 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
2571                                                                   NULL);
2572                 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
2573
2574                 /*
2575                  * Prefetch block groups with free blocks; but don't
2576                  * bother if it is marked uninitialized on disk, since
2577                  * it won't require I/O to read.  Also only try to
2578                  * prefetch once, so we avoid the getblk() call, which can
2579                  * be expensive.
2580                  */
2581                 if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
2582                     EXT4_MB_GRP_NEED_INIT(grp) &&
2583                     ext4_free_group_clusters(sb, gdp) > 0 &&
2584                     !(ext4_has_group_desc_csum(sb) &&
2585                       (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2586                         bh = ext4_read_block_bitmap_nowait(sb, group, true);
2587                         if (bh && !IS_ERR(bh)) {
2588                                 if (!buffer_uptodate(bh) && cnt)
2589                                         (*cnt)++;
2590                                 brelse(bh);
2591                         }
2592                 }
2593                 if (++group >= ngroups)
2594                         group = 0;
2595         }
2596         blk_finish_plug(&plug);
2597         return group;
2598 }
2599
2600 /*
2601  * Prefetching reads the block bitmap into the buffer cache; but we
2602  * need to make sure that the buddy bitmap in the page cache has been
2603  * initialized.  Note that ext4_mb_init_group() will block if the I/O
2604  * is not yet completed, or indeed if ext4_mb_prefetch never
2605  * initiated the I/O in the first place.
2606  *
2607  * TODO: We should actually kick off the buddy bitmap setup in a work
2608  * queue when the buffer I/O is completed, so that we don't block
2609  * waiting for the block allocation bitmap read to finish when
2610  * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
2611  */
2612 void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
2613                            unsigned int nr)
2614 {
2615         while (nr-- > 0) {
2616                 struct ext4_group_desc *gdp;
2617                 struct ext4_group_info *grp;
2618
2619                 if (!group)
2620                         group = ext4_get_groups_count(sb);
2621                 group--;
2622                 gdp = ext4_get_group_desc(sb, group, NULL);
2623                 grp = ext4_get_group_info(sb, group);
2624
2625                 if (EXT4_MB_GRP_NEED_INIT(grp) &&
2626                     ext4_free_group_clusters(sb, gdp) > 0 &&
2627                     !(ext4_has_group_desc_csum(sb) &&
2628                       (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2629                         if (ext4_mb_init_group(sb, group, GFP_NOFS))
2630                                 break;
2631                 }
2632         }
2633 }
2634
2635 static noinline_for_stack int
2636 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2637 {
2638         ext4_group_t prefetch_grp = 0, ngroups, group, i;
2639         int cr = -1;
2640         int err = 0, first_err = 0;
2641         unsigned int nr = 0, prefetch_ios = 0;
2642         struct ext4_sb_info *sbi;
2643         struct super_block *sb;
2644         struct ext4_buddy e4b;
2645         int lost;
2646
2647         sb = ac->ac_sb;
2648         sbi = EXT4_SB(sb);
2649         ngroups = ext4_get_groups_count(sb);
2650         /* non-extent files are limited to low blocks/groups */
2651         if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2652                 ngroups = sbi->s_blockfile_groups;
2653
2654         BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2655
2656         /* first, try the goal */
2657         err = ext4_mb_find_by_goal(ac, &e4b);
2658         if (err || ac->ac_status == AC_STATUS_FOUND)
2659                 goto out;
2660
2661         if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2662                 goto out;
2663
2664         /*
2665          * ac->ac_2order is set only if the fe_len is a power of 2;
2666          * if ac->ac_2order is set we also set the criteria to 0 so that
2667          * we try exact allocation using the buddy.
2668          */
2669         i = fls(ac->ac_g_ex.fe_len);
2670         ac->ac_2order = 0;
2671         /*
2672          * We search using buddy data only if the order of the request
2673          * is greater than or equal to sbi->s_mb_order2_reqs.
2674          * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req.
2675          * We also support searching for power-of-two requests only for
2676          * requests up to the maximum buddy size we have constructed.
2677          */
2678         if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
2679                 /*
2680                  * This should tell if fe_len is exactly power of 2
2681                  */
2682                 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2683                         ac->ac_2order = array_index_nospec(i - 1,
2684                                                            MB_NUM_ORDERS(sb));
2685         }
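        /*
         * Example (illustrative): fe_len == 16 gives i == fls(16) == 5 and
         * 16 & ~(1 << 4) == 0, so ac_2order becomes 4 and the scan below
         * starts at cr 0; fe_len == 24 fails the power-of-two test
         * (24 & ~16 == 8), leaves ac_2order at 0 and starts at cr 1.
         */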
2686
2687         /* if stream allocation is enabled, use global goal */
2688         if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2689                 /* TBD: may be a hot spot */
2690                 spin_lock(&sbi->s_md_lock);
2691                 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2692                 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2693                 spin_unlock(&sbi->s_md_lock);
2694         }
2695
2696         /* Let's just scan groups to find more or less suitable blocks */
2697         cr = ac->ac_2order ? 0 : 1;
2698         /*
2699          * cr == 0 tries to get an exact allocation,
2700          * cr == 3 tries to get anything
2701          */
2702 repeat:
2703         for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2704                 ac->ac_criteria = cr;
2705                 /*
2706                  * searching for the right group start
2707                  * from the goal value specified
2708                  */
2709                 group = ac->ac_g_ex.fe_group;
2710                 ac->ac_last_optimal_group = group;
2711                 ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
2712                 prefetch_grp = group;
2713
2714                 for (i = 0; i < ngroups; group = next_linear_group(ac, group, ngroups),
2715                              i++) {
2716                         int ret = 0, new_cr;
2717
2718                         cond_resched();
2719
2720                         ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups);
2721                         if (new_cr != cr) {
2722                                 cr = new_cr;
2723                                 goto repeat;
2724                         }
2725
2726                         /*
2727                          * Batch reads of the block allocation bitmaps
2728                          * to get multiple READs in flight; limit
2729                          * prefetching at cr=0/1, otherwise mballoc can
2730                          * spend a lot of time loading imperfect groups
2731                          */
2732                         if ((prefetch_grp == group) &&
2733                             (cr > 1 ||
2734                              prefetch_ios < sbi->s_mb_prefetch_limit)) {
2735                                 unsigned int curr_ios = prefetch_ios;
2736
2737                                 nr = sbi->s_mb_prefetch;
2738                                 if (ext4_has_feature_flex_bg(sb)) {
2739                                         nr = 1 << sbi->s_log_groups_per_flex;
2740                                         nr -= group & (nr - 1);
2741                                         nr = min(nr, sbi->s_mb_prefetch);
2742                                 }
2743                                 prefetch_grp = ext4_mb_prefetch(sb, group,
2744                                                         nr, &prefetch_ios);
2745                                 if (prefetch_ios == curr_ios)
2746                                         nr = 0;
2747                         }
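			/*
			 * Worked example (illustrative): with
			 * s_log_groups_per_flex = 4, nr starts at 16; for
			 * group = 5, nr -= 5 & 15 leaves nr = 11, i.e. we
			 * prefetch only to the end of the current flex
			 * group before clamping nr to s_mb_prefetch.
			 */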
2748
2749                         /* This now checks without needing the buddy page */
2750                         ret = ext4_mb_good_group_nolock(ac, group, cr);
2751                         if (ret <= 0) {
2752                                 if (!first_err)
2753                                         first_err = ret;
2754                                 continue;
2755                         }
2756
2757                         err = ext4_mb_load_buddy(sb, group, &e4b);
2758                         if (err)
2759                                 goto out;
2760
2761                         ext4_lock_group(sb, group);
2762
2763                         /*
2764                          * We need to check again after locking the
2765                          * block group
2766                          */
2767                         ret = ext4_mb_good_group(ac, group, cr);
2768                         if (ret == 0) {
2769                                 ext4_unlock_group(sb, group);
2770                                 ext4_mb_unload_buddy(&e4b);
2771                                 continue;
2772                         }
2773
2774                         ac->ac_groups_scanned++;
2775                         if (cr == 0)
2776                                 ext4_mb_simple_scan_group(ac, &e4b);
2777                         else if (cr == 1 && sbi->s_stripe &&
2778                                         !(ac->ac_g_ex.fe_len % sbi->s_stripe))
2779                                 ext4_mb_scan_aligned(ac, &e4b);
2780                         else
2781                                 ext4_mb_complex_scan_group(ac, &e4b);
2782
2783                         ext4_unlock_group(sb, group);
2784                         ext4_mb_unload_buddy(&e4b);
2785
2786                         if (ac->ac_status != AC_STATUS_CONTINUE)
2787                                 break;
2788                 }
2789                 /* Processed all groups but haven't found blocks */
2790                 if (sbi->s_mb_stats && i == ngroups)
2791                         atomic64_inc(&sbi->s_bal_cX_failed[cr]);
2792         }
2793
2794         if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2795             !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2796                 /*
2797                  * We've been searching too long. Let's try to allocate
2798                  * the best chunk we've found so far
2799                  */
2800                 ext4_mb_try_best_found(ac, &e4b);
2801                 if (ac->ac_status != AC_STATUS_FOUND) {
2802                         /*
2803                          * Someone luckier has already allocated it.
2804                          * The only thing we can do is just take the
2805                          * first found block(s)
2806                          */
2807                         lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
2808                         mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
2809                                  ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
2810                                  ac->ac_b_ex.fe_len, lost);
2811
2812                         ac->ac_b_ex.fe_group = 0;
2813                         ac->ac_b_ex.fe_start = 0;
2814                         ac->ac_b_ex.fe_len = 0;
2815                         ac->ac_status = AC_STATUS_CONTINUE;
2816                         ac->ac_flags |= EXT4_MB_HINT_FIRST;
2817                         cr = 3;
2818                         goto repeat;
2819                 }
2820         }
2821
2822         if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND)
2823                 atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]);
2824 out:
2825         if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2826                 err = first_err;
2827
2828         mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
2829                  ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
2830                  ac->ac_flags, cr, err);
2831
2832         if (nr)
2833                 ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
2834
2835         return err;
2836 }
2837
2838 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2839 {
2840         struct super_block *sb = pde_data(file_inode(seq->file));
2841         ext4_group_t group;
2842
2843         if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2844                 return NULL;
2845         group = *pos + 1;
2846         return (void *) ((unsigned long) group);
2847 }
2848
2849 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2850 {
2851         struct super_block *sb = pde_data(file_inode(seq->file));
2852         ext4_group_t group;
2853
2854         ++*pos;
2855         if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2856                 return NULL;
2857         group = *pos + 1;
2858         return (void *) ((unsigned long) group);
2859 }
2860
2861 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2862 {
2863         struct super_block *sb = pde_data(file_inode(seq->file));
2864         ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2865         int i;
2866         int err, buddy_loaded = 0;
2867         struct ext4_buddy e4b;
2868         struct ext4_group_info *grinfo;
2869         unsigned char blocksize_bits = min_t(unsigned char,
2870                                              sb->s_blocksize_bits,
2871                                              EXT4_MAX_BLOCK_LOG_SIZE);
2872         struct sg {
2873                 struct ext4_group_info info;
2874                 ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
2875         } sg;
2876
2877         group--;
2878         if (group == 0)
2879                 seq_puts(seq, "#group: free  frags first ["
2880                               " 2^0   2^1   2^2   2^3   2^4   2^5   2^6  "
2881                               " 2^7   2^8   2^9   2^10  2^11  2^12  2^13  ]\n");
2882
2883         i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2884                 sizeof(struct ext4_group_info);
2885
2886         grinfo = ext4_get_group_info(sb, group);
2887         /* Load the group info in memory only if not already loaded. */
2888         if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
2889                 err = ext4_mb_load_buddy(sb, group, &e4b);
2890                 if (err) {
2891                         seq_printf(seq, "#%-5u: I/O error\n", group);
2892                         return 0;
2893                 }
2894                 buddy_loaded = 1;
2895         }
2896
2897         memcpy(&sg, ext4_get_group_info(sb, group), i);
2898
2899         if (buddy_loaded)
2900                 ext4_mb_unload_buddy(&e4b);
2901
2902         seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2903                         sg.info.bb_fragments, sg.info.bb_first_free);
2904         for (i = 0; i <= 13; i++)
2905                 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
2906                                 sg.info.bb_counters[i] : 0);
2907         seq_puts(seq, " ]\n");
2908
2909         return 0;
2910 }
2911
2912 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2913 {
2914 }
2915
2916 const struct seq_operations ext4_mb_seq_groups_ops = {
2917         .start  = ext4_mb_seq_groups_start,
2918         .next   = ext4_mb_seq_groups_next,
2919         .stop   = ext4_mb_seq_groups_stop,
2920         .show   = ext4_mb_seq_groups_show,
2921 };
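/*
 * Usage example (illustrative; the device name is assumed): the
 * operations above back the mb_groups file, so the per-group buddy
 * statistics can be inspected with e.g.
 *
 *	cat /proc/fs/ext4/sda1/mb_groups
 *
 * printing one line per group in the format emitted by
 * ext4_mb_seq_groups_show().
 */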
2922
2923 int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
2924 {
2925         struct super_block *sb = seq->private;
2926         struct ext4_sb_info *sbi = EXT4_SB(sb);
2927
2928         seq_puts(seq, "mballoc:\n");
2929         if (!sbi->s_mb_stats) {
2930                 seq_puts(seq, "\tmb stats collection turned off.\n");
2931                 seq_puts(seq, "\tTo enable, please write \"1\" to sysfs file mb_stats.\n");
2932                 return 0;
2933         }
2934         seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs));
2935         seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success));
2936
2937         seq_printf(seq, "\tgroups_scanned: %u\n",  atomic_read(&sbi->s_bal_groups_scanned));
2938
2939         seq_puts(seq, "\tcr0_stats:\n");
2940         seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[0]));
2941         seq_printf(seq, "\t\tgroups_considered: %llu\n",
2942                    atomic64_read(&sbi->s_bal_cX_groups_considered[0]));
2943         seq_printf(seq, "\t\tuseless_loops: %llu\n",
2944                    atomic64_read(&sbi->s_bal_cX_failed[0]));
2945         seq_printf(seq, "\t\tbad_suggestions: %u\n",
2946                    atomic_read(&sbi->s_bal_cr0_bad_suggestions));
2947
2948         seq_puts(seq, "\tcr1_stats:\n");
2949         seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[1]));
2950         seq_printf(seq, "\t\tgroups_considered: %llu\n",
2951                    atomic64_read(&sbi->s_bal_cX_groups_considered[1]));
2952         seq_printf(seq, "\t\tuseless_loops: %llu\n",
2953                    atomic64_read(&sbi->s_bal_cX_failed[1]));
2954         seq_printf(seq, "\t\tbad_suggestions: %u\n",
2955                    atomic_read(&sbi->s_bal_cr1_bad_suggestions));
2956
2957         seq_puts(seq, "\tcr2_stats:\n");
2958         seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[2]));
2959         seq_printf(seq, "\t\tgroups_considered: %llu\n",
2960                    atomic64_read(&sbi->s_bal_cX_groups_considered[2]));
2961         seq_printf(seq, "\t\tuseless_loops: %llu\n",
2962                    atomic64_read(&sbi->s_bal_cX_failed[2]));
2963
2964         seq_puts(seq, "\tcr3_stats:\n");
2965         seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[3]));
2966         seq_printf(seq, "\t\tgroups_considered: %llu\n",
2967                    atomic64_read(&sbi->s_bal_cX_groups_considered[3]));
2968         seq_printf(seq, "\t\tuseless_loops: %llu\n",
2969                    atomic64_read(&sbi->s_bal_cX_failed[3]));
2970         seq_printf(seq, "\textents_scanned: %u\n", atomic_read(&sbi->s_bal_ex_scanned));
2971         seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
2972         seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
2973         seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks));
2974         seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks));
2975
2976         seq_printf(seq, "\tbuddies_generated: %u/%u\n",
2977                    atomic_read(&sbi->s_mb_buddies_generated),
2978                    ext4_get_groups_count(sb));
2979         seq_printf(seq, "\tbuddies_time_used: %llu\n",
2980                    atomic64_read(&sbi->s_mb_generation_time));
2981         seq_printf(seq, "\tpreallocated: %u\n",
2982                    atomic_read(&sbi->s_mb_preallocated));
2983         seq_printf(seq, "\tdiscarded: %u\n",
2984                    atomic_read(&sbi->s_mb_discarded));
2985         return 0;
2986 }
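/*
 * Usage example (illustrative; the device name is assumed): the
 * counters above are only collected once mb_stats is enabled, e.g.
 *
 *	echo 1 > /sys/fs/ext4/sda1/mb_stats
 *	cat /proc/fs/ext4/sda1/mb_stats
 */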
2987
2988 static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos)
2989 __acquires(&EXT4_SB(sb)->s_mb_rb_lock)
2990 {
2991         struct super_block *sb = pde_data(file_inode(seq->file));
2992         unsigned long position;
2993
2994         read_lock(&EXT4_SB(sb)->s_mb_rb_lock);
2995
2996         if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1)
2997                 return NULL;
2998         position = *pos + 1;
2999         return (void *) ((unsigned long) position);
3000 }
3001
3002 static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos)
3003 {
3004         struct super_block *sb = pde_data(file_inode(seq->file));
3005         unsigned long position;
3006
3007         ++*pos;
3008         if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1)
3009                 return NULL;
3010         position = *pos + 1;
3011         return (void *) ((unsigned long) position);
3012 }
3013
3014 static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
3015 {
3016         struct super_block *sb = pde_data(file_inode(seq->file));
3017         struct ext4_sb_info *sbi = EXT4_SB(sb);
3018         unsigned long position = ((unsigned long) v);
3019         struct ext4_group_info *grp;
3020         struct rb_node *n;
3021         unsigned int count, min, max;
3022
3023         position--;
3024         if (position >= MB_NUM_ORDERS(sb)) {
3025                 seq_puts(seq, "fragment_size_tree:\n");
3026                 n = rb_first(&sbi->s_mb_avg_fragment_size_root);
3027                 if (!n) {
3028                         seq_puts(seq, "\ttree_min: 0\n\ttree_max: 0\n\ttree_nodes: 0\n");
3029                         return 0;
3030                 }
3031                 grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb);
3032                 min = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0;
3033                 count = 1;
3034                 while (rb_next(n)) {
3035                         count++;
3036                         n = rb_next(n);
3037                 }
3038                 grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb);
3039                 max = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0;
3040
3041                 seq_printf(seq, "\ttree_min: %u\n\ttree_max: %u\n\ttree_nodes: %u\n",
3042                            min, max, count);
3043                 return 0;
3044         }
3045
3046         if (position == 0) {
3047                 seq_printf(seq, "optimize_scan: %d\n",
3048                            test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0);
3049                 seq_puts(seq, "max_free_order_lists:\n");
3050         }
3051         count = 0;
3052         list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
3053                             bb_largest_free_order_node)
3054                 count++;
3055         seq_printf(seq, "\tlist_order_%u_groups: %u\n",
3056                    (unsigned int)position, count);
3057
3058         return 0;
3059 }
3060
3061 static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
3062 __releases(&EXT4_SB(sb)->s_mb_rb_lock)
3063 {
3064         struct super_block *sb = pde_data(file_inode(seq->file));
3065
3066         read_unlock(&EXT4_SB(sb)->s_mb_rb_lock);
3067 }
3068
3069 const struct seq_operations ext4_mb_seq_structs_summary_ops = {
3070         .start  = ext4_mb_seq_structs_summary_start,
3071         .next   = ext4_mb_seq_structs_summary_next,
3072         .stop   = ext4_mb_seq_structs_summary_stop,
3073         .show   = ext4_mb_seq_structs_summary_show,
3074 };
3075
3076 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
3077 {
3078         int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3079         struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
3080
3081         BUG_ON(!cachep);
3082         return cachep;
3083 }
3084
3085 /*
3086  * Allocate the top-level s_group_info array for the specified number
3087  * of groups
3088  */
3089 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
3090 {
3091         struct ext4_sb_info *sbi = EXT4_SB(sb);
3092         unsigned size;
3093         struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
3094
3095         size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
3096                 EXT4_DESC_PER_BLOCK_BITS(sb);
3097         if (size <= sbi->s_group_info_size)
3098                 return 0;
3099
3100         size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
3101         new_groupinfo = kvzalloc(size, GFP_KERNEL);
3102         if (!new_groupinfo) {
3103                 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
3104                 return -ENOMEM;
3105         }
3106         rcu_read_lock();
3107         old_groupinfo = rcu_dereference(sbi->s_group_info);
3108         if (old_groupinfo)
3109                 memcpy(new_groupinfo, old_groupinfo,
3110                        sbi->s_group_info_size * sizeof(*sbi->s_group_info));
3111         rcu_read_unlock();
3112         rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
3113         sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
3114         if (old_groupinfo)
3115                 ext4_kvfree_array_rcu(old_groupinfo);
3116         ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
3117                    sbi->s_group_info_size);
3118         return 0;
3119 }
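/*
 * Worked example (illustrative; assumes 4k blocks with 128 group
 * descriptors per block, i.e. EXT4_DESC_PER_BLOCK_BITS == 7): for
 * ngroups = 1000, size = (1000 + 127) >> 7 = 8 second-level tables,
 * and the top-level array is then sized as
 * roundup_pow_of_two(8 * sizeof(*sbi->s_group_info)).
 */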
3120
3121 /* Create and initialize ext4_group_info data for the given group. */
3122 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
3123                           struct ext4_group_desc *desc)
3124 {
3125         int i;
3126         int metalen = 0;
3127         int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
3128         struct ext4_sb_info *sbi = EXT4_SB(sb);
3129         struct ext4_group_info **meta_group_info;
3130         struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3131
3132         /*
3133          * First check if this group is the first of a descriptor block
3134          * worth of groups. If so, we have to allocate a new table of
3135          * pointers to ext4_group_info structures
3136          */
3137         if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3138                 metalen = sizeof(*meta_group_info) <<
3139                         EXT4_DESC_PER_BLOCK_BITS(sb);
3140                 meta_group_info = kmalloc(metalen, GFP_NOFS);
3141                 if (meta_group_info == NULL) {
3142                         ext4_msg(sb, KERN_ERR, "can't allocate mem "
3143                                  "for a buddy group");
3144                         goto exit_meta_group_info;
3145                 }
3146                 rcu_read_lock();
3147                 rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
3148                 rcu_read_unlock();
3149         }
3150
3151         meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
3152         i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
3153
3154         meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
3155         if (meta_group_info[i] == NULL) {
3156                 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
3157                 goto exit_group_info;
3158         }
3159         set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
3160                 &(meta_group_info[i]->bb_state));
3161
3162         /*
3163          * initialize bb_free to be able to skip
3164          * empty groups without initialization
3165          */
3166         if (ext4_has_group_desc_csum(sb) &&
3167             (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3168                 meta_group_info[i]->bb_free =
3169                         ext4_free_clusters_after_init(sb, group, desc);
3170         } else {
3171                 meta_group_info[i]->bb_free =
3172                         ext4_free_group_clusters(sb, desc);
3173         }
3174
3175         INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
3176         init_rwsem(&meta_group_info[i]->alloc_sem);
3177         meta_group_info[i]->bb_free_root = RB_ROOT;
3178         INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
3179         RB_CLEAR_NODE(&meta_group_info[i]->bb_avg_fragment_size_rb);
3180         meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
3181         meta_group_info[i]->bb_group = group;
3182
3183         mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
3184         return 0;
3185
3186 exit_group_info:
3187         /* If a meta_group_info table has been allocated, release it now */
3188         if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3189                 struct ext4_group_info ***group_info;
3190
3191                 rcu_read_lock();
3192                 group_info = rcu_dereference(sbi->s_group_info);
3193                 kfree(group_info[idx]);
3194                 group_info[idx] = NULL;
3195                 rcu_read_unlock();
3196         }
3197 exit_meta_group_info:
3198         return -ENOMEM;
3199 } /* ext4_mb_add_groupinfo */
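/*
 * Worked example (illustrative; again assumes 128 descriptors per
 * block): group 130 lands in second-level table idx = 130 >> 7 = 1,
 * slot i = 130 & 127 = 2, matching the idx and i computations above.
 */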
3200
3201 static int ext4_mb_init_backend(struct super_block *sb)
3202 {
3203         ext4_group_t ngroups = ext4_get_groups_count(sb);
3204         ext4_group_t i;
3205         struct ext4_sb_info *sbi = EXT4_SB(sb);
3206         int err;
3207         struct ext4_group_desc *desc;
3208         struct ext4_group_info ***group_info;
3209         struct kmem_cache *cachep;
3210
3211         err = ext4_mb_alloc_groupinfo(sb, ngroups);
3212         if (err)
3213                 return err;
3214
3215         sbi->s_buddy_cache = new_inode(sb);
3216         if (sbi->s_buddy_cache == NULL) {
3217                 ext4_msg(sb, KERN_ERR, "can't get new inode");
3218                 goto err_freesgi;
3219         }
3220         /* To avoid potentially colliding with a valid on-disk inode number,
3221          * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
3222          * not in the inode hash, so it should never be found by iget(), but
3223          * this will avoid confusion if it ever shows up during debugging. */
3224         sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
3225         EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
3226         for (i = 0; i < ngroups; i++) {
3227                 cond_resched();
3228                 desc = ext4_get_group_desc(sb, i, NULL);
3229                 if (desc == NULL) {
3230                         ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
3231                         goto err_freebuddy;
3232                 }
3233                 if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
3234                         goto err_freebuddy;
3235         }
3236
3237         if (ext4_has_feature_flex_bg(sb)) {
3238                 /* a single flex group is supposed to be read by a single IO.
3239                  * 2 ^ s_log_groups_per_flex must not overflow s_mb_prefetch,
3240                  * which is an unsigned integer, so the maximum shift is 32.
3241                  */
3242                 if (sbi->s_es->s_log_groups_per_flex >= 32) {
3243                         ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
3244                         goto err_freebuddy;
3245                 }
3246                 sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
3247                         BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
3248                 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
3249         } else {
3250                 sbi->s_mb_prefetch = 32;
3251         }
3252         if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
3253                 sbi->s_mb_prefetch = ext4_get_groups_count(sb);
3254         /* How many real IOs to prefetch within a single allocation at cr=0.
3255          * Given that cr=0 is a CPU-bound optimization we shouldn't try to
3256          * load too many groups; at some point we should start to use what
3257          * we've got in memory.
3258          * With an average random access time of 5ms, it'd take a second to
3259          * get 200 groups (* N with flex_bg), so let's make this limit 4.
3260          */
3261         sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
3262         if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
3263                 sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
3264
3265         return 0;
3266
3267 err_freebuddy:
3268         cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3269         while (i-- > 0)
3270                 kmem_cache_free(cachep, ext4_get_group_info(sb, i));
3271         i = sbi->s_group_info_size;
3272         rcu_read_lock();
3273         group_info = rcu_dereference(sbi->s_group_info);
3274         while (i-- > 0)
3275                 kfree(group_info[i]);
3276         rcu_read_unlock();
3277         iput(sbi->s_buddy_cache);
3278 err_freesgi:
3279         rcu_read_lock();
3280         kvfree(rcu_dereference(sbi->s_group_info));
3281         rcu_read_unlock();
3282         return -ENOMEM;
3283 }
3284
3285 static void ext4_groupinfo_destroy_slabs(void)
3286 {
3287         int i;
3288
3289         for (i = 0; i < NR_GRPINFO_CACHES; i++) {
3290                 kmem_cache_destroy(ext4_groupinfo_caches[i]);
3291                 ext4_groupinfo_caches[i] = NULL;
3292         }
3293 }
3294
3295 static int ext4_groupinfo_create_slab(size_t size)
3296 {
3297         static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
3298         int slab_size;
3299         int blocksize_bits = order_base_2(size);
3300         int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3301         struct kmem_cache *cachep;
3302
3303         if (cache_index >= NR_GRPINFO_CACHES)
3304                 return -EINVAL;
3305
3306         if (unlikely(cache_index < 0))
3307                 cache_index = 0;
3308
3309         mutex_lock(&ext4_grpinfo_slab_create_mutex);
3310         if (ext4_groupinfo_caches[cache_index]) {
3311                 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3312                 return 0;       /* Already created */
3313         }
3314
3315         slab_size = offsetof(struct ext4_group_info,
3316                                 bb_counters[blocksize_bits + 2]);
3317
3318         cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
3319                                         slab_size, 0, SLAB_RECLAIM_ACCOUNT,
3320                                         NULL);
3321
3322         ext4_groupinfo_caches[cache_index] = cachep;
3323
3324         mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3325         if (!cachep) {
3326                 printk(KERN_EMERG
3327                        "EXT4-fs: no memory for groupinfo slab cache\n");
3328                 return -ENOMEM;
3329         }
3330
3331         return 0;
3332 }
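/*
 * Worked example (illustrative): for a 4k block size
 * (blocksize_bits == 12) the slab must hold bb_counters[0..13], so
 * slab_size = offsetof(struct ext4_group_info, bb_counters[14]),
 * matching the 2^0..2^13 columns printed for mb_groups above.
 */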
3333
3334 static void ext4_discard_work(struct work_struct *work)
3335 {
3336         struct ext4_sb_info *sbi = container_of(work,
3337                         struct ext4_sb_info, s_discard_work);
3338         struct super_block *sb = sbi->s_sb;
3339         struct ext4_free_data *fd, *nfd;
3340         struct ext4_buddy e4b;
3341         struct list_head discard_list;
3342         ext4_group_t grp, load_grp;
3343         int err = 0;
3344
3345         INIT_LIST_HEAD(&discard_list);
3346         spin_lock(&sbi->s_md_lock);
3347         list_splice_init(&sbi->s_discard_list, &discard_list);
3348         spin_unlock(&sbi->s_md_lock);
3349
3350         load_grp = UINT_MAX;
3351         list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) {
3352                 /*
3353                  * If the filesystem is unmounting, out of memory, or
3354                  * suffering from lack of space, give up on the discard
3355                  */
3356                 if ((sb->s_flags & SB_ACTIVE) && !err &&
3357                     !atomic_read(&sbi->s_retry_alloc_pending)) {
3358                         grp = fd->efd_group;
3359                         if (grp != load_grp) {
3360                                 if (load_grp != UINT_MAX)
3361                                         ext4_mb_unload_buddy(&e4b);
3362
3363                                 err = ext4_mb_load_buddy(sb, grp, &e4b);
3364                                 if (err) {
3365                                         kmem_cache_free(ext4_free_data_cachep, fd);
3366                                         load_grp = UINT_MAX;
3367                                         continue;
3368                                 } else {
3369                                         load_grp = grp;
3370                                 }
3371                         }
3372
3373                         ext4_lock_group(sb, grp);
3374                         ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster,
3375                                                 fd->efd_start_cluster + fd->efd_count - 1, 1);
3376                         ext4_unlock_group(sb, grp);
3377                 }
3378                 kmem_cache_free(ext4_free_data_cachep, fd);
3379         }
3380
3381         if (load_grp != UINT_MAX)
3382                 ext4_mb_unload_buddy(&e4b);
3383 }
3384
3385 int ext4_mb_init(struct super_block *sb)
3386 {
3387         struct ext4_sb_info *sbi = EXT4_SB(sb);
3388         unsigned i, j;
3389         unsigned offset, offset_incr;
3390         unsigned max;
3391         int ret;
3392
3393         i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets);
3394
3395         sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
3396         if (sbi->s_mb_offsets == NULL) {
3397                 ret = -ENOMEM;
3398                 goto out;
3399         }
3400
3401         i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs);
3402         sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
3403         if (sbi->s_mb_maxs == NULL) {
3404                 ret = -ENOMEM;
3405                 goto out;
3406         }
3407
3408         ret = ext4_groupinfo_create_slab(sb->s_blocksize);
3409         if (ret < 0)
3410                 goto out;
3411
3412         /* order 0 is regular bitmap */
3413         sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
3414         sbi->s_mb_offsets[0] = 0;
3415
3416         i = 1;
3417         offset = 0;
3418         offset_incr = 1 << (sb->s_blocksize_bits - 1);
3419         max = sb->s_blocksize << 2;
3420         do {
3421                 sbi->s_mb_offsets[i] = offset;
3422                 sbi->s_mb_maxs[i] = max;
3423                 offset += offset_incr;
3424                 offset_incr = offset_incr >> 1;
3425                 max = max >> 1;
3426                 i++;
3427         } while (i < MB_NUM_ORDERS(sb));
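	/*
	 * Worked example (illustrative): for a 4k block size this loop
	 * produces s_mb_maxs = {32768, 16384, 8192, 4096, ...} bits and
	 * s_mb_offsets = {0, 0, 2048, 3072, 3584, ...}, i.e. each buddy
	 * order's bitmap is half the size of the previous one.
	 */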
3428
3429         sbi->s_mb_avg_fragment_size_root = RB_ROOT;
3430         sbi->s_mb_largest_free_orders =
3431                 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3432                         GFP_KERNEL);
3433         if (!sbi->s_mb_largest_free_orders) {
3434                 ret = -ENOMEM;
3435                 goto out;
3436         }
3437         sbi->s_mb_largest_free_orders_locks =
3438                 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3439                         GFP_KERNEL);
3440         if (!sbi->s_mb_largest_free_orders_locks) {
3441                 ret = -ENOMEM;
3442                 goto out;
3443         }
3444         for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3445                 INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
3446                 rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
3447         }
3448         rwlock_init(&sbi->s_mb_rb_lock);
3449
3450         spin_lock_init(&sbi->s_md_lock);
3451         sbi->s_mb_free_pending = 0;
3452         INIT_LIST_HEAD(&sbi->s_freed_data_list);
3453         INIT_LIST_HEAD(&sbi->s_discard_list);
3454         INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
3455         atomic_set(&sbi->s_retry_alloc_pending, 0);
3456
3457         sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
3458         sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
3459         sbi->s_mb_stats = MB_DEFAULT_STATS;
3460         sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
3461         sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
3462         sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC;
3463         /*
3464          * The default group preallocation is 512, which for 4k block
3465          * sizes translates to 2 megabytes.  However for bigalloc file
3466          * systems, this is probably too big (i.e, if the cluster size
3467          * is 1 megabyte, then group preallocation size becomes half a
3468          * gigabyte!).  As a default, we will keep a two megabyte
3469          * group prealloc size for cluster sizes up to 64k, and after
3470          * that, we will force a minimum group preallocation size of
3471          * 32 clusters.  This translates to 8 megs when the cluster
3472          * size is 256k, and 32 megs when the cluster size is 1 meg,
3473          * which seems reasonable as a default.
3474          */
3475         sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
3476                                        sbi->s_cluster_bits, 32);
3477         /*
3478          * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
3479          * to the lowest multiple of s_stripe which is bigger than
3480          * the s_mb_group_prealloc as determined above. We want
3481          * the preallocation size to be an exact multiple of the
3482          * RAID stripe size so that preallocations don't fragment
3483          * the stripes.
3484          */
3485         if (sbi->s_stripe > 1) {
3486                 sbi->s_mb_group_prealloc = roundup(
3487                         sbi->s_mb_group_prealloc, sbi->s_stripe);
3488         }
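	/*
	 * Worked example (illustrative stripe value): with 4k blocks the
	 * default group preallocation is 512 clusters; for s_stripe = 24,
	 * roundup(512, 24) raises it to 528 so that group preallocations
	 * stay stripe-aligned.
	 */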
3489
3490         sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
3491         if (sbi->s_locality_groups == NULL) {
3492                 ret = -ENOMEM;
3493                 goto out;
3494         }
3495         for_each_possible_cpu(i) {
3496                 struct ext4_locality_group *lg;
3497                 lg = per_cpu_ptr(sbi->s_locality_groups, i);
3498                 mutex_init(&lg->lg_mutex);
3499                 for (j = 0; j < PREALLOC_TB_SIZE; j++)
3500                         INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
3501                 spin_lock_init(&lg->lg_prealloc_lock);
3502         }
3503
3504         if (bdev_nonrot(sb->s_bdev))
3505                 sbi->s_mb_max_linear_groups = 0;
3506         else
3507                 sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
3508         /* init file for buddy data */
3509         ret = ext4_mb_init_backend(sb);
3510         if (ret != 0)
3511                 goto out_free_locality_groups;
3512
3513         return 0;
3514
3515 out_free_locality_groups:
3516         free_percpu(sbi->s_locality_groups);
3517         sbi->s_locality_groups = NULL;
3518 out:
3519         kfree(sbi->s_mb_largest_free_orders);
3520         kfree(sbi->s_mb_largest_free_orders_locks);
3521         kfree(sbi->s_mb_offsets);
3522         sbi->s_mb_offsets = NULL;
3523         kfree(sbi->s_mb_maxs);
3524         sbi->s_mb_maxs = NULL;
3525         return ret;
3526 }
3527
3528 /* needs to be called with the ext4 group lock held */
3529 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
3530 {
3531         struct ext4_prealloc_space *pa;
3532         struct list_head *cur, *tmp;
3533         int count = 0;
3534
3535         list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
3536                 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3537                 list_del(&pa->pa_group_list);
3538                 count++;
3539                 kmem_cache_free(ext4_pspace_cachep, pa);
3540         }
3541         return count;
3542 }
3543
3544 int ext4_mb_release(struct super_block *sb)
3545 {
3546         ext4_group_t ngroups = ext4_get_groups_count(sb);
3547         ext4_group_t i;
3548         int num_meta_group_infos;
3549         struct ext4_group_info *grinfo, ***group_info;
3550         struct ext4_sb_info *sbi = EXT4_SB(sb);
3551         struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3552         int count;
3553
3554         if (test_opt(sb, DISCARD)) {
3555                 /*
3556                  * wait for the discard work to drain all of the ext4_free_data
3557                  */
3558                 flush_work(&sbi->s_discard_work);
3559                 WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
3560         }
3561
3562         if (sbi->s_group_info) {
3563                 for (i = 0; i < ngroups; i++) {
3564                         cond_resched();
3565                         grinfo = ext4_get_group_info(sb, i);
3566                         mb_group_bb_bitmap_free(grinfo);
3567                         ext4_lock_group(sb, i);
3568                         count = ext4_mb_cleanup_pa(grinfo);
3569                         if (count)
3570                                 mb_debug(sb, "mballoc: %d PAs left\n",
3571                                          count);
3572                         ext4_unlock_group(sb, i);
3573                         kmem_cache_free(cachep, grinfo);
3574                 }
3575                 num_meta_group_infos = (ngroups +
3576                                 EXT4_DESC_PER_BLOCK(sb) - 1) >>
3577                         EXT4_DESC_PER_BLOCK_BITS(sb);
3578                 rcu_read_lock();
3579                 group_info = rcu_dereference(sbi->s_group_info);
3580                 for (i = 0; i < num_meta_group_infos; i++)
3581                         kfree(group_info[i]);
3582                 kvfree(group_info);
3583                 rcu_read_unlock();
3584         }
3585         kfree(sbi->s_mb_largest_free_orders);
3586         kfree(sbi->s_mb_largest_free_orders_locks);
3587         kfree(sbi->s_mb_offsets);
3588         kfree(sbi->s_mb_maxs);
3589         iput(sbi->s_buddy_cache);
3590         if (sbi->s_mb_stats) {
3591                 ext4_msg(sb, KERN_INFO,
3592                        "mballoc: %u blocks %u reqs (%u success)",
3593                                 atomic_read(&sbi->s_bal_allocated),
3594                                 atomic_read(&sbi->s_bal_reqs),
3595                                 atomic_read(&sbi->s_bal_success));
3596                 ext4_msg(sb, KERN_INFO,
3597                       "mballoc: %u extents scanned, %u groups scanned, %u goal hits, "
3598                                 "%u 2^N hits, %u breaks, %u lost",
3599                                 atomic_read(&sbi->s_bal_ex_scanned),
3600                                 atomic_read(&sbi->s_bal_groups_scanned),
3601                                 atomic_read(&sbi->s_bal_goals),
3602                                 atomic_read(&sbi->s_bal_2orders),
3603                                 atomic_read(&sbi->s_bal_breaks),
3604                                 atomic_read(&sbi->s_mb_lost_chunks));
3605                 ext4_msg(sb, KERN_INFO,
3606                        "mballoc: %u generated and it took %llu",
3607                                 atomic_read(&sbi->s_mb_buddies_generated),
3608                                 atomic64_read(&sbi->s_mb_generation_time));
3609                 ext4_msg(sb, KERN_INFO,
3610                        "mballoc: %u preallocated, %u discarded",
3611                                 atomic_read(&sbi->s_mb_preallocated),
3612                                 atomic_read(&sbi->s_mb_discarded));
3613         }
3614
3615         free_percpu(sbi->s_locality_groups);
3616
3617         return 0;
3618 }
3619
3620 static inline int ext4_issue_discard(struct super_block *sb,
3621                 ext4_group_t block_group, ext4_grpblk_t cluster, int count,
3622                 struct bio **biop)
3623 {
3624         ext4_fsblk_t discard_block;
3625
3626         discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
3627                          ext4_group_first_block_no(sb, block_group));
3628         count = EXT4_C2B(EXT4_SB(sb), count);
3629         trace_ext4_discard_blocks(sb,
3630                         (unsigned long long) discard_block, count);
3631         if (biop) {
3632                 return __blkdev_issue_discard(sb->s_bdev,
3633                         (sector_t)discard_block << (sb->s_blocksize_bits - 9),
3634                         (sector_t)count << (sb->s_blocksize_bits - 9),
3635                         GFP_NOFS, biop);
3636         } else
3637                 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
3638 }
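/*
 * Worked example (illustrative): with 4k blocks,
 * sb->s_blocksize_bits - 9 == 3, so the shifts above convert each
 * filesystem block into eight 512-byte sectors before the discard is
 * issued to the block layer.
 */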
3639
3640 static void ext4_free_data_in_buddy(struct super_block *sb,
3641                                     struct ext4_free_data *entry)
3642 {
3643         struct ext4_buddy e4b;
3644         struct ext4_group_info *db;
3645         int err, count = 0, count2 = 0;
3646
3647         mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
3648                  entry->efd_count, entry->efd_group, entry);
3649
3650         err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
3651         /* we expect to find the existing buddy because it's pinned */
3652         BUG_ON(err != 0);
3653
3654         spin_lock(&EXT4_SB(sb)->s_md_lock);
3655         EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
3656         spin_unlock(&EXT4_SB(sb)->s_md_lock);
3657
3658         db = e4b.bd_info;
3659         /* there are blocks to put in buddy to make them really free */
3660         count += entry->efd_count;
3661         count2++;
3662         ext4_lock_group(sb, entry->efd_group);
3663         /* Take it out of per group rb tree */
3664         rb_erase(&entry->efd_node, &(db->bb_free_root));
3665         mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
3666
3667         /*
3668          * Clear the trimmed flag for the group so that the next
3669          * ext4_trim_fs can trim it.
3670          * If the volume is mounted with -o discard, online discard
3671          * is supported and the free blocks will be trimmed online.
3672          */
3673         if (!test_opt(sb, DISCARD))
3674                 EXT4_MB_GRP_CLEAR_TRIMMED(db);
3675
3676         if (!db->bb_free_root.rb_node) {
3677                 /* No more items in the per group rb tree;
3678                  * balance refcounts from ext4_mb_free_metadata()
3679                  */
3680                 put_page(e4b.bd_buddy_page);
3681                 put_page(e4b.bd_bitmap_page);
3682         }
3683         ext4_unlock_group(sb, entry->efd_group);
3684         ext4_mb_unload_buddy(&e4b);
3685
3686         mb_debug(sb, "freed %d blocks in %d structures\n", count,
3687                  count2);
3688 }
3689
3690 /*
3691  * This function is called by the jbd2 layer once the commit has finished,
3692  * so we know we can free the blocks that were released with that commit.
3693  */
3694 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
3695 {
3696         struct ext4_sb_info *sbi = EXT4_SB(sb);
3697         struct ext4_free_data *entry, *tmp;
3698         struct list_head freed_data_list;
3699         struct list_head *cut_pos = NULL;
3700         bool wake;
3701
3702         INIT_LIST_HEAD(&freed_data_list);
3703
3704         spin_lock(&sbi->s_md_lock);
3705         list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
3706                 if (entry->efd_tid != commit_tid)
3707                         break;
3708                 cut_pos = &entry->efd_list;
3709         }
3710         if (cut_pos)
3711                 list_cut_position(&freed_data_list, &sbi->s_freed_data_list,
3712                                   cut_pos);
3713         spin_unlock(&sbi->s_md_lock);
3714
3715         list_for_each_entry(entry, &freed_data_list, efd_list)
3716                 ext4_free_data_in_buddy(sb, entry);
3717
3718         if (test_opt(sb, DISCARD)) {
3719                 spin_lock(&sbi->s_md_lock);
3720                 wake = list_empty(&sbi->s_discard_list);
3721                 list_splice_tail(&freed_data_list, &sbi->s_discard_list);
3722                 spin_unlock(&sbi->s_md_lock);
3723                 if (wake)
3724                         queue_work(system_unbound_wq, &sbi->s_discard_work);
3725         } else {
3726                 list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
3727                         kmem_cache_free(ext4_free_data_cachep, entry);
3728         }
3729 }
3730
3731 int __init ext4_init_mballoc(void)
3732 {
3733         ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
3734                                         SLAB_RECLAIM_ACCOUNT);
3735         if (ext4_pspace_cachep == NULL)
3736                 goto out;
3737
3738         ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
3739                                     SLAB_RECLAIM_ACCOUNT);
3740         if (ext4_ac_cachep == NULL)
3741                 goto out_pa_free;
3742
3743         ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
3744                                            SLAB_RECLAIM_ACCOUNT);
3745         if (ext4_free_data_cachep == NULL)
3746                 goto out_ac_free;
3747
3748         return 0;
3749
3750 out_ac_free:
3751         kmem_cache_destroy(ext4_ac_cachep);
3752 out_pa_free:
3753         kmem_cache_destroy(ext4_pspace_cachep);
3754 out:
3755         return -ENOMEM;
3756 }
3757
3758 void ext4_exit_mballoc(void)
3759 {
3760         /*
3761          * Wait for completion of call_rcu()'s on ext4_pspace_cachep
3762          * before destroying the slab cache.
3763          */
3764         rcu_barrier();
3765         kmem_cache_destroy(ext4_pspace_cachep);
3766         kmem_cache_destroy(ext4_ac_cachep);
3767         kmem_cache_destroy(ext4_free_data_cachep);
3768         ext4_groupinfo_destroy_slabs();
3769 }
3770
3771
3772 /*
3773  * Check quota and mark the chosen space (ac->ac_b_ex) non-free in bitmaps.
3774  * Returns 0 on success or an error code.
3775  */
3776 static noinline_for_stack int
3777 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3778                                 handle_t *handle, unsigned int reserv_clstrs)
3779 {
3780         struct buffer_head *bitmap_bh = NULL;
3781         struct ext4_group_desc *gdp;
3782         struct buffer_head *gdp_bh;
3783         struct ext4_sb_info *sbi;
3784         struct super_block *sb;
3785         ext4_fsblk_t block;
3786         int err, len;
3787
3788         BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3789         BUG_ON(ac->ac_b_ex.fe_len <= 0);
3790
3791         sb = ac->ac_sb;
3792         sbi = EXT4_SB(sb);
3793
3794         bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
3795         if (IS_ERR(bitmap_bh)) {
3796                 err = PTR_ERR(bitmap_bh);
3797                 bitmap_bh = NULL;
3798                 goto out_err;
3799         }
3800
3801         BUFFER_TRACE(bitmap_bh, "getting write access");
3802         err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
3803                                             EXT4_JTR_NONE);
3804         if (err)
3805                 goto out_err;
3806
3807         err = -EIO;
3808         gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
3809         if (!gdp)
3810                 goto out_err;
3811
3812         ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
3813                         ext4_free_group_clusters(sb, gdp));
3814
3815         BUFFER_TRACE(gdp_bh, "get_write_access");
3816         err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE);
3817         if (err)
3818                 goto out_err;
3819
3820         block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3821
3822         len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
3823         if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
3824                 ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
3825                            "fs metadata", block, block+len);
3826                 /* File system mounted not to panic on error.
3827                  * Fix the bitmap and return EFSCORRUPTED.
3828                  * We leak some of the blocks here.
3829                  */
3830                 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3831                 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3832                               ac->ac_b_ex.fe_len);
3833                 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3834                 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3835                 if (!err)
3836                         err = -EFSCORRUPTED;
3837                 goto out_err;
3838         }
3839
3840         ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3841 #ifdef AGGRESSIVE_CHECK
3842         {
3843                 int i;
3844                 for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
3845                         BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
3846                                                 bitmap_bh->b_data));
3847                 }
3848         }
3849 #endif
3850         mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3851                       ac->ac_b_ex.fe_len);
3852         if (ext4_has_group_desc_csum(sb) &&
3853             (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3854                 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3855                 ext4_free_group_clusters_set(sb, gdp,
3856                                              ext4_free_clusters_after_init(sb,
3857                                                 ac->ac_b_ex.fe_group, gdp));
3858         }
3859         len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
3860         ext4_free_group_clusters_set(sb, gdp, len);
3861         ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
3862         ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
3863
3864         ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3865         percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
3866         /*
3867          * Now reduce the dirty block count also. Should not go negative
3868          */
3869         if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
3870                 /* release all the reserved blocks if non delalloc */
3871                 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
3872                                    reserv_clstrs);
3873
3874         if (sbi->s_log_groups_per_flex) {
3875                 ext4_group_t flex_group = ext4_flex_group(sbi,
3876                                                           ac->ac_b_ex.fe_group);
3877                 atomic64_sub(ac->ac_b_ex.fe_len,
3878                              &sbi_array_rcu_deref(sbi, s_flex_groups,
3879                                                   flex_group)->free_clusters);
3880         }
3881
3882         err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3883         if (err)
3884                 goto out_err;
3885         err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
3886
3887 out_err:
3888         brelse(bitmap_bh);
3889         return err;
3890 }
3891
3892 /*
3893  * Idempotent helper for the Ext4 fast commit replay path to set the
3894  * state of blocks in bitmaps and update counters.
3895  */
3896 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
3897                         int len, int state)
3898 {
3899         struct buffer_head *bitmap_bh = NULL;
3900         struct ext4_group_desc *gdp;
3901         struct buffer_head *gdp_bh;
3902         struct ext4_sb_info *sbi = EXT4_SB(sb);
3903         ext4_group_t group;
3904         ext4_grpblk_t blkoff;
3905         int i, err;
3906         int already;
3907         unsigned int clen, clen_changed, thisgrp_len;
3908
3909         while (len > 0) {
3910                 ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
3911
3912                 /*
3913                  * Check to see if we are freeing blocks across a group
3914                  * boundary.
3915                  * In case of flex_bg, (block, len) may span across more
3916                  * than one group. In that case we need to get the
3917                  * corresponding group metadata to work with; the enclosing
3918                  * while loop handles one group per iteration.
3919                  */
3920                 thisgrp_len = min_t(unsigned int, (unsigned int)len,
3921                         EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
3922                 clen = EXT4_NUM_B2C(sbi, thisgrp_len);
3923
3924                 if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) {
3925                         ext4_error(sb, "Marking blocks in system zone - "
3926                                    "Block = %llu, len = %u",
3927                                    block, thisgrp_len);
3928                         bitmap_bh = NULL;
3929                         break;
3930                 }
3931
3932                 bitmap_bh = ext4_read_block_bitmap(sb, group);
3933                 if (IS_ERR(bitmap_bh)) {
3934                         err = PTR_ERR(bitmap_bh);
3935                         bitmap_bh = NULL;
3936                         break;
3937                 }
3938
3939                 err = -EIO;
3940                 gdp = ext4_get_group_desc(sb, group, &gdp_bh);
3941                 if (!gdp)
3942                         break;
3943
3944                 ext4_lock_group(sb, group);
3945                 already = 0;
3946                 for (i = 0; i < clen; i++)
3947                         if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
3948                                          !state)
3949                                 already++;
3950
3951                 clen_changed = clen - already;
3952                 if (state)
3953                         mb_set_bits(bitmap_bh->b_data, blkoff, clen);
3954                 else
3955                         mb_clear_bits(bitmap_bh->b_data, blkoff, clen);
3956                 if (ext4_has_group_desc_csum(sb) &&
3957                     (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3958                         gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3959                         ext4_free_group_clusters_set(sb, gdp,
3960                              ext4_free_clusters_after_init(sb, group, gdp));
3961                 }
3962                 if (state)
3963                         clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
3964                 else
3965                         clen = ext4_free_group_clusters(sb, gdp) + clen_changed;
3966
3967                 ext4_free_group_clusters_set(sb, gdp, clen);
3968                 ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
3969                 ext4_group_desc_csum_set(sb, group, gdp);
3970
3971                 ext4_unlock_group(sb, group);
3972
3973                 if (sbi->s_log_groups_per_flex) {
3974                         ext4_group_t flex_group = ext4_flex_group(sbi, group);
3975                         struct flex_groups *fg = sbi_array_rcu_deref(sbi,
3976                                                    s_flex_groups, flex_group);
3977
3978                         if (state)
3979                                 atomic64_sub(clen_changed, &fg->free_clusters);
3980                         else
3981                                 atomic64_add(clen_changed, &fg->free_clusters);
3982
3983                 }
3984
3985                 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
3986                 if (err)
3987                         break;
3988                 sync_dirty_buffer(bitmap_bh);
3989                 err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
3990                 sync_dirty_buffer(gdp_bh);
3991                 if (err)
3992                         break;
3993
3994                 block += thisgrp_len;
3995                 len -= thisgrp_len;
3996                 brelse(bitmap_bh);
3997                 BUG_ON(len < 0);
3998         }
3999
4000         if (err)
4001                 brelse(bitmap_bh);
4002 }
4003
4004 /*
4005  * here we normalize the request for a locality group
4006  * Group requests are normalized to s_mb_group_prealloc, which is
4007  * rounded up to a multiple of s_stripe if a stripe size is set.
4008  * s_mb_group_prealloc can be configured via
4009  * /sys/fs/ext4/<partition>/mb_group_prealloc
4010  *
4011  * XXX: should we try to preallocate more than the group has now?
4012  */
4013 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
4014 {
4015         struct super_block *sb = ac->ac_sb;
4016         struct ext4_locality_group *lg = ac->ac_lg;
4017
4018         BUG_ON(lg == NULL);
4019         ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
4020         mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
4021 }
4022
4023 /*
4024  * Normalization means making request better in terms of
4025  * size and alignment
4026  */
4027 static noinline_for_stack void
4028 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
4029                                 struct ext4_allocation_request *ar)
4030 {
4031         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4032         int bsbits, max;
4033         ext4_lblk_t end;
4034         loff_t size, start_off;
4035         loff_t orig_size __maybe_unused;
4036         ext4_lblk_t start;
4037         struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4038         struct ext4_prealloc_space *pa;
4039
4040         /* only normalize data requests; metadata requests
4041            do not need preallocation */
4042         if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4043                 return;
4044
4045         /* sometimes the caller may want exactly the requested blocks */
4046         if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4047                 return;
4048
4049         /* caller may indicate that preallocation isn't
4050          * required (it's a tail, for example) */
4051         if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
4052                 return;
4053
4054         if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
4055                 ext4_mb_normalize_group_request(ac);
4056                 return;
4057         }
4058
4059         bsbits = ac->ac_sb->s_blocksize_bits;
4060
4061         /* first, determine the actual file size
4062          * assuming the current request is allocated */
4063         size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
4064         size = size << bsbits;
4065         if (size < i_size_read(ac->ac_inode))
4066                 size = i_size_read(ac->ac_inode);
4067         orig_size = size;
4068
4069         /* max size of free chunks */
4070         max = 2 << bsbits;
4071
4072 #define NRL_CHECK_SIZE(req, size, max, chunk_size)      \
4073                 (req <= (size) || max <= (chunk_size))
4074
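             /*
              * A plausible reading of NRL_CHECK_SIZE as used below: a size
              * bucket is taken either because the predicted request fits in
              * it (req <= size), or because the max free chunk size
              * (max == 2 << bsbits) is no bigger than that bucket's
              * chunk_size, in which case a larger bucket could not be
              * served anyway.
              */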
4075         /* first, try to predict filesize */
4076         /* XXX: should this table be tunable? */
4077         start_off = 0;
4078         if (size <= 16 * 1024) {
4079                 size = 16 * 1024;
4080         } else if (size <= 32 * 1024) {
4081                 size = 32 * 1024;
4082         } else if (size <= 64 * 1024) {
4083                 size = 64 * 1024;
4084         } else if (size <= 128 * 1024) {
4085                 size = 128 * 1024;
4086         } else if (size <= 256 * 1024) {
4087                 size = 256 * 1024;
4088         } else if (size <= 512 * 1024) {
4089                 size = 512 * 1024;
4090         } else if (size <= 1024 * 1024) {
4091                 size = 1024 * 1024;
4092         } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
4093                 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4094                                                 (21 - bsbits)) << 21;
4095                 size = 2 * 1024 * 1024;
4096         } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
4097                 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4098                                                         (22 - bsbits)) << 22;
4099                 size = 4 * 1024 * 1024;
4100         } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
4101                                         (8<<20)>>bsbits, max, 8 * 1024)) {
4102                 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4103                                                         (23 - bsbits)) << 23;
4104                 size = 8 * 1024 * 1024;
4105         } else {
4106                 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
4107                 size      = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
4108                                               ac->ac_o_ex.fe_len) << bsbits;
4109         }
4110         size = size >> bsbits;
4111         start = start_off >> bsbits;
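             /*
              * Worked example (illustrative, assuming 4KiB blocks, i.e.
              * bsbits == 12): a predicted file size of 100KiB falls into the
              * "size <= 128 * 1024" bucket above, so size becomes 128KiB,
              * i.e. 32 blocks after the shift; start_off remains 0, so the
              * normalized request starts at logical block 0.
              */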
4112
4113         /*
4114          * For tiny groups (smaller than 8MB) the chosen allocation
4115          * alignment may be larger than group size. Make sure the
4116          * alignment does not move allocation to a different group which
4117          * makes mballoc fail assertions later.
4118          */
4119         start = max(start, rounddown(ac->ac_o_ex.fe_logical,
4120                         (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
4121
4122         /* don't cover already allocated blocks in selected range */
4123         if (ar->pleft && start <= ar->lleft) {
4124                 size -= ar->lleft + 1 - start;
4125                 start = ar->lleft + 1;
4126         }
4127         if (ar->pright && start + size - 1 >= ar->lright)
4128                 size -= start + size - ar->lright;
4129
4130         /*
4131          * Trim allocation request for filesystems with artificially small
4132          * groups.
4133          */
4134         if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
4135                 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
4136
4137         end = start + size;
4138
4139         /* check we don't cross already preallocated blocks */
4140         rcu_read_lock();
4141         list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4142                 ext4_lblk_t pa_end;
4143
4144                 if (pa->pa_deleted)
4145                         continue;
4146                 spin_lock(&pa->pa_lock);
4147                 if (pa->pa_deleted) {
4148                         spin_unlock(&pa->pa_lock);
4149                         continue;
4150                 }
4151
4152                 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
4153                                                   pa->pa_len);
4154
4155                 /* PA must not overlap original request */
4156                 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
4157                         ac->ac_o_ex.fe_logical < pa->pa_lstart));
4158
4159                 /* skip PAs this normalized request doesn't overlap with */
4160                 if (pa->pa_lstart >= end || pa_end <= start) {
4161                         spin_unlock(&pa->pa_lock);
4162                         continue;
4163                 }
4164                 BUG_ON(pa->pa_lstart <= start && pa_end >= end);
4165
4166                 /* adjust start or end to be adjacent to this pa */
4167                 if (pa_end <= ac->ac_o_ex.fe_logical) {
4168                         BUG_ON(pa_end < start);
4169                         start = pa_end;
4170                 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
4171                         BUG_ON(pa->pa_lstart > end);
4172                         end = pa->pa_lstart;
4173                 }
4174                 spin_unlock(&pa->pa_lock);
4175         }
4176         rcu_read_unlock();
4177         size = end - start;
4178
4179         /* XXX: extra loop to check we really don't overlap preallocations */
4180         rcu_read_lock();
4181         list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4182                 ext4_lblk_t pa_end;
4183
4184                 spin_lock(&pa->pa_lock);
4185                 if (pa->pa_deleted == 0) {
4186                         pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
4187                                                           pa->pa_len);
4188                         BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
4189                 }
4190                 spin_unlock(&pa->pa_lock);
4191         }
4192         rcu_read_unlock();
4193
4194         /*
4195          * In this function "start" and "size" are normalized for better
4196          * alignment and length such that we could preallocate more blocks.
4197          * This normalization is done such that original request of
4198          * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and
4199          * "size" boundaries.
4200          * (Note fe_len can be relaxed since the FS block allocation API does not
4201          * guarantee the number of contiguous blocks allocated, since that
4202          * depends upon the free space left, etc).
4203          * In case of inode pa, later we use the allocated blocks
4204          * [pa_start + fe_logical - pa_lstart, fe_len/size] from the preallocated
4205          * range of goal/best blocks [start, size] to put it at the
4206          * ac_o_ex.fe_logical extent of this inode.
4207          * (See ext4_mb_use_inode_pa() for more details)
4208          */
4209         if (start + size <= ac->ac_o_ex.fe_logical ||
4210                         start > ac->ac_o_ex.fe_logical) {
4211                 ext4_msg(ac->ac_sb, KERN_ERR,
4212                          "start %lu, size %lu, fe_logical %lu",
4213                          (unsigned long) start, (unsigned long) size,
4214                          (unsigned long) ac->ac_o_ex.fe_logical);
4215                 BUG();
4216         }
4217         BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
4218
4219         /* now prepare goal request */
4220
4221         /* XXX: is it better to align blocks with respect to logical
4222          * placement or to satisfy a big request as is */
4223         ac->ac_g_ex.fe_logical = start;
4224         ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
4225
4226         /* define goal start in order to merge */
4227         if (ar->pright && (ar->lright == (start + size))) {
4228                 /* merge to the right */
4229                 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
4230                                                 &ac->ac_f_ex.fe_group,
4231                                                 &ac->ac_f_ex.fe_start);
4232                 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4233         }
4234         if (ar->pleft && (ar->lleft + 1 == start)) {
4235                 /* merge to the left */
4236                 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
4237                                                 &ac->ac_f_ex.fe_group,
4238                                                 &ac->ac_f_ex.fe_start);
4239                 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4240         }
4241
4242         mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
4243                  orig_size, start);
4244 }
4245
4246 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
4247 {
4248         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4249
4250         if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
4251                 atomic_inc(&sbi->s_bal_reqs);
4252                 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
4253                 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
4254                         atomic_inc(&sbi->s_bal_success);
4255                 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
4256                 atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
4257                 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
4258                                 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
4259                         atomic_inc(&sbi->s_bal_goals);
4260                 if (ac->ac_found > sbi->s_mb_max_to_scan)
4261                         atomic_inc(&sbi->s_bal_breaks);
4262         }
4263
4264         if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
4265                 trace_ext4_mballoc_alloc(ac);
4266         else
4267                 trace_ext4_mballoc_prealloc(ac);
4268 }
4269
4270 /*
4271  * Called on failure; free up any blocks from the inode PA for this
4272  * context.  We don't need this for MB_GROUP_PA because we only change
4273  * pa_free in ext4_mb_release_context(), but on failure, we've already
4274  * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
4275  */
4276 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
4277 {
4278         struct ext4_prealloc_space *pa = ac->ac_pa;
4279         struct ext4_buddy e4b;
4280         int err;
4281
4282         if (pa == NULL) {
4283                 if (ac->ac_f_ex.fe_len == 0)
4284                         return;
4285                 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
4286                 if (err) {
4287                         /*
4288                          * This should never happen since we pin the
4289                          * pages in the ext4_allocation_context so
4290                          * ext4_mb_load_buddy() should never fail.
4291                          */
4292                         WARN(1, "mb_load_buddy failed (%d)", err);
4293                         return;
4294                 }
4295                 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4296                 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
4297                                ac->ac_f_ex.fe_len);
4298                 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4299                 ext4_mb_unload_buddy(&e4b);
4300                 return;
4301         }
4302         if (pa->pa_type == MB_INODE_PA)
4303                 pa->pa_free += ac->ac_b_ex.fe_len;
4304 }
4305
4306 /*
4307  * use blocks preallocated to inode
4308  */
4309 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
4310                                 struct ext4_prealloc_space *pa)
4311 {
4312         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4313         ext4_fsblk_t start;
4314         ext4_fsblk_t end;
4315         int len;
4316
4317         /* found preallocated blocks, use them */
4318         start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
4319         end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
4320                   start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
4321         len = EXT4_NUM_B2C(sbi, end - start);
4322         ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
4323                                         &ac->ac_b_ex.fe_start);
4324         ac->ac_b_ex.fe_len = len;
4325         ac->ac_status = AC_STATUS_FOUND;
4326         ac->ac_pa = pa;
4327
4328         BUG_ON(start < pa->pa_pstart);
4329         BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
4330         BUG_ON(pa->pa_free < len);
4331         pa->pa_free -= len;
4332
4333         mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
4334 }
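     /*
      * Numeric sketch of ext4_mb_use_inode_pa() above (hypothetical values,
      * assuming one cluster per block): with pa_lstart == 100,
      * pa_pstart == 5000, pa_len == 16 and a request of fe_len == 8 at
      * fe_logical == 104, start == 5004 and end == min(5016, 5012) == 5012,
      * so 8 blocks at physical block 5004 are handed out and pa_free drops
      * by 8.
      */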
4335
4336 /*
4337  * use blocks preallocated to locality group
4338  */
4339 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
4340                                 struct ext4_prealloc_space *pa)
4341 {
4342         unsigned int len = ac->ac_o_ex.fe_len;
4343
4344         ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
4345                                         &ac->ac_b_ex.fe_group,
4346                                         &ac->ac_b_ex.fe_start);
4347         ac->ac_b_ex.fe_len = len;
4348         ac->ac_status = AC_STATUS_FOUND;
4349         ac->ac_pa = pa;
4350
4351         /* we don't correct pa_pstart or pa_len here to avoid a
4352          * possible race when the group is being loaded concurrently;
4353          * instead we correct the pa later, after blocks are marked
4354          * in the on-disk bitmap -- see ext4_mb_release_context().
4355          * Other CPUs are prevented from allocating from this pa by lg_mutex.
4356          */
4357         mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
4358                  pa->pa_lstart - len, len, pa);
4359 }
4360
4361 /*
4362  * Return the prealloc space that has the minimal distance
4363  * from the goal block. @cpa is the prealloc
4364  * space with the currently known minimal distance
4365  * from the goal block.
4366  */
4367 static struct ext4_prealloc_space *
4368 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
4369                         struct ext4_prealloc_space *pa,
4370                         struct ext4_prealloc_space *cpa)
4371 {
4372         ext4_fsblk_t cur_distance, new_distance;
4373
4374         if (cpa == NULL) {
4375                 atomic_inc(&pa->pa_count);
4376                 return pa;
4377         }
4378         cur_distance = abs(goal_block - cpa->pa_pstart);
4379         new_distance = abs(goal_block - pa->pa_pstart);
4380
4381         if (cur_distance <= new_distance)
4382                 return cpa;
4383
4384         /* drop the previous reference */
4385         atomic_dec(&cpa->pa_count);
4386         atomic_inc(&pa->pa_count);
4387         return pa;
4388 }
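     /*
      * For instance (hypothetical numbers): with goal_block == 1000,
      * cpa->pa_pstart == 900 (distance 100) and pa->pa_pstart == 1050
      * (distance 50), the helper above drops the reference on cpa and
      * returns pa.
      */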
4389
4390 /*
4391  * search goal blocks in preallocated space
4392  */
4393 static noinline_for_stack bool
4394 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
4395 {
4396         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4397         int order, i;
4398         struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4399         struct ext4_locality_group *lg;
4400         struct ext4_prealloc_space *pa, *cpa = NULL;
4401         ext4_fsblk_t goal_block;
4402
4403         /* only data can be preallocated */
4404         if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4405                 return false;
4406
4407         /* first, try per-file preallocation */
4408         rcu_read_lock();
4409         list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4410
4411                 /* none of the fields in this condition change,
4412                  * so we can skip locking for them */
4413                 if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
4414                     ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
4415                                                EXT4_C2B(sbi, pa->pa_len)))
4416                         continue;
4417
4418                 /* non-extent files can't have physical blocks past 2^32 */
4419                 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
4420                     (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
4421                      EXT4_MAX_BLOCK_FILE_PHYS))
4422                         continue;
4423
4424                 /* found preallocated blocks, use them */
4425                 spin_lock(&pa->pa_lock);
4426                 if (pa->pa_deleted == 0 && pa->pa_free) {
4427                         atomic_inc(&pa->pa_count);
4428                         ext4_mb_use_inode_pa(ac, pa);
4429                         spin_unlock(&pa->pa_lock);
4430                         ac->ac_criteria = 10;
4431                         rcu_read_unlock();
4432                         return true;
4433                 }
4434                 spin_unlock(&pa->pa_lock);
4435         }
4436         rcu_read_unlock();
4437
4438         /* can we use group allocation? */
4439         if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
4440                 return false;
4441
4442         /* inode may have no locality group for some reason */
4443         lg = ac->ac_lg;
4444         if (lg == NULL)
4445                 return false;
4446         order  = fls(ac->ac_o_ex.fe_len) - 1;
4447         if (order > PREALLOC_TB_SIZE - 1)
4448                 /* The max size of hash table is PREALLOC_TB_SIZE */
4449                 order = PREALLOC_TB_SIZE - 1;
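             /*
              * Example (illustrative): a request for 24 clusters gives
              * fls(24) - 1 == 4, so the scan below starts at list 4
              * (entries with 16..31 free clusters) and walks the larger
              * lists up to PREALLOC_TB_SIZE - 1.
              */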
4450
4451         goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
4452         /*
4453          * search for the prealloc space that has the
4454          * minimal distance from the goal block.
4455          */
4456         for (i = order; i < PREALLOC_TB_SIZE; i++) {
4457                 rcu_read_lock();
4458                 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
4459                                         pa_inode_list) {
4460                         spin_lock(&pa->pa_lock);
4461                         if (pa->pa_deleted == 0 &&
4462                                         pa->pa_free >= ac->ac_o_ex.fe_len) {
4463
4464                                 cpa = ext4_mb_check_group_pa(goal_block,
4465                                                                 pa, cpa);
4466                         }
4467                         spin_unlock(&pa->pa_lock);
4468                 }
4469                 rcu_read_unlock();
4470         }
4471         if (cpa) {
4472                 ext4_mb_use_group_pa(ac, cpa);
4473                 ac->ac_criteria = 20;
4474                 return true;
4475         }
4476         return false;
4477 }
4478
4479 /*
4480  * the function goes through all blocks freed in the group
4481  * but not yet committed and marks them used in the in-core bitmap.
4482  * The buddy must be generated from this bitmap.
4483  * Must be called with the ext4 group lock held.
4484  */
4485 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
4486                                                 ext4_group_t group)
4487 {
4488         struct rb_node *n;
4489         struct ext4_group_info *grp;
4490         struct ext4_free_data *entry;
4491
4492         grp = ext4_get_group_info(sb, group);
4493         n = rb_first(&(grp->bb_free_root));
4494
4495         while (n) {
4496                 entry = rb_entry(n, struct ext4_free_data, efd_node);
4497                 mb_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
4498                 n = rb_next(n);
4499         }
4500         return;
4501 }
4502
4503 /*
4504  * the function goes through all preallocations in this group and marks them
4505  * used in the in-core bitmap. The buddy must be generated from this bitmap.
4506  * Must be called with the ext4 group lock held.
4507  */
4508 static noinline_for_stack
4509 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
4510                                         ext4_group_t group)
4511 {
4512         struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4513         struct ext4_prealloc_space *pa;
4514         struct list_head *cur;
4515         ext4_group_t groupnr;
4516         ext4_grpblk_t start;
4517         int preallocated = 0;
4518         int len;
4519
4520         /* all forms of preallocation discard first load the group,
4521          * so the only competing code is preallocation use.
4522          * we don't need any locking here.
4523          * notice we do NOT ignore preallocations with pa_deleted set;
4524          * otherwise we could leave used blocks available for
4525          * allocation in the buddy when a concurrent ext4_mb_put_pa()
4526          * is dropping the preallocation
4527          */
4528         list_for_each(cur, &grp->bb_prealloc_list) {
4529                 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
4530                 spin_lock(&pa->pa_lock);
4531                 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4532                                              &groupnr, &start);
4533                 len = pa->pa_len;
4534                 spin_unlock(&pa->pa_lock);
4535                 if (unlikely(len == 0))
4536                         continue;
4537                 BUG_ON(groupnr != group);
4538                 mb_set_bits(bitmap, start, len);
4539                 preallocated += len;
4540         }
4541         mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
4542 }
4543
4544 static void ext4_mb_mark_pa_deleted(struct super_block *sb,
4545                                     struct ext4_prealloc_space *pa)
4546 {
4547         struct ext4_inode_info *ei;
4548
4549         if (pa->pa_deleted) {
4550                 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
4551                              pa->pa_type, pa->pa_pstart, pa->pa_lstart,
4552                              pa->pa_len);
4553                 return;
4554         }
4555
4556         pa->pa_deleted = 1;
4557
4558         if (pa->pa_type == MB_INODE_PA) {
4559                 ei = EXT4_I(pa->pa_inode);
4560                 atomic_dec(&ei->i_prealloc_active);
4561         }
4562 }
4563
4564 static void ext4_mb_pa_callback(struct rcu_head *head)
4565 {
4566         struct ext4_prealloc_space *pa;
4567         pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
4568
4569         BUG_ON(atomic_read(&pa->pa_count));
4570         BUG_ON(pa->pa_deleted == 0);
4571         kmem_cache_free(ext4_pspace_cachep, pa);
4572 }
4573
4574 /*
4575  * drops a reference to preallocated space descriptor
4576  * if this was the last reference and the space is consumed
4577  */
4578 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
4579                         struct super_block *sb, struct ext4_prealloc_space *pa)
4580 {
4581         ext4_group_t grp;
4582         ext4_fsblk_t grp_blk;
4583
4584         /* in this short window concurrent discard can set pa_deleted */
4585         spin_lock(&pa->pa_lock);
4586         if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
4587                 spin_unlock(&pa->pa_lock);
4588                 return;
4589         }
4590
4591         if (pa->pa_deleted == 1) {
4592                 spin_unlock(&pa->pa_lock);
4593                 return;
4594         }
4595
4596         ext4_mb_mark_pa_deleted(sb, pa);
4597         spin_unlock(&pa->pa_lock);
4598
4599         grp_blk = pa->pa_pstart;
4600         /*
4601          * If doing group-based preallocation, pa_pstart may be in the
4602          * next group when pa is used up
4603          */
4604         if (pa->pa_type == MB_GROUP_PA)
4605                 grp_blk--;
4606
4607         grp = ext4_get_group_number(sb, grp_blk);
4608
4609         /*
4610          * possible race:
4611          *
4612          *  P1 (buddy init)                     P2 (regular allocation)
4613          *                                      find block B in PA
4614          *  copy on-disk bitmap to buddy
4615          *                                      mark B in on-disk bitmap
4616          *                                      drop PA from group
4617          *  mark all PAs in buddy
4618          *
4619          * thus, P1 initializes buddy with B available. to prevent this
4620          * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
4621          * against that pair
4622          */
4623         ext4_lock_group(sb, grp);
4624         list_del(&pa->pa_group_list);
4625         ext4_unlock_group(sb, grp);
4626
4627         spin_lock(pa->pa_obj_lock);
4628         list_del_rcu(&pa->pa_inode_list);
4629         spin_unlock(pa->pa_obj_lock);
4630
4631         call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4632 }
4633
4634 /*
4635  * creates new preallocated space for given inode
4636  */
4637 static noinline_for_stack void
4638 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
4639 {
4640         struct super_block *sb = ac->ac_sb;
4641         struct ext4_sb_info *sbi = EXT4_SB(sb);
4642         struct ext4_prealloc_space *pa;
4643         struct ext4_group_info *grp;
4644         struct ext4_inode_info *ei;
4645
4646         /* preallocate only when the found space is larger than requested */
4647         BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4648         BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4649         BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4650         BUG_ON(ac->ac_pa == NULL);
4651
4652         pa = ac->ac_pa;
4653
4654         if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
4655                 int winl;
4656                 int wins;
4657                 int win;
4658                 int offs;
4659
4660                 /* we can't allocate as much as the normalizer wants,
4661                  * so the found space must get a proper lstart
4662                  * to cover the original request */
4663                 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
4664                 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
4665
4666                 /* we're limited by the original request in that
4667                  * the logical block must be covered anyway;
4668                  * winl is the window we can move our chunk within */
4669                 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
4670
4671                 /* also, we should cover the whole original request */
4672                 wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
4673
4674                 /* the smallest one defines the real window */
4675                 win = min(winl, wins);
4676
4677                 offs = ac->ac_o_ex.fe_logical %
4678                         EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4679                 if (offs && offs < win)
4680                         win = offs;
4681
4682                 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
4683                         EXT4_NUM_B2C(sbi, win);
4684                 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
4685                 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
4686         }
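             /*
              * Illustrative numbers for the window logic above, assuming one
              * cluster per block: g_ex.fe_logical == 0, o_ex.fe_logical == 10,
              * b_ex.fe_len == 16 and o_ex.fe_len == 4 give winl == 10,
              * wins == 12 and win == 10; offs == 10 % 16 == 10 is not smaller
              * than win, so fe_logical becomes 10 - 10 == 0 and the PA covers
              * logical blocks [0, 16).
              */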
4687
4688         /* preallocation can change ac_b_ex, thus we store the actually
4689          * allocated blocks for history */
4690         ac->ac_f_ex = ac->ac_b_ex;
4691
4692         pa->pa_lstart = ac->ac_b_ex.fe_logical;
4693         pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4694         pa->pa_len = ac->ac_b_ex.fe_len;
4695         pa->pa_free = pa->pa_len;
4696         spin_lock_init(&pa->pa_lock);
4697         INIT_LIST_HEAD(&pa->pa_inode_list);
4698         INIT_LIST_HEAD(&pa->pa_group_list);
4699         pa->pa_deleted = 0;
4700         pa->pa_type = MB_INODE_PA;
4701
4702         mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4703                  pa->pa_len, pa->pa_lstart);
4704         trace_ext4_mb_new_inode_pa(ac, pa);
4705
4706         ext4_mb_use_inode_pa(ac, pa);
4707         atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
4708
4709         ei = EXT4_I(ac->ac_inode);
4710         grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4711
4712         pa->pa_obj_lock = &ei->i_prealloc_lock;
4713         pa->pa_inode = ac->ac_inode;
4714
4715         list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4716
4717         spin_lock(pa->pa_obj_lock);
4718         list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
4719         spin_unlock(pa->pa_obj_lock);
4720         atomic_inc(&ei->i_prealloc_active);
4721 }
4722
4723 /*
4724  * creates new preallocated space for the locality group the inode belongs to
4725  */
4726 static noinline_for_stack void
4727 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
4728 {
4729         struct super_block *sb = ac->ac_sb;
4730         struct ext4_locality_group *lg;
4731         struct ext4_prealloc_space *pa;
4732         struct ext4_group_info *grp;
4733
4734         /* preallocate only when the found space is larger than requested */
4735         BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4736         BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4737         BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4738         BUG_ON(ac->ac_pa == NULL);
4739
4740         pa = ac->ac_pa;
4741
4742         /* preallocation can change ac_b_ex, thus we store the actually
4743          * allocated blocks for history */
4744         ac->ac_f_ex = ac->ac_b_ex;
4745
4746         pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4747         pa->pa_lstart = pa->pa_pstart;
4748         pa->pa_len = ac->ac_b_ex.fe_len;
4749         pa->pa_free = pa->pa_len;
4750         spin_lock_init(&pa->pa_lock);
4751         INIT_LIST_HEAD(&pa->pa_inode_list);
4752         INIT_LIST_HEAD(&pa->pa_group_list);
4753         pa->pa_deleted = 0;
4754         pa->pa_type = MB_GROUP_PA;
4755
4756         mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4757                  pa->pa_len, pa->pa_lstart);
4758         trace_ext4_mb_new_group_pa(ac, pa);
4759
4760         ext4_mb_use_group_pa(ac, pa);
4761         atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
4762
4763         grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4764         lg = ac->ac_lg;
4765         BUG_ON(lg == NULL);
4766
4767         pa->pa_obj_lock = &lg->lg_prealloc_lock;
4768         pa->pa_inode = NULL;
4769
4770         list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4771
4772         /*
4773          * We will later add the new pa to the right bucket
4774          * after updating the pa_free in ext4_mb_release_context
4775          */
4776 }
4777
4778 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
4779 {
4780         if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4781                 ext4_mb_new_group_pa(ac);
4782         else
4783                 ext4_mb_new_inode_pa(ac);
4784 }
4785
4786 /*
4787  * finds all unused blocks in on-disk bitmap, frees them in
4788  * in-core bitmap and buddy.
4789  * @pa must be unlinked from inode and group lists, so that
4790  * nobody else can find/use it.
4791  * the caller MUST hold group/inode locks.
4792  * TODO: optimize the case when there are no in-core structures yet
4793  */
4794 static noinline_for_stack int
4795 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
4796                         struct ext4_prealloc_space *pa)
4797 {
4798         struct super_block *sb = e4b->bd_sb;
4799         struct ext4_sb_info *sbi = EXT4_SB(sb);
4800         unsigned int end;
4801         unsigned int next;
4802         ext4_group_t group;
4803         ext4_grpblk_t bit;
4804         unsigned long long grp_blk_start;
4805         int free = 0;
4806
4807         BUG_ON(pa->pa_deleted == 0);
4808         ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4809         grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
4810         BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
4811         end = bit + pa->pa_len;
4812
4813         while (bit < end) {
4814                 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
4815                 if (bit >= end)
4816                         break;
4817                 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
4818                 mb_debug(sb, "free preallocated %u/%u in group %u\n",
4819                          (unsigned) ext4_group_first_block_no(sb, group) + bit,
4820                          (unsigned) next - bit, (unsigned) group);
4821                 free += next - bit;
4822
4823                 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
4824                 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
4825                                                     EXT4_C2B(sbi, bit)),
4826                                                next - bit);
4827                 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
4828                 bit = next + 1;
4829         }
4830         if (free != pa->pa_free) {
4831                 ext4_msg(e4b->bd_sb, KERN_CRIT,
4832                          "pa %p: logic %lu, phys. %lu, len %d",
4833                          pa, (unsigned long) pa->pa_lstart,
4834                          (unsigned long) pa->pa_pstart,
4835                          pa->pa_len);
4836                 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
4837                                         free, pa->pa_free);
4838                 /*
4839                  * pa is already deleted so we use the value obtained
4840                  * from the bitmap and continue.
4841                  */
4842         }
4843         atomic_add(free, &sbi->s_mb_discarded);
4844
4845         return 0;
4846 }
4847
4848 static noinline_for_stack int
4849 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
4850                                 struct ext4_prealloc_space *pa)
4851 {
4852         struct super_block *sb = e4b->bd_sb;
4853         ext4_group_t group;
4854         ext4_grpblk_t bit;
4855
4856         trace_ext4_mb_release_group_pa(sb, pa);
4857         BUG_ON(pa->pa_deleted == 0);
4858         ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4859         BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
4860         mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
4861         atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
4862         trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
4863
4864         return 0;
4865 }
4866
4867 /*
4868  * releases all preallocations in given group
4869  *
4870  * first, we need to decide discard policy:
4871  * - when do we discard
4872  *   1) ENOSPC
4873  * - how many do we discard
4874  *   1) how many requested
4875  */
4876 static noinline_for_stack int
4877 ext4_mb_discard_group_preallocations(struct super_block *sb,
4878                                      ext4_group_t group, int *busy)
4879 {
4880         struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4881         struct buffer_head *bitmap_bh = NULL;
4882         struct ext4_prealloc_space *pa, *tmp;
4883         struct list_head list;
4884         struct ext4_buddy e4b;
4885         int err;
4886         int free = 0;
4887
4888         mb_debug(sb, "discard preallocation for group %u\n", group);
4889         if (list_empty(&grp->bb_prealloc_list))
4890                 goto out_dbg;
4891
4892         bitmap_bh = ext4_read_block_bitmap(sb, group);
4893         if (IS_ERR(bitmap_bh)) {
4894                 err = PTR_ERR(bitmap_bh);
4895                 ext4_error_err(sb, -err,
4896                                "Error %d reading block bitmap for %u",
4897                                err, group);
4898                 goto out_dbg;
4899         }
4900
4901         err = ext4_mb_load_buddy(sb, group, &e4b);
4902         if (err) {
4903                 ext4_warning(sb, "Error %d loading buddy information for %u",
4904                              err, group);
4905                 put_bh(bitmap_bh);
4906                 goto out_dbg;
4907         }
4908
4909         INIT_LIST_HEAD(&list);
4910         ext4_lock_group(sb, group);
4911         list_for_each_entry_safe(pa, tmp,
4912                                 &grp->bb_prealloc_list, pa_group_list) {
4913                 spin_lock(&pa->pa_lock);
4914                 if (atomic_read(&pa->pa_count)) {
4915                         spin_unlock(&pa->pa_lock);
4916                         *busy = 1;
4917                         continue;
4918                 }
4919                 if (pa->pa_deleted) {
4920                         spin_unlock(&pa->pa_lock);
4921                         continue;
4922                 }
4923
4924                 /* seems this one can be freed ... */
4925                 ext4_mb_mark_pa_deleted(sb, pa);
4926
4927                 if (!free)
4928                         this_cpu_inc(discard_pa_seq);
4929
4930                 /* we can trust pa_free ... */
4931                 free += pa->pa_free;
4932
4933                 spin_unlock(&pa->pa_lock);
4934
4935                 list_del(&pa->pa_group_list);
4936                 list_add(&pa->u.pa_tmp_list, &list);
4937         }
4938
4939         /* now free all selected PAs */
4940         list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4941
4942                 /* remove from object (inode or locality group) */
4943                 spin_lock(pa->pa_obj_lock);
4944                 list_del_rcu(&pa->pa_inode_list);
4945                 spin_unlock(pa->pa_obj_lock);
4946
4947                 if (pa->pa_type == MB_GROUP_PA)
4948                         ext4_mb_release_group_pa(&e4b, pa);
4949                 else
4950                         ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
4951
4952                 list_del(&pa->u.pa_tmp_list);
4953                 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4954         }
4955
4956         ext4_unlock_group(sb, group);
4957         ext4_mb_unload_buddy(&e4b);
4958         put_bh(bitmap_bh);
4959 out_dbg:
4960         mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
4961                  free, group, grp->bb_free);
4962         return free;
4963 }
4964
4965 /*
4966  * releases all non-used preallocated blocks for given inode
4967  *
4968  * It's important to discard preallocations under i_data_sem
4969  * We don't want another block to be served from the prealloc
4970  * space when we are discarding the inode prealloc space.
4971  *
4972  * FIXME!! Make sure it is valid at all the call sites
4973  */
4974 void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
4975 {
4976         struct ext4_inode_info *ei = EXT4_I(inode);
4977         struct super_block *sb = inode->i_sb;
4978         struct buffer_head *bitmap_bh = NULL;
4979         struct ext4_prealloc_space *pa, *tmp;
4980         ext4_group_t group = 0;
4981         struct list_head list;
4982         struct ext4_buddy e4b;
4983         int err;
4984
4985         if (!S_ISREG(inode->i_mode)) {
4986                 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
4987                 return;
4988         }
4989
4990         if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
4991                 return;
4992
4993         mb_debug(sb, "discard preallocation for inode %lu\n",
4994                  inode->i_ino);
4995         trace_ext4_discard_preallocations(inode,
4996                         atomic_read(&ei->i_prealloc_active), needed);
4997
4998         INIT_LIST_HEAD(&list);
4999
5000         if (needed == 0)
5001                 needed = UINT_MAX;
5002
5003 repeat:
5004         /* first, collect all pa's in the inode */
5005         spin_lock(&ei->i_prealloc_lock);
5006         while (!list_empty(&ei->i_prealloc_list) && needed) {
5007                 pa = list_entry(ei->i_prealloc_list.prev,
5008                                 struct ext4_prealloc_space, pa_inode_list);
5009                 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
5010                 spin_lock(&pa->pa_lock);
5011                 if (atomic_read(&pa->pa_count)) {
5012                         /* this shouldn't happen often - nobody should
5013                          * use preallocation while we're discarding it */
5014                         spin_unlock(&pa->pa_lock);
5015                         spin_unlock(&ei->i_prealloc_lock);
5016                         ext4_msg(sb, KERN_ERR,
5017                                  "uh-oh! used pa while discarding");
5018                         WARN_ON(1);
5019                         schedule_timeout_uninterruptible(HZ);
5020                         goto repeat;
5021
5022                 }
5023                 if (pa->pa_deleted == 0) {
5024                         ext4_mb_mark_pa_deleted(sb, pa);
5025                         spin_unlock(&pa->pa_lock);
5026                         list_del_rcu(&pa->pa_inode_list);
5027                         list_add(&pa->u.pa_tmp_list, &list);
5028                         needed--;
5029                         continue;
5030                 }
5031
5032                 /* someone is deleting pa right now */
5033                 spin_unlock(&pa->pa_lock);
5034                 spin_unlock(&ei->i_prealloc_lock);
5035
5036                 /* we have to wait here because pa_deleted
5037                  * doesn't mean the pa is already unlinked from
5038                  * the list. since we might be called from
5039                  * ->clear_inode(), the inode will get freed and a
5040                  * concurrent thread unlinking the pa from the
5041                  * inode's list may access already freed memory,
5042                  * bad-bad-bad */
5043
5044                 /* XXX: if this happens too often, we can
5045                  * add a flag to force wait only in case
5046                  * of ->clear_inode(), but not in case of
5047                  * regular truncate */
5048                 schedule_timeout_uninterruptible(HZ);
5049                 goto repeat;
5050         }
5051         spin_unlock(&ei->i_prealloc_lock);
5052
5053         list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5054                 BUG_ON(pa->pa_type != MB_INODE_PA);
5055                 group = ext4_get_group_number(sb, pa->pa_pstart);
5056
5057                 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5058                                              GFP_NOFS|__GFP_NOFAIL);
5059                 if (err) {
5060                         ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5061                                        err, group);
5062                         continue;
5063                 }
5064
5065                 bitmap_bh = ext4_read_block_bitmap(sb, group);
5066                 if (IS_ERR(bitmap_bh)) {
5067                         err = PTR_ERR(bitmap_bh);
5068                         ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
5069                                        err, group);
5070                         ext4_mb_unload_buddy(&e4b);
5071                         continue;
5072                 }
5073
5074                 ext4_lock_group(sb, group);
5075                 list_del(&pa->pa_group_list);
5076                 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5077                 ext4_unlock_group(sb, group);
5078
5079                 ext4_mb_unload_buddy(&e4b);
5080                 put_bh(bitmap_bh);
5081
5082                 list_del(&pa->u.pa_tmp_list);
5083                 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5084         }
5085 }
5086
5087 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
5088 {
5089         struct ext4_prealloc_space *pa;
5090
5091         BUG_ON(ext4_pspace_cachep == NULL);
5092         pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
5093         if (!pa)
5094                 return -ENOMEM;
5095         atomic_set(&pa->pa_count, 1);
5096         ac->ac_pa = pa;
5097         return 0;
5098 }
5099
5100 static void ext4_mb_pa_free(struct ext4_allocation_context *ac)
5101 {
5102         struct ext4_prealloc_space *pa = ac->ac_pa;
5103
5104         BUG_ON(!pa);
5105         ac->ac_pa = NULL;
5106         WARN_ON(!atomic_dec_and_test(&pa->pa_count));
5107         kmem_cache_free(ext4_pspace_cachep, pa);
5108 }
5109
5110 #ifdef CONFIG_EXT4_DEBUG
5111 static inline void ext4_mb_show_pa(struct super_block *sb)
5112 {
5113         ext4_group_t i, ngroups;
5114
5115         if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
5116                 return;
5117
5118         ngroups = ext4_get_groups_count(sb);
5119         mb_debug(sb, "groups: ");
5120         for (i = 0; i < ngroups; i++) {
5121                 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
5122                 struct ext4_prealloc_space *pa;
5123                 ext4_grpblk_t start;
5124                 struct list_head *cur;
5125                 ext4_lock_group(sb, i);
5126                 list_for_each(cur, &grp->bb_prealloc_list) {
5127                         pa = list_entry(cur, struct ext4_prealloc_space,
5128                                         pa_group_list);
5129                         spin_lock(&pa->pa_lock);
5130                         ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5131                                                      NULL, &start);
5132                         spin_unlock(&pa->pa_lock);
5133                         mb_debug(sb, "PA:%u:%d:%d\n", i, start,
5134                                  pa->pa_len);
5135                 }
5136                 ext4_unlock_group(sb, i);
5137                 mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
5138                          grp->bb_fragments);
5139         }
5140 }
5141
5142 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5143 {
5144         struct super_block *sb = ac->ac_sb;
5145
5146         if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
5147                 return;
5148
5149         mb_debug(sb, "Can't allocate:"
5150                         " Allocation context details:");
5151         mb_debug(sb, "status %u flags 0x%x",
5152                         ac->ac_status, ac->ac_flags);
5153         mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
5154                         "goal %lu/%lu/%lu@%lu, "
5155                         "best %lu/%lu/%lu@%lu cr %d",
5156                         (unsigned long)ac->ac_o_ex.fe_group,
5157                         (unsigned long)ac->ac_o_ex.fe_start,
5158                         (unsigned long)ac->ac_o_ex.fe_len,
5159                         (unsigned long)ac->ac_o_ex.fe_logical,
5160                         (unsigned long)ac->ac_g_ex.fe_group,
5161                         (unsigned long)ac->ac_g_ex.fe_start,
5162                         (unsigned long)ac->ac_g_ex.fe_len,
5163                         (unsigned long)ac->ac_g_ex.fe_logical,
5164                         (unsigned long)ac->ac_b_ex.fe_group,
5165                         (unsigned long)ac->ac_b_ex.fe_start,
5166                         (unsigned long)ac->ac_b_ex.fe_len,
5167                         (unsigned long)ac->ac_b_ex.fe_logical,
5168                         (int)ac->ac_criteria);
5169         mb_debug(sb, "%u found", ac->ac_found);
5170         ext4_mb_show_pa(sb);
5171 }
5172 #else
5173 static inline void ext4_mb_show_pa(struct super_block *sb)
5174 {
5175         return;
5176 }
5177 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5178 {
5179         ext4_mb_show_pa(ac->ac_sb);
5180         return;
5181 }
5182 #endif
5183
5184 /*
5185  * We use locality group preallocation for small files. The size of the
5186  * file is determined by the current size or the resulting size after
5187  * allocation, whichever is larger.
5188  *
5189  * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
5190  */
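     /*
      * For example (illustrative, using the default s_mb_stream_request of
      * 16 blocks): on a 4KiB-block filesystem a file whose current or
      * resulting size, whichever is larger, is at most 16 blocks (64KiB) is
      * normally routed to the per-CPU locality group below, while anything
      * bigger is marked EXT4_MB_STREAM_ALLOC and uses inode preallocation.
      */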
5191 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
5192 {
5193         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5194         int bsbits = ac->ac_sb->s_blocksize_bits;
5195         loff_t size, isize;
5196
5197         if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
5198                 return;
5199
5200         if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
5201                 return;
5202
5203         size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
5204         isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
5205                 >> bsbits;
5206
5207         if ((size == isize) && !ext4_fs_is_busy(sbi) &&
5208             !inode_is_open_for_write(ac->ac_inode)) {
5209                 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
5210                 return;
5211         }
5212
5213         if (sbi->s_mb_group_prealloc <= 0) {
5214                 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5215                 return;
5216         }
5217
5218         /* don't use group allocation for large files */
5219         size = max(size, isize);
5220         if (size > sbi->s_mb_stream_request) {
5221                 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5222                 return;
5223         }
5224
5225         BUG_ON(ac->ac_lg != NULL);
5226         /*
5227          * locality group prealloc space is per cpu. The reason for having
5228          * a per-cpu locality group is to reduce the contention between block
5229          * requests from multiple CPUs.
5230          */
5231         ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
5232
5233         /* we're going to use group allocation */
5234         ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
5235
5236         /* serialize all allocations in the group */
5237         mutex_lock(&ac->ac_lg->lg_mutex);
5238 }
5239
5240 static noinline_for_stack int
5241 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
5242                                 struct ext4_allocation_request *ar)
5243 {
5244         struct super_block *sb = ar->inode->i_sb;
5245         struct ext4_sb_info *sbi = EXT4_SB(sb);
5246         struct ext4_super_block *es = sbi->s_es;
5247         ext4_group_t group;
5248         unsigned int len;
5249         ext4_fsblk_t goal;
5250         ext4_grpblk_t block;
5251
5252         /* we can't allocate > group size */
5253         len = ar->len;
5254
5255         /* just a dirty hack to filter too big requests  */
5256         if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
5257                 len = EXT4_CLUSTERS_PER_GROUP(sb);
5258
5259         /* start searching from the goal */
5260         goal = ar->goal;
5261         if (goal < le32_to_cpu(es->s_first_data_block) ||
5262                         goal >= ext4_blocks_count(es))
5263                 goal = le32_to_cpu(es->s_first_data_block);
5264         ext4_get_group_no_and_offset(sb, goal, &group, &block);
5265
5266         /* set up allocation goals */
5267         ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
5268         ac->ac_status = AC_STATUS_CONTINUE;
5269         ac->ac_sb = sb;
5270         ac->ac_inode = ar->inode;
5271         ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
5272         ac->ac_o_ex.fe_group = group;
5273         ac->ac_o_ex.fe_start = block;
5274         ac->ac_o_ex.fe_len = len;
5275         ac->ac_g_ex = ac->ac_o_ex;
5276         ac->ac_flags = ar->flags;
5277
5278         /* we have to define the context: we'll work with a file or
5279          * a locality group. this is a policy decision, actually */
5280         ext4_mb_group_or_file(ac);
5281
5282         mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
5283                         "left: %u/%u, right %u/%u to %swritable\n",
5284                         (unsigned) ar->len, (unsigned) ar->logical,
5285                         (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
5286                         (unsigned) ar->lleft, (unsigned) ar->pleft,
5287                         (unsigned) ar->lright, (unsigned) ar->pright,
5288                         inode_is_open_for_write(ar->inode) ? "" : "non-");
5289         return 0;
5290
5291 }
5292
5293 static noinline_for_stack void
5294 ext4_mb_discard_lg_preallocations(struct super_block *sb,
5295                                         struct ext4_locality_group *lg,
5296                                         int order, int total_entries)
5297 {
5298         ext4_group_t group = 0;
5299         struct ext4_buddy e4b;
5300         struct list_head discard_list;
5301         struct ext4_prealloc_space *pa, *tmp;
5302
5303         mb_debug(sb, "discard locality group preallocation\n");
5304
5305         INIT_LIST_HEAD(&discard_list);
5306
5307         spin_lock(&lg->lg_prealloc_lock);
5308         list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
5309                                 pa_inode_list,
5310                                 lockdep_is_held(&lg->lg_prealloc_lock)) {
5311                 spin_lock(&pa->pa_lock);
5312                 if (atomic_read(&pa->pa_count)) {
5313                         /*
5314                          * This is the pa that we just used
5315                          * for block allocation. So don't
5316                          * free that
5317                          */
5318                         spin_unlock(&pa->pa_lock);
5319                         continue;
5320                 }
5321                 if (pa->pa_deleted) {
5322                         spin_unlock(&pa->pa_lock);
5323                         continue;
5324                 }
5325                 /* only lg prealloc space */
5326                 BUG_ON(pa->pa_type != MB_GROUP_PA);
5327
5328                 /* seems this one can be freed ... */
5329                 ext4_mb_mark_pa_deleted(sb, pa);
5330                 spin_unlock(&pa->pa_lock);
5331
5332                 list_del_rcu(&pa->pa_inode_list);
5333                 list_add(&pa->u.pa_tmp_list, &discard_list);
5334
5335                 total_entries--;
5336                 if (total_entries <= 5) {
5337                         /*
5338                          * we want to keep only 5 entries,
5339                          * allowing the list to grow to 8. This
5340                          * makes sure we don't call discard
5341                          * again too soon for this list.
5342                          */
5343                         break;
5344                 }
5345         }
5346         spin_unlock(&lg->lg_prealloc_lock);
5347
5348         list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
5349                 int err;
5350
5351                 group = ext4_get_group_number(sb, pa->pa_pstart);
5352                 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5353                                              GFP_NOFS|__GFP_NOFAIL);
5354                 if (err) {
5355                         ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5356                                        err, group);
5357                         continue;
5358                 }
5359                 ext4_lock_group(sb, group);
5360                 list_del(&pa->pa_group_list);
5361                 ext4_mb_release_group_pa(&e4b, pa);
5362                 ext4_unlock_group(sb, group);
5363
5364                 ext4_mb_unload_buddy(&e4b);
5365                 list_del(&pa->u.pa_tmp_list);
5366                 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5367         }
5368 }
5369
5370 /*
5371  * We have incremented pa_count. So it cannot be freed at this
5372  * point. Also we hold lg_mutex. So no parallel allocation is
5373  * possible from this lg. That means pa_free cannot be updated.
5374  *
5375  * A parallel ext4_mb_discard_group_preallocations is possible,
5376  * which can cause the lg_prealloc_list to be updated.
5377  */
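/*
 * Worked example (illustrative, not from the original source):
 * lg_prealloc_list is an array of PREALLOC_TB_SIZE lists indexed by
 * order = fls(pa_free) - 1, so a group pa with pa_free == 48 clusters
 * (binary 110000, fls() == 6) lands in bucket 5. Within a bucket, the
 * insertion loop below keeps entries sorted by ascending pa_free.
 */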
5378
5379 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
5380 {
5381         int order, added = 0, lg_prealloc_count = 1;
5382         struct super_block *sb = ac->ac_sb;
5383         struct ext4_locality_group *lg = ac->ac_lg;
5384         struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
5385
5386         order = fls(pa->pa_free) - 1;
5387         if (order > PREALLOC_TB_SIZE - 1)
5388                 /* The max size of hash table is PREALLOC_TB_SIZE */
5389                 order = PREALLOC_TB_SIZE - 1;
5390         /* Add the prealloc space to lg */
5391         spin_lock(&lg->lg_prealloc_lock);
5392         list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
5393                                 pa_inode_list,
5394                                 lockdep_is_held(&lg->lg_prealloc_lock)) {
5395                 spin_lock(&tmp_pa->pa_lock);
5396                 if (tmp_pa->pa_deleted) {
5397                         spin_unlock(&tmp_pa->pa_lock);
5398                         continue;
5399                 }
5400                 if (!added && pa->pa_free < tmp_pa->pa_free) {
5401                         /* insert before the first entry with more free space */
5402                         list_add_tail_rcu(&pa->pa_inode_list,
5403                                                 &tmp_pa->pa_inode_list);
5404                         added = 1;
5405                         /*
5406                          * we want to count the total
5407                          * number of entries in the list
5408                          */
5409                 }
5410                 spin_unlock(&tmp_pa->pa_lock);
5411                 lg_prealloc_count++;
5412         }
5413         if (!added)
5414                 list_add_tail_rcu(&pa->pa_inode_list,
5415                                         &lg->lg_prealloc_list[order]);
5416         spin_unlock(&lg->lg_prealloc_lock);
5417
5418         /* Now trim the list to be not more than 8 elements */
5419         if (lg_prealloc_count > 8)
5420                 ext4_mb_discard_lg_preallocations(sb, lg,
5421                                                   order, lg_prealloc_count);
5425 }
5426
5427 /*
5428  * if per-inode prealloc list is too long, trim some PA
5429  */
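/*
 * Worked example (illustrative, assuming the default
 * s_mb_max_inode_prealloc of 512): delta = (512 >> 2) + 1 = 129, so
 * trimming only starts once the inode holds more than 641 active PAs;
 * at count == 642 we discard 642 - 512 = 130 of the least recently
 * used PAs.
 */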
5430 static void ext4_mb_trim_inode_pa(struct inode *inode)
5431 {
5432         struct ext4_inode_info *ei = EXT4_I(inode);
5433         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5434         int count, delta;
5435
5436         count = atomic_read(&ei->i_prealloc_active);
5437         delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1;
5438         if (count > sbi->s_mb_max_inode_prealloc + delta) {
5439                 count -= sbi->s_mb_max_inode_prealloc;
5440                 ext4_discard_preallocations(inode, count);
5441         }
5442 }
5443
5444 /*
5445  * release all resource we used in allocation
5446  */
5447 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
5448 {
5449         struct inode *inode = ac->ac_inode;
5450         struct ext4_inode_info *ei = EXT4_I(inode);
5451         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5452         struct ext4_prealloc_space *pa = ac->ac_pa;
5453         if (pa) {
5454                 if (pa->pa_type == MB_GROUP_PA) {
5455                         /* see comment in ext4_mb_use_group_pa() */
5456                         spin_lock(&pa->pa_lock);
5457                         pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5458                         pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5459                         pa->pa_free -= ac->ac_b_ex.fe_len;
5460                         pa->pa_len -= ac->ac_b_ex.fe_len;
5461                         spin_unlock(&pa->pa_lock);
5462
5463                         /*
5464                          * We want to add the pa to the right bucket.
5465                          * Remove it from its current list and, while
5466                          * re-adding, make sure the list we are adding
5467                          * to doesn't grow too long.
5468                          */
5469                         if (likely(pa->pa_free)) {
5470                                 spin_lock(pa->pa_obj_lock);
5471                                 list_del_rcu(&pa->pa_inode_list);
5472                                 spin_unlock(pa->pa_obj_lock);
5473                                 ext4_mb_add_n_trim(ac);
5474                         }
5475                 }
5476
5477                 if (pa->pa_type == MB_INODE_PA) {
5478                         /*
5479                          * treat the per-inode prealloc list as an LRU list, then try
5480                          * to trim the least recently used PA.
5481                          */
5482                         spin_lock(pa->pa_obj_lock);
5483                         list_move(&pa->pa_inode_list, &ei->i_prealloc_list);
5484                         spin_unlock(pa->pa_obj_lock);
5485                 }
5486
5487                 ext4_mb_put_pa(ac, ac->ac_sb, pa);
5488         }
5489         if (ac->ac_bitmap_page)
5490                 put_page(ac->ac_bitmap_page);
5491         if (ac->ac_buddy_page)
5492                 put_page(ac->ac_buddy_page);
5493         if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
5494                 mutex_unlock(&ac->ac_lg->lg_mutex);
5495         ext4_mb_collect_stats(ac);
5496         ext4_mb_trim_inode_pa(inode);
5497         return 0;
5498 }
5499
5500 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
5501 {
5502         ext4_group_t i, ngroups = ext4_get_groups_count(sb);
5503         int ret;
5504         int freed = 0, busy = 0;
5505         int retry = 0;
5506
5507         trace_ext4_mb_discard_preallocations(sb, needed);
5508
5509         if (needed == 0)
5510                 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
5511  repeat:
5512         for (i = 0; i < ngroups && needed > 0; i++) {
5513                 ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
5514                 freed += ret;
5515                 needed -= ret;
5516                 cond_resched();
5517         }
5518
5519         if (needed > 0 && busy && ++retry < 3) {
5520                 busy = 0;
5521                 goto repeat;
5522         }
5523
5524         return freed;
5525 }
5526
5527 static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
5528                         struct ext4_allocation_context *ac, u64 *seq)
5529 {
5530         int freed;
5531         u64 seq_retry = 0;
5532         bool ret = false;
5533
5534         freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
5535         if (freed) {
5536                 ret = true;
5537                 goto out_dbg;
5538         }
5539         seq_retry = ext4_get_discard_pa_seq_sum();
5540         if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
5541                 ac->ac_flags |= EXT4_MB_STRICT_CHECK;
5542                 *seq = seq_retry;
5543                 ret = true;
5544         }
5545
5546 out_dbg:
5547         mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
5548         return ret;
5549 }
5550
5551 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5552                                 struct ext4_allocation_request *ar, int *errp);
5553
5554 /*
5555  * Main entry point into mballoc to allocate blocks.
5556  * It tries to use preallocation first, then falls back
5557  * to the usual allocation path.
5558  */
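/*
 * Illustrative caller sketch (an assumption for exposition, not code
 * from this file): an extent-mapping caller fills an
 * ext4_allocation_request roughly like this before calling in:
 *
 *	struct ext4_allocation_request ar = { .inode = inode };
 *
 *	ar.logical = map->m_lblk;	// logical block in the file
 *	ar.goal = goal;			// preferred physical block
 *	ar.len = map->m_len;		// number of blocks wanted
 *	ar.flags = EXT4_MB_HINT_DATA;
 *	newblock = ext4_mb_new_blocks(handle, &ar, &err);
 */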
5559 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
5560                                 struct ext4_allocation_request *ar, int *errp)
5561 {
5562         struct ext4_allocation_context *ac = NULL;
5563         struct ext4_sb_info *sbi;
5564         struct super_block *sb;
5565         ext4_fsblk_t block = 0;
5566         unsigned int inquota = 0;
5567         unsigned int reserv_clstrs = 0;
5568         u64 seq;
5569
5570         might_sleep();
5571         sb = ar->inode->i_sb;
5572         sbi = EXT4_SB(sb);
5573
5574         trace_ext4_request_blocks(ar);
5575         if (sbi->s_mount_state & EXT4_FC_REPLAY)
5576                 return ext4_mb_new_blocks_simple(handle, ar, errp);
5577
5578         /* Allow to use superuser reservation for quota file */
5579         if (ext4_is_quota_file(ar->inode))
5580                 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
5581
5582         if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
5583                 /* Without delayed allocation we need to verify
5584                  * there are enough free blocks to do the allocation
5585                  * and that the allocation doesn't exceed the quota limits.
5586                  */
5587                 while (ar->len &&
5588                         ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
5589
5590                         /* let others free the space */
5591                         cond_resched();
5592                         ar->len = ar->len >> 1;
5593                 }
5594                 if (!ar->len) {
5595                         ext4_mb_show_pa(sb);
5596                         *errp = -ENOSPC;
5597                         return 0;
5598                 }
5599                 reserv_clstrs = ar->len;
5600                 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
5601                         dquot_alloc_block_nofail(ar->inode,
5602                                                  EXT4_C2B(sbi, ar->len));
5603                 } else {
5604                         while (ar->len &&
5605                                 dquot_alloc_block(ar->inode,
5606                                                   EXT4_C2B(sbi, ar->len))) {
5607
5608                                 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
5609                                 ar->len--;
5610                         }
5611                 }
5612                 inquota = ar->len;
5613                 if (ar->len == 0) {
5614                         *errp = -EDQUOT;
5615                         goto out;
5616                 }
5617         }
5618
5619         ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
5620         if (!ac) {
5621                 ar->len = 0;
5622                 *errp = -ENOMEM;
5623                 goto out;
5624         }
5625
5626         *errp = ext4_mb_initialize_context(ac, ar);
5627         if (*errp) {
5628                 ar->len = 0;
5629                 goto out;
5630         }
5631
5632         ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
5633         seq = this_cpu_read(discard_pa_seq);
5634         if (!ext4_mb_use_preallocated(ac)) {
5635                 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
5636                 ext4_mb_normalize_request(ac, ar);
5637
5638                 *errp = ext4_mb_pa_alloc(ac);
5639                 if (*errp)
5640                         goto errout;
5641 repeat:
5642                 /* allocate space in core */
5643                 *errp = ext4_mb_regular_allocator(ac);
5644                 /*
5645                  * The pa allocated above is added to grp->bb_prealloc_list only
5646                  * when we were able to allocate some blocks, i.e. when
5647                  * ac->ac_status == AC_STATUS_FOUND.
5648                  * An error from above means ac->ac_status != AC_STATUS_FOUND,
5649                  * so we have to free the pa here ourselves.
5650                  */
5651                 if (*errp) {
5652                         ext4_mb_pa_free(ac);
5653                         ext4_discard_allocated_blocks(ac);
5654                         goto errout;
5655                 }
5656                 if (ac->ac_status == AC_STATUS_FOUND &&
5657                         ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
5658                         ext4_mb_pa_free(ac);
5659         }
5660         if (likely(ac->ac_status == AC_STATUS_FOUND)) {
5661                 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
5662                 if (*errp) {
5663                         ext4_discard_allocated_blocks(ac);
5664                         goto errout;
5665                 } else {
5666                         block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5667                         ar->len = ac->ac_b_ex.fe_len;
5668                 }
5669         } else {
5670                 if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
5671                         goto repeat;
5672                 /*
5673                  * If block allocation fails, then the pa allocated above
5674                  * needs to be freed here as well.
5675                  */
5676                 ext4_mb_pa_free(ac);
5677                 *errp = -ENOSPC;
5678         }
5679
5680 errout:
5681         if (*errp) {
5682                 ac->ac_b_ex.fe_len = 0;
5683                 ar->len = 0;
5684                 ext4_mb_show_ac(ac);
5685         }
5686         ext4_mb_release_context(ac);
5687 out:
5688         if (ac)
5689                 kmem_cache_free(ext4_ac_cachep, ac);
5690         if (inquota && ar->len < inquota)
5691                 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
5692         if (!ar->len) {
5693                 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
5694                         /* release all the reserved blocks if non delalloc */
5695                         percpu_counter_sub(&sbi->s_dirtyclusters_counter,
5696                                                 reserv_clstrs);
5697         }
5698
5699         trace_ext4_allocate_blocks(ar, (unsigned long long)block);
5700
5701         return block;
5702 }
5703
5704 /*
5705  * We can merge two free data extents only if the physical blocks
5706  * are contiguous, AND the extents were freed by the same transaction,
5707  * AND the blocks are associated with the same group.
5708  */
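/*
 * Example (illustrative): if "entry" covers clusters [100, 120) and
 * "new_entry" covers [120, 130) in the same group and transaction,
 * new_entry is widened to cover [100, 130) and entry is unlinked from
 * the rb-tree and freed.
 */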
5709 static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
5710                                         struct ext4_free_data *entry,
5711                                         struct ext4_free_data *new_entry,
5712                                         struct rb_root *entry_rb_root)
5713 {
5714         if ((entry->efd_tid != new_entry->efd_tid) ||
5715             (entry->efd_group != new_entry->efd_group))
5716                 return;
5717         if (entry->efd_start_cluster + entry->efd_count ==
5718             new_entry->efd_start_cluster) {
5719                 new_entry->efd_start_cluster = entry->efd_start_cluster;
5720                 new_entry->efd_count += entry->efd_count;
5721         } else if (new_entry->efd_start_cluster + new_entry->efd_count ==
5722                    entry->efd_start_cluster) {
5723                 new_entry->efd_count += entry->efd_count;
5724         } else
5725                 return;
5726         spin_lock(&sbi->s_md_lock);
5727         list_del(&entry->efd_list);
5728         spin_unlock(&sbi->s_md_lock);
5729         rb_erase(&entry->efd_node, entry_rb_root);
5730         kmem_cache_free(ext4_free_data_cachep, entry);
5731 }
5732
5733 static noinline_for_stack int
5734 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
5735                       struct ext4_free_data *new_entry)
5736 {
5737         ext4_group_t group = e4b->bd_group;
5738         ext4_grpblk_t cluster;
5739         ext4_grpblk_t clusters = new_entry->efd_count;
5740         struct ext4_free_data *entry;
5741         struct ext4_group_info *db = e4b->bd_info;
5742         struct super_block *sb = e4b->bd_sb;
5743         struct ext4_sb_info *sbi = EXT4_SB(sb);
5744         struct rb_node **n = &db->bb_free_root.rb_node, *node;
5745         struct rb_node *parent = NULL, *new_node;
5746
5747         BUG_ON(!ext4_handle_valid(handle));
5748         BUG_ON(e4b->bd_bitmap_page == NULL);
5749         BUG_ON(e4b->bd_buddy_page == NULL);
5750
5751         new_node = &new_entry->efd_node;
5752         cluster = new_entry->efd_start_cluster;
5753
5754         if (!*n) {
5755                 /* First free block extent. We need to
5756                  * protect the buddy cache from being freed;
5757                  * otherwise we'll refresh it from the
5758                  * on-disk bitmap and lose not-yet-available
5759                  * blocks */
5760                 get_page(e4b->bd_buddy_page);
5761                 get_page(e4b->bd_bitmap_page);
5762         }
5763         while (*n) {
5764                 parent = *n;
5765                 entry = rb_entry(parent, struct ext4_free_data, efd_node);
5766                 if (cluster < entry->efd_start_cluster)
5767                         n = &(*n)->rb_left;
5768                 else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
5769                         n = &(*n)->rb_right;
5770                 else {
5771                         ext4_grp_locked_error(sb, group, 0,
5772                                 ext4_group_first_block_no(sb, group) +
5773                                 EXT4_C2B(sbi, cluster),
5774                                 "Block already on to-be-freed list");
5775                         kmem_cache_free(ext4_free_data_cachep, new_entry);
5776                         return 0;
5777                 }
5778         }
5779
5780         rb_link_node(new_node, parent, n);
5781         rb_insert_color(new_node, &db->bb_free_root);
5782
5783         /* Now see if the extent can be merged with its left and right neighbors */
5784         node = rb_prev(new_node);
5785         if (node) {
5786                 entry = rb_entry(node, struct ext4_free_data, efd_node);
5787                 ext4_try_merge_freed_extent(sbi, entry, new_entry,
5788                                             &(db->bb_free_root));
5789         }
5790
5791         node = rb_next(new_node);
5792         if (node) {
5793                 entry = rb_entry(node, struct ext4_free_data, efd_node);
5794                 ext4_try_merge_freed_extent(sbi, entry, new_entry,
5795                                             &(db->bb_free_root));
5796         }
5797
5798         spin_lock(&sbi->s_md_lock);
5799         list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
5800         sbi->s_mb_free_pending += clusters;
5801         spin_unlock(&sbi->s_md_lock);
5802         return 0;
5803 }
5804
5805 /*
5806  * Simple allocator for Ext4 fast commit replay path. It searches for blocks
5807  * linearly starting at the goal block and also excludes the blocks which
5808  * are going to be in use after fast commit replay.
5809  */
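/*
 * Example walk (illustrative): with a goal block inside the first group,
 * mb_find_next_zero_bit() returns the first zero bit at or after the
 * goal offset; if that block is on the fast-commit exclusion list the
 * scan resumes at the next bit, otherwise the single block is marked
 * used via ext4_mb_mark_bb() and returned.
 */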
5810 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5811                                 struct ext4_allocation_request *ar, int *errp)
5812 {
5813         struct buffer_head *bitmap_bh;
5814         struct super_block *sb = ar->inode->i_sb;
5815         ext4_group_t group;
5816         ext4_grpblk_t blkoff;
5817         ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
5818         ext4_grpblk_t i = 0;
5819         ext4_fsblk_t goal, block;
5820         struct ext4_super_block *es = EXT4_SB(sb)->s_es;
5821
5822         goal = ar->goal;
5823         if (goal < le32_to_cpu(es->s_first_data_block) ||
5824                         goal >= ext4_blocks_count(es))
5825                 goal = le32_to_cpu(es->s_first_data_block);
5826
5827         ar->len = 0;
5828         ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
5829         for (; group < ext4_get_groups_count(sb); group++) {
5830                 bitmap_bh = ext4_read_block_bitmap(sb, group);
5831                 if (IS_ERR(bitmap_bh)) {
5832                         *errp = PTR_ERR(bitmap_bh);
5833                         pr_warn("Failed to read block bitmap\n");
5834                         return 0;
5835                 }
5836
5837                 ext4_get_group_no_and_offset(sb,
5838                         max(ext4_group_first_block_no(sb, group), goal),
5839                         NULL, &blkoff);
5840                 while (1) {
5841                         i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
5842                                                 blkoff);
5843                         if (i >= max)
5844                                 break;
5845                         if (ext4_fc_replay_check_excluded(sb,
5846                                 ext4_group_first_block_no(sb, group) + i)) {
5847                                 blkoff = i + 1;
5848                         } else
5849                                 break;
5850                 }
5851                 brelse(bitmap_bh);
5852                 if (i < max)
5853                         break;
5854         }
5855
5856         if (group >= ext4_get_groups_count(sb) || i >= max) {
5857                 *errp = -ENOSPC;
5858                 return 0;
5859         }
5860
5861         block = ext4_group_first_block_no(sb, group) + i;
5862         ext4_mb_mark_bb(sb, block, 1, 1);
5863         ar->len = 1;
5864
5865         return block;
5866 }
5867
5868 static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
5869                                         unsigned long count)
5870 {
5871         struct buffer_head *bitmap_bh;
5872         struct super_block *sb = inode->i_sb;
5873         struct ext4_group_desc *gdp;
5874         struct buffer_head *gdp_bh;
5875         ext4_group_t group;
5876         ext4_grpblk_t blkoff;
5877         int already_freed = 0, err, i;
5878
5879         ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
5880         gdp = ext4_get_group_desc(sb, group, &gdp_bh);
5881         if (!gdp)
5882                 return;
5883         bitmap_bh = ext4_read_block_bitmap(sb, group);
5884         if (IS_ERR(bitmap_bh)) {
5885                 err = PTR_ERR(bitmap_bh);
5886                 pr_warn("Failed to read block bitmap\n");
5887                 return;
5888         }
5889
5890         for (i = 0; i < count; i++) {
5891                 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data))
5892                         already_freed++;
5893         }
5894         mb_clear_bits(bitmap_bh->b_data, blkoff, count);
5895         err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
5896         if (err)
5897                 goto out_brelse;
5898         ext4_free_group_clusters_set(
5899                 sb, gdp, ext4_free_group_clusters(sb, gdp) +
5900                 count - already_freed);
5901         ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
5902         ext4_group_desc_csum_set(sb, group, gdp);
5903         ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
5904         sync_dirty_buffer(bitmap_bh);
5905         sync_dirty_buffer(gdp_bh);
out_brelse:
5906         brelse(bitmap_bh);
5907 }
5908
5909 /**
5910  * ext4_mb_clear_bb() -- helper function for freeing blocks.
5911  *                      Used by ext4_free_blocks()
5912  * @handle:             handle for this transaction
5913  * @inode:              inode
5914  * @block:              starting physical block to be freed
5915  * @count:              number of blocks to be freed
5916  * @flags:              flags used by ext4_free_blocks
5917  */
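/*
 * Worked example for the group-boundary handling below (illustrative,
 * assuming 32768 blocks per group and a 1:1 cluster ratio): freeing
 * count == 100 blocks starting at in-group bit 32700 gives
 * overflow = 32700 + 100 - 32768 = 32, so the first pass frees 68
 * blocks in this group, and the "do_more" loop then frees the
 * remaining 32 blocks at the start of the next group.
 */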
5918 static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
5919                                ext4_fsblk_t block, unsigned long count,
5920                                int flags)
5921 {
5922         struct buffer_head *bitmap_bh = NULL;
5923         struct super_block *sb = inode->i_sb;
5924         struct ext4_group_desc *gdp;
5925         unsigned int overflow;
5926         ext4_grpblk_t bit;
5927         struct buffer_head *gd_bh;
5928         ext4_group_t block_group;
5929         struct ext4_sb_info *sbi;
5930         struct ext4_buddy e4b;
5931         unsigned int count_clusters;
5932         int err = 0;
5933         int ret;
5934
5935         sbi = EXT4_SB(sb);
5936
5937         if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
5938             !ext4_inode_block_valid(inode, block, count)) {
5939                 ext4_error(sb, "Freeing blocks in system zone - "
5940                            "Block = %llu, count = %lu", block, count);
5941                 /* err = 0. ext4_std_error should be a no-op */
5942                 goto error_return;
5943         }
5944         flags |= EXT4_FREE_BLOCKS_VALIDATED;
5945
5946 do_more:
5947         overflow = 0;
5948         ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
5949
5950         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
5951                         ext4_get_group_info(sb, block_group))))
5952                 return;
5953
5954         /*
5955          * Check to see if we are freeing blocks across a group
5956          * boundary.
5957          */
5958         if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
5959                 overflow = EXT4_C2B(sbi, bit) + count -
5960                         EXT4_BLOCKS_PER_GROUP(sb);
5961                 count -= overflow;
5962                 /* The range changed so it's no longer validated */
5963                 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
5964         }
5965         count_clusters = EXT4_NUM_B2C(sbi, count);
5966         bitmap_bh = ext4_read_block_bitmap(sb, block_group);
5967         if (IS_ERR(bitmap_bh)) {
5968                 err = PTR_ERR(bitmap_bh);
5969                 bitmap_bh = NULL;
5970                 goto error_return;
5971         }
5972         gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
5973         if (!gdp) {
5974                 err = -EIO;
5975                 goto error_return;
5976         }
5977
5978         if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
5979             !ext4_inode_block_valid(inode, block, count)) {
5980                 ext4_error(sb, "Freeing blocks in system zone - "
5981                            "Block = %llu, count = %lu", block, count);
5982                 /* err = 0. ext4_std_error should be a no-op */
5983                 goto error_return;
5984         }
5985
5986         BUFFER_TRACE(bitmap_bh, "getting write access");
5987         err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
5988                                             EXT4_JTR_NONE);
5989         if (err)
5990                 goto error_return;
5991
5992         /*
5993          * We are about to modify some metadata.  Call the journal APIs
5994          * to unshare ->b_data if a currently-committing transaction is
5995          * using it
5996          */
5997         BUFFER_TRACE(gd_bh, "get_write_access");
5998         err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
5999         if (err)
6000                 goto error_return;
6001 #ifdef AGGRESSIVE_CHECK
6002         {
6003                 int i;
6004                 for (i = 0; i < count_clusters; i++)
6005                         BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
6006         }
6007 #endif
6008         trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
6009
6010         /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
6011         err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
6012                                      GFP_NOFS|__GFP_NOFAIL);
6013         if (err)
6014                 goto error_return;
6015
6016         /*
6017          * We need to make sure we don't reuse the freed block until after the
6018          * transaction is committed. We make an exception if the inode is to be
6019          * written in writeback mode since writeback mode has weak data
6020          * consistency guarantees.
6021          */
6022         if (ext4_handle_valid(handle) &&
6023             ((flags & EXT4_FREE_BLOCKS_METADATA) ||
6024              !ext4_should_writeback_data(inode))) {
6025                 struct ext4_free_data *new_entry;
6026                 /*
6027                  * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
6028                  * to fail.
6029                  */
6030                 new_entry = kmem_cache_alloc(ext4_free_data_cachep,
6031                                 GFP_NOFS|__GFP_NOFAIL);
6032                 new_entry->efd_start_cluster = bit;
6033                 new_entry->efd_group = block_group;
6034                 new_entry->efd_count = count_clusters;
6035                 new_entry->efd_tid = handle->h_transaction->t_tid;
6036
6037                 ext4_lock_group(sb, block_group);
6038                 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
6039                 ext4_mb_free_metadata(handle, &e4b, new_entry);
6040         } else {
6041                 /* need to update group_info->bb_free and bitmap
6042                  * with group lock held; generate_buddy looks at
6043                  * them with the group lock held
6044                  */
6045                 if (test_opt(sb, DISCARD)) {
6046                         err = ext4_issue_discard(sb, block_group, bit, count,
6047                                                  NULL);
6048                         if (err && err != -EOPNOTSUPP)
6049                                 ext4_msg(sb, KERN_WARNING, "discard request in"
6050                                          " group:%u block:%d count:%lu failed"
6051                                          " with %d", block_group, bit, count,
6052                                          err);
6053                 } else
6054                         EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
6055
6056                 ext4_lock_group(sb, block_group);
6057                 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
6058                 mb_free_blocks(inode, &e4b, bit, count_clusters);
6059         }
6060
6061         ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
6062         ext4_free_group_clusters_set(sb, gdp, ret);
6063         ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
6064         ext4_group_desc_csum_set(sb, block_group, gdp);
6065         ext4_unlock_group(sb, block_group);
6066
6067         if (sbi->s_log_groups_per_flex) {
6068                 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
6069                 atomic64_add(count_clusters,
6070                              &sbi_array_rcu_deref(sbi, s_flex_groups,
6071                                                   flex_group)->free_clusters);
6072         }
6073
6074         /*
6075          * on a bigalloc file system, defer the s_freeclusters_counter
6076          * update to the caller (ext4_remove_space and friends) so they
6077          * can determine if a cluster freed here should be rereserved
6078          */
6079         if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
6080                 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
6081                         dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
6082                 percpu_counter_add(&sbi->s_freeclusters_counter,
6083                                    count_clusters);
6084         }
6085
6086         ext4_mb_unload_buddy(&e4b);
6087
6088         /* We dirtied the bitmap block */
6089         BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
6090         err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
6091
6092         /* And the group descriptor block */
6093         BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
6094         ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
6095         if (!err)
6096                 err = ret;
6097
6098         if (overflow && !err) {
6099                 block += count;
6100                 count = overflow;
6101                 put_bh(bitmap_bh);
6102                 /* The range changed so it's no longer validated */
6103                 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6104                 goto do_more;
6105         }
6106 error_return:
6107         brelse(bitmap_bh);
6108         ext4_std_error(sb, err);
6109         return;
6110 }
6111
6112 /**
6113  * ext4_free_blocks() -- Free given blocks and update quota
6114  * @handle:             handle for this transaction
6115  * @inode:              inode
6116  * @bh:                 optional buffer of the block to be freed
6117  * @block:              starting physical block to be freed
6118  * @count:              number of blocks to be freed
6119  * @flags:              flags used by ext4_free_blocks
6120  */
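/*
 * Illustrative call (an assumption, mirroring how callers free a
 * metadata block whose buffer head is already in hand):
 *
 *	ext4_free_blocks(handle, inode, bh, 0, 1,
 *			 EXT4_FREE_BLOCKS_METADATA |
 *			 EXT4_FREE_BLOCKS_FORGET);
 *
 * Passing block == 0 together with a bh is fine: the block number is
 * then taken from bh->b_blocknr below.
 */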
6121 void ext4_free_blocks(handle_t *handle, struct inode *inode,
6122                       struct buffer_head *bh, ext4_fsblk_t block,
6123                       unsigned long count, int flags)
6124 {
6125         struct super_block *sb = inode->i_sb;
6126         unsigned int overflow;
6127         struct ext4_sb_info *sbi;
6128
6129         sbi = EXT4_SB(sb);
6130
6131         if (sbi->s_mount_state & EXT4_FC_REPLAY) {
6132                 ext4_free_blocks_simple(inode, block, count);
6133                 return;
6134         }
6135
6136         might_sleep();
6137         if (bh) {
6138                 if (block)
6139                         BUG_ON(block != bh->b_blocknr);
6140                 else
6141                         block = bh->b_blocknr;
6142         }
6143
6144         if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6145             !ext4_inode_block_valid(inode, block, count)) {
6146                 ext4_error(sb, "Freeing blocks not in datazone - "
6147                            "block = %llu, count = %lu", block, count);
6148                 return;
6149         }
6150         flags |= EXT4_FREE_BLOCKS_VALIDATED;
6151
6152         ext4_debug("freeing block %llu\n", block);
6153         trace_ext4_free_blocks(inode, block, count, flags);
6154
6155         if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6156                 BUG_ON(count > 1);
6157
6158                 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
6159                             inode, bh, block);
6160         }
6161
6162         /*
6163          * If the extent to be freed does not begin on a cluster
6164          * boundary, we need to deal with partial clusters at the
6165          * beginning and end of the extent.  Normally we will free
6166          * blocks at the beginning or the end unless we are explicitly
6167          * requested to avoid doing so.
6168          */
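        /*
         * Worked example (illustrative, bigalloc with a cluster ratio
         * of 16): freeing block 100, count 20 gives EXT4_PBLK_COFF == 4,
         * so the range is first widened to block 96, count 24; the tail
         * rounding below then extends count to 32, i.e. the two whole
         * clusters covering blocks 96..127.
         */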
6169         overflow = EXT4_PBLK_COFF(sbi, block);
6170         if (overflow) {
6171                 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
6172                         overflow = sbi->s_cluster_ratio - overflow;
6173                         block += overflow;
6174                         if (count > overflow)
6175                                 count -= overflow;
6176                         else
6177                                 return;
6178                 } else {
6179                         block -= overflow;
6180                         count += overflow;
6181                 }
6182                 /* The range changed so it's no longer validated */
6183                 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6184         }
6185         overflow = EXT4_LBLK_COFF(sbi, count);
6186         if (overflow) {
6187                 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
6188                         if (count > overflow)
6189                                 count -= overflow;
6190                         else
6191                                 return;
6192                 } else
6193                         count += sbi->s_cluster_ratio - overflow;
6194                 /* The range changed so it's no longer validated */
6195                 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6196         }
6197
6198         if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6199                 int i;
6200                 int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
6201
6202                 for (i = 0; i < count; i++) {
6203                         cond_resched();
6204                         if (is_metadata)
6205                                 bh = sb_find_get_block(inode->i_sb, block + i);
6206                         ext4_forget(handle, is_metadata, inode, bh, block + i);
6207                 }
6208         }
6209
6210         ext4_mb_clear_bb(handle, inode, block, count, flags);
6211         return;
6212 }
6213
6214 /**
6215  * ext4_group_add_blocks() -- Add given blocks to an existing group
6216  * @handle:                     handle to this transaction
6217  * @sb:                         super block
6218  * @block:                      start physical block to add to the block group
6219  * @count:                      number of blocks to free
6220  *
6221  * This marks the blocks as free in the bitmap and buddy.
6222  */
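/*
 * Cluster-math example for the conversions below (illustrative, bigalloc
 * with a cluster ratio of 16): block == 40, count == 40 gives
 * first_cluster = EXT4_B2C(40) = 2, last_cluster = EXT4_B2C(79) = 4,
 * and hence cluster_count == 3.
 */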
6223 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
6224                          ext4_fsblk_t block, unsigned long count)
6225 {
6226         struct buffer_head *bitmap_bh = NULL;
6227         struct buffer_head *gd_bh;
6228         ext4_group_t block_group;
6229         ext4_grpblk_t bit;
6230         unsigned int i;
6231         struct ext4_group_desc *desc;
6232         struct ext4_sb_info *sbi = EXT4_SB(sb);
6233         struct ext4_buddy e4b;
6234         int err = 0, ret, free_clusters_count;
6235         ext4_grpblk_t clusters_freed;
6236         ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
6237         ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
6238         unsigned long cluster_count = last_cluster - first_cluster + 1;
6239
6240         ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
6241
6242         if (count == 0)
6243                 return 0;
6244
6245         ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6246         /*
6247          * Check to see if we are freeing blocks across a group
6248          * boundary.
6249          */
6250         if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
6251                 ext4_warning(sb, "too many blocks added to group %u",
6252                              block_group);
6253                 err = -EINVAL;
6254                 goto error_return;
6255         }
6256
6257         bitmap_bh = ext4_read_block_bitmap(sb, block_group);
6258         if (IS_ERR(bitmap_bh)) {
6259                 err = PTR_ERR(bitmap_bh);
6260                 bitmap_bh = NULL;
6261                 goto error_return;
6262         }
6263
6264         desc = ext4_get_group_desc(sb, block_group, &gd_bh);
6265         if (!desc) {
6266                 err = -EIO;
6267                 goto error_return;
6268         }
6269
6270         if (!ext4_sb_block_valid(sb, NULL, block, count)) {
6271                 ext4_error(sb, "Adding blocks in system zones - "
6272                            "Block = %llu, count = %lu",
6273                            block, count);
6274                 err = -EINVAL;
6275                 goto error_return;
6276         }
6277
6278         BUFFER_TRACE(bitmap_bh, "getting write access");
6279         err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
6280                                             EXT4_JTR_NONE);
6281         if (err)
6282                 goto error_return;
6283
6284         /*
6285          * We are about to modify some metadata.  Call the journal APIs
6286          * to unshare ->b_data if a currently-committing transaction is
6287          * using it
6288          */
6289         BUFFER_TRACE(gd_bh, "get_write_access");
6290         err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
6291         if (err)
6292                 goto error_return;
6293
6294         for (i = 0, clusters_freed = 0; i < cluster_count; i++) {
6295                 BUFFER_TRACE(bitmap_bh, "clear bit");
6296                 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
6297                         ext4_error(sb, "bit already cleared for block %llu",
6298                                    (ext4_fsblk_t)(block + i));
6299                         BUFFER_TRACE(bitmap_bh, "bit already cleared");
6300                 } else {
6301                         clusters_freed++;
6302                 }
6303         }
6304
6305         err = ext4_mb_load_buddy(sb, block_group, &e4b);
6306         if (err)
6307                 goto error_return;
6308
6309         /*
6310          * need to update group_info->bb_free and bitmap
6311          * with group lock held; generate_buddy looks at
6312          * them with the group lock held
6313          */
6314         ext4_lock_group(sb, block_group);
6315         mb_clear_bits(bitmap_bh->b_data, bit, cluster_count);
6316         mb_free_blocks(NULL, &e4b, bit, cluster_count);
6317         free_clusters_count = clusters_freed +
6318                 ext4_free_group_clusters(sb, desc);
6319         ext4_free_group_clusters_set(sb, desc, free_clusters_count);
6320         ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
6321         ext4_group_desc_csum_set(sb, block_group, desc);
6322         ext4_unlock_group(sb, block_group);
6323         percpu_counter_add(&sbi->s_freeclusters_counter,
6324                            clusters_freed);
6325
6326         if (sbi->s_log_groups_per_flex) {
6327                 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
6328                 atomic64_add(clusters_freed,
6329                              &sbi_array_rcu_deref(sbi, s_flex_groups,
6330                                                   flex_group)->free_clusters);
6331         }
6332
6333         ext4_mb_unload_buddy(&e4b);
6334
6335         /* We dirtied the bitmap block */
6336         BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
6337         err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
6338
6339         /* And the group descriptor block */
6340         BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
6341         ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
6342         if (!err)
6343                 err = ret;
6344
6345 error_return:
6346         brelse(bitmap_bh);
6347         ext4_std_error(sb, err);
6348         return err;
6349 }
6350
6351 /**
6352  * ext4_trim_extent -- function to TRIM one single free extent in the group
6353  * @sb:         super block for the file system
6354  * @start:      starting block of the free extent in the alloc. group
6355  * @count:      number of blocks to TRIM
6356  * @e4b:        ext4 buddy for the group
6357  *
6358  * Trim "count" blocks starting at "start" in "group". To ensure that no
6359  * one will allocate those blocks, mark them as used in the buddy bitmap. This
6360  * must be called under the group lock.
6361  */
6362 static int ext4_trim_extent(struct super_block *sb,
6363                 int start, int count, struct ext4_buddy *e4b)
6364 __releases(bitlock)
6365 __acquires(bitlock)
6366 {
6367         struct ext4_free_extent ex;
6368         ext4_group_t group = e4b->bd_group;
6369         int ret = 0;
6370
6371         trace_ext4_trim_extent(sb, group, start, count);
6372
6373         assert_spin_locked(ext4_group_lock_ptr(sb, group));
6374
6375         ex.fe_start = start;
6376         ex.fe_group = group;
6377         ex.fe_len = count;
6378
6379         /*
6380          * Mark blocks used, so no one can reuse them while
6381          * being trimmed.
6382          */
6383         mb_mark_used(e4b, &ex);
6384         ext4_unlock_group(sb, group);
6385         ret = ext4_issue_discard(sb, group, start, count, NULL);
6386         ext4_lock_group(sb, group);
6387         mb_free_blocks(NULL, e4b, start, ex.fe_len);
6388         return ret;
6389 }
6390
6391 static int ext4_try_to_trim_range(struct super_block *sb,
6392                 struct ext4_buddy *e4b, ext4_grpblk_t start,
6393                 ext4_grpblk_t max, ext4_grpblk_t minblocks)
6394 __acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
6395 __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
6396 {
6397         ext4_grpblk_t next, count, free_count;
6398         void *bitmap;
6399
6400         bitmap = e4b->bd_bitmap;
6401         start = (e4b->bd_info->bb_first_free > start) ?
6402                 e4b->bd_info->bb_first_free : start;
6403         count = 0;
6404         free_count = 0;
6405
6406         while (start <= max) {
6407                 start = mb_find_next_zero_bit(bitmap, max + 1, start);
6408                 if (start > max)
6409                         break;
6410                 next = mb_find_next_bit(bitmap, max + 1, start);
6411
6412                 if ((next - start) >= minblocks) {
6413                         int ret = ext4_trim_extent(sb, start, next - start, e4b);
6414
6415                         if (ret && ret != -EOPNOTSUPP)
6416                                 break;
6417                         count += next - start;
6418                 }
6419                 free_count += next - start;
6420                 start = next + 1;
6421
6422                 if (fatal_signal_pending(current)) {
6423                         count = -ERESTARTSYS;
6424                         break;
6425                 }
6426
6427                 if (need_resched()) {
6428                         ext4_unlock_group(sb, e4b->bd_group);
6429                         cond_resched();
6430                         ext4_lock_group(sb, e4b->bd_group);
6431                 }
6432
6433                 if ((e4b->bd_info->bb_free - free_count) < minblocks)
6434                         break;
6435         }
6436
6437         return count;
6438 }
6439
6440 /**
6441  * ext4_trim_all_free -- function to trim all free space in alloc. group
6442  * @sb:                 super block for file system
6443  * @group:              group to be trimmed
6444  * @start:              first group block to examine
6445  * @max:                last group block to examine
6446  * @minblocks:          minimum extent block count
6447  * @set_trimmed:        set the trimmed flag if at least one block is trimmed
6448  *
6449  * ext4_trim_all_free walks through the group's block bitmap searching for free
6450  * extents. When a free extent is found, it is marked as used in the group buddy
6451  * bitmap, a TRIM command is issued on the extent, and the extent is then freed
6452  * again in the group buddy bitmap.
6453  */
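/*
 * Example of the s_last_trim_minblks check below (illustrative): if a
 * previous FITRIM already trimmed this group with a minimum extent of 4
 * clusters, a later call with minblocks >= 4 sees the TRIMMED flag set
 * and returns 0 without rescanning the bitmap, while a call with a
 * smaller minblocks still rescans.
 */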
6454 static ext4_grpblk_t
6455 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
6456                    ext4_grpblk_t start, ext4_grpblk_t max,
6457                    ext4_grpblk_t minblocks, bool set_trimmed)
6458 {
6459         struct ext4_buddy e4b;
6460         int ret;
6461
6462         trace_ext4_trim_all_free(sb, group, start, max);
6463
6464         ret = ext4_mb_load_buddy(sb, group, &e4b);
6465         if (ret) {
6466                 ext4_warning(sb, "Error %d loading buddy information for %u",
6467                              ret, group);
6468                 return ret;
6469         }
6470
6471         ext4_lock_group(sb, group);
6472
6473         if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
6474             minblocks < EXT4_SB(sb)->s_last_trim_minblks) {
6475                 ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
6476                 if (ret >= 0 && set_trimmed)
6477                         EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
6478         } else {
6479                 ret = 0;
6480         }
6481
6482         ext4_unlock_group(sb, group);
6483         ext4_mb_unload_buddy(&e4b);
6484
6485         ext4_debug("trimmed %d blocks in the group %d\n",
6486                 ret, group);
6487
6488         return ret;
6489 }
6490
6491 /**
6492  * ext4_trim_fs() -- trim ioctl handle function
6493  * @sb:                 superblock for filesystem
6494  * @range:              fstrim_range structure
6495  *
6496  * start:       first byte to trim
6497  * len:         number of bytes to trim from start
6498  * minlen:      minimum extent length in bytes
6499  * ext4_trim_fs goes through all allocation groups containing bytes from
6500  * start to start+len. For each such group the ext4_trim_all_free function
6501  * is invoked to trim all free space.
6502  */
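/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * the FITRIM ioctl is what ultimately lands here, e.g.
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = ULLONG_MAX,	// trim the whole filesystem
 *		.minlen = 0,
 *	};
 *
 *	if (ioctl(fd, FITRIM, &range) == 0)
 *		printf("trimmed %llu bytes\n", range.len);
 */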
6503 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
6504 {
6505         unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
6506         struct ext4_group_info *grp;
6507         ext4_group_t group, first_group, last_group;
6508         ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
6509         uint64_t start, end, minlen, trimmed = 0;
6510         ext4_fsblk_t first_data_blk =
6511                         le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
6512         ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
6513         bool whole_group, eof = false;
6514         int ret = 0;
6515
6516         start = range->start >> sb->s_blocksize_bits;
6517         end = start + (range->len >> sb->s_blocksize_bits) - 1;
6518         minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6519                               range->minlen >> sb->s_blocksize_bits);
6520
6521         if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
6522             start >= max_blks ||
6523             range->len < sb->s_blocksize)
6524                 return -EINVAL;
6525         /* No point in trying to trim less than the discard granularity */
6526         if (range->minlen < discard_granularity) {
6527                 minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6528                                 discard_granularity >> sb->s_blocksize_bits);
6529                 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
6530                         goto out;
6531         }
6532         if (end >= max_blks - 1) {
6533                 end = max_blks - 1;
6534                 eof = true;
6535         }
6536         if (end <= first_data_blk)
6537                 goto out;
6538         if (start < first_data_blk)
6539                 start = first_data_blk;
6540
6541         /* Determine first and last group to examine based on start and end */
6542         ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
6543                                      &first_group, &first_cluster);
6544         ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
6545                                      &last_group, &last_cluster);
6546
6547         /* end now represents the last cluster to discard in this group */
6548         end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6549         whole_group = true;
6550
6551         for (group = first_group; group <= last_group; group++) {
6552                 grp = ext4_get_group_info(sb, group);
6553                 /* We only do this if the grp has never been initialized */
6554                 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
6555                         ret = ext4_mb_init_group(sb, group, GFP_NOFS);
6556                         if (ret)
6557                                 break;
6558                 }
6559
6560                 /*
6561                  * For all the groups except the last one, last cluster will
6562                  * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
6563                  * change it for the last group; note that last_cluster is
6564                  * already computed earlier by ext4_get_group_no_and_offset()
6565                  */
6566                 if (group == last_group) {
6567                         end = last_cluster;
6568                         whole_group = eof ? true : end == EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6569                 }
6570                 if (grp->bb_free >= minlen) {
6571                         cnt = ext4_trim_all_free(sb, group, first_cluster,
6572                                                  end, minlen, whole_group);
6573                         if (cnt < 0) {
6574                                 ret = cnt;
6575                                 break;
6576                         }
6577                         trimmed += cnt;
6578                 }
6579
6580                 /*
6581                  * For every group except the first one, we are sure
6582                  * that the first cluster to discard will be cluster #0.
6583                  */
6584                 first_cluster = 0;
6585         }
6586
6587         if (!ret)
6588                 EXT4_SB(sb)->s_last_trim_minblks = minlen;
6589
6590 out:
6591         range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
6592         return ret;
6593 }
6594
6595 /* Iterate all the free extents in the group. */
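/*
 * Illustrative formatter (a hypothetical callback matching the
 * ext4_mballoc_query_range_fn signature) that sums the free clusters it
 * is handed:
 *
 *	static int sum_free_fn(struct super_block *sb, ext4_group_t group,
 *			       ext4_grpblk_t start, ext4_grpblk_t len,
 *			       void *priv)
 *	{
 *		*(ext4_grpblk_t *)priv += len;
 *		return 0;
 *	}
 */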
6596 int
6597 ext4_mballoc_query_range(
6598         struct super_block              *sb,
6599         ext4_group_t                    group,
6600         ext4_grpblk_t                   start,
6601         ext4_grpblk_t                   end,
6602         ext4_mballoc_query_range_fn     formatter,
6603         void                            *priv)
6604 {
6605         void                            *bitmap;
6606         ext4_grpblk_t                   next;
6607         struct ext4_buddy               e4b;
6608         int                             error;
6609
6610         error = ext4_mb_load_buddy(sb, group, &e4b);
6611         if (error)
6612                 return error;
6613         bitmap = e4b.bd_bitmap;
6614
6615         ext4_lock_group(sb, group);
6616
6617         start = (e4b.bd_info->bb_first_free > start) ?
6618                 e4b.bd_info->bb_first_free : start;
6619         if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
6620                 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6621
6622         while (start <= end) {
6623                 start = mb_find_next_zero_bit(bitmap, end + 1, start);
6624                 if (start > end)
6625                         break;
6626                 next = mb_find_next_bit(bitmap, end + 1, start);
6627
6628                 ext4_unlock_group(sb, group);
6629                 error = formatter(sb, group, start, next - start, priv);
6630                 if (error)
6631                         goto out_unload;
6632                 ext4_lock_group(sb, group);
6633
6634                 start = next + 1;
6635         }
6636
6637         ext4_unlock_group(sb, group);
6638 out_unload:
6639         ext4_mb_unload_buddy(&e4b);
6640
6641         return error;
6642 }