/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT    0x1  /* safe to zeroout if split fails \
                                        due to ENOSPC */
#define EXT4_EXT_MARK_UNINIT1   0x2  /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2   0x4  /* mark second half uninitialized */

static int ext4_split_extent(handle_t *handle,
                                struct inode *inode,
                                struct ext4_ext_path *path,
                                struct ext4_map_blocks *map,
                                int split_flag,
                                int flags);

static int ext4_split_extent_at(handle_t *handle,
                             struct inode *inode,
                             struct ext4_ext_path *path,
                             ext4_lblk_t split,
                             int split_flag,
                             int flags);

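/*
 * Make sure the running handle has at least @needed credits left.  If
 * not, try to extend the current transaction, and if that is not
 * possible, restart it.  Returns 0 when the caller may proceed,
 * -EAGAIN when the transaction was restarted (so the caller must
 * repeat its lookup), or another negative error code.
 */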
static int ext4_ext_truncate_extend_restart(handle_t *handle,
                                            struct inode *inode,
                                            int needed)
{
        int err;

        if (!ext4_handle_valid(handle))
                return 0;
        if (handle->h_buffer_credits > needed)
                return 0;
        err = ext4_journal_extend(handle, needed);
        if (err <= 0)
                return err;
        err = ext4_truncate_restart_trans(handle, inode, needed);
        if (err == 0)
                err = -EAGAIN;

        return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path)
{
        if (path->p_bh) {
                /* path points to block */
                return ext4_journal_get_write_access(handle, path->p_bh);
        }
        /* path points to leaf/index in inode body */
        /* we use in-core data, no need to protect them */
        return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
#define ext4_ext_dirty(handle, inode, path) \
                __ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
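/*
 * The macro above passes the caller's __func__ and __LINE__ down so
 * that a failed metadata update is reported against the call site
 * rather than against __ext4_ext_dirty() itself.
 */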
static int __ext4_ext_dirty(const char *where, unsigned int line,
                            handle_t *handle, struct inode *inode,
                            struct ext4_ext_path *path)
{
        int err;
        if (path->p_bh) {
                /* path points to block */
                err = __ext4_handle_dirty_metadata(where, line, handle,
                                                   inode, path->p_bh);
        } else {
                /* path points to leaf/index in inode body */
                err = ext4_mark_inode_dirty(handle, inode);
        }
        return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
                              struct ext4_ext_path *path,
                              ext4_lblk_t block)
{
        if (path) {
                int depth = path->p_depth;
                struct ext4_extent *ex;

                /*
                 * Try to predict block placement assuming that we are
                 * filling in a file which will eventually be
                 * non-sparse --- i.e., in the case of libbfd writing
                 * out an ELF object's sections out-of-order, but in a
                 * way that eventually results in a contiguous object
                 * or executable file, or some database extending a
                 * table space file.  However, this is actually
                 * somewhat non-ideal if we are writing a sparse file
                 * such as qemu or KVM writing a raw image file that
                 * is going to stay fairly sparse, since it will end
                 * up fragmenting the file system's free space.  Maybe
                 * we should have some heuristics or some way to allow
                 * userspace to pass a hint to the file system,
                 * especially if the latter case turns out to be
                 * common.
                 */
                ex = path[depth].p_ext;
                if (ex) {
                        ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
                        ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

                        if (block > ext_block)
                                return ext_pblk + (block - ext_block);
                        else
                                return ext_pblk - (ext_block - block);
                }

                /* it looks like index is empty;
                 * try to find starting block from index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        return ext4_inode_to_goal_block(inode);
}

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
                        struct ext4_ext_path *path,
                        struct ext4_extent *ex, int *err, unsigned int flags)
{
        ext4_fsblk_t goal, newblock;

        goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
        newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
                                        NULL, err);
        return newblock;
}

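/*
 * The four helpers below compute how many entries fit in an extent
 * tree node.  Each on-disk node starts with a 12-byte header followed
 * by 12-byte entries, so e.g. a 4 KiB block holds (4096 - 12) / 12 =
 * 340 extents or indexes.  With AGGRESSIVE_TEST the limits are shrunk
 * artificially so that deep trees can be exercised on small files.
 */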
static inline int ext4_ext_space_block(struct inode *inode, int check)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 6)
                size = 6;
#endif
        return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 5)
                size = 5;
#endif
        return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 3)
                size = 3;
#endif
        return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 4)
                size = 4;
#endif
        return size;
}

/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        int idxs;

        idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                / sizeof(struct ext4_extent_idx));

        /*
         * If the new delayed allocation block is contiguous with the
         * previous da block, it can share index blocks with the
         * previous block, so we only need to allocate a new index
         * block every idxs leaf blocks.  At idxs**2 blocks, we need
         * an additional index block, and at idxs**3 blocks, yet
         * another index block.
         */
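        /*
         * For example, with 4 KiB blocks idxs = (4096 - 12) / 12 = 340,
         * so a contiguous delayed-allocation run needs a new index
         * block every 340 leaf blocks, an extra one every 340^2
         * blocks, and so on.
         */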
        if (ei->i_da_metadata_calc_len &&
            ei->i_da_metadata_calc_last_lblock+1 == lblock) {
                int num = 0;

                if ((ei->i_da_metadata_calc_len % idxs) == 0)
                        num++;
                if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
                        num++;
                if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
                        num++;
                        ei->i_da_metadata_calc_len = 0;
                } else
                        ei->i_da_metadata_calc_len++;
                ei->i_da_metadata_calc_last_lblock++;
                return num;
        }

        /*
         * In the worst case we need a new set of index blocks at
         * every level of the inode's extent tree.
         */
        ei->i_da_metadata_calc_len = 1;
        ei->i_da_metadata_calc_last_lblock = lblock;
        return ext_depth(inode) + 1;
}

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
        int max;

        if (depth == ext_depth(inode)) {
                if (depth == 0)
                        max = ext4_ext_space_root(inode, 1);
                else
                        max = ext4_ext_space_root_idx(inode, 1);
        } else {
                if (depth == 0)
                        max = ext4_ext_space_block(inode, 1);
                else
                        max = ext4_ext_space_block_idx(inode, 1);
        }

        return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
        ext4_fsblk_t block = ext4_ext_pblock(ext);
        int len = ext4_ext_get_actual_len(ext);

        if (len == 0)
                return 0;
        return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
                                struct ext4_extent_idx *ext_idx)
{
        ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

        return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
                                struct ext4_extent_header *eh,
                                int depth)
{
        unsigned short entries;
        if (eh->eh_entries == 0)
                return 1;

        entries = le16_to_cpu(eh->eh_entries);

        if (depth == 0) {
                /* leaf entries */
                struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
                while (entries) {
                        if (!ext4_valid_extent(inode, ext))
                                return 0;
                        ext++;
                        entries--;
                }
        } else {
                struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
                while (entries) {
                        if (!ext4_valid_extent_idx(inode, ext_idx))
                                return 0;
                        ext_idx++;
                        entries--;
                }
        }
        return 1;
}

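/*
 * Sanity-check one node of the extent tree: verify the magic, the
 * expected depth, the entry counts against the node's capacity, and
 * (via ext4_valid_extent_entries) that every entry points at a valid
 * block range.  On failure the corruption is reported through
 * ext4_error_inode() against the caller's function and line.
 */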
static int __ext4_ext_check(const char *function, unsigned int line,
                            struct inode *inode, struct ext4_extent_header *eh,
                            int depth)
{
        const char *error_msg;
        int max = 0;

        if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
                error_msg = "invalid magic";
                goto corrupted;
        }
        if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
                error_msg = "unexpected eh_depth";
                goto corrupted;
        }
        if (unlikely(eh->eh_max == 0)) {
                error_msg = "invalid eh_max";
                goto corrupted;
        }
        max = ext4_ext_max_entries(inode, depth);
        if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
                error_msg = "too large eh_max";
                goto corrupted;
        }
        if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
                error_msg = "invalid eh_entries";
                goto corrupted;
        }
        if (!ext4_valid_extent_entries(inode, eh, depth)) {
                error_msg = "invalid extent entries";
                goto corrupted;
        }
        return 0;

corrupted:
        ext4_error_inode(inode, function, line, 0,
                        "bad header/extent: %s - magic %x, "
                        "entries %u, max %u(%u), depth %u(%u)",
                        error_msg, le16_to_cpu(eh->eh_magic),
                        le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
                        max, le16_to_cpu(eh->eh_depth), depth);

        return -EIO;
}

#define ext4_ext_check(inode, eh, depth)        \
        __ext4_ext_check(__func__, __LINE__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
        return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
        int k, l = path->p_depth;

        ext_debug("path:");
        for (k = 0; k <= l; k++, path++) {
                if (path->p_idx) {
                  ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
                            ext4_idx_pblock(path->p_idx));
                } else if (path->p_ext) {
                        ext_debug("  %d:[%d]%d:%llu ",
                                  le32_to_cpu(path->p_ext->ee_block),
                                  ext4_ext_is_uninitialized(path->p_ext),
                                  ext4_ext_get_actual_len(path->p_ext),
                                  ext4_ext_pblock(path->p_ext));
                } else
                        ext_debug("  []");
        }
        ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
        int depth = ext_depth(inode);
        struct ext4_extent_header *eh;
        struct ext4_extent *ex;
        int i;

        if (!path)
                return;

        eh = path[depth].p_hdr;
        ex = EXT_FIRST_EXTENT(eh);

        ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

        for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
                ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
                          ext4_ext_is_uninitialized(ex),
                          ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
        }
        ext_debug("\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
                        ext4_fsblk_t newblock, int level)
{
        int depth = ext_depth(inode);
        struct ext4_extent *ex;

        if (depth != level) {
                struct ext4_extent_idx *idx;
                idx = path[level].p_idx;
                while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
                        ext_debug("%d: move %d:%llu in new index %llu\n", level,
                                        le32_to_cpu(idx->ei_block),
                                        ext4_idx_pblock(idx),
                                        newblock);
                        idx++;
                }

                return;
        }

        ex = path[depth].p_ext;
        while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
                ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
                                le32_to_cpu(ex->ee_block),
                                ext4_ext_pblock(ex),
                                ext4_ext_is_uninitialized(ex),
                                ext4_ext_get_actual_len(ex),
                                newblock);
                ex++;
        }
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

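/*
 * Release the buffer heads held by a path returned from
 * ext4_ext_find_extent().  p_bh is cleared as we go, so calling this
 * twice on the same path is harmless.
 */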
void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
        int depth = path->p_depth;
        int i;

        for (i = 0; i <= depth; i++, path++)
                if (path->p_bh) {
                        brelse(path->p_bh);
                        path->p_bh = NULL;
                }
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
                        struct ext4_ext_path *path, ext4_lblk_t block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent_idx *r, *l, *m;


        ext_debug("binsearch for %u(idx):  ", block);

        l = EXT_FIRST_INDEX(eh) + 1;
        r = EXT_LAST_INDEX(eh);
        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ei_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
                                m, le32_to_cpu(m->ei_block),
                                r, le32_to_cpu(r->ei_block));
        }

        path->p_idx = l - 1;
        ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
                  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent_idx *chix, *ix;
                int k;

                chix = ix = EXT_FIRST_INDEX(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
                  if (k != 0 &&
                      le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
                                printk(KERN_DEBUG "k=%d, ix=0x%p, "
                                       "first=0x%p\n", k,
                                       ix, EXT_FIRST_INDEX(eh));
                                printk(KERN_DEBUG "%u <= %u\n",
                                       le32_to_cpu(ix->ei_block),
                                       le32_to_cpu(ix[-1].ei_block));
                        }
                        BUG_ON(k && le32_to_cpu(ix->ei_block)
                                           <= le32_to_cpu(ix[-1].ei_block));
                        if (block < le32_to_cpu(ix->ei_block))
                                break;
                        chix = ix;
                }
                BUG_ON(chix != path->p_idx);
        }
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
                struct ext4_ext_path *path, ext4_lblk_t block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent *r, *l, *m;

        if (eh->eh_entries == 0) {
                /*
                 * this leaf is empty:
                 * we get such a leaf in split/add case
                 */
                return;
        }

        ext_debug("binsearch for %u:  ", block);

        l = EXT_FIRST_EXTENT(eh) + 1;
        r = EXT_LAST_EXTENT(eh);

        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ee_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
                                m, le32_to_cpu(m->ee_block),
                                r, le32_to_cpu(r->ee_block));
        }

        path->p_ext = l - 1;
        ext_debug("  -> %d:%llu:[%d]%d ",
                        le32_to_cpu(path->p_ext->ee_block),
                        ext4_ext_pblock(path->p_ext),
                        ext4_ext_is_uninitialized(path->p_ext),
                        ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent *chex, *ex;
                int k;

                chex = ex = EXT_FIRST_EXTENT(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
                        BUG_ON(k && le32_to_cpu(ex->ee_block)
                                          <= le32_to_cpu(ex[-1].ee_block));
                        if (block < le32_to_cpu(ex->ee_block))
                                break;
                        chex = ex;
                }
                BUG_ON(chex != path->p_ext);
        }
#endif

}

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
        struct ext4_extent_header *eh;

        eh = ext_inode_hdr(inode);
        eh->eh_depth = 0;
        eh->eh_entries = 0;
        eh->eh_magic = EXT4_EXT_MAGIC;
        eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
        ext4_mark_inode_dirty(handle, inode);
        ext4_ext_invalidate_cache(inode);
        return 0;
}

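/*
 * ext4_ext_find_extent:
 * descend from the root to the leaf covering @block, recording one
 * ext4_ext_path entry per level.  If @path is NULL a fresh array is
 * allocated; in either case the caller must release the result with
 * ext4_ext_drop_refs() (and kfree() it if it was allocated here).
 */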
struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
                                        struct ext4_ext_path *path)
{
        struct ext4_extent_header *eh;
        struct buffer_head *bh;
        short int depth, i, ppos = 0, alloc = 0;

        eh = ext_inode_hdr(inode);
        depth = ext_depth(inode);

        /* account possible depth increase */
        if (!path) {
                path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
                                GFP_NOFS);
                if (!path)
                        return ERR_PTR(-ENOMEM);
                alloc = 1;
        }
        path[0].p_hdr = eh;
        path[0].p_bh = NULL;

        i = depth;
        /* walk through the tree */
        while (i) {
                int need_to_validate = 0;

                ext_debug("depth %d: num %d, max %d\n",
                          ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

                ext4_ext_binsearch_idx(inode, path + ppos, block);
                path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
                path[ppos].p_depth = i;
                path[ppos].p_ext = NULL;

                bh = sb_getblk(inode->i_sb, path[ppos].p_block);
                if (unlikely(!bh))
                        goto err;
                if (!bh_uptodate_or_lock(bh)) {
                        trace_ext4_ext_load_extent(inode, block,
                                                path[ppos].p_block);
                        if (bh_submit_read(bh) < 0) {
                                put_bh(bh);
                                goto err;
                        }
                        /* validate the extent entries */
                        need_to_validate = 1;
                }
                eh = ext_block_hdr(bh);
                ppos++;
                if (unlikely(ppos > depth)) {
                        put_bh(bh);
                        EXT4_ERROR_INODE(inode,
                                         "ppos %d > depth %d", ppos, depth);
                        goto err;
                }
                path[ppos].p_bh = bh;
                path[ppos].p_hdr = eh;
                i--;

                if (need_to_validate && ext4_ext_check(inode, eh, i))
                        goto err;
        }

        path[ppos].p_depth = i;
        path[ppos].p_ext = NULL;
        path[ppos].p_idx = NULL;

        /* find extent */
        ext4_ext_binsearch(inode, path + ppos, block);
        /* if not an empty leaf */
        if (path[ppos].p_ext)
                path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

        ext4_ext_show_path(inode, path);

        return path;

err:
        ext4_ext_drop_refs(path);
        if (alloc)
                kfree(path);
        return ERR_PTR(-EIO);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
                                 struct ext4_ext_path *curp,
                                 int logical, ext4_fsblk_t ptr)
{
        struct ext4_extent_idx *ix;
        int len, err;

        err = ext4_ext_get_access(handle, inode, curp);
        if (err)
                return err;

        if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
                EXT4_ERROR_INODE(inode,
                                 "logical %d == ei_block %d!",
                                 logical, le32_to_cpu(curp->p_idx->ei_block));
                return -EIO;
        }

        if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
                             >= le16_to_cpu(curp->p_hdr->eh_max))) {
                EXT4_ERROR_INODE(inode,
                                 "eh_entries %d >= eh_max %d!",
                                 le16_to_cpu(curp->p_hdr->eh_entries),
                                 le16_to_cpu(curp->p_hdr->eh_max));
                return -EIO;
        }

        if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
                /* insert after */
                ext_debug("insert new index %d after: %llu\n", logical, ptr);
                ix = curp->p_idx + 1;
        } else {
                /* insert before */
                ext_debug("insert new index %d before: %llu\n", logical, ptr);
                ix = curp->p_idx;
        }

        len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
        BUG_ON(len < 0);
        if (len > 0) {
                ext_debug("insert new index %d: "
                                "move %d indices from 0x%p to 0x%p\n",
                                logical, len, ix, ix + 1);
                memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
        }

        if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
                EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
                return -EIO;
        }

        ix->ei_block = cpu_to_le32(logical);
        ext4_idx_store_pblock(ix, ptr);
        le16_add_cpu(&curp->p_hdr->eh_entries, 1);

        if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
                EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
                return -EIO;
        }

        err = ext4_ext_dirty(handle, inode, curp);
        ext4_std_error(inode->i_sb, err);

        return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
                          unsigned int flags,
                          struct ext4_ext_path *path,
                          struct ext4_extent *newext, int at)
{
        struct buffer_head *bh = NULL;
        int depth = ext_depth(inode);
        struct ext4_extent_header *neh;
        struct ext4_extent_idx *fidx;
        int i = at, k, m, a;
        ext4_fsblk_t newblock, oldblock;
        __le32 border;
        ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
        int err = 0;

        /* make decision: where to split? */
        /* FIXME: now decision is simplest: at current extent */

        /* if current leaf will be split, then we should use
         * border from split point */
        if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
                EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
                return -EIO;
        }
        if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
                border = path[depth].p_ext[1].ee_block;
                ext_debug("leaf will be split."
                                " next leaf starts at %d\n",
                                  le32_to_cpu(border));
        } else {
                border = newext->ee_block;
                ext_debug("leaf will be added."
                                " next leaf starts at %d\n",
                                le32_to_cpu(border));
        }

        /*
         * If an error occurs, we break off processing and mark the
         * filesystem read-only.  The index won't be inserted and the
         * tree will remain consistent.  The next mount will repair
         * the buffers, too.
         */

        /*
         * Get an array to track all allocated blocks.
         * We need it so that we can free those blocks
         * on error.
         */
        ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
        if (!ablocks)
                return -ENOMEM;

        /* allocate all needed blocks */
        ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
        for (a = 0; a < depth - at; a++) {
                newblock = ext4_ext_new_meta_block(handle, inode, path,
                                                   newext, &err, flags);
                if (newblock == 0)
                        goto cleanup;
                ablocks[a] = newblock;
        }

        /* initialize new leaf */
        newblock = ablocks[--a];
        if (unlikely(newblock == 0)) {
                EXT4_ERROR_INODE(inode, "newblock == 0!");
                err = -EIO;
                goto cleanup;
        }
        bh = sb_getblk(inode->i_sb, newblock);
        if (!bh) {
                err = -EIO;
                goto cleanup;
        }
        lock_buffer(bh);

        err = ext4_journal_get_create_access(handle, bh);
        if (err)
                goto cleanup;

        neh = ext_block_hdr(bh);
        neh->eh_entries = 0;
        neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
        neh->eh_magic = EXT4_EXT_MAGIC;
        neh->eh_depth = 0;

        /* move remainder of path[depth] to the new leaf */
        if (unlikely(path[depth].p_hdr->eh_entries !=
                     path[depth].p_hdr->eh_max)) {
                EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
                                 path[depth].p_hdr->eh_entries,
                                 path[depth].p_hdr->eh_max);
                err = -EIO;
                goto cleanup;
        }
        /* start copy from next extent */
        m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
        ext4_ext_show_move(inode, path, newblock, depth);
        if (m) {
                struct ext4_extent *ex;
                ex = EXT_FIRST_EXTENT(neh);
                memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
                le16_add_cpu(&neh->eh_entries, m);
        }

        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        err = ext4_handle_dirty_metadata(handle, inode, bh);
        if (err)
                goto cleanup;
        brelse(bh);
        bh = NULL;

        /* correct old leaf */
        if (m) {
                err = ext4_ext_get_access(handle, inode, path + depth);
                if (err)
                        goto cleanup;
                le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
                err = ext4_ext_dirty(handle, inode, path + depth);
                if (err)
                        goto cleanup;

        }

        /* create intermediate indexes */
        k = depth - at - 1;
        if (unlikely(k < 0)) {
                EXT4_ERROR_INODE(inode, "k %d < 0!", k);
                err = -EIO;
                goto cleanup;
        }
        if (k)
                ext_debug("create %d intermediate indices\n", k);
        /* insert new index into current index block */
        /* current depth stored in i var */
        i = depth - 1;
        while (k--) {
                oldblock = newblock;
                newblock = ablocks[--a];
                bh = sb_getblk(inode->i_sb, newblock);
                if (!bh) {
                        err = -EIO;
                        goto cleanup;
                }
                lock_buffer(bh);

                err = ext4_journal_get_create_access(handle, bh);
                if (err)
                        goto cleanup;

                neh = ext_block_hdr(bh);
                neh->eh_entries = cpu_to_le16(1);
                neh->eh_magic = EXT4_EXT_MAGIC;
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
                neh->eh_depth = cpu_to_le16(depth - i);
                fidx = EXT_FIRST_INDEX(neh);
                fidx->ei_block = border;
                ext4_idx_store_pblock(fidx, oldblock);

                ext_debug("int.index at %d (block %llu): %u -> %llu\n",
                                i, newblock, le32_to_cpu(border), oldblock);

                /* move remainder of path[i] to the new index block */
                if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
                                        EXT_LAST_INDEX(path[i].p_hdr))) {
                        EXT4_ERROR_INODE(inode,
                                         "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
                                         le32_to_cpu(path[i].p_ext->ee_block));
                        err = -EIO;
                        goto cleanup;
                }
                /* start copy indexes */
                m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
                ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
                                EXT_MAX_INDEX(path[i].p_hdr));
                ext4_ext_show_move(inode, path, newblock, i);
                if (m) {
                        memmove(++fidx, path[i].p_idx,
                                sizeof(struct ext4_extent_idx) * m);
                        le16_add_cpu(&neh->eh_entries, m);
                }
                set_buffer_uptodate(bh);
                unlock_buffer(bh);

                err = ext4_handle_dirty_metadata(handle, inode, bh);
                if (err)
                        goto cleanup;
                brelse(bh);
                bh = NULL;

                /* correct old index */
                if (m) {
                        err = ext4_ext_get_access(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                        le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
                        err = ext4_ext_dirty(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                }

                i--;
        }

        /* insert new index */
        err = ext4_ext_insert_index(handle, inode, path + at,
                                    le32_to_cpu(border), newblock);

cleanup:
        if (bh) {
                if (buffer_locked(bh))
                        unlock_buffer(bh);
                brelse(bh);
        }

        if (err) {
                /* free all allocated blocks in error case */
                for (i = 0; i < depth; i++) {
                        if (!ablocks[i])
                                continue;
                        ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
                                         EXT4_FREE_BLOCKS_METADATA);
                }
        }
        kfree(ablocks);

        return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
                                 unsigned int flags,
                                 struct ext4_extent *newext)
{
        struct ext4_extent_header *neh;
        struct buffer_head *bh;
        ext4_fsblk_t newblock;
        int err = 0;

        newblock = ext4_ext_new_meta_block(handle, inode, NULL,
                newext, &err, flags);
        if (newblock == 0)
                return err;

        bh = sb_getblk(inode->i_sb, newblock);
        if (!bh) {
                err = -EIO;
                ext4_std_error(inode->i_sb, err);
                return err;
        }
        lock_buffer(bh);

        err = ext4_journal_get_create_access(handle, bh);
        if (err) {
                unlock_buffer(bh);
                goto out;
        }

        /* move top-level index/leaf into new block */
        memmove(bh->b_data, EXT4_I(inode)->i_data,
                sizeof(EXT4_I(inode)->i_data));

        /* set size of new block */
        neh = ext_block_hdr(bh);
        /* the old root could have indexes or leaves,
         * so calculate eh_max the right way */
        if (ext_depth(inode))
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
        else
                neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
        neh->eh_magic = EXT4_EXT_MAGIC;
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        err = ext4_handle_dirty_metadata(handle, inode, bh);
        if (err)
                goto out;

        /* Update top-level index: num,max,pointer */
        neh = ext_inode_hdr(inode);
        neh->eh_entries = cpu_to_le16(1);
        ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
        if (neh->eh_depth == 0) {
                /* Root extent block becomes index block */
                neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
                EXT_FIRST_INDEX(neh)->ei_block =
                        EXT_FIRST_EXTENT(neh)->ee_block;
        }
        ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
                  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
                  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
                  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

        neh->eh_depth = cpu_to_le16(le16_to_cpu(neh->eh_depth) + 1);
        ext4_mark_inode_dirty(handle, inode);
out:
        brelse(bh);

        return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds an empty index slot and adds a new leaf;
 * if no free index is found, it grows the tree in depth.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
                                    unsigned int flags,
                                    struct ext4_ext_path *path,
                                    struct ext4_extent *newext)
{
        struct ext4_ext_path *curp;
        int depth, i, err = 0;

repeat:
        i = depth = ext_depth(inode);

        /* walk up the tree and look for a free index entry */
        curp = path + depth;
        while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
                i--;
                curp--;
        }

        /* we use an already allocated block for the index block,
         * so subsequent data blocks should be contiguous */
        if (EXT_HAS_FREE_INDEX(curp)) {
                /* if we found an index with a free entry, use that
                 * entry: create all the needed subtree and add a new leaf */
                err = ext4_ext_split(handle, inode, flags, path, newext, i);
                if (err)
                        goto out;

                /* refill path */
                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode,
                                    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
                                    path);
                if (IS_ERR(path))
                        err = PTR_ERR(path);
        } else {
                /* tree is full, time to grow in depth */
                err = ext4_ext_grow_indepth(handle, inode, flags, newext);
                if (err)
                        goto out;

                /* refill path */
                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode,
                                   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
                                    path);
                if (IS_ERR(path)) {
                        err = PTR_ERR(path);
                        goto out;
                }

                /*
                 * only the first split (depth 0 -> 1) produces free space;
                 * in all other cases we have to split the grown tree
                 */
                depth = ext_depth(inode);
                if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
                        /* now we need to split */
                        goto repeat;
                }
        }

out:
        return err;
}

/*
 * search the closest allocated block to the left of *logical
 * and return it at @logical, with its physical address at @phys;
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or an error code
 */
static int ext4_ext_search_left(struct inode *inode,
                                struct ext4_ext_path *path,
                                ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
        struct ext4_extent_idx *ix;
        struct ext4_extent *ex;
        int depth, ee_len;

        if (unlikely(path == NULL)) {
                EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
                return -EIO;
        }
        depth = path->p_depth;
        *phys = 0;

        if (depth == 0 && path->p_ext == NULL)
                return 0;

        /* usually the extent in the path covers blocks smaller
         * than *logical, but it can be that the extent is the
         * first one in the file */

        ex = path[depth].p_ext;
        ee_len = ext4_ext_get_actual_len(ex);
        if (*logical < le32_to_cpu(ex->ee_block)) {
                if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
                        EXT4_ERROR_INODE(inode,
                                         "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
                                         *logical, le32_to_cpu(ex->ee_block));
                        return -EIO;
                }
                while (--depth >= 0) {
                        ix = path[depth].p_idx;
                        if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
                                EXT4_ERROR_INODE(inode,
                                  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
                                  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
                                  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
                le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
                                  depth);
                                return -EIO;
                        }
                }
                return 0;
        }

        if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
                EXT4_ERROR_INODE(inode,
                                 "logical %d < ee_block %d + ee_len %d!",
                                 *logical, le32_to_cpu(ex->ee_block), ee_len);
                return -EIO;
        }

        *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
        *phys = ext4_ext_pblock(ex) + ee_len - 1;
        return 0;
}

/*
 * search the closest allocated block to the right of *logical
 * and return it at @logical, with its physical address at @phys;
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or an error code
 */
static int ext4_ext_search_right(struct inode *inode,
                                 struct ext4_ext_path *path,
                                 ext4_lblk_t *logical, ext4_fsblk_t *phys,
                                 struct ext4_extent **ret_ex)
{
        struct buffer_head *bh = NULL;
        struct ext4_extent_header *eh;
        struct ext4_extent_idx *ix;
        struct ext4_extent *ex;
        ext4_fsblk_t block;
        int depth;      /* Note, NOT eh_depth; depth from top of tree */
        int ee_len;

        if (unlikely(path == NULL)) {
                EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
                return -EIO;
        }
        depth = path->p_depth;
        *phys = 0;

        if (depth == 0 && path->p_ext == NULL)
                return 0;

        /* usually the extent in the path covers blocks smaller
         * than *logical, but it can be that the extent is the
         * first one in the file */

        ex = path[depth].p_ext;
        ee_len = ext4_ext_get_actual_len(ex);
        if (*logical < le32_to_cpu(ex->ee_block)) {
                if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
                        EXT4_ERROR_INODE(inode,
                                         "first_extent(path[%d].p_hdr) != ex",
                                         depth);
                        return -EIO;
                }
                while (--depth >= 0) {
                        ix = path[depth].p_idx;
                        if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
                                EXT4_ERROR_INODE(inode,
                                                 "ix != EXT_FIRST_INDEX *logical %d!",
                                                 *logical);
                                return -EIO;
                        }
                }
                goto found_extent;
        }

        if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
                EXT4_ERROR_INODE(inode,
                                 "logical %d < ee_block %d + ee_len %d!",
                                 *logical, le32_to_cpu(ex->ee_block), ee_len);
                return -EIO;
        }

        if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
                /* next allocated block in this leaf */
                ex++;
                goto found_extent;
        }

        /* go up and search for index to the right */
        while (--depth >= 0) {
                ix = path[depth].p_idx;
                if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
                        goto got_index;
        }

        /* we've gone up to the root and found no index to the right */
        return 0;

got_index:
        /* we've found index to the right, let's
         * follow it and find the closest allocated
         * block to the right */
        ix++;
        block = ext4_idx_pblock(ix);
        while (++depth < path->p_depth) {
                bh = sb_bread(inode->i_sb, block);
                if (bh == NULL)
                        return -EIO;
                eh = ext_block_hdr(bh);
                /* subtract from p_depth to get proper eh_depth */
                if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
                        put_bh(bh);
                        return -EIO;
                }
                ix = EXT_FIRST_INDEX(eh);
                block = ext4_idx_pblock(ix);
                put_bh(bh);
        }

        bh = sb_bread(inode->i_sb, block);
        if (bh == NULL)
                return -EIO;
        eh = ext_block_hdr(bh);
        if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
                put_bh(bh);
                return -EIO;
        }
        ex = EXT_FIRST_EXTENT(eh);
found_extent:
        *logical = le32_to_cpu(ex->ee_block);
        *phys = ext4_ext_pblock(ex);
        *ret_ex = ex;
        if (bh)
                put_bh(bh);
        return 0;
}

/*
 * ext4_ext_next_allocated_block:
 * returns the allocated block in the subsequent extent or EXT_MAX_BLOCKS.
 * NOTE: it treats the block number from an index entry as an
 * allocated block.  Thus, index entries have to be consistent
 * with leaves.
 */
static ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
        int depth;

        BUG_ON(path == NULL);
        depth = path->p_depth;

        if (depth == 0 && path->p_ext == NULL)
                return EXT_MAX_BLOCKS;

        while (depth >= 0) {
                if (depth == path->p_depth) {
                        /* leaf */
                        if (path[depth].p_ext &&
                                path[depth].p_ext !=
                                        EXT_LAST_EXTENT(path[depth].p_hdr))
                          return le32_to_cpu(path[depth].p_ext[1].ee_block);
                } else {
                        /* index */
                        if (path[depth].p_idx !=
                                        EXT_LAST_INDEX(path[depth].p_hdr))
                          return le32_to_cpu(path[depth].p_idx[1].ei_block);
                }
                depth--;
        }

        return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
{
        int depth;

        BUG_ON(path == NULL);
        depth = path->p_depth;

        /* a zero-depth tree has no leaf blocks at all */
        if (depth == 0)
                return EXT_MAX_BLOCKS;

        /* go to index block */
        depth--;

        while (depth >= 0) {
                if (path[depth].p_idx !=
                                EXT_LAST_INDEX(path[depth].p_hdr))
                        return (ext4_lblk_t)
                                le32_to_cpu(path[depth].p_idx[1].ei_block);
                depth--;
        }

        return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_correct_indexes:
 * if the leaf gets modified and the modified extent is first in the leaf,
 * then we have to correct all the indexes above.
1444  * TODO: do we need to correct tree in all cases?
1445  */
1446 static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1447                                 struct ext4_ext_path *path)
1448 {
1449         struct ext4_extent_header *eh;
1450         int depth = ext_depth(inode);
1451         struct ext4_extent *ex;
1452         __le32 border;
1453         int k, err = 0;
1454
1455         eh = path[depth].p_hdr;
1456         ex = path[depth].p_ext;
1457
1458         if (unlikely(ex == NULL || eh == NULL)) {
1459                 EXT4_ERROR_INODE(inode,
1460                                  "ex %p == NULL or eh %p == NULL", ex, eh);
1461                 return -EIO;
1462         }
1463
1464         if (depth == 0) {
1465                 /* there is no tree at all */
1466                 return 0;
1467         }
1468
1469         if (ex != EXT_FIRST_EXTENT(eh)) {
1470                 /* we correct the tree only if the first extent was modified */
1471                 return 0;
1472         }
1473
1474         /*
1475          * TODO: we need correction if border is smaller than current one
1476          */
1477         k = depth - 1;
1478         border = path[depth].p_ext->ee_block;
1479         err = ext4_ext_get_access(handle, inode, path + k);
1480         if (err)
1481                 return err;
1482         path[k].p_idx->ei_block = border;
1483         err = ext4_ext_dirty(handle, inode, path + k);
1484         if (err)
1485                 return err;
1486
1487         while (k--) {
1488                 /* change all left-side indexes */
1489                 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1490                         break;
1491                 err = ext4_ext_get_access(handle, inode, path + k);
1492                 if (err)
1493                         break;
1494                 path[k].p_idx->ei_block = border;
1495                 err = ext4_ext_dirty(handle, inode, path + k);
1496                 if (err)
1497                         break;
1498         }
1499
1500         return err;
1501 }
1502
1503 int
1504 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
1505                                 struct ext4_extent *ex2)
1506 {
1507         unsigned short ext1_ee_len, ext2_ee_len, max_len;
1508
1509         /*
1510          * Make sure that either both extents are uninitialized, or
1511          * both are _not_.
1512          */
1513         if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
1514                 return 0;
1515
1516         if (ext4_ext_is_uninitialized(ex1))
1517                 max_len = EXT_UNINIT_MAX_LEN;
1518         else
1519                 max_len = EXT_INIT_MAX_LEN;
1520
1521         ext1_ee_len = ext4_ext_get_actual_len(ex1);
1522         ext2_ee_len = ext4_ext_get_actual_len(ex2);
1523
1524         if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
1525                         le32_to_cpu(ex2->ee_block))
1526                 return 0;
1527
1528         /*
1529          * To allow future support for preallocated extents to be added
1530  * as an RO_COMPAT feature, refuse to merge two extents if
1531          * this can result in the top bit of ee_len being set.
1532          */
1533         if (ext1_ee_len + ext2_ee_len > max_len)
1534                 return 0;
1535 #ifdef AGGRESSIVE_TEST
1536         if (ext1_ee_len >= 4)
1537                 return 0;
1538 #endif
1539
1540         if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
1541                 return 1;
1542         return 0;
1543 }
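
/*
 * A minimal sketch of the predicate above, using hypothetical values
 * (the on-disk fields are filled in by hand purely for illustration,
 * and AGGRESSIVE_TEST is assumed to be disabled):
 *
 *	struct ext4_extent a, b;
 *
 *	a.ee_block = cpu_to_le32(100);
 *	a.ee_len   = cpu_to_le16(8);
 *	ext4_ext_store_pblock(&a, 5000);
 *	b.ee_block = cpu_to_le32(108);
 *	b.ee_len   = cpu_to_le16(8);
 *	ext4_ext_store_pblock(&b, 5008);
 *
 * Both extents are initialized, logically contiguous (100 + 8 == 108)
 * and physically contiguous (5000 + 8 == 5008), so
 * ext4_can_extents_be_merged(inode, &a, &b) returns 1.
 */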
1544
1545 /*
1546  * This function tries to merge the "ex" extent to the next extent in the tree.
1547  * It always tries to merge towards the right. If you want to merge
1548  * towards the left, pass "ex - 1" as the argument instead of "ex".
1549  * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
1550  * 1 if they got merged.
1551  */
1552 static int ext4_ext_try_to_merge_right(struct inode *inode,
1553                                  struct ext4_ext_path *path,
1554                                  struct ext4_extent *ex)
1555 {
1556         struct ext4_extent_header *eh;
1557         unsigned int depth, len;
1558         int merge_done = 0;
1559         int uninitialized = 0;
1560
1561         depth = ext_depth(inode);
1562         BUG_ON(path[depth].p_hdr == NULL);
1563         eh = path[depth].p_hdr;
1564
1565         while (ex < EXT_LAST_EXTENT(eh)) {
1566                 if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
1567                         break;
1568                 /* merge with next extent! */
1569                 if (ext4_ext_is_uninitialized(ex))
1570                         uninitialized = 1;
1571                 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1572                                 + ext4_ext_get_actual_len(ex + 1));
1573                 if (uninitialized)
1574                         ext4_ext_mark_uninitialized(ex);
1575
1576                 if (ex + 1 < EXT_LAST_EXTENT(eh)) {
1577                         len = (EXT_LAST_EXTENT(eh) - ex - 1)
1578                                 * sizeof(struct ext4_extent);
1579                         memmove(ex + 1, ex + 2, len);
1580                 }
1581                 le16_add_cpu(&eh->eh_entries, -1);
1582                 merge_done = 1;
1583                 WARN_ON(eh->eh_entries == 0);
1584                 if (!eh->eh_entries)
1585                         EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
1586         }
1587
1588         return merge_done;
1589 }
1590
1591 /*
1592  * This function tries to merge the @ex extent with its neighbours in the tree.
1593  * Returns 1 if @ex was merged with its right neighbour, else 0.
1594  */
1595 static int ext4_ext_try_to_merge(struct inode *inode,
1596                                   struct ext4_ext_path *path,
1597                                   struct ext4_extent *ex) {
1598         struct ext4_extent_header *eh;
1599         unsigned int depth;
1600         int merge_done = 0;
1601         int ret = 0;
1602
1603         depth = ext_depth(inode);
1604         BUG_ON(path[depth].p_hdr == NULL);
1605         eh = path[depth].p_hdr;
1606
1607         if (ex > EXT_FIRST_EXTENT(eh))
1608                 merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1609
1610         if (!merge_done)
1611                 ret = ext4_ext_try_to_merge_right(inode, path, ex);
1612
1613         return ret;
1614 }
1615
1616 /*
1617  * check if a portion of the "newext" extent overlaps with an
1618  * existing extent.
1619  *
1620  * If an overlap is discovered, it updates the length of newext
1621  * such that there will be no overlap, and then returns 1.
1622  * If no overlap is found, it returns 0.
1623  */
1624 static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
1625                                            struct inode *inode,
1626                                            struct ext4_extent *newext,
1627                                            struct ext4_ext_path *path)
1628 {
1629         ext4_lblk_t b1, b2;
1630         unsigned int depth, len1;
1631         unsigned int ret = 0;
1632
1633         b1 = le32_to_cpu(newext->ee_block);
1634         len1 = ext4_ext_get_actual_len(newext);
1635         depth = ext_depth(inode);
1636         if (!path[depth].p_ext)
1637                 goto out;
1638         b2 = le32_to_cpu(path[depth].p_ext->ee_block);
1639         b2 &= ~(sbi->s_cluster_ratio - 1);
1640
1641         /*
1642          * get the next allocated block if the extent in the path
1643          * is before the requested block(s)
1644          */
1645         if (b2 < b1) {
1646                 b2 = ext4_ext_next_allocated_block(path);
1647                 if (b2 == EXT_MAX_BLOCKS)
1648                         goto out;
1649                 b2 &= ~(sbi->s_cluster_ratio - 1);
1650         }
1651
1652         /* check for wrap through zero on extent logical start block*/
1653         if (b1 + len1 < b1) {
1654                 len1 = EXT_MAX_BLOCKS - b1;
1655                 newext->ee_len = cpu_to_le16(len1);
1656                 ret = 1;
1657         }
1658
1659         /* check for overlap */
1660         if (b1 + len1 > b2) {
1661                 newext->ee_len = cpu_to_le16(b2 - b1);
1662                 ret = 1;
1663         }
1664 out:
1665         return ret;
1666 }
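
/*
 * Worked example (hypothetical numbers, non-bigalloc so that
 * s_cluster_ratio == 1): if newext covers blocks 100..149 (b1 == 100,
 * len1 == 50) and the extent found in the path starts at block 120
 * (b2 == 120), then b1 + len1 > b2, so newext->ee_len is trimmed to
 * b2 - b1 == 20 and 1 is returned.
 */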
1667
1668 /*
1669  * ext4_ext_insert_extent:
1670  * tries to merge the requested extent into an existing extent or
1671  * inserts the requested extent as a new one into the tree,
1672  * creating a new leaf in the no-space case.
1673  */
1674 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1675                                 struct ext4_ext_path *path,
1676                                 struct ext4_extent *newext, int flag)
1677 {
1678         struct ext4_extent_header *eh;
1679         struct ext4_extent *ex, *fex;
1680         struct ext4_extent *nearex; /* nearest extent */
1681         struct ext4_ext_path *npath = NULL;
1682         int depth, len, err;
1683         ext4_lblk_t next;
1684         unsigned uninitialized = 0;
1685         int flags = 0;
1686
1687         if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1688                 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1689                 return -EIO;
1690         }
1691         depth = ext_depth(inode);
1692         ex = path[depth].p_ext;
1693         if (unlikely(path[depth].p_hdr == NULL)) {
1694                 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1695                 return -EIO;
1696         }
1697
1698         /* try to insert block into found extent and return */
1699         if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
1700                 && ext4_can_extents_be_merged(inode, ex, newext)) {
1701                 ext_debug("append [%d]%d block to %u:[%d]%d (from %llu)\n",
1702                           ext4_ext_is_uninitialized(newext),
1703                           ext4_ext_get_actual_len(newext),
1704                           le32_to_cpu(ex->ee_block),
1705                           ext4_ext_is_uninitialized(ex),
1706                           ext4_ext_get_actual_len(ex),
1707                           ext4_ext_pblock(ex));
1708                 err = ext4_ext_get_access(handle, inode, path + depth);
1709                 if (err)
1710                         return err;
1711
1712                 /*
1713                  * ext4_can_extents_be_merged should have checked that either
1714                  * both extents are uninitialized, or both aren't. Thus we
1715                  * need to check only one of them here.
1716                  */
1717                 if (ext4_ext_is_uninitialized(ex))
1718                         uninitialized = 1;
1719                 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1720                                         + ext4_ext_get_actual_len(newext));
1721                 if (uninitialized)
1722                         ext4_ext_mark_uninitialized(ex);
1723                 eh = path[depth].p_hdr;
1724                 nearex = ex;
1725                 goto merge;
1726         }
1727
1728         depth = ext_depth(inode);
1729         eh = path[depth].p_hdr;
1730         if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
1731                 goto has_space;
1732
1733         /* probably next leaf has space for us? */
1734         fex = EXT_LAST_EXTENT(eh);
1735         next = EXT_MAX_BLOCKS;
1736         if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
1737                 next = ext4_ext_next_leaf_block(path);
1738         if (next != EXT_MAX_BLOCKS) {
1739                 ext_debug("next leaf block - %u\n", next);
1740                 BUG_ON(npath != NULL);
1741                 npath = ext4_ext_find_extent(inode, next, NULL);
1742                 if (IS_ERR(npath))
1743                         return PTR_ERR(npath);
1744                 BUG_ON(npath->p_depth != path->p_depth);
1745                 eh = npath[depth].p_hdr;
1746                 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
1747                         ext_debug("next leaf isn't full(%d)\n",
1748                                   le16_to_cpu(eh->eh_entries));
1749                         path = npath;
1750                         goto has_space;
1751                 }
1752                 ext_debug("next leaf has no free space(%d,%d)\n",
1753                           le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
1754         }
1755
1756         /*
1757          * There is no free space in the found leaf.
1758          * We're gonna add a new leaf in the tree.
1759          */
1760         if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
1761                 flags = EXT4_MB_USE_ROOT_BLOCKS;
1762         err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
1763         if (err)
1764                 goto cleanup;
1765         depth = ext_depth(inode);
1766         eh = path[depth].p_hdr;
1767
1768 has_space:
1769         nearex = path[depth].p_ext;
1770
1771         err = ext4_ext_get_access(handle, inode, path + depth);
1772         if (err)
1773                 goto cleanup;
1774
1775         if (!nearex) {
1776                 /* there is no extent in this leaf, create first one */
1777                 ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
1778                                 le32_to_cpu(newext->ee_block),
1779                                 ext4_ext_pblock(newext),
1780                                 ext4_ext_is_uninitialized(newext),
1781                                 ext4_ext_get_actual_len(newext));
1782                 nearex = EXT_FIRST_EXTENT(eh);
1783         } else {
1784                 if (le32_to_cpu(newext->ee_block)
1785                            > le32_to_cpu(nearex->ee_block)) {
1786                         /* Insert after */
1787                         ext_debug("insert %u:%llu:[%d]%d after: "
1788                                         "nearest %p\n",
1789                                         le32_to_cpu(newext->ee_block),
1790                                         ext4_ext_pblock(newext),
1791                                         ext4_ext_is_uninitialized(newext),
1792                                         ext4_ext_get_actual_len(newext),
1793                                         nearex);
1794                         nearex++;
1795                 } else {
1796                         /* Insert before */
1797                         BUG_ON(newext->ee_block == nearex->ee_block);
1798                         ext_debug("insert %u:%llu:[%d]%d before: "
1799                                         "nearest %p\n",
1800                                         le32_to_cpu(newext->ee_block),
1801                                         ext4_ext_pblock(newext),
1802                                         ext4_ext_is_uninitialized(newext),
1803                                         ext4_ext_get_actual_len(newext),
1804                                         nearex);
1805                 }
1806                 len = EXT_LAST_EXTENT(eh) - nearex + 1;
1807                 if (len > 0) {
1808                         ext_debug("insert %u:%llu:[%d]%d: "
1809                                         "move %d extents from 0x%p to 0x%p\n",
1810                                         le32_to_cpu(newext->ee_block),
1811                                         ext4_ext_pblock(newext),
1812                                         ext4_ext_is_uninitialized(newext),
1813                                         ext4_ext_get_actual_len(newext),
1814                                         len, nearex, nearex + 1);
1815                         memmove(nearex + 1, nearex,
1816                                 len * sizeof(struct ext4_extent));
1817                 }
1818         }
1819
1820         le16_add_cpu(&eh->eh_entries, 1);
1821         path[depth].p_ext = nearex;
1822         nearex->ee_block = newext->ee_block;
1823         ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
1824         nearex->ee_len = newext->ee_len;
1825
1826 merge:
1827         /* try to merge extents to the right */
1828         if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
1829                 ext4_ext_try_to_merge(inode, path, nearex);
1830
1831         /* try to merge extents to the left */
1832
1833         /* time to correct all indexes above */
1834         err = ext4_ext_correct_indexes(handle, inode, path);
1835         if (err)
1836                 goto cleanup;
1837
1838         err = ext4_ext_dirty(handle, inode, path + depth);
1839
1840 cleanup:
1841         if (npath) {
1842                 ext4_ext_drop_refs(npath);
1843                 kfree(npath);
1844         }
1845         ext4_ext_invalidate_cache(inode);
1846         return err;
1847 }
1848
1849 static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
1850                                ext4_lblk_t num, ext_prepare_callback func,
1851                                void *cbdata)
1852 {
1853         struct ext4_ext_path *path = NULL;
1854         struct ext4_ext_cache cbex;
1855         struct ext4_extent *ex;
1856         ext4_lblk_t next, start = 0, end = 0;
1857         ext4_lblk_t last = block + num;
1858         int depth, exists, err = 0;
1859
1860         BUG_ON(func == NULL);
1861         BUG_ON(inode == NULL);
1862
1863         while (block < last && block != EXT_MAX_BLOCKS) {
1864                 num = last - block;
1865                 /* find extent for this block */
1866                 down_read(&EXT4_I(inode)->i_data_sem);
1867                 path = ext4_ext_find_extent(inode, block, path);
1868                 up_read(&EXT4_I(inode)->i_data_sem);
1869                 if (IS_ERR(path)) {
1870                         err = PTR_ERR(path);
1871                         path = NULL;
1872                         break;
1873                 }
1874
1875                 depth = ext_depth(inode);
1876                 if (unlikely(path[depth].p_hdr == NULL)) {
1877                         EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1878                         err = -EIO;
1879                         break;
1880                 }
1881                 ex = path[depth].p_ext;
1882                 next = ext4_ext_next_allocated_block(path);
1883
1884                 exists = 0;
1885                 if (!ex) {
1886                         /* there is no extent yet, so try to allocate
1887                          * all requested space */
1888                         start = block;
1889                         end = block + num;
1890                 } else if (le32_to_cpu(ex->ee_block) > block) {
1891                         /* need to allocate space before found extent */
1892                         start = block;
1893                         end = le32_to_cpu(ex->ee_block);
1894                         if (block + num < end)
1895                                 end = block + num;
1896                 } else if (block >= le32_to_cpu(ex->ee_block)
1897                                         + ext4_ext_get_actual_len(ex)) {
1898                         /* need to allocate space after found extent */
1899                         start = block;
1900                         end = block + num;
1901                         if (end >= next)
1902                                 end = next;
1903                 } else if (block >= le32_to_cpu(ex->ee_block)) {
1904                         /*
1905                          * some part of requested space is covered
1906                          * by found extent
1907                          */
1908                         start = block;
1909                         end = le32_to_cpu(ex->ee_block)
1910                                 + ext4_ext_get_actual_len(ex);
1911                         if (block + num < end)
1912                                 end = block + num;
1913                         exists = 1;
1914                 } else {
1915                         BUG();
1916                 }
1917                 BUG_ON(end <= start);
1918
1919                 if (!exists) {
1920                         cbex.ec_block = start;
1921                         cbex.ec_len = end - start;
1922                         cbex.ec_start = 0;
1923                 } else {
1924                         cbex.ec_block = le32_to_cpu(ex->ee_block);
1925                         cbex.ec_len = ext4_ext_get_actual_len(ex);
1926                         cbex.ec_start = ext4_ext_pblock(ex);
1927                 }
1928
1929                 if (unlikely(cbex.ec_len == 0)) {
1930                         EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
1931                         err = -EIO;
1932                         break;
1933                 }
1934                 err = func(inode, next, &cbex, ex, cbdata);
1935                 ext4_ext_drop_refs(path);
1936
1937                 if (err < 0)
1938                         break;
1939
1940                 if (err == EXT_REPEAT)
1941                         continue;
1942                 else if (err == EXT_BREAK) {
1943                         err = 0;
1944                         break;
1945                 }
1946
1947                 if (ext_depth(inode) != depth) {
1948                         /* depth was changed. we have to realloc path */
1949                         kfree(path);
1950                         path = NULL;
1951                 }
1952
1953                 block = cbex.ec_block + cbex.ec_len;
1954         }
1955
1956         if (path) {
1957                 ext4_ext_drop_refs(path);
1958                 kfree(path);
1959         }
1960
1961         return err;
1962 }
1963
1964 static void
1965 ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
1966                         __u32 len, ext4_fsblk_t start)
1967 {
1968         struct ext4_ext_cache *cex;
1969         BUG_ON(len == 0);
1970         spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1971         trace_ext4_ext_put_in_cache(inode, block, len, start);
1972         cex = &EXT4_I(inode)->i_cached_extent;
1973         cex->ec_block = block;
1974         cex->ec_len = len;
1975         cex->ec_start = start;
1976         spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1977 }
1978
1979 /*
1980  * ext4_ext_put_gap_in_cache:
1981  * calculate boundaries of the gap that the requested block fits into
1982  * and cache this gap
1983  */
1984 static void
1985 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
1986                                 ext4_lblk_t block)
1987 {
1988         int depth = ext_depth(inode);
1989         unsigned long len;
1990         ext4_lblk_t lblock;
1991         struct ext4_extent *ex;
1992
1993         ex = path[depth].p_ext;
1994         if (ex == NULL) {
1995                 /* there is no extent yet, so the gap is [0; EXT_MAX_BLOCKS) */
1996                 lblock = 0;
1997                 len = EXT_MAX_BLOCKS;
1998                 ext_debug("cache gap(whole file):");
1999         } else if (block < le32_to_cpu(ex->ee_block)) {
2000                 lblock = block;
2001                 len = le32_to_cpu(ex->ee_block) - block;
2002                 ext_debug("cache gap(before): %u [%u:%u]",
2003                                 block,
2004                                 le32_to_cpu(ex->ee_block),
2005                                  ext4_ext_get_actual_len(ex));
2006         } else if (block >= le32_to_cpu(ex->ee_block)
2007                         + ext4_ext_get_actual_len(ex)) {
2008                 ext4_lblk_t next;
2009                 lblock = le32_to_cpu(ex->ee_block)
2010                         + ext4_ext_get_actual_len(ex);
2011
2012                 next = ext4_ext_next_allocated_block(path);
2013                 ext_debug("cache gap(after): [%u:%u] %u",
2014                                 le32_to_cpu(ex->ee_block),
2015                                 ext4_ext_get_actual_len(ex),
2016                                 block);
2017                 BUG_ON(next == lblock);
2018                 len = next - lblock;
2019         } else {
2020                 lblock = len = 0;
2021                 BUG();
2022         }
2023
2024         ext_debug(" -> %u:%lu\n", lblock, len);
2025         ext4_ext_put_in_cache(inode, lblock, len, 0);
2026 }
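
/*
 * Worked example (hypothetical layout): with extents covering blocks
 * [0..9] and [20..29], a lookup of block 12 falls after the first
 * extent, so lblock == 10 and next == 20, and the gap cached is
 * 10:[10] with a zero start block, which the cache lookup helpers
 * below report as a hole.
 */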
2027
2028 /*
2029  * ext4_ext_check_cache()
2030  * Checks to see if the given block is in the cache.
2031  * If it is, the cached extent is stored in the given
2032  * cache extent pointer.  If the cached extent is a hole,
2033  * this routine should be used instead of
2034  * ext4_ext_in_cache if the calling function needs to
2035  * know the size of the hole.
2036  *
2037  * @inode: The file's inode
2038  * @block: The block to look for in the cache
2039  * @ex:    Pointer where the cached extent will be stored
2040  *         if it contains block
2041  *
2042  * Return 0 if cache is invalid; 1 if the cache is valid
2043  */
2044 static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block,
2045         struct ext4_ext_cache *ex) {
2046         struct ext4_ext_cache *cex;
2047         struct ext4_sb_info *sbi;
2048         int ret = 0;
2049
2050         /*
2051          * We borrow i_block_reservation_lock to protect i_cached_extent
2052          */
2053         spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
2054         cex = &EXT4_I(inode)->i_cached_extent;
2055         sbi = EXT4_SB(inode->i_sb);
2056
2057         /* does the cache hold valid data? */
2058         if (cex->ec_len == 0)
2059                 goto errout;
2060
2061         if (in_range(block, cex->ec_block, cex->ec_len)) {
2062                 memcpy(ex, cex, sizeof(struct ext4_ext_cache));
2063                 ext_debug("%u cached by %u:%u:%llu\n",
2064                                 block,
2065                                 cex->ec_block, cex->ec_len, cex->ec_start);
2066                 ret = 1;
2067         }
2068 errout:
2069         if (!ret)
2070                 sbi->extent_cache_misses++;
2071         else
2072                 sbi->extent_cache_hits++;
2073         trace_ext4_ext_in_cache(inode, block, ret);
2074         spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
2075         return ret;
2076 }
2077
2078 /*
2079  * ext4_ext_in_cache()
2080  * Checks to see if the given block is in the cache.
2081  * If it is, the cached extent is stored in the given
2082  * extent pointer.
2083  *
2084  * @inode: The file's inode
2085  * @block: The block to look for in the cache
2086  * @ex:    Pointer where the cached extent will be stored
2087  *         if it contains block
2088  *
2089  * Return 0 if cache is invalid; 1 if the cache is valid
2090  */
2091 static int
2092 ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
2093                         struct ext4_extent *ex)
2094 {
2095         struct ext4_ext_cache cex;
2096         int ret = 0;
2097
2098         if (ext4_ext_check_cache(inode, block, &cex)) {
2099                 ex->ee_block = cpu_to_le32(cex.ec_block);
2100                 ext4_ext_store_pblock(ex, cex.ec_start);
2101                 ex->ee_len = cpu_to_le16(cex.ec_len);
2102                 ret = 1;
2103         }
2104
2105         return ret;
2106 }
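
/*
 * A sketch of how a caller might consult the single-extent cache
 * before walking the tree.  The helpers do_full_lookup(),
 * handle_hole() and map_from_extent() are hypothetical, shown only to
 * illustrate the three outcomes; a zero physical block marks a cached
 * hole (see ext4_ext_put_gap_in_cache() above):
 *
 *	struct ext4_extent ex;
 *
 *	if (!ext4_ext_in_cache(inode, lblk, &ex))
 *		return do_full_lookup(inode, lblk);
 *	if (ext4_ext_pblock(&ex) == 0)
 *		return handle_hole(inode, lblk, &ex);
 *	return map_from_extent(inode, lblk, &ex);
 */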
2107
2108
2109 /*
2110  * ext4_ext_rm_idx:
2111  * removes index from the index block.
2112  */
2113 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2114                         struct ext4_ext_path *path)
2115 {
2116         int err;
2117         ext4_fsblk_t leaf;
2118
2119         /* free index block */
2120         path--;
2121         leaf = ext4_idx_pblock(path->p_idx);
2122         if (unlikely(path->p_hdr->eh_entries == 0)) {
2123                 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2124                 return -EIO;
2125         }
2126         err = ext4_ext_get_access(handle, inode, path);
2127         if (err)
2128                 return err;
2129
2130         if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
2131                 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
2132                 len *= sizeof(struct ext4_extent_idx);
2133                 memmove(path->p_idx, path->p_idx + 1, len);
2134         }
2135
2136         le16_add_cpu(&path->p_hdr->eh_entries, -1);
2137         err = ext4_ext_dirty(handle, inode, path);
2138         if (err)
2139                 return err;
2140         ext_debug("index is empty, remove it, free block %llu\n", leaf);
2141         trace_ext4_ext_rm_idx(inode, leaf);
2142
2143         ext4_free_blocks(handle, inode, NULL, leaf, 1,
2144                          EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2145         return err;
2146 }
2147
2148 /*
2149  * ext4_ext_calc_credits_for_single_extent:
2150  * This routine returns the maximum number of credits needed to insert
2151  * an extent to the extent tree.
2152  * When passing the actual path, the caller should calculate credits
2153  * under i_data_sem.
2154  */
2155 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2156                                                 struct ext4_ext_path *path)
2157 {
2158         if (path) {
2159                 int depth = ext_depth(inode);
2160                 int ret = 0;
2161
2162                 /* probably there is space in leaf? */
2163                 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2164                                 < le16_to_cpu(path[depth].p_hdr->eh_max)) {
2165
2166                         /*
2167                          *  There is some space in the leaf; no
2168                          *  need to account for the leaf block credit.
2169                          *
2170                          *  Bitmaps and block group descriptor blocks
2171                          *  and other metadata blocks still need to be
2172                          *  accounted for.
2173                          */
2174                         /* 1 bitmap, 1 block group descriptor */
2175                         ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2176                         return ret;
2177                 }
2178         }
2179
2180         return ext4_chunk_trans_blocks(inode, nrblocks);
2181 }
2182
2183 /*
2184  * How many index/leaf blocks need to change/allocate to modify nrblocks?
2185  *
2186  * If nrblocks fit in a single extent (chunk flag is 1), then in the
2187  * worst case each tree-level index/leaf needs to be changed; if the
2188  * tree splits due to inserting a new extent, then the old tree
2189  * index/leaf blocks need to be updated too.
2190  *
2191  * If the nrblocks are discontiguous, they could cause the whole
2192  * tree to split more than once, but this is really rare.
2193  */
2194 int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
2195 {
2196         int index;
2197         int depth = ext_depth(inode);
2198
2199         if (chunk)
2200                 index = depth * 2;
2201         else
2202                 index = depth * 3;
2203
2204         return index;
2205 }
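
/*
 * Worked example (hypothetical tree): for a depth-2 tree, a single
 * contiguous chunk (chunk == 1) is charged at most 2 blocks per tree
 * level, i.e. 2 * 2 == 4, while discontiguous blocks (chunk == 0) are
 * charged 3 per level, i.e. 2 * 3 == 6.
 */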
2206
2207 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2208                               struct ext4_extent *ex,
2209                               ext4_fsblk_t *partial_cluster,
2210                               ext4_lblk_t from, ext4_lblk_t to)
2211 {
2212         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2213         unsigned short ee_len =  ext4_ext_get_actual_len(ex);
2214         ext4_fsblk_t pblk;
2215         int flags = EXT4_FREE_BLOCKS_FORGET;
2216
2217         if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2218                 flags |= EXT4_FREE_BLOCKS_METADATA;
2219         /*
2220          * For bigalloc file systems, we never free a partial cluster
2221          * at the beginning of the extent.  Instead, we make a note
2222          * that we tried freeing the cluster, and check to see if we
2223          * need to free it on a subsequent call to ext4_remove_blocks,
2224          * or at the end of the ext4_truncate() operation.
2225          */
2226         flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
2227
2228         trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
2229         /*
2230          * If we have a partial cluster, and it's different from the
2231          * cluster of the last block, we need to explicitly free the
2232          * partial cluster here.
2233          */
2234         pblk = ext4_ext_pblock(ex) + ee_len - 1;
2235         if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
2236                 ext4_free_blocks(handle, inode, NULL,
2237                                  EXT4_C2B(sbi, *partial_cluster),
2238                                  sbi->s_cluster_ratio, flags);
2239                 *partial_cluster = 0;
2240         }
2241
2242 #ifdef EXTENTS_STATS
2243         {
2244                 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2245                 spin_lock(&sbi->s_ext_stats_lock);
2246                 sbi->s_ext_blocks += ee_len;
2247                 sbi->s_ext_extents++;
2248                 if (ee_len < sbi->s_ext_min)
2249                         sbi->s_ext_min = ee_len;
2250                 if (ee_len > sbi->s_ext_max)
2251                         sbi->s_ext_max = ee_len;
2252                 if (ext_depth(inode) > sbi->s_depth_max)
2253                         sbi->s_depth_max = ext_depth(inode);
2254                 spin_unlock(&sbi->s_ext_stats_lock);
2255         }
2256 #endif
2257         if (from >= le32_to_cpu(ex->ee_block)
2258             && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
2259                 /* tail removal */
2260                 ext4_lblk_t num;
2261
2262                 num = le32_to_cpu(ex->ee_block) + ee_len - from;
2263                 pblk = ext4_ext_pblock(ex) + ee_len - num;
2264                 ext_debug("free last %u blocks starting %llu\n", num, pblk);
2265                 ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
2266                 /*
2267                  * If the block range to be freed didn't start at the
2268                  * beginning of a cluster, and we removed the entire
2269                  * extent, save the partial cluster here, since we
2270                  * might need to free it if we determine that the
2271                  * truncate operation has removed all of the blocks in
2272                  * the cluster.
2273                  */
2274                 if (pblk & (sbi->s_cluster_ratio - 1) &&
2275                     (ee_len == num))
2276                         *partial_cluster = EXT4_B2C(sbi, pblk);
2277                 else
2278                         *partial_cluster = 0;
2279         } else if (from == le32_to_cpu(ex->ee_block)
2280                    && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
2281                 /* head removal */
2282                 ext4_lblk_t num;
2283                 ext4_fsblk_t start;
2284
2285                 num = to - from;
2286                 start = ext4_ext_pblock(ex);
2287
2288                 ext_debug("free first %u blocks starting %llu\n", num, start);
2289                 ext4_free_blocks(handle, inode, NULL, start, num, flags);
2290
2291         } else {
2292                 ext4_msg(inode->i_sb, KERN_INFO, "strange request: removal(2) "
2293                          "%u-%u from %u:%u",
2294                          from, to, le32_to_cpu(ex->ee_block), ee_len);
2295         }
2296         return 0;
2297 }
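
/*
 * Worked example of the partial-cluster logic above (hypothetical
 * bigalloc layout with s_cluster_ratio == 16): freeing an entire
 * extent whose first physical block is 100 leaves pblk == 100, and
 * since 100 & 15 != 0 the range does not start on a cluster boundary,
 * so EXT4_B2C(sbi, 100) == 6 is recorded in *partial_cluster instead
 * of freeing cluster 6 right away; a later call, or the end of the
 * truncate, frees it once no remaining extent still uses that cluster.
 */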
2298
2299
2300 /*
2301  * ext4_ext_rm_leaf(): Removes the extents associated with the
2302  * blocks appearing between "start" and "end", and splits the extents
2303  * if "start" and "end" appear in the same extent
2304  *
2305  * @handle: The journal handle
2306  * @inode:  The file's inode
2307  * @path:   The path to the leaf
2308  * @start:  The first block to remove
2309  * @end:   The last block to remove
2310  */
2311 static int
2312 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2313                  struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster,
2314                  ext4_lblk_t start, ext4_lblk_t end)
2315 {
2316         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2317         int err = 0, correct_index = 0;
2318         int depth = ext_depth(inode), credits;
2319         struct ext4_extent_header *eh;
2320         ext4_lblk_t a, b;
2321         unsigned num;
2322         ext4_lblk_t ex_ee_block;
2323         unsigned short ex_ee_len;
2324         unsigned uninitialized = 0;
2325         struct ext4_extent *ex;
2326
2327         /* the header must be checked already in ext4_ext_remove_space() */
2328         ext_debug("truncate since %u in leaf to %u\n", start, end);
2329         if (!path[depth].p_hdr)
2330                 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2331         eh = path[depth].p_hdr;
2332         if (unlikely(path[depth].p_hdr == NULL)) {
2333                 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2334                 return -EIO;
2335         }
2336         /* find where to start removing */
2337         ex = EXT_LAST_EXTENT(eh);
2338
2339         ex_ee_block = le32_to_cpu(ex->ee_block);
2340         ex_ee_len = ext4_ext_get_actual_len(ex);
2341
2342         trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
2343
2344         while (ex >= EXT_FIRST_EXTENT(eh) &&
2345                         ex_ee_block + ex_ee_len > start) {
2346
2347                 if (ext4_ext_is_uninitialized(ex))
2348                         uninitialized = 1;
2349                 else
2350                         uninitialized = 0;
2351
2352                 ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
2353                          uninitialized, ex_ee_len);
2354                 path[depth].p_ext = ex;
2355
2356                 a = ex_ee_block > start ? ex_ee_block : start;
2357                 b = ex_ee_block+ex_ee_len - 1 < end ?
2358                         ex_ee_block+ex_ee_len - 1 : end;
2359
2360                 ext_debug("  border %u:%u\n", a, b);
2361
2362                 /* If this extent is beyond the end of the hole, skip it */
2363                 if (end < ex_ee_block) {
2364                         ex--;
2365                         ex_ee_block = le32_to_cpu(ex->ee_block);
2366                         ex_ee_len = ext4_ext_get_actual_len(ex);
2367                         continue;
2368                 } else if (b != ex_ee_block + ex_ee_len - 1) {
2369                         EXT4_ERROR_INODE(inode,
2370                                          "can not handle truncate %u:%u "
2371                                          "on extent %u:%u",
2372                                          start, end, ex_ee_block,
2373                                          ex_ee_block + ex_ee_len - 1);
2374                         err = -EIO;
2375                         goto out;
2376                 } else if (a != ex_ee_block) {
2377                         /* remove tail of the extent */
2378                         num = a - ex_ee_block;
2379                 } else {
2380                         /* remove whole extent: excellent! */
2381                         num = 0;
2382                 }
2383                 /*
2384                  * 3 for leaf, sb, and inode plus 2 (bmap and group
2385                  * descriptor) for each block group; assume two block
2386                  * groups plus ex_ee_len/blocks_per_block_group for
2387                  * the worst case
2388                  */
2389                 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2390                 if (ex == EXT_FIRST_EXTENT(eh)) {
2391                         correct_index = 1;
2392                         credits += (ext_depth(inode)) + 1;
2393                 }
2394                 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2395
2396                 err = ext4_ext_truncate_extend_restart(handle, inode, credits);
2397                 if (err)
2398                         goto out;
2399
2400                 err = ext4_ext_get_access(handle, inode, path + depth);
2401                 if (err)
2402                         goto out;
2403
2404                 err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
2405                                          a, b);
2406                 if (err)
2407                         goto out;
2408
2409                 if (num == 0)
2410                         /* this extent is removed; mark slot entirely unused */
2411                         ext4_ext_store_pblock(ex, 0);
2412
2413                 ex->ee_len = cpu_to_le16(num);
2414                 /*
2415                  * Do not mark uninitialized if all the blocks in the
2416                  * extent have been removed.
2417                  */
2418                 if (uninitialized && num)
2419                         ext4_ext_mark_uninitialized(ex);
2420                 /*
2421                  * If the extent was completely released,
2422                  * we need to remove it from the leaf
2423                  */
2424                 if (num == 0) {
2425                         if (end != EXT_MAX_BLOCKS - 1) {
2426                                 /*
2427                                  * For hole punching, we need to scoot all the
2428                                  * extents up when an extent is removed so that
2429                                  * we dont have blank extents in the middle
2430                                  * we don't have blank extents in the middle
2431                                 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2432                                         sizeof(struct ext4_extent));
2433
2434                                 /* Now get rid of the one at the end */
2435                                 memset(EXT_LAST_EXTENT(eh), 0,
2436                                         sizeof(struct ext4_extent));
2437                         }
2438                         le16_add_cpu(&eh->eh_entries, -1);
2439                 } else
2440                         *partial_cluster = 0;
2441
2442                 err = ext4_ext_dirty(handle, inode, path + depth);
2443                 if (err)
2444                         goto out;
2445
2446                 ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
2447                                 ext4_ext_pblock(ex));
2448                 ex--;
2449                 ex_ee_block = le32_to_cpu(ex->ee_block);
2450                 ex_ee_len = ext4_ext_get_actual_len(ex);
2451         }
2452
2453         if (correct_index && eh->eh_entries)
2454                 err = ext4_ext_correct_indexes(handle, inode, path);
2455
2456         /*
2457          * If there is still an entry in the leaf node, check to see if
2458          * it references the partial cluster.  This is the only place
2459          * where it could; if it doesn't, we can free the cluster.
2460          */
2461         if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) &&
2462             (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
2463              *partial_cluster)) {
2464                 int flags = EXT4_FREE_BLOCKS_FORGET;
2465
2466                 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2467                         flags |= EXT4_FREE_BLOCKS_METADATA;
2468
2469                 ext4_free_blocks(handle, inode, NULL,
2470                                  EXT4_C2B(sbi, *partial_cluster),
2471                                  sbi->s_cluster_ratio, flags);
2472                 *partial_cluster = 0;
2473         }
2474
2475         /* if this leaf is free, then we should
2476          * remove it from index block above */
2477         if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2478                 err = ext4_ext_rm_idx(handle, inode, path + depth);
2479
2480 out:
2481         return err;
2482 }
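
/*
 * Worked example of the a/b border logic above (hypothetical extent):
 * truncating from start == 150 with end == EXT_MAX_BLOCKS - 1 against
 * an extent covering blocks [100..199] gives a == 150 and b == 199,
 * so num == a - ex_ee_block == 50; the extent is shrunk to keep
 * blocks [100..149] and blocks [150..199] are freed as a tail removal
 * by ext4_remove_blocks().
 */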
2483
2484 /*
2485  * ext4_ext_more_to_rm:
2486  * returns 1 if current index has to be freed (even partial)
2487  */
2488 static int
2489 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2490 {
2491         BUG_ON(path->p_idx == NULL);
2492
2493         if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2494                 return 0;
2495
2496         /*
2497          * if truncation on a deeper level happened, it wasn't partial,
2498          * so we have to consider the current index for truncation
2499          */
2500         if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2501                 return 0;
2502         return 1;
2503 }
2504
2505 static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2506                                  ext4_lblk_t end)
2507 {
2508         struct super_block *sb = inode->i_sb;
2509         int depth = ext_depth(inode);
2510         struct ext4_ext_path *path;
2511         ext4_fsblk_t partial_cluster = 0;
2512         handle_t *handle;
2513         int i, err;
2514
2515         ext_debug("truncate since %u to %u\n", start, end);
2516
2517         /* probably first extent we're gonna free will be last in block */
2518         handle = ext4_journal_start(inode, depth + 1);
2519         if (IS_ERR(handle))
2520                 return PTR_ERR(handle);
2521
2522 again:
2523         ext4_ext_invalidate_cache(inode);
2524
2525         trace_ext4_ext_remove_space(inode, start, depth);
2526
2527         /*
2528          * Check if we are removing extents inside the extent tree. If that
2529          * is the case, we are going to punch a hole inside the extent tree
2530          * so we have to check whether we need to split the extent covering
2531          * the last block to remove so we can easily remove the part of it
2532          * in ext4_ext_rm_leaf().
2533          */
2534         if (end < EXT_MAX_BLOCKS - 1) {
2535                 struct ext4_extent *ex;
2536                 ext4_lblk_t ee_block;
2537
2538                 /* find extent for this block */
2539                 path = ext4_ext_find_extent(inode, end, NULL);
2540                 if (IS_ERR(path)) {
2541                         ext4_journal_stop(handle);
2542                         return PTR_ERR(path);
2543                 }
2544                 depth = ext_depth(inode);
2545                 ex = path[depth].p_ext;
2546                 if (!ex)
2547                         goto cont;
2548
2549                 ee_block = le32_to_cpu(ex->ee_block);
2550
2551                 /*
2552                  * See if the last block is inside the extent, if so split
2553                  * the extent at 'end' block so we can easily remove the
2554                  * tail of the first part of the split extent in
2555                  * ext4_ext_rm_leaf().
2556                  */
2557                 if (end >= ee_block &&
2558                     end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
2559                         int split_flag = 0;
2560
2561                         if (ext4_ext_is_uninitialized(ex))
2562                                 split_flag = EXT4_EXT_MARK_UNINIT1 |
2563                                              EXT4_EXT_MARK_UNINIT2;
2564
2565                         /*
2566                          * Split the extent in two so that 'end' is the last
2567                          * block in the first new extent
2568                          */
2569                         err = ext4_split_extent_at(handle, inode, path,
2570                                                 end + 1, split_flag,
2571                                                 EXT4_GET_BLOCKS_PRE_IO |
2572                                                 EXT4_GET_BLOCKS_PUNCH_OUT_EXT);
2573
2574                         if (err < 0)
2575                                 goto out;
2576                 }
2577                 ext4_ext_drop_refs(path);
2578                 kfree(path);
2579         }
2580 cont:
2581
2582         /*
2583          * We start scanning from the right side, freeing all the blocks
2584          * after i_size and walking into the tree depth-wise.
2585          */
2586         depth = ext_depth(inode);
2587         path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
2588         if (path == NULL) {
2589                 ext4_journal_stop(handle);
2590                 return -ENOMEM;
2591         }
2592         path[0].p_depth = depth;
2593         path[0].p_hdr = ext_inode_hdr(inode);
2594
2595         if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
2596                 err = -EIO;
2597                 goto out;
2598         }
2599         i = err = 0;
2600
2601         while (i >= 0 && err == 0) {
2602                 if (i == depth) {
2603                         /* this is leaf block */
2604                         err = ext4_ext_rm_leaf(handle, inode, path,
2605                                                &partial_cluster, start,
2606                                                end);
2607                         /* root level has p_bh == NULL, brelse() eats this */
2608                         brelse(path[i].p_bh);
2609                         path[i].p_bh = NULL;
2610                         i--;
2611                         continue;
2612                 }
2613
2614                 /* this is index block */
2615                 if (!path[i].p_hdr) {
2616                         ext_debug("initialize header\n");
2617                         path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2618                 }
2619
2620                 if (!path[i].p_idx) {
2621                         /* this level hasn't been touched yet */
2622                         path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2623                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2624                         ext_debug("init index ptr: hdr 0x%p, num %d\n",
2625                                   path[i].p_hdr,
2626                                   le16_to_cpu(path[i].p_hdr->eh_entries));
2627                 } else {
2628                         /* we were already here, see at next index */
2629                         path[i].p_idx--;
2630                 }
2631
2632                 ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2633                                 i, EXT_FIRST_INDEX(path[i].p_hdr),
2634                                 path[i].p_idx);
2635                 if (ext4_ext_more_to_rm(path + i)) {
2636                         struct buffer_head *bh;
2637                         /* go to the next level */
2638                         ext_debug("move to level %d (block %llu)\n",
2639                                   i + 1, ext4_idx_pblock(path[i].p_idx));
2640                         memset(path + i + 1, 0, sizeof(*path));
2641                         bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
2642                         if (!bh) {
2643                                 /* should we reset i_size? */
2644                                 err = -EIO;
2645                                 break;
2646                         }
2647                         if (WARN_ON(i + 1 > depth)) {
2648                                 err = -EIO;
2649                                 break;
2650                         }
2651                         if (ext4_ext_check(inode, ext_block_hdr(bh),
2652                                                         depth - i - 1)) {
2653                                 err = -EIO;
2654                                 break;
2655                         }
2656                         path[i + 1].p_bh = bh;
2657
2658                         /* save actual number of indexes since this
2659                          * number is changed at the next iteration */
2660                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2661                         i++;
2662                 } else {
2663                         /* we finished processing this index, go up */
2664                         if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2665                                 /* index is empty, remove it;
2666                                  * handle must already be prepared by
2667                                  * ext4_ext_rm_leaf() */
2668                                 err = ext4_ext_rm_idx(handle, inode, path + i);
2669                         }
2670                         /* root level has p_bh == NULL, brelse() eats this */
2671                         brelse(path[i].p_bh);
2672                         path[i].p_bh = NULL;
2673                         i--;
2674                         ext_debug("return to level %d\n", i);
2675                 }
2676         }
2677
2678         trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster,
2679                         path->p_hdr->eh_entries);
2680
2681         /* If we still have something in the partial cluster and we have removed
2682          * even the first extent, then we should free the blocks in the partial
2683          * cluster as well. */
2684         if (partial_cluster && path->p_hdr->eh_entries == 0) {
2685                 int flags = EXT4_FREE_BLOCKS_FORGET;
2686
2687                 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2688                         flags |= EXT4_FREE_BLOCKS_METADATA;
2689
2690                 ext4_free_blocks(handle, inode, NULL,
2691                                  EXT4_C2B(EXT4_SB(sb), partial_cluster),
2692                                  EXT4_SB(sb)->s_cluster_ratio, flags);
2693                 partial_cluster = 0;
2694         }
2695
2696         /* TODO: flexible tree reduction should be here */
2697         if (path->p_hdr->eh_entries == 0) {
2698                 /*
2699                  * truncate to zero freed all the tree,
2700                  * so we need to correct eh_depth
2701                  */
2702                 err = ext4_ext_get_access(handle, inode, path);
2703                 if (err == 0) {
2704                         ext_inode_hdr(inode)->eh_depth = 0;
2705                         ext_inode_hdr(inode)->eh_max =
2706                                 cpu_to_le16(ext4_ext_space_root(inode, 0));
2707                         err = ext4_ext_dirty(handle, inode, path);
2708                 }
2709         }
2710 out:
2711         ext4_ext_drop_refs(path);
2712         kfree(path);
2713         if (err == -EAGAIN)
2714                 goto again;
2715         ext4_journal_stop(handle);
2716
2717         return err;
2718 }
2719
2720 /*
2721  * called at mount time
2722  */
2723 void ext4_ext_init(struct super_block *sb)
2724 {
2725         /*
2726          * possible initialization would be here
2727          */
2728
2729         if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
2730 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
2731                 printk(KERN_INFO "EXT4-fs: file extents enabled"
2732 #ifdef AGGRESSIVE_TEST
2733                        ", aggressive tests"
2734 #endif
2735 #ifdef CHECK_BINSEARCH
2736                        ", check binsearch"
2737 #endif
2738 #ifdef EXTENTS_STATS
2739                        ", stats"
2740 #endif
2741                        "\n");
2742 #endif
2743 #ifdef EXTENTS_STATS
2744                 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2745                 EXT4_SB(sb)->s_ext_min = 1 << 30;
2746                 EXT4_SB(sb)->s_ext_max = 0;
2747 #endif
2748         }
2749 }
2750
2751 /*
2752  * called at umount time
2753  */
2754 void ext4_ext_release(struct super_block *sb)
2755 {
2756         if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
2757                 return;
2758
2759 #ifdef EXTENTS_STATS
2760         if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
2761                 struct ext4_sb_info *sbi = EXT4_SB(sb);
2762                 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
2763                         sbi->s_ext_blocks, sbi->s_ext_extents,
2764                         sbi->s_ext_blocks / sbi->s_ext_extents);
2765                 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2766                         sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
2767         }
2768 #endif
2769 }
2770
2771 /* FIXME!! we need to try to merge to the left or right after zero-out */
2772 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2773 {
2774         ext4_fsblk_t ee_pblock;
2775         unsigned int ee_len;
2776         int ret;
2777
2778         ee_len    = ext4_ext_get_actual_len(ex);
2779         ee_pblock = ext4_ext_pblock(ex);
2780
2781         ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
2782         if (ret > 0)
2783                 ret = 0;
2784
2785         return ret;
2786 }
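
/*
 * A condensed sketch of how this zeroout fallback is used by
 * ext4_split_extent_at() below (error handling abbreviated for
 * illustration):
 *
 *	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
 *	if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
 *		err = ext4_ext_zeroout(inode, &orig_ex);
 *		if (!err) {
 *			ex->ee_len = cpu_to_le16(ee_len);
 *			ext4_ext_try_to_merge(inode, path, ex);
 *			err = ext4_ext_dirty(handle, inode, path + depth);
 *		}
 *	}
 */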
2787
2788 /*
2789  * ext4_split_extent_at() splits an extent at given block.
2790  *
2791  * @handle: the journal handle
2792  * @inode: the file inode
2793  * @path: the path to the extent
2794  * @split: the logical block where the extent is split.
2795  * @split_flag: indicates if the extent could be zeroed out if the split
2796  *              fails, and the states (init or uninit) of the new extents.
2797  * @flags: flags used to insert the new extent into the extent tree.
2798  *
2799  *
2800  * Splits extent [a, b] into two extents [a, @split) and [@split, b], the
2801  * states of which are determined by split_flag.
2802  *
2803  * There are two cases:
2804  *  a> the extent is split into two extents.
2805  *  b> a split is not needed, and the extent is just marked.
2806  *
2807  * return 0 on success.
2808  */
2809 static int ext4_split_extent_at(handle_t *handle,
2810                              struct inode *inode,
2811                              struct ext4_ext_path *path,
2812                              ext4_lblk_t split,
2813                              int split_flag,
2814                              int flags)
2815 {
2816         ext4_fsblk_t newblock;
2817         ext4_lblk_t ee_block;
2818         struct ext4_extent *ex, newex, orig_ex;
2819         struct ext4_extent *ex2 = NULL;
2820         unsigned int ee_len, depth;
2821         int err = 0;
2822
2823         ext_debug("ext4_split_extent_at: inode %lu, logical "
2824                 "block %llu\n", inode->i_ino, (unsigned long long)split);
2825
2826         ext4_ext_show_leaf(inode, path);
2827
2828         depth = ext_depth(inode);
2829         ex = path[depth].p_ext;
2830         ee_block = le32_to_cpu(ex->ee_block);
2831         ee_len = ext4_ext_get_actual_len(ex);
2832         newblock = split - ee_block + ext4_ext_pblock(ex);
2833
2834         BUG_ON(split < ee_block || split >= (ee_block + ee_len));
2835
2836         err = ext4_ext_get_access(handle, inode, path + depth);
2837         if (err)
2838                 goto out;
2839
2840         if (split == ee_block) {
2841                 /*
2842                  * case b: block @split is the block that the extent begins with;
2843                  * then we just change the state of the extent, and no splitting
2844                  * is needed.
2845                  */
2846                 if (split_flag & EXT4_EXT_MARK_UNINIT2)
2847                         ext4_ext_mark_uninitialized(ex);
2848                 else
2849                         ext4_ext_mark_initialized(ex);
2850
2851                 if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
2852                         ext4_ext_try_to_merge(inode, path, ex);
2853
2854                 err = ext4_ext_dirty(handle, inode, path + depth);
2855                 goto out;
2856         }
2857
2858         /* case a */
2859         memcpy(&orig_ex, ex, sizeof(orig_ex));
2860         ex->ee_len = cpu_to_le16(split - ee_block);
2861         if (split_flag & EXT4_EXT_MARK_UNINIT1)
2862                 ext4_ext_mark_uninitialized(ex);
2863
2864         /*
2865          * the path may lead to a new leaf, not to the original leaf,
2866          * after ext4_ext_insert_extent() returns.
2867          */
2868         err = ext4_ext_dirty(handle, inode, path + depth);
2869         if (err)
2870                 goto fix_extent_len;
2871
2872         ex2 = &newex;
2873         ex2->ee_block = cpu_to_le32(split);
2874         ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
2875         ext4_ext_store_pblock(ex2, newblock);
2876         if (split_flag & EXT4_EXT_MARK_UNINIT2)
2877                 ext4_ext_mark_uninitialized(ex2);
2878
2879         err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
2880         if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
2881                 err = ext4_ext_zeroout(inode, &orig_ex);
2882                 if (err)
2883                         goto fix_extent_len;
2884                 /* update the extent length and mark as initialized */
2885                 ex->ee_len = cpu_to_le16(ee_len); /* ee_len is __le16 */
2886                 ext4_ext_try_to_merge(inode, path, ex);
2887                 err = ext4_ext_dirty(handle, inode, path + depth);
2888                 goto out;
2889         } else if (err)
2890                 goto fix_extent_len;
2891
2892 out:
2893         ext4_ext_show_leaf(inode, path);
2894         return err;
2895
2896 fix_extent_len:
2897         ex->ee_len = orig_ex.ee_len;
2898         ext4_ext_dirty(handle, inode, path + depth);
2899         return err;
2900 }
2901
2902 /*
2903  * ext4_split_extent() splits an extent and marks the extent which is covered
2904  * by @map as split_flag indicates.
2905  *
2906  * It may result in splitting the extent into multiple extents (up to three).
2907  * There are three possibilities:
2908  *   a> There is no split required
2909  *   b> Splits in two extents: Split is happening at either end of the extent
2910  *   c> Splits in three extents: Someone is splitting in the middle of the extent
2911  *
2912  */
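/*
 * Editor's worked example (illustrative, not part of the original source):
 * for an extent [100, 120) and @map covering [105, 110), case c applies.
 * The first ext4_split_extent_at() call splits at 110, producing
 * [100, 110) and [110, 120); after re-finding the path, the second call
 * splits at 105, leaving the three extents [100, 105), [105, 110) and
 * [110, 120).
 */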
2913 static int ext4_split_extent(handle_t *handle,
2914                               struct inode *inode,
2915                               struct ext4_ext_path *path,
2916                               struct ext4_map_blocks *map,
2917                               int split_flag,
2918                               int flags)
2919 {
2920         ext4_lblk_t ee_block;
2921         struct ext4_extent *ex;
2922         unsigned int ee_len, depth;
2923         int err = 0;
2924         int uninitialized;
2925         int split_flag1, flags1;
2926
2927         depth = ext_depth(inode);
2928         ex = path[depth].p_ext;
2929         ee_block = le32_to_cpu(ex->ee_block);
2930         ee_len = ext4_ext_get_actual_len(ex);
2931         uninitialized = ext4_ext_is_uninitialized(ex);
2932
2933         if (map->m_lblk + map->m_len < ee_block + ee_len) {
2934                 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
2935                               EXT4_EXT_MAY_ZEROOUT : 0;
2936                 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
2937                 if (uninitialized)
2938                         split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
2939                                        EXT4_EXT_MARK_UNINIT2;
2940                 err = ext4_split_extent_at(handle, inode, path,
2941                                 map->m_lblk + map->m_len, split_flag1, flags1);
2942                 if (err)
2943                         goto out;
2944         }
2945
2946         ext4_ext_drop_refs(path);
2947         path = ext4_ext_find_extent(inode, map->m_lblk, path);
2948         if (IS_ERR(path))
2949                 return PTR_ERR(path);
2950
2951         if (map->m_lblk >= ee_block) {
2952                 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
2953                               EXT4_EXT_MAY_ZEROOUT : 0;
2954                 if (uninitialized)
2955                         split_flag1 |= EXT4_EXT_MARK_UNINIT1;
2956                 if (split_flag & EXT4_EXT_MARK_UNINIT2)
2957                         split_flag1 |= EXT4_EXT_MARK_UNINIT2;
2958                 err = ext4_split_extent_at(handle, inode, path,
2959                                 map->m_lblk, split_flag1, flags);
2960                 if (err)
2961                         goto out;
2962         }
2963
2964         ext4_ext_show_leaf(inode, path);
2965 out:
2966         return err ? err : map->m_len;
2967 }
2968
2969 #define EXT4_EXT_ZERO_LEN 7
2970 /*
2971  * This function is called by ext4_ext_map_blocks() if someone tries to write
2972  * to an uninitialized extent. It may result in splitting the uninitialized
2973  * extent into multiple extents (up to three - one initialized and two
2974  * uninitialized).
2975  * There are three possibilities:
2976  *   a> There is no split required: Entire extent should be initialized
2977  *   b> Splits in two extents: Write is happening at either end of the extent
2978  *   c> Splits in three extents: Someone is writing in the middle of the extent
2979  *
2980  * Pre-conditions:
2981  *  - The extent pointed to by 'path' is uninitialized.
2982  *  - The extent pointed to by 'path' contains a superset
2983  *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
2984  *
2985  * Post-conditions on success:
2986  *  - the returned value is the number of blocks beyond map->m_lblk
2987  *    that are allocated and initialized.
2988  *    It is guaranteed to be >= map->m_len.
2989  */
2990 static int ext4_ext_convert_to_initialized(handle_t *handle,
2991                                            struct inode *inode,
2992                                            struct ext4_map_blocks *map,
2993                                            struct ext4_ext_path *path)
2994 {
2995         struct ext4_extent_header *eh;
2996         struct ext4_map_blocks split_map;
2997         struct ext4_extent zero_ex;
2998         struct ext4_extent *ex;
2999         ext4_lblk_t ee_block, eof_block;
3000         unsigned int ee_len, depth;
3001         int allocated;
3002         int err = 0;
3003         int split_flag = 0;
3004
3005         ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
3006                 "block %llu, max_blocks %u\n", inode->i_ino,
3007                 (unsigned long long)map->m_lblk, map->m_len);
3008
3009         eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3010                 inode->i_sb->s_blocksize_bits;
3011         if (eof_block < map->m_lblk + map->m_len)
3012                 eof_block = map->m_lblk + map->m_len;
3013
3014         depth = ext_depth(inode);
3015         eh = path[depth].p_hdr;
3016         ex = path[depth].p_ext;
3017         ee_block = le32_to_cpu(ex->ee_block);
3018         ee_len = ext4_ext_get_actual_len(ex);
3019         allocated = ee_len - (map->m_lblk - ee_block);
3020
3021         trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3022
3023         /* Pre-conditions */
3024         BUG_ON(!ext4_ext_is_uninitialized(ex));
3025         BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
3026
3027         /*
3028          * Attempt to transfer newly initialized blocks from the currently
3029          * uninitialized extent to its left neighbor. This is much cheaper
3030          * than an insertion followed by a merge as those involve costly
3031          * memmove() calls. This is the common case in steady state for
3032          * workloads doing fallocate(FALLOC_FL_KEEP_SIZE) followed by append
3033          * writes.
3034          *
3035          * Limitations of the current logic:
3036          *  - L1: we only deal with writes at the start of the extent.
3037          *    The approach could be extended to writes at the end
3038          *    of the extent but this scenario was deemed less common.
3039          *  - L2: we do not deal with writes covering the whole extent.
3040          *    This would require removing the extent if the transfer
3041          *    is possible.
3042          *  - L3: we only attempt to merge with an extent stored in the
3043          *    same extent tree node.
3044          */
3045         if ((map->m_lblk == ee_block) &&        /*L1*/
3046                 (map->m_len < ee_len) &&        /*L2*/
3047                 (ex > EXT_FIRST_EXTENT(eh))) {  /*L3*/
3048                 struct ext4_extent *prev_ex;
3049                 ext4_lblk_t prev_lblk;
3050                 ext4_fsblk_t prev_pblk, ee_pblk;
3051                 unsigned int prev_len, write_len;
3052
3053                 prev_ex = ex - 1;
3054                 prev_lblk = le32_to_cpu(prev_ex->ee_block);
3055                 prev_len = ext4_ext_get_actual_len(prev_ex);
3056                 prev_pblk = ext4_ext_pblock(prev_ex);
3057                 ee_pblk = ext4_ext_pblock(ex);
3058                 write_len = map->m_len;
3059
3060                 /*
3061                  * A transfer of blocks from 'ex' to 'prev_ex' is allowed
3062                  * upon those conditions:
3063                  * - C1: prev_ex is initialized,
3064                  * - C2: prev_ex is logically abutting ex,
3065                  * - C3: prev_ex is physically abutting ex,
3066                  * - C4: prev_ex can receive the additional blocks without
3067                  *   overflowing the (initialized) length limit.
3068                  */
3069                 if ((!ext4_ext_is_uninitialized(prev_ex)) &&            /*C1*/
3070                         ((prev_lblk + prev_len) == ee_block) &&         /*C2*/
3071                         ((prev_pblk + prev_len) == ee_pblk) &&          /*C3*/
3072                         (prev_len < (EXT_INIT_MAX_LEN - write_len))) {  /*C4*/
3073                         err = ext4_ext_get_access(handle, inode, path + depth);
3074                         if (err)
3075                                 goto out;
3076
3077                         trace_ext4_ext_convert_to_initialized_fastpath(inode,
3078                                 map, ex, prev_ex);
3079
3080                         /* Shift the start of ex by 'write_len' blocks */
3081                         ex->ee_block = cpu_to_le32(ee_block + write_len);
3082                         ext4_ext_store_pblock(ex, ee_pblk + write_len);
3083                         ex->ee_len = cpu_to_le16(ee_len - write_len);
3084                         ext4_ext_mark_uninitialized(ex); /* Restore the flag */
3085
3086                         /* Extend prev_ex by 'write_len' blocks */
3087                         prev_ex->ee_len = cpu_to_le16(prev_len + write_len);
3088
3089                         /* Mark the block containing both extents as dirty */
3090                         ext4_ext_dirty(handle, inode, path + depth);
3091
3092                         /* Update path to point to the right extent */
3093                         path[depth].p_ext = prev_ex;
3094
3095                         /* Result: number of initialized blocks past m_lblk */
3096                         allocated = write_len;
3097                         goto out;
3098                 }
3099         }
3100
3101         WARN_ON(map->m_lblk < ee_block);
3102         /*
3103          * It is safe to convert the extent to initialized via explicit
3104          * zeroout only if the extent is fully inside i_size or new_size.
3105          */
3106         split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3107
3108         /* If the extent is at most 2*EXT4_EXT_ZERO_LEN blocks, zero it out directly */
3109         if (ee_len <= 2*EXT4_EXT_ZERO_LEN &&
3110             (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3111                 err = ext4_ext_zeroout(inode, ex);
3112                 if (err)
3113                         goto out;
3114
3115                 err = ext4_ext_get_access(handle, inode, path + depth);
3116                 if (err)
3117                         goto out;
3118                 ext4_ext_mark_initialized(ex);
3119                 ext4_ext_try_to_merge(inode, path, ex);
3120                 err = ext4_ext_dirty(handle, inode, path + depth);
3121                 goto out;
3122         }
3123
3124         /*
3125          * four cases:
3126          * 1. split the extent into three extents.
3127          * 2. split the extent into two extents, zeroout the first half.
3128          * 3. split the extent into two extents, zeroout the second half.
3129          * 4. split the extent into two extents without zeroout.
3130          */
3131         split_map.m_lblk = map->m_lblk;
3132         split_map.m_len = map->m_len;
3133
3134         if (allocated > map->m_len) {
3135                 if (allocated <= EXT4_EXT_ZERO_LEN &&
3136                     (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3137                         /* case 3 */
3138                         zero_ex.ee_block =
3139                                          cpu_to_le32(map->m_lblk);
3140                         zero_ex.ee_len = cpu_to_le16(allocated);
3141                         ext4_ext_store_pblock(&zero_ex,
3142                                 ext4_ext_pblock(ex) + map->m_lblk - ee_block);
3143                         err = ext4_ext_zeroout(inode, &zero_ex);
3144                         if (err)
3145                                 goto out;
3146                         split_map.m_lblk = map->m_lblk;
3147                         split_map.m_len = allocated;
3148                 } else if ((map->m_lblk - ee_block + map->m_len <
3149                            EXT4_EXT_ZERO_LEN) &&
3150                            (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3151                         /* case 2 */
3152                         if (map->m_lblk != ee_block) {
3153                                 zero_ex.ee_block = ex->ee_block;
3154                                 zero_ex.ee_len = cpu_to_le16(map->m_lblk -
3155                                                         ee_block);
3156                                 ext4_ext_store_pblock(&zero_ex,
3157                                                       ext4_ext_pblock(ex));
3158                                 err = ext4_ext_zeroout(inode, &zero_ex);
3159                                 if (err)
3160                                         goto out;
3161                         }
3162
3163                         split_map.m_lblk = ee_block;
3164                         split_map.m_len = map->m_lblk - ee_block + map->m_len;
3165                         allocated = map->m_len;
3166                 }
3167         }
3168
3169         allocated = ext4_split_extent(handle, inode, path,
3170                                        &split_map, split_flag, 0);
3171         if (allocated < 0)
3172                 err = allocated;
3173
3174 out:
3175         return err ? err : allocated;
3176 }
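/*
 * Editor's sketch (hypothetical helper, not part of the original source):
 * a distilled view of the zeroout decisions made by
 * ext4_ext_convert_to_initialized() above, assuming EXT4_EXT_MAY_ZEROOUT
 * is set. Returns 0 for the whole-extent zeroout shortcut, 2 or 3 for the
 * corresponding cases in the comment above, and 1 for a plain split
 * (cases 1/4) with no zeroout.
 */
static int sketch_zeroout_choice(ext4_lblk_t ee_block, unsigned int ee_len,
                                 ext4_lblk_t m_lblk, unsigned int m_len)
{
        unsigned int allocated = ee_len - (m_lblk - ee_block);

        if (ee_len <= 2 * EXT4_EXT_ZERO_LEN)
                return 0;       /* zero out the whole extent, mark initialized */
        if (allocated > m_len) {
                if (allocated <= EXT4_EXT_ZERO_LEN)
                        return 3;       /* case 3: zero out the second part */
                if (m_lblk - ee_block + m_len < EXT4_EXT_ZERO_LEN)
                        return 2;       /* case 2: zero out the first part */
        }
        return 1;               /* cases 1/4: split without zeroout */
}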
3177
3178 /*
3179  * This function is called by ext4_ext_map_blocks() from
3180  * ext4_get_blocks_dio_write() when DIO to write
3181  * to an uninitialized extent.
3182  *
3183  * Writing to an uninitialized extent may result in splitting the uninitialized
3184  * extent into multiple initialized/uninitialized extents (up to three).
3185  * There are three possibilities:
3186  *   a> There is no split required: Entire extent should be uninitialized
3187  *   b> Splits in two extents: Write is happening at either end of the extent
3188  *   c> Splits in three extents: Someone is writing in the middle of the extent
3189  *
3190  * One or more index blocks may be needed if the extent tree grows after
3191  * the uninitialized extent is split. To prevent ENOSPC from occurring at
3192  * IO completion time, we split the uninitialized extent before submitting
3193  * the DIO. The uninitialized extent will be split into (at most) three
3194  * uninitialized extents. After the IO completes, the part being filled
3195  * will be converted to initialized by the end_io callback via
3196  * ext4_convert_unwritten_extents().
3197  *
3198  * Returns the size of uninitialized extent to be written on success.
3199  */
3200 static int ext4_split_unwritten_extents(handle_t *handle,
3201                                         struct inode *inode,
3202                                         struct ext4_map_blocks *map,
3203                                         struct ext4_ext_path *path,
3204                                         int flags)
3205 {
3206         ext4_lblk_t eof_block;
3207         ext4_lblk_t ee_block;
3208         struct ext4_extent *ex;
3209         unsigned int ee_len;
3210         int split_flag = 0, depth;
3211
3212         ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
3213                 "block %llu, max_blocks %u\n", inode->i_ino,
3214                 (unsigned long long)map->m_lblk, map->m_len);
3215
3216         eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3217                 inode->i_sb->s_blocksize_bits;
3218         if (eof_block < map->m_lblk + map->m_len)
3219                 eof_block = map->m_lblk + map->m_len;
3220         /*
3221          * It is safe to convert the extent to initialized via explicit
3222          * zeroout only if the extent is fully inside i_size or new_size.
3223          */
3224         depth = ext_depth(inode);
3225         ex = path[depth].p_ext;
3226         ee_block = le32_to_cpu(ex->ee_block);
3227         ee_len = ext4_ext_get_actual_len(ex);
3228
3229         split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3230         split_flag |= EXT4_EXT_MARK_UNINIT2;
3231
3232         flags |= EXT4_GET_BLOCKS_PRE_IO;
3233         return ext4_split_extent(handle, inode, path, map, split_flag, flags);
3234 }
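/*
 * Editor's worked example (illustrative, not part of the original source):
 * the eof_block computation above rounds i_size up to a block boundary.
 * With i_size == 10000 bytes and a 4096-byte block size (blocksize_bits
 * == 12): eof_block = (10000 + 4095) >> 12 = 3, i.e. blocks 0..2 lie
 * inside i_size, so only extents ending at or before block 3 may be
 * zeroed out safely.
 */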
3235
3236 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3237                                               struct inode *inode,
3238                                               struct ext4_ext_path *path)
3239 {
3240         struct ext4_extent *ex;
3241         int depth;
3242         int err = 0;
3243
3244         depth = ext_depth(inode);
3245         ex = path[depth].p_ext;
3246
3247         ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
3248                 "block %llu, max_blocks %u\n", inode->i_ino,
3249                 (unsigned long long)le32_to_cpu(ex->ee_block),
3250                 ext4_ext_get_actual_len(ex));
3251
3252         err = ext4_ext_get_access(handle, inode, path + depth);
3253         if (err)
3254                 goto out;
3255         /* first mark the extent as initialized */
3256         ext4_ext_mark_initialized(ex);
3257
3258         /* note: ext4_ext_correct_indexes() isn't needed here because
3259          * borders are not changed
3260          */
3261         ext4_ext_try_to_merge(inode, path, ex);
3262
3263         /* Mark modified extent as dirty */
3264         err = ext4_ext_dirty(handle, inode, path + depth);
3265 out:
3266         ext4_ext_show_leaf(inode, path);
3267         return err;
3268 }
3269
3270 static void unmap_underlying_metadata_blocks(struct block_device *bdev,
3271                         sector_t block, int count)
3272 {
3273         int i;
3274         for (i = 0; i < count; i++)
3275                 unmap_underlying_metadata(bdev, block + i);
3276 }
3277
3278 /*
3279  * Handle EOFBLOCKS_FL flag, clearing it if necessary
3280  */
3281 static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
3282                               ext4_lblk_t lblk,
3283                               struct ext4_ext_path *path,
3284                               unsigned int len)
3285 {
3286         int i, depth;
3287         struct ext4_extent_header *eh;
3288         struct ext4_extent *last_ex;
3289
3290         if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
3291                 return 0;
3292
3293         depth = ext_depth(inode);
3294         eh = path[depth].p_hdr;
3295
3296         if (unlikely(!eh->eh_entries)) {
3297                 EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and "
3298                                  "EOFBLOCKS_FL set");
3299                 return -EIO;
3300         }
3301         last_ex = EXT_LAST_EXTENT(eh);
3302         /*
3303          * We should clear the EOFBLOCKS_FL flag if we are writing the
3304          * last block in the last extent in the file.  We test this by
3305          * first checking to see if the caller to
3306          * ext4_ext_get_blocks() was interested in the last block (or
3307          * a block beyond the last block) in the current extent.  If
3308          * this turns out to be false, we can bail out from this
3309          * function immediately.
3310          */
3311         if (lblk + len < le32_to_cpu(last_ex->ee_block) +
3312             ext4_ext_get_actual_len(last_ex))
3313                 return 0;
3314         /*
3315          * If the caller does appear to be planning to write at or
3316          * beyond the end of the current extent, we then test to see
3317          * if the current extent is the last extent in the file, by
3318          * checking to make sure it was reached via the rightmost node
3319          * at each level of the tree.
3320          */
3321         for (i = depth-1; i >= 0; i--)
3322                 if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
3323                         return 0;
3324         ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3325         return ext4_mark_inode_dirty(handle, inode);
3326 }
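/*
 * Editor's illustration (not part of the original source): for a tree of
 * depth 2, the loop above requires path[1].p_idx and path[0].p_idx to each
 * be the last index in their respective nodes; only then is last_ex the
 * file's final extent, and only a write reaching its last block clears
 * EXT4_INODE_EOFBLOCKS.
 */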
3327
3328 /**
3329  * ext4_find_delalloc_range: find delayed allocated block in the given range.
3330  *
3331  * Goes through the buffer heads in the range [lblk_start, lblk_end] and returns
3332  * whether there are any buffers marked for delayed allocation. It returns '1'
3333  * on the first delalloc'ed buffer head found. If no buffer head in the given
3334  * range is marked for delalloc, it returns 0.
3335  * lblk_start should always be <= lblk_end.
3336  * search_hint_reverse is to indicate that searching in reverse from lblk_end to
3337  * lblk_start might be more efficient (i.e., we will likely hit the delalloc'ed
3338  * block sooner). This is useful when blocks are truncated sequentially from
3339  * lblk_start towards lblk_end.
3340  */
3341 static int ext4_find_delalloc_range(struct inode *inode,
3342                                     ext4_lblk_t lblk_start,
3343                                     ext4_lblk_t lblk_end,
3344                                     int search_hint_reverse)
3345 {
3346         struct address_space *mapping = inode->i_mapping;
3347         struct buffer_head *head, *bh = NULL;
3348         struct page *page;
3349         ext4_lblk_t i, pg_lblk;
3350         pgoff_t index;
3351
3352         if (!test_opt(inode->i_sb, DELALLOC))
3353                 return 0;
3354
3355         /* reverse search won't work if fs block size is less than page size */
3356         if (inode->i_blkbits < PAGE_CACHE_SHIFT)
3357                 search_hint_reverse = 0;
3358
3359         if (search_hint_reverse)
3360                 i = lblk_end;
3361         else
3362                 i = lblk_start;
3363
3364         index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
3365
3366         while ((i >= lblk_start) && (i <= lblk_end)) {
3367                 page = find_get_page(mapping, index);
3368                 if (!page)
3369                         goto nextpage;
3370
3371                 if (!page_has_buffers(page))
3372                         goto nextpage;
3373
3374                 head = page_buffers(page);
3375                 if (!head)
3376                         goto nextpage;
3377
3378                 bh = head;
3379                 pg_lblk = index << (PAGE_CACHE_SHIFT -
3380                                                 inode->i_blkbits);
3381                 do {
3382                         if (unlikely(pg_lblk < lblk_start)) {
3383                                 /*
3384                                  * This is possible when fs block size is less
3385                                  * than page size and our cluster starts/ends in
3386                                  * the middle of the page. So we need to skip the
3387                                  * initial few blocks till we reach the 'lblk'
3388                                  */
3389                                 pg_lblk++;
3390                                 continue;
3391                         }
3392
3393                         /* Check if the buffer is delayed allocated and that it
3394                          * is not yet mapped. (when da-buffers are mapped during
3395                          * their writeout, their da_mapped bit is set.)
3396                          */
3397                         if (buffer_delay(bh) && !buffer_da_mapped(bh)) {
3398                                 page_cache_release(page);
3399                                 trace_ext4_find_delalloc_range(inode,
3400                                                 lblk_start, lblk_end,
3401                                                 search_hint_reverse,
3402                                                 1, i);
3403                                 return 1;
3404                         }
3405                         if (search_hint_reverse)
3406                                 i--;
3407                         else
3408                                 i++;
3409                 } while ((i >= lblk_start) && (i <= lblk_end) &&
3410                                 ((bh = bh->b_this_page) != head));
3411 nextpage:
3412                 if (page)
3413                         page_cache_release(page);
3414                 /*
3415                  * Move to next page. 'i' will be the first lblk in the next
3416                  * page.
3417                  */
3418                 if (search_hint_reverse)
3419                         index--;
3420                 else
3421                         index++;
3422                 i = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
3423         }
3424
3425         trace_ext4_find_delalloc_range(inode, lblk_start, lblk_end,
3426                                         search_hint_reverse, 0, 0);
3427         return 0;
3428 }
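/*
 * Editor's worked example (illustrative, not part of the original source)
 * of the block/page index arithmetic above, assuming 4 KiB pages
 * (PAGE_CACHE_SHIFT == 12) and 1 KiB filesystem blocks (i_blkbits == 10):
 * logical block i == 37 maps to page index 37 >> 2 == 9, and that page
 * holds blocks 9 << 2 .. (9 << 2) + 3, i.e. 36..39, which is why pg_lblk
 * may start before lblk_start and the initial blocks must be skipped.
 */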
3429
3430 int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk,
3431                                int search_hint_reverse)
3432 {
3433         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3434         ext4_lblk_t lblk_start, lblk_end;
3435         lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
3436         lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
3437
3438         return ext4_find_delalloc_range(inode, lblk_start, lblk_end,
3439                                         search_hint_reverse);
3440 }
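/*
 * Editor's worked example (illustrative, not part of the original source):
 * with s_cluster_ratio == 16, lblk == 37 yields
 * lblk_start = 37 & ~15 = 32 and lblk_end = 47, i.e. the whole cluster
 * containing block 37 is searched for delalloc buffers.
 */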
3441
3442 /**
3443  * Determines how many complete clusters (out of those specified by the 'map')
3444  * are under delalloc and were reserved quota for.
3445  * This function is called when we are writing out the blocks that were
3446  * originally written with their allocation delayed, but then the space was
3447  * allocated using fallocate() before the delayed allocation could be resolved.
3448  * The cases to look for are:
3449  * ('=' indicates delayed allocated blocks
3450  *  '-' indicates non-delayed allocated blocks)
3451  * (a) partial clusters towards beginning and/or end outside of allocated range
3452  *     are not delalloc'ed.
3453  *      Ex:
3454  *      |----c---=|====c====|====c====|===-c----|
3455  *               |++++++ allocated ++++++|
3456  *      ==> 4 complete clusters in above example
3457  *
3458  * (b) partial cluster (outside of allocated range) towards either end is
3459  *     marked for delayed allocation. In this case, we will exclude that
3460  *     cluster.
3461  *      Ex:
3462  *      |----====c========|========c========|
3463  *           |++++++ allocated ++++++|
3464  *      ==> 1 complete cluster in above example
3465  *
3466  *      Ex:
3467  *      |================c================|
3468  *            |++++++ allocated ++++++|
3469  *      ==> 0 complete clusters in above example
3470  *
3471  * The ext4_da_update_reserve_space will be called only if we
3472  * determine here that there were some "entire" clusters that span
3473  * this 'allocated' range.
3474  * In the non-bigalloc case, this function will just end up returning num_blks
3475  * without ever calling ext4_find_delalloc_range.
3476  */
3477 static unsigned int
3478 get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
3479                            unsigned int num_blks)
3480 {
3481         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3482         ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
3483         ext4_lblk_t lblk_from, lblk_to, c_offset;
3484         unsigned int allocated_clusters = 0;
3485
3486         alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
3487         alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
3488
3489         /* max possible clusters for this allocation */
3490         allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
3491
3492         trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
3493
3494         /* Check towards left side */
3495         c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
3496         if (c_offset) {
3497                 lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
3498                 lblk_to = lblk_from + c_offset - 1;
3499
3500                 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
3501                         allocated_clusters--;
3502         }
3503
3504         /* Now check towards right. */
3505         c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
3506         if (allocated_clusters && c_offset) {
3507                 lblk_from = lblk_start + num_blks;
3508                 lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
3509
3510                 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
3511                         allocated_clusters--;
3512         }
3513
3514         return allocated_clusters;
3515 }
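/*
 * Editor's worked example (illustrative, not part of the original source),
 * assuming s_cluster_ratio == 4, lblk_start == 10, num_blks == 20 (blocks
 * 10..29): alloc_cluster_start = 2, alloc_cluster_end = 7, so at most 6
 * clusters. On the left, c_offset = 10 & 3 = 2, so blocks 8..9 are checked
 * for delalloc; on the right, c_offset = 30 & 3 = 2, so blocks 30..31 are
 * checked. Each partial cluster found to be delalloc'ed decrements the
 * count, exactly as in case (b) of the comment above.
 */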
3516
3517 static int
3518 ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3519                         struct ext4_map_blocks *map,
3520                         struct ext4_ext_path *path, int flags,
3521                         unsigned int allocated, ext4_fsblk_t newblock)
3522 {
3523         int ret = 0;
3524         int err = 0;
3525         ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3526
3527         ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
3528                   "block %llu, max_blocks %u, flags %x, allocated %u\n",
3529                   inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
3530                   flags, allocated);
3531         ext4_ext_show_leaf(inode, path);
3532
3533         trace_ext4_ext_handle_uninitialized_extents(inode, map, allocated,
3534                                                     newblock);
3535
3536         /* get_block() before submit the IO, split the extent */
3537         if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3538                 ret = ext4_split_unwritten_extents(handle, inode, map,
3539                                                    path, flags);
3540                 /*
3541                  * Flag the inode (non-AIO case) or end_io struct (AIO case)
3542                  * that this IO needs conversion to written when the IO is
3543                  * completed
3544                  */
3545                 if (io)
3546                         ext4_set_io_unwritten_flag(inode, io);
3547                 else
3548                         ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3549                 if (ext4_should_dioread_nolock(inode))
3550                         map->m_flags |= EXT4_MAP_UNINIT;
3551                 goto out;
3552         }
3553         /* IO end_io complete, convert the filled extent to written */
3554         if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
3555                 ret = ext4_convert_unwritten_extents_endio(handle, inode,
3556                                                         path);
3557                 if (ret >= 0) {
3558                         ext4_update_inode_fsync_trans(handle, inode, 1);
3559                         err = check_eofblocks_fl(handle, inode, map->m_lblk,
3560                                                  path, map->m_len);
3561                 } else
3562                         err = ret;
3563                 goto out2;
3564         }
3565         /* buffered IO case */
3566         /*
3567          * a repeated fallocate creation request:
3568          * we already have an unwritten extent
3569          */
3570         if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
3571                 goto map_out;
3572
3573         /* buffered READ or buffered write_begin() lookup */
3574         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3575                 /*
3576                  * We have blocks reserved already.  We
3577                  * return allocated blocks so that delalloc
3578                  * won't do block reservation for us.  But
3579                  * the buffer head will be unmapped so that
3580                  * a read from the block returns 0s.
3581                  */
3582                 map->m_flags |= EXT4_MAP_UNWRITTEN;
3583                 goto out1;
3584         }
3585
3586         /* buffered write, writepage time, convert */
3587         ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
3588         if (ret >= 0)
3589                 ext4_update_inode_fsync_trans(handle, inode, 1);
3590 out:
3591         if (ret <= 0) {
3592                 err = ret;
3593                 goto out2;
3594         } else
3595                 allocated = ret;
3596         map->m_flags |= EXT4_MAP_NEW;
3597         /*
3598          * if we allocated more blocks than requested, we
3599          * need to make sure we unmap the extra blocks
3600          * allocated. The blocks actually needed will get
3601          * unmapped later, when we find the buffer_head
3602          * marked new.
3603          */
3604         if (allocated > map->m_len) {
3605                 unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3606                                         newblock + map->m_len,
3607                                         allocated - map->m_len);
3608                 allocated = map->m_len;
3609         }
3610
3611         /*
3612          * If we have done fallocate at an offset that is already
3613          * delayed allocated, we would have block reservation
3614          * and quota reservation done in the delayed write path.
3615          * But fallocate would have already updated the quota and block
3616          * count for this offset, so cancel these reservations.
3617          */
3618         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
3619                 unsigned int reserved_clusters;
3620                 reserved_clusters = get_reserved_cluster_alloc(inode,
3621                                 map->m_lblk, map->m_len);
3622                 if (reserved_clusters)
3623                         ext4_da_update_reserve_space(inode,
3624                                                      reserved_clusters,
3625                                                      0);
3626         }
3627
3628 map_out:
3629         map->m_flags |= EXT4_MAP_MAPPED;
3630         if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
3631                 err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
3632                                          map->m_len);
3633                 if (err < 0)
3634                         goto out2;
3635         }
3636 out1:
3637         if (allocated > map->m_len)
3638                 allocated = map->m_len;
3639         ext4_ext_show_leaf(inode, path);
3640         map->m_pblk = newblock;
3641         map->m_len = allocated;
3642 out2:
3643         if (path) {
3644                 ext4_ext_drop_refs(path);
3645                 kfree(path);
3646         }
3647         return err ? err : allocated;
3648 }
3649
3650 /*
3651  * get_implied_cluster_alloc - check to see if the requested
3652  * allocation (in the map structure) overlaps with a cluster already
3653  * allocated in an extent.
3654  *      @sb     The filesystem superblock structure
3655  *      @map    The requested lblk->pblk mapping
3656  *      @ex     The extent structure which might contain an implied
3657  *                      cluster allocation
3658  *
3659  * This function is called by ext4_ext_map_blocks() after we failed to
3660  * find blocks that were already in the inode's extent tree.  Hence,
3661  * we know that the beginning of the requested region cannot overlap
3662  * the extent from the inode's extent tree.  There are three cases we
3663  * want to catch.  The first is this case:
3664  *
3665  *               |--- cluster # N--|
3666  *    |--- extent ---|  |---- requested region ---|
3667  *                      |==========|
3668  *
3669  * The second case that we need to test for is this one:
3670  *
3671  *   |--------- cluster # N ----------------|
3672  *         |--- requested region --|   |------- extent ----|
3673  *         |=======================|
3674  *
3675  * The third case is when the requested region lies between two extents
3676  * within the same cluster:
3677  *          |------------- cluster # N-------------|
3678  * |----- ex -----|                  |---- ex_right ----|
3679  *                  |------ requested region ------|
3680  *                  |================|
3681  *
3682  * In each of the above cases, we need to set the map->m_pblk and
3683  * map->m_len so they correspond to the portion of the extent labelled as
3684  * "|====|" from cluster #N, since it is already in use for data in
3685  * cluster EXT4_B2C(sbi, map->m_lblk).  We will then return 1 to
3686  * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
3687  * as a new "allocated" block region.  Otherwise, we will return 0 and
3688  * ext4_ext_map_blocks() will then allocate one or more new clusters
3689  * by calling ext4_mb_new_blocks().
3690  */
3691 static int get_implied_cluster_alloc(struct super_block *sb,
3692                                      struct ext4_map_blocks *map,
3693                                      struct ext4_extent *ex,
3694                                      struct ext4_ext_path *path)
3695 {
3696         struct ext4_sb_info *sbi = EXT4_SB(sb);
3697         ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
3698         ext4_lblk_t ex_cluster_start, ex_cluster_end;
3699         ext4_lblk_t rr_cluster_start;
3700         ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3701         ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
3702         unsigned short ee_len = ext4_ext_get_actual_len(ex);
3703
3704         /* The extent passed in that we are trying to match */
3705         ex_cluster_start = EXT4_B2C(sbi, ee_block);
3706         ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
3707
3708         /* The requested region passed into ext4_map_blocks() */
3709         rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
3710
3711         if ((rr_cluster_start == ex_cluster_end) ||
3712             (rr_cluster_start == ex_cluster_start)) {
3713                 if (rr_cluster_start == ex_cluster_end)
3714                         ee_start += ee_len - 1;
3715                 map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
3716                         c_offset;
3717                 map->m_len = min(map->m_len,
3718                                  (unsigned) sbi->s_cluster_ratio - c_offset);
3719                 /*
3720                  * Check for and handle this case:
3721                  *
3722                  *   |--------- cluster # N-------------|
3723                  *                     |------- extent ----|
3724                  *         |--- requested region ---|
3725                  *         |===========|
3726                  */
3727
3728                 if (map->m_lblk < ee_block)
3729                         map->m_len = min(map->m_len, ee_block - map->m_lblk);
3730
3731                 /*
3732                  * Check for the case where there is already another allocated
3733                  * block to the right of 'ex' but before the end of the cluster.
3734                  *
3735                  *          |------------- cluster # N-------------|
3736                  * |----- ex -----|                  |---- ex_right ----|
3737                  *                  |------ requested region ------|
3738                  *                  |================|
3739                  */
3740                 if (map->m_lblk > ee_block) {
3741                         ext4_lblk_t next = ext4_ext_next_allocated_block(path);
3742                         map->m_len = min(map->m_len, next - map->m_lblk);
3743                 }
3744
3745                 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
3746                 return 1;
3747         }
3748
3749         trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
3750         return 0;
3751 }
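/*
 * Editor's worked example (illustrative, not part of the original source),
 * assuming s_cluster_ratio == 4: an extent of 2 blocks at logical 8,
 * physical 100 (so cluster #2 covers logical 8..11), and a request at
 * m_lblk == 10: c_offset = 2 and rr_cluster_start == ex_cluster_end == 2,
 * so ee_start is advanced to 101 and m_pblk = (101 & ~3) + 2 = 102; m_len
 * is clamped to at most 2 blocks (the rest of the cluster), and further by
 * the next allocated block since m_lblk > ee_block.
 */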
3752
3753
3754 /*
3755  * Block allocation/map/preallocation routine for extents based files
3756  *
3757  *
3758  * Need to be called with
3759  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
3760  * (i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
3761  *
3762  * return > 0, number of blocks already mapped/allocated
3763  *          if create == 0 and these are pre-allocated blocks
3764  *              buffer head is unmapped
3765  *          otherwise blocks are mapped
3766  *
3767  * return = 0, if plain look up failed (blocks have not been allocated)
3768  *          buffer head is unmapped
3769  *
3770  * return < 0, error case.
3771  */
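/*
 * Editor's usage sketch (hypothetical, not from the original source): a
 * read-side lookup, called with i_data_sem held for reading as described
 * above, that maps up to 8 blocks at logical block 0 without allocating:
 *
 *      struct ext4_map_blocks map = { .m_lblk = 0, .m_len = 8 };
 *      int ret = ext4_ext_map_blocks(handle, inode, &map, 0);
 *
 * ret > 0 means 'ret' blocks are mapped starting at map.m_pblk; ret == 0
 * means the range is a hole; ret < 0 is an error.
 */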
3772 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
3773                         struct ext4_map_blocks *map, int flags)
3774 {
3775         struct ext4_ext_path *path = NULL;
3776         struct ext4_extent newex, *ex, *ex2;
3777         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3778         ext4_fsblk_t newblock = 0;
3779         int free_on_err = 0, err = 0, depth, ret;
3780         unsigned int allocated = 0, offset = 0;
3781         unsigned int allocated_clusters = 0;
3782         struct ext4_allocation_request ar;
3783         ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3784         ext4_lblk_t cluster_offset;
3785
3786         ext_debug("blocks %u/%u requested for inode %lu\n",
3787                   map->m_lblk, map->m_len, inode->i_ino);
3788         trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
3789
3790         /* check in cache */
3791         if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
3792                 if (!newex.ee_start_lo && !newex.ee_start_hi) {
3793                         if ((sbi->s_cluster_ratio > 1) &&
3794                             ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
3795                                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3796
3797                         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3798                                 /*
3799                                  * block isn't allocated yet and
3800                                  * user doesn't want to allocate it
3801                                  */
3802                                 goto out2;
3803                         }
3804                         /* we should allocate requested block */
3805                 } else {
3806                         /* block is already allocated */
3807                         if (sbi->s_cluster_ratio > 1)
3808                                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3809                         newblock = map->m_lblk
3810                                    - le32_to_cpu(newex.ee_block)
3811                                    + ext4_ext_pblock(&newex);
3812                         /* number of remaining blocks in the extent */
3813                         allocated = ext4_ext_get_actual_len(&newex) -
3814                                 (map->m_lblk - le32_to_cpu(newex.ee_block));
3815                         goto out;
3816                 }
3817         }
3818
3819         /* find extent for this block */
3820         path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
3821         if (IS_ERR(path)) {
3822                 err = PTR_ERR(path);
3823                 path = NULL;
3824                 goto out2;
3825         }
3826
3827         depth = ext_depth(inode);
3828
3829         /*
3830          * consistent leaf must not be empty;
3831          * this situation is possible, though, _during_ tree modification;
3832          * this is why assert can't be put in ext4_ext_find_extent()
3833          */
3834         if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
3835                 EXT4_ERROR_INODE(inode, "bad extent address "
3836                                  "lblock: %lu, depth: %d pblock %lld",
3837                                  (unsigned long) map->m_lblk, depth,
3838                                  path[depth].p_block);
3839                 err = -EIO;
3840                 goto out2;
3841         }
3842
3843         ex = path[depth].p_ext;
3844         if (ex) {
3845                 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3846                 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
3847                 unsigned short ee_len;
3848
3849                 /*
3850                  * Uninitialized extents are treated as holes, except that
3851                  * we split out initialized portions during a write.
3852                  */
3853                 ee_len = ext4_ext_get_actual_len(ex);
3854
3855                 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
3856
3857                 /* if found extent covers block, simply return it */
3858                 if (in_range(map->m_lblk, ee_block, ee_len)) {
3859                         newblock = map->m_lblk - ee_block + ee_start;
3860                         /* number of remaining blocks in the extent */
3861                         allocated = ee_len - (map->m_lblk - ee_block);
3862                         ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
3863                                   ee_block, ee_len, newblock);
3864
3865                         /*
3866                          * Do not put uninitialized extent
3867                          * in the cache
3868                          */
3869                         if (!ext4_ext_is_uninitialized(ex)) {
3870                                 ext4_ext_put_in_cache(inode, ee_block,
3871                                         ee_len, ee_start);
3872                                 goto out;
3873                         }
3874                         ret = ext4_ext_handle_uninitialized_extents(
3875                                 handle, inode, map, path, flags,
3876                                 allocated, newblock);
3877                         return ret;
3878                 }
3879         }
3880
3881         if ((sbi->s_cluster_ratio > 1) &&
3882             ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
3883                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3884
3885         /*
3886          * requested block isn't allocated yet;
3887          * we cannot create blocks if the create flag is zero
3888          */
3889         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3890                 /*
3891                  * put just found gap into cache to speed up
3892                  * subsequent requests
3893                  */
3894                 ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
3895                 goto out2;
3896         }
3897
3898         /*
3899          * Okay, we need to do block allocation.
3900          */
3901         map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
3902         newex.ee_block = cpu_to_le32(map->m_lblk);
3903         cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
3904
3905         /*
3906          * If we are doing bigalloc, check to see if the extent returned
3907          * by ext4_ext_find_extent() implies a cluster we can use.
3908          */
3909         if (cluster_offset && ex &&
3910             get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
3911                 ar.len = allocated = map->m_len;
3912                 newblock = map->m_pblk;
3913                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3914                 goto got_allocated_blocks;
3915         }
3916
3917         /* find neighbour allocated blocks */
3918         ar.lleft = map->m_lblk;
3919         err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
3920         if (err)
3921                 goto out2;
3922         ar.lright = map->m_lblk;
3923         ex2 = NULL;
3924         err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
3925         if (err)
3926                 goto out2;
3927
3928         /* Check if the extent after searching to the right implies a
3929          * cluster we can use. */
3930         if ((sbi->s_cluster_ratio > 1) && ex2 &&
3931             get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
3932                 ar.len = allocated = map->m_len;
3933                 newblock = map->m_pblk;
3934                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3935                 goto got_allocated_blocks;
3936         }
3937
3938         /*
3939          * See if request is beyond maximum number of blocks we can have in
3940          * a single extent. For an initialized extent this limit is
3941          * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
3942          * EXT_UNINIT_MAX_LEN.
3943          */
3944         if (map->m_len > EXT_INIT_MAX_LEN &&
3945             !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
3946                 map->m_len = EXT_INIT_MAX_LEN;
3947         else if (map->m_len > EXT_UNINIT_MAX_LEN &&
3948                  (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
3949                 map->m_len = EXT_UNINIT_MAX_LEN;
3950
3951         /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
3952         newex.ee_len = cpu_to_le16(map->m_len);
3953         err = ext4_ext_check_overlap(sbi, inode, &newex, path);
3954         if (err)
3955                 allocated = ext4_ext_get_actual_len(&newex);
3956         else
3957                 allocated = map->m_len;
3958
3959         /* allocate new block */
3960         ar.inode = inode;
3961         ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
3962         ar.logical = map->m_lblk;
3963         /*
3964          * We calculate the offset from the beginning of the cluster
3965          * for the logical block number, since when we allocate a
3966          * physical cluster, the physical block should start at the
3967          * same offset from the beginning of the cluster.  This is
3968          * needed so that future calls to get_implied_cluster_alloc()
3969          * work correctly.
3970          */
3971         offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
3972         ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
3973         ar.goal -= offset;
3974         ar.logical -= offset;
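        /*
         * Editor's worked example (illustrative, not part of the original
         * source) of the cluster alignment above, assuming s_cluster_ratio
         * == 16 and map->m_lblk == 100: offset = 100 & 15 = 4, so the goal
         * and logical block are pulled back 4 blocks to the cluster
         * boundary, and for allocated == 8 blocks ar.len =
         * EXT4_NUM_B2C(sbi, 4 + 8) = 1 cluster. The allocated physical
         * block then keeps the same in-cluster offset as the logical
         * block, which get_implied_cluster_alloc() relies on.
         */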
3975         if (S_ISREG(inode->i_mode))
3976                 ar.flags = EXT4_MB_HINT_DATA;
3977         else
3978                 /* disable in-core preallocation for non-regular files */
3979                 ar.flags = 0;
3980         if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
3981                 ar.flags |= EXT4_MB_HINT_NOPREALLOC;
3982         newblock = ext4_mb_new_blocks(handle, &ar, &err);
3983         if (!newblock)
3984                 goto out2;
3985         ext_debug("allocate new block: goal %llu, found %llu/%u\n",
3986                   ar.goal, newblock, allocated);
3987         free_on_err = 1;
3988         allocated_clusters = ar.len;
3989         ar.len = EXT4_C2B(sbi, ar.len) - offset;
3990         if (ar.len > allocated)
3991                 ar.len = allocated;
3992
3993 got_allocated_blocks:
3994         /* try to insert new extent into found leaf and return */
3995         ext4_ext_store_pblock(&newex, newblock + offset);
3996         newex.ee_len = cpu_to_le16(ar.len);
3997         /* Mark uninitialized */
3998         if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
3999                 ext4_ext_mark_uninitialized(&newex);
4000                 /*
4001                  * An io_end structure is created for every IO write to an
4002                  * uninitialized extent. To avoid unnecessary conversion,
4003                  * here we flag the IO that really needs the conversion.
4004                  * For the non-async direct IO case, flag the inode state
4005                  * that we need to perform the conversion when the IO is done.
4006                  */
4007                 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
4008                         if (io)
4009                                 ext4_set_io_unwritten_flag(inode, io);
4010                         else
4011                                 ext4_set_inode_state(inode,
4012                                                      EXT4_STATE_DIO_UNWRITTEN);
4013                 }
4014                 if (ext4_should_dioread_nolock(inode))
4015                         map->m_flags |= EXT4_MAP_UNINIT;
4016         }
4017
4018         err = 0;
4019         if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
4020                 err = check_eofblocks_fl(handle, inode, map->m_lblk,
4021                                          path, ar.len);
4022         if (!err)
4023                 err = ext4_ext_insert_extent(handle, inode, path,
4024                                              &newex, flags);
4025         if (err && free_on_err) {
4026                 int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
4027                         EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
4028                 /* free data blocks we just allocated */
4029                 /* not a good idea to call discard here directly,
4030                  * but otherwise we'd need to call it every free() */
4031                 ext4_discard_preallocations(inode);
4032                 ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
4033                                  ext4_ext_get_actual_len(&newex), fb_flags);
4034                 goto out2;
4035         }
4036
4037         /* previous routine could use block we allocated */
4038         newblock = ext4_ext_pblock(&newex);
4039         allocated = ext4_ext_get_actual_len(&newex);
4040         if (allocated > map->m_len)
4041                 allocated = map->m_len;
4042         map->m_flags |= EXT4_MAP_NEW;
4043
4044         /*
4045          * Update reserved blocks/metadata blocks after successful
4046          * block allocation which had been deferred till now.
4047          */
4048         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
4049                 unsigned int reserved_clusters;
4050                 /*
4051                  * Check how many clusters we had reserved for this allocated range
4052                  */
4053                 reserved_clusters = get_reserved_cluster_alloc(inode,
4054                                                 map->m_lblk, allocated);
4055                 if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
4056                         if (reserved_clusters) {
4057                                 /*
4058                                  * We have clusters reserved for this range.
4059                                  * But since we are not doing actual allocation
4060                                  * and are simply using blocks from previously
4061                                  * allocated cluster, we should release the
4062                                  * reservation and not claim quota.
4063                                  */
4064                                 ext4_da_update_reserve_space(inode,
4065                                                 reserved_clusters, 0);
4066                         }
4067                 } else {
4068                         BUG_ON(allocated_clusters < reserved_clusters);
4069                         /* We will claim quota for all newly allocated blocks. */
4070                         ext4_da_update_reserve_space(inode, allocated_clusters,
4071                                                         1);
4072                         if (reserved_clusters < allocated_clusters) {
4073                                 struct ext4_inode_info *ei = EXT4_I(inode);
4074                                 int reservation = allocated_clusters -
4075                                                   reserved_clusters;
4076                                 /*
4077                                  * It seems we claimed some clusters outside of
4078                                  * the range of this allocation. We should give
4079                                  * them back to the reservation pool. This can
4080                                  * happen in the following case:
4081                                  *
4082                                  * * Suppose s_cluster_ratio is 4 (i.e., each
4083                                  *   cluster has 4 blocks); thus the clusters
4084                                  *   are [0-3], [4-7], [8-11], ...
4085                                  * * First comes a delayed allocation write for
4086                                  *   logical blocks 10 & 11. Since there were no
4087                                  *   previous delayed allocated blocks in the
4088                                  *   range [8-11], we would reserve 1 cluster
4089                                  *   for this write.
4090                                  * * Next comes a write for logical blocks 3 to
4091                                  *   8. In this case, we will reserve 2 clusters
4092                                  *   (for [0-3] and [4-7]; not for [8-11], as
4093                                  *   that range already has delayed allocated
4094                                  *   blocks). Total reserved clusters is now 3.
4095                                  * * Now, at delayed allocation writeout time,
4096                                  *   we first write blocks [3-8] and allocate
4097                                  *   3 clusters for writing these blocks. We
4098                                  *   also claim all three of those clusters.
4099                                  * * When we then come here to write out blocks
4100                                  *   [10-11], we would expect to claim the
4101                                  *   reservation of 1 cluster we had made (and
4102                                  *   we would claim it, since there are no more
4103                                  *   delayed allocated blocks in the range
4104                                  *   [8-11]). But our reserved cluster count
4105                                  *   has already gone to 0.
4106                                  *
4107                                  *   Thus, at the last step above, when we
4108                                  *   determine that there are still some
4109                                  *   unwritten delayed allocated blocks
4110                                  *   outside of our current block range, we
4111                                  *   should increment the reserved cluster
4112                                  *   count so that when the remaining blocks
4113                                  *   finally get written, we will be able to
4114                                  *   claim them.
4115                                  */
4116                                 dquot_reserve_block(inode,
4117                                                 EXT4_C2B(sbi, reservation));
4118                                 spin_lock(&ei->i_block_reservation_lock);
4119                                 ei->i_reserved_data_blocks += reservation;
4120                                 spin_unlock(&ei->i_block_reservation_lock);
4121                         }
4122                 }
4123         }
4124
4125         /*
4126          * Cache the extent and update transaction to commit on fdatasync only
4127          * when it is _not_ an uninitialized extent.
4128          */
4129         if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
4130                 ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
4131                 ext4_update_inode_fsync_trans(handle, inode, 1);
4132         } else
4133                 ext4_update_inode_fsync_trans(handle, inode, 0);
4134 out:
4135         if (allocated > map->m_len)
4136                 allocated = map->m_len;
4137         ext4_ext_show_leaf(inode, path);
4138         map->m_flags |= EXT4_MAP_MAPPED;
4139         map->m_pblk = newblock;
4140         map->m_len = allocated;
4141 out2:
4142         if (path) {
4143                 ext4_ext_drop_refs(path);
4144                 kfree(path);
4145         }
4146
4147         trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
4148                 newblock, map->m_len, err ? err : allocated);
4149
4150         return err ? err : allocated;
4151 }
4152
4153 void ext4_ext_truncate(struct inode *inode)
4154 {
4155         struct address_space *mapping = inode->i_mapping;
4156         struct super_block *sb = inode->i_sb;
4157         ext4_lblk_t last_block;
4158         handle_t *handle;
4159         loff_t page_len;
4160         int err = 0;
4161
4162         /*
4163          * finish any pending end_io work so we won't run the risk of
4164          * converting any truncated blocks to initialized later
4165          */
4166         ext4_flush_completed_IO(inode);
4167
4168         /*
4169          * probably the first extent we free will be the last one in the block
4170          */
4171         err = ext4_writepage_trans_blocks(inode);
4172         handle = ext4_journal_start(inode, err);
4173         if (IS_ERR(handle))
4174                 return;
4175
4176         if (inode->i_size % PAGE_CACHE_SIZE != 0) {
4177                 page_len = PAGE_CACHE_SIZE -
4178                         (inode->i_size & (PAGE_CACHE_SIZE - 1));
4179
4180                 err = ext4_discard_partial_page_buffers(handle,
4181                         mapping, inode->i_size, page_len, 0);
4182
4183                 if (err)
4184                         goto out_stop;
4185         }
4186
4187         if (ext4_orphan_add(handle, inode))
4188                 goto out_stop;
4189
4190         down_write(&EXT4_I(inode)->i_data_sem);
4191         ext4_ext_invalidate_cache(inode);
4192
4193         ext4_discard_preallocations(inode);
4194
4195         /*
4196          * TODO: optimization is possible here.
4197          * Probably we need not scan at all,
4198          * because page truncation is enough.
4199          */
4200
4201         /* we have to know where to truncate from in the crash case */
4202         EXT4_I(inode)->i_disksize = inode->i_size;
4203         ext4_mark_inode_dirty(handle, inode);
4204
4205         last_block = (inode->i_size + sb->s_blocksize - 1)
4206                         >> EXT4_BLOCK_SIZE_BITS(sb);
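             /*
              * For example (a worked illustration, assuming a 4k block
              * size): an i_size of 10000 bytes gives last_block =
              * (10000 + 4095) >> 12 = 3, so logical blocks 3 through
              * EXT_MAX_BLOCKS - 1 are removed below.
              */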
4207         err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
4208
4209         /* In a multi-transaction truncate, we only make the final
4210          * transaction synchronous.
4211          */
4212         if (IS_SYNC(inode))
4213                 ext4_handle_sync(handle);
4214
4215         up_write(&EXT4_I(inode)->i_data_sem);
4216
4217 out_stop:
4218         /*
4219          * If this was a simple ftruncate() and the file will remain alive,
4220          * then we need to clear up the orphan record which we created above.
4221          * However, if this was a real unlink then we were called by
4222          * ext4_delete_inode(), and we allow that function to clean up the
4223          * orphan info for us.
4224          */
4225         if (inode->i_nlink)
4226                 ext4_orphan_del(handle, inode);
4227
4228         inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4229         ext4_mark_inode_dirty(handle, inode);
4230         ext4_journal_stop(handle);
4231 }
4232
4233 static void ext4_falloc_update_inode(struct inode *inode,
4234                                 int mode, loff_t new_size, int update_ctime)
4235 {
4236         struct timespec now;
4237
4238         if (update_ctime) {
4239                 now = current_fs_time(inode->i_sb);
4240                 if (!timespec_equal(&inode->i_ctime, &now))
4241                         inode->i_ctime = now;
4242         }
4243         /*
4244          * Update only when preallocation was requested beyond
4245          * the file size.
4246          */
4247         if (!(mode & FALLOC_FL_KEEP_SIZE)) {
4248                 if (new_size > i_size_read(inode))
4249                         i_size_write(inode, new_size);
4250                 if (new_size > EXT4_I(inode)->i_disksize)
4251                         ext4_update_i_disksize(inode, new_size);
4252         } else {
4253                 /*
4254                  * Mark that we allocate beyond EOF so the subsequent truncate
4255                  * can proceed even if the new size is the same as i_size.
4256                  */
4257                 if (new_size > i_size_read(inode))
4258                         ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4259         }
4260
4261 }
4262
4263 /*
4264  * Preallocate space for a file. This implements ext4's fallocate file
4265  * operation, which is called from the sys_fallocate system call.
4266  * For block-mapped files, posix_fallocate should fall back to writing
4267  * zeroes to the required new blocks (the same behavior expected of
4268  * file systems which do not support the fallocate() system call).
4269  */
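/*
 * For example, a user-space caller (a sketch, not from this file) can
 * preallocate 4MiB of unwritten extents without changing i_size:
 *
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 4 << 20);
 */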
4270 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4271 {
4272         struct inode *inode = file->f_path.dentry->d_inode;
4273         handle_t *handle;
4274         loff_t new_size;
4275         unsigned int max_blocks;
4276         int ret = 0;
4277         int ret2 = 0;
4278         int retries = 0;
4279         int flags;
4280         struct ext4_map_blocks map;
4281         unsigned int credits, blkbits = inode->i_blkbits;
4282
4283         /*
4284          * currently supporting (pre)allocate mode for extent-based
4285          * files _only_
4286          */
4287         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4288                 return -EOPNOTSUPP;
4289
4290         /* Return error if mode is not supported */
4291         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
4292                 return -EOPNOTSUPP;
4293
4294         if (mode & FALLOC_FL_PUNCH_HOLE)
4295                 return ext4_punch_hole(file, offset, len);
4296
4297         trace_ext4_fallocate_enter(inode, offset, len, mode);
4298         map.m_lblk = offset >> blkbits;
4299         /*
4300          * We can't just convert len to max_blocks: if blocksize = 4096,
4301          * offset = 3072 and len = 2048, the request spans two blocks.
4302          */
4303         max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
4304                 - map.m_lblk;
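             /*
              * e.g. with blkbits = 12: map.m_lblk = 3072 >> 12 = 0 and
              * EXT4_BLOCK_ALIGN(5120, 12) >> 12 = 2, so max_blocks = 2
              * even though len >> blkbits == 0.
              */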
4305         /*
4306          * credits to insert 1 extent into extent tree
4307          */
4308         credits = ext4_chunk_trans_blocks(inode, max_blocks);
4309         mutex_lock(&inode->i_mutex);
4310         ret = inode_newsize_ok(inode, (len + offset));
4311         if (ret) {
4312                 mutex_unlock(&inode->i_mutex);
4313                 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
4314                 return ret;
4315         }
4316         flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT;
4317         if (mode & FALLOC_FL_KEEP_SIZE)
4318                 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4319         /*
4320          * Don't normalize the request if it can fit in one extent so
4321          * that it doesn't get unnecessarily split into multiple
4322          * extents.
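              * (EXT_UNINIT_MAX_LEN is 32767 blocks, so with 4k blocks one
              * uninitialized extent can cover just under 128MiB.)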
4323          */
4324         if (len <= EXT_UNINIT_MAX_LEN << blkbits)
4325                 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4326 retry:
4327         while (ret >= 0 && ret < max_blocks) {
4328                 map.m_lblk = map.m_lblk + ret;
4329                 map.m_len = max_blocks = max_blocks - ret;
4330                 handle = ext4_journal_start(inode, credits);
4331                 if (IS_ERR(handle)) {
4332                         ret = PTR_ERR(handle);
4333                         break;
4334                 }
4335                 ret = ext4_map_blocks(handle, inode, &map, flags);
4336                 if (ret <= 0) {
4337 #ifdef EXT4FS_DEBUG
4338                         WARN_ON(ret <= 0);
4339                         printk(KERN_ERR "%s: ext4_ext_map_blocks "
4340                                     "returned error: inode #%lu, block=%u, "
4341                                     "max_blocks=%u\n", __func__,
4342                                     inode->i_ino, map.m_lblk, max_blocks);
4343 #endif
4344                         ext4_mark_inode_dirty(handle, inode);
4345                         ret2 = ext4_journal_stop(handle);
4346                         break;
4347                 }
4348                 if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
4349                                                 blkbits) >> blkbits))
4350                         new_size = offset + len;
4351                 else
4352                         new_size = ((loff_t) map.m_lblk + ret) << blkbits;
4353
4354                 ext4_falloc_update_inode(inode, mode, new_size,
4355                                          (map.m_flags & EXT4_MAP_NEW));
4356                 ext4_mark_inode_dirty(handle, inode);
4357                 ret2 = ext4_journal_stop(handle);
4358                 if (ret2)
4359                         break;
4360         }
4361         if (ret == -ENOSPC &&
4362                         ext4_should_retry_alloc(inode->i_sb, &retries)) {
4363                 ret = 0;
4364                 goto retry;
4365         }
4366         mutex_unlock(&inode->i_mutex);
4367         trace_ext4_fallocate_exit(inode, offset, max_blocks,
4368                                 ret > 0 ? ret2 : ret);
4369         return ret > 0 ? ret2 : ret;
4370 }
4371
4372 /*
4373  * This function converts a range of blocks to written extents.
4374  * The caller passes the start offset and the size; all unwritten
4375  * extents within this range will be converted to written
4376  * extents.
4377  *
4378  * This function is called from the direct IO end_io callback
4379  * function to convert the fallocated extents after IO is completed.
4380  * Returns 0 on success.
4381  */
4382 int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
4383                                     ssize_t len)
4384 {
4385         handle_t *handle;
4386         unsigned int max_blocks;
4387         int ret = 0;
4388         int ret2 = 0;
4389         struct ext4_map_blocks map;
4390         unsigned int credits, blkbits = inode->i_blkbits;
4391
4392         map.m_lblk = offset >> blkbits;
4393         /*
4394          * We can't just convert len to max_blocks: if blocksize = 4096,
4395          * offset = 3072 and len = 2048, the request spans two blocks.
4396          */
4397         max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
4398                       map.m_lblk);
4399         /*
4400          * credits to insert 1 extent into extent tree
4401          */
4402         credits = ext4_chunk_trans_blocks(inode, max_blocks);
4403         while (ret >= 0 && ret < max_blocks) {
4404                 map.m_lblk += ret;
4405                 map.m_len = (max_blocks -= ret);
4406                 handle = ext4_journal_start(inode, credits);
4407                 if (IS_ERR(handle)) {
4408                         ret = PTR_ERR(handle);
4409                         break;
4410                 }
4411                 ret = ext4_map_blocks(handle, inode, &map,
4412                                       EXT4_GET_BLOCKS_IO_CONVERT_EXT);
4413                 if (ret <= 0) {
4414                         WARN_ON(ret <= 0);
4415                         ext4_msg(inode->i_sb, KERN_ERR,
4416                                  "%s:%d: inode #%lu: block %u: len %u: "
4417                                  "ext4_ext_map_blocks returned %d",
4418                                  __func__, __LINE__, inode->i_ino, map.m_lblk,
4419                                  map.m_len, ret);
4420                 }
4421                 ext4_mark_inode_dirty(handle, inode);
4422                 ret2 = ext4_journal_stop(handle);
4423                 if (ret <= 0 || ret2)
4424                         break;
4425         }
4426         return ret > 0 ? ret2 : ret;
4427 }
4428
4429 /*
4430  * Callback function called for each extent to gather FIEMAP information.
4431  */
4432 static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next,
4433                        struct ext4_ext_cache *newex, struct ext4_extent *ex,
4434                        void *data)
4435 {
4436         __u64   logical;
4437         __u64   physical;
4438         __u64   length;
4439         __u32   flags = 0;
4440         int             ret = 0;
4441         struct fiemap_extent_info *fieinfo = data;
4442         unsigned char blksize_bits;
4443
4444         blksize_bits = inode->i_sb->s_blocksize_bits;
4445         logical = (__u64)newex->ec_block << blksize_bits;
4446
4447         if (newex->ec_start == 0) {
4448                 /*
4449                  * If no extent in the extent tree contains block
4450                  * @newex->ec_block, the block may lie in 1) a hole
4451                  * or 2) a delayed extent.
4452                  *
4453                  * Holes and delayed extents are processed as follows:
4454                  * 1. look up dirty pages in the given pagecache range.
4455                  *    If no page is found, there is no delayed extent;
4456                  *    return EXT_CONTINUE.
4457                  * 2. find the first mapped buffer.
4458                  * 3. check whether that buffer is both in the request range
4459                  *    and a delayed buffer. If not, there is no delayed extent; return.
4460                  * 4. a delayed extent has been found; collect it.
4461                  */
4462                 ext4_lblk_t     end = 0;
4463                 pgoff_t         last_offset;
4464                 pgoff_t         offset;
4465                 pgoff_t         index;
4466                 pgoff_t         start_index = 0;
4467                 struct page     **pages = NULL;
4468                 struct buffer_head *bh = NULL;
4469                 struct buffer_head *head = NULL;
4470                 unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *);
4471
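                     /*
                      * e.g. 4096 / 8 = 512 page pointers per buffer on a
                      * 64-bit system with 4k pages.
                      */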
4472                 pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
4473                 if (pages == NULL)
4474                         return -ENOMEM;
4475
4476                 offset = logical >> PAGE_SHIFT;
4477 repeat:
4478                 last_offset = offset;
4479                 head = NULL;
4480                 ret = find_get_pages_tag(inode->i_mapping, &offset,
4481                                         PAGECACHE_TAG_DIRTY, nr_pages, pages);
4482
4483                 if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
4484                         /* First time, try to find a mapped buffer. */
4485                         if (ret == 0) {
4486 out:
4487                                 for (index = 0; index < ret; index++)
4488                                         page_cache_release(pages[index]);
4489                                 /* just a hole. */
4490                                 kfree(pages);
4491                                 return EXT_CONTINUE;
4492                         }
4493                         index = 0;
4494
4495 next_page:
4496                         /* Try to find the 1st mapped buffer. */
4497                         end = ((__u64)pages[index]->index << PAGE_SHIFT) >>
4498                                   blksize_bits;
4499                         if (!page_has_buffers(pages[index]))
4500                                 goto out;
4501                         head = page_buffers(pages[index]);
4502                         if (!head)
4503                                 goto out;
4504
4505                         index++;
4506                         bh = head;
4507                         do {
4508                                 if (end >= newex->ec_block +
4509                                         newex->ec_len)
4510                                         /* The buffer is out of
4511                                          * the request range.
4512                                          */
4513                                         goto out;
4514
4515                                 if (buffer_mapped(bh) &&
4516                                     end >= newex->ec_block) {
4517                                         start_index = index - 1;
4518                                         /* get the 1st mapped buffer. */
4519                                         goto found_mapped_buffer;
4520                                 }
4521
4522                                 bh = bh->b_this_page;
4523                                 end++;
4524                         } while (bh != head);
4525
4526                         /* No mapped buffer in the range found in this page,
4527                          * so we need to look up the next page.
4528                          */
4529                         if (index >= ret) {
4530                                 /* There is no page left, but we need to limit
4531                                  * newex->ec_len.
4532                                  */
4533                                 newex->ec_len = end - newex->ec_block;
4534                                 goto out;
4535                         }
4536                         goto next_page;
4537                 } else {
4538                         /* Find contiguous delayed buffers. */
4539                         if (ret > 0 && pages[0]->index == last_offset)
4540                                 head = page_buffers(pages[0]);
4541                         bh = head;
4542                         index = 1;
4543                         start_index = 0;
4544                 }
4545
4546 found_mapped_buffer:
4547                 if (bh != NULL && buffer_delay(bh)) {
4548                         /* 1st or contiguous delayed buffer found. */
4549                         if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
4550                                 /*
4551                                  * 1st delayed buffer found, record
4552                                  * the start of extent.
4553                                  */
4554                                 flags |= FIEMAP_EXTENT_DELALLOC;
4555                                 newex->ec_block = end;
4556                                 logical = (__u64)end << blksize_bits;
4557                         }
4558                         /* Find contiguous delayed buffers. */
4559                         do {
4560                                 if (!buffer_delay(bh))
4561                                         goto found_delayed_extent;
4562                                 bh = bh->b_this_page;
4563                                 end++;
4564                         } while (bh != head);
4565
4566                         for (; index < ret; index++) {
4567                                 if (!page_has_buffers(pages[index])) {
4568                                         bh = NULL;
4569                                         break;
4570                                 }
4571                                 head = page_buffers(pages[index]);
4572                                 if (!head) {
4573                                         bh = NULL;
4574                                         break;
4575                                 }
4576
4577                                 if (pages[index]->index !=
4578                                     pages[start_index]->index + index
4579                                     - start_index) {
4580                                         /* Blocks are not contiguous. */
4581                                         bh = NULL;
4582                                         break;
4583                                 }
4584                                 bh = head;
4585                                 do {
4586                                         if (!buffer_delay(bh))
4587                                                 /* Delayed-extent ends. */
4588                                                 goto found_delayed_extent;
4589                                         bh = bh->b_this_page;
4590                                         end++;
4591                                 } while (bh != head);
4592                         }
4593                 } else if (!(flags & FIEMAP_EXTENT_DELALLOC))
4594                         /* a hole found. */
4595                         goto out;
4596
4597 found_delayed_extent:
4598                 newex->ec_len = min(end - newex->ec_block,
4599                                                 (ext4_lblk_t)EXT_INIT_MAX_LEN);
4600                 if (ret == nr_pages && bh != NULL &&
4601                         newex->ec_len < EXT_INIT_MAX_LEN &&
4602                         buffer_delay(bh)) {
4603                         /* Haven't collected a full extent yet; keep scanning. */
4604                         for (index = 0; index < ret; index++)
4605                                 page_cache_release(pages[index]);
4606                         goto repeat;
4607                 }
4608
4609                 for (index = 0; index < ret; index++)
4610                         page_cache_release(pages[index]);
4611                 kfree(pages);
4612         }
4613
4614         physical = (__u64)newex->ec_start << blksize_bits;
4615         length =   (__u64)newex->ec_len << blksize_bits;
4616
4617         if (ex && ext4_ext_is_uninitialized(ex))
4618                 flags |= FIEMAP_EXTENT_UNWRITTEN;
4619
4620         if (next == EXT_MAX_BLOCKS)
4621                 flags |= FIEMAP_EXTENT_LAST;
4622
4623         ret = fiemap_fill_next_extent(fieinfo, logical, physical,
4624                                         length, flags);
4625         if (ret < 0)
4626                 return ret;
4627         if (ret == 1)
4628                 return EXT_BREAK;
4629         return EXT_CONTINUE;
4630 }

4631 /* the fiemap flags we can handle are specified here */
4632 #define EXT4_FIEMAP_FLAGS       (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
4633
4634 static int ext4_xattr_fiemap(struct inode *inode,
4635                                 struct fiemap_extent_info *fieinfo)
4636 {
4637         __u64 physical = 0;
4638         __u64 length;
4639         __u32 flags = FIEMAP_EXTENT_LAST;
4640         int blockbits = inode->i_sb->s_blocksize_bits;
4641         int error = 0;
4642
4643         /* in-inode? */
4644         if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4645                 struct ext4_iloc iloc;
4646                 int offset;     /* offset of xattr in inode */
4647
4648                 error = ext4_get_inode_loc(inode, &iloc);
4649                 if (error)
4650                         return error;
4651                 physical = iloc.bh->b_blocknr << blockbits;
4652                 offset = EXT4_GOOD_OLD_INODE_SIZE +
4653                                 EXT4_I(inode)->i_extra_isize;
4654                 physical += offset;
4655                 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
4656                 flags |= FIEMAP_EXTENT_DATA_INLINE;
4657                 brelse(iloc.bh);
4658         } else { /* external block */
4659                 physical = EXT4_I(inode)->i_file_acl << blockbits;
4660                 length = inode->i_sb->s_blocksize;
4661         }
4662
4663         if (physical)
4664                 error = fiemap_fill_next_extent(fieinfo, 0, physical,
4665                                                 length, flags);
4666         return (error < 0 ? error : 0);
4667 }
4668
4669 /*
4670  * ext4_ext_punch_hole
4671  *
4672  * Punches a hole of "length" bytes in a file starting
4673  * at byte "offset"
4674  *
4675  * @file:   The file to punch a hole in
4676  * @offset: The starting byte offset of the hole
4677  * @length: The length of the hole
4678  *
4679  * Returns 0 on success or a negative error code on failure
4680  */
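/*
 * For example, a user-space caller (a sketch, not from this file) can
 * punch out 8KiB starting at byte 4096 while keeping i_size:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  4096, 8192);
 */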
4681 int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
4682 {
4683         struct inode *inode = file->f_path.dentry->d_inode;
4684         struct super_block *sb = inode->i_sb;
4685         ext4_lblk_t first_block, stop_block;
4686         struct address_space *mapping = inode->i_mapping;
4687         handle_t *handle;
4688         loff_t first_page, last_page, page_len;
4689         loff_t first_page_offset, last_page_offset;
4690         int credits, err = 0;
4691
4692         /* No need to punch hole beyond i_size */
4693         if (offset >= inode->i_size)
4694                 return 0;
4695
4696         /*
4697          * If the hole extends beyond i_size, set the hole
4698          * to end after the page that contains i_size
4699          */
4700         if (offset + length > inode->i_size) {
4701                 length = inode->i_size +
4702                    PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
4703                    offset;
4704         }
4705
4706         first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
4707         last_page = (offset + length) >> PAGE_CACHE_SHIFT;
4708
4709         first_page_offset = first_page << PAGE_CACHE_SHIFT;
4710         last_page_offset = last_page << PAGE_CACHE_SHIFT;
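             /*
              * For example (a worked illustration, assuming 4k pages):
              * offset = 1000 and length = 10000 give first_page = 1 and
              * last_page = 2, so the whole pages in [4096, 8192) are
              * truncated below, while the partial ranges [1000, 4096)
              * and [8192, 11000) are zeroed out via
              * ext4_discard_partial_page_buffers().
              */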
4711
4712         /*
4713          * Write out all dirty pages to avoid race conditions,
4714          * then release them.
4715          */
4716         if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
4717                 err = filemap_write_and_wait_range(mapping,
4718                         offset, offset + length - 1);
4719
4720                 if (err)
4721                         return err;
4722         }
4723
4724         /* Now release the pages */
4725         if (last_page_offset > first_page_offset) {
4726                 truncate_inode_pages_range(mapping, first_page_offset,
4727                                            last_page_offset-1);
4728         }
4729
4730         /* finish any pending end_io work */
4731         ext4_flush_completed_IO(inode);
4732
4733         credits = ext4_writepage_trans_blocks(inode);
4734         handle = ext4_journal_start(inode, credits);
4735         if (IS_ERR(handle))
4736                 return PTR_ERR(handle);
4737
4738         err = ext4_orphan_add(handle, inode);
4739         if (err)
4740                 goto out;
4741
4742         /*
4743          * Now we need to zero out the non-page-aligned data in the
4744          * pages at the start and tail of the hole, and unmap the buffer
4745          * heads for the block aligned regions of the page that were
4746          * completely zeroed.
4747          */
4748         if (first_page > last_page) {
4749                 /*
4750                  * If the file space being truncated is contained within a page
4751                  * just zero out and unmap the middle of that page
4752                  */
4753                 err = ext4_discard_partial_page_buffers(handle,
4754                         mapping, offset, length, 0);
4755
4756                 if (err)
4757                         goto out;
4758         } else {
4759                 /*
4760                  * zero out and unmap the partial page that contains
4761                  * the start of the hole
4762                  */
4763                 page_len  = first_page_offset - offset;
4764                 if (page_len > 0) {
4765                         err = ext4_discard_partial_page_buffers(handle, mapping,
4766                                                    offset, page_len, 0);
4767                         if (err)
4768                                 goto out;
4769                 }
4770
4771                 /*
4772                  * zero out and unmap the partial page that contains
4773                  * the end of the hole
4774                  */
4775                 page_len = offset + length - last_page_offset;
4776                 if (page_len > 0) {
4777                         err = ext4_discard_partial_page_buffers(handle, mapping,
4778                                         last_page_offset, page_len, 0);
4779                         if (err)
4780                                 goto out;
4781                 }
4782         }
4783
4784         /*
4785          * If i_size is contained in the last page, we need to
4786          * unmap and zero the partial page after i_size
4787          */
4788         if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
4789            inode->i_size % PAGE_CACHE_SIZE != 0) {
4790
4791                 page_len = PAGE_CACHE_SIZE -
4792                         (inode->i_size & (PAGE_CACHE_SIZE - 1));
4793
4794                 if (page_len > 0) {
4795                         err = ext4_discard_partial_page_buffers(handle,
4796                           mapping, inode->i_size, page_len, 0);
4797
4798                         if (err)
4799                                 goto out;
4800                 }
4801         }
4802
4803         first_block = (offset + sb->s_blocksize - 1) >>
4804                 EXT4_BLOCK_SIZE_BITS(sb);
4805         stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
4806
4807         /* If there are no blocks to remove, return now */
4808         if (first_block >= stop_block)
4809                 goto out;
4810
4811         down_write(&EXT4_I(inode)->i_data_sem);
4812         ext4_ext_invalidate_cache(inode);
4813         ext4_discard_preallocations(inode);
4814
4815         err = ext4_ext_remove_space(inode, first_block, stop_block - 1);
4816
4817         ext4_ext_invalidate_cache(inode);
4818         ext4_discard_preallocations(inode);
4819
4820         if (IS_SYNC(inode))
4821                 ext4_handle_sync(handle);
4822
4823         up_write(&EXT4_I(inode)->i_data_sem);
4824
4825 out:
4826         ext4_orphan_del(handle, inode);
4827         inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4828         ext4_mark_inode_dirty(handle, inode);
4829         ext4_journal_stop(handle);
4830         return err;
4831 }

4832 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4833                 __u64 start, __u64 len)
4834 {
4835         ext4_lblk_t start_blk;
4836         int error = 0;
4837
4838         /* fallback to generic here if not in extents fmt */
4839         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4840                 return generic_block_fiemap(inode, fieinfo, start, len,
4841                         ext4_get_block);
4842
4843         if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
4844                 return -EBADR;
4845
4846         if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
4847                 error = ext4_xattr_fiemap(inode, fieinfo);
4848         } else {
4849                 ext4_lblk_t len_blks;
4850                 __u64 last_blk;
4851
4852                 start_blk = start >> inode->i_sb->s_blocksize_bits;
4853                 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
4854                 if (last_blk >= EXT_MAX_BLOCKS)
4855                         last_blk = EXT_MAX_BLOCKS-1;
4856                 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
4857
4858                 /*
4859                  * Walk the extent tree gathering extent information.
4860                  * ext4_ext_fiemap_cb will push extents back to user.
4861                  */
4862                 error = ext4_ext_walk_space(inode, start_blk, len_blks,
4863                                           ext4_ext_fiemap_cb, fieinfo);
4864         }
4865
4866         return error;
4867 }