fs/ext4/extents.c
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT    0x1  /* safe to zeroout if split fails \
                                        due to ENOSPC */
#define EXT4_EXT_MARK_UNINIT1   0x2  /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2   0x4  /* mark second half uninitialized */

#define EXT4_EXT_DATA_VALID1    0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2    0x10 /* second half contains valid data */

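/*
 * Note: these flags combine. For example, a caller splitting an
 * uninitialized extent will typically pass EXT4_EXT_MAY_ZEROOUT along
 * with EXT4_EXT_MARK_UNINIT1/EXT4_EXT_MARK_UNINIT2, so that if the
 * split fails for lack of space the extent can be zeroed out and
 * written as initialized data instead of failing with ENOSPC.
 */
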
static __le32 ext4_extent_block_csum(struct inode *inode,
                                     struct ext4_extent_header *eh)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        __u32 csum;

        csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
                           EXT4_EXTENT_TAIL_OFFSET(eh));
        return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
                                         struct ext4_extent_header *eh)
{
        struct ext4_extent_tail *et;

        if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
                EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
                return 1;

        et = find_ext4_extent_tail(eh);
        if (et->et_checksum != ext4_extent_block_csum(inode, eh))
                return 0;
        return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
                                       struct ext4_extent_header *eh)
{
        struct ext4_extent_tail *et;

        if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
                EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
                return;

        et = find_ext4_extent_tail(eh);
        et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static int ext4_split_extent(handle_t *handle,
                                struct inode *inode,
                                struct ext4_ext_path *path,
                                struct ext4_map_blocks *map,
                                int split_flag,
                                int flags);

static int ext4_split_extent_at(handle_t *handle,
                             struct inode *inode,
                             struct ext4_ext_path *path,
                             ext4_lblk_t split,
                             int split_flag,
                             int flags);

static int ext4_find_delayed_extent(struct inode *inode,
                                    struct extent_status *newes);

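/*
 * Ensure the handle has at least @needed journal credits, extending or
 * restarting the current transaction if necessary. Returns 0 when the
 * handle already has enough credits (or could be extended in place), a
 * negative error on failure, and -EAGAIN when the transaction had to
 * be restarted, in which case the caller should retry.
 */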
static int ext4_ext_truncate_extend_restart(handle_t *handle,
                                            struct inode *inode,
                                            int needed)
{
        int err;

        if (!ext4_handle_valid(handle))
                return 0;
        if (handle->h_buffer_credits > needed)
                return 0;
        err = ext4_journal_extend(handle, needed);
        if (err <= 0)
                return err;
        err = ext4_truncate_restart_trans(handle, inode, needed);
        if (err == 0)
                err = -EAGAIN;

        return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path)
{
        if (path->p_bh) {
                /* path points to block */
                return ext4_journal_get_write_access(handle, path->p_bh);
        }
        /* path points to leaf/index in inode body */
        /* we use in-core data, no need to protect them */
        return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
                     struct inode *inode, struct ext4_ext_path *path)
{
        int err;
        if (path->p_bh) {
                ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
                /* path points to block */
                err = __ext4_handle_dirty_metadata(where, line, handle,
                                                   inode, path->p_bh);
        } else {
                /* path points to leaf/index in inode body */
                err = ext4_mark_inode_dirty(handle, inode);
        }
        return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
                              struct ext4_ext_path *path,
                              ext4_lblk_t block)
{
        if (path) {
                int depth = path->p_depth;
                struct ext4_extent *ex;

                /*
                 * Try to predict block placement assuming that we are
                 * filling in a file which will eventually be
                 * non-sparse --- i.e., in the case of libbfd writing
                 * an ELF object's sections out-of-order but in a way
                 * that eventually results in a contiguous object or
                 * executable file, or some database extending a table
                 * space file.  However, this is actually somewhat
                 * non-ideal if we are writing a sparse file such as
                 * qemu or KVM writing a raw image file that is going
                 * to stay fairly sparse, since it will end up
                 * fragmenting the file system's free space.  Maybe we
                 * should have some heuristics or some way to allow
                 * userspace to pass a hint to the file system,
                 * especially if the latter case turns out to be
                 * common.
                 */
                ex = path[depth].p_ext;
                if (ex) {
                        ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
                        ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

                        if (block > ext_block)
                                return ext_pblk + (block - ext_block);
                        else
                                return ext_pblk - (ext_block - block);
                }

                /* it looks like index is empty;
                 * try to find starting block from index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        return ext4_inode_to_goal_block(inode);
}

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
                        struct ext4_ext_path *path,
                        struct ext4_extent *ex, int *err, unsigned int flags)
{
        ext4_fsblk_t goal, newblock;

        goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
        newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
                                        NULL, err);
        return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 6)
                size = 6;
#endif
        return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 5)
                size = 5;
#endif
        return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 3)
                size = 3;
#endif
        return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 4)
                size = 4;
#endif
        return size;
}

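/*
 * Worked numbers (descriptive, assuming the usual 4KiB block size):
 * the on-disk header, extent and index entries are 12 bytes each, so
 * one tree block holds (4096 - 12) / 12 = 340 extents or indexes,
 * while the root in the inode's 60-byte i_data holds (60 - 12) / 12 =
 * 4 entries.  AGGRESSIVE_TEST shrinks these limits so that deep trees
 * can be exercised on small files.
 */
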
/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        int idxs;

        idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                / sizeof(struct ext4_extent_idx));

        /*
         * If the new delayed allocation block is contiguous with the
         * previous da block, it can share index blocks with the
         * previous block, so we only need to allocate a new index
         * block every idxs leaf blocks.  At idxs**2 blocks, we need
         * an additional index block, and at idxs**3 blocks, yet
         * another index block.
         */
        if (ei->i_da_metadata_calc_len &&
            ei->i_da_metadata_calc_last_lblock+1 == lblock) {
                int num = 0;

                if ((ei->i_da_metadata_calc_len % idxs) == 0)
                        num++;
                if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
                        num++;
                if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
                        num++;
                        ei->i_da_metadata_calc_len = 0;
                } else
                        ei->i_da_metadata_calc_len++;
                ei->i_da_metadata_calc_last_lblock++;
                return num;
        }

        /*
         * In the worst case we need a new set of index blocks at
         * every level of the inode's extent tree.
         */
        ei->i_da_metadata_calc_len = 1;
        ei->i_da_metadata_calc_last_lblock = lblock;
        return ext_depth(inode) + 1;
}

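/*
 * Example (descriptive, with idxs = 340 for 4KiB blocks): a write that
 * keeps extending a contiguous delayed-allocation run charges one
 * extra metadata block each time the run length reaches a multiple of
 * 340 leaf blocks, one more at multiples of 340^2, and so on, instead
 * of charging the full worst case of ext_depth + 1 for every block.
 */
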
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
        int max;

        if (depth == ext_depth(inode)) {
                if (depth == 0)
                        max = ext4_ext_space_root(inode, 1);
                else
                        max = ext4_ext_space_root_idx(inode, 1);
        } else {
                if (depth == 0)
                        max = ext4_ext_space_block(inode, 1);
                else
                        max = ext4_ext_space_block_idx(inode, 1);
        }

        return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
        ext4_fsblk_t block = ext4_ext_pblock(ext);
        int len = ext4_ext_get_actual_len(ext);

        if (len == 0)
                return 0;
        return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
                                struct ext4_extent_idx *ext_idx)
{
        ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

        return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
                                struct ext4_extent_header *eh,
                                int depth)
{
        unsigned short entries;
        if (eh->eh_entries == 0)
                return 1;

        entries = le16_to_cpu(eh->eh_entries);

        if (depth == 0) {
                /* leaf entries */
                struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
                while (entries) {
                        if (!ext4_valid_extent(inode, ext))
                                return 0;
                        ext++;
                        entries--;
                }
        } else {
                struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
                while (entries) {
                        if (!ext4_valid_extent_idx(inode, ext_idx))
                                return 0;
                        ext_idx++;
                        entries--;
                }
        }
        return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
                            struct inode *inode, struct ext4_extent_header *eh,
                            int depth, ext4_fsblk_t pblk)
{
        const char *error_msg;
        int max = 0;

        if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
                error_msg = "invalid magic";
                goto corrupted;
        }
        if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
                error_msg = "unexpected eh_depth";
                goto corrupted;
        }
        if (unlikely(eh->eh_max == 0)) {
                error_msg = "invalid eh_max";
                goto corrupted;
        }
        max = ext4_ext_max_entries(inode, depth);
        if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
                error_msg = "too large eh_max";
                goto corrupted;
        }
        if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
                error_msg = "invalid eh_entries";
                goto corrupted;
        }
        if (!ext4_valid_extent_entries(inode, eh, depth)) {
                error_msg = "invalid extent entries";
                goto corrupted;
        }
        /* Verify checksum on non-root extent tree nodes */
        if (ext_depth(inode) != depth &&
            !ext4_extent_block_csum_verify(inode, eh)) {
                error_msg = "extent tree corrupted";
                goto corrupted;
        }
        return 0;

corrupted:
        ext4_error_inode(inode, function, line, 0,
                         "pblk %llu bad header/extent: %s - magic %x, "
                         "entries %u, max %u(%u), depth %u(%u)",
                         (unsigned long long) pblk, error_msg,
                         le16_to_cpu(eh->eh_magic),
                         le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
                         max, le16_to_cpu(eh->eh_depth), depth);
        return -EIO;
}

#define ext4_ext_check(inode, eh, depth, pblk)                  \
        __ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))

int ext4_ext_check_inode(struct inode *inode)
{
        return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}

static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
                         struct inode *inode, ext4_fsblk_t pblk, int depth,
                         int flags)
{
        struct buffer_head              *bh;
        int                             err;

        bh = sb_getblk(inode->i_sb, pblk);
        if (unlikely(!bh))
                return ERR_PTR(-ENOMEM);

        if (!bh_uptodate_or_lock(bh)) {
                trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
                err = bh_submit_read(bh);
                if (err < 0)
                        goto errout;
        }
        if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
                return bh;
        err = __ext4_ext_check(function, line, inode,
                               ext_block_hdr(bh), depth, pblk);
        if (err)
                goto errout;
        set_buffer_verified(bh);
        /*
         * If this is a leaf block, cache all of its entries
         */
        if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
                struct ext4_extent_header *eh = ext_block_hdr(bh);
                struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
                ext4_lblk_t prev = 0;
                int i;

                for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
                        unsigned int status = EXTENT_STATUS_WRITTEN;
                        ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
                        int len = ext4_ext_get_actual_len(ex);

                        if (prev && (prev != lblk))
                                ext4_es_cache_extent(inode, prev,
                                                     lblk - prev, ~0,
                                                     EXTENT_STATUS_HOLE);

                        if (ext4_ext_is_uninitialized(ex))
                                status = EXTENT_STATUS_UNWRITTEN;
                        ext4_es_cache_extent(inode, lblk, len,
                                             ext4_ext_pblock(ex), status);
                        prev = lblk + len;
                }
        }
        return bh;
errout:
        put_bh(bh);
        return ERR_PTR(err);
}

#define read_extent_tree_block(inode, pblk, depth, flags)               \
        __read_extent_tree_block(__func__, __LINE__, (inode), (pblk),   \
                                 (depth), (flags))

/*
 * This function is called to cache a file's extent information in the
 * extent status tree
 */
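/*
 * The tree walk below is an iterative depth-first scan: path[] acts as
 * an explicit stack of index positions, i moves down a level each time
 * an index block is read and back up once a leaf (or the end of an
 * index block) is reached, so every leaf is visited exactly once.
 */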
int ext4_ext_precache(struct inode *inode)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_ext_path *path = NULL;
        struct buffer_head *bh;
        int i = 0, depth, ret = 0;

        if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
                return 0;       /* not an extent-mapped inode */

        down_read(&ei->i_data_sem);
        depth = ext_depth(inode);

        path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
                       GFP_NOFS);
        if (path == NULL) {
                up_read(&ei->i_data_sem);
                return -ENOMEM;
        }

        /* Don't cache anything if there are no external extent blocks */
        if (depth == 0)
                goto out;
        path[0].p_hdr = ext_inode_hdr(inode);
        ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
        if (ret)
                goto out;
        path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
        while (i >= 0) {
                /*
                 * If this is a leaf block or we've reached the end of
                 * the index block, go up
                 */
                if ((i == depth) ||
                    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
                        brelse(path[i].p_bh);
                        path[i].p_bh = NULL;
                        i--;
                        continue;
                }
                bh = read_extent_tree_block(inode,
                                            ext4_idx_pblock(path[i].p_idx++),
                                            depth - i - 1,
                                            EXT4_EX_FORCE_CACHE);
                if (IS_ERR(bh)) {
                        ret = PTR_ERR(bh);
                        break;
                }
                i++;
                path[i].p_bh = bh;
                path[i].p_hdr = ext_block_hdr(bh);
                path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
        }
        ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
        up_read(&ei->i_data_sem);
        ext4_ext_drop_refs(path);
        kfree(path);
        return ret;
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
        int k, l = path->p_depth;

        ext_debug("path:");
        for (k = 0; k <= l; k++, path++) {
                if (path->p_idx) {
                        ext_debug("  %d->%llu",
                                  le32_to_cpu(path->p_idx->ei_block),
                                  ext4_idx_pblock(path->p_idx));
                } else if (path->p_ext) {
                        ext_debug("  %d:[%d]%d:%llu ",
                                  le32_to_cpu(path->p_ext->ee_block),
                                  ext4_ext_is_uninitialized(path->p_ext),
                                  ext4_ext_get_actual_len(path->p_ext),
                                  ext4_ext_pblock(path->p_ext));
                } else
                        ext_debug("  []");
        }
        ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
        int depth = ext_depth(inode);
        struct ext4_extent_header *eh;
        struct ext4_extent *ex;
        int i;

        if (!path)
                return;

        eh = path[depth].p_hdr;
        ex = EXT_FIRST_EXTENT(eh);

        ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

        for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
                ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
                          ext4_ext_is_uninitialized(ex),
                          ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
        }
        ext_debug("\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
                        ext4_fsblk_t newblock, int level)
{
        int depth = ext_depth(inode);
        struct ext4_extent *ex;

        if (depth != level) {
                struct ext4_extent_idx *idx;
                idx = path[level].p_idx;
                while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
                        ext_debug("%d: move %d:%llu in new index %llu\n", level,
                                        le32_to_cpu(idx->ei_block),
                                        ext4_idx_pblock(idx),
                                        newblock);
                        idx++;
                }

                return;
        }

        ex = path[depth].p_ext;
        while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
                ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
                                le32_to_cpu(ex->ee_block),
                                ext4_ext_pblock(ex),
                                ext4_ext_is_uninitialized(ex),
                                ext4_ext_get_actual_len(ex),
                                newblock);
                ex++;
        }
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
        int depth = path->p_depth;
        int i;

        for (i = 0; i <= depth; i++, path++)
                if (path->p_bh) {
                        brelse(path->p_bh);
                        path->p_bh = NULL;
                }
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
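/*
 * The search below maintains that entries before l (apart from the
 * first index, which acts as a catch-all) have ei_block <= block and
 * entries after r have ei_block > block, so on exit l - 1 is the
 * rightmost index whose subtree can contain @block.  E.g. for indexes
 * keyed {0, 100, 200} and block = 150, the result is the 100 index.
 */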
static void
ext4_ext_binsearch_idx(struct inode *inode,
                        struct ext4_ext_path *path, ext4_lblk_t block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent_idx *r, *l, *m;

        ext_debug("binsearch for %u(idx):  ", block);

        l = EXT_FIRST_INDEX(eh) + 1;
        r = EXT_LAST_INDEX(eh);
        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ei_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
                                m, le32_to_cpu(m->ei_block),
                                r, le32_to_cpu(r->ei_block));
        }

        path->p_idx = l - 1;
        ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
                  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent_idx *chix, *ix;
                int k;

                chix = ix = EXT_FIRST_INDEX(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
                        if (k != 0 && le32_to_cpu(ix->ei_block) <=
                                      le32_to_cpu(ix[-1].ei_block)) {
                                printk(KERN_DEBUG "k=%d, ix=0x%p, "
                                       "first=0x%p\n", k,
                                       ix, EXT_FIRST_INDEX(eh));
                                printk(KERN_DEBUG "%u <= %u\n",
                                       le32_to_cpu(ix->ei_block),
                                       le32_to_cpu(ix[-1].ei_block));
                        }
                        BUG_ON(k && le32_to_cpu(ix->ei_block)
                                           <= le32_to_cpu(ix[-1].ei_block));
                        if (block < le32_to_cpu(ix->ei_block))
                                break;
                        chix = ix;
                }
                BUG_ON(chix != path->p_idx);
        }
#endif
}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
                struct ext4_ext_path *path, ext4_lblk_t block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent *r, *l, *m;

        if (eh->eh_entries == 0) {
                /*
                 * this leaf is empty:
                 * we get such a leaf in split/add case
                 */
                return;
        }

        ext_debug("binsearch for %u:  ", block);

        l = EXT_FIRST_EXTENT(eh) + 1;
        r = EXT_LAST_EXTENT(eh);

        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ee_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
                                m, le32_to_cpu(m->ee_block),
                                r, le32_to_cpu(r->ee_block));
        }

        path->p_ext = l - 1;
        ext_debug("  -> %d:%llu:[%d]%d ",
                        le32_to_cpu(path->p_ext->ee_block),
                        ext4_ext_pblock(path->p_ext),
                        ext4_ext_is_uninitialized(path->p_ext),
                        ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent *chex, *ex;
                int k;

                chex = ex = EXT_FIRST_EXTENT(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
                        BUG_ON(k && le32_to_cpu(ex->ee_block)
                                          <= le32_to_cpu(ex[-1].ee_block));
                        if (block < le32_to_cpu(ex->ee_block))
                                break;
                        chex = ex;
                }
                BUG_ON(chex != path->p_ext);
        }
#endif
}

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
        struct ext4_extent_header *eh;

        eh = ext_inode_hdr(inode);
        eh->eh_depth = 0;
        eh->eh_entries = 0;
        eh->eh_magic = EXT4_EXT_MAGIC;
        eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
        ext4_mark_inode_dirty(handle, inode);
        return 0;
}

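/*
 * ext4_ext_find_extent:
 * walk the tree from the root down to the leaf covering @block,
 * recording the header, index and extent seen at each level in the
 * returned path[] array.  If @path is NULL a new array is allocated
 * (sized for a possible depth increase); on error the references are
 * dropped and an ERR_PTR is returned instead.
 */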
struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
                     struct ext4_ext_path *path, int flags)
{
        struct ext4_extent_header *eh;
        struct buffer_head *bh;
        short int depth, i, ppos = 0, alloc = 0;
        int ret;

        eh = ext_inode_hdr(inode);
        depth = ext_depth(inode);

        /* account possible depth increase */
        if (!path) {
                path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
                                GFP_NOFS);
                if (!path)
                        return ERR_PTR(-ENOMEM);
                alloc = 1;
        }
        path[0].p_hdr = eh;
        path[0].p_bh = NULL;

        i = depth;
        /* walk through the tree */
        while (i) {
                ext_debug("depth %d: num %d, max %d\n",
                          ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

                ext4_ext_binsearch_idx(inode, path + ppos, block);
                path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
                path[ppos].p_depth = i;
                path[ppos].p_ext = NULL;

                bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
                                            flags);
                if (IS_ERR(bh)) {
                        ret = PTR_ERR(bh);
                        goto err;
                }

                eh = ext_block_hdr(bh);
                ppos++;
                if (unlikely(ppos > depth)) {
                        put_bh(bh);
                        EXT4_ERROR_INODE(inode,
                                         "ppos %d > depth %d", ppos, depth);
                        ret = -EIO;
                        goto err;
                }
                path[ppos].p_bh = bh;
                path[ppos].p_hdr = eh;
        }

        path[ppos].p_depth = i;
        path[ppos].p_ext = NULL;
        path[ppos].p_idx = NULL;

        /* find extent */
        ext4_ext_binsearch(inode, path + ppos, block);
        /* if not an empty leaf */
        if (path[ppos].p_ext)
                path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

        ext4_ext_show_path(inode, path);

        return path;

err:
        ext4_ext_drop_refs(path);
        if (alloc)
                kfree(path);
        return ERR_PTR(ret);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
                                 struct ext4_ext_path *curp,
                                 int logical, ext4_fsblk_t ptr)
{
        struct ext4_extent_idx *ix;
        int len, err;

        err = ext4_ext_get_access(handle, inode, curp);
        if (err)
                return err;

        if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
                EXT4_ERROR_INODE(inode,
                                 "logical %d == ei_block %d!",
                                 logical, le32_to_cpu(curp->p_idx->ei_block));
                return -EIO;
        }

        if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
                             >= le16_to_cpu(curp->p_hdr->eh_max))) {
                EXT4_ERROR_INODE(inode,
                                 "eh_entries %d >= eh_max %d!",
                                 le16_to_cpu(curp->p_hdr->eh_entries),
                                 le16_to_cpu(curp->p_hdr->eh_max));
                return -EIO;
        }

        if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
                /* insert after */
                ext_debug("insert new index %d after: %llu\n", logical, ptr);
                ix = curp->p_idx + 1;
        } else {
                /* insert before */
                ext_debug("insert new index %d before: %llu\n", logical, ptr);
                ix = curp->p_idx;
        }

        len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
        BUG_ON(len < 0);
        if (len > 0) {
                ext_debug("insert new index %d: "
                                "move %d indices from 0x%p to 0x%p\n",
                                logical, len, ix, ix + 1);
                memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
        }

        if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
                EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
                return -EIO;
        }

        ix->ei_block = cpu_to_le32(logical);
        ext4_idx_store_pblock(ix, ptr);
        le16_add_cpu(&curp->p_hdr->eh_entries, 1);

        if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
                EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
                return -EIO;
        }

        err = ext4_ext_dirty(handle, inode, curp);
        ext4_std_error(inode->i_sb, err);

        return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
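/*
 * Rough picture (illustrative only): splitting at the current extent
 * e2 in a depth-1 tree turns
 *
 *      index block:  [i1 i2]                   [i1 i2 i3]
 *      leaf:         [e1 e2 e3 e4]    into     [e1 e2] [e3 e4]
 *
 * where the extents after the split point move into the newly
 * allocated leaf and a new index entry i3 points at it.
 */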
static int ext4_ext_split(handle_t *handle, struct inode *inode,
                          unsigned int flags,
                          struct ext4_ext_path *path,
                          struct ext4_extent *newext, int at)
{
        struct buffer_head *bh = NULL;
        int depth = ext_depth(inode);
        struct ext4_extent_header *neh;
        struct ext4_extent_idx *fidx;
        int i = at, k, m, a;
        ext4_fsblk_t newblock, oldblock;
        __le32 border;
        ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
        int err = 0;

        /* make decision: where to split? */
        /* FIXME: now decision is simplest: at current extent */

        /* if current leaf will be split, then we should use
         * border from split point */
        if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
                EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
                return -EIO;
        }
        if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
                border = path[depth].p_ext[1].ee_block;
                ext_debug("leaf will be split."
                                " next leaf starts at %d\n",
                                  le32_to_cpu(border));
        } else {
                border = newext->ee_block;
                ext_debug("leaf will be added."
                                " next leaf starts at %d\n",
                                le32_to_cpu(border));
        }

        /*
         * If error occurs, then we break processing
         * and mark filesystem read-only. index won't
         * be inserted and tree will be in consistent
         * state. Next mount will repair buffers too.
         */

        /*
         * Get array to track all allocated blocks.
         * We need this to handle errors and free blocks
         * upon them.
         */
        ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
        if (!ablocks)
                return -ENOMEM;

        /* allocate all needed blocks */
        ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
        for (a = 0; a < depth - at; a++) {
                newblock = ext4_ext_new_meta_block(handle, inode, path,
                                                   newext, &err, flags);
                if (newblock == 0)
                        goto cleanup;
                ablocks[a] = newblock;
        }

        /* initialize new leaf */
        newblock = ablocks[--a];
        if (unlikely(newblock == 0)) {
                EXT4_ERROR_INODE(inode, "newblock == 0!");
                err = -EIO;
                goto cleanup;
        }
        bh = sb_getblk(inode->i_sb, newblock);
        if (unlikely(!bh)) {
                err = -ENOMEM;
                goto cleanup;
        }
        lock_buffer(bh);

        err = ext4_journal_get_create_access(handle, bh);
        if (err)
                goto cleanup;

        neh = ext_block_hdr(bh);
        neh->eh_entries = 0;
        neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
        neh->eh_magic = EXT4_EXT_MAGIC;
        neh->eh_depth = 0;

        /* move remainder of path[depth] to the new leaf */
        if (unlikely(path[depth].p_hdr->eh_entries !=
                     path[depth].p_hdr->eh_max)) {
                EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
                                 path[depth].p_hdr->eh_entries,
                                 path[depth].p_hdr->eh_max);
                err = -EIO;
                goto cleanup;
        }
        /* start copy from next extent */
        m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
        ext4_ext_show_move(inode, path, newblock, depth);
        if (m) {
                struct ext4_extent *ex;
                ex = EXT_FIRST_EXTENT(neh);
                memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
                le16_add_cpu(&neh->eh_entries, m);
        }

        ext4_extent_block_csum_set(inode, neh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        err = ext4_handle_dirty_metadata(handle, inode, bh);
        if (err)
                goto cleanup;
        brelse(bh);
        bh = NULL;

        /* correct old leaf */
        if (m) {
                err = ext4_ext_get_access(handle, inode, path + depth);
                if (err)
                        goto cleanup;
                le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
                err = ext4_ext_dirty(handle, inode, path + depth);
                if (err)
                        goto cleanup;
        }

        /* create intermediate indexes */
        k = depth - at - 1;
        if (unlikely(k < 0)) {
                EXT4_ERROR_INODE(inode, "k %d < 0!", k);
                err = -EIO;
                goto cleanup;
        }
        if (k)
                ext_debug("create %d intermediate indices\n", k);
        /* insert new index into current index block */
        /* current depth stored in i var */
        i = depth - 1;
        while (k--) {
                oldblock = newblock;
                newblock = ablocks[--a];
                bh = sb_getblk(inode->i_sb, newblock);
                if (unlikely(!bh)) {
                        err = -ENOMEM;
                        goto cleanup;
                }
                lock_buffer(bh);

                err = ext4_journal_get_create_access(handle, bh);
                if (err)
                        goto cleanup;

                neh = ext_block_hdr(bh);
                neh->eh_entries = cpu_to_le16(1);
                neh->eh_magic = EXT4_EXT_MAGIC;
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
                neh->eh_depth = cpu_to_le16(depth - i);
                fidx = EXT_FIRST_INDEX(neh);
                fidx->ei_block = border;
                ext4_idx_store_pblock(fidx, oldblock);

                ext_debug("int.index at %d (block %llu): %u -> %llu\n",
                                i, newblock, le32_to_cpu(border), oldblock);

                /* move remainder of path[i] to the new index block */
                if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
                                        EXT_LAST_INDEX(path[i].p_hdr))) {
                        EXT4_ERROR_INODE(inode,
                                         "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
                                         le32_to_cpu(path[i].p_ext->ee_block));
                        err = -EIO;
                        goto cleanup;
                }
                /* start copy indexes */
                m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
                ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
                                EXT_MAX_INDEX(path[i].p_hdr));
                ext4_ext_show_move(inode, path, newblock, i);
                if (m) {
                        memmove(++fidx, path[i].p_idx,
                                sizeof(struct ext4_extent_idx) * m);
                        le16_add_cpu(&neh->eh_entries, m);
                }
                ext4_extent_block_csum_set(inode, neh);
                set_buffer_uptodate(bh);
                unlock_buffer(bh);

                err = ext4_handle_dirty_metadata(handle, inode, bh);
                if (err)
                        goto cleanup;
                brelse(bh);
                bh = NULL;

                /* correct old index */
                if (m) {
                        err = ext4_ext_get_access(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                        le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
                        err = ext4_ext_dirty(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                }

                i--;
        }

        /* insert new index */
        err = ext4_ext_insert_index(handle, inode, path + at,
                                    le32_to_cpu(border), newblock);

cleanup:
        if (bh) {
                if (buffer_locked(bh))
                        unlock_buffer(bh);
                brelse(bh);
        }

        if (err) {
                /* free all allocated blocks in error case */
                for (i = 0; i < depth; i++) {
                        if (!ablocks[i])
                                continue;
                        ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
                                         EXT4_FREE_BLOCKS_METADATA);
                }
        }
        kfree(ablocks);

        return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
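/*
 * Illustrative before/after: with the root node [e1 .. e4] stored in
 * the inode's i_data, growing in depth copies that node into a fresh
 * block B and rewrites the root as a one-entry index [ -> B ], so
 * eh_depth goes from N to N + 1 while everything below B is unchanged.
 */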
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
                                 unsigned int flags,
                                 struct ext4_extent *newext)
{
        struct ext4_extent_header *neh;
        struct buffer_head *bh;
        ext4_fsblk_t newblock;
        int err = 0;

        newblock = ext4_ext_new_meta_block(handle, inode, NULL,
                newext, &err, flags);
        if (newblock == 0)
                return err;

        bh = sb_getblk(inode->i_sb, newblock);
        if (unlikely(!bh))
                return -ENOMEM;
        lock_buffer(bh);

        err = ext4_journal_get_create_access(handle, bh);
        if (err) {
                unlock_buffer(bh);
                goto out;
        }

        /* move top-level index/leaf into new block */
        memmove(bh->b_data, EXT4_I(inode)->i_data,
                sizeof(EXT4_I(inode)->i_data));

        /* set size of new block */
        neh = ext_block_hdr(bh);
        /* old root could have indexes or leaves
         * so calculate eh_max the right way */
        if (ext_depth(inode))
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
        else
                neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
        neh->eh_magic = EXT4_EXT_MAGIC;
        ext4_extent_block_csum_set(inode, neh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        err = ext4_handle_dirty_metadata(handle, inode, bh);
        if (err)
                goto out;

        /* Update top-level index: num,max,pointer */
        neh = ext_inode_hdr(inode);
        neh->eh_entries = cpu_to_le16(1);
        ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
        if (neh->eh_depth == 0) {
                /* Root extent block becomes index block */
                neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
                EXT_FIRST_INDEX(neh)->ei_block =
                        EXT_FIRST_EXTENT(neh)->ee_block;
        }
        ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
                  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
                  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
                  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

        le16_add_cpu(&neh->eh_depth, 1);
        ext4_mark_inode_dirty(handle, inode);
out:
        brelse(bh);

        return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
                                    unsigned int mb_flags,
                                    unsigned int gb_flags,
                                    struct ext4_ext_path *path,
                                    struct ext4_extent *newext)
{
        struct ext4_ext_path *curp;
        int depth, i, err = 0;

repeat:
        i = depth = ext_depth(inode);

        /* walk up the tree and look for a free index entry */
        curp = path + depth;
        while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
                i--;
                curp--;
        }

        /* we use already allocated block for index block,
         * so subsequent data blocks should be contiguous */
        if (EXT_HAS_FREE_INDEX(curp)) {
                /* if we found index with free entry, then use that
                 * entry: create all needed subtree and add new leaf */
                err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
                if (err)
                        goto out;

                /* refill path */
                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode,
                                    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
                                    path, gb_flags);
                if (IS_ERR(path))
                        err = PTR_ERR(path);
        } else {
                /* tree is full, time to grow in depth */
                err = ext4_ext_grow_indepth(handle, inode, mb_flags, newext);
                if (err)
                        goto out;

                /* refill path */
                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode,
                                   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
                                    path, gb_flags);
                if (IS_ERR(path)) {
                        err = PTR_ERR(path);
                        goto out;
                }

                /*
                 * only first (depth 0 -> 1) produces free space;
                 * in all other cases we have to split the grown tree
                 */
                depth = ext_depth(inode);
                if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
                        /* now we need to split */
                        goto repeat;
                }
        }

out:
        return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_left(struct inode *inode,
                                struct ext4_ext_path *path,
                                ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
        struct ext4_extent_idx *ix;
        struct ext4_extent *ex;
        int depth, ee_len;

        if (unlikely(path == NULL)) {
                EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
                return -EIO;
        }
        depth = path->p_depth;
        *phys = 0;

        if (depth == 0 && path->p_ext == NULL)
                return 0;

        /* usually the extent in the path covers blocks smaller
         * than *logical, but it can be that the extent is the
         * first one in the file */

        ex = path[depth].p_ext;
        ee_len = ext4_ext_get_actual_len(ex);
        if (*logical < le32_to_cpu(ex->ee_block)) {
                if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
                        EXT4_ERROR_INODE(inode,
                                         "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
                                         *logical, le32_to_cpu(ex->ee_block));
                        return -EIO;
                }
                while (--depth >= 0) {
                        ix = path[depth].p_idx;
                        if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
                                EXT4_ERROR_INODE(inode,
                                  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
                                  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
                                  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
                le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
                                  depth);
                                return -EIO;
                        }
                }
                return 0;
        }

        if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
                EXT4_ERROR_INODE(inode,
                                 "logical %d < ee_block %d + ee_len %d!",
                                 *logical, le32_to_cpu(ex->ee_block), ee_len);
                return -EIO;
        }

        *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
        *phys = ext4_ext_pblock(ex) + ee_len - 1;
        return 0;
}

1426 /*
1427  * search the closest allocated block to the right for *logical
1428  * and returns it at @logical + it's physical address at @phys
1429  * if *logical is the largest allocated block, the function
1430  * returns 0 at @phys
1431  * return value contains 0 (success) or error code
1432  */
1433 static int ext4_ext_search_right(struct inode *inode,
1434                                  struct ext4_ext_path *path,
1435                                  ext4_lblk_t *logical, ext4_fsblk_t *phys,
1436                                  struct ext4_extent **ret_ex)
1437 {
1438         struct buffer_head *bh = NULL;
1439         struct ext4_extent_header *eh;
1440         struct ext4_extent_idx *ix;
1441         struct ext4_extent *ex;
1442         ext4_fsblk_t block;
1443         int depth;      /* Note, NOT eh_depth; depth from top of tree */
1444         int ee_len;
1445
1446         if (unlikely(path == NULL)) {
1447                 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1448                 return -EIO;
1449         }
1450         depth = path->p_depth;
1451         *phys = 0;
1452
1453         if (depth == 0 && path->p_ext == NULL)
1454                 return 0;
1455
1456         /* usually the extent in the path covers blocks smaller
1457          * than *logical, but it can be that the extent is the
1458          * first one in the file */
1459
1460         ex = path[depth].p_ext;
1461         ee_len = ext4_ext_get_actual_len(ex);
1462         if (*logical < le32_to_cpu(ex->ee_block)) {
1463                 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1464                         EXT4_ERROR_INODE(inode,
1465                                          "first_extent(path[%d].p_hdr) != ex",
1466                                          depth);
1467                         return -EIO;
1468                 }
1469                 while (--depth >= 0) {
1470                         ix = path[depth].p_idx;
1471                         if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1472                                 EXT4_ERROR_INODE(inode,
1473                                                  "ix != EXT_FIRST_INDEX *logical %d!",
1474                                                  *logical);
1475                                 return -EIO;
1476                         }
1477                 }
1478                 goto found_extent;
1479         }
1480
1481         if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1482                 EXT4_ERROR_INODE(inode,
1483                                  "logical %d < ee_block %d + ee_len %d!",
1484                                  *logical, le32_to_cpu(ex->ee_block), ee_len);
1485                 return -EIO;
1486         }
1487
1488         if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
1489                 /* next allocated block in this leaf */
1490                 ex++;
1491                 goto found_extent;
1492         }
1493
1494         /* go up and search for index to the right */
1495         while (--depth >= 0) {
1496                 ix = path[depth].p_idx;
1497                 if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
1498                         goto got_index;
1499         }
1500
1501         /* we've gone up to the root and found no index to the right */
1502         return 0;
1503
1504 got_index:
1505         /* we've found an index to the right, let's
1506          * follow it and find the closest allocated
1507          * block to the right */
1508         ix++;
1509         block = ext4_idx_pblock(ix);
1510         while (++depth < path->p_depth) {
1511                 /* subtract from p_depth to get proper eh_depth */
1512                 bh = read_extent_tree_block(inode, block,
1513                                             path->p_depth - depth, 0);
1514                 if (IS_ERR(bh))
1515                         return PTR_ERR(bh);
1516                 eh = ext_block_hdr(bh);
1517                 ix = EXT_FIRST_INDEX(eh);
1518                 block = ext4_idx_pblock(ix);
1519                 put_bh(bh);
1520         }
1521
1522         bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0);
1523         if (IS_ERR(bh))
1524                 return PTR_ERR(bh);
1525         eh = ext_block_hdr(bh);
1526         ex = EXT_FIRST_EXTENT(eh);
1527 found_extent:
1528         *logical = le32_to_cpu(ex->ee_block);
1529         *phys = ext4_ext_pblock(ex);
1530         *ret_ex = ex;
1531         if (bh)
1532                 put_bh(bh);
1533         return 0;
1534 }
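
#if 0	/* Editor's standalone sketch: illustration only, not original
	 * code; toy_* names are hypothetical. The walk above has two
	 * phases: climb the path until some index has a sibling to its
	 * right, then descend along leftmost entries. The toy below
	 * mimics the climbing phase on plain arrays.
	 */
#include <stdio.h>

struct toy_level {
	int pos;	/* entry the path points at on this level */
	int nentries;	/* entries present on this level */
};

/* return the depth at which a right sibling exists, or -1 if none */
static int toy_search_right(const struct toy_level *path, int leaf_depth)
{
	int depth = leaf_depth;

	while (--depth >= 0)
		if (path[depth].pos < path[depth].nentries - 1)
			return depth;	/* descend leftmost from here */
	return -1;			/* nothing to the right at all */
}

int main(void)
{
	/* root still has room; the middle level is exhausted */
	const struct toy_level path[] = { {1, 3}, {4, 5}, {2, 3} };

	printf("%d\n", toy_search_right(path, 2));	/* prints 0 */
	return 0;
}
#endif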
1535
1536 /*
1537  * ext4_ext_next_allocated_block:
1538  * returns the allocated block in the subsequent extent, or EXT_MAX_BLOCKS.
1539  * NOTE: it treats the block number from an index entry as an
1540  * allocated block. Thus, index entries have to be consistent
1541  * with the leaves.
1542  */
1543 static ext4_lblk_t
1544 ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1545 {
1546         int depth;
1547
1548         BUG_ON(path == NULL);
1549         depth = path->p_depth;
1550
1551         if (depth == 0 && path->p_ext == NULL)
1552                 return EXT_MAX_BLOCKS;
1553
1554         while (depth >= 0) {
1555                 if (depth == path->p_depth) {
1556                         /* leaf */
1557                         if (path[depth].p_ext &&
1558                                 path[depth].p_ext !=
1559                                         EXT_LAST_EXTENT(path[depth].p_hdr))
1560                           return le32_to_cpu(path[depth].p_ext[1].ee_block);
1561                 } else {
1562                         /* index */
1563                         if (path[depth].p_idx !=
1564                                         EXT_LAST_INDEX(path[depth].p_hdr))
1565                           return le32_to_cpu(path[depth].p_idx[1].ei_block);
1566                 }
1567                 depth--;
1568         }
1569
1570         return EXT_MAX_BLOCKS;
1571 }
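
#if 0	/* Editor's standalone sketch: illustration only. On-disk fields
	 * such as ee_block and ei_block are little-endian, which is why
	 * every read above goes through le32_to_cpu(); toy_le32_to_cpu()
	 * is a hypothetical portable stand-in.
	 */
#include <stdint.h>
#include <stdio.h>

static uint32_t toy_le32_to_cpu(const uint8_t b[4])
{
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	const uint8_t on_disk[4] = { 0x39, 0x05, 0x00, 0x00 };

	printf("%u\n", toy_le32_to_cpu(on_disk));	/* prints 1337 */
	return 0;
}
#endif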
1572
1573 /*
1574  * ext4_ext_next_leaf_block:
1575  * returns first allocated block from next leaf or EXT_MAX_BLOCKS
1576  */
1577 static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
1578 {
1579         int depth;
1580
1581         BUG_ON(path == NULL);
1582         depth = path->p_depth;
1583
1584         /* a zero-depth tree has no leaf blocks at all */
1585         if (depth == 0)
1586                 return EXT_MAX_BLOCKS;
1587
1588         /* go to index block */
1589         depth--;
1590
1591         while (depth >= 0) {
1592                 if (path[depth].p_idx !=
1593                                 EXT_LAST_INDEX(path[depth].p_hdr))
1594                         return (ext4_lblk_t)
1595                                 le32_to_cpu(path[depth].p_idx[1].ei_block);
1596                 depth--;
1597         }
1598
1599         return EXT_MAX_BLOCKS;
1600 }
1601
1602 /*
1603  * ext4_ext_correct_indexes:
1604  * if the leaf gets modified and the modified extent is first in the leaf,
1605  * then we have to correct all of the indexes above.
1606  * TODO: do we need to correct the tree in all cases?
1607  */
1608 static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1609                                 struct ext4_ext_path *path)
1610 {
1611         struct ext4_extent_header *eh;
1612         int depth = ext_depth(inode);
1613         struct ext4_extent *ex;
1614         __le32 border;
1615         int k, err = 0;
1616
1617         eh = path[depth].p_hdr;
1618         ex = path[depth].p_ext;
1619
1620         if (unlikely(ex == NULL || eh == NULL)) {
1621                 EXT4_ERROR_INODE(inode,
1622                                  "ex %p == NULL or eh %p == NULL", ex, eh);
1623                 return -EIO;
1624         }
1625
1626         if (depth == 0) {
1627                 /* there is no tree at all */
1628                 return 0;
1629         }
1630
1631         if (ex != EXT_FIRST_EXTENT(eh)) {
1632                 /* we correct tree if first leaf got modified only */
1633                 return 0;
1634         }
1635
1636         /*
1637          * TODO: we need correction if border is smaller than current one
1638          */
1639         k = depth - 1;
1640         border = path[depth].p_ext->ee_block;
1641         err = ext4_ext_get_access(handle, inode, path + k);
1642         if (err)
1643                 return err;
1644         path[k].p_idx->ei_block = border;
1645         err = ext4_ext_dirty(handle, inode, path + k);
1646         if (err)
1647                 return err;
1648
1649         while (k--) {
1650                 /* change all left-side indexes */
1651                 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1652                         break;
1653                 err = ext4_ext_get_access(handle, inode, path + k);
1654                 if (err)
1655                         break;
1656                 path[k].p_idx->ei_block = border;
1657                 err = ext4_ext_dirty(handle, inode, path + k);
1658                 if (err)
1659                         break;
1660         }
1661
1662         return err;
1663 }
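
#if 0	/* Editor's standalone sketch: illustration only; toy_* names are
	 * hypothetical. When the first extent of a leaf changes its start,
	 * the new "border" must be copied into each ancestor index entry
	 * for as long as the child is the first entry of its own block,
	 * which is what the loop above does.
	 */
#include <stdio.h>

struct toy_idx {
	unsigned block;	/* first logical block covered by the child */
	int first;	/* is this entry first in its index block? */
};

static void toy_correct_indexes(struct toy_idx *path, int depth,
				unsigned border)
{
	int k = depth - 1;

	path[k].block = border;		/* parent of the leaf, always */
	while (k--) {
		if (!path[k + 1].first)
			break;		/* child not first: stop climbing */
		path[k].block = border;
	}
}

int main(void)
{
	struct toy_idx path[] = { {0, 1}, {0, 1}, {100, 1} };

	toy_correct_indexes(path, 3, 42);
	printf("%u %u %u\n", path[0].block, path[1].block, path[2].block);
	return 0;	/* prints "42 42 42" */
}
#endif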
1664
1665 int
1666 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
1667                                 struct ext4_extent *ex2)
1668 {
1669         unsigned short ext1_ee_len, ext2_ee_len, max_len;
1670
1671         /*
1672          * Make sure that both extents are initialized. We don't merge
1673          * uninitialized extents so that we can be sure that end_io code has
1674          * the extent that was written properly split out and conversion to
1675          * initialized is trivial.
1676          */
1677         if (ext4_ext_is_uninitialized(ex1) || ext4_ext_is_uninitialized(ex2))
1678                 return 0;
1679
1680         /* both extents are initialized at this point (checked above),
1681          * so the uninitialized length limit can never apply here */
1682         max_len = EXT_INIT_MAX_LEN;
1684
1685         ext1_ee_len = ext4_ext_get_actual_len(ex1);
1686         ext2_ee_len = ext4_ext_get_actual_len(ex2);
1687
1688         if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
1689                         le32_to_cpu(ex2->ee_block))
1690                 return 0;
1691
1692         /*
1693          * To allow future support for preallocated extents to be added
1694          * as an RO_COMPAT feature, refuse to merge two extents if
1695          * this could result in the top bit of ee_len being set.
1696          */
1697         if (ext1_ee_len + ext2_ee_len > max_len)
1698                 return 0;
1699 #ifdef AGGRESSIVE_TEST
1700         if (ext1_ee_len >= 4)
1701                 return 0;
1702 #endif
1703
1704         if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
1705                 return 1;
1706         return 0;
1707 }
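
#if 0	/* Editor's standalone sketch of the merge test above: toy_* names
	 * are hypothetical stand-ins. Two extents merge only when they are
	 * logically contiguous, physically contiguous, and the combined
	 * length still fits the on-disk length field.
	 */
#include <stdio.h>

struct toy_ext {
	unsigned lblk;			/* first logical block */
	unsigned long long pblk;	/* first physical block */
	unsigned len;			/* number of blocks */
};

#define TOY_INIT_MAX_LEN 32768		/* mirrors EXT_INIT_MAX_LEN */

static int toy_can_merge(const struct toy_ext *a, const struct toy_ext *b)
{
	if (a->lblk + a->len != b->lblk)	/* logical gap */
		return 0;
	if (a->len + b->len > TOY_INIT_MAX_LEN)	/* ee_len would overflow */
		return 0;
	return a->pblk + a->len == b->pblk;	/* physical contiguity */
}

int main(void)
{
	const struct toy_ext a = { 0, 1000, 8 }, b = { 8, 1008, 4 };

	printf("%d\n", toy_can_merge(&a, &b));	/* prints 1 */
	return 0;
}
#endif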
1708
1709 /*
1710  * This function tries to merge the "ex" extent to the next extent in the tree.
1711  * It always tries to merge towards right. If you want to merge towards
1712  * left, pass "ex - 1" as argument instead of "ex".
1713  * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
1714  * 1 if they got merged.
1715  */
1716 static int ext4_ext_try_to_merge_right(struct inode *inode,
1717                                  struct ext4_ext_path *path,
1718                                  struct ext4_extent *ex)
1719 {
1720         struct ext4_extent_header *eh;
1721         unsigned int depth, len;
1722         int merge_done = 0;
1723         int uninitialized = 0;
1724
1725         depth = ext_depth(inode);
1726         BUG_ON(path[depth].p_hdr == NULL);
1727         eh = path[depth].p_hdr;
1728
1729         while (ex < EXT_LAST_EXTENT(eh)) {
1730                 if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
1731                         break;
1732                 /* merge with next extent! */
1733                 if (ext4_ext_is_uninitialized(ex))
1734                         uninitialized = 1;
1735                 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1736                                 + ext4_ext_get_actual_len(ex + 1));
1737                 if (uninitialized)
1738                         ext4_ext_mark_uninitialized(ex);
1739
1740                 if (ex + 1 < EXT_LAST_EXTENT(eh)) {
1741                         len = (EXT_LAST_EXTENT(eh) - ex - 1)
1742                                 * sizeof(struct ext4_extent);
1743                         memmove(ex + 1, ex + 2, len);
1744                 }
1745                 le16_add_cpu(&eh->eh_entries, -1);
1746                 merge_done = 1;
1747                 WARN_ON(eh->eh_entries == 0);
1748                 if (!eh->eh_entries)
1749                         EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
1750         }
1751
1752         return merge_done;
1753 }
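
#if 0	/* Editor's standalone sketch: illustration only. Merging absorbs
	 * ex+1 into ex, then closes the hole in the leaf array with
	 * memmove(), just as the function above does for ext4_extent
	 * entries.
	 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	int leaf[] = { 10, 20, 30, 40 };	/* stand-ins for extents */
	int nentries = 4, pos = 1;		/* leaf[1] absorbed leaf[2] */
	int i;

	memmove(&leaf[pos + 1], &leaf[pos + 2],
		(nentries - pos - 2) * sizeof(leaf[0]));
	nentries--;

	for (i = 0; i < nentries; i++)
		printf("%d ", leaf[i]);		/* prints "10 20 40" */
	printf("\n");
	return 0;
}
#endif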
1754
1755 /*
1756  * This function does a very simple check to see if we can collapse
1757  * an extent tree with a single extent tree leaf block into the inode.
1758  */
1759 static void ext4_ext_try_to_merge_up(handle_t *handle,
1760                                      struct inode *inode,
1761                                      struct ext4_ext_path *path)
1762 {
1763         size_t s;
1764         unsigned max_root = ext4_ext_space_root(inode, 0);
1765         ext4_fsblk_t blk;
1766
1767         if ((path[0].p_depth != 1) ||
1768             (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
1769             (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
1770                 return;
1771
1772         /*
1773          * We need to modify the block allocation bitmap and the block
1774          * group descriptor to release the extent tree block.  If we
1775          * can't get the journal credits, give up.
1776          */
1777         if (ext4_journal_extend(handle, 2))
1778                 return;
1779
1780         /*
1781          * Copy the extent data up to the inode
1782          */
1783         blk = ext4_idx_pblock(path[0].p_idx);
1784         s = le16_to_cpu(path[1].p_hdr->eh_entries) *
1785                 sizeof(struct ext4_extent_idx);
1786         s += sizeof(struct ext4_extent_header);
1787
1788         memcpy(path[0].p_hdr, path[1].p_hdr, s);
1789         path[0].p_depth = 0;
1790         path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
1791                 (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
1792         path[0].p_hdr->eh_max = cpu_to_le16(max_root);
1793
1794         brelse(path[1].p_bh);
1795         ext4_free_blocks(handle, inode, NULL, blk, 1,
1796                          EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET |
1797                          EXT4_FREE_BLOCKS_RESERVE);
1798 }
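
#if 0	/* Editor's standalone sketch: a toy model, not original code.
	 * Collapsing a depth-1 tree copies the lone leaf's contents into
	 * the inode's root area and drops the depth to 0, as done above.
	 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	int leaf[] = { 3, 10, 20, 30 };	/* entry count + entries */
	int root[4] = { 1, 99, 0, 0 };	/* one index pointing at leaf */
	int depth = 1;

	memcpy(root, leaf, sizeof(leaf));	/* hoist leaf into root */
	depth = 0;

	printf("depth %d, %d entries\n", depth, root[0]);
	return 0;	/* prints "depth 0, 3 entries" */
}
#endif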
1799
1800 /*
1801  * This function tries to merge the @ex extent with its neighbours in
1802  * the tree, merging left first, then right, then up into the inode.
1803  */
1804 static void ext4_ext_try_to_merge(handle_t *handle,
1805                                   struct inode *inode,
1806                                   struct ext4_ext_path *path,
1807                                   struct ext4_extent *ex) {
1808         struct ext4_extent_header *eh;
1809         unsigned int depth;
1810         int merge_done = 0;
1811
1812         depth = ext_depth(inode);
1813         BUG_ON(path[depth].p_hdr == NULL);
1814         eh = path[depth].p_hdr;
1815
1816         if (ex > EXT_FIRST_EXTENT(eh))
1817                 merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1818
1819         if (!merge_done)
1820                 (void) ext4_ext_try_to_merge_right(inode, path, ex);
1821
1822         ext4_ext_try_to_merge_up(handle, inode, path);
1823 }
1824
1825 /*
1826  * check if a portion of the "newext" extent overlaps with an
1827  * existing extent.
1828  *
1829  * If an overlap is discovered, it updates the length of newext
1830  * such that there will be no overlap, and then returns 1.
1831  * If there is no overlap found, it returns 0.
1832  */
1833 static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
1834                                            struct inode *inode,
1835                                            struct ext4_extent *newext,
1836                                            struct ext4_ext_path *path)
1837 {
1838         ext4_lblk_t b1, b2;
1839         unsigned int depth, len1;
1840         unsigned int ret = 0;
1841
1842         b1 = le32_to_cpu(newext->ee_block);
1843         len1 = ext4_ext_get_actual_len(newext);
1844         depth = ext_depth(inode);
1845         if (!path[depth].p_ext)
1846                 goto out;
1847         b2 = le32_to_cpu(path[depth].p_ext->ee_block);
1848         b2 &= ~(sbi->s_cluster_ratio - 1);
1849
1850         /*
1851          * get the next allocated block if the extent in the path
1852          * is before the requested block(s)
1853          */
1854         if (b2 < b1) {
1855                 b2 = ext4_ext_next_allocated_block(path);
1856                 if (b2 == EXT_MAX_BLOCKS)
1857                         goto out;
1858                 b2 &= ~(sbi->s_cluster_ratio - 1);
1859         }
1860
1861         /* check for wrap through zero on extent logical start block*/
1862         if (b1 + len1 < b1) {
1863                 len1 = EXT_MAX_BLOCKS - b1;
1864                 newext->ee_len = cpu_to_le16(len1);
1865                 ret = 1;
1866         }
1867
1868         /* check for overlap */
1869         if (b1 + len1 > b2) {
1870                 newext->ee_len = cpu_to_le16(b2 - b1);
1871                 ret = 1;
1872         }
1873 out:
1874         return ret;
1875 }
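
#if 0	/* Editor's standalone sketch of the clipping above: variable
	 * names mirror the function, values are made up. If the new
	 * extent [b1, b1+len1) would run into an existing extent starting
	 * at b2, its length is trimmed to b2 - b1.
	 */
#include <stdio.h>

int main(void)
{
	unsigned b1 = 100, len1 = 50;	/* requested extent */
	unsigned b2 = 120;		/* next allocated block */

	if (b1 + len1 > b2)
		len1 = b2 - b1;		/* trim away the overlap */
	printf("len1 = %u\n", len1);	/* prints "len1 = 20" */
	return 0;
}
#endif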
1876
1877 /*
1878  * ext4_ext_insert_extent:
1879  * tries to merge requsted extent into the existing extent or
1880  * tries to merge the requested extent into an existing extent, or
1881  * inserts the requested extent as a new one into the tree,
1882  * creating a new leaf in the no-space case.
1883 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1884                                 struct ext4_ext_path *path,
1885                                 struct ext4_extent *newext, int gb_flags)
1886 {
1887         struct ext4_extent_header *eh;
1888         struct ext4_extent *ex, *fex;
1889         struct ext4_extent *nearex; /* nearest extent */
1890         struct ext4_ext_path *npath = NULL;
1891         int depth, len, err;
1892         ext4_lblk_t next;
1893         unsigned uninitialized = 0;
1894         int mb_flags = 0;
1895
1896         if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1897                 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1898                 return -EIO;
1899         }
1900         depth = ext_depth(inode);
1901         ex = path[depth].p_ext;
1902         eh = path[depth].p_hdr;
1903         if (unlikely(path[depth].p_hdr == NULL)) {
1904                 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1905                 return -EIO;
1906         }
1907
1908         /* try to insert block into found extent and return */
1909         if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
1910
1911                 /*
1912                  * Try to see whether we should test the extent to the
1913                  * right of ex or the one to its left. This is because
1914                  * ext4_ext_find_extent() can return either the extent on
1915                  * the left or on the right of the searched position, and
1916                  * checking both makes merging more effective.
1917                  */
1918                 if (ex < EXT_LAST_EXTENT(eh) &&
1919                     (le32_to_cpu(ex->ee_block) +
1920                     ext4_ext_get_actual_len(ex) <
1921                     le32_to_cpu(newext->ee_block))) {
1922                         ex += 1;
1923                         goto prepend;
1924                 } else if ((ex > EXT_FIRST_EXTENT(eh)) &&
1925                            (le32_to_cpu(newext->ee_block) +
1926                            ext4_ext_get_actual_len(newext) <
1927                            le32_to_cpu(ex->ee_block)))
1928                         ex -= 1;
1929
1930                 /* Try to append newex to the ex */
1931                 if (ext4_can_extents_be_merged(inode, ex, newext)) {
1932                         ext_debug("append [%d]%d block to %u:[%d]%d"
1933                                   "(from %llu)\n",
1934                                   ext4_ext_is_uninitialized(newext),
1935                                   ext4_ext_get_actual_len(newext),
1936                                   le32_to_cpu(ex->ee_block),
1937                                   ext4_ext_is_uninitialized(ex),
1938                                   ext4_ext_get_actual_len(ex),
1939                                   ext4_ext_pblock(ex));
1940                         err = ext4_ext_get_access(handle, inode,
1941                                                   path + depth);
1942                         if (err)
1943                                 return err;
1944
1945                         /*
1946                          * ext4_can_extents_be_merged should have checked
1947                          * that either both extents are uninitialized, or
1948                          * both aren't. Thus we need to check only one of
1949                          * them here.
1950                          */
1951                         if (ext4_ext_is_uninitialized(ex))
1952                                 uninitialized = 1;
1953                         ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1954                                         + ext4_ext_get_actual_len(newext));
1955                         if (uninitialized)
1956                                 ext4_ext_mark_uninitialized(ex);
1957                         eh = path[depth].p_hdr;
1958                         nearex = ex;
1959                         goto merge;
1960                 }
1961
1962 prepend:
1963                 /* Try to prepend newex to the ex */
1964                 if (ext4_can_extents_be_merged(inode, newext, ex)) {
1965                         ext_debug("prepend %u[%d]%d block to %u:[%d]%d"
1966                                   "(from %llu)\n",
1967                                   le32_to_cpu(newext->ee_block),
1968                                   ext4_ext_is_uninitialized(newext),
1969                                   ext4_ext_get_actual_len(newext),
1970                                   le32_to_cpu(ex->ee_block),
1971                                   ext4_ext_is_uninitialized(ex),
1972                                   ext4_ext_get_actual_len(ex),
1973                                   ext4_ext_pblock(ex));
1974                         err = ext4_ext_get_access(handle, inode,
1975                                                   path + depth);
1976                         if (err)
1977                                 return err;
1978
1979                         /*
1980                          * ext4_can_extents_be_merged should have checked
1981                          * that either both extents are uninitialized, or
1982                          * both aren't. Thus we need to check only one of
1983                          * them here.
1984                          */
1985                         if (ext4_ext_is_uninitialized(ex))
1986                                 uninitialized = 1;
1987                         ex->ee_block = newext->ee_block;
1988                         ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
1989                         ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1990                                         + ext4_ext_get_actual_len(newext));
1991                         if (uninitialized)
1992                                 ext4_ext_mark_uninitialized(ex);
1993                         eh = path[depth].p_hdr;
1994                         nearex = ex;
1995                         goto merge;
1996                 }
1997         }
1998
1999         depth = ext_depth(inode);
2000         eh = path[depth].p_hdr;
2001         if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
2002                 goto has_space;
2003
2004         /* probably next leaf has space for us? */
2005         fex = EXT_LAST_EXTENT(eh);
2006         next = EXT_MAX_BLOCKS;
2007         if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
2008                 next = ext4_ext_next_leaf_block(path);
2009         if (next != EXT_MAX_BLOCKS) {
2010                 ext_debug("next leaf block - %u\n", next);
2011                 BUG_ON(npath != NULL);
2012                 npath = ext4_ext_find_extent(inode, next, NULL, 0);
2013                 if (IS_ERR(npath))
2014                         return PTR_ERR(npath);
2015                 BUG_ON(npath->p_depth != path->p_depth);
2016                 eh = npath[depth].p_hdr;
2017                 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
2018                         ext_debug("next leaf isn't full(%d)\n",
2019                                   le16_to_cpu(eh->eh_entries));
2020                         path = npath;
2021                         goto has_space;
2022                 }
2023                 ext_debug("next leaf has no free space(%d,%d)\n",
2024                           le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
2025         }
2026
2027         /*
2028          * There is no free space in the found leaf.
2029          * We're gonna add a new leaf in the tree.
2030          */
2031         if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
2032                 mb_flags = EXT4_MB_USE_RESERVED;
2033         err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
2034                                        path, newext);
2035         if (err)
2036                 goto cleanup;
2037         depth = ext_depth(inode);
2038         eh = path[depth].p_hdr;
2039
2040 has_space:
2041         nearex = path[depth].p_ext;
2042
2043         err = ext4_ext_get_access(handle, inode, path + depth);
2044         if (err)
2045                 goto cleanup;
2046
2047         if (!nearex) {
2048                 /* there is no extent in this leaf, create first one */
2049                 ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
2050                                 le32_to_cpu(newext->ee_block),
2051                                 ext4_ext_pblock(newext),
2052                                 ext4_ext_is_uninitialized(newext),
2053                                 ext4_ext_get_actual_len(newext));
2054                 nearex = EXT_FIRST_EXTENT(eh);
2055         } else {
2056                 if (le32_to_cpu(newext->ee_block)
2057                            > le32_to_cpu(nearex->ee_block)) {
2058                         /* Insert after */
2059                         ext_debug("insert %u:%llu:[%d]%d after: "
2060                                         "nearest %p\n",
2061                                         le32_to_cpu(newext->ee_block),
2062                                         ext4_ext_pblock(newext),
2063                                         ext4_ext_is_uninitialized(newext),
2064                                         ext4_ext_get_actual_len(newext),
2065                                         nearex);
2066                         nearex++;
2067                 } else {
2068                         /* Insert before */
2069                         BUG_ON(newext->ee_block == nearex->ee_block);
2070                         ext_debug("insert %u:%llu:[%d]%d before: "
2071                                         "nearest %p\n",
2072                                         le32_to_cpu(newext->ee_block),
2073                                         ext4_ext_pblock(newext),
2074                                         ext4_ext_is_uninitialized(newext),
2075                                         ext4_ext_get_actual_len(newext),
2076                                         nearex);
2077                 }
2078                 len = EXT_LAST_EXTENT(eh) - nearex + 1;
2079                 if (len > 0) {
2080                         ext_debug("insert %u:%llu:[%d]%d: "
2081                                         "move %d extents from 0x%p to 0x%p\n",
2082                                         le32_to_cpu(newext->ee_block),
2083                                         ext4_ext_pblock(newext),
2084                                         ext4_ext_is_uninitialized(newext),
2085                                         ext4_ext_get_actual_len(newext),
2086                                         len, nearex, nearex + 1);
2087                         memmove(nearex + 1, nearex,
2088                                 len * sizeof(struct ext4_extent));
2089                 }
2090         }
2091
2092         le16_add_cpu(&eh->eh_entries, 1);
2093         path[depth].p_ext = nearex;
2094         nearex->ee_block = newext->ee_block;
2095         ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
2096         nearex->ee_len = newext->ee_len;
2097
2098 merge:
2099         /* try to merge extents */
2100         if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
2101                 ext4_ext_try_to_merge(handle, inode, path, nearex);
2102
2103
2104         /* time to correct all indexes above */
2105         err = ext4_ext_correct_indexes(handle, inode, path);
2106         if (err)
2107                 goto cleanup;
2108
2109         err = ext4_ext_dirty(handle, inode, path + path->p_depth);
2110
2111 cleanup:
2112         if (npath) {
2113                 ext4_ext_drop_refs(npath);
2114                 kfree(npath);
2115         }
2116         return err;
2117 }
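
#if 0	/* Editor's standalone sketch: illustration only. Inserting into a
	 * sorted leaf shifts the tail right by one slot with memmove(),
	 * then writes the new entry, mirroring the nearex handling above.
	 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	int leaf[5] = { 10, 20, 40, 0, 0 };
	int nentries = 3, pos = 2, newent = 30;	/* 30 goes before 40 */
	int i;

	memmove(&leaf[pos + 1], &leaf[pos],
		(nentries - pos) * sizeof(leaf[0]));
	leaf[pos] = newent;
	nentries++;

	for (i = 0; i < nentries; i++)
		printf("%d ", leaf[i]);		/* prints "10 20 30 40" */
	printf("\n");
	return 0;
}
#endif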
2118
2119 static int ext4_fill_fiemap_extents(struct inode *inode,
2120                                     ext4_lblk_t block, ext4_lblk_t num,
2121                                     struct fiemap_extent_info *fieinfo)
2122 {
2123         struct ext4_ext_path *path = NULL;
2124         struct ext4_extent *ex;
2125         struct extent_status es;
2126         ext4_lblk_t next, next_del, start = 0, end = 0;
2127         ext4_lblk_t last = block + num;
2128         int exists, depth = 0, err = 0;
2129         unsigned int flags = 0;
2130         unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
2131
2132         while (block < last && block != EXT_MAX_BLOCKS) {
2133                 num = last - block;
2134                 /* find extent for this block */
2135                 down_read(&EXT4_I(inode)->i_data_sem);
2136
2137                 if (path && ext_depth(inode) != depth) {
2138                         /* depth was changed. we have to realloc path */
2139                         kfree(path);
2140                         path = NULL;
2141                 }
2142
2143                 path = ext4_ext_find_extent(inode, block, path, 0);
2144                 if (IS_ERR(path)) {
2145                         up_read(&EXT4_I(inode)->i_data_sem);
2146                         err = PTR_ERR(path);
2147                         path = NULL;
2148                         break;
2149                 }
2150
2151                 depth = ext_depth(inode);
2152                 if (unlikely(path[depth].p_hdr == NULL)) {
2153                         up_read(&EXT4_I(inode)->i_data_sem);
2154                         EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2155                         err = -EIO;
2156                         break;
2157                 }
2158                 ex = path[depth].p_ext;
2159                 next = ext4_ext_next_allocated_block(path);
2160                 ext4_ext_drop_refs(path);
2161
2162                 flags = 0;
2163                 exists = 0;
2164                 if (!ex) {
2165                         /* there is no extent yet, so try to allocate
2166                          * all requested space */
2167                         start = block;
2168                         end = block + num;
2169                 } else if (le32_to_cpu(ex->ee_block) > block) {
2170                         /* need to allocate space before found extent */
2171                         start = block;
2172                         end = le32_to_cpu(ex->ee_block);
2173                         if (block + num < end)
2174                                 end = block + num;
2175                 } else if (block >= le32_to_cpu(ex->ee_block)
2176                                         + ext4_ext_get_actual_len(ex)) {
2177                         /* need to allocate space after found extent */
2178                         start = block;
2179                         end = block + num;
2180                         if (end >= next)
2181                                 end = next;
2182                 } else if (block >= le32_to_cpu(ex->ee_block)) {
2183                         /*
2184                          * some part of requested space is covered
2185                          * by found extent
2186                          */
2187                         start = block;
2188                         end = le32_to_cpu(ex->ee_block)
2189                                 + ext4_ext_get_actual_len(ex);
2190                         if (block + num < end)
2191                                 end = block + num;
2192                         exists = 1;
2193                 } else {
2194                         BUG();
2195                 }
2196                 BUG_ON(end <= start);
2197
2198                 if (!exists) {
2199                         es.es_lblk = start;
2200                         es.es_len = end - start;
2201                         es.es_pblk = 0;
2202                 } else {
2203                         es.es_lblk = le32_to_cpu(ex->ee_block);
2204                         es.es_len = ext4_ext_get_actual_len(ex);
2205                         es.es_pblk = ext4_ext_pblock(ex);
2206                         if (ext4_ext_is_uninitialized(ex))
2207                                 flags |= FIEMAP_EXTENT_UNWRITTEN;
2208                 }
2209
2210                 /*
2211                  * Find delayed extent and update es accordingly. We call
2212                  * it even in the !exists case to find out whether es is the
2213                  * last existing extent or not.
2214                  */
2215                 next_del = ext4_find_delayed_extent(inode, &es);
2216                 if (!exists && next_del) {
2217                         exists = 1;
2218                         flags |= (FIEMAP_EXTENT_DELALLOC |
2219                                   FIEMAP_EXTENT_UNKNOWN);
2220                 }
2221                 up_read(&EXT4_I(inode)->i_data_sem);
2222
2223                 if (unlikely(es.es_len == 0)) {
2224                         EXT4_ERROR_INODE(inode, "es.es_len == 0");
2225                         err = -EIO;
2226                         break;
2227                 }
2228
2229                 /*
2230                  * This is possible iff next == next_del == EXT_MAX_BLOCKS.
2231                  * We need to check next == EXT_MAX_BLOCKS because an
2232                  * extent can carry both unwritten and delayed status:
2233                  * when a delayed-allocated range is later allocated by
2234                  * fallocate, the status tree tracks both states in a
2235                  * single extent.
2236                  *
2237                  * So we could return an unwritten and delayed extent
2238                  * whose block is equal to 'next'.
2239                  */
2240                 if (next == next_del && next == EXT_MAX_BLOCKS) {
2241                         flags |= FIEMAP_EXTENT_LAST;
2242                         if (unlikely(next_del != EXT_MAX_BLOCKS ||
2243                                      next != EXT_MAX_BLOCKS)) {
2244                                 EXT4_ERROR_INODE(inode,
2245                                                  "next extent == %u, next "
2246                                                  "delalloc extent = %u",
2247                                                  next, next_del);
2248                                 err = -EIO;
2249                                 break;
2250                         }
2251                 }
2252
2253                 if (exists) {
2254                         err = fiemap_fill_next_extent(fieinfo,
2255                                 (__u64)es.es_lblk << blksize_bits,
2256                                 (__u64)es.es_pblk << blksize_bits,
2257                                 (__u64)es.es_len << blksize_bits,
2258                                 flags);
2259                         if (err < 0)
2260                                 break;
2261                         if (err == 1) {
2262                                 err = 0;
2263                                 break;
2264                         }
2265                 }
2266
2267                 block = es.es_lblk + es.es_len;
2268         }
2269
2270         if (path) {
2271                 ext4_ext_drop_refs(path);
2272                 kfree(path);
2273         }
2274
2275         return err;
2276 }
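
#if 0	/* Editor's standalone sketch: illustration only. fiemap reports
	 * byte ranges, so the block-based fields above are scaled by the
	 * block size, i.e. bytes = blocks << blksize_bits.
	 */
#include <stdio.h>

int main(void)
{
	unsigned char blksize_bits = 12;	/* 4096-byte blocks */
	unsigned long long lblk = 100, len = 8;

	printf("offset %llu length %llu\n",
	       lblk << blksize_bits, len << blksize_bits);
	return 0;	/* prints "offset 409600 length 32768" */
}
#endif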
2277
2278 /*
2279  * ext4_ext_put_gap_in_cache:
2280  * calculate boundaries of the gap that the requested block fits into
2281  * and cache this gap
2282  */
2283 static void
2284 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
2285                                 ext4_lblk_t block)
2286 {
2287         int depth = ext_depth(inode);
2288         unsigned long len = 0;
2289         ext4_lblk_t lblock = 0;
2290         struct ext4_extent *ex;
2291
2292         ex = path[depth].p_ext;
2293         if (ex == NULL) {
2294                 /*
2295                  * there is no extent yet, so gap is [0;-] and we
2296                  * don't cache it
2297                  */
2298                 ext_debug("cache gap(whole file):");
2299         } else if (block < le32_to_cpu(ex->ee_block)) {
2300                 lblock = block;
2301                 len = le32_to_cpu(ex->ee_block) - block;
2302                 ext_debug("cache gap(before): %u [%u:%u]",
2303                                 block,
2304                                 le32_to_cpu(ex->ee_block),
2305                                  ext4_ext_get_actual_len(ex));
2306                 if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
2307                         ext4_es_insert_extent(inode, lblock, len, ~0,
2308                                               EXTENT_STATUS_HOLE);
2309         } else if (block >= le32_to_cpu(ex->ee_block)
2310                         + ext4_ext_get_actual_len(ex)) {
2311                 ext4_lblk_t next;
2312                 lblock = le32_to_cpu(ex->ee_block)
2313                         + ext4_ext_get_actual_len(ex);
2314
2315                 next = ext4_ext_next_allocated_block(path);
2316                 ext_debug("cache gap(after): [%u:%u] %u",
2317                                 le32_to_cpu(ex->ee_block),
2318                                 ext4_ext_get_actual_len(ex),
2319                                 block);
2320                 BUG_ON(next == lblock);
2321                 len = next - lblock;
2322                 if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
2323                         ext4_es_insert_extent(inode, lblock, len, ~0,
2324                                               EXTENT_STATUS_HOLE);
2325         } else {
2326                 BUG();
2327         }
2328
2329         ext_debug(" -> %u:%lu\n", lblock, len);
2330 }
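
#if 0	/* Editor's standalone sketch: values are made up. The gap cached
	 * above is either [block, ex->ee_block) when the lookup landed
	 * before an extent, or [end of extent, next allocated block) when
	 * it landed after one.
	 */
#include <stdio.h>

int main(void)
{
	unsigned ex_start = 200, ex_len = 50;	/* found extent */
	unsigned next = 400;			/* next allocated block */
	unsigned block = 300;			/* requested block */

	if (block < ex_start)			/* gap before the extent */
		printf("gap %u..%u\n", block, ex_start - 1);
	else if (block >= ex_start + ex_len)	/* gap after the extent */
		printf("gap %u..%u\n", ex_start + ex_len, next - 1);
	return 0;	/* prints "gap 250..399" */
}
#endif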
2331
2332 /*
2333  * ext4_ext_rm_idx:
2334  * removes index from the index block.
2335  */
2336 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2337                         struct ext4_ext_path *path, int depth)
2338 {
2339         int err;
2340         ext4_fsblk_t leaf;
2341
2342         /* free index block */
2343         depth--;
2344         path = path + depth;
2345         leaf = ext4_idx_pblock(path->p_idx);
2346         if (unlikely(path->p_hdr->eh_entries == 0)) {
2347                 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2348                 return -EIO;
2349         }
2350         err = ext4_ext_get_access(handle, inode, path);
2351         if (err)
2352                 return err;
2353
2354         if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
2355                 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
2356                 len *= sizeof(struct ext4_extent_idx);
2357                 memmove(path->p_idx, path->p_idx + 1, len);
2358         }
2359
2360         le16_add_cpu(&path->p_hdr->eh_entries, -1);
2361         err = ext4_ext_dirty(handle, inode, path);
2362         if (err)
2363                 return err;
2364         ext_debug("index is empty, remove it, free block %llu\n", leaf);
2365         trace_ext4_ext_rm_idx(inode, leaf);
2366
2367         ext4_free_blocks(handle, inode, NULL, leaf, 1,
2368                          EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2369
2370         while (--depth >= 0) {
2371                 if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
2372                         break;
2373                 path--;
2374                 err = ext4_ext_get_access(handle, inode, path);
2375                 if (err)
2376                         break;
2377                 path->p_idx->ei_block = (path+1)->p_idx->ei_block;
2378                 err = ext4_ext_dirty(handle, inode, path);
2379                 if (err)
2380                         break;
2381         }
2382         return err;
2383 }
2384
2385 /*
2386  * ext4_ext_calc_credits_for_single_extent:
2387  * This routine returns the max. credits needed to insert an extent
2388  * into the extent tree.
2389  * When passing an actual path, the caller should calculate credits
2390  * under i_data_sem.
2391  */
2392 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2393                                                 struct ext4_ext_path *path)
2394 {
2395         if (path) {
2396                 int depth = ext_depth(inode);
2397                 int ret = 0;
2398
2399                 /* probably there is space in leaf? */
2400                 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2401                                 < le16_to_cpu(path[depth].p_hdr->eh_max)) {
2402
2403                         /*
2404                          *  There is some space in the leaf; no
2405                          *  need to account for a leaf block credit.
2406                          *
2407                          *  Bitmaps, block group descriptor blocks
2408                          *  and other metadata blocks still need to be
2409                          *  accounted for.
2410                          */
2411                         /* 1 bitmap, 1 block group descriptor */
2412                         ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2413                         return ret;
2414                 }
2415         }
2416
2417         return ext4_chunk_trans_blocks(inode, nrblocks);
2418 }
2419
2420 /*
2421  * How many index/leaf blocks need to be changed/allocated to add @extents extents?
2422  *
2423  * If we add a single extent, then in the worst case each tree level's
2424  * index/leaf may need to be changed in case of a tree split.
2425  *
2426  * If more extents are inserted, they could cause the whole tree split more
2427  * than once, but this is really rare.
2428  */
2429 int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
2430 {
2431         int index;
2432         int depth;
2433
2434         /* If we are converting the inline data, only one is needed here. */
2435         if (ext4_has_inline_data(inode))
2436                 return 1;
2437
2438         depth = ext_depth(inode);
2439
2440         if (extents <= 1)
2441                 index = depth * 2;
2442         else
2443                 index = depth * 3;
2444
2445         return index;
2446 }
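
#if 0	/* Editor's worked example: illustrative only. For a tree of
	 * depth 2, a single inserted extent may split every level once,
	 * touching at most depth * 2 = 4 index/leaf blocks; for multiple
	 * extents the routine pads this to depth * 3 = 6.
	 */
#include <stdio.h>

static int toy_index_trans_blocks(int depth, int extents)
{
	return extents <= 1 ? depth * 2 : depth * 3;
}

int main(void)
{
	printf("%d %d\n", toy_index_trans_blocks(2, 1),
	       toy_index_trans_blocks(2, 8));	/* prints "4 6" */
	return 0;
}
#endif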
2447
2448 static inline int get_default_free_blocks_flags(struct inode *inode)
2449 {
2450         if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2451                 return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
2452         else if (ext4_should_journal_data(inode))
2453                 return EXT4_FREE_BLOCKS_FORGET;
2454         return 0;
2455 }
2456
2457 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2458                               struct ext4_extent *ex,
2459                               long long *partial_cluster,
2460                               ext4_lblk_t from, ext4_lblk_t to)
2461 {
2462         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2463         unsigned short ee_len =  ext4_ext_get_actual_len(ex);
2464         ext4_fsblk_t pblk;
2465         int flags = get_default_free_blocks_flags(inode);
2466
2467         /*
2468          * For bigalloc file systems, we never free a partial cluster
2469          * at the beginning of the extent.  Instead, we make a note
2470          * that we tried freeing the cluster, and check to see if we
2471          * need to free it on a subsequent call to ext4_remove_blocks,
2472          * or at the end of the ext4_truncate() operation.
2473          */
2474         flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
2475
2476         trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
2477         /*
2478          * If we have a partial cluster, and it's different from the
2479          * cluster of the last block, we need to explicitly free the
2480          * partial cluster here.
2481          */
2482         pblk = ext4_ext_pblock(ex) + ee_len - 1;
2483         if ((*partial_cluster > 0) &&
2484             (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
2485                 ext4_free_blocks(handle, inode, NULL,
2486                                  EXT4_C2B(sbi, *partial_cluster),
2487                                  sbi->s_cluster_ratio, flags);
2488                 *partial_cluster = 0;
2489         }
2490
2491 #ifdef EXTENTS_STATS
2492         {
2493                 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2494                 spin_lock(&sbi->s_ext_stats_lock);
2495                 sbi->s_ext_blocks += ee_len;
2496                 sbi->s_ext_extents++;
2497                 if (ee_len < sbi->s_ext_min)
2498                         sbi->s_ext_min = ee_len;
2499                 if (ee_len > sbi->s_ext_max)
2500                         sbi->s_ext_max = ee_len;
2501                 if (ext_depth(inode) > sbi->s_depth_max)
2502                         sbi->s_depth_max = ext_depth(inode);
2503                 spin_unlock(&sbi->s_ext_stats_lock);
2504         }
2505 #endif
2506         if (from >= le32_to_cpu(ex->ee_block)
2507             && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
2508                 /* tail removal */
2509                 ext4_lblk_t num;
2510                 unsigned int unaligned;
2511
2512                 num = le32_to_cpu(ex->ee_block) + ee_len - from;
2513                 pblk = ext4_ext_pblock(ex) + ee_len - num;
2514                 /*
2515                  * Usually we want to free the partial cluster at the end of the
2516                  * extent, except for the situation when the cluster is still
2517                  * used by some other extent (partial_cluster is negative).
2518                  */
2519                 if (*partial_cluster < 0 &&
2520                     -(*partial_cluster) == EXT4_B2C(sbi, pblk + num - 1))
2521                         flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
2522
2523                 ext_debug("free last %u blocks starting %llu partial %lld\n",
2524                           num, pblk, *partial_cluster);
2525                 ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
2526                 /*
2527                  * If the block range to be freed didn't start at the
2528                  * beginning of a cluster, and we removed the entire
2529                  * extent and the cluster is not used by any other extent,
2530                  * save the partial cluster here, since we might need to
2531                  * delete it if we determine that the truncate operation has
2532                  * removed all of the blocks in the cluster.
2533                  *
2534                  * On the other hand, if we did not manage to free the whole
2535                  * extent, we have to mark the cluster as used (store negative
2536                  * cluster number in partial_cluster).
2537                  */
2538                 unaligned = pblk & (sbi->s_cluster_ratio - 1);
2539                 if (unaligned && (ee_len == num) &&
2540                     (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk))))
2541                         *partial_cluster = EXT4_B2C(sbi, pblk);
2542                 else if (unaligned)
2543                         *partial_cluster = -((long long)EXT4_B2C(sbi, pblk));
2544                 else if (*partial_cluster > 0)
2545                         *partial_cluster = 0;
2546         } else
2547                 ext4_error(sbi->s_sb, "strange request: removal(2) "
2548                            "%u-%u from %u:%u\n",
2549                            from, to, le32_to_cpu(ex->ee_block), ee_len);
2550         return 0;
2551 }
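
#if 0	/* Editor's standalone sketch of the bigalloc bookkeeping above;
	 * toy_b2c() is a hypothetical stand-in for EXT4_B2C(). With a
	 * cluster of 2^n blocks, a block maps to a cluster by shifting,
	 * and a "partial cluster" arises whenever a freed range ends
	 * mid-cluster.
	 */
#include <stdio.h>

#define TOY_CLUSTER_BITS 4	/* assumed: 16 blocks per cluster */

static unsigned long long toy_b2c(unsigned long long pblk)
{
	return pblk >> TOY_CLUSTER_BITS;	/* block -> cluster */
}

int main(void)
{
	unsigned long long first = 100, last = 137;	/* freed range */

	printf("clusters %llu..%llu\n", toy_b2c(first), toy_b2c(last));
	/* prints "clusters 6..8"; cluster 8 (blocks 128..143) is only
	 * partially freed, so it must not be released yet */
	return 0;
}
#endif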
2552
2553
2554 /*
2555  * ext4_ext_rm_leaf() removes the extents associated with the
2556  * blocks appearing between "start" and "end", and splits the extents
2557  * if "start" and "end" appear in the same extent.
2558  *
2559  * @handle: The journal handle
2560  * @inode:  The file's inode
2561  * @path:   The path to the leaf
2562  * @partial_cluster: The cluster which we'll have to free if all extents
2563  *                   have been released from it; it goes negative if
2564  *                   the cluster is still in use.
2565  * @start:  The first block to remove
2566  * @end:    The last block to remove
2567  */
2568 static int
2569 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2570                  struct ext4_ext_path *path,
2571                  long long *partial_cluster,
2572                  ext4_lblk_t start, ext4_lblk_t end)
2573 {
2574         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2575         int err = 0, correct_index = 0;
2576         int depth = ext_depth(inode), credits;
2577         struct ext4_extent_header *eh;
2578         ext4_lblk_t a, b;
2579         unsigned num;
2580         ext4_lblk_t ex_ee_block;
2581         unsigned short ex_ee_len;
2582         unsigned uninitialized = 0;
2583         struct ext4_extent *ex;
2584         ext4_fsblk_t pblk;
2585
2586         /* the header must have been checked already in ext4_ext_remove_space() */
2587         ext_debug("truncate since %u in leaf to %u\n", start, end);
2588         if (!path[depth].p_hdr)
2589                 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2590         eh = path[depth].p_hdr;
2591         if (unlikely(path[depth].p_hdr == NULL)) {
2592                 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2593                 return -EIO;
2594         }
2595         /* find where to start removing */
2596         ex = path[depth].p_ext;
2597         if (!ex)
2598                 ex = EXT_LAST_EXTENT(eh);
2599
2600         ex_ee_block = le32_to_cpu(ex->ee_block);
2601         ex_ee_len = ext4_ext_get_actual_len(ex);
2602
2603         trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
2604
2605         while (ex >= EXT_FIRST_EXTENT(eh) &&
2606                         ex_ee_block + ex_ee_len > start) {
2607
2608                 if (ext4_ext_is_uninitialized(ex))
2609                         uninitialized = 1;
2610                 else
2611                         uninitialized = 0;
2612
2613                 ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
2614                          uninitialized, ex_ee_len);
2615                 path[depth].p_ext = ex;
2616
2617                 a = ex_ee_block > start ? ex_ee_block : start;
2618                 b = ex_ee_block+ex_ee_len - 1 < end ?
2619                         ex_ee_block+ex_ee_len - 1 : end;
2620
2621                 ext_debug("  border %u:%u\n", a, b);
2622
2623                 /* If this extent is beyond the end of the hole, skip it */
2624                 if (end < ex_ee_block) {
2625                         /*
2626                          * We're going to skip this extent and move to another,
2627                          * so if this extent is not cluster aligned we have
2628                          * to mark the current cluster as used to avoid
2629                          * accidentally freeing it later on
2630                          */
2631                         pblk = ext4_ext_pblock(ex);
2632                         if (pblk & (sbi->s_cluster_ratio - 1))
2633                                 *partial_cluster =
2634                                         -((long long)EXT4_B2C(sbi, pblk));
2635                         ex--;
2636                         ex_ee_block = le32_to_cpu(ex->ee_block);
2637                         ex_ee_len = ext4_ext_get_actual_len(ex);
2638                         continue;
2639                 } else if (b != ex_ee_block + ex_ee_len - 1) {
2640                         EXT4_ERROR_INODE(inode,
2641                                          "can not handle truncate %u:%u "
2642                                          "on extent %u:%u",
2643                                          start, end, ex_ee_block,
2644                                          ex_ee_block + ex_ee_len - 1);
2645                         err = -EIO;
2646                         goto out;
2647                 } else if (a != ex_ee_block) {
2648                         /* remove tail of the extent */
2649                         num = a - ex_ee_block;
2650                 } else {
2651                         /* remove whole extent: excellent! */
2652                         num = 0;
2653                 }
2654                 /*
2655                  * 3 for leaf, sb, and inode plus 2 (bmap and group
2656                  * descriptor) for each block group; assume two block
2657                  * groups plus ex_ee_len/blocks_per_block_group for
2658                  * the worst case
2659                  */
2660                 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2661                 if (ex == EXT_FIRST_EXTENT(eh)) {
2662                         correct_index = 1;
2663                         credits += (ext_depth(inode)) + 1;
2664                 }
2665                 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2666
2667                 err = ext4_ext_truncate_extend_restart(handle, inode, credits);
2668                 if (err)
2669                         goto out;
2670
2671                 err = ext4_ext_get_access(handle, inode, path + depth);
2672                 if (err)
2673                         goto out;
2674
2675                 err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
2676                                          a, b);
2677                 if (err)
2678                         goto out;
2679
2680                 if (num == 0)
2681                         /* this extent is removed; mark slot entirely unused */
2682                         ext4_ext_store_pblock(ex, 0);
2683
2684                 ex->ee_len = cpu_to_le16(num);
2685                 /*
2686                  * Do not mark uninitialized if all the blocks in the
2687                  * extent have been removed.
2688                  */
2689                 if (uninitialized && num)
2690                         ext4_ext_mark_uninitialized(ex);
2691                 /*
2692                  * If the extent was completely released,
2693                  * we need to remove it from the leaf
2694                  */
2695                 if (num == 0) {
2696                         if (end != EXT_MAX_BLOCKS - 1) {
2697                                 /*
2698                                  * For hole punching, we need to scoot all the
2699                                  * extents up when an extent is removed so that
2700                                  * we don't have blank extents in the middle
2701                                  */
2702                                 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2703                                         sizeof(struct ext4_extent));
2704
2705                                 /* Now get rid of the one at the end */
2706                                 memset(EXT_LAST_EXTENT(eh), 0,
2707                                         sizeof(struct ext4_extent));
2708                         }
2709                         le16_add_cpu(&eh->eh_entries, -1);
2710                 } else if (*partial_cluster > 0)
2711                         *partial_cluster = 0;
2712
2713                 err = ext4_ext_dirty(handle, inode, path + depth);
2714                 if (err)
2715                         goto out;
2716
2717                 ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
2718                                 ext4_ext_pblock(ex));
2719                 ex--;
2720                 ex_ee_block = le32_to_cpu(ex->ee_block);
2721                 ex_ee_len = ext4_ext_get_actual_len(ex);
2722         }
2723
2724         if (correct_index && eh->eh_entries)
2725                 err = ext4_ext_correct_indexes(handle, inode, path);
2726
2727         /*
2728          * Free the partial cluster only if the current extent does not
2729                  * reference it. Otherwise we might free a used cluster.
2730          */
2731         if (*partial_cluster > 0 &&
2732             (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
2733              *partial_cluster)) {
2734                 int flags = get_default_free_blocks_flags(inode);
2735
2736                 ext4_free_blocks(handle, inode, NULL,
2737                                  EXT4_C2B(sbi, *partial_cluster),
2738                                  sbi->s_cluster_ratio, flags);
2739                 *partial_cluster = 0;
2740         }
2741
2742         /* if this leaf is free, then we should
2743          * remove it from the index block above */
2744         if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2745                 err = ext4_ext_rm_idx(handle, inode, path, depth);
2746
2747 out:
2748         return err;
2749 }
2750
2751 /*
2752  * ext4_ext_more_to_rm:
2753  * returns 1 if the current index has to be freed (even if only partially)
2754  */
2755 static int
2756 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2757 {
2758         BUG_ON(path->p_idx == NULL);
2759
2760         if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2761                 return 0;
2762
2763         /*
2764          * if truncate on deeper level happened, it wasn't partial,
2765          * so we have to consider current index for truncation
2766          */
2767         if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2768                 return 0;
2769         return 1;
2770 }
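
/*
 * An illustrative reading of how ext4_ext_remove_space() drives this
 * predicate (paraphrasing the loop below, not adding behaviour): before
 * descending, the caller snapshots the entry count,
 *
 *	path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
 *
 * and on the way back up ext4_ext_more_to_rm() compares the snapshot with
 * the current eh_entries: if they are equal, the deeper truncate was
 * partial and the scan at this level stops; if the child was removed
 * entirely, eh_entries shrank and the remaining indexes to the left are
 * still candidates for removal.
 */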
2771
2772 int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2773                           ext4_lblk_t end)
2774 {
2775         struct super_block *sb = inode->i_sb;
2776         int depth = ext_depth(inode);
2777         struct ext4_ext_path *path = NULL;
2778         long long partial_cluster = 0;
2779         handle_t *handle;
2780         int i = 0, err = 0;
2781
2782         ext_debug("truncate since %u to %u\n", start, end);
2783
2784         /* probably the first extent we free will be the last in its block */
2785         handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1);
2786         if (IS_ERR(handle))
2787                 return PTR_ERR(handle);
2788
2789 again:
2790         trace_ext4_ext_remove_space(inode, start, end, depth);
2791
2792         /*
2793          * Check if we are removing extents inside the extent tree. If that
2794          * is the case, we are going to punch a hole inside the extent tree
2795          * so we have to check whether we need to split the extent covering
2796          * the last block to remove so we can easily remove the part of it
2797          * in ext4_ext_rm_leaf().
2798          */
2799         if (end < EXT_MAX_BLOCKS - 1) {
2800                 struct ext4_extent *ex;
2801                 ext4_lblk_t ee_block;
2802
2803                 /* find extent for this block */
2804                 path = ext4_ext_find_extent(inode, end, NULL, EXT4_EX_NOCACHE);
2805                 if (IS_ERR(path)) {
2806                         ext4_journal_stop(handle);
2807                         return PTR_ERR(path);
2808                 }
2809                 depth = ext_depth(inode);
2810                 /* Leaf may not exist only if inode has no blocks at all */
2811                 ex = path[depth].p_ext;
2812                 if (!ex) {
2813                         if (depth) {
2814                                 EXT4_ERROR_INODE(inode,
2815                                                  "path[%d].p_hdr == NULL",
2816                                                  depth);
2817                                 err = -EIO;
2818                         }
2819                         goto out;
2820                 }
2821
2822                 ee_block = le32_to_cpu(ex->ee_block);
2823
2824                 /*
2825                  * See if the last block is inside the extent; if so, split
2826                  * the extent at 'end' block so we can easily remove the
2827                  * tail of the first part of the split extent in
2828                  * ext4_ext_rm_leaf().
2829                  */
2830                 if (end >= ee_block &&
2831                     end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
2832                         int split_flag = 0;
2833
2834                         if (ext4_ext_is_uninitialized(ex))
2835                                 split_flag = EXT4_EXT_MARK_UNINIT1 |
2836                                              EXT4_EXT_MARK_UNINIT2;
2837
2838                         /*
2839                          * Split the extent in two so that 'end' is the last
2840                          * block in the first new extent. Also we should not
2841                          * fail removing space due to ENOSPC so try to use
2842                          * reserved block if that happens.
2843                          */
2844                         err = ext4_split_extent_at(handle, inode, path,
2845                                         end + 1, split_flag,
2846                                         EXT4_EX_NOCACHE |
2847                                         EXT4_GET_BLOCKS_PRE_IO |
2848                                         EXT4_GET_BLOCKS_METADATA_NOFAIL);
2849
2850                         if (err < 0)
2851                                 goto out;
2852                 }
2853         }
2854         /*
2855          * We start scanning from the right side, freeing all the blocks
2856          * after i_size and walking into the tree depth-wise.
2857          */
2858         depth = ext_depth(inode);
2859         if (path) {
2860                 int k = i = depth;
2861                 while (--k > 0)
2862                         path[k].p_block =
2863                                 le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2864         } else {
2865                 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
2866                                GFP_NOFS);
2867                 if (path == NULL) {
2868                         ext4_journal_stop(handle);
2869                         return -ENOMEM;
2870                 }
2871                 path[0].p_depth = depth;
2872                 path[0].p_hdr = ext_inode_hdr(inode);
2873                 i = 0;
2874
2875                 if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
2876                         err = -EIO;
2877                         goto out;
2878                 }
2879         }
2880         err = 0;
2881
2882         while (i >= 0 && err == 0) {
2883                 if (i == depth) {
2884                         /* this is leaf block */
2885                         err = ext4_ext_rm_leaf(handle, inode, path,
2886                                                &partial_cluster, start,
2887                                                end);
2888                         /* root level has p_bh == NULL, brelse() eats this */
2889                         brelse(path[i].p_bh);
2890                         path[i].p_bh = NULL;
2891                         i--;
2892                         continue;
2893                 }
2894
2895                 /* this is index block */
2896                 if (!path[i].p_hdr) {
2897                         ext_debug("initialize header\n");
2898                         path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2899                 }
2900
2901                 if (!path[i].p_idx) {
2902                         /* this level hasn't been touched yet */
2903                         path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2904                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2905                         ext_debug("init index ptr: hdr 0x%p, num %d\n",
2906                                   path[i].p_hdr,
2907                                   le16_to_cpu(path[i].p_hdr->eh_entries));
2908                 } else {
2909                         /* we were already here, so look at the next index */
2910                         path[i].p_idx--;
2911                 }
2912
2913                 ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2914                                 i, EXT_FIRST_INDEX(path[i].p_hdr),
2915                                 path[i].p_idx);
2916                 if (ext4_ext_more_to_rm(path + i)) {
2917                         struct buffer_head *bh;
2918                         /* go to the next level */
2919                         ext_debug("move to level %d (block %llu)\n",
2920                                   i + 1, ext4_idx_pblock(path[i].p_idx));
2921                         memset(path + i + 1, 0, sizeof(*path));
2922                         bh = read_extent_tree_block(inode,
2923                                 ext4_idx_pblock(path[i].p_idx), depth - i - 1,
2924                                 EXT4_EX_NOCACHE);
2925                         if (IS_ERR(bh)) {
2926                                 /* should we reset i_size? */
2927                                 err = PTR_ERR(bh);
2928                                 break;
2929                         }
2930                         /* Yield here to deal with large extent trees.
2931                          * Should be a no-op if we did IO above. */
2932                         cond_resched();
2933                         if (WARN_ON(i + 1 > depth)) {
2934                                 err = -EIO;
2935                                 break;
2936                         }
2937                         path[i + 1].p_bh = bh;
2938
2939                         /* save actual number of indexes since this
2940                          * number is changed at the next iteration */
2941                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2942                         i++;
2943                 } else {
2944                         /* we finished processing this index, go up */
2945                         if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2946                                 /* index is empty, remove it;
2947                                  * the handle was already prepared when
2948                                  * the leaf was truncated */
2949                                 err = ext4_ext_rm_idx(handle, inode, path, i);
2950                         }
2951                         /* root level has p_bh == NULL, brelse() eats this */
2952                         brelse(path[i].p_bh);
2953                         path[i].p_bh = NULL;
2954                         i--;
2955                         ext_debug("return to level %d\n", i);
2956                 }
2957         }
2958
2959         trace_ext4_ext_remove_space_done(inode, start, end, depth,
2960                         partial_cluster, path->p_hdr->eh_entries);
2961
2962         /* If we still have something in the partial cluster and we have removed
2963          * even the first extent, then we should free the blocks in the partial
2964          * cluster as well. */
2965         if (partial_cluster > 0 && path->p_hdr->eh_entries == 0) {
2966                 int flags = get_default_free_blocks_flags(inode);
2967
2968                 ext4_free_blocks(handle, inode, NULL,
2969                                  EXT4_C2B(EXT4_SB(sb), partial_cluster),
2970                                  EXT4_SB(sb)->s_cluster_ratio, flags);
2971                 partial_cluster = 0;
2972         }
2973
2974         /* TODO: flexible tree reduction should be here */
2975         if (path->p_hdr->eh_entries == 0) {
2976                 /*
2977                  * truncate to zero freed the whole tree,
2978                  * so we need to correct eh_depth
2979                  */
2980                 err = ext4_ext_get_access(handle, inode, path);
2981                 if (err == 0) {
2982                         ext_inode_hdr(inode)->eh_depth = 0;
2983                         ext_inode_hdr(inode)->eh_max =
2984                                 cpu_to_le16(ext4_ext_space_root(inode, 0));
2985                         err = ext4_ext_dirty(handle, inode, path);
2986                 }
2987         }
2988 out:
2989         ext4_ext_drop_refs(path);
2990         kfree(path);
2991         if (err == -EAGAIN) {
2992                 path = NULL;
2993                 goto again;
2994         }
2995         ext4_journal_stop(handle);
2996
2997         return err;
2998 }
2999
3000 /*
3001  * called at mount time
3002  */
3003 void ext4_ext_init(struct super_block *sb)
3004 {
3005         /*
3006          * possible initialization would be here
3007          */
3008
3009         if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
3010 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
3011                 printk(KERN_INFO "EXT4-fs: file extents enabled"
3012 #ifdef AGGRESSIVE_TEST
3013                        ", aggressive tests"
3014 #endif
3015 #ifdef CHECK_BINSEARCH
3016                        ", check binsearch"
3017 #endif
3018 #ifdef EXTENTS_STATS
3019                        ", stats"
3020 #endif
3021                        "\n");
3022 #endif
3023 #ifdef EXTENTS_STATS
3024                 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
3025                 EXT4_SB(sb)->s_ext_min = 1 << 30;
3026                 EXT4_SB(sb)->s_ext_max = 0;
3027 #endif
3028         }
3029 }
3030
3031 /*
3032  * called at umount time
3033  */
3034 void ext4_ext_release(struct super_block *sb)
3035 {
3036         if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
3037                 return;
3038
3039 #ifdef EXTENTS_STATS
3040         if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
3041                 struct ext4_sb_info *sbi = EXT4_SB(sb);
3042                 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
3043                         sbi->s_ext_blocks, sbi->s_ext_extents,
3044                         sbi->s_ext_blocks / sbi->s_ext_extents);
3045                 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
3046                         sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
3047         }
3048 #endif
3049 }
3050
3051 static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
3052 {
3053         ext4_lblk_t  ee_block;
3054         ext4_fsblk_t ee_pblock;
3055         unsigned int ee_len;
3056
3057         ee_block  = le32_to_cpu(ex->ee_block);
3058         ee_len    = ext4_ext_get_actual_len(ex);
3059         ee_pblock = ext4_ext_pblock(ex);
3060
3061         if (ee_len == 0)
3062                 return 0;
3063
3064         return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
3065                                      EXTENT_STATUS_WRITTEN);
3066 }
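
/*
 * Callers in this file typically record the range they zeroed out in a
 * scratch extent and then feed it to ext4_zeroout_es(); a minimal sketch
 * of that pattern (mirroring ext4_ext_convert_to_initialized(), variable
 * names illustrative):
 *
 *	struct ext4_extent zero_ex;
 *
 *	zero_ex.ee_block = ex->ee_block;
 *	zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
 *	ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
 *	err = ext4_zeroout_es(inode, &zero_ex);
 */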
3067
3068 /* FIXME!! we need to try to merge to the left or right after zero-out */
3069 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
3070 {
3071         ext4_fsblk_t ee_pblock;
3072         unsigned int ee_len;
3073         int ret;
3074
3075         ee_len    = ext4_ext_get_actual_len(ex);
3076         ee_pblock = ext4_ext_pblock(ex);
3077
3078         ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
3079         if (ret > 0)
3080                 ret = 0;
3081
3082         return ret;
3083 }
3084
3085 /*
3086  * ext4_split_extent_at() splits an extent at given block.
3087  *
3088  * @handle: the journal handle
3089  * @inode: the file inode
3090  * @path: the path to the extent
3091  * @split: the logical block where the extent is split.
3092  * @split_flag: indicates if the extent could be zeroed out if the split
3093  *              fails, and the states (init or uninit) of the new extents.
3094  * @flags: flags used to insert the new extent into the extent tree.
3095  *
3096  *
3097  * Splits extent [a, b] into two extents [a, @split) and [@split, b], whose
3098  * states are determined by @split_flag.
3099  *
3100  * There are two cases:
3101  *  a> the extent is split into two extents.
3102  *  b> no split is needed; the extent is only marked.
3103  *
3104  * return 0 on success.
3105  */
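
/*
 * A worked example (hypothetical numbers): given an uninitialized extent
 * covering logical blocks [100, 120) at physical block 500, a call like
 *
 *	err = ext4_split_extent_at(handle, inode, path, 110,
 *				   EXT4_EXT_MARK_UNINIT1 |
 *				   EXT4_EXT_MARK_UNINIT2,
 *				   EXT4_GET_BLOCKS_PRE_IO);
 *
 * leaves two uninitialized extents, [100, 110) at pblock 500 and
 * [110, 120) at pblock 510 (case a).  With @split == 100, case b applies
 * and only the extent state changes.
 */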
3106 static int ext4_split_extent_at(handle_t *handle,
3107                              struct inode *inode,
3108                              struct ext4_ext_path *path,
3109                              ext4_lblk_t split,
3110                              int split_flag,
3111                              int flags)
3112 {
3113         ext4_fsblk_t newblock;
3114         ext4_lblk_t ee_block;
3115         struct ext4_extent *ex, newex, orig_ex, zero_ex;
3116         struct ext4_extent *ex2 = NULL;
3117         unsigned int ee_len, depth;
3118         int err = 0;
3119
3120         BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
3121                (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
3122
3123         ext_debug("ext4_split_extent_at: inode %lu, logical "
3124                 "block %llu\n", inode->i_ino, (unsigned long long)split);
3125
3126         ext4_ext_show_leaf(inode, path);
3127
3128         depth = ext_depth(inode);
3129         ex = path[depth].p_ext;
3130         ee_block = le32_to_cpu(ex->ee_block);
3131         ee_len = ext4_ext_get_actual_len(ex);
3132         newblock = split - ee_block + ext4_ext_pblock(ex);
3133
3134         BUG_ON(split < ee_block || split >= (ee_block + ee_len));
3135         BUG_ON(!ext4_ext_is_uninitialized(ex) &&
3136                split_flag & (EXT4_EXT_MAY_ZEROOUT |
3137                              EXT4_EXT_MARK_UNINIT1 |
3138                              EXT4_EXT_MARK_UNINIT2));
3139
3140         err = ext4_ext_get_access(handle, inode, path + depth);
3141         if (err)
3142                 goto out;
3143
3144         if (split == ee_block) {
3145                 /*
3146                  * case b: block @split is the block that the extent begins
3147                  * with, so we just change the state of the extent; splitting
3148                  * is not needed.
3149                  */
3150                 if (split_flag & EXT4_EXT_MARK_UNINIT2)
3151                         ext4_ext_mark_uninitialized(ex);
3152                 else
3153                         ext4_ext_mark_initialized(ex);
3154
3155                 if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
3156                         ext4_ext_try_to_merge(handle, inode, path, ex);
3157
3158                 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3159                 goto out;
3160         }
3161
3162         /* case a */
3163         memcpy(&orig_ex, ex, sizeof(orig_ex));
3164         ex->ee_len = cpu_to_le16(split - ee_block);
3165         if (split_flag & EXT4_EXT_MARK_UNINIT1)
3166                 ext4_ext_mark_uninitialized(ex);
3167
3168         /*
3169          * path may lead to a new leaf, not to the original leaf any more
3170          * after ext4_ext_insert_extent() returns.
3171          */
3172         err = ext4_ext_dirty(handle, inode, path + depth);
3173         if (err)
3174                 goto fix_extent_len;
3175
3176         ex2 = &newex;
3177         ex2->ee_block = cpu_to_le32(split);
3178         ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
3179         ext4_ext_store_pblock(ex2, newblock);
3180         if (split_flag & EXT4_EXT_MARK_UNINIT2)
3181                 ext4_ext_mark_uninitialized(ex2);
3182
3183         err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3184         if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3185                 if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
3186                         if (split_flag & EXT4_EXT_DATA_VALID1) {
3187                                 err = ext4_ext_zeroout(inode, ex2);
3188                                 zero_ex.ee_block = ex2->ee_block;
3189                                 zero_ex.ee_len = cpu_to_le16(
3190                                                 ext4_ext_get_actual_len(ex2));
3191                                 ext4_ext_store_pblock(&zero_ex,
3192                                                       ext4_ext_pblock(ex2));
3193                         } else {
3194                                 err = ext4_ext_zeroout(inode, ex);
3195                                 zero_ex.ee_block = ex->ee_block;
3196                                 zero_ex.ee_len = cpu_to_le16(
3197                                                 ext4_ext_get_actual_len(ex));
3198                                 ext4_ext_store_pblock(&zero_ex,
3199                                                       ext4_ext_pblock(ex));
3200                         }
3201                 } else {
3202                         err = ext4_ext_zeroout(inode, &orig_ex);
3203                         zero_ex.ee_block = orig_ex.ee_block;
3204                         zero_ex.ee_len = cpu_to_le16(
3205                                                 ext4_ext_get_actual_len(&orig_ex));
3206                         ext4_ext_store_pblock(&zero_ex,
3207                                               ext4_ext_pblock(&orig_ex));
3208                 }
3209
3210                 if (err)
3211                         goto fix_extent_len;
3212                 /* update the extent length and mark as initialized */
3213                 ex->ee_len = cpu_to_le16(ee_len);
3214                 ext4_ext_try_to_merge(handle, inode, path, ex);
3215                 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3216                 if (err)
3217                         goto fix_extent_len;
3218
3219                 /* update extent status tree */
3220                 err = ext4_zeroout_es(inode, &zero_ex);
3221
3222                 goto out;
3223         } else if (err)
3224                 goto fix_extent_len;
3225
3226 out:
3227         ext4_ext_show_leaf(inode, path);
3228         return err;
3229
3230 fix_extent_len:
3231         ex->ee_len = orig_ex.ee_len;
3232         ext4_ext_dirty(handle, inode, path + depth);
3233         return err;
3234 }
3235
3236 /*
3237  * ext4_split_extent() splits an extent and marks the extent covered
3238  * by @map as @split_flag indicates.
3239  *
3240  * It may result in splitting the extent into multiple extents (up to three)
3241  * There are three possibilities:
3242  *   a> There is no split required
3243  *   b> Splits into two extents: the split happens at either end of the extent
3244  *   c> Splits into three extents: someone is splitting in the middle of the extent
3245  *
3246  */
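
/*
 * For instance (illustrative numbers): with an extent covering [100, 200)
 * and a map of m_lblk = 120, m_len = 30, case c applies: the first
 * ext4_split_extent_at() call below splits at 150 (map->m_lblk +
 * map->m_len), the path is looked up again, and the second call splits at
 * 120 (map->m_lblk), leaving [100, 120), [120, 150) and [150, 200).
 */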
3247 static int ext4_split_extent(handle_t *handle,
3248                               struct inode *inode,
3249                               struct ext4_ext_path *path,
3250                               struct ext4_map_blocks *map,
3251                               int split_flag,
3252                               int flags)
3253 {
3254         ext4_lblk_t ee_block;
3255         struct ext4_extent *ex;
3256         unsigned int ee_len, depth;
3257         int err = 0;
3258         int uninitialized;
3259         int split_flag1, flags1;
3260         int allocated = map->m_len;
3261
3262         depth = ext_depth(inode);
3263         ex = path[depth].p_ext;
3264         ee_block = le32_to_cpu(ex->ee_block);
3265         ee_len = ext4_ext_get_actual_len(ex);
3266         uninitialized = ext4_ext_is_uninitialized(ex);
3267
3268         if (map->m_lblk + map->m_len < ee_block + ee_len) {
3269                 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
3270                 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3271                 if (uninitialized)
3272                         split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
3273                                        EXT4_EXT_MARK_UNINIT2;
3274                 if (split_flag & EXT4_EXT_DATA_VALID2)
3275                         split_flag1 |= EXT4_EXT_DATA_VALID1;
3276                 err = ext4_split_extent_at(handle, inode, path,
3277                                 map->m_lblk + map->m_len, split_flag1, flags1);
3278                 if (err)
3279                         goto out;
3280         } else {
3281                 allocated = ee_len - (map->m_lblk - ee_block);
3282         }
3283         /*
3284          * An updated path is required because the previous ext4_split_extent_at()
3285          * may result in a split of the original leaf or an extent zeroout.
3286          */
3287         ext4_ext_drop_refs(path);
3288         path = ext4_ext_find_extent(inode, map->m_lblk, path, 0);
3289         if (IS_ERR(path))
3290                 return PTR_ERR(path);
3291         depth = ext_depth(inode);
3292         ex = path[depth].p_ext;
3293         uninitialized = ext4_ext_is_uninitialized(ex);
3294         split_flag1 = 0;
3295
3296         if (map->m_lblk >= ee_block) {
3297                 split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
3298                 if (uninitialized) {
3299                         split_flag1 |= EXT4_EXT_MARK_UNINIT1;
3300                         split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
3301                                                      EXT4_EXT_MARK_UNINIT2);
3302                 }
3303                 err = ext4_split_extent_at(handle, inode, path,
3304                                 map->m_lblk, split_flag1, flags);
3305                 if (err)
3306                         goto out;
3307         }
3308
3309         ext4_ext_show_leaf(inode, path);
3310 out:
3311         return err ? err : allocated;
3312 }
3313
3314 /*
3315  * This function is called by ext4_ext_map_blocks() if someone tries to write
3316  * to an uninitialized extent. It may result in splitting the uninitialized
3317  * extent into multiple extents (up to three - one initialized and two
3318  * uninitialized).
3319  * There are three possibilities:
3320  *   a> There is no split required: Entire extent should be initialized
3321  *   b> Splits in two extents: Write is happening at either end of the extent
3322  *   c> Splits into three extents: someone is writing in the middle of the extent
3323  *
3324  * Pre-conditions:
3325  *  - The extent pointed to by 'path' is uninitialized.
3326  *  - The extent pointed to by 'path' contains a superset
3327  *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
3328  *
3329  * Post-conditions on success:
3330  *  - the returned value is the number of blocks beyond map->m_lblk
3331  *    that are allocated and initialized.
3332  *    It is guaranteed to be >= map->m_len.
3333  */
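
/*
 * To make the contract concrete (illustrative numbers): for an
 * uninitialized extent [100, 140) and a write with map->m_lblk = 100,
 * map->m_len = 16, at least [100, 116) must end up initialized.  The
 * return value counts the blocks from map->m_lblk that were allocated
 * and initialized, e.g. 40 if the whole extent was zeroed out and
 * converted in place.
 */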
3334 static int ext4_ext_convert_to_initialized(handle_t *handle,
3335                                            struct inode *inode,
3336                                            struct ext4_map_blocks *map,
3337                                            struct ext4_ext_path *path,
3338                                            int flags)
3339 {
3340         struct ext4_sb_info *sbi;
3341         struct ext4_extent_header *eh;
3342         struct ext4_map_blocks split_map;
3343         struct ext4_extent zero_ex;
3344         struct ext4_extent *ex, *abut_ex;
3345         ext4_lblk_t ee_block, eof_block;
3346         unsigned int ee_len, depth, map_len = map->m_len;
3347         int allocated = 0, max_zeroout = 0;
3348         int err = 0;
3349         int split_flag = 0;
3350
3351         ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical "
3352                 "block %llu, max_blocks %u\n", inode->i_ino,
3353                 (unsigned long long)map->m_lblk, map_len);
3354
3355         sbi = EXT4_SB(inode->i_sb);
3356         eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3357                 inode->i_sb->s_blocksize_bits;
3358         if (eof_block < map->m_lblk + map_len)
3359                 eof_block = map->m_lblk + map_len;
3360
3361         depth = ext_depth(inode);
3362         eh = path[depth].p_hdr;
3363         ex = path[depth].p_ext;
3364         ee_block = le32_to_cpu(ex->ee_block);
3365         ee_len = ext4_ext_get_actual_len(ex);
3366         zero_ex.ee_len = 0;
3367
3368         trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3369
3370         /* Pre-conditions */
3371         BUG_ON(!ext4_ext_is_uninitialized(ex));
3372         BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
3373
3374         /*
3375          * Attempt to transfer newly initialized blocks from the currently
3376          * uninitialized extent to its neighbor. This is much cheaper
3377          * than an insertion followed by a merge as those involve costly
3378          * memmove() calls. Transferring to the left is the common case in
3379          * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
3380          * followed by append writes.
3381          *
3382          * Limitations of the current logic:
3383          *  - L1: we do not deal with writes covering the whole extent.
3384          *    This would require removing the extent if the transfer
3385          *    is possible.
3386          *  - L2: we only attempt to merge with an extent stored in the
3387          *    same extent tree node.
3388          */
3389         if ((map->m_lblk == ee_block) &&
3390                 /* See if we can merge left */
3391                 (map_len < ee_len) &&           /*L1*/
3392                 (ex > EXT_FIRST_EXTENT(eh))) {  /*L2*/
3393                 ext4_lblk_t prev_lblk;
3394                 ext4_fsblk_t prev_pblk, ee_pblk;
3395                 unsigned int prev_len;
3396
3397                 abut_ex = ex - 1;
3398                 prev_lblk = le32_to_cpu(abut_ex->ee_block);
3399                 prev_len = ext4_ext_get_actual_len(abut_ex);
3400                 prev_pblk = ext4_ext_pblock(abut_ex);
3401                 ee_pblk = ext4_ext_pblock(ex);
3402
3403                 /*
3404                  * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3405                  * upon those conditions:
3406                  * - C1: abut_ex is initialized,
3407                  * - C2: abut_ex is logically abutting ex,
3408                  * - C3: abut_ex is physically abutting ex,
3409                  * - C4: abut_ex can receive the additional blocks without
3410                  *   overflowing the (initialized) length limit.
3411                  */
3412                 if ((!ext4_ext_is_uninitialized(abut_ex)) &&            /*C1*/
3413                         ((prev_lblk + prev_len) == ee_block) &&         /*C2*/
3414                         ((prev_pblk + prev_len) == ee_pblk) &&          /*C3*/
3415                         (prev_len < (EXT_INIT_MAX_LEN - map_len))) {    /*C4*/
3416                         err = ext4_ext_get_access(handle, inode, path + depth);
3417                         if (err)
3418                                 goto out;
3419
3420                         trace_ext4_ext_convert_to_initialized_fastpath(inode,
3421                                 map, ex, abut_ex);
3422
3423                         /* Shift the start of ex by 'map_len' blocks */
3424                         ex->ee_block = cpu_to_le32(ee_block + map_len);
3425                         ext4_ext_store_pblock(ex, ee_pblk + map_len);
3426                         ex->ee_len = cpu_to_le16(ee_len - map_len);
3427                         ext4_ext_mark_uninitialized(ex); /* Restore the flag */
3428
3429                         /* Extend abut_ex by 'map_len' blocks */
3430                         abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
3431
3432                         /* Result: number of initialized blocks past m_lblk */
3433                         allocated = map_len;
3434                 }
3435         } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
3436                    (map_len < ee_len) &&        /*L1*/
3437                    ex < EXT_LAST_EXTENT(eh)) {  /*L2*/
3438                 /* See if we can merge right */
3439                 ext4_lblk_t next_lblk;
3440                 ext4_fsblk_t next_pblk, ee_pblk;
3441                 unsigned int next_len;
3442
3443                 abut_ex = ex + 1;
3444                 next_lblk = le32_to_cpu(abut_ex->ee_block);
3445                 next_len = ext4_ext_get_actual_len(abut_ex);
3446                 next_pblk = ext4_ext_pblock(abut_ex);
3447                 ee_pblk = ext4_ext_pblock(ex);
3448
3449                 /*
3450                  * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3451                  * upon those conditions:
3452                  * - C1: abut_ex is initialized,
3453                  * - C2: abut_ex is logically abutting ex,
3454                  * - C3: abut_ex is physically abutting ex,
3455                  * - C4: abut_ex can receive the additional blocks without
3456                  *   overflowing the (initialized) length limit.
3457                  */
3458                 if ((!ext4_ext_is_uninitialized(abut_ex)) &&            /*C1*/
3459                     ((map->m_lblk + map_len) == next_lblk) &&           /*C2*/
3460                     ((ee_pblk + ee_len) == next_pblk) &&                /*C3*/
3461                     (next_len < (EXT_INIT_MAX_LEN - map_len))) {        /*C4*/
3462                         err = ext4_ext_get_access(handle, inode, path + depth);
3463                         if (err)
3464                                 goto out;
3465
3466                         trace_ext4_ext_convert_to_initialized_fastpath(inode,
3467                                 map, ex, abut_ex);
3468
3469                         /* Shift the start of abut_ex by 'map_len' blocks */
3470                         abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
3471                         ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
3472                         ex->ee_len = cpu_to_le16(ee_len - map_len);
3473                         ext4_ext_mark_uninitialized(ex); /* Restore the flag */
3474
3475                         /* Extend abut_ex by 'map_len' blocks */
3476                         abut_ex->ee_len = cpu_to_le16(next_len + map_len);
3477
3478                         /* Result: number of initialized blocks past m_lblk */
3479                         allocated = map_len;
3480                 }
3481         }
3482         if (allocated) {
3483                 /* Mark the block containing both extents as dirty */
3484                 ext4_ext_dirty(handle, inode, path + depth);
3485
3486                 /* Update path to point to the right extent */
3487                 path[depth].p_ext = abut_ex;
3488                 goto out;
3489         } else
3490                 allocated = ee_len - (map->m_lblk - ee_block);
3491
3492         WARN_ON(map->m_lblk < ee_block);
3493         /*
3494          * It is safe to convert extent to initialized via explicit
3495          * zeroout only if the extent is fully inside i_size or new_size.
3496          */
3497         split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3498
3499         if (EXT4_EXT_MAY_ZEROOUT & split_flag)
3500                 max_zeroout = sbi->s_extent_max_zeroout_kb >>
3501                         (inode->i_sb->s_blocksize_bits - 10);
3502
3503         /* If extent is less than s_extent_max_zeroout_kb, zeroout directly */
3504         if (max_zeroout && (ee_len <= max_zeroout)) {
3505                 err = ext4_ext_zeroout(inode, ex);
3506                 if (err)
3507                         goto out;
3508                 zero_ex.ee_block = ex->ee_block;
3509                 zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
3510                 ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
3511
3512                 err = ext4_ext_get_access(handle, inode, path + depth);
3513                 if (err)
3514                         goto out;
3515                 ext4_ext_mark_initialized(ex);
3516                 ext4_ext_try_to_merge(handle, inode, path, ex);
3517                 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3518                 goto out;
3519         }
3520
3521         /*
3522          * four cases:
3523          * 1. split the extent into three extents.
3524          * 2. split the extent into two extents, zeroout the first half.
3525          * 3. split the extent into two extents, zeroout the second half.
3526          * 4. split the extent into two extents without zeroout.
3527          */
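        /*
         * An illustrative walk through the case selection below (numbers
         * hypothetical): take ee_block = 0, ee_len = 50, map->m_lblk = 10,
         * map->m_len = 10, so allocated = 40.  With max_zeroout = 64,
         * "allocated <= max_zeroout" holds: the tail [10, 50) is zeroed out
         * and split_map widens to cover it (case 3).  With max_zeroout = 32
         * instead, "m_lblk - ee_block + m_len < max_zeroout" (20 < 32)
         * holds: the head [0, 10) is zeroed out and split_map starts at
         * ee_block (case 2).
         */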
3528         split_map.m_lblk = map->m_lblk;
3529         split_map.m_len = map->m_len;
3530
3531         if (max_zeroout && (allocated > map->m_len)) {
3532                 if (allocated <= max_zeroout) {
3533                         /* case 3 */
3534                         zero_ex.ee_block =
3535                                          cpu_to_le32(map->m_lblk);
3536                         zero_ex.ee_len = cpu_to_le16(allocated);
3537                         ext4_ext_store_pblock(&zero_ex,
3538                                 ext4_ext_pblock(ex) + map->m_lblk - ee_block);
3539                         err = ext4_ext_zeroout(inode, &zero_ex);
3540                         if (err)
3541                                 goto out;
3542                         split_map.m_lblk = map->m_lblk;
3543                         split_map.m_len = allocated;
3544                 } else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
3545                         /* case 2 */
3546                         if (map->m_lblk != ee_block) {
3547                                 zero_ex.ee_block = ex->ee_block;
3548                                 zero_ex.ee_len = cpu_to_le16(map->m_lblk -
3549                                                         ee_block);
3550                                 ext4_ext_store_pblock(&zero_ex,
3551                                                       ext4_ext_pblock(ex));
3552                                 err = ext4_ext_zeroout(inode, &zero_ex);
3553                                 if (err)
3554                                         goto out;
3555                         }
3556
3557                         split_map.m_lblk = ee_block;
3558                         split_map.m_len = map->m_lblk - ee_block + map->m_len;
3559                         allocated = map->m_len;
3560                 }
3561         }
3562
3563         allocated = ext4_split_extent(handle, inode, path,
3564                                       &split_map, split_flag, flags);
3565         if (allocated < 0)
3566                 err = allocated;
3567
3568 out:
3569         /* If we got an error, don't update the extent status tree */
3570         if (!err)
3571                 err = ext4_zeroout_es(inode, &zero_ex);
3572         return err ? err : allocated;
3573 }
3574
3575 /*
3576  * This function is called by ext4_ext_map_blocks() from
3577  * ext4_get_blocks_dio_write() when DIO to write
3578  * to an uninitialized extent.
3579  *
3580  * Writing to an uninitialized extent may result in splitting the uninitialized
3581  * extent into multiple initialized/uninitialized extents (up to three)
3582  * There are three possibilities:
3583  *   a> There is no split required: Entire extent should be uninitialized
3584  *   b> Splits in two extents: Write is happening at either end of the extent
3585  *   c> Splits into three extents: someone is writing in the middle of the extent
3586  *
3587  * One or more index blocks may be needed if the extent tree grows after
3588  * the uninitialized extent is split. To prevent ENOSPC from occurring when
3589  * the IO completes, we need to split the uninitialized extent before the
3590  * DIO code submits the IO. The uninitialized extent will be split into
3591  * (at most) three uninitialized extents. After the IO completes, the part
3592  * being filled will be converted to initialized by the end_io callback
3593  * via ext4_convert_unwritten_extents().
3594  *
3595  * Returns the size of uninitialized extent to be written on success.
3596  */
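
/*
 * A condensed sketch of the surrounding flow, assembled from this file
 * (simplified): ext4_ext_handle_uninitialized_extents() calls this helper
 * for the EXT4_GET_BLOCKS_PRE_IO case,
 *
 *	ret = ext4_split_unwritten_extents(handle, inode, map, path, flags);
 *	if (io)
 *		ext4_set_io_unwritten_flag(inode, io);
 *	else
 *		ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
 *
 * and once the IO completes, the filled part is converted by
 * ext4_convert_unwritten_extents_endio().
 */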
3597 static int ext4_split_unwritten_extents(handle_t *handle,
3598                                         struct inode *inode,
3599                                         struct ext4_map_blocks *map,
3600                                         struct ext4_ext_path *path,
3601                                         int flags)
3602 {
3603         ext4_lblk_t eof_block;
3604         ext4_lblk_t ee_block;
3605         struct ext4_extent *ex;
3606         unsigned int ee_len;
3607         int split_flag = 0, depth;
3608
3609         ext_debug("ext4_split_unwritten_extents: inode %lu, logical "
3610                 "block %llu, max_blocks %u\n", inode->i_ino,
3611                 (unsigned long long)map->m_lblk, map->m_len);
3612
3613         eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3614                 inode->i_sb->s_blocksize_bits;
3615         if (eof_block < map->m_lblk + map->m_len)
3616                 eof_block = map->m_lblk + map->m_len;
3617         /*
3618          * It is safe to convert extent to initialized via explicit
3619          * zeroout only if the extent is fully inside i_size or new_size.
3620          */
3621         depth = ext_depth(inode);
3622         ex = path[depth].p_ext;
3623         ee_block = le32_to_cpu(ex->ee_block);
3624         ee_len = ext4_ext_get_actual_len(ex);
3625
3626         split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3627         split_flag |= EXT4_EXT_MARK_UNINIT2;
3628         if (flags & EXT4_GET_BLOCKS_CONVERT)
3629                 split_flag |= EXT4_EXT_DATA_VALID2;
3630         flags |= EXT4_GET_BLOCKS_PRE_IO;
3631         return ext4_split_extent(handle, inode, path, map, split_flag, flags);
3632 }
3633
3634 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3635                                                 struct inode *inode,
3636                                                 struct ext4_map_blocks *map,
3637                                                 struct ext4_ext_path *path)
3638 {
3639         struct ext4_extent *ex;
3640         ext4_lblk_t ee_block;
3641         unsigned int ee_len;
3642         int depth;
3643         int err = 0;
3644
3645         depth = ext_depth(inode);
3646         ex = path[depth].p_ext;
3647         ee_block = le32_to_cpu(ex->ee_block);
3648         ee_len = ext4_ext_get_actual_len(ex);
3649
3650         ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical "
3651                 "block %llu, max_blocks %u\n", inode->i_ino,
3652                   (unsigned long long)ee_block, ee_len);
3653
3654         /* If the extent is larger than requested, it is a clear sign that we
3655          * still have some extent state machine issues left. So an extent split
3656          * is still required.
3657          * TODO: Once all related issues are fixed, this situation should be
3658          * illegal.
3659          */
3660         if (ee_block != map->m_lblk || ee_len > map->m_len) {
3661 #ifdef EXT4_DEBUG
3662                 ext4_warning(inode->i_sb, "Inode (%lu) finished: extent logical block %llu,"
3663                              " len %u; IO logical block %llu, len %u\n",
3664                              inode->i_ino, (unsigned long long)ee_block, ee_len,
3665                              (unsigned long long)map->m_lblk, map->m_len);
3666 #endif
3667                 err = ext4_split_unwritten_extents(handle, inode, map, path,
3668                                                    EXT4_GET_BLOCKS_CONVERT);
3669                 if (err < 0)
3670                         goto out;
3671                 ext4_ext_drop_refs(path);
3672                 path = ext4_ext_find_extent(inode, map->m_lblk, path, 0);
3673                 if (IS_ERR(path)) {
3674                         err = PTR_ERR(path);
3675                         goto out;
3676                 }
3677                 depth = ext_depth(inode);
3678                 ex = path[depth].p_ext;
3679         }
3680
3681         err = ext4_ext_get_access(handle, inode, path + depth);
3682         if (err)
3683                 goto out;
3684         /* first mark the extent as initialized */
3685         ext4_ext_mark_initialized(ex);
3686
3687         /* note: ext4_ext_correct_indexes() isn't needed here because
3688          * borders are not changed
3689          */
3690         ext4_ext_try_to_merge(handle, inode, path, ex);
3691
3692         /* Mark modified extent as dirty */
3693         err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3694 out:
3695         ext4_ext_show_leaf(inode, path);
3696         return err;
3697 }
3698
3699 static void unmap_underlying_metadata_blocks(struct block_device *bdev,
3700                         sector_t block, int count)
3701 {
3702         int i;
3703         for (i = 0; i < count; i++)
3704                 unmap_underlying_metadata(bdev, block + i);
3705 }
3706
3707 /*
3708  * Handle EOFBLOCKS_FL flag, clearing it if necessary
3709  */
3710 static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
3711                               ext4_lblk_t lblk,
3712                               struct ext4_ext_path *path,
3713                               unsigned int len)
3714 {
3715         int i, depth;
3716         struct ext4_extent_header *eh;
3717         struct ext4_extent *last_ex;
3718
3719         if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
3720                 return 0;
3721
3722         depth = ext_depth(inode);
3723         eh = path[depth].p_hdr;
3724
3725         /*
3726          * We're going to remove EOFBLOCKS_FL entirely in the future, so we
3727          * do not care about this case anymore. Simply remove the flag
3728          * if there are no extents.
3729          */
3730         if (unlikely(!eh->eh_entries))
3731                 goto out;
3732         last_ex = EXT_LAST_EXTENT(eh);
3733         /*
3734          * We should clear the EOFBLOCKS_FL flag if we are writing the
3735          * last block in the last extent in the file.  We test this by
3736          * first checking to see if the caller to
3737          * ext4_ext_get_blocks() was interested in the last block (or
3738          * a block beyond the last block) in the current extent.  If
3739          * this turns out to be false, we can bail out from this
3740          * function immediately.
3741          */
3742         if (lblk + len < le32_to_cpu(last_ex->ee_block) +
3743             ext4_ext_get_actual_len(last_ex))
3744                 return 0;
3745         /*
3746          * If the caller does appear to be planning to write at or
3747          * beyond the end of the current extent, we then test to see
3748          * if the current extent is the last extent in the file, by
3749          * checking to make sure it was reached via the rightmost node
3750          * at each level of the tree.
3751          */
3752         for (i = depth-1; i >= 0; i--)
3753                 if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
3754                         return 0;
3755 out:
3756         ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3757         return ext4_mark_inode_dirty(handle, inode);
3758 }
3759
3760 /**
3761  * ext4_find_delalloc_range: find a delayed allocated block in the given range.
3762  *
3763  * Return 1 if there is a delalloc block in the range, otherwise 0.
3764  */
3765 int ext4_find_delalloc_range(struct inode *inode,
3766                              ext4_lblk_t lblk_start,
3767                              ext4_lblk_t lblk_end)
3768 {
3769         struct extent_status es;
3770
3771         ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es);
3772         if (es.es_len == 0)
3773                 return 0; /* there is no delay extent in this tree */
3774         else if (es.es_lblk <= lblk_start &&
3775                  lblk_start < es.es_lblk + es.es_len)
3776                 return 1;
3777         else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end)
3778                 return 1;
3779         else
3780                 return 0;
3781 }
3782
3783 int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
3784 {
3785         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3786         ext4_lblk_t lblk_start, lblk_end;
3787         lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
3788         lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
3789
3790         return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
3791 }
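
/*
 * The masking above is plain power-of-two rounding; a worked example with
 * a hypothetical cluster ratio of 16: for lblk = 35,
 *
 *	lblk_start = 35 & ~(16 - 1) = 32;
 *	lblk_end   = 32 + 16 - 1 = 47;
 *
 * so the whole cluster [32, 47] is probed for delalloc blocks.
 */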
3792
3793 /**
3794  * Determines how many complete clusters (out of those specified by the 'map')
3795  * are under delalloc and had quota reserved for them.
3796  * This function is called when we are writing out the blocks that were
3797  * originally written with their allocation delayed, but then the space was
3798  * allocated using fallocate() before the delayed allocation could be resolved.
3799  * The cases to look for are:
3800  * ('=' indicates delayed allocated blocks
3801  *  '-' indicates non-delayed allocated blocks)
3802  * (a) partial clusters towards beginning and/or end outside of allocated range
3803  *     are not delalloc'ed.
3804  *      Ex:
3805  *      |----c---=|====c====|====c====|===-c----|
3806  *               |++++++ allocated ++++++|
3807  *      ==> 4 complete clusters in above example
3808  *
3809  * (b) partial cluster (outside of allocated range) towards either end is
3810  *     marked for delayed allocation. In this case, we will exclude that
3811  *     cluster.
3812  *      Ex:
3813  *      |----====c========|========c========|
3814  *           |++++++ allocated ++++++|
3815  *      ==> 1 complete cluster in above example
3816  *
3817  *      Ex:
3818  *      |================c================|
3819  *            |++++++ allocated ++++++|
3820  *      ==> 0 complete clusters in above example
3821  *
3822  * ext4_da_update_reserve_space() will be called only if we
3823  * determine here that there were some "entire" clusters that span
3824  * this 'allocated' range.
3825  * In the non-bigalloc case, this function will just end up returning num_blks
3826  * without ever calling ext4_find_delalloc_range.
3827  */
3828 static unsigned int
3829 get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
3830                            unsigned int num_blks)
3831 {
3832         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3833         ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
3834         ext4_lblk_t lblk_from, lblk_to, c_offset;
3835         unsigned int allocated_clusters = 0;
3836
3837         alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
3838         alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
3839
3840         /* max possible clusters for this allocation */
3841         allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
3842
3843         trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
3844
3845         /* Check towards left side */
3846         c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
3847         if (c_offset) {
3848                 lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
3849                 lblk_to = lblk_from + c_offset - 1;
3850
3851                 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
3852                         allocated_clusters--;
3853         }
3854
3855         /* Now check towards right. */
3856         c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
3857         if (allocated_clusters && c_offset) {
3858                 lblk_from = lblk_start + num_blks;
3859                 lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
3860
3861                 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
3862                         allocated_clusters--;
3863         }
3864
3865         return allocated_clusters;
3866 }
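
/*
 * A worked example (hypothetical numbers, cluster ratio 4): for
 * lblk_start = 5 and num_blks = 10, the allocation touches clusters 1..3,
 * so allocated_clusters starts at 3.  The left partial cluster then
 * covers block [4, 4] and the right one block [15, 15]; each of those
 * ranges that contains a delalloc block subtracts one cluster from the
 * count.
 */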
3867
3868 static int
3869 ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3870                         struct ext4_map_blocks *map,
3871                         struct ext4_ext_path *path, int flags,
3872                         unsigned int allocated, ext4_fsblk_t newblock)
3873 {
3874         int ret = 0;
3875         int err = 0;
3876         ext4_io_end_t *io = ext4_inode_aio(inode);
3877
3878         ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
3879                   "block %llu, max_blocks %u, flags %x, allocated %u\n",
3880                   inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
3881                   flags, allocated);
3882         ext4_ext_show_leaf(inode, path);
3883
3884         /*
3885          * When writing into uninitialized space, we should not fail to
3886          * allocate metadata blocks for the new extent block if needed.
3887          */
3888         flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
3889
3890         trace_ext4_ext_handle_uninitialized_extents(inode, map, flags,
3891                                                     allocated, newblock);
3892
3893         /* get_block() before submitting the IO, split the extent */
3894         if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3895                 ret = ext4_split_unwritten_extents(handle, inode, map,
3896                                                    path, flags);
3897                 if (ret <= 0)
3898                         goto out;
3899                 /*
3900                  * Flag the inode (non-aio case) or end_io struct (aio case)
3901                  * that this IO needs conversion to written when the IO is
3902                  * completed
3903                  */
3904                 if (io)
3905                         ext4_set_io_unwritten_flag(inode, io);
3906                 else
3907                         ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3908                 map->m_flags |= EXT4_MAP_UNWRITTEN;
3909                 if (ext4_should_dioread_nolock(inode))
3910                         map->m_flags |= EXT4_MAP_UNINIT;
3911                 goto out;
3912         }
3913         /* IO end_io complete, convert the filled extent to written */
3914         if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
3915                 ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
3916                                                         path);
3917                 if (ret >= 0) {
3918                         ext4_update_inode_fsync_trans(handle, inode, 1);
3919                         err = check_eofblocks_fl(handle, inode, map->m_lblk,
3920                                                  path, map->m_len);
3921                 } else
3922                         err = ret;
3923                 map->m_flags |= EXT4_MAP_MAPPED;
3924                 if (allocated > map->m_len)
3925                         allocated = map->m_len;
3926                 map->m_len = allocated;
3927                 goto out2;
3928         }
3929         /* buffered IO case */
3930         /*
3931          * repeated fallocate creation request:
3932          * we already have an unwritten extent
3933          */
3934         if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
3935                 map->m_flags |= EXT4_MAP_UNWRITTEN;
3936                 goto map_out;
3937         }
3938
3939         /* buffered READ or buffered write_begin() lookup */
3940         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3941                 /*
3942                  * We have blocks reserved already.  We
3943                  * return allocated blocks so that delalloc
3944                  * won't do block reservation for us.  But
3945                  * the buffer head will be unmapped so that
3946                  * a read from the block returns 0s.
3947                  */
3948                 map->m_flags |= EXT4_MAP_UNWRITTEN;
3949                 goto out1;
3950         }
3951
3952         /* buffered write, writepage time, convert */
3953         ret = ext4_ext_convert_to_initialized(handle, inode, map, path, flags);
3954         if (ret >= 0)
3955                 ext4_update_inode_fsync_trans(handle, inode, 1);
3956 out:
3957         if (ret <= 0) {
3958                 err = ret;
3959                 goto out2;
3960         } else
3961                 allocated = ret;
3962         map->m_flags |= EXT4_MAP_NEW;
3963         /*
3964          * if we allocated more blocks than requested
3965          * we need to make sure we unmap the extra blocks
3966          * allocated. The actually needed block will get
3967          * unmapped later when we find the buffer_head marked
3968          * new.
3969          */
3970         if (allocated > map->m_len) {
3971                 unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3972                                         newblock + map->m_len,
3973                                         allocated - map->m_len);
3974                 allocated = map->m_len;
3975         }
3976         map->m_len = allocated;
3977
3978         /*
3979          * If we have done fallocate at an offset that is already
3980          * delayed allocated, we would have block reservation
3981          * and quota reservation done in the delayed write path.
3982          * But fallocate would have already updated the quota and block
3983          * count for this offset. So cancel these reservations.
3984          */
3985         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
3986                 unsigned int reserved_clusters;
3987                 reserved_clusters = get_reserved_cluster_alloc(inode,
3988                                 map->m_lblk, map->m_len);
3989                 if (reserved_clusters)
3990                         ext4_da_update_reserve_space(inode,
3991                                                      reserved_clusters,
3992                                                      0);
3993         }
3994
3995 map_out:
3996         map->m_flags |= EXT4_MAP_MAPPED;
3997         if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
3998                 err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
3999                                          map->m_len);
4000                 if (err < 0)
4001                         goto out2;
4002         }
4003 out1:
4004         if (allocated > map->m_len)
4005                 allocated = map->m_len;
4006         ext4_ext_show_leaf(inode, path);
4007         map->m_pblk = newblock;
4008         map->m_len = allocated;
4009 out2:
4010         if (path) {
4011                 ext4_ext_drop_refs(path);
4012                 kfree(path);
4013         }
4014         return err ? err : allocated;
4015 }
4016
4017 /*
4018  * get_implied_cluster_alloc - check to see if the requested
4019  * allocation (in the map structure) overlaps with a cluster already
4020  * allocated in an extent.
4021  *      @sb     The filesystem superblock structure
4022  *      @map    The requested lblk->pblk mapping
4023  *      @ex     The extent structure which might contain an implied
4024  *                      cluster allocation
4025  *
4026  * This function is called by ext4_ext_map_blocks() after we failed to
4027  * find blocks that were already in the inode's extent tree.  Hence,
4028  * we know that the beginning of the requested region cannot overlap
4029  * the extent from the inode's extent tree.  There are three cases we
4030  * want to catch.  The first is this case:
4031  *
4032  *               |--- cluster # N--|
4033  *    |--- extent ---|  |---- requested region ---|
4034  *                      |==========|
4035  *
4036  * The second case that we need to test for is this one:
4037  *
4038  *   |--------- cluster # N ----------------|
4039  *         |--- requested region --|   |------- extent ----|
4040  *         |=======================|
4041  *
4042  * The third case is when the requested region lies between two extents
4043  * within the same cluster:
4044  *          |------------- cluster # N-------------|
4045  * |----- ex -----|                  |---- ex_right ----|
4046  *                  |------ requested region ------|
4047  *                  |================|
4048  *
4049  * In each of the above cases, we need to set map->m_pblk and
4050  * map->m_len so that they correspond to the extent labelled
4051  * "|====|" from cluster #N, since it is already in use for data in
4052  * cluster EXT4_B2C(sbi, map->m_lblk).  We will then return 1 to
4053  * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
4054  * as a new "allocated" block region.  Otherwise, we will return 0 and
4055  * ext4_ext_map_blocks() will then allocate one or more new clusters
4056  * by calling ext4_mb_new_blocks().
4057  */
4058 static int get_implied_cluster_alloc(struct super_block *sb,
4059                                      struct ext4_map_blocks *map,
4060                                      struct ext4_extent *ex,
4061                                      struct ext4_ext_path *path)
4062 {
4063         struct ext4_sb_info *sbi = EXT4_SB(sb);
4064         ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
4065         ext4_lblk_t ex_cluster_start, ex_cluster_end;
4066         ext4_lblk_t rr_cluster_start;
4067         ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4068         ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4069         unsigned short ee_len = ext4_ext_get_actual_len(ex);
4070
4071         /* The extent passed in that we are trying to match */
4072         ex_cluster_start = EXT4_B2C(sbi, ee_block);
4073         ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
4074
4075         /* The requested region passed into ext4_map_blocks() */
4076         rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
4077
4078         if ((rr_cluster_start == ex_cluster_end) ||
4079             (rr_cluster_start == ex_cluster_start)) {
4080                 if (rr_cluster_start == ex_cluster_end)
4081                         ee_start += ee_len - 1;
4082                 map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
4083                         c_offset;
4084                 map->m_len = min(map->m_len,
4085                                  (unsigned) sbi->s_cluster_ratio - c_offset);
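                /*
                 * Worked example (hypothetical numbers): with
                 * s_cluster_ratio = 16 and map->m_lblk = 35, c_offset
                 * is 3, so m_pblk becomes the cluster base of ee_start
                 * plus 3 and m_len is capped at 16 - 3 = 13 blocks,
                 * keeping the mapping inside cluster #N.
                 */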
4086                 /*
4087                  * Check for and handle this case:
4088                  *
4089                  *   |--------- cluster # N-------------|
4090                  *                     |------- extent ----|
4091                  *         |--- requested region ---|
4092                  *         |===========|
4093                  */
4094
4095                 if (map->m_lblk < ee_block)
4096                         map->m_len = min(map->m_len, ee_block - map->m_lblk);
4097
4098                 /*
4099                  * Check for the case where there is already another allocated
4100                  * block to the right of 'ex' but before the end of the cluster.
4101                  *
4102                  *          |------------- cluster # N-------------|
4103                  * |----- ex -----|                  |---- ex_right ----|
4104                  *                  |------ requested region ------|
4105                  *                  |================|
4106                  */
4107                 if (map->m_lblk > ee_block) {
4108                         ext4_lblk_t next = ext4_ext_next_allocated_block(path);
4109                         map->m_len = min(map->m_len, next - map->m_lblk);
4110                 }
4111
4112                 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
4113                 return 1;
4114         }
4115
4116         trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
4117         return 0;
4118 }
4119
4120
4121 /*
4122  * Block allocation/map/preallocation routine for extent-based files
4123  *
4124  * Must be called with
4125  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
4126  * blocks (i.e., create is zero); otherwise with
4127  * down_write(&EXT4_I(inode)->i_data_sem).
4128  *
4129  * return > 0, number of blocks already mapped/allocated;
4130  *          if create == 0 and these are pre-allocated blocks,
4131  *              the buffer head is unmapped;
4132  *          otherwise blocks are mapped
4133  *
4134  * return = 0, if plain look up failed (blocks have not been allocated);
4135  *          buffer head is unmapped
4136  *
4137  * return < 0, error case
4138  */
4139 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4140                         struct ext4_map_blocks *map, int flags)
4141 {
4142         struct ext4_ext_path *path = NULL;
4143         struct ext4_extent newex, *ex, *ex2;
4144         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4145         ext4_fsblk_t newblock = 0;
4146         int free_on_err = 0, err = 0, depth;
4147         unsigned int allocated = 0, offset = 0;
4148         unsigned int allocated_clusters = 0;
4149         struct ext4_allocation_request ar;
4150         ext4_io_end_t *io = ext4_inode_aio(inode);
4151         ext4_lblk_t cluster_offset;
4152         int set_unwritten = 0;
4153
4154         ext_debug("blocks %u/%u requested for inode %lu\n",
4155                   map->m_lblk, map->m_len, inode->i_ino);
4156         trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
4157
4158         /* find extent for this block */
4159         path = ext4_ext_find_extent(inode, map->m_lblk, NULL, 0);
4160         if (IS_ERR(path)) {
4161                 err = PTR_ERR(path);
4162                 path = NULL;
4163                 goto out2;
4164         }
4165
4166         depth = ext_depth(inode);
4167
4168         /*
4169          * A consistent leaf must not be empty; this situation
4170          * is possible, though, _during_ tree modification.
4171          * This is why the assert can't live in ext4_ext_find_extent().
4172          */
4173         if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
4174                 EXT4_ERROR_INODE(inode, "bad extent address "
4175                                  "lblock: %lu, depth: %d pblock %lld",
4176                                  (unsigned long) map->m_lblk, depth,
4177                                  path[depth].p_block);
4178                 err = -EIO;
4179                 goto out2;
4180         }
4181
4182         ex = path[depth].p_ext;
4183         if (ex) {
4184                 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4185                 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4186                 unsigned short ee_len;
4187
4188                 /*
4189                  * Uninitialized extents are treated as holes, except that
4190                  * we split out initialized portions during a write.
4191                  */
4192                 ee_len = ext4_ext_get_actual_len(ex);
4193
4194                 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
4195
4196                 /* if found extent covers block, simply return it */
4197                 if (in_range(map->m_lblk, ee_block, ee_len)) {
4198                         newblock = map->m_lblk - ee_block + ee_start;
4199                         /* number of remaining blocks in the extent */
4200                         allocated = ee_len - (map->m_lblk - ee_block);
4201                         ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
4202                                   ee_block, ee_len, newblock);
4203
4204                         if (!ext4_ext_is_uninitialized(ex))
4205                                 goto out;
4206
4207                         allocated = ext4_ext_handle_uninitialized_extents(
4208                                 handle, inode, map, path, flags,
4209                                 allocated, newblock);
4210                         goto out3;
4211                 }
4212         }
4213
4214         if ((sbi->s_cluster_ratio > 1) &&
4215             ext4_find_delalloc_cluster(inode, map->m_lblk))
4216                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4217
4218         /*
4219          * requested block isn't allocated yet;
4220          * we can't create blocks if the create flag is zero
4221          */
4222         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
4223                 /*
4224                  * put the just-found gap into the cache to speed up
4225                  * subsequent requests
4226                  */
4227                 if ((flags & EXT4_GET_BLOCKS_NO_PUT_HOLE) == 0)
4228                         ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
4229                 goto out2;
4230         }
4231
4232         /*
4233          * Okay, we need to do block allocation.
4234          */
4235         map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
4236         newex.ee_block = cpu_to_le32(map->m_lblk);
4237         cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
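        /*
         * (The mask above works because s_cluster_ratio is always a
         * power of two, so "x & (ratio - 1)" equals "x % ratio".)
         */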
4238
4239         /*
4240          * If we are doing bigalloc, check to see if the extent returned
4241          * by ext4_ext_find_extent() implies a cluster we can use.
4242          */
4243         if (cluster_offset && ex &&
4244             get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
4245                 ar.len = allocated = map->m_len;
4246                 newblock = map->m_pblk;
4247                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4248                 goto got_allocated_blocks;
4249         }
4250
4251         /* find neighbour allocated blocks */
4252         ar.lleft = map->m_lblk;
4253         err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4254         if (err)
4255                 goto out2;
4256         ar.lright = map->m_lblk;
4257         ex2 = NULL;
4258         err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4259         if (err)
4260                 goto out2;
4261
4262         /* Check if the extent after searching to the right implies a
4263          * cluster we can use. */
4264         if ((sbi->s_cluster_ratio > 1) && ex2 &&
4265             get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
4266                 ar.len = allocated = map->m_len;
4267                 newblock = map->m_pblk;
4268                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4269                 goto got_allocated_blocks;
4270         }
4271
4272         /*
4273          * See if request is beyond maximum number of blocks we can have in
4274          * a single extent. For an initialized extent this limit is
4275          * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
4276          * EXT_UNINIT_MAX_LEN.
4277          */
4278         if (map->m_len > EXT_INIT_MAX_LEN &&
4279             !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4280                 map->m_len = EXT_INIT_MAX_LEN;
4281         else if (map->m_len > EXT_UNINIT_MAX_LEN &&
4282                  (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4283                 map->m_len = EXT_UNINIT_MAX_LEN;
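        /*
         * (EXT_INIT_MAX_LEN is 1 << 15 blocks; an uninitialized extent
         * can hold one block fewer, since the high bit of ee_len is
         * used to mark the extent uninitialized.)
         */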
4284
4285         /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
4286         newex.ee_len = cpu_to_le16(map->m_len);
4287         err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4288         if (err)
4289                 allocated = ext4_ext_get_actual_len(&newex);
4290         else
4291                 allocated = map->m_len;
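        /*
         * (ext4_ext_check_overlap() trims newex.ee_len when the proposed
         * extent would run into the next allocated block and returns
         * nonzero in that case, which is why "allocated" picks up the
         * trimmed length above.)
         */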
4292
4293         /* allocate new block */
4294         ar.inode = inode;
4295         ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4296         ar.logical = map->m_lblk;
4297         /*
4298          * We calculate the offset from the beginning of the cluster
4299          * for the logical block number, since when we allocate a
4300          * physical cluster, the physical block should start at the
4301          * same offset from the beginning of the cluster.  This is
4302          * needed so that future calls to get_implied_cluster_alloc()
4303          * work correctly.
4304          */
4305         offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
4306         ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
4307         ar.goal -= offset;
4308         ar.logical -= offset;
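        /*
         * Worked example (hypothetical numbers): with s_cluster_ratio = 4,
         * map->m_lblk = 10 and allocated = 2, offset is 2, so we ask
         * mballoc for EXT4_NUM_B2C(sbi, 2 + 2) = 1 cluster, with the
         * goal and logical start rounded down to the cluster boundary
         * at block 8.
         */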
4309         if (S_ISREG(inode->i_mode))
4310                 ar.flags = EXT4_MB_HINT_DATA;
4311         else
4312                 /* disable in-core preallocation for non-regular files */
4313                 ar.flags = 0;
4314         if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4315                 ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4316         newblock = ext4_mb_new_blocks(handle, &ar, &err);
4317         if (!newblock)
4318                 goto out2;
4319         ext_debug("allocate new block: goal %llu, found %llu/%u\n",
4320                   ar.goal, newblock, allocated);
4321         free_on_err = 1;
4322         allocated_clusters = ar.len;
4323         ar.len = EXT4_C2B(sbi, ar.len) - offset;
4324         if (ar.len > allocated)
4325                 ar.len = allocated;
4326
4327 got_allocated_blocks:
4328         /* try to insert new extent into found leaf and return */
4329         ext4_ext_store_pblock(&newex, newblock + offset);
4330         newex.ee_len = cpu_to_le16(ar.len);
4331         /* Mark uninitialized */
4332         if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
4333                 ext4_ext_mark_uninitialized(&newex);
4334                 map->m_flags |= EXT4_MAP_UNWRITTEN;
4335                 /*
4336                  * An io_end structure is created for every IO write to an
4337                  * uninitialized extent. To avoid unnecessary conversion,
4338                  * here we flag only the IO that really needs it.
4339                  * For the non-async direct IO case, flag the inode state
4340                  * so that we perform the conversion when IO is done.
4341                  */
4342                 if ((flags & EXT4_GET_BLOCKS_PRE_IO))
4343                         set_unwritten = 1;
4344                 if (ext4_should_dioread_nolock(inode))
4345                         map->m_flags |= EXT4_MAP_UNINIT;
4346         }
4347
4348         err = 0;
4349         if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
4350                 err = check_eofblocks_fl(handle, inode, map->m_lblk,
4351                                          path, ar.len);
4352         if (!err)
4353                 err = ext4_ext_insert_extent(handle, inode, path,
4354                                              &newex, flags);
4355
4356         if (!err && set_unwritten) {
4357                 if (io)
4358                         ext4_set_io_unwritten_flag(inode, io);
4359                 else
4360                         ext4_set_inode_state(inode,
4361                                              EXT4_STATE_DIO_UNWRITTEN);
4362         }
4363
4364         if (err && free_on_err) {
4365                 int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
4366                         EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
4367                 /* free data blocks we just allocated */
4368                 /* not a good idea to call discard here directly,
4369                  * but otherwise we'd need to call it on every free() */
4370                 ext4_discard_preallocations(inode);
4371                 ext4_free_blocks(handle, inode, NULL, newblock,
4372                                  EXT4_C2B(sbi, allocated_clusters), fb_flags);
4373                 goto out2;
4374         }
4375
4376         /* the previous routine could have used the block we allocated */
4377         newblock = ext4_ext_pblock(&newex);
4378         allocated = ext4_ext_get_actual_len(&newex);
4379         if (allocated > map->m_len)
4380                 allocated = map->m_len;
4381         map->m_flags |= EXT4_MAP_NEW;
4382
4383         /*
4384          * Update reserved blocks/metadata blocks after successful
4385          * block allocation which had been deferred till now.
4386          */
4387         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
4388                 unsigned int reserved_clusters;
4389                 /*
4390                  * Check how many clusters we had reserved for this allocated range
4391                  */
4392                 reserved_clusters = get_reserved_cluster_alloc(inode,
4393                                                 map->m_lblk, allocated);
4394                 if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
4395                         if (reserved_clusters) {
4396                                 /*
4397                                  * We have clusters reserved for this range.
4398                                  * and are simply using blocks from a previously
4399                                  * allocated cluster, we should release the
4400                                  * allocated cluster, we should release the
4401                                  * reservation and not claim quota.
4402                                  */
4403                                 ext4_da_update_reserve_space(inode,
4404                                                 reserved_clusters, 0);
4405                         }
4406                 } else {
4407                         BUG_ON(allocated_clusters < reserved_clusters);
4408                         if (reserved_clusters < allocated_clusters) {
4409                                 struct ext4_inode_info *ei = EXT4_I(inode);
4410                                 int reservation = allocated_clusters -
4411                                                   reserved_clusters;
4412                                 /*
4413                                  * It seems we claimed a few clusters outside of
4414                                  * the range of this allocation. We should give
4415                                  * them back to the reservation pool. This can
4416                                  * happen in the following case:
4417                                  *
4418                                  * * Suppose s_cluster_ratio is 4 (i.e., each
4419                                  *   cluster has 4 blocks; thus the clusters
4420                                  *   are [0-3], [4-7], [8-11], ...).
4421                                  * * First comes a delayed allocation write for
4422                                  *   logical blocks 10 & 11. Since there were no
4423                                  *   previous delayed allocated blocks in the
4424                                  *   range [8-11], we would reserve 1 cluster
4425                                  *   for this write.
4426                                  * * Next comes a write for logical blocks 3 to
4427                                  *   8. In this case, we will reserve 2 clusters
4428                                  *   (for [0-3] and [4-7]; not for [8-11], as
4429                                  *   that range already has delayed allocated
4430                                  *   blocks). Total reserved clusters is now 3.
4431                                  * * Now, at delayed allocation writeout time,
4432                                  *   we will first write blocks [3-8] and
4433                                  *   allocate 3 clusters for writing these
4434                                  *   blocks, claiming all three of the
4435                                  *   reserved clusters above.
4436                                  * * When we then come here to write out
4437                                  *   blocks [10-11], we would expect to claim
4438                                  *   the reservation of 1 cluster we had made
4439                                  *   (and we would claim it, since there are no
4440                                  *   more delayed allocated blocks in the range
4441                                  *   [8-11]). But our reserved cluster count has
4442                                  *   already gone to 0.
4443                                  *
4444                                  *   Thus, at step 4 above, when we determine
4445                                  *   that there are still some unwritten delayed
4446                                  *   allocated blocks outside of our current
4447                                  *   block range, we should increment the
4448                                  *   reserved clusters count so that when the
4449                                  *   remaining blocks finally get written, we
4450                                  *   can claim them.
4451                                  */
4452                                 dquot_reserve_block(inode,
4453                                                 EXT4_C2B(sbi, reservation));
4454                                 spin_lock(&ei->i_block_reservation_lock);
4455                                 ei->i_reserved_data_blocks += reservation;
4456                                 spin_unlock(&ei->i_block_reservation_lock);
4457                         }
4458                         /*
4459                          * We will claim quota for all newly allocated blocks.
4460                          * We're updating the reserved space *after* the
4461                          * correction above so we do not accidentally free
4462                          * all the metadata reservation because we might
4463                          * actually need it later on.
4464                          */
4465                         ext4_da_update_reserve_space(inode, allocated_clusters,
4466                                                         1);
4467                 }
4468         }
4469
4470         /*
4471          * Cache the extent and update transaction to commit on fdatasync only
4472          * when it is _not_ an uninitialized extent.
4473          */
4474         if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0)
4475                 ext4_update_inode_fsync_trans(handle, inode, 1);
4476         else
4477                 ext4_update_inode_fsync_trans(handle, inode, 0);
4478 out:
4479         if (allocated > map->m_len)
4480                 allocated = map->m_len;
4481         ext4_ext_show_leaf(inode, path);
4482         map->m_flags |= EXT4_MAP_MAPPED;
4483         map->m_pblk = newblock;
4484         map->m_len = allocated;
4485 out2:
4486         if (path) {
4487                 ext4_ext_drop_refs(path);
4488                 kfree(path);
4489         }
4490
4491 out3:
4492         trace_ext4_ext_map_blocks_exit(inode, flags, map,
4493                                        err ? err : allocated);
4494         ext4_es_lru_add(inode);
4495         return err ? err : allocated;
4496 }
4497
4498 void ext4_ext_truncate(handle_t *handle, struct inode *inode)
4499 {
4500         struct super_block *sb = inode->i_sb;
4501         ext4_lblk_t last_block;
4502         int err = 0;
4503
4504         /*
4505          * TODO: optimization is possible here.
4506          * Probably we need not scan at all,
4507          * because page truncation is enough.
4508          */
4509
4510         /* we have to know where to truncate from in the crash case */
4511         EXT4_I(inode)->i_disksize = inode->i_size;
4512         ext4_mark_inode_dirty(handle, inode);
4513
4514         last_block = (inode->i_size + sb->s_blocksize - 1)
4515                         >> EXT4_BLOCK_SIZE_BITS(sb);
4516 retry:
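        /*
         * ext4_es_remove_extent() may need to allocate memory to split
         * an extent in the extent status tree; on ENOMEM, give
         * writeback a chance to make progress and then retry.
         */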
4517         err = ext4_es_remove_extent(inode, last_block,
4518                                     EXT_MAX_BLOCKS - last_block);
4519         if (err == -ENOMEM) {
4520                 cond_resched();
4521                 congestion_wait(BLK_RW_ASYNC, HZ/50);
4522                 goto retry;
4523         }
4524         if (err) {
4525                 ext4_std_error(inode->i_sb, err);
4526                 return;
4527         }
4528         err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
4529         ext4_std_error(inode->i_sb, err);
4530 }
4531
4532 static void ext4_falloc_update_inode(struct inode *inode,
4533                                 int mode, loff_t new_size, int update_ctime)
4534 {
4535         struct timespec now;
4536
4537         if (update_ctime) {
4538                 now = current_fs_time(inode->i_sb);
4539                 if (!timespec_equal(&inode->i_ctime, &now))
4540                         inode->i_ctime = now;
4541         }
4542         /*
4543          * Update only when preallocation was requested beyond
4544          * the file size.
4545          */
4546         if (!(mode & FALLOC_FL_KEEP_SIZE)) {
4547                 if (new_size > i_size_read(inode))
4548                         i_size_write(inode, new_size);
4549                 if (new_size > EXT4_I(inode)->i_disksize)
4550                         ext4_update_i_disksize(inode, new_size);
4551         } else {
4552                 /*
4553                  * Mark that we allocate beyond EOF so the subsequent truncate
4554                  * can proceed even if the new size is the same as i_size.
4555                  */
4556                 if (new_size > i_size_read(inode))
4557                         ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4558         }
4559
4560 }
4561
4562 /*
4563  * Preallocate space for a file. This implements ext4's fallocate file
4564  * operation, which gets called from the sys_fallocate system call.
4565  * For block-mapped files, posix_fallocate should fall back to the method
4566  * of writing zeroes to the required new blocks (the same behavior that is
4567  * expected of file systems which do not support the fallocate() system call).
4568  */
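/*
 * For illustration only (userspace view, not part of this file):
 * a call such as
 *
 *      int fd = open("file", O_RDWR | O_CREAT, 0644);
 *      fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20);
 *
 * preallocates 16MB of unwritten extents without changing i_size;
 * without FALLOC_FL_KEEP_SIZE, i_size would grow to cover the range.
 */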
4569 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4570 {
4571         struct inode *inode = file_inode(file);
4572         handle_t *handle;
4573         loff_t new_size;
4574         unsigned int max_blocks;
4575         int ret = 0;
4576         int ret2 = 0;
4577         int retries = 0;
4578         int flags;
4579         struct ext4_map_blocks map;
4580         unsigned int credits, blkbits = inode->i_blkbits;
4581
4582         /* Return error if mode is not supported */
4583         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
4584                 return -EOPNOTSUPP;
4585
4586         if (mode & FALLOC_FL_PUNCH_HOLE)
4587                 return ext4_punch_hole(inode, offset, len);
4588
4589         ret = ext4_convert_inline_data(inode);
4590         if (ret)
4591                 return ret;
4592
4593         /*
4594          * currently supporting (pre)allocate mode for extent-based
4595          * files _only_
4596          */
4597         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4598                 return -EOPNOTSUPP;
4599
4600         trace_ext4_fallocate_enter(inode, offset, len, mode);
4601         map.m_lblk = offset >> blkbits;
4602         /*
4603          * We can't just convert len to max_blocks, because the range may
4604          * not be block-aligned: e.g., blocksize = 4096, offset = 3072, len = 2048.
4605          */
4606         max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
4607                 - map.m_lblk;
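        /*
         * E.g., with blkbits = 12 (4k blocks), offset = 3072 and
         * len = 2048: map.m_lblk = 0 and max_blocks = 2, even though
         * len >> blkbits would be 0.
         */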
4608         /*
4609          * credits to insert 1 extent into extent tree
4610          */
4611         credits = ext4_chunk_trans_blocks(inode, max_blocks);
4612         mutex_lock(&inode->i_mutex);
4613         ret = inode_newsize_ok(inode, (len + offset));
4614         if (ret) {
4615                 mutex_unlock(&inode->i_mutex);
4616                 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
4617                 return ret;
4618         }
4619         flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT;
4620         if (mode & FALLOC_FL_KEEP_SIZE)
4621                 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4622         /*
4623          * Don't normalize the request if it can fit in one extent so
4624          * that it doesn't get unnecessarily split into multiple
4625          * extents.
4626          */
4627         if (len <= EXT_UNINIT_MAX_LEN << blkbits)
4628                 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4629
4630 retry:
4631         while (ret >= 0 && ret < max_blocks) {
4632                 map.m_lblk = map.m_lblk + ret;
4633                 map.m_len = max_blocks = max_blocks - ret;
4634                 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4635                                             credits);
4636                 if (IS_ERR(handle)) {
4637                         ret = PTR_ERR(handle);
4638                         break;
4639                 }
4640                 ret = ext4_map_blocks(handle, inode, &map, flags);
4641                 if (ret <= 0) {
4642 #ifdef EXT4FS_DEBUG
4643                         ext4_warning(inode->i_sb,
4644                                      "inode #%lu: block %u: len %u: "
4645                                      "ext4_ext_map_blocks returned %d",
4646                                      inode->i_ino, map.m_lblk,
4647                                      map.m_len, ret);
4648 #endif
4649                         ext4_mark_inode_dirty(handle, inode);
4650                         ret2 = ext4_journal_stop(handle);
4651                         break;
4652                 }
4653                 if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
4654                                                 blkbits) >> blkbits))
4655                         new_size = offset + len;
4656                 else
4657                         new_size = ((loff_t) map.m_lblk + ret) << blkbits;
4658
4659                 ext4_falloc_update_inode(inode, mode, new_size,
4660                                          (map.m_flags & EXT4_MAP_NEW));
4661                 ext4_mark_inode_dirty(handle, inode);
4662                 if ((file->f_flags & O_SYNC) && ret >= max_blocks)
4663                         ext4_handle_sync(handle);
4664                 ret2 = ext4_journal_stop(handle);
4665                 if (ret2)
4666                         break;
4667         }
4668         if (ret == -ENOSPC &&
4669                         ext4_should_retry_alloc(inode->i_sb, &retries)) {
4670                 ret = 0;
4671                 goto retry;
4672         }
4673         mutex_unlock(&inode->i_mutex);
4674         trace_ext4_fallocate_exit(inode, offset, max_blocks,
4675                                 ret > 0 ? ret2 : ret);
4676         return ret > 0 ? ret2 : ret;
4677 }
4678
4679 /*
4680  * This function converts a range of blocks to written extents.
4681  * The caller will pass the start offset and the size;
4682  * all unwritten extents within this range will be converted to
4683  * written extents.
4684  *
4685  * This function is called from the direct IO end_io callback
4686  * function, to convert the fallocated extents after IO is completed.
4687  * Returns 0 on success.
4688  */
4689 int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
4690                                    loff_t offset, ssize_t len)
4691 {
4692         unsigned int max_blocks;
4693         int ret = 0;
4694         int ret2 = 0;
4695         struct ext4_map_blocks map;
4696         unsigned int credits, blkbits = inode->i_blkbits;
4697
4698         map.m_lblk = offset >> blkbits;
4699         /*
4700          * We can't just convert len to max_blocks, because the range may
4701          * not be block-aligned: e.g., blocksize = 4096, offset = 3072, len = 2048.
4702          */
4703         max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
4704                       map.m_lblk);
4705         /*
4706          * This is somewhat ugly but the idea is clear: when a transaction is
4707          * reserved, everything goes into it. Otherwise we'd rather start several
4708          * smaller transactions, converting each extent separately.
4709          */
4710         if (handle) {
4711                 handle = ext4_journal_start_reserved(handle,
4712                                                      EXT4_HT_EXT_CONVERT);
4713                 if (IS_ERR(handle))
4714                         return PTR_ERR(handle);
4715                 credits = 0;
4716         } else {
4717                 /*
4718                  * credits to insert 1 extent into extent tree
4719                  */
4720                 credits = ext4_chunk_trans_blocks(inode, max_blocks);
4721         }
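        /*
         * From here on, credits == 0 means we reuse the single reserved
         * handle for the whole range, while credits != 0 means we start
         * and stop a small handle for each extent conversion.
         */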
4722         while (ret >= 0 && ret < max_blocks) {
4723                 map.m_lblk += ret;
4724                 map.m_len = (max_blocks -= ret);
4725                 if (credits) {
4726                         handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4727                                                     credits);
4728                         if (IS_ERR(handle)) {
4729                                 ret = PTR_ERR(handle);
4730                                 break;
4731                         }
4732                 }
4733                 ret = ext4_map_blocks(handle, inode, &map,
4734                                       EXT4_GET_BLOCKS_IO_CONVERT_EXT);
4735                 if (ret <= 0)
4736                         ext4_warning(inode->i_sb,
4737                                      "inode #%lu: block %u: len %u: "
4738                                      "ext4_ext_map_blocks returned %d",
4739                                      inode->i_ino, map.m_lblk,
4740                                      map.m_len, ret);
4741                 ext4_mark_inode_dirty(handle, inode);
4742                 if (credits)
4743                         ret2 = ext4_journal_stop(handle);
4744                 if (ret <= 0 || ret2)
4745                         break;
4746         }
4747         if (!credits)
4748                 ret2 = ext4_journal_stop(handle);
4749         return ret > 0 ? ret2 : ret;
4750 }
4751
4752 /*
4753  * If newes is not an existing extent (newes->es_pblk equals zero), find
4754  * the delayed extent at the start of newes, update newes accordingly, and
4755  * return the start of the next delayed extent.
4756  *
4757  * If newes is an existing extent (newes->es_pblk is not zero), return
4758  * the start of the next delayed extent, or EXT_MAX_BLOCKS if no delayed
4759  * extent is found. Leave newes unmodified.
4760  */
4761 static int ext4_find_delayed_extent(struct inode *inode,
4762                                     struct extent_status *newes)
4763 {
4764         struct extent_status es;
4765         ext4_lblk_t block, next_del;
4766
4767         if (newes->es_pblk == 0) {
4768                 ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
4769                                 newes->es_lblk + newes->es_len - 1, &es);
4770
4771                 /*
4772                  * If no extent in the extent tree contains block @newes->es_lblk,
4773                  * the block may lie in 1) a hole or 2) a delayed extent.
4774                  */
4775                 if (es.es_len == 0)
4776                         /* A hole found. */
4777                         return 0;
4778
4779                 if (es.es_lblk > newes->es_lblk) {
4780                         /* A hole found. */
4781                         newes->es_len = min(es.es_lblk - newes->es_lblk,
4782                                             newes->es_len);
4783                         return 0;
4784                 }
4785
4786                 newes->es_len = es.es_lblk + es.es_len - newes->es_lblk;
4787         }
4788
4789         block = newes->es_lblk + newes->es_len;
4790         ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es);
4791         if (es.es_len == 0)
4792                 next_del = EXT_MAX_BLOCKS;
4793         else
4794                 next_del = es.es_lblk;
4795
4796         return next_del;
4797 }
4798 /* fiemap flags we can handle are specified here */
4799 #define EXT4_FIEMAP_FLAGS       (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
4800
4801 static int ext4_xattr_fiemap(struct inode *inode,
4802                                 struct fiemap_extent_info *fieinfo)
4803 {
4804         __u64 physical = 0;
4805         __u64 length;
4806         __u32 flags = FIEMAP_EXTENT_LAST;
4807         int blockbits = inode->i_sb->s_blocksize_bits;
4808         int error = 0;
4809
4810         /* in-inode? */
4811         if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4812                 struct ext4_iloc iloc;
4813                 int offset;     /* offset of xattr in inode */
4814
4815                 error = ext4_get_inode_loc(inode, &iloc);
4816                 if (error)
4817                         return error;
4818                 physical = (__u64)iloc.bh->b_blocknr << blockbits;
4819                 offset = EXT4_GOOD_OLD_INODE_SIZE +
4820                                 EXT4_I(inode)->i_extra_isize;
4821                 physical += offset;
4822                 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
4823                 flags |= FIEMAP_EXTENT_DATA_INLINE;
4824                 brelse(iloc.bh);
4825         } else { /* external block */
4826                 physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
4827                 length = inode->i_sb->s_blocksize;
4828         }
4829
4830         if (physical)
4831                 error = fiemap_fill_next_extent(fieinfo, 0, physical,
4832                                                 length, flags);
4833         return (error < 0 ? error : 0);
4834 }
4835
4836 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4837                 __u64 start, __u64 len)
4838 {
4839         ext4_lblk_t start_blk;
4840         int error = 0;
4841
4842         if (ext4_has_inline_data(inode)) {
4843                 int has_inline = 1;
4844
4845                 error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline);
4846
4847                 if (has_inline)
4848                         return error;
4849         }
4850
4851         if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
4852                 error = ext4_ext_precache(inode);
4853                 if (error)
4854                         return error;
4855         }
4856
4857         /* fall back to the generic path here if not in extents format */
4858         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4859                 return generic_block_fiemap(inode, fieinfo, start, len,
4860                         ext4_get_block);
4861
4862         if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
4863                 return -EBADR;
4864
4865         if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
4866                 error = ext4_xattr_fiemap(inode, fieinfo);
4867         } else {
4868                 ext4_lblk_t len_blks;
4869                 __u64 last_blk;
4870
4871                 start_blk = start >> inode->i_sb->s_blocksize_bits;
4872                 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
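                /*
                 * Logical block numbers in the extent tree are 32 bits
                 * wide, so clamp the last block to EXT_MAX_BLOCKS - 1.
                 */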
4873                 if (last_blk >= EXT_MAX_BLOCKS)
4874                         last_blk = EXT_MAX_BLOCKS-1;
4875                 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
4876
4877                 /*
4878                  * Walk the extent tree gathering extent information
4879                  * and pushing extents back to the user.
4880                  */
4881                 error = ext4_fill_fiemap_extents(inode, start_blk,
4882                                                  len_blks, fieinfo);
4883         }
4884         ext4_es_lru_add(inode);
4885         return error;
4886 }