1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
4 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
7 #include <linux/spinlock.h>
8 #include <linux/completion.h>
9 #include <linux/buffer_head.h>
10 #include <linux/blkdev.h>
11 #include <linux/gfs2_ondisk.h>
12 #include <linux/crc32.h>
13 #include <linux/iomap.h>
14 #include <linux/ktime.h>
30 #include "trace_gfs2.h"
32 /* This doesn't need to be that large as max 64 bit pointers in a 4k
33 * block is 512, so __u16 is fine for that. It saves stack space to keep it small. */
37 struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
38 __u16 mp_list[GFS2_MAX_META_HEIGHT];
39 int mp_fheight; /* find_metapath height */
40 int mp_aheight; /* actual height (lookup height) */
43 static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length);
46 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
48 * @dibh: the dinode buffer
49 * @block: the block number that was allocated
50 * @page: The (optional) page. This is looked up if @page is NULL
55 static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
56 u64 block, struct page *page)
58 struct inode *inode = &ip->i_inode;
60 if (!PageUptodate(page)) {
61 void *kaddr = kmap(page);
62 u64 dsize = i_size_read(inode);
64 if (dsize > gfs2_max_stuffed_size(ip))
65 dsize = gfs2_max_stuffed_size(ip);
67 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
68 memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
71 SetPageUptodate(page);
74 if (gfs2_is_jdata(ip)) {
75 struct buffer_head *bh;
77 if (!page_has_buffers(page))
78 create_empty_buffers(page, BIT(inode->i_blkbits),
81 bh = page_buffers(page);
82 if (!buffer_mapped(bh))
83 map_bh(bh, inode->i_sb, block);
85 set_buffer_uptodate(bh);
86 gfs2_trans_add_data(ip->i_gl, bh);
89 gfs2_ordered_add_inode(ip);
95 static int __gfs2_unstuff_inode(struct gfs2_inode *ip, struct page *page)
97 struct buffer_head *bh, *dibh;
98 struct gfs2_dinode *di;
100 int isdir = gfs2_is_dir(ip);
103 error = gfs2_meta_inode_buffer(ip, &dibh);
107 if (i_size_read(&ip->i_inode)) {
108 /* Get a free block, fill it with the stuffed data,
109 and write it out to disk */
112 error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
116 gfs2_trans_remove_revoke(GFS2_SB(&ip->i_inode), block, 1);
117 error = gfs2_dir_get_new_buffer(ip, block, &bh);
120 gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
121 dibh, sizeof(struct gfs2_dinode));
124 error = gfs2_unstuffer_page(ip, dibh, block, page);
130 /* Set up the pointer to the new block */
132 gfs2_trans_add_meta(ip->i_gl, dibh);
133 di = (struct gfs2_dinode *)dibh->b_data;
134 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
136 if (i_size_read(&ip->i_inode)) {
137 *(__be64 *)(di + 1) = cpu_to_be64(block);
138 gfs2_add_inode_blocks(&ip->i_inode, 1);
139 di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
143 di->di_height = cpu_to_be16(1);
151 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
152 * @ip: The GFS2 inode to unstuff
154 * This routine unstuffs a dinode and returns it to a "normal" state such
155 * that the height can be grown in the traditional way.
160 int gfs2_unstuff_dinode(struct gfs2_inode *ip)
162 struct inode *inode = &ip->i_inode;
166 down_write(&ip->i_rw_mutex);
167 page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
171 error = __gfs2_unstuff_inode(ip, page);
175 up_write(&ip->i_rw_mutex);
180 * find_metapath - Find path through the metadata tree
181 * @sdp: The superblock
182 * @block: The disk block to look up
183 * @mp: The metapath to return the result in
184 * @height: The pre-calculated height of the metadata tree
186 * This routine returns a struct metapath structure that defines a path
187 * through the metadata of inode "ip" to get to block "block".
190 * Given: "ip" is a height 3 file, "offset" is 101342453, and this is a
191 * filesystem with a blocksize of 4096.
193 * find_metapath() would return a struct metapath structure set to:
194 * mp_fheight = 3, mp_list[0] = 0, mp_list[1] = 48, and mp_list[2] = 165.
196 * That means that in order to get to the block containing the byte at
197 * offset 101342453, we would load the indirect block pointed to by pointer
198 * 0 in the dinode. We would then load the indirect block pointed to by
199 * pointer 48 in that indirect block. We would then load the data block
200 * pointed to by pointer 165 in that indirect block.
202 * [diagram: dinode -> indirect block (pointer 0) -> indirect block (pointer 48) -> data block containing offset 101342453]
238 static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
239 struct metapath *mp, unsigned int height)
243 mp->mp_fheight = height;
244 for (i = height; i--;)
245 mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
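/*
 * Illustrative example (not from the original source): with 4k blocks,
 * sd_inptrs == 509; for block == 24597 == 48 * 509 + 165 and height 3,
 * the loop above yields mp_list = { 0, 48, 165 }. In other words,
 * mp_list[] holds the digits of the block number written in base
 * sd_inptrs, most significant digit first.
 */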
248 static inline unsigned int metapath_branch_start(const struct metapath *mp)
250 if (mp->mp_list[0] == 0)
256 * metaptr1 - Return the first possible metadata pointer in a metapath buffer
257 * @height: The metadata height (0 = dinode)
260 static inline __be64 *metaptr1(unsigned int height, const struct metapath *mp)
262 struct buffer_head *bh = mp->mp_bh[height];
264 return ((__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)));
265 return ((__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)));
269 * metapointer - Return pointer to start of metadata in a buffer
270 * @height: The metadata height (0 = dinode)
273 * Return a pointer to the block number of the next height of the metadata
274 * tree given a buffer containing the pointer to the current height of the metadata tree.
278 static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
280 __be64 *p = metaptr1(height, mp);
281 return p + mp->mp_list[height];
284 static inline const __be64 *metaend(unsigned int height, const struct metapath *mp)
286 const struct buffer_head *bh = mp->mp_bh[height];
287 return (const __be64 *)(bh->b_data + bh->b_size);
290 static void clone_metapath(struct metapath *clone, struct metapath *mp)
295 for (hgt = 0; hgt < mp->mp_aheight; hgt++)
296 get_bh(clone->mp_bh[hgt]);
299 static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
303 for (t = start; t < end; t++) {
304 struct buffer_head *rabh;
309 rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
310 if (trylock_buffer(rabh)) {
311 if (!buffer_uptodate(rabh)) {
312 rabh->b_end_io = end_buffer_read_sync;
313 submit_bh(REQ_OP_READ,
314 REQ_RAHEAD | REQ_META | REQ_PRIO,
324 static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
325 unsigned int x, unsigned int h)
328 __be64 *ptr = metapointer(x, mp);
329 u64 dblock = be64_to_cpu(*ptr);
334 ret = gfs2_meta_buffer(ip, GFS2_METATYPE_IN, dblock, &mp->mp_bh[x + 1]);
338 mp->mp_aheight = x + 1;
343 * lookup_metapath - Walk the metadata tree to a specific point
347 * Assumes that the inode's buffer has already been looked up and
348 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
349 * by find_metapath().
351 * If this function encounters part of the tree which has not been
352 * allocated, it returns the current height of the tree at the point
353 * at which it found the unallocated block. Blocks which are found are
354 * added to the mp->mp_bh[] list.
359 static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
361 return __fillup_metapath(ip, mp, 0, ip->i_height - 1);
365 * fillup_metapath - fill up buffers for the metadata path to a specific height
368 * @h: The height to which it should be mapped
370 * Similar to lookup_metapath, but does lookups for a range of heights
372 * Returns: error or the number of buffers filled
375 static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
381 /* find the first buffer we need to look up. */
382 for (x = h - 1; x > 0; x--) {
387 ret = __fillup_metapath(ip, mp, x, h);
390 return mp->mp_aheight - x - 1;
393 static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
395 sector_t factor = 1, block = 0;
398 for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
399 if (hgt < mp->mp_aheight)
400 block += mp->mp_list[hgt] * factor;
401 factor *= sdp->sd_inptrs;
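/*
 * Note (added for clarity): this is the inverse of find_metapath().
 * Ignoring heights at or above mp_aheight (which contribute nothing),
 * block = sum of mp_list[hgt] * sd_inptrs^(mp_fheight - 1 - hgt); e.g.
 * mp_list = { 0, 48, 165 } maps back to block 24597 when sd_inptrs == 509.
 */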
406 static void release_metapath(struct metapath *mp)
410 for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
411 if (mp->mp_bh[i] == NULL)
413 brelse(mp->mp_bh[i]);
419 * gfs2_extent_length - Returns length of an extent of blocks
420 * @bh: The metadata block
421 * @ptr: Current position in @bh
422 * @limit: Max extent length to return
423 * @eob: Set to 1 if we hit "end of block"
425 * Returns: The length of the extent (minimum of one block)
428 static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob)
430 const __be64 *end = (__be64 *)(bh->b_data + bh->b_size);
431 const __be64 *first = ptr;
432 u64 d = be64_to_cpu(*ptr);
440 } while(be64_to_cpu(*ptr) == d);
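/*
 * Illustrative example: for pointers { 100, 101, 102, 200 } with @ptr at
 * the first entry, the loop stops at 200 and the extent length returned
 * is 3.
 */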
446 enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };
449 * gfs2_metadata_walker - walk an indirect block
450 * @mp: Metapath to indirect block
451 * @ptrs: Number of pointers to look at
453 * When returning WALK_FOLLOW, the walker must update @mp to point at the right
454 * indirect block to follow.
456 typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
460 * gfs2_walk_metadata - walk a tree of indirect blocks
462 * @mp: Starting point of walk
463 * @max_len: Maximum number of blocks to walk
464 * @walker: Called during the walk
466 * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or
467 * past the end of metadata, and a negative error code otherwise.
470 static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
471 u64 max_len, gfs2_metadata_walker walker)
473 struct gfs2_inode *ip = GFS2_I(inode);
474 struct gfs2_sbd *sdp = GFS2_SB(inode);
480 * The walk starts in the lowest allocated indirect block, which may be
481 * before the position indicated by @mp. Adjust @max_len accordingly
482 * to avoid a short walk.
484 for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
485 max_len += mp->mp_list[hgt] * factor;
486 mp->mp_list[hgt] = 0;
487 factor *= sdp->sd_inptrs;
491 u16 start = mp->mp_list[hgt];
492 enum walker_status status;
496 /* Walk indirect block. */
497 ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
500 ptrs = DIV_ROUND_UP_ULL(max_len, factor);
501 status = walker(mp, ptrs);
506 BUG_ON(mp->mp_aheight == mp->mp_fheight);
507 ptrs = mp->mp_list[hgt] - start;
516 if (status == WALK_FOLLOW)
517 goto fill_up_metapath;
520 /* Decrease height of metapath. */
521 brelse(mp->mp_bh[hgt]);
522 mp->mp_bh[hgt] = NULL;
523 mp->mp_list[hgt] = 0;
527 factor *= sdp->sd_inptrs;
529 /* Advance in metadata tree. */
530 (mp->mp_list[hgt])++;
532 if (mp->mp_list[hgt] >= sdp->sd_inptrs)
535 if (mp->mp_list[hgt] >= sdp->sd_diptrs)
540 /* Increase height of metapath. */
541 ret = fillup_metapath(ip, mp, ip->i_height - 1);
546 do_div(factor, sdp->sd_inptrs);
547 mp->mp_aheight = hgt + 1;
552 static enum walker_status gfs2_hole_walker(struct metapath *mp,
555 const __be64 *start, *ptr, *end;
558 hgt = mp->mp_aheight - 1;
559 start = metapointer(hgt, mp);
562 for (ptr = start; ptr < end; ptr++) {
564 mp->mp_list[hgt] += ptr - start;
565 if (mp->mp_aheight == mp->mp_fheight)
570 return WALK_CONTINUE;
574 * gfs2_hole_size - figure out the size of a hole
576 * @lblock: The logical starting block number
577 * @len: How far to look (in blocks)
578 * @mp: The metapath at lblock
579 * @iomap: The iomap to store the hole size in
581 * This function modifies @mp.
583 * Returns: errno on error
585 static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
586 struct metapath *mp, struct iomap *iomap)
588 struct metapath clone;
592 clone_metapath(&clone, mp);
593 ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
598 hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
601 iomap->length = hole_size << inode->i_blkbits;
605 release_metapath(&clone);
609 static inline __be64 *gfs2_indirect_init(struct metapath *mp,
610 struct gfs2_glock *gl, unsigned int i,
611 unsigned offset, u64 bn)
613 __be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
614 ((i > 1) ? sizeof(struct gfs2_meta_header) :
615 sizeof(struct gfs2_dinode)));
617 BUG_ON(mp->mp_bh[i] != NULL);
618 mp->mp_bh[i] = gfs2_meta_new(gl, bn);
619 gfs2_trans_add_meta(gl, mp->mp_bh[i]);
620 gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
621 gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
623 *ptr = cpu_to_be64(bn);
629 ALLOC_GROW_DEPTH = 1,
630 ALLOC_GROW_HEIGHT = 2,
631 /* ALLOC_UNSTUFF = 3, TBD and rather complicated */
635 * __gfs2_iomap_alloc - Build a metadata tree of the requested height
636 * @inode: The GFS2 inode
637 * @iomap: The iomap structure
638 * @mp: The metapath, with proper height information calculated
640 * In this routine we may have to alloc:
641 * i) Indirect blocks to grow the metadata tree height
642 * ii) Indirect blocks to fill in lower part of the metadata tree
645 * This function is called after __gfs2_iomap_get, which works out the
646 * total number of blocks which we need via gfs2_alloc_size.
648 * We then do the actual allocation asking for an extent at a time (if
649 * enough contiguous free blocks are available, there will only be one
650 * allocation request per call) and use the state machine to initialise
651 * the blocks in order.
653 * Right now, this function will allocate at most one indirect block
654 * worth of data -- with a default block size of 4K, that's slightly
655 * less than 2M. If this limitation is ever removed to allow huge
656 * allocations, we would probably still want to limit the iomap size we
657 * return to avoid stalling other tasks during huge writes; the next
658 * iomap iteration would then find the blocks already allocated.
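 *
 * Worked example (illustrative): with a 4k block size an indirect block
 * holds sd_inptrs == 509 pointers, so one indirect block worth of data
 * is 509 * 4096 = 2084864 bytes, i.e. just under the 2M mentioned above.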
660 * Returns: errno on error
663 static int __gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
666 struct gfs2_inode *ip = GFS2_I(inode);
667 struct gfs2_sbd *sdp = GFS2_SB(inode);
668 struct buffer_head *dibh = mp->mp_bh[0];
670 unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
671 size_t dblks = iomap->length >> inode->i_blkbits;
672 const unsigned end_of_metadata = mp->mp_fheight - 1;
674 enum alloc_state state;
678 BUG_ON(mp->mp_aheight < 1);
679 BUG_ON(dibh == NULL);
682 gfs2_trans_add_meta(ip->i_gl, dibh);
684 down_write(&ip->i_rw_mutex);
686 if (mp->mp_fheight == mp->mp_aheight) {
687 /* Bottom indirect block exists */
690 /* Need to allocate indirect blocks */
691 if (mp->mp_fheight == ip->i_height) {
692 /* Writing into existing tree, extend tree down */
693 iblks = mp->mp_fheight - mp->mp_aheight;
694 state = ALLOC_GROW_DEPTH;
696 /* Building up tree height */
697 state = ALLOC_GROW_HEIGHT;
698 iblks = mp->mp_fheight - ip->i_height;
699 branch_start = metapath_branch_start(mp);
700 iblks += (mp->mp_fheight - branch_start);
704 /* start of the second part of the function (state machine) */
706 blks = dblks + iblks;
710 ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
714 if (state != ALLOC_DATA || gfs2_is_jdata(ip))
715 gfs2_trans_remove_revoke(sdp, bn, n);
717 /* Growing height of tree */
718 case ALLOC_GROW_HEIGHT:
720 ptr = (__be64 *)(dibh->b_data +
721 sizeof(struct gfs2_dinode));
724 for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0;
726 gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
727 if (i - 1 == mp->mp_fheight - ip->i_height) {
729 gfs2_buffer_copy_tail(mp->mp_bh[i],
730 sizeof(struct gfs2_meta_header),
731 dibh, sizeof(struct gfs2_dinode));
732 gfs2_buffer_clear_tail(dibh,
733 sizeof(struct gfs2_dinode) +
735 ptr = (__be64 *)(mp->mp_bh[i]->b_data +
736 sizeof(struct gfs2_meta_header));
738 state = ALLOC_GROW_DEPTH;
739 for(i = branch_start; i < mp->mp_fheight; i++) {
740 if (mp->mp_bh[i] == NULL)
742 brelse(mp->mp_bh[i]);
749 fallthrough; /* To branching from existing tree */
750 case ALLOC_GROW_DEPTH:
751 if (i > 1 && i < mp->mp_fheight)
752 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
753 for (; i < mp->mp_fheight && n > 0; i++, n--)
754 gfs2_indirect_init(mp, ip->i_gl, i,
755 mp->mp_list[i-1], bn++);
756 if (i == mp->mp_fheight)
760 fallthrough; /* To tree complete, adding data blocks */
763 BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
764 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
766 ptr = metapointer(end_of_metadata, mp);
767 iomap->addr = bn << inode->i_blkbits;
768 iomap->flags |= IOMAP_F_MERGED | IOMAP_F_NEW;
770 *ptr++ = cpu_to_be64(bn++);
773 } while (iomap->addr == IOMAP_NULL_ADDR);
775 iomap->type = IOMAP_MAPPED;
776 iomap->length = (u64)dblks << inode->i_blkbits;
777 ip->i_height = mp->mp_fheight;
778 gfs2_add_inode_blocks(&ip->i_inode, alloced);
779 gfs2_dinode_out(ip, dibh->b_data);
781 up_write(&ip->i_rw_mutex);
785 #define IOMAP_F_GFS2_BOUNDARY IOMAP_F_PRIVATE
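/*
 * Note (added for clarity): GFS2 repurposes the iomap private flag to
 * mark a mapping that ends at the last pointer of an indirect block.
 * gfs2_block_map() translates it into buffer_boundary() so callers know
 * that more metadata must be read before the next block can be mapped.
 */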
788 * gfs2_alloc_size - Compute the maximum allocation size
791 * @size: Requested size in blocks
793 * Compute the maximum size of the next allocation at @mp.
795 * Returns: size in blocks
797 static u64 gfs2_alloc_size(struct inode *inode, struct metapath *mp, u64 size)
799 struct gfs2_inode *ip = GFS2_I(inode);
800 struct gfs2_sbd *sdp = GFS2_SB(inode);
801 const __be64 *first, *ptr, *end;
804 * For writes to stuffed files, this function is called twice via
805 * __gfs2_iomap_get, before and after unstuffing. The size we return the
806 * first time needs to be large enough to get the reservation and
807 * allocation sizes right. The size we return the second time must
808 * be exact or else __gfs2_iomap_alloc won't do the right thing.
811 if (gfs2_is_stuffed(ip) || mp->mp_fheight != mp->mp_aheight) {
812 unsigned int maxsize = mp->mp_fheight > 1 ?
813 sdp->sd_inptrs : sdp->sd_diptrs;
814 maxsize -= mp->mp_list[mp->mp_fheight - 1];
820 first = metapointer(ip->i_height - 1, mp);
821 end = metaend(ip->i_height - 1, mp);
822 if (end - first > size)
824 for (ptr = first; ptr < end; ptr++) {
832 * __gfs2_iomap_get - Map blocks from an inode to disk blocks
834 * @pos: Starting position in bytes
835 * @length: Length to map, in bytes
836 * @flags: iomap flags
837 * @iomap: The iomap structure
842 static int __gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
843 unsigned flags, struct iomap *iomap,
846 struct gfs2_inode *ip = GFS2_I(inode);
847 struct gfs2_sbd *sdp = GFS2_SB(inode);
848 loff_t size = i_size_read(inode);
851 sector_t lblock_stop;
855 struct buffer_head *dibh = NULL, *bh;
861 down_read(&ip->i_rw_mutex);
863 ret = gfs2_meta_inode_buffer(ip, &dibh);
868 if (gfs2_is_stuffed(ip)) {
869 if (flags & IOMAP_WRITE) {
870 loff_t max_size = gfs2_max_stuffed_size(ip);
872 if (pos + length > max_size)
874 iomap->length = max_size;
877 if (flags & IOMAP_REPORT) {
882 iomap->length = length;
886 iomap->length = size;
888 iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
889 sizeof(struct gfs2_dinode);
890 iomap->type = IOMAP_INLINE;
891 iomap->inline_data = dibh->b_data + sizeof(struct gfs2_dinode);
896 lblock = pos >> inode->i_blkbits;
897 iomap->offset = lblock << inode->i_blkbits;
898 lblock_stop = (pos + length - 1) >> inode->i_blkbits;
899 len = lblock_stop - lblock + 1;
900 iomap->length = len << inode->i_blkbits;
902 height = ip->i_height;
903 while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
905 find_metapath(sdp, lblock, mp, height);
906 if (height > ip->i_height || gfs2_is_stuffed(ip))
909 ret = lookup_metapath(ip, mp);
913 if (mp->mp_aheight != ip->i_height)
916 ptr = metapointer(ip->i_height - 1, mp);
920 bh = mp->mp_bh[ip->i_height - 1];
921 len = gfs2_extent_length(bh, ptr, len, &eob);
923 iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
924 iomap->length = len << inode->i_blkbits;
925 iomap->type = IOMAP_MAPPED;
926 iomap->flags |= IOMAP_F_MERGED;
928 iomap->flags |= IOMAP_F_GFS2_BOUNDARY;
931 iomap->bdev = inode->i_sb->s_bdev;
933 up_read(&ip->i_rw_mutex);
937 if (flags & IOMAP_REPORT) {
940 else if (height == ip->i_height)
941 ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
943 iomap->length = size - pos;
944 } else if (flags & IOMAP_WRITE) {
947 if (flags & IOMAP_DIRECT)
948 goto out; /* (see gfs2_file_direct_write) */
950 len = gfs2_alloc_size(inode, mp, len);
951 alloc_size = len << inode->i_blkbits;
952 if (alloc_size < iomap->length)
953 iomap->length = alloc_size;
955 if (pos < size && height == ip->i_height)
956 ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
959 iomap->addr = IOMAP_NULL_ADDR;
960 iomap->type = IOMAP_HOLE;
964 static int gfs2_write_lock(struct inode *inode)
966 struct gfs2_inode *ip = GFS2_I(inode);
967 struct gfs2_sbd *sdp = GFS2_SB(inode);
970 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
971 error = gfs2_glock_nq(&ip->i_gh);
974 if (&ip->i_inode == sdp->sd_rindex) {
975 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
977 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
978 GL_NOCACHE, &m_ip->i_gh);
985 gfs2_glock_dq(&ip->i_gh);
987 gfs2_holder_uninit(&ip->i_gh);
991 static void gfs2_write_unlock(struct inode *inode)
993 struct gfs2_inode *ip = GFS2_I(inode);
994 struct gfs2_sbd *sdp = GFS2_SB(inode);
996 if (&ip->i_inode == sdp->sd_rindex) {
997 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
999 gfs2_glock_dq_uninit(&m_ip->i_gh);
1001 gfs2_glock_dq_uninit(&ip->i_gh);
1004 static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
1007 unsigned int blockmask = i_blocksize(inode) - 1;
1008 struct gfs2_sbd *sdp = GFS2_SB(inode);
1009 unsigned int blocks;
1011 blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
1012 return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
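/*
 * Worked example (illustrative, 4k blocks): pos = 6144, len = 4096 gives
 * blocks = ((6144 & 4095) + 4096 + 4095) >> 12 = 2, since such a write
 * touches the tail of one block and the head of the next.
 */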
1015 static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
1016 unsigned copied, struct page *page)
1018 struct gfs2_trans *tr = current->journal_info;
1019 struct gfs2_inode *ip = GFS2_I(inode);
1020 struct gfs2_sbd *sdp = GFS2_SB(inode);
1022 if (page && !gfs2_is_stuffed(ip))
1023 gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
1025 if (tr->tr_num_buf_new)
1026 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1028 gfs2_trans_end(sdp);
1031 static const struct iomap_page_ops gfs2_iomap_page_ops = {
1032 .page_prepare = gfs2_iomap_page_prepare,
1033 .page_done = gfs2_iomap_page_done,
1036 static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
1037 loff_t length, unsigned flags,
1038 struct iomap *iomap,
1039 struct metapath *mp)
1041 struct gfs2_inode *ip = GFS2_I(inode);
1042 struct gfs2_sbd *sdp = GFS2_SB(inode);
1046 unstuff = gfs2_is_stuffed(ip) &&
1047 pos + length > gfs2_max_stuffed_size(ip);
1049 if (unstuff || iomap->type == IOMAP_HOLE) {
1050 unsigned int data_blocks, ind_blocks;
1051 struct gfs2_alloc_parms ap = {};
1052 unsigned int rblocks;
1053 struct gfs2_trans *tr;
1055 gfs2_write_calc_reserv(ip, iomap->length, &data_blocks,
1057 ap.target = data_blocks + ind_blocks;
1058 ret = gfs2_quota_lock_check(ip, &ap);
1062 ret = gfs2_inplace_reserve(ip, &ap);
1066 rblocks = RES_DINODE + ind_blocks;
1067 if (gfs2_is_jdata(ip))
1068 rblocks += data_blocks;
1069 if (ind_blocks || data_blocks)
1070 rblocks += RES_STATFS + RES_QUOTA;
1071 if (inode == sdp->sd_rindex)
1072 rblocks += 2 * RES_STATFS;
1073 rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
1075 ret = gfs2_trans_begin(sdp, rblocks,
1076 iomap->length >> inode->i_blkbits);
1078 goto out_trans_fail;
1081 ret = gfs2_unstuff_dinode(ip);
1084 release_metapath(mp);
1085 ret = __gfs2_iomap_get(inode, iomap->offset,
1086 iomap->length, flags, iomap, mp);
1091 if (iomap->type == IOMAP_HOLE) {
1092 ret = __gfs2_iomap_alloc(inode, iomap, mp);
1094 gfs2_trans_end(sdp);
1095 gfs2_inplace_release(ip);
1096 punch_hole(ip, iomap->offset, iomap->length);
1101 tr = current->journal_info;
1102 if (tr->tr_num_buf_new)
1103 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1105 gfs2_trans_end(sdp);
1108 if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
1109 iomap->page_ops = &gfs2_iomap_page_ops;
1113 gfs2_trans_end(sdp);
1115 gfs2_inplace_release(ip);
1117 gfs2_quota_unlock(ip);
1121 static inline bool gfs2_iomap_need_write_lock(unsigned flags)
1123 return (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT);
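/*
 * Note (added for clarity): buffered writes take the inode glock
 * exclusively via gfs2_write_lock() in gfs2_iomap_begin(); direct I/O
 * writes are expected to hold it already (see gfs2_file_direct_write).
 */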
1126 static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
1127 unsigned flags, struct iomap *iomap,
1128 struct iomap *srcmap)
1130 struct gfs2_inode *ip = GFS2_I(inode);
1131 struct metapath mp = { .mp_aheight = 1, };
1134 if (gfs2_is_jdata(ip))
1135 iomap->flags |= IOMAP_F_BUFFER_HEAD;
1137 trace_gfs2_iomap_start(ip, pos, length, flags);
1138 if (gfs2_iomap_need_write_lock(flags)) {
1139 ret = gfs2_write_lock(inode);
1144 ret = __gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
1148 switch(flags & (IOMAP_WRITE | IOMAP_ZERO)) {
1150 if (flags & IOMAP_DIRECT) {
1152 * Silently fall back to buffered I/O for stuffed files
1153 * or if we've got a hole (see gfs2_file_direct_write).
1155 if (iomap->type != IOMAP_MAPPED)
1161 if (iomap->type == IOMAP_HOLE)
1168 ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);
1171 if (ret && gfs2_iomap_need_write_lock(flags))
1172 gfs2_write_unlock(inode);
1173 release_metapath(&mp);
1175 trace_gfs2_iomap_end(ip, iomap, ret);
1179 static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
1180 ssize_t written, unsigned flags, struct iomap *iomap)
1182 struct gfs2_inode *ip = GFS2_I(inode);
1183 struct gfs2_sbd *sdp = GFS2_SB(inode);
1185 switch (flags & (IOMAP_WRITE | IOMAP_ZERO)) {
1187 if (flags & IOMAP_DIRECT)
1191 if (iomap->type == IOMAP_HOLE)
1198 if (!gfs2_is_stuffed(ip))
1199 gfs2_ordered_add_inode(ip);
1201 if (inode == sdp->sd_rindex)
1202 adjust_fs_space(inode);
1204 gfs2_inplace_release(ip);
1206 if (ip->i_qadata && ip->i_qadata->qa_qd_num)
1207 gfs2_quota_unlock(ip);
1209 if (length != written && (iomap->flags & IOMAP_F_NEW)) {
1210 /* Deallocate blocks that were just allocated. */
1211 loff_t blockmask = i_blocksize(inode) - 1;
1212 loff_t end = (pos + length) & ~blockmask;
1214 pos = (pos + written + blockmask) & ~blockmask;
1216 truncate_pagecache_range(inode, pos, end - 1);
1217 punch_hole(ip, pos, end - pos);
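/*
 * Worked example (illustrative, 4k blocks): pos = 0, length = 8192,
 * written = 4096 rounds pos up to 4096 and leaves end at 8192, so the
 * second, never-written block is deallocated again.
 */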
1221 if (unlikely(!written))
1224 if (iomap->flags & IOMAP_F_SIZE_CHANGED)
1225 mark_inode_dirty(inode);
1226 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
1229 if (gfs2_iomap_need_write_lock(flags))
1230 gfs2_write_unlock(inode);
1234 const struct iomap_ops gfs2_iomap_ops = {
1235 .iomap_begin = gfs2_iomap_begin,
1236 .iomap_end = gfs2_iomap_end,
1240 * gfs2_block_map - Map one or more blocks of an inode to a disk block
1242 * @lblock: The logical block number
1243 * @bh_map: The bh to be mapped
1244 * @create: True if it's ok to allocate blocks to satisfy the request
1246 * The size of the requested mapping is defined in bh_map->b_size.
1248 * Clears buffer_mapped(bh_map) and leaves bh_map->b_size unchanged
1249 * when @lblock is not mapped. Sets buffer_mapped(bh_map) and
1250 * bh_map->b_size to indicate the size of the mapping when @lblock and
1251 * successive blocks are mapped, up to the requested size.
1253 * Sets buffer_boundary() if a read of metadata will be required
1254 * before the next block can be mapped. Sets buffer_new() if new
1255 * blocks were allocated.
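 *
 * Minimal usage sketch (illustrative; the caller supplies the buffer
 * head, as gfs2_map_journal_extents() below does):
 *
 *	struct buffer_head bh = { .b_size = 16 << inode->i_blkbits };
 *
 *	if (!gfs2_block_map(inode, lblock, &bh, 0) && buffer_mapped(&bh))
 *		extlen = bh.b_size >> inode->i_blkbits;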
1260 int gfs2_block_map(struct inode *inode, sector_t lblock,
1261 struct buffer_head *bh_map, int create)
1263 struct gfs2_inode *ip = GFS2_I(inode);
1264 loff_t pos = (loff_t)lblock << inode->i_blkbits;
1265 loff_t length = bh_map->b_size;
1266 struct iomap iomap = { };
1269 clear_buffer_mapped(bh_map);
1270 clear_buffer_new(bh_map);
1271 clear_buffer_boundary(bh_map);
1272 trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
1275 ret = gfs2_iomap_get(inode, pos, length, &iomap);
1277 ret = gfs2_iomap_alloc(inode, pos, length, &iomap);
1281 if (iomap.length > bh_map->b_size) {
1282 iomap.length = bh_map->b_size;
1283 iomap.flags &= ~IOMAP_F_GFS2_BOUNDARY;
1285 if (iomap.addr != IOMAP_NULL_ADDR)
1286 map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits);
1287 bh_map->b_size = iomap.length;
1288 if (iomap.flags & IOMAP_F_GFS2_BOUNDARY)
1289 set_buffer_boundary(bh_map);
1290 if (iomap.flags & IOMAP_F_NEW)
1291 set_buffer_new(bh_map);
1294 trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
1298 int gfs2_get_extent(struct inode *inode, u64 lblock, u64 *dblock,
1299 unsigned int *extlen)
1301 unsigned int blkbits = inode->i_blkbits;
1302 struct iomap iomap = { };
1306 ret = gfs2_iomap_get(inode, lblock << blkbits, *extlen << blkbits,
1310 if (iomap.type != IOMAP_MAPPED)
1312 *dblock = iomap.addr >> blkbits;
1313 len = iomap.length >> blkbits;
1319 int gfs2_alloc_extent(struct inode *inode, u64 lblock, u64 *dblock,
1320 unsigned int *extlen, bool *new)
1322 unsigned int blkbits = inode->i_blkbits;
1323 struct iomap iomap = { };
1327 ret = gfs2_iomap_alloc(inode, lblock << blkbits, *extlen << blkbits,
1331 if (iomap.type != IOMAP_MAPPED)
1333 *dblock = iomap.addr >> blkbits;
1334 len = iomap.length >> blkbits;
1337 *new = iomap.flags & IOMAP_F_NEW;
1342 * NOTE: Never call gfs2_block_zero_range with an open transaction because it
1343 * uses iomap write to perform its actions, which begin their own transactions
1344 * (iomap_begin, page_prepare, etc.)
1346 static int gfs2_block_zero_range(struct inode *inode, loff_t from,
1347 unsigned int length)
1349 BUG_ON(current->journal_info);
1350 return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops);
1353 #define GFS2_JTRUNC_REVOKES 8192
1356 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
1357 * @inode: The inode being truncated
1358 * @oldsize: The original (larger) size
1359 * @newsize: The new smaller size
1361 * With jdata files, we have to journal a revoke for each block which is
1362 * truncated. As a result, we need to split this into separate transactions
1363 * if the number of pages being truncated gets too large.
1366 static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
1368 struct gfs2_sbd *sdp = GFS2_SB(inode);
1369 u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
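/*
 * Illustrative: with 4k blocks, max_chunk = 8192 * 4096 = 32M of file
 * data per transaction, bounding the revokes each transaction needs.
 */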
1373 while (oldsize != newsize) {
1374 struct gfs2_trans *tr;
1377 chunk = oldsize - newsize;
1378 if (chunk > max_chunk)
1381 offs = oldsize & ~PAGE_MASK;
1382 if (offs && chunk > PAGE_SIZE)
1383 chunk = offs + ((chunk - offs) & PAGE_MASK);
1385 truncate_pagecache(inode, oldsize - chunk);
1388 tr = current->journal_info;
1389 if (!test_bit(TR_TOUCHED, &tr->tr_flags))
1392 gfs2_trans_end(sdp);
1393 error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
1401 static int trunc_start(struct inode *inode, u64 newsize)
1403 struct gfs2_inode *ip = GFS2_I(inode);
1404 struct gfs2_sbd *sdp = GFS2_SB(inode);
1405 struct buffer_head *dibh = NULL;
1406 int journaled = gfs2_is_jdata(ip);
1407 u64 oldsize = inode->i_size;
1410 if (!gfs2_is_stuffed(ip)) {
1411 unsigned int blocksize = i_blocksize(inode);
1412 unsigned int offs = newsize & (blocksize - 1);
1414 error = gfs2_block_zero_range(inode, newsize,
1421 error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
1423 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1427 error = gfs2_meta_inode_buffer(ip, &dibh);
1431 gfs2_trans_add_meta(ip->i_gl, dibh);
1433 if (gfs2_is_stuffed(ip))
1434 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
1436 ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
1438 i_size_write(inode, newsize);
1439 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1440 gfs2_dinode_out(ip, dibh->b_data);
1443 error = gfs2_journaled_truncate(inode, oldsize, newsize);
1445 truncate_pagecache(inode, newsize);
1449 if (current->journal_info)
1450 gfs2_trans_end(sdp);
1454 int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
1455 struct iomap *iomap)
1457 struct metapath mp = { .mp_aheight = 1, };
1460 ret = __gfs2_iomap_get(inode, pos, length, 0, iomap, &mp);
1461 release_metapath(&mp);
1465 int gfs2_iomap_alloc(struct inode *inode, loff_t pos, loff_t length,
1466 struct iomap *iomap)
1468 struct metapath mp = { .mp_aheight = 1, };
1471 ret = __gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, iomap, &mp);
1472 if (!ret && iomap->type == IOMAP_HOLE)
1473 ret = __gfs2_iomap_alloc(inode, iomap, &mp);
1474 release_metapath(&mp);
1479 * sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein
1481 * @rd_gh: holder of resource group glock
1482 * @bh: buffer head to sweep
1483 * @start: starting point in bh
1484 * @end: end point in bh
1485 * @meta: true if bh points to metadata (rather than data)
1486 * @btotal: place to keep count of total blocks freed
1488 * We sweep a metadata buffer (provided by the metapath) for blocks we need to
1489 * free, and free them all. However, we do it one rgrp at a time. If this
1490 * block has references to multiple rgrps, we break it into individual
1491 * transactions. This allows other processes to use the rgrps while we're
1492 * focused on a single one, for better concurrency / performance.
1493 * At every transaction boundary, we rewrite the inode into the journal.
1494 * That way the bitmaps are kept consistent with the inode and we can recover
1495 * if we're interrupted by power-outages.
1497 * Returns: 0, or return code if an error occurred.
1498 * *btotal has the total number of blocks freed
1500 static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
1501 struct buffer_head *bh, __be64 *start, __be64 *end,
1502 bool meta, u32 *btotal)
1504 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1505 struct gfs2_rgrpd *rgd;
1506 struct gfs2_trans *tr;
1508 int blks_outside_rgrp;
1509 u64 bn, bstart, isize_blks;
1510 s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */
1512 bool buf_in_tr = false; /* buffer was added to transaction */
1516 if (gfs2_holder_initialized(rd_gh)) {
1517 rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
1518 gfs2_assert_withdraw(sdp,
1519 gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
1521 blks_outside_rgrp = 0;
1525 for (p = start; p < end; p++) {
1528 bn = be64_to_cpu(*p);
1531 if (!rgrp_contains_block(rgd, bn)) {
1532 blks_outside_rgrp++;
1536 rgd = gfs2_blk2rgrpd(sdp, bn, true);
1537 if (unlikely(!rgd)) {
1541 ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
1542 LM_FLAG_NODE_SCOPE, rd_gh);
1546 /* Must be done with the rgrp glock held: */
1547 if (gfs2_rs_active(&ip->i_res) &&
1548 rgd == ip->i_res.rs_rgd)
1549 gfs2_rs_deltree(&ip->i_res);
1552 /* The size of our transactions will be unknown until we
1553 actually process all the metadata blocks that relate to
1554 the rgrp. So we estimate. We know it can't be more than
1555 the dinode's i_blocks and we don't want to exceed the
1556 journal flush threshold, sd_log_thresh2. */
1557 if (current->journal_info == NULL) {
1558 unsigned int jblocks_rqsted, revokes;
1560 jblocks_rqsted = rgd->rd_length + RES_DINODE +
1562 isize_blks = gfs2_get_inode_blocks(&ip->i_inode);
1563 if (isize_blks > atomic_read(&sdp->sd_log_thresh2))
1565 atomic_read(&sdp->sd_log_thresh2);
1567 jblocks_rqsted += isize_blks;
1568 revokes = jblocks_rqsted;
1570 revokes += end - start;
1571 else if (ip->i_depth)
1572 revokes += sdp->sd_inptrs;
1573 ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
1576 down_write(&ip->i_rw_mutex);
1578 /* check if we will exceed the transaction blocks requested */
1579 tr = current->journal_info;
1580 if (tr->tr_num_buf_new + RES_STATFS +
1581 RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
1582 /* We set blks_outside_rgrp to ensure the loop will
1583 be repeated for the same rgrp, but with a new
1585 blks_outside_rgrp++;
1586 /* This next part is tricky. If the buffer was added
1587 to the transaction, we've already set some block
1588 pointers to 0, so we better follow through and free
1589 them, or we will introduce corruption (so break).
1590 This may be impossible, or at least rare, but I
1591 decided to cover the case regardless.
1593 If the buffer was not added to the transaction
1594 (this call), doing so would exceed our transaction
1595 size, so we need to end the transaction and start a
1596 new one (so goto). */
1603 gfs2_trans_add_meta(ip->i_gl, bh);
1606 if (bstart + blen == bn) {
1611 __gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
1613 gfs2_add_inode_blocks(&ip->i_inode, -blen);
1619 __gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
1621 gfs2_add_inode_blocks(&ip->i_inode, -blen);
1624 if (!ret && blks_outside_rgrp) { /* If buffer still has non-zero blocks
1625 outside the rgrp we just processed,
1626 do it all over again. */
1627 if (current->journal_info) {
1628 struct buffer_head *dibh;
1630 ret = gfs2_meta_inode_buffer(ip, &dibh);
1634 /* Every transaction boundary, we rewrite the dinode
1635 to keep its di_blocks current in case of failure. */
1636 ip->i_inode.i_mtime = ip->i_inode.i_ctime =
1637 current_time(&ip->i_inode);
1638 gfs2_trans_add_meta(ip->i_gl, dibh);
1639 gfs2_dinode_out(ip, dibh->b_data);
1641 up_write(&ip->i_rw_mutex);
1642 gfs2_trans_end(sdp);
1645 gfs2_glock_dq_uninit(rd_gh);
1653 static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h)
1655 if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0])))
1661 * find_nonnull_ptr - find a non-null pointer given a metapath and height
1662 * @sdp: The superblock
1663 * @mp: starting metapath
1664 * @h: desired height to search
1665 * @end_list: See punch_hole().
1666 * @end_aligned: See punch_hole().
1668 * Assumes the metapath is valid (with buffers) out to height h.
1669 * Returns: true if a non-null pointer was found in the metapath buffer
1670 * false if all remaining pointers are NULL in the buffer
1672 static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
1674 __u16 *end_list, unsigned int end_aligned)
1676 struct buffer_head *bh = mp->mp_bh[h];
1677 __be64 *first, *ptr, *end;
1679 first = metaptr1(h, mp);
1680 ptr = first + mp->mp_list[h];
1681 end = (__be64 *)(bh->b_data + bh->b_size);
1682 if (end_list && mp_eq_to_hgt(mp, end_list, h)) {
1683 bool keep_end = h < end_aligned;
1684 end = first + end_list[h] + keep_end;
1688 if (*ptr) { /* if we have a non-null pointer */
1689 mp->mp_list[h] = ptr - first;
1691 if (h < GFS2_MAX_META_HEIGHT)
1700 enum dealloc_states {
1701 DEALLOC_MP_FULL = 0, /* Strip a metapath with all buffers read in */
1702 DEALLOC_MP_LOWER = 1, /* lower the metapath strip height */
1703 DEALLOC_FILL_MP = 2, /* Fill in the metapath to the given height. */
1704 DEALLOC_DONE = 3, /* process complete */
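/*
 * Illustrative flow (added for clarity): punch_hole() enters at
 * DEALLOC_MP_FULL for a complete metapath (or DEALLOC_FILL_MP for a
 * partial one), sweeps the pointers at the current strip height, then
 * cycles through DEALLOC_MP_LOWER to advance within the tree or lower
 * the strip height, refilling buffers via DEALLOC_FILL_MP, until it
 * reaches DEALLOC_DONE.
 */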
1708 metapointer_range(struct metapath *mp, int height,
1709 __u16 *start_list, unsigned int start_aligned,
1710 __u16 *end_list, unsigned int end_aligned,
1711 __be64 **start, __be64 **end)
1713 struct buffer_head *bh = mp->mp_bh[height];
1716 first = metaptr1(height, mp);
1718 if (mp_eq_to_hgt(mp, start_list, height)) {
1719 bool keep_start = height < start_aligned;
1720 *start = first + start_list[height] + keep_start;
1722 *end = (__be64 *)(bh->b_data + bh->b_size);
1723 if (end_list && mp_eq_to_hgt(mp, end_list, height)) {
1724 bool keep_end = height < end_aligned;
1725 *end = first + end_list[height] + keep_end;
1729 static inline bool walk_done(struct gfs2_sbd *sdp,
1730 struct metapath *mp, int height,
1731 __u16 *end_list, unsigned int end_aligned)
1736 bool keep_end = height < end_aligned;
1737 if (!mp_eq_to_hgt(mp, end_list, height))
1739 end = end_list[height] + keep_end;
1741 end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs;
1742 return mp->mp_list[height] >= end;
1746 * punch_hole - deallocate blocks in a file
1747 * @ip: inode to truncate
1748 * @offset: the start of the hole
1749 * @length: the size of the hole (or 0 for truncate)
1751 * Punch a hole into a file or truncate a file at a given position. This
1752 * function operates in whole blocks (@offset and @length are rounded
1753 * accordingly); partially filled blocks must be cleared otherwise.
1755 * This function works from the bottom up, and from the right to the left. In
1756 * other words, it strips off the highest layer (data) before stripping any of
1757 * the metadata. Doing it this way is best in case the operation is interrupted
1758 * by power failure, etc. The dinode is rewritten in every transaction to
1759 * guarantee integrity.
1761 static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
1763 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1764 u64 maxsize = sdp->sd_heightsize[ip->i_height];
1765 struct metapath mp = {};
1766 struct buffer_head *dibh, *bh;
1767 struct gfs2_holder rd_gh;
1768 unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
1769 u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
1770 __u16 start_list[GFS2_MAX_META_HEIGHT];
1771 __u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
1772 unsigned int start_aligned, end_aligned;
1773 unsigned int strip_h = ip->i_height - 1;
1776 int mp_h; /* metapath buffers are read in to this height */
1778 __be64 *start, *end;
1780 if (offset >= maxsize) {
1782 * The starting point lies beyond the allocated meta-data;
1783 * there are no blocks to deallocate.
1789 * The start position of the hole is defined by lblock, start_list, and
1790 * start_aligned. The end position of the hole is defined by lend,
1791 * end_list, and end_aligned.
1793 * start_aligned and end_aligned define down to which height the start
1794 * and end positions are aligned to the metadata tree (i.e., the
1795 * position is a multiple of the metadata granularity at the height
1796 * above). This determines at which heights additional meta pointers
1797 * need to be preserved for the remaining data.
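 *
 * Illustrative example: for a height 3 tree with start_list =
 * { 0, 48, 0 }, start_aligned is 1; at heights 1 and 2 the hole begins
 * exactly on a sub-tree boundary, so sweeping may include the boundary
 * pointer itself, while the dinode pointer (height 0) still covers live
 * data before index 48 and must be preserved.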
1801 u64 end_offset = offset + length;
1805 * Clip the end at the maximum file size for the given height:
1806 * that's how far the metadata goes; files bigger than that
1807 * will have additional layers of indirection.
1809 if (end_offset > maxsize)
1810 end_offset = maxsize;
1811 lend = end_offset >> bsize_shift;
1816 find_metapath(sdp, lend, &mp, ip->i_height);
1817 end_list = __end_list;
1818 memcpy(end_list, mp.mp_list, sizeof(mp.mp_list));
1820 for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1827 find_metapath(sdp, lblock, &mp, ip->i_height);
1828 memcpy(start_list, mp.mp_list, sizeof(start_list));
1830 for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1831 if (start_list[mp_h])
1834 start_aligned = mp_h;
1836 ret = gfs2_meta_inode_buffer(ip, &dibh);
1841 ret = lookup_metapath(ip, &mp);
1845 /* issue read-ahead on metadata */
1846 for (mp_h = 0; mp_h < mp.mp_aheight - 1; mp_h++) {
1847 metapointer_range(&mp, mp_h, start_list, start_aligned,
1848 end_list, end_aligned, &start, &end);
1849 gfs2_metapath_ra(ip->i_gl, start, end);
1852 if (mp.mp_aheight == ip->i_height)
1853 state = DEALLOC_MP_FULL; /* We have a complete metapath */
1855 state = DEALLOC_FILL_MP; /* deal with partial metapath */
1857 ret = gfs2_rindex_update(sdp);
1861 ret = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1864 gfs2_holder_mark_uninitialized(&rd_gh);
1868 while (state != DEALLOC_DONE) {
1870 /* Truncate a full metapath at the given strip height.
1871 * Note that strip_h == mp_h in order to be in this state. */
1872 case DEALLOC_MP_FULL:
1873 bh = mp.mp_bh[mp_h];
1874 gfs2_assert_withdraw(sdp, bh);
1875 if (gfs2_assert_withdraw(sdp,
1876 prev_bnr != bh->b_blocknr)) {
1877 fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u, "
1878 "s_h:%u, mp_h:%u\n",
1879 (unsigned long long)ip->i_no_addr,
1880 prev_bnr, ip->i_height, strip_h, mp_h);
1882 prev_bnr = bh->b_blocknr;
1884 if (gfs2_metatype_check(sdp, bh,
1885 (mp_h ? GFS2_METATYPE_IN :
1886 GFS2_METATYPE_DI))) {
1892 * Below, passing end_aligned as 0 gives us the
1893 * metapointer range excluding the end point: the end
1894 * point is the first metapath we must not deallocate!
1897 metapointer_range(&mp, mp_h, start_list, start_aligned,
1898 end_list, 0 /* end_aligned */,
1900 ret = sweep_bh_for_rgrps(ip, &rd_gh, mp.mp_bh[mp_h],
1902 mp_h != ip->i_height - 1,
1905 /* If we hit an error or just swept dinode buffer,
1908 state = DEALLOC_DONE;
1911 state = DEALLOC_MP_LOWER;
1914 /* lower the metapath strip height */
1915 case DEALLOC_MP_LOWER:
1916 /* We're done with the current buffer, so release it,
1917 unless it's the dinode buffer. Then back up to the
1918 previous pointer. */
1920 brelse(mp.mp_bh[mp_h]);
1921 mp.mp_bh[mp_h] = NULL;
1923 /* If we can't get any lower in height, we've stripped
1924 off all we can. Next step is to back up and start
1925 stripping the previous level of metadata. */
1928 memcpy(mp.mp_list, start_list, sizeof(start_list));
1930 state = DEALLOC_FILL_MP;
1933 mp.mp_list[mp_h] = 0;
1934 mp_h--; /* search one metadata height down */
1936 if (walk_done(sdp, &mp, mp_h, end_list, end_aligned))
1938 /* Here we've found a part of the metapath that is not
1939 * allocated. We need to search at that height for the
1940 * next non-null pointer. */
1941 if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) {
1942 state = DEALLOC_FILL_MP;
1945 /* No more non-null pointers at this height. Back up
1946 to the previous height and try again. */
1947 break; /* loop around in the same state */
1949 /* Fill the metapath with buffers to the given height. */
1950 case DEALLOC_FILL_MP:
1951 /* Fill the buffers out to the current height. */
1952 ret = fillup_metapath(ip, &mp, mp_h);
1956 /* On the first pass, issue read-ahead on metadata. */
1957 if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) {
1958 unsigned int height = mp.mp_aheight - 1;
1960 /* No read-ahead for data blocks. */
1961 if (mp.mp_aheight - 1 == strip_h)
1964 for (; height >= mp.mp_aheight - ret; height--) {
1965 metapointer_range(&mp, height,
1966 start_list, start_aligned,
1967 end_list, end_aligned,
1969 gfs2_metapath_ra(ip->i_gl, start, end);
1973 /* If buffers found for the entire strip height */
1974 if (mp.mp_aheight - 1 == strip_h) {
1975 state = DEALLOC_MP_FULL;
1978 if (mp.mp_aheight < ip->i_height) /* We have a partial height */
1979 mp_h = mp.mp_aheight - 1;
1981 /* If we find a non-null block pointer, crawl a bit
1982 higher up in the metapath and try again, otherwise
1983 we need to look lower for a new starting point. */
1984 if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned))
1987 state = DEALLOC_MP_LOWER;
1993 if (current->journal_info == NULL) {
1994 ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS +
1998 down_write(&ip->i_rw_mutex);
2000 gfs2_statfs_change(sdp, 0, +btotal, 0);
2001 gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
2003 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2004 gfs2_trans_add_meta(ip->i_gl, dibh);
2005 gfs2_dinode_out(ip, dibh->b_data);
2006 up_write(&ip->i_rw_mutex);
2007 gfs2_trans_end(sdp);
2011 if (gfs2_holder_initialized(&rd_gh))
2012 gfs2_glock_dq_uninit(&rd_gh);
2013 if (current->journal_info) {
2014 up_write(&ip->i_rw_mutex);
2015 gfs2_trans_end(sdp);
2018 gfs2_quota_unhold(ip);
2020 release_metapath(&mp);
2024 static int trunc_end(struct gfs2_inode *ip)
2026 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2027 struct buffer_head *dibh;
2030 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2034 down_write(&ip->i_rw_mutex);
2036 error = gfs2_meta_inode_buffer(ip, &dibh);
2040 if (!i_size_read(&ip->i_inode)) {
2042 ip->i_goal = ip->i_no_addr;
2043 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
2044 gfs2_ordered_del_inode(ip);
2046 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2047 ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
2049 gfs2_trans_add_meta(ip->i_gl, dibh);
2050 gfs2_dinode_out(ip, dibh->b_data);
2054 up_write(&ip->i_rw_mutex);
2055 gfs2_trans_end(sdp);
2060 * do_shrink - make a file smaller
2062 * @newsize: the size to make the file
2064 * Called with an exclusive lock on @inode. The @newsize must
2065 * be equal to or smaller than the current inode size.
2070 static int do_shrink(struct inode *inode, u64 newsize)
2072 struct gfs2_inode *ip = GFS2_I(inode);
2075 error = trunc_start(inode, newsize);
2078 if (gfs2_is_stuffed(ip))
2081 error = punch_hole(ip, newsize, 0);
2083 error = trunc_end(ip);
2088 void gfs2_trim_blocks(struct inode *inode)
2092 ret = do_shrink(inode, inode->i_size);
2097 * do_grow - Touch and update inode size
2099 * @size: The new size
2101 * This function updates the timestamps on the inode and
2102 * may also increase the size of the inode. This function
2103 * must not be called with @size any smaller than the current inode size.
2106 * Although it is not strictly required to unstuff files here,
2107 * earlier versions of GFS2 have a bug in the stuffed file reading
2108 * code which will result in a buffer overrun if the size is larger
2109 * than the max stuffed file size. In order to prevent this from
2110 * occurring, such files are unstuffed, but in other cases we can
2111 * just update the inode size directly.
2113 * Returns: 0 on success, or -ve on error
2116 static int do_grow(struct inode *inode, u64 size)
2118 struct gfs2_inode *ip = GFS2_I(inode);
2119 struct gfs2_sbd *sdp = GFS2_SB(inode);
2120 struct gfs2_alloc_parms ap = { .target = 1, };
2121 struct buffer_head *dibh;
2125 if (gfs2_is_stuffed(ip) && size > gfs2_max_stuffed_size(ip)) {
2126 error = gfs2_quota_lock_check(ip, &ap);
2130 error = gfs2_inplace_reserve(ip, &ap);
2132 goto do_grow_qunlock;
2136 error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
2138 gfs2_is_jdata(ip) ? RES_JDATA : 0) +
2139 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
2142 goto do_grow_release;
2145 error = gfs2_unstuff_dinode(ip);
2150 error = gfs2_meta_inode_buffer(ip, &dibh);
2154 truncate_setsize(inode, size);
2155 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2156 gfs2_trans_add_meta(ip->i_gl, dibh);
2157 gfs2_dinode_out(ip, dibh->b_data);
2161 gfs2_trans_end(sdp);
2164 gfs2_inplace_release(ip);
2166 gfs2_quota_unlock(ip);
2172 * gfs2_setattr_size - make a file a given size
2174 * @newsize: the size to make the file
2176 * The file size can grow, shrink, or stay the same size. This
2177 * is called holding i_rwsem and an exclusive glock on the inode in question.
2183 int gfs2_setattr_size(struct inode *inode, u64 newsize)
2185 struct gfs2_inode *ip = GFS2_I(inode);
2188 BUG_ON(!S_ISREG(inode->i_mode));
2190 ret = inode_newsize_ok(inode, newsize);
2194 inode_dio_wait(inode);
2196 ret = gfs2_qa_get(ip);
2200 if (newsize >= inode->i_size) {
2201 ret = do_grow(inode, newsize);
2205 ret = do_shrink(inode, newsize);
2207 gfs2_rs_delete(ip, NULL);
2212 int gfs2_truncatei_resume(struct gfs2_inode *ip)
2215 error = punch_hole(ip, i_size_read(&ip->i_inode), 0);
2217 error = trunc_end(ip);
2221 int gfs2_file_dealloc(struct gfs2_inode *ip)
2223 return punch_hole(ip, 0, 0);
2227 * gfs2_free_journal_extents - Free cached journal bmap info
2232 void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
2234 struct gfs2_journal_extent *jext;
2236 while(!list_empty(&jd->extent_list)) {
2237 jext = list_first_entry(&jd->extent_list, struct gfs2_journal_extent, list);
2238 list_del(&jext->list);
2244 * gfs2_add_jextent - Add or merge a new extent to extent cache
2245 * @jd: The journal descriptor
2246 * @lblock: The logical block at start of new extent
2247 * @dblock: The physical block at start of new extent
2248 * @blocks: Size of extent in fs blocks
2250 * Returns: 0 on success or -ENOMEM
2253 static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
2255 struct gfs2_journal_extent *jext;
2257 if (!list_empty(&jd->extent_list)) {
2258 jext = list_last_entry(&jd->extent_list, struct gfs2_journal_extent, list);
2259 if ((jext->dblock + jext->blocks) == dblock) {
2260 jext->blocks += blocks;
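/*
 * Illustrative merge: an existing extent { dblock = 1000, blocks = 8 }
 * followed by a new mapping at dblock 1008 simply grows to blocks = 16.
 */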
2265 jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
2268 jext->dblock = dblock;
2269 jext->lblock = lblock;
2270 jext->blocks = blocks;
2271 list_add_tail(&jext->list, &jd->extent_list);
2277 * gfs2_map_journal_extents - Cache journal bmap info
2278 * @sdp: The super block
2279 * @jd: The journal to map
2281 * Create a reusable "extent" mapping from all logical
2282 * blocks to all physical blocks for the given journal. This will save
2283 * us time when writing journal blocks. Most journals will have only one
2284 * extent that maps all their logical blocks. That's because mkfs.gfs2
2285 * arranges the journal blocks sequentially to maximize performance.
2286 * So the extent would map the first block for the entire file length.
2287 * However, gfs2_jadd can happen while file activity is happening, so
2288 * those journals may not be sequential. Less likely is the case where
2289 * the users created their own journals by mounting the metafs and
2290 * laying it out. But it's still possible. These journals might have several extents.
2293 * Returns: 0 on success, or error on failure
2296 int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
2300 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
2301 struct buffer_head bh;
2302 unsigned int shift = sdp->sd_sb.sb_bsize_shift;
2307 start = ktime_get();
2308 lblock_stop = i_size_read(jd->jd_inode) >> shift;
2309 size = (lblock_stop - lblock) << shift;
2311 WARN_ON(!list_empty(&jd->extent_list));
2317 rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
2318 if (rc || !buffer_mapped(&bh))
2320 rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
2324 lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2328 fs_info(sdp, "journal %d mapped with %u extents in %lldms\n", jd->jd_jid,
2329 jd->nr_extents, ktime_ms_delta(end, start));
2333 fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
2335 (unsigned long long)(i_size_read(jd->jd_inode) - size),
2337 fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
2338 rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
2339 bh.b_state, (unsigned long long)bh.b_size);
2340 gfs2_free_journal_extents(jd);
2345 * gfs2_write_alloc_required - figure out if a write will require an allocation
2346 * @ip: the file being written to
2347 * @offset: the offset to write to
2348 * @len: the number of bytes being written
2350 * Returns: 1 if an alloc is required, 0 otherwise
2353 int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
2356 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2357 struct buffer_head bh;
2359 u64 lblock, lblock_stop, size;
2365 if (gfs2_is_stuffed(ip)) {
2366 if (offset + len > gfs2_max_stuffed_size(ip))
2371 shift = sdp->sd_sb.sb_bsize_shift;
2372 BUG_ON(gfs2_is_dir(ip));
2373 end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
2374 lblock = offset >> shift;
2375 lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
2376 if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex))
2379 size = (lblock_stop - lblock) << shift;
2383 gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
2384 if (!buffer_mapped(&bh))
2387 lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2393 static int stuffed_zero_range(struct inode *inode, loff_t offset, loff_t length)
2395 struct gfs2_inode *ip = GFS2_I(inode);
2396 struct buffer_head *dibh;
2399 if (offset >= inode->i_size)
2401 if (offset + length > inode->i_size)
2402 length = inode->i_size - offset;
2404 error = gfs2_meta_inode_buffer(ip, &dibh);
2407 gfs2_trans_add_meta(ip->i_gl, dibh);
2408 memset(dibh->b_data + sizeof(struct gfs2_dinode) + offset, 0,
2414 static int gfs2_journaled_truncate_range(struct inode *inode, loff_t offset,
2417 struct gfs2_sbd *sdp = GFS2_SB(inode);
2418 loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
2422 struct gfs2_trans *tr;
2427 if (chunk > max_chunk)
2430 offs = offset & ~PAGE_MASK;
2431 if (offs && chunk > PAGE_SIZE)
2432 chunk = offs + ((chunk - offs) & PAGE_MASK);
2434 truncate_pagecache_range(inode, offset, chunk);
2438 tr = current->journal_info;
2439 if (!test_bit(TR_TOUCHED, &tr->tr_flags))
2442 gfs2_trans_end(sdp);
2443 error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
2450 int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
2452 struct inode *inode = file_inode(file);
2453 struct gfs2_inode *ip = GFS2_I(inode);
2454 struct gfs2_sbd *sdp = GFS2_SB(inode);
2455 unsigned int blocksize = i_blocksize(inode);
2459 if (!gfs2_is_stuffed(ip)) {
2460 unsigned int start_off, end_len;
2462 start_off = offset & (blocksize - 1);
2463 end_len = (offset + length) & (blocksize - 1);
2465 unsigned int len = length;
2466 if (length > blocksize - start_off)
2467 len = blocksize - start_off;
2468 error = gfs2_block_zero_range(inode, offset, len);
2471 if (start_off + length < blocksize)
2475 error = gfs2_block_zero_range(inode,
2476 offset + length - end_len, end_len);
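/*
 * Worked example (illustrative, 4k blocks): offset = 1000,
 * length = 10000 zeroes bytes [1000, 4096) and [8192, 11000); the fully
 * covered block [4096, 8192) is left for punch_hole() below.
 */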
2482 start = round_down(offset, blocksize);
2483 end = round_up(offset + length, blocksize) - 1;
2484 error = filemap_write_and_wait_range(inode->i_mapping, start, end);
2488 if (gfs2_is_jdata(ip))
2489 error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
2490 GFS2_JTRUNC_REVOKES);
2492 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2496 if (gfs2_is_stuffed(ip)) {
2497 error = stuffed_zero_range(inode, offset, length);
2502 if (gfs2_is_jdata(ip)) {
2503 BUG_ON(!current->journal_info);
2504 gfs2_journaled_truncate_range(inode, offset, length);
2506 truncate_pagecache_range(inode, offset, offset + length - 1);
2508 file_update_time(file);
2509 mark_inode_dirty(inode);
2511 if (current->journal_info)
2512 gfs2_trans_end(sdp);
2514 if (!gfs2_is_stuffed(ip))
2515 error = punch_hole(ip, offset, length);
2518 if (current->journal_info)
2519 gfs2_trans_end(sdp);
2523 static int gfs2_map_blocks(struct iomap_writepage_ctx *wpc, struct inode *inode,
2528 if (WARN_ON_ONCE(gfs2_is_stuffed(GFS2_I(inode))))
2531 if (offset >= wpc->iomap.offset &&
2532 offset < wpc->iomap.offset + wpc->iomap.length)
2535 memset(&wpc->iomap, 0, sizeof(wpc->iomap));
2536 ret = gfs2_iomap_get(inode, offset, INT_MAX, &wpc->iomap);
2540 const struct iomap_writeback_ops gfs2_writeback_ops = {
2541 .map_blocks = gfs2_map_blocks,