/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/prefetch.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/random.h>

#include "trace_gfs2.h"
#define BFITNOENT ((u32)~0)
#define NO_BLOCK ((u64)~0)

#if BITS_PER_LONG == 32
#define LBITMASK   (0x55555555UL)
#define LBITSKIP55 (0x55555555UL)
#define LBITSKIP00 (0x00000000UL)
#else
#define LBITMASK   (0x5555555555555555UL)
#define LBITSKIP55 (0x5555555555555555UL)
#define LBITSKIP00 (0x0000000000000000UL)
#endif
/*
 * These routines are used by the resource group routines (rgrp.c)
 * to keep track of block allocation. Each block is represented by two
 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
 *
 * 0 = Free
 * 1 = Used (not metadata)
 * 2 = Unlinked (still in use) inode
 * 3 = Used (metadata)
 */

static const char valid_change[16] = {
static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
                         const struct gfs2_inode *ip, bool nowrap);
/**
 * gfs2_setbit - Set a bit in the bitmaps
 * @rbm: The position of the bit to set
 * @do_clone: Also set the clone bitmap, if it exists
 * @new_state: the new state of the block
 */

static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
                               unsigned char new_state)
        unsigned char *byte1, *byte2, *end, cur_state;
        struct gfs2_bitmap *bi = rbm_bi(rbm);
        unsigned int buflen = bi->bi_len;
        const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;

        byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
        end = bi->bi_bh->b_data + bi->bi_offset + buflen;
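        /*
         * Each byte of bitmap packs GFS2_NBBY (4) two-bit block states;
         * @bit is the shift of this block's pair within its byte (0, 2,
         * 4 or 6), as used for the extraction just below.
         */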
        cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;

        if (unlikely(!valid_change[new_state * 4 + cur_state])) {
                pr_warn("buf_blk = 0x%x old_state=%d, new_state=%d\n",
                        rbm->offset, cur_state, new_state);
                pr_warn("rgrp=0x%llx bi_start=0x%x\n",
                        (unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
                pr_warn("bi_offset=0x%x bi_len=0x%x\n",
                        bi->bi_offset, bi->bi_len);
                gfs2_consist_rgrpd(rbm->rgd);
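        /*
         * XOR with (cur_state ^ new_state) flips only the bits that differ,
         * so the two-bit entry is rewritten in place without disturbing the
         * other three block states stored in the same byte.
         */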
        *byte1 ^= (cur_state ^ new_state) << bit;

        if (do_clone && bi->bi_clone) {
                byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
                cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
                *byte2 ^= (cur_state ^ new_state) << bit;
/**
 * gfs2_testbit - test a bit in the bitmaps
 * @rbm: The bit to test
 *
 * Returns: The two bit block state of the requested bit
 */

static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
        struct gfs2_bitmap *bi = rbm_bi(rbm);
        const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;

        byte = buffer + (rbm->offset / GFS2_NBBY);
        bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;

        return (*byte >> bit) & GFS2_BIT_MASK;
/**
 * gfs2_bit_search - search bitmap data for a state
 * @ptr: Pointer to bitmap data
 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
 * @state: The state we are searching for
 *
 * We xor the bitmap data with a pattern which is the bitwise opposite
 * of what we are looking for. This gives rise to a pattern of ones
 * wherever there is a match. Since we have two bits per entry, we
 * take this pattern, shift it down by one place and then and it with
 * the original. All the even bit positions (0,2,4, etc) then represent
 * successful matches, so we mask with 0x55555..... to remove the unwanted
 * odd bit positions.
 *
 * This allows searching of a whole u64 at once (32 blocks) with a
 * single test (on 64 bit arches).
 */
static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
        static const u64 search[] = {
                [0] = 0xffffffffffffffffULL,
                [1] = 0xaaaaaaaaaaaaaaaaULL,
                [2] = 0x5555555555555555ULL,
                [3] = 0x0000000000000000ULL,
        };

        tmp = le64_to_cpu(*ptr) ^ search[state];
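        /*
         * Worked example (illustrative): searching for GFS2_BLKST_UNLINKED
         * (2, binary 10), search[2] is 01 repeated, so every two-bit entry
         * equal to 10 xors to 11. Folding tmp with (tmp >> 1) and masking
         * with 0x5555... (the steps described above) then leaves a single
         * set bit at the low position of each matching entry.
         */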
/**
 * rs_cmp - multi-block reservation range compare
 * @blk: absolute file system block number of the new reservation
 * @len: number of blocks in the new reservation
 * @rs: existing reservation to compare against
 *
 * returns: 1 if the block range is beyond the reach of the reservation
 *         -1 if the block range is before the start of the reservation
 *          0 if the block range overlaps with the reservation
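 *
 * For example (illustrative numbers): a new range blk=100, len=5 checked
 * against a reservation starting at block 103 overlaps and returns 0,
 * since blk + len - 1 = 104 >= 103; against a reservation starting at
 * block 105 it returns -1.
 */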
static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
        u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);

        if (blk >= startblk + rs->rs_free)
                return 1;
        if (blk + len - 1 < startblk)
                return -1;
        return 0;
/**
 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
 *       a block in a given allocation state.
 * @buf: the buffer that holds the bitmaps
 * @len: the length (in bytes) of the buffer
 * @goal: start search at this block's bit-pair (within @buffer)
 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
 *
 * Scope of @goal and returned block number is only within this bitmap buffer,
 * not entire rgrp or filesystem. @buffer will be offset from the actual
 * beginning of a bitmap block buffer, skipping any header structures, but
 * headers are always a multiple of 64 bits long so that the buffer is
 * always aligned to a 64 bit boundary.
 *
 * The size of the buffer is in bytes, but it is assumed that it is
 * always ok to read a complete multiple of 64 bits at the end
 * of the block in case the end is not aligned to a natural boundary.
 *
 * Return: the block number (bitmap buffer scope) that was found
 */
static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
                       u32 goal, u8 state)
        u32 spoint = (goal << 1) & ((8 * sizeof(u64)) - 1);
        const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
        const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
        u64 tmp;
        u64 mask = 0x5555555555555555ULL;

        /* Mask off bits we don't care about at the start of the search */
        mask <<= spoint;
        tmp = gfs2_bit_search(ptr, mask, state);
        ptr++;
        while (tmp == 0 && ptr < end) {
                tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
                ptr++;
        }
        /* Mask off any bits which are more than len bytes from the start */
        if (ptr == end && (len & (sizeof(u64) - 1)))
                tmp &= (((u64)~0) >> (64 - 8 * (len & (sizeof(u64) - 1))));
        /* Didn't find anything, so return */
        if (tmp == 0)
                return BFITNOENT;
        ptr--;
        bit = __ffs64(tmp);
        bit /= 2; /* two bits per entry in the bitmap */
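        /*
         * Convert back to bitmap-buffer scope: each whole byte scanned
         * covers GFS2_NBBY (4) blocks, plus the index of the matching
         * two-bit entry within the final u64 word.
         */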
        return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
/**
 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
 * @rbm: The rbm with rgd already set correctly
 * @block: The block number (filesystem relative)
 *
 * This sets the bi and offset members of an rbm based on a
 * resource group and a filesystem relative block number. The
 * resource group must be set in the rbm on entry, the bi and
 * offset members will be set by this function.
 *
 * Returns: 0 on success, or an error code
 */
static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
        u64 rblock = block - rbm->rgd->rd_data0;

        if (WARN_ON_ONCE(rblock > UINT_MAX))
                return -EINVAL;
        if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
                return -E2BIG;

        rbm->offset = (u32)(rblock);
        /* Check if the block is within the first block */
        if (rbm->offset < rbm_bi(rbm)->bi_blocks)
                return 0;

        /* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
        rbm->offset += (sizeof(struct gfs2_rgrp) -
                        sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
        rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
        rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
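        /*
         * bii now indexes the bitmap block holding the target and offset is
         * the block's position within that bitmap; the adjustment above
         * accounts for the first block's larger gfs2_rgrp header compared
         * with the gfs2_meta_header of subsequent bitmap blocks.
         */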
/**
 * gfs2_rbm_incr - increment an rbm structure
 * @rbm: The rbm with rgd already set correctly
 *
 * This function takes an existing rbm structure and increments it to the next
 * viable block offset.
 *
 * Returns: If incrementing the offset would cause the rbm to go past the
 *          end of the rgrp, true is returned, otherwise false.
 */
static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
        if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
                rbm->offset++;
                return false;
        }
        if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
                return true;
/**
 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
 * @rbm: Position to search (value/result)
 * @n_unaligned: Number of unaligned blocks to check
 * @len: Decremented for each block found (terminate on zero)
 *
 * Returns: true if a non-free block is encountered
 */
static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
        for (n = 0; n < n_unaligned; n++) {
                res = gfs2_testbit(rbm);
                if (res != GFS2_BLKST_FREE)
                        return true;
                (*len)--;
                if (gfs2_rbm_incr(rbm))
                        return true;
/**
 * gfs2_free_extlen - Return extent length of free blocks
 * @rrbm: Starting position
 * @len: Max length to check
 *
 * Starting at the block specified by the rbm, see how many free blocks
 * there are, not reading more than len blocks ahead. This can be done
 * using memchr_inv when the blocks are byte aligned, but has to be done
 * on a block by block basis in case of unaligned blocks. Also this
 * function can cope with bitmap boundaries (although it must stop on
 * a resource group boundary)
 *
 * Returns: Number of free blocks in the extent
 */
static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
        struct gfs2_rbm rbm = *rrbm;
        u32 n_unaligned = rbm.offset & 3;
        u8 *ptr, *start, *end;
        struct gfs2_bitmap *bi;

        if (n_unaligned &&
            gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
                goto out;

        n_unaligned = len & 3;
        /* Start is now byte aligned */
                start = bi->bi_bh->b_data;
                if (bi->bi_clone)
                        start = bi->bi_clone;
                end = start + bi->bi_bh->b_size;
                start += bi->bi_offset;
                BUG_ON(rbm.offset & 3);
                start += (rbm.offset / GFS2_NBBY);
                bytes = min_t(u32, len / GFS2_NBBY, (end - start));
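                /*
                 * GFS2_BLKST_FREE is zero, so a bitmap byte of 0 means all
                 * four blocks it covers are free; memchr_inv() below finds
                 * the first non-zero byte, i.e. the end of the free run.
                 */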
                ptr = memchr_inv(start, 0, bytes);
                chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
                chunk_size *= GFS2_NBBY;
                BUG_ON(len < chunk_size);
                len -= chunk_size;
                block = gfs2_rbm_to_block(&rbm);
                if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
                        n_unaligned = 0;
                        break;
                }
                n_unaligned = len & 3;
        /* Deal with any bits left over at the end */
        if (n_unaligned)
                gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
/**
 * gfs2_bitcount - count the number of bits in a certain state
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @state: the state of the block we're looking for
 *
 * Returns: The number of bits
 */
static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
                         unsigned int buflen, u8 state)
        const u8 *byte = buffer;
        const u8 *end = buffer + buflen;
        const u8 state1 = state << 2;
        const u8 state2 = state << 4;
        const u8 state3 = state << 6;
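        /*
         * Each byte holds four two-bit states, so the target state is
         * pre-shifted to all four positions (bits 0-1, 2-3, 4-5, 6-7)
         * and every position is tested per byte in the loop below.
         */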
        for (; byte < end; byte++) {
                if (((*byte) & 0x03) == state)
                        count++;
                if (((*byte) & 0x0C) == state1)
                        count++;
                if (((*byte) & 0x30) == state2)
                        count++;
                if (((*byte) & 0xC0) == state3)
                        count++;
        }

        return count;
/**
 * gfs2_rgrp_verify - Verify that a resource group is consistent
 * @rgd: the rgrp
 */

void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
        struct gfs2_sbd *sdp = rgd->rd_sbd;
        struct gfs2_bitmap *bi = NULL;
        u32 length = rgd->rd_length;
        u32 count[4], tmp;
        int buf, x;

        memset(count, 0, 4 * sizeof(u32));

        /* Count # blocks in each of 4 possible allocation states */
        for (buf = 0; buf < length; buf++) {
                bi = rgd->rd_bits + buf;
                for (x = 0; x < 4; x++)
                        count[x] += gfs2_bitcount(rgd,
                                                  bi->bi_bh->b_data +
                                                  bi->bi_offset,
                                                  bi->bi_len, x);
        }
        if (count[0] != rgd->rd_free) {
                if (gfs2_consist_rgrpd(rgd))
                        fs_err(sdp, "free data mismatch: %u != %u\n",
                               count[0], rgd->rd_free);
                return;
        }

        tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
        if (count[1] != tmp) {
                if (gfs2_consist_rgrpd(rgd))
                        fs_err(sdp, "used data mismatch: %u != %u\n",
                               count[1], tmp);
                return;
        }

        if (count[2] + count[3] != rgd->rd_dinodes) {
                if (gfs2_consist_rgrpd(rgd))
                        fs_err(sdp, "used metadata mismatch: %u != %u\n",
                               count[2] + count[3], rgd->rd_dinodes);
                return;
        }
/**
 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
 * @sdp: The GFS2 superblock
 * @blk: The data block number
 * @exact: True if this needs to be an exact match
 *
 * Returns: The resource group, or NULL if not found
 */
struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
        struct rb_node *n, *next;
        struct gfs2_rgrpd *cur;

        spin_lock(&sdp->sd_rindex_spin);
        n = sdp->sd_rindex_tree.rb_node;
        while (n) {
                cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
                next = NULL;
                if (blk < cur->rd_addr)
                        next = n->rb_left;
                else if (blk >= cur->rd_data0 + cur->rd_data)
                        next = n->rb_right;
                if (next == NULL) {
                        spin_unlock(&sdp->sd_rindex_spin);
                        if (exact && (blk < cur->rd_addr ||
                                      blk >= cur->rd_data0 + cur->rd_data))
                                return NULL;
                        return cur;
                }
                n = next;
        }
        spin_unlock(&sdp->sd_rindex_spin);
        return NULL;
/**
 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
 * @sdp: The GFS2 superblock
 *
 * Returns: The first rgrp in the filesystem
 */
struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
        const struct rb_node *n;
        struct gfs2_rgrpd *rgd;

        spin_lock(&sdp->sd_rindex_spin);
        n = rb_first(&sdp->sd_rindex_tree);
        rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
        spin_unlock(&sdp->sd_rindex_spin);

        return rgd;
/**
 * gfs2_rgrpd_get_next - get the next RG
 * @rgd: the resource group descriptor
 *
 * Returns: The next rgrp
 */
struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
        struct gfs2_sbd *sdp = rgd->rd_sbd;
        const struct rb_node *n;

        spin_lock(&sdp->sd_rindex_spin);
        n = rb_next(&rgd->rd_node);
        if (n == NULL)
                n = rb_first(&sdp->sd_rindex_tree);

        if (unlikely(&rgd->rd_node == n)) {
                spin_unlock(&sdp->sd_rindex_spin);
                return NULL;
        }
        rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
        spin_unlock(&sdp->sd_rindex_spin);
        return rgd;
void check_and_update_goal(struct gfs2_inode *ip)
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

        if (!ip->i_goal || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL)
                ip->i_goal = ip->i_no_addr;
void gfs2_free_clones(struct gfs2_rgrpd *rgd)
        for (x = 0; x < rgd->rd_length; x++) {
                struct gfs2_bitmap *bi = rgd->rd_bits + x;
                kfree(bi->bi_clone);
                bi->bi_clone = NULL;
        }
/**
 * gfs2_rsqa_alloc - make sure we have a reservation assigned to the inode
 *                   plus a quota allocations data structure, if necessary
 * @ip: the inode for this reservation
 */

int gfs2_rsqa_alloc(struct gfs2_inode *ip)
        return gfs2_qa_alloc(ip);
static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
        gfs2_print_dbg(seq, "  B: n:%llu s:%llu b:%u f:%u\n",
                       (unsigned long long)rs->rs_inum,
                       (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
                       rs->rs_rbm.offset, rs->rs_free);
/**
 * __rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
 */
static void __rs_deltree(struct gfs2_blkreserv *rs)
        struct gfs2_rgrpd *rgd;

        if (!gfs2_rs_active(rs))
                return;

        rgd = rs->rs_rbm.rgd;
        trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
        rb_erase(&rs->rs_node, &rgd->rd_rstree);
        RB_CLEAR_NODE(&rs->rs_node);

        if (rs->rs_free) {
                struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);

                /* return reserved blocks to the rgrp */
                BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
                rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
                /*
                 * The rgrp extent failure point is likely not to increase;
                 * it will only do so if the freed blocks are somehow
                 * contiguous with a span of free blocks that follows. Still,
                 * it will force the number to be recalculated later.
                 */
                rgd->rd_extfail_pt += rs->rs_free;
                rs->rs_free = 0;
                clear_bit(GBF_FULL, &bi->bi_flags);
        }
/**
 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
 */
void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
        struct gfs2_rgrpd *rgd;

        rgd = rs->rs_rbm.rgd;
        if (rgd) {
                spin_lock(&rgd->rd_rsspin);
                __rs_deltree(rs);
                spin_unlock(&rgd->rd_rsspin);
        }
/**
 * gfs2_rsqa_delete - delete a multi-block reservation and quota allocation
 * @ip: The inode for this reservation
 * @wcount: The inode's write count, or NULL
 */
void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount)
        down_write(&ip->i_rw_mutex);
        if ((wcount == NULL) || (atomic_read(wcount) <= 1))
                gfs2_rs_deltree(&ip->i_res);
        up_write(&ip->i_rw_mutex);
        gfs2_qa_delete(ip, wcount);
/**
 * return_all_reservations - return all reserved blocks back to the rgrp.
 * @rgd: the rgrp that needs its space back
 *
 * We previously reserved a bunch of blocks for allocation. Now we need to
 * give them back. This leaves the reservation structures intact, but removes
 * all of their corresponding "no-fly zones".
 */
static void return_all_reservations(struct gfs2_rgrpd *rgd)
        struct rb_node *n;
        struct gfs2_blkreserv *rs;

        spin_lock(&rgd->rd_rsspin);
        while ((n = rb_first(&rgd->rd_rstree))) {
                rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
                __rs_deltree(rs);
        }
        spin_unlock(&rgd->rd_rsspin);
void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
        struct rb_node *n;
        struct gfs2_rgrpd *rgd;
        struct gfs2_glock *gl;

        while ((n = rb_first(&sdp->sd_rindex_tree))) {
                rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
                gl = rgd->rd_gl;

                rb_erase(n, &sdp->sd_rindex_tree);

                if (gl) {
                        glock_clear_object(gl, rgd);
                        gfs2_rgrp_brelse(rgd);
                        gfs2_glock_put(gl);
                }

                gfs2_free_clones(rgd);
                kfree(rgd->rd_bits);
                return_all_reservations(rgd);
                kmem_cache_free(gfs2_rgrpd_cachep, rgd);
        }
static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
        pr_info("ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
        pr_info("ri_length = %u\n", rgd->rd_length);
        pr_info("ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
        pr_info("ri_data = %u\n", rgd->rd_data);
        pr_info("ri_bitbytes = %u\n", rgd->rd_bitbytes);
/**
 * compute_bitstructs - Compute the bitmap sizes
 * @rgd: The resource group descriptor
 *
 * Calculates bitmap descriptors, one for each block that contains bitmap data
 *
 * Returns: errno
 */

static int compute_bitstructs(struct gfs2_rgrpd *rgd)
        struct gfs2_sbd *sdp = rgd->rd_sbd;
        struct gfs2_bitmap *bi;
        u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
        u32 bytes_left, bytes;
        int x;

        rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
        if (!rgd->rd_bits)
                return -ENOMEM;

        bytes_left = rgd->rd_bitbytes;
        for (x = 0; x < length; x++) {
                bi = rgd->rd_bits + x;

                /* small rgrp; bitmap stored completely in header block */
                if (length == 1) {
                        bytes = bytes_left;
                        bi->bi_offset = sizeof(struct gfs2_rgrp);
                        bi->bi_len = bytes;
                        bi->bi_blocks = bytes * GFS2_NBBY;
                /* header block */
                } else if (x == 0) {
                        bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
                        bi->bi_offset = sizeof(struct gfs2_rgrp);
                        bi->bi_len = bytes;
                        bi->bi_blocks = bytes * GFS2_NBBY;
                /* last block */
                } else if (x + 1 == length) {
                        bytes = bytes_left;
                        bi->bi_offset = sizeof(struct gfs2_meta_header);
                        bi->bi_start = rgd->rd_bitbytes - bytes_left;
                        bi->bi_len = bytes;
                        bi->bi_blocks = bytes * GFS2_NBBY;
                /* other blocks */
                } else {
                        bytes = sdp->sd_sb.sb_bsize -
                                sizeof(struct gfs2_meta_header);
                        bi->bi_offset = sizeof(struct gfs2_meta_header);
                        bi->bi_start = rgd->rd_bitbytes - bytes_left;
                        bi->bi_len = bytes;
                        bi->bi_blocks = bytes * GFS2_NBBY;
                }

                bytes_left -= bytes;
        }
        if (bytes_left) {
                gfs2_consist_rgrpd(rgd);
                return -EIO;
        }
        bi = rgd->rd_bits + (length - 1);
        if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
                if (gfs2_consist_rgrpd(rgd)) {
                        gfs2_rindex_print(rgd);
                        fs_err(sdp, "start=%u len=%u offset=%u\n",
                               bi->bi_start, bi->bi_len, bi->bi_offset);
                }
                return -EIO;
        }
        return 0;
/**
 * gfs2_ri_total - Total up the file system space, according to the rindex.
 * @sdp: the filesystem
 */

u64 gfs2_ri_total(struct gfs2_sbd *sdp)
        u64 total_data = 0;
        struct inode *inode = sdp->sd_rindex;
        struct gfs2_inode *ip = GFS2_I(inode);
        char buf[sizeof(struct gfs2_rindex)];
        int error, rgrps;

        for (rgrps = 0;; rgrps++) {
                loff_t pos = rgrps * sizeof(struct gfs2_rindex);

                if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
                        break;
                error = gfs2_internal_read(ip, buf, &pos,
                                           sizeof(struct gfs2_rindex));
                if (error != sizeof(struct gfs2_rindex))
                        break;
                total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
        }
        return total_data;
static int rgd_insert(struct gfs2_rgrpd *rgd)
        struct gfs2_sbd *sdp = rgd->rd_sbd;
        struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;

        /* Figure out where to put new node */
        while (*newn) {
                struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
                                                  rd_node);

                parent = *newn;
                if (rgd->rd_addr < cur->rd_addr)
                        newn = &((*newn)->rb_left);
                else if (rgd->rd_addr > cur->rd_addr)
                        newn = &((*newn)->rb_right);
                else
                        return -EEXIST;
        }

        rb_link_node(&rgd->rd_node, parent, newn);
        rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
        return 0;
/**
 * read_rindex_entry - Pull in a new resource index entry from the disk
 * @ip: Pointer to the rindex inode
 *
 * Returns: 0 on success, > 0 on EOF, error code otherwise
 */
static int read_rindex_entry(struct gfs2_inode *ip)
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        const unsigned bsize = sdp->sd_sb.sb_bsize;
        loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
        struct gfs2_rindex buf;
        int error;
        struct gfs2_rgrpd *rgd;

        if (pos >= i_size_read(&ip->i_inode))
                return 1;

        error = gfs2_internal_read(ip, (char *)&buf, &pos,
                                   sizeof(struct gfs2_rindex));

        if (error != sizeof(struct gfs2_rindex))
                return (error == 0) ? 1 : error;
        rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
        if (!rgd)
                return -ENOMEM;

        rgd->rd_sbd = sdp;
        rgd->rd_addr = be64_to_cpu(buf.ri_addr);
        rgd->rd_length = be32_to_cpu(buf.ri_length);
        rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
        rgd->rd_data = be32_to_cpu(buf.ri_data);
        rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
        spin_lock_init(&rgd->rd_rsspin);
        error = compute_bitstructs(rgd);
        if (error)
                goto fail;

        error = gfs2_glock_get(sdp, rgd->rd_addr,
                               &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
        if (error)
                goto fail;

        rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
        rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
        if (rgd->rd_data > sdp->sd_max_rg_data)
                sdp->sd_max_rg_data = rgd->rd_data;
        spin_lock(&sdp->sd_rindex_spin);
        error = rgd_insert(rgd);
        spin_unlock(&sdp->sd_rindex_spin);
        if (!error) {
                glock_set_object(rgd->rd_gl, rgd);
                rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
                rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
                                                    rgd->rd_length) * bsize) - 1;
                return 0;
        }

        error = 0; /* someone else read in the rgrp; free it and ignore it */
        gfs2_glock_put(rgd->rd_gl);

fail:
        kmem_cache_free(gfs2_rgrpd_cachep, rgd);
        return error;
/**
 * set_rgrp_preferences - Run all the rgrps, selecting some we prefer to use
 * @sdp: the GFS2 superblock
 *
 * The purpose of this function is to select a subset of the resource groups
 * and mark them as PREFERRED. We do it in such a way that each node prefers
 * to use a unique set of rgrps to minimize glock contention.
 */
static void set_rgrp_preferences(struct gfs2_sbd *sdp)
        struct gfs2_rgrpd *rgd, *first;
        int i;

        /* Skip an initial number of rgrps, based on this node's journal ID.
           That should start each node out on its own set. */
        rgd = gfs2_rgrpd_get_first(sdp);
        for (i = 0; i < sdp->sd_lockstruct.ls_jid; i++)
                rgd = gfs2_rgrpd_get_next(rgd);
        first = rgd;

        do {
                rgd->rd_flags |= GFS2_RDF_PREFERRED;
                for (i = 0; i < sdp->sd_journals; i++) {
                        rgd = gfs2_rgrpd_get_next(rgd);
                        if (!rgd || rgd == first)
                                break;
                }
        } while (rgd && rgd != first);
/**
 * gfs2_ri_update - Pull in a new resource index from the disk
 * @ip: pointer to the rindex inode
 *
 * Returns: 0 on successful update, error code otherwise
 */
static int gfs2_ri_update(struct gfs2_inode *ip)
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        int error;

        do {
                error = read_rindex_entry(ip);
        } while (error == 0);

        if (error < 0)
                return error;

        set_rgrp_preferences(sdp);

        sdp->sd_rindex_uptodate = 1;
        return 0;
/**
 * gfs2_rindex_update - Update the rindex if required
 * @sdp: The GFS2 superblock
 *
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation. We keep this lock
 * for quite long periods of time compared to other locks. This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
 *
 * This makes sure that we're using the latest copy of the resource index
 * special file, which might have been updated if someone expanded the
 * filesystem (via gfs2_grow utility), which adds new resource groups.
 *
 * Returns: 0 on success, error code otherwise
 */
int gfs2_rindex_update(struct gfs2_sbd *sdp)
        struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
        struct gfs2_glock *gl = ip->i_gl;
        struct gfs2_holder ri_gh;
        int error = 0;
        int unlock_required = 0;

        /* Read new copy from disk if we don't have the latest */
        if (!sdp->sd_rindex_uptodate) {
                if (!gfs2_glock_is_locked_by_me(gl)) {
                        error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
                        if (error)
                                return error;
                        unlock_required = 1;
                }
                if (!sdp->sd_rindex_uptodate)
                        error = gfs2_ri_update(ip);
                if (unlock_required)
                        gfs2_glock_dq_uninit(&ri_gh);
        }

        return error;
static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
        const struct gfs2_rgrp *str = buf;
        u32 rg_flags;

        rg_flags = be32_to_cpu(str->rg_flags);
        rg_flags &= ~GFS2_RDF_MASK;
        rgd->rd_flags &= GFS2_RDF_MASK;
        rgd->rd_flags |= rg_flags;
        rgd->rd_free = be32_to_cpu(str->rg_free);
        rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
        rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
        struct gfs2_rgrp *str = buf;

        str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
        str->rg_free = cpu_to_be32(rgd->rd_free);
        str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
        str->__pad = cpu_to_be32(0);
        str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
        memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
        struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
        struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;

        if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
            rgl->rl_dinodes != str->rg_dinodes ||
            rgl->rl_igeneration != str->rg_igeneration)
                return 0;
        return 1;
static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
        const struct gfs2_rgrp *str = buf;

        rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
        rgl->rl_flags = str->rg_flags;
        rgl->rl_free = str->rg_free;
        rgl->rl_dinodes = str->rg_dinodes;
        rgl->rl_igeneration = str->rg_igeneration;
static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
        struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
        u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;

        rgl->rl_unlinked = cpu_to_be32(unlinked);
static u32 count_unlinked(struct gfs2_rgrpd *rgd)
        struct gfs2_bitmap *bi;
        const u32 length = rgd->rd_length;
        const u8 *buffer = NULL;
        u32 i, goal, count = 0;

        for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
                goal = 0;
                buffer = bi->bi_bh->b_data + bi->bi_offset;
                WARN_ON(!buffer_uptodate(bi->bi_bh));
                while (goal < bi->bi_len * GFS2_NBBY) {
                        goal = gfs2_bitfit(buffer, bi->bi_len, goal,
                                           GFS2_BLKST_UNLINKED);
                        if (goal == BFITNOENT)
                                break;
                        count++;
                        goal++;
                }
        }

        return count;
/**
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
 *
 * Returns: errno
 */

static int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
        struct gfs2_sbd *sdp = rgd->rd_sbd;
        struct gfs2_glock *gl = rgd->rd_gl;
        unsigned int length = rgd->rd_length;
        struct gfs2_bitmap *bi;
        unsigned int x, y;
        int error;

        if (rgd->rd_bits[0].bi_bh != NULL)
                return 0;

        for (x = 0; x < length; x++) {
                bi = rgd->rd_bits + x;
                error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, 0, &bi->bi_bh);
                if (error)
                        goto fail;
        }

        for (y = length; y--;) {
                bi = rgd->rd_bits + y;
                error = gfs2_meta_wait(sdp, bi->bi_bh);
                if (error)
                        goto fail;
                if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
                                                            GFS2_METATYPE_RG)) {
                        error = -EIO;
                        goto fail;
                }
        }
        if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
                for (x = 0; x < length; x++)
                        clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
                gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
                rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
                rgd->rd_free_clone = rgd->rd_free;
                /* max out the rgrp allocation failure point */
                rgd->rd_extfail_pt = rgd->rd_free;
        }
        if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
                rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
                gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
                                     rgd->rd_bits[0].bi_bh->b_data);
        } else if (sdp->sd_args.ar_rgrplvb) {
                if (!gfs2_rgrp_lvb_valid(rgd)) {
                        gfs2_consist_rgrpd(rgd);
                        return -EIO;
                }
                if (rgd->rd_rgl->rl_unlinked == 0)
                        rgd->rd_flags &= ~GFS2_RDF_CHECK;
        }
        return 0;
fail:
        while (x--) {
                bi = rgd->rd_bits + x;
                brelse(bi->bi_bh);
                bi->bi_bh = NULL;
                gfs2_assert_warn(sdp, !bi->bi_clone);
        }
        return error;
static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
        u32 rl_flags;

        if (rgd->rd_flags & GFS2_RDF_UPTODATE)
                return 0;

        if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
                return gfs2_rgrp_bh_get(rgd);

        rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
        rl_flags &= ~GFS2_RDF_MASK;
        rgd->rd_flags &= GFS2_RDF_MASK;
        rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
        if (rgd->rd_rgl->rl_unlinked == 0)
                rgd->rd_flags &= ~GFS2_RDF_CHECK;
        rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
        rgd->rd_free_clone = rgd->rd_free;
        rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
        rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
        return 0;
int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
        struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
        struct gfs2_sbd *sdp = rgd->rd_sbd;

        if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
                return 0;
        return gfs2_rgrp_bh_get(rgd);
/**
 * gfs2_rgrp_brelse - Release RG bitmaps read in with gfs2_rgrp_bh_get()
 * @rgd: The resource group
 */

void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd)
        int x, length = rgd->rd_length;

        for (x = 0; x < length; x++) {
                struct gfs2_bitmap *bi = rgd->rd_bits + x;
                if (bi->bi_bh) {
                        brelse(bi->bi_bh);
                        bi->bi_bh = NULL;
                }
        }
/**
 * gfs2_rgrp_go_unlock - Unlock a rgrp glock
 * @gh: The glock holder for the resource group
 */
void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
        struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
        int demote_requested = test_bit(GLF_DEMOTE, &gh->gh_gl->gl_flags) |
                               test_bit(GLF_PENDING_DEMOTE, &gh->gh_gl->gl_flags);

        if (rgd && demote_requested)
                gfs2_rgrp_brelse(rgd);
int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
                            struct buffer_head *bh,
                            const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
        struct super_block *sb = sdp->sd_vfs;
        u64 blk;
        sector_t start = 0;
        sector_t nr_blks = 0;
        int rv;
        unsigned int x;
        u32 trimmed = 0;
        u8 diff;
        for (x = 0; x < bi->bi_len; x++) {
                const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
                clone += bi->bi_offset;
                clone += x;
                if (bh) {
                        const u8 *orig = bh->b_data + bi->bi_offset + x;
                        diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
                } else {
                        diff = ~(*clone | (*clone >> 1));
                }
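                /*
                 * Bits set in diff mark blocks that are free in the updated
                 * bitmap but were still allocated in the clone, i.e. blocks
                 * freed since the clone was taken, which are safe to discard.
                 * With no bh, any block currently free in the clone is a
                 * candidate.
                 */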
                diff &= 0x55;
                if (diff == 0)
                        continue;
                blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
                while (diff) {
                        if (diff & 1) {
                                if (nr_blks == 0)
                                        goto start_new_extent;
                                if ((start + nr_blks) != blk) {
                                        if (nr_blks >= minlen) {
                                                rv = sb_issue_discard(sb,
                                                        start, nr_blks,
                                                        GFP_NOFS, 0);
                                                if (rv)
                                                        goto fail;
                                                trimmed += nr_blks;
                                        }
                                        nr_blks = 0;
start_new_extent:
                                        start = blk;
                                }
                                nr_blks++;
                        }
                        diff >>= 2;
                        blk++;
                }
        }
        if (nr_blks >= minlen) {
                rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0);
                if (rv)
                        goto fail;
                trimmed += nr_blks;
        }
        if (ptrimmed)
                *ptrimmed = trimmed;
        return 0;

fail:
        if (sdp->sd_args.ar_discard)
                fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv);
        sdp->sd_args.ar_discard = 0;
        return -EIO;
/**
 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
 * @filp: Any file on the filesystem
 * @argp: Pointer to the arguments (also used to pass result)
 *
 * Returns: 0 on success, otherwise error code
 */
int gfs2_fitrim(struct file *filp, void __user *argp)
        struct inode *inode = file_inode(filp);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
        struct buffer_head *bh;
        struct gfs2_rgrpd *rgd;
        struct gfs2_rgrpd *rgd_end;
        struct gfs2_holder gh;
        struct fstrim_range r;
        int ret = 0;
        u64 amt;
        u64 trimmed = 0;
        u64 start, end, minlen;
        unsigned int x;
        unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (!blk_queue_discard(q))
                return -EOPNOTSUPP;

        if (copy_from_user(&r, argp, sizeof(r)))
                return -EFAULT;

        ret = gfs2_rindex_update(sdp);
        if (ret)
                return ret;

        start = r.start >> bs_shift;
        end = start + (r.len >> bs_shift);
        minlen = max_t(u64, r.minlen,
                       q->limits.discard_granularity) >> bs_shift;

        if (end <= start || minlen > sdp->sd_max_rg_data)
                return -EINVAL;
        rgd = gfs2_blk2rgrpd(sdp, start, 0);
        rgd_end = gfs2_blk2rgrpd(sdp, end, 0);

        if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end)) &&
            (start > rgd_end->rd_data0 + rgd_end->rd_data))
                return -EINVAL; /* start is beyond the end of the fs */
        while (1) {
                ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
                if (ret)
                        goto out;

                if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
                        /* Trim each bitmap in the rgrp */
                        for (x = 0; x < rgd->rd_length; x++) {
                                struct gfs2_bitmap *bi = rgd->rd_bits + x;
                                ret = gfs2_rgrp_send_discards(sdp,
                                                rgd->rd_data0, NULL, bi, minlen,
                                                &amt);
                                if (ret) {
                                        gfs2_glock_dq_uninit(&gh);
                                        goto out;
                                }
                                trimmed += amt;
                        }

                        /* Mark rgrp as having been trimmed */
                        ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
                        if (ret == 0) {
                                bh = rgd->rd_bits[0].bi_bh;
                                rgd->rd_flags |= GFS2_RGF_TRIMMED;
                                gfs2_trans_add_meta(rgd->rd_gl, bh);
                                gfs2_rgrp_out(rgd, bh->b_data);
                                gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
                                gfs2_trans_end(sdp);
                        }
                }
                gfs2_glock_dq_uninit(&gh);

                if (rgd == rgd_end)
                        break;

                rgd = gfs2_rgrpd_get_next(rgd);
        }

out:
        r.len = trimmed << bs_shift;
        if (copy_to_user(argp, &r, sizeof(r)))
                return -EFAULT;

        return ret;
/**
 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
 * @ip: the inode structure
 */

static void rs_insert(struct gfs2_inode *ip)
        struct rb_node **newn, *parent = NULL;
        int rc;
        struct gfs2_blkreserv *rs = &ip->i_res;
        struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
        u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);

        BUG_ON(gfs2_rs_active(rs));

        spin_lock(&rgd->rd_rsspin);
        newn = &rgd->rd_rstree.rb_node;
        while (*newn) {
                struct gfs2_blkreserv *cur =
                        rb_entry(*newn, struct gfs2_blkreserv, rs_node);

                parent = *newn;
                rc = rs_cmp(fsblock, rs->rs_free, cur);
                if (rc > 0)
                        newn = &((*newn)->rb_right);
                else if (rc < 0)
                        newn = &((*newn)->rb_left);
                else {
                        spin_unlock(&rgd->rd_rsspin);
                        WARN_ON(1);
                        return;
                }
        }

        rb_link_node(&rs->rs_node, parent, newn);
        rb_insert_color(&rs->rs_node, &rgd->rd_rstree);

        /* Do our rgrp accounting for the reservation */
        rgd->rd_reserved += rs->rs_free; /* blocks reserved */
        spin_unlock(&rgd->rd_rsspin);
        trace_gfs2_rs(rs, TRACE_RS_INSERT);
/**
 * rg_mblk_search - find a group of multiple free blocks to form a reservation
 * @rgd: the resource group descriptor
 * @ip: pointer to the inode for which we're reserving blocks
 * @ap: the allocation parameters
 */

static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
                           const struct gfs2_alloc_parms *ap)
        struct gfs2_rbm rbm = { .rgd = rgd, };
        u64 goal;
        struct gfs2_blkreserv *rs = &ip->i_res;
        u32 extlen;
        u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
        int ret;
        struct inode *inode = &ip->i_inode;

        if (S_ISDIR(inode->i_mode))
                extlen = 1;
        else {
                extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
                extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
        }
        if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
                return;

        /* Find bitmap block that contains bits for goal block */
        if (rgrp_contains_block(rgd, ip->i_goal))
                goal = ip->i_goal;
        else
                goal = rgd->rd_last_alloc + rgd->rd_data0;

        if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
                return;

        ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true);
        if (ret == 0) {
                rs->rs_rbm = rbm;
                rs->rs_free = extlen;
                rs->rs_inum = ip->i_no_addr;
                rs_insert(ip);
        } else if (goal == rgd->rd_last_alloc + rgd->rd_data0) {
                rgd->rd_last_alloc = 0;
        }
/**
 * gfs2_next_unreserved_block - Return next block that is not reserved
 * @rgd: The resource group
 * @block: The starting block
 * @length: The required length
 * @ip: Ignore any reservations for this inode
 *
 * If the block does not appear in any reservation, then return the
 * block number unchanged. If it does appear in the reservation, then
 * keep looking through the tree of reservations in order to find the
 * first block number which is not reserved.
 */
static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
                                      u32 length,
                                      const struct gfs2_inode *ip)
        struct gfs2_blkreserv *rs;
        struct rb_node *n;
        int rc;

        spin_lock(&rgd->rd_rsspin);
        n = rgd->rd_rstree.rb_node;
        while (n) {
                rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
                rc = rs_cmp(block, length, rs);
                if (rc < 0)
                        n = n->rb_left;
                else if (rc > 0)
                        n = n->rb_right;
                else
                        break;
        }
        if (n) {
                while ((rs_cmp(block, length, rs) == 0) && (&ip->i_res != rs)) {
                        block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
                        n = n->rb_right;
                        if (n == NULL)
                                break;
                        rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
                }
        }
        spin_unlock(&rgd->rd_rsspin);
        return block;
/**
 * gfs2_reservation_check_and_update - Check for reservations during block alloc
 * @rbm: The current position in the resource group
 * @ip: The inode for which we are searching for blocks
 * @minext: The minimum extent length
 * @maxext: A pointer to the maximum extent structure
 *
 * This checks the current position in the rgrp to see whether there is
 * a reservation covering this block. If not then this function is a
 * no-op. If there is, then the position is moved to the end of the
 * contiguous reservation(s) so that we are pointing at the first
 * non-reserved block.
 *
 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
 */
static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
                                             const struct gfs2_inode *ip,
                                             u32 minext,
                                             struct gfs2_extent *maxext)
        u64 block = gfs2_rbm_to_block(rbm);
        u32 extlen = 1;
        u64 nblock;
        int ret;

        /*
         * If we have a minimum extent length, then skip over any extent
         * which is less than the min extent length in size.
         */
        if (minext) {
                extlen = gfs2_free_extlen(rbm, minext);
                if (extlen <= maxext->len)
                        goto fail;
        }

        /*
         * Check the extent which has been found against the reservations
         * and skip if parts of it are already reserved
         */
        nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
        if (nblock == block) {
                if (!minext || extlen >= minext)
                        return 0;
                if (extlen > maxext->len) {
                        maxext->len = extlen;
                        maxext->rbm = *rbm;
                }
fail:
                nblock = block + extlen;
        }
        ret = gfs2_rbm_from_block(rbm, nblock);
        if (ret < 0)
                return ret;
        return 1;
/**
 * gfs2_rbm_find - Look for blocks of a particular state
 * @rbm: Value/result starting position and final position
 * @state: The state which we want to find
 * @minext: Pointer to the requested extent length (NULL for a single block)
 *          This is updated to be the actual reservation size.
 * @ip: If set, check for reservations
 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
 *          around until we've reached the starting point.
 *
 * Side effects:
 * - If looking for free blocks, we set GBF_FULL on each bitmap which
 *   has no free blocks in it.
 * - If looking for free blocks, we set rd_extfail_pt on each rgrp which
 *   has come up short on a free block search.
 *
 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
 */
static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
                         const struct gfs2_inode *ip, bool nowrap)
        struct buffer_head *bh;
        int initial_bii;
        u32 initial_offset;
        int first_bii = rbm->bii;
        u32 first_offset = rbm->offset;
        u32 offset;
        u8 *buffer;
        int n = 0;
        int iters = rbm->rgd->rd_length;
        int ret;
        struct gfs2_bitmap *bi;
        struct gfs2_extent maxext = { .rbm.rgd = rbm->rgd, };

        /* If we are not starting at the beginning of a bitmap, then we
         * need to add one to the bitmap count to ensure that we search
         * the starting bitmap twice.
         */
        if (rbm->offset != 0)
                iters++;
        while (1) {
                bi = rbm_bi(rbm);
                if ((ip == NULL || !gfs2_rs_active(&ip->i_res)) &&
                    test_bit(GBF_FULL, &bi->bi_flags) &&
                    (state == GFS2_BLKST_FREE))
                        goto next_bitmap;

                bh = bi->bi_bh;
                buffer = bh->b_data + bi->bi_offset;
                WARN_ON(!buffer_uptodate(bh));
                if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
                        buffer = bi->bi_clone + bi->bi_offset;
                initial_offset = rbm->offset;
                offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
                if (offset == BFITNOENT)
                        goto bitmap_full;
                rbm->offset = offset;
                if (ip == NULL)
                        return 0;

                initial_bii = rbm->bii;
                ret = gfs2_reservation_check_and_update(rbm, ip,
                                                        minext ? *minext : 0,
                                                        &maxext);
                if (ret == 0)
                        return 0;
                if (ret > 0) {
                        n += (rbm->bii - initial_bii);
                        goto next_iter;
                }
                if (ret == -E2BIG) {
                        rbm->bii = 0;
                        rbm->offset = 0;
                        n += (rbm->bii - initial_bii);
                        goto res_covered_end_of_rgrp;
                }
                return ret;
bitmap_full:    /* Mark bitmap as full and fall through */
                if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
                        set_bit(GBF_FULL, &bi->bi_flags);

next_bitmap:    /* Find next bitmap in the rgrp */
                rbm->offset = 0;
                rbm->bii++;
                if (rbm->bii == rbm->rgd->rd_length)
                        rbm->bii = 0;
res_covered_end_of_rgrp:
                if ((rbm->bii == 0) && nowrap)
                        break;
                n++;
next_iter:
                if (n >= iters)
                        break;
        }
        if (minext == NULL || state != GFS2_BLKST_FREE)
                return -ENOSPC;

        /* If the extent was too small, and it's smaller than the smallest
           to have failed before, remember for future reference that it's
           useless to search this rgrp again for this amount or more. */
        if ((first_offset == 0) && (first_bii == 0) &&
            (*minext < rbm->rgd->rd_extfail_pt))
                rbm->rgd->rd_extfail_pt = *minext;

        /* If the maximum extent we found is big enough to fulfill the
           minimum requirements, use it anyway. */
        if (maxext.rbm.rgd) {
                *rbm = maxext.rbm;
                *minext = maxext.len;
                return 0;
        }

        return -ENOSPC;
/**
 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
 * @rgd: The rgrp
 * @last_unlinked: block address of the last dinode we unlinked
 * @skip: block address we should explicitly not unlink
 *
 * Any inode found is queued to the delete workqueue; nothing is
 * returned to the caller.
 */
static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
        u64 block;
        struct gfs2_sbd *sdp = rgd->rd_sbd;
        struct gfs2_glock *gl;
        struct gfs2_inode *ip;
        int error;
        int found = 0;
        struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };

        while (1) {
                down_write(&sdp->sd_log_flush_lock);
                error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL,
                                      true);
                up_write(&sdp->sd_log_flush_lock);
                if (error == -ENOSPC)
                        break;
                if (WARN_ON_ONCE(error))
                        break;

                block = gfs2_rbm_to_block(&rbm);
                if (gfs2_rbm_from_block(&rbm, block + 1))
                        break;
                if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
                        continue;
                if (block == skip)
                        continue;
                *last_unlinked = block;

                error = gfs2_glock_get(sdp, block, &gfs2_iopen_glops, CREATE, &gl);
                if (error)
                        continue;
                /* If the inode is already in cache, we can ignore it here
                 * because the existing inode disposal code will deal with
                 * it when all refs have gone away. Accessing gl_object like
                 * this is not safe in general. Here it is ok because we do
                 * not dereference the pointer, and we only need an approx
                 * answer to whether it is NULL or not.
                 */
                ip = gl->gl_object;

                if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
                        gfs2_glock_put(gl);
                else
                        found++;

                /* Limit reclaim to sensible number of tasks */
                if (found > NR_CPUS)
                        break;
        }

        rgd->rd_flags &= ~GFS2_RDF_CHECK;
/**
 * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
 * @rgd: The rgrp in question
 * @loops: An indication of how picky we can be (0=very, 1=less so)
 *
 * This function uses the recently added glock statistics in order to
 * figure out whether a particular resource group is suffering from
 * contention from multiple nodes. This is done purely on the basis
 * of timings, since this is the only data we have to work with and
 * our aim here is to reject a resource group which is highly contended
 * but (very important) not to do this too often in order to ensure that
 * we do not land up introducing fragmentation by changing resource
 * groups when not actually required.
 *
 * The calculation is fairly simple, we want to know whether the SRTTB
 * (i.e. smoothed round trip time for blocking operations) to acquire
 * the lock for this rgrp's glock is significantly greater than the
 * time taken for resource groups on average. We introduce a margin in
 * the form of the variable @var which is computed as the sum of the two
 * respective variances, and multiplied by a factor depending on @loops
 * and whether we have a lot of data to base the decision on. This is
 * then tested against the square difference of the means in order to
 * decide whether the result is statistically significant or not.
 *
 * Returns: A boolean verdict on the congestion status
 */
static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
        const struct gfs2_glock *gl = rgd->rd_gl;
        const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_lkstats *st;
        u64 r_dcount, l_dcount;
        u64 l_srttb, a_srttb = 0;
        s64 srttb_diff;
        u64 sqr_diff;
        u64 var;
        int cpu, nonzero = 0;

        for_each_present_cpu(cpu) {
                st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP];
                if (st->stats[GFS2_LKS_SRTTB]) {
                        a_srttb += st->stats[GFS2_LKS_SRTTB];
                        nonzero++;
                }
        }
        st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
        if (nonzero)
                do_div(a_srttb, nonzero);
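        /* a_srttb is now the mean blocking SRTT across the CPUs with data */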
        r_dcount = st->stats[GFS2_LKS_DCOUNT];
        var = st->stats[GFS2_LKS_SRTTVARB] +
              gl->gl_stats.stats[GFS2_LKS_SRTTVARB];

        l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
        l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
        if ((l_dcount < 1) || (r_dcount < 1) || (a_srttb == 0))
                return false;

        srttb_diff = a_srttb - l_srttb;
        sqr_diff = srttb_diff * srttb_diff;

        var *= 2;
        if (l_dcount < 8 || r_dcount < 8)
                var *= 2;
        if (loops == 1)
                var *= 2;

        return ((srttb_diff < 0) && (sqr_diff > var));
/**
 * gfs2_rgrp_used_recently
 * @rs: The block reservation with the rgrp to test
 * @msecs: The time limit in milliseconds
 *
 * Returns: True if the rgrp glock has been used within the time limit
 */
static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
                                    u64 msecs)
        u64 tdiff;

        tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
                                      rs->rs_rbm.rgd->rd_gl->gl_dstamp));

        return tdiff > (msecs * 1000 * 1000);
static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
        const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        u32 skip;

        get_random_bytes(&skip, sizeof(skip));
        return skip % sdp->sd_rgrps;
static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
        struct gfs2_rgrpd *rgd = *pos;
        struct gfs2_sbd *sdp = rgd->rd_sbd;

        rgd = gfs2_rgrpd_get_next(rgd);
        if (rgd == NULL)
                rgd = gfs2_rgrpd_get_first(sdp);
        *pos = rgd;
        if (rgd != begin) /* If we didn't wrap */
                return true;
        return false;
/**
 * fast_to_acquire - determine if a resource group will be fast to acquire
 * @rgd: The rgrp
 *
 * If this is one of our preferred rgrps, it should be quicker to acquire,
 * because we tried to set ourselves up as dlm lock master.
 */
static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
        struct gfs2_glock *gl = rgd->rd_gl;

        if (gl->gl_state != LM_ST_UNLOCKED && list_empty(&gl->gl_holders) &&
            !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
            !test_bit(GLF_DEMOTE, &gl->gl_flags))
                return 1;
        if (rgd->rd_flags & GFS2_RDF_PREFERRED)
                return 1;
        return 0;
/**
 * gfs2_inplace_reserve - Reserve space in the filesystem
 * @ip: the inode to reserve space for
 * @ap: the allocation parameters
 *
 * We try our best to find an rgrp that has at least ap->target blocks
 * available. After a couple of passes (loops == 2), the prospects of finding
 * such an rgrp diminish. At this stage, we return the first rgrp that has
 * at least ap->min_target blocks available. Either way, we set ap->allowed to
 * the number of blocks available in the chosen rgrp.
 *
 * Returns: 0 on success,
 *          -ENOMEM if a suitable rgrp can't be found
 *          errno otherwise
 */
int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_rgrpd *begin = NULL;
        struct gfs2_blkreserv *rs = &ip->i_res;
        int error = 0, rg_locked, flags = 0;
        u64 last_unlinked = NO_BLOCK;
        int loops = 0;
        u32 skip = 0;

        if (sdp->sd_args.ar_rgrplvb)
                flags |= GL_SKIP;
        if (gfs2_assert_warn(sdp, ap->target))
                return -EINVAL;
        if (gfs2_rs_active(rs)) {
                begin = rs->rs_rbm.rgd;
        } else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
                rs->rs_rbm.rgd = begin = ip->i_rgd;
        } else {
                check_and_update_goal(ip);
                rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
        }
        if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
                skip = gfs2_orlov_skip(ip);
        if (rs->rs_rbm.rgd == NULL)
                return -EBADSLT;
        while (loops < 3) {
                rg_locked = 1;

                if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
                        rg_locked = 0;
                        if (skip && skip--)
                                goto next_rgrp;
                        if (!gfs2_rs_active(rs)) {
                                if (loops == 0 &&
                                    !fast_to_acquire(rs->rs_rbm.rgd))
                                        goto next_rgrp;
                                if ((loops < 2) &&
                                    gfs2_rgrp_used_recently(rs, 1000) &&
                                    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
                                        goto next_rgrp;
                        }
                        error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
                                                   LM_ST_EXCLUSIVE, flags,
                                                   &rs->rs_rgd_gh);
                        if (unlikely(error))
                                return error;
                        if (!gfs2_rs_active(rs) && (loops < 2) &&
                            gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
                                goto skip_rgrp;
                        if (sdp->sd_args.ar_rgrplvb) {
                                error = update_rgrp_lvb(rs->rs_rbm.rgd);
                                if (unlikely(error)) {
                                        gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
                                        return error;
                                }
                        }
                }
                /* Skip unusable resource groups */
                if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
                                                 GFS2_RDF_ERROR)) ||
                    (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
                        goto skip_rgrp;

                if (sdp->sd_args.ar_rgrplvb)
                        gfs2_rgrp_bh_get(rs->rs_rbm.rgd);

                /* Get a reservation if we don't already have one */
                if (!gfs2_rs_active(rs))
                        rg_mblk_search(rs->rs_rbm.rgd, ip, ap);

                /* Skip rgrps when we can't get a reservation on first pass */
                if (!gfs2_rs_active(rs) && (loops < 1))
                        goto check_rgrp;

                /* If rgrp has enough free space, use it */
                if (rs->rs_rbm.rgd->rd_free_clone >= ap->target ||
                    (loops == 2 && ap->min_target &&
                     rs->rs_rbm.rgd->rd_free_clone >= ap->min_target)) {
                        ip->i_rgd = rs->rs_rbm.rgd;
                        ap->allowed = ip->i_rgd->rd_free_clone;
                        return 0;
                }
check_rgrp:
                /* Check for unlinked inodes which can be reclaimed */
                if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
                        try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
                                        ip->i_no_addr);
skip_rgrp:
                /* Drop reservation, if we couldn't use reserved rgrp */
                if (gfs2_rs_active(rs))
                        gfs2_rs_deltree(rs);

                /* Unlock rgrp if required */
                if (!rg_locked)
                        gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
next_rgrp:
                /* Find the next rgrp, and continue looking */
                if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
                        continue;
                if (skip)
                        continue;
                /* If we've scanned all the rgrps, but found no free blocks
                 * then this checks for some less likely conditions before
                 * trying again.
                 */
                loops++;
                /* Check that fs hasn't grown if writing to rindex */
                if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
                        error = gfs2_ri_update(ip);
                        if (error)
                                return error;
                }
                /* Flushing the log may release space */
                if (loops == 2)
                        gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
        }

        return -ENOSPC;
/**
 * gfs2_inplace_release - release an inplace reservation
 * @ip: the inode the reservation was taken out on
 *
 * Release a reservation made by gfs2_inplace_reserve().
 */
void gfs2_inplace_release(struct gfs2_inode *ip)
        struct gfs2_blkreserv *rs = &ip->i_res;

        if (gfs2_holder_initialized(&rs->rs_rgd_gh))
                gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
/**
 * gfs2_get_block_type - Check a block in a RG is of given type
 * @rgd: the resource group holding the block
 * @block: the block number
 *
 * Returns: The block type (GFS2_BLKST_*)
 */
static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
        struct gfs2_rbm rbm = { .rgd = rgd, };
        int ret;

        ret = gfs2_rbm_from_block(&rbm, block);
        WARN_ON_ONCE(ret != 0);

        return gfs2_testbit(&rbm);
/**
 * gfs2_alloc_extent - allocate an extent from a given bitmap
 * @rbm: the resource group information
 * @dinode: TRUE if the first block we allocate is for a dinode
 * @n: The extent length (value/result)
 *
 * Add the bitmap buffer to the transaction.
 * Set the found bits to @new_state to change block's allocation state.
 */
static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
                              unsigned int *n)
        struct gfs2_rbm pos = { .rgd = rbm->rgd, };
        const unsigned int elen = *n;
        u64 block;
        int ret;
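        /*
         * Claim the first block at rbm, then keep claiming consecutive
         * free blocks until *n reaches the requested extent length or a
         * non-free block is hit.
         */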
        *n = 1;
        block = gfs2_rbm_to_block(rbm);
        gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
        gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
        block++;
        while (*n < elen) {
                ret = gfs2_rbm_from_block(&pos, block);
                if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
                        break;
                gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
                gfs2_setbit(&pos, true, GFS2_BLKST_USED);
                (*n)++;
                block++;
        }
/**
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Returns: Resource group containing the block(s)
 */
static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
                                     u32 blen, unsigned char new_state)
        struct gfs2_rbm rbm;
        struct gfs2_bitmap *bi, *bi_prev = NULL;

        rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
        if (!rbm.rgd) {
                if (gfs2_consist(sdp))
                        fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
                return NULL;
        }

        gfs2_rbm_from_block(&rbm, bstart);
        while (blen--) {
                bi = rbm_bi(&rbm);
                if (bi != bi_prev) {
                        if (!bi->bi_clone) {
                                bi->bi_clone = kmalloc(bi->bi_bh->b_size,
                                                       GFP_NOFS | __GFP_NOFAIL);
                                memcpy(bi->bi_clone + bi->bi_offset,
                                       bi->bi_bh->b_data + bi->bi_offset,
                                       bi->bi_len);
                        }
                        gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
                        bi_prev = bi;
                }
                gfs2_setbit(&rbm, false, new_state);
                gfs2_rbm_incr(&rbm);
        }

        return rbm.rgd;
/**
 * gfs2_rgrp_dump - print out an rgrp
 * @seq: The iterator
 * @gl: The glock in question
 */

void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
        struct gfs2_rgrpd *rgd = gl->gl_object;
        struct gfs2_blkreserv *trs;
        const struct rb_node *n;

        if (rgd == NULL)
                return;
        gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u e:%u\n",
                       (unsigned long long)rgd->rd_addr, rgd->rd_flags,
                       rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
                       rgd->rd_reserved, rgd->rd_extfail_pt);
        spin_lock(&rgd->rd_rsspin);
        for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
                trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
                dump_rs(seq, trs);
        }
        spin_unlock(&rgd->rd_rsspin);
static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
        struct gfs2_sbd *sdp = rgd->rd_sbd;

        fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
                (unsigned long long)rgd->rd_addr);
        fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
        gfs2_rgrp_dump(NULL, rgd->rd_gl);
        rgd->rd_flags |= GFS2_RDF_ERROR;
/**
 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
 * @ip: The inode we have just allocated blocks for
 * @rbm: The start of the allocated blocks
 * @len: The extent length
 *
 * Adjusts a reservation after an allocation has taken place. If the
 * reservation does not match the allocation, or if it is now empty
 * then it is removed.
 */
static void gfs2_adjust_reservation(struct gfs2_inode *ip,
                                    const struct gfs2_rbm *rbm, unsigned len)
        struct gfs2_blkreserv *rs = &ip->i_res;
        struct gfs2_rgrpd *rgd = rbm->rgd;
        unsigned rlen;
        u64 block;
        int ret;

        spin_lock(&rgd->rd_rsspin);
        if (gfs2_rs_active(rs)) {
                if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
                        block = gfs2_rbm_to_block(rbm);
                        ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
                        rlen = min(rs->rs_free, len);
                        rs->rs_free -= rlen;
                        rgd->rd_reserved -= rlen;
                        trace_gfs2_rs(rs, TRACE_RS_CLAIM);
                        if (rs->rs_free && !ret)
                                goto out;
                        /* We used up our block reservation, so we should
                           reserve more blocks next time. */
                        atomic_add(RGRP_RSRV_ADDBLKS, &rs->rs_sizehint);
                }
                __rs_deltree(rs);
        }
out:
        spin_unlock(&rgd->rd_rsspin);
/**
 * gfs2_set_alloc_start - Set starting point for block allocation
 * @rbm: The rbm which will be set to the required location
 * @ip: The gfs2 inode
 * @dinode: Flag to say if allocation includes a new inode
 *
 * This sets the starting point from the reservation if one is active
 * otherwise it falls back to guessing a start point based on the
 * inode's goal block or the last allocation point in the rgrp.
 */
static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
                                 const struct gfs2_inode *ip, bool dinode)
        u64 goal;

        if (gfs2_rs_active(&ip->i_res)) {
                *rbm = ip->i_res.rs_rbm;
                return;
        }

        if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
                goal = ip->i_goal;
        else
                goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;

        gfs2_rbm_from_block(rbm, goal);
/**
 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
 * @ip: the inode to allocate the block for
 * @bn: Used to return the starting block number
 * @nblocks: requested number of blocks/extent length (value/result)
 * @dinode: 1 if we're allocating a dinode block, else 0
 * @generation: the generation number of the inode
 *
 * Returns: 0 or error
 */
int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
                      bool dinode, u64 *generation)
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head *dibh;
        struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
        unsigned int ndata;
        u64 block; /* block, within the file system scope */
        int error;

        gfs2_set_alloc_start(&rbm, ip, dinode);
        error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false);

        if (error == -ENOSPC) {
                gfs2_set_alloc_start(&rbm, ip, dinode);
                error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, NULL, false);
        }

        /* Since all blocks are reserved in advance, this shouldn't happen */
        if (error) {
                fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d fail_pt=%d\n",
                        (unsigned long long)ip->i_no_addr, error, *nblocks,
                        test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags),
                        rbm.rgd->rd_extfail_pt);
                goto rgrp_error;
        }
        gfs2_alloc_extent(&rbm, dinode, nblocks);
        block = gfs2_rbm_to_block(&rbm);
        rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
        if (gfs2_rs_active(&ip->i_res))
                gfs2_adjust_reservation(ip, &rbm, *nblocks);
        ndata = *nblocks;
        if (dinode)
                ndata--;

        if (!dinode) {
                ip->i_goal = block + ndata - 1;
                error = gfs2_meta_inode_buffer(ip, &dibh);
                if (error == 0) {
                        struct gfs2_dinode *di =
                                (struct gfs2_dinode *)dibh->b_data;
                        gfs2_trans_add_meta(ip->i_gl, dibh);
                        di->di_goal_meta = di->di_goal_data =
                                cpu_to_be64(ip->i_goal);
                        brelse(dibh);
                }
        }
        if (rbm.rgd->rd_free < *nblocks) {
                pr_warn("nblocks=%u\n", *nblocks);
                goto rgrp_error;
        }

        rbm.rgd->rd_free -= *nblocks;
        if (dinode) {
                rbm.rgd->rd_dinodes++;
                *generation = rbm.rgd->rd_igeneration++;
                if (*generation == 0)
                        *generation = rbm.rgd->rd_igeneration++;
        }
        gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
        gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
        gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);

        gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
        if (dinode)
                gfs2_trans_add_unrevoke(sdp, block, *nblocks);

        gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid);

        rbm.rgd->rd_free_clone -= *nblocks;
        trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
                               dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
        *bn = block;
        return 0;
rgrp_error:
        gfs2_rgrp_error(rbm.rgd);
        return -EIO;
/**
 * __gfs2_free_blocks - free a contiguous run of block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 * @meta: 1 if the blocks represent metadata
 */
void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_rgrpd *rgd;

        rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
        if (!rgd)
                return;
        trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
        rgd->rd_free += blen;
        rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
        gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
        gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
        gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);

        /* Directories keep their data in the metadata address space */
        if (meta || ip->i_depth)
                gfs2_meta_wipe(ip, bstart, blen);
/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 */
void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

        __gfs2_free_blocks(ip, bstart, blen, 1);
        gfs2_statfs_change(sdp, 0, +blen, 0);
        gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
void gfs2_unlink_di(struct inode *inode)
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_rgrpd *rgd;
        u64 blkno = ip->i_no_addr;

        rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
        if (!rgd)
                return;
        trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
        gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
        gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
        gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
        update_rgrp_lvb_unlinked(rgd, 1);
static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
        struct gfs2_sbd *sdp = rgd->rd_sbd;
        struct gfs2_rgrpd *tmp_rgd;

        tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
        if (!tmp_rgd)
                return;
        gfs2_assert_withdraw(sdp, rgd == tmp_rgd);

        if (!rgd->rd_dinodes)
                gfs2_consist_rgrpd(rgd);
        rgd->rd_dinodes--;
        rgd->rd_free++;

        gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
        gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
        gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
        update_rgrp_lvb_unlinked(rgd, -1);

        gfs2_statfs_change(sdp, 0, +1, -1);
void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
        gfs2_free_uninit_di(rgd, ip->i_no_addr);
        trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
        gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
        gfs2_meta_wipe(ip, ip->i_no_addr, 1);
/**
 * gfs2_check_blk_type - Check the type of a block
 * @sdp: The superblock
 * @no_addr: The block number to check
 * @type: The block type we are looking for
 *
 * Returns: 0 if the block type matches the expected type
 *          -ESTALE if it doesn't match
 *          or -ve errno if something went wrong while checking
 */
int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
        struct gfs2_rgrpd *rgd;
        struct gfs2_holder rgd_gh;
        int error = -EINVAL;

        rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
        if (!rgd)
                goto fail;

        error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
        if (error)
                goto fail;

        if (gfs2_get_block_type(rgd, no_addr) != type)
                error = -ESTALE;

        gfs2_glock_dq_uninit(&rgd_gh);
fail:
        return error;
/**
 * gfs2_rlist_add - add a RG to a list of RGs
 * @ip: the inode
 * @rlist: the list of resource groups
 * @block: the block
 *
 * Figure out what RG a block belongs to and add that RG to the list
 *
 * FIXME: Don't use NOFAIL
 */
void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
                    u64 block)
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_rgrpd *rgd;
        struct gfs2_rgrpd **tmp;
        unsigned int new_space;
        unsigned int x;

        if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
                return;

        if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
                rgd = ip->i_rgd;
        else
                rgd = gfs2_blk2rgrpd(sdp, block, 1);
        if (!rgd) {
                fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
                return;
        }

        for (x = 0; x < rlist->rl_rgrps; x++)
                if (rlist->rl_rgd[x] == rgd)
                        return;

        if (rlist->rl_rgrps == rlist->rl_space) {
                new_space = rlist->rl_space + 10;

                tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
                              GFP_NOFS | __GFP_NOFAIL);

                if (rlist->rl_rgd) {
                        memcpy(tmp, rlist->rl_rgd,
                               rlist->rl_space * sizeof(struct gfs2_rgrpd *));
                        kfree(rlist->rl_rgd);
                }

                rlist->rl_space = new_space;
                rlist->rl_rgd = tmp;
        }

        rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
/**
 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
 *                    and initialize an array of glock holders for them
 * @rlist: the list of resource groups
 * @state: the lock state to acquire the RG lock in
 *
 * FIXME: Don't use NOFAIL
 */
void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
        unsigned int x;

        rlist->rl_ghs = kmalloc(rlist->rl_rgrps * sizeof(struct gfs2_holder),
                                GFP_NOFS | __GFP_NOFAIL);
        for (x = 0; x < rlist->rl_rgrps; x++)
                gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
                                 state, 0,
                                 &rlist->rl_ghs[x]);
/**
 * gfs2_rlist_free - free a resource group list
 * @rlist: the list of resource groups
 */
2612 kfree(rlist->rl_rgd);
2614 if (rlist->rl_ghs) {
2615 for (x = 0; x < rlist->rl_rgrps; x++)
2616 gfs2_holder_uninit(&rlist->rl_ghs[x]);
2617 kfree(rlist->rl_ghs);
2618 rlist->rl_ghs = NULL;