2 * linux/fs/ext3/balloc.c
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
9 * Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
10 * Big-endian to little-endian byte-swapping/bitmaps by
11 * David S. Miller (davem@caip.rutgers.edu), 1995
14 #include <linux/quotaops.h>
15 #include <linux/blkdev.h>
19 * balloc.c contains the block allocation and deallocation routines
23 * The free blocks are managed by bitmaps. A file system contains several
24 * block groups. Each group contains 1 bitmap block for blocks, 1 bitmap
25 * block for inodes, N blocks for the inode table and data blocks.
27 * The file system contains group descriptors which are located after the
28 * super block. Each descriptor contains the number of the bitmap block and
29 * the free blocks count of the group. The descriptors are loaded in memory
30 * when a file system is mounted (see ext3_fill_super).
34 #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
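/*
 * For illustration: in_range(105, 100, 8) is true, since 105 lies in the
 * closed interval [100, 107], while in_range(108, 100, 8) is false.
 */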
37 * Calculate the block group number and offset, given a block number
39 static void ext3_get_group_no_and_offset(struct super_block *sb,
40 ext3_fsblk_t blocknr, unsigned long *blockgrpp, ext3_grpblk_t *offsetp)
42 struct ext3_super_block *es = EXT3_SB(sb)->s_es;
44 blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
46 *offsetp = blocknr % EXT3_BLOCKS_PER_GROUP(sb);
48 *blockgrpp = blocknr / EXT3_BLOCKS_PER_GROUP(sb);
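/*
 * Illustrative user-space sketch, not part of balloc.c: the same
 * group/offset arithmetic as above, using hypothetical layout parameters
 * (s_first_data_block = 1 and 8192 blocks per group, as with a 1 KiB
 * block size).
 */
#include <stdio.h>

int main(void)
{
	unsigned long first_data_block = 1;    /* assumed s_first_data_block */
	unsigned long blocks_per_group = 8192; /* assumed EXT3_BLOCKS_PER_GROUP */
	unsigned long blocknr = 20000;         /* filesystem-wide block number */

	unsigned long rel    = blocknr - first_data_block; /* 19999 */
	unsigned long group  = rel / blocks_per_group;     /* 2     */
	unsigned long offset = rel % blocks_per_group;     /* 3615  */

	printf("block %lu -> group %lu, offset %lu\n", blocknr, group, offset);
	return 0;
}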
52 * ext3_get_group_desc() -- load group descriptor from disk
54 * @block_group: given block group
55 * @bh: pointer to the buffer head to store the block group descriptor
58 struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
59 unsigned int block_group,
60 struct buffer_head ** bh)
62 unsigned long group_desc;
64 struct ext3_group_desc * desc;
65 struct ext3_sb_info *sbi = EXT3_SB(sb);
67 if (block_group >= sbi->s_groups_count) {
68 ext3_error (sb, "ext3_get_group_desc",
69 "block_group >= groups_count - "
70 "block_group = %d, groups_count = %lu",
71 block_group, sbi->s_groups_count);
77 group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
78 offset = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
79 if (!sbi->s_group_desc[group_desc]) {
80 ext3_error (sb, "ext3_get_group_desc",
81 "Group descriptor not loaded - "
82 "block_group = %d, group_desc = %lu, desc = %lu",
83 block_group, group_desc, offset);
87 desc = (struct ext3_group_desc *) sbi->s_group_desc[group_desc]->b_data;
89 *bh = sbi->s_group_desc[group_desc];
93 static int ext3_valid_block_bitmap(struct super_block *sb,
94 struct ext3_group_desc *desc,
95 unsigned int block_group,
96 struct buffer_head *bh)
99 ext3_grpblk_t next_zero_bit;
100 ext3_fsblk_t bitmap_blk;
101 ext3_fsblk_t group_first_block;
103 group_first_block = ext3_group_first_block_no(sb, block_group);
105 /* check whether block bitmap block number is set */
106 bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
107 offset = bitmap_blk - group_first_block;
108 if (!ext3_test_bit(offset, bh->b_data))
109 /* bad block bitmap */
112 /* check whether the inode bitmap block number is set */
113 bitmap_blk = le32_to_cpu(desc->bg_inode_bitmap);
114 offset = bitmap_blk - group_first_block;
115 if (!ext3_test_bit(offset, bh->b_data))
116 /* bad block bitmap */
119 /* check whether the inode table block number is set */
120 bitmap_blk = le32_to_cpu(desc->bg_inode_table);
121 offset = bitmap_blk - group_first_block;
122 next_zero_bit = ext3_find_next_zero_bit(bh->b_data,
123 offset + EXT3_SB(sb)->s_itb_per_group,
125 if (next_zero_bit >= offset + EXT3_SB(sb)->s_itb_per_group)
126 /* good bitmap for inode tables */
130 ext3_error(sb, __func__,
131 "Invalid block bitmap - "
132 "block_group = %d, block = %lu",
133 block_group, bitmap_blk);
138 * read_block_bitmap()
140 * @block_group: given block group
142 * Read the bitmap for a given block_group, and validate that the
143 * bits for the block/inode/inode table blocks are set in the bitmap.
145 * Return buffer_head on success or NULL in case of failure.
147 static struct buffer_head *
148 read_block_bitmap(struct super_block *sb, unsigned int block_group)
150 struct ext3_group_desc * desc;
151 struct buffer_head * bh = NULL;
152 ext3_fsblk_t bitmap_blk;
154 desc = ext3_get_group_desc(sb, block_group, NULL);
157 trace_ext3_read_block_bitmap(sb, block_group);
158 bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
159 bh = sb_getblk(sb, bitmap_blk);
161 ext3_error(sb, __func__,
162 "Cannot read block bitmap - "
163 "block_group = %d, block_bitmap = %u",
164 block_group, le32_to_cpu(desc->bg_block_bitmap));
167 if (likely(bh_uptodate_or_lock(bh)))
170 if (bh_submit_read(bh) < 0) {
172 ext3_error(sb, __func__,
173 "Cannot read block bitmap - "
174 "block_group = %d, block_bitmap = %u",
175 block_group, le32_to_cpu(desc->bg_block_bitmap));
178 ext3_valid_block_bitmap(sb, desc, block_group, bh);
180 * file system mounted not to panic on error, continue with corrupt bitmap
186 * The reservation window structure operations
187 * --------------------------------------------
188 * Operations include:
189 * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
191 * We use a red-black tree to represent per-filesystem reservation
197 * __rsv_window_dump() -- Dump the filesystem block allocation reservation map
198 * @rb_root: root of per-filesystem reservation rb tree
199 * @verbose: verbose mode
200 * @fn: function which wishes to dump the reservation map
202 * If verbose is turned on, it will print the whole block reservation
203 * windows(start, end). Otherwise, it will only print out the "bad" windows,
204 * those windows that overlap with their immediate neighbors.
207 static void __rsv_window_dump(struct rb_root *root, int verbose,
211 struct ext3_reserve_window_node *rsv, *prev;
219 printk("Block Allocation Reservation Windows Map (%s):\n", fn);
221 rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
223 printk("reservation window 0x%p "
224 "start: %lu, end: %lu\n",
225 rsv, rsv->rsv_start, rsv->rsv_end);
226 if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
227 printk("Bad reservation %p (start >= end)\n",
231 if (prev && prev->rsv_end >= rsv->rsv_start) {
232 printk("Bad reservation %p (prev->end >= start)\n",
238 printk("Restarting reservation walk in verbose mode\n");
246 printk("Window map complete.\n");
249 #define rsv_window_dump(root, verbose) \
250 __rsv_window_dump((root), (verbose), __func__)
252 #define rsv_window_dump(root, verbose) do {} while (0)
256 * goal_in_my_reservation()
257 * @rsv: inode's reservation window
258 * @grp_goal: given goal block relative to the allocation block group
259 * @group: the current allocation block group
260 * @sb: filesystem super block
262 * Test if the given goal block (group relative) is within the file's
263 * own block reservation window range.
265 * If the reservation window is outside the goal allocation group, return 0;
266 * grp_goal (given goal block) could be -1, which means no specific
267 * goal block. In this case, always return 1.
268 * If the goal block is within the reservation window, return 1;
269 * otherwise, return 0;
272 goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal,
273 unsigned int group, struct super_block * sb)
275 ext3_fsblk_t group_first_block, group_last_block;
277 group_first_block = ext3_group_first_block_no(sb, group);
278 group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
280 if ((rsv->_rsv_start > group_last_block) ||
281 (rsv->_rsv_end < group_first_block))
283 if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start)
284 || (grp_goal + group_first_block > rsv->_rsv_end)))
290 * search_reserve_window()
291 * @rb_root: root of reservation tree
292 * @goal: target allocation block
294 * Find the reserved window which includes the goal, or the previous one
295 * if the goal is not in any window.
296 * Returns NULL if there are no windows or if all windows start after the goal.
298 static struct ext3_reserve_window_node *
299 search_reserve_window(struct rb_root *root, ext3_fsblk_t goal)
301 struct rb_node *n = root->rb_node;
302 struct ext3_reserve_window_node *rsv;
308 rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
310 if (goal < rsv->rsv_start)
312 else if (goal > rsv->rsv_end)
318 * We've fallen off the end of the tree: the goal wasn't inside
319 * any particular node. OK, the previous node must be to one
320 * side of the interval containing the goal. If it's the RHS,
321 * we need to back up one.
323 if (rsv->rsv_start > goal) {
324 n = rb_prev(&rsv->rsv_node);
325 rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
331 * ext3_rsv_window_add() -- Insert a window to the block reservation rb tree.
333 * @rsv: reservation window to add
335 * Must be called with rsv_lock held.
337 void ext3_rsv_window_add(struct super_block *sb,
338 struct ext3_reserve_window_node *rsv)
340 struct rb_root *root = &EXT3_SB(sb)->s_rsv_window_root;
341 struct rb_node *node = &rsv->rsv_node;
342 ext3_fsblk_t start = rsv->rsv_start;
344 struct rb_node ** p = &root->rb_node;
345 struct rb_node * parent = NULL;
346 struct ext3_reserve_window_node *this;
348 trace_ext3_rsv_window_add(sb, rsv);
352 this = rb_entry(parent, struct ext3_reserve_window_node, rsv_node);
354 if (start < this->rsv_start)
356 else if (start > this->rsv_end)
359 rsv_window_dump(root, 1);
364 rb_link_node(node, parent, p);
365 rb_insert_color(node, root);
369 * rsv_window_remove() -- unlink a window from the reservation rb tree
371 * @rsv: reservation window to remove
373 * Mark the block reservation window as not allocated, and unlink it
374 * from the filesystem reservation window rb tree. Must be called with rsv_lock held.
377 static void rsv_window_remove(struct super_block *sb,
378 struct ext3_reserve_window_node *rsv)
380 rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
381 rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
382 rsv->rsv_alloc_hit = 0;
383 rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root);
387 * rsv_is_empty() -- Check whether the reservation window is unallocated.
388 * @rsv: given reservation window to check
390 * returns 1 if the end block is EXT3_RESERVE_WINDOW_NOT_ALLOCATED.
392 static inline int rsv_is_empty(struct ext3_reserve_window *rsv)
394 /* a valid reservation end block could not be 0 */
395 return rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
399 * ext3_init_block_alloc_info()
400 * @inode: file inode structure
402 * Allocate and initialize the reservation window structure, and
403 * finally link the window to the ext3 inode structure.
405 * The reservation window structure is only dynamically allocated
406 * and linked to the ext3 inode the first time the open file
407 * needs a new block. So, before every ext3_new_block(s) call, for
408 * regular files, we should check whether the reservation window
409 * structure exists or not. If it does not, this function is called.
410 * Failure to do so will result in block reservation being turned off for that open file.
413 * This function is called from ext3_get_blocks_handle(), also called
414 * when setting the reservation window size through ioctl before the file
415 * is opened for write (needs block allocation).
417 * Needs truncate_mutex protection prior to calling this function.
419 void ext3_init_block_alloc_info(struct inode *inode)
421 struct ext3_inode_info *ei = EXT3_I(inode);
422 struct ext3_block_alloc_info *block_i;
423 struct super_block *sb = inode->i_sb;
425 block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
427 struct ext3_reserve_window_node *rsv = &block_i->rsv_window_node;
429 rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
430 rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
433 * if filesystem is mounted with NORESERVATION, the goal
434 * reservation window size is set to zero to indicate
435 * block reservation is off
437 if (!test_opt(sb, RESERVATION))
438 rsv->rsv_goal_size = 0;
440 rsv->rsv_goal_size = EXT3_DEFAULT_RESERVE_BLOCKS;
441 rsv->rsv_alloc_hit = 0;
442 block_i->last_alloc_logical_block = 0;
443 block_i->last_alloc_physical_block = 0;
445 ei->i_block_alloc_info = block_i;
449 * ext3_discard_reservation()
452 * Discard (free) the block reservation window on last file close, or truncate
455 * It is being called in three cases:
456 * ext3_release_file(): last writer closes the file
457 * ext3_clear_inode(): last iput(), when nobody links to this file.
458 * ext3_truncate(): when the block indirect map is about to change.
461 void ext3_discard_reservation(struct inode *inode)
463 struct ext3_inode_info *ei = EXT3_I(inode);
464 struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info;
465 struct ext3_reserve_window_node *rsv;
466 spinlock_t *rsv_lock = &EXT3_SB(inode->i_sb)->s_rsv_window_lock;
471 rsv = &block_i->rsv_window_node;
472 if (!rsv_is_empty(&rsv->rsv_window)) {
474 if (!rsv_is_empty(&rsv->rsv_window)) {
475 trace_ext3_discard_reservation(inode, rsv);
476 rsv_window_remove(inode->i_sb, rsv);
478 spin_unlock(rsv_lock);
483 * ext3_free_blocks_sb() -- Free given blocks and update quota
484 * @handle: handle to this transaction
486 * @block: start physical block to free
487 * @count: number of blocks to free
488 * @pdquot_freed_blocks: pointer to quota
490 void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb,
491 ext3_fsblk_t block, unsigned long count,
492 unsigned long *pdquot_freed_blocks)
494 struct buffer_head *bitmap_bh = NULL;
495 struct buffer_head *gd_bh;
496 unsigned long block_group;
499 unsigned long overflow;
500 struct ext3_group_desc * desc;
501 struct ext3_super_block * es;
502 struct ext3_sb_info *sbi;
504 ext3_grpblk_t group_freed;
506 *pdquot_freed_blocks = 0;
509 if (block < le32_to_cpu(es->s_first_data_block) ||
510 block + count < block ||
511 block + count > le32_to_cpu(es->s_blocks_count)) {
512 ext3_error (sb, "ext3_free_blocks",
513 "Freeing blocks not in datazone - "
514 "block = "E3FSBLK", count = %lu", block, count);
518 ext3_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1);
522 block_group = (block - le32_to_cpu(es->s_first_data_block)) /
523 EXT3_BLOCKS_PER_GROUP(sb);
524 bit = (block - le32_to_cpu(es->s_first_data_block)) %
525 EXT3_BLOCKS_PER_GROUP(sb);
527 * Check to see if we are freeing blocks across a group boundary.
530 if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
531 overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
535 bitmap_bh = read_block_bitmap(sb, block_group);
538 desc = ext3_get_group_desc (sb, block_group, &gd_bh);
542 if (in_range (le32_to_cpu(desc->bg_block_bitmap), block, count) ||
543 in_range (le32_to_cpu(desc->bg_inode_bitmap), block, count) ||
544 in_range (block, le32_to_cpu(desc->bg_inode_table),
545 sbi->s_itb_per_group) ||
546 in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table),
547 sbi->s_itb_per_group)) {
548 ext3_error (sb, "ext3_free_blocks",
549 "Freeing blocks in system zones - "
550 "Block = "E3FSBLK", count = %lu",
556 * We are about to start releasing blocks in the bitmap,
557 * so we need undo access.
559 /* @@@ check errors */
560 BUFFER_TRACE(bitmap_bh, "getting undo access");
561 err = ext3_journal_get_undo_access(handle, bitmap_bh);
566 * We are about to modify some metadata. Call the journal APIs
567 * to unshare ->b_data if a currently-committing transaction is
570 BUFFER_TRACE(gd_bh, "get_write_access");
571 err = ext3_journal_get_write_access(handle, gd_bh);
575 jbd_lock_bh_state(bitmap_bh);
577 for (i = 0, group_freed = 0; i < count; i++) {
579 * An HJ special. This is expensive...
581 #ifdef CONFIG_JBD_DEBUG
582 jbd_unlock_bh_state(bitmap_bh);
584 struct buffer_head *debug_bh;
585 debug_bh = sb_find_get_block(sb, block + i);
587 BUFFER_TRACE(debug_bh, "Deleted!");
588 if (!bh2jh(bitmap_bh)->b_committed_data)
589 BUFFER_TRACE(debug_bh,
590 "No committed data in bitmap");
591 BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
595 jbd_lock_bh_state(bitmap_bh);
597 if (need_resched()) {
598 jbd_unlock_bh_state(bitmap_bh);
600 jbd_lock_bh_state(bitmap_bh);
602 /* @@@ This prevents newly-allocated data from being
603 * freed and then reallocated within the same
606 * Ideally we would want to allow that to happen, but to
607 * do so requires making journal_forget() capable of
608 * revoking the queued write of a data block, which
609 * implies blocking on the journal lock. *forget()
610 * cannot block due to truncate races.
612 * Eventually we can fix this by making journal_forget()
613 * return a status indicating whether or not it was able
614 * to revoke the buffer. On successful revoke, it is
615 * safe not to set the allocation bit in the committed
616 * bitmap, because we know that there is no outstanding
617 * activity on the buffer any more and so it is safe to reallocate it.
620 BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
621 J_ASSERT_BH(bitmap_bh,
622 bh2jh(bitmap_bh)->b_committed_data != NULL);
623 ext3_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
624 bh2jh(bitmap_bh)->b_committed_data);
627 * We clear the bit in the bitmap after setting the committed
628 * data bit, because this is the reverse order to that which
629 * the allocator uses.
631 BUFFER_TRACE(bitmap_bh, "clear bit");
632 if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
633 bit + i, bitmap_bh->b_data)) {
634 jbd_unlock_bh_state(bitmap_bh);
635 ext3_error(sb, __func__,
636 "bit already cleared for block "E3FSBLK,
638 jbd_lock_bh_state(bitmap_bh);
639 BUFFER_TRACE(bitmap_bh, "bit already cleared");
644 jbd_unlock_bh_state(bitmap_bh);
646 spin_lock(sb_bgl_lock(sbi, block_group));
647 le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
648 spin_unlock(sb_bgl_lock(sbi, block_group));
649 percpu_counter_add(&sbi->s_freeblocks_counter, count);
651 /* We dirtied the bitmap block */
652 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
653 err = ext3_journal_dirty_metadata(handle, bitmap_bh);
655 /* And the group descriptor block */
656 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
657 ret = ext3_journal_dirty_metadata(handle, gd_bh);
659 *pdquot_freed_blocks += group_freed;
661 if (overflow && !err) {
669 ext3_std_error(sb, err);
674 * ext3_free_blocks() -- Free given blocks and update quota
675 * @handle: handle for this transaction
677 * @block: start physical block to free
678 * @count: number of blocks to free
680 void ext3_free_blocks(handle_t *handle, struct inode *inode,
681 ext3_fsblk_t block, unsigned long count)
683 struct super_block *sb = inode->i_sb;
684 unsigned long dquot_freed_blocks;
686 trace_ext3_free_blocks(inode, block, count);
687 ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
688 if (dquot_freed_blocks)
689 dquot_free_block(inode, dquot_freed_blocks);
694 * ext3_test_allocatable()
695 * @nr: block number to test, relative to the block group
696 * @bh: bufferhead contains the bitmap of the given block group
698 * For ext3 allocations, we must not reuse any blocks which are
699 * allocated in the bitmap buffer's "last committed data" copy. This
700 * prevents deletes from freeing up the page for reuse until we have
701 * committed the delete transaction.
703 * If we didn't do this, then deleting something and reallocating it as
704 * data would allow the old block to be overwritten before the
705 * transaction committed (because we force data to disk before commit).
706 * This would lead to corruption if we crashed between overwriting the
707 * data and committing the delete.
709 * @@@ We may want to make this allocation behaviour conditional on
710 * data-writes at some point, and disable it for metadata allocations or
713 static int ext3_test_allocatable(ext3_grpblk_t nr, struct buffer_head *bh)
716 struct journal_head *jh = bh2jh(bh);
718 if (ext3_test_bit(nr, bh->b_data))
721 jbd_lock_bh_state(bh);
722 if (!jh->b_committed_data)
725 ret = !ext3_test_bit(nr, jh->b_committed_data);
726 jbd_unlock_bh_state(bh);
731 * bitmap_search_next_usable_block()
732 * @start: the starting block (group relative) of the search
733 * @bh: bufferhead contains the block group bitmap
734 * @maxblocks: the ending block (group relative) of the reservation
736 * The bitmap search --- search forward alternately through the actual
737 * bitmap on disk and the last-committed copy in journal, until we find a
738 * bit free in both bitmaps.
741 bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
742 ext3_grpblk_t maxblocks)
745 struct journal_head *jh = bh2jh(bh);
747 while (start < maxblocks) {
748 next = ext3_find_next_zero_bit(bh->b_data, maxblocks, start);
749 if (next >= maxblocks)
751 if (ext3_test_allocatable(next, bh))
753 jbd_lock_bh_state(bh);
754 if (jh->b_committed_data)
755 start = ext3_find_next_zero_bit(jh->b_committed_data,
757 jbd_unlock_bh_state(bh);
763 * find_next_usable_block()
764 * @start: the starting block (group relative) to find next
765 * allocatable block in bitmap.
766 * @bh: bufferhead contains the block group bitmap
767 * @maxblocks: the ending block (group relative) for the search
769 * Find an allocatable block in a bitmap. We honor both the bitmap and
770 * its last-committed copy (if that exists), and perform the "most
771 * appropriate allocation" algorithm of looking for a free block near
772 * the initial goal; then for a free byte somewhere in the bitmap; then
773 * for any free bit in the bitmap.
776 find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
777 ext3_grpblk_t maxblocks)
779 ext3_grpblk_t here, next;
784 * The goal was occupied; search forward for a free
785 * block within the next XX blocks.
787 * end_goal is more or less random, but it has to be
788 * less than EXT3_BLOCKS_PER_GROUP. Aligning up to the
789 * next 64-bit boundary is simple..
791 ext3_grpblk_t end_goal = (start + 63) & ~63;
792 if (end_goal > maxblocks)
793 end_goal = maxblocks;
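/*
 * For example, with start == 100: end_goal = (100 + 63) & ~63 == 128, so
 * this near-goal scan only examines bits 100..127 of the bitmap.
 */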
794 here = ext3_find_next_zero_bit(bh->b_data, end_goal, start);
795 if (here < end_goal && ext3_test_allocatable(here, bh))
797 ext3_debug("Bit not found near goal\n");
804 p = bh->b_data + (here >> 3);
805 r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
806 next = (r - bh->b_data) << 3;
808 if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
812 * The bitmap search --- search forward alternately through the actual
813 * bitmap and the last-committed copy until we find a bit free in both.
816 here = bitmap_search_next_usable_block(here, bh, maxblocks);
822 * @lock: the spin lock for this block group
823 * @block: the free block (group relative) to allocate
824 * @bh: the buffer_head contains the block group bitmap
826 * We think we can allocate this block in this bitmap. Try to set the bit.
827 * If that succeeds then check that nobody has allocated and then freed the
828 * block since we saw that it was not marked in b_committed_data. If it _was_
829 * allocated and freed then clear the bit in the bitmap again and return zero (failure).
833 claim_block(spinlock_t *lock, ext3_grpblk_t block, struct buffer_head *bh)
835 struct journal_head *jh = bh2jh(bh);
838 if (ext3_set_bit_atomic(lock, block, bh->b_data))
840 jbd_lock_bh_state(bh);
841 if (jh->b_committed_data && ext3_test_bit(block,jh->b_committed_data)) {
842 ext3_clear_bit_atomic(lock, block, bh->b_data);
847 jbd_unlock_bh_state(bh);
852 * ext3_try_to_allocate()
854 * @handle: handle to this transaction
855 * @group: given allocation block group
856 * @bitmap_bh: bufferhead holds the block bitmap
857 * @grp_goal: given target block within the group
858 * @count: target number of blocks to allocate
859 * @my_rsv: reservation window
861 * Attempt to allocate blocks within a given range. Set the range of allocation
862 * first, then find the first free bit(s) from the bitmap (within the range),
863 * and finally allocate the blocks by claiming the free bits found.
865 * To set the range of this allocation:
866 * if there is a reservation window, only try to allocate block(s) from the
867 * file's own reservation window;
868 * otherwise, the allocation range starts from the given goal block and ends at
869 * the block group's last block.
871 * If we failed to allocate the desired block then we may end up crossing to a
872 * new bitmap. In that case we must release write access to the old one via
873 * ext3_journal_release_buffer(), else we'll run out of credits.
876 ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
877 struct buffer_head *bitmap_bh, ext3_grpblk_t grp_goal,
878 unsigned long *count, struct ext3_reserve_window *my_rsv)
880 ext3_fsblk_t group_first_block;
881 ext3_grpblk_t start, end;
882 unsigned long num = 0;
884 /* we do allocation within the reservation window if we have a window */
886 group_first_block = ext3_group_first_block_no(sb, group);
887 if (my_rsv->_rsv_start >= group_first_block)
888 start = my_rsv->_rsv_start - group_first_block;
890 /* reservation window crosses group boundary */
892 end = my_rsv->_rsv_end - group_first_block + 1;
893 if (end > EXT3_BLOCKS_PER_GROUP(sb))
894 /* reservation window crosses group boundary */
895 end = EXT3_BLOCKS_PER_GROUP(sb);
896 if ((start <= grp_goal) && (grp_goal < end))
905 end = EXT3_BLOCKS_PER_GROUP(sb);
908 BUG_ON(start > EXT3_BLOCKS_PER_GROUP(sb));
911 if (grp_goal < 0 || !ext3_test_allocatable(grp_goal, bitmap_bh)) {
912 grp_goal = find_next_usable_block(start, bitmap_bh, end);
918 for (i = 0; i < 7 && grp_goal > start &&
919 ext3_test_allocatable(grp_goal - 1,
927 if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group),
928 grp_goal, bitmap_bh)) {
930 * The block was allocated by another thread, or it was
931 * allocated and then freed by another thread
941 while (num < *count && grp_goal < end
942 && ext3_test_allocatable(grp_goal, bitmap_bh)
943 && claim_block(sb_bgl_lock(EXT3_SB(sb), group),
944 grp_goal, bitmap_bh)) {
949 return grp_goal - num;
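/*
 * Note on the return value: grp_goal was advanced past every bit we
 * claimed, so grp_goal - num is the group-relative start of the extent.
 * For example, claiming bits 200..202 leaves grp_goal == 203 and
 * num == 3, and the function returns 200 with *count set to 3.
 */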
956 * find_next_reservable_window():
957 * find a reservable space within the given range.
958 * It does not allocate the reservation window for now:
959 * alloc_new_reservation() will do the work later.
961 * @search_head: the head of the searching list;
962 * This is not necessarily the list head of the whole filesystem
964 * We have both head and start_block to assist the search
965 * for the reservable space. The list starts from head,
966 * but we will shift to the place where start_block is,
967 * then start from there, when looking for a reservable space.
969 * @my_rsv: the reservation window
971 * @sb: the super block
973 * @start_block: the first block we consider to start
974 * the real search from
977 * the maximum block number that our goal reservable space
978 * could start from. This is normally the last block in this
979 * group. The search ends when we find that the start of the next
980 * possible reservable space is beyond this boundary.
981 * This handles a reservation window request that crosses the group boundary.
984 * Basically we search the given range, rather than the whole
985 * reservation rb tree, (start_block, last_block),
986 * to find a free region that is of my size and has not been reserved.
990 static int find_next_reservable_window(
991 struct ext3_reserve_window_node *search_head,
992 struct ext3_reserve_window_node *my_rsv,
993 struct super_block * sb,
994 ext3_fsblk_t start_block,
995 ext3_fsblk_t last_block)
997 struct rb_node *next;
998 struct ext3_reserve_window_node *rsv, *prev;
1000 int size = my_rsv->rsv_goal_size;
1002 /* TODO: make the start of the reservation window byte-aligned */
1003 /* cur = *start_block & ~7;*/
1010 if (cur <= rsv->rsv_end)
1011 cur = rsv->rsv_end + 1;
1014 * In the case where we could not find a reservable space
1015 * of the expected size, we could, during the re-search,
1016 * remember the largest reservable space we saw
1017 * and return that one.
1019 * For now it simply fails if we could not find a reservable
1020 * space of the expected size (or more)...
1022 if (cur > last_block)
1023 return -1; /* fail */
1026 next = rb_next(&rsv->rsv_node);
1027 rsv = rb_entry(next,struct ext3_reserve_window_node,rsv_node);
1030 * Reached the last reservation, we can just append to the previous one.
1036 if (cur + size <= rsv->rsv_start) {
1038 * Found a reservable space big enough. We could
1039 * have a reservation across the group boundary here
1045 * We come here either:
1046 * when we reach the end of the whole list
1047 * and there is empty reservable space after the last entry in the list,
1048 * in which case we append to the end of the list;
1050 * or when we found a reservable space in the middle of the list,
1051 * in which case we return the reservation window that we could append to.
1055 if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
1056 rsv_window_remove(sb, my_rsv);
1059 * Let's book the whole available window for now. We will check the
1060 * disk bitmap later and, if there are free blocks, we adjust
1061 * the window size if it's larger than requested.
1062 * Otherwise, we will remove this node from the tree the next time
1063 * we call find_next_reservable_window().
1065 my_rsv->rsv_start = cur;
1066 my_rsv->rsv_end = cur + size - 1;
1067 my_rsv->rsv_alloc_hit = 0;
1070 ext3_rsv_window_add(sb, my_rsv);
1076 * alloc_new_reservation()--allocate a new reservation window
1078 * To make a new reservation, we search part of the filesystem
1079 * reservation list (the list inside the group). We try to
1080 * allocate a new reservation window near the allocation goal,
1081 * or the beginning of the group, if there is no goal.
1083 * We first find a reservable space after the goal, then from
1084 * there, we check the bitmap for the first free block after
1085 * it. If there is no free block until the end of group, then the
1086 * whole group is full and we have failed. Otherwise, check if the free
1087 * block is inside the expected reservable space; if so, we succeed.
1089 * If the first free block is outside the reservable space, then
1090 * starting from that first free block, we search for the next available space, and go on.
1093 * On success, a new reservation window is found and inserted into the list.
1094 * It contains at least one free block, and it does not overlap with other
1095 * reservation windows.
1097 * On failure, we failed to find a reservation window in this group.
1099 * @my_rsv: the reservation window
1101 * @grp_goal: The goal (group-relative). It is where the search for a
1102 * free reservable space should start from.
1103 * if we have a grp_goal (grp_goal >= 0), then start from there;
1104 * with no grp_goal (grp_goal == -1), we start from the first block of the group.
1107 * @sb: the super block
1108 * @group: the group we are trying to allocate in
1109 * @bitmap_bh: the block group block bitmap
1112 static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
1113 ext3_grpblk_t grp_goal, struct super_block *sb,
1114 unsigned int group, struct buffer_head *bitmap_bh)
1116 struct ext3_reserve_window_node *search_head;
1117 ext3_fsblk_t group_first_block, group_end_block, start_block;
1118 ext3_grpblk_t first_free_block;
1119 struct rb_root *fs_rsv_root = &EXT3_SB(sb)->s_rsv_window_root;
1122 spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
1124 group_first_block = ext3_group_first_block_no(sb, group);
1125 group_end_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
1128 start_block = group_first_block;
1130 start_block = grp_goal + group_first_block;
1132 trace_ext3_alloc_new_reservation(sb, start_block);
1133 size = my_rsv->rsv_goal_size;
1135 if (!rsv_is_empty(&my_rsv->rsv_window)) {
1137 * if the old reservation crosses the group boundary
1138 * and the goal is inside the old reservation window,
1139 * we will come here when we just failed to allocate from
1140 * the first part of the window. We still have another part
1141 * that belongs to the next group. In this case, there is no
1142 * point in discarding our window and trying to allocate a new one
1143 * in this group (which would fail); we should
1144 * keep the reservation window and simply move on.
1146 * Maybe we could shift the start block of the reservation
1147 * window to the first block of the next group.
1150 if ((my_rsv->rsv_start <= group_end_block) &&
1151 (my_rsv->rsv_end > group_end_block) &&
1152 (start_block >= my_rsv->rsv_start))
1155 if ((my_rsv->rsv_alloc_hit >
1156 (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
1158 * if the previous allocation hit ratio is
1159 * greater than 1/2, then we double the size of
1160 * the reservation window the next time;
1161 * otherwise we keep the same size window.
1164 if (size > EXT3_MAX_RESERVE_BLOCKS)
1165 size = EXT3_MAX_RESERVE_BLOCKS;
1166 my_rsv->rsv_goal_size = size;
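/*
 * For example, a 32-block window that satisfied more than 16 allocations
 * gets a doubled goal size of 64 blocks for the next window, capped at
 * EXT3_MAX_RESERVE_BLOCKS.
 */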
1170 spin_lock(rsv_lock);
1172 * shift the search start to the window near the goal block
1174 search_head = search_reserve_window(fs_rsv_root, start_block);
1177 * find_next_reservable_window() simply finds a reservable window
1178 * inside the given range(start_block, group_end_block).
1180 * To make sure the reservation window has a free bit inside it, we
1181 * need to check the bitmap after we found a reservable window.
1184 ret = find_next_reservable_window(search_head, my_rsv, sb,
1185 start_block, group_end_block);
1188 if (!rsv_is_empty(&my_rsv->rsv_window))
1189 rsv_window_remove(sb, my_rsv);
1190 spin_unlock(rsv_lock);
1195 * On success, find_next_reservable_window() returns the
1196 * reservation window where there is a reservable space after it.
1197 * Before we reserve this reservable space, we need
1198 * to make sure there is at least a free block inside this region.
1200 * We search the block bitmap and the copy of the
1201 * last committed bitmap alternately for the first free bit, until we find an allocatable
1202 * block. The search starts from the start block of the reservable space we just found.
1205 spin_unlock(rsv_lock);
1206 first_free_block = bitmap_search_next_usable_block(
1207 my_rsv->rsv_start - group_first_block,
1208 bitmap_bh, group_end_block - group_first_block + 1);
1210 if (first_free_block < 0) {
1212 * no free blocks left in the bitmap, no point
1213 * in reserving the space; return failure.
1215 spin_lock(rsv_lock);
1216 if (!rsv_is_empty(&my_rsv->rsv_window))
1217 rsv_window_remove(sb, my_rsv);
1218 spin_unlock(rsv_lock);
1219 return -1; /* failed */
1222 start_block = first_free_block + group_first_block;
1224 * check if the first free block is within the
1225 * free space we just reserved
1227 if (start_block >= my_rsv->rsv_start &&
1228 start_block <= my_rsv->rsv_end) {
1229 trace_ext3_reserved(sb, start_block, my_rsv);
1230 return 0; /* success */
1233 * if the first free bit we found is outside the reservable space,
1234 * continue searching for the next reservable space,
1235 * starting from where the free block is;
1236 * we also shift the list head to where we stopped last time.
1238 search_head = my_rsv;
1239 spin_lock(rsv_lock);
1244 * try_to_extend_reservation()
1245 * @my_rsv: given reservation window
1247 * @size: the delta to extend
1249 * Attempt to expand the reservation window so that it is large enough to hold
1250 * the required number of free blocks.
1252 * Since ext3_try_to_allocate() will always allocate blocks within
1253 * the reservation window range, if the window size is too small,
1254 * multiple blocks allocation has to stop at the end of the reservation
1255 * window. To make this more efficient, given the total number of
1256 * blocks needed and the current size of the window, we try to
1257 * expand the reservation window size if necessary on a best-effort
1258 * basis before ext3_new_blocks() tries to allocate blocks,
1260 static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
1261 struct super_block *sb, int size)
1263 struct ext3_reserve_window_node *next_rsv;
1264 struct rb_node *next;
1265 spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
1267 if (!spin_trylock(rsv_lock))
1270 next = rb_next(&my_rsv->rsv_node);
1273 my_rsv->rsv_end += size;
1275 next_rsv = rb_entry(next, struct ext3_reserve_window_node, rsv_node);
1277 if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
1278 my_rsv->rsv_end += size;
1280 my_rsv->rsv_end = next_rsv->rsv_start - 1;
1282 spin_unlock(rsv_lock);
1286 * ext3_try_to_allocate_with_rsv()
1288 * @handle: handle to this transaction
1289 * @group: given allocation block group
1290 * @bitmap_bh: bufferhead holds the block bitmap
1291 * @grp_goal: given target block within the group
1292 * @my_rsv: reservation window
1293 * @count: target number of blocks to allocate
1294 * @errp: pointer to store the error code
1296 * This is the main function used to allocate a new block and its reservation window.
1299 * Each time a new block allocation is needed, we first try to allocate from
1300 * the inode's own reservation. If it does not have a reservation window, then instead of
1301 * first looking for a free bit in the bitmap and then looking up the reservation list to
1302 * see whether it falls inside somebody else's reservation window, we try to allocate a
1303 * reservation window for it starting from the goal. Then we do the block
1304 * allocation within the reservation window.
1306 * This avoids repeatedly searching the reservation list again and
1307 * again when somebody is looking for a free block (without
1308 * a reservation), and there are lots of free blocks, but they are all being reserved.
1311 * We use a red-black tree for the per-filesystem reservation list.
1314 static ext3_grpblk_t
1315 ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
1316 unsigned int group, struct buffer_head *bitmap_bh,
1317 ext3_grpblk_t grp_goal,
1318 struct ext3_reserve_window_node * my_rsv,
1319 unsigned long *count, int *errp)
1321 ext3_fsblk_t group_first_block, group_last_block;
1322 ext3_grpblk_t ret = 0;
1324 unsigned long num = *count;
1329 * Make sure we use undo access for the bitmap, because it is critical
1330 * that we do the frozen_data COW on bitmap buffers in all cases even
1331 * if the buffer is in BJ_Forget state in the committing transaction.
1333 BUFFER_TRACE(bitmap_bh, "get undo access for new block");
1334 fatal = ext3_journal_get_undo_access(handle, bitmap_bh);
1341 * We don't deal with reservations when
1342 * the filesystem is mounted without reservations,
1343 * or the file is not a regular file,
1344 * or the last attempt to allocate a block with reservation turned on failed.
1346 if (my_rsv == NULL) {
1347 ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
1348 grp_goal, count, NULL);
1352 * grp_goal is a group relative block number (if there is a goal)
1353 * 0 <= grp_goal < EXT3_BLOCKS_PER_GROUP(sb)
1354 * first block is a filesystem wide block number
1355 * first block is the block number of the first block in this group
1357 group_first_block = ext3_group_first_block_no(sb, group);
1358 group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
1361 * Basically we will allocate a new block from the inode's reservation window.
1364 * We need to allocate a new reservation window, if:
1365 * a) the inode does not have a reservation window; or
1366 * b) the last attempt to allocate a block from the existing reservation failed; or
1368 * c) we come here with a goal that is not inside the existing reservation window
1370 * We do not need to allocate a new reservation window if we come here
1371 * at the beginning with a goal and the goal is inside the window, or if
1372 * we don't have a goal but already have a reservation window;
1373 * in those cases we can allocate from the reservation window directly.
1376 if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
1377 !goal_in_my_reservation(&my_rsv->rsv_window,
1378 grp_goal, group, sb)) {
1379 if (my_rsv->rsv_goal_size < *count)
1380 my_rsv->rsv_goal_size = *count;
1381 ret = alloc_new_reservation(my_rsv, grp_goal, sb,
1386 if (!goal_in_my_reservation(&my_rsv->rsv_window,
1387 grp_goal, group, sb))
1389 } else if (grp_goal >= 0) {
1390 int curr = my_rsv->rsv_end -
1391 (grp_goal + group_first_block) + 1;
1394 try_to_extend_reservation(my_rsv, sb,
1398 if ((my_rsv->rsv_start > group_last_block) ||
1399 (my_rsv->rsv_end < group_first_block)) {
1400 rsv_window_dump(&EXT3_SB(sb)->s_rsv_window_root, 1);
1403 ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
1404 grp_goal, &num, &my_rsv->rsv_window);
1406 my_rsv->rsv_alloc_hit += num;
1408 break; /* succeed */
1414 BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
1416 fatal = ext3_journal_dirty_metadata(handle, bitmap_bh);
1424 BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
1425 ext3_journal_release_buffer(handle, bitmap_bh);
1430 * ext3_has_free_blocks()
1431 * @sbi: in-core super block structure.
1433 * Check if filesystem has at least 1 free block available for allocation.
1435 static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
1437 ext3_fsblk_t free_blocks, root_blocks;
1439 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
1440 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
1441 if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
1442 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
1443 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
1444 !in_group_p (sbi->s_resgid))) {
1451 * ext3_should_retry_alloc()
1453 * @retries: number of attempts that have been made
1455 * ext3_should_retry_alloc() is called when ENOSPC is returned, and if
1456 * it is profitable to retry the operation, this function will wait
1457 * for the current or committing transaction to complete, and then return TRUE.
1460 * If the total number of retries exceeds three, return FALSE.
1462 int ext3_should_retry_alloc(struct super_block *sb, int *retries)
1464 if (!ext3_has_free_blocks(EXT3_SB(sb), 0) || (*retries)++ > 3)
1467 jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
1469 return journal_force_commit_nested(EXT3_SB(sb)->s_journal);
1473 * ext3_new_blocks() -- core block(s) allocation function
1474 * @handle: handle to this transaction
1475 * @inode: file inode
1476 * @goal: given target block(filesystem wide)
1477 * @count: target number of blocks to allocate
1480 * ext3_new_blocks uses a goal block to assist allocation. It tries to
1481 * allocate block(s) from the block group containing the goal block first. If that
1482 * fails, it will try to allocate block(s) from other block groups without
1483 * any specific goal block.
1486 ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
1487 ext3_fsblk_t goal, unsigned long *count, int *errp)
1489 struct buffer_head *bitmap_bh = NULL;
1490 struct buffer_head *gdp_bh;
1493 ext3_grpblk_t grp_target_blk; /* blockgroup relative goal block */
1494 ext3_grpblk_t grp_alloc_blk; /* blockgroup-relative allocated block*/
1495 ext3_fsblk_t ret_block; /* filesystem-wide allocated block */
1496 int bgi; /* blockgroup iteration index */
1498 int performed_allocation = 0;
1499 ext3_grpblk_t free_blocks; /* number of free blocks in a group */
1500 struct super_block *sb;
1501 struct ext3_group_desc *gdp;
1502 struct ext3_super_block *es;
1503 struct ext3_sb_info *sbi;
1504 struct ext3_reserve_window_node *my_rsv = NULL;
1505 struct ext3_block_alloc_info *block_i;
1506 unsigned short windowsz = 0;
1508 static int goal_hits, goal_attempts;
1510 unsigned long ngroups;
1511 unsigned long num = *count;
1517 * Check quota for allocation of this block.
1519 err = dquot_alloc_block(inode, num);
1525 trace_ext3_request_blocks(inode, goal, num);
1529 ext3_debug("goal=%lu.\n", goal);
1531 * Allocate a block from reservation only when
1532 * filesystem is mounted with reservation (default, -o reservation), and
1533 * it's a regular file, and
1534 * the desired window size is greater than 0 (One could use ioctl
1535 * command EXT3_IOC_SETRSVSZ to set the window size to 0 to turn off
1536 * reservation on that particular file)
1538 block_i = EXT3_I(inode)->i_block_alloc_info;
1539 if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
1540 my_rsv = &block_i->rsv_window_node;
1542 if (!ext3_has_free_blocks(sbi, IS_NOQUOTA(inode))) {
1548 * First, test whether the goal block is free.
1550 if (goal < le32_to_cpu(es->s_first_data_block) ||
1551 goal >= le32_to_cpu(es->s_blocks_count))
1552 goal = le32_to_cpu(es->s_first_data_block);
1553 group_no = (goal - le32_to_cpu(es->s_first_data_block)) /
1554 EXT3_BLOCKS_PER_GROUP(sb);
1555 goal_group = group_no;
1557 gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
1561 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1563 * if there are not enough free blocks to make a new reservation,
1564 * turn off reservation for this allocation
1566 if (my_rsv && (free_blocks < windowsz)
1567 && (free_blocks > 0)
1568 && (rsv_is_empty(&my_rsv->rsv_window)))
1571 if (free_blocks > 0) {
1572 grp_target_blk = ((goal - le32_to_cpu(es->s_first_data_block)) %
1573 EXT3_BLOCKS_PER_GROUP(sb));
1574 bitmap_bh = read_block_bitmap(sb, group_no);
1577 grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle,
1578 group_no, bitmap_bh, grp_target_blk,
1579 my_rsv, &num, &fatal);
1582 if (grp_alloc_blk >= 0)
1586 ngroups = EXT3_SB(sb)->s_groups_count;
1590 * Now search the rest of the groups. We assume that
1591 * group_no and gdp correctly point to the last group visited.
1593 for (bgi = 0; bgi < ngroups; bgi++) {
1595 if (group_no >= ngroups)
1597 gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
1600 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1602 * skip this group (and avoid loading bitmap) if there
1603 * are no free blocks
1608 * skip this group if the number of
1609 * free blocks is less than half of the reservation window size
1612 if (my_rsv && (free_blocks <= (windowsz/2)))
1616 bitmap_bh = read_block_bitmap(sb, group_no);
1620 * try to allocate block(s) from this group, without a goal(-1).
1622 grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle,
1623 group_no, bitmap_bh, -1, my_rsv,
1627 if (grp_alloc_blk >= 0)
1631 * We may end up with a bogus ENOSPC error earlier because the
1632 * filesystem is "full" of reservations, but
1633 * there may indeed be free blocks available on disk.
1634 * In this case, we just forget about the reservations and
1635 * do the block allocation as if there were no reservations.
1640 group_no = goal_group;
1643 /* No space left on the device */
1649 ext3_debug("using block group %d(%d)\n",
1650 group_no, gdp->bg_free_blocks_count);
1652 BUFFER_TRACE(gdp_bh, "get_write_access");
1653 fatal = ext3_journal_get_write_access(handle, gdp_bh);
1657 ret_block = grp_alloc_blk + ext3_group_first_block_no(sb, group_no);
1659 if (in_range(le32_to_cpu(gdp->bg_block_bitmap), ret_block, num) ||
1660 in_range(le32_to_cpu(gdp->bg_inode_bitmap), ret_block, num) ||
1661 in_range(ret_block, le32_to_cpu(gdp->bg_inode_table),
1662 EXT3_SB(sb)->s_itb_per_group) ||
1663 in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table),
1664 EXT3_SB(sb)->s_itb_per_group)) {
1665 ext3_error(sb, "ext3_new_block",
1666 "Allocating block in system zone - "
1667 "blocks from "E3FSBLK", length %lu",
1670 * claim_block() marked the blocks we allocated as in use. So we
1671 * may want to selectively mark some of the blocks as free.
1676 performed_allocation = 1;
1678 #ifdef CONFIG_JBD_DEBUG
1680 struct buffer_head *debug_bh;
1682 /* Record bitmap buffer state in the newly allocated block */
1683 debug_bh = sb_find_get_block(sb, ret_block);
1685 BUFFER_TRACE(debug_bh, "state when allocated");
1686 BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
1690 jbd_lock_bh_state(bitmap_bh);
1691 spin_lock(sb_bgl_lock(sbi, group_no));
1692 if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
1695 for (i = 0; i < num; i++) {
1696 if (ext3_test_bit(grp_alloc_blk+i,
1697 bh2jh(bitmap_bh)->b_committed_data)) {
1698 printk("%s: block was unexpectedly set in "
1699 "b_committed_data\n", __func__);
1703 ext3_debug("found bit %d\n", grp_alloc_blk);
1704 spin_unlock(sb_bgl_lock(sbi, group_no));
1705 jbd_unlock_bh_state(bitmap_bh);
1708 if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) {
1709 ext3_error(sb, "ext3_new_block",
1710 "block("E3FSBLK") >= blocks count(%d) - "
1711 "block_group = %d, es == %p ", ret_block,
1712 le32_to_cpu(es->s_blocks_count), group_no, es);
1717 * It is up to the caller to add the new buffer to a journal
1718 * list of some description. We don't know in advance whether
1719 * the caller wants to use it as metadata or data.
1721 ext3_debug("allocating block %lu. Goal hits %d of %d.\n",
1722 ret_block, goal_hits, goal_attempts);
1724 spin_lock(sb_bgl_lock(sbi, group_no));
1725 le16_add_cpu(&gdp->bg_free_blocks_count, -num);
1726 spin_unlock(sb_bgl_lock(sbi, group_no));
1727 percpu_counter_sub(&sbi->s_freeblocks_counter, num);
1729 BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
1730 err = ext3_journal_dirty_metadata(handle, gdp_bh);
1741 dquot_free_block(inode, *count-num);
1745 trace_ext3_allocate_blocks(inode, goal, num,
1746 (unsigned long long)ret_block);
1755 ext3_std_error(sb, fatal);
1758 * Undo the block allocation
1760 if (!performed_allocation)
1761 dquot_free_block(inode, *count);
1766 ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode,
1767 ext3_fsblk_t goal, int *errp)
1769 unsigned long count = 1;
1771 return ext3_new_blocks(handle, inode, goal, &count, errp);
1775 * ext3_count_free_blocks() -- count filesystem free blocks
1778 * Adds up the number of free blocks from each block group.
1780 ext3_fsblk_t ext3_count_free_blocks(struct super_block *sb)
1782 ext3_fsblk_t desc_count;
1783 struct ext3_group_desc *gdp;
1785 unsigned long ngroups = EXT3_SB(sb)->s_groups_count;
1787 struct ext3_super_block *es;
1788 ext3_fsblk_t bitmap_count;
1790 struct buffer_head *bitmap_bh = NULL;
1792 es = EXT3_SB(sb)->s_es;
1798 for (i = 0; i < ngroups; i++) {
1799 gdp = ext3_get_group_desc(sb, i, NULL);
1802 desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
1804 bitmap_bh = read_block_bitmap(sb, i);
1805 if (bitmap_bh == NULL)
1808 x = ext3_count_free(bitmap_bh, sb->s_blocksize);
1809 printk("group %d: stored = %d, counted = %lu\n",
1810 i, le16_to_cpu(gdp->bg_free_blocks_count), x);
1814 printk("ext3_count_free_blocks: stored = "E3FSBLK
1815 ", computed = "E3FSBLK", "E3FSBLK"\n",
1816 (ext3_fsblk_t)le32_to_cpu(es->s_free_blocks_count),
1817 desc_count, bitmap_count);
1818 return bitmap_count;
1822 for (i = 0; i < ngroups; i++) {
1823 gdp = ext3_get_group_desc(sb, i, NULL);
1826 desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
1833 static inline int test_root(int a, int b)
1842 static int ext3_group_sparse(int group)
1848 return (test_root(group, 7) || test_root(group, 5) ||
1849 test_root(group, 3));
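/*
 * Illustrative user-space sketch, not part of balloc.c: with the
 * sparse_super feature, superblock backups live only in groups 0, 1 and
 * powers of 3, 5 and 7. The power test below is a simple stand-in in the
 * spirit of test_root() above; it prints the backup groups among the
 * first 1000 groups.
 */
#include <stdio.h>

static int is_power_of(int a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;            /* true iff a is a power of b */
}

static int group_sparse(int group)
{
	if (group <= 1)
		return 1;
	return is_power_of(group, 3) || is_power_of(group, 5) ||
	       is_power_of(group, 7);
}

int main(void)
{
	int g;

	for (g = 0; g < 1000; g++)
		if (group_sparse(g))
			printf("%d ", g);   /* 0 1 3 5 7 9 25 27 49 81 ... */
	printf("\n");
	return 0;
}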
1853 * ext3_bg_has_super - number of blocks used by the superblock in group
1854 * @sb: superblock for filesystem
1855 * @group: group number to check
1857 * Return the number of blocks used by the superblock (primary or backup)
1858 * in this group. Currently this will be only 0 or 1.
1860 int ext3_bg_has_super(struct super_block *sb, int group)
1862 if (EXT3_HAS_RO_COMPAT_FEATURE(sb,
1863 EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
1864 !ext3_group_sparse(group))
1869 static unsigned long ext3_bg_num_gdb_meta(struct super_block *sb, int group)
1871 unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb);
1872 unsigned long first = metagroup * EXT3_DESC_PER_BLOCK(sb);
1873 unsigned long last = first + EXT3_DESC_PER_BLOCK(sb) - 1;
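/*
 * For example, with a 4 KiB block size there are 128 descriptors per
 * block, so metagroup 1 covers groups 128..255 and its descriptor
 * backups live in groups 128, 129 and 255.
 */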
1875 if (group == first || group == first + 1 || group == last)
1880 static unsigned long ext3_bg_num_gdb_nometa(struct super_block *sb, int group)
1882 return ext3_bg_has_super(sb, group) ? EXT3_SB(sb)->s_gdb_count : 0;
1886 * ext3_bg_num_gdb - number of blocks used by the group table in group
1887 * @sb: superblock for filesystem
1888 * @group: group number to check
1890 * Return the number of blocks used by the group descriptor table
1891 * (primary or backup) in this group. In the future there may be a
1892 * different number of descriptor blocks in each group.
1894 unsigned long ext3_bg_num_gdb(struct super_block *sb, int group)
1896 unsigned long first_meta_bg =
1897 le32_to_cpu(EXT3_SB(sb)->s_es->s_first_meta_bg);
1898 unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb);
1900 if (!EXT3_HAS_INCOMPAT_FEATURE(sb,EXT3_FEATURE_INCOMPAT_META_BG) ||
1901 metagroup < first_meta_bg)
1902 return ext3_bg_num_gdb_nometa(sb,group);
1904 return ext3_bg_num_gdb_meta(sb,group);
1909 * ext3_trim_all_free -- function to trim all free space in alloc. group
1910 * @sb: super block for file system
1911 * @group: allocation group to trim
1912 * @start: first group block to examine
1913 * @max: last group block to examine
1914 * @gdp: allocation group description structure
1915 * @minblocks: minimum extent block count
1917 * ext3_trim_all_free walks through the group's block bitmap searching for free
1918 * blocks. When a free block is found, it tries to allocate this block and the
1919 * consecutive free blocks after it to get the biggest free extent possible, until it
1920 * reaches any used block. Then it issues a TRIM command on this extent and frees
1921 * the extent in the block bitmap. This is done until the whole group is scanned.
1923 static ext3_grpblk_t ext3_trim_all_free(struct super_block *sb,
1925 ext3_grpblk_t start, ext3_grpblk_t max,
1926 ext3_grpblk_t minblocks)
1929 ext3_grpblk_t next, free_blocks, bit, freed, count = 0;
1930 ext3_fsblk_t discard_block;
1931 struct ext3_sb_info *sbi;
1932 struct buffer_head *gdp_bh, *bitmap_bh = NULL;
1933 struct ext3_group_desc *gdp;
1934 int err = 0, ret = 0;
1937 * We will update one block bitmap, and one group descriptor
1939 handle = ext3_journal_start_sb(sb, 2);
1941 return PTR_ERR(handle);
1943 bitmap_bh = read_block_bitmap(sb, group);
1949 BUFFER_TRACE(bitmap_bh, "getting undo access");
1950 err = ext3_journal_get_undo_access(handle, bitmap_bh);
1954 gdp = ext3_get_group_desc(sb, group, &gdp_bh);
1960 BUFFER_TRACE(gdp_bh, "get_write_access");
1961 err = ext3_journal_get_write_access(handle, gdp_bh);
1965 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1968 /* Walk through the whole group */
1969 while (start <= max) {
1970 start = bitmap_search_next_usable_block(start, bitmap_bh, max);
1976 * Allocate contiguous free extents by setting bits in the block bitmap.
1980 && claim_block(sb_bgl_lock(sbi, group),
1985 /* We did not claim any blocks */
1989 discard_block = (ext3_fsblk_t)start +
1990 ext3_group_first_block_no(sb, group);
1992 /* Update counters */
1993 spin_lock(sb_bgl_lock(sbi, group));
1994 le16_add_cpu(&gdp->bg_free_blocks_count, start - next);
1995 spin_unlock(sb_bgl_lock(sbi, group));
1996 percpu_counter_sub(&sbi->s_freeblocks_counter, next - start);
1998 free_blocks -= next - start;
1999 /* Do not issue a TRIM on extents smaller than minblocks */
2000 if ((next - start) < minblocks)
2003 trace_ext3_discard_blocks(sb, discard_block, next - start);
2004 /* Send the TRIM command down to the device */
2005 err = sb_issue_discard(sb, discard_block, next - start,
2007 count += (next - start);
2012 * Clear bits in the bitmap
2014 for (bit = start; bit < next; bit++) {
2015 BUFFER_TRACE(bitmap_bh, "clear bit");
2016 if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, group),
2017 bit, bitmap_bh->b_data)) {
2018 ext3_error(sb, __func__,
2019 "bit already cleared for block "E3FSBLK,
2020 (unsigned long)bit);
2021 BUFFER_TRACE(bitmap_bh, "bit already cleared");
2027 /* Update counters */
2028 spin_lock(sb_bgl_lock(sbi, group));
2029 le16_add_cpu(&gdp->bg_free_blocks_count, freed);
2030 spin_unlock(sb_bgl_lock(sbi, group));
2031 percpu_counter_add(&sbi->s_freeblocks_counter, freed);
2035 if (err != -EOPNOTSUPP)
2036 ext3_warning(sb, __func__, "Discard command "
2037 "returned error %d\n", err);
2041 if (fatal_signal_pending(current)) {
2048 /* No more suitable extents */
2049 if (free_blocks < minblocks)
2053 /* We dirtied the bitmap block */
2054 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
2055 ret = ext3_journal_dirty_metadata(handle, bitmap_bh);
2059 /* And the group descriptor block */
2060 BUFFER_TRACE(gdp_bh, "dirtied group descriptor block");
2061 ret = ext3_journal_dirty_metadata(handle, gdp_bh);
2065 ext3_debug("trimmed %d blocks in the group %d\n",
2071 ext3_journal_stop(handle);
2078 * ext3_trim_fs() -- trim ioctl handle function
2079 * @sb: superblock for filesystem
2080 * @start: First Byte to trim
2081 * @len: number of Bytes to trim from start
2082 * @minlen: minimum extent length in Bytes
2084 * ext3_trim_fs goes through all allocation groups containing Bytes from
2085 * start to start+len. For each such group the ext3_trim_all_free function
2086 * is invoked to trim all free space.
2088 int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range)
2090 ext3_grpblk_t last_block, first_block;
2091 unsigned long group, first_group, last_group;
2092 struct ext3_group_desc *gdp;
2093 struct ext3_super_block *es = EXT3_SB(sb)->s_es;
2094 uint64_t start, minlen, end, trimmed = 0;
2095 ext3_fsblk_t first_data_blk =
2096 le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
2097 ext3_fsblk_t max_blks = le32_to_cpu(es->s_blocks_count);
2100 start = range->start >> sb->s_blocksize_bits;
2101 end = start + (range->len >> sb->s_blocksize_bits) - 1;
2102 minlen = range->minlen >> sb->s_blocksize_bits;
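/*
 * For example, with a 4 KiB block size (s_blocksize_bits == 12), a range
 * of start = 1 GiB and len = 64 MiB converts to start = 262144 and
 * end = 262144 + 16384 - 1 = 278527 (in blocks).
 */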
2104 if (unlikely(minlen > EXT3_BLOCKS_PER_GROUP(sb)) ||
2105 unlikely(start >= max_blks))
2107 if (end >= max_blks)
2109 if (end <= first_data_blk)
2111 if (start < first_data_blk)
2112 start = first_data_blk;
2116 /* Determine first and last group to examine based on start and len */
2117 ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) start,
2118 &first_group, &first_block);
2119 ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) end,
2120 &last_group, &last_block);
2122 /* end now represents the last block to discard in this group */
2123 end = EXT3_BLOCKS_PER_GROUP(sb) - 1;
2125 for (group = first_group; group <= last_group; group++) {
2126 gdp = ext3_get_group_desc(sb, group, NULL);
2131 * For all the groups except the last one, last block will
2132 * always be EXT3_BLOCKS_PER_GROUP(sb)-1, so we only need to
2133 * change it for the last group, note that last_block is
2134 * already computed earlier by ext3_get_group_no_and_offset()
2136 if (group == last_group)
2139 if (le16_to_cpu(gdp->bg_free_blocks_count) >= minlen) {
2140 ret = ext3_trim_all_free(sb, group, first_block,
2148 * For every group except the first one, we are sure
2149 * that the first block to discard will be block #0.
2158 range->len = trimmed * sb->s_blocksize;