1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2017 Christoph Hellwig.
7 #include "xfs_shared.h"
8 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_inode.h"
14 #include "xfs_trace.h"
17 * In-core extent record layout:
19 * +-------+----------------------------+
20 * | 00:53 | all 54 bits of startoff |
21 * | 54:63 | low 10 bits of startblock |
22 * +-------+----------------------------+
23 * | 00:20 | all 21 bits of length |
24 * | 21 | unwritten extent bit |
25 * | 22:63 | high 42 bits of startblock |
26 * +-------+----------------------------+
/*
 * Field masks for the packed record.  The bit widths come from the on-disk
 * bmbt BITLEN constants (declared elsewhere) and must match the layout
 * table above: 54-bit startoff, 21-bit length, 52-bit startblock total.
 */
28 #define XFS_IEXT_STARTOFF_MASK xfs_mask64lo(BMBT_STARTOFF_BITLEN)
29 #define XFS_IEXT_LENGTH_MASK xfs_mask64lo(BMBT_BLOCKCOUNT_BITLEN)
30 #define XFS_IEXT_STARTBLOCK_MASK xfs_mask64lo(BMBT_STARTBLOCK_BITLEN)
/*
 * An all-zero 'hi' word marks an unused record slot: the length lives in
 * the low bits of 'hi' and can never be zero for a real extent.
 */
38 * Given that the length can't be a zero, only an empty hi value indicates an
41 static bool xfs_iext_rec_is_empty(struct xfs_iext_rec *rec)
46 static inline void xfs_iext_rec_clear(struct xfs_iext_rec *rec)
/*
 * xfs_iext_set: pack an expanded xfs_bmbt_irec into the two-word in-core
 * record per the bit layout above.  The ASSERTs verify each field fits in
 * its allotted bit width before packing.
 */
54 struct xfs_iext_rec *rec,
55 struct xfs_bmbt_irec *irec)
57 ASSERT((irec->br_startoff & ~XFS_IEXT_STARTOFF_MASK) == 0);
58 ASSERT((irec->br_blockcount & ~XFS_IEXT_LENGTH_MASK) == 0);
59 ASSERT((irec->br_startblock & ~XFS_IEXT_STARTBLOCK_MASK) == 0);
61 rec->lo = irec->br_startoff & XFS_IEXT_STARTOFF_MASK;
62 rec->hi = irec->br_blockcount & XFS_IEXT_LENGTH_MASK;
/* low 10 bits of startblock -> lo[54:63]; remaining high bits -> hi[22:63] */
64 rec->lo |= (irec->br_startblock << 54);
65 rec->hi |= ((irec->br_startblock & ~xfs_mask64lo(10)) << (22 - 10));
67 if (irec->br_state == XFS_EXT_UNWRITTEN)
/*
 * xfs_iext_get: unpack an in-core record back into an expanded
 * xfs_bmbt_irec.  Exact inverse of xfs_iext_set() above.
 */
73 struct xfs_bmbt_irec *irec,
74 struct xfs_iext_rec *rec)
76 irec->br_startoff = rec->lo & XFS_IEXT_STARTOFF_MASK;
77 irec->br_blockcount = rec->hi & XFS_IEXT_LENGTH_MASK;
79 irec->br_startblock = rec->lo >> 54;
80 irec->br_startblock |= (rec->hi & xfs_mask64hi(42)) >> (22 - 10);
/* bit 21 of hi is the unwritten-extent flag (see layout table above) */
82 if (rec->hi & (1 << 21))
83 irec->br_state = XFS_EXT_UNWRITTEN;
85 irec->br_state = XFS_EXT_NORM;
/*
 * Btree geometry: an inner node holds KEYS_PER_NODE (key, child-pointer)
 * pairs; a leaf holds RECS_PER_LEAF records after reserving space for the
 * prev/next sibling pointers at the end of the block.
 */
90 KEYS_PER_NODE = NODE_SIZE / (sizeof(uint64_t) + sizeof(void *)),
91 RECS_PER_LEAF = (NODE_SIZE - (2 * sizeof(struct xfs_iext_leaf *))) /
92 sizeof(struct xfs_iext_rec),
96 * In-core extent btree block layout:
98 * There are two types of blocks in the btree: leaf and inner (non-leaf) blocks.
100 * The leaf blocks are made up by %KEYS_PER_NODE extent records, which each
101 * contain the startoffset, blockcount, startblock and unwritten extent flag.
102 * See above for the exact format, followed by pointers to the previous and next
103 * leaf blocks (if there are any).
105 * The inner (non-leaf) blocks first contain KEYS_PER_NODE lookup keys, followed
106 * by an equal number of pointers to the btree blocks at the next lower level.
108 * +-------+-------+-------+-------+-------+----------+----------+
109 * Leaf: | rec 1 | rec 2 | rec 3 | rec 4 | rec N | prev-ptr | next-ptr |
110 * +-------+-------+-------+-------+-------+----------+----------+
112 * +-------+-------+-------+-------+-------+-------+------+-------+
113 * Inner: | key 1 | key 2 | key 3 | key N | ptr 1 | ptr 2 | ptr3 | ptr N |
114 * +-------+-------+-------+-------+-------+-------+------+-------+
116 struct xfs_iext_node {
117 uint64_t keys[KEYS_PER_NODE];
/* keys with bit 63 set are unused slots; real file offsets are <= 54 bits */
118 #define XFS_IEXT_KEY_INVALID (1ULL << 63)
119 void *ptrs[KEYS_PER_NODE];
122 struct xfs_iext_leaf {
123 struct xfs_iext_rec recs[RECS_PER_LEAF];
124 struct xfs_iext_leaf *prev;
125 struct xfs_iext_leaf *next;
/* Number of extent records currently stored in the fork. */
128 inline xfs_extnum_t xfs_iext_count(struct xfs_ifork *ifp)
130 return ifp->if_bytes / sizeof(struct xfs_iext_rec);
/*
 * Capacity of the leaf the cursor can point into: at height 1 the root is a
 * bare record array sized by if_bytes, otherwise leaves are full-sized.
 */
133 static inline int xfs_iext_max_recs(struct xfs_ifork *ifp)
135 if (ifp->if_height == 1)
136 return xfs_iext_count(ifp);
137 return RECS_PER_LEAF;
140 static inline struct xfs_iext_rec *cur_rec(struct xfs_iext_cursor *cur)
142 return &cur->leaf->recs[cur->pos];
/* A cursor is valid iff pos is in range and the record there is non-empty. */
145 static inline bool xfs_iext_valid(struct xfs_ifork *ifp,
146 struct xfs_iext_cursor *cur)
150 if (cur->pos < 0 || cur->pos >= xfs_iext_max_recs(ifp))
152 if (xfs_iext_rec_is_empty(cur_rec(cur)))
/*
 * Descend the tree along the leftmost child pointers to reach the first
 * (lowest-offset) leaf.
 */
158 xfs_iext_find_first_leaf(
159 struct xfs_ifork *ifp)
161 struct xfs_iext_node *node = ifp->if_u1.if_root;
167 for (height = ifp->if_height; height > 1; height--) {
168 node = node->ptrs[0];
/*
 * Descend along the rightmost in-use child at each level to reach the last
 * leaf.  (The inner scan presumably stops at the first invalid key so that
 * ptrs[i - 1] is the last populated child — elided here, confirm upstream.)
 */
176 xfs_iext_find_last_leaf(
177 struct xfs_ifork *ifp)
179 struct xfs_iext_node *node = ifp->if_u1.if_root;
185 for (height = ifp->if_height; height > 1; height--) {
186 for (i = 1; i < KEYS_PER_NODE; i++)
189 node = node->ptrs[i - 1];
/* Position the cursor on the first record of the first leaf. */
198 struct xfs_ifork *ifp,
199 struct xfs_iext_cursor *cur)
202 cur->leaf = xfs_iext_find_first_leaf(ifp);
/*
 * Position the cursor on the last in-use record of the last leaf by
 * scanning forward until an empty record slot is found.
 */
207 struct xfs_ifork *ifp,
208 struct xfs_iext_cursor *cur)
212 cur->leaf = xfs_iext_find_last_leaf(ifp);
218 for (i = 1; i < xfs_iext_max_recs(ifp); i++) {
219 if (xfs_iext_rec_is_empty(&cur->leaf->recs[i]))
/*
 * Advance the cursor to the next record, stepping into the next sibling
 * leaf when the current one is exhausted.  A cursor past either end of the
 * tree (per the ASSERT) restarts from the first record.
 */
227 struct xfs_ifork *ifp,
228 struct xfs_iext_cursor *cur)
231 ASSERT(cur->pos <= 0 || cur->pos >= RECS_PER_LEAF);
232 xfs_iext_first(ifp, cur);
236 ASSERT(cur->pos >= 0);
237 ASSERT(cur->pos < xfs_iext_max_recs(ifp));
/* multi-level trees chain leaves; follow ->next when we fall off the end */
240 if (ifp->if_height > 1 && !xfs_iext_valid(ifp, cur) &&
242 cur->leaf = cur->leaf->next;
/*
 * Step the cursor back to the previous record, moving into the previous
 * sibling leaf (positioned at its tail) when we walk off the front.
 */
249 struct xfs_ifork *ifp,
250 struct xfs_iext_cursor *cur)
253 ASSERT(cur->pos <= 0 || cur->pos >= RECS_PER_LEAF);
254 xfs_iext_last(ifp, cur);
258 ASSERT(cur->pos >= 0);
259 ASSERT(cur->pos <= RECS_PER_LEAF);
264 if (xfs_iext_valid(ifp, cur))
266 } while (cur->pos > 0);
268 if (ifp->if_height > 1 && cur->leaf->prev) {
269 cur->leaf = cur->leaf->prev;
270 cur->pos = RECS_PER_LEAF;
/*
 * Three-way compare of inner-node key n against a file offset.
 * (Return statements are elided in this listing; by the comparisons shown,
 * result is positive when keys[n] > offset and negative when less.)
 */
277 struct xfs_iext_node *node,
279 xfs_fileoff_t offset)
281 if (node->keys[n] > offset)
283 if (node->keys[n] < offset)
/*
 * Three-way compare of a leaf record's range [rec_offset, rec_offset+len)
 * against offset: >0 when the record starts after offset, <0 when it ends
 * at or before offset, 0 when the record covers offset.
 */
290 struct xfs_iext_rec *rec,
291 xfs_fileoff_t offset)
293 uint64_t rec_offset = rec->lo & XFS_IEXT_STARTOFF_MASK;
294 uint32_t rec_len = rec->hi & XFS_IEXT_LENGTH_MASK;
296 if (rec_offset > offset)
298 if (rec_offset + rec_len <= offset)
/*
 * Walk down from the root to the node at the given level whose key range
 * covers offset, following the last child whose key is <= offset.
 */
305 struct xfs_ifork *ifp,
306 xfs_fileoff_t offset,
309 struct xfs_iext_node *node = ifp->if_u1.if_root;
315 for (height = ifp->if_height; height > level; height--) {
316 for (i = 1; i < KEYS_PER_NODE; i++)
317 if (xfs_iext_key_cmp(node, i, offset) > 0)
320 node = node->ptrs[i - 1];
/*
 * Slot within an inner node whose key covers offset (last key <= offset).
 */
330 struct xfs_iext_node *node,
331 xfs_fileoff_t offset)
335 for (i = 1; i < KEYS_PER_NODE; i++) {
336 if (xfs_iext_key_cmp(node, i, offset) > 0)
/*
 * Slot where a new key for offset should be inserted: the first slot whose
 * key compares greater, or KEYS_PER_NODE if none does.
 */
344 xfs_iext_node_insert_pos(
345 struct xfs_iext_node *node,
346 xfs_fileoff_t offset)
350 for (i = 0; i < KEYS_PER_NODE; i++) {
351 if (xfs_iext_key_cmp(node, i, offset) > 0)
355 return KEYS_PER_NODE;
/* Count in-use key slots from 'start' until the first INVALID key. */
359 xfs_iext_node_nr_entries(
360 struct xfs_iext_node *node,
365 for (i = start; i < KEYS_PER_NODE; i++) {
366 if (node->keys[i] == XFS_IEXT_KEY_INVALID)
/* Count in-use records in a leaf from 'start' until the first empty slot. */
374 xfs_iext_leaf_nr_entries(
375 struct xfs_ifork *ifp,
376 struct xfs_iext_leaf *leaf,
381 for (i = start; i < xfs_iext_max_recs(ifp); i++) {
382 if (xfs_iext_rec_is_empty(&leaf->recs[i]))
/* Startoff of record n in a leaf — used as the lookup key in parents. */
389 static inline uint64_t
391 struct xfs_iext_leaf *leaf,
394 return leaf->recs[n].lo & XFS_IEXT_STARTOFF_MASK;
/*
 * Grow the tree by one level: allocate a new root whose single child is the
 * old root.  The first key is taken from the old root (leaf key when going
 * from height 1, first inner key otherwise); remaining slots are marked
 * invalid.
 */
399 struct xfs_ifork *ifp)
401 struct xfs_iext_node *node = kmem_zalloc(NODE_SIZE, KM_NOFS);
404 if (ifp->if_height == 1) {
405 struct xfs_iext_leaf *prev = ifp->if_u1.if_root;
407 node->keys[0] = xfs_iext_leaf_key(prev, 0);
408 node->ptrs[0] = prev;
410 struct xfs_iext_node *prev = ifp->if_u1.if_root;
412 ASSERT(ifp->if_height > 1);
414 node->keys[0] = prev->keys[0];
415 node->ptrs[0] = prev;
418 for (i = 1; i < KEYS_PER_NODE; i++)
419 node->keys[i] = XFS_IEXT_KEY_INVALID;
421 ifp->if_u1.if_root = node;
/*
 * Replace old_offset with new_offset in the parent keys above 'level',
 * walking down toward the child that owns the key.  Used when the first
 * entry of a node/leaf changes so ancestors stay consistent.
 */
426 xfs_iext_update_node(
427 struct xfs_ifork *ifp,
428 xfs_fileoff_t old_offset,
429 xfs_fileoff_t new_offset,
433 struct xfs_iext_node *node = ifp->if_u1.if_root;
436 for (height = ifp->if_height; height > level; height--) {
437 for (i = 0; i < KEYS_PER_NODE; i++) {
438 if (i > 0 && xfs_iext_key_cmp(node, i, old_offset) > 0)
440 if (node->keys[i] == old_offset)
441 node->keys[i] = new_offset;
443 node = node->ptrs[i - 1];
/*
 * Split a full inner node.  Roughly half the entries move to a freshly
 * allocated node; *pos and *nr_entries are rewritten to address whichever
 * half the pending insert now belongs to.
 */
450 static struct xfs_iext_node *
452 struct xfs_iext_node **nodep,
456 struct xfs_iext_node *node = *nodep;
457 struct xfs_iext_node *new = kmem_zalloc(NODE_SIZE, KM_NOFS);
458 const int nr_move = KEYS_PER_NODE / 2;
/* keep one extra entry on the left when KEYS_PER_NODE is odd */
459 int nr_keep = nr_move + (KEYS_PER_NODE & 1);
462 /* for sequential append operations just spill over into the new node */
463 if (*pos == KEYS_PER_NODE) {
471 for (i = 0; i < nr_move; i++) {
472 new->keys[i] = node->keys[nr_keep + i];
473 new->ptrs[i] = node->ptrs[nr_keep + i];
475 node->keys[nr_keep + i] = XFS_IEXT_KEY_INVALID;
476 node->ptrs[nr_keep + i] = NULL;
479 if (*pos >= nr_keep) {
482 *nr_entries = nr_move;
484 *nr_entries = nr_keep;
487 for (; i < KEYS_PER_NODE; i++)
488 new->keys[i] = XFS_IEXT_KEY_INVALID;
/*
 * Insert (offset -> ptr) into the inner node at 'level', growing the tree
 * height first if needed, splitting a full node, and recursing upward
 * (elided here) when the split produced a new sibling.
 */
493 xfs_iext_insert_node(
494 struct xfs_ifork *ifp,
499 struct xfs_iext_node *node, *new;
500 int i, pos, nr_entries;
503 if (ifp->if_height < level)
507 node = xfs_iext_find_level(ifp, offset, level);
508 pos = xfs_iext_node_insert_pos(node, offset);
509 nr_entries = xfs_iext_node_nr_entries(node, pos);
511 ASSERT(pos >= nr_entries || xfs_iext_key_cmp(node, pos, offset) != 0);
512 ASSERT(nr_entries <= KEYS_PER_NODE);
514 if (nr_entries == KEYS_PER_NODE)
515 new = xfs_iext_split_node(&node, &pos, &nr_entries);
518 * Update the pointers in higher levels if the first entry changes
519 * in an existing node.
521 if (node != new && pos == 0 && nr_entries > 0)
522 xfs_iext_update_node(ifp, node->keys[0], offset, level, node);
/* shift entries right to open the insert slot */
524 for (i = nr_entries; i > pos; i--) {
525 node->keys[i] = node->keys[i - 1];
526 node->ptrs[i] = node->ptrs[i - 1];
528 node->keys[pos] = offset;
529 node->ptrs[pos] = ptr;
532 offset = new->keys[0];
/*
 * Split a full leaf, mirroring xfs_iext_split_node(): move roughly half
 * the records to a new leaf, redirect the cursor into whichever half it
 * now addresses, and link the new leaf into the sibling chain.
 */
539 static struct xfs_iext_leaf *
541 struct xfs_iext_cursor *cur,
544 struct xfs_iext_leaf *leaf = cur->leaf;
545 struct xfs_iext_leaf *new = kmem_zalloc(NODE_SIZE, KM_NOFS);
546 const int nr_move = RECS_PER_LEAF / 2;
/* keep one extra record on the left when RECS_PER_LEAF is odd */
547 int nr_keep = nr_move + (RECS_PER_LEAF & 1);
550 /* for sequential append operations just spill over into the new node */
551 if (cur->pos == RECS_PER_LEAF) {
558 for (i = 0; i < nr_move; i++) {
559 new->recs[i] = leaf->recs[nr_keep + i];
560 xfs_iext_rec_clear(&leaf->recs[nr_keep + i]);
563 if (cur->pos >= nr_keep) {
566 *nr_entries = nr_move;
568 *nr_entries = nr_keep;
/* splice the new leaf into the prev/next chain after 'leaf' */
572 leaf->next->prev = new;
573 new->next = leaf->next;
/*
 * First insert into an empty fork: allocate a minimal one-record root and
 * point the cursor at it.
 */
581 struct xfs_ifork *ifp,
582 struct xfs_iext_cursor *cur)
584 ASSERT(ifp->if_bytes == 0);
586 ifp->if_u1.if_root = kmem_zalloc(sizeof(struct xfs_iext_rec), KM_NOFS);
589 /* now that we have a node step into it */
590 cur->leaf = ifp->if_u1.if_root;
/*
 * Grow the height-1 root record array by one record, zeroing the new
 * space.  Once it would reach RECS_PER_LEAF records it is sized as a full
 * NODE_SIZE leaf so the prev/next pointers fit.
 */
595 xfs_iext_realloc_root(
596 struct xfs_ifork *ifp,
597 struct xfs_iext_cursor *cur)
599 int64_t new_size = ifp->if_bytes + sizeof(struct xfs_iext_rec);
602 /* account for the prev/next pointers */
603 if (new_size / sizeof(struct xfs_iext_rec) == RECS_PER_LEAF)
604 new_size = NODE_SIZE;
606 new = krealloc(ifp->if_u1.if_root, new_size, GFP_NOFS | __GFP_NOFAIL);
607 memset(new + ifp->if_bytes, 0, new_size - ifp->if_bytes);
608 ifp->if_u1.if_root = new;
613 * Increment the sequence counter on extent tree changes. If we are on a COW
614 * fork, this allows the writeback code to skip looking for a COW extent if the
615 * COW fork hasn't changed. We use WRITE_ONCE here to ensure the update to the
616 * sequence counter is seen before the modifications to the extent tree itself
619 static inline void xfs_iext_inc_seq(struct xfs_ifork *ifp)
621 WRITE_ONCE(ifp->if_seq, READ_ONCE(ifp->if_seq) + 1);
/*
 * Insert irec at the cursor position: grow/reallocate the root as needed,
 * split a full leaf, fix up parent keys when the leaf's first record
 * changes, shift records right, and finally store the new record.  A leaf
 * split propagates upward via xfs_iext_insert_node().
 */
626 struct xfs_inode *ip,
627 struct xfs_iext_cursor *cur,
628 struct xfs_bmbt_irec *irec,
631 struct xfs_ifork *ifp = xfs_iext_state_to_fork(ip, state);
632 xfs_fileoff_t offset = irec->br_startoff;
633 struct xfs_iext_leaf *new = NULL;
636 xfs_iext_inc_seq(ifp);
638 if (ifp->if_height == 0)
639 xfs_iext_alloc_root(ifp, cur);
640 else if (ifp->if_height == 1)
641 xfs_iext_realloc_root(ifp, cur);
643 nr_entries = xfs_iext_leaf_nr_entries(ifp, cur->leaf, cur->pos);
644 ASSERT(nr_entries <= RECS_PER_LEAF);
645 ASSERT(cur->pos >= nr_entries ||
646 xfs_iext_rec_cmp(cur_rec(cur), irec->br_startoff) != 0);
648 if (nr_entries == RECS_PER_LEAF)
649 new = xfs_iext_split_leaf(cur, &nr_entries);
652 * Update the pointers in higher levels if the first entry changes
653 * in an existing node.
655 if (cur->leaf != new && cur->pos == 0 && nr_entries > 0) {
656 xfs_iext_update_node(ifp, xfs_iext_leaf_key(cur->leaf, 0),
657 offset, 1, cur->leaf);
660 for (i = nr_entries; i > cur->pos; i--)
661 cur->leaf->recs[i] = cur->leaf->recs[i - 1];
662 xfs_iext_set(cur_rec(cur), irec);
663 ifp->if_bytes += sizeof(struct xfs_iext_rec);
665 trace_xfs_iext_insert(ip, cur, state, _RET_IP_);
/* a leaf split adds a new key one level up (level 2) */
668 xfs_iext_insert_node(ifp, xfs_iext_leaf_key(new, 0), new, 2);
/*
 * After a deletion left an inner node under half full, try to merge it
 * with its previous or next sibling under the same parent.  Returns the
 * node that should be removed from the parent (elided), or keeps the node
 * if no merge is possible.
 */
671 static struct xfs_iext_node *
672 xfs_iext_rebalance_node(
673 struct xfs_iext_node *parent,
675 struct xfs_iext_node *node,
679 * If the neighbouring nodes are completely full, or have different
680 * parents, we might never be able to merge our node, and will only
681 * delete it once the number of entries hits zero.
687 struct xfs_iext_node *prev = parent->ptrs[*pos - 1];
688 int nr_prev = xfs_iext_node_nr_entries(prev, 0), i;
690 if (nr_prev + nr_entries <= KEYS_PER_NODE) {
/* append our entries onto the previous sibling */
691 for (i = 0; i < nr_entries; i++) {
692 prev->keys[nr_prev + i] = node->keys[i];
693 prev->ptrs[nr_prev + i] = node->ptrs[i];
699 if (*pos + 1 < xfs_iext_node_nr_entries(parent, *pos)) {
700 struct xfs_iext_node *next = parent->ptrs[*pos + 1];
701 int nr_next = xfs_iext_node_nr_entries(next, 0), i;
703 if (nr_entries + nr_next <= KEYS_PER_NODE) {
705 * Merge the next node into this node so that we don't
706 * have to do an additional update of the keys in the
709 for (i = 0; i < nr_next; i++) {
710 node->keys[nr_entries + i] = next->keys[i];
711 node->ptrs[nr_entries + i] = next->ptrs[i];
/*
 * Remove 'victim' from its parent at 'level', shifting the remaining
 * entries left, fixing ancestor keys if the first entry moved, then
 * rebalancing/recursing upward while nodes fall under half full.  At the
 * root, a single remaining entry collapses the tree by one level.
 */
723 xfs_iext_remove_node(
724 struct xfs_ifork *ifp,
725 xfs_fileoff_t offset,
728 struct xfs_iext_node *node, *parent;
729 int level = 2, pos, nr_entries, i;
731 ASSERT(level <= ifp->if_height);
732 node = xfs_iext_find_level(ifp, offset, level);
733 pos = xfs_iext_node_pos(node, offset);
735 ASSERT(node->ptrs[pos]);
736 ASSERT(node->ptrs[pos] == victim);
739 nr_entries = xfs_iext_node_nr_entries(node, pos) - 1;
740 offset = node->keys[0];
741 for (i = pos; i < nr_entries; i++) {
742 node->keys[i] = node->keys[i + 1];
743 node->ptrs[i] = node->ptrs[i + 1];
745 node->keys[nr_entries] = XFS_IEXT_KEY_INVALID;
746 node->ptrs[nr_entries] = NULL;
748 if (pos == 0 && nr_entries > 0) {
/* first key changed: propagate the new first key to the ancestors */
749 xfs_iext_update_node(ifp, offset, node->keys[0], level, node);
750 offset = node->keys[0];
753 if (nr_entries >= KEYS_PER_NODE / 2)
756 if (level < ifp->if_height) {
758 * If we aren't at the root yet try to find a neighbour node to
759 * merge with (or delete the node if it is empty), and then
760 * recurse up to the next level.
763 parent = xfs_iext_find_level(ifp, offset, level);
764 pos = xfs_iext_node_pos(parent, offset);
766 ASSERT(pos != KEYS_PER_NODE);
767 ASSERT(parent->ptrs[pos] == node);
769 node = xfs_iext_rebalance_node(parent, &pos, node, nr_entries);
775 } else if (nr_entries == 1) {
777 * If we are at the root and only one entry is left we can just
778 * free this node and update the root pointer.
780 ASSERT(node == ifp->if_u1.if_root);
781 ifp->if_u1.if_root = node->ptrs[0];
/*
 * Leaf-level analogue of xfs_iext_rebalance_node(): merge an under-filled
 * leaf into its prev or next sibling, keeping the cursor pointing at the
 * surviving copy of the record, then unlink the dead leaf from the chain
 * and remove it from its parent.
 */
788 xfs_iext_rebalance_leaf(
789 struct xfs_ifork *ifp,
790 struct xfs_iext_cursor *cur,
791 struct xfs_iext_leaf *leaf,
792 xfs_fileoff_t offset,
796 * If the neighbouring nodes are completely full we might never be able
797 * to merge our node, and will only delete it once the number of
804 int nr_prev = xfs_iext_leaf_nr_entries(ifp, leaf->prev, 0), i;
806 if (nr_prev + nr_entries <= RECS_PER_LEAF) {
807 for (i = 0; i < nr_entries; i++)
808 leaf->prev->recs[nr_prev + i] = leaf->recs[i];
/* cursor followed the merged records into the previous leaf */
810 if (cur->leaf == leaf) {
811 cur->leaf = leaf->prev;
819 int nr_next = xfs_iext_leaf_nr_entries(ifp, leaf->next, 0), i;
821 if (nr_entries + nr_next <= RECS_PER_LEAF) {
823 * Merge the next node into this node so that we don't
824 * have to do an additional update of the keys in the
827 for (i = 0; i < nr_next; i++) {
828 leaf->recs[nr_entries + i] =
832 if (cur->leaf == leaf->next) {
834 cur->pos += nr_entries;
837 offset = xfs_iext_leaf_key(leaf->next, 0);
/* unlink the removed leaf from the sibling chain */
846 leaf->prev->next = leaf->next;
848 leaf->next->prev = leaf->prev;
849 xfs_iext_remove_node(ifp, offset, leaf);
/* Free the final leaf, leaving the fork with no extent tree at all. */
853 xfs_iext_free_last_leaf(
854 struct xfs_ifork *ifp)
857 kmem_free(ifp->if_u1.if_root);
858 ifp->if_u1.if_root = NULL;
/*
 * Remove the record at the cursor: bump the sequence counter, shift the
 * remaining records left, fix parent keys if the leaf's first record
 * changed, and rebalance or free the leaf when it drops under half full.
 */
863 struct xfs_inode *ip,
864 struct xfs_iext_cursor *cur,
867 struct xfs_ifork *ifp = xfs_iext_state_to_fork(ip, state);
868 struct xfs_iext_leaf *leaf = cur->leaf;
869 xfs_fileoff_t offset = xfs_iext_leaf_key(leaf, 0);
872 trace_xfs_iext_remove(ip, cur, state, _RET_IP_);
874 ASSERT(ifp->if_height > 0);
875 ASSERT(ifp->if_u1.if_root != NULL);
876 ASSERT(xfs_iext_valid(ifp, cur));
878 xfs_iext_inc_seq(ifp);
880 nr_entries = xfs_iext_leaf_nr_entries(ifp, leaf, cur->pos) - 1;
881 for (i = cur->pos; i < nr_entries; i++)
882 leaf->recs[i] = leaf->recs[i + 1];
883 xfs_iext_rec_clear(&leaf->recs[nr_entries]);
884 ifp->if_bytes -= sizeof(struct xfs_iext_rec);
886 if (cur->pos == 0 && nr_entries > 0) {
887 xfs_iext_update_node(ifp, offset, xfs_iext_leaf_key(leaf, 0), 1,
889 offset = xfs_iext_leaf_key(leaf, 0);
890 } else if (cur->pos == nr_entries) {
/* removed the last record in this leaf: move the cursor onward */
891 if (ifp->if_height > 1 && leaf->next)
892 cur->leaf = leaf->next;
898 if (nr_entries >= RECS_PER_LEAF / 2)
901 if (ifp->if_height > 1)
902 xfs_iext_rebalance_leaf(ifp, cur, leaf, offset, nr_entries);
903 else if (nr_entries == 0)
904 xfs_iext_free_last_leaf(ifp);
908 * Lookup the extent covering bno.
910 * If there is an extent covering bno return the extent index, and store the
911 * expanded extent structure in *gotp, and the extent cursor in *cur.
912 * If there is no extent covering bno, but there is an extent after it (e.g.
913 * it lies in a hole) return that extent in *gotp and its cursor in *cur
915 * If bno is beyond the last extent return false, and return an invalid
919 xfs_iext_lookup_extent(
920 struct xfs_inode *ip,
921 struct xfs_ifork *ifp,
922 xfs_fileoff_t offset,
923 struct xfs_iext_cursor *cur,
924 struct xfs_bmbt_irec *gotp)
926 XFS_STATS_INC(ip->i_mount, xs_look_exlist);
/* descend to the leaf (level 1) that may contain 'offset' */
928 cur->leaf = xfs_iext_find_level(ifp, offset, 1);
/* scan the leaf for the first record at or after 'offset' */
934 for (cur->pos = 0; cur->pos < xfs_iext_max_recs(ifp); cur->pos++) {
935 struct xfs_iext_rec *rec = cur_rec(cur);
937 if (xfs_iext_rec_is_empty(rec))
939 if (xfs_iext_rec_cmp(rec, offset) >= 0)
943 /* Try looking in the next node for an entry > offset */
944 if (ifp->if_height == 1 || !cur->leaf->next)
946 cur->leaf = cur->leaf->next;
948 if (!xfs_iext_valid(ifp, cur))
951 xfs_iext_get(gotp, cur_rec(cur));
956 * Returns the last extent before end, and if this extent doesn't cover
957 * end, update end to the end of the extent.
960 xfs_iext_lookup_extent_before(
961 struct xfs_inode *ip,
962 struct xfs_ifork *ifp,
964 struct xfs_iext_cursor *cur,
965 struct xfs_bmbt_irec *gotp)
967 /* could be optimized to not even look up the next on a match.. */
968 if (xfs_iext_lookup_extent(ip, ifp, *end - 1, cur, gotp) &&
969 gotp->br_startoff <= *end - 1)
971 if (!xfs_iext_prev_extent(ifp, cur, gotp))
973 *end = gotp->br_startoff + gotp->br_blockcount;
/*
 * Overwrite the record at the cursor with *new, bumping the sequence
 * counter and fixing ancestor keys when the startoff changed.
 */
978 xfs_iext_update_extent(
979 struct xfs_inode *ip,
981 struct xfs_iext_cursor *cur,
982 struct xfs_bmbt_irec *new)
984 struct xfs_ifork *ifp = xfs_iext_state_to_fork(ip, state);
986 xfs_iext_inc_seq(ifp);
989 struct xfs_bmbt_irec old;
991 xfs_iext_get(&old, cur_rec(cur));
992 if (new->br_startoff != old.br_startoff) {
993 xfs_iext_update_node(ifp, old.br_startoff,
994 new->br_startoff, 1, cur->leaf);
998 trace_xfs_bmap_pre_update(ip, cur, state, _RET_IP_);
999 xfs_iext_set(cur_rec(cur), new);
1000 trace_xfs_bmap_post_update(ip, cur, state, _RET_IP_);
1004 * Return true if the cursor points at an extent and return the extent structure
1005 * in gotp. Else return false.
1008 xfs_iext_get_extent(
1009 struct xfs_ifork *ifp,
1010 struct xfs_iext_cursor *cur,
1011 struct xfs_bmbt_irec *gotp)
1013 if (!xfs_iext_valid(ifp, cur))
1015 xfs_iext_get(gotp, cur_rec(cur))
1020 * This is a recursive function, because of that we need to be extremely
1021 * careful with stack usage.
1024 xfs_iext_destroy_node(
1025 struct xfs_iext_node *node,
/* only recurse into children of inner nodes; leaves hold no pointers */
1031 for (i = 0; i < KEYS_PER_NODE; i++) {
1032 if (node->keys[i] == XFS_IEXT_KEY_INVALID)
1034 xfs_iext_destroy_node(node->ptrs[i], level - 1);
/* Tear down the whole tree and reset the fork to an empty state. */
1043 struct xfs_ifork *ifp)
1045 xfs_iext_destroy_node(ifp->if_u1.if_root, ifp->if_height);
1049 ifp->if_u1.if_root = NULL;