2 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
12 ** directory_part_size
16 ** are_leaves_removable
20 ** is_left_neighbor_in_cache
24 ** can_node_be_removed
26 ** dc_check_balance_internal
27 ** dc_check_balance_leaf
37 #include <linux/time.h>
38 #include <linux/slab.h>
39 #include <linux/string.h>
41 #include <linux/buffer_head.h>
43 /* To make any changes in the tree we find a node, that contains item
44 to be changed/deleted or position in the node we insert a new item
45 to. We call this node S. To do balancing we need to decide what we
46 will shift to left/right neighbor, or to a new node, where new item
47 will be etc. To make this analysis simpler we build virtual
48 node. Virtual node is an array of items, that will replace items of
49 node S. (For instance if we are going to delete an item, virtual
50 node does not contain it). Virtual node keeps information about
51 item sizes and types, mergeability of first and last items, sizes
52 of all entries in directory item. We use this array of items when
53 calculating what we can shift to neighbors and how many nodes we
54 have to have if we do not do any shiftings, if we shift to left/right
55 neighbor or to both. */
57 /* taking item number in virtual node, returns number of item, that it has in source buffer */
/* Map an item's index in the virtual node back to its index in the source
 * node.  For M_PASTE/M_CUT, and for items before the affected one, the
 * index is unchanged; for M_INSERT, items at/after the insertion point
 * were shifted by one.  Anything else must be M_DELETE (RFALSE below).
 * NOTE(review): this listing is missing interior lines (returns/braces). */
58 static inline int old_item_num(int new_num, int affected_item_num, int mode)
60 if (mode == M_PASTE || mode == M_CUT || new_num < affected_item_num)
63 if (mode == M_INSERT) {
66 "vs-8005: for INSERT mode and item number of inserted item");
71 RFALSE(mode != M_DELETE,
72 "vs-8010: old_item_num: mode must be M_DELETE (mode = \'%c\'",
/* Build the virtual node for S[h]: an array of virtual items describing
 * the node's contents after the pending operation (insert/paste/cut/delete),
 * plus left/right mergeability flags used by the shift calculations.
 * NOTE(review): interior lines are missing from this listing; comments
 * below describe only what the visible code shows. */
78 static void create_virtual_node(struct tree_balance *tb, int h)
81 struct virtual_node *vn = tb->tb_vn;
83 struct buffer_head *Sh; /* this comes from tb->S[h] */
85 Sh = PATH_H_PBUFFER(tb->tb_path, h);
87 /* size of changed node */
89 MAX_CHILD_SIZE(Sh) - B_FREE_SPACE(Sh) + tb->insert_size[h];
91 /* for internal nodes array of virtual items is not created */
93 vn->vn_nr_item = (vn->vn_size - DC_SIZE) / (DC_SIZE + KEY_SIZE);
97 /* number of items in virtual node */
99 B_NR_ITEMS(Sh) + ((vn->vn_mode == M_INSERT) ? 1 : 0) -
100 ((vn->vn_mode == M_DELETE) ? 1 : 0);
102 /* first virtual item */
103 vn->vn_vi = (struct virtual_item *)(tb->tb_vn + 1);
104 memset(vn->vn_vi, 0, vn->vn_nr_item * sizeof(struct virtual_item));
105 vn->vn_free_ptr += vn->vn_nr_item * sizeof(struct virtual_item);
107 /* first item in the node */
108 ih = B_N_PITEM_HEAD(Sh, 0);
110 /* define the mergeability for 0-th item (if it is not being deleted) */
111 if (op_is_left_mergeable(&(ih->ih_key), Sh->b_size)
112 && (vn->vn_mode != M_DELETE || vn->vn_affected_item_num))
113 vn->vn_vi[0].vi_type |= VI_TYPE_LEFT_MERGEABLE;
115 /* go through all items that remain in the virtual node (except for the new (inserted) one) */
116 for (new_num = 0; new_num < vn->vn_nr_item; new_num++) {
118 struct virtual_item *vi = vn->vn_vi + new_num;
120 ((new_num != vn->vn_affected_item_num) ? 0 : 1);
122 if (is_affected && vn->vn_mode == M_INSERT)
125 /* get item number in source node */
126 j = old_item_num(new_num, vn->vn_affected_item_num,
129 vi->vi_item_len += ih_item_len(ih + j) + IH_SIZE;
131 vi->vi_item = B_I_PITEM(Sh, ih + j);
132 vi->vi_uarea = vn->vn_free_ptr;
134 // FIXME: there is no check, that item operation did not
135 // consume too much memory
137 op_create_vi(vn, vi, is_affected, tb->insert_size[0]);
/* overflow check: item ops advance vn_free_ptr; panic if the
 * scratch buffer (tb->vn_buf) was overrun */
138 if (tb->vn_buf + tb->vn_buf_size < vn->vn_free_ptr)
139 reiserfs_panic(tb->tb_sb, "vs-8030",
140 "virtual node space consumed");
143 /* this is not being changed */
146 if (vn->vn_mode == M_PASTE || vn->vn_mode == M_CUT) {
147 vn->vn_vi[new_num].vi_item_len += tb->insert_size[0];
148 vi->vi_new_data = vn->vn_data; // pointer to data which is going to be pasted
152 /* virtual inserted item is not defined yet */
153 if (vn->vn_mode == M_INSERT) {
154 struct virtual_item *vi = vn->vn_vi + vn->vn_affected_item_num;
156 RFALSE(vn->vn_ins_ih == NULL,
157 "vs-8040: item header of inserted item is not specified");
158 vi->vi_item_len = tb->insert_size[0];
159 vi->vi_ih = vn->vn_ins_ih;
160 vi->vi_item = vn->vn_data;
161 vi->vi_uarea = vn->vn_free_ptr;
163 op_create_vi(vn, vi, 0 /*not pasted or cut */ ,
167 /* set right merge flag we take right delimiting key and check whether it is a mergeable item */
169 struct reiserfs_key *key;
171 key = B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0]);
172 if (op_is_left_mergeable(key, Sh->b_size)
173 && (vn->vn_mode != M_DELETE
174 || vn->vn_affected_item_num != B_NR_ITEMS(Sh) - 1))
175 vn->vn_vi[vn->vn_nr_item - 1].vi_type |=
176 VI_TYPE_RIGHT_MERGEABLE;
/* debug-only sanity check: deleting the last item while it is
 * right-mergeable is only legal in a narrow directory case */
178 #ifdef CONFIG_REISERFS_CHECK
179 if (op_is_left_mergeable(key, Sh->b_size) &&
180 !(vn->vn_mode != M_DELETE
181 || vn->vn_affected_item_num != B_NR_ITEMS(Sh) - 1)) {
182 /* we delete last item and it could be merged with right neighbor's first item */
185 && is_direntry_le_ih(B_N_PITEM_HEAD(Sh, 0))
186 && I_ENTRY_COUNT(B_N_PITEM_HEAD(Sh, 0)) == 1)) {
187 /* node contains more than 1 item, or item is not directory item, or this item contains more than 1 entry */
188 print_block(Sh, 0, -1, -1);
189 reiserfs_panic(tb->tb_sb, "vs-8045",
190 "rdkey %k, affected item==%d "
191 "(mode==%c) Must be %c",
192 key, vn->vn_affected_item_num,
193 vn->vn_mode, M_DELETE);
201 /* using virtual node check, how many items can be shifted to left
/* Result goes into tb->lnum[h]/tb->lbytes: whole items that fit in the
 * left neighbor given cur_free bytes, plus a possible partial item.
 * NOTE(review): interior lines are missing from this listing. */
203 static void check_left(struct tree_balance *tb, int h, int cur_free)
206 struct virtual_node *vn = tb->tb_vn;
207 struct virtual_item *vi;
210 RFALSE(cur_free < 0, "vs-8050: cur_free (%d) < 0", cur_free);
/* internal level: pointers/keys have fixed size, simple division */
214 tb->lnum[h] = cur_free / (DC_SIZE + KEY_SIZE);
220 if (!cur_free || !vn->vn_nr_item) {
221 /* no free space or nothing to move */
227 RFALSE(!PATH_H_PPARENT(tb->tb_path, 0),
228 "vs-8055: parent does not exist or invalid");
231 if ((unsigned int)cur_free >=
233 ((vi->vi_type & VI_TYPE_LEFT_MERGEABLE) ? IH_SIZE : 0))) {
234 /* all contents of S[0] fits into L[0] */
236 RFALSE(vn->vn_mode == M_INSERT || vn->vn_mode == M_PASTE,
237 "vs-8055: invalid mode or balance condition failed");
239 tb->lnum[0] = vn->vn_nr_item;
244 d_size = 0, ih_size = IH_SIZE;
246 /* first item may be merged with last item in left neighbor */
247 if (vi->vi_type & VI_TYPE_LEFT_MERGEABLE)
248 d_size = -((int)IH_SIZE), ih_size = 0;
251 for (i = 0; i < vn->vn_nr_item;
252 i++, ih_size = IH_SIZE, d_size = 0, vi++) {
253 d_size += vi->vi_item_len;
254 if (cur_free >= d_size) {
255 /* the item can be shifted entirely */
261 /* the item cannot be shifted entirely, try to split it */
262 /* check whether L[0] can hold ih and at least one byte of the item body */
263 if (cur_free <= ih_size) {
264 /* cannot shift even a part of the current item */
270 tb->lbytes = op_check_left(vi, cur_free, 0, 0);
271 if (tb->lbytes != -1)
272 /* count partially shifted item */
281 /* using virtual node check, how many items can be shifted to right
/* Mirror of check_left(): fills tb->rnum[h]/tb->rbytes by walking the
 * virtual items from the right end.
 * NOTE(review): interior lines are missing from this listing. */
283 static void check_right(struct tree_balance *tb, int h, int cur_free)
286 struct virtual_node *vn = tb->tb_vn;
287 struct virtual_item *vi;
290 RFALSE(cur_free < 0, "vs-8070: cur_free < 0");
/* internal level: pointers/keys have fixed size, simple division */
294 tb->rnum[h] = cur_free / (DC_SIZE + KEY_SIZE);
300 if (!cur_free || !vn->vn_nr_item) {
307 RFALSE(!PATH_H_PPARENT(tb->tb_path, 0),
308 "vs-8075: parent does not exist or invalid");
310 vi = vn->vn_vi + vn->vn_nr_item - 1;
311 if ((unsigned int)cur_free >=
313 ((vi->vi_type & VI_TYPE_RIGHT_MERGEABLE) ? IH_SIZE : 0))) {
314 /* all contents of S[0] fits into R[0] */
316 RFALSE(vn->vn_mode == M_INSERT || vn->vn_mode == M_PASTE,
317 "vs-8080: invalid mode or balance condition failed");
319 tb->rnum[h] = vn->vn_nr_item;
324 d_size = 0, ih_size = IH_SIZE;
326 /* last item may be merged with first item in right neighbor */
327 if (vi->vi_type & VI_TYPE_RIGHT_MERGEABLE)
328 d_size = -(int)IH_SIZE, ih_size = 0;
331 for (i = vn->vn_nr_item - 1; i >= 0;
332 i--, d_size = 0, ih_size = IH_SIZE, vi--) {
333 d_size += vi->vi_item_len;
334 if (cur_free >= d_size) {
335 /* the item can be shifted entirely */
341 /* check whether R[0] can hold ih and at least one byte of the item body */
342 if (cur_free <= ih_size) { /* cannot shift even a part of the current item */
347 /* R[0] can hold the header of the item and at least one byte of its body */
348 cur_free -= ih_size; /* cur_free is still > 0 */
350 tb->rbytes = op_check_right(vi, cur_free);
351 if (tb->rbytes != -1)
352 /* count partially shifted item */
362 * from - number of items, which are shifted to left neighbor entirely
363 * to - number of item, which are shifted to right neighbor entirely
364 * from_bytes - number of bytes of boundary item (or directory entries) which are shifted to left neighbor
365 * to_bytes - number of bytes of boundary item (or directory entries) which are shifted to right neighbor */
/* Compute how many nodes the remaining items need (returned count) and
 * how they split across S[0]/S1new/S2new, recorded in snum012[0..4].
 * NOTE(review): interior lines are missing from this listing. */
366 static int get_num_ver(int mode, struct tree_balance *tb, int h,
367 int from, int from_bytes,
368 int to, int to_bytes, short *snum012, int flow)
374 struct virtual_node *vn = tb->tb_vn;
375 // struct virtual_item * vi;
377 int total_node_size, max_node_size, current_item_size;
379 int start_item, /* position of item we start filling node from */
380 end_item, /* position of item we finish filling node by */
381 start_bytes, /* number of first bytes (entries for directory) of start_item-th item
382 we do not include into node that is being filled */
383 end_bytes; /* number of last bytes (entries for directory) of end_item-th item
384 we do not include into node that is being filled */
385 int split_item_positions[2]; /* these are positions in virtual item of
386 items, that are split between S[0] and
387 S1new and S1new and S2new */
389 split_item_positions[0] = -1;
390 split_item_positions[1] = -1;
392 /* We only create additional nodes if we are in insert or paste mode
393 or we are in replace mode at the internal level. If h is 0 and
394 the mode is M_REPLACE then in fix_nodes we change the mode to
395 paste or insert before we get here in the code. */
396 RFALSE(tb->insert_size[h] < 0 || (mode != M_INSERT && mode != M_PASTE),
397 "vs-8100: insert_size < 0 in overflow");
399 max_node_size = MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, h));
401 /* snum012 [0-2] - number of items, that lay
402 to S[0], first new node and second new node */
403 snum012[3] = -1; /* s1bytes */
404 snum012[4] = -1; /* s2bytes */
/* internal level: size is determined by pointer/key count alone */
408 i = ((to - from) * (KEY_SIZE + DC_SIZE) + DC_SIZE);
409 if (i == max_node_size)
411 return (i / max_node_size + 1);
417 cur_free = max_node_size;
419 // start from 'from'-th item
421 // skip its first 'start_bytes' units
422 start_bytes = ((from_bytes != -1) ? from_bytes : 0);
424 // last included item is the 'end_item'-th one
425 end_item = vn->vn_nr_item - to - 1;
426 // do not count last 'end_bytes' units of 'end_item'-th item
427 end_bytes = (to_bytes != -1) ? to_bytes : 0;
429 /* go through all item beginning from the start_item-th item and ending by
430 the end_item-th item. Do not count first 'start_bytes' units of
431 'start_item'-th item and last 'end_bytes' of 'end_item'-th item */
433 for (i = start_item; i <= end_item; i++) {
434 struct virtual_item *vi = vn->vn_vi + i;
435 int skip_from_end = ((i == end_item) ? end_bytes : 0);
437 RFALSE(needed_nodes > 3, "vs-8105: too many nodes are needed");
439 /* get size of current item */
440 current_item_size = vi->vi_item_len;
442 /* do not take in calculation head part (from_bytes) of from-th item */
444 op_part_size(vi, 0 /*from start */ , start_bytes);
446 /* do not take in calculation tail part of last item */
448 op_part_size(vi, 1 /*from end */ , skip_from_end);
450 /* if item fits into current node entirely */
451 if (total_node_size + current_item_size <= max_node_size) {
452 snum012[needed_nodes - 1]++;
453 total_node_size += current_item_size;
458 if (current_item_size > max_node_size) {
459 /* virtual item length is longer, than max size of item in
460 a node. It is impossible for direct item */
461 RFALSE(is_direct_le_ih(vi->vi_ih),
463 "direct item length is %d. It can not be longer than %d",
464 current_item_size, max_node_size);
465 /* we will try to split it */
470 /* as we do not split items, take new node and continue */
476 // calculate number of item units which fit into node being
481 free_space = max_node_size - total_node_size - IH_SIZE;
483 op_check_left(vi, free_space, start_bytes,
486 /* nothing fits into current node, take new node and continue */
487 needed_nodes++, i--, total_node_size = 0;
492 /* something fits into the current node */
493 //if (snum012[3] != -1 || needed_nodes != 1)
494 // reiserfs_panic (tb->tb_sb, "vs-8115: get_num_ver: too many nodes required");
495 //snum012[needed_nodes - 1 + 3] = op_unit_num (vi) - start_bytes - units;
496 start_bytes += units;
497 snum012[needed_nodes - 1 + 3] = units;
499 if (needed_nodes > 2)
500 reiserfs_warning(tb->tb_sb, "vs-8111",
501 "split_item_position is out of range");
502 snum012[needed_nodes - 1]++;
503 split_item_positions[needed_nodes - 1] = i;
505 /* continue from the same item with start_bytes != -1 */
511 // sum012[4] (if it is not -1) contains number of units of which
512 // are to be in S1new, snum012[3] - to be in S0. They are supposed
513 // to be S1bytes and S2bytes correspondingly, so recalculate
514 if (snum012[4] > 0) {
516 int bytes_to_r, bytes_to_l;
519 split_item_num = split_item_positions[1];
521 ((from == split_item_num
522 && from_bytes != -1) ? from_bytes : 0);
524 ((end_item == split_item_num
525 && end_bytes != -1) ? end_bytes : 0);
527 ((split_item_positions[0] ==
528 split_item_positions[1]) ? snum012[3] : 0);
/* s2bytes = total units of split item minus what goes elsewhere */
532 op_unit_num(&vn->vn_vi[split_item_num]) - snum012[4] -
533 bytes_to_r - bytes_to_l - bytes_to_S1new;
535 if (vn->vn_vi[split_item_num].vi_index != TYPE_DIRENTRY &&
536 vn->vn_vi[split_item_num].vi_index != TYPE_INDIRECT)
537 reiserfs_warning(tb->tb_sb, "vs-8115",
538 "not directory or indirect item");
541 /* now we know S2bytes, calculate S1bytes */
542 if (snum012[3] > 0) {
544 int bytes_to_r, bytes_to_l;
547 split_item_num = split_item_positions[0];
549 ((from == split_item_num
550 && from_bytes != -1) ? from_bytes : 0);
552 ((end_item == split_item_num
553 && end_bytes != -1) ? end_bytes : 0);
555 ((split_item_positions[0] == split_item_positions[1]
556 && snum012[4] != -1) ? snum012[4] : 0);
560 op_unit_num(&vn->vn_vi[split_item_num]) - snum012[3] -
561 bytes_to_r - bytes_to_l - bytes_to_S2new;
568 /* Set parameters for balancing.
569 * Performs write of results of analysis of balancing into structure tb,
570 * where it will later be used by the functions that actually do the balancing.
572 * tb tree_balance structure;
573 * h current level of the node;
574 * lnum number of items from S[h] that must be shifted to L[h];
575 * rnum number of items from S[h] that must be shifted to R[h];
576 * blk_num number of blocks that S[h] will be split into;
577 * s012 number of items that fall into split nodes.
578 * lbytes number of bytes which flow to the left neighbor from the item that is
579 * not shifted entirely
580 * rbytes number of bytes which flow to the right neighbor from the item that is
581 * not shifted entirely
582 * s1bytes number of bytes which flow to the first new node when S[0] splits (this number is contained in s012 array)
585 static void set_parameters(struct tree_balance *tb, int h, int lnum,
586 int rnum, int blk_num, short *s012, int lb, int rb)
591 tb->blknum[h] = blk_num;
593 if (h == 0) { /* only for leaf level */
/* s012 may be NULL for callers that only set counts; the visible
 * code copies s1num/s2num/s1bytes out of the array when given */
596 tb->s1num = *s012++, tb->s2num = *s012++;
597 tb->s1bytes = *s012++;
603 PROC_INFO_ADD(tb->tb_sb, lnum[h], lnum);
604 PROC_INFO_ADD(tb->tb_sb, rnum[h], rnum);
606 PROC_INFO_ADD(tb->tb_sb, lbytes[h], lb);
607 PROC_INFO_ADD(tb->tb_sb, rbytes[h], rb);
610 /* check, does node disappear if we shift tb->lnum[0] items to left
611 neighbor and tb->rnum[0] to the right one. */
/* Returns nonzero (per callers' convention — TODO confirm, returns not
 * visible in this listing) when S[0] can be emptied by the shifts. */
612 static int is_leaf_removable(struct tree_balance *tb)
614 struct virtual_node *vn = tb->tb_vn;
615 int to_left, to_right;
619 /* number of items, that will be shifted to left (right) neighbor
/* a -1 in lbytes/rbytes means "no partial item": only then does the
 * boundary item count as fully shifted */
621 to_left = tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0);
622 to_right = tb->rnum[0] - ((tb->rbytes != -1) ? 1 : 0);
623 remain_items = vn->vn_nr_item;
625 /* how many items remain in S[0] after shiftings to neighbors */
626 remain_items -= (to_left + to_right);
628 if (remain_items < 1) {
629 /* all content of node can be shifted to neighbors */
630 set_parameters(tb, 0, to_left, vn->vn_nr_item - to_left, 0,
635 if (remain_items > 1 || tb->lbytes == -1 || tb->rbytes == -1)
636 /* S[0] is not removable */
639 /* check, whether we can divide 1 remaining item between neighbors */
641 /* get size of remaining item (in item units) */
642 size = op_unit_num(&(vn->vn_vi[to_left]));
644 if (tb->lbytes + tb->rbytes >= size) {
645 set_parameters(tb, 0, to_left + 1, to_right + 1, 0, NULL,
653 /* check whether L, S, R can be joined in one node */
/* NOTE(review): interior lines are missing from this listing;
 * ih_size is accumulated from the mergeability cases below. */
654 static int are_leaves_removable(struct tree_balance *tb, int lfree, int rfree)
656 struct virtual_node *vn = tb->tb_vn;
658 struct buffer_head *S0;
660 S0 = PATH_H_PBUFFER(tb->tb_path, 0);
663 if (vn->vn_nr_item) {
/* merged items do not need their own item headers */
664 if (vn->vn_vi[0].vi_type & VI_TYPE_LEFT_MERGEABLE)
667 if (vn->vn_vi[vn->vn_nr_item - 1].
668 vi_type & VI_TYPE_RIGHT_MERGEABLE)
671 /* there was only one item and it will be deleted */
672 struct item_head *ih;
674 RFALSE(B_NR_ITEMS(S0) != 1,
675 "vs-8125: item number must be 1: it is %d",
678 ih = B_N_PITEM_HEAD(S0, 0);
680 && !comp_short_le_keys(&(ih->ih_key),
681 B_N_PDELIM_KEY(tb->CFR[0],
683 if (is_direntry_le_ih(ih)) {
684 /* Directory must be in correct state here: that is
685 somewhere at the left side should exist first directory
686 item. But the item being deleted can not be that first
687 one because its right neighbor is item of the same
688 directory. (But first item always gets deleted in last
689 turn). So, neighbors of deleted item can be merged, so
690 we can save ih_size */
693 /* we might check that left neighbor exists and is of the
695 RFALSE(le_ih_k_offset(ih) == DOT_OFFSET,
696 "vs-8130: first directory item can not be removed until directory is not empty");
/* everything fits into L plus R: mark all three joinable */
701 if (MAX_CHILD_SIZE(S0) + vn->vn_size <= rfree + lfree + ih_size) {
702 set_parameters(tb, 0, -1, -1, -1, NULL, -1, -1);
703 PROC_INFO_INC(tb->tb_sb, leaves_removable);
710 /* when we do not split item, lnum and rnum are numbers of entire items */
/* Helper macros used by the balance-condition code below: record in tb
 * how many items (lpar/rpar, possibly with a partial item) go to the
 * left/right neighbor.  NOTE(review): interior macro lines are missing
 * from this listing, so no comments can be inserted between the
 * backslash-continued lines. */
711 #define SET_PAR_SHIFT_LEFT /* record shift-to-left decision in tb */ \
716 to_l = (MAX_NR_KEY(Sh)+1 - lpar + vn->vn_nr_item + 1) / 2 -\
717 (MAX_NR_KEY(Sh) + 1 - lpar);\
719 set_parameters (tb, h, to_l, 0, lnver, NULL, -1, -1);\
723 if (lset==LEFT_SHIFT_FLOW)\
724 set_parameters (tb, h, lpar, 0, lnver, snum012+lset,\
727 set_parameters (tb, h, lpar - (tb->lbytes!=-1), 0, lnver, snum012+lset,\
731 #define SET_PAR_SHIFT_RIGHT /* record shift-to-right decision in tb */ \
736 to_r = (MAX_NR_KEY(Sh)+1 - rpar + vn->vn_nr_item + 1) / 2 - (MAX_NR_KEY(Sh) + 1 - rpar);\
738 set_parameters (tb, h, 0, to_r, rnver, NULL, -1, -1);\
742 if (rset==RIGHT_SHIFT_FLOW)\
743 set_parameters (tb, h, 0, rpar, rnver, snum012+rset,\
746 set_parameters (tb, h, 0, rpar - (tb->rbytes!=-1), rnver, snum012+rset,\
/* Release the path and (in the loop over tree levels, body not visible
 * in this listing) the buffers referenced by the tree_balance. */
750 static void free_buffers_in_tb(struct tree_balance *tb)
754 pathrelse(tb->tb_path);
756 for (i = 0; i < MAX_HEIGHT; i++) {
773 /* Get new buffers for storing new nodes that are created while balancing.
774 * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked;
775 * CARRY_ON - schedule didn't occur while the function worked;
776 * NO_DISK_SPACE - no disk space.
778 /* The function is NOT SCHEDULE-SAFE! */
779 static int get_empty_nodes(struct tree_balance *tb, int h)
781 struct buffer_head *new_bh,
782 *Sh = PATH_H_PBUFFER(tb->tb_path, h);
783 b_blocknr_t *blocknr, blocknrs[MAX_AMOUNT_NEEDED] = { 0, };
784 int counter, number_of_freeblk, amount_needed, /* number of needed empty blocks */
786 struct super_block *sb = tb->tb_sb;
788 /* number_of_freeblk is the number of empty blocks which have been
789 acquired for use by the balancing algorithm minus the number of
790 empty blocks used in the previous levels of the analysis,
791 number_of_freeblk = tb->cur_blknum can be non-zero if a schedule occurs
792 after empty blocks are acquired, and the balancing analysis is
793 then restarted, amount_needed is the number needed by this level
794 (h) of the balancing analysis.
796 Note that for systems with many processes writing, it would be
797 more layout optimal to calculate the total number needed by all
798 levels and then to run reiserfs_new_blocks to get all of them at once. */
800 /* Initiate number_of_freeblk to the amount acquired prior to the restart of
801 the analysis or 0 if not restarted, then subtract the amount needed
802 by all of the levels of the tree below h. */
803 /* blknum includes S[h], so we subtract 1 in this calculation */
804 for (counter = 0, number_of_freeblk = tb->cur_blknum;
805 counter < h; counter++)
807 (tb->blknum[counter]) ? (tb->blknum[counter] -
810 /* Allocate missing empty blocks. */
811 /* if Sh == 0 then we are getting a new root */
812 amount_needed = (Sh) ? (tb->blknum[h] - 1) : 1;
813 /* Amount_needed = the amount that we need more than the amount that we have. */
814 if (amount_needed > number_of_freeblk)
815 amount_needed -= number_of_freeblk;
816 else /* If we have enough already then there is nothing to do. */
819 /* No need to check quota - is not allocated for blocks used for formatted nodes */
820 if (reiserfs_new_form_blocknrs(tb, blocknrs,
821 amount_needed) == NO_DISK_SPACE)
822 return NO_DISK_SPACE;
824 /* for each blocknumber we just got, get a buffer and stick it on FEB */
825 for (blocknr = blocknrs, counter = 0;
826 counter < amount_needed; blocknr++, counter++) {
829 "PAP-8135: reiserfs_new_blocknrs failed when got new blocks");
831 new_bh = sb_getblk(sb, *blocknr);
832 RFALSE(buffer_dirty(new_bh) ||
833 buffer_journaled(new_bh) ||
834 buffer_journal_dirty(new_bh),
835 "PAP-8140: journaled or dirty buffer %b for the new block",
838 /* Put empty buffers into the array. */
839 RFALSE(tb->FEB[tb->cur_blknum],
840 "PAP-8141: busy slot for new buffer");
842 set_buffer_journal_new(new_bh);
843 tb->FEB[tb->cur_blknum++] = new_bh;
/* a concurrent change of the tree invalidates this analysis pass */
846 if (retval == CARRY_ON && FILESYSTEM_CHANGED_TB(tb))
847 retval = REPEAT_SEARCH;
852 /* Get free space of the left neighbor, which is stored in the parent
853 * node of the left neighbor. */
854 static int get_lfree(struct tree_balance *tb, int h)
856 struct buffer_head *l, *f;
/* no parent or no left-neighbor parent cached: report 0 free space
 * (return not visible in this listing — presumably 0, TODO confirm) */
859 if ((f = PATH_H_PPARENT(tb->tb_path, h)) == NULL ||
860 (l = tb->FL[h]) == NULL)
/* same parent: left neighbor is the preceding child; otherwise it is
 * the last child of FL[h] */
864 order = PATH_H_B_ITEM_ORDER(tb->tb_path, h) - 1;
866 order = B_NR_ITEMS(l);
870 return (MAX_CHILD_SIZE(f) - dc_size(B_N_CHILD(f, order)));
873 /* Get free space of the right neighbor,
874 * which is stored in the parent node of the right neighbor.
/* Mirror of get_lfree() for the right neighbor R[h]. */
876 static int get_rfree(struct tree_balance *tb, int h)
878 struct buffer_head *r, *f;
880 if ((f = PATH_H_PPARENT(tb->tb_path, h)) == NULL ||
882 (r = tb->FR[h]) == NULL)
886 order = PATH_H_B_ITEM_ORDER(tb->tb_path, h) + 1;
892 return (MAX_CHILD_SIZE(f) - dc_size(B_N_CHILD(f, order)));
896 /* Check whether left neighbor is in memory. */
/* Looks up L[h]'s block number via FL[h] and probes the buffer cache
 * without reading from disk (sb_find_get_block). */
897 static int is_left_neighbor_in_cache(struct tree_balance *tb, int h)
899 struct buffer_head *father, *left;
900 struct super_block *sb = tb->tb_sb;
901 b_blocknr_t left_neighbor_blocknr;
902 int left_neighbor_position;
904 /* Father of the left neighbor does not exist. */
908 /* Calculate father of the node to be balanced. */
909 father = PATH_H_PBUFFER(tb->tb_path, h + 1);
912 !B_IS_IN_TREE(father) ||
913 !B_IS_IN_TREE(tb->FL[h]) ||
914 !buffer_uptodate(father) ||
915 !buffer_uptodate(tb->FL[h]),
916 "vs-8165: F[h] (%b) or FL[h] (%b) is invalid",
919 /* Get position of the pointer to the left neighbor into the left father. */
920 left_neighbor_position = (father == tb->FL[h]) ?
921 tb->lkey[h] : B_NR_ITEMS(tb->FL[h]);
922 /* Get left neighbor block number. */
923 left_neighbor_blocknr =
924 B_N_CHILD_NUM(tb->FL[h], left_neighbor_position);
925 /* Look for the left neighbor in the cache. */
926 if ((left = sb_find_get_block(sb, left_neighbor_blocknr))) {
928 RFALSE(buffer_uptodate(left) && !B_IS_IN_TREE(left),
929 "vs-8170: left neighbor (%b %z) is not in the tree",
/* direction selectors passed to get_far_parent() as c_lr_par */
938 #define LEFT_PARENTS 'l'
939 #define RIGHT_PARENTS 'r'
/* Decrement a cpu key in place, dispatching to the key-type-specific
 * handler in the item_ops table. */
941 static void decrement_key(struct cpu_key *key)
943 // call item specific function for this key
944 item_ops[cpu_key_k_type(key)]->decrement_key(key);
947 /* Calculate far left/right parent of the left/right neighbor of the current node, that
948 * is calculate the left/right (FL[h]/FR[h]) neighbor of the parent F[h].
949 * Calculate left/right common parent of the current node and L[h]/R[h].
950 * Calculate left/right delimiting key position.
951 * Returns: PATH_INCORRECT - path in the tree is not correct;
952 SCHEDULE_OCCURRED - schedule occurred while the function worked;
953 * CARRY_ON - schedule didn't occur while the function worked;
/* NOTE(review): interior lines are missing from this listing. */
955 static int get_far_parent(struct tree_balance *tb,
957 struct buffer_head **pfather,
958 struct buffer_head **pcom_father, char c_lr_par)
960 struct buffer_head *parent;
961 INITIALIZE_PATH(s_path_to_neighbor_father);
962 struct treepath *path = tb->tb_path;
963 struct cpu_key s_lr_father_key;
966 first_last_position = 0,
967 path_offset = PATH_H_PATH_OFFSET(path, h);
969 /* Starting from F[h] go upwards in the tree, and look for the common
970 ancestor of F[h], and its neighbor l/r, that should be obtained. */
972 counter = path_offset;
974 RFALSE(counter < FIRST_PATH_ELEMENT_OFFSET,
975 "PAP-8180: invalid path length");
977 for (; counter > FIRST_PATH_ELEMENT_OFFSET; counter--) {
978 /* Check whether parent of the current buffer in the path is really parent in the tree. */
980 (parent = PATH_OFFSET_PBUFFER(path, counter - 1)))
981 return REPEAT_SEARCH;
982 /* Check whether position in the parent is correct. */
984 PATH_OFFSET_POSITION(path,
987 return REPEAT_SEARCH;
988 /* Check whether parent at the path really points to the child. */
989 if (B_N_CHILD_NUM(parent, position) !=
990 PATH_OFFSET_PBUFFER(path, counter)->b_blocknr)
991 return REPEAT_SEARCH;
992 /* Return delimiting key if position in the parent is not equal to first/last one. */
993 if (c_lr_par == RIGHT_PARENTS)
994 first_last_position = B_NR_ITEMS(parent)
995 if (position != first_last_position) {
996 *pcom_father = parent;
997 get_bh(*pcom_father);
998 /*(*pcom_father = parent)->b_count++; */
1003 /* if we are in the root of the tree, then there is no common father */
1004 if (counter == FIRST_PATH_ELEMENT_OFFSET) {
1005 /* Check whether first buffer in the path is the root of the tree. */
1006 if (PATH_OFFSET_PBUFFER
1008 FIRST_PATH_ELEMENT_OFFSET)->b_blocknr ==
1009 SB_ROOT_BLOCK(tb->tb_sb)) {
1010 *pfather = *pcom_father = NULL;
1013 return REPEAT_SEARCH;
1016 RFALSE(B_LEVEL(*pcom_father) <= DISK_LEAF_NODE_LEVEL,
1017 "PAP-8185: (%b %z) level too small",
1018 *pcom_father, *pcom_father);
1020 /* Check whether the common parent is locked. */
1022 if (buffer_locked(*pcom_father)) {
1024 /* Release the write lock while the buffer is busy */
1025 int depth = reiserfs_write_unlock_nested(tb->tb_sb);
1026 __wait_on_buffer(*pcom_father);
1027 reiserfs_write_lock_nested(tb->tb_sb, depth);
1028 if (FILESYSTEM_CHANGED_TB(tb)) {
1029 brelse(*pcom_father);
1030 return REPEAT_SEARCH;
1034 /* So, we got common parent of the current node and its left/right neighbor.
1035 Now we are getting the parent of the left/right neighbor. */
1037 /* Form key to get parent of the left/right neighbor. */
1038 le_key2cpu_key(&s_lr_father_key,
1039 B_N_PDELIM_KEY(*pcom_father,
1041 LEFT_PARENTS) ? (tb->lkey[h - 1] =
1047 if (c_lr_par == LEFT_PARENTS)
1048 decrement_key(&s_lr_father_key);
1051 (tb->tb_sb, &s_lr_father_key, &s_path_to_neighbor_father,
1056 if (FILESYSTEM_CHANGED_TB(tb)) {
1057 pathrelse(&s_path_to_neighbor_father);
1058 brelse(*pcom_father);
1059 return REPEAT_SEARCH;
1062 *pfather = PATH_PLAST_BUFFER(&s_path_to_neighbor_father);
1064 RFALSE(B_LEVEL(*pfather) != h + 1,
1065 "PAP-8190: (%b %z) level too small", *pfather, *pfather);
1066 RFALSE(s_path_to_neighbor_father.path_length <
1067 FIRST_PATH_ELEMENT_OFFSET, "PAP-8192: path length is too small");
/* keep the buffer reference in *pfather but drop the rest of the path */
1069 s_path_to_neighbor_father.path_length--;
1070 pathrelse(&s_path_to_neighbor_father);
1074 /* Get parents of neighbors of node in the path(S[path_offset]) and common parents of
1075 * S[path_offset] and L[path_offset]/R[path_offset]: F[path_offset], FL[path_offset],
1076 * FR[path_offset], CFL[path_offset], CFR[path_offset].
1077 * Calculate numbers of left and right delimiting keys position: lkey[path_offset], rkey[path_offset].
1078 * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked;
1079 * CARRY_ON - schedule didn't occur while the function worked;
/* NOTE(review): interior lines are missing from this listing. */
1081 static int get_parents(struct tree_balance *tb, int h)
1083 struct treepath *path = tb->tb_path;
1086 path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h);
1087 struct buffer_head *curf, *curcf;
1089 /* Current node is the root of the tree or will be root of the tree */
1090 if (path_offset <= FIRST_PATH_ELEMENT_OFFSET) {
1091 /* The root can not have parents.
1092 Release nodes which previously were obtained as parents of the current node neighbors. */
1104 /* Get parent FL[path_offset] of L[path_offset]. */
1105 position = PATH_OFFSET_POSITION(path, path_offset - 1);
1107 /* Current node is not the first child of its parent. */
1108 curf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
1109 curcf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
1112 tb->lkey[h] = position - 1;
1114 /* Calculate current parent of L[path_offset], which is the left neighbor of the current node.
1115 Calculate current common parent of L[path_offset] and the current node. Note that
1116 CFL[path_offset] not equal FL[path_offset] and CFL[path_offset] not equal F[path_offset].
1117 Calculate lkey[path_offset]. */
1118 if ((ret = get_far_parent(tb, h + 1, &curf,
1120 LEFT_PARENTS)) != CARRY_ON)
1125 tb->FL[h] = curf; /* New initialization of FL[h]. */
1127 tb->CFL[h] = curcf; /* New initialization of CFL[h]. */
1129 RFALSE((curf && !B_IS_IN_TREE(curf)) ||
1130 (curcf && !B_IS_IN_TREE(curcf)),
1131 "PAP-8195: FL (%b) or CFL (%b) is invalid", curf, curcf);
1133 /* Get parent FR[h] of R[h]. */
1135 /* Current node is the last child of F[h]. FR[h] != F[h]. */
1136 if (position == B_NR_ITEMS(PATH_H_PBUFFER(path, h + 1))) {
1137 /* Calculate current parent of R[h], which is the right neighbor of F[h].
1138 Calculate current common parent of R[h] and current node. Note that CFR[h]
1139 not equal FR[path_offset] and CFR[h] not equal F[h]. */
1141 get_far_parent(tb, h + 1, &curf, &curcf,
1142 RIGHT_PARENTS)) != CARRY_ON)
1145 /* Current node is not the last child of its parent F[h]. */
1146 curf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
1147 curcf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
1150 tb->rkey[h] = position;
1154 /* New initialization of FR[path_offset]. */
1158 /* New initialization of CFR[path_offset]. */
1161 RFALSE((curf && !B_IS_IN_TREE(curf)) ||
1162 (curcf && !B_IS_IN_TREE(curcf)),
1163 "PAP-8205: FR (%b) or CFR (%b) is invalid", curf, curcf);
1168 /* it is possible to remove node as result of shiftings to
1169 neighbors even when we insert or paste item. */
/* Returns NO_BALANCING_NEEDED if the change fits in S[h] (possibly via
 * merges with neighbors); !NO_BALANCING_NEEDED otherwise. */
1170 static inline int can_node_be_removed(int mode, int lfree, int sfree, int rfree,
1171 struct tree_balance *tb, int h)
1173 struct buffer_head *Sh = PATH_H_PBUFFER(tb->tb_path, h);
1174 int levbytes = tb->insert_size[h];
1175 struct item_head *ih;
1176 struct reiserfs_key *r_key = NULL;
1178 ih = B_N_PITEM_HEAD(Sh, 0);
1180 r_key = B_N_PDELIM_KEY(tb->CFR[h], tb->rkey[h]);
/* can L+S+R absorb S[h]'s data plus the pending change?  Merging the
 * boundary items saves one item header (IH_SIZE) on each mergeable end. */
1182 if (lfree + rfree + sfree < MAX_CHILD_SIZE(Sh) + levbytes
1183 /* shifting may merge items which might save space */
1186 && op_is_left_mergeable(&(ih->ih_key), Sh->b_size)) ? IH_SIZE : 0)
1189 && op_is_left_mergeable(r_key, Sh->b_size)) ? IH_SIZE : 0)
1190 + ((h) ? KEY_SIZE : 0)) {
1191 /* node can not be removed */
1192 if (sfree >= levbytes) { /* new item fits into node S[h] without any shifting */
1196 ((mode == M_INSERT) ? 1 : 0);
1197 set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
1198 return NO_BALANCING_NEEDED;
1201 PROC_INFO_INC(tb->tb_sb, can_node_be_removed[h]);
1202 return !NO_BALANCING_NEEDED;
1205 /* Check whether current node S[h] is balanced when increasing its size by
1206 * Inserting or Pasting.
1207 * Calculate parameters for balancing for current level h.
1209 * tb tree_balance structure;
1210 * h current level of the node;
1211 * inum item number in S[h];
1212 * mode i - insert, p - paste;
1213 * Returns: 1 - schedule occurred;
1214 * 0 - balancing for higher levels needed;
1215 * -1 - no balancing for higher levels needed;
1216 * -2 - no disk space.
1218 /* ip means Inserting or Pasting */
/*
 * ip_check_balance - balance analysis for a growing node (Insert/Paste).
 *
 * Decides how S[h] should be split/shifted and records the decision in
 * @tb via set_parameters().  Strategy (see comments below): first try to
 * avoid balancing entirely, then try to remove S[h] into its neighbors,
 * otherwise evaluate 8 packing variants via get_num_ver() and pick the
 * cheapest.
 *
 * NOTE(review): interior lines of this function are elided in this
 * listing (e.g. the `if (!h)` new-root branch and several get_num_ver()
 * argument lines); comments below describe only the visible code.
 */
1219 static int ip_check_balance(struct tree_balance *tb, int h)
1221 struct virtual_node *vn = tb->tb_vn;
1222 int levbytes, /* Number of bytes that must be inserted into (value
1223 is negative if bytes are deleted) buffer which
1224 contains node being balanced. The mnemonic is
1225 that the attempted change in node space used level
1226 is levbytes bytes. */
1229 int lfree, sfree, rfree /* free space in L, S and R */ ;
1231 /* nver is short for number of vertices, and lnver is the number if
1232 we shift to the left, rnver is the number if we shift to the
1233 right, and lrnver is the number if we shift in both directions.
1234 The goal is to minimize first the number of vertices, and second,
1235 the number of vertices whose contents are changed by shifting,
1236 and third the number of uncached vertices whose contents are
1237 changed by shifting and must be read from disk. */
1238 int nver, lnver, rnver, lrnver;
1240 /* used at leaf level only, S0 = S[0] is the node being balanced,
1241 sInum [ I = 0,1,2 ] is the number of items that will
1242 remain in node SI after balancing. S1 and S2 are new
1243 nodes that might be created. */
1245 /* we perform 8 calls to get_num_ver(). For each call we calculate five parameters.
1246 where 4th parameter is s1bytes and 5th - s2bytes
1248 short snum012[40] = { 0, }; /* s0num, s1num, s2num for 8 cases
1249 0,1 - do not shift and do not shift but bottle
1250 2 - shift only whole item to left
1251 3 - shift to left and bottle as much as possible
1252 4,5 - shift to right (whole items and as much as possible
1253 6,7 - shift to both directions (whole items and as much as possible)
1256 /* Sh is the node whose balance is currently being checked */
1257 struct buffer_head *Sh;
1259 Sh = PATH_H_PBUFFER(tb->tb_path, h);
1260 levbytes = tb->insert_size[h];
1262 /* Calculate balance parameters for creating new root. */
1265 reiserfs_panic(tb->tb_sb, "vs-8210",
1266 "S[0] can not be 0");
1267 switch (ret = get_empty_nodes(tb, h)) {
1269 set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
1270 return NO_BALANCING_NEEDED; /* no balancing for higher levels needed */
1276 reiserfs_panic(tb->tb_sb, "vs-8215", "incorrect "
1277 "return value of get_empty_nodes");
1281 if ((ret = get_parents(tb, h)) != CARRY_ON) /* get parents of S[h] neighbors. */
1284 sfree = B_FREE_SPACE(Sh);
1286 /* get free space of neighbors */
1287 rfree = get_rfree(tb, h);
1288 lfree = get_lfree(tb, h);
1290 if (can_node_be_removed(vn->vn_mode, lfree, sfree, rfree, tb, h) ==
1291 NO_BALANCING_NEEDED)
1292 /* and new item fits into node S[h] without any shifting */
1293 return NO_BALANCING_NEEDED;
1295 create_virtual_node(tb, h);
1298 determine maximal number of items we can shift to the left neighbor (in tb structure)
1299 and the maximal number of bytes that can flow to the left neighbor
1300 from the left most liquid item that cannot be shifted from S[0] entirely (returned value)
1302 check_left(tb, h, lfree);
1305 determine maximal number of items we can shift to the right neighbor (in tb structure)
1306 and the maximal number of bytes that can flow to the right neighbor
1307 from the right most liquid item that cannot be shifted from S[0] entirely (returned value)
1309 check_right(tb, h, rfree);
1311 /* all contents of internal node S[h] can be moved into its
1312 neighbors, S[h] will be removed after balancing */
1313 if (h && (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1)) {
1316 /* Since we are working on internal nodes, and our internal
1317 nodes have fixed size entries, then we can balance by the
1318 number of items rather than the space they consume. In this
1319 routine we set the left node equal to the right node,
1320 allowing a difference of less than or equal to 1 child
1323 ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] +
1324 vn->vn_nr_item + 1) / 2 - (MAX_NR_KEY(Sh) + 1 -
1326 set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL,
1331 /* this checks balance condition, that any two neighboring nodes can not fit in one node */
1333 (tb->lnum[h] >= vn->vn_nr_item + 1 ||
1334 tb->rnum[h] >= vn->vn_nr_item + 1),
1335 "vs-8220: tree is not balanced on internal level");
1336 RFALSE(!h && ((tb->lnum[h] >= vn->vn_nr_item && (tb->lbytes == -1)) ||
1337 (tb->rnum[h] >= vn->vn_nr_item && (tb->rbytes == -1))),
1338 "vs-8225: tree is not balanced on leaf level");
1340 /* all contents of S[0] can be moved into its neighbors
1341 S[0] will be removed after balancing. */
1342 if (!h && is_leaf_removable(tb))
1345 /* why do we perform this check here rather than earlier??
1346 Answer: we can win 1 node in some cases above. Moreover we
1347 checked it above, when we checked, that S[0] is not removable
1349 if (sfree >= levbytes) { /* new item fits into node S[h] without any shifting */
1351 tb->s0num = vn->vn_nr_item;
1352 set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
1353 return NO_BALANCING_NEEDED;
1357 int lpar, rpar, nset, lset, rset, lrset;
1359 * regular overflowing of the node
1362 /* get_num_ver works in 2 modes (FLOW & NO_FLOW)
1363 lpar, rpar - number of items we can shift to left/right neighbor (including splitting item)
1364 nset, lset, rset, lrset - shows, whether flowing items give better packing
1367 #define NO_FLOW 0 /* do no splitting */
1369 /* we choose one of the following */
1370 #define NOTHING_SHIFT_NO_FLOW 0
1371 #define NOTHING_SHIFT_FLOW 5
1372 #define LEFT_SHIFT_NO_FLOW 10
1373 #define LEFT_SHIFT_FLOW 15
1374 #define RIGHT_SHIFT_NO_FLOW 20
1375 #define RIGHT_SHIFT_FLOW 25
1376 #define LR_SHIFT_NO_FLOW 30
1377 #define LR_SHIFT_FLOW 35
1382 /* calculate number of blocks S[h] must be split into when
1383 nothing is shifted to the neighbors,
1384 as well as number of items in each part of the split node (s012 numbers),
1385 and number of bytes (s1bytes) of the shared drop which flow to S1 if any */
1386 nset = NOTHING_SHIFT_NO_FLOW;
1387 nver = get_num_ver(vn->vn_mode, tb, h,
1388 0, -1, h ? vn->vn_nr_item : 0, -1,
1394 /* note, that in this case we try to bottle between S[0] and S1 (S1 - the first new node) */
1395 nver1 = get_num_ver(vn->vn_mode, tb, h,
1397 snum012 + NOTHING_SHIFT_FLOW, FLOW);
1399 nset = NOTHING_SHIFT_FLOW, nver = nver1;
1402 /* calculate number of blocks S[h] must be split into when
1403 l_shift_num first items and l_shift_bytes of the right most
1404 liquid item to be shifted are shifted to the left neighbor,
1405 as well as number of items in each part of the split node (s012 numbers),
1406 and number of bytes (s1bytes) of the shared drop which flow to S1 if any
1408 lset = LEFT_SHIFT_NO_FLOW;
1409 lnver = get_num_ver(vn->vn_mode, tb, h,
1410 lpar - ((h || tb->lbytes == -1) ? 0 : 1),
1411 -1, h ? vn->vn_nr_item : 0, -1,
1412 snum012 + LEFT_SHIFT_NO_FLOW, NO_FLOW);
1416 lnver1 = get_num_ver(vn->vn_mode, tb, h,
1418 ((tb->lbytes != -1) ? 1 : 0),
1420 snum012 + LEFT_SHIFT_FLOW, FLOW);
1422 lset = LEFT_SHIFT_FLOW, lnver = lnver1;
1425 /* calculate number of blocks S[h] must be split into when
1426 r_shift_num first items and r_shift_bytes of the left most
1427 liquid item to be shifted are shifted to the right neighbor,
1428 as well as number of items in each part of the split node (s012 numbers),
1429 and number of bytes (s1bytes) of the shared drop which flow to S1 if any
1431 rset = RIGHT_SHIFT_NO_FLOW;
1432 rnver = get_num_ver(vn->vn_mode, tb, h,
1434 h ? (vn->vn_nr_item - rpar) : (rpar -
1439 snum012 + RIGHT_SHIFT_NO_FLOW, NO_FLOW);
1443 rnver1 = get_num_ver(vn->vn_mode, tb, h,
1446 ((tb->rbytes != -1) ? 1 : 0)),
1448 snum012 + RIGHT_SHIFT_FLOW, FLOW);
1451 rset = RIGHT_SHIFT_FLOW, rnver = rnver1;
1454 /* calculate number of blocks S[h] must be split into when
1455 items are shifted in both directions,
1456 as well as number of items in each part of the split node (s012 numbers),
1457 and number of bytes (s1bytes) of the shared drop which flow to S1 if any
1459 lrset = LR_SHIFT_NO_FLOW;
1460 lrnver = get_num_ver(vn->vn_mode, tb, h,
1461 lpar - ((h || tb->lbytes == -1) ? 0 : 1),
1463 h ? (vn->vn_nr_item - rpar) : (rpar -
1468 snum012 + LR_SHIFT_NO_FLOW, NO_FLOW);
1472 lrnver1 = get_num_ver(vn->vn_mode, tb, h,
1474 ((tb->lbytes != -1) ? 1 : 0),
1477 ((tb->rbytes != -1) ? 1 : 0)),
1479 snum012 + LR_SHIFT_FLOW, FLOW);
1480 if (lrnver > lrnver1)
1481 lrset = LR_SHIFT_FLOW, lrnver = lrnver1;
1484 /* Our general shifting strategy is:
1485 1) to minimize the number of new nodes;
1486 2) to minimize the number of neighbors involved in shifting;
1487 3) to minimize the number of disk reads; */
1489 /* we can win TWO or ONE nodes by shifting in both directions */
1490 if (lrnver < lnver && lrnver < rnver) {
1492 (tb->lnum[h] != 1 ||
1494 lrnver != 1 || rnver != 2 || lnver != 2
1495 || h != 1), "vs-8230: bad h");
1496 if (lrset == LR_SHIFT_FLOW)
1497 set_parameters(tb, h, tb->lnum[h], tb->rnum[h],
1498 lrnver, snum012 + lrset,
1499 tb->lbytes, tb->rbytes);
1501 set_parameters(tb, h,
1503 ((tb->lbytes == -1) ? 0 : 1),
1505 ((tb->rbytes == -1) ? 0 : 1),
1506 lrnver, snum012 + lrset, -1, -1);
1511 /* if shifting doesn't lead to better packing then don't shift */
1512 if (nver == lrnver) {
1513 set_parameters(tb, h, 0, 0, nver, snum012 + nset, -1,
1518 /* now we know that for better packing shifting in only one
1519 direction either to the left or to the right is required */
1521 /* if shifting to the left is better than shifting to the right */
1522 if (lnver < rnver) {
1527 /* if shifting to the right is better than shifting to the left */
1528 if (lnver > rnver) {
1529 SET_PAR_SHIFT_RIGHT;
1533 /* now shifting in either direction gives the same number
1534 of nodes and we can make use of the cached neighbors */
1535 if (is_left_neighbor_in_cache(tb, h)) {
1540 /* shift to the right independently on whether the right neighbor is in cache or not */
1541 SET_PAR_SHIFT_RIGHT;
1546 /* Check whether current node S[h] is balanced when Decreasing its size by
1547 * Deleting or Cutting for INTERNAL node of S+tree.
1548 * Calculate parameters for balancing for current level h.
1550 * tb tree_balance structure;
1551 * h current level of the node;
1552 * inum item number in S[h];
1553 * mode i - insert, p - paste;
1554 * Returns: 1 - schedule occurred;
1555 * 0 - balancing for higher levels needed;
1556 * -1 - no balancing for higher levels needed;
1557 * -2 - no disk space.
1559 * Note: Items of internal nodes have fixed size, so the balance condition for
1560 * the internal part of S+tree is as for the B-trees.
/*
 * dc_check_balance_internal - balance analysis for a shrinking INTERNAL node
 * (Delete/Cut).  Internal items have fixed size, so the analysis counts
 * items (B-tree style) rather than bytes.  The chosen action is recorded
 * in @tb via set_parameters().
 *
 * NOTE(review): interior lines are elided in this listing (several local
 * declarations, `order_L`/`order_R` computations and return statements);
 * comments describe only the visible code.
 */
1562 static int dc_check_balance_internal(struct tree_balance *tb, int h)
1564 struct virtual_node *vn = tb->tb_vn;
1566 /* Sh is the node whose balance is currently being checked,
1567 and Fh is its father. */
1568 struct buffer_head *Sh, *Fh;
1570 int lfree, rfree /* free space in L and R */ ;
1572 Sh = PATH_H_PBUFFER(tb->tb_path, h);
1573 Fh = PATH_H_PPARENT(tb->tb_path, h);
1575 maxsize = MAX_CHILD_SIZE(Sh);
1577 /* using tb->insert_size[h], which is negative in this case, create_virtual_node calculates: */
1578 /* new_nr_item = number of items node would have if operation is */
1579 /* performed without balancing (new_nr_item); */
1580 create_virtual_node(tb, h);
1582 if (!Fh) { /* S[h] is the root. */
1583 if (vn->vn_nr_item > 0) {
1584 set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
1585 return NO_BALANCING_NEEDED; /* no balancing for higher levels needed */
1587 /* new_nr_item == 0.
1588 * Current root will be deleted resulting in
1589 * decrementing the tree height. */
1590 set_parameters(tb, h, 0, 0, 0, NULL, -1, -1);
1594 if ((ret = get_parents(tb, h)) != CARRY_ON)
1597 /* get free space of neighbors */
1598 rfree = get_rfree(tb, h);
1599 lfree = get_lfree(tb, h);
1601 /* determine maximal number of items we can fit into neighbors */
1602 check_left(tb, h, lfree);
1603 check_right(tb, h, rfree);
1605 if (vn->vn_nr_item >= MIN_NR_KEY(Sh)) { /* Balance condition for the internal node is valid.
1606 * In this case we balance only if it leads to better packing. */
1607 if (vn->vn_nr_item == MIN_NR_KEY(Sh)) { /* Here we join S[h] with one of its neighbors,
1608 * which is impossible with greater values of new_nr_item. */
1609 if (tb->lnum[h] >= vn->vn_nr_item + 1) {
1610 /* All contents of S[h] can be moved to L[h]. */
1616 PATH_H_B_ITEM_ORDER(tb->tb_path,
1618 0) ? B_NR_ITEMS(tb->FL[h]) : n - 1;
1619 n = dc_size(B_N_CHILD(tb->FL[h], order_L)) /
1620 (DC_SIZE + KEY_SIZE);
1621 set_parameters(tb, h, -n - 1, 0, 0, NULL, -1,
1626 if (tb->rnum[h] >= vn->vn_nr_item + 1) {
1627 /* All contents of S[h] can be moved to R[h]. */
1633 PATH_H_B_ITEM_ORDER(tb->tb_path,
1635 B_NR_ITEMS(Fh)) ? 0 : n + 1;
1636 n = dc_size(B_N_CHILD(tb->FR[h], order_R)) /
1637 (DC_SIZE + KEY_SIZE);
1638 set_parameters(tb, h, 0, -n - 1, 0, NULL, -1,
1644 if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) {
1645 /* All contents of S[h] can be moved to the neighbors (L[h] & R[h]). */
1649 ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] -
1650 tb->rnum[h] + vn->vn_nr_item + 1) / 2 -
1651 (MAX_NR_KEY(Sh) + 1 - tb->rnum[h]);
1652 set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r,
1657 /* Balancing does not lead to better packing. */
1658 set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
1659 return NO_BALANCING_NEEDED;
1662 /* Current node contains an insufficient number of items. Balancing is required. */
1663 /* Check whether we can merge S[h] with left neighbor. */
1664 if (tb->lnum[h] >= vn->vn_nr_item + 1)
1665 if (is_left_neighbor_in_cache(tb, h)
1666 || tb->rnum[h] < vn->vn_nr_item + 1 || !tb->FR[h]) {
1672 PATH_H_B_ITEM_ORDER(tb->tb_path,
1674 0) ? B_NR_ITEMS(tb->FL[h]) : n - 1;
1675 n = dc_size(B_N_CHILD(tb->FL[h], order_L)) / (DC_SIZE +
1677 set_parameters(tb, h, -n - 1, 0, 0, NULL, -1, -1);
1681 /* Check whether we can merge S[h] with right neighbor. */
1682 if (tb->rnum[h] >= vn->vn_nr_item + 1) {
1688 PATH_H_B_ITEM_ORDER(tb->tb_path,
1689 h)) == B_NR_ITEMS(Fh)) ? 0 : (n + 1);
1690 n = dc_size(B_N_CHILD(tb->FR[h], order_R)) / (DC_SIZE +
1692 set_parameters(tb, h, 0, -n - 1, 0, NULL, -1, -1);
1696 /* All contents of S[h] can be moved to the neighbors (L[h] & R[h]). */
1697 if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) {
1701 ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] +
1702 vn->vn_nr_item + 1) / 2 - (MAX_NR_KEY(Sh) + 1 -
1704 set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL,
1709 /* For internal nodes try to borrow item from a neighbor */
1710 RFALSE(!tb->FL[h] && !tb->FR[h], "vs-8235: trying to borrow for root");
1712 /* Borrow one or two items from caching neighbor */
1713 if (is_left_neighbor_in_cache(tb, h) || !tb->FR[h]) {
1717 (MAX_NR_KEY(Sh) + 1 - tb->lnum[h] + vn->vn_nr_item +
1718 1) / 2 - (vn->vn_nr_item + 1);
1719 set_parameters(tb, h, -from_l, 0, 1, NULL, -1, -1);
1723 set_parameters(tb, h, 0,
1724 -((MAX_NR_KEY(Sh) + 1 - tb->rnum[h] + vn->vn_nr_item +
1725 1) / 2 - (vn->vn_nr_item + 1)), 1, NULL, -1, -1);
1729 /* Check whether current node S[h] is balanced when Decreasing its size by
1730 * Deleting or Truncating for LEAF node of S+tree.
1731 * Calculate parameters for balancing for current level h.
1733 * tb tree_balance structure;
1734 * h current level of the node;
1735 * inum item number in S[h];
1736 mode d - delete, c - cut (truncate);
1737 * Returns: 1 - schedule occurred;
1738 * 0 - balancing for higher levels needed;
1739 * -1 - no balancing for higher levels needed;
1740 * -2 - no disk space.
/*
 * dc_check_balance_leaf - balance analysis for a shrinking LEAF node
 * (Delete/Cut/Truncate).  Tries, in order: merging three leaves into
 * one, merging S[0] into its left or right neighbor, removing S[0] by
 * splitting its contents between both neighbors, and finally no
 * balancing at all.  The decision is recorded via set_parameters().
 *
 * NOTE(review): some interior lines (local declarations, a few return
 * statements) are elided in this listing; comments describe only the
 * visible code.
 */
1742 static int dc_check_balance_leaf(struct tree_balance *tb, int h)
1744 struct virtual_node *vn = tb->tb_vn;
1746 /* Number of bytes that must be deleted from
1747 (value is negative if bytes are deleted) buffer which
1748 contains node being balanced. The mnemonic is that the
1749 attempted change in node space used level is levbytes bytes. */
1751 /* the maximal item size */
1753 /* S0 is the node whose balance is currently being checked,
1754 and F0 is its father. */
1755 struct buffer_head *S0, *F0;
1756 int lfree, rfree /* free space in L and R */ ;
1758 S0 = PATH_H_PBUFFER(tb->tb_path, 0);
1759 F0 = PATH_H_PPARENT(tb->tb_path, 0);
1761 levbytes = tb->insert_size[h];
1763 maxsize = MAX_CHILD_SIZE(S0); /* maximal possible size of an item */
1765 if (!F0) { /* S[0] is the root now. */
1767 RFALSE(-levbytes >= maxsize - B_FREE_SPACE(S0),
1768 "vs-8240: attempt to create empty buffer tree");
1770 set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
1771 return NO_BALANCING_NEEDED;
1774 if ((ret = get_parents(tb, h)) != CARRY_ON)
1777 /* get free space of neighbors */
1778 rfree = get_rfree(tb, h);
1779 lfree = get_lfree(tb, h);
1781 create_virtual_node(tb, h);
1783 /* if 3 leaves can be merged into one, set parameters and return */
1784 if (are_leaves_removable(tb, lfree, rfree))
1787 /* determine maximal number of items we can shift to the left/right neighbor
1788 and the maximal number of bytes that can flow to the left/right neighbor
1789 from the left/right most liquid item that cannot be shifted from S[0] entirely
1791 check_left(tb, h, lfree);
1792 check_right(tb, h, rfree);
1794 /* check whether we can merge S with left neighbor. */
1795 if (tb->lnum[0] >= vn->vn_nr_item && tb->lbytes == -1)
1796 if (is_left_neighbor_in_cache(tb, h) || ((tb->rnum[0] - ((tb->rbytes == -1) ? 0 : 1)) < vn->vn_nr_item) || /* S can not be merged with R */
1800 "vs-8245: dc_check_balance_leaf: FL[h] must exist");
1802 /* set parameter to merge S[0] with its left neighbor */
1803 set_parameters(tb, h, -1, 0, 0, NULL, -1, -1);
1807 /* check whether we can merge S[0] with right neighbor. */
1808 if (tb->rnum[0] >= vn->vn_nr_item && tb->rbytes == -1) {
1809 set_parameters(tb, h, 0, -1, 0, NULL, -1, -1);
1813 /* All contents of S[0] can be moved to the neighbors (L[0] & R[0]). Set parameters and return */
1814 if (is_leaf_removable(tb))
1817 /* Balancing is not required. */
1818 tb->s0num = vn->vn_nr_item;
1819 set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
1820 return NO_BALANCING_NEEDED;
1823 /* Check whether current node S[h] is balanced when Decreasing its size by
1824 * Deleting or Cutting.
1825 * Calculate parameters for balancing for current level h.
1827 * tb tree_balance structure;
1828 * h current level of the node;
1829 * inum item number in S[h];
1830 * mode d - delete, c - cut.
1831 * Returns: 1 - schedule occurred;
1832 * 0 - balancing for higher levels needed;
1833 * -1 - no balancing for higher levels needed;
1834 * -2 - no disk space.
/*
 * dc_check_balance - dispatch the shrink-analysis (Delete/Cut) to the
 * internal-node or leaf-node variant depending on level @h.
 * NOTE(review): the `if`/`else` guard selecting between the two returns
 * is elided in this listing (presumably `h` selects internal vs leaf —
 * confirm against the full file).
 */
1836 static int dc_check_balance(struct tree_balance *tb, int h)
1838 RFALSE(!(PATH_H_PBUFFER(tb->tb_path, h)),
1839 "vs-8250: S is not initialized");
1842 return dc_check_balance_internal(tb, h);
1844 return dc_check_balance_leaf(tb, h);
1847 /* Check whether current node S[h] is balanced.
1848 * Calculate parameters for balancing for current level h.
1851 * tb tree_balance structure:
1853 * tb is a large structure that must be read about in the header file
1854 * at the same time as this procedure if the reader is to successfully
1855 * understand this procedure
1857 * h current level of the node;
1858 * inum item number in S[h];
1859 * mode i - insert, p - paste, d - delete, c - cut.
1860 * Returns: 1 - schedule occurred;
1861 * 0 - balancing for higher levels needed;
1862 * -1 - no balancing for higher levels needed;
1863 * -2 - no disk space.
/*
 * check_balance - top-level balance analysis entry point.
 * Initializes the virtual node header inside tb->vn_buf, then dispatches
 * to ip_check_balance() when the node is growing (insert_size > 0) or
 * dc_check_balance() when it is shrinking.
 * NOTE(review): some parameter lines (inum, pos_in_item, h) and vn field
 * assignments are elided in this listing.
 */
1865 static int check_balance(int mode,
1866 struct tree_balance *tb,
1870 struct item_head *ins_ih, const void *data)
1872 struct virtual_node *vn;
1874 vn = tb->tb_vn = (struct virtual_node *)(tb->vn_buf);
1875 vn->vn_free_ptr = (char *)(tb->tb_vn + 1);	/* item array lives right after the header */
1877 vn->vn_affected_item_num = inum;
1878 vn->vn_pos_in_item = pos_in_item;
1879 vn->vn_ins_ih = ins_ih;
1882 RFALSE(mode == M_INSERT && !vn->vn_ins_ih,
1883 "vs-8255: ins_ih can not be 0 in insert mode");
1885 if (tb->insert_size[h] > 0)
1886 /* Calculate balance parameters when size of node is increasing. */
1887 return ip_check_balance(tb, h);
1889 /* Calculate balance parameters when size of node is decreasing. */
1890 return dc_check_balance(tb, h);
1893 /* Check whether parent at the path is the really parent of the current node.*/
/*
 * get_direct_parent - verify that the buffer recorded in the path at
 * level h+1 is still the parent of the current node in the tree.
 *
 * Returns CARRY_ON when the cached parent is valid and unlocked, or
 * REPEAT_SEARCH when the tree changed and the path must be rebuilt.
 * Waiting on a locked buffer drops and retakes the write lock, so the
 * filesystem generation is rechecked afterwards.
 */
1894 static int get_direct_parent(struct tree_balance *tb, int h)
1896 struct buffer_head *bh;
1897 struct treepath *path = tb->tb_path;
1899 path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h);
1901 /* We are in the root or in the new root. */
1902 if (path_offset <= FIRST_PATH_ELEMENT_OFFSET) {
1904 RFALSE(path_offset < FIRST_PATH_ELEMENT_OFFSET - 1,
1905 "PAP-8260: invalid offset in the path");
1907 if (PATH_OFFSET_PBUFFER(path, FIRST_PATH_ELEMENT_OFFSET)->
1908 b_blocknr == SB_ROOT_BLOCK(tb->tb_sb)) {
1909 /* Root is not changed. */
1910 PATH_OFFSET_PBUFFER(path, path_offset - 1) = NULL;
1911 PATH_OFFSET_POSITION(path, path_offset - 1) = 0;
1914 return REPEAT_SEARCH; /* Root is changed and we must recalculate the path. */
1918 (bh = PATH_OFFSET_PBUFFER(path, path_offset - 1)))
1919 return REPEAT_SEARCH; /* Parent in the path is not in the tree. */
1922 PATH_OFFSET_POSITION(path,
1923 path_offset - 1)) > B_NR_ITEMS(bh))
1924 return REPEAT_SEARCH;
1926 if (B_N_CHILD_NUM(bh, position) !=
1927 PATH_OFFSET_PBUFFER(path, path_offset)->b_blocknr)
1928 /* Parent in the path is not parent of the current node in the tree. */
1929 return REPEAT_SEARCH;
1931 if (buffer_locked(bh)) {
1932 int depth = reiserfs_write_unlock_nested(tb->tb_sb);
1933 __wait_on_buffer(bh);	/* may sleep; tree can change meanwhile */
1934 reiserfs_write_lock_nested(tb->tb_sb, depth);
1935 if (FILESYSTEM_CHANGED_TB(tb))
1936 return REPEAT_SEARCH;
1939 return CARRY_ON; /* Parent in the path is unlocked and really parent of the current node. */
1942 /* Using lnum[h] and rnum[h] we should determine what neighbors
1944 * need in order to balance S[h], and get them if necessary.
1945 * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked;
1946 * CARRY_ON - schedule didn't occur while the function worked;
/*
 * get_neighbors - read into memory the left and/or right neighbor of
 * S[h] (as required by tb->lnum[h] / tb->rnum[h]) via sb_bread().
 * Each read drops and retakes the write lock, so the filesystem
 * generation is rechecked after every I/O; returns REPEAT_SEARCH if the
 * tree changed, otherwise (per the header comment above) CARRY_ON.
 * NOTE(review): several lines (brace placement, tb->L[h]/tb->R[h]
 * assignments, brelse calls) are elided in this listing.
 */
1948 static int get_neighbors(struct tree_balance *tb, int h)
1951 path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h + 1);
1952 unsigned long son_number;
1953 struct super_block *sb = tb->tb_sb;
1954 struct buffer_head *bh;
1957 PROC_INFO_INC(sb, get_neighbors[h]);
1960 /* We need left neighbor to balance S[h]. */
1961 PROC_INFO_INC(sb, need_l_neighbor[h]);
1962 bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset);
1964 RFALSE(bh == tb->FL[h] &&
1965 !PATH_OFFSET_POSITION(tb->tb_path, path_offset),
1966 "PAP-8270: invalid position in the parent");
1970 tb->FL[h]) ? tb->lkey[h] : B_NR_ITEMS(tb->
1972 son_number = B_N_CHILD_NUM(tb->FL[h], child_position);
1973 depth = reiserfs_write_unlock_nested(tb->tb_sb);
1974 bh = sb_bread(sb, son_number);	/* may sleep doing I/O */
1975 reiserfs_write_lock_nested(tb->tb_sb, depth);
1978 if (FILESYSTEM_CHANGED_TB(tb)) {
1980 PROC_INFO_INC(sb, get_neighbors_restart[h]);
1981 return REPEAT_SEARCH;
1984 RFALSE(!B_IS_IN_TREE(tb->FL[h]) ||
1985 child_position > B_NR_ITEMS(tb->FL[h]) ||
1986 B_N_CHILD_NUM(tb->FL[h], child_position) !=
1987 bh->b_blocknr, "PAP-8275: invalid parent");
1988 RFALSE(!B_IS_IN_TREE(bh), "PAP-8280: invalid child");
1991 MAX_CHILD_SIZE(bh) -
1992 dc_size(B_N_CHILD(tb->FL[0], child_position)),
1993 "PAP-8290: invalid child size of left neighbor");
1999 /* We need right neighbor to balance S[path_offset]. */
2000 if (tb->rnum[h]) { /* We need right neighbor to balance S[path_offset]. */
2001 PROC_INFO_INC(sb, need_r_neighbor[h]);
2002 bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset);
2004 RFALSE(bh == tb->FR[h] &&
2005 PATH_OFFSET_POSITION(tb->tb_path,
2008 "PAP-8295: invalid position in the parent");
2011 (bh == tb->FR[h]) ? tb->rkey[h] + 1 : 0;
2012 son_number = B_N_CHILD_NUM(tb->FR[h], child_position);
2013 depth = reiserfs_write_unlock_nested(tb->tb_sb);
2014 bh = sb_bread(sb, son_number);	/* may sleep doing I/O */
2015 reiserfs_write_lock_nested(tb->tb_sb, depth);
2018 if (FILESYSTEM_CHANGED_TB(tb)) {
2020 PROC_INFO_INC(sb, get_neighbors_restart[h]);
2021 return REPEAT_SEARCH;
2027 && B_FREE_SPACE(bh) !=
2028 MAX_CHILD_SIZE(bh) -
2029 dc_size(B_N_CHILD(tb->FR[0], child_position)),
2030 "PAP-8300: invalid child size of right neighbor (%d != %d - %d)",
2031 B_FREE_SPACE(bh), MAX_CHILD_SIZE(bh),
2032 dc_size(B_N_CHILD(tb->FR[0], child_position)));
/*
 * get_virtual_node_size - worst-case byte size of the virtual node
 * scratch buffer for one block.
 *
 * The buffer must hold the virtual_node header plus the larger of:
 *  - an array of virtual_item for the maximum possible item count, or
 *  - one virtual_item plus a direntry_uarea with a __u16 entry-size slot
 *    for every possible directory entry (one slot is already inside
 *    direntry_uarea, hence the "- 1").
 */
2038 static int get_virtual_node_size(struct super_block *sb, struct buffer_head *bh)
2040 int max_num_of_items;
2041 int max_num_of_entries;
2042 unsigned long blocksize = sb->s_blocksize;
2044 #define MIN_NAME_LEN 1
2046 max_num_of_items = (blocksize - BLKH_SIZE) / (IH_SIZE + MIN_ITEM_LEN);
2047 max_num_of_entries = (blocksize - BLKH_SIZE - IH_SIZE) /
2048 (DEH_SIZE + MIN_NAME_LEN);
2050 return sizeof(struct virtual_node) +
2051 max(max_num_of_items * sizeof(struct virtual_item),
2052 sizeof(struct virtual_item) + sizeof(struct direntry_uarea) +
2053 (max_num_of_entries - 1) * sizeof(__u16));
2056 /* maybe we should fail the balancing we are about to perform when kmalloc
2057 fails several times. But for now it will loop until kmalloc succeeds and gets
/*
 * get_mem_for_virtual_node - (re)allocate tb->vn_buf so it is large
 * enough for the virtual node of the leaf currently at the end of the
 * path.  First tries GFP_ATOMIC (no sleeping, keeps collected buffers);
 * on failure releases the collected buffers and retries with GFP_NOFS,
 * which may sleep and therefore forces REPEAT_SEARCH.
 * NOTE(review): the kfree of the old buffer, the error path when both
 * allocations fail, and the final CARRY_ON return are elided in this
 * listing.
 */
2059 static int get_mem_for_virtual_node(struct tree_balance *tb)
2065 size = get_virtual_node_size(tb->tb_sb, PATH_PLAST_BUFFER(tb->tb_path));
2067 if (size > tb->vn_buf_size) {
2068 /* we have to allocate more memory for virtual node */
2070 /* free memory allocated before */
2072 /* this is not needed if kfree is atomic */
2076 /* virtual node now requires more memory */
2077 tb->vn_buf_size = size;
2079 /* get memory for virtual item */
2080 buf = kmalloc(size, GFP_ATOMIC | __GFP_NOWARN);
2082 /* getting memory with GFP_KERNEL priority may involve
2083 balancing now (due to indirect_to_direct conversion on
2084 dcache shrinking). So, release path and collected
2086 free_buffers_in_tb(tb);
2087 buf = kmalloc(size, GFP_NOFS);	/* may sleep; path already released */
2089 tb->vn_buf_size = 0;
2093 return REPEAT_SEARCH;
2099 if (check_fs && FILESYSTEM_CHANGED_TB(tb))
2100 return REPEAT_SEARCH;
#ifdef CONFIG_REISERFS_CHECK
/*
 * tb_buffer_sanity_check - debug-only validation of a buffer about to be
 * used in balancing: positive refcount, uptodate, still in the tree, on
 * the right device, correct blocksize, and a plausible block number.
 * Panics with a "jmacd-N" code on the first violation.  @descr/@level
 * identify which tb slot the buffer came from, for the panic message.
 */
2106 static void tb_buffer_sanity_check(struct super_block *sb,
2107 struct buffer_head *bh,
2108 const char *descr, int level)
2111 if (atomic_read(&(bh->b_count)) <= 0)
2113 reiserfs_panic(sb, "jmacd-1", "negative or zero "
2114 "reference counter for buffer %s[%d] "
2115 "(%b)", descr, level, bh);
2117 if (!buffer_uptodate(bh))
2118 reiserfs_panic(sb, "jmacd-2", "buffer is not up "
2119 "to date %s[%d] (%b)",
2122 if (!B_IS_IN_TREE(bh))
2123 reiserfs_panic(sb, "jmacd-3", "buffer is not "
2124 "in tree %s[%d] (%b)",
2127 if (bh->b_bdev != sb->s_bdev)
2128 reiserfs_panic(sb, "jmacd-4", "buffer has wrong "
2129 "device %s[%d] (%b)",
2132 if (bh->b_size != sb->s_blocksize)
2133 reiserfs_panic(sb, "jmacd-5", "buffer has wrong "
2134 "blocksize %s[%d] (%b)",
2137 if (bh->b_blocknr > SB_BLOCK_COUNT(sb))
2138 reiserfs_panic(sb, "jmacd-6", "buffer block "
2139 "number too high %s[%d] (%b)",
/* Non-debug build: sanity checking compiles to a no-op. */
2144 static void tb_buffer_sanity_check(struct super_block *sb,
2145 struct buffer_head *bh,
2146 const char *descr, int level)
/*
 * clear_all_dirty_bits - prepare @bh for the journal without blocking
 * (wait == 0); forwards reiserfs_prepare_for_journal()'s result, which
 * the callers below treat as "0 means the buffer is still locked/busy".
 */
2151 static int clear_all_dirty_bits(struct super_block *s, struct buffer_head *bh)
2153 return reiserfs_prepare_for_journal(s, bh, 0);
/*
 * wait_tb_buffers_until_unlocked - scan every buffer the balance will
 * touch (path buffers, L/FL/CFL, R/FR/CFR per level, and the FEB list)
 * and journal-prepare each one; if any buffer is found locked, wait on
 * it (dropping the write lock) and rescan from the top.
 *
 * Returns CARRY_ON when everything is prepared and unlocked, or
 * REPEAT_SEARCH if the filesystem changed while we slept.  Under
 * CONFIG_REISERFS_CHECK a counter warns about, and eventually breaks
 * out of, pathological wait loops.
 * NOTE(review): loop headers, some brace lines and several
 * tb_buffer_sanity_check argument lines are elided in this listing.
 */
2156 static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
2158 struct buffer_head *locked;
2159 #ifdef CONFIG_REISERFS_CHECK
2160 int repeat_counter = 0;
2168 for (i = tb->tb_path->path_length;
2169 !locked && i > ILLEGAL_PATH_ELEMENT_OFFSET; i--) {
2170 if (PATH_OFFSET_PBUFFER(tb->tb_path, i)) {
2171 /* if I understand correctly, we can only be sure the last buffer
2172 ** in the path is in the tree --clm
2174 #ifdef CONFIG_REISERFS_CHECK
2175 if (PATH_PLAST_BUFFER(tb->tb_path) ==
2176 PATH_OFFSET_PBUFFER(tb->tb_path, i))
2177 tb_buffer_sanity_check(tb->tb_sb,
2184 if (!clear_all_dirty_bits(tb->tb_sb,
2189 PATH_OFFSET_PBUFFER(tb->tb_path,
2195 for (i = 0; !locked && i < MAX_HEIGHT && tb->insert_size[i];
2201 tb_buffer_sanity_check(tb->tb_sb,
2204 if (!clear_all_dirty_bits
2205 (tb->tb_sb, tb->L[i]))
2209 if (!locked && tb->FL[i]) {
2210 tb_buffer_sanity_check(tb->tb_sb,
2213 if (!clear_all_dirty_bits
2214 (tb->tb_sb, tb->FL[i]))
2218 if (!locked && tb->CFL[i]) {
2219 tb_buffer_sanity_check(tb->tb_sb,
2222 if (!clear_all_dirty_bits
2223 (tb->tb_sb, tb->CFL[i]))
2224 locked = tb->CFL[i];
2229 if (!locked && (tb->rnum[i])) {
2232 tb_buffer_sanity_check(tb->tb_sb,
2235 if (!clear_all_dirty_bits
2236 (tb->tb_sb, tb->R[i]))
2240 if (!locked && tb->FR[i]) {
2241 tb_buffer_sanity_check(tb->tb_sb,
2244 if (!clear_all_dirty_bits
2245 (tb->tb_sb, tb->FR[i]))
2249 if (!locked && tb->CFR[i]) {
2250 tb_buffer_sanity_check(tb->tb_sb,
2253 if (!clear_all_dirty_bits
2254 (tb->tb_sb, tb->CFR[i]))
2255 locked = tb->CFR[i];
2259 /* as far as I can tell, this is not required. The FEB list seems
2260 ** to be full of newly allocated nodes, which will never be locked,
2261 ** dirty, or anything else.
2262 ** To be safe, I'm putting in the checks and waits in. For the moment,
2263 ** they are needed to keep the code in journal.c from complaining
2264 ** about the buffer. That code is inside CONFIG_REISERFS_CHECK as well.
2267 for (i = 0; !locked && i < MAX_FEB_SIZE; i++) {
2269 if (!clear_all_dirty_bits
2270 (tb->tb_sb, tb->FEB[i]))
2271 locked = tb->FEB[i];
2277 #ifdef CONFIG_REISERFS_CHECK
2279 if ((repeat_counter % 10000) == 0) {
2280 reiserfs_warning(tb->tb_sb, "reiserfs-8200",
2281 "too many iterations waiting "
2282 "for buffer to unlock "
2285 /* Don't loop forever. Try to recover from possible error. */
2287 return (FILESYSTEM_CHANGED_TB(tb)) ?
2288 REPEAT_SEARCH : CARRY_ON;
2291 depth = reiserfs_write_unlock_nested(tb->tb_sb);
2292 __wait_on_buffer(locked);	/* sleep until the offending buffer unlocks */
2293 reiserfs_write_lock_nested(tb->tb_sb, depth);
2294 if (FILESYSTEM_CHANGED_TB(tb))
2295 return REPEAT_SEARCH;
2303 /* Prepare for balancing, that is
2304 * get all necessary parents, and neighbors;
2305 * analyze what and where should be moved;
2306 * get sufficient number of new nodes;
2307 * Balancing will start only after all resources will be collected at a time.
2309 * When ported to SMP kernels, only at the last moment after all needed nodes
2310 * are collected in cache, will the resources be locked using the usual
2311 * textbook ordered lock acquisition algorithms. Note that ensuring that
2312 * this code neither write locks what it does not need to write lock nor locks out of order
2313 * will be a pain in the butt that could have been avoided. Grumble grumble. -Hans
2315 * fix is meant in the sense of render unchanging
2317 * Latency might be improved by first gathering a list of what buffers are needed
2318 * and then getting as many of them in parallel as possible? -Hans
2321 * op_mode i - insert, d - delete, c - cut (truncate), p - paste (append)
2322 * tb tree_balance structure;
2323 * inum item number in S[h];
2324 * pos_in_item - comment this if you can
2325 * ins_ih item head of item being inserted
2326 * data inserted item or data to be pasted
2327 * Returns: 1 - schedule occurred while the function worked;
2328 * 0 - schedule didn't occur while the function worked;
2329 * -1 - if no_disk_space
2332 int fix_nodes(int op_mode, struct tree_balance *tb,
2333 struct item_head *ins_ih, const void *data)
2335 int ret, h, item_num = PATH_LAST_POSITION(tb->tb_path);
2338 /* we set wait_tb_buffers_run when we have to restore any dirty bits cleared
2339 ** during wait_tb_buffers_run
2341 int wait_tb_buffers_run = 0;
2342 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
/* per-superblock statistics: count fix_nodes invocations */
2344 ++REISERFS_SB(tb->tb_sb)->s_fix_nodes;
2346 pos_in_item = tb->tb_path->pos_in_item;
/* snapshot the fs generation; FILESYSTEM_CHANGED_TB() below compares
   against this to detect that the tree changed under us, in which case
   the whole search must be repeated (REPEAT_SEARCH). */
2348 tb->fs_gen = get_generation(tb->tb_sb);
2350 /* we prepare and log the super here so it will already be in the
2351 ** transaction when do_balance needs to change it.
2352 ** This way do_balance won't have to schedule when trying to prepare
2353 ** the super for logging
2355 reiserfs_prepare_for_journal(tb->tb_sb,
2356 SB_BUFFER_WITH_SB(tb->tb_sb), 1);
2357 journal_mark_dirty(tb->transaction_handle, tb->tb_sb,
2358 SB_BUFFER_WITH_SB(tb->tb_sb));
2359 if (FILESYSTEM_CHANGED_TB(tb))
2360 return REPEAT_SEARCH;
2362 /* if it possible in indirect_to_direct conversion */
/* S[0] can be locked (the comment above suggests an in-flight
   indirect_to_direct conversion); drop the nested write lock while
   sleeping on the buffer so other writers are not blocked, then
   re-check the generation after reacquiring it. */
2363 if (buffer_locked(tbS0)) {
2364 int depth = reiserfs_write_unlock_nested(tb->tb_sb);
2365 __wait_on_buffer(tbS0);
2366 reiserfs_write_lock_nested(tb->tb_sb, depth);
2367 if (FILESYSTEM_CHANGED_TB(tb))
2368 return REPEAT_SEARCH;
/* debug-build-only sanity checks: no do_balance may be pending, and
   S[0] must be uptodate and still part of the tree */
2370 #ifdef CONFIG_REISERFS_CHECK
2371 if (REISERFS_SB(tb->tb_sb)->cur_tb) {
2372 print_cur_tb("fix_nodes");
2373 reiserfs_panic(tb->tb_sb, "PAP-8305",
2374 "there is pending do_balance");
2377 if (!buffer_uptodate(tbS0) || !B_IS_IN_TREE(tbS0))
2378 reiserfs_panic(tb->tb_sb, "PAP-8320", "S[0] (%b %z) is "
2379 "not uptodate at the beginning of fix_nodes "
2380 "or not in tree (mode %c)",
2381 tbS0, tbS0, op_mode);
2383 /* Check parameters. */
/* item_num bounds differ by mode: for insert, item_num may equal
   B_NR_ITEMS (appending past the last item); otherwise it must index
   an existing item. */
2386 if (item_num <= 0 || item_num > B_NR_ITEMS(tbS0))
2387 reiserfs_panic(tb->tb_sb, "PAP-8330", "Incorrect "
2388 "item number %d (in S0 - %d) in case "
2389 "of insert", item_num,
2395 if (item_num < 0 || item_num >= B_NR_ITEMS(tbS0)) {
2396 print_block(tbS0, 0, -1, -1);
2397 reiserfs_panic(tb->tb_sb, "PAP-8335", "Incorrect "
2398 "item number(%d); mode = %c "
2401 tb->insert_size[0]);
2405 reiserfs_panic(tb->tb_sb, "PAP-8340", "Incorrect mode "
/* allocate the scratch buffer for the virtual node used by the
   balancing analysis; on failure we just retry the whole search */
2410 if (get_mem_for_virtual_node(tb) == REPEAT_SEARCH)
2411 // FIXME: maybe -ENOMEM when tb->vn_buf == 0? Now just repeat
2412 return REPEAT_SEARCH;
2414 /* Starting from the leaf level; for all levels h of the tree. */
/* for each level with a pending insert_size: get the parent, decide
   what to shift (check_balance), then gather neighbors and, if needed,
   empty nodes. Any result other than CARRY_ON aborts this pass. */
2415 for (h = 0; h < MAX_HEIGHT && tb->insert_size[h]; h++) {
2416 ret = get_direct_parent(tb, h);
2417 if (ret != CARRY_ON)
2420 ret = check_balance(op_mode, tb, h, item_num,
2421 pos_in_item, ins_ih, data);
2422 if (ret != CARRY_ON) {
2423 if (ret == NO_BALANCING_NEEDED) {
2424 /* No balancing for higher levels needed. */
2425 ret = get_neighbors(tb, h);
2426 if (ret != CARRY_ON)
2428 if (h != MAX_HEIGHT - 1)
2429 tb->insert_size[h + 1] = 0;
2430 /* ok, analysis and resource gathering are complete */
2436 ret = get_neighbors(tb, h);
2437 if (ret != CARRY_ON)
2440 /* No disk space, or schedule occurred and analysis may be
2441 * invalid and needs to be redone. */
2442 ret = get_empty_nodes(tb, h);
2443 if (ret != CARRY_ON)
/* decide the insert_size for the next level up, based on how many
   nodes this level splits into (tb->blknum[h]) */
2446 if (!PATH_H_PBUFFER(tb->tb_path, h)) {
2447 /* We have a positive insert size but no nodes exist on this
2448 level, this means that we are creating a new root. */
2450 RFALSE(tb->blknum[h] != 1,
2451 "PAP-8350: creating new empty root");
2453 if (h < MAX_HEIGHT - 1)
2454 tb->insert_size[h + 1] = 0;
2455 } else if (!PATH_H_PBUFFER(tb->tb_path, h + 1)) {
2456 if (tb->blknum[h] > 1) {
2457 /* The tree needs to be grown, so this node S[h]
2458 which is the root node is split into two nodes,
2459 and a new node (S[h+1]) will be created to
2460 become the root node. */
2462 RFALSE(h == MAX_HEIGHT - 1,
2463 "PAP-8355: attempt to create too high of a tree");
2465 tb->insert_size[h + 1] =
2467 KEY_SIZE) * (tb->blknum[h] - 1) +
2469 } else if (h < MAX_HEIGHT - 1)
2470 tb->insert_size[h + 1] = 0;
/* non-root split: each extra block costs one delimiting key plus one
   disk child pointer in the parent */
2472 tb->insert_size[h + 1] =
2473 (DC_SIZE + KEY_SIZE) * (tb->blknum[h] - 1);
/* all resources gathered; wait for any still-locked buffers, then
   re-check the generation one last time before committing to balance */
2476 ret = wait_tb_buffers_until_unlocked(tb);
2477 if (ret == CARRY_ON) {
2478 if (FILESYSTEM_CHANGED_TB(tb)) {
2479 wait_tb_buffers_run = 1;
2480 ret = REPEAT_SEARCH;
2486 wait_tb_buffers_run = 1;
2491 // fix_nodes was unable to perform its calculation due to
2492 // filesystem got changed under us, lack of free disk space or i/o
2493 // failure. If the first is the case - the search will be
2494 // repeated. For now - free all resources acquired so far except
2495 // for the new allocated nodes
2499 /* Release path buffers. */
/* wait_tb_buffers_run means buffers were journal-prepared and must
   have their dirty state restored, not just released */
2500 if (wait_tb_buffers_run) {
2501 pathrelse_and_restore(tb->tb_sb, tb->tb_path);
2503 pathrelse(tb->tb_path);
2505 /* brelse all resources collected for balancing */
2506 for (i = 0; i < MAX_HEIGHT; i++) {
2507 if (wait_tb_buffers_run) {
2508 reiserfs_restore_prepared_buffer(tb->tb_sb,
2510 reiserfs_restore_prepared_buffer(tb->tb_sb,
2512 reiserfs_restore_prepared_buffer(tb->tb_sb,
2514 reiserfs_restore_prepared_buffer(tb->tb_sb,
2516 reiserfs_restore_prepared_buffer(tb->tb_sb,
2519 reiserfs_restore_prepared_buffer(tb->tb_sb,
/* also un-prepare the preallocated free-empty-buffer list */
2539 if (wait_tb_buffers_run) {
2540 for (i = 0; i < MAX_FEB_SIZE; i++) {
2542 reiserfs_restore_prepared_buffer
2543 (tb->tb_sb, tb->FEB[i]);
2551 /* Anatoly will probably forgive me renaming tb to tb. I just
2552 wanted to make lines shorter */
2553 void unfix_nodes(struct tree_balance *tb)
/* Tear down everything fix_nodes() pinned for balancing: restore the
   journal-prepared state of the path and of every neighbor/parent
   buffer held in the tree_balance, then deal with the preallocated
   node list (free unused blocks, release used ones). */
2557 /* Release path buffers. */
2558 pathrelse_and_restore(tb->tb_sb, tb->tb_path);
2560 /* brelse all resources collected for balancing */
/* NOTE(review): L/R/FL/FR/CFL/CFR appear to be the per-level
   neighbor and parent buffers cached in tree_balance — confirm
   against reiserfs.h */
2561 for (i = 0; i < MAX_HEIGHT; i++) {
2562 reiserfs_restore_prepared_buffer(tb->tb_sb, tb->L[i]);
2563 reiserfs_restore_prepared_buffer(tb->tb_sb, tb->R[i]);
2564 reiserfs_restore_prepared_buffer(tb->tb_sb, tb->FL[i]);
2565 reiserfs_restore_prepared_buffer(tb->tb_sb, tb->FR[i]);
2566 reiserfs_restore_prepared_buffer(tb->tb_sb, tb->CFL[i]);
2567 reiserfs_restore_prepared_buffer(tb->tb_sb, tb->CFR[i]);
2577 /* deal with list of allocated (used and unused) nodes */
2578 for (i = 0; i < MAX_FEB_SIZE; i++) {
2580 b_blocknr_t blocknr = tb->FEB[i]->b_blocknr;
2581 /* de-allocated block which was not used by balancing and
2582 bforget about buffer for it */
2584 reiserfs_free_block(tb->transaction_handle, NULL,
2588 /* release used as new nodes including a new root */
2589 brelse(tb->used[i]);