CONTIG_RIGHT
};
+
+/*
+ * NOTE: ocfs2_block_extent_contig(), ocfs2_extents_adjacent() and
+ * ocfs2_extent_contig() only work properly against leaf nodes!
+ */
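/*
 * For reference -- a sketch of the reworked on-disk record this patch
 * assumes (not part of this hunk): the two cluster counts share a
 * union, with leaf records giving up half the field to flags and a
 * reserved byte.
 */
struct ocfs2_extent_rec {
	__le32	e_cpos;			/* Offset into the file, in clusters */
	union {
		__le32	e_int_clusters;	/* Clusters covered by all children */
		struct {
			__le16	e_leaf_clusters; /* Clusters covered by this
						    extent record */
			__u8	e_reserved1;
			__u8	e_flags;	 /* Extent flags */
		};
	};
	__le64	e_blkno;		/* Physical disk offset, in blocks */
};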
static int ocfs2_block_extent_contig(struct super_block *sb,
struct ocfs2_extent_rec *ext,
u64 blkno)
{
- return blkno == (le64_to_cpu(ext->e_blkno) +
- ocfs2_clusters_to_blocks(sb,
- le32_to_cpu(ext->e_clusters)));
+ u64 blk_end = le64_to_cpu(ext->e_blkno);
+
+ blk_end += ocfs2_clusters_to_blocks(sb,
+ le16_to_cpu(ext->e_leaf_clusters));
+
+ return blkno == blk_end;
}
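/*
 * Worked example (hypothetical geometry): with 4K clusters and 512
 * byte blocks, one cluster is 8 blocks. An extent at e_blkno 1000
 * with e_leaf_clusters == 5 ends at block 1040, so only an
 * allocation starting at blkno 1040 is block-contiguous with it.
 */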
static int ocfs2_extents_adjacent(struct ocfs2_extent_rec *left,
struct ocfs2_extent_rec *right)
{
- return (le32_to_cpu(left->e_cpos) + le32_to_cpu(left->e_clusters) ==
- le32_to_cpu(right->e_cpos));
+ u32 left_range;
+
+ left_range = le32_to_cpu(left->e_cpos) +
+ le16_to_cpu(left->e_leaf_clusters);
+
+ return (left_range == le32_to_cpu(right->e_cpos));
}
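/*
 * ocfs2_extent_contig() itself is elided from this excerpt. A sketch,
 * assuming it simply tries the two leaf-only helpers above in each
 * direction (the body below is a guess, not taken from the patch):
 */
static enum ocfs2_contig_type
	ocfs2_extent_contig(struct inode *inode,
			    struct ocfs2_extent_rec *ext,
			    struct ocfs2_extent_rec *insert_rec)
{
	u64 blkno = le64_to_cpu(insert_rec->e_blkno);

	/* Does insert_rec follow ext both logically and physically? */
	if (ocfs2_extents_adjacent(ext, insert_rec) &&
	    ocfs2_block_extent_contig(inode->i_sb, ext, blkno))
		return CONTIG_RIGHT;

	/* Or does ext follow insert_rec? */
	blkno = le64_to_cpu(ext->e_blkno);
	if (ocfs2_extents_adjacent(insert_rec, ext) &&
	    ocfs2_block_extent_contig(inode->i_sb, insert_rec, blkno))
		return CONTIG_LEFT;

	return CONTIG_NONE;
}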
static enum ocfs2_contig_type
i = le16_to_cpu(el->l_next_free_rec) - 1;
return le32_to_cpu(el->l_recs[i].e_cpos) +
- le32_to_cpu(el->l_recs[i].e_clusters);
+ ocfs2_rec_clusters(el, &el->l_recs[i]);
}
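/*
 * ocfs2_rec_clusters(), used above and throughout the rest of this
 * patch, is not shown in this excerpt. Presumably it just reads
 * whichever union member is live for the node's depth -- a sketch:
 */
static inline unsigned int ocfs2_rec_clusters(struct ocfs2_extent_list *el,
					      struct ocfs2_extent_rec *rec)
{
	/*
	 * Interior nodes get the full 32 bit count; leaf records
	 * carry only 16 bits so that e_flags/e_reserved1 fit.
	 */
	if (el->l_tree_depth)
		return le32_to_cpu(rec->e_int_clusters);

	return le16_to_cpu(rec->e_leaf_clusters);
}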
/*
* for the new last extent block.
*
* the new branch will be 'empty' in the sense that every block will
- * contain a single record with e_clusters == 0.
+ * contain a single record with cluster count == 0.
*/
static int ocfs2_add_branch(struct ocfs2_super *osb,
handle_t *handle,
*/
eb_el->l_recs[0].e_cpos = cpu_to_le32(new_cpos);
eb_el->l_recs[0].e_blkno = cpu_to_le64(next_blkno);
- eb_el->l_recs[0].e_clusters = cpu_to_le32(0);
+ /*
+ * eb_el isn't always an interior node, but even leaf
+ * nodes want a zero'd flags and reserved field so
+ * this gets the whole 32 bits regardless of use.
+ */
+ eb_el->l_recs[0].e_int_clusters = cpu_to_le32(0);
if (!eb_el->l_tree_depth)
new_last_eb_blk = le64_to_cpu(eb->h_blkno);
i = le16_to_cpu(el->l_next_free_rec);
el->l_recs[i].e_blkno = cpu_to_le64(next_blkno);
el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
- el->l_recs[i].e_clusters = 0;
+ el->l_recs[i].e_int_clusters = 0;
le16_add_cpu(&el->l_next_free_rec, 1);
/* fe needs a new last extent block pointer, as does the
/* copy the fe data into the new extent block */
eb_el->l_tree_depth = fe_el->l_tree_depth;
eb_el->l_next_free_rec = fe_el->l_next_free_rec;
- for(i = 0; i < le16_to_cpu(fe_el->l_next_free_rec); i++) {
- eb_el->l_recs[i].e_cpos = fe_el->l_recs[i].e_cpos;
- eb_el->l_recs[i].e_clusters = fe_el->l_recs[i].e_clusters;
- eb_el->l_recs[i].e_blkno = fe_el->l_recs[i].e_blkno;
- }
+ for(i = 0; i < le16_to_cpu(fe_el->l_next_free_rec); i++)
+ eb_el->l_recs[i] = fe_el->l_recs[i];
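/*
 * Design note: the struct assignment above copies the whole record,
 * union included, so it stays correct whether the source record is a
 * leaf or an interior one.
 */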
status = ocfs2_journal_dirty(handle, new_eb_bh);
if (status < 0) {
le16_add_cpu(&fe_el->l_tree_depth, 1);
fe_el->l_recs[0].e_cpos = 0;
fe_el->l_recs[0].e_blkno = eb->h_blkno;
- fe_el->l_recs[0].e_clusters = cpu_to_le32(new_clusters);
- for(i = 1; i < le16_to_cpu(fe_el->l_next_free_rec); i++) {
- fe_el->l_recs[i].e_cpos = 0;
- fe_el->l_recs[i].e_clusters = 0;
- fe_el->l_recs[i].e_blkno = 0;
- }
+ fe_el->l_recs[0].e_int_clusters = cpu_to_le32(new_clusters);
+ for(i = 1; i < le16_to_cpu(fe_el->l_next_free_rec); i++)
+ memset(&fe_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec));
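/*
 * Design note: the memset() above zeroes the entire record without
 * naming a union member, so e_flags and e_reserved1 are cleared along
 * with the cluster counts.
 */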
fe_el->l_next_free_rec = cpu_to_le16(1);
/* If this is our 1st tree depth shift, then last_eb_blk
return status;
}
+/*
+ * This is only valid for leaf nodes, which are the only ones that can
+ * have empty extents anyway.
+ */
static inline int ocfs2_is_empty_extent(struct ocfs2_extent_rec *rec)
{
- return !rec->e_clusters;
+ return !rec->e_leaf_clusters;
}
/*
{
int next_free = le16_to_cpu(el->l_next_free_rec);
+ BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
+
if (next_free == 0)
goto set_and_inc;
* rightmost record.
*/
range = le32_to_cpu(rec->e_cpos) +
- le32_to_cpu(rec->e_clusters);
+ ocfs2_rec_clusters(el, rec);
if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range)
break;
}
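/*
 * Note the half-open convention (hypothetical values): a record with
 * e_cpos 100 covering 50 clusters spans cpos [100, 150), so cpos 150
 * already belongs to the next record.
 */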
*/
left_clusters = le32_to_cpu(right_child_el->l_recs[0].e_cpos);
left_clusters -= le32_to_cpu(left_rec->e_cpos);
- left_rec->e_clusters = cpu_to_le32(left_clusters);
+ left_rec->e_int_clusters = cpu_to_le32(left_clusters);
/*
* Calculate the rightmost cluster count boundary before
- * moving cpos - we will need to adjust e_clusters after
+ * moving cpos - we will need to adjust clusters after
* updating e_cpos to keep the same highest cluster count.
*/
right_end = le32_to_cpu(right_rec->e_cpos);
- right_end += le32_to_cpu(right_rec->e_clusters);
+ right_end += le32_to_cpu(right_rec->e_int_clusters);
right_rec->e_cpos = left_rec->e_cpos;
le32_add_cpu(&right_rec->e_cpos, left_clusters);
right_end -= le32_to_cpu(right_rec->e_cpos);
- right_rec->e_clusters = cpu_to_le32(right_end);
+ right_rec->e_int_clusters = cpu_to_le32(right_end);
}
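/*
 * Worked example (hypothetical values): say left_rec->e_cpos == 0,
 * the right child's first record now begins at cpos 80, and right_rec
 * previously covered [100, 300). Then left_clusters = 80 - 0 = 80,
 * right_end = 300, right_rec->e_cpos becomes 0 + 80 = 80, and
 * right_rec->e_int_clusters becomes 300 - 80 = 220: the boundary
 * between the two records moves from 100 to 80 while the rightmost
 * edge stays put at 300.
 */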
/*
u64 blkno;
struct ocfs2_extent_list *el;
+ BUG_ON(path->p_tree_depth == 0);
+
*cpos = 0;
blkno = path_leaf_bh(path)->b_blocknr;
}
*cpos = le32_to_cpu(el->l_recs[j - 1].e_cpos);
- *cpos = *cpos + le32_to_cpu(el->l_recs[j - 1].e_clusters) - 1;
+ *cpos = *cpos + ocfs2_rec_clusters(el,
+ &el->l_recs[j - 1]);
+ *cpos = *cpos - 1;
goto out;
}
}
unsigned int range;
struct ocfs2_extent_rec *rec;
- BUG_ON(el->l_tree_depth);
+ BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
/*
* Contiguous insert - either left or right.
rec->e_blkno = insert_rec->e_blkno;
rec->e_cpos = insert_rec->e_cpos;
}
- le32_add_cpu(&rec->e_clusters,
- le32_to_cpu(insert_rec->e_clusters));
+ le16_add_cpu(&rec->e_leaf_clusters,
+ le16_to_cpu(insert_rec->e_leaf_clusters));
return;
}
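/*
 * Worked example (hypothetical values): merging insert_rec (cpos 100,
 * 10 clusters) left-contiguous with rec (cpos 110, 20 clusters)
 * rewrites rec's e_blkno/e_cpos to insert_rec's and grows
 * e_leaf_clusters to 30, so rec now covers [100, 130).
 */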
if (insert->ins_appending == APPEND_TAIL) {
i = le16_to_cpu(el->l_next_free_rec) - 1;
rec = &el->l_recs[i];
- range = le32_to_cpu(rec->e_cpos) + le32_to_cpu(rec->e_clusters);
+ range = le32_to_cpu(rec->e_cpos)
+ + le16_to_cpu(rec->e_leaf_clusters);
BUG_ON(le32_to_cpu(insert_rec->e_cpos) < range);
mlog_bug_on_msg(le16_to_cpu(el->l_next_free_rec) >=
le16_to_cpu(el->l_count),
le16_to_cpu(el->l_next_free_rec),
le32_to_cpu(el->l_recs[i].e_cpos),
- le32_to_cpu(el->l_recs[i].e_clusters),
+ le16_to_cpu(el->l_recs[i].e_leaf_clusters),
le32_to_cpu(insert_rec->e_cpos),
- le32_to_cpu(insert_rec->e_clusters));
+ le16_to_cpu(insert_rec->e_leaf_clusters));
i++;
el->l_recs[i] = *insert_rec;
le16_add_cpu(&el->l_next_free_rec, 1);
*ret_left_path = NULL;
/*
+ * This shouldn't happen for non-trees. The extent rec cluster
+ * count manipulation below only works for interior nodes.
+ */
+ BUG_ON(right_path->p_tree_depth == 0);
+
+ /*
* If our appending insert is at the leftmost edge of a leaf,
* then we might need to update the rightmost records of the
* neighboring path.
bh = path_root_bh(right_path);
i = 0;
while (1) {
+ struct ocfs2_extent_rec *rec;
+
next_free = le16_to_cpu(el->l_next_free_rec);
if (next_free == 0) {
ocfs2_error(inode->i_sb,
goto out;
}
- el->l_recs[next_free - 1].e_clusters = insert_rec->e_cpos;
- le32_add_cpu(&el->l_recs[next_free - 1].e_clusters,
- le32_to_cpu(insert_rec->e_clusters));
- le32_add_cpu(&el->l_recs[next_free - 1].e_clusters,
- -le32_to_cpu(el->l_recs[next_free - 1].e_cpos));
+ rec = &el->l_recs[next_free - 1];
+
+ rec->e_int_clusters = insert_rec->e_cpos;
+ le32_add_cpu(&rec->e_int_clusters,
+ le16_to_cpu(insert_rec->e_leaf_clusters));
+ le32_add_cpu(&rec->e_int_clusters,
+ -le32_to_cpu(rec->e_cpos));
ret = ocfs2_journal_dirty(handle, bh);
if (ret)
mlog_errno(ret);
+ /* Don't touch the leaf node */
if (++i >= right_path->p_tree_depth)
break;
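/*
 * Worked example (hypothetical values): appending insert_rec at cpos
 * 500 with 10 leaf clusters beneath an interior record whose e_cpos
 * is 0 sets that record's e_int_clusters to 500 + 10 - 0 = 510,
 * pushing the right edge of each interior level out past the new
 * leaf record.
 */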
out_update_clusters:
ocfs2_update_dinode_clusters(inode, di,
- le32_to_cpu(insert_rec->e_clusters));
+ le16_to_cpu(insert_rec->e_leaf_clusters));
ret = ocfs2_journal_dirty(handle, di_bh);
if (ret)
int i;
enum ocfs2_contig_type contig_type = CONTIG_NONE;
+ BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
+
for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
contig_type = ocfs2_extent_contig(inode, &el->l_recs[i],
insert_rec);
insert->ins_appending = APPEND_NONE;
- BUG_ON(el->l_tree_depth);
+ BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
if (!el->l_next_free_rec)
goto set_tail_append;
i = le16_to_cpu(el->l_next_free_rec) - 1;
rec = &el->l_recs[i];
- if (cpos >= (le32_to_cpu(rec->e_cpos) + le32_to_cpu(rec->e_clusters)))
+ if (cpos >=
+ (le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)))
goto set_tail_append;
return;
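/*
 * Worked example (hypothetical values): if the last record covers
 * [100, 200), an insert at cpos >= 200 is flagged as a tail append;
 * an insert anywhere inside the existing range is not.
 */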
* The insert code isn't quite ready to deal with all cases of
* left contiguousness. Specifically, if it's an insert into
* the 1st record in a leaf, it will require the adjustment of
- * e_clusters on the last record of the path directly to it's
+ * cluster count on the last record of the path directly to its
* left. For now, just catch that case and fool the layers
* above us. This works just fine for tree_depth == 0, which
* is why we allow that above.
(unsigned long long)OCFS2_I(inode)->ip_blkno, cpos,
OCFS2_I(inode)->ip_clusters);
+ memset(&rec, 0, sizeof(rec));
rec.e_cpos = cpu_to_le32(cpos);
rec.e_blkno = cpu_to_le64(start_blk);
- rec.e_clusters = cpu_to_le32(new_clusters);
+ rec.e_leaf_clusters = cpu_to_le16(new_clusters);
status = ocfs2_figure_insert_type(inode, fe_bh, &last_eb_bh, &rec,
&insert);
* Check if we'll only be trimming off the end of this
* cluster.
*/
- if (le32_to_cpu(rec->e_clusters) > clusters_to_del)
+ if (le16_to_cpu(rec->e_leaf_clusters) > clusters_to_del)
goto out;
}
mlog(0, "Extent list before: record %d: (%u, %u, %llu), "
"next = %u\n", i, le32_to_cpu(rec->e_cpos),
- le32_to_cpu(rec->e_clusters),
+ ocfs2_rec_clusters(el, rec),
(unsigned long long)le64_to_cpu(rec->e_blkno),
le16_to_cpu(el->l_next_free_rec));
- BUG_ON(le32_to_cpu(rec->e_clusters) < clusters_to_del);
+ BUG_ON(ocfs2_rec_clusters(el, rec) < clusters_to_del);
if (le16_to_cpu(el->l_tree_depth) == 0) {
/*
goto find_tail_record;
}
- le32_add_cpu(&rec->e_clusters, -clusters_to_del);
+ le16_add_cpu(&rec->e_leaf_clusters, -clusters_to_del);
/*
* We'll use "new_edge" on our way back up the
* tree to know what our rightmost cpos is.
*/
- new_edge = le32_to_cpu(rec->e_clusters);
+ new_edge = le16_to_cpu(rec->e_leaf_clusters);
new_edge += le32_to_cpu(rec->e_cpos);
/*
*/
*delete_start = le64_to_cpu(rec->e_blkno)
+ ocfs2_clusters_to_blocks(inode->i_sb,
- le32_to_cpu(rec->e_clusters));
+ le16_to_cpu(rec->e_leaf_clusters));
/*
* If it's now empty, remove this record.
*/
- if (le32_to_cpu(rec->e_clusters) == 0) {
+ if (le16_to_cpu(rec->e_leaf_clusters) == 0) {
memset(rec, 0,
sizeof(struct ocfs2_extent_rec));
le16_add_cpu(&el->l_next_free_rec, -1);
if (new_edge == 0)
goto delete;
- rec->e_clusters = cpu_to_le32(new_edge);
- le32_add_cpu(&rec->e_clusters,
+ rec->e_int_clusters = cpu_to_le32(new_edge);
+ le32_add_cpu(&rec->e_int_clusters,
-le32_to_cpu(rec->e_cpos));
/*
* A deleted child record should have been
* caught above.
*/
- BUG_ON(le32_to_cpu(rec->e_clusters) == 0);
+ BUG_ON(le32_to_cpu(rec->e_int_clusters) == 0);
}
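/*
 * Worked example (hypothetical values): if the child's new rightmost
 * edge (new_edge) is cpos 150 and this interior record begins at
 * e_cpos 100, its e_int_clusters shrinks to 150 - 100 = 50.
 */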
delete:
mlog(0, "extent list container %llu, after: record %d: "
"(%u, %u, %llu), next = %u.\n",
(unsigned long long)bh->b_blocknr, i,
- le32_to_cpu(rec->e_cpos), le32_to_cpu(rec->e_clusters),
+ le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec),
(unsigned long long)le64_to_cpu(rec->e_blkno),
le16_to_cpu(el->l_next_free_rec));
ocfs2_remove_from_cache(inode, bh);
- BUG_ON(le32_to_cpu(el->l_recs[0].e_clusters));
+ BUG_ON(ocfs2_rec_clusters(el, &el->l_recs[0]));
BUG_ON(le32_to_cpu(el->l_recs[0].e_cpos));
BUG_ON(le64_to_cpu(el->l_recs[0].e_blkno));
* Lower levels depend on this never happening, but it's best
* to check it up here before changing the tree.
*/
- if (el->l_tree_depth && ocfs2_is_empty_extent(&el->l_recs[0])) {
+ if (el->l_tree_depth && el->l_recs[0].e_int_clusters == 0) {
ocfs2_error(inode->i_sb,
"Inode %lu has an empty extent record, depth %u\n",
inode->i_ino, le16_to_cpu(el->l_tree_depth));
i = le16_to_cpu(el->l_next_free_rec) - 1;
range = le32_to_cpu(el->l_recs[i].e_cpos) +
- le32_to_cpu(el->l_recs[i].e_clusters);
+ ocfs2_rec_clusters(el, &el->l_recs[i]);
if (i == 0 && ocfs2_is_empty_extent(&el->l_recs[i])) {
clusters_to_del = 0;
} else if (le32_to_cpu(el->l_recs[i].e_cpos) >= new_highest_cpos) {
- clusters_to_del = le32_to_cpu(el->l_recs[i].e_clusters);
+ clusters_to_del = ocfs2_rec_clusters(el, &el->l_recs[i]);
} else if (range > new_highest_cpos) {
- clusters_to_del = (le32_to_cpu(el->l_recs[i].e_clusters) +
+ clusters_to_del = (ocfs2_rec_clusters(el, &el->l_recs[i]) +
le32_to_cpu(el->l_recs[i].e_cpos)) -
new_highest_cpos;
} else {