struct xfs_iext_cursor icur, ccur;
xfs_fsblock_t prealloc_blocks = 0;
bool eof = false, cow_eof = false, shared = false;
- int whichfork = XFS_DATA_FORK;
+ int allocfork = XFS_DATA_FORK;
int error = 0;
/* we can't use delayed allocations when using extent size hints */
/*
 * Fork all the shared blocks from our write offset until the
 * end of the extent.
 */
- whichfork = XFS_COW_FORK;
+ allocfork = XFS_COW_FORK;
end_fsb = imap.br_startoff + imap.br_blockcount;
} else {
end_fsb = xfs_iomap_end_fsb(mp, offset, count);
if (xfs_is_always_cow_inode(ip))
- whichfork = XFS_COW_FORK;
+ allocfork = XFS_COW_FORK;
}
error = xfs_qm_dqattach_locked(ip, false);
if (error)
	goto out_unlock;
if (eof) {
- prealloc_blocks = xfs_iomap_prealloc_size(ip, whichfork, offset,
+ prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork, offset,
count, &icur);
if (prealloc_blocks) {
xfs_extlen_t align;
}
retry:
- error = xfs_bmapi_reserve_delalloc(ip, whichfork, offset_fsb,
+ error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
end_fsb - offset_fsb, prealloc_blocks,
- whichfork == XFS_DATA_FORK ? &imap : &cmap,
- whichfork == XFS_DATA_FORK ? &icur : &ccur,
- whichfork == XFS_DATA_FORK ? eof : cow_eof);
+ allocfork == XFS_DATA_FORK ? &imap : &cmap,
+ allocfork == XFS_DATA_FORK ? &icur : &ccur,
+ allocfork == XFS_DATA_FORK ? eof : cow_eof);
switch (error) {
case 0:
break;
default:
	goto out_unlock;
}
- if (whichfork == XFS_COW_FORK) {
- trace_xfs_iomap_alloc(ip, offset, count, whichfork, &cmap);
+ if (allocfork == XFS_COW_FORK) {
+ trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap);
goto found_cow;
}
/*
 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
 * them out if the write happens to fail.
 */
xfs_iunlock(ip, XFS_ILOCK_EXCL);
- trace_xfs_iomap_alloc(ip, offset, count, whichfork, &imap);
+ trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
return xfs_bmbt_to_iomap(ip, iomap, &imap, IOMAP_F_NEW);
found_imap: