*/
static inline int rs_cmp(u64 start, u32 len, struct gfs2_blkreserv *rs)
{
- if (start >= rs->rs_start + rs->rs_free)
+ if (start >= rs->rs_start + rs->rs_requested)
return 1;
if (rs->rs_start >= start + len)
return -1;
fs_id_buf,
(unsigned long long)ip->i_no_addr,
(unsigned long long)rs->rs_start,
- rs->rs_free);
+ rs->rs_requested);
}
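For orientation: rs_cmp() treats a reservation as the half-open block range [rs_start, rs_start + rs_requested) and compares it with the query range [start, start + len), returning 1 when the query lies entirely past the reservation, -1 when it lies entirely before it, and 0 on overlap (the overlap return sits just past the end of the hunk above). A minimal userspace sketch of the same comparison, with stand-in types in place of struct gfs2_blkreserv:

#include <assert.h>
#include <stdint.h>

/* Stand-in for struct gfs2_blkreserv; only the fields used here. */
struct blkres {
	uint64_t rs_start;     /* first block of the reservation */
	uint32_t rs_requested; /* number of blocks requested */
};

/* Mirrors rs_cmp(): the interval comparison that drives the rb-tree walks. */
static int res_cmp(uint64_t start, uint32_t len, const struct blkres *rs)
{
	if (start >= rs->rs_start + rs->rs_requested)
		return 1;	/* query lies entirely after the reservation */
	if (rs->rs_start >= start + len)
		return -1;	/* query lies entirely before the reservation */
	return 0;		/* the two ranges overlap */
}

int main(void)
{
	struct blkres rs = { .rs_start = 100, .rs_requested = 10 };

	assert(res_cmp(110, 5, &rs) == 1);	/* [110,115) is past [100,110) */
	assert(res_cmp(90, 10, &rs) == -1);	/* [90,100) ends before it */
	assert(res_cmp(95, 10, &rs) == 0);	/* [95,105) overlaps it */
	return 0;
}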
/**
rb_erase(&rs->rs_node, &rgd->rd_rstree);
RB_CLEAR_NODE(&rs->rs_node);
- if (rs->rs_free) {
- /* return reserved blocks to the rgrp */
- BUG_ON(rs->rs_rgd->rd_reserved < rs->rs_free);
- rs->rs_rgd->rd_reserved -= rs->rs_free;
+ if (rs->rs_requested) {
+ /* return requested blocks to the rgrp */
+ BUG_ON(rs->rs_rgd->rd_requested < rs->rs_requested);
+ rs->rs_rgd->rd_requested -= rs->rs_requested;
/* The rgrp extent failure point is likely not to increase;
it will only do so if the freed blocks are somehow
contiguous with a span of free blocks that follows. Still,
it will force the number to be recalculated later. */
- rgd->rd_extfail_pt += rs->rs_free;
- rs->rs_free = 0;
+ rgd->rd_extfail_pt += rs->rs_requested;
+ rs->rs_requested = 0;
}
}
if (rgd) {
spin_lock(&rgd->rd_rsspin);
__rs_deltree(rs);
- BUG_ON(rs->rs_free);
+ BUG_ON(rs->rs_requested);
spin_unlock(&rgd->rd_rsspin);
}
}
rb_entry(*newn, struct gfs2_blkreserv, rs_node);
parent = *newn;
- rc = rs_cmp(rs->rs_start, rs->rs_free, cur);
+ rc = rs_cmp(rs->rs_start, rs->rs_requested, cur);
if (rc > 0)
newn = &((*newn)->rb_right);
else if (rc < 0)
rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
/* Do our rgrp accounting for the reservation */
- rgd->rd_reserved += rs->rs_free; /* blocks reserved */
+ rgd->rd_requested += rs->rs_requested; /* blocks requested */
spin_unlock(&rgd->rd_rsspin);
trace_gfs2_rs(rs, TRACE_RS_INSERT);
}
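Together with __rs_deltree() above, this maintains the invariant that rgd->rd_requested equals the sum of rs_requested over every reservation in the rgrp's rb-tree: rs_insert() adds the reservation's blocks to the rgrp total under rd_rsspin, and __rs_deltree() returns them, which is exactly what the BUG_ON() checks in the deletion path assert.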
{
u32 tot_reserved, tot_free;
- if (WARN_ON_ONCE(rgd->rd_reserved < rs->rs_free))
+ if (WARN_ON_ONCE(rgd->rd_requested < rs->rs_requested))
return 0;
- tot_reserved = rgd->rd_reserved - rs->rs_free;
+ tot_reserved = rgd->rd_requested - rs->rs_requested;
if (rgd->rd_free_clone < tot_reserved)
tot_reserved = 0;
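A worked example with hypothetical numbers (the hunk ends just before the function computes tot_free from rd_free_clone and tot_reserved): with rd_free_clone = 100, rd_requested = 30, and the caller's own rs_requested = 10, other reservations account for tot_reserved = 20 blocks, leaving 80 blocks available to this inode. If other reservations somehow exceed rd_free_clone, tot_reserved is clamped to 0 rather than letting the unsigned subtraction wrap.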
extlen = max_t(u32, atomic_read(&ip->i_sizehint), ap->target);
extlen = clamp(extlen, (u32)RGRP_RSRV_MINBLKS, free_blocks);
}
- if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
+ if ((rgd->rd_free_clone < rgd->rd_requested) || (free_blocks < extlen))
return;
/* Find bitmap block that contains bits for goal block */
ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, &ip->i_res, true);
if (ret == 0) {
rs->rs_start = gfs2_rbm_to_block(&rbm);
- rs->rs_free = extlen;
+ rs->rs_requested = extlen;
rs_insert(ip);
} else {
if (goal == rgd->rd_last_alloc + rgd->rd_data0)
if (n) {
while (rs_cmp(block, length, rs) == 0 && rs != ignore_rs) {
- block = rs->rs_start + rs->rs_free;
+ block = rs->rs_start + rs->rs_requested;
n = n->rb_right;
if (n == NULL)
break;
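This loop (in gfs2_next_unreserved_block()) hops over back-to-back reservations: while the candidate range still overlaps a reservation, the search position jumps to rs_start + rs_requested, the first block past that reservation, before moving on to the next tree node. Conceptually, with reservations covering blocks [100,110) and [110,120), a lookup starting at block 105 is bumped to 110 by the first reservation and then to 120 by the second, so 120 comes back as the first unreserved block.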
fs_id_buf,
(unsigned long long)rgd->rd_addr, rgd->rd_flags,
rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
- rgd->rd_reserved, rgd->rd_extfail_pt);
+ rgd->rd_requested, rgd->rd_extfail_pt);
if (rgd->rd_sbd->sd_args.ar_rgrplvb) {
struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
unsigned int rlen;
rs->rs_start += len;
- rlen = min(rs->rs_free, len);
- rs->rs_free -= rlen;
- rgd->rd_reserved -= rlen;
+ rlen = min(rs->rs_requested, len);
+ rs->rs_requested -= rlen;
+ rgd->rd_requested -= rlen;
trace_gfs2_rs(rs, TRACE_RS_CLAIM);
if (rs->rs_start < rgd->rd_data0 + rgd->rd_data &&
- rs->rs_free)
+ rs->rs_requested)
goto out;
/* We used up our block reservation, so we should
reserve more blocks next time. */
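The arithmetic above (in gfs2_adjust_reservation()) consumes an allocation from the front of the reservation. With hypothetical numbers: a reservation at rs_start = 1000 with rs_requested = 16, from which len = 4 blocks are allocated, becomes rs_start = 1004 with rs_requested = 12, and the rgrp-wide rd_requested drops by the same 4 blocks. Only when the reservation is fully spent (rs_requested reaches 0, or rs_start runs off the end of the rgrp) does control fall past the goto to the path the comment describes, which asks for a larger reservation next time.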
__field( u8, block_state )
__field( u64, rd_addr )
__field( u32, rd_free_clone )
- __field( u32, rd_reserved )
+ __field( u32, rd_requested )
),
TP_fast_assign(
__entry->block_state = block_state;
__entry->rd_addr = rgd->rd_addr;
__entry->rd_free_clone = rgd->rd_free_clone;
- __entry->rd_reserved = rgd->rd_reserved;
+ __entry->rd_requested = rgd->rd_requested;
),
TP_printk("%u,%u bmap %llu alloc %llu/%lu %s rg:%llu rf:%u rr:%lu",
(unsigned long)__entry->len,
block_state_name(__entry->block_state),
(unsigned long long)__entry->rd_addr,
- __entry->rd_free_clone, (unsigned long)__entry->rd_reserved)
+ __entry->rd_free_clone, (unsigned long)__entry->rd_requested)
);
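Both tracepoints pick up the renamed counter, so the rr: field of gfs2_block_alloc output now reports rd_requested. As with any trace event, it can be watched at runtime through the standard tracefs interface, typically by enabling events/gfs2/gfs2_block_alloc under /sys/kernel/tracing (the exact mount point varies by distribution).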
/* Keep track of multi-block reservations as they are allocated/freed */
__field( dev_t, dev )
__field( u64, rd_addr )
__field( u32, rd_free_clone )
- __field( u32, rd_reserved )
+ __field( u32, rd_requested )
__field( u64, inum )
__field( u64, start )
- __field( u32, free )
+ __field( u32, requested )
__field( u8, func )
),
__entry->dev = rs->rs_rgd->rd_sbd->sd_vfs->s_dev;
__entry->rd_addr = rs->rs_rgd->rd_addr;
__entry->rd_free_clone = rs->rs_rgd->rd_free_clone;
- __entry->rd_reserved = rs->rs_rgd->rd_reserved;
+ __entry->rd_requested = rs->rs_rgd->rd_requested;
__entry->inum = container_of(rs, struct gfs2_inode,
i_res)->i_no_addr;
__entry->start = rs->rs_start;
- __entry->free = rs->rs_free;
+ __entry->requested = rs->rs_requested;
__entry->func = func;
),
(unsigned long long)__entry->start,
(unsigned long long)__entry->rd_addr,
(unsigned long)__entry->rd_free_clone,
- (unsigned long)__entry->rd_reserved,
- rs_func_name(__entry->func), (unsigned long)__entry->free)
+ (unsigned long)__entry->rd_requested,
+ rs_func_name(__entry->func), (unsigned long)__entry->requested)
);
#endif /* _TRACE_GFS2_H */