goto out;
}
ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
- 0, rd_gh);
+ LM_FLAG_NODE_SCOPE, rd_gh);
if (ret)
goto out;
if (!rgd)
goto out_inodes;
- gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
+ gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE, ghs + 2);
error = gfs2_glock_nq(ghs); /* parent */
error = -ENOENT;
goto out_gunlock;
}
- error = gfs2_glock_nq_init(nrgd->rd_gl, LM_ST_EXCLUSIVE, 0,
- &rd_gh);
+ error = gfs2_glock_nq_init(nrgd->rd_gl, LM_ST_EXCLUSIVE,
+ LM_FLAG_NODE_SCOPE, &rd_gh);
if (error)
goto out_gunlock;
}
while (1) {
- ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
+ ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
+ LM_FLAG_NODE_SCOPE, &gh);
if (ret)
goto out;
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *begin = NULL;
struct gfs2_blkreserv *rs = &ip->i_res;
- int error = 0, flags = 0;
+ int error = 0, flags = LM_FLAG_NODE_SCOPE;
bool rg_locked;
u64 last_unlinked = NO_BLOCK;
u32 target = ap->target;
sizeof(struct gfs2_holder),
GFP_NOFS | __GFP_NOFAIL);
for (x = 0; x < rlist->rl_rgrps; x++)
- gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
- LM_ST_EXCLUSIVE, 0,
- &rlist->rl_ghs[x]);
+ gfs2_holder_init(rlist->rl_rgd[x]->rd_gl, LM_ST_EXCLUSIVE,
+ LM_FLAG_NODE_SCOPE, &rlist->rl_ghs[x]);
}
/**
goto out_qs;
}
- error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
+ error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
+ LM_FLAG_NODE_SCOPE, &gh);
if (error)
goto out_qs;
return -EIO;
}
- error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
+ error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
+ LM_FLAG_NODE_SCOPE, &rg_gh);
if (error)
return error;
return -EIO;
}
- error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
+ error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
+ LM_FLAG_NODE_SCOPE, &gh);
if (error)
return error;
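
Every hunk above applies the same transformation: holders on resource group glocks are still requested in LM_ST_EXCLUSIVE, but the flags argument changes from 0 to LM_FLAG_NODE_SCOPE. A minimal sketch of the resulting pattern, assuming the in-tree fs/gfs2 headers; the helper name lock_rgrp_sketch is hypothetical and the snippet is illustrative only, not part of the patch:

#include "incore.h"	/* struct gfs2_rgrpd, struct gfs2_holder */
#include "glock.h"	/* gfs2_glock_nq_init(), LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE */

/* Hypothetical helper showing the post-patch locking pattern. */
static int lock_rgrp_sketch(struct gfs2_rgrpd *rgd, struct gfs2_holder *gh)
{
	int error;

	/* Previously: gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, gh); */
	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NODE_SCOPE, gh);
	if (error)
		return error;

	/* ... operate on the resource group under the exclusive holder ... */

	gfs2_glock_dq_uninit(gh);	/* drop and uninitialize the holder */
	return 0;
}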