As per Andrew Morton's request, removed trailing whitespace.
Cc: Andrew Morton <akpm@osdl.org>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
skip the queue_cast(ECANCEL). It indicates that the request/convert
completed (and queued a normal ast) just before the cancel; we don't
want to clobber the sb_result for the normal ast with ECANCEL. */
-
+
static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
revert_lock(r, lkb);
error = gfs2_check_acl_locked(inode, mask);
gfs2_glock_dq_uninit(&i_gh);
}
-
+
return error;
}
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error)
goto out;
-
+
if (ip->i_di.di_size) {
/* Get a free block, fill it with the stuffed data,
and write it out to disk */
blocks[n] = gfs2_meta_new(ip->i_gl, bn);
gfs2_trans_add_bh(ip->i_gl, blocks[n], 1);
}
-
+
n = 0;
bn = blocks[0]->b_blocknr;
if (new_height > 1) {
/* This uses schedule_timeout() instead of msleep() because it's good for
the daemons to wake up more often than the timeout when unmounting so
the user's unmount doesn't sit there forever.
-
+
The kthread functions used to start these daemons block and flush signals. */
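/*
 * Illustrative sketch, not part of this patch: the daemon-loop idiom the
 * comment above is describing.  example_daemon_work() and TIMEOUT_SECS are
 * placeholders, not real GFS2 symbols.  schedule_timeout_interruptible()
 * returns as soon as the thread is woken (for instance by kthread_stop()
 * at unmount time), whereas msleep() keeps re-sleeping until the full
 * period has elapsed even if the task is woken early.
 */
#include <linux/kthread.h>
#include <linux/sched.h>

#define TIMEOUT_SECS 15				/* placeholder tunable */

static int example_daemon(void *data)
{
	while (!kthread_should_stop()) {
		example_daemon_work(data);	/* placeholder for the daemon's real work */
		schedule_timeout_interruptible(TIMEOUT_SECS * HZ);
	}
	return 0;
}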
/**
#define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1)
#define gfs2_dir_offset2hash(p) ((u32)(((u64)(p)) << 1))
-typedef int (*leaf_call_t) (struct gfs2_inode *dip, u32 index, u32 len,
+typedef int (*leaf_call_t) (struct gfs2_inode *dip, u32 index, u32 len,
u64 leaf_no, void *data);
typedef int (*gfs2_dscan_t)(const struct gfs2_dirent *dent,
const struct qstr *name, void *opaque);
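/*
 * Worked example, not part of this patch: gfs2_disk_hash2offset() above maps
 * a 32-bit directory hash to a readdir offset by dropping the low bit, so
 * every offset stays below 2^31; gfs2_dir_offset2hash() converts back,
 * recovering the hash with its lowest bit cleared.
 *
 *	u32 h   = 0x89abcdefU;
 *	u64 off = gfs2_disk_hash2offset(h);	-> 0x44d5e6f7
 *	u32 h2  = gfs2_dir_offset2hash(off);	-> 0x89abcdee  (h & ~1U)
 */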
static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
unsigned int offset, unsigned int size)
-
{
struct buffer_head *dibh;
int error;
const struct qstr *name)
{
struct gfs2_dirent *dent;
- dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
+ dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
gfs2_dirent_find_space, name, NULL);
if (!dent || IS_ERR(dent))
return dent;
gfs2_consist_inode(ip);
return ERR_PTR(-EIO);
}
-
+
index = name->hash >> (32 - ip->i_di.di_depth);
error = get_first_leaf(ip, index, &bh);
if (error)
brelse(bh);
if (!ln)
break;
-
+
error = get_leaf(ip, ln, &bh);
} while(!error);
return error ? ERR_PTR(error) : NULL;
}
-
+
error = gfs2_meta_inode_buffer(ip, &bh);
if (error)
return ERR_PTR(error);
struct qstr name = { .name = "", .len = 0, .hash = 0 };
if (!bh)
return NULL;
-
+
gfs2_trans_add_bh(ip->i_gl, bh, 1);
gfs2_metatype_set(bh, GFS2_METATYPE_LF, GFS2_FORMAT_LF);
leaf = (struct gfs2_leaf *)bh->b_data;
if (error == -ENODATA)
error = 0;
}
- return error;
+ return error;
}
return -EPERM;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(&ip->i_di, dibh->b_data);
brelse(dibh);
- }
+ }
gfs2_trans_end(GFS2_SB(&ip->i_inode));
if (gl->gl_aspace)
gfs2_aspace_put(gl->gl_aspace);
fail:
- kmem_cache_free(gfs2_glock_cachep, gl);
+ kmem_cache_free(gfs2_glock_cachep, gl);
return error;
}
gfs2_holder_uninit(gh);
kfree(container_of(gh, struct greedy, gr_gh));
- spin_lock(&gl->gl_spin);
+ spin_lock(&gl->gl_spin);
return 0;
}
if (existing) {
print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
printk(KERN_INFO "pid : %d\n", existing->gh_owner->pid);
- printk(KERN_INFO "lock type : %d lock state : %d\n",
+ printk(KERN_INFO "lock type : %d lock state : %d\n",
existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
printk(KERN_INFO "pid : %d\n", gh->gh_owner->pid);
- printk(KERN_INFO "lock type : %d lock state : %d\n",
+ printk(KERN_INFO "lock type : %d lock state : %d\n",
gl->gl_name.ln_type, gl->gl_state);
BUG();
}
if (gh->gh_flags & LM_FLAG_PRIORITY)
list_add(&gh->gh_list, &gl->gl_waiters3);
else
- list_add_tail(&gh->gh_list, &gl->gl_waiters3);
+ list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}
/**
for (;;) {
cont = 0;
for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
- if (examine_bucket(clear_glock, sdp, x))
+ if (examine_bucket(clear_glock, sdp, x))
cont = 1;
}
struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
int is_root, struct nameidata *nd)
-
{
struct super_block *sb = dir->i_sb;
struct gfs2_inode *dip = GFS2_I(dir);
error = gfs2_meta_inode_buffer(ip, &bh);
if (error)
goto out_end_trans;
-
+
gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));
if (!ir.ir_length) {
error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
al->al_rgd->rd_ri.ri_length +
- 2 * RES_DINODE +
+ 2 * RES_DINODE +
RES_STATFS + RES_QUOTA, 0);
if (error)
goto fail_ipreserv;
int gfs2_dinode_dealloc(struct gfs2_inode *inode);
int gfs2_change_nlink(struct gfs2_inode *ip, int diff);
-struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
+struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
int is_root, struct nameidata *nd);
struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
unsigned int mode);
fs_err(sdp, "about to withdraw from the cluster\n");
BUG_ON(sdp->sd_args.ar_debug);
-
+
fs_err(sdp, "waiting for outstanding I/O\n");
if (lp->lksb.sb_status == -DLM_ECANCEL) {
log_info("complete dlm cancel %x,%llx flags %lx",
- lp->lockname.ln_type,
+ lp->lockname.ln_type,
(unsigned long long)lp->lockname.ln_number,
lp->flags);
if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
log_info("complete internal cancel %x,%llx",
- lp->lockname.ln_type,
+ lp->lockname.ln_type,
(unsigned long long)lp->lockname.ln_number);
lp->req = lp->cur;
acb.lc_ret |= LM_OUT_CANCELED;
gfs2_log_lock(sdp);
sdp->sd_log_num_rg++;
list_add(&le->le_list, &sdp->sd_log_le_rg);
- gfs2_log_unlock(sdp);
+ gfs2_log_unlock(sdp);
}
static void rg_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
error = -ENOMEM;
gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
sizeof(struct gfs2_glock),
- 0, 0,
+ 0, 0,
gfs2_init_glock_once, NULL);
if (!gfs2_glock_cachep)
goto fail;
LM_FLAG_TRY_1CB|GL_ATIME|GL_AOP, &gh);
do_unlock = 1;
ret = gfs2_glock_nq_m_atime(1, &gh);
- if (ret == GLR_TRYFAILED)
+ if (ret == GLR_TRYFAILED)
goto out_noerror;
if (unlikely(ret))
goto out_unlock;
gl = bd->bd_gl;
- fs_warn(sdp, "gl = (%u, %llu)\n",
+ fs_warn(sdp, "gl = (%u, %llu)\n",
gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number);
fs_warn(sdp, "bd_list_tr = %s, bd_le.le_list = %s\n",
error = gfs2_glock_nq_m_atime(1, &gh);
if (error)
return error;
-
+
iflags = iflags_cvt(gfs2_to_iflags, ip->i_di.di_flags);
if (put_user(iflags, ptr))
error = -EFAULT;
goto out;
if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
goto out;
- if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
+ if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
!capable(CAP_LINUX_IMMUTABLE))
goto out;
if (!IS_IMMUTABLE(inode)) {
goto out;
gfs2_glock_hold(gl);
flock_lock_file_wait(file,
- &(struct file_lock){.fl_type = F_UNLCK});
+ &(struct file_lock){.fl_type = F_UNLCK});
gfs2_glock_dq_uninit(fl_gh);
} else {
error = gfs2_glock_get(GFS2_SB(&ip->i_inode),
brelse(bh);
return -ENOMEM;
}
- gfs2_sb_in(sb, bh->b_data);
+ gfs2_sb_in(sb, bh->b_data);
brelse(bh);
error = gfs2_check_sb(sdp, sb, silent);
}
return 0;
}
-
+
error = gfs2_glock_nq_num(sdp, GFS2_SB_LOCK, &gfs2_meta_glops,
LM_ST_SHARED, 0, &sb_gh);
if (error) {
error = -EINVAL;
if (!gfs2_jindex_size(sdp)) {
fs_err(sdp, "no journals!\n");
- goto fail_jindex;
+ goto fail_jindex;
}
if (sdp->sd_args.ar_spectator) {
return error;
}
-static int fill_super_meta(struct super_block *sb, struct super_block *new,
+static int fill_super_meta(struct super_block *sb, struct super_block *new,
void *data, int silent)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
s->s_dev = s->s_bdev->bd_dev;
return 0;
}
-
+
static int test_bdev_super(struct super_block *s, void *data)
{
return s->s_bdev == data;
struct super_block *sb = NULL, *s;
struct list_head *l;
int error;
-
+
error = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
if (error) {
- printk(KERN_WARNING "GFS2: path_lookup on %s returned error\n",
+ printk(KERN_WARNING "GFS2: path_lookup on %s returned error\n",
dev_name);
goto out;
}
deactivate_super(new);
goto error;
}
-
+
new->s_flags |= MS_ACTIVE;
-
+
/* Grab a reference to the gfs2 mount point */
atomic_inc(&sdp->sd_gfs2mnt->mnt_count);
return simple_set_mnt(mnt, new);
case S_IFSOCK:
break;
default:
- return -EOPNOTSUPP;
+ return -EOPNOTSUPP;
};
gfs2_holder_init(dip->i_gl, 0, 0, ghs);
return 0;
}
-/*
+/*
* We have to (at the moment) hold the inodes main lock to cover
* the gap between unlocking the shared lock on the iopen lock and
* taking the exclusive lock. I'd rather do a shared -> exclusive
qd_hold(qd);
slot_hold(qd);
}
-
+
mutex_unlock(&sdp->sd_quota_mutex);
}
gfs2_glock_dq_uninit(&i_gh);
-
+
gfs2_quota_in(&q, buf);
qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
if (!ip->i_di.di_size || ip->i_di.di_size > (64 << 20) ||
ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
gfs2_consist_inode(ip);
- return -EIO;
+ return -EIO;
}
sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);
switch (error) {
case 0:
break;
-
+
case GLR_TRYFAILED:
fs_info(sdp, "jid=%u: Busy\n", jd->jd_jid);
error = 0;
-
+
default:
goto fail;
};
rgd = recent_rgrp_first(sdp, ip->i_last_rg_alloc);
while (rgd) {
- error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
+ error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
LM_FLAG_TRY, &al->al_rgd_gh);
switch (error) {
case 0:
l_sc->sc_total += total;
l_sc->sc_free += free;
l_sc->sc_dinodes += dinodes;
- gfs2_statfs_change_out(l_sc, l_bh->b_data +
- sizeof(struct gfs2_dinode));
+ gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
spin_unlock(&sdp->sd_statfs_spin);
brelse(l_bh);
spin_lock(&sdp->sd_statfs_spin);
gfs2_statfs_change_in(m_sc, m_bh->b_data +
- sizeof(struct gfs2_dinode));
+ sizeof(struct gfs2_dinode));
if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
spin_unlock(&sdp->sd_statfs_spin);
goto out_bh;