static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
kfree(gl->gl_lksb.sb_lvbptr);
- if (gl->gl_ops->go_flags & GLOF_ASPACE)
- kmem_cache_free(gfs2_glock_aspace_cachep, gl);
- else
+ if (gl->gl_ops->go_flags & GLOF_ASPACE) {
+ struct gfs2_glock_aspace *gla =
+ container_of(gl, struct gfs2_glock_aspace, glock);
+ kmem_cache_free(gfs2_glock_aspace_cachep, gla);
+ } else
kmem_cache_free(gfs2_glock_cachep, gl);
}
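The free path above, gfs2_glock_dealloc(), recovers the wrapper object from the embedded glock with container_of() and hands that pointer back to the aspace cache, since the wrapper is what was allocated from it. Below is a minimal standalone sketch of the same idiom in plain userspace C; the demo_* names are invented for illustration, and the container_of() here is the minimal form rather than the kernel's type-checked macro.

#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define DEMO_ASPACE	0x1

struct demo_glock {
	unsigned int flags;
};

struct demo_glock_aspace {
	struct demo_glock glock;	/* embedded, like gfs2_glock */
	char mapping[64];		/* stands in for struct address_space */
};

/* Mirrors the shape of the free path above: free what was allocated. */
static void demo_dealloc(struct demo_glock *gl)
{
	if (gl->flags & DEMO_ASPACE) {
		struct demo_glock_aspace *gla =
			container_of(gl, struct demo_glock_aspace, glock);
		free(gla);
	} else {
		free(gl);
	}
}

int main(void)
{
	struct demo_glock_aspace *gla = calloc(1, sizeof(*gla));

	if (!gla)
		return 1;
	gla->glock.flags = DEMO_ASPACE;
	demo_dealloc(&gla->glock);	/* callers only ever see the glock */
	return 0;
}

Since glock is the wrapper's first member, &gla->glock and gla are the same address, so freeing gl directly would still work here; spelling it with container_of() keeps the code correct even if the member ever stops being first.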
.ln_sbd = sdp };
struct gfs2_glock *gl, *tmp;
struct address_space *mapping;
- struct kmem_cache *cachep;
int ret = 0;
gl = find_insert_glock(&name, NULL);
if (gl) {
	*glp = gl;
	return 0;
}
if (!create)
return -ENOENT;
- if (glops->go_flags & GLOF_ASPACE)
- cachep = gfs2_glock_aspace_cachep;
- else
- cachep = gfs2_glock_cachep;
- gl = kmem_cache_alloc(cachep, GFP_NOFS);
- if (!gl)
- return -ENOMEM;
-
+ if (glops->go_flags & GLOF_ASPACE) {
+ struct gfs2_glock_aspace *gla =
+ kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_NOFS);
+ if (!gla)
+ return -ENOMEM;
+ gl = &gla->glock;
+ } else {
+ gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_NOFS);
+ if (!gl)
+ return -ENOMEM;
+ }
memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
+ gl->gl_ops = glops;
if (glops->go_flags & GLOF_LVB) {
gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
if (!gl->gl_lksb.sb_lvbptr) {
- kmem_cache_free(cachep, gl);
+ gfs2_glock_dealloc(&gl->gl_rcu);
return -ENOMEM;
}
}
gl->gl_state = LM_ST_UNLOCKED;
gl->gl_target = LM_ST_UNLOCKED;
gl->gl_demote_state = LM_ST_EXCLUSIVE;
- gl->gl_ops = glops;
gl->gl_dstamp = 0;
preempt_disable();
/* We use the global stats to estimate the initial per-glock stats */
*glp = tmp;
out_free:
- kfree(gl->gl_lksb.sb_lvbptr);
- kmem_cache_free(cachep, gl);
+ gfs2_glock_dealloc(&gl->gl_rcu);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
wake_up(&sdp->sd_glock_wait);
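In the allocation path above (gfs2_glock_get()), gl->gl_ops is now set right after the glock is allocated because the failure points, the LVB allocation and the out_free label, go through gfs2_glock_dealloc(), which reads gl_ops->go_flags to decide which cache to free into. A standalone sketch of that ordering rule, with invented demo_* names and plain malloc/free standing in for the slab caches:

#include <stdbool.h>
#include <stdlib.h>

struct demo_ops {
	bool needs_lvb;
};

struct demo_glock {
	const struct demo_ops *ops;
	void *lvb;
};

/* Shared teardown used by every error path; it dereferences gl->ops,
 * so ops must already be valid wherever this can be reached. */
static void demo_dealloc(struct demo_glock *gl)
{
	if (gl->ops->needs_lvb)
		free(gl->lvb);
	free(gl);
}

static struct demo_glock *demo_create(const struct demo_ops *ops)
{
	struct demo_glock *gl = calloc(1, sizeof(*gl));

	if (!gl)
		return NULL;
	gl->ops = ops;			/* set before anything else can fail */

	if (ops->needs_lvb) {
		gl->lvb = calloc(1, 32);
		if (!gl->lvb) {
			demo_dealloc(gl);	/* safe: gl->ops is set */
			return NULL;
		}
	}
	return gl;
}

int main(void)
{
	static const struct demo_ops ops = { .needs_lvb = true };
	struct demo_glock *gl = demo_create(&ops);

	if (gl)
		demo_dealloc(gl);
	return 0;
}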
const match_table_t *lm_tokens;
};
+struct gfs2_glock_aspace {
+ struct gfs2_glock glock;
+ struct address_space mapping;
+};
+
extern struct workqueue_struct *gfs2_delete_workqueue;
static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
{
- if (gl->gl_ops->go_flags & GLOF_ASPACE)
- return (struct address_space *)(gl + 1);
+ if (gl->gl_ops->go_flags & GLOF_ASPACE) {
+ struct gfs2_glock_aspace *gla =
+ container_of(gl, struct gfs2_glock_aspace, glock);
+ return &gla->mapping;
+ }
return NULL;
}
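gfs2_glock2aspace() keeps its old contract: it returns the glock's address space for GLOF_ASPACE glock types and NULL for everything else, so callers still have to check the result; only the way the mapping is located changes, from pointer arithmetic past the glock to the wrapper member. A small caller-side sketch of that kind of flag-gated accessor, again with invented demo_* names:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define DEMO_ASPACE	0x1

struct demo_glock {
	unsigned int flags;
};

struct demo_glock_aspace {
	struct demo_glock glock;
	char mapping[32];		/* stands in for struct address_space */
};

static char *demo_glock2aspace(struct demo_glock *gl)
{
	if (gl->flags & DEMO_ASPACE) {
		struct demo_glock_aspace *gla =
			container_of(gl, struct demo_glock_aspace, glock);
		return gla->mapping;
	}
	return NULL;
}

int main(void)
{
	struct demo_glock_aspace gla = { .glock.flags = DEMO_ASPACE };
	struct demo_glock plain = { .flags = 0 };
	char *m;

	m = demo_glock2aspace(&gla.glock);
	if (m)				/* aspace glock: mapping available */
		strcpy(m, "metadata");

	m = demo_glock2aspace(&plain);
	printf("plain glock mapping: %s\n", m ? m : "(none)");
	return 0;
}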
static void gfs2_init_gl_aspace_once(void *foo)
{
- struct gfs2_glock *gl = foo;
- struct address_space *mapping = (struct address_space *)(gl + 1);
+ struct gfs2_glock_aspace *gla = foo;
- gfs2_init_glock_once(gl);
- address_space_init_once(mapping);
+ gfs2_init_glock_once(&gla->glock);
+ address_space_init_once(&gla->mapping);
}
goto fail_cachep1;
gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock(aspace)",
- sizeof(struct gfs2_glock) +
- sizeof(struct address_space),
+ sizeof(struct gfs2_glock_aspace),
0, 0, gfs2_init_gl_aspace_once);
if (!gfs2_glock_aspace_cachep)
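The aspace cache is now sized with sizeof(struct gfs2_glock_aspace) instead of adding the sizes of struct gfs2_glock and struct address_space. The two only agree when the compiler inserts no padding between or after the members, so taking the size from the wrapper type is the form that stays correct regardless. The standalone snippet below shows where such a difference can come from; the types are invented and deliberately misaligned, not the GFS2 structures:

#include <stddef.h>
#include <stdio.h>

struct tiny {
	char c;			/* sizeof == 1 */
};

struct aligned {
	long l;			/* typically 8-byte aligned */
};

struct wrapper {
	struct tiny t;
	struct aligned a;	/* padding usually precedes this member */
};

int main(void)
{
	printf("sum of members:       %zu\n",
	       sizeof(struct tiny) + sizeof(struct aligned));
	printf("sizeof(wrapper):      %zu\n", sizeof(struct wrapper));
	printf("offsetof(wrapper, a): %zu\n", offsetof(struct wrapper, a));
	return 0;
}

For the real structures the sum presumably matched, since the old cache size worked; the wrapper form simply removes the assumption.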
static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping)
{
struct inode *inode = mapping->host;
- if (mapping->a_ops == &gfs2_meta_aops)
- return (((struct gfs2_glock *)mapping) - 1)->gl_name.ln_sbd;
- else if (mapping->a_ops == &gfs2_rgrp_aops)
+ if (mapping->a_ops == &gfs2_meta_aops) {
+ struct gfs2_glock_aspace *gla =
+ container_of(mapping, struct gfs2_glock_aspace, mapping);
+ return gla->glock.gl_name.ln_sbd;
+ } else if (mapping->a_ops == &gfs2_rgrp_aops)
return container_of(mapping, struct gfs2_sbd, sd_aspace);
else
return inode->i_sb->s_fs_info;
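gfs2_mapping2sbd() is the reverse lookup: from the mapping back to its container. The removed code cast the mapping pointer to a glock pointer and subtracted one, which is only right if the mapping sits exactly sizeof(struct gfs2_glock) bytes into the object, as it did in the old back-to-back allocation; container_of() subtracts the member's real offset, so it stays right with the named wrapper. A standalone comparison of the two forms on deliberately padded, invented types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct head {
	char c;			/* sizeof == 1 */
};

struct tail {
	long l;
};

struct pair {
	struct head h;
	struct tail t;		/* usually at offset 8, not offset 1 */
};

int main(void)
{
	struct pair p;
	struct tail *t = &p.t;

	/* Old style: assume the member starts sizeof(struct head) bytes in. */
	struct pair *by_arith = (struct pair *)((struct head *)(void *)t - 1);

	/* container_of(): subtract the member's real offset. */
	struct pair *by_offset = container_of(t, struct pair, t);

	printf("actual    %p\n", (void *)&p);
	printf("by_arith  %p\n", (void *)by_arith);
	printf("by_offset %p\n", (void *)by_offset);
	return 0;
}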