// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"
struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	fs_err(sdp,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(sdp, "AIL error\n");
	gfs2_withdraw_delayed(sdp);
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */
static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}

static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	unsigned int revokes;
	int ret;

	revokes = atomic_read(&gl->gl_ail_count);

	if (!revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the AIL, but there could be revokes on
		 * the sdp revoke queue, in which case, we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have an
		 * io outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	memset(&tr, 0, sizeof(tr));
	set_bit(TR_ONSTACK, &tr.tr_flags);
	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
	if (ret)
		goto flush;
	__gfs2_ail_flush(gl, false, revokes);
	gfs2_trans_end(sdp);

flush:
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_EMPTY_GL);
	return 0;
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	int ret;

	if (!revokes)
		return;

	ret = gfs2_trans_begin(sdp, 0, revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

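/*
 * Illustrative caller sketch (hedged; the real fsync path lives outside
 * this file): after syncing the inode's data and metadata, fsync flushes
 * the remaining AIL buffers for the inode's glock with fsync == true,
 * so still-dirty buffers are skipped rather than treated as an error:
 *
 *	gfs2_ail_flush(ip->i_gl, true);
 */
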
/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 * Returns: 0 on success or error code
 */
static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *metamapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
	WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}

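/*
 * Worked example of the start/end rounding above (illustrative numbers,
 * assuming 4 KiB pages): with bsize = 1024, rd_addr = 17 and
 * rd_length = 5, the rgrp occupies bytes 17408..22527, so
 *
 *	start = (17 * 1024) & PAGE_MASK          = 16384
 *	end   = PAGE_ALIGN((17 + 5) * 1024) - 1  = 24575
 *
 * i.e. the range is widened to whole pages so no partial page is missed.
 */
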
/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */
static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never used LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 */
static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;

	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, start, end);
	set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
}

static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

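/*
 * Illustrative counterpart (hedged sketch): a looker-up that needs a
 * stable view of the inode waits for any pending glock operation to
 * finish, e.g.:
 *
 *	wait_on_bit(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
 *
 * The clear_bit_unlock() above provides the release barrier that pairs
 * with that wait.
 */
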
/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 * Returns: 0 on success or error code
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return error;
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 * Returns: 0 on success or error code
 */
static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 */
static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */
static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;
	umode_t mode = be32_to_cpu(str->di_mode);
	bool is_new = ip->i_inode.i_state & I_NEW;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	if (unlikely(!is_new && inode_wrong_type(&ip->i_inode, mode)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = mode;
	if (is_new) {
		ip->i_inode.i_rdev = 0;
		switch (mode & S_IFMT) {
		case S_IFBLK:
		case S_IFCHR:
			ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
						   be32_to_cpu(str->di_minor));
			break;
		}
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */
int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	return error;
}

/**
 * inode_go_instantiate - read in an inode if necessary
 * @gl: The glock
 *
 * Returns: errno
 */
static int inode_go_instantiate(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	return gfs2_inode_refresh(ip);
}

static int inode_go_held(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 */
static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode = &ip->i_inode;
	unsigned long nrpages;

	if (ip == NULL)
		return;

	xa_lock_irq(&inode->i_data.i_pages);
	nrpages = inode->i_data.nrpages;
	xa_unlock_irq(&inode->i_data.i_pages);

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(inode), nrpages);
}

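/*
 * Example output (illustrative values only): with an empty fs_id_buf,
 * the line printed above for a regular file might read
 *
 *	 I: n:12345/67890 t:8 f:0x00 d:0x00000000 s:4096 p:1
 *
 * i.e. formal ino/disk address, DT_REG type, i_flags, disk flags,
 * size in bytes, and cached pages.
 */
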
/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 */
static int freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/*
	 * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
	 * LM_ST_EXCLUSIVE. That's because when any node does a freeze,
	 * all the nodes should have the freeze glock in SH mode and they all
	 * call do_xmote: One for EX and the others for UN. They ALL must
	 * freeze locally, and they ALL must queue freeze work. The freeze_work
	 * calls freeze_func, which tries to reacquire the freeze glock in SH,
	 * effectively waiting for the thaw on the node that holds it in EX.
	 * Once thawed, the work func acquires the freeze glock in
	 * SH and everybody goes back to thawed.
	 */
	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
	    !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			if (gfs2_withdrawn(sdp)) {
				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
				return 0;
			}
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
				       GFS2_LFC_FREEZE_GO_SYNC);
		else /* read-only mounts */
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}
	return 0;
}

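/*
 * Illustrative sketch of the freeze_work side described above (hedged;
 * the real freeze_func lives outside this file and its holder flags
 * differ): it reacquires the freeze glock in SH, blocking until the
 * node holding it in EX thaws:
 *
 *	struct gfs2_holder freeze_gh;
 *	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
 *				   &freeze_gh);
 */
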
/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 */
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (gfs2_assert_withdraw_delayed(sdp, !error))
			return error;
		if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
						 GFS2_LOG_HEAD_UNMOUNT))
			return -EIO;
		sdp->sd_log_sequence = head.lh_sequence + 1;
		gfs2_log_pointers_init(sdp, head.lh_blkno);
	}
	return 0;
}

/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */
static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (!queue_delayed_work(gfs2_delete_workqueue,
					&gl->gl_delete, 0))
			gl->gl_lockref.count--;
	}
}

static int iopen_go_demote_ok(const struct gfs2_glock *gl)
{
	return !gfs2_delete_work_queued(gl);
}

/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be freed so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_free(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle. */
	if (!test_bit(GLF_FREEING, &gl->gl_flags))
		return;
	clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_FREEING);
}

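/*
 * Illustrative counterpart (hedged sketch): the withdraw path waits for
 * dlm to free the journal glock before letting other nodes replay the
 * journal:
 *
 *	wait_on_bit(&gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE);
 *
 * which pairs with the clear_bit_unlock()/wake_up_bit() above.
 */
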
/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 *
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	   live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw or gfs2_recover_journal directly
	 * here, because we are called from the glock unlock function and
	 * remote_withdraw needs to enqueue and dequeue the same "live"
	 * glock we were called from. So we queue the work to the control
	 * work queue in lock_dlm.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_instantiate = inode_go_instantiate,
	.go_held = inode_go_held,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
	.go_free = inode_go_free,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_instantiate = gfs2_rgrp_go_instantiate,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_dump = inode_go_dump,
	.go_demote_ok = iopen_go_demote_ok,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};
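
/*
 * Illustrative use of the table above (hedged sketch): given only a
 * numeric lock type, a caller can map it to its operations vector:
 *
 *	const struct gfs2_glock_operations *glops =
 *		gfs2_glops_list[LM_TYPE_INODE];	(== &gfs2_inode_glops)
 */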