1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
4 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
5 */
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 #include <linux/sched/signal.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/statfs.h>
16 #include <linux/seq_file.h>
17 #include <linux/mount.h>
18 #include <linux/kthread.h>
19 #include <linux/delay.h>
20 #include <linux/gfs2_ondisk.h>
21 #include <linux/crc32.h>
22 #include <linux/time.h>
23 #include <linux/wait.h>
24 #include <linux/writeback.h>
25 #include <linux/backing-dev.h>
26 #include <linux/kernel.h>
49 SHOULD_NOT_DELETE_DINODE,
50 SHOULD_DEFER_EVICTION,
54 * gfs2_jindex_free - Clear all the journal index information
55 * @sdp: The GFS2 superblock
59 void gfs2_jindex_free(struct gfs2_sbd *sdp)
61 struct list_head list;
62 struct gfs2_jdesc *jd;
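/* Detach the whole journal index list onto the local list head while
 * holding the spinlock, then free the entries below without the lock held. */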
64 spin_lock(&sdp->sd_jindex_spin);
65 list_add(&list, &sdp->sd_jindex_list);
66 list_del_init(&sdp->sd_jindex_list);
68 spin_unlock(&sdp->sd_jindex_spin);
71 while (!list_empty(&list)) {
72 jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
73 gfs2_free_journal_extents(jd);
74 list_del(&jd->jd_list);
81 static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
83 struct gfs2_jdesc *jd;
85 list_for_each_entry(jd, head, jd_list) {
86 if (jd->jd_jid == jid)
92 struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
94 struct gfs2_jdesc *jd;
96 spin_lock(&sdp->sd_jindex_spin);
97 jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
98 spin_unlock(&sdp->sd_jindex_spin);
103 int gfs2_jdesc_check(struct gfs2_jdesc *jd)
105 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
106 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
107 u64 size = i_size_read(jd->jd_inode);
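/* Sanity-check the journal size: it is expected to lie between 8 MiB
 * (8 << 20) and 1 GiB (BIT(30)). */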
109 if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
112 jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;
114 if (gfs2_write_alloc_required(ip, 0, size)) {
115 gfs2_consist_inode(ip);
123 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
124 * @sdp: the filesystem
129 int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
131 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
132 struct gfs2_glock *j_gl = ip->i_gl;
133 struct gfs2_log_header_host head;
136 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
137 if (gfs2_withdrawn(sdp))
140 error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
146 if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
151 /* Initialize the head of the log from the recovered journal header */
152 sdp->sd_log_sequence = head.lh_sequence + 1;
153 gfs2_log_pointers_init(sdp, head.lh_blkno);
155 error = gfs2_quota_init(sdp);
156 if (!error && gfs2_withdrawn(sdp))
159 set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
163 void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
165 const struct gfs2_statfs_change *str = buf;
167 sc->sc_total = be64_to_cpu(str->sc_total);
168 sc->sc_free = be64_to_cpu(str->sc_free);
169 sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
172 void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
174 struct gfs2_statfs_change *str = buf;
176 str->sc_total = cpu_to_be64(sc->sc_total);
177 str->sc_free = cpu_to_be64(sc->sc_free);
178 str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
181 int gfs2_statfs_init(struct gfs2_sbd *sdp)
183 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
184 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
185 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
186 struct buffer_head *m_bh;
187 struct gfs2_holder gh;
190 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
195 error = gfs2_meta_inode_buffer(m_ip, &m_bh);
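/* Read the master statfs counters from the statfs inode; non-spectator
 * mounts also load this node's local statfs changes from sd_sc_bh. */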
199 if (sdp->sd_args.ar_spectator) {
200 spin_lock(&sdp->sd_statfs_spin);
201 gfs2_statfs_change_in(m_sc, m_bh->b_data +
202 sizeof(struct gfs2_dinode));
203 spin_unlock(&sdp->sd_statfs_spin);
205 spin_lock(&sdp->sd_statfs_spin);
206 gfs2_statfs_change_in(m_sc, m_bh->b_data +
207 sizeof(struct gfs2_dinode));
208 gfs2_statfs_change_in(l_sc, sdp->sd_sc_bh->b_data +
209 sizeof(struct gfs2_dinode));
210 spin_unlock(&sdp->sd_statfs_spin);
216 gfs2_glock_dq_uninit(&gh);
220 void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
223 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
224 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
225 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
229 gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);
231 spin_lock(&sdp->sd_statfs_spin);
232 l_sc->sc_total += total;
233 l_sc->sc_free += free;
234 l_sc->sc_dinodes += dinodes;
235 gfs2_statfs_change_out(l_sc, sdp->sd_sc_bh->b_data +
236 sizeof(struct gfs2_dinode));
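/* If the accumulated local change exceeds the configured percentage of
 * the master free-block count, request a statfs sync (gfs2_wake_up_statfs). */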
237 if (sdp->sd_args.ar_statfs_percent) {
238 x = 100 * l_sc->sc_free;
239 y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
240 if (x >= y || x <= -y)
243 spin_unlock(&sdp->sd_statfs_spin);
246 gfs2_wake_up_statfs(sdp);
249 void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh)
251 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
252 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
253 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
254 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
256 gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);
257 gfs2_trans_add_meta(m_ip->i_gl, m_bh);
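/* Fold the local statfs deltas into the master counters and clear the
 * local ones, both in memory and in the corresponding on-disk buffers. */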
259 spin_lock(&sdp->sd_statfs_spin);
260 m_sc->sc_total += l_sc->sc_total;
261 m_sc->sc_free += l_sc->sc_free;
262 m_sc->sc_dinodes += l_sc->sc_dinodes;
263 memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
264 memset(sdp->sd_sc_bh->b_data + sizeof(struct gfs2_dinode),
265 0, sizeof(struct gfs2_statfs_change));
266 gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
267 spin_unlock(&sdp->sd_statfs_spin);
270 int gfs2_statfs_sync(struct super_block *sb, int type)
272 struct gfs2_sbd *sdp = sb->s_fs_info;
273 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
274 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
275 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
276 struct gfs2_holder gh;
277 struct buffer_head *m_bh;
280 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
285 error = gfs2_meta_inode_buffer(m_ip, &m_bh);
289 spin_lock(&sdp->sd_statfs_spin);
290 gfs2_statfs_change_in(m_sc, m_bh->b_data +
291 sizeof(struct gfs2_dinode));
292 if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
293 spin_unlock(&sdp->sd_statfs_spin);
296 spin_unlock(&sdp->sd_statfs_spin);
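/* Local changes exist: start a transaction covering both statfs dinode
 * buffers and fold the local deltas into the master copy. */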
298 error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
302 update_statfs(sdp, m_bh);
303 sdp->sd_statfs_force_sync = 0;
310 gfs2_glock_dq_uninit(&gh);
316 struct list_head list;
317 struct gfs2_holder gh;
321 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all journals are clean
323 * @sdp: the file system
328 static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
330 struct gfs2_inode *ip;
331 struct gfs2_jdesc *jd;
334 struct gfs2_log_header_host lh;
338 * Grab all the journal glocks in SH mode. We are *probably* doing
339 * that to prevent recovery.
342 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
343 lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
348 ip = GFS2_I(jd->jd_inode);
349 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
354 list_add(&lfcc->list, &list);
357 gfs2_freeze_unlock(&sdp->sd_freeze_gh);
359 error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
360 LM_FLAG_NOEXP | GL_NOPID,
365 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
366 error = gfs2_jdesc_check(jd);
369 error = gfs2_find_jhead(jd, &lh, false);
372 if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
379 goto out; /* success */
381 gfs2_freeze_unlock(&sdp->sd_freeze_gh);
384 error2 = gfs2_freeze_lock_shared(sdp);
385 gfs2_assert_withdraw(sdp, !error2);
388 while (!list_empty(&list)) {
389 lfcc = list_first_entry(&list, struct lfcc, list);
390 list_del(&lfcc->list);
391 gfs2_glock_dq_uninit(&lfcc->gh);
397 void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
399 const struct inode *inode = &ip->i_inode;
400 struct gfs2_dinode *str = buf;
402 str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
403 str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
404 str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
405 str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
406 str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
407 str->di_mode = cpu_to_be32(inode->i_mode);
408 str->di_uid = cpu_to_be32(i_uid_read(inode));
409 str->di_gid = cpu_to_be32(i_gid_read(inode));
410 str->di_nlink = cpu_to_be32(inode->i_nlink);
411 str->di_size = cpu_to_be64(i_size_read(inode));
412 str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(inode));
413 str->di_atime = cpu_to_be64(inode->i_atime.tv_sec);
414 str->di_mtime = cpu_to_be64(inode->i_mtime.tv_sec);
415 str->di_ctime = cpu_to_be64(inode_get_ctime(inode).tv_sec);
417 str->di_goal_meta = cpu_to_be64(ip->i_goal);
418 str->di_goal_data = cpu_to_be64(ip->i_goal);
419 str->di_generation = cpu_to_be64(ip->i_generation);
421 str->di_flags = cpu_to_be32(ip->i_diskflags);
422 str->di_height = cpu_to_be16(ip->i_height);
423 str->di_payload_format = cpu_to_be32(S_ISDIR(inode->i_mode) &&
424 !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
426 str->di_depth = cpu_to_be16(ip->i_depth);
427 str->di_entries = cpu_to_be32(ip->i_entries);
429 str->di_eattr = cpu_to_be64(ip->i_eattr);
430 str->di_atime_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
431 str->di_mtime_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
432 str->di_ctime_nsec = cpu_to_be32(inode_get_ctime(inode).tv_nsec);
436 * gfs2_write_inode - Make sure the inode is stable on the disk
438 * @wbc: The writeback control structure
443 static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
445 struct gfs2_inode *ip = GFS2_I(inode);
446 struct gfs2_sbd *sdp = GFS2_SB(inode);
447 struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
448 struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
450 bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));
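/* Journaled-data inodes need a log flush even for non-sync writeback,
 * since their data is written through the journal. */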
453 gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
454 GFS2_LOG_HEAD_FLUSH_NORMAL |
455 GFS2_LFC_WRITE_INODE);
456 if (bdi->wb.dirty_exceeded)
457 gfs2_ail1_flush(sdp, wbc);
459 filemap_fdatawrite(metamapping);
461 ret = filemap_fdatawait(metamapping);
463 mark_inode_dirty_sync(inode);
465 spin_lock(&inode->i_lock);
466 if (!(inode->i_flags & I_DIRTY))
467 gfs2_ordered_del_inode(ip);
468 spin_unlock(&inode->i_lock);
474 * gfs2_dirty_inode - check for atime updates
475 * @inode: The inode in question
476 * @flags: The type of dirty
478 * Unfortunately it can be called under any combination of inode
479 * glock and freeze glock, so we have to check carefully.
481 * At the moment this deals only with atime - it should be possible
482 * to expand that role in future, once a review of the locking has been carried out.
486 static void gfs2_dirty_inode(struct inode *inode, int flags)
488 struct gfs2_inode *ip = GFS2_I(inode);
489 struct gfs2_sbd *sdp = GFS2_SB(inode);
490 struct buffer_head *bh;
491 struct gfs2_holder gh;
493 int need_endtrans = 0;
496 if (unlikely(!ip->i_gl)) {
497 /* This can only happen during incomplete inode creation. */
498 BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
502 if (unlikely(gfs2_withdrawn(sdp)))
504 if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
505 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
507 fs_err(sdp, "dirty_inode: glock %d\n", ret);
508 gfs2_dump_glock(NULL, ip->i_gl, true);
512 } else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
515 if (current->journal_info == NULL) {
516 ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
518 fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
524 ret = gfs2_meta_inode_buffer(ip, &bh);
526 gfs2_trans_add_meta(ip->i_gl, bh);
527 gfs2_dinode_out(ip, bh->b_data);
535 gfs2_glock_dq_uninit(&gh);
539 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
540 * @sdp: the filesystem
545 void gfs2_make_fs_ro(struct gfs2_sbd *sdp)
547 int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
549 if (!test_bit(SDF_KILL, &sdp->sd_flags))
550 gfs2_flush_delete_work(sdp);
552 gfs2_destroy_threads(sdp);
554 if (log_write_allowed) {
555 gfs2_quota_sync(sdp->sd_vfs, 0);
556 gfs2_statfs_sync(sdp->sd_vfs, 0);
558 /* We do two log flushes here. The first one commits dirty inodes
559 * and rgrps to the journal, but queues up revokes to the ail list.
560 * The second flush writes out and removes the revokes.
562 * The first must be done before the FLUSH_SHUTDOWN code
563 * clears the LIVE flag, otherwise it will not be able to start
564 * a transaction to write its revokes, and the error will cause
565 * a withdraw of the file system. */
566 gfs2_log_flush(sdp, NULL, GFS2_LFC_MAKE_FS_RO);
567 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
568 GFS2_LFC_MAKE_FS_RO);
569 wait_event_timeout(sdp->sd_log_waitq,
570 gfs2_log_is_empty(sdp),
572 gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp));
574 gfs2_quota_cleanup(sdp);
578 * gfs2_put_super - Unmount the filesystem
579 * @sb: The VFS superblock
583 static void gfs2_put_super(struct super_block *sb)
585 struct gfs2_sbd *sdp = sb->s_fs_info;
586 struct gfs2_jdesc *jd;
588 /* No more recovery requests */
589 set_bit(SDF_NORECOVERY, &sdp->sd_flags);
592 /* Wait on outstanding recovery */
594 spin_lock(&sdp->sd_jindex_spin);
595 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
596 if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
598 spin_unlock(&sdp->sd_jindex_spin);
599 wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
600 TASK_UNINTERRUPTIBLE);
603 spin_unlock(&sdp->sd_jindex_spin);
606 gfs2_make_fs_ro(sdp);
608 if (gfs2_withdrawn(sdp))
609 gfs2_destroy_threads(sdp);
611 gfs2_quota_cleanup(sdp);
614 WARN_ON(gfs2_withdrawing(sdp));
616 /* At this point, we're through modifying the disk */
620 gfs2_freeze_unlock(&sdp->sd_freeze_gh);
622 iput(sdp->sd_jindex);
623 iput(sdp->sd_statfs_inode);
624 iput(sdp->sd_rindex);
625 iput(sdp->sd_quota_inode);
627 gfs2_glock_put(sdp->sd_rename_gl);
628 gfs2_glock_put(sdp->sd_freeze_gl);
630 if (!sdp->sd_args.ar_spectator) {
631 if (gfs2_holder_initialized(&sdp->sd_journal_gh))
632 gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
633 if (gfs2_holder_initialized(&sdp->sd_jinode_gh))
634 gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
635 brelse(sdp->sd_sc_bh);
636 gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
637 gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
638 free_local_statfs_inodes(sdp);
639 iput(sdp->sd_qc_inode);
642 gfs2_glock_dq_uninit(&sdp->sd_live_gh);
643 gfs2_clear_rgrpd(sdp);
644 gfs2_jindex_free(sdp);
645 /* Take apart glock structures and buffer lists */
646 gfs2_gl_hash_clear(sdp);
647 truncate_inode_pages_final(&sdp->sd_aspace);
648 gfs2_delete_debugfs_file(sdp);
649 /* Unmount the locking protocol */
650 gfs2_lm_unmount(sdp);
652 /* At this point, we're through participating in the lockspace */
653 gfs2_sys_fs_del(sdp);
658 * gfs2_sync_fs - sync the filesystem
659 * @sb: the superblock
660 * @wait: true to wait for completion
662 * Flushes the log to disk.
665 static int gfs2_sync_fs(struct super_block *sb, int wait)
667 struct gfs2_sbd *sdp = sb->s_fs_info;
669 gfs2_quota_sync(sb, -1);
671 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
673 return sdp->sd_log_error;
676 static int gfs2_freeze_locally(struct gfs2_sbd *sdp)
678 struct super_block *sb = sdp->sd_vfs;
681 error = freeze_super(sb, FREEZE_HOLDER_USERSPACE);
685 if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
686 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
687 GFS2_LFC_FREEZE_GO_SYNC);
688 if (gfs2_withdrawn(sdp)) {
689 error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
698 static int gfs2_do_thaw(struct gfs2_sbd *sdp)
700 struct super_block *sb = sdp->sd_vfs;
703 error = gfs2_freeze_lock_shared(sdp);
706 error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
711 fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n", error);
712 gfs2_assert_withdraw(sdp, 0);
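/* Freeze work: runs from sdp->sd_freeze_work (typically queued when another
 * node initiates a cluster-wide freeze); it freezes the filesystem locally
 * and then drops this node's shared hold on the freeze glock. */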
716 void gfs2_freeze_func(struct work_struct *work)
718 struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
719 struct super_block *sb = sdp->sd_vfs;
722 mutex_lock(&sdp->sd_freeze_mutex);
724 if (test_bit(SDF_FROZEN, &sdp->sd_flags))
727 error = gfs2_freeze_locally(sdp);
731 gfs2_freeze_unlock(&sdp->sd_freeze_gh);
732 set_bit(SDF_FROZEN, &sdp->sd_flags);
734 error = gfs2_do_thaw(sdp);
738 clear_bit(SDF_FROZEN, &sdp->sd_flags);
742 fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n", error);
745 mutex_unlock(&sdp->sd_freeze_mutex);
746 deactivate_super(sb);
750 * gfs2_freeze_super - prevent further writes to the filesystem
751 * @sb: the VFS structure for the filesystem
755 static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who)
757 struct gfs2_sbd *sdp = sb->s_fs_info;
760 if (!mutex_trylock(&sdp->sd_freeze_mutex))
763 if (test_bit(SDF_FROZEN, &sdp->sd_flags))
767 error = gfs2_freeze_locally(sdp);
769 fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
774 error = gfs2_lock_fs_check_clean(sdp);
778 error = gfs2_do_thaw(sdp);
783 fs_err(sdp, "waiting for recovery before freeze\n");
784 else if (error == -EIO) {
785 fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due "
786 "to recovery error.\n");
789 fs_err(sdp, "error freezing FS: %d\n", error);
791 fs_err(sdp, "retrying...\n");
797 set_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
798 set_bit(SDF_FROZEN, &sdp->sd_flags);
800 mutex_unlock(&sdp->sd_freeze_mutex);
805 * gfs2_thaw_super - reallow writes to the filesystem
806 * @sb: the VFS structure for the filesystem
810 static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who)
812 struct gfs2_sbd *sdp = sb->s_fs_info;
815 if (!mutex_trylock(&sdp->sd_freeze_mutex))
818 if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
821 gfs2_freeze_unlock(&sdp->sd_freeze_gh);
823 error = gfs2_do_thaw(sdp);
826 clear_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
827 clear_bit(SDF_FROZEN, &sdp->sd_flags);
830 mutex_unlock(&sdp->sd_freeze_mutex);
834 void gfs2_thaw_freeze_initiator(struct super_block *sb)
836 struct gfs2_sbd *sdp = sb->s_fs_info;
838 mutex_lock(&sdp->sd_freeze_mutex);
839 if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
842 gfs2_freeze_unlock(&sdp->sd_freeze_gh);
845 mutex_unlock(&sdp->sd_freeze_mutex);
849 * statfs_slow_fill - fill in the sc for a given resource group (RG)
851 * @sc: the sc structure
853 * Returns: 0 on success, -ESTALE if the LVB is invalid
856 static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
857 struct gfs2_statfs_change_host *sc)
859 gfs2_rgrp_verify(rgd);
860 sc->sc_total += rgd->rd_data;
861 sc->sc_free += rgd->rd_free;
862 sc->sc_dinodes += rgd->rd_dinodes;
867 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
868 * @sdp: the filesystem
869 * @sc: the sc info that will be returned
871 * Any error (other than a signal) will cause this routine to fall back
872 * to the synchronous version.
874 * FIXME: This really shouldn't busy wait like this.
879 static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
881 struct gfs2_rgrpd *rgd_next;
882 struct gfs2_holder *gha, *gh;
883 unsigned int slots = 64;
888 memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
889 gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
892 for (x = 0; x < slots; x++)
893 gfs2_holder_mark_uninitialized(gha + x);
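/* Issue asynchronous glock requests for up to 'slots' resource groups at a
 * time, polling for completions and accumulating the counters from each. */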
895 rgd_next = gfs2_rgrpd_get_first(sdp);
900 for (x = 0; x < slots; x++) {
903 if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
904 err = gfs2_glock_wait(gh);
906 gfs2_holder_uninit(gh);
910 struct gfs2_rgrpd *rgd =
911 gfs2_glock2rgrp(gh->gh_gl);
913 error = statfs_slow_fill(rgd, sc);
915 gfs2_glock_dq_uninit(gh);
919 if (gfs2_holder_initialized(gh))
921 else if (rgd_next && !error) {
922 error = gfs2_glock_nq_init(rgd_next->rd_gl,
926 rgd_next = gfs2_rgrpd_get_next(rgd_next);
930 if (signal_pending(current))
931 error = -ERESTARTSYS;
945 * gfs2_statfs_i - Do a statfs
946 * @sdp: the filesystem
947 * @sc: the sc structure
952 static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
954 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
955 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
957 spin_lock(&sdp->sd_statfs_spin);
960 sc->sc_total += l_sc->sc_total;
961 sc->sc_free += l_sc->sc_free;
962 sc->sc_dinodes += l_sc->sc_dinodes;
964 spin_unlock(&sdp->sd_statfs_spin);
968 if (sc->sc_free > sc->sc_total)
969 sc->sc_free = sc->sc_total;
970 if (sc->sc_dinodes < 0)
977 * gfs2_statfs - Gather and return stats about the filesystem
978 * @dentry: The name of the link
981 * Returns: 0 on success or error code
984 static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
986 struct super_block *sb = dentry->d_sb;
987 struct gfs2_sbd *sdp = sb->s_fs_info;
988 struct gfs2_statfs_change_host sc;
991 error = gfs2_rindex_update(sdp);
995 if (gfs2_tune_get(sdp, gt_statfs_slow))
996 error = gfs2_statfs_slow(sdp, &sc);
998 error = gfs2_statfs_i(sdp, &sc);
1003 buf->f_type = GFS2_MAGIC;
1004 buf->f_bsize = sdp->sd_sb.sb_bsize;
1005 buf->f_blocks = sc.sc_total;
1006 buf->f_bfree = sc.sc_free;
1007 buf->f_bavail = sc.sc_free;
1008 buf->f_files = sc.sc_dinodes + sc.sc_free;
1009 buf->f_ffree = sc.sc_free;
1010 buf->f_namelen = GFS2_FNAMESIZE;
1016 * gfs2_drop_inode - Drop an inode (test for remote unlink)
1017 * @inode: The inode to drop
1019 * If we've received a callback on an iopen lock then it's because a
1020 * remote node tried to deallocate the inode but failed due to this node
1021 * still having the inode open. Here we mark the link count zero
1022 * since we know that it must have reached zero if the GLF_DEMOTE flag
1023 * is set on the iopen glock. If we didn't do a disk read since the
1024 * remote node removed the final link then we might otherwise miss
1025 * this event. This check ensures that this node will deallocate the
1026 * inode's blocks, or alternatively pass the baton on to another
1027 * node for later deallocation.
1030 static int gfs2_drop_inode(struct inode *inode)
1032 struct gfs2_inode *ip = GFS2_I(inode);
1033 struct gfs2_sbd *sdp = GFS2_SB(inode);
1035 if (inode->i_nlink &&
1036 gfs2_holder_initialized(&ip->i_iopen_gh)) {
1037 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
1038 if (test_bit(GLF_DEMOTE, &gl->gl_flags))
1043 * When under memory pressure when an inode's link count has dropped to
1044 * zero, defer deleting the inode to the delete workqueue. This avoids
1045 * calling into DLM under memory pressure, which can deadlock.
1047 if (!inode->i_nlink &&
1048 unlikely(current->flags & PF_MEMALLOC) &&
1049 gfs2_holder_initialized(&ip->i_iopen_gh)) {
1050 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
1052 gfs2_glock_hold(gl);
1053 if (!gfs2_queue_try_to_evict(gl))
1054 gfs2_glock_queue_put(gl);
1059 * No longer cache inodes when trying to evict them all.
1061 if (test_bit(SDF_EVICTING, &sdp->sd_flags))
1064 return generic_drop_inode(inode);
1067 static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
1073 } while (!IS_ROOT(d1));
1078 * gfs2_show_options - Show mount options for /proc/mounts
1079 * @s: seq_file structure
1080 * @root: root of this (sub)tree
1082 * Returns: 0 on success or error code
1085 static int gfs2_show_options(struct seq_file *s, struct dentry *root)
1087 struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
1088 struct gfs2_args *args = &sdp->sd_args;
1089 unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum;
1091 spin_lock(&sdp->sd_tune.gt_spin);
1092 logd_secs = sdp->sd_tune.gt_logd_secs;
1093 quota_quantum = sdp->sd_tune.gt_quota_quantum;
1094 statfs_quantum = sdp->sd_tune.gt_statfs_quantum;
1095 statfs_slow = sdp->sd_tune.gt_statfs_slow;
1096 spin_unlock(&sdp->sd_tune.gt_spin);
1098 if (is_ancestor(root, sdp->sd_master_dir))
1099 seq_puts(s, ",meta");
1100 if (args->ar_lockproto[0])
1101 seq_show_option(s, "lockproto", args->ar_lockproto);
1102 if (args->ar_locktable[0])
1103 seq_show_option(s, "locktable", args->ar_locktable);
1104 if (args->ar_hostdata[0])
1105 seq_show_option(s, "hostdata", args->ar_hostdata);
1106 if (args->ar_spectator)
1107 seq_puts(s, ",spectator");
1108 if (args->ar_localflocks)
1109 seq_puts(s, ",localflocks");
1111 seq_puts(s, ",debug");
1112 if (args->ar_posix_acl)
1113 seq_puts(s, ",acl");
1114 if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
1116 switch (args->ar_quota) {
1117 case GFS2_QUOTA_OFF:
1120 case GFS2_QUOTA_ACCOUNT:
1126 case GFS2_QUOTA_QUIET:
1133 seq_printf(s, ",quota=%s", state);
1135 if (args->ar_suiddir)
1136 seq_puts(s, ",suiddir");
1137 if (args->ar_data != GFS2_DATA_DEFAULT) {
1139 switch (args->ar_data) {
1140 case GFS2_DATA_WRITEBACK:
1141 state = "writeback";
1143 case GFS2_DATA_ORDERED:
1150 seq_printf(s, ",data=%s", state);
1152 if (args->ar_discard)
1153 seq_puts(s, ",discard");
1154 if (logd_secs != 30)
1155 seq_printf(s, ",commit=%d", logd_secs);
1156 if (statfs_quantum != 30)
1157 seq_printf(s, ",statfs_quantum=%d", statfs_quantum);
1158 else if (statfs_slow)
1159 seq_puts(s, ",statfs_quantum=0");
1160 if (quota_quantum != 60)
1161 seq_printf(s, ",quota_quantum=%d", quota_quantum);
1162 if (args->ar_statfs_percent)
1163 seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
1164 if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
1167 switch (args->ar_errors) {
1168 case GFS2_ERRORS_WITHDRAW:
1171 case GFS2_ERRORS_PANIC:
1178 seq_printf(s, ",errors=%s", state);
1180 if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
1181 seq_puts(s, ",nobarrier");
1182 if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
1183 seq_puts(s, ",demote_interface_used");
1184 if (args->ar_rgrplvb)
1185 seq_puts(s, ",rgrplvb");
1186 if (args->ar_loccookie)
1187 seq_puts(s, ",loccookie");
1191 static void gfs2_final_release_pages(struct gfs2_inode *ip)
1193 struct inode *inode = &ip->i_inode;
1194 struct gfs2_glock *gl = ip->i_gl;
1196 if (unlikely(!gl)) {
1197 /* This can only happen during incomplete inode creation. */
1198 BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
1202 truncate_inode_pages(gfs2_glock2aspace(gl), 0);
1203 truncate_inode_pages(&inode->i_data, 0);
1205 if (atomic_read(&gl->gl_revokes) == 0) {
1206 clear_bit(GLF_LFLUSH, &gl->gl_flags);
1207 clear_bit(GLF_DIRTY, &gl->gl_flags);
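/* Deallocate the dinode block itself; at this point the inode must not have
 * any other blocks left (checked via gfs2_get_inode_blocks() below). */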
1211 static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
1213 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1214 struct gfs2_rgrpd *rgd;
1215 struct gfs2_holder gh;
1218 if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
1219 gfs2_consist_inode(ip);
1223 gfs2_rindex_update(sdp);
1225 error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1229 rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
1231 gfs2_consist_inode(ip);
1236 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
1237 LM_FLAG_NODE_SCOPE, &gh);
1241 error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
1242 sdp->sd_jdesc->jd_blocks);
1244 goto out_rg_gunlock;
1246 gfs2_free_di(rgd, ip);
1248 gfs2_final_release_pages(ip);
1250 gfs2_trans_end(sdp);
1253 gfs2_glock_dq_uninit(&gh);
1255 gfs2_quota_unhold(ip);
1260 * gfs2_glock_put_eventually
1261 * @gl: The glock to put
1263 * When under memory pressure, trigger a deferred glock put to make sure we
1264 * won't call into DLM and deadlock. Otherwise, put the glock directly.
1267 static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
1269 if (current->flags & PF_MEMALLOC)
1270 gfs2_glock_queue_put(gl);
1275 static bool gfs2_upgrade_iopen_glock(struct inode *inode)
1277 struct gfs2_inode *ip = GFS2_I(inode);
1278 struct gfs2_sbd *sdp = GFS2_SB(inode);
1279 struct gfs2_holder *gh = &ip->i_iopen_gh;
1280 long timeout = 5 * HZ;
1283 gh->gh_flags |= GL_NOCACHE;
1284 gfs2_glock_dq_wait(gh);
1287 * If there are no other lock holders, we will immediately get
1288 * exclusive access to the iopen glock here.
1290 * Otherwise, the other nodes holding the lock will be notified about
1291 * our locking request. If they do not have the inode open, they are
1292 * expected to evict the cached inode and release the lock, allowing us
1295 * Otherwise, if they cannot evict the inode, they are expected to poke
1296 * the inode glock (note: not the iopen glock). We will notice that
1297 * and stop waiting for the iopen glock immediately. The other node(s)
1298 * are then expected to take care of deleting the inode when they no
1301 * As a last resort, if another node keeps holding the iopen glock
1302 * without showing any activity on the inode glock, we will eventually
1303 * time out and fail the iopen glock upgrade.
1305 * Note that we're passing the LM_FLAG_TRY_1CB flag to the first
1306 * locking request as an optimization to notify lock holders as soon as
1307 * possible. Without that flag, they'd be notified implicitly by the
1308 * second locking request.
1311 gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
1312 error = gfs2_glock_nq(gh);
1313 if (error != GLR_TRYFAILED)
1316 gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
1317 error = gfs2_glock_nq(gh);
1321 timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
1322 !test_bit(HIF_WAIT, &gh->gh_iflags) ||
1323 test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
1325 if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
1329 return gfs2_glock_holder_ready(gh) == 0;
1333 * evict_should_delete - determine whether the inode is eligible for deletion
1334 * @inode: The inode to evict
1335 * @gh: The glock holder structure
1337 * This function determines whether the evicted inode is eligible to be deleted
1338 * and locks the inode glock.
1340 * Returns: the fate of the dinode
1342 static enum dinode_demise evict_should_delete(struct inode *inode,
1343 struct gfs2_holder *gh)
1345 struct gfs2_inode *ip = GFS2_I(inode);
1346 struct super_block *sb = inode->i_sb;
1347 struct gfs2_sbd *sdp = sb->s_fs_info;
1350 if (unlikely(test_bit(GIF_ALLOC_FAILED, &ip->i_flags)))
1353 if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags))
1354 return SHOULD_DEFER_EVICTION;
1356 /* Deletes should never happen under memory pressure anymore. */
1357 if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
1358 return SHOULD_DEFER_EVICTION;
1360 /* Must not read inode block until block type has been verified */
1361 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
1362 if (unlikely(ret)) {
1363 glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
1364 ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
1365 gfs2_glock_dq_uninit(&ip->i_iopen_gh);
1366 return SHOULD_DEFER_EVICTION;
1369 if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
1370 return SHOULD_NOT_DELETE_DINODE;
1371 ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
1373 return SHOULD_NOT_DELETE_DINODE;
1375 ret = gfs2_instantiate(gh);
1377 return SHOULD_NOT_DELETE_DINODE;
1380 * The inode may have been recreated in the meantime.
1383 return SHOULD_NOT_DELETE_DINODE;
1386 if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
1387 test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
1388 if (!gfs2_upgrade_iopen_glock(inode)) {
1389 gfs2_holder_uninit(&ip->i_iopen_gh);
1390 return SHOULD_NOT_DELETE_DINODE;
1393 return SHOULD_DELETE_DINODE;
1397 * evict_unlinked_inode - delete the pieces of an unlinked evicted inode
1398 * @inode: The inode to evict
1400 static int evict_unlinked_inode(struct inode *inode)
1402 struct gfs2_inode *ip = GFS2_I(inode);
1405 if (S_ISDIR(inode->i_mode) &&
1406 (ip->i_diskflags & GFS2_DIF_EXHASH)) {
1407 ret = gfs2_dir_exhash_dealloc(ip);
1413 ret = gfs2_ea_dealloc(ip);
1418 if (!gfs2_is_stuffed(ip)) {
1419 ret = gfs2_file_dealloc(ip);
1425 * As soon as we clear the bitmap for the dinode, gfs2_create_inode()
1426 * can get called to recreate it, or even gfs2_inode_lookup() if the
1427 * inode was recreated on another node in the meantime.
1429 * However, inserting the new inode into the inode hash table will not
1430 * succeed until the old inode is removed, and that only happens after
1431 * ->evict_inode() returns. The new inode is attached to its inode and
1432 * iopen glocks after inserting it into the inode hash table, so at
1433 * that point we can be sure that both glocks are unused.
1436 ret = gfs2_dinode_dealloc(ip);
1437 if (!ret && ip->i_gl)
1438 gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
1445 * evict_linked_inode - evict an inode whose dinode has not been unlinked
1446 * @inode: The inode to evict
1448 static int evict_linked_inode(struct inode *inode)
1450 struct super_block *sb = inode->i_sb;
1451 struct gfs2_sbd *sdp = sb->s_fs_info;
1452 struct gfs2_inode *ip = GFS2_I(inode);
1453 struct address_space *metamapping;
1456 gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
1457 GFS2_LFC_EVICT_INODE);
1458 metamapping = gfs2_glock2aspace(ip->i_gl);
1459 if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
1460 filemap_fdatawrite(metamapping);
1461 filemap_fdatawait(metamapping);
1463 write_inode_now(inode, 1);
1464 gfs2_ail_flush(ip->i_gl, 0);
1466 ret = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
1470 /* Needs to be done before glock release & also in a transaction */
1471 truncate_inode_pages(&inode->i_data, 0);
1472 truncate_inode_pages(metamapping, 0);
1473 gfs2_trans_end(sdp);
1478 * gfs2_evict_inode - Remove an inode from cache
1479 * @inode: The inode to evict
1481 * There are three cases to consider:
1482 * 1. i_nlink == 0, we are final opener (and must deallocate)
1483 * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
1484 * 3. i_nlink > 0
1486 * If the fs is read only, then we have to treat all cases as per #3
1487 * since we are unable to do any deallocation. The inode will be
1488 * deallocated by the next read/write node to attempt an allocation
1489 * in the same resource group
1491 * We have to (at the moment) hold the inodes main lock to cover
1492 * the gap between unlocking the shared lock on the iopen lock and
1493 * taking the exclusive lock. I'd rather do a shared -> exclusive
1494 * conversion on the iopen lock, but we can change that later. This
1495 * is safe, just less efficient.
1498 static void gfs2_evict_inode(struct inode *inode)
1500 struct super_block *sb = inode->i_sb;
1501 struct gfs2_sbd *sdp = sb->s_fs_info;
1502 struct gfs2_inode *ip = GFS2_I(inode);
1503 struct gfs2_holder gh;
1506 if (inode->i_nlink || sb_rdonly(sb) || !ip->i_no_addr)
1510 * In case of an incomplete mount, gfs2_evict_inode() may be called for
1511 * system files without having an active journal to write to. In that
1512 * case, skip the filesystem evict.
1517 gfs2_holder_mark_uninitialized(&gh);
1518 ret = evict_should_delete(inode, &gh);
1519 if (ret == SHOULD_DEFER_EVICTION)
1521 if (ret == SHOULD_DELETE_DINODE)
1522 ret = evict_unlinked_inode(inode);
1524 ret = evict_linked_inode(inode);
1526 if (gfs2_rs_active(&ip->i_res))
1527 gfs2_rs_deltree(&ip->i_res);
1529 if (gfs2_holder_initialized(&gh))
1530 gfs2_glock_dq_uninit(&gh);
1531 if (ret && ret != GLR_TRYFAILED && ret != -EROFS)
1532 fs_warn(sdp, "gfs2_evict_inode: %d\n", ret);
1534 truncate_inode_pages_final(&inode->i_data);
1536 gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
1537 gfs2_rs_deltree(&ip->i_res);
1538 gfs2_ordered_del_inode(ip);
1540 gfs2_dir_hash_inval(ip);
1541 if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
1542 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
1544 glock_clear_object(gl, ip);
1545 gfs2_glock_hold(gl);
1546 ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
1547 gfs2_glock_dq_uninit(&ip->i_iopen_gh);
1548 gfs2_glock_put_eventually(gl);
1551 glock_clear_object(ip->i_gl, ip);
1552 wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
1553 gfs2_glock_add_to_lru(ip->i_gl);
1554 gfs2_glock_put_eventually(ip->i_gl);
1555 rcu_assign_pointer(ip->i_gl, NULL);
1559 static struct inode *gfs2_alloc_inode(struct super_block *sb)
1561 struct gfs2_inode *ip;
1563 ip = alloc_inode_sb(sb, gfs2_inode_cachep, GFP_KERNEL);
1569 gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
1570 memset(&ip->i_res, 0, sizeof(ip->i_res));
1571 RB_CLEAR_NODE(&ip->i_res.rs_node);
1573 return &ip->i_inode;
1576 static void gfs2_free_inode(struct inode *inode)
1578 kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
1581 void free_local_statfs_inodes(struct gfs2_sbd *sdp)
1583 struct local_statfs_inode *lsi, *safe;
1585 /* Run through the statfs inodes list to iput and free memory */
1586 list_for_each_entry_safe(lsi, safe, &sdp->sd_sc_inodes_list, si_list) {
1587 if (lsi->si_jid == sdp->sd_jdesc->jd_jid)
1588 sdp->sd_sc_inode = NULL; /* belongs to this node */
1589 if (lsi->si_sc_inode)
1590 iput(lsi->si_sc_inode);
1591 list_del(&lsi->si_list);
1596 struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
1599 struct local_statfs_inode *lsi;
1601 /* Return the local (per node) statfs inode in the
1602 * sdp->sd_sc_inodes_list corresponding to the 'index'. */
1603 list_for_each_entry(lsi, &sdp->sd_sc_inodes_list, si_list) {
1604 if (lsi->si_jid == index)
1605 return lsi->si_sc_inode;
1610 const struct super_operations gfs2_super_ops = {
1611 .alloc_inode = gfs2_alloc_inode,
1612 .free_inode = gfs2_free_inode,
1613 .write_inode = gfs2_write_inode,
1614 .dirty_inode = gfs2_dirty_inode,
1615 .evict_inode = gfs2_evict_inode,
1616 .put_super = gfs2_put_super,
1617 .sync_fs = gfs2_sync_fs,
1618 .freeze_super = gfs2_freeze_super,
1619 .thaw_super = gfs2_thaw_super,
1620 .statfs = gfs2_statfs,
1621 .drop_inode = gfs2_drop_inode,
1622 .show_options = gfs2_show_options,