// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/gfs2_ondisk.h>
#include <linux/blkdev.h>

/* Local GFS2 headers required by the calls below (assumed set of includes). */
#include "gfs2.h"
#include "incore.h"
#include "sys.h"
#include "super.h"
#include "glock.h"
#include "glops.h"
#include "quota.h"
#include "util.h"
#include "recovery.h"

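/*
 * Sysfs interface for GFS2.  Each mounted filesystem registers a kobject
 * named after its lock table under /sys/fs/gfs2/.  struct gfs2_attr wraps
 * the generic sysfs attribute so that the show/store handlers below receive
 * the filesystem's struct gfs2_sbd directly.
 */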
struct gfs2_attr {
        struct attribute attr;
        ssize_t (*show)(struct gfs2_sbd *, char *);
        ssize_t (*store)(struct gfs2_sbd *, const char *, size_t);
};

static ssize_t gfs2_attr_show(struct kobject *kobj, struct attribute *attr,
                              char *buf)
{
        struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
        struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr);

        return a->show ? a->show(sdp, buf) : 0;
}

static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
                               const char *buf, size_t len)
{
        struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
        struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr);

        return a->store ? a->store(sdp, buf, len) : len;
}

static const struct sysfs_ops gfs2_attr_ops = {
        .show = gfs2_attr_show,
        .store = gfs2_attr_store,
};

static struct kset *gfs2_kset;

static ssize_t id_show(struct gfs2_sbd *sdp, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%u:%u\n",
                        MAJOR(sdp->sd_vfs->s_dev), MINOR(sdp->sd_vfs->s_dev));
}

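/* Dump the superblock flags and log/journal accounting state in one buffer. */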
static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
{
        unsigned long f = sdp->sd_flags;
        ssize_t s;

        s = snprintf(buf, PAGE_SIZE,
                     "Journal Checked: %d\n"
                     "Journal Live: %d\n"
                     "Journal ID: %d\n"
                     "Spectator: %d\n"
                     "Withdrawn: %d\n"
                     "No Barriers: %d\n"
                     "No Recovery: %d\n"
                     "Demote: %d\n"
                     "No Journal ID: %d\n"
                     "Mounted RO: %d\n"
                     "RO Recovery: %d\n"
                     "Skip DLM Unlock: %d\n"
                     "Force AIL Flush: %d\n"
                     "FS Freeze Initiator: %d\n"
                     "FS Frozen: %d\n"
                     "Withdrawing: %d\n"
                     "Withdraw In Prog: %d\n"
                     "Remote Withdraw: %d\n"
                     "Withdraw Recovery: %d\n"
                     "Killing: %d\n"
                     "sd_log_error: %d\n"
                     "sd_log_flush_lock: %d\n"
                     "sd_log_num_revoke: %u\n"
                     "sd_log_in_flight: %d\n"
                     "sd_log_blks_needed: %d\n"
                     "sd_log_blks_free: %d\n"
                     "sd_log_flush_head: %d\n"
                     "sd_log_flush_tail: %d\n"
                     "sd_log_blks_reserved: %d\n"
                     "sd_log_revokes_available: %d\n",
                     test_bit(SDF_JOURNAL_CHECKED, &f),
                     test_bit(SDF_JOURNAL_LIVE, &f),
                     (sdp->sd_jdesc ? sdp->sd_jdesc->jd_jid : 0),
                     (sdp->sd_args.ar_spectator ? 1 : 0),
                     test_bit(SDF_WITHDRAWN, &f),
                     test_bit(SDF_NOBARRIERS, &f),
                     test_bit(SDF_NORECOVERY, &f),
                     test_bit(SDF_DEMOTE, &f),
                     test_bit(SDF_NOJOURNALID, &f),
                     (sb_rdonly(sdp->sd_vfs) ? 1 : 0),
                     test_bit(SDF_RORECOVERY, &f),
                     test_bit(SDF_SKIP_DLM_UNLOCK, &f),
                     test_bit(SDF_FORCE_AIL_FLUSH, &f),
                     test_bit(SDF_FREEZE_INITIATOR, &f),
                     test_bit(SDF_FROZEN, &f),
                     test_bit(SDF_WITHDRAWING, &f),
                     test_bit(SDF_WITHDRAW_IN_PROG, &f),
                     test_bit(SDF_REMOTE_WITHDRAW, &f),
                     test_bit(SDF_WITHDRAW_RECOVERY, &f),
                     test_bit(SDF_KILL, &f),
                     sdp->sd_log_error,
                     rwsem_is_locked(&sdp->sd_log_flush_lock),
                     sdp->sd_log_num_revoke,
                     atomic_read(&sdp->sd_log_in_flight),
                     atomic_read(&sdp->sd_log_blks_needed),
                     atomic_read(&sdp->sd_log_blks_free),
                     sdp->sd_log_flush_head,
                     sdp->sd_log_flush_tail,
                     sdp->sd_log_blks_reserved,
                     atomic_read(&sdp->sd_log_revokes_available));
        return s;
}

static ssize_t fsname_show(struct gfs2_sbd *sdp, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%s\n", sdp->sd_fsname);
}

static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
{
        struct super_block *s = sdp->sd_vfs;

        buf[0] = '\0';
        if (uuid_is_null(&s->s_uuid))
                return 0;
        return snprintf(buf, PAGE_SIZE, "%pUB\n", &s->s_uuid);
}

static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
{
        struct super_block *sb = sdp->sd_vfs;
        int frozen = (sb->s_writers.frozen == SB_UNFROZEN) ? 0 : 1;

        return snprintf(buf, PAGE_SIZE, "%d\n", frozen);
}

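/* Writing 1 to "freeze" freezes the filesystem via freeze_super(); 0 thaws it. */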
static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        int error, n;

        error = kstrtoint(buf, 0, &n);
        if (error)
                return error;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        switch (n) {
        case 0:
                error = thaw_super(sdp->sd_vfs);
                break;
        case 1:
                error = freeze_super(sdp->sd_vfs);
                break;
        default:
                return -EINVAL;
        }

        if (error) {
                fs_warn(sdp, "freeze %d error %d\n", n, error);
                return error;
        }

        return len;
}

static ssize_t withdraw_show(struct gfs2_sbd *sdp, char *buf)
{
        unsigned int b = gfs2_withdrawn(sdp);
        return snprintf(buf, PAGE_SIZE, "%u\n", b);
}

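/*
 * Writing 1 to "withdraw" makes the filesystem withdraw from the cluster at
 * the administrator's request, as if an unrecoverable error had occurred.
 */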
static ssize_t withdraw_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        int error, val;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        error = kstrtoint(buf, 0, &val);
        if (error)
                return error;

        if (val != 1)
                return -EINVAL;

        gfs2_lm(sdp, "withdrawing from cluster at user's request\n");
        gfs2_withdraw(sdp);
        return len;
}

static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
                                 size_t len)
{
        int error, val;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        error = kstrtoint(buf, 0, &val);
        if (error)
                return error;

        if (val != 1)
                return -EINVAL;

        gfs2_statfs_sync(sdp->sd_vfs, 0);
        return len;
}

static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
                                size_t len)
{
        int error, val;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        error = kstrtoint(buf, 0, &val);
        if (error)
                return error;

        if (val != 1)
                return -EINVAL;

        gfs2_quota_sync(sdp->sd_vfs, 0);
        return len;
}

static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
                                        size_t len)
{
        struct kqid qid;
        int error;
        u32 id;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        error = kstrtou32(buf, 0, &id);
        if (error)
                return error;

        qid = make_kqid(current_user_ns(), USRQUOTA, id);
        if (!qid_valid(qid))
                return -EINVAL;

        error = gfs2_quota_refresh(sdp, qid);
        return error ? error : len;
}

static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
                                         size_t len)
{
        struct kqid qid;
        int error;
        u32 id;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        error = kstrtou32(buf, 0, &id);
        if (error)
                return error;

        qid = make_kqid(current_user_ns(), GRPQUOTA, id);
        if (!qid_valid(qid))
                return -EINVAL;

        error = gfs2_quota_refresh(sdp, qid);
        return error ? error : len;
}

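/*
 * "demote_rq" takes a glock demote request of the form "<type>:<number> <mode>",
 * where <type> is the numeric glock type, <number> the glock number and <mode>
 * the target state ("EX" here results in LM_ST_UNLOCKED, i.e. drop the glock).
 * Purely as an illustration, as root:
 *
 *      echo "2:13 EX" > /sys/fs/gfs2/<locktable>/demote_rq
 *
 * would ask for inode glock 13 to be dropped.  SDF_DEMOTE records that this
 * debugging interface has been used.
 */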
static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        struct gfs2_glock *gl;
        const struct gfs2_glock_operations *glops;
        unsigned int glmode = 0;
        unsigned int gltype;
        unsigned long long glnum;
        char mode[16];
        int rv;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        rv = sscanf(buf, "%u:%llu %15s", &gltype, &glnum,
                    mode);
        if (rv != 3)
                return -EINVAL;

        if (strcmp(mode, "EX") == 0)
                glmode = LM_ST_UNLOCKED;
        else if ((strcmp(mode, "CW") == 0) || (strcmp(mode, "DF") == 0))
                glmode = LM_ST_DEFERRED;
        else if ((strcmp(mode, "PR") == 0) || (strcmp(mode, "SH") == 0))
                glmode = LM_ST_SHARED;
        else
                return -EINVAL;

        if (gltype > LM_TYPE_JOURNAL)
                return -EINVAL;
        if (gltype == LM_TYPE_NONDISK && glnum == GFS2_FREEZE_LOCK)
                glops = &gfs2_freeze_glops;
        else
                glops = gfs2_glops_list[gltype];
        if (glops == NULL) /* not a valid glock type */
                return -EINVAL;

        if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags))
                fs_info(sdp, "demote interface used\n");
        rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl);
        if (rv)
                return rv;
        gfs2_glock_cb(gl, glmode);
        gfs2_glock_put(gl);
        return len;
}

#define GFS2_ATTR(name, mode, show, store) \
static struct gfs2_attr gfs2_attr_##name = __ATTR(name, mode, show, store)

GFS2_ATTR(id, 0444, id_show, NULL);
GFS2_ATTR(fsname, 0444, fsname_show, NULL);
GFS2_ATTR(uuid, 0444, uuid_show, NULL);
GFS2_ATTR(freeze, 0644, freeze_show, freeze_store);
GFS2_ATTR(withdraw, 0644, withdraw_show, withdraw_store);
GFS2_ATTR(statfs_sync, 0200, NULL, statfs_sync_store);
GFS2_ATTR(quota_sync, 0200, NULL, quota_sync_store);
GFS2_ATTR(quota_refresh_user, 0200, NULL, quota_refresh_user_store);
GFS2_ATTR(quota_refresh_group, 0200, NULL, quota_refresh_group_store);
GFS2_ATTR(demote_rq, 0200, NULL, demote_rq_store);
GFS2_ATTR(status, 0400, status_show, NULL);

static struct attribute *gfs2_attrs[] = {
        &gfs2_attr_id.attr,
        &gfs2_attr_fsname.attr,
        &gfs2_attr_uuid.attr,
        &gfs2_attr_freeze.attr,
        &gfs2_attr_withdraw.attr,
        &gfs2_attr_statfs_sync.attr,
        &gfs2_attr_quota_sync.attr,
        &gfs2_attr_quota_refresh_user.attr,
        &gfs2_attr_quota_refresh_group.attr,
        &gfs2_attr_demote_rq.attr,
        &gfs2_attr_status.attr,
        NULL,
};

ATTRIBUTE_GROUPS(gfs2);

static void gfs2_sbd_release(struct kobject *kobj)
{
        struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);

        complete(&sdp->sd_kobj_unregister);
}

static struct kobj_type gfs2_ktype = {
        .release = gfs2_sbd_release,
        .default_groups = gfs2_groups,
        .sysfs_ops = &gfs2_attr_ops,
};

/*
 * lock_module. Originally from lock_dlm
 */
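/*
 * The attributes in this group appear under lock_module/ and form the control
 * interface used around mount and journal recovery; they are normally driven
 * by the DLM-based userspace helpers rather than by administrators directly.
 */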
static ssize_t proto_name_show(struct gfs2_sbd *sdp, char *buf)
{
        const struct lm_lockops *ops = sdp->sd_lockstruct.ls_ops;
        return sprintf(buf, "%s\n", ops->lm_proto_name);
}

static ssize_t block_show(struct gfs2_sbd *sdp, char *buf)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        ssize_t ret;
        int val = 0;

        if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))
                val = 1;
        ret = sprintf(buf, "%d\n", val);
        return ret;
}

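/*
 * "block": writing 1 sets DFL_BLOCK_LOCKS so lock requests are held back
 * during recovery; writing 0 clears the flag and thaws the glocks that were
 * blocked while it was set.
 */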
static ssize_t block_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        ssize_t ret;
        int val;

        ret = kstrtoint(buf, 0, &val);
        if (ret)
                return ret;
        ret = len;

        if (val == 1)
                set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
        else if (val == 0) {
                clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
                smp_mb__after_atomic();
                gfs2_glock_thaw(sdp);
        } else {
                ret = -EINVAL;
        }
        return ret;
}

static ssize_t wdack_show(struct gfs2_sbd *sdp, char *buf)
{
        int val = completion_done(&sdp->sd_wdack) ? 1 : 0;

        return sprintf(buf, "%d\n", val);
}

static ssize_t wdack_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        ssize_t ret;
        int val;

        ret = kstrtoint(buf, 0, &val);
        if (ret)
                return ret;
        ret = len;

        if ((val == 1) &&
            !strcmp(sdp->sd_lockstruct.ls_ops->lm_proto_name, "lock_dlm"))
                complete(&sdp->sd_wdack);
        else
                ret = -EINVAL;
        return ret;
}

static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        return sprintf(buf, "%d\n", ls->ls_first);
}

static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        unsigned first;
        int rv;

        rv = sscanf(buf, "%u", &first);
        if (rv != 1 || first > 1)
                return -EINVAL;
        rv = wait_for_completion_killable(&sdp->sd_locking_init);
        if (rv)
                return rv;
        spin_lock(&sdp->sd_jindex_spin);
        rv = -EBUSY;
        if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
                goto out;
        rv = -EINVAL;
        if (sdp->sd_args.ar_spectator)
                goto out;
        if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
                goto out;
        sdp->sd_lockstruct.ls_first = first;
        rv = 0;
out:
        spin_unlock(&sdp->sd_jindex_spin);
        return rv ? rv : len;
}

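/*
 * "first" is normally written by the userspace mount helper: a value of 1
 * marks this node as the first mounter of the filesystem in the cluster,
 * which affects how journal recovery is handled.  "first_done" below reports
 * whether the first-mount recovery pass has completed (DFL_FIRST_MOUNT_DONE).
 */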
static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        return sprintf(buf, "%d\n", !!test_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags));
}

int gfs2_recover_set(struct gfs2_sbd *sdp, unsigned jid)
{
        struct gfs2_jdesc *jd;
        int rv;

        /* Wait for our primary journal to be initialized */
        wait_for_completion(&sdp->sd_journal_ready);

        spin_lock(&sdp->sd_jindex_spin);
        rv = -EBUSY;
        /*
         * If we're a spectator, we use journal0, but it's not really ours.
         * So we need to wait for its recovery too. If we skip it we'd never
         * queue work to the recovery workqueue, and so its completion would
         * never clear the DFL_BLOCK_LOCKS flag, so all our locks would
         * permanently stop working.
         */
        if (sdp->sd_jdesc->jd_jid == jid && !sdp->sd_args.ar_spectator)
                goto out;
        rv = -ENOENT;
        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
                if (jd->jd_jid != jid && !sdp->sd_args.ar_spectator)
                        continue;
                rv = gfs2_recover_journal(jd, false);
                break;
        }
out:
        spin_unlock(&sdp->sd_jindex_spin);
        return rv;
}

static ssize_t recover_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        unsigned jid;
        int rv;

        rv = sscanf(buf, "%u", &jid);
        if (rv != 1)
                return -EINVAL;

        if (test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
                rv = -ESHUTDOWN;
                goto out;
        }

        rv = gfs2_recover_set(sdp, jid);
out:
        return rv ? rv : len;
}

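/*
 * "recover" is written with a journal id, typically by the cluster's recovery
 * machinery, to request recovery of that journal.  The outcome is exported
 * through "recover_done" (last jid handled) and "recover_status" (its status).
 */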
static ssize_t recover_done_show(struct gfs2_sbd *sdp, char *buf)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        return sprintf(buf, "%d\n", ls->ls_recover_jid_done);
}

static ssize_t recover_status_show(struct gfs2_sbd *sdp, char *buf)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        return sprintf(buf, "%d\n", ls->ls_recover_jid_status);
}

static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf)
{
        return sprintf(buf, "%d\n", sdp->sd_lockstruct.ls_jid);
}

static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        int jid;
        int rv;

        rv = sscanf(buf, "%d", &jid);
        if (rv != 1)
                return -EINVAL;
        rv = wait_for_completion_killable(&sdp->sd_locking_init);
        if (rv)
                return rv;
        spin_lock(&sdp->sd_jindex_spin);
        rv = -EINVAL;
        if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
                goto out;
        rv = -EBUSY;
        if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
                goto out;
        rv = 0;
        if (sdp->sd_args.ar_spectator && jid > 0)
                rv = jid = -EINVAL;
        sdp->sd_lockstruct.ls_jid = jid;
        clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
        smp_mb__after_atomic();
        wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
out:
        spin_unlock(&sdp->sd_jindex_spin);
        return rv ? rv : len;
}

#define GDLM_ATTR(_name,_mode,_show,_store) \
static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store)

GDLM_ATTR(proto_name, 0444, proto_name_show, NULL);
GDLM_ATTR(block, 0644, block_show, block_store);
GDLM_ATTR(withdraw, 0644, wdack_show, wdack_store);
GDLM_ATTR(jid, 0644, jid_show, jid_store);
GDLM_ATTR(first, 0644, lkfirst_show, lkfirst_store);
GDLM_ATTR(first_done, 0444, first_done_show, NULL);
GDLM_ATTR(recover, 0600, NULL, recover_store);
GDLM_ATTR(recover_done, 0444, recover_done_show, NULL);
GDLM_ATTR(recover_status, 0444, recover_status_show, NULL);

static struct attribute *lock_module_attrs[] = {
        &gdlm_attr_proto_name.attr,
        &gdlm_attr_block.attr,
        &gdlm_attr_withdraw.attr,
        &gdlm_attr_jid.attr,
        &gdlm_attr_first.attr,
        &gdlm_attr_first_done.attr,
        &gdlm_attr_recover.attr,
        &gdlm_attr_recover_done.attr,
        &gdlm_attr_recover_status.attr,
        NULL,
};

/*
 * get and set struct gfs2_tune fields
 */

static ssize_t quota_scale_show(struct gfs2_sbd *sdp, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%u %u\n",
                        sdp->sd_tune.gt_quota_scale_num,
                        sdp->sd_tune.gt_quota_scale_den);
}

static ssize_t quota_scale_store(struct gfs2_sbd *sdp, const char *buf,
                                 size_t len)
{
        struct gfs2_tune *gt = &sdp->sd_tune;
        unsigned int x, y;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (sscanf(buf, "%u %u", &x, &y) != 2 || !y)
                return -EINVAL;

        spin_lock(&gt->gt_spin);
        gt->gt_quota_scale_num = x;
        gt->gt_quota_scale_den = y;
        spin_unlock(&gt->gt_spin);
        return len;
}

static ssize_t tune_set(struct gfs2_sbd *sdp, unsigned int *field,
                        int check_zero, const char *buf, size_t len)
{
        struct gfs2_tune *gt = &sdp->sd_tune;
        unsigned int x;
        int error;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        error = kstrtouint(buf, 0, &x);
        if (error)
                return error;

        if (check_zero && !x)
                return -EINVAL;

        spin_lock(&gt->gt_spin);
        *field = x;
        spin_unlock(&gt->gt_spin);
        return len;
}

#define TUNE_ATTR_3(name, show, store) \
static struct gfs2_attr tune_attr_##name = __ATTR(name, 0644, show, store)

#define TUNE_ATTR_2(name, store) \
static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \
{ \
        return snprintf(buf, PAGE_SIZE, "%u\n", sdp->sd_tune.gt_##name); \
} \
TUNE_ATTR_3(name, name##_show, store)

#define TUNE_ATTR(name, check_zero) \
static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\
{ \
        return tune_set(sdp, &sdp->sd_tune.gt_##name, check_zero, buf, len); \
} \
TUNE_ATTR_2(name, name##_store)

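/*
 * For reference, TUNE_ATTR(quota_quantum, 0) expands to a quota_quantum_store()
 * that calls tune_set() on sd_tune.gt_quota_quantum, a quota_quantum_show()
 * that prints that field, and a tune_attr_quota_quantum attribute with mode
 * 0644 tying the two together.
 */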
TUNE_ATTR(quota_warn_period, 0);
TUNE_ATTR(quota_quantum, 0);
TUNE_ATTR(max_readahead, 0);
TUNE_ATTR(complain_secs, 0);
TUNE_ATTR(statfs_slow, 0);
TUNE_ATTR(new_files_jdata, 0);
TUNE_ATTR(statfs_quantum, 1);
TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);

static struct attribute *tune_attrs[] = {
        &tune_attr_quota_warn_period.attr,
        &tune_attr_quota_quantum.attr,
        &tune_attr_max_readahead.attr,
        &tune_attr_complain_secs.attr,
        &tune_attr_statfs_slow.attr,
        &tune_attr_statfs_quantum.attr,
        &tune_attr_quota_scale.attr,
        &tune_attr_new_files_jdata.attr,
        NULL,
};

static const struct attribute_group tune_group = {
        .name = "tune",
        .attrs = tune_attrs,
};

static const struct attribute_group lock_module_group = {
        .name = "lock_module",
        .attrs = lock_module_attrs,
};

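/*
 * The resulting per-filesystem layout is:
 *
 *   /sys/fs/gfs2/<locktable>/              top-level attributes (id, freeze, ...)
 *   /sys/fs/gfs2/<locktable>/tune/         tunables from struct gfs2_tune
 *   /sys/fs/gfs2/<locktable>/lock_module/  lock-module (DLM) control files
 *   /sys/fs/gfs2/<locktable>/device        symlink to the underlying block device
 */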
int gfs2_sys_fs_add(struct gfs2_sbd *sdp)
{
        struct super_block *sb = sdp->sd_vfs;
        int error;
        char ro[20];
        char spectator[20];
        char *envp[] = { ro, spectator, NULL };

        sprintf(ro, "RDONLY=%d", sb_rdonly(sb));
        sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);

        init_completion(&sdp->sd_kobj_unregister);
        sdp->sd_kobj.kset = gfs2_kset;
        error = kobject_init_and_add(&sdp->sd_kobj, &gfs2_ktype, NULL,
                                     "%s", sdp->sd_table_name);
        if (error)
                goto fail_reg;

        error = sysfs_create_group(&sdp->sd_kobj, &tune_group);
        if (error)
                goto fail_reg;

        error = sysfs_create_group(&sdp->sd_kobj, &lock_module_group);
        if (error)
                goto fail_tune;

        error = sysfs_create_link(&sdp->sd_kobj,
                                  &disk_to_dev(sb->s_bdev->bd_disk)->kobj,
                                  "device");
        if (error)
                goto fail_lock_module;

        kobject_uevent_env(&sdp->sd_kobj, KOBJ_ADD, envp);
        return 0;

fail_lock_module:
        sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
fail_tune:
        sysfs_remove_group(&sdp->sd_kobj, &tune_group);
fail_reg:
        fs_err(sdp, "error %d adding sysfs files\n", error);
        kobject_put(&sdp->sd_kobj);
        wait_for_completion(&sdp->sd_kobj_unregister);
        sb->s_fs_info = NULL;
        return error;
}

void gfs2_sys_fs_del(struct gfs2_sbd *sdp)
{
        sysfs_remove_link(&sdp->sd_kobj, "device");
        sysfs_remove_group(&sdp->sd_kobj, &tune_group);
        sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
        kobject_put(&sdp->sd_kobj);
        wait_for_completion(&sdp->sd_kobj_unregister);
}

static int gfs2_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
        const struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
        const struct super_block *s = sdp->sd_vfs;

        add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
        add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
        if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags))
                add_uevent_var(env, "JOURNALID=%d", sdp->sd_lockstruct.ls_jid);
        if (!uuid_is_null(&s->s_uuid))
                add_uevent_var(env, "UUID=%pUB", &s->s_uuid);
        return 0;
}

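/*
 * These environment variables accompany uevents emitted for the filesystem's
 * kobject (e.g. the KOBJ_ADD event sent from gfs2_sys_fs_add), letting udev
 * rules and the gfs2 userspace tools identify the lock table, lock protocol,
 * journal id and UUID of the mount.
 */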
static const struct kset_uevent_ops gfs2_uevent_ops = {
        .uevent = gfs2_uevent,
};

int gfs2_sys_init(void)
{
        gfs2_kset = kset_create_and_add("gfs2", &gfs2_uevent_ops, fs_kobj);
        if (!gfs2_kset)
                return -ENOMEM;
        return 0;
}

void gfs2_sys_uninit(void)
{
        kset_unregister(gfs2_kset);
}