// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sysfs.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_mount.h"

struct xfs_sysfs_attr {
	struct attribute	attr;
	ssize_t			(*show)(struct kobject *kobject, char *buf);
	ssize_t			(*store)(struct kobject *kobject, const char *buf,
					 size_t count);
};

static inline struct xfs_sysfs_attr *
to_attr(struct attribute *attr)
{
	return container_of(attr, struct xfs_sysfs_attr, attr);
}

#define XFS_SYSFS_ATTR_RW(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RW(name)
#define XFS_SYSFS_ATTR_RO(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RO(name)
#define XFS_SYSFS_ATTR_WO(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_WO(name)

#define ATTR_LIST(name) &xfs_sysfs_attr_##name.attr

STATIC ssize_t
xfs_sysfs_object_show(
	struct kobject		*kobject,
	struct attribute	*attr,
	char			*buf)
{
	struct xfs_sysfs_attr *xfs_attr = to_attr(attr);

	return xfs_attr->show ? xfs_attr->show(kobject, buf) : 0;
}

STATIC ssize_t
xfs_sysfs_object_store(
	struct kobject		*kobject,
	struct attribute	*attr,
	const char		*buf,
	size_t			count)
{
	struct xfs_sysfs_attr *xfs_attr = to_attr(attr);

	return xfs_attr->store ? xfs_attr->store(kobject, buf, count) : 0;
}

static const struct sysfs_ops xfs_sysfs_ops = {
	.show = xfs_sysfs_object_show,
	.store = xfs_sysfs_object_store,
};

static struct attribute *xfs_mp_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(xfs_mp);

const struct kobj_type xfs_mp_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_mp_groups,
};

#if defined(DEBUG) || defined(XFS_WARN)
/* debug */

STATIC ssize_t
bug_on_assert_store(
	struct kobject		*kobject,
	const char		*buf,
	size_t			count)
{
	int			ret;
	int			val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val == 1)
		xfs_globals.bug_on_assert = true;
	else if (val == 0)
		xfs_globals.bug_on_assert = false;
	else
		return -EINVAL;

	return count;
}

STATIC ssize_t
bug_on_assert_show(
	struct kobject		*kobject,
	char			*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.bug_on_assert);
}
XFS_SYSFS_ATTR_RW(bug_on_assert);

STATIC ssize_t
log_recovery_delay_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < 0 || val > 60)
		return -EINVAL;

	xfs_globals.log_recovery_delay = val;

	return count;
}

STATIC ssize_t
log_recovery_delay_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.log_recovery_delay);
}
XFS_SYSFS_ATTR_RW(log_recovery_delay);

STATIC ssize_t
mount_delay_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < 0 || val > 60)
		return -EINVAL;

	xfs_globals.mount_delay = val;

	return count;
}

STATIC ssize_t
mount_delay_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.mount_delay);
}
XFS_SYSFS_ATTR_RW(mount_delay);

static ssize_t
always_cow_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	ssize_t		ret;

	ret = kstrtobool(buf, &xfs_globals.always_cow);
	if (ret < 0)
		return ret;
	return count;
}

static ssize_t
always_cow_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.always_cow);
}
XFS_SYSFS_ATTR_RW(always_cow);

#ifdef DEBUG
/*
 * Override how many threads the parallel work queue is allowed to create.
 * This has to be a debug-only global (instead of an errortag) because one of
 * the main users of parallel workqueues is mount time quotacheck.
 */
STATIC ssize_t
pwork_threads_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < -1 || val > num_possible_cpus())
		return -EINVAL;

	xfs_globals.pwork_threads = val;

	return count;
}

STATIC ssize_t
pwork_threads_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.pwork_threads);
}
XFS_SYSFS_ATTR_RW(pwork_threads);
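
/*
 * Usage sketch (assuming a CONFIG_XFS_DEBUG kernel, where this knob is
 * registered under the "debug" kobject, i.e. /sys/fs/xfs/debug):
 *
 *	# cap quotacheck and other pwork users at 4 worker threads
 *	echo 4 > /sys/fs/xfs/debug/pwork_threads
 *
 *	# restore the default, automatically sized thread count
 *	echo -1 > /sys/fs/xfs/debug/pwork_threads
 */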
static ssize_t
larp_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	ssize_t		ret;

	ret = kstrtobool(buf, &xfs_globals.larp);
	if (ret < 0)
		return ret;
	return count;
}

STATIC ssize_t
larp_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.larp);
}
XFS_SYSFS_ATTR_RW(larp);
#endif /* DEBUG */

static struct attribute *xfs_dbg_attrs[] = {
	ATTR_LIST(bug_on_assert),
	ATTR_LIST(log_recovery_delay),
	ATTR_LIST(mount_delay),
	ATTR_LIST(always_cow),
#ifdef DEBUG
	ATTR_LIST(pwork_threads),
	ATTR_LIST(larp),
#endif
	NULL,
};
ATTRIBUTE_GROUPS(xfs_dbg);

const struct kobj_type xfs_dbg_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_dbg_groups,
};
#endif /* DEBUG || XFS_WARN */

/* stats */

static inline struct xstats *
to_xstats(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);

	return container_of(kobj, struct xstats, xs_kobj);
}

STATIC ssize_t
stats_show(
	struct kobject	*kobject,
	char		*buf)
{
	struct xstats	*stats = to_xstats(kobject);

	return xfs_stats_format(stats->xs_stats, buf);
}
XFS_SYSFS_ATTR_RO(stats);

STATIC ssize_t
stats_clear_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;
	struct xstats	*stats = to_xstats(kobject);

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	xfs_stats_clearall(stats->xs_stats);
	return count;
}
XFS_SYSFS_ATTR_WO(stats_clear);

static struct attribute *xfs_stats_attrs[] = {
	ATTR_LIST(stats),
	ATTR_LIST(stats_clear),
	NULL,
};
ATTRIBUTE_GROUPS(xfs_stats);

const struct kobj_type xfs_stats_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_stats_groups,
};

/* xlog */

static inline struct xlog *
to_xlog(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);

	return container_of(kobj, struct xlog, l_kobj);
}

STATIC ssize_t
log_head_lsn_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int block;
	struct xlog *log = to_xlog(kobject);

	spin_lock(&log->l_icloglock);
	cycle = log->l_curr_cycle;
	block = log->l_curr_block;
	spin_unlock(&log->l_icloglock);

	return sysfs_emit(buf, "%d:%d\n", cycle, block);
}
XFS_SYSFS_ATTR_RO(log_head_lsn);

STATIC ssize_t
log_tail_lsn_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int block;
	struct xlog *log = to_xlog(kobject);

	xlog_crack_atomic_lsn(&log->l_tail_lsn, &cycle, &block);
	return sysfs_emit(buf, "%d:%d\n", cycle, block);
}
XFS_SYSFS_ATTR_RO(log_tail_lsn);

STATIC ssize_t
reserve_grant_head_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int bytes;
	struct xlog *log = to_xlog(kobject);

	xlog_crack_grant_head(&log->l_reserve_head.grant, &cycle, &bytes);
	return sysfs_emit(buf, "%d:%d\n", cycle, bytes);
}
XFS_SYSFS_ATTR_RO(reserve_grant_head);

STATIC ssize_t
write_grant_head_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int bytes;
	struct xlog *log = to_xlog(kobject);

	xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &bytes);
	return sysfs_emit(buf, "%d:%d\n", cycle, bytes);
}
XFS_SYSFS_ATTR_RO(write_grant_head);

static struct attribute *xfs_log_attrs[] = {
	ATTR_LIST(log_head_lsn),
	ATTR_LIST(log_tail_lsn),
	ATTR_LIST(reserve_grant_head),
	ATTR_LIST(write_grant_head),
	NULL,
};
ATTRIBUTE_GROUPS(xfs_log);

const struct kobj_type xfs_log_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_log_groups,
};

/*
 * Metadata IO error configuration
 *
 * The sysfs structure here is:
 *	...xfs/<dev>/error/<class>/<errno>/<error_attrs>
 *
 * where <class> allows us to discriminate between data IO and metadata IO,
 * and any other future type of IO (e.g. special inode or directory error
 * handling) we care to support.
 */
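
/*
 * For illustration, on a filesystem whose device is sda2, the knobs for
 * metadata EIO errors would live at paths like:
 *
 *	/sys/fs/xfs/sda2/error/metadata/EIO/max_retries
 *	/sys/fs/xfs/sda2/error/metadata/EIO/retry_timeout_seconds
 *
 * ("sda2" is just an example device name; <dev> is whatever device the
 * filesystem was mounted from.)
 */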
static inline struct xfs_error_cfg *
to_error_cfg(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);
	return container_of(kobj, struct xfs_error_cfg, kobj);
}

static inline struct xfs_mount *
err_to_mp(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);
	return container_of(kobj, struct xfs_mount, m_error_kobj);
}

static ssize_t
max_retries_show(
	struct kobject	*kobject,
	char		*buf)
{
	int		retries;
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);

	if (cfg->max_retries == XFS_ERR_RETRY_FOREVER)
		retries = -1;
	else
		retries = cfg->max_retries;

	return sysfs_emit(buf, "%d\n", retries);
}

static ssize_t
max_retries_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < -1)
		return -EINVAL;

	if (val == -1)
		cfg->max_retries = XFS_ERR_RETRY_FOREVER;
	else
		cfg->max_retries = val;
	return count;
}
XFS_SYSFS_ATTR_RW(max_retries);

static ssize_t
retry_timeout_seconds_show(
	struct kobject	*kobject,
	char		*buf)
{
	int		timeout;
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);

	if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER)
		timeout = -1;
	else
		timeout = jiffies_to_msecs(cfg->retry_timeout) / MSEC_PER_SEC;

	return sysfs_emit(buf, "%d\n", timeout);
}

static ssize_t
retry_timeout_seconds_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	/* 1 day timeout maximum, -1 means infinite */
	if (val < -1 || val > 86400)
		return -EINVAL;

	if (val == -1)
		cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
	else {
		cfg->retry_timeout = msecs_to_jiffies(val * MSEC_PER_SEC);
		ASSERT(msecs_to_jiffies(val * MSEC_PER_SEC) < LONG_MAX);
	}
	return count;
}
XFS_SYSFS_ATTR_RW(retry_timeout_seconds);

static ssize_t
fail_at_unmount_show(
	struct kobject	*kobject,
	char		*buf)
{
	struct xfs_mount	*mp = err_to_mp(kobject);

	return sysfs_emit(buf, "%d\n", mp->m_fail_unmount);
}

static ssize_t
fail_at_unmount_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	struct xfs_mount	*mp = err_to_mp(kobject);
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < 0 || val > 1)
		return -EINVAL;

	mp->m_fail_unmount = val;
	return count;
}
XFS_SYSFS_ATTR_RW(fail_at_unmount);

static struct attribute *xfs_error_attrs[] = {
	ATTR_LIST(max_retries),
	ATTR_LIST(retry_timeout_seconds),
	NULL,
};
ATTRIBUTE_GROUPS(xfs_error);

static const struct kobj_type xfs_error_cfg_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_error_groups,
};

static const struct kobj_type xfs_error_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
};

/*
 * Error initialization tables. These need to be ordered in the same
 * order as the enums used to index the array. All class init tables need to
 * define a "default" behaviour as the first entry, all other entries can be
 * empty.
 */
struct xfs_error_init {
	char		*name;
	int		max_retries;
	int		retry_timeout;	/* in seconds */
};

static const struct xfs_error_init xfs_error_meta_init[XFS_ERR_ERRNO_MAX] = {
	{ .name = "default",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "EIO",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "ENOSPC",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "ENODEV",
	  .max_retries = 0,	/* We can't recover from devices disappearing */
	  .retry_timeout = 0,
	},
};
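
/*
 * Note: the entry order above must match the XFS_ERR_* errno enum
 * (XFS_ERR_DEFAULT, XFS_ERR_EIO, XFS_ERR_ENOSPC, XFS_ERR_ENODEV), since
 * xfs_error_sysfs_init_class() below indexes both init[] and m_error_cfg[]
 * with the same loop counter.
 */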
static int
xfs_error_sysfs_init_class(
	struct xfs_mount	*mp,
	int			class,
	const char		*parent_name,
	struct xfs_kobj		*parent_kobj,
	const struct xfs_error_init init[])
{
	struct xfs_error_cfg	*cfg;
	int			error;
	int			i;

	ASSERT(class < XFS_ERR_CLASS_MAX);

	error = xfs_sysfs_init(parent_kobj, &xfs_error_ktype,
				&mp->m_error_kobj, parent_name);
	if (error)
		return error;

	for (i = 0; i < XFS_ERR_ERRNO_MAX; i++) {
		cfg = &mp->m_error_cfg[class][i];
		error = xfs_sysfs_init(&cfg->kobj, &xfs_error_cfg_ktype,
					parent_kobj, init[i].name);
		if (error)
			goto out_error;

		cfg->max_retries = init[i].max_retries;
		if (init[i].retry_timeout == XFS_ERR_RETRY_FOREVER)
			cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
		else
			cfg->retry_timeout = msecs_to_jiffies(
					init[i].retry_timeout * MSEC_PER_SEC);
	}
	return 0;

out_error:
	/* unwind the entries that succeeded */
	for (i--; i >= 0; i--) {
		cfg = &mp->m_error_cfg[class][i];
		xfs_sysfs_del(&cfg->kobj);
	}
	xfs_sysfs_del(parent_kobj);
	return error;
}

int
xfs_error_sysfs_init(
	struct xfs_mount	*mp)
{
	int			error;

	/* .../xfs/<dev>/error/ */
	error = xfs_sysfs_init(&mp->m_error_kobj, &xfs_error_ktype,
				&mp->m_kobj, "error");
	if (error)
		return error;

	error = sysfs_create_file(&mp->m_error_kobj.kobject,
				  ATTR_LIST(fail_at_unmount));
	if (error)
		goto out_error;

	/* .../xfs/<dev>/error/metadata/ */
	error = xfs_error_sysfs_init_class(mp, XFS_ERR_METADATA,
				"metadata", &mp->m_error_meta_kobj,
				xfs_error_meta_init);
	if (error)
		goto out_error;

	return 0;

out_error:
	xfs_sysfs_del(&mp->m_error_kobj);
	return error;
}

void
xfs_error_sysfs_del(
	struct xfs_mount	*mp)
{
	struct xfs_error_cfg	*cfg;
	int			i;
	int			j;

	for (i = 0; i < XFS_ERR_CLASS_MAX; i++) {
		for (j = 0; j < XFS_ERR_ERRNO_MAX; j++) {
			cfg = &mp->m_error_cfg[i][j];

			xfs_sysfs_del(&cfg->kobj);
		}
	}
	xfs_sysfs_del(&mp->m_error_meta_kobj);
	xfs_sysfs_del(&mp->m_error_kobj);
}

struct xfs_error_cfg *
xfs_error_get_cfg(
	struct xfs_mount	*mp,
	int			error_class,
	int			error)
{
	struct xfs_error_cfg	*cfg;

	if (error < 0)
		error = -error;

	switch (error) {
	case EIO:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_EIO];
		break;
	case ENOSPC:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENOSPC];
		break;
	case ENODEV:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENODEV];
		break;
	default:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_DEFAULT];
		break;
	}

	return cfg;
}
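
/*
 * Illustrative caller sketch (not part of this file): metadata buffer I/O
 * error handling would look up the retry policy for a failed buffer roughly
 * like this, where "bp" stands in for a struct xfs_buf:
 *
 *	struct xfs_error_cfg *cfg = xfs_error_get_cfg(bp->b_mount,
 *					XFS_ERR_METADATA, bp->b_error);
 *
 * and would then compare the buffer's retry count and first-failure time
 * against cfg->max_retries and cfg->retry_timeout to decide whether to
 * retry the write or give up.
 */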