// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include <linux/module.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/crc32c.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/dax.h>
#include "xattr.h"

#define CREATE_TRACE_POINTS
#include <trace/events/erofs.h>
static struct kmem_cache *erofs_inode_cachep __read_mostly;
void _erofs_err(struct super_block *sb, const char *function,
		const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_err("(device %s): %s: %pV", sb->s_id, function, &vaf);
	va_end(args);
}
void _erofs_info(struct super_block *sb, const char *function,
		 const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_info("(device %s): %pV", sb->s_id, &vaf);
	va_end(args);
}
static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
{
	struct erofs_super_block *dsb;
	u32 expected_crc, crc;

	dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET,
		      EROFS_BLKSIZ - EROFS_SUPER_OFFSET, GFP_KERNEL);
	if (!dsb)
		return -ENOMEM;

	expected_crc = le32_to_cpu(dsb->checksum);
	dsb->checksum = 0;
	/* to allow for x86 boot sectors and other oddities */
	crc = crc32c(~0, dsb, EROFS_BLKSIZ - EROFS_SUPER_OFFSET);
	kfree(dsb);

	if (crc != expected_crc) {
		erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
			  crc, expected_crc);
		return -EBADMSG;
	}
	return 0;
}
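/*
 * A minimal userspace sketch of the same computation, e.g. for image
 * generation or offline verification.  erofs_sb_crc() is a hypothetical
 * helper, not part of this driver, and assumes a libcrc32c-compatible
 * crc32c():
 *
 *	u32 erofs_sb_crc(struct erofs_super_block *dsb, size_t len)
 *	{
 *		dsb->checksum = 0;	// the field must be zeroed first
 *		return crc32c(~0, dsb, len);
 *	}
 *
 * i.e. crc32c seeded with ~0 over the superblock region with the
 * checksum field cleared, exactly what is verified above.
 */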
static void erofs_inode_init_once(void *ptr)
{
	struct erofs_inode *vi = ptr;

	inode_init_once(&vi->vfs_inode);
}
static struct inode *erofs_alloc_inode(struct super_block *sb)
{
	struct erofs_inode *vi =
		alloc_inode_sb(sb, erofs_inode_cachep, GFP_KERNEL);

	if (!vi)
		return NULL;

	/* zero out everything except vfs_inode */
	memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
	return &vi->vfs_inode;
}
static void erofs_free_inode(struct inode *inode)
{
	struct erofs_inode *vi = EROFS_I(inode);

	/* be careful of RCU symlink path */
	if (inode->i_op == &erofs_fast_symlink_iops)
		kfree(inode->i_link);
	kfree(vi->xattr_shared_xattrs);

	kmem_cache_free(erofs_inode_cachep, vi);
}
static bool check_layout_compatibility(struct super_block *sb,
				       struct erofs_super_block *dsb)
{
	const unsigned int feature = le32_to_cpu(dsb->feature_incompat);

	EROFS_SB(sb)->feature_incompat = feature;

	/* check if current kernel meets all mandatory requirements */
	if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
		erofs_err(sb,
			  "unidentified incompatible feature %x, please upgrade kernel version",
			  feature & ~EROFS_ALL_FEATURE_INCOMPAT);
		return false;
	}
	return true;
}
#ifdef CONFIG_EROFS_FS_ZIP
/* read variable-sized metadata, offset will be aligned by 4-byte */
static void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
				 erofs_off_t *offset, int *lengthp)
{
	u8 *buffer, *ptr;
	int len, i, cnt;

	*offset = round_up(*offset, 4);
	ptr = erofs_read_metabuf(buf, sb, erofs_blknr(*offset), EROFS_KMAP);
	if (IS_ERR(ptr))
		return ptr;

	len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(*offset)]);
	if (!len)
		len = U16_MAX + 1;
	buffer = kmalloc(len, GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);
	*offset += sizeof(__le16);
	*lengthp = len;

	for (i = 0; i < len; i += cnt) {
		cnt = min(EROFS_BLKSIZ - (int)erofs_blkoff(*offset), len - i);
		ptr = erofs_read_metabuf(buf, sb, erofs_blknr(*offset),
					 EROFS_KMAP);
		if (IS_ERR(ptr)) {
			kfree(buffer);
			return ptr;
		}
		memcpy(buffer + i, ptr + erofs_blkoff(*offset), cnt);
		*offset += cnt;
	}
	return buffer;
}
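/*
 * For reference, the on-disk record consumed by erofs_read_metadata()
 * above looks like:
 *
 *	__le16	length;			4-byte aligned header
 *	u8	payload[length];	may span multiple blocks
 *
 * where a zero length field encodes U16_MAX + 1 bytes.  The payload is
 * copied out block by block into one contiguous kmalloc'ed buffer, so
 * callers never have to care about block boundaries.
 */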
static int erofs_load_compr_cfgs(struct super_block *sb,
				 struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	unsigned int algs, alg;
	erofs_off_t offset;
	int size, ret = 0;

	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
		erofs_err(sb, "try to load compressed fs with unsupported algorithms %x",
			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
		return -EINVAL;
	}

	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
	alg = 0;
	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
		void *data;

		if (!(algs & 1))
			continue;

		data = erofs_read_metadata(sb, &buf, &offset, &size);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		switch (alg) {
		case Z_EROFS_COMPRESSION_LZ4:
			ret = z_erofs_load_lz4_config(sb, dsb, data, size);
			break;
		case Z_EROFS_COMPRESSION_LZMA:
			ret = z_erofs_load_lzma_config(sb, dsb, data, size);
			break;
		default:
			DBG_BUGON(1);
			ret = -EFAULT;
		}
		kfree(data);
		if (ret)
			break;
	}
	erofs_put_metabuf(&buf);
	return ret;
}
#else
static int erofs_load_compr_cfgs(struct super_block *sb,
				 struct erofs_super_block *dsb)
{
	if (dsb->u1.available_compr_algs) {
		erofs_err(sb, "try to load compressed fs when compression is disabled");
		return -EINVAL;
	}
	return 0;
}
#endif
static int erofs_init_devices(struct super_block *sb,
			      struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	unsigned int ondisk_extradevs;
	erofs_off_t pos;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct erofs_device_info *dif;
	struct erofs_deviceslot *dis;
	void *ptr;
	int id, err = 0;

	sbi->total_blocks = sbi->primarydevice_blocks;
	if (!erofs_sb_has_device_table(sbi))
		ondisk_extradevs = 0;
	else
		ondisk_extradevs = le16_to_cpu(dsb->extra_devices);

	if (ondisk_extradevs != sbi->devs->extra_devices) {
		erofs_err(sb, "extra devices don't match (ondisk %u, given %u)",
			  ondisk_extradevs, sbi->devs->extra_devices);
		return -EINVAL;
	}
	if (!ondisk_extradevs)
		return 0;

	sbi->device_id_mask = roundup_pow_of_two(ondisk_extradevs + 1) - 1;
	pos = le16_to_cpu(dsb->devt_slotoff) * EROFS_DEVT_SLOT_SIZE;
	down_read(&sbi->devs->rwsem);
	idr_for_each_entry(&sbi->devs->tree, dif, id) {
		struct block_device *bdev;

		ptr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos),
					 EROFS_KMAP);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
			break;
		}
		dis = ptr + erofs_blkoff(pos);

		bdev = blkdev_get_by_path(dif->path,
					  FMODE_READ | FMODE_EXCL,
					  sb->s_type);
		if (IS_ERR(bdev)) {
			err = PTR_ERR(bdev);
			break;
		}
		dif->bdev = bdev;
		dif->dax_dev = fs_dax_get_by_bdev(bdev, &dif->dax_part_off);
		dif->blocks = le32_to_cpu(dis->blocks);
		dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr);
		sbi->total_blocks += dif->blocks;
		pos += EROFS_DEVT_SLOT_SIZE;
	}
	up_read(&sbi->devs->rwsem);
	erofs_put_metabuf(&buf);
	return err;
}
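/*
 * Example of the device_id_mask set above: with 3 extra devices,
 * roundup_pow_of_two(3 + 1) - 1 == 3, so device IDs decoded elsewhere
 * (see erofs_map_dev()) fit in the low two bits.  Each slot also grows
 * sbi->total_blocks, so statfs reports capacity across all devices.
 */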
static int erofs_read_superblock(struct super_block *sb)
{
	struct erofs_sb_info *sbi;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct erofs_super_block *dsb;
	unsigned int blkszbits;
	void *data;
	int ret;

	data = erofs_read_metabuf(&buf, sb, 0, EROFS_KMAP);
	if (IS_ERR(data)) {
		erofs_err(sb, "cannot read erofs superblock");
		return PTR_ERR(data);
	}

	sbi = EROFS_SB(sb);
	dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);

	ret = -EINVAL;
	if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
		erofs_err(sb, "cannot find valid erofs superblock");
		goto out;
	}

	sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
	if (erofs_sb_has_sb_chksum(sbi)) {
		ret = erofs_superblock_csum_verify(sb, data);
		if (ret)
			goto out;
	}

	ret = -EINVAL;
	blkszbits = dsb->blkszbits;
	/* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
	if (blkszbits != LOG_BLOCK_SIZE) {
		erofs_err(sb, "blkszbits %u isn't supported on this platform",
			  blkszbits);
		goto out;
	}

	if (!check_layout_compatibility(sb, dsb))
		goto out;

	sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
	if (sbi->sb_size > EROFS_BLKSIZ) {
		erofs_err(sb, "invalid sb_extslots %u (more than a fs block)",
			  sbi->sb_size);
		goto out;
	}
	sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks);
	sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
	sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
#endif
	sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
	sbi->root_nid = le16_to_cpu(dsb->root_nid);
	sbi->inos = le64_to_cpu(dsb->inos);

	sbi->build_time = le64_to_cpu(dsb->build_time);
	sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);

	memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));

	ret = strscpy(sbi->volume_name, dsb->volume_name,
		      sizeof(dsb->volume_name));
	if (ret < 0) {	/* -E2BIG */
		erofs_err(sb, "bad volume name without NIL terminator");
		ret = -EFSCORRUPTED;
		goto out;
	}

	/* parse on-disk compression configurations */
	if (erofs_sb_has_compr_cfgs(sbi))
		ret = erofs_load_compr_cfgs(sb, dsb);
	else
		ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0);
	if (ret < 0)
		goto out;

	/* handle multiple devices */
	ret = erofs_init_devices(sb, dsb);

	if (erofs_sb_has_ztailpacking(sbi))
		erofs_info(sb, "EXPERIMENTAL compressed inline data feature in use. Use at your own risk!");
out:
	erofs_put_metabuf(&buf);
	return ret;
}
/* set up default EROFS parameters */
static void erofs_default_options(struct erofs_fs_context *ctx)
{
#ifdef CONFIG_EROFS_FS_ZIP
	ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
	ctx->opt.max_sync_decompress_pages = 3;
	ctx->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
#endif
#ifdef CONFIG_EROFS_FS_XATTR
	set_opt(&ctx->opt, XATTR_USER);
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	set_opt(&ctx->opt, POSIX_ACL);
#endif
}
enum {
	Opt_user_xattr,
	Opt_acl,
	Opt_cache_strategy,
	Opt_dax,
	Opt_dax_enum,
	Opt_device,
	Opt_err
};

static const struct constant_table erofs_param_cache_strategy[] = {
	{"disabled",	EROFS_ZIP_CACHE_DISABLED},
	{"readahead",	EROFS_ZIP_CACHE_READAHEAD},
	{"readaround",	EROFS_ZIP_CACHE_READAROUND},
	{}
};

static const struct constant_table erofs_dax_param_enums[] = {
	{"always",	EROFS_MOUNT_DAX_ALWAYS},
	{"never",	EROFS_MOUNT_DAX_NEVER},
	{}
};

static const struct fs_parameter_spec erofs_fs_parameters[] = {
	fsparam_flag_no("user_xattr",	Opt_user_xattr),
	fsparam_flag_no("acl",		Opt_acl),
	fsparam_enum("cache_strategy",	Opt_cache_strategy,
		     erofs_param_cache_strategy),
	fsparam_flag("dax",		Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, erofs_dax_param_enums),
	fsparam_string("device",	Opt_device),
	{}
};
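/*
 * For illustration only (the device paths below are hypothetical), the
 * table above accepts mount invocations such as:
 *
 *	mount -t erofs -o noacl,cache_strategy=readahead /dev/sdX /mnt
 *	mount -t erofs -o dax=always,device=/dev/sdY /dev/sdX /mnt
 *
 * A bare "dax" flag behaves like "dax=always", and "device=" may be
 * passed multiple times to attach the extra blobs of a multi-device
 * image.
 */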
static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
{
#ifdef CONFIG_FS_DAX
	struct erofs_fs_context *ctx = fc->fs_private;

	switch (mode) {
	case EROFS_MOUNT_DAX_ALWAYS:
		warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
		set_opt(&ctx->opt, DAX_ALWAYS);
		clear_opt(&ctx->opt, DAX_NEVER);
		return true;
	case EROFS_MOUNT_DAX_NEVER:
		set_opt(&ctx->opt, DAX_NEVER);
		clear_opt(&ctx->opt, DAX_ALWAYS);
		return true;
	default:
		DBG_BUGON(1);
		return false;
	}
#else
	errorfc(fc, "dax options not supported");
	return false;
#endif
}
static int erofs_fc_parse_param(struct fs_context *fc,
				struct fs_parameter *param)
{
	struct erofs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	struct erofs_device_info *dif;
	int opt, ret;

	opt = fs_parse(fc, erofs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_user_xattr:
#ifdef CONFIG_EROFS_FS_XATTR
		if (result.boolean)
			set_opt(&ctx->opt, XATTR_USER);
		else
			clear_opt(&ctx->opt, XATTR_USER);
#else
		errorfc(fc, "{,no}user_xattr options not supported");
#endif
		break;
	case Opt_acl:
#ifdef CONFIG_EROFS_FS_POSIX_ACL
		if (result.boolean)
			set_opt(&ctx->opt, POSIX_ACL);
		else
			clear_opt(&ctx->opt, POSIX_ACL);
#else
		errorfc(fc, "{,no}acl options not supported");
#endif
		break;
	case Opt_cache_strategy:
#ifdef CONFIG_EROFS_FS_ZIP
		ctx->opt.cache_strategy = result.uint_32;
#else
		errorfc(fc, "compression not supported, cache_strategy ignored");
#endif
		break;
	case Opt_dax:
		if (!erofs_fc_set_dax_mode(fc, EROFS_MOUNT_DAX_ALWAYS))
			return -EINVAL;
		break;
	case Opt_dax_enum:
		if (!erofs_fc_set_dax_mode(fc, result.uint_32))
			return -EINVAL;
		break;
	case Opt_device:
		dif = kzalloc(sizeof(*dif), GFP_KERNEL);
		if (!dif)
			return -ENOMEM;
		dif->path = kstrdup(param->string, GFP_KERNEL);
		if (!dif->path) {
			kfree(dif);
			return -ENOMEM;
		}
		down_write(&ctx->devs->rwsem);
		ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL);
		up_write(&ctx->devs->rwsem);
		if (ret < 0) {
			kfree(dif->path);
			kfree(dif);
			return ret;
		}
		++ctx->devs->extra_devices;
		break;
	default:
		return -ENOPARAM;
	}
	return 0;
}
#ifdef CONFIG_EROFS_FS_ZIP
static const struct address_space_operations managed_cache_aops;

static int erofs_managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
{
	int ret = 1;	/* 0 - busy */
	struct address_space *const mapping = page->mapping;

	DBG_BUGON(!PageLocked(page));
	DBG_BUGON(mapping->a_ops != &managed_cache_aops);

	if (PagePrivate(page))
		ret = erofs_try_to_free_cached_page(page);

	return ret;
}

/*
 * This is called only on inode eviction.  If there are still some
 * decompression requests in progress, wait and reschedule for a bit
 * here.  An extra lock could be introduced instead, but it seems
 * unnecessary.
 */
static void erofs_managed_cache_invalidate_folio(struct folio *folio,
						 size_t offset, size_t length)
{
	const size_t stop = length + offset;

	DBG_BUGON(!folio_test_locked(folio));

	/* Check for potential overflow in debug mode */
	DBG_BUGON(stop > folio_size(folio) || stop < length);

	if (offset == 0 && stop == folio_size(folio))
		while (!erofs_managed_cache_releasepage(&folio->page, GFP_NOFS))
			cond_resched();
}

static const struct address_space_operations managed_cache_aops = {
	.releasepage = erofs_managed_cache_releasepage,
	.invalidate_folio = erofs_managed_cache_invalidate_folio,
};
static int erofs_init_managed_cache(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	struct inode *const inode = new_inode(sb);

	if (!inode)
		return -ENOMEM;

	set_nlink(inode, 1);
	inode->i_size = OFFSET_MAX;

	inode->i_mapping->a_ops = &managed_cache_aops;
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	sbi->managed_cache = inode;
	return 0;
}
#else
static int erofs_init_managed_cache(struct super_block *sb) { return 0; }
#endif
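/*
 * Design note: the managed cache is a purely in-memory pseudo inode;
 * its page cache holds cached compressed data so that regular memory
 * reclaim (via ->releasepage above) can free those pages under
 * pressure.  Nothing about this inode ever reaches the disk.
 */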
static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct erofs_sb_info *sbi;
	struct erofs_fs_context *ctx = fc->fs_private;
	int err;

	sb->s_magic = EROFS_SUPER_MAGIC;

	if (!sb_set_blocksize(sb, EROFS_BLKSIZ)) {
		erofs_err(sb, "failed to set erofs blksize");
		return -EINVAL;
	}

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;
	sbi->opt = ctx->opt;
	sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->dax_part_off);
	sbi->devs = ctx->devs;
	ctx->devs = NULL;

	err = erofs_read_superblock(sb);
	if (err)
		return err;

	if (test_opt(&sbi->opt, DAX_ALWAYS)) {
		BUILD_BUG_ON(EROFS_BLKSIZ != PAGE_SIZE);

		if (!sbi->dax_dev) {
			errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
			clear_opt(&sbi->opt, DAX_ALWAYS);
		}
	}
	sb->s_flags |= SB_RDONLY | SB_NOATIME;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;

	sb->s_op = &erofs_sops;
	sb->s_xattr = erofs_xattr_handlers;

	if (test_opt(&sbi->opt, POSIX_ACL))
		sb->s_flags |= SB_POSIXACL;
	else
		sb->s_flags &= ~SB_POSIXACL;

#ifdef CONFIG_EROFS_FS_ZIP
	xa_init(&sbi->managed_pslots);
#endif

	/* get the root inode */
	inode = erofs_iget(sb, ROOT_NID(sbi), true);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (!S_ISDIR(inode->i_mode)) {
		erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
			  ROOT_NID(sbi), inode->i_mode);
		iput(inode);
		return -EINVAL;
	}

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;

	erofs_shrinker_register(sb);
	/* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
	err = erofs_init_managed_cache(sb);
	if (err)
		return err;

	err = erofs_register_sysfs(sb);
	if (err)
		return err;

	erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi));
	return 0;
}
static int erofs_fc_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, erofs_fc_fill_super);
}
static int erofs_fc_reconfigure(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_fs_context *ctx = fc->fs_private;

	DBG_BUGON(!sb_rdonly(sb));

	if (test_opt(&ctx->opt, POSIX_ACL))
		fc->sb_flags |= SB_POSIXACL;
	else
		fc->sb_flags &= ~SB_POSIXACL;

	sbi->opt = ctx->opt;

	fc->sb_flags |= SB_RDONLY;
	return 0;
}
static int erofs_release_device_info(int id, void *ptr, void *data)
{
	struct erofs_device_info *dif = ptr;

	fs_put_dax(dif->dax_dev);
	if (dif->bdev)
		blkdev_put(dif->bdev, FMODE_READ | FMODE_EXCL);
	kfree(dif->path);
	kfree(dif);
	return 0;
}

static void erofs_free_dev_context(struct erofs_dev_context *devs)
{
	if (!devs)
		return;
	idr_for_each(&devs->tree, &erofs_release_device_info, NULL);
	idr_destroy(&devs->tree);
	kfree(devs);
}
static void erofs_fc_free(struct fs_context *fc)
{
	struct erofs_fs_context *ctx = fc->fs_private;

	erofs_free_dev_context(ctx->devs);
	kfree(ctx);
}
static const struct fs_context_operations erofs_context_ops = {
	.parse_param	= erofs_fc_parse_param,
	.get_tree	= erofs_fc_get_tree,
	.reconfigure	= erofs_fc_reconfigure,
	.free		= erofs_fc_free,
};
static int erofs_init_fs_context(struct fs_context *fc)
{
	struct erofs_fs_context *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
	if (!ctx->devs) {
		kfree(ctx);
		return -ENOMEM;
	}
	fc->fs_private = ctx;

	idr_init(&ctx->devs->tree);
	init_rwsem(&ctx->devs->rwsem);
	erofs_default_options(ctx);
	fc->ops = &erofs_context_ops;
	return 0;
}
/*
 * This can be triggered after deactivate_locked_super() is called,
 * which covers both umount and mounts that failed to initialize.
 */
static void erofs_kill_sb(struct super_block *sb)
{
	struct erofs_sb_info *sbi;

	WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);

	kill_block_super(sb);

	sbi = EROFS_SB(sb);
	if (!sbi)
		return;

	erofs_free_dev_context(sbi->devs);
	fs_put_dax(sbi->dax_dev);
	kfree(sbi);
	sb->s_fs_info = NULL;
}
/* called when ->s_root is non-NULL */
static void erofs_put_super(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	DBG_BUGON(!sbi);

	erofs_unregister_sysfs(sb);
	erofs_shrinker_unregister(sb);
#ifdef CONFIG_EROFS_FS_ZIP
	iput(sbi->managed_cache);
	sbi->managed_cache = NULL;
#endif
}
static struct file_system_type erofs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "erofs",
	.init_fs_context = erofs_init_fs_context,
	.kill_sb	= erofs_kill_sb,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("erofs");
static int __init erofs_module_init(void)
{
	int err;

	erofs_check_ondisk_layout_definitions();

	erofs_inode_cachep = kmem_cache_create("erofs_inode",
					       sizeof(struct erofs_inode), 0,
					       SLAB_RECLAIM_ACCOUNT,
					       erofs_inode_init_once);
	if (!erofs_inode_cachep) {
		err = -ENOMEM;
		goto icache_err;
	}

	err = erofs_init_shrinker();
	if (err)
		goto shrinker_err;

	err = z_erofs_lzma_init();
	if (err)
		goto lzma_err;

	erofs_pcpubuf_init();
	err = z_erofs_init_zip_subsystem();
	if (err)
		goto zip_err;

	err = erofs_init_sysfs();
	if (err)
		goto sysfs_err;

	err = register_filesystem(&erofs_fs_type);
	if (err)
		goto fs_err;

	return 0;

fs_err:
	erofs_exit_sysfs();
sysfs_err:
	z_erofs_exit_zip_subsystem();
zip_err:
	z_erofs_lzma_exit();
lzma_err:
	erofs_exit_shrinker();
shrinker_err:
	kmem_cache_destroy(erofs_inode_cachep);
icache_err:
	return err;
}
static void __exit erofs_module_exit(void)
{
	unregister_filesystem(&erofs_fs_type);

	/* Ensure all RCU free inodes / pclusters are safe to be destroyed. */
	rcu_barrier();

	erofs_exit_sysfs();
	z_erofs_exit_zip_subsystem();
	z_erofs_lzma_exit();
	erofs_exit_shrinker();
	kmem_cache_destroy(erofs_inode_cachep);
	erofs_pcpubuf_exit();
}
/* get filesystem statistics */
static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type = sb->s_magic;
	buf->f_bsize = EROFS_BLKSIZ;
	buf->f_blocks = sbi->total_blocks;
	buf->f_bfree = buf->f_bavail = 0;

	buf->f_files = ULLONG_MAX;
	buf->f_ffree = ULLONG_MAX - sbi->inos;

	buf->f_namelen = EROFS_NAME_LEN;

	buf->f_fsid = u64_to_fsid(id);
	return 0;
}
static int erofs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct erofs_sb_info *sbi = EROFS_SB(root->d_sb);
	struct erofs_mount_opts *opt = &sbi->opt;

#ifdef CONFIG_EROFS_FS_XATTR
	if (test_opt(opt, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	if (test_opt(opt, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
#ifdef CONFIG_EROFS_FS_ZIP
	if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED)
		seq_puts(seq, ",cache_strategy=disabled");
	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD)
		seq_puts(seq, ",cache_strategy=readahead");
	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND)
		seq_puts(seq, ",cache_strategy=readaround");
#endif
	if (test_opt(opt, DAX_ALWAYS))
		seq_puts(seq, ",dax=always");
	if (test_opt(opt, DAX_NEVER))
		seq_puts(seq, ",dax=never");
	return 0;
}
const struct super_operations erofs_sops = {
	.put_super = erofs_put_super,
	.alloc_inode = erofs_alloc_inode,
	.free_inode = erofs_free_inode,
	.statfs = erofs_statfs,
	.show_options = erofs_show_options,
};
module_init(erofs_module_init);
module_exit(erofs_module_exit);

MODULE_DESCRIPTION("Enhanced ROM File System");
MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
MODULE_LICENSE("GPL");