// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 * https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include <linux/module.h>
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/crc32c.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/dax.h>
#include <linux/exportfs.h>

#define CREATE_TRACE_POINTS
#include <trace/events/erofs.h>

static struct kmem_cache *erofs_inode_cachep __read_mostly;
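/*
 * Message helpers: prefix printk output with the device name (and, for
 * errors, the reporting function) using a struct va_format ("%pV").
 */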
void _erofs_err(struct super_block *sb, const char *func, const char *fmt, ...)
        pr_err("(device %s): %s: %pV", sb->s_id, func, &vaf);

void _erofs_info(struct super_block *sb, const char *func, const char *fmt, ...)
        pr_info("(device %s): %pV", sb->s_id, &vaf);
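/*
 * Verify the crc32c checksum of the on-disk superblock: only the bytes from
 * EROFS_SUPER_OFFSET to the end of the block are covered, so the reserved
 * boot-sector area at the start of the image never affects the result.
 */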
static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
        size_t len = 1 << EROFS_SB(sb)->blkszbits;
        struct erofs_super_block *dsb;
        u32 expected_crc, crc;

        if (len > EROFS_SUPER_OFFSET)
                len -= EROFS_SUPER_OFFSET;

        dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET, len, GFP_KERNEL);

        expected_crc = le32_to_cpu(dsb->checksum);

        /* to allow for x86 boot sectors and other oddities. */
        crc = crc32c(~0, dsb, len);

        if (crc != expected_crc) {
                erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
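/*
 * Inode slab management: struct erofs_inode is backed by a dedicated
 * kmem_cache whose constructor initializes the embedded VFS inode once per
 * object, so erofs_alloc_inode() only needs to zero the erofs-specific
 * fields in front of it.
 */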
static void erofs_inode_init_once(void *ptr)
        struct erofs_inode *vi = ptr;

        inode_init_once(&vi->vfs_inode);

static struct inode *erofs_alloc_inode(struct super_block *sb)
        struct erofs_inode *vi =
                alloc_inode_sb(sb, erofs_inode_cachep, GFP_KERNEL);

        /* zero out everything except vfs_inode */
        memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
        return &vi->vfs_inode;

static void erofs_free_inode(struct inode *inode)
        struct erofs_inode *vi = EROFS_I(inode);

        if (inode->i_op == &erofs_fast_symlink_iops)
                kfree(inode->i_link);
        kfree(vi->xattr_shared_xattrs);
        kmem_cache_free(erofs_inode_cachep, vi);
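/*
 * Record the on-disk incompatible feature bits and refuse to mount if any
 * of them is unknown to the running kernel.
 */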
static bool check_layout_compatibility(struct super_block *sb,
                                       struct erofs_super_block *dsb)
        const unsigned int feature = le32_to_cpu(dsb->feature_incompat);

        EROFS_SB(sb)->feature_incompat = feature;

        /* check if current kernel meets all mandatory requirements */
        if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
                erofs_err(sb, "unidentified incompatible feature %x, please upgrade kernel",
                          feature & ~EROFS_ALL_FEATURE_INCOMPAT);

/* read variable-sized metadata, offset will be aligned by 4-byte */
void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
                          erofs_off_t *offset, int *lengthp)
        *offset = round_up(*offset, 4);
        ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
        len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(sb, *offset)]);
        buffer = kmalloc(len, GFP_KERNEL);
                return ERR_PTR(-ENOMEM);
        *offset += sizeof(__le16);

        for (i = 0; i < len; i += cnt) {
                cnt = min_t(int, sb->s_blocksize - erofs_blkoff(sb, *offset),
                ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
                memcpy(buffer + i, ptr + erofs_blkoff(sb, *offset), cnt);
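/*
 * Load the per-algorithm compression configurations that follow the
 * (extended) superblock: walk the available_compr_algs bitmap and pass each
 * variable-sized record to the matching loader (LZ4, LZMA, DEFLATE).  The
 * !CONFIG_EROFS_FS_ZIP stub below only rejects images that advertise
 * compression algorithms.
 */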
#ifdef CONFIG_EROFS_FS_ZIP
static int erofs_load_compr_cfgs(struct super_block *sb,
                                 struct erofs_super_block *dsb)
        struct erofs_sb_info *sbi = EROFS_SB(sb);
        struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
        unsigned int algs, alg;

        sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
        if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
                erofs_err(sb, "try to load compressed fs with unsupported algorithms %x",
                          sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);

        erofs_init_metabuf(&buf, sb);
        offset = EROFS_SUPER_OFFSET + sbi->sb_size;

        for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
                data = erofs_read_metadata(sb, &buf, &offset, &size);

                case Z_EROFS_COMPRESSION_LZ4:
                        ret = z_erofs_load_lz4_config(sb, dsb, data, size);
                case Z_EROFS_COMPRESSION_LZMA:
                        ret = z_erofs_load_lzma_config(sb, dsb, data, size);
                case Z_EROFS_COMPRESSION_DEFLATE:
                        ret = z_erofs_load_deflate_config(sb, dsb, data, size);

        erofs_put_metabuf(&buf);

static int erofs_load_compr_cfgs(struct super_block *sb,
                                 struct erofs_super_block *dsb)
        if (dsb->u1.available_compr_algs) {
                erofs_err(sb, "try to load compressed fs when compression is disabled");
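/*
 * Set up one device from its on-disk device slot: resolve the slot tag to a
 * path, then either register an fscache cookie (on-demand mode) or open the
 * block device and probe DAX support, and finally account its blocks.
 */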
static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
                             struct erofs_device_info *dif, erofs_off_t *pos)
        struct erofs_sb_info *sbi = EROFS_SB(sb);
        struct erofs_fscache *fscache;
        struct erofs_deviceslot *dis;
        struct block_device *bdev;

        ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *pos), EROFS_KMAP);
        dis = ptr + erofs_blkoff(sb, *pos);

        if (!sbi->devs->flatdev && !dif->path) {
                erofs_err(sb, "empty device tag @ pos %llu", *pos);
                dif->path = kmemdup_nul(dis->tag, sizeof(dis->tag), GFP_KERNEL);

        if (erofs_is_fscache_mode(sb)) {
                fscache = erofs_fscache_register_cookie(sb, dif->path, 0);
                        return PTR_ERR(fscache);
                dif->fscache = fscache;
        } else if (!sbi->devs->flatdev) {
                bdev = blkdev_get_by_path(dif->path, BLK_OPEN_READ, sb->s_type,
                        return PTR_ERR(bdev);
                dif->dax_dev = fs_dax_get_by_bdev(bdev, &dif->dax_part_off,

        dif->blocks = le32_to_cpu(dis->blocks);
        dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr);
        sbi->total_blocks += dif->blocks;
        *pos += EROFS_DEVT_SLOT_SIZE;
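/*
 * Walk the on-disk device table.  Devices supplied via the "device=" mount
 * option are matched against the table in order; otherwise the slots are
 * allocated and registered here.  A multi-device image accessed without
 * extra "device=" options (and not in fscache mode) is treated as a single
 * flattened block device (flatdev).
 */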
static int erofs_scan_devices(struct super_block *sb,
                              struct erofs_super_block *dsb)
        struct erofs_sb_info *sbi = EROFS_SB(sb);
        unsigned int ondisk_extradevs;
        struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
        struct erofs_device_info *dif;

        sbi->total_blocks = sbi->primarydevice_blocks;
        if (!erofs_sb_has_device_table(sbi))
                ondisk_extradevs = 0;
                ondisk_extradevs = le16_to_cpu(dsb->extra_devices);

        if (sbi->devs->extra_devices &&
            ondisk_extradevs != sbi->devs->extra_devices) {
                erofs_err(sb, "extra devices don't match (ondisk %u, given %u)",
                          ondisk_extradevs, sbi->devs->extra_devices);

        if (!ondisk_extradevs)

        if (!sbi->devs->extra_devices && !erofs_is_fscache_mode(sb))
                sbi->devs->flatdev = true;

        sbi->device_id_mask = roundup_pow_of_two(ondisk_extradevs + 1) - 1;
        pos = le16_to_cpu(dsb->devt_slotoff) * EROFS_DEVT_SLOT_SIZE;
        down_read(&sbi->devs->rwsem);
        if (sbi->devs->extra_devices) {
                idr_for_each_entry(&sbi->devs->tree, dif, id) {
                        err = erofs_init_device(&buf, sb, dif, &pos);
                for (id = 0; id < ondisk_extradevs; id++) {
                        dif = kzalloc(sizeof(*dif), GFP_KERNEL);
                        err = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL);
                        ++sbi->devs->extra_devices;
                        err = erofs_init_device(&buf, sb, dif, &pos);
        up_read(&sbi->devs->rwsem);
        erofs_put_metabuf(&buf);
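/*
 * Read and validate the on-disk superblock at EROFS_SUPER_OFFSET: check the
 * magic number, block size, optional checksum and feature bits, cache the
 * static filesystem geometry (root nid, inode count, xattr and device table
 * locations) in erofs_sb_info, then load compression configurations and
 * scan any extra devices.
 */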
static int erofs_read_superblock(struct super_block *sb)
        struct erofs_sb_info *sbi;
        struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
        struct erofs_super_block *dsb;

        data = erofs_read_metabuf(&buf, sb, 0, EROFS_KMAP);
                erofs_err(sb, "cannot read erofs superblock");
                return PTR_ERR(data);

        dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);

        if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
                erofs_err(sb, "cannot find valid erofs superblock");

        sbi->blkszbits = dsb->blkszbits;
        if (sbi->blkszbits < 9 || sbi->blkszbits > PAGE_SHIFT) {
                erofs_err(sb, "blkszbits %u isn't supported", sbi->blkszbits);
        if (dsb->dirblkbits) {
                erofs_err(sb, "dirblkbits %u isn't supported", dsb->dirblkbits);

        sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
        if (erofs_sb_has_sb_chksum(sbi)) {
                ret = erofs_superblock_csum_verify(sb, data);

        if (!check_layout_compatibility(sb, dsb))

        sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
        if (sbi->sb_size > PAGE_SIZE - EROFS_SUPER_OFFSET) {
                erofs_err(sb, "invalid sb_extslots %u (more than a fs block)",
        sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks);
        sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
        sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
        sbi->xattr_prefix_start = le32_to_cpu(dsb->xattr_prefix_start);
        sbi->xattr_prefix_count = dsb->xattr_prefix_count;
        sbi->xattr_filter_reserved = dsb->xattr_filter_reserved;
        sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
        sbi->root_nid = le16_to_cpu(dsb->root_nid);
        sbi->packed_nid = le64_to_cpu(dsb->packed_nid);
        sbi->inos = le64_to_cpu(dsb->inos);

        sbi->build_time = le64_to_cpu(dsb->build_time);
        sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);

        memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));

        ret = strscpy(sbi->volume_name, dsb->volume_name,
                      sizeof(dsb->volume_name));
        if (ret < 0) { /* -E2BIG */
                erofs_err(sb, "bad volume name without NIL terminator");

        /* parse on-disk compression configurations */
        if (erofs_sb_has_compr_cfgs(sbi))
                ret = erofs_load_compr_cfgs(sb, dsb);
                ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0);

        /* handle multiple devices */
        ret = erofs_scan_devices(sb, dsb);

        if (erofs_is_fscache_mode(sb))
                erofs_info(sb, "EXPERIMENTAL fscache-based on-demand read feature in use. Use at your own risk!");

        erofs_put_metabuf(&buf);
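/* Apply the compiled-in defaults before any mount options are parsed. */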
static void erofs_default_options(struct erofs_fs_context *ctx)
#ifdef CONFIG_EROFS_FS_ZIP
        ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
        ctx->opt.max_sync_decompress_pages = 3;
        ctx->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
#ifdef CONFIG_EROFS_FS_XATTR
        set_opt(&ctx->opt, XATTR_USER);
#ifdef CONFIG_EROFS_FS_POSIX_ACL
        set_opt(&ctx->opt, POSIX_ACL);

static const struct constant_table erofs_param_cache_strategy[] = {
        {"disabled", EROFS_ZIP_CACHE_DISABLED},
        {"readahead", EROFS_ZIP_CACHE_READAHEAD},
        {"readaround", EROFS_ZIP_CACHE_READAROUND},

static const struct constant_table erofs_dax_param_enums[] = {
        {"always", EROFS_MOUNT_DAX_ALWAYS},
        {"never", EROFS_MOUNT_DAX_NEVER},

static const struct fs_parameter_spec erofs_fs_parameters[] = {
        fsparam_flag_no("user_xattr", Opt_user_xattr),
        fsparam_flag_no("acl", Opt_acl),
        fsparam_enum("cache_strategy", Opt_cache_strategy,
                     erofs_param_cache_strategy),
        fsparam_flag("dax", Opt_dax),
        fsparam_enum("dax", Opt_dax_enum, erofs_dax_param_enums),
        fsparam_string("device", Opt_device),
        fsparam_string("fsid", Opt_fsid),
        fsparam_string("domain_id", Opt_domain_id),
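/*
 * Example usage of the mount parameters defined above (illustrative only;
 * the device paths and fsid value below are hypothetical):
 *
 *   mount -t erofs -o dax=always,cache_strategy=readaround /dev/sdb1 /mnt
 *   mount -t erofs -o device=/dev/sdc1,device=/dev/sdd1 /dev/sdb1 /mnt
 *   mount -t erofs -o fsid=myimage none /mnt      (fscache on-demand mode)
 */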
static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
        struct erofs_fs_context *ctx = fc->fs_private;

        case EROFS_MOUNT_DAX_ALWAYS:
                warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
                set_opt(&ctx->opt, DAX_ALWAYS);
                clear_opt(&ctx->opt, DAX_NEVER);
        case EROFS_MOUNT_DAX_NEVER:
                set_opt(&ctx->opt, DAX_NEVER);
                clear_opt(&ctx->opt, DAX_ALWAYS);

        errorfc(fc, "dax options not supported");
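/*
 * Parse a single parameter from the new mount API.  "device=" may be given
 * multiple times; each occurrence allocates an erofs_device_info entry in
 * the per-context device IDR.
 */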
static int erofs_fc_parse_param(struct fs_context *fc,
                                struct fs_parameter *param)
        struct erofs_fs_context *ctx = fc->fs_private;
        struct fs_parse_result result;
        struct erofs_device_info *dif;

        opt = fs_parse(fc, erofs_fs_parameters, param, &result);

#ifdef CONFIG_EROFS_FS_XATTR
                        set_opt(&ctx->opt, XATTR_USER);
                        clear_opt(&ctx->opt, XATTR_USER);
                errorfc(fc, "{,no}user_xattr options not supported");
#ifdef CONFIG_EROFS_FS_POSIX_ACL
                        set_opt(&ctx->opt, POSIX_ACL);
                        clear_opt(&ctx->opt, POSIX_ACL);
                errorfc(fc, "{,no}acl options not supported");
        case Opt_cache_strategy:
#ifdef CONFIG_EROFS_FS_ZIP
                ctx->opt.cache_strategy = result.uint_32;
                errorfc(fc, "compression not supported, cache_strategy ignored");
                if (!erofs_fc_set_dax_mode(fc, EROFS_MOUNT_DAX_ALWAYS))
                if (!erofs_fc_set_dax_mode(fc, result.uint_32))
                dif = kzalloc(sizeof(*dif), GFP_KERNEL);
                dif->path = kstrdup(param->string, GFP_KERNEL);
                down_write(&ctx->devs->rwsem);
                ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL);
                up_write(&ctx->devs->rwsem);
                ++ctx->devs->extra_devices;
#ifdef CONFIG_EROFS_FS_ONDEMAND
                ctx->fsid = kstrdup(param->string, GFP_KERNEL);
                kfree(ctx->domain_id);
                ctx->domain_id = kstrdup(param->string, GFP_KERNEL);
                errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
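/*
 * NFS export support: the inode number stored in a file handle is used
 * directly as the EROFS nid, so erofs_iget() can map it straight back to an
 * inode.
 */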
static struct inode *erofs_nfs_get_inode(struct super_block *sb,
                                         u64 ino, u32 generation)
        return erofs_iget(sb, ino);

static struct dentry *erofs_fh_to_dentry(struct super_block *sb,
                struct fid *fid, int fh_len, int fh_type)
        return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
                                    erofs_nfs_get_inode);

static struct dentry *erofs_fh_to_parent(struct super_block *sb,
                struct fid *fid, int fh_len, int fh_type)
        return generic_fh_to_parent(sb, fid, fh_len, fh_type,
                                    erofs_nfs_get_inode);

static struct dentry *erofs_get_parent(struct dentry *child)
        err = erofs_namei(d_inode(child), &dotdot_name, &nid, &d_type);
        return d_obtain_alias(erofs_iget(child->d_sb, nid));

static const struct export_operations erofs_export_ops = {
        .fh_to_dentry = erofs_fh_to_dentry,
        .fh_to_parent = erofs_fh_to_parent,
        .get_parent = erofs_get_parent,

static int erofs_fc_fill_pseudo_super(struct super_block *sb, struct fs_context *fc)
        static const struct tree_descr empty_descr = {""};

        return simple_fill_super(sb, EROFS_SUPER_MAGIC, &empty_descr);
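/*
 * Fill a superblock at mount time: take over the settings collected in the
 * fs_context, pick the block size (PAGE_SIZE for fscache mode, otherwise
 * negotiated with the block device), read the on-disk superblock, validate
 * DAX constraints, instantiate the root and packed inodes, and register the
 * shrinker, managed cache, xattr prefixes and sysfs entries.
 */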
static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
        struct erofs_sb_info *sbi;
        struct erofs_fs_context *ctx = fc->fs_private;

        sb->s_magic = EROFS_SUPER_MAGIC;
        sb->s_flags |= SB_RDONLY | SB_NOATIME;
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_op = &erofs_sops;

        sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);

        sbi->devs = ctx->devs;
        sbi->fsid = ctx->fsid;
        sbi->domain_id = ctx->domain_id;
        ctx->domain_id = NULL;

        sbi->blkszbits = PAGE_SHIFT;
        if (erofs_is_fscache_mode(sb)) {
                sb->s_blocksize = PAGE_SIZE;
                sb->s_blocksize_bits = PAGE_SHIFT;

                err = erofs_fscache_register_fs(sb);

                err = super_setup_bdi(sb);

                if (!sb_set_blocksize(sb, PAGE_SIZE)) {
                        errorfc(fc, "failed to set initial blksize");

                sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev,

        err = erofs_read_superblock(sb);

        if (sb->s_blocksize_bits != sbi->blkszbits) {
                if (erofs_is_fscache_mode(sb)) {
                        errorfc(fc, "unsupported blksize for fscache mode");
                if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) {
                        errorfc(fc, "failed to set erofs blksize");

        if (test_opt(&sbi->opt, DAX_ALWAYS)) {
                        errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
                        clear_opt(&sbi->opt, DAX_ALWAYS);
                } else if (sbi->blkszbits != PAGE_SHIFT) {
                        errorfc(fc, "unsupported blocksize for DAX");
                        clear_opt(&sbi->opt, DAX_ALWAYS);

        sb->s_xattr = erofs_xattr_handlers;
        sb->s_export_op = &erofs_export_ops;

        if (test_opt(&sbi->opt, POSIX_ACL))
                sb->s_flags |= SB_POSIXACL;
                sb->s_flags &= ~SB_POSIXACL;

#ifdef CONFIG_EROFS_FS_ZIP
        xa_init(&sbi->managed_pslots);

        inode = erofs_iget(sb, ROOT_NID(sbi));
                return PTR_ERR(inode);

        if (!S_ISDIR(inode->i_mode)) {
                erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
                          ROOT_NID(sbi), inode->i_mode);

        sb->s_root = d_make_root(inode);

        erofs_shrinker_register(sb);
        if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) {
                sbi->packed_inode = erofs_iget(sb, sbi->packed_nid);
                if (IS_ERR(sbi->packed_inode)) {
                        err = PTR_ERR(sbi->packed_inode);
                        sbi->packed_inode = NULL;

        err = erofs_init_managed_cache(sb);

        err = erofs_xattr_prefixes_init(sb);

        err = erofs_register_sysfs(sb);

        erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi));

static int erofs_fc_anon_get_tree(struct fs_context *fc)
        return get_tree_nodev(fc, erofs_fc_fill_pseudo_super);

static int erofs_fc_get_tree(struct fs_context *fc)
        struct erofs_fs_context *ctx = fc->fs_private;

        if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && ctx->fsid)
                return get_tree_nodev(fc, erofs_fc_fill_super);

        return get_tree_bdev(fc, erofs_fc_fill_super);
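/*
 * Apply new mount options on remount.  EROFS is read-only, so SB_RDONLY is
 * always enforced and fsid/domain_id changes are ignored.
 */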
static int erofs_fc_reconfigure(struct fs_context *fc)
        struct super_block *sb = fc->root->d_sb;
        struct erofs_sb_info *sbi = EROFS_SB(sb);
        struct erofs_fs_context *ctx = fc->fs_private;

        DBG_BUGON(!sb_rdonly(sb));

        if (ctx->fsid || ctx->domain_id)
                erofs_info(sb, "ignoring reconfiguration for fsid|domain_id.");

        if (test_opt(&ctx->opt, POSIX_ACL))
                fc->sb_flags |= SB_POSIXACL;
                fc->sb_flags &= ~SB_POSIXACL;

        fc->sb_flags |= SB_RDONLY;

static int erofs_release_device_info(int id, void *ptr, void *data)
        struct erofs_device_info *dif = ptr;

        fs_put_dax(dif->dax_dev, NULL);
                blkdev_put(dif->bdev, &erofs_fs_type);
        erofs_fscache_unregister_cookie(dif->fscache);

static void erofs_free_dev_context(struct erofs_dev_context *devs)
        idr_for_each(&devs->tree, &erofs_release_device_info, NULL);
        idr_destroy(&devs->tree);

static void erofs_fc_free(struct fs_context *fc)
        struct erofs_fs_context *ctx = fc->fs_private;

        erofs_free_dev_context(ctx->devs);
        kfree(ctx->domain_id);

static const struct fs_context_operations erofs_context_ops = {
        .parse_param = erofs_fc_parse_param,
        .get_tree = erofs_fc_get_tree,
        .reconfigure = erofs_fc_reconfigure,
        .free = erofs_fc_free,

static const struct fs_context_operations erofs_anon_context_ops = {
        .get_tree = erofs_fc_anon_get_tree,

static int erofs_init_fs_context(struct fs_context *fc)
        struct erofs_fs_context *ctx;

        /* pseudo mount for anon inodes */
        if (fc->sb_flags & SB_KERNMOUNT) {
                fc->ops = &erofs_anon_context_ops;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);

        fc->fs_private = ctx;

        idr_init(&ctx->devs->tree);
        init_rwsem(&ctx->devs->rwsem);
        erofs_default_options(ctx);
        fc->ops = &erofs_context_ops;
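/*
 * Tear down a superblock: kernel-internal pseudo mounts and fscache (nodev)
 * mounts are killed differently from block-device mounts, after which the
 * per-sb device context, DAX reference and fscache registration are
 * released.
 */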
static void erofs_kill_sb(struct super_block *sb)
        struct erofs_sb_info *sbi;

        /* pseudo mount for anon inodes */
        if (sb->s_flags & SB_KERNMOUNT) {

        if (erofs_is_fscache_mode(sb))
                kill_block_super(sb);

        erofs_free_dev_context(sbi->devs);
        fs_put_dax(sbi->dax_dev, NULL);
        erofs_fscache_unregister_fs(sb);
        kfree(sbi->domain_id);
        sb->s_fs_info = NULL;
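/* Drop the objects acquired in erofs_fc_fill_super() before the sb goes away. */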
static void erofs_put_super(struct super_block *sb)
        struct erofs_sb_info *const sbi = EROFS_SB(sb);

        erofs_unregister_sysfs(sb);
        erofs_shrinker_unregister(sb);
        erofs_xattr_prefixes_cleanup(sb);
#ifdef CONFIG_EROFS_FS_ZIP
        iput(sbi->managed_cache);
        sbi->managed_cache = NULL;
        iput(sbi->packed_inode);
        sbi->packed_inode = NULL;
        erofs_free_dev_context(sbi->devs);
        erofs_fscache_unregister_fs(sb);

struct file_system_type erofs_fs_type = {
        .owner = THIS_MODULE,
        .init_fs_context = erofs_init_fs_context,
        .kill_sb = erofs_kill_sb,
        .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,

MODULE_ALIAS_FS("erofs");
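/*
 * Module init: create the inode cache, then bring up the shrinker, the
 * decompressors, per-CPU buffers, the zip subsystem and sysfs before
 * registering the filesystem; the error path below unwinds in reverse
 * order.
 */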
static int __init erofs_module_init(void)
        erofs_check_ondisk_layout_definitions();

        erofs_inode_cachep = kmem_cache_create("erofs_inode",
                        sizeof(struct erofs_inode), 0,
                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
                        erofs_inode_init_once);
        if (!erofs_inode_cachep)

        err = erofs_init_shrinker();

        err = z_erofs_lzma_init();

        err = z_erofs_deflate_init();

        erofs_pcpubuf_init();
        err = z_erofs_init_zip_subsystem();

        err = erofs_init_sysfs();

        err = register_filesystem(&erofs_fs_type);

        z_erofs_exit_zip_subsystem();
        z_erofs_deflate_exit();
        erofs_exit_shrinker();
        kmem_cache_destroy(erofs_inode_cachep);

static void __exit erofs_module_exit(void)
        unregister_filesystem(&erofs_fs_type);

        /* Ensure all RCU free inodes / pclusters are safe to be destroyed. */

        z_erofs_exit_zip_subsystem();
        z_erofs_deflate_exit();
        erofs_exit_shrinker();
        kmem_cache_destroy(erofs_inode_cachep);
        erofs_pcpubuf_exit();
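/*
 * statfs(): report totals only; EROFS is read-only, so free and available
 * blocks are always zero.
 */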
static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
        struct super_block *sb = dentry->d_sb;
        struct erofs_sb_info *sbi = EROFS_SB(sb);

        if (!erofs_is_fscache_mode(sb))
                id = huge_encode_dev(sb->s_bdev->bd_dev);

        buf->f_type = sb->s_magic;
        buf->f_bsize = sb->s_blocksize;
        buf->f_blocks = sbi->total_blocks;
        buf->f_bfree = buf->f_bavail = 0;

        buf->f_files = ULLONG_MAX;
        buf->f_ffree = ULLONG_MAX - sbi->inos;

        buf->f_namelen = EROFS_NAME_LEN;

        buf->f_fsid = u64_to_fsid(id);
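/* Show the effective mount options in /proc/mounts and friends. */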
static int erofs_show_options(struct seq_file *seq, struct dentry *root)
        struct erofs_sb_info *sbi = EROFS_SB(root->d_sb);
        struct erofs_mount_opts *opt = &sbi->opt;

#ifdef CONFIG_EROFS_FS_XATTR
        if (test_opt(opt, XATTR_USER))
                seq_puts(seq, ",user_xattr");
                seq_puts(seq, ",nouser_xattr");
#ifdef CONFIG_EROFS_FS_POSIX_ACL
        if (test_opt(opt, POSIX_ACL))
                seq_puts(seq, ",acl");
                seq_puts(seq, ",noacl");
#ifdef CONFIG_EROFS_FS_ZIP
        if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED)
                seq_puts(seq, ",cache_strategy=disabled");
        else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD)
                seq_puts(seq, ",cache_strategy=readahead");
        else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND)
                seq_puts(seq, ",cache_strategy=readaround");
        if (test_opt(opt, DAX_ALWAYS))
                seq_puts(seq, ",dax=always");
        if (test_opt(opt, DAX_NEVER))
                seq_puts(seq, ",dax=never");
#ifdef CONFIG_EROFS_FS_ONDEMAND
                seq_printf(seq, ",fsid=%s", sbi->fsid);
                seq_printf(seq, ",domain_id=%s", sbi->domain_id);

const struct super_operations erofs_sops = {
        .put_super = erofs_put_super,
        .alloc_inode = erofs_alloc_inode,
        .free_inode = erofs_free_inode,
        .statfs = erofs_statfs,
        .show_options = erofs_show_options,

module_init(erofs_module_init);
module_exit(erofs_module_exit);

MODULE_DESCRIPTION("Enhanced ROM File System");
MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
MODULE_LICENSE("GPL");