1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (C) 2022, Alibaba Cloud
4 * Copyright (C) 2022, Bytedance Inc. All rights reserved.
6 #include <linux/fscache.h>
/* Protects erofs_domain_list and the lifetime of erofs_pseudo_mnt. */
9 static DEFINE_MUTEX(erofs_domain_list_lock);
/* Serializes lookup/creation of per-domain cookies (see erofs_domain_register_cookie()). */
10 static DEFINE_MUTEX(erofs_domain_cookies_lock);
/* All currently registered fscache domains (shared-domain mode). */
11 static LIST_HEAD(erofs_domain_list);
/* Pseudo mount hosting anonymous inodes for domain cookies; reset to NULL when the last domain goes away. */
12 static struct vfsmount *erofs_pseudo_mnt;
/*
 * An in-flight fscache read request covering [start, start + len) of
 * @mapping.  A large read may be split into sub-requests chained to a
 * @primary request (see erofs_fscache_req_chain()); sub-requests hold a
 * reference on the primary.
 * NOTE(review): a refcount field is used by erofs_fscache_req_put() but
 * is elided from this view — confirm against the full source.
 */
14 struct erofs_fscache_request {
15 struct erofs_fscache_request *primary; /* NULL for a primary request */
16 struct netfs_cache_resources cache_resources; /* cache backend I/O state */
17 struct address_space *mapping; /* The mapping being accessed */
18 loff_t start; /* Start position */
19 size_t len; /* Length of the request */
20 size_t submitted; /* Bytes submitted so far */
21 short error; /* 0 or error that occurred */
/*
 * Allocate and initialise a read request covering [start, start + len)
 * of @mapping.  Returns ERR_PTR(-ENOMEM) on allocation failure.  The
 * request starts with one reference, dropped via erofs_fscache_req_put().
 * NOTE(review): the NULL check after kzalloc() and the start/len
 * assignments appear elided from this view.
 */
25 static struct erofs_fscache_request *erofs_fscache_req_alloc(struct address_space *mapping,
26 loff_t start, size_t len)
28 struct erofs_fscache_request *req;
30 req = kzalloc(sizeof(struct erofs_fscache_request), GFP_KERNEL);
32 return ERR_PTR(-ENOMEM);
34 req->mapping = mapping;
37 refcount_set(&req->ref, 1);
/*
 * Return a request to use for the next submission of @primary.  The
 * first submission reuses @primary itself (with an extra ref); later
 * submissions get a freshly allocated sub-request that points back at
 * @primary and pins it with a reference until the sub-request completes.
 */
42 static struct erofs_fscache_request *erofs_fscache_req_chain(struct erofs_fscache_request *primary,
45 struct erofs_fscache_request *req;
47 /* use primary request for the first submission */
48 if (!primary->submitted) {
49 refcount_inc(&primary->ref);
/* otherwise chain a new sub-request starting at the unsubmitted tail */
53 req = erofs_fscache_req_alloc(primary->mapping,
54 primary->start + primary->submitted, len);
56 req->primary = primary;
57 refcount_inc(&primary->ref);
/*
 * Final completion of a primary request: walk every folio in the page
 * cache range the request covers and mark it uptodate unless any
 * sub-request recorded an error.
 * NOTE(review): the folio unlock and the rcu/xas locking around the
 * walk appear elided from this view — confirm against the full source.
 */
62 static void erofs_fscache_req_complete(struct erofs_fscache_request *req)
65 bool failed = req->error;
66 pgoff_t start_page = req->start / PAGE_SIZE;
/* inclusive index of the last page covered by [start, start + len) */
67 pgoff_t last_page = ((req->start + req->len) / PAGE_SIZE) - 1;
69 XA_STATE(xas, &req->mapping->i_pages, start_page);
72 xas_for_each(&xas, folio, last_page) {
73 if (xas_retry(&xas, folio))
76 folio_mark_uptodate(folio);
/*
 * Drop a reference on @req.  On the last put: end the cache backend
 * operation, then either complete the request (primary) or forward the
 * put to the primary this sub-request was chained to.
 * NOTE(review): the primary/sub-request branch and the final kfree()
 * appear elided from this view.
 */
82 static void erofs_fscache_req_put(struct erofs_fscache_request *req)
84 if (refcount_dec_and_test(&req->ref)) {
85 if (req->cache_resources.ops)
86 req->cache_resources.ops->end_operation(&req->cache_resources);
88 erofs_fscache_req_complete(req);
/* a sub-request's last put releases its ref on the primary */
90 erofs_fscache_req_put(req->primary);
/*
 * fscache_read() completion callback (@priv is the request passed at
 * submission).  On error, record it on the primary request if this is a
 * chained sub-request, otherwise on the request itself; then drop the
 * reference taken for the submission.
 */
95 static void erofs_fscache_subreq_complete(void *priv,
96 ssize_t transferred_or_error, bool was_async)
98 struct erofs_fscache_request *req = priv;
100 if (IS_ERR_VALUE(transferred_or_error)) {
102 req->primary->error = transferred_or_error;
104 req->error = transferred_or_error;
106 erofs_fscache_req_put(req);
110 * Read data from fscache (cookie, pstart, len), and fill the read data into
111 * page cache described by (req->mapping, lstart, len). @pstart describes the
112 * start physical address in the cache file.
114 static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
115 struct erofs_fscache_request *req, loff_t pstart, size_t len)
117 enum netfs_io_source source;
118 struct super_block *sb = req->mapping->host->i_sb;
119 struct netfs_cache_resources *cres = &req->cache_resources;
120 struct iov_iter iter;
/* logical start = unsubmitted tail of the request */
121 loff_t lstart = req->start + req->submitted;
/* caller must stay within the request's remaining length */
125 DBG_BUGON(len > req->len - req->submitted);
127 ret = fscache_begin_read_operation(cres, cookie);
/* NOTE(review): the submission loop header and several error paths are elided from this view */
132 loff_t sstart = pstart + done;
133 size_t slen = len - done;
134 unsigned long flags = 1 << NETFS_SREQ_ONDEMAND;
/* ask the backend how much contiguous data is readable at sstart; slen is clamped in place */
136 source = cres->ops->prepare_ondemand_read(cres,
137 sstart, &slen, LLONG_MAX, &flags, 0);
138 if (WARN_ON(slen == 0))
139 source = NETFS_INVALID_READ;
140 if (source != NETFS_READ_FROM_CACHE) {
141 erofs_err(sb, "failed to fscache prepare_read (source %d)", source);
/* extra ref for the async completion; dropped in erofs_fscache_subreq_complete() */
145 refcount_inc(&req->ref);
146 iov_iter_xarray(&iter, READ, &req->mapping->i_pages,
147 lstart + done, slen);
149 ret = fscache_read(cres, sstart, &iter, NETFS_READ_HOLE_FAIL,
150 erofs_fscache_subreq_complete, req);
/* -EIOCBQUEUED means the read was queued asynchronously — not an error */
151 if (ret == -EIOCBQUEUED)
154 erofs_err(sb, "failed to fscache_read (ret %d)", ret);
/* the loop must have covered the whole requested range */
160 DBG_BUGON(done != len);
/*
 * ->read_folio() for metadata address spaces: map the folio's position
 * to a backing device/cookie, allocate a request for exactly this folio
 * and read it from the cache file asynchronously.
 * NOTE(review): the error-handling paths between these calls are elided
 * from this view.
 */
164 static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
167 struct super_block *sb = folio_mapping(folio)->host->i_sb;
168 struct erofs_fscache_request *req;
169 struct erofs_map_dev mdev = {
171 .m_pa = folio_pos(folio),
174 ret = erofs_map_dev(sb, &mdev);
180 req = erofs_fscache_req_alloc(folio_mapping(folio),
181 folio_pos(folio), folio_size(folio));
187 ret = erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
188 req, mdev.m_pa, folio_size(folio));
192 erofs_fscache_req_put(req);
/*
 * Handle one contiguous slice of @primary starting at
 * primary->start + primary->submitted, advancing primary->submitted by
 * the amount handled.  Three cases:
 *  - inline (EROFS_MAP_META) data: copy from the metadata buffer and
 *    zero-pad the rest of the page;
 *  - unmapped extent (hole): zero-fill the whole remaining count;
 *  - mapped extent: issue an async cache read via a chained request.
 */
196 static int erofs_fscache_data_read_slice(struct erofs_fscache_request *primary)
198 struct address_space *mapping = primary->mapping;
199 struct inode *inode = mapping->host;
200 struct super_block *sb = inode->i_sb;
201 struct erofs_fscache_request *req;
202 struct erofs_map_blocks map;
203 struct erofs_map_dev mdev;
204 struct iov_iter iter;
205 loff_t pos = primary->start + primary->submitted;
210 ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
/* inline data lives in the metadata block rather than the data blob */
214 if (map.m_flags & EROFS_MAP_META) {
215 struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
220 /* For tail packing layout, the offset may be non-zero. */
221 offset = erofs_blkoff(map.m_pa);
222 blknr = erofs_blknr(map.m_pa);
225 src = erofs_read_metabuf(&buf, sb, blknr, EROFS_KMAP);
229 iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, PAGE_SIZE);
230 if (copy_to_iter(src + offset, size, &iter) != size) {
231 erofs_put_metabuf(&buf);
/* zero the remainder of the page beyond the inline data */
234 iov_iter_zero(PAGE_SIZE - size, &iter);
235 erofs_put_metabuf(&buf);
236 primary->submitted += PAGE_SIZE;
240 count = primary->len - primary->submitted;
/* hole: no backing data, just zero-fill the page cache range */
241 if (!(map.m_flags & EROFS_MAP_MAPPED)) {
242 iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, count);
243 iov_iter_zero(count, &iter);
244 primary->submitted += count;
/* clamp to the end of the current logical extent */
248 count = min_t(size_t, map.m_llen - (pos - map.m_la), count);
249 DBG_BUGON(!count || count % PAGE_SIZE);
251 mdev = (struct erofs_map_dev) {
252 .m_deviceid = map.m_deviceid,
255 ret = erofs_map_dev(sb, &mdev);
259 req = erofs_fscache_req_chain(primary, count);
263 ret = erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
264 req, mdev.m_pa + (pos - map.m_la), count);
265 erofs_fscache_req_put(req);
266 primary->submitted += count;
/*
 * Drive erofs_fscache_data_read_slice() until the whole request has
 * been submitted or a slice reports an error.
 */
270 static int erofs_fscache_data_read(struct erofs_fscache_request *req)
275 ret = erofs_fscache_data_read_slice(req);
278 } while (!ret && req->submitted < req->len);
/*
 * ->read_folio() for regular-file data: build a request for this single
 * folio, run the slice-based read, then drop the initial reference
 * (completion unlocks/uptodates the folio).
 */
283 static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
285 struct erofs_fscache_request *req;
288 req = erofs_fscache_req_alloc(folio_mapping(folio),
289 folio_pos(folio), folio_size(folio));
295 ret = erofs_fscache_data_read(req);
296 erofs_fscache_req_put(req);
/*
 * ->readahead(): cover the whole readahead window with one request,
 * consume the folios from @rac (their refs are dropped on request
 * completion), then kick off the read.
 */
300 static void erofs_fscache_readahead(struct readahead_control *rac)
302 struct erofs_fscache_request *req;
/* nothing to do for an empty window */
304 if (!readahead_count(rac))
307 req = erofs_fscache_req_alloc(rac->mapping,
308 readahead_pos(rac), readahead_length(rac));
312 /* The request completion will drop refs on the folios. */
313 while (readahead_folio(rac))
316 erofs_fscache_data_read(req);
317 erofs_fscache_req_put(req);
/* Address-space ops for metadata pseudo inodes (read-only, no readahead). */
320 static const struct address_space_operations erofs_fscache_meta_aops = {
321 .read_folio = erofs_fscache_meta_read_folio,
/* Address-space ops for regular file data backed by fscache. */
324 const struct address_space_operations erofs_fscache_access_aops = {
325 .read_folio = erofs_fscache_read_folio,
326 .readahead = erofs_fscache_readahead,
/*
 * Drop a reference on @domain under erofs_domain_list_lock.  The last
 * put unlinks it from erofs_domain_list, tears down the pseudo mount
 * once no domains remain, relinquishes the fscache volume and frees the
 * domain id.  Note the volume is relinquished after dropping the lock.
 * NOTE(review): the kfree() of the domain itself appears elided here.
 */
329 static void erofs_fscache_domain_put(struct erofs_domain *domain)
333 mutex_lock(&erofs_domain_list_lock);
334 if (refcount_dec_and_test(&domain->ref)) {
335 list_del(&domain->list);
336 if (list_empty(&erofs_domain_list)) {
337 kern_unmount(erofs_pseudo_mnt);
338 erofs_pseudo_mnt = NULL;
340 mutex_unlock(&erofs_domain_list_lock);
341 fscache_relinquish_volume(domain->volume, NULL, false);
342 kfree(domain->domain_id);
346 mutex_unlock(&erofs_domain_list_lock);
/*
 * Acquire the fscache volume backing this superblock, named
 * "erofs,<domain_id>" (shared domain) or "erofs,<fsid>".  A NULL return
 * from fscache_acquire_volume() (collision) is mapped to -EOPNOTSUPP.
 * NOTE(review): the kasprintf() NULL check and kfree(name) cleanup
 * appear elided from this view.
 */
349 static int erofs_fscache_register_volume(struct super_block *sb)
351 struct erofs_sb_info *sbi = EROFS_SB(sb);
352 char *domain_id = sbi->domain_id;
353 struct fscache_volume *volume;
357 name = kasprintf(GFP_KERNEL, "erofs,%s",
358 domain_id ? domain_id : sbi->fsid);
362 volume = fscache_acquire_volume(name, NULL, NULL, 0);
363 if (IS_ERR_OR_NULL(volume)) {
364 erofs_err(sb, "failed to register volume for %s", name);
365 ret = volume ? PTR_ERR(volume) : -EOPNOTSUPP;
369 sbi->volume = volume;
/*
 * Create a brand-new shared domain for this superblock: duplicate the
 * domain id, register the backing volume, lazily create the global
 * pseudo mount, then publish the domain on erofs_domain_list with an
 * initial reference.  Caller holds erofs_domain_list_lock (see
 * erofs_fscache_register_domain()).
 * NOTE(review): allocation checks and the error-unwind labels are
 * elided from this view.
 */
374 static int erofs_fscache_init_domain(struct super_block *sb)
377 struct erofs_domain *domain;
378 struct erofs_sb_info *sbi = EROFS_SB(sb);
380 domain = kzalloc(sizeof(struct erofs_domain), GFP_KERNEL);
384 domain->domain_id = kstrdup(sbi->domain_id, GFP_KERNEL);
385 if (!domain->domain_id) {
390 err = erofs_fscache_register_volume(sb);
/* first domain ever: mount the pseudo fs hosting anon inodes */
394 if (!erofs_pseudo_mnt) {
395 erofs_pseudo_mnt = kern_mount(&erofs_fs_type);
396 if (IS_ERR(erofs_pseudo_mnt)) {
397 err = PTR_ERR(erofs_pseudo_mnt);
402 domain->volume = sbi->volume;
403 refcount_set(&domain->ref, 1);
404 list_add(&domain->list, &erofs_domain_list);
405 sbi->domain = domain;
/* error unwind: free the duplicated id (and presumably the domain) */
408 kfree(domain->domain_id);
/*
 * Attach this superblock to the shared domain named sbi->domain_id:
 * reuse an existing domain from erofs_domain_list (taking a reference)
 * or initialise a new one.  All under erofs_domain_list_lock.
 */
413 static int erofs_fscache_register_domain(struct super_block *sb)
416 struct erofs_domain *domain;
417 struct erofs_sb_info *sbi = EROFS_SB(sb);
419 mutex_lock(&erofs_domain_list_lock);
420 list_for_each_entry(domain, &erofs_domain_list, list) {
421 if (!strcmp(domain->domain_id, sbi->domain_id)) {
422 sbi->domain = domain;
423 sbi->volume = domain->volume;
424 refcount_inc(&domain->ref);
425 mutex_unlock(&erofs_domain_list_lock);
/* no match: create the domain while still holding the lock */
429 err = erofs_fscache_init_domain(sb);
430 mutex_unlock(&erofs_domain_list_lock);
/*
 * Acquire and start using an fscache cookie named @name on this
 * superblock's volume, wrapped in a freshly allocated erofs_fscache
 * context.  With EROFS_REG_COOKIE_NEED_INODE, also create an anonymous
 * inode whose mapping uses erofs_fscache_meta_aops so metadata can be
 * paged in through the cookie.  Error paths unwind the cookie.
 * NOTE(review): several checks and the final return/kfree are elided
 * from this view.
 */
435 struct erofs_fscache *erofs_fscache_acquire_cookie(struct super_block *sb,
439 struct fscache_volume *volume = EROFS_SB(sb)->volume;
440 struct erofs_fscache *ctx;
441 struct fscache_cookie *cookie;
444 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
446 return ERR_PTR(-ENOMEM);
448 cookie = fscache_acquire_cookie(volume, FSCACHE_ADV_WANT_CACHE_SIZE,
449 name, strlen(name), NULL, 0, 0);
451 erofs_err(sb, "failed to get cookie for %s", name);
/* mark the cookie in use (read-only: no write access requested) */
456 fscache_use_cookie(cookie, false);
457 ctx->cookie = cookie;
459 if (flags & EROFS_REG_COOKIE_NEED_INODE) {
460 struct inode *const inode = new_inode(sb);
463 erofs_err(sb, "failed to get anon inode for %s", name);
/* no real size limit: the cookie is addressed by physical offset */
469 inode->i_size = OFFSET_MAX;
470 inode->i_mapping->a_ops = &erofs_fscache_meta_aops;
471 mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
/* error unwind: stop using and relinquish the cookie */
479 fscache_unuse_cookie(ctx->cookie, NULL, NULL);
480 fscache_relinquish_cookie(ctx->cookie, false);
/*
 * Release an erofs_fscache context: stop using and relinquish its
 * cookie.  NOTE(review): freeing of ctx->name/ctx appears elided here.
 */
486 static void erofs_fscache_relinquish_cookie(struct erofs_fscache *ctx)
488 fscache_unuse_cookie(ctx->cookie, NULL, NULL);
489 fscache_relinquish_cookie(ctx->cookie, false);
/*
 * Create a domain cookie: acquire the plain cookie, duplicate @name for
 * later lookup, and anchor the context in an anonymous inode on the
 * pseudo mount (inode->i_private points back at the context) so other
 * mounts of the same domain can find and share it.  Takes a reference
 * on the domain.  Caller holds erofs_domain_cookies_lock.
 */
496 struct erofs_fscache *erofs_fscache_domain_init_cookie(struct super_block *sb,
502 struct erofs_fscache *ctx;
503 struct erofs_domain *domain = EROFS_SB(sb)->domain;
505 ctx = erofs_fscache_acquire_cookie(sb, name, flags);
509 ctx->name = kstrdup(name, GFP_KERNEL);
515 inode = new_inode(erofs_pseudo_mnt->mnt_sb);
521 ctx->domain = domain;
522 ctx->anon_inode = inode;
523 inode->i_private = ctx;
524 refcount_inc(&domain->ref);
/* error unwind: drop the cookie/context acquired above */
527 erofs_fscache_relinquish_cookie(ctx);
/*
 * Look up (or create) a cookie named @name inside this superblock's
 * shared domain.  Scans the pseudo-sb inode list for an existing
 * context with matching domain and name; with
 * EROFS_REG_COOKIE_NEED_NOEXIST set, a match is an -EEXIST error
 * (used to enforce fsid uniqueness).  If nothing matches, a new domain
 * cookie is initialised.  erofs_domain_cookies_lock serializes with
 * concurrent registration/unregistration.
 * NOTE(review): the reuse path for an existing match appears elided
 * from this view.
 */
532 struct erofs_fscache *erofs_domain_register_cookie(struct super_block *sb,
537 struct erofs_fscache *ctx;
538 struct erofs_domain *domain = EROFS_SB(sb)->domain;
539 struct super_block *psb = erofs_pseudo_mnt->mnt_sb;
541 mutex_lock(&erofs_domain_cookies_lock);
542 spin_lock(&psb->s_inode_list_lock);
543 list_for_each_entry(inode, &psb->s_inodes, i_sb_list) {
544 ctx = inode->i_private;
545 if (!ctx || ctx->domain != domain || strcmp(ctx->name, name))
547 if (!(flags & EROFS_REG_COOKIE_NEED_NOEXIST)) {
550 erofs_err(sb, "%s already exists in domain %s", name,
552 ctx = ERR_PTR(-EEXIST);
554 spin_unlock(&psb->s_inode_list_lock);
555 mutex_unlock(&erofs_domain_cookies_lock);
/* not found: create it (inode list lock dropped, cookies lock still held) */
558 spin_unlock(&psb->s_inode_list_lock);
559 ctx = erofs_fscache_domain_init_cookie(sb, name, flags);
560 mutex_unlock(&erofs_domain_cookies_lock);
/*
 * Register a cookie named @name: route to the shared-domain path when a
 * domain_id is configured, otherwise acquire a plain per-volume cookie.
 */
564 struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
568 if (EROFS_SB(sb)->domain_id)
569 return erofs_domain_register_cookie(sb, name, flags);
570 return erofs_fscache_acquire_cookie(sb, name, flags);
/*
 * Unregister @ctx.  For a domain cookie: drop the anonymous inode under
 * erofs_domain_cookies_lock and, when ours was the last reference to it
 * (i_count == 1 before iput), relinquish the cookie and release the
 * domain reference.
 * NOTE(review): the NULL-ctx guard and the non-domain path appear
 * elided from this view.
 */
573 void erofs_fscache_unregister_cookie(struct erofs_fscache *ctx)
576 struct erofs_domain *domain;
580 domain = ctx->domain;
582 mutex_lock(&erofs_domain_cookies_lock);
/* last user iff no one else still holds the anon inode */
583 drop = atomic_read(&ctx->anon_inode->i_count) == 1;
584 iput(ctx->anon_inode);
585 mutex_unlock(&erofs_domain_cookies_lock);
590 erofs_fscache_relinquish_cookie(ctx);
591 erofs_fscache_domain_put(domain);
/*
 * Mount-time setup: register either a shared domain (when domain_id is
 * set) or a private volume, then register the primary data blob cookie
 * keyed by sbi->fsid.  Returns 0 on success or a negative errno.
 */
594 int erofs_fscache_register_fs(struct super_block *sb)
597 struct erofs_sb_info *sbi = EROFS_SB(sb);
598 struct erofs_fscache *fscache;
602 ret = erofs_fscache_register_domain(sb);
604 ret = erofs_fscache_register_volume(sb);
609 * When shared domain is enabled, using NEED_NOEXIST to guarantee
610 * the primary data blob (aka fsid) is unique in the shared domain.
612 * For non-shared-domain case, fscache_acquire_volume() invoked by
613 * erofs_fscache_register_volume() has already guaranteed
614 * the uniqueness of primary data blob.
616 * Acquired domain/volume will be relinquished in kill_sb() on error.
618 flags = EROFS_REG_COOKIE_NEED_INODE;
620 flags |= EROFS_REG_COOKIE_NEED_NOEXIST;
621 fscache = erofs_fscache_register_cookie(sb, sbi->fsid, flags);
623 return PTR_ERR(fscache);
625 sbi->s_fscache = fscache;
/*
 * Mount-teardown counterpart of erofs_fscache_register_fs(): drop the
 * primary cookie, then release the domain reference (shared-domain
 * mode) or relinquish the private volume.
 * NOTE(review): the domain-vs-volume branch and the clearing of
 * sbi->domain/sbi->volume appear elided from this view.
 */
629 void erofs_fscache_unregister_fs(struct super_block *sb)
631 struct erofs_sb_info *sbi = EROFS_SB(sb);
633 erofs_fscache_unregister_cookie(sbi->s_fscache);
636 erofs_fscache_domain_put(sbi->domain);
638 fscache_relinquish_volume(sbi->volume, NULL, false);
640 sbi->s_fscache = NULL;