// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include "internal.h"

static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 loff_t start, loff_t end, loff_t *_next,
				 bool max_one_loop);

static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len,
			       loff_t i_size, bool caching);

#ifdef CONFIG_AFS_FSCACHE
/*
 * Mark a page as having been made dirty and thus needing writeback.  We also
 * need to pin the cache object to write back to.
 */
bool afs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	return fscache_dirty_folio(mapping, folio,
				   afs_vnode_cache(AFS_FS_I(mapping->host)));
}
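
/*
 * Note that a folio is about to be written to the cache: mark it so that the
 * write to the cache can be waited on, but only if caching is enabled.
 */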
static void afs_folio_start_fscache(bool caching, struct folio *folio)
{
	if (caching)
		folio_start_fscache(folio);
}
#else
static void afs_folio_start_fscache(bool caching, struct folio *folio)
{
}
#endif

/*
 * Flush out a conflicting write.  This may extend the write to the surrounding
 * pages if also dirty and contiguous to the conflicting region.
 */
static int afs_flush_conflicting_write(struct address_space *mapping,
				       struct folio *folio)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,
		.range_start	= folio_pos(folio),
		.range_end	= LLONG_MAX,
	};
	loff_t next;

	return afs_writepages_region(mapping, &wbc, folio_pos(folio), LLONG_MAX,
				     &next, true);
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct folio *folio;
	unsigned long priv;
	unsigned f, from;
	unsigned t, to;
	pgoff_t index;
	int ret;

	_enter("{%llx:%llu},%llx,%x",
	       vnode->fid.vid, vnode->fid.vnode, pos, len);

	/* Prefetch area to be written into the cache if we're caching this
	 * file.  We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	ret = netfs_write_begin(&vnode->netfs, file, mapping, pos, len, &folio, fsdata);
	if (ret < 0)
		return ret;

	index = folio_index(folio);
	from = pos - index * PAGE_SIZE;
	to = from + len;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	if (folio_test_private(folio)) {
		priv = (unsigned long)folio_get_private(folio);
		f = afs_folio_dirty_from(folio, priv);
		t = afs_folio_dirty_to(folio, priv);

		if (folio_test_writeback(folio)) {
			trace_afs_folio_dirty(vnode, tracepoint_string("alrdy"), folio);
			folio_unlock(folio);
			goto wait_for_writeback;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = folio_file_page(folio, pos / PAGE_SIZE);
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	trace_afs_folio_dirty(vnode, tracepoint_string("confl"), folio);
	folio_unlock(folio);

	ret = afs_flush_conflicting_write(mapping, folio);
	if (ret < 0)
		goto error;

wait_for_writeback:
	ret = folio_wait_writeback_killable(folio);
	if (ret < 0)
		goto error;

	ret = folio_lock_killable(folio);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	folio_put(folio);
	_leave(" = %d", ret);
	return ret;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *subpage, void *fsdata)
{
	struct folio *folio = page_folio(subpage);
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	unsigned long priv;
	unsigned int f, from = offset_in_folio(folio, pos);
	unsigned int t, to = from + copied;
	loff_t i_size, write_end_pos;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, folio_index(folio));

	if (!folio_test_uptodate(folio)) {
		if (copied < len) {
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	if (copied == 0)
		goto out;

	write_end_pos = pos + copied;

	i_size = i_size_read(&vnode->netfs.inode);
	if (write_end_pos > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->netfs.inode);
		if (write_end_pos > i_size)
			afs_set_i_size(vnode, write_end_pos);
		write_sequnlock(&vnode->cb_lock);
		fscache_update_cookie(afs_vnode_cache(vnode), NULL, &write_end_pos);
	}

	if (folio_test_private(folio)) {
		priv = (unsigned long)folio_get_private(folio);
		f = afs_folio_dirty_from(folio, priv);
		t = afs_folio_dirty_to(folio, priv);
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = afs_folio_dirty(folio, f, t);
		folio_change_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("dirty+"), folio);
	} else {
		priv = afs_folio_dirty(folio, from, to);
		folio_attach_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("dirty"), folio);
	}

	if (folio_mark_dirty(folio))
		_debug("dirtied %lx", folio_index(folio));

out:
	folio_unlock(folio);
	folio_put(folio);
	return copied;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	do {
		_debug("kill %lx (to %lx)", index, last);

		folio = filemap_get_folio(mapping, index);
		if (!folio) {
			next = index + 1;
			continue;
		}

		next = folio_next_index(folio);

		folio_clear_uptodate(folio);
		folio_end_writeback(folio);
		folio_lock(folio);
		generic_error_remove_page(mapping, &folio->page);
		folio_unlock(folio);
		folio_put(folio);

	} while (index = next, index <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	do {
		_debug("redirty %llx @%llx", len, start);

		folio = filemap_get_folio(mapping, index);
		if (!folio) {
			next = index + 1;
			continue;
		}

		next = index + folio_nr_pages(folio);
		folio_redirty_for_writepage(wbc, folio);
		folio_end_writeback(folio);
		folio_put(folio);
	} while (index = next, index <= last);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
	struct address_space *mapping = vnode->netfs.inode.i_mapping;
	struct folio *folio;
	pgoff_t end;

	XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

	_enter("{%llx:%llu},{%x @%llx}",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	rcu_read_lock();

	end = (start + len - 1) / PAGE_SIZE;
	xas_for_each(&xas, folio, end) {
		if (!folio_test_writeback(folio)) {
			kdebug("bad %x @%llx page %lx %lx",
			       len, start, folio_index(folio), end);
			ASSERT(folio_test_writeback(folio));
		}

		trace_afs_folio_dirty(vnode, tracepoint_string("clear"), folio);
		folio_detach_private(folio);
		folio_end_writeback(folio);
	}

	rcu_read_unlock();
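
	/* Now that these writes have completed, any writeback keys that are no
	 * longer in use can be discarded.
	 */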
	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}
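
/*
 * Post-processing of a successful store operation: update the vnode from the
 * returned status, note the pages as written back (unless we were laundering)
 * and account the stored bytes.
 */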
static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.pos, op->store.size);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};
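
/*
 * Write the data in the given iterator to the server, starting at the given
 * file position, using one of the cached writeback keys.
 */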
static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
			  bool laundering)
{
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	loff_t size = iov_iter_count(iter);
	int ret;

	_enter("%s{%llx:%llu.%u},%llx,%llx",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       size, pos);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->file[0].modification = true;
	op->store.pos = pos;
	op->store.size = size;
	op->store.laundering = laundering;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

try_next_key:
	afs_begin_vnode_operation(op);

	op->store.write_iter = iter;
	op->store.i_size = max(pos + size, vnode->netfs.remote_i_size);
	op->mtime = vnode->netfs.inode.i_mtime;

	afs_wait_for_operation(op);

	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}

/*
 * Extend the region to be written back to include subsequent contiguously
 * dirty pages if possible, but don't sleep while doing so.
 *
 * If this page holds new content, then we can include filler zeros in the
 * writeback.
 */
static void afs_extend_writeback(struct address_space *mapping,
				 struct afs_vnode *vnode,
				 long *_count,
				 loff_t start,
				 loff_t max_len,
				 bool new_content,
				 bool caching,
				 unsigned int *_len)
{
	struct folio_batch fbatch;
	struct folio *folio;
	unsigned long priv;
	unsigned int psize, filler = 0;
	unsigned int f, t;
	loff_t len = *_len;
	pgoff_t index = (start + len) / PAGE_SIZE;
	bool stop = true;
	unsigned int i;

	XA_STATE(xas, &mapping->i_pages, index);
	folio_batch_init(&fbatch);

	do {
		/* Firstly, we gather up a batch of contiguous dirty pages
		 * under the RCU read lock - but we can't clear the dirty flags
		 * there if any of those pages are mapped.
		 */
		rcu_read_lock();

		xas_for_each(&xas, folio, ULONG_MAX) {
			stop = true;
			if (xas_retry(&xas, folio))
				continue;
			if (xa_is_value(folio))
				break;
			if (folio_index(folio) != index)
				break;

			if (!folio_try_get_rcu(folio)) {
				xas_reset(&xas);
				break;
			}

			/* Has the page moved or been split? */
			if (unlikely(folio != xas_reload(&xas))) {
				folio_put(folio);
				break;
			}

			if (!folio_trylock(folio)) {
				folio_put(folio);
				break;
			}
			if (!folio_test_dirty(folio) ||
			    folio_test_writeback(folio) ||
			    folio_test_fscache(folio)) {
				folio_unlock(folio);
				folio_put(folio);
				break;
			}

			psize = folio_size(folio);
			priv = (unsigned long)folio_get_private(folio);
			f = afs_folio_dirty_from(folio, priv);
			t = afs_folio_dirty_to(folio, priv);
			if (f != 0 && !new_content) {
				folio_unlock(folio);
				folio_put(folio);
				break;
			}

			len += filler + t;
			filler = psize - t;
			if (len >= max_len || *_count <= 0)
				stop = true;
			else if (t == psize || new_content)
				stop = false;

			index += folio_nr_pages(folio);
			if (!folio_batch_add(&fbatch, folio))
				break;
			if (stop)
				break;
		}

		if (!stop)
			xas_pause(&xas);
		rcu_read_unlock();

		/* Now, if we obtained any folios, we can shift them to being
		 * writable and mark them for caching.
		 */
		if (!folio_batch_count(&fbatch))
			break;

		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			folio = fbatch.folios[i];
			trace_afs_folio_dirty(vnode, tracepoint_string("store+"), folio);

			if (!folio_clear_dirty_for_io(folio))
				BUG();
			if (folio_start_writeback(folio))
				BUG();
			afs_folio_start_fscache(caching, folio);

			*_count -= folio_nr_pages(folio);
			folio_unlock(folio);
		}
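
		/* Release our references on this batch of folios and give the
		 * scheduler a chance to run before gathering the next run of
		 * contiguous dirty folios.
		 */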
		folio_batch_release(&fbatch);
		cond_resched();
	} while (!stop);

	*_len = len;
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
						struct writeback_control *wbc,
						struct folio *folio,
						loff_t start, loff_t end)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	unsigned long priv;
	unsigned int offset, to, len, max_len;
	loff_t i_size = i_size_read(&vnode->netfs.inode);
	bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
	bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode));
	long count = wbc->nr_to_write;
	int ret;

	_enter(",%lx,%llx-%llx", folio_index(folio), start, end);

	if (folio_start_writeback(folio))
		BUG();
	afs_folio_start_fscache(caching, folio);

	count -= folio_nr_pages(folio);

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	priv = (unsigned long)folio_get_private(folio);
	offset = afs_folio_dirty_from(folio, priv);
	to = afs_folio_dirty_to(folio, priv);
	trace_afs_folio_dirty(vnode, tracepoint_string("store"), folio);

	len = to - offset;
	start += offset;
	if (start < i_size) {
		/* Trim the write to the EOF; the extra data is ignored.  Also
		 * put an upper limit on the size of a single storedata op.
		 */
		max_len = 65536 * 4096;
		max_len = min_t(unsigned long long, max_len, end - start + 1);
		max_len = min_t(unsigned long long, max_len, i_size - start);

		if (len < max_len &&
		    (to == folio_size(folio) || new_content))
			afs_extend_writeback(mapping, vnode, &count,
					     start, max_len, new_content,
					     caching, &len);
		len = min_t(loff_t, len, max_len);
	}

	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	folio_unlock(folio);

	if (start < i_size) {
		_debug("write back %x @%llx [%llx]", len, start, i_size);

		/* Speculatively write to the cache.  We have to fix this up
		 * later if the store fails.
		 */
		afs_write_to_cache(vnode, start, len, i_size, caching);

		iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, start, len);
		ret = afs_store_data(vnode, &iter, start, false);
	} else {
		_debug("write discard %x @%llx [%llx]", len, start, i_size);

		/* The dirty region was entirely beyond the EOF. */
		fscache_clear_page_bits(mapping, start, len, caching);
		afs_pages_written_back(vnode, start, len);
		ret = 0;
	}

	switch (ret) {
	case 0:
		wbc->nr_to_write = count;
		ret = len;
		break;
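
	/* Map the store result onto local page state: permission and key
	 * errors leave the pages dirty to be retried, quota failures are
	 * reported as -ENOSPC, and fatal errors cause the pages to be
	 * discarded.
	 */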
	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
	case -ENETRESET:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, start, len);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}
701 * write a region of pages back to the server
703 static int afs_writepages_region(struct address_space *mapping,
704 struct writeback_control *wbc,
705 loff_t start, loff_t end, loff_t *_next,
709 struct folio_batch fbatch;
714 _enter("%llx,%llx,", start, end);
715 folio_batch_init(&fbatch);
718 pgoff_t index = start / PAGE_SIZE;
720 n = filemap_get_folios_tag(mapping, &index, end / PAGE_SIZE,
721 PAGECACHE_TAG_DIRTY, &fbatch);
725 for (i = 0; i < n; i++) {
726 folio = fbatch.folios[i];
727 start = folio_pos(folio); /* May regress with THPs */
729 _debug("wback %lx", folio_index(folio));
731 /* At this point we hold neither the i_pages lock nor the
732 * page lock: the page may be truncated or invalidated
733 * (changing page->mapping to NULL), or even swizzled
734 * back from swapper_space to tmpfs file mapping
737 if (wbc->sync_mode != WB_SYNC_NONE) {
738 ret = folio_lock_killable(folio);
740 folio_batch_release(&fbatch);
744 if (!folio_trylock(folio))
748 if (folio->mapping != mapping ||
749 !folio_test_dirty(folio)) {
750 start += folio_size(folio);
755 if (folio_test_writeback(folio) ||
756 folio_test_fscache(folio)) {
758 if (wbc->sync_mode != WB_SYNC_NONE) {
759 folio_wait_writeback(folio);
760 #ifdef CONFIG_AFS_FSCACHE
761 folio_wait_fscache(folio);
766 start += folio_size(folio);
767 if (wbc->sync_mode == WB_SYNC_NONE) {
768 if (skips >= 5 || need_resched()) {
770 folio_batch_release(&fbatch);
771 _leave(" = 0 [%llx]", *_next);
779 if (!folio_clear_dirty_for_io(folio))
781 ret = afs_write_back_from_locked_folio(mapping, wbc,
784 _leave(" = %zd", ret);
785 folio_batch_release(&fbatch);
792 folio_batch_release(&fbatch);
794 } while (wbc->nr_to_write > 0);
797 _leave(" = 0 [%llx]", *_next);

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	loff_t start, next;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;
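
	/* For cyclic writeback, resume from where the last pass left off and,
	 * if there is still write quota available, wrap round to catch up the
	 * start of the file.
	 */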
	if (wbc->range_cyclic) {
		start = mapping->writeback_index * PAGE_SIZE;
		ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX,
					    &next, false);
		if (ret == 0) {
			mapping->writeback_index = next / PAGE_SIZE;
			if (start > 0 && wbc->nr_to_write > 0) {
				ret = afs_writepages_region(mapping, wbc, 0,
							    start, &next, false);
				if (ret == 0)
					mapping->writeback_index =
						next / PAGE_SIZE;
			}
		}
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX,
					    &next, false);
		if (wbc->nr_to_write > 0 && ret == 0)
			mapping->writeback_index = next / PAGE_SIZE;
	} else {
		ret = afs_writepages_region(mapping, wbc,
					    wbc->range_start, wbc->range_end,
					    &next, false);
	}

	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	struct afs_file *af = iocb->ki_filp->private_data;
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->netfs.inode)) {
		printk_once(KERN_INFO
			    "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;
	result = afs_validate(vnode, af->key);
	if (result < 0)
		return result;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct afs_file *af = file->private_data;
	int ret;

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	ret = afs_validate(vnode, af->key);
	if (ret < 0)
		return ret;

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af = file->private_data;
	unsigned long priv;
	vm_fault_t ret = VM_FAULT_RETRY;

	_enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, folio_index(folio));

	afs_validate(vnode, af->key);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	if (folio_test_fscache(folio) &&
	    folio_wait_fscache_killable(folio) < 0)
		goto out;
#endif

	if (folio_wait_writeback_killable(folio))
		goto out;

	if (folio_lock_killable(folio) < 0)
		goto out;

	/* We mustn't change folio->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	if (folio_wait_writeback_killable(folio) < 0) {
		folio_unlock(folio);
		goto out;
	}

	priv = afs_folio_dirty(folio, 0, folio_size(folio));
	priv = afs_folio_dirty_mmapped(priv);
	if (folio_test_private(folio)) {
		folio_change_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite+"), folio);
	} else {
		folio_attach_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite"), folio);
	}
	file_update_time(file);

	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}

/*
 * Prune the keys cached for writeback.  Unused keys are discarded; the
 * vnode->wb_lock is taken here, so the caller must not hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);
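
	/* Dispose of the reaped keys outside of the spinlock. */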
	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_folio(struct folio *folio)
{
	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
	struct iov_iter iter;
	struct bio_vec bv;
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", folio->index);

	priv = (unsigned long)folio_get_private(folio);
	if (folio_clear_dirty_for_io(folio)) {
		f = 0;
		t = folio_size(folio);
		if (folio_test_private(folio)) {
			f = afs_folio_dirty_from(folio, priv);
			t = afs_folio_dirty_to(folio, priv);
		}
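
		/* Write back only the region of the folio recorded as dirty,
		 * described by a single bio_vec.
		 */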
		bvec_set_folio(&bv, folio, t - f, f);
		iov_iter_bvec(&iter, ITER_SOURCE, &bv, 1, bv.bv_len);

		trace_afs_folio_dirty(vnode, tracepoint_string("launder"), folio);
		ret = afs_store_data(vnode, &iter, folio_pos(folio) + f, true);
	}

	trace_afs_folio_dirty(vnode, tracepoint_string("laundered"), folio);
	folio_detach_private(folio);
	folio_wait_fscache(folio);
	return ret;
}

/*
 * Deal with the completion of writing the data to the cache.
 */
static void afs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
				    bool was_async)
{
	struct afs_vnode *vnode = priv;

	if (IS_ERR_VALUE(transferred_or_error) &&
	    transferred_or_error != -ENOBUFS)
		afs_invalidate_cache(vnode, 0);
}

/*
 * Save the write to the cache also.
 */
static void afs_write_to_cache(struct afs_vnode *vnode,
			       loff_t start, size_t len, loff_t i_size,
			       bool caching)
{
	fscache_write_to_cache(afs_vnode_cache(vnode),
			       vnode->netfs.inode.i_mapping, start, len, i_size,
			       afs_write_to_cache_done, vnode, caching);
}