/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/
#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/fs.h>
#include <linux/file.h>
static int fuse_send_open(struct fuse_mount *fm, u64 nodeid,
			  unsigned int open_flags, int opcode,
			  struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	FUSE_ARGS(args);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = open_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fm->fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;

	if (fm->fc->handle_killpriv_v2 &&
	    (inarg.flags & O_TRUNC) && !capable(CAP_FSETID)) {
		inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID;
	}

	args.opcode = opcode;
	args.nodeid = nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(*outargp);
	args.out_args[0].value = outargp;

	return fuse_simple_request(fm, &args);
}
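/*
 * For orientation (a sketch, not part of this file): the request built above
 * travels to the userspace server as a FUSE_OPEN (or FUSE_OPENDIR) message
 * carrying struct fuse_open_in, and the server answers with struct
 * fuse_open_out, roughly:
 *
 *	struct fuse_open_out out = {
 *		.fh         = my_handle,        // opaque server-chosen handle,
 *		                                // echoed back in READ/WRITE/RELEASE
 *		.open_flags = FOPEN_KEEP_CACHE, // FOPEN_* bits picked by the server
 *	};
 *
 * 'my_handle' is a made-up name here; fh is entirely server-defined.
 */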
struct fuse_release_args {
	struct fuse_args args;
	struct fuse_release_in inarg;
	struct inode *inode;
};
struct fuse_file *fuse_file_alloc(struct fuse_mount *fm)
{
	struct fuse_file *ff;

	ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL_ACCOUNT);
	if (unlikely(!ff))
		return NULL;

	ff->fm = fm;
	ff->release_args = kzalloc(sizeof(*ff->release_args),
				   GFP_KERNEL_ACCOUNT);
	if (!ff->release_args) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	mutex_init(&ff->readdir.lock);
	refcount_set(&ff->count, 1);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	ff->kh = atomic64_inc_return(&fm->fc->khctr);

	return ff;
}
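/*
 * Note: ff->kh (assigned from the connection-wide khctr above) is the
 * kernel-unique handle by which this open file is identified in poll
 * notifications (FUSE_NOTIFY_POLL); it is distinct from the server-chosen
 * ff->fh.
 */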
void fuse_file_free(struct fuse_file *ff)
{
	kfree(ff->release_args);
	mutex_destroy(&ff->readdir.lock);
	kfree(ff);
}

static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	refcount_inc(&ff->count);
	return ff;
}

static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args,
			     int error)
{
	struct fuse_release_args *ra = container_of(args, typeof(*ra), args);

	iput(ra->inode);
	kfree(ra);
}
static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
{
	if (refcount_dec_and_test(&ff->count)) {
		struct fuse_args *args = &ff->release_args->args;

		if (isdir ? ff->fm->fc->no_opendir : ff->fm->fc->no_open) {
			/* Do nothing when client does not implement 'open' */
			fuse_release_end(ff->fm, args, 0);
		} else if (sync) {
			fuse_simple_request(ff->fm, args);
			fuse_release_end(ff->fm, args, 0);
		} else {
			args->end = fuse_release_end;
			if (fuse_simple_background(ff->fm, args,
						   GFP_KERNEL | __GFP_NOFAIL))
				fuse_release_end(ff->fm, args, -ENOTCONN);
		}
		kfree(ff);
	}
}
struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
				 unsigned int open_flags, bool isdir)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_file *ff;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

	ff = fuse_file_alloc(fm);
	if (!ff)
		return ERR_PTR(-ENOMEM);

	ff->fh = 0;
	/* Default for no-open */
	ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0);
	if (isdir ? !fc->no_opendir : !fc->no_open) {
		struct fuse_open_out outarg;
		int err;

		err = fuse_send_open(fm, nodeid, open_flags, opcode, &outarg);
		if (!err) {
			ff->fh = outarg.fh;
			ff->open_flags = outarg.open_flags;
		} else if (err != -ENOSYS) {
			fuse_file_free(ff);
			return ERR_PTR(err);
		} else {
			if (isdir)
				fc->no_opendir = 1;
			else
				fc->no_open = 1;
		}
	}

	if (isdir)
		ff->open_flags &= ~FOPEN_DIRECT_IO;

	ff->nodeid = nodeid;

	return ff;
}
int fuse_do_open(struct fuse_mount *fm, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_file *ff = fuse_file_open(fm, nodeid, file->f_flags, isdir);

	if (!IS_ERR(ff))
		file->private_data = ff;

	return PTR_ERR_OR_ZERO(ff);
}
EXPORT_SYMBOL_GPL(fuse_do_open);
static void fuse_link_write_file(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff = file->private_data;
	/*
	 * file may be written through mmap, so chain it onto the
	 * inode's write_files list
	 */
	spin_lock(&fi->lock);
	if (list_empty(&ff->write_entry))
		list_add(&ff->write_entry, &fi->write_files);
	spin_unlock(&fi->lock);
}
void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (ff->open_flags & FOPEN_STREAM)
		stream_open(inode, file);
	else if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);

	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		spin_lock(&fi->lock);
		fi->attr_version = atomic64_inc_return(&fc->attr_version);
		i_size_write(inode, 0);
		spin_unlock(&fi->lock);
		file_update_time(file);
		fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
	}
	if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
		fuse_link_write_file(file);
}
int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_conn *fc = fm->fc;
	int err;
	bool is_wb_truncate = (file->f_flags & O_TRUNC) &&
			  fc->atomic_o_trunc &&
			  fc->writeback_cache;
	bool dax_truncate = (file->f_flags & O_TRUNC) &&
			  fc->atomic_o_trunc && FUSE_IS_DAX(inode);

	if (fuse_is_bad(inode))
		return -EIO;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	if (is_wb_truncate || dax_truncate)
		inode_lock(inode);

	if (dax_truncate) {
		filemap_invalidate_lock(inode->i_mapping);
		err = fuse_dax_break_layouts(inode, 0, 0);
		if (err)
			goto out_inode_unlock;
	}

	if (is_wb_truncate || dax_truncate)
		fuse_set_nowrite(inode);

	err = fuse_do_open(fm, get_node_id(inode), file, isdir);
	if (!err)
		fuse_finish_open(inode, file);

	if (is_wb_truncate || dax_truncate)
		fuse_release_nowrite(inode);
	if (!err) {
		struct fuse_file *ff = file->private_data;

		if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC))
			truncate_pagecache(inode, 0);
		else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
			invalidate_inode_pages2(inode->i_mapping);
	}
	if (dax_truncate)
		filemap_invalidate_unlock(inode->i_mapping);
out_inode_unlock:
	if (is_wb_truncate || dax_truncate)
		inode_unlock(inode);

	return err;
}
static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
				 unsigned int flags, int opcode)
{
	struct fuse_conn *fc = ff->fm->fc;
	struct fuse_release_args *ra = ff->release_args;

	/* Inode is NULL on error path of fuse_create_open() */
	if (likely(fi)) {
		spin_lock(&fi->lock);
		list_del(&ff->write_entry);
		spin_unlock(&fi->lock);
	}
	spin_lock(&fc->lock);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	ra->inarg.fh = ff->fh;
	ra->inarg.flags = flags;
	ra->args.in_numargs = 1;
	ra->args.in_args[0].size = sizeof(struct fuse_release_in);
	ra->args.in_args[0].value = &ra->inarg;
	ra->args.opcode = opcode;
	ra->args.nodeid = ff->nodeid;
	ra->args.force = true;
	ra->args.nocreds = true;
}
void fuse_file_release(struct inode *inode, struct fuse_file *ff,
		       unsigned int open_flags, fl_owner_t id, bool isdir)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_release_args *ra = ff->release_args;
	int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;

	fuse_prepare_release(fi, ff, open_flags, opcode);

	if (ff->flock) {
		ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, id);
	}
	/* Hold inode until release is finished */
	ra->inode = igrab(inode);

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount,
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fm->fc->destroy, isdir);
}
void fuse_release_common(struct file *file, bool isdir)
{
	fuse_file_release(file_inode(file), file->private_data, file->f_flags,
			  (fl_owner_t) file, isdir);
}

static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}
static int fuse_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * Dirty pages might remain despite write_inode_now() call from
	 * fuse_flush() due to writes racing with the close.
	 */
	if (fc->writeback_cache)
		write_inode_now(inode, 1);

	fuse_release_common(file, false);

	/* return value is ignored by VFS */
	return 0;
}
void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff,
		       unsigned int flags)
{
	WARN_ON(refcount_read(&ff->count) > 1);
	fuse_prepare_release(fi, ff, flags, FUSE_RELEASE);
	/*
	 * iput(NULL) is a no-op and since the refcount is 1 and everything's
	 * synchronous, we are fine with not doing igrab() here.
	 */
	fuse_file_put(ff, true, false);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);
/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}
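/*
 * The loop above is one full 32-round XTEA encipherment of the 64-bit
 * pointer value with the per-connection 128-bit scramble_key; the
 * "sum += 0x9E3779B9" step is the standard XTEA delta schedule. As an
 * illustrative sketch only: a decoder that somehow knew the key would run
 * the same rounds in reverse,
 *
 *	for (i = 0, sum = 32 * 0x9E3779B9; i < 32; i++) {
 *		v1 -= ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
 *		sum -= 0x9E3779B9;
 *		v0 -= ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
 *	}
 *
 * The point of the scrambling is precisely that userspace has no such key.
 */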
struct fuse_writepage_args {
	struct fuse_io_args ia;
	struct rb_node writepages_entry;
	struct list_head queue_entry;
	struct fuse_writepage_args *next;
	struct inode *inode;
	struct fuse_sync_bucket *bucket;
};

static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
					    pgoff_t idx_from, pgoff_t idx_to)
{
	struct rb_node *n;

	n = fi->writepages.rb_node;

	while (n) {
		struct fuse_writepage_args *wpa;
		pgoff_t curr_index;

		wpa = rb_entry(n, struct fuse_writepage_args, writepages_entry);
		WARN_ON(get_fuse_inode(wpa->inode) != fi);
		curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT;
		if (idx_from >= curr_index + wpa->ia.ap.num_pages)
			n = n->rb_right;
		else if (idx_to < curr_index)
			n = n->rb_left;
		else
			return wpa;
	}
	return NULL;
}
/*
 * Check if any page in a range is under writeback
 *
 * This is currently done by searching the rb-tree of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
				   pgoff_t idx_to)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool found;

	spin_lock(&fi->lock);
	found = fuse_find_writeback(fi, idx_from, idx_to);
	spin_unlock(&fi->lock);

	return found;
}

static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	return fuse_range_is_writeback(inode, index, index);
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
}
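/*
 * The waitqueue above pairs with the wake_up(&fi->page_waitq) in
 * fuse_writepage_finish() below: completion of a writepage request is what
 * flips fuse_page_is_writeback() back to false.
 */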
/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}
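/*
 * A sketch of why this works: fuse_set_nowrite() drives fi->writectr
 * negative and sleeps until every in-flight writepage request has completed,
 * and fuse_release_nowrite() restores writectr and requeues anything that
 * was queued meanwhile via fuse_flush_writepages(). Calling the two back to
 * back therefore acts as a pure write barrier.
 */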
struct fuse_flush_args {
	struct fuse_args args;
	struct fuse_flush_in inarg;
	struct work_struct work;
	struct file *file;
};

static int fuse_do_flush(struct fuse_flush_args *fa)
{
	int err;
	struct inode *inode = file_inode(fa->file);
	struct fuse_mount *fm = get_fuse_mount(inode);

	err = write_inode_now(inode, 1);
	if (err)
		goto out;

	inode_lock(inode);
	fuse_sync_writes(inode);
	inode_unlock(inode);

	err = filemap_check_errors(fa->file->f_mapping);
	if (err)
		goto out;

	err = 0;
	if (fm->fc->no_flush)
		goto inval_attr_out;

	err = fuse_simple_request(fm, &fa->args);
	if (err == -ENOSYS) {
		fm->fc->no_flush = 1;
		err = 0;
	}

inval_attr_out:
	/*
	 * In memory i_blocks is not maintained by fuse, if writeback cache is
	 * enabled, i_blocks from cached attr may not be accurate.
	 */
	if (!err && fm->fc->writeback_cache)
		fuse_invalidate_attr_mask(inode, STATX_BLOCKS);
out:
	fput(fa->file);
	kfree(fa);
	return err;
}
static void fuse_flush_async(struct work_struct *work)
{
	struct fuse_flush_args *fa = container_of(work, typeof(*fa), work);

	fuse_do_flush(fa);
}

static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct fuse_flush_args *fa;
	struct inode *inode = file_inode(file);
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;

	if (fuse_is_bad(inode))
		return -EIO;

	if (ff->open_flags & FOPEN_NOFLUSH && !fm->fc->writeback_cache)
		return 0;

	fa = kzalloc(sizeof(*fa), GFP_KERNEL);
	if (!fa)
		return -ENOMEM;

	fa->inarg.fh = ff->fh;
	fa->inarg.lock_owner = fuse_lock_owner_id(fm->fc, id);
	fa->args.opcode = FUSE_FLUSH;
	fa->args.nodeid = get_node_id(inode);
	fa->args.in_numargs = 1;
	fa->args.in_args[0].size = sizeof(fa->inarg);
	fa->args.in_args[0].value = &fa->inarg;
	fa->args.force = true;
	fa->file = get_file(file);

	/* Don't wait if the task is exiting */
	if (current->flags & PF_EXITING) {
		INIT_WORK(&fa->work, fuse_flush_async);
		schedule_work(&fa->work);
		return 0;
	}

	return fuse_do_flush(fa);
}
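/*
 * Server-side counterpart, for context (a hedged sketch using the libfuse
 * high-level API, not part of the kernel):
 *
 *	static int myfs_flush(const char *path, struct fuse_file_info *fi)
 *	{
 *		// Invoked once for every close(2) of a descriptor; a server
 *		// that buffers writes would push them out here so close()
 *		// can report errors.
 *		return 0;
 *	}
 *
 * ('myfs_flush' is a hypothetical name.)  Returning -ENOSYS once makes the
 * kernel set fc->no_flush and stop sending FLUSH altogether, as handled in
 * fuse_do_flush() above.
 */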
int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int opcode)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_fsync_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? FUSE_FSYNC_FDATASYNC : 0;
	args.opcode = opcode;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	return fuse_simple_request(fm, &args);
}
static int fuse_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fuse_is_bad(inode))
		return -EIO;

	inode_lock(inode);

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = file_write_and_wait_range(file, start, end);
	if (err)
		goto out;

	fuse_sync_writes(inode);

	/*
	 * Due to implementation of fuse writeback
	 * file_write_and_wait_range() does not catch errors.
	 * We have to do this directly after fuse_sync_writes()
	 */
	err = file_check_and_advance_wb_err(file);
	if (err)
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (err)
		goto out;

	if (fc->no_fsync)
		goto out;

	err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC);
	if (err == -ENOSYS) {
		fc->no_fsync = 1;
		err = 0;
	}
out:
	inode_unlock(inode);

	return err;
}
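/*
 * The -ENOSYS handling above follows the usual FUSE negotiation pattern:
 * the first time the server answers FUSE_FSYNC with -ENOSYS, the
 * connection-wide no_fsync flag is set and every later fsync(2) succeeds
 * after the local writeback steps, without another round trip to userspace.
 */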
void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
			 size_t count, int opcode)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_args *args = &ia->ap.args;

	ia->read.in.fh = ff->fh;
	ia->read.in.offset = pos;
	ia->read.in.size = count;
	ia->read.in.flags = file->f_flags;
	args->opcode = opcode;
	args->nodeid = ff->nodeid;
	args->in_numargs = 1;
	args->in_args[0].size = sizeof(ia->read.in);
	args->in_args[0].value = &ia->read.in;
	args->out_argvar = true;
	args->out_numargs = 1;
	args->out_args[0].size = count;
}
static void fuse_release_user_pages(struct fuse_args_pages *ap,
				    bool should_dirty)
{
	unsigned int i;

	for (i = 0; i < ap->num_pages; i++) {
		if (should_dirty)
			set_page_dirty_lock(ap->pages[i]);
		put_page(ap->pages[i]);
	}
}

static void fuse_io_release(struct kref *kref)
{
	kfree(container_of(kref, struct fuse_io_priv, refcnt));
}

static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
{
	if (io->err)
		return io->err;

	if (io->bytes >= 0 && io->write)
		return -EIO;

	return io->bytes < 0 ? io->size : io->bytes;
}
/*
 * In case of short read, the caller sets 'pos' to the position of
 * actual end of fuse request in IO request. Otherwise, if bytes_requested
 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
 *
 * An example:
 * User requested DIO read of 64K. It was split into two 32K fuse requests,
 * both submitted asynchronously. The first of them was ACKed by userspace as
 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
 * second request was ACKed as short, e.g. only 1K was read, resulting in
 * pos == 33K.
 *
 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
 * will be equal to the length of the longest contiguous fragment of
 * transferred data starting from the beginning of IO request.
 */
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
	int left;

	spin_lock(&io->lock);
	if (err)
		io->err = io->err ? : err;
	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
		io->bytes = pos;

	left = --io->reqs;
	if (!left && io->blocking)
		complete(io->done);
	spin_unlock(&io->lock);

	if (!left && !io->blocking) {
		ssize_t res = fuse_get_res_by_io(io);

		if (res >= 0) {
			struct inode *inode = file_inode(io->iocb->ki_filp);
			struct fuse_conn *fc = get_fuse_conn(inode);
			struct fuse_inode *fi = get_fuse_inode(inode);

			spin_lock(&fi->lock);
			fi->attr_version = atomic64_inc_return(&fc->attr_version);
			spin_unlock(&fi->lock);
		}

		io->iocb->ki_complete(io->iocb, res);
	}

	kref_put(&io->refcnt, fuse_io_release);
}
static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
					  unsigned int npages)
{
	struct fuse_io_args *ia;

	ia = kzalloc(sizeof(*ia), GFP_KERNEL);
	if (ia) {
		ia->io = io;
		ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL,
						&ia->ap.descs);
		if (!ia->ap.pages) {
			kfree(ia);
			ia = NULL;
		}
	}
	return ia;
}

static void fuse_io_free(struct fuse_io_args *ia)
{
	kfree(ia->ap.pages);
	kfree(ia);
}
static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args,
				  int err)
{
	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
	struct fuse_io_priv *io = ia->io;
	ssize_t pos = -1;

	fuse_release_user_pages(&ia->ap, io->should_dirty);

	if (err) {
		/* Nothing */
	} else if (io->write) {
		if (ia->write.out.size > ia->write.in.size) {
			err = -EIO;
		} else if (ia->write.in.size != ia->write.out.size) {
			pos = ia->write.in.offset - io->offset +
				ia->write.out.size;
		}
	} else {
		u32 outsize = args->out_args[0].size;

		if (ia->read.in.size != outsize)
			pos = ia->read.in.offset - io->offset + outsize;
	}

	fuse_aio_complete(io, err, pos);
	fuse_io_free(ia);
}
static ssize_t fuse_async_req_send(struct fuse_mount *fm,
				   struct fuse_io_args *ia, size_t num_bytes)
{
	ssize_t err;
	struct fuse_io_priv *io = ia->io;

	spin_lock(&io->lock);
	kref_get(&io->refcnt);
	io->size += num_bytes;
	io->reqs++;
	spin_unlock(&io->lock);

	ia->ap.args.end = fuse_aio_complete_req;
	ia->ap.args.may_block = io->should_dirty;
	err = fuse_simple_background(fm, &ia->ap.args, GFP_KERNEL);
	if (err)
		fuse_aio_complete_req(fm, &ia->ap.args, err);

	return num_bytes;
}
static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count,
			      fl_owner_t owner)
{
	struct file *file = ia->io->iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		ia->read.in.read_flags |= FUSE_READ_LOCKOWNER;
		ia->read.in.lock_owner = fuse_lock_owner_id(fm->fc, owner);
	}

	if (ia->io->async)
		return fuse_async_req_send(fm, ia, count);

	return fuse_simple_request(fm, &ia->ap.args);
}
static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fi->lock);
	if (attr_ver >= fi->attr_version && size < inode->i_size &&
	    !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		fi->attr_version = atomic64_inc_return(&fc->attr_version);
		i_size_write(inode, size);
	}
	spin_unlock(&fi->lock);
}
static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read,
			    struct fuse_args_pages *ap)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * If writeback_cache is enabled, a short read means there's a hole in
	 * the file. Some data after the hole is in page cache, but has not
	 * reached the client fs yet. So the hole is not present there.
	 */
	if (!fc->writeback_cache) {
		loff_t pos = page_offset(ap->pages[0]) + num_read;
		fuse_read_update_size(inode, pos, attr_ver);
	}
}
static int fuse_do_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	loff_t pos = page_offset(page);
	struct fuse_page_desc desc = { .length = PAGE_SIZE };
	struct fuse_io_args ia = {
		.ap.args.page_zeroing = true,
		.ap.args.out_pages = true,
		.ap.num_pages = 1,
		.ap.pages = &page,
		.ap.descs = &desc,
	};
	ssize_t res;
	u64 attr_ver;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	attr_ver = fuse_get_attr_version(fm->fc);

	/* Don't overflow end offset */
	if (pos + (desc.length - 1) == LLONG_MAX)
		desc.length--;

	fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ);
	res = fuse_simple_request(fm, &ia.ap.args);
	if (res < 0)
		return res;
	/*
	 * Short read means EOF.  If file size is larger, truncate it
	 */
	if (res < desc.length)
		fuse_short_read(inode, attr_ver, res, &ia.ap);

	SetPageUptodate(page);

	return 0;
}
static int fuse_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	struct inode *inode = page->mapping->host;
	int err;

	err = -EIO;
	if (fuse_is_bad(inode))
		goto out;

	err = fuse_do_readpage(file, page);
	fuse_invalidate_atime(inode);
 out:
	folio_unlock(folio);
	return err;
}
static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
			       int err)
{
	int i;
	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
	struct fuse_args_pages *ap = &ia->ap;
	size_t count = ia->read.in.size;
	size_t num_read = args->out_args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < ap->num_pages; i++)
		mapping = ap->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (!err && num_read < count)
			fuse_short_read(inode, ia->read.attr_ver, num_read, ap);

		fuse_invalidate_atime(inode);
	}

	for (i = 0; i < ap->num_pages; i++) {
		struct page *page = ap->pages[i];

		if (!err)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
		put_page(page);
	}
	if (ia->ff)
		fuse_file_put(ia->ff, false, false);

	fuse_io_free(ia);
}
static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_args_pages *ap = &ia->ap;
	loff_t pos = page_offset(ap->pages[0]);
	size_t count = ap->num_pages << PAGE_SHIFT;
	ssize_t res;
	int err;

	ap->args.out_pages = true;
	ap->args.page_zeroing = true;
	ap->args.page_replace = true;

	/* Don't overflow end offset */
	if (pos + (count - 1) == LLONG_MAX) {
		count--;
		ap->descs[ap->num_pages - 1].length--;
	}
	WARN_ON((loff_t) (pos + count) < 0);

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	ia->read.attr_ver = fuse_get_attr_version(fm->fc);
	if (fm->fc->async_read) {
		ia->ff = fuse_file_get(ff);
		ap->args.end = fuse_readpages_end;
		err = fuse_simple_background(fm, &ap->args, GFP_KERNEL);
		if (!err)
			return;
	} else {
		res = fuse_simple_request(fm, &ap->args);
		err = res < 0 ? res : 0;
	}
	fuse_readpages_end(fm, &ap->args, err);
}
static void fuse_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned int i, max_pages, nr_pages = 0;

	if (fuse_is_bad(inode))
		return;

	max_pages = min_t(unsigned int, fc->max_pages,
			fc->max_read / PAGE_SIZE);

	for (;;) {
		struct fuse_io_args *ia;
		struct fuse_args_pages *ap;

		if (fc->num_background >= fc->congestion_threshold &&
		    rac->ra->async_size >= readahead_count(rac))
			/*
			 * Congested and only async pages left, so skip the
			 * rest.
			 */
			break;

		nr_pages = readahead_count(rac) - nr_pages;
		if (nr_pages > max_pages)
			nr_pages = max_pages;
		if (nr_pages == 0)
			break;
		ia = fuse_io_alloc(NULL, nr_pages);
		if (!ia)
			return;
		ap = &ia->ap;
		nr_pages = __readahead_batch(rac, ap->pages, nr_pages);
		for (i = 0; i < nr_pages; i++) {
			fuse_wait_on_page_writeback(inode,
						    readahead_index(rac) + i);
			ap->descs[i].length = PAGE_SIZE;
		}
		ap->num_pages = nr_pages;
		fuse_send_readpages(ia, rac->file);
	}
}
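/*
 * Worked example (hypothetical numbers): with 4K pages, max_read = 128K and
 * fc->max_pages = 256, max_pages above is min(256, 32) = 32, so a 1M
 * readahead window is chopped into (at least) eight 32-page FUSE_READ
 * requests, each dispatched through fuse_send_readpages().
 */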
static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * In auto invalidate mode, always update attributes on read.
	 * Otherwise, only update if we attempt to read past EOF (to ensure
	 * i_size is up to date).
	 */
	if (fc->auto_inval_data ||
	    (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) {
		int err;

		err = fuse_update_attributes(inode, iocb->ki_filp, STATX_SIZE);
		if (err)
			return err;
	}

	return generic_file_read_iter(iocb, to);
}
static void fuse_write_args_fill(struct fuse_io_args *ia, struct fuse_file *ff,
				 loff_t pos, size_t count)
{
	struct fuse_args *args = &ia->ap.args;

	ia->write.in.fh = ff->fh;
	ia->write.in.offset = pos;
	ia->write.in.size = count;
	args->opcode = FUSE_WRITE;
	args->nodeid = ff->nodeid;
	args->in_numargs = 2;
	if (ff->fm->fc->minor < 9)
		args->in_args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		args->in_args[0].size = sizeof(ia->write.in);
	args->in_args[0].value = &ia->write.in;
	args->in_args[1].size = count;
	args->out_numargs = 1;
	args->out_args[0].size = sizeof(ia->write.out);
	args->out_args[0].value = &ia->write.out;
}
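/*
 * The minor < 9 special case above covers servers that predate FUSE
 * protocol 7.9: their struct fuse_write_in lacked the lock_owner and flags
 * fields, so only the first FUSE_COMPAT_WRITE_IN_SIZE (24) bytes of the
 * header are sent to them.
 */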
static unsigned int fuse_write_flags(struct kiocb *iocb)
{
	unsigned int flags = iocb->ki_filp->f_flags;

	if (iocb_is_dsync(iocb))
		flags |= O_DSYNC;
	if (iocb->ki_flags & IOCB_SYNC)
		flags |= O_SYNC;

	return flags;
}
static ssize_t fuse_send_write(struct fuse_io_args *ia, loff_t pos,
			       size_t count, fl_owner_t owner)
{
	struct kiocb *iocb = ia->io->iocb;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_write_in *inarg = &ia->write.in;
	ssize_t err;

	fuse_write_args_fill(ia, ff, pos, count);
	inarg->flags = fuse_write_flags(iocb);
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fm->fc, owner);
	}

	if (ia->io->async)
		return fuse_async_req_send(fm, ia, count);

	err = fuse_simple_request(fm, &ia->ap.args);
	if (!err && ia->write.out.size > count)
		err = -EIO;

	return err ?: ia->write.out.size;
}
bool fuse_write_update_attr(struct inode *inode, loff_t pos, ssize_t written)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool ret = false;

	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	if (written > 0 && pos > inode->i_size) {
		i_size_write(inode, pos);
		ret = true;
	}
	spin_unlock(&fi->lock);

	fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);

	return ret;
}
static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
				     struct kiocb *iocb, struct inode *inode,
				     loff_t pos, size_t count)
{
	struct fuse_args_pages *ap = &ia->ap;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	unsigned int offset, i;
	bool short_write;
	int err;

	for (i = 0; i < ap->num_pages; i++)
		fuse_wait_on_page_writeback(inode, ap->pages[i]->index);

	fuse_write_args_fill(ia, ff, pos, count);
	ia->write.in.flags = fuse_write_flags(iocb);
	if (fm->fc->handle_killpriv_v2 && !capable(CAP_FSETID))
		ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;

	err = fuse_simple_request(fm, &ap->args);
	if (!err && ia->write.out.size > count)
		err = -EIO;

	short_write = ia->write.out.size < count;
	offset = ap->descs[0].offset;
	count = ia->write.out.size;
	for (i = 0; i < ap->num_pages; i++) {
		struct page *page = ap->pages[i];

		if (err) {
			ClearPageUptodate(page);
		} else {
			if (count >= PAGE_SIZE - offset)
				count -= PAGE_SIZE - offset;
			else {
				if (short_write)
					ClearPageUptodate(page);
				count = 0;
			}
			offset = 0;
		}
		if (ia->write.page_locked && (i == ap->num_pages - 1))
			unlock_page(page);
		put_page(page);
	}

	return err;
}
static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos,
				     unsigned int max_pages)
{
	struct fuse_args_pages *ap = &ia->ap;
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_SIZE - 1);
	size_t count = 0;
	int err;

	ap->args.in_pages = true;
	ap->descs[0].offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (fault_in_iov_iter_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		tmp = copy_page_from_iter_atomic(page, offset, bytes, ii);
		flush_dcache_page(page);

		if (!tmp) {
			unlock_page(page);
			put_page(page);
			goto again;
		}

		err = 0;
		ap->pages[ap->num_pages] = page;
		ap->descs[ap->num_pages].length = tmp;
		ap->num_pages++;

		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_SIZE)
			offset = 0;

		/* If we copied full page, mark it uptodate */
		if (tmp == PAGE_SIZE)
			SetPageUptodate(page);

		if (PageUptodate(page)) {
			unlock_page(page);
		} else {
			ia->write.page_locked = true;
			break;
		}
		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 ap->num_pages < max_pages && offset == 0);

	return count > 0 ? count : err;
}
static inline unsigned int fuse_wr_pages(loff_t pos, size_t len,
				     unsigned int max_pages)
{
	return min_t(unsigned int,
		     ((pos + len - 1) >> PAGE_SHIFT) -
		     (pos >> PAGE_SHIFT) + 1,
		     max_pages);
}
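/*
 * Example: with 4K pages, pos = 3000 and len = 6000 touch bytes 3000..8999,
 * i.e. page indexes 0..2, and the expression above yields
 * (8999 >> 12) - (3000 >> 12) + 1 = 2 - 0 + 1 = 3 pages (clamped to
 * max_pages).
 */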
static ssize_t fuse_perform_write(struct kiocb *iocb,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	int err = 0;
	ssize_t res = 0;

	if (inode->i_size < pos + iov_iter_count(ii))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	do {
		ssize_t count;
		struct fuse_io_args ia = {};
		struct fuse_args_pages *ap = &ia.ap;
		unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii),
						      fc->max_pages);

		ap->pages = fuse_pages_alloc(nr_pages, GFP_KERNEL, &ap->descs);
		if (!ap->pages) {
			err = -ENOMEM;
			break;
		}

		count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
		if (count <= 0) {
			err = count;
		} else {
			err = fuse_send_write_pages(&ia, iocb, inode,
						    pos, count);
			if (!err) {
				size_t num_written = ia.write.out.size;

				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		kfree(ap->pages);
	} while (!err && iov_iter_count(ii));

	fuse_write_update_attr(inode, pos, res);
	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	return res > 0 ? res : err;
}
static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	ssize_t written = 0;
	ssize_t written_buffered = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct fuse_conn *fc = get_fuse_conn(inode);
	loff_t endbyte = 0;

	if (fc->writeback_cache) {
		/* Update size (EOF optimization) and mode (SUID clearing) */
		err = fuse_update_attributes(mapping->host, file,
					     STATX_SIZE | STATX_MODE);
		if (err)
			return err;

		if (fc->handle_killpriv_v2 &&
		    setattr_should_drop_suidgid(&init_user_ns,
						file_inode(file))) {
			goto writethrough;
		}

		return generic_file_write_iter(iocb, from);
	}

writethrough:
	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		loff_t pos = iocb->ki_pos;
		written = generic_file_direct_write(iocb, from);
		if (written < 0 || !iov_iter_count(from))
			goto out;

		pos += written;

		written_buffered = fuse_perform_write(iocb, mapping, from, pos);
		if (written_buffered < 0) {
			err = written_buffered;
			goto out;
		}
		endbyte = pos + written_buffered - 1;

		err = filemap_write_and_wait_range(file->f_mapping, pos,
						   endbyte);
		if (err)
			goto out;

		invalidate_mapping_pages(file->f_mapping,
					 pos >> PAGE_SHIFT,
					 endbyte >> PAGE_SHIFT);

		written += written_buffered;
		iocb->ki_pos = pos + written_buffered;
	} else {
		written = fuse_perform_write(iocb, mapping, from, iocb->ki_pos);
		if (written >= 0)
			iocb->ki_pos += written;
	}
out:
	current->backing_dev_info = NULL;
	inode_unlock(inode);
	if (written > 0)
		written = generic_write_sync(iocb, written);

	return written ? written : err;
}
static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
	return (unsigned long)ii->iov->iov_base + ii->iov_offset;
}

static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
					size_t max_size)
{
	return min(iov_iter_single_seg_count(ii), max_size);
}
static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
			       size_t *nbytesp, int write,
			       unsigned int max_pages)
{
	size_t nbytes = 0;  /* # bytes already packed in req */
	ssize_t ret = 0;

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (iov_iter_is_kvec(ii)) {
		unsigned long user_addr = fuse_get_user_addr(ii);
		size_t frag_size = fuse_get_frag_size(ii, *nbytesp);

		if (write)
			ap->args.in_args[1].value = (void *) user_addr;
		else
			ap->args.out_args[0].value = (void *) user_addr;

		iov_iter_advance(ii, frag_size);
		*nbytesp = frag_size;
		return 0;
	}

	while (nbytes < *nbytesp && ap->num_pages < max_pages) {
		unsigned npages;
		size_t start;
		ret = iov_iter_get_pages2(ii, &ap->pages[ap->num_pages],
					  *nbytesp - nbytes,
					  max_pages - ap->num_pages,
					  &start);
		if (ret < 0)
			break;

		nbytes += ret;

		ret += start;
		npages = DIV_ROUND_UP(ret, PAGE_SIZE);

		ap->descs[ap->num_pages].offset = start;
		fuse_page_descs_length_init(ap->descs, ap->num_pages, npages);

		ap->num_pages += npages;
		ap->descs[ap->num_pages - 1].length -=
			(PAGE_SIZE - ret) & (PAGE_SIZE - 1);
	}

	ap->args.user_pages = true;
	if (write)
		ap->args.in_pages = true;
	else
		ap->args.out_pages = true;

	*nbytesp = nbytes;

	return ret < 0 ? ret : 0;
}
ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
		       loff_t *ppos, int flags)
{
	int write = flags & FUSE_DIO_WRITE;
	int cuse = flags & FUSE_DIO_CUSE;
	struct file *file = io->iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fm->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	size_t count = iov_iter_count(iter);
	pgoff_t idx_from = pos >> PAGE_SHIFT;
	pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
	ssize_t res = 0;
	int err = 0;
	struct fuse_io_args *ia;
	unsigned int max_pages;

	max_pages = iov_iter_npages(iter, fc->max_pages);
	ia = fuse_io_alloc(io, max_pages);
	if (!ia)
		return -ENOMEM;

	if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
		if (!write)
			inode_lock(inode);
		fuse_sync_writes(inode);
		if (!write)
			inode_unlock(inode);
	}

	io->should_dirty = !write && user_backed_iter(iter);
	while (count) {
		ssize_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);

		err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,
					  max_pages);
		if (err && !nbytes)
			break;

		if (write) {
			if (!capable(CAP_FSETID))
				ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;

			nres = fuse_send_write(ia, pos, nbytes, owner);
		} else {
			nres = fuse_send_read(ia, pos, nbytes, owner);
		}

		if (!io->async || nres < 0) {
			fuse_release_user_pages(&ia->ap, io->should_dirty);
			fuse_io_free(ia);
		}
		ia = NULL;
		if (nres < 0) {
			iov_iter_revert(iter, nbytes);
			err = nres;
			break;
		}
		WARN_ON(nres > nbytes);

		count -= nres;
		res += nres;
		pos += nres;
		if (nres != nbytes) {
			iov_iter_revert(iter, nbytes - nres);
			break;
		}
		if (count) {
			max_pages = iov_iter_npages(iter, fc->max_pages);
			ia = fuse_io_alloc(io, max_pages);
			if (!ia)
				break;
		}
	}
	if (ia)
		fuse_io_free(ia);
	if (res > 0)
		*ppos = pos;

	return res > 0 ? res : err;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);
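/*
 * Putting the pieces together: a 1M O_DIRECT write against a connection
 * with max_write = 128K (hypothetical numbers) runs this loop eight times.
 * In the async case each iteration only queues its request via
 * fuse_send_write(), and fuse_aio_complete() later reduces the per-request
 * results to the longest contiguous prefix, as described in the comment
 * above fuse_aio_complete().
 */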
static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
				  struct iov_iter *iter,
				  loff_t *ppos)
{
	ssize_t res;
	struct inode *inode = file_inode(io->iocb->ki_filp);

	res = fuse_direct_io(io, iter, ppos, 0);

	fuse_invalidate_atime(inode);

	return res;
}
static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter);

static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t res;

	if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
		res = fuse_direct_IO(iocb, to);
	} else {
		struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);

		res = __fuse_direct_read(&io, to, &iocb->ki_pos);
	}

	return res;
}
static bool fuse_direct_write_extending_i_size(struct kiocb *iocb,
					       struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode);
}

static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
	ssize_t res;
	bool exclusive_lock =
		!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES) ||
		iocb->ki_flags & IOCB_APPEND ||
		fuse_direct_write_extending_i_size(iocb, from);

	/*
	 * Take exclusive lock if
	 * - Parallel direct writes are disabled - a user space decision
	 * - Parallel direct writes are enabled and i_size is being extended.
	 *   This might not be needed at all, but needs further investigation.
	 */
	if (exclusive_lock)
		inode_lock(inode);
	else {
		inode_lock_shared(inode);

		/* A race with truncate might have come up as the decision for
		 * the lock type was done without holding the lock, check again.
		 */
		if (fuse_direct_write_extending_i_size(iocb, from)) {
			inode_unlock_shared(inode);
			inode_lock(inode);
			exclusive_lock = true;
		}
	}

	res = generic_write_checks(iocb, from);
	if (res > 0) {
		if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
			res = fuse_direct_IO(iocb, from);
		} else {
			res = fuse_direct_io(&io, from, &iocb->ki_pos,
					     FUSE_DIO_WRITE);
			fuse_write_update_attr(inode, iocb->ki_pos, res);
		}
	}
	if (exclusive_lock)
		inode_unlock(inode);
	else
		inode_unlock_shared(inode);

	return res;
}
static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);

	if (fuse_is_bad(inode))
		return -EIO;

	if (FUSE_IS_DAX(inode))
		return fuse_dax_read_iter(iocb, to);

	if (!(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_cache_read_iter(iocb, to);
	else
		return fuse_direct_read_iter(iocb, to);
}

static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);

	if (fuse_is_bad(inode))
		return -EIO;

	if (FUSE_IS_DAX(inode))
		return fuse_dax_write_iter(iocb, from);

	if (!(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_cache_write_iter(iocb, from);
	else
		return fuse_direct_write_iter(iocb, from);
}
static void fuse_writepage_free(struct fuse_writepage_args *wpa)
{
	struct fuse_args_pages *ap = &wpa->ia.ap;
	int i;

	if (wpa->bucket)
		fuse_sync_bucket_dec(wpa->bucket);

	for (i = 0; i < ap->num_pages; i++)
		__free_page(ap->pages[i]);

	if (wpa->ia.ff)
		fuse_file_put(wpa->ia.ff, false, false);

	kfree(ap->pages);
	kfree(wpa);
}
static void fuse_writepage_finish(struct fuse_mount *fm,
				  struct fuse_writepage_args *wpa)
{
	struct fuse_args_pages *ap = &wpa->ia.ap;
	struct inode *inode = wpa->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	int i;

	for (i = 0; i < ap->num_pages; i++) {
		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
	}
	wake_up(&fi->page_waitq);
}
/* Called under fi->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_mount *fm,
				struct fuse_writepage_args *wpa, loff_t size)
__releases(fi->lock)
__acquires(fi->lock)
{
	struct fuse_writepage_args *aux, *next;
	struct fuse_inode *fi = get_fuse_inode(wpa->inode);
	struct fuse_write_in *inarg = &wpa->ia.write.in;
	struct fuse_args *args = &wpa->ia.ap.args;
	__u64 data_size = wpa->ia.ap.num_pages * PAGE_SIZE;
	int err;

	fi->writectr++;
	if (inarg->offset + data_size <= size) {
		inarg->size = data_size;
	} else if (inarg->offset < size) {
		inarg->size = size - inarg->offset;
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	args->in_args[1].size = inarg->size;
	args->force = true;
	args->nocreds = true;

	err = fuse_simple_background(fm, args, GFP_ATOMIC);
	if (err == -ENOMEM) {
		spin_unlock(&fi->lock);
		err = fuse_simple_background(fm, args, GFP_NOFS | __GFP_NOFAIL);
		spin_lock(&fi->lock);
	}

	/* Fails on broken connection only */
	if (unlikely(err))
		goto out_free;

	return;

 out_free:
	fi->writectr--;
	rb_erase(&wpa->writepages_entry, &fi->writepages);
	fuse_writepage_finish(fm, wpa);
	spin_unlock(&fi->lock);

	/* After fuse_writepage_finish() aux request list is private */
	for (aux = wpa->next; aux; aux = next) {
		next = aux->next;
		aux->next = NULL;
		fuse_writepage_free(aux);
	}

	fuse_writepage_free(wpa);
	spin_lock(&fi->lock);
}
/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fi->lock
 */
void fuse_flush_writepages(struct inode *inode)
__releases(fi->lock)
__acquires(fi->lock)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	loff_t crop = i_size_read(inode);
	struct fuse_writepage_args *wpa;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		wpa = list_entry(fi->queued_writes.next,
				 struct fuse_writepage_args, queue_entry);
		list_del_init(&wpa->queue_entry);
		fuse_send_writepage(fm, wpa, crop);
	}
}
static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root,
						struct fuse_writepage_args *wpa)
{
	pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT;
	pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	WARN_ON(!wpa->ia.ap.num_pages);
	while (*p) {
		struct fuse_writepage_args *curr;
		pgoff_t curr_index;

		parent = *p;
		curr = rb_entry(parent, struct fuse_writepage_args,
				writepages_entry);
		WARN_ON(curr->inode != wpa->inode);
		curr_index = curr->ia.write.in.offset >> PAGE_SHIFT;

		if (idx_from >= curr_index + curr->ia.ap.num_pages)
			p = &(*p)->rb_right;
		else if (idx_to < curr_index)
			p = &(*p)->rb_left;
		else
			return curr;
	}

	rb_link_node(&wpa->writepages_entry, parent, p);
	rb_insert_color(&wpa->writepages_entry, root);
	return NULL;
}

static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa)
{
	WARN_ON(fuse_insert_writeback(root, wpa));
}
static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
			       int error)
{
	struct fuse_writepage_args *wpa =
		container_of(args, typeof(*wpa), ia.ap.args);
	struct inode *inode = wpa->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);

	mapping_set_error(inode->i_mapping, error);
	/*
	 * A writeback finished and this might have updated mtime/ctime on
	 * server making local mtime/ctime stale.  Hence invalidate attrs.
	 * Do this only if writeback_cache is not enabled.  If writeback_cache
	 * is enabled, we trust local ctime/mtime.
	 */
	if (!fc->writeback_cache)
		fuse_invalidate_attr_mask(inode, FUSE_STATX_MODIFY);
	spin_lock(&fi->lock);
	rb_erase(&wpa->writepages_entry, &fi->writepages);
	while (wpa->next) {
		struct fuse_mount *fm = get_fuse_mount(inode);
		struct fuse_write_in *inarg = &wpa->ia.write.in;
		struct fuse_writepage_args *next = wpa->next;

		wpa->next = next->next;
		next->next = NULL;
		next->ia.ff = fuse_file_get(wpa->ia.ff);
		tree_insert(&fi->writepages, next);

		/*
		 * Skip fuse_flush_writepages() to make it easy to crop requests
		 * based on primary request size.
		 *
		 * 1st case (trivial): there are no concurrent activities using
		 * fuse_set/release_nowrite.  Then we're on safe side because
		 * fuse_flush_writepages() would call fuse_send_writepage()
		 * anyway.
		 *
		 * 2nd case: someone called fuse_set_nowrite and it is waiting
		 * now for completion of all in-flight requests.  This happens
		 * rarely and no more than once per page, so this should be
		 * okay.
		 *
		 * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
		 * of fuse_set_nowrite..fuse_release_nowrite section.  The fact
		 * that fuse_set_nowrite returned implies that all in-flight
		 * requests were completed along with all of their secondary
		 * requests.  Further primary requests are blocked by negative
		 * writectr.  Hence there cannot be any in-flight requests and
		 * no invocations of fuse_writepage_end() while we're in
		 * fuse_set_nowrite..fuse_release_nowrite section.
		 */
		fuse_send_writepage(fm, next, inarg->offset + inarg->size);
	}
	fi->writectr--;
	fuse_writepage_finish(fm, wpa);
	spin_unlock(&fi->lock);
	fuse_writepage_free(wpa);
}
static struct fuse_file *__fuse_write_file_get(struct fuse_inode *fi)
{
	struct fuse_file *ff;

	spin_lock(&fi->lock);
	ff = list_first_entry_or_null(&fi->write_files, struct fuse_file,
				      write_entry);
	if (ff)
		fuse_file_get(ff);
	spin_unlock(&fi->lock);

	return ff;
}

static struct fuse_file *fuse_write_file_get(struct fuse_inode *fi)
{
	struct fuse_file *ff = __fuse_write_file_get(fi);

	WARN_ON(!ff);
	return ff;
}
int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff;
	int err;

	/*
	 * Inode is always written before the last reference is dropped and
	 * hence this should not be reached from reclaim.
	 *
	 * Writing back the inode from reclaim can deadlock if the request
	 * processing itself needs an allocation.  Allocations triggering
	 * reclaim while serving a request can't be prevented, because it can
	 * involve any number of unrelated userspace processes.
	 */
	WARN_ON(wbc->for_reclaim);

	ff = __fuse_write_file_get(fi);
	err = fuse_flush_times(inode, ff);
	if (ff)
		fuse_file_put(ff, false, false);

	return err;
}
static struct fuse_writepage_args *fuse_writepage_args_alloc(void)
{
	struct fuse_writepage_args *wpa;
	struct fuse_args_pages *ap;

	wpa = kzalloc(sizeof(*wpa), GFP_NOFS);
	if (wpa) {
		ap = &wpa->ia.ap;
		ap->num_pages = 0;
		ap->pages = fuse_pages_alloc(1, GFP_NOFS, &ap->descs);
		if (!ap->pages) {
			kfree(wpa);
			wpa = NULL;
		}
	}
	return wpa;
}
static void fuse_writepage_add_to_bucket(struct fuse_conn *fc,
					 struct fuse_writepage_args *wpa)
{
	if (!fc->sync_fs)
		return;

	rcu_read_lock();
	/* Prevent resurrection of dead bucket in unlikely race with syncfs */
	do {
		wpa->bucket = rcu_dereference(fc->curr_bucket);
	} while (unlikely(!atomic_inc_not_zero(&wpa->bucket->count)));
	rcu_read_unlock();
}
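/*
 * Background, stated with some hedging: the bucket taken here is the
 * syncfs(2) accounting epoch. fc->curr_bucket counts writepage requests
 * started in the current epoch; the matching fuse_sync_bucket_dec() in
 * fuse_writepage_free() drops the count so a concurrent syncfs can wait
 * for all writes from its epoch to drain.
 */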
static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_writepage_args *wpa;
	struct fuse_args_pages *ap;
	struct page *tmp_page;
	int error = -ENOMEM;

	set_page_writeback(page);

	wpa = fuse_writepage_args_alloc();
	if (!wpa)
		goto err;
	ap = &wpa->ia.ap;

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	error = -EIO;
	wpa->ia.ff = fuse_write_file_get(fi);
	if (!wpa->ia.ff)
		goto err_nofile;

	fuse_writepage_add_to_bucket(fc, wpa);
	fuse_write_args_fill(&wpa->ia, wpa->ia.ff, page_offset(page), 0);

	copy_highpage(tmp_page, page);
	wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
	wpa->next = NULL;
	ap->args.in_pages = true;
	ap->num_pages = 1;
	ap->pages[0] = tmp_page;
	ap->descs[0].offset = 0;
	ap->descs[0].length = PAGE_SIZE;
	ap->args.end = fuse_writepage_end;
	wpa->inode = inode;

	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
	inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);

	spin_lock(&fi->lock);
	tree_insert(&fi->writepages, wpa);
	list_add_tail(&wpa->queue_entry, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fi->lock);

	end_page_writeback(page);

	return 0;

err_nofile:
	__free_page(tmp_page);
err_free:
	kfree(wpa);
err:
	mapping_set_error(page->mapping, error);
	end_page_writeback(page);
	return error;
}
static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	struct fuse_conn *fc = get_fuse_conn(page->mapping->host);
	int err;

	if (fuse_page_is_writeback(page->mapping->host, page->index)) {
		/*
		 * ->writepages() should be called for sync() and friends.  We
		 * should only get here on direct reclaim and then we are
		 * allowed to skip a page which is already in flight
		 */
		WARN_ON(wbc->sync_mode == WB_SYNC_ALL);

		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}

	if (wbc->sync_mode == WB_SYNC_NONE &&
	    fc->num_background >= fc->congestion_threshold)
		return AOP_WRITEPAGE_ACTIVATE;

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}
struct fuse_fill_wb_data {
	struct fuse_writepage_args *wpa;
	struct fuse_file *ff;
	struct inode *inode;
	struct page **orig_pages;
	unsigned int max_pages;
};

static bool fuse_pages_realloc(struct fuse_fill_wb_data *data)
{
	struct fuse_args_pages *ap = &data->wpa->ia.ap;
	struct fuse_conn *fc = get_fuse_conn(data->inode);
	struct page **pages;
	struct fuse_page_desc *descs;
	unsigned int npages = min_t(unsigned int,
				    max_t(unsigned int, data->max_pages * 2,
					  FUSE_DEFAULT_MAX_PAGES_PER_REQ),
				    fc->max_pages);
	WARN_ON(npages <= data->max_pages);

	pages = fuse_pages_alloc(npages, GFP_NOFS, &descs);
	if (!pages)
		return false;

	memcpy(pages, ap->pages, sizeof(struct page *) * ap->num_pages);
	memcpy(descs, ap->descs, sizeof(struct fuse_page_desc) * ap->num_pages);
	kfree(ap->pages);
	ap->pages = pages;
	ap->descs = descs;
	data->max_pages = npages;

	return true;
}
static void fuse_writepages_send(struct fuse_fill_wb_data *data)
{
	struct fuse_writepage_args *wpa = data->wpa;
	struct inode *inode = data->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	int num_pages = wpa->ia.ap.num_pages;
	int i;

	wpa->ia.ff = fuse_file_get(data->ff);
	spin_lock(&fi->lock);
	list_add_tail(&wpa->queue_entry, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fi->lock);

	for (i = 0; i < num_pages; i++)
		end_page_writeback(data->orig_pages[i]);
}
/*
 * Check under fi->lock if the page is under writeback, and insert it onto the
 * rb_tree if not. Otherwise iterate auxiliary write requests, to see if there's
 * one already added for a page at this offset.  If there's none, then insert
 * this new request onto the auxiliary list, otherwise reuse the existing one by
 * swapping the new temp page with the old one.
 */
static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa,
			       struct page *page)
{
	struct fuse_inode *fi = get_fuse_inode(new_wpa->inode);
	struct fuse_writepage_args *tmp;
	struct fuse_writepage_args *old_wpa;
	struct fuse_args_pages *new_ap = &new_wpa->ia.ap;

	WARN_ON(new_ap->num_pages != 0);
	new_ap->num_pages = 1;

	spin_lock(&fi->lock);
	old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa);
	if (!old_wpa) {
		spin_unlock(&fi->lock);
		return true;
	}

	for (tmp = old_wpa->next; tmp; tmp = tmp->next) {
		pgoff_t curr_index;

		WARN_ON(tmp->inode != new_wpa->inode);
		curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT;
		if (curr_index == page->index) {
			WARN_ON(tmp->ia.ap.num_pages != 1);
			swap(tmp->ia.ap.pages[0], new_ap->pages[0]);
			break;
		}
	}

	if (!tmp) {
		new_wpa->next = old_wpa->next;
		old_wpa->next = new_wpa;
	}

	spin_unlock(&fi->lock);

	if (tmp) {
		struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode);

		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
		fuse_writepage_free(new_wpa);
	}

	return false;
}
static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page,
				     struct fuse_args_pages *ap,
				     struct fuse_fill_wb_data *data)
{
	WARN_ON(!ap->num_pages);

	/*
	 * Being under writeback is unlikely but possible.  For example direct
	 * read to an mmaped fuse file will set the page dirty twice; once when
	 * the pages are faulted with get_user_pages(), and then after the read
	 * completed.
	 */
	if (fuse_page_is_writeback(data->inode, page->index))
		return true;

	/* Reached max pages */
	if (ap->num_pages == fc->max_pages)
		return true;

	/* Reached max write bytes */
	if ((ap->num_pages + 1) * PAGE_SIZE > fc->max_write)
		return true;

	/* Discontinuity */
	if (data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)
		return true;

	/* Need to grow the pages array?  If so, did the expansion fail? */
	if (ap->num_pages == data->max_pages && !fuse_pages_realloc(data))
		return true;

	return false;
}
static int fuse_writepages_fill(struct page *page,
		struct writeback_control *wbc, void *_data)
{
	struct fuse_fill_wb_data *data = _data;
	struct fuse_writepage_args *wpa = data->wpa;
	struct fuse_args_pages *ap = &wpa->ia.ap;
	struct inode *inode = data->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct page *tmp_page;
	int err;

	if (!data->ff) {
		err = -EIO;
		data->ff = fuse_write_file_get(fi);
		if (!data->ff)
			goto out_unlock;
	}

	if (wpa && fuse_writepage_need_send(fc, page, ap, data)) {
		fuse_writepages_send(data);
		data->wpa = NULL;
	}

	err = -ENOMEM;
	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto out_unlock;

	/*
	 * The page must not be redirtied until the writeout is completed
	 * (i.e. userspace has sent a reply to the write request).  Otherwise
	 * there could be more than one temporary page instance for each real
	 * page.
	 *
	 * This is ensured by holding the page lock in page_mkwrite() while
	 * checking fuse_page_is_writeback().  We already hold the page lock
	 * since clear_page_dirty_for_io() and keep it held until we add the
	 * request to the fi->writepages list and increment ap->num_pages.
	 * After this fuse_page_is_writeback() will indicate that the page is
	 * under writeback, so we can release the page lock.
	 */
	if (data->wpa == NULL) {
		err = -ENOMEM;
		wpa = fuse_writepage_args_alloc();
		if (!wpa) {
			__free_page(tmp_page);
			goto out_unlock;
		}
		fuse_writepage_add_to_bucket(fc, wpa);

		data->max_pages = 1;

		ap = &wpa->ia.ap;
		fuse_write_args_fill(&wpa->ia, data->ff, page_offset(page), 0);
		wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
		wpa->next = NULL;
		ap->args.in_pages = true;
		ap->args.end = fuse_writepage_end;
		ap->num_pages = 0;
		wpa->inode = inode;
	}
	set_page_writeback(page);

	copy_highpage(tmp_page, page);
	ap->pages[ap->num_pages] = tmp_page;
	ap->descs[ap->num_pages].offset = 0;
	ap->descs[ap->num_pages].length = PAGE_SIZE;
	data->orig_pages[ap->num_pages] = page;

	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
	inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);

	err = 0;
	if (data->wpa) {
		/*
		 * Protected by fi->lock against concurrent access by
		 * fuse_page_is_writeback().
		 */
		spin_lock(&fi->lock);
		ap->num_pages++;
		spin_unlock(&fi->lock);
	} else if (fuse_writepage_add(wpa, page)) {
		data->wpa = wpa;
	} else {
		end_page_writeback(page);
	}
out_unlock:
	unlock_page(page);

	return err;
}
static int fuse_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_wb_data data;
	int err;

	err = -EIO;
	if (fuse_is_bad(inode))
		goto out;

	if (wbc->sync_mode == WB_SYNC_NONE &&
	    fc->num_background >= fc->congestion_threshold)
		return 0;

	data.inode = inode;
	data.wpa = NULL;
	data.ff = NULL;

	err = -ENOMEM;
	data.orig_pages = kcalloc(fc->max_pages,
				  sizeof(struct page *),
				  GFP_NOFS);
	if (!data.orig_pages)
		goto out;

	err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
	if (data.wpa) {
		WARN_ON(!data.wpa->ia.ap.num_pages);
		fuse_writepages_send(&data);
	}
	if (data.ff)
		fuse_file_put(data.ff, false, false);

	kfree(data.orig_pages);
out:
	return err;
}
/*
 * It would be worthwhile to make sure that space is reserved on disk for
 * the write, but how to implement it without killing performance needs
 * more thought.
 */
static int fuse_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct fuse_conn *fc = get_fuse_conn(file_inode(file));
	struct page *page;
	loff_t fsize;
	int err = -ENOMEM;

	WARN_ON(!fc->writeback_cache);

	page = grab_cache_page_write_begin(mapping, index);
	if (!page)
		goto error;

	fuse_wait_on_page_writeback(mapping->host, page->index);

	if (PageUptodate(page) || len == PAGE_SIZE)
		goto success;
	/*
	 * Check if the start of this page comes after the end of file, in
	 * which case the readpage can be optimized away.
	 */
	fsize = i_size_read(mapping->host);
	if (fsize <= (pos & PAGE_MASK)) {
		size_t off = pos & ~PAGE_MASK;
		if (off)
			zero_user_segment(page, 0, off);
		goto success;
	}
	err = fuse_do_readpage(file, page);
	if (err)
		goto cleanup;
success:
	*pagep = page;
	return 0;

cleanup:
	unlock_page(page);
	put_page(page);
error:
	return err;
}
static int fuse_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied,
		struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	/* Haven't copied anything?  Skip zeroing, size extending, dirtying. */
	if (!copied)
		goto unlock;

	pos += copied;
	if (!PageUptodate(page)) {
		/* Zero any unwritten bytes at the end of the page */
		size_t endoff = pos & ~PAGE_MASK;
		if (endoff)
			zero_user_segment(page, endoff, PAGE_SIZE);
		SetPageUptodate(page);
	}

	if (pos > inode->i_size)
		i_size_write(inode, pos);

	set_page_dirty(page);

unlock:
	unlock_page(page);
	put_page(page);

	return copied;
}
static int fuse_launder_folio(struct folio *folio)
{
	int err = 0;

	if (folio_clear_dirty_for_io(folio)) {
		struct inode *inode = folio->mapping->host;

		/* Serialize with pending writeback for the same page */
		fuse_wait_on_page_writeback(inode, folio->index);
		err = fuse_writepage_locked(&folio->page);
		if (!err)
			fuse_wait_on_page_writeback(inode, folio->index);
	}
	return err;
}
/*
 * Write back dirty data/metadata now (there may not be any suitable
 * open files later for data)
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	int err;

	err = write_inode_now(vma->vm_file->f_mapping->host, 1);
	mapping_set_error(vma->vm_file->f_mapping, err);
}
/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);

	file_update_time(vmf->vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}

	fuse_wait_on_page_writeback(inode, page->index);
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= fuse_page_mkwrite,
};
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct fuse_file *ff = file->private_data;

	/* DAX mmap is superior to direct_io mmap */
	if (FUSE_IS_DAX(file_inode(file)))
		return fuse_dax_mmap(file, vma);

	if (ff->open_flags & FOPEN_DIRECT_IO) {
		/* Can't provide the coherency needed for MAP_SHARED */
		if (vma->vm_flags & VM_MAYSHARE)
			return -ENODEV;

		invalidate_inode_pages2(file->f_mapping);

		return generic_file_mmap(file, vma);
	}

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		fuse_link_write_file(file);

	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}
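/*
 * Illustration (not part of this file): FOPEN_DIRECT_IO is chosen by the
 * server in its OPEN reply.  A sketch of that side, assuming the libfuse 3
 * low-level API (handler and helper names are made up):
 *
 *	static void xmp_open(fuse_req_t req, fuse_ino_t ino,
 *			     struct fuse_file_info *fi)
 *	{
 *		fi->fh = backing_fd_for(ino);	// made-up helper
 *		fi->direct_io = 1;		// becomes FOPEN_DIRECT_IO
 *		fuse_reply_open(req, fi);
 *	}
 *
 * As fuse_file_mmap() shows, such files can still be mmaped, but only
 * privately: shared, coherent mappings cannot be provided in this mode.
 */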
static int convert_fuse_file_lock(struct fuse_conn *fc,
				  const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;

		/*
		 * Convert pid into init's pid namespace.  The locks API will
		 * translate it into the caller's pid namespace.
		 */
		rcu_read_lock();
		fl->fl_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns),
				       &init_pid_ns);
		rcu_read_unlock();
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}
static void fuse_lk_fill(struct fuse_args *args, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock, struct fuse_lk_in *inarg)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;

	memset(inarg, 0, sizeof(*inarg));
	inarg->fh = ff->fh;
	inarg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	inarg->lk.start = fl->fl_start;
	inarg->lk.end = fl->fl_end;
	inarg->lk.type = fl->fl_type;
	inarg->lk.pid = pid;
	if (flock)
		inarg->lk_flags |= FUSE_LK_FLOCK;
	args->opcode = opcode;
	args->nodeid = get_node_id(inode);
	args->in_numargs = 1;
	args->in_args[0].size = sizeof(*inarg);
	args->in_args[0].value = inarg;
}
static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	struct fuse_lk_out outarg;
	int err;

	fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (!err)
		err = convert_fuse_file_lock(fm->fc, &outarg.lk, fl);

	return err;
}
static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file_inode(file);
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	struct pid *pid = fl->fl_type != F_UNLCK ? task_tgid(current) : NULL;
	pid_t pid_nr = pid_nr_ns(pid, fm->fc->pid_ns);
	int err;

	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if ((fl->fl_flags & FL_CLOSE_POSIX) == FL_CLOSE_POSIX)
		return 0;

	fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg);
	err = fuse_simple_request(fm, &args);

	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;

	return err;
}
static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}
static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_flock) {
		err = locks_lock_file_wait(file, fl);
	} else {
		struct fuse_file *ff = file->private_data;

		/* emulate flock with POSIX locks */
		ff->flock = true;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}
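/*
 * Illustration (not part of this file): FUSE_GETLK/FUSE_SETLK(W) arrive at
 * the server as getlk/setlk requests.  A passthrough sketch, assuming the
 * libfuse 3 low-level API and a backing fd stored in fi->fh (handler names
 * are made up):
 *
 *	static void xmp_getlk(fuse_req_t req, fuse_ino_t ino,
 *			      struct fuse_file_info *fi, struct flock *lock)
 *	{
 *		if (fcntl((int)fi->fh, F_GETLK, lock) == -1)
 *			fuse_reply_err(req, errno);
 *		else
 *			fuse_reply_lock(req, lock);
 *	}
 *
 *	static void xmp_setlk(fuse_req_t req, fuse_ino_t ino,
 *			      struct fuse_file_info *fi, struct flock *lock,
 *			      int sleep)
 *	{
 *		int cmd = sleep ? F_SETLKW : F_SETLK;
 *
 *		fuse_reply_err(req, fcntl((int)fi->fh, cmd, lock) == -1 ?
 *			       errno : 0);
 *	}
 *
 * A server that never negotiates remote POSIX locks leaves fc->no_lock set,
 * in which case the kernel handles locks locally via posix_lock_file().
 */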
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fm->fc->no_bmap)
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	args.opcode = FUSE_BMAP;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS)
		fm->fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}
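/*
 * Illustration (not part of this file): FUSE_BMAP is only sent for
 * block-device-backed superblocks, as the check above shows.  A sketch of
 * the server side, assuming the libfuse 3 low-level API (the block-lookup
 * helper is made up):
 *
 *	static void xmp_bmap(fuse_req_t req, fuse_ino_t ino,
 *			     size_t blocksize, uint64_t idx)
 *	{
 *		uint64_t phys = lookup_block(ino, idx);	// made-up helper
 *
 *		fuse_reply_bmap(req, phys);
 *	}
 *
 * Replying -ENOSYS sets fc->no_bmap, after which fuse_bmap() above simply
 * returns 0.
 */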
static loff_t fuse_lseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_lseek_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.whence = whence
	};
	struct fuse_lseek_out outarg;
	int err;

	if (fm->fc->no_lseek)
		goto fallback;

	args.opcode = FUSE_LSEEK;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (err) {
		if (err == -ENOSYS) {
			fm->fc->no_lseek = 1;
			goto fallback;
		}
		return err;
	}

	return vfs_setpos(file, outarg.offset, inode->i_sb->s_maxbytes);

fallback:
	err = fuse_update_attributes(inode, file, STATX_SIZE);
	if (!err)
		return generic_file_llseek(file, offset, whence);
	else
		return err;
}
static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t retval;
	struct inode *inode = file_inode(file);

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
		retval = generic_file_llseek(file, offset, whence);
		break;
	case SEEK_END:
		inode_lock(inode);
		retval = fuse_update_attributes(inode, file, STATX_SIZE);
		if (!retval)
			retval = generic_file_llseek(file, offset, whence);
		inode_unlock(inode);
		break;
	case SEEK_HOLE:
	case SEEK_DATA:
		inode_lock(inode);
		retval = fuse_lseek(file, offset, whence);
		inode_unlock(inode);
		break;
	default:
		retval = -EINVAL;
	}

	return retval;
}
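/*
 * Illustration (not part of this file): only SEEK_HOLE/SEEK_DATA are
 * forwarded to the server (see fuse_file_llseek() above).  A passthrough
 * sketch, assuming a recent libfuse 3 low-level API and a backing fd in
 * fi->fh (the handler name is made up):
 *
 *	static void xmp_lseek(fuse_req_t req, fuse_ino_t ino, off_t off,
 *			      int whence, struct fuse_file_info *fi)
 *	{
 *		off_t res = lseek((int)fi->fh, off, whence);
 *
 *		if (res == -1)
 *			fuse_reply_err(req, errno);
 *		else
 *			fuse_reply_lseek(req, res);
 *	}
 *
 * Replying -ENOSYS sets fc->no_lseek, and the kernel falls back to
 * generic_file_llseek() against its cached size.
 */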
/*
 * All files which have been polled are linked to RB tree
 * fuse_conn->polled_files which is indexed by kh.  Walk the tree and
 * find the matching one.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}
/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *parent;

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}
__poll_t fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	FUSE_ARGS(args);
	int err;

	if (fm->fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);
	inarg.events = mangle_poll(poll_requested_events(wait));

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fm->fc, ff);
	}

	args.opcode = FUSE_POLL;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);

	if (!err)
		return demangle_poll(outarg.revents);
	if (err == -ENOSYS) {
		fm->fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return EPOLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);
/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}
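/*
 * Illustration (not part of this file): the userspace half of the poll
 * protocol, assuming the libfuse 3 low-level API (helper names are made
 * up).  The kernel side above registers the file and waits; the server
 * answers the current readiness and, if a pollhandle was supplied
 * (FUSE_POLL_SCHEDULE_NOTIFY), notifies later:
 *
 *	static void xmp_poll(fuse_req_t req, fuse_ino_t ino,
 *			     struct fuse_file_info *fi,
 *			     struct fuse_pollhandle *ph)
 *	{
 *		if (ph)
 *			remember_pollhandle(ino, ph);	// made-up helper
 *		fuse_reply_poll(req, current_revents(ino));	// made-up helper
 *	}
 *
 *	// when the file later becomes ready:
 *	fuse_lowlevel_notify_poll(ph);	// ends up in fuse_notify_poll_wakeup()
 *	fuse_pollhandle_destroy(ph);
 */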
static void fuse_do_truncate(struct file *file)
{
	struct inode *inode = file->f_mapping->host;
	struct iattr attr;

	attr.ia_valid = ATTR_SIZE;
	attr.ia_size = i_size_read(inode);

	attr.ia_file = file;
	attr.ia_valid |= ATTR_FILE;

	fuse_do_setattr(file_dentry(file), &attr, file);
}
static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off)
{
	return round_up(off, fc->max_pages << PAGE_SHIFT);
}
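/*
 * Worked example: with 4 KiB pages and the default fc->max_pages of 32,
 * fc->max_pages << PAGE_SHIFT is 128 KiB, so a read shortened at EOF is
 * still rounded up to a multiple of the maximum request size.
 */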
static ssize_t
fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	ssize_t ret = 0;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	loff_t pos = 0;
	struct inode *inode;
	loff_t i_size;
	size_t count = iov_iter_count(iter), shortened = 0;
	loff_t offset = iocb->ki_pos;
	struct fuse_io_priv *io;

	pos = offset;
	inode = file->f_mapping->host;
	i_size = i_size_read(inode);

	if ((iov_iter_rw(iter) == READ) && (offset >= i_size))
		return 0;

	io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
	if (!io)
		return -ENOMEM;
	spin_lock_init(&io->lock);
	kref_init(&io->refcnt);
	io->reqs = 1;
	io->bytes = -1;
	io->size = 0;
	io->offset = offset;
	io->write = (iov_iter_rw(iter) == WRITE);
	io->err = 0;
	/*
	 * By default, we want to optimize all I/Os with async request
	 * submission to the client filesystem if supported.
	 */
	io->async = ff->fm->fc->async_dio;
	io->iocb = iocb;
	io->blocking = is_sync_kiocb(iocb);

	/* optimization for short read */
	if (io->async && !io->write && offset + count > i_size) {
		iov_iter_truncate(iter, fuse_round_up(ff->fm->fc, i_size - offset));
		shortened = count - iov_iter_count(iter);
		count -= shortened;
	}

	/*
	 * We cannot asynchronously extend the size of a file.
	 * In such a case the AIO will behave exactly like synchronous IO.
	 */
	if ((offset + count > i_size) && io->write)
		io->blocking = true;

	if (io->async && io->blocking) {
		/*
		 * Additional reference to keep io around after
		 * calling fuse_aio_complete()
		 */
		kref_get(&io->refcnt);
		io->done = &wait;
	}

	if (iov_iter_rw(iter) == WRITE) {
		ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
		fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
	} else {
		ret = __fuse_direct_read(io, iter, &pos);
	}
	iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);

	if (io->async) {
		bool blocking = io->blocking;

		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);

		/* we have a non-extending, async request, so return */
		if (!blocking)
			return -EIOCBQUEUED;

		wait_for_completion(&wait);
		ret = fuse_get_res_by_io(io);
	}

	kref_put(&io->refcnt, fuse_io_release);

	if (iov_iter_rw(iter) == WRITE) {
		fuse_write_update_attr(inode, pos, ret);
		/* For extending writes we already hold exclusive lock */
		if (ret < 0 && offset + count > i_size)
			fuse_do_truncate(file);
	}

	return ret;
}
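/*
 * Illustration (not part of this file): the async path above is used only
 * when the server negotiated FUSE_ASYNC_DIO at INIT time.  With libfuse 3
 * that opt-in looks analogous to the writeback-cache sketch earlier:
 *
 *	if (conn->capable & FUSE_CAP_ASYNC_DIO)
 *		conn->want |= FUSE_CAP_ASYNC_DIO;
 */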
static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
{
	int err = filemap_write_and_wait_range(inode->i_mapping, start, LLONG_MAX);

	if (!err)
		fuse_sync_writes(inode);

	return err;
}
static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
				loff_t length)
{
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_mount *fm = ff->fm;
	FUSE_ARGS(args);
	struct fuse_fallocate_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.length = length,
		.mode = mode
	};
	int err;
	bool block_faults = FUSE_IS_DAX(inode) &&
		(!(mode & FALLOC_FL_KEEP_SIZE) ||
		 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)));

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_ZERO_RANGE))
		return -EOPNOTSUPP;

	if (fm->fc->no_fallocate)
		return -EOPNOTSUPP;

	inode_lock(inode);
	if (block_faults) {
		filemap_invalidate_lock(inode->i_mapping);
		err = fuse_dax_break_layouts(inode, 0, 0);
		if (err)
			goto out;
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) {
		loff_t endbyte = offset + length - 1;

		err = fuse_writeback_range(inode, offset, endbyte);
		if (err)
			goto out;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + length > i_size_read(inode)) {
		err = inode_newsize_ok(inode, offset + length);
		if (err)
			goto out;
	}

	err = file_modified(file);
	if (err)
		goto out;

	if (!(mode & FALLOC_FL_KEEP_SIZE))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	args.opcode = FUSE_FALLOCATE;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fm->fc->no_fallocate = 1;
		err = -EOPNOTSUPP;
	}
	if (err)
		goto out;

	/* we could have extended the file */
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		if (fuse_write_update_attr(inode, offset + length, length))
			file_update_time(file);
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE))
		truncate_pagecache_range(inode, offset, offset + length - 1);

	fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	if (block_faults)
		filemap_invalidate_unlock(inode->i_mapping);

	inode_unlock(inode);

	fuse_flush_time_update(inode);

	return err;
}
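/*
 * Illustration (not part of this file): a passthrough server can forward
 * FUSE_FALLOCATE to fallocate(2); mode is the same FALLOC_FL_* mask that
 * was validated above.  A sketch assuming the libfuse 3 low-level API and
 * a backing fd in fi->fh (the handler name is made up):
 *
 *	static void xmp_fallocate(fuse_req_t req, fuse_ino_t ino, int mode,
 *				  off_t offset, off_t length,
 *				  struct fuse_file_info *fi)
 *	{
 *		int err = 0;
 *
 *		if (fallocate((int)fi->fh, mode, offset, length) == -1)
 *			err = errno;
 *		fuse_reply_err(req, err);
 *	}
 *
 * Replying -ENOSYS sets fc->no_fallocate, and the kernel reports
 * -EOPNOTSUPP from then on.
 */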
static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
				      struct file *file_out, loff_t pos_out,
				      size_t len, unsigned int flags)
{
	struct fuse_file *ff_in = file_in->private_data;
	struct fuse_file *ff_out = file_out->private_data;
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	struct fuse_inode *fi_out = get_fuse_inode(inode_out);
	struct fuse_mount *fm = ff_in->fm;
	struct fuse_conn *fc = fm->fc;
	FUSE_ARGS(args);
	struct fuse_copy_file_range_in inarg = {
		.fh_in = ff_in->fh,
		.off_in = pos_in,
		.nodeid_out = ff_out->nodeid,
		.fh_out = ff_out->fh,
		.off_out = pos_out,
		.len = len,
		.flags = flags
	};
	struct fuse_write_out outarg;
	ssize_t err;
	/*
	 * Mark unstable when write-back is not used, and file_out gets
	 * extended.
	 */
	bool is_unstable = (!fc->writeback_cache) &&
			   ((pos_out + len) > inode_out->i_size);

	if (fc->no_copy_file_range)
		return -EOPNOTSUPP;

	if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
		return -EXDEV;

	inode_lock(inode_in);
	err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1);
	inode_unlock(inode_in);
	if (err)
		return err;

	inode_lock(inode_out);

	err = file_modified(file_out);
	if (err)
		goto out;

	/*
	 * Write out dirty pages in the destination file before sending the COPY
	 * request to userspace.  After the request is completed, truncate off
	 * pages (including partial ones) from the cache that have been copied,
	 * since these contain stale data at that point.
	 *
	 * This should be mostly correct, but if the COPY writes to partial
	 * pages (at the start or end) and the parts not covered by the COPY are
	 * written through a memory map after calling fuse_writeback_range(),
	 * then these partial page modifications will be lost on truncation.
	 *
	 * It is unlikely that someone would rely on such mixed style
	 * modifications.  Yet this does give fewer guarantees than if the
	 * copying was performed with write(2).
	 *
	 * To fix this a mapping->invalidate_lock could be used to prevent new
	 * faults while the copy is ongoing.
	 */
	err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1);
	if (err)
		goto out;

	if (is_unstable)
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);

	args.opcode = FUSE_COPY_FILE_RANGE;
	args.nodeid = ff_in->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fc->no_copy_file_range = 1;
		err = -EOPNOTSUPP;
	}
	if (err)
		goto out;

	truncate_inode_pages_range(inode_out->i_mapping,
				   ALIGN_DOWN(pos_out, PAGE_SIZE),
				   ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1);

	file_update_time(file_out);
	fuse_write_update_attr(inode_out, pos_out + outarg.size, outarg.size);

	err = outarg.size;
out:
	if (is_unstable)
		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);

	inode_unlock(inode_out);
	file_accessed(file_in);

	fuse_flush_time_update(inode_out);

	return err;
}
static ssize_t fuse_copy_file_range(struct file *src_file, loff_t src_off,
				    struct file *dst_file, loff_t dst_off,
				    size_t len, unsigned int flags)
{
	ssize_t ret;

	ret = __fuse_copy_file_range(src_file, src_off, dst_file, dst_off,
				     len, flags);

	if (ret == -EOPNOTSUPP || ret == -EXDEV)
		ret = generic_copy_file_range(src_file, src_off, dst_file,
					      dst_off, len, flags);

	return ret;
}
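/*
 * Illustration (not part of this file): a passthrough server can forward
 * FUSE_COPY_FILE_RANGE to copy_file_range(2).  A sketch assuming the
 * libfuse 3 low-level API and backing fds in fi->fh (the handler name is
 * made up):
 *
 *	static void xmp_copy_file_range(fuse_req_t req,
 *			fuse_ino_t ino_in, off_t off_in,
 *			struct fuse_file_info *fi_in,
 *			fuse_ino_t ino_out, off_t off_out,
 *			struct fuse_file_info *fi_out,
 *			size_t len, int flags)
 *	{
 *		ssize_t res = copy_file_range((int)fi_in->fh, &off_in,
 *					      (int)fi_out->fh, &off_out,
 *					      len, flags);
 *
 *		if (res < 0)
 *			fuse_reply_err(req, errno);
 *		else
 *			fuse_reply_write(req, res);	// becomes outarg.size
 *	}
 *
 * The byte count the server reports copied drives the page-cache
 * truncation in __fuse_copy_file_range() above.
 */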
static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read_iter	= fuse_file_read_iter,
	.write_iter	= fuse_file_write_iter,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.get_unmapped_area = thp_get_unmapped_area,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	.fallocate	= fuse_file_fallocate,
	.copy_file_range = fuse_copy_file_range,
};
static const struct address_space_operations fuse_file_aops = {
	.read_folio	= fuse_read_folio,
	.readahead	= fuse_readahead,
	.writepage	= fuse_writepage,
	.writepages	= fuse_writepages,
	.launder_folio	= fuse_launder_folio,
	.dirty_folio	= filemap_dirty_folio,
	.bmap		= fuse_bmap,
	.direct_IO	= fuse_direct_IO,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
};
void fuse_init_file_inode(struct inode *inode, unsigned int flags)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;

	INIT_LIST_HEAD(&fi->write_files);
	INIT_LIST_HEAD(&fi->queued_writes);
	fi->writectr = 0;
	init_waitqueue_head(&fi->page_waitq);
	fi->writepages = RB_ROOT;

	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_inode_init(inode, flags);
}