// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains vfs address (mmap) ops for 9P2000.
 *
 * Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/netfs.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"
/**
 * v9fs_issue_read - Issue a read from 9P
 * @subreq: The read to make
 */
static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct p9_fid *fid = rreq->netfs_priv;
	struct iov_iter to;
	loff_t pos = subreq->start + subreq->transferred;
	size_t len = subreq->len - subreq->transferred;
	int total, err;

	iov_iter_xarray(&to, READ, &rreq->mapping->i_pages, pos, len);

	total = p9_client_read(fid, pos, &to, &err);

	/* if we just extended the file size, any portion not in
	 * cache won't be on server and is zeroes */
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	netfs_subreq_terminated(subreq, err ?: total, false);
}
/**
 * v9fs_init_request - Initialise a read request
 * @rreq: The read request
 * @file: The file being read from
 */
static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct inode *inode = file_inode(file);
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct p9_fid *fid = file->private_data;

	/* we might need to read from a fid that was opened write-only
	 * for read-modify-write of page cache, use the writeback fid
	 * for that */
	if (rreq->origin == NETFS_READ_FOR_WRITE &&
	    (fid->mode & O_ACCMODE) == O_WRONLY) {
		fid = v9inode->writeback_fid;
		BUG_ON(!fid);
	}

	refcount_inc(&fid->count);
	rreq->netfs_priv = fid;
	return 0;
}
/**
 * v9fs_free_request - Cleanup request initialized by v9fs_init_request
 * @rreq: The I/O request to clean up
 */
static void v9fs_free_request(struct netfs_io_request *rreq)
{
	struct p9_fid *fid = rreq->netfs_priv;

	/* Drop the fid reference taken in v9fs_init_request() */
	p9_client_clunk(fid);
}
/**
 * v9fs_begin_cache_operation - Begin a cache operation for a read
 * @rreq: The read request
 */
static int v9fs_begin_cache_operation(struct netfs_io_request *rreq)
{
#ifdef CONFIG_9P_FSCACHE
	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));

	return fscache_begin_read_operation(&rreq->cache_resources, cookie);
#else
	return -ENOBUFS;
#endif
}
const struct netfs_request_ops v9fs_req_ops = {
	.init_request		= v9fs_init_request,
	.free_request		= v9fs_free_request,
	.begin_cache_operation	= v9fs_begin_cache_operation,
	.issue_read		= v9fs_issue_read,
};
/**
 * v9fs_release_folio - release the private state associated with a folio
 * @folio: The folio to be released
 * @gfp: The caller's allocation restrictions
 *
 * Returns true if the page can be released, false otherwise.
 */
static bool v9fs_release_folio(struct folio *folio, gfp_t gfp)
{
	struct inode *inode = folio_inode(folio);

	if (folio_test_private(folio))
		return false;
#ifdef CONFIG_9P_FSCACHE
	if (folio_test_fscache(folio)) {
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return false;
		folio_wait_fscache(folio);
	}
#endif
	fscache_note_page_release(v9fs_inode_cookie(V9FS_I(inode)));
	return true;
}
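/*
 * Wait for any in-flight write to the cache to complete before the folio
 * contents are invalidated.
 */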
static void v9fs_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	folio_wait_fscache(folio);
}
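/*
 * Completion handler for fscache_write_to_cache().  If the write to the
 * cache failed with anything other than -ENOBUFS, invalidate the cached
 * copy against the current inode version.
 */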
static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
				     bool was_async)
{
	struct v9fs_inode *v9inode = priv;
	__le32 version;

	if (IS_ERR_VALUE(transferred_or_error) &&
	    transferred_or_error != -ENOBUFS) {
		version = cpu_to_le32(v9inode->qid.version);
		fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
				   i_size_read(&v9inode->netfs.inode), 0);
	}
}
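/*
 * Write a locked, dirty folio back to the server over the inode's writeback
 * fid, and copy it into fscache as well if this inode is being cached.
 */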
static int v9fs_vfs_write_folio_locked(struct folio *folio)
{
	struct inode *inode = folio_inode(folio);
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct fscache_cookie *cookie = v9fs_inode_cookie(v9inode);
	loff_t start = folio_pos(folio);
	loff_t i_size = i_size_read(inode);
	struct iov_iter from;
	size_t len = folio_size(folio);
	int err;

	if (start >= i_size)
		return 0; /* Simultaneous truncation occurred */

	len = min_t(loff_t, i_size - start, len);

	iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len);

	/* We should have writeback_fid always set */
	BUG_ON(!v9inode->writeback_fid);

	folio_wait_fscache(folio);
	folio_start_writeback(folio);

	p9_client_write(v9inode->writeback_fid, start, &from, &err);

	if (err == 0 &&
	    fscache_cookie_enabled(cookie) &&
	    test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
		folio_start_fscache(folio);
		fscache_write_to_cache(v9fs_inode_cookie(v9inode),
				       folio_mapping(folio), start, len, i_size,
				       v9fs_write_to_cache_done, v9inode,
				       true);
	}

	folio_end_writeback(folio);
	return err;
}
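/*
 * ->writepage: flush a single folio.  An -EAGAIN result from the locked
 * write is treated as transient and the folio is redirtied; any other
 * error is recorded against the mapping.
 */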
static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int retval;

	p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);

	retval = v9fs_vfs_write_folio_locked(folio);
	if (retval < 0) {
		if (retval == -EAGAIN) {
			folio_redirty_for_writepage(wbc, folio);
			retval = 0;
		} else {
			mapping_set_error(folio_mapping(folio), retval);
		}
	} else
		retval = 0;

	folio_unlock(folio);
	return retval;
}
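/*
 * ->launder_folio: write the folio back synchronously if it is dirty and
 * wait for any outstanding write to the cache to finish.
 */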
static int v9fs_launder_folio(struct folio *folio)
{
	int retval;

	if (folio_clear_dirty_for_io(folio)) {
		retval = v9fs_vfs_write_folio_locked(folio);
		if (retval)
			return retval;
	}
	folio_wait_fscache(folio);
	return 0;
}
/**
 * v9fs_direct_IO - 9P address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: The data/buffer to use
 *
 * The presence of v9fs_direct_IO() in the address space ops vector
 * allows open() O_DIRECT flags which would have failed otherwise.
 *
 * In the non-cached mode, we shunt off direct read and write requests before
 * the VFS gets them, so this method should never be called.
 *
 * Direct IO is not 'yet' supported in the cached mode. Hence when
 * this routine is called through generic_file_aio_read(), the read/write fails
 * with an error.
 */
static ssize_t
v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	ssize_t n;
	int err = 0;

	if (iov_iter_rw(iter) == WRITE) {
		n = p9_client_write(file->private_data, pos, iter, &err);
		if (n) {
			struct inode *inode = file_inode(file);
			loff_t i_size = i_size_read(inode);

			if (pos + n > i_size)
				inode_add_bytes(inode, pos + n - i_size);
		}
	} else {
		n = p9_client_read(file->private_data, pos, iter, &err);
	}
	return n ? n : err;
}
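/*
 * ->write_begin: have netfs bring the region being written uptodate (from
 * the server or the cache as needed) and hand back the locked folio that
 * the caller will copy the data into.
 */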
static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
			    loff_t pos, unsigned int len,
			    struct page **subpagep, void **fsdata)
{
	int retval;
	struct folio *folio;
	struct v9fs_inode *v9inode = V9FS_I(mapping->host);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	BUG_ON(!v9inode->writeback_fid);

	/* Prefetch area to be written into the cache if we're caching this
	 * file. We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	retval = netfs_write_begin(&v9inode->netfs, filp, mapping, pos, len, &folio, fsdata);
	if (retval < 0)
		return retval;

	*subpagep = &folio->page;
	return retval;
}
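/*
 * ->write_end: mark the copied data uptodate and the folio dirty, and
 * extend i_size if the write went past the current end of file.
 */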
static int v9fs_write_end(struct file *filp, struct address_space *mapping,
			  loff_t pos, unsigned int len, unsigned int copied,
			  struct page *subpage, void *fsdata)
{
	loff_t last_pos = pos + copied;
	struct folio *folio = page_folio(subpage);
	struct inode *inode = mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	if (!folio_test_uptodate(folio)) {
		if (unlikely(copied < len)) {
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size) {
		inode_add_bytes(inode, last_pos - inode->i_size);
		i_size_write(inode, last_pos);
		fscache_update_cookie(v9fs_inode_cookie(v9inode), NULL, &last_pos);
	}
	folio_mark_dirty(folio);
out:
	folio_unlock(folio);
	folio_put(folio);

	return copied;
}
#ifdef CONFIG_9P_FSCACHE
/*
 * Mark a page as having been made dirty and thus needing writeback. We also
 * need to pin the cache object to write back to.
 */
static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct v9fs_inode *v9inode = V9FS_I(mapping->host);

	return fscache_dirty_folio(mapping, folio, v9fs_inode_cookie(v9inode));
}
#else
#define v9fs_dirty_folio filemap_dirty_folio
#endif
const struct address_space_operations v9fs_addr_operations = {
	.read_folio = netfs_read_folio,
	.readahead = netfs_readahead,
	.dirty_folio = v9fs_dirty_folio,
	.writepage = v9fs_vfs_writepage,
	.write_begin = v9fs_write_begin,
	.write_end = v9fs_write_end,
	.release_folio = v9fs_release_folio,
	.invalidate_folio = v9fs_invalidate_folio,
	.launder_folio = v9fs_launder_folio,
	.direct_IO = v9fs_direct_IO,
};