#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/iversion.h>
+#include <linux/xarray.h>
+#include <linux/fscache.h>
+#include <linux/netfs.h>
#include "internal.h"
#include "iostat.h"
fscache_unuse_cookie(cookie, &auxdata, &i_size);
}
-/*
- * Fallback page reading interface.
- */
-static int fscache_fallback_read_page(struct inode *inode, struct page *page)
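+/*
+ * Read a folio via netfs if the inode is fscache-enabled; -ENOBUFS
+ * tells the caller to fall back to the regular NFS read path.
+ */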
+int nfs_netfs_read_folio(struct file *file, struct folio *folio)
{
- struct netfs_cache_resources cres;
- struct fscache_cookie *cookie = netfs_i_cookie(&NFS_I(inode)->netfs);
- struct iov_iter iter;
- struct bio_vec bvec;
- int ret;
-
- memset(&cres, 0, sizeof(cres));
- bvec_set_page(&bvec, page, PAGE_SIZE, 0);
- iov_iter_bvec(&iter, ITER_DEST, &bvec, 1, PAGE_SIZE);
-
- ret = fscache_begin_read_operation(&cres, cookie);
- if (ret < 0)
- return ret;
-
- ret = fscache_read(&cres, page_offset(page), &iter, NETFS_READ_HOLE_FAIL,
- NULL, NULL);
- fscache_end_operation(&cres);
- return ret;
+ if (!netfs_inode(folio_inode(folio))->cache)
+ return -ENOBUFS;
+
+ return netfs_read_folio(file, folio);
}
-/*
- * Fallback page writing interface.
- */
-static int fscache_fallback_write_page(struct inode *inode, struct page *page,
- bool no_space_allocated_yet)
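+/*
+ * Readahead via netfs, mirroring nfs_netfs_read_folio(): -ENOBUFS
+ * means "no cache attached, fall back to normal NFS readahead".
+ */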
+int nfs_netfs_readahead(struct readahead_control *ractl)
{
- struct netfs_cache_resources cres;
- struct fscache_cookie *cookie = netfs_i_cookie(&NFS_I(inode)->netfs);
- struct iov_iter iter;
- struct bio_vec bvec;
- loff_t start = page_offset(page);
- size_t len = PAGE_SIZE;
- int ret;
-
- memset(&cres, 0, sizeof(cres));
- bvec_set_page(&bvec, page, PAGE_SIZE, 0);
- iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
-
- ret = fscache_begin_write_operation(&cres, cookie);
- if (ret < 0)
- return ret;
-
- ret = cres.ops->prepare_write(&cres, &start, &len, i_size_read(inode),
- no_space_allocated_yet);
- if (ret == 0)
- ret = fscache_write(&cres, page_offset(page), &iter, NULL, NULL);
- fscache_end_operation(&cres);
- return ret;
+ struct inode *inode = ractl->mapping->host;
+
+ if (!netfs_inode(inode)->cache)
+ return -ENOBUFS;
+
+ netfs_readahead(ractl);
+ return 0;
}
-/*
- * Retrieve a page from fscache
- */
-int __nfs_fscache_read_page(struct inode *inode, struct page *page)
+atomic_t nfs_netfs_debug_id;
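+/*
+ * Pin the file's open context for the lifetime of the netfs request;
+ * nfs_netfs_free_request() drops it again.
+ */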
+static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *file)
{
- int ret;
+ rreq->netfs_priv = get_nfs_open_context(nfs_file_open_context(file));
+ rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id);
- trace_nfs_fscache_read_page(inode, page);
- if (PageChecked(page)) {
- ClearPageChecked(page);
- ret = 1;
- goto out;
- }
+ return 0;
+}
- ret = fscache_fallback_read_page(inode, page);
- if (ret < 0) {
- nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL);
- SetPageChecked(page);
- goto out;
- }
+static void nfs_netfs_free_request(struct netfs_io_request *rreq)
+{
+ put_nfs_open_context(rreq->netfs_priv);
+}
- /* Read completed synchronously */
- nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK);
- SetPageUptodate(page);
- ret = 0;
-out:
- trace_nfs_fscache_read_page_exit(inode, page, ret);
- return ret;
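+/*
+ * Hand the inode's fscache cookie to netfs so that reads may be
+ * satisfied from the local cache where possible.
+ */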
+static inline int nfs_netfs_begin_cache_operation(struct netfs_io_request *rreq)
+{
+ return fscache_begin_read_operation(&rreq->cache_resources,
+ netfs_i_cookie(netfs_inode(rreq->inode)));
}
-/*
- * Store a newly fetched page in fscache. We can be certain there's no page
- * stored in the cache as yet otherwise we would've read it from there.
- */
-void __nfs_fscache_write_page(struct inode *inode, struct page *page)
+static struct nfs_netfs_io_data *nfs_netfs_alloc(struct netfs_io_subrequest *sreq)
{
- int ret;
+ struct nfs_netfs_io_data *netfs;
+
+ netfs = kzalloc(sizeof(*netfs), GFP_KERNEL_ACCOUNT);
+ if (!netfs)
+ return NULL;
+ netfs->sreq = sreq;
+ refcount_set(&netfs->refcount, 1);
+ return netfs;
+}
- trace_nfs_fscache_write_page(inode, page);
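+/*
+ * Clamp each netfs subrequest to the mount's rsize so that no
+ * subrequest asks for more than one NFS READ can return.
+ */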
+static bool nfs_netfs_clamp_length(struct netfs_io_subrequest *sreq)
+{
+ size_t rsize = NFS_SB(sreq->rreq->inode->i_sb)->rsize;
- ret = fscache_fallback_write_page(inode, page, true);
+ sreq->len = min(sreq->len, rsize);
+ return true;
+}
- if (ret != 0) {
- nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL);
- nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_UNCACHED);
- } else {
- nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_WRITTEN_OK);
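+/*
+ * Issue a read subrequest: walk the pagecache pages it covers and feed
+ * each one into the NFS pageio machinery.  The xarray lock is dropped
+ * around nfs_read_add_folio(), which may sleep.
+ */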
+static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
+{
+ struct nfs_netfs_io_data *netfs;
+ struct nfs_pageio_descriptor pgio;
+ struct inode *inode = sreq->rreq->inode;
+ struct nfs_open_context *ctx = sreq->rreq->netfs_priv;
+ struct page *page;
+ int err;
+ pgoff_t start = (sreq->start + sreq->transferred) >> PAGE_SHIFT;
+ pgoff_t last = ((sreq->start + sreq->len -
+ sreq->transferred - 1) >> PAGE_SHIFT);
+ XA_STATE(xas, &sreq->rreq->mapping->i_pages, start);
+
+ nfs_pageio_init_read(&pgio, inode, false,
+ &nfs_async_read_completion_ops);
+
+ netfs = nfs_netfs_alloc(sreq);
+ if (!netfs)
+ return netfs_subreq_terminated(sreq, -ENOMEM, false);
+
+ pgio.pg_netfs = netfs; /* used in completion */
+
+ xas_lock(&xas);
+ xas_for_each(&xas, page, last) {
+ /* nfs_read_add_folio() may schedule() due to pNFS layout and other RPCs */
+ xas_pause(&xas);
+ xas_unlock(&xas);
+ err = nfs_read_add_folio(&pgio, ctx, page_folio(page));
+ if (err < 0) {
+ netfs->error = err;
+ goto out;
+ }
+ xas_lock(&xas);
}
- trace_nfs_fscache_write_page_exit(inode, page, ret);
+ xas_unlock(&xas);
+out:
+ nfs_pageio_complete_read(&pgio);
+ nfs_netfs_put(netfs);
}
+
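+/*
+ * Called as each RPC is carved out of a subrequest; every RPC takes a
+ * reference on the nfs_netfs_io_data so nfs_netfs_put() reports
+ * completion to netfs only once, when the last RPC finishes.
+ */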
+void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr)
+{
+ struct nfs_netfs_io_data *netfs = hdr->netfs;
+
+ if (!netfs)
+ return;
+
+ nfs_netfs_get(netfs);
+}
+
+int nfs_netfs_folio_unlock(struct folio *folio)
+{
+ struct inode *inode = folio_file_mapping(folio)->host;
+
+ /*
+ * If fscache is enabled, netfs will unlock pages.
+ */
+ if (netfs_inode(inode)->cache)
+ return 0;
+
+ return 1;
+}
+
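+/*
+ * Per-RPC read completion: note EOF, accumulate the transferred count
+ * or the error, then drop the reference taken in
+ * nfs_netfs_initiate_read().
+ */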
+void nfs_netfs_read_completion(struct nfs_pgio_header *hdr)
+{
+ struct nfs_netfs_io_data *netfs = hdr->netfs;
+ struct netfs_io_subrequest *sreq;
+
+ if (!netfs)
+ return;
+
+ sreq = netfs->sreq;
+ if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &sreq->flags);
+
+ if (hdr->error)
+ netfs->error = hdr->error;
+ else
+ atomic64_add(hdr->res.count, &netfs->transferred);
+
+ nfs_netfs_put(netfs);
+ hdr->netfs = NULL;
+}
+
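+/*
+ * Request ops handed to netfs via netfs_inode_init().  For a cached
+ * read the flow is roughly: netfs_read_folio()/netfs_readahead() ->
+ * init_request -> begin_cache_operation -> clamp_length + issue_read
+ * per subrequest -> netfs_subreq_terminated() (from nfs_netfs_put())
+ * -> free_request.
+ */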
+const struct netfs_request_ops nfs_netfs_ops = {
+ .init_request = nfs_netfs_init_request,
+ .free_request = nfs_netfs_free_request,
+ .begin_cache_operation = nfs_netfs_begin_cache_operation,
+ .issue_read = nfs_netfs_issue_read,
+ .clamp_length = nfs_netfs_clamp_length,
+};
u64 change_attr;
};
+struct nfs_netfs_io_data {
+ /*
+ * NFS may split a netfs_io_subrequest into multiple RPCs, each
+ * with its own read completion.  netfs allows only one call to
+ * netfs_subreq_terminated() per subrequest, so the refcount here
+ * doubles as a marker of the last RPC completion: only the final
+ * reference drop calls back into netfs.
+ */
+ refcount_t refcount;
+ struct netfs_io_subrequest *sreq;
+
+ /*
+ * Final disposition of the netfs_io_subrequest, sent in
+ * netfs_subreq_terminated()
+ */
+ atomic64_t transferred;
+ int error;
+};
+
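+/* One reference per outstanding RPC; see nfs_netfs_initiate_read(). */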
+static inline void nfs_netfs_get(struct nfs_netfs_io_data *netfs)
+{
+ refcount_inc(&netfs->refcount);
+}
+
+static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs)
+{
+ ssize_t final_len;
+
+ /* Only the last RPC completion should call netfs_subreq_terminated() */
+ if (!refcount_dec_and_test(&netfs->refcount))
+ return;
+
+ /*
+ * The NFS pageio interface may read a complete page, even when netfs
+ * only asked for a partial page. Specifically, this may be seen when
+ * one thread is truncating a file while another one is reading the last
+ * page of the file.
+ * Correct the final length here to be no larger than the netfs subrequest
+ * length, and thus avoid netfs's "Subreq overread" warning message.
+ */
+ final_len = min_t(s64, netfs->sreq->len, atomic64_read(&netfs->transferred));
+ netfs_subreq_terminated(netfs->sreq, netfs->error ?: final_len, false);
+ kfree(netfs);
+}
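+
+/*
+ * Initialise the inode's netfs context; called from nfs_alloc_inode().
+ */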
+static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi)
+{
+ netfs_inode_init(&nfsi->netfs, &nfs_netfs_ops);
+}
+extern void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr);
+extern void nfs_netfs_read_completion(struct nfs_pgio_header *hdr);
+extern int nfs_netfs_folio_unlock(struct folio *folio);
+
/*
* fscache.c
*/
extern void nfs_fscache_clear_inode(struct inode *);
extern void nfs_fscache_open_file(struct inode *, struct file *);
extern void nfs_fscache_release_file(struct inode *, struct file *);
-
-extern int __nfs_fscache_read_page(struct inode *, struct page *);
-extern void __nfs_fscache_write_page(struct inode *, struct page *);
+extern int nfs_netfs_readahead(struct readahead_control *ractl);
+extern int nfs_netfs_read_folio(struct file *file, struct folio *folio);
static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
{
if (current_is_kswapd() || !(gfp & __GFP_FS))
return false;
folio_wait_fscache(folio);
- fscache_note_page_release(netfs_i_cookie(&NFS_I(folio->mapping->host)->netfs));
- nfs_inc_fscache_stats(folio->mapping->host,
- NFSIOS_FSCACHE_PAGES_UNCACHED);
}
+ fscache_note_page_release(netfs_i_cookie(netfs_inode(folio->mapping->host)));
return true;
}
-/*
- * Retrieve a page from an inode data storage object.
- */
-static inline int nfs_fscache_read_page(struct inode *inode, struct page *page)
-{
- if (netfs_inode(inode)->cache)
- return __nfs_fscache_read_page(inode, page);
- return -ENOBUFS;
-}
-
-/*
- * Store a page newly fetched from the server in an inode data storage object
- * in the cache.
- */
-static inline void nfs_fscache_write_page(struct inode *inode,
- struct page *page)
-{
- if (netfs_inode(inode)->cache)
- __nfs_fscache_write_page(inode, page);
-}
-
static inline void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *auxdata,
struct inode *inode)
{
return "no ";
}
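+/*
+ * Pass the nfs_netfs_io_data pointer between the pageio descriptor and
+ * the pgio header so read completion can find the originating netfs
+ * subrequest.
+ */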
+static inline void nfs_netfs_set_pgio_header(struct nfs_pgio_header *hdr,
+ struct nfs_pageio_descriptor *desc)
+{
+ hdr->netfs = desc->pg_netfs;
+}
+static inline void nfs_netfs_set_pageio_descriptor(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr)
+{
+ desc->pg_netfs = hdr->netfs;
+}
+static inline void nfs_netfs_reset_pageio_descriptor(struct nfs_pageio_descriptor *desc)
+{
+ desc->pg_netfs = NULL;
+}
#else /* CONFIG_NFS_FSCACHE */
+static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi) {}
+static inline void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr) {}
+static inline void nfs_netfs_read_completion(struct nfs_pgio_header *hdr) {}
+static inline int nfs_netfs_folio_unlock(struct folio *folio)
+{
+ return 1;
+}
static inline void nfs_fscache_release_super_cookie(struct super_block *sb) {}
static inline void nfs_fscache_init_inode(struct inode *inode) {}
static inline void nfs_fscache_open_file(struct inode *inode,
struct file *filp) {}
static inline void nfs_fscache_release_file(struct inode *inode, struct file *file) {}
-
-static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
+static inline int nfs_netfs_readahead(struct readahead_control *ractl)
{
- return true; /* may release folio */
+ return -ENOBUFS;
}
-static inline int nfs_fscache_read_page(struct inode *inode, struct page *page)
+static inline int nfs_netfs_read_folio(struct file *file, struct folio *folio)
{
return -ENOBUFS;
}
-static inline void nfs_fscache_write_page(struct inode *inode, struct page *page) {}
+
+static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
+{
+ return true; /* may release folio */
+}
static inline void nfs_fscache_invalidate(struct inode *inode, int flags) {}
static inline const char *nfs_server_fscache_state(struct nfs_server *server)
{
return "no ";
}
-
+static inline void nfs_netfs_set_pgio_header(struct nfs_pgio_header *hdr,
+ struct nfs_pageio_descriptor *desc) {}
+static inline void nfs_netfs_set_pageio_descriptor(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr) {}
+static inline void nfs_netfs_reset_pageio_descriptor(struct nfs_pageio_descriptor *desc) {}
#endif /* CONFIG_NFS_FSCACHE */
#endif /* _NFS_FSCACHE_H */
#ifdef CONFIG_NFS_V4_2
nfsi->xattr_cache = NULL;
#endif
+ nfs_netfs_inode_init(nfsi);
+
return &nfsi->vfs_inode;
}
EXPORT_SYMBOL_GPL(nfs_alloc_inode);
extern int nfs_client_for_each_server(struct nfs_client *clp,
int (*fn)(struct nfs_server *, void *),
void *data);
+#ifdef CONFIG_NFS_FSCACHE
+extern const struct netfs_request_ops nfs_netfs_ops;
+#endif
+
/* io.c */
extern void nfs_start_io_read(struct inode *inode);
extern void nfs_end_io_read(struct inode *inode);
struct nfs_pgio_completion_ops;
/* read.c */
+extern const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
extern void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
struct inode *inode, bool force_mds,
const struct nfs_pgio_completion_ops *compl_ops);
+extern int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
+ struct nfs_open_context *ctx,
+ struct folio *folio);
+extern void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio);
extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
#include "internal.h"
#include "pnfs.h"
#include "nfstrace.h"
+#include "fscache.h"
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
hdr->good_bytes = mirror->pg_count;
hdr->io_completion = desc->pg_io_completion;
hdr->dreq = desc->pg_dreq;
+ nfs_netfs_set_pgio_header(hdr, desc);
hdr->release = release;
hdr->completion_ops = desc->pg_completion_ops;
if (hdr->completion_ops->init_hdr)
desc->pg_lseg = NULL;
desc->pg_io_completion = NULL;
desc->pg_dreq = NULL;
+ nfs_netfs_reset_pageio_descriptor(desc);
desc->pg_bsize = bsize;
desc->pg_mirror_count = 1;
desc->pg_io_completion = hdr->io_completion;
desc->pg_dreq = hdr->dreq;
+ nfs_netfs_set_pageio_descriptor(desc, hdr);
list_splice_init(&hdr->pages, &pages);
while (!list_empty(&pages)) {
struct nfs_page *req = nfs_list_entry(pages.next);
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
-static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
+const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;
static struct kmem_cache *nfs_rdata_cachep;
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);
-static void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
+void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
{
struct nfs_pgio_mirror *pgm;
unsigned long npages;
static void nfs_readpage_release(struct nfs_page *req, int error)
{
- struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
struct folio *folio = nfs_page_to_folio(req);
- dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
- (unsigned long long)NFS_FILEID(inode), req->wb_bytes,
- (long long)req_offset(req));
-
if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
folio_set_error(folio);
- if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
- if (folio_test_uptodate(folio))
- nfs_fscache_write_page(inode, &folio->page);
- folio_unlock(folio);
- }
+ if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE))
+ if (nfs_netfs_folio_unlock(folio))
+ folio_unlock(folio);
+
nfs_release_request(req);
}
nfs_list_remove_request(req);
nfs_readpage_release(req, error);
}
+ nfs_netfs_read_completion(hdr);
+
out:
hdr->release(hdr);
}
struct rpc_task_setup *task_setup_data, int how)
{
rpc_ops->read_setup(hdr, msg);
+ nfs_netfs_initiate_read(hdr);
trace_nfs_initiate_read(hdr);
}
}
}
-static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
+const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
.error_cleanup = nfs_async_read_error,
.completion = nfs_read_completion,
};
nfs_readpage_retry(task, hdr);
}
-static int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
- struct nfs_open_context *ctx,
- struct folio *folio)
+int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
+ struct nfs_open_context *ctx,
+ struct folio *folio)
{
struct inode *inode = folio_file_mapping(folio)->host;
struct nfs_server *server = NFS_SERVER(inode);
aligned_len = min_t(unsigned int, ALIGN(len, rsize), fsize);
- if (!IS_SYNC(inode)) {
- error = nfs_fscache_read_page(inode, &folio->page);
- if (error == 0)
- goto out_unlock;
- }
-
new = nfs_page_create_from_folio(ctx, folio, 0, aligned_len);
- if (IS_ERR(new))
- goto out_error;
+ if (IS_ERR(new)) {
+ error = PTR_ERR(new);
+ goto out;
+ }
if (len < fsize)
folio_zero_segment(folio, len, fsize);
goto out;
}
return 0;
-out_error:
- error = PTR_ERR(new);
-out_unlock:
- folio_unlock(folio);
out:
return error;
}
if (NFS_STALE(inode))
goto out_unlock;
+ ret = nfs_netfs_read_folio(file, folio);
+ if (!ret)
+ goto out;
+
ctx = get_nfs_open_context(nfs_file_open_context(file));
xchg(&ctx->error, 0);
ret = nfs_read_add_folio(&pgio, ctx, folio);
if (ret)
- goto out;
+ goto out_put;
nfs_pageio_complete_read(&pgio);
ret = pgio.pg_error < 0 ? pgio.pg_error : 0;
if (!folio_test_uptodate(folio) && !ret)
ret = xchg(&ctx->error, 0);
}
-out:
+out_put:
put_nfs_open_context(ctx);
+out:
trace_nfs_aop_readpage_done(inode, folio, ret);
return ret;
out_unlock:
folio_unlock(folio);
- trace_nfs_aop_readpage_done(inode, folio, ret);
- return ret;
+ goto out;
}
void nfs_readahead(struct readahead_control *ractl)
if (NFS_STALE(inode))
goto out;
+ ret = nfs_netfs_readahead(ractl);
+ if (!ret)
+ goto out;
+
if (file == NULL) {
ret = -EBADF;
ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
struct pnfs_layout_segment *pg_lseg;
struct nfs_io_completion *pg_io_completion;
struct nfs_direct_req *pg_dreq;
+#ifdef CONFIG_NFS_FSCACHE
+ void *pg_netfs;
+#endif
unsigned int pg_bsize; /* default bsize for mirrors */
u32 pg_mirror_count;
const struct nfs_rw_ops *rw_ops;
struct nfs_io_completion *io_completion;
struct nfs_direct_req *dreq;
+#ifdef CONFIG_NFS_FSCACHE
+ void *netfs;
+#endif
int pnfs_error;
int error; /* merge with pnfs_error */