// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001  Initial implementation for 2.4  --cel
 * 08 Jul 2002  Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003  Port to 2.5 APIs  --cel
 * 31 Mar 2004  Handle direct I/O without VFS support  --cel
 * 15 Sep 2004  Parallel async reads  --cel
 * 04 May 2005  support O_DIRECT with aio  --cel
 *
 */
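
/*
 * Illustrative sketch only (not part of the kernel code proper): an
 * application requests uncached NFS I/O by opening the file with
 * O_DIRECT and issuing ordinary reads and writes, e.g.:
 *
 *	int fd = open("/mnt/nfs/dataset", O_RDONLY | O_DIRECT);
 *	char *buf;
 *	posix_memalign((void **)&buf, 4096, 4096);  // alignment by convention only
 *	ssize_t n = read(fd, buf, 4096);            // served directly by the server
 *
 * As noted above, the client does not correct unaligned requests, and
 * no copy of the data is left in the local page cache.
 */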

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/module.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"
#include "fscache.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY         NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
static void nfs_direct_write_schedule_work(struct work_struct *work);

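/*
 * I/O reference counting on dreq->io_count: the scheduling functions
 * hold one reference while they submit requests, and
 * nfs_direct_pgio_init() takes one per outstanding pageio header.
 * put_dreq() returns true when the last reference is dropped, which is
 * the signal to complete the request as a whole.
 */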
static inline void get_dreq(struct nfs_direct_req *dreq)
{
        atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
        return atomic_dec_and_test(&dreq->io_count);
}

static void
nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
                            const struct nfs_pgio_header *hdr,
                            ssize_t dreq_len)
{
        if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
              test_bit(NFS_IOHDR_EOF, &hdr->flags)))
                return;
        if (dreq->max_count >= dreq_len) {
                dreq->max_count = dreq_len;
                if (dreq->count > dreq_len)
                        dreq->count = dreq_len;

                if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
                        dreq->error = hdr->error;
                else /* Clear outstanding error if this is EOF */
                        dreq->error = 0;
        }
}

static void
nfs_direct_count_bytes(struct nfs_direct_req *dreq,
                       const struct nfs_pgio_header *hdr)
{
        loff_t hdr_end = hdr->io_start + hdr->good_bytes;
        ssize_t dreq_len = 0;

        if (hdr_end > dreq->io_start)
                dreq_len = hdr_end - dreq->io_start;

        nfs_direct_handle_truncated(dreq, hdr, dreq_len);

        if (dreq_len > dreq->max_count)
                dreq_len = dreq->max_count;

        if (dreq->count < dreq_len)
                dreq->count = dreq_len;
}
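
/*
 * Worked example: if dreq->io_start is 4096 and a reply reports
 * hdr->io_start == 4096 with hdr->good_bytes == 8192, then
 * hdr_end == 12288 and dreq_len == 8192.  dreq->count only ever grows
 * toward dreq->max_count, and an error or EOF reply truncates both
 * count and max_count via nfs_direct_handle_truncated() above.
 */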

/**
 * nfs_swap_rw - NFS address space operation for swap I/O
 * @iocb: target I/O control block
 * @iter: I/O buffer
 *
 * Perform IO to the swap-file.  This is much like direct IO.
 */
int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
{
        ssize_t ret;

        VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);

        if (iov_iter_rw(iter) == READ)
                ret = nfs_file_direct_read(iocb, iter, true);
        else
                ret = nfs_file_direct_write(iocb, iter, true);
        if (ret < 0)
                return ret;
        return 0;
}

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
        unsigned int i;
        for (i = 0; i < npages; i++)
                put_page(pages[i]);
}

void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
                              struct nfs_direct_req *dreq)
{
        cinfo->inode = dreq->inode;
        cinfo->mds = &dreq->mds_cinfo;
        cinfo->ds = &dreq->ds_cinfo;
        cinfo->dreq = dreq;
        cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
        struct nfs_direct_req *dreq;

        dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
        if (!dreq)
                return NULL;

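        /*
         * Two references: one for the caller of nfs_direct_req_alloc(),
         * dropped via nfs_direct_req_release(), and one dropped by
         * nfs_direct_complete() once all I/O has finished.
         */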
        kref_init(&dreq->kref);
        kref_get(&dreq->kref);
        init_completion(&dreq->completion);
        INIT_LIST_HEAD(&dreq->mds_cinfo.list);
        pnfs_init_ds_commit_info(&dreq->ds_cinfo);
        INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
        spin_lock_init(&dreq->lock);

        return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
        struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

        pnfs_release_ds_info(&dreq->ds_cinfo, dreq->inode);
        if (dreq->l_ctx != NULL)
                nfs_put_lock_context(dreq->l_ctx);
        if (dreq->ctx != NULL)
                put_nfs_open_context(dreq->ctx);
        kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
        kref_put(&dreq->kref, nfs_direct_req_free);
}

ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
{
        return dreq->bytes_left;
}
EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
        ssize_t result = -EIOCBQUEUED;

        /* Async requests don't wait here */
        if (dreq->iocb)
                goto out;

        result = wait_for_completion_killable(&dreq->completion);

        if (!result) {
                result = dreq->count;
                WARN_ON_ONCE(dreq->count < 0);
        }
        if (!result)
                result = dreq->error;

out:
        return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
        struct inode *inode = dreq->inode;

        inode_dio_end(inode);

        if (dreq->iocb) {
                long res = (long) dreq->error;
                if (dreq->count != 0) {
                        res = (long) dreq->count;
                        WARN_ON_ONCE(dreq->count < 0);
                }
                dreq->iocb->ki_complete(dreq->iocb, res);
        }

        complete(&dreq->completion);

        nfs_direct_req_release(dreq);
}

static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
        unsigned long bytes = 0;
        struct nfs_direct_req *dreq = hdr->dreq;

        spin_lock(&dreq->lock);
        if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                spin_unlock(&dreq->lock);
                goto out_put;
        }

        nfs_direct_count_bytes(dreq, hdr);
        spin_unlock(&dreq->lock);

        while (!list_empty(&hdr->pages)) {
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
                struct page *page = req->wb_page;

                if (!PageCompound(page) && bytes < hdr->good_bytes &&
                    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
                        set_page_dirty(page);
                bytes += req->wb_bytes;
                nfs_list_remove_request(req);
                nfs_release_request(req);
        }
out_put:
        if (put_dreq(dreq))
                nfs_direct_complete(dreq);
        hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head, int error)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_release_request(req);
        }
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
        get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
        .error_cleanup = nfs_read_sync_pgio_error,
        .init_hdr = nfs_direct_pgio_init,
        .completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
 * bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_read_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */

static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
                                              struct iov_iter *iter,
                                              loff_t pos)
{
        struct nfs_pageio_descriptor desc;
        struct inode *inode = dreq->inode;
        ssize_t result = -EINVAL;
        size_t requested_bytes = 0;
        size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);

        nfs_pageio_init_read(&desc, dreq->inode, false,
                             &nfs_direct_read_completion_ops);
        get_dreq(dreq);
        desc.pg_dreq = dreq;
        inode_dio_begin(inode);

        while (iov_iter_count(iter)) {
                struct page **pagevec;
                size_t bytes;
                size_t pgbase;
                unsigned npages, i;

                result = iov_iter_get_pages_alloc2(iter, &pagevec,
                                                  rsize, &pgbase);
                if (result < 0)
                        break;

                bytes = result;
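                /*
                 * Round up to the number of pages actually touched,
                 * e.g. pgbase == 512 and result == 8192 spans three
                 * 4K pages: (8192 + 512 + 4095) / 4096 == 3.
                 */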
                npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
                for (i = 0; i < npages; i++) {
                        struct nfs_page *req;
                        unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
                        /* XXX do we need to do the eof zeroing found in async_filler? */
                        req = nfs_page_create_from_page(dreq->ctx, pagevec[i],
                                                        pgbase, pos, req_len);
                        if (IS_ERR(req)) {
                                result = PTR_ERR(req);
                                break;
                        }
                        if (!nfs_pageio_add_request(&desc, req)) {
                                result = desc.pg_error;
                                nfs_release_request(req);
                                break;
                        }
                        pgbase = 0;
                        bytes -= req_len;
                        requested_bytes += req_len;
                        pos += req_len;
                        dreq->bytes_left -= req_len;
                }
                nfs_direct_release_pages(pagevec, npages);
                kvfree(pagevec);
                if (result < 0)
                        break;
        }

        nfs_pageio_complete(&desc);

        /*
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
         */
        if (requested_bytes == 0) {
                inode_dio_end(inode);
                nfs_direct_req_release(dreq);
                return result < 0 ? result : -EIO;
        }

        if (put_dreq(dreq))
                nfs_direct_complete(dreq);
        return requested_bytes;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers into which to read data
 * @swap: flag indicating this is swap IO, not O_DIRECT IO
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
                             bool swap)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct nfs_direct_req *dreq;
        struct nfs_lock_context *l_ctx;
        ssize_t result, requested;
        size_t count = iov_iter_count(iter);
        nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

        dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
                file, count, (long long) iocb->ki_pos);

        result = 0;
        if (!count)
                goto out;

        task_io_account_read(count);

        result = -ENOMEM;
        dreq = nfs_direct_req_alloc();
        if (dreq == NULL)
                goto out;

        dreq->inode = inode;
        dreq->bytes_left = dreq->max_count = count;
        dreq->io_start = iocb->ki_pos;
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
        l_ctx = nfs_get_lock_context(dreq->ctx);
        if (IS_ERR(l_ctx)) {
                result = PTR_ERR(l_ctx);
                nfs_direct_req_release(dreq);
                goto out_release;
        }
        dreq->l_ctx = l_ctx;
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;

        if (user_backed_iter(iter))
                dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;

        if (!swap)
                nfs_start_io_direct(inode);

        NFS_I(inode)->read_io += count;
        requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);

        if (!swap)
                nfs_end_io_direct(inode);

        if (requested > 0) {
                result = nfs_direct_wait(dreq);
                if (result > 0) {
                        requested -= result;
                        iocb->ki_pos += result;
                }
                iov_iter_revert(iter, requested);
        } else {
                result = requested;
        }

out_release:
        nfs_direct_req_release(dreq);
out:
        return result;
}

static void nfs_direct_add_page_head(struct list_head *list,
                                     struct nfs_page *req)
{
        struct nfs_page *head = req->wb_head;

        if (!list_empty(&head->wb_list) || !nfs_lock_request(head))
                return;
        if (!list_empty(&head->wb_list)) {
                nfs_unlock_request(head);
                return;
        }
        list_add(&head->wb_list, list);
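        /* one reference for the list, one matching the lock taken above */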
        kref_get(&head->wb_kref);
        kref_get(&head->wb_kref);
}

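/*
 * Written requests may have been split into page-group subrequests
 * (for example by a pNFS layout driver).  Rejoin each group into a
 * single request before it is rescheduled; nfs_direct_add_page_head()
 * ensures the group head itself ends up on the list.
 */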
static void nfs_direct_join_group(struct list_head *list, struct inode *inode)
{
        struct nfs_page *req, *subreq;

        list_for_each_entry(req, list, wb_list) {
                if (req->wb_head != req) {
                        nfs_direct_add_page_head(&req->wb_list, req);
                        continue;
                }
                subreq = req->wb_this_page;
                if (subreq == req)
                        continue;
                do {
                        /*
                         * Remove subrequests from this list before freeing
                         * them in the call to nfs_join_page_group().
                         */
                        if (!list_empty(&subreq->wb_list)) {
                                nfs_list_remove_request(subreq);
                                nfs_release_request(subreq);
                        }
                } while ((subreq = subreq->wb_this_page) != req);
                nfs_join_page_group(req, inode);
        }
}

static void
nfs_direct_write_scan_commit_list(struct inode *inode,
                                  struct list_head *list,
                                  struct nfs_commit_info *cinfo)
{
        mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
        pnfs_recover_commit_reqs(list, cinfo);
        nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
        mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
}

static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
        struct nfs_pageio_descriptor desc;
        struct nfs_page *req, *tmp;
        LIST_HEAD(reqs);
        struct nfs_commit_info cinfo;
        LIST_HEAD(failed);

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

        nfs_direct_join_group(&reqs, dreq->inode);

        dreq->count = 0;
        dreq->max_count = 0;
        list_for_each_entry(req, &reqs, wb_list)
                dreq->max_count += req->wb_bytes;
        nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
        get_dreq(dreq);

        nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
                              &nfs_direct_write_completion_ops);
        desc.pg_dreq = dreq;

        list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
                /* Bump the transmission count */
                req->wb_nio++;
                if (!nfs_pageio_add_request(&desc, req)) {
                        nfs_list_move_request(req, &failed);
                        spin_lock(&cinfo.inode->i_lock);
                        dreq->flags = 0;
                        if (desc.pg_error < 0)
                                dreq->error = desc.pg_error;
                        else
                                dreq->error = -EIO;
                        spin_unlock(&cinfo.inode->i_lock);
                }
                nfs_release_request(req);
        }
        nfs_pageio_complete(&desc);

        while (!list_empty(&failed)) {
                req = nfs_list_entry(failed.next);
                nfs_list_remove_request(req);
                nfs_unlock_and_release_request(req);
        }

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq);
}

static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
        const struct nfs_writeverf *verf = data->res.verf;
        struct nfs_direct_req *dreq = data->dreq;
        struct nfs_commit_info cinfo;
        struct nfs_page *req;
        int status = data->task.tk_status;

        trace_nfs_direct_commit_complete(dreq);

        if (status < 0) {
                /* Errors in commit are fatal */
                dreq->error = status;
                dreq->max_count = 0;
                dreq->count = 0;
                dreq->flags = NFS_ODIRECT_DONE;
        } else {
                status = dreq->error;
        }

        nfs_init_cinfo_from_dreq(&cinfo, dreq);

        while (!list_empty(&data->pages)) {
                req = nfs_list_entry(data->pages.next);
                nfs_list_remove_request(req);
                if (status >= 0 && !nfs_write_match_verf(verf, req)) {
                        dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
                        /*
                         * Despite the reboot, the write was successful,
                         * so reset wb_nio.
                         */
                        req->wb_nio = 0;
                        nfs_mark_request_commit(req, NULL, &cinfo, 0);
                } else /* Error or match */
                        nfs_release_request(req);
                nfs_unlock_and_release_request(req);
        }

        if (nfs_commit_end(cinfo.mds))
                nfs_direct_write_complete(dreq);
}

static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
                struct nfs_page *req)
{
        struct nfs_direct_req *dreq = cinfo->dreq;

        trace_nfs_direct_resched_write(dreq);

        spin_lock(&dreq->lock);
        if (dreq->flags != NFS_ODIRECT_DONE)
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
        spin_unlock(&dreq->lock);
        nfs_mark_request_commit(req, NULL, cinfo, 0);
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
        .completion = nfs_direct_commit_complete,
        .resched_write = nfs_direct_resched_write,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
        int res;
        struct nfs_commit_info cinfo;
        LIST_HEAD(mds_list);

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
        res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
        if (res < 0) /* res == -ENOMEM */
                nfs_direct_write_reschedule(dreq);
}

static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
{
        struct nfs_commit_info cinfo;
        struct nfs_page *req;
        LIST_HEAD(reqs);

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

        while (!list_empty(&reqs)) {
                req = nfs_list_entry(reqs.next);
                nfs_list_remove_request(req);
                nfs_release_request(req);
                nfs_unlock_and_release_request(req);
        }
}

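/*
 * Deferred completion, run from nfsiod: depending on how the writes
 * finished, either send a COMMIT for data the server acknowledged as
 * unstable (NFS_ODIRECT_DO_COMMIT), resend the writes themselves
 * (NFS_ODIRECT_RESCHED_WRITES), or release everything and complete
 * the request.
 */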
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
        struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
        int flags = dreq->flags;

        dreq->flags = 0;
        switch (flags) {
                case NFS_ODIRECT_DO_COMMIT:
                        nfs_direct_commit_schedule(dreq);
                        break;
                case NFS_ODIRECT_RESCHED_WRITES:
                        nfs_direct_write_reschedule(dreq);
                        break;
                default:
                        nfs_direct_write_clear_reqs(dreq);
                        nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
                        nfs_direct_complete(dreq);
        }
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
{
        trace_nfs_direct_write_complete(dreq);
        queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
}

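/*
 * Per-header write completion: account the bytes that reached the
 * server, then decide the fate of each page: queue it for COMMIT if
 * the server replied with an unstable write, requeue it on the commit
 * list if the writes must be resent, otherwise just unlock and
 * release it.
 */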
static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
        struct nfs_direct_req *dreq = hdr->dreq;
        struct nfs_commit_info cinfo;
        struct nfs_page *req = nfs_list_entry(hdr->pages.next);
        int flags = NFS_ODIRECT_DONE;

        trace_nfs_direct_write_completion(dreq);

        nfs_init_cinfo_from_dreq(&cinfo, dreq);

        spin_lock(&dreq->lock);
        if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                spin_unlock(&dreq->lock);
                goto out_put;
        }

        nfs_direct_count_bytes(dreq, hdr);
        if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags)) {
                if (!dreq->flags)
                        dreq->flags = NFS_ODIRECT_DO_COMMIT;
                flags = dreq->flags;
        }
        spin_unlock(&dreq->lock);

        while (!list_empty(&hdr->pages)) {
                req = nfs_list_entry(hdr->pages.next);
                nfs_list_remove_request(req);
                if (flags == NFS_ODIRECT_DO_COMMIT) {
                        kref_get(&req->wb_kref);
                        memcpy(&req->wb_verf, &hdr->verf.verifier,
                               sizeof(req->wb_verf));
                        nfs_mark_request_commit(req, hdr->lseg, &cinfo,
                                hdr->ds_commit_idx);
                } else if (flags == NFS_ODIRECT_RESCHED_WRITES) {
                        kref_get(&req->wb_kref);
                        nfs_mark_request_commit(req, NULL, &cinfo, 0);
                }
                nfs_unlock_and_release_request(req);
        }

out_put:
        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq);
        hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head, int error)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_unlock_and_release_request(req);
        }
}

static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
{
        struct nfs_direct_req *dreq = hdr->dreq;

        trace_nfs_direct_write_reschedule_io(dreq);

        spin_lock(&dreq->lock);
        if (dreq->error == 0) {
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
                /* fake unstable write to let common nfs resend pages */
                hdr->verf.committed = NFS_UNSTABLE;
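                /*
                 * Count everything through the end of this RPC
                 * (args.offset + args.count), measured from io_start,
                 * as good so the common layer resends the full range.
                 */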
                hdr->good_bytes = hdr->args.offset + hdr->args.count -
                        hdr->io_start;
        }
        spin_unlock(&dreq->lock);
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
        .error_cleanup = nfs_write_sync_pgio_error,
        .init_hdr = nfs_direct_pgio_init,
        .completion = nfs_direct_write_completion,
        .reschedule_io = nfs_direct_write_reschedule_io,
};


/*
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
 * bail and stop sending more writes.  Write length accounting is
 * handled automatically by nfs_direct_write_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
                                               struct iov_iter *iter,
                                               loff_t pos, int ioflags)
{
        struct nfs_pageio_descriptor desc;
        struct inode *inode = dreq->inode;
        ssize_t result = 0;
        size_t requested_bytes = 0;
        size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);

        trace_nfs_direct_write_schedule_iovec(dreq);

        nfs_pageio_init_write(&desc, inode, ioflags, false,
                              &nfs_direct_write_completion_ops);
        desc.pg_dreq = dreq;
        get_dreq(dreq);
        inode_dio_begin(inode);

        NFS_I(inode)->write_io += iov_iter_count(iter);
        while (iov_iter_count(iter)) {
                struct page **pagevec;
                size_t bytes;
                size_t pgbase;
                unsigned npages, i;

                result = iov_iter_get_pages_alloc2(iter, &pagevec,
                                                  wsize, &pgbase);
                if (result < 0)
                        break;

                bytes = result;
                npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
                for (i = 0; i < npages; i++) {
                        struct nfs_page *req;
                        unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

                        req = nfs_page_create_from_page(dreq->ctx, pagevec[i],
                                                        pgbase, pos, req_len);
                        if (IS_ERR(req)) {
                                result = PTR_ERR(req);
                                break;
                        }

                        if (desc.pg_error < 0) {
                                nfs_free_request(req);
                                result = desc.pg_error;
                                break;
                        }

                        nfs_lock_request(req);
                        if (!nfs_pageio_add_request(&desc, req)) {
                                result = desc.pg_error;
                                nfs_unlock_and_release_request(req);
                                break;
                        }
                        pgbase = 0;
                        bytes -= req_len;
                        requested_bytes += req_len;
                        pos += req_len;
                        dreq->bytes_left -= req_len;
                }
                nfs_direct_release_pages(pagevec, npages);
                kvfree(pagevec);
                if (result < 0)
                        break;
        }
        nfs_pageio_complete(&desc);

        /*
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
         */
        if (requested_bytes == 0) {
                inode_dio_end(inode);
                nfs_direct_req_release(dreq);
                return result < 0 ? result : -EIO;
        }

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq);
        return requested_bytes;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers from which to write data
 * @swap: flag indicating this is swap IO, not O_DIRECT IO
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
                              bool swap)
{
        ssize_t result, requested;
        size_t count;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct nfs_direct_req *dreq;
        struct nfs_lock_context *l_ctx;
        loff_t pos, end;

        dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
                file, iov_iter_count(iter), (long long) iocb->ki_pos);

        if (swap)
                /* bypass generic checks */
                result = iov_iter_count(iter);
        else
                result = generic_write_checks(iocb, iter);
        if (result <= 0)
                return result;
        count = result;
        nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

        pos = iocb->ki_pos;
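        /* last page index (inclusive) covered by this write; used for
         * invalidate_inode_pages2_range() below */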
        end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;

        task_io_account_write(count);

        result = -ENOMEM;
        dreq = nfs_direct_req_alloc();
        if (!dreq)
                goto out;

        dreq->inode = inode;
        dreq->bytes_left = dreq->max_count = count;
        dreq->io_start = pos;
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
        l_ctx = nfs_get_lock_context(dreq->ctx);
        if (IS_ERR(l_ctx)) {
                result = PTR_ERR(l_ctx);
                nfs_direct_req_release(dreq);
                goto out_release;
        }
        dreq->l_ctx = l_ctx;
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;
        pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode);

        if (swap) {
                requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
                                                            FLUSH_STABLE);
        } else {
                nfs_start_io_direct(inode);

                requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
                                                            FLUSH_COND_STABLE);

                if (mapping->nrpages) {
                        invalidate_inode_pages2_range(mapping,
                                                      pos >> PAGE_SHIFT, end);
                }

                nfs_end_io_direct(inode);
        }

        if (requested > 0) {
                result = nfs_direct_wait(dreq);
                if (result > 0) {
                        requested -= result;
                        iocb->ki_pos = pos + result;
                        /* XXX: should check the generic_write_sync retval */
                        generic_write_sync(iocb, result);
                }
                iov_iter_revert(iter, requested);
        } else {
                result = requested;
        }
        nfs_fscache_invalidate(inode, FSCACHE_INVAL_DIO_WRITE);
out_release:
        nfs_direct_req_release(dreq);
out:
        return result;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
        nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
                                                sizeof(struct nfs_direct_req),
                                                0, (SLAB_RECLAIM_ACCOUNT|
                                                        SLAB_MEM_SPREAD),
                                                NULL);
        if (nfs_direct_cachep == NULL)
                return -ENOMEM;

        return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
        kmem_cache_destroy(nfs_direct_cachep);
}