// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
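 *
 * For example, an application typically requests uncached I/O simply by
 * opening the file with O_DIRECT (an illustrative userspace sketch; the
 * path and transfer size below are hypothetical):
 *
 *	int fd = open("/mnt/nfs/data", O_RDWR | O_DIRECT);
 *	void *buf;
 *
 *	posix_memalign(&buf, 4096, 1048576);
 *	pread(fd, buf, 1048576, 0);
 *
 * Each such pread() is sent to the server as one or more NFS READs,
 * and the result is never inserted into the page cache.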
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/module.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"

#define NFSDBG_FACILITY         NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

struct nfs_direct_req {
        struct kref             kref;           /* release manager */

        /* I/O parameters */
        struct nfs_open_context *ctx;           /* file open context info */
        struct nfs_lock_context *l_ctx;         /* Lock context info */
        struct kiocb *          iocb;           /* controlling i/o request */
        struct inode *          inode;          /* target file of i/o */

        /* completion state */
        atomic_t                io_count;       /* i/os we're waiting for */
        spinlock_t              lock;           /* protect completion state */

        loff_t                  io_start;       /* Start offset for I/O */
        ssize_t                 count,          /* bytes actually processed */
                                max_count,      /* max expected count */
                                bytes_left,     /* bytes left to be sent */
                                error;          /* any reported error */
        struct completion       completion;     /* wait for i/o completion */

        /* commit state */
        struct nfs_mds_commit_info mds_cinfo;   /* Storage for cinfo */
        struct pnfs_ds_commit_info ds_cinfo;    /* Storage for cinfo */
        struct work_struct      work;
        int                     flags;
        /* for write */
#define NFS_ODIRECT_DO_COMMIT           (1)     /* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES      (2)     /* write verification failed */
        /* for read */
#define NFS_ODIRECT_SHOULD_DIRTY        (3)     /* dirty user-space page after read */
#define NFS_ODIRECT_DONE                INT_MAX /* the direct I/O is over; no more commits or rescheduling */
};

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
static void nfs_direct_write_schedule_work(struct work_struct *work);

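/*
 * dreq->io_count counts outstanding page I/O plus one reference held by
 * the function that is scheduling the I/O; whoever drops io_count to
 * zero (via put_dreq()) is responsible for completing the request.
 */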
static inline void get_dreq(struct nfs_direct_req *dreq)
{
        atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
        return atomic_dec_and_test(&dreq->io_count);
}

static void
nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
                            const struct nfs_pgio_header *hdr,
                            ssize_t dreq_len)
{
        if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
              test_bit(NFS_IOHDR_EOF, &hdr->flags)))
                return;
        if (dreq->max_count >= dreq_len) {
                dreq->max_count = dreq_len;
                if (dreq->count > dreq_len)
                        dreq->count = dreq_len;

                if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
                        dreq->error = hdr->error;
                else /* Clear outstanding error if this is EOF */
                        dreq->error = 0;
        }
}

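/*
 * Fold one completed pgio header into the dreq byte counts: an error or
 * EOF in the header first truncates dreq->max_count (and dreq->count)
 * via nfs_direct_handle_truncated(), after which dreq->count only ever
 * grows toward dreq->max_count.
 */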
static void
nfs_direct_count_bytes(struct nfs_direct_req *dreq,
                       const struct nfs_pgio_header *hdr)
{
        loff_t hdr_end = hdr->io_start + hdr->good_bytes;
        ssize_t dreq_len = 0;

        if (hdr_end > dreq->io_start)
                dreq_len = hdr_end - dreq->io_start;

        nfs_direct_handle_truncated(dreq, hdr, dreq_len);

        if (dreq_len > dreq->max_count)
                dreq_len = dreq->max_count;

        if (dreq->count < dreq_len)
                dreq->count = dreq_len;
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: I/O buffer
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O. However, for most direct IO, we
 * shunt off direct read and write requests before the VFS gets them,
 * so this method is only ever called for swap.
 */
ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        struct inode *inode = iocb->ki_filp->f_mapping->host;

        /* we only support swap file calling nfs_direct_IO */
        if (!IS_SWAPFILE(inode))
                return 0;

        VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);

        if (iov_iter_rw(iter) == READ)
                return nfs_file_direct_read(iocb, iter);
        return nfs_file_direct_write(iocb, iter);
}

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
        unsigned int i;
        for (i = 0; i < npages; i++)
                put_page(pages[i]);
}

void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
                              struct nfs_direct_req *dreq)
{
        cinfo->inode = dreq->inode;
        cinfo->mds = &dreq->mds_cinfo;
        cinfo->ds = &dreq->ds_cinfo;
        cinfo->dreq = dreq;
        cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
        struct nfs_direct_req *dreq;

        dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
        if (!dreq)
                return NULL;

        kref_init(&dreq->kref);
        kref_get(&dreq->kref);
        init_completion(&dreq->completion);
        INIT_LIST_HEAD(&dreq->mds_cinfo.list);
        pnfs_init_ds_commit_info(&dreq->ds_cinfo);
        INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
        spin_lock_init(&dreq->lock);

        return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
        struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

        pnfs_release_ds_info(&dreq->ds_cinfo, dreq->inode);
        if (dreq->l_ctx != NULL)
                nfs_put_lock_context(dreq->l_ctx);
        if (dreq->ctx != NULL)
                put_nfs_open_context(dreq->ctx);
        kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
        kref_put(&dreq->kref, nfs_direct_req_free);
}

ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
{
        return dreq->bytes_left;
}
EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
        ssize_t result = -EIOCBQUEUED;

        /* Async requests don't wait here */
        if (dreq->iocb)
                goto out;

        result = wait_for_completion_killable(&dreq->completion);

        if (!result) {
                result = dreq->count;
                WARN_ON_ONCE(dreq->count < 0);
        }
        if (!result)
                result = dreq->error;

out:
        return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
        struct inode *inode = dreq->inode;

        inode_dio_end(inode);

        if (dreq->iocb) {
                long res = (long) dreq->error;
                if (dreq->count != 0) {
                        res = (long) dreq->count;
                        WARN_ON_ONCE(dreq->count < 0);
                }
                dreq->iocb->ki_complete(dreq->iocb, res, 0);
        }

        complete(&dreq->completion);

        nfs_direct_req_release(dreq);
}

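/*
 * Per-header read completion: account the bytes this header actually
 * transferred, mark the destination pages dirty when they belong to
 * user space (NFS_ODIRECT_SHOULD_DIRTY), and release the nfs_page
 * requests.  The final put_dreq() completes the whole direct request.
 */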
static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
        unsigned long bytes = 0;
        struct nfs_direct_req *dreq = hdr->dreq;

        spin_lock(&dreq->lock);
        if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                spin_unlock(&dreq->lock);
                goto out_put;
        }

        nfs_direct_count_bytes(dreq, hdr);
        spin_unlock(&dreq->lock);

        while (!list_empty(&hdr->pages)) {
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
                struct page *page = req->wb_page;

                if (!PageCompound(page) && bytes < hdr->good_bytes &&
                    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
                        set_page_dirty(page);
                bytes += req->wb_bytes;
                nfs_list_remove_request(req);
                nfs_release_request(req);
        }
out_put:
        if (put_dreq(dreq))
                nfs_direct_complete(dreq);
        hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head, int error)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_release_request(req);
        }
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
        get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
        .error_cleanup = nfs_read_sync_pgio_error,
        .init_hdr = nfs_direct_pgio_init,
        .completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If iov_iter_get_pages_alloc() or nfs_create_request()
 * fails, bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_read_completion().  If no
 * requests have been sent at all, just return an error.
 */

static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
                                              struct iov_iter *iter,
                                              loff_t pos)
{
        struct nfs_pageio_descriptor desc;
        struct inode *inode = dreq->inode;
        ssize_t result = -EINVAL;
        size_t requested_bytes = 0;
        size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);

        nfs_pageio_init_read(&desc, dreq->inode, false,
                             &nfs_direct_read_completion_ops);
        get_dreq(dreq);
        desc.pg_dreq = dreq;
        inode_dio_begin(inode);

        while (iov_iter_count(iter)) {
                struct page **pagevec;
                size_t bytes;
                size_t pgbase;
                unsigned npages, i;

                result = iov_iter_get_pages_alloc(iter, &pagevec,
                                                  rsize, &pgbase);
                if (result < 0)
                        break;

                bytes = result;
                iov_iter_advance(iter, bytes);
                npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
                for (i = 0; i < npages; i++) {
                        struct nfs_page *req;
                        unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
                        /* XXX do we need to do the eof zeroing found in async_filler? */
                        req = nfs_create_request(dreq->ctx, pagevec[i],
                                                 pgbase, req_len);
                        if (IS_ERR(req)) {
                                result = PTR_ERR(req);
                                break;
                        }
                        req->wb_index = pos >> PAGE_SHIFT;
                        req->wb_offset = pos & ~PAGE_MASK;
                        if (!nfs_pageio_add_request(&desc, req)) {
                                result = desc.pg_error;
                                nfs_release_request(req);
                                break;
                        }
                        pgbase = 0;
                        bytes -= req_len;
                        requested_bytes += req_len;
                        pos += req_len;
                        dreq->bytes_left -= req_len;
                }
                nfs_direct_release_pages(pagevec, npages);
                kvfree(pagevec);
                if (result < 0)
                        break;
        }

        nfs_pageio_complete(&desc);

        /*
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
         */
        if (requested_bytes == 0) {
                inode_dio_end(inode);
                nfs_direct_req_release(dreq);
                return result < 0 ? result : -EIO;
        }

        if (put_dreq(dreq))
                nfs_direct_complete(dreq);
        return requested_bytes;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers into which to read data
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct nfs_direct_req *dreq;
        struct nfs_lock_context *l_ctx;
        ssize_t result, requested;
        size_t count = iov_iter_count(iter);
        nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

        dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
                file, count, (long long) iocb->ki_pos);

        result = 0;
        if (!count)
                goto out;

        task_io_account_read(count);

        result = -ENOMEM;
        dreq = nfs_direct_req_alloc();
        if (dreq == NULL)
                goto out;

        dreq->inode = inode;
        dreq->bytes_left = dreq->max_count = count;
        dreq->io_start = iocb->ki_pos;
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
        l_ctx = nfs_get_lock_context(dreq->ctx);
        if (IS_ERR(l_ctx)) {
                result = PTR_ERR(l_ctx);
                nfs_direct_req_release(dreq);
                goto out_release;
        }
        dreq->l_ctx = l_ctx;
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;

        if (iter_is_iovec(iter))
                dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;

        nfs_start_io_direct(inode);

        NFS_I(inode)->read_io += count;
        requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);

        nfs_end_io_direct(inode);

        if (requested > 0) {
                result = nfs_direct_wait(dreq);
                if (result > 0) {
                        requested -= result;
                        iocb->ki_pos += result;
                }
                iov_iter_revert(iter, requested);
        } else {
                result = requested;
        }

out_release:
        nfs_direct_req_release(dreq);
out:
        return result;
}

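/*
 * Written requests may have been split into sub-page subrequests;
 * before they can be resent, drop the extra subrequests and merge each
 * page group back into its head request.
 */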
static void
nfs_direct_join_group(struct list_head *list, struct inode *inode)
{
        struct nfs_page *req, *next;

        list_for_each_entry(req, list, wb_list) {
                if (req->wb_head != req || req->wb_this_page == req)
                        continue;
                for (next = req->wb_this_page;
                                next != req->wb_head;
                                next = next->wb_this_page) {
                        nfs_list_remove_request(next);
                        nfs_release_request(next);
                }
                nfs_join_page_group(req, inode);
        }
}

static void
nfs_direct_write_scan_commit_list(struct inode *inode,
                                  struct list_head *list,
                                  struct nfs_commit_info *cinfo)
{
        mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
        pnfs_recover_commit_reqs(list, cinfo);
        nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
        mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
}

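/*
 * Resend, as stable writes, every request still sitting on the commit
 * list.  The byte counts are rebuilt from the requests being resent;
 * any request that cannot be re-queued fails the dreq with the pageio
 * error (or -EIO).
 */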
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
        struct nfs_pageio_descriptor desc;
        struct nfs_page *req, *tmp;
        LIST_HEAD(reqs);
        struct nfs_commit_info cinfo;
        LIST_HEAD(failed);

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

        nfs_direct_join_group(&reqs, dreq->inode);

        dreq->count = 0;
        dreq->max_count = 0;
        list_for_each_entry(req, &reqs, wb_list)
                dreq->max_count += req->wb_bytes;
        nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
        get_dreq(dreq);

        nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
                              &nfs_direct_write_completion_ops);
        desc.pg_dreq = dreq;

        list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
                /* Bump the transmission count */
                req->wb_nio++;
                if (!nfs_pageio_add_request(&desc, req)) {
                        nfs_list_move_request(req, &failed);
                        spin_lock(&cinfo.inode->i_lock);
                        dreq->flags = 0;
                        if (desc.pg_error < 0)
                                dreq->error = desc.pg_error;
                        else
                                dreq->error = -EIO;
                        spin_unlock(&cinfo.inode->i_lock);
                }
                nfs_release_request(req);
        }
        nfs_pageio_complete(&desc);

        while (!list_empty(&failed)) {
                req = nfs_list_entry(failed.next);
                nfs_list_remove_request(req);
                nfs_unlock_and_release_request(req);
        }

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq);
}

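/*
 * A COMMIT reply either retires the committed requests or, when the
 * write verifier shows that the server rebooted between the WRITE and
 * the COMMIT, puts them back on the commit list so the data can be
 * resent.
 */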
static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
        const struct nfs_writeverf *verf = data->res.verf;
        struct nfs_direct_req *dreq = data->dreq;
        struct nfs_commit_info cinfo;
        struct nfs_page *req;
        int status = data->task.tk_status;

        if (status < 0) {
                /* Errors in commit are fatal */
                dreq->error = status;
                dreq->max_count = 0;
                dreq->count = 0;
                dreq->flags = NFS_ODIRECT_DONE;
        } else if (dreq->flags == NFS_ODIRECT_DONE)
                status = dreq->error;

        nfs_init_cinfo_from_dreq(&cinfo, dreq);

        while (!list_empty(&data->pages)) {
                req = nfs_list_entry(data->pages.next);
                nfs_list_remove_request(req);
                if (status >= 0 && !nfs_write_match_verf(verf, req)) {
                        dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
                        /*
                         * Despite the reboot, the write was successful,
                         * so reset wb_nio.
                         */
                        req->wb_nio = 0;
                        nfs_mark_request_commit(req, NULL, &cinfo, 0);
                } else /* Error or match */
                        nfs_release_request(req);
                nfs_unlock_and_release_request(req);
        }

        if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
                nfs_direct_write_complete(dreq);
}

static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
                struct nfs_page *req)
{
        struct nfs_direct_req *dreq = cinfo->dreq;

        spin_lock(&dreq->lock);
        if (dreq->flags != NFS_ODIRECT_DONE)
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
        spin_unlock(&dreq->lock);
        nfs_mark_request_commit(req, NULL, cinfo, 0);
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
        .completion = nfs_direct_commit_complete,
        .resched_write = nfs_direct_resched_write,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
        int res;
        struct nfs_commit_info cinfo;
        LIST_HEAD(mds_list);

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
        res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
        if (res < 0) /* res == -ENOMEM */
                nfs_direct_write_reschedule(dreq);
}

static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
{
        struct nfs_commit_info cinfo;
        struct nfs_page *req;
        LIST_HEAD(reqs);

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

        while (!list_empty(&reqs)) {
                req = nfs_list_entry(reqs.next);
                nfs_list_remove_request(req);
                nfs_release_request(req);
                nfs_unlock_and_release_request(req);
        }
}

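/*
 * Direct write completion state machine, run from nfsiod.  An unstable
 * reply (NFS_ODIRECT_DO_COMMIT) triggers a COMMIT, a verifier mismatch
 * (NFS_ODIRECT_RESCHED_WRITES) resends the data as stable writes, and
 * anything else tears down the remaining requests, invalidates the
 * mapping, and completes the dreq.
 */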
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
        struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
        int flags = dreq->flags;

        dreq->flags = 0;
        switch (flags) {
                case NFS_ODIRECT_DO_COMMIT:
                        nfs_direct_commit_schedule(dreq);
                        break;
                case NFS_ODIRECT_RESCHED_WRITES:
                        nfs_direct_write_reschedule(dreq);
                        break;
                default:
                        nfs_direct_write_clear_reqs(dreq);
                        nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
                        nfs_direct_complete(dreq);
        }
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
{
        queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
}

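/*
 * Per-header write completion: record the bytes written and, when the
 * server replied with an unstable commit, stash the write verifier and
 * queue the requests for COMMIT.  The final put_dreq() kicks the write
 * completion state machine above.
 */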
static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
        struct nfs_direct_req *dreq = hdr->dreq;
        struct nfs_commit_info cinfo;
        struct nfs_page *req = nfs_list_entry(hdr->pages.next);
        int flags = NFS_ODIRECT_DONE;

        nfs_init_cinfo_from_dreq(&cinfo, dreq);

        spin_lock(&dreq->lock);
        if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                spin_unlock(&dreq->lock);
                goto out_put;
        }

        nfs_direct_count_bytes(dreq, hdr);
        if (hdr->good_bytes != 0 && nfs_write_need_commit(hdr)) {
                if (!dreq->flags)
                        dreq->flags = NFS_ODIRECT_DO_COMMIT;
                flags = dreq->flags;
        }
        spin_unlock(&dreq->lock);

        while (!list_empty(&hdr->pages)) {

                req = nfs_list_entry(hdr->pages.next);
                nfs_list_remove_request(req);
                if (flags == NFS_ODIRECT_DO_COMMIT) {
                        kref_get(&req->wb_kref);
                        memcpy(&req->wb_verf, &hdr->verf.verifier,
                               sizeof(req->wb_verf));
                        nfs_mark_request_commit(req, hdr->lseg, &cinfo,
                                hdr->ds_commit_idx);
                } else if (flags == NFS_ODIRECT_RESCHED_WRITES) {
                        kref_get(&req->wb_kref);
                        nfs_mark_request_commit(req, NULL, &cinfo, 0);
                }
                nfs_unlock_and_release_request(req);
        }

out_put:
        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq);
        hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head, int error)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_unlock_and_release_request(req);
        }
}

static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
{
        struct nfs_direct_req *dreq = hdr->dreq;

        spin_lock(&dreq->lock);
        if (dreq->error == 0) {
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
                /* fake unstable write to let common nfs resend pages */
                hdr->verf.committed = NFS_UNSTABLE;
                hdr->good_bytes = hdr->args.offset + hdr->args.count -
                        hdr->io_start;
        }
        spin_unlock(&dreq->lock);
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
        .error_cleanup = nfs_write_sync_pgio_error,
        .init_hdr = nfs_direct_pgio_init,
        .completion = nfs_direct_write_completion,
        .reschedule_io = nfs_direct_write_reschedule_io,
};

/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If iov_iter_get_pages_alloc() or nfs_create_request()
 * fails, bail and stop sending more writes.  Write length accounting
 * is handled automatically by nfs_direct_write_completion().  If no
 * requests have been sent at all, just return an error.
 *
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
                                               struct iov_iter *iter,
                                               loff_t pos)
{
        struct nfs_pageio_descriptor desc;
        struct inode *inode = dreq->inode;
        ssize_t result = 0;
        size_t requested_bytes = 0;
        size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);

        nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false,
                              &nfs_direct_write_completion_ops);
        desc.pg_dreq = dreq;
        get_dreq(dreq);
        inode_dio_begin(inode);

        NFS_I(inode)->write_io += iov_iter_count(iter);
        while (iov_iter_count(iter)) {
                struct page **pagevec;
                size_t bytes;
                size_t pgbase;
                unsigned npages, i;

                result = iov_iter_get_pages_alloc(iter, &pagevec,
                                                  wsize, &pgbase);
                if (result < 0)
                        break;

                bytes = result;
                iov_iter_advance(iter, bytes);
                npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
                for (i = 0; i < npages; i++) {
                        struct nfs_page *req;
                        unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

                        req = nfs_create_request(dreq->ctx, pagevec[i],
                                                 pgbase, req_len);
                        if (IS_ERR(req)) {
                                result = PTR_ERR(req);
                                break;
                        }

                        if (desc.pg_error < 0) {
                                nfs_free_request(req);
                                result = desc.pg_error;
                                break;
                        }

                        nfs_lock_request(req);
                        req->wb_index = pos >> PAGE_SHIFT;
                        req->wb_offset = pos & ~PAGE_MASK;
                        if (!nfs_pageio_add_request(&desc, req)) {
                                result = desc.pg_error;
                                nfs_unlock_and_release_request(req);
                                break;
                        }
                        pgbase = 0;
                        bytes -= req_len;
                        requested_bytes += req_len;
                        pos += req_len;
                        dreq->bytes_left -= req_len;
                }
                nfs_direct_release_pages(pagevec, npages);
                kvfree(pagevec);
                if (result < 0)
                        break;
        }
        nfs_pageio_complete(&desc);

        /*
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
         */
        if (requested_bytes == 0) {
                inode_dio_end(inode);
                nfs_direct_req_release(dreq);
                return result < 0 ? result : -EIO;
        }

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq);
        return requested_bytes;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers from which to write data
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
{
        ssize_t result, requested;
        size_t count;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct nfs_direct_req *dreq;
        struct nfs_lock_context *l_ctx;
        loff_t pos, end;

        dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
                file, iov_iter_count(iter), (long long) iocb->ki_pos);

        result = generic_write_checks(iocb, iter);
        if (result <= 0)
                return result;
        count = result;
        nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

        pos = iocb->ki_pos;
        end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;

        task_io_account_write(count);

        result = -ENOMEM;
        dreq = nfs_direct_req_alloc();
        if (!dreq)
                goto out;

        dreq->inode = inode;
        dreq->bytes_left = dreq->max_count = count;
        dreq->io_start = pos;
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
        l_ctx = nfs_get_lock_context(dreq->ctx);
        if (IS_ERR(l_ctx)) {
                result = PTR_ERR(l_ctx);
                nfs_direct_req_release(dreq);
                goto out_release;
        }
        dreq->l_ctx = l_ctx;
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;
        pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode);

        nfs_start_io_direct(inode);

        requested = nfs_direct_write_schedule_iovec(dreq, iter, pos);

        if (mapping->nrpages) {
                invalidate_inode_pages2_range(mapping,
                                              pos >> PAGE_SHIFT, end);
        }

        nfs_end_io_direct(inode);

        if (requested > 0) {
                result = nfs_direct_wait(dreq);
                if (result > 0) {
                        requested -= result;
                        iocb->ki_pos = pos + result;
                        /* XXX: should check the generic_write_sync retval */
                        generic_write_sync(iocb, result);
                }
                iov_iter_revert(iter, requested);
        } else {
                result = requested;
        }
out_release:
        nfs_direct_req_release(dreq);
out:
        return result;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
        nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
                                                sizeof(struct nfs_direct_req),
                                                0, (SLAB_RECLAIM_ACCOUNT|
                                                        SLAB_MEM_SPREAD),
                                                NULL);
        if (nfs_direct_cachep == NULL)
                return -ENOMEM;

        return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
        kmem_cache_destroy(nfs_direct_cachep);
}