1 /*
2  *  pNFS functions to call and manage layout drivers.
3  *
4  *  Copyright (c) 2002 [year of first publication]
5  *  The Regents of the University of Michigan
6  *  All Rights Reserved
7  *
8  *  Dean Hildebrand <dhildebz@umich.edu>
9  *
10  *  Permission is granted to use, copy, create derivative works, and
11  *  redistribute this software and such derivative works for any purpose,
12  *  so long as the name of the University of Michigan is not used in
13  *  any advertising or publicity pertaining to the use or distribution
14  *  of this software without specific, written prior authorization. If
15  *  the above copyright notice or any other identification of the
16  *  University of Michigan is included in any copy of any portion of
17  *  this software, then the disclaimer below must also be included.
18  *
19  *  This software is provided as is, without representation or warranty
20  *  of any kind either express or implied, including without limitation
21  *  the implied warranties of merchantability, fitness for a particular
22  *  purpose, or noninfringement.  The Regents of the University of
23  *  Michigan shall not be liable for any damages, including special,
24  *  indirect, incidental, or consequential damages, with respect to any
25  *  claim arising out of or in connection with the use of the software,
26  *  even if it has been or is hereafter advised of the possibility of
27  *  such damages.
28  */
29
30 #include <linux/nfs_fs.h>
31 #include <linux/nfs_page.h>
32 #include <linux/module.h>
33 #include "internal.h"
34 #include "pnfs.h"
35 #include "iostat.h"
36
37 #define NFSDBG_FACILITY         NFSDBG_PNFS
38
39 /* Locking:
40  *
41  * pnfs_spinlock:
42  *      protects pnfs_modules_tbl.
43  */
44 static DEFINE_SPINLOCK(pnfs_spinlock);
45
46 /*
47  * pnfs_modules_tbl holds all pnfs modules
48  */
49 static LIST_HEAD(pnfs_modules_tbl);
50
51 /* Return the registered pnfs layout driver module matching given id */
52 static struct pnfs_layoutdriver_type *
53 find_pnfs_driver_locked(u32 id)
54 {
55         struct pnfs_layoutdriver_type *local;
56
57         list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
58                 if (local->id == id)
59                         goto out;
60         local = NULL;
61 out:
62         dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
63         return local;
64 }
65
66 static struct pnfs_layoutdriver_type *
67 find_pnfs_driver(u32 id)
68 {
69         struct pnfs_layoutdriver_type *local;
70
71         spin_lock(&pnfs_spinlock);
72         local = find_pnfs_driver_locked(id);
73         spin_unlock(&pnfs_spinlock);
74         return local;
75 }
76
77 void
78 unset_pnfs_layoutdriver(struct nfs_server *nfss)
79 {
80         if (nfss->pnfs_curr_ld) {
81                 if (nfss->pnfs_curr_ld->clear_layoutdriver)
82                         nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
83                 module_put(nfss->pnfs_curr_ld->owner);
84         }
85         nfss->pnfs_curr_ld = NULL;
86 }
87
88 /*
89  * Try to set the server's pnfs module to the pnfs layout type specified by id.
90  * Currently only one pNFS layout driver per filesystem is supported.
91  *
92  * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
93  */
94 void
95 set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
96                       u32 id)
97 {
98         struct pnfs_layoutdriver_type *ld_type = NULL;
99
100         if (id == 0)
101                 goto out_no_driver;
102         if (!(server->nfs_client->cl_exchange_flags &
103                  (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
104                 printk(KERN_ERR "%s: id %u cl_exchange_flags 0x%x\n", __func__,
105                        id, server->nfs_client->cl_exchange_flags);
106                 goto out_no_driver;
107         }
108         ld_type = find_pnfs_driver(id);
109         if (!ld_type) {
110                 request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
111                 ld_type = find_pnfs_driver(id);
112                 if (!ld_type) {
113                         dprintk("%s: No pNFS module found for %u.\n",
114                                 __func__, id);
115                         goto out_no_driver;
116                 }
117         }
118         if (!try_module_get(ld_type->owner)) {
119                 dprintk("%s: Could not grab reference on module\n", __func__);
120                 goto out_no_driver;
121         }
122         server->pnfs_curr_ld = ld_type;
123         if (ld_type->set_layoutdriver
124             && ld_type->set_layoutdriver(server, mntfh)) {
125                 printk(KERN_ERR "%s: Error initializing pNFS layout driver %u.\n",
126                                 __func__, id);
127                 module_put(ld_type->owner);
128                 goto out_no_driver;
129         }
130
131         dprintk("%s: pNFS module for %u set\n", __func__, id);
132         return;
133
134 out_no_driver:
135         dprintk("%s: Using NFSv4 I/O\n", __func__);
136         server->pnfs_curr_ld = NULL;
137 }
138
139 int
140 pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
141 {
142         int status = -EINVAL;
143         struct pnfs_layoutdriver_type *tmp;
144
145         if (ld_type->id == 0) {
146                 printk(KERN_ERR "%s id 0 is reserved\n", __func__);
147                 return status;
148         }
149         if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
150                 printk(KERN_ERR "%s Layout driver must provide "
151                        "alloc_lseg and free_lseg.\n", __func__);
152                 return status;
153         }
154
155         spin_lock(&pnfs_spinlock);
156         tmp = find_pnfs_driver_locked(ld_type->id);
157         if (!tmp) {
158                 list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
159                 status = 0;
160                 dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
161                         ld_type->name);
162         } else {
163                 printk(KERN_ERR "%s Module with id %u already loaded!\n",
164                         __func__, ld_type->id);
165         }
166         spin_unlock(&pnfs_spinlock);
167
168         return status;
169 }
170 EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
171
172 void
173 pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
174 {
175         dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
176         spin_lock(&pnfs_spinlock);
177         list_del(&ld_type->pnfs_tblid);
178         spin_unlock(&pnfs_spinlock);
179 }
180 EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
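/*
 * Registration sketch (illustrative only; the driver name and callbacks are
 * hypothetical, the fields shown are the ones this file relies on):
 *
 *	static struct pnfs_layoutdriver_type example_layout_type = {
 *		.id		= LAYOUT_NFSV4_1_FILES,
 *		.name		= "LAYOUT_NFSV4_1_FILES",
 *		.owner		= THIS_MODULE,
 *		.alloc_lseg	= example_alloc_lseg,
 *		.free_lseg	= example_free_lseg,
 *	};
 *
 * A layout driver module calls pnfs_register_layoutdriver(&example_layout_type)
 * from its module_init() and pnfs_unregister_layoutdriver(&example_layout_type)
 * from its module_exit().
 */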
181
182 /*
183  * pNFS client layout cache
184  */
185
186 /* Need to hold i_lock if caller does not already hold reference */
187 void
188 get_layout_hdr(struct pnfs_layout_hdr *lo)
189 {
190         atomic_inc(&lo->plh_refcount);
191 }
192
193 static struct pnfs_layout_hdr *
194 pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
195 {
196         struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
197         return ld->alloc_layout_hdr ? ld->alloc_layout_hdr(ino, gfp_flags) :
198                 kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
199 }
200
201 static void
202 pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
203 {
204         struct pnfs_layoutdriver_type *ld = NFS_SERVER(lo->plh_inode)->pnfs_curr_ld;
205         put_rpccred(lo->plh_lc_cred);
206         return ld->free_layout_hdr ? ld->free_layout_hdr(lo) : kfree(lo);
207 }
208
209 static void
210 destroy_layout_hdr(struct pnfs_layout_hdr *lo)
211 {
212         dprintk("%s: freeing layout cache %p\n", __func__, lo);
213         BUG_ON(!list_empty(&lo->plh_layouts));
214         NFS_I(lo->plh_inode)->layout = NULL;
215         pnfs_free_layout_hdr(lo);
216 }
217
218 static void
219 put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
220 {
221         if (atomic_dec_and_test(&lo->plh_refcount))
222                 destroy_layout_hdr(lo);
223 }
224
225 void
226 put_layout_hdr(struct pnfs_layout_hdr *lo)
227 {
228         struct inode *inode = lo->plh_inode;
229
230         if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
231                 destroy_layout_hdr(lo);
232                 spin_unlock(&inode->i_lock);
233         }
234 }
235
236 static void
237 init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
238 {
239         INIT_LIST_HEAD(&lseg->pls_list);
240         INIT_LIST_HEAD(&lseg->pls_lc_list);
241         atomic_set(&lseg->pls_refcount, 1);
242         smp_mb();
243         set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
244         lseg->pls_layout = lo;
245 }
246
247 static void free_lseg(struct pnfs_layout_segment *lseg)
248 {
249         struct inode *ino = lseg->pls_layout->plh_inode;
250
251         NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
252         /* Matched by get_layout_hdr in pnfs_insert_layout */
253         put_layout_hdr(NFS_I(ino)->layout);
254 }
255
256 static void
257 put_lseg_common(struct pnfs_layout_segment *lseg)
258 {
259         struct inode *inode = lseg->pls_layout->plh_inode;
260
261         WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
262         list_del_init(&lseg->pls_list);
263         if (list_empty(&lseg->pls_layout->plh_segs)) {
264                 set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
265                 /* Matched by initial refcount set in alloc_init_layout_hdr */
266                 put_layout_hdr_locked(lseg->pls_layout);
267         }
268         rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
269 }
270
271 void
272 put_lseg(struct pnfs_layout_segment *lseg)
273 {
274         struct inode *inode;
275
276         if (!lseg)
277                 return;
278
279         dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
280                 atomic_read(&lseg->pls_refcount),
281                 test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
282         inode = lseg->pls_layout->plh_inode;
283         if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
284                 LIST_HEAD(free_me);
285
286                 put_lseg_common(lseg);
287                 list_add(&lseg->pls_list, &free_me);
288                 spin_unlock(&inode->i_lock);
289                 pnfs_free_lseg_list(&free_me);
290         }
291 }
292 EXPORT_SYMBOL_GPL(put_lseg);
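/*
 * Reference counting summary (derived from the code above): init_lseg()
 * starts an lseg at refcount 1 with NFS_LSEG_VALID set; users take extra
 * references with get_lseg() and drop them with put_lseg().  When the last
 * reference is dropped under i_lock, put_lseg_common() unlinks the lseg and
 * pnfs_free_lseg_list() frees it after the lock is released.
 */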
293
294 static inline u64
295 end_offset(u64 start, u64 len)
296 {
297         u64 end;
298
299         end = start + len;
300         return end >= start ? end : NFS4_MAX_UINT64;
301 }
302
303 /* last octet in a range */
304 static inline u64
305 last_byte_offset(u64 start, u64 len)
306 {
307         u64 end;
308
309         BUG_ON(!len);
310         end = start + len;
311         return end > start ? end - 1 : NFS4_MAX_UINT64;
312 }
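/*
 * Example: end_offset(4096, 8192) == 12288 (the first byte past the range)
 * and last_byte_offset(4096, 8192) == 12287.  A length that would overflow
 * the u64 sum (e.g. NFS4_MAX_UINT64) makes end_offset() return
 * NFS4_MAX_UINT64, i.e. "to end of file", instead of a wrapped value.
 */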
313
314 /*
315  * is l2 fully contained in l1?
316  *   start1                             end1
317  *   [----------------------------------)
318  *           start2           end2
319  *           [----------------)
320  */
321 static inline int
322 lo_seg_contained(struct pnfs_layout_range *l1,
323                  struct pnfs_layout_range *l2)
324 {
325         u64 start1 = l1->offset;
326         u64 end1 = end_offset(start1, l1->length);
327         u64 start2 = l2->offset;
328         u64 end2 = end_offset(start2, l2->length);
329
330         return (start1 <= start2) && (end1 >= end2);
331 }
332
333 /*
334  * do l1 and l2 intersect?
335  *   start1                             end1
336  *   [----------------------------------)
337  *                              start2           end2
338  *                              [----------------)
339  */
340 static inline int
341 lo_seg_intersecting(struct pnfs_layout_range *l1,
342                     struct pnfs_layout_range *l2)
343 {
344         u64 start1 = l1->offset;
345         u64 end1 = end_offset(start1, l1->length);
346         u64 start2 = l2->offset;
347         u64 end2 = end_offset(start2, l2->length);
348
349         return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
350                (end2 == NFS4_MAX_UINT64 || end2 > start1);
351 }
352
353 static bool
354 should_free_lseg(struct pnfs_layout_range *lseg_range,
355                  struct pnfs_layout_range *recall_range)
356 {
357         return (recall_range->iomode == IOMODE_ANY ||
358                 lseg_range->iomode == recall_range->iomode) &&
359                lo_seg_intersecting(lseg_range, recall_range);
360 }
361
362 /* Returns 1 if lseg is removed from list, 0 otherwise */
363 static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
364                              struct list_head *tmp_list)
365 {
366         int rv = 0;
367
368         if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
369                 /* Remove the reference keeping the lseg in the
370                  * list.  It will now be removed when all
371                  * outstanding io is finished.
372                  */
373                 dprintk("%s: lseg %p ref %d\n", __func__, lseg,
374                         atomic_read(&lseg->pls_refcount));
375                 if (atomic_dec_and_test(&lseg->pls_refcount)) {
376                         put_lseg_common(lseg);
377                         list_add(&lseg->pls_list, tmp_list);
378                         rv = 1;
379                 }
380         }
381         return rv;
382 }
383
384 /* Returns the number of matching invalid lsegs remaining in the list
385  * after the call.
386  */
387 int
388 mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
389                             struct list_head *tmp_list,
390                             struct pnfs_layout_range *recall_range)
391 {
392         struct pnfs_layout_segment *lseg, *next;
393         int invalid = 0, removed = 0;
394
395         dprintk("%s:Begin lo %p\n", __func__, lo);
396
397         if (list_empty(&lo->plh_segs)) {
398                 if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
399                         put_layout_hdr_locked(lo);
400                 return 0;
401         }
402         list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
403                 if (!recall_range ||
404                     should_free_lseg(&lseg->pls_range, recall_range)) {
405                         dprintk("%s: freeing lseg %p iomode %d "
406                                 "offset %llu length %llu\n", __func__,
407                                 lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
408                                 lseg->pls_range.length);
409                         invalid++;
410                         removed += mark_lseg_invalid(lseg, tmp_list);
411                 }
412         dprintk("%s:Return %i\n", __func__, invalid - removed);
413         return invalid - removed;
414 }
415
416 /* note free_me must contain lsegs from a single layout_hdr */
417 void
418 pnfs_free_lseg_list(struct list_head *free_me)
419 {
420         struct pnfs_layout_segment *lseg, *tmp;
421         struct pnfs_layout_hdr *lo;
422
423         if (list_empty(free_me))
424                 return;
425
426         lo = list_first_entry(free_me, struct pnfs_layout_segment,
427                               pls_list)->pls_layout;
428
429         if (test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) {
430                 struct nfs_client *clp;
431
432                 clp = NFS_SERVER(lo->plh_inode)->nfs_client;
433                 spin_lock(&clp->cl_lock);
434                 list_del_init(&lo->plh_layouts);
435                 spin_unlock(&clp->cl_lock);
436         }
437         list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
438                 list_del(&lseg->pls_list);
439                 free_lseg(lseg);
440         }
441 }
442
443 void
444 pnfs_destroy_layout(struct nfs_inode *nfsi)
445 {
446         struct pnfs_layout_hdr *lo;
447         LIST_HEAD(tmp_list);
448
449         spin_lock(&nfsi->vfs_inode.i_lock);
450         lo = nfsi->layout;
451         if (lo) {
452                 lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
453                 mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
454         }
455         spin_unlock(&nfsi->vfs_inode.i_lock);
456         pnfs_free_lseg_list(&tmp_list);
457 }
458
459 /*
460  * Called by the state manager to remove all layouts established under an
461  * expired lease.
462  */
463 void
464 pnfs_destroy_all_layouts(struct nfs_client *clp)
465 {
466         struct nfs_server *server;
467         struct pnfs_layout_hdr *lo;
468         LIST_HEAD(tmp_list);
469
470         nfs4_deviceid_mark_client_invalid(clp);
471         nfs4_deviceid_purge_client(clp);
472
473         spin_lock(&clp->cl_lock);
474         rcu_read_lock();
475         list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
476                 if (!list_empty(&server->layouts))
477                         list_splice_init(&server->layouts, &tmp_list);
478         }
479         rcu_read_unlock();
480         spin_unlock(&clp->cl_lock);
481
482         while (!list_empty(&tmp_list)) {
483                 lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
484                                 plh_layouts);
485                 dprintk("%s freeing layout for inode %lu\n", __func__,
486                         lo->plh_inode->i_ino);
487                 list_del_init(&lo->plh_layouts);
488                 pnfs_destroy_layout(NFS_I(lo->plh_inode));
489         }
490 }
491
492 /* update lo->plh_stateid with new if it is more recent */
493 void
494 pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
495                         bool update_barrier)
496 {
497         u32 oldseq, newseq;
498
499         oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid);
500         newseq = be32_to_cpu(new->stateid.seqid);
501         if ((int)(newseq - oldseq) > 0) {
502                 memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid));
503                 if (update_barrier) {
504                         u32 new_barrier = be32_to_cpu(new->stateid.seqid);
505
506                         if ((int)(new_barrier - lo->plh_barrier))
507                                 lo->plh_barrier = new_barrier;
508                 } else {
509                         /* Because of wraparound, we want to keep the barrier
510                          * "close" to the current seqids.  It needs to be
511                          * within 2**31 to count as "behind", so if it
512                          * gets too near that limit, give us a little leeway
513                          * and bring it to within 2**30.
514                          * NOTE - and yes, this is all unsigned arithmetic.
515                          */
516                         if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
517                                 lo->plh_barrier = newseq - (1 << 30);
518                 }
519         }
520 }
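/*
 * Worked example of the unsigned seqid comparison above (hypothetical
 * values): with oldseq == 0xfffffffe and newseq == 0x00000003,
 * newseq - oldseq == 5, which is positive when cast to int, so the new
 * stateid is accepted as more recent despite the 32-bit wraparound.
 */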
521
522 /* lget is set to 1 if called from inside send_layoutget call chain */
523 static bool
524 pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
525                         int lget)
526 {
527         if ((stateid) &&
528             (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0)
529                 return true;
530         return lo->plh_block_lgets ||
531                 test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
532                 test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
533                 (list_empty(&lo->plh_segs) &&
534                  (atomic_read(&lo->plh_outstanding) > lget));
535 }
536
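/*
 * Choose the stateid to send in a LAYOUTGET: the open stateid while the
 * layout cache is empty (first layout for the file), the current layout
 * stateid otherwise.  Returns -EAGAIN if layoutgets are currently blocked.
 */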
537 int
538 pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
539                               struct nfs4_state *open_state)
540 {
541         int status = 0;
542
543         dprintk("--> %s\n", __func__);
544         spin_lock(&lo->plh_inode->i_lock);
545         if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
546                 status = -EAGAIN;
547         } else if (list_empty(&lo->plh_segs)) {
548                 int seq;
549
550                 do {
551                         seq = read_seqbegin(&open_state->seqlock);
552                         memcpy(dst->data, open_state->stateid.data,
553                                sizeof(open_state->stateid.data));
554                 } while (read_seqretry(&open_state->seqlock, seq));
555         } else
556                 memcpy(dst->data, lo->plh_stateid.data, sizeof(lo->plh_stateid.data));
557         spin_unlock(&lo->plh_inode->i_lock);
558         dprintk("<-- %s\n", __func__);
559         return status;
560 }
561
562 /*
563  * Get layout from server.
564  *    for now, assume that whole file layouts are requested.
565  *    arg->offset: 0
566  *    arg->length: all ones
567  */
568 static struct pnfs_layout_segment *
569 send_layoutget(struct pnfs_layout_hdr *lo,
570            struct nfs_open_context *ctx,
571            struct pnfs_layout_range *range,
572            gfp_t gfp_flags)
573 {
574         struct inode *ino = lo->plh_inode;
575         struct nfs_server *server = NFS_SERVER(ino);
576         struct nfs4_layoutget *lgp;
577         struct pnfs_layout_segment *lseg = NULL;
578         struct page **pages = NULL;
579         int i;
580         u32 max_resp_sz, max_pages;
581
582         dprintk("--> %s\n", __func__);
583
584         BUG_ON(ctx == NULL);
585         lgp = kzalloc(sizeof(*lgp), gfp_flags);
586         if (lgp == NULL)
587                 return NULL;
588
589         /* allocate pages for xdr post processing */
590         max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
591         max_pages = max_resp_sz >> PAGE_SHIFT;
592
593         pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
594         if (!pages)
595                 goto out_err_free;
596
597         for (i = 0; i < max_pages; i++) {
598                 pages[i] = alloc_page(gfp_flags);
599                 if (!pages[i])
600                         goto out_err_free;
601         }
602
603         lgp->args.minlength = PAGE_CACHE_SIZE;
604         if (lgp->args.minlength > range->length)
605                 lgp->args.minlength = range->length;
606         lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
607         lgp->args.range = *range;
608         lgp->args.type = server->pnfs_curr_ld->id;
609         lgp->args.inode = ino;
610         lgp->args.ctx = get_nfs_open_context(ctx);
611         lgp->args.layout.pages = pages;
612         lgp->args.layout.pglen = max_pages * PAGE_SIZE;
613         lgp->lsegpp = &lseg;
614         lgp->gfp_flags = gfp_flags;
615
616         /* Synchronously retrieve layout information from server and
617          * store in lseg.
618          */
619         nfs4_proc_layoutget(lgp);
620         if (!lseg) {
621                 /* remember that LAYOUTGET failed and suspend trying */
622                 set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
623         }
624
625         /* free xdr pages */
626         for (i = 0; i < max_pages; i++)
627                 __free_page(pages[i]);
628         kfree(pages);
629
630         return lseg;
631
632 out_err_free:
633         /* free any allocated xdr pages and the unused lgp */
634         if (pages) {
635                 for (i = 0; i < max_pages; i++) {
636                         if (!pages[i])
637                                 break;
638                         __free_page(pages[i]);
639                 }
640                 kfree(pages);
641         }
642         kfree(lgp);
643         return NULL;
644 }
645
646 /* Initiates a LAYOUTRETURN(FILE) */
647 int
648 _pnfs_return_layout(struct inode *ino)
649 {
650         struct pnfs_layout_hdr *lo = NULL;
651         struct nfs_inode *nfsi = NFS_I(ino);
652         LIST_HEAD(tmp_list);
653         struct nfs4_layoutreturn *lrp;
654         nfs4_stateid stateid;
655         int status = 0;
656
657         dprintk("--> %s\n", __func__);
658
659         spin_lock(&ino->i_lock);
660         lo = nfsi->layout;
661         if (!lo) {
662                 spin_unlock(&ino->i_lock);
663                 dprintk("%s: no layout to return\n", __func__);
664                 return status;
665         }
666         stateid = nfsi->layout->plh_stateid;
667         /* Reference matched in nfs4_layoutreturn_release */
668         get_layout_hdr(lo);
669         mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
670         lo->plh_block_lgets++;
671         spin_unlock(&ino->i_lock);
672         pnfs_free_lseg_list(&tmp_list);
673
674         WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags));
675
676         lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
677         if (unlikely(lrp == NULL)) {
678                 status = -ENOMEM;
679                 set_bit(NFS_LAYOUT_RW_FAILED, &lo->plh_flags);
680                 set_bit(NFS_LAYOUT_RO_FAILED, &lo->plh_flags);
681                 put_layout_hdr(lo);
682                 goto out;
683         }
684
685         lrp->args.stateid = stateid;
686         lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
687         lrp->args.inode = ino;
688         lrp->args.layout = lo;
689         lrp->clp = NFS_SERVER(ino)->nfs_client;
690
691         status = nfs4_proc_layoutreturn(lrp);
692 out:
693         dprintk("<-- %s status: %d\n", __func__, status);
694         return status;
695 }
696
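/*
 * Return-on-close handling, used by the NFSv4 close path: if the layout was
 * granted with return_on_close, invalidate the NFS_LSEG_ROC segments, block
 * further LAYOUTGETs and take a header reference that pnfs_roc_release()
 * drops.  Returns true if any ROC segment was found.
 */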
697 bool pnfs_roc(struct inode *ino)
698 {
699         struct pnfs_layout_hdr *lo;
700         struct pnfs_layout_segment *lseg, *tmp;
701         LIST_HEAD(tmp_list);
702         bool found = false;
703
704         spin_lock(&ino->i_lock);
705         lo = NFS_I(ino)->layout;
706         if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
707             test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
708                 goto out_nolayout;
709         list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
710                 if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
711                         mark_lseg_invalid(lseg, &tmp_list);
712                         found = true;
713                 }
714         if (!found)
715                 goto out_nolayout;
716         lo->plh_block_lgets++;
717         get_layout_hdr(lo); /* matched in pnfs_roc_release */
718         spin_unlock(&ino->i_lock);
719         pnfs_free_lseg_list(&tmp_list);
720         return true;
721
722 out_nolayout:
723         spin_unlock(&ino->i_lock);
724         return false;
725 }
726
727 void pnfs_roc_release(struct inode *ino)
728 {
729         struct pnfs_layout_hdr *lo;
730
731         spin_lock(&ino->i_lock);
732         lo = NFS_I(ino)->layout;
733         lo->plh_block_lgets--;
734         put_layout_hdr_locked(lo);
735         spin_unlock(&ino->i_lock);
736 }
737
738 void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
739 {
740         struct pnfs_layout_hdr *lo;
741
742         spin_lock(&ino->i_lock);
743         lo = NFS_I(ino)->layout;
744         if ((int)(barrier - lo->plh_barrier) > 0)
745                 lo->plh_barrier = barrier;
746         spin_unlock(&ino->i_lock);
747 }
748
749 bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
750 {
751         struct nfs_inode *nfsi = NFS_I(ino);
752         struct pnfs_layout_segment *lseg;
753         bool found = false;
754
755         spin_lock(&ino->i_lock);
756         list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
757                 if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
758                         found = true;
759                         break;
760                 }
761         if (!found) {
762                 struct pnfs_layout_hdr *lo = nfsi->layout;
763                 u32 current_seqid = be32_to_cpu(lo->plh_stateid.stateid.seqid);
764
765                 /* Since close does not return a layout stateid for use as
766                  * a barrier, we choose the worst-case barrier.
767                  */
768                 *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
769         }
770         spin_unlock(&ino->i_lock);
771         return found;
772 }
773
774 /*
775  * Compare two layout segments for sorting into layout cache.
776  * We want to preferentially return RW over RO layouts, so ensure those
777  * are seen first.
778  */
779 static s64
780 cmp_layout(struct pnfs_layout_range *l1,
781            struct pnfs_layout_range *l2)
782 {
783         s64 d;
784
785         /* high offset > low offset */
786         d = l1->offset - l2->offset;
787         if (d)
788                 return d;
789
790         /* short length > long length */
791         d = l2->length - l1->length;
792         if (d)
793                 return d;
794
795         /* read > read/write */
796         return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
797 }
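/*
 * Resulting cache order: lower offset first, then longer length, then
 * IOMODE_RW before IOMODE_READ, so pnfs_find_lseg() encounters the RW
 * segment first when both cover the same range.
 */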
798
799 static void
800 pnfs_insert_layout(struct pnfs_layout_hdr *lo,
801                    struct pnfs_layout_segment *lseg)
802 {
803         struct pnfs_layout_segment *lp;
804
805         dprintk("%s:Begin\n", __func__);
806
807         assert_spin_locked(&lo->plh_inode->i_lock);
808         list_for_each_entry(lp, &lo->plh_segs, pls_list) {
809                 if (cmp_layout(&lseg->pls_range, &lp->pls_range) > 0)
810                         continue;
811                 list_add_tail(&lseg->pls_list, &lp->pls_list);
812                 dprintk("%s: inserted lseg %p "
813                         "iomode %d offset %llu length %llu before "
814                         "lp %p iomode %d offset %llu length %llu\n",
815                         __func__, lseg, lseg->pls_range.iomode,
816                         lseg->pls_range.offset, lseg->pls_range.length,
817                         lp, lp->pls_range.iomode, lp->pls_range.offset,
818                         lp->pls_range.length);
819                 goto out;
820         }
821         list_add_tail(&lseg->pls_list, &lo->plh_segs);
822         dprintk("%s: inserted lseg %p "
823                 "iomode %d offset %llu length %llu at tail\n",
824                 __func__, lseg, lseg->pls_range.iomode,
825                 lseg->pls_range.offset, lseg->pls_range.length);
826 out:
827         get_layout_hdr(lo);
828
829         dprintk("%s:Return\n", __func__);
830 }
831
832 static struct pnfs_layout_hdr *
833 alloc_init_layout_hdr(struct inode *ino,
834                       struct nfs_open_context *ctx,
835                       gfp_t gfp_flags)
836 {
837         struct pnfs_layout_hdr *lo;
838
839         lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
840         if (!lo)
841                 return NULL;
842         atomic_set(&lo->plh_refcount, 1);
843         INIT_LIST_HEAD(&lo->plh_layouts);
844         INIT_LIST_HEAD(&lo->plh_segs);
845         INIT_LIST_HEAD(&lo->plh_bulk_recall);
846         lo->plh_inode = ino;
847         lo->plh_lc_cred = get_rpccred(ctx->state->owner->so_cred);
848         return lo;
849 }
850
851 static struct pnfs_layout_hdr *
852 pnfs_find_alloc_layout(struct inode *ino,
853                        struct nfs_open_context *ctx,
854                        gfp_t gfp_flags)
855 {
856         struct nfs_inode *nfsi = NFS_I(ino);
857         struct pnfs_layout_hdr *new = NULL;
858
859         dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
860
861         assert_spin_locked(&ino->i_lock);
862         if (nfsi->layout) {
863                 if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
864                         return NULL;
865                 else
866                         return nfsi->layout;
867         }
868         spin_unlock(&ino->i_lock);
869         new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
870         spin_lock(&ino->i_lock);
871
872         if (likely(nfsi->layout == NULL))       /* Won the race? */
873                 nfsi->layout = new;
874         else
875                 pnfs_free_layout_hdr(new);
876         return nfsi->layout;
877 }
878
879 /*
880  * iomode matching rules:
881  * iomode       lseg    match
882  * -----        -----   -----
883  * ANY          READ    true
884  * ANY          RW      true
885  * RW           READ    false
886  * RW           RW      true
887  * READ         READ    true
888  * READ         RW      true
889  */
890 static int
891 is_matching_lseg(struct pnfs_layout_range *ls_range,
892                  struct pnfs_layout_range *range)
893 {
894         struct pnfs_layout_range range1;
895
896         if ((range->iomode == IOMODE_RW &&
897              ls_range->iomode != IOMODE_RW) ||
898             !lo_seg_intersecting(ls_range, range))
899                 return 0;
900
901         /* range1 covers only the first byte in the range */
902         range1 = *range;
903         range1.length = 1;
904         return lo_seg_contained(ls_range, &range1);
905 }
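/*
 * Example from the table above: a cached whole-file IOMODE_RW lseg satisfies
 * an IOMODE_READ request, but a cached IOMODE_READ lseg never satisfies an
 * IOMODE_RW request.
 */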
906
907 /*
908  * lookup range in layout
909  */
910 static struct pnfs_layout_segment *
911 pnfs_find_lseg(struct pnfs_layout_hdr *lo,
912                 struct pnfs_layout_range *range)
913 {
914         struct pnfs_layout_segment *lseg, *ret = NULL;
915
916         dprintk("%s:Begin\n", __func__);
917
918         assert_spin_locked(&lo->plh_inode->i_lock);
919         list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
920                 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
921                     is_matching_lseg(&lseg->pls_range, range)) {
922                         ret = get_lseg(lseg);
923                         break;
924                 }
925                 if (lseg->pls_range.offset > range->offset)
926                         break;
927         }
928
929         dprintk("%s:Return lseg %p ref %d\n",
930                 __func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
931         return ret;
932 }
933
934 /*
935  * Layout segment is retrieved from the server if not cached.
936  * The appropriate layout segment is referenced and returned to the caller.
937  */
938 struct pnfs_layout_segment *
939 pnfs_update_layout(struct inode *ino,
940                    struct nfs_open_context *ctx,
941                    loff_t pos,
942                    u64 count,
943                    enum pnfs_iomode iomode,
944                    gfp_t gfp_flags)
945 {
946         struct pnfs_layout_range arg = {
947                 .iomode = iomode,
948                 .offset = pos,
949                 .length = count,
950         };
951         unsigned pg_offset;
952         struct nfs_inode *nfsi = NFS_I(ino);
953         struct nfs_server *server = NFS_SERVER(ino);
954         struct nfs_client *clp = server->nfs_client;
955         struct pnfs_layout_hdr *lo;
956         struct pnfs_layout_segment *lseg = NULL;
957         bool first = false;
958
959         if (!pnfs_enabled_sb(NFS_SERVER(ino)))
960                 return NULL;
961         spin_lock(&ino->i_lock);
962         lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
963         if (lo == NULL) {
964                 dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
965                 goto out_unlock;
966         }
967
968         /* Do we even need to bother with this? */
969         if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
970             test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
971                 dprintk("%s matches recall, use MDS\n", __func__);
972                 goto out_unlock;
973         }
974
975         /* if LAYOUTGET already failed once we don't try again */
976         if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
977                 goto out_unlock;
978
979         /* Check to see if the layout for the given range already exists */
980         lseg = pnfs_find_lseg(lo, &arg);
981         if (lseg)
982                 goto out_unlock;
983
984         if (pnfs_layoutgets_blocked(lo, NULL, 0))
985                 goto out_unlock;
986         atomic_inc(&lo->plh_outstanding);
987
988         get_layout_hdr(lo);
989         if (list_empty(&lo->plh_segs))
990                 first = true;
991         spin_unlock(&ino->i_lock);
992         if (first) {
993                 /* The lo must be on the clp list if there is any
994                  * chance of a CB_LAYOUTRECALL(FILE) coming in.
995                  */
996                 spin_lock(&clp->cl_lock);
997                 BUG_ON(!list_empty(&lo->plh_layouts));
998                 list_add_tail(&lo->plh_layouts, &server->layouts);
999                 spin_unlock(&clp->cl_lock);
1000         }
1001
1002         pg_offset = arg.offset & ~PAGE_CACHE_MASK;
1003         if (pg_offset) {
1004                 arg.offset -= pg_offset;
1005                 arg.length += pg_offset;
1006         }
1007         if (arg.length != NFS4_MAX_UINT64)
1008                 arg.length = PAGE_CACHE_ALIGN(arg.length);
1009
1010         lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
1011         if (!lseg && first) {
1012                 spin_lock(&clp->cl_lock);
1013                 list_del_init(&lo->plh_layouts);
1014                 spin_unlock(&clp->cl_lock);
1015         }
1016         atomic_dec(&lo->plh_outstanding);
1017         put_layout_hdr(lo);
1018 out:
1019         dprintk("%s end, state 0x%lx lseg %p\n", __func__,
1020                 nfsi->layout ? nfsi->layout->plh_flags : -1, lseg);
1021         return lseg;
1022 out_unlock:
1023         spin_unlock(&ino->i_lock);
1024         goto out;
1025 }
1026 EXPORT_SYMBOL_GPL(pnfs_update_layout);
1027
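/*
 * Process a LAYOUTGET reply: hand the layout blob to the driver's
 * alloc_lseg(), insert the resulting lseg into the layout cache and update
 * the layout stateid.  The reply is forgotten (the lseg is freed rather than
 * cached) if a layout recall raced with the request or the stateid check in
 * pnfs_layoutgets_blocked() fails.
 */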
1028 int
1029 pnfs_layout_process(struct nfs4_layoutget *lgp)
1030 {
1031         struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
1032         struct nfs4_layoutget_res *res = &lgp->res;
1033         struct pnfs_layout_segment *lseg;
1034         struct inode *ino = lo->plh_inode;
1035         struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
1036         int status = 0;
1037
1038         /* Inject layout blob into I/O device driver */
1039         lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
1040         if (!lseg || IS_ERR(lseg)) {
1041                 if (!lseg)
1042                         status = -ENOMEM;
1043                 else
1044                         status = PTR_ERR(lseg);
1045                 dprintk("%s: Could not allocate layout: error %d\n",
1046                        __func__, status);
1047                 goto out;
1048         }
1049
1050         spin_lock(&ino->i_lock);
1051         if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
1052             test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
1053                 dprintk("%s forget reply due to recall\n", __func__);
1054                 goto out_forget_reply;
1055         }
1056
1057         if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
1058                 dprintk("%s forget reply due to state\n", __func__);
1059                 goto out_forget_reply;
1060         }
1061         init_lseg(lo, lseg);
1062         lseg->pls_range = res->range;
1063         *lgp->lsegpp = get_lseg(lseg);
1064         pnfs_insert_layout(lo, lseg);
1065
1066         if (res->return_on_close) {
1067                 set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
1068                 set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
1069         }
1070
1071         /* Done processing layoutget. Set the layout stateid */
1072         pnfs_set_layout_stateid(lo, &res->stateid, false);
1073         spin_unlock(&ino->i_lock);
1074 out:
1075         return status;
1076
1077 out_forget_reply:
1078         spin_unlock(&ino->i_lock);
1079         lseg->pls_layout = lo;
1080         NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
1081         goto out;
1082 }
1083
1084 void
1085 pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
1086 {
1087         BUG_ON(pgio->pg_lseg != NULL);
1088
1089         pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1090                                            req->wb_context,
1091                                            req_offset(req),
1092                                            req->wb_bytes,
1093                                            IOMODE_READ,
1094                                            GFP_KERNEL);
1095         /* If no lseg, fall back to read through mds */
1096         if (pgio->pg_lseg == NULL)
1097                 nfs_pageio_reset_read_mds(pgio);
1098
1099 }
1100 EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
1101
1102 void
1103 pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
1104 {
1105         BUG_ON(pgio->pg_lseg != NULL);
1106
1107         pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1108                                            req->wb_context,
1109                                            req_offset(req),
1110                                            req->wb_bytes,
1111                                            IOMODE_RW,
1112                                            GFP_NOFS);
1113         /* If no lseg, fall back to write through mds */
1114         if (pgio->pg_lseg == NULL)
1115                 nfs_pageio_reset_write_mds(pgio);
1116 }
1117 EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
1118
1119 bool
1120 pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
1121 {
1122         struct nfs_server *server = NFS_SERVER(inode);
1123         struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
1124
1125         if (ld == NULL)
1126                 return false;
1127         nfs_pageio_init(pgio, inode, ld->pg_read_ops, server->rsize, 0);
1128         return true;
1129 }
1130
1131 bool
1132 pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags)
1133 {
1134         struct nfs_server *server = NFS_SERVER(inode);
1135         struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
1136
1137         if (ld == NULL)
1138                 return false;
1139         nfs_pageio_init(pgio, inode, ld->pg_write_ops, server->wsize, ioflags);
1140         return true;
1141 }
1142
1143 bool
1144 pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
1145                      struct nfs_page *req)
1146 {
1147         if (pgio->pg_lseg == NULL)
1148                 return nfs_generic_pg_test(pgio, prev, req);
1149
1150         /*
1151          * Test if a nfs_page is fully contained in the pnfs_layout_range.
1152          * Note that this test makes several assumptions:
1153          * - that the previous nfs_page in the struct nfs_pageio_descriptor
1154          *   is known to lie within the range.
1155          *   - that the nfs_page being tested is known to be contiguous with the
1156          *   previous nfs_page.
1157          *   - Layout ranges are page aligned, so we only have to test the
1158          *   start offset of the request.
1159          *
1160          * Please also note that 'end_offset' is actually the offset of the
1161          * first byte that lies outside the pnfs_layout_range. FIXME?
1162          *
1163          */
1164         return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
1165                                          pgio->pg_lseg->pls_range.length);
1166 }
1167 EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
1168
1169 /*
1170  * Called by non-RPC-based layout drivers
1171  */
1172 void pnfs_ld_write_done(struct nfs_write_data *data)
1173 {
1174         if (likely(!data->pnfs_error)) {
1175                 pnfs_set_layoutcommit(data);
1176                 data->mds_ops->rpc_call_done(&data->task, data);
1177         } else {
1178                 put_lseg(data->lseg);
1179                 data->lseg = NULL;
1180                 dprintk("pnfs write error = %d\n", data->pnfs_error);
1181         }
1182         data->mds_ops->rpc_release(data);
1183 }
1184 EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
1185
1186 static void
1187 pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
1188                 struct nfs_write_data *data)
1189 {
1190         list_splice_tail_init(&data->pages, &desc->pg_list);
1191         if (data->req && list_empty(&data->req->wb_list))
1192                 nfs_list_add_request(data->req, &desc->pg_list);
1193         nfs_pageio_reset_write_mds(desc);
1194         desc->pg_recoalesce = 1;
1195         nfs_writedata_release(data);
1196 }
1197
1198 static enum pnfs_try_status
1199 pnfs_try_to_write_data(struct nfs_write_data *wdata,
1200                         const struct rpc_call_ops *call_ops,
1201                         struct pnfs_layout_segment *lseg,
1202                         int how)
1203 {
1204         struct inode *inode = wdata->inode;
1205         enum pnfs_try_status trypnfs;
1206         struct nfs_server *nfss = NFS_SERVER(inode);
1207
1208         wdata->mds_ops = call_ops;
1209         wdata->lseg = get_lseg(lseg);
1210
1211         dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
1212                 inode->i_ino, wdata->args.count, wdata->args.offset, how);
1213
1214         trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
1215         if (trypnfs == PNFS_NOT_ATTEMPTED) {
1216                 put_lseg(wdata->lseg);
1217                 wdata->lseg = NULL;
1218         } else
1219                 nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
1220
1221         dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
1222         return trypnfs;
1223 }
1224
1225 static void
1226 pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *head, int how)
1227 {
1228         struct nfs_write_data *data;
1229         const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
1230         struct pnfs_layout_segment *lseg = desc->pg_lseg;
1231
1232         desc->pg_lseg = NULL;
1233         while (!list_empty(head)) {
1234                 enum pnfs_try_status trypnfs;
1235
1236                 data = list_entry(head->next, struct nfs_write_data, list);
1237                 list_del_init(&data->list);
1238
1239                 trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
1240                 if (trypnfs == PNFS_NOT_ATTEMPTED)
1241                         pnfs_write_through_mds(desc, data);
1242         }
1243         put_lseg(lseg);
1244 }
1245
1246 int
1247 pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
1248 {
1249         LIST_HEAD(head);
1250         int ret;
1251
1252         ret = nfs_generic_flush(desc, &head);
1253         if (ret != 0) {
1254                 put_lseg(desc->pg_lseg);
1255                 desc->pg_lseg = NULL;
1256                 return ret;
1257         }
1258         pnfs_do_multiple_writes(desc, &head, desc->pg_ioflags);
1259         return 0;
1260 }
1261 EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
1262
1263 static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
1264 {
1265         struct nfs_pageio_descriptor pgio;
1266
1267         put_lseg(data->lseg);
1268         data->lseg = NULL;
1269         dprintk("pnfs read error = %d\n", data->pnfs_error);
1270
1271         nfs_pageio_init_read_mds(&pgio, data->inode);
1272
1273         while (!list_empty(&data->pages)) {
1274                 struct nfs_page *req = nfs_list_entry(data->pages.next);
1275
1276                 nfs_list_remove_request(req);
1277                 nfs_pageio_add_request(&pgio, req);
1278         }
1279         nfs_pageio_complete(&pgio);
1280 }
1281
1282 /*
1283  * Called by non-RPC-based layout drivers
1284  */
1285 void pnfs_ld_read_done(struct nfs_read_data *data)
1286 {
1287         if (likely(!data->pnfs_error)) {
1288                 __nfs4_read_done_cb(data);
1289                 data->mds_ops->rpc_call_done(&data->task, data);
1290         } else
1291                 pnfs_ld_handle_read_error(data);
1292         data->mds_ops->rpc_release(data);
1293 }
1294 EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
1295
1296 static void
1297 pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
1298                 struct nfs_read_data *data)
1299 {
1300         list_splice_tail_init(&data->pages, &desc->pg_list);
1301         if (data->req && list_empty(&data->req->wb_list))
1302                 nfs_list_add_request(data->req, &desc->pg_list);
1303         nfs_pageio_reset_read_mds(desc);
1304         desc->pg_recoalesce = 1;
1305         nfs_readdata_release(data);
1306 }
1307
1308 /*
1309  * Call the appropriate parallel I/O subsystem read function.
1310  */
1311 static enum pnfs_try_status
1312 pnfs_try_to_read_data(struct nfs_read_data *rdata,
1313                        const struct rpc_call_ops *call_ops,
1314                        struct pnfs_layout_segment *lseg)
1315 {
1316         struct inode *inode = rdata->inode;
1317         struct nfs_server *nfss = NFS_SERVER(inode);
1318         enum pnfs_try_status trypnfs;
1319
1320         rdata->mds_ops = call_ops;
1321         rdata->lseg = get_lseg(lseg);
1322
1323         dprintk("%s: Reading ino:%lu %u@%llu\n",
1324                 __func__, inode->i_ino, rdata->args.count, rdata->args.offset);
1325
1326         trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
1327         if (trypnfs == PNFS_NOT_ATTEMPTED) {
1328                 put_lseg(rdata->lseg);
1329                 rdata->lseg = NULL;
1330         } else {
1331                 nfs_inc_stats(inode, NFSIOS_PNFS_READ);
1332         }
1333         dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
1334         return trypnfs;
1335 }
1336
1337 static void
1338 pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *head)
1339 {
1340         struct nfs_read_data *data;
1341         const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
1342         struct pnfs_layout_segment *lseg = desc->pg_lseg;
1343
1344         desc->pg_lseg = NULL;
1345         while (!list_empty(head)) {
1346                 enum pnfs_try_status trypnfs;
1347
1348                 data = list_entry(head->next, struct nfs_read_data, list);
1349                 list_del_init(&data->list);
1350
1351                 trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
1352                 if (trypnfs == PNFS_NOT_ATTEMPTED)
1353                         pnfs_read_through_mds(desc, data);
1354         }
1355         put_lseg(lseg);
1356 }
1357
1358 int
1359 pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
1360 {
1361         LIST_HEAD(head);
1362         int ret;
1363
1364         ret = nfs_generic_pagein(desc, &head);
1365         if (ret != 0) {
1366                 put_lseg(desc->pg_lseg);
1367                 desc->pg_lseg = NULL;
1368                 return ret;
1369         }
1370         pnfs_do_multiple_reads(desc, &head);
1371         return 0;
1372 }
1373 EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
1374
1375 /*
1376  * There can be multiple RW segments.
1377  */
1378 static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
1379 {
1380         struct pnfs_layout_segment *lseg;
1381
1382         list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
1383                 if (lseg->pls_range.iomode == IOMODE_RW &&
1384                     test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
1385                         list_add(&lseg->pls_lc_list, listp);
1386         }
1387 }
1388
1389 void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
1390 {
1391         if (lseg->pls_range.iomode == IOMODE_RW) {
1392                 dprintk("%s Setting layout IOMODE_RW fail bit\n", __func__);
1393                 set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
1394         } else {
1395                 dprintk("%s Setting layout IOMODE_READ fail bit\n", __func__);
1396                 set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
1397         }
1398 }
1399 EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
1400
1401 void
1402 pnfs_set_layoutcommit(struct nfs_write_data *wdata)
1403 {
1404         struct nfs_inode *nfsi = NFS_I(wdata->inode);
1405         loff_t end_pos = wdata->mds_offset + wdata->res.count;
1406         bool mark_as_dirty = false;
1407
1408         spin_lock(&nfsi->vfs_inode.i_lock);
1409         if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
1410                 mark_as_dirty = true;
1411                 dprintk("%s: Set layoutcommit for inode %lu ",
1412                         __func__, wdata->inode->i_ino);
1413         }
1414         if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &wdata->lseg->pls_flags)) {
1415                 /* references matched in nfs4_layoutcommit_release */
1416                 get_lseg(wdata->lseg);
1417         }
1418         if (end_pos > nfsi->layout->plh_lwb)
1419                 nfsi->layout->plh_lwb = end_pos;
1420         spin_unlock(&nfsi->vfs_inode.i_lock);
1421         dprintk("%s: lseg %p end_pos %llu\n",
1422                 __func__, wdata->lseg, nfsi->layout->plh_lwb);
1423
1424         /* if pnfs_layoutcommit_inode() runs between inode locks, the next one
1425          * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
1426         if (mark_as_dirty)
1427                 mark_inode_dirty_sync(wdata->inode);
1428 }
1429 EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
1430
1431 void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
1432 {
1433         struct nfs_server *nfss = NFS_SERVER(data->args.inode);
1434
1435         if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
1436                 nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
1437 }
1438
1439 /*
1440  * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
1441  * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
1442  * data to disk to allow the server to recover the data if it crashes.
1443  * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
1444  * is off, and a COMMIT is sent to a data server, or
1445  * if WRITEs to a data server return NFS_DATA_SYNC.
1446  */
1447 int
1448 pnfs_layoutcommit_inode(struct inode *inode, bool sync)
1449 {
1450         struct nfs4_layoutcommit_data *data;
1451         struct nfs_inode *nfsi = NFS_I(inode);
1452         loff_t end_pos;
1453         int status = 0;
1454
1455         dprintk("--> %s inode %lu\n", __func__, inode->i_ino);
1456
1457         if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
1458                 return 0;
1459
1460         /* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
1461         data = kzalloc(sizeof(*data), GFP_NOFS);
1462         if (!data) {
1463                 status = -ENOMEM;
1464                 goto out;
1465         }
1466
1467         if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
1468                 goto out_free;
1469
1470         if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
1471                 if (!sync) {
1472                         status = -EAGAIN;
1473                         goto out_free;
1474                 }
1475                 status = wait_on_bit_lock(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING,
1476                                         nfs_wait_bit_killable, TASK_KILLABLE);
1477                 if (status)
1478                         goto out_free;
1479         }
1480
1481         INIT_LIST_HEAD(&data->lseg_list);
1482         spin_lock(&inode->i_lock);
1483         if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
1484                 clear_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags);
1485                 spin_unlock(&inode->i_lock);
1486                 wake_up_bit(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING);
1487                 goto out_free;
1488         }
1489
1490         pnfs_list_write_lseg(inode, &data->lseg_list);
1491
1492         end_pos = nfsi->layout->plh_lwb;
1493         nfsi->layout->plh_lwb = 0;
1494
1495         memcpy(&data->args.stateid.data, nfsi->layout->plh_stateid.data,
1496                 sizeof(nfsi->layout->plh_stateid.data));
1497         spin_unlock(&inode->i_lock);
1498
1499         data->args.inode = inode;
1500         data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
1501         nfs_fattr_init(&data->fattr);
1502         data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
1503         data->res.fattr = &data->fattr;
1504         data->args.lastbytewritten = end_pos - 1;
1505         data->res.server = NFS_SERVER(inode);
1506
1507         status = nfs4_proc_layoutcommit(data, sync);
1508 out:
1509         if (status)
1510                 mark_inode_dirty_sync(inode);
1511         dprintk("<-- %s status %d\n", __func__, status);
1512         return status;
1513 out_free:
1514         kfree(data);
1515         goto out;
1516 }