/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization.  if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose.  the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out of or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/buffer_head.h>	/* various write calls */
#include <linux/prefetch.h>
#include <linux/pagevec.h>

#include "../pnfs.h"
#include "../nfs4session.h"
#include "../internal.h"
#include "blocklayout.h"

#define NFSDBG_FACILITY NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

static void print_page(struct page *page)
{
	dprintk("PRINTPAGE page %p\n", page);
	dprintk("	PagePrivate %d\n", PagePrivate(page));
	dprintk("	PageUptodate %d\n", PageUptodate(page));
	dprintk("	PageError %d\n", PageError(page));
	dprintk("	PageDirty %d\n", PageDirty(page));
	dprintk("	PageReferenced %d\n", PageReferenced(page));
	dprintk("	PageLocked %d\n", PageLocked(page));
	dprintk("	PageWriteback %d\n", PageWriteback(page));
	dprintk("	PageMappedToDisk %d\n", PageMappedToDisk(page));
	dprintk("\n");
}

/* Given the be associated with isect, determine if page data needs to be
 * initialized.
 */
static int is_hole(struct pnfs_block_extent *be, sector_t isect)
{
	if (be->be_state == PNFS_BLOCK_NONE_DATA)
		return 1;
	else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
		return 0;
	else
		return !bl_is_sector_init(be->be_inval, isect);
}

/* Given the be associated with isect, determine if page data can be
 * written to disk.
 */
static int is_writable(struct pnfs_block_extent *be, sector_t isect)
{
	return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		be->be_state == PNFS_BLOCK_INVALID_DATA);
}

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	void (*pnfs_callback) (void *data, int num_se);
	void *data;
	int bse_count;
};

static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv  = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
		rv->bse_count = 0;
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data, p->bse_count);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}

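/* Submit bio, if any, taking a reference on the parallel_io tracker so
 * that completion cannot be signalled until this bio's end_io has run.
 * Always returns NULL so the caller can simply reset its bio pointer.
 */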
static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			rw == READ ? "read" : "write", bio->bi_iter.bi_size,
			(unsigned long long)bio->bi_iter.bi_sector);
		submit_bio(rw, bio);
	}
	return NULL;
}

static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
				     struct pnfs_block_extent *be,
				     void (*end_io)(struct bio *, int err),
				     struct parallel_io *par)
{
	struct bio *bio;

	npg = min(npg, BIO_MAX_PAGES);
	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (npg /= 2))
			bio = bio_alloc(GFP_NOIO, npg);
	}

	if (bio) {
		bio->bi_iter.bi_sector = isect - be->be_f_offset +
			be->be_v_offset;
		bio->bi_bdev = be->be_mdev;
		bio->bi_end_io = end_io;
		bio->bi_private = par;
	}
	return bio;
}

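/* Add a page to bio, allocating a bio on first use.  bio_add_page()
 * adds less than len once the bio is full, in which case the full bio
 * is submitted and a fresh one is started for the remaining pages.
 */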
static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
				      sector_t isect, struct page *page,
				      struct pnfs_block_extent *be,
				      void (*end_io)(struct bio *, int err),
				      struct parallel_io *par,
				      unsigned int offset, int len)
{
	isect = isect + (offset >> SECTOR_SHIFT);
	dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
		npg, rw, (unsigned long long)isect, offset, len);
retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
	}
	if (bio_add_page(bio, page, len, offset) < len) {
		bio = bl_submit_bio(rw, bio);
		goto retry;
	}
	return bio;
}

static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
				      sector_t isect, struct page *page,
				      struct pnfs_block_extent *be,
				      void (*end_io)(struct bio *, int err),
				      struct parallel_io *par)
{
	return do_add_page_to_bio(bio, npg, rw, isect, page, be,
				  end_io, par, 0, PAGE_CACHE_SIZE);
}

/* This is basically copied from mpage_end_io_read */
static void bl_end_io_read(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	if (!err)
		bio_for_each_segment_all(bvec, bio, i)
			SetPageUptodate(bvec->bv_page);

	if (err) {
		struct nfs_read_data *rdata = par->data;
		struct nfs_pgio_header *header = rdata->header;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_read_data *rdata;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	rdata = container_of(task, struct nfs_read_data, task);
	pnfs_ld_read_done(rdata);
}

static void
bl_end_par_io_read(void *data, int unused)
{
	struct nfs_read_data *rdata = data;

	rdata->task.tk_status = rdata->header->pnfs_error;
	INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
	schedule_work(&rdata->task.u.tk_work);
}

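/* Read path: walk the request's pages, look up the extent covering each
 * one, and coalesce contiguous pages into bios.  Holes are zero-filled
 * without touching the device; a page backed by a copy-on-write extent
 * is read from cow_read rather than be.
 */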
static enum pnfs_try_status
bl_read_pagelist(struct nfs_read_data *rdata)
{
	struct nfs_pgio_header *header = rdata->header;
	int i, hole;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = rdata->args.offset;
	size_t bytes_left = rdata->args.count;
	unsigned int pg_offset, pg_len;
	struct page **pages = rdata->args.pages;
	int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;
	const bool is_dio = (header->dreq != NULL);

	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
	       rdata->pages.npages, f_offset, (unsigned int)rdata->args.count);

	par = alloc_parallel(rdata);
	if (!par)
		goto use_mds;
	par->pnfs_callback = bl_end_par_io_read;
	/* At this point, we can no longer jump to use_mds */

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < rdata->pages.npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bl_put_extent(cow_read);
			bio = bl_submit_bio(READ, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
					     isect, &cow_read);
			if (!be) {
				header->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be->be_length -
				(isect - be->be_f_offset);
			if (cow_read) {
				sector_t cow_length = cow_read->be_length -
					(isect - cow_read->be_f_offset);
				extent_length = min(extent_length, cow_length);
			}
		}

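		/* Direct I/O may start and end mid-page; clamp the
		 * transfer to the requested part of the page.  Page
		 * cache I/O always moves whole pages.
		 */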
		if (is_dio) {
			pg_offset = f_offset & ~PAGE_CACHE_MASK;
			if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
				pg_len = PAGE_CACHE_SIZE - pg_offset;
			else
				pg_len = bytes_left;

			f_offset += pg_len;
			bytes_left -= pg_len;
			isect += (pg_offset >> SECTOR_SHIFT);
		} else {
			pg_offset = 0;
			pg_len = PAGE_CACHE_SIZE;
		}

		hole = is_hole(be, isect);
		if (hole && !cow_read) {
			bio = bl_submit_bio(READ, bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], pg_offset, pg_len);
			print_page(pages[i]);
			SetPageUptodate(pages[i]);
		} else {
			struct pnfs_block_extent *be_read;

			be_read = (hole && cow_read) ? cow_read : be;
			bio = do_add_page_to_bio(bio, rdata->pages.npages - i,
						 READ,
						 isect, pages[i], be_read,
						 bl_end_io_read, par,
						 pg_offset, pg_len);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= PAGE_CACHE_SECTORS;
	}
	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
		rdata->res.eof = 1;
		rdata->res.count = header->inode->i_size - rdata->args.offset;
	} else {
		rdata->res.count = (isect << SECTOR_SHIFT) - rdata->args.offset;
	}
out:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	bl_submit_bio(READ, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;

 use_mds:
	dprintk("Giving up and using normal NFS\n");
	return PNFS_NOT_ATTEMPTED;
}

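/* Round [offset, offset + count) out to page boundaries, then walk the
 * covering extents, marking the sectors of any INVALID extent for
 * LAYOUTCOMMIT.  Each committed range consumes one of the short extents
 * pre-allocated by the write path.
 */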
static void mark_extents_written(struct pnfs_block_layout *bl,
				 __u64 offset, __u32 count)
{
	sector_t isect, end;
	struct pnfs_block_extent *be;
	struct pnfs_block_short_extent *se;

	dprintk("%s(%llu, %u)\n", __func__, offset, count);
	if (count == 0)
		return;
	isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
	end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
	end >>= SECTOR_SHIFT;
	while (isect < end) {
		sector_t len;
		be = bl_find_get_extent(bl, isect, NULL);
		BUG_ON(!be); /* FIXME */
		len = min(end, be->be_f_offset + be->be_length) - isect;
		if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
			se = bl_pop_one_short_extent(be->be_inval);
			BUG_ON(!se);
			bl_mark_for_commit(be, isect, len, se);
		}
		isect += len;
		bl_put_extent(be);
	}
}

static void bl_end_io_write_zero(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		/* This is the zeroing page we added */
		end_page_writeback(bvec->bv_page);
		page_cache_release(bvec->bv_page);
	}

	if (unlikely(err)) {
		struct nfs_write_data *data = par->data;
		struct nfs_pgio_header *header = data->header;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

static void bl_end_io_write(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct nfs_write_data *data = par->data;
	struct nfs_pgio_header *header = data->header;

	if (!uptodate) {
		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write,
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_write_data *wdata;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	wdata = container_of(task, struct nfs_write_data, task);
	if (likely(!wdata->header->pnfs_error)) {
		/* Marks for LAYOUTCOMMIT */
		mark_extents_written(BLK_LSEG2EXT(wdata->header->lseg),
				     wdata->args.offset, wdata->args.count);
	}
	pnfs_ld_write_done(wdata);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data, int num_se)
{
	struct nfs_write_data *wdata = data;

	if (unlikely(wdata->header->pnfs_error)) {
		bl_free_short_extents(&BLK_LSEG2EXT(wdata->header->lseg)->bl_inval,
					num_se);
	}

	wdata->task.tk_status = wdata->header->pnfs_error;
	wdata->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
	schedule_work(&wdata->task.u.tk_work);
}

/* FIXME STUB - mark intersection of layout and page as bad, so it is
 * not used again.
 */
static void mark_bad_read(void)
{
	return;
}

/*
 * map_block:  map a requested I/O block (isect) into an offset in the LVM
 * block_device
 */
static void
map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
{
	dprintk("%s enter be=%p\n", __func__, be);

	set_buffer_mapped(bh);
	bh->b_bdev = be->be_mdev;
	bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
	    (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);

	dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
		__func__, (unsigned long long)isect, (long)bh->b_blocknr,
		bh->b_size);
	return;
}

static void
bl_read_single_end_io(struct bio *bio, int error)
{
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct page *page = bvec->bv_page;

	/* Only one page in bvec */
	unlock_page(page);
}

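/* Synchronously read the sector containing [offset, offset + len) into
 * a throwaway "shadow" page, then copy just the requested bytes into
 * page, leaving the rest of page untouched.  Both callers keep the
 * range within a single sector.
 */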
static int
bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
		    unsigned int offset, unsigned int len)
{
	struct bio *bio;
	struct page *shadow_page;
	sector_t isect;
	char *kaddr, *kshadow_addr;
	int ret = 0;

	dprintk("%s: offset %u len %u\n", __func__, offset, len);

	shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (shadow_page == NULL)
		return -ENOMEM;

	bio = bio_alloc(GFP_NOIO, 1);
	if (bio == NULL) {
		__free_page(shadow_page);
		return -ENOMEM;
	}

	isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
		(offset / SECTOR_SIZE);

	bio->bi_iter.bi_sector = isect - be->be_f_offset + be->be_v_offset;
	bio->bi_bdev = be->be_mdev;
	bio->bi_end_io = bl_read_single_end_io;

	lock_page(shadow_page);
	if (bio_add_page(bio, shadow_page,
			 SECTOR_SIZE, round_down(offset, SECTOR_SIZE)) == 0) {
		unlock_page(shadow_page);
		__free_page(shadow_page);
		bio_put(bio);
		return -EIO;
	}

	submit_bio(READ, bio);
	wait_on_page_locked(shadow_page);
	if (unlikely(!test_bit(BIO_UPTODATE, &bio->bi_flags))) {
		ret = -EIO;
	} else {
		kaddr = kmap_atomic(page);
		kshadow_addr = kmap_atomic(shadow_page);
		memcpy(kaddr + offset, kshadow_addr + offset, len);
		kunmap_atomic(kshadow_addr);
		kunmap_atomic(kaddr);
	}
	__free_page(shadow_page);
	bio_put(bio);

	return ret;
}

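/* Before a partial-page write, bring the sectors surrounding the dirty
 * range up to date: read them from be, or zero them when there is no
 * backing extent.  With full_page the whole page is considered,
 * otherwise only the sectors touched by the dirty range.
 */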
static int
bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be,
			  unsigned int dirty_offset, unsigned int dirty_len,
			  bool full_page)
{
	int ret = 0;
	unsigned int start, end;

	if (full_page) {
		start = 0;
		end = PAGE_CACHE_SIZE;
	} else {
		start = round_down(dirty_offset, SECTOR_SIZE);
		end = round_up(dirty_offset + dirty_len, SECTOR_SIZE);
	}

	dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len);
	if (!be) {
		zero_user_segments(page, start, dirty_offset,
				   dirty_offset + dirty_len, end);
		if (start == 0 && end == PAGE_CACHE_SIZE &&
		    trylock_page(page)) {
			SetPageUptodate(page);
			unlock_page(page);
		}
		return ret;
	}

	if (start != dirty_offset)
		ret = bl_do_readpage_sync(page, be, start, dirty_offset - start);

	if (!ret && (dirty_offset + dirty_len < end))
		ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len,
					  end - dirty_offset - dirty_len);

	return ret;
}

/* Given an unmapped page, zero it (or read it in for COW); the page is
 * locked by the caller.
 */
static int
init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
{
	struct buffer_head *bh = NULL;
	int ret = 0;
	sector_t isect;

	dprintk("%s enter, %p\n", __func__, page);
	BUG_ON(PageUptodate(page));
	if (!cow_read) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		goto cleanup;
	}

	bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
	if (!bh) {
		ret = -ENOMEM;
		goto cleanup;
	}

	isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
	map_block(bh, isect, cow_read);
	if (!bh_uptodate_or_lock(bh))
		ret = bh_submit_read(bh);
	if (ret)
		goto cleanup;
	SetPageUptodate(page);

cleanup:
	if (bh)
		free_buffer_head(bh);
	if (ret) {
		/* Need to mark layout with bad read...should now
		 * just use nfs4 for reads and writes.
		 */
		mark_bad_read();
	}
	return ret;
}

/* Find or create a zeroing page marked as being under writeback.
 * Returns ERR_PTR on error, NULL to indicate this page should be
 * skipped, and the page itself to indicate write out.
 */
static struct page *
bl_find_get_zeroing_page(struct inode *inode, pgoff_t index,
			struct pnfs_block_extent *cow_read)
{
	struct page *page;
	int locked = 0;
	page = find_get_page(inode->i_mapping, index);
	if (page)
		goto check_page;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (unlikely(!page)) {
		dprintk("%s oom\n", __func__);
		return ERR_PTR(-ENOMEM);
	}
	locked = 1;

check_page:
	/* PageDirty: Other will write this out
	 * PageWriteback: Other is writing this out
	 * PageUptodate: It was read before
	 */
	if (PageDirty(page) || PageWriteback(page)) {
		print_page(page);
		if (locked)
			unlock_page(page);
		page_cache_release(page);
		return NULL;
	}

	if (!locked) {
		lock_page(page);
		locked = 1;
		goto check_page;
	}
	if (!PageUptodate(page)) {
		/* New page: read it in or zero it */
		init_page_for_write(page, cow_read);
	}
	set_page_writeback(page);
	unlock_page(page);

	return page;
}

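/* Write path.  Non-block-aligned direct writes are bounced back to the
 * MDS.  A write into an INVALID (not yet initialized) extent must first
 * zero the uninitialized pages of the surrounding pNFS block, and
 * sub-sector writes need sync read-modify-write cycles.
 */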
static enum pnfs_try_status
bl_write_pagelist(struct nfs_write_data *wdata, int sync)
{
	struct nfs_pgio_header *header = wdata->header;
	int i, ret, npg_zero, pg_index, last = 0;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, last_isect = 0, extent_length = 0;
	struct parallel_io *par = NULL;
	loff_t offset = wdata->args.offset;
	size_t count = wdata->args.count;
	unsigned int pg_offset, pg_len, saved_len;
	struct page **pages = wdata->args.pages;
	struct page *page;
	pgoff_t index;
	u64 temp;
	int npg_per_block =
	    NFS_SERVER(header->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;

	dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);

	if (header->dreq != NULL &&
	    (!IS_ALIGNED(offset, NFS_SERVER(header->inode)->pnfs_blksize) ||
	     !IS_ALIGNED(count, NFS_SERVER(header->inode)->pnfs_blksize))) {
		dprintk("pnfsblock nonblock aligned DIO writes. Resend MDS\n");
		goto out_mds;
	}
	/* At this point, wdata->pages is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * to have it redone using nfs.
	 */
	par = alloc_parallel(wdata);
	if (!par)
		goto out_mds;
	par->pnfs_callback = bl_end_par_io_write;
	/* At this point, have to be more careful with error handling */

	isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
	be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg), isect, &cow_read);
	if (!be || !is_writable(be, isect)) {
		dprintk("%s no matching extents!\n", __func__);
		goto out_mds;
	}

	/* First page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		if (likely(!bl_push_one_short_extent(be->be_inval)))
			par->bse_count++;
		else
			goto out_mds;
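		/* do_div() divides temp in place and returns the
		 * remainder, so npg_zero becomes the page offset of this
		 * write within its pNFS block, i.e. how many earlier
		 * pages of the block may still need zeroing.
		 */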
		temp = offset >> PAGE_CACHE_SHIFT;
		npg_zero = do_div(temp, npg_per_block);
		isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
				     (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
		extent_length = be->be_length - (isect - be->be_f_offset);

fill_invalid_ext:
		dprintk("%s need to zero %d pages\n", __func__, npg_zero);
		for (; npg_zero > 0; npg_zero--) {
			if (bl_is_sector_init(be->be_inval, isect)) {
				dprintk("isect %llu already init\n",
					(unsigned long long)isect);
				goto next_page;
			}
			/* page ref released in bl_end_io_write_zero */
			index = isect >> PAGE_CACHE_SECTOR_SHIFT;
			dprintk("%s zero %dth page: index %lu isect %llu\n",
				__func__, npg_zero, index,
				(unsigned long long)isect);
			page = bl_find_get_zeroing_page(header->inode, index,
							cow_read);
			if (unlikely(IS_ERR(page))) {
				header->pnfs_error = PTR_ERR(page);
				goto out;
			} else if (page == NULL)
				goto next_page;

			ret = bl_mark_sectors_init(be->be_inval, isect,
						       PAGE_CACHE_SECTORS);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				end_page_writeback(page);
				page_cache_release(page);
				header->pnfs_error = ret;
				goto out;
			}
			if (likely(!bl_push_one_short_extent(be->be_inval)))
				par->bse_count++;
			else {
				end_page_writeback(page);
				page_cache_release(page);
				header->pnfs_error = -ENOMEM;
				goto out;
			}
			/* FIXME: This should be done in bi_end_io */
			mark_extents_written(BLK_LSEG2EXT(header->lseg),
					     page->index << PAGE_CACHE_SHIFT,
					     PAGE_CACHE_SIZE);

			bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
						 isect, page, be,
						 bl_end_io_write_zero, par);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
next_page:
			isect += PAGE_CACHE_SECTORS;
			extent_length -= PAGE_CACHE_SECTORS;
		}
		if (last)
			goto write_done;
	}
	bio = bl_submit_bio(WRITE, bio);

	/* Middle pages */
	pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
	for (i = pg_index; i < wdata->pages.npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bl_put_extent(cow_read);
			bio = bl_submit_bio(WRITE, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
					     isect, &cow_read);
			if (!be || !is_writable(be, isect)) {
				header->pnfs_error = -EINVAL;
				goto out;
			}
			if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
				if (likely(!bl_push_one_short_extent(
								be->be_inval)))
					par->bse_count++;
				else {
					header->pnfs_error = -ENOMEM;
					goto out;
				}
			}
			extent_length = be->be_length -
			    (isect - be->be_f_offset);
		}

		dprintk("%s offset %lld count %Zu\n", __func__, offset, count);
		pg_offset = offset & ~PAGE_CACHE_MASK;
		if (pg_offset + count > PAGE_CACHE_SIZE)
			pg_len = PAGE_CACHE_SIZE - pg_offset;
		else
			pg_len = count;

		saved_len = pg_len;
		if (be->be_state == PNFS_BLOCK_INVALID_DATA &&
		    !bl_is_sector_init(be->be_inval, isect)) {
			ret = bl_read_partial_page_sync(pages[i], cow_read,
							pg_offset, pg_len, true);
			if (ret) {
				dprintk("%s bl_read_partial_page_sync fail %d\n",
					__func__, ret);
				header->pnfs_error = ret;
				goto out;
			}

			ret = bl_mark_sectors_init(be->be_inval, isect,
						       PAGE_CACHE_SECTORS);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				header->pnfs_error = ret;
				goto out;
			}

			/* Expand to full page write */
			pg_offset = 0;
			pg_len = PAGE_CACHE_SIZE;
		} else if ((pg_offset & (SECTOR_SIZE - 1)) ||
			   (pg_len & (SECTOR_SIZE - 1))) {
			/* ahh, nasty case. We have to do sync full sector
			 * read-modify-write cycles.
			 */
			unsigned int saved_offset = pg_offset;
			ret = bl_read_partial_page_sync(pages[i], be, pg_offset,
							pg_len, false);
			if (unlikely(ret)) {
				dprintk("%s bl_read_partial_page_sync fail %d\n",
					__func__, ret);
				header->pnfs_error = ret;
				goto out;
			}
			pg_offset = round_down(pg_offset, SECTOR_SIZE);
			pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE)
				 - pg_offset;
		}

		bio = do_add_page_to_bio(bio, wdata->pages.npages - i, WRITE,
					 isect, pages[i], be,
					 bl_end_io_write, par,
					 pg_offset, pg_len);
		if (IS_ERR(bio)) {
			header->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}
		offset += saved_len;
		count -= saved_len;
		isect += PAGE_CACHE_SECTORS;
		last_isect = isect;
		extent_length -= PAGE_CACHE_SECTORS;
	}

	/* Last page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		bio = bl_submit_bio(WRITE, bio);
		temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
		npg_zero = npg_per_block - do_div(temp, npg_per_block);
		if (npg_zero < npg_per_block) {
			last = 1;
			goto fill_invalid_ext;
		}
	}

write_done:
	wdata->res.count = wdata->args.count;
out:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	bl_submit_bio(WRITE, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;
out_mds:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	kfree(par);
	return PNFS_NOT_ATTEMPTED;
}

/* FIXME - range ignored */
static void
release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
{
	int i;
	struct pnfs_block_extent *be;

	spin_lock(&bl->bl_ext_lock);
	for (i = 0; i < EXTENT_LISTS; i++) {
		while (!list_empty(&bl->bl_extents[i])) {
			be = list_first_entry(&bl->bl_extents[i],
					      struct pnfs_block_extent,
					      be_node);
			list_del(&be->be_node);
			bl_put_extent(be);
		}
	}
	spin_unlock(&bl->bl_ext_lock);
}

static void
release_inval_marks(struct pnfs_inval_markings *marks)
{
	struct pnfs_inval_tracking *pos, *temp;
	struct pnfs_block_short_extent *se, *stemp;

	list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
		list_del(&pos->it_link);
		kfree(pos);
	}

	list_for_each_entry_safe(se, stemp, &marks->im_extents, bse_node) {
		list_del(&se->bse_node);
		kfree(se);
	}
	return;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);

	dprintk("%s enter\n", __func__);
	release_extents(bl, NULL);
	release_inval_marks(&bl->bl_inval);
	kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;
	spin_lock_init(&bl->bl_ext_lock);
	INIT_LIST_HEAD(&bl->bl_extents[0]);
	INIT_LIST_HEAD(&bl->bl_extents[1]);
	INIT_LIST_HEAD(&bl->bl_commit);
	INIT_LIST_HEAD(&bl->bl_committing);
	bl->bl_count = 0;
	bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
	BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
	return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* We pretty much ignore lseg, and store all data layout wide, so we
 * can correctly merge.
 */
static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
						 struct nfs4_layoutget_res *lgr,
						 gfp_t gfp_flags)
{
	struct pnfs_layout_segment *lseg;
	int status;

	dprintk("%s enter\n", __func__);
	lseg = kzalloc(sizeof(*lseg), gfp_flags);
	if (!lseg)
		return ERR_PTR(-ENOMEM);
	status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
	if (status) {
		/* We don't want to call the full-blown bl_free_lseg,
		 * since on error extents were not touched.
		 */
		kfree(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}

static void
bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
		       const struct nfs4_layoutcommit_args *arg)
{
	dprintk("%s enter\n", __func__);
	encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;

	dprintk("%s enter\n", __func__);
	clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status);
}

static void free_blk_mountid(struct block_mount_id *mid)
{
	if (mid) {
		struct pnfs_block_dev *dev, *tmp;

		/* No need to take bm_lock as we are last user freeing bm_devlist */
		list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
			list_del(&dev->bm_node);
			bl_free_block_dev(dev);
		}
		kfree(mid);
	}
}

/* This is mostly copied from the filelayout_get_device_info function.
 * It seems much of this should be at the generic pnfs level.
 */
static struct pnfs_block_dev *
nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
			struct nfs4_deviceid *d_id)
{
	struct pnfs_device *dev;
	struct pnfs_block_dev *rv;
	u32 max_resp_sz;
	int max_pages;
	struct page **pages = NULL;
	int i, rc;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = nfs_page_array_len(0, max_resp_sz);
	dprintk("%s max_resp_sz %u max_pages %d\n",
		__func__, max_resp_sz, max_pages);

	dev = kmalloc(sizeof(*dev), GFP_NOFS);
	if (!dev) {
		dprintk("%s kmalloc failed\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
	if (pages == NULL) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}
	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(GFP_NOFS);
		if (!pages[i]) {
			rv = ERR_PTR(-ENOMEM);
			goto out_free;
		}
	}

	memcpy(&dev->dev_id, d_id, sizeof(*d_id));
	dev->layout_type = LAYOUT_BLOCK_VOLUME;
	dev->pages = pages;
	dev->pgbase = 0;
	dev->pglen = PAGE_SIZE * max_pages;
	dev->mincount = 0;
	dev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead;

	dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
	rc = nfs4_proc_getdeviceinfo(server, dev, NULL);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc) {
		rv = ERR_PTR(rc);
		goto out_free;
	}

	rv = nfs4_blk_decode_device(server, dev);
 out_free:
	for (i = 0; i < max_pages; i++)
		if (pages[i])
			__free_page(pages[i]);
	kfree(pages);
	kfree(dev);
	return rv;
}

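/* Called at mount time: walk the server's GETDEVICELIST results, fetch
 * and decode GETDEVICEINFO for each device, and stash the resulting
 * block devices in the per-server mount id.
 */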
static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	struct block_mount_id *b_mt_id = NULL;
	struct pnfs_devicelist *dlist = NULL;
	struct pnfs_block_dev *bdev;
	LIST_HEAD(block_disklist);
	int status, i;

	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
	if (!b_mt_id) {
		status = -ENOMEM;
		goto out_error;
	}
	/* Initialize nfs4 block layout mount id */
	spin_lock_init(&b_mt_id->bm_lock);
	INIT_LIST_HEAD(&b_mt_id->bm_devlist);

	dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
	if (!dlist) {
		status = -ENOMEM;
		goto out_error;
	}
	dlist->eof = 0;
	while (!dlist->eof) {
		status = nfs4_proc_getdevicelist(server, fh, dlist);
		if (status)
			goto out_error;
		dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
			__func__, dlist->num_devs, dlist->eof);
		for (i = 0; i < dlist->num_devs; i++) {
			bdev = nfs4_blk_get_deviceinfo(server, fh,
						       &dlist->dev_id[i]);
			if (IS_ERR(bdev)) {
				status = PTR_ERR(bdev);
				goto out_error;
			}
			spin_lock(&b_mt_id->bm_lock);
			list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
			spin_unlock(&b_mt_id->bm_lock);
		}
	}
	dprintk("%s SUCCESS\n", __func__);
	server->pnfs_ld_data = b_mt_id;

 out_return:
	kfree(dlist);
	return status;

 out_error:
	free_blk_mountid(b_mt_id);
	goto out_return;
}

static int
bl_clear_layoutdriver(struct nfs_server *server)
{
	struct block_mount_id *b_mt_id = server->pnfs_ld_data;

	dprintk("%s enter\n", __func__);
	free_blk_mountid(b_mt_id);
	dprintk("%s RETURNS\n", __func__);
	return 0;
}

static bool
is_aligned_req(struct nfs_page *req, unsigned int alignment)
{
	return IS_ALIGNED(req->wb_offset, alignment) &&
	       IS_ALIGNED(req->wb_bytes, alignment);
}

static void
bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (pgio->pg_dreq != NULL &&
	    !is_aligned_req(req, SECTOR_SIZE))
		nfs_pageio_reset_read_mds(pgio);
	else
		pnfs_generic_pg_init_read(pgio, req);
}

static bool
bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		struct nfs_page *req)
{
	if (pgio->pg_dreq != NULL &&
	    !is_aligned_req(req, SECTOR_SIZE))
		return false;

	return pnfs_generic_pg_test(pgio, prev, req);
}

/*
 * Return the number of contiguous bytes for a given inode
 * starting at page frame idx.
 */
static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t end;

	/* Optimize common case that writes from 0 to end of file */
	end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
	if (end != NFS_I(inode)->npages) {
		rcu_read_lock();
		end = radix_tree_next_hole(&mapping->page_tree, idx + 1, ULONG_MAX);
		rcu_read_unlock();
	}

	if (!end)
		return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT);
	else
		return (end - idx) << PAGE_CACHE_SHIFT;
}

static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (pgio->pg_dreq != NULL &&
	    !is_aligned_req(req, PAGE_CACHE_SIZE)) {
		nfs_pageio_reset_write_mds(pgio);
	} else {
		u64 wb_size;
		if (pgio->pg_dreq == NULL)
			wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
						      req->wb_index);
		else
			wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);

		pnfs_generic_pg_init_write(pgio, req, wb_size);
	}
}

static bool
bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		 struct nfs_page *req)
{
	if (pgio->pg_dreq != NULL &&
	    !is_aligned_req(req, PAGE_CACHE_SIZE))
		return false;

	return pnfs_generic_pg_test(pgio, prev, req);
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = bl_pg_init_read,
	.pg_test = bl_pg_test_read,
	.pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = bl_pg_init_write,
	.pg_test = bl_pg_test_write,
	.pg_doio = pnfs_generic_pg_writepages,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id				= LAYOUT_BLOCK_VOLUME,
	.name				= "LAYOUT_BLOCK_VOLUME",
	.owner				= THIS_MODULE,
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= bl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.encode_layoutcommit		= bl_encode_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.clear_layoutdriver		= bl_clear_layoutdriver,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
};

static const struct rpc_pipe_ops bl_upcall_ops = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= bl_pipe_downcall,
	.destroy_msg	= bl_pipe_destroy_msg,
};

static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb,
					    struct rpc_pipe *pipe)
{
	struct dentry *dir, *dentry;

	dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME);
	if (dir == NULL)
		return ERR_PTR(-ENOENT);
	dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe);
	dput(dir);
	return dentry;
}

static void nfs4blocklayout_unregister_sb(struct super_block *sb,
					  struct rpc_pipe *pipe)
{
	if (pipe->dentry)
		rpc_unlink(pipe->dentry);
}

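/* rpc_pipefs notifier: create or remove the per-net "blocklayout"
 * upcall pipe dentry as rpc_pipefs superblocks are mounted and
 * unmounted.
 */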
static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
			   void *ptr)
{
	struct super_block *sb = ptr;
	struct net *net = sb->s_fs_info;
	struct nfs_net *nn = net_generic(net, nfs_net_id);
	struct dentry *dentry;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return 0;

	if (nn->bl_device_pipe == NULL) {
		module_put(THIS_MODULE);
		return 0;
	}

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe);
		if (IS_ERR(dentry)) {
			ret = PTR_ERR(dentry);
			break;
		}
		nn->bl_device_pipe->dentry = dentry;
		break;
	case RPC_PIPEFS_UMOUNT:
		if (nn->bl_device_pipe->dentry)
			nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe);
		break;
	default:
		ret = -ENOTSUPP;
		break;
	}
	module_put(THIS_MODULE);
	return ret;
}

static struct notifier_block nfs4blocklayout_block = {
	.notifier_call = rpc_pipefs_event,
};

static struct dentry *nfs4blocklayout_register_net(struct net *net,
						   struct rpc_pipe *pipe)
{
	struct super_block *pipefs_sb;
	struct dentry *dentry;

	pipefs_sb = rpc_get_sb_net(net);
	if (!pipefs_sb)
		return NULL;
	dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe);
	rpc_put_sb_net(net);
	return dentry;
}

static void nfs4blocklayout_unregister_net(struct net *net,
					   struct rpc_pipe *pipe)
{
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		nfs4blocklayout_unregister_sb(pipefs_sb, pipe);
		rpc_put_sb_net(net);
	}
}

static int nfs4blocklayout_net_init(struct net *net)
{
	struct nfs_net *nn = net_generic(net, nfs_net_id);
	struct dentry *dentry;

	init_waitqueue_head(&nn->bl_wq);
	nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);
	if (IS_ERR(nn->bl_device_pipe))
		return PTR_ERR(nn->bl_device_pipe);
	dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
	if (IS_ERR(dentry)) {
		rpc_destroy_pipe_data(nn->bl_device_pipe);
		return PTR_ERR(dentry);
	}
	nn->bl_device_pipe->dentry = dentry;
	return 0;
}

static void nfs4blocklayout_net_exit(struct net *net)
{
	struct nfs_net *nn = net_generic(net, nfs_net_id);

	nfs4blocklayout_unregister_net(net, nn->bl_device_pipe);
	rpc_destroy_pipe_data(nn->bl_device_pipe);
	nn->bl_device_pipe = NULL;
}

static struct pernet_operations nfs4blocklayout_net_ops = {
	.init = nfs4blocklayout_net_init,
	.exit = nfs4blocklayout_net_exit,
};

static int __init nfs4blocklayout_init(void)
{
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out;

	ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block);
	if (ret)
		goto out_remove;
	ret = register_pernet_subsys(&nfs4blocklayout_net_ops);
	if (ret)
		goto out_notifier;
out:
	return ret;

out_notifier:
	rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
out_remove:
	pnfs_unregister_layoutdriver(&blocklayout_type);
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
	       __func__);

	rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
	unregister_pernet_subsys(&nfs4blocklayout_net_ops);
	pnfs_unregister_layoutdriver(&blocklayout_type);
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);