/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * Copyrights for code taken from ext2:
 *     Copyright (C) 1992, 1993, 1994, 1995
 *     Remy Card (card@masi.ibp.fr)
 *     Laboratoire MASI - Institut Blaise Pascal
 *     Universite Pierre et Marie Curie (Paris VI)
 *     from
 *     linux/fs/minix/inode.c
 *     Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.  Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/slab.h>

#include "exofs.h"

#define EXOFS_DBGMSG2(M...) do {} while (0)

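/* Page arrays are capped so that each kmalloc fits in a single page.
 * For example, on a 64-bit build with 4K pages, MAX_PAGES_KMALLOC is
 * PAGE_SIZE / sizeof(struct page *) = 4096 / 8 = 512 pages, i.e. up to
 * 2MB of data handled per I/O.
 */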
enum { BIO_MAX_PAGES_KMALLOC =
                (PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec),
        MAX_PAGES_KMALLOC =
                PAGE_SIZE / sizeof(struct page *),
};

unsigned exofs_max_io_pages(struct ore_layout *layout,
                            unsigned expected_pages)
{
        unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC);

        /* TODO: easily support bio chaining */
        pages = min_t(unsigned, pages,
                      layout->group_width * BIO_MAX_PAGES_KMALLOC);
        return pages;
}

struct page_collect {
        struct exofs_sb_info *sbi;
        struct inode *inode;
        unsigned expected_pages;
        struct ore_io_state *ios;

        struct page **pages;
        unsigned alloc_pages;
        unsigned nr_pages;
        unsigned long length;
        loff_t pg_first; /* keep 64bit also in 32-arches */
        bool read_4_write; /* This means two things: the read is synchronous
                            * and the pages should not be unlocked.
                            */
};

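/* A page_collect gathers contiguous pages for one I/O. The flow, as used
 * below, is: _pcol_init() once, then readpage_strip()/writepage_strip()
 * add pages until a discontinuity or allocation limit forces read_exec()/
 * write_exec() to submit, after which _pcol_reset() starts a new segment.
 */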
static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
                       struct inode *inode)
{
        struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;

        pcol->sbi = sbi;
        pcol->inode = inode;
        pcol->expected_pages = expected_pages;

        pcol->ios = NULL;
        pcol->pages = NULL;
        pcol->alloc_pages = 0;
        pcol->nr_pages = 0;
        pcol->length = 0;
        pcol->pg_first = -1;
        pcol->read_4_write = false;
}

static void _pcol_reset(struct page_collect *pcol)
{
        pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages);

        pcol->pages = NULL;
        pcol->alloc_pages = 0;
        pcol->nr_pages = 0;
        pcol->length = 0;
        pcol->pg_first = -1;
        pcol->ios = NULL;

        /* This is probably the end of the loop, but in writes
         * it might not end here. Don't be left with nothing.
         */
        if (!pcol->expected_pages)
                pcol->expected_pages = MAX_PAGES_KMALLOC;
}

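/* Try to allocate the page-pointer array, halving the request on each
 * failure so a transient memory shortage degrades to smaller I/Os
 * instead of failing the read/write outright.
 */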
static int pcol_try_alloc(struct page_collect *pcol)
{
        unsigned pages;

        /* TODO: easily support bio chaining */
        pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages);

        for (; pages; pages >>= 1) {
                pcol->pages = kmalloc(pages * sizeof(struct page *),
                                      GFP_KERNEL);
                if (likely(pcol->pages)) {
                        pcol->alloc_pages = pages;
                        return 0;
                }
        }

        EXOFS_ERR("Failed to kmalloc expected_pages=%u\n",
                  pcol->expected_pages);
        return -ENOMEM;
}

static void pcol_free(struct page_collect *pcol)
{
        kfree(pcol->pages);
        pcol->pages = NULL;

        if (pcol->ios) {
                ore_put_io_state(pcol->ios);
                pcol->ios = NULL;
        }
}

static int pcol_add_page(struct page_collect *pcol, struct page *page,
                         unsigned len)
{
        if (unlikely(pcol->nr_pages >= pcol->alloc_pages))
                return -ENOMEM;

        pcol->pages[pcol->nr_pages++] = page;
        pcol->length += len;
        return 0;
}

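/* The two helpers below move a page to its final state once its I/O
 * completes: update_read_page() marks it up to date (or recovers a read
 * of a not-yet-written area as zeroes), update_write_page() records any
 * error and ends writeback.
 */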
static int update_read_page(struct page *page, int ret)
{
        if (ret == 0) {
                /* Everything is OK */
                SetPageUptodate(page);
                if (PageError(page))
                        ClearPageError(page);
        } else if (ret == -EFAULT) {
                /* In this case we were trying to read something that wasn't on
                 * disk yet - return a page full of zeroes.  This should be OK,
                 * because the object should be empty (if there was a write
                 * before this read, the read would be waiting with the page
                 * locked).
                 */
                clear_highpage(page);

                SetPageUptodate(page);
                if (PageError(page))
                        ClearPageError(page);
                ret = 0; /* recovered error */
                EXOFS_DBGMSG("recovered read error\n");
        } else /* Error */
                SetPageError(page);

        return ret;
}

static void update_write_page(struct page *page, int ret)
{
        if (ret) {
                mapping_set_error(page->mapping, ret);
                SetPageError(page);
        }
        end_page_writeback(page);
}

/* Called at the end of reads, to optionally unlock pages and update their
 * status.
 */
static int __readpages_done(struct page_collect *pcol)
{
        int i;
        u64 resid;
        u64 good_bytes;
        u64 length = 0;
        int ret = ore_check_io(pcol->ios, &resid);

        if (likely(!ret))
                good_bytes = pcol->length;
        else
                good_bytes = pcol->length - resid;

        EXOFS_DBGMSG2("readpages_done(0x%lx) good_bytes=0x%llx"
                     " length=0x%lx nr_pages=%u\n",
                     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
                     pcol->nr_pages);

        for (i = 0; i < pcol->nr_pages; i++) {
                struct page *page = pcol->pages[i];
                struct inode *inode = page->mapping->host;
                int page_stat;

                if (inode != pcol->inode)
                        continue; /* osd might add more pages at end */

                if (likely(length < good_bytes))
                        page_stat = 0;
                else
                        page_stat = ret;

                EXOFS_DBGMSG2("    readpages_done(0x%lx, 0x%lx) %s\n",
                          inode->i_ino, page->index,
                          page_stat ? "bad_bytes" : "good_bytes");

                ret = update_read_page(page, page_stat);
                if (!pcol->read_4_write)
                        unlock_page(page);
                length += PAGE_SIZE;
        }

        pcol_free(pcol);
        EXOFS_DBGMSG2("readpages_done END\n");
        return ret;
}

/* callback of async reads */
static void readpages_done(struct ore_io_state *ios, void *p)
{
        struct page_collect *pcol = p;

        __readpages_done(pcol);
        atomic_dec(&pcol->sbi->s_curr_pending);
        kfree(pcol);
}

static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
{
        int i;

        for (i = 0; i < pcol->nr_pages; i++) {
                struct page *page = pcol->pages[i];

                if (rw == READ)
                        update_read_page(page, ret);
                else
                        update_write_page(page, ret);

                unlock_page(page);
        }
}

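/* Submit the collected pages as one ORE read. For a synchronous
 * read_4_write read the completion runs inline; otherwise ownership of
 * the pages moves to a heap copy of the pcol and readpages_done() will
 * finish the job asynchronously.
 */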
static int read_exec(struct page_collect *pcol)
{
        struct exofs_i_info *oi = exofs_i(pcol->inode);
        struct ore_io_state *ios;
        struct page_collect *pcol_copy = NULL;
        int ret;

        if (!pcol->pages)
                return 0;

        if (!pcol->ios) {
                int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->comps, true,
                                             pcol->pg_first << PAGE_CACHE_SHIFT,
                                             pcol->length, &pcol->ios);

                if (ret)
                        return ret;
        }

        ios = pcol->ios;
        ios->pages = pcol->pages;
        ios->nr_pages = pcol->nr_pages;

        if (pcol->read_4_write) {
                ret = ore_read(pcol->ios);
                if (unlikely(ret))
                        goto err;
                return __readpages_done(pcol);
        }

        pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
        if (!pcol_copy) {
                ret = -ENOMEM;
                goto err;
        }

        *pcol_copy = *pcol;
        ios->done = readpages_done;
        ios->private = pcol_copy;
        ret = ore_read(ios);
        if (unlikely(ret))
                goto err;

        atomic_inc(&pcol->sbi->s_curr_pending);

        EXOFS_DBGMSG2("read_exec obj=0x%llx start=0x%llx length=0x%lx\n",
                  oi->one_comp.obj.id, _LLU(ios->offset), pcol->length);

        /* pages ownership was passed to pcol_copy */
        _pcol_reset(pcol);
        return 0;

err:
        if (!pcol->read_4_write)
                _unlock_pcol_pages(pcol, ret, READ);

        pcol_free(pcol);

        kfree(pcol_copy);
        return ret;
}

/* readpage_strip is called either directly from readpage() or by the VFS from
 * within read_cache_pages(), to add one more page to be read. It will try to
 * collect as many contiguous pages as possible. If a discontinuity is
 * encountered, or it runs out of resources, it will submit the previous segment
 * and will start a new collection. Eventually caller must submit the last
 * segment if present.
 */
static int readpage_strip(void *data, struct page *page)
{
        struct page_collect *pcol = data;
        struct inode *inode = pcol->inode;
        struct exofs_i_info *oi = exofs_i(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        size_t len;
        int ret;

        /* FIXME: Just for debugging, will be removed */
        if (PageUptodate(page))
                EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
                          page->index);

        if (page->index < end_index)
                len = PAGE_CACHE_SIZE;
        else if (page->index == end_index)
                len = i_size & ~PAGE_CACHE_MASK;
        else
                len = 0;

        if (!len || !obj_created(oi)) {
                /* The page is out of bounds, or the object doesn't exist yet.
                 * Clear the current page and split the request.
                 */
                clear_highpage(page);

                SetPageUptodate(page);
                if (PageError(page))
                        ClearPageError(page);

                if (!pcol->read_4_write)
                        unlock_page(page);
                EXOFS_DBGMSG("readpage_strip(0x%lx) empty page len=%zx "
                             "read_4_write=%d index=0x%lx end_index=0x%lx "
                             "splitting\n", inode->i_ino, len,
                             pcol->read_4_write, page->index, end_index);

                return read_exec(pcol);
        }

try_again:

        if (unlikely(pcol->pg_first == -1)) {
                pcol->pg_first = page->index;
        } else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
                   page->index)) {
                /* Discontinuity detected, split the request */
                ret = read_exec(pcol);
                if (unlikely(ret))
                        goto fail;
                goto try_again;
        }

        if (!pcol->pages) {
                ret = pcol_try_alloc(pcol);
                if (unlikely(ret))
                        goto fail;
        }

        if (len != PAGE_CACHE_SIZE)
                zero_user(page, len, PAGE_CACHE_SIZE - len);

        EXOFS_DBGMSG2("    readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
                     inode->i_ino, page->index, len);

        ret = pcol_add_page(pcol, page, len);
        if (ret) {
                EXOFS_DBGMSG2("Failed pcol_add_page pages[i]=%p "
                          "this_len=0x%zx nr_pages=%u length=0x%lx\n",
                          page, len, pcol->nr_pages, pcol->length);

                /* split the request, and start again with current page */
                ret = read_exec(pcol);
                if (unlikely(ret))
                        goto fail;

                goto try_again;
        }

        return 0;

fail:
        /* SetPageError(page); ??? */
        unlock_page(page);
        return ret;
}

static int exofs_readpages(struct file *file, struct address_space *mapping,
                           struct list_head *pages, unsigned nr_pages)
{
        struct page_collect pcol;
        int ret;

        _pcol_init(&pcol, nr_pages, mapping->host);

        ret = read_cache_pages(mapping, pages, readpage_strip, &pcol);
        if (ret) {
                EXOFS_ERR("read_cache_pages => %d\n", ret);
                return ret;
        }

        return read_exec(&pcol);
}

static int _readpage(struct page *page, bool read_4_write)
{
        struct page_collect pcol;
        int ret;

        _pcol_init(&pcol, 1, page->mapping->host);

        pcol.read_4_write = read_4_write;
        ret = readpage_strip(&pcol, page);
        if (ret) {
                EXOFS_ERR("_readpage => %d\n", ret);
                return ret;
        }

        return read_exec(&pcol);
}

/*
 * We don't need the file argument
 */
static int exofs_readpage(struct file *file, struct page *page)
{
        return _readpage(page, false);
}

/* Callback for ore_write. All writes are asynchronous */
static void writepages_done(struct ore_io_state *ios, void *p)
{
        struct page_collect *pcol = p;
        int i;
        u64 resid;
        u64 good_bytes;
        u64 length = 0;
        int ret = ore_check_io(ios, &resid);

        atomic_dec(&pcol->sbi->s_curr_pending);

        if (likely(!ret))
                good_bytes = pcol->length;
        else
                good_bytes = pcol->length - resid;

        EXOFS_DBGMSG2("writepages_done(0x%lx) good_bytes=0x%llx"
                     " length=0x%lx nr_pages=%u\n",
                     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
                     pcol->nr_pages);

        for (i = 0; i < pcol->nr_pages; i++) {
                struct page *page = pcol->pages[i];
                struct inode *inode = page->mapping->host;
                int page_stat;

                if (inode != pcol->inode)
                        continue; /* osd might add more pages to a bio */

                if (likely(length < good_bytes))
                        page_stat = 0;
                else
                        page_stat = ret;

                update_write_page(page, page_stat);
                unlock_page(page);
                EXOFS_DBGMSG2("    writepages_done(0x%lx, 0x%lx) status=%d\n",
                             inode->i_ino, page->index, page_stat);

                length += PAGE_SIZE;
        }

        pcol_free(pcol);
        kfree(pcol);
        EXOFS_DBGMSG2("writepages_done END\n");
}

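/* Mirror image of read_exec() for the write path: hand the collected
 * pages to ORE as a single write. Unlike reads there is no synchronous
 * variant; completion is always deferred to writepages_done().
 */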
static int write_exec(struct page_collect *pcol)
{
        struct exofs_i_info *oi = exofs_i(pcol->inode);
        struct ore_io_state *ios;
        struct page_collect *pcol_copy = NULL;
        int ret;

        if (!pcol->pages)
                return 0;

        BUG_ON(pcol->ios);
        ret = ore_get_rw_state(&pcol->sbi->layout, &oi->comps, false,
                                 pcol->pg_first << PAGE_CACHE_SHIFT,
                                 pcol->length, &pcol->ios);

        if (unlikely(ret))
                goto err;

        pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
        if (!pcol_copy) {
                EXOFS_ERR("write_exec: Failed to kmalloc(pcol)\n");
                ret = -ENOMEM;
                goto err;
        }

        *pcol_copy = *pcol;

        ios = pcol->ios;
        ios->pages = pcol_copy->pages;
        ios->nr_pages = pcol_copy->nr_pages;
        ios->done = writepages_done;
        ios->private = pcol_copy;

        ret = ore_write(ios);
        if (unlikely(ret)) {
                EXOFS_ERR("write_exec: ore_write() Failed\n");
                goto err;
        }

        atomic_inc(&pcol->sbi->s_curr_pending);
        EXOFS_DBGMSG2("write_exec(0x%lx, 0x%llx) start=0x%llx length=0x%lx\n",
                  pcol->inode->i_ino, pcol->pg_first, _LLU(ios->offset),
                  pcol->length);
        /* pages ownership was passed to pcol_copy */
        _pcol_reset(pcol);
        return 0;

err:
        _unlock_pcol_pages(pcol, ret, WRITE);
        pcol_free(pcol);
        kfree(pcol_copy);

        return ret;
}

/* writepage_strip is called either directly from writepage() or by the VFS from
 * within write_cache_pages(), to add one more page to be written to storage.
 * It will try to collect as many contiguous pages as possible. If a
 * discontinuity is encountered or it runs out of resources it will submit the
 * previous segment and will start a new collection.
 * Eventually caller must submit the last segment if present.
 */
static int writepage_strip(struct page *page,
                           struct writeback_control *wbc_unused, void *data)
{
        struct page_collect *pcol = data;
        struct inode *inode = pcol->inode;
        struct exofs_i_info *oi = exofs_i(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        size_t len;
        int ret;

        BUG_ON(!PageLocked(page));

        ret = wait_obj_created(oi);
        if (unlikely(ret))
                goto fail;

        if (page->index < end_index)
                /* in this case, the page is within the limits of the file */
                len = PAGE_CACHE_SIZE;
        else {
                len = i_size & ~PAGE_CACHE_MASK;

                if (page->index > end_index || !len) {
                        /* in this case, the page is outside the limits
                         * (truncate in progress)
                         */
                        ret = write_exec(pcol);
                        if (unlikely(ret))
                                goto fail;
                        if (PageError(page))
                                ClearPageError(page);
                        unlock_page(page);
                        EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) "
                                     "outside the limits\n",
                                     inode->i_ino, page->index);
                        return 0;
                }
        }

try_again:

        if (unlikely(pcol->pg_first == -1)) {
                pcol->pg_first = page->index;
        } else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
                   page->index)) {
                /* Discontinuity detected, split the request */
                ret = write_exec(pcol);
                if (unlikely(ret))
                        goto fail;

                EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) Discontinuity\n",
                             inode->i_ino, page->index);
                goto try_again;
        }

        if (!pcol->pages) {
                ret = pcol_try_alloc(pcol);
                if (unlikely(ret))
                        goto fail;
        }

        EXOFS_DBGMSG2("    writepage_strip(0x%lx, 0x%lx) len=0x%zx\n",
                     inode->i_ino, page->index, len);

        ret = pcol_add_page(pcol, page, len);
        if (unlikely(ret)) {
                EXOFS_DBGMSG2("Failed pcol_add_page "
                             "nr_pages=%u total_length=0x%lx\n",
                             pcol->nr_pages, pcol->length);

                /* split the request, next loop will start again */
                ret = write_exec(pcol);
                if (unlikely(ret)) {
                        EXOFS_DBGMSG("write_exec failed => %d\n", ret);
                        goto fail;
                }

                goto try_again;
        }

        BUG_ON(PageWriteback(page));
        set_page_writeback(page);

        return 0;

fail:
        EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n",
                     inode->i_ino, page->index, ret);
        set_bit(AS_EIO, &page->mapping->flags);
        unlock_page(page);
        return ret;
}

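/* writepages: estimate how many pages this writeback pass will cover so
 * pcol_try_alloc() can size its array up front; the estimate is clamped
 * to at least 32 pages since wbc ranges are often unbounded.
 */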
static int exofs_writepages(struct address_space *mapping,
                       struct writeback_control *wbc)
{
        struct page_collect pcol;
        long start, end, expected_pages;
        int ret;

        start = wbc->range_start >> PAGE_CACHE_SHIFT;
        end = (wbc->range_end == LLONG_MAX) ?
                        start + mapping->nrpages :
                        wbc->range_end >> PAGE_CACHE_SHIFT;

        if (start || end)
                expected_pages = end - start + 1;
        else
                expected_pages = mapping->nrpages;

        if (expected_pages < 32L)
                expected_pages = 32L;

        EXOFS_DBGMSG2("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx "
                     "nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n",
                     mapping->host->i_ino, wbc->range_start, wbc->range_end,
                     mapping->nrpages, start, end, expected_pages);

        _pcol_init(&pcol, expected_pages, mapping->host);

        ret = write_cache_pages(mapping, wbc, writepage_strip, &pcol);
        if (ret) {
                EXOFS_ERR("write_cache_pages => %d\n", ret);
                return ret;
        }

        return write_exec(&pcol);
}

static int exofs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct page_collect pcol;
        int ret;

        _pcol_init(&pcol, 1, page->mapping->host);

        ret = writepage_strip(page, NULL, &pcol);
        if (ret) {
                EXOFS_ERR("exofs_writepage => %d\n", ret);
                return ret;
        }

        return write_exec(&pcol);
}

/* i_mutex is held, so we can use inode->i_size directly */
static void _write_failed(struct inode *inode, loff_t to)
{
        if (to > inode->i_size)
                truncate_pagecache(inode, to, inode->i_size);
}

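/* write_begin: if the write does not cover a whole page that is not yet
 * up to date, the missing part must first be read in (or zeroed when it
 * lies past i_size), so the later write-out of the full page does not
 * push stale data to the OSD.
 */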
int exofs_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned flags,
                struct page **pagep, void **fsdata)
{
        int ret = 0;
        struct page *page;

        page = *pagep;
        if (page == NULL) {
                ret = simple_write_begin(file, mapping, pos, len, flags, pagep,
                                         fsdata);
                if (ret) {
                        EXOFS_DBGMSG("simple_write_begin failed\n");
                        goto out;
                }

                page = *pagep;
        }

        /* read-modify-write */
        if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
                loff_t i_size = i_size_read(mapping->host);
                pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
                size_t rlen;

                if (page->index < end_index)
                        rlen = PAGE_CACHE_SIZE;
                else if (page->index == end_index)
                        rlen = i_size & ~PAGE_CACHE_MASK;
                else
                        rlen = 0;

                if (!rlen) {
                        clear_highpage(page);
                        SetPageUptodate(page);
                        goto out;
                }

                ret = _readpage(page, true);
                if (ret) {
                        /* SetPageError was done by _readpage. Is it ok? */
                        unlock_page(page);
                        EXOFS_DBGMSG("_readpage failed\n");
                }
        }
out:
        if (unlikely(ret))
                _write_failed(mapping->host, pos + len);

        return ret;
}

static int exofs_write_begin_export(struct file *file,
                struct address_space *mapping,
                loff_t pos, unsigned len, unsigned flags,
                struct page **pagep, void **fsdata)
{
        *pagep = NULL;

        return exofs_write_begin(file, mapping, pos, len, flags, pagep,
                                        fsdata);
}

static int exofs_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        /* According to comment in simple_write_end i_mutex is held */
        loff_t i_size = inode->i_size;
        int ret;

        ret = simple_write_end(file, mapping, pos, len, copied, page, fsdata);
        if (unlikely(ret))
                _write_failed(inode, pos + len);

        /* TODO: once simple_write_end marks inode dirty remove */
        if (i_size != inode->i_size)
                mark_inode_dirty(inode);
        return ret;
}

static int exofs_releasepage(struct page *page, gfp_t gfp)
{
        EXOFS_DBGMSG("page 0x%lx\n", page->index);
        WARN_ON(1);
        return 0;
}

static void exofs_invalidatepage(struct page *page, unsigned long offset)
{
        EXOFS_DBGMSG("page 0x%lx offset 0x%lx\n", page->index, offset);
        WARN_ON(1);
}

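/* The address_space operations wire the strip/exec machinery above into
 * the VM: plain readpage/writepage go through the same collect-and-submit
 * path as their plural readpages/writepages counterparts.
 */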
const struct address_space_operations exofs_aops = {
        .readpage       = exofs_readpage,
        .readpages      = exofs_readpages,
        .writepage      = exofs_writepage,
        .writepages     = exofs_writepages,
        .write_begin    = exofs_write_begin_export,
        .write_end      = exofs_write_end,
        .releasepage    = exofs_releasepage,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .invalidatepage = exofs_invalidatepage,

        /* Not implemented Yet */
        .bmap           = NULL, /* TODO: use osd's OSD_ACT_READ_MAP */
        .direct_IO      = NULL, /* TODO: Should be trivial to do */

        /* With these NULL has special meaning or default is not exported */
        .get_xip_mem    = NULL,
        .migratepage    = NULL,
        .launder_page   = NULL,
        .is_partially_uptodate = NULL,
        .error_remove_page = NULL,
};

/******************************************************************************
 * INODE OPERATIONS
 *****************************************************************************/

/*
 * Test whether an inode is a fast symlink. A fast symlink stores its
 * target directly in the inode's i_data instead of in the object's data.
 */
static inline int exofs_inode_is_fast_symlink(struct inode *inode)
{
        struct exofs_i_info *oi = exofs_i(inode);

        return S_ISLNK(inode->i_mode) && (oi->i_data[0] != 0);
}

static int _do_truncate(struct inode *inode, loff_t newsize)
{
        struct exofs_i_info *oi = exofs_i(inode);
        struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
        int ret;

        inode->i_mtime = inode->i_ctime = CURRENT_TIME;

        ret = ore_truncate(&sbi->layout, &oi->comps, (u64)newsize);
        if (likely(!ret))
                truncate_setsize(inode, newsize);

        EXOFS_DBGMSG("(0x%lx) size=0x%llx ret=>%d\n",
                     inode->i_ino, newsize, ret);
        return ret;
}

/*
 * Set inode attributes - update size attribute on OSD if needed,
 *                        otherwise just call generic functions.
 */
int exofs_setattr(struct dentry *dentry, struct iattr *iattr)
{
        struct inode *inode = dentry->d_inode;
        int error;

        /* if we are about to modify an object, and it hasn't been
         * created yet, wait
         */
        error = wait_obj_created(exofs_i(inode));
        if (unlikely(error))
                return error;

        error = inode_change_ok(inode, iattr);
        if (unlikely(error))
                return error;

        if ((iattr->ia_valid & ATTR_SIZE) &&
            iattr->ia_size != i_size_read(inode)) {
                error = _do_truncate(inode, iattr->ia_size);
                if (unlikely(error))
                        return error;
        }

        setattr_copy(inode, iattr);
        mark_inode_dirty(inode);
        return 0;
}

static const struct osd_attr g_attr_inode_file_layout = ATTR_DEF(
        EXOFS_APAGE_FS_DATA,
        EXOFS_ATTR_INODE_FILE_LAYOUT,
        0);
static const struct osd_attr g_attr_inode_dir_layout = ATTR_DEF(
        EXOFS_APAGE_FS_DATA,
        EXOFS_ATTR_INODE_DIR_LAYOUT,
        0);

/*
 * Read the Linux inode info from the OSD, and return it as is. In exofs the
 * inode info is in an application specific page/attribute of the osd-object.
 */
static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
                    struct exofs_fcb *inode)
{
        struct exofs_sb_info *sbi = sb->s_fs_info;
        struct osd_attr attrs[] = {
                [0] = g_attr_inode_data,
                [1] = g_attr_inode_file_layout,
                [2] = g_attr_inode_dir_layout,
        };
        struct ore_io_state *ios;
        struct exofs_on_disk_inode_layout *layout;
        int ret;

        ret = ore_get_io_state(&sbi->layout, &oi->comps, &ios);
        if (unlikely(ret)) {
                EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
                return ret;
        }

        attrs[1].len = exofs_on_disk_inode_layout_size(sbi->comps.numdevs);
        attrs[2].len = exofs_on_disk_inode_layout_size(sbi->comps.numdevs);

        ios->in_attr = attrs;
        ios->in_attr_len = ARRAY_SIZE(attrs);

        ret = ore_read(ios);
        if (unlikely(ret)) {
                EXOFS_ERR("object(0x%llx) corrupted, return empty file=>%d\n",
                          _LLU(oi->one_comp.obj.id), ret);
                memset(inode, 0, sizeof(*inode));
                inode->i_mode = 0040000 | (0777 & ~022); /* S_IFDIR | 0755 */
                /* If object is lost on target we might as well enable its
                 * deletion.
                 */
                if ((ret == -ENOENT) || (ret == -EINVAL))
                        ret = 0;
                goto out;
        }

        ret = extract_attr_from_ios(ios, &attrs[0]);
        if (ret) {
                EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
                goto out;
        }
        WARN_ON(attrs[0].len != EXOFS_INO_ATTR_SIZE);
        memcpy(inode, attrs[0].val_ptr, EXOFS_INO_ATTR_SIZE);

        ret = extract_attr_from_ios(ios, &attrs[1]);
        if (ret) {
                EXOFS_ERR("%s: extract_attr of inode_file_layout failed\n",
                          __func__);
                goto out;
        }
        if (attrs[1].len) {
                layout = attrs[1].val_ptr;
                if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
                        EXOFS_ERR("%s: unsupported files layout %d\n",
                                __func__, layout->gen_func);
                        ret = -ENOTSUPP;
                        goto out;
                }
        }

        ret = extract_attr_from_ios(ios, &attrs[2]);
        if (ret) {
                EXOFS_ERR("%s: extract_attr of inode_dir_layout failed\n",
                          __func__);
                goto out;
        }
        if (attrs[2].len) {
                layout = attrs[2].val_ptr;
                if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
                        EXOFS_ERR("%s: unsupported meta-data layout %d\n",
                                __func__, layout->gen_func);
                        ret = -ENOTSUPP;
                        goto out;
                }
        }

out:
        ore_put_io_state(ios);
        return ret;
}

static void __oi_init(struct exofs_i_info *oi)
{
        init_waitqueue_head(&oi->i_wq);
        oi->i_flags = 0;
}

/*
 * Fill in an inode read from the OSD and set it up for use
 */
struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
{
        struct exofs_i_info *oi;
        struct exofs_fcb fcb;
        struct inode *inode;
        int ret;

        inode = iget_locked(sb, ino);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;
        oi = exofs_i(inode);
        __oi_init(oi);
        exofs_init_comps(&oi->comps, &oi->one_comp, sb->s_fs_info,
                         exofs_oi_objno(oi));

        /* read the inode from the osd */
        ret = exofs_get_inode(sb, oi, &fcb);
        if (ret)
                goto bad_inode;

        set_obj_created(oi);

        /* copy stuff from on-disk struct to in-memory struct */
        inode->i_mode = le16_to_cpu(fcb.i_mode);
        inode->i_uid = le32_to_cpu(fcb.i_uid);
        inode->i_gid = le32_to_cpu(fcb.i_gid);
        inode->i_nlink = le16_to_cpu(fcb.i_links_count);
        inode->i_ctime.tv_sec = (signed)le32_to_cpu(fcb.i_ctime);
        inode->i_atime.tv_sec = (signed)le32_to_cpu(fcb.i_atime);
        inode->i_mtime.tv_sec = (signed)le32_to_cpu(fcb.i_mtime);
        inode->i_ctime.tv_nsec =
                inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0;
        oi->i_commit_size = le64_to_cpu(fcb.i_size);
        i_size_write(inode, oi->i_commit_size);
        inode->i_blkbits = EXOFS_BLKSHIFT;
        inode->i_generation = le32_to_cpu(fcb.i_generation);

        oi->i_dir_start_lookup = 0;

        if ((inode->i_nlink == 0) && (inode->i_mode == 0)) {
                ret = -ESTALE;
                goto bad_inode;
        }

        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                if (fcb.i_data[0])
                        inode->i_rdev =
                                old_decode_dev(le32_to_cpu(fcb.i_data[0]));
                else
                        inode->i_rdev =
                                new_decode_dev(le32_to_cpu(fcb.i_data[1]));
        } else {
                memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
        }

        inode->i_mapping->backing_dev_info = sb->s_bdi;
        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &exofs_file_inode_operations;
                inode->i_fop = &exofs_file_operations;
                inode->i_mapping->a_ops = &exofs_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &exofs_dir_inode_operations;
                inode->i_fop = &exofs_dir_operations;
                inode->i_mapping->a_ops = &exofs_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                if (exofs_inode_is_fast_symlink(inode))
                        inode->i_op = &exofs_fast_symlink_inode_operations;
                else {
                        inode->i_op = &exofs_symlink_inode_operations;
                        inode->i_mapping->a_ops = &exofs_aops;
                }
        } else {
                inode->i_op = &exofs_special_inode_operations;
                if (fcb.i_data[0])
                        init_special_inode(inode, inode->i_mode,
                           old_decode_dev(le32_to_cpu(fcb.i_data[0])));
                else
                        init_special_inode(inode, inode->i_mode,
                           new_decode_dev(le32_to_cpu(fcb.i_data[1])));
        }

        unlock_new_inode(inode);
        return inode;

bad_inode:
        iget_failed(inode);
        return ERR_PTR(ret);
}

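/* Object creation is asynchronous (see exofs_new_inode()/create_done()).
 * Anyone about to touch the object on the OSD must first wait here for
 * the create to complete; a bad inode at that point means creation
 * failed.
 */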
int __exofs_wait_obj_created(struct exofs_i_info *oi)
{
        if (!obj_created(oi)) {
                EXOFS_DBGMSG("!obj_created\n");
                BUG_ON(!obj_2bcreated(oi));
                wait_event(oi->i_wq, obj_created(oi));
                EXOFS_DBGMSG("wait_event done\n");
        }
        return unlikely(is_bad_inode(&oi->vfs_inode)) ? -EIO : 0;
}

/*
 * Callback function from exofs_new_inode().  The important thing is that we
 * set the obj_created flag so that other methods know that the object exists
 * on the OSD.
 */
static void create_done(struct ore_io_state *ios, void *p)
{
        struct inode *inode = p;
        struct exofs_i_info *oi = exofs_i(inode);
        struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
        int ret;

        ret = ore_check_io(ios, NULL);
        ore_put_io_state(ios);

        atomic_dec(&sbi->s_curr_pending);

        if (unlikely(ret)) {
                EXOFS_ERR("object=0x%llx creation failed in pid=0x%llx\n",
                          _LLU(exofs_oi_objno(oi)),
                          _LLU(oi->one_comp.obj.partition));
                /* TODO: When the FS is corrupted, creation can fail because
                 * the object already exists. Get rid of this asynchronous
                 * creation; if the object exists, increment the obj counter
                 * and try the next object until we succeed. All these
                 * dangling objects will be made into lost files by
                 * chkfs.exofs
                 */
        }

        set_obj_created(oi);

        wake_up(&oi->i_wq);
}

/*
 * Set up a new inode and create an object for it on the OSD
 */
struct inode *exofs_new_inode(struct inode *dir, int mode)
{
        struct super_block *sb = dir->i_sb;
        struct exofs_sb_info *sbi = sb->s_fs_info;
        struct inode *inode;
        struct exofs_i_info *oi;
        struct ore_io_state *ios;
        int ret;

        inode = new_inode(sb);
        if (!inode)
                return ERR_PTR(-ENOMEM);

        oi = exofs_i(inode);
        __oi_init(oi);

        set_obj_2bcreated(oi);

        inode->i_mapping->backing_dev_info = sb->s_bdi;
        inode_init_owner(inode, dir, mode);
        inode->i_ino = sbi->s_nextid++;
        inode->i_blkbits = EXOFS_BLKSHIFT;
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
        oi->i_commit_size = inode->i_size = 0;
        spin_lock(&sbi->s_next_gen_lock);
        inode->i_generation = sbi->s_next_generation++;
        spin_unlock(&sbi->s_next_gen_lock);
        insert_inode_hash(inode);

        exofs_init_comps(&oi->comps, &oi->one_comp, sb->s_fs_info,
                         exofs_oi_objno(oi));
        exofs_sbi_write_stats(sbi); /* Make sure new sbi->s_nextid is on disk */

        mark_inode_dirty(inode);

        ret = ore_get_io_state(&sbi->layout, &oi->comps, &ios);
        if (unlikely(ret)) {
                EXOFS_ERR("exofs_new_inode: ore_get_io_state failed\n");
                return ERR_PTR(ret);
        }

        ios->done = create_done;
        ios->private = inode;

        ret = ore_create(ios);
        if (ret) {
                ore_put_io_state(ios);
                return ERR_PTR(ret);
        }
        atomic_inc(&sbi->s_curr_pending);

        return inode;
}

/*
 * struct to pass two arguments to update_inode's callback
 */
struct updatei_args {
        struct exofs_sb_info    *sbi;
        struct exofs_fcb        fcb;
};

/*
 * Callback function from exofs_update_inode().
 */
static void updatei_done(struct ore_io_state *ios, void *p)
{
        struct updatei_args *args = p;

        ore_put_io_state(ios);

        atomic_dec(&args->sbi->s_curr_pending);

        kfree(args);
}

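/* The inode is serialized into an exofs_fcb and written as the
 * g_attr_inode_data attribute of the object, rather than as object data;
 * the heap-allocated updatei_args keeps the fcb alive until the
 * asynchronous set-attr completes.
 */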
/*
 * Write the inode to the OSD.  Just fill up the struct, and set the attribute
 * synchronously or asynchronously depending on the do_sync flag.
 */
static int exofs_update_inode(struct inode *inode, int do_sync)
{
        struct exofs_i_info *oi = exofs_i(inode);
        struct super_block *sb = inode->i_sb;
        struct exofs_sb_info *sbi = sb->s_fs_info;
        struct ore_io_state *ios;
        struct osd_attr attr;
        struct exofs_fcb *fcb;
        struct updatei_args *args;
        int ret;

        args = kzalloc(sizeof(*args), GFP_KERNEL);
        if (!args) {
                EXOFS_DBGMSG("Failed kzalloc of args\n");
                return -ENOMEM;
        }

        fcb = &args->fcb;

        fcb->i_mode = cpu_to_le16(inode->i_mode);
        fcb->i_uid = cpu_to_le32(inode->i_uid);
        fcb->i_gid = cpu_to_le32(inode->i_gid);
        fcb->i_links_count = cpu_to_le16(inode->i_nlink);
        fcb->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
        fcb->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
        fcb->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
        oi->i_commit_size = i_size_read(inode);
        fcb->i_size = cpu_to_le64(oi->i_commit_size);
        fcb->i_generation = cpu_to_le32(inode->i_generation);

        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                if (old_valid_dev(inode->i_rdev)) {
                        fcb->i_data[0] =
                                cpu_to_le32(old_encode_dev(inode->i_rdev));
                        fcb->i_data[1] = 0;
                } else {
                        fcb->i_data[0] = 0;
                        fcb->i_data[1] =
                                cpu_to_le32(new_encode_dev(inode->i_rdev));
                        fcb->i_data[2] = 0;
                }
        } else
                memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data));

        ret = ore_get_io_state(&sbi->layout, &oi->comps, &ios);
        if (unlikely(ret)) {
                EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
                goto free_args;
        }

        attr = g_attr_inode_data;
        attr.val_ptr = fcb;
        ios->out_attr_len = 1;
        ios->out_attr = &attr;

        wait_obj_created(oi);

        if (!do_sync) {
                args->sbi = sbi;
                ios->done = updatei_done;
                ios->private = args;
        }

        ret = ore_write(ios);
        if (!do_sync && !ret) {
                atomic_inc(&sbi->s_curr_pending);
                goto out; /* deallocation in updatei_done */
        }

        ore_put_io_state(ios);
free_args:
        kfree(args);
out:
        EXOFS_DBGMSG("(0x%lx) do_sync=%d ret=>%d\n",
                     inode->i_ino, do_sync, ret);
        return ret;
}

int exofs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
        /* FIXME: fix fsync and use wbc->sync_mode == WB_SYNC_ALL */
        return exofs_update_inode(inode, 1);
}

/*
 * Callback function from exofs_evict_inode() - don't have much cleaning up to
 * do.
 */
static void delete_done(struct ore_io_state *ios, void *p)
{
        struct exofs_sb_info *sbi = p;

        ore_put_io_state(ios);

        atomic_dec(&sbi->s_curr_pending);
}

/*
 * Called when the refcount of an inode reaches zero.  We remove the object
 * from the OSD here.  We make sure the object was created before we try and
 * delete it.
 */
void exofs_evict_inode(struct inode *inode)
{
        struct exofs_i_info *oi = exofs_i(inode);
        struct super_block *sb = inode->i_sb;
        struct exofs_sb_info *sbi = sb->s_fs_info;
        struct ore_io_state *ios;
        int ret;

        truncate_inode_pages(&inode->i_data, 0);

        /* TODO: should do better here */
        if (inode->i_nlink || is_bad_inode(inode))
                goto no_delete;

        inode->i_size = 0;
        end_writeback(inode);

        /* if we are deleting an obj that hasn't been created yet, wait.
         * This also makes sure that create_done cannot be called with an
         * already evicted inode.
         */
        wait_obj_created(oi);
        /* ignore the error, attempt a remove anyway */

        /* Now Remove the OSD objects */
        ret = ore_get_io_state(&sbi->layout, &oi->comps, &ios);
        if (unlikely(ret)) {
                EXOFS_ERR("%s: ore_get_io_state failed\n", __func__);
                return;
        }

        ios->done = delete_done;
        ios->private = sbi;

        ret = ore_remove(ios);
        if (ret) {
                EXOFS_ERR("%s: ore_remove failed\n", __func__);
                ore_put_io_state(ios);
                return;
        }
        atomic_inc(&sbi->s_curr_pending);

        return;

no_delete:
        end_writeback(inode);
}