/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * Copyrights for code taken from ext2:
 *     Copyright (C) 1992, 1993, 1994, 1995
 *     Remy Card (card@masi.ibp.fr)
 *     Laboratoire MASI - Institut Blaise Pascal
 *     Universite Pierre et Marie Curie (Paris VI)
 *     from
 *     linux/fs/minix/inode.c
 *     Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.  Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/slab.h>

#include "exofs.h"

#define EXOFS_DBGMSG2(M...) do {} while (0)

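/* Both limits keep a single kmalloc within one page: BIO_MAX_PAGES_KMALLOC
 * is how many bio_vecs fit in a page-sized struct bio allocation, and
 * MAX_PAGES_KMALLOC is how many page pointers fit in one page.
 */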
enum { BIO_MAX_PAGES_KMALLOC =
                (PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec),
        MAX_PAGES_KMALLOC =
                PAGE_SIZE / sizeof(struct page *),
};

unsigned exofs_max_io_pages(struct ore_layout *layout,
                            unsigned expected_pages)
{
        unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC);

        /* TODO: easily support bio chaining */
        pages = min_t(unsigned, pages,
                      layout->group_width * BIO_MAX_PAGES_KMALLOC);
        return pages;
}

struct page_collect {
        struct exofs_sb_info *sbi;
        struct inode *inode;
        unsigned expected_pages;
        struct ore_io_state *ios;

        struct page **pages;
        unsigned alloc_pages;
        unsigned nr_pages;
        unsigned long length;
        loff_t pg_first; /* keep 64bit also in 32-arches */
        bool read_4_write; /* This means two things: the read is sync
                            * and the pages should not be unlocked.
                            */
};

static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
                       struct inode *inode)
{
        struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;

        pcol->sbi = sbi;
        pcol->inode = inode;
        pcol->expected_pages = expected_pages;

        pcol->ios = NULL;
        pcol->pages = NULL;
        pcol->alloc_pages = 0;
        pcol->nr_pages = 0;
        pcol->length = 0;
        pcol->pg_first = -1;
        pcol->read_4_write = false;
}

static void _pcol_reset(struct page_collect *pcol)
{
        pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages);

        pcol->pages = NULL;
        pcol->alloc_pages = 0;
        pcol->nr_pages = 0;
        pcol->length = 0;
        pcol->pg_first = -1;
        pcol->ios = NULL;

        /* This is probably the end of the loop, but in writes
         * it might not end here. Don't be left with nothing.
         */
        if (!pcol->expected_pages)
                pcol->expected_pages = MAX_PAGES_KMALLOC;
}

static int pcol_try_alloc(struct page_collect *pcol)
{
        unsigned pages;

        /* TODO: easily support bio chaining */
        pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages);

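        /* If a big allocation fails, retry with progressively smaller
         * power-of-two sizes before giving up.
         */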
        for (; pages; pages >>= 1) {
                pcol->pages = kmalloc(pages * sizeof(struct page *),
                                      GFP_KERNEL);
                if (likely(pcol->pages)) {
                        pcol->alloc_pages = pages;
                        return 0;
                }
        }

        EXOFS_ERR("Failed to kmalloc expected_pages=%u\n",
                  pcol->expected_pages);
        return -ENOMEM;
}

static void pcol_free(struct page_collect *pcol)
{
        kfree(pcol->pages);
        pcol->pages = NULL;

        if (pcol->ios) {
                ore_put_io_state(pcol->ios);
                pcol->ios = NULL;
        }
}

static int pcol_add_page(struct page_collect *pcol, struct page *page,
                         unsigned len)
{
        if (unlikely(pcol->nr_pages >= pcol->alloc_pages))
                return -ENOMEM;

        pcol->pages[pcol->nr_pages++] = page;
        pcol->length += len;
        return 0;
}

static int update_read_page(struct page *page, int ret)
{
        if (ret == 0) {
                /* Everything is OK */
                SetPageUptodate(page);
                if (PageError(page))
                        ClearPageError(page);
        } else if (ret == -EFAULT) {
                /* In this case we were trying to read something that wasn't on
                 * disk yet - return a page full of zeroes.  This should be OK,
                 * because the object should be empty (if there was a write
                 * before this read, the read would be waiting with the page
                 * locked */
                clear_highpage(page);

                SetPageUptodate(page);
                if (PageError(page))
                        ClearPageError(page);
                ret = 0; /* recovered error */
                EXOFS_DBGMSG("recovered read error\n");
        } else /* Error */
                SetPageError(page);

        return ret;
}

static void update_write_page(struct page *page, int ret)
{
        if (ret) {
                mapping_set_error(page->mapping, ret);
                SetPageError(page);
        }
        end_page_writeback(page);
}

/* Called at the end of reads, to optionally unlock pages and update their
 * status.
 */
static int __readpages_done(struct page_collect *pcol)
{
        int i;
        u64 resid;
        u64 good_bytes;
        u64 length = 0;
        int ret = ore_check_io(pcol->ios, &resid);

        if (likely(!ret))
                good_bytes = pcol->length;
        else
                good_bytes = pcol->length - resid;

        EXOFS_DBGMSG2("readpages_done(0x%lx) good_bytes=0x%llx"
                     " length=0x%lx nr_pages=%u\n",
                     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
                     pcol->nr_pages);

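        /* Pages whose byte range falls below good_bytes completed; the
         * rest inherit the IO's error status.
         */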
        for (i = 0; i < pcol->nr_pages; i++) {
                struct page *page = pcol->pages[i];
                struct inode *inode = page->mapping->host;
                int page_stat;

                if (inode != pcol->inode)
                        continue; /* osd might add more pages at end */

                if (likely(length < good_bytes))
                        page_stat = 0;
                else
                        page_stat = ret;

                EXOFS_DBGMSG2("    readpages_done(0x%lx, 0x%lx) %s\n",
                          inode->i_ino, page->index,
                          page_stat ? "bad_bytes" : "good_bytes");

                ret = update_read_page(page, page_stat);
                if (!pcol->read_4_write)
                        unlock_page(page);
                length += PAGE_SIZE;
        }

        pcol_free(pcol);
        EXOFS_DBGMSG2("readpages_done END\n");
        return ret;
}

/* callback of async reads */
static void readpages_done(struct ore_io_state *ios, void *p)
{
        struct page_collect *pcol = p;

        __readpages_done(pcol);
        atomic_dec(&pcol->sbi->s_curr_pending);
        kfree(pcol);
}

static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
{
        int i;

        for (i = 0; i < pcol->nr_pages; i++) {
                struct page *page = pcol->pages[i];

                if (rw == READ)
                        update_read_page(page, ret);
                else
                        update_write_page(page, ret);

                unlock_page(page);
        }
}

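/* ORE submits IO to only one group at a time, so an io_state may come back
 * trimmed to fewer pages/bytes than the pcol holds. Move the left-over pages
 * into the next pcol so a following read_exec()/write_exec() submits them.
 */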
static int _maybe_not_all_in_one_io(struct ore_io_state *ios,
        struct page_collect *pcol_src, struct page_collect *pcol)
{
        /* length was wrong or offset was not page aligned */
        BUG_ON(pcol_src->nr_pages < ios->nr_pages);

        if (pcol_src->nr_pages > ios->nr_pages) {
                struct page **src_page;
                unsigned pages_less = pcol_src->nr_pages - ios->nr_pages;
                unsigned long len_less = pcol_src->length - ios->length;
                unsigned i;
                int ret;

                /* This IO was trimmed */
                pcol_src->nr_pages = ios->nr_pages;
                pcol_src->length = ios->length;

                /* Left over pages are passed to the next io */
                pcol->expected_pages += pages_less;
                pcol->nr_pages = pages_less;
                pcol->length = len_less;
                src_page = pcol_src->pages + pcol_src->nr_pages;
                pcol->pg_first = (*src_page)->index;

                ret = pcol_try_alloc(pcol);
                if (unlikely(ret))
                        return ret;

                for (i = 0; i < pages_less; ++i)
                        pcol->pages[i] = *src_page++;

                EXOFS_DBGMSG("Length was adjusted nr_pages=0x%x "
                        "pages_less=0x%x expected_pages=0x%x "
                        "next_offset=0x%llx next_len=0x%lx\n",
                        pcol_src->nr_pages, pages_less, pcol->expected_pages,
                        pcol->pg_first * PAGE_SIZE, pcol->length);
        }
        return 0;
}

static int read_exec(struct page_collect *pcol)
{
        struct exofs_i_info *oi = exofs_i(pcol->inode);
        struct ore_io_state *ios;
        struct page_collect *pcol_copy = NULL;
        int ret;

        if (!pcol->pages)
                return 0;

        if (!pcol->ios) {
                int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true,
                                             pcol->pg_first << PAGE_CACHE_SHIFT,
                                             pcol->length, &pcol->ios);

                if (ret)
                        return ret;
        }

        ios = pcol->ios;
        ios->pages = pcol->pages;

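        /* read_4_write is a sync read on behalf of write_begin: submit and
         * complete inline; the pages stay locked for the coming write.
         */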
        if (pcol->read_4_write) {
                ore_read(pcol->ios);
                return __readpages_done(pcol);
        }

        pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
        if (!pcol_copy) {
                ret = -ENOMEM;
                goto err;
        }

        *pcol_copy = *pcol;
        ios->done = readpages_done;
        ios->private = pcol_copy;

        /* pages ownership was passed to pcol_copy */
        _pcol_reset(pcol);

        ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
        if (unlikely(ret))
                goto err;

        EXOFS_DBGMSG2("read_exec(0x%lx) offset=0x%llx length=0x%llx\n",
                pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));

        ret = ore_read(ios);
        if (unlikely(ret))
                goto err;

        atomic_inc(&pcol->sbi->s_curr_pending);

        return 0;

err:
        if (!pcol->read_4_write)
                _unlock_pcol_pages(pcol, ret, READ);

        pcol_free(pcol);

        kfree(pcol_copy);
        return ret;
}

/* readpage_strip is called either directly from readpage() or by the VFS from
 * within read_cache_pages(), to add one more page to be read. It will try to
 * collect as many contiguous pages as possible. If a discontinuity is
 * encountered, or it runs out of resources, it will submit the previous segment
 * and will start a new collection. Eventually the caller must submit the last
 * segment if present.
 */
static int readpage_strip(void *data, struct page *page)
{
        struct page_collect *pcol = data;
        struct inode *inode = pcol->inode;
        struct exofs_i_info *oi = exofs_i(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        size_t len;
        int ret;

        /* FIXME: Just for debugging, will be removed */
        if (PageUptodate(page))
                EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
                          page->index);

        if (page->index < end_index)
                len = PAGE_CACHE_SIZE;
        else if (page->index == end_index)
                len = i_size & ~PAGE_CACHE_MASK;
        else
                len = 0;

        if (!len || !obj_created(oi)) {
                /* this will be out of bounds, or doesn't exist yet.
                 * Current page is cleared and the request is split
                 */
                clear_highpage(page);

                SetPageUptodate(page);
                if (PageError(page))
                        ClearPageError(page);

                if (!pcol->read_4_write)
                        unlock_page(page);
                EXOFS_DBGMSG("readpage_strip(0x%lx) empty page len=%zx "
                             "read_4_write=%d index=0x%lx end_index=0x%lx "
                             "splitting\n", inode->i_ino, len,
                             pcol->read_4_write, page->index, end_index);

                return read_exec(pcol);
        }

try_again:

        if (unlikely(pcol->pg_first == -1)) {
                pcol->pg_first = page->index;
        } else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
                   page->index)) {
                /* Discontinuity detected, split the request */
                ret = read_exec(pcol);
                if (unlikely(ret))
                        goto fail;
                goto try_again;
        }

        if (!pcol->pages) {
                ret = pcol_try_alloc(pcol);
                if (unlikely(ret))
                        goto fail;
        }

        if (len != PAGE_CACHE_SIZE)
                zero_user(page, len, PAGE_CACHE_SIZE - len);

        EXOFS_DBGMSG2("    readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
                     inode->i_ino, page->index, len);

        ret = pcol_add_page(pcol, page, len);
        if (ret) {
                EXOFS_DBGMSG2("Failed pcol_add_page pages[i]=%p "
                          "this_len=0x%zx nr_pages=%u length=0x%lx\n",
                          page, len, pcol->nr_pages, pcol->length);

                /* split the request, and start again with current page */
                ret = read_exec(pcol);
                if (unlikely(ret))
                        goto fail;

                goto try_again;
        }

        return 0;

fail:
        /* SetPageError(page); ??? */
        unlock_page(page);
        return ret;
}

static int exofs_readpages(struct file *file, struct address_space *mapping,
                           struct list_head *pages, unsigned nr_pages)
{
        struct page_collect pcol;
        int ret;

        _pcol_init(&pcol, nr_pages, mapping->host);

        ret = read_cache_pages(mapping, pages, readpage_strip, &pcol);
        if (ret) {
                EXOFS_ERR("read_cache_pages => %d\n", ret);
                return ret;
        }

        ret = read_exec(&pcol);
        if (unlikely(ret))
                return ret;

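        /* read_exec() may have split the IO (see _maybe_not_all_in_one_io);
         * a second call pumps any pages that were carried over.
         */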
        return read_exec(&pcol);
}

static int _readpage(struct page *page, bool read_4_write)
{
        struct page_collect pcol;
        int ret;

        _pcol_init(&pcol, 1, page->mapping->host);

        pcol.read_4_write = read_4_write;
        ret = readpage_strip(&pcol, page);
        if (ret) {
                EXOFS_ERR("_readpage => %d\n", ret);
                return ret;
        }

        return read_exec(&pcol);
}

/*
 * We don't need the file
 */
static int exofs_readpage(struct file *file, struct page *page)
{
        return _readpage(page, false);
}

/* Callback for osd_write. All writes are asynchronous */
static void writepages_done(struct ore_io_state *ios, void *p)
{
        struct page_collect *pcol = p;
        int i;
        u64 resid;
        u64 good_bytes;
        u64 length = 0;
        int ret = ore_check_io(ios, &resid);

        atomic_dec(&pcol->sbi->s_curr_pending);

        if (likely(!ret))
                good_bytes = pcol->length;
        else
                good_bytes = pcol->length - resid;

        EXOFS_DBGMSG2("writepages_done(0x%lx) good_bytes=0x%llx"
                     " length=0x%lx nr_pages=%u\n",
                     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
                     pcol->nr_pages);

        for (i = 0; i < pcol->nr_pages; i++) {
                struct page *page = pcol->pages[i];
                struct inode *inode = page->mapping->host;
                int page_stat;

                if (inode != pcol->inode)
                        continue; /* osd might add more pages to a bio */

                if (likely(length < good_bytes))
                        page_stat = 0;
                else
                        page_stat = ret;

                update_write_page(page, page_stat);
                unlock_page(page);
                EXOFS_DBGMSG2("    writepages_done(0x%lx, 0x%lx) status=%d\n",
                             inode->i_ino, page->index, page_stat);

                length += PAGE_SIZE;
        }

        pcol_free(pcol);
        kfree(pcol);
        EXOFS_DBGMSG2("writepages_done END\n");
}

static int write_exec(struct page_collect *pcol)
{
        struct exofs_i_info *oi = exofs_i(pcol->inode);
        struct ore_io_state *ios;
        struct page_collect *pcol_copy = NULL;
        int ret;

        if (!pcol->pages)
                return 0;

        BUG_ON(pcol->ios);
        ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false,
                                 pcol->pg_first << PAGE_CACHE_SHIFT,
                                 pcol->length, &pcol->ios);
        if (unlikely(ret))
                goto err;

        pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
        if (!pcol_copy) {
                EXOFS_ERR("write_exec: Failed to kmalloc(pcol)\n");
                ret = -ENOMEM;
                goto err;
        }

        *pcol_copy = *pcol;

        ios = pcol->ios;
        ios->pages = pcol_copy->pages;
        ios->done = writepages_done;
        ios->private = pcol_copy;

        /* pages ownership was passed to pcol_copy */
        _pcol_reset(pcol);

        ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
        if (unlikely(ret))
                goto err;

        EXOFS_DBGMSG2("write_exec(0x%lx) offset=0x%llx length=0x%llx\n",
                pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));

        ret = ore_write(ios);
        if (unlikely(ret)) {
                EXOFS_ERR("write_exec: ore_write() Failed\n");
                goto err;
        }

        atomic_inc(&pcol->sbi->s_curr_pending);
        return 0;

err:
        _unlock_pcol_pages(pcol, ret, WRITE);
        pcol_free(pcol);
        kfree(pcol_copy);

        return ret;
}

/* writepage_strip is called either directly from writepage() or by the VFS from
 * within write_cache_pages(), to add one more page to be written to storage.
 * It will try to collect as many contiguous pages as possible. If a
 * discontinuity is encountered or it runs out of resources it will submit the
 * previous segment and will start a new collection.
 * Eventually the caller must submit the last segment if present.
 */
static int writepage_strip(struct page *page,
                           struct writeback_control *wbc_unused, void *data)
{
        struct page_collect *pcol = data;
        struct inode *inode = pcol->inode;
        struct exofs_i_info *oi = exofs_i(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        size_t len;
        int ret;

        BUG_ON(!PageLocked(page));

        ret = wait_obj_created(oi);
        if (unlikely(ret))
                goto fail;

        if (page->index < end_index)
                /* in this case, the page is within the limits of the file */
                len = PAGE_CACHE_SIZE;
        else {
                len = i_size & ~PAGE_CACHE_MASK;

                if (page->index > end_index || !len) {
                        /* in this case, the page is outside the limits
                         * (truncate in progress)
                         */
                        ret = write_exec(pcol);
                        if (unlikely(ret))
                                goto fail;
                        if (PageError(page))
                                ClearPageError(page);
                        unlock_page(page);
                        EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) "
                                     "outside the limits\n",
                                     inode->i_ino, page->index);
                        return 0;
                }
        }

try_again:

        if (unlikely(pcol->pg_first == -1)) {
                pcol->pg_first = page->index;
        } else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
                   page->index)) {
                /* Discontinuity detected, split the request */
                ret = write_exec(pcol);
                if (unlikely(ret))
                        goto fail;

                EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) Discontinuity\n",
                             inode->i_ino, page->index);
                goto try_again;
        }

        if (!pcol->pages) {
                ret = pcol_try_alloc(pcol);
                if (unlikely(ret))
                        goto fail;
        }

        EXOFS_DBGMSG2("    writepage_strip(0x%lx, 0x%lx) len=0x%zx\n",
                     inode->i_ino, page->index, len);

        ret = pcol_add_page(pcol, page, len);
        if (unlikely(ret)) {
                EXOFS_DBGMSG2("Failed pcol_add_page "
                             "nr_pages=%u total_length=0x%lx\n",
                             pcol->nr_pages, pcol->length);

                /* split the request, next loop will start again */
                ret = write_exec(pcol);
                if (unlikely(ret)) {
                        EXOFS_DBGMSG("write_exec failed => %d\n", ret);
                        goto fail;
                }

                goto try_again;
        }

        BUG_ON(PageWriteback(page));
        set_page_writeback(page);

        return 0;

fail:
        EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n",
                     inode->i_ino, page->index, ret);
        set_bit(AS_EIO, &page->mapping->flags);
        unlock_page(page);
        return ret;
}

static int exofs_writepages(struct address_space *mapping,
                       struct writeback_control *wbc)
{
        struct page_collect pcol;
        long start, end, expected_pages;
        int ret;

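        /* Estimate how many pages this writeout spans so pcol_try_alloc()
         * can size its page array up front.
         */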
        start = wbc->range_start >> PAGE_CACHE_SHIFT;
        end = (wbc->range_end == LLONG_MAX) ?
                        start + mapping->nrpages :
                        wbc->range_end >> PAGE_CACHE_SHIFT;

        if (start || end)
                expected_pages = end - start + 1;
        else
                expected_pages = mapping->nrpages;

        if (expected_pages < 32L)
                expected_pages = 32L;

        EXOFS_DBGMSG2("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx "
                     "nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n",
                     mapping->host->i_ino, wbc->range_start, wbc->range_end,
                     mapping->nrpages, start, end, expected_pages);

        _pcol_init(&pcol, expected_pages, mapping->host);

        ret = write_cache_pages(mapping, wbc, writepage_strip, &pcol);
        if (unlikely(ret)) {
                EXOFS_ERR("write_cache_pages => %d\n", ret);
                return ret;
        }

        ret = write_exec(&pcol);
        if (unlikely(ret))
                return ret;

        if (wbc->sync_mode == WB_SYNC_ALL) {
                return write_exec(&pcol); /* pump the last remainder */
        } else if (pcol.nr_pages) {
                /* not SYNC, let the remainder join the next writeout */
                unsigned i;

                for (i = 0; i < pcol.nr_pages; i++) {
                        struct page *page = pcol.pages[i];

                        end_page_writeback(page);
                        set_page_dirty(page);
                        unlock_page(page);
                }
        }
        return 0;
}

static int exofs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct page_collect pcol;
        int ret;

        _pcol_init(&pcol, 1, page->mapping->host);

        ret = writepage_strip(page, NULL, &pcol);
        if (ret) {
                EXOFS_ERR("exofs_writepage => %d\n", ret);
                return ret;
        }

        return write_exec(&pcol);
}

/* i_mutex is held, so using inode->i_size directly is safe */
static void _write_failed(struct inode *inode, loff_t to)
{
        if (to > inode->i_size)
                truncate_pagecache(inode, to, inode->i_size);
}

int exofs_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned flags,
                struct page **pagep, void **fsdata)
{
        int ret = 0;
        struct page *page;

        page = *pagep;
        if (page == NULL) {
                ret = simple_write_begin(file, mapping, pos, len, flags, pagep,
                                         fsdata);
                if (ret) {
                        EXOFS_DBGMSG("simple_write_begin failed\n");
                        goto out;
                }

                page = *pagep;
        }

        /* read modify write */
        if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
                loff_t i_size = i_size_read(mapping->host);
                pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
                size_t rlen;

                if (page->index < end_index)
                        rlen = PAGE_CACHE_SIZE;
                else if (page->index == end_index)
                        rlen = i_size & ~PAGE_CACHE_MASK;
                else
                        rlen = 0;

                if (!rlen) {
                        clear_highpage(page);
                        SetPageUptodate(page);
                        goto out;
                }

                ret = _readpage(page, true);
                if (ret) {
                        /* SetPageError was done by _readpage. Is it ok? */
                        unlock_page(page);
                        EXOFS_DBGMSG("_readpage failed\n");
                }
        }
out:
        if (unlikely(ret))
                _write_failed(mapping->host, pos + len);

        return ret;
}

static int exofs_write_begin_export(struct file *file,
                struct address_space *mapping,
                loff_t pos, unsigned len, unsigned flags,
                struct page **pagep, void **fsdata)
{
        *pagep = NULL;

        return exofs_write_begin(file, mapping, pos, len, flags, pagep,
                                        fsdata);
}

static int exofs_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        /* According to comment in simple_write_end i_mutex is held */
        loff_t i_size = inode->i_size;
        int ret;

        ret = simple_write_end(file, mapping, pos, len, copied, page, fsdata);
        if (unlikely(ret))
                _write_failed(inode, pos + len);

        /* TODO: once simple_write_end marks inode dirty remove */
        if (i_size != inode->i_size)
                mark_inode_dirty(inode);
        return ret;
}

static int exofs_releasepage(struct page *page, gfp_t gfp)
{
        EXOFS_DBGMSG("page 0x%lx\n", page->index);
        WARN_ON(1);
        return 0;
}

static void exofs_invalidatepage(struct page *page, unsigned long offset)
{
        EXOFS_DBGMSG("page 0x%lx offset 0x%lx\n", page->index, offset);
        WARN_ON(1);
}

const struct address_space_operations exofs_aops = {
        .readpage       = exofs_readpage,
        .readpages      = exofs_readpages,
        .writepage      = exofs_writepage,
        .writepages     = exofs_writepages,
        .write_begin    = exofs_write_begin_export,
        .write_end      = exofs_write_end,
        .releasepage    = exofs_releasepage,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .invalidatepage = exofs_invalidatepage,

        /* Not implemented yet */
        .bmap           = NULL, /* TODO: use osd's OSD_ACT_READ_MAP */
        .direct_IO      = NULL, /* TODO: Should be trivial to do */

        /* With these, NULL has special meaning or the default is not exported */
        .get_xip_mem    = NULL,
        .migratepage    = NULL,
        .launder_page   = NULL,
        .is_partially_uptodate = NULL,
        .error_remove_page = NULL,
};

/******************************************************************************
 * INODE OPERATIONS
 *****************************************************************************/

/*
 * Test whether an inode is a fast symlink.
 */
static inline int exofs_inode_is_fast_symlink(struct inode *inode)
{
        struct exofs_i_info *oi = exofs_i(inode);

        return S_ISLNK(inode->i_mode) && (oi->i_data[0] != 0);
}

static int _do_truncate(struct inode *inode, loff_t newsize)
{
        struct exofs_i_info *oi = exofs_i(inode);
        struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
        int ret;

        inode->i_mtime = inode->i_ctime = CURRENT_TIME;

        ret = ore_truncate(&sbi->layout, &oi->oc, (u64)newsize);
        if (likely(!ret))
                truncate_setsize(inode, newsize);

        EXOFS_DBGMSG("(0x%lx) size=0x%llx ret=>%d\n",
                     inode->i_ino, newsize, ret);
        return ret;
}

/*
 * Set inode attributes - update the size attribute on the OSD if needed,
 *                        otherwise just call generic functions.
 */
int exofs_setattr(struct dentry *dentry, struct iattr *iattr)
{
        struct inode *inode = dentry->d_inode;
        int error;

        /* if we are about to modify an object, and it hasn't been
         * created yet, wait
         */
        error = wait_obj_created(exofs_i(inode));
        if (unlikely(error))
                return error;

        error = inode_change_ok(inode, iattr);
        if (unlikely(error))
                return error;

        if ((iattr->ia_valid & ATTR_SIZE) &&
            iattr->ia_size != i_size_read(inode)) {
                error = _do_truncate(inode, iattr->ia_size);
                if (unlikely(error))
                        return error;
        }

        setattr_copy(inode, iattr);
        mark_inode_dirty(inode);
        return 0;
}

static const struct osd_attr g_attr_inode_file_layout = ATTR_DEF(
        EXOFS_APAGE_FS_DATA,
        EXOFS_ATTR_INODE_FILE_LAYOUT,
        0);
static const struct osd_attr g_attr_inode_dir_layout = ATTR_DEF(
        EXOFS_APAGE_FS_DATA,
        EXOFS_ATTR_INODE_DIR_LAYOUT,
        0);

/*
 * Read the Linux inode info from the OSD, and return it as is. In exofs the
 * inode info is in an application specific page/attribute of the osd-object.
 */
static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
                    struct exofs_fcb *inode)
{
        struct exofs_sb_info *sbi = sb->s_fs_info;
        struct osd_attr attrs[] = {
                [0] = g_attr_inode_data,
                [1] = g_attr_inode_file_layout,
                [2] = g_attr_inode_dir_layout,
        };
        struct ore_io_state *ios;
        struct exofs_on_disk_inode_layout *layout;
        int ret;

        ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
        if (unlikely(ret)) {
                EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
                return ret;
        }

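        /* The layout attributes are variable-sized; how much to read back
         * depends on the number of devices in the on-disk layout.
         */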
        attrs[1].len = exofs_on_disk_inode_layout_size(sbi->oc.numdevs);
        attrs[2].len = exofs_on_disk_inode_layout_size(sbi->oc.numdevs);

        ios->in_attr = attrs;
        ios->in_attr_len = ARRAY_SIZE(attrs);

        ret = ore_read(ios);
        if (unlikely(ret)) {
                EXOFS_ERR("object(0x%llx) corrupted, return empty file=>%d\n",
                          _LLU(oi->one_comp.obj.id), ret);
                memset(inode, 0, sizeof(*inode));
                inode->i_mode = 0040000 | (0777 & ~022); /* S_IFDIR | 0755 */
                /* If the object is lost on the target we might as well
                 * enable its delete.
                 */
                if ((ret == -ENOENT) || (ret == -EINVAL))
                        ret = 0;
                goto out;
        }

        ret = extract_attr_from_ios(ios, &attrs[0]);
        if (ret) {
                EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
                goto out;
        }
        WARN_ON(attrs[0].len != EXOFS_INO_ATTR_SIZE);
        memcpy(inode, attrs[0].val_ptr, EXOFS_INO_ATTR_SIZE);

        ret = extract_attr_from_ios(ios, &attrs[1]);
        if (ret) {
                EXOFS_ERR("%s: extract_attr of file_layout failed\n", __func__);
                goto out;
        }
        if (attrs[1].len) {
                layout = attrs[1].val_ptr;
                if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
                        EXOFS_ERR("%s: unsupported files layout %d\n",
                                __func__, layout->gen_func);
                        ret = -ENOTSUPP;
                        goto out;
                }
        }

        ret = extract_attr_from_ios(ios, &attrs[2]);
        if (ret) {
                EXOFS_ERR("%s: extract_attr of dir_layout failed\n", __func__);
                goto out;
        }
        if (attrs[2].len) {
                layout = attrs[2].val_ptr;
                if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
                        EXOFS_ERR("%s: unsupported meta-data layout %d\n",
                                __func__, layout->gen_func);
                        ret = -ENOTSUPP;
                        goto out;
                }
        }

out:
        ore_put_io_state(ios);
        return ret;
}

static void __oi_init(struct exofs_i_info *oi)
{
        init_waitqueue_head(&oi->i_wq);
        oi->i_flags = 0;
}

/*
 * Fill in an inode read from the OSD and set it up for use
 */
struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
{
        struct exofs_i_info *oi;
        struct exofs_fcb fcb;
        struct inode *inode;
        int ret;

        inode = iget_locked(sb, ino);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;
        oi = exofs_i(inode);
        __oi_init(oi);
        exofs_init_comps(&oi->oc, &oi->one_comp, sb->s_fs_info,
                         exofs_oi_objno(oi));

        /* read the inode from the osd */
        ret = exofs_get_inode(sb, oi, &fcb);
        if (ret)
                goto bad_inode;

        set_obj_created(oi);

        /* copy stuff from on-disk struct to in-memory struct */
        inode->i_mode = le16_to_cpu(fcb.i_mode);
        inode->i_uid = le32_to_cpu(fcb.i_uid);
        inode->i_gid = le32_to_cpu(fcb.i_gid);
        inode->i_nlink = le16_to_cpu(fcb.i_links_count);
        inode->i_ctime.tv_sec = (signed)le32_to_cpu(fcb.i_ctime);
        inode->i_atime.tv_sec = (signed)le32_to_cpu(fcb.i_atime);
        inode->i_mtime.tv_sec = (signed)le32_to_cpu(fcb.i_mtime);
        inode->i_ctime.tv_nsec =
                inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0;
        oi->i_commit_size = le64_to_cpu(fcb.i_size);
        i_size_write(inode, oi->i_commit_size);
        inode->i_blkbits = EXOFS_BLKSHIFT;
        inode->i_generation = le32_to_cpu(fcb.i_generation);

        oi->i_dir_start_lookup = 0;

        if ((inode->i_nlink == 0) && (inode->i_mode == 0)) {
                ret = -ESTALE;
                goto bad_inode;
        }

        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                if (fcb.i_data[0])
                        inode->i_rdev =
                                old_decode_dev(le32_to_cpu(fcb.i_data[0]));
                else
                        inode->i_rdev =
                                new_decode_dev(le32_to_cpu(fcb.i_data[1]));
        } else {
                memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
        }

        inode->i_mapping->backing_dev_info = sb->s_bdi;
        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &exofs_file_inode_operations;
                inode->i_fop = &exofs_file_operations;
                inode->i_mapping->a_ops = &exofs_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &exofs_dir_inode_operations;
                inode->i_fop = &exofs_dir_operations;
                inode->i_mapping->a_ops = &exofs_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                if (exofs_inode_is_fast_symlink(inode))
                        inode->i_op = &exofs_fast_symlink_inode_operations;
                else {
                        inode->i_op = &exofs_symlink_inode_operations;
                        inode->i_mapping->a_ops = &exofs_aops;
                }
        } else {
                inode->i_op = &exofs_special_inode_operations;
                if (fcb.i_data[0])
                        init_special_inode(inode, inode->i_mode,
                           old_decode_dev(le32_to_cpu(fcb.i_data[0])));
                else
                        init_special_inode(inode, inode->i_mode,
                           new_decode_dev(le32_to_cpu(fcb.i_data[1])));
        }

        unlock_new_inode(inode);
        return inode;

bad_inode:
        iget_failed(inode);
        return ERR_PTR(ret);
}

int __exofs_wait_obj_created(struct exofs_i_info *oi)
{
        if (!obj_created(oi)) {
                EXOFS_DBGMSG("!obj_created\n");
                BUG_ON(!obj_2bcreated(oi));
                wait_event(oi->i_wq, obj_created(oi));
                EXOFS_DBGMSG("wait_event done\n");
        }
        return unlikely(is_bad_inode(&oi->vfs_inode)) ? -EIO : 0;
}

/*
 * Callback function from exofs_new_inode().  The important thing is that we
 * set the obj_created flag so that other methods know that the object exists
 * on the OSD.
 */
static void create_done(struct ore_io_state *ios, void *p)
{
        struct inode *inode = p;
        struct exofs_i_info *oi = exofs_i(inode);
        struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
        int ret;

        ret = ore_check_io(ios, NULL);
        ore_put_io_state(ios);

        atomic_dec(&sbi->s_curr_pending);

        if (unlikely(ret)) {
                EXOFS_ERR("object=0x%llx creation failed in pid=0x%llx\n",
                          _LLU(exofs_oi_objno(oi)),
                          _LLU(oi->one_comp.obj.partition));
                /* TODO: When the FS is corrupted, creation can fail because
                 * the object already exists. Get rid of this asynchronous
                 * creation; if the object exists, increment the obj counter
                 * and try the next object until we succeed. All these
                 * dangling objects will be made into lost files by
                 * chkfs.exofs
                 */
        }

        set_obj_created(oi);

        wake_up(&oi->i_wq);
}

/*
 * Set up a new inode and create an object for it on the OSD
 */
struct inode *exofs_new_inode(struct inode *dir, int mode)
{
        struct super_block *sb = dir->i_sb;
        struct exofs_sb_info *sbi = sb->s_fs_info;
        struct inode *inode;
        struct exofs_i_info *oi;
        struct ore_io_state *ios;
        int ret;

        inode = new_inode(sb);
        if (!inode)
                return ERR_PTR(-ENOMEM);

        oi = exofs_i(inode);
        __oi_init(oi);

        set_obj_2bcreated(oi);

        inode->i_mapping->backing_dev_info = sb->s_bdi;
        inode_init_owner(inode, dir, mode);
        inode->i_ino = sbi->s_nextid++;
        inode->i_blkbits = EXOFS_BLKSHIFT;
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
        oi->i_commit_size = inode->i_size = 0;
        spin_lock(&sbi->s_next_gen_lock);
        inode->i_generation = sbi->s_next_generation++;
        spin_unlock(&sbi->s_next_gen_lock);
        insert_inode_hash(inode);

        exofs_init_comps(&oi->oc, &oi->one_comp, sb->s_fs_info,
                         exofs_oi_objno(oi));
        exofs_sbi_write_stats(sbi); /* Make sure new sbi->s_nextid is on disk */

        mark_inode_dirty(inode);

        ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
        if (unlikely(ret)) {
                EXOFS_ERR("exofs_new_inode: ore_get_io_state failed\n");
                return ERR_PTR(ret);
        }

        ios->done = create_done;
        ios->private = inode;

        ret = ore_create(ios);
        if (ret) {
                ore_put_io_state(ios);
                return ERR_PTR(ret);
        }
        atomic_inc(&sbi->s_curr_pending);

        return inode;
}

/*
 * struct to pass two arguments to update_inode's callback
 */
struct updatei_args {
        struct exofs_sb_info    *sbi;
        struct exofs_fcb        fcb;
};

/*
 * Callback function from exofs_update_inode().
 */
static void updatei_done(struct ore_io_state *ios, void *p)
{
        struct updatei_args *args = p;

        ore_put_io_state(ios);

        atomic_dec(&args->sbi->s_curr_pending);

        kfree(args);
}

/*
 * Write the inode to the OSD.  Just fill up the struct, and set the attribute
 * synchronously or asynchronously depending on the do_sync flag.
 */
static int exofs_update_inode(struct inode *inode, int do_sync)
{
        struct exofs_i_info *oi = exofs_i(inode);
        struct super_block *sb = inode->i_sb;
        struct exofs_sb_info *sbi = sb->s_fs_info;
        struct ore_io_state *ios;
        struct osd_attr attr;
        struct exofs_fcb *fcb;
        struct updatei_args *args;
        int ret;

        args = kzalloc(sizeof(*args), GFP_KERNEL);
        if (!args) {
                EXOFS_DBGMSG("Failed kzalloc of args\n");
                return -ENOMEM;
        }

        fcb = &args->fcb;

        fcb->i_mode = cpu_to_le16(inode->i_mode);
        fcb->i_uid = cpu_to_le32(inode->i_uid);
        fcb->i_gid = cpu_to_le32(inode->i_gid);
        fcb->i_links_count = cpu_to_le16(inode->i_nlink);
        fcb->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
        fcb->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
        fcb->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
        oi->i_commit_size = i_size_read(inode);
        fcb->i_size = cpu_to_le64(oi->i_commit_size);
        fcb->i_generation = cpu_to_le32(inode->i_generation);

        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                if (old_valid_dev(inode->i_rdev)) {
                        fcb->i_data[0] =
                                cpu_to_le32(old_encode_dev(inode->i_rdev));
                        fcb->i_data[1] = 0;
                } else {
                        fcb->i_data[0] = 0;
                        fcb->i_data[1] =
                                cpu_to_le32(new_encode_dev(inode->i_rdev));
                        fcb->i_data[2] = 0;
                }
        } else
                memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data));

        ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
        if (unlikely(ret)) {
                EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
                goto free_args;
        }

        attr = g_attr_inode_data;
        attr.val_ptr = fcb;
        ios->out_attr_len = 1;
        ios->out_attr = &attr;

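        /* The object may still be mid-creation; don't let the attribute
         * write race the asynchronous create.
         */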
        wait_obj_created(oi);

        if (!do_sync) {
                args->sbi = sbi;
                ios->done = updatei_done;
                ios->private = args;
        }

        ret = ore_write(ios);
        if (!do_sync && !ret) {
                atomic_inc(&sbi->s_curr_pending);
                goto out; /* deallocation in updatei_done */
        }

        ore_put_io_state(ios);
free_args:
        kfree(args);
out:
        EXOFS_DBGMSG("(0x%lx) do_sync=%d ret=>%d\n",
                     inode->i_ino, do_sync, ret);
        return ret;
}

int exofs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
        /* FIXME: fix fsync and use wbc->sync_mode == WB_SYNC_ALL */
        return exofs_update_inode(inode, 1);
}

/*
 * Callback function from exofs_delete_inode() - don't have much cleaning up to
 * do.
 */
static void delete_done(struct ore_io_state *ios, void *p)
{
        struct exofs_sb_info *sbi = p;

        ore_put_io_state(ios);

        atomic_dec(&sbi->s_curr_pending);
}

/*
 * Called when the refcount of an inode reaches zero.  We remove the object
 * from the OSD here.  We make sure the object was created before we try and
 * delete it.
 */
void exofs_evict_inode(struct inode *inode)
{
        struct exofs_i_info *oi = exofs_i(inode);
        struct super_block *sb = inode->i_sb;
        struct exofs_sb_info *sbi = sb->s_fs_info;
        struct ore_io_state *ios;
        int ret;

        truncate_inode_pages(&inode->i_data, 0);

        /* TODO: should do better here */
        if (inode->i_nlink || is_bad_inode(inode))
                goto no_delete;

        inode->i_size = 0;
        end_writeback(inode);

        /* if we are deleting an obj that hasn't been created yet, wait.
         * This also makes sure that create_done cannot be called with an
         * already evicted inode.
         */
        wait_obj_created(oi);
        /* ignore the error, attempt a remove anyway */

        /* Now Remove the OSD objects */
        ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
        if (unlikely(ret)) {
                EXOFS_ERR("%s: ore_get_io_state failed\n", __func__);
                return;
        }

        ios->done = delete_done;
        ios->private = sbi;

        ret = ore_remove(ios);
        if (ret) {
                EXOFS_ERR("%s: ore_remove failed\n", __func__);
                ore_put_io_state(ios);
                return;
        }
        atomic_inc(&sbi->s_curr_pending);

        return;

no_delete:
        end_writeback(inode);
}