// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
        _enter("");
        return __set_page_dirty_nobuffers(page);
}
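
/* A note on the dirty-range tracking used throughout this file: rather than
 * treating a whole page as dirty, the modified byte range of each page is
 * packed into page->private with the afs_page_dirty*() helpers (defined in
 * internal.h).  For example, a write covering bytes 0x100-0x2ff of a page
 * would be recorded as priv = afs_page_dirty(0x100, 0x300) and unpacked
 * again with afs_page_dirty_from(priv) and afs_page_dirty_to(priv), where
 * "from" is inclusive and "to" is exclusive.  Overlapping or adjoining
 * writes to the same page are merged by widening this single range.
 */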

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
                         loff_t pos, unsigned int len, struct page *page)
{
        struct afs_read *req;
        size_t p;
        void *data;
        int ret;

        _enter(",,%llu", (unsigned long long)pos);

        if (pos >= vnode->vfs_inode.i_size) {
                p = pos & ~PAGE_MASK;
                ASSERTCMP(p + len, <=, PAGE_SIZE);
                data = kmap(page);
                memset(data + p, 0, len);
                kunmap(page);
                return 0;
        }

        req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        refcount_set(&req->usage, 1);
        req->pos = pos;
        req->len = len;
        req->nr_pages = 1;
        req->pages = req->array;
        req->pages[0] = page;
        get_page(page);

        ret = afs_fetch_data(vnode, key, req);
        afs_put_read(req);
        if (ret < 0) {
                if (ret == -ENOENT) {
                        _debug("got NOENT from server"
                               " - marking file deleted and stale");
                        set_bit(AFS_VNODE_DELETED, &vnode->flags);
                        ret = -ESTALE;
                }
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
                    loff_t pos, unsigned len, unsigned flags,
                    struct page **_page, void **fsdata)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        struct page *page;
        struct key *key = afs_file_key(file);
        unsigned long priv;
        unsigned f, from = pos & (PAGE_SIZE - 1);
        unsigned t, to = from + len;
        pgoff_t index = pos >> PAGE_SHIFT;
        int ret;

        _enter("{%llx:%llu},{%lx},%u,%u",
               vnode->fid.vid, vnode->fid.vnode, index, from, to);

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;
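
        /* If the write won't cover the whole of the page, pre-fill the rest
         * of it from the server (afs_fill_page() zeroes the part beyond EOF)
         * so that the page can be marked uptodate.
         */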
        if (!PageUptodate(page) && len != PAGE_SIZE) {
                ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
                if (ret < 0) {
                        unlock_page(page);
                        put_page(page);
                        _leave(" = %d [prep]", ret);
                        return ret;
                }
                SetPageUptodate(page);
        }

try_again:
        /* See if this page is already partially written in a way that we can
         * merge the new write with.
         */
        t = f = 0;
        if (PagePrivate(page)) {
                priv = page_private(page);
                f = afs_page_dirty_from(priv);
                t = afs_page_dirty_to(priv);
                ASSERTCMP(f, <=, t);
        }

        if (f != t) {
                if (PageWriteback(page)) {
                        trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
                                             page->index, priv);
                        goto flush_conflicting_write;
                }
                /* If the file is being filled locally, allow inter-write
                 * spaces to be merged into writes.  If it's not, only write
                 * back what the user gives us.
                 */
                if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
                    (to < f || from > t))
                        goto flush_conflicting_write;
        }

        *_page = page;
        _leave(" = 0");
        return 0;

        /* The previous write and this write aren't adjacent or overlapping, so
         * flush the page out.
         */
flush_conflicting_write:
        _debug("flush conflict");
        ret = write_one_page(page);
        if (ret < 0)
                goto error;

        ret = lock_page_killable(page);
        if (ret < 0)
                goto error;
        goto try_again;

error:
        put_page(page);
        _leave(" = %d", ret);
        return ret;
}
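
/* Note: afs_write_begin() and afs_write_end() are called in pairs by
 * generic_perform_write() for each page-at-a-time chunk of a buffered
 * write; they're wired up through the address_space_operations table in
 * fs/afs/file.c.
 */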

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
                  loff_t pos, unsigned len, unsigned copied,
                  struct page *page, void *fsdata)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        struct key *key = afs_file_key(file);
        unsigned long priv;
        unsigned int f, from = pos & (PAGE_SIZE - 1);
        unsigned int t, to = from + copied;
        loff_t i_size, maybe_i_size;
        int ret;

        _enter("{%llx:%llu},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, page->index);

        maybe_i_size = pos + copied;

        i_size = i_size_read(&vnode->vfs_inode);
        if (maybe_i_size > i_size) {
                write_seqlock(&vnode->cb_lock);
                i_size = i_size_read(&vnode->vfs_inode);
                if (maybe_i_size > i_size)
                        i_size_write(&vnode->vfs_inode, maybe_i_size);
                write_sequnlock(&vnode->cb_lock);
        }

        if (!PageUptodate(page)) {
                if (copied < len) {
                        /* Try and load any missing data from the server.  The
                         * unmarshalling routine will take care of clearing any
                         * bits that are beyond the EOF.
                         */
                        ret = afs_fill_page(vnode, key, pos + copied,
                                            len - copied, page);
                        if (ret < 0)
                                goto out;
                }
                SetPageUptodate(page);
        }

        if (PagePrivate(page)) {
                priv = page_private(page);
                f = afs_page_dirty_from(priv);
                t = afs_page_dirty_to(priv);
                if (from < f)
                        f = from;
                if (to > t)
                        t = to;
                priv = afs_page_dirty(f, t);
                set_page_private(page, priv);
                trace_afs_page_dirty(vnode, tracepoint_string("dirty+"),
                                     page->index, priv);
        } else {
                priv = afs_page_dirty(from, to);
                attach_page_private(page, (void *)priv);
                trace_afs_page_dirty(vnode, tracepoint_string("dirty"),
                                     page->index, priv);
        }

        set_page_dirty(page);
        if (PageDirty(page))
                _debug("dirtied");
        ret = copied;

out:
        unlock_page(page);
        put_page(page);
        return ret;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
                           pgoff_t first, pgoff_t last)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct pagevec pv;
        unsigned count, loop;

        _enter("{%llx:%llu},%lx-%lx",
               vnode->fid.vid, vnode->fid.vnode, first, last);

        pagevec_init(&pv);

        do {
                _debug("kill %lx-%lx", first, last);

                count = last - first + 1;
                if (count > PAGEVEC_SIZE)
                        count = PAGEVEC_SIZE;
                pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
                ASSERTCMP(pv.nr, ==, count);

                for (loop = 0; loop < count; loop++) {
                        struct page *page = pv.pages[loop];
                        ClearPageUptodate(page);
                        SetPageError(page);
                        end_page_writeback(page);
                        if (page->index >= first)
                                first = page->index + 1;
                        lock_page(page);
                        generic_error_remove_page(mapping, page);
                        unlock_page(page);
                }

                __pagevec_release(&pv);
        } while (first <= last);

        _leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
                              struct address_space *mapping,
                              pgoff_t first, pgoff_t last)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct pagevec pv;
        unsigned count, loop;

        _enter("{%llx:%llu},%lx-%lx",
               vnode->fid.vid, vnode->fid.vnode, first, last);

        pagevec_init(&pv);

        do {
                _debug("redirty %lx-%lx", first, last);

                count = last - first + 1;
                if (count > PAGEVEC_SIZE)
                        count = PAGEVEC_SIZE;
                pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
                ASSERTCMP(pv.nr, ==, count);

                for (loop = 0; loop < count; loop++) {
                        struct page *page = pv.pages[loop];

                        redirty_page_for_writepage(wbc, page);
                        end_page_writeback(page);
                        if (page->index >= first)
                                first = page->index + 1;
                }

                __pagevec_release(&pv);
        } while (first <= last);

        _leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode,
                                   pgoff_t first, pgoff_t last)
{
        struct pagevec pv;
        unsigned long priv;
        unsigned count, loop;

        _enter("{%llx:%llu},{%lx-%lx}",
               vnode->fid.vid, vnode->fid.vnode, first, last);

        pagevec_init(&pv);

        do {
                _debug("done %lx-%lx", first, last);

                count = last - first + 1;
                if (count > PAGEVEC_SIZE)
                        count = PAGEVEC_SIZE;
                pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
                                              first, count, pv.pages);
                ASSERTCMP(pv.nr, ==, count);

                for (loop = 0; loop < count; loop++) {
                        priv = (unsigned long)detach_page_private(pv.pages[loop]);
                        trace_afs_page_dirty(vnode, tracepoint_string("clear"),
                                             pv.pages[loop]->index, priv);
                        end_page_writeback(pv.pages[loop]);
                }
                first += count;
                __pagevec_release(&pv);
        } while (first <= last);

        afs_prune_wb_keys(vnode);
        _leave("");
}

/*
 * Find a key to use for the writeback.  The keys used to author writes are
 * cached on the vnode.  *_wbk contains the last writeback key tried or NULL;
 * if it's set, the search resumes from the key after it.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
                                 struct afs_wb_key **_wbk)
{
        struct afs_wb_key *wbk = NULL;
        struct list_head *p;
        int ret = -ENOKEY, ret2;

        spin_lock(&vnode->wb_lock);
        if (*_wbk)
                p = (*_wbk)->vnode_link.next;
        else
                p = vnode->wb_keys.next;

        while (p != &vnode->wb_keys) {
                wbk = list_entry(p, struct afs_wb_key, vnode_link);
                _debug("wbk %u", key_serial(wbk->key));
                ret2 = key_validate(wbk->key);
                if (ret2 == 0) {
                        refcount_inc(&wbk->usage);
                        _debug("USE WB KEY %u", key_serial(wbk->key));
                        break;
                }

                wbk = NULL;
                if (ret == -ENOKEY)
                        ret = ret2;
                p = p->next;
        }

        spin_unlock(&vnode->wb_lock);
        if (*_wbk)
                afs_put_wb_key(*_wbk);
        *_wbk = wbk;
        return wbk ? 0 : ret;
}

static void afs_store_data_success(struct afs_operation *op)
{
        struct afs_vnode *vnode = op->file[0].vnode;

        op->ctime = op->file[0].scb.status.mtime_client;
        afs_vnode_commit_status(op, &op->file[0]);
        if (op->error == 0) {
                if (!op->store.laundering)
                        afs_pages_written_back(vnode, op->store.first, op->store.last);
                afs_stat_v(vnode, n_stores);
                atomic_long_add((op->store.last * PAGE_SIZE + op->store.last_to) -
                                (op->store.first * PAGE_SIZE + op->store.first_offset),
                                &afs_v2net(vnode)->n_store_bytes);
        }
}

static const struct afs_operation_ops afs_store_data_operation = {
        .issue_afs_rpc  = afs_fs_store_data,
        .issue_yfs_rpc  = yfs_fs_store_data,
        .success        = afs_store_data_success,
};

/*
 * write a range of pages back to the server
 */
static int afs_store_data(struct address_space *mapping,
                          pgoff_t first, pgoff_t last,
                          unsigned offset, unsigned to, bool laundering)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct afs_operation *op;
        struct afs_wb_key *wbk = NULL;
        int ret;

        _enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
               vnode->volume->name,
               vnode->fid.vid,
               vnode->fid.vnode,
               vnode->fid.unique,
               first, last, offset, to);

        ret = afs_get_writeback_key(vnode, &wbk);
        if (ret) {
                _leave(" = %d [no keys]", ret);
                return ret;
        }

        op = afs_alloc_operation(wbk->key, vnode->volume);
        if (IS_ERR(op)) {
                afs_put_wb_key(wbk);
                return -ENOMEM;
        }

        afs_op_set_vnode(op, 0, vnode);
        op->file[0].dv_delta = 1;
        op->store.mapping = mapping;
        op->store.first = first;
        op->store.last = last;
        op->store.first_offset = offset;
        op->store.last_to = to;
        op->store.laundering = laundering;
        op->mtime = vnode->vfs_inode.i_mtime;
        op->flags |= AFS_OPERATION_UNINTR;
        op->ops = &afs_store_data_operation;
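
        /* Issue the StoreData RPC.  If the server rejects the key currently
         * in use with a permission or key-validity error, rotate to the next
         * key cached on the vnode and reissue the operation with that.
         */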
try_next_key:
        afs_begin_vnode_operation(op);
        afs_wait_for_operation(op);

        switch (op->error) {
        case -EACCES:
        case -EPERM:
        case -ENOKEY:
        case -EKEYEXPIRED:
        case -EKEYREJECTED:
        case -EKEYREVOKED:
                _debug("next");

                ret = afs_get_writeback_key(vnode, &wbk);
                if (ret == 0) {
                        key_put(op->key);
                        op->key = key_get(wbk->key);
                        goto try_next_key;
                }
                break;
        }

        afs_put_wb_key(wbk);
        _leave(" = %d", op->error);
        return afs_put_operation(op);
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
                                           struct writeback_control *wbc,
                                           struct page *primary_page,
                                           pgoff_t final_page)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct page *pages[8], *page;
        unsigned long count, priv;
        unsigned n, offset, to, f, t;
        pgoff_t start, first, last;
        loff_t i_size, end;
        int loop, ret;

        _enter(",%lx", primary_page->index);

        count = 1;
        if (test_set_page_writeback(primary_page))
                BUG();

        /* Find all consecutive lockable dirty pages that have contiguous
         * written regions, stopping when we find a page that is not
         * immediately lockable, is not dirty or is missing, or we reach the
         * end of the range.
         */
        start = primary_page->index;
        priv = page_private(primary_page);
        offset = afs_page_dirty_from(priv);
        to = afs_page_dirty_to(priv);
        trace_afs_page_dirty(vnode, tracepoint_string("store"),
                             primary_page->index, priv);

        WARN_ON(offset == to);
        if (offset == to)
                trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
                                     primary_page->index, priv);

        if (start >= final_page ||
            (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
                goto no_more;

        start++;
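
        /* Extend the batch in chunks of up to ARRAY_SIZE(pages) pages at a
         * time, capping a single store operation at 65536 pages (256MiB with
         * the common 4KiB page size).
         */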
        do {
                _debug("more %lx [%lx]", start, count);
                n = final_page - start + 1;
                if (n > ARRAY_SIZE(pages))
                        n = ARRAY_SIZE(pages);
                n = find_get_pages_contig(mapping, start, n, pages);
                _debug("fgpc %u", n);
                if (n == 0)
                        goto no_more;
                if (pages[0]->index != start) {
                        do {
                                put_page(pages[--n]);
                        } while (n > 0);
                        goto no_more;
                }

                for (loop = 0; loop < n; loop++) {
                        page = pages[loop];
                        if (to != PAGE_SIZE &&
                            !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
                                break;
                        if (page->index > final_page)
                                break;
                        if (!trylock_page(page))
                                break;
                        if (!PageDirty(page) || PageWriteback(page)) {
                                unlock_page(page);
                                break;
                        }

                        priv = page_private(page);
                        f = afs_page_dirty_from(priv);
                        t = afs_page_dirty_to(priv);
                        if (f != 0 &&
                            !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
                                unlock_page(page);
                                break;
                        }
                        to = t;

                        trace_afs_page_dirty(vnode, tracepoint_string("store+"),
                                             page->index, priv);

                        if (!clear_page_dirty_for_io(page))
                                BUG();
                        if (test_set_page_writeback(page))
                                BUG();
                        unlock_page(page);
                        put_page(page);
                }
                count += loop;
                if (loop < n) {
                        for (; loop < n; loop++)
                                put_page(pages[loop]);
                        goto no_more;
                }

                start += loop;
        } while (start <= final_page && count < 65536);

no_more:
        /* We now have a contiguous set of dirty pages, each with writeback
         * set; the first page is still locked at this point, but all the rest
         * have been unlocked.
         */
        unlock_page(primary_page);

        first = primary_page->index;
        last = first + count - 1;

        end = (loff_t)last * PAGE_SIZE + to;
        i_size = i_size_read(&vnode->vfs_inode);

        _debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);
        if (end > i_size)
                to = i_size & ~PAGE_MASK;

        ret = afs_store_data(mapping, first, last, offset, to, false);
        switch (ret) {
        case 0:
                ret = count;
                break;

        default:
                pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
                fallthrough;
        case -EACCES:
        case -EPERM:
        case -ENOKEY:
        case -EKEYEXPIRED:
        case -EKEYREJECTED:
        case -EKEYREVOKED:
                afs_redirty_pages(wbc, mapping, first, last);
                mapping_set_error(mapping, ret);
                break;

        case -EDQUOT:
        case -ENOSPC:
                afs_redirty_pages(wbc, mapping, first, last);
                mapping_set_error(mapping, -ENOSPC);
                break;

        case -EROFS:
        case -EIO:
        case -EREMOTEIO:
        case -EFBIG:
        case -ENOENT:
        case -ENOMEDIUM:
        case -ENXIO:
                trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
                afs_kill_pages(mapping, first, last);
                mapping_set_error(mapping, ret);
                break;
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
        int ret;

        _enter("{%lx},", page->index);

        ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
                                              wbc->range_end >> PAGE_SHIFT);
        if (ret < 0) {
                _leave(" = %d", ret);
                return 0;
        }

        wbc->nr_to_write -= ret;

        _leave(" = 0");
        return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
                                 struct writeback_control *wbc,
                                 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
        struct page *page;
        int ret, n;

        _enter(",,%lx,%lx,", index, end);

        do {
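                /* Pull the next page in the range that's tagged dirty out of
                 * the pagecache; each one found seeds a contiguous batch for
                 * afs_write_back_from_locked_page() to write back.
                 */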
                n = find_get_pages_range_tag(mapping, &index, end,
                                        PAGECACHE_TAG_DIRTY, 1, &page);
                if (!n)
                        break;

                _debug("wback %lx", page->index);

                /*
                 * at this point we hold neither the i_pages lock nor the
                 * page lock: the page may be truncated or invalidated
                 * (changing page->mapping to NULL), or even swizzled
                 * back from swapper_space to tmpfs file mapping
                 */
                ret = lock_page_killable(page);
                if (ret < 0) {
                        put_page(page);
                        _leave(" = %d", ret);
                        return ret;
                }

                if (page->mapping != mapping || !PageDirty(page)) {
                        unlock_page(page);
                        put_page(page);
                        continue;
                }

                if (PageWriteback(page)) {
                        unlock_page(page);
                        if (wbc->sync_mode != WB_SYNC_NONE)
                                wait_on_page_writeback(page);
                        put_page(page);
                        continue;
                }

                if (!clear_page_dirty_for_io(page))
                        BUG();
                ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
                put_page(page);
                if (ret < 0) {
                        _leave(" = %d", ret);
                        return ret;
                }

                wbc->nr_to_write -= ret;

                cond_resched();
        } while (index < end && wbc->nr_to_write > 0);

        *_next = index;
        _leave(" = 0 [%lx]", *_next);
        return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
                   struct writeback_control *wbc)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        pgoff_t start, end, next;
        int ret;

        _enter("");

        /* We have to be careful as we can end up racing with setattr()
         * truncating the pagecache since the caller doesn't take a lock here
         * to prevent it.
         */
        if (wbc->sync_mode == WB_SYNC_ALL)
                down_read(&vnode->validate_lock);
        else if (!down_read_trylock(&vnode->validate_lock))
                return 0;
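
        /* For cyclic writeback, resume from where the last pass left off and
         * wrap round to the start of the file if need be.
         */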
        if (wbc->range_cyclic) {
                start = mapping->writeback_index;
                end = -1;
                ret = afs_writepages_region(mapping, wbc, start, end, &next);
                if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
                        ret = afs_writepages_region(mapping, wbc, 0, start,
                                                    &next);
                mapping->writeback_index = next;
        } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
                end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
                ret = afs_writepages_region(mapping, wbc, 0, end, &next);
                if (wbc->nr_to_write > 0)
                        mapping->writeback_index = next;
        } else {
                start = wbc->range_start >> PAGE_SHIFT;
                end = wbc->range_end >> PAGE_SHIFT;
                ret = afs_writepages_region(mapping, wbc, start, end, &next);
        }

        up_read(&vnode->validate_lock);
        _leave(" = %d", ret);
        return ret;
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
        ssize_t result;
        size_t count = iov_iter_count(from);

        _enter("{%llx:%llu},{%zu},",
               vnode->fid.vid, vnode->fid.vnode, count);

        if (IS_SWAPFILE(&vnode->vfs_inode)) {
                printk(KERN_INFO
                       "AFS: Attempt to write to active swap file!\n");
                return -EBUSY;
        }

        if (!count)
                return 0;

        result = generic_file_write_iter(iocb, from);

        _leave(" = %zd", result);
        return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = file_inode(file);
        struct afs_vnode *vnode = AFS_FS_I(inode);

        _enter("{%llx:%llu},{n=%pD},%d",
               vnode->fid.vid, vnode->fid.vnode, file,
               datasync);

        return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
        struct file *file = vmf->vma->vm_file;
        struct inode *inode = file_inode(file);
        struct afs_vnode *vnode = AFS_FS_I(inode);
        unsigned long priv;

        _enter("{{%llx:%llu}},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

        sb_start_pagefault(inode->i_sb);

        /* Wait for the page to be written to the cache before we allow it to
         * be modified.  We then assume the entire page will need writing back.
         */
#ifdef CONFIG_AFS_FSCACHE
        fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

        if (PageWriteback(vmf->page) &&
            wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
                return VM_FAULT_RETRY;

        if (lock_page_killable(vmf->page) < 0)
                return VM_FAULT_RETRY;

        /* We mustn't change page->private until writeback is complete as that
         * details the portion of the page we need to write back and we might
         * need to redirty the page if there's a problem.
         */
        wait_on_page_writeback(vmf->page);

        priv = afs_page_dirty(0, PAGE_SIZE);
        priv = afs_page_dirty_mmapped(priv);
        trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
                             vmf->page->index, priv);
        if (PagePrivate(vmf->page))
                set_page_private(vmf->page, priv);
        else
                attach_page_private(vmf->page, (void *)priv);
        file_update_time(file);

        sb_end_pagefault(inode->i_sb);
        return VM_FAULT_LOCKED;
}

/*
 * Prune the keys cached for writeback.  This takes vnode->wb_lock itself, so
 * the caller must not hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
        LIST_HEAD(graveyard);
        struct afs_wb_key *wbk, *tmp;

        /* Discard unused keys */
        spin_lock(&vnode->wb_lock);

        if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
            !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
                list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
                        if (refcount_read(&wbk->usage) == 1)
                                list_move(&wbk->vnode_link, &graveyard);
                }
        }

        spin_unlock(&vnode->wb_lock);

        while (!list_empty(&graveyard)) {
                wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
                list_del(&wbk->vnode_link);
                afs_put_wb_key(wbk);
        }
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        unsigned long priv;
        unsigned int f, t;
        int ret = 0;

        _enter("{%lx}", page->index);

        priv = page_private(page);
        if (clear_page_dirty_for_io(page)) {
                f = 0;
                t = PAGE_SIZE;
                if (PagePrivate(page)) {
                        f = afs_page_dirty_from(priv);
                        t = afs_page_dirty_to(priv);
                }

                trace_afs_page_dirty(vnode, tracepoint_string("launder"),
                                     page->index, priv);
                ret = afs_store_data(mapping, page->index, page->index, f, t, true);
        }

        priv = (unsigned long)detach_page_private(page);
        trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
                             page->index, priv);

#ifdef CONFIG_AFS_FSCACHE
        if (PageFsCache(page)) {
                fscache_wait_on_page_write(vnode->cache, page);
                fscache_uncache_page(vnode->cache, page);
        }
#endif
        return ret;
}