// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

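/**
 * gfs2_page_add_databufs - add a page's buffers to the current transaction
 * @ip: The inode
 * @page: The page
 * @from: Offset of the first byte of the range within the page
 * @len: Length of the range in bytes
 *
 * Marks each buffer head that overlaps the byte range uptodate and adds it
 * to the running transaction via gfs2_trans_add_data().
 */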
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
                            unsigned int from, unsigned int len)
{
        struct buffer_head *head = page_buffers(page);
        unsigned int bsize = head->b_size;
        struct buffer_head *bh;
        unsigned int to = from + len;
        unsigned int start, end;

        for (bh = head, start = 0; bh != head || !start;
             bh = bh->b_this_page, start = end) {
                end = start + bsize;
                if (end <= from)
                        continue;
                if (start >= to)
                        break;
                set_buffer_uptodate(bh);
                gfs2_trans_add_data(ip->i_gl, bh);
        }
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
                                  struct buffer_head *bh_result, int create)
{
        int error;

        error = gfs2_block_map(inode, lblock, bh_result, 0);
        if (error)
                return error;
        if (!buffer_mapped(bh_result))
                return -ENODATA;
        return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 */
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct iomap_writepage_ctx wpc = { };

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
                goto out;
        if (current->journal_info)
                goto redirty;
        return iomap_writepage(page, wbc, &wpc, &gfs2_writeback_ops);

redirty:
        redirty_page_for_writepage(wbc, page);
out:
        unlock_page(page);
        return 0;
}

/**
 * gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_page(struct page *page,
                                 struct writeback_control *wbc)
{
        struct inode * const inode = page->mapping->host;
        loff_t i_size = i_size_read(inode);
        const pgoff_t end_index = i_size >> PAGE_SHIFT;
        unsigned offset;

        /*
         * The page straddles i_size.  It must be zeroed out on each and every
         * writepage invocation because it may be mmapped.  "A file is mapped
         * in multiples of the page size.  For a file that is not a multiple of
         * the  page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
        offset = i_size & (PAGE_SIZE - 1);
        if (page->index == end_index && offset)
                zero_user_segment(page, offset, PAGE_SIZE);

        return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
                                       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);

        if (PageChecked(page)) {
                ClearPageChecked(page);
                if (!page_has_buffers(page)) {
                        create_empty_buffers(page, inode->i_sb->s_blocksize,
                                             BIT(BH_Dirty)|BIT(BH_Uptodate));
                }
                gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
        }
        return gfs2_write_jdata_page(page, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
                goto out;
        if (PageChecked(page) || current->journal_info)
                goto out_ignore;
        return __gfs2_jdata_writepage(page, wbc);

out_ignore:
        redirty_page_for_writepage(wbc, page);
out:
        unlock_page(page);
        return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        struct iomap_writepage_ctx wpc = { };
        int ret;

        /*
         * Even if we didn't write any pages here, we might still be holding
         * dirty pages in the ail. We forcibly flush the ail because we don't
         * want balance_dirty_pages() to loop indefinitely trying to write out
         * pages held in the ail that it can't find.
         */
        ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
        if (ret == 0)
                set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
        return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
                                    struct writeback_control *wbc,
                                    struct pagevec *pvec,
                                    int nr_pages,
                                    pgoff_t *done_index)
{
        struct inode *inode = mapping->host;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
        int i;
        int ret;

        ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
        if (ret < 0)
                return ret;

        for(i = 0; i < nr_pages; i++) {
                struct page *page = pvec->pages[i];

                *done_index = page->index;

                lock_page(page);

                if (unlikely(page->mapping != mapping)) {
continue_unlock:
                        unlock_page(page);
                        continue;
                }

                if (!PageDirty(page)) {
                        /* someone wrote it for us */
                        goto continue_unlock;
                }

                if (PageWriteback(page)) {
                        if (wbc->sync_mode != WB_SYNC_NONE)
                                wait_on_page_writeback(page);
                        else
                                goto continue_unlock;
                }

                BUG_ON(PageWriteback(page));
                if (!clear_page_dirty_for_io(page))
                        goto continue_unlock;

                trace_wbc_writepage(wbc, inode_to_bdi(inode));

                ret = __gfs2_jdata_writepage(page, wbc);
                if (unlikely(ret)) {
                        if (ret == AOP_WRITEPAGE_ACTIVATE) {
                                unlock_page(page);
                                ret = 0;
                        } else {

                                /*
                                 * done_index is set past this page,
                                 * so media errors will not choke
                                 * background writeout for the entire
                                 * file. This has consequences for
                                 * range_cyclic semantics (ie. it may
                                 * not be suitable for data integrity
                                 * writeout).
                                 */
                                *done_index = page->index + 1;
                                ret = 1;
                                break;
                        }
                }

                /*
                 * We stop writing back only if we are not doing
                 * integrity sync. In case of integrity sync we have to
                 * keep going until we have written all the pages
                 * we tagged for writeback prior to entering this loop.
                 */
                if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
                        ret = 1;
                        break;
                }

        }
        gfs2_trans_end(sdp);
        return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
                                  struct writeback_control *wbc)
{
        int ret = 0;
        int done = 0;
        struct pagevec pvec;
        int nr_pages;
        pgoff_t writeback_index;
        pgoff_t index;
        pgoff_t end;
        pgoff_t done_index;
        int cycled;
        int range_whole = 0;
        xa_mark_t tag;

        pagevec_init(&pvec);
        if (wbc->range_cyclic) {
                writeback_index = mapping->writeback_index; /* prev offset */
                index = writeback_index;
                if (index == 0)
                        cycled = 1;
                else
                        cycled = 0;
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_SHIFT;
                end = wbc->range_end >> PAGE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                cycled = 1; /* ignore range_cyclic tests */
        }
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag = PAGECACHE_TAG_TOWRITE;
        else
                tag = PAGECACHE_TAG_DIRTY;

retry:
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag_pages_for_writeback(mapping, index, end);
        done_index = index;
        while (!done && (index <= end)) {
                nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
                                tag);
                if (nr_pages == 0)
                        break;

                ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
                if (ret)
                        done = 1;
                if (ret > 0)
                        ret = 0;
                pagevec_release(&pvec);
                cond_resched();
        }

        if (!cycled && !done) {
                /*
                 * range_cyclic:
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
                cycled = 1;
                index = 0;
                end = writeback_index - 1;
                goto retry;
        }

        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = done_index;

        return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
                                 struct writeback_control *wbc)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
        int ret;

        ret = gfs2_write_cache_jdata(mapping, wbc);
        if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
                gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
                               GFS2_LFC_JDATA_WPAGES);
                ret = gfs2_write_cache_jdata(mapping, wbc);
        }
        return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
        struct buffer_head *dibh;
        u64 dsize = i_size_read(&ip->i_inode);
        void *kaddr;
        int error;

        /*
         * Due to the order of unstuffing files and ->fault(), we can be
         * asked for a zero page in the case of a stuffed file being extended,
         * so we need to supply one here. It doesn't happen often.
         */
        if (unlikely(page->index)) {
                zero_user(page, 0, PAGE_SIZE);
                SetPageUptodate(page);
                return 0;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        kaddr = kmap_atomic(page);
        if (dsize > gfs2_max_stuffed_size(ip))
                dsize = gfs2_max_stuffed_size(ip);
        memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
        memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
        kunmap_atomic(kaddr);
        flush_dcache_page(page);
        brelse(dibh);
        SetPageUptodate(page);

        return 0;
}
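/**
 * __gfs2_readpage - read a single page, choosing the appropriate read path
 * @file: Opaque argument passed through read_cache_page() (unused)
 * @page: The page to read
 *
 * Non-jdata inodes (and buffer-less jdata pages when the block size equals
 * the page size) are read via iomap, stuffed inodes are copied out of the
 * on-disk inode, and everything else goes through mpage_readpage() using
 * gfs2_block_map().
 *
 * Returns: errno
 */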
static int __gfs2_readpage(void *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        int error;

        if (!gfs2_is_jdata(ip) ||
            (i_blocksize(inode) == PAGE_SIZE && !page_has_buffers(page))) {
                error = iomap_readpage(page, &gfs2_iomap_ops);
        } else if (gfs2_is_stuffed(ip)) {
                error = stuffed_readpage(ip, page);
                unlock_page(page);
        } else {
                error = mpage_readpage(page, gfs2_block_map);
        }

        if (unlikely(gfs2_withdrawn(sdp)))
                return -EIO;

        return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
        return __gfs2_readpage(file, page);
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
                       unsigned size)
{
        struct address_space *mapping = ip->i_inode.i_mapping;
        unsigned long index = *pos >> PAGE_SHIFT;
        unsigned offset = *pos & (PAGE_SIZE - 1);
        unsigned copied = 0;
        unsigned amt;
        struct page *page;
        void *p;

        do {
                amt = size - copied;
                if (offset + size > PAGE_SIZE)
                        amt = PAGE_SIZE - offset;
                page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
                if (IS_ERR(page))
                        return PTR_ERR(page);
                p = kmap_atomic(page);
                memcpy(buf + copied, p + offset, amt);
                kunmap_atomic(p);
                put_page(page);
                copied += amt;
                index++;
                offset = 0;
        } while(copied < size);
        (*pos) += size;
        return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
        struct inode *inode = rac->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);

        if (gfs2_is_stuffed(ip))
                ;
        else if (gfs2_is_jdata(ip))
                mpage_readahead(rac, gfs2_block_map);
        else
                iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct buffer_head *m_bh;
        u64 fs_total, new_free;

        if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
                return;

        /* Total up the file system space, according to the latest rindex. */
        fs_total = gfs2_ri_total(sdp);
        if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
                goto out;

        spin_lock(&sdp->sd_statfs_spin);
        gfs2_statfs_change_in(m_sc, m_bh->b_data +
                              sizeof(struct gfs2_dinode));
        if (fs_total > (m_sc->sc_total + l_sc->sc_total))
                new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
        else
                new_free = 0;
        spin_unlock(&sdp->sd_statfs_spin);
        fs_warn(sdp, "File system extended by %llu blocks.\n",
                (unsigned long long)new_free);
        gfs2_statfs_change(sdp, new_free, new_free, 0);

        update_statfs(sdp, m_bh);
        brelse(m_bh);
out:
        sdp->sd_rindex_uptodate = 0;
        gfs2_trans_end(sdp);
}

/**
 * jdata_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int jdata_set_page_dirty(struct page *page)
{
        if (current->journal_info)
                SetPageChecked(page);
        return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder i_gh;
        sector_t dblock = 0;
        int error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return 0;

        if (!gfs2_is_stuffed(ip))
                dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

        gfs2_glock_dq_uninit(&i_gh);

        return dblock;
}

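/**
 * gfs2_discard - forget a buffer's journal state before it is invalidated
 * @sdp: The superblock
 * @bh: The buffer head being discarded
 *
 * Clears the buffer's dirty state, takes its bufdata off the log list it is
 * queued on (or removes it from the journal / ail when it cannot simply be
 * unlinked), and clears the mapped, req and new flags so the buffer can
 * safely be thrown away.
 */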
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        lock_buffer(bh);
        gfs2_log_lock(sdp);
        clear_buffer_dirty(bh);
        bd = bh->b_private;
        if (bd) {
                if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
                        list_del_init(&bd->bd_list);
                else {
                        spin_lock(&sdp->sd_ail_lock);
                        gfs2_remove_from_journal(bh, REMOVE_JDATA);
                        spin_unlock(&sdp->sd_ail_lock);
                }
        }
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        gfs2_log_unlock(sdp);
        unlock_buffer(bh);
}

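/**
 * gfs2_invalidatepage - invalidate part or all of a jdata page
 * @page: The page
 * @offset: Offset of the first byte being invalidated
 * @length: Number of bytes being invalidated
 *
 * Discards the journal state of every buffer that lies entirely within the
 * invalidated range and, when the whole page is going away, tries to
 * release the page's buffers as well.
 */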
static void gfs2_invalidatepage(struct page *page, unsigned int offset,
                                unsigned int length)
{
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        unsigned int stop = offset + length;
        int partial_page = (offset || length < PAGE_SIZE);
        struct buffer_head *bh, *head;
        unsigned long pos = 0;

        BUG_ON(!PageLocked(page));
        if (!partial_page)
                ClearPageChecked(page);
        if (!page_has_buffers(page))
                goto out;

        bh = head = page_buffers(page);
        do {
                if (pos + bh->b_size > stop)
                        return;

                if (offset <= pos)
                        gfs2_discard(sdp, bh);
                pos += bh->b_size;
                bh = bh->b_this_page;
        } while (bh != head);
out:
        if (!partial_page)
                try_to_release_page(page, 0);
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the page if the
 * buffers can be released.
 *
 * Returns: 1 if the page was put or else 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
        struct address_space *mapping = page->mapping;
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        struct buffer_head *bh, *head;
        struct gfs2_bufdata *bd;

        if (!page_has_buffers(page))
                return 0;

        /*
         * From xfs_vm_releasepage: mm accommodates an old ext3 case where
         * clean pages might not have had the dirty bit cleared.  Thus, it can
         * send actual dirty pages to ->releasepage() via shrink_active_list().
         *
         * As a workaround, we skip pages that contain dirty buffers below.
         * Once ->releasepage isn't called on dirty pages anymore, we can warn
         * on dirty buffers like we used to here again.
         */

        gfs2_log_lock(sdp);
        head = bh = page_buffers(page);
        do {
                if (atomic_read(&bh->b_count))
                        goto cannot_release;
                bd = bh->b_private;
                if (bd && bd->bd_tr)
                        goto cannot_release;
                if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
                        goto cannot_release;
                bh = bh->b_this_page;
        } while(bh != head);

        head = bh = page_buffers(page);
        do {
                bd = bh->b_private;
                if (bd) {
                        gfs2_assert_warn(sdp, bd->bd_bh == bh);
                        bd->bd_bh = NULL;
                        bh->b_private = NULL;
                        /*
                         * The bd may still be queued as a revoke, in which
                         * case we must not dequeue nor free it.
                         */
                        if (!bd->bd_blkno && !list_empty(&bd->bd_list))
                                list_del_init(&bd->bd_list);
                        if (list_empty(&bd->bd_list))
                                kmem_cache_free(gfs2_bufdata_cachep, bd);
                }

                bh = bh->b_this_page;
        } while (bh != head);
        gfs2_log_unlock(sdp);

        return try_to_free_buffers(page);

cannot_release:
        gfs2_log_unlock(sdp);
        return 0;
}

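/* Address space operations used for ordered and writeback mode inodes */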
static const struct address_space_operations gfs2_aops = {
        .writepage = gfs2_writepage,
        .writepages = gfs2_writepages,
        .readpage = gfs2_readpage,
        .readahead = gfs2_readahead,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .releasepage = iomap_releasepage,
        .invalidatepage = iomap_invalidatepage,
        .bmap = gfs2_bmap,
        .direct_IO = noop_direct_IO,
        .migratepage = iomap_migrate_page,
        .is_partially_uptodate = iomap_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};

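/* Address space operations used for journaled data (jdata) inodes */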
static const struct address_space_operations gfs2_jdata_aops = {
        .writepage = gfs2_jdata_writepage,
        .writepages = gfs2_jdata_writepages,
        .readpage = gfs2_readpage,
        .readahead = gfs2_readahead,
        .set_page_dirty = jdata_set_page_dirty,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};

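/**
 * gfs2_set_aops - select the address space operations for an inode
 * @inode: The inode
 *
 * Journaled data inodes get gfs2_jdata_aops; all other inodes get gfs2_aops.
 */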
void gfs2_set_aops(struct inode *inode)
{
        if (gfs2_is_jdata(GFS2_I(inode)))
                inode->i_mapping->a_ops = &gfs2_jdata_aops;
        else
                inode->i_mapping->a_ops = &gfs2_aops;
}