// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

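/**
 * gfs2_trans_add_databufs - Add the data buffers of a folio to a transaction
 * @ip: The inode
 * @folio: The folio containing the buffers
 * @from: Offset within the folio of the start of the affected range
 * @len: Length of the affected range
 *
 * Marks each buffer head that overlaps the given byte range uptodate and
 * adds it to the current transaction as journaled data.
 */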
void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
			     size_t from, size_t len)
{
	struct buffer_head *head = folio_buffers(folio);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	size_t to = from + len;
	size_t start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_folio
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is the same as calling __block_write_full_folio, but it also
 * writes folios outside of i_size
 */
static int gfs2_write_jdata_folio(struct folio *folio,
				 struct writeback_control *wbc)
{
	struct inode * const inode = folio->mapping->host;
	loff_t i_size = i_size_read(inode);

	/*
	 * The folio straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (folio_pos(folio) < i_size &&
	    i_size < folio_pos(folio) + folio_size(folio))
		folio_zero_segment(folio, offset_in_folio(folio, i_size),
				folio_size(folio));

	return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc,
			wbc, end_buffer_async_write);
}

/**
 * __gfs2_jdata_write_folio - The core of jdata writepage
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * the checked flag will have been set and the transaction will have
 * already been started before this is called.
 */
static int __gfs2_jdata_write_folio(struct folio *folio,
		struct writeback_control *wbc)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (folio_test_checked(folio)) {
		folio_clear_checked(folio);
		if (!folio_buffers(folio)) {
			folio_create_empty_buffers(folio,
					inode->i_sb->s_blocksize,
					BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio));
	}
	return gfs2_write_jdata_folio(folio, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (folio_test_checked(folio) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_write_folio(folio, wbc);

out_ignore:
	folio_redirty_for_writepage(wbc, folio);
out:
	folio_unlock(folio);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
 * @mapping: The mapping
 * @wbc: The writeback control
 * @fbatch: The batch of folios
 * @done_index: Output: index after the last folio processed, for resuming
 *	writeback
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_batch(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct folio_batch *fbatch,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks;
	int i;
	int ret;
	int nr_pages = 0;
	int nr_folios = folio_batch_count(fbatch);

	for (i = 0; i < nr_folios; i++)
		nr_pages += folio_nr_pages(fbatch->folios[i]);
	nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_folios; i++) {
		struct folio *folio = fbatch->folios[i];

		*done_index = folio->index;

		folio_lock(folio);

		if (unlikely(folio->mapping != mapping)) {
continue_unlock:
			folio_unlock(folio);
			continue;
		}

		if (!folio_test_dirty(folio)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (folio_test_writeback(folio)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				folio_wait_writeback(folio);
			else
				goto continue_unlock;
		}

		BUG_ON(folio_test_writeback(folio));
		if (!folio_clear_dirty_for_io(folio))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_write_folio(folio, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				folio_unlock(folio);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = folio->index +
					folio_nr_pages(folio);
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct folio_batch fbatch;
	int nr_folios;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	folio_batch_init(&fbatch);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_folios = filemap_get_folios_tag(mapping, &index, end,
				tag, &fbatch);
		if (nr_folios == 0)
			break;

		ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
				&done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_local_page(page);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_local(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		error = iomap_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, &folio->page);
		folio_unlock(folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The number of bytes read, or errno on failure
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;

	do {
		page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(page)) {
			if (PTR_ERR(page) == -EINTR)
				continue;
			return PTR_ERR(page);
		}
		/* Clamp each copy to the end of the current page. */
		amt = size - copied;
		if (offset + amt > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		memcpy_from_page(buf + copied, page, offset, amt);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via read_folio later.
 * 2. We don't handle stuffed files here; we let read_folio do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

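	/* Stuffed files are left to read_folio; readahead is best-effort. */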
	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

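/*
 * jdata_dirty_folio - Mark a jdata folio dirty
 *
 * When dirtying a folio from within a transaction (current->journal_info
 * is set), flag it as checked so that __gfs2_jdata_write_folio() will add
 * its buffers to the transaction at writeback time.
 */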
static bool jdata_dirty_folio(struct address_space *mapping,
		struct folio *folio)
{
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

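/*
 * gfs2_discard - Remove a buffer from the journal before invalidating it
 *
 * Detaches the buffer from any journal state (a per-transaction list or
 * the AIL) and clears its mapping so that the folio it belongs to can be
 * invalidated safely.
 */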
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

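/*
 * gfs2_invalidate_folio - Invalidate part or all of a jdata folio
 *
 * Discards the buffer heads that lie entirely within the invalidated byte
 * range.  For a full-folio invalidation, the checked flag is cleared and
 * an attempt is made to release the folio's buffers as well.
 */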
static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

	bh = head;
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below.  Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

	gfs2_log_lock(sdp);
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}

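/* Address space operations for regular files (ordered and writeback modes). */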
static const struct address_space_operations gfs2_aops = {
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = iomap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

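/* Address space operations for journaled data (jdata) files. */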
static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

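/**
 * gfs2_set_aops - Set the address space operations of an inode
 * @inode: The inode
 *
 * Selects the jdata operations when the inode has journaled data enabled,
 * and the regular (iomap-based) operations otherwise.
 */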
void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}