// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) International Business Machines Corp., 2000-2005
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/seq_file.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

#ifdef CONFIG_JFS_STATISTICS
static struct {
	uint	pagealloc;	/* # of page allocations */
	uint	pagefree;	/* # of page frees */
	uint	lockwait;	/* # of sleeping lock_metapage() calls */
} mpStat;
#endif

#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)

static inline void unlock_metapage(struct metapage *mp)
{
	clear_bit_unlock(META_locked, &mp->flag);
	wake_up(&mp->wait);
}

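/*
 * Slow path of lock_metapage().  The caller holds mp->page locked; we
 * drop that lock while sleeping so the I/O completion path, which needs
 * the page lock, can make progress, then re-take it before retrying.
 * Waiters queue exclusively on mp->wait, so unlock_metapage()'s
 * wake_up() wakes them one at a time.
 */
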
static inline void __lock_metapage(struct metapage *mp)
{
	DECLARE_WAITQUEUE(wait, current);

	INCREMENT(mpStat.lockwait);
	add_wait_queue_exclusive(&mp->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (metapage_locked(mp)) {
			unlock_page(mp->page);
			io_schedule();
			lock_page(mp->page);
		}
	} while (trylock_metapage(mp));
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&mp->wait, &wait);
}

/*
 * Must have mp->page locked
 */
static inline void lock_metapage(struct metapage *mp)
{
	if (trylock_metapage(mp))
		__lock_metapage(mp);
}

#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;

#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)

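/*
 * A metapage covers PSIZE (4K) bytes, so when the system page size is
 * larger than PSIZE one struct page hosts several metapages.  In that
 * case a meta_anchor hung off page_private(page) tracks the per-page
 * metapages and the I/O in flight against the page; when PSIZE equals
 * PAGE_SIZE the anchor is unnecessary and page_private(page) points at
 * the lone metapage directly (see the #else branch below).
 */
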
#if MPS_PER_PAGE > 1

struct meta_anchor {
	int mp_count;
	atomic_t io_count;
	struct metapage *mp[MPS_PER_PAGE];
};
#define mp_anchor(page) ((struct meta_anchor *)page_private(page))

static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	if (!PagePrivate(page))
		return NULL;
	return mp_anchor(page)->mp[offset >> L2PSIZE];
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a;
	int index;
	int l2mp_blocks;	/* log2 blocks per metapage */

	if (PagePrivate(page))
		a = mp_anchor(page);
	else {
		a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
		if (!a)
			return -ENOMEM;
		set_page_private(page, (unsigned long)a);
		SetPagePrivate(page);
		kmap(page);
	}

	if (mp) {
		l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
		index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
		a->mp_count++;
		a->mp[index] = mp;
	}

	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a = mp_anchor(page);
	int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
	int index;

	index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

	BUG_ON(a->mp[index] != mp);

	a->mp[index] = NULL;
	if (--a->mp_count == 0) {
		kfree(a);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		kunmap(page);
	}
}

static inline void inc_io(struct page *page)
{
	atomic_inc(&mp_anchor(page)->io_count);
}

static inline void dec_io(struct page *page, void (*handler) (struct page *))
{
	if (atomic_dec_and_test(&mp_anchor(page)->io_count))
		handler(page);
}

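/*
 * io_count counts the bios in flight against a page: inc_io() before
 * each submit_bio(), dec_io() from each end_io handler.  Whichever
 * completion drops the count to zero runs the handler, so the page is
 * only marked uptodate (reads) or taken out of writeback (writes) once
 * every bio touching it has finished.
 */
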
#else

/* One metapage per page: page_private(page) is the metapage itself */
static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	if (mp) {
		set_page_private(page, (unsigned long)mp);
		SetPagePrivate(page);
		kmap(page);
	}
	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	set_page_private(page, 0);
	ClearPagePrivate(page);
	kunmap(page);
}

#define inc_io(page) do {} while(0)
#define dec_io(page, handler) handler(page)

#endif

static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
	struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask);

	if (mp) {
		mp->lid = 0;
		mp->lsn = 0;
		mp->data = NULL;
		mp->clsn = 0;
		mp->log = NULL;
		init_waitqueue_head(&mp->wait);
	}
	return mp;
}

static inline void free_metapage(struct metapage *mp)
{
	mempool_free(mp, metapage_mempool);
}

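/*
 * Metapages come from a dedicated slab backed by a mempool with
 * METAPOOL_MIN_PAGES reserved elements, so that metadata I/O can make
 * forward progress even under memory pressure.  GFP_NOFS is used
 * throughout to avoid recursing back into the filesystem.
 */
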
int __init metapage_init(void)
{
	/*
	 * Allocate the metapage structures
	 */
	metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
					   0, 0, NULL);
	if (metapage_cache == NULL)
		return -ENOMEM;

	metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
						    metapage_cache);
	if (metapage_mempool == NULL) {
		kmem_cache_destroy(metapage_cache);
		return -ENOMEM;
	}

	return 0;
}

void metapage_exit(void)
{
	mempool_destroy(metapage_mempool);
	kmem_cache_destroy(metapage_cache);
}

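/*
 * Free a metapage only when it is completely idle: no references, not
 * held in memory by an uncommitted transaction (nohomeok), not dirty,
 * and no I/O in flight.  Otherwise leave it in place to be picked up
 * later by release_metapage() or metapage_releasepage().
 */
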
static inline void drop_metapage(struct page *page, struct metapage *mp)
{
	if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
	    test_bit(META_io, &mp->flag))
		return;
	remove_metapage(page, mp);
	INCREMENT(mpStat.pagefree);
	free_metapage(mp);
}

/*
 * Metapage address space operations
 */

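/*
 * metapage_get_blocks() maps a logical block of 'inode' to a physical
 * block on the volume, clamping *len to the remaining file size and to
 * the extent returned by xtLookup().  A return value of 0 means no
 * mapping.  The block device's direct inode (i_ino == 0) is already
 * physically addressed, so its blocks map to themselves.
 */
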
static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
				    int *len)
{
	int rc = 0;
	int xflag;
	s64 xaddr;
	sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
			       inode->i_blkbits;

	if (lblock >= file_blocks)
		return 0;
	if (lblock + *len > file_blocks)
		*len = file_blocks - lblock;

	if (inode->i_ino) {
		rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
		if ((rc == 0) && *len)
			lblock = (sector_t)xaddr;
		else
			lblock = 0;
	} /* else no mapping */

	return lblock;
}

static void last_read_complete(struct page *page)
{
	if (!PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
}

static void metapage_read_end_io(struct bio *bio)
{
	struct page *page = bio->bi_private;

	if (bio->bi_status) {
		printk(KERN_ERR "metapage_read_end_io: I/O error\n");
		SetPageError(page);
	}

	dec_io(page, last_read_complete);
	bio_put(bio);
}

static void remove_from_logsync(struct metapage *mp)
{
	struct jfs_log *log = mp->log;
	unsigned long flags;
	/*
	 * This can race.  Recheck that log hasn't been set to null, and after
	 * acquiring logsync lock, recheck lsn
	 */
	if (!log)
		return;

	LOGSYNC_LOCK(log, flags);
	if (mp->lsn) {
		mp->log = NULL;
		mp->lsn = 0;
		mp->clsn = 0;
		log->count--;
		list_del(&mp->synclist);
	}
	LOGSYNC_UNLOCK(log, flags);
}

static void last_write_complete(struct page *page)
{
	struct metapage *mp;
	unsigned int offset;

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);
		if (mp && test_bit(META_io, &mp->flag)) {
			if (mp->lsn)
				remove_from_logsync(mp);
			clear_bit(META_io, &mp->flag);
		}
		/*
		 * I'd like to call drop_metapage here, but I don't think it's
		 * safe unless I have the page locked
		 */
	}
	end_page_writeback(page);
}

static void metapage_write_end_io(struct bio *bio)
{
	struct page *page = bio->bi_private;

	BUG_ON(!PagePrivate(page));

	if (bio->bi_status) {
		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
		SetPageError(page);
	}
	dec_io(page, last_write_complete);
	bio_put(bio);
}

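/*
 * metapage_writepage() walks the metapages within the page and batches
 * dirty ones that are contiguous both in memory and on disk into a
 * single bio.  A metapage pinned by an uncommitted transaction
 * (nohomeok) must not be written yet: the page is redirtied and, if the
 * journal is idle, a flush is kicked so the page cannot stay blocked
 * indefinitely.
 */
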
static int metapage_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio = NULL;
	int block_offset;	/* block offset of mp within page */
	struct inode *inode = page->mapping->host;
	int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
	int len;
	int xlen;
	struct metapage *mp;
	int redirty = 0;
	sector_t lblock;
	int nr_underway = 0;
	sector_t pblock;
	sector_t next_block = 0;
	sector_t page_start;
	unsigned long bio_bytes = 0;
	unsigned long bio_offset = 0;
	int offset;
	int bad_blocks = 0;

	page_start = (sector_t)page->index <<
		     (PAGE_SHIFT - inode->i_blkbits);
	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp || !test_bit(META_dirty, &mp->flag))
			continue;

		if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
			redirty = 1;
			/*
			 * Make sure this page isn't blocked indefinitely.
			 * If the journal isn't undergoing I/O, push it
			 */
			if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
				jfs_flush_journal(mp->log, 0);
			continue;
		}

		clear_bit(META_dirty, &mp->flag);
		set_bit(META_io, &mp->flag);
		block_offset = offset >> inode->i_blkbits;
		lblock = page_start + block_offset;
		if (bio) {
			if (xlen && lblock == next_block) {
				/* Contiguous, in memory & on disk */
				len = min(xlen, blocks_per_mp);
				xlen -= len;
				bio_bytes += len << inode->i_blkbits;
				continue;
			}
			/* Not contiguous.  Submit previous bio */
			if (bio_add_page(bio, page, bio_bytes, bio_offset) <
			    bio_bytes)
				goto add_failed;
			/*
			 * Increment counter before submitting i/o to keep
			 * count from hitting zero before we're through
			 */
			inc_io(page);
			if (!bio->bi_iter.bi_size)
				goto dump_bio;
			submit_bio(bio);
			nr_underway++;
			bio = NULL;
		} else
			inc_io(page);
		xlen = (PAGE_SIZE - offset) >> inode->i_blkbits;
		pblock = metapage_get_blocks(inode, lblock, &xlen);
		if (!pblock) {
			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
			/*
			 * We already called inc_io(), but can't cancel it
			 * with dec_io() until we're done with the page
			 */
			bad_blocks++;
			continue;
		}
		len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);

		bio = bio_alloc(GFP_NOFS, 1);
		bio_set_dev(bio, inode->i_sb->s_bdev);
		bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
		bio->bi_end_io = metapage_write_end_io;
		bio->bi_private = page;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		/* Don't call bio_add_page yet, we may add to this vec */
		bio_offset = offset;
		bio_bytes = len << inode->i_blkbits;

		xlen -= len;
		next_block = lblock + len;
	}
	if (bio) {
		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
			goto add_failed;
		if (!bio->bi_iter.bi_size)
			goto dump_bio;
		submit_bio(bio);
		nr_underway++;
	}
	if (redirty)
		redirty_page_for_writepage(wbc, page);

	unlock_page(page);

	if (bad_blocks)
		goto err_out;

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
add_failed:
	/* We should never reach here, since we're only adding one vec */
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	goto skip;
dump_bio:
	print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
		       4, bio, sizeof(*bio), 0);
skip:
	bio_put(bio);
	unlock_page(page);
	dec_io(page, last_write_complete);
err_out:
	while (bad_blocks--)
		dec_io(page, last_write_complete);
	return -EIO;
}

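/*
 * metapage_readpage() fills the page extent by extent: each contiguous
 * disk extent gets its own bio, and unmapped blocks simply advance
 * block_offset.  io_count keeps the page locked until the last bio
 * completes.
 */
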
static int metapage_readpage(struct file *fp, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct bio *bio = NULL;
	int block_offset;
	int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
	sector_t page_start;	/* address of page in fs blocks */
	sector_t pblock;
	int xlen;
	unsigned int len;
	int offset;

	BUG_ON(!PageLocked(page));
	page_start = (sector_t)page->index <<
		     (PAGE_SHIFT - inode->i_blkbits);

	block_offset = 0;
	while (block_offset < blocks_per_page) {
		xlen = blocks_per_page - block_offset;
		pblock = metapage_get_blocks(inode, page_start + block_offset,
					     &xlen);
		if (pblock) {
			if (!PagePrivate(page))
				insert_metapage(page, NULL);
			inc_io(page);
			if (bio)
				submit_bio(bio);

			bio = bio_alloc(GFP_NOFS, 1);
			bio_set_dev(bio, inode->i_sb->s_bdev);
			bio->bi_iter.bi_sector =
				pblock << (inode->i_blkbits - 9);
			bio->bi_end_io = metapage_read_end_io;
			bio->bi_private = page;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			len = xlen << inode->i_blkbits;
			offset = block_offset << inode->i_blkbits;
			if (bio_add_page(bio, page, len, offset) < len)
				goto add_failed;
			block_offset += xlen;
		} else
			block_offset++;
	}
	if (bio)
		submit_bio(bio);
	else
		unlock_page(page);

	return 0;

add_failed:
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	bio_put(bio);
	dec_io(page, last_read_complete);
	return -EIO;
}

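/*
 * Refuse to release the page (return 0) if any metapage in it is still
 * referenced, pinned by a transaction, or dirty; otherwise tear down
 * every metapage so the page can be freed.
 */
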
static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct metapage *mp;
	int ret = 1;
	int offset;

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);
		if (!mp)
			continue;

		jfs_info("metapage_releasepage: mp = 0x%p", mp);
		if (mp->count || mp->nohomeok ||
		    test_bit(META_dirty, &mp->flag)) {
			jfs_info("count = %ld, nohomeok = %d", mp->count,
				 mp->nohomeok);
			ret = 0;
			continue;
		}
		if (mp->lsn)
			remove_from_logsync(mp);
		remove_metapage(page, mp);
		INCREMENT(mpStat.pagefree);
		free_metapage(mp);
	}
	return ret;
}

static void metapage_invalidatepage(struct page *page, unsigned int offset,
				    unsigned int length)
{
	BUG_ON(offset || length < PAGE_SIZE);

	BUG_ON(PageWriteback(page));

	metapage_releasepage(page, 0);
}

const struct address_space_operations jfs_metapage_aops = {
	.readpage	= metapage_readpage,
	.writepage	= metapage_writepage,
	.releasepage	= metapage_releasepage,
	.invalidatepage	= metapage_invalidatepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
};

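/*
 * __get_metapage() is the heart of the interface.  Callers normally use
 * the wrappers in jfs_metapage.h: read_metapage() reads an existing
 * block (new == 0) and get_metapage() takes a freshly allocated one
 * (new != 0), whose contents are zeroed rather than read.  A minimal
 * sketch of a typical caller (illustrative only):
 *
 *	struct metapage *mp;
 *
 *	mp = read_metapage(ip, lblock, PSIZE, 1);
 *	if (!mp)
 *		return -EIO;
 *	...examine or modify mp->data...
 *	mark_metapage_dirty(mp);
 *	release_metapage(mp);
 *
 * The final argument of read_metapage() selects absolute addressing
 * through the block device's direct inode.
 */
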
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
				unsigned int size, int absolute,
				unsigned long new)
{
	int l2BlocksPerPage;
	int l2bsize;
	struct address_space *mapping;
	struct metapage *mp = NULL;
	struct page *page;
	unsigned long page_index;
	unsigned long page_offset;

	jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
		 inode->i_ino, lblock, absolute);

	l2bsize = inode->i_blkbits;
	l2BlocksPerPage = PAGE_SHIFT - l2bsize;
	page_index = lblock >> l2BlocksPerPage;
	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
	if ((page_offset + size) > PAGE_SIZE) {
		jfs_err("MetaData crosses page boundary!!");
		jfs_err("lblock = %lx, size = %d", lblock, size);
		dump_stack();
		return NULL;
	}
	if (absolute)
		mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
	else {
		/*
		 * If an nfs client tries to read an inode that is larger
		 * than any existing inodes, we may try to read past the
		 * end of the inode map
		 */
		if ((lblock << inode->i_blkbits) >= inode->i_size)
			return NULL;
		mapping = inode->i_mapping;
	}

	if (new && (PSIZE == PAGE_SIZE)) {
		page = grab_cache_page(mapping, page_index);
		if (!page) {
			jfs_err("grab_cache_page failed!");
			return NULL;
		}
		SetPageUptodate(page);
	} else {
		page = read_mapping_page(mapping, page_index, NULL);
		if (IS_ERR(page) || !PageUptodate(page)) {
			jfs_err("read_mapping_page failed!");
			return NULL;
		}
		lock_page(page);
	}

	mp = page_to_mp(page, page_offset);
	if (mp) {
		if (mp->logical_size != size) {
			jfs_error(inode->i_sb,
				  "get_mp->logical_size != size\n");
			jfs_err("logical_size = %d, size = %d",
				mp->logical_size, size);
			dump_stack();
			goto unlock;
		}
		mp->count++;
		lock_metapage(mp);
		if (test_bit(META_discard, &mp->flag)) {
			if (!new) {
				jfs_error(inode->i_sb,
					  "using a discarded metapage\n");
				discard_metapage(mp);
				goto unlock;
			}
			clear_bit(META_discard, &mp->flag);
		}
	} else {
		INCREMENT(mpStat.pagealloc);
		mp = alloc_metapage(GFP_NOFS);
		if (!mp)
			goto unlock;
		mp->page = page;
		mp->sb = inode->i_sb;
		mp->flag = 0;
		mp->xflag = COMMIT_PAGE;
		mp->count = 1;
		mp->nohomeok = 0;
		mp->logical_size = size;
		mp->data = page_address(page) + page_offset;
		mp->index = lblock;
		if (unlikely(insert_metapage(page, mp))) {
			free_metapage(mp);
			goto unlock;
		}
		lock_metapage(mp);
	}

	if (new) {
		jfs_info("zeroing mp = 0x%p", mp);
		memset(mp->data, 0, PSIZE);
	}

	unlock_page(page);
	jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
	return mp;

unlock:
	unlock_page(page);
	return NULL;
}

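/*
 * The pinning helpers below manipulate mp->count under the page lock:
 * grab_metapage() takes an extra reference and the metapage lock,
 * hold_metapage() and put_metapage() bracket short accesses from the
 * transaction manager, and release_metapage() drops a reference and, on
 * the last one, writes back or drops the metapage as its flags dictate.
 */
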
void grab_metapage(struct metapage * mp)
{
	jfs_info("grab_metapage: mp = 0x%p", mp);
	get_page(mp->page);
	lock_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
}

void force_metapage(struct metapage *mp)
{
	struct page *page = mp->page;

	jfs_info("force_metapage: mp = 0x%p", mp);
	set_bit(META_forcewrite, &mp->flag);
	clear_bit(META_sync, &mp->flag);
	get_page(page);
	lock_page(page);
	set_page_dirty(page);
	if (write_one_page(page))
		jfs_error(mp->sb, "write_one_page() failed\n");
	clear_bit(META_forcewrite, &mp->flag);
	put_page(page);
}

void hold_metapage(struct metapage *mp)
{
	lock_page(mp->page);
}

void put_metapage(struct metapage *mp)
{
	if (mp->count || mp->nohomeok) {
		/* Someone else will release this */
		unlock_page(mp->page);
		return;
	}
	get_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
	release_metapage(mp);
}

void release_metapage(struct metapage * mp)
{
	struct page *page = mp->page;

	jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

	BUG_ON(!page);

	lock_page(page);
	unlock_metapage(mp);

	assert(mp->count);
	if (--mp->count || mp->nohomeok) {
		unlock_page(page);
		put_page(page);
		return;
	}

	if (test_bit(META_dirty, &mp->flag)) {
		set_page_dirty(page);
		if (test_bit(META_sync, &mp->flag)) {
			clear_bit(META_sync, &mp->flag);
			if (write_one_page(page))
				jfs_error(mp->sb, "write_one_page() failed\n");
			lock_page(page); /* write_one_page unlocks the page */
		}
	} else if (mp->lsn)	/* discard_metapage doesn't remove it */
		remove_from_logsync(mp);

	/* Try to keep metapages from using up too much memory */
	drop_metapage(page, mp);

	unlock_page(page);
	put_page(page);
}

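/*
 * __invalidate_metapages() is called when metadata blocks are freed:
 * any cached metapages covering [addr, addr + len) are marked
 * META_discard and removed from the logsync list so their stale
 * contents are never written back to disk.
 */
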
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
	sector_t lblock;
	int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
	int BlocksPerPage = 1 << l2BlocksPerPage;
	/* All callers are interested in block device's mapping */
	struct address_space *mapping =
		JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
	struct metapage *mp;
	struct page *page;
	unsigned int offset;

	/*
	 * Mark metapages to discard.  They will eventually be
	 * released, but should not be written.
	 */
	for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
	     lblock += BlocksPerPage) {
		page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
		if (!page)
			continue;
		for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
			mp = page_to_mp(page, offset);
			if (!mp)
				continue;
			if (mp->index < addr)
				continue;
			if (mp->index >= addr + len)
				break;

			clear_bit(META_dirty, &mp->flag);
			set_bit(META_discard, &mp->flag);
			if (mp->lsn)
				remove_from_logsync(mp);
		}
		unlock_page(page);
		put_page(page);
	}
}

#ifdef CONFIG_JFS_STATISTICS
int jfs_mpstat_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m,
		       "JFS Metapage statistics\n"
		       "=======================\n"
		       "page allocations = %d\n"
		       "page frees = %d\n"
		       "lock waits = %d\n",
		       mpStat.pagealloc,
		       mpStat.pagefree,
		       mpStat.lockwait);
	return 0;
}
#endif