4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/syscalls.h>
26 #include <linux/percpu.h>
27 #include <linux/slab.h>
28 #include <linux/smp_lock.h>
29 #include <linux/capability.h>
30 #include <linux/blkdev.h>
31 #include <linux/file.h>
32 #include <linux/quotaops.h>
33 #include <linux/highmem.h>
34 #include <linux/module.h>
35 #include <linux/writeback.h>
36 #include <linux/hash.h>
37 #include <linux/suspend.h>
38 #include <linux/buffer_head.h>
39 #include <linux/bio.h>
40 #include <linux/notifier.h>
41 #include <linux/cpu.h>
42 #include <linux/bitops.h>
43 #include <linux/mpage.h>
44 #include <linux/bit_spinlock.h>
46 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47 static void invalidate_bh_lrus(void);
49 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
52 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
54 bh->b_end_io = handler;
55 bh->b_private = private;
58 static int sync_buffer(void *word)
60 struct block_device *bd;
61 struct buffer_head *bh
62 = container_of(word, struct buffer_head, b_state);
67 blk_run_address_space(bd->bd_inode->i_mapping);
72 void fastcall __lock_buffer(struct buffer_head *bh)
74 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
75 TASK_UNINTERRUPTIBLE);
77 EXPORT_SYMBOL(__lock_buffer);
79 void fastcall unlock_buffer(struct buffer_head *bh)
81 clear_buffer_locked(bh);
82 smp_mb__after_clear_bit();
83 wake_up_bit(&bh->b_state, BH_Lock);
87 * Block until a buffer comes unlocked. This doesn't stop it
88 * from becoming locked again - you have to lock it yourself
89 * if you want to preserve its state.
91 void __wait_on_buffer(struct buffer_head * bh)
93 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
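/*
 * Illustrative sketch (not part of the original source): the usual pattern
 * for writing one buffer synchronously with the primitives above; this is
 * essentially what sync_dirty_buffer() elsewhere in this file does:
 *
 *	lock_buffer(bh);
 *	if (test_clear_buffer_dirty(bh)) {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_write_sync;
 *		submit_bh(WRITE, bh);
 *		wait_on_buffer(bh);
 *	} else
 *		unlock_buffer(bh);
 *
 * lock_buffer() gives exclusive ownership while the I/O is set up;
 * completion unlocks the buffer, which is what wait_on_buffer() sleeps on.
 */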
97 __clear_page_buffers(struct page *page)
99 ClearPagePrivate(page);
100 set_page_private(page, 0);
101 page_cache_release(page);
104 static void buffer_io_error(struct buffer_head *bh)
106 char b[BDEVNAME_SIZE];
108 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
109 bdevname(bh->b_bdev, b),
110 (unsigned long long)bh->b_blocknr);
114 * Default synchronous end-of-IO handler. Just mark it up-to-date and
115 * unlock the buffer. This is what ll_rw_block uses too.
117 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
120 set_buffer_uptodate(bh);
122 /* This happens due to failed READA attempts. */
123 clear_buffer_uptodate(bh);
129 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
131 char b[BDEVNAME_SIZE];
134 set_buffer_uptodate(bh);
136 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
138 printk(KERN_WARNING "lost page write due to "
140 bdevname(bh->b_bdev, b));
142 set_buffer_write_io_error(bh);
143 clear_buffer_uptodate(bh);
150 * Write out and wait upon all the dirty data associated with a block
151 * device via its mapping. Does not take the superblock lock.
153 int sync_blockdev(struct block_device *bdev)
158 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
161 EXPORT_SYMBOL(sync_blockdev);
164 * Write out and wait upon all dirty data associated with this
165 * superblock. Filesystem data as well as the underlying block
166 * device. Takes the superblock lock.
168 int fsync_super(struct super_block *sb)
170 sync_inodes_sb(sb, 0);
173 if (sb->s_dirt && sb->s_op->write_super)
174 sb->s_op->write_super(sb);
176 if (sb->s_op->sync_fs)
177 sb->s_op->sync_fs(sb, 1);
178 sync_blockdev(sb->s_bdev);
179 sync_inodes_sb(sb, 1);
181 return sync_blockdev(sb->s_bdev);
185 * Write out and wait upon all dirty data associated with this
186 * device. Filesystem data as well as the underlying block
187 * device. Takes the superblock lock.
189 int fsync_bdev(struct block_device *bdev)
191 struct super_block *sb = get_super(bdev);
193 int res = fsync_super(sb);
197 return sync_blockdev(bdev);
201 * freeze_bdev -- lock a filesystem and force it into a consistent state
202 * @bdev: blockdevice to lock
204 * This takes the block device bd_mount_mutex to make sure no new mounts
205 * happen on bdev until thaw_bdev() is called.
206 * If a superblock is found on this device, we take the s_umount semaphore
207 * on it to make sure nobody unmounts until the snapshot creation is done.
209 struct super_block *freeze_bdev(struct block_device *bdev)
211 struct super_block *sb;
213 mutex_lock(&bdev->bd_mount_mutex);
214 sb = get_super(bdev);
215 if (sb && !(sb->s_flags & MS_RDONLY)) {
216 sb->s_frozen = SB_FREEZE_WRITE;
219 sync_inodes_sb(sb, 0);
223 if (sb->s_dirt && sb->s_op->write_super)
224 sb->s_op->write_super(sb);
227 if (sb->s_op->sync_fs)
228 sb->s_op->sync_fs(sb, 1);
230 sync_blockdev(sb->s_bdev);
231 sync_inodes_sb(sb, 1);
233 sb->s_frozen = SB_FREEZE_TRANS;
236 sync_blockdev(sb->s_bdev);
238 if (sb->s_op->write_super_lockfs)
239 sb->s_op->write_super_lockfs(sb);
243 return sb; /* thaw_bdev releases s->s_umount and bd_mount_mutex */
245 EXPORT_SYMBOL(freeze_bdev);
248 * thaw_bdev -- unlock filesystem
249 * @bdev: blockdevice to unlock
250 * @sb: associated superblock
252 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
254 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
257 BUG_ON(sb->s_bdev != bdev);
259 if (sb->s_op->unlockfs)
260 sb->s_op->unlockfs(sb);
261 sb->s_frozen = SB_UNFROZEN;
263 wake_up(&sb->s_wait_unfrozen);
267 mutex_unlock(&bdev->bd_mount_mutex);
269 EXPORT_SYMBOL(thaw_bdev);
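/*
 * Illustrative sketch (not part of the original source): a snapshot
 * implementation would bracket its work with the freeze/thaw pair above;
 * foo_take_snapshot() is a hypothetical helper:
 *
 *	struct super_block *sb = freeze_bdev(bdev);
 *	foo_take_snapshot(bdev);	hypothetical: device is now consistent
 *	thaw_bdev(bdev, sb);
 *
 * freeze_bdev() may return NULL (no superblock found, or a read-only one);
 * in the full source thaw_bdev() checks sb before the BUG_ON shown above
 * and still releases bd_mount_mutex.
 */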
272 * sync everything. Start out by waking pdflush, because that writes back
273 * all queues in parallel.
275 static void do_sync(unsigned long wait)
278 sync_inodes(0); /* All mappings, inodes and their blockdevs */
280 sync_supers(); /* Write the superblocks */
281 sync_filesystems(0); /* Start syncing the filesystems */
282 sync_filesystems(wait); /* Waitingly sync the filesystems */
283 sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */
285 printk("Emergency Sync complete\n");
286 if (unlikely(laptop_mode))
287 laptop_sync_completion();
290 asmlinkage long sys_sync(void)
296 void emergency_sync(void)
298 pdflush_operation(do_sync, 0);
302 * Generic function to fsync a file.
304 * filp may be NULL if called via the msync of a vma.
307 int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
309 struct inode * inode = dentry->d_inode;
310 struct super_block * sb;
313 /* sync the inode to buffers */
314 ret = write_inode_now(inode, 0);
316 /* sync the superblock to buffers */
319 if (sb->s_op->write_super)
320 sb->s_op->write_super(sb);
323 /* .. finally sync the buffers to disk */
324 err = sync_blockdev(sb->s_bdev);
330 long do_fsync(struct file *file, int datasync)
334 struct address_space *mapping = file->f_mapping;
336 if (!file->f_op || !file->f_op->fsync) {
337 /* Why? We can still call filemap_fdatawrite */
342 current->flags |= PF_SYNCWRITE;
343 ret = filemap_fdatawrite(mapping);
346 * We need to protect against concurrent writers, which could cause
347 * livelocks in fsync_buffers_list().
349 mutex_lock(&mapping->host->i_mutex);
350 err = file->f_op->fsync(file, file->f_dentry, datasync);
353 mutex_unlock(&mapping->host->i_mutex);
354 err = filemap_fdatawait(mapping);
357 current->flags &= ~PF_SYNCWRITE;
362 static long __do_fsync(unsigned int fd, int datasync)
369 ret = do_fsync(file, datasync);
375 asmlinkage long sys_fsync(unsigned int fd)
377 return __do_fsync(fd, 0);
380 asmlinkage long sys_fdatasync(unsigned int fd)
382 return __do_fsync(fd, 1);
386 * Various filesystems appear to want __find_get_block to be non-blocking.
387 * But it's the page lock which protects the buffers. To get around this,
388 * we get exclusion from try_to_free_buffers with the blockdev mapping's
391 * Hack idea: for the blockdev mapping, private_lock contention
392 * may be quite high. This code could TryLock the page, and if that
393 * succeeds, there is no need to take private_lock. (But if
394 * private_lock is contended then so is mapping->tree_lock).
396 static struct buffer_head *
397 __find_get_block_slow(struct block_device *bdev, sector_t block)
399 struct inode *bd_inode = bdev->bd_inode;
400 struct address_space *bd_mapping = bd_inode->i_mapping;
401 struct buffer_head *ret = NULL;
403 struct buffer_head *bh;
404 struct buffer_head *head;
408 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
409 page = find_get_page(bd_mapping, index);
413 spin_lock(&bd_mapping->private_lock);
414 if (!page_has_buffers(page))
416 head = page_buffers(page);
419 if (bh->b_blocknr == block) {
424 if (!buffer_mapped(bh))
426 bh = bh->b_this_page;
427 } while (bh != head);
429 /* We might be here because some of the buffers on this page are
430 * not mapped. This is due to various races between
431 * file I/O on the block device and getblk. It gets dealt with
432 * elsewhere; don't buffer_error if we had some unmapped buffers
435 printk("__find_get_block_slow() failed. "
436 "block=%llu, b_blocknr=%llu\n",
437 (unsigned long long)block, (unsigned long long)bh->b_blocknr);
438 printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
439 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
442 spin_unlock(&bd_mapping->private_lock);
443 page_cache_release(page);
448 /* If invalidate_buffers() will trash dirty buffers, it means some kind
449 of fs corruption is going on. Trashing dirty data always imply losing
450 information that was supposed to be just stored on the physical layer
453 Thus invalidate_buffers in general usage is not allowed to trash
454 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
455 be preserved. These buffers are simply skipped.
457 We also skip buffers which are still in use. For example this can
458 happen if a userspace program is reading the block device.
460 NOTE: in the case where the user removed a removable-media disk even if
461 there's still dirty data not synced to disk (due to a bug in the device
462 driver or to a user error), by not destroying the dirty buffers we could
463 also corrupt the next media inserted; thus a parameter is
464 necessary to handle this case in the safest way possible (trying
465 not to corrupt the newly inserted disk with data belonging to
466 the old, now-corrupted disk). Also, for a ramdisk the natural way
467 to release the ramdisk memory is to destroy its dirty buffers.
469 These are two special cases. Normal usage requires the device driver
470 to issue a sync on the device (without waiting for I/O completion) and
471 then an invalidate_buffers call that doesn't trash dirty buffers.
473 For handling cache coherency with the blkdev pagecache, the 'update' case
474 has been introduced. It is needed to re-read from disk any pinned
475 buffer. NOTE: re-reading from disk is destructive so we can do it only
476 when we assume nobody is changing the buffercache under our I/O and when
477 we think the disk contains more recent information than the buffercache.
478 The update == 1 pass marks the buffers we need to update, the update == 2
479 pass does the actual I/O. */
480 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
482 invalidate_bh_lrus();
484 * FIXME: what about destroy_dirty_buffers?
485 * We really want to use invalidate_inode_pages2() for
486 * that, but not until that's cleaned up.
488 invalidate_inode_pages(bdev->bd_inode->i_mapping);
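/*
 * Illustrative sketch (not part of the original source): the "normal usage"
 * described above - sync first, then invalidate without trashing dirty
 * buffers - as a driver might do on a media-change event (the check is
 * hypothetical):
 *
 *	if (foo_media_changed(disk)) {
 *		sync_blockdev(bdev);
 *		invalidate_bdev(bdev, 0);
 *	}
 *
 * destroy_dirty_buffers == 0 is the conservative choice; 1 is for the
 * removable-media/ramdisk cases discussed above.
 */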
492 * Kick pdflush then try to free up some ZONE_NORMAL memory.
494 static void free_more_memory(void)
499 wakeup_pdflush(1024);
502 for_each_pgdat(pgdat) {
503 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
505 try_to_free_pages(zones, GFP_NOFS);
510 * I/O completion handler for block_read_full_page() - pages
511 * which come unlocked at the end of I/O.
513 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
516 struct buffer_head *first;
517 struct buffer_head *tmp;
519 int page_uptodate = 1;
521 BUG_ON(!buffer_async_read(bh));
525 set_buffer_uptodate(bh);
527 clear_buffer_uptodate(bh);
528 if (printk_ratelimit())
534 * Be _very_ careful from here on. Bad things can happen if
535 * two buffer heads end IO at almost the same time and both
536 * decide that the page is now completely done.
538 first = page_buffers(page);
539 local_irq_save(flags);
540 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
541 clear_buffer_async_read(bh);
545 if (!buffer_uptodate(tmp))
547 if (buffer_async_read(tmp)) {
548 BUG_ON(!buffer_locked(tmp));
551 tmp = tmp->b_this_page;
553 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
554 local_irq_restore(flags);
557 * If none of the buffers had errors and they are all
558 * uptodate then we can set the page uptodate.
560 if (page_uptodate && !PageError(page))
561 SetPageUptodate(page);
566 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
567 local_irq_restore(flags);
572 * Completion handler for block_write_full_page() - pages which are unlocked
573 * during I/O, and which have PageWriteback cleared upon I/O completion.
575 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
577 char b[BDEVNAME_SIZE];
579 struct buffer_head *first;
580 struct buffer_head *tmp;
583 BUG_ON(!buffer_async_write(bh));
587 set_buffer_uptodate(bh);
589 if (printk_ratelimit()) {
591 printk(KERN_WARNING "lost page write due to "
593 bdevname(bh->b_bdev, b));
595 set_bit(AS_EIO, &page->mapping->flags);
596 clear_buffer_uptodate(bh);
600 first = page_buffers(page);
601 local_irq_save(flags);
602 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
604 clear_buffer_async_write(bh);
606 tmp = bh->b_this_page;
608 if (buffer_async_write(tmp)) {
609 BUG_ON(!buffer_locked(tmp));
612 tmp = tmp->b_this_page;
614 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
615 local_irq_restore(flags);
616 end_page_writeback(page);
620 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
621 local_irq_restore(flags);
626 * If a page's buffers are under async read-in (end_buffer_async_read
627 * completion) then there is a possibility that another thread of
628 * control could lock one of the buffers after it has completed
629 * but while some of the other buffers have not completed. This
630 * locked buffer would confuse end_buffer_async_read() into not unlocking
631 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
632 * that this buffer is not under async I/O.
634 * The page comes unlocked when it has no locked buffer_async buffers
637 * PageLocked prevents anyone starting new async I/O reads any of
640 * PageWriteback is used to prevent simultaneous writeout of the same
643 * PageLocked prevents anyone from starting writeback of a page which is
644 * under read I/O (PageWriteback is only ever set against a locked page).
646 static void mark_buffer_async_read(struct buffer_head *bh)
648 bh->b_end_io = end_buffer_async_read;
649 set_buffer_async_read(bh);
652 void mark_buffer_async_write(struct buffer_head *bh)
654 bh->b_end_io = end_buffer_async_write;
655 set_buffer_async_write(bh);
657 EXPORT_SYMBOL(mark_buffer_async_write);
661 * fs/buffer.c contains helper functions for buffer-backed address spaces'
662 * fsync functions. A common requirement for buffer-based filesystems is
663 * that certain data from the backing blockdev needs to be written out for
664 * a successful fsync(). For example, ext2 indirect blocks need to be
665 * written back and waited upon before fsync() returns.
667 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
668 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
669 * management of a list of dependent buffers at ->i_mapping->private_list.
671 * Locking is a little subtle: try_to_free_buffers() will remove buffers
672 * from their controlling inode's queue when they are being freed. But
673 * try_to_free_buffers() will be operating against the *blockdev* mapping
674 * at the time, not against the S_ISREG file which depends on those buffers.
675 * So the locking for private_list is via the private_lock in the address_space
676 * which backs the buffers. Which is different from the address_space
677 * against which the buffers are listed. So for a particular address_space,
678 * mapping->private_lock does *not* protect mapping->private_list! In fact,
679 * mapping->private_list will always be protected by the backing blockdev's
682 * Which introduces a requirement: all buffers on an address_space's
683 * ->private_list must be from the same address_space: the blockdev's.
685 * address_spaces which do not place buffers at ->private_list via these
686 * utility functions are free to use private_lock and private_list for
687 * whatever they want. The only requirement is that list_empty(private_list)
688 * be true at clear_inode() time.
690 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
691 * filesystems should do that. invalidate_inode_buffers() should just go
692 * BUG_ON(!list_empty).
694 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
695 * take an address_space, not an inode. And it should be called
696 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
699 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
700 * list if it is already on a list. Because if the buffer is on a list,
701 * it *must* already be on the right one. If not, the filesystem is being
702 * silly. This will save a ton of locking. But first we have to ensure
703 * that buffers are taken *off* the old inode's list when they are freed
704 * (presumably in truncate). That requires careful auditing of all
705 * filesystems (do it inside bforget()). It could also be done by bringing
710 * The buffer's backing address_space's private_lock must be held
712 static inline void __remove_assoc_queue(struct buffer_head *bh)
714 list_del_init(&bh->b_assoc_buffers);
717 int inode_has_buffers(struct inode *inode)
719 return !list_empty(&inode->i_data.private_list);
723 * osync is designed to support O_SYNC io. It waits synchronously for
724 * all already-submitted IO to complete, but does not queue any new
725 * writes to the disk.
727 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
728 * you dirty the buffers, and then use osync_inode_buffers to wait for
729 * completion. Any other dirty buffers which are not yet queued for
730 * write will not be flushed to disk by the osync.
732 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
734 struct buffer_head *bh;
740 list_for_each_prev(p, list) {
742 if (buffer_locked(bh)) {
746 if (!buffer_uptodate(bh))
758 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
760 * @mapping: the mapping which wants those buffers written
762 * Starts I/O against the buffers at mapping->private_list, and waits upon
765 * Basically, this is a convenience function for fsync().
766 * @mapping is a file or directory which needs those buffers to be written for
767 * a successful fsync().
769 int sync_mapping_buffers(struct address_space *mapping)
771 struct address_space *buffer_mapping = mapping->assoc_mapping;
773 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
776 return fsync_buffers_list(&buffer_mapping->private_lock,
777 &mapping->private_list);
779 EXPORT_SYMBOL(sync_mapping_buffers);
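/*
 * Illustrative sketch (not part of the original source): a minimal ->fsync()
 * built on sync_mapping_buffers(), in the style of ext2. foo_fsync() is a
 * hypothetical name:
 *
 *	static int foo_fsync(struct file *file, struct dentry *dentry,
 *			     int datasync)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		int err, ret;
 *
 *		ret = sync_mapping_buffers(inode->i_mapping);
 *		if (!(inode->i_state & I_DIRTY))
 *			return ret;
 *		if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
 *			return ret;
 *		err = write_inode_now(inode, 1);
 *		return ret ? ret : err;
 *	}
 *
 * The buffers written here are the "associated" ones queued via
 * mark_buffer_dirty_inode() below - e.g. ext2's indirect blocks.
 */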
782 * Called when we've recently written block `bblock', and it is known that
783 * `bblock' was for a buffer_boundary() buffer. This means that the block at
784 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
785 * dirty, schedule it for IO. So that indirects merge nicely with their data.
787 void write_boundary_block(struct block_device *bdev,
788 sector_t bblock, unsigned blocksize)
790 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
792 if (buffer_dirty(bh))
793 ll_rw_block(WRITE, 1, &bh);
798 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
800 struct address_space *mapping = inode->i_mapping;
801 struct address_space *buffer_mapping = bh->b_page->mapping;
803 mark_buffer_dirty(bh);
804 if (!mapping->assoc_mapping) {
805 mapping->assoc_mapping = buffer_mapping;
807 if (mapping->assoc_mapping != buffer_mapping)
810 if (list_empty(&bh->b_assoc_buffers)) {
811 spin_lock(&buffer_mapping->private_lock);
812 list_move_tail(&bh->b_assoc_buffers,
813 &mapping->private_list);
814 spin_unlock(&buffer_mapping->private_lock);
817 EXPORT_SYMBOL(mark_buffer_dirty_inode);
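/*
 * Illustrative sketch (not part of the original source): a filesystem
 * queues a dependent metadata buffer (say, an indirect block it has just
 * modified) so the fsync path above will find it:
 *
 *	memcpy(bh->b_data + off, &new_entry, sizeof(new_entry));
 *	mark_buffer_dirty_inode(bh, inode);
 *
 * A later sync_mapping_buffers(inode->i_mapping) then writes the buffer
 * from ->private_list before fsync() returns. (bh, inode, off and
 * new_entry are assumed set up by the caller.)
 */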
820 * Add a page to the dirty page list.
822 * It is a sad fact of life that this function is called from several places
823 * deeply under spinlocking. It may not sleep.
825 * If the page has buffers, the uptodate buffers are set dirty, to preserve
826 * dirty-state coherency between the page and the buffers. If the page does
827 * not have buffers then when they are later attached they will all be set
830 * The buffers are dirtied before the page is dirtied. There's a small race
831 * window in which a writepage caller may see the page cleanness but not the
832 * buffer dirtiness. That's fine. If this code were to set the page dirty
833 * before the buffers, a concurrent writepage caller could clear the page dirty
834 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
835 * page on the dirty page list.
837 * We use private_lock to lock against try_to_free_buffers while using the
838 * page's buffer list. Also use this to protect against clean buffers being
839 * added to the page after it was set dirty.
841 * FIXME: may need to call ->reservepage here as well. That's rather up to the
842 * address_space though.
844 int __set_page_dirty_buffers(struct page *page)
846 struct address_space * const mapping = page->mapping;
848 spin_lock(&mapping->private_lock);
849 if (page_has_buffers(page)) {
850 struct buffer_head *head = page_buffers(page);
851 struct buffer_head *bh = head;
854 set_buffer_dirty(bh);
855 bh = bh->b_this_page;
856 } while (bh != head);
858 spin_unlock(&mapping->private_lock);
860 if (!TestSetPageDirty(page)) {
861 write_lock_irq(&mapping->tree_lock);
862 if (page->mapping) { /* Race with truncate? */
863 if (mapping_cap_account_dirty(mapping))
864 inc_page_state(nr_dirty);
865 radix_tree_tag_set(&mapping->page_tree,
867 PAGECACHE_TAG_DIRTY);
869 write_unlock_irq(&mapping->tree_lock);
870 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
875 EXPORT_SYMBOL(__set_page_dirty_buffers);
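/*
 * Illustrative note (not part of the original source): this function is the
 * fallback used by set_page_dirty() when an address_space supplies no
 * ->set_page_dirty of its own - roughly, paraphrased:
 *
 *	int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
 *	if (spd)
 *		return (*spd)(page);
 *	return __set_page_dirty_buffers(page);
 *
 * so ordinary buffer-backed filesystems simply leave that method NULL.
 */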
878 * Write out and wait upon a list of buffers.
880 * We have conflicting pressures: we want to make sure that all
881 * initially dirty buffers get waited on, but that any subsequently
882 * dirtied buffers don't. After all, we don't want fsync to last
883 * forever if somebody is actively writing to the file.
885 * Do this in two main stages: first we copy dirty buffers to a
886 * temporary inode list, queueing the writes as we go. Then we clean
887 * up, waiting for those writes to complete.
889 * During this second stage, any subsequent updates to the file may end
890 * up refiling the buffer on the original inode's dirty list again, so
891 * there is a chance we will end up with a buffer queued for write but
892 * not yet completed on that list. So, as a final cleanup we go through
893 * the osync code to catch these locked, dirty buffers without requeuing
894 * any newly dirty buffers for write.
896 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
898 struct buffer_head *bh;
899 struct list_head tmp;
902 INIT_LIST_HEAD(&tmp);
905 while (!list_empty(list)) {
906 bh = BH_ENTRY(list->next);
907 list_del_init(&bh->b_assoc_buffers);
908 if (buffer_dirty(bh) || buffer_locked(bh)) {
909 list_add(&bh->b_assoc_buffers, &tmp);
910 if (buffer_dirty(bh)) {
914 * Ensure any pending I/O completes so that
915 * ll_rw_block() actually writes the current
916 * contents - it is a noop if I/O is still in
917 * flight on potentially older contents.
919 ll_rw_block(SWRITE, 1, &bh);
926 while (!list_empty(&tmp)) {
927 bh = BH_ENTRY(tmp.prev);
928 __remove_assoc_queue(bh);
932 if (!buffer_uptodate(bh))
939 err2 = osync_buffers_list(lock, list);
947 * Invalidate any and all dirty buffers on a given inode. We are
948 * probably unmounting the fs, but that doesn't mean we have already
949 * done a sync(). Just drop the buffers from the inode list.
951 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
952 * assumes that all the buffers are against the blockdev. Not true
955 void invalidate_inode_buffers(struct inode *inode)
957 if (inode_has_buffers(inode)) {
958 struct address_space *mapping = &inode->i_data;
959 struct list_head *list = &mapping->private_list;
960 struct address_space *buffer_mapping = mapping->assoc_mapping;
962 spin_lock(&buffer_mapping->private_lock);
963 while (!list_empty(list))
964 __remove_assoc_queue(BH_ENTRY(list->next));
965 spin_unlock(&buffer_mapping->private_lock);
970 * Remove any clean buffers from the inode's buffer list. This is called
971 * when we're trying to free the inode itself. Those buffers can pin it.
973 * Returns true if all buffers were removed.
975 int remove_inode_buffers(struct inode *inode)
979 if (inode_has_buffers(inode)) {
980 struct address_space *mapping = &inode->i_data;
981 struct list_head *list = &mapping->private_list;
982 struct address_space *buffer_mapping = mapping->assoc_mapping;
984 spin_lock(&buffer_mapping->private_lock);
985 while (!list_empty(list)) {
986 struct buffer_head *bh = BH_ENTRY(list->next);
987 if (buffer_dirty(bh)) {
991 __remove_assoc_queue(bh);
993 spin_unlock(&buffer_mapping->private_lock);
999 * Create the appropriate buffers when given a page for the data area and
1000 * the size of each buffer. Use the bh->b_this_page linked list to
1001 * follow the buffers created. Return NULL if unable to create more
1004 * The retry flag is used to differentiate async IO (paging, swapping)
1005 * which may not fail from ordinary buffer allocations.
1007 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
1010 struct buffer_head *bh, *head;
1016 while ((offset -= size) >= 0) {
1017 bh = alloc_buffer_head(GFP_NOFS);
1022 bh->b_this_page = head;
1027 atomic_set(&bh->b_count, 0);
1028 bh->b_private = NULL;
1031 /* Link the buffer to its page */
1032 set_bh_page(bh, page, offset);
1034 init_buffer(bh, NULL, NULL);
1038 * In case anything failed, we just free everything we got.
1044 head = head->b_this_page;
1045 free_buffer_head(bh);
1050 * Return failure for non-async IO requests. Async IO requests
1051 * are not allowed to fail, so we have to wait until buffer heads
1052 * become available. But we don't want tasks sleeping with
1053 * partially complete buffers, so all were released above.
1058 /* We're _really_ low on memory. Now we just
1059 * wait for old buffer heads to become free due to
1060 * finishing IO. Since this is an async request and
1061 * the reserve list is empty, we're sure there are
1062 * async buffer heads in use.
1067 EXPORT_SYMBOL_GPL(alloc_page_buffers);
1070 link_dev_buffers(struct page *page, struct buffer_head *head)
1072 struct buffer_head *bh, *tail;
1077 bh = bh->b_this_page;
1079 tail->b_this_page = head;
1080 attach_page_buffers(page, head);
1084 * Initialise the state of a blockdev page's buffers.
1087 init_page_buffers(struct page *page, struct block_device *bdev,
1088 sector_t block, int size)
1090 struct buffer_head *head = page_buffers(page);
1091 struct buffer_head *bh = head;
1092 int uptodate = PageUptodate(page);
1095 if (!buffer_mapped(bh)) {
1096 init_buffer(bh, NULL, NULL);
1098 bh->b_blocknr = block;
1100 set_buffer_uptodate(bh);
1101 set_buffer_mapped(bh);
1104 bh = bh->b_this_page;
1105 } while (bh != head);
1109 * Create the page-cache page that contains the requested block.
1111 * This is used purely for blockdev mappings.
1113 static struct page *
1114 grow_dev_page(struct block_device *bdev, sector_t block,
1115 pgoff_t index, int size)
1117 struct inode *inode = bdev->bd_inode;
1119 struct buffer_head *bh;
1121 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1125 if (!PageLocked(page))
1128 if (page_has_buffers(page)) {
1129 bh = page_buffers(page);
1130 if (bh->b_size == size) {
1131 init_page_buffers(page, bdev, block, size);
1134 if (!try_to_free_buffers(page))
1139 * Allocate some buffers for this page
1141 bh = alloc_page_buffers(page, size, 0);
1146 * Link the page to the buffers and initialise them. Take the
1147 * lock to be atomic wrt __find_get_block(), which does not
1148 * run under the page lock.
1150 spin_lock(&inode->i_mapping->private_lock);
1151 link_dev_buffers(page, bh);
1152 init_page_buffers(page, bdev, block, size);
1153 spin_unlock(&inode->i_mapping->private_lock);
1159 page_cache_release(page);
1164 * Create buffers for the specified block device block's page. If
1165 * that page was dirty, the buffers are set dirty also.
1167 * Except that's a bug. Attaching dirty buffers to a dirty
1168 * blockdev's page can result in filesystem corruption, because
1169 * some of those buffers may be aliases of filesystem data.
1170 * grow_dev_page() will go BUG() if this happens.
1173 grow_buffers(struct block_device *bdev, sector_t block, int size)
1182 } while ((size << sizebits) < PAGE_SIZE);
1184 index = block >> sizebits;
1185 block = index << sizebits;
1187 /* Create a page with the proper size buffers.. */
1188 page = grow_dev_page(bdev, block, index, size);
1192 page_cache_release(page);
1196 static struct buffer_head *
1197 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1199 /* Size must be a multiple of the hard sector size */
1200 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1201 (size < 512 || size > PAGE_SIZE))) {
1202 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1204 printk(KERN_ERR "hardsect size: %d\n",
1205 bdev_hardsect_size(bdev));
1212 struct buffer_head * bh;
1214 bh = __find_get_block(bdev, block, size);
1218 if (!grow_buffers(bdev, block, size))
1224 * The relationship between dirty buffers and dirty pages:
1226 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1227 * the page is tagged dirty in its radix tree.
1229 * At all times, the dirtiness of the buffers represents the dirtiness of
1230 * subsections of the page. If the page has buffers, the page dirty bit is
1231 * merely a hint about the true dirty state.
1233 * When a page is set dirty in its entirety, all its buffers are marked dirty
1234 * (if the page has buffers).
1236 * When a buffer is marked dirty, its page is dirtied, but the page's other
1239 * Also, when blockdev buffers are explicitly read with bread(), they
1240 * individually become uptodate. But their backing page remains not
1241 * uptodate - even if all of its buffers are uptodate. A subsequent
1242 * block_read_full_page() against that page will discover all the uptodate
1243 * buffers, will set the page uptodate and will perform no I/O.
1247 * mark_buffer_dirty - mark a buffer_head as needing writeout
1248 * @bh: the buffer_head to mark dirty
1250 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1251 * backing page dirty, then tag the page as dirty in its address_space's radix
1252 * tree and then attach the address_space's inode to its superblock's dirty
1255 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1256 * mapping->tree_lock and the global inode_lock.
1258 void fastcall mark_buffer_dirty(struct buffer_head *bh)
1260 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1261 __set_page_dirty_nobuffers(bh->b_page);
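/*
 * Illustrative sketch (not part of the original source): the canonical
 * read-modify-write cycle for a metadata block, tying mark_buffer_dirty()
 * to the lookup/release helpers defined later in this file:
 *
 *	struct buffer_head *bh = __bread(bdev, block, size);
 *	if (!bh)
 *		return -EIO;
 *	...modify bh->b_data...
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 *
 * Nothing here forces the block out to disk; writeback happens later via
 * pdflush, or explicitly through the fsync paths above.
 */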
1265 * Decrement a buffer_head's reference count. If all buffers against a page
1266 * have zero reference count, are clean and unlocked, and if the page is clean
1267 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1268 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1269 * a page but it ends up not being freed, and buffers may later be reattached).
1271 void __brelse(struct buffer_head * buf)
1273 if (atomic_read(&buf->b_count)) {
1277 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1282 * bforget() is like brelse(), except it discards any
1283 * potentially dirty data.
1285 void __bforget(struct buffer_head *bh)
1287 clear_buffer_dirty(bh);
1288 if (!list_empty(&bh->b_assoc_buffers)) {
1289 struct address_space *buffer_mapping = bh->b_page->mapping;
1291 spin_lock(&buffer_mapping->private_lock);
1292 list_del_init(&bh->b_assoc_buffers);
1293 spin_unlock(&buffer_mapping->private_lock);
1298 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1301 if (buffer_uptodate(bh)) {
1306 bh->b_end_io = end_buffer_read_sync;
1307 submit_bh(READ, bh);
1309 if (buffer_uptodate(bh))
1317 * Per-cpu buffer LRU implementation, used to reduce the cost of __find_get_block().
1318 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1319 * refcount elevated by one when they're in an LRU. A buffer can only appear
1320 * once in a particular CPU's LRU. A single buffer can be present in multiple
1321 * CPU's LRUs at the same time.
1323 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1324 * sb_find_get_block().
1326 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1327 * a local interrupt disable for that.
1330 #define BH_LRU_SIZE 8
1333 struct buffer_head *bhs[BH_LRU_SIZE];
1336 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1339 #define bh_lru_lock() local_irq_disable()
1340 #define bh_lru_unlock() local_irq_enable()
1342 #define bh_lru_lock() preempt_disable()
1343 #define bh_lru_unlock() preempt_enable()
1346 static inline void check_irqs_on(void)
1348 #ifdef irqs_disabled
1349 BUG_ON(irqs_disabled());
1354 * The LRU management algorithm is dopey-but-simple. Sorry.
1356 static void bh_lru_install(struct buffer_head *bh)
1358 struct buffer_head *evictee = NULL;
1363 lru = &__get_cpu_var(bh_lrus);
1364 if (lru->bhs[0] != bh) {
1365 struct buffer_head *bhs[BH_LRU_SIZE];
1371 for (in = 0; in < BH_LRU_SIZE; in++) {
1372 struct buffer_head *bh2 = lru->bhs[in];
1377 if (out >= BH_LRU_SIZE) {
1378 BUG_ON(evictee != NULL);
1385 while (out < BH_LRU_SIZE)
1387 memcpy(lru->bhs, bhs, sizeof(bhs));
1396 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1398 static struct buffer_head *
1399 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1401 struct buffer_head *ret = NULL;
1407 lru = &__get_cpu_var(bh_lrus);
1408 for (i = 0; i < BH_LRU_SIZE; i++) {
1409 struct buffer_head *bh = lru->bhs[i];
1411 if (bh && bh->b_bdev == bdev &&
1412 bh->b_blocknr == block && bh->b_size == size) {
1415 lru->bhs[i] = lru->bhs[i - 1];
1430 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1431 * it in the LRU and mark it as accessed. If it is not present then return
1434 struct buffer_head *
1435 __find_get_block(struct block_device *bdev, sector_t block, int size)
1437 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1440 bh = __find_get_block_slow(bdev, block);
1448 EXPORT_SYMBOL(__find_get_block);
1451 * __getblk will locate (and, if necessary, create) the buffer_head
1452 * which corresponds to the passed block_device, block and size. The
1453 * returned buffer has its reference count incremented.
1455 * __getblk() cannot fail - it just keeps trying. If you pass it an
1456 * illegal block number, __getblk() will happily return a buffer_head
1457 * which represents the non-existent block. Very weird.
1459 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1460 * attempt is failing. FIXME, perhaps?
1462 struct buffer_head *
1463 __getblk(struct block_device *bdev, sector_t block, int size)
1465 struct buffer_head *bh = __find_get_block(bdev, block, size);
1469 bh = __getblk_slow(bdev, block, size);
1472 EXPORT_SYMBOL(__getblk);
1475 * Do async read-ahead on a buffer.
1477 void __breadahead(struct block_device *bdev, sector_t block, int size)
1479 struct buffer_head *bh = __getblk(bdev, block, size);
1481 ll_rw_block(READA, 1, &bh);
1485 EXPORT_SYMBOL(__breadahead);
1488 * __bread() - reads a specified block and returns the bh
1489 * @bdev: the block_device to read from
1490 * @block: number of block
1491 * @size: size (in bytes) to read
1493 * Reads a specified block, and returns buffer head that contains it.
1494 * It returns NULL if the block was unreadable.
1496 struct buffer_head *
1497 __bread(struct block_device *bdev, sector_t block, int size)
1499 struct buffer_head *bh = __getblk(bdev, block, size);
1501 if (likely(bh) && !buffer_uptodate(bh))
1502 bh = __bread_slow(bh);
1505 EXPORT_SYMBOL(__bread);
1508 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1509 * This doesn't race because it runs in each cpu either in irq
1510 * or with preempt disabled.
1512 static void invalidate_bh_lru(void *arg)
1514 struct bh_lru *b = &get_cpu_var(bh_lrus);
1517 for (i = 0; i < BH_LRU_SIZE; i++) {
1521 put_cpu_var(bh_lrus);
1524 static void invalidate_bh_lrus(void)
1526 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1529 void set_bh_page(struct buffer_head *bh,
1530 struct page *page, unsigned long offset)
1533 if (offset >= PAGE_SIZE)
1535 if (PageHighMem(page))
1537 * This catches illegal uses and preserves the offset:
1539 bh->b_data = (char *)(0 + offset);
1541 bh->b_data = page_address(page) + offset;
1543 EXPORT_SYMBOL(set_bh_page);
1546 * Called when truncating a buffer on a page completely.
1548 static void discard_buffer(struct buffer_head * bh)
1551 clear_buffer_dirty(bh);
1553 clear_buffer_mapped(bh);
1554 clear_buffer_req(bh);
1555 clear_buffer_new(bh);
1556 clear_buffer_delay(bh);
1561 * try_to_release_page() - release old fs-specific metadata on a page
1563 * @page: the page which the kernel is trying to free
1564 * @gfp_mask: memory allocation flags (and I/O mode)
1566 * The address_space is to try to release any data against the page
1567 * (presumably at page->private). If the release was successful, return `1'.
1568 * Otherwise return zero.
1570 * The @gfp_mask argument specifies whether I/O may be performed to release
1571 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1573 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1575 int try_to_release_page(struct page *page, gfp_t gfp_mask)
1577 struct address_space * const mapping = page->mapping;
1579 BUG_ON(!PageLocked(page));
1580 if (PageWriteback(page))
1583 if (mapping && mapping->a_ops->releasepage)
1584 return mapping->a_ops->releasepage(page, gfp_mask);
1585 return try_to_free_buffers(page);
1587 EXPORT_SYMBOL(try_to_release_page);
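/*
 * Illustrative sketch (not part of the original source): a filesystem that
 * must veto freeing while it holds per-page private state might wrap the
 * default like this (foo_page_pinned() is hypothetical):
 *
 *	static int foo_releasepage(struct page *page, gfp_t gfp_mask)
 *	{
 *		if (foo_page_pinned(page))
 *			return 0;
 *		return try_to_free_buffers(page);
 *	}
 *
 * Filesystems with no such state leave ->releasepage NULL and get the bare
 * try_to_free_buffers() behaviour above.
 */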
1590 * block_invalidatepage - invalidate part or all of a buffer-backed page
1592 * @page: the page which is affected
1593 * @offset: the index of the truncation point
1595 * block_invalidatepage() is called when all or part of the page has become
1596 * invalidated by a truncate operation.
1598 * block_invalidatepage() does not have to release all buffers, but it must
1599 * ensure that no dirty buffer is left outside @offset and that no I/O
1600 * is underway against any of the blocks which are outside the truncation
1601 * point. Because the caller is about to free (and possibly reuse) those
1604 int block_invalidatepage(struct page *page, unsigned long offset)
1606 struct buffer_head *head, *bh, *next;
1607 unsigned int curr_off = 0;
1610 BUG_ON(!PageLocked(page));
1611 if (!page_has_buffers(page))
1614 head = page_buffers(page);
1617 unsigned int next_off = curr_off + bh->b_size;
1618 next = bh->b_this_page;
1621 * is this block fully invalidated?
1623 if (offset <= curr_off)
1625 curr_off = next_off;
1627 } while (bh != head);
1630 * We release buffers only if the entire page is being invalidated.
1631 * The get_block cached value has been unconditionally invalidated,
1632 * so real IO is not possible anymore.
1635 ret = try_to_release_page(page, 0);
1639 EXPORT_SYMBOL(block_invalidatepage);
1641 int do_invalidatepage(struct page *page, unsigned long offset)
1643 int (*invalidatepage)(struct page *, unsigned long);
1644 invalidatepage = page->mapping->a_ops->invalidatepage;
1645 if (invalidatepage == NULL)
1646 invalidatepage = block_invalidatepage;
1647 return (*invalidatepage)(page, offset);
1651 * We attach and possibly dirty the buffers atomically wrt
1652 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1653 * is already excluded via the page lock.
1655 void create_empty_buffers(struct page *page,
1656 unsigned long blocksize, unsigned long b_state)
1658 struct buffer_head *bh, *head, *tail;
1660 head = alloc_page_buffers(page, blocksize, 1);
1663 bh->b_state |= b_state;
1665 bh = bh->b_this_page;
1667 tail->b_this_page = head;
1669 spin_lock(&page->mapping->private_lock);
1670 if (PageUptodate(page) || PageDirty(page)) {
1673 if (PageDirty(page))
1674 set_buffer_dirty(bh);
1675 if (PageUptodate(page))
1676 set_buffer_uptodate(bh);
1677 bh = bh->b_this_page;
1678 } while (bh != head);
1680 attach_page_buffers(page, head);
1681 spin_unlock(&page->mapping->private_lock);
1683 EXPORT_SYMBOL(create_empty_buffers);
1686 * We are taking a block for data and we don't want any output from any
1687 * buffer-cache aliases from the moment that function returns until
1688 * the moment when something explicitly marks the buffer
1689 * dirty (hopefully that will not happen until we free that block ;-)
1690 * We don't even need to mark it not-uptodate - nobody can expect
1691 * anything from a newly allocated buffer anyway. We used to use
1692 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1693 * don't want to mark the alias unmapped, for example - it would confuse
1694 * anyone who might pick it with bread() afterwards...
1696 * Also.. Note that bforget() doesn't lock the buffer. So there can
1697 * be writeout I/O going on against recently-freed buffers. We don't
1698 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1699 * only if we really need to. That happens here.
1701 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1703 struct buffer_head *old_bh;
1707 old_bh = __find_get_block_slow(bdev, block);
1709 clear_buffer_dirty(old_bh);
1710 wait_on_buffer(old_bh);
1711 clear_buffer_req(old_bh);
1715 EXPORT_SYMBOL(unmap_underlying_metadata);
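/*
 * Illustrative sketch (not part of the original source): where
 * unmap_underlying_metadata() fits when a get_block implementation has just
 * allocated a block (foo_allocate_block() is hypothetical):
 *
 *	static int foo_get_block(struct inode *inode, sector_t iblock,
 *				 struct buffer_head *bh_result, int create)
 *	{
 *		sector_t phys = foo_allocate_block(inode, iblock);
 *
 *		map_bh(bh_result, inode->i_sb, phys);
 *		set_buffer_new(bh_result);
 *		return 0;
 *	}
 *
 * The caller (e.g. __block_prepare_write() below) sees BH_New and calls
 * unmap_underlying_metadata(bh_result->b_bdev, bh_result->b_blocknr) to
 * kill any stale blockdev-pagecache alias of the new block.
 */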
1718 * NOTE! All mapped/uptodate combinations are valid:
1720 * Mapped Uptodate Meaning
1722 * No No "unknown" - must do get_block()
1723 * No Yes "hole" - zero-filled
1724 * Yes No "allocated" - allocated on disk, not read in
1725 * Yes Yes "valid" - allocated and up-to-date in memory.
1727 * "Dirty" is valid only with the last case (mapped+uptodate).
1731 * While block_write_full_page is writing back the dirty buffers under
1732 * the page lock, whoever dirtied the buffers may decide to clean them
1733 * again at any time. We handle that by only looking at the buffer
1734 * state inside lock_buffer().
1736 * If block_write_full_page() is called for regular writeback
1737 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1738 * locked buffer. This only can happen if someone has written the buffer
1739 * directly, with submit_bh(). At the address_space level PageWriteback
1740 * prevents this contention from occurring.
1742 static int __block_write_full_page(struct inode *inode, struct page *page,
1743 get_block_t *get_block, struct writeback_control *wbc)
1747 sector_t last_block;
1748 struct buffer_head *bh, *head;
1749 int nr_underway = 0;
1751 BUG_ON(!PageLocked(page));
1753 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1755 if (!page_has_buffers(page)) {
1756 create_empty_buffers(page, 1 << inode->i_blkbits,
1757 (1 << BH_Dirty)|(1 << BH_Uptodate));
1761 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1762 * here, and the (potentially unmapped) buffers may become dirty at
1763 * any time. If a buffer becomes dirty here after we've inspected it
1764 * then we just miss that fact, and the page stays dirty.
1766 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1767 * handle that here by just cleaning them.
1770 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1771 head = page_buffers(page);
1775 * Get all the dirty buffers mapped to disk addresses and
1776 * handle any aliases from the underlying blockdev's mapping.
1779 if (block > last_block) {
1781 * mapped buffers outside i_size will occur, because
1782 * this page can be outside i_size when there is a
1783 * truncate in progress.
1786 * The buffer was zeroed by block_write_full_page()
1788 clear_buffer_dirty(bh);
1789 set_buffer_uptodate(bh);
1790 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1791 err = get_block(inode, block, bh, 1);
1794 if (buffer_new(bh)) {
1795 /* blockdev mappings never come here */
1796 clear_buffer_new(bh);
1797 unmap_underlying_metadata(bh->b_bdev,
1801 bh = bh->b_this_page;
1803 } while (bh != head);
1806 if (!buffer_mapped(bh))
1809 * If it's a fully non-blocking write attempt and we cannot
1810 * lock the buffer then redirty the page. Note that this can
1811 * potentially cause a busy-wait loop from pdflush and kswapd
1812 * activity, but those code paths have their own higher-level
1815 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1817 } else if (test_set_buffer_locked(bh)) {
1818 redirty_page_for_writepage(wbc, page);
1821 if (test_clear_buffer_dirty(bh)) {
1822 mark_buffer_async_write(bh);
1826 } while ((bh = bh->b_this_page) != head);
1829 * The page and its buffers are protected by PageWriteback(), so we can
1830 * drop the bh refcounts early.
1832 BUG_ON(PageWriteback(page));
1833 set_page_writeback(page);
1836 struct buffer_head *next = bh->b_this_page;
1837 if (buffer_async_write(bh)) {
1838 submit_bh(WRITE, bh);
1842 } while (bh != head);
1847 if (nr_underway == 0) {
1849 * The page was marked dirty, but the buffers were
1850 * clean. Someone wrote them back by hand with
1851 * ll_rw_block/submit_bh. A rare case.
1855 if (!buffer_uptodate(bh)) {
1859 bh = bh->b_this_page;
1860 } while (bh != head);
1862 SetPageUptodate(page);
1863 end_page_writeback(page);
1865 * The page and buffer_heads can be released at any time from
1868 wbc->pages_skipped++; /* We didn't write this page */
1874 * ENOSPC, or some other error. We may already have added some
1875 * blocks to the file, so we need to write these out to avoid
1876 * exposing stale data.
1877 * The page is currently locked and not marked for writeback
1880 /* Recovery: lock and submit the mapped buffers */
1882 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1884 mark_buffer_async_write(bh);
1887 * The buffer may have been set dirty during
1888 * attachment to a dirty page.
1890 clear_buffer_dirty(bh);
1892 } while ((bh = bh->b_this_page) != head);
1894 BUG_ON(PageWriteback(page));
1895 set_page_writeback(page);
1898 struct buffer_head *next = bh->b_this_page;
1899 if (buffer_async_write(bh)) {
1900 clear_buffer_dirty(bh);
1901 submit_bh(WRITE, bh);
1905 } while (bh != head);
1909 static int __block_prepare_write(struct inode *inode, struct page *page,
1910 unsigned from, unsigned to, get_block_t *get_block)
1912 unsigned block_start, block_end;
1915 unsigned blocksize, bbits;
1916 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1918 BUG_ON(!PageLocked(page));
1919 BUG_ON(from > PAGE_CACHE_SIZE);
1920 BUG_ON(to > PAGE_CACHE_SIZE);
1923 blocksize = 1 << inode->i_blkbits;
1924 if (!page_has_buffers(page))
1925 create_empty_buffers(page, blocksize, 0);
1926 head = page_buffers(page);
1928 bbits = inode->i_blkbits;
1929 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1931 for(bh = head, block_start = 0; bh != head || !block_start;
1932 block++, block_start=block_end, bh = bh->b_this_page) {
1933 block_end = block_start + blocksize;
1934 if (block_end <= from || block_start >= to) {
1935 if (PageUptodate(page)) {
1936 if (!buffer_uptodate(bh))
1937 set_buffer_uptodate(bh);
1942 clear_buffer_new(bh);
1943 if (!buffer_mapped(bh)) {
1944 err = get_block(inode, block, bh, 1);
1947 if (buffer_new(bh)) {
1948 unmap_underlying_metadata(bh->b_bdev,
1950 if (PageUptodate(page)) {
1951 set_buffer_uptodate(bh);
1954 if (block_end > to || block_start < from) {
1957 kaddr = kmap_atomic(page, KM_USER0);
1961 if (block_start < from)
1962 memset(kaddr+block_start,
1963 0, from-block_start);
1964 flush_dcache_page(page);
1965 kunmap_atomic(kaddr, KM_USER0);
1970 if (PageUptodate(page)) {
1971 if (!buffer_uptodate(bh))
1972 set_buffer_uptodate(bh);
1975 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1976 (block_start < from || block_end > to)) {
1977 ll_rw_block(READ, 1, &bh);
1982 * If we issued read requests - let them complete.
1984 while(wait_bh > wait) {
1985 wait_on_buffer(*--wait_bh);
1986 if (!buffer_uptodate(*wait_bh))
1993 clear_buffer_new(bh);
1994 } while ((bh = bh->b_this_page) != head);
1999 * Zero out any newly allocated blocks to avoid exposing stale
2000 * data. If BH_New is set, we know that the block was newly
2001 * allocated in the above loop.
2006 block_end = block_start+blocksize;
2007 if (block_end <= from)
2009 if (block_start >= to)
2011 if (buffer_new(bh)) {
2014 clear_buffer_new(bh);
2015 kaddr = kmap_atomic(page, KM_USER0);
2016 memset(kaddr+block_start, 0, bh->b_size);
2017 kunmap_atomic(kaddr, KM_USER0);
2018 set_buffer_uptodate(bh);
2019 mark_buffer_dirty(bh);
2022 block_start = block_end;
2023 bh = bh->b_this_page;
2024 } while (bh != head);
2028 static int __block_commit_write(struct inode *inode, struct page *page,
2029 unsigned from, unsigned to)
2031 unsigned block_start, block_end;
2034 struct buffer_head *bh, *head;
2036 blocksize = 1 << inode->i_blkbits;
2038 for(bh = head = page_buffers(page), block_start = 0;
2039 bh != head || !block_start;
2040 block_start=block_end, bh = bh->b_this_page) {
2041 block_end = block_start + blocksize;
2042 if (block_end <= from || block_start >= to) {
2043 if (!buffer_uptodate(bh))
2046 set_buffer_uptodate(bh);
2047 mark_buffer_dirty(bh);
2052 * If this is a partial write which happened to make all buffers
2053 * uptodate then we can optimize away a bogus readpage() for
2054 * the next read(). Here we 'discover' whether the page went
2055 * uptodate as a result of this (potentially partial) write.
2058 SetPageUptodate(page);
2063 * Generic "read page" function for block devices that have the normal
2064 * get_block functionality. This is most of the block device filesystems.
2065 * Reads the page asynchronously --- the unlock_buffer() and
2066 * set/clear_buffer_uptodate() functions propagate buffer state into the
2067 * page struct once IO has completed.
2069 int block_read_full_page(struct page *page, get_block_t *get_block)
2071 struct inode *inode = page->mapping->host;
2072 sector_t iblock, lblock;
2073 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2074 unsigned int blocksize;
2076 int fully_mapped = 1;
2078 BUG_ON(!PageLocked(page));
2079 blocksize = 1 << inode->i_blkbits;
2080 if (!page_has_buffers(page))
2081 create_empty_buffers(page, blocksize, 0);
2082 head = page_buffers(page);
2084 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2085 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2091 if (buffer_uptodate(bh))
2094 if (!buffer_mapped(bh)) {
2098 if (iblock < lblock) {
2099 err = get_block(inode, iblock, bh, 0);
2103 if (!buffer_mapped(bh)) {
2104 void *kaddr = kmap_atomic(page, KM_USER0);
2105 memset(kaddr + i * blocksize, 0, blocksize);
2106 flush_dcache_page(page);
2107 kunmap_atomic(kaddr, KM_USER0);
2109 set_buffer_uptodate(bh);
2113 * get_block() might have updated the buffer
2116 if (buffer_uptodate(bh))
2120 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2123 SetPageMappedToDisk(page);
2127 * All buffers are uptodate - we can set the page uptodate
2128 * as well. But not if get_block() returned an error.
2130 if (!PageError(page))
2131 SetPageUptodate(page);
2136 /* Stage two: lock the buffers */
2137 for (i = 0; i < nr; i++) {
2140 mark_buffer_async_read(bh);
2144 * Stage three: start the I/O. Check for uptodateness
2145 * inside the buffer lock in case another process reading
2146 * the underlying blockdev brought it uptodate (the sct fix).
2148 for (i = 0; i < nr; i++) {
2150 if (buffer_uptodate(bh))
2151 end_buffer_async_read(bh, 1);
2153 submit_bh(READ, bh);
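/*
 * Illustrative sketch (not part of the original source): the usual way a
 * filesystem exposes block_read_full_page(), passing its own get_block
 * (foo_get_block() is hypothetical):
 *
 *	static int foo_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, foo_get_block);
 *	}
 *
 *	static struct address_space_operations foo_aops = {
 *		.readpage	= foo_readpage,
 *		...
 *	};
 */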
2158 /* utility function for filesystems that need to do work on expanding
2159 * truncates. Uses prepare/commit_write to allow the filesystem to
2160 * deal with the hole.
2162 static int __generic_cont_expand(struct inode *inode, loff_t size,
2163 pgoff_t index, unsigned int offset)
2165 struct address_space *mapping = inode->i_mapping;
2167 unsigned long limit;
2171 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2172 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2173 send_sig(SIGXFSZ, current, 0);
2176 if (size > inode->i_sb->s_maxbytes)
2180 page = grab_cache_page(mapping, index);
2183 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2186 * ->prepare_write() may have instantiated a few blocks
2187 * outside i_size. Trim these off again.
2190 page_cache_release(page);
2191 vmtruncate(inode, inode->i_size);
2195 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2198 page_cache_release(page);
2205 int generic_cont_expand(struct inode *inode, loff_t size)
2208 unsigned int offset;
2210 offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2212 /* ugh. in prepare/commit_write, if from==to==start of block, we
2213 ** skip the prepare. make sure we never send an offset for the start
2216 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2217 /* caller must handle this extra byte. */
2220 index = size >> PAGE_CACHE_SHIFT;
2222 return __generic_cont_expand(inode, size, index, offset);
2225 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2227 loff_t pos = size - 1;
2228 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2229 unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2231 /* prepare/commit_write can handle the case where from==to==start of block. */
2232 return __generic_cont_expand(inode, size, index, offset);
2236 * For moronic filesystems that do not allow holes in files.
2237 * We may have to extend the file.
2240 int cont_prepare_write(struct page *page, unsigned offset,
2241 unsigned to, get_block_t *get_block, loff_t *bytes)
2243 struct address_space *mapping = page->mapping;
2244 struct inode *inode = mapping->host;
2245 struct page *new_page;
2249 unsigned blocksize = 1 << inode->i_blkbits;
2252 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2254 new_page = grab_cache_page(mapping, pgpos);
2257 /* we might sleep */
2258 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2259 unlock_page(new_page);
2260 page_cache_release(new_page);
2263 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2264 if (zerofrom & (blocksize-1)) {
2265 *bytes |= (blocksize-1);
2268 status = __block_prepare_write(inode, new_page, zerofrom,
2269 PAGE_CACHE_SIZE, get_block);
2272 kaddr = kmap_atomic(new_page, KM_USER0);
2273 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2274 flush_dcache_page(new_page);
2275 kunmap_atomic(kaddr, KM_USER0);
2276 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2277 unlock_page(new_page);
2278 page_cache_release(new_page);
2281 if (page->index < pgpos) {
2282 /* completely inside the area */
2285 /* page covers the boundary, find the boundary offset */
2286 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2288 /* if we will expand the thing last block will be filled */
2289 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2290 *bytes |= (blocksize-1);
2294 /* starting below the boundary? Nothing to zero out */
2295 if (offset <= zerofrom)
2298 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2301 if (zerofrom < offset) {
2302 kaddr = kmap_atomic(page, KM_USER0);
2303 memset(kaddr+zerofrom, 0, offset-zerofrom);
2304 flush_dcache_page(page);
2305 kunmap_atomic(kaddr, KM_USER0);
2306 __block_commit_write(inode, page, zerofrom, offset);
2310 ClearPageUptodate(page);
2314 ClearPageUptodate(new_page);
2315 unlock_page(new_page);
2316 page_cache_release(new_page);
2321 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2322 get_block_t *get_block)
2324 struct inode *inode = page->mapping->host;
2325 int err = __block_prepare_write(inode, page, from, to, get_block);
	if (err)
		ClearPageUptodate(page);
2331 int block_commit_write(struct page *page, unsigned from, unsigned to)
2333 struct inode *inode = page->mapping->host;
	__block_commit_write(inode, page, from, to);
	return 0;
2338 int generic_commit_write(struct file *file, struct page *page,
2339 unsigned from, unsigned to)
2341 struct inode *inode = page->mapping->host;
2342 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
	__block_commit_write(inode, page, from, to);
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
2348 if (pos > inode->i_size) {
2349 i_size_write(inode, pos);
2350 mark_inode_dirty(inode);
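/*
 * e.g. committing bytes 100..200 of page index 2 (4096-byte pages):
 * pos = 2*4096 + 200 = 8392, so a file that was 8000 bytes long grows
 * to 8392 bytes here and the inode is marked dirty.
 */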
2357 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2358 * immediately, while under the page lock. So it needs a special end_io
2359 * handler which does not touch the bh after unlocking it.
2361 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
2363 * hashing after unlocking the buffer, so it doesn't actually touch the bh
2366 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2369 set_buffer_uptodate(bh);
2371 /* This happens, due to failed READA attempts. */
2372 clear_buffer_uptodate(bh);
 * On entry, no part of the page is uptodate.
 * On exit, the page is fully uptodate in the areas outside (from,to).
2381 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2382 get_block_t *get_block)
2384 struct inode *inode = page->mapping->host;
2385 const unsigned blkbits = inode->i_blkbits;
2386 const unsigned blocksize = 1 << blkbits;
2387 struct buffer_head map_bh;
2388 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2389 unsigned block_in_page;
2390 unsigned block_start;
2391 sector_t block_in_file;
2396 int is_mapped_to_disk = 1;
	if (PageMappedToDisk(page))
		return 0;
2402 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2403 map_bh.b_page = page;
2406 * We loop across all blocks in the page, whether or not they are
2407 * part of the affected region. This is so we can discover if the
2408 * page is fully mapped-to-disk.
	for (block_start = 0, block_in_page = 0;
		  block_start < PAGE_CACHE_SIZE;
		  block_in_page++, block_start += blocksize) {
		unsigned block_end = block_start + blocksize;
		int create = 1;		/* assume the block must be mapped */

		map_bh.b_state = 0;
		if (block_start >= to)
			create = 0;	/* past the write area: probe only */
		ret = get_block(inode, block_in_file + block_in_page,
					&map_bh, create);
		if (ret)
			goto failed;
		if (!buffer_mapped(&map_bh))
2425 is_mapped_to_disk = 0;
2426 if (buffer_new(&map_bh))
			unmap_underlying_metadata(map_bh.b_bdev,
						map_bh.b_blocknr);
		if (PageUptodate(page))
			continue;
2431 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2432 kaddr = kmap_atomic(page, KM_USER0);
2433 if (block_start < from) {
2434 memset(kaddr+block_start, 0, from-block_start);
2437 if (block_end > to) {
2438 memset(kaddr + to, 0, block_end - to);
2441 flush_dcache_page(page);
2442 kunmap_atomic(kaddr, KM_USER0);
2445 if (buffer_uptodate(&map_bh))
2446 continue; /* reiserfs does this */
		if (block_start < from || block_end > to) {
			struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
			if (!bh) {
				ret = -ENOMEM;
				goto failed;
			}
			bh->b_state = map_bh.b_state;
			atomic_set(&bh->b_count, 0);
			bh->b_this_page = NULL;
			bh->b_page = page;
2458 bh->b_blocknr = map_bh.b_blocknr;
2459 bh->b_size = blocksize;
2460 bh->b_data = (char *)(long)block_start;
2461 bh->b_bdev = map_bh.b_bdev;
2462 bh->b_private = NULL;
2463 read_bh[nr_reads++] = bh;
2468 struct buffer_head *bh;
2471 * The page is locked, so these buffers are protected from
2472 * any VM or truncate activity. Hence we don't need to care
2473 * for the buffer_head refcounts.
		for (i = 0; i < nr_reads; i++) {
			bh = read_bh[i];
			lock_buffer(bh);
			bh->b_end_io = end_buffer_read_nobh;
2479 submit_bh(READ, bh);
		for (i = 0; i < nr_reads; i++) {
			bh = read_bh[i];
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				ret = -EIO;
			free_buffer_head(bh);
2493 if (is_mapped_to_disk)
2494 SetPageMappedToDisk(page);
2495 SetPageUptodate(page);
2498 * Setting the page dirty here isn't necessary for the prepare_write
2499 * function - commit_write will do that. But if/when this function is
2500 * used within the pagefault handler to ensure that all mmapped pages
2501 * have backing space in the filesystem, we will need to dirty the page
2502 * if its contents were altered.
2505 set_page_dirty(page);
failed:
	for (i = 0; i < nr_reads; i++) {
		if (read_bh[i])
			free_buffer_head(read_bh[i]);
2516 * Error recovery is pretty slack. Clear the page and mark it dirty
2517 * so we'll later zero out any blocks which _were_ allocated.
2519 kaddr = kmap_atomic(page, KM_USER0);
2520 memset(kaddr, 0, PAGE_CACHE_SIZE);
2521 kunmap_atomic(kaddr, KM_USER0);
2522 SetPageUptodate(page);
2523 set_page_dirty(page);
2526 EXPORT_SYMBOL(nobh_prepare_write);
2528 int nobh_commit_write(struct file *file, struct page *page,
2529 unsigned from, unsigned to)
2531 struct inode *inode = page->mapping->host;
2532 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2534 set_page_dirty(page);
2535 if (pos > inode->i_size) {
2536 i_size_write(inode, pos);
2537 mark_inode_dirty(inode);
2541 EXPORT_SYMBOL(nobh_commit_write);
 * nobh_writepage() - based on block_write_full_page() except
 * that it tries to operate without attaching bufferheads to
2548 int nobh_writepage(struct page *page, get_block_t *get_block,
2549 struct writeback_control *wbc)
2551 struct inode * const inode = page->mapping->host;
2552 loff_t i_size = i_size_read(inode);
2553 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2558 /* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto out;
2562 /* Is the page fully outside i_size? (truncate in progress) */
2563 offset = i_size & (PAGE_CACHE_SIZE-1);
2564 if (page->index >= end_index+1 || !offset) {
2566 * The page may have dirty, unmapped buffers. For example,
2567 * they may have been added in ext3_writepage(). Make them
2568 * freeable here, so the page does not leak.
#if 0
		/* Not really sure about this - do we need this? */
		if (page->mapping->a_ops->invalidatepage)
			page->mapping->a_ops->invalidatepage(page, offset);
#endif
		unlock_page(page);
2576 return 0; /* don't care */
2580 * The page straddles i_size. It must be zeroed out on each and every
2581 * writepage invocation because it may be mmapped. "A file is mapped
2582 * in multiples of the page size. For a file that is not a multiple of
2583 * the page size, the remaining memory is zeroed when mapped, and
2584 * writes to that region are not written out to the file."
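	/*
	 * Worked example: i_size == 5000 with 4096-byte pages gives
	 * end_index == 1 and offset == 904, so bytes 904..4095 of the
	 * straddling page are zeroed here before writeout.
	 */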
2586 kaddr = kmap_atomic(page, KM_USER0);
2587 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2588 flush_dcache_page(page);
2589 kunmap_atomic(kaddr, KM_USER0);
out:
	ret = mpage_writepage(page, get_block, wbc);
	if (ret == -EAGAIN)
		ret = __block_write_full_page(inode, page, get_block, wbc);
2596 EXPORT_SYMBOL(nobh_writepage);
2599 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2601 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2603 struct inode *inode = mapping->host;
2604 unsigned blocksize = 1 << inode->i_blkbits;
2605 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2606 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2609 struct address_space_operations *a_ops = mapping->a_ops;
	if ((offset & (blocksize - 1)) == 0)
		goto out;
2617 page = grab_cache_page(mapping, index);
2621 to = (offset + blocksize) & ~(blocksize - 1);
	ret = a_ops->prepare_write(NULL, page, offset, to);
	if (ret == 0) {
2624 kaddr = kmap_atomic(page, KM_USER0);
2625 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2626 flush_dcache_page(page);
2627 kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
	}
	unlock_page(page);
	page_cache_release(page);
2635 EXPORT_SYMBOL(nobh_truncate_page);
2637 int block_truncate_page(struct address_space *mapping,
2638 loff_t from, get_block_t *get_block)
2640 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2641 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2644 unsigned length, pos;
2645 struct inode *inode = mapping->host;
2647 struct buffer_head *bh;
2651 blocksize = 1 << inode->i_blkbits;
2652 length = offset & (blocksize - 1);
	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	length = blocksize - length;
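	/*
	 * e.g. from == 5000 with 1024-byte blocks and 4096-byte pages:
	 * offset == 904, length == 1024 - (904 & 1023) == 120, so bytes
	 * 904..1023 of the page get zeroed below.
	 */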
2659 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2661 page = grab_cache_page(mapping, index);
2666 if (!page_has_buffers(page))
2667 create_empty_buffers(page, blocksize, 0);
2669 /* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}
	if (!buffer_mapped(bh)) {
		err = get_block(inode, iblock, bh, 0);
		if (err)
			goto unlock;
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
2688 /* Ok, it's mapped. Make sure it's up-to-date */
2689 if (PageUptodate(page))
2690 set_buffer_uptodate(bh);
2692 if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
2701 kaddr = kmap_atomic(page, KM_USER0);
2702 memset(kaddr + offset, 0, length);
2703 flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	mark_buffer_dirty(bh);
	err = 0;
unlock:
	unlock_page(page);
	page_cache_release(page);
2717 * The generic ->writepage function for buffer-backed address_spaces
2719 int block_write_full_page(struct page *page, get_block_t *get_block,
2720 struct writeback_control *wbc)
2722 struct inode * const inode = page->mapping->host;
2723 loff_t i_size = i_size_read(inode);
2724 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2728 /* Is the page fully inside i_size? */
2729 if (page->index < end_index)
2730 return __block_write_full_page(inode, page, get_block, wbc);
2732 /* Is the page fully outside i_size? (truncate in progress) */
2733 offset = i_size & (PAGE_CACHE_SIZE-1);
2734 if (page->index >= end_index+1 || !offset) {
2736 * The page may have dirty, unmapped buffers. For example,
2737 * they may have been added in ext3_writepage(). Make them
2738 * freeable here, so the page does not leak.
		do_invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}
2746 * The page straddles i_size. It must be zeroed out on each and every
 * writepage invocation because it may be mmapped. "A file is mapped
2748 * in multiples of the page size. For a file that is not a multiple of
2749 * the page size, the remaining memory is zeroed when mapped, and
2750 * writes to that region are not written out to the file."
2752 kaddr = kmap_atomic(page, KM_USER0);
2753 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2754 flush_dcache_page(page);
2755 kunmap_atomic(kaddr, KM_USER0);
2756 return __block_write_full_page(inode, page, get_block, wbc);
2759 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2760 get_block_t *get_block)
	struct buffer_head tmp;
	struct inode *inode = mapping->host;

	tmp.b_state = 0;
	tmp.b_blocknr = 0;
	get_block(inode, block, &tmp, 0);
2767 return tmp.b_blocknr;
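/*
 * Typical use (sketch; the names are hypothetical): a block filesystem
 * implements the address_space ->bmap() method as
 *
 *	static sector_t foo_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, foo_get_block);
 *	}
 *
 * where foo_get_block is that filesystem's get_block_t routine.
 */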
2770 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2772 struct buffer_head *bh = bio->bi_private;
2777 if (err == -EOPNOTSUPP) {
2778 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2779 set_bit(BH_Eopnotsupp, &bh->b_state);
	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
	bio_put(bio);
2787 int submit_bh(int rw, struct buffer_head * bh)
2792 BUG_ON(!buffer_locked(bh));
2793 BUG_ON(!buffer_mapped(bh));
2794 BUG_ON(!bh->b_end_io);
	if (buffer_ordered(bh) && (rw == WRITE))
		rw = WRITE_BARRIER;
2800 * Only clear out a write error when rewriting, should this
2801 * include WRITE_SYNC as well?
2803 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2804 clear_buffer_write_io_error(bh);
2807 * from here on down, it's all bio -- do the initial mapping,
2808 * submit_bio -> generic_make_request may further map this bio around
2810 bio = bio_alloc(GFP_NOIO, 1);
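	/*
	 * The buffer's block number is scaled to 512-byte sectors below:
	 * e.g. a 4096-byte block has b_size >> 9 == 8, so block 100
	 * starts at sector 800.
	 */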
2812 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2813 bio->bi_bdev = bh->b_bdev;
2814 bio->bi_io_vec[0].bv_page = bh->b_page;
2815 bio->bi_io_vec[0].bv_len = bh->b_size;
2816 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2820 bio->bi_size = bh->b_size;
2822 bio->bi_end_io = end_bio_bh_io_sync;
2823 bio->bi_private = bh;
	bio_get(bio);
	submit_bio(rw, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
2836 * ll_rw_block: low-level access to block devices (DEPRECATED)
 * @rw: whether to %READ or %WRITE or %SWRITE or %READA (readahead)
2838 * @nr: number of &struct buffer_heads in the array
2839 * @bhs: array of pointers to &struct buffer_head
2841 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
 * requests an I/O operation on them, either a %READ or a %WRITE.  The third
 * option, %SWRITE, is like %WRITE except that it guarantees that the
 * *current* data in the buffers is sent to disk.  The fourth option,
 * %READA, is described in the documentation for generic_make_request(),
 * which ll_rw_block() calls.
2847 * This function drops any buffer that it cannot get a lock on (with the
2848 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2849 * clean when doing a write request, and any buffer that appears to be
 * up-to-date when doing a read request.  Further, it marks as clean buffers that
2851 * are processed for writing (the buffer cache won't assume that they are
2852 * actually clean until the buffer gets unlocked).
 * ll_rw_block() sets b_end_io to a simple completion handler that marks
 * the buffer up-to-date (if appropriate), unlocks the buffer, and wakes
2858 * All of the buffers must be for the same device, and must also be a
2859 * multiple of the current approved size for the device.
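 *
 * Usage sketch (hypothetical caller doing a synchronous read of one
 * mapped buffer):
 *
 *	ll_rw_block(READ, 1, &bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		err = -EIO;
 */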
2861 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = bhs[i];
		if (rw == SWRITE)
			lock_buffer(bh);
		else if (test_set_buffer_locked(bh))
			continue;
2873 if (rw == WRITE || rw == SWRITE) {
2874 if (test_clear_buffer_dirty(bh)) {
				bh->b_end_io = end_buffer_write_sync;
				get_bh(bh);
2877 submit_bh(WRITE, bh);
		if (!buffer_uptodate(bh)) {
			bh->b_end_io = end_buffer_read_sync;
			get_bh(bh);
			submit_bh(rw, bh);
2893 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2894 * and then start new I/O and then wait upon it. The caller must have a ref on
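 * the buffer.
 *
 * Usage sketch (hypothetical caller flushing one modified buffer):
 *
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	brelse(bh);
 */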
2897 int sync_dirty_buffer(struct buffer_head *bh)
	WARN_ON(atomic_read(&bh->b_count) < 1);
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);
		bh->b_end_io = end_buffer_write_sync;
		ret = submit_bh(WRITE, bh);
		wait_on_buffer(bh);
		if (buffer_eopnotsupp(bh)) {
			clear_buffer_eopnotsupp(bh);
			ret = -EOPNOTSUPP;
		}
		if (!ret && !buffer_uptodate(bh))
			ret = -EIO;
	}
2921 * try_to_free_buffers() checks if all the buffers on this particular page
2922 * are unused, and releases them if so.
2924 * Exclusion against try_to_free_buffers may be obtained by either
2925 * locking the page or by holding its mapping's private_lock.
2927 * If the page is dirty but all the buffers are clean then we need to
2928 * be sure to mark the page clean as well. This is because the page
 * may be against a block device, and a later reattachment of buffers
 * to a dirty page will set *all* buffers dirty, which would corrupt
 * filesystem data on the same device.
2933 * The same applies to regular filesystem pages: if all the buffers are
2934 * clean then we set the page clean and proceed. To do that, we require
2935 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2938 * try_to_free_buffers() is non-blocking.
2940 static inline int buffer_busy(struct buffer_head *bh)
2942 return atomic_read(&bh->b_count) |
2943 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2947 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2949 struct buffer_head *head = page_buffers(page);
2950 struct buffer_head *bh;
	bh = head;
	do {
		if (buffer_write_io_error(bh) && page->mapping)
			set_bit(AS_EIO, &page->mapping->flags);
		if (buffer_busy(bh))
			goto failed;
2958 bh = bh->b_this_page;
2959 } while (bh != head);
	do {
		struct buffer_head *next = bh->b_this_page;
		if (!list_empty(&bh->b_assoc_buffers))
			__remove_assoc_queue(bh);
		bh = next;
	} while (bh != head);
	*buffers_to_free = head;
	__clear_page_buffers(page);
	return 1;
failed:
	return 0;
2975 int try_to_free_buffers(struct page *page)
2977 struct address_space * const mapping = page->mapping;
2978 struct buffer_head *buffers_to_free = NULL;
2981 BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;
	if (mapping == NULL) {		/* can this still happen? */
		ret = drop_buffers(page, &buffers_to_free);
		goto out;
	}
	spin_lock(&mapping->private_lock);
	ret = drop_buffers(page, &buffers_to_free);
	if (ret) {
2994 * If the filesystem writes its buffers by hand (eg ext3)
2995 * then we can have clean buffers against a dirty page. We
2996 * clean the page here; otherwise later reattachment of buffers
2997 * could encounter a non-uptodate page, which is unresolvable.
2998 * This only applies in the rare case where try_to_free_buffers
2999 * succeeds but the page is not freed.
		clear_page_dirty(page);
	}
3003 spin_unlock(&mapping->private_lock);
out:
	if (buffers_to_free) {
3006 struct buffer_head *bh = buffers_to_free;
		do {
			struct buffer_head *next = bh->b_this_page;
			free_buffer_head(bh);
			bh = next;
		} while (bh != buffers_to_free);
3016 EXPORT_SYMBOL(try_to_free_buffers);
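/*
 * Sketch of a typical caller (hypothetical filesystem): ->releasepage()
 * commonly just delegates here once any filesystem-private page state
 * is gone:
 *
 *	static int foo_releasepage(struct page *page, gfp_t gfp_mask)
 *	{
 *		return try_to_free_buffers(page);
 *	}
 */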
3018 int block_sync_page(struct page *page)
3020 struct address_space *mapping;
3023 mapping = page_mapping(page);
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, page);
3030 * There are no bdflush tunables left. But distributions are
3031 * still running obsolete flush daemons, so we terminate them here.
3033 * Use of bdflush() is deprecated and will be removed in a future kernel.
3034 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3036 asmlinkage long sys_bdflush(int func, long data)
3038 static int msg_count;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (msg_count < 5) {
		msg_count++;
		printk(KERN_INFO
			"warning: process `%s' used the obsolete bdflush"
			" system call\n", current->comm);
		printk(KERN_INFO "Fix your initscripts?\n");
	}
3057 * Buffer-head allocation
3059 static kmem_cache_t *bh_cachep;
3062 * Once the number of bh's in the machine exceeds this level, we start
3063 * stripping them in writeback.
3065 static int max_buffer_heads;
3067 int buffer_heads_over_limit;
3069 struct bh_accounting {
3070 int nr; /* Number of live bh's */
3071 int ratelimit; /* Limit cacheline bouncing */
3074 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3076 static void recalc_bh_state(void)
	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
		return;
3083 __get_cpu_var(bh_accounting).ratelimit = 0;
3084 for_each_online_cpu(i)
3085 tot += per_cpu(bh_accounting, i).nr;
3086 buffer_heads_over_limit = (tot > max_buffer_heads);
3089 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
	if (ret) {
		get_cpu_var(bh_accounting).nr++;
		recalc_bh_state();
		put_cpu_var(bh_accounting);
	}
3099 EXPORT_SYMBOL(alloc_buffer_head);
3101 void free_buffer_head(struct buffer_head *bh)
3103 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3104 kmem_cache_free(bh_cachep, bh);
	get_cpu_var(bh_accounting).nr--;
	recalc_bh_state();
	put_cpu_var(bh_accounting);
3109 EXPORT_SYMBOL(free_buffer_head);
3112 init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
3114 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
3115 SLAB_CTOR_CONSTRUCTOR) {
3116 struct buffer_head * bh = (struct buffer_head *)data;
3118 memset(bh, 0, sizeof(*bh));
3119 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3123 #ifdef CONFIG_HOTPLUG_CPU
3124 static void buffer_exit_cpu(int cpu)
	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3134 per_cpu(bh_accounting, cpu).nr = 0;
3135 put_cpu_var(bh_accounting);
3138 static int buffer_cpu_notify(struct notifier_block *self,
3139 unsigned long action, void *hcpu)
3141 if (action == CPU_DEAD)
		buffer_exit_cpu((unsigned long)hcpu);
	return NOTIFY_OK;
3145 #endif /* CONFIG_HOTPLUG_CPU */
3147 void __init buffer_init(void)
	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
			(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
			SLAB_MEM_SPREAD),
			init_buffer_head,
			NULL);
3159 * Limit the bh occupancy to 10% of ZONE_NORMAL
3161 nrpages = (nr_free_buffer_pages() * 10) / 100;
3162 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
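	/*
	 * e.g. with 4096-byte pages and (hypothetically) a 64-byte
	 * buffer_head, 1GB of ZONE_NORMAL gives nrpages ~= 26000 and
	 * max_buffer_heads ~= 26000 * 64, i.e. about 1.7 million heads.
	 */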
3163 hotcpu_notifier(buffer_cpu_notify, 0);
3166 EXPORT_SYMBOL(__bforget);
3167 EXPORT_SYMBOL(__brelse);
3168 EXPORT_SYMBOL(__wait_on_buffer);
3169 EXPORT_SYMBOL(block_commit_write);
3170 EXPORT_SYMBOL(block_prepare_write);
3171 EXPORT_SYMBOL(block_read_full_page);
3172 EXPORT_SYMBOL(block_sync_page);
3173 EXPORT_SYMBOL(block_truncate_page);
3174 EXPORT_SYMBOL(block_write_full_page);
3175 EXPORT_SYMBOL(cont_prepare_write);
3176 EXPORT_SYMBOL(end_buffer_async_write);
3177 EXPORT_SYMBOL(end_buffer_read_sync);
3178 EXPORT_SYMBOL(end_buffer_write_sync);
3179 EXPORT_SYMBOL(file_fsync);
3180 EXPORT_SYMBOL(fsync_bdev);
3181 EXPORT_SYMBOL(generic_block_bmap);
3182 EXPORT_SYMBOL(generic_commit_write);
3183 EXPORT_SYMBOL(generic_cont_expand);
3184 EXPORT_SYMBOL(generic_cont_expand_simple);
3185 EXPORT_SYMBOL(init_buffer);
3186 EXPORT_SYMBOL(invalidate_bdev);
3187 EXPORT_SYMBOL(ll_rw_block);
3188 EXPORT_SYMBOL(mark_buffer_dirty);
3189 EXPORT_SYMBOL(submit_bh);
3190 EXPORT_SYMBOL(sync_dirty_buffer);
3191 EXPORT_SYMBOL(unlock_buffer);