/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/aio.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01
static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			     struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u16 csum_lo;
	__u16 csum_hi = 0;
	__u32 csum;

	csum_lo = le16_to_cpu(raw->i_checksum_lo);
	raw->i_checksum_lo = 0;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
		csum_hi = le16_to_cpu(raw->i_checksum_hi);
		raw->i_checksum_hi = 0;
	}

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
			   EXT4_INODE_SIZE(inode->i_sb));

	raw->i_checksum_lo = cpu_to_le16(csum_lo);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum_hi);

	return csum;
}
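
/*
 * Illustrative sketch (not part of the original file): the 32-bit crc32c
 * value computed above is stored split across two 16-bit on-disk fields.
 * Assuming a large inode that fits i_checksum_hi, a reader recombines it
 * as:
 *
 *	__u32 csum = le16_to_cpu(raw->i_checksum_lo) |
 *		     ((__u32)le16_to_cpu(raw->i_checksum_hi) << 16);
 *
 * 128-byte (EXT4_GOOD_OLD_INODE_SIZE) inodes carry only the low half.
 */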
static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}
static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
				struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}
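
/*
 * Usage sketch (assumption, mirroring how these helpers pair up):
 * ext4_inode_csum_verify() guards the read path and
 * ext4_inode_csum_set() refreshes the stored value just before the raw
 * inode is written back, e.g.
 *
 *	if (!ext4_inode_csum_verify(inode, raw_inode, ei))
 *		return -EIO;
 */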
static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}
static void ext4_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents);
/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}
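
/*
 * Worked example (not from the original): i_blocks counts 512-byte
 * sectors, which is why the xattr block above is converted with
 * "s_blocksize >> 9". On a 4K-blocksize filesystem a symlink with one
 * EA block has ea_blocks = 4096 >> 9 = 8, so it still tests as a fast
 * symlink when i_blocks == 8.
 */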
/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}
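
/*
 * Caller-pattern sketch (assumed, not from this file): truncate paths
 * re-dirty everything against the running transaction and then call the
 * helper above when credits run low, e.g.
 *
 *	if (!ext4_handle_has_enough_credits(handle, credits))
 *		err = ext4_truncate_restart_trans(handle, inode, credits);
 */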
/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data dirty buffers are tracked only in the
		 * journal. So although mm thinks everything is clean and
		 * ready for reaping the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss. Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped and thus the user could see stale data if they
		 * try to read them before the transaction is checkpointed.
		 * So be careful and force everything to disk here... We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
		    inode->i_ino != EXT4_JOURNAL_INO) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_complete_transaction(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages(&inode->i_data, 0);

		WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count));
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count));
	if (is_bad_inode(inode))
		goto no_delete;

	/*
	 * Protect us against freezing - iput() caller didn't have to have any
	 * protection against it
	 */
	sb_start_intwrite(inode->i_sb);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
				    ext4_blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			sb_end_intwrite(inode->i_sb);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	sb_end_intwrite(inode->i_sb);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}
#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif
/*
 * Calculate the number of metadata blocks needed to reserve
 * to allocate a block located at @lblock
 */
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return ext4_ext_calc_metadata_amount(inode, lblock);

	return ext4_ind_calc_metadata_amount(inode, lblock);
}
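
/*
 * Dispatch note (sketch, not in the original): both callees estimate the
 * worst case for a single block at @lblock - extent-tree index and leaf
 * blocks for extent-mapped files, indirect/double/triple indirect blocks
 * for ext2/3-style files - so callers can treat the result uniformly.
 */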
/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
				  int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
			     "with only %d reserved data blocks",
			     __func__, inode->i_ino, used,
			     ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
		ext4_warning(inode->i_sb, "ino %lu, allocated %d "
			     "with only %d reserved metadata blocks "
			     "(releasing %d blocks with reserved %d data blocks)",
			     inode->i_ino, ei->i_allocated_meta_blocks,
			     ei->i_reserved_meta_blocks, used,
			     ei->i_reserved_data_blocks);
		WARN_ON(1);
		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
			   used + ei->i_allocated_meta_blocks);
	ei->i_allocated_meta_blocks = 0;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}
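
/*
 * Accounting example (illustrative numbers, not from the original):
 * writing back used = 8 delalloc data blocks while 2 metadata blocks
 * were actually allocated shrinks i_reserved_data_blocks by 8,
 * i_reserved_meta_blocks by 2, and s_dirtyclusters_counter by 10; with
 * quota_claim set, the 8 data blocks move from "reserved" to "claimed"
 * in the quota subsystem.
 */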
static int __check_block_validity(struct inode *inode, const char *func,
				  unsigned int line,
				  struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EIO;
	}
	return 0;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))
#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
				       struct inode *inode,
				       struct ext4_map_blocks *es_map,
				       struct ext4_map_blocks *map,
				       int flags)
{
	int retval;

	map->m_flags = 0;
	/*
	 * There is a race window that the result is not the same.
	 * e.g. xfstests #223 when dioread_nolock is enabled.  The reason
	 * is that we lookup a block mapping in the extent status tree
	 * without taking i_data_sem.  So at the time the unwritten extent
	 * could be converted.
	 */
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		up_read((&EXT4_I(inode)->i_data_sem));
	/*
	 * Clear EXT4_MAP_FROM_CLUSTER and EXT4_MAP_BOUNDARY flag
	 * because it shouldn't be marked in es_map->m_flags.
	 */
	map->m_flags &= ~(EXT4_MAP_FROM_CLUSTER | EXT4_MAP_BOUNDARY);

	/*
	 * We don't check m_len because the extent will be collapsed in the
	 * status tree, so m_len might not be equal.
	 */
	if (es_map->m_lblk != map->m_lblk ||
	    es_map->m_flags != map->m_flags ||
	    es_map->m_pblk != map->m_pblk) {
		printk("ES cache assertion failed for inode: %lu "
		       "es_cached ex [%d/%d/%llu/%x] != "
		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
		       inode->i_ino, es_map->m_lblk, es_map->m_len,
		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
		       map->m_len, map->m_pblk, map->m_flags,
		       retval, flags);
	}
}
#endif /* ES_AGGRESSIVE_TEST */
/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head, and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapping
 * based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create==0 and the blocks are pre-allocated and uninitialized, the
 * result buffer head is unmapped.  If create==1, it will make sure the
 * buffer head is mapped.
 *
 * It returns 0 if a plain look up failed (blocks have not been
 * allocated); in that case the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	int retval;
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);

	/* Lookup extent status tree firstly */
	if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
		ext4_es_lru_add(inode);
		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
			map->m_pblk = ext4_es_pblock(&es) +
					map->m_lblk - es.es_lblk;
			map->m_flags |= ext4_es_is_written(&es) ?
					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
			retval = 0;
		} else {
			BUG_ON(1);
		}
#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(handle, inode, map,
					   &orig_map, flags);
#endif
		goto found;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	if (retval > 0) {
		int ret;
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    ext4_find_delalloc_range(inode, map->m_lblk,
					     map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk,
					    map->m_len, map->m_pblk, status);
		if (ret < 0)
			retval = ret;
	}
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		up_read((&EXT4_I(inode)->i_data_sem));

found:
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create = 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		return retval;

	/*
	 * Here we clear m_flags because after allocating a new extent,
	 * it will be set again.
	 */
	map->m_flags &= ~EXT4_MAP_FLAGS;

	/*
	 * Allocating new blocks and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * If the caller is from the delayed allocation writeout path,
	 * we have already reserved fs blocks for allocation; let the
	 * underlying get_block() function know to avoid double accounting.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now.  We don't
		 * support fallocate for non extent files.  So we can update
		 * reserve space here.
		 */
		if ((retval > 0) &&
		    (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

	if (retval > 0) {
		int ret;
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		/*
		 * If the extent has been zeroed out, we don't need to update
		 * extent status tree.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
		    ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
			if (ext4_es_is_written(&es))
				goto has_zeroout;
		}
		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    ext4_find_delalloc_range(inode, map->m_lblk,
					     map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret < 0)
			retval = ret;
	}

has_zeroout:
	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096
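
/*
 * Sizing note (arithmetic example, not in the original): with 4K blocks,
 * DIO_MAX_BLOCKS caps a single mapping request at 4096 * 4K = 16M, which
 * bounds the journal credits computed by ext4_chunk_trans_blocks() below.
 */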
static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !(flags & EXT4_GET_BLOCKS_NO_LOCK) && !handle) {
		/* Direct IO write... */
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
					    dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			return ret;
		}
		started = 1;
	}

	ret = ext4_map_blocks(handle, inode, &map, flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
	return ret;
}
int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}
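
/*
 * Usage sketch (assumption): ext4_get_block() has the standard
 * get_block_t signature, so generic buffer-head helpers can drive it,
 * e.g. a read path could call
 *
 *	mpage_readpage(page, ext4_get_block);
 */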
/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);

	/* ensure we send some value back into *errp */
	*errp = 0;

	if (create && err == 0)
		err = -ENOSPC;	/* should never happen */
	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (unlikely(!bh)) {
		*errp = -ENOMEM;
		return NULL;
	}
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
	}
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
}
struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}
int ext4_walk_page_buffers(handle_t *handle,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
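
/*
 * Usage note: the fn callback receives each buffer_head that overlaps
 * [from, to); see do_journal_get_write_access() and write_end_fn()
 * below for the two callbacks this file most often walks with.
 */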
/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage().  In that case, we
 * *know* that ext4_writepage() has generated enough buffer credits to do the
 * whole page.  So we won't block on the journal in that case, which is good,
 * because the caller may be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
int do_journal_get_write_access(handle_t *handle,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers.  Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues.  Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}
static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
				       struct buffer_head *bh_result, int create);

static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    flags, pagep);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * grab_cache_page_write_begin() can take a long time if the
	 * system is thrashing due to memory pressure, or if the page
	 * is being written back.  So grab it first before we start
	 * the transaction handle.  This also allows us to allocate
	 * the page (if needed) without using GFP_NOFS.
	 */
retry_grab:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	unlock_page(page);

retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
	if (IS_ERR(handle)) {
		page_cache_release(page);
		return PTR_ERR(handle);
	}

	lock_page(page);
	if (page->mapping != mapping) {
		/* The page got truncated from under us */
		unlock_page(page);
		page_cache_release(page);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	wait_on_page_writeback(page);

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, page_buffers(page),
					     from, to, NULL,
					     do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again.  Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;
		page_cache_release(page);
		return ret;
	}
	*pagep = page;
	return ret;
}
/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	int ret;
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	clear_buffer_meta(bh);
	clear_buffer_prio(bh);
	return ret;
}
/*
 * We need to pick up the new inode size which generic_commit_write gave us
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext4_write_end(struct file *file,
			  struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int i_size_changed = 0;

	trace_ext4_write_end(inode, pos, len, copied);
	if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) {
		ret = ext4_jbd2_file_inode(handle, inode);
		if (ret) {
			unlock_page(page);
			page_cache_release(page);
			goto errout;
		}
	}

	if (ext4_has_inline_data(inode)) {
		ret = ext4_write_inline_data_end(inode, pos, len,
						 copied, page);
		if (ret < 0)
			goto errout;
		copied = ret;
	} else
		copied = block_write_end(file, mapping, pos,
					 len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied > EXT4_I(inode)->i_disksize) {
		/* We need to mark inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize. (hint delalloc)
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock.  First, it unnecessarily
	 * makes the holding time of page lock longer.  Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* If we have allocated more blocks than copied, we will
		 * have blocks allocated outside inode->i_size, so truncate
		 * them.
		 */
		ext4_orphan_add(handle, inode);
errout:
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	loff_t new_i_size;

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (ext4_has_inline_data(inode))
		copied = ext4_write_inline_data_end(inode, pos, len,
						    copied, page);
	else {
		if (copied < len) {
			if (!PageUptodate(page))
				copied = 0;
			page_zero_new_buffers(page, from+copied, to);
		}

		ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
					     to, &partial, write_end_fn);
		if (!partial)
			SetPageUptodate(page);
	}
	new_i_size = pos + copied;
	if (new_i_size > inode->i_size)
		i_size_write(inode, pos+copied);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, new_i_size);
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	unlock_page(page);
	page_cache_release(page);
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* If we have allocated more blocks than copied, we will
		 * have blocks allocated outside inode->i_size, so truncate
		 * them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
/*
 * Reserve metadata for a single block located at lblock
 */
static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
{
	int retries = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int md_needed;
	ext4_lblk_t save_last_lblock;
	int save_len;

	/*
	 * Recalculate the amount of metadata blocks to reserve
	 * in order to allocate nrblocks; the worst case is
	 * one extent per block.
	 */
repeat:
	spin_lock(&ei->i_block_reservation_lock);
	/*
	 * ext4_calc_metadata_amount() has side effects, which we have
	 * to be prepared to undo if we fail to claim space.
	 */
	save_len = ei->i_da_metadata_calc_len;
	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
	md_needed = EXT4_NUM_B2C(sbi,
				 ext4_calc_metadata_amount(inode, lblock));
	trace_ext4_da_reserve_space(inode, md_needed);

	/*
	 * We do still charge estimated metadata to the sb though;
	 * we cannot afford to run out of free blocks.
	 */
	if (ext4_claim_free_clusters(sbi, md_needed, 0)) {
		ei->i_da_metadata_calc_len = save_len;
		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
		spin_unlock(&ei->i_block_reservation_lock);
		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
			cond_resched();
			goto repeat;
		}
		return -ENOSPC;
	}
	ei->i_reserved_meta_blocks += md_needed;
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;	/* success */
}
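
/*
 * Rounding example (illustrative, not from the original): EXT4_NUM_B2C()
 * converts blocks to clusters, rounding up.  With a bigalloc cluster
 * ratio of 16, a metadata estimate of 3 blocks claims
 * EXT4_NUM_B2C(sbi, 3) = 1 cluster from the free-cluster pool.
 */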
/*
 * Reserve a single cluster located at lblock
 */
static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
{
	int ret, retries = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int md_needed;
	ext4_lblk_t save_last_lblock;
	int save_len;

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end.  Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;

	/*
	 * Recalculate the amount of metadata blocks to reserve
	 * in order to allocate nrblocks; the worst case is
	 * one extent per block.
	 */
repeat:
	spin_lock(&ei->i_block_reservation_lock);
	/*
	 * ext4_calc_metadata_amount() has side effects, which we have
	 * to be prepared to undo if we fail to claim space.
	 */
	save_len = ei->i_da_metadata_calc_len;
	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
	md_needed = EXT4_NUM_B2C(sbi,
				 ext4_calc_metadata_amount(inode, lblock));
	trace_ext4_da_reserve_space(inode, md_needed);

	/*
	 * We do still charge estimated metadata to the sb though;
	 * we cannot afford to run out of free blocks.
	 */
	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
		ei->i_da_metadata_calc_len = save_len;
		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
		spin_unlock(&ei->i_block_reservation_lock);
		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
			cond_resched();
			goto repeat;
		}
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		return -ENOSPC;
	}
	ei->i_reserved_data_blocks++;
	ei->i_reserved_meta_blocks += md_needed;
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;	/* success */
}
static void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * If there aren't enough reserved blocks, then the
		 * counter is messed up somewhere.  Since this
		 * function is called from invalidate page, it's
		 * harmless to return without any action.
		 */
		ext4_warning(inode->i_sb, "ext4_da_release_space: "
			     "ino %lu, to_free %d with only %d reserved "
			     "data blocks", inode->i_ino, to_free,
			     ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 * Note that in case of bigalloc, i_reserved_meta_blocks,
		 * i_reserved_data_blocks, etc. refer to number of clusters.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}
static void ext4_da_page_release_reservation(struct page *page,
					     unsigned int offset,
					     unsigned int length)
{
	int to_release = 0;
	struct buffer_head *head, *bh;
	unsigned int curr_off = 0;
	struct inode *inode = page->mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	unsigned int stop = offset + length;
	int num_clusters;
	ext4_lblk_t lblk;

	BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;

		if (next_off > stop)
			break;

		if ((offset <= curr_off) && (buffer_delay(bh))) {
			to_release++;
			clear_buffer_delay(bh);
		}
		curr_off = next_off;
	} while ((bh = bh->b_this_page) != head);

	if (to_release) {
		lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
		ext4_es_remove_extent(inode, lblk, to_release);
	}

	/* If we have released all the blocks belonging to a cluster, then we
	 * need to release the reserved space for that cluster. */
	num_clusters = EXT4_NUM_B2C(sbi, to_release);
	while (num_clusters > 0) {
		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
			((num_clusters - 1) << sbi->s_cluster_bits);
		if (sbi->s_cluster_ratio == 1 ||
		    !ext4_find_delalloc_cluster(inode, lblk))
			ext4_da_release_space(inode, 1);

		num_clusters--;
	}
}
/*
 * Delayed allocation stuff
 */

struct mpage_da_data {
	struct inode *inode;
	struct writeback_control *wbc;

	pgoff_t first_page;	/* The first page to write */
	pgoff_t next_page;	/* Current page to examine */
	pgoff_t last_page;	/* Last page to examine */
	/*
	 * Extent to map - this can be after first_page because that can be
	 * fully mapped. We somewhat abuse m_flags to store whether the extent
	 * is delalloc or unwritten.
	 */
	struct ext4_map_blocks map;
	struct ext4_io_submit io_submit;	/* IO submission data */
};
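
/*
 * Lifecycle sketch (summary of the code below, not original text): the
 * writeback loop fills first_page/next_page/last_page while scanning
 * dirty pages, accumulates a delalloc or unwritten extent in map via
 * mpage_add_bh_to_extent(), and io_submit carries the bios built while
 * submitting the mapped pages.
 */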
static void mpage_release_unused_pages(struct mpage_da_data *mpd,
				       bool invalidate)
{
	int nr_pages, i;
	pgoff_t index, end;
	struct pagevec pvec;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	/* This is necessary when next_page == 0. */
	if (mpd->first_page >= mpd->next_page)
		return;

	index = mpd->first_page;
	end   = mpd->next_page - 1;
	if (invalidate) {
		ext4_lblk_t start, last;
		start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
		last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
		ext4_es_remove_extent(inode, start, last - start + 1);
	}

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			if (page->index > end)
				break;
			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			if (invalidate) {
				block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
				ClearPageUptodate(page);
			}
			unlock_page(page);
		}
		index = pvec.pages[nr_pages - 1]->index + 1;
		pagevec_release(&pvec);
	}
}
static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct super_block *sb = inode->i_sb;
	struct ext4_inode_info *ei = EXT4_I(inode);

	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
		 EXT4_C2B(EXT4_SB(inode->i_sb),
			  ext4_count_free_clusters(sb)));
	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(sb),
			percpu_counter_sum(&sbi->s_freeclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(sb),
			percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "Block reservation details");
	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
		 ei->i_reserved_data_blocks);
	ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
		 ei->i_reserved_meta_blocks);
	ext4_msg(sb, KERN_CRIT, "i_allocated_meta_blocks=%u",
		 ei->i_allocated_meta_blocks);
	return;
}
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}
/*
 * This function grabs code from the very beginning of
 * ext4_map_blocks, but assumes that the caller is from the delayed write
 * path.  This function looks up the requested blocks and sets the
 * buffer delay bit under the protection of i_data_sem.
 */
static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
			      struct ext4_map_blocks *map,
			      struct buffer_head *bh)
{
	struct extent_status es;
	int retval;
	sector_t invalid_block = ~((sector_t) 0xffff);
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
		invalid_block = ~0;

	map->m_flags = 0;
	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, map->m_len,
		  (unsigned long) map->m_lblk);

	/* Lookup extent status tree firstly */
	if (ext4_es_lookup_extent(inode, iblock, &es)) {
		ext4_es_lru_add(inode);
		if (ext4_es_is_hole(&es)) {
			retval = 0;
			down_read((&EXT4_I(inode)->i_data_sem));
			goto add_delayed;
		}

		/*
		 * Delayed extent could be allocated by fallocate.
		 * So we need to check it.
		 */
		if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
			map_bh(bh, inode->i_sb, invalid_block);
			set_buffer_new(bh);
			set_buffer_delay(bh);
			return 0;
		}

		map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
		retval = es.es_len - (iblock - es.es_lblk);
		if (retval > map->m_len)
			retval = map->m_len;
		map->m_len = retval;
		if (ext4_es_is_written(&es))
			map->m_flags |= EXT4_MAP_MAPPED;
		else if (ext4_es_is_unwritten(&es))
			map->m_flags |= EXT4_MAP_UNWRITTEN;
		else
			BUG_ON(1);

#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
#endif
		return retval;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_has_inline_data(inode)) {
		/*
		 * We will soon create blocks for this page, and let
		 * us pretend as if the blocks aren't allocated yet.
		 * In case of clusters, we have to handle the work
		 * of mapping from cluster so that the reserved space
		 * is calculated properly.
		 */
		if ((EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) &&
		    ext4_find_delalloc_cluster(inode, map->m_lblk))
			map->m_flags |= EXT4_MAP_FROM_CLUSTER;
		retval = 0;
	} else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		retval = ext4_ext_map_blocks(NULL, inode, map,
					     EXT4_GET_BLOCKS_NO_PUT_HOLE);
	else
		retval = ext4_ind_map_blocks(NULL, inode, map,
					     EXT4_GET_BLOCKS_NO_PUT_HOLE);

add_delayed:
	if (retval == 0) {
		int ret;
		/*
		 * XXX: __block_prepare_write() unmaps passed block,
		 * is it OK?
		 */
		/*
		 * If the block was allocated from previously allocated cluster,
		 * then we don't need to reserve it again. However we still need
		 * to reserve metadata for every block we're going to write.
		 */
		if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
			ret = ext4_da_reserve_space(inode, iblock);
			if (ret) {
				/* not enough space to reserve */
				retval = ret;
				goto out_unlock;
			}
		} else {
			ret = ext4_da_reserve_metadata(inode, iblock);
			if (ret) {
				/* not enough space to reserve */
				retval = ret;
				goto out_unlock;
			}
		}

		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    ~0, EXTENT_STATUS_DELAYED);
		if (ret) {
			retval = ret;
			goto out_unlock;
		}

		/* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
		 * and it should not appear on the bh->b_state.
		 */
		map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;

		map_bh(bh, inode->i_sb, invalid_block);
		set_buffer_new(bh);
		set_buffer_delay(bh);
	} else if (retval > 0) {
		int ret;
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret != 0)
			retval = ret;
	}

out_unlock:
	up_read((&EXT4_I(inode)->i_data_sem));

	return retval;
}
/*
 * This is a special get_block_t callback which is used by
 * ext4_da_write_begin().  It will either return mapped block or
 * reserve space for a single block.
 *
 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
 * We also have b_blocknr = -1 and b_bdev initialized properly
 *
 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
 * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
 * initialized properly.
 */
int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct ext4_map_blocks map;
	int ret = 0;

	BUG_ON(create == 0);
	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);

	map.m_lblk = iblock;
	map.m_len = 1;

	/*
	 * First, we need to know whether the block is allocated already;
	 * preallocated blocks are unmapped but should be treated
	 * the same as allocated blocks.
	 */
	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
	if (ret <= 0)
		return ret;

	map_bh(bh, inode->i_sb, map.m_pblk);
	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;

	if (buffer_unwritten(bh)) {
		/* A delayed write to unwritten bh should be marked
		 * new and mapped.  Mapped ensures that we don't do
		 * get_block multiple times when we write to the same
		 * offset and new ensures that we do proper zero out
		 * for partial write.
		 */
		set_buffer_new(bh);
		set_buffer_mapped(bh);
	}
	return 0;
}
static int bget_one(handle_t *handle, struct buffer_head *bh)
{
	get_bh(bh);
	return 0;
}

static int bput_one(handle_t *handle, struct buffer_head *bh)
{
	put_bh(bh);
	return 0;
}
static int __ext4_journalled_writepage(struct page *page,
				       unsigned int len)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct buffer_head *page_bufs = NULL;
	handle_t *handle = NULL;
	int ret = 0, err = 0;
	int inline_data = ext4_has_inline_data(inode);
	struct buffer_head *inode_bh = NULL;

	ClearPageChecked(page);

	if (inline_data) {
		BUG_ON(page->index != 0);
		BUG_ON(len > ext4_get_max_inline_size(inode));
		inode_bh = ext4_journalled_write_inline_data(inode, len, page);
		if (inode_bh == NULL)
			goto out;
	} else {
		page_bufs = page_buffers(page);
		if (!page_bufs) {
			BUG();
			goto out;
		}
		ext4_walk_page_buffers(handle, page_bufs, 0, len,
				       NULL, bget_one);
	}
	/* As soon as we unlock the page, it can go away, but we have
	 * references to buffers so we are safe */
	unlock_page(page);

	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				    ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	BUG_ON(!ext4_handle_valid(handle));

	if (inline_data) {
		ret = ext4_journal_get_write_access(handle, inode_bh);

		err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
	} else {
		ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
					     do_journal_get_write_access);

		err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
					     write_end_fn);
	}
	if (ret == 0)
		ret = err;
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;

	if (!ext4_has_inline_data(inode))
		ext4_walk_page_buffers(handle, page_bufs, 0, len,
				       NULL, bput_one);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
out:
	brelse(inode_bh);
	return ret;
}
/*
 * Note that we don't need to start a transaction unless we're journaling data
 * because we should have holes filled from ext4_page_mkwrite(). We even don't
 * need to file the inode to the transaction's list in ordered mode because if
 * we are writing back data added by write(), the inode is already there and if
 * we are writing back data modified via mmap(), no one guarantees in which
 * transaction the data will hit the disk. In case we are journaling data, we
 * cannot start transaction directly because transaction start ranks above page
 * lock so we have to do some magic.
 *
 * This function can get called via...
 *   - ext4_writepages after taking page lock (have journal handle)
 *   - journal_submit_inode_data_buffers (no journal handle)
 *   - shrink_page_list via the kswapd/direct reclaim (no journal handle)
 *   - grab_page_cache when doing write_begin (have journal handle)
 *
 * We don't do any block allocation in this function. If we have page with
 * multiple blocks we need to write those buffer_heads that are mapped. This
 * is important for mmap-based writes. So if we do, with blocksize 1K:
 *
 *	truncate(f, 1024);
 *	a = mmap(f, 0, 4096);
 *	a[0] = 'a';
 *	truncate(f, 4096);
 *
 * we have in the page the first buffer_head mapped via the page_mkwrite
 * callback, but other buffer_heads would be unmapped but dirty (dirty done
 * via do_wp_page). So writepage should write the first block. If we modify
 * the mmap area beyond 1024 we will again get a page_fault and the
 * page_mkwrite callback will do the block allocation and mark the
 * buffer_heads mapped.
 *
 * We redirty the page if we have any buffer_heads that are either delayed
 * or unwritten in the page.
 *
 * We can get recursively called as shown below.
 *
 *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *		lock_page() -> ext4_writepage()
 *
 * But since we don't do any block allocation we should not deadlock.
 * The page also has the dirty flag cleared so we don't get recursive
 * page_lock.
 */
static int ext4_writepage(struct page *page,
			  struct writeback_control *wbc)
{
	int ret = 0;
	loff_t size;
	unsigned int len;
	struct buffer_head *page_bufs = NULL;
	struct inode *inode = page->mapping->host;
	struct ext4_io_submit io_submit;

	trace_ext4_writepage(page);
	size = i_size_read(inode);
	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	page_bufs = page_buffers(page);
	/*
	 * We cannot do block allocation or other extent handling in this
	 * function. If there are buffers needing that, we have to redirty
	 * the page. But we may reach here when we do a journal commit via
	 * journal_submit_inode_data_buffers() and in that case we must write
	 * allocated buffers to achieve data=ordered mode guarantees.
	 */
	if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
				   ext4_bh_delay_or_unwritten)) {
		redirty_page_for_writepage(wbc, page);
		if (current->flags & PF_MEMALLOC) {
			/*
			 * For memory cleaning there's no point in writing only
			 * some buffers. So just bail out. Warn if we came here
			 * from direct reclaim.
			 */
			WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
							== PF_MEMALLOC);
			unlock_page(page);
			return 0;
		}
	}

	if (PageChecked(page) && ext4_should_journal_data(inode))
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem much point in redirtying the page here.
		 */
		return __ext4_journalled_writepage(page, len);

	ext4_io_submit_init(&io_submit, wbc);
	io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
	if (!io_submit.io_end) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return -ENOMEM;
	}
	ret = ext4_bio_write_page(&io_submit, page, len, wbc);
	ext4_io_submit(&io_submit);
	/* Drop io_end reference we got from init */
	ext4_put_io_end_defer(io_submit.io_end);
	return ret;
}
static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
{
	int len;
	loff_t size = i_size_read(mpd->inode);
	int err;

	BUG_ON(page->index != mpd->first_page);
	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;
	clear_page_dirty_for_io(page);
	err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
	if (!err)
		mpd->wbc->nr_to_write--;
	mpd->first_page++;

	return err;
}
#define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))

/*
 * mballoc gives us at most this number of blocks...
 * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
 * The rest of mballoc seems to handle chunks up to full group size.
 */
#define MAX_WRITEPAGES_EXTENT_LEN 2048
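
/*
 * Flag-mask sketch (restating the code that follows): BH_FLAGS picks out
 * exactly the two buffer bits an extent is keyed on, so
 *
 *	(bh->b_state & BH_FLAGS) == map->m_flags
 *
 * in mpage_add_bh_to_extent() below only merges buffers whose
 * delay/unwritten state matches the extent being collected.
 */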
/*
 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
 *
 * @mpd - extent of blocks
 * @lblk - logical number of the block in the file
 * @bh - buffer head we want to add to the extent
 *
 * The function is used to collect contig. blocks in the same state. If the
 * buffer doesn't require mapping for writeback and we haven't started the
 * extent of buffers to map yet, the function returns 'true' immediately - the
 * caller can write the buffer right away. Otherwise the function returns true
 * if the block has been added to the extent, false if the block couldn't be
 * added.
 */
static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
				   struct buffer_head *bh)
{
	struct ext4_map_blocks *map = &mpd->map;

	/* Buffer that doesn't need mapping for writeback? */
	if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
	    (!buffer_delay(bh) && !buffer_unwritten(bh))) {
		/* So far no extent to map => we write the buffer right away */
		if (map->m_len == 0)
			return true;
		return false;
	}

	/* First block in the extent? */
	if (map->m_len == 0) {
		map->m_lblk = lblk;
		map->m_len = 1;
		map->m_flags = bh->b_state & BH_FLAGS;
		return true;
	}

	/* Don't go larger than mballoc is willing to allocate */
	if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
		return false;

	/* Can we merge the block to our big extent? */
	if (lblk == map->m_lblk + map->m_len &&
	    (bh->b_state & BH_FLAGS) == map->m_flags) {
		map->m_len++;
		return true;
	}
	return false;
}
/*
 * mpage_process_page_bufs - submit page buffers for IO or add them to extent
 *
 * @mpd - extent of blocks for mapping
 * @head - the first buffer in the page
 * @bh - buffer we should start processing from
 * @lblk - logical number of the block in the file corresponding to @bh
 *
 * Walk through page buffers from @bh up to @head (exclusive) and either submit
 * the page for IO if all buffers in this page were mapped and there's no
 * accumulated extent of buffers to map, or add buffers in the page to the
 * extent of buffers to map. The function returns 1 if the caller can continue
 * by processing the next page, 0 if it should stop adding buffers to the
 * extent to map because we cannot extend it anymore. It can also return value
 * < 0 in case of error during IO submission.
 */
static int mpage_process_page_bufs(struct mpage_da_data *mpd,
				   struct buffer_head *head,
				   struct buffer_head *bh,
				   ext4_lblk_t lblk)
{
	struct inode *inode = mpd->inode;
	int err;
	ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
							>> inode->i_blkbits;

	do {
		BUG_ON(buffer_locked(bh));

		if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
			/* Found extent to map? */
			if (mpd->map.m_len)
				return 0;
			/* Everything mapped so far and we hit EOF */
			break;
		}
	} while (lblk++, (bh = bh->b_this_page) != head);
	/* So far everything mapped? Submit the page for IO. */
	if (mpd->map.m_len == 0) {
		err = mpage_submit_page(mpd, head->b_page);
		if (err < 0)
			return err;
	}
	return lblk < blocks;
}
2018 * mpage_map_buffers - update buffers corresponding to changed extent and
2019 * submit fully mapped pages for IO
2021 * @mpd - description of extent to map, on return next extent to map
2023 * Scan buffers corresponding to changed extent (we expect corresponding pages
2024 * to be already locked) and update buffer state according to new extent state.
2025 * We map delalloc buffers to their physical location, clear unwritten bits,
2026 * and mark buffers as uninit when we perform writes to uninitialized extents
2027 * and do extent conversion after IO is finished. If the last page is not fully
2028 * mapped, we update @map to the next extent in the last page that needs
2029 * mapping. Otherwise we submit the page for IO.
2031 static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2033 struct pagevec pvec;
2035 struct inode *inode = mpd->inode;
2036 struct buffer_head *head, *bh;
2037 int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits;
2043 start = mpd->map.m_lblk >> bpp_bits;
2044 end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
2045 lblk = start << bpp_bits;
2046 pblock = mpd->map.m_pblk;
2048 pagevec_init(&pvec, 0);
2049 while (start <= end) {
2050 nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start,
2054 for (i = 0; i < nr_pages; i++) {
2055 struct page *page = pvec.pages[i];
2057 if (page->index > end)
2059 /* Upto 'end' pages must be contiguous */
2060 BUG_ON(page->index != start);
2061 bh = head = page_buffers(page);
2063 if (lblk < mpd->map.m_lblk)
2065 if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2067 * Buffer after end of mapped extent.
2068 * Find next buffer in the page to map.
2071 mpd->map.m_flags = 0;
2073 * FIXME: If dioread_nolock supports
2074 * blocksize < pagesize, we need to make
2075 * sure we add size mapped so far to
2076 * io_end->size as the following call
2077 * can submit the page for IO.
2079 err = mpage_process_page_bufs(mpd, head,
2081 pagevec_release(&pvec);
2086 if (buffer_delay(bh)) {
2087 clear_buffer_delay(bh);
2088 bh->b_blocknr = pblock++;
2090 clear_buffer_unwritten(bh);
2091 } while (lblk++, (bh = bh->b_this_page) != head);
2094 * FIXME: This is going to break if dioread_nolock
2095 * supports blocksize < pagesize as we will try to
2096 * convert potentially unmapped parts of the inode.
2098 mpd->io_submit.io_end->size += PAGE_CACHE_SIZE;
2099 /* Page fully mapped - let IO run! */
2100 err = mpage_submit_page(mpd, page);
2102 pagevec_release(&pvec);
2107 pagevec_release(&pvec);
2109 /* Extent fully mapped and matches with page boundary. We are done. */
2111 mpd->map.m_flags = 0;
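/*
 * Illustrative sketch (userspace C, not kernel code): how a range of
 * logical blocks maps onto the page-index range scanned above, assuming
 * 4k pages with 1k blocks (bpp_bits == 2, i.e. four blocks per page):
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int bpp_bits = 2;	/* assumed PAGE_SHIFT - blkbits */
	uint32_t m_lblk = 5, m_len = 9;	/* extent covers blocks 5..13 */
	uint32_t start = m_lblk >> bpp_bits;		 /* first page */
	uint32_t end = (m_lblk + m_len - 1) >> bpp_bits; /* last page */

	printf("pages %u..%u\n", start, end);
	return 0;			/* prints: pages 1..3 */
}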
2115 static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
2117 struct inode *inode = mpd->inode;
2118 struct ext4_map_blocks *map = &mpd->map;
2119 int get_blocks_flags;
2122 trace_ext4_da_write_pages_extent(inode, map);
2124 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
2125 * to convert an uninitialized extent to be initialized (in the case
2126 * where we have written into one or more preallocated blocks). It is
2127 * possible that we're going to need more metadata blocks than
2128 * previously reserved. However we must not fail because we're in
2129 * writeback and there is nothing we can do about it so it might result
2130 * in data loss. So use reserved blocks to allocate metadata if
2133 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if the blocks
2134 * in question are delalloc blocks. This affects functions in many
2135 * different parts of the allocation call path. This flag exists
2136 * primarily because we don't want to change *many* call functions, so
2137 * ext4_map_blocks() will set the EXT4_STATE_DELALLOC_RESERVED flag
2138 * once the inode's allocation semaphore is taken.
2140 get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
2141 EXT4_GET_BLOCKS_METADATA_NOFAIL;
2142 if (ext4_should_dioread_nolock(inode))
2143 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2144 if (map->m_flags & (1 << BH_Delay))
2145 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2147 err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2150 if (map->m_flags & EXT4_MAP_UNINIT) {
2151 if (!mpd->io_submit.io_end->handle &&
2152 ext4_handle_valid(handle)) {
2153 mpd->io_submit.io_end->handle = handle->h_rsv_handle;
2154 handle->h_rsv_handle = NULL;
2156 ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
2159 BUG_ON(map->m_len == 0);
2160 if (map->m_flags & EXT4_MAP_NEW) {
2161 struct block_device *bdev = inode->i_sb->s_bdev;
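/*
 * Newly allocated blocks may still have stale buffer_head aliases in
 * the block device's page cache from their previous (metadata) life;
 * drop those aliases before the new data is written.
 */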
2164 for (i = 0; i < map->m_len; i++)
2165 unmap_underlying_metadata(bdev, map->m_pblk + i);
2171 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2172 * mpd->len and submit pages underlying it for IO
2174 * @handle - handle for journal operations
2175 * @mpd - extent to map
2177 * The function maps extent starting at mpd->lblk of length mpd->len. If it is
2178 * delayed, blocks are allocated; if it is unwritten, we may need to convert
2179 * them to initialized or split the described range from larger unwritten
2180 * extent. Note that we need not map all the described range since allocation
2181 * can return fewer blocks, or the range may be covered by more unwritten extents. We
2182 * cannot map more because we are limited by reserved transaction credits. On
2183 * the other hand we always make sure that the last touched page is fully
2184 * mapped so that it can be written out (and thus forward progress is
2185 * guaranteed). After mapping we submit all mapped pages for IO.
2187 static int mpage_map_and_submit_extent(handle_t *handle,
2188 struct mpage_da_data *mpd,
2189 bool *give_up_on_write)
2191 struct inode *inode = mpd->inode;
2192 struct ext4_map_blocks *map = &mpd->map;
2196 mpd->io_submit.io_end->offset =
2197 ((loff_t)map->m_lblk) << inode->i_blkbits;
2199 err = mpage_map_one_extent(handle, mpd);
2201 struct super_block *sb = inode->i_sb;
2203 if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
2204 goto invalidate_dirty_pages;
2206 * Let the upper layers retry transient errors.
2207 * In the case of ENOSPC, if ext4_count_free_clusters()
2208 * is non-zero, a commit should free up blocks.
2210 if ((err == -ENOMEM) ||
2211 (err == -ENOSPC && ext4_count_free_clusters(sb)))
2213 ext4_msg(sb, KERN_CRIT,
2214 "Delayed block allocation failed for "
2215 "inode %lu at logical offset %llu with"
2216 " max blocks %u with error %d",
2218 (unsigned long long)map->m_lblk,
2219 (unsigned)map->m_len, -err);
2220 ext4_msg(sb, KERN_CRIT,
2221 "This should not happen!! Data will "
2224 ext4_print_free_blocks(inode);
2225 invalidate_dirty_pages:
2226 *give_up_on_write = true;
2230 * Update buffer state, submit mapped pages, and get us a new extent to map.
2233 err = mpage_map_and_submit_buffers(mpd);
2236 } while (map->m_len);
2238 /* Update on-disk size after IO is submitted */
2239 disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT;
2240 if (disksize > EXT4_I(inode)->i_disksize) {
2243 ext4_wb_update_i_disksize(inode, disksize);
2244 err2 = ext4_mark_inode_dirty(handle, inode);
2246 ext4_error(inode->i_sb,
2247 "Failed to mark inode %lu dirty",
2256 * Calculate the total number of credits to reserve for one writepages
2257 * iteration. This is called from ext4_writepages(). We map an extent of
2258 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
2259 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2260 * bpp - 1 blocks in bpp different extents.
2262 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2264 int bpp = ext4_journal_blocks_per_page(inode);
2266 return ext4_meta_trans_blocks(inode,
2267 MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
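/*
 * Worked example (illustrative, assumed values): if bpp were 4 (1k
 * blocks in 4k pages) and MAX_WRITEPAGES_EXTENT_LEN were 2048, one
 * writepages iteration could map up to 2048 + 4 - 1 = 2051 blocks,
 * spread over at most bpp distinct extents.
 */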
2271 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
2272 * and underlying extent to map
2274 * @mpd - where to look for pages
2276 * Walk dirty pages in the mapping. If they are fully mapped, submit them for
2277 * IO immediately. When we find a page which isn't mapped we start accumulating
2278 * extent of buffers underlying these pages that needs mapping (formed by
2279 * either delayed or unwritten buffers). We also lock the pages containing
2280 * these buffers. The extent found is returned in @mpd structure (starting at
2281 * mpd->lblk with length mpd->len blocks).
2283 * Note that this function can attach bios to one io_end structure which are
2284 * neither logically nor physically contiguous. Although it may seem like an
2285 * unnecessary complication, it is actually inevitable in blocksize < pagesize
2286 * case as we need to track IO to all buffers underlying a page in one io_end.
2288 static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
2290 struct address_space *mapping = mpd->inode->i_mapping;
2291 struct pagevec pvec;
2292 unsigned int nr_pages;
2293 pgoff_t index = mpd->first_page;
2294 pgoff_t end = mpd->last_page;
2297 int blkbits = mpd->inode->i_blkbits;
2299 struct buffer_head *head;
2301 if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
2302 tag = PAGECACHE_TAG_TOWRITE;
2304 tag = PAGECACHE_TAG_DIRTY;
2306 pagevec_init(&pvec, 0);
2308 mpd->next_page = index;
2309 while (index <= end) {
2310 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2311 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2315 for (i = 0; i < nr_pages; i++) {
2316 struct page *page = pvec.pages[i];
2319 * At this point, the page may be truncated or
2320 * invalidated (changing page->mapping to NULL), or
2321 * even swizzled back from swapper_space to tmpfs file
2322 * mapping. However, page->index will not change
2323 * because we have a reference on the page.
2325 if (page->index > end)
2328 /* If we can't merge this page, we are done. */
2329 if (mpd->map.m_len > 0 && mpd->next_page != page->index)
2334 * If the page is no longer dirty, or its mapping no
2335 * longer corresponds to inode we are writing (which
2336 * means it has been truncated or invalidated), or the
2337 * page is already under writeback and we are not doing
2338 * a data integrity writeback, skip the page
2340 if (!PageDirty(page) ||
2341 (PageWriteback(page) &&
2342 (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
2343 unlikely(page->mapping != mapping)) {
2348 wait_on_page_writeback(page);
2349 BUG_ON(PageWriteback(page));
2351 if (mpd->map.m_len == 0)
2352 mpd->first_page = page->index;
2353 mpd->next_page = page->index + 1;
2354 /* Add all dirty buffers to mpd */
2355 lblk = ((ext4_lblk_t)page->index) <<
2356 (PAGE_CACHE_SHIFT - blkbits);
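/* lblk: logical block number of the first block in this page */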
2357 head = page_buffers(page);
2358 err = mpage_process_page_bufs(mpd, head, head, lblk);
2364 * Accumulated enough dirty pages? This doesn't apply
2365 * to WB_SYNC_ALL mode. For integrity sync we have to
2366 * keep going because someone may be concurrently
2367 * dirtying pages, and we might have synced a lot of
2368 * newly appeared dirty pages, but have not synced all
2369 * of the old dirty pages.
2371 if (mpd->wbc->sync_mode == WB_SYNC_NONE &&
2372 mpd->next_page - mpd->first_page >=
2373 mpd->wbc->nr_to_write)
2376 pagevec_release(&pvec);
2381 pagevec_release(&pvec);
2385 static int __writepage(struct page *page, struct writeback_control *wbc,
2388 struct address_space *mapping = data;
2389 int ret = ext4_writepage(page, wbc);
2390 mapping_set_error(mapping, ret);
2394 static int ext4_writepages(struct address_space *mapping,
2395 struct writeback_control *wbc)
2397 pgoff_t writeback_index = 0;
2398 long nr_to_write = wbc->nr_to_write;
2399 int range_whole = 0;
2401 handle_t *handle = NULL;
2402 struct mpage_da_data mpd;
2403 struct inode *inode = mapping->host;
2404 int needed_blocks, rsv_blocks = 0, ret = 0;
2405 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2407 struct blk_plug plug;
2408 bool give_up_on_write = false;
2410 trace_ext4_writepages(inode, wbc);
2413 * No pages to write? This is mainly a kludge to avoid starting
2414 * a transaction for special inodes like journal inode on last iput()
2415 * because that could violate lock ordering on umount
2417 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2420 if (ext4_should_journal_data(inode)) {
2421 struct blk_plug plug;
2424 blk_start_plug(&plug);
2425 ret = write_cache_pages(mapping, wbc, __writepage, mapping);
2426 blk_finish_plug(&plug);
2431 * If the filesystem has aborted, it is read-only, so return
2432 * right away instead of dumping stack traces later on that
2433 * will obscure the real source of the problem. We test
2434 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
2435 * the latter could be true if the filesystem is mounted
2436 * read-only, and in that case, ext4_writepages should
2437 * *never* be called, so if that ever happens, we would want the stack trace.
2440 if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
2443 if (ext4_should_dioread_nolock(inode)) {
2445 * We may need to convert up to one extent per block in
2446 * the page and we may dirty the inode.
2448 rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits);
2452 * If we have inline data and arrive here, it means that
2453 * we will soon create the block for the 1st page, so
2454 * we'd better clear the inline data here.
2456 if (ext4_has_inline_data(inode)) {
2457 /* Just inode will be modified... */
2458 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2459 if (IS_ERR(handle)) {
2460 ret = PTR_ERR(handle);
2461 goto out_writepages;
2463 BUG_ON(ext4_test_inode_state(inode,
2464 EXT4_STATE_MAY_INLINE_DATA));
2465 ext4_destroy_inline_data(handle, inode);
2466 ext4_journal_stop(handle);
2469 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2472 if (wbc->range_cyclic) {
2473 writeback_index = mapping->writeback_index;
2474 if (writeback_index)
2476 mpd.first_page = writeback_index;
2479 mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT;
2480 mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT;
2485 ext4_io_submit_init(&mpd.io_submit, wbc);
2487 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2488 tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
2490 blk_start_plug(&plug);
2491 while (!done && mpd.first_page <= mpd.last_page) {
2492 /* For each extent of pages we use new io_end */
2493 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2494 if (!mpd.io_submit.io_end) {
2500 * We have two constraints: We find one extent to map and we
2501 * must always write out the whole page (makes a difference when
2502 * blocksize < pagesize) so that we don't block on IO when we
2503 * try to write out the rest of the page. Journalled mode is
2504 * not supported by delalloc.
2506 BUG_ON(ext4_should_journal_data(inode));
2507 needed_blocks = ext4_da_writepages_trans_blocks(inode);
2509 /* start a new transaction */
2510 handle = ext4_journal_start_with_reserve(inode,
2511 EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
2512 if (IS_ERR(handle)) {
2513 ret = PTR_ERR(handle);
2514 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2515 "%ld pages, ino %lu; err %d", __func__,
2516 wbc->nr_to_write, inode->i_ino, ret);
2517 /* Release allocated io_end */
2518 ext4_put_io_end(mpd.io_submit.io_end);
2522 trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
2523 ret = mpage_prepare_extent_to_map(&mpd);
2526 ret = mpage_map_and_submit_extent(handle, &mpd,
2530 * We scanned the whole range (or exhausted
2531 * nr_to_write), submitted what was mapped and
2532 * didn't find anything needing mapping. We are done.
2538 ext4_journal_stop(handle);
2539 /* Submit prepared bio */
2540 ext4_io_submit(&mpd.io_submit);
2541 /* Unlock pages we didn't use */
2542 mpage_release_unused_pages(&mpd, give_up_on_write);
2543 /* Drop our io_end reference we got from init */
2544 ext4_put_io_end(mpd.io_submit.io_end);
2546 if (ret == -ENOSPC && sbi->s_journal) {
2548 * Commit the transaction which would
2549 * free blocks released in the transaction
2552 jbd2_journal_force_commit_nested(sbi->s_journal);
2556 /* Fatal error - ENOMEM, EIO... */
2560 blk_finish_plug(&plug);
2561 if (!ret && !cycled) {
2563 mpd.last_page = writeback_index - 1;
2569 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2571 * Set the writeback_index so that range_cyclic
2572 * mode will write it back later
2574 mapping->writeback_index = mpd.first_page;
2577 trace_ext4_writepages_result(inode, wbc, ret,
2578 nr_to_write - wbc->nr_to_write);
2582 static int ext4_nonda_switch(struct super_block *sb)
2584 s64 free_clusters, dirty_clusters;
2585 struct ext4_sb_info *sbi = EXT4_SB(sb);
2588 * Switch to non-delalloc mode if we are running low
2589 * on free blocks. The free block accounting via percpu
2590 * counters can get slightly wrong with percpu_counter_batch getting
2591 * accumulated on each CPU without updating global counters.
2592 * Delalloc needs accurate free block accounting, so switch
2593 * to non-delalloc when we are near the error range.
2596 percpu_counter_read_positive(&sbi->s_freeclusters_counter);
2598 percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2600 * Start pushing delalloc when 1/2 of free blocks are dirty.
2602 if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
2603 try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
2605 if (2 * free_clusters < 3 * dirty_clusters ||
2606 free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
2608 * free block count is less than 150% of dirty blocks
2609 * or free blocks are less than the watermark
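 * (algebraically, 2 * free_clusters < 3 * dirty_clusters is just
 * "free < 1.5 * dirty", kept in integer arithmetic)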
2616 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2617 loff_t pos, unsigned len, unsigned flags,
2618 struct page **pagep, void **fsdata)
2620 int ret, retries = 0;
2623 struct inode *inode = mapping->host;
2626 index = pos >> PAGE_CACHE_SHIFT;
2628 if (ext4_nonda_switch(inode->i_sb)) {
2629 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2630 return ext4_write_begin(file, mapping, pos,
2631 len, flags, pagep, fsdata);
2633 *fsdata = (void *)0;
2634 trace_ext4_da_write_begin(inode, pos, len, flags);
2636 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
2637 ret = ext4_da_write_inline_data_begin(mapping, inode,
2647 * grab_cache_page_write_begin() can take a long time if the
2648 * system is thrashing due to memory pressure, or if the page
2649 * is being written back. So grab it first before we start
2650 * the transaction handle. This also allows us to allocate
2651 * the page (if needed) without using GFP_NOFS.
2654 page = grab_cache_page_write_begin(mapping, index, flags);
2660 * With delayed allocation, we don't log the i_disksize update
2661 * if there is delayed block allocation. But we still need
2662 * to journal the i_disksize update if the write goes to the end
2663 * of a file which has an already mapped buffer.
2666 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1);
2667 if (IS_ERR(handle)) {
2668 page_cache_release(page);
2669 return PTR_ERR(handle);
2673 if (page->mapping != mapping) {
2674 /* The page got truncated from under us */
2676 page_cache_release(page);
2677 ext4_journal_stop(handle);
2680 /* In case writeback began while the page was unlocked */
2681 wait_on_page_writeback(page);
2683 ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
2686 ext4_journal_stop(handle);
2688 * block_write_begin may have instantiated a few blocks
2689 * outside i_size. Trim these off again. Don't need
2690 * i_size_read because we hold i_mutex.
2692 if (pos + len > inode->i_size)
2693 ext4_truncate_failed_write(inode);
2695 if (ret == -ENOSPC &&
2696 ext4_should_retry_alloc(inode->i_sb, &retries))
2699 page_cache_release(page);
2708 * Check if we should update i_disksize
2709 * when writing to the end of the file without requiring block allocation
2711 static int ext4_da_should_update_i_disksize(struct page *page,
2712 unsigned long offset)
2714 struct buffer_head *bh;
2715 struct inode *inode = page->mapping->host;
2719 bh = page_buffers(page);
2720 idx = offset >> inode->i_blkbits;
2722 for (i = 0; i < idx; i++)
2723 bh = bh->b_this_page;
2725 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
2730 static int ext4_da_write_end(struct file *file,
2731 struct address_space *mapping,
2732 loff_t pos, unsigned len, unsigned copied,
2733 struct page *page, void *fsdata)
2735 struct inode *inode = mapping->host;
2737 handle_t *handle = ext4_journal_current_handle();
2739 unsigned long start, end;
2740 int write_mode = (int)(unsigned long)fsdata;
2742 if (write_mode == FALL_BACK_TO_NONDELALLOC)
2743 return ext4_write_end(file, mapping, pos,
2744 len, copied, page, fsdata);
2746 trace_ext4_da_write_end(inode, pos, len, copied);
2747 start = pos & (PAGE_CACHE_SIZE - 1);
2748 end = start + copied - 1;
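/* start and end are byte offsets of the copied range within the page */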
2751 * generic_write_end() will run mark_inode_dirty() if i_size
2752 * changes. So let's piggyback the i_disksize mark_inode_dirty into that.
2755 new_i_size = pos + copied;
2756 if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
2757 if (ext4_has_inline_data(inode) ||
2758 ext4_da_should_update_i_disksize(page, end)) {
2759 down_write(&EXT4_I(inode)->i_data_sem);
2760 if (new_i_size > EXT4_I(inode)->i_disksize)
2761 EXT4_I(inode)->i_disksize = new_i_size;
2762 up_write(&EXT4_I(inode)->i_data_sem);
2763 /* We need to mark inode dirty even if
2764 * new_i_size is less than inode->i_size
2765 * but greater than i_disksize. (hint: delalloc)
2767 ext4_mark_inode_dirty(handle, inode);
2771 if (write_mode != CONVERT_INLINE_DATA &&
2772 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
2773 ext4_has_inline_data(inode))
2774 ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
2777 ret2 = generic_write_end(file, mapping, pos, len, copied,
2783 ret2 = ext4_journal_stop(handle);
2787 return ret ? ret : copied;
2790 static void ext4_da_invalidatepage(struct page *page, unsigned int offset,
2791 unsigned int length)
2794 * Drop reserved blocks
2796 BUG_ON(!PageLocked(page));
2797 if (!page_has_buffers(page))
2800 ext4_da_page_release_reservation(page, offset, length);
2803 ext4_invalidatepage(page, offset, length);
2809 * Force all delayed allocation blocks to be allocated for a given inode.
2811 int ext4_alloc_da_blocks(struct inode *inode)
2813 trace_ext4_alloc_da_blocks(inode);
2815 if (!EXT4_I(inode)->i_reserved_data_blocks &&
2816 !EXT4_I(inode)->i_reserved_meta_blocks)
2820 * We do something simple for now. The filemap_flush() will
2821 * also start triggering a write of the data blocks, which is
2822 * not strictly speaking necessary (and for users of
2823 * laptop_mode, not even desirable). However, to do otherwise
2824 * would require replicating code paths in:
2826 * ext4_writepages() ->
2827 * write_cache_pages() ---> (via passed in callback function)
2828 * __mpage_da_writepage() -->
2829 * mpage_add_bh_to_extent()
2830 * mpage_da_map_blocks()
2832 * The problem is that write_cache_pages(), located in
2833 * mm/page-writeback.c, marks pages clean in preparation for
2834 * doing I/O, which is not desirable if we're not planning on
2837 * We could call write_cache_pages(), and then redirty all of
2838 * the pages by calling redirty_page_for_writepage() but that
2839 * would be ugly in the extreme. So instead we would need to
2840 * replicate parts of the code in the above functions,
2841 * simplifying them because we wouldn't actually intend to
2842 * write out the pages, but rather only collect contiguous
2843 * logical block extents, call the multi-block allocator, and
2844 * then update the buffer heads with the block allocations.
2846 * For now, though, we'll cheat by calling filemap_flush(),
2847 * which will map the blocks, and start the I/O, but not
2848 * actually wait for the I/O to complete.
2850 return filemap_flush(inode->i_mapping);
2854 * bmap() is special. It gets used by applications such as lilo and by
2855 * the swapper to find the on-disk block of a specific piece of data.
2857 * Naturally, this is dangerous if the block concerned is still in the
2858 * journal. If somebody makes a swapfile on an ext4 data-journaling
2859 * filesystem and enables swap, then they may get a nasty shock when the
2860 * data getting swapped to that swapfile suddenly gets overwritten by
2861 * the original zeros written out previously to the journal and
2862 * awaiting writeback in the kernel's buffer cache.
2864 * So, if we see any bmap calls here on a modified, data-journaled file,
2865 * take extra steps to flush any blocks which might be in the cache.
2867 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
2869 struct inode *inode = mapping->host;
2874 * We can get here for an inline file via the FIBMAP ioctl
2876 if (ext4_has_inline_data(inode))
2879 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
2880 test_opt(inode->i_sb, DELALLOC)) {
2882 * With delalloc we want to sync the file
2883 * so that we can make sure we allocate blocks in the file.
2886 filemap_write_and_wait(mapping);
2889 if (EXT4_JOURNAL(inode) &&
2890 ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
2892 * This is a REALLY heavyweight approach, but the use of
2893 * bmap on dirty files is expected to be extremely rare:
2894 * only if we run lilo or swapon on a freshly made file
2895 * do we expect this to happen.
2897 * (bmap requires CAP_SYS_RAWIO so this does not
2898 * represent an unprivileged user DOS attack --- we'd be
2899 * in trouble if mortal users could trigger this path at will.)
2902 * NB. EXT4_STATE_JDATA is not set on files other than
2903 * regular files. If somebody wants to bmap a directory
2904 * or symlink and gets confused because the buffer
2905 * hasn't yet been flushed to disk, they deserve
2906 * everything they get.
2909 ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
2910 journal = EXT4_JOURNAL(inode);
2911 jbd2_journal_lock_updates(journal);
2912 err = jbd2_journal_flush(journal);
2913 jbd2_journal_unlock_updates(journal);
2919 return generic_block_bmap(mapping, block, ext4_get_block);
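/*
 * Userspace view (illustrative sketch): ext4_bmap() is what ultimately
 * services the FIBMAP ioctl. A minimal caller, assuming the file lives
 * on ext4 and the process has CAP_SYS_RAWIO:
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FIBMAP */

int main(int argc, char **argv)
{
	int fd, block = 0;	/* logical block 0; rewritten in place */

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || ioctl(fd, FIBMAP, &block) < 0) {
		perror("FIBMAP");
		return 1;
	}
	printf("logical block 0 -> physical block %d\n", block);
	close(fd);
	return 0;
}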
2922 static int ext4_readpage(struct file *file, struct page *page)
2925 struct inode *inode = page->mapping->host;
2927 trace_ext4_readpage(page);
2929 if (ext4_has_inline_data(inode))
2930 ret = ext4_readpage_inline(inode, page);
2933 return mpage_readpage(page, ext4_get_block);
2939 ext4_readpages(struct file *file, struct address_space *mapping,
2940 struct list_head *pages, unsigned nr_pages)
2942 struct inode *inode = mapping->host;
2944 /* If the file has inline data, no need to do readpages. */
2945 if (ext4_has_inline_data(inode))
2948 return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
2951 static void ext4_invalidatepage(struct page *page, unsigned int offset,
2952 unsigned int length)
2954 trace_ext4_invalidatepage(page, offset, length);
2956 /* No journalling happens on data buffers when this function is used */
2957 WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
2959 block_invalidatepage(page, offset, length);
2962 static int __ext4_journalled_invalidatepage(struct page *page,
2963 unsigned int offset,
2964 unsigned int length)
2966 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2968 trace_ext4_journalled_invalidatepage(page, offset, length);
2971 * If it's a full truncate we just forget about the pending dirtying
2973 if (offset == 0 && length == PAGE_CACHE_SIZE)
2974 ClearPageChecked(page);
2976 return jbd2_journal_invalidatepage(journal, page, offset, length);
2979 /* Wrapper for aops... */
2980 static void ext4_journalled_invalidatepage(struct page *page,
2981 unsigned int offset,
2982 unsigned int length)
2984 WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
2987 static int ext4_releasepage(struct page *page, gfp_t wait)
2989 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2991 trace_ext4_releasepage(page);
2993 /* Page has dirty journalled data -> cannot release */
2994 if (PageChecked(page))
2997 return jbd2_journal_try_to_free_buffers(journal, page, wait);
2999 return try_to_free_buffers(page);
3003 * ext4_get_block used when preparing for a DIO write or buffer write.
3004 * We allocate an uninitialized extent if blocks haven't been allocated.
3005 * The extent will be converted to initialized after the IO is complete.
3007 int ext4_get_block_write(struct inode *inode, sector_t iblock,
3008 struct buffer_head *bh_result, int create)
3010 ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
3011 inode->i_ino, create);
3012 return _ext4_get_block(inode, iblock, bh_result,
3013 EXT4_GET_BLOCKS_IO_CREATE_EXT);
3016 static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
3017 struct buffer_head *bh_result, int create)
3019 ext4_debug("ext4_get_block_write_nolock: inode %lu, create flag %d\n",
3020 inode->i_ino, create);
3021 return _ext4_get_block(inode, iblock, bh_result,
3022 EXT4_GET_BLOCKS_NO_LOCK);
3025 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
3026 ssize_t size, void *private, int ret,
3029 struct inode *inode = file_inode(iocb->ki_filp);
3030 ext4_io_end_t *io_end = iocb->private;
3032 /* if not async direct IO just return */
3034 inode_dio_done(inode);
3036 aio_complete(iocb, ret, 0);
3040 ext_debug("ext4_end_io_dio(): io_end 0x%p "
3041 "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
3042 iocb->private, io_end->inode->i_ino, iocb, offset,
3045 iocb->private = NULL;
3046 io_end->offset = offset;
3047 io_end->size = size;
3049 io_end->iocb = iocb;
3050 io_end->result = ret;
3052 ext4_put_io_end_defer(io_end);
3056 * For ext4 extent files, ext4 will do direct-io write to holes,
3057 * preallocated extents, and writes that extend the file; there is no need to
3058 * fall back to buffered IO.
3060 * For holes, we fallocate those blocks and mark them as uninitialized.
3061 * If those blocks were preallocated, we make sure they are split, but
3062 * still keep the range to write as uninitialized.
3064 * The unwritten extents will be converted to written when DIO is completed.
3065 * For async direct IO, since the IO may still be pending when we return, we
3066 * set up an end_io call back function, which will do the conversion
3067 * when async direct IO completed.
3069 * If the O_DIRECT write will extend the file then add this inode to the
3070 * orphan list. So recovery will truncate it back to the original size
3071 * if the machine crashes during the write.
3074 static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
3075 const struct iovec *iov, loff_t offset,
3076 unsigned long nr_segs)
3078 struct file *file = iocb->ki_filp;
3079 struct inode *inode = file->f_mapping->host;
3081 size_t count = iov_length(iov, nr_segs);
3083 get_block_t *get_block_func = NULL;
3085 loff_t final_size = offset + count;
3086 ext4_io_end_t *io_end = NULL;
3088 /* Use the old path for reads and writes beyond i_size. */
3089 if (rw != WRITE || final_size > inode->i_size)
3090 return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3092 BUG_ON(iocb->private == NULL);
3095 * Make all waiters for direct IO properly wait also for extent
3096 * conversion. This also disallows a race between truncate() and
3097 * overwrite DIO as i_dio_count needs to be incremented under i_mutex.
3100 atomic_inc(&inode->i_dio_count);
3102 /* If we do an overwrite dio, i_mutex locking can be released */
3103 overwrite = *((int *)iocb->private);
3106 down_read(&EXT4_I(inode)->i_data_sem);
3107 mutex_unlock(&inode->i_mutex);
3111 * We could direct write to holes and fallocate.
3113 * Allocated blocks to fill the hole are marked as
3114 * uninitialized to prevent a parallel buffered read from exposing
3115 * the stale data before the DIO completes the data IO.
3117 * As for previously fallocated extents, ext4 get_block will
3118 * simply mark the buffer mapped but still keep the
3119 * extents uninitialized.
3121 * In the non-AIO case, we convert those unwritten extents
3122 * to written after returning from blockdev_direct_IO.
3124 * For async DIO, the conversion needs to be deferred when the
3125 * IO is completed. The ext4 end_io callback function will be
3126 * called to take care of the conversion work. Here for async
3127 * case, we allocate an io_end structure to hook to the iocb.
3129 iocb->private = NULL;
3130 ext4_inode_aio_set(inode, NULL);
3131 if (!is_sync_kiocb(iocb)) {
3132 io_end = ext4_init_io_end(inode, GFP_NOFS);
3137 io_end->flag |= EXT4_IO_END_DIRECT;
3139 * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
3141 iocb->private = ext4_get_io_end(io_end);
3143 * we save the io structure for the current async direct
3144 * IO, so that later ext4_map_blocks() can flag in the
3145 * io structure whether there are unwritten extents that
3146 * need to be converted when the IO is completed.
3148 ext4_inode_aio_set(inode, io_end);
3152 get_block_func = ext4_get_block_write_nolock;
3154 get_block_func = ext4_get_block_write;
3155 dio_flags = DIO_LOCKING;
3157 ret = __blockdev_direct_IO(rw, iocb, inode,
3158 inode->i_sb->s_bdev, iov,
3166 * Put our reference to io_end. This can free the io_end structure e.g.
3167 * in sync IO case or in case of error. It can even perform extent
3168 * conversion if all bios we submitted finished before we got here.
3169 * Note that in that case iocb->private can already be set to NULL here.
3173 ext4_inode_aio_set(inode, NULL);
3174 ext4_put_io_end(io_end);
3176 * When no IO was submitted ext4_end_io_dio() was not
3177 * called, so we have to put the iocb's reference.
3179 if (ret <= 0 && ret != -EIOCBQUEUED && iocb->private) {
3180 WARN_ON(iocb->private != io_end);
3181 WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
3182 WARN_ON(io_end->iocb);
3184 * Generic code already did inode_dio_done() so we
3185 * have to clear EXT4_IO_END_DIRECT to not do it a second time.
3189 ext4_put_io_end(io_end);
3190 iocb->private = NULL;
3193 if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
3194 EXT4_STATE_DIO_UNWRITTEN)) {
3197 * In the non-AIO case, since the IO is already
3198 * completed, we can do the conversion right here.
3200 err = ext4_convert_unwritten_extents(NULL, inode,
3204 ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3209 inode_dio_done(inode);
3210 /* take i_mutex locking again if we do an overwrite dio */
3212 up_read(&EXT4_I(inode)->i_data_sem);
3213 mutex_lock(&inode->i_mutex);
3219 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
3220 const struct iovec *iov, loff_t offset,
3221 unsigned long nr_segs)
3223 struct file *file = iocb->ki_filp;
3224 struct inode *inode = file->f_mapping->host;
3228 * If we are doing data journalling, we don't support O_DIRECT
3230 if (ext4_should_journal_data(inode))
3233 /* Let buffer I/O handle the inline data case. */
3234 if (ext4_has_inline_data(inode))
3237 trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
3238 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3239 ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
3241 ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3242 trace_ext4_direct_IO_exit(inode, offset,
3243 iov_length(iov, nr_segs), rw, ret);
3248 * Pages can be marked dirty completely asynchronously from ext4's journalling
3249 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
3250 * much here because ->set_page_dirty is called under VFS locks. The page is
3251 * not necessarily locked.
3253 * We cannot just dirty the page and leave attached buffers clean, because the
3254 * buffers' dirty state is "definitive". We cannot just set the buffers dirty
3255 * or jbddirty because all the journalling code will explode.
3257 * So what we do is to mark the page "pending dirty" and next time writepage
3258 * is called, propagate that into the buffers appropriately.
3260 static int ext4_journalled_set_page_dirty(struct page *page)
3262 SetPageChecked(page);
3263 return __set_page_dirty_nobuffers(page);
3266 static const struct address_space_operations ext4_aops = {
3267 .readpage = ext4_readpage,
3268 .readpages = ext4_readpages,
3269 .writepage = ext4_writepage,
3270 .writepages = ext4_writepages,
3271 .write_begin = ext4_write_begin,
3272 .write_end = ext4_write_end,
3274 .invalidatepage = ext4_invalidatepage,
3275 .releasepage = ext4_releasepage,
3276 .direct_IO = ext4_direct_IO,
3277 .migratepage = buffer_migrate_page,
3278 .is_partially_uptodate = block_is_partially_uptodate,
3279 .error_remove_page = generic_error_remove_page,
3282 static const struct address_space_operations ext4_journalled_aops = {
3283 .readpage = ext4_readpage,
3284 .readpages = ext4_readpages,
3285 .writepage = ext4_writepage,
3286 .writepages = ext4_writepages,
3287 .write_begin = ext4_write_begin,
3288 .write_end = ext4_journalled_write_end,
3289 .set_page_dirty = ext4_journalled_set_page_dirty,
3291 .invalidatepage = ext4_journalled_invalidatepage,
3292 .releasepage = ext4_releasepage,
3293 .direct_IO = ext4_direct_IO,
3294 .is_partially_uptodate = block_is_partially_uptodate,
3295 .error_remove_page = generic_error_remove_page,
3298 static const struct address_space_operations ext4_da_aops = {
3299 .readpage = ext4_readpage,
3300 .readpages = ext4_readpages,
3301 .writepage = ext4_writepage,
3302 .writepages = ext4_writepages,
3303 .write_begin = ext4_da_write_begin,
3304 .write_end = ext4_da_write_end,
3306 .invalidatepage = ext4_da_invalidatepage,
3307 .releasepage = ext4_releasepage,
3308 .direct_IO = ext4_direct_IO,
3309 .migratepage = buffer_migrate_page,
3310 .is_partially_uptodate = block_is_partially_uptodate,
3311 .error_remove_page = generic_error_remove_page,
3314 void ext4_set_aops(struct inode *inode)
3316 switch (ext4_inode_journal_mode(inode)) {
3317 case EXT4_INODE_ORDERED_DATA_MODE:
3318 ext4_set_inode_state(inode, EXT4_STATE_ORDERED_MODE);
3320 case EXT4_INODE_WRITEBACK_DATA_MODE:
3321 ext4_clear_inode_state(inode, EXT4_STATE_ORDERED_MODE);
3323 case EXT4_INODE_JOURNAL_DATA_MODE:
3324 inode->i_mapping->a_ops = &ext4_journalled_aops;
3329 if (test_opt(inode->i_sb, DELALLOC))
3330 inode->i_mapping->a_ops = &ext4_da_aops;
3332 inode->i_mapping->a_ops = &ext4_aops;
3336 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3337 * up to the end of the block which corresponds to `from'.
3338 * This is required during truncate. We need to physically zero the tail end
3339 * of that block so it doesn't yield old data if the file is later grown.
3341 int ext4_block_truncate_page(handle_t *handle,
3342 struct address_space *mapping, loff_t from)
3344 unsigned offset = from & (PAGE_CACHE_SIZE-1);
3347 struct inode *inode = mapping->host;
3349 blocksize = inode->i_sb->s_blocksize;
3350 length = blocksize - (offset & (blocksize - 1));
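/*
 * Worked example (assumed numbers): with 4k pages/blocks and
 * from == 5000, the offset within the block is 904, so length == 3192
 * bytes are zeroed through the end of that block.
 */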
3352 return ext4_block_zero_page_range(handle, mapping, from, length);
3356 * ext4_block_zero_page_range() zeros out a mapping of length 'length'
3357 * starting from file offset 'from'. The range to be zeroed must
3358 * be contained within one block. If the specified range exceeds
3359 * the end of the block, it will be shortened to the end of the block
3360 * that corresponds to 'from'.
3362 int ext4_block_zero_page_range(handle_t *handle,
3363 struct address_space *mapping, loff_t from, loff_t length)
3365 ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
3366 unsigned offset = from & (PAGE_CACHE_SIZE-1);
3367 unsigned blocksize, max, pos;
3369 struct inode *inode = mapping->host;
3370 struct buffer_head *bh;
3374 page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
3375 mapping_gfp_mask(mapping) & ~__GFP_FS);
3379 blocksize = inode->i_sb->s_blocksize;
3380 max = blocksize - (offset & (blocksize - 1));
3383 * correct length if it does not fall between
3384 * 'from' and the end of the block
3386 if (length > max || length < 0)
3389 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3391 if (!page_has_buffers(page))
3392 create_empty_buffers(page, blocksize, 0);
3394 /* Find the buffer that contains "offset" */
3395 bh = page_buffers(page);
3397 while (offset >= pos) {
3398 bh = bh->b_this_page;
3402 if (buffer_freed(bh)) {
3403 BUFFER_TRACE(bh, "freed: skip");
3406 if (!buffer_mapped(bh)) {
3407 BUFFER_TRACE(bh, "unmapped");
3408 ext4_get_block(inode, iblock, bh, 0);
3409 /* unmapped? It's a hole - nothing to do */
3410 if (!buffer_mapped(bh)) {
3411 BUFFER_TRACE(bh, "still unmapped");
3416 /* Ok, it's mapped. Make sure it's up-to-date */
3417 if (PageUptodate(page))
3418 set_buffer_uptodate(bh);
3420 if (!buffer_uptodate(bh)) {
3422 ll_rw_block(READ, 1, &bh);
3424 /* Uhhuh. Read error. Complain and punt. */
3425 if (!buffer_uptodate(bh))
3428 if (ext4_should_journal_data(inode)) {
3429 BUFFER_TRACE(bh, "get write access");
3430 err = ext4_journal_get_write_access(handle, bh);
3434 zero_user(page, offset, length);
3435 BUFFER_TRACE(bh, "zeroed end of block");
3437 if (ext4_should_journal_data(inode)) {
3438 err = ext4_handle_dirty_metadata(handle, inode, bh);
3441 mark_buffer_dirty(bh);
3442 if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE))
3443 err = ext4_jbd2_file_inode(handle, inode);
3448 page_cache_release(page);
3452 int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
3453 loff_t lstart, loff_t length)
3455 struct super_block *sb = inode->i_sb;
3456 struct address_space *mapping = inode->i_mapping;
3457 unsigned partial_start, partial_end;
3458 ext4_fsblk_t start, end;
3459 loff_t byte_end = (lstart + length - 1);
3462 partial_start = lstart & (sb->s_blocksize - 1);
3463 partial_end = byte_end & (sb->s_blocksize - 1);
3465 start = lstart >> sb->s_blocksize_bits;
3466 end = byte_end >> sb->s_blocksize_bits;
3468 /* Handle partial zero within the single block */
3469 if (start == end &&
3470 (partial_start || (partial_end != sb->s_blocksize - 1))) {
3471 err = ext4_block_zero_page_range(handle, mapping,
3475 /* Handle partial zero out on the start of the range */
3476 if (partial_start) {
3477 err = ext4_block_zero_page_range(handle, mapping,
3478 lstart, sb->s_blocksize);
3482 /* Handle partial zero out on the end of the range */
3483 if (partial_end != sb->s_blocksize - 1)
3484 err = ext4_block_zero_page_range(handle, mapping,
3485 byte_end - partial_end,
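/*
 * Illustrative sketch (userspace C, not kernel code) of the
 * partial-block split computed above, with assumed numbers and a 1k
 * block size:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t blocksize = 1024;
	uint64_t lstart = 1500, length = 3000;	/* zero bytes 1500..4499 */
	uint64_t byte_end = lstart + length - 1;
	unsigned int partial_start = lstart & (blocksize - 1);
	unsigned int partial_end = byte_end & (blocksize - 1);

	/* both ends land mid-block, so both need explicit zeroing */
	printf("partial_start=%u partial_end=%u\n",
	       partial_start, partial_end);
	return 0;	/* prints: partial_start=476 partial_end=403 */
}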
3490 int ext4_can_truncate(struct inode *inode)
3492 if (S_ISREG(inode->i_mode))
3494 if (S_ISDIR(inode->i_mode))
3496 if (S_ISLNK(inode->i_mode))
3497 return !ext4_inode_is_fast_symlink(inode);
3502 * ext4_punch_hole: punches a hole in a file by releasing the blocks
3503 * associated with the given offset and length
3505 * @inode: File inode
3506 * @offset: The offset where the hole will begin
3507 * @len: The length of the hole
3509 * Returns: 0 on success or negative on failure
3512 int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
3514 struct super_block *sb = inode->i_sb;
3515 ext4_lblk_t first_block, stop_block;
3516 struct address_space *mapping = inode->i_mapping;
3517 loff_t first_block_offset, last_block_offset;
3519 unsigned int credits;
3522 if (!S_ISREG(inode->i_mode))
3525 if (EXT4_SB(sb)->s_cluster_ratio > 1) {
3526 /* TODO: Add support for bigalloc file systems */
3530 trace_ext4_punch_hole(inode, offset, length);
3533 * Write out all dirty pages to avoid race conditions
3534 * Then release them.
3536 if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
3537 ret = filemap_write_and_wait_range(mapping, offset,
3538 offset + length - 1);
3543 mutex_lock(&inode->i_mutex);
3544 /* It's not possible to punch a hole in an append-only file */
3545 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
3549 if (IS_SWAPFILE(inode)) {
3554 /* No need to punch hole beyond i_size */
3555 if (offset >= inode->i_size)
3559 * If the hole extends beyond i_size, set the hole
3560 * to end after the page that contains i_size
3562 if (offset + length > inode->i_size) {
3563 length = inode->i_size +
3564 PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
3568 if (offset & (sb->s_blocksize - 1) ||
3569 (offset + length) & (sb->s_blocksize - 1)) {
3571 * Attach jinode to inode for jbd2 if we do any zeroing of partial blocks.
3574 ret = ext4_inode_attach_jinode(inode);
3580 first_block_offset = round_up(offset, sb->s_blocksize);
3581 last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
3583 /* Now release the pages and zero the block-aligned part of the pages */
3584 if (last_block_offset > first_block_offset)
3585 truncate_pagecache_range(inode, first_block_offset,
3588 /* Wait for all existing dio workers; newcomers will block on i_mutex */
3589 ext4_inode_block_unlocked_dio(inode);
3590 inode_dio_wait(inode);
3592 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3593 credits = ext4_writepage_trans_blocks(inode);
3595 credits = ext4_blocks_for_truncate(inode);
3596 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
3597 if (IS_ERR(handle)) {
3598 ret = PTR_ERR(handle);
3599 ext4_std_error(sb, ret);
3603 ret = ext4_zero_partial_blocks(handle, inode, offset,
3608 first_block = (offset + sb->s_blocksize - 1) >>
3609 EXT4_BLOCK_SIZE_BITS(sb);
3610 stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
3612 /* If there are no blocks to remove, return now */
3613 if (first_block >= stop_block)
3616 down_write(&EXT4_I(inode)->i_data_sem);
3617 ext4_discard_preallocations(inode);
3619 ret = ext4_es_remove_extent(inode, first_block,
3620 stop_block - first_block);
3622 up_write(&EXT4_I(inode)->i_data_sem);
3626 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3627 ret = ext4_ext_remove_space(inode, first_block,
3630 ret = ext4_free_hole_blocks(handle, inode, first_block,
3633 ext4_discard_preallocations(inode);
3634 up_write(&EXT4_I(inode)->i_data_sem);
3636 ext4_handle_sync(handle);
3637 inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
3638 ext4_mark_inode_dirty(handle, inode);
3640 ext4_journal_stop(handle);
3642 ext4_inode_resume_unlocked_dio(inode);
3644 mutex_unlock(&inode->i_mutex);
3648 int ext4_inode_attach_jinode(struct inode *inode)
3650 struct ext4_inode_info *ei = EXT4_I(inode);
3651 struct jbd2_inode *jinode;
3653 if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
3656 jinode = jbd2_alloc_inode(GFP_KERNEL);
3657 spin_lock(&inode->i_lock);
3660 spin_unlock(&inode->i_lock);
3663 ei->jinode = jinode;
3664 jbd2_journal_init_jbd_inode(ei->jinode, inode);
3667 spin_unlock(&inode->i_lock);
3668 if (unlikely(jinode != NULL))
3669 jbd2_free_inode(jinode);
3676 * We block out ext4_get_block() block instantiations across the entire
3677 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
3678 * simultaneously on behalf of the same inode.
3680 * As we work through the truncate and commit bits of it to the journal there
3681 * is one core, guiding principle: the file's tree must always be consistent on
3682 * disk. We must be able to restart the truncate after a crash.
3684 * The file's tree may be transiently inconsistent in memory (although it
3685 * probably isn't), but whenever we close off and commit a journal transaction,
3686 * the contents of (the filesystem + the journal) must be consistent and
3687 * restartable. It's pretty simple, really: bottom up, right to left (although
3688 * left-to-right works OK too).
3690 * Note that at recovery time, journal replay occurs *before* the restart of
3691 * truncate against the orphan inode list.
3693 * The committed inode has the new, desired i_size (which is the same as
3694 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
3695 * that this inode's truncate did not complete and it will again call
3696 * ext4_truncate() to have another go. So there will be instantiated blocks
3697 * to the right of the truncation point in a crashed ext4 filesystem. But
3698 * that's fine - as long as they are linked from the inode, the post-crash
3699 * ext4_truncate() run will find them and release them.
3701 void ext4_truncate(struct inode *inode)
3703 struct ext4_inode_info *ei = EXT4_I(inode);
3704 unsigned int credits;
3706 struct address_space *mapping = inode->i_mapping;
3709 * There is a possibility that we're either freeing the inode
3710 * or it is a completely new inode. In those cases we might not
3711 * have i_mutex locked because it's not necessary.
3713 if (!(inode->i_state & (I_NEW|I_FREEING)))
3714 WARN_ON(!mutex_is_locked(&inode->i_mutex));
3715 trace_ext4_truncate_enter(inode);
3717 if (!ext4_can_truncate(inode))
3720 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3722 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
3723 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
3725 if (ext4_has_inline_data(inode)) {
3728 ext4_inline_data_truncate(inode, &has_inline);
3733 /* If we zero out the tail of the page, we have to create a jinode for jbd2 */
3734 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
3735 if (ext4_inode_attach_jinode(inode) < 0)
3739 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3740 credits = ext4_writepage_trans_blocks(inode);
3742 credits = ext4_blocks_for_truncate(inode);
3744 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
3745 if (IS_ERR(handle)) {
3746 ext4_std_error(inode->i_sb, PTR_ERR(handle));
3750 if (inode->i_size & (inode->i_sb->s_blocksize - 1))
3751 ext4_block_truncate_page(handle, mapping, inode->i_size);
3754 * We add the inode to the orphan list, so that if this
3755 * truncate spans multiple transactions, and we crash, we will
3756 * resume the truncate when the filesystem recovers. It also
3757 * marks the inode dirty, to catch the new size.
3759 * Implication: the file must always be in a sane, consistent
3760 * truncatable state while each transaction commits.
3762 if (ext4_orphan_add(handle, inode))
3765 down_write(&EXT4_I(inode)->i_data_sem);
3767 ext4_discard_preallocations(inode);
3769 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3770 ext4_ext_truncate(handle, inode);
3772 ext4_ind_truncate(handle, inode);
3774 up_write(&ei->i_data_sem);
3777 ext4_handle_sync(handle);
3781 * If this was a simple ftruncate() and the file will remain alive,
3782 * then we need to clear up the orphan record which we created above.
3783 * However, if this was a real unlink then we were called by
3784 * ext4_delete_inode(), and we allow that function to clean up the
3785 * orphan info for us.
3788 ext4_orphan_del(handle, inode);
3790 inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
3791 ext4_mark_inode_dirty(handle, inode);
3792 ext4_journal_stop(handle);
3794 trace_ext4_truncate_exit(inode);
3798 * ext4_get_inode_loc returns with an extra refcount against the inode's
3799 * underlying buffer_head on success. If 'in_mem' is true, we have all
3800 * data in memory that is needed to recreate the on-disk version of this inode.
3803 static int __ext4_get_inode_loc(struct inode *inode,
3804 struct ext4_iloc *iloc, int in_mem)
3806 struct ext4_group_desc *gdp;
3807 struct buffer_head *bh;
3808 struct super_block *sb = inode->i_sb;
3810 int inodes_per_block, inode_offset;
3813 if (!ext4_valid_inum(sb, inode->i_ino))
3816 iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
3817 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
3822 * Figure out the offset within the block group inode table
3824 inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
3825 inode_offset = ((inode->i_ino - 1) %
3826 EXT4_INODES_PER_GROUP(sb));
3827 block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
3828 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
3830 bh = sb_getblk(sb, block);
3833 if (!buffer_uptodate(bh)) {
3837 * If the buffer has the write error flag, we have failed
3838 * to write out another inode in the same block. In this
3839 * case, we don't have to read the block because we may
3840 * read the old inode data successfully.
3842 if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
3843 set_buffer_uptodate(bh);
3845 if (buffer_uptodate(bh)) {
3846 /* someone brought it uptodate while we waited */
3852 * If we have all information of the inode in memory and this
3853 * is the only valid inode in the block, we need not read the block.
3857 struct buffer_head *bitmap_bh;
3860 start = inode_offset & ~(inodes_per_block - 1);
3862 /* Is the inode bitmap in cache? */
3863 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
3864 if (unlikely(!bitmap_bh))
3868 * If the inode bitmap isn't in cache then the
3869 * optimisation may end up performing two reads instead
3870 * of one, so skip it.
3872 if (!buffer_uptodate(bitmap_bh)) {
3876 for (i = start; i < start + inodes_per_block; i++) {
3877 if (i == inode_offset)
3879 if (ext4_test_bit(i, bitmap_bh->b_data))
3883 if (i == start + inodes_per_block) {
3884 /* all other inodes are free, so skip I/O */
3885 memset(bh->b_data, 0, bh->b_size);
3886 set_buffer_uptodate(bh);
3894 * If we need to do any I/O, try to pre-readahead extra
3895 * blocks from the inode table.
3897 if (EXT4_SB(sb)->s_inode_readahead_blks) {
3898 ext4_fsblk_t b, end, table;
3900 __u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;
3902 table = ext4_inode_table(sb, gdp);
3903 /* s_inode_readahead_blks is always a power of 2 */
3904 b = block & ~((ext4_fsblk_t) ra_blks - 1);
3908 num = EXT4_INODES_PER_GROUP(sb);
3909 if (ext4_has_group_desc_csum(sb))
3910 num -= ext4_itable_unused_count(sb, gdp);
3911 table += num / inodes_per_block;
3915 sb_breadahead(sb, b++);
3919 * There are other valid inodes in the buffer, this inode
3920 * has in-inode xattrs, or we don't have this inode in memory.
3921 * Read the block from disk.
3923 trace_ext4_load_inode(inode);
3925 bh->b_end_io = end_buffer_read_sync;
3926 submit_bh(READ | REQ_META | REQ_PRIO, bh);
3928 if (!buffer_uptodate(bh)) {
3929 EXT4_ERROR_INODE_BLOCK(inode, block,
3930 "unable to read itable block");
3940 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
3942 /* We have all inode data except xattrs in memory here. */
3943 return __ext4_get_inode_loc(inode, iloc,
3944 !ext4_test_inode_state(inode, EXT4_STATE_XATTR));
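/*
 * Illustrative sketch (userspace C, not kernel code) of the inode-table
 * arithmetic in __ext4_get_inode_loc(), using made-up geometry: 256-byte
 * inodes in 4k blocks, 8192 inodes per group:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ino = 12345;
	uint32_t inodes_per_group = 8192;	/* assumed geometry */
	uint32_t inode_size = 256;
	uint32_t inodes_per_block = 4096 / inode_size;	/* 16 */

	uint32_t group = (ino - 1) / inodes_per_group;
	uint32_t index = (ino - 1) % inodes_per_group;
	uint32_t itable_block = index / inodes_per_block;
	uint32_t offset = (index % inodes_per_block) * inode_size;

	printf("group %u, itable block %u, byte offset %u\n",
	       group, itable_block, offset);
	return 0;	/* prints: group 1, itable block 259, byte offset 2048 */
}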
3947 void ext4_set_inode_flags(struct inode *inode)
3949 unsigned int flags = EXT4_I(inode)->i_flags;
3951 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
3952 if (flags & EXT4_SYNC_FL)
3953 inode->i_flags |= S_SYNC;
3954 if (flags & EXT4_APPEND_FL)
3955 inode->i_flags |= S_APPEND;
3956 if (flags & EXT4_IMMUTABLE_FL)
3957 inode->i_flags |= S_IMMUTABLE;
3958 if (flags & EXT4_NOATIME_FL)
3959 inode->i_flags |= S_NOATIME;
3960 if (flags & EXT4_DIRSYNC_FL)
3961 inode->i_flags |= S_DIRSYNC;
3964 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
3965 void ext4_get_inode_flags(struct ext4_inode_info *ei)
3967 unsigned int vfs_fl;
3968 unsigned long old_fl, new_fl;
3971 vfs_fl = ei->vfs_inode.i_flags;
3972 old_fl = ei->i_flags;
3973 new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
3974 EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
3976 if (vfs_fl & S_SYNC)
3977 new_fl |= EXT4_SYNC_FL;
3978 if (vfs_fl & S_APPEND)
3979 new_fl |= EXT4_APPEND_FL;
3980 if (vfs_fl & S_IMMUTABLE)
3981 new_fl |= EXT4_IMMUTABLE_FL;
3982 if (vfs_fl & S_NOATIME)
3983 new_fl |= EXT4_NOATIME_FL;
3984 if (vfs_fl & S_DIRSYNC)
3985 new_fl |= EXT4_DIRSYNC_FL;
3986 } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
3989 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
3990 struct ext4_inode_info *ei)
3993 struct inode *inode = &(ei->vfs_inode);
3994 struct super_block *sb = inode->i_sb;
3996 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3997 EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
3998 /* we are using combined 48 bit field */
3999 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
4000 le32_to_cpu(raw_inode->i_blocks_lo);
4001 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
4002 /* i_blocks is in units of the file system block size */
4003 return i_blocks << (inode->i_blkbits - 9);
4008 return le32_to_cpu(raw_inode->i_blocks_lo);
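/*
 * Illustrative sketch (userspace C, not kernel code) of the 48-bit
 * i_blocks combination performed above when HUGE_FILE is enabled:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t i_blocks_high = 0x0001;	/* assumed on-disk values */
	uint32_t i_blocks_lo = 0x00000010;
	uint64_t i_blocks = ((uint64_t)i_blocks_high << 32) | i_blocks_lo;

	/* without the per-inode HUGE_FILE flag this counts 512-byte
	 * sectors; with it, it counts fs blocks and is scaled by
	 * (blkbits - 9) as in ext4_inode_blocks() above */
	printf("combined i_blocks = 0x%llx\n", (unsigned long long)i_blocks);
	return 0;		/* prints: combined i_blocks = 0x100000010 */
}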
4012 static inline void ext4_iget_extra_inode(struct inode *inode,
4013 struct ext4_inode *raw_inode,
4014 struct ext4_inode_info *ei)
4016 __le32 *magic = (void *)raw_inode +
4017 EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
4018 if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
4019 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
4020 ext4_find_inline_data_nolock(inode);
4022 EXT4_I(inode)->i_inline_off = 0;
4025 struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4027 struct ext4_iloc iloc;
4028 struct ext4_inode *raw_inode;
4029 struct ext4_inode_info *ei;
4030 struct inode *inode;
4031 journal_t *journal = EXT4_SB(sb)->s_journal;
4037 inode = iget_locked(sb, ino);
4039 return ERR_PTR(-ENOMEM);
4040 if (!(inode->i_state & I_NEW))
4046 ret = __ext4_get_inode_loc(inode, &iloc, 0);
4049 raw_inode = ext4_raw_inode(&iloc);
4051 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4052 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4053 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4054 EXT4_INODE_SIZE(inode->i_sb)) {
4055 EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
4056 EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
4057 EXT4_INODE_SIZE(inode->i_sb));
4062 ei->i_extra_isize = 0;
4064 /* Precompute checksum seed for inode metadata */
4065 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
4066 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
4067 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4069 __le32 inum = cpu_to_le32(inode->i_ino);
4070 __le32 gen = raw_inode->i_generation;
4071 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
4073 ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
4077 if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
4078 EXT4_ERROR_INODE(inode, "checksum invalid");
4083 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4084 i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4085 i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4086 if (!(test_opt(inode->i_sb, NO_UID32))) {
4087 i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4088 i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4090 i_uid_write(inode, i_uid);
4091 i_gid_write(inode, i_gid);
4092 set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
4094 ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
4095 ei->i_inline_off = 0;
4096 ei->i_dir_start_lookup = 0;
4097 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4098 /* We now have enough fields to check if the inode was active or not.
4099 * This is needed because nfsd might try to access dead inodes;
4100 * the test is the same one that e2fsck uses.
4101 * NeilBrown 1999oct15
4103 if (inode->i_nlink == 0) {
4104 if ((inode->i_mode == 0 ||
4105 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
4106 ino != EXT4_BOOT_LOADER_INO) {
4107 /* this inode is deleted */
4111 /* The only unlinked inodes we let through here have
4112 * valid i_mode and are being read by the orphan
4113 * recovery code: that's fine, we're about to complete
4114 * the process of deleting those.
4115 * OR it is the EXT4_BOOT_LOADER_INO which is
4116 * not initialized on a new filesystem. */
4118 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4119 inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4120 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4121 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
4123 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4124 inode->i_size = ext4_isize(raw_inode);
4125 ei->i_disksize = inode->i_size;
4127 ei->i_reserved_quota = 0;
4129 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4130 ei->i_block_group = iloc.block_group;
4131 ei->i_last_alloc_group = ~0;
4133 * NOTE! The in-memory inode i_data array is in little-endian order
4134 * even on big-endian machines: we do NOT byteswap the block numbers!
4136 for (block = 0; block < EXT4_N_BLOCKS; block++)
4137 ei->i_data[block] = raw_inode->i_block[block];
4138 INIT_LIST_HEAD(&ei->i_orphan);
4141 * Set transaction id's of transactions that have to be committed
4142 * to finish f[data]sync. We set them to the currently running transaction
4143 * as we cannot be sure that the inode or some of its metadata isn't
4144 * part of the transaction - the inode could have been reclaimed and
4145 * now it is reread from disk.
4148 transaction_t *transaction;
4151 read_lock(&journal->j_state_lock);
4152 if (journal->j_running_transaction)
4153 transaction = journal->j_running_transaction;
4154 else
4155 transaction = journal->j_committing_transaction;
4156 if (transaction)
4157 tid = transaction->t_tid;
4158 else
4159 tid = journal->j_commit_sequence;
4160 read_unlock(&journal->j_state_lock);
4161 ei->i_sync_tid = tid;
4162 ei->i_datasync_tid = tid;
4165 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4166 if (ei->i_extra_isize == 0) {
4167 /* The extra space is currently unused. Use it. */
4168 ei->i_extra_isize = sizeof(struct ext4_inode) -
4169 EXT4_GOOD_OLD_INODE_SIZE;
4170 } else {
4171 ext4_iget_extra_inode(inode, raw_inode, ei);
4175 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
4176 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
4177 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
4178 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
4180 inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
4181 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4182 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4183 inode->i_version |=
4184 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
4188 if (ei->i_file_acl &&
4189 !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
4190 EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
4191 ei->i_file_acl);
4194 } else if (!ext4_has_inline_data(inode)) {
4195 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
4196 if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4197 (S_ISLNK(inode->i_mode) &&
4198 !ext4_inode_is_fast_symlink(inode))))
4199 /* Validate extent which is part of inode */
4200 ret = ext4_ext_check_inode(inode);
4201 } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4202 (S_ISLNK(inode->i_mode) &&
4203 !ext4_inode_is_fast_symlink(inode))) {
4204 /* Validate block references which are part of inode */
4205 ret = ext4_ind_check_inode(inode);
4211 if (S_ISREG(inode->i_mode)) {
4212 inode->i_op = &ext4_file_inode_operations;
4213 inode->i_fop = &ext4_file_operations;
4214 ext4_set_aops(inode);
4215 } else if (S_ISDIR(inode->i_mode)) {
4216 inode->i_op = &ext4_dir_inode_operations;
4217 inode->i_fop = &ext4_dir_operations;
4218 } else if (S_ISLNK(inode->i_mode)) {
4219 if (ext4_inode_is_fast_symlink(inode)) {
4220 inode->i_op = &ext4_fast_symlink_inode_operations;
4221 nd_terminate_link(ei->i_data, inode->i_size,
4222 sizeof(ei->i_data) - 1);
4224 inode->i_op = &ext4_symlink_inode_operations;
4225 ext4_set_aops(inode);
4227 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
4228 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
4229 inode->i_op = &ext4_special_inode_operations;
4230 if (raw_inode->i_block[0])
4231 init_special_inode(inode, inode->i_mode,
4232 old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
4233 else
4234 init_special_inode(inode, inode->i_mode,
4235 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
4236 } else if (ino == EXT4_BOOT_LOADER_INO) {
4237 make_bad_inode(inode);
4240 EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
4244 ext4_set_inode_flags(inode);
4245 unlock_new_inode(inode);
4246 return inode;
4248 bad_inode:
4249 brelse(iloc.bh);
4250 iget_failed(inode);
4251 return ERR_PTR(ret);
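/*
 * The two device-number encodings chosen between above, sketched after
 * include/linux/kdev_t.h: the legacy format packs dev_t into 16 bits
 * (kept in i_block[0]), the new format packs a 12-bit major and 20-bit
 * minor (kept in i_block[1]).  Helper names are local stand-ins.
 */
static unsigned int demo_old_encode_dev(unsigned int major, unsigned int minor)
{
	return (major << 8) | minor;	/* both must fit in 8 bits */
}

static unsigned int demo_new_encode_dev(unsigned int major, unsigned int minor)
{
	return (minor & 0xff) | (major << 8) | ((minor & ~0xffu) << 12);
}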
4254 static int ext4_inode_blocks_set(handle_t *handle,
4255 struct ext4_inode *raw_inode,
4256 struct ext4_inode_info *ei)
4258 struct inode *inode = &(ei->vfs_inode);
4259 u64 i_blocks = inode->i_blocks;
4260 struct super_block *sb = inode->i_sb;
4262 if (i_blocks <= ~0U) {
4264 * i_blocks can be represented in a 32 bit variable
4265 * as a multiple of 512 bytes
4267 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4268 raw_inode->i_blocks_high = 0;
4269 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4270 return 0;
4272 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
4273 return -EFBIG;
4275 if (i_blocks <= 0xffffffffffffULL) {
4277 * i_blocks can be represented in a 48 bit variable
4278 * as a multiple of 512 bytes
4280 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4281 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4282 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4283 } else {
4284 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4285 /* i_blocks is stored in units of the file system block size */
4286 i_blocks = i_blocks >> (inode->i_blkbits - 9);
4287 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4288 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4290 return 0;
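/*
 * Encode-side sketch of the same scheme with stand-in types: up to
 * 2^32 - 1 sectors only i_blocks_lo is needed; up to 2^48 - 1 the top
 * bits spill into the 16-bit i_blocks_high; past that the count is
 * stored in filesystem-block units and the HUGE_FILE inode flag is
 * set (the RO_COMPAT_HUGE_FILE feature check is omitted here).
 */
struct demo_raw_blocks {
	unsigned int lo;	/* i_blocks_lo, le32 on disk */
	unsigned short hi;	/* i_blocks_high, le16 on disk */
	int huge_file;		/* EXT4_INODE_HUGE_FILE flag */
};

static void demo_encode_i_blocks(unsigned long long i_blocks,
				 unsigned int blkbits,
				 struct demo_raw_blocks *out)
{
	out->huge_file = 0;
	if (i_blocks > 0xffffffffffffULL) {
		out->huge_file = 1;	/* switch to fs-block units */
		i_blocks >>= blkbits - 9;
	}
	out->lo = (unsigned int)i_blocks;
	out->hi = (unsigned short)(i_blocks >> 32);
}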
4294 * Post the struct inode info into an on-disk inode location in the
4295 * buffer-cache. This gobbles the caller's reference to the
4296 * buffer_head in the inode location struct.
4298 * The caller must have write access to iloc->bh.
4300 static int ext4_do_update_inode(handle_t *handle,
4301 struct inode *inode,
4302 struct ext4_iloc *iloc)
4304 struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
4305 struct ext4_inode_info *ei = EXT4_I(inode);
4306 struct buffer_head *bh = iloc->bh;
4307 int err = 0, rc, block;
4308 int need_datasync = 0;
4312 /* For fields not tracked in the in-memory inode,
4313 * initialise them to zero for new inodes. */
4314 if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
4315 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
4317 ext4_get_inode_flags(ei);
4318 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4319 i_uid = i_uid_read(inode);
4320 i_gid = i_gid_read(inode);
4321 if (!(test_opt(inode->i_sb, NO_UID32))) {
4322 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
4323 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
4325 * Fix up interoperability with old kernels. Otherwise, old inodes get
4326 * re-used with the upper 16 bits of the uid/gid intact
4328 if (!ei->i_dtime) {
4329 raw_inode->i_uid_high =
4330 cpu_to_le16(high_16_bits(i_uid));
4331 raw_inode->i_gid_high =
4332 cpu_to_le16(high_16_bits(i_gid));
4333 } else {
4334 raw_inode->i_uid_high = 0;
4335 raw_inode->i_gid_high = 0;
4337 } else {
4338 raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
4339 raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
4340 raw_inode->i_uid_high = 0;
4341 raw_inode->i_gid_high = 0;
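/*
 * For illustration: with 32-bit IDs on disk, an id of 0x00012345 is
 * stored split across the two fields,
 *
 *	i_uid_low  = cpu_to_le16(0x2345);	<- low 16 bits
 *	i_uid_high = cpu_to_le16(0x0001);	<- high 16 bits
 *
 * whereas NO_UID32 squashes the id through fs_high2lowuid(), which
 * maps anything that does not fit in 16 bits to the overflow id.
 */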
4343 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4345 EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
4346 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4347 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4348 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4350 if (ext4_inode_blocks_set(handle, raw_inode, ei))
4351 goto out_brelse;
4352 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4353 raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
4354 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
4355 cpu_to_le32(EXT4_OS_HURD))
4356 raw_inode->i_file_acl_high =
4357 cpu_to_le16(ei->i_file_acl >> 32);
4358 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4359 if (ei->i_disksize != ext4_isize(raw_inode)) {
4360 ext4_isize_set(raw_inode, ei->i_disksize);
4361 need_datasync = 1;
4363 if (ei->i_disksize > 0x7fffffffULL) {
4364 struct super_block *sb = inode->i_sb;
4365 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
4366 EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
4367 EXT4_SB(sb)->s_es->s_rev_level ==
4368 cpu_to_le32(EXT4_GOOD_OLD_REV)) {
4369 /* If this is the first large file
4370 * created, add a flag to the superblock.
4372 err = ext4_journal_get_write_access(handle,
4373 EXT4_SB(sb)->s_sbh);
4376 ext4_update_dynamic_rev(sb);
4377 EXT4_SET_RO_COMPAT_FEATURE(sb,
4378 EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
4379 ext4_handle_sync(handle);
4380 err = ext4_handle_dirty_super(handle, sb);
4383 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4384 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4385 if (old_valid_dev(inode->i_rdev)) {
4386 raw_inode->i_block[0] =
4387 cpu_to_le32(old_encode_dev(inode->i_rdev));
4388 raw_inode->i_block[1] = 0;
4390 raw_inode->i_block[0] = 0;
4391 raw_inode->i_block[1] =
4392 cpu_to_le32(new_encode_dev(inode->i_rdev));
4393 raw_inode->i_block[2] = 0;
4395 } else if (!ext4_has_inline_data(inode)) {
4396 for (block = 0; block < EXT4_N_BLOCKS; block++)
4397 raw_inode->i_block[block] = ei->i_data[block];
4400 raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
4401 if (ei->i_extra_isize) {
4402 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4403 raw_inode->i_version_hi =
4404 cpu_to_le32(inode->i_version >> 32);
4405 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
4408 ext4_inode_csum_set(inode, raw_inode, ei);
4410 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4411 rc = ext4_handle_dirty_metadata(handle, NULL, bh);
4414 ext4_clear_inode_state(inode, EXT4_STATE_NEW);
4416 ext4_update_inode_fsync_trans(handle, inode, need_datasync);
4419 ext4_std_error(inode->i_sb, err);
4424 * ext4_write_inode()
4426 * We are called from a few places:
4428 * - Within generic_file_write() for O_SYNC files.
4429 * Here, there will be no transaction running. We wait for any running
4430 * transaction to commit.
4432 * - Within sys_sync(), kupdate and such.
4433 * We wait on commit, if told to.
4435 * - Within prune_icache() (PF_MEMALLOC == true)
4436 * Here we simply return. We can't afford to block kswapd on the
4437 * journal commit.
4439 * In all cases it is actually safe for us to return without doing anything,
4440 * because the inode has been copied into a raw inode buffer in
4441 * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
4442 * knfsd.
4444 * Note that we are absolutely dependent upon all inode dirtiers doing the
4445 * right thing: they *must* call mark_inode_dirty() after dirtying info in
4446 * which we are interested.
4448 * It would be a bug for them to not do this. The code:
4450 * mark_inode_dirty(inode)
4451 * stuff();
4452 * inode->i_size = expr;
4454 * is in error because a kswapd-driven write_inode() could occur while
4455 * `stuff()' is running, and the new i_size will be lost. Plus the inode
4456 * will no longer be on the superblock's dirty inode list.
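 *
 * The safe pattern is the reverse: make the changes first and call
 * mark_inode_dirty() last:
 *
 *	stuff();
 *	inode->i_size = expr;
 *	mark_inode_dirty(inode);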
4458 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
4462 if (current->flags & PF_MEMALLOC)
4463 return 0;
4465 if (EXT4_SB(inode->i_sb)->s_journal) {
4466 if (ext4_journal_current_handle()) {
4467 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
4472 if (wbc->sync_mode != WB_SYNC_ALL)
4473 return 0;
4475 err = ext4_force_commit(inode->i_sb);
4476 } else {
4477 struct ext4_iloc iloc;
4479 err = __ext4_get_inode_loc(inode, &iloc, 0);
4482 if (wbc->sync_mode == WB_SYNC_ALL)
4483 sync_dirty_buffer(iloc.bh);
4484 if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
4485 EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
4486 "IO error syncing inode");
4495 * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
4496 * buffers that are attached to a page straddling i_size and are undergoing
4497 * commit. In that case we have to wait for commit to finish and try again.
4499 static void ext4_wait_for_tail_page_commit(struct inode *inode)
4503 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
4504 tid_t commit_tid = 0;
4507 offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
4509 * All buffers in the last page remain valid? Then there's nothing to
4510 * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE ==
4511 * blocksize case.
4513 if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits))
4514 return;
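/*
 * Worked example, assuming 4096-byte pages and 1024-byte blocks:
 * offsets 3073..4095 mean i_size ends inside the page's last block,
 * so every buffer in the page lies at least partly below i_size,
 * all of them remain valid, and there is nothing to invalidate.
 */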
4516 page = find_lock_page(inode->i_mapping,
4517 inode->i_size >> PAGE_CACHE_SHIFT);
4518 if (!page)
4519 return;
4520 ret = __ext4_journalled_invalidatepage(page, offset,
4521 PAGE_CACHE_SIZE - offset);
4523 page_cache_release(page);
4527 read_lock(&journal->j_state_lock);
4528 if (journal->j_committing_transaction)
4529 commit_tid = journal->j_committing_transaction->t_tid;
4530 read_unlock(&journal->j_state_lock);
4531 if (commit_tid)
4532 jbd2_log_wait_commit(journal, commit_tid);
4539 * Called from notify_change.
4541 * We want to trap VFS attempts to truncate the file as soon as
4542 * possible. In particular, we want to make sure that when the VFS
4543 * shrinks i_size, we put the inode on the orphan list and modify
4544 * i_disksize immediately, so that during the subsequent flushing of
4545 * dirty pages and freeing of disk blocks, we can guarantee that any
4546 * commit will leave the blocks being flushed in an unused state on
4547 * disk. (On recovery, the inode will get truncated and the blocks will
4548 * be freed, so we have a strong guarantee that no future commit will
4549 * leave these blocks visible to the user.)
4551 * Another thing we have to assure is that if we are in ordered mode
4552 * and the inode is still attached to the committing transaction, we must
4553 * start writeout of all the dirty pages which are being truncated.
4554 * This way we are sure that all the data written in the previous
4555 * transaction are already on disk (truncate waits for pages under
4558 * Called with inode->i_mutex down.
4560 int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4562 struct inode *inode = dentry->d_inode;
4565 const unsigned int ia_valid = attr->ia_valid;
4567 error = inode_change_ok(inode, attr);
4571 if (is_quota_modification(inode, attr))
4572 dquot_initialize(inode);
4573 if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
4574 (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
4577 /* (user+group)*(old+new) structure, inode write (sb,
4578 * inode block, ? - but truncate inode update has it) */
4579 handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
4580 (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
4581 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
4582 if (IS_ERR(handle)) {
4583 error = PTR_ERR(handle);
4584 goto err_out;
4586 error = dquot_transfer(inode, attr);
4588 ext4_journal_stop(handle);
4591 /* Update corresponding info in inode so that everything is in
4592 * one transaction */
4593 if (attr->ia_valid & ATTR_UID)
4594 inode->i_uid = attr->ia_uid;
4595 if (attr->ia_valid & ATTR_GID)
4596 inode->i_gid = attr->ia_gid;
4597 error = ext4_mark_inode_dirty(handle, inode);
4598 ext4_journal_stop(handle);
4601 if (attr->ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
4603 loff_t oldsize = inode->i_size;
4605 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4606 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4608 if (attr->ia_size > sbi->s_bitmap_maxbytes)
4609 return -EFBIG;
4611 if (S_ISREG(inode->i_mode) &&
4612 (attr->ia_size < inode->i_size)) {
4613 if (ext4_should_order_data(inode)) {
4614 error = ext4_begin_ordered_truncate(inode,
4615 attr->ia_size);
4619 handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
4620 if (IS_ERR(handle)) {
4621 error = PTR_ERR(handle);
4622 goto err_out;
4624 if (ext4_handle_valid(handle)) {
4625 error = ext4_orphan_add(handle, inode);
4628 down_write(&EXT4_I(inode)->i_data_sem);
4629 EXT4_I(inode)->i_disksize = attr->ia_size;
4630 rc = ext4_mark_inode_dirty(handle, inode);
4634 * We have to update i_size under i_data_sem together
4635 * with i_disksize to avoid races with writeback code
4636 * running ext4_wb_update_i_disksize().
4639 i_size_write(inode, attr->ia_size);
4640 up_write(&EXT4_I(inode)->i_data_sem);
4641 ext4_journal_stop(handle);
4643 ext4_orphan_del(NULL, inode);
4647 i_size_write(inode, attr->ia_size);
4650 * Blocks are going to be removed from the inode. Wait
4651 * for dio in flight. Temporarily disable
4652 * dioread_nolock to prevent livelock.
4655 if (!ext4_should_journal_data(inode)) {
4656 ext4_inode_block_unlocked_dio(inode);
4657 inode_dio_wait(inode);
4658 ext4_inode_resume_unlocked_dio(inode);
4659 } else
4660 ext4_wait_for_tail_page_commit(inode);
4663 * Truncate pagecache after we've waited for commit
4664 * in data=journal mode to make pages freeable.
4666 truncate_pagecache(inode, oldsize, inode->i_size);
4669 * We want to call ext4_truncate() even if attr->ia_size ==
4670 * inode->i_size for cases like truncation of fallocated space
4672 if (attr->ia_valid & ATTR_SIZE)
4673 ext4_truncate(inode);
4676 setattr_copy(inode, attr);
4677 mark_inode_dirty(inode);
4681 * If the call to ext4_truncate failed to get a transaction handle at
4682 * all, we need to clean up the in-core orphan list manually.
4684 if (orphan && inode->i_nlink)
4685 ext4_orphan_del(NULL, inode);
4687 if (!rc && (ia_valid & ATTR_MODE))
4688 rc = ext4_acl_chmod(inode);
4690 err_out:
4691 ext4_std_error(inode->i_sb, error);
4697 int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
4700 struct inode *inode;
4701 unsigned long long delalloc_blocks;
4703 inode = dentry->d_inode;
4704 generic_fillattr(inode, stat);
4707 * We can't update i_blocks if the block allocation is delayed;
4708 * otherwise, in the case of a system crash before the real block
4709 * allocation is done, we would have i_blocks inconsistent with the
4710 * on-disk file blocks.
4711 * We always keep i_blocks updated together with the real
4712 * allocation. But so as not to confuse userspace, stat
4713 * will return block counts that include the delayed allocation
4714 * blocks for this file.
4716 delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
4717 EXT4_I(inode)->i_reserved_data_blocks);
4719 stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits-9);
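/*
 * The adjustment above as plain arithmetic, with stand-in parameter
 * names: reserved delalloc blocks are folded into st_blocks in
 * 512-byte units.  E.g. 10 pending 4K blocks (blocksize_bits = 12)
 * add 10 << 3 = 80 sectors.
 */
static unsigned long long demo_stat_blocks(unsigned long long blocks,
					   unsigned long long delalloc_blocks,
					   unsigned int blocksize_bits)
{
	return blocks + (delalloc_blocks << (blocksize_bits - 9));
}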
4723 static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
4726 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4727 return ext4_ind_trans_blocks(inode, lblocks);
4728 return ext4_ext_index_trans_blocks(inode, pextents);
4732 * Account for index blocks, block group bitmaps and block group
4733 * descriptor blocks if we modify data blocks and index blocks. In the
4734 * worst case, the index blocks are spread over different block groups.
4736 * If data blocks are discontiguous, they can be spread over
4737 * different block groups too. Even if they are contiguous, with flexbg
4738 * they could still cross a block group boundary.
4740 * Also account for superblock, inode, quota and xattr blocks
4742 static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
4745 ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
4751 * How many index blocks do we need to touch to map @lblocks logical blocks
4752 * to @pextents physical extents?
4754 idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
4759 * Now let's see how many group bitmaps and group descriptor blocks we
4760 * need to account for.
4762 groups = idxblocks + pextents;
4764 if (groups > ngroups)
4765 groups = ngroups;
4766 if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
4767 gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
4769 /* bitmaps and block group descriptor blocks */
4770 ret += groups + gdpblocks;
4772 /* Blocks for super block, inode, quota and xattr blocks */
4773 ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
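/*
 * Shape of the estimate above with stand-in names, plus a worked
 * example: mapping 4 extents through 2 index blocks gives
 * groups = 2 + 4 = 6, so with plenty of block groups and descriptor
 * blocks the result is 2 + 6 + 6 + fixed, where fixed is the
 * EXT4_META_TRANS_BLOCKS() overhead for sb/inode/quota/xattr blocks.
 */
static int demo_meta_trans_blocks(int idxblocks, int pextents,
				  int ngroups, int gdb_count, int fixed)
{
	int groups = idxblocks + pextents;
	int gdpblocks = groups;

	if (groups > ngroups)
		groups = ngroups;
	if (groups > gdb_count)
		gdpblocks = gdb_count;
	return idxblocks + groups + gdpblocks + fixed;
}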
4779 * Calculate the total number of credits to reserve to fit
4780 * the modification of a single page into a single transaction,
4781 * which may include multiple chunks of block allocations.
4783 * This could be called via ext4_write_begin()
4785 * We need to consider the worst case, when
4786 * one new block is allocated per extent.
4788 int ext4_writepage_trans_blocks(struct inode *inode)
4790 int bpp = ext4_journal_blocks_per_page(inode);
4793 ret = ext4_meta_trans_blocks(inode, bpp, bpp);
4795 /* Account for data blocks for journalled mode */
4796 if (ext4_should_journal_data(inode))
4797 ret += bpp;
4802 * Calculate the journal credits for a chunk of data modification.
4804 * This is called from DIO, fallocate, or whatever else calls
4805 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
4807 * Journal buffers for data blocks are not included here, as DIO
4808 * and fallocate do not need to journal data buffers.
4810 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
4812 return ext4_meta_trans_blocks(inode, nrblocks, 1);
4816 * The caller must have previously called ext4_reserve_inode_write().
4817 * Given this, we know that the caller already has write access to iloc->bh.
4819 int ext4_mark_iloc_dirty(handle_t *handle,
4820 struct inode *inode, struct ext4_iloc *iloc)
4824 if (IS_I_VERSION(inode))
4825 inode_inc_iversion(inode);
4827 /* the do_update_inode consumes one bh->b_count */
4830 /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
4831 err = ext4_do_update_inode(handle, inode, iloc);
4837 * On success, we end up with an outstanding reference count against
4838 * iloc->bh. This _must_ be cleaned up later.
4842 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
4843 struct ext4_iloc *iloc)
4847 err = ext4_get_inode_loc(inode, iloc);
4849 BUFFER_TRACE(iloc->bh, "get_write_access");
4850 err = ext4_journal_get_write_access(handle, iloc->bh);
4856 ext4_std_error(inode->i_sb, err);
4861 * Expand an inode by new_extra_isize bytes.
4862 * Returns 0 on success or negative error number on failure.
4864 static int ext4_expand_extra_isize(struct inode *inode,
4865 unsigned int new_extra_isize,
4866 struct ext4_iloc iloc,
4869 struct ext4_inode *raw_inode;
4870 struct ext4_xattr_ibody_header *header;
4872 if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
4873 return 0;
4875 raw_inode = ext4_raw_inode(&iloc);
4877 header = IHDR(inode, raw_inode);
4879 /* No extended attributes present */
4880 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
4881 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
4882 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
4883 new_extra_isize);
4884 EXT4_I(inode)->i_extra_isize = new_extra_isize;
4888 /* try to expand with EAs present */
4889 return ext4_expand_extra_isize_ea(inode, new_extra_isize,
4890 raw_inode, handle);
4894 * What we do here is to mark the in-core inode as clean with respect to inode
4895 * dirtiness (it may still be data-dirty).
4896 * This means that the in-core inode may be reaped by prune_icache
4897 * without having to perform any I/O. This is a very good thing,
4898 * because *any* task may call prune_icache - even ones which
4899 * have a transaction open against a different journal.
4901 * Is this cheating? Not really. Sure, we haven't written the
4902 * inode out, but prune_icache isn't a user-visible syncing function.
4903 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
4904 * we start and wait on commits.
4906 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4908 struct ext4_iloc iloc;
4909 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4910 static unsigned int mnt_count;
4914 trace_ext4_mark_inode_dirty(inode, _RET_IP_);
4915 err = ext4_reserve_inode_write(handle, inode, &iloc);
4916 if (ext4_handle_valid(handle) &&
4917 EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
4918 !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
4920 * We need extra buffer credits since we may write into EA block
4921 * with this same handle. If journal_extend fails, then it will
4922 * only result in a minor loss of functionality for that inode.
4923 * If this is felt to be critical, then e2fsck should be run to
4924 * force a large enough s_min_extra_isize.
4926 if ((jbd2_journal_extend(handle,
4927 EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
4928 ret = ext4_expand_extra_isize(inode,
4929 sbi->s_want_extra_isize,
4930 iloc, handle);
4931 if (ret) {
4932 ext4_set_inode_state(inode,
4933 EXT4_STATE_NO_EXPAND);
4934 if (mnt_count !=
4935 le16_to_cpu(sbi->s_es->s_mnt_count)) {
4936 ext4_warning(inode->i_sb,
4937 "Unable to expand inode %lu. Delete"
4938 " some EAs or run e2fsck.",
4939 inode->i_ino);
4940 mnt_count =
4941 le16_to_cpu(sbi->s_es->s_mnt_count);
4947 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
4952 * ext4_dirty_inode() is called from __mark_inode_dirty()
4954 * We're really interested in the case where a file is being extended.
4955 * i_size has been changed by generic_commit_write() and we thus need
4956 * to include the updated inode in the current transaction.
4958 * Also, dquot_alloc_block() will always dirty the inode when blocks
4959 * are allocated to the file.
4961 * If the inode is marked synchronous, we don't honour that here - doing
4962 * so would cause a commit on atime updates, which we don't bother doing.
4963 * We handle synchronous inodes at the highest possible level.
4965 void ext4_dirty_inode(struct inode *inode, int flags)
4969 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
4973 ext4_mark_inode_dirty(handle, inode);
4975 ext4_journal_stop(handle);
4982 * Bind an inode's backing buffer_head into this transaction, to prevent
4983 * it from being flushed to disk early. Unlike
4984 * ext4_reserve_inode_write, this leaves behind no bh reference and
4985 * returns no iloc structure, so the caller needs to repeat the iloc
4986 * lookup to mark the inode dirty later.
4988 static int ext4_pin_inode(handle_t *handle, struct inode *inode)
4990 struct ext4_iloc iloc;
4994 err = ext4_get_inode_loc(inode, &iloc);
4995 if (!err) {
4996 BUFFER_TRACE(iloc.bh, "get_write_access");
4997 err = jbd2_journal_get_write_access(handle, iloc.bh);
4998 if (!err)
4999 err = ext4_handle_dirty_metadata(handle,
5000 NULL, iloc.bh);
5005 ext4_std_error(inode->i_sb, err);
5010 int ext4_change_inode_journal_flag(struct inode *inode, int val)
5017 * We have to be very careful here: changing a data block's
5018 * journaling status dynamically is dangerous. If we write a
5019 * data block to the journal, change the status and then delete
5020 * that block, we risk forgetting to revoke the old log record
5021 * from the journal and so a subsequent replay can corrupt data.
5022 * So, first we make sure that the journal is empty and that
5023 * nobody is changing anything.
5026 journal = EXT4_JOURNAL(inode);
5029 if (is_journal_aborted(journal))
5030 return -EROFS;
5031 /* We have to allocate physical blocks for delalloc blocks
5032 * before flushing the journal; otherwise delalloc blocks cannot
5033 * be allocated any more. Worse, a truncate on delalloc blocks
5034 * could trigger a BUG by flushing delalloc blocks in the journal.
5035 * There is no delalloc block in non-journal data mode.
5037 if (val && test_opt(inode->i_sb, DELALLOC)) {
5038 err = ext4_alloc_da_blocks(inode);
5043 /* Wait for all existing dio workers */
5044 ext4_inode_block_unlocked_dio(inode);
5045 inode_dio_wait(inode);
5047 jbd2_journal_lock_updates(journal);
5050 * OK, there are no updates running now, and all cached data is
5051 * synced to disk. We are now in a completely consistent state
5052 * which doesn't have anything in the journal, and we know that
5053 * no filesystem updates are running, so it is safe to modify
5054 * the inode's in-core data-journaling state flag now.
5057 if (val)
5058 ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
5059 else {
5060 jbd2_journal_flush(journal);
5061 ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
5063 ext4_set_aops(inode);
5065 jbd2_journal_unlock_updates(journal);
5066 ext4_inode_resume_unlocked_dio(inode);
5068 /* Finally we can mark the inode as dirty. */
5070 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
5071 if (IS_ERR(handle))
5072 return PTR_ERR(handle);
5074 err = ext4_mark_inode_dirty(handle, inode);
5075 ext4_handle_sync(handle);
5076 ext4_journal_stop(handle);
5077 ext4_std_error(inode->i_sb, err);
5082 static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
5084 return !buffer_mapped(bh);
5087 int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5089 struct page *page = vmf->page;
5093 struct file *file = vma->vm_file;
5094 struct inode *inode = file_inode(file);
5095 struct address_space *mapping = inode->i_mapping;
5097 get_block_t *get_block;
5100 sb_start_pagefault(inode->i_sb);
5101 file_update_time(vma->vm_file);
5102 /* Delalloc case is easy... */
5103 if (test_opt(inode->i_sb, DELALLOC) &&
5104 !ext4_should_journal_data(inode) &&
5105 !ext4_nonda_switch(inode->i_sb)) {
5106 do {
5107 ret = __block_page_mkwrite(vma, vmf,
5108 ext4_da_get_block_prep);
5109 } while (ret == -ENOSPC &&
5110 ext4_should_retry_alloc(inode->i_sb, &retries));
5115 size = i_size_read(inode);
5116 /* Page got truncated from under us? */
5117 if (page->mapping != mapping || page_offset(page) > size) {
5119 ret = VM_FAULT_NOPAGE;
5123 if (page->index == size >> PAGE_CACHE_SHIFT)
5124 len = size & ~PAGE_CACHE_MASK;
5125 else
5126 len = PAGE_CACHE_SIZE;
5128 * Return if we have all the buffers mapped. This avoids the need to do
5129 * journal_start/journal_stop which can block and take a long time
5131 if (page_has_buffers(page)) {
5132 if (!ext4_walk_page_buffers(NULL, page_buffers(page),
5133 0, len, NULL,
5134 ext4_bh_unmapped)) {
5135 /* Wait so that we don't change page under IO */
5136 wait_for_stable_page(page);
5137 ret = VM_FAULT_LOCKED;
5142 /* OK, we need to fill the hole... */
5143 if (ext4_should_dioread_nolock(inode))
5144 get_block = ext4_get_block_write;
5145 else
5146 get_block = ext4_get_block;
5148 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
5149 ext4_writepage_trans_blocks(inode));
5150 if (IS_ERR(handle)) {
5151 ret = VM_FAULT_SIGBUS;
5154 ret = __block_page_mkwrite(vma, vmf, get_block);
5155 if (!ret && ext4_should_journal_data(inode)) {
5156 if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
5157 PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
5159 ret = VM_FAULT_SIGBUS;
5160 ext4_journal_stop(handle);
5163 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
5165 ext4_journal_stop(handle);
5166 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
5167 goto retry_alloc;
5169 ret = block_page_mkwrite_return(ret);
5171 sb_end_pagefault(inode->i_sb);