/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <trace/events/jbd2.h>
/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
}
/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!trylock_page(page))
		goto nope;

	page_cache_get(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	page_cache_release(page);
	return;

nope:
	__brelse(bh);
}
/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct journal_head *descriptor;
	struct commit_header *tmp;
	struct buffer_head *bh;
	int ret;
	struct timespec now = current_kernel_time();

	if (is_journal_aborted(journal))
		return 0;

	descriptor = jbd2_journal_get_descriptor_buffer(journal);
	if (!descriptor)
		return 1;

	bh = jh2bh(descriptor);

	tmp = (struct commit_header *)bh->b_data;
	tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
	tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
	tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

	if (JBD2_HAS_COMPAT_FEATURE(journal,
				    JBD2_FEATURE_COMPAT_CHECKSUM)) {
		tmp->h_chksum_type = JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0] = cpu_to_be32(crc32_sum);
	}

	JBUFFER_TRACE(descriptor, "submit commit block");
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

	if (journal->j_flags & JBD2_BARRIER &&
	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		ret = submit_bh(WRITE_SYNC_PLUG | WRITE_BARRIER, bh);
		if (ret == -EOPNOTSUPP) {
			printk(KERN_WARNING
			       "JBD2: Disabling barriers on %s, "
			       "not supported by device\n", journal->j_devname);
			write_lock(&journal->j_state_lock);
			journal->j_flags &= ~JBD2_BARRIER;
			write_unlock(&journal->j_state_lock);

			/* And try again, without the barrier */
			lock_buffer(bh);
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			ret = submit_bh(WRITE_SYNC_PLUG, bh);
		}
	} else {
		ret = submit_bh(WRITE_SYNC_PLUG, bh);
	}
	*cbh = bh;
	return ret;
}
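/*
 * With JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT the commit record above may
 * reach the disk before the rest of the transaction's journal IO; the
 * checksum stored in the commit header is then what lets recovery
 * reject a commit block that landed ahead of its descriptor and data
 * blocks.
 */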
/*
 * This function, along with journal_submit_commit_record(), allows the
 * commit record to be written asynchronously.
 */
static int journal_wait_on_commit_record(journal_t *journal,
					 struct buffer_head *bh)
{
	int ret = 0;

retry:
	clear_buffer_dirty(bh);
	wait_on_buffer(bh);
	if (buffer_eopnotsupp(bh) && (journal->j_flags & JBD2_BARRIER)) {
		printk(KERN_WARNING
		       "JBD2: %s: disabling barriers on %s - not supported "
		       "by device\n", __func__, journal->j_devname);
		write_lock(&journal->j_state_lock);
		journal->j_flags &= ~JBD2_BARRIER;
		write_unlock(&journal->j_state_lock);

		lock_buffer(bh);
		clear_buffer_dirty(bh);
		set_buffer_uptodate(bh);
		bh->b_end_io = journal_end_buffer_io_sync;

		ret = submit_bh(WRITE_SYNC_PLUG, bh);
		if (ret) {
			unlock_buffer(bh);
			return ret;
		}
		goto retry;
	}

	if (unlikely(!buffer_uptodate(bh)))
		ret = -EIO;
	put_bh(bh);		/* One for getblk() */
	jbd2_journal_put_journal_head(bh2jh(bh));

	return ret;
}
/*
 * Write the filemap data using the writepage() address_space_operation.
 * We don't do block allocation here, even for delalloc.  We don't use
 * writepages() because with delayed allocation we may be doing block
 * allocation in writepages().
 */
static int journal_submit_inode_data_buffers(struct address_space *mapping)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = 0,
		.range_end = i_size_read(mapping->host),
	};

	ret = generic_writepages(mapping, &wbc);
	return ret;
}
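/*
 * Note the lock-cycling protocol used below: j_list_lock is dropped
 * while an inode's pages are written out, so JI_COMMIT_RUNNING is what
 * keeps the jbd2_inode alive meanwhile; the teardown path in
 * jbd2_journal_release_jbd_inode() waits for __JI_COMMIT_RUNNING to
 * clear before the structure may be freed.
 */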
/*
 * Submit all the data buffers of inodes associated with the transaction
 * to disk.
 *
 * We are in a committing transaction.  Therefore no new inode can be
 * added to our inode list.  We use the JI_COMMIT_RUNNING flag to protect
 * the inode we currently operate on from being released while we write
 * out pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;
	struct address_space *mapping;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		mapping = jinode->i_vfs_inode->i_mapping;
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		/*
		 * Submit the inode data buffers.  We use writepage
		 * instead of writepages because writepages can do
		 * block allocation with delalloc; we need to write
		 * only allocated blocks here.
		 */
		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
		err = journal_submit_inode_data_buffers(mapping);
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}
/*
 * Wait for data submitted for writeout, and refile inodes to the proper
 * transaction if needed.
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	/* For locking, see the comment in journal_submit_data_buffers() */
	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
		if (err) {
			/*
			 * Because AS_EIO is cleared by
			 * filemap_fdatawait_range(), set it again so
			 * that user process can get -EIO from fsync().
			 */
			set_bit(AS_EIO,
				&jinode->i_vfs_inode->i_mapping->flags);

			if (!ret)
				ret = err;
		}
		spin_lock(&journal->j_list_lock);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	/* Now refile inode to proper lists */
	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				 &jinode->i_transaction->t_inode_list);
		} else {
			jinode->i_transaction = NULL;
		}
	}
	spin_unlock(&journal->j_list_lock);

	return ret;
}
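/*
 * Fold one buffer into the running transaction checksum.  The commit
 * path seeds crc32_sum with ~0 and chains every journal block through
 * this helper (crc32_sum = jbd2_checksum_data(crc32_sum, bh)), so the
 * value written into the commit block covers the whole transaction.
 */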
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	char *addr;
	__u32 checksum;

	addr = kmap_atomic(page, KM_USER0);
	checksum = crc32_be(crc32_sum,
		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
	kunmap_atomic(addr, KM_USER0);

	return checksum;
}
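/*
 * A 64-bit block number is split across the two on-disk tag fields: for
 * example, block 0x123456789 is stored as t_blocknr = 0x23456789 with
 * t_blocknr_high = 0x1.  The two-step shift "(block >> 31) >> 1" is
 * equivalent to "block >> 32" but stays well-defined even if the block
 * argument is ever narrowed to a 32-bit type, where a single shift by
 * 32 would be undefined behaviour.
 */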
static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
			    unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
	if (tag_bytes > JBD2_TAG_SIZE32)
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}
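/*
 * Overview of a commit, matching the "commit phase N" debug messages
 * below: lock down the running transaction and drain updates, flush
 * data buffers and revoke records (phases 1-2), write metadata through
 * the journal via descriptor blocks (phase 3), wait for the metadata
 * and control-block IO (phases 4-5), write and wait on the commit
 * record, then process the forget list and file the transaction for
 * checkpointing (phases 6-7).
 */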
/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh, *new_jh, *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_header_t *header;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i, to_free = 0;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL; /* For transactional checksums */
	__u32 crc32_sum = ~0;
	int write_op = WRITE;

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

#ifdef COMMIT_STATS
	spin_lock(&journal->j_list_lock);
	summarise_journal_usage(journal);
	spin_unlock(&journal->j_list_lock);
#endif

	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		jbd2_journal_update_superblock(journal, 1);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}
	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);

	trace_jbd2_start_commit(journal, commit_transaction);
	jbd_debug(1, "JBD: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_LOCKED;

	/*
	 * Use plugged writes here, since we want to submit several before
	 * we unplug the device.  We don't do explicit unplugging in here,
	 * instead we rely on sync_buffer() doing the unplug for us.
	 */
	if (commit_transaction->t_synchronous_commit)
		write_op = WRITE_SYNC_PLUG;
	trace_jbd2_commit_locking(journal, commit_transaction);
	stats.run.rs_wait = commit_transaction->t_max_wait;
	stats.run.rs_locked = jiffies;
	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
					      stats.run.rs_locked);
	spin_lock(&commit_transaction->t_handle_lock);
	while (atomic_read(&commit_transaction->t_updates)) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
				TASK_UNINTERRUPTIBLE);
		if (atomic_read(&commit_transaction->t_updates)) {
			spin_unlock(&commit_transaction->t_handle_lock);
			write_unlock(&journal->j_state_lock);
			schedule();
			write_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);
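	/*
	 * The loop above is the classic prepare_to_wait()/recheck/schedule()
	 * pattern: the task is queued on j_wait_updates before t_updates is
	 * re-tested, so a handle that drops the last update and wakes the
	 * queue between the test and the sleep cannot be missed.
	 */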
	J_ASSERT(atomic_read(&commit_transaction->t_outstanding_credits) <=
			journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a jbd2_journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple jbd2_journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}

	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory.
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal);
	spin_unlock(&journal->j_list_lock);
	jbd_debug(3, "JBD: commit phase 1\n");

	/*
	 * Switch to a new revoke table.
	 */
	jbd2_journal_switch_revoke_table(journal);

	trace_jbd2_commit_flushing(journal, commit_transaction);
	stats.run.rs_flushing = jiffies;
	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
					     stats.run.rs_flushing);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	write_unlock(&journal->j_state_lock);

	jbd_debug(3, "JBD: commit phase 2\n");
	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	err = journal_submit_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

	jbd2_journal_write_revoke_records(journal, commit_transaction,
					  write_op);

	jbd_debug(3, "JBD: commit phase 2\n");
	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	write_unlock(&journal->j_state_lock);

	trace_jbd2_commit_logging(journal, commit_transaction);
	stats.run.rs_logging = jiffies;
	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
					       stats.run.rs_logging);
	stats.run.rs_blocks =
		atomic_read(&commit_transaction->t_outstanding_credits);
	stats.run.rs_blocks_logged = 0;

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 atomic_read(&commit_transaction->t_outstanding_credits));
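	/*
	 * Each pass of the loop below either starts a new descriptor block
	 * or adds one tag to the current one.  On disk, a descriptor block
	 * is a journal_header_t followed by an array of journal_block_tag_t
	 * entries, each giving the final on-disk location of one of the
	 * journal blocks that follow; the last tag in a descriptor is
	 * flagged with JBD2_FLAG_LAST_TAG.
	 */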
	descriptor = NULL;
	bufs = 0;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_buffer_abort_trigger(jh,
						  jh->b_frozen_data ?
						  jh->b_frozen_triggers :
						  jh->b_triggers);
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}
		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			struct buffer_head *bh;

			J_ASSERT(bufs == 0);

			jbd_debug(4, "JBD: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(journal);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			bh = jh2bh(descriptor);
			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
				(unsigned long long)bh->b_blocknr, bh->b_data);
			header = (journal_header_t *)&bh->b_data[0];
			header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
			header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

			tagp = &bh->b_data[sizeof(journal_header_t)];
			space_left = bh->b_size - sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(bh);
			set_buffer_dirty(bh);
			wbuf[bufs++] = bh;

			/* Record it so that we can wait for IO
			   completion later */
			BUFFER_TRACE(bh, "ph3: file as descriptor");
			jbd2_journal_file_buffer(descriptor, commit_transaction,
						 BJ_LogCtl);
		}

		/* Where is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}
		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by jbd2_journal_next_log_block() also.
		 */
		atomic_dec(&commit_transaction->t_outstanding_credits);

		/* Bump b_count to prevent truncate from stumbling over
		   the shadowed buffer!  @@@ This can go if we ever get
		   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);
		/* Make a temporary IO buffer with which to write it out
		   (this will requeue both the metadata buffer and the
		   temporary IO buffer).  new_bh goes on BJ_IO. */

		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		/*
		 * akpm: jbd2_journal_write_metadata_buffer() sets
		 * new_bh->b_transaction to commit_transaction.
		 * We need to clean this up before we release new_bh
		 * (which is of type BJ_IO)
		 */
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
							   jh, &new_jh, blocknr);
		if (flags < 0) {
			jbd2_journal_abort(journal, flags);
			continue;
		}
		set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
		wbuf[bufs++] = jh2bh(new_jh);

		/* Record the new block's tag in the current descriptor
		   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be32(tag_flag);
		tagp += tag_bytes;
		space_left -= tag_bytes;

		if (first_tag) {
			memcpy(tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}
		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip!  space_left must leave room for one more
		   tag plus the 16-byte UUID that can follow it. */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16) {

			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
			   submitting the IOs.  "tag" still points to
			   the last tag we set up. */

			tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);

start_journal_io:
			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				/*
				 * Compute checksum.
				 */
				if (JBD2_HAS_COMPAT_FEATURE(journal,
					JBD2_FEATURE_COMPAT_CHECKSUM)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(write_op, bh);
			}
			cond_resched();
			stats.run.rs_blocks_logged += bufs;

			/* Force a new descriptor to be generated next
			   time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}
	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT);
	commit_transaction->t_state = T_COMMIT_DFLUSH;
	write_unlock(&journal->j_state_lock);

	/*
	 * If the journal is not located on the file system device,
	 * then we must flush the file system device before we issue
	 * the commit record
	 */
	if (commit_transaction->t_need_data_flush &&
	    (journal->j_fs_dev != journal->j_dev) &&
	    (journal->j_flags & JBD2_BARRIER))
		blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL,
				   BLKDEV_IFL_WAIT);

	/* Done it all: now write the commit record asynchronously. */
	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						   &cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
		if (journal->j_flags & JBD2_BARRIER)
			blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL,
					   BLKDEV_IFL_WAIT);
	}

	err = journal_finish_inode_data_buffers(journal, commit_transaction);
	if (err) {
		printk(KERN_WARNING
			"JBD2: Detected IO errors while flushing file data "
			"on %s\n", journal->j_devname);
		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
			jbd2_journal_abort(journal, err);
		err = 0;
	}
	/* Lo and behold: we have just managed to send a transaction to
	   the log.  Before we can commit it, wait for the IO so far to
	   complete.  Control buffers being written are on the
	   transaction's t_log_list queue, and metadata buffers are on
	   the t_iobuf_list queue.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load. */

	jbd_debug(3, "JBD: commit phase 3\n");

	/*
	 * akpm: these are BJ_IO, and j_list_lock is not needed.
	 * See __journal_try_to_free_buffer.
	 */
wait_for_iobuf:
	while (commit_transaction->t_iobuf_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_iobuf_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_iobuf;
		}
		if (cond_resched())
			goto wait_for_iobuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		clear_buffer_jwrite(bh);

		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
		jbd2_journal_unfile_buffer(journal, jh);

		/*
		 * ->t_iobuf_list should contain only dummy buffer_heads
		 * which were created by jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to unlock and free the corresponding
		   shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_bit(BH_JWrite, &bh->b_state);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));

		/* The metadata is now released for reuse, but we need
		   to remember it against this transaction so that when
		   we finally commit, we can do any checkpointing
		   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		/* Wake up any transactions which were waiting for this
		   IO to complete */
		wake_up_bit(&bh->b_state, BH_Unshadow);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT(commit_transaction->t_shadow_list == NULL);
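	/*
	 * Every metadata buffer has now been written through its temporary
	 * BJ_IO twin and refiled to BJ_Forget, and nothing remains shadowed:
	 * tasks blocked in do_get_write_access() waiting on BH_Unshadow can
	 * copy the buffer again.
	 */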
	jbd_debug(3, "JBD: commit phase 4\n");

	/* Here we wait for the revoke record and descriptor record buffers */
wait_for_ctlbuf:
	while (commit_transaction->t_log_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_log_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_ctlbuf;
		}
		if (cond_resched())
			goto wait_for_ctlbuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_journal_unfile_buffer(journal, jh);
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		jbd2_journal_abort(journal, err);
	jbd_debug(3, "JBD: commit phase 5\n");

	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
	commit_transaction->t_state = T_COMMIT_JFLUSH;
	write_unlock(&journal->j_state_lock);
	if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						   &cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}
	if (!err && !is_journal_aborted(journal))
		err = journal_wait_on_commit_record(journal, cbh);
	if (err)
		jbd2_journal_abort(journal, err);

	/* End of a transaction!  Finally, we can do checkpoint
	   processing: any buffers committed as a result of this
	   transaction can be removed from any checkpoint list it was on
	   before. */

	jbd_debug(3, "JBD: commit phase 6\n");
	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);
		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 *
		 * We also know that the frozen data has already fired
		 * its triggers if they exist, so we can clear that too.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
				jh->b_frozen_triggers = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
			jh->b_frozen_triggers = NULL;
		}
		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by jbd2_journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/* A buffer which has been freed while still being
		 * journaled by a previous transaction may end up still
		 * being dirty here, but we want to avoid writing back
		 * that buffer in the future after the "add to orphan"
		 * operation has been committed.  That's not only a
		 * performance gain, it also stops aliasing problems if
		 * the buffer is left behind for writeback and gets
		 * reallocated for another use in a different page. */
		if (buffer_freed(bh) && !jh->b_next_transaction) {
			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
			JBUFFER_TRACE(jh, "refile for checkpoint writeback");
			__jbd2_journal_refile_buffer(jh);
			jbd_unlock_bh_state(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/* The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed. *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list. */
			JBUFFER_TRACE(jh, "refile or unfile freed buffer");
			__jbd2_journal_refile_buffer(jh);
			if (!jh->b_transaction) {
				jbd_unlock_bh_state(bh);
				/* needs a brelse */
				jbd2_journal_remove_journal_head(bh);
				release_buffer_page(bh);
			} else
				jbd_unlock_bh_state(bh);
		}
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		write_unlock(&journal->j_state_lock);
		goto restart_loop;
	}
	/* Done with this transaction! */

	jbd_debug(3, "JBD: commit phase 7\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);

	commit_transaction->t_start = jiffies;
	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
					      commit_transaction->t_start);

	/*
	 * File the transaction statistics
	 */
	stats.ts_tid = commit_transaction->t_tid;
	stats.run.rs_handle_count =
		atomic_read(&commit_transaction->t_handle_count);
	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
			     commit_transaction->t_tid, &stats.run);

	/*
	 * Calculate overall stats
	 */
	spin_lock(&journal->j_history_lock);
	journal->j_stats.ts_tid++;
	journal->j_stats.run.rs_wait += stats.run.rs_wait;
	journal->j_stats.run.rs_running += stats.run.rs_running;
	journal->j_stats.run.rs_locked += stats.run.rs_locked;
	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
	journal->j_stats.run.rs_logging += stats.run.rs_logging;
	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);

	commit_transaction->t_state = T_FINISHED;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
	/*
	 * Weight the average time higher than the current commit time so
	 * we don't react too strongly to vast changes in the commit time.
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time +
				journal->j_average_commit_time*3) / 4;
	else
		journal->j_average_commit_time = commit_time;
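	/*
	 * E.g. a previous average of 8 ms and a 16 ms commit give
	 * (16 + 3 * 8) / 4 = 10 ms: a 1/4-weight exponential moving average.
	 */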
	write_unlock(&journal->j_state_lock);

	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
		to_free = 1;
	} else {
		if (journal->j_checkpoint_transactions == NULL) {
			journal->j_checkpoint_transactions = commit_transaction;
			commit_transaction->t_cpnext = commit_transaction;
			commit_transaction->t_cpprev = commit_transaction;
		} else {
			commit_transaction->t_cpnext =
				journal->j_checkpoint_transactions;
			commit_transaction->t_cpprev =
				commit_transaction->t_cpnext->t_cpprev;
			commit_transaction->t_cpnext->t_cpprev =
				commit_transaction;
			commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
		}
	}
	spin_unlock(&journal->j_list_lock);

	if (journal->j_commit_callback)
		journal->j_commit_callback(journal, commit_transaction);

	trace_jbd2_end_commit(journal, commit_transaction);
	jbd_debug(1, "JBD: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);
	if (to_free)
		kfree(commit_transaction);

	wake_up(&journal->j_wait_done_commit);
}