1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_inode.h"
17 #include "xfs_trans.h"
19 #include "xfs_log_priv.h"
20 #include "xfs_log_recover.h"
21 #include "xfs_trans_priv.h"
22 #include "xfs_alloc.h"
23 #include "xfs_ialloc.h"
24 #include "xfs_trace.h"
25 #include "xfs_icache.h"
26 #include "xfs_error.h"
27 #include "xfs_buf_item.h"
29 #include "xfs_quota.h"
30 #include "xfs_reflink.h"
32 #define BLK_AVG(blk1, blk2) (((blk1) + (blk2)) >> 1)
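/*
 * Worked example (hypothetical numbers): this midpoint drives the binary
 * search in xlog_find_cycle_start() below. BLK_AVG(0, 7) == 3 and
 * BLK_AVG(3, 4) == 3, so once the two endpoints are adjacent the midpoint
 * stops moving and the search loop terminates.
 */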
39 xlog_clear_stale_blocks(
44 xlog_recover_check_summary(
47 #define xlog_recover_check_summary(log)
50 xlog_do_recovery_pass(
51 struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);
54 * Sector aligned buffer routines for buffer create/read/write/access
58 * Verify the log-relative block number and length in basic blocks are valid for
59 * an operation involving the given XFS log buffer. Returns true if the fields
60 * are valid, false otherwise.
68 if (blk_no < 0 || blk_no >= log->l_logBBsize)
70 if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
76 * Allocate a buffer to hold log data. The buffer needs to be able to map to
77 * a range of nbblks basic blocks at any valid offset within the log.
85 * Pass log block 0 since we don't have an addr yet, buffer will be
88 if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) {
89 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
95 * We do log I/O in units of log sectors (a power-of-2 multiple of the
96 * basic block size), so we round up the requested size to accommodate
97 * the basic blocks required for complete log sectors.
99 * In addition, the buffer may be used for a non-sector-aligned block
100 * offset, in which case an I/O of the requested size could extend
101 * beyond the end of the buffer. If the requested size is only 1 basic
102 * block, it will never straddle a sector boundary, so this won't be an
103 * issue. Nor will this be a problem if the log I/O is done in basic
104 * blocks (sector size 1). But otherwise we extend the buffer by one
105 * extra log sector to ensure there's space to accommodate this
108 if (nbblks > 1 && log->l_sectBBsize > 1)
109 nbblks += log->l_sectBBsize;
110 nbblks = round_up(nbblks, log->l_sectBBsize);
111 return kvzalloc(BBTOB(nbblks), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
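/*
 * Worked sizing example (hypothetical numbers): on a 4k-sector log
 * device l_sectBBsize == 8. A request for nbblks == 3 first grows to 11
 * (one sector of slack for a non-sector-aligned start) and then rounds
 * up to 16 basic blocks, so the allocation is BBTOB(16) == 8192 bytes.
 */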
115 * Return the address of the start of the given block number's data
116 * in a log buffer. The buffer covers a log sector-aligned region.
118 static inline unsigned int
123 return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
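/*
 * Worked example (hypothetical values): with l_sectBBsize == 8 and
 * blk_no == 13, the returned byte offset is BBTOB(13 & 7) == BBTOB(5)
 * == 2560, because the actual I/O below is rounded down to the start of
 * the containing sector (block 8 in this example).
 */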
136 if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) {
138 "Invalid log block/length (0x%llx, 0x%x) for buffer",
140 return -EFSCORRUPTED;
143 blk_no = round_down(blk_no, log->l_sectBBsize);
144 nbblks = round_up(nbblks, log->l_sectBBsize);
147 error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
148 BBTOB(nbblks), data, op);
149 if (error && !xlog_is_shutdown(log)) {
151 "log recovery %s I/O error at daddr 0x%llx len %d error %d",
152 op == REQ_OP_WRITE ? "write" : "read",
153 blk_no, nbblks, error);
165 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
178 error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
180 *offset = data + xlog_align(log, blk_no);
191 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
196 * dump debug superblock and log record information
199 xlog_header_check_dump(
201 xlog_rec_header_t *head)
203 xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d",
204 __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
205 xfs_debug(mp, " log : uuid = %pU, fmt = %d",
206 &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
209 #define xlog_header_check_dump(mp, head)
213 * check log record header for recovery
216 xlog_header_check_recover(
218 xlog_rec_header_t *head)
220 ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
223 * IRIX doesn't write the h_fmt field and leaves it zeroed
224 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
225 * a dirty log created in IRIX.
227 if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) {
229 "dirty log written in incompatible format - can't recover");
230 xlog_header_check_dump(mp, head);
231 return -EFSCORRUPTED;
233 if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
234 &head->h_fs_uuid))) {
236 "dirty log entry has mismatched uuid - can't recover");
237 xlog_header_check_dump(mp, head);
238 return -EFSCORRUPTED;
244 * read the head block of the log and check the header
247 xlog_header_check_mount(
249 xlog_rec_header_t *head)
251 ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
253 if (uuid_is_null(&head->h_fs_uuid)) {
255 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
256 * h_fs_uuid is null, we assume this log was last mounted
257 * by IRIX and continue.
259 xfs_warn(mp, "null uuid in log - IRIX style log");
260 } else if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
261 &head->h_fs_uuid))) {
262 xfs_warn(mp, "log has mismatched uuid - can't recover");
263 xlog_header_check_dump(mp, head);
264 return -EFSCORRUPTED;
270 * This routine finds (to an approximation) the first block in the physical
271 * log which contains the given cycle. It uses a binary search algorithm.
272 * Note that the algorithm cannot be perfect because the disk will not
273 * necessarily be perfect.
276 xlog_find_cycle_start(
279 xfs_daddr_t first_blk,
280 xfs_daddr_t *last_blk,
290 mid_blk = BLK_AVG(first_blk, end_blk);
291 while (mid_blk != first_blk && mid_blk != end_blk) {
292 error = xlog_bread(log, mid_blk, 1, buffer, &offset);
295 mid_cycle = xlog_get_cycle(offset);
296 if (mid_cycle == cycle)
297 end_blk = mid_blk; /* last_half_cycle == mid_cycle */
299 first_blk = mid_blk; /* first_half_cycle == mid_cycle */
300 mid_blk = BLK_AVG(first_blk, end_blk);
302 ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
303 (mid_blk == end_blk && mid_blk-1 == first_blk));
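/*
 * Example trace (assumed 8-block log stamped 3 3 3 3 2 2 2 2, searching
 * for the first block of cycle 2): mid_blk visits 3, 5 and 4, shrinking
 * the interval to first_blk == 3, end_blk == 4. end_blk then names
 * block 4, the first block carrying the target cycle.
 */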
311 * Check that a range of blocks does not contain stop_on_cycle_no.
312 * Fill in *new_blk with the block offset where such a block is
313 * found, or with -1 (an invalid block number) if there is no such
314 * block in the range. The scan needs to occur from front to back
315 * and the pointer into the region must be updated since a later
316 * routine will need to perform another test.
319 xlog_find_verify_cycle(
321 xfs_daddr_t start_blk,
323 uint stop_on_cycle_no,
324 xfs_daddr_t *new_blk)
334 * Greedily allocate a buffer big enough to handle the full
335 * range of basic blocks we'll be examining. If that fails,
336 * try a smaller size. We need to be able to read at least
337 * a log sector, or we're out of luck.
339 bufblks = 1 << ffs(nbblks);
340 while (bufblks > log->l_logBBsize)
342 while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
344 if (bufblks < log->l_sectBBsize)
348 for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
351 bcount = min(bufblks, (start_blk + nbblks - i));
353 error = xlog_bread(log, i, bcount, buffer, &buf);
357 for (j = 0; j < bcount; j++) {
358 cycle = xlog_get_cycle(buf);
359 if (cycle == stop_on_cycle_no) {
376 xlog_logrec_hblks(struct xlog *log, struct xlog_rec_header *rh)
378 if (xfs_has_logv2(log->l_mp)) {
379 int h_size = be32_to_cpu(rh->h_size);
381 if ((be32_to_cpu(rh->h_version) & XLOG_VERSION_2) &&
382 h_size > XLOG_HEADER_CYCLE_SIZE)
383 return DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
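/*
 * Example (assuming the usual 32k XLOG_HEADER_CYCLE_SIZE): a v2 log
 * with 256k iclog buffers has h_size == 262144, so each log record
 * carries DIV_ROUND_UP(262144, 32768) == 8 header blocks.
 */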
389 * Potentially back up over a partial log record write.
391 * In the typical case, last_blk is the number of the block directly after
392 * a good log record. Therefore, we subtract one to get the block number
393 * of the last block in the given buffer. extra_bblks contains the number
394 * of blocks we would have read on a previous read. This happens when the
395 * last log record is split over the end of the physical log.
397 * extra_bblks is the number of blocks potentially verified on a previous
398 * call to this routine.
401 xlog_find_verify_log_record(
403 xfs_daddr_t start_blk,
404 xfs_daddr_t *last_blk,
410 xlog_rec_header_t *head = NULL;
413 int num_blks = *last_blk - start_blk;
416 ASSERT(start_blk != 0 || *last_blk != start_blk);
418 buffer = xlog_alloc_buffer(log, num_blks);
420 buffer = xlog_alloc_buffer(log, 1);
425 error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
428 offset += ((num_blks - 1) << BBSHIFT);
431 for (i = (*last_blk) - 1; i >= 0; i--) {
433 /* valid log record not found */
435 "Log inconsistent (didn't find previous header)");
437 error = -EFSCORRUPTED;
442 error = xlog_bread(log, i, 1, buffer, &offset);
447 head = (xlog_rec_header_t *)offset;
449 if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
457 * We hit the beginning of the physical log & still no header. Return
458 * to caller. If caller can handle a return of -1, then this routine
459 * will be called again for the end of the physical log.
467 * We have the final block of the good log (the first block
468 * of the log record _before_ the head). So we check the uuid.
470 if ((error = xlog_header_check_mount(log->l_mp, head)))
474 * We may have found a log record header before we expected one.
475 * last_blk will be the 1st block # with a given cycle #. We may end
476 * up reading an entire log record. In this case, we don't want to
477 * reset last_blk. Only when last_blk points in the middle of a log
478 * record do we update last_blk.
480 xhdrs = xlog_logrec_hblks(log, head);
482 if (*last_blk - i + extra_bblks !=
483 BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
492 * Head is defined to be the point of the log where the next log write
493 * could go. This means that incomplete LR writes at the end are
494 * eliminated when calculating the head. We aren't guaranteed that previous
495 * LRs have complete transactions. We only know that a cycle number of
496 * current cycle number -1 won't be present in the log if we start writing
497 * from our current block number.
499 * last_blk contains the block number of the first block with a given
502 * Return: zero if normal, non-zero if error.
507 xfs_daddr_t *return_head_blk)
511 xfs_daddr_t new_blk, first_blk, start_blk, last_blk, head_blk;
513 uint first_half_cycle, last_half_cycle;
515 int error, log_bbnum = log->l_logBBsize;
517 /* Is the end of the log device zeroed? */
518 error = xlog_find_zeroed(log, &first_blk);
520 xfs_warn(log->l_mp, "empty log check failed");
524 *return_head_blk = first_blk;
526 /* Is the whole lot zeroed? */
528 /* Linux XFS shouldn't generate totally zeroed logs -
529 * mkfs etc write a dummy unmount record to a fresh
530 * log so we can store the uuid in there
532 xfs_warn(log->l_mp, "totally zeroed log");
538 first_blk = 0; /* get cycle # of 1st block */
539 buffer = xlog_alloc_buffer(log, 1);
543 error = xlog_bread(log, 0, 1, buffer, &offset);
545 goto out_free_buffer;
547 first_half_cycle = xlog_get_cycle(offset);
549 last_blk = head_blk = log_bbnum - 1; /* get cycle # of last block */
550 error = xlog_bread(log, last_blk, 1, buffer, &offset);
552 goto out_free_buffer;
554 last_half_cycle = xlog_get_cycle(offset);
555 ASSERT(last_half_cycle != 0);
558 * If the 1st half cycle number is equal to the last half cycle number,
559 * then the entire log is stamped with the same cycle number. In this
560 * case, head_blk can't be set to zero (which makes sense). The below
561 * math doesn't work out properly with head_blk equal to zero. Instead,
562 * we set it to log_bbnum which is an invalid block number, but this
563 * value makes the math correct. If head_blk doesn't change through
564 * all the tests below, *head_blk is set to zero at the very end rather
565 * than log_bbnum. In a sense, log_bbnum and zero are the same block
566 * in a circular file.
568 if (first_half_cycle == last_half_cycle) {
570 * In this case we believe that the entire log should have
571 * cycle number last_half_cycle. We need to scan backwards
572 * from the end verifying that there are no holes still
573 * containing last_half_cycle - 1. If we find such a hole,
574 * then the start of that hole will be the new head. The
575 * simple case looks like
576 * x | x ... | x - 1 | x
577 * Another case that fits this picture would be
578 * x | x + 1 | x ... | x
579 * In this case the head really is somewhere at the end of the
580 * log, as one of the latest writes at the beginning was
583 * x | x + 1 | x ... | x - 1 | x
584 * This is really the combination of the above two cases, and
585 * the head has to end up at the start of the x-1 hole at the
588 * In the 256k log case, we will read from the beginning to the
589 * end of the log and search for cycle numbers equal to x-1.
590 * We don't worry about the x+1 blocks that we encounter,
591 * because we know that they cannot be the head since the log
594 head_blk = log_bbnum;
595 stop_on_cycle = last_half_cycle - 1;
598 * In this case we want to find the first block with cycle
599 * number matching last_half_cycle. We expect the log to be
601 * x + 1 ... | x ... | x
602 * The first block with cycle number x (last_half_cycle) will
603 * be where the new head belongs. First we do a binary search
604 * for the first occurrence of last_half_cycle. The binary
605 * search may not be totally accurate, so then we scan back
606 * from there looking for occurrences of last_half_cycle before
607 * us. If that backwards scan wraps around the beginning of
608 * the log, then we look for occurrences of last_half_cycle - 1
609 * at the end of the log. The cases we're looking for look
611 * v binary search stopped here
612 * x + 1 ... | x | x + 1 | x ... | x
613 * ^ but we want to locate this spot
615 * <---------> less than scan distance
616 * x + 1 ... | x ... | x - 1 | x
617 * ^ we want to locate this spot
619 stop_on_cycle = last_half_cycle;
620 error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
623 goto out_free_buffer;
627 * Now validate the answer. Scan back some number of maximum possible
628 * blocks and make sure each one has the expected cycle number. The
629 * maximum is determined by the total possible amount of buffering
630 * in the in-core log. The following number can be made tighter if
631 * we actually look at the block size of the filesystem.
633 num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
634 if (head_blk >= num_scan_bblks) {
636 * We are guaranteed that the entire check can be performed
639 start_blk = head_blk - num_scan_bblks;
640 if ((error = xlog_find_verify_cycle(log,
641 start_blk, num_scan_bblks,
642 stop_on_cycle, &new_blk)))
643 goto out_free_buffer;
646 } else { /* need to read 2 parts of log */
648 * We are going to scan backwards in the log in two parts.
649 * First we scan the physical end of the log. In this part
650 * of the log, we are looking for blocks with cycle number
651 * last_half_cycle - 1.
652 * If we find one, then we know that the log starts there, as
653 * we've found a hole that didn't get written in going around
654 * the end of the physical log. The simple case for this is
655 * x + 1 ... | x ... | x - 1 | x
656 * <---------> less than scan distance
657 * If all of the blocks at the end of the log have cycle number
658 * last_half_cycle, then we check the blocks at the start of
659 * the log looking for occurrences of last_half_cycle. If we
660 * find one, then our current estimate for the location of the
661 * first occurrence of last_half_cycle is wrong and we move
662 * back to the hole we've found. This case looks like
663 * x + 1 ... | x | x + 1 | x ...
664 * ^ binary search stopped here
665 * Another case we need to handle that only occurs in 256k
667 * x + 1 ... | x ... | x+1 | x ...
668 * ^ binary search stops here
669 * In a 256k log, the scan at the end of the log will see the
670 * x + 1 blocks. We need to skip past those since that is
671 * certainly not the head of the log. By searching for
672 * last_half_cycle-1 we accomplish that.
674 ASSERT(head_blk <= INT_MAX &&
675 (xfs_daddr_t) num_scan_bblks >= head_blk);
676 start_blk = log_bbnum - (num_scan_bblks - head_blk);
677 if ((error = xlog_find_verify_cycle(log, start_blk,
678 num_scan_bblks - (int)head_blk,
679 (stop_on_cycle - 1), &new_blk)))
680 goto out_free_buffer;
687 * Scan beginning of log now. The last part of the physical
688 * log is good. This scan needs to verify that it doesn't find
689 * the last_half_cycle.
692 ASSERT(head_blk <= INT_MAX);
693 if ((error = xlog_find_verify_cycle(log,
694 start_blk, (int)head_blk,
695 stop_on_cycle, &new_blk)))
696 goto out_free_buffer;
703 * Now we need to make sure head_blk is not pointing to a block in
704 * the middle of a log record.
706 num_scan_bblks = XLOG_REC_SHIFT(log);
707 if (head_blk >= num_scan_bblks) {
708 start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
710 /* start ptr at last block ptr before head_blk */
711 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
715 goto out_free_buffer;
718 ASSERT(head_blk <= INT_MAX);
719 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
721 goto out_free_buffer;
723 /* We hit the beginning of the log during our search */
724 start_blk = log_bbnum - (num_scan_bblks - head_blk);
726 ASSERT(start_blk <= INT_MAX &&
727 (xfs_daddr_t) log_bbnum-start_blk >= 0);
728 ASSERT(head_blk <= INT_MAX);
729 error = xlog_find_verify_log_record(log, start_blk,
730 &new_blk, (int)head_blk);
734 goto out_free_buffer;
735 if (new_blk != log_bbnum)
738 goto out_free_buffer;
742 if (head_blk == log_bbnum)
743 *return_head_blk = 0;
745 *return_head_blk = head_blk;
747 * When returning here, we have a good block number. A bad block
748 * means that during a previous crash, we didn't have a clean break
749 * from cycle number N to cycle number N-1. In this case, we need
750 * to find the first block with cycle number N-1.
757 xfs_warn(log->l_mp, "failed to find log head");
762 * Seek backwards in the log for log record headers.
764 * Given a starting log block, walk backwards until we find the provided number
765 * of records or hit the provided tail block. The return value is the number of
766 * records encountered or a negative error code. The log block and buffer
767 * pointer of the last record seen are returned in rblk and rhead respectively.
770 xlog_rseek_logrec_hdr(
772 xfs_daddr_t head_blk,
773 xfs_daddr_t tail_blk,
777 struct xlog_rec_header **rhead,
789 * Walk backwards from the head block until we hit the tail or the first
792 end_blk = head_blk > tail_blk ? tail_blk : 0;
793 for (i = (int) head_blk - 1; i >= end_blk; i--) {
794 error = xlog_bread(log, i, 1, buffer, &offset);
798 if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
800 *rhead = (struct xlog_rec_header *) offset;
801 if (++found == count)
807 * If we haven't hit the tail block or the log record header count,
808 * start looking again from the end of the physical log. Note that
809 * callers can pass head == tail if the tail is not yet known.
811 if (tail_blk >= head_blk && found != count) {
812 for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
813 error = xlog_bread(log, i, 1, buffer, &offset);
817 if (*(__be32 *)offset ==
818 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
821 *rhead = (struct xlog_rec_header *) offset;
822 if (++found == count)
835 * Seek forward in the log for log record headers.
837 * Given head and tail blocks, walk forward from the tail block until we find
838 * the provided number of records or hit the head block. The return value is the
839 * number of records encountered or a negative error code. The log block and
840 * buffer pointer of the last record seen are returned in rblk and rhead
844 xlog_seek_logrec_hdr(
846 xfs_daddr_t head_blk,
847 xfs_daddr_t tail_blk,
851 struct xlog_rec_header **rhead,
863 * Walk forward from the tail block until we hit the head or the last
866 end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
867 for (i = (int) tail_blk; i <= end_blk; i++) {
868 error = xlog_bread(log, i, 1, buffer, &offset);
872 if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
874 *rhead = (struct xlog_rec_header *) offset;
875 if (++found == count)
881 * If we haven't hit the head block or the log record header count,
882 * start looking again from the start of the physical log.
884 if (tail_blk > head_blk && found != count) {
885 for (i = 0; i < (int) head_blk; i++) {
886 error = xlog_bread(log, i, 1, buffer, &offset);
890 if (*(__be32 *)offset ==
891 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
894 *rhead = (struct xlog_rec_header *) offset;
895 if (++found == count)
908 * Calculate distance from head to tail (i.e., unused space in the log).
913 xfs_daddr_t head_blk,
914 xfs_daddr_t tail_blk)
916 if (head_blk < tail_blk)
917 return tail_blk - head_blk;
919 return tail_blk + (log->l_logBBsize - head_blk);
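/*
 * Usage sketch (hypothetical numbers): in a 1000-block log with
 * head_blk == 900 and tail_blk == 100 the head has wrapped, so the
 * unused space is 100 + (1000 - 900) == 200 basic blocks; with
 * head_blk == 100 and tail_blk == 900 it is simply 800. This is the
 * quantity xlog_verify_tail() compares against the maximum possible
 * in-flight iclog footprint below.
 */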
923 * Verify the log tail. This is particularly important when torn or incomplete
924 * writes have been detected near the front of the log and the head has been
925 * walked back accordingly.
927 * We also have to handle the case where the tail was pinned and the head
928 * blocked behind the tail right before a crash. If the tail had been pushed
929 * immediately prior to the crash and the subsequent checkpoint was only
930 * partially written, it's possible it overwrote the last referenced tail in the
931 * log with garbage. This is not a coherency problem because the tail must have
932 * been pushed before it can be overwritten, but appears as log corruption to
933 * recovery because we have no way to know the tail was updated if the
934 * subsequent checkpoint didn't write successfully.
936 * Therefore, CRC check the log from tail to head. If a failure occurs and the
937 * offending record is within max iclog bufs from the head, walk the tail
938 * forward and retry until a valid tail is found or corruption is detected out
939 * of the range of a possible overwrite.
944 xfs_daddr_t head_blk,
945 xfs_daddr_t *tail_blk,
948 struct xlog_rec_header *thead;
950 xfs_daddr_t first_bad;
953 xfs_daddr_t tmp_tail;
954 xfs_daddr_t orig_tail = *tail_blk;
956 buffer = xlog_alloc_buffer(log, 1);
961 * Make sure the tail points to a record (returns positive count on
964 error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
965 &tmp_tail, &thead, &wrapped);
968 if (*tail_blk != tmp_tail)
969 *tail_blk = tmp_tail;
972 * Run a CRC check from the tail to the head. We can't just check
973 * XLOG_MAX_ICLOGS records past the tail because the tail may point to stale
974 * blocks cleared during the search for the head/tail. These blocks are
975 * overwritten with zero-length records and thus record count is not a
976 * reliable indicator of the iclog state before a crash.
979 error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
980 XLOG_RECOVER_CRCPASS, &first_bad);
981 while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
985 * Is corruption within range of the head? If so, retry from
986 * the next record. Otherwise return an error.
988 tail_distance = xlog_tail_distance(log, head_blk, first_bad);
989 if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
992 /* skip to the next record; returns positive count on success */
993 error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
994 buffer, &tmp_tail, &thead, &wrapped);
998 *tail_blk = tmp_tail;
1000 error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
1001 XLOG_RECOVER_CRCPASS, &first_bad);
1004 if (!error && *tail_blk != orig_tail)
1006 "Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
1007 orig_tail, *tail_blk);
1014 * Detect and trim torn writes from the head of the log.
1016 * Storage without sector atomicity guarantees can result in torn writes in the
1017 * log in the event of a crash. Our only means to detect this scenario is via
1018 * CRC verification. While we can't always be certain that CRC verification
1019 * failure is due to a torn write vs. an unrelated corruption, we do know that
1020 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
1021 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
1022 * the log and treat failures in this range as torn writes as a matter of
1023 * policy. In the event of CRC failure, the head is walked back to the last good
1024 * record in the log and the tail is updated from that record and verified.
1029 xfs_daddr_t *head_blk, /* in/out: unverified head */
1030 xfs_daddr_t *tail_blk, /* out: tail block */
1032 xfs_daddr_t *rhead_blk, /* start blk of last record */
1033 struct xlog_rec_header **rhead, /* ptr to last record */
1034 bool *wrapped) /* last rec. wraps phys. log */
1036 struct xlog_rec_header *tmp_rhead;
1038 xfs_daddr_t first_bad;
1039 xfs_daddr_t tmp_rhead_blk;
1045 * Check the head of the log for torn writes. Search backwards from the
1046 * head until we hit the tail or the maximum number of log record I/Os
1047 * that could have been in flight at one time. Use a temporary buffer so
1048 * we don't trash the rhead/buffer pointers from the caller.
1050 tmp_buffer = xlog_alloc_buffer(log, 1);
1053 error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
1054 XLOG_MAX_ICLOGS, tmp_buffer,
1055 &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
1056 kmem_free(tmp_buffer);
1061 * Now run a CRC verification pass over the records starting at the
1062 * block found above to the current head. If a CRC failure occurs, the
1063 * log block of the first bad record is saved in first_bad.
1065 error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
1066 XLOG_RECOVER_CRCPASS, &first_bad);
1067 if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
1069 * We've hit a potential torn write. Reset the error and warn
1074 "Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
1075 first_bad, *head_blk);
1078 * Get the header block and buffer pointer for the last good
1079 * record before the bad record.
1081 * Note that xlog_find_tail() clears the blocks at the new head
1082 * (i.e., the records with invalid CRC) if the cycle number
1083 * matches the current cycle.
1085 found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
1086 buffer, rhead_blk, rhead, wrapped);
1089 if (found == 0) /* XXX: right thing to do here? */
1093 * Reset the head block to the starting block of the first bad
1094 * log record and set the tail block based on the last good
1097 * Bail out if the updated head/tail match as this indicates
1098 * possible corruption outside of the acceptable
1099 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
1101 *head_blk = first_bad;
1102 *tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
1103 if (*head_blk == *tail_blk) {
1111 return xlog_verify_tail(log, *head_blk, tail_blk,
1112 be32_to_cpu((*rhead)->h_size));
1116 * We need to make sure we handle log wrapping properly, so we can't use the
1117 * calculated logbno directly. Make sure it wraps to the correct bno inside the
1120 * The log is limited to 32 bit sizes, so we use the appropriate modulus
1121 * operation here and cast it back to a 64 bit daddr on return.
1123 static inline xfs_daddr_t
1130 div_s64_rem(bno, log->l_logBBsize, &mod);
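/*
 * Example (assumed 1000-block log): a computed position of 1005 wraps
 * to daddr 5. div_s64_rem() is sufficient because, as noted above, the
 * log size fits in 32 bits even though xfs_daddr_t is 64 bits wide.
 */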
1135 * Check whether the head of the log points to an unmount record. In other
1136 * words, determine whether the log is clean. If so, update the in-core state
1140 xlog_check_unmount_rec(
1142 xfs_daddr_t *head_blk,
1143 xfs_daddr_t *tail_blk,
1144 struct xlog_rec_header *rhead,
1145 xfs_daddr_t rhead_blk,
1149 struct xlog_op_header *op_head;
1150 xfs_daddr_t umount_data_blk;
1151 xfs_daddr_t after_umount_blk;
1159 * Look for unmount record. If we find it, then we know there was a
1160 * clean unmount. Since the record could sit at the very end of the
1161 * physical log, we wrap the block number before comparing it to head_blk.
1163 * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
1164 * below. We won't want to clear the unmount record if there is one, so
1165 * we pass the lsn of the unmount record rather than the block after it.
1167 hblks = xlog_logrec_hblks(log, rhead);
1168 after_umount_blk = xlog_wrap_logbno(log,
1169 rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));
1171 if (*head_blk == after_umount_blk &&
1172 be32_to_cpu(rhead->h_num_logops) == 1) {
1173 umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
1174 error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
1178 op_head = (struct xlog_op_header *)offset;
1179 if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
1181 * Set tail and last sync so that newly written log
1182 * records will point recovery to after the current
1185 xlog_assign_atomic_lsn(&log->l_tail_lsn,
1186 log->l_curr_cycle, after_umount_blk);
1187 xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1188 log->l_curr_cycle, after_umount_blk);
1189 *tail_blk = after_umount_blk;
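/*
 * Layout sketch (hypothetical numbers): for a one-logop record at
 * rhead_blk == 50 with hblks == 1 and h_len == 512, the unmount data
 * sits at block 51 and after_umount_blk == 52. Only if head_blk also
 * points at block 52 and that logop carries XLOG_UNMOUNT_TRANS is the
 * log treated as cleanly unmounted.
 */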
1201 xfs_daddr_t head_blk,
1202 struct xlog_rec_header *rhead,
1203 xfs_daddr_t rhead_blk,
1207 * Reset log values according to the state of the log when we
1208 * crashed. In the case where head_blk == 0, we bump curr_cycle
1209 * by one because the next write starts a new cycle rather than
1210 * continuing the cycle of the last good log record. At this
1211 * point we have guaranteed that all partial log records have been
1212 * accounted for. Therefore, we know that the last good log record
1213 * written was complete and ended exactly on the end boundary
1214 * of the physical log.
1216 log->l_prev_block = rhead_blk;
1217 log->l_curr_block = (int)head_blk;
1218 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
1220 log->l_curr_cycle++;
1221 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
1222 atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
1223 xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
1224 BBTOB(log->l_curr_block));
1225 xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
1226 BBTOB(log->l_curr_block));
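/*
 * Resulting state, as an example (assumed values): a head at cycle 7,
 * block 120 leaves both grant heads at cycle 7 and byte offset
 * BBTOB(120) == 61440, so the first post-recovery reservation starts
 * exactly at the recovered head of the log.
 */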
1230 * Find the sync block number or the tail of the log.
1232 * This will be the block number of the last record to have its
1233 * associated buffers synced to disk. Every log record header has
1234 * a sync lsn embedded in it. LSNs hold block numbers, so it is easy
1235 * to get a sync block number. The only concern is to figure out which
1236 * log record header to believe.
1238 * The following algorithm uses the log record header with the largest
1239 * lsn. The entire log record does not need to be valid. We only care
1240 * that the header is valid.
1242 * We could speed up search by using current head_blk buffer, but it is not
1248 xfs_daddr_t *head_blk,
1249 xfs_daddr_t *tail_blk)
1251 xlog_rec_header_t *rhead;
1252 char *offset = NULL;
1255 xfs_daddr_t rhead_blk;
1257 bool wrapped = false;
1261 * Find previous log record
1263 if ((error = xlog_find_head(log, head_blk)))
1265 ASSERT(*head_blk < INT_MAX);
1267 buffer = xlog_alloc_buffer(log, 1);
1270 if (*head_blk == 0) { /* special case */
1271 error = xlog_bread(log, 0, 1, buffer, &offset);
1275 if (xlog_get_cycle(offset) == 0) {
1277 /* leave all other log inited values alone */
1283 * Search backwards through the log looking for the log record header
1284 * block. This wraps all the way back around to the head so something is
1285 * seriously wrong if we can't find it.
1287 error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
1288 &rhead_blk, &rhead, &wrapped);
1292 xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
1293 error = -EFSCORRUPTED;
1296 *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
1299 * Set the log state based on the current head record.
1301 xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
1302 tail_lsn = atomic64_read(&log->l_tail_lsn);
1305 * Look for an unmount record at the head of the log. This sets the log
1306 * state to determine whether recovery is necessary.
1308 error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
1309 rhead_blk, buffer, &clean);
1314 * Verify the log head if the log is not clean (e.g., we have anything
1315 * but an unmount record at the head). This uses CRC verification to
1316 * detect and trim torn writes. If discovered, CRC failures are
1317 * considered torn writes and the log head is trimmed accordingly.
1319 * Note that we can only run CRC verification when the log is dirty
1320 * because there's no guarantee that the log data behind an unmount
1321 * record is compatible with the current architecture.
1324 xfs_daddr_t orig_head = *head_blk;
1326 error = xlog_verify_head(log, head_blk, tail_blk, buffer,
1327 &rhead_blk, &rhead, &wrapped);
1331 /* update in-core state again if the head changed */
1332 if (*head_blk != orig_head) {
1333 xlog_set_state(log, *head_blk, rhead, rhead_blk,
1335 tail_lsn = atomic64_read(&log->l_tail_lsn);
1336 error = xlog_check_unmount_rec(log, head_blk, tail_blk,
1337 rhead, rhead_blk, buffer,
1345 * Note that the unmount was clean. If the unmount was not clean, we
1346 * need to know this to rebuild the superblock counters from the perag
1347 * headers if we have a filesystem using non-persistent counters.
1350 set_bit(XFS_OPSTATE_CLEAN, &log->l_mp->m_opstate);
1353 * Make sure that there are no blocks in front of the head
1354 * with the same cycle number as the head. This can happen
1355 * because we allow multiple outstanding log writes concurrently,
1356 * and the later writes might make it out before earlier ones.
1358 * We use the lsn from before modifying it so that we'll never
1359 * overwrite the unmount record after a clean unmount.
1361 * Do this only if we are going to recover the filesystem
1363 * NOTE: This used to say "if (!readonly)"
1364 * However on Linux, we can & do recover a read-only filesystem.
1365 * We only skip recovery if NORECOVERY is specified on mount,
1366 * in which case we would not be here.
1368 * But... if the -device- itself is readonly, just skip this.
1369 * We can't recover this device anyway, so it won't matter.
1371 if (!xfs_readonly_buftarg(log->l_targ))
1372 error = xlog_clear_stale_blocks(log, tail_lsn);
1378 xfs_warn(log->l_mp, "failed to locate log tail");
1383 * Is the log zeroed at all?
1385 * The last binary search should be changed to perform an X block read
1386 * once X becomes small enough. You can then search linearly through
1387 * the X blocks. This will cut down on the number of reads we need to do.
1389 * If the log is partially zeroed, this routine will pass back the blkno
1390 * of the first block with cycle number 0. It won't have a complete LR
1394 * 0 => the log is completely written to
1395 * 1 => use *blk_no as the first block of the log
1396 * <0 => error has occurred
1401 xfs_daddr_t *blk_no)
1405 uint first_cycle, last_cycle;
1406 xfs_daddr_t new_blk, last_blk, start_blk;
1407 xfs_daddr_t num_scan_bblks;
1408 int error, log_bbnum = log->l_logBBsize;
1412 /* check totally zeroed log */
1413 buffer = xlog_alloc_buffer(log, 1);
1416 error = xlog_bread(log, 0, 1, buffer, &offset);
1418 goto out_free_buffer;
1420 first_cycle = xlog_get_cycle(offset);
1421 if (first_cycle == 0) { /* completely zeroed log */
1427 /* check partially zeroed log */
1428 error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
1430 goto out_free_buffer;
1432 last_cycle = xlog_get_cycle(offset);
1433 if (last_cycle != 0) { /* log completely written to */
1438 /* we have a partially zeroed log */
1439 last_blk = log_bbnum-1;
1440 error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
1442 goto out_free_buffer;
1445 * Validate the answer. Because there is no way to guarantee that
1446 * the entire log is made up of log records which are the same size,
1447 * we scan over the defined maximum blocks. At this point, the maximum
1448 * is not chosen to mean anything special. XXXmiken
1450 num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1451 ASSERT(num_scan_bblks <= INT_MAX);
1453 if (last_blk < num_scan_bblks)
1454 num_scan_bblks = last_blk;
1455 start_blk = last_blk - num_scan_bblks;
1458 * We search for any instances of cycle number 0 that occur before
1459 * our current estimate of the head. What we're trying to detect is
1460 * 1 ... | 0 | 1 | 0...
1461 * ^ binary search ends here
1463 if ((error = xlog_find_verify_cycle(log, start_blk,
1464 (int)num_scan_bblks, 0, &new_blk)))
1465 goto out_free_buffer;
1470 * Potentially back up over a partial log record write. We don't need
1471 * to search the end of the log because we know it is zero.
1473 error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
1477 goto out_free_buffer;
1488 * These are simple subroutines used by xlog_clear_stale_blocks() below
1489 * to initialize a buffer full of empty log record headers and write
1490 * them into the log.
1501 xlog_rec_header_t *recp = (xlog_rec_header_t *)buf;
1503 memset(buf, 0, BBSIZE);
1504 recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1505 recp->h_cycle = cpu_to_be32(cycle);
1506 recp->h_version = cpu_to_be32(
1507 xfs_has_logv2(log->l_mp) ? 2 : 1);
1508 recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1509 recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1510 recp->h_fmt = cpu_to_be32(XLOG_FMT);
1511 memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
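/*
 * Example of one stamped header (hypothetical numbers): cycle 4,
 * block 903 with a tail at cycle 3, block 210 yields
 * h_lsn == (4ULL << 32) | 903 and h_tail_lsn == (3ULL << 32) | 210.
 * h_len stays zero from the memset, so this is exactly the zero-length
 * "empty log record header" described above.
 */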
1515 xlog_write_log_records(
1526 int sectbb = log->l_sectBBsize;
1527 int end_block = start_block + blocks;
1533 * Greedily allocate a buffer big enough to handle the full
1534 * range of basic blocks to be written. If that fails, try
1535 * a smaller size. We need to be able to write at least a
1536 * log sector, or we're out of luck.
1538 bufblks = 1 << ffs(blocks);
1539 while (bufblks > log->l_logBBsize)
1541 while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
1543 if (bufblks < sectbb)
1547 /* We may need to do a read at the start to fill in part of
1548 * the buffer in the starting sector not covered by the first
1551 balign = round_down(start_block, sectbb);
1552 if (balign != start_block) {
1553 error = xlog_bread_noalign(log, start_block, 1, buffer);
1555 goto out_free_buffer;
1557 j = start_block - balign;
1560 for (i = start_block; i < end_block; i += bufblks) {
1561 int bcount, endcount;
1563 bcount = min(bufblks, end_block - start_block);
1564 endcount = bcount - j;
1566 /* We may need to do a read at the end to fill in part of
1567 * the buffer in the final sector not covered by the write.
1568 * If this is the same sector as the above read, skip it.
1570 ealign = round_down(end_block, sectbb);
1571 if (j == 0 && (start_block + endcount > ealign)) {
1572 error = xlog_bread_noalign(log, ealign, sectbb,
1573 buffer + BBTOB(ealign - start_block));
1579 offset = buffer + xlog_align(log, start_block);
1580 for (; j < endcount; j++) {
1581 xlog_add_record(log, offset, cycle, i+j,
1582 tail_cycle, tail_block);
1585 error = xlog_bwrite(log, start_block, endcount, buffer);
1588 start_block += endcount;
1598 * This routine is called to blow away any incomplete log writes out
1599 * in front of the log head. We do this so that we won't become confused
1600 * if we come up, write only a little bit more, and then crash again.
1601 * If we leave the partial log records out there, this situation could
1602 * cause us to think those partial writes are valid blocks since they
1603 * have the current cycle number. We get rid of them by overwriting them
1604 * with empty log records with the old cycle number rather than the
1607 * The tail lsn is passed in rather than taken from
1608 * the log so that we will not write over the unmount record after a
1609 * clean unmount in a 512 block log. Doing so would leave the log without
1610 * any valid log records in it until a new one was written. If we crashed
1611 * during that time we would not be able to recover.
1614 xlog_clear_stale_blocks(
1618 int tail_cycle, head_cycle;
1619 int tail_block, head_block;
1620 int tail_distance, max_distance;
1624 tail_cycle = CYCLE_LSN(tail_lsn);
1625 tail_block = BLOCK_LSN(tail_lsn);
1626 head_cycle = log->l_curr_cycle;
1627 head_block = log->l_curr_block;
1630 * Figure out the distance between the new head of the log
1631 * and the tail. We want to write over any blocks beyond the
1632 * head that we may have written just before the crash, but
1633 * we don't want to overwrite the tail of the log.
1635 if (head_cycle == tail_cycle) {
1637 * The tail is behind the head in the physical log,
1638 * so the distance from the head to the tail is the
1639 * distance from the head to the end of the log plus
1640 * the distance from the beginning of the log to the
1643 if (XFS_IS_CORRUPT(log->l_mp,
1644 head_block < tail_block ||
1645 head_block >= log->l_logBBsize))
1646 return -EFSCORRUPTED;
1647 tail_distance = tail_block + (log->l_logBBsize - head_block);
1650 * The head is behind the tail in the physical log,
1651 * so the distance from the head to the tail is just
1652 * the tail block minus the head block.
1654 if (XFS_IS_CORRUPT(log->l_mp,
1655 head_block >= tail_block ||
1656 head_cycle != tail_cycle + 1))
1657 return -EFSCORRUPTED;
1658 tail_distance = tail_block - head_block;
1662 * If the head is right up against the tail, we can't clear
1665 if (tail_distance <= 0) {
1666 ASSERT(tail_distance == 0);
1670 max_distance = XLOG_TOTAL_REC_SHIFT(log);
1672 * Take the smaller of the maximum amount of outstanding I/O
1673 * we could have and the distance to the tail to clear out.
1674 * We take the smaller so that we don't overwrite the tail and
1675 * we don't waste all day writing from the head to the tail
1678 max_distance = min(max_distance, tail_distance);
1680 if ((head_block + max_distance) <= log->l_logBBsize) {
1682 * We can stomp all the blocks we need to without
1683 * wrapping around the end of the log. Just do it
1684 * in a single write. Use the cycle number of the
1685 * current cycle minus one so that the log will look like:
1688 error = xlog_write_log_records(log, (head_cycle - 1),
1689 head_block, max_distance, tail_cycle,
1695 * We need to wrap around the end of the physical log in
1696 * order to clear all the blocks. Do it in two separate
1697 * I/Os. The first write should be from the head to the
1698 * end of the physical log, and it should use the current
1699 * cycle number minus one just like above.
1701 distance = log->l_logBBsize - head_block;
1702 error = xlog_write_log_records(log, (head_cycle - 1),
1703 head_block, distance, tail_cycle,
1710 * Now write the blocks at the start of the physical log.
1711 * This writes the remainder of the blocks we want to clear.
1712 * It uses the current cycle number since we're now on the
1713 * same cycle as the head so that we get:
1714 * n ... n ... | n - 1 ...
1715 * ^^^^^ blocks we're writing
1717 distance = max_distance - (log->l_logBBsize - head_block);
1718 error = xlog_write_log_records(log, head_cycle, 0, distance,
1719 tail_cycle, tail_block);
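/*
 * Worked example (hypothetical geometry): l_logBBsize == 1000, head at
 * cycle 5, block 900, max_distance == 300. The first call above stamps
 * blocks 900..999 with cycle 4 (head_cycle - 1); the remaining 200
 * blocks wrap and are stamped 0..199 with cycle 5, producing the
 * "n ... n ... | n - 1 ..." picture from the comment above.
 */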
1728 * Release the recovered intent item in the AIL that matches the given intent
1729 * type and intent id.
1732 xlog_recover_release_intent(
1734 unsigned short intent_type,
1737 struct xfs_ail_cursor cur;
1738 struct xfs_log_item *lip;
1739 struct xfs_ail *ailp = log->l_ailp;
1741 spin_lock(&ailp->ail_lock);
1742 for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); lip != NULL;
1743 lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
1744 if (lip->li_type != intent_type)
1746 if (!lip->li_ops->iop_match(lip, intent_id))
1749 spin_unlock(&ailp->ail_lock);
1750 lip->li_ops->iop_release(lip);
1751 spin_lock(&ailp->ail_lock);
1755 xfs_trans_ail_cursor_done(&cur);
1756 spin_unlock(&ailp->ail_lock);
1761 struct xfs_mount *mp,
1763 struct xfs_inode **ipp)
1767 error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
1771 error = xfs_qm_dqattach(*ipp);
1777 if (VFS_I(*ipp)->i_nlink == 0)
1778 xfs_iflags_set(*ipp, XFS_IRECOVERY);
1783 /******************************************************************************
1785 * Log recover routines
1787 ******************************************************************************
1789 static const struct xlog_recover_item_ops *xlog_recover_item_ops[] = {
1791 &xlog_inode_item_ops,
1792 &xlog_dquot_item_ops,
1793 &xlog_quotaoff_item_ops,
1794 &xlog_icreate_item_ops,
1805 static const struct xlog_recover_item_ops *
1807 struct xlog_recover_item *item)
1811 for (i = 0; i < ARRAY_SIZE(xlog_recover_item_ops); i++)
1812 if (ITEM_TYPE(item) == xlog_recover_item_ops[i]->item_type)
1813 return xlog_recover_item_ops[i];
1819 * Sort the log items in the transaction.
1821 * The ordering constraints are defined by the inode allocation and unlink
1822 * behaviour. The rules are:
1824 * 1. Every item is only logged once in a given transaction. Hence it
1825 * represents the last logged state of the item. Hence ordering is
1826 * dependent on the order in which operations need to be performed so that
1827 * required initial conditions are always met.
1829 * 2. Cancelled buffers are recorded in pass 1 in a separate table and
1830 * there's nothing to replay from them so we can simply cull them
1831 * from the transaction. However, we can't do that until after we've
1832 * replayed all the other items because they may be dependent on the
1833 * cancelled buffer and replaying the cancelled buffer can remove it
1834 * from the cancelled buffer table. Hence they have to be done last.
1836 * 3. Inode allocation buffers must be replayed before inode items that
1837 * read the buffer and replay changes into it. For filesystems using the
1838 * ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1839 * treated the same as inode allocation buffers as they create and
1840 * initialise the buffers directly.
1842 * 4. Inode unlink buffers must be replayed after inode items are replayed.
1843 * This ensures that inodes are completely flushed to the inode buffer
1844 * in a "free" state before we remove the unlinked inode list pointer.
1846 * Hence the ordering needs to be inode allocation buffers first, inode items
1847 * second, inode unlink buffers third and cancelled buffers last.
1849 * But there's a problem with that - we can't tell an inode allocation buffer
1850 * apart from a regular buffer, so we can't separate them. We can, however,
1851 * tell an inode unlink buffer from the others, and so we can separate them out
1852 * from all the other buffers and move them to last.
1854 * Hence, 4 lists, in order from head to tail:
1855 * - buffer_list for all buffers except cancelled/inode unlink buffers
1856 * - item_list for all non-buffer items
1857 * - inode_buffer_list for inode unlink buffers
1858 * - cancel_list for the cancelled buffers
1860 * Note that we add objects to the tail of the lists so that first-to-last
1861 * ordering is preserved within the lists. Adding objects to the head of the
1862 * list means when we traverse from the head we walk them in last-to-first
1863 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1864 * but for all other items there may be specific ordering that we need to
1868 xlog_recover_reorder_trans(
1870 struct xlog_recover *trans,
1873 struct xlog_recover_item *item, *n;
1875 LIST_HEAD(sort_list);
1876 LIST_HEAD(cancel_list);
1877 LIST_HEAD(buffer_list);
1878 LIST_HEAD(inode_buffer_list);
1879 LIST_HEAD(item_list);
1881 list_splice_init(&trans->r_itemq, &sort_list);
1882 list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1883 enum xlog_recover_reorder fate = XLOG_REORDER_ITEM_LIST;
1885 item->ri_ops = xlog_find_item_ops(item);
1886 if (!item->ri_ops) {
1888 "%s: unrecognized type of log operation (%d)",
1889 __func__, ITEM_TYPE(item));
1892 * return the remaining items back to the transaction
1893 * item list so they can be freed in caller.
1895 if (!list_empty(&sort_list))
1896 list_splice_init(&sort_list, &trans->r_itemq);
1897 error = -EFSCORRUPTED;
1901 if (item->ri_ops->reorder)
1902 fate = item->ri_ops->reorder(item);
1905 case XLOG_REORDER_BUFFER_LIST:
1906 list_move_tail(&item->ri_list, &buffer_list);
1908 case XLOG_REORDER_CANCEL_LIST:
1909 trace_xfs_log_recover_item_reorder_head(log,
1911 list_move(&item->ri_list, &cancel_list);
1913 case XLOG_REORDER_INODE_BUFFER_LIST:
1914 list_move(&item->ri_list, &inode_buffer_list);
1916 case XLOG_REORDER_ITEM_LIST:
1917 trace_xfs_log_recover_item_reorder_tail(log,
1919 list_move_tail(&item->ri_list, &item_list);
1924 ASSERT(list_empty(&sort_list));
1925 if (!list_empty(&buffer_list))
1926 list_splice(&buffer_list, &trans->r_itemq);
1927 if (!list_empty(&item_list))
1928 list_splice_tail(&item_list, &trans->r_itemq);
1929 if (!list_empty(&inode_buffer_list))
1930 list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1931 if (!list_empty(&cancel_list))
1932 list_splice_tail(&cancel_list, &trans->r_itemq);
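/*
 * Net effect, as an example: after the splices above r_itemq holds
 * plain buffers first, then non-buffer items, then inode unlink
 * buffers, with cancelled buffers last, i.e. exactly the four-list
 * replay order argued for in the comment above this function.
 */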
1941 const struct xfs_buf_ops *ops)
1943 if (!xlog_is_buffer_cancelled(log, blkno, len))
1944 xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops);
1948 xlog_recover_items_pass2(
1950 struct xlog_recover *trans,
1951 struct list_head *buffer_list,
1952 struct list_head *item_list)
1954 struct xlog_recover_item *item;
1957 list_for_each_entry(item, item_list, ri_list) {
1958 trace_xfs_log_recover_item_recover(log, trans, item,
1959 XLOG_RECOVER_PASS2);
1961 if (item->ri_ops->commit_pass2)
1962 error = item->ri_ops->commit_pass2(log, buffer_list,
1963 item, trans->r_lsn);
1972 * Perform the transaction.
1974 * If the transaction modifies a buffer or inode, do it now. Otherwise,
1975 * EFIs and EFDs get queued up by adding entries into the AIL for them.
1978 xlog_recover_commit_trans(
1980 struct xlog_recover *trans,
1982 struct list_head *buffer_list)
1985 int items_queued = 0;
1986 struct xlog_recover_item *item;
1987 struct xlog_recover_item *next;
1988 LIST_HEAD (ra_list);
1989 LIST_HEAD (done_list);
1991 #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
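/*
 * Illustrative effect of the queue limit (hypothetical item count): in
 * pass 2 a transaction with 250 items is replayed as two full batches
 * of 100 plus a final batch of 50 drained after the loop, with
 * readahead already issued for each item as it was queued.
 */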
1993 hlist_del_init(&trans->r_list);
1995 error = xlog_recover_reorder_trans(log, trans, pass);
1999 list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
2000 trace_xfs_log_recover_item_recover(log, trans, item, pass);
2003 case XLOG_RECOVER_PASS1:
2004 if (item->ri_ops->commit_pass1)
2005 error = item->ri_ops->commit_pass1(log, item);
2007 case XLOG_RECOVER_PASS2:
2008 if (item->ri_ops->ra_pass2)
2009 item->ri_ops->ra_pass2(log, item);
2010 list_move_tail(&item->ri_list, &ra_list);
2012 if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
2013 error = xlog_recover_items_pass2(log, trans,
2014 buffer_list, &ra_list);
2015 list_splice_tail_init(&ra_list, &done_list);
2029 if (!list_empty(&ra_list)) {
2031 error = xlog_recover_items_pass2(log, trans,
2032 buffer_list, &ra_list);
2033 list_splice_tail_init(&ra_list, &done_list);
2036 if (!list_empty(&done_list))
2037 list_splice_init(&done_list, &trans->r_itemq);
2043 xlog_recover_add_item(
2044 struct list_head *head)
2046 struct xlog_recover_item *item;
2048 item = kmem_zalloc(sizeof(struct xlog_recover_item), 0);
2049 INIT_LIST_HEAD(&item->ri_list);
2050 list_add_tail(&item->ri_list, head);
2054 xlog_recover_add_to_cont_trans(
2056 struct xlog_recover *trans,
2060 struct xlog_recover_item *item;
2061 char *ptr, *old_ptr;
2065 * If the transaction is empty, the header was split across this and the
2066 * previous record. Copy the rest of the header.
2068 if (list_empty(&trans->r_itemq)) {
2069 ASSERT(len <= sizeof(struct xfs_trans_header));
2070 if (len > sizeof(struct xfs_trans_header)) {
2071 xfs_warn(log->l_mp, "%s: bad header length", __func__);
2072 return -EFSCORRUPTED;
2075 xlog_recover_add_item(&trans->r_itemq);
2076 ptr = (char *)&trans->r_theader +
2077 sizeof(struct xfs_trans_header) - len;
2078 memcpy(ptr, dp, len);
2082 /* take the tail entry */
2083 item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
2086 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
2087 old_len = item->ri_buf[item->ri_cnt-1].i_len;
2089 ptr = kvrealloc(old_ptr, old_len, len + old_len, GFP_KERNEL);
2092 memcpy(&ptr[old_len], dp, len);
2093 item->ri_buf[item->ri_cnt-1].i_len += len;
2094 item->ri_buf[item->ri_cnt-1].i_addr = ptr;
2095 trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
2100 * The next region to add is the start of a new region. It could be
2101 * a whole region or it could be the first part of a new region. Because
2102 * of this, the assumption here is that the type and size fields of all
2103 * format structures fit into the first 32 bits of the structure.
2105 * This works because all regions must be 32 bit aligned. Therefore, we
2106 * either have both fields or we have neither field. In the case we have
2107 * neither field, the data part of the region is zero length. We only have
2108 * a log_op_header and can throw away the header since a new one will appear
2109 * later. If we have at least 4 bytes, then we can determine how many regions
2110 * will appear in the current log item.
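/*
 * Concrete case (assuming the usual on-disk layout of the inode log
 * format): struct xfs_inode_log_format begins with a 16-bit ilf_type
 * and a 16-bit ilf_size, so even a 4-byte first fragment of a region
 * is enough to read in_f->ilf_size below and size the ri_buf array.
 */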
2113 xlog_recover_add_to_trans(
2115 struct xlog_recover *trans,
2119 struct xfs_inode_log_format *in_f; /* any will do */
2120 struct xlog_recover_item *item;
2125 if (list_empty(&trans->r_itemq)) {
2126 /* we need to catch log corruptions here */
2127 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
2128 xfs_warn(log->l_mp, "%s: bad header magic number",
2131 return -EFSCORRUPTED;
2134 if (len > sizeof(struct xfs_trans_header)) {
2135 xfs_warn(log->l_mp, "%s: bad header length", __func__);
2137 return -EFSCORRUPTED;
2141 * The transaction header can be arbitrarily split across op
2142 * records. If we don't have the whole thing here, copy what we
2143 * do have and handle the rest in the next record.
2145 if (len == sizeof(struct xfs_trans_header))
2146 xlog_recover_add_item(&trans->r_itemq);
2147 memcpy(&trans->r_theader, dp, len);
2151 ptr = kmem_alloc(len, 0);
2152 memcpy(ptr, dp, len);
2153 in_f = (struct xfs_inode_log_format *)ptr;
2155 /* take the tail entry */
2156 item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
2158 if (item->ri_total != 0 &&
2159 item->ri_total == item->ri_cnt) {
2160 /* tail item is in use, get a new one */
2161 xlog_recover_add_item(&trans->r_itemq);
2162 item = list_entry(trans->r_itemq.prev,
2163 struct xlog_recover_item, ri_list);
2166 if (item->ri_total == 0) { /* first region to be added */
2167 if (in_f->ilf_size == 0 ||
2168 in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
2170 "bad number of regions (%d) in inode log format",
2174 return -EFSCORRUPTED;
2177 item->ri_total = in_f->ilf_size;
2179 kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
2183 if (item->ri_total <= item->ri_cnt) {
2185 "log item region count (%d) overflowed size (%d)",
2186 item->ri_cnt, item->ri_total);
2189 return -EFSCORRUPTED;
2192 /* Description region is ri_buf[0] */
2193 item->ri_buf[item->ri_cnt].i_addr = ptr;
2194 item->ri_buf[item->ri_cnt].i_len = len;
2196 trace_xfs_log_recover_item_add(log, trans, item, 0);
2201 * Free up any resources allocated by the transaction
2203 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2206 xlog_recover_free_trans(
2207 struct xlog_recover *trans)
2209 struct xlog_recover_item *item, *n;
2212 hlist_del_init(&trans->r_list);
2214 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2215 /* Free the regions in the item. */
2216 list_del(&item->ri_list);
2217 for (i = 0; i < item->ri_cnt; i++)
2218 kmem_free(item->ri_buf[i].i_addr);
2219 /* Free the item itself */
2220 kmem_free(item->ri_buf);
2223 /* Free the transaction recover structure */
2228 * On error or completion, trans is freed.
2231 xlog_recovery_process_trans(
2233 struct xlog_recover *trans,
2238 struct list_head *buffer_list)
2241 bool freeit = false;
2243 /* mask off ophdr transaction container flags */
2244 flags &= ~XLOG_END_TRANS;
2245 if (flags & XLOG_WAS_CONT_TRANS)
2246 flags &= ~XLOG_CONTINUE_TRANS;
2249 * Callees must not free the trans structure. We'll decide if we need to
2250 * free it or not based on the operation being done and its result.
2253 /* expected flag values */
2255 case XLOG_CONTINUE_TRANS:
2256 error = xlog_recover_add_to_trans(log, trans, dp, len);
2258 case XLOG_WAS_CONT_TRANS:
2259 error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
2261 case XLOG_COMMIT_TRANS:
2262 error = xlog_recover_commit_trans(log, trans, pass,
2264 /* success or fail, we are now done with this transaction. */
2268 /* unexpected flag values */
2269 case XLOG_UNMOUNT_TRANS:
2270 /* just skip trans */
2271 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
2274 case XLOG_START_TRANS:
2276 xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
2278 error = -EFSCORRUPTED;
2281 if (error || freeit)
2282 xlog_recover_free_trans(trans);
2287 * Lookup the transaction recovery structure associated with the ID in the
2288 * current ophdr. If the transaction doesn't exist and the start flag is set in
2289 * the ophdr, then allocate a new transaction for future ID matches to find.
2290 * Either way, return what we found during the lookup - an existing transaction
2293 STATIC struct xlog_recover *
2294 xlog_recover_ophdr_to_trans(
2295 struct hlist_head rhash[],
2296 struct xlog_rec_header *rhead,
2297 struct xlog_op_header *ohead)
{
	struct xlog_recover	*trans;
	xlog_tid_t		tid;
	struct hlist_head	*rhp;
2303 tid = be32_to_cpu(ohead->oh_tid);
2304 rhp = &rhash[XLOG_RHASH(tid)];
	hlist_for_each_entry(trans, rhp, r_list) {
		if (trans->r_log_tid == tid)
			return trans;
	}
2311 * skip over non-start transaction headers - we could be
2312 * processing slack space before the next transaction starts
	if (!(ohead->oh_flags & XLOG_START_TRANS))
		return NULL;
2317 ASSERT(be32_to_cpu(ohead->oh_len) == 0);
2320 * This is a new transaction so allocate a new recovery container to
2321 * hold the recovery ops that will follow.
2323 trans = kmem_zalloc(sizeof(struct xlog_recover), 0);
2324 trans->r_log_tid = tid;
2325 trans->r_lsn = be64_to_cpu(rhead->h_lsn);
2326 INIT_LIST_HEAD(&trans->r_itemq);
2327 INIT_HLIST_NODE(&trans->r_list);
2328 hlist_add_head(&trans->r_list, rhp);
2331 * Nothing more to do for this ophdr. Items to be added to this new
	 * transaction will be in subsequent ophdr containers.
	 */
	return NULL;
}
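/*
 * Standalone userspace sketch (assumed names, needs <stdlib.h>; not
 * part of the build) of the lookup-or-allocate pattern above:
 * containers are hashed by tid so that ops from transactions
 * interleaved in one record find their own container, and a missing
 * container is only created when the start flag is present.
 */
struct sketch_trans {
	struct sketch_trans	*next;
	unsigned int		tid;
};

static struct sketch_trans *
sketch_ophdr_to_trans(
	struct sketch_trans	**buckets,	/* nbuckets is a power of 2 */
	unsigned int		nbuckets,
	unsigned int		tid,
	int			is_start)
{
	struct sketch_trans	**rhp = &buckets[tid & (nbuckets - 1)];
	struct sketch_trans	*t;

	for (t = *rhp; t; t = t->next)
		if (t->tid == tid)
			return t;	/* ops continue this transaction */
	if (!is_start)
		return NULL;		/* slack space before next trans */
	t = calloc(1, sizeof(*t));	/* userspace stand-in for kmem_zalloc */
	if (t) {
		t->tid = tid;
		t->next = *rhp;
		*rhp = t;		/* later ophdrs will find it */
	}
	return NULL;			/* the start ophdr carries no item */
}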
STATIC int
xlog_recover_process_ophdr(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	struct xlog_op_header	*ohead,
	char			*dp,
	char			*end,
	int			pass,
	struct list_head	*buffer_list)
{
	struct xlog_recover	*trans;
	unsigned int		len;
	int			error;
2352 /* Do we understand who wrote this op? */
2353 if (ohead->oh_clientid != XFS_TRANSACTION &&
2354 ohead->oh_clientid != XFS_LOG) {
2355 xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
2356 __func__, ohead->oh_clientid);
2358 return -EFSCORRUPTED;
2362 * Check the ophdr contains all the data it is supposed to contain.
2364 len = be32_to_cpu(ohead->oh_len);
2365 if (dp + len > end) {
2366 xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
2368 return -EFSCORRUPTED;
	trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
	if (!trans) {
		/* nothing to do, so skip over this ophdr */
		return 0;
	}
2378 * The recovered buffer queue is drained only once we know that all
	 * recovery items for the current LSN have been processed. This is
	 * required since:
	 *
	 * - Buffer write submission updates the metadata LSN of the buffer.
2383 * - Log recovery skips items with a metadata LSN >= the current LSN of
2384 * the recovery item.
2385 * - Separate recovery items against the same metadata buffer can share
2386 * a current LSN. I.e., consider that the LSN of a recovery item is
2387 * defined as the starting LSN of the first record in which its
2388 * transaction appears, that a record can hold multiple transactions,
2389 * and/or that a transaction can span multiple records.
2391 * In other words, we are allowed to submit a buffer from log recovery
2392 * once per current LSN. Otherwise, we may incorrectly skip recovery
2393 * items and cause corruption.
2395 * We don't know up front whether buffers are updated multiple times per
2396 * LSN. Therefore, track the current LSN of each commit log record as it
2397 * is processed and drain the queue when it changes. Use commit records
2398 * because they are ordered correctly by the logging code.
2400 if (log->l_recovery_lsn != trans->r_lsn &&
2401 ohead->oh_flags & XLOG_COMMIT_TRANS) {
		error = xfs_buf_delwri_submit(buffer_list);
		if (error)
			return error;
		log->l_recovery_lsn = trans->r_lsn;
	}
2408 return xlog_recovery_process_trans(log, trans, dp, len,
					    ohead->oh_flags, pass, buffer_list);
}
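/*
 * A condensed sketch of the drain rule above. The field and function
 * names mirror the surrounding code, but the helper itself is
 * illustrative and not part of the build: queued buffers are only
 * submitted when the commit record LSN moves forward, so all items
 * sharing one current LSN observe the same buffer metadata LSN.
 */
static int
sketch_drain_on_lsn_change(
	struct xlog		*log,
	xfs_lsn_t		commit_lsn,
	struct list_head	*buffer_list)
{
	int			error = 0;

	if (log->l_recovery_lsn != commit_lsn) {
		error = xfs_buf_delwri_submit(buffer_list);
		if (!error)
			log->l_recovery_lsn = commit_lsn;
	}
	return error;
}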
2413 * There are two valid states of the r_state field. 0 indicates that the
2414 * transaction structure is in a normal state. We have either seen the
2415 * start of the transaction or the last operation we added was not a partial
2416 * operation. If the last operation we added to the transaction was a
2417 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2419 * NOTE: skip LRs with 0 data length.
STATIC int
xlog_recover_process_data(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			pass,
	struct list_head	*buffer_list)
{
	struct xlog_op_header	*ohead;
	char			*end;
	int			num_logops;
	int			error;
2435 end = dp + be32_to_cpu(rhead->h_len);
2436 num_logops = be32_to_cpu(rhead->h_num_logops);
2438 /* check the log format matches our own - else we can't recover */
	if (xlog_header_check_recover(log->l_mp, rhead))
		return -EIO;
2442 trace_xfs_log_recover_record(log, rhead, pass);
2443 while ((dp < end) && num_logops) {
2445 ohead = (struct xlog_op_header *)dp;
2446 dp += sizeof(*ohead);
		/* errors will abort recovery */
		error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
						   dp, end, pass, buffer_list);
		if (error)
			return error;
		dp += be32_to_cpu(ohead->oh_len);
		num_logops--;
	}
	return 0;
}
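/*
 * Standalone sketch (simplified, host-endian lengths; not part of the
 * build) of the record walk above: a record body is a packed sequence
 * of op headers, each followed by its payload, so the cursor advances
 * by header size plus oh_len until the advertised number of log
 * operations is consumed.
 */
struct sketch_ophdr {
	unsigned int	oh_len;		/* payload bytes after the header */
};

static int
sketch_walk_ops(char *dp, char *end, int num_logops)
{
	while (dp < end && num_logops) {
		struct sketch_ophdr *ohead = (struct sketch_ophdr *)dp;

		dp += sizeof(*ohead);		/* step over the header */
		if (dp + ohead->oh_len > end)
			return -1;		/* corrupt length */
		/* ... hand [dp, dp + oh_len) to the transaction ... */
		dp += ohead->oh_len;		/* step over the payload */
		num_logops--;
	}
	return 0;
}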
2461 /* Take all the collected deferred ops and finish them in order. */
STATIC int
xlog_finish_defer_ops(
2464 struct xfs_mount *mp,
2465 struct list_head *capture_list)
{
	struct xfs_defer_capture *dfc, *next;
	struct xfs_trans	*tp;
	int			error = 0;
2471 list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
2472 struct xfs_trans_res resv;
2473 struct xfs_defer_resources dres;
2476 * Create a new transaction reservation from the captured
2477 * information. Set logcount to 1 to force the new transaction
2478 * to regrant every roll so that we can make forward progress
2479 * in recovery no matter how full the log might be.
2481 resv.tr_logres = dfc->dfc_logres;
2482 resv.tr_logcount = 1;
2483 resv.tr_logflags = XFS_TRANS_PERM_LOG_RES;
2485 error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres,
2486 dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp);
		if (error) {
			xlog_force_shutdown(mp->m_log, SHUTDOWN_LOG_IO_ERROR);
			return error;
		}
2493 * Transfer to this new transaction all the dfops we captured
2494 * from recovering a single intent item.
2496 list_del_init(&dfc->dfc_list);
2497 xfs_defer_ops_continue(dfc, tp, &dres);
		error = xfs_trans_commit(tp);
		xfs_defer_resources_rele(&dres);
		if (error)
			return error;
	}

	ASSERT(list_empty(capture_list));
	return 0;
}
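/*
 * The reservation rebuild above, isolated as a sketch (field names
 * mirror the code; the helper itself is illustrative): the captured
 * log space is reused, but logcount is pinned to 1 so every
 * transaction roll must regrant, letting recovery make forward
 * progress even in a nearly full log.
 */
static void
sketch_capture_resv(
	const struct xfs_defer_capture	*dfc,
	struct xfs_trans_res		*resv)
{
	resv->tr_logres = dfc->dfc_logres;	/* space captured with intent */
	resv->tr_logcount = 1;			/* regrant on every roll */
	resv->tr_logflags = XFS_TRANS_PERM_LOG_RES;
}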
2508 /* Release all the captured defer ops and capture structures in this list. */
STATIC void
xlog_abort_defer_ops(
2511 struct xfs_mount *mp,
2512 struct list_head *capture_list)
{
	struct xfs_defer_capture *dfc;
2515 struct xfs_defer_capture *next;
2517 list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
2518 list_del_init(&dfc->dfc_list);
		xfs_defer_ops_capture_free(mp, dfc);
	}
}
2524 * When this is called, all of the log intent items which did not have
2525 * corresponding log done items should be in the AIL. What we do now is update
2526 * the data structures associated with each one.
2528 * Since we process the log intent items in normal transactions, they will be
2529 * removed at some point after the commit. This prevents us from just walking
2530 * down the list processing each one. We'll use a flag in the intent item to
2531 * skip those that we've already processed and use the AIL iteration mechanism's
2532 * generation count to try to speed this up at least a bit.
2534 * When we start, we know that the intents are the only things in the AIL. As we
2535 * process them, however, other items are added to the AIL. Hence we know we
 * have started recovery on all the pending intents when we find a non-intent
 * item in the AIL.
 */
STATIC int
xlog_recover_process_intents(
	struct xlog		*log)
{
2543 LIST_HEAD(capture_list);
2544 struct xfs_ail_cursor cur;
2545 struct xfs_log_item *lip;
2546 struct xfs_ail *ailp;
	int			error = 0;
#if defined(DEBUG) || defined(XFS_WARN)
	xfs_lsn_t		last_lsn;
#endif

	ailp = log->l_ailp;
	spin_lock(&ailp->ail_lock);
#if defined(DEBUG) || defined(XFS_WARN)
	last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
#endif
	for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	     lip != NULL;
	     lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
		if (!xlog_item_is_intent(lip))
			break;
2564 * We should never see a redo item with a LSN higher than
		 * the last transaction we found in the log at the start
		 * of recovery.
		 */
2568 ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
2571 * NOTE: If your intent processing routine can create more
2572 * deferred ops, you /must/ attach them to the capture list in
2573 * the recover routine or else those subsequent intents will be
2574 * replayed in the wrong order!
2576 spin_unlock(&ailp->ail_lock);
2577 error = lip->li_ops->iop_recover(lip, &capture_list);
2578 spin_lock(&ailp->ail_lock);
		if (error) {
			trace_xlog_intent_recovery_failed(log->l_mp, error,
					lip->li_ops->iop_recover);
			break;
		}
	}
2586 xfs_trans_ail_cursor_done(&cur);
2587 spin_unlock(&ailp->ail_lock);
	error = xlog_finish_defer_ops(log->l_mp, &capture_list);
	if (error)
		goto err;
	return 0;
err:
	xlog_abort_defer_ops(log->l_mp, &capture_list);
	return error;
}
2602 * A cancel occurs when the mount has failed and we're bailing out. Release all
 * pending log intent items that we haven't started recovery on so they don't
 * pin the AIL.
 */
STATIC void
xlog_recover_cancel_intents(
	struct xlog		*log)
{
2610 struct xfs_log_item *lip;
2611 struct xfs_ail_cursor cur;
2612 struct xfs_ail *ailp;
	ailp = log->l_ailp;
	spin_lock(&ailp->ail_lock);
2616 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2617 while (lip != NULL) {
		if (!xlog_item_is_intent(lip))
			break;
2621 spin_unlock(&ailp->ail_lock);
2622 lip->li_ops->iop_release(lip);
2623 spin_lock(&ailp->ail_lock);
2624 lip = xfs_trans_ail_cursor_next(ailp, &cur);
2627 xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
}
2632 * This routine performs a transaction to null out a bad inode pointer
2633 * in an agi unlinked inode hash bucket.
STATIC void
xlog_recover_clear_agi_bucket(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	int			bucket)
{
	struct xfs_trans	*tp;
	struct xfs_agi		*agi;
	struct xfs_buf		*agibp;
	int			offset;
	int			error;
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
	if (error)
		goto out_error;

	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		goto out_abort;
2655 agi = agibp->b_addr;
2656 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
2657 offset = offsetof(xfs_agi_t, agi_unlinked) +
2658 (sizeof(xfs_agino_t) * bucket);
2659 xfs_trans_log_buf(tp, agibp, offset,
2660 (offset + sizeof(xfs_agino_t) - 1));
	error = xfs_trans_commit(tp);
	if (error)
		goto out_error;
	return;

out_abort:
	xfs_trans_cancel(tp);
out_error:
	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
}
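/*
 * A sketch of the dirty-range arithmetic above (illustrative helper,
 * same layout names as the code): offsetof() locates the unlinked
 * bucket array inside the AGI and the bucket index selects one slot,
 * giving the inclusive first/last byte range that is handed to
 * xfs_trans_log_buf().
 */
static inline void
sketch_agi_bucket_range(int bucket, int *first, int *last)
{
	*first = offsetof(xfs_agi_t, agi_unlinked) +
		 (sizeof(xfs_agino_t) * bucket);
	*last = *first + sizeof(xfs_agino_t) - 1;	/* inclusive last byte */
}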
STATIC xfs_agino_t
xlog_recover_process_one_iunlink(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agino_t		agino,
	int			bucket)
{
	struct xfs_buf		*ibp;
	struct xfs_dinode	*dip;
	struct xfs_inode	*ip;
	xfs_ino_t		ino;
	int			error;
2687 ino = XFS_AGINO_TO_INO(mp, agno, agino);
	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		goto fail;
2693 * Get the on disk inode to find the next inode in the bucket.
	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &ibp);
	if (error)
		goto fail_iput;
2698 dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset);
2700 xfs_iflags_clear(ip, XFS_IRECOVERY);
2701 ASSERT(VFS_I(ip)->i_nlink == 0);
2702 ASSERT(VFS_I(ip)->i_mode != 0);
	/* setup for the next pass */
	agino = be32_to_cpu(dip->di_next_unlinked);
	xfs_buf_relse(ibp);
	xfs_irele(ip);
	return agino;

fail_iput:
	xfs_irele(ip);
fail:
2715 * We can't read in the inode this bucket points to, or this inode
2716 * is messed up. Just ditch this bucket of inodes. We will lose
2717 * some inodes and space, but at least we won't hang.
2719 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
2720 * clear the inode pointer in the bucket.
	xlog_recover_clear_agi_bucket(mp, agno, bucket);
	return NULLAGINO;
}
2727 * Recover AGI unlinked lists
2729 * This is called during recovery to process any inodes which we unlinked but
2730 * not freed when the system crashed. These inodes will be on the lists in the
2731 * AGI blocks. What we do here is scan all the AGIs and fully truncate and free
2732 * any inodes found on the lists. Each inode is removed from the lists when it
2733 * has been fully truncated and is freed. The freeing of the inode and its
2734 * removal from the list must be atomic.
2736 * If everything we touch in the agi processing loop is already in memory, this
2737 * loop can hold the cpu for a long time. It runs without lock contention,
 * memory allocation contention, the need to wait for I/O, etc, and so will run
 * until we either run out of inodes to process, run low on memory or we run
 * out of log space.
2742 * This behaviour is bad for latency on single CPU and non-preemptible kernels,
2743 * and can prevent other filesystem work (such as CIL pushes) from running. This
2744 * can lead to deadlocks if the recovery process runs out of log reservation
2745 * space. Hence we need to yield the CPU when there is other kernel work
 * scheduled on this CPU to ensure other scheduled work can run without undue
 * latency.
 */
STATIC void
xlog_recover_process_iunlinks(
	struct xlog		*log)
{
2753 struct xfs_mount *mp = log->l_mp;
2754 struct xfs_perag *pag;
2755 xfs_agnumber_t agno;
2756 struct xfs_agi *agi;
	struct xfs_buf		*agibp;
	xfs_agino_t		agino;
	int			bucket;
	int			error;
2762 for_each_perag(mp, agno, pag) {
		error = xfs_read_agi(mp, NULL, pag->pag_agno, &agibp);
		if (error) {
			/*
			 * AGI is b0rked. Don't process it.
			 *
			 * We should probably mark the filesystem as corrupt
			 * after we've recovered all the AGs we can....
			 */
			continue;
		}
2774 * Unlock the buffer so that it can be acquired in the normal
2775 * course of the transaction to truncate and free each inode.
2776 * Because we are not racing with anyone else here for the AGI
2777 * buffer, we don't even need to hold it locked to read the
		 * initial unlinked bucket entries out of the buffer. We keep a
		 * buffer reference, though, so that it stays pinned in memory
		 * while we need the buffer.
		 */
2782 agi = agibp->b_addr;
2783 xfs_buf_unlock(agibp);
2785 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
2786 agino = be32_to_cpu(agi->agi_unlinked[bucket]);
2787 while (agino != NULLAGINO) {
				agino = xlog_recover_process_one_iunlink(mp,
						pag->pag_agno, agino, bucket);
				cond_resched();
			}
		}
		xfs_buf_rele(agibp);
	}
2797 * Flush the pending unlinked inodes to ensure that the inactivations
2798 * are fully completed on disk and the incore inodes can be reclaimed
2799 * before we signal that recovery is complete.
	xfs_inodegc_flush(mp);
}
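/*
 * Standalone sketch (simplified types, host-endian; not part of the
 * build) of the bucket walk above: every AGI bucket heads a singly
 * linked list threaded through the on-disk inodes' di_next_unlinked
 * fields and terminated by NULLAGINO, so recovery simply chases next
 * pointers until it reaches the sentinel.
 */
#define SKETCH_NULLAGINO	((unsigned int)-1)

struct sketch_dinode {
	unsigned int	next_unlinked;	/* di_next_unlinked analogue */
};

static void
sketch_walk_bucket(struct sketch_dinode *itable, unsigned int head)
{
	unsigned int	agino = head;

	while (agino != SKETCH_NULLAGINO) {
		/* ... truncate and free inode 'agino' here ... */
		agino = itable[agino].next_unlinked;	/* next in chain */
	}
}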
STATIC void
xlog_unpack_data(
	struct xlog_rec_header	*rhead,
	char			*dp,
	struct xlog		*log)
{
	int			i, j, k;
2812 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
2813 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
		dp += BBSIZE;
	}
2818 if (xfs_has_logv2(log->l_mp)) {
2819 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
2820 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
2821 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
2822 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
			dp += BBSIZE;
		}
	}
}
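/*
 * Sketch of the inverse transform above (standalone, needs <string.h>,
 * single-header case only; not part of the build): at write time the
 * first word of every 512-byte basic block was replaced by the log
 * cycle number and the displaced word saved in the record header, so
 * unpacking copies the saved words back and the payload becomes
 * byte-identical to what was logged.
 */
static void
sketch_unpack_cycle_words(
	unsigned char		*dp,		/* record payload */
	unsigned int		nblocks,	/* basic blocks in payload */
	const unsigned int	*saved)		/* h_cycle_data analogue */
{
	unsigned int		i;

	for (i = 0; i < nblocks; i++) {
		memcpy(dp, &saved[i], sizeof(saved[i]));
		dp += 512;			/* BBSIZE */
	}
}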
2830 * CRC check, unpack and process a log record.
STATIC int
xlog_recover_process(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			pass,
	struct list_head	*buffer_list)
{
	__le32			old_crc = rhead->h_crc;
	__le32			crc;
2844 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
2847 * Nothing else to do if this is a CRC verification pass. Just return
	 * if this is a record with a non-zero crc. Unfortunately, mkfs always
2849 * sets old_crc to 0 so we must consider this valid even on v5 supers.
2850 * Otherwise, return EFSBADCRC on failure so the callers up the stack
2851 * know precisely what failed.
	if (pass == XLOG_RECOVER_CRCPASS) {
		if (old_crc && crc != old_crc)
			return -EFSBADCRC;
		return 0;
	}
2860 * We're in the normal recovery path. Issue a warning if and only if the
2861 * CRC in the header is non-zero. This is an advisory warning and the
2862 * zero CRC check prevents warnings from being emitted when upgrading
2863 * the kernel from one that does not add CRCs by default.
2865 if (crc != old_crc) {
2866 if (old_crc || xfs_has_crc(log->l_mp)) {
2867 xfs_alert(log->l_mp,
2868 "log record CRC mismatch: found 0x%x, expected 0x%x.",
				le32_to_cpu(old_crc),
				le32_to_cpu(crc));
			xfs_hex_dump(dp, 32);
		}
2875 * If the filesystem is CRC enabled, this mismatch becomes a
2876 * fatal log corruption failure.
2878 if (xfs_has_crc(log->l_mp)) {
2879 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
2880 return -EFSCORRUPTED;
2884 xlog_unpack_data(rhead, dp, log);
	return xlog_recover_process_data(log, rhash, rhead, dp, pass,
					 buffer_list);
}
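/*
 * The CRC policy above, isolated as a decision sketch (hypothetical
 * helper; the return values are invented for illustration): a mismatch
 * is fatal only on a CRC-enabled filesystem, otherwise a non-zero
 * on-disk CRC earns an advisory warning and a zero CRC left by old
 * mkfs is tolerated silently.
 */
static int
sketch_crc_policy(
	unsigned int	disk_crc,	/* CRC found in the record header */
	unsigned int	calc_crc,	/* CRC recomputed over the record */
	int		has_crc_fs)	/* filesystem mandates CRCs? */
{
	if (disk_crc == calc_crc)
		return 0;		/* match: proceed silently */
	if (has_crc_fs)
		return -1;		/* fatal: treat as corruption */
	return disk_crc ? 1 : 0;	/* warn only if a CRC was recorded */
}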
STATIC int
xlog_valid_rec_header(
	struct xlog		*log,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		blkno,
	int			bufsize)
{
	int			hlen;
2899 if (XFS_IS_CORRUPT(log->l_mp,
2900 rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)))
2901 return -EFSCORRUPTED;
2902 if (XFS_IS_CORRUPT(log->l_mp,
2903 (!rhead->h_version ||
2904 (be32_to_cpu(rhead->h_version) &
2905 (~XLOG_VERSION_OKBITS))))) {
2906 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
2907 __func__, be32_to_cpu(rhead->h_version));
2908 return -EFSCORRUPTED;
2912 * LR body must have data (or it wouldn't have been written)
2913 * and h_len must not be greater than LR buffer size.
2915 hlen = be32_to_cpu(rhead->h_len);
2916 if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > bufsize))
2917 return -EFSCORRUPTED;
2919 if (XFS_IS_CORRUPT(log->l_mp,
2920 blkno > log->l_logBBsize || blkno > INT_MAX))
		return -EFSCORRUPTED;
	return 0;
}
2926 * Read the log from tail to head and process the log records found.
2927 * Handle the two cases where the tail and head are in the same cycle
2928 * and where the active portion of the log wraps around the end of
2929 * the physical log separately. The pass parameter is passed through
 * to the routines called to process the data and is not looked at
 * directly by this routine.
 */
STATIC int
xlog_do_recovery_pass(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			pass,
	xfs_daddr_t		*first_bad)	/* out: first bad log rec */
{
2941 xlog_rec_header_t *rhead;
2942 xfs_daddr_t blk_no, rblk_no;
	xfs_daddr_t		rhead_blk;
	char			*offset;
	char			*hbp, *dbp;
	int			error = 0, h_size, h_len;
	int			error2 = 0;
	int			bblks, split_bblks;
	int			hblks, split_hblks, wrapped_hblks;
	int			i;
	struct hlist_head	rhash[XLOG_RHASH_SIZE];
	LIST_HEAD(buffer_list);
2954 ASSERT(head_blk != tail_blk);
2955 blk_no = rhead_blk = tail_blk;
2957 for (i = 0; i < XLOG_RHASH_SIZE; i++)
2958 INIT_HLIST_HEAD(&rhash[i]);
2961 * Read the header of the tail block and get the iclog buffer size from
2962 * h_size. Use this to tell how many sectors make up the log header.
2964 if (xfs_has_logv2(log->l_mp)) {
2966 * When using variable length iclogs, read first sector of
2967 * iclog header and extract the header size from it. Get a
2968 * new hbp that is the correct size.
2970 hbp = xlog_alloc_buffer(log, 1);
2974 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
2978 rhead = (xlog_rec_header_t *)offset;
2981 * xfsprogs has a bug where record length is based on lsunit but
2982 * h_size (iclog size) is hardcoded to 32k. Now that we
2983 * unconditionally CRC verify the unmount record, this means the
		 * log buffer can be too small for the record and cause an
		 * overrun.
		 *
2987 * Detect this condition here. Use lsunit for the buffer size as
2988 * long as this looks like the mkfs case. Otherwise, return an
2989 * error to avoid a buffer overrun.
2991 h_size = be32_to_cpu(rhead->h_size);
2992 h_len = be32_to_cpu(rhead->h_len);
2993 if (h_len > h_size && h_len <= log->l_mp->m_logbsize &&
2994 rhead->h_num_logops == cpu_to_be32(1)) {
2996 "invalid iclog size (%d bytes), using lsunit (%d bytes)",
2997 h_size, log->l_mp->m_logbsize);
2998 h_size = log->l_mp->m_logbsize;
3001 error = xlog_valid_rec_header(log, rhead, tail_blk, h_size);
		hblks = xlog_logrec_hblks(log, rhead);
		if (hblks != 1) {
			kmem_free(hbp);
			hbp = xlog_alloc_buffer(log, hblks);
		}
	} else {
		ASSERT(log->l_sectBBsize == 1);
		hblks = 1;
		hbp = xlog_alloc_buffer(log, 1);
		h_size = XLOG_BIG_RECORD_BSIZE;
	}
3019 dbp = xlog_alloc_buffer(log, BTOBB(h_size));
3025 memset(rhash, 0, sizeof(rhash));
3026 if (tail_blk > head_blk) {
3028 * Perform recovery around the end of the physical log.
3029 * When the head is not on the same cycle number as the tail,
3030 * we can't do a sequential recovery.
3032 while (blk_no < log->l_logBBsize) {
			 * Check for header wrapping around physical end-of-log
			 */
			offset = hbp;
			split_hblks = 0;
			wrapped_hblks = 0;
3039 if (blk_no + hblks <= log->l_logBBsize) {
3040 /* Read header in one read */
				error = xlog_bread(log, blk_no, hblks, hbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This LR is split across physical log end */
3047 if (blk_no != log->l_logBBsize) {
3048 /* some data before physical log end */
3049 ASSERT(blk_no <= INT_MAX);
3050 split_hblks = log->l_logBBsize - (int)blk_no;
3051 ASSERT(split_hblks > 0);
					error = xlog_bread(log, blk_no,
							   split_hblks, hbp,
							   &offset);
					if (error)
						goto bread_err2;
				}
3060 * Note: this black magic still works with
3061 * large sector sizes (non-512) only because:
3062 * - we increased the buffer size originally
3063 * by 1 sector giving us enough extra space
3064 * for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
3068 * _first_, then the log start (LR header end)
3069 * - order is important.
3071 wrapped_hblks = hblks - split_hblks;
				error = xlog_bread_noalign(log, 0,
						wrapped_hblks,
						offset + BBTOB(split_hblks));
				if (error)
					goto bread_err2;
			}
3078 rhead = (xlog_rec_header_t *)offset;
3079 error = xlog_valid_rec_header(log, rhead,
3080 split_hblks ? blk_no : 0, h_size);
			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			blk_no += hblks;
3088 * Read the log record data in multiple reads if it
3089 * wraps around the end of the log. Note that if the
3090 * header already wrapped, blk_no could point past the
			 * end of the log. The record data is contiguous in
			 * that case.
			 */
3094 if (blk_no + bblks <= log->l_logBBsize ||
3095 blk_no >= log->l_logBBsize) {
3096 rblk_no = xlog_wrap_logbno(log, blk_no);
				error = xlog_bread(log, rblk_no, bblks, dbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This log record is split across the
				 * physical end of log */
				offset = dbp;
				split_bblks = 0;
3106 if (blk_no != log->l_logBBsize) {
					/* some data is before the physical
					 * end of log */
3109 ASSERT(!wrapped_hblks);
3110 ASSERT(blk_no <= INT_MAX);
					split_bblks =
						log->l_logBBsize - (int)blk_no;
3113 ASSERT(split_bblks > 0);
					error = xlog_bread(log, blk_no,
							   split_bblks, dbp,
							   &offset);
					if (error)
						goto bread_err2;
				}
3122 * Note: this black magic still works with
3123 * large sector sizes (non-512) only because:
3124 * - we increased the buffer size originally
3125 * by 1 sector giving us enough extra space
3126 * for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
3129 * - we read the log end (LR header start)
3130 * _first_, then the log start (LR header end)
3131 * - order is important.
3133 error = xlog_bread_noalign(log, 0,
3134 bblks - split_bblks,
						offset + BBTOB(split_bblks));
				if (error)
					goto bread_err2;
			}
3140 error = xlog_recover_process(log, rhash, rhead, offset,
					pass, &buffer_list);
			if (error)
				goto bread_err2;

			blk_no += bblks;
			rhead_blk = blk_no;
		}
3149 ASSERT(blk_no >= log->l_logBBsize);
		blk_no -= log->l_logBBsize;
		rhead_blk = blk_no;
	}
3154 /* read first part of physical log */
3155 while (blk_no < head_blk) {
3156 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3160 rhead = (xlog_rec_header_t *)offset;
3161 error = xlog_valid_rec_header(log, rhead, blk_no, h_size);
3165 /* blocks in data section */
3166 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
		error = xlog_bread(log, blk_no + hblks, bblks, dbp,
				   &offset);
		if (error)
			goto bread_err2;
		error = xlog_recover_process(log, rhash, rhead, offset, pass,
					     &buffer_list);
		if (error)
			goto bread_err2;
		blk_no += bblks + hblks;
		rhead_blk = blk_no;
	}

bread_err2:
	kmem_free(dbp);
bread_err1:
	kmem_free(hbp);
3187 * Submit buffers that have been added from the last record processed,
3188 * regardless of error status.
3190 if (!list_empty(&buffer_list))
3191 error2 = xfs_buf_delwri_submit(&buffer_list);
3193 if (error && first_bad)
3194 *first_bad = rhead_blk;
3197 * Transactions are freed at commit time but transactions without commit
	 * records on disk are never committed. Free any that may be left in the
	 * recovery hash table.
	 */
3201 for (i = 0; i < XLOG_RHASH_SIZE; i++) {
3202 struct hlist_node *tmp;
3203 struct xlog_recover *trans;
3205 hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
3206 xlog_recover_free_trans(trans);
	return error ? error : error2;
}
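/*
 * Standalone sketch (assumed read callback, no sector rounding; not
 * part of the build) of the split reads above: when a record straddles
 * the physical end of the log, the tail piece is read first, then the
 * head piece from block 0 is appended into the same buffer,
 * re-creating one contiguous record image.
 */
static int
sketch_wrapped_read(
	int	(*bread)(unsigned long long blk, int nbblks, char *buf),
	unsigned long long	blk_no,		/* start of the record */
	int			nbblks,		/* record length in blocks */
	unsigned long long	log_size,	/* physical log size, blocks */
	char			*buf)		/* holds nbblks * 512 bytes */
{
	int	split = (int)(log_size - blk_no); /* blocks before the end */
	int	error;

	if (blk_no + nbblks <= log_size)
		return bread(blk_no, nbblks, buf);	/* no wrap: one read */

	error = bread(blk_no, split, buf);		/* log tail first */
	if (error)
		return error;
	return bread(0, nbblks - split, buf + split * 512); /* then the start */
}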
3213 * Do the recovery of the log. We actually do this in two phases.
3214 * The two passes are necessary in order to implement the function
3215 * of cancelling a record written into the log. The first pass
3216 * determines those things which have been cancelled, and the
3217 * second pass replays log items normally except for those which
3218 * have been cancelled. The handling of the replay and cancellations
3219 * takes place in the log item type specific routines.
3221 * The table of items which have cancel records in the log is allocated
3222 * and freed at this level, since only here do we know when all of
3223 * the log recovery has been completed.
STATIC int
xlog_do_log_recovery(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk)
{
	int			error, i;
3233 ASSERT(head_blk != tail_blk);
3236 * First do a pass to find all of the cancelled buf log items.
3237 * Store them in the buf_cancel_table for use in the second pass.
	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
					      sizeof(struct list_head),
					      0);
3242 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3243 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
3245 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3246 XLOG_RECOVER_PASS1, NULL);
	if (error != 0) {
		kmem_free(log->l_buf_cancel_table);
		log->l_buf_cancel_table = NULL;
		return error;
	}
3253 * Then do a second pass to actually recover the items in the log.
3254 * When it is complete free the table of buf cancel items.
3256 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3257 XLOG_RECOVER_PASS2, NULL);
#ifdef DEBUG
	if (!error)
		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
#endif	/* DEBUG */
3267 kmem_free(log->l_buf_cancel_table);
	log->l_buf_cancel_table = NULL;

	return error;
}
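/*
 * The two-pass structure above as a standalone sketch (a function
 * pointer stands in for xlog_do_recovery_pass): pass 1 only populates
 * the cancellation table, pass 2 replays while consulting it, and the
 * caller owns the table across both passes.
 */
static int
sketch_two_pass_recover(
	int	(*pass_fn)(void *ctx, int pass),
	void	*ctx)
{
	int	error;

	error = pass_fn(ctx, 1);	/* collect cancelled buf log items */
	if (error)
		return error;
	return pass_fn(ctx, 2);		/* replay everything not cancelled */
}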
3274 * Do the actual recovery
 */
STATIC int
xlog_do_recover(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
3280 xfs_daddr_t tail_blk)
3282 struct xfs_mount *mp = log->l_mp;
3283 struct xfs_buf *bp = mp->m_sb_bp;
	struct xfs_sb		*sbp = &mp->m_sb;
	int			error;
3287 trace_xfs_log_recover(log, head_blk, tail_blk);
3290 * First replay the images in the log.
	error = xlog_do_log_recovery(log, head_blk, tail_blk);
	if (error)
		return error;

	if (xlog_is_shutdown(log))
		return -EIO;
3300 * We now update the tail_lsn since much of the recovery has completed
	 * and there may be space available to use. If there were no extent
	 * frees or iunlinks, we can free up the entire log and set the tail_lsn to
3303 * be the last_sync_lsn. This was set in xlog_find_tail to be the
3304 * lsn of the last known good LR on disk. If there are extent frees
3305 * or iunlinks they will have some entries in the AIL; so we look at
3306 * the AIL to determine how to set the tail_lsn.
3308 xlog_assign_tail_lsn(mp);
3311 * Now that we've finished replaying all buffer and inode updates,
3312 * re-read the superblock and reverify it.
	error = _xfs_buf_read(bp, XBF_READ);
	if (error) {
		if (!xlog_is_shutdown(log)) {
			xfs_buf_ioerror_alert(bp, __this_address);
			ASSERT(0);
		}
		xfs_buf_relse(bp);
		return error;
	}
3326 /* Convert superblock from on-disk format */
3327 xfs_sb_from_disk(sbp, bp->b_addr);
3330 /* re-initialise in-core superblock and geometry structures */
3331 mp->m_features |= xfs_sb_version_to_features(sbp);
3332 xfs_reinit_percpu_counters(mp);
3333 error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
3335 xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
3338 mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
3340 xlog_recover_check_summary(log);
3342 /* Normal transactions can now occur */
	clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
	return 0;
}
3348 * Perform recovery and re-initialize some log variables in xlog_find_tail.
3350 * Return error or zero.
int
xlog_recover(
	struct xlog	*log)
{
	xfs_daddr_t	head_blk, tail_blk;
	int		error;
3359 /* find the tail of the log */
3360 error = xlog_find_tail(log, &head_blk, &tail_blk);
3365 * The superblock was read before the log was available and thus the LSN
3366 * could not be verified. Check the superblock LSN against the current
3367 * LSN now that it's known.
3369 if (xfs_has_crc(log->l_mp) &&
	    !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
		return -EINVAL;
3373 if (tail_blk != head_blk) {
3374 /* There used to be a comment here:
3376 * disallow recovery on read-only mounts. note -- mount
		 * checks for ENOSPC and turns it into an intelligent
		 * error message.
		 * ...but this is no longer true. Now, unless you specify
3380 * NORECOVERY (in which case this function would never be
3381 * called), we just go ahead and recover. We do this all
3382 * under the vfs layer, so we can get away with it unless
3383 * the device itself is read-only, in which case we fail.
		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
			return error;
		}
3390 * Version 5 superblock log feature mask validation. We know the
3391 * log is dirty so check if there are any unknown log features
3392 * in what we need to recover. If there are unknown features
		 * (e.g. unsupported transactions), then simply reject the
3394 * attempt at recovery before touching anything.
3396 if (xfs_sb_is_v5(&log->l_mp->m_sb) &&
3397 xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
3398 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
3400 "Superblock has unknown incompatible log features (0x%x) enabled.",
3401 (log->l_mp->m_sb.sb_features_log_incompat &
3402 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
3404 "The log can not be fully and/or safely recovered by this kernel.");
3406 "Please recover the log on a kernel that supports the unknown features.");
3411 * Delay log recovery if the debug hook is set. This is debug
3412 * instrumentation to coordinate simulation of I/O failures with
3415 if (xfs_globals.log_recovery_delay) {
3416 xfs_notice(log->l_mp,
3417 "Delaying log recovery for %d seconds.",
3418 xfs_globals.log_recovery_delay);
3419 msleep(xfs_globals.log_recovery_delay * 1000);
3422 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
			log->l_mp->m_logname ? log->l_mp->m_logname
					     : "internal");
3426 error = xlog_do_recover(log, head_blk, tail_blk);
		set_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
	}
	return error;
}
3433 * In the first part of recovery we replay inodes and buffers and build up the
3434 * list of intents which need to be processed. Here we process the intents and
3435 * clean up the on disk unlinked inode lists. This is separated from the first
3436 * part of recovery so that the root and real-time bitmap inodes can be read in
3437 * from disk in between the two stages. This is necessary so that we can free
3438 * space in the real-time portion of the file system.
int
xlog_recover_finish(
	struct xlog	*log)
{
	int		error;

	error = xlog_recover_process_intents(log);
	if (error) {
3449 * Cancel all the unprocessed intent items now so that we don't
3450 * leave them pinned in the AIL. This can cause the AIL to
3451 * livelock on the pinned item if anyone tries to push the AIL
3452 * (inode reclaim does this) before we get around to
3453 * xfs_log_mount_cancel.
3455 xlog_recover_cancel_intents(log);
3456 xfs_alert(log->l_mp, "Failed to recover intents");
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
		return error;
	}
3462 * Sync the log to get all the intents out of the AIL. This isn't
3463 * absolutely necessary, but it helps in case the unlink transactions
3464 * would have problems pushing the intents out of the way.
3466 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3469 * Now that we've recovered the log and all the intents, we can clear
3470 * the log incompat feature bits in the superblock because there's no
3471 * longer anything to protect. We rely on the AIL push to write out the
3472 * updated superblock after everything else.
3474 if (xfs_clear_incompat_log_features(log->l_mp)) {
3475 error = xfs_sync_sb(log->l_mp, false);
		if (error) {
			xfs_alert(log->l_mp,
	"Failed to clear log incompat features on recovery");
			return error;
		}
	}
3483 xlog_recover_process_iunlinks(log);
3484 xlog_recover_check_summary(log);
3487 * Recover any CoW staging blocks that are still referenced by the
3488 * ondisk refcount metadata. During mount there cannot be any live
3489 * staging extents as we have not permitted any user modifications.
	 * Therefore, it is safe to free them all right now, even on a
	 * read-only mount.
	 */
	error = xfs_reflink_recover_cow(log->l_mp);
	if (error) {
		xfs_alert(log->l_mp,
	"Failed to recover leftover CoW staging extents, err %d.",
				error);
3499 * If we get an error here, make sure the log is shut down
3500 * but return zero so that any log items committed since the
		 * end of intents processing can be pushed through the CIL
		 * with another flush.
		 */
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
	}
	return 0;
}
void
xlog_recover_cancel(
	struct xlog	*log)
{
	if (xlog_recovery_needed(log))
		xlog_recover_cancel_intents(log);
}
#if defined(DEBUG)
/*
 * Read all of the agf and agi counters and check that they
 * are consistent with the superblock counters.
 */
STATIC void
xlog_recover_check_summary(
	struct xlog	*log)
{
3527 struct xfs_mount *mp = log->l_mp;
3528 struct xfs_perag *pag;
3529 struct xfs_buf *agfbp;
3530 struct xfs_buf *agibp;
	xfs_agnumber_t		agno;
	uint64_t		freeblks;
	uint64_t		itotal;
	uint64_t		ifree;
	int			error;

	freeblks = 0LL;
	itotal = 0LL;
	ifree = 0LL;
3540 for_each_perag(mp, agno, pag) {
		error = xfs_read_agf(mp, NULL, pag->pag_agno, 0, &agfbp);
		if (error) {
			xfs_alert(mp, "%s agf read failed agno %d error %d",
					__func__, pag->pag_agno, error);
		} else {
			struct xfs_agf	*agfp = agfbp->b_addr;
3548 freeblks += be32_to_cpu(agfp->agf_freeblks) +
3549 be32_to_cpu(agfp->agf_flcount);
			xfs_buf_relse(agfbp);
		}
		error = xfs_read_agi(mp, NULL, pag->pag_agno, &agibp);
		if (error) {
			xfs_alert(mp, "%s agi read failed agno %d error %d",
					__func__, pag->pag_agno, error);
		} else {
			struct xfs_agi	*agi = agibp->b_addr;
3560 itotal += be32_to_cpu(agi->agi_count);
3561 ifree += be32_to_cpu(agi->agi_freecount);
			xfs_buf_relse(agibp);
		}
	}
}
#endif /* DEBUG */