// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
struct workqueue_struct *xfs_discard_wq;
/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog *log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, 0);

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	tic->t_iclog_hdrs = 0;
	return tic;
}
static inline void
xlog_cil_set_iclog_hdr_count(struct xfs_cil *cil)
{
	struct xlog *log = cil->xc_log;

	atomic_set(&cil->xc_iclog_hdrs,
		   (XLOG_CIL_BLOCKING_SPACE_LIMIT(log) /
			(log->l_iclog_size - log->l_iclog_hsize)));
}
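
/*
 * Worked example for the calculation above (illustrative numbers, not taken
 * from any particular configuration): with a 32 KB iclog buffer
 * (l_iclog_size) and a 512 byte iclog header (l_iclog_hsize), each iclog
 * carries 32256 bytes of checkpoint payload, so xc_iclog_hdrs becomes the
 * blocking space limit divided by 32256 - the worst-case number of iclog
 * headers a maximally sized checkpoint can consume.
 */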
/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
static bool
xlog_item_in_current_chkpt(
	struct xfs_cil *cil,
	struct xfs_log_item *lip)
{
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		return false;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	return lip->li_seq == READ_ONCE(cil->xc_current_sequence);
}
bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item *lip)
{
	return xlog_item_in_current_chkpt(lip->li_log->l_cilp, lip);
}
/*
 * Unavoidable forward declaration - xlog_cil_push_work() calls
 * xlog_cil_ctx_alloc() itself.
 */
static void xlog_cil_push_work(struct work_struct *work);
static struct xfs_cil_ctx *
xlog_cil_ctx_alloc(void)
{
	struct xfs_cil_ctx *ctx;

	ctx = kmem_zalloc(sizeof(*ctx), KM_NOFS);
	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	INIT_LIST_HEAD(&ctx->log_items);
	INIT_LIST_HEAD(&ctx->lv_chain);
	INIT_WORK(&ctx->push_work, xlog_cil_push_work);
	return ctx;
}
/*
 * Aggregate the CIL per cpu structures into global counts, lists, etc and
 * clear the percpu state ready for the next context to use. This is called
 * from the push code with the context lock held exclusively, hence nothing else
 * will be accessing or modifying the per-cpu counters.
 */
static void
xlog_cil_push_pcp_aggregate(
	struct xfs_cil *cil,
	struct xfs_cil_ctx *ctx)
{
	struct xlog_cil_pcp *cilpcp;
	int cpu;

	for_each_online_cpu(cpu) {
		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);

		ctx->ticket->t_curr_res += cilpcp->space_reserved;
		cilpcp->space_reserved = 0;

		if (!list_empty(&cilpcp->busy_extents)) {
			list_splice_init(&cilpcp->busy_extents,
					&ctx->busy_extents);
		}
		if (!list_empty(&cilpcp->log_items))
			list_splice_init(&cilpcp->log_items, &ctx->log_items);

		/*
		 * We're in the middle of switching cil contexts. Reset the
		 * counter we use to detect when the current context is nearing
		 * full.
		 */
		cilpcp->space_used = 0;
	}
}
/*
 * Aggregate the CIL per-cpu space used counters into the global atomic value.
 * This is called when the per-cpu counter aggregation will first pass the soft
 * limit threshold so we can switch to atomic counter aggregation for accurate
 * detection of hard limit traversal.
 */
static void
xlog_cil_insert_pcp_aggregate(
	struct xfs_cil *cil,
	struct xfs_cil_ctx *ctx)
{
	struct xlog_cil_pcp *cilpcp;
	int cpu;
	int count = 0;

	/* Trigger atomic updates then aggregate only for the first caller */
	if (!test_and_clear_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags))
		return;

	for_each_online_cpu(cpu) {
		int old, prev;

		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
		do {
			old = cilpcp->space_used;
			prev = cmpxchg(&cilpcp->space_used, old, 0);
		} while (old != prev);
		count += old;
	}
	atomic_add(count, &ctx->space_used);
}
static void
xlog_cil_ctx_switch(
	struct xfs_cil *cil,
	struct xfs_cil_ctx *ctx)
{
	xlog_cil_set_iclog_hdr_count(cil);
	set_bit(XLOG_CIL_EMPTY, &cil->xc_flags);
	set_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags);
	ctx->sequence = ++cil->xc_current_sequence;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
}
/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct xlog *log)
{
	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
	xlog_cil_set_iclog_hdr_count(log->l_cilp);
}
static inline int
xlog_cil_iovec_space(
	uint niovecs)
{
	return round_up((sizeof(struct xfs_log_vec) +
			niovecs * sizeof(struct xfs_log_iovec)),
			sizeof(uint64_t));
}
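
/*
 * Example: for a two-vector item this rounds sizeof(struct xfs_log_vec) +
 * 2 * sizeof(struct xfs_log_iovec) up to the next 8 byte boundary, so the
 * data region placed after the embedded iovec array always starts 64-bit
 * aligned.
 */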
/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *	a) does not exist; or
 *	b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * it is needed, and this is the buffer that is reallocated to match the size of
 * the incoming modification. Then during the formatting of the item we can swap
 * the active buffer with the new one if we can't reuse the existing buffer. We
 * don't free the old buffer as it may be reused on the next modification if
 * its size is right, otherwise we'll free and reallocate it at that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
static void
xlog_cil_alloc_shadow_bufs(
	struct xlog *log,
	struct xfs_trans *tp)
{
	struct xfs_log_item *lip;

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		int niovecs = 0;
		int nbytes = 0;
		int buf_size;
		bool ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;
			niovecs = 0;
			nbytes = 0;
		}

		/*
		 * We 64-bit align the length of each iovec so that the start of
		 * the next one is naturally aligned. We'll need to account for
		 * that slack space here.
		 *
		 * We also add the xlog_op_header to each region when
		 * formatting, but that's not accounted to the size of the item
		 * at this point. Hence we'll need an additional number of bytes
		 * for each vector to hold an opheader.
		 *
		 * Then round nbytes up to 64-bit alignment so that the initial
		 * buffer alignment is easy to calculate and verify.
		 */
		nbytes += niovecs *
			(sizeof(uint64_t) + sizeof(struct xlog_op_header));
		nbytes = round_up(nbytes, sizeof(uint64_t));

		/*
		 * The data buffer needs to start 64-bit aligned, so round up
		 * that space to ensure we can align it appropriately and not
		 * overrun the buffer.
		 */
		buf_size = nbytes + xlog_cil_iovec_space(niovecs);

		/*
		 * if we have no shadow buffer, or it is too small, we need to
		 * reallocate it.
		 */
		if (!lip->li_lv_shadow ||
		    buf_size > lip->li_lv_shadow->lv_size) {
			/*
			 * We free and allocate here as a realloc would copy
			 * unnecessary data. We don't use kvzalloc() for the
			 * same reason - we don't need to zero the data area in
			 * the buffer, only the log vector header and the iovec
			 * storage.
			 */
			kmem_free(lip->li_lv_shadow);
			lv = xlog_kvmalloc(buf_size);

			memset(lv, 0, xlog_cil_iovec_space(niovecs));

			INIT_LIST_HEAD(&lv->lv_list);
			lv->lv_item = lip;
			lv->lv_size = buf_size;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
			lip->li_lv_shadow = lv;
		} else {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv_shadow;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
		}

		/* Ensure the lv is set up according to ->iop_size */
		lv->lv_niovecs = niovecs;

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
	}
}
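
/*
 * Sizing sketch for the loop above, assuming a dirty item whose ->iop_size
 * reports two iovecs and 100 bytes of data (illustrative numbers):
 *
 *	nbytes = 100 + 2 * (sizeof(uint64_t) + sizeof(struct xlog_op_header));
 *	nbytes = round_up(nbytes, sizeof(uint64_t));
 *	buf_size = nbytes + xlog_cil_iovec_space(2);
 *
 * Only if buf_size exceeds the current shadow's lv_size do we reallocate;
 * repeated same-sized modifications always hit the overwrite path.
 */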
/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space it will consume, and if it is a new item pin it as well.
 */
static void
xfs_cil_prepare_item(
	struct xlog *log,
	struct xfs_log_vec *lv,
	struct xfs_log_vec *old_lv,
	int *diff_len)
{
	/* Account for the new LV being passed in */
	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
		*diff_len += lv->lv_bytes;

	/*
	 * If there is no old LV, this is the first time we've seen the item in
	 * this CIL context and so we need to pin it. If we are replacing the
	 * old_lv, then remove the space it accounts for and make it the shadow
	 * buffer for later freeing. In both cases we are now switching to the
	 * shadow buffer, so update the pointer to it appropriately.
	 */
	if (!old_lv) {
		if (lv->lv_item->li_ops->iop_pin)
			lv->lv_item->li_ops->iop_pin(lv->lv_item);
		lv->lv_item->li_lv_shadow = NULL;
	} else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_bytes;
		lv->lv_item->li_lv_shadow = old_lv;
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}
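
/*
 * Accounting sketch for the function above: if an item with a 256 byte
 * active lv is reformatted into a 384 byte replacement lv (illustrative
 * sizes), *diff_len grows by the new lv_bytes and shrinks by the old
 * lv_bytes, a net 128 bytes, so CIL space accounting only ever tracks the
 * change in footprint, not the full item size again.
 */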
/*
 * Format log item into a flat buffer
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of the
 * item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
	struct xlog *log,
	struct xfs_trans *tp,
	int *diff_len)
{
	struct xfs_log_item *lip;

	/* Bail out if we didn't find a log item. */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv = NULL;
		struct xfs_log_vec *shadow;
		bool ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * The formatting size information is already attached to
		 * the shadow lv on the log item.
		 */
		shadow = lip->li_lv_shadow;
		if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
			ordered = true;

		/* Skip items that do not have any vectors for writing */
		if (!shadow->lv_niovecs && !ordered)
			continue;

		/* compare to existing item size */
		old_lv = lip->li_lv;
		if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv;

			if (ordered)
				goto insert;

			/*
			 * set the item up as though it is a new insertion so
			 * that the space reservation accounting is correct.
			 */
			*diff_len -= lv->lv_bytes;

			/* Ensure the lv is set up according to ->iop_size */
			lv->lv_niovecs = shadow->lv_niovecs;

			/* reset the lv buffer information for new formatting */
			lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_buf = (char *)lv +
					xlog_cil_iovec_space(lv->lv_niovecs);
		} else {
			/* switch to shadow buffer! */
			lv = shadow;
			lv->lv_item = lip;
			if (ordered) {
				/* track as an ordered logvec */
				ASSERT(lip->li_lv == NULL);
				goto insert;
			}
		}

		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
		lip->li_ops->iop_format(lip, lv);
insert:
		xfs_cil_prepare_item(log, lv, old_lv, diff_len);
	}
}
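
/*
 * Flow summary for the function above: reuse li_lv in place when the shadow
 * is no larger (the common overwrite case), otherwise promote the shadow to
 * be the active lv and let xfs_cil_prepare_item() demote the old lv to
 * shadow duty for lazy freeing.
 */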
/*
 * The use of lockless waitqueue_active() requires that the caller has
 * serialised itself against the wakeup call in xlog_cil_push_work(). That
 * can be done by either holding the push lock or the context lock.
 */
static inline bool
xlog_cil_over_hard_limit(
	struct xlog *log,
	int32_t space_used)
{
	if (waitqueue_active(&log->l_cilp->xc_push_wait))
		return true;
	if (space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
		return true;
	return false;
}
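
/*
 * Note the ordering of the two checks above: once any committer has hit the
 * blocking limit and slept on xc_push_wait, every later committer sees
 * waitqueue_active() and throttles as well, even if the space count has
 * since dipped back under the limit.
 */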
/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct xlog *log,
	struct xfs_trans *tp,
	uint32_t released_space)
{
	struct xfs_cil *cil = log->l_cilp;
	struct xfs_cil_ctx *ctx = cil->xc_ctx;
	struct xfs_log_item *lip;
	int len = 0;
	int iovhdr_res = 0, split_res = 0, ctx_res = 0;
	int space_used;
	int order;
	struct xlog_cil_pcp *cilpcp;

	ASSERT(tp);

	/*
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	xlog_cil_insert_format_items(log, tp, &len);

	/*
	 * Subtract the space released by intent cancelation from the space we
	 * consumed so that we remove it from the CIL space and add it back to
	 * the current transaction reservation context.
	 */
	len -= released_space;

	/*
	 * Grab the per-cpu pointer for the CIL before we start any accounting.
	 * That ensures that we are running with pre-emption disabled and so we
	 * can't be scheduled away between split sample/update operations that
	 * are done without outside locking to serialise them.
	 */
	cilpcp = get_cpu_ptr(cil->xc_pcp);

	/*
	 * We need to take the CIL checkpoint unit reservation on the first
	 * commit into the CIL. Test the XLOG_CIL_EMPTY bit first so we don't
	 * unnecessarily do an atomic op in the fast path here. We can clear the
	 * XLOG_CIL_EMPTY bit as we are under the xc_ctx_lock here and that
	 * needs to be held exclusively to reset the XLOG_CIL_EMPTY bit.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) &&
	    test_and_clear_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		ctx_res = ctx->ticket->t_unit_res;

	/*
	 * Check if we need to steal iclog headers. atomic_read() is not a
	 * locked atomic operation, so we can check the value before we do any
	 * real atomic ops in the fast path. If we've already taken the CIL unit
	 * reservation from this commit, we've already got one iclog header
	 * space reserved so we have to account for that otherwise we risk
	 * overrunning the reservation on this ticket.
	 *
	 * If the CIL is already at the hard limit, we might need more header
	 * space than originally reserved. So steal more header space from every
	 * commit that occurs once we are over the hard limit to ensure the CIL
	 * push won't run out of reservation space.
	 *
	 * This can steal more than we need, but that's OK.
	 *
	 * The cil->xc_ctx_lock provides the serialisation necessary for safely
	 * calling xlog_cil_over_hard_limit() in this context.
	 */
	space_used = atomic_read(&ctx->space_used) + cilpcp->space_used + len;
	if (atomic_read(&cil->xc_iclog_hdrs) > 0 ||
	    xlog_cil_over_hard_limit(log, space_used)) {
		split_res = log->l_iclog_hsize +
				sizeof(struct xlog_op_header);
		if (ctx_res)
			ctx_res += split_res * (tp->t_ticket->t_iclog_hdrs - 1);
		else
			ctx_res = split_res * tp->t_ticket->t_iclog_hdrs;
		atomic_sub(tp->t_ticket->t_iclog_hdrs, &cil->xc_iclog_hdrs);
	}
	cilpcp->space_reserved += ctx_res;

	/*
	 * Accurately account when over the soft limit, otherwise fold the
	 * percpu count into the global count if over the per-cpu threshold.
	 */
	if (!test_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags)) {
		atomic_add(len, &ctx->space_used);
	} else if (cilpcp->space_used + len >
			(XLOG_CIL_SPACE_LIMIT(log) / num_online_cpus())) {
		space_used = atomic_add_return(cilpcp->space_used + len,
						&ctx->space_used);
		cilpcp->space_used = 0;

		/*
		 * If we just transitioned over the soft limit, we need to
		 * transition to the global atomic counter.
		 */
		if (space_used >= XLOG_CIL_SPACE_LIMIT(log))
			xlog_cil_insert_pcp_aggregate(cil, ctx);
	} else {
		cilpcp->space_used += len;
	}
	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy))
		list_splice_init(&tp->t_busy, &cilpcp->busy_extents);

	/*
	 * Now update the order of everything modified in the transaction
	 * and insert items into the CIL if they aren't already there.
	 * We do this here so we only need to take the CIL lock once during
	 * the transaction commit.
	 */
	order = atomic_inc_return(&ctx->order_id);
	list_for_each_entry(lip, &tp->t_items, li_trans) {
		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		lip->li_order_id = order;
		if (!list_empty(&lip->li_cil))
			continue;
		list_add_tail(&lip->li_cil, &cilpcp->log_items);
	}
	put_cpu_ptr(cilpcp);

	/*
	 * If we've overrun the reservation, dump the tx details before we move
	 * the log items. Shutdown is imminent...
	 */
	tp->t_ticket->t_curr_res -= ctx_res + len;
	if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
		xfs_warn(log->l_mp, "Transaction log reservation overrun:");
		xfs_warn(log->l_mp,
			 "  log items: %d bytes (iov hdrs: %d bytes)",
			 len, iovhdr_res);
		xfs_warn(log->l_mp, "  split region headers: %d bytes",
			 split_res);
		xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
		xlog_print_trans(tp);
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
	}
}
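
/*
 * Reservation stealing sketch for the function above (illustrative): the
 * first commit into an empty CIL donates the whole checkpoint unit
 * reservation (ctx_res = t_unit_res) plus split_res for each iclog header
 * its own ticket accounted for; later commits donate iclog header space
 * only while xc_iclog_hdrs is still positive or the CIL is over the hard
 * limit. Everything donated is subtracted from tp->t_ticket->t_curr_res,
 * so the CIL ticket grows at exactly the rate the committing tickets
 * shrink.
 */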
static void
xlog_cil_free_logvec(
	struct list_head *lv_chain)
{
	struct xfs_log_vec *lv;

	while (!list_empty(lv_chain)) {
		lv = list_first_entry(lv_chain, struct xfs_log_vec, lv_list);
		list_del_init(&lv->lv_list);
		kmem_free(lv);
	}
}
static void
xlog_discard_endio_work(
	struct work_struct *work)
{
	struct xfs_cil_ctx *ctx =
		container_of(work, struct xfs_cil_ctx, discard_endio_work);
	struct xfs_mount *mp = ctx->cil->xc_log->l_mp;

	xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
	kmem_free(ctx);
}
/*
 * Queue up the actual completion to a thread to avoid IRQ-safe locking for
 * pagb_lock. Note that we need an unbounded workqueue, otherwise we might
 * get the execution delayed up to 30 seconds for weird reasons.
 */
static void
xlog_discard_endio(
	struct bio *bio)
{
	struct xfs_cil_ctx *ctx = bio->bi_private;

	INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
	queue_work(xfs_discard_wq, &ctx->discard_endio_work);
	bio_put(bio);
}
static void
xlog_discard_busy_extents(
	struct xfs_mount *mp,
	struct xfs_cil_ctx *ctx)
{
	struct list_head *list = &ctx->busy_extents;
	struct xfs_extent_busy *busyp;
	struct bio *bio = NULL;
	struct blk_plug plug;
	int error = 0;

	ASSERT(xfs_has_discard(mp));

	blk_start_plug(&plug);
	list_for_each_entry(busyp, list, list) {
		trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
					 busyp->length);

		error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
				XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
				XFS_FSB_TO_BB(mp, busyp->length),
				GFP_NOFS, &bio);
		if (error && error != -EOPNOTSUPP) {
			xfs_info(mp,
	 "discard failed for extent [0x%llx,%u], error %d",
				 (unsigned long long)busyp->bno,
				 busyp->length,
				 error);
			break;
		}
	}

	if (bio) {
		bio->bi_private = ctx;
		bio->bi_end_io = xlog_discard_endio;
		submit_bio(bio);
	} else {
		xlog_discard_endio_work(&ctx->discard_endio_work);
	}
	blk_finish_plug(&plug);
}
/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	struct xfs_cil_ctx *ctx)
{
	struct xfs_mount *mp = ctx->cil->xc_log->l_mp;
	bool abort = xlog_is_shutdown(ctx->cil->xc_log);

	/*
	 * If the I/O failed, we're aborting the commit and already shutdown.
	 * Wake any commit waiters before aborting the log items so we don't
	 * block async log pushers on callbacks. Async log pushers explicitly do
	 * not wait on log force completion because they may be holding locks
	 * required to unpin items.
	 */
	if (abort) {
		spin_lock(&ctx->cil->xc_push_lock);
		wake_up_all(&ctx->cil->xc_start_wait);
		wake_up_all(&ctx->cil->xc_commit_wait);
		spin_unlock(&ctx->cil->xc_push_lock);
	}

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, &ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_extent_busy_sort(&ctx->busy_extents);
	xfs_extent_busy_clear(mp, &ctx->busy_extents,
			      xfs_has_discard(mp) && !abort);

	spin_lock(&ctx->cil->xc_push_lock);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_push_lock);

	xlog_cil_free_logvec(&ctx->lv_chain);

	if (!list_empty(&ctx->busy_extents))
		xlog_discard_busy_extents(mp, ctx);
	else
		kmem_free(ctx);
}
void
xlog_cil_process_committed(
	struct list_head *list)
{
	struct xfs_cil_ctx *ctx;

	while ((ctx = list_first_entry_or_null(list,
			struct xfs_cil_ctx, iclog_entry))) {
		list_del(&ctx->iclog_entry);
		xlog_cil_committed(ctx);
	}
}
/*
 * Record the LSN of the iclog we were just granted space to start writing into.
 * If the context doesn't have a start_lsn recorded, then this iclog will
 * contain the start record for the checkpoint. Otherwise this write contains
 * the commit record for the checkpoint.
 */
void
xlog_cil_set_ctx_write_state(
	struct xfs_cil_ctx *ctx,
	struct xlog_in_core *iclog)
{
	struct xfs_cil *cil = ctx->cil;
	xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn);

	ASSERT(!ctx->commit_lsn);
	if (!ctx->start_lsn) {
		spin_lock(&cil->xc_push_lock);
		/*
		 * The LSN we need to pass to the log items on transaction
		 * commit is the LSN reported by the first log vector write, not
		 * the commit lsn. If we use the commit record lsn then we can
		 * move the grant write head beyond the tail LSN and overwrite
		 * it.
		 */
		ctx->start_lsn = lsn;
		wake_up_all(&cil->xc_start_wait);
		spin_unlock(&cil->xc_push_lock);

		/*
		 * Make sure the metadata we are about to overwrite in the log
		 * has been flushed to stable storage before this iclog is
		 * issued.
		 */
		spin_lock(&cil->xc_log->l_icloglock);
		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
		spin_unlock(&cil->xc_log->l_icloglock);
		return;
	}

	/*
	 * Take a reference to the iclog for the context so that we still hold
	 * it when xlog_write is done and has released it. This means the
	 * context controls when the iclog is released for IO.
	 */
	atomic_inc(&iclog->ic_refcnt);

	/*
	 * xlog_state_get_iclog_space() guarantees there is enough space in the
	 * iclog for an entire commit record, so we can attach the context
	 * callbacks now. This needs to be done before we make the commit_lsn
	 * visible to waiters so that checkpoints with commit records in the
	 * same iclog order their IO completion callbacks in the same order that
	 * the commit records appear in the iclog.
	 */
	spin_lock(&cil->xc_log->l_icloglock);
	list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks);
	spin_unlock(&cil->xc_log->l_icloglock);

	/*
	 * Now we can record the commit LSN and wake anyone waiting for this
	 * sequence to have the ordered commit record assigned to a physical
	 * location in the log.
	 */
	spin_lock(&cil->xc_push_lock);
	ctx->commit_iclog = iclog;
	ctx->commit_lsn = lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_push_lock);
}
/*
 * Ensure that the order of log writes follows checkpoint sequence order. This
 * relies on the context LSN being zero until the log write has guaranteed the
 * LSN that the log write will start at via xlog_state_get_iclog_space().
 */
enum _record_type {
	_START_RECORD,
	_COMMIT_RECORD,
};

static int
xlog_cil_order_write(
	struct xfs_cil *cil,
	xfs_csn_t sequence,
	enum _record_type record)
{
	struct xfs_cil_ctx *ctx;

restart:
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (xlog_is_shutdown(cil->xc_log)) {
			spin_unlock(&cil->xc_push_lock);
			return -EIO;
		}

		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (ctx->sequence >= sequence)
			continue;

		/* Wait until the LSN for the record has been recorded. */
		switch (record) {
		case _START_RECORD:
			if (!ctx->start_lsn) {
				xlog_wait(&cil->xc_start_wait, &cil->xc_push_lock);
				goto restart;
			}
			break;
		case _COMMIT_RECORD:
			if (!ctx->commit_lsn) {
				xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
				goto restart;
			}
			break;
		}
	}
	spin_unlock(&cil->xc_push_lock);
	return 0;
}
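
/*
 * Both record types funnel through the wait loop above: a push calls this
 * with _START_RECORD before writing its log vector chain and again with
 * _COMMIT_RECORD before writing its commit record, so checkpoints always
 * hit the log in ascending sequence order for both record types.
 */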
/*
 * Write out the log vector change now attached to the CIL context. This will
 * write a start record that needs to be strictly ordered in ascending CIL
 * sequence order so that log recovery will always use in-order start LSNs when
 * replaying checkpoints.
 */
static int
xlog_cil_write_chain(
	struct xfs_cil_ctx *ctx,
	uint32_t chain_len)
{
	struct xlog *log = ctx->cil->xc_log;
	int error;

	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD);
	if (error)
		return error;
	return xlog_write(log, ctx, &ctx->lv_chain, ctx->ticket, chain_len);
}
/*
 * Write out the commit record of a checkpoint transaction to close off a
 * running log write. These commit records are strictly ordered in ascending CIL
 * sequence order so that log recovery will always replay the checkpoints in the
 * correct order.
 */
static int
xlog_cil_write_commit_record(
	struct xfs_cil_ctx *ctx)
{
	struct xlog *log = ctx->cil->xc_log;
	struct xlog_op_header ophdr = {
		.oh_clientid = XFS_TRANSACTION,
		.oh_tid = cpu_to_be32(ctx->ticket->t_tid),
		.oh_flags = XLOG_COMMIT_TRANS,
	};
	struct xfs_log_iovec reg = {
		.i_addr = &ophdr,
		.i_len = sizeof(struct xlog_op_header),
		.i_type = XLOG_REG_TYPE_COMMIT,
	};
	struct xfs_log_vec vec = {
		.lv_niovecs = 1,
		.lv_iovecp = &reg,
	};
	int error;
	LIST_HEAD(lv_chain);
	list_add(&vec.lv_list, &lv_chain);

	if (xlog_is_shutdown(log))
		return -EIO;

	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD);
	if (error)
		return error;

	/* account for space used by record data */
	ctx->ticket->t_curr_res -= reg.i_len;
	error = xlog_write(log, ctx, &lv_chain, ctx->ticket, reg.i_len);
	if (error)
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
	return error;
}
struct xlog_cil_trans_hdr {
	struct xlog_op_header oph[2];
	struct xfs_trans_header thdr;
	struct xfs_log_iovec lhdr[2];
};
/*
 * Build a checkpoint transaction header to begin the journal transaction. We
 * need to account for the space used by the transaction header here as it is
 * not accounted for in xlog_write().
 *
 * This is the only place we write a transaction header, so we also build the
 * log opheaders that indicate the start of a log transaction and wrap the
 * transaction header. We keep the start record in its own log vector rather
 * than compacting them into a single region as this ends up making the logic
 * in xlog_write() for handling empty opheaders for start, commit and unmount
 * records much simpler.
 */
static void
xlog_cil_build_trans_hdr(
	struct xfs_cil_ctx *ctx,
	struct xlog_cil_trans_hdr *hdr,
	struct xfs_log_vec *lvhdr,
	int num_iovecs)
{
	struct xlog_ticket *tic = ctx->ticket;
	__be32 tid = cpu_to_be32(tic->t_tid);

	memset(hdr, 0, sizeof(*hdr));

	/* Log start record */
	hdr->oph[0].oh_tid = tid;
	hdr->oph[0].oh_clientid = XFS_TRANSACTION;
	hdr->oph[0].oh_flags = XLOG_START_TRANS;

	/* log iovec region pointer */
	hdr->lhdr[0].i_addr = &hdr->oph[0];
	hdr->lhdr[0].i_len = sizeof(struct xlog_op_header);
	hdr->lhdr[0].i_type = XLOG_REG_TYPE_LRHEADER;

	/* log opheader */
	hdr->oph[1].oh_tid = tid;
	hdr->oph[1].oh_clientid = XFS_TRANSACTION;
	hdr->oph[1].oh_len = cpu_to_be32(sizeof(struct xfs_trans_header));

	/* transaction header in host byte order format */
	hdr->thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	hdr->thdr.th_type = XFS_TRANS_CHECKPOINT;
	hdr->thdr.th_tid = tic->t_tid;
	hdr->thdr.th_num_items = num_iovecs;

	/* log iovec region pointer */
	hdr->lhdr[1].i_addr = &hdr->oph[1];
	hdr->lhdr[1].i_len = sizeof(struct xlog_op_header) +
				sizeof(struct xfs_trans_header);
	hdr->lhdr[1].i_type = XLOG_REG_TYPE_TRANSHDR;

	lvhdr->lv_niovecs = 2;
	lvhdr->lv_iovecp = &hdr->lhdr[0];
	lvhdr->lv_bytes = hdr->lhdr[0].i_len + hdr->lhdr[1].i_len;

	tic->t_curr_res -= lvhdr->lv_bytes;
}
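
/*
 * Resulting region layout from the two iovecs built above:
 *
 *	lhdr[0]: op header (XLOG_START_TRANS)	- the start record
 *	lhdr[1]: op header + xfs_trans_header	- the checkpoint header
 *
 * Both regions are charged to the CIL ticket here because xlog_write()
 * does not account for them itself.
 */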
/*
 * CIL item reordering compare function. We want to order in ascending ID order,
 * but we want to leave items with the same ID in the order they were added to
 * the list. This is important for operations like reflink where we log 4 order
 * dependent intents in a single transaction when we overwrite an existing
 * shared extent with a new shared extent. i.e. BUI(unmap), CUI(drop),
 * CUI (inc), BUI(remap)...
 */
static int
xlog_cil_order_cmp(
	void *priv,
	const struct list_head *a,
	const struct list_head *b)
{
	struct xfs_log_vec *l1 = container_of(a, struct xfs_log_vec, lv_list);
	struct xfs_log_vec *l2 = container_of(b, struct xfs_log_vec, lv_list);

	return l1->lv_order_id > l2->lv_order_id;
}
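
/*
 * list_sort() is a stable merge sort and this comparison only reports
 * "greater than", so log vectors with equal lv_order_id keep their relative
 * insertion order - exactly what the reflink intent sequence in the comment
 * above requires.
 */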
/*
 * Pull all the log vectors off the items in the CIL, and remove the items from
 * the CIL. We don't need the CIL lock here because it's only needed on the
 * transaction commit side which is currently locked out by the flush lock.
 *
 * If a log item is marked with a whiteout, we do not need to write it to the
 * journal and so we just move them to the whiteout list for the caller to
 * dispose of appropriately.
 */
static void
xlog_cil_build_lv_chain(
	struct xfs_cil_ctx *ctx,
	struct list_head *whiteouts,
	uint32_t *num_iovecs,
	uint32_t *num_bytes)
{
	while (!list_empty(&ctx->log_items)) {
		struct xfs_log_item *item;
		struct xfs_log_vec *lv;

		item = list_first_entry(&ctx->log_items,
					struct xfs_log_item, li_cil);

		if (test_bit(XFS_LI_WHITEOUT, &item->li_flags)) {
			list_move(&item->li_cil, whiteouts);
			trace_xfs_cil_whiteout_skip(item);
			continue;
		}

		lv = item->li_lv;
		lv->lv_order_id = item->li_order_id;

		/* we don't write ordered log vectors */
		if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
			*num_bytes += lv->lv_bytes;
		*num_iovecs += lv->lv_niovecs;
		list_add_tail(&lv->lv_list, &ctx->lv_chain);

		list_del_init(&item->li_cil);
		item->li_order_id = 0;
		item->li_lv = NULL;
	}
}
static void
xlog_cil_cleanup_whiteouts(
	struct list_head *whiteouts)
{
	while (!list_empty(whiteouts)) {
		struct xfs_log_item *item = list_first_entry(whiteouts,
						struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		trace_xfs_cil_whiteout_unpin(item);
		item->li_ops->iop_unpin(item, 1);
	}
}
/*
 * Push the Committed Item List to the log.
 *
 * If the current sequence is the same as xc_push_seq we need to do a flush. If
 * xc_push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * xc_push_seq is checked unlocked against the sequence number for a match.
 * Hence we can allow log forces to run racily and not issue pushes for the
 * same sequence twice. If we get a race between multiple pushes for the same
 * sequence they will block on the first one and then abort, hence avoiding
 * needless pushes.
 */
static void
xlog_cil_push_work(
	struct work_struct *work)
{
	struct xfs_cil_ctx *ctx =
		container_of(work, struct xfs_cil_ctx, push_work);
	struct xfs_cil *cil = ctx->cil;
	struct xlog *log = cil->xc_log;
	struct xfs_cil_ctx *new_ctx;
	int num_iovecs = 0;
	int num_bytes = 0;
	int error = 0;
	struct xlog_cil_trans_hdr thdr;
	struct xfs_log_vec lvhdr = {};
	xfs_csn_t push_seq;
	bool push_commit_stable;
	LIST_HEAD (whiteouts);
	struct xlog_ticket *ticket;

	new_ctx = xlog_cil_ctx_alloc();
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	down_write(&cil->xc_ctx_lock);

	spin_lock(&cil->xc_push_lock);
	push_seq = cil->xc_push_seq;
	ASSERT(push_seq <= ctx->sequence);
	push_commit_stable = cil->xc_push_commit_stable;
	cil->xc_push_commit_stable = false;

	/*
	 * As we are about to switch to a new, empty CIL context, we no longer
	 * need to throttle tasks on CIL space overruns. Wake any waiters that
	 * the hard push throttle may have caught so they can start committing
	 * to the new context. The ctx->xc_push_lock provides the serialisation
	 * necessary for safely using the lockless waitqueue_active() check in
	 * this context.
	 */
	if (waitqueue_active(&cil->xc_push_wait))
		wake_up_all(&cil->xc_push_wait);
	xlog_cil_push_pcp_aggregate(cil, ctx);

	/*
	 * Check if we've anything to push. If there is nothing, then we don't
	 * move on to a new sequence number and so we have to be able to push
	 * this sequence again later.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
		cil->xc_push_seq = 0;
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/* check for a previously pushed sequence */
	if (push_seq < ctx->sequence) {
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/*
	 * We are now going to push this context, so add it to the committing
	 * list before we do anything else. This ensures that anyone waiting on
	 * this push can easily detect the difference between a "push in
	 * progress" and "CIL is empty, nothing to do".
	 *
	 * IOWs, a wait loop can now check for:
	 *	the current sequence not being found on the committing list;
	 *	an empty CIL; and
	 *	an unchanged sequence number
	 * to detect a push that had nothing to do and therefore does not need
	 * waiting on. If the CIL is not empty, we get put on the committing
	 * list before emptying the CIL and bumping the sequence number. Hence
	 * an empty CIL and an unchanged sequence number means we jumped out
	 * above after doing nothing.
	 *
	 * Hence the waiter will either find the commit sequence on the
	 * committing list or the sequence number will be unchanged and the CIL
	 * still dirty. In that latter case, the push has not yet started, and
	 * so the waiter will have to continue trying to check the CIL
	 * committing list until it is found. In extreme cases of delay, the
	 * sequence may fully commit between the attempts the wait makes to wait
	 * on the commit sequence.
	 */
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_push_lock);
	xlog_cil_build_lv_chain(ctx, &whiteouts, &num_iovecs, &num_bytes);

	/*
	 * Switch the contexts so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI. Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 *
	 * xfs_log_force_seq requires us to mirror the new sequence into the cil
	 * structure atomically with the addition of this sequence to the
	 * committing list. This also ensures that we can do unlocked checks
	 * against the current sequence in log forces without risking
	 * dereferencing a freed context pointer.
	 */
	spin_lock(&cil->xc_push_lock);
	xlog_cil_ctx_switch(cil, new_ctx);
	spin_unlock(&cil->xc_push_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Sort the log vector chain before we add the transaction headers.
	 * This ensures we always have the transaction headers at the start
	 * of the chain.
	 */
	list_sort(NULL, &ctx->lv_chain, xlog_cil_order_cmp);
	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 * Add the lvhdr to the head of the lv chain we pass to xlog_write() so
	 * it gets written into the iclog first.
	 */
	xlog_cil_build_trans_hdr(ctx, &thdr, &lvhdr, num_iovecs);
	num_bytes += lvhdr.lv_bytes;
	list_add(&lvhdr.lv_list, &ctx->lv_chain);

	/*
	 * Take the lvhdr back off the lv_chain immediately after calling
	 * xlog_cil_write_chain() as it should not be passed to log IO
	 * completion.
	 */
	error = xlog_cil_write_chain(ctx, num_bytes);
	list_del(&lvhdr.lv_list);
	if (error)
		goto out_abort_free_ticket;

	error = xlog_cil_write_commit_record(ctx);
	if (error)
		goto out_abort_free_ticket;

	/*
	 * Grab the ticket from the ctx so we can ungrant it after releasing the
	 * commit_iclog. The ctx may be freed by the time we return from
	 * releasing the commit_iclog (i.e. checkpoint has been completed and
	 * callback run) so we can't reference the ctx after the call to
	 * xlog_state_release_iclog().
	 */
	ticket = ctx->ticket;
	/*
	 * If the checkpoint spans multiple iclogs, wait for all previous iclogs
	 * to complete before we submit the commit_iclog. We can't use state
	 * checks for this - ACTIVE can be either a past completed iclog or a
	 * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a
	 * past or future iclog awaiting IO or ordered IO completion to be run.
	 * In the latter case, if it's a future iclog and we wait on it, then we
	 * will hang because it won't get processed through to ic_force_wait
	 * wakeup until this commit_iclog is written to disk. Hence we use the
	 * iclog header lsn and compare it to the commit lsn to determine if we
	 * need to wait on iclogs or not.
	 */
	spin_lock(&log->l_icloglock);
	if (ctx->start_lsn != ctx->commit_lsn) {
		xfs_lsn_t plsn;

		plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn);
		if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) {
			/*
			 * Waiting on ic_force_wait orders the completion of
			 * iclogs older than ic_prev. Hence we only need to wait
			 * on the most recent older iclog here.
			 */
			xlog_wait_on_iclog(ctx->commit_iclog->ic_prev);
			spin_lock(&log->l_icloglock);
		}

		/*
		 * We need to issue a pre-flush so that the ordering for this
		 * checkpoint is correctly preserved down to stable storage.
		 */
		ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
	}

	/*
	 * The commit iclog must be written to stable storage to guarantee
	 * journal IO vs metadata writeback IO is correctly ordered on stable
	 * storage.
	 *
	 * If the push caller needs the commit to be immediately stable and the
	 * commit_iclog is not yet marked as XLOG_STATE_WANT_SYNC to indicate it
	 * will be written when released, switch its state to WANT_SYNC right
	 * now.
	 */
	ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
	if (push_commit_stable &&
	    ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
		xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
	ticket = ctx->ticket;
	xlog_state_release_iclog(log, ctx->commit_iclog, ticket);

	/* Not safe to reference ctx now! */

	spin_unlock(&log->l_icloglock);
	xlog_cil_cleanup_whiteouts(&whiteouts);
	xfs_log_ticket_ungrant(log, ticket);
	return;

out_skip:
	up_write(&cil->xc_ctx_lock);
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return;
out_abort_free_ticket:
	ASSERT(xlog_is_shutdown(log));
	xlog_cil_cleanup_whiteouts(&whiteouts);
	if (!ctx->commit_iclog) {
		xfs_log_ticket_ungrant(log, ctx->ticket);
		xlog_cil_committed(ctx);
		return;
	}
	spin_lock(&log->l_icloglock);
	ticket = ctx->ticket;
	xlog_state_release_iclog(log, ctx->commit_iclog, ticket);
	/* Not safe to reference ctx now! */
	spin_unlock(&log->l_icloglock);
	xfs_log_ticket_ungrant(log, ticket);
}
/*
 * We need to push CIL every so often so we don't cache more than we can fit in
 * the log. The limit really is that a checkpoint can't be more than half the
 * log (the current checkpoint is not allowed to overwrite the previous
 * checkpoint), but commit latency and memory usage limit this to a smaller
 * size.
 */
static void
xlog_cil_push_background(
	struct xlog *log) __releases(cil->xc_ctx_lock)
{
	struct xfs_cil *cil = log->l_cilp;
	int space_used = atomic_read(&cil->xc_ctx->space_used);

	/*
	 * The cil won't be empty because we are called while holding the
	 * context lock so whatever we added to the CIL will still be there.
	 */
	ASSERT(!test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));

	/*
	 * We are done if:
	 * - we haven't used up all the space available yet; or
	 * - we've already queued up a push; and
	 * - we're not over the hard limit; and
	 * - nothing has been over the hard limit.
	 *
	 * If so, we don't need to take the push lock as there's nothing to do.
	 */
	if (space_used < XLOG_CIL_SPACE_LIMIT(log) ||
	    (cil->xc_push_seq == cil->xc_current_sequence &&
	     space_used < XLOG_CIL_BLOCKING_SPACE_LIMIT(log) &&
	     !waitqueue_active(&cil->xc_push_wait))) {
		up_read(&cil->xc_ctx_lock);
		return;
	}

	spin_lock(&cil->xc_push_lock);
	if (cil->xc_push_seq < cil->xc_current_sequence) {
		cil->xc_push_seq = cil->xc_current_sequence;
		queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
	}

	/*
	 * Drop the context lock now, we can't hold that if we need to sleep
	 * because we are over the blocking threshold. The push_lock is still
	 * held, so blocking threshold sleep/wakeup is still correctly
	 * serialised here.
	 */
	up_read(&cil->xc_ctx_lock);

	/*
	 * If we are well over the space limit, throttle the work that is being
	 * done until the push work on this context has begun. Enforce the hard
	 * throttle on all transaction commits once it has been activated, even
	 * if the committing transactions have resulted in the space usage
	 * dipping back down under the hard limit.
	 *
	 * The ctx->xc_push_lock provides the serialisation necessary for safely
	 * calling xlog_cil_over_hard_limit() in this context.
	 */
	if (xlog_cil_over_hard_limit(log, space_used)) {
		trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
		ASSERT(space_used < log->l_logsize);
		xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
		return;
	}

	spin_unlock(&cil->xc_push_lock);
}
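
/*
 * Decision sketch for the function above: a commit that pushes space_used
 * over the soft limit (XLOG_CIL_SPACE_LIMIT) queues the background push
 * work and returns; only once space_used crosses the blocking limit, or
 * another committer is already throttled, does this context sleep on
 * xc_push_wait until xlog_cil_push_work() wakes it.
 */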
/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed.
 *
 * If the caller is performing a synchronous force, we will flush the workqueue
 * to get previously queued work moving to minimise the wait time they will
 * undergo waiting for all outstanding pushes to complete. The caller is
 * expected to do the required waiting for push_seq to complete.
 *
 * If the caller is performing an async push, we need to ensure that the
 * checkpoint is fully flushed out of the iclogs when we finish the push. If we
 * don't do this, then the commit record may remain sitting in memory in an
 * ACTIVE iclog. This then requires another full log force to push to disk,
 * which defeats the purpose of having an async, non-blocking CIL force
 * mechanism. Hence in this case we need to pass a flag to the push work to
 * indicate it needs to flush the commit record itself.
 */
static void
xlog_cil_push_now(
	struct xlog *log,
	xfs_csn_t push_seq,
	bool async)
{
	struct xfs_cil *cil = log->l_cilp;

	if (!cil)
		return;

	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

	/* start on any pending background push to minimise wait time on it */
	if (!async)
		flush_workqueue(cil->xc_push_wq);

	spin_lock(&cil->xc_push_lock);

	/*
	 * If this is an async flush request, we always need to set the
	 * xc_push_commit_stable flag even if something else has already queued
	 * a push. The flush caller is asking for the CIL to be on stable
	 * storage when the next push completes, so regardless of who has queued
	 * the push, the flush requires stable semantics from it.
	 */
	cil->xc_push_commit_stable = async;

	/*
	 * If the CIL is empty or we've already pushed the sequence then
	 * there's no more work that we need to do.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) ||
	    push_seq <= cil->xc_push_seq) {
		spin_unlock(&cil->xc_push_lock);
		return;
	}

	cil->xc_push_seq = push_seq;
	queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
	spin_unlock(&cil->xc_push_lock);
}
bool
xlog_cil_empty(
	struct xlog *log)
{
	struct xfs_cil *cil = log->l_cilp;
	bool empty = false;

	spin_lock(&cil->xc_push_lock);
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		empty = true;
	spin_unlock(&cil->xc_push_lock);
	return empty;
}
/*
 * If there are intent done items in this transaction and the related intent was
 * committed in the current (same) CIL checkpoint, we don't need to write either
 * the intent or intent done item to the journal as the change will be
 * journalled atomically within this checkpoint. As we cannot remove items from
 * the CIL here, mark the related intent with a whiteout so that the CIL push
 * can remove it rather than writing it to the journal. Then remove the intent
 * done item from the current transaction and release it so it doesn't get put
 * into the CIL at all.
 */
static uint32_t
xlog_cil_process_intents(
	struct xfs_cil *cil,
	struct xfs_trans *tp)
{
	struct xfs_log_item *lip, *ilip, *next;
	uint32_t len = 0;

	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		if (!(lip->li_ops->flags & XFS_ITEM_INTENT_DONE))
			continue;

		ilip = lip->li_ops->iop_intent(lip);
		if (!ilip || !xlog_item_in_current_chkpt(cil, ilip))
			continue;
		set_bit(XFS_LI_WHITEOUT, &ilip->li_flags);
		trace_xfs_cil_whiteout_mark(ilip);
		len += ilip->li_lv->lv_bytes;
		kmem_free(ilip->li_lv);
		ilip->li_lv = NULL;

		xfs_trans_del_item(lip);
		lip->li_ops->iop_release(lip);
	}
	return len;
}
/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
void
xlog_cil_commit(
	struct xlog *log,
	struct xfs_trans *tp,
	xfs_csn_t *commit_seq,
	bool regrant)
{
	struct xfs_cil *cil = log->l_cilp;
	struct xfs_log_item *lip, *next;
	uint32_t released_space = 0;

	/*
	 * Do all necessary memory allocation before we lock the CIL.
	 * This ensures the allocation does not deadlock with a CIL
	 * push in memory reclaim (e.g. from kswapd).
	 */
	xlog_cil_alloc_shadow_bufs(log, tp);

	/* lock out background commit */
	down_read(&cil->xc_ctx_lock);

	if (tp->t_flags & XFS_TRANS_HAS_INTENT_DONE)
		released_space = xlog_cil_process_intents(cil, tp);

	xlog_cil_insert_items(log, tp, released_space);

	if (regrant && !xlog_is_shutdown(log))
		xfs_log_ticket_regrant(log, tp->t_ticket);
	else
		xfs_log_ticket_ungrant(log, tp->t_ticket);
	tp->t_ticket = NULL;
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and possibly freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	trace_xfs_trans_commit_items(tp, _RET_IP_);
	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (lip->li_ops->iop_committing)
			lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
	}
	if (commit_seq)
		*commit_seq = cil->xc_ctx->sequence;

	/* xlog_cil_push_background() releases cil->xc_ctx_lock */
	xlog_cil_push_background(log);
}
/*
 * Flush the CIL to stable storage but don't wait for it to complete. This
 * requires the CIL push to ensure the commit record for the push hits the disk,
 * but otherwise is no different to a push done from a log force.
 */
void
xlog_cil_flush(
	struct xlog *log)
{
	xfs_csn_t seq = log->l_cilp->xc_current_sequence;

	trace_xfs_log_force(log->l_mp, seq, _RET_IP_);
	xlog_cil_push_now(log, seq, true);

	/*
	 * If the CIL is empty, make sure that any previous checkpoint that may
	 * still be in an active iclog is pushed to stable storage.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &log->l_cilp->xc_flags))
		xfs_log_force(log->l_mp, 0);
}
/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence number given.
 * Hence the only time we will trigger a push here is if the push sequence is
 * the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_seq(
	struct xlog *log,
	xfs_csn_t sequence)
{
	struct xfs_cil *cil = log->l_cilp;
	struct xfs_cil_ctx *ctx;
	xfs_lsn_t commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	if (!sequence)
		sequence = cil->xc_current_sequence;
	trace_xfs_log_force(log->l_mp, sequence, _RET_IP_);

	/*
	 * check to see if we need to force out the current context.
	 * xlog_cil_push() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
restart:
	xlog_cil_push_now(log, sequence, false);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (xlog_is_shutdown(log))
			goto out_shutdown;
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}

	/*
	 * The call to xlog_cil_push_now() executes the push in the background.
	 * Hence by the time we have got here our sequence may not have been
	 * pushed yet. This is true if the current sequence still matches the
	 * push sequence after the above wait loop and the CIL still contains
	 * dirty objects. This is guaranteed by the push code first adding the
	 * context to the committing list before emptying the CIL.
	 *
	 * Hence if we don't find the context in the committing list and the
	 * current sequence number is unchanged then the CIL contents are
	 * significant. If the CIL is empty, it means there was nothing to push
	 * and that means there is nothing to wait for. If the CIL is not empty,
	 * it means we haven't yet started the push, because if it had started
	 * we would have found the context on the committing list.
	 */
	if (sequence == cil->xc_current_sequence &&
	    !test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
		spin_unlock(&cil->xc_push_lock);
		goto restart;
	}

	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;

	/*
	 * We detected a shutdown in progress. We need to trigger the log force
	 * to pass through its iclog state machine error handling, even though
	 * we are already in a shutdown state. Hence we can't return
	 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
	 * LSN is already stable), so we return a zero LSN instead.
	 */
out_shutdown:
	spin_unlock(&cil->xc_push_lock);
	return 0;
}
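
/*
 * Callers interpret the return value of the function above three ways:
 * NULLCOMMITLSN means the sequence needed no flushing, zero means a
 * shutdown was hit and the caller must still run the iclog error handling,
 * and any other LSN is the commit record location that may still need an
 * iclog flush to reach stable storage.
 */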
/*
 * Move dead percpu state to the relevant CIL context structures.
 *
 * We have to lock the CIL context here to ensure that nothing is modifying
 * the percpu state, either addition or removal. Both of these are done under
 * the CIL context lock, so grabbing that exclusively here will ensure we can
 * safely drain the cilpcp for the CPU that is dying.
 */
void
xlog_cil_pcp_dead(
	struct xlog *log,
	unsigned int cpu)
{
	struct xfs_cil *cil = log->l_cilp;
	struct xlog_cil_pcp *cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
	struct xfs_cil_ctx *ctx;

	down_write(&cil->xc_ctx_lock);
	ctx = cil->xc_ctx;
	if (ctx->ticket)
		ctx->ticket->t_curr_res += cilpcp->space_reserved;
	cilpcp->space_reserved = 0;

	if (!list_empty(&cilpcp->log_items))
		list_splice_init(&cilpcp->log_items, &ctx->log_items);
	if (!list_empty(&cilpcp->busy_extents))
		list_splice_init(&cilpcp->busy_extents, &ctx->busy_extents);
	atomic_add(cilpcp->space_used, &ctx->space_used);
	cilpcp->space_used = 0;
	up_write(&cil->xc_ctx_lock);
}
/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
	struct xlog *log)
{
	struct xfs_cil *cil;
	struct xfs_cil_ctx *ctx;
	struct xlog_cil_pcp *cilpcp;
	int cpu;

	cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
	if (!cil)
		return -ENOMEM;
	/*
	 * Limit the CIL pipeline depth to 4 concurrent works to bound the
	 * concurrency the log spinlocks will be exposed to.
	 */
	cil->xc_push_wq = alloc_workqueue("xfs-cil/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
			4, log->l_mp->m_super->s_id);
	if (!cil->xc_push_wq)
		goto out_destroy_cil;

	cil->xc_log = log;
	cil->xc_pcp = alloc_percpu(struct xlog_cil_pcp);
	if (!cil->xc_pcp)
		goto out_destroy_wq;

	for_each_possible_cpu(cpu) {
		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
		INIT_LIST_HEAD(&cilpcp->busy_extents);
		INIT_LIST_HEAD(&cilpcp->log_items);
	}

	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_push_lock);
	init_waitqueue_head(&cil->xc_push_wait);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_start_wait);
	init_waitqueue_head(&cil->xc_commit_wait);
	log->l_cilp = cil;

	ctx = xlog_cil_ctx_alloc();
	xlog_cil_ctx_switch(cil, ctx);
	return 0;

out_destroy_wq:
	destroy_workqueue(cil->xc_push_wq);
out_destroy_cil:
	kmem_free(cil);
	return -ENOMEM;
}
void
xlog_cil_destroy(
	struct xlog *log)
{
	struct xfs_cil *cil = log->l_cilp;

	if (cil->xc_ctx) {
		if (cil->xc_ctx->ticket)
			xfs_log_ticket_put(cil->xc_ctx->ticket);
		kmem_free(cil->xc_ctx);
	}

	ASSERT(test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
	free_percpu(cil->xc_pcp);
	destroy_workqueue(cil->xc_push_wq);
	kmem_free(cil);
}