// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"

struct workqueue_struct *xfs_discard_wq;

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, 0);

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	tic->t_iclog_hdrs = 0;
	return tic;
}

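/*
 * Set the number of iclog headers a checkpoint filled to the CIL blocking
 * space limit could require. A rough worked example, with illustrative
 * numbers rather than the real defaults: with 32KB iclogs and a 512 byte
 * iclog header, each iclog carries about 32256 bytes of checkpoint payload,
 * so an 8MB blocking limit works out at roughly 260 iclog headers.
 */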
static inline void
xlog_cil_set_iclog_hdr_count(struct xfs_cil *cil)
{
	struct xlog	*log = cil->xc_log;

	atomic_set(&cil->xc_iclog_hdrs,
		   (XLOG_CIL_BLOCKING_SPACE_LIMIT(log) /
			(log->l_iclog_size - log->l_iclog_hsize)));
}

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
static bool
xlog_item_in_current_chkpt(
	struct xfs_cil		*cil,
	struct xfs_log_item	*lip)
{
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		return false;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	return lip->li_seq == READ_ONCE(cil->xc_current_sequence);
}

bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item	*lip)
{
	return xlog_item_in_current_chkpt(lip->li_log->l_cilp, lip);
}

/*
 * Unavoidable forward declaration - xlog_cil_push_work() calls
 * xlog_cil_ctx_alloc() itself.
 */
static void xlog_cil_push_work(struct work_struct *work);

static struct xfs_cil_ctx *
xlog_cil_ctx_alloc(void)
{
	struct xfs_cil_ctx	*ctx;

	ctx = kmem_zalloc(sizeof(*ctx), KM_NOFS);
	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	INIT_LIST_HEAD(&ctx->log_items);
	INIT_LIST_HEAD(&ctx->lv_chain);
	INIT_WORK(&ctx->push_work, xlog_cil_push_work);
	return ctx;
}

/*
 * Aggregate the CIL per cpu structures into global counts, lists, etc and
 * clear the percpu state ready for the next context to use. This is called
 * from the push code with the context lock held exclusively, hence nothing
 * else will be accessing or modifying the per-cpu counters.
 */
static void
xlog_cil_push_pcp_aggregate(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	struct xlog_cil_pcp	*cilpcp;
	int			cpu;

	for_each_cpu(cpu, &ctx->cil_pcpmask) {
		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);

		ctx->ticket->t_curr_res += cilpcp->space_reserved;
		cilpcp->space_reserved = 0;

		if (!list_empty(&cilpcp->busy_extents)) {
			list_splice_init(&cilpcp->busy_extents,
					&ctx->busy_extents);
		}
		if (!list_empty(&cilpcp->log_items))
			list_splice_init(&cilpcp->log_items, &ctx->log_items);

		/*
		 * We're in the middle of switching cil contexts.  Reset the
		 * counter we use to detect when the current context is nearing
		 * full.
		 */
		cilpcp->space_used = 0;
	}
}

/*
 * Aggregate the CIL per-cpu space used counters into the global atomic value.
 * This is called when the per-cpu counter aggregation will first pass the soft
 * limit threshold so we can switch to atomic counter aggregation for accurate
 * detection of hard limit traversal.
 */
static void
xlog_cil_insert_pcp_aggregate(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	struct xlog_cil_pcp	*cilpcp;
	int			cpu;
	int			count = 0;

	/* Trigger atomic updates then aggregate only for the first caller */
	if (!test_and_clear_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags))
		return;

	/*
	 * We can race with other cpus setting cil_pcpmask.  However, we've
	 * atomically cleared PCP_SPACE which forces other threads to add to
	 * the global space used count.  cil_pcpmask is a superset of cilpcp
	 * structures that could have a nonzero space_used.
	 */
	for_each_cpu(cpu, &ctx->cil_pcpmask) {
		int	old, prev;

		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
		do {
			old = cilpcp->space_used;
			prev = cmpxchg(&cilpcp->space_used, old, 0);
		} while (old != prev);
		count += old;
	}
	atomic_add(count, &ctx->space_used);
}

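/*
 * Switch the CIL over to a new, empty context: mark the context empty, turn
 * per-cpu space accounting back on for the fast path, and give the new
 * context a sequence number one higher than the context being pushed.
 */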
static void
xlog_cil_ctx_switch(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	xlog_cil_set_iclog_hdr_count(cil);
	set_bit(XLOG_CIL_EMPTY, &cil->xc_flags);
	set_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags);
	ctx->sequence = ++cil->xc_current_sequence;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct xlog	*log)
{
	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
	xlog_cil_set_iclog_hdr_count(log->l_cilp);
}

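/*
 * Space, in bytes, consumed by the log vector header and the iovec array that
 * sit in front of the formatted item data in a shadow buffer, rounded up so
 * that the data region which follows is 64-bit aligned.
 */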
static inline int
xlog_cil_iovec_space(
	uint	niovecs)
{
	return round_up((sizeof(struct xfs_log_vec) +
					niovecs * sizeof(struct xfs_log_iovec)),
			sizeof(uint64_t));
}

/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *	a) it does not exist; or
 *	b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * it is needed, and this is the buffer that is reallocated to match the size of
 * the incoming modification. Then during the formatting of the item we can swap
 * the active buffer with the new one if we can't reuse the existing buffer. We
 * don't free the old buffer as it may be reused on the next modification if
 * its size is right, otherwise we'll free and reallocate it at that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
static void
xlog_cil_alloc_shadow_bufs(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip;

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;
			niovecs = 0;
			nbytes = 0;
		}

		/*
		 * We 64-bit align the length of each iovec so that the start of
		 * the next one is naturally aligned. We'll need to account for
		 * that slack space here.
		 *
		 * We also add the xlog_op_header to each region when
		 * formatting, but that's not accounted to the size of the item
		 * at this point. Hence we'll need an additional number of bytes
		 * for each vector to hold an opheader.
		 *
		 * Then round nbytes up to 64-bit alignment so that the initial
		 * buffer alignment is easy to calculate and verify.
		 */
		nbytes += niovecs *
			(sizeof(uint64_t) + sizeof(struct xlog_op_header));
		nbytes = round_up(nbytes, sizeof(uint64_t));

		/*
		 * The data buffer needs to start 64-bit aligned, so round up
		 * that space to ensure we can align it appropriately and not
		 * overrun the buffer.
		 */
		buf_size = nbytes + xlog_cil_iovec_space(niovecs);

		/*
		 * if we have no shadow buffer, or it is too small, we need to
		 * reallocate it.
		 */
		if (!lip->li_lv_shadow ||
		    buf_size > lip->li_lv_shadow->lv_size) {
			/*
			 * We free and allocate here as a realloc would copy
			 * unnecessary data. We don't use kvzalloc() for the
			 * same reason - we don't need to zero the data area in
			 * the buffer, only the log vector header and the iovec
			 * storage.
			 */
			kmem_free(lip->li_lv_shadow);
			lv = xlog_kvmalloc(buf_size);

			memset(lv, 0, xlog_cil_iovec_space(niovecs));

			INIT_LIST_HEAD(&lv->lv_list);
			lv->lv_item = lip;
			lv->lv_size = buf_size;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
			lip->li_lv_shadow = lv;
		} else {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv_shadow;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
		}

		/* Ensure the lv is set up according to ->iop_size */
		lv->lv_niovecs = niovecs;

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
	}
}

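/*
 * Sizing sketch for xlog_cil_alloc_shadow_bufs() (illustrative numbers, not
 * real item sizes): an item reporting niovecs = 2 and nbytes = 200 has
 * 2 * (8 + sizeof(struct xlog_op_header)) bytes of alignment and opheader
 * slack added, nbytes is rounded up to an 8 byte boundary, and then
 * xlog_cil_iovec_space(2) is prepended for the log vector header and iovec
 * array. Only that header region is zeroed; the data area is wholly
 * overwritten by the formatting step.
 */
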
/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space it will consume, and if it is a new item pin it as well.
 */
static void
xfs_cil_prepare_item(
	struct xlog		*log,
	struct xfs_log_vec	*lv,
	struct xfs_log_vec	*old_lv,
	int			*diff_len)
{
	/* Account for the new LV being passed in */
	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
		*diff_len += lv->lv_bytes;

	/*
	 * If there is no old LV, this is the first time we've seen the item in
	 * this CIL context and so we need to pin it. If we are replacing the
	 * old_lv, then remove the space it accounts for and make it the shadow
	 * buffer for later freeing. In both cases we are now switching to the
	 * shadow buffer, so update the pointer to it appropriately.
	 */
	if (!old_lv) {
		if (lv->lv_item->li_ops->iop_pin)
			lv->lv_item->li_ops->iop_pin(lv->lv_item);
		lv->lv_item->li_lv_shadow = NULL;
	} else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_bytes;
		lv->lv_item->li_lv_shadow = old_lv;
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}

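/*
 * Accounting sketch for xfs_cil_prepare_item() (illustrative sizes): an item
 * entering this checkpoint for the first time with a 256 byte lv adds 256 to
 * *diff_len and is pinned. If it is then relogged with a new 512 byte lv, we
 * add 512 and subtract the old 256, so CIL space usage only grows by the
 * delta between the two formatted sizes.
 */
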
/*
 * Format log item into a flat buffer.
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of the
 * item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	int			*diff_len)
{
	struct xfs_log_item	*lip;

	/* Bail out if we didn't find a log item.  */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv = NULL;
		struct xfs_log_vec *shadow;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * The formatting size information is already attached to
		 * the shadow lv on the log item.
		 */
		shadow = lip->li_lv_shadow;
		if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
			ordered = true;

		/* Skip items that do not have any vectors for writing */
		if (!shadow->lv_niovecs && !ordered)
			continue;

		/* compare to existing item size */
		old_lv = lip->li_lv;
		if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv;

			if (ordered)
				goto insert;

			/*
			 * set the item up as though it is a new insertion so
			 * that the space reservation accounting is correct.
			 */
			*diff_len -= lv->lv_bytes;

			/* Ensure the lv is set up according to ->iop_size */
			lv->lv_niovecs = shadow->lv_niovecs;

			/* reset the lv buffer information for new formatting */
			lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_buf = (char *)lv +
					xlog_cil_iovec_space(lv->lv_niovecs);
		} else {
			/* switch to shadow buffer! */
			lv = shadow;
			lv->lv_item = lip;
			if (ordered) {
				/* track as an ordered logvec */
				ASSERT(lip->li_lv == NULL);
			}
		}

		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
		lip->li_ops->iop_format(lip, lv);
insert:
		xfs_cil_prepare_item(log, lv, old_lv, diff_len);
	}
}

/*
 * The use of lockless waitqueue_active() requires that the caller has
 * serialised itself against the wakeup call in xlog_cil_push_work(). That
 * can be done by either holding the push lock or the context lock.
 */
static inline bool
xlog_cil_over_hard_limit(
	struct xlog	*log,
	int32_t		space_used)
{
	if (waitqueue_active(&log->l_cilp->xc_push_wait))
		return true;
	if (space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
		return true;
	return false;
}

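/*
 * Note that an active xc_push_wait waitqueue is treated as being over the
 * hard limit even if the measured space has dipped back under it: once the
 * throttle has been activated, all new commits keep throttling until the
 * push work switches to a new context and wakes the waiters.
 */
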
/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	uint32_t		released_space)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_item	*lip;
	int			len = 0;
	int			iovhdr_res = 0, split_res = 0, ctx_res = 0;
	int			space_used;
	int			order;
	unsigned int		cpu_nr;
	struct xlog_cil_pcp	*cilpcp;

	ASSERT(tp);

	/*
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	xlog_cil_insert_format_items(log, tp, &len);

	/*
	 * Subtract the space released by intent cancelation from the space we
	 * consumed so that we remove it from the CIL space and add it back to
	 * the current transaction reservation context.
	 */
	len -= released_space;

	/*
	 * Grab the per-cpu pointer for the CIL before we start any accounting.
	 * That ensures that we are running with pre-emption disabled and so we
	 * can't be scheduled away between split sample/update operations that
	 * are done without outside locking to serialise them.
	 */
	cpu_nr = get_cpu();
	cilpcp = this_cpu_ptr(cil->xc_pcp);

	/* Tell the future push that there was work added by this CPU. */
	if (!cpumask_test_cpu(cpu_nr, &ctx->cil_pcpmask))
		cpumask_test_and_set_cpu(cpu_nr, &ctx->cil_pcpmask);

	/*
	 * We need to take the CIL checkpoint unit reservation on the first
	 * commit into the CIL. Test the XLOG_CIL_EMPTY bit first so we don't
	 * unnecessarily do an atomic op in the fast path here. We can clear the
	 * XLOG_CIL_EMPTY bit as we are under the xc_ctx_lock here and that
	 * needs to be held exclusively to reset the XLOG_CIL_EMPTY bit.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) &&
	    test_and_clear_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		ctx_res = ctx->ticket->t_unit_res;

	/*
	 * Check if we need to steal iclog headers. atomic_read() is not a
	 * locked atomic operation, so we can check the value before we do any
	 * real atomic ops in the fast path. If we've already taken the CIL unit
	 * reservation from this commit, we've already got one iclog header
	 * space reserved so we have to account for that otherwise we risk
	 * overrunning the reservation on this ticket.
	 *
	 * If the CIL is already at the hard limit, we might need more header
	 * space than originally reserved. So steal more header space from every
	 * commit that occurs once we are over the hard limit to ensure the CIL
	 * push won't run out of reservation space.
	 *
	 * This can steal more than we need, but that's OK.
	 *
	 * The cil->xc_ctx_lock provides the serialisation necessary for safely
	 * calling xlog_cil_over_hard_limit() in this context.
	 */
	space_used = atomic_read(&ctx->space_used) + cilpcp->space_used + len;
	if (atomic_read(&cil->xc_iclog_hdrs) > 0 ||
	    xlog_cil_over_hard_limit(log, space_used)) {
		split_res = log->l_iclog_hsize +
					sizeof(struct xlog_op_header);
		if (ctx_res)
			ctx_res += split_res * (tp->t_ticket->t_iclog_hdrs - 1);
		else
			ctx_res = split_res * tp->t_ticket->t_iclog_hdrs;
		atomic_sub(tp->t_ticket->t_iclog_hdrs, &cil->xc_iclog_hdrs);
	}
	cilpcp->space_reserved += ctx_res;

	/*
	 * Accurately account when over the soft limit, otherwise fold the
	 * percpu count into the global count if over the per-cpu threshold.
	 */
	if (!test_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags)) {
		atomic_add(len, &ctx->space_used);
	} else if (cilpcp->space_used + len >
			(XLOG_CIL_SPACE_LIMIT(log) / num_online_cpus())) {
		space_used = atomic_add_return(cilpcp->space_used + len,
						&ctx->space_used);
		cilpcp->space_used = 0;

		/*
		 * If we just transitioned over the soft limit, we need to
		 * transition to the global atomic counter.
		 */
		if (space_used >= XLOG_CIL_SPACE_LIMIT(log))
			xlog_cil_insert_pcp_aggregate(cil, ctx);
	} else {
		cilpcp->space_used += len;
	}
	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy))
		list_splice_init(&tp->t_busy, &cilpcp->busy_extents);

	/*
	 * Now update the order of everything modified in the transaction
	 * and insert items into the CIL if they aren't already there.
	 * We do this here so we only need to take the CIL lock once during
	 * the transaction commit.
	 */
	order = atomic_inc_return(&ctx->order_id);
	list_for_each_entry(lip, &tp->t_items, li_trans) {
		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		lip->li_order_id = order;
		if (!list_empty(&lip->li_cil))
			continue;
		list_add_tail(&lip->li_cil, &cilpcp->log_items);
	}
	put_cpu();

	/*
	 * If we've overrun the reservation, dump the tx details before we move
	 * the log items. Shutdown is imminent...
	 */
	tp->t_ticket->t_curr_res -= ctx_res + len;
	if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
		xfs_warn(log->l_mp, "Transaction log reservation overrun:");
		xfs_warn(log->l_mp,
			 "  log items: %d bytes (iov hdrs: %d bytes)",
			 len, iovhdr_res);
		xfs_warn(log->l_mp, "  split region headers: %d bytes",
			 split_res);
		xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
		xlog_print_trans(tp);
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
	}
}

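/*
 * Worked example of the reservation stealing in xlog_cil_insert_items()
 * (illustrative numbers only): the first commit into an empty CIL donates its
 * ticket's whole unit reservation (say 4KB) as ctx_res to cover the
 * checkpoint headers, plus split_res * (t_iclog_hdrs - 1) for a multi-iclog
 * checkpoint, since the unit reservation already covers one iclog header.
 * Subsequent commits steal split_res * t_iclog_hdrs more while the CIL-wide
 * header budget (xc_iclog_hdrs) is still positive or the CIL is over the
 * hard limit. Everything stolen is subtracted from the committing
 * transaction's t_curr_res.
 */
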
static void
xlog_cil_free_logvec(
	struct list_head	*lv_chain)
{
	struct xfs_log_vec	*lv;

	while (!list_empty(lv_chain)) {
		lv = list_first_entry(lv_chain, struct xfs_log_vec, lv_list);
		list_del_init(&lv->lv_list);
		kmem_free(lv);
	}
}

static void
xlog_discard_endio_work(
	struct work_struct	*work)
{
	struct xfs_cil_ctx	*ctx =
		container_of(work, struct xfs_cil_ctx, discard_endio_work);
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
	kmem_free(ctx);
}

/*
 * Queue up the actual completion to a thread to avoid IRQ-safe locking for
 * pagb_lock. Note that we need an unbounded workqueue, otherwise we might
 * get the execution delayed up to 30 seconds for weird reasons.
 */
static void
xlog_discard_endio(
	struct bio		*bio)
{
	struct xfs_cil_ctx	*ctx = bio->bi_private;

	INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
	queue_work(xfs_discard_wq, &ctx->discard_endio_work);
	bio_put(bio);
}

static void
xlog_discard_busy_extents(
	struct xfs_mount	*mp,
	struct xfs_cil_ctx	*ctx)
{
	struct list_head	*list = &ctx->busy_extents;
	struct xfs_extent_busy	*busyp;
	struct bio		*bio = NULL;
	struct blk_plug		plug;
	int			error = 0;

	ASSERT(xfs_has_discard(mp));

	blk_start_plug(&plug);
	list_for_each_entry(busyp, list, list) {
		trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
					 busyp->length);

		error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
				XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
				XFS_FSB_TO_BB(mp, busyp->length),
				GFP_NOFS, &bio);
		if (error && error != -EOPNOTSUPP) {
			xfs_info(mp,
	 "discard failed for extent [0x%llx,%u], error %d",
				 (unsigned long long)busyp->bno,
				 busyp->length,
				 error);
			break;
		}
	}

	if (bio) {
		bio->bi_private = ctx;
		bio->bi_end_io = xlog_discard_endio;
		submit_bio(bio);
	} else {
		xlog_discard_endio_work(&ctx->discard_endio_work);
	}
	blk_finish_plug(&plug);
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	struct xfs_cil_ctx	*ctx)
{
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;
	bool			abort = xlog_is_shutdown(ctx->cil->xc_log);

	/*
	 * If the I/O failed, we're aborting the commit and already shutdown.
	 * Wake any commit waiters before aborting the log items so we don't
	 * block async log pushers on callbacks. Async log pushers explicitly do
	 * not wait on log force completion because they may be holding locks
	 * required to unpin items.
	 */
	if (abort) {
		spin_lock(&ctx->cil->xc_push_lock);
		wake_up_all(&ctx->cil->xc_start_wait);
		wake_up_all(&ctx->cil->xc_commit_wait);
		spin_unlock(&ctx->cil->xc_push_lock);
	}

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, &ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_extent_busy_sort(&ctx->busy_extents);
	xfs_extent_busy_clear(mp, &ctx->busy_extents,
			      xfs_has_discard(mp) && !abort);

	spin_lock(&ctx->cil->xc_push_lock);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_push_lock);

	xlog_cil_free_logvec(&ctx->lv_chain);

	if (!list_empty(&ctx->busy_extents))
		xlog_discard_busy_extents(mp, ctx);
	else
		kmem_free(ctx);
}

void
xlog_cil_process_committed(
	struct list_head	*list)
{
	struct xfs_cil_ctx	*ctx;

	while ((ctx = list_first_entry_or_null(list,
			struct xfs_cil_ctx, iclog_entry))) {
		list_del(&ctx->iclog_entry);
		xlog_cil_committed(ctx);
	}
}

/*
 * Record the LSN of the iclog we were just granted space to start writing into.
 * If the context doesn't have a start_lsn recorded, then this iclog will
 * contain the start record for the checkpoint. Otherwise this write contains
 * the commit record for the checkpoint.
 */
void
xlog_cil_set_ctx_write_state(
	struct xfs_cil_ctx	*ctx,
	struct xlog_in_core	*iclog)
{
	struct xfs_cil		*cil = ctx->cil;
	xfs_lsn_t		lsn = be64_to_cpu(iclog->ic_header.h_lsn);

	ASSERT(!ctx->commit_lsn);
	if (!ctx->start_lsn) {
		spin_lock(&cil->xc_push_lock);
		/*
		 * The LSN we need to pass to the log items on transaction
		 * commit is the LSN reported by the first log vector write, not
		 * the commit lsn. If we use the commit record lsn then we can
		 * move the grant write head beyond the tail LSN and overwrite
		 * it.
		 */
		ctx->start_lsn = lsn;
		wake_up_all(&cil->xc_start_wait);
		spin_unlock(&cil->xc_push_lock);

		/*
		 * Make sure the metadata we are about to overwrite in the log
		 * has been flushed to stable storage before this iclog is
		 * issued.
		 */
		spin_lock(&cil->xc_log->l_icloglock);
		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
		spin_unlock(&cil->xc_log->l_icloglock);
		return;
	}

	/*
	 * Take a reference to the iclog for the context so that we still hold
	 * it when xlog_write is done and has released it. This means the
	 * context controls when the iclog is released for IO.
	 */
	atomic_inc(&iclog->ic_refcnt);

	/*
	 * xlog_state_get_iclog_space() guarantees there is enough space in the
	 * iclog for an entire commit record, so we can attach the context
	 * callbacks now. This needs to be done before we make the commit_lsn
	 * visible to waiters so that checkpoints with commit records in the
	 * same iclog order their IO completion callbacks in the same order that
	 * the commit records appear in the iclog.
	 */
	spin_lock(&cil->xc_log->l_icloglock);
	list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks);
	spin_unlock(&cil->xc_log->l_icloglock);

	/*
	 * Now we can record the commit LSN and wake anyone waiting for this
	 * sequence to have the ordered commit record assigned to a physical
	 * location in the log.
	 */
	spin_lock(&cil->xc_push_lock);
	ctx->commit_iclog = iclog;
	ctx->commit_lsn = lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_push_lock);
}

/*
 * Ensure that the order of log writes follows checkpoint sequence order. This
 * relies on the context LSN being zero until the log write has guaranteed the
 * LSN that the log write will start at via xlog_state_get_iclog_space().
 */
enum _record_type {
	_START_RECORD,
	_COMMIT_RECORD,
};

static int
xlog_cil_order_write(
	struct xfs_cil		*cil,
	xfs_csn_t		sequence,
	enum _record_type	record)
{
	struct xfs_cil_ctx	*ctx;

restart:
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (xlog_is_shutdown(cil->xc_log)) {
			spin_unlock(&cil->xc_push_lock);
			return -EIO;
		}

		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (ctx->sequence >= sequence)
			continue;

		/* Wait until the LSN for the record has been recorded. */
		switch (record) {
		case _START_RECORD:
			if (!ctx->start_lsn) {
				xlog_wait(&cil->xc_start_wait,
						&cil->xc_push_lock);
				goto restart;
			}
			break;
		case _COMMIT_RECORD:
			if (!ctx->commit_lsn) {
				xlog_wait(&cil->xc_commit_wait,
						&cil->xc_push_lock);
				goto restart;
			}
			break;
		}
	}
	spin_unlock(&cil->xc_push_lock);
	return 0;
}

/*
 * Write out the log vector change now attached to the CIL context. This will
 * write a start record that needs to be strictly ordered in ascending CIL
 * sequence order so that log recovery will always use in-order start LSNs when
 * replaying checkpoints.
 */
static int
xlog_cil_write_chain(
	struct xfs_cil_ctx	*ctx,
	uint32_t		chain_len)
{
	struct xlog		*log = ctx->cil->xc_log;
	int			error;

	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD);
	if (error)
		return error;
	return xlog_write(log, ctx, &ctx->lv_chain, ctx->ticket, chain_len);
}

/*
 * Write out the commit record of a checkpoint transaction to close off a
 * running log write. These commit records are strictly ordered in ascending CIL
 * sequence order so that log recovery will always replay the checkpoints in the
 * correct order.
 */
static int
xlog_cil_write_commit_record(
	struct xfs_cil_ctx	*ctx)
{
	struct xlog		*log = ctx->cil->xc_log;
	struct xlog_op_header	ophdr = {
		.oh_clientid = XFS_TRANSACTION,
		.oh_tid = cpu_to_be32(ctx->ticket->t_tid),
		.oh_flags = XLOG_COMMIT_TRANS,
	};
	struct xfs_log_iovec	reg = {
		.i_addr = &ophdr,
		.i_len = sizeof(struct xlog_op_header),
		.i_type = XLOG_REG_TYPE_COMMIT,
	};
	struct xfs_log_vec	vec = {
		.lv_niovecs = 1,
		.lv_iovecp = &reg,
	};
	int			error;
	LIST_HEAD(lv_chain);
	list_add(&vec.lv_list, &lv_chain);

	if (xlog_is_shutdown(log))
		return -EIO;

	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD);
	if (error)
		return error;

	/* account for space used by record data */
	ctx->ticket->t_curr_res -= reg.i_len;
	error = xlog_write(log, ctx, &lv_chain, ctx->ticket, reg.i_len);
	if (error)
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
	return error;
}

struct xlog_cil_trans_hdr {
	struct xlog_op_header	oph[2];
	struct xfs_trans_header	thdr;
	struct xfs_log_iovec	lhdr[2];
};

/*
 * Build a checkpoint transaction header to begin the journal transaction. We
 * need to account for the space used by the transaction header here as it is
 * not accounted for in xlog_write().
 *
 * This is the only place we write a transaction header, so we also build the
 * log opheaders that indicate the start of a log transaction and wrap the
 * transaction header. We keep the start record in its own log vector rather
 * than compacting them into a single region as this ends up making the logic
 * in xlog_write() for handling empty opheaders for start, commit and unmount
 * records much simpler.
 */
static void
xlog_cil_build_trans_hdr(
	struct xfs_cil_ctx	*ctx,
	struct xlog_cil_trans_hdr *hdr,
	struct xfs_log_vec	*lvhdr,
	int			num_iovecs)
{
	struct xlog_ticket	*tic = ctx->ticket;
	__be32			tid = cpu_to_be32(tic->t_tid);

	memset(hdr, 0, sizeof(*hdr));

	/* Log start record */
	hdr->oph[0].oh_tid = tid;
	hdr->oph[0].oh_clientid = XFS_TRANSACTION;
	hdr->oph[0].oh_flags = XLOG_START_TRANS;

	/* log iovec region pointer */
	hdr->lhdr[0].i_addr = &hdr->oph[0];
	hdr->lhdr[0].i_len = sizeof(struct xlog_op_header);
	hdr->lhdr[0].i_type = XLOG_REG_TYPE_LRHEADER;

	/* log opheader */
	hdr->oph[1].oh_tid = tid;
	hdr->oph[1].oh_clientid = XFS_TRANSACTION;
	hdr->oph[1].oh_len = cpu_to_be32(sizeof(struct xfs_trans_header));

	/* transaction header in host byte order format */
	hdr->thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	hdr->thdr.th_type = XFS_TRANS_CHECKPOINT;
	hdr->thdr.th_tid = tic->t_tid;
	hdr->thdr.th_num_items = num_iovecs;

	/* log iovec region pointer */
	hdr->lhdr[1].i_addr = &hdr->oph[1];
	hdr->lhdr[1].i_len = sizeof(struct xlog_op_header) +
				sizeof(struct xfs_trans_header);
	hdr->lhdr[1].i_type = XLOG_REG_TYPE_TRANSHDR;

	lvhdr->lv_niovecs = 2;
	lvhdr->lv_iovecp = &hdr->lhdr[0];
	lvhdr->lv_bytes = hdr->lhdr[0].i_len + hdr->lhdr[1].i_len;

	tic->t_curr_res -= lvhdr->lv_bytes;
}

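/*
 * A sketch of the checkpoint layout this header produces when written at the
 * head of the sorted lv_chain by xlog_write():
 *
 *	lhdr[0]: opheader with XLOG_START_TRANS	checkpoint start record
 *	lhdr[1]: opheader + xfs_trans_header	checkpoint transaction header
 *	...					formatted log item regions
 *	commit record				written separately via
 *						xlog_cil_write_commit_record()
 */
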
/*
 * CIL item reordering compare function. We want to order in ascending ID order,
 * but we want to leave items with the same ID in the order they were added to
 * the list. This is important for operations like reflink where we log 4 order
 * dependent intents in a single transaction when we overwrite an existing
 * shared extent with a new shared extent. i.e. BUI(unmap), CUI(drop),
 * CUI (inc), BUI(remap)...
 */
static int
xlog_cil_order_cmp(
	void			*priv,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xfs_log_vec	*l1 = container_of(a, struct xfs_log_vec, lv_list);
	struct xfs_log_vec	*l2 = container_of(b, struct xfs_log_vec, lv_list);

	return l1->lv_order_id > l2->lv_order_id;
}

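/*
 * Note that list_sort() is a stable sort, so returning a strict greater-than
 * comparison here is what preserves the insertion order of log vectors with
 * equal order IDs, i.e. items logged by the same transaction.
 */
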
/*
 * Pull all the log vectors off the items in the CIL, and remove the items from
 * the CIL. We don't need the CIL lock here because it's only needed on the
 * transaction commit side which is currently locked out by the flush lock.
 *
 * If a log item is marked with a whiteout, we do not need to write it to the
 * journal and so we just move them to the whiteout list for the caller to
 * dispose of appropriately.
 */
static void
xlog_cil_build_lv_chain(
	struct xfs_cil_ctx	*ctx,
	struct list_head	*whiteouts,
	uint32_t		*num_iovecs,
	uint32_t		*num_bytes)
{
	while (!list_empty(&ctx->log_items)) {
		struct xfs_log_item	*item;
		struct xfs_log_vec	*lv;

		item = list_first_entry(&ctx->log_items,
					struct xfs_log_item, li_cil);

		if (test_bit(XFS_LI_WHITEOUT, &item->li_flags)) {
			list_move(&item->li_cil, whiteouts);
			trace_xfs_cil_whiteout_skip(item);
			continue;
		}

		lv = item->li_lv;
		lv->lv_order_id = item->li_order_id;

		/* we don't write ordered log vectors */
		if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
			*num_bytes += lv->lv_bytes;
		*num_iovecs += lv->lv_niovecs;
		list_add_tail(&lv->lv_list, &ctx->lv_chain);

		list_del_init(&item->li_cil);
		item->li_order_id = 0;
		item->li_lv = NULL;
	}
}

static void
xlog_cil_cleanup_whiteouts(
	struct list_head	*whiteouts)
{
	while (!list_empty(whiteouts)) {
		struct xfs_log_item *item = list_first_entry(whiteouts,
						struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		trace_xfs_cil_whiteout_unpin(item);
		item->li_ops->iop_unpin(item, 1);
	}
}

/*
 * Push the Committed Item List to the log.
 *
 * If the current sequence is the same as xc_push_seq we need to do a flush. If
 * xc_push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * xc_push_seq is checked unlocked against the sequence number for a match.
 * Hence we can allow log forces to run racily and not issue pushes for the
 * same sequence twice. If we get a race between multiple pushes for the same
 * sequence they will block on the first one and then abort, hence avoiding
 * needless pushes.
 */
static void
xlog_cil_push_work(
	struct work_struct	*work)
{
	struct xfs_cil_ctx	*ctx =
		container_of(work, struct xfs_cil_ctx, push_work);
	struct xfs_cil		*cil = ctx->cil;
	struct xlog		*log = cil->xc_log;
	struct xfs_cil_ctx	*new_ctx;
	int			num_iovecs = 0;
	int			num_bytes = 0;
	int			error = 0;
	struct xlog_cil_trans_hdr thdr;
	struct xfs_log_vec	lvhdr = {};
	xfs_csn_t		push_seq;
	bool			push_commit_stable;
	LIST_HEAD		(whiteouts);
	struct xlog_ticket	*ticket;

	new_ctx = xlog_cil_ctx_alloc();
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	down_write(&cil->xc_ctx_lock);

	spin_lock(&cil->xc_push_lock);
	push_seq = cil->xc_push_seq;
	ASSERT(push_seq <= ctx->sequence);
	push_commit_stable = cil->xc_push_commit_stable;
	cil->xc_push_commit_stable = false;

	/*
	 * As we are about to switch to a new, empty CIL context, we no longer
	 * need to throttle tasks on CIL space overruns. Wake any waiters that
	 * the hard push throttle may have caught so they can start committing
	 * to the new context. The ctx->xc_push_lock provides the serialisation
	 * necessary for safely using the lockless waitqueue_active() check in
	 * this context.
	 */
	if (waitqueue_active(&cil->xc_push_wait))
		wake_up_all(&cil->xc_push_wait);

	xlog_cil_push_pcp_aggregate(cil, ctx);

	/*
	 * Check if we've anything to push. If there is nothing, then we don't
	 * move on to a new sequence number and so we have to be able to push
	 * this sequence again later.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
		cil->xc_push_seq = 0;
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/* check for a previously pushed sequence */
	if (push_seq < ctx->sequence) {
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/*
	 * We are now going to push this context, so add it to the committing
	 * list before we do anything else. This ensures that anyone waiting on
	 * this push can easily detect the difference between a "push in
	 * progress" and "CIL is empty, nothing to do".
	 *
	 * IOWs, a wait loop can now check for:
	 *	the current sequence not being found on the committing list;
	 *	an empty CIL; and
	 *	an unchanged sequence number
	 * to detect a push that had nothing to do and therefore does not need
	 * waiting on. If the CIL is not empty, we get put on the committing
	 * list before emptying the CIL and bumping the sequence number. Hence
	 * an empty CIL and an unchanged sequence number means we jumped out
	 * above after doing nothing.
	 *
	 * Hence the waiter will either find the commit sequence on the
	 * committing list or the sequence number will be unchanged and the CIL
	 * still dirty. In that latter case, the push has not yet started, and
	 * so the waiter will have to continue trying to check the CIL
	 * committing list until it is found. In extreme cases of delay, the
	 * sequence may fully commit between the attempts the wait makes to wait
	 * on the commit sequence.
	 */
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_push_lock);

	xlog_cil_build_lv_chain(ctx, &whiteouts, &num_iovecs, &num_bytes);

	/*
	 * Switch the contexts so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI. Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 *
	 * xfs_log_force_seq requires us to mirror the new sequence into the cil
	 * structure atomically with the addition of this sequence to the
	 * committing list. This also ensures that we can do unlocked checks
	 * against the current sequence in log forces without risking
	 * dereferencing a freed context pointer.
	 */
	spin_lock(&cil->xc_push_lock);
	xlog_cil_ctx_switch(cil, new_ctx);
	spin_unlock(&cil->xc_push_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Sort the log vector chain before we add the transaction headers.
	 * This ensures we always have the transaction headers at the start
	 * of the chain.
	 */
	list_sort(NULL, &ctx->lv_chain, xlog_cil_order_cmp);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 * Add the lvhdr to the head of the lv chain we pass to xlog_write() so
	 * it gets written into the iclog first.
	 */
	xlog_cil_build_trans_hdr(ctx, &thdr, &lvhdr, num_iovecs);
	num_bytes += lvhdr.lv_bytes;
	list_add(&lvhdr.lv_list, &ctx->lv_chain);

	/*
	 * Take the lvhdr back off the lv_chain immediately after calling
	 * xlog_cil_write_chain() as it should not be passed to log IO
	 * completion.
	 */
	error = xlog_cil_write_chain(ctx, num_bytes);
	list_del(&lvhdr.lv_list);
	if (error)
		goto out_abort_free_ticket;

	error = xlog_cil_write_commit_record(ctx);
	if (error)
		goto out_abort_free_ticket;

	/*
	 * Grab the ticket from the ctx so we can ungrant it after releasing the
	 * commit_iclog. The ctx may be freed by the time we return from
	 * releasing the commit_iclog (i.e. checkpoint has been completed and
	 * callback run) so we can't reference the ctx after the call to
	 * xlog_state_release_iclog().
	 */
	ticket = ctx->ticket;

	/*
	 * If the checkpoint spans multiple iclogs, wait for all previous iclogs
	 * to complete before we submit the commit_iclog. We can't use state
	 * checks for this - ACTIVE can be either a past completed iclog or a
	 * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a
	 * past or future iclog awaiting IO or ordered IO completion to be run.
	 * In the latter case, if it's a future iclog and we wait on it, then we
	 * will hang because it won't get processed through to ic_force_wait
	 * wakeup until this commit_iclog is written to disk. Hence we use the
	 * iclog header lsn and compare it to the commit lsn to determine if we
	 * need to wait on iclogs or not.
	 */
	spin_lock(&log->l_icloglock);
	if (ctx->start_lsn != ctx->commit_lsn) {
		xfs_lsn_t	plsn;

		plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn);
		if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) {
			/*
			 * Waiting on ic_force_wait orders the completion of
			 * iclogs older than ic_prev. Hence we only need to wait
			 * on the most recent older iclog here.
			 */
			xlog_wait_on_iclog(ctx->commit_iclog->ic_prev);
			spin_lock(&log->l_icloglock);
		}

		/*
		 * We need to issue a pre-flush so that the ordering for this
		 * checkpoint is correctly preserved down to stable storage.
		 */
		ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
	}

	/*
	 * The commit iclog must be written to stable storage to guarantee
	 * journal IO vs metadata writeback IO is correctly ordered on stable
	 * storage.
	 *
	 * If the push caller needs the commit to be immediately stable and the
	 * commit_iclog is not yet marked as XLOG_STATE_WANT_SYNC to indicate it
	 * will be written when released, switch its state to WANT_SYNC right
	 * now.
	 */
	ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
	if (push_commit_stable &&
	    ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
		xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
	ticket = ctx->ticket;
	xlog_state_release_iclog(log, ctx->commit_iclog, ticket);

	/* Not safe to reference ctx now! */

	spin_unlock(&log->l_icloglock);
	xlog_cil_cleanup_whiteouts(&whiteouts);
	xfs_log_ticket_ungrant(log, ticket);
	return;

out_skip:
	up_write(&cil->xc_ctx_lock);
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return;

out_abort_free_ticket:
	ASSERT(xlog_is_shutdown(log));
	xlog_cil_cleanup_whiteouts(&whiteouts);
	if (!ctx->commit_iclog) {
		xfs_log_ticket_ungrant(log, ctx->ticket);
		xlog_cil_committed(ctx);
		return;
	}
	spin_lock(&log->l_icloglock);
	ticket = ctx->ticket;
	xlog_state_release_iclog(log, ctx->commit_iclog, ticket);
	/* Not safe to reference ctx now! */
	spin_unlock(&log->l_icloglock);
	xfs_log_ticket_ungrant(log, ticket);
}

/*
 * We need to push CIL every so often so we don't cache more than we can fit in
 * the log. The limit really is that a checkpoint can't be more than half the
 * log (the current checkpoint is not allowed to overwrite the previous
 * checkpoint), but commit latency and memory usage limit this to a smaller
 * size.
 */
static void
xlog_cil_push_background(
	struct xlog	*log) __releases(cil->xc_ctx_lock)
{
	struct xfs_cil	*cil = log->l_cilp;
	int		space_used = atomic_read(&cil->xc_ctx->space_used);

	/*
	 * The cil won't be empty because we are called while holding the
	 * context lock so whatever we added to the CIL will still be there.
	 */
	ASSERT(!test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));

	/*
	 * We are done if:
	 * - we haven't used up all the space available yet; or
	 * - we've already queued up a push; and
	 * - we're not over the hard limit; and
	 * - nothing has been over the hard limit.
	 *
	 * If so, we don't need to take the push lock as there's nothing to do.
	 */
	if (space_used < XLOG_CIL_SPACE_LIMIT(log) ||
	    (cil->xc_push_seq == cil->xc_current_sequence &&
	     space_used < XLOG_CIL_BLOCKING_SPACE_LIMIT(log) &&
	     !waitqueue_active(&cil->xc_push_wait))) {
		up_read(&cil->xc_ctx_lock);
		return;
	}

	spin_lock(&cil->xc_push_lock);
	if (cil->xc_push_seq < cil->xc_current_sequence) {
		cil->xc_push_seq = cil->xc_current_sequence;
		queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
	}

	/*
	 * Drop the context lock now, we can't hold that if we need to sleep
	 * because we are over the blocking threshold. The push_lock is still
	 * held, so blocking threshold sleep/wakeup is still correctly
	 * serialised here.
	 */
	up_read(&cil->xc_ctx_lock);

	/*
	 * If we are well over the space limit, throttle the work that is being
	 * done until the push work on this context has begun. Enforce the hard
	 * throttle on all transaction commits once it has been activated, even
	 * if the committing transactions have resulted in the space usage
	 * dipping back down under the hard limit.
	 *
	 * The ctx->xc_push_lock provides the serialisation necessary for safely
	 * calling xlog_cil_over_hard_limit() in this context.
	 */
	if (xlog_cil_over_hard_limit(log, space_used)) {
		trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
		ASSERT(space_used < log->l_logsize);
		xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
		return;
	}

	spin_unlock(&cil->xc_push_lock);
}

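/*
 * To summarise the thresholds above (behaviour, not exact numbers): commits
 * proceed untouched while CIL space used stays below XLOG_CIL_SPACE_LIMIT(),
 * a background push is queued once that soft limit is crossed, and committers
 * sleep on xc_push_wait once xlog_cil_over_hard_limit() reports the blocking
 * limit has been hit, until the push work switches to a new, empty context
 * and wakes them.
 */
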
/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed.
 *
 * If the caller is performing a synchronous force, we will flush the workqueue
 * to get previously queued work moving to minimise the wait time they will
 * undergo waiting for all outstanding pushes to complete. The caller is
 * expected to do the required waiting for push_seq to complete.
 *
 * If the caller is performing an async push, we need to ensure that the
 * checkpoint is fully flushed out of the iclogs when we finish the push. If we
 * don't do this, then the commit record may remain sitting in memory in an
 * ACTIVE iclog. This then requires another full log force to push to disk,
 * which defeats the purpose of having an async, non-blocking CIL force
 * mechanism. Hence in this case we need to pass a flag to the push work to
 * indicate it needs to flush the commit record itself.
 */
static void
xlog_cil_push_now(
	struct xlog	*log,
	xfs_csn_t	push_seq,
	bool		async)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (!cil)
		return;

	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

	/* start on any pending background push to minimise wait time on it */
	if (!async)
		flush_workqueue(cil->xc_push_wq);

	spin_lock(&cil->xc_push_lock);

	/*
	 * If this is an async flush request, we always need to set the
	 * xc_push_commit_stable flag even if something else has already queued
	 * a push. The flush caller is asking for the CIL to be on stable
	 * storage when the next push completes, so regardless of who has queued
	 * the push, the flush requires stable semantics from it.
	 */
	cil->xc_push_commit_stable = async;

	/*
	 * If the CIL is empty or we've already pushed the sequence then
	 * there's no more work that we need to do.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) ||
	    push_seq <= cil->xc_push_seq) {
		spin_unlock(&cil->xc_push_lock);
		return;
	}

	cil->xc_push_seq = push_seq;
	queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
	spin_unlock(&cil->xc_push_lock);
}

static bool
xlog_cil_empty(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;
	bool		empty = false;

	spin_lock(&cil->xc_push_lock);
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		empty = true;
	spin_unlock(&cil->xc_push_lock);
	return empty;
}

/*
 * If there are intent done items in this transaction and the related intent was
 * committed in the current (same) CIL checkpoint, we don't need to write either
 * the intent or intent done item to the journal as the change will be
 * journalled atomically within this checkpoint. As we cannot remove items from
 * the CIL here, mark the related intent with a whiteout so that the CIL push
 * can remove it rather than writing it to the journal. Then remove the intent
 * done item from the current transaction and release it so it doesn't get put
 * into the CIL at all.
 */
static uint32_t
xlog_cil_process_intents(
	struct xfs_cil		*cil,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip, *ilip, *next;
	uint32_t		len = 0;

	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		if (!(lip->li_ops->flags & XFS_ITEM_INTENT_DONE))
			continue;

		ilip = lip->li_ops->iop_intent(lip);
		if (!ilip || !xlog_item_in_current_chkpt(cil, ilip))
			continue;
		set_bit(XFS_LI_WHITEOUT, &ilip->li_flags);
		trace_xfs_cil_whiteout_mark(ilip);
		len += ilip->li_lv->lv_bytes;
		kmem_free(ilip->li_lv);
		ilip->li_lv = NULL;

		xfs_trans_del_item(lip);
		lip->li_ops->iop_release(lip);
	}
	return len;
}

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
void
xlog_cil_commit(
	struct xlog		*log,
	struct xfs_trans	*tp,
	xfs_csn_t		*commit_seq,
	bool			regrant)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_item	*lip, *next;
	uint32_t		released_space = 0;

	/*
	 * Do all necessary memory allocation before we lock the CIL.
	 * This ensures the allocation does not deadlock with a CIL
	 * push in memory reclaim (e.g. from kswapd).
	 */
	xlog_cil_alloc_shadow_bufs(log, tp);

	/* lock out background commit */
	down_read(&cil->xc_ctx_lock);

	if (tp->t_flags & XFS_TRANS_HAS_INTENT_DONE)
		released_space = xlog_cil_process_intents(cil, tp);

	xlog_cil_insert_items(log, tp, released_space);

	if (regrant && !xlog_is_shutdown(log))
		xfs_log_ticket_regrant(log, tp->t_ticket);
	else
		xfs_log_ticket_ungrant(log, tp->t_ticket);
	tp->t_ticket = NULL;
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and possibly freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	trace_xfs_trans_commit_items(tp, _RET_IP_);
	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (lip->li_ops->iop_committing)
			lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
	}
	if (commit_seq)
		*commit_seq = cil->xc_ctx->sequence;

	/* xlog_cil_push_background() releases cil->xc_ctx_lock */
	xlog_cil_push_background(log);
}

/*
 * Flush the CIL to stable storage but don't wait for it to complete. This
 * requires the CIL push to ensure the commit record for the push hits the disk,
 * but otherwise is no different to a push done from a log force.
 */
void
xlog_cil_flush(
	struct xlog	*log)
{
	xfs_csn_t	seq = log->l_cilp->xc_current_sequence;

	trace_xfs_log_force(log->l_mp, seq, _RET_IP_);
	xlog_cil_push_now(log, seq, true);

	/*
	 * If the CIL is empty, make sure that any previous checkpoint that may
	 * still be in an active iclog is pushed to stable storage.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &log->l_cilp->xc_flags))
		xfs_log_force(log->l_mp, 0);
}

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence number given.
 * Hence the only time we will trigger a push here is if the push sequence is
 * the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_seq(
	struct xlog	*log,
	xfs_csn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	if (!sequence)
		sequence = cil->xc_current_sequence;
	trace_xfs_log_force(log->l_mp, sequence, _RET_IP_);

	/*
	 * check to see if we need to force out the current context.
	 * xlog_cil_push_now() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
restart:
	xlog_cil_push_now(log, sequence, false);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (xlog_is_shutdown(log))
			goto out_shutdown;
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}

	/*
	 * The call to xlog_cil_push_now() executes the push in the background.
	 * Hence by the time we have got here our sequence may not have been
	 * pushed yet. This is true if the current sequence still matches the
	 * push sequence after the above wait loop and the CIL still contains
	 * dirty objects. This is guaranteed by the push code first adding the
	 * context to the committing list before emptying the CIL.
	 *
	 * Hence if we don't find the context in the committing list and the
	 * current sequence number is unchanged then the CIL contents are
	 * significant. If the CIL is empty, it means there was nothing to push
	 * and that means there is nothing to wait for. If the CIL is not empty,
	 * it means we haven't yet started the push, because if it had started
	 * we would have found the context on the committing list.
	 */
	if (sequence == cil->xc_current_sequence &&
	    !test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
		spin_unlock(&cil->xc_push_lock);
		goto restart;
	}

	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;

	/*
	 * We detected a shutdown in progress. We need to trigger the log force
	 * to pass through its iclog state machine error handling, even though
	 * we are already in a shutdown state. Hence we can't return
	 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
	 * LSN is already stable), so we return a zero LSN instead.
	 */
out_shutdown:
	spin_unlock(&cil->xc_push_lock);
	return 0;
}

/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
	struct xlog	*log)
{
	struct xfs_cil		*cil;
	struct xfs_cil_ctx	*ctx;
	struct xlog_cil_pcp	*cilpcp;
	int			cpu;

	cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
	if (!cil)
		return -ENOMEM;
	/*
	 * Limit the CIL pipeline depth to 4 concurrent works to bound the
	 * concurrency the log spinlocks will be exposed to.
	 */
	cil->xc_push_wq = alloc_workqueue("xfs-cil/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
			4, log->l_mp->m_super->s_id);
	if (!cil->xc_push_wq)
		goto out_destroy_cil;

	cil->xc_log = log;
	cil->xc_pcp = alloc_percpu(struct xlog_cil_pcp);
	if (!cil->xc_pcp)
		goto out_destroy_wq;

	for_each_possible_cpu(cpu) {
		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
		INIT_LIST_HEAD(&cilpcp->busy_extents);
		INIT_LIST_HEAD(&cilpcp->log_items);
	}

	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_push_lock);
	init_waitqueue_head(&cil->xc_push_wait);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_start_wait);
	init_waitqueue_head(&cil->xc_commit_wait);
	log->l_cilp = cil;

	ctx = xlog_cil_ctx_alloc();
	xlog_cil_ctx_switch(cil, ctx);
	return 0;

out_destroy_wq:
	destroy_workqueue(cil->xc_push_wq);
out_destroy_cil:
	kmem_free(cil);
	return -ENOMEM;
}

void
xlog_cil_destroy(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (cil->xc_ctx) {
		if (cil->xc_ctx->ticket)
			xfs_log_ticket_put(cil->xc_ctx->ticket);
		kmem_free(cil->xc_ctx);
	}

	ASSERT(test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
	free_percpu(cil->xc_pcp);
	destroy_workqueue(cil->xc_push_wq);
	kmem_free(cil);
}