// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2008 Dave Chinner
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"

#ifdef DEBUG
/*
 * Check that the list is sorted as it should be.
 *
 * Called with the ail lock held, but we don't want to assert fail with it
 * held otherwise we'll lock everything up and won't be able to debug the
 * cause. Hence we sample and check the state under the AIL lock and return if
 * everything is fine, otherwise we drop the lock and run the ASSERT checks.
 * Asserts may not be fatal, so pick the lock back up and continue onwards.
 */
STATIC void
xfs_ail_check(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
	__must_hold(&ailp->ail_lock)
{
	struct xfs_log_item	*prev_lip;
	struct xfs_log_item	*next_lip;
	xfs_lsn_t		prev_lsn = NULLCOMMITLSN;
	xfs_lsn_t		next_lsn = NULLCOMMITLSN;
	xfs_lsn_t		lsn;
	bool			in_ail;

	if (list_empty(&ailp->ail_head))
		return;
	/*
	 * Sample then check the next and previous entries are valid.
	 */
	in_ail = test_bit(XFS_LI_IN_AIL, &lip->li_flags);
	prev_lip = list_entry(lip->li_ail.prev, struct xfs_log_item, li_ail);
	if (&prev_lip->li_ail != &ailp->ail_head)
		prev_lsn = prev_lip->li_lsn;
	next_lip = list_entry(lip->li_ail.next, struct xfs_log_item, li_ail);
	if (&next_lip->li_ail != &ailp->ail_head)
		next_lsn = next_lip->li_lsn;
	lsn = lip->li_lsn;

	if (in_ail &&
	    (prev_lsn == NULLCOMMITLSN || XFS_LSN_CMP(prev_lsn, lsn) <= 0) &&
	    (next_lsn == NULLCOMMITLSN || XFS_LSN_CMP(next_lsn, lsn) >= 0))
		return;
	spin_unlock(&ailp->ail_lock);
	ASSERT(in_ail);
	ASSERT(prev_lsn == NULLCOMMITLSN || XFS_LSN_CMP(prev_lsn, lsn) <= 0);
	ASSERT(next_lsn == NULLCOMMITLSN || XFS_LSN_CMP(next_lsn, lsn) >= 0);
	spin_lock(&ailp->ail_lock);
}
#else /* !DEBUG */
#define	xfs_ail_check(a,l)
#endif /* DEBUG */

/*
 * Return a pointer to the last item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
static struct xfs_log_item *
xfs_ail_max(
	struct xfs_ail		*ailp)
{
	if (list_empty(&ailp->ail_head))
		return NULL;

	return list_entry(ailp->ail_head.prev, struct xfs_log_item, li_ail);
}

/*
 * Return a pointer to the item which follows the given item in the AIL.  If
 * the given item is the last item in the list, then return NULL.
 */
static struct xfs_log_item *
xfs_ail_next(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	if (lip->li_ail.next == &ailp->ail_head)
		return NULL;

	return list_first_entry(&lip->li_ail, struct xfs_log_item, li_ail);
}

/*
 * This is called by the log manager code to determine the LSN of the tail of
 * the log.  This is exactly the LSN of the first item in the AIL.  If the AIL
 * is empty, then this function returns 0.
 *
 * We need the AIL lock in order to get a coherent read of the lsn of the last
 * item in the AIL.
 */
static xfs_lsn_t
__xfs_ail_min_lsn(
	struct xfs_ail		*ailp)
{
	struct xfs_log_item	*lip = xfs_ail_min(ailp);

	if (lip)
		return lip->li_lsn;
	return 0;
}

xfs_lsn_t
xfs_ail_min_lsn(
	struct xfs_ail		*ailp)
{
	xfs_lsn_t		lsn;

	spin_lock(&ailp->ail_lock);
	lsn = __xfs_ail_min_lsn(ailp);
	spin_unlock(&ailp->ail_lock);

	return lsn;
}

/*
 * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
 */
static xfs_lsn_t
xfs_ail_max_lsn(
	struct xfs_ail		*ailp)
{
	xfs_lsn_t		lsn = 0;
	struct xfs_log_item	*lip;

	spin_lock(&ailp->ail_lock);
	lip = xfs_ail_max(ailp);
	if (lip)
		lsn = lip->li_lsn;
	spin_unlock(&ailp->ail_lock);

	return lsn;
}

/*
 * The cursor keeps track of where our current traversal is up to by tracking
 * the next item in the list for us. However, for this to be safe, removing an
 * object from the AIL needs to invalidate any cursor that points to it. Hence
 * the traversal cursor needs to be linked to the struct xfs_ail so that
 * deletion can search all the active cursors for invalidation.
 */
STATIC void
xfs_trans_ail_cursor_init(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_add_tail(&cur->list, &ailp->ail_cursors);
}

/*
 * Get the next item in the traversal and advance the cursor.  If the cursor
 * was invalidated (indicated by a lip of 1), restart the traversal.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_next(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	struct xfs_log_item	*lip = cur->item;

	if ((uintptr_t)lip & 1)
		lip = xfs_ail_min(ailp);
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}

/*
 * When the traversal is complete, we need to remove the cursor from the list
 * of traversing cursors.
 */
void
xfs_trans_ail_cursor_done(
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_del_init(&cur->list);
}
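
/*
 * Typical cursor usage, matching the push loop in xfsaild_push() below.
 * This is an illustrative sketch of the API contract, not an additional
 * interface:
 *
 *	spin_lock(&ailp->ail_lock);
 *	lip = xfs_trans_ail_cursor_first(ailp, &cur, lsn);
 *	while (lip) {
 *		process lip; the AIL lock may be dropped and retaken here.
 *		If the item the cursor points at is removed meanwhile, the
 *		cursor is invalidated and the next call restarts from the
 *		head of the AIL rather than dereferencing a freed item.
 *		lip = xfs_trans_ail_cursor_next(ailp, &cur);
 *	}
 *	xfs_trans_ail_cursor_done(&cur);
 *	spin_unlock(&ailp->ail_lock);
 */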

/*
 * Invalidate any cursor that is pointing to this item. This is called when an
 * item is removed from the AIL. Any cursor pointing to this object is now
 * invalid and the traversal needs to be terminated so it doesn't reference a
 * freed object. We set the low bit of the cursor item pointer so we can
 * distinguish between an invalidation and the end of the list when getting the
 * next item from the cursor.
 */
STATIC void
xfs_trans_ail_cursor_clear(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail_cursor	*cur;

	list_for_each_entry(cur, &ailp->ail_cursors, list) {
		if (cur->item == lip)
			cur->item = (struct xfs_log_item *)
					((uintptr_t)cur->item | 1);
	}
}

/*
 * Find the first item in the AIL with the given @lsn by searching in ascending
 * LSN order and initialise the cursor to point to the next item for an
 * ascending traversal.  Pass a @lsn of zero to initialise the cursor to the
 * first item in the AIL. Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_first(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	xfs_trans_ail_cursor_init(ailp, cur);

	if (lsn == 0) {
		lip = xfs_ail_min(ailp);
		goto out;
	}

	list_for_each_entry(lip, &ailp->ail_head, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
			goto out;
	}
	return NULL;

out:
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}

static struct xfs_log_item *
__xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	list_for_each_entry_reverse(lip, &ailp->ail_head, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
			return lip;
	}
	return NULL;
}

/*
 * Find the last item in the AIL with the given @lsn by searching in descending
 * LSN order and initialise the cursor to point to that item.  If there is no
 * item with the value of @lsn, then it sets the cursor to the last item with
 * an LSN lower than @lsn.  Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_trans_ail_cursor_init(ailp, cur);
	cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
	return cur->item;
}
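
/*
 * Note the asymmetry with xfs_trans_ail_cursor_first(): a "last" cursor
 * points at the found item itself rather than its successor. That is what
 * lets xfs_ail_splice() below use cur->item directly as the insertion point
 * when a caller repeatedly inserts batches at ascending LSNs.
 */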

/*
 * Splice the log item list into the AIL at the given LSN. We splice to the
 * tail of the given LSN to maintain insert order for push traversals. The
 * cursor is optional, allowing repeated updates to the same LSN to avoid
 * repeated traversals.  This should not be called with an empty list.
 */
static void
xfs_ail_splice(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct list_head	*list,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	ASSERT(!list_empty(list));

	/*
	 * Use the cursor to determine the insertion point if one is
	 * provided.  If not, or if the one we got is not valid,
	 * find the place in the AIL where the items belong.
	 */
	lip = cur ? cur->item : NULL;
	if (!lip || (uintptr_t)lip & 1)
		lip = __xfs_trans_ail_cursor_last(ailp, lsn);

	/*
	 * If a cursor is provided, we know we're processing the AIL
	 * in lsn order, and future items to be spliced in will
	 * follow the last one being inserted now.  Update the
	 * cursor to point to that last item, now while we have a
	 * reliable pointer to it.
	 */
	if (cur)
		cur->item = list_entry(list->prev, struct xfs_log_item, li_ail);

	/*
	 * Finally perform the splice.  Unless the AIL was empty,
	 * lip points to the item in the AIL _after_ which the new
	 * items should go.  If lip is null the AIL was empty, so
	 * the new items go at the head of the AIL.
	 */
	if (lip)
		list_splice(list, &lip->li_ail);
	else
		list_splice(list, &ailp->ail_head);
}
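
/*
 * For example (illustrative): with AIL contents at LSNs [10, 20, 20, 30],
 * splicing a list at LSN 20 inserts the new items after the second 20 and
 * before the 30, so items sharing an LSN are pushed in the order they were
 * inserted.
 */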

/*
 * Delete the given item from the AIL, invalidating any cursor that points
 * at it.
 */
static void
xfs_ail_delete(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	xfs_ail_check(ailp, lip);
	list_del(&lip->li_ail);
	xfs_trans_ail_cursor_clear(ailp, lip);
}

/*
 * Requeue a failed buffer for writeback.
 *
 * We clear the log item failed state here as well, but we have to be careful
 * about reference counts because the only active reference counts on the
 * buffer may be the failed log items. Hence if we clear the log item failed
 * state before queuing the buffer for IO we can release all active references
 * to the buffer and free it, leading to use-after-free problems in
 * xfs_buf_delwri_queue. It makes no difference to the buffer or log items
 * which order we process them in - the buffer is locked, and we own the
 * buffer list so nothing on them is going to change while we are performing
 * this action.
 *
 * Hence we can safely queue the buffer for IO before we clear the failed log
 * item state, therefore always having an active reference to the buffer and
 * avoiding the transient zero-reference state that leads to use-after-free.
 */
static inline int
xfsaild_resubmit_item(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp = lip->li_buf;

	if (!xfs_buf_trylock(bp))
		return XFS_ITEM_LOCKED;

	if (!xfs_buf_delwri_queue(bp, buffer_list)) {
		xfs_buf_unlock(bp);
		return XFS_ITEM_FLUSHING;
	}

	/* protected by ail_lock */
	list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
		if (bp->b_flags & _XBF_INODES)
			clear_bit(XFS_LI_FAILED, &lip->li_flags);
		else
			xfs_clear_li_failed(lip);
	}

	xfs_buf_unlock(bp);
	return XFS_ITEM_SUCCESS;
}

static inline uint
xfsaild_push_item(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	/*
	 * If log item pinning is enabled, skip the push and track the item as
	 * pinned. This can help induce head-behind-tail conditions.
	 */
	if (XFS_TEST_ERROR(false, ailp->ail_log->l_mp, XFS_ERRTAG_LOG_ITEM_PIN))
		return XFS_ITEM_PINNED;

	/*
	 * Consider the item pinned if a push callback is not defined so the
	 * caller will force the log. This should only happen for intent items
	 * as they are unpinned once the associated done item is committed to
	 * the on-disk log.
	 */
	if (!lip->li_ops->iop_push)
		return XFS_ITEM_PINNED;
	if (test_bit(XFS_LI_FAILED, &lip->li_flags))
		return xfsaild_resubmit_item(lip, &ailp->ail_buf_list);
	return lip->li_ops->iop_push(lip, &ailp->ail_buf_list);
}
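
/*
 * The push result feeds the switch in xfsaild_push() below:
 *
 *	XFS_ITEM_SUCCESS  - queued for write, the push LSN advances;
 *	XFS_ITEM_FLUSHING - already under I/O, counted as "flushing";
 *	XFS_ITEM_PINNED   - needs a log force first, counted as "stuck" and
 *			    bumps ail_log_flush so a CIL push is forced on a
 *			    later pass;
 *	XFS_ITEM_LOCKED   - lock contention, also counted as "stuck".
 */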

static long
xfsaild_push(
	struct xfs_ail		*ailp)
{
	struct xfs_mount	*mp = ailp->ail_log->l_mp;
	struct xfs_ail_cursor	cur;
	struct xfs_log_item	*lip;
	xfs_lsn_t		lsn;
	xfs_lsn_t		target;
	long			tout;
	int			stuck = 0;
	int			flushing = 0;
	int			count = 0;

	/*
	 * If we encountered pinned items or did not finish writing out all
	 * buffers the last time we ran, force a background CIL push to get the
	 * items unpinned in the near future. We do not wait on the CIL push as
	 * that could stall us for seconds if there is enough background IO
	 * load. Stalling for that long when the tail of the log is pinned and
	 * needs flushing will hard stop the transaction subsystem when log
	 * space is low.
	 */
	if (ailp->ail_log_flush && ailp->ail_last_pushed_lsn == 0 &&
	    (!list_empty_careful(&ailp->ail_buf_list) ||
	     xfs_ail_min_lsn(ailp))) {
		ailp->ail_log_flush = 0;

		XFS_STATS_INC(mp, xs_push_ail_flush);
		xlog_cil_flush(ailp->ail_log);
	}
	spin_lock(&ailp->ail_lock);

	/*
	 * If we have a sync push waiter, we always have to push till the AIL
	 * is empty. Update the target to point to the end of the AIL so that
	 * we capture updates that occur after the sync push waiter has gone
	 * to sleep.
	 */
	if (waitqueue_active(&ailp->ail_empty)) {
		lip = xfs_ail_max(ailp);
		if (lip)
			target = lip->li_lsn;
	} else {
		/* barrier matches the ail_target update in xfs_ail_push() */
		smp_rmb();
		target = ailp->ail_target;
		ailp->ail_target_prev = target;
	}

	/* we're done if the AIL is empty or our push has reached the end */
	lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->ail_last_pushed_lsn);
	if (!lip)
		goto out_done;
	XFS_STATS_INC(mp, xs_push_ail);

	lsn = lip->li_lsn;
	while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
		int	lock_result;

		/*
		 * Note that iop_push may unlock and reacquire the AIL lock.
		 * We rely on the AIL cursor implementation to be able to deal
		 * with the dropped lock.
		 */
		lock_result = xfsaild_push_item(ailp, lip);
		switch (lock_result) {
		case XFS_ITEM_SUCCESS:
			XFS_STATS_INC(mp, xs_push_ail_success);
			trace_xfs_ail_push(lip);

			ailp->ail_last_pushed_lsn = lsn;
			break;
		case XFS_ITEM_FLUSHING:
			/*
			 * The item or its backing buffer is already being
			 * flushed.  The typical reason for that is that an
			 * inode buffer is locked because we already pushed the
			 * updates to it as part of inode clustering.
			 *
			 * We do not want to stop flushing just because lots
			 * of items are already being flushed, but we need to
			 * re-try the flushing relatively soon if most of the
			 * AIL is being flushed.
			 */
			XFS_STATS_INC(mp, xs_push_ail_flushing);
			trace_xfs_ail_flushing(lip);

			flushing++;
			ailp->ail_last_pushed_lsn = lsn;
			break;
		case XFS_ITEM_PINNED:
			XFS_STATS_INC(mp, xs_push_ail_pinned);
			trace_xfs_ail_pinned(lip);

			stuck++;
			ailp->ail_log_flush++;
			break;
		case XFS_ITEM_LOCKED:
			XFS_STATS_INC(mp, xs_push_ail_locked);
			trace_xfs_ail_locked(lip);

			stuck++;
			break;
		default:
			ASSERT(0);
			break;
		}

		count++;
		/*
		 * Are there too many items we can't do anything with?
		 *
		 * If we are skipping too many items because we can't flush
		 * them or they are already being flushed, we back off and
		 * give them time to complete whatever operation is being
		 * done. i.e. remove pressure from the AIL while we can't make
		 * progress so traversals don't slow down further inserts and
		 * removals to/from the AIL.
		 *
		 * The value of 100 is an arbitrary magic number based on
		 * observation.
		 */
		if (stuck > 100)
			break;

		lip = xfs_trans_ail_cursor_next(ailp, &cur);
		if (lip == NULL)
			break;
		lsn = lip->li_lsn;
	}

out_done:
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);

	if (xfs_buf_delwri_submit_nowait(&ailp->ail_buf_list))
		ailp->ail_log_flush++;
	if (!count || XFS_LSN_CMP(lsn, target) >= 0) {
		/*
		 * We reached the target or the AIL is empty, so wait a bit
		 * longer for I/O to complete and remove pushed items from the
		 * AIL before we start the next scan from the start of the AIL.
		 */
		tout = 50;
		ailp->ail_last_pushed_lsn = 0;
	} else if (((stuck + flushing) * 100) / count > 90) {
		/*
		 * Either there is a lot of contention on the AIL or we are
		 * stuck due to operations in progress. "Stuck" in this case
		 * is defined as >90% of the items we tried to push were stuck.
		 *
		 * Backoff a bit more to allow some I/O to complete before
		 * restarting from the start of the AIL. This prevents us from
		 * spinning on the same items, and if they are pinned will
		 * allow the restart to issue a log force to unpin the stuck
		 * items.
		 */
		tout = 20;
		ailp->ail_last_pushed_lsn = 0;
	} else {
		/*
		 * Assume we have more work to do in a short while.
		 */
		tout = 10;
	}

	return tout;
}
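
/*
 * The return value is the sleep time in milliseconds for xfsaild(): 50ms
 * after reaching the target or emptying the AIL, 20ms when more than 90% of
 * the items pushed were stuck or flushing, and 10ms otherwise. The idle
 * (indefinite sleep) case is handled in xfsaild() itself.
 */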

static int
xfsaild(
	void		*data)
{
	struct xfs_ail	*ailp = data;
	long		tout = 0;	/* milliseconds */
	unsigned int	noreclaim_flag;

	noreclaim_flag = memalloc_noreclaim_save();
	set_freezable();

	while (1) {
		if (tout && tout <= 20)
			set_current_state(TASK_KILLABLE);
		else
			set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * Check kthread_should_stop() after we set the task state to
		 * guarantee that we either see the stop bit and exit or the
		 * task state is reset to runnable such that it's not scheduled
		 * out indefinitely and detects the stop bit at next iteration.
		 * A memory barrier is included in the above task state set to
		 * serialize against kthread_stop().
		 */
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			/*
			 * The caller forces out the AIL before stopping the
			 * thread in the common case, which means the delwri
			 * queue is drained. In the shutdown case, the queue may
			 * still hold relogged buffers that haven't been
			 * submitted because they were pinned since added to the
			 * queue.
			 *
			 * Log I/O error processing stales the underlying buffer
			 * and clears the delwri state, expecting the buf to be
			 * removed on the next submission attempt. That won't
			 * happen if we're shutting down, so this is the last
			 * opportunity to release such buffers from the queue.
			 */
			ASSERT(list_empty(&ailp->ail_buf_list) ||
			       xlog_is_shutdown(ailp->ail_log));
			xfs_buf_delwri_cancel(&ailp->ail_buf_list);
			break;
		}
		spin_lock(&ailp->ail_lock);

		/*
		 * Idle if the AIL is empty and we are not racing with a target
		 * update. We check the AIL after we set the task to a sleep
		 * state to guarantee that we either catch an ail_target update
		 * or that a wake_up resets the state to TASK_RUNNING.
		 * Otherwise, we run the risk of sleeping indefinitely.
		 *
		 * The barrier matches the ail_target update in xfs_ail_push().
		 */
		smp_rmb();
		if (!xfs_ail_min(ailp) &&
		    ailp->ail_target == ailp->ail_target_prev &&
		    list_empty(&ailp->ail_buf_list)) {
			spin_unlock(&ailp->ail_lock);
			freezable_schedule();
			tout = 0;
			continue;
		}
		spin_unlock(&ailp->ail_lock);
		if (tout)
			freezable_schedule_timeout(msecs_to_jiffies(tout));

		__set_current_state(TASK_RUNNING);

		try_to_freeze();

		tout = xfsaild_push(ailp);
	}

	memalloc_noreclaim_restore(noreclaim_flag);
	return 0;
}

/*
 * This routine is called to move the tail of the AIL forward.  It does this
 * by trying to flush items in the AIL whose lsns are below the given
 * threshold_lsn.
 *
 * The push is run asynchronously by the xfsaild task, which means the caller
 * needs to handle waiting on the async flush for space to become available.
 * We don't want to interrupt any push that is in progress, hence we only
 * update the target if it moves forward past the current one.
 *
 * We do this unlocked - we only need to know whether there is anything in the
 * AIL at the time we are called.  We don't need to access the contents of
 * any of the objects, so the lock is not needed.
 */
void
xfs_ail_push(
	struct xfs_ail		*ailp,
	xfs_lsn_t		threshold_lsn)
{
	struct xfs_log_item	*lip;

	lip = xfs_ail_min(ailp);
	if (!lip || xlog_is_shutdown(ailp->ail_log) ||
	    XFS_LSN_CMP(threshold_lsn, ailp->ail_target) <= 0)
		return;
	/*
	 * Ensure that the new target is noticed by the push code before we
	 * wake the xfsaild task.
	 */
	smp_wmb();
	xfs_trans_ail_copy_lsn(ailp, &ailp->ail_target, &threshold_lsn);
	smp_wmb();

	wake_up_process(ailp->ail_task);
}

/*
 * Push out all items in the AIL immediately
 */
void
xfs_ail_push_all(
	struct xfs_ail		*ailp)
{
	xfs_lsn_t		threshold_lsn = xfs_ail_max_lsn(ailp);

	if (threshold_lsn)
		xfs_ail_push(ailp, threshold_lsn);
}

/*
 * Push out all items in the AIL immediately and wait until the AIL is empty.
 */
void
xfs_ail_push_all_sync(
	struct xfs_ail		*ailp)
{
	struct xfs_log_item	*lip;
	DEFINE_WAIT(wait);

	spin_lock(&ailp->ail_lock);
	while ((lip = xfs_ail_max(ailp)) != NULL) {
		prepare_to_wait(&ailp->ail_empty, &wait, TASK_UNINTERRUPTIBLE);
		wake_up_process(ailp->ail_task);
		spin_unlock(&ailp->ail_lock);
		schedule();
		spin_lock(&ailp->ail_lock);
	}
	spin_unlock(&ailp->ail_lock);

	finish_wait(&ailp->ail_empty, &wait);
}
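
/*
 * Finish an AIL update that may have moved the log tail. @old_lsn is the
 * tail LSN before the update, or zero if the caller knows the tail did not
 * move. If the tail has in fact moved, recompute the log tail LSN, wake log
 * space waiters and, if the AIL is now empty, anyone sleeping in
 * xfs_ail_push_all_sync(). Drops the AIL lock in all cases.
 */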
void
xfs_ail_update_finish(
	struct xfs_ail		*ailp,
	xfs_lsn_t		old_lsn) __releases(ailp->ail_lock)
{
	struct xlog		*log = ailp->ail_log;

	/* if the tail lsn hasn't changed, don't do updates or wakeups. */
	if (!old_lsn || old_lsn == __xfs_ail_min_lsn(ailp)) {
		spin_unlock(&ailp->ail_lock);
		return;
	}

	if (!xlog_is_shutdown(log))
		xlog_assign_tail_lsn_locked(log->l_mp);

	if (list_empty(&ailp->ail_head))
		wake_up_all(&ailp->ail_empty);
	spin_unlock(&ailp->ail_lock);
	xfs_log_space_wake(log->l_mp);
}

/*
 * xfs_trans_ail_update - bulk AIL insertion operation.
 *
 * @xfs_trans_ail_update takes an array of log items that all need to be
 * positioned at the same LSN in the AIL. If an item is not in the AIL, it
 * will be added. Otherwise, it will be repositioned by removing it and
 * re-adding it to the AIL. If we move the first item in the AIL, update the
 * log tail to match the new minimum LSN in the AIL.
 *
 * This function executes the update operations on all the items in the array
 * under a single hold of the AIL lock, so the locking overhead is amortised
 * over the whole batch. Once we have the AIL lock, we need to check each log
 * item LSN to confirm it needs to be moved forward in the AIL.
 *
 * To optimise the insert operation, we delete all the items from the AIL in
 * the first pass, moving them into a temporary list, then splice the
 * temporary list into the correct position in the AIL. This avoids needing
 * to do an insert operation on every item.
 *
 * This function must be called with the AIL lock held.  The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_update_bulk(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		lsn) __releases(ailp->ail_lock)
{
	struct xfs_log_item	*mlip;
	xfs_lsn_t		tail_lsn = 0;
	int			i;
	LIST_HEAD(tmp);

	ASSERT(nr_items > 0);		/* Not required, but true. */
	mlip = xfs_ail_min(ailp);
	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (test_and_set_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
			/* check if we really need to move the item */
			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
				continue;

			trace_xfs_ail_move(lip, lip->li_lsn, lsn);
			if (mlip == lip && !tail_lsn)
				tail_lsn = lip->li_lsn;

			xfs_ail_delete(ailp, lip);
		} else {
			trace_xfs_ail_insert(lip, 0, lsn);
		}
		lip->li_lsn = lsn;
		list_add(&lip->li_ail, &tmp);
	}
	if (!list_empty(&tmp))
		xfs_ail_splice(ailp, cur, &tmp, lsn);

	xfs_ail_update_finish(ailp, tail_lsn);
}

/* Insert a log item into the AIL. */
void
xfs_trans_ail_insert(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
}
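
/*
 * Illustrative call (a sketch - the real callers are the log item commit
 * and unpin paths elsewhere, and commit_lsn is just an illustrative name
 * for the LSN the item was committed at): once an item's changes reach the
 * on-disk log, track it for writeback:
 *
 *	xfs_trans_ail_insert(lip->li_ailp, lip, commit_lsn);
 *
 * The AIL lock is taken here and dropped inside xfs_trans_ail_update_bulk().
 */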

/*
 * Delete one log item from the AIL.
 *
 * If this item was at the tail of the AIL, return the LSN of the log item so
 * that we can use it to check if the LSN of the tail of the log has moved
 * when finishing up the AIL delete process in xfs_ail_update_finish().
 */
xfs_lsn_t
xfs_ail_delete_one(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_log_item	*mlip = xfs_ail_min(ailp);
	xfs_lsn_t		lsn = lip->li_lsn;

	trace_xfs_ail_delete(lip, mlip->li_lsn, lip->li_lsn);
	xfs_ail_delete(ailp, lip);
	clear_bit(XFS_LI_IN_AIL, &lip->li_flags);
	lip->li_lsn = 0;

	if (mlip == lip)
		return lsn;
	return 0;
}

void
xfs_trans_ail_delete(
	struct xfs_log_item	*lip,
	int			shutdown_type)
{
	struct xfs_ail		*ailp = lip->li_ailp;
	struct xfs_mount	*mp = ailp->ail_log->l_mp;
	xfs_lsn_t		tail_lsn;

	spin_lock(&ailp->ail_lock);
	if (!test_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
		spin_unlock(&ailp->ail_lock);
		if (shutdown_type && !xlog_is_shutdown(ailp->ail_log)) {
			xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
	"%s: attempting to delete a log item that is not in the AIL",
					__func__);
			xfs_force_shutdown(mp, shutdown_type);
		}
		return;
	}

	/* xfs_ail_update_finish() drops the AIL lock */
	xfs_clear_li_failed(lip);
	tail_lsn = xfs_ail_delete_one(ailp, lip);
	xfs_ail_update_finish(ailp, tail_lsn);
}

int
xfs_trans_ail_init(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp;

	ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
	if (!ailp)
		return -ENOMEM;

	ailp->ail_log = mp->m_log;
	INIT_LIST_HEAD(&ailp->ail_head);
	INIT_LIST_HEAD(&ailp->ail_cursors);
	spin_lock_init(&ailp->ail_lock);
	INIT_LIST_HEAD(&ailp->ail_buf_list);
	init_waitqueue_head(&ailp->ail_empty);

	ailp->ail_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
				mp->m_super->s_id);
	if (IS_ERR(ailp->ail_task))
		goto out_free_ailp;

	mp->m_ail = ailp;
	return 0;

out_free_ailp:
	kmem_free(ailp);
	return -ENOMEM;
}

void
xfs_trans_ail_destroy(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp = mp->m_ail;

	kthread_stop(ailp->ail_task);
	kmem_free(ailp);
}