// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef	__XFS_LOG_PRIV_H__
#define __XFS_LOG_PRIV_H__

struct xfs_buf;
struct xlog;
struct xlog_ticket;
struct xfs_mount;

/*
 * get client id from packed copy.
 *
 * this hack is here because the xlog_pack code copies four bytes
 * of xlog_op_header containing the fields oh_clientid, oh_flags
 * and oh_res2 into the packed copy.
 *
 * later on this four byte chunk is treated as an int and the
 * client id is pulled out.
 *
 * this has endian issues, of course.
 */
static inline uint xlog_get_client_id(__be32 i)
{
	return be32_to_cpu(i) >> 24;
}
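
/*
 * Illustrative example (values assumed for this comment, not taken from this
 * file): for an op header whose packed first word is cpu_to_be32(0x69000000),
 * the helper above returns 0x69 - the XFS_TRANSACTION client id stored in the
 * top byte of the big-endian word.
 */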

/*
 * In core log state
 */
enum xlog_iclog_state {
	XLOG_STATE_ACTIVE,	/* Current IC log being written to */
	XLOG_STATE_WANT_SYNC,	/* Want to sync this iclog; no more writes */
	XLOG_STATE_SYNCING,	/* This IC log is syncing */
	XLOG_STATE_DONE_SYNC,	/* Done syncing to disk */
	XLOG_STATE_CALLBACK,	/* Callback functions now */
	XLOG_STATE_DIRTY,	/* Dirty IC log, not ready for ACTIVE status */
};

#define XLOG_STATE_STRINGS \
	{ XLOG_STATE_ACTIVE,	"XLOG_STATE_ACTIVE" }, \
	{ XLOG_STATE_WANT_SYNC,	"XLOG_STATE_WANT_SYNC" }, \
	{ XLOG_STATE_SYNCING,	"XLOG_STATE_SYNCING" }, \
	{ XLOG_STATE_DONE_SYNC,	"XLOG_STATE_DONE_SYNC" }, \
	{ XLOG_STATE_CALLBACK,	"XLOG_STATE_CALLBACK" }, \
	{ XLOG_STATE_DIRTY,	"XLOG_STATE_DIRTY" }

/*
 * In core log flags
 */
#define XLOG_ICL_NEED_FLUSH	(1u << 0)	/* iclog needs REQ_PREFLUSH */
#define XLOG_ICL_NEED_FUA	(1u << 1)	/* iclog needs REQ_FUA */

#define XLOG_ICL_STRINGS \
	{ XLOG_ICL_NEED_FLUSH,	"XLOG_ICL_NEED_FLUSH" }, \
	{ XLOG_ICL_NEED_FUA,	"XLOG_ICL_NEED_FUA" }

/*
 * Log ticket flags
 */
#define XLOG_TIC_PERM_RESERV	(1u << 0)	/* permanent reservation */

#define XLOG_TIC_FLAGS \
	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }

/*
 * Below are states for covering allocation transactions.
 * By covering, we mean changing the h_tail_lsn in the last on-disk
 * log write such that no allocation transactions will be re-done during
 * recovery after a system crash. Recovery starts at the last on-disk
 * log write.
 *
 * These states are used to insert dummy log entries to cover
 * space allocation transactions which can undo non-transactional changes
 * after a crash. Writes to a file with space
 * already allocated do not result in any transactions. Allocations
 * might include space beyond the EOF. So if we just push the EOF a
 * little, the last transaction for the file could contain the wrong
 * size. If there is no file system activity, after an allocation
 * transaction, and the system crashes, the allocation transaction
 * will get replayed and the file will be truncated. This could
 * be hours/days/... after the allocation occurred.
 *
 * The fix for this is to do two dummy transactions when the
 * system is idle. We need two dummy transactions because the h_tail_lsn
 * in the log record header needs to point beyond the last possible
 * non-dummy transaction. The first dummy changes the h_tail_lsn to
 * the first transaction before the dummy. The second dummy causes
 * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn.
 *
 * These dummy transactions get committed when everything
 * is idle (after there has been some activity).
 *
 * There are 5 states used to control this.
 *
 * IDLE -- no logging has been done on the file system or
 *		we are done covering previous transactions.
 * NEED -- logging has occurred and we need a dummy transaction
 *		when the log becomes idle.
 * DONE -- we were in the NEED state and have committed a dummy
 *		transaction.
 * NEED2 -- we detected that a dummy transaction has gone to the
 *		on disk log with no other transactions.
 * DONE2 -- we committed a dummy transaction when in the NEED2 state.
 *
 * There are two places where we switch states:
 *
 * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2.
 *	We commit the dummy transaction and switch to DONE or DONE2,
 *	respectively. In all other states, we don't do anything.
 *
 * 2.) When we finish writing the on-disk log (xlog_state_clean_log).
 *
 *	No matter what state we are in, if this isn't the dummy
 *	transaction going out, the next state is NEED.
 *	So, if we aren't in the DONE or DONE2 states, the next state
 *	is NEED. We can't be finishing a write of the dummy record
 *	unless it was committed and the state switched to DONE or DONE2.
 *
 *	If we are in the DONE state and this was a write of the
 *	dummy transaction, we move to NEED2.
 *
 *	If we are in the DONE2 state and this was a write of the
 *	dummy transaction, we move to IDLE.
 *
 * Writing only one dummy transaction can get appended to
 * one file space allocation. When this happens, the log recovery
 * code replays the space allocation and a file could be truncated.
 * This is why we have the NEED2 and DONE2 states before going idle.
 * The transition rules are restated as a sketch after the state
 * definitions below.
 */
#define XLOG_STATE_COVER_IDLE	0
#define XLOG_STATE_COVER_NEED	1
#define XLOG_STATE_COVER_DONE	2
#define XLOG_STATE_COVER_NEED2	3
#define XLOG_STATE_COVER_DONE2	4

#define XLOG_COVER_OPS		5
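
/*
 * Illustrative sketch only: the covering state transitions described above,
 * restated as a hypothetical helper. The real transition logic lives in
 * xlog_state_clean_log(); this helper is not called anywhere and exists
 * purely to make the rules above concrete.
 */
static inline int
xlog_covering_next_state(int cur_state, bool wrote_dummy)
{
	/* Any non-dummy log write means we need to start covering again. */
	if (!wrote_dummy)
		return XLOG_STATE_COVER_NEED;
	/* First dummy has hit the disk: one more dummy is required. */
	if (cur_state == XLOG_STATE_COVER_DONE)
		return XLOG_STATE_COVER_NEED2;
	/* Second dummy has hit the disk: the log is fully covered. */
	if (cur_state == XLOG_STATE_COVER_DONE2)
		return XLOG_STATE_COVER_IDLE;
	/*
	 * A dummy write can only complete from DONE or DONE2, so any other
	 * state falls back to NEED.
	 */
	return XLOG_STATE_COVER_NEED;
}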

typedef struct xlog_ticket {
	struct list_head	t_queue;	/* reserve/write queue */
	struct task_struct	*t_task;	/* task that owns this ticket */
	xlog_tid_t		t_tid;		/* transaction identifier */
	atomic_t		t_ref;		/* ticket reference count */
	int			t_curr_res;	/* current reservation */
	int			t_unit_res;	/* unit reservation */
	char			t_ocnt;		/* original unit count */
	char			t_cnt;		/* current unit count */
	uint8_t			t_flags;	/* properties of reservation */
	int			t_iclog_hdrs;	/* iclog hdrs in t_curr_res */
} xlog_ticket_t;

/*
 * - A log record header is 512 bytes. There is plenty of room to grow the
 *	xlog_rec_header_t into the reserved space.
 * - ic_data follows, so a write to disk can start at the beginning of
 *	the iclog.
 * - ic_forcewait is used to implement synchronous forcing of the iclog to disk.
 * - ic_next is the pointer to the next iclog in the ring.
 * - ic_log is a pointer back to the global log structure.
 * - ic_size is the full size of the log buffer, minus the cycle headers.
 * - ic_offset is the current number of bytes written to in this iclog.
 * - ic_refcnt is bumped when someone is writing to the log.
 * - ic_state is the state of the iclog.
 *
 * Because of cacheline contention on large machines, we need to separate
 * various resources onto different cachelines. To start with, make the
 * structure cacheline aligned. The following fields can be contended on
 * by independent processes:
 *
 *	- ic_callbacks
 *	- ic_refcnt
 *	- fields protected by the global l_icloglock
 *
 * so we need to ensure that these fields are located in separate cachelines.
 * We'll put all the read-only and l_icloglock fields in the first cacheline,
 * and move everything else out to subsequent cachelines.
 */
typedef struct xlog_in_core {
	wait_queue_head_t	ic_force_wait;
	wait_queue_head_t	ic_write_wait;
	struct xlog_in_core	*ic_next;
	struct xlog_in_core	*ic_prev;
	struct xlog		*ic_log;
	u32			ic_size;
	u32			ic_offset;
	enum xlog_iclog_state	ic_state;
	unsigned int		ic_flags;
	void			*ic_datap;	/* pointer to iclog data */
	struct list_head	ic_callbacks;

	/* reference counts need their own cacheline */
	atomic_t		ic_refcnt ____cacheline_aligned_in_smp;
	xlog_in_core_2_t	*ic_data;
#define ic_header	ic_data->hic_header
#ifdef DEBUG
	bool			ic_fail_crc : 1;
#endif
	struct semaphore	ic_sema;
	struct work_struct	ic_end_io_work;
	struct bio		ic_bio;
	struct bio_vec		ic_bvec[];
} xlog_in_core_t;

/*
 * The CIL context is used to aggregate per-transaction details as well as be
 * passed to the iclog for checkpoint post-commit processing. After being
 * passed to the iclog, another context needs to be allocated for tracking the
 * next set of transactions to be aggregated into a checkpoint.
 */
struct xfs_cil;

struct xfs_cil_ctx {
	struct xfs_cil		*cil;
	xfs_csn_t		sequence;	/* chkpt sequence # */
	xfs_lsn_t		start_lsn;	/* first LSN of chkpt commit */
	xfs_lsn_t		commit_lsn;	/* chkpt commit record lsn */
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*ticket;	/* chkpt ticket */
	atomic_t		space_used;	/* aggregate size of regions */
	struct list_head	busy_extents;	/* busy extents in chkpt */
	struct list_head	log_items;	/* log items in chkpt */
	struct list_head	lv_chain;	/* logvecs being pushed */
	struct list_head	iclog_entry;
	struct list_head	committing;	/* ctx committing list */
	struct work_struct	discard_endio_work;
	struct work_struct	push_work;
	atomic_t		order_id;

	/*
	 * CPUs that could have added items to the percpu CIL data. Access is
	 * coordinated with xc_ctx_lock.
	 */
	struct cpumask		cil_pcpmask;
};

/*
 * Per-cpu CIL tracking items
 */
struct xlog_cil_pcp {
	int32_t			space_used;
	uint32_t		space_reserved;
	struct list_head	busy_extents;
	struct list_head	log_items;
};

/*
 * Committed Item List structure
 *
 * This structure is used to track log items that have been committed but not
 * yet written into the log. It is used only when the delayed logging mount
 * option is enabled.
 *
 * This structure tracks the list of committing checkpoint contexts so
 * we can avoid the problem of having to hold out new transactions during a
 * flush until we have the commit record LSN of the checkpoint. We can
 * traverse the list of committing contexts in xlog_cil_push_lsn() to find a
 * sequence match and extract the commit LSN directly from there. If the
 * checkpoint is still in the process of committing, we can block waiting for
 * the commit LSN to be determined as well. This should make synchronous
 * operations almost as efficient as the old logging methods.
 */
struct xfs_cil {
	struct xlog		*xc_log;
	unsigned long		xc_flags;
	atomic_t		xc_iclog_hdrs;
	struct workqueue_struct	*xc_push_wq;

	struct rw_semaphore	xc_ctx_lock ____cacheline_aligned_in_smp;
	struct xfs_cil_ctx	*xc_ctx;

	spinlock_t		xc_push_lock ____cacheline_aligned_in_smp;
	xfs_csn_t		xc_push_seq;
	bool			xc_push_commit_stable;
	struct list_head	xc_committing;
	wait_queue_head_t	xc_commit_wait;
	wait_queue_head_t	xc_start_wait;
	xfs_csn_t		xc_current_sequence;
	wait_queue_head_t	xc_push_wait;	/* background push throttle */

	void __percpu		*xc_pcp;	/* percpu CIL structures */
} ____cacheline_aligned_in_smp;

/* xc_flags bit values */
#define XLOG_CIL_EMPTY		1
#define XLOG_CIL_PCP_SPACE	2

/*
 * The amount of log space we allow the CIL to aggregate is difficult to size.
 * Whatever we choose, we have to make sure we can get a reservation for the
 * log space effectively, that it is large enough to capture sufficient
 * relogging to reduce log buffer IO significantly, but it is not too large for
 * the log or induces too much latency when writing out through the iclogs. We
 * track both space consumed and the number of vectors in the checkpoint
 * context, so we need to decide which to use for limiting.
 *
 * Every log buffer we write out during a push needs a header reserved, which
 * is at least one sector and more for v2 logs. Hence we need a reservation of
 * at least 512 bytes per 32k of log space just for the LR headers. That means
 * 16KB of reservation per megabyte of delayed logging space we will consume,
 * plus various headers. The number of headers will vary based on the number
 * of I/O vectors, so limiting on a specific number of vectors is going to
 * result in transactions of varying size. IOWs, it is more consistent to
 * track and limit space consumed in the log rather than by the number of
 * objects being logged in order to prevent checkpoint ticket overruns.
 *
 * Further, use of static reservations through the log grant mechanism is
 * problematic. It introduces a lot of complexity (e.g. reserve grant vs write
 * grant) and a significant deadlock potential because regranting write space
 * can block on log pushes. Hence if we have to regrant log space during a log
 * push, we can deadlock.
 *
 * However, we can avoid this by use of a dynamic "reservation stealing"
 * technique during transaction commit whereby unused reservation space in the
 * transaction ticket is transferred to the CIL ctx commit ticket to cover the
 * space needed by the checkpoint transaction. This means that we never need to
 * specifically reserve space for the CIL checkpoint transaction, nor do we
 * need to regrant space once the checkpoint completes. This also means the
 * checkpoint transaction ticket is specific to the checkpoint context, rather
 * than the CIL itself.
 *
 * With dynamic reservations, we can effectively make up arbitrary limits for
 * the checkpoint size so long as they don't violate any other size rules.
 * Recovery imposes a rule that no transaction exceed half the log, so we are
 * limited by that. Furthermore, the log transaction reservation subsystem
 * tries to keep 25% of the log free, so we need to keep below that limit or we
 * risk running out of free log space to start any new transactions.
 *
 * In order to keep background CIL push efficient, we only need to ensure the
 * CIL is large enough to maintain sufficient in-memory relogging to avoid
 * repeated physical writes of frequently modified metadata. If we allow the CIL
 * to grow to a substantial fraction of the log, then we may be pinning hundreds
 * of megabytes of metadata in memory until the CIL flushes. This can cause
 * issues when we are running low on memory - pinned memory cannot be reclaimed,
 * and the CIL consumes a lot of memory. Hence we need to set an upper physical
 * size limit for the CIL that limits the maximum amount of memory pinned by the
 * CIL but does not limit performance by reducing relogging efficiency
 * significantly.
 *
 * As such, the CIL push threshold ends up being the smaller of two thresholds:
 * - a threshold large enough that it allows CIL to be pushed and progress to be
 *   made without excessive blocking of incoming transaction commits. This is
 *   defined to be 12.5% of the log space - half the 25% push threshold of the
 *   AIL.
 *
 * - small enough that it doesn't pin excessive amounts of memory but maintains
 *   close to peak relogging efficiency. This is defined to be 16x the iclog
 *   buffer window (32MB) as measurements have shown this to be roughly the
 *   point of diminishing performance increases under highly concurrent
 *   modification workloads.
 *
 * To prevent the CIL from overflowing upper commit size bounds, we introduce a
 * new threshold at which we block committing transactions until the background
 * CIL commit commences and switches to a new context. While this is not a hard
 * limit, it forces the process committing a transaction to the CIL to block and
 * yield the CPU, giving the CIL push work a chance to be scheduled and start
 * work. This prevents a process running lots of transactions from overfilling
 * the CIL because it is not yielding the CPU. We set the blocking limit at
 * twice the background push space threshold so we keep in line with the AIL
 * push thresholds.
 *
 * Note: this is not a -hard- limit as blocking is applied after the transaction
 * is inserted into the CIL and the push has been triggered. It is largely a
 * throttling mechanism that allows the CIL push to be scheduled and run. A hard
 * limit will be difficult to implement without introducing global serialisation
 * in the CIL commit fast path, and it's not at all clear that we actually need
 * such hard limits given the ~7 years we've run without a hard limit before
 * finding the first situation where a checkpoint size overflow actually
 * occurred. Hence the simple throttle, and an ASSERT check to tell us that
 * we've overrun the max size.
 */
#define XLOG_CIL_SPACE_LIMIT(log)	\
	min_t(int, (log)->l_logsize >> 3, BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4)

#define XLOG_CIL_BLOCKING_SPACE_LIMIT(log)	\
	(XLOG_CIL_SPACE_LIMIT(log) * 2)
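
/*
 * Worked example (illustrative numbers only, assuming a 2GB v2 log):
 * (log)->l_logsize >> 3 is 256MB, while BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4
 * works out to roughly 32MB - the "16x the iclog buffer window" described
 * above. min_t() therefore selects ~32MB as XLOG_CIL_SPACE_LIMIT(), and
 * XLOG_CIL_BLOCKING_SPACE_LIMIT() is twice that, ~64MB. Only on logs smaller
 * than about 256MB does the 12.5% term become the limiting factor.
 */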

/*
 * ticket grant locks, queues and accounting have their own cachelines
 * as these are quite hot and can be operated on concurrently.
 */
struct xlog_grant_head {
	spinlock_t		lock ____cacheline_aligned_in_smp;
	struct list_head	waiters;
	atomic64_t		grant;
};

/*
 * The reservation head lsn is not made up of a cycle number and block number.
 * Instead, it uses a cycle number and byte number. Logs don't expect to
 * overflow 31 bits worth of byte offset, so using a byte number will mean
 * that round off problems won't occur when releasing partial reservations.
 */
struct xlog {
	/* The following fields don't need locking */
	struct xfs_mount	*l_mp;		/* mount point */
	struct xfs_ail		*l_ailp;	/* AIL log is working with */
	struct xfs_cil		*l_cilp;	/* CIL log is working with */
	struct xfs_buftarg	*l_targ;	/* buftarg of log */
	struct workqueue_struct	*l_ioend_workqueue; /* for I/O completions */
	struct delayed_work	l_work;		/* background flush work */
	long			l_opstate;	/* operational state */
	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
	struct list_head	*l_buf_cancel_table;
	int			l_iclog_hsize;	/* size of iclog header */
	int			l_iclog_heads;	/* # of iclog header sectors */
	uint			l_sectBBsize;	/* sector size in BBs (2^n) */
	int			l_iclog_size;	/* size of iclog in bytes */
	int			l_iclog_bufs;	/* number of iclog buffers */
	xfs_daddr_t		l_logBBstart;	/* start block of log */
	int			l_logsize;	/* size of log in bytes */
	int			l_logBBsize;	/* size of log in BB chunks */

	/* The following block of fields are changed while holding icloglock */
	wait_queue_head_t	l_flush_wait ____cacheline_aligned_in_smp;
						/* waiting for iclog flush */
	int			l_covered_state;/* state of "covering disk
						 * log entries" */
	xlog_in_core_t		*l_iclog;	/* head log queue */
	spinlock_t		l_icloglock;	/* grab to change iclog state */
	int			l_curr_cycle;	/* Cycle number of log writes */
	int			l_prev_cycle;	/* Cycle number before last
						 * block increment */
	int			l_curr_block;	/* current logical log block */
	int			l_prev_block;	/* previous logical log block */

	/*
	 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
	 * read without needing to hold specific locks. To avoid operations
	 * contending with other hot objects, place each of them on a separate
	 * cacheline.
	 */
	/* lsn of last LR on disk */
	atomic64_t		l_last_sync_lsn ____cacheline_aligned_in_smp;
	/* lsn of 1st LR with unflushed buffers */
	atomic64_t		l_tail_lsn ____cacheline_aligned_in_smp;

	struct xlog_grant_head	l_reserve_head;
	struct xlog_grant_head	l_write_head;

	struct xfs_kobj		l_kobj;

	/* log recovery lsn tracking (for buffer submission) */
	xfs_lsn_t		l_recovery_lsn;

	uint32_t		l_iclog_roundoff;/* padding roundoff */

	/* Users of log incompat features should take a read lock. */
	struct rw_semaphore	l_incompat_users;
};

/*
 * Bits for operational state
 */
#define XLOG_ACTIVE_RECOVERY	0	/* in the middle of recovery */
#define XLOG_RECOVERY_NEEDED	1	/* log was recovered */
#define XLOG_IO_ERROR		2	/* log hit an I/O error, and being
					   shut down */
#define XLOG_TAIL_WARN		3	/* log tail verify warning issued */

static inline bool
xlog_recovery_needed(struct xlog *log)
{
	return test_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
}

static inline bool
xlog_in_recovery(struct xlog *log)
{
	return test_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
}

static inline bool
xlog_is_shutdown(struct xlog *log)
{
	return test_bit(XLOG_IO_ERROR, &log->l_opstate);
}

/*
 * Wait until the xlog_force_shutdown() has marked the log as shut down
 * so xlog_is_shutdown() will always return true.
 */
static inline void
xlog_shutdown_wait(
	struct xlog	*log)
{
	wait_var_event(&log->l_opstate, xlog_is_shutdown(log));
}

/* common routines */
extern int
xlog_recover(
	struct xlog		*log);
extern int
xlog_recover_finish(
	struct xlog		*log);
extern void
xlog_recover_cancel(struct xlog *);

extern __le32	 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
			    char *dp, int size);

extern struct kmem_cache *xfs_log_ticket_cache;
struct xlog_ticket *xlog_ticket_alloc(struct xlog *log, int unit_bytes,
		int count, bool permanent);

void	xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
void	xlog_print_trans(struct xfs_trans *);
int	xlog_write(struct xlog *log, struct xfs_cil_ctx *ctx,
		struct list_head *lv_chain, struct xlog_ticket *tic,
		uint32_t len);
void	xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket);
void	xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);

void	xlog_state_switch_iclogs(struct xlog *log, struct xlog_in_core *iclog,
		int eventual_size);
int	xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog,
		struct xlog_ticket *ticket);

/*
 * When we crack an atomic LSN, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from. This should always
 * be used to sample and crack LSNs that are stored and updated in atomic
 * variables.
 */
static inline void
xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
{
	xfs_lsn_t val = atomic64_read(lsn);

	*cycle = CYCLE_LSN(val);
	*block = BLOCK_LSN(val);
}

/*
 * Calculate and assign a value to an atomic LSN variable from component pieces.
 */
static inline void
xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
{
	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
}

/*
 * When we crack the grant head, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from.
 */
static inline void
xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
{
	*cycle = val >> 32;
	*space = val & 0xffffffff;
}

static inline void
xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
{
	xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
}

static inline int64_t
xlog_assign_grant_head_val(int cycle, int space)
{
	return ((int64_t)cycle << 32) | space;
}

static inline void
xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
{
	atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
}
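
/*
 * Usage sketch (hypothetical helper, not part of the log code): packing a
 * cycle/byte-count pair into a grant head and cracking it back returns the
 * original values, since the cycle lives in the high 32 bits and the byte
 * count in the low 32 bits.
 */
static inline void
xlog_grant_head_pack_example(void)
{
	atomic64_t	head;
	int		cycle, space;

	xlog_assign_grant_head(&head, 5, 4096);
	xlog_crack_grant_head(&head, &cycle, &space);
	/* cycle == 5, space == 4096 */
}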

/*
 * Committed Item List interfaces
 */
int	xlog_cil_init(struct xlog *log);
void	xlog_cil_init_post_recovery(struct xlog *log);
void	xlog_cil_destroy(struct xlog *log);
bool	xlog_cil_empty(struct xlog *log);
void	xlog_cil_commit(struct xlog *log, struct xfs_trans *tp,
			xfs_csn_t *commit_seq, bool regrant);
void	xlog_cil_set_ctx_write_state(struct xfs_cil_ctx *ctx,
			struct xlog_in_core *iclog);

/*
 * CIL force routines
 */
void	xlog_cil_flush(struct xlog *log);
xfs_lsn_t xlog_cil_force_seq(struct xlog *log, xfs_csn_t sequence);

static inline void
xlog_cil_force(struct xlog *log)
{
	xlog_cil_force_seq(log, log->l_cilp->xc_current_sequence);
}

/*
 * Wrapper function for waiting on a wait queue serialised against wakeups
 * by a spinlock. This matches the semantics of all the wait queues used in the
 * log code.
 */
static inline void
xlog_wait(
	struct wait_queue_head	*wq,
	struct spinlock		*lock)
		__releases(lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(wq, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock(lock);
	schedule();
	remove_wait_queue(wq, &wait);
}
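
/*
 * Typical usage sketch (hypothetical caller, not an existing log routine):
 * the sleeper queues itself while holding the lock, and xlog_wait() drops
 * that lock before scheduling, so the waker can take the same lock, update
 * state and wake the queue without racing the sleeper.
 *
 *	spin_lock(&log->l_icloglock);
 *	if (need_to_wait)
 *		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 *	else
 *		spin_unlock(&log->l_icloglock);
 */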

int xlog_wait_on_iclog(struct xlog_in_core *iclog);

/*
 * The LSN is valid so long as it is behind the current LSN. If it isn't, this
 * means that the next log record that includes this metadata could have a
 * smaller LSN. In turn, this means that the modification in the log would not
 * replay.
 */
static inline bool
xlog_valid_lsn(
	struct xlog	*log,
	xfs_lsn_t	lsn)
{
	int		cur_cycle;
	int		cur_block;
	bool		valid = true;

	/*
	 * First, sample the current lsn without locking to avoid added
	 * contention from metadata I/O. The current cycle and block are updated
	 * (in xlog_state_switch_iclogs()) and read here in a particular order
	 * to avoid false negatives (e.g., thinking the metadata LSN is valid
	 * when it is not).
	 *
	 * The current block is always rewound before the cycle is bumped in
	 * xlog_state_switch_iclogs() to ensure the current LSN is never seen in
	 * a transiently forward state. Instead, we can see the LSN in a
	 * transiently behind state if we happen to race with a cycle wrap.
	 */
	cur_cycle = READ_ONCE(log->l_curr_cycle);
	smp_rmb();
	cur_block = READ_ONCE(log->l_curr_block);

	if ((CYCLE_LSN(lsn) > cur_cycle) ||
	    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) {
		/*
		 * If the metadata LSN appears invalid, it's possible the check
		 * above raced with a wrap to the next log cycle. Grab the lock
		 * and check definitively.
		 */
		spin_lock(&log->l_icloglock);
		cur_cycle = log->l_curr_cycle;
		cur_block = log->l_curr_block;
		spin_unlock(&log->l_icloglock);

		if ((CYCLE_LSN(lsn) > cur_cycle) ||
		    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block))
			valid = false;
	}

	return valid;
}

/*
 * Log vector and shadow buffers can be large, so we need to use kvmalloc() here
 * to ensure success. Unfortunately, kvmalloc() only allows GFP_KERNEL contexts
 * to fall back to vmalloc, so we can't actually do anything useful with gfp
 * flags to control the kmalloc() behaviour within kvmalloc(). Hence kmalloc()
 * will do direct reclaim and compaction in the slow path, both of which are
 * horrendously expensive. We just want kmalloc to fail fast and fall back to
 * vmalloc if it can't get something straight away from the free lists or
 * buddy allocator. Hence we have to open code kvmalloc ourselves here.
 *
 * This assumes that the caller uses memalloc_nofs_save task context here, so
 * despite the use of GFP_KERNEL here, we are going to be doing GFP_NOFS
 * allocations. This is actually the only way to make vmalloc() do GFP_NOFS
 * allocations, so let's just all pretend this is a GFP_KERNEL context
 * operation.
 */
static inline void *
xlog_kvmalloc(
	size_t		buf_size)
{
	gfp_t		flags = GFP_KERNEL;
	void		*p;

	flags &= ~__GFP_DIRECT_RECLAIM;
	flags |= __GFP_NOWARN | __GFP_NORETRY;
	p = kmalloc(buf_size, flags);
	if (!p)
		p = vmalloc(buf_size);
	return p;
}

#endif	/* __XFS_LOG_PRIV_H__ */