1 /* SPDX-License-Identifier: GPL-2.0+ */
2 #ifndef _LINUX_XARRAY_H
3 #define _LINUX_XARRAY_H
6 * Copyright (c) 2017 Microsoft Corporation
7 * Author: Matthew Wilcox <willy@infradead.org>
9 * See Documentation/core-api/xarray.rst for how to use the XArray.
12 #include <linux/bug.h>
13 #include <linux/compiler.h>
14 #include <linux/gfp.h>
15 #include <linux/kconfig.h>
16 #include <linux/kernel.h>
17 #include <linux/rcupdate.h>
18 #include <linux/spinlock.h>
19 #include <linux/types.h>
22 * The bottom two bits of the entry determine how the XArray interprets
27 * x1: Value entry or tagged pointer
29 * Attempting to store internal entries in the XArray is a bug.
31 * Most internal entries are pointers to the next node in the tree.
32 * The following internal entries have a special meaning:
34 * 0-62: Sibling entries
38 * Errors are also represented as internal entries, but use the negative
39 * space (-4094 to -2). They're never stored in the slots array; only
40 * returned by the normal API.
43 #define BITS_PER_XA_VALUE (BITS_PER_LONG - 1)
46 * xa_mk_value() - Create an XArray entry from an integer.
47 * @v: Value to store in XArray.
49 * Context: Any context.
50 * Return: An entry suitable for storing in the XArray.
52 static inline void *xa_mk_value(unsigned long v)
55 return (void *)((v << 1) | 1);
59 * xa_to_value() - Get value stored in an XArray entry.
60 * @entry: XArray entry.
62 * Context: Any context.
63 * Return: The value stored in the XArray entry.
65 static inline unsigned long xa_to_value(const void *entry)
67 return (unsigned long)entry >> 1;
71 * xa_is_value() - Determine if an entry is a value.
72 * @entry: XArray entry.
74 * Context: Any context.
75 * Return: True if the entry is a value, false if it is a pointer.
77 static inline bool xa_is_value(const void *entry)
79 return (unsigned long)entry & 1;
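/*
 * Usage sketch (illustrative, not part of this header): small integers can
 * be stored directly as value entries, avoiding a memory allocation.  The
 * array, index and values below are made up for the example.
 *
 *	void *entry;
 *	unsigned long v;
 *
 *	xa_store(&array, 7, xa_mk_value(42), GFP_KERNEL);
 *	entry = xa_load(&array, 7);
 *	if (xa_is_value(entry))
 *		v = xa_to_value(entry);		(v is now 42)
 */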
83 * xa_tag_pointer() - Create an XArray entry for a tagged pointer.
85 * @tag: Tag value (0, 1 or 3).
87 * If the user of the XArray prefers, they can tag their pointers instead
88 * of storing value entries. Three tags are available (0, 1 and 3).
89 * These are distinct from the xa_mark_t as they are not replicated up
90 * through the array and cannot be searched for.
92 * Context: Any context.
93 * Return: An XArray entry.
95 static inline void *xa_tag_pointer(void *p, unsigned long tag)
97 return (void *)((unsigned long)p | tag);
101 * xa_untag_pointer() - Turn an XArray entry into a plain pointer.
102 * @entry: XArray entry.
104 * If you have stored a tagged pointer in the XArray, call this function
105 * to get the untagged version of the pointer.
107 * Context: Any context.
110 static inline void *xa_untag_pointer(void *entry)
112 return (void *)((unsigned long)entry & ~3UL);
116 * xa_pointer_tag() - Get the tag stored in an XArray entry.
117 * @entry: XArray entry.
119 * If you have stored a tagged pointer in the XArray, call this function
120 * to get the tag of that pointer.
122 * Context: Any context.
125 static inline unsigned int xa_pointer_tag(void *entry)
127 return (unsigned long)entry & 3UL;
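/*
 * Usage sketch (illustrative): telling two kinds of pointers stored in the
 * same array apart by tagging them.  @p is assumed to be at least 4-byte
 * aligned so that the bottom two bits are free; the names are made up.
 *
 *	xa_store(&array, index, xa_tag_pointer(p, 1), GFP_KERNEL);
 *	entry = xa_load(&array, index);
 *	if (xa_pointer_tag(entry) == 1)
 *		p = xa_untag_pointer(entry);
 */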
131 * xa_mk_internal() - Create an internal entry.
132 * @v: Value to turn into an internal entry.
134 * Internal entries are used for a number of purposes. Entries 0-255 are
135 * used for sibling entries (only 0-62 are used by the current code). 256
136 * is used for the retry entry. 257 is used for the reserved / zero entry.
137 * Negative internal entries are used to represent errnos. Node pointers
138 * are also tagged as internal entries in some situations.
140 * Context: Any context.
141 * Return: An XArray internal entry corresponding to this value.
143 static inline void *xa_mk_internal(unsigned long v)
145 return (void *)((v << 2) | 2);
149 * xa_to_internal() - Extract the value from an internal entry.
150 * @entry: XArray entry.
152 * Context: Any context.
153 * Return: The value which was stored in the internal entry.
155 static inline unsigned long xa_to_internal(const void *entry)
157 return (unsigned long)entry >> 2;
161 * xa_is_internal() - Is the entry an internal entry?
162 * @entry: XArray entry.
164 * Context: Any context.
165 * Return: %true if the entry is an internal entry.
167 static inline bool xa_is_internal(const void *entry)
169 return ((unsigned long)entry & 3) == 2;
172 #define XA_ZERO_ENTRY xa_mk_internal(257)
175 * xa_is_zero() - Is the entry a zero entry?
176 * @entry: Entry retrieved from the XArray
178 * The normal API will return NULL as the contents of a slot containing
179 * a zero entry. You can only see zero entries by using the advanced API.
181 * Return: %true if the entry is a zero entry.
183 static inline bool xa_is_zero(const void *entry)
185 return unlikely(entry == XA_ZERO_ENTRY);
189 * xa_is_err() - Report whether an XArray operation returned an error
190 * @entry: Result from calling an XArray function
192 * If an XArray operation cannot complete, it will return
193 * a special value indicating an error. This function tells you
194 * whether an error occurred; xa_err() tells you which error occurred.
196 * Context: Any context.
197 * Return: %true if the entry indicates an error.
199 static inline bool xa_is_err(const void *entry)
201 return unlikely(xa_is_internal(entry) &&
202 entry >= xa_mk_internal(-MAX_ERRNO));
206 * xa_err() - Turn an XArray result into an errno.
207 * @entry: Result from calling an XArray function.
209 * If an XArray operation cannot complete, it will return
210 * a special pointer value which encodes an errno. This function extracts
211 * the errno from the pointer value, or returns 0 if the pointer does not
212 * represent an errno.
214 * Context: Any context.
215 * Return: A negative errno or 0.
217 static inline int xa_err(void *entry)
219 /* xa_to_internal() would not do sign extension. */
220 if (xa_is_err(entry))
221 return (long)entry >> 2;
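/*
 * Usage sketch (illustrative): xa_store() returns the old entry on success
 * or an encoded errno on failure, so check its result with xa_is_err() and
 * convert it with xa_err().  The names below are made up.
 *
 *	old = xa_store(&array, index, item, GFP_KERNEL);
 *	if (xa_is_err(old))
 *		return xa_err(old);		(e.g. -ENOMEM)
 */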
226 * struct xa_limit - Represents a range of IDs.
227 * @min: The lowest ID to allocate (inclusive).
228 * @max: The maximum ID to allocate (inclusive).
230 * This structure is used either directly or via the XA_LIMIT() macro
231 * to communicate the range of IDs that are valid for allocation.
232 * Three common ranges are predefined for you:
233 * * xa_limit_32b - [0 - UINT_MAX]
234 * * xa_limit_31b - [0 - INT_MAX]
235 * * xa_limit_16b - [0 - USHRT_MAX]
242 #define XA_LIMIT(_min, _max) (struct xa_limit) { .min = _min, .max = _max }
244 #define xa_limit_32b XA_LIMIT(0, UINT_MAX)
245 #define xa_limit_31b XA_LIMIT(0, INT_MAX)
246 #define xa_limit_16b XA_LIMIT(0, USHRT_MAX)
248 typedef unsigned __bitwise xa_mark_t;
249 #define XA_MARK_0 ((__force xa_mark_t)0U)
250 #define XA_MARK_1 ((__force xa_mark_t)1U)
251 #define XA_MARK_2 ((__force xa_mark_t)2U)
252 #define XA_PRESENT ((__force xa_mark_t)8U)
253 #define XA_MARK_MAX XA_MARK_2
254 #define XA_FREE_MARK XA_MARK_0
262 * Values for xa_flags. The radix tree stores its GFP flags in the xa_flags,
263 * and we remain compatible with that.
265 #define XA_FLAGS_LOCK_IRQ ((__force gfp_t)XA_LOCK_IRQ)
266 #define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH)
267 #define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U)
268 #define XA_FLAGS_ZERO_BUSY ((__force gfp_t)8U)
269 #define XA_FLAGS_ALLOC_WRAPPED ((__force gfp_t)16U)
270 #define XA_FLAGS_ACCOUNT ((__force gfp_t)32U)
271 #define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
272 (__force unsigned)(mark)))
274 /* ALLOC is for a normal 0-based alloc. ALLOC1 is for a 1-based alloc */
275 #define XA_FLAGS_ALLOC (XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK))
276 #define XA_FLAGS_ALLOC1 (XA_FLAGS_TRACK_FREE | XA_FLAGS_ZERO_BUSY)
279 * struct xarray - The anchor of the XArray.
280 * @xa_lock: Lock that protects the contents of the XArray.
282 * To use the xarray, define it statically or embed it in your data structure.
283 * It is a very small data structure, so it does not usually make sense to
284 * allocate it separately and keep a pointer to it in your data structure.
286 * You may use the xa_lock to protect your own data structures as well.
289 * If all of the entries in the array are NULL, @xa_head is a NULL pointer.
290 * If the only non-NULL entry in the array is at index 0, @xa_head is that
291 * entry. If any other entry in the array is non-NULL, @xa_head points to an @xa_node.
296 /* private: The rest of the data structure is not to be used directly. */
298 void __rcu * xa_head;
301 #define XARRAY_INIT(name, flags) { \
302 .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock), \
308 * DEFINE_XARRAY_FLAGS() - Define an XArray with custom flags.
309 * @name: A string that names your XArray.
310 * @flags: XA_FLAG values.
312 * This is intended for file scope definitions of XArrays. It declares
313 * and initialises an empty XArray with the chosen name and flags. It is
314 * equivalent to calling xa_init_flags() on the array, but it does the
315 * initialisation at compile time instead of runtime.
317 #define DEFINE_XARRAY_FLAGS(name, flags) \
318 struct xarray name = XARRAY_INIT(name, flags)
321 * DEFINE_XARRAY() - Define an XArray.
322 * @name: A string that names your XArray.
324 * This is intended for file scope definitions of XArrays. It declares
325 * and initialises an empty XArray with the chosen name. It is equivalent
326 * to calling xa_init() on the array, but it does the initialisation at
327 * compile time instead of runtime.
329 #define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0)
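/*
 * Usage sketch (illustrative): a file-scope XArray defined this way needs
 * no xa_init() call before its first use.  "my_objects" and "add_object"
 * are made-up names.
 *
 *	static DEFINE_XARRAY(my_objects);
 *
 *	static int add_object(unsigned long index, void *obj)
 *	{
 *		return xa_err(xa_store(&my_objects, index, obj, GFP_KERNEL));
 *	}
 */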
332 * DEFINE_XARRAY_ALLOC() - Define an XArray which allocates IDs starting at 0.
333 * @name: A string that names your XArray.
335 * This is intended for file scope definitions of allocating XArrays.
336 * See also DEFINE_XARRAY().
338 #define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)
341 * DEFINE_XARRAY_ALLOC1() - Define an XArray which allocates IDs starting at 1.
342 * @name: A string that names your XArray.
344 * This is intended for file scope definitions of allocating XArrays.
345 * See also DEFINE_XARRAY().
347 #define DEFINE_XARRAY_ALLOC1(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC1)
349 void *xa_load(struct xarray *, unsigned long index);
350 void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
351 void *xa_erase(struct xarray *, unsigned long index);
352 void *xa_store_range(struct xarray *, unsigned long first, unsigned long last,
354 bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
355 void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
356 void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
357 void *xa_find(struct xarray *xa, unsigned long *index,
358 unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
359 void *xa_find_after(struct xarray *xa, unsigned long *index,
360 unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
361 unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
362 unsigned long max, unsigned int n, xa_mark_t);
363 void xa_destroy(struct xarray *);
366 * xa_init_flags() - Initialise an empty XArray with flags.
368 * @flags: XA_FLAG values.
370 * If you need to initialise an XArray with special flags (e.g. you need
371 * to take the lock from interrupt context), use this function instead of xa_init().
374 * Context: Any context.
376 static inline void xa_init_flags(struct xarray *xa, gfp_t flags)
378 spin_lock_init(&xa->xa_lock);
379 xa->xa_flags = flags;
384 * xa_init() - Initialise an empty XArray.
387 * An empty XArray is full of NULL entries.
389 * Context: Any context.
391 static inline void xa_init(struct xarray *xa)
393 xa_init_flags(xa, 0);
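/*
 * Usage sketch (illustrative): embedding an XArray in your own structure
 * and initialising it at runtime.  "struct my_device" is a made-up type.
 *
 *	struct my_device {
 *		struct xarray buffers;
 *	};
 *
 *	static void my_device_init(struct my_device *dev)
 *	{
 *		xa_init(&dev->buffers);
 *	}
 */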
397 * xa_empty() - Determine if an array has any present entries.
400 * Context: Any context.
401 * Return: %true if the array contains only NULL pointers.
403 static inline bool xa_empty(const struct xarray *xa)
405 return xa->xa_head == NULL;
409 * xa_marked() - Inquire whether any entry in this array has a mark set
413 * Context: Any context.
414 * Return: %true if any entry has this mark set.
416 static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
418 return xa->xa_flags & XA_FLAGS_MARK(mark);
422 * xa_for_each_range() - Iterate over a portion of an XArray.
424 * @index: Index of @entry.
425 * @entry: Entry retrieved from array.
426 * @start: First index to retrieve from array.
427 * @last: Last index to retrieve from array.
429 * During the iteration, @entry will have the value of the entry stored
430 * in @xa at @index. You may modify @index during the iteration if you
431 * want to skip or reprocess indices. It is safe to modify the array
432 * during the iteration. At the end of the iteration, @entry will be set
433 * to NULL and @index will have a value less than or equal to @last.
435 * xa_for_each_range() is O(n.log(n)) while xas_for_each() is O(n). You have
436 * to handle your own locking with xas_for_each(), and if you have to unlock
437 * after each iteration, it will also end up being O(n.log(n)).
438 * xa_for_each_range() will spin if it hits a retry entry; if you intend to
439 * see retry entries, you should use the xas_for_each() iterator instead.
440 * The xas_for_each() iterator will expand into more inline code than
441 * xa_for_each_range().
443 * Context: Any context. Takes and releases the RCU lock.
445 #define xa_for_each_range(xa, index, entry, start, last) \
446 for (index = start, \
447 entry = xa_find(xa, &index, last, XA_PRESENT); \
449 entry = xa_find_after(xa, &index, last, XA_PRESENT))
452 * xa_for_each_start() - Iterate over a portion of an XArray.
454 * @index: Index of @entry.
455 * @entry: Entry retrieved from array.
456 * @start: First index to retrieve from array.
458 * During the iteration, @entry will have the value of the entry stored
459 * in @xa at @index. You may modify @index during the iteration if you
460 * want to skip or reprocess indices. It is safe to modify the array
461 * during the iteration. At the end of the iteration, @entry will be set
462 * to NULL and @index will have a value less than or equal to max.
464 * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n). You have
465 * to handle your own locking with xas_for_each(), and if you have to unlock
466 * after each iteration, it will also end up being O(n.log(n)).
467 * xa_for_each_start() will spin if it hits a retry entry; if you intend to
468 * see retry entries, you should use the xas_for_each() iterator instead.
469 * The xas_for_each() iterator will expand into more inline code than
470 * xa_for_each_start().
472 * Context: Any context. Takes and releases the RCU lock.
474 #define xa_for_each_start(xa, index, entry, start) \
475 xa_for_each_range(xa, index, entry, start, ULONG_MAX)
478 * xa_for_each() - Iterate over present entries in an XArray.
480 * @index: Index of @entry.
481 * @entry: Entry retrieved from array.
483 * During the iteration, @entry will have the value of the entry stored
484 * in @xa at @index. You may modify @index during the iteration if you want
485 * to skip or reprocess indices. It is safe to modify the array during the
486 * iteration. At the end of the iteration, @entry will be set to NULL and
487 * @index will have a value less than or equal to max.
489 * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have
490 * to handle your own locking with xas_for_each(), and if you have to unlock
491 * after each iteration, it will also end up being O(n.log(n)). xa_for_each()
492 * will spin if it hits a retry entry; if you intend to see retry entries,
493 * you should use the xas_for_each() iterator instead. The xas_for_each()
494 * iterator will expand into more inline code than xa_for_each().
496 * Context: Any context. Takes and releases the RCU lock.
498 #define xa_for_each(xa, index, entry) \
499 xa_for_each_start(xa, index, entry, 0)
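/*
 * Usage sketch (illustrative): visiting every present entry.  @index and
 * @entry are ordinary variables owned by the caller; "array" is made up.
 *
 *	unsigned long index;
 *	void *entry;
 *
 *	xa_for_each(&array, index, entry)
 *		pr_info("index %lu: %p\n", index, entry);
 */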
502 * xa_for_each_marked() - Iterate over marked entries in an XArray.
504 * @index: Index of @entry.
505 * @entry: Entry retrieved from array.
506 * @filter: Selection criterion.
508 * During the iteration, @entry will have the value of the entry stored
509 * in @xa at @index. The iteration will skip all entries in the array
510 * which do not match @filter. You may modify @index during the iteration
511 * if you want to skip or reprocess indices. It is safe to modify the array
512 * during the iteration. At the end of the iteration, @entry will be set to
513 * NULL and @index will have a value less than or equal to max.
515 * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n).
516 * You have to handle your own locking with xas_for_each_marked(), and if you have
517 * to unlock after each iteration, it will also end up being O(n.log(n)).
518 * xa_for_each_marked() will spin if it hits a retry entry; if you intend to
519 * see retry entries, you should use the xas_for_each_marked() iterator
520 * instead. The xas_for_each_marked() iterator will expand into more inline
521 * code than xa_for_each_marked().
523 * Context: Any context. Takes and releases the RCU lock.
525 #define xa_for_each_marked(xa, index, entry, filter) \
526 for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \
527 entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))
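/*
 * Usage sketch (illustrative): mark the entries of interest, then walk only
 * the marked ones.  The names are made up.
 *
 *	xa_set_mark(&array, index, XA_MARK_0);
 *	...
 *	xa_for_each_marked(&array, index, entry, XA_MARK_0)
 *		pr_info("index %lu is marked\n", index);
 */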
529 #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
530 #define xa_lock(xa) spin_lock(&(xa)->xa_lock)
531 #define xa_unlock(xa) spin_unlock(&(xa)->xa_lock)
532 #define xa_lock_bh(xa) spin_lock_bh(&(xa)->xa_lock)
533 #define xa_unlock_bh(xa) spin_unlock_bh(&(xa)->xa_lock)
534 #define xa_lock_irq(xa) spin_lock_irq(&(xa)->xa_lock)
535 #define xa_unlock_irq(xa) spin_unlock_irq(&(xa)->xa_lock)
536 #define xa_lock_irqsave(xa, flags) \
537 spin_lock_irqsave(&(xa)->xa_lock, flags)
538 #define xa_unlock_irqrestore(xa, flags) \
539 spin_unlock_irqrestore(&(xa)->xa_lock, flags)
540 #define xa_lock_nested(xa, subclass) \
541 spin_lock_nested(&(xa)->xa_lock, subclass)
542 #define xa_lock_bh_nested(xa, subclass) \
543 spin_lock_bh_nested(&(xa)->xa_lock, subclass)
544 #define xa_lock_irq_nested(xa, subclass) \
545 spin_lock_irq_nested(&(xa)->xa_lock, subclass)
546 #define xa_lock_irqsave_nested(xa, flags, subclass) \
547 spin_lock_irqsave_nested(&(xa)->xa_lock, flags, subclass)
550 * Versions of the normal API which require the caller to hold the
551 * xa_lock. If the GFP flags allow it, they will drop the lock to
552 * allocate memory, then reacquire it afterwards. These functions
553 * may also re-enable interrupts if the XArray flags indicate the
554 * locking should be interrupt safe.
556 void *__xa_erase(struct xarray *, unsigned long index);
557 void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
558 void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
560 int __must_check __xa_insert(struct xarray *, unsigned long index,
562 int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry,
563 struct xa_limit, gfp_t);
564 int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry,
565 struct xa_limit, u32 *next, gfp_t);
566 void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
567 void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
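/*
 * Usage sketch (illustrative): performing two updates under a single hold
 * of the xa_lock using the __xa_* variants.  GFP_ATOMIC keeps the lock
 * held throughout; error handling is omitted and the names are made up.
 *
 *	xa_lock(&array);
 *	__xa_erase(&array, old_index);
 *	__xa_store(&array, new_index, entry, GFP_ATOMIC);
 *	xa_unlock(&array);
 */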
570 * xa_store_bh() - Store this entry in the XArray.
572 * @index: Index into array.
574 * @gfp: Memory allocation flags.
576 * This function is like calling xa_store() except it disables softirqs
577 * while holding the array lock.
579 * Context: Any context. Takes and releases the xa_lock while
580 * disabling softirqs.
581 * Return: The old entry at this index or xa_err() if an error happened.
583 static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
584 void *entry, gfp_t gfp)
589 curr = __xa_store(xa, index, entry, gfp);
596 * xa_store_irq() - Store this entry in the XArray.
598 * @index: Index into array.
600 * @gfp: Memory allocation flags.
602 * This function is like calling xa_store() except it disables interrupts
603 * while holding the array lock.
605 * Context: Process context. Takes and releases the xa_lock while
606 * disabling interrupts.
607 * Return: The old entry at this index or xa_err() if an error happened.
609 static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
610 void *entry, gfp_t gfp)
615 curr = __xa_store(xa, index, entry, gfp);
622 * xa_erase_bh() - Erase this entry from the XArray.
624 * @index: Index of entry.
626 * After this function returns, loading from @index will return %NULL.
627 * If the index is part of a multi-index entry, all indices will be erased
628 * and none of the entries will be part of a multi-index entry.
630 * Context: Any context. Takes and releases the xa_lock while
631 * disabling softirqs.
632 * Return: The entry which used to be at this index.
634 static inline void *xa_erase_bh(struct xarray *xa, unsigned long index)
639 entry = __xa_erase(xa, index);
646 * xa_erase_irq() - Erase this entry from the XArray.
648 * @index: Index of entry.
650 * After this function returns, loading from @index will return %NULL.
651 * If the index is part of a multi-index entry, all indices will be erased
652 * and none of the entries will be part of a multi-index entry.
654 * Context: Process context. Takes and releases the xa_lock while
655 * disabling interrupts.
656 * Return: The entry which used to be at this index.
658 static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
663 entry = __xa_erase(xa, index);
670 * xa_cmpxchg() - Conditionally replace an entry in the XArray.
672 * @index: Index into array.
673 * @old: Old value to test against.
674 * @entry: New value to place in array.
675 * @gfp: Memory allocation flags.
677 * If the entry at @index is the same as @old, replace it with @entry.
678 * If the return value is equal to @old, then the exchange was successful.
680 * Context: Any context. Takes and releases the xa_lock. May sleep
681 * if the @gfp flags permit.
682 * Return: The old value at this index or xa_err() if an error happened.
684 static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
685 void *old, void *entry, gfp_t gfp)
690 curr = __xa_cmpxchg(xa, index, old, entry, gfp);
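/*
 * Usage sketch (illustrative): only install @new if the slot still holds
 * @old; another thread may have raced with us.  The names are made up.
 *
 *	curr = xa_cmpxchg(&array, index, old, new, GFP_KERNEL);
 *	if (xa_is_err(curr))
 *		return xa_err(curr);
 *	if (curr != old)
 *		return -EAGAIN;			(somebody else got there first)
 */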
697 * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray.
699 * @index: Index into array.
700 * @old: Old value to test against.
701 * @entry: New value to place in array.
702 * @gfp: Memory allocation flags.
704 * This function is like calling xa_cmpxchg() except it disables softirqs
705 * while holding the array lock.
707 * Context: Any context. Takes and releases the xa_lock while
708 * disabling softirqs. May sleep if the @gfp flags permit.
709 * Return: The old value at this index or xa_err() if an error happened.
711 static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
712 void *old, void *entry, gfp_t gfp)
717 curr = __xa_cmpxchg(xa, index, old, entry, gfp);
724 * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray.
726 * @index: Index into array.
727 * @old: Old value to test against.
728 * @entry: New value to place in array.
729 * @gfp: Memory allocation flags.
731 * This function is like calling xa_cmpxchg() except it disables interrupts
732 * while holding the array lock.
734 * Context: Process context. Takes and releases the xa_lock while
735 * disabling interrupts. May sleep if the @gfp flags permit.
736 * Return: The old value at this index or xa_err() if an error happened.
738 static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
739 void *old, void *entry, gfp_t gfp)
744 curr = __xa_cmpxchg(xa, index, old, entry, gfp);
751 * xa_insert() - Store this entry in the XArray unless another entry is already present.
754 * @index: Index into array.
756 * @gfp: Memory allocation flags.
758 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
759 * if no entry is present. Inserting will fail if a reserved entry is
760 * present, even though loading from this index will return NULL.
762 * Context: Any context. Takes and releases the xa_lock. May sleep if
763 * the @gfp flags permit.
764 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
765 * -ENOMEM if memory could not be allocated.
767 static inline int __must_check xa_insert(struct xarray *xa,
768 unsigned long index, void *entry, gfp_t gfp)
773 err = __xa_insert(xa, index, entry, gfp);
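/*
 * Usage sketch (illustrative): insert only if the index is unused, mapping
 * -EBUSY onto the caller's preferred errno.  The names are made up.
 *
 *	err = xa_insert(&array, index, entry, GFP_KERNEL);
 *	if (err == -EBUSY)
 *		return -EEXIST;			(slot already in use or reserved)
 *	if (err)
 *		return err;			(-ENOMEM)
 */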
780 * xa_insert_bh() - Store this entry in the XArray unless another entry is already present.
783 * @index: Index into array.
785 * @gfp: Memory allocation flags.
787 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
788 * if no entry is present. Inserting will fail if a reserved entry is
789 * present, even though loading from this index will return NULL.
791 * Context: Any context. Takes and releases the xa_lock while
792 * disabling softirqs. May sleep if the @gfp flags permit.
793 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
794 * -ENOMEM if memory could not be allocated.
796 static inline int __must_check xa_insert_bh(struct xarray *xa,
797 unsigned long index, void *entry, gfp_t gfp)
802 err = __xa_insert(xa, index, entry, gfp);
809 * xa_insert_irq() - Store this entry in the XArray unless another entry is already present.
812 * @index: Index into array.
814 * @gfp: Memory allocation flags.
816 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
817 * if no entry is present. Inserting will fail if a reserved entry is
818 * present, even though loading from this index will return NULL.
820 * Context: Process context. Takes and releases the xa_lock while
821 * disabling interrupts. May sleep if the @gfp flags permit.
822 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
823 * -ENOMEM if memory could not be allocated.
825 static inline int __must_check xa_insert_irq(struct xarray *xa,
826 unsigned long index, void *entry, gfp_t gfp)
831 err = __xa_insert(xa, index, entry, gfp);
838 * xa_alloc() - Find somewhere to store this entry in the XArray.
840 * @id: Pointer to ID.
842 * @limit: Range of ID to allocate.
843 * @gfp: Memory allocation flags.
845 * Finds an empty entry in @xa between @limit.min and @limit.max,
846 * stores the index into the @id pointer, then stores the entry at
847 * that index. A concurrent lookup will not see an uninitialised @id.
849 * Context: Any context. Takes and releases the xa_lock. May sleep if
850 * the @gfp flags permit.
851 * Return: 0 on success, -ENOMEM if memory could not be allocated or
852 * -EBUSY if there are no free entries in @limit.
854 static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
855 void *entry, struct xa_limit limit, gfp_t gfp)
860 err = __xa_alloc(xa, id, entry, limit, gfp);
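/*
 * Usage sketch (illustrative): let the XArray pick a free ID for a new
 * object.  The array must have been created with XA_FLAGS_ALLOC (e.g. via
 * DEFINE_XARRAY_ALLOC()); "allocator" and "obj->id" are made up.
 *
 *	u32 id;
 *	int err;
 *
 *	err = xa_alloc(&allocator, &id, obj, xa_limit_31b, GFP_KERNEL);
 *	if (err)
 *		return err;			(-ENOMEM or -EBUSY)
 *	obj->id = id;
 */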
867 * xa_alloc_bh() - Find somewhere to store this entry in the XArray.
869 * @id: Pointer to ID.
871 * @limit: Range of ID to allocate.
872 * @gfp: Memory allocation flags.
874 * Finds an empty entry in @xa between @limit.min and @limit.max,
875 * stores the index into the @id pointer, then stores the entry at
876 * that index. A concurrent lookup will not see an uninitialised @id.
878 * Context: Any context. Takes and releases the xa_lock while
879 * disabling softirqs. May sleep if the @gfp flags permit.
880 * Return: 0 on success, -ENOMEM if memory could not be allocated or
881 * -EBUSY if there are no free entries in @limit.
883 static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
884 void *entry, struct xa_limit limit, gfp_t gfp)
889 err = __xa_alloc(xa, id, entry, limit, gfp);
896 * xa_alloc_irq() - Find somewhere to store this entry in the XArray.
898 * @id: Pointer to ID.
900 * @limit: Range of ID to allocate.
901 * @gfp: Memory allocation flags.
903 * Finds an empty entry in @xa between @limit.min and @limit.max,
904 * stores the index into the @id pointer, then stores the entry at
905 * that index. A concurrent lookup will not see an uninitialised @id.
907 * Context: Process context. Takes and releases the xa_lock while
908 * disabling interrupts. May sleep if the @gfp flags permit.
909 * Return: 0 on success, -ENOMEM if memory could not be allocated or
910 * -EBUSY if there are no free entries in @limit.
912 static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
913 void *entry, struct xa_limit limit, gfp_t gfp)
918 err = __xa_alloc(xa, id, entry, limit, gfp);
925 * xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
927 * @id: Pointer to ID.
929 * @limit: Range of allocated ID.
930 * @next: Pointer to next ID to allocate.
931 * @gfp: Memory allocation flags.
933 * Finds an empty entry in @xa between @limit.min and @limit.max,
934 * stores the index into the @id pointer, then stores the entry at
935 * that index. A concurrent lookup will not see an uninitialised @id.
936 * The search for an empty entry will start at @next and will wrap
937 * around if necessary.
939 * Context: Any context. Takes and releases the xa_lock. May sleep if
940 * the @gfp flags permit.
941 * Return: 0 if the allocation succeeded without wrapping. 1 if the
942 * allocation succeeded after wrapping, -ENOMEM if memory could not be
943 * allocated or -EBUSY if there are no free entries in @limit.
945 static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
946 struct xa_limit limit, u32 *next, gfp_t gfp)
951 err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
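/*
 * Usage sketch (illustrative): cyclic allocation keeps handing out higher
 * IDs before wrapping, which reduces immediate ID reuse.  "dev->next_id"
 * is a made-up field that must persist between calls.
 *
 *	u32 id;
 *	int err;
 *
 *	err = xa_alloc_cyclic(&allocator, &id, obj, xa_limit_31b,
 *			      &dev->next_id, GFP_KERNEL);
 *	if (err < 0)
 *		return err;
 *	(a return value of 1 only means the search wrapped around)
 */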
958 * xa_alloc_cyclic_bh() - Find somewhere to store this entry in the XArray.
960 * @id: Pointer to ID.
962 * @limit: Range of allocated ID.
963 * @next: Pointer to next ID to allocate.
964 * @gfp: Memory allocation flags.
966 * Finds an empty entry in @xa between @limit.min and @limit.max,
967 * stores the index into the @id pointer, then stores the entry at
968 * that index. A concurrent lookup will not see an uninitialised @id.
969 * The search for an empty entry will start at @next and will wrap
970 * around if necessary.
972 * Context: Any context. Takes and releases the xa_lock while
973 * disabling softirqs. May sleep if the @gfp flags permit.
974 * Return: 0 if the allocation succeeded without wrapping. 1 if the
975 * allocation succeeded after wrapping, -ENOMEM if memory could not be
976 * allocated or -EBUSY if there are no free entries in @limit.
978 static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
979 struct xa_limit limit, u32 *next, gfp_t gfp)
984 err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
991 * xa_alloc_cyclic_irq() - Find somewhere to store this entry in the XArray.
993 * @id: Pointer to ID.
995 * @limit: Range of allocated ID.
996 * @next: Pointer to next ID to allocate.
997 * @gfp: Memory allocation flags.
999 * Finds an empty entry in @xa between @limit.min and @limit.max,
1000 * stores the index into the @id pointer, then stores the entry at
1001 * that index. A concurrent lookup will not see an uninitialised @id.
1002 * The search for an empty entry will start at @next and will wrap
1003 * around if necessary.
1005 * Context: Process context. Takes and releases the xa_lock while
1006 * disabling interrupts. May sleep if the @gfp flags permit.
1007 * Return: 0 if the allocation succeeded without wrapping. 1 if the
1008 * allocation succeeded after wrapping, -ENOMEM if memory could not be
1009 * allocated or -EBUSY if there are no free entries in @limit.
1011 static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
1012 struct xa_limit limit, u32 *next, gfp_t gfp)
1017 err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
1024 * xa_reserve() - Reserve this index in the XArray.
1026 * @index: Index into array.
1027 * @gfp: Memory allocation flags.
1029 * Ensures there is somewhere to store an entry at @index in the array.
1030 * If there is already something stored at @index, this function does
1031 * nothing. If there was nothing there, the entry is marked as reserved.
1032 * Loading from a reserved entry returns a %NULL pointer.
1034 * If you do not use the entry that you have reserved, call xa_release()
1035 * or xa_erase() to free any unnecessary memory.
1037 * Context: Any context. Takes and releases the xa_lock.
1038 * May sleep if the @gfp flags permit.
1039 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
1041 static inline __must_check
1042 int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
1044 return xa_err(xa_cmpxchg(xa, index, NULL, XA_ZERO_ENTRY, gfp));
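/*
 * Usage sketch (illustrative): reserve the slot while sleeping is allowed,
 * fill it later from atomic context, or give the reservation back if the
 * later step is abandoned.  "commit" and the other names are made up.
 *
 *	if (xa_reserve(&array, index, GFP_KERNEL))
 *		return -ENOMEM;
 *	...
 *	if (commit)
 *		xa_store(&array, index, entry, GFP_ATOMIC);
 *	else
 *		xa_release(&array, index);
 */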
1048 * xa_reserve_bh() - Reserve this index in the XArray.
1050 * @index: Index into array.
1051 * @gfp: Memory allocation flags.
1053 * A softirq-disabling version of xa_reserve().
1055 * Context: Any context. Takes and releases the xa_lock while
1056 * disabling softirqs.
1057 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
1059 static inline __must_check
1060 int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
1062 return xa_err(xa_cmpxchg_bh(xa, index, NULL, XA_ZERO_ENTRY, gfp));
1066 * xa_reserve_irq() - Reserve this index in the XArray.
1068 * @index: Index into array.
1069 * @gfp: Memory allocation flags.
1071 * An interrupt-disabling version of xa_reserve().
1073 * Context: Process context. Takes and releases the xa_lock while
1074 * disabling interrupts.
1075 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
1077 static inline __must_check
1078 int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
1080 return xa_err(xa_cmpxchg_irq(xa, index, NULL, XA_ZERO_ENTRY, gfp));
1084 * xa_release() - Release a reserved entry.
1086 * @index: Index of entry.
1088 * After calling xa_reserve(), you can call this function to release the
1089 * reservation. If the entry at @index has been stored to, this function will do nothing.
1092 static inline void xa_release(struct xarray *xa, unsigned long index)
1094 xa_cmpxchg(xa, index, XA_ZERO_ENTRY, NULL, 0);
1097 /* Everything below here is the Advanced API. Proceed with caution. */
1100 * The xarray is constructed out of a set of 'chunks' of pointers. Choosing
1101 * the best chunk size requires some tradeoffs. A power of two recommends
1102 * itself so that we can walk the tree based purely on shifts and masks.
1103 * Generally, the larger the better; as the number of slots per level of the
1104 * tree increases, the less tall the tree needs to be. But that needs to be
1105 * balanced against the memory consumption of each node. On a 64-bit system,
1106 * xa_node is currently 576 bytes, and we get 7 of them per 4kB page. If we
1107 * doubled the number of slots per node, we'd get only 3 nodes per 4kB page.
1109 #ifndef XA_CHUNK_SHIFT
1110 #define XA_CHUNK_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
1112 #define XA_CHUNK_SIZE (1UL << XA_CHUNK_SHIFT)
1113 #define XA_CHUNK_MASK (XA_CHUNK_SIZE - 1)
1114 #define XA_MAX_MARKS 3
1115 #define XA_MARK_LONGS DIV_ROUND_UP(XA_CHUNK_SIZE, BITS_PER_LONG)
1118 * @count is the count of every non-NULL element in the ->slots array
1119 * whether that is a value entry, a retry entry, a user pointer,
1120 * a sibling entry or a pointer to the next level of the tree.
1121 * @nr_values is the count of every element in ->slots which is
1122 * either a value entry or a sibling of a value entry.
1125 unsigned char shift; /* Bits remaining in each slot */
1126 unsigned char offset; /* Slot offset in parent */
1127 unsigned char count; /* Total entry count */
1128 unsigned char nr_values; /* Value entry count */
1129 struct xa_node __rcu *parent; /* NULL at top of tree */
1130 struct xarray *array; /* The array we belong to */
1132 struct list_head private_list; /* For tree user */
1133 struct rcu_head rcu_head; /* Used when freeing node */
1135 void __rcu *slots[XA_CHUNK_SIZE];
1137 unsigned long tags[XA_MAX_MARKS][XA_MARK_LONGS];
1138 unsigned long marks[XA_MAX_MARKS][XA_MARK_LONGS];
1142 void xa_dump(const struct xarray *);
1143 void xa_dump_node(const struct xa_node *);
1146 #define XA_BUG_ON(xa, x) do { \
1152 #define XA_NODE_BUG_ON(node, x) do { \
1154 if (node) xa_dump_node(node); \
1159 #define XA_BUG_ON(xa, x) do { } while (0)
1160 #define XA_NODE_BUG_ON(node, x) do { } while (0)
1164 static inline void *xa_head(const struct xarray *xa)
1166 return rcu_dereference_check(xa->xa_head,
1167 lockdep_is_held(&xa->xa_lock));
1171 static inline void *xa_head_locked(const struct xarray *xa)
1173 return rcu_dereference_protected(xa->xa_head,
1174 lockdep_is_held(&xa->xa_lock));
1178 static inline void *xa_entry(const struct xarray *xa,
1179 const struct xa_node *node, unsigned int offset)
1181 XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
1182 return rcu_dereference_check(node->slots[offset],
1183 lockdep_is_held(&xa->xa_lock));
1187 static inline void *xa_entry_locked(const struct xarray *xa,
1188 const struct xa_node *node, unsigned int offset)
1190 XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
1191 return rcu_dereference_protected(node->slots[offset],
1192 lockdep_is_held(&xa->xa_lock));
1196 static inline struct xa_node *xa_parent(const struct xarray *xa,
1197 const struct xa_node *node)
1199 return rcu_dereference_check(node->parent,
1200 lockdep_is_held(&xa->xa_lock));
1204 static inline struct xa_node *xa_parent_locked(const struct xarray *xa,
1205 const struct xa_node *node)
1207 return rcu_dereference_protected(node->parent,
1208 lockdep_is_held(&xa->xa_lock));
1212 static inline void *xa_mk_node(const struct xa_node *node)
1214 return (void *)((unsigned long)node | 2);
1218 static inline struct xa_node *xa_to_node(const void *entry)
1220 return (struct xa_node *)((unsigned long)entry - 2);
1224 static inline bool xa_is_node(const void *entry)
1226 return xa_is_internal(entry) && (unsigned long)entry > 4096;
1230 static inline void *xa_mk_sibling(unsigned int offset)
1232 return xa_mk_internal(offset);
1236 static inline unsigned long xa_to_sibling(const void *entry)
1238 return xa_to_internal(entry);
1242 * xa_is_sibling() - Is the entry a sibling entry?
1243 * @entry: Entry retrieved from the XArray
1245 * Return: %true if the entry is a sibling entry.
1247 static inline bool xa_is_sibling(const void *entry)
1249 return IS_ENABLED(CONFIG_XARRAY_MULTI) && xa_is_internal(entry) &&
1250 (entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
1253 #define XA_RETRY_ENTRY xa_mk_internal(256)
1256 * xa_is_retry() - Is the entry a retry entry?
1257 * @entry: Entry retrieved from the XArray
1259 * Return: %true if the entry is a retry entry.
1261 static inline bool xa_is_retry(const void *entry)
1263 return unlikely(entry == XA_RETRY_ENTRY);
1267 * xa_is_advanced() - Is the entry only permitted for the advanced API?
1268 * @entry: Entry to be stored in the XArray.
1270 * Return: %true if the entry cannot be stored by the normal API.
1272 static inline bool xa_is_advanced(const void *entry)
1274 return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY);
1278 * typedef xa_update_node_t - A callback function from the XArray.
1279 * @node: The node which is being processed
1281 * This function is called every time the XArray updates the count of
1282 * present and value entries in a node. It allows advanced users to
1283 * maintain the private_list in the node.
1285 * Context: The xa_lock is held and interrupts may be disabled.
1286 * Implementations should not drop the xa_lock, nor re-enable interrupts.
1289 typedef void (*xa_update_node_t)(struct xa_node *node);
1291 void xa_delete_node(struct xa_node *, xa_update_node_t);
1294 * The xa_state is opaque to its users. It contains various different pieces
1295 * of state involved in the current operation on the XArray. It should be
1296 * declared on the stack and passed between the various internal routines.
1297 * The various elements in it should not be accessed directly, but only
1298 * through the provided accessor functions. The below documentation is for
1299 * the benefit of those working on the code, not for users of the XArray.
1301 * @xa_node usually points to the xa_node containing the slot we're operating
1302 * on (and @xa_offset is the offset in the slots array). If there is a
1303 * single entry in the array at index 0, there are no allocated xa_nodes to
1304 * point to, and so we store %NULL in @xa_node. @xa_node is set to
1305 * the value %XAS_RESTART if the xa_state is not walked to the correct
1306 * position in the tree of nodes for this operation. If an error occurs
1307 * during an operation, it is set to an %XAS_ERROR value. If we run off the
1308 * end of the allocated nodes, it is set to %XAS_BOUNDS.
1312 unsigned long xa_index;
1313 unsigned char xa_shift;
1314 unsigned char xa_sibs;
1315 unsigned char xa_offset;
1316 unsigned char xa_pad; /* Helps gcc generate better code */
1317 struct xa_node *xa_node;
1318 struct xa_node *xa_alloc;
1319 xa_update_node_t xa_update;
1323 * We encode errnos in the xas->xa_node. If an error has happened, we need to
1324 * drop the lock to fix it, and once we've done so the xa_state is invalid.
1326 #define XA_ERROR(errno) ((struct xa_node *)(((unsigned long)errno << 2) | 2UL))
1327 #define XAS_BOUNDS ((struct xa_node *)1UL)
1328 #define XAS_RESTART ((struct xa_node *)3UL)
1330 #define __XA_STATE(array, index, shift, sibs) { \
1332 .xa_index = index, \
1333 .xa_shift = shift, \
1337 .xa_node = XAS_RESTART, \
1343 * XA_STATE() - Declare an XArray operation state.
1344 * @name: Name of this operation state (usually xas).
1345 * @array: Array to operate on.
1346 * @index: Initial index of interest.
1348 * Declare and initialise an xa_state on the stack.
1350 #define XA_STATE(name, array, index) \
1351 struct xa_state name = __XA_STATE(array, index, 0, 0)
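/*
 * Usage sketch (illustrative): the usual advanced-API store loop.
 * xas_nomem() allocates memory with the lock dropped and asks for a retry,
 * so the store itself can run under a spinlock.  The names are made up.
 *
 *	XA_STATE(xas, &array, index);
 *
 *	do {
 *		xas_lock(&xas);
 *		xas_store(&xas, entry);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, GFP_KERNEL));
 *	return xas_error(&xas);
 */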
1354 * XA_STATE_ORDER() - Declare an XArray operation state.
1355 * @name: Name of this operation state (usually xas).
1356 * @array: Array to operate on.
1357 * @index: Initial index of interest.
1358 * @order: Order of entry.
1360 * Declare and initialise an xa_state on the stack. This variant of
1361 * XA_STATE() allows you to specify the 'order' of the element you
1362 * want to operate on.
1364 #define XA_STATE_ORDER(name, array, index, order) \
1365 struct xa_state name = __XA_STATE(array, \
1366 (index >> order) << order, \
1367 order - (order % XA_CHUNK_SHIFT), \
1368 (1U << (order % XA_CHUNK_SHIFT)) - 1)
1370 #define xas_marked(xas, mark) xa_marked((xas)->xa, (mark))
1371 #define xas_trylock(xas) xa_trylock((xas)->xa)
1372 #define xas_lock(xas) xa_lock((xas)->xa)
1373 #define xas_unlock(xas) xa_unlock((xas)->xa)
1374 #define xas_lock_bh(xas) xa_lock_bh((xas)->xa)
1375 #define xas_unlock_bh(xas) xa_unlock_bh((xas)->xa)
1376 #define xas_lock_irq(xas) xa_lock_irq((xas)->xa)
1377 #define xas_unlock_irq(xas) xa_unlock_irq((xas)->xa)
1378 #define xas_lock_irqsave(xas, flags) \
1379 xa_lock_irqsave((xas)->xa, flags)
1380 #define xas_unlock_irqrestore(xas, flags) \
1381 xa_unlock_irqrestore((xas)->xa, flags)
1384 * xas_error() - Return an errno stored in the xa_state.
1385 * @xas: XArray operation state.
1387 * Return: 0 if no error has been noted. A negative errno if one has.
1389 static inline int xas_error(const struct xa_state *xas)
1391 return xa_err(xas->xa_node);
1395 * xas_set_err() - Note an error in the xa_state.
1396 * @xas: XArray operation state.
1397 * @err: Negative error number.
1399 * Only call this function with a negative @err; zero or positive errors
1400 * will probably not behave the way you think they should. If you want
1401 * to clear the error from an xa_state, use xas_reset().
1403 static inline void xas_set_err(struct xa_state *xas, long err)
1405 xas->xa_node = XA_ERROR(err);
1409 * xas_invalid() - Is the xas in a retry or error state?
1410 * @xas: XArray operation state.
1412 * Return: %true if the xas cannot be used for operations.
1414 static inline bool xas_invalid(const struct xa_state *xas)
1416 return (unsigned long)xas->xa_node & 3;
1420 * xas_valid() - Is the xas a valid cursor into the array?
1421 * @xas: XArray operation state.
1423 * Return: %true if the xas can be used for operations.
1425 static inline bool xas_valid(const struct xa_state *xas)
1427 return !xas_invalid(xas);
1431 * xas_is_node() - Does the xas point to a node?
1432 * @xas: XArray operation state.
1434 * Return: %true if the xas currently references a node.
1436 static inline bool xas_is_node(const struct xa_state *xas)
1438 return xas_valid(xas) && xas->xa_node;
1441 /* True if the pointer is something other than a node */
1442 static inline bool xas_not_node(struct xa_node *node)
1444 return ((unsigned long)node & 3) || !node;
1447 /* True if the node represents RESTART or an error */
1448 static inline bool xas_frozen(struct xa_node *node)
1450 return (unsigned long)node & 2;
1453 /* True if the node represents head-of-tree, RESTART or BOUNDS */
1454 static inline bool xas_top(struct xa_node *node)
1456 return node <= XAS_RESTART;
1460 * xas_reset() - Reset an XArray operation state.
1461 * @xas: XArray operation state.
1463 * Resets the error or walk state of the @xas so future walks of the
1464 * array will start from the root. Use this if you have dropped the
1465 * xarray lock and want to reuse the xa_state.
1467 * Context: Any context.
1469 static inline void xas_reset(struct xa_state *xas)
1471 xas->xa_node = XAS_RESTART;
1475 * xas_retry() - Retry the operation if appropriate.
1476 * @xas: XArray operation state.
1477 * @entry: Entry from xarray.
1479 * The advanced functions may sometimes return an internal entry, such as
1480 * a retry entry or a zero entry. This function sets up the @xas to restart
1481 * the walk from the head of the array if needed.
1483 * Context: Any context.
1484 * Return: true if the operation needs to be retried.
1486 static inline bool xas_retry(struct xa_state *xas, const void *entry)
1488 if (xa_is_zero(entry))
1490 if (!xa_is_retry(entry))
1496 void *xas_load(struct xa_state *);
1497 void *xas_store(struct xa_state *, void *entry);
1498 void *xas_find(struct xa_state *, unsigned long max);
1499 void *xas_find_conflict(struct xa_state *);
1501 bool xas_get_mark(const struct xa_state *, xa_mark_t);
1502 void xas_set_mark(const struct xa_state *, xa_mark_t);
1503 void xas_clear_mark(const struct xa_state *, xa_mark_t);
1504 void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
1505 void xas_init_marks(const struct xa_state *);
1507 bool xas_nomem(struct xa_state *, gfp_t);
1508 void xas_pause(struct xa_state *);
1510 void xas_create_range(struct xa_state *);
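/*
 * Usage sketch (illustrative): a lockless lookup which copes with the
 * internal entries the advanced API may return.  The names are made up.
 *
 *	XA_STATE(xas, &array, index);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	do {
 *		entry = xas_load(&xas);
 *	} while (xas_retry(&xas, entry));
 *	rcu_read_unlock();
 */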
1512 #ifdef CONFIG_XARRAY_MULTI
1513 int xa_get_order(struct xarray *, unsigned long index);
1514 void xas_split(struct xa_state *, void *entry, unsigned int order);
1515 void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t);
1517 static inline int xa_get_order(struct xarray *xa, unsigned long index)
1522 static inline void xas_split(struct xa_state *xas, void *entry,
1525 xas_store(xas, entry);
1528 static inline void xas_split_alloc(struct xa_state *xas, void *entry,
1529 unsigned int order, gfp_t gfp)
1535 * xas_reload() - Refetch an entry from the xarray.
1536 * @xas: XArray operation state.
1538 * Use this function to check that a previously loaded entry still has
1539 * the same value. This is useful for the lockless pagecache lookup where
1540 * we walk the array with only the RCU lock to protect us, lock the page,
1541 * then check that the page hasn't moved since we looked it up.
1543 * The caller guarantees that @xas is still valid. If it may be in an
1544 * error or restart state, call xas_load() instead.
1546 * Return: The entry at this location in the xarray.
1548 static inline void *xas_reload(struct xa_state *xas)
1550 struct xa_node *node = xas->xa_node;
1555 return xa_head(xas->xa);
1556 if (IS_ENABLED(CONFIG_XARRAY_MULTI)) {
1557 offset = (xas->xa_index >> node->shift) & XA_CHUNK_MASK;
1558 entry = xa_entry(xas->xa, node, offset);
1559 if (!xa_is_sibling(entry))
1561 offset = xa_to_sibling(entry);
1563 offset = xas->xa_offset;
1565 return xa_entry(xas->xa, node, offset);
1569 * xas_set() - Set up XArray operation state for a different index.
1570 * @xas: XArray operation state.
1571 * @index: New index into the XArray.
1573 * Move the operation state to refer to a different index. This will
1574 * have the effect of starting a walk from the top; see xas_next()
1575 * to move to an adjacent index.
1577 static inline void xas_set(struct xa_state *xas, unsigned long index)
1579 xas->xa_index = index;
1580 xas->xa_node = XAS_RESTART;
1584 * xas_set_order() - Set up XArray operation state for a multislot entry.
1585 * @xas: XArray operation state.
1586 * @index: Target of the operation.
1587 * @order: Entry occupies 2^@order indices.
1589 static inline void xas_set_order(struct xa_state *xas, unsigned long index,
1592 #ifdef CONFIG_XARRAY_MULTI
1593 xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0;
1594 xas->xa_shift = order - (order % XA_CHUNK_SHIFT);
1595 xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
1596 xas->xa_node = XAS_RESTART;
1599 xas_set(xas, index);
1604 * xas_set_update() - Set up XArray operation state for a callback.
1605 * @xas: XArray operation state.
1606 * @update: Function to call when updating a node.
1608 * The XArray can notify a caller after it has updated an xa_node.
1609 * This is advanced functionality and is only needed by the page cache.
1611 static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
1613 xas->xa_update = update;
1617 * xas_next_entry() - Advance iterator to next present entry.
1618 * @xas: XArray operation state.
1619 * @max: Highest index to return.
1621 * xas_next_entry() is an inline function to optimise xarray traversal for
1622 * speed. It is equivalent to calling xas_find(), and will call xas_find()
1623 * for all the hard cases.
1625 * Return: The next present entry after the one currently referred to by @xas.
1627 static inline void *xas_next_entry(struct xa_state *xas, unsigned long max)
1629 struct xa_node *node = xas->xa_node;
1632 if (unlikely(xas_not_node(node) || node->shift ||
1633 xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)))
1634 return xas_find(xas, max);
1637 if (unlikely(xas->xa_index >= max))
1638 return xas_find(xas, max);
1639 if (unlikely(xas->xa_offset == XA_CHUNK_MASK))
1640 return xas_find(xas, max);
1641 entry = xa_entry(xas->xa, node, xas->xa_offset + 1);
1642 if (unlikely(xa_is_internal(entry)))
1643 return xas_find(xas, max);
1652 static inline unsigned int xas_find_chunk(struct xa_state *xas, bool advance,
1655 unsigned long *addr = xas->xa_node->marks[(__force unsigned)mark];
1656 unsigned int offset = xas->xa_offset;
1660 if (XA_CHUNK_SIZE == BITS_PER_LONG) {
1661 if (offset < XA_CHUNK_SIZE) {
1662 unsigned long data = *addr & (~0UL << offset);
1666 return XA_CHUNK_SIZE;
1669 return find_next_bit(addr, XA_CHUNK_SIZE, offset);
1673 * xas_next_marked() - Advance iterator to next marked entry.
1674 * @xas: XArray operation state.
1675 * @max: Highest index to return.
1676 * @mark: Mark to search for.
1678 * xas_next_marked() is an inline function to optimise xarray traversal for
1679 * speed. It is equivalent to calling xas_find_marked(), and will call
1680 * xas_find_marked() for all the hard cases.
1682 * Return: The next marked entry after the one currently referred to by @xas.
1684 static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
1687 struct xa_node *node = xas->xa_node;
1689 unsigned int offset;
1691 if (unlikely(xas_not_node(node) || node->shift))
1692 return xas_find_marked(xas, max, mark);
1693 offset = xas_find_chunk(xas, true, mark);
1694 xas->xa_offset = offset;
1695 xas->xa_index = (xas->xa_index & ~XA_CHUNK_MASK) + offset;
1696 if (xas->xa_index > max)
1698 if (offset == XA_CHUNK_SIZE)
1699 return xas_find_marked(xas, max, mark);
1700 entry = xa_entry(xas->xa, node, offset);
1702 return xas_find_marked(xas, max, mark);
1707 * If iterating while holding a lock, drop the lock and reschedule
1708 * every %XA_CHECK_SCHED loops.
1711 XA_CHECK_SCHED = 4096,
1715 * xas_for_each() - Iterate over a range of an XArray.
1716 * @xas: XArray operation state.
1717 * @entry: Entry retrieved from the array.
1718 * @max: Maximum index to retrieve from array.
1720 * The loop body will be executed for each entry present in the xarray
1721 * between the current xas position and @max. @entry will be set to
1722 * the entry retrieved from the xarray. It is safe to delete entries
1723 * from the array in the loop body. You should hold either the RCU lock
1724 * or the xa_lock while iterating. If you need to drop the lock, call
1725 * xas_pause() first.
1727 #define xas_for_each(xas, entry, max) \
1728 for (entry = xas_find(xas, max); entry; \
1729 entry = xas_next_entry(xas, max))
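/*
 * Usage sketch (illustrative): iterating under the RCU lock and pausing
 * every XA_CHECK_SCHED entries (defined above) so the CPU can reschedule.
 * The names are made up.
 *
 *	XA_STATE(xas, &array, 0);
 *	unsigned int seen = 0;
 *	void *entry;
 *
 *	rcu_read_lock();
 *	xas_for_each(&xas, entry, ULONG_MAX) {
 *		if (xas_retry(&xas, entry))
 *			continue;
 *		if (++seen % XA_CHECK_SCHED)
 *			continue;
 *		xas_pause(&xas);
 *		rcu_read_unlock();
 *		cond_resched();
 *		rcu_read_lock();
 *	}
 *	rcu_read_unlock();
 */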
1732 * xas_for_each_marked() - Iterate over a range of an XArray.
1733 * @xas: XArray operation state.
1734 * @entry: Entry retrieved from the array.
1735 * @max: Maximum index to retrieve from array.
1736 * @mark: Mark to search for.
1738 * The loop body will be executed for each marked entry in the xarray
1739 * between the current xas position and @max. @entry will be set to
1740 * the entry retrieved from the xarray. It is safe to delete entries
1741 * from the array in the loop body. You should hold either the RCU lock
1742 * or the xa_lock while iterating. If you need to drop the lock, call
1743 * xas_pause() first.
1745 #define xas_for_each_marked(xas, entry, max, mark) \
1746 for (entry = xas_find_marked(xas, max, mark); entry; \
1747 entry = xas_next_marked(xas, max, mark))
1750 * xas_for_each_conflict() - Iterate over a range of an XArray.
1751 * @xas: XArray operation state.
1752 * @entry: Entry retrieved from the array.
1754 * The loop body will be executed for each entry in the XArray that
1755 * lies within the range specified by @xas. If the loop terminates
1756 * normally, @entry will be %NULL. The user may break out of the loop,
1757 * which will leave @entry set to the conflicting entry. The caller
1758 * may also call xas_set_err() to exit the loop while setting an error
1759 * to record the reason.
1761 #define xas_for_each_conflict(xas, entry) \
1762 while ((entry = xas_find_conflict(xas)))
1764 void *__xas_next(struct xa_state *);
1765 void *__xas_prev(struct xa_state *);
1768 * xas_prev() - Move iterator to previous index.
1769 * @xas: XArray operation state.
1771 * If the @xas was in an error state, it will remain in an error state
1772 * and this function will return %NULL. If the @xas has never been walked,
1773 * it will have the effect of calling xas_load(). Otherwise one will be
1774 * subtracted from the index and the state will be walked to the correct
1775 * location in the array for the next operation.
1777 * If the iterator was referencing index 0, this function wraps
1778 * around to %ULONG_MAX.
1780 * Return: The entry at the new index. This may be %NULL or an internal entry.
1783 static inline void *xas_prev(struct xa_state *xas)
1785 struct xa_node *node = xas->xa_node;
1787 if (unlikely(xas_not_node(node) || node->shift ||
1788 xas->xa_offset == 0))
1789 return __xas_prev(xas);
1793 return xa_entry(xas->xa, node, xas->xa_offset);
1797 * xas_next() - Move state to next index.
1798 * @xas: XArray operation state.
1800 * If the @xas was in an error state, it will remain in an error state
1801 * and this function will return %NULL. If the @xas has never been walked,
1802 * it will have the effect of calling xas_load(). Otherwise one will be
1803 * added to the index and the state will be walked to the correct
1804 * location in the array for the next operation.
1806 * If the iterator was referencing index %ULONG_MAX, this function wraps around to 0.
1809 * Return: The entry at the new index. This may be %NULL or an internal entry.
1812 static inline void *xas_next(struct xa_state *xas)
1814 struct xa_node *node = xas->xa_node;
1816 if (unlikely(xas_not_node(node) || node->shift ||
1817 xas->xa_offset == XA_CHUNK_MASK))
1818 return __xas_next(xas);
1822 return xa_entry(xas->xa, node, xas->xa_offset);
1825 #endif /* _LINUX_XARRAY_H */