// SPDX-License-Identifier: GPL-2.0+
/*
 * Maple Tree implementation
 * Copyright (c) 2018-2022 Oracle Corporation
 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
 *	    Matthew Wilcox <willy@infradead.org>
 */

/*
 * DOC: Interesting implementation details of the Maple Tree
 *
 * Each node type has a number of slots for entries and a number of slots for
 * pivots.  In the case of dense nodes, the pivots are implied by the position
 * and are simply the slot index + the minimum of the node.
 *
 * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
 * indicate that the tree is specifying ranges.  Pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
 * specific position of a B-tree.  Pivot values are inclusive of the slot with
 * the same index.
 *
 * The following illustrates the layout of a range64 node's slots and pivots.
 *
 *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
 *           ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
 *           │   │   │   │     │    │    │    │    └─ Implied maximum
 *           │   │   │   │     │    │    │    └─ Pivot 14
 *           │   │   │   │     │    │    └─ Pivot 13
 *           │   │   │   │     │    └─ Pivot 12
 *           │   │   │   │     └─ Pivot 11
 *           │   │   │   └─ Pivot 2
 *           │   │   └─ Pivot 1
 *           │   └─ Pivot 0
 *           └─  Implied minimum
 *
 * Internal (non-leaf) nodes contain pointers to other nodes.
 * Leaf nodes contain entries.
 *
 * The location of interest is often referred to as an offset.  All offsets
 * have a slot, but the last offset has an implied pivot from the node above
 * (or UINT_MAX for the root node).
 *
 * Ranges complicate certain write activities.  When modifying any of
 * the B-tree variants, it is known that one entry will either be added or
 * deleted.  When modifying the Maple Tree, one store operation may overwrite
 * the entire data set, or one half of the tree, or the middle half of the
 * tree.
 */

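/*
 * Worked example (values invented for illustration): a leaf range64 node
 * covering [0, 100] (minimum 0 from the parent, implied maximum 100) might
 * hold:
 *
 *	pivot[0] = 9,  slot[0] = A	-> A occupies [0, 9]
 *	pivot[1] = 49, slot[1] = NULL	-> [10, 49] is empty
 *	pivot[2] = 50, slot[2] = B	-> B occupies [50, 50]
 *	pivot[3] = 0,  slot[3] = C	-> C occupies [51, 100] (implied max)
 *
 * A single store of [40, 60] would truncate the NULL range, remove B
 * entirely, and truncate C - the "one store operation may overwrite" many
 * entries behaviour described above.
 */
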
#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>

#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>

#define MA_ROOT_PARENT 1

/*
 * Maple state flags
 * * MA_STATE_BULK		- Bulk insert mode
 * * MA_STATE_REBALANCE		- Indicate a rebalance during bulk insert
 * * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
 */
#define MA_STATE_BULK		1
#define MA_STATE_REBALANCE	2
#define MA_STATE_PREALLOC	4

#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;

#ifdef CONFIG_DEBUG_MAPLE_TREE
static const unsigned long mt_max[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= ULONG_MAX,
	[maple_range_64]	= ULONG_MAX,
	[maple_arange_64]	= ULONG_MAX,
};
#define mt_node_max(x) mt_max[mte_node_type(x)]
#endif

static const unsigned char mt_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
};
#define mt_slot_count(x) mt_slots[mte_node_type(x)]

static const unsigned char mt_pivots[] = {
	[maple_dense]		= 0,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
};
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]

static const unsigned char mt_min_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 1,
};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]

#define MAPLE_BIG_NODE_SLOTS	(MAPLE_RANGE64_SLOTS * 2 + 2)
#define MAPLE_BIG_NODE_GAPS	(MAPLE_ARANGE64_SLOTS * 2 + 1)

struct maple_big_node {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
	union {
		struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
		struct {
			unsigned long padding[MAPLE_BIG_NODE_GAPS];
			unsigned long gap[MAPLE_BIG_NODE_GAPS];
		};
	};
	unsigned char b_end;
	enum maple_type type;
};

/*
 * The maple_subtree_state is used to build a tree to replace a segment of an
 * existing tree in a more atomic way.  Any walkers of the older tree will hit
 * a dead node and restart on updates.
 */
struct maple_subtree_state {
	struct ma_state *orig_l;	/* Original left side of subtree */
	struct ma_state *orig_r;	/* Original right side of subtree */
	struct ma_state *l;		/* New left side of subtree */
	struct ma_state *m;		/* New middle of subtree (rare) */
	struct ma_state *r;		/* New right side of subtree */
	struct ma_topiary *free;	/* nodes to be freed */
	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
	struct maple_big_node *bn;
};

static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
	return kmem_cache_alloc(maple_node_cache, gfp);
}

static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
{
	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}

static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
}

static void mt_free_rcu(struct rcu_head *head)
{
	struct maple_node *node = container_of(head, struct maple_node, rcu);

	kmem_cache_free(maple_node_cache, node);
}

/*
 * ma_free_rcu() - Use rcu callback to free a maple node
 * @node: The node to free
 *
 * The maple tree uses the parent pointer to indicate this node is no longer
 * in use and will be freed.
 */
static void ma_free_rcu(struct maple_node *node)
{
	node->parent = ma_parent_ptr(node);
	call_rcu(&node->rcu, mt_free_rcu);
}

static void mas_set_height(struct ma_state *mas)
{
	unsigned int new_flags = mas->tree->ma_flags;

	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
	BUG_ON(mas->depth > MAPLE_HEIGHT_MAX);
	new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
	mas->tree->ma_flags = new_flags;
}

static unsigned int mas_mt_height(struct ma_state *mas)
{
	return mt_height(mas->tree);
}

static inline enum maple_type mte_node_type(const struct maple_enode *entry)
{
	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
		MAPLE_NODE_TYPE_MASK;
}

static inline bool ma_is_dense(const enum maple_type type)
{
	return type < maple_leaf_64;
}

static inline bool ma_is_leaf(const enum maple_type type)
{
	return type < maple_range_64;
}

static inline bool mte_is_leaf(const struct maple_enode *entry)
{
	return ma_is_leaf(mte_node_type(entry));
}

/*
 * We also reserve values with the bottom two bits set to '10' which are
 * below 4096.
 */
static inline bool mt_is_reserved(const void *entry)
{
	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
		xa_is_internal(entry);
}

static inline void mas_set_err(struct ma_state *mas, long err)
{
	mas->node = MA_ERROR(err);
}

static inline bool mas_is_ptr(struct ma_state *mas)
{
	return mas->node == MAS_ROOT;
}

static inline bool mas_is_start(struct ma_state *mas)
{
	return mas->node == MAS_START;
}

bool mas_is_err(struct ma_state *mas)
{
	return xa_is_err(mas->node);
}

static inline bool mas_searchable(struct ma_state *mas)
{
	if (mas_is_none(mas))
		return false;

	if (mas_is_ptr(mas))
		return false;

	return true;
}

static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
{
	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
 * @entry: The maple encoded node
 *
 * Return: a maple topiary pointer
 */
static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
{
	return (struct maple_topiary *)
		((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mas_mn() - Get the maple state node.
 * @mas: The maple state
 *
 * Return: the maple node (not encoded - bare pointer).
 */
static inline struct maple_node *mas_mn(const struct ma_state *mas)
{
	return mte_to_node(mas->node);
}

/*
 * mte_set_node_dead() - Set a maple encoded node as dead.
 * @mn: The maple encoded node.
 */
static inline void mte_set_node_dead(struct maple_enode *mn)
{
	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
	smp_wmb(); /* Needed for RCU */
}

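/*
 * Illustrative sketch (not part of the original file): a node is "dead" once
 * its parent pointer points back at itself, which is exactly what
 * mte_set_node_dead() and ma_free_rcu() arrange.  A lockless reader can
 * therefore validate a node after reading from it:
 *
 *	struct maple_node *node = mte_to_node(enode);
 *
 *	... read pivots/slots under rcu_read_lock() ...
 *	if (ma_dead_node(node))
 *		goto retry;	// raced with a writer; restart the walk
 */
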
/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE			0x02
/* maple_type stored bit 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT		0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL		0x04

static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
					     enum maple_type type)
{
	return (void *)((unsigned long)node |
			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
}

static inline void *mte_mk_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
}

static inline void *mte_safe_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}

/* The encoded node is passed by value, so the returned value must be used. */
static inline void *mte_set_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}

static inline void *mte_clear_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}

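/*
 * Worked example (addresses invented): maple nodes are 256B aligned, so the
 * low 8 bits of a node pointer are free for metadata.  Encoding a
 * maple_range_64 node (enum value 2) that lives at 0x...f00:
 *
 *	node pointer:			0x...f00
 *	| (2 << MAPLE_ENODE_TYPE_SHIFT)	0x...f10
 *	| MAPLE_ENODE_NULL		0x...f14  <- the maple_enode value
 *
 * mte_node_type() recovers the type with (enode >> 3) & type mask, and
 * mte_to_node() strips all eight low bits with ~MAPLE_NODE_MASK.
 */
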
static inline bool ma_is_root(struct maple_node *node)
{
	return ((unsigned long)node->parent & MA_ROOT_PARENT);
}

static inline bool mte_is_root(const struct maple_enode *node)
{
	return ma_is_root(mte_to_node(node));
}

static inline bool mas_is_root_limits(const struct ma_state *mas)
{
	return !mas->min && mas->max == ULONG_MAX;
}

static inline bool mt_is_alloc(struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}

/*
 * The Parent Pointer
 *
 * Excluding root, the parent pointer is 256B aligned like all other tree
 * nodes.  When storing a 32 or 64 bit value, the offset can fit into 5 bits.
 * The 16 bit values need an extra bit to store the offset.  This extra bit
 * comes from a reuse of the last bit in the node type.  This is possible by
 * using bit 1 to indicate if bit 2 is part of the type or the slot.
 *
 * Note types:
 *  0x??1 = Root
 *  0x?00 = 16 bit nodes
 *  0x010 = 32 bit nodes
 *  0x110 = 64 bit nodes
 *
 * Slot size and alignment
 *  0x??1 : Root
 *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
 *  0b010 : 32 bit values, type in 0-2, slot in 3-7
 *  0b110 : 64 bit values, type in 0-2, slot in 3-7
 */

#define MAPLE_PARENT_ROOT		0x01

#define MAPLE_PARENT_SLOT_SHIFT		0x03
#define MAPLE_PARENT_SLOT_MASK		0xF8

#define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
#define MAPLE_PARENT_16B_SLOT_MASK	0xFC

#define MAPLE_PARENT_RANGE64		0x06
#define MAPLE_PARENT_RANGE32		0x04
#define MAPLE_PARENT_NOT_RANGE16	0x02

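/*
 * Worked example (address invented): a 64 bit node at 0x...c00 stored in
 * slot 5 of its parent encodes its parent pointer as
 *
 *	0x...c00 | (5 << MAPLE_PARENT_SLOT_SHIFT) | MAPLE_PARENT_RANGE64
 *	= 0x...c2e
 *
 * Decoding reverses the packing: bit 1 (MAPLE_PARENT_NOT_RANGE16) selects
 * the 64/32 bit shift and mask, (0x2e & MAPLE_PARENT_SLOT_MASK) >> 3 yields
 * slot 5, and masking with ~MAPLE_NODE_MASK recovers the parent node
 * address.
 */
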
/*
 * mte_parent_shift() - Get the parent shift for the slot storage.
 * @parent: The parent pointer cast as an unsigned long
 * Return: The shift into that pointer to the start of the slot
 */
static inline unsigned long mte_parent_shift(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_SHIFT;

	return MAPLE_PARENT_16B_SLOT_SHIFT;
}

/*
 * mte_parent_slot_mask() - Get the slot mask for the parent.
 * @parent: The parent pointer cast as an unsigned long.
 * Return: The slot mask for that parent.
 */
static inline unsigned long mte_parent_slot_mask(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_MASK;

	return MAPLE_PARENT_16B_SLOT_MASK;
}

/*
 * mte_parent_enum() - Return the maple_type of the parent from the stored
 * parent type.
 * @p_enode: The parent pointer cast as a maple_enode
 * @mt: The maple tree
 * Return: The node->parent maple_type
 */
static inline
enum maple_type mte_parent_enum(struct maple_enode *p_enode,
				struct maple_tree *mt)
{
	unsigned long p_type;

	p_type = (unsigned long)p_enode;
	if (p_type & MAPLE_PARENT_ROOT)
		return 0; /* Validated in the caller. */

	p_type &= MAPLE_NODE_MASK;
	p_type = p_type & ~(MAPLE_PARENT_ROOT | mte_parent_slot_mask(p_type));

	switch (p_type) {
	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
		if (mt_is_alloc(mt))
			return maple_arange_64;
		return maple_range_64;
	}

	return 0;
}

static inline
enum maple_type mas_parent_enum(struct ma_state *mas, struct maple_enode *enode)
{
	return mte_parent_enum(ma_enode_ptr(mte_to_node(enode)->parent), mas->tree);
}

/*
 * mte_set_parent() - Set the parent node and encode the slot
 * @enode: The encoded maple node.
 * @parent: The encoded maple node that is the parent of @enode.
 * @slot: The slot that @enode resides in @parent.
 *
 * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on
 * the parent type.
 */
static inline
void mte_set_parent(struct maple_enode *enode, const struct maple_enode *parent,
		    unsigned char slot)
{
	unsigned long val = (unsigned long)parent;
	unsigned long shift;
	unsigned long type;
	enum maple_type p_type = mte_node_type(parent);

	BUG_ON(p_type == maple_dense);
	BUG_ON(p_type == maple_leaf_64);

	switch (p_type) {
	case maple_range_64:
	case maple_arange_64:
		shift = MAPLE_PARENT_SLOT_SHIFT;
		type = MAPLE_PARENT_RANGE64;
		break;
	default:
		shift = type = 0;
		break;
	}

	val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
	val |= (slot << shift) | type;
	mte_to_node(enode)->parent = ma_parent_ptr(val);
}

/*
 * mte_parent_slot() - get the parent slot of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The slot in the parent node where @enode resides.
 */
static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
{
	unsigned long val = (unsigned long)mte_to_node(enode)->parent;

	/* Root. */
	if (val & 1)
		return 0;

	/*
	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
	 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
	 */
	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
}

/*
 * mte_parent() - Get the parent of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The parent maple node.
 */
static inline struct maple_node *mte_parent(const struct maple_enode *enode)
{
	return (void *)((unsigned long)
			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
}

/*
 * ma_dead_node() - check if the @node is dead.
 * @node: The maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool ma_dead_node(const struct maple_node *node)
{
	struct maple_node *parent = (void *)((unsigned long)
					     node->parent & ~MAPLE_NODE_MASK);

	return (parent == node);
}

/*
 * mte_dead_node() - check if the @enode is dead.
 * @enode: The encoded maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool mte_dead_node(const struct maple_enode *enode)
{
	struct maple_node *parent, *node;

	node = mte_to_node(enode);
	parent = mte_parent(enode);
	return (parent == node);
}

/*
 * mas_allocated() - Get the number of nodes allocated in a maple state.
 * @mas: The maple state
 *
 * The ma_state alloc member is overloaded to hold a pointer to the first
 * allocated node or to the number of requested nodes to allocate.  If bit 0
 * is set, then the alloc contains the number of requested nodes.  If there is
 * an allocated node, then the total allocated nodes is in that node.
 *
 * Return: The total number of nodes allocated
 */
static inline unsigned long mas_allocated(const struct ma_state *mas)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
		return 0;

	return mas->alloc->total;
}

/*
 * mas_set_alloc_req() - Set the requested number of allocations.
 * @mas: the maple state
 * @count: the number of allocations.
 *
 * The requested number of allocations is either in the first allocated node,
 * located in @mas->alloc->request_count, or directly in @mas->alloc if there
 * is no allocated node.  Set the request either in the node or do the
 * necessary encoding to store in @mas->alloc directly.
 */
static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
		if (!count)
			mas->alloc = NULL;
		else
			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
		return;
	}

	mas->alloc->request_count = count;
}

/*
 * mas_alloc_req() - get the requested number of allocations.
 * @mas: The maple state
 *
 * The alloc count is either stored directly in @mas, or in
 * @mas->alloc->request_count if there is at least one node allocated.  Decode
 * the request count if it's stored directly in @mas->alloc.
 *
 * Return: The allocation request count.
 */
static inline unsigned int mas_alloc_req(const struct ma_state *mas)
{
	if ((unsigned long)mas->alloc & 0x1)
		return (unsigned long)(mas->alloc) >> 1;
	else if (mas->alloc)
		return mas->alloc->request_count;
	return 0;
}

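/*
 * Illustrative sketch (not part of the original file): bit 0 of mas->alloc
 * tags the field as an encoded request count rather than a pointer.
 * Requesting three nodes before anything is allocated stores
 *
 *	mas->alloc = (struct maple_alloc *)((3 << 1) | 1);	// 0x7
 *
 * so mas_allocated() sees bit 0 set and reports 0 nodes, while
 * mas_alloc_req() decodes 0x7 >> 1 == 3 outstanding requests.  Once a node
 * is allocated, the field becomes a real pointer and both counts live in
 * that node instead.
 */
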
/*
 * ma_pivots() - Get a pointer to the maple node pivots.
 * @node: the maple node
 * @type: the node type
 *
 * In the event of a dead node, this array may be %NULL.
 *
 * Return: A pointer to the maple node pivots
 */
static inline unsigned long *ma_pivots(struct maple_node *node,
				       enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot;
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot;
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * ma_gaps() - Get a pointer to the maple node gaps.
 * @node: the maple node
 * @type: the node type
 *
 * Return: A pointer to the maple node gaps
 */
static inline unsigned long *ma_gaps(struct maple_node *node,
				     enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.gap;
	case maple_range_64:
	case maple_leaf_64:
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * mte_pivot() - Get the pivot at @piv of the maple encoded node.
 * @mn: The maple encoded node.
 * @piv: The pivot.
 *
 * Return: the pivot at @piv of @mn.
 */
static inline unsigned long mte_pivot(const struct maple_enode *mn,
				      unsigned char piv)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	if (piv >= mt_pivots[type]) {
		WARN_ON(1);
		return 0;
	}

	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot[piv];
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot[piv];
	case maple_dense:
		return 0;
	}
	return 0;
}

/*
 * mas_safe_pivot() - get the pivot at @piv or mas->max.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @piv: The pivot to fetch
 * @type: The maple node type
 *
 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
 * otherwise.
 */
static inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
	       unsigned char piv, enum maple_type type)
{
	if (piv >= mt_pivots[type])
		return mas->max;

	return pivots[piv];
}

/*
 * mas_safe_min() - Return the minimum for a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 *
 * Return: The minimum range value that is contained in @offset.
 */
static inline unsigned long
mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
{
	if (likely(offset))
		return pivots[offset - 1] + 1;

	return mas->min;
}

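/*
 * Worked example (values invented): in a node with mas->min == 100,
 * mas->max == 200 and pivots {149, 159, 0, ...}, the range of each offset
 * is derived as
 *
 *	offset 0: [mas_safe_min() == 100, pivots[0] == 149]
 *	offset 1: [pivots[0] + 1 == 150, pivots[1] == 159]
 *	offset 2: [pivots[1] + 1 == 160, mas->max == 200]
 *
 * mas_safe_pivot() supplies mas->max when @piv runs off the pivot array,
 * and mas_logical_pivot() below does the same when the stored pivot is 0.
 */
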
/*
 * mas_logical_pivot() - Get the logical pivot of a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 * @type: The maple node type
 *
 * When there is no value at a pivot (beyond the end of the data), then the
 * pivot is actually @mas->max.
 *
 * Return: the logical pivot of a given @offset.
 */
static inline unsigned long
mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
		  unsigned char offset, enum maple_type type)
{
	unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type);

	if (likely(lpiv))
		return lpiv;

	if (likely(!offset))
		return lpiv;

	return mas->max;
}

/*
 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
 * @mn: The encoded maple node
 * @piv: The pivot offset
 * @val: The value of the pivot
 */
static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
				 unsigned long val)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	BUG_ON(piv >= mt_pivots[type]);
	switch (type) {
	default:
	case maple_range_64:
	case maple_leaf_64:
		node->mr64.pivot[piv] = val;
		break;
	case maple_arange_64:
		node->ma64.pivot[piv] = val;
		break;
	case maple_dense:
		break;
	}
}

/*
 * ma_slots() - Get a pointer to the maple node slots.
 * @mn: The maple node
 * @mt: The maple node type
 *
 * Return: A pointer to the maple node slots
 */
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
	switch (mt) {
	default:
	case maple_arange_64:
		return mn->ma64.slot;
	case maple_range_64:
	case maple_leaf_64:
		return mn->mr64.slot;
	case maple_dense:
		return mn->slot;
	}
}

static inline bool mt_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static inline void *mt_slot(const struct maple_tree *mt,
			    void __rcu **slots, unsigned char offset)
{
	return rcu_dereference_check(slots[offset], mt_locked(mt));
}

/*
 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset.
 */
static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
				    unsigned char offset)
{
	return rcu_dereference_protected(slots[offset], mt_locked(mas->tree));
}

/*
 * mas_slot() - Get the slot value when not holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset
 */
static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
			     unsigned char offset)
{
	return mt_slot(mas->tree, slots, offset);
}

/*
 * mas_root() - Get the maple tree root.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root(struct ma_state *mas)
{
	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}

static inline void *mt_root_locked(struct maple_tree *mt)
{
	return rcu_dereference_protected(mt->ma_root, mt_locked(mt));
}

/*
 * mas_root_locked() - Get the maple tree root when holding the maple tree
 * lock.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root_locked(struct ma_state *mas)
{
	return mt_root_locked(mas->tree);
}

static inline struct maple_metadata *ma_meta(struct maple_node *mn,
					     enum maple_type mt)
{
	switch (mt) {
	case maple_arange_64:
		return &mn->ma64.meta;
	default:
		return &mn->mr64.meta;
	}
}

/*
 * ma_set_meta() - Set the metadata information of a node.
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The offset of the highest sub-gap in this node.
 * @end: The end of the data in this node.
 */
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
			       unsigned char offset, unsigned char end)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
	meta->end = end;
}

/*
 * ma_meta_end() - Get the data end of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_end(struct maple_node *mn,
					enum maple_type mt)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	return meta->end;
}

/*
 * ma_meta_gap() - Get the largest gap location of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_gap(struct maple_node *mn,
					enum maple_type mt)
{
	BUG_ON(mt != maple_arange_64);

	return mn->ma64.meta.gap;
}

/*
 * ma_set_meta_gap() - Set the largest gap location in a node's metadata
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The location of the largest gap.
 */
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
				   unsigned char offset)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
}

/*
 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
 * @mat: the ma_topiary, a linked list of dead nodes.
 * @dead_enode: the node to be marked as dead and added to the tail of the
 * list.
 *
 * Add the @dead_enode to the linked list in @mat.
 */
static inline void mat_add(struct ma_topiary *mat,
			   struct maple_enode *dead_enode)
{
	mte_set_node_dead(dead_enode);
	mte_to_mat(dead_enode)->next = NULL;
	if (!mat->tail) {
		mat->tail = mat->head = dead_enode;
		return;
	}

	mte_to_mat(mat->tail)->next = dead_enode;
	mat->tail = dead_enode;
}

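/*
 * Usage sketch (hypothetical, mirroring how the write paths drive the
 * topiary): dead nodes are queued on an ma_topiary and disposed of in one
 * pass after the replacement (sub)tree is published:
 *
 *	MA_TOPIARY(free, mas->tree);
 *
 *	mat_add(&free, old_enode);	// readers now see old_enode as dead
 *	...
 *	mas_mat_free(mas, &free);	// free (or reuse) the entire list
 */
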
static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
static inline void mas_free(struct ma_state *mas, struct maple_enode *used);

/*
 * mas_mat_free() - Free all nodes in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Free walk a dead list.
 */
static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mas_free(mas, mat->head);
		mat->head = next;
	}
}

/*
 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Destroy walk a dead list.
 */
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mte_destroy_walk(mat->head, mat->mtree);
		mat->head = next;
	}
}

/*
 * mas_descend() - Descend into the slot stored in the ma_state.
 * @mas: the maple state.
 *
 * Note: Not RCU safe, only use in write side or debug code.
 */
static inline void mas_descend(struct ma_state *mas)
{
	enum maple_type type;
	unsigned long *pivots;
	struct maple_node *node;
	void __rcu **slots;

	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);

	if (mas->offset)
		mas->min = pivots[mas->offset - 1] + 1;
	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
	mas->node = mas_slot(mas, slots, mas->offset);
}

/*
 * mte_set_gap() - Set a maple node gap.
 * @mn: The encoded maple node
 * @gap: The offset of the gap to set
 * @val: The gap value
 */
static inline void mte_set_gap(const struct maple_enode *mn,
			       unsigned char gap, unsigned long val)
{
	switch (mte_node_type(mn)) {
	default:
		break;
	case maple_arange_64:
		mte_to_node(mn)->ma64.gap[gap] = val;
		break;
	}
}

/*
 * mas_ascend() - Walk up a level of the tree.
 * @mas: The maple state
 *
 * Sets the @mas->max and @mas->min to the correct values when walking up.
 * This may cause several levels of walking up to find the correct min and
 * max.  May find a dead node which will cause a premature return.
 * Return: 1 on dead node, 0 otherwise
 */
static int mas_ascend(struct ma_state *mas)
{
	struct maple_enode *p_enode; /* parent enode. */
	struct maple_enode *a_enode; /* ancestor enode. */
	struct maple_node *a_node; /* ancestor node. */
	struct maple_node *p_node; /* parent node. */
	unsigned char a_slot;
	enum maple_type a_type;
	unsigned long min, max;
	unsigned long *pivots;
	unsigned char offset;
	bool set_max = false, set_min = false;

	a_node = mas_mn(mas);
	if (ma_is_root(a_node)) {
		mas->offset = 0;
		return 0;
	}

	p_node = mte_parent(mas->node);
	if (unlikely(a_node == p_node))
		return 1;
	a_type = mas_parent_enum(mas, mas->node);
	offset = mte_parent_slot(mas->node);
	a_enode = mt_mk_node(p_node, a_type);

	/* Check to make sure all parent information is still accurate */
	if (p_node != mte_parent(mas->node))
		return 1;

	mas->node = a_enode;
	mas->offset = offset;

	if (mte_is_root(a_enode)) {
		mas->max = ULONG_MAX;
		mas->min = 0;
		return 0;
	}

	min = 0;
	max = ULONG_MAX;
	do {
		p_enode = a_enode;
		a_type = mas_parent_enum(mas, p_enode);
		a_node = mte_parent(p_enode);
		a_slot = mte_parent_slot(p_enode);
		a_enode = mt_mk_node(a_node, a_type);
		pivots = ma_pivots(a_node, a_type);

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (!set_min && a_slot) {
			set_min = true;
			min = pivots[a_slot - 1] + 1;
		}

		if (!set_max && a_slot < mt_pivots[a_type]) {
			set_max = true;
			max = pivots[a_slot];
		}

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (unlikely(ma_is_root(a_node)))
			break;

	} while (!set_min || !set_max);

	mas->max = max;
	mas->min = min;
	return 0;
}

/*
 * mas_pop_node() - Get a previously allocated maple node from the maple state.
 * @mas: The maple state
 *
 * Return: A pointer to a maple node.
 */
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
	struct maple_alloc *ret, *node = mas->alloc;
	unsigned long total = mas_allocated(mas);
	unsigned int req = mas_alloc_req(mas);

	/* nothing or a request pending. */
	if (WARN_ON(!total))
		return NULL;

	if (total == 1) {
		/* single allocation in this ma_state */
		mas->alloc = NULL;
		ret = node;
		goto single_node;
	}

	if (node->node_count == 1) {
		/* Single allocation in this node. */
		mas->alloc = node->slot[0];
		mas->alloc->total = node->total - 1;
		ret = node;
		goto new_head;
	}

	node->total--;
	ret = node->slot[--node->node_count];
	node->slot[node->node_count] = NULL;

single_node:
new_head:
	if (req) {
		req++;
		mas_set_alloc_req(mas, req);
	}

	memset(ret, 0, sizeof(*ret));
	return (struct maple_node *)ret;
}

/*
 * mas_push_node() - Push a node back on the maple state allocation.
 * @mas: The maple state
 * @used: The used maple node
 *
 * Stores the maple node back into @mas->alloc for reuse.  Updates allocated
 * and requested node count as necessary.
 */
static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
{
	struct maple_alloc *reuse = (struct maple_alloc *)used;
	struct maple_alloc *head = mas->alloc;
	unsigned long count;
	unsigned int requested = mas_alloc_req(mas);

	count = mas_allocated(mas);

	reuse->request_count = 0;
	reuse->node_count = 0;
	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
		head->slot[head->node_count++] = reuse;
		head->total++;
		goto done;
	}

	reuse->total = 1;
	if ((head) && !((unsigned long)head & 0x1)) {
		reuse->slot[0] = head;
		reuse->node_count = 1;
		reuse->total += head->total;
	}

	mas->alloc = reuse;
done:
	if (requested > 1)
		mas_set_alloc_req(mas, requested - 1);
}

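/*
 * Illustrative sketch (not part of the original file): mas->alloc behaves as
 * a stack of maple_alloc nodes, each caching up to MAPLE_ALLOC_SLOTS spare
 * nodes.  A consume/return cycle looks like:
 *
 *	node = mas_pop_node(mas);	// take one pre-zeroed node
 *	...
 *	mas_push_node(mas, node);	// not needed after all: cache it again
 */
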
/*
 * mas_alloc_nodes() - Allocate nodes into a maple state
 * @mas: The maple state
 * @gfp: The GFP Flags
 */
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
	struct maple_alloc *node;
	unsigned long allocated = mas_allocated(mas);
	unsigned int requested = mas_alloc_req(mas);
	unsigned int count;
	void **slots = NULL;
	unsigned int max_req = 0;

	if (!requested)
		return;

	mas_set_alloc_req(mas, 0);
	if (mas->mas_flags & MA_STATE_PREALLOC) {
		if (allocated)
			return;
		WARN_ON(!allocated);
	}

	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
		node = (struct maple_alloc *)mt_alloc_one(gfp);
		if (!node)
			goto nomem_one;

		if (allocated) {
			node->slot[0] = mas->alloc;
			node->node_count = 1;
		} else {
			node->node_count = 0;
		}

		mas->alloc = node;
		node->total = ++allocated;
		requested--;
	}

	node = mas->alloc;
	node->request_count = 0;
	while (requested) {
		max_req = MAPLE_ALLOC_SLOTS;
		if (node->node_count) {
			unsigned int offset = node->node_count;

			slots = (void **)&node->slot[offset];
			max_req -= offset;
		} else {
			slots = (void **)&node->slot;
		}

		max_req = min(requested, max_req);
		count = mt_alloc_bulk(gfp, max_req, slots);
		if (!count)
			goto nomem_bulk;

		node->node_count += count;
		allocated += count;
		node = node->slot[0];
		node->node_count = 0;
		node->request_count = 0;
		requested -= count;
	}
	mas->alloc->total = allocated;
	return;

nomem_bulk:
	/* Clean up potential freed allocations on bulk failure */
	memset(slots, 0, max_req * sizeof(unsigned long));
nomem_one:
	mas_set_alloc_req(mas, requested);
	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
		mas->alloc->total = allocated;
	mas_set_err(mas, -ENOMEM);
}

/*
 * mas_free() - Free an encoded maple node
 * @mas: The maple state
 * @used: The encoded maple node to free.
 *
 * Uses rcu free if necessary, pushes @used back on the maple state
 * allocations otherwise.
 */
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
	struct maple_node *tmp = mte_to_node(used);

	if (mt_in_rcu(mas->tree))
		ma_free_rcu(tmp);
	else
		mas_push_node(mas, tmp);
}

/*
 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
 * if there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 * @gfp: the gfp flags
 */
static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
{
	unsigned long allocated = mas_allocated(mas);

	if (allocated < count) {
		mas_set_alloc_req(mas, count - allocated);
		mas_alloc_nodes(mas, gfp);
	}
}

/*
 * mas_node_count() - Check if enough nodes are allocated and request more if
 * there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 *
 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
 */
static void mas_node_count(struct ma_state *mas, int count)
{
	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
}

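/*
 * Usage sketch (hypothetical caller): write paths request the worst-case
 * node count up front and bail out before modifying the tree on failure:
 *
 *	mas_node_count(mas, 1 + mas_mt_height(mas) * 3);
 *	if (mas_is_err(mas))
 *		return xa_err(mas->node);	// -ENOMEM; tree is untouched
 */
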
/*
 * mas_start() - Sets up maple state for operations.
 * @mas: The maple state.
 *
 * If mas->node == MAS_START, then set the min, max and depth to
 * defaults.
 *
 * Return:
 * - If mas->node is an error or not MAS_START, return NULL.
 * - If it's an empty tree:     NULL & mas->node == MAS_NONE
 * - If it's a single entry:    The entry & mas->node == MAS_ROOT
 * - If it's a tree:            NULL & mas->node == safe root node.
 */
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
	if (likely(mas_is_start(mas))) {
		struct maple_enode *root;

		mas->min = 0;
		mas->max = ULONG_MAX;
		mas->depth = 0;
		mas->offset = 0;
		root = mas_root(mas);
		/* Tree with nodes */
		if (likely(xa_is_node(root))) {
			mas->depth = 1;
			mas->node = mte_safe_root(root);
			return NULL;
		}

		/* empty tree */
		if (unlikely(!root)) {
			mas->node = MAS_NONE;
			mas->offset = MAPLE_NODE_SLOTS;
			return NULL;
		}

		/* Single entry tree */
		mas->node = MAS_ROOT;
		mas->offset = MAPLE_NODE_SLOTS;

		/* Single entry tree. */
		if (mas->index > 0)
			return NULL;

		return root;
	}

	return NULL;
}

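/*
 * Usage sketch (hypothetical): top-level operations funnel through
 * mas_start() and then dispatch on the resulting state:
 *
 *	entry = mas_start(mas);
 *	if (mas_is_none(mas))		// empty tree
 *		return NULL;
 *
 *	if (mas_is_ptr(mas))		// single entry stored at index 0
 *		return entry;
 *
 *	// otherwise mas->node is the safe root and the walk descends
 */
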
/*
 * ma_data_end() - Find the end of the data in a node.
 * @node: The maple node
 * @type: The maple node type
 * @pivots: The array of pivots in the node
 * @max: The maximum value in the node
 *
 * Uses metadata to find the end of the data when possible.
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char ma_data_end(struct maple_node *node,
					enum maple_type type,
					unsigned long *pivots,
					unsigned long max)
{
	unsigned char offset;

	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_data_end() - Find the end of the data (slot).
 * @mas: the maple state
 *
 * This method is optimized to check the metadata of a node if the node type
 * supports data end metadata.
 *
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char mas_data_end(struct ma_state *mas)
{
	enum maple_type type;
	struct maple_node *node;
	unsigned char offset;
	unsigned long *pivots;

	type = mte_node_type(mas->node);
	node = mas_mn(mas);
	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	pivots = ma_pivots(node, type);
	if (unlikely(ma_dead_node(node)))
		return 0;

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == mas->max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
 * @mas: the maple state
 *
 * Return: The maximum gap in the leaf.
 */
static unsigned long mas_leaf_max_gap(struct ma_state *mas)
{
	enum maple_type mt;
	unsigned long pstart, gap, max_gap;
	struct maple_node *mn;
	unsigned long *pivots;
	void __rcu **slots;
	unsigned char i;
	unsigned char max_piv;

	mt = mte_node_type(mas->node);
	mn = mas_mn(mas);
	slots = ma_slots(mn, mt);
	max_gap = 0;
	if (unlikely(ma_is_dense(mt))) {
		gap = 0;
		for (i = 0; i < mt_slots[mt]; i++) {
			if (slots[i]) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
			} else {
				gap++;
			}
		}
		if (gap > max_gap)
			max_gap = gap;
		return max_gap;
	}

	/*
	 * Checking the first implied pivot optimizes the loop below, and
	 * slot 1 may be skipped if there is a gap in slot 0.
	 */
	pivots = ma_pivots(mn, mt);
	if (likely(!slots[0])) {
		max_gap = pivots[0] - mas->min + 1;
		i = 2;
	} else {
		i = 1;
	}

	/* reduce max_piv as the special case is checked before the loop */
	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
	/*
	 * Check end implied pivot which can only be a gap on the right most
	 * node.
	 */
	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
		gap = ULONG_MAX - pivots[max_piv];
		if (gap > max_gap)
			max_gap = gap;
	}

	for (; i <= max_piv; i++) {
		/* data == no gap. */
		if (likely(slots[i]))
			continue;

		pstart = pivots[i - 1];
		gap = pivots[i] - pstart;
		if (gap > max_gap)
			max_gap = gap;

		/* There cannot be two gaps in a row. */
		i++;
	}
	return max_gap;
}

/*
 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
 * @node: The maple node
 * @gaps: The pointer to the gaps
 * @mt: The maple node type
 * @off: Pointer to store the offset location of the gap.
 *
 * Uses the metadata data end to scan backwards across set gaps.
 *
 * Return: The maximum gap value
 */
static inline unsigned long
ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
	   unsigned char *off)
{
	unsigned char offset, i;
	unsigned long max_gap = 0;

	i = offset = ma_meta_end(node, mt);
	do {
		if (gaps[i] > max_gap) {
			max_gap = gaps[i];
			offset = i;
		}
	} while (i--);

	*off = offset;
	return max_gap;
}

/*
 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
 * @mas: The maple state.
 *
 * If the metadata gap is set to MAPLE_ARANGE64_META_MAX, there is no gap.
 *
 * Return: The gap value.
 */
static inline unsigned long mas_max_gap(struct ma_state *mas)
{
	unsigned long *gaps;
	unsigned char offset;
	enum maple_type mt;
	struct maple_node *node;

	mt = mte_node_type(mas->node);
	if (ma_is_leaf(mt))
		return mas_leaf_max_gap(mas);

	node = mas_mn(mas);
	offset = ma_meta_gap(node, mt);
	if (offset == MAPLE_ARANGE64_META_MAX)
		return 0;

	gaps = ma_gaps(node, mt);
	return gaps[offset];
}

/*
 * mas_parent_gap() - Set the parent gap and any gaps above, as needed
 * @mas: The maple state
 * @offset: The gap offset in the parent to set
 * @new: The new gap value.
 *
 * Set the parent gap then continue to set the gap upwards, using the metadata
 * of the parent to see if it is necessary to check the node above.
 */
static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
				  unsigned long new)
{
	unsigned long meta_gap = 0;
	struct maple_node *pnode;
	struct maple_enode *penode;
	unsigned long *pgaps;
	unsigned char meta_offset;
	enum maple_type pmt;

	pnode = mte_parent(mas->node);
	pmt = mas_parent_enum(mas, mas->node);
	penode = mt_mk_node(pnode, pmt);
	pgaps = ma_gaps(pnode, pmt);

ascend:
	meta_offset = ma_meta_gap(pnode, pmt);
	if (meta_offset == MAPLE_ARANGE64_META_MAX)
		meta_gap = 0;
	else
		meta_gap = pgaps[meta_offset];

	pgaps[offset] = new;

	if (meta_gap == new)
		return;

	if (offset != meta_offset) {
		if (meta_gap > new)
			return;

		ma_set_meta_gap(pnode, pmt, offset);
	} else if (new < meta_gap) {
		meta_offset = 15;
		new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
		ma_set_meta_gap(pnode, pmt, meta_offset);
	}

	if (ma_is_root(pnode))
		return;

	/* Go to the parent node. */
	pnode = mte_parent(penode);
	pmt = mas_parent_enum(mas, penode);
	pgaps = ma_gaps(pnode, pmt);
	offset = mte_parent_slot(penode);
	penode = mt_mk_node(pnode, pmt);
	goto ascend;
}

/*
 * mas_update_gap() - Update a node's gaps and propagate up if necessary.
 * @mas: the maple state.
 */
static inline void mas_update_gap(struct ma_state *mas)
{
	unsigned char pslot;
	unsigned long p_gap;
	unsigned long max_gap;

	if (!mt_is_alloc(mas->tree))
		return;

	if (mte_is_root(mas->node))
		return;

	max_gap = mas_max_gap(mas);

	pslot = mte_parent_slot(mas->node);
	p_gap = ma_gaps(mte_parent(mas->node),
			mas_parent_enum(mas, mas->node))[pslot];

	if (p_gap != max_gap)
		mas_parent_gap(mas, pslot, max_gap);
}

/*
 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
 * @parent with the slot encoded.
 * @mas: the maple state (for the tree)
 * @parent: the maple encoded node containing the children.
 */
static inline void mas_adopt_children(struct ma_state *mas,
				      struct maple_enode *parent)
{
	enum maple_type type = mte_node_type(parent);
	struct maple_node *node = mas_mn(mas);
	void __rcu **slots = ma_slots(node, type);
	unsigned long *pivots = ma_pivots(node, type);
	struct maple_enode *child;
	unsigned char offset;

	offset = ma_data_end(node, type, pivots, mas->max);
	do {
		child = mas_slot_locked(mas, slots, offset);
		mte_set_parent(child, parent, offset);
	} while (offset--);
}

/*
 * mas_replace() - Replace a maple node in the tree with mas->node.  Uses the
 * parent encoding to locate the maple node in the tree.
 * @mas: the ma_state to use for operations.
 * @advanced: boolean to adopt the child nodes and free the old node (false) or
 * leave the node (true) and handle the adoption and free elsewhere.
 */
static inline void mas_replace(struct ma_state *mas, bool advanced)
	__must_hold(mas->tree->lock)
{
	struct maple_node *mn = mas_mn(mas);
	struct maple_enode *old_enode;
	unsigned char offset = 0;
	void __rcu **slots = NULL;

	if (ma_is_root(mn)) {
		old_enode = mas_root_locked(mas);
	} else {
		offset = mte_parent_slot(mas->node);
		slots = ma_slots(mte_parent(mas->node),
				 mas_parent_enum(mas, mas->node));
		old_enode = mas_slot_locked(mas, slots, offset);
	}

	if (!advanced && !mte_is_leaf(mas->node))
		mas_adopt_children(mas, mas->node);

	if (mte_is_root(mas->node)) {
		mn->parent = ma_parent_ptr(
			((unsigned long)mas->tree | MA_ROOT_PARENT));
		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
		mas_set_height(mas);
	} else {
		rcu_assign_pointer(slots[offset], mas->node);
	}

	if (!advanced)
		mas_free(mas, old_enode);
}

/*
 * mas_new_child() - Find the new child of a node.
 * @mas: the maple state
 * @child: the maple state to store the child.
 */
static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
	__must_hold(mas->tree->lock)
{
	enum maple_type mt;
	unsigned char offset;
	unsigned char end;
	unsigned long *pivots;
	struct maple_enode *entry;
	struct maple_node *node;
	void __rcu **slots;

	mt = mte_node_type(mas->node);
	node = mas_mn(mas);
	slots = ma_slots(node, mt);
	pivots = ma_pivots(node, mt);
	end = ma_data_end(node, mt, pivots, mas->max);
	for (offset = mas->offset; offset <= end; offset++) {
		entry = mas_slot_locked(mas, slots, offset);
		if (mte_parent(entry) == node) {
			*child = *mas;
			mas->offset = offset + 1;
			child->offset = offset;
			mas_descend(child);
			child->offset = 0;
			return true;
		}
	}
	return false;
}

/*
 * mab_shift_right() - Shift the data in mab right.  Note, does not clean out
 * the old data or set b_node->b_end.
 * @b_node: the maple_big_node
 * @shift: the shift count
 */
static inline void mab_shift_right(struct maple_big_node *b_node,
				   unsigned char shift)
{
	unsigned long size = b_node->b_end * sizeof(unsigned long);

	memmove(b_node->pivot + shift, b_node->pivot, size);
	memmove(b_node->slot + shift, b_node->slot, size);
	if (b_node->type == maple_arange_64)
		memmove(b_node->gap + shift, b_node->gap, size);
}

/*
 * mab_middle_node() - Check if a middle node is needed (unlikely)
 * @b_node: the maple_big_node that contains the data.
 * @split: the potential split location
 * @slot_count: the size that can be stored in a single node being considered.
 *
 * Return: true if a middle node is required.
 */
static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
				   unsigned char slot_count)
{
	unsigned char size = b_node->b_end;

	if (size >= 2 * slot_count)
		return true;

	if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
		return true;

	return false;
}

/*
 * mab_no_null_split() - ensure the split doesn't fall on a NULL
 * @b_node: the maple_big_node with the data
 * @split: the suggested split location
 * @slot_count: the number of slots in the node being considered.
 *
 * Return: the split location.
 */
static inline int mab_no_null_split(struct maple_big_node *b_node,
				    unsigned char split, unsigned char slot_count)
{
	if (!b_node->slot[split]) {
		/*
		 * If the split is less than the max slot && the right side will
		 * still be sufficient, then increment the split on NULL.
		 */
		if ((split < slot_count - 1) &&
		    (b_node->b_end - split) > (mt_min_slots[b_node->type]))
			split++;
		else
			split--;
	}
	return split;
}

/*
 * mab_calc_split() - Calculate the split location and if there needs to be two
 * splits.
 * @mas: The maple state
 * @bn: The maple_big_node with the data
 * @mid_split: The second split, if required.  0 otherwise.
 * @min: The minimum range of the node.
 *
 * Return: The first split location.  The middle split is set in @mid_split.
 */
static inline int mab_calc_split(struct ma_state *mas,
				 struct maple_big_node *bn, unsigned char *mid_split,
				 unsigned long min)
{
	unsigned char b_end = bn->b_end;
	int split = b_end / 2; /* Assume equal split. */
	unsigned char slot_min, slot_count = mt_slots[bn->type];

	/*
	 * To support gap tracking, all NULL entries are kept together and a
	 * node cannot end on a NULL entry, with the exception of the left-most
	 * leaf.  The limitation means that the split of a node must be checked
	 * for this condition and be able to put more data in one direction or
	 * the other.
	 */
	if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
		*mid_split = 0;
		split = b_end - mt_min_slots[bn->type];

		if (!ma_is_leaf(bn->type))
			return split;

		mas->mas_flags |= MA_STATE_REBALANCE;
		if (!bn->slot[split])
			split--;
		return split;
	}

	/*
	 * Although extremely rare, it is possible to enter what is known as
	 * the 3-way split scenario.  The 3-way split comes about by means of a
	 * store of a range that overwrites the end and beginning of two full
	 * nodes.  The result is a set of entries that cannot be stored in 2
	 * nodes.  Sometimes, these two nodes can also be located in different
	 * parent nodes which are also full.  This can carry upwards all the
	 * way to the root in the worst case.
	 */
	if (unlikely(mab_middle_node(bn, split, slot_count))) {
		split = b_end / 3;
		*mid_split = split * 2;
	} else {
		slot_min = mt_min_slots[bn->type];

		*mid_split = 0;
		/*
		 * Avoid having a range less than the slot count unless it
		 * causes one node to be deficient.
		 * NOTE: mt_min_slots is 1 based, b_end and split are zero.
		 */
		while (((bn->pivot[split] - min) < slot_count - 1) &&
		       (split < slot_count - 1) && (b_end - split > slot_min))
			split++;
	}

	/* Avoid ending a node on a NULL entry */
	split = mab_no_null_split(bn, split, slot_count);

	if (unlikely(*mid_split))
		*mid_split = mab_no_null_split(bn, *mid_split, slot_count);

	return split;
}

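/*
 * Worked example (numbers invented): splitting a full leaf with b_end == 16
 * and slot_count == 16 starts at split == 8; if bn->slot[8] is NULL,
 * mab_no_null_split() advances the split to 9 since the right side still
 * keeps more than mt_min_slots entries.  The 3-way case needs roughly two
 * full nodes' worth of data: b_end == 32 yields split == 10 and
 * mid_split == 20, i.e. three nodes of about a third each.
 */
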
/*
 * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
 * and set @b_node->b_end to the next free slot.
 * @mas: The maple state
 * @mas_start: The starting slot to copy
 * @mas_end: The end slot to copy (inclusively)
 * @b_node: The maple_big_node to place the data
 * @mab_start: The starting location in maple_big_node to store the data.
 */
static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
			      unsigned char mas_end, struct maple_big_node *b_node,
			      unsigned char mab_start)
{
	enum maple_type mt;
	struct maple_node *node;
	void __rcu **slots;
	unsigned long *pivots, *gaps;
	int i = mas_start, j = mab_start;
	unsigned char piv_end;

	node = mas_mn(mas);
	mt = mte_node_type(mas->node);
	pivots = ma_pivots(node, mt);
	if (!i) {
		b_node->pivot[j] = pivots[i++];
		if (unlikely(i > mas_end))
			goto complete;
		j++;
	}

	piv_end = min(mas_end, mt_pivots[mt]);
	for (; i < piv_end; i++, j++) {
		b_node->pivot[j] = pivots[i];
		if (unlikely(!b_node->pivot[j]))
			break;

		if (unlikely(mas->max == b_node->pivot[j]))
			goto complete;
	}

	if (likely(i <= mas_end))
		b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);

complete:
	b_node->b_end = ++j;
	j -= mab_start;
	slots = ma_slots(node, mt);
	memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
	if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
		gaps = ma_gaps(node, mt);
		memcpy(b_node->gap + mab_start, gaps + mas_start,
		       sizeof(unsigned long) * j);
	}
}

/*
 * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
 * @mas: The maple state
 * @node: The maple node
 * @pivots: pointer to the maple node pivots
 * @mt: The maple type
 * @end: The assumed end
 *
 * Note, end may be incremented within this function but not modified at the
 * source.  This is fine since the metadata is the last thing to be stored in
 * a node during a write.
 */
static inline void mas_leaf_set_meta(struct ma_state *mas,
				     struct maple_node *node, unsigned long *pivots,
				     enum maple_type mt, unsigned char end)
{
	/* There is no room for metadata already */
	if (mt_pivots[mt] <= end)
		return;

	if (pivots[end] && pivots[end] < mas->max)
		end++;

	if (end < mt_slots[mt] - 1)
		ma_set_meta(node, mt, 0, end);
}

/*
 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
 * @b_node: the maple_big_node that has the data
 * @mab_start: the start location in @b_node.
 * @mab_end: The end location in @b_node (inclusively)
 * @mas: The maple state with the maple encoded node.
 * @new_max: Update the maple state maximum.
 */
static inline void mab_mas_cp(struct maple_big_node *b_node,
			      unsigned char mab_start, unsigned char mab_end,
			      struct ma_state *mas, bool new_max)
{
	int i, j = 0;
	enum maple_type mt = mte_node_type(mas->node);
	struct maple_node *node = mte_to_node(mas->node);
	void __rcu **slots = ma_slots(node, mt);
	unsigned long *pivots = ma_pivots(node, mt);
	unsigned long *gaps = NULL;
	unsigned char end;

	if (mab_end - mab_start > mt_pivots[mt])
		mab_end--;

	if (!pivots[mt_pivots[mt] - 1])
		slots[mt_pivots[mt]] = NULL;

	i = mab_start;
	do {
		pivots[j++] = b_node->pivot[i++];
	} while (i <= mab_end && likely(b_node->pivot[i]));

	memcpy(slots, b_node->slot + mab_start,
	       sizeof(void *) * (i - mab_start));

	if (new_max)
		mas->max = b_node->pivot[i - 1];

	end = j - 1;
	if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
		unsigned long max_gap = 0;
		unsigned char offset = 15;

		gaps = ma_gaps(node, mt);
		do {
			gaps[--j] = b_node->gap[--i];
			if (gaps[j] > max_gap) {
				offset = j;
				max_gap = gaps[j];
			}
		} while (j);

		ma_set_meta(node, mt, offset, end);
	} else {
		mas_leaf_set_meta(mas, node, pivots, mt, end);
	}
}

/*
 * mas_descend_adopt() - Descend through a sub-tree and adopt children.
 * @mas: the maple state with the maple encoded node of the sub-tree.
 *
 * Descend through a sub-tree and adopt children who do not have the correct
 * parents set.  Follow the parents which have the correct parents as they are
 * the new entries which need to be followed to find other incorrectly set
 * parents.
 */
static inline void mas_descend_adopt(struct ma_state *mas)
{
	struct ma_state list[3], next[3];
	int i, n;

	/*
	 * At each level there may be up to 3 correct parent pointers which
	 * indicates the new nodes which need to be walked to find any new
	 * nodes at a lower level.
	 */
	for (i = 0; i < 3; i++) {
		list[i] = *mas;
		list[i].offset = 0;
		next[i].offset = 0;
	}
	next[0] = *mas;

	while (!mte_is_leaf(list[0].node)) {
		n = 0;
		for (i = 0; i < 3; i++) {
			if (mas_is_none(&list[i]))
				continue;

			if (i && list[i-1].node == list[i].node)
				continue;

			while ((n < 3) && (mas_new_child(&list[i], &next[n])))
				n++;

			mas_adopt_children(&list[i], list[i].node);
		}

		while (n < 3)
			next[n++].node = MAS_NONE;

		/* descend by setting the list to the children */
		for (i = 0; i < 3; i++)
			list[i] = next[i];
	}
}

/*
 * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
 * @mas: The maple state
 * @end: The maple node end
 * @mt: The maple node type
 */
static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
				      enum maple_type mt)
{
	if (!(mas->mas_flags & MA_STATE_BULK))
		return;

	if (mte_is_root(mas->node))
		return;

	if (end > mt_min_slots[mt]) {
		mas->mas_flags &= ~MA_STATE_REBALANCE;
		return;
	}
}

/*
 * mas_store_b_node() - Store an @entry into the b_node while also copying the
 * data from a maple encoded node.
 * @wr_mas: the maple write state
 * @b_node: the maple_big_node to fill with data
 * @offset_end: the offset to end copying
 *
 * The actual end of the data is stored in @b_node->b_end.
 */
static inline void mas_store_b_node(struct ma_wr_state *wr_mas,
		struct maple_big_node *b_node, unsigned char offset_end)
{
	unsigned char slot;
	unsigned char b_end;
	/* Possible underflow of piv will wrap back to 0 before use. */
	unsigned long piv;
	struct ma_state *mas = wr_mas->mas;

	b_node->type = wr_mas->type;
	b_end = 0;
	slot = mas->offset;
	if (slot) {
		/* Copy start data up to insert. */
		mas_mab_cp(mas, 0, slot - 1, b_node, 0);
		b_end = b_node->b_end;
		piv = b_node->pivot[b_end - 1];
	} else {
		piv = mas->min - 1;
	}

	if (piv + 1 < mas->index) {
		/* Handle range starting after old range */
		b_node->slot[b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = mas->index - 1 - piv;
		b_node->pivot[b_end++] = mas->index - 1;
	}

	/* Store the new entry. */
	mas->offset = b_end;
	b_node->slot[b_end] = wr_mas->entry;
	b_node->pivot[b_end] = mas->last;

	/* Appended. */
	if (mas->last >= mas->max)
		goto b_end;

	/* Handle new range ending before old range ends */
	piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
	if (piv > mas->last) {
		if (piv == ULONG_MAX)
			mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);

		if (offset_end != slot)
			wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
							  offset_end);

		b_node->slot[++b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = piv - mas->last + 1;
		b_node->pivot[b_end] = piv;
	}

	slot = offset_end + 1;
	if (slot > wr_mas->node_end)
		goto b_end;

	/* Copy end data to the end of the node. */
	mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
	b_node->b_end--;
	return;

b_end:
	b_node->b_end = b_end;
}

/*
 * mas_prev_sibling() - Find the previous node with the same parent.
 * @mas: the maple state
 *
 * Return: True if there is a previous sibling, false otherwise.
 */
static inline bool mas_prev_sibling(struct ma_state *mas)
{
	unsigned int p_slot = mte_parent_slot(mas->node);

	if (mte_is_root(mas->node))
		return false;

	if (!p_slot)
		return false;

	mas_ascend(mas);
	mas->offset = p_slot - 1;
	mas_descend(mas);
	return true;
}

/*
 * mas_next_sibling() - Find the next node with the same parent.
 * @mas: the maple state
 *
 * Return: true if there is a next sibling, false otherwise.
 */
static inline bool mas_next_sibling(struct ma_state *mas)
{
	MA_STATE(parent, mas->tree, mas->index, mas->last);

	if (mte_is_root(mas->node))
		return false;

	parent = *mas;
	mas_ascend(&parent);
	parent.offset = mte_parent_slot(mas->node) + 1;
	if (parent.offset > mas_data_end(&parent))
		return false;

	*mas = parent;
	mas_descend(mas);
	return true;
}

/*
 * mte_node_or_none() - Return the encoded node or MAS_NONE.
 * @enode: The encoded maple node.
 *
 * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
 *
 * Return: @enode or MAS_NONE
 */
static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
{
	if (enode)
		return enode;

	return ma_enode_ptr(MAS_NONE);
}

/*
 * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
 * @wr_mas: The maple write state
 *
 * Uses mas_slot_locked() and does not need to worry about dead nodes.
 */
static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	unsigned char count;
	unsigned char offset;
	unsigned long index, min, max;

	if (unlikely(ma_is_dense(wr_mas->type))) {
		wr_mas->r_max = wr_mas->r_min = mas->index;
		mas->offset = mas->index = mas->min;
		return;
	}

	wr_mas->node = mas_mn(wr_mas->mas);
	wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
	count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
					       wr_mas->pivots, mas->max);
	offset = mas->offset;
	min = mas_safe_min(mas, wr_mas->pivots, offset);
	if (unlikely(offset == count))
		goto max;

	max = wr_mas->pivots[offset];
	index = mas->index;
	if (unlikely(index <= max))
		goto done;

	if (unlikely(!max && offset))
		goto max;

	min = max + 1;
	while (++offset < count) {
		max = wr_mas->pivots[offset];
		if (index <= max)
			goto done;
		else if (unlikely(!max))
			break;

		min = max + 1;
	}

max:
	max = mas->max;
done:
	wr_mas->r_max = max;
	wr_mas->r_min = min;
	wr_mas->offset_end = mas->offset = offset;
}

/*
 * mas_topiary_range() - Add a range of slots to the topiary.
 * @mas: The maple state
 * @destroy: The topiary to add the slots (usually destroy)
 * @start: The starting slot inclusively
 * @end: The end slot inclusively
 */
static inline void mas_topiary_range(struct ma_state *mas,
				     struct ma_topiary *destroy, unsigned char start, unsigned char end)
{
	void __rcu **slots;
	unsigned char offset;

	MT_BUG_ON(mas->tree, mte_is_leaf(mas->node));
	slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
	for (offset = start; offset <= end; offset++) {
		struct maple_enode *enode = mas_slot_locked(mas, slots, offset);

		if (mte_dead_node(enode))
			continue;

		mat_add(destroy, enode);
	}
}

/*
 * mast_topiary() - Add the portions of the tree to the removal list; either to
 * be freed or discarded (destroy walk).
 * @mast: The maple_subtree_state.
 */
static inline void mast_topiary(struct maple_subtree_state *mast)
{
	MA_WR_STATE(wr_mas, mast->orig_l, NULL);
	unsigned char r_start, r_end;
	unsigned char l_start, l_end;
	void __rcu **l_slots, **r_slots;

	wr_mas.type = mte_node_type(mast->orig_l->node);
	mast->orig_l->index = mast->orig_l->last;
	mas_wr_node_walk(&wr_mas);
	l_start = mast->orig_l->offset + 1;
	l_end = mas_data_end(mast->orig_l);
	r_start = 0;
	r_end = mast->orig_r->offset;

	if (r_end)
		r_end--;

	l_slots = ma_slots(mas_mn(mast->orig_l),
			   mte_node_type(mast->orig_l->node));

	r_slots = ma_slots(mas_mn(mast->orig_r),
			   mte_node_type(mast->orig_r->node));

	if ((l_start < l_end) &&
	    mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) {
		l_start++;
	}

	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) {
		if (r_end)
			r_end--;
	}

	if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node))
		return;

	/* At the node where left and right sides meet, add the parts between */
	if (mast->orig_l->node == mast->orig_r->node) {
		return mas_topiary_range(mast->orig_l, mast->destroy,
					 l_start, r_end);
	}

	/* mast->orig_r is different and consumed. */
	if (mte_is_leaf(mast->orig_r->node))
		return;

	if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end)))
		l_end--;

	if (l_start <= l_end)
		mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end);

	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start)))
		r_start++;

	if (r_start <= r_end)
		mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end);
}

2397 * mast_rebalance_next() - Rebalance against the next node
2398 * @mast: The maple subtree state
2399 * @old_r: The encoded maple node to the right (next node).
2401 static inline void mast_rebalance_next(struct maple_subtree_state *mast)
2403 unsigned char b_end = mast->bn->b_end;
2405 mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
2407 mast->orig_r->last = mast->orig_r->max;
2411 * mast_rebalance_prev() - Rebalance against the previous node
2412 * @mast: The maple subtree state
2415 static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
2417 unsigned char end = mas_data_end(mast->orig_l) + 1;
2418 unsigned char b_end = mast->bn->b_end;
2420 mab_shift_right(mast->bn, end);
2421 mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
2422 mast->l->min = mast->orig_l->min;
2423 mast->orig_l->index = mast->orig_l->min;
2424 mast->bn->b_end = end + b_end;
2425 mast->l->offset += end;
2429 * mast_spanning_rebalance() - Rebalance nodes with the nearest neighbour,
2430 * favouring the node to the right.  Check the nodes to the right, then the
2431 * left, at each level upwards until the root is reached.  Free and destroy as needed.
2432 * Data is copied into the @mast->bn.
2433 * @mast: The maple_subtree_state.
2436 bool mast_spanning_rebalance(struct maple_subtree_state *mast)
2438 struct ma_state r_tmp = *mast->orig_r;
2439 struct ma_state l_tmp = *mast->orig_l;
2440 struct maple_enode *ancestor = NULL;
2441 unsigned char start, end;
2442 unsigned char depth = 0;
2447 mas_ascend(mast->orig_r);
2448 mas_ascend(mast->orig_l);
2451 (mast->orig_r->node == mast->orig_l->node)) {
2452 ancestor = mast->orig_r->node;
2453 end = mast->orig_r->offset - 1;
2454 start = mast->orig_l->offset + 1;
2457 if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
2459 ancestor = mast->orig_r->node;
2463 mast->orig_r->offset++;
2465 mas_descend(mast->orig_r);
2466 mast->orig_r->offset = 0;
2470 mast_rebalance_next(mast);
2472 unsigned char l_off = 0;
2473 struct maple_enode *child = r_tmp.node;
2476 if (ancestor == r_tmp.node)
2482 if (l_off < r_tmp.offset)
2483 mas_topiary_range(&r_tmp, mast->destroy,
2484 l_off, r_tmp.offset);
2486 if (l_tmp.node != child)
2487 mat_add(mast->free, child);
2489 } while (r_tmp.node != ancestor);
2491 *mast->orig_l = l_tmp;
2494 } else if (mast->orig_l->offset != 0) {
2496 ancestor = mast->orig_l->node;
2497 end = mas_data_end(mast->orig_l);
2500 mast->orig_l->offset--;
2502 mas_descend(mast->orig_l);
2503 mast->orig_l->offset =
2504 mas_data_end(mast->orig_l);
2508 mast_rebalance_prev(mast);
2510 unsigned char r_off;
2511 struct maple_enode *child = l_tmp.node;
2514 if (ancestor == l_tmp.node)
2517 r_off = mas_data_end(&l_tmp);
2519 if (l_tmp.offset < r_off)
2522 if (l_tmp.offset < r_off)
2523 mas_topiary_range(&l_tmp, mast->destroy,
2524 l_tmp.offset, r_off);
2526 if (r_tmp.node != child)
2527 mat_add(mast->free, child);
2529 } while (l_tmp.node != ancestor);
2531 *mast->orig_r = r_tmp;
2534 } while (!mte_is_root(mast->orig_r->node));
2536 *mast->orig_r = r_tmp;
2537 *mast->orig_l = l_tmp;
2542 * mast_ascend_free() - Add current original maple state nodes to the free list
2544 * @mast: the maple subtree state.
2546 * Ascend the original left and right sides and add the previous nodes to the
2547 * free list. Set the slots to point to the correct location in the new nodes.
2550 mast_ascend_free(struct maple_subtree_state *mast)
2552 MA_WR_STATE(wr_mas, mast->orig_r, NULL);
2553 struct maple_enode *left = mast->orig_l->node;
2554 struct maple_enode *right = mast->orig_r->node;
2556 mas_ascend(mast->orig_l);
2557 mas_ascend(mast->orig_r);
2558 mat_add(mast->free, left);
2561 mat_add(mast->free, right);
2563 mast->orig_r->offset = 0;
2564 mast->orig_r->index = mast->r->max;
2565 /* last should be larger than or equal to index */
2566 if (mast->orig_r->last < mast->orig_r->index)
2567 mast->orig_r->last = mast->orig_r->index;
2569 * The node may not contain the value so set the slot to ensure all
2570 * of the node's contents are freed or destroyed.
2572 wr_mas.type = mte_node_type(mast->orig_r->node);
2573 mas_wr_node_walk(&wr_mas);
2574 /* Set up the left side of things */
2575 mast->orig_l->offset = 0;
2576 mast->orig_l->index = mast->l->min;
2577 wr_mas.mas = mast->orig_l;
2578 wr_mas.type = mte_node_type(mast->orig_l->node);
2579 mas_wr_node_walk(&wr_mas);
2581 mast->bn->type = wr_mas.type;
2585 * mas_new_ma_node() - Create and return a new maple node. Helper function.
2586 * @mas: the maple state with the allocations.
2587 * @b_node: the maple_big_node with the type encoding.
2589 * Use the node type from the maple_big_node to allocate a new node from the
2590 * ma_state. This function exists mainly for code readability.
2592 * Return: A new maple encoded node
2594 static inline struct maple_enode
2595 *mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
2597 return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
2601 * mas_mab_to_node() - Set up right and middle nodes
2603 * @mas: the maple state that contains the allocations.
2604 * @b_node: the node which contains the data.
2605 * @left: The pointer which will have the left node
2606 * @right: The pointer which may have the right node
2607 * @middle: the pointer which may have the middle node (rare)
2608 * @mid_split: the split location for the middle node
 * @min: the minimum value of the subtree, needed to calculate the split
2610 * Return: the split location of the left node.
2612 static inline unsigned char mas_mab_to_node(struct ma_state *mas,
2613 struct maple_big_node *b_node, struct maple_enode **left,
2614 struct maple_enode **right, struct maple_enode **middle,
2615 unsigned char *mid_split, unsigned long min)
2617 unsigned char split = 0;
2618 unsigned char slot_count = mt_slots[b_node->type];
2620 *left = mas_new_ma_node(mas, b_node);
2625 if (b_node->b_end < slot_count) {
2626 split = b_node->b_end;
2628 split = mab_calc_split(mas, b_node, mid_split, min);
2629 *right = mas_new_ma_node(mas, b_node);
2633 *middle = mas_new_ma_node(mas, b_node);
2640 * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
2642 * @b_node: the big node to add the entry to
2643 * @mas: the maple state to get the pivot (mas->max)
2644 * @entry: the entry to add; if NULL, nothing happens
2646 static inline void mab_set_b_end(struct maple_big_node *b_node,
2647 struct ma_state *mas,
2653 b_node->slot[b_node->b_end] = entry;
2654 if (mt_is_alloc(mas->tree))
2655 b_node->gap[b_node->b_end] = mas_max_gap(mas);
2656 b_node->pivot[b_node->b_end++] = mas->max;
2660 * mas_set_split_parent() - combine_then_separate helper function. Sets the parent
2661 * of @mas->node to either @left or @right, depending on @slot and @split
2663 * @mas: the maple state with the node that needs a parent
2664 * @left: possible parent 1
2665 * @right: possible parent 2
2666 * @slot: the slot where mas->node was placed
2667 * @split: the split location between @left and @right
2669 static inline void mas_set_split_parent(struct ma_state *mas,
2670 struct maple_enode *left,
2671 struct maple_enode *right,
2672 unsigned char *slot, unsigned char split)
2674 if (mas_is_none(mas))
2677 if ((*slot) <= split)
2678 mte_set_parent(mas->node, left, *slot);
2680 mte_set_parent(mas->node, right, (*slot) - split - 1);
2686 * mte_mid_split_check() - Check if the next node passes the mid-split
2687 * @l: Pointer to the left encoded maple node.
2688 * @r: Pointer to the right encoded maple node.
2689 * @right: The encoded maple node to the right.
2691 * @split: The split location.
2692 * @mid_split: The middle split.
2694 static inline void mte_mid_split_check(struct maple_enode **l,
2695 struct maple_enode **r,
2696 struct maple_enode *right,
2698 unsigned char *split,
2699 unsigned char mid_split)
2704 if (slot < mid_split)
2713 * mast_set_split_parents() - Helper function to set three nodes' parents.  Slot
2714 * is taken from @mast->l.
2715 * @mast: the maple subtree state
2716 * @left: the left node
 * @middle: the middle node
2717 * @right: the right node
2718 * @split: the split location.
 * @mid_split: the split location for the middle node
2720 static inline void mast_set_split_parents(struct maple_subtree_state *mast,
2721 struct maple_enode *left,
2722 struct maple_enode *middle,
2723 struct maple_enode *right,
2724 unsigned char split,
2725 unsigned char mid_split)
2728 struct maple_enode *l = left;
2729 struct maple_enode *r = right;
2731 if (mas_is_none(mast->l))
2737 slot = mast->l->offset;
2739 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2740 mas_set_split_parent(mast->l, l, r, &slot, split);
2742 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2743 mas_set_split_parent(mast->m, l, r, &slot, split);
2745 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2746 mas_set_split_parent(mast->r, l, r, &slot, split);
2750 * mas_wmb_replace() - Write memory barrier and replace
2751 * @mas: The maple state
2752 * @free: the maple topiary list of nodes to free
2753 * @destroy: The maple topiary list of nodes to destroy (walk and free)
2755 * Updates gap as necessary.
2757 static inline void mas_wmb_replace(struct ma_state *mas,
2758 struct ma_topiary *free,
2759 struct ma_topiary *destroy)
2761 /* All nodes must see old data as dead prior to replacing that data */
2762 smp_wmb(); /* Needed for RCU */
2764 /* Insert the new data in the tree */
2765 mas_replace(mas, true);
2767 if (!mte_is_leaf(mas->node))
2768 mas_descend_adopt(mas);
2770 mas_mat_free(mas, free);
2773 mas_mat_destroy(mas, destroy);
2775 if (mte_is_leaf(mas->node))
2778 mas_update_gap(mas);
2782 * mast_new_root() - Set a new tree root during subtree creation
2783 * @mast: The maple subtree state
2784 * @mas: The maple state
2786 static inline void mast_new_root(struct maple_subtree_state *mast,
2787 struct ma_state *mas)
2789 mas_mn(mast->l)->parent =
2790 ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT));
2791 if (!mte_dead_node(mast->orig_l->node) &&
2792 !mte_is_root(mast->orig_l->node)) {
2794 mast_ascend_free(mast);
2796 } while (!mte_is_root(mast->orig_l->node));
2798 if ((mast->orig_l->node != mas->node) &&
2799 (mast->l->depth > mas_mt_height(mas))) {
2800 mat_add(mast->free, mas->node);
2805 * mast_cp_to_nodes() - Copy data out to nodes.
2806 * @mast: The maple subtree state
2807 * @left: The left encoded maple node
2808 * @middle: The middle encoded maple node
2809 * @right: The right encoded maple node
2810 * @split: The location to split between left and (middle ? middle : right)
2811 * @mid_split: The location to split between middle and right.
2813 static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
2814 struct maple_enode *left, struct maple_enode *middle,
2815 struct maple_enode *right, unsigned char split, unsigned char mid_split)
2817 bool new_lmax = true;
2819 mast->l->node = mte_node_or_none(left);
2820 mast->m->node = mte_node_or_none(middle);
2821 mast->r->node = mte_node_or_none(right);
2823 mast->l->min = mast->orig_l->min;
2824 if (split == mast->bn->b_end) {
2825 mast->l->max = mast->orig_r->max;
2829 mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
2832 mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
2833 mast->m->min = mast->bn->pivot[split] + 1;
2837 mast->r->max = mast->orig_r->max;
2839 mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
2840 mast->r->min = mast->bn->pivot[split] + 1;
2845 * mast_combine_cp_left() - Copy in the original left side of the tree into the
2846 * combined data set in the maple subtree state big node.
2847 * @mast: The maple subtree state
2849 static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
2851 unsigned char l_slot = mast->orig_l->offset;
2856 mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
2860 * mast_combine_cp_right() - Copy in the original right side of the tree into the
2861 * combined data set in the maple subtree state big node.
2862 * @mast: The maple subtree state
2864 static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
2866 if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
2869 mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
2870 mt_slot_count(mast->orig_r->node), mast->bn,
2872 mast->orig_r->last = mast->orig_r->max;
2876 * mast_sufficient() - Check if the maple subtree state has enough data in the big
2877 * node to create at least one sufficient node
2878 * @mast: the maple subtree state
2880 static inline bool mast_sufficient(struct maple_subtree_state *mast)
2882 if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
2889 * mast_overflow() - Check if there is too much data in the subtree state for a
2891 * @mast: The maple subtree state
2893 static inline bool mast_overflow(struct maple_subtree_state *mast)
2895 if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
2901 static inline void *mtree_range_walk(struct ma_state *mas)
2903 unsigned long *pivots;
2904 unsigned char offset;
2905 struct maple_node *node;
2906 struct maple_enode *next, *last;
2907 enum maple_type type;
2910 unsigned long max, min;
2911 unsigned long prev_max, prev_min;
2919 node = mte_to_node(next);
2920 type = mte_node_type(next);
2921 pivots = ma_pivots(node, type);
2922 end = ma_data_end(node, type, pivots, max);
2923 if (unlikely(ma_dead_node(node)))
2926 if (pivots[offset] >= mas->index) {
2929 max = pivots[offset];
2935 } while ((offset < end) && (pivots[offset] < mas->index));
2938 min = pivots[offset - 1] + 1;
2940 if (likely(offset < end && pivots[offset]))
2941 max = pivots[offset];
2944 slots = ma_slots(node, type);
2945 next = mt_slot(mas->tree, slots, offset);
2946 if (unlikely(ma_dead_node(node)))
2948 } while (!ma_is_leaf(type));
2950 mas->offset = offset;
2953 mas->min = prev_min;
2954 mas->max = prev_max;
2956 return (void *) next;
2964 * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
2965 * @mas: The starting maple state
2966 * @mast: The maple_subtree_state, keeps track of 4 maple states.
2967 * @count: The estimated count of iterations needed.
2969 * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
2970 * is hit. First @b_node is split into two entries which are inserted into the
2971 * next iteration of the loop. @b_node is returned populated with the final
2972 * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the
2973 * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
2974 * to account of what has been copied into the new sub-tree. The update of
2975 * orig_l_mas->last is used in mas_consume to find the slots that will need to
2976 * be either freed or destroyed. orig_l_mas->depth keeps track of the height of
2977 * the new sub-tree in case the sub-tree becomes the full tree.
2979 * Return: the number of elements in b_node during the last loop.
2981 static int mas_spanning_rebalance(struct ma_state *mas,
2982 struct maple_subtree_state *mast, unsigned char count)
2984 unsigned char split, mid_split;
2985 unsigned char slot = 0;
2986 struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
2988 MA_STATE(l_mas, mas->tree, mas->index, mas->index);
2989 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
2990 MA_STATE(m_mas, mas->tree, mas->index, mas->index);
2991 MA_TOPIARY(free, mas->tree);
2992 MA_TOPIARY(destroy, mas->tree);
2995 * The tree needs to be rebalanced and leaves need to be kept at the same level.
2996 * Rebalancing is done by use of the ``struct ma_topiary``.
3002 mast->destroy = &destroy;
3003 l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
3005 /* Check if this is not root and has sufficient data. */
3006 if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
3007 unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
3008 mast_spanning_rebalance(mast);
3010 mast->orig_l->depth = 0;
3013 * Each level of the tree is examined and balanced, pushing data to the left or
3014 * right, or rebalancing against left or right nodes is employed to avoid
3015 * rippling up the tree to limit the amount of churn. Once a new sub-section of
3016 * the tree is created, there may be a mix of new and old nodes. The old nodes
3017 * will have the incorrect parent pointers and currently be in two trees: the
3018 * original tree and the partially new tree. To remedy the parent pointers in
3019 * the old tree, the new data is swapped into the active tree and a walk down
3020 * the tree is performed and the parent pointers are updated.
3021 * See mas_descend_adopt() for more information.
3025 mast->bn->type = mte_node_type(mast->orig_l->node);
3026 split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
3027 &mid_split, mast->orig_l->min);
3028 mast_set_split_parents(mast, left, middle, right, split,
3030 mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
3033 * Copy data from next level in the tree to mast->bn from next
3036 memset(mast->bn, 0, sizeof(struct maple_big_node));
3037 mast->bn->type = mte_node_type(left);
3038 mast->orig_l->depth++;
3040 /* Root already stored in l->node. */
3041 if (mas_is_root_limits(mast->l))
3044 mast_ascend_free(mast);
3045 mast_combine_cp_left(mast);
3046 l_mas.offset = mast->bn->b_end;
3047 mab_set_b_end(mast->bn, &l_mas, left);
3048 mab_set_b_end(mast->bn, &m_mas, middle);
3049 mab_set_b_end(mast->bn, &r_mas, right);
3051 /* Copy anything necessary out of the right node. */
3052 mast_combine_cp_right(mast);
3054 mast->orig_l->last = mast->orig_l->max;
3056 if (mast_sufficient(mast))
3059 if (mast_overflow(mast))
3062 /* May be a new root stored in mast->bn */
3063 if (mas_is_root_limits(mast->orig_l))
3066 mast_spanning_rebalance(mast);
3068 /* rebalancing from other nodes may require another loop. */
3073 l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
3074 mte_node_type(mast->orig_l->node));
3075 mast->orig_l->depth++;
3076 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
3077 mte_set_parent(left, l_mas.node, slot);
3079 mte_set_parent(middle, l_mas.node, ++slot);
3082 mte_set_parent(right, l_mas.node, ++slot);
3084 if (mas_is_root_limits(mast->l)) {
3086 mast_new_root(mast, mas);
3088 mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
3091 if (!mte_dead_node(mast->orig_l->node))
3092 mat_add(&free, mast->orig_l->node);
3094 mas->depth = mast->orig_l->depth;
3095 *mast->orig_l = l_mas;
3096 mte_set_node_dead(mas->node);
3098 /* Set up mas for insertion. */
3099 mast->orig_l->depth = mas->depth;
3100 mast->orig_l->alloc = mas->alloc;
3101 *mas = *mast->orig_l;
3102 mas_wmb_replace(mas, &free, &destroy);
3103 mtree_range_walk(mas);
3104 return mast->bn->b_end;
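/*
 * Illustrative sketch (userspace, hypothetical) of the adoption step
 * described above: after the new subtree is swapped into the live tree, a
 * walk down from the new root rewrites the parent pointers of any reused
 * children so no node is left pointing into the old tree.  The real
 * mas_descend_adopt() is iterative and RCU-aware; this toy only shows the
 * idea.
 */
struct toy_tree_node {
	struct toy_tree_node *parent;
	struct toy_tree_node *child[4];
	int nr_child;
};

static void toy_descend_adopt(struct toy_tree_node *node)
{
	int i;

	for (i = 0; i < node->nr_child; i++) {
		node->child[i]->parent = node;	/* re-parent in place */
		toy_descend_adopt(node->child[i]);
	}
}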
3108 * mas_rebalance() - Rebalance a given node.
3109 * @mas: The maple state
3110 * @b_node: The big maple node.
3112 * Rebalance two nodes into a single node or two new nodes that are sufficient.
3113 * Continue upwards until tree is sufficient.
3115 * Return: the number of elements in b_node during the last loop.
3117 static inline int mas_rebalance(struct ma_state *mas,
3118 struct maple_big_node *b_node)
3120 char empty_count = mas_mt_height(mas);
3121 struct maple_subtree_state mast;
3122 unsigned char shift, b_end = ++b_node->b_end;
3124 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3125 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3127 trace_ma_op(__func__, mas);
3130 * Rebalancing occurs if a node is insufficient. Data is rebalanced
3131 * against the node to the right if it exists, otherwise the node to the
3132 * left of this node is rebalanced against this node. If rebalancing
3133 * causes just one node to be produced instead of two, then the parent
3134 * is also examined and rebalanced if it is insufficient. Every level
3135 * tries to combine the data in the same way. If one node contains the
3136 * entire range of the tree, then that node is used as a new root node.
3138 mas_node_count(mas, 1 + empty_count * 3);
3139 if (mas_is_err(mas))
3142 mast.orig_l = &l_mas;
3143 mast.orig_r = &r_mas;
3145 mast.bn->type = mte_node_type(mas->node);
3147 l_mas = r_mas = *mas;
3149 if (mas_next_sibling(&r_mas)) {
3150 mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
3151 r_mas.last = r_mas.index = r_mas.max;
3153 mas_prev_sibling(&l_mas);
3154 shift = mas_data_end(&l_mas) + 1;
3155 mab_shift_right(b_node, shift);
3156 mas->offset += shift;
3157 mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
3158 b_node->b_end = shift + b_end;
3159 l_mas.index = l_mas.last = l_mas.min;
3162 return mas_spanning_rebalance(mas, &mast, empty_count);
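/*
 * Illustrative sketch (userspace, hypothetical toy_* names) of the
 * rebalancing rule above: the insufficient node and a sibling are combined;
 * if the result fits in one node it stays as one, otherwise it is split
 * roughly evenly so both halves are sufficient.
 */
#include <stdio.h>
#include <string.h>

#define TOY_SLOTS 8

static void toy_rebalance(const int *node, int n, const int *sibling, int s)
{
	int big[2 * TOY_SLOTS];	/* analogous to the maple_big_node buffer */
	int total = n + s;

	memcpy(big, node, sizeof(int) * n);
	memcpy(big + n, sibling, sizeof(int) * s);
	if (total <= TOY_SLOTS)
		printf("merged into one node of %d entries\n", total);
	else
		printf("split into nodes of %d and %d entries\n",
		       total / 2, total - total / 2);
}

int main(void)
{
	int left[] = { 1, 2 }, right[] = { 3, 4, 5, 6, 7, 8, 9 };

	toy_rebalance(left, 2, right, 7);	/* split into 4 and 5 */
	return 0;
}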
3166 * mas_destroy_rebalance() - Rebalance the left-most node while destroying the maple tree.
3168 * @mas: The maple state
3169 * @end: The end of the left-most node.
3171 * During a mass-insert event (such as forking), it may be necessary to
3172 * rebalance the left-most node when it is not sufficient.
3174 static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
3176 enum maple_type mt = mte_node_type(mas->node);
3177 struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
3178 struct maple_enode *eparent;
3179 unsigned char offset, tmp, split = mt_slots[mt] / 2;
3180 void __rcu **l_slots, **slots;
3181 unsigned long *l_pivs, *pivs, gap;
3182 bool in_rcu = mt_in_rcu(mas->tree);
3184 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3187 mas_prev_sibling(&l_mas);
3191 /* Allocate for both left and right as well as parent. */
3192 mas_node_count(mas, 3);
3193 if (mas_is_err(mas))
3196 newnode = mas_pop_node(mas);
3202 newnode->parent = node->parent;
3203 slots = ma_slots(newnode, mt);
3204 pivs = ma_pivots(newnode, mt);
3205 left = mas_mn(&l_mas);
3206 l_slots = ma_slots(left, mt);
3207 l_pivs = ma_pivots(left, mt);
3208 if (!l_slots[split])
3210 tmp = mas_data_end(&l_mas) - split;
3212 memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
3213 memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
3214 pivs[tmp] = l_mas.max;
3215 memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
3216 memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
3218 l_mas.max = l_pivs[split];
3219 mas->min = l_mas.max + 1;
3220 eparent = mt_mk_node(mte_parent(l_mas.node),
3221 mas_parent_enum(&l_mas, l_mas.node));
3224 unsigned char max_p = mt_pivots[mt];
3225 unsigned char max_s = mt_slots[mt];
3228 memset(pivs + tmp, 0,
3229 sizeof(unsigned long) * (max_p - tmp));
3231 if (tmp < max_s)
3232 memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3234 memcpy(node, newnode, sizeof(struct maple_node));
3235 ma_set_meta(node, mt, 0, tmp - 1);
3236 mte_set_pivot(eparent, mte_parent_slot(l_mas.node),
3239 /* Remove data from l_pivs. */
3241 memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
3242 memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3243 ma_set_meta(left, mt, 0, split);
3248 /* RCU requires replacing l_mas, mas, and the parent. */
3249 mas->node = mt_mk_node(newnode, mt);
3250 ma_set_meta(newnode, mt, 0, tmp);
3252 new_left = mas_pop_node(mas);
3253 new_left->parent = left->parent;
3254 mt = mte_node_type(l_mas.node);
3255 slots = ma_slots(new_left, mt);
3256 pivs = ma_pivots(new_left, mt);
3257 memcpy(slots, l_slots, sizeof(void *) * split);
3258 memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
3259 ma_set_meta(new_left, mt, 0, split);
3260 l_mas.node = mt_mk_node(new_left, mt);
3262 /* replace parent. */
3263 offset = mte_parent_slot(mas->node);
3264 mt = mas_parent_enum(&l_mas, l_mas.node);
3265 parent = mas_pop_node(mas);
3266 slots = ma_slots(parent, mt);
3267 pivs = ma_pivots(parent, mt);
3268 memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node));
3269 rcu_assign_pointer(slots[offset], mas->node);
3270 rcu_assign_pointer(slots[offset - 1], l_mas.node);
3271 pivs[offset - 1] = l_mas.max;
3272 eparent = mt_mk_node(parent, mt);
3274 gap = mas_leaf_max_gap(mas);
3275 mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
3276 gap = mas_leaf_max_gap(&l_mas);
3277 mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
3281 mas_replace(mas, false);
3283 mas_update_gap(mas);
3287 * mas_split_final_node() - Split the final node in a subtree operation.
3288 * @mast: the maple subtree state
3289 * @mas: The maple state
3290 * @height: The height of the tree in case it's a new root.
3292 static inline bool mas_split_final_node(struct maple_subtree_state *mast,
3293 struct ma_state *mas, int height)
3295 struct maple_enode *ancestor;
3297 if (mte_is_root(mas->node)) {
3298 if (mt_is_alloc(mas->tree))
3299 mast->bn->type = maple_arange_64;
3301 mast->bn->type = maple_range_64;
3302 mas->depth = height;
3305 * Only a single node is used here; it could be the root.
3306 * The big node data should just fit in a single node.
3308 ancestor = mas_new_ma_node(mas, mast->bn);
3309 mte_set_parent(mast->l->node, ancestor, mast->l->offset);
3310 mte_set_parent(mast->r->node, ancestor, mast->r->offset);
3311 mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
3313 mast->l->node = ancestor;
3314 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
3315 mas->offset = mast->bn->b_end - 1;
3320 * mast_fill_bnode() - Copy data into the big node in the subtree state
3321 * @mast: The maple subtree state
3322 * @mas: the maple state
3323 * @skip: The number of entries to skip when inserting the new nodes.
3325 static inline void mast_fill_bnode(struct maple_subtree_state *mast,
3326 struct ma_state *mas,
3330 struct maple_enode *old = mas->node;
3331 unsigned char split;
3333 memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
3334 memset(mast->bn->slot, 0, sizeof(void *) * ARRAY_SIZE(mast->bn->slot));
3335 memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
3336 mast->bn->b_end = 0;
3338 if (mte_is_root(mas->node)) {
3342 mat_add(mast->free, old);
3343 mas->offset = mte_parent_slot(mas->node);
3346 if (cp && mast->l->offset)
3347 mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
3349 split = mast->bn->b_end;
3350 mab_set_b_end(mast->bn, mast->l, mast->l->node);
3351 mast->r->offset = mast->bn->b_end;
3352 mab_set_b_end(mast->bn, mast->r, mast->r->node);
3353 if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
3357 mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
3358 mast->bn, mast->bn->b_end);
3361 mast->bn->type = mte_node_type(mas->node);
3365 * mast_split_data() - Split the data in the subtree state big node into regular nodes.
3367 * @mast: The maple subtree state
3368 * @mas: The maple state
3369 * @split: The location to split the big node
3371 static inline void mast_split_data(struct maple_subtree_state *mast,
3372 struct ma_state *mas, unsigned char split)
3374 unsigned char p_slot;
3376 mab_mas_cp(mast->bn, 0, split, mast->l, true);
3377 mte_set_pivot(mast->r->node, 0, mast->r->max);
3378 mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
3379 mast->l->offset = mte_parent_slot(mas->node);
3380 mast->l->max = mast->bn->pivot[split];
3381 mast->r->min = mast->l->max + 1;
3382 if (mte_is_leaf(mas->node))
3385 p_slot = mast->orig_l->offset;
3386 mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
3388 mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
3393 * mas_push_data() - Instead of splitting a node, it is beneficial to push the
3394 * data to the right or left node if there is room.
3395 * @mas: The maple state
3396 * @height: The current height of the maple state
3397 * @mast: The maple subtree state
3398 * @left: Push left or not.
3400 * Keeping the height of the tree low means faster lookups.
3402 * Return: True if pushed, false otherwise.
3404 static inline bool mas_push_data(struct ma_state *mas, int height,
3405 struct maple_subtree_state *mast, bool left)
3407 unsigned char slot_total = mast->bn->b_end;
3408 unsigned char end, space, split;
3410 MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
3412 tmp_mas.depth = mast->l->depth;
3414 if (left && !mas_prev_sibling(&tmp_mas))
3416 else if (!left && !mas_next_sibling(&tmp_mas))
3419 end = mas_data_end(&tmp_mas);
3421 space = 2 * mt_slot_count(mas->node) - 2;
3422 /* -2 instead of -1 to ensure there isn't a triple split */
3423 if (ma_is_leaf(mast->bn->type))
3426 if (mas->max == ULONG_MAX)
3429 if (slot_total >= space)
3432 /* Get the data; Fill mast->bn */
3435 mab_shift_right(mast->bn, end + 1);
3436 mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
3437 mast->bn->b_end = slot_total + 1;
3439 mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
3442 /* Configure mast for splitting of mast->bn */
3443 split = mt_slots[mast->bn->type] - 2;
3445 /* Switch mas to prev node */
3446 mat_add(mast->free, mas->node);
3448 /* Start using mast->l for the left side. */
3449 tmp_mas.node = mast->l->node;
3452 mat_add(mast->free, tmp_mas.node);
3453 tmp_mas.node = mast->r->node;
3455 split = slot_total - split;
3457 split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
3458 /* Update parent slot for split calculation. */
3460 mast->orig_l->offset += end + 1;
3462 mast_split_data(mast, mas, split);
3463 mast_fill_bnode(mast, mas, 2);
3464 mas_split_final_node(mast, mas, height + 1);
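/*
 * Illustrative sketch (hypothetical, simplified): the core of the push check
 * above.  A push is only attempted when the combined data leaves slack,
 * mirroring the "2 * slot count - 2" limit so a push can never itself force
 * a triple split.  The leaf and ULONG_MAX adjustments are omitted here.
 */
#include <stdbool.h>

static bool toy_can_push(unsigned char b_end, unsigned char sibling_end,
			 unsigned char slot_count)
{
	/* -2 rather than -1 so a push cannot force a triple split. */
	unsigned char space = 2 * slot_count - 2;

	return b_end + sibling_end < space;
}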
3469 * mas_split() - Split data that is too big for one node into two.
3470 * @mas: The maple state
3471 * @b_node: The maple big node
3472 * Return: 1 on success, 0 on failure.
3474 static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
3477 struct maple_subtree_state mast;
3479 unsigned char mid_split, split = 0;
3482 * Splitting is handled differently from any other B-tree; the Maple
3483 * Tree splits upwards. Splitting up means that the split operation
3484 * occurs when the walk of the tree hits the leaves and not on the way
3485 * down. The reason for splitting up is that it is impossible to know
3486 * how much space will be needed until the leaf is (or leaves are)
3487 * reached. Since overwriting data is allowed and a range could
3488 * overwrite more than one range or result in changing one entry into 3
3489 * entries, it is impossible to know if a split is required until the
3492 * Splitting is a balancing act between keeping allocations to a minimum
3493 * and avoiding a 'jitter' event where a tree is expanded to make room
3494 * for an entry followed by a contraction when the entry is removed. To
3495 * accomplish the balance, there are empty slots remaining in both left
3496 * and right nodes after a split.
3498 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3499 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3500 MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
3501 MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
3502 MA_TOPIARY(mat, mas->tree);
3504 trace_ma_op(__func__, mas);
3505 mas->depth = mas_mt_height(mas);
3506 /* Allocation failures will happen early. */
3507 mas_node_count(mas, 1 + mas->depth * 2);
3508 if (mas_is_err(mas))
3513 mast.orig_l = &prev_l_mas;
3514 mast.orig_r = &prev_r_mas;
3518 while (height++ <= mas->depth) {
3519 if (mt_slots[b_node->type] > b_node->b_end) {
3520 mas_split_final_node(&mast, mas, height);
3524 l_mas = r_mas = *mas;
3525 l_mas.node = mas_new_ma_node(mas, b_node);
3526 r_mas.node = mas_new_ma_node(mas, b_node);
3528 * Another way that 'jitter' is avoided is to terminate a split up early if the
3529 * left or right node has space to spare. This is referred to as "pushing left"
3530 * or "pushing right" and is similar to the B* tree, except the nodes left or
3531 * right can rarely be reused due to RCU, but the ripple upwards is halted which
3532 * is a significant savings.
3534 /* Try to push left. */
3535 if (mas_push_data(mas, height, &mast, true))
3538 /* Try to push right. */
3539 if (mas_push_data(mas, height, &mast, false))
3542 split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
3543 mast_split_data(&mast, mas, split);
3545 * Usually correct, but mab_mas_cp in the above call overwrites r->max.
3548 mast.r->max = mas->max;
3549 mast_fill_bnode(&mast, mas, 1);
3550 prev_l_mas = *mast.l;
3551 prev_r_mas = *mast.r;
3554 /* Set the original node as dead */
3555 mat_add(mast.free, mas->node);
3556 mas->node = l_mas.node;
3557 mas_wmb_replace(mas, mast.free, NULL);
3558 mtree_range_walk(mas);
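/*
 * Illustrative sketch: the worst case for a split that ripples all the way
 * up, matching the mas_node_count(mas, 1 + mas->depth * 2) reservation
 * above.  A new left and right node per level, plus one new root.
 */
static unsigned int toy_split_worst_case_nodes(unsigned int depth)
{
	return 1 + depth * 2;
}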
3563 * mas_reuse_node() - Reuse the node to store the data.
3564 * @wr_mas: The maple write state
3565 * @bn: The maple big node
3566 * @end: The end of the data.
3568 * Will always return false in RCU mode.
3570 * Return: True if node was reused, false otherwise.
3572 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
3573 struct maple_big_node *bn, unsigned char end)
3575 /* Needs to be RCU safe. */
3576 if (mt_in_rcu(wr_mas->mas->tree))
3579 if (end > bn->b_end) {
3580 int clear = mt_slots[wr_mas->type] - bn->b_end;
3582 memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
3583 memset(wr_mas->pivots + bn->b_end, 0, sizeof(unsigned long) * clear);
3585 mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
3590 * mas_commit_b_node() - Commit the big node into the tree.
3591 * @wr_mas: The maple write state
3592 * @b_node: The maple big node
3593 * @end: The end of the data.
3595 static inline int mas_commit_b_node(struct ma_wr_state *wr_mas,
3596 struct maple_big_node *b_node, unsigned char end)
3598 struct maple_node *node;
3599 unsigned char b_end = b_node->b_end;
3600 enum maple_type b_type = b_node->type;
3602 if ((b_end < mt_min_slots[b_type]) &&
3603 (!mte_is_root(wr_mas->mas->node)) &&
3604 (mas_mt_height(wr_mas->mas) > 1))
3605 return mas_rebalance(wr_mas->mas, b_node);
3607 if (b_end >= mt_slots[b_type])
3608 return mas_split(wr_mas->mas, b_node);
3610 if (mas_reuse_node(wr_mas, b_node, end))
3613 mas_node_count(wr_mas->mas, 1);
3614 if (mas_is_err(wr_mas->mas))
3617 node = mas_pop_node(wr_mas->mas);
3618 node->parent = mas_mn(wr_mas->mas)->parent;
3619 wr_mas->mas->node = mt_mk_node(node, b_type);
3620 mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
3621 mas_replace(wr_mas->mas, false);
3623 mas_update_gap(wr_mas->mas);
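/*
 * Illustrative sketch (hypothetical, simplified): the commit decision above.
 * An undersized big node that is not the root is rebalanced, an overflowing
 * one is split, and anything else is written out directly (reusing the node
 * when not in RCU mode).  The height > 1 condition is omitted for brevity.
 */
#include <stdbool.h>

enum toy_commit { TOY_REBALANCE, TOY_SPLIT, TOY_STORE };

static enum toy_commit toy_commit_choice(unsigned char b_end,
		unsigned char min_slots, unsigned char slot_count,
		bool is_root)
{
	if (b_end < min_slots && !is_root)
		return TOY_REBALANCE;
	if (b_end >= slot_count)
		return TOY_SPLIT;
	return TOY_STORE;
}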
3628 * mas_root_expand() - Expand a root to a node
3629 * @mas: The maple state
3630 * @entry: The entry to store into the tree
3632 static inline int mas_root_expand(struct ma_state *mas, void *entry)
3634 void *contents = mas_root_locked(mas);
3635 enum maple_type type = maple_leaf_64;
3636 struct maple_node *node;
3638 unsigned long *pivots;
3641 mas_node_count(mas, 1);
3642 if (unlikely(mas_is_err(mas)))
3645 node = mas_pop_node(mas);
3646 pivots = ma_pivots(node, type);
3647 slots = ma_slots(node, type);
3648 node->parent = ma_parent_ptr(
3649 ((unsigned long)mas->tree | MA_ROOT_PARENT));
3650 mas->node = mt_mk_node(node, type);
3654 rcu_assign_pointer(slots[slot], contents);
3655 if (likely(mas->index > 1))
3658 pivots[slot++] = mas->index - 1;
3661 rcu_assign_pointer(slots[slot], entry);
3663 pivots[slot] = mas->last;
3664 if (mas->last != ULONG_MAX)
3667 mas_set_height(mas);
3668 ma_set_meta(node, maple_leaf_64, 0, slot);
3669 /* swap the new root into the tree */
3670 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3674 static inline void mas_store_root(struct ma_state *mas, void *entry)
3676 if (likely((mas->last != 0) || (mas->index != 0)))
3677 mas_root_expand(mas, entry);
3678 else if (((unsigned long) (entry) & 3) == 2)
3679 mas_root_expand(mas, entry);
3681 rcu_assign_pointer(mas->tree->ma_root, entry);
3682 mas->node = MAS_START;
3687 * mas_is_span_wr() - Check if the write needs to be treated as a write that
 * spans the node.
3689 * @wr_mas: The maple write state
3694 * Spanning writes are writes that start in one node and end in another, or
3695 * writes where storing a %NULL will cause the node to end with a %NULL.
3697 * Return: True if this is a spanning write, false otherwise.
3699 static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
3702 unsigned long last = wr_mas->mas->last;
3703 unsigned long piv = wr_mas->r_max;
3704 enum maple_type type = wr_mas->type;
3705 void *entry = wr_mas->entry;
3707 /* Contained in this pivot */
3711 max = wr_mas->mas->max;
3712 if (unlikely(ma_is_leaf(type))) {
3713 /* Fits in the node, but may span slots. */
3717 /* Writes to the end of the node but not null. */
3718 if ((last == max) && entry)
3722 * Writing ULONG_MAX is not a spanning write regardless of the
3723 * value being written as long as the range fits in the node.
3725 if ((last == ULONG_MAX) && (last == max))
3727 } else if (piv == last) {
3731 /* Detect spanning store wr walk */
3732 if (last == ULONG_MAX)
3736 trace_ma_write(__func__, wr_mas->mas, piv, entry);
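/*
 * Illustrative sketch (userspace, simplified): the essence of the spanning
 * check.  A write spans when it runs past the range covered by this node, or
 * when a %NULL write reaches the node end (except at ULONG_MAX), since the
 * trailing %NULL may need to merge with the next node.
 */
#include <stdbool.h>

static bool toy_is_span_wr(unsigned long last, unsigned long node_max,
			   bool null_entry)
{
	if (last > node_max)		/* ends in some node to the right */
		return true;
	if (null_entry && last == node_max && node_max != ~0UL)
		return true;
	return false;
}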
3741 static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
3743 wr_mas->type = mte_node_type(wr_mas->mas->node);
3744 mas_wr_node_walk(wr_mas);
3745 wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
3748 static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
3750 wr_mas->mas->max = wr_mas->r_max;
3751 wr_mas->mas->min = wr_mas->r_min;
3752 wr_mas->mas->node = wr_mas->content;
3753 wr_mas->mas->offset = 0;
3754 wr_mas->mas->depth++;
3757 * mas_wr_walk() - Walk the tree for a write.
3758 * @wr_mas: The maple write state
3760 * Uses mas_slot_locked() and does not need to worry about dead nodes.
3762 * Return: True if it's contained in a node, false on spanning write.
3764 static bool mas_wr_walk(struct ma_wr_state *wr_mas)
3766 struct ma_state *mas = wr_mas->mas;
3769 mas_wr_walk_descend(wr_mas);
3770 if (unlikely(mas_is_span_wr(wr_mas)))
3773 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3775 if (ma_is_leaf(wr_mas->type))
3778 mas_wr_walk_traverse(wr_mas);
3784 static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
3786 struct ma_state *mas = wr_mas->mas;
3789 mas_wr_walk_descend(wr_mas);
3790 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3792 if (ma_is_leaf(wr_mas->type))
3794 mas_wr_walk_traverse(wr_mas);
3800 * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
3801 * @l_wr_mas: The left maple write state
3802 * @r_wr_mas: The right maple write state
3804 static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
3805 struct ma_wr_state *r_wr_mas)
3807 struct ma_state *r_mas = r_wr_mas->mas;
3808 struct ma_state *l_mas = l_wr_mas->mas;
3809 unsigned char l_slot;
3811 l_slot = l_mas->offset;
3812 if (!l_wr_mas->content)
3813 l_mas->index = l_wr_mas->r_min;
3815 if ((l_mas->index == l_wr_mas->r_min) &&
3817 !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
3819 l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
3821 l_mas->index = l_mas->min;
3823 l_mas->offset = l_slot - 1;
3826 if (!r_wr_mas->content) {
3827 if (r_mas->last < r_wr_mas->r_max)
3828 r_mas->last = r_wr_mas->r_max;
3830 } else if ((r_mas->last == r_wr_mas->r_max) &&
3831 (r_mas->last < r_mas->max) &&
3832 !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
3833 r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
3834 r_wr_mas->type, r_mas->offset + 1);
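/*
 * Illustrative sketch (userspace, hypothetical layout): extending a %NULL
 * store over already-%NULL neighbours so a leaf never holds two adjacent
 * %NULL ranges.  As in the real nodes, pivots[i] is the inclusive maximum of
 * slot i, min is the node minimum, and end is the last valid offset.
 */
static void toy_extend_null(void *const *slots, const unsigned long *pivots,
		unsigned char end, unsigned long min,
		unsigned char off_start, unsigned char off_end,
		unsigned long *index, unsigned long *last)
{
	/* The slot below the write is already NULL: absorb its range. */
	if (off_start && !slots[off_start - 1])
		*index = off_start == 1 ? min : pivots[off_start - 2] + 1;
	/* The slot above the write is already NULL: absorb its range too. */
	if (off_end < end && !slots[off_end + 1])
		*last = pivots[off_end + 1];
}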
3839 static inline void *mas_state_walk(struct ma_state *mas)
3843 entry = mas_start(mas);
3844 if (mas_is_none(mas))
3847 if (mas_is_ptr(mas))
3850 return mtree_range_walk(mas);
3854 * mtree_lookup_walk() - Internal quick lookup that does not keep the maple
 * state up to date.
3857 * @mas: The maple state.
3859 * Note: Leaves @mas in an undesirable state.
3860 * Return: The entry for @mas->index or %NULL on dead node.
3862 static inline void *mtree_lookup_walk(struct ma_state *mas)
3864 unsigned long *pivots;
3865 unsigned char offset;
3866 struct maple_node *node;
3867 struct maple_enode *next;
3868 enum maple_type type;
3877 node = mte_to_node(next);
3878 type = mte_node_type(next);
3879 pivots = ma_pivots(node, type);
3880 end = ma_data_end(node, type, pivots, max);
3881 if (unlikely(ma_dead_node(node)))
3884 if (pivots[offset] >= mas->index) {
3885 max = pivots[offset];
3888 } while (++offset < end);
3890 slots = ma_slots(node, type);
3891 next = mt_slot(mas->tree, slots, offset);
3892 if (unlikely(ma_dead_node(node)))
3894 } while (!ma_is_leaf(type));
3896 return (void *) next;
3904 * mas_new_root() - Create a new root node that only contains the entry passed in.
3906 * @mas: The maple state
3907 * @entry: The entry to store.
3909 * Only valid when mas->index == 0 and mas->last == ULONG_MAX.
3911 * Return: 0 on error, 1 on success.
3913 static inline int mas_new_root(struct ma_state *mas, void *entry)
3915 struct maple_enode *root = mas_root_locked(mas);
3916 enum maple_type type = maple_leaf_64;
3917 struct maple_node *node;
3919 unsigned long *pivots;
3921 if (!entry && !mas->index && mas->last == ULONG_MAX) {
3923 mas_set_height(mas);
3924 rcu_assign_pointer(mas->tree->ma_root, entry);
3925 mas->node = MAS_START;
3929 mas_node_count(mas, 1);
3930 if (mas_is_err(mas))
3933 node = mas_pop_node(mas);
3934 pivots = ma_pivots(node, type);
3935 slots = ma_slots(node, type);
3936 node->parent = ma_parent_ptr(
3937 ((unsigned long)mas->tree | MA_ROOT_PARENT));
3938 mas->node = mt_mk_node(node, type);
3939 rcu_assign_pointer(slots[0], entry);
3940 pivots[0] = mas->last;
3942 mas_set_height(mas);
3943 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3946 if (xa_is_node(root))
3947 mte_destroy_walk(root, mas->tree);
3952 * mas_wr_spanning_store() - Create a subtree with the store operation completed
3953 * and new nodes where necessary, then place the sub-tree in the actual tree.
3954 * Note that mas is expected to point to the node which caused the store to span.
3956 * @wr_mas: The maple write state
3958 * Return: 0 on error, positive on success.
3960 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
3962 struct maple_subtree_state mast;
3963 struct maple_big_node b_node;
3964 struct ma_state *mas;
3965 unsigned char height;
3967 /* Left and Right side of spanning store */
3968 MA_STATE(l_mas, NULL, 0, 0);
3969 MA_STATE(r_mas, NULL, 0, 0);
3971 MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
3972 MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
3975 * A store operation that spans multiple nodes is called a spanning
3976 * store and is handled early in the store call stack by the function
3977 * mas_is_span_wr(). When a spanning store is identified, the maple
3978 * state is duplicated. The first maple state walks the left tree path
3979 * to ``index``, the duplicate walks the right tree path to ``last``.
3980 * The data in the two nodes are combined into a single node, two nodes,
3981 * or possibly three nodes (see the 3-way split above). A ``NULL``
3982 * written to the last entry of a node is considered a spanning store as
3983 * a rebalance is required for the operation to complete and an overflow
3984 * of data may happen.
3987 trace_ma_op(__func__, mas);
3989 if (unlikely(!mas->index && mas->last == ULONG_MAX))
3990 return mas_new_root(mas, wr_mas->entry);
3992 * Node rebalancing may occur due to this store, so there may be three new
3993 * entries per level plus a new root.
3995 height = mas_mt_height(mas);
3996 mas_node_count(mas, 1 + height * 3);
3997 if (mas_is_err(mas))
4001 * Set up the right side.  Need to get to the next offset after the spanning
4002 * store to ensure it's not %NULL, and to combine the next node with the
4003 * node containing the start of the store.
4006 /* Avoid overflow, walk to next slot in the tree. */
4010 r_mas.index = r_mas.last;
4011 mas_wr_walk_index(&r_wr_mas);
4012 r_mas.last = r_mas.index = mas->last;
4014 /* Set up left side. */
4016 mas_wr_walk_index(&l_wr_mas);
4018 if (!wr_mas->entry) {
4019 mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
4020 mas->offset = l_mas.offset;
4021 mas->index = l_mas.index;
4022 mas->last = l_mas.last = r_mas.last;
4025 /* expanding NULLs may make this cover the entire range */
4026 if (!l_mas.index && r_mas.last == ULONG_MAX) {
4027 mas_set_range(mas, 0, ULONG_MAX);
4028 return mas_new_root(mas, wr_mas->entry);
4031 memset(&b_node, 0, sizeof(struct maple_big_node));
4032 /* Copy l_mas and store the value in b_node. */
4033 mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
4034 /* Copy r_mas into b_node. */
4035 if (r_mas.offset <= r_wr_mas.node_end)
4036 mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
4037 &b_node, b_node.b_end + 1);
4041 /* Stop spanning searches by searching for just index. */
4042 l_mas.index = l_mas.last = mas->index;
4045 mast.orig_l = &l_mas;
4046 mast.orig_r = &r_mas;
4047 /* Combine l_mas and r_mas and split them up evenly again. */
4048 return mas_spanning_rebalance(mas, &mast, height + 1);
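/*
 * Usage sketch of why spanning stores exist: one write can replace parts of
 * several pre-existing ranges.  This uses the public API; the toy_* names
 * are hypothetical, return values are ignored for brevity, and a kernel
 * context where GFP_KERNEL allocations are allowed is assumed.
 */
static DEFINE_MTREE(toy_span_mt);

static void toy_spanning_example(void)
{
	mtree_store_range(&toy_span_mt, 0, 9, xa_mk_value(1), GFP_KERNEL);
	mtree_store_range(&toy_span_mt, 10, 19, xa_mk_value(2), GFP_KERNEL);
	mtree_store_range(&toy_span_mt, 20, 29, xa_mk_value(3), GFP_KERNEL);
	/*
	 * This store starts in the first range and ends in the third: the
	 * write walk detects it and takes the spanning store path.
	 */
	mtree_store_range(&toy_span_mt, 5, 24, xa_mk_value(4), GFP_KERNEL);
}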
4052 * mas_wr_node_store() - Attempt to store the value in a node
4053 * @wr_mas: The maple write state
4055 * Attempts to reuse the node, but may allocate.
4057 * Return: True if stored, false otherwise
4059 static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
4061 struct ma_state *mas = wr_mas->mas;
4062 void __rcu **dst_slots;
4063 unsigned long *dst_pivots;
4064 unsigned char dst_offset;
4065 unsigned char new_end = wr_mas->node_end;
4066 unsigned char offset;
4067 unsigned char node_slots = mt_slots[wr_mas->type];
4068 struct maple_node reuse, *newnode;
4069 unsigned char copy_size, max_piv = mt_pivots[wr_mas->type];
4070 bool in_rcu = mt_in_rcu(mas->tree);
4072 offset = mas->offset;
4073 if (mas->last == wr_mas->r_max) {
4074 /* runs right to the end of the node */
4075 if (mas->last == mas->max)
4077 /* don't copy this offset */
4078 wr_mas->offset_end++;
4079 } else if (mas->last < wr_mas->r_max) {
4080 /* new range ends in this range */
4081 if (unlikely(wr_mas->r_max == ULONG_MAX))
4082 mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
4086 if (wr_mas->end_piv == mas->last)
4087 wr_mas->offset_end++;
4089 new_end -= wr_mas->offset_end - offset - 1;
4092 /* new range starts within a range */
4093 if (wr_mas->r_min < mas->index)
4096 /* Not enough room */
4097 if (new_end >= node_slots)
4100 /* Not enough data. */
4101 if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
4102 !(mas->mas_flags & MA_STATE_BULK))
4107 mas_node_count(mas, 1);
4108 if (mas_is_err(mas))
4111 newnode = mas_pop_node(mas);
4113 memset(&reuse, 0, sizeof(struct maple_node));
4117 newnode->parent = mas_mn(mas)->parent;
4118 dst_pivots = ma_pivots(newnode, wr_mas->type);
4119 dst_slots = ma_slots(newnode, wr_mas->type);
4120 /* Copy from start to insert point */
4121 memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1));
4122 memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1));
4123 dst_offset = offset;
4125 /* Handle insert of new range starting after old range */
4126 if (wr_mas->r_min < mas->index) {
4128 rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content);
4129 dst_pivots[dst_offset++] = mas->index - 1;
4132 /* Store the new entry and range end. */
4133 if (dst_offset < max_piv)
4134 dst_pivots[dst_offset] = mas->last;
4135 mas->offset = dst_offset;
4136 rcu_assign_pointer(dst_slots[dst_offset], wr_mas->entry);
4139 * This range wrote to the end of the node or it overwrote the rest of the data.
4142 if (wr_mas->offset_end > wr_mas->node_end || mas->last >= mas->max) {
4143 new_end = dst_offset;
4148 /* Copy to the end of node if necessary. */
4149 copy_size = wr_mas->node_end - wr_mas->offset_end + 1;
4150 memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end,
4151 sizeof(void *) * copy_size);
4152 if (dst_offset < max_piv) {
4153 if (copy_size > max_piv - dst_offset)
4154 copy_size = max_piv - dst_offset;
4156 memcpy(dst_pivots + dst_offset,
4157 wr_mas->pivots + wr_mas->offset_end,
4158 sizeof(unsigned long) * copy_size);
4161 if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1))
4162 dst_pivots[new_end] = mas->max;
4165 mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
4167 mas->node = mt_mk_node(newnode, wr_mas->type);
4168 mas_replace(mas, false);
4170 memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
4172 trace_ma_write(__func__, mas, 0, wr_mas->entry);
4173 mas_update_gap(mas);
4178 * mas_wr_slot_store() - Attempt to store a value in a slot.
4179 * @wr_mas: the maple write state
4181 * Return: True if stored, false otherwise
4183 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
4185 struct ma_state *mas = wr_mas->mas;
4186 unsigned long lmax; /* Logical max. */
4187 unsigned char offset = mas->offset;
4189 if ((wr_mas->r_max > mas->last) && ((wr_mas->r_min != mas->index) ||
4190 (offset != wr_mas->node_end)))
4193 if (offset == wr_mas->node_end - 1)
4196 lmax = wr_mas->pivots[offset + 1];
4198 /* going to overwrite too many slots. */
4199 if (lmax < mas->last)
4202 if (wr_mas->r_min == mas->index) {
4203 /* overwriting two or more ranges with one. */
4204 if (lmax == mas->last)
4207 /* Overwriting all of offset and a portion of offset + 1. */
4208 rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry);
4209 wr_mas->pivots[offset] = mas->last;
4213 /* Doesn't end on the next range end. */
4214 if (lmax != mas->last)
4217 /* Overwriting a portion of offset and all of offset + 1 */
4218 if ((offset + 1 < mt_pivots[wr_mas->type]) &&
4219 (wr_mas->entry || wr_mas->pivots[offset + 1]))
4220 wr_mas->pivots[offset + 1] = mas->last;
4222 rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
4223 wr_mas->pivots[offset] = mas->index - 1;
4224 mas->offset++; /* Keep mas accurate. */
4227 trace_ma_write(__func__, mas, 0, wr_mas->entry);
4228 mas_update_gap(mas);
4232 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
4234 while ((wr_mas->mas->last > wr_mas->end_piv) &&
4235 (wr_mas->offset_end < wr_mas->node_end))
4236 wr_mas->end_piv = wr_mas->pivots[++wr_mas->offset_end];
4238 if (wr_mas->mas->last > wr_mas->end_piv)
4239 wr_mas->end_piv = wr_mas->mas->max;
4242 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
4244 struct ma_state *mas = wr_mas->mas;
4246 if (mas->last < wr_mas->end_piv && !wr_mas->slots[wr_mas->offset_end])
4247 mas->last = wr_mas->end_piv;
4249 /* Check next slot(s) if we are overwriting the end */
4250 if ((mas->last == wr_mas->end_piv) &&
4251 (wr_mas->node_end != wr_mas->offset_end) &&
4252 !wr_mas->slots[wr_mas->offset_end + 1]) {
4253 wr_mas->offset_end++;
4254 if (wr_mas->offset_end == wr_mas->node_end)
4255 mas->last = mas->max;
4257 mas->last = wr_mas->pivots[wr_mas->offset_end];
4258 wr_mas->end_piv = mas->last;
4261 if (!wr_mas->content) {
4262 /* If this one is null, the next and prev are not */
4263 mas->index = wr_mas->r_min;
4265 /* Check prev slot if we are overwriting the start */
4266 if (mas->index == wr_mas->r_min && mas->offset &&
4267 !wr_mas->slots[mas->offset - 1]) {
4269 wr_mas->r_min = mas->index =
4270 mas_safe_min(mas, wr_mas->pivots, mas->offset);
4271 wr_mas->r_max = wr_mas->pivots[mas->offset];
4276 static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
4278 unsigned char end = wr_mas->node_end;
4279 unsigned char new_end = end + 1;
4280 struct ma_state *mas = wr_mas->mas;
4281 unsigned char node_pivots = mt_pivots[wr_mas->type];
4283 if ((mas->index != wr_mas->r_min) && (mas->last == wr_mas->r_max)) {
4284 if (new_end < node_pivots)
4285 wr_mas->pivots[new_end] = wr_mas->pivots[end];
4287 if (new_end < node_pivots)
4288 ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4290 rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry);
4291 mas->offset = new_end;
4292 wr_mas->pivots[end] = mas->index - 1;
4297 if ((mas->index == wr_mas->r_min) && (mas->last < wr_mas->r_max)) {
4298 if (new_end < node_pivots)
4299 wr_mas->pivots[new_end] = wr_mas->pivots[end];
4301 rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
4302 if (new_end < node_pivots)
4303 ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4305 wr_mas->pivots[end] = mas->last;
4306 rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
4314 * mas_wr_bnode() - Slow path for a modification.
4315 * @wr_mas: The write maple state
4317 * This is where splits and rebalances end up.
4319 static void mas_wr_bnode(struct ma_wr_state *wr_mas)
4321 struct maple_big_node b_node;
4323 trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
4324 memset(&b_node, 0, sizeof(struct maple_big_node));
4325 mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
4326 mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
4329 static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
4331 unsigned char node_slots;
4332 unsigned char node_size;
4333 struct ma_state *mas = wr_mas->mas;
4335 /* Direct replacement */
4336 if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
4337 rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
4338 if (!!wr_mas->entry ^ !!wr_mas->content)
4339 mas_update_gap(mas);
4343 /* Attempt to append */
4344 node_slots = mt_slots[wr_mas->type];
4345 node_size = wr_mas->node_end - wr_mas->offset_end + mas->offset + 2;
4346 if (mas->max == ULONG_MAX)
4349 /* slot and node store will not fit, go to the slow path */
4350 if (unlikely(node_size >= node_slots))
4353 if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) &&
4354 (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) {
4355 if (!wr_mas->content || !wr_mas->entry)
4356 mas_update_gap(mas);
4360 if ((wr_mas->offset_end - mas->offset <= 1) && mas_wr_slot_store(wr_mas))
4362 else if (mas_wr_node_store(wr_mas))
4365 if (mas_is_err(mas))
4369 mas_wr_bnode(wr_mas);
4373 * mas_wr_store_entry() - Internal call to store a value
4374 * @wr_mas: The maple write state
4377 * Return: The contents that were stored at the index.
4379 static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
4381 struct ma_state *mas = wr_mas->mas;
4383 wr_mas->content = mas_start(mas);
4384 if (mas_is_none(mas) || mas_is_ptr(mas)) {
4385 mas_store_root(mas, wr_mas->entry);
4386 return wr_mas->content;
4389 if (unlikely(!mas_wr_walk(wr_mas))) {
4390 mas_wr_spanning_store(wr_mas);
4391 return wr_mas->content;
4394 /* At this point, we are at the leaf node that needs to be altered. */
4395 wr_mas->end_piv = wr_mas->r_max;
4396 mas_wr_end_piv(wr_mas);
4399 mas_wr_extend_null(wr_mas);
4401 /* New root for a single pointer */
4402 if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
4403 mas_new_root(mas, wr_mas->entry);
4404 return wr_mas->content;
4407 mas_wr_modify(wr_mas);
4408 return wr_mas->content;
4412 * mas_insert() - Internal call to insert a value
4413 * @mas: The maple state
4414 * @entry: The entry to store
4416 * Return: %NULL if the entry was stored, otherwise the contents that already
4417 * exist at the requested index.  The maple state needs to be checked for error conditions.
4419 static inline void *mas_insert(struct ma_state *mas, void *entry)
4421 MA_WR_STATE(wr_mas, mas, entry);
4424 * Inserting a new range inserts either 0, 1, or 2 pivots within the
4425 * tree. If the insert fits exactly into an existing gap with a value
4426 * of NULL, then the slot only needs to be written with the new value.
4427 * If the range being inserted is adjacent to another range, then only a
4428 * single pivot needs to be inserted (as well as writing the entry). If
4429 * the new range is within a gap but does not touch any other ranges,
4430 * then two pivots need to be inserted: the start - 1, and the end. As
4431 * usual, the entry must be written.  When in RCU mode, most operations
4432 * require a new node to be allocated to replace an existing node in order
4433 * to ensure RCU safety.  The exception to requiring a newly allocated node
4434 * is when inserting at the end of a node (appending). When done
4435 * carefully, appending can reuse the node in place.
4437 wr_mas.content = mas_start(mas);
4441 if (mas_is_none(mas) || mas_is_ptr(mas)) {
4442 mas_store_root(mas, entry);
4446 /* spanning writes always overwrite something */
4447 if (!mas_wr_walk(&wr_mas))
4450 /* At this point, we are at the leaf node that needs to be altered. */
4451 wr_mas.offset_end = mas->offset;
4452 wr_mas.end_piv = wr_mas.r_max;
4454 if (wr_mas.content || (mas->last > wr_mas.r_max))
4460 mas_wr_modify(&wr_mas);
4461 return wr_mas.content;
4464 mas_set_err(mas, -EEXIST);
4465 return wr_mas.content;
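/*
 * Illustrative sketch of the pivot accounting above (hypothetical helper):
 * how many new pivots an insert adds, given the NULL gap [gap_min, gap_max]
 * that will hold the new range [index, last].
 */
static int toy_insert_new_pivots(unsigned long gap_min, unsigned long gap_max,
				 unsigned long index, unsigned long last)
{
	int pivots = 2;

	if (index == gap_min)	/* abuts the range below: no start - 1 pivot */
		pivots--;
	if (last == gap_max)	/* abuts the range above: no end pivot */
		pivots--;
	return pivots;		/* 0, 1 or 2, as described above */
}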
4470 * mas_prev_node() - Find the previous non-NULL entry at the same level in
4471 * the tree.
4472 * @mas: The maple state
4473 * @min: The lower limit to search
4475 * The prev node value will be mas->node[mas->offset] or MAS_NONE.
4476 * Return: 1 if the node is dead, 0 otherwise.
4478 static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
4483 struct maple_node *node;
4484 struct maple_enode *enode;
4485 unsigned long *pivots;
4487 if (mas_is_none(mas))
4493 if (ma_is_root(node))
4497 if (unlikely(mas_ascend(mas)))
4499 offset = mas->offset;
4504 mt = mte_node_type(mas->node);
4506 slots = ma_slots(node, mt);
4507 pivots = ma_pivots(node, mt);
4508 if (unlikely(ma_dead_node(node)))
4511 mas->max = pivots[offset];
4513 mas->min = pivots[offset - 1] + 1;
4514 if (unlikely(ma_dead_node(node)))
4522 enode = mas_slot(mas, slots, offset);
4523 if (unlikely(ma_dead_node(node)))
4527 mt = mte_node_type(mas->node);
4529 slots = ma_slots(node, mt);
4530 pivots = ma_pivots(node, mt);
4531 offset = ma_data_end(node, mt, pivots, mas->max);
4532 if (unlikely(ma_dead_node(node)))
4536 mas->min = pivots[offset - 1] + 1;
4538 if (offset < mt_pivots[mt])
4539 mas->max = pivots[offset];
4545 mas->node = mas_slot(mas, slots, offset);
4546 if (unlikely(ma_dead_node(node)))
4549 mas->offset = mas_data_end(mas);
4550 if (unlikely(mte_dead_node(mas->node)))
4556 mas->offset = offset;
4558 mas->min = pivots[offset - 1] + 1;
4560 if (unlikely(ma_dead_node(node)))
4563 mas->node = MAS_NONE;
4568 * mas_next_node() - Get the next node at the same level in the tree.
* @mas: The maple state
* @node: The maple node
* @max: The maximum pivot value to check.
4572 * The next value will be mas->node[mas->offset] or MAS_NONE.
4573 * Return: 1 on dead node, 0 otherwise.
4575 static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
4578 unsigned long min, pivot;
4579 unsigned long *pivots;
4580 struct maple_enode *enode;
4582 unsigned char offset;
4583 unsigned char node_end;
4587 if (mas->max >= max)
4592 if (ma_is_root(node))
4599 if (unlikely(mas_ascend(mas)))
4602 offset = mas->offset;
4605 mt = mte_node_type(mas->node);
4606 pivots = ma_pivots(node, mt);
4607 node_end = ma_data_end(node, mt, pivots, mas->max);
4608 if (unlikely(ma_dead_node(node)))
4611 } while (unlikely(offset == node_end));
4613 slots = ma_slots(node, mt);
4614 pivot = mas_safe_pivot(mas, pivots, ++offset, mt);
4615 while (unlikely(level > 1)) {
4616 /* Descend, if necessary */
4617 enode = mas_slot(mas, slots, offset);
4618 if (unlikely(ma_dead_node(node)))
4624 mt = mte_node_type(mas->node);
4625 slots = ma_slots(node, mt);
4626 pivots = ma_pivots(node, mt);
4627 if (unlikely(ma_dead_node(node)))
4634 enode = mas_slot(mas, slots, offset);
4635 if (unlikely(ma_dead_node(node)))
4644 if (unlikely(ma_dead_node(node)))
4647 mas->node = MAS_NONE;
4652 * mas_next_nentry() - Get the next node entry
4653 * @mas: The maple state
4654 * @max: The maximum value to check
* @node: The maple node
* @type: The maple node type
4657 * Sets @mas->offset to the offset of the next node entry, @mas->last to the
4658 * pivot of the entry.
4660 * Return: The next entry, %NULL otherwise
4662 static inline void *mas_next_nentry(struct ma_state *mas,
4663 struct maple_node *node, unsigned long max, enum maple_type type)
4665 unsigned char count;
4666 unsigned long pivot;
4667 unsigned long *pivots;
4671 if (mas->last == mas->max) {
4672 mas->index = mas->max;
4676 slots = ma_slots(node, type);
4677 pivots = ma_pivots(node, type);
4678 count = ma_data_end(node, type, pivots, mas->max);
4679 if (unlikely(ma_dead_node(node)))
4682 mas->index = mas_safe_min(mas, pivots, mas->offset);
4683 if (unlikely(ma_dead_node(node)))
4686 if (mas->index > max)
4689 if (mas->offset > count)
4692 while (mas->offset < count) {
4693 pivot = pivots[mas->offset];
4694 entry = mas_slot(mas, slots, mas->offset);
4695 if (ma_dead_node(node))
4704 mas->index = pivot + 1;
4708 if (mas->index > mas->max) {
4709 mas->index = mas->last;
4713 pivot = mas_safe_pivot(mas, pivots, mas->offset, type);
4714 entry = mas_slot(mas, slots, mas->offset);
4715 if (ma_dead_node(node))
4729 static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
4733 mas_set(mas, index);
4734 mas_state_walk(mas);
4735 if (mas_is_start(mas))
4743 * mas_next_entry() - Internal function to get the next entry.
4744 * @mas: The maple state
4745 * @limit: The maximum range start.
* Set @mas->node to the next entry and @mas->index to the beginning
* value of its range. Does not check beyond @limit.
4749 * Sets @mas->index and @mas->last to the limit if it is hit.
4750 * Restarts on dead nodes.
4752 * Return: the next entry or %NULL.
4754 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
4757 struct maple_enode *prev_node;
4758 struct maple_node *node;
4759 unsigned char offset;
4763 if (mas->index > limit) {
4764 mas->index = mas->last = limit;
4770 offset = mas->offset;
4771 prev_node = mas->node;
4773 mt = mte_node_type(mas->node);
4775 if (unlikely(mas->offset >= mt_slots[mt])) {
4776 mas->offset = mt_slots[mt] - 1;
4780 while (!mas_is_none(mas)) {
4781 entry = mas_next_nentry(mas, node, limit, mt);
4782 if (unlikely(ma_dead_node(node))) {
4783 mas_rewalk(mas, last);
4790 if (unlikely((mas->index > limit)))
4794 prev_node = mas->node;
4795 offset = mas->offset;
4796 if (unlikely(mas_next_node(mas, node, limit))) {
4797 mas_rewalk(mas, last);
4802 mt = mte_node_type(mas->node);
4805 mas->index = mas->last = limit;
4806 mas->offset = offset;
4807 mas->node = prev_node;
4812 * mas_prev_nentry() - Get the previous node entry.
4813 * @mas: The maple state.
* @limit: The lower limit to check for a value.
* @index: The index to re-walk from if a dead node is encountered.
4816 * Return: the entry, %NULL otherwise.
4818 static inline void *mas_prev_nentry(struct ma_state *mas, unsigned long limit,
4819 unsigned long index)
4821 unsigned long pivot, min;
4822 unsigned char offset;
4823 struct maple_node *mn;
4825 unsigned long *pivots;
4834 mt = mte_node_type(mas->node);
4835 offset = mas->offset - 1;
4836 if (offset >= mt_slots[mt])
4837 offset = mt_slots[mt] - 1;
4839 slots = ma_slots(mn, mt);
4840 pivots = ma_pivots(mn, mt);
4841 if (unlikely(ma_dead_node(mn))) {
4842 mas_rewalk(mas, index);
4846 if (offset == mt_pivots[mt])
4849 pivot = pivots[offset];
4851 if (unlikely(ma_dead_node(mn))) {
4852 mas_rewalk(mas, index);
4856 while (offset && ((!mas_slot(mas, slots, offset) && pivot >= limit) ||
4858 pivot = pivots[--offset];
4860 min = mas_safe_min(mas, pivots, offset);
4861 entry = mas_slot(mas, slots, offset);
4862 if (unlikely(ma_dead_node(mn))) {
4863 mas_rewalk(mas, index);
4867 if (likely(entry)) {
4868 mas->offset = offset;
4875 static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min)
4879 if (mas->index < min) {
4880 mas->index = mas->last = min;
4881 mas->node = MAS_NONE;
4885 while (likely(!mas_is_none(mas))) {
4886 entry = mas_prev_nentry(mas, min, mas->index);
4887 if (unlikely(mas->last < min))
4893 if (unlikely(mas_prev_node(mas, min))) {
4894 mas_rewalk(mas, mas->index);
4903 mas->index = mas->last = min;
4908 * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the
4909 * highest gap address of a given size in a given node and descend.
4910 * @mas: The maple state
4911 * @size: The needed size.
4913 * Return: True if found in a leaf, false otherwise.
4916 static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
4918 enum maple_type type = mte_node_type(mas->node);
4919 struct maple_node *node = mas_mn(mas);
4920 unsigned long *pivots, *gaps;
4922 unsigned long gap = 0;
4923 unsigned long max, min;
4924 unsigned char offset;
4926 if (unlikely(mas_is_err(mas)))
4929 if (ma_is_dense(type)) {
4931 mas->offset = (unsigned char)(mas->index - mas->min);
4935 pivots = ma_pivots(node, type);
4936 slots = ma_slots(node, type);
4937 gaps = ma_gaps(node, type);
4938 offset = mas->offset;
4939 min = mas_safe_min(mas, pivots, offset);
4940 /* Skip out of bounds. */
4941 while (mas->last < min)
4942 min = mas_safe_min(mas, pivots, --offset);
4944 max = mas_safe_pivot(mas, pivots, offset, type);
4945 while (mas->index <= max) {
4949 else if (!mas_slot(mas, slots, offset))
4950 gap = max - min + 1;
4953 if ((size <= gap) && (size <= mas->last - min + 1))
4957 /* Skip the next slot, it cannot be a gap. */
4962 max = pivots[offset];
4963 min = mas_safe_min(mas, pivots, offset);
4973 min = mas_safe_min(mas, pivots, offset);
4976 if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
4979 if (unlikely(ma_is_leaf(type))) {
4980 mas->offset = offset;
4982 mas->max = min + gap - 1;
4986 /* descend, only happens under lock. */
4987 mas->node = mas_slot(mas, slots, offset);
4990 mas->offset = mas_data_end(mas);
4994 if (!mte_is_root(mas->node))
4998 mas_set_err(mas, -EBUSY);
5002 static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
5004 enum maple_type type = mte_node_type(mas->node);
5005 unsigned long pivot, min, gap = 0;
5006 unsigned char offset;
5007 unsigned long *gaps;
5008 unsigned long *pivots = ma_pivots(mas_mn(mas), type);
5009 void __rcu **slots = ma_slots(mas_mn(mas), type);
5012 if (ma_is_dense(type)) {
5013 mas->offset = (unsigned char)(mas->index - mas->min);
5017 gaps = ma_gaps(mte_to_node(mas->node), type);
5018 offset = mas->offset;
5019 min = mas_safe_min(mas, pivots, offset);
5020 for (; offset < mt_slots[type]; offset++) {
5021 pivot = mas_safe_pivot(mas, pivots, offset, type);
5022 if (offset && !pivot)
5025 /* Not within lower bounds */
5026 if (mas->index > pivot)
5031 else if (!mas_slot(mas, slots, offset))
5032 gap = min(pivot, mas->last) - max(mas->index, min) + 1;
5037 if (ma_is_leaf(type)) {
5041 if (mas->index <= pivot) {
5042 mas->node = mas_slot(mas, slots, offset);
5051 if (mas->last <= pivot) {
5052 mas_set_err(mas, -EBUSY);
5057 if (mte_is_root(mas->node))
5060 mas->offset = offset;
5065 * mas_walk() - Search for @mas->index in the tree.
5066 * @mas: The maple state.
5068 * mas->index and mas->last will be set to the range if there is a value. If
5069 * mas->node is MAS_NONE, reset to MAS_START.
5071 * Return: the entry at the location or %NULL.
5073 void *mas_walk(struct ma_state *mas)
5078 entry = mas_state_walk(mas);
5079 if (mas_is_start(mas))
5082 if (mas_is_ptr(mas)) {
5087 mas->last = ULONG_MAX;
5092 if (mas_is_none(mas)) {
5094 mas->last = ULONG_MAX;
5099 EXPORT_SYMBOL_GPL(mas_walk);
5101 static inline bool mas_rewind_node(struct ma_state *mas)
5106 if (mte_is_root(mas->node)) {
5116 mas->offset = --slot;
5121 * mas_skip_node() - Internal function. Skip over a node.
5122 * @mas: The maple state.
5124 * Return: true if there is another node, false otherwise.
5126 static inline bool mas_skip_node(struct ma_state *mas)
5128 if (mas_is_err(mas))
5132 if (mte_is_root(mas->node)) {
5133 if (mas->offset >= mas_data_end(mas)) {
5134 mas_set_err(mas, -EBUSY);
5140 } while (mas->offset >= mas_data_end(mas));
* mas_awalk() - Allocation walk. Search from low address to high, for a
* gap of @size.
5149 * @mas: The maple state
5150 * @size: The size of the gap required
5152 * Search between @mas->index and @mas->last for a gap of @size.
5154 static inline void mas_awalk(struct ma_state *mas, unsigned long size)
5156 struct maple_enode *last = NULL;
5159 * There are 4 options:
5160 * go to child (descend)
5161 * go back to parent (ascend)
5162 * no gap found. (return, slot == MAPLE_NODE_SLOTS)
5163 * found the gap. (return, slot != MAPLE_NODE_SLOTS)
5165 while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
5166 if (last == mas->node)
5174 * mas_fill_gap() - Fill a located gap with @entry.
5175 * @mas: The maple state
5176 * @entry: The value to store
5177 * @slot: The offset into the node to store the @entry
5178 * @size: The size of the entry
5179 * @index: The start location
5181 static inline void mas_fill_gap(struct ma_state *mas, void *entry,
5182 unsigned char slot, unsigned long size, unsigned long *index)
5184 MA_WR_STATE(wr_mas, mas, entry);
5185 unsigned char pslot = mte_parent_slot(mas->node);
5186 struct maple_enode *mn = mas->node;
5187 unsigned long *pivots;
5188 enum maple_type ptype;
5190 * mas->index is the start address for the search
5191 * which may no longer be needed.
5192 * mas->last is the end address for the search
5195 *index = mas->index;
5196 mas->last = mas->index + size - 1;
5199 * It is possible that using mas->max and mas->min to correctly
5200 * calculate the index and last will cause an issue in the gap
5201 * calculation, so fix the ma_state here
5204 ptype = mte_node_type(mas->node);
5205 pivots = ma_pivots(mas_mn(mas), ptype);
5206 mas->max = mas_safe_pivot(mas, pivots, pslot, ptype);
5207 mas->min = mas_safe_min(mas, pivots, pslot);
5210 mas_wr_store_entry(&wr_mas);
5214 * mas_sparse_area() - Internal function. Return upper or lower limit when
5215 * searching for a gap in an empty tree.
5216 * @mas: The maple state
5217 * @min: the minimum range
5218 * @max: The maximum range
5219 * @size: The size of the gap
5220 * @fwd: Searching forward or back
5222 static inline void mas_sparse_area(struct ma_state *mas, unsigned long min,
5223 unsigned long max, unsigned long size, bool fwd)
5225 unsigned long start = 0;
if (likely(!mas_is_none(mas)))
5236 mas->last = start + size - 1;
5244 * mas_empty_area() - Get the lowest address within the range that is
5245 * sufficient for the size requested.
5246 * @mas: The maple state
5247 * @min: The lowest value of the range
5248 * @max: The highest value of the range
* @size: The size needed
*
* Return: 0 on success, otherwise an error retrievable with xa_err().
5251 int mas_empty_area(struct ma_state *mas, unsigned long min,
5252 unsigned long max, unsigned long size)
5254 unsigned char offset;
5255 unsigned long *pivots;
5258 if (mas_is_start(mas))
5260 else if (mas->offset >= 2)
5262 else if (!mas_skip_node(mas))
5266 if (mas_is_none(mas) || mas_is_ptr(mas)) {
5267 mas_sparse_area(mas, min, max, size, true);
5271 /* The start of the window can only be within these values */
5274 mas_awalk(mas, size);
5276 if (unlikely(mas_is_err(mas)))
5277 return xa_err(mas->node);
5279 offset = mas->offset;
5280 if (unlikely(offset == MAPLE_NODE_SLOTS))
5283 mt = mte_node_type(mas->node);
5284 pivots = ma_pivots(mas_mn(mas), mt);
5286 mas->min = pivots[offset - 1] + 1;
5288 if (offset < mt_pivots[mt])
5289 mas->max = pivots[offset];
5291 if (mas->index < mas->min)
5292 mas->index = mas->min;
5294 mas->last = mas->index + size - 1;
5297 EXPORT_SYMBOL_GPL(mas_empty_area);
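/*
* Example: a minimal sketch of finding the lowest 16-slot gap and then
* claiming it; the tree is assumed to be an allocation tree
* (MT_FLAGS_ALLOC_RANGE) and @ptr is an illustrative payload.
*
*	MA_STATE(mas, &mt, 0, 0);
*
*	mas_lock(&mas);
*	if (!mas_empty_area(&mas, 0, ULONG_MAX, 16))
*		mas_store_gfp(&mas, ptr, GFP_KERNEL);
*	mas_unlock(&mas);
*/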
5300 * mas_empty_area_rev() - Get the highest address within the range that is
5301 * sufficient for the size requested.
5302 * @mas: The maple state
5303 * @min: The lowest value of the range
5304 * @max: The highest value of the range
* @size: The size needed
*
* Return: 0 on success, otherwise an error retrievable with xa_err().
5307 int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
5308 unsigned long max, unsigned long size)
5310 struct maple_enode *last = mas->node;
5312 if (mas_is_start(mas)) {
5314 mas->offset = mas_data_end(mas);
5315 } else if (mas->offset >= 2) {
5317 } else if (!mas_rewind_node(mas)) {
5322 if (mas_is_none(mas) || mas_is_ptr(mas)) {
5323 mas_sparse_area(mas, min, max, size, false);
5327 /* The start of the window can only be within these values. */
5331 while (!mas_rev_awalk(mas, size)) {
5332 if (last == mas->node) {
5333 if (!mas_rewind_node(mas))
5340 if (mas_is_err(mas))
5341 return xa_err(mas->node);
5343 if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
5347 * mas_rev_awalk() has set mas->min and mas->max to the gap values. If
5348 * the maximum is outside the window we are searching, then use the last
5349 * location in the search.
* mas->max and mas->min are the range of the gap.
5351 * mas->index and mas->last are currently set to the search range.
5354 /* Trim the upper limit to the max. */
5355 if (mas->max <= mas->last)
5356 mas->last = mas->max;
5358 mas->index = mas->last - size + 1;
5361 EXPORT_SYMBOL_GPL(mas_empty_area_rev);
5363 static inline int mas_alloc(struct ma_state *mas, void *entry,
5364 unsigned long size, unsigned long *index)
5369 if (mas_is_none(mas) || mas_is_ptr(mas)) {
5370 mas_root_expand(mas, entry);
5371 if (mas_is_err(mas))
5372 return xa_err(mas->node);
5375 return mte_pivot(mas->node, 0);
5376 return mte_pivot(mas->node, 1);
5379 /* Must be walking a tree. */
5380 mas_awalk(mas, size);
5381 if (mas_is_err(mas))
5382 return xa_err(mas->node);
5384 if (mas->offset == MAPLE_NODE_SLOTS)
5388 * At this point, mas->node points to the right node and we have an
5389 * offset that has a sufficient gap.
5393 min = mte_pivot(mas->node, mas->offset - 1) + 1;
5395 if (mas->index < min)
5398 mas_fill_gap(mas, entry, mas->offset, size, index);
5405 static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min,
5406 unsigned long max, void *entry,
5407 unsigned long size, unsigned long *index)
5411 ret = mas_empty_area_rev(mas, min, max, size);
5415 if (mas_is_err(mas))
5416 return xa_err(mas->node);
5418 if (mas->offset == MAPLE_NODE_SLOTS)
5421 mas_fill_gap(mas, entry, mas->offset, size, index);
5429 * mas_dead_leaves() - Mark all leaves of a node as dead.
5430 * @mas: The maple state
5431 * @slots: Pointer to the slot array
5433 * Must hold the write lock.
5435 * Return: The number of leaves marked as dead.
5438 unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots)
5440 struct maple_node *node;
5441 enum maple_type type;
5445 for (offset = 0; offset < mt_slot_count(mas->node); offset++) {
5446 entry = mas_slot_locked(mas, slots, offset);
5447 type = mte_node_type(entry);
5448 node = mte_to_node(entry);
5449 /* Use both node and type to catch LE & BE metadata */
5453 mte_set_node_dead(entry);
5454 smp_wmb(); /* Needed for RCU */
5456 rcu_assign_pointer(slots[offset], node);
5462 static void __rcu **mas_dead_walk(struct ma_state *mas, unsigned char offset)
5464 struct maple_node *node, *next;
5465 void __rcu **slots = NULL;
5469 mas->node = ma_enode_ptr(next);
5471 slots = ma_slots(node, node->type);
5472 next = mas_slot_locked(mas, slots, offset);
5474 } while (!ma_is_leaf(next->type));
5479 static void mt_free_walk(struct rcu_head *head)
5482 struct maple_node *node, *start;
5483 struct maple_tree mt;
5484 unsigned char offset;
5485 enum maple_type type;
5486 MA_STATE(mas, &mt, 0, 0);
5488 node = container_of(head, struct maple_node, rcu);
5490 if (ma_is_leaf(node->type))
5493 mt_init_flags(&mt, node->ma_flags);
5496 mas.node = mt_mk_node(node, node->type);
5497 slots = mas_dead_walk(&mas, 0);
5498 node = mas_mn(&mas);
5500 mt_free_bulk(node->slot_len, slots);
5501 offset = node->parent_slot + 1;
5502 mas.node = node->piv_parent;
5503 if (mas_mn(&mas) == node)
5504 goto start_slots_free;
5506 type = mte_node_type(mas.node);
5507 slots = ma_slots(mte_to_node(mas.node), type);
5508 if ((offset < mt_slots[type]) && (slots[offset]))
5509 slots = mas_dead_walk(&mas, offset);
5511 node = mas_mn(&mas);
5512 } while ((node != start) || (node->slot_len < offset));
5514 slots = ma_slots(node, node->type);
5515 mt_free_bulk(node->slot_len, slots);
5520 mt_free_rcu(&node->rcu);
5523 static inline void __rcu **mas_destroy_descend(struct ma_state *mas,
5524 struct maple_enode *prev, unsigned char offset)
5526 struct maple_node *node;
5527 struct maple_enode *next = mas->node;
5528 void __rcu **slots = NULL;
5533 slots = ma_slots(node, mte_node_type(mas->node));
5534 next = mas_slot_locked(mas, slots, 0);
5535 if ((mte_dead_node(next)))
5536 next = mas_slot_locked(mas, slots, 1);
5538 mte_set_node_dead(mas->node);
5539 node->type = mte_node_type(mas->node);
5540 node->piv_parent = prev;
5541 node->parent_slot = offset;
5544 } while (!mte_is_leaf(next));
5549 static void mt_destroy_walk(struct maple_enode *enode, unsigned char ma_flags,
5553 struct maple_node *node = mte_to_node(enode);
5554 struct maple_enode *start;
5555 struct maple_tree mt;
5557 MA_STATE(mas, &mt, 0, 0);
5559 if (mte_is_leaf(enode))
5562 mt_init_flags(&mt, ma_flags);
5565 mas.node = start = enode;
5566 slots = mas_destroy_descend(&mas, start, 0);
5567 node = mas_mn(&mas);
5569 enum maple_type type;
5570 unsigned char offset;
5571 struct maple_enode *parent, *tmp;
5573 node->slot_len = mas_dead_leaves(&mas, slots);
5575 mt_free_bulk(node->slot_len, slots);
5576 offset = node->parent_slot + 1;
5577 mas.node = node->piv_parent;
5578 if (mas_mn(&mas) == node)
5579 goto start_slots_free;
5581 type = mte_node_type(mas.node);
5582 slots = ma_slots(mte_to_node(mas.node), type);
5583 if (offset >= mt_slots[type])
5586 tmp = mas_slot_locked(&mas, slots, offset);
5587 if (mte_node_type(tmp) && mte_to_node(tmp)) {
5590 slots = mas_destroy_descend(&mas, parent, offset);
5593 node = mas_mn(&mas);
5594 } while (start != mas.node);
5596 node = mas_mn(&mas);
5597 node->slot_len = mas_dead_leaves(&mas, slots);
5599 mt_free_bulk(node->slot_len, slots);
5606 mt_free_rcu(&node->rcu);
5610 * mte_destroy_walk() - Free a tree or sub-tree.
* @enode: the encoded maple node (maple_enode) to start
* @mt: the tree to free - needed for node types.
5614 * Must hold the write lock.
5616 static inline void mte_destroy_walk(struct maple_enode *enode,
5617 struct maple_tree *mt)
5619 struct maple_node *node = mte_to_node(enode);
5621 if (mt_in_rcu(mt)) {
5622 mt_destroy_walk(enode, mt->ma_flags, false);
5623 call_rcu(&node->rcu, mt_free_walk);
5625 mt_destroy_walk(enode, mt->ma_flags, true);
5629 static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
5631 if (unlikely(mas_is_paused(wr_mas->mas)))
5632 mas_reset(wr_mas->mas);
5634 if (!mas_is_start(wr_mas->mas)) {
5635 if (mas_is_none(wr_mas->mas)) {
5636 mas_reset(wr_mas->mas);
5638 wr_mas->r_max = wr_mas->mas->max;
5639 wr_mas->type = mte_node_type(wr_mas->mas->node);
5640 if (mas_is_span_wr(wr_mas))
5641 mas_reset(wr_mas->mas);
5650 * mas_store() - Store an @entry.
5651 * @mas: The maple state.
5652 * @entry: The entry to store.
* The @mas->index and @mas->last are used to set the range for the @entry.
5655 * Note: The @mas should have pre-allocated entries to ensure there is memory to
5656 * store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
5658 * Return: the first entry between mas->index and mas->last or %NULL.
5660 void *mas_store(struct ma_state *mas, void *entry)
5662 MA_WR_STATE(wr_mas, mas, entry);
5664 trace_ma_write(__func__, mas, 0, entry);
5665 #ifdef CONFIG_DEBUG_MAPLE_TREE
5666 if (mas->index > mas->last)
5667 pr_err("Error %lu > %lu %p\n", mas->index, mas->last, entry);
5668 MT_BUG_ON(mas->tree, mas->index > mas->last);
5669 if (mas->index > mas->last) {
5670 mas_set_err(mas, -EINVAL);
5677 * Storing is the same operation as insert with the added caveat that it
5678 * can overwrite entries. Although this seems simple enough, one may
5679 * want to examine what happens if a single store operation was to
5680 * overwrite multiple entries within a self-balancing B-Tree.
5682 mas_wr_store_setup(&wr_mas);
5683 mas_wr_store_entry(&wr_mas);
5684 return wr_mas.content;
5686 EXPORT_SYMBOL_GPL(mas_store);
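/*
* Example (illustrative values): a single ranged store can replace several
* entries at once. With entries at 0 - 5, 6 - 10 and 11 - 20, storing @new
* over 4 - 15 leaves 0 - 3 holding the first entry, 4 - 15 holding @new and
* 16 - 20 holding the last entry. Assuming the lock is held and allocations
* are available (see mas_store_gfp() below):
*
*	MA_STATE(mas, &mt, 4, 15);
*
*	mas_store(&mas, new);
*/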
5689 * mas_store_gfp() - Store a value into the tree.
5690 * @mas: The maple state
5691 * @entry: The entry to store
5692 * @gfp: The GFP_FLAGS to use for allocations if necessary.
* Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory
* could not be allocated.
5697 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
5699 MA_WR_STATE(wr_mas, mas, entry);
5701 mas_wr_store_setup(&wr_mas);
5702 trace_ma_write(__func__, mas, 0, entry);
5704 mas_wr_store_entry(&wr_mas);
5705 if (unlikely(mas_nomem(mas, gfp)))
5708 if (unlikely(mas_is_err(mas)))
5709 return xa_err(mas->node);
5713 EXPORT_SYMBOL_GPL(mas_store_gfp);
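/*
* Example: a minimal sketch of a locked store; GFP_KERNEL is usable here
* because mas_nomem() drops the tree lock to allocate when blocking is
* allowed. The range and value are illustrative.
*
*	MA_STATE(mas, &mt, 5, 10);
*	int err;
*
*	mas_lock(&mas);
*	err = mas_store_gfp(&mas, xa_mk_value(1), GFP_KERNEL);
*	mas_unlock(&mas);
*/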
5716 * mas_store_prealloc() - Store a value into the tree using memory
5717 * preallocated in the maple state.
5718 * @mas: The maple state
5719 * @entry: The entry to store.
5721 void mas_store_prealloc(struct ma_state *mas, void *entry)
5723 MA_WR_STATE(wr_mas, mas, entry);
5725 mas_wr_store_setup(&wr_mas);
5726 trace_ma_write(__func__, mas, 0, entry);
5727 mas_wr_store_entry(&wr_mas);
5728 BUG_ON(mas_is_err(mas));
5731 EXPORT_SYMBOL_GPL(mas_store_prealloc);
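/*
* Example: a minimal sketch of the preallocated-write pattern; the range,
* @new and the error path are illustrative, and serialisation against
* other writers is assumed to be handled by the caller.
*
*	MA_STATE(mas, &mt, 5, 10);
*
*	if (mas_preallocate(&mas, new, GFP_KERNEL))
*		return -ENOMEM;		// hypothetical error path
*	mas_lock(&mas);
*	mas_store_prealloc(&mas, new);	// cannot fail; frees spare nodes
*	mas_unlock(&mas);
*/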
5734 * mas_preallocate() - Preallocate enough nodes for a store operation
5735 * @mas: The maple state
5736 * @entry: The entry that will be stored
5737 * @gfp: The GFP_FLAGS to use for allocations.
5739 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5741 int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
5745 mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp);
5746 mas->mas_flags |= MA_STATE_PREALLOC;
5747 if (likely(!mas_is_err(mas)))
5750 mas_set_alloc_req(mas, 0);
5751 ret = xa_err(mas->node);
5759 * mas_destroy() - destroy a maple state.
5760 * @mas: The maple state
* Upon completion, check the left-most node and rebalance against the node
* to the right if necessary. Frees any allocated nodes associated with
* this maple state.
5766 void mas_destroy(struct ma_state *mas)
5768 struct maple_alloc *node;
5769 unsigned long total;
5772 * When using mas_for_each() to insert an expected number of elements,
5773 * it is possible that the number inserted is less than the expected
5774 * number. To fix an invalid final node, a check is performed here to
5775 * rebalance the previous node with the final node.
5777 if (mas->mas_flags & MA_STATE_REBALANCE) {
5780 if (mas_is_start(mas))
5783 mtree_range_walk(mas);
5784 end = mas_data_end(mas) + 1;
5785 if (end < mt_min_slot_count(mas->node) - 1)
5786 mas_destroy_rebalance(mas, end);
5788 mas->mas_flags &= ~MA_STATE_REBALANCE;
5790 mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
5792 total = mas_allocated(mas);
5795 mas->alloc = node->slot[0];
5796 if (node->node_count > 1) {
5797 size_t count = node->node_count - 1;
5799 mt_free_bulk(count, (void __rcu **)&node->slot[1]);
5802 kmem_cache_free(maple_node_cache, node);
5808 EXPORT_SYMBOL_GPL(mas_destroy);
5811 * mas_expected_entries() - Set the expected number of entries that will be inserted.
5812 * @mas: The maple state
5813 * @nr_entries: The number of expected entries.
5815 * This will attempt to pre-allocate enough nodes to store the expected number
5816 * of entries. The allocations will occur using the bulk allocator interface
5817 * for speed. Please call mas_destroy() on the @mas after inserting the entries
5818 * to ensure any unused nodes are freed.
5820 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5822 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
5824 int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
5825 struct maple_enode *enode = mas->node;
5830 * Sometimes it is necessary to duplicate a tree to a new tree, such as
5831 * forking a process and duplicating the VMAs from one tree to a new
5832 * tree. When such a situation arises, it is known that the new tree is
5833 * not going to be used until the entire tree is populated. For
5834 * performance reasons, it is best to use a bulk load with RCU disabled.
5835 * This allows for optimistic splitting that favours the left and reuse
5836 * of nodes during the operation.
5839 /* Optimize splitting for bulk insert in-order */
5840 mas->mas_flags |= MA_STATE_BULK;
5843 * Avoid overflow, assume a gap between each entry and a trailing null.
5844 * If this is wrong, it just means allocation can happen during
5845 * insertion of entries.
5847 nr_nodes = max(nr_entries, nr_entries * 2 + 1);
5848 if (!mt_is_alloc(mas->tree))
5849 nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
5851 /* Leaves; reduce slots to keep space for expansion */
5852 nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
5853 /* Internal nodes */
5854 nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
5855 /* Add working room for split (2 nodes) + new parents */
5856 mas_node_count(mas, nr_nodes + 3);
5858 /* Detect if allocations run out */
5859 mas->mas_flags |= MA_STATE_PREALLOC;
5861 if (!mas_is_err(mas))
5864 ret = xa_err(mas->node);
5870 EXPORT_SYMBOL_GPL(mas_expected_entries);
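/*
* Example: a minimal bulk-load sketch, assuming a fresh tree without
* MT_FLAGS_USE_RCU and an illustrative array src[] of @nr non-NULL
* entries destined for consecutive indices.
*
*	MA_STATE(mas, &mt, 0, 0);
*	unsigned long i;
*
*	mas_lock(&mas);
*	if (mas_expected_entries(&mas, nr))
*		goto unlock;		// hypothetical error path
*	for (i = 0; i < nr; i++) {
*		mas_set_range(&mas, i, i);
*		mas_store(&mas, src[i]);
*	}
* unlock:
*	mas_destroy(&mas);		// rebalance and free spare nodes
*	mas_unlock(&mas);
*/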
5873 * mas_next() - Get the next entry.
5874 * @mas: The maple state
5875 * @max: The maximum index to check.
5877 * Returns the next entry after @mas->index.
5878 * Must hold rcu_read_lock or the write lock.
5879 * Can return the zero entry.
5881 * Return: The next entry or %NULL
5883 void *mas_next(struct ma_state *mas, unsigned long max)
5885 if (mas_is_none(mas) || mas_is_paused(mas))
5886 mas->node = MAS_START;
5888 if (mas_is_start(mas))
5889 mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
5891 if (mas_is_ptr(mas)) {
5894 mas->last = ULONG_MAX;
5899 if (mas->last == ULONG_MAX)
5902 /* Retries on dead nodes handled by mas_next_entry */
5903 return mas_next_entry(mas, max);
5905 EXPORT_SYMBOL_GPL(mas_next);
5908 * mt_next() - get the next value in the maple tree
5909 * @mt: The maple tree
5910 * @index: The start index
5911 * @max: The maximum index to check
5913 * Return: The entry at @index or higher, or %NULL if nothing is found.
5915 void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
5918 MA_STATE(mas, mt, index, index);
5921 entry = mas_next(&mas, max);
5925 EXPORT_SYMBOL_GPL(mt_next);
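/*
* Example: mt_next() is a one-shot helper and does not remember the range
* it returned, so iteration should use mt_for_each() or mt_find() instead.
* A single lookup, with an illustrative index:
*
*	void *entry = mt_next(&mt, 100, ULONG_MAX);	// first entry above 100
*/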
5928 * mas_prev() - Get the previous entry
5929 * @mas: The maple state
5930 * @min: The minimum value to check.
5932 * Must hold rcu_read_lock or the write lock.
* Will reset mas to MAS_START if the node is MAS_NONE. Will stop on nodes
* that are not searchable.
5936 * Return: the previous value or %NULL.
5938 void *mas_prev(struct ma_state *mas, unsigned long min)
5941 /* Nothing comes before 0 */
5943 mas->node = MAS_NONE;
5947 if (unlikely(mas_is_ptr(mas)))
5950 if (mas_is_none(mas) || mas_is_paused(mas))
5951 mas->node = MAS_START;
5953 if (mas_is_start(mas)) {
5959 if (mas_is_ptr(mas)) {
5965 mas->index = mas->last = 0;
5966 return mas_root_locked(mas);
5968 return mas_prev_entry(mas, min);
5970 EXPORT_SYMBOL_GPL(mas_prev);
5973 * mt_prev() - get the previous value in the maple tree
5974 * @mt: The maple tree
5975 * @index: The start index
5976 * @min: The minimum index to check
5978 * Return: The entry at @index or lower, or %NULL if nothing is found.
5980 void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
5983 MA_STATE(mas, mt, index, index);
5986 entry = mas_prev(&mas, min);
5990 EXPORT_SYMBOL_GPL(mt_prev);
5993 * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
5994 * @mas: The maple state to pause
5996 * Some users need to pause a walk and drop the lock they're holding in
5997 * order to yield to a higher priority thread or carry out an operation
5998 * on an entry. Those users should call this function before they drop
5999 * the lock. It resets the @mas to be suitable for the next iteration
6000 * of the loop after the user has reacquired the lock. If most entries
6001 * found during a walk require you to call mas_pause(), the mt_for_each()
6002 * iterator may be more appropriate.
6005 void mas_pause(struct ma_state *mas)
6007 mas->node = MAS_PAUSE;
6009 EXPORT_SYMBOL_GPL(mas_pause);
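/*
* Example: a minimal sketch of yielding during a long read-side walk,
* roughly following the pattern described above; the resched handling is
* illustrative.
*
*	MA_STATE(mas, &mt, 0, 0);
*	void *entry;
*
*	rcu_read_lock();
*	mas_for_each(&mas, entry, ULONG_MAX) {
*		if (need_resched()) {
*			mas_pause(&mas);
*			rcu_read_unlock();
*			cond_resched();
*			rcu_read_lock();
*		}
*	}
*	rcu_read_unlock();
*/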
6012 * mas_find() - On the first call, find the entry at or after mas->index up to
6013 * %max. Otherwise, find the entry after mas->index.
6014 * @mas: The maple state
6015 * @max: The maximum value to check.
6017 * Must hold rcu_read_lock or the write lock.
6018 * If an entry exists, last and index are updated accordingly.
6019 * May set @mas->node to MAS_NONE.
6021 * Return: The entry or %NULL.
6023 void *mas_find(struct ma_state *mas, unsigned long max)
6025 if (unlikely(mas_is_paused(mas))) {
6026 if (unlikely(mas->last == ULONG_MAX)) {
6027 mas->node = MAS_NONE;
6030 mas->node = MAS_START;
6031 mas->index = ++mas->last;
6034 if (unlikely(mas_is_none(mas)))
6035 mas->node = MAS_START;
6037 if (unlikely(mas_is_start(mas))) {
6038 /* First run or continue */
6041 if (mas->index > max)
6044 entry = mas_walk(mas);
6049 if (unlikely(!mas_searchable(mas)))
6052 /* Retries on dead nodes handled by mas_next_entry */
6053 return mas_next_entry(mas, max);
6055 EXPORT_SYMBOL_GPL(mas_find);
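/*
* Example: mas_for_each() in maple_tree.h is a thin wrapper around this
* function; an open-coded walk looks roughly like the sketch below.
*
*	MA_STATE(mas, &mt, 0, 0);
*	void *entry;
*
*	rcu_read_lock();
*	while ((entry = mas_find(&mas, ULONG_MAX)) != NULL)
*		pr_debug("[%lu, %lu] -> %p\n", mas.index, mas.last, entry);
*	rcu_read_unlock();
*/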
6058 * mas_find_rev: On the first call, find the first non-null entry at or below
6059 * mas->index down to %min. Otherwise find the first non-null entry below
6060 * mas->index down to %min.
6061 * @mas: The maple state
6062 * @min: The minimum value to check.
6064 * Must hold rcu_read_lock or the write lock.
6065 * If an entry exists, last and index are updated accordingly.
6066 * May set @mas->node to MAS_NONE.
6068 * Return: The entry or %NULL.
6070 void *mas_find_rev(struct ma_state *mas, unsigned long min)
6072 if (unlikely(mas_is_paused(mas))) {
6073 if (unlikely(mas->last == ULONG_MAX)) {
6074 mas->node = MAS_NONE;
6077 mas->node = MAS_START;
6078 mas->last = --mas->index;
6081 if (unlikely(mas_is_start(mas))) {
6082 /* First run or continue */
6085 if (mas->index < min)
6088 entry = mas_walk(mas);
6093 if (unlikely(!mas_searchable(mas)))
6096 if (mas->index < min)
/* Retries on dead nodes handled by mas_prev_entry */
6100 return mas_prev_entry(mas, min);
6102 EXPORT_SYMBOL_GPL(mas_find_rev);
* mas_erase() - Find the range in which index resides and erase the
* entire range.
6107 * @mas: The maple state
6109 * Must hold the write lock.
6110 * Searches for @mas->index, sets @mas->index and @mas->last to the range and
6111 * erases that range.
6113 * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
6115 void *mas_erase(struct ma_state *mas)
6118 MA_WR_STATE(wr_mas, mas, NULL);
6120 if (mas_is_none(mas) || mas_is_paused(mas))
6121 mas->node = MAS_START;
6123 /* Retry unnecessary when holding the write lock. */
6124 entry = mas_state_walk(mas);
6129 /* Must reset to ensure spanning writes of last slot are detected */
6131 mas_wr_store_setup(&wr_mas);
6132 mas_wr_store_entry(&wr_mas);
6133 if (mas_nomem(mas, GFP_KERNEL))
6138 EXPORT_SYMBOL_GPL(mas_erase);
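/*
* Example: a minimal sketch erasing whichever range contains index 12;
* the index is illustrative only.
*
*	MA_STATE(mas, &mt, 12, 12);
*	void *old;
*
*	mas_lock(&mas);
*	old = mas_erase(&mas);	// mas.index/mas.last now span the erased range
*	mas_unlock(&mas);
*/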
* mas_nomem() - Check if there was an error allocating and do the
* allocation if necessary. If there are allocations, then free them.
6143 * @mas: The maple state
6144 * @gfp: The GFP_FLAGS to use for allocations
6145 * Return: true on allocation, false otherwise.
6147 bool mas_nomem(struct ma_state *mas, gfp_t gfp)
6148 __must_hold(mas->tree->lock)
6150 if (likely(mas->node != MA_ERROR(-ENOMEM))) {
6155 if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
6156 mtree_unlock(mas->tree);
6157 mas_alloc_nodes(mas, gfp);
6158 mtree_lock(mas->tree);
6160 mas_alloc_nodes(mas, gfp);
6163 if (!mas_allocated(mas))
6166 mas->node = MAS_START;
6170 void __init maple_tree_init(void)
6172 maple_node_cache = kmem_cache_create("maple_node",
6173 sizeof(struct maple_node), sizeof(struct maple_node),
6178 * mtree_load() - Load a value stored in a maple tree
6179 * @mt: The maple tree
6180 * @index: The index to load
6182 * Return: the entry or %NULL
6184 void *mtree_load(struct maple_tree *mt, unsigned long index)
6186 MA_STATE(mas, mt, index, index);
6189 trace_ma_read(__func__, &mas);
6192 entry = mas_start(&mas);
6193 if (unlikely(mas_is_none(&mas)))
6196 if (unlikely(mas_is_ptr(&mas))) {
6203 entry = mtree_lookup_walk(&mas);
6204 if (!entry && unlikely(mas_is_start(&mas)))
6208 if (xa_is_zero(entry))
6213 EXPORT_SYMBOL(mtree_load);
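/*
* Example: point lookups need no maple state and handle locking
* internally; the index is illustrative.
*
*	void *entry = mtree_load(&mt, 42);
*/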
6216 * mtree_store_range() - Store an entry at a given range.
6217 * @mt: The maple tree
6218 * @index: The start of the range
6219 * @last: The end of the range
6220 * @entry: The entry to store
6221 * @gfp: The GFP_FLAGS to use for allocations
* Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory
* could not be allocated.
6226 int mtree_store_range(struct maple_tree *mt, unsigned long index,
6227 unsigned long last, void *entry, gfp_t gfp)
6229 MA_STATE(mas, mt, index, last);
6230 MA_WR_STATE(wr_mas, &mas, entry);
6232 trace_ma_write(__func__, &mas, 0, entry);
6233 if (WARN_ON_ONCE(xa_is_advanced(entry)))
6241 mas_wr_store_entry(&wr_mas);
6242 if (mas_nomem(&mas, gfp))
6246 if (mas_is_err(&mas))
6247 return xa_err(mas.node);
6251 EXPORT_SYMBOL(mtree_store_range);
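/*
* Example: a minimal sketch storing one entry over a whole range and
* later removing it by storing NULL; the bounds and @ptr are illustrative.
*
*	if (mtree_store_range(&mt, 0x1000, 0x1fff, ptr, GFP_KERNEL))
*		return -ENOMEM;		// hypothetical error path
*
*	mtree_store_range(&mt, 0x1000, 0x1fff, NULL, GFP_KERNEL);
*/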
6254 * mtree_store() - Store an entry at a given index.
6255 * @mt: The maple tree
6256 * @index: The index to store the value
6257 * @entry: The entry to store
6258 * @gfp: The GFP_FLAGS to use for allocations
* Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory
* could not be allocated.
6263 int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
6266 return mtree_store_range(mt, index, index, entry, gfp);
6268 EXPORT_SYMBOL(mtree_store);
* mtree_insert_range() - Insert an entry at a given range if there is no value.
6272 * @mt: The maple tree
6273 * @first: The start of the range
6274 * @last: The end of the range
6275 * @entry: The entry to store
6276 * @gfp: The GFP_FLAGS to use for allocations.
* Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6279 * request, -ENOMEM if memory could not be allocated.
6281 int mtree_insert_range(struct maple_tree *mt, unsigned long first,
6282 unsigned long last, void *entry, gfp_t gfp)
6284 MA_STATE(ms, mt, first, last);
6286 if (WARN_ON_ONCE(xa_is_advanced(entry)))
6294 mas_insert(&ms, entry);
6295 if (mas_nomem(&ms, gfp))
6299 if (mas_is_err(&ms))
6300 return xa_err(ms.node);
6304 EXPORT_SYMBOL(mtree_insert_range);
* mtree_insert() - Insert an entry at a given index if there is no value.
6308 * @mt: The maple tree
6309 * @index : The index to store the value
6310 * @entry: The entry to store
* @gfp: The GFP_FLAGS to use for allocations.
* Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6314 * request, -ENOMEM if memory could not be allocated.
6316 int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
6319 return mtree_insert_range(mt, index, index, entry, gfp);
6321 EXPORT_SYMBOL(mtree_insert);
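/*
* Example: unlike mtree_store(), insertion refuses to overwrite, so the
* second call below fails with -EEXIST. Values are illustrative.
*
*	mtree_insert(&mt, 7, xa_mk_value(1), GFP_KERNEL);	// returns 0
*	mtree_insert(&mt, 7, xa_mk_value(2), GFP_KERNEL);	// returns -EEXIST
*/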
6323 int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
6324 void *entry, unsigned long size, unsigned long min,
6325 unsigned long max, gfp_t gfp)
6329 MA_STATE(mas, mt, min, max - size);
6330 if (!mt_is_alloc(mt))
6333 if (WARN_ON_ONCE(mt_is_reserved(entry)))
6349 mas.last = max - size;
6350 ret = mas_alloc(&mas, entry, size, startp);
6351 if (mas_nomem(&mas, gfp))
6357 EXPORT_SYMBOL(mtree_alloc_range);
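/*
* Example: a minimal sketch reserving a 0x1000-wide slot in an
* MT_FLAGS_ALLOC_RANGE tree; the bounds and @ptr are illustrative.
*
*	unsigned long start;
*	int err;
*
*	err = mtree_alloc_range(&mt, &start, ptr, 0x1000, 0, 0x10000,
*				GFP_KERNEL);
*	if (err)
*		return err;	// e.g. -EBUSY when no gap is large enough
*	// ptr now occupies start to start + 0xfff
*/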
6359 int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
6360 void *entry, unsigned long size, unsigned long min,
6361 unsigned long max, gfp_t gfp)
6365 MA_STATE(mas, mt, min, max - size);
6366 if (!mt_is_alloc(mt))
6369 if (WARN_ON_ONCE(mt_is_reserved(entry)))
6383 ret = mas_rev_alloc(&mas, min, max, entry, size, startp);
6384 if (mas_nomem(&mas, gfp))
6390 EXPORT_SYMBOL(mtree_alloc_rrange);
6393 * mtree_erase() - Find an index and erase the entire range.
6394 * @mt: The maple tree
6395 * @index: The index to erase
6397 * Erasing is the same as a walk to an entry then a store of a NULL to that
6398 * ENTIRE range. In fact, it is implemented as such using the advanced API.
6400 * Return: The entry stored at the @index or %NULL
6402 void *mtree_erase(struct maple_tree *mt, unsigned long index)
6406 MA_STATE(mas, mt, index, index);
6407 trace_ma_op(__func__, &mas);
6410 entry = mas_erase(&mas);
6415 EXPORT_SYMBOL(mtree_erase);
6418 * __mt_destroy() - Walk and free all nodes of a locked maple tree.
6419 * @mt: The maple tree
6421 * Note: Does not handle locking.
6423 void __mt_destroy(struct maple_tree *mt)
6425 void *root = mt_root_locked(mt);
6427 rcu_assign_pointer(mt->ma_root, NULL);
6428 if (xa_is_node(root))
6429 mte_destroy_walk(root, mt);
6433 EXPORT_SYMBOL_GPL(__mt_destroy);
6436 * mtree_destroy() - Destroy a maple tree
6437 * @mt: The maple tree
6439 * Frees all resources used by the tree. Handles locking.
6441 void mtree_destroy(struct maple_tree *mt)
6447 EXPORT_SYMBOL(mtree_destroy);
6450 * mt_find() - Search from the start up until an entry is found.
6451 * @mt: The maple tree
6452 * @index: Pointer which contains the start location of the search
6453 * @max: The maximum value to check
6455 * Handles locking. @index will be incremented to one beyond the range.
6457 * Return: The entry at or after the @index or %NULL
6459 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
6461 MA_STATE(mas, mt, *index, *index);
6463 #ifdef CONFIG_DEBUG_MAPLE_TREE
6464 unsigned long copy = *index;
6467 trace_ma_read(__func__, &mas);
6474 entry = mas_state_walk(&mas);
6475 if (mas_is_start(&mas))
6478 if (unlikely(xa_is_zero(entry)))
6484 while (mas_searchable(&mas) && (mas.index < max)) {
6485 entry = mas_next_entry(&mas, max);
6486 if (likely(entry && !xa_is_zero(entry)))
6490 if (unlikely(xa_is_zero(entry)))
6494 if (likely(entry)) {
6495 *index = mas.last + 1;
6496 #ifdef CONFIG_DEBUG_MAPLE_TREE
6497 if ((*index) && (*index) <= copy)
6498 pr_err("index not increased! %lx <= %lx\n",
6500 MT_BUG_ON(mt, (*index) && ((*index) <= copy));
6506 EXPORT_SYMBOL(mt_find);
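/*
* Example: mt_for_each() in maple_tree.h is built on this function; an
* open-coded version of that walk looks roughly like this sketch.
*
*	unsigned long index = 0;
*	void *entry;
*
*	while ((entry = mt_find(&mt, &index, ULONG_MAX)) != NULL)
*		pr_debug("%p ends before %lu\n", entry, index);
*/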
6509 * mt_find_after() - Search from the start up until an entry is found.
6510 * @mt: The maple tree
6511 * @index: Pointer which contains the start location of the search
6512 * @max: The maximum value to check
* Handles locking; detects wrapping on index == 0.
6516 * Return: The entry at or after the @index or %NULL
6518 void *mt_find_after(struct maple_tree *mt, unsigned long *index,
6524 return mt_find(mt, index, max);
6526 EXPORT_SYMBOL(mt_find_after);
6528 #ifdef CONFIG_DEBUG_MAPLE_TREE
6529 atomic_t maple_tree_tests_run;
6530 EXPORT_SYMBOL_GPL(maple_tree_tests_run);
6531 atomic_t maple_tree_tests_passed;
6532 EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
6535 extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
6536 void mt_set_non_kernel(unsigned int val)
6538 kmem_cache_set_non_kernel(maple_node_cache, val);
6541 extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
6542 unsigned long mt_get_alloc_size(void)
6544 return kmem_cache_get_alloc(maple_node_cache);
6547 extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
6548 void mt_zero_nr_tallocated(void)
6550 kmem_cache_zero_nr_tallocated(maple_node_cache);
6553 extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
6554 unsigned int mt_nr_tallocated(void)
6556 return kmem_cache_nr_tallocated(maple_node_cache);
6559 extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
6560 unsigned int mt_nr_allocated(void)
6562 return kmem_cache_nr_allocated(maple_node_cache);
6566 * mas_dead_node() - Check if the maple state is pointing to a dead node.
6567 * @mas: The maple state
6568 * @index: The index to restore in @mas.
6570 * Used in test code.
6571 * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
6573 static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
6575 if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
6578 if (likely(!mte_dead_node(mas->node)))
6581 mas_rewalk(mas, index);
6585 void mt_cache_shrink(void)
6590 * mt_cache_shrink() - For testing, don't use this.
6592 * Certain testcases can trigger an OOM when combined with other memory
6593 * debugging configuration options. This function is used to reduce the
* possibility of an out-of-memory event due to kmem_cache objects remaining
6595 * around for longer than usual.
6597 void mt_cache_shrink(void)
6599 kmem_cache_shrink(maple_node_cache);
6602 EXPORT_SYMBOL_GPL(mt_cache_shrink);
6604 #endif /* not defined __KERNEL__ */
6606 * mas_get_slot() - Get the entry in the maple state node stored at @offset.
6607 * @mas: The maple state
6608 * @offset: The offset into the slot array to fetch.
6610 * Return: The entry stored at @offset.
6612 static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
6613 unsigned char offset)
6615 return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
* mas_first_entry() - Go to the first leaf and find the first entry.
* @mas: the maple state.
* @mn: the maple node backing @mas->node.
* @limit: the maximum index to check.
* @mt: the maple node type of @mn.
*
* Sets mas->offset to the offset of the entry and mas->index to the range
* minimum.
6628 * Return: The first entry or MAS_NONE.
6630 static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
6631 unsigned long limit, enum maple_type mt)
6635 unsigned long *pivots;
6639 mas->index = mas->min;
6640 if (mas->index > limit)
6645 while (likely(!ma_is_leaf(mt))) {
6646 MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
6647 slots = ma_slots(mn, mt);
6648 entry = mas_slot(mas, slots, 0);
6649 pivots = ma_pivots(mn, mt);
6650 if (unlikely(ma_dead_node(mn)))
6655 mt = mte_node_type(mas->node);
6657 MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
6660 slots = ma_slots(mn, mt);
6661 entry = mas_slot(mas, slots, 0);
6662 if (unlikely(ma_dead_node(mn)))
6665 /* Slot 0 or 1 must be set */
6666 if (mas->index > limit)
6673 entry = mas_slot(mas, slots, 1);
6674 pivots = ma_pivots(mn, mt);
6675 if (unlikely(ma_dead_node(mn)))
6678 mas->index = pivots[0] + 1;
6679 if (mas->index > limit)
6686 if (likely(!ma_dead_node(mn)))
6687 mas->node = MAS_NONE;
6691 /* Depth first search, post-order */
6692 static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
6695 struct maple_enode *p = MAS_NONE, *mn = mas->node;
6696 unsigned long p_min, p_max;
6698 mas_next_node(mas, mas_mn(mas), max);
6699 if (!mas_is_none(mas))
6702 if (mte_is_root(mn))
6707 while (mas->node != MAS_NONE) {
6711 mas_prev_node(mas, 0);
6722 /* Tree validations */
6723 static void mt_dump_node(const struct maple_tree *mt, void *entry,
6724 unsigned long min, unsigned long max, unsigned int depth);
6725 static void mt_dump_range(unsigned long min, unsigned long max,
6728 static const char spaces[] = " ";
6731 pr_info("%.*s%lu: ", depth * 2, spaces, min);
6733 pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
6736 static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
6739 mt_dump_range(min, max, depth);
6741 if (xa_is_value(entry))
6742 pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
6743 xa_to_value(entry), entry);
6744 else if (xa_is_zero(entry))
6745 pr_cont("zero (%ld)\n", xa_to_internal(entry));
6746 else if (mt_is_reserved(entry))
6747 pr_cont("UNKNOWN ENTRY (%p)\n", entry);
6749 pr_cont("%p\n", entry);
6752 static void mt_dump_range64(const struct maple_tree *mt, void *entry,
6753 unsigned long min, unsigned long max, unsigned int depth)
6755 struct maple_range_64 *node = &mte_to_node(entry)->mr64;
6756 bool leaf = mte_is_leaf(entry);
6757 unsigned long first = min;
6760 pr_cont(" contents: ");
6761 for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++)
6762 pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6763 pr_cont("%p\n", node->slot[i]);
6764 for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
6765 unsigned long last = max;
6767 if (i < (MAPLE_RANGE64_SLOTS - 1))
6768 last = node->pivot[i];
6769 else if (!node->slot[i] && max != mt_max[mte_node_type(entry)])
6771 if (last == 0 && i > 0)
6774 mt_dump_entry(mt_slot(mt, node->slot, i),
6775 first, last, depth + 1);
6776 else if (node->slot[i])
6777 mt_dump_node(mt, mt_slot(mt, node->slot, i),
6778 first, last, depth + 1);
6783 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6784 node, last, max, i);
6791 static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
6792 unsigned long min, unsigned long max, unsigned int depth)
6794 struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
6795 bool leaf = mte_is_leaf(entry);
6796 unsigned long first = min;
6799 pr_cont(" contents: ");
6800 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++)
6801 pr_cont("%lu ", node->gap[i]);
6802 pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
6803 for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++)
6804 pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6805 pr_cont("%p\n", node->slot[i]);
6806 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
6807 unsigned long last = max;
6809 if (i < (MAPLE_ARANGE64_SLOTS - 1))
6810 last = node->pivot[i];
6811 else if (!node->slot[i])
6813 if (last == 0 && i > 0)
6816 mt_dump_entry(mt_slot(mt, node->slot, i),
6817 first, last, depth + 1);
6818 else if (node->slot[i])
6819 mt_dump_node(mt, mt_slot(mt, node->slot, i),
6820 first, last, depth + 1);
6825 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6826 node, last, max, i);
6833 static void mt_dump_node(const struct maple_tree *mt, void *entry,
6834 unsigned long min, unsigned long max, unsigned int depth)
6836 struct maple_node *node = mte_to_node(entry);
6837 unsigned int type = mte_node_type(entry);
6840 mt_dump_range(min, max, depth);
6842 pr_cont("node %p depth %d type %d parent %p", node, depth, type,
6843 node ? node->parent : NULL);
6847 for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
6849 pr_cont("OUT OF RANGE: ");
6850 mt_dump_entry(mt_slot(mt, node->slot, i),
6851 min + i, min + i, depth);
6855 case maple_range_64:
6856 mt_dump_range64(mt, entry, min, max, depth);
6858 case maple_arange_64:
6859 mt_dump_arange64(mt, entry, min, max, depth);
6863 pr_cont(" UNKNOWN TYPE\n");
6867 void mt_dump(const struct maple_tree *mt)
6869 void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
6871 pr_info("maple_tree(%p) flags %X, height %u root %p\n",
6872 mt, mt->ma_flags, mt_height(mt), entry);
6873 if (!xa_is_node(entry))
6874 mt_dump_entry(entry, 0, 0, 0);
6876 mt_dump_node(mt, entry, 0, mt_max[mte_node_type(entry)], 0);
6878 EXPORT_SYMBOL_GPL(mt_dump);
6881 * Calculate the maximum gap in a node and check if that's what is reported in
6882 * the parent (unless root).
6884 static void mas_validate_gaps(struct ma_state *mas)
6886 struct maple_enode *mte = mas->node;
6887 struct maple_node *p_mn;
6888 unsigned long gap = 0, max_gap = 0;
6889 unsigned long p_end, p_start = mas->min;
6890 unsigned char p_slot;
6891 unsigned long *gaps = NULL;
6892 unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte));
6895 if (ma_is_dense(mte_node_type(mte))) {
6896 for (i = 0; i < mt_slot_count(mte); i++) {
6897 if (mas_get_slot(mas, i)) {
6908 gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte));
6909 for (i = 0; i < mt_slot_count(mte); i++) {
6910 p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte));
6913 if (mas_get_slot(mas, i)) {
6918 gap += p_end - p_start + 1;
6920 void *entry = mas_get_slot(mas, i);
6924 if (gap != p_end - p_start + 1) {
6925 pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n",
6927 mas_get_slot(mas, i), gap,
6931 MT_BUG_ON(mas->tree,
6932 gap != p_end - p_start + 1);
6935 if (gap > p_end - p_start + 1) {
6936 pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
6937 mas_mn(mas), i, gap, p_end, p_start,
6938 p_end - p_start + 1);
6939 MT_BUG_ON(mas->tree,
6940 gap > p_end - p_start + 1);
6948 p_start = p_end + 1;
6949 if (p_end >= mas->max)
6954 if (mte_is_root(mte))
6957 p_slot = mte_parent_slot(mas->node);
6958 p_mn = mte_parent(mte);
6959 MT_BUG_ON(mas->tree, max_gap > mas->max);
6960 if (ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap) {
6961 pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
6965 MT_BUG_ON(mas->tree,
6966 ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap);
6969 static void mas_validate_parent_slot(struct ma_state *mas)
6971 struct maple_node *parent;
6972 struct maple_enode *node;
6973 enum maple_type p_type = mas_parent_enum(mas, mas->node);
6974 unsigned char p_slot = mte_parent_slot(mas->node);
6978 if (mte_is_root(mas->node))
6981 parent = mte_parent(mas->node);
6982 slots = ma_slots(parent, p_type);
6983 MT_BUG_ON(mas->tree, mas_mn(mas) == parent);
6985 /* Check prev/next parent slot for duplicate node entry */
6987 for (i = 0; i < mt_slots[p_type]; i++) {
6988 node = mas_slot(mas, slots, i);
6990 if (node != mas->node)
6991 pr_err("parent %p[%u] does not have %p\n",
6992 parent, i, mas_mn(mas));
6993 MT_BUG_ON(mas->tree, node != mas->node);
6994 } else if (node == mas->node) {
6995 pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
6996 mas_mn(mas), parent, i, p_slot);
6997 MT_BUG_ON(mas->tree, node == mas->node);
7002 static void mas_validate_child_slot(struct ma_state *mas)
7004 enum maple_type type = mte_node_type(mas->node);
7005 void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7006 unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
7007 struct maple_enode *child;
7010 if (mte_is_leaf(mas->node))
7013 for (i = 0; i < mt_slots[type]; i++) {
7014 child = mas_slot(mas, slots, i);
7015 if (!pivots[i] || pivots[i] == mas->max)
7021 if (mte_parent_slot(child) != i) {
7022 pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
7023 mas_mn(mas), i, mte_to_node(child),
7024 mte_parent_slot(child));
7025 MT_BUG_ON(mas->tree, 1);
7028 if (mte_parent(child) != mte_to_node(mas->node)) {
7029 pr_err("child %p has parent %p not %p\n",
7030 mte_to_node(child), mte_parent(child),
7031 mte_to_node(mas->node));
7032 MT_BUG_ON(mas->tree, 1);
7038 * Validate all pivots are within mas->min and mas->max.
7040 static void mas_validate_limits(struct ma_state *mas)
7043 unsigned long prev_piv = 0;
7044 enum maple_type type = mte_node_type(mas->node);
7045 void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7046 unsigned long *pivots = ma_pivots(mas_mn(mas), type);
7048 /* all limits are fine here. */
7049 if (mte_is_root(mas->node))
7052 for (i = 0; i < mt_slots[type]; i++) {
7055 piv = mas_safe_pivot(mas, pivots, i, type);
7057 if (!piv && (i != 0))
7060 if (!mte_is_leaf(mas->node)) {
7061 void *entry = mas_slot(mas, slots, i);
7064 pr_err("%p[%u] cannot be null\n",
7067 MT_BUG_ON(mas->tree, !entry);
7070 if (prev_piv > piv) {
7071 pr_err("%p[%u] piv %lu < prev_piv %lu\n",
7072 mas_mn(mas), i, piv, prev_piv);
7073 MT_BUG_ON(mas->tree, piv < prev_piv);
7076 if (piv < mas->min) {
7077 pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
7079 MT_BUG_ON(mas->tree, piv < mas->min);
7081 if (piv > mas->max) {
7082 pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
7084 MT_BUG_ON(mas->tree, piv > mas->max);
7087 if (piv == mas->max)
7090 for (i += 1; i < mt_slots[type]; i++) {
7091 void *entry = mas_slot(mas, slots, i);
7093 if (entry && (i != mt_slots[type] - 1)) {
7094 pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
7096 MT_BUG_ON(mas->tree, entry != NULL);
7099 if (i < mt_pivots[type]) {
7100 unsigned long piv = pivots[i];
7105 pr_err("%p[%u] should not have piv %lu\n",
7106 mas_mn(mas), i, piv);
7107 MT_BUG_ON(mas->tree, i < mt_pivots[type] - 1);
7112 static void mt_validate_nulls(struct maple_tree *mt)
7114 void *entry, *last = (void *)1;
7115 unsigned char offset = 0;
7117 MA_STATE(mas, mt, 0, 0);
7120 if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
7123 while (!mte_is_leaf(mas.node))
7126 slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
7128 entry = mas_slot(&mas, slots, offset);
7129 if (!last && !entry) {
7130 pr_err("Sequential nulls end at %p[%u]\n",
7131 mas_mn(&mas), offset);
7133 MT_BUG_ON(mt, !last && !entry);
7135 if (offset == mas_data_end(&mas)) {
7136 mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
7137 if (mas_is_none(&mas))
7140 slots = ma_slots(mte_to_node(mas.node),
7141 mte_node_type(mas.node));
7146 } while (!mas_is_none(&mas));
7150 * validate a maple tree by checking:
7151 * 1. The limits (pivots are within mas->min to mas->max)
7152 * 2. The gap is correctly set in the parents
7154 void mt_validate(struct maple_tree *mt)
7158 MA_STATE(mas, mt, 0, 0);
7161 if (!mas_searchable(&mas))
7164 mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node));
7165 while (!mas_is_none(&mas)) {
7166 MT_BUG_ON(mas.tree, mte_dead_node(mas.node));
7167 if (!mte_is_root(mas.node)) {
7168 end = mas_data_end(&mas);
7169 if ((end < mt_min_slot_count(mas.node)) &&
7170 (mas.max != ULONG_MAX)) {
7171 pr_err("Invalid size %u of %p\n", end,
7173 MT_BUG_ON(mas.tree, 1);
7177 mas_validate_parent_slot(&mas);
7178 mas_validate_child_slot(&mas);
7179 mas_validate_limits(&mas);
7180 if (mt_is_alloc(mt))
7181 mas_validate_gaps(&mas);
7182 mas_dfs_postorder(&mas, ULONG_MAX);
7184 mt_validate_nulls(mt);
7189 EXPORT_SYMBOL_GPL(mt_validate);
7191 #endif /* CONFIG_DEBUG_MAPLE_TREE */