// SPDX-License-Identifier: GPL-2.0+
/*
 * Maple Tree implementation
 * Copyright (c) 2018-2022 Oracle Corporation
 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
 *	    Matthew Wilcox <willy@infradead.org>
 */

/*
 * DOC: Interesting implementation details of the Maple Tree
 *
 * Each node type has a number of slots for entries and a number of slots for
 * pivots.  In the case of dense nodes, the pivots are implied by the position
 * and are simply the slot index + the minimum of the node.
 *
 * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
 * indicate that the tree is specifying ranges.  Pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
 * specific position of a B-tree.  Pivot values are inclusive of the slot with
 * the same index.
 *
 * The following illustrates the layout of a range64 node's slots and pivots.
 *
 *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
 *           ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
 *           │   │   │   │     │    │    │    │    └─ Implied maximum
 *           │   │   │   │     │    │    │    └─ Pivot 14
 *           │   │   │   │     │    │    └─ Pivot 13
 *           │   │   │   │     │    └─ Pivot 12
 *           │   │   │   │     └─ Pivot 11
 *           │   │   │   └─ Pivot 2
 *           │   │   └─ Pivot 1
 *           │   └─ Pivot 0
 *           └─  Implied minimum
 *
 * Slot contents:
 *  Internal (non-leaf) nodes contain pointers to other nodes.
 *  Leaf nodes contain entries.
 *
 * The location of interest is often referred to as an offset.  All offsets have
 * a slot, but the last offset has an implied pivot from the node above (or
 * UINT_MAX for the root node).
 *
 * Ranges complicate certain write activities.  When modifying any of
 * the B-tree variants, it is known that one entry will either be added or
 * deleted.  When modifying the Maple Tree, one store operation may overwrite
 * the entire data set, or one half of the tree, or the middle half of the tree.
 */
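
/*
 * Illustration only (not part of the original source): a minimal sketch of how
 * ranges map to entries through the public API from <linux/maple_tree.h>.
 * The pointers and ranges below are hypothetical.
 *
 *	DEFINE_MTREE(tree);
 *
 *	mtree_store_range(&tree, 0, 9, ptr_a, GFP_KERNEL);	// [0, 9] -> ptr_a
 *	mtree_store_range(&tree, 10, 19, ptr_b, GFP_KERNEL);	// [10, 19] -> ptr_b
 *	mtree_load(&tree, 15);	// returns ptr_b: 15 falls within [10, 19]
 *
 * In a leaf, each of the two entries occupies one slot, with the pivot of that
 * slot holding the inclusive end of its range (9 and 19 respectively).
 */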

#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>

#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>

#define MA_ROOT_PARENT 1

/*
 * Maple state flags
 * * MA_STATE_BULK		- Bulk insert mode
 * * MA_STATE_REBALANCE	- Indicate a rebalance during bulk insert
 * * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
 */
#define MA_STATE_BULK		1
#define MA_STATE_REBALANCE	2
#define MA_STATE_PREALLOC	4

#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;

#ifdef CONFIG_DEBUG_MAPLE_TREE
static const unsigned long mt_max[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= ULONG_MAX,
	[maple_range_64]	= ULONG_MAX,
	[maple_arange_64]	= ULONG_MAX,
};
#define mt_node_max(x) mt_max[mte_node_type(x)]
#endif

static const unsigned char mt_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
};
#define mt_slot_count(x) mt_slots[mte_node_type(x)]

static const unsigned char mt_pivots[] = {
	[maple_dense]		= 0,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
};
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]

static const unsigned char mt_min_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 1,
};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]

#define MAPLE_BIG_NODE_SLOTS	(MAPLE_RANGE64_SLOTS * 2 + 2)
#define MAPLE_BIG_NODE_GAPS	(MAPLE_ARANGE64_SLOTS * 2 + 1)

struct maple_big_node {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
	union {
		struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
		struct {
			unsigned long padding[MAPLE_BIG_NODE_GAPS];
			unsigned long gap[MAPLE_BIG_NODE_GAPS];
		};
	};
	unsigned char b_end;
	enum maple_type type;
};

/*
 * The maple_subtree_state is used to build a tree to replace a segment of an
 * existing tree in a more atomic way.  Any walkers of the older tree will hit a
 * dead node and restart on updates.
 */
struct maple_subtree_state {
	struct ma_state *orig_l;	/* Original left side of subtree */
	struct ma_state *orig_r;	/* Original right side of subtree */
	struct ma_state *l;		/* New left side of subtree */
	struct ma_state *m;		/* New middle of subtree (rare) */
	struct ma_state *r;		/* New right side of subtree */
	struct ma_topiary *free;	/* nodes to be freed */
	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
	struct maple_big_node *bn;
};

#ifdef CONFIG_KASAN_STACK
/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
#define noinline_for_kasan noinline_for_stack
#else
#define noinline_for_kasan inline
#endif

/* Functions */
static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
	return kmem_cache_alloc(maple_node_cache, gfp);
}

static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
{
	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}

static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
}

static void mt_free_rcu(struct rcu_head *head)
{
	struct maple_node *node = container_of(head, struct maple_node, rcu);

	kmem_cache_free(maple_node_cache, node);
}

/*
 * ma_free_rcu() - Use rcu callback to free a maple node
 * @node: The node to free
 *
 * The maple tree uses the parent pointer to indicate this node is no longer in
 * use and will be freed.
 */
static void ma_free_rcu(struct maple_node *node)
{
	WARN_ON(node->parent != ma_parent_ptr(node));
	call_rcu(&node->rcu, mt_free_rcu);
}

static void mas_set_height(struct ma_state *mas)
{
	unsigned int new_flags = mas->tree->ma_flags;

	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
	MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX);
	new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
	mas->tree->ma_flags = new_flags;
}

static unsigned int mas_mt_height(struct ma_state *mas)
{
	return mt_height(mas->tree);
}

static inline enum maple_type mte_node_type(const struct maple_enode *entry)
{
	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
		MAPLE_NODE_TYPE_MASK;
}

static inline bool ma_is_dense(const enum maple_type type)
{
	return type < maple_leaf_64;
}

static inline bool ma_is_leaf(const enum maple_type type)
{
	return type < maple_range_64;
}

static inline bool mte_is_leaf(const struct maple_enode *entry)
{
	return ma_is_leaf(mte_node_type(entry));
}

/*
 * We also reserve values with the bottom two bits set to '10' which are
 * below 4096
 */
static inline bool mt_is_reserved(const void *entry)
{
	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
		xa_is_internal(entry);
}

static inline void mas_set_err(struct ma_state *mas, long err)
{
	mas->node = MA_ERROR(err);
}

static inline bool mas_is_ptr(const struct ma_state *mas)
{
	return mas->node == MAS_ROOT;
}

static inline bool mas_is_start(const struct ma_state *mas)
{
	return mas->node == MAS_START;
}

bool mas_is_err(struct ma_state *mas)
{
	return xa_is_err(mas->node);
}

static inline bool mas_searchable(struct ma_state *mas)
{
	if (mas_is_none(mas))
		return false;

	if (mas_is_ptr(mas))
		return false;

	return true;
}

static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
{
	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
 * @entry: The maple encoded node
 *
 * Return: a maple topiary pointer
 */
static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
{
	return (struct maple_topiary *)
		((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mas_mn() - Get the maple state node.
 * @mas: The maple state
 *
 * Return: the maple node (not encoded - bare pointer).
 */
static inline struct maple_node *mas_mn(const struct ma_state *mas)
{
	return mte_to_node(mas->node);
}

/*
 * mte_set_node_dead() - Set a maple encoded node as dead.
 * @mn: The maple encoded node.
 */
static inline void mte_set_node_dead(struct maple_enode *mn)
{
	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
	smp_wmb(); /* Needed for RCU */
}

/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE			0x02
/* maple_type is stored in bits 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT		0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL		0x04

static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
		enum maple_type type)
{
	return (void *)((unsigned long)node |
			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
}

static inline void *mte_mk_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
}

static inline void *mte_safe_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}

static inline void *mte_set_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}

static inline void *mte_clear_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}

static inline bool mte_has_null(const struct maple_enode *node)
{
	return (unsigned long)node & MAPLE_ENODE_NULL;
}

static inline bool ma_is_root(struct maple_node *node)
{
	return ((unsigned long)node->parent & MA_ROOT_PARENT);
}

static inline bool mte_is_root(const struct maple_enode *node)
{
	return ma_is_root(mte_to_node(node));
}

static inline bool mas_is_root_limits(const struct ma_state *mas)
{
	return !mas->min && mas->max == ULONG_MAX;
}

static inline bool mt_is_alloc(struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}

/*
 * The Parent Pointer
 * Excluding root, the parent pointer is 256B aligned like all other tree nodes.
 * When storing 32 or 64 bit values, the offset can fit into 5 bits.  The 16
 * bit values need an extra bit to store the offset.  This extra bit comes from
 * a reuse of the last bit in the node type.  This is possible by using bit 1 to
 * indicate if bit 2 is part of the type or the slot.
 *
 * Note types:
 *  0x??1 = Root
 *  0x?00 = 16 bit nodes
 *  0x010 = 32 bit nodes
 *  0x110 = 64 bit nodes
 *
 * Slot size and alignment
 *  0b??1 : Root
 *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
 *  0b010 : 32 bit values, type in 0-2, slot in 3-7
 *  0b110 : 64 bit values, type in 0-2, slot in 3-7
 */

#define MAPLE_PARENT_ROOT		0x01

#define MAPLE_PARENT_SLOT_SHIFT	0x03
#define MAPLE_PARENT_SLOT_MASK	0xF8

#define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
#define MAPLE_PARENT_16B_SLOT_MASK	0xFC

#define MAPLE_PARENT_RANGE64		0x06
#define MAPLE_PARENT_RANGE32		0x04
#define MAPLE_PARENT_NOT_RANGE16	0x02
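
/*
 * Worked example (illustration only, not part of the original source; the
 * address is hypothetical): a 64 bit range node at 0x1000 stored in slot 4 of
 * its parent yields the parent pointer value
 *
 *	0x1000 | (4 << MAPLE_PARENT_SLOT_SHIFT) | MAPLE_PARENT_RANGE64 == 0x1026
 *
 * Decoding reverses the steps: bit MAPLE_PARENT_NOT_RANGE16 is set, so the
 * slot is (0x1026 & MAPLE_PARENT_SLOT_MASK) >> MAPLE_PARENT_SLOT_SHIFT == 4,
 * and the node is 0x1026 & ~MAPLE_NODE_MASK == 0x1000.
 */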

/*
 * mte_parent_shift() - Get the parent shift for the slot storage.
 * @parent: The parent pointer cast as an unsigned long
 * Return: The shift into that pointer to the start of the slot
 */
static inline unsigned long mte_parent_shift(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_SHIFT;

	return MAPLE_PARENT_16B_SLOT_SHIFT;
}

/*
 * mte_parent_slot_mask() - Get the slot mask for the parent.
 * @parent: The parent pointer cast as an unsigned long.
 * Return: The slot mask for that parent.
 */
static inline unsigned long mte_parent_slot_mask(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_MASK;

	return MAPLE_PARENT_16B_SLOT_MASK;
}

/*
 * mas_parent_type() - Return the maple_type of the parent from the stored
 * parent type.
 * @mas: The maple state
 * @enode: The maple_enode to extract the parent's enum
 * Return: The node->parent maple_type
 */
static inline
enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
{
	unsigned long p_type;

	p_type = (unsigned long)mte_to_node(enode)->parent;
	if (WARN_ON(p_type & MAPLE_PARENT_ROOT))
		return 0;

	p_type &= MAPLE_NODE_MASK;
	p_type &= ~mte_parent_slot_mask(p_type);
	switch (p_type) {
	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
		if (mt_is_alloc(mas->tree))
			return maple_arange_64;
		return maple_range_64;
	}

	return 0;
}

/*
 * mas_set_parent() - Set the parent node and encode the slot
 * @mas: The maple state
 * @enode: The encoded maple node.
 * @parent: The encoded maple node that is the parent of @enode.
 * @slot: The slot that @enode resides in @parent.
 *
 * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the
 * parent type.
 */
static inline
void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
		    const struct maple_enode *parent, unsigned char slot)
{
	unsigned long val = (unsigned long)parent;
	unsigned long shift;
	unsigned long type;
	enum maple_type p_type = mte_node_type(parent);

	MAS_BUG_ON(mas, p_type == maple_dense);
	MAS_BUG_ON(mas, p_type == maple_leaf_64);

	switch (p_type) {
	case maple_range_64:
	case maple_arange_64:
		shift = MAPLE_PARENT_SLOT_SHIFT;
		type = MAPLE_PARENT_RANGE64;
		break;
	default:
		shift = type = 0;
		break;
	}

	val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
	val |= (slot << shift) | type;
	mte_to_node(enode)->parent = ma_parent_ptr(val);
}

/*
 * mte_parent_slot() - get the parent slot of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The slot in the parent node where @enode resides.
 */
static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
{
	unsigned long val = (unsigned long)mte_to_node(enode)->parent;

	if (val & MA_ROOT_PARENT)
		return 0;

	/*
	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
	 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
	 */
	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
}

/*
 * mte_parent() - Get the parent of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The parent maple node.
 */
static inline struct maple_node *mte_parent(const struct maple_enode *enode)
{
	return (void *)((unsigned long)
			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
}

/*
 * ma_dead_node() - check if the @node is dead.
 * @node: The maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool ma_dead_node(const struct maple_node *node)
{
	struct maple_node *parent;

	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
	return (parent == node);
}

/*
 * mte_dead_node() - check if the @enode is dead.
 * @enode: The encoded maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool mte_dead_node(const struct maple_enode *enode)
{
	struct maple_node *parent, *node;

	node = mte_to_node(enode);
	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = mte_parent(enode);
	return (parent == node);
}

/*
 * mas_allocated() - Get the number of nodes allocated in a maple state.
 * @mas: The maple state
 *
 * The ma_state alloc member is overloaded to hold a pointer to the first
 * allocated node or to the number of requested nodes to allocate.  If bit 0 is
 * set, then the alloc contains the number of requested nodes.  If there is an
 * allocated node, then the total allocated nodes is in that node.
 *
 * Return: The total number of nodes allocated
 */
static inline unsigned long mas_allocated(const struct ma_state *mas)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
		return 0;

	return mas->alloc->total;
}
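
/*
 * Illustration only (not part of the original source): with three nodes
 * requested but none allocated yet, mas->alloc holds the encoded value
 * (3 << 1) | 1 == 0x7, so mas_allocated() reports 0 and mas_alloc_req()
 * reports 3.  Once a node is allocated, mas->alloc points at it (bit 0
 * clear) and mas->alloc->total carries the running node count.
 */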

/*
 * mas_set_alloc_req() - Set the requested number of allocations.
 * @mas: the maple state
 * @count: the number of allocations.
 *
 * The requested number of allocations is either in the first allocated node,
 * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
 * no allocated node.  Set the request either in the node or do the necessary
 * encoding to store in @mas->alloc directly.
 */
static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
		if (!count)
			mas->alloc = NULL;
		else
			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
		return;
	}

	mas->alloc->request_count = count;
}

/*
 * mas_alloc_req() - get the requested number of allocations.
 * @mas: The maple state
 *
 * The alloc count is either stored directly in @mas, or in
 * @mas->alloc->request_count if there is at least one node allocated.  Decode
 * the request count if it's stored directly in @mas->alloc.
 *
 * Return: The allocation request count.
 */
static inline unsigned int mas_alloc_req(const struct ma_state *mas)
{
	if ((unsigned long)mas->alloc & 0x1)
		return (unsigned long)(mas->alloc) >> 1;
	else if (mas->alloc)
		return mas->alloc->request_count;
	return 0;
}

/*
 * ma_pivots() - Get a pointer to the maple node pivots.
 * @node: the maple node
 * @type: the node type
 *
 * In the event of a dead node, this array may be %NULL.
 *
 * Return: A pointer to the maple node pivots
 */
static inline unsigned long *ma_pivots(struct maple_node *node,
				       enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot;
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot;
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * ma_gaps() - Get a pointer to the maple node gaps.
 * @node: the maple node
 * @type: the node type
 *
 * Return: A pointer to the maple node gaps
 */
static inline unsigned long *ma_gaps(struct maple_node *node,
				     enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.gap;
	case maple_range_64:
	case maple_leaf_64:
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * mas_pivot() - Get the pivot at @piv of the maple encoded node.
 * @mas: The maple state.
 * @piv: The pivot offset.
 *
 * Return: the pivot at @piv of the node in @mas.
 */
static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv)
{
	struct maple_node *node = mas_mn(mas);
	enum maple_type type = mte_node_type(mas->node);

	if (MAS_WARN_ON(mas, piv >= mt_pivots[type])) {
		mas_set_err(mas, -EIO);
		return 0;
	}

	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot[piv];
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot[piv];
	case maple_dense:
		return 0;
	}
	return 0;
}

/*
 * mas_safe_pivot() - get the pivot at @piv or mas->max.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @piv: The pivot to fetch
 * @type: The maple node type
 *
 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
 * otherwise.
 */
static inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
	       unsigned char piv, enum maple_type type)
{
	if (piv >= mt_pivots[type])
		return mas->max;

	return pivots[piv];
}

/*
 * mas_safe_min() - Return the minimum for a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 *
 * Return: The minimum range value that is contained in @offset.
 */
static inline unsigned long
mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
{
	if (likely(offset))
		return pivots[offset - 1] + 1;

	return mas->min;
}
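
/*
 * Worked example (illustration only, not part of the original source): for a
 * leaf covering mas->min == 100 and mas->max == 199 with pivots {129, 159, ...}
 * and two used slots, offset 0 spans [100, 129] (mas_safe_min() returns
 * mas->min), offset 1 spans [130, 159] (pivots[0] + 1 through pivots[1]), and
 * an offset at or beyond mt_pivots[type] takes the implied pivot
 * mas->max == 199 from mas_safe_pivot().
 */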

/*
 * mas_logical_pivot() - Get the logical pivot of a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 * @type: The maple node type
 *
 * When there is no value at a pivot (beyond the end of the data), then the
 * pivot is actually @mas->max.
 *
 * Return: the logical pivot of a given @offset.
 */
static inline unsigned long
mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
		  unsigned char offset, enum maple_type type)
{
	unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type);

	if (likely(lpiv))
		return lpiv;

	if (likely(offset))
		return mas->max;

	return lpiv;
}

/*
 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
 * @mn: The encoded maple node
 * @piv: The pivot offset
 * @val: The value of the pivot
 */
static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
				 unsigned long val)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	BUG_ON(piv >= mt_pivots[type]);
	switch (type) {
	default:
	case maple_range_64:
	case maple_leaf_64:
		node->mr64.pivot[piv] = val;
		break;
	case maple_arange_64:
		node->ma64.pivot[piv] = val;
		break;
	case maple_dense:
		break;
	}
}

/*
 * ma_slots() - Get a pointer to the maple node slots.
 * @mn: The maple node
 * @mt: The maple node type
 *
 * Return: A pointer to the maple node slots
 */
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
	switch (mt) {
	default:
	case maple_arange_64:
		return mn->ma64.slot;
	case maple_range_64:
	case maple_leaf_64:
		return mn->mr64.slot;
	case maple_dense:
		return mn->slot;
	}
}

static inline bool mt_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static inline void *mt_slot(const struct maple_tree *mt,
		void __rcu **slots, unsigned char offset)
{
	return rcu_dereference_check(slots[offset], mt_locked(mt));
}

static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
				   unsigned char offset)
{
	return rcu_dereference_protected(slots[offset], mt_locked(mt));
}

/*
 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset.
 */
static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
				    unsigned char offset)
{
	return mt_slot_locked(mas->tree, slots, offset);
}

/*
 * mas_slot() - Get the slot value when not holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset
 */
static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
			     unsigned char offset)
{
	return mt_slot(mas->tree, slots, offset);
}

/*
 * mas_root() - Get the maple tree root.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root(struct ma_state *mas)
{
	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}

static inline void *mt_root_locked(struct maple_tree *mt)
{
	return rcu_dereference_protected(mt->ma_root, mt_locked(mt));
}

/*
 * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root_locked(struct ma_state *mas)
{
	return mt_root_locked(mas->tree);
}

static inline struct maple_metadata *ma_meta(struct maple_node *mn,
					     enum maple_type mt)
{
	switch (mt) {
	case maple_arange_64:
		return &mn->ma64.meta;
	default:
		return &mn->mr64.meta;
	}
}

/*
 * ma_set_meta() - Set the metadata information of a node.
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The offset of the highest sub-gap in this node.
 * @end: The end of the data in this node.
 */
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
			       unsigned char offset, unsigned char end)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
	meta->end = end;
}

/*
 * mt_clear_meta() - clear the metadata information of a node, if it exists
 * @mt: The maple tree
 * @mn: The maple node
 * @type: The maple node type
 */
static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
				 enum maple_type type)
{
	struct maple_metadata *meta;
	unsigned long *pivots;
	void __rcu **slots;
	void *next;

	switch (type) {
	case maple_range_64:
		pivots = mn->mr64.pivot;
		if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
			slots = mn->mr64.slot;
			next = mt_slot_locked(mt, slots,
					      MAPLE_RANGE64_SLOTS - 1);
			if (unlikely((mte_to_node(next) &&
				      mte_node_type(next))))
				return; /* no metadata, could be node */
		}
		fallthrough;
	case maple_arange_64:
		meta = ma_meta(mn, type);
		break;
	default:
		return;
	}

	meta->gap = 0;
	meta->end = 0;
}

/*
 * ma_meta_end() - Get the data end of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_end(struct maple_node *mn,
					enum maple_type mt)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	return meta->end;
}

/*
 * ma_meta_gap() - Get the largest gap location of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_gap(struct maple_node *mn,
					enum maple_type mt)
{
	return mn->ma64.meta.gap;
}

/*
 * ma_set_meta_gap() - Set the largest gap location in a node's metadata
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The location of the largest gap.
 */
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
				   unsigned char offset)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
}

/*
 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
 * @mat: the ma_topiary, a linked list of dead nodes.
 * @dead_enode: the node to be marked as dead and added to the tail of the list
 *
 * Add the @dead_enode to the linked list in @mat.
 */
static inline void mat_add(struct ma_topiary *mat,
			   struct maple_enode *dead_enode)
{
	mte_set_node_dead(dead_enode);
	mte_to_mat(dead_enode)->next = NULL;
	if (!mat->tail) {
		mat->tail = mat->head = dead_enode;
		return;
	}

	mte_to_mat(mat->tail)->next = dead_enode;
	mat->tail = dead_enode;
}
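
/*
 * Usage sketch (illustration only, not part of the original source), assuming
 * the tree is write-locked and the enodes are no longer reachable by new
 * readers:
 *
 *	MA_TOPIARY(mat, mas->tree);
 *
 *	mat_add(&mat, old_enode);	// marks it dead and links it
 *	mat_add(&mat, other_enode);
 *	mas_mat_free(mas, &mat);	// frees the whole list (RCU safe)
 */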

static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
static inline void mas_free(struct ma_state *mas, struct maple_enode *used);

/*
 * mas_mat_free() - Free all nodes in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Free walk a dead list.
 */
static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mas_free(mas, mat->head);
		mat->head = next;
	}
}

/*
 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Destroy walk a dead list.
 */
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mte_destroy_walk(mat->head, mat->mtree);
		mat->head = next;
	}
}

/*
 * mas_descend() - Descend into the slot stored in the ma_state.
 * @mas: the maple state.
 *
 * Note: Not RCU safe, only use in write side or debug code.
 */
static inline void mas_descend(struct ma_state *mas)
{
	enum maple_type type;
	unsigned long *pivots;
	struct maple_node *node;
	void __rcu **slots;

	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);

	if (mas->offset)
		mas->min = pivots[mas->offset - 1] + 1;
	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
	mas->node = mas_slot(mas, slots, mas->offset);
}

/*
 * mte_set_gap() - Set a maple node gap.
 * @mn: The encoded maple node
 * @gap: The offset of the gap to set
 * @val: The gap value
 */
static inline void mte_set_gap(const struct maple_enode *mn,
			       unsigned char gap, unsigned long val)
{
	switch (mte_node_type(mn)) {
	default:
		break;
	case maple_arange_64:
		mte_to_node(mn)->ma64.gap[gap] = val;
		break;
	}
}

/*
 * mas_ascend() - Walk up a level of the tree.
 * @mas: The maple state
 *
 * Sets the @mas->max and @mas->min to the correct values when walking up.  This
 * may cause several levels of walking up to find the correct min and max.
 * May find a dead node which will cause a premature return.
 * Return: 1 on dead node, 0 otherwise
 */
static int mas_ascend(struct ma_state *mas)
{
	struct maple_enode *p_enode; /* parent enode. */
	struct maple_enode *a_enode; /* ancestor enode. */
	struct maple_node *a_node; /* ancestor node. */
	struct maple_node *p_node; /* parent node. */
	unsigned char a_slot;
	enum maple_type a_type;
	unsigned long min, max;
	unsigned long *pivots;
	bool set_max = false, set_min = false;

	a_node = mas_mn(mas);
	if (ma_is_root(a_node)) {
		mas->offset = 0;
		return 0;
	}

	p_node = mte_parent(mas->node);
	if (unlikely(a_node == p_node))
		return 1;

	a_type = mas_parent_type(mas, mas->node);
	mas->offset = mte_parent_slot(mas->node);
	a_enode = mt_mk_node(p_node, a_type);

	/* Check to make sure all parent information is still accurate */
	if (p_node != mte_parent(mas->node))
		return 1;

	mas->node = a_enode;

	if (mte_is_root(a_enode)) {
		mas->max = ULONG_MAX;
		mas->min = 0;
		return 0;
	}

	if (!mas->min)
		set_min = true;

	if (mas->max == ULONG_MAX)
		set_max = true;

	min = 0;
	max = ULONG_MAX;
	do {
		p_enode = a_enode;
		a_type = mas_parent_type(mas, p_enode);
		a_node = mte_parent(p_enode);
		a_slot = mte_parent_slot(p_enode);
		a_enode = mt_mk_node(a_node, a_type);
		pivots = ma_pivots(a_node, a_type);

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (!set_min && a_slot) {
			set_min = true;
			min = pivots[a_slot - 1] + 1;
		}

		if (!set_max && a_slot < mt_pivots[a_type]) {
			set_max = true;
			max = pivots[a_slot];
		}

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (unlikely(ma_is_root(a_node)))
			break;

	} while (!set_min || !set_max);

	mas->max = max;
	mas->min = min;
	return 0;
}

/*
 * mas_pop_node() - Get a previously allocated maple node from the maple state.
 * @mas: The maple state
 *
 * Return: A pointer to a maple node.
 */
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
	struct maple_alloc *ret, *node = mas->alloc;
	unsigned long total = mas_allocated(mas);
	unsigned int req = mas_alloc_req(mas);

	/* nothing or a request pending. */
	if (WARN_ON(!total))
		return NULL;

	if (total == 1) {
		/* single allocation in this ma_state */
		mas->alloc = NULL;
		ret = node;
		goto single_node;
	}

	if (node->node_count == 1) {
		/* Single allocation in this node. */
		mas->alloc = node->slot[0];
		mas->alloc->total = node->total - 1;
		ret = node;
		goto new_head;
	}

	node->total--;
	ret = node->slot[--node->node_count];
	node->slot[node->node_count] = NULL;

single_node:
new_head:
	if (req) {
		req++;
		mas_set_alloc_req(mas, req);
	}

	memset(ret, 0, sizeof(*ret));
	return (struct maple_node *)ret;
}

/*
 * mas_push_node() - Push a node back on the maple state allocation.
 * @mas: The maple state
 * @used: The used maple node
 *
 * Stores the maple node back into @mas->alloc for reuse.  Updates allocated and
 * requested node count as necessary.
 */
static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
{
	struct maple_alloc *reuse = (struct maple_alloc *)used;
	struct maple_alloc *head = mas->alloc;
	unsigned long count;
	unsigned int requested = mas_alloc_req(mas);

	count = mas_allocated(mas);

	reuse->request_count = 0;
	reuse->node_count = 0;
	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
		head->slot[head->node_count++] = reuse;
		head->total++;
		goto done;
	}

	reuse->total = 1;
	if ((head) && !((unsigned long)head & 0x1)) {
		reuse->slot[0] = head;
		reuse->node_count = 1;
		reuse->total += head->total;
	}

	mas->alloc = reuse;
done:
	if (requested > 1)
		mas_set_alloc_req(mas, requested - 1);
}

/*
 * mas_alloc_nodes() - Allocate nodes into a maple state
 * @mas: The maple state
 * @gfp: The GFP Flags
 */
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
	struct maple_alloc *node;
	unsigned long allocated = mas_allocated(mas);
	unsigned int requested = mas_alloc_req(mas);
	unsigned int count;
	void **slots = NULL;
	unsigned int max_req = 0;

	if (!requested)
		return;

	mas_set_alloc_req(mas, 0);
	if (mas->mas_flags & MA_STATE_PREALLOC) {
		if (allocated)
			return;
		WARN_ON(!allocated);
	}

	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
		node = (struct maple_alloc *)mt_alloc_one(gfp);
		if (!node)
			goto nomem_one;

		if (allocated) {
			node->slot[0] = mas->alloc;
			node->node_count = 1;
		} else {
			node->node_count = 0;
		}

		mas->alloc = node;
		node->total = ++allocated;
		requested--;
	}

	node = mas->alloc;
	node->request_count = 0;
	while (requested) {
		max_req = MAPLE_ALLOC_SLOTS - node->node_count;
		slots = (void **)&node->slot[node->node_count];
		max_req = min(requested, max_req);
		count = mt_alloc_bulk(gfp, max_req, slots);
		if (!count)
			goto nomem_bulk;

		if (node->node_count == 0) {
			node->slot[0]->node_count = 0;
			node->slot[0]->request_count = 0;
		}

		node->node_count += count;
		allocated += count;
		node = node->slot[0];
		requested -= count;
	}
	mas->alloc->total = allocated;
	return;

nomem_bulk:
	/* Clean up potential freed allocations on bulk failure */
	memset(slots, 0, max_req * sizeof(unsigned long));
nomem_one:
	mas_set_alloc_req(mas, requested);
	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
		mas->alloc->total = allocated;
	mas_set_err(mas, -ENOMEM);
}

/*
 * mas_free() - Free an encoded maple node
 * @mas: The maple state
 * @used: The encoded maple node to free.
 *
 * Uses rcu free if necessary, pushes @used back on the maple state allocations
 * otherwise.
 */
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
	struct maple_node *tmp = mte_to_node(used);

	if (mt_in_rcu(mas->tree))
		ma_free_rcu(tmp);
	else
		mas_push_node(mas, tmp);
}

/*
 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
 * if there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 * @gfp: the gfp flags
 */
static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
{
	unsigned long allocated = mas_allocated(mas);

	if (allocated < count) {
		mas_set_alloc_req(mas, count - allocated);
		mas_alloc_nodes(mas, gfp);
	}
}

/*
 * mas_node_count() - Check if enough nodes are allocated and request more if
 * there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 *
 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
 */
static void mas_node_count(struct ma_state *mas, int count)
{
	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
}

/*
 * mas_start() - Sets up maple state for operations.
 * @mas: The maple state.
 *
 * If mas->node == MAS_START, then set the min, max and depth to
 * defaults.
 *
 * Return:
 * - If mas->node is an error or not MAS_START, return NULL.
 * - If it's an empty tree:     NULL & mas->node == MAS_NONE
 * - If it's a single entry:    The entry & mas->node == MAS_ROOT
 * - If it's a tree:            NULL & mas->node == safe root node.
 */
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
	if (likely(mas_is_start(mas))) {
		struct maple_enode *root;

		mas->min = 0;
		mas->max = ULONG_MAX;

retry:
		mas->depth = 0;
		root = mas_root(mas);
		/* Tree with nodes */
		if (likely(xa_is_node(root))) {
			mas->depth = 1;
			mas->node = mte_safe_root(root);
			mas->offset = 0;
			if (mte_dead_node(mas->node))
				goto retry;

			return NULL;
		}

		/* empty tree */
		if (unlikely(!root)) {
			mas->node = MAS_NONE;
			mas->offset = MAPLE_NODE_SLOTS;
			return NULL;
		}

		/* Single entry tree */
		mas->node = MAS_ROOT;
		mas->offset = MAPLE_NODE_SLOTS;

		/* Single entry tree. */
		if (mas->index > 0)
			return NULL;

		return root;
	}

	return NULL;
}
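
/*
 * Illustration only (not part of the original source): the three non-error
 * outcomes of mas_start(), assuming the tree is appropriately locked:
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *
 *	entry = mas_start(&mas);
 *	// Empty tree:	 entry == NULL and mas.node == MAS_NONE
 *	// Single entry: entry == the entry (index 0), mas.node == MAS_ROOT
 *	// Many entries: entry == NULL and mas.node == the safe root node
 */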

/*
 * ma_data_end() - Find the end of the data in a node.
 * @node: The maple node
 * @type: The maple node type
 * @pivots: The array of pivots in the node
 * @max: The maximum value in the node
 *
 * Uses metadata to find the end of the data when possible.
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char ma_data_end(struct maple_node *node,
					enum maple_type type,
					unsigned long *pivots,
					unsigned long max)
{
	unsigned char offset;

	if (!pivots)
		return 0;

	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == max))
		return offset;

	return mt_pivots[type];
}
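
/*
 * Illustration only (not part of the original source): for a maple_range_64
 * node with mt_pivots[type] == 15, the cases above are: pivots[14] == 0 means
 * the data ended earlier and the metadata holds the end; pivots[14] == max
 * means slot 14 is the last occupied slot; anything else means every slot is
 * in use and the end is slot 15 under the implied pivot.
 */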

/*
 * mas_data_end() - Find the end of the data (slot).
 * @mas: the maple state
 *
 * This method is optimized to check the metadata of a node if the node type
 * supports data end metadata.
 *
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char mas_data_end(struct ma_state *mas)
{
	enum maple_type type;
	struct maple_node *node;
	unsigned char offset;
	unsigned long *pivots;

	type = mte_node_type(mas->node);
	node = mas_mn(mas);
	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	pivots = ma_pivots(node, type);
	if (unlikely(ma_dead_node(node)))
		return 0;

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == mas->max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
 * @mas: the maple state
 *
 * Return: The maximum gap in the leaf.
 */
static unsigned long mas_leaf_max_gap(struct ma_state *mas)
{
	enum maple_type mt;
	unsigned long pstart, gap, max_gap;
	struct maple_node *mn;
	unsigned long *pivots;
	void __rcu **slots;
	unsigned char i;
	unsigned char max_piv;

	mt = mte_node_type(mas->node);
	mn = mas_mn(mas);
	slots = ma_slots(mn, mt);
	max_gap = 0;
	if (unlikely(ma_is_dense(mt))) {
		gap = 0;
		for (i = 0; i < mt_slots[mt]; i++) {
			if (slots[i]) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
			} else {
				gap++;
			}
		}
		if (gap > max_gap)
			max_gap = gap;
		return max_gap;
	}

	/*
	 * Checking the first implied pivot optimizes the loop below, and slot 1
	 * may be skipped if there is a gap in slot 0.
	 */
	pivots = ma_pivots(mn, mt);
	if (likely(!slots[0])) {
		max_gap = pivots[0] - mas->min + 1;
		i = 2;
	} else {
		i = 1;
	}

	/* reduce max_piv as the special case is checked before the loop */
	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
	/*
	 * Check end implied pivot which can only be a gap on the right most
	 * node.
	 */
	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
		gap = ULONG_MAX - pivots[max_piv];
		if (gap > max_gap)
			max_gap = gap;
	}

	for (; i <= max_piv; i++) {
		/* data == no gap. */
		if (likely(slots[i]))
			continue;

		pstart = pivots[i - 1];
		gap = pivots[i] - pstart;
		if (gap > max_gap)
			max_gap = gap;

		/* There cannot be two gaps in a row. */
		i++;
	}
	return max_gap;
}
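
/*
 * Worked example (illustration only, not part of the original source): a leaf
 * covering [0, 99] with pivots {9, 19, 49, ...} and slots {A, B, NULL, C, ...}
 * has a gap at slot 2: pstart = pivots[1] = 19 and gap = pivots[2] - pstart =
 * 30, covering the thirty empty indices 20-49.  Slot 3 is then skipped since
 * two gaps cannot be adjacent.
 */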

/*
 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
 * @node: The maple node
 * @gaps: The pointer to the gaps
 * @mt: The maple node type
 * @off: Pointer to store the offset location of the gap.
 *
 * Uses the metadata data end to scan backwards across set gaps.
 *
 * Return: The maximum gap value
 */
static inline unsigned long
ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
	   unsigned char *off)
{
	unsigned char offset, i;
	unsigned long max_gap = 0;

	i = offset = ma_meta_end(node, mt);
	do {
		if (gaps[i] > max_gap) {
			max_gap = gaps[i];
			offset = i;
		}
	} while (i--);

	*off = offset;
	return max_gap;
}

/*
 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
 * @mas: The maple state.
 *
 * If the metadata gap is set to MAPLE_ARANGE64_META_MAX, there is no gap.
 *
 * Return: The gap value.
 */
static inline unsigned long mas_max_gap(struct ma_state *mas)
{
	unsigned long *gaps;
	unsigned char offset;
	enum maple_type mt;
	struct maple_node *node;

	mt = mte_node_type(mas->node);
	if (ma_is_leaf(mt))
		return mas_leaf_max_gap(mas);

	node = mas_mn(mas);
	MAS_BUG_ON(mas, mt != maple_arange_64);
	offset = ma_meta_gap(node, mt);
	if (offset == MAPLE_ARANGE64_META_MAX)
		return 0;

	gaps = ma_gaps(node, mt);
	return gaps[offset];
}

/*
 * mas_parent_gap() - Set the parent gap and any gaps above, as needed
 * @mas: The maple state
 * @offset: The gap offset in the parent to set
 * @new: The new gap value.
 *
 * Set the parent gap then continue to set the gap upwards, using the metadata
 * of the parent to see if it is necessary to check the node above.
 */
static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
		unsigned long new)
{
	unsigned long meta_gap = 0;
	struct maple_node *pnode;
	struct maple_enode *penode;
	unsigned long *pgaps;
	unsigned char meta_offset;
	enum maple_type pmt;

	pnode = mte_parent(mas->node);
	pmt = mas_parent_type(mas, mas->node);
	penode = mt_mk_node(pnode, pmt);
	pgaps = ma_gaps(pnode, pmt);

ascend:
	MAS_BUG_ON(mas, pmt != maple_arange_64);
	meta_offset = ma_meta_gap(pnode, pmt);
	if (meta_offset == MAPLE_ARANGE64_META_MAX)
		meta_gap = 0;
	else
		meta_gap = pgaps[meta_offset];

	pgaps[offset] = new;

	if (meta_gap == new)
		return;

	if (offset != meta_offset) {
		if (meta_gap > new)
			return;

		ma_set_meta_gap(pnode, pmt, offset);
	} else if (new < meta_gap) {
		meta_offset = 15;
		new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
		ma_set_meta_gap(pnode, pmt, meta_offset);
	}

	if (ma_is_root(pnode))
		return;

	/* Go to the parent node. */
	pnode = mte_parent(penode);
	pmt = mas_parent_type(mas, penode);
	pgaps = ma_gaps(pnode, pmt);
	offset = mte_parent_slot(penode);
	penode = mt_mk_node(pnode, pmt);
	goto ascend;
}

/*
 * mas_update_gap() - Update a node's gaps and propagate up if necessary.
 * @mas: the maple state.
 */
static inline void mas_update_gap(struct ma_state *mas)
{
	unsigned char pslot;
	unsigned long p_gap;
	unsigned long max_gap;

	if (!mt_is_alloc(mas->tree))
		return;

	if (mte_is_root(mas->node))
		return;

	max_gap = mas_max_gap(mas);

	pslot = mte_parent_slot(mas->node);
	p_gap = ma_gaps(mte_parent(mas->node),
			mas_parent_type(mas, mas->node))[pslot];

	if (p_gap != max_gap)
		mas_parent_gap(mas, pslot, max_gap);
}

/*
 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
 * @parent with the slot encoded.
 * @mas: the maple state (for the tree)
 * @parent: the maple encoded node containing the children.
 */
static inline void mas_adopt_children(struct ma_state *mas,
		struct maple_enode *parent)
{
	enum maple_type type = mte_node_type(parent);
	struct maple_node *node = mas_mn(mas);
	void __rcu **slots = ma_slots(node, type);
	unsigned long *pivots = ma_pivots(node, type);
	struct maple_enode *child;
	unsigned char offset;

	offset = ma_data_end(node, type, pivots, mas->max);
	do {
		child = mas_slot_locked(mas, slots, offset);
		mas_set_parent(mas, child, parent, offset);
	} while (offset--);
}

/*
 * mas_replace() - Replace a maple node in the tree with mas->node.  Uses the
 * parent encoding to locate the maple node in the tree.
 * @mas: the ma_state to use for operations.
 * @advanced: boolean to adopt the child nodes and free the old node (false) or
 * leave the node (true) and handle the adoption and free elsewhere.
 */
static inline void mas_replace(struct ma_state *mas, bool advanced)
	__must_hold(mas->tree->ma_lock)
{
	struct maple_node *mn = mas_mn(mas);
	struct maple_enode *old_enode;
	unsigned char offset = 0;
	void __rcu **slots = NULL;

	if (ma_is_root(mn)) {
		old_enode = mas_root_locked(mas);
	} else {
		offset = mte_parent_slot(mas->node);
		slots = ma_slots(mte_parent(mas->node),
				 mas_parent_type(mas, mas->node));
		old_enode = mas_slot_locked(mas, slots, offset);
	}

	if (!advanced && !mte_is_leaf(mas->node))
		mas_adopt_children(mas, mas->node);

	if (mte_is_root(mas->node)) {
		mn->parent = ma_parent_ptr(
			      ((unsigned long)mas->tree | MA_ROOT_PARENT));
		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
		mas_set_height(mas);
	} else {
		rcu_assign_pointer(slots[offset], mas->node);
	}

	if (!advanced) {
		mte_set_node_dead(old_enode);
		mas_free(mas, old_enode);
	}
}

/*
 * mas_new_child() - Find the new child of a node.
 * @mas: the maple state
 * @child: the maple state to store the child.
 *
 * Return: true if a new child is found, false otherwise.
 */
static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
	__must_hold(mas->tree->ma_lock)
{
	enum maple_type mt;
	unsigned char offset;
	unsigned char end;
	unsigned long *pivots;
	struct maple_enode *entry;
	struct maple_node *node;
	void __rcu **slots;

	mt = mte_node_type(mas->node);
	node = mas_mn(mas);
	slots = ma_slots(node, mt);
	pivots = ma_pivots(node, mt);
	end = ma_data_end(node, mt, pivots, mas->max);
	for (offset = mas->offset; offset <= end; offset++) {
		entry = mas_slot_locked(mas, slots, offset);
		if (mte_parent(entry) == node) {
			*child = *mas;
			mas->offset = offset + 1;
			child->offset = offset;
			mas_descend(child);
			child->offset = 0;
			return true;
		}
	}

	return false;
}

/*
 * mab_shift_right() - Shift the data in mab right.  Note, does not clean out
 * the old data or set b_node->b_end.
 * @b_node: the maple_big_node
 * @shift: the shift count
 */
static inline void mab_shift_right(struct maple_big_node *b_node,
				   unsigned char shift)
{
	unsigned long size = b_node->b_end * sizeof(unsigned long);

	memmove(b_node->pivot + shift, b_node->pivot, size);
	memmove(b_node->slot + shift, b_node->slot, size);
	if (b_node->type == maple_arange_64)
		memmove(b_node->gap + shift, b_node->gap, size);
}

/*
 * mab_middle_node() - Check if a middle node is needed (unlikely)
 * @b_node: the maple_big_node that contains the data.
 * @split: the potential split location
 * @slot_count: the size that can be stored in a single node being considered.
 *
 * Return: true if a middle node is required.
 */
static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
				   unsigned char slot_count)
{
	unsigned char size = b_node->b_end;

	if (size >= 2 * slot_count)
		return true;

	if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
		return true;

	return false;
}

/*
 * mab_no_null_split() - ensure the split doesn't fall on a NULL
 * @b_node: the maple_big_node with the data
 * @split: the suggested split location
 * @slot_count: the number of slots in the node being considered.
 *
 * Return: the split location.
 */
static inline int mab_no_null_split(struct maple_big_node *b_node,
				    unsigned char split, unsigned char slot_count)
{
	if (!b_node->slot[split]) {
		/*
		 * If the split is less than the max slot && the right side will
		 * still be sufficient, then increment the split on NULL.
		 */
		if ((split < slot_count - 1) &&
		    (b_node->b_end - split) > (mt_min_slots[b_node->type]))
			split++;
		else
			split--;
	}
	return split;
}

/*
 * mab_calc_split() - Calculate the split location and if there needs to be two
 * splits.
 * @mas: The maple state
 * @bn: The maple_big_node with the data
 * @mid_split: The second split, if required.  0 otherwise.
 * @min: The minimum index of the data in @bn.
 *
 * Return: The first split location.  The middle split is set in @mid_split.
 */
static inline int mab_calc_split(struct ma_state *mas,
	 struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
{
	unsigned char b_end = bn->b_end;
	int split = b_end / 2; /* Assume equal split. */
	unsigned char slot_min, slot_count = mt_slots[bn->type];

	/*
	 * To support gap tracking, all NULL entries are kept together and a node cannot
	 * end on a NULL entry, with the exception of the left-most leaf.  The
	 * limitation means that the split of a node must be checked for this condition
	 * and be able to put more data in one direction or the other.
	 */
	if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
		*mid_split = 0;
		split = b_end - mt_min_slots[bn->type];

		if (!ma_is_leaf(bn->type))
			return split;

		mas->mas_flags |= MA_STATE_REBALANCE;
		if (!bn->slot[split])
			split--;
		return split;
	}

	/*
	 * Although extremely rare, it is possible to enter what is known as the 3-way
	 * split scenario.  The 3-way split comes about by means of a store of a range
	 * that overwrites the end and beginning of two full nodes.  The result is a set
	 * of entries that cannot be stored in 2 nodes.  Sometimes, these two nodes can
	 * also be located in different parent nodes which are also full.  This can
	 * carry upwards all the way to the root in the worst case.
	 */
	if (unlikely(mab_middle_node(bn, split, slot_count))) {
		split = b_end / 3;
		*mid_split = split * 2;
	} else {
		slot_min = mt_min_slots[bn->type];

		*mid_split = 0;
		/*
		 * Avoid having a range less than the slot count unless it
		 * causes one node to be deficient.
		 * NOTE: mt_min_slots is 1 based, b_end and split are zero.
		 */
		while ((split < slot_count - 1) &&
		       ((bn->pivot[split] - min) < slot_count - 1) &&
		       (b_end - split > slot_min))
			split++;
	}

	/* Avoid ending a node on a NULL entry */
	split = mab_no_null_split(bn, split, slot_count);

	if (unlikely(*mid_split))
		*mid_split = mab_no_null_split(bn, *mid_split, slot_count);

	return split;
}
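
/*
 * Illustration of the 3-way split (not part of the original source): with
 * 16-slot nodes, a store that clips the last range of one full leaf and the
 * first range of the next full leaf can leave more entries in the big node
 * than two nodes can hold.  In that case split becomes b_end / 3 and
 * mid_split 2 * split, producing three roughly equal nodes.
 */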

/*
 * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
 * and set @b_node->b_end to the next free slot.
 * @mas: The maple state
 * @mas_start: The starting slot to copy
 * @mas_end: The end slot to copy (inclusively)
 * @b_node: The maple_big_node to place the data
 * @mab_start: The starting location in maple_big_node to store the data.
 */
static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
			unsigned char mas_end, struct maple_big_node *b_node,
			unsigned char mab_start)
{
	enum maple_type mt;
	struct maple_node *node;
	void __rcu **slots;
	unsigned long *pivots, *gaps;
	int i = mas_start, j = mab_start;
	unsigned char piv_end;

	node = mas_mn(mas);
	mt = mte_node_type(mas->node);
	pivots = ma_pivots(node, mt);
	if (!i) {
		b_node->pivot[j] = pivots[i++];
		if (unlikely(i > mas_end))
			goto complete;
		j++;
	}

	piv_end = min(mas_end, mt_pivots[mt]);
	for (; i < piv_end; i++, j++) {
		b_node->pivot[j] = pivots[i];
		if (unlikely(!b_node->pivot[j]))
			break;

		if (unlikely(mas->max == b_node->pivot[j]))
			goto complete;
	}

	if (likely(i <= mas_end))
		b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);

complete:
	b_node->b_end = ++j;
	j -= mab_start;
	slots = ma_slots(node, mt);
	memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
	if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
		gaps = ma_gaps(node, mt);
		memcpy(b_node->gap + mab_start, gaps + mas_start,
		       sizeof(unsigned long) * j);
	}
}

/*
 * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
 * @mas: The maple state
 * @node: The maple node
 * @pivots: pointer to the maple node pivots
 * @mt: The maple type
 * @end: The assumed end
 *
 * Note, end may be incremented within this function but not modified at the
 * source.  This is fine since the metadata is the last thing to be stored in a
 * node during a write.
 */
static inline void mas_leaf_set_meta(struct ma_state *mas,
		struct maple_node *node, unsigned long *pivots,
		enum maple_type mt, unsigned char end)
{
	/* There is no room for metadata already */
	if (mt_pivots[mt] <= end)
		return;

	if (pivots[end] && pivots[end] < mas->max)
		end++;

	if (end < mt_slots[mt] - 1)
		ma_set_meta(node, mt, 0, end);
}

/*
 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
 * @b_node: the maple_big_node that has the data
 * @mab_start: the start location in @b_node.
 * @mab_end: The end location in @b_node (inclusively)
 * @mas: The maple state with the maple encoded node.
 */
static inline void mab_mas_cp(struct maple_big_node *b_node,
			      unsigned char mab_start, unsigned char mab_end,
			      struct ma_state *mas, bool new_max)
{
	int i, j = 0;
	enum maple_type mt = mte_node_type(mas->node);
	struct maple_node *node = mte_to_node(mas->node);
	void __rcu **slots = ma_slots(node, mt);
	unsigned long *pivots = ma_pivots(node, mt);
	unsigned long *gaps = NULL;
	unsigned char end;

	if (mab_end - mab_start > mt_pivots[mt])
		mab_end--;

	if (!pivots[mt_pivots[mt] - 1])
		slots[mt_pivots[mt]] = NULL;

	i = mab_start;
	do {
		pivots[j++] = b_node->pivot[i++];
	} while (i <= mab_end && likely(b_node->pivot[i]));

	memcpy(slots, b_node->slot + mab_start,
	       sizeof(void *) * (i - mab_start));

	if (new_max)
		mas->max = b_node->pivot[i - 1];

	end = j - 1;
	if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
		unsigned long max_gap = 0;
		unsigned char offset = 15;

		gaps = ma_gaps(node, mt);
		do {
			gaps[--j] = b_node->gap[--i];
			if (gaps[j] > max_gap) {
				offset = j;
				max_gap = gaps[j];
			}
		} while (j);

		ma_set_meta(node, mt, offset, end);
	} else {
		mas_leaf_set_meta(mas, node, pivots, mt, end);
	}
}

/*
 * mas_descend_adopt() - Descend through a sub-tree and adopt children.
 * @mas: the maple state with the maple encoded node of the sub-tree.
 *
 * Descend through a sub-tree and adopt children who do not have the correct
 * parents set.  Follow the parents which have the correct parents as they are
 * the new entries which need to be followed to find other incorrectly set
 * parents.
 */
static inline void mas_descend_adopt(struct ma_state *mas)
{
	struct ma_state list[3], next[3];
	int i, n;

	/*
	 * At each level there may be up to 3 correct parent pointers which indicates
	 * the new nodes which need to be walked to find any new nodes at a lower level.
	 */

	for (i = 0; i < 3; i++) {
		list[i] = *mas;
		list[i].offset = 0;
		next[i].offset = 0;
	}
	next[0] = *mas;

	while (!mte_is_leaf(list[0].node)) {
		n = 0;
		for (i = 0; i < 3; i++) {
			if (mas_is_none(&list[i]))
				continue;

			if (i && list[i-1].node == list[i].node)
				continue;

			while ((n < 3) && (mas_new_child(&list[i], &next[n])))
				n++;

			mas_adopt_children(&list[i], list[i].node);
		}

		while (n < 3)
			next[n++].node = MAS_NONE;

		/* descend by setting the list to the children */
		for (i = 0; i < 3; i++)
			list[i] = next[i];
	}
}

/*
 * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
 * @mas: The maple state
 * @end: The maple node end
 * @mt: The maple node type
 */
static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
				      enum maple_type mt)
{
	if (!(mas->mas_flags & MA_STATE_BULK))
		return;

	if (mte_is_root(mas->node))
		return;

	if (end > mt_min_slots[mt]) {
		mas->mas_flags &= ~MA_STATE_REBALANCE;
		return;
	}
}

/*
 * mas_store_b_node() - Store an @entry into the b_node while also copying the
 * data from a maple encoded node.
 * @wr_mas: the maple write state
 * @b_node: the maple_big_node to fill with data
 * @offset_end: the offset to end copying
 *
 * Sets @b_node->b_end to the actual end of the data stored in @b_node.
 */
static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
		struct maple_big_node *b_node, unsigned char offset_end)
{
	unsigned char slot;
	unsigned char b_end;
	/* Possible underflow of piv will wrap back to 0 before use. */
	unsigned long piv;
	struct ma_state *mas = wr_mas->mas;

	b_node->type = wr_mas->type;
	b_end = 0;
	slot = mas->offset;
	if (slot) {
		/* Copy start data up to insert. */
		mas_mab_cp(mas, 0, slot - 1, b_node, 0);
		b_end = b_node->b_end;
		piv = b_node->pivot[b_end - 1];
	} else
		piv = mas->min - 1;

	if (piv + 1 < mas->index) {
		/* Handle range starting after old range */
		b_node->slot[b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = mas->index - 1 - piv;
		b_node->pivot[b_end++] = mas->index - 1;
	}

	/* Store the new entry. */
	mas->offset = b_end;
	b_node->slot[b_end] = wr_mas->entry;
	b_node->pivot[b_end] = mas->last;

	/* Appended. */
	if (mas->last >= mas->max)
		goto b_end;

	/* Handle new range ending before old range ends */
	piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
	if (piv > mas->last) {
		if (piv == ULONG_MAX)
			mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);

		if (offset_end != slot)
			wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
							  offset_end);

		b_node->slot[++b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = piv - mas->last + 1;
		b_node->pivot[b_end] = piv;
	}

	slot = offset_end + 1;
	if (slot > wr_mas->node_end)
		goto b_end;

	/* Copy end data to the end of the node. */
	mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
	b_node->b_end--;
	return;

b_end:
	b_node->b_end = b_end;
}
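
/*
 * Worked example (illustration only, not part of the original source): given a
 * leaf holding [0, 9] -> A, [10, 19] -> B, [20, 29] -> C, storing [12, 17] -> X
 * builds a big node of [0, 9] -> A (copied start), [10, 11] -> B (old content
 * ahead of the new range), [12, 17] -> X (the new entry), [18, 19] -> B (the
 * clipped remainder), and [20, 29] -> C (copied end).
 */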

/*
 * mas_prev_sibling() - Find the previous node with the same parent.
 * @mas: the maple state
 *
 * Return: True if there is a previous sibling, false otherwise.
 */
static inline bool mas_prev_sibling(struct ma_state *mas)
{
	unsigned int p_slot = mte_parent_slot(mas->node);

	if (mte_is_root(mas->node))
		return false;

	if (!p_slot)
		return false;

	mas_ascend(mas);
	mas->offset = p_slot - 1;
	mas_descend(mas);
	return true;
}

/*
 * mas_next_sibling() - Find the next node with the same parent.
 * @mas: the maple state
 *
 * Return: true if there is a next sibling, false otherwise.
 */
static inline bool mas_next_sibling(struct ma_state *mas)
{
	MA_STATE(parent, mas->tree, mas->index, mas->last);

	if (mte_is_root(mas->node))
		return false;

	parent = *mas;
	mas_ascend(&parent);
	parent.offset = mte_parent_slot(mas->node) + 1;
	if (parent.offset > mas_data_end(&parent))
		return false;

	*mas = parent;
	mas_descend(mas);
	return true;
}

/*
 * mte_node_or_none() - Return the encoded node or MAS_NONE.
 * @enode: The encoded maple node.
 *
 * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
 *
 * Return: @enode or MAS_NONE
 */
static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
{
	if (enode)
		return enode;

	return ma_enode_ptr(MAS_NONE);
}

/*
 * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
 * @wr_mas: The maple write state
 *
 * Uses mas_slot_locked() and does not need to worry about dead nodes.
 */
static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	unsigned char count, offset;

	if (unlikely(ma_is_dense(wr_mas->type))) {
		wr_mas->r_max = wr_mas->r_min = mas->index;
		mas->offset = mas->index = mas->min;
		return;
	}

	wr_mas->node = mas_mn(wr_mas->mas);
	wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
	count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
					       wr_mas->pivots, mas->max);
	offset = mas->offset;

	while (offset < count && mas->index > wr_mas->pivots[offset])
		offset++;

	wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max;
	wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset);
	wr_mas->offset_end = mas->offset = offset;
}

/*
 * mas_topiary_range() - Add a range of slots to the topiary.
 * @mas: The maple state
 * @destroy: The topiary to add the slots (usually destroy)
 * @start: The starting slot inclusively
 * @end: The end slot inclusively
 */
static inline void mas_topiary_range(struct ma_state *mas,
	struct ma_topiary *destroy, unsigned char start, unsigned char end)
{
	void __rcu **slots;
	unsigned char offset;

	MAS_BUG_ON(mas, mte_is_leaf(mas->node));

	slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
	for (offset = start; offset <= end; offset++) {
		struct maple_enode *enode = mas_slot_locked(mas, slots, offset);

		if (mte_dead_node(enode))
			continue;

		mat_add(destroy, enode);
	}
}

/*
 * mast_topiary() - Add the portions of the tree to the removal list; either to
 * be freed or discarded (destroy walk).
 * @mast: The maple_subtree_state.
 */
static inline void mast_topiary(struct maple_subtree_state *mast)
{
	MA_WR_STATE(wr_mas, mast->orig_l, NULL);
	unsigned char r_start, r_end;
	unsigned char l_start, l_end;
	void __rcu **l_slots, **r_slots;

	wr_mas.type = mte_node_type(mast->orig_l->node);
	mast->orig_l->index = mast->orig_l->last;
	mas_wr_node_walk(&wr_mas);
	l_start = mast->orig_l->offset + 1;
	l_end = mas_data_end(mast->orig_l);
	r_start = 0;
	r_end = mast->orig_r->offset;

	if (r_end)
		r_end--;

	l_slots = ma_slots(mas_mn(mast->orig_l),
			   mte_node_type(mast->orig_l->node));

	r_slots = ma_slots(mas_mn(mast->orig_r),
			   mte_node_type(mast->orig_r->node));

	if ((l_start < l_end) &&
	    mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) {
		l_start++;
	}

	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) {
		if (r_end)
			r_end--;
	}

	if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node))
		return;

	/* At the node where left and right sides meet, add the parts between */
	if (mast->orig_l->node == mast->orig_r->node) {
		return mas_topiary_range(mast->orig_l, mast->destroy,
					 l_start, r_end);
	}

	/* mast->orig_r is different and consumed. */
	if (mte_is_leaf(mast->orig_r->node))
		return;

	if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end)))
		l_end--;

	if (l_start <= l_end)
		mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end);

	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start)))
		r_start++;

	if (r_start <= r_end)
		mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end);
}
2428 * mast_rebalance_next() - Rebalance against the next node
2429 * @mast: The maple subtree state
2432 static inline void mast_rebalance_next(struct maple_subtree_state *mast)
2434 unsigned char b_end = mast->bn->b_end;
2436 mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
2438 mast->orig_r->last = mast->orig_r->max;
2442 * mast_rebalance_prev() - Rebalance against the previous node
2443 * @mast: The maple subtree state
2446 static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
2448 unsigned char end = mas_data_end(mast->orig_l) + 1;
2449 unsigned char b_end = mast->bn->b_end;
2451 mab_shift_right(mast->bn, end);
2452 mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
2453 mast->l->min = mast->orig_l->min;
2454 mast->orig_l->index = mast->orig_l->min;
2455 mast->bn->b_end = end + b_end;
2456 mast->l->offset += end;
2460 * mast_spanning_rebalance() - Rebalance nodes with the nearest neighbour,
2461 * favouring the node to the right.  Check the nodes to the right, then the left,
2462 * at each level upwards until the root is reached.  Free and destroy as needed.
2463 * Data is copied into the @mast->bn.
2464 * @mast: The maple_subtree_state.
2467 bool mast_spanning_rebalance(struct maple_subtree_state *mast)
2469 struct ma_state r_tmp = *mast->orig_r;
2470 struct ma_state l_tmp = *mast->orig_l;
2471 struct maple_enode *ancestor = NULL;
2472 unsigned char start, end;
2473 unsigned char depth = 0;
2475 r_tmp = *mast->orig_r;
2476 l_tmp = *mast->orig_l;
2478 mas_ascend(mast->orig_r);
2479 mas_ascend(mast->orig_l);
2482 (mast->orig_r->node == mast->orig_l->node)) {
2483 ancestor = mast->orig_r->node;
2484 end = mast->orig_r->offset - 1;
2485 start = mast->orig_l->offset + 1;
2488 if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
2490 ancestor = mast->orig_r->node;
2494 mast->orig_r->offset++;
2496 mas_descend(mast->orig_r);
2497 mast->orig_r->offset = 0;
2501 mast_rebalance_next(mast);
2503 unsigned char l_off = 0;
2504 struct maple_enode *child = r_tmp.node;
2507 if (ancestor == r_tmp.node)
2513 if (l_off < r_tmp.offset)
2514 mas_topiary_range(&r_tmp, mast->destroy,
2515 l_off, r_tmp.offset);
2517 if (l_tmp.node != child)
2518 mat_add(mast->free, child);
2520 } while (r_tmp.node != ancestor);
2522 *mast->orig_l = l_tmp;
2525 } else if (mast->orig_l->offset != 0) {
2527 ancestor = mast->orig_l->node;
2528 end = mas_data_end(mast->orig_l);
2531 mast->orig_l->offset--;
2533 mas_descend(mast->orig_l);
2534 mast->orig_l->offset =
2535 mas_data_end(mast->orig_l);
2539 mast_rebalance_prev(mast);
2541 unsigned char r_off;
2542 struct maple_enode *child = l_tmp.node;
2545 if (ancestor == l_tmp.node)
2548 r_off = mas_data_end(&l_tmp);
2550 if (l_tmp.offset < r_off)
2553 if (l_tmp.offset < r_off)
2554 mas_topiary_range(&l_tmp, mast->destroy,
2555 l_tmp.offset, r_off);
2557 if (r_tmp.node != child)
2558 mat_add(mast->free, child);
2560 } while (l_tmp.node != ancestor);
2562 *mast->orig_r = r_tmp;
2565 } while (!mte_is_root(mast->orig_r->node));
2567 *mast->orig_r = r_tmp;
2568 *mast->orig_l = l_tmp;
2573 * mast_ascend_free() - Add current original maple state nodes to the free list
2575 * @mast: the maple subtree state.
2577 * Ascend the original left and right sides and add the previous nodes to the
2578 * free list. Set the slots to point to the correct location in the new nodes.
2581 mast_ascend_free(struct maple_subtree_state *mast)
2583 MA_WR_STATE(wr_mas, mast->orig_r, NULL);
2584 struct maple_enode *left = mast->orig_l->node;
2585 struct maple_enode *right = mast->orig_r->node;
2587 mas_ascend(mast->orig_l);
2588 mas_ascend(mast->orig_r);
2589 mat_add(mast->free, left);
2592 mat_add(mast->free, right);
2594 mast->orig_r->offset = 0;
2595 mast->orig_r->index = mast->r->max;
2596 /* last should be larger than or equal to index */
2597 if (mast->orig_r->last < mast->orig_r->index)
2598 mast->orig_r->last = mast->orig_r->index;
2600 * The node may not contain the value so set the slot to ensure all
2601 * of the node's contents are freed or destroyed.
2603 wr_mas.type = mte_node_type(mast->orig_r->node);
2604 mas_wr_node_walk(&wr_mas);
2605 /* Set up the left side of things */
2606 mast->orig_l->offset = 0;
2607 mast->orig_l->index = mast->l->min;
2608 wr_mas.mas = mast->orig_l;
2609 wr_mas.type = mte_node_type(mast->orig_l->node);
2610 mas_wr_node_walk(&wr_mas);
2612 mast->bn->type = wr_mas.type;
2616 * mas_new_ma_node() - Create and return a new maple node. Helper function.
2617 * @mas: the maple state with the allocations.
2618 * @b_node: the maple_big_node with the type encoding.
2620 * Use the node type from the maple_big_node to allocate a new node from the
2621 * ma_state. This function exists mainly for code readability.
2623 * Return: A new maple encoded node
2625 static inline struct maple_enode
2626 *mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
2628 return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
2632 * mas_mab_to_node() - Set up right and middle nodes
2634 * @mas: the maple state that contains the allocations.
2635 * @b_node: the node which contains the data.
2636 * @left: The pointer which will have the left node
2637 * @right: The pointer which may have the right node
2638 * @middle: the pointer which may have the middle node (rare)
2639 * @mid_split: the split location for the middle node
2641 * Return: the split of left.
2643 static inline unsigned char mas_mab_to_node(struct ma_state *mas,
2644 struct maple_big_node *b_node, struct maple_enode **left,
2645 struct maple_enode **right, struct maple_enode **middle,
2646 unsigned char *mid_split, unsigned long min)
2648 unsigned char split = 0;
2649 unsigned char slot_count = mt_slots[b_node->type];
2651 *left = mas_new_ma_node(mas, b_node);
2656 if (b_node->b_end < slot_count) {
2657 split = b_node->b_end;
2659 split = mab_calc_split(mas, b_node, mid_split, min);
2660 *right = mas_new_ma_node(mas, b_node);
2664 *middle = mas_new_ma_node(mas, b_node);
2671 * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
2673 * @b_node: the big node to add the entry
2674 * @mas: the maple state to get the pivot (mas->max)
2675 * @entry: the entry to add, if NULL nothing happens.
2677 static inline void mab_set_b_end(struct maple_big_node *b_node,
2678 struct ma_state *mas,
2684 b_node->slot[b_node->b_end] = entry;
2685 if (mt_is_alloc(mas->tree))
2686 b_node->gap[b_node->b_end] = mas_max_gap(mas);
2687 b_node->pivot[b_node->b_end++] = mas->max;
2691 * mas_set_split_parent() - combine_then_separate helper function. Sets the parent
2692 * of @mas->node to either @left or @right, depending on @slot and @split
2694 * @mas: the maple state with the node that needs a parent
2695 * @left: possible parent 1
2696 * @right: possible parent 2
2697 * @slot: the slot the mas->node was placed
2698 * @split: the split location between @left and @right
2700 static inline void mas_set_split_parent(struct ma_state *mas,
2701 struct maple_enode *left,
2702 struct maple_enode *right,
2703 unsigned char *slot, unsigned char split)
2705 if (mas_is_none(mas))
2708 if ((*slot) <= split)
2709 mas_set_parent(mas, mas->node, left, *slot);
2711 mas_set_parent(mas, mas->node, right, (*slot) - split - 1);
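/*
 * Worked example (values assumed): with @split = 7, a child at slot 5
 * satisfies 5 <= 7 and is parented to @left at offset 5, while a child at
 * slot 10 is parented to @right at offset 10 - 7 - 1 = 2.
 */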
2717 * mte_mid_split_check() - Check if the next node passes the mid-split
2718 * @l: Pointer to the left encoded maple node.
2719 * @r: Pointer to the right encoded maple node.
2720 * @right: The encoded maple node to the right of the split.
2721 * @slot: The offset being checked.
2722 * @split: The split location.
2723 * @mid_split: The middle split.
2725 static inline void mte_mid_split_check(struct maple_enode **l,
2726 struct maple_enode **r,
2727 struct maple_enode *right,
2729 unsigned char *split,
2730 unsigned char mid_split)
2735 if (slot < mid_split)
2744 * mast_set_split_parents() - Helper function to set three nodes parents. Slot
2745 * is taken from @mast->l.
2746 * @mast: the maple subtree state
2747 * @left: the left node
2748 * @middle: the middle node
2749 * @right: the right node
2750 * @split: the split location.
2751 static inline void mast_set_split_parents(struct maple_subtree_state *mast,
2752 struct maple_enode *left,
2753 struct maple_enode *middle,
2754 struct maple_enode *right,
2755 unsigned char split,
2756 unsigned char mid_split)
2759 struct maple_enode *l = left;
2760 struct maple_enode *r = right;
2762 if (mas_is_none(mast->l))
2768 slot = mast->l->offset;
2770 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2771 mas_set_split_parent(mast->l, l, r, &slot, split);
2773 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2774 mas_set_split_parent(mast->m, l, r, &slot, split);
2776 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2777 mas_set_split_parent(mast->r, l, r, &slot, split);
2781 * mas_wmb_replace() - Write memory barrier and replace
2782 * @mas: The maple state
2783 * @free: the maple topiary list of nodes to free
2784 * @destroy: The maple topiary list of nodes to destroy (walk and free)
2786 * Updates gap as necessary.
2788 static inline void mas_wmb_replace(struct ma_state *mas,
2789 struct ma_topiary *free,
2790 struct ma_topiary *destroy)
2792 /* All nodes must see old data as dead prior to replacing that data */
2793 smp_wmb(); /* Needed for RCU */
2795 /* Insert the new data in the tree */
2796 mas_replace(mas, true);
2798 if (!mte_is_leaf(mas->node))
2799 mas_descend_adopt(mas);
2801 mas_mat_free(mas, free);
2804 mas_mat_destroy(mas, destroy);
2806 if (mte_is_leaf(mas->node))
2809 mas_update_gap(mas);
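/*
 * The smp_wmb() above pairs with the dependency ordering of
 * rcu_dereference() on the read side: a lockless reader either sees the
 * fully written new nodes or an old node already marked dead, which causes
 * the walk to restart.  Minimal reader sketch (assuming the public
 * interface; mtree_load() takes rcu_read_lock() itself and retries on dead
 * nodes):
 *
 *	void *entry = mtree_load(&tree, index);
 */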
2813 * mast_new_root() - Set a new tree root during subtree creation
2814 * @mast: The maple subtree state
2815 * @mas: The maple state
2817 static inline void mast_new_root(struct maple_subtree_state *mast,
2818 struct ma_state *mas)
2820 mas_mn(mast->l)->parent =
2821 ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT));
2822 if (!mte_dead_node(mast->orig_l->node) &&
2823 !mte_is_root(mast->orig_l->node)) {
2825 mast_ascend_free(mast);
2827 } while (!mte_is_root(mast->orig_l->node));
2829 if ((mast->orig_l->node != mas->node) &&
2830 (mast->l->depth > mas_mt_height(mas))) {
2831 mat_add(mast->free, mas->node);
2836 * mast_cp_to_nodes() - Copy data out to nodes.
2837 * @mast: The maple subtree state
2838 * @left: The left encoded maple node
2839 * @middle: The middle encoded maple node
2840 * @right: The right encoded maple node
2841 * @split: The location to split between left and (middle ? middle : right)
2842 * @mid_split: The location to split between middle and right.
2844 static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
2845 struct maple_enode *left, struct maple_enode *middle,
2846 struct maple_enode *right, unsigned char split, unsigned char mid_split)
2848 bool new_lmax = true;
2850 mast->l->node = mte_node_or_none(left);
2851 mast->m->node = mte_node_or_none(middle);
2852 mast->r->node = mte_node_or_none(right);
2854 mast->l->min = mast->orig_l->min;
2855 if (split == mast->bn->b_end) {
2856 mast->l->max = mast->orig_r->max;
2860 mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
2863 mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
2864 mast->m->min = mast->bn->pivot[split] + 1;
2868 mast->r->max = mast->orig_r->max;
2870 mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
2871 mast->r->min = mast->bn->pivot[split] + 1;
2876 * mast_combine_cp_left() - Copy in the original left side of the tree into the
2877 * combined data set in the maple subtree state big node.
2878 * @mast: The maple subtree state
2880 static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
2882 unsigned char l_slot = mast->orig_l->offset;
2887 mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
2891 * mast_combine_cp_right() - Copy in the original right side of the tree into the
2892 * combined data set in the maple subtree state big node.
2893 * @mast: The maple subtree state
2895 static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
2897 if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
2900 mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
2901 mt_slot_count(mast->orig_r->node), mast->bn,
2903 mast->orig_r->last = mast->orig_r->max;
2907 * mast_sufficient() - Check if the maple subtree state has enough data in the big
2908 * node to create at least one sufficient node
2909 * @mast: the maple subtree state
2911 static inline bool mast_sufficient(struct maple_subtree_state *mast)
2913 if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
2920 * mast_overflow() - Check if there is too much data in the subtree state for a
2921 * single node.
2922 * @mast: The maple subtree state
2924 static inline bool mast_overflow(struct maple_subtree_state *mast)
2926 if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
2932 static inline void *mtree_range_walk(struct ma_state *mas)
2934 unsigned long *pivots;
2935 unsigned char offset;
2936 struct maple_node *node;
2937 struct maple_enode *next, *last;
2938 enum maple_type type;
2941 unsigned long max, min;
2942 unsigned long prev_max, prev_min;
2950 node = mte_to_node(next);
2951 type = mte_node_type(next);
2952 pivots = ma_pivots(node, type);
2953 end = ma_data_end(node, type, pivots, max);
2954 if (unlikely(ma_dead_node(node)))
2957 if (pivots[offset] >= mas->index) {
2960 max = pivots[offset];
2966 } while ((offset < end) && (pivots[offset] < mas->index));
2969 min = pivots[offset - 1] + 1;
2971 if (likely(offset < end && pivots[offset]))
2972 max = pivots[offset];
2975 slots = ma_slots(node, type);
2976 next = mt_slot(mas->tree, slots, offset);
2977 if (unlikely(ma_dead_node(node)))
2979 } while (!ma_is_leaf(type));
2981 mas->offset = offset;
2984 mas->min = prev_min;
2985 mas->max = prev_max;
2987 return (void *)next;
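/*
 * Usage sketch (tree and values assumed): mtree_range_walk() is the
 * range-aware engine behind walks such as mas_walk():
 *
 *	MA_STATE(mas, &tree, 12, 12);
 *	void *entry = mas_walk(&mas);
 *
 * On success, [mas.index, mas.last] describes the whole range containing
 * index 12, not just the single index searched for.
 */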
2995 * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
2996 * @mas: The starting maple state
2997 * @mast: The maple_subtree_state, keeps track of 4 maple states.
2998 * @count: The estimated count of iterations needed.
3000 * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
3001 * is hit. First @b_node is split into two entries which are inserted into the
3002 * next iteration of the loop. @b_node is returned populated with the final
3003 * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the
3004 * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
3005 * to account for what has been copied into the new sub-tree. The update of
3006 * orig_l_mas->last is used in mas_consume to find the slots that will need to
3007 * be either freed or destroyed. orig_l_mas->depth keeps track of the height of
3008 * the new sub-tree in case the sub-tree becomes the full tree.
3010 * Return: the number of elements in b_node during the last loop.
3012 static int mas_spanning_rebalance(struct ma_state *mas,
3013 struct maple_subtree_state *mast, unsigned char count)
3015 unsigned char split, mid_split;
3016 unsigned char slot = 0;
3017 struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
3019 MA_STATE(l_mas, mas->tree, mas->index, mas->index);
3020 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3021 MA_STATE(m_mas, mas->tree, mas->index, mas->index);
3022 MA_TOPIARY(free, mas->tree);
3023 MA_TOPIARY(destroy, mas->tree);
3026 * The tree needs to be rebalanced and leaves need to be kept at the same level.
3027 * Rebalancing is done by use of the ``struct maple_topiary``.
3033 mast->destroy = &destroy;
3034 l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
3036 /* Check if this is not root and has sufficient data. */
3037 if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
3038 unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
3039 mast_spanning_rebalance(mast);
3041 mast->orig_l->depth = 0;
3044 * Each level of the tree is examined and balanced, pushing data to the left or
3045 * right, or rebalancing against left or right nodes is employed to avoid
3046 * rippling up the tree to limit the amount of churn. Once a new sub-section of
3047 * the tree is created, there may be a mix of new and old nodes. The old nodes
3048 * will have the incorrect parent pointers and currently be in two trees: the
3049 * original tree and the partially new tree. To remedy the parent pointers in
3050 * the old tree, the new data is swapped into the active tree and a walk down
3051 * the tree is performed and the parent pointers are updated.
3052 * See mas_descend_adopt() for more information.
3056 mast->bn->type = mte_node_type(mast->orig_l->node);
3057 split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
3058 &mid_split, mast->orig_l->min);
3059 mast_set_split_parents(mast, left, middle, right, split,
3061 mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
3064 * Copy data from next level in the tree to mast->bn from the next
3065 * iteration.
3067 memset(mast->bn, 0, sizeof(struct maple_big_node));
3068 mast->bn->type = mte_node_type(left);
3069 mast->orig_l->depth++;
3071 /* Root already stored in l->node. */
3072 if (mas_is_root_limits(mast->l))
3075 mast_ascend_free(mast);
3076 mast_combine_cp_left(mast);
3077 l_mas.offset = mast->bn->b_end;
3078 mab_set_b_end(mast->bn, &l_mas, left);
3079 mab_set_b_end(mast->bn, &m_mas, middle);
3080 mab_set_b_end(mast->bn, &r_mas, right);
3082 /* Copy anything necessary out of the right node. */
3083 mast_combine_cp_right(mast);
3085 mast->orig_l->last = mast->orig_l->max;
3087 if (mast_sufficient(mast))
3090 if (mast_overflow(mast))
3093 /* May be a new root stored in mast->bn */
3094 if (mas_is_root_limits(mast->orig_l))
3097 mast_spanning_rebalance(mast);
3099 /* rebalancing from other nodes may require another loop. */
3104 l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
3105 mte_node_type(mast->orig_l->node));
3106 mast->orig_l->depth++;
3107 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
3108 mas_set_parent(mas, left, l_mas.node, slot);
3110 mas_set_parent(mas, middle, l_mas.node, ++slot);
3113 mas_set_parent(mas, right, l_mas.node, ++slot);
3115 if (mas_is_root_limits(mast->l)) {
3117 mast_new_root(mast, mas);
3119 mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
3122 if (!mte_dead_node(mast->orig_l->node))
3123 mat_add(&free, mast->orig_l->node);
3125 mas->depth = mast->orig_l->depth;
3126 *mast->orig_l = l_mas;
3127 mte_set_node_dead(mas->node);
3129 /* Set up mas for insertion. */
3130 mast->orig_l->depth = mas->depth;
3131 mast->orig_l->alloc = mas->alloc;
3132 *mas = *mast->orig_l;
3133 mas_wmb_replace(mas, &free, &destroy);
3134 mtree_range_walk(mas);
3135 return mast->bn->b_end;
3139 * mas_rebalance() - Rebalance a given node.
3140 * @mas: The maple state
3141 * @b_node: The big maple node.
3143 * Rebalance two nodes into a single node or two new nodes that are sufficient.
3144 * Continue upwards until tree is sufficient.
3146 * Return: the number of elements in b_node during the last loop.
3148 static inline int mas_rebalance(struct ma_state *mas,
3149 struct maple_big_node *b_node)
3151 char empty_count = mas_mt_height(mas);
3152 struct maple_subtree_state mast;
3153 unsigned char shift, b_end = ++b_node->b_end;
3155 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3156 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3158 trace_ma_op(__func__, mas);
3161 * Rebalancing occurs if a node is insufficient. Data is rebalanced
3162 * against the node to the right if it exists, otherwise the node to the
3163 * left of this node is rebalanced against this node. If rebalancing
3164 * causes just one node to be produced instead of two, then the parent
3165 * is also examined and rebalanced if it is insufficient. Every level
3166 * tries to combine the data in the same way. If one node contains the
3167 * entire range of the tree, then that node is used as a new root node.
3169 mas_node_count(mas, 1 + empty_count * 3);
3170 if (mas_is_err(mas))
3173 mast.orig_l = &l_mas;
3174 mast.orig_r = &r_mas;
3176 mast.bn->type = mte_node_type(mas->node);
3178 l_mas = r_mas = *mas;
3180 if (mas_next_sibling(&r_mas)) {
3181 mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
3182 r_mas.last = r_mas.index = r_mas.max;
3184 mas_prev_sibling(&l_mas);
3185 shift = mas_data_end(&l_mas) + 1;
3186 mab_shift_right(b_node, shift);
3187 mas->offset += shift;
3188 mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
3189 b_node->b_end = shift + b_end;
3190 l_mas.index = l_mas.last = l_mas.min;
3193 return mas_spanning_rebalance(mas, &mast, empty_count);
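/*
 * Worked example (slot counts assumed, MAPLE_RANGE64_SLOTS == 16): the
 * minimum occupancy of a range64 leaf is (16 / 2) - 2 = 6 slots.  A leaf
 * left with 5 entries after an overwrite is combined here with its right
 * sibling (or its left sibling when no right sibling exists); if the
 * combined data fits in one node a single node results, otherwise it is
 * redistributed into two sufficient nodes.
 */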
3197 * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
3198 * tree.
3199 * @mas: The maple state
3200 * @end: The end of the left-most node.
3202 * During a mass-insert event (such as forking), it may be necessary to
3203 * rebalance the left-most node when it is not sufficient.
3205 static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
3207 enum maple_type mt = mte_node_type(mas->node);
3208 struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
3209 struct maple_enode *eparent;
3210 unsigned char offset, tmp, split = mt_slots[mt] / 2;
3211 void __rcu **l_slots, **slots;
3212 unsigned long *l_pivs, *pivs, gap;
3213 bool in_rcu = mt_in_rcu(mas->tree);
3215 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3218 mas_prev_sibling(&l_mas);
3222 /* Allocate for both left and right as well as parent. */
3223 mas_node_count(mas, 3);
3224 if (mas_is_err(mas))
3227 newnode = mas_pop_node(mas);
3233 newnode->parent = node->parent;
3234 slots = ma_slots(newnode, mt);
3235 pivs = ma_pivots(newnode, mt);
3236 left = mas_mn(&l_mas);
3237 l_slots = ma_slots(left, mt);
3238 l_pivs = ma_pivots(left, mt);
3239 if (!l_slots[split])
3241 tmp = mas_data_end(&l_mas) - split;
3243 memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
3244 memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
3245 pivs[tmp] = l_mas.max;
3246 memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
3247 memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
3249 l_mas.max = l_pivs[split];
3250 mas->min = l_mas.max + 1;
3251 eparent = mt_mk_node(mte_parent(l_mas.node),
3252 mas_parent_type(&l_mas, l_mas.node));
3255 unsigned char max_p = mt_pivots[mt];
3256 unsigned char max_s = mt_slots[mt];
3259 memset(pivs + tmp, 0,
3260 sizeof(unsigned long) * (max_p - tmp));
3262 if (tmp < mt_slots[mt])
3263 memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3265 memcpy(node, newnode, sizeof(struct maple_node));
3266 ma_set_meta(node, mt, 0, tmp - 1);
3267 mte_set_pivot(eparent, mte_parent_slot(l_mas.node),
3270 /* Remove data from l_pivs. */
3272 memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
3273 memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3274 ma_set_meta(left, mt, 0, split);
3279 /* RCU requires replacing both l_mas, mas, and parent. */
3280 mas->node = mt_mk_node(newnode, mt);
3281 ma_set_meta(newnode, mt, 0, tmp);
3283 new_left = mas_pop_node(mas);
3284 new_left->parent = left->parent;
3285 mt = mte_node_type(l_mas.node);
3286 slots = ma_slots(new_left, mt);
3287 pivs = ma_pivots(new_left, mt);
3288 memcpy(slots, l_slots, sizeof(void *) * split);
3289 memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
3290 ma_set_meta(new_left, mt, 0, split);
3291 l_mas.node = mt_mk_node(new_left, mt);
3293 /* replace parent. */
3294 offset = mte_parent_slot(mas->node);
3295 mt = mas_parent_type(&l_mas, l_mas.node);
3296 parent = mas_pop_node(mas);
3297 slots = ma_slots(parent, mt);
3298 pivs = ma_pivots(parent, mt);
3299 memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node));
3300 rcu_assign_pointer(slots[offset], mas->node);
3301 rcu_assign_pointer(slots[offset - 1], l_mas.node);
3302 pivs[offset - 1] = l_mas.max;
3303 eparent = mt_mk_node(parent, mt);
3305 gap = mas_leaf_max_gap(mas);
3306 mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
3307 gap = mas_leaf_max_gap(&l_mas);
3308 mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
3312 mas_replace(mas, false);
3314 mas_update_gap(mas);
3318 * mas_split_final_node() - Split the final node in a subtree operation.
3319 * @mast: the maple subtree state
3320 * @mas: The maple state
3321 * @height: The height of the tree in case it's a new root.
3323 static inline bool mas_split_final_node(struct maple_subtree_state *mast,
3324 struct ma_state *mas, int height)
3326 struct maple_enode *ancestor;
3328 if (mte_is_root(mas->node)) {
3329 if (mt_is_alloc(mas->tree))
3330 mast->bn->type = maple_arange_64;
3332 mast->bn->type = maple_range_64;
3333 mas->depth = height;
3336 * Only a single node is used here, could be root.
3337 * The Big_node data should just fit in a single node.
3339 ancestor = mas_new_ma_node(mas, mast->bn);
3340 mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset);
3341 mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset);
3342 mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
3344 mast->l->node = ancestor;
3345 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
3346 mas->offset = mast->bn->b_end - 1;
3351 * mast_fill_bnode() - Copy data into the big node in the subtree state
3352 * @mast: The maple subtree state
3353 * @mas: the maple state
3354 * @skip: The number of entries to skip when inserting the new nodes.
3356 static inline void mast_fill_bnode(struct maple_subtree_state *mast,
3357 struct ma_state *mas,
3361 struct maple_enode *old = mas->node;
3362 unsigned char split;
3364 memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
3365 memset(mast->bn->slot, 0, sizeof(void *) * ARRAY_SIZE(mast->bn->slot));
3366 memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
3367 mast->bn->b_end = 0;
3369 if (mte_is_root(mas->node)) {
3373 mat_add(mast->free, old);
3374 mas->offset = mte_parent_slot(mas->node);
3377 if (cp && mast->l->offset)
3378 mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
3380 split = mast->bn->b_end;
3381 mab_set_b_end(mast->bn, mast->l, mast->l->node);
3382 mast->r->offset = mast->bn->b_end;
3383 mab_set_b_end(mast->bn, mast->r, mast->r->node);
3384 if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
3388 mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
3389 mast->bn, mast->bn->b_end);
3392 mast->bn->type = mte_node_type(mas->node);
3396 * mast_split_data() - Split the data in the subtree state big node into regular
3397 * nodes.
3398 * @mast: The maple subtree state
3399 * @mas: The maple state
3400 * @split: The location to split the big node
3402 static inline void mast_split_data(struct maple_subtree_state *mast,
3403 struct ma_state *mas, unsigned char split)
3405 unsigned char p_slot;
3407 mab_mas_cp(mast->bn, 0, split, mast->l, true);
3408 mte_set_pivot(mast->r->node, 0, mast->r->max);
3409 mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
3410 mast->l->offset = mte_parent_slot(mas->node);
3411 mast->l->max = mast->bn->pivot[split];
3412 mast->r->min = mast->l->max + 1;
3413 if (mte_is_leaf(mas->node))
3416 p_slot = mast->orig_l->offset;
3417 mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
3419 mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
3424 * mas_push_data() - Instead of splitting a node, it is beneficial to push the
3425 * data to the right or left node if there is room.
3426 * @mas: The maple state
3427 * @height: The current height of the maple state
3428 * @mast: The maple subtree state
3429 * @left: Push left or not.
3431 * Keeping the height of the tree low means faster lookups.
3433 * Return: True if pushed, false otherwise.
3435 static inline bool mas_push_data(struct ma_state *mas, int height,
3436 struct maple_subtree_state *mast, bool left)
3438 unsigned char slot_total = mast->bn->b_end;
3439 unsigned char end, space, split;
3441 MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
3443 tmp_mas.depth = mast->l->depth;
3445 if (left && !mas_prev_sibling(&tmp_mas))
3447 else if (!left && !mas_next_sibling(&tmp_mas))
3450 end = mas_data_end(&tmp_mas);
3452 space = 2 * mt_slot_count(mas->node) - 2;
3453 /* -2 instead of -1 to ensure there isn't a triple split */
3454 if (ma_is_leaf(mast->bn->type))
3457 if (mas->max == ULONG_MAX)
3460 if (slot_total >= space)
3463 /* Get the data; Fill mast->bn */
3466 mab_shift_right(mast->bn, end + 1);
3467 mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
3468 mast->bn->b_end = slot_total + 1;
3470 mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
3473 /* Configure mast for splitting of mast->bn */
3474 split = mt_slots[mast->bn->type] - 2;
3476 /* Switch mas to prev node */
3477 mat_add(mast->free, mas->node);
3479 /* Start using mast->l for the left side. */
3480 tmp_mas.node = mast->l->node;
3483 mat_add(mast->free, tmp_mas.node);
3484 tmp_mas.node = mast->r->node;
3486 split = slot_total - split;
3488 split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
3489 /* Update parent slot for split calculation. */
3491 mast->orig_l->offset += end + 1;
3493 mast_split_data(mast, mas, split);
3494 mast_fill_bnode(mast, mas, 2);
3495 mas_split_final_node(mast, mas, height + 1);
3500 * mas_split() - Split data that is too big for one node into two.
3501 * @mas: The maple state
3502 * @b_node: The maple big node
3503 * Return: 1 on success, 0 on failure.
3505 static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
3507 struct maple_subtree_state mast;
3509 unsigned char mid_split, split = 0;
3512 * Splitting is handled differently from any other B-tree; the Maple
3513 * Tree splits upwards. Splitting up means that the split operation
3514 * occurs when the walk of the tree hits the leaves and not on the way
3515 * down. The reason for splitting up is that it is impossible to know
3516 * how much space will be needed until the leaf is (or leaves are)
3517 * reached. Since overwriting data is allowed and a range could
3518 * overwrite more than one range or result in changing one entry into 3
3519 * entries, it is impossible to know if a split is required until the
3520 * data is examined.
3522 * Splitting is a balancing act between keeping allocations to a minimum
3523 * and avoiding a 'jitter' event where a tree is expanded to make room
3524 * for an entry followed by a contraction when the entry is removed. To
3525 * accomplish the balance, there are empty slots remaining in both left
3526 * and right nodes after a split.
3528 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3529 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3530 MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
3531 MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
3532 MA_TOPIARY(mat, mas->tree);
3534 trace_ma_op(__func__, mas);
3535 mas->depth = mas_mt_height(mas);
3536 /* Allocation failures will happen early. */
3537 mas_node_count(mas, 1 + mas->depth * 2);
3538 if (mas_is_err(mas))
3543 mast.orig_l = &prev_l_mas;
3544 mast.orig_r = &prev_r_mas;
3548 while (height++ <= mas->depth) {
3549 if (mt_slots[b_node->type] > b_node->b_end) {
3550 mas_split_final_node(&mast, mas, height);
3554 l_mas = r_mas = *mas;
3555 l_mas.node = mas_new_ma_node(mas, b_node);
3556 r_mas.node = mas_new_ma_node(mas, b_node);
3558 * Another way that 'jitter' is avoided is to terminate a split up early if the
3559 * left or right node has space to spare. This is referred to as "pushing left"
3560 * or "pushing right" and is similar to the B* tree, except the nodes left or
3561 * right can rarely be reused due to RCU, but the ripple upwards is halted,
3562 * which is a significant saving.
3564 /* Try to push left. */
3565 if (mas_push_data(mas, height, &mast, true))
3568 /* Try to push right. */
3569 if (mas_push_data(mas, height, &mast, false))
3572 split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
3573 mast_split_data(&mast, mas, split);
3575 * Usually correct, mab_mas_cp in the above call overwrites
3576 * r->max.
3578 mast.r->max = mas->max;
3579 mast_fill_bnode(&mast, mas, 1);
3580 prev_l_mas = *mast.l;
3581 prev_r_mas = *mast.r;
3584 /* Set the original node as dead */
3585 mat_add(mast.free, mas->node);
3586 mas->node = l_mas.node;
3587 mas_wmb_replace(mas, mast.free, NULL);
3588 mtree_range_walk(mas);
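/*
 * Worked example (slot counts assumed): with 16-slot leaves, a leaf that
 * must hold a 17th entry first attempts mas_push_data() so that part of
 * its data moves into a sibling with spare room and the split terminates
 * at this level; only when the siblings are also near capacity is the data
 * split, and only a full parent makes the split ripple up another level.
 */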
3593 * mas_reuse_node() - Reuse the node to store the data.
3594 * @wr_mas: The maple write state
3595 * @bn: The maple big node
3596 * @end: The end of the data.
3598 * Will always return false in RCU mode.
3600 * Return: True if node was reused, false otherwise.
3602 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
3603 struct maple_big_node *bn, unsigned char end)
3605 /* Need to be rcu safe. */
3606 if (mt_in_rcu(wr_mas->mas->tree))
3609 if (end > bn->b_end) {
3610 int clear = mt_slots[wr_mas->type] - bn->b_end;
3612 memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
3613 memset(wr_mas->pivots + bn->b_end, 0, sizeof(unsigned long) * clear);
3615 mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
3620 * mas_commit_b_node() - Commit the big node into the tree.
3621 * @wr_mas: The maple write state
3622 * @b_node: The maple big node
3623 * @end: The end of the data.
3625 static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
3626 struct maple_big_node *b_node, unsigned char end)
3628 struct maple_node *node;
3629 unsigned char b_end = b_node->b_end;
3630 enum maple_type b_type = b_node->type;
3632 if ((b_end < mt_min_slots[b_type]) &&
3633 (!mte_is_root(wr_mas->mas->node)) &&
3634 (mas_mt_height(wr_mas->mas) > 1))
3635 return mas_rebalance(wr_mas->mas, b_node);
3637 if (b_end >= mt_slots[b_type])
3638 return mas_split(wr_mas->mas, b_node);
3640 if (mas_reuse_node(wr_mas, b_node, end))
3643 mas_node_count(wr_mas->mas, 1);
3644 if (mas_is_err(wr_mas->mas))
3647 node = mas_pop_node(wr_mas->mas);
3648 node->parent = mas_mn(wr_mas->mas)->parent;
3649 wr_mas->mas->node = mt_mk_node(node, b_type);
3650 mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
3651 mas_replace(wr_mas->mas, false);
3653 mas_update_gap(wr_mas->mas);
3658 * mas_root_expand() - Expand a root to a node
3659 * @mas: The maple state
3660 * @entry: The entry to store into the tree
3662 static inline int mas_root_expand(struct ma_state *mas, void *entry)
3664 void *contents = mas_root_locked(mas);
3665 enum maple_type type = maple_leaf_64;
3666 struct maple_node *node;
3668 unsigned long *pivots;
3671 mas_node_count(mas, 1);
3672 if (unlikely(mas_is_err(mas)))
3675 node = mas_pop_node(mas);
3676 pivots = ma_pivots(node, type);
3677 slots = ma_slots(node, type);
3678 node->parent = ma_parent_ptr(
3679 ((unsigned long)mas->tree | MA_ROOT_PARENT));
3680 mas->node = mt_mk_node(node, type);
3684 rcu_assign_pointer(slots[slot], contents);
3685 if (likely(mas->index > 1))
3688 pivots[slot++] = mas->index - 1;
3691 rcu_assign_pointer(slots[slot], entry);
3693 pivots[slot] = mas->last;
3694 if (mas->last != ULONG_MAX)
3695 pivots[++slot] = ULONG_MAX;
3698 mas_set_height(mas);
3699 ma_set_meta(node, maple_leaf_64, 0, slot);
3700 /* swap the new root into the tree */
3701 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3705 static inline void mas_store_root(struct ma_state *mas, void *entry)
3707 if (likely((mas->last != 0) || (mas->index != 0)))
3708 mas_root_expand(mas, entry);
3709 else if (((unsigned long) (entry) & 3) == 2)
3710 mas_root_expand(mas, entry);
3712 rcu_assign_pointer(mas->tree->ma_root, entry);
3713 mas->node = MAS_START;
3718 * mas_is_span_wr() - Check if the write needs to be treated as a write that
3719 * spans the node.
3720 * @wr_mas: The maple write state
3725 * Spanning writes are writes that start in one node and end in another OR if
3726 * the write of a %NULL will cause the node to end with a %NULL.
3728 * Return: True if this is a spanning write, false otherwise.
3730 static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
3732 unsigned long max = wr_mas->r_max;
3733 unsigned long last = wr_mas->mas->last;
3734 enum maple_type type = wr_mas->type;
3735 void *entry = wr_mas->entry;
3737 /* Contained in this pivot, fast path */
3741 if (ma_is_leaf(type)) {
3742 max = wr_mas->mas->max;
3749 * The last entry of a leaf node cannot be NULL unless it is the
3750 * rightmost node (writing ULONG_MAX), otherwise it spans slots.
3752 if (entry || last == ULONG_MAX)
3756 trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry);
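/*
 * Worked examples (ranges assumed): in a leaf covering [0, 100] with
 * r_max == 100, a store over [90, 120] ends in another node and is a
 * spanning write.  A store of NULL over [90, 100] is also treated as
 * spanning, because the trailing NULL may need to be merged with the
 * first range of the next leaf by a rebalance.
 */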
3760 static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
3762 wr_mas->type = mte_node_type(wr_mas->mas->node);
3763 mas_wr_node_walk(wr_mas);
3764 wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
3767 static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
3769 wr_mas->mas->max = wr_mas->r_max;
3770 wr_mas->mas->min = wr_mas->r_min;
3771 wr_mas->mas->node = wr_mas->content;
3772 wr_mas->mas->offset = 0;
3773 wr_mas->mas->depth++;
3776 * mas_wr_walk() - Walk the tree for a write.
3777 * @wr_mas: The maple write state
3779 * Uses mas_slot_locked() and does not need to worry about dead nodes.
3781 * Return: True if it's contained in a node, false on spanning write.
3783 static bool mas_wr_walk(struct ma_wr_state *wr_mas)
3785 struct ma_state *mas = wr_mas->mas;
3788 mas_wr_walk_descend(wr_mas);
3789 if (unlikely(mas_is_span_wr(wr_mas)))
3792 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3794 if (ma_is_leaf(wr_mas->type))
3797 mas_wr_walk_traverse(wr_mas);
3803 static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
3805 struct ma_state *mas = wr_mas->mas;
3808 mas_wr_walk_descend(wr_mas);
3809 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3811 if (ma_is_leaf(wr_mas->type))
3813 mas_wr_walk_traverse(wr_mas);
3819 * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
3820 * @l_wr_mas: The left maple write state
3821 * @r_wr_mas: The right maple write state
3823 static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
3824 struct ma_wr_state *r_wr_mas)
3826 struct ma_state *r_mas = r_wr_mas->mas;
3827 struct ma_state *l_mas = l_wr_mas->mas;
3828 unsigned char l_slot;
3830 l_slot = l_mas->offset;
3831 if (!l_wr_mas->content)
3832 l_mas->index = l_wr_mas->r_min;
3834 if ((l_mas->index == l_wr_mas->r_min) &&
3836 !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
3838 l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
3840 l_mas->index = l_mas->min;
3842 l_mas->offset = l_slot - 1;
3845 if (!r_wr_mas->content) {
3846 if (r_mas->last < r_wr_mas->r_max)
3847 r_mas->last = r_wr_mas->r_max;
3849 } else if ((r_mas->last == r_wr_mas->r_max) &&
3850 (r_mas->last < r_mas->max) &&
3851 !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
3852 r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
3853 r_wr_mas->type, r_mas->offset + 1);
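/*
 * Worked example (layout assumed): with ranges [0, 4] = NULL,
 * [5, 10] = A and [11, 20] = NULL, a store of NULL over [5, 10] is
 * widened here to the single range [0, 20], so the tree never keeps
 * adjacent NULL ranges.
 */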
3858 static inline void *mas_state_walk(struct ma_state *mas)
3862 entry = mas_start(mas);
3863 if (mas_is_none(mas))
3866 if (mas_is_ptr(mas))
3869 return mtree_range_walk(mas);
3873 * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
3874 * to date.
3876 * @mas: The maple state.
3878 * Note: Leaves mas in an undesirable state.
3879 * Return: The entry for @mas->index or %NULL on dead node.
3881 static inline void *mtree_lookup_walk(struct ma_state *mas)
3883 unsigned long *pivots;
3884 unsigned char offset;
3885 struct maple_node *node;
3886 struct maple_enode *next;
3887 enum maple_type type;
3896 node = mte_to_node(next);
3897 type = mte_node_type(next);
3898 pivots = ma_pivots(node, type);
3899 end = ma_data_end(node, type, pivots, max);
3900 if (unlikely(ma_dead_node(node)))
3903 if (pivots[offset] >= mas->index) {
3904 max = pivots[offset];
3907 } while (++offset < end);
3909 slots = ma_slots(node, type);
3910 next = mt_slot(mas->tree, slots, offset);
3911 if (unlikely(ma_dead_node(node)))
3913 } while (!ma_is_leaf(type));
3915 return (void *)next;
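/*
 * Usage sketch (assumed): this loop is the engine behind mtree_load(),
 * which restarts from the root when a dead node is met under RCU:
 *
 *	void *entry = mtree_load(&tree, 42);
 *
 * The caller discards the maple state afterwards, which is why it may be
 * left in an undesirable state here.
 */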
3923 * mas_new_root() - Create a new root node that only contains the entry passed
3924 * in.
3925 * @mas: The maple state
3926 * @entry: The entry to store.
3928 * Only valid when mas->index == 0 and mas->last == ULONG_MAX.
3930 * Return: 0 on error, 1 on success.
3932 static inline int mas_new_root(struct ma_state *mas, void *entry)
3934 struct maple_enode *root = mas_root_locked(mas);
3935 enum maple_type type = maple_leaf_64;
3936 struct maple_node *node;
3938 unsigned long *pivots;
3940 if (!entry && !mas->index && mas->last == ULONG_MAX) {
3942 mas_set_height(mas);
3943 rcu_assign_pointer(mas->tree->ma_root, entry);
3944 mas->node = MAS_START;
3948 mas_node_count(mas, 1);
3949 if (mas_is_err(mas))
3952 node = mas_pop_node(mas);
3953 pivots = ma_pivots(node, type);
3954 slots = ma_slots(node, type);
3955 node->parent = ma_parent_ptr(
3956 ((unsigned long)mas->tree | MA_ROOT_PARENT));
3957 mas->node = mt_mk_node(node, type);
3958 rcu_assign_pointer(slots[0], entry);
3959 pivots[0] = mas->last;
3961 mas_set_height(mas);
3962 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3965 if (xa_is_node(root))
3966 mte_destroy_walk(root, mas->tree);
3971 * mas_wr_spanning_store() - Create a subtree with the store operation completed
3972 * and new nodes where necessary, then place the sub-tree in the actual tree.
3973 * Note that mas is expected to point to the node which caused the store to
3974 * occur.
3975 * @wr_mas: The maple write state
3977 * Return: 0 on error, positive on success.
3979 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
3981 struct maple_subtree_state mast;
3982 struct maple_big_node b_node;
3983 struct ma_state *mas;
3984 unsigned char height;
3986 /* Left and Right side of spanning store */
3987 MA_STATE(l_mas, NULL, 0, 0);
3988 MA_STATE(r_mas, NULL, 0, 0);
3990 MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
3991 MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
3994 * A store operation that spans multiple nodes is called a spanning
3995 * store and is handled early in the store call stack by the function
3996 * mas_is_span_wr(). When a spanning store is identified, the maple
3997 * state is duplicated. The first maple state walks the left tree path
3998 * to ``index``, the duplicate walks the right tree path to ``last``.
3999 * The data in the two nodes are combined into a single node, two nodes,
4000 * or possibly three nodes (see the 3-way split above). A ``NULL``
4001 * written to the last entry of a node is considered a spanning store as
4002 * a rebalance is required for the operation to complete and an overflow
4003 * of data may happen.
4006 trace_ma_op(__func__, mas);
4008 if (unlikely(!mas->index && mas->last == ULONG_MAX))
4009 return mas_new_root(mas, wr_mas->entry);
4011 * Node rebalancing may occur due to this store, so there may be three new
4012 * entries per level plus a new root.
4014 height = mas_mt_height(mas);
4015 mas_node_count(mas, 1 + height * 3);
4016 if (mas_is_err(mas))
4020 * Set up right side. Need to get to the next offset after the spanning
4021 * store to ensure it's not NULL and to combine both the next node and
4022 * the node with the start together.
4025 /* Avoid overflow, walk to next slot in the tree. */
4029 r_mas.index = r_mas.last;
4030 mas_wr_walk_index(&r_wr_mas);
4031 r_mas.last = r_mas.index = mas->last;
4033 /* Set up left side. */
4035 mas_wr_walk_index(&l_wr_mas);
4037 if (!wr_mas->entry) {
4038 mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
4039 mas->offset = l_mas.offset;
4040 mas->index = l_mas.index;
4041 mas->last = l_mas.last = r_mas.last;
4044 /* expanding NULLs may make this cover the entire range */
4045 if (!l_mas.index && r_mas.last == ULONG_MAX) {
4046 mas_set_range(mas, 0, ULONG_MAX);
4047 return mas_new_root(mas, wr_mas->entry);
4050 memset(&b_node, 0, sizeof(struct maple_big_node));
4051 /* Copy l_mas and store the value in b_node. */
4052 mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
4053 /* Copy r_mas into b_node. */
4054 if (r_mas.offset <= r_wr_mas.node_end)
4055 mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
4056 &b_node, b_node.b_end + 1);
4060 /* Stop spanning searches by searching for just index. */
4061 l_mas.index = l_mas.last = mas->index;
4064 mast.orig_l = &l_mas;
4065 mast.orig_r = &r_mas;
4066 /* Combine l_mas and r_mas and split them up evenly again. */
4067 return mas_spanning_rebalance(mas, &mast, height + 1);
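/*
 * Usage sketch (layout assumed): if a leaf boundary falls between the
 * ranges [0, 10] and [11, 20], a store such as
 *
 *	mtree_store_range(&tree, 5, 15, ptr, GFP_KERNEL);
 *
 * starts in one leaf and ends in the next, so mas_is_span_wr() routes the
 * write here to rebuild the affected subtree in one pass.
 */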
4071 * mas_wr_node_store() - Attempt to store the value in a node
4072 * @wr_mas: The maple write state
4074 * Attempts to reuse the node, but may allocate.
4076 * Return: True if stored, false otherwise
4078 static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
4079 unsigned char new_end)
4081 struct ma_state *mas = wr_mas->mas;
4082 void __rcu **dst_slots;
4083 unsigned long *dst_pivots;
4084 unsigned char dst_offset, offset_end = wr_mas->offset_end;
4085 struct maple_node reuse, *newnode;
4086 unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
4087 bool in_rcu = mt_in_rcu(mas->tree);
4089 /* The write would leave the node under-full; fall back to the slow path. */
4090 if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
4091 !(mas->mas_flags & MA_STATE_BULK))
4094 if (mas->last == wr_mas->end_piv)
4095 offset_end++; /* don't copy this offset */
4096 else if (unlikely(wr_mas->r_max == ULONG_MAX))
4097 mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
4101 mas_node_count(mas, 1);
4102 if (mas_is_err(mas))
4105 newnode = mas_pop_node(mas);
4107 memset(&reuse, 0, sizeof(struct maple_node));
4111 newnode->parent = mas_mn(mas)->parent;
4112 dst_pivots = ma_pivots(newnode, wr_mas->type);
4113 dst_slots = ma_slots(newnode, wr_mas->type);
4114 /* Copy from start to insert point */
4115 memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset);
4116 memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset);
4118 /* Handle insert of new range starting after old range */
4119 if (wr_mas->r_min < mas->index) {
4120 rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
4121 dst_pivots[mas->offset++] = mas->index - 1;
4124 /* Store the new entry and range end. */
4125 if (mas->offset < node_pivots)
4126 dst_pivots[mas->offset] = mas->last;
4127 rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry);
4130 * this range wrote to the end of the node or it overwrote the rest of
4131 * the data.
4133 if (offset_end > wr_mas->node_end)
4136 dst_offset = mas->offset + 1;
4137 /* Copy to the end of node if necessary. */
4138 copy_size = wr_mas->node_end - offset_end + 1;
4139 memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end,
4140 sizeof(void *) * copy_size);
4141 memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end,
4142 sizeof(unsigned long) * (copy_size - 1));
4144 if (new_end < node_pivots)
4145 dst_pivots[new_end] = mas->max;
4148 mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
4150 mte_set_node_dead(mas->node);
4151 mas->node = mt_mk_node(newnode, wr_mas->type);
4152 mas_replace(mas, false);
4154 memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
4156 trace_ma_write(__func__, mas, 0, wr_mas->entry);
4157 mas_update_gap(mas);
4162 * mas_wr_slot_store() - Attempt to store a value in a slot.
4163 * @wr_mas: the maple write state
4165 * Return: True if stored, false otherwise
4167 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
4169 struct ma_state *mas = wr_mas->mas;
4170 unsigned char offset = mas->offset;
4173 if (wr_mas->offset_end - offset != 1)
4176 gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset);
4177 gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset + 1);
4179 if (mas->index == wr_mas->r_min) {
4180 /* Overwriting the range and over a part of the next range. */
4181 rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry);
4182 wr_mas->pivots[offset] = mas->last;
4184 /* Overwriting a part of the range and over the next range */
4185 rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
4186 wr_mas->pivots[offset] = mas->index - 1;
4187 mas->offset++; /* Keep mas accurate. */
4190 trace_ma_write(__func__, mas, 0, wr_mas->entry);
4192 * Only update gap when the new entry is empty or there is an empty
4193 * entry in the original two ranges.
4195 if (!wr_mas->entry || gap)
4196 mas_update_gap(mas);
4201 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
4203 while ((wr_mas->offset_end < wr_mas->node_end) &&
4204 (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
4205 wr_mas->offset_end++;
4207 if (wr_mas->offset_end < wr_mas->node_end)
4208 wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
4210 wr_mas->end_piv = wr_mas->mas->max;
4213 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
4215 struct ma_state *mas = wr_mas->mas;
4217 if (!wr_mas->slots[wr_mas->offset_end]) {
4218 /* If this one is null, the next and prev are not */
4219 mas->last = wr_mas->end_piv;
4221 /* Check next slot(s) if we are overwriting the end */
4222 if ((mas->last == wr_mas->end_piv) &&
4223 (wr_mas->node_end != wr_mas->offset_end) &&
4224 !wr_mas->slots[wr_mas->offset_end + 1]) {
4225 wr_mas->offset_end++;
4226 if (wr_mas->offset_end == wr_mas->node_end)
4227 mas->last = mas->max;
4229 mas->last = wr_mas->pivots[wr_mas->offset_end];
4230 wr_mas->end_piv = mas->last;
4234 if (!wr_mas->content) {
4235 /* If this one is null, the next and prev are not */
4236 mas->index = wr_mas->r_min;
4238 /* Check prev slot if we are overwriting the start */
4239 if (mas->index == wr_mas->r_min && mas->offset &&
4240 !wr_mas->slots[mas->offset - 1]) {
4242 wr_mas->r_min = mas->index =
4243 mas_safe_min(mas, wr_mas->pivots, mas->offset);
4244 wr_mas->r_max = wr_mas->pivots[mas->offset];
4249 static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
4251 struct ma_state *mas = wr_mas->mas;
4252 unsigned char new_end = wr_mas->node_end + 2;
4254 new_end -= wr_mas->offset_end - mas->offset;
4255 if (wr_mas->r_min == mas->index)
4258 if (wr_mas->end_piv == mas->last)
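/*
 * Worked example (counts assumed): for node_end == 9, a write landing
 * strictly inside one existing range (offset_end == offset) that touches
 * neither boundary adds two pivots: new_end = 9 + 2 - 0 = 11.  A write
 * that exactly replaces one range (r_min == index and end_piv == last)
 * takes both decrements and new_end remains 9.
 */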
4265 * mas_wr_append() - Attempt to append
4266 * @wr_mas: the maple write state
4268 * This is currently unsafe in rcu mode since the end of the node may be cached
4269 * by readers while the node contents are updated, which could result in
4270 * inaccurate information.
4272 * Return: True if appended, false otherwise
4274 static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
4276 unsigned char end = wr_mas->node_end;
4277 unsigned char new_end = end + 1;
4278 struct ma_state *mas = wr_mas->mas;
4279 unsigned char node_pivots = mt_pivots[wr_mas->type];
4281 if (mt_in_rcu(mas->tree))
4284 if (mas->offset != wr_mas->node_end)
4287 if (new_end < node_pivots) {
4288 wr_mas->pivots[new_end] = wr_mas->pivots[end];
4289 ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4292 if (mas->last == wr_mas->r_max) {
4293 /* Append to end of range */
4294 rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry);
4295 wr_mas->pivots[end] = mas->index - 1;
4296 mas->offset = new_end;
4298 /* Append to start of range */
4299 rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
4300 wr_mas->pivots[end] = mas->last;
4301 rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
4304 if (!wr_mas->content || !wr_mas->entry)
4305 mas_update_gap(mas);
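/*
 * Worked example (ranges assumed): if the last range in the node is
 * [20, 30] = A and B is stored over [25, 30] (mas->last == r_max), B goes
 * in the new end slot and pivots[end] becomes 24, leaving [20, 24] = A and
 * [25, 30] = B.  Storing B over [20, 24] instead copies A into the new end
 * slot for [25, 30] and overwrites the old slot with B for [20, 24].
 */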
4311 * mas_wr_bnode() - Slow path for a modification.
4312 * @wr_mas: The maple write state
4314 * This is where split and rebalance end up.
4316 static void mas_wr_bnode(struct ma_wr_state *wr_mas)
4318 struct maple_big_node b_node;
4320 trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
4321 memset(&b_node, 0, sizeof(struct maple_big_node));
4322 mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
4323 mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
4326 static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
4328 struct ma_state *mas = wr_mas->mas;
4329 unsigned char new_end;
4331 /* Direct replacement */
4332 if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
4333 rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
4334 if (!!wr_mas->entry ^ !!wr_mas->content)
4335 mas_update_gap(mas);
4340 * new_end exceeds the size of the maple node and cannot enter the fast
4341 * path.
4343 new_end = mas_wr_new_end(wr_mas);
4344 if (new_end >= mt_slots[wr_mas->type])
4347 /* Attempt to append */
4348 if (new_end == wr_mas->node_end + 1 && mas_wr_append(wr_mas))
4351 if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
4354 if (mas_wr_node_store(wr_mas, new_end))
4357 if (mas_is_err(mas))
4361 mas_wr_bnode(wr_mas);
4365 * mas_wr_store_entry() - Internal call to store a value
4366 * @wr_mas: The maple write state holding the entry to store
4367 *
4369 * Return: The contents that were stored at the index.
4371 static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
4373 struct ma_state *mas = wr_mas->mas;
4375 wr_mas->content = mas_start(mas);
4376 if (mas_is_none(mas) || mas_is_ptr(mas)) {
4377 mas_store_root(mas, wr_mas->entry);
4378 return wr_mas->content;
4381 if (unlikely(!mas_wr_walk(wr_mas))) {
4382 mas_wr_spanning_store(wr_mas);
4383 return wr_mas->content;
4386 /* At this point, we are at the leaf node that needs to be altered. */
4387 mas_wr_end_piv(wr_mas);
4390 mas_wr_extend_null(wr_mas);
4392 /* New root for a single pointer */
4393 if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
4394 mas_new_root(mas, wr_mas->entry);
4395 return wr_mas->content;
4398 mas_wr_modify(wr_mas);
4399 return wr_mas->content;
4403 * mas_insert() - Internal call to insert a value
4404 * @mas: The maple state
4405 * @entry: The entry to store
4407 * Return: %NULL on success, otherwise the contents that already exist at the
4408 * requested index. The maple state needs to be checked for error conditions.
4410 static inline void *mas_insert(struct ma_state *mas, void *entry)
4412 MA_WR_STATE(wr_mas, mas, entry);
4415 * Inserting a new range inserts either 0, 1, or 2 pivots within the
4416 * tree. If the insert fits exactly into an existing gap with a value
4417 * of NULL, then the slot only needs to be written with the new value.
4418 * If the range being inserted is adjacent to another range, then only a
4419 * single pivot needs to be inserted (as well as writing the entry). If
4420 * the new range is within a gap but does not touch any other ranges,
4421 * then two pivots need to be inserted: the start - 1, and the end. As
4422 * usual, the entry must be written. Most operations require a new node
4423 * to be allocated and replace an existing node to ensure RCU safety,
4424 * when in RCU mode. The exception to requiring a newly allocated node
4425 * is when inserting at the end of a node (appending). When done
4426 * carefully, appending can reuse the node in place.
4428 wr_mas.content = mas_start(mas);
4432 if (mas_is_none(mas) || mas_is_ptr(mas)) {
4433 mas_store_root(mas, entry);
4437 /* spanning writes always overwrite something */
4438 if (!mas_wr_walk(&wr_mas))
4441 /* At this point, we are at the leaf node that needs to be altered. */
4442 wr_mas.offset_end = mas->offset;
4443 wr_mas.end_piv = wr_mas.r_max;
4445 if (wr_mas.content || (mas->last > wr_mas.r_max))
4451 mas_wr_modify(&wr_mas);
4452 return wr_mas.content;
4455 mas_set_err(mas, -EEXIST);
4456 return wr_mas.content;
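/*
 * Usage sketch (assumed): the contrast with a store is visible in the
 * public interface, where inserting over an occupied range fails:
 *
 *	if (mtree_insert_range(&tree, 5, 10, ptr, GFP_KERNEL) == -EEXIST)
 *		pr_debug("range already populated\n");
 */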
4460 static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
4463 mas_set(mas, index);
4464 mas_state_walk(mas);
4465 if (mas_is_start(mas))
4469 static inline bool mas_rewalk_if_dead(struct ma_state *mas,
4470 struct maple_node *node, const unsigned long index)
4472 if (unlikely(ma_dead_node(node))) {
4473 mas_rewalk(mas, index);
4480 * mas_prev_node() - Find the prev non-null entry at the same level in the
4481 * tree.
4482 * @mas: The maple state
4483 * @min: The lower limit to search
4485 * The prev node value will be mas->node[mas->offset] or MAS_NONE.
4486 * Return: 1 if the node is dead, 0 otherwise.
4488 static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
4493 struct maple_node *node;
4494 unsigned long *pivots;
4507 if (ma_is_root(node))
4511 if (unlikely(mas_ascend(mas)))
4513 offset = mas->offset;
4519 mt = mte_node_type(mas->node);
4522 slots = ma_slots(node, mt);
4523 mas->node = mas_slot(mas, slots, offset);
4524 if (unlikely(ma_dead_node(node)))
4527 mt = mte_node_type(mas->node);
4529 pivots = ma_pivots(node, mt);
4530 offset = ma_data_end(node, mt, pivots, max);
4531 if (unlikely(ma_dead_node(node)))
4535 slots = ma_slots(node, mt);
4536 mas->node = mas_slot(mas, slots, offset);
4537 pivots = ma_pivots(node, mt);
4538 if (unlikely(ma_dead_node(node)))
4542 mas->min = pivots[offset - 1] + 1;
4544 mas->offset = mas_data_end(mas);
4545 if (unlikely(mte_dead_node(mas->node)))
4551 if (unlikely(ma_dead_node(node)))
4554 mas->node = MAS_NONE;
4559 * mas_prev_slot() - Get the entry in the previous slot
4561 * @mas: The maple state
 * @min: The minimum starting range
 * @empty: Can be empty
4564 * Return: The entry in the previous slot which is possibly NULL
4566 static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty)
4570 unsigned long pivot;
4571 enum maple_type type;
4572 unsigned long *pivots;
4573 struct maple_node *node;
4574 unsigned long save_point = mas->index;
4578 type = mte_node_type(mas->node);
4579 pivots = ma_pivots(node, type);
4580 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4584 if (mas->min <= min) {
4585 pivot = mas_safe_min(mas, pivots, mas->offset);
4587 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4594 if (likely(mas->offset)) {
4596 mas->last = mas->index - 1;
4597 mas->index = mas_safe_min(mas, pivots, mas->offset);
4599 if (mas_prev_node(mas, min)) {
4600 mas_rewalk(mas, save_point);
4604 if (mas_is_none(mas))
4607 mas->last = mas->max;
4609 type = mte_node_type(mas->node);
4610 pivots = ma_pivots(node, type);
4611 mas->index = pivots[mas->offset - 1] + 1;
4614 slots = ma_slots(node, type);
4615 entry = mas_slot(mas, slots, mas->offset);
4616 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4629 * mas_next_node() - Get the next node at the same level in the tree.
4630 * @mas: The maple state
 * @node: The maple node
 * @max: The maximum pivot value to check.
4633 * The next value will be mas->node[mas->offset] or MAS_NONE.
4634 * Return: 1 on dead node, 0 otherwise.
4636 static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
4640 unsigned long *pivots;
4641 struct maple_enode *enode;
4643 unsigned char node_end;
4647 if (mas->max >= max)
4653 if (ma_is_root(node))
4657 if (unlikely(mas_ascend(mas)))
4662 mt = mte_node_type(mas->node);
4663 pivots = ma_pivots(node, mt);
4664 node_end = ma_data_end(node, mt, pivots, mas->max);
4665 if (unlikely(ma_dead_node(node)))
4668 } while (unlikely(mas->offset == node_end));
4670 slots = ma_slots(node, mt);
4672 enode = mas_slot(mas, slots, mas->offset);
4673 if (unlikely(ma_dead_node(node)))
4679 while (unlikely(level > 1)) {
4683 mt = mte_node_type(mas->node);
4684 slots = ma_slots(node, mt);
4685 enode = mas_slot(mas, slots, 0);
4686 if (unlikely(ma_dead_node(node)))
4691 pivots = ma_pivots(node, mt);
4693 mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt);
4694 if (unlikely(ma_dead_node(node)))
4702 if (unlikely(ma_dead_node(node)))
4705 mas->node = MAS_NONE;
4710 * mas_next_slot() - Get the entry in the next slot
4712 * @mas: The maple state
4713 * @max: The maximum starting range
4714 * @empty: Can be empty
4716 * Return: The entry in the next slot which is possibly NULL
4718 static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty)
4721 unsigned long *pivots;
4722 unsigned long pivot;
4723 enum maple_type type;
4724 struct maple_node *node;
4725 unsigned char data_end;
4726 unsigned long save_point = mas->last;
4731 type = mte_node_type(mas->node);
4732 pivots = ma_pivots(node, type);
4733 data_end = ma_data_end(node, type, pivots, mas->max);
4734 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4738 if (mas->max >= max) {
4739 if (likely(mas->offset < data_end))
4740 pivot = pivots[mas->offset];
4742 return NULL; /* must be mas->max */
4744 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4751 if (likely(mas->offset < data_end)) {
4752 mas->index = pivots[mas->offset] + 1;
4754 if (likely(mas->offset < data_end))
4755 mas->last = pivots[mas->offset];
4757 mas->last = mas->max;
4759 if (mas_next_node(mas, node, max)) {
4760 mas_rewalk(mas, save_point);
4764 if (mas_is_none(mas))
4768 mas->index = mas->min;
4770 type = mte_node_type(mas->node);
4771 pivots = ma_pivots(node, type);
4772 mas->last = pivots[0];
4775 slots = ma_slots(node, type);
4776 entry = mt_slot(mas->tree, slots, mas->offset);
4777 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4793 * mas_next_entry() - Internal function to get the next entry.
4794 * @mas: The maple state
4795 * @limit: The maximum range start.
4797 * Set the @mas->node to the next entry and the range_start to
4798 * the beginning value for the entry. Does not check beyond @limit.
4799 * Sets @mas->index and @mas->last to the limit if it is hit.
4800 * Restarts on dead nodes.
4802 * Return: the next entry or %NULL.
4804 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
4806 if (mas->last >= limit)
4809 return mas_next_slot(mas, limit, false);
4813 * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the
4814 * highest gap address of a given size in a given node and descend.
4815 * @mas: The maple state
 * @size: The needed size.
 * @gap_min: Pointer set to the minimum of the found gap
 * @gap_max: Pointer set to the maximum of the found gap
4818 * Return: True if found in a leaf, false otherwise.
4821 static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
4822 unsigned long *gap_min, unsigned long *gap_max)
4824 enum maple_type type = mte_node_type(mas->node);
4825 struct maple_node *node = mas_mn(mas);
4826 unsigned long *pivots, *gaps;
4828 unsigned long gap = 0;
4829 unsigned long max, min;
4830 unsigned char offset;
4832 if (unlikely(mas_is_err(mas)))
4835 if (ma_is_dense(type)) {
4837 mas->offset = (unsigned char)(mas->index - mas->min);
4841 pivots = ma_pivots(node, type);
4842 slots = ma_slots(node, type);
4843 gaps = ma_gaps(node, type);
4844 offset = mas->offset;
4845 min = mas_safe_min(mas, pivots, offset);
4846 /* Skip out of bounds. */
4847 while (mas->last < min)
4848 min = mas_safe_min(mas, pivots, --offset);
4850 max = mas_safe_pivot(mas, pivots, offset, type);
4851 while (mas->index <= max) {
4855 else if (!mas_slot(mas, slots, offset))
4856 gap = max - min + 1;
4859 if ((size <= gap) && (size <= mas->last - min + 1))
4863 /* Skip the next slot, it cannot be a gap. */
4868 max = pivots[offset];
4869 min = mas_safe_min(mas, pivots, offset);
4879 min = mas_safe_min(mas, pivots, offset);
4882 if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
4885 if (unlikely(ma_is_leaf(type))) {
4886 mas->offset = offset;
4888 *gap_max = min + gap - 1;
4892 /* descend, only happens under lock. */
4893 mas->node = mas_slot(mas, slots, offset);
4896 mas->offset = mas_data_end(mas);
4900 if (!mte_is_root(mas->node))
4904 mas_set_err(mas, -EBUSY);
4908 static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
4910 enum maple_type type = mte_node_type(mas->node);
4911 unsigned long pivot, min, gap = 0;
4912 unsigned char offset, data_end;
4913 unsigned long *gaps, *pivots;
4915 struct maple_node *node;
4918 if (ma_is_dense(type)) {
4919 mas->offset = (unsigned char)(mas->index - mas->min);
4924 pivots = ma_pivots(node, type);
4925 slots = ma_slots(node, type);
4926 gaps = ma_gaps(node, type);
4927 offset = mas->offset;
4928 min = mas_safe_min(mas, pivots, offset);
4929 data_end = ma_data_end(node, type, pivots, mas->max);
4930 for (; offset <= data_end; offset++) {
4931 pivot = mas_logical_pivot(mas, pivots, offset, type);
4933 /* Not within lower bounds */
4934 if (mas->index > pivot)
4939 else if (!mas_slot(mas, slots, offset))
4940 gap = min(pivot, mas->last) - max(mas->index, min) + 1;
4945 if (ma_is_leaf(type)) {
4949 if (mas->index <= pivot) {
4950 mas->node = mas_slot(mas, slots, offset);
4959 if (mas->last <= pivot) {
4960 mas_set_err(mas, -EBUSY);
4965 if (mte_is_root(mas->node))
4968 mas->offset = offset;
4973 * mas_walk() - Search for @mas->index in the tree.
4974 * @mas: The maple state.
4976 * mas->index and mas->last will be set to the range if there is a value. If
4977 * mas->node is MAS_NONE, reset to MAS_START.
4979 * Return: the entry at the location or %NULL.
4981 void *mas_walk(struct ma_state *mas)
4985 if (mas_is_none(mas) || mas_is_paused(mas) || mas_is_ptr(mas))
4986 mas->node = MAS_START;
4988 entry = mas_state_walk(mas);
4989 if (mas_is_start(mas)) {
4991 } else if (mas_is_none(mas)) {
4993 mas->last = ULONG_MAX;
4994 } else if (mas_is_ptr(mas)) {
5001 mas->last = ULONG_MAX;
5002 mas->node = MAS_NONE;
5008 EXPORT_SYMBOL_GPL(mas_walk);
5010 static inline bool mas_rewind_node(struct ma_state *mas)
5015 if (mte_is_root(mas->node)) {
5025 mas->offset = --slot;
5030 * mas_skip_node() - Internal function. Skip over a node.
5031 * @mas: The maple state.
5033 * Return: true if there is another node, false otherwise.
5035 static inline bool mas_skip_node(struct ma_state *mas)
5037 if (mas_is_err(mas))
5041 if (mte_is_root(mas->node)) {
5042 if (mas->offset >= mas_data_end(mas)) {
5043 mas_set_err(mas, -EBUSY);
5049 } while (mas->offset >= mas_data_end(mas));
 * mas_awalk() - Allocation walk. Search from low address to high, for a gap
 * of @size.
5058 * @mas: The maple state
5059 * @size: The size of the gap required
5061 * Search between @mas->index and @mas->last for a gap of @size.
5063 static inline void mas_awalk(struct ma_state *mas, unsigned long size)
5065 struct maple_enode *last = NULL;
5068 * There are 4 options:
5069 * go to child (descend)
5070 * go back to parent (ascend)
5071 * no gap found. (return, slot == MAPLE_NODE_SLOTS)
5072 * found the gap. (return, slot != MAPLE_NODE_SLOTS)
5074 while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
5075 if (last == mas->node)
5083 * mas_sparse_area() - Internal function. Return upper or lower limit when
5084 * searching for a gap in an empty tree.
5085 * @mas: The maple state
5086 * @min: the minimum range
5087 * @max: The maximum range
5088 * @size: The size of the gap
5089 * @fwd: Searching forward or back
5091 static inline int mas_sparse_area(struct ma_state *mas, unsigned long min,
5092 unsigned long max, unsigned long size, bool fwd)
5094 if (!unlikely(mas_is_none(mas)) && min == 0) {
 * min was just increased, so recheck whether a gap of the
 * requested size still fits.
5100 if (min > max || max - min + 1 < size)
5107 mas->last = min + size - 1;
5110 mas->index = max - size + 1;
5116 * mas_empty_area() - Get the lowest address within the range that is
5117 * sufficient for the size requested.
5118 * @mas: The maple state
5119 * @min: The lowest value of the range
5120 * @max: The highest value of the range
5121 * @size: The size needed
5123 int mas_empty_area(struct ma_state *mas, unsigned long min,
5124 unsigned long max, unsigned long size)
5126 unsigned char offset;
5127 unsigned long *pivots;
5133 if (size == 0 || max - min < size - 1)
5136 if (mas_is_start(mas))
5138 else if (mas->offset >= 2)
5140 else if (!mas_skip_node(mas))
5144 if (mas_is_none(mas) || mas_is_ptr(mas))
5145 return mas_sparse_area(mas, min, max, size, true);
5147 /* The start of the window can only be within these values */
5150 mas_awalk(mas, size);
5152 if (unlikely(mas_is_err(mas)))
5153 return xa_err(mas->node);
5155 offset = mas->offset;
5156 if (unlikely(offset == MAPLE_NODE_SLOTS))
5159 mt = mte_node_type(mas->node);
5160 pivots = ma_pivots(mas_mn(mas), mt);
5161 min = mas_safe_min(mas, pivots, offset);
5162 if (mas->index < min)
5164 mas->last = mas->index + size - 1;
5167 EXPORT_SYMBOL_GPL(mas_empty_area);
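/*
 * Usage sketch for mas_empty_area() (illustrative, not from the original
 * source): find the lowest free range of 0x1000 slots in an allocation tree
 * (MT_FLAGS_ALLOC_RANGE) and claim it. Assumes the caller holds the tree
 * lock; ptr stands in for any non-reserved pointer.
 *
 *	MA_STATE(mas, mt, 0, 0);
 *
 *	if (!mas_empty_area(&mas, 0, ULONG_MAX, 0x1000))
 *		mas_store_gfp(&mas, ptr, GFP_KERNEL);
 */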
5170 * mas_empty_area_rev() - Get the highest address within the range that is
5171 * sufficient for the size requested.
5172 * @mas: The maple state
5173 * @min: The lowest value of the range
5174 * @max: The highest value of the range
5175 * @size: The size needed
5177 int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
5178 unsigned long max, unsigned long size)
5180 struct maple_enode *last = mas->node;
5185 if (size == 0 || max - min < size - 1)
5188 if (mas_is_start(mas)) {
5190 mas->offset = mas_data_end(mas);
5191 } else if (mas->offset >= 2) {
5193 } else if (!mas_rewind_node(mas)) {
5198 if (mas_is_none(mas) || mas_is_ptr(mas))
5199 return mas_sparse_area(mas, min, max, size, false);
5201 /* The start of the window can only be within these values. */
5205 while (!mas_rev_awalk(mas, size, &min, &max)) {
5206 if (last == mas->node) {
5207 if (!mas_rewind_node(mas))
5214 if (mas_is_err(mas))
5215 return xa_err(mas->node);
5217 if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
5220 /* Trim the upper limit to the max. */
5221 if (max < mas->last)
5224 mas->index = mas->last - size + 1;
5227 EXPORT_SYMBOL_GPL(mas_empty_area_rev);
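/*
 * The reverse variant returns the highest fitting range instead
 * (illustrative sketch, same hypothetical setup as above):
 *
 *	MA_STATE(mas, mt, 0, 0);
 *
 *	if (!mas_empty_area_rev(&mas, 0, ULONG_MAX, 0x1000))
 *		mas_store_gfp(&mas, ptr, GFP_KERNEL);
 */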
5230 * mte_dead_leaves() - Mark all leaves of a node as dead.
 * @enode: The encoded maple node
 * @mt: The maple tree
 * @slots: Pointer to the slot array
5235 * Must hold the write lock.
5237 * Return: The number of leaves marked as dead.
5240 unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
5243 struct maple_node *node;
5244 enum maple_type type;
5248 for (offset = 0; offset < mt_slot_count(enode); offset++) {
5249 entry = mt_slot(mt, slots, offset);
5250 type = mte_node_type(entry);
5251 node = mte_to_node(entry);
5252 /* Use both node and type to catch LE & BE metadata */
5256 mte_set_node_dead(entry);
5258 rcu_assign_pointer(slots[offset], node);
5265 * mte_dead_walk() - Walk down a dead tree to just before the leaves
5266 * @enode: The maple encoded node
5267 * @offset: The starting offset
5269 * Note: This can only be used from the RCU callback context.
5271 static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset)
5273 struct maple_node *node, *next;
5274 void __rcu **slots = NULL;
5276 next = mte_to_node(*enode);
5278 *enode = ma_enode_ptr(next);
5279 node = mte_to_node(*enode);
5280 slots = ma_slots(node, node->type);
5281 next = rcu_dereference_protected(slots[offset],
5282 lock_is_held(&rcu_callback_map));
5284 } while (!ma_is_leaf(next->type));
5290 * mt_free_walk() - Walk & free a tree in the RCU callback context
5291 * @head: The RCU head that's within the node.
5293 * Note: This can only be used from the RCU callback context.
5295 static void mt_free_walk(struct rcu_head *head)
5298 struct maple_node *node, *start;
5299 struct maple_enode *enode;
5300 unsigned char offset;
5301 enum maple_type type;
5303 node = container_of(head, struct maple_node, rcu);
5305 if (ma_is_leaf(node->type))
5309 enode = mt_mk_node(node, node->type);
5310 slots = mte_dead_walk(&enode, 0);
5311 node = mte_to_node(enode);
5313 mt_free_bulk(node->slot_len, slots);
5314 offset = node->parent_slot + 1;
5315 enode = node->piv_parent;
5316 if (mte_to_node(enode) == node)
5319 type = mte_node_type(enode);
5320 slots = ma_slots(mte_to_node(enode), type);
5321 if ((offset < mt_slots[type]) &&
5322 rcu_dereference_protected(slots[offset],
5323 lock_is_held(&rcu_callback_map)))
5324 slots = mte_dead_walk(&enode, offset);
5325 node = mte_to_node(enode);
5326 } while ((node != start) || (node->slot_len < offset));
5328 slots = ma_slots(node, node->type);
5329 mt_free_bulk(node->slot_len, slots);
5332 mt_free_rcu(&node->rcu);
5335 static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
5336 struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
5338 struct maple_node *node;
5339 struct maple_enode *next = *enode;
5340 void __rcu **slots = NULL;
5341 enum maple_type type;
5342 unsigned char next_offset = 0;
5346 node = mte_to_node(*enode);
5347 type = mte_node_type(*enode);
5348 slots = ma_slots(node, type);
5349 next = mt_slot_locked(mt, slots, next_offset);
5350 if ((mte_dead_node(next)))
5351 next = mt_slot_locked(mt, slots, ++next_offset);
5353 mte_set_node_dead(*enode);
5355 node->piv_parent = prev;
5356 node->parent_slot = offset;
5357 offset = next_offset;
5360 } while (!mte_is_leaf(next));
5365 static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
5369 struct maple_node *node = mte_to_node(enode);
5370 struct maple_enode *start;
5372 if (mte_is_leaf(enode)) {
5373 node->type = mte_node_type(enode);
5378 slots = mte_destroy_descend(&enode, mt, start, 0);
node = mte_to_node(enode); /* Updated in the above call. */
5381 enum maple_type type;
5382 unsigned char offset;
5383 struct maple_enode *parent, *tmp;
5385 node->slot_len = mte_dead_leaves(enode, mt, slots);
5387 mt_free_bulk(node->slot_len, slots);
5388 offset = node->parent_slot + 1;
5389 enode = node->piv_parent;
5390 if (mte_to_node(enode) == node)
5393 type = mte_node_type(enode);
5394 slots = ma_slots(mte_to_node(enode), type);
5395 if (offset >= mt_slots[type])
5398 tmp = mt_slot_locked(mt, slots, offset);
5399 if (mte_node_type(tmp) && mte_to_node(tmp)) {
5402 slots = mte_destroy_descend(&enode, mt, parent, offset);
5405 node = mte_to_node(enode);
5406 } while (start != enode);
5408 node = mte_to_node(enode);
5409 node->slot_len = mte_dead_leaves(enode, mt, slots);
5411 mt_free_bulk(node->slot_len, slots);
5415 mt_free_rcu(&node->rcu);
5417 mt_clear_meta(mt, node, node->type);
5421 * mte_destroy_walk() - Free a tree or sub-tree.
5422 * @enode: the encoded maple node (maple_enode) to start
5423 * @mt: the tree to free - needed for node types.
5425 * Must hold the write lock.
5427 static inline void mte_destroy_walk(struct maple_enode *enode,
5428 struct maple_tree *mt)
5430 struct maple_node *node = mte_to_node(enode);
5432 if (mt_in_rcu(mt)) {
5433 mt_destroy_walk(enode, mt, false);
5434 call_rcu(&node->rcu, mt_free_walk);
5436 mt_destroy_walk(enode, mt, true);
5440 static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
5442 if (unlikely(mas_is_paused(wr_mas->mas)))
5443 mas_reset(wr_mas->mas);
5445 if (!mas_is_start(wr_mas->mas)) {
5446 if (mas_is_none(wr_mas->mas)) {
5447 mas_reset(wr_mas->mas);
5449 wr_mas->r_max = wr_mas->mas->max;
5450 wr_mas->type = mte_node_type(wr_mas->mas->node);
5451 if (mas_is_span_wr(wr_mas))
5452 mas_reset(wr_mas->mas);
5460 * mas_store() - Store an @entry.
5461 * @mas: The maple state.
5462 * @entry: The entry to store.
 * The @mas->index and @mas->last are used to set the range for the @entry.
5465 * Note: The @mas should have pre-allocated entries to ensure there is memory to
5466 * store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
5468 * Return: the first entry between mas->index and mas->last or %NULL.
5470 void *mas_store(struct ma_state *mas, void *entry)
5472 MA_WR_STATE(wr_mas, mas, entry);
5474 trace_ma_write(__func__, mas, 0, entry);
5475 #ifdef CONFIG_DEBUG_MAPLE_TREE
5476 if (MAS_WARN_ON(mas, mas->index > mas->last))
5477 pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry);
5479 if (mas->index > mas->last) {
5480 mas_set_err(mas, -EINVAL);
5487 * Storing is the same operation as insert with the added caveat that it
5488 * can overwrite entries. Although this seems simple enough, one may
5489 * want to examine what happens if a single store operation was to
5490 * overwrite multiple entries within a self-balancing B-Tree.
5492 mas_wr_store_setup(&wr_mas);
5493 mas_wr_store_entry(&wr_mas);
5494 return wr_mas.content;
5496 EXPORT_SYMBOL_GPL(mas_store);
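/*
 * Minimal usage sketch for mas_store() (illustrative, not from the original
 * source): overwrite the range [10, 20] under the tree lock. A real caller
 * should preallocate first (see mas_preallocate()/mas_store_prealloc()
 * below) or use mas_store_gfp() to handle allocation failures.
 *
 *	MA_STATE(mas, mt, 10, 20);
 *
 *	mtree_lock(mt);
 *	old = mas_store(&mas, xa_mk_value(42));
 *	mtree_unlock(mt);
 */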
5499 * mas_store_gfp() - Store a value into the tree.
5500 * @mas: The maple state
5501 * @entry: The entry to store
5502 * @gfp: The GFP_FLAGS to use for allocations if necessary.
 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
 * not be allocated.
5507 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
5509 MA_WR_STATE(wr_mas, mas, entry);
5511 mas_wr_store_setup(&wr_mas);
5512 trace_ma_write(__func__, mas, 0, entry);
5514 mas_wr_store_entry(&wr_mas);
5515 if (unlikely(mas_nomem(mas, gfp)))
5518 if (unlikely(mas_is_err(mas)))
5519 return xa_err(mas->node);
5523 EXPORT_SYMBOL_GPL(mas_store_gfp);
5526 * mas_store_prealloc() - Store a value into the tree using memory
5527 * preallocated in the maple state.
5528 * @mas: The maple state
5529 * @entry: The entry to store.
5531 void mas_store_prealloc(struct ma_state *mas, void *entry)
5533 MA_WR_STATE(wr_mas, mas, entry);
5535 mas_wr_store_setup(&wr_mas);
5536 trace_ma_write(__func__, mas, 0, entry);
5537 mas_wr_store_entry(&wr_mas);
5538 MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
5541 EXPORT_SYMBOL_GPL(mas_store_prealloc);
5544 * mas_preallocate() - Preallocate enough nodes for a store operation
5545 * @mas: The maple state
5546 * @gfp: The GFP_FLAGS to use for allocations.
5548 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5550 int mas_preallocate(struct ma_state *mas, gfp_t gfp)
5554 mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp);
5555 mas->mas_flags |= MA_STATE_PREALLOC;
5556 if (likely(!mas_is_err(mas)))
5559 mas_set_alloc_req(mas, 0);
5560 ret = xa_err(mas->node);
5566 EXPORT_SYMBOL_GPL(mas_preallocate);
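/*
 * Illustrative sketch of the preallocation pattern (not from the original
 * source): allocate up front, then perform the store in a context that must
 * not fail. mas_store_prealloc() releases any unused preallocated nodes.
 *
 *	MA_STATE(mas, mt, index, last);
 *
 *	if (mas_preallocate(&mas, GFP_KERNEL))
 *		return -ENOMEM;
 *	mtree_lock(mt);
 *	mas_store_prealloc(&mas, entry);
 *	mtree_unlock(mt);
 */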
5569 * mas_destroy() - destroy a maple state.
5570 * @mas: The maple state
5572 * Upon completion, check the left-most node and rebalance against the node to
 * the right if necessary. Frees any allocated nodes associated with this
 * maple state.
5576 void mas_destroy(struct ma_state *mas)
5578 struct maple_alloc *node;
5579 unsigned long total;
5582 * When using mas_for_each() to insert an expected number of elements,
5583 * it is possible that the number inserted is less than the expected
5584 * number. To fix an invalid final node, a check is performed here to
5585 * rebalance the previous node with the final node.
5587 if (mas->mas_flags & MA_STATE_REBALANCE) {
5591 mtree_range_walk(mas);
5592 end = mas_data_end(mas) + 1;
5593 if (end < mt_min_slot_count(mas->node) - 1)
5594 mas_destroy_rebalance(mas, end);
5596 mas->mas_flags &= ~MA_STATE_REBALANCE;
5598 mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
5600 total = mas_allocated(mas);
5603 mas->alloc = node->slot[0];
5604 if (node->node_count > 1) {
5605 size_t count = node->node_count - 1;
5607 mt_free_bulk(count, (void __rcu **)&node->slot[1]);
5610 kmem_cache_free(maple_node_cache, node);
5616 EXPORT_SYMBOL_GPL(mas_destroy);
5619 * mas_expected_entries() - Set the expected number of entries that will be inserted.
5620 * @mas: The maple state
5621 * @nr_entries: The number of expected entries.
5623 * This will attempt to pre-allocate enough nodes to store the expected number
5624 * of entries. The allocations will occur using the bulk allocator interface
5625 * for speed. Please call mas_destroy() on the @mas after inserting the entries
5626 * to ensure any unused nodes are freed.
5628 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5630 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
5632 int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
5633 struct maple_enode *enode = mas->node;
5638 * Sometimes it is necessary to duplicate a tree to a new tree, such as
5639 * forking a process and duplicating the VMAs from one tree to a new
5640 * tree. When such a situation arises, it is known that the new tree is
5641 * not going to be used until the entire tree is populated. For
5642 * performance reasons, it is best to use a bulk load with RCU disabled.
5643 * This allows for optimistic splitting that favours the left and reuse
5644 * of nodes during the operation.
5647 /* Optimize splitting for bulk insert in-order */
5648 mas->mas_flags |= MA_STATE_BULK;
5651 * Avoid overflow, assume a gap between each entry and a trailing null.
5652 * If this is wrong, it just means allocation can happen during
5653 * insertion of entries.
5655 nr_nodes = max(nr_entries, nr_entries * 2 + 1);
5656 if (!mt_is_alloc(mas->tree))
5657 nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
5659 /* Leaves; reduce slots to keep space for expansion */
5660 nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
5661 /* Internal nodes */
5662 nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
5663 /* Add working room for split (2 nodes) + new parents */
5664 mas_node_count(mas, nr_nodes + 3);
5666 /* Detect if allocations run out */
5667 mas->mas_flags |= MA_STATE_PREALLOC;
5669 if (!mas_is_err(mas))
5672 ret = xa_err(mas->node);
5678 EXPORT_SYMBOL_GPL(mas_expected_entries);
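/*
 * Illustrative bulk-load sketch (not from the original source): insert nr
 * pre-sorted ranges with bulk allocation, then let mas_destroy() rebalance
 * the final node and free any leftover nodes. start[], end[] and entry[]
 * are hypothetical arrays.
 *
 *	MA_STATE(mas, mt, 0, 0);
 *
 *	mtree_lock(mt);
 *	if (!mas_expected_entries(&mas, nr)) {
 *		for (i = 0; i < nr; i++) {
 *			mas_set_range(&mas, start[i], end[i]);
 *			mas_store(&mas, entry[i]);
 *		}
 *	}
 *	mas_destroy(&mas);
 *	mtree_unlock(mt);
 *
 * Worked count, assuming the 64-bit defaults MAPLE_RANGE64_SLOTS == 16 and
 * MAPLE_ARANGE64_SLOTS == 10 on an allocation tree: nr_entries == 100 gives
 * nr_nodes = 201, leaves = DIV_ROUND_UP(201, 14) = 15, internal nodes =
 * DIV_ROUND_UP(15, 8) = 2, so 15 + 2 + 3 = 20 nodes are requested.
 */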
5680 static inline bool mas_next_setup(struct ma_state *mas, unsigned long max,
5683 bool was_none = mas_is_none(mas);
5685 if (mas_is_none(mas) || mas_is_paused(mas))
5686 mas->node = MAS_START;
5688 if (mas_is_start(mas))
5689 *entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
5691 if (mas_is_ptr(mas)) {
5693 if (was_none && mas->index == 0) {
5694 mas->index = mas->last = 0;
5698 mas->last = ULONG_MAX;
5699 mas->node = MAS_NONE;
5703 if (mas_is_none(mas))
5709 * mas_next() - Get the next entry.
5710 * @mas: The maple state
5711 * @max: The maximum index to check.
5713 * Returns the next entry after @mas->index.
5714 * Must hold rcu_read_lock or the write lock.
5715 * Can return the zero entry.
5717 * Return: The next entry or %NULL
5719 void *mas_next(struct ma_state *mas, unsigned long max)
5723 if (mas_next_setup(mas, max, &entry))
5726 /* Retries on dead nodes handled by mas_next_slot */
5727 return mas_next_slot(mas, max, false);
5729 EXPORT_SYMBOL_GPL(mas_next);
5732 * mas_next_range() - Advance the maple state to the next range
5733 * @mas: The maple state
5734 * @max: The maximum index to check.
5736 * Sets @mas->index and @mas->last to the range.
5737 * Must hold rcu_read_lock or the write lock.
5738 * Can return the zero entry.
5740 * Return: The next entry or %NULL
5742 void *mas_next_range(struct ma_state *mas, unsigned long max)
5746 if (mas_next_setup(mas, max, &entry))
5749 /* Retries on dead nodes handled by mas_next_slot */
5750 return mas_next_slot(mas, max, true);
5752 EXPORT_SYMBOL_GPL(mas_next_range);
5755 * mt_next() - get the next value in the maple tree
5756 * @mt: The maple tree
5757 * @index: The start index
5758 * @max: The maximum index to check
5760 * Return: The entry at @index or higher, or %NULL if nothing is found.
5762 void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
5765 MA_STATE(mas, mt, index, index);
5768 entry = mas_next(&mas, max);
5772 EXPORT_SYMBOL_GPL(mt_next);
5774 static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
5777 if (mas->index <= min)
5780 if (mas_is_none(mas) || mas_is_paused(mas))
5781 mas->node = MAS_START;
5783 if (mas_is_start(mas)) {
5789 if (unlikely(mas_is_ptr(mas))) {
5792 mas->index = mas->last = 0;
5793 *entry = mas_root(mas);
5797 if (mas_is_none(mas)) {
5799 /* Walked to out-of-range pointer? */
5800 mas->index = mas->last = 0;
5801 mas->node = MAS_ROOT;
5802 *entry = mas_root(mas);
5811 mas->node = MAS_NONE;
5816 * mas_prev() - Get the previous entry
5817 * @mas: The maple state
5818 * @min: The minimum value to check.
5820 * Must hold rcu_read_lock or the write lock.
 * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not
 * searchable nodes.
5824 * Return: the previous value or %NULL.
5826 void *mas_prev(struct ma_state *mas, unsigned long min)
5830 if (mas_prev_setup(mas, min, &entry))
5833 return mas_prev_slot(mas, min, false);
5835 EXPORT_SYMBOL_GPL(mas_prev);
5838 * mas_prev_range() - Advance to the previous range
5839 * @mas: The maple state
5840 * @min: The minimum value to check.
5842 * Sets @mas->index and @mas->last to the range.
5843 * Must hold rcu_read_lock or the write lock.
 * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not
 * searchable nodes.
5847 * Return: the previous value or %NULL.
5849 void *mas_prev_range(struct ma_state *mas, unsigned long min)
5853 if (mas_prev_setup(mas, min, &entry))
5856 return mas_prev_slot(mas, min, true);
5858 EXPORT_SYMBOL_GPL(mas_prev_range);
5861 * mt_prev() - get the previous value in the maple tree
5862 * @mt: The maple tree
5863 * @index: The start index
5864 * @min: The minimum index to check
5866 * Return: The entry at @index or lower, or %NULL if nothing is found.
5868 void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
5871 MA_STATE(mas, mt, index, index);
5874 entry = mas_prev(&mas, min);
5878 EXPORT_SYMBOL_GPL(mt_prev);
5881 * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
5882 * @mas: The maple state to pause
5884 * Some users need to pause a walk and drop the lock they're holding in
5885 * order to yield to a higher priority thread or carry out an operation
5886 * on an entry. Those users should call this function before they drop
5887 * the lock. It resets the @mas to be suitable for the next iteration
5888 * of the loop after the user has reacquired the lock. If most entries
5889 * found during a walk require you to call mas_pause(), the mt_for_each()
5890 * iterator may be more appropriate.
5893 void mas_pause(struct ma_state *mas)
5895 mas->node = MAS_PAUSE;
5897 EXPORT_SYMBOL_GPL(mas_pause);
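/*
 * Illustrative pause/resume sketch (not from the original source): walk all
 * entries, dropping the RCU lock when rescheduling is needed. process() is
 * a stand-in for the caller's work.
 *
 *	MA_STATE(mas, mt, 0, 0);
 *
 *	rcu_read_lock();
 *	mas_for_each(&mas, entry, ULONG_MAX) {
 *		if (need_resched()) {
 *			mas_pause(&mas);
 *			rcu_read_unlock();
 *			cond_resched();
 *			rcu_read_lock();
 *		}
 *		process(entry);
 *	}
 *	rcu_read_unlock();
 */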
5900 * mas_find_setup() - Internal function to set up mas_find*().
5901 * @mas: The maple state
5902 * @max: The maximum index
5903 * @entry: Pointer to the entry
 * Return: True if entry is the answer, false otherwise.
5907 static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
5912 if (unlikely(mas_is_none(mas))) {
5913 if (unlikely(mas->last >= max))
5916 mas->index = mas->last;
5917 mas->node = MAS_START;
5918 } else if (unlikely(mas_is_paused(mas))) {
5919 if (unlikely(mas->last >= max))
5922 mas->node = MAS_START;
5923 mas->index = ++mas->last;
5924 } else if (unlikely(mas_is_ptr(mas)))
5925 goto ptr_out_of_range;
5927 if (unlikely(mas_is_start(mas))) {
5928 /* First run or continue */
5929 if (mas->index > max)
5932 *entry = mas_walk(mas);
5938 if (unlikely(!mas_searchable(mas))) {
5939 if (unlikely(mas_is_ptr(mas)))
5940 goto ptr_out_of_range;
5945 if (mas->index == max)
5951 mas->node = MAS_NONE;
5953 mas->last = ULONG_MAX;
5958 * mas_find() - On the first call, find the entry at or after mas->index up to
5959 * %max. Otherwise, find the entry after mas->index.
5960 * @mas: The maple state
5961 * @max: The maximum value to check.
5963 * Must hold rcu_read_lock or the write lock.
5964 * If an entry exists, last and index are updated accordingly.
5965 * May set @mas->node to MAS_NONE.
5967 * Return: The entry or %NULL.
5969 void *mas_find(struct ma_state *mas, unsigned long max)
5973 if (mas_find_setup(mas, max, &entry))
5976 /* Retries on dead nodes handled by mas_next_slot */
5977 return mas_next_slot(mas, max, false);
5979 EXPORT_SYMBOL_GPL(mas_find);
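/*
 * mas_find() is the basis of the mas_for_each() iterator. Illustrative
 * sketch (not from the original source): print every entry with an index
 * no higher than 30.
 *
 *	MA_STATE(mas, mt, 0, 0);
 *
 *	rcu_read_lock();
 *	while ((entry = mas_find(&mas, 30)) != NULL)
 *		pr_info("[%lx-%lx] -> %p\n", mas.index, mas.last, entry);
 *	rcu_read_unlock();
 */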
5982 * mas_find_range() - On the first call, find the entry at or after
 * mas->index up to %max. Otherwise, advance to the next slot after
 * mas->index.
5984 * @mas: The maple state
5985 * @max: The maximum value to check.
5987 * Must hold rcu_read_lock or the write lock.
5988 * If an entry exists, last and index are updated accordingly.
5989 * May set @mas->node to MAS_NONE.
5991 * Return: The entry or %NULL.
5993 void *mas_find_range(struct ma_state *mas, unsigned long max)
5997 if (mas_find_setup(mas, max, &entry))
6000 /* Retries on dead nodes handled by mas_next_slot */
6001 return mas_next_slot(mas, max, true);
6003 EXPORT_SYMBOL_GPL(mas_find_range);
6006 * mas_find_rev_setup() - Internal function to set up mas_find_*_rev()
6007 * @mas: The maple state
6008 * @min: The minimum index
6009 * @entry: Pointer to the entry
 * Return: True if entry is the answer, false otherwise.
6013 static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
6018 if (unlikely(mas_is_none(mas))) {
6019 if (mas->index <= min)
6022 mas->last = mas->index;
6023 mas->node = MAS_START;
6026 if (unlikely(mas_is_paused(mas))) {
6027 if (unlikely(mas->index <= min)) {
6028 mas->node = MAS_NONE;
6031 mas->node = MAS_START;
6032 mas->last = --mas->index;
6035 if (unlikely(mas_is_start(mas))) {
6036 /* First run or continue */
6037 if (mas->index < min)
6040 *entry = mas_walk(mas);
6045 if (unlikely(!mas_searchable(mas))) {
6046 if (mas_is_ptr(mas))
6049 if (mas_is_none(mas)) {
6051 * Walked to the location, and there was nothing so the
6052 * previous location is 0.
6054 mas->last = mas->index = 0;
6055 mas->node = MAS_ROOT;
6056 *entry = mas_root(mas);
6061 if (mas->index < min)
6067 mas->node = MAS_NONE;
 * mas_find_rev() - On the first call, find the first non-null entry at or
 * below mas->index down to %min. Otherwise, find the first non-null entry
 * below mas->index down to %min.
6075 * @mas: The maple state
6076 * @min: The minimum value to check.
6078 * Must hold rcu_read_lock or the write lock.
6079 * If an entry exists, last and index are updated accordingly.
6080 * May set @mas->node to MAS_NONE.
6082 * Return: The entry or %NULL.
6084 void *mas_find_rev(struct ma_state *mas, unsigned long min)
6088 if (mas_find_rev_setup(mas, min, &entry))
6091 /* Retries on dead nodes handled by mas_prev_slot */
6092 return mas_prev_slot(mas, min, false);
6095 EXPORT_SYMBOL_GPL(mas_find_rev);
 * mas_find_range_rev() - On the first call, find the first non-null entry
 * at or below mas->index down to %min. Otherwise, advance to the previous
 * slot before mas->index, down to %min.
6101 * @mas: The maple state
6102 * @min: The minimum value to check.
6104 * Must hold rcu_read_lock or the write lock.
6105 * If an entry exists, last and index are updated accordingly.
6106 * May set @mas->node to MAS_NONE.
6108 * Return: The entry or %NULL.
6110 void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
6114 if (mas_find_rev_setup(mas, min, &entry))
6117 /* Retries on dead nodes handled by mas_prev_slot */
6118 return mas_prev_slot(mas, min, true);
6120 EXPORT_SYMBOL_GPL(mas_find_range_rev);
 * mas_erase() - Find the range in which index resides and erase the entire
 * range.
6125 * @mas: The maple state
6127 * Must hold the write lock.
6128 * Searches for @mas->index, sets @mas->index and @mas->last to the range and
6129 * erases that range.
6131 * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
6133 void *mas_erase(struct ma_state *mas)
6136 MA_WR_STATE(wr_mas, mas, NULL);
6138 if (mas_is_none(mas) || mas_is_paused(mas))
6139 mas->node = MAS_START;
6141 /* Retry unnecessary when holding the write lock. */
6142 entry = mas_state_walk(mas);
6147 /* Must reset to ensure spanning writes of last slot are detected */
6149 mas_wr_store_setup(&wr_mas);
6150 mas_wr_store_entry(&wr_mas);
6151 if (mas_nomem(mas, GFP_KERNEL))
6156 EXPORT_SYMBOL_GPL(mas_erase);
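/*
 * Illustrative sketch (not from the original source): erase whichever range
 * contains index 12, under the write lock.
 *
 *	MA_STATE(mas, mt, 12, 12);
 *
 *	mtree_lock(mt);
 *	old = mas_erase(&mas);
 *	mtree_unlock(mt);
 */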
6159 * mas_nomem() - Check if there was an error allocating and do the allocation
 * if necessary. If there are allocations, then free them.
6161 * @mas: The maple state
6162 * @gfp: The GFP_FLAGS to use for allocations
6163 * Return: true on allocation, false otherwise.
6165 bool mas_nomem(struct ma_state *mas, gfp_t gfp)
6166 __must_hold(mas->tree->ma_lock)
6168 if (likely(mas->node != MA_ERROR(-ENOMEM))) {
6173 if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
6174 mtree_unlock(mas->tree);
6175 mas_alloc_nodes(mas, gfp);
6176 mtree_lock(mas->tree);
6178 mas_alloc_nodes(mas, gfp);
6181 if (!mas_allocated(mas))
6184 mas->node = MAS_START;
6188 void __init maple_tree_init(void)
6190 maple_node_cache = kmem_cache_create("maple_node",
6191 sizeof(struct maple_node), sizeof(struct maple_node),
6196 * mtree_load() - Load a value stored in a maple tree
6197 * @mt: The maple tree
6198 * @index: The index to load
6200 * Return: the entry or %NULL
6202 void *mtree_load(struct maple_tree *mt, unsigned long index)
6204 MA_STATE(mas, mt, index, index);
6207 trace_ma_read(__func__, &mas);
6210 entry = mas_start(&mas);
6211 if (unlikely(mas_is_none(&mas)))
6214 if (unlikely(mas_is_ptr(&mas))) {
6221 entry = mtree_lookup_walk(&mas);
6222 if (!entry && unlikely(mas_is_start(&mas)))
6226 if (xa_is_zero(entry))
6231 EXPORT_SYMBOL(mtree_load);
6234 * mtree_store_range() - Store an entry at a given range.
6235 * @mt: The maple tree
6236 * @index: The start of the range
6237 * @last: The end of the range
6238 * @entry: The entry to store
6239 * @gfp: The GFP_FLAGS to use for allocations
 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
 * not be allocated.
6244 int mtree_store_range(struct maple_tree *mt, unsigned long index,
6245 unsigned long last, void *entry, gfp_t gfp)
6247 MA_STATE(mas, mt, index, last);
6248 MA_WR_STATE(wr_mas, &mas, entry);
6250 trace_ma_write(__func__, &mas, 0, entry);
6251 if (WARN_ON_ONCE(xa_is_advanced(entry)))
6259 mas_wr_store_entry(&wr_mas);
6260 if (mas_nomem(&mas, gfp))
6264 if (mas_is_err(&mas))
6265 return xa_err(mas.node);
6269 EXPORT_SYMBOL(mtree_store_range);
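/*
 * Illustrative sketch (not from the original source): a store/load round
 * trip with the simple, self-locking API.
 *
 *	DEFINE_MTREE(tree);
 *
 *	mtree_store_range(&tree, 10, 20, xa_mk_value(7), GFP_KERNEL);
 *	entry = mtree_load(&tree, 15);	- returns xa_mk_value(7)
 *	entry = mtree_load(&tree, 21);	- returns NULL
 */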
6272 * mtree_store() - Store an entry at a given index.
6273 * @mt: The maple tree
6274 * @index: The index to store the value
6275 * @entry: The entry to store
6276 * @gfp: The GFP_FLAGS to use for allocations
 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
 * not be allocated.
6281 int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
6284 return mtree_store_range(mt, index, index, entry, gfp);
6286 EXPORT_SYMBOL(mtree_store);
 * mtree_insert_range() - Insert an entry at a given range if there is no value.
6290 * @mt: The maple tree
6291 * @first: The start of the range
6292 * @last: The end of the range
6293 * @entry: The entry to store
6294 * @gfp: The GFP_FLAGS to use for allocations.
 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
 * request, -ENOMEM if memory could not be allocated.
6299 int mtree_insert_range(struct maple_tree *mt, unsigned long first,
6300 unsigned long last, void *entry, gfp_t gfp)
6302 MA_STATE(ms, mt, first, last);
6304 if (WARN_ON_ONCE(xa_is_advanced(entry)))
6312 mas_insert(&ms, entry);
6313 if (mas_nomem(&ms, gfp))
6317 if (mas_is_err(&ms))
6318 return xa_err(ms.node);
6322 EXPORT_SYMBOL(mtree_insert_range);
 * mtree_insert() - Insert an entry at a given index if there is no value.
 * @mt: The maple tree
 * @index: The index to store the value
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations.
 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6332 * request, -ENOMEM if memory could not be allocated.
6334 int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
6337 return mtree_insert_range(mt, index, index, entry, gfp);
6339 EXPORT_SYMBOL(mtree_insert);
6341 int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
6342 void *entry, unsigned long size, unsigned long min,
6343 unsigned long max, gfp_t gfp)
6347 MA_STATE(mas, mt, 0, 0);
6348 if (!mt_is_alloc(mt))
6351 if (WARN_ON_ONCE(mt_is_reserved(entry)))
6356 ret = mas_empty_area(&mas, min, max, size);
6360 mas_insert(&mas, entry);
6362 * mas_nomem() may release the lock, causing the allocated area
6363 * to be unavailable, so try to allocate a free area again.
6365 if (mas_nomem(&mas, gfp))
6368 if (mas_is_err(&mas))
6369 ret = xa_err(mas.node);
6371 *startp = mas.index;
6377 EXPORT_SYMBOL(mtree_alloc_range);
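/*
 * Illustrative sketch (not from the original source): carve a 16-slot range
 * out of the first 1001 indices of an allocation tree (created with
 * MT_FLAGS_ALLOC_RANGE).
 *
 *	unsigned long start;
 *
 *	if (!mtree_alloc_range(mt, &start, ptr, 16, 0, 1000, GFP_KERNEL))
 *		pr_info("allocated [%lu, %lu]\n", start, start + 15);
 */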
6379 int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
6380 void *entry, unsigned long size, unsigned long min,
6381 unsigned long max, gfp_t gfp)
6385 MA_STATE(mas, mt, 0, 0);
6386 if (!mt_is_alloc(mt))
6389 if (WARN_ON_ONCE(mt_is_reserved(entry)))
6394 ret = mas_empty_area_rev(&mas, min, max, size);
6398 mas_insert(&mas, entry);
6400 * mas_nomem() may release the lock, causing the allocated area
6401 * to be unavailable, so try to allocate a free area again.
6403 if (mas_nomem(&mas, gfp))
6406 if (mas_is_err(&mas))
6407 ret = xa_err(mas.node);
6409 *startp = mas.index;
6415 EXPORT_SYMBOL(mtree_alloc_rrange);
6418 * mtree_erase() - Find an index and erase the entire range.
6419 * @mt: The maple tree
6420 * @index: The index to erase
6422 * Erasing is the same as a walk to an entry then a store of a NULL to that
6423 * ENTIRE range. In fact, it is implemented as such using the advanced API.
6425 * Return: The entry stored at the @index or %NULL
6427 void *mtree_erase(struct maple_tree *mt, unsigned long index)
6431 MA_STATE(mas, mt, index, index);
6432 trace_ma_op(__func__, &mas);
6435 entry = mas_erase(&mas);
6440 EXPORT_SYMBOL(mtree_erase);
6443 * __mt_destroy() - Walk and free all nodes of a locked maple tree.
6444 * @mt: The maple tree
6446 * Note: Does not handle locking.
6448 void __mt_destroy(struct maple_tree *mt)
6450 void *root = mt_root_locked(mt);
6452 rcu_assign_pointer(mt->ma_root, NULL);
6453 if (xa_is_node(root))
6454 mte_destroy_walk(root, mt);
6458 EXPORT_SYMBOL_GPL(__mt_destroy);
6461 * mtree_destroy() - Destroy a maple tree
6462 * @mt: The maple tree
6464 * Frees all resources used by the tree. Handles locking.
6466 void mtree_destroy(struct maple_tree *mt)
6472 EXPORT_SYMBOL(mtree_destroy);
6475 * mt_find() - Search from the start up until an entry is found.
6476 * @mt: The maple tree
6477 * @index: Pointer which contains the start location of the search
6478 * @max: The maximum value to check
6480 * Handles locking. @index will be incremented to one beyond the range.
6482 * Return: The entry at or after the @index or %NULL
6484 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
6486 MA_STATE(mas, mt, *index, *index);
6488 #ifdef CONFIG_DEBUG_MAPLE_TREE
6489 unsigned long copy = *index;
6492 trace_ma_read(__func__, &mas);
6499 entry = mas_state_walk(&mas);
6500 if (mas_is_start(&mas))
6503 if (unlikely(xa_is_zero(entry)))
6509 while (mas_searchable(&mas) && (mas.last < max)) {
6510 entry = mas_next_entry(&mas, max);
6511 if (likely(entry && !xa_is_zero(entry)))
6515 if (unlikely(xa_is_zero(entry)))
6519 if (likely(entry)) {
6520 *index = mas.last + 1;
6521 #ifdef CONFIG_DEBUG_MAPLE_TREE
6522 if (MT_WARN_ON(mt, (*index) && ((*index) <= copy)))
6523 pr_err("index not increased! %lx <= %lx\n",
6530 EXPORT_SYMBOL(mt_find);
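/*
 * Illustrative sketch (not from the original source): because mt_find()
 * advances @index past each result, it works as a simple cursor; the
 * mt_for_each() iterator is built on mt_find()/mt_find_after().
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	while ((entry = mt_find(mt, &index, ULONG_MAX)) != NULL)
 *		pr_info("%p found, next search starts at %lx\n", entry, index);
 */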
6533 * mt_find_after() - Search from the start up until an entry is found.
6534 * @mt: The maple tree
6535 * @index: Pointer which contains the start location of the search
6536 * @max: The maximum value to check
 * Handles locking, detects wrapping on index == 0.
6540 * Return: The entry at or after the @index or %NULL
6542 void *mt_find_after(struct maple_tree *mt, unsigned long *index,
6548 return mt_find(mt, index, max);
6550 EXPORT_SYMBOL(mt_find_after);
6552 #ifdef CONFIG_DEBUG_MAPLE_TREE
6553 atomic_t maple_tree_tests_run;
6554 EXPORT_SYMBOL_GPL(maple_tree_tests_run);
6555 atomic_t maple_tree_tests_passed;
6556 EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
6559 extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
6560 void mt_set_non_kernel(unsigned int val)
6562 kmem_cache_set_non_kernel(maple_node_cache, val);
6565 extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
6566 unsigned long mt_get_alloc_size(void)
6568 return kmem_cache_get_alloc(maple_node_cache);
6571 extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
6572 void mt_zero_nr_tallocated(void)
6574 kmem_cache_zero_nr_tallocated(maple_node_cache);
6577 extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
6578 unsigned int mt_nr_tallocated(void)
6580 return kmem_cache_nr_tallocated(maple_node_cache);
6583 extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
6584 unsigned int mt_nr_allocated(void)
6586 return kmem_cache_nr_allocated(maple_node_cache);
6590 * mas_dead_node() - Check if the maple state is pointing to a dead node.
6591 * @mas: The maple state
6592 * @index: The index to restore in @mas.
6594 * Used in test code.
6595 * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
6597 static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
6599 if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
6602 if (likely(!mte_dead_node(mas->node)))
6605 mas_rewalk(mas, index);
6609 void mt_cache_shrink(void)
6614 * mt_cache_shrink() - For testing, don't use this.
6616 * Certain testcases can trigger an OOM when combined with other memory
6617 * debugging configuration options. This function is used to reduce the
 * possibility of an out-of-memory event due to kmem_cache objects remaining
6619 * around for longer than usual.
6621 void mt_cache_shrink(void)
6623 kmem_cache_shrink(maple_node_cache);
6626 EXPORT_SYMBOL_GPL(mt_cache_shrink);
6628 #endif /* not defined __KERNEL__ */
6630 * mas_get_slot() - Get the entry in the maple state node stored at @offset.
6631 * @mas: The maple state
6632 * @offset: The offset into the slot array to fetch.
6634 * Return: The entry stored at @offset.
6636 static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
6637 unsigned char offset)
6639 return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
 * mas_first_entry() - Go to the first leaf and find the first entry.
 * @mas: the maple state.
 * @mn: the maple node of @mas->node.
 * @limit: the maximum index to check.
 * @mt: the node type of @mn.
 * Sets mas->offset to the offset of the entry and mas->index to the range
 * minimum.
6652 * Return: The first entry or MAS_NONE.
6654 static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
6655 unsigned long limit, enum maple_type mt)
6659 unsigned long *pivots;
6663 mas->index = mas->min;
6664 if (mas->index > limit)
6669 while (likely(!ma_is_leaf(mt))) {
6670 MAS_WARN_ON(mas, mte_dead_node(mas->node));
6671 slots = ma_slots(mn, mt);
6672 entry = mas_slot(mas, slots, 0);
6673 pivots = ma_pivots(mn, mt);
6674 if (unlikely(ma_dead_node(mn)))
6679 mt = mte_node_type(mas->node);
6681 MAS_WARN_ON(mas, mte_dead_node(mas->node));
6684 slots = ma_slots(mn, mt);
6685 entry = mas_slot(mas, slots, 0);
6686 if (unlikely(ma_dead_node(mn)))
6689 /* Slot 0 or 1 must be set */
6690 if (mas->index > limit)
6697 entry = mas_slot(mas, slots, 1);
6698 pivots = ma_pivots(mn, mt);
6699 if (unlikely(ma_dead_node(mn)))
6702 mas->index = pivots[0] + 1;
6703 if (mas->index > limit)
6710 if (likely(!ma_dead_node(mn)))
6711 mas->node = MAS_NONE;
6715 /* Depth first search, post-order */
6716 static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
6719 struct maple_enode *p = MAS_NONE, *mn = mas->node;
6720 unsigned long p_min, p_max;
6722 mas_next_node(mas, mas_mn(mas), max);
6723 if (!mas_is_none(mas))
6726 if (mte_is_root(mn))
6735 mas_prev_node(mas, 0);
6736 } while (!mas_is_none(mas));
6743 /* Tree validations */
6744 static void mt_dump_node(const struct maple_tree *mt, void *entry,
6745 unsigned long min, unsigned long max, unsigned int depth,
6746 enum mt_dump_format format);
6747 static void mt_dump_range(unsigned long min, unsigned long max,
6748 unsigned int depth, enum mt_dump_format format)
6750 static const char spaces[] = " ";
6755 pr_info("%.*s%lx: ", depth * 2, spaces, min);
6757 pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max);
6762 pr_info("%.*s%lu: ", depth * 2, spaces, min);
6764 pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
6768 static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
6769 unsigned int depth, enum mt_dump_format format)
6771 mt_dump_range(min, max, depth, format);
6773 if (xa_is_value(entry))
6774 pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
6775 xa_to_value(entry), entry);
6776 else if (xa_is_zero(entry))
6777 pr_cont("zero (%ld)\n", xa_to_internal(entry));
6778 else if (mt_is_reserved(entry))
6779 pr_cont("UNKNOWN ENTRY (%p)\n", entry);
6781 pr_cont("%p\n", entry);
6784 static void mt_dump_range64(const struct maple_tree *mt, void *entry,
6785 unsigned long min, unsigned long max, unsigned int depth,
6786 enum mt_dump_format format)
6788 struct maple_range_64 *node = &mte_to_node(entry)->mr64;
6789 bool leaf = mte_is_leaf(entry);
6790 unsigned long first = min;
6793 pr_cont(" contents: ");
6794 for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) {
6797 pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
6801 pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6804 pr_cont("%p\n", node->slot[i]);
6805 for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
6806 unsigned long last = max;
6808 if (i < (MAPLE_RANGE64_SLOTS - 1))
6809 last = node->pivot[i];
6810 else if (!node->slot[i] && max != mt_node_max(entry))
6812 if (last == 0 && i > 0)
6815 mt_dump_entry(mt_slot(mt, node->slot, i),
6816 first, last, depth + 1, format);
6817 else if (node->slot[i])
6818 mt_dump_node(mt, mt_slot(mt, node->slot, i),
6819 first, last, depth + 1, format);
6826 pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
6827 node, last, max, i);
6831 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6832 node, last, max, i);
6839 static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
6840 unsigned long min, unsigned long max, unsigned int depth,
6841 enum mt_dump_format format)
6843 struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
6844 bool leaf = mte_is_leaf(entry);
6845 unsigned long first = min;
6848 pr_cont(" contents: ");
6849 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++)
6850 pr_cont("%lu ", node->gap[i]);
6851 pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
6852 for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++)
6853 pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6854 pr_cont("%p\n", node->slot[i]);
6855 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
6856 unsigned long last = max;
6858 if (i < (MAPLE_ARANGE64_SLOTS - 1))
6859 last = node->pivot[i];
6860 else if (!node->slot[i])
6862 if (last == 0 && i > 0)
6865 mt_dump_entry(mt_slot(mt, node->slot, i),
6866 first, last, depth + 1, format);
6867 else if (node->slot[i])
6868 mt_dump_node(mt, mt_slot(mt, node->slot, i),
6869 first, last, depth + 1, format);
6874 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6875 node, last, max, i);
6882 static void mt_dump_node(const struct maple_tree *mt, void *entry,
6883 unsigned long min, unsigned long max, unsigned int depth,
6884 enum mt_dump_format format)
6886 struct maple_node *node = mte_to_node(entry);
6887 unsigned int type = mte_node_type(entry);
6890 mt_dump_range(min, max, depth, format);
6892 pr_cont("node %p depth %d type %d parent %p", node, depth, type,
6893 node ? node->parent : NULL);
6897 for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
6899 pr_cont("OUT OF RANGE: ");
6900 mt_dump_entry(mt_slot(mt, node->slot, i),
6901 min + i, min + i, depth, format);
6905 case maple_range_64:
6906 mt_dump_range64(mt, entry, min, max, depth, format);
6908 case maple_arange_64:
6909 mt_dump_arange64(mt, entry, min, max, depth, format);
6913 pr_cont(" UNKNOWN TYPE\n");
6917 void mt_dump(const struct maple_tree *mt, enum mt_dump_format format)
6919 void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
6921 pr_info("maple_tree(%p) flags %X, height %u root %p\n",
6922 mt, mt->ma_flags, mt_height(mt), entry);
6923 if (!xa_is_node(entry))
6924 mt_dump_entry(entry, 0, 0, 0, format);
6926 mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format);
6928 EXPORT_SYMBOL_GPL(mt_dump);
6931 * Calculate the maximum gap in a node and check if that's what is reported in
6932 * the parent (unless root).
6934 static void mas_validate_gaps(struct ma_state *mas)
6936 struct maple_enode *mte = mas->node;
6937 struct maple_node *p_mn;
6938 unsigned long gap = 0, max_gap = 0;
6939 unsigned long p_end, p_start = mas->min;
6940 unsigned char p_slot;
6941 unsigned long *gaps = NULL;
6942 unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte));
6945 if (ma_is_dense(mte_node_type(mte))) {
6946 for (i = 0; i < mt_slot_count(mte); i++) {
6947 if (mas_get_slot(mas, i)) {
6958 gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte));
6959 for (i = 0; i < mt_slot_count(mte); i++) {
6960 p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte));
6963 if (mas_get_slot(mas, i)) {
6968 gap += p_end - p_start + 1;
6970 void *entry = mas_get_slot(mas, i);
6974 if (gap != p_end - p_start + 1) {
6975 pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n",
6977 mas_get_slot(mas, i), gap,
6979 mt_dump(mas->tree, mt_dump_hex);
6981 MT_BUG_ON(mas->tree,
6982 gap != p_end - p_start + 1);
6985 if (gap > p_end - p_start + 1) {
6986 pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
6987 mas_mn(mas), i, gap, p_end, p_start,
6988 p_end - p_start + 1);
6989 MT_BUG_ON(mas->tree,
6990 gap > p_end - p_start + 1);
6998 p_start = p_end + 1;
6999 if (p_end >= mas->max)
7004 if (mte_is_root(mte))
7007 p_slot = mte_parent_slot(mas->node);
7008 p_mn = mte_parent(mte);
7009 MT_BUG_ON(mas->tree, max_gap > mas->max);
7010 if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) {
7011 pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
7012 mt_dump(mas->tree, mt_dump_hex);
7015 MT_BUG_ON(mas->tree,
7016 ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap);
7019 static void mas_validate_parent_slot(struct ma_state *mas)
7021 struct maple_node *parent;
7022 struct maple_enode *node;
7023 enum maple_type p_type;
7024 unsigned char p_slot;
7028 if (mte_is_root(mas->node))
7031 p_slot = mte_parent_slot(mas->node);
7032 p_type = mas_parent_type(mas, mas->node);
7033 parent = mte_parent(mas->node);
7034 slots = ma_slots(parent, p_type);
7035 MT_BUG_ON(mas->tree, mas_mn(mas) == parent);
7037 /* Check prev/next parent slot for duplicate node entry */
7039 for (i = 0; i < mt_slots[p_type]; i++) {
7040 node = mas_slot(mas, slots, i);
7042 if (node != mas->node)
7043 pr_err("parent %p[%u] does not have %p\n",
7044 parent, i, mas_mn(mas));
7045 MT_BUG_ON(mas->tree, node != mas->node);
7046 } else if (node == mas->node) {
7047 pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
7048 mas_mn(mas), parent, i, p_slot);
7049 MT_BUG_ON(mas->tree, node == mas->node);
7054 static void mas_validate_child_slot(struct ma_state *mas)
7056 enum maple_type type = mte_node_type(mas->node);
7057 void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7058 unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
7059 struct maple_enode *child;
7062 if (mte_is_leaf(mas->node))
7065 for (i = 0; i < mt_slots[type]; i++) {
7066 child = mas_slot(mas, slots, i);
7067 if (!pivots[i] || pivots[i] == mas->max)
7073 if (mte_parent_slot(child) != i) {
7074 pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
7075 mas_mn(mas), i, mte_to_node(child),
7076 mte_parent_slot(child));
7077 MT_BUG_ON(mas->tree, 1);
7080 if (mte_parent(child) != mte_to_node(mas->node)) {
7081 pr_err("child %p has parent %p not %p\n",
7082 mte_to_node(child), mte_parent(child),
7083 mte_to_node(mas->node));
7084 MT_BUG_ON(mas->tree, 1);
7090 * Validate all pivots are within mas->min and mas->max.
7092 static void mas_validate_limits(struct ma_state *mas)
7095 unsigned long prev_piv = 0;
7096 enum maple_type type = mte_node_type(mas->node);
7097 void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7098 unsigned long *pivots = ma_pivots(mas_mn(mas), type);
7100 /* all limits are fine here. */
7101 if (mte_is_root(mas->node))
7104 for (i = 0; i < mt_slots[type]; i++) {
7107 piv = mas_safe_pivot(mas, pivots, i, type);
7109 if (!piv && (i != 0))
7112 if (!mte_is_leaf(mas->node)) {
7113 void *entry = mas_slot(mas, slots, i);
7116 pr_err("%p[%u] cannot be null\n",
7119 MT_BUG_ON(mas->tree, !entry);
7122 if (prev_piv > piv) {
7123 pr_err("%p[%u] piv %lu < prev_piv %lu\n",
7124 mas_mn(mas), i, piv, prev_piv);
7125 MAS_WARN_ON(mas, piv < prev_piv);
7128 if (piv < mas->min) {
7129 pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
7131 MAS_WARN_ON(mas, piv < mas->min);
7133 if (piv > mas->max) {
7134 pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
7136 MAS_WARN_ON(mas, piv > mas->max);
7139 if (piv == mas->max)
7142 for (i += 1; i < mt_slots[type]; i++) {
7143 void *entry = mas_slot(mas, slots, i);
7145 if (entry && (i != mt_slots[type] - 1)) {
7146 pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
7148 MT_BUG_ON(mas->tree, entry != NULL);
7151 if (i < mt_pivots[type]) {
7152 unsigned long piv = pivots[i];
7157 pr_err("%p[%u] should not have piv %lu\n",
7158 mas_mn(mas), i, piv);
7159 MAS_WARN_ON(mas, i < mt_pivots[type] - 1);
7164 static void mt_validate_nulls(struct maple_tree *mt)
7166 void *entry, *last = (void *)1;
7167 unsigned char offset = 0;
7169 MA_STATE(mas, mt, 0, 0);
7172 if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
7175 while (!mte_is_leaf(mas.node))
7178 slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
7180 entry = mas_slot(&mas, slots, offset);
7181 if (!last && !entry) {
7182 pr_err("Sequential nulls end at %p[%u]\n",
7183 mas_mn(&mas), offset);
7185 MT_BUG_ON(mt, !last && !entry);
7187 if (offset == mas_data_end(&mas)) {
7188 mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
7189 if (mas_is_none(&mas))
7192 slots = ma_slots(mte_to_node(mas.node),
7193 mte_node_type(mas.node));
7198 } while (!mas_is_none(&mas));
7202 * validate a maple tree by checking:
7203 * 1. The limits (pivots are within mas->min to mas->max)
7204 * 2. The gap is correctly set in the parents
7206 void mt_validate(struct maple_tree *mt)
7210 MA_STATE(mas, mt, 0, 0);
7213 if (!mas_searchable(&mas))
7216 mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node));
7217 while (!mas_is_none(&mas)) {
7218 MAS_WARN_ON(&mas, mte_dead_node(mas.node));
7219 if (!mte_is_root(mas.node)) {
7220 end = mas_data_end(&mas);
7221 if (MAS_WARN_ON(&mas,
7222 (end < mt_min_slot_count(mas.node)) &&
7223 (mas.max != ULONG_MAX))) {
7224 pr_err("Invalid size %u of %p\n", end,
7228 mas_validate_parent_slot(&mas);
7229 mas_validate_child_slot(&mas);
7230 mas_validate_limits(&mas);
7231 if (mt_is_alloc(mt))
7232 mas_validate_gaps(&mas);
7233 mas_dfs_postorder(&mas, ULONG_MAX);
7235 mt_validate_nulls(mt);
7240 EXPORT_SYMBOL_GPL(mt_validate);
7242 void mas_dump(const struct ma_state *mas)
7244 pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node);
7245 if (mas_is_none(mas))
7246 pr_err("(MAS_NONE) ");
7247 else if (mas_is_ptr(mas))
7248 pr_err("(MAS_ROOT) ");
7249 else if (mas_is_start(mas))
7250 pr_err("(MAS_START) ");
7251 else if (mas_is_paused(mas))
7252 pr_err("(MAS_PAUSED) ");
7254 pr_err("[%u] index=%lx last=%lx\n", mas->offset, mas->index, mas->last);
7255 pr_err(" min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n",
7256 mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags);
7257 if (mas->index > mas->last)
7258 pr_err("Check index & last\n");
7260 EXPORT_SYMBOL_GPL(mas_dump);
7262 void mas_wr_dump(const struct ma_wr_state *wr_mas)
7264 pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n",
7265 wr_mas->node, wr_mas->r_min, wr_mas->r_max);
7266 pr_err(" type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
7267 wr_mas->type, wr_mas->offset_end, wr_mas->node_end,
7270 EXPORT_SYMBOL_GPL(mas_wr_dump);
7272 #endif /* CONFIG_DEBUG_MAPLE_TREE */