1 // SPDX-License-Identifier: GPL-2.0+
3 * Maple Tree implementation
4 * Copyright (c) 2018-2022 Oracle Corporation
5 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
6 * Matthew Wilcox <willy@infradead.org>
10 * DOC: Interesting implementation details of the Maple Tree
12 * Each node type has a number of slots for entries and a number of slots for
13 * pivots. In the case of dense nodes, the pivots are implied by the position
14 * and are simply the slot index + the minimum of the node.
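 *
 * For example (illustrative): a maple_dense node whose minimum is 100 keeps
 * the entry for index 103 in slot 3, and the implied pivot of slot 3 is
 * simply 100 + 3 = 103.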
16 * In regular B-Tree terms, pivots are called keys. The term pivot is used to
17 * indicate that the tree is specifying ranges. Pivots may appear in the
18 * subtree with an entry attached to the value, whereas keys are unique to a
19 * specific position of a B-tree. Pivot values are inclusive of the slot with the same index.
23 * The following illustrates the layout of a range64 node's slots and pivots.
26 * Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
28 * │ │ │ │ │ │ │ │ └─ Implied maximum
29 * │ │ │ │ │ │ │ └─ Pivot 14
30 * │ │ │ │ │ │ └─ Pivot 13
31 * │ │ │ │ │ └─ Pivot 12
39 * Internal (non-leaf) nodes contain pointers to other nodes.
40 * Leaf nodes contain entries.
42 * The location of interest is often referred to as an offset. All offsets have
43 * a slot, but the last offset has an implied pivot from the node above (or
44 * ULONG_MAX for the root node).
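 *
 * For example (illustrative): a node covering [100, 200] with pivots
 * {149, 179} holds the ranges [100, 149], [150, 179] and [180, 200] at
 * offsets 0, 1 and 2; the last offset has no pivot of its own, so its
 * maximum of 200 is implied by the node above.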
46 * Ranges complicate certain write activities. When modifying any of
47 * the B-tree variants, it is known that one entry will either be added or
48 * deleted. When modifying the Maple Tree, one store operation may overwrite
49 * the entire data set, or one half of the tree, or the middle half of the tree.
54 #include <linux/maple_tree.h>
55 #include <linux/xarray.h>
56 #include <linux/types.h>
57 #include <linux/export.h>
58 #include <linux/slab.h>
59 #include <linux/limits.h>
60 #include <asm/barrier.h>
62 #define CREATE_TRACE_POINTS
63 #include <trace/events/maple_tree.h>
65 #define MA_ROOT_PARENT 1
69 * * MA_STATE_BULK - Bulk insert mode
70 * * MA_STATE_REBALANCE - Indicate a rebalance during bulk insert
71 * * MA_STATE_PREALLOC - Preallocated nodes, WARN_ON allocation
73 #define MA_STATE_BULK 1
74 #define MA_STATE_REBALANCE 2
75 #define MA_STATE_PREALLOC 4
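/*
 * Usage sketch (hypothetical helper, not part of the tree code): the state
 * flags above are independent bits, so callers combine and test them with the
 * usual bitwise operations.
 */
static inline bool example_mas_in_bulk_rebalance(const struct ma_state *mas)
{
	/* True only while a bulk insert has flagged a pending rebalance. */
	return (mas->mas_flags & (MA_STATE_BULK | MA_STATE_REBALANCE)) ==
	       (MA_STATE_BULK | MA_STATE_REBALANCE);
}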
77 #define ma_parent_ptr(x) ((struct maple_pnode *)(x))
78 #define ma_mnode_ptr(x) ((struct maple_node *)(x))
79 #define ma_enode_ptr(x) ((struct maple_enode *)(x))
80 static struct kmem_cache *maple_node_cache;
82 #ifdef CONFIG_DEBUG_MAPLE_TREE
83 static const unsigned long mt_max[] = {
84 [maple_dense] = MAPLE_NODE_SLOTS,
85 [maple_leaf_64] = ULONG_MAX,
86 [maple_range_64] = ULONG_MAX,
87 [maple_arange_64] = ULONG_MAX,
89 #define mt_node_max(x) mt_max[mte_node_type(x)]
92 static const unsigned char mt_slots[] = {
93 [maple_dense] = MAPLE_NODE_SLOTS,
94 [maple_leaf_64] = MAPLE_RANGE64_SLOTS,
95 [maple_range_64] = MAPLE_RANGE64_SLOTS,
96 [maple_arange_64] = MAPLE_ARANGE64_SLOTS,
98 #define mt_slot_count(x) mt_slots[mte_node_type(x)]
100 static const unsigned char mt_pivots[] = {
102 [maple_leaf_64] = MAPLE_RANGE64_SLOTS - 1,
103 [maple_range_64] = MAPLE_RANGE64_SLOTS - 1,
104 [maple_arange_64] = MAPLE_ARANGE64_SLOTS - 1,
106 #define mt_pivot_count(x) mt_pivots[mte_node_type(x)]
108 static const unsigned char mt_min_slots[] = {
109 [maple_dense] = MAPLE_NODE_SLOTS / 2,
110 [maple_leaf_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
111 [maple_range_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
112 [maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2) - 1,
114 #define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]
116 #define MAPLE_BIG_NODE_SLOTS (MAPLE_RANGE64_SLOTS * 2 + 2)
117 #define MAPLE_BIG_NODE_GAPS (MAPLE_ARANGE64_SLOTS * 2 + 1)
119 struct maple_big_node {
120 struct maple_pnode *parent;
121 unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
123 struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
125 unsigned long padding[MAPLE_BIG_NODE_GAPS];
126 unsigned long gap[MAPLE_BIG_NODE_GAPS];
130 enum maple_type type;
134 * The maple_subtree_state is used to build a tree to replace a segment of an
135 * existing tree in a more atomic way. Any walkers of the older tree will hit a
136 * dead node and restart on updates.
138 struct maple_subtree_state {
139 struct ma_state *orig_l; /* Original left side of subtree */
140 struct ma_state *orig_r; /* Original right side of subtree */
141 struct ma_state *l; /* New left side of subtree */
142 struct ma_state *m; /* New middle of subtree (rare) */
143 struct ma_state *r; /* New right side of subtree */
144 struct ma_topiary *free; /* nodes to be freed */
145 struct ma_topiary *destroy; /* Nodes to be destroyed (walked and freed) */
146 struct maple_big_node *bn;
150 static inline struct maple_node *mt_alloc_one(gfp_t gfp)
152 return kmem_cache_alloc(maple_node_cache, gfp);
155 static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
157 return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
160 static inline void mt_free_bulk(size_t size, void __rcu **nodes)
162 kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
165 static void mt_free_rcu(struct rcu_head *head)
167 struct maple_node *node = container_of(head, struct maple_node, rcu);
169 kmem_cache_free(maple_node_cache, node);
173 * ma_free_rcu() - Use rcu callback to free a maple node
174 * @node: The node to free
176 * The maple tree uses the parent pointer to indicate this node is no longer in
177 * use and will be freed.
179 static void ma_free_rcu(struct maple_node *node)
181 WARN_ON(node->parent != ma_parent_ptr(node));
182 call_rcu(&node->rcu, mt_free_rcu);
186 static void mas_set_height(struct ma_state *mas)
188 unsigned int new_flags = mas->tree->ma_flags;
190 new_flags &= ~MT_FLAGS_HEIGHT_MASK;
191 BUG_ON(mas->depth > MAPLE_HEIGHT_MAX);
192 new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
193 mas->tree->ma_flags = new_flags;
196 static unsigned int mas_mt_height(struct ma_state *mas)
198 return mt_height(mas->tree);
201 static inline enum maple_type mte_node_type(const struct maple_enode *entry)
203 return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
204 MAPLE_NODE_TYPE_MASK;
207 static inline bool ma_is_dense(const enum maple_type type)
209 return type < maple_leaf_64;
212 static inline bool ma_is_leaf(const enum maple_type type)
214 return type < maple_range_64;
217 static inline bool mte_is_leaf(const struct maple_enode *entry)
219 return ma_is_leaf(mte_node_type(entry));
223 * We also reserve values with the bottom two bits set to '10' which are below 4096.
226 static inline bool mt_is_reserved(const void *entry)
228 return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
229 xa_is_internal(entry);
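/*
 * Illustrative sketch (hypothetical helper): XA_ZERO_ENTRY is an internal
 * entry whose value lies below MAPLE_RESERVED_RANGE, so it tests as reserved,
 * while an ordinary kernel pointer (assumed for @ptr) does not.
 */
static inline bool example_reserved_entries(void *ptr)
{
	/* Internal value below MAPLE_RESERVED_RANGE: reserved. */
	bool zero_reserved = mt_is_reserved(XA_ZERO_ENTRY);

	/* A normal pointer is neither internal nor below the range. */
	return zero_reserved && !mt_is_reserved(ptr);
}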
232 static inline void mas_set_err(struct ma_state *mas, long err)
234 mas->node = MA_ERROR(err);
237 static inline bool mas_is_ptr(struct ma_state *mas)
239 return mas->node == MAS_ROOT;
242 static inline bool mas_is_start(struct ma_state *mas)
244 return mas->node == MAS_START;
247 bool mas_is_err(struct ma_state *mas)
249 return xa_is_err(mas->node);
252 static inline bool mas_searchable(struct ma_state *mas)
254 if (mas_is_none(mas))
263 static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
265 return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
269 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
270 * @entry: The maple encoded node
272 * Return: a maple topiary pointer
274 static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
276 return (struct maple_topiary *)
277 ((unsigned long)entry & ~MAPLE_NODE_MASK);
281 * mas_mn() - Get the maple state node.
282 * @mas: The maple state
284 * Return: the maple node (not encoded - bare pointer).
286 static inline struct maple_node *mas_mn(const struct ma_state *mas)
288 return mte_to_node(mas->node);
292 * mte_set_node_dead() - Set a maple encoded node as dead.
293 * @mn: The maple encoded node.
295 static inline void mte_set_node_dead(struct maple_enode *mn)
297 mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
298 smp_wmb(); /* Needed for RCU */
301 /* Bit 1 indicates the root is a node */
302 #define MAPLE_ROOT_NODE 0x02
303 /* maple_type stored bit 3-6 */
304 #define MAPLE_ENODE_TYPE_SHIFT 0x03
305 /* Bit 2 means a NULL somewhere below */
306 #define MAPLE_ENODE_NULL 0x04
308 static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
309 enum maple_type type)
311 return (void *)((unsigned long)node |
312 (type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
315 static inline void *mte_mk_root(const struct maple_enode *node)
317 return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
320 static inline void *mte_safe_root(const struct maple_enode *node)
322 return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
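/*
 * Round-trip sketch (hypothetical helper) of the encoding above:
 * mt_mk_node() packs the node type into the low bits of the pointer, and
 * mte_node_type()/mte_to_node() recover the type and the bare node.
 */
static inline bool example_enode_roundtrip(struct maple_node *node)
{
	struct maple_enode *enode = mt_mk_node(node, maple_range_64);

	return mte_node_type(enode) == maple_range_64 &&
	       mte_to_node(enode) == node;
}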
325 static inline void mte_set_full(const struct maple_enode *node)
327 node = (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
330 static inline void mte_clear_full(const struct maple_enode *node)
332 node = (void *)((unsigned long)node | MAPLE_ENODE_NULL);
335 static inline bool ma_is_root(struct maple_node *node)
337 return ((unsigned long)node->parent & MA_ROOT_PARENT);
340 static inline bool mte_is_root(const struct maple_enode *node)
342 return ma_is_root(mte_to_node(node));
345 static inline bool mas_is_root_limits(const struct ma_state *mas)
347 return !mas->min && mas->max == ULONG_MAX;
350 static inline bool mt_is_alloc(struct maple_tree *mt)
352 return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
357 * Excluding root, the parent pointer is 256B aligned like all other tree nodes.
358 * When storing 32 or 64 bit values, the offset can fit into 5 bits. The 16
359 * bit values need an extra bit to store the offset. This extra bit comes from
360 * a reuse of the last bit in the node type. This is possible by using bit 1 to
361 * indicate if bit 2 is part of the type or the slot.
365 * 0x?00 = 16 bit nodes
366 * 0x010 = 32 bit nodes
367 * 0x110 = 64 bit nodes
369 * Slot size and alignment
371 *  0x?00 : 16 bit values, type in 0-1, slot in 2-7
372 *  0x010 : 32 bit values, type in 0-2, slot in 3-7
373 *  0x110 : 64 bit values, type in 0-2, slot in 3-7
376 #define MAPLE_PARENT_ROOT 0x01
378 #define MAPLE_PARENT_SLOT_SHIFT 0x03
379 #define MAPLE_PARENT_SLOT_MASK 0xF8
381 #define MAPLE_PARENT_16B_SLOT_SHIFT 0x02
382 #define MAPLE_PARENT_16B_SLOT_MASK 0xFC
384 #define MAPLE_PARENT_RANGE64 0x06
385 #define MAPLE_PARENT_RANGE32 0x04
386 #define MAPLE_PARENT_NOT_RANGE16 0x02
389 * mte_parent_shift() - Get the parent shift for the slot storage.
390 * @parent: The parent pointer cast as an unsigned long
391 * Return: The shift into that pointer to the start of the slot
393 static inline unsigned long mte_parent_shift(unsigned long parent)
395 /* Note bit 1 == 0 means 16B */
396 if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
397 return MAPLE_PARENT_SLOT_SHIFT;
399 return MAPLE_PARENT_16B_SLOT_SHIFT;
403 * mte_parent_slot_mask() - Get the slot mask for the parent.
404 * @parent: The parent pointer cast as an unsigned long.
405 * Return: The slot mask for that parent.
407 static inline unsigned long mte_parent_slot_mask(unsigned long parent)
409 /* Note bit 1 == 0 means 16B */
410 if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
411 return MAPLE_PARENT_SLOT_MASK;
413 return MAPLE_PARENT_16B_SLOT_MASK;
417 * mte_parent_enum() - Return the maple_type of the parent from the stored
418 * parent type.
419 * @p_enode: The parent pointer cast as a maple_enode
420 * @mt: The maple tree
421 * Return: The parent's maple_type
424 enum maple_type mte_parent_enum(struct maple_enode *p_enode,
425 struct maple_tree *mt)
427 unsigned long p_type;
429 p_type = (unsigned long)p_enode;
430 if (p_type & MAPLE_PARENT_ROOT)
431 return 0; /* Validated in the caller. */
433 p_type &= MAPLE_NODE_MASK;
434 p_type = p_type & ~(MAPLE_PARENT_ROOT | mte_parent_slot_mask(p_type));
437 case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
439 return maple_arange_64;
440 return maple_range_64;
447 enum maple_type mas_parent_enum(struct ma_state *mas, struct maple_enode *enode)
449 return mte_parent_enum(ma_enode_ptr(mte_to_node(enode)->parent), mas->tree);
453 * mte_set_parent() - Set the parent node and encode the slot
454 * @enode: The encoded maple node.
455 * @parent: The encoded maple node that is the parent of @enode.
456 * @slot: The slot that @enode resides in @parent.
458 * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the parent node type.
462 void mte_set_parent(struct maple_enode *enode, const struct maple_enode *parent,
465 unsigned long val = (unsigned long) parent;
468 enum maple_type p_type = mte_node_type(parent);
470 BUG_ON(p_type == maple_dense);
471 BUG_ON(p_type == maple_leaf_64);
475 case maple_arange_64:
476 shift = MAPLE_PARENT_SLOT_SHIFT;
477 type = MAPLE_PARENT_RANGE64;
486 val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
487 val |= (slot << shift) | type;
488 mte_to_node(enode)->parent = ma_parent_ptr(val);
492 * mte_parent_slot() - get the parent slot of @enode.
493 * @enode: The encoded maple node.
495 * Return: The slot in the parent node where @enode resides.
497 static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
499 unsigned long val = (unsigned long) mte_to_node(enode)->parent;
506 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
507 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
509 return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
513 * mte_parent() - Get the parent of @node.
514 * @node: The encoded maple node.
516 * Return: The parent maple node.
518 static inline struct maple_node *mte_parent(const struct maple_enode *enode)
520 return (void *)((unsigned long)
521 (mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
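/*
 * Round-trip sketch (hypothetical helper, assuming @parent encodes a
 * maple_range_64 or maple_arange_64 node): after mte_set_parent(), both the
 * slot and the bare parent node can be recovered from the single encoded
 * parent pointer.
 */
static inline bool example_parent_roundtrip(struct maple_enode *child,
					    struct maple_enode *parent)
{
	mte_set_parent(child, parent, 3);

	return mte_parent_slot(child) == 3 &&
	       mte_parent(child) == mte_to_node(parent);
}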
525 * ma_dead_node() - check if the @node is dead.
526 * @node: The maple node
528 * Return: true if dead, false otherwise.
530 static inline bool ma_dead_node(const struct maple_node *node)
532 struct maple_node *parent;
534 /* Do not reorder reads from the node prior to the parent check */
536 parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
537 return (parent == node);
541 * mte_dead_node() - check if the @enode is dead.
542 * @enode: The encoded maple node
544 * Return: true if dead, false otherwise.
546 static inline bool mte_dead_node(const struct maple_enode *enode)
548 struct maple_node *parent, *node;
550 node = mte_to_node(enode);
551 /* Do not reorder reads from the node prior to the parent check */
553 parent = mte_parent(enode);
554 return (parent == node);
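/*
 * Sketch (hypothetical helper, destructive to @enode): marking a node dead
 * points its parent pointer at itself, so both dead-node checks above reduce
 * to a self-reference test.
 */
static inline bool example_dead_mark(struct maple_enode *enode)
{
	mte_set_node_dead(enode);

	return mte_dead_node(enode); /* always true after the mark */
}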
558 * mas_allocated() - Get the number of nodes allocated in a maple state.
559 * @mas: The maple state
561 * The ma_state alloc member is overloaded to hold a pointer to the first
562 * allocated node or to the number of requested nodes to allocate. If bit 0 is
563 * set, then the alloc contains the number of requested nodes. If there is an
564 * allocated node, then the total number of allocated nodes is stored in that node.
566 * Return: The total number of nodes allocated
568 static inline unsigned long mas_allocated(const struct ma_state *mas)
570 if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
571 return 0;
573 return mas->alloc->total;
577 * mas_set_alloc_req() - Set the requested number of allocations.
578 * @mas: the maple state
579 * @count: the number of allocations.
581 * The requested number of allocations is either in the first allocated node,
582 * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
583 * no allocated node. Set the request either in the node or do the necessary
584 * encoding to store in @mas->alloc directly.
586 static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
588 if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
589 if (!count)
590 mas->alloc = NULL;
591 else
592 mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
593 return;
594 }
596 mas->alloc->request_count = count;
600 * mas_alloc_req() - get the requested number of allocations.
601 * @mas: The maple state
603 * The alloc count is either stored directly in @mas, or in
604 * @mas->alloc->request_count if there is at least one node allocated. Decode
605 * the request count if it's stored directly in @mas->alloc.
607 * Return: The allocation request count.
609 static inline unsigned int mas_alloc_req(const struct ma_state *mas)
611 if ((unsigned long)mas->alloc & 0x1)
612 return (unsigned long)(mas->alloc) >> 1;
614 return mas->alloc->request_count;
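/*
 * Worked sketch of the bit-0 encoding described above: a request for five
 * nodes with no allocated node is stored directly in mas->alloc as
 * (5 << 1) | 1 and decoded by shifting the low bit away.
 */
static inline bool example_request_encoding(void)
{
	unsigned long encoded = (5UL << 1) | 1;	/* request_count of 5 */

	return (encoded & 0x1) && (encoded >> 1 == 5);
}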
619 * ma_pivots() - Get a pointer to the maple node pivots.
620 * @node: the maple node
621 * @type: the node type
623 * In the event of a dead node, this array may be %NULL
625 * Return: A pointer to the maple node pivots
627 static inline unsigned long *ma_pivots(struct maple_node *node,
628 enum maple_type type)
631 case maple_arange_64:
632 return node->ma64.pivot;
635 return node->mr64.pivot;
643 * ma_gaps() - Get a pointer to the maple node gaps.
644 * @node: the maple node
645 * @type: the node type
647 * Return: A pointer to the maple node gaps
649 static inline unsigned long *ma_gaps(struct maple_node *node,
650 enum maple_type type)
653 case maple_arange_64:
654 return node->ma64.gap;
664 * mte_pivot() - Get the pivot at @piv of the maple encoded node.
665 * @mn: The maple encoded node.
668 * Return: the pivot at @piv of @mn.
670 static inline unsigned long mte_pivot(const struct maple_enode *mn,
673 struct maple_node *node = mte_to_node(mn);
674 enum maple_type type = mte_node_type(mn);
676 if (piv >= mt_pivots[type]) {
681 case maple_arange_64:
682 return node->ma64.pivot[piv];
685 return node->mr64.pivot[piv];
693 * mas_safe_pivot() - get the pivot at @piv or mas->max.
694 * @mas: The maple state
695 * @pivots: The pointer to the maple node pivots
696 * @piv: The pivot to fetch
697 * @type: The maple node type
699 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max otherwise.
702 static inline unsigned long
703 mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
704 unsigned char piv, enum maple_type type)
706 if (piv >= mt_pivots[type])
707 return mas->max;
709 return pivots[piv];
713 * mas_safe_min() - Return the minimum for a given offset.
714 * @mas: The maple state
715 * @pivots: The pointer to the maple node pivots
716 * @offset: The offset into the pivot array
718 * Return: The minimum range value that is contained in @offset.
720 static inline unsigned long
721 mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
723 if (likely(offset))
724 return pivots[offset - 1] + 1;
726 return mas->min;
730 * mas_logical_pivot() - Get the logical pivot of a given offset.
731 * @mas: The maple state
732 * @pivots: The pointer to the maple node pivots
733 * @offset: The offset into the pivot array
734 * @type: The maple node type
736 * When there is no value at a pivot (beyond the end of the data), then the
737 * pivot is actually @mas->max.
739 * Return: the logical pivot of a given @offset.
741 static inline unsigned long
742 mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
743 unsigned char offset, enum maple_type type)
745 unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type);
757 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
758 * @mn: The encoded maple node
759 * @piv: The pivot offset
760 * @val: The value of the pivot
762 static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
765 struct maple_node *node = mte_to_node(mn);
766 enum maple_type type = mte_node_type(mn);
768 BUG_ON(piv >= mt_pivots[type]);
773 node->mr64.pivot[piv] = val;
775 case maple_arange_64:
776 node->ma64.pivot[piv] = val;
785 * ma_slots() - Get a pointer to the maple node slots.
786 * @mn: The maple node
787 * @mt: The maple node type
789 * Return: A pointer to the maple node slots
791 static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
795 case maple_arange_64:
796 return mn->ma64.slot;
799 return mn->mr64.slot;
805 static inline bool mt_locked(const struct maple_tree *mt)
807 return mt_external_lock(mt) ? mt_lock_is_held(mt) :
808 lockdep_is_held(&mt->ma_lock);
811 static inline void *mt_slot(const struct maple_tree *mt,
812 void __rcu **slots, unsigned char offset)
814 return rcu_dereference_check(slots[offset], mt_locked(mt));
817 static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
818 unsigned char offset)
820 return rcu_dereference_protected(slots[offset], mt_locked(mt));
823 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
824 * @mas: The maple state
825 * @slots: The pointer to the slots
826 * @offset: The offset into the slots array to fetch
828 * Return: The entry stored in @slots at the @offset.
830 static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
831 unsigned char offset)
833 return mt_slot_locked(mas->tree, slots, offset);
837 * mas_slot() - Get the slot value when not holding the maple tree lock.
838 * @mas: The maple state
839 * @slots: The pointer to the slots
840 * @offset: The offset into the slots array to fetch
842 * Return: The entry stored in @slots at the @offset
844 static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
845 unsigned char offset)
847 return mt_slot(mas->tree, slots, offset);
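/*
 * Usage sketch (hypothetical helper): readers fetch slots under
 * rcu_read_lock() with mas_slot(), while writers holding the tree lock use
 * mas_slot_locked(); both dereference the same RCU-protected slot array.
 */
static inline void *example_read_slot(struct ma_state *mas,
				      void __rcu **slots,
				      unsigned char offset)
{
	void *entry;

	rcu_read_lock();
	entry = mas_slot(mas, slots, offset);
	rcu_read_unlock();

	return entry;
}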
851 * mas_root() - Get the maple tree root.
852 * @mas: The maple state.
854 * Return: The pointer to the root of the tree
856 static inline void *mas_root(struct ma_state *mas)
858 return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
861 static inline void *mt_root_locked(struct maple_tree *mt)
863 return rcu_dereference_protected(mt->ma_root, mt_locked(mt));
867 * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
868 * @mas: The maple state.
870 * Return: The pointer to the root of the tree
872 static inline void *mas_root_locked(struct ma_state *mas)
874 return mt_root_locked(mas->tree);
877 static inline struct maple_metadata *ma_meta(struct maple_node *mn,
881 case maple_arange_64:
882 return &mn->ma64.meta;
884 return &mn->mr64.meta;
889 * ma_set_meta() - Set the metadata information of a node.
890 * @mn: The maple node
891 * @mt: The maple node type
892 * @offset: The offset of the highest sub-gap in this node.
893 * @end: The end of the data in this node.
895 static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
896 unsigned char offset, unsigned char end)
898 struct maple_metadata *meta = ma_meta(mn, mt);
905 * mt_clear_meta() - clear the metadata information of a node, if it exists
906 * @mt: The maple tree
907 * @mn: The maple node
908 * @type: The maple node type
912 static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
913 enum maple_type type)
915 struct maple_metadata *meta;
916 unsigned long *pivots;
922 pivots = mn->mr64.pivot;
923 if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
924 slots = mn->mr64.slot;
925 next = mt_slot_locked(mt, slots,
926 MAPLE_RANGE64_SLOTS - 1);
927 if (unlikely((mte_to_node(next) &&
928 mte_node_type(next))))
929 return; /* no metadata; the last slot holds a node */
932 case maple_arange_64:
933 meta = ma_meta(mn, type);
944 * ma_meta_end() - Get the data end of a node from the metadata
945 * @mn: The maple node
946 * @mt: The maple node type
948 static inline unsigned char ma_meta_end(struct maple_node *mn,
951 struct maple_metadata *meta = ma_meta(mn, mt);
957 * ma_meta_gap() - Get the largest gap location of a node from the metadata
958 * @mn: The maple node
959 * @mt: The maple node type
961 static inline unsigned char ma_meta_gap(struct maple_node *mn,
964 BUG_ON(mt != maple_arange_64);
966 return mn->ma64.meta.gap;
970 * ma_set_meta_gap() - Set the largest gap location in a nodes metadata
971 * @mn: The maple node
972 * @mt: The maple node type
973 * @offset: The location of the largest gap.
975 static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
976 unsigned char offset)
979 struct maple_metadata *meta = ma_meta(mn, mt);
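/*
 * Round-trip sketch (hypothetical helper): record that the largest gap of an
 * arange64 node sits at offset 2, then read it back through the metadata
 * helpers above.
 */
static inline bool example_meta_gap_roundtrip(struct maple_node *mn)
{
	ma_set_meta_gap(mn, maple_arange_64, 2);

	return ma_meta_gap(mn, maple_arange_64) == 2;
}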
985 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
986 * @mat: the ma_topiary, a linked list of dead nodes.
987 * @dead_enode: the node to be marked as dead and added to the tail of the list
989 * Add the @dead_enode to the linked list in @mat.
991 static inline void mat_add(struct ma_topiary *mat,
992 struct maple_enode *dead_enode)
994 mte_set_node_dead(dead_enode);
995 mte_to_mat(dead_enode)->next = NULL;
997 mat->tail = mat->head = dead_enode;
1001 mte_to_mat(mat->tail)->next = dead_enode;
1002 mat->tail = dead_enode;
1005 static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
1006 static inline void mas_free(struct ma_state *mas, struct maple_enode *used);
1009 * mas_mat_free() - Free all nodes in a dead list.
1010 * @mas: the maple state
1011 * @mat: the ma_topiary linked list of dead nodes to free.
1013 * Free walk a dead list.
1015 static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
1017 struct maple_enode *next;
1020 next = mte_to_mat(mat->head)->next;
1021 mas_free(mas, mat->head);
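/*
 * Usage sketch (hypothetical helper, assuming @a and @b are encoded nodes no
 * longer reachable from the tree): dead nodes are chained through the
 * topiary and then released in one pass.
 */
static inline void example_topiary_free(struct ma_state *mas,
					struct maple_enode *a,
					struct maple_enode *b)
{
	struct ma_topiary mat = { .head = NULL, .tail = NULL,
				  .mtree = mas->tree };

	mat_add(&mat, a);		/* marks @a dead and queues it */
	mat_add(&mat, b);
	mas_mat_free(mas, &mat);	/* frees the whole chain */
}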
1027 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
1028 * @mas: the maple state
1029 * @mat: the ma_topiary linked list of dead nodes to free.
1031 * Destroy walk a dead list.
1033 static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
1035 struct maple_enode *next;
1038 next = mte_to_mat(mat->head)->next;
1039 mte_destroy_walk(mat->head, mat->mtree);
1044 * mas_descend() - Descend into the slot stored in the ma_state.
1045 * @mas: the maple state.
1047 * Note: Not RCU safe, only use in write side or debug code.
1049 static inline void mas_descend(struct ma_state *mas)
1051 enum maple_type type;
1052 unsigned long *pivots;
1053 struct maple_node *node;
1057 type = mte_node_type(mas->node);
1058 pivots = ma_pivots(node, type);
1059 slots = ma_slots(node, type);
1062 mas->min = pivots[mas->offset - 1] + 1;
1063 mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
1064 mas->node = mas_slot(mas, slots, mas->offset);
1068 * mte_set_gap() - Set a maple node gap.
1069 * @mn: The encoded maple node
1070 * @gap: The offset of the gap to set
1071 * @val: The gap value
1073 static inline void mte_set_gap(const struct maple_enode *mn,
1074 unsigned char gap, unsigned long val)
1076 switch (mte_node_type(mn)) {
1079 case maple_arange_64:
1080 mte_to_node(mn)->ma64.gap[gap] = val;
1086 * mas_ascend() - Walk up a level of the tree.
1087 * @mas: The maple state
1089 * Sets the @mas->max and @mas->min to the correct values when walking up. This
1090 * may cause several levels of walking up to find the correct min and max.
1091 * May find a dead node which will cause a premature return.
1092 * Return: 1 on dead node, 0 otherwise
1094 static int mas_ascend(struct ma_state *mas)
1096 struct maple_enode *p_enode; /* parent enode. */
1097 struct maple_enode *a_enode; /* ancestor enode. */
1098 struct maple_node *a_node; /* ancestor node. */
1099 struct maple_node *p_node; /* parent node. */
1100 unsigned char a_slot;
1101 enum maple_type a_type;
1102 unsigned long min, max;
1103 unsigned long *pivots;
1104 unsigned char offset;
1105 bool set_max = false, set_min = false;
1107 a_node = mas_mn(mas);
1108 if (ma_is_root(a_node)) {
1113 p_node = mte_parent(mas->node);
1114 if (unlikely(a_node == p_node))
1116 a_type = mas_parent_enum(mas, mas->node);
1117 offset = mte_parent_slot(mas->node);
1118 a_enode = mt_mk_node(p_node, a_type);
1120 /* Check to make sure all parent information is still accurate */
1121 if (p_node != mte_parent(mas->node))
1124 mas->node = a_enode;
1125 mas->offset = offset;
1127 if (mte_is_root(a_enode)) {
1128 mas->max = ULONG_MAX;
1137 a_type = mas_parent_enum(mas, p_enode);
1138 a_node = mte_parent(p_enode);
1139 a_slot = mte_parent_slot(p_enode);
1140 a_enode = mt_mk_node(a_node, a_type);
1141 pivots = ma_pivots(a_node, a_type);
1143 if (unlikely(ma_dead_node(a_node)))
1146 if (!set_min && a_slot) {
1148 min = pivots[a_slot - 1] + 1;
1151 if (!set_max && a_slot < mt_pivots[a_type]) {
1153 max = pivots[a_slot];
1156 if (unlikely(ma_dead_node(a_node)))
1159 if (unlikely(ma_is_root(a_node)))
1162 } while (!set_min || !set_max);
1170 * mas_pop_node() - Get a previously allocated maple node from the maple state.
1171 * @mas: The maple state
1173 * Return: A pointer to a maple node.
1175 static inline struct maple_node *mas_pop_node(struct ma_state *mas)
1177 struct maple_alloc *ret, *node = mas->alloc;
1178 unsigned long total = mas_allocated(mas);
1179 unsigned int req = mas_alloc_req(mas);
1181 /* nothing or a request pending. */
1182 if (WARN_ON(!total))
1186 /* single allocation in this ma_state */
1192 if (node->node_count == 1) {
1193 /* Single allocation in this node. */
1194 mas->alloc = node->slot[0];
1195 mas->alloc->total = node->total - 1;
1200 ret = node->slot[--node->node_count];
1201 node->slot[node->node_count] = NULL;
1207 mas_set_alloc_req(mas, req);
1210 memset(ret, 0, sizeof(*ret));
1211 return (struct maple_node *)ret;
1215 * mas_push_node() - Push a node back on the maple state allocation.
1216 * @mas: The maple state
1217 * @used: The used maple node
1219 * Stores the maple node back into @mas->alloc for reuse. Updates allocated and
1220 * requested node count as necessary.
1222 static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
1224 struct maple_alloc *reuse = (struct maple_alloc *)used;
1225 struct maple_alloc *head = mas->alloc;
1226 unsigned long count;
1227 unsigned int requested = mas_alloc_req(mas);
1229 count = mas_allocated(mas);
1231 reuse->request_count = 0;
1232 reuse->node_count = 0;
1233 if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
1234 head->slot[head->node_count++] = reuse;
1240 if ((head) && !((unsigned long)head & 0x1)) {
1241 reuse->slot[0] = head;
1242 reuse->node_count = 1;
1243 reuse->total += head->total;
1249 mas_set_alloc_req(mas, requested - 1);
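/*
 * Usage sketch (hypothetical helper, assuming at least one node is allocated
 * in @mas): a node popped for a write can be pushed back when the operation
 * turns out not to need it.
 */
static inline void example_pop_then_push(struct ma_state *mas)
{
	struct maple_node *node = mas_pop_node(mas);

	/* ... the write path decides the node is not required ... */
	mas_push_node(mas, node);
}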
1253 * mas_alloc_nodes() - Allocate nodes into a maple state
1254 * @mas: The maple state
1255 * @gfp: The GFP Flags
1257 static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
1259 struct maple_alloc *node;
1260 unsigned long allocated = mas_allocated(mas);
1261 unsigned int requested = mas_alloc_req(mas);
1263 void **slots = NULL;
1264 unsigned int max_req = 0;
1269 mas_set_alloc_req(mas, 0);
1270 if (mas->mas_flags & MA_STATE_PREALLOC) {
1273 WARN_ON(!allocated);
1276 if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
1277 node = (struct maple_alloc *)mt_alloc_one(gfp);
1282 node->slot[0] = mas->alloc;
1283 node->node_count = 1;
1285 node->node_count = 0;
1289 node->total = ++allocated;
1294 node->request_count = 0;
1296 max_req = MAPLE_ALLOC_SLOTS;
1297 if (node->node_count) {
1298 unsigned int offset = node->node_count;
1300 slots = (void **)&node->slot[offset];
1303 slots = (void **)&node->slot;
1306 max_req = min(requested, max_req);
1307 count = mt_alloc_bulk(gfp, max_req, slots);
1311 node->node_count += count;
1313 node = node->slot[0];
1314 node->node_count = 0;
1315 node->request_count = 0;
1318 mas->alloc->total = allocated;
1322 /* Clean up potential freed allocations on bulk failure */
1323 memset(slots, 0, max_req * sizeof(unsigned long));
1325 mas_set_alloc_req(mas, requested);
1326 if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
1327 mas->alloc->total = allocated;
1328 mas_set_err(mas, -ENOMEM);
1334 * mas_free() - Free an encoded maple node
1335 * @mas: The maple state
1336 * @used: The encoded maple node to free.
1338 * Uses rcu free if necessary, pushes @used back on the maple state allocations
1341 static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
1343 struct maple_node *tmp = mte_to_node(used);
1345 if (mt_in_rcu(mas->tree))
1348 mas_push_node(mas, tmp);
1352 * mas_node_count_gfp() - Check if enough nodes are allocated and request
1353 * more if there are not enough nodes.
1354 * @mas: The maple state
1355 * @count: The number of nodes needed
1356 * @gfp: the gfp flags
1358 static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
1360 unsigned long allocated = mas_allocated(mas);
1362 if (allocated < count) {
1363 mas_set_alloc_req(mas, count - allocated);
1364 mas_alloc_nodes(mas, gfp);
1369 * mas_node_count() - Check if enough nodes are allocated and request more if
1370 * there are not enough nodes.
1371 * @mas: The maple state
1372 * @count: The number of nodes needed
1374 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
1376 static void mas_node_count(struct ma_state *mas, int count)
1378 return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
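/*
 * Usage sketch (hypothetical helper): request enough nodes before a write
 * and bail out if the non-blocking allocation failed, mirroring how the
 * write paths consume this helper.
 */
static inline bool example_ensure_nodes(struct ma_state *mas, int needed)
{
	mas_node_count(mas, needed);

	return !mas_is_err(mas);	/* false on -ENOMEM */
}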
1382 * mas_start() - Sets up maple state for operations.
1383 * @mas: The maple state.
1385 * If mas->node == MAS_START, then set the min, max and depth to their defaults.
1389 * - If mas->node is an error or not MAS_START, return NULL.
1390 * - If it's an empty tree: NULL & mas->node == MAS_NONE
1391 * - If it's a single entry: The entry & mas->node == MAS_ROOT
1392 * - If it's a tree: NULL & mas->node == safe root node.
1394 static inline struct maple_enode *mas_start(struct ma_state *mas)
1396 if (likely(mas_is_start(mas))) {
1397 struct maple_enode *root;
1400 mas->max = ULONG_MAX;
1404 root = mas_root(mas);
1405 /* Tree with nodes */
1406 if (likely(xa_is_node(root))) {
1408 mas->node = mte_safe_root(root);
1410 if (mte_dead_node(mas->node))
1417 if (unlikely(!root)) {
1418 mas->node = MAS_NONE;
1419 mas->offset = MAPLE_NODE_SLOTS;
1423 /* Single entry tree */
1424 mas->node = MAS_ROOT;
1425 mas->offset = MAPLE_NODE_SLOTS;
1427 /* Single entry tree. */
1428 if (mas->index > 0)
1429 return NULL;
1431 return root;
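/*
 * Usage sketch (hypothetical helper) of the mas_start() contract above:
 * distinguish an empty tree, a single-entry tree, and a tree with nodes.
 */
static inline void *example_start(struct ma_state *mas)
{
	void *entry = mas_start(mas);

	if (mas_is_none(mas))		/* empty tree */
		return NULL;

	if (mas_is_ptr(mas))		/* single entry at index 0 */
		return entry;

	return NULL;			/* mas->node is the safe root node */
}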
1438 * ma_data_end() - Find the end of the data in a node.
1439 * @node: The maple node
1440 * @type: The maple node type
1441 * @pivots: The array of pivots in the node
1442 * @max: The maximum value in the node
1444 * Uses metadata to find the end of the data when possible.
1445 * Return: The zero indexed last slot with data (may be null).
1447 static inline unsigned char ma_data_end(struct maple_node *node,
1448 enum maple_type type,
1449 unsigned long *pivots,
1452 unsigned char offset;
1457 if (type == maple_arange_64)
1458 return ma_meta_end(node, type);
1460 offset = mt_pivots[type] - 1;
1461 if (likely(!pivots[offset]))
1462 return ma_meta_end(node, type);
1464 if (likely(pivots[offset] == max))
1467 return mt_pivots[type];
1471 * mas_data_end() - Find the end of the data (slot).
1472 * @mas: the maple state
1474 * This method is optimized to check the metadata of a node if the node type
1475 * supports data end metadata.
1477 * Return: The zero indexed last slot with data (may be null).
1479 static inline unsigned char mas_data_end(struct ma_state *mas)
1481 enum maple_type type;
1482 struct maple_node *node;
1483 unsigned char offset;
1484 unsigned long *pivots;
1486 type = mte_node_type(mas->node);
1488 if (type == maple_arange_64)
1489 return ma_meta_end(node, type);
1491 pivots = ma_pivots(node, type);
1492 if (unlikely(ma_dead_node(node)))
1495 offset = mt_pivots[type] - 1;
1496 if (likely(!pivots[offset]))
1497 return ma_meta_end(node, type);
1499 if (likely(pivots[offset] == mas->max))
1502 return mt_pivots[type];
1506 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
1507 * @mas: the maple state
1509 * Return: The maximum gap in the leaf.
1511 static unsigned long mas_leaf_max_gap(struct ma_state *mas)
1514 unsigned long pstart, gap, max_gap;
1515 struct maple_node *mn;
1516 unsigned long *pivots;
1519 unsigned char max_piv;
1521 mt = mte_node_type(mas->node);
1523 slots = ma_slots(mn, mt);
1525 if (unlikely(ma_is_dense(mt))) {
1527 for (i = 0; i < mt_slots[mt]; i++) {
1542 * Checking the first implied pivot optimizes the loop below, and slot 1 may
1543 * be skipped if there is a gap in slot 0.
1545 pivots = ma_pivots(mn, mt);
1546 if (likely(!slots[0])) {
1547 max_gap = pivots[0] - mas->min + 1;
1553 /* reduce max_piv as the special case is checked before the loop */
1554 max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
1556 * Check the end implied pivot, which can only be a gap on the right-most node.
1559 if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
1560 gap = ULONG_MAX - pivots[max_piv];
1565 for (; i <= max_piv; i++) {
1566 /* data == no gap. */
1567 if (likely(slots[i]))
1570 pstart = pivots[i - 1];
1571 gap = pivots[i] - pstart;
1575 /* There cannot be two gaps in a row. */
1582 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
1583 * @node: The maple node
1584 * @gaps: The pointer to the gaps
1585 * @mt: The maple node type
1586 * @off: Pointer to store the offset location of the gap.
1588 * Uses the data end from the metadata to scan backwards across set gaps.
1590 * Return: The maximum gap value
1592 static inline unsigned long
1593 ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
1596 unsigned char offset, i;
1597 unsigned long max_gap = 0;
1599 i = offset = ma_meta_end(node, mt);
1601 if (gaps[i] > max_gap) {
1612 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
1613 * @mas: The maple state.
1615 * If the metadata gap is set to MAPLE_ARANGE64_META_MAX, there is no gap.
1617 * Return: The gap value.
1619 static inline unsigned long mas_max_gap(struct ma_state *mas)
1621 unsigned long *gaps;
1622 unsigned char offset;
1624 struct maple_node *node;
1626 mt = mte_node_type(mas->node);
1628 return mas_leaf_max_gap(mas);
1631 offset = ma_meta_gap(node, mt);
1632 if (offset == MAPLE_ARANGE64_META_MAX)
1635 gaps = ma_gaps(node, mt);
1636 return gaps[offset];
1640 * mas_parent_gap() - Set the parent gap and any gaps above, as needed
1641 * @mas: The maple state
1642 * @offset: The gap offset in the parent to set
1643 * @new: The new gap value.
1645 * Set the parent gap then continue to set the gap upwards, using the metadata
1646 * of the parent to see if it is necessary to check the node above.
1648 static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
1651 unsigned long meta_gap = 0;
1652 struct maple_node *pnode;
1653 struct maple_enode *penode;
1654 unsigned long *pgaps;
1655 unsigned char meta_offset;
1656 enum maple_type pmt;
1658 pnode = mte_parent(mas->node);
1659 pmt = mas_parent_enum(mas, mas->node);
1660 penode = mt_mk_node(pnode, pmt);
1661 pgaps = ma_gaps(pnode, pmt);
1664 meta_offset = ma_meta_gap(pnode, pmt);
1665 if (meta_offset == MAPLE_ARANGE64_META_MAX)
1668 meta_gap = pgaps[meta_offset];
1670 pgaps[offset] = new;
1672 if (meta_gap == new)
1675 if (offset != meta_offset) {
1679 ma_set_meta_gap(pnode, pmt, offset);
1680 } else if (new < meta_gap) {
1682 new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
1683 ma_set_meta_gap(pnode, pmt, meta_offset);
1686 if (ma_is_root(pnode))
1689 /* Go to the parent node. */
1690 pnode = mte_parent(penode);
1691 pmt = mas_parent_enum(mas, penode);
1692 pgaps = ma_gaps(pnode, pmt);
1693 offset = mte_parent_slot(penode);
1694 penode = mt_mk_node(pnode, pmt);
1699 * mas_update_gap() - Update a node's gaps and propagate up if necessary.
1700 * @mas: the maple state.
1702 static inline void mas_update_gap(struct ma_state *mas)
1704 unsigned char pslot;
1705 unsigned long p_gap;
1706 unsigned long max_gap;
1708 if (!mt_is_alloc(mas->tree))
1711 if (mte_is_root(mas->node))
1714 max_gap = mas_max_gap(mas);
1716 pslot = mte_parent_slot(mas->node);
1717 p_gap = ma_gaps(mte_parent(mas->node),
1718 mas_parent_enum(mas, mas->node))[pslot];
1720 if (p_gap != max_gap)
1721 mas_parent_gap(mas, pslot, max_gap);
1725 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
1726 * @parent with the slot encoded.
1727 * @mas: the maple state (for the tree)
1728 * @parent: the maple encoded node containing the children.
1730 static inline void mas_adopt_children(struct ma_state *mas,
1731 struct maple_enode *parent)
1733 enum maple_type type = mte_node_type(parent);
1734 struct maple_node *node = mas_mn(mas);
1735 void __rcu **slots = ma_slots(node, type);
1736 unsigned long *pivots = ma_pivots(node, type);
1737 struct maple_enode *child;
1738 unsigned char offset;
1740 offset = ma_data_end(node, type, pivots, mas->max);
1742 child = mas_slot_locked(mas, slots, offset);
1743 mte_set_parent(child, parent, offset);
1748 * mas_replace() - Replace a maple node in the tree with mas->node. Uses the
1749 * parent encoding to locate the maple node in the tree.
1750 * @mas: the ma_state to use for operations.
1751 * @advanced: boolean to adopt the child nodes and free the old node (false) or
1752 * leave the node (true) and handle the adoption and free elsewhere.
1754 static inline void mas_replace(struct ma_state *mas, bool advanced)
1755 __must_hold(mas->tree->lock)
1757 struct maple_node *mn = mas_mn(mas);
1758 struct maple_enode *old_enode;
1759 unsigned char offset = 0;
1760 void __rcu **slots = NULL;
1762 if (ma_is_root(mn)) {
1763 old_enode = mas_root_locked(mas);
1765 offset = mte_parent_slot(mas->node);
1766 slots = ma_slots(mte_parent(mas->node),
1767 mas_parent_enum(mas, mas->node));
1768 old_enode = mas_slot_locked(mas, slots, offset);
1771 if (!advanced && !mte_is_leaf(mas->node))
1772 mas_adopt_children(mas, mas->node);
1774 if (mte_is_root(mas->node)) {
1775 mn->parent = ma_parent_ptr(
1776 ((unsigned long)mas->tree | MA_ROOT_PARENT));
1777 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
1778 mas_set_height(mas);
1780 rcu_assign_pointer(slots[offset], mas->node);
1784 mte_set_node_dead(old_enode);
1785 mas_free(mas, old_enode);
1790 * mas_new_child() - Find the new child of a node.
1791 * @mas: the maple state
1792 * @child: the maple state to store the child.
 *
 * Return: true if a new child was found, false otherwise.
1794 static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
1795 __must_hold(mas->tree->lock)
1798 unsigned char offset;
1800 unsigned long *pivots;
1801 struct maple_enode *entry;
1802 struct maple_node *node;
1805 mt = mte_node_type(mas->node);
1807 slots = ma_slots(node, mt);
1808 pivots = ma_pivots(node, mt);
1809 end = ma_data_end(node, mt, pivots, mas->max);
1810 for (offset = mas->offset; offset <= end; offset++) {
1811 entry = mas_slot_locked(mas, slots, offset);
1812 if (mte_parent(entry) == node) {
1814 mas->offset = offset + 1;
1815 child->offset = offset;
1825 * mab_shift_right() - Shift the data in mab right. Note, does not clean out the
1826 * old data or set b_node->b_end.
1827 * @b_node: the maple_big_node
1828 * @shift: the shift count
1830 static inline void mab_shift_right(struct maple_big_node *b_node,
1831 unsigned char shift)
1833 unsigned long size = b_node->b_end * sizeof(unsigned long);
1835 memmove(b_node->pivot + shift, b_node->pivot, size);
1836 memmove(b_node->slot + shift, b_node->slot, size);
1837 if (b_node->type == maple_arange_64)
1838 memmove(b_node->gap + shift, b_node->gap, size);
1842 * mab_middle_node() - Check if a middle node is needed (unlikely)
1843 * @b_node: the maple_big_node that contains the data.
1845 * @split: the potential split location
1846 * @slot_count: the size that can be stored in a single node being considered.
1848 * Return: true if a middle node is required.
1850 static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
1851 unsigned char slot_count)
1853 unsigned char size = b_node->b_end;
1855 if (size >= 2 * slot_count)
1858 if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
1865 * mab_no_null_split() - ensure the split doesn't fall on a NULL
1866 * @b_node: the maple_big_node with the data
1867 * @split: the suggested split location
1868 * @slot_count: the number of slots in the node being considered.
1870 * Return: the split location.
1872 static inline int mab_no_null_split(struct maple_big_node *b_node,
1873 unsigned char split, unsigned char slot_count)
1875 if (!b_node->slot[split]) {
1877 * If the split is less than the max slot && the right side will
1878 * still be sufficient, then increment the split on NULL.
1880 if ((split < slot_count - 1) &&
1881 (b_node->b_end - split) > (mt_min_slots[b_node->type]))
1890 * mab_calc_split() - Calculate the split location and if there needs to be two splits.
1892 * @bn: The maple_big_node with the data
1893 * @mid_split: The second split, if required. 0 otherwise.
1895 * Return: The first split location. The middle split is set in @mid_split.
1897 static inline int mab_calc_split(struct ma_state *mas,
1898 struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
1900 unsigned char b_end = bn->b_end;
1901 int split = b_end / 2; /* Assume equal split. */
1902 unsigned char slot_min, slot_count = mt_slots[bn->type];
1905 * To support gap tracking, all NULL entries are kept together and a node cannot
1906 * end on a NULL entry, with the exception of the left-most leaf. The
1907 * limitation means that the split of a node must be checked for this condition
1908 * and be able to put more data in one direction or the other.
1910 if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
1912 split = b_end - mt_min_slots[bn->type];
1914 if (!ma_is_leaf(bn->type))
1917 mas->mas_flags |= MA_STATE_REBALANCE;
1918 if (!bn->slot[split])
1924 * Although extremely rare, it is possible to enter what is known as the 3-way
1925 * split scenario. The 3-way split comes about by means of a store of a range
1926 * that overwrites the end and beginning of two full nodes. The result is a set
1927 * of entries that cannot be stored in 2 nodes. Sometimes, these two nodes can
1928 * also be located in different parent nodes which are also full. This can
1929 * carry upwards all the way to the root in the worst case.
1931 if (unlikely(mab_middle_node(bn, split, slot_count))) {
1933 *mid_split = split * 2;
1935 slot_min = mt_min_slots[bn->type];
1939 * Avoid having a range less than the slot count unless it
1940 * causes one node to be deficient.
1941 * NOTE: mt_min_slots is 1 based, b_end and split are zero based.
1943 while (((bn->pivot[split] - min) < slot_count - 1) &&
1944 (split < slot_count - 1) && (b_end - split > slot_min))
1948 /* Avoid ending a node on a NULL entry */
1949 split = mab_no_null_split(bn, split, slot_count);
1953 *mid_split = mab_no_null_split(bn, *mid_split, slot_count);
1959 * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
1960 * and set @b_node->b_end to the next free slot.
1961 * @mas: The maple state
1962 * @mas_start: The starting slot to copy
1963 * @mas_end: The end slot to copy (inclusively)
1964 * @b_node: The maple_big_node to place the data
1965 * @mab_start: The starting location in maple_big_node to store the data.
1967 static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
1968 unsigned char mas_end, struct maple_big_node *b_node,
1969 unsigned char mab_start)
1972 struct maple_node *node;
1974 unsigned long *pivots, *gaps;
1975 int i = mas_start, j = mab_start;
1976 unsigned char piv_end;
1979 mt = mte_node_type(mas->node);
1980 pivots = ma_pivots(node, mt);
1982 b_node->pivot[j] = pivots[i++];
1983 if (unlikely(i > mas_end))
1988 piv_end = min(mas_end, mt_pivots[mt]);
1989 for (; i < piv_end; i++, j++) {
1990 b_node->pivot[j] = pivots[i];
1991 if (unlikely(!b_node->pivot[j]))
1994 if (unlikely(mas->max == b_node->pivot[j]))
1998 if (likely(i <= mas_end))
1999 b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);
2002 b_node->b_end = ++j;
2004 slots = ma_slots(node, mt);
2005 memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
2006 if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
2007 gaps = ma_gaps(node, mt);
2008 memcpy(b_node->gap + mab_start, gaps + mas_start,
2009 sizeof(unsigned long) * j);
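/*
 * Usage sketch (hypothetical helper): copy every occupied slot of the
 * current node into an empty big node, as the rebalance paths do before
 * recombining data. Presetting b_end and type mirrors how real callers
 * prepare the big node.
 */
static inline void example_fill_big_node(struct ma_state *mas,
					 struct maple_big_node *b_node)
{
	b_node->b_end = 0;
	b_node->type = mte_node_type(mas->node);
	mas_mab_cp(mas, 0, mas_data_end(mas), b_node, 0);
}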
2014 * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
2015 * @mas: The maple state
2016 * @node: The maple node
2017 * @pivots: pointer to the maple node pivots
2018 * @mt: The maple type
2019 * @end: The assumed end
2021 * Note, @end may be incremented within this function but the increment is
2022 * not visible to the caller. This is fine since the metadata is the last thing to be stored in a
2023 * node during a write.
2025 static inline void mas_leaf_set_meta(struct ma_state *mas,
2026 struct maple_node *node, unsigned long *pivots,
2027 enum maple_type mt, unsigned char end)
2029 /* There is no room for metadata already */
2030 if (mt_pivots[mt] <= end)
2033 if (pivots[end] && pivots[end] < mas->max)
2036 if (end < mt_slots[mt] - 1)
2037 ma_set_meta(node, mt, 0, end);
2041 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
2042 * @b_node: the maple_big_node that has the data
2043 * @mab_start: the start location in @b_node.
2044 * @mab_end: The end location in @b_node (inclusively)
2045 * @mas: The maple state with the maple encoded node.
2047 static inline void mab_mas_cp(struct maple_big_node *b_node,
2048 unsigned char mab_start, unsigned char mab_end,
2049 struct ma_state *mas, bool new_max)
2052 enum maple_type mt = mte_node_type(mas->node);
2053 struct maple_node *node = mte_to_node(mas->node);
2054 void __rcu **slots = ma_slots(node, mt);
2055 unsigned long *pivots = ma_pivots(node, mt);
2056 unsigned long *gaps = NULL;
2059 if (mab_end - mab_start > mt_pivots[mt])
2062 if (!pivots[mt_pivots[mt] - 1])
2063 slots[mt_pivots[mt]] = NULL;
2067 pivots[j++] = b_node->pivot[i++];
2068 } while (i <= mab_end && likely(b_node->pivot[i]));
2070 memcpy(slots, b_node->slot + mab_start,
2071 sizeof(void *) * (i - mab_start));
2074 mas->max = b_node->pivot[i - 1];
2077 if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
2078 unsigned long max_gap = 0;
2079 unsigned char offset = 15;
2081 gaps = ma_gaps(node, mt);
2083 gaps[--j] = b_node->gap[--i];
2084 if (gaps[j] > max_gap) {
2090 ma_set_meta(node, mt, offset, end);
2092 mas_leaf_set_meta(mas, node, pivots, mt, end);
2097 * mas_descend_adopt() - Descend through a sub-tree and adopt children.
2098 * @mas: the maple state with the maple encoded node of the sub-tree.
2100 * Descend through a sub-tree and adopt children who do not have the correct
2101 * parents set. Follow the parents which have the correct parents as they are
2102 * the new entries which need to be followed to find other incorrectly set parents.
2105 static inline void mas_descend_adopt(struct ma_state *mas)
2107 struct ma_state list[3], next[3];
2111 * At each level there may be up to 3 correct parent pointers which indicate
2112 * the new nodes which need to be walked to find any new nodes at a lower level.
2115 for (i = 0; i < 3; i++) {
2122 while (!mte_is_leaf(list[0].node)) {
2124 for (i = 0; i < 3; i++) {
2125 if (mas_is_none(&list[i]))
2128 if (i && list[i-1].node == list[i].node)
2131 while ((n < 3) && (mas_new_child(&list[i], &next[n])))
2134 mas_adopt_children(&list[i], list[i].node);
2138 next[n++].node = MAS_NONE;
2140 /* descend by setting the list to the children */
2141 for (i = 0; i < 3; i++)
2147 * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
2148 * @mas: The maple state
2149 * @end: The maple node end
2150 * @mt: The maple node type
2152 static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
2155 if (!(mas->mas_flags & MA_STATE_BULK))
2158 if (mte_is_root(mas->node))
2161 if (end > mt_min_slots[mt]) {
2162 mas->mas_flags &= ~MA_STATE_REBALANCE;
2168 * mas_store_b_node() - Store an @entry into the b_node while also copying the
2169 * data from a maple encoded node.
2170 * @wr_mas: the maple write state
2171 * @b_node: the maple_big_node to fill with data
2172 * @offset_end: the offset to end copying
2174 * The actual end of the data is stored in @b_node->b_end.
2176 static inline void mas_store_b_node(struct ma_wr_state *wr_mas,
2177 struct maple_big_node *b_node, unsigned char offset_end)
2180 unsigned char b_end;
2181 /* Possible underflow of piv will wrap back to 0 before use. */
2183 struct ma_state *mas = wr_mas->mas;
2185 b_node->type = wr_mas->type;
2189 /* Copy start data up to insert. */
2190 mas_mab_cp(mas, 0, slot - 1, b_node, 0);
2191 b_end = b_node->b_end;
2192 piv = b_node->pivot[b_end - 1];
2196 if (piv + 1 < mas->index) {
2197 /* Handle range starting after old range */
2198 b_node->slot[b_end] = wr_mas->content;
2199 if (!wr_mas->content)
2200 b_node->gap[b_end] = mas->index - 1 - piv;
2201 b_node->pivot[b_end++] = mas->index - 1;
2204 /* Store the new entry. */
2205 mas->offset = b_end;
2206 b_node->slot[b_end] = wr_mas->entry;
2207 b_node->pivot[b_end] = mas->last;
2210 if (mas->last >= mas->max)
2213 /* Handle new range ending before old range ends */
2214 piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
2215 if (piv > mas->last) {
2216 if (piv == ULONG_MAX)
2217 mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
2219 if (offset_end != slot)
2220 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
2223 b_node->slot[++b_end] = wr_mas->content;
2224 if (!wr_mas->content)
2225 b_node->gap[b_end] = piv - mas->last + 1;
2226 b_node->pivot[b_end] = piv;
2229 slot = offset_end + 1;
2230 if (slot > wr_mas->node_end)
2233 /* Copy end data to the end of the node. */
2234 mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
2239 b_node->b_end = b_end;
2243 * mas_prev_sibling() - Find the previous node with the same parent.
2244 * @mas: the maple state
2246 * Return: True if there is a previous sibling, false otherwise.
2248 static inline bool mas_prev_sibling(struct ma_state *mas)
2250 unsigned int p_slot = mte_parent_slot(mas->node);
2252 if (mte_is_root(mas->node))
2259 mas->offset = p_slot - 1;
2265 * mas_next_sibling() - Find the next node with the same parent.
2266 * @mas: the maple state
2268 * Return: true if there is a next sibling, false otherwise.
2270 static inline bool mas_next_sibling(struct ma_state *mas)
2272 MA_STATE(parent, mas->tree, mas->index, mas->last);
2274 if (mte_is_root(mas->node))
2278 mas_ascend(&parent);
2279 parent.offset = mte_parent_slot(mas->node) + 1;
2280 if (parent.offset > mas_data_end(&parent))
2289 * mte_node_or_none() - Return the encoded node or MAS_NONE.
2290 * @enode: The encoded maple node.
2292 * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
2294 * Return: @enode or MAS_NONE
2296 static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
2301 return ma_enode_ptr(MAS_NONE);
2305 * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
2306 * @wr_mas: The maple write state
2308 * Uses mas_slot_locked() and does not need to worry about dead nodes.
2310 static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
2312 struct ma_state *mas = wr_mas->mas;
2313 unsigned char count;
2314 unsigned char offset;
2315 unsigned long index, min, max;
2317 if (unlikely(ma_is_dense(wr_mas->type))) {
2318 wr_mas->r_max = wr_mas->r_min = mas->index;
2319 mas->offset = mas->index = mas->min;
2323 wr_mas->node = mas_mn(wr_mas->mas);
2324 wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
2325 count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
2326 wr_mas->pivots, mas->max);
2327 offset = mas->offset;
2328 min = mas_safe_min(mas, wr_mas->pivots, offset);
2329 if (unlikely(offset == count))
2332 max = wr_mas->pivots[offset];
2334 if (unlikely(index <= max))
2337 if (unlikely(!max && offset))
2341 while (++offset < count) {
2342 max = wr_mas->pivots[offset];
2345 else if (unlikely(!max))
2354 wr_mas->r_max = max;
2355 wr_mas->r_min = min;
2356 wr_mas->offset_end = mas->offset = offset;
2360 * mas_topiary_range() - Add a range of slots to the topiary.
2361 * @mas: The maple state
2362 * @destroy: The topiary to add the slots (usually destroy)
2363 * @start: The starting slot inclusively
2364 * @end: The end slot inclusively
2366 static inline void mas_topiary_range(struct ma_state *mas,
2367 struct ma_topiary *destroy, unsigned char start, unsigned char end)
2370 unsigned char offset;
2372 MT_BUG_ON(mas->tree, mte_is_leaf(mas->node));
2373 slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
2374 for (offset = start; offset <= end; offset++) {
2375 struct maple_enode *enode = mas_slot_locked(mas, slots, offset);
2377 if (mte_dead_node(enode))
2380 mat_add(destroy, enode);
2385 * mast_topiary() - Add the portions of the tree to the removal list; either to
2386 * be freed or discarded (destroy walk).
2387 * @mast: The maple_subtree_state.
2389 static inline void mast_topiary(struct maple_subtree_state *mast)
2391 MA_WR_STATE(wr_mas, mast->orig_l, NULL);
2392 unsigned char r_start, r_end;
2393 unsigned char l_start, l_end;
2394 void __rcu **l_slots, **r_slots;
2396 wr_mas.type = mte_node_type(mast->orig_l->node);
2397 mast->orig_l->index = mast->orig_l->last;
2398 mas_wr_node_walk(&wr_mas);
2399 l_start = mast->orig_l->offset + 1;
2400 l_end = mas_data_end(mast->orig_l);
2402 r_end = mast->orig_r->offset;
2407 l_slots = ma_slots(mas_mn(mast->orig_l),
2408 mte_node_type(mast->orig_l->node));
2410 r_slots = ma_slots(mas_mn(mast->orig_r),
2411 mte_node_type(mast->orig_r->node));
2413 if ((l_start < l_end) &&
2414 mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) {
2418 if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) {
2423 if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node))
2426 /* At the node where left and right sides meet, add the parts between */
2427 if (mast->orig_l->node == mast->orig_r->node) {
2428 return mas_topiary_range(mast->orig_l, mast->destroy,
2432 /* mast->orig_r is different and consumed. */
2433 if (mte_is_leaf(mast->orig_r->node))
2436 if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end)))
2440 if (l_start <= l_end)
2441 mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end);
2443 if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start)))
2446 if (r_start <= r_end)
2447 mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end);
2451 * mast_rebalance_next() - Rebalance against the next node
2452 * @mast: The maple subtree state
2455 static inline void mast_rebalance_next(struct maple_subtree_state *mast)
2457 unsigned char b_end = mast->bn->b_end;
2459 mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
2461 mast->orig_r->last = mast->orig_r->max;
2465 * mast_rebalance_prev() - Rebalance against the previous node
2466 * @mast: The maple subtree state
2469 static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
2471 unsigned char end = mas_data_end(mast->orig_l) + 1;
2472 unsigned char b_end = mast->bn->b_end;
2474 mab_shift_right(mast->bn, end);
2475 mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
2476 mast->l->min = mast->orig_l->min;
2477 mast->orig_l->index = mast->orig_l->min;
2478 mast->bn->b_end = end + b_end;
2479 mast->l->offset += end;
2483 * mast_spanning_rebalance() - Rebalance nodes with the nearest neighbour,
2484 * favouring the node to the right. Check the nodes to the right, then the
2485 * left, at each level upwards until the root is reached. Free and destroy as needed.
2486 * Data is copied into the @mast->bn.
2487 * @mast: The maple_subtree_state.
2490 bool mast_spanning_rebalance(struct maple_subtree_state *mast)
2492 struct ma_state r_tmp = *mast->orig_r;
2493 struct ma_state l_tmp = *mast->orig_l;
2494 struct maple_enode *ancestor = NULL;
2495 unsigned char start, end;
2496 unsigned char depth = 0;
2501 mas_ascend(mast->orig_r);
2502 mas_ascend(mast->orig_l);
2505 (mast->orig_r->node == mast->orig_l->node)) {
2506 ancestor = mast->orig_r->node;
2507 end = mast->orig_r->offset - 1;
2508 start = mast->orig_l->offset + 1;
2511 if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
2513 ancestor = mast->orig_r->node;
2517 mast->orig_r->offset++;
2519 mas_descend(mast->orig_r);
2520 mast->orig_r->offset = 0;
2524 mast_rebalance_next(mast);
2526 unsigned char l_off = 0;
2527 struct maple_enode *child = r_tmp.node;
2530 if (ancestor == r_tmp.node)
2536 if (l_off < r_tmp.offset)
2537 mas_topiary_range(&r_tmp, mast->destroy,
2538 l_off, r_tmp.offset);
2540 if (l_tmp.node != child)
2541 mat_add(mast->free, child);
2543 } while (r_tmp.node != ancestor);
2545 *mast->orig_l = l_tmp;
2548 } else if (mast->orig_l->offset != 0) {
2550 ancestor = mast->orig_l->node;
2551 end = mas_data_end(mast->orig_l);
2554 mast->orig_l->offset--;
2556 mas_descend(mast->orig_l);
2557 mast->orig_l->offset =
2558 mas_data_end(mast->orig_l);
2562 mast_rebalance_prev(mast);
2564 unsigned char r_off;
2565 struct maple_enode *child = l_tmp.node;
2568 if (ancestor == l_tmp.node)
2571 r_off = mas_data_end(&l_tmp);
2573 if (l_tmp.offset < r_off)
2576 if (l_tmp.offset < r_off)
2577 mas_topiary_range(&l_tmp, mast->destroy,
2578 l_tmp.offset, r_off);
2580 if (r_tmp.node != child)
2581 mat_add(mast->free, child);
2583 } while (l_tmp.node != ancestor);
2585 *mast->orig_r = r_tmp;
2588 } while (!mte_is_root(mast->orig_r->node));
2590 *mast->orig_r = r_tmp;
2591 *mast->orig_l = l_tmp;
2596 * mast_ascend_free() - Add current original maple state nodes to the free list
2598 * @mast: the maple subtree state.
2600 * Ascend the original left and right sides and add the previous nodes to the
2601 * free list. Set the slots to point to the correct location in the new nodes.
2604 mast_ascend_free(struct maple_subtree_state *mast)
2606 MA_WR_STATE(wr_mas, mast->orig_r, NULL);
2607 struct maple_enode *left = mast->orig_l->node;
2608 struct maple_enode *right = mast->orig_r->node;
2610 mas_ascend(mast->orig_l);
2611 mas_ascend(mast->orig_r);
2612 mat_add(mast->free, left);
2615 mat_add(mast->free, right);
2617 mast->orig_r->offset = 0;
2618 mast->orig_r->index = mast->r->max;
2619 /* last should be larger than or equal to index */
2620 if (mast->orig_r->last < mast->orig_r->index)
2621 mast->orig_r->last = mast->orig_r->index;
2623 * The node may not contain the value so set the slot to ensure all
2624 * of the node's contents are freed or destroyed.
2626 wr_mas.type = mte_node_type(mast->orig_r->node);
2627 mas_wr_node_walk(&wr_mas);
2628 /* Set up the left side of things */
2629 mast->orig_l->offset = 0;
2630 mast->orig_l->index = mast->l->min;
2631 wr_mas.mas = mast->orig_l;
2632 wr_mas.type = mte_node_type(mast->orig_l->node);
2633 mas_wr_node_walk(&wr_mas);
2635 mast->bn->type = wr_mas.type;
2639 * mas_new_ma_node() - Create and return a new maple node. Helper function.
2640 * @mas: the maple state with the allocations.
2641 * @b_node: the maple_big_node with the type encoding.
2643 * Use the node type from the maple_big_node to allocate a new node from the
2644 * ma_state. This function exists mainly for code readability.
2646 * Return: A new maple encoded node
2648 static inline struct maple_enode
2649 *mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
2651 return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
2655 * mas_mab_to_node() - Set up right and middle nodes
2657 * @mas: the maple state that contains the allocations.
2658 * @b_node: the node which contains the data.
2659 * @left: The pointer which will have the left node
2660 * @right: The pointer which may have the right node
2661 * @middle: the pointer which may have the middle node (rare)
2662 * @mid_split: the split location for the middle node
2664 * Return: the split of left.
2666 static inline unsigned char mas_mab_to_node(struct ma_state *mas,
2667 struct maple_big_node *b_node, struct maple_enode **left,
2668 struct maple_enode **right, struct maple_enode **middle,
2669 unsigned char *mid_split, unsigned long min)
2671 unsigned char split = 0;
2672 unsigned char slot_count = mt_slots[b_node->type];
2674 *left = mas_new_ma_node(mas, b_node);
2679 if (b_node->b_end < slot_count) {
2680 split = b_node->b_end;
2682 split = mab_calc_split(mas, b_node, mid_split, min);
2683 *right = mas_new_ma_node(mas, b_node);
2687 *middle = mas_new_ma_node(mas, b_node);
2694 * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
2695 * pointer.
2696 * @b_node: The big node to add the entry to
2697 * @mas: The maple state to get the pivot (mas->max)
2698 * @entry: The entry to add; if NULL, nothing happens.
2700 static inline void mab_set_b_end(struct maple_big_node *b_node,
2701 struct ma_state *mas,
2707 b_node->slot[b_node->b_end] = entry;
2708 if (mt_is_alloc(mas->tree))
2709 b_node->gap[b_node->b_end] = mas_max_gap(mas);
2710 b_node->pivot[b_node->b_end++] = mas->max;
2714 * mas_set_split_parent() - combine_then_separate helper function. Sets the parent
2715 * of @mas->node to either @left or @right, depending on @slot and @split
2717 * @mas: The maple state with the node that needs a parent
2718 * @left: Possible parent 1
2719 * @right: Possible parent 2
2720 * @slot: The slot the mas->node was placed in
2721 * @split: The split location between @left and @right
2723 static inline void mas_set_split_parent(struct ma_state *mas,
2724 struct maple_enode *left,
2725 struct maple_enode *right,
2726 unsigned char *slot, unsigned char split)
2728 if (mas_is_none(mas))
2731 if ((*slot) <= split)
2732 mte_set_parent(mas->node, left, *slot);
2734 mte_set_parent(mas->node, right, (*slot) - split - 1);
2740 * mte_mid_split_check() - Check if the next node passes the mid-split
2741 * @l: Pointer to the left encoded maple node.
2742 * @r: Pointer to the right encoded maple node.
2743 * @right: The right-most encoded maple node.
2744 * @slot: The offset being checked.
2745 * @split: The split location.
2746 * @mid_split: The middle split.
2748 static inline void mte_mid_split_check(struct maple_enode **l,
2749 struct maple_enode **r,
2750 struct maple_enode *right,
2752 unsigned char *split,
2753 unsigned char mid_split)
2758 if (slot < mid_split)
2767 * mast_set_split_parents() - Helper function to set three nodes' parents. Slot
2768 * is taken from @mast->l.
2769 * @mast: The maple subtree state
2770 * @left: The left node
2771 * @right: The right node
2772 * @split: The split location
 * @middle: The middle node
 * @mid_split: The middle split location
2774 static inline void mast_set_split_parents(struct maple_subtree_state *mast,
2775 struct maple_enode *left,
2776 struct maple_enode *middle,
2777 struct maple_enode *right,
2778 unsigned char split,
2779 unsigned char mid_split)
2782 struct maple_enode *l = left;
2783 struct maple_enode *r = right;
2785 if (mas_is_none(mast->l))
2791 slot = mast->l->offset;
2793 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2794 mas_set_split_parent(mast->l, l, r, &slot, split);
2796 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2797 mas_set_split_parent(mast->m, l, r, &slot, split);
2799 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2800 mas_set_split_parent(mast->r, l, r, &slot, split);
2804 * mas_wmb_replace() - Write memory barrier and replace
2805 * @mas: The maple state
2806 * @free: the maple topiary list of nodes to free
2807 * @destroy: The maple topiary list of nodes to destroy (walk and free)
2809 * Updates gap as necessary.
2811 static inline void mas_wmb_replace(struct ma_state *mas,
2812 struct ma_topiary *free,
2813 struct ma_topiary *destroy)
2815 /* All nodes must see old data as dead prior to replacing that data */
2816 smp_wmb(); /* Needed for RCU */
2818 /* Insert the new data in the tree */
2819 mas_replace(mas, true);
2821 if (!mte_is_leaf(mas->node))
2822 mas_descend_adopt(mas);
2824 mas_mat_free(mas, free);
2827 mas_mat_destroy(mas, destroy);
2829 if (mte_is_leaf(mas->node))
2832 mas_update_gap(mas);
2836 * mast_new_root() - Set a new tree root during subtree creation
2837 * @mast: The maple subtree state
2838 * @mas: The maple state
2840 static inline void mast_new_root(struct maple_subtree_state *mast,
2841 struct ma_state *mas)
2843 mas_mn(mast->l)->parent =
2844 ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT));
2845 if (!mte_dead_node(mast->orig_l->node) &&
2846 !mte_is_root(mast->orig_l->node)) {
2848 mast_ascend_free(mast);
2850 } while (!mte_is_root(mast->orig_l->node));
2852 if ((mast->orig_l->node != mas->node) &&
2853 (mast->l->depth > mas_mt_height(mas))) {
2854 mat_add(mast->free, mas->node);
2859 * mast_cp_to_nodes() - Copy data out to nodes.
2860 * @mast: The maple subtree state
2861 * @left: The left encoded maple node
2862 * @middle: The middle encoded maple node
2863 * @right: The right encoded maple node
2864 * @split: The location to split between left and (middle ? middle : right)
2865 * @mid_split: The location to split between middle and right.
2867 static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
2868 struct maple_enode *left, struct maple_enode *middle,
2869 struct maple_enode *right, unsigned char split, unsigned char mid_split)
2871 bool new_lmax = true;
2873 mast->l->node = mte_node_or_none(left);
2874 mast->m->node = mte_node_or_none(middle);
2875 mast->r->node = mte_node_or_none(right);
2877 mast->l->min = mast->orig_l->min;
2878 if (split == mast->bn->b_end) {
2879 mast->l->max = mast->orig_r->max;
2883 mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
2886 mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
2887 mast->m->min = mast->bn->pivot[split] + 1;
2891 mast->r->max = mast->orig_r->max;
2893 mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
2894 mast->r->min = mast->bn->pivot[split] + 1;
2899 * mast_combine_cp_left() - Copy the original left side of the tree into the
2900 * combined data set in the maple subtree state big node.
2901 * @mast: The maple subtree state
2903 static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
2905 unsigned char l_slot = mast->orig_l->offset;
2910 mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
2914 * mast_combine_cp_right() - Copy the original right side of the tree into the
2915 * combined data set in the maple subtree state big node.
2916 * @mast: The maple subtree state
2918 static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
2920 if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
2923 mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
2924 mt_slot_count(mast->orig_r->node), mast->bn,
2926 mast->orig_r->last = mast->orig_r->max;
2930 * mast_sufficient() - Check if the maple subtree state has enough data in the big
2931 * node to create at least one sufficient node
2932 * @mast: the maple subtree state
2934 static inline bool mast_sufficient(struct maple_subtree_state *mast)
2936 if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
2943 * mast_overflow() - Check if there is too much data in the subtree state for a
2944 * single node or two nodes.
2945 * @mast: The maple subtree state
2947 static inline bool mast_overflow(struct maple_subtree_state *mast)
2949 if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
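/*
 * mtree_range_walk() - Walk down to the leaf for mas->index, tracking the
 * range of the entry found.
 * @mas: The maple state
 *
 * On success, mas->min and mas->max are set to the range of the leaf that
 * holds the entry and mas->offset to the entry's slot within that leaf.
 *
 * Return: The entry at mas->index, or %NULL on a dead node.
 */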
2955 static inline void *mtree_range_walk(struct ma_state *mas)
2957 unsigned long *pivots;
2958 unsigned char offset;
2959 struct maple_node *node;
2960 struct maple_enode *next, *last;
2961 enum maple_type type;
2964 unsigned long max, min;
2965 unsigned long prev_max, prev_min;
2973 node = mte_to_node(next);
2974 type = mte_node_type(next);
2975 pivots = ma_pivots(node, type);
2976 end = ma_data_end(node, type, pivots, max);
2977 if (unlikely(ma_dead_node(node)))
2980 if (pivots[offset] >= mas->index) {
2983 max = pivots[offset];
2989 } while ((offset < end) && (pivots[offset] < mas->index));
2992 min = pivots[offset - 1] + 1;
2994 if (likely(offset < end && pivots[offset]))
2995 max = pivots[offset];
2998 slots = ma_slots(node, type);
2999 next = mt_slot(mas->tree, slots, offset);
3000 if (unlikely(ma_dead_node(node)))
3002 } while (!ma_is_leaf(type));
3004 mas->offset = offset;
3007 mas->min = prev_min;
3008 mas->max = prev_max;
3010 return (void *) next;
3018 * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
3019 * @mas: The starting maple state
3020 * @mast: The maple_subtree_state, keeps track of 4 maple states.
3021 * @count: The estimated count of iterations needed.
3023 * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
3024 * is hit. First @b_node is split into two entries which are inserted into the
3025 * next iteration of the loop. @b_node is returned populated with the final
3026 * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the
3027 * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
3028 * to account for what has been copied into the new sub-tree. The update of
3029 * orig_l_mas->last is used in mas_consume to find the slots that will need to
3030 * be either freed or destroyed. orig_l_mas->depth keeps track of the height of
3031 * the new sub-tree in case the sub-tree becomes the full tree.
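 *
 * A minimal sketch of a store that reaches this path, assuming @ptr and the
 * loop counter are defined and that the stored ranges occupy several leaves:
 *
 *	DEFINE_MTREE(tree);
 *
 *	for (i = 0; i < 100; i++)
 *		mtree_store_range(&tree, i * 10, i * 10 + 5, ptr, GFP_KERNEL);
 *	mtree_store_range(&tree, 55, 955, ptr, GFP_KERNEL);
 *
 * The last store starts and ends in different leaves, so it is handled as a
 * spanning store and the affected subtree is rebuilt here.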
3033 * Return: the number of elements in b_node during the last loop.
3035 static int mas_spanning_rebalance(struct ma_state *mas,
3036 struct maple_subtree_state *mast, unsigned char count)
3038 unsigned char split, mid_split;
3039 unsigned char slot = 0;
3040 struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
3042 MA_STATE(l_mas, mas->tree, mas->index, mas->index);
3043 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3044 MA_STATE(m_mas, mas->tree, mas->index, mas->index);
3045 MA_TOPIARY(free, mas->tree);
3046 MA_TOPIARY(destroy, mas->tree);
3049 * The tree needs to be rebalanced and leaves need to be kept at the same level.
3050 * Rebalancing is done by use of the ``struct ma_topiary``.
3056 mast->destroy = &destroy;
3057 l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
3059 /* Check if this is not root and has sufficient data. */
3060 if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
3061 unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
3062 mast_spanning_rebalance(mast);
3064 mast->orig_l->depth = 0;
3067 * Each level of the tree is examined and balanced, pushing data to the left or
3068 * right, or rebalancing against left or right nodes is employed to avoid
3069 * rippling up the tree to limit the amount of churn. Once a new sub-section of
3070 * the tree is created, there may be a mix of new and old nodes. The old nodes
3071 * will have the incorrect parent pointers and currently be in two trees: the
3072 * original tree and the partially new tree. To remedy the parent pointers in
3073 * the old tree, the new data is swapped into the active tree and a walk down
3074 * the tree is performed and the parent pointers are updated.
3075 * See mas_descend_adopt() for more information.
3079 mast->bn->type = mte_node_type(mast->orig_l->node);
3080 split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
3081 &mid_split, mast->orig_l->min);
3082 mast_set_split_parents(mast, left, middle, right, split,
3084 mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
3087 * Copy data from next level in the tree to mast->bn from next
3090 memset(mast->bn, 0, sizeof(struct maple_big_node));
3091 mast->bn->type = mte_node_type(left);
3092 mast->orig_l->depth++;
3094 /* Root already stored in l->node. */
3095 if (mas_is_root_limits(mast->l))
3098 mast_ascend_free(mast);
3099 mast_combine_cp_left(mast);
3100 l_mas.offset = mast->bn->b_end;
3101 mab_set_b_end(mast->bn, &l_mas, left);
3102 mab_set_b_end(mast->bn, &m_mas, middle);
3103 mab_set_b_end(mast->bn, &r_mas, right);
3105 /* Copy anything necessary out of the right node. */
3106 mast_combine_cp_right(mast);
3108 mast->orig_l->last = mast->orig_l->max;
3110 if (mast_sufficient(mast))
3113 if (mast_overflow(mast))
3116 /* May be a new root stored in mast->bn */
3117 if (mas_is_root_limits(mast->orig_l))
3120 mast_spanning_rebalance(mast);
3122 /* rebalancing from other nodes may require another loop. */
3127 l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
3128 mte_node_type(mast->orig_l->node));
3129 mast->orig_l->depth++;
3130 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
3131 mte_set_parent(left, l_mas.node, slot);
3133 mte_set_parent(middle, l_mas.node, ++slot);
3136 mte_set_parent(right, l_mas.node, ++slot);
3138 if (mas_is_root_limits(mast->l)) {
3140 mast_new_root(mast, mas);
3142 mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
3145 if (!mte_dead_node(mast->orig_l->node))
3146 mat_add(&free, mast->orig_l->node);
3148 mas->depth = mast->orig_l->depth;
3149 *mast->orig_l = l_mas;
3150 mte_set_node_dead(mas->node);
3152 /* Set up mas for insertion. */
3153 mast->orig_l->depth = mas->depth;
3154 mast->orig_l->alloc = mas->alloc;
3155 *mas = *mast->orig_l;
3156 mas_wmb_replace(mas, &free, &destroy);
3157 mtree_range_walk(mas);
3158 return mast->bn->b_end;
3162 * mas_rebalance() - Rebalance a given node.
3163 * @mas: The maple state
3164 * @b_node: The big maple node.
3166 * Rebalance two nodes into a single node or two new nodes that are sufficient.
3167 * Continue upwards until tree is sufficient.
3169 * Return: the number of elements in b_node during the last loop.
3171 static inline int mas_rebalance(struct ma_state *mas,
3172 struct maple_big_node *b_node)
3174 char empty_count = mas_mt_height(mas);
3175 struct maple_subtree_state mast;
3176 unsigned char shift, b_end = ++b_node->b_end;
3178 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3179 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3181 trace_ma_op(__func__, mas);
3184 * Rebalancing occurs if a node is insufficient. Data is rebalanced
3185 * against the node to the right if it exists, otherwise the node to the
3186 * left of this node is rebalanced against this node. If rebalancing
3187 * causes just one node to be produced instead of two, then the parent
3188 * is also examined and rebalanced if it is insufficient. Every level
3189 * tries to combine the data in the same way. If one node contains the
3190 * entire range of the tree, then that node is used as a new root node.
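 *
 * For example, assuming 16-slot leaves with a minimum of 6: a leaf left
 * with 5 entries is insufficient; if its right sibling holds 7 entries,
 * the combined 12 fit in a single new node, the sibling is consumed, and
 * the parent, having lost a child, is checked for sufficiency in turn.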
3192 mas_node_count(mas, 1 + empty_count * 3);
3193 if (mas_is_err(mas))
3196 mast.orig_l = &l_mas;
3197 mast.orig_r = &r_mas;
3199 mast.bn->type = mte_node_type(mas->node);
3201 l_mas = r_mas = *mas;
3203 if (mas_next_sibling(&r_mas)) {
3204 mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
3205 r_mas.last = r_mas.index = r_mas.max;
3207 mas_prev_sibling(&l_mas);
3208 shift = mas_data_end(&l_mas) + 1;
3209 mab_shift_right(b_node, shift);
3210 mas->offset += shift;
3211 mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
3212 b_node->b_end = shift + b_end;
3213 l_mas.index = l_mas.last = l_mas.min;
3216 return mas_spanning_rebalance(mas, &mast, empty_count);
3220 * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
3221 * tree.
3222 * @mas: The maple state
3223 * @end: The end of the left-most node.
3225 * During a mass-insert event (such as forking), it may be necessary to
3226 * rebalance the left-most node when it is not sufficient.
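 *
 * A minimal sketch of the bulk-insert usage that can lead here, assuming
 * @ptr and the loop counter are defined and allocation errors are ignored:
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *
 *	mas_lock(&mas);
 *	mas_expected_entries(&mas, 1000);
 *	for (i = 0; i < 1000; i++) {
 *		mas_set_range(&mas, i * 10, i * 10 + 9);
 *		mas_store_gfp(&mas, ptr, GFP_KERNEL);
 *	}
 *	mas_destroy(&mas);
 *	mas_unlock(&mas);
 *
 * mas_destroy() completes the bulk insert and calls this function if the
 * left-most node was left without sufficient data.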
3228 static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
3230 enum maple_type mt = mte_node_type(mas->node);
3231 struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
3232 struct maple_enode *eparent;
3233 unsigned char offset, tmp, split = mt_slots[mt] / 2;
3234 void __rcu **l_slots, **slots;
3235 unsigned long *l_pivs, *pivs, gap;
3236 bool in_rcu = mt_in_rcu(mas->tree);
3238 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3241 mas_prev_sibling(&l_mas);
3245 /* Allocate for both left and right as well as parent. */
3246 mas_node_count(mas, 3);
3247 if (mas_is_err(mas))
3250 newnode = mas_pop_node(mas);
3256 newnode->parent = node->parent;
3257 slots = ma_slots(newnode, mt);
3258 pivs = ma_pivots(newnode, mt);
3259 left = mas_mn(&l_mas);
3260 l_slots = ma_slots(left, mt);
3261 l_pivs = ma_pivots(left, mt);
3262 if (!l_slots[split])
3264 tmp = mas_data_end(&l_mas) - split;
3266 memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
3267 memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
3268 pivs[tmp] = l_mas.max;
3269 memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
3270 memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
3272 l_mas.max = l_pivs[split];
3273 mas->min = l_mas.max + 1;
3274 eparent = mt_mk_node(mte_parent(l_mas.node),
3275 mas_parent_enum(&l_mas, l_mas.node));
3278 unsigned char max_p = mt_pivots[mt];
3279 unsigned char max_s = mt_slots[mt];
3282 memset(pivs + tmp, 0,
3283 sizeof(unsigned long) * (max_p - tmp));
3285 if (tmp < mt_slots[mt])
3286 memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3288 memcpy(node, newnode, sizeof(struct maple_node));
3289 ma_set_meta(node, mt, 0, tmp - 1);
3290 mte_set_pivot(eparent, mte_parent_slot(l_mas.node),
3293 /* Remove data from l_pivs. */
3295 memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
3296 memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3297 ma_set_meta(left, mt, 0, split);
3302 /* RCU requires replacing both l_mas, mas, and parent. */
3303 mas->node = mt_mk_node(newnode, mt);
3304 ma_set_meta(newnode, mt, 0, tmp);
3306 new_left = mas_pop_node(mas);
3307 new_left->parent = left->parent;
3308 mt = mte_node_type(l_mas.node);
3309 slots = ma_slots(new_left, mt);
3310 pivs = ma_pivots(new_left, mt);
3311 memcpy(slots, l_slots, sizeof(void *) * split);
3312 memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
3313 ma_set_meta(new_left, mt, 0, split);
3314 l_mas.node = mt_mk_node(new_left, mt);
3316 /* replace parent. */
3317 offset = mte_parent_slot(mas->node);
3318 mt = mas_parent_enum(&l_mas, l_mas.node);
3319 parent = mas_pop_node(mas);
3320 slots = ma_slots(parent, mt);
3321 pivs = ma_pivots(parent, mt);
3322 memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node));
3323 rcu_assign_pointer(slots[offset], mas->node);
3324 rcu_assign_pointer(slots[offset - 1], l_mas.node);
3325 pivs[offset - 1] = l_mas.max;
3326 eparent = mt_mk_node(parent, mt);
3328 gap = mas_leaf_max_gap(mas);
3329 mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
3330 gap = mas_leaf_max_gap(&l_mas);
3331 mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
3335 mas_replace(mas, false);
3337 mas_update_gap(mas);
3341 * mas_split_final_node() - Split the final node in a subtree operation.
3342 * @mast: the maple subtree state
3343 * @mas: The maple state
3344 * @height: The height of the tree in case it's a new root.
3346 static inline bool mas_split_final_node(struct maple_subtree_state *mast,
3347 struct ma_state *mas, int height)
3349 struct maple_enode *ancestor;
3351 if (mte_is_root(mas->node)) {
3352 if (mt_is_alloc(mas->tree))
3353 mast->bn->type = maple_arange_64;
3355 mast->bn->type = maple_range_64;
3356 mas->depth = height;
3359 * Only a single node is used here; it could be the root.
3360 * The maple_big_node data should just fit in a single node.
3362 ancestor = mas_new_ma_node(mas, mast->bn);
3363 mte_set_parent(mast->l->node, ancestor, mast->l->offset);
3364 mte_set_parent(mast->r->node, ancestor, mast->r->offset);
3365 mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
3367 mast->l->node = ancestor;
3368 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
3369 mas->offset = mast->bn->b_end - 1;
3374 * mast_fill_bnode() - Copy data into the big node in the subtree state
3375 * @mast: The maple subtree state
3376 * @mas: the maple state
3377 * @skip: The number of entries to skip for new node insertion.
3379 static inline void mast_fill_bnode(struct maple_subtree_state *mast,
3380 struct ma_state *mas,
3384 struct maple_enode *old = mas->node;
3385 unsigned char split;
3387 memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
3388 memset(mast->bn->slot, 0, sizeof(void *) * ARRAY_SIZE(mast->bn->slot));
3389 memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
3390 mast->bn->b_end = 0;
3392 if (mte_is_root(mas->node)) {
3396 mat_add(mast->free, old);
3397 mas->offset = mte_parent_slot(mas->node);
3400 if (cp && mast->l->offset)
3401 mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
3403 split = mast->bn->b_end;
3404 mab_set_b_end(mast->bn, mast->l, mast->l->node);
3405 mast->r->offset = mast->bn->b_end;
3406 mab_set_b_end(mast->bn, mast->r, mast->r->node);
3407 if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
3411 mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
3412 mast->bn, mast->bn->b_end);
3415 mast->bn->type = mte_node_type(mas->node);
3419 * mast_split_data() - Split the data in the subtree state big node into regular
3420 * nodes.
3421 * @mast: The maple subtree state
3422 * @mas: The maple state
3423 * @split: The location to split the big node
3425 static inline void mast_split_data(struct maple_subtree_state *mast,
3426 struct ma_state *mas, unsigned char split)
3428 unsigned char p_slot;
3430 mab_mas_cp(mast->bn, 0, split, mast->l, true);
3431 mte_set_pivot(mast->r->node, 0, mast->r->max);
3432 mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
3433 mast->l->offset = mte_parent_slot(mas->node);
3434 mast->l->max = mast->bn->pivot[split];
3435 mast->r->min = mast->l->max + 1;
3436 if (mte_is_leaf(mas->node))
3439 p_slot = mast->orig_l->offset;
3440 mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
3442 mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
3447 * mas_push_data() - Instead of splitting a node, it is beneficial to push the
3448 * data to the right or left node if there is room.
3449 * @mas: The maple state
3450 * @height: The current height of the maple state
3451 * @mast: The maple subtree state
3452 * @left: Push left or not.
3454 * Keeping the height of the tree low means faster lookups.
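 *
 * For example, assuming 16-slot nodes, pushing is only possible while the
 * combined data of the node and its sibling stays below 2 * 16 - 2 = 30
 * slots (one less again for leaves); otherwise the split proceeds.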
3456 * Return: True if pushed, false otherwise.
3458 static inline bool mas_push_data(struct ma_state *mas, int height,
3459 struct maple_subtree_state *mast, bool left)
3461 unsigned char slot_total = mast->bn->b_end;
3462 unsigned char end, space, split;
3464 MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
3466 tmp_mas.depth = mast->l->depth;
3468 if (left && !mas_prev_sibling(&tmp_mas))
3470 else if (!left && !mas_next_sibling(&tmp_mas))
3473 end = mas_data_end(&tmp_mas);
3475 space = 2 * mt_slot_count(mas->node) - 2;
3476 /* -2 instead of -1 to ensure there isn't a triple split */
3477 if (ma_is_leaf(mast->bn->type))
3480 if (mas->max == ULONG_MAX)
3483 if (slot_total >= space)
3486 /* Get the data; Fill mast->bn */
3489 mab_shift_right(mast->bn, end + 1);
3490 mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
3491 mast->bn->b_end = slot_total + 1;
3493 mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
3496 /* Configure mast for splitting of mast->bn */
3497 split = mt_slots[mast->bn->type] - 2;
3499 /* Switch mas to prev node */
3500 mat_add(mast->free, mas->node);
3502 /* Start using mast->l for the left side. */
3503 tmp_mas.node = mast->l->node;
3506 mat_add(mast->free, tmp_mas.node);
3507 tmp_mas.node = mast->r->node;
3509 split = slot_total - split;
3511 split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
3512 /* Update parent slot for split calculation. */
3514 mast->orig_l->offset += end + 1;
3516 mast_split_data(mast, mas, split);
3517 mast_fill_bnode(mast, mas, 2);
3518 mas_split_final_node(mast, mas, height + 1);
3523 * mas_split() - Split data that is too big for one node into two.
3524 * @mas: The maple state
3525 * @b_node: The maple big node
3526 * Return: 1 on success, 0 on failure.
3528 static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
3531 struct maple_subtree_state mast;
3533 unsigned char mid_split, split = 0;
3536 * Splitting is handled differently from any other B-tree; the Maple
3537 * Tree splits upwards. Splitting up means that the split operation
3538 * occurs when the walk of the tree hits the leaves and not on the way
3539 * down. The reason for splitting up is that it is impossible to know
3540 * how much space will be needed until the leaf is (or leaves are)
3541 * reached. Since overwriting data is allowed and a range could
3542 * overwrite more than one range or result in changing one entry into 3
3543 * entries, it is impossible to know if a split is required until the
3546 * Splitting is a balancing act between keeping allocations to a minimum
3547 * and avoiding a 'jitter' event where a tree is expanded to make room
3548 * for an entry followed by a contraction when the entry is removed. To
3549 * accomplish the balance, there are empty slots remaining in both left
3550 * and right nodes after a split.
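 *
 * A minimal sketch, assuming a valid @ptr: repeatedly storing small disjoint
 * ranges eventually fills a leaf, and the store that no longer fits takes
 * this path:
 *
 *	for (i = 0; i < 20; i++)
 *		mtree_store_range(&tree, i * 2, i * 2 + 1, ptr, GFP_KERNEL);
 *
 * The overflowing store splits the leaf data plus the new entry into two
 * leaves that each keep free slots, and the new pivot is pushed into the
 * parent.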
3552 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3553 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3554 MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
3555 MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
3556 MA_TOPIARY(mat, mas->tree);
3558 trace_ma_op(__func__, mas);
3559 mas->depth = mas_mt_height(mas);
3560 /* Allocation failures will happen early. */
3561 mas_node_count(mas, 1 + mas->depth * 2);
3562 if (mas_is_err(mas))
3567 mast.orig_l = &prev_l_mas;
3568 mast.orig_r = &prev_r_mas;
3572 while (height++ <= mas->depth) {
3573 if (mt_slots[b_node->type] > b_node->b_end) {
3574 mas_split_final_node(&mast, mas, height);
3578 l_mas = r_mas = *mas;
3579 l_mas.node = mas_new_ma_node(mas, b_node);
3580 r_mas.node = mas_new_ma_node(mas, b_node);
3582 * Another way that 'jitter' is avoided is to terminate a split up early if the
3583 * left or right node has space to spare. This is referred to as "pushing left"
3584 * or "pushing right" and is similar to the B* tree, except the nodes left or
3585 * right can rarely be reused due to RCU, but the ripple upwards is halted which
3586 * is a significant savings.
3588 /* Try to push left. */
3589 if (mas_push_data(mas, height, &mast, true))
3592 /* Try to push right. */
3593 if (mas_push_data(mas, height, &mast, false))
3596 split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
3597 mast_split_data(&mast, mas, split);
3599 * Usually correct, mab_mas_cp in the above call overwrites
3600 * r->max.
3601 */
3602 mast.r->max = mas->max;
3603 mast_fill_bnode(&mast, mas, 1);
3604 prev_l_mas = *mast.l;
3605 prev_r_mas = *mast.r;
3608 /* Set the original node as dead */
3609 mat_add(mast.free, mas->node);
3610 mas->node = l_mas.node;
3611 mas_wmb_replace(mas, mast.free, NULL);
3612 mtree_range_walk(mas);
3617 * mas_reuse_node() - Reuse the node to store the data.
3618 * @wr_mas: The maple write state
3619 * @bn: The maple big node
3620 * @end: The end of the data.
3622 * Will always return false in RCU mode.
3624 * Return: True if node was reused, false otherwise.
3626 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
3627 struct maple_big_node *bn, unsigned char end)
3629 /* Need to be rcu safe. */
3630 if (mt_in_rcu(wr_mas->mas->tree))
3633 if (end > bn->b_end) {
3634 int clear = mt_slots[wr_mas->type] - bn->b_end;
3636 memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
3637 memset(wr_mas->pivots + bn->b_end, 0, sizeof(unsigned long) * clear);
3639 mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
3644 * mas_commit_b_node() - Commit the big node into the tree.
3645 * @wr_mas: The maple write state
3646 * @b_node: The maple big node
3647 * @end: The end of the data.
3649 static inline int mas_commit_b_node(struct ma_wr_state *wr_mas,
3650 struct maple_big_node *b_node, unsigned char end)
3652 struct maple_node *node;
3653 unsigned char b_end = b_node->b_end;
3654 enum maple_type b_type = b_node->type;
3656 if ((b_end < mt_min_slots[b_type]) &&
3657 (!mte_is_root(wr_mas->mas->node)) &&
3658 (mas_mt_height(wr_mas->mas) > 1))
3659 return mas_rebalance(wr_mas->mas, b_node);
3661 if (b_end >= mt_slots[b_type])
3662 return mas_split(wr_mas->mas, b_node);
3664 if (mas_reuse_node(wr_mas, b_node, end))
3667 mas_node_count(wr_mas->mas, 1);
3668 if (mas_is_err(wr_mas->mas))
3671 node = mas_pop_node(wr_mas->mas);
3672 node->parent = mas_mn(wr_mas->mas)->parent;
3673 wr_mas->mas->node = mt_mk_node(node, b_type);
3674 mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
3675 mas_replace(wr_mas->mas, false);
3677 mas_update_gap(wr_mas->mas);
3682 * mas_root_expand() - Expand a root to a node
3683 * @mas: The maple state
3684 * @entry: The entry to store into the tree
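 *
 * For example, the first store of @entry to the range 5-10 in an empty tree
 * lands here: the resulting leaf root holds %NULL at slot 0 with pivot 4 and
 * @entry at slot 1 with pivot 10.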
3686 static inline int mas_root_expand(struct ma_state *mas, void *entry)
3688 void *contents = mas_root_locked(mas);
3689 enum maple_type type = maple_leaf_64;
3690 struct maple_node *node;
3692 unsigned long *pivots;
3695 mas_node_count(mas, 1);
3696 if (unlikely(mas_is_err(mas)))
3699 node = mas_pop_node(mas);
3700 pivots = ma_pivots(node, type);
3701 slots = ma_slots(node, type);
3702 node->parent = ma_parent_ptr(
3703 ((unsigned long)mas->tree | MA_ROOT_PARENT));
3704 mas->node = mt_mk_node(node, type);
3708 rcu_assign_pointer(slots[slot], contents);
3709 if (likely(mas->index > 1))
3712 pivots[slot++] = mas->index - 1;
3715 rcu_assign_pointer(slots[slot], entry);
3717 pivots[slot] = mas->last;
3718 if (mas->last != ULONG_MAX)
3721 mas_set_height(mas);
3722 ma_set_meta(node, maple_leaf_64, 0, slot);
3723 /* swap the new root into the tree */
3724 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3728 static inline void mas_store_root(struct ma_state *mas, void *entry)
3730 if (likely((mas->last != 0) || (mas->index != 0)))
3731 mas_root_expand(mas, entry);
3732 else if (((unsigned long) (entry) & 3) == 2)
3733 mas_root_expand(mas, entry);
3735 rcu_assign_pointer(mas->tree->ma_root, entry);
3736 mas->node = MAS_START;
3741 * mas_is_span_wr() - Check if the write needs to be treated as a write that
3742 * spans the node.
3743 * @wr_mas: The maple write state
3748 * Spanning writes are writes that start in one node and end in another OR if
3749 * the write of a %NULL will cause the node to end with a %NULL.
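 *
 * For example, in a leaf covering 10-20, a write to 15-25 starts in this
 * node and ends in the next, so it spans. A %NULL written to 15-20 makes
 * the node end in %NULL and is also treated as spanning so the %NULLs can
 * be combined by a rebalance.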
3751 * Return: True if this is a spanning write, false otherwise.
3753 static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
3756 unsigned long last = wr_mas->mas->last;
3757 unsigned long piv = wr_mas->r_max;
3758 enum maple_type type = wr_mas->type;
3759 void *entry = wr_mas->entry;
3761 /* Contained in this pivot */
3765 max = wr_mas->mas->max;
3766 if (unlikely(ma_is_leaf(type))) {
3767 /* Fits in the node, but may span slots. */
3771 /* Writes to the end of the node but not null. */
3772 if ((last == max) && entry)
3776 * Writing ULONG_MAX is not a spanning write regardless of the
3777 * value being written as long as the range fits in the node.
3779 if ((last == ULONG_MAX) && (last == max))
3781 } else if (piv == last) {
3785 /* Detect spanning store wr walk */
3786 if (last == ULONG_MAX)
3790 trace_ma_write(__func__, wr_mas->mas, piv, entry);
3795 static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
3797 wr_mas->type = mte_node_type(wr_mas->mas->node);
3798 mas_wr_node_walk(wr_mas);
3799 wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
3802 static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
3804 wr_mas->mas->max = wr_mas->r_max;
3805 wr_mas->mas->min = wr_mas->r_min;
3806 wr_mas->mas->node = wr_mas->content;
3807 wr_mas->mas->offset = 0;
3808 wr_mas->mas->depth++;
3811 * mas_wr_walk() - Walk the tree for a write.
3812 * @wr_mas: The maple write state
3814 * Uses mas_slot_locked() and does not need to worry about dead nodes.
3816 * Return: True if it's contained in a node, false on spanning write.
3818 static bool mas_wr_walk(struct ma_wr_state *wr_mas)
3820 struct ma_state *mas = wr_mas->mas;
3823 mas_wr_walk_descend(wr_mas);
3824 if (unlikely(mas_is_span_wr(wr_mas)))
3827 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3829 if (ma_is_leaf(wr_mas->type))
3832 mas_wr_walk_traverse(wr_mas);
3838 static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
3840 struct ma_state *mas = wr_mas->mas;
3843 mas_wr_walk_descend(wr_mas);
3844 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3846 if (ma_is_leaf(wr_mas->type))
3848 mas_wr_walk_traverse(wr_mas);
3854 * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
3855 * @l_wr_mas: The left maple write state
3856 * @r_wr_mas: The right maple write state
3858 static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
3859 struct ma_wr_state *r_wr_mas)
3861 struct ma_state *r_mas = r_wr_mas->mas;
3862 struct ma_state *l_mas = l_wr_mas->mas;
3863 unsigned char l_slot;
3865 l_slot = l_mas->offset;
3866 if (!l_wr_mas->content)
3867 l_mas->index = l_wr_mas->r_min;
3869 if ((l_mas->index == l_wr_mas->r_min) &&
3871 !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
3873 l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
3875 l_mas->index = l_mas->min;
3877 l_mas->offset = l_slot - 1;
3880 if (!r_wr_mas->content) {
3881 if (r_mas->last < r_wr_mas->r_max)
3882 r_mas->last = r_wr_mas->r_max;
3884 } else if ((r_mas->last == r_wr_mas->r_max) &&
3885 (r_mas->last < r_mas->max) &&
3886 !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
3887 r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
3888 r_wr_mas->type, r_mas->offset + 1);
3893 static inline void *mas_state_walk(struct ma_state *mas)
3897 entry = mas_start(mas);
3898 if (mas_is_none(mas))
3901 if (mas_is_ptr(mas))
3904 return mtree_range_walk(mas);
3908 * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
3909 * to date.
3911 * @mas: The maple state.
3913 * Note: Leaves mas in undesirable state.
3914 * Return: The entry for @mas->index or %NULL on dead node.
3916 static inline void *mtree_lookup_walk(struct ma_state *mas)
3918 unsigned long *pivots;
3919 unsigned char offset;
3920 struct maple_node *node;
3921 struct maple_enode *next;
3922 enum maple_type type;
3931 node = mte_to_node(next);
3932 type = mte_node_type(next);
3933 pivots = ma_pivots(node, type);
3934 end = ma_data_end(node, type, pivots, max);
3935 if (unlikely(ma_dead_node(node)))
3938 if (pivots[offset] >= mas->index) {
3939 max = pivots[offset];
3942 } while (++offset < end);
3944 slots = ma_slots(node, type);
3945 next = mt_slot(mas->tree, slots, offset);
3946 if (unlikely(ma_dead_node(node)))
3948 } while (!ma_is_leaf(type));
3950 return (void *) next;
3958 * mas_new_root() - Create a new root node that only contains the entry passed
3959 * in.
3960 * @mas: The maple state
3961 * @entry: The entry to store.
3963 * Only valid when the index == 0 and the last == ULONG_MAX
3965 * Return: 0 on error, 1 on success.
3967 static inline int mas_new_root(struct ma_state *mas, void *entry)
3969 struct maple_enode *root = mas_root_locked(mas);
3970 enum maple_type type = maple_leaf_64;
3971 struct maple_node *node;
3973 unsigned long *pivots;
3975 if (!entry && !mas->index && mas->last == ULONG_MAX) {
3977 mas_set_height(mas);
3978 rcu_assign_pointer(mas->tree->ma_root, entry);
3979 mas->node = MAS_START;
3983 mas_node_count(mas, 1);
3984 if (mas_is_err(mas))
3987 node = mas_pop_node(mas);
3988 pivots = ma_pivots(node, type);
3989 slots = ma_slots(node, type);
3990 node->parent = ma_parent_ptr(
3991 ((unsigned long)mas->tree | MA_ROOT_PARENT));
3992 mas->node = mt_mk_node(node, type);
3993 rcu_assign_pointer(slots[0], entry);
3994 pivots[0] = mas->last;
3996 mas_set_height(mas);
3997 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
4000 if (xa_is_node(root))
4001 mte_destroy_walk(root, mas->tree);
4006 * mas_wr_spanning_store() - Create a subtree with the store operation completed
4007 * and new nodes where necessary, then place the sub-tree in the actual tree.
4008 * Note that mas is expected to point to the node which caused the store to
4009 * span.
4010 * @wr_mas: The maple write state
4012 * Return: 0 on error, positive on success.
4014 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
4016 struct maple_subtree_state mast;
4017 struct maple_big_node b_node;
4018 struct ma_state *mas;
4019 unsigned char height;
4021 /* Left and Right side of spanning store */
4022 MA_STATE(l_mas, NULL, 0, 0);
4023 MA_STATE(r_mas, NULL, 0, 0);
4025 MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
4026 MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
4029 * A store operation that spans multiple nodes is called a spanning
4030 * store and is handled early in the store call stack by the function
4031 * mas_is_span_wr(). When a spanning store is identified, the maple
4032 * state is duplicated. The first maple state walks the left tree path
4033 * to ``index``, the duplicate walks the right tree path to ``last``.
4034 * The data in the two nodes are combined into a single node, two nodes,
4035 * or possibly three nodes (see the 3-way split above). A ``NULL``
4036 * written to the last entry of a node is considered a spanning store as
4037 * a rebalance is required for the operation to complete and an overflow
4038 * of data may happen.
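 *
 * For example, with 0-10 and 11-20 stored in two different leaves, storing
 * an entry over 5-15 walks the left state to the leaf holding 5 and the
 * right state to the leaf holding 15; the surviving parts of both leaves
 * and the new entry are collected in the big node and rebalanced back into
 * the tree by mas_spanning_rebalance().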
4041 trace_ma_op(__func__, mas);
4043 if (unlikely(!mas->index && mas->last == ULONG_MAX))
4044 return mas_new_root(mas, wr_mas->entry);
4046 * Node rebalancing may occur due to this store, so there may be three new
4047 * entries per level plus a new root.
4049 height = mas_mt_height(mas);
4050 mas_node_count(mas, 1 + height * 3);
4051 if (mas_is_err(mas))
4055 * Set up right side. Need to get to the next offset after the spanning
4056 * store to ensure it's not NULL and to combine both the next node and
4057 * the node with the start together.
4060 /* Avoid overflow, walk to next slot in the tree. */
4064 r_mas.index = r_mas.last;
4065 mas_wr_walk_index(&r_wr_mas);
4066 r_mas.last = r_mas.index = mas->last;
4068 /* Set up left side. */
4070 mas_wr_walk_index(&l_wr_mas);
4072 if (!wr_mas->entry) {
4073 mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
4074 mas->offset = l_mas.offset;
4075 mas->index = l_mas.index;
4076 mas->last = l_mas.last = r_mas.last;
4079 /* expanding NULLs may make this cover the entire range */
4080 if (!l_mas.index && r_mas.last == ULONG_MAX) {
4081 mas_set_range(mas, 0, ULONG_MAX);
4082 return mas_new_root(mas, wr_mas->entry);
4085 memset(&b_node, 0, sizeof(struct maple_big_node));
4086 /* Copy l_mas and store the value in b_node. */
4087 mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
4088 /* Copy r_mas into b_node. */
4089 if (r_mas.offset <= r_wr_mas.node_end)
4090 mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
4091 &b_node, b_node.b_end + 1);
4095 /* Stop spanning searches by searching for just index. */
4096 l_mas.index = l_mas.last = mas->index;
4099 mast.orig_l = &l_mas;
4100 mast.orig_r = &r_mas;
4101 /* Combine l_mas and r_mas and split them up evenly again. */
4102 return mas_spanning_rebalance(mas, &mast, height + 1);
4106 * mas_wr_node_store() - Attempt to store the value in a node
4107 * @wr_mas: The maple write state
4109 * Attempts to reuse the node, but may allocate.
4111 * Return: True if stored, false otherwise
4113 static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
4115 struct ma_state *mas = wr_mas->mas;
4116 void __rcu **dst_slots;
4117 unsigned long *dst_pivots;
4118 unsigned char dst_offset;
4119 unsigned char new_end = wr_mas->node_end;
4120 unsigned char offset;
4121 unsigned char node_slots = mt_slots[wr_mas->type];
4122 struct maple_node reuse, *newnode;
4123 unsigned char copy_size, max_piv = mt_pivots[wr_mas->type];
4124 bool in_rcu = mt_in_rcu(mas->tree);
4126 offset = mas->offset;
4127 if (mas->last == wr_mas->r_max) {
4128 /* runs right to the end of the node */
4129 if (mas->last == mas->max)
4131 /* don't copy this offset */
4132 wr_mas->offset_end++;
4133 } else if (mas->last < wr_mas->r_max) {
4134 /* new range ends in this range */
4135 if (unlikely(wr_mas->r_max == ULONG_MAX))
4136 mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
4140 if (wr_mas->end_piv == mas->last)
4141 wr_mas->offset_end++;
4143 new_end -= wr_mas->offset_end - offset - 1;
4146 /* new range starts within a range */
4147 if (wr_mas->r_min < mas->index)
4150 /* Not enough room */
4151 if (new_end >= node_slots)
4154 /* Not enough data. */
4155 if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
4156 !(mas->mas_flags & MA_STATE_BULK))
4161 mas_node_count(mas, 1);
4162 if (mas_is_err(mas))
4165 newnode = mas_pop_node(mas);
4167 memset(&reuse, 0, sizeof(struct maple_node));
4171 newnode->parent = mas_mn(mas)->parent;
4172 dst_pivots = ma_pivots(newnode, wr_mas->type);
4173 dst_slots = ma_slots(newnode, wr_mas->type);
4174 /* Copy from start to insert point */
4175 memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1));
4176 memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1));
4177 dst_offset = offset;
4179 /* Handle insert of new range starting after old range */
4180 if (wr_mas->r_min < mas->index) {
4182 rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content);
4183 dst_pivots[dst_offset++] = mas->index - 1;
4186 /* Store the new entry and range end. */
4187 if (dst_offset < max_piv)
4188 dst_pivots[dst_offset] = mas->last;
4189 mas->offset = dst_offset;
4190 rcu_assign_pointer(dst_slots[dst_offset], wr_mas->entry);
4193 * this range wrote to the end of the node or it overwrote the rest of
4194 * the data.
4195 */
4196 if (wr_mas->offset_end > wr_mas->node_end || mas->last >= mas->max) {
4197 new_end = dst_offset;
4202 /* Copy to the end of node if necessary. */
4203 copy_size = wr_mas->node_end - wr_mas->offset_end + 1;
4204 memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end,
4205 sizeof(void *) * copy_size);
4206 if (dst_offset < max_piv) {
4207 if (copy_size > max_piv - dst_offset)
4208 copy_size = max_piv - dst_offset;
4210 memcpy(dst_pivots + dst_offset,
4211 wr_mas->pivots + wr_mas->offset_end,
4212 sizeof(unsigned long) * copy_size);
4215 if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1))
4216 dst_pivots[new_end] = mas->max;
4219 mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
4221 mte_set_node_dead(mas->node);
4222 mas->node = mt_mk_node(newnode, wr_mas->type);
4223 mas_replace(mas, false);
4225 memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
4227 trace_ma_write(__func__, mas, 0, wr_mas->entry);
4228 mas_update_gap(mas);
4233 * mas_wr_slot_store() - Attempt to store a value in a slot.
4234 * @wr_mas: the maple write state
4236 * Return: True if stored, false otherwise
4238 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
4240 struct ma_state *mas = wr_mas->mas;
4241 unsigned long lmax; /* Logical max. */
4242 unsigned char offset = mas->offset;
4244 if ((wr_mas->r_max > mas->last) && ((wr_mas->r_min != mas->index) ||
4245 (offset != wr_mas->node_end)))
4248 if (offset == wr_mas->node_end - 1)
4251 lmax = wr_mas->pivots[offset + 1];
4253 /* going to overwrite too many slots. */
4254 if (lmax < mas->last)
4257 if (wr_mas->r_min == mas->index) {
4258 /* overwriting two or more ranges with one. */
4259 if (lmax == mas->last)
4262 /* Overwriting all of offset and a portion of offset + 1. */
4263 rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry);
4264 wr_mas->pivots[offset] = mas->last;
4268 /* Doesn't end on the next range end. */
4269 if (lmax != mas->last)
4272 /* Overwriting a portion of offset and all of offset + 1 */
4273 if ((offset + 1 < mt_pivots[wr_mas->type]) &&
4274 (wr_mas->entry || wr_mas->pivots[offset + 1]))
4275 wr_mas->pivots[offset + 1] = mas->last;
4277 rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
4278 wr_mas->pivots[offset] = mas->index - 1;
4279 mas->offset++; /* Keep mas accurate. */
4282 trace_ma_write(__func__, mas, 0, wr_mas->entry);
4283 mas_update_gap(mas);
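/*
 * mas_wr_end_piv() - Find the pivot that ends the range being written.
 * @wr_mas: The maple write state
 *
 * Advances wr_mas->offset_end past the slots the write completely covers and
 * sets wr_mas->end_piv to the pivot ending the write, falling back to
 * mas->max when the write runs to the end of the node.
 */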
4287 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
4289 while ((wr_mas->mas->last > wr_mas->end_piv) &&
4290 (wr_mas->offset_end < wr_mas->node_end))
4291 wr_mas->end_piv = wr_mas->pivots[++wr_mas->offset_end];
4293 if (wr_mas->mas->last > wr_mas->end_piv)
4294 wr_mas->end_piv = wr_mas->mas->max;
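/*
 * mas_wr_extend_null() - Widen a %NULL store over neighbouring %NULL slots.
 * @wr_mas: The maple write state
 *
 * Grows mas->index and mas->last so that a %NULL write also consumes any
 * adjacent %NULL ranges, keeping the %NULLs in the tree coalesced.
 */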
4297 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
4299 struct ma_state *mas = wr_mas->mas;
4301 if (mas->last < wr_mas->end_piv && !wr_mas->slots[wr_mas->offset_end])
4302 mas->last = wr_mas->end_piv;
4304 /* Check next slot(s) if we are overwriting the end */
4305 if ((mas->last == wr_mas->end_piv) &&
4306 (wr_mas->node_end != wr_mas->offset_end) &&
4307 !wr_mas->slots[wr_mas->offset_end + 1]) {
4308 wr_mas->offset_end++;
4309 if (wr_mas->offset_end == wr_mas->node_end)
4310 mas->last = mas->max;
4312 mas->last = wr_mas->pivots[wr_mas->offset_end];
4313 wr_mas->end_piv = mas->last;
4316 if (!wr_mas->content) {
4317 /* If this one is null, the next and prev are not */
4318 mas->index = wr_mas->r_min;
4320 /* Check prev slot if we are overwriting the start */
4321 if (mas->index == wr_mas->r_min && mas->offset &&
4322 !wr_mas->slots[mas->offset - 1]) {
4324 wr_mas->r_min = mas->index =
4325 mas_safe_min(mas, wr_mas->pivots, mas->offset);
4326 wr_mas->r_max = wr_mas->pivots[mas->offset];
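/*
 * mas_wr_append() - Attempt to append the new entry to the end of the node.
 * @wr_mas: The maple write state
 *
 * Return: True if stored, false otherwise.
 */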
4331 static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
4333 unsigned char end = wr_mas->node_end;
4334 unsigned char new_end = end + 1;
4335 struct ma_state *mas = wr_mas->mas;
4336 unsigned char node_pivots = mt_pivots[wr_mas->type];
4338 if ((mas->index != wr_mas->r_min) && (mas->last == wr_mas->r_max)) {
4339 if (new_end < node_pivots)
4340 wr_mas->pivots[new_end] = wr_mas->pivots[end];
4342 if (new_end < node_pivots)
4343 ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4345 rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry);
4346 mas->offset = new_end;
4347 wr_mas->pivots[end] = mas->index - 1;
4352 if ((mas->index == wr_mas->r_min) && (mas->last < wr_mas->r_max)) {
4353 if (new_end < node_pivots)
4354 wr_mas->pivots[new_end] = wr_mas->pivots[end];
4356 rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
4357 if (new_end < node_pivots)
4358 ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4360 wr_mas->pivots[end] = mas->last;
4361 rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
4369 * mas_wr_bnode() - Slow path for a modification.
4370 * @wr_mas: The write maple state
4372 * This is where split and rebalance end up.
4374 static void mas_wr_bnode(struct ma_wr_state *wr_mas)
4376 struct maple_big_node b_node;
4378 trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
4379 memset(&b_node, 0, sizeof(struct maple_big_node));
4380 mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
4381 mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
4384 static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
4386 unsigned char node_slots;
4387 unsigned char node_size;
4388 struct ma_state *mas = wr_mas->mas;
4390 /* Direct replacement */
4391 if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
4392 rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
4393 if (!!wr_mas->entry ^ !!wr_mas->content)
4394 mas_update_gap(mas);
4398 /* Attempt to append */
4399 node_slots = mt_slots[wr_mas->type];
4400 node_size = wr_mas->node_end - wr_mas->offset_end + mas->offset + 2;
4401 if (mas->max == ULONG_MAX)
4404 /* slot and node store will not fit, go to the slow path */
4405 if (unlikely(node_size >= node_slots))
4408 if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) &&
4409 (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) {
4410 if (!wr_mas->content || !wr_mas->entry)
4411 mas_update_gap(mas);
4415 if ((wr_mas->offset_end - mas->offset <= 1) && mas_wr_slot_store(wr_mas))
4417 else if (mas_wr_node_store(wr_mas))
4420 if (mas_is_err(mas))
4424 mas_wr_bnode(wr_mas);
4428 * mas_wr_store_entry() - Internal call to store a value
4429 * @wr_mas: The maple write state
4432 * Return: The contents that were previously stored at the index.
4434 static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
4436 struct ma_state *mas = wr_mas->mas;
4438 wr_mas->content = mas_start(mas);
4439 if (mas_is_none(mas) || mas_is_ptr(mas)) {
4440 mas_store_root(mas, wr_mas->entry);
4441 return wr_mas->content;
4444 if (unlikely(!mas_wr_walk(wr_mas))) {
4445 mas_wr_spanning_store(wr_mas);
4446 return wr_mas->content;
4449 /* At this point, we are at the leaf node that needs to be altered. */
4450 wr_mas->end_piv = wr_mas->r_max;
4451 mas_wr_end_piv(wr_mas);
4454 mas_wr_extend_null(wr_mas);
4456 /* New root for a single pointer */
4457 if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
4458 mas_new_root(mas, wr_mas->entry);
4459 return wr_mas->content;
4462 mas_wr_modify(wr_mas);
4463 return wr_mas->content;
4467 * mas_insert() - Internal call to insert a value
4468 * @mas: The maple state
4469 * @entry: The entry to store
4471 * Return: %NULL or the contents that already exist at the requested index
4472 * otherwise. The maple state needs to be checked for error conditions.
4474 static inline void *mas_insert(struct ma_state *mas, void *entry)
4476 MA_WR_STATE(wr_mas, mas, entry);
4479 * Inserting a new range inserts either 0, 1, or 2 pivots within the
4480 * tree. If the insert fits exactly into an existing gap with a value
4481 * of NULL, then the slot only needs to be written with the new value.
4482 * If the range being inserted is adjacent to another range, then only a
4483 * single pivot needs to be inserted (as well as writing the entry). If
4484 * the new range is within a gap but does not touch any other ranges,
4485 * then two pivots need to be inserted: the start - 1, and the end. As
4486 * usual, the entry must be written. Most operations require a new node
4487 * to be allocated and replace an existing node to ensure RCU safety,
4488 * when in RCU mode. The exception to requiring a newly allocated node
4489 * is when inserting at the end of a node (appending). When done
4490 * carefully, appending can reuse the node in place.
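 *
 * For example, in a hypothetical leaf holding 0-5 with a NULL range above
 * it: inserting 6-10 is adjacent to the existing range and adds one pivot
 * (10); inserting 8-10 inside the gap adds two pivots (7 and 10); an insert
 * that exactly fills the NULL range adds none.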
4492 wr_mas.content = mas_start(mas);
4496 if (mas_is_none(mas) || mas_is_ptr(mas)) {
4497 mas_store_root(mas, entry);
4501 /* spanning writes always overwrite something */
4502 if (!mas_wr_walk(&wr_mas))
4505 /* At this point, we are at the leaf node that needs to be altered. */
4506 wr_mas.offset_end = mas->offset;
4507 wr_mas.end_piv = wr_mas.r_max;
4509 if (wr_mas.content || (mas->last > wr_mas.r_max))
4515 mas_wr_modify(&wr_mas);
4516 return wr_mas.content;
4519 mas_set_err(mas, -EEXIST);
4520 return wr_mas.content;
4525 * mas_prev_node() - Find the previous non-null node at the same level in
4526 * the tree.
4527 * @mas: The maple state
4528 * @min: The lower limit to search
4530 * The previous node value will be mas->node[mas->offset] or MAS_NONE.
4531 * Return: 1 if the node is dead, 0 otherwise.
4533 static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
4538 struct maple_node *node;
4539 struct maple_enode *enode;
4540 unsigned long *pivots;
4542 if (mas_is_none(mas))
4548 if (ma_is_root(node))
4552 if (unlikely(mas_ascend(mas)))
4554 offset = mas->offset;
4559 mt = mte_node_type(mas->node);
4561 slots = ma_slots(node, mt);
4562 pivots = ma_pivots(node, mt);
4563 if (unlikely(ma_dead_node(node)))
4566 mas->max = pivots[offset];
4568 mas->min = pivots[offset - 1] + 1;
4569 if (unlikely(ma_dead_node(node)))
4577 enode = mas_slot(mas, slots, offset);
4578 if (unlikely(ma_dead_node(node)))
4582 mt = mte_node_type(mas->node);
4584 slots = ma_slots(node, mt);
4585 pivots = ma_pivots(node, mt);
4586 offset = ma_data_end(node, mt, pivots, mas->max);
4587 if (unlikely(ma_dead_node(node)))
4591 mas->min = pivots[offset - 1] + 1;
4593 if (offset < mt_pivots[mt])
4594 mas->max = pivots[offset];
4600 mas->node = mas_slot(mas, slots, offset);
4601 if (unlikely(ma_dead_node(node)))
4604 mas->offset = mas_data_end(mas);
4605 if (unlikely(mte_dead_node(mas->node)))
4611 mas->offset = offset;
4613 mas->min = pivots[offset - 1] + 1;
4615 if (unlikely(ma_dead_node(node)))
4618 mas->node = MAS_NONE;
4623 * mas_next_node() - Get the next node at the same level in the tree.
4624 * @mas: The maple state
4625 * @node: The maple node
4625 * @max: The maximum pivot value to check.
4627 * The next value will be mas->node[mas->offset] or MAS_NONE.
4628 * Return: 1 on dead node, 0 otherwise.
4630 static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
4633 unsigned long min, pivot;
4634 unsigned long *pivots;
4635 struct maple_enode *enode;
4637 unsigned char offset;
4638 unsigned char node_end;
4642 if (mas->max >= max)
4647 if (ma_is_root(node))
4654 if (unlikely(mas_ascend(mas)))
4657 offset = mas->offset;
4660 mt = mte_node_type(mas->node);
4661 pivots = ma_pivots(node, mt);
4662 node_end = ma_data_end(node, mt, pivots, mas->max);
4663 if (unlikely(ma_dead_node(node)))
4666 } while (unlikely(offset == node_end));
4668 slots = ma_slots(node, mt);
4669 pivot = mas_safe_pivot(mas, pivots, ++offset, mt);
4670 while (unlikely(level > 1)) {
4671 /* Descend, if necessary */
4672 enode = mas_slot(mas, slots, offset);
4673 if (unlikely(ma_dead_node(node)))
4679 mt = mte_node_type(mas->node);
4680 slots = ma_slots(node, mt);
4681 pivots = ma_pivots(node, mt);
4682 if (unlikely(ma_dead_node(node)))
4689 enode = mas_slot(mas, slots, offset);
4690 if (unlikely(ma_dead_node(node)))
4699 if (unlikely(ma_dead_node(node)))
4702 mas->node = MAS_NONE;
4707 * mas_next_nentry() - Get the next node entry
4708 * @mas: The maple state
4709 * @node: The maple node
4709 * @max: The maximum value to check
4710 * @type: The maple node type
4712 * Sets @mas->offset to the offset of the next node entry, @mas->last to the
4713 * pivot of the entry.
4715 * Return: The next entry, %NULL otherwise
4717 static inline void *mas_next_nentry(struct ma_state *mas,
4718 struct maple_node *node, unsigned long max, enum maple_type type)
4720 unsigned char count;
4721 unsigned long pivot;
4722 unsigned long *pivots;
4726 if (mas->last == mas->max) {
4727 mas->index = mas->max;
4731 slots = ma_slots(node, type);
4732 pivots = ma_pivots(node, type);
4733 count = ma_data_end(node, type, pivots, mas->max);
4734 if (unlikely(ma_dead_node(node)))
4737 mas->index = mas_safe_min(mas, pivots, mas->offset);
4738 if (unlikely(ma_dead_node(node)))
4741 if (mas->index > max)
4744 if (mas->offset > count)
4747 while (mas->offset < count) {
4748 pivot = pivots[mas->offset];
4749 entry = mas_slot(mas, slots, mas->offset);
4750 if (ma_dead_node(node))
4759 mas->index = pivot + 1;
4763 if (mas->index > mas->max) {
4764 mas->index = mas->last;
4768 pivot = mas_safe_pivot(mas, pivots, mas->offset, type);
4769 entry = mas_slot(mas, slots, mas->offset);
4770 if (ma_dead_node(node))
4784 static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
4788 mas_set(mas, index);
4789 mas_state_walk(mas);
4790 if (mas_is_start(mas))
4798 * mas_next_entry() - Internal function to get the next entry.
4799 * @mas: The maple state
4800 * @limit: The maximum range start.
4802 * Sets @mas->node to the node holding the next entry and @mas->index to
4803 * the beginning value of that entry's range. Does not check beyond @limit.
4804 * Sets @mas->index and @mas->last to the limit if it is hit.
4805 * Restarts on dead nodes.
4807 * Return: the next entry or %NULL.
4809 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
4812 struct maple_enode *prev_node;
4813 struct maple_node *node;
4814 unsigned char offset;
4818 if (mas->index > limit) {
4819 mas->index = mas->last = limit;
4825 offset = mas->offset;
4826 prev_node = mas->node;
4828 mt = mte_node_type(mas->node);
4830 if (unlikely(mas->offset >= mt_slots[mt])) {
4831 mas->offset = mt_slots[mt] - 1;
4835 while (!mas_is_none(mas)) {
4836 entry = mas_next_nentry(mas, node, limit, mt);
4837 if (unlikely(ma_dead_node(node))) {
4838 mas_rewalk(mas, last);
4845 if (unlikely((mas->index > limit)))
4849 prev_node = mas->node;
4850 offset = mas->offset;
4851 if (unlikely(mas_next_node(mas, node, limit))) {
4852 mas_rewalk(mas, last);
4857 mt = mte_node_type(mas->node);
4860 mas->index = mas->last = limit;
4861 mas->offset = offset;
4862 mas->node = prev_node;
4867 * mas_prev_nentry() - Get the previous node entry.
4868 * @mas: The maple state.
4869 * @limit: The lower limit to check for a value.
4869 * @index: The index to rewalk from if a dead node is encountered.
4871 * Return: the entry, %NULL otherwise.
4873 static inline void *mas_prev_nentry(struct ma_state *mas, unsigned long limit,
4874 unsigned long index)
4876 unsigned long pivot, min;
4877 unsigned char offset;
4878 struct maple_node *mn;
4880 unsigned long *pivots;
4889 mt = mte_node_type(mas->node);
4890 offset = mas->offset - 1;
4891 if (offset >= mt_slots[mt])
4892 offset = mt_slots[mt] - 1;
4894 slots = ma_slots(mn, mt);
4895 pivots = ma_pivots(mn, mt);
4896 if (unlikely(ma_dead_node(mn))) {
4897 mas_rewalk(mas, index);
4901 if (offset == mt_pivots[mt])
4904 pivot = pivots[offset];
4906 if (unlikely(ma_dead_node(mn))) {
4907 mas_rewalk(mas, index);
4911 while (offset && ((!mas_slot(mas, slots, offset) && pivot >= limit) ||
4913 pivot = pivots[--offset];
4915 min = mas_safe_min(mas, pivots, offset);
4916 entry = mas_slot(mas, slots, offset);
4917 if (unlikely(ma_dead_node(mn))) {
4918 mas_rewalk(mas, index);
4922 if (likely(entry)) {
4923 mas->offset = offset;
4930 static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min)
4934 if (mas->index < min) {
4935 mas->index = mas->last = min;
4936 mas->node = MAS_NONE;
4940 while (likely(!mas_is_none(mas))) {
4941 entry = mas_prev_nentry(mas, min, mas->index);
4942 if (unlikely(mas->last < min))
4948 if (unlikely(mas_prev_node(mas, min))) {
4949 mas_rewalk(mas, mas->index);
4958 mas->index = mas->last = min;
4963 * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the
4964 * highest gap address of a given size in a given node and descend.
4965 * @mas: The maple state
4966 * @size: The needed size.
4966 * @gap_min: Pointer to store the minimum of the found gap.
4966 * @gap_max: Pointer to store the maximum of the found gap.
4968 * Return: True if found in a leaf, false otherwise.
4971 static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
4972 unsigned long *gap_min, unsigned long *gap_max)
4974 enum maple_type type = mte_node_type(mas->node);
4975 struct maple_node *node = mas_mn(mas);
4976 unsigned long *pivots, *gaps;
4978 unsigned long gap = 0;
4979 unsigned long max, min;
4980 unsigned char offset;
4982 if (unlikely(mas_is_err(mas)))
4985 if (ma_is_dense(type)) {
4987 mas->offset = (unsigned char)(mas->index - mas->min);
4991 pivots = ma_pivots(node, type);
4992 slots = ma_slots(node, type);
4993 gaps = ma_gaps(node, type);
4994 offset = mas->offset;
4995 min = mas_safe_min(mas, pivots, offset);
4996 /* Skip out of bounds. */
4997 while (mas->last < min)
4998 min = mas_safe_min(mas, pivots, --offset);
5000 max = mas_safe_pivot(mas, pivots, offset, type);
5001 while (mas->index <= max) {
5005 else if (!mas_slot(mas, slots, offset))
5006 gap = max - min + 1;
5009 if ((size <= gap) && (size <= mas->last - min + 1))
5013 /* Skip the next slot, it cannot be a gap. */
5018 max = pivots[offset];
5019 min = mas_safe_min(mas, pivots, offset);
5029 min = mas_safe_min(mas, pivots, offset);
5032 if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
5035 if (unlikely(ma_is_leaf(type))) {
5036 mas->offset = offset;
5038 *gap_max = min + gap - 1;
5042 /* descend, only happens under lock. */
5043 mas->node = mas_slot(mas, slots, offset);
5046 mas->offset = mas_data_end(mas);
5050 if (!mte_is_root(mas->node))
5054 mas_set_err(mas, -EBUSY);
5058 static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
5060 enum maple_type type = mte_node_type(mas->node);
5061 unsigned long pivot, min, gap = 0;
5062 unsigned char offset;
5063 unsigned long *gaps;
5064 unsigned long *pivots = ma_pivots(mas_mn(mas), type);
5065 void __rcu **slots = ma_slots(mas_mn(mas), type);
5068 if (ma_is_dense(type)) {
5069 mas->offset = (unsigned char)(mas->index - mas->min);
5073 gaps = ma_gaps(mte_to_node(mas->node), type);
5074 offset = mas->offset;
5075 min = mas_safe_min(mas, pivots, offset);
5076 for (; offset < mt_slots[type]; offset++) {
5077 pivot = mas_safe_pivot(mas, pivots, offset, type);
5078 if (offset && !pivot)
5081 /* Not within lower bounds */
5082 if (mas->index > pivot)
5087 else if (!mas_slot(mas, slots, offset))
5088 gap = min(pivot, mas->last) - max(mas->index, min) + 1;
5093 if (ma_is_leaf(type)) {
5097 if (mas->index <= pivot) {
5098 mas->node = mas_slot(mas, slots, offset);
5107 if (mas->last <= pivot) {
5108 mas_set_err(mas, -EBUSY);
5113 if (mte_is_root(mas->node))
5116 mas->offset = offset;
5121 * mas_walk() - Search for @mas->index in the tree.
5122 * @mas: The maple state.
5124 * mas->index and mas->last will be set to the range if there is a value. If
5125 * mas->node is MAS_NONE, reset to MAS_START.
5127 * Return: the entry at the location or %NULL.
5129 void *mas_walk(struct ma_state *mas)
5134 entry = mas_state_walk(mas);
5135 if (mas_is_start(mas))
5138 if (mas_is_ptr(mas)) {
5143 mas->last = ULONG_MAX;
5148 if (mas_is_none(mas)) {
5150 mas->last = ULONG_MAX;
5155 EXPORT_SYMBOL_GPL(mas_walk);
5157 static inline bool mas_rewind_node(struct ma_state *mas)
5162 if (mte_is_root(mas->node)) {
5172 mas->offset = --slot;
5177 * mas_skip_node() - Internal function. Skip over a node.
5178 * @mas: The maple state.
5180 * Return: true if there is another node, false otherwise.
5182 static inline bool mas_skip_node(struct ma_state *mas)
5184 if (mas_is_err(mas))
5188 if (mte_is_root(mas->node)) {
5189 if (mas->offset >= mas_data_end(mas)) {
5190 mas_set_err(mas, -EBUSY);
5196 } while (mas->offset >= mas_data_end(mas));
5203 * mas_awalk() - Allocation walk. Search from low address to high, for a
5203 * gap of @size.
5205 * @mas: The maple state
5206 * @size: The size of the gap required
5208 * Search between @mas->index and @mas->last for a gap of @size.
5210 static inline void mas_awalk(struct ma_state *mas, unsigned long size)
5212 struct maple_enode *last = NULL;
5215 * There are 4 options:
5216 * go to child (descend)
5217 * go back to parent (ascend)
5218 * no gap found. (return, slot == MAPLE_NODE_SLOTS)
5219 * found the gap. (return, slot != MAPLE_NODE_SLOTS)
5221 while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
5222 if (last == mas->node)
5230 * mas_fill_gap() - Fill a located gap with @entry.
5231 * @mas: The maple state
5232 * @entry: The value to store
5233 * @slot: The offset into the node to store the @entry
5234 * @size: The size of the entry
5235 * @index: The start location
5237 static inline void mas_fill_gap(struct ma_state *mas, void *entry,
5238 unsigned char slot, unsigned long size, unsigned long *index)
5240 MA_WR_STATE(wr_mas, mas, entry);
5241 unsigned char pslot = mte_parent_slot(mas->node);
5242 struct maple_enode *mn = mas->node;
5243 unsigned long *pivots;
5244 enum maple_type ptype;
5246 * mas->index is the start address for the search
5247 * which may no longer be needed.
5248 * mas->last is the end address for the search
5251 *index = mas->index;
5252 mas->last = mas->index + size - 1;
5255 * Using mas->max and mas->min as they stand could cause an issue in the
5256 * gap calculation, so fix up the ma_state here by re-reading the limits
5257 * from the parent's pivots
5260 ptype = mte_node_type(mas->node);
5261 pivots = ma_pivots(mas_mn(mas), ptype);
5262 mas->max = mas_safe_pivot(mas, pivots, pslot, ptype);
5263 mas->min = mas_safe_min(mas, pivots, pslot);
5266 mas_wr_store_entry(&wr_mas);
5270 * mas_sparse_area() - Internal function. Return upper or lower limit when
5271 * searching for a gap in an empty tree.
5272 * @mas: The maple state
5273 * @min: The minimum of the range
5274 * @max: The maximum of the range
5275 * @size: The size of the gap
5276 * @fwd: Searching forward or back
5278 static inline void mas_sparse_area(struct ma_state *mas, unsigned long min,
5279 unsigned long max, unsigned long size, bool fwd)
5281 unsigned long start = 0;
5283 if (!unlikely(mas_is_none(mas)))
5292 mas->last = start + size - 1;
5300 * mas_empty_area() - Get the lowest address within the range that is
5301 * sufficient for the size requested.
5302 * @mas: The maple state
5303 * @min: The lowest value of the range
5304 * @max: The highest value of the range
5305 * @size: The size needed
5305 *
5305 * Return: 0 on success, otherwise a negative error code.
5307 int mas_empty_area(struct ma_state *mas, unsigned long min,
5308 unsigned long max, unsigned long size)
5310 unsigned char offset;
5311 unsigned long *pivots;
5317 if (mas_is_start(mas))
5319 else if (mas->offset >= 2)
5321 else if (!mas_skip_node(mas))
5325 if (mas_is_none(mas) || mas_is_ptr(mas)) {
5326 mas_sparse_area(mas, min, max, size, true);
5330 /* The start of the window can only be within these values */
5333 mas_awalk(mas, size);
5335 if (unlikely(mas_is_err(mas)))
5336 return xa_err(mas->node);
5338 offset = mas->offset;
5339 if (unlikely(offset == MAPLE_NODE_SLOTS))
5342 mt = mte_node_type(mas->node);
5343 pivots = ma_pivots(mas_mn(mas), mt);
5345 mas->min = pivots[offset - 1] + 1;
5347 if (offset < mt_pivots[mt])
5348 mas->max = pivots[offset];
5350 if (mas->index < mas->min)
5351 mas->index = mas->min;
5353 mas->last = mas->index + size - 1;
5356 EXPORT_SYMBOL_GPL(mas_empty_area);
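/*
 * A minimal usage sketch (assumptions: an MT_FLAGS_ALLOC_RANGE tree named
 * "mt" and a payload "ptr"): find the lowest 4096-wide gap below 1M and
 * claim it.  On success, mas.index and mas.last span the range found.
 *
 *	MA_STATE(mas, &mt, 0, 0);
 *
 *	mas_lock(&mas);
 *	if (!mas_empty_area(&mas, 0, (1UL << 20) - 1, 4096))
 *		mas_store_gfp(&mas, ptr, GFP_KERNEL);
 *	mas_unlock(&mas);
 */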
5359 * mas_empty_area_rev() - Get the highest address within the range that is
5360 * sufficient for the size requested.
5361 * @mas: The maple state
5362 * @min: The lowest value of the range
5363 * @max: The highest value of the range
5364 * @size: The size needed
5364 *
5364 * Return: 0 on success, otherwise a negative error code.
5366 int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
5367 unsigned long max, unsigned long size)
5369 struct maple_enode *last = mas->node;
5374 if (mas_is_start(mas)) {
5376 mas->offset = mas_data_end(mas);
5377 } else if (mas->offset >= 2) {
5379 } else if (!mas_rewind_node(mas)) {
5384 if (mas_is_none(mas) || mas_is_ptr(mas)) {
5385 mas_sparse_area(mas, min, max, size, false);
5389 /* The start of the window can only be within these values. */
5393 while (!mas_rev_awalk(mas, size, &min, &max)) {
5394 if (last == mas->node) {
5395 if (!mas_rewind_node(mas))
5402 if (mas_is_err(mas))
5403 return xa_err(mas->node);
5405 if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
5408 /* Trim the upper limit to the max. */
5409 if (max <= mas->last)
5412 mas->index = mas->last - size + 1;
5415 EXPORT_SYMBOL_GPL(mas_empty_area_rev);
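/*
 * A usage sketch mirroring the mas_empty_area() example above (low, high,
 * len and addr are placeholders): a top-down search as used for mmap()-style
 * address allocation.  On success, mas.index holds the highest fitting start.
 *
 *	MA_STATE(mas, &mt, 0, 0);
 *
 *	mas_lock(&mas);
 *	if (!mas_empty_area_rev(&mas, low, high, len))
 *		addr = mas.index;
 *	mas_unlock(&mas);
 */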
5417 static inline int mas_alloc(struct ma_state *mas, void *entry,
5418 unsigned long size, unsigned long *index)
5423 if (mas_is_none(mas) || mas_is_ptr(mas)) {
5424 mas_root_expand(mas, entry);
5425 if (mas_is_err(mas))
5426 return xa_err(mas->node);
5429 return mte_pivot(mas->node, 0);
5430 return mte_pivot(mas->node, 1);
5433 /* Must be walking a tree. */
5434 mas_awalk(mas, size);
5435 if (mas_is_err(mas))
5436 return xa_err(mas->node);
5438 if (mas->offset == MAPLE_NODE_SLOTS)
5442 * At this point, mas->node points to the right node and we have an
5443 * offset that has a sufficient gap.
5447 min = mte_pivot(mas->node, mas->offset - 1) + 1;
5449 if (mas->index < min)
5452 mas_fill_gap(mas, entry, mas->offset, size, index);
5459 static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min,
5460 unsigned long max, void *entry,
5461 unsigned long size, unsigned long *index)
5465 ret = mas_empty_area_rev(mas, min, max, size);
5469 if (mas_is_err(mas))
5470 return xa_err(mas->node);
5472 if (mas->offset == MAPLE_NODE_SLOTS)
5475 mas_fill_gap(mas, entry, mas->offset, size, index);
5483 * mte_dead_leaves() - Mark all leaves of a node as dead.
5484 * @enode: The encoded maple node
5485 * @mt: The maple tree
5486 * @slots: Pointer to the slot array
5488 * Must hold the write lock.
5490 * Return: The number of leaves marked as dead.
5493 unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
5496 struct maple_node *node;
5497 enum maple_type type;
5501 for (offset = 0; offset < mt_slot_count(enode); offset++) {
5502 entry = mt_slot(mt, slots, offset);
5503 type = mte_node_type(entry);
5504 node = mte_to_node(entry);
5505 /* Use both node and type to catch LE & BE metadata */
5509 mte_set_node_dead(entry);
5511 rcu_assign_pointer(slots[offset], node);
5518 * mte_dead_walk() - Walk down a dead tree to just before the leaves
5519 * @enode: The maple encoded node
5520 * @offset: The starting offset
5522 * Note: This can only be used from the RCU callback context.
5524 static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset)
5526 struct maple_node *node, *next;
5527 void __rcu **slots = NULL;
5529 next = mte_to_node(*enode);
5531 *enode = ma_enode_ptr(next);
5532 node = mte_to_node(*enode);
5533 slots = ma_slots(node, node->type);
5534 next = rcu_dereference_protected(slots[offset],
5535 lock_is_held(&rcu_callback_map));
5537 } while (!ma_is_leaf(next->type));
5543 * mt_free_walk() - Walk & free a tree in the RCU callback context
5544 * @head: The RCU head that's within the node.
5546 * Note: This can only be used from the RCU callback context.
5548 static void mt_free_walk(struct rcu_head *head)
5551 struct maple_node *node, *start;
5552 struct maple_enode *enode;
5553 unsigned char offset;
5554 enum maple_type type;
5556 node = container_of(head, struct maple_node, rcu);
5558 if (ma_is_leaf(node->type))
5562 enode = mt_mk_node(node, node->type);
5563 slots = mte_dead_walk(&enode, 0);
5564 node = mte_to_node(enode);
5566 mt_free_bulk(node->slot_len, slots);
5567 offset = node->parent_slot + 1;
5568 enode = node->piv_parent;
5569 if (mte_to_node(enode) == node)
5572 type = mte_node_type(enode);
5573 slots = ma_slots(mte_to_node(enode), type);
5574 if ((offset < mt_slots[type]) &&
5575 rcu_dereference_protected(slots[offset],
5576 lock_is_held(&rcu_callback_map)))
5577 slots = mte_dead_walk(&enode, offset);
5578 node = mte_to_node(enode);
5579 } while ((node != start) || (node->slot_len < offset));
5581 slots = ma_slots(node, node->type);
5582 mt_free_bulk(node->slot_len, slots);
5585 mt_free_rcu(&node->rcu);
5588 static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
5589 struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
5591 struct maple_node *node;
5592 struct maple_enode *next = *enode;
5593 void __rcu **slots = NULL;
5594 enum maple_type type;
5595 unsigned char next_offset = 0;
5599 node = mte_to_node(*enode);
5600 type = mte_node_type(*enode);
5601 slots = ma_slots(node, type);
5602 next = mt_slot_locked(mt, slots, next_offset);
5603 if ((mte_dead_node(next)))
5604 next = mt_slot_locked(mt, slots, ++next_offset);
5606 mte_set_node_dead(*enode);
5608 node->piv_parent = prev;
5609 node->parent_slot = offset;
5610 offset = next_offset;
5613 } while (!mte_is_leaf(next));
5618 static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
5622 struct maple_node *node = mte_to_node(enode);
5623 struct maple_enode *start;
5625 if (mte_is_leaf(enode)) {
5626 node->type = mte_node_type(enode);
5631 slots = mte_destroy_descend(&enode, mt, start, 0);
5632 node = mte_to_node(enode); /* Updated in the above call. */
5634 enum maple_type type;
5635 unsigned char offset;
5636 struct maple_enode *parent, *tmp;
5638 node->slot_len = mte_dead_leaves(enode, mt, slots);
5640 mt_free_bulk(node->slot_len, slots);
5641 offset = node->parent_slot + 1;
5642 enode = node->piv_parent;
5643 if (mte_to_node(enode) == node)
5646 type = mte_node_type(enode);
5647 slots = ma_slots(mte_to_node(enode), type);
5648 if (offset >= mt_slots[type])
5651 tmp = mt_slot_locked(mt, slots, offset);
5652 if (mte_node_type(tmp) && mte_to_node(tmp)) {
5655 slots = mte_destroy_descend(&enode, mt, parent, offset);
5658 node = mte_to_node(enode);
5659 } while (start != enode);
5661 node = mte_to_node(enode);
5662 node->slot_len = mte_dead_leaves(enode, mt, slots);
5664 mt_free_bulk(node->slot_len, slots);
5668 mt_free_rcu(&node->rcu);
5670 mt_clear_meta(mt, node, node->type);
5674 * mte_destroy_walk() - Free a tree or sub-tree.
5675 * @enode: The encoded maple node (maple_enode) to start at
5676 * @mt: The tree to free - needed for node types.
5678 * Must hold the write lock.
5680 static inline void mte_destroy_walk(struct maple_enode *enode,
5681 struct maple_tree *mt)
5683 struct maple_node *node = mte_to_node(enode);
5685 if (mt_in_rcu(mt)) {
5686 mt_destroy_walk(enode, mt, false);
5687 call_rcu(&node->rcu, mt_free_walk);
5689 mt_destroy_walk(enode, mt, true);
5693 static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
5695 if (unlikely(mas_is_paused(wr_mas->mas)))
5696 mas_reset(wr_mas->mas);
5698 if (!mas_is_start(wr_mas->mas)) {
5699 if (mas_is_none(wr_mas->mas)) {
5700 mas_reset(wr_mas->mas);
5702 wr_mas->r_max = wr_mas->mas->max;
5703 wr_mas->type = mte_node_type(wr_mas->mas->node);
5704 if (mas_is_span_wr(wr_mas))
5705 mas_reset(wr_mas->mas);
5714 * mas_store() - Store an @entry.
5715 * @mas: The maple state.
5716 * @entry: The entry to store.
5718 * The @mas->index and @mas->last are used to set the range for the @entry.
5719 * Note: The @mas should have pre-allocated entries to ensure there is memory to
5720 * store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
5722 * Return: the first entry between mas->index and mas->last or %NULL.
5724 void *mas_store(struct ma_state *mas, void *entry)
5726 MA_WR_STATE(wr_mas, mas, entry);
5728 trace_ma_write(__func__, mas, 0, entry);
5729 #ifdef CONFIG_DEBUG_MAPLE_TREE
5730 if (mas->index > mas->last)
5731 pr_err("Error %lu > %lu %p\n", mas->index, mas->last, entry);
5732 MT_BUG_ON(mas->tree, mas->index > mas->last);
5733 if (mas->index > mas->last) {
5734 mas_set_err(mas, -EINVAL);
5741 * Storing is the same operation as insert with the added caveat that it
5742 * can overwrite entries. Although this seems simple enough, one may
5743 * want to examine what happens if a single store operation was to
5744 * overwrite multiple entries within a self-balancing B-Tree.
5746 mas_wr_store_setup(&wr_mas);
5747 mas_wr_store_entry(&wr_mas);
5748 return wr_mas.content;
5750 EXPORT_SYMBOL_GPL(mas_store);
5753 * mas_store_gfp() - Store a value into the tree.
5754 * @mas: The maple state
5755 * @entry: The entry to store
5756 * @gfp: The GFP_FLAGS to use for allocations if necessary.
5758 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
5758 * not be allocated.
5761 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
5763 MA_WR_STATE(wr_mas, mas, entry);
5765 mas_wr_store_setup(&wr_mas);
5766 trace_ma_write(__func__, mas, 0, entry);
5768 mas_wr_store_entry(&wr_mas);
5769 if (unlikely(mas_nomem(mas, gfp)))
5772 if (unlikely(mas_is_err(mas)))
5773 return xa_err(mas->node);
5777 EXPORT_SYMBOL_GPL(mas_store_gfp);
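/*
 * A minimal sketch (assuming a tree named "mt"): overwrite whatever lies in
 * [5, 10] under the internal lock, letting mas_store_gfp() retry the node
 * allocation as shown above.
 *
 *	MA_STATE(mas, &mt, 5, 10);
 *	int err;
 *
 *	mas_lock(&mas);
 *	err = mas_store_gfp(&mas, xa_mk_value(42), GFP_KERNEL);
 *	mas_unlock(&mas);
 */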
5780 * mas_store_prealloc() - Store a value into the tree using memory
5781 * preallocated in the maple state.
5782 * @mas: The maple state
5783 * @entry: The entry to store.
5785 void mas_store_prealloc(struct ma_state *mas, void *entry)
5787 MA_WR_STATE(wr_mas, mas, entry);
5789 mas_wr_store_setup(&wr_mas);
5790 trace_ma_write(__func__, mas, 0, entry);
5791 mas_wr_store_entry(&wr_mas);
5792 BUG_ON(mas_is_err(mas));
5795 EXPORT_SYMBOL_GPL(mas_store_prealloc);
5798 * mas_preallocate() - Preallocate enough nodes for a store operation
5799 * @mas: The maple state
5800 * @entry: The entry that will be stored
5801 * @gfp: The GFP_FLAGS to use for allocations.
5803 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5805 int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
5809 mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp);
5810 mas->mas_flags |= MA_STATE_PREALLOC;
5811 if (likely(!mas_is_err(mas)))
5814 mas_set_alloc_req(mas, 0);
5815 ret = xa_err(mas->node);
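/*
 * A usage sketch (assuming "lock" is the caller's own lock, and index, last
 * and entry are placeholders): preallocate while sleeping is still allowed,
 * then store in the critical section where the store itself cannot fail for
 * lack of memory.
 *
 *	MA_STATE(mas, &mt, index, last);
 *
 *	if (mas_preallocate(&mas, entry, GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&lock);
 *	mas_store_prealloc(&mas, entry);
 *	spin_unlock(&lock);
 */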
5823 * mas_destroy() - destroy a maple state.
5824 * @mas: The maple state
5826 * Upon completion, check the left-most node and rebalance against the node to
5827 * the right if necessary. Frees any allocated nodes associated with this
5827 * maple state.
5830 void mas_destroy(struct ma_state *mas)
5832 struct maple_alloc *node;
5833 unsigned long total;
5836 * When using mas_for_each() to insert an expected number of elements,
5837 * it is possible that the number inserted is less than the expected
5838 * number. To fix an invalid final node, a check is performed here to
5839 * rebalance the previous node with the final node.
5841 if (mas->mas_flags & MA_STATE_REBALANCE) {
5844 if (mas_is_start(mas))
5847 mtree_range_walk(mas);
5848 end = mas_data_end(mas) + 1;
5849 if (end < mt_min_slot_count(mas->node) - 1)
5850 mas_destroy_rebalance(mas, end);
5852 mas->mas_flags &= ~MA_STATE_REBALANCE;
5854 mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
5856 total = mas_allocated(mas);
5859 mas->alloc = node->slot[0];
5860 if (node->node_count > 1) {
5861 size_t count = node->node_count - 1;
5863 mt_free_bulk(count, (void __rcu **)&node->slot[1]);
5866 kmem_cache_free(maple_node_cache, node);
5872 EXPORT_SYMBOL_GPL(mas_destroy);
5875 * mas_expected_entries() - Set the expected number of entries that will be inserted.
5876 * @mas: The maple state
5877 * @nr_entries: The number of expected entries.
5879 * This will attempt to pre-allocate enough nodes to store the expected number
5880 * of entries. The allocations will occur using the bulk allocator interface
5881 * for speed. Please call mas_destroy() on the @mas after inserting the entries
5882 * to ensure any unused nodes are freed.
5884 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5886 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
5888 int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
5889 struct maple_enode *enode = mas->node;
5894 * Sometimes it is necessary to duplicate a tree to a new tree, such as
5895 * forking a process and duplicating the VMAs from one tree to a new
5896 * tree. When such a situation arises, it is known that the new tree is
5897 * not going to be used until the entire tree is populated. For
5898 * performance reasons, it is best to use a bulk load with RCU disabled.
5899 * This allows for optimistic splitting that favours the left and reuse
5900 * of nodes during the operation.
5903 /* Optimize splitting for bulk insert in-order */
5904 mas->mas_flags |= MA_STATE_BULK;
5907 * Avoid overflow, assume a gap between each entry and a trailing null.
5908 * If this is wrong, it just means allocation can happen during
5909 * insertion of entries.
5911 nr_nodes = max(nr_entries, nr_entries * 2 + 1);
5912 if (!mt_is_alloc(mas->tree))
5913 nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
5915 /* Leaves; reduce slots to keep space for expansion */
5916 nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
5917 /* Internal nodes */
5918 nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
5919 /* Add working room for split (2 nodes) + new parents */
5920 mas_node_count(mas, nr_nodes + 3);
5922 /* Detect if allocations run out */
5923 mas->mas_flags |= MA_STATE_PREALLOC;
5925 if (!mas_is_err(mas))
5928 ret = xa_err(mas->node);
5934 EXPORT_SYMBOL_GPL(mas_expected_entries);
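/*
 * A bulk-load sketch for the pattern described above (start[], end[] and
 * entries[] are placeholders): declare the expected count, store in order,
 * then let mas_destroy() rebalance the final node and free the unused
 * preallocations.
 *
 *	MA_STATE(mas, &new_mt, 0, 0);
 *
 *	if (mas_expected_entries(&mas, nr))
 *		return -ENOMEM;
 *	for (i = 0; i < nr; i++) {
 *		mas_set_range(&mas, start[i], end[i]);
 *		mas_store(&mas, entries[i]);
 *	}
 *	mas_destroy(&mas);
 */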
5937 * mas_next() - Get the next entry.
5938 * @mas: The maple state
5939 * @max: The maximum index to check.
5941 * Returns the next entry after @mas->index.
5942 * Must hold rcu_read_lock or the write lock.
5943 * Can return the zero entry.
5945 * Return: The next entry or %NULL
5947 void *mas_next(struct ma_state *mas, unsigned long max)
5949 if (mas_is_none(mas) || mas_is_paused(mas))
5950 mas->node = MAS_START;
5952 if (mas_is_start(mas))
5953 mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
5955 if (mas_is_ptr(mas)) {
5958 mas->last = ULONG_MAX;
5963 if (mas->last == ULONG_MAX)
5966 /* Retries on dead nodes handled by mas_next_entry */
5967 return mas_next_entry(mas, max);
5969 EXPORT_SYMBOL_GPL(mas_next);
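/*
 * A minimal iteration sketch (assuming a tree named "mt"): walk every entry
 * under RCU; mas.index and mas.last describe the range of each entry
 * returned.
 *
 *	MA_STATE(mas, &mt, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	while ((entry = mas_next(&mas, ULONG_MAX)) != NULL)
 *		pr_debug("[%lu, %lu] -> %p\n", mas.index, mas.last, entry);
 *	rcu_read_unlock();
 */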
5972 * mt_next() - get the next value in the maple tree
5973 * @mt: The maple tree
5974 * @index: The start index
5975 * @max: The maximum index to check
5977 * Return: The entry at @index or higher, or %NULL if nothing is found.
5979 void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
5982 MA_STATE(mas, mt, index, index);
5985 entry = mas_next(&mas, max);
5989 EXPORT_SYMBOL_GPL(mt_next);
5992 * mas_prev() - Get the previous entry
5993 * @mas: The maple state
5994 * @min: The minimum value to check.
5996 * Must hold rcu_read_lock or the write lock.
5997 * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not
5997 * searchable nodes.
6000 * Return: the previous value or %NULL.
6002 void *mas_prev(struct ma_state *mas, unsigned long min)
6005 /* Nothing comes before 0 */
6007 mas->node = MAS_NONE;
6011 if (unlikely(mas_is_ptr(mas)))
6014 if (mas_is_none(mas) || mas_is_paused(mas))
6015 mas->node = MAS_START;
6017 if (mas_is_start(mas)) {
6023 if (mas_is_ptr(mas)) {
6029 mas->index = mas->last = 0;
6030 return mas_root_locked(mas);
6032 return mas_prev_entry(mas, min);
6034 EXPORT_SYMBOL_GPL(mas_prev);
6037 * mt_prev() - get the previous value in the maple tree
6038 * @mt: The maple tree
6039 * @index: The start index
6040 * @min: The minimum index to check
6042 * Return: The entry at @index or lower, or %NULL if nothing is found.
6044 void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
6047 MA_STATE(mas, mt, index, index);
6050 entry = mas_prev(&mas, min);
6054 EXPORT_SYMBOL_GPL(mt_prev);
6057 * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
6058 * @mas: The maple state to pause
6060 * Some users need to pause a walk and drop the lock they're holding in
6061 * order to yield to a higher priority thread or carry out an operation
6062 * on an entry. Those users should call this function before they drop
6063 * the lock. It resets the @mas to be suitable for the next iteration
6064 * of the loop after the user has reacquired the lock. If most entries
6065 * found during a walk require you to call mas_pause(), the mt_for_each()
6066 * iterator may be more appropriate.
6069 void mas_pause(struct ma_state *mas)
6071 mas->node = MAS_PAUSE;
6073 EXPORT_SYMBOL_GPL(mas_pause);
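/*
 * A sketch of the pause pattern described above (process() is a
 * placeholder): drop the RCU read lock mid-walk to reschedule, then resume
 * the iteration where it left off.
 *
 *	MA_STATE(mas, &mt, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	mas_for_each(&mas, entry, ULONG_MAX) {
 *		process(entry);
 *		if (need_resched()) {
 *			mas_pause(&mas);
 *			rcu_read_unlock();
 *			cond_resched();
 *			rcu_read_lock();
 *		}
 *	}
 *	rcu_read_unlock();
 */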
6076 * mas_find() - On the first call, find the entry at or after mas->index up to
6077 * %max. Otherwise, find the entry after mas->index.
6078 * @mas: The maple state
6079 * @max: The maximum value to check.
6081 * Must hold rcu_read_lock or the write lock.
6082 * If an entry exists, last and index are updated accordingly.
6083 * May set @mas->node to MAS_NONE.
6085 * Return: The entry or %NULL.
6087 void *mas_find(struct ma_state *mas, unsigned long max)
6089 if (unlikely(mas_is_paused(mas))) {
6090 if (unlikely(mas->last == ULONG_MAX)) {
6091 mas->node = MAS_NONE;
6094 mas->node = MAS_START;
6095 mas->index = ++mas->last;
6098 if (unlikely(mas_is_none(mas)))
6099 mas->node = MAS_START;
6101 if (unlikely(mas_is_start(mas))) {
6102 /* First run or continue */
6105 if (mas->index > max)
6108 entry = mas_walk(mas);
6113 if (unlikely(!mas_searchable(mas)))
6116 /* Retries on dead nodes handled by mas_next_entry */
6117 return mas_next_entry(mas, max);
6119 EXPORT_SYMBOL_GPL(mas_find);
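/*
 * Note: the mas_for_each() helper in <linux/maple_tree.h> is a thin loop
 * around mas_find(), so the two forms below (placeholders only) are
 * equivalent:
 *
 *	mas_for_each(&mas, entry, max) { ... }
 *
 *	while ((entry = mas_find(&mas, max)) != NULL) { ... }
 */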
6122 * mas_find_rev() - On the first call, find the first non-null entry at or below
6123 * mas->index down to %min. Otherwise find the first non-null entry below
6124 * mas->index down to %min.
6125 * @mas: The maple state
6126 * @min: The minimum value to check.
6128 * Must hold rcu_read_lock or the write lock.
6129 * If an entry exists, last and index are updated accordingly.
6130 * May set @mas->node to MAS_NONE.
6132 * Return: The entry or %NULL.
6134 void *mas_find_rev(struct ma_state *mas, unsigned long min)
6136 if (unlikely(mas_is_paused(mas))) {
6137 if (unlikely(mas->last == ULONG_MAX)) {
6138 mas->node = MAS_NONE;
6141 mas->node = MAS_START;
6142 mas->last = --mas->index;
6145 if (unlikely(mas_is_start(mas))) {
6146 /* First run or continue */
6149 if (mas->index < min)
6152 entry = mas_walk(mas);
6157 if (unlikely(!mas_searchable(mas)))
6160 if (mas->index < min)
6163 /* Retries on dead nodes handled by mas_prev_entry */
6164 return mas_prev_entry(mas, min);
6166 EXPORT_SYMBOL_GPL(mas_find_rev);
6169 * mas_erase() - Find the range in which index resides and erase the entire
6169 * range.
6171 * @mas: The maple state
6173 * Must hold the write lock.
6174 * Searches for @mas->index, sets @mas->index and @mas->last to the range and
6175 * erases that range.
6177 * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
6179 void *mas_erase(struct ma_state *mas)
6182 MA_WR_STATE(wr_mas, mas, NULL);
6184 if (mas_is_none(mas) || mas_is_paused(mas))
6185 mas->node = MAS_START;
6187 /* Retry unnecessary when holding the write lock. */
6188 entry = mas_state_walk(mas);
6193 /* Must reset to ensure spanning writes of last slot are detected */
6195 mas_wr_store_setup(&wr_mas);
6196 mas_wr_store_entry(&wr_mas);
6197 if (mas_nomem(mas, GFP_KERNEL))
6202 EXPORT_SYMBOL_GPL(mas_erase);
6205 * mas_nomem() - Check if there was an error allocating and do the allocation
6206 * if necessary. If there are allocations, then free them.
6207 * @mas: The maple state
6208 * @gfp: The GFP_FLAGS to use for allocations
6209 * Return: true on allocation, false otherwise.
6211 bool mas_nomem(struct ma_state *mas, gfp_t gfp)
6212 __must_hold(mas->tree->lock)
6214 if (likely(mas->node != MA_ERROR(-ENOMEM))) {
6219 if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
6220 mtree_unlock(mas->tree);
6221 mas_alloc_nodes(mas, gfp);
6222 mtree_lock(mas->tree);
6224 mas_alloc_nodes(mas, gfp);
6227 if (!mas_allocated(mas))
6230 mas->node = MAS_START;
6234 void __init maple_tree_init(void)
6236 maple_node_cache = kmem_cache_create("maple_node",
6237 sizeof(struct maple_node), sizeof(struct maple_node),
6242 * mtree_load() - Load a value stored in a maple tree
6243 * @mt: The maple tree
6244 * @index: The index to load
6246 * Return: the entry or %NULL
6248 void *mtree_load(struct maple_tree *mt, unsigned long index)
6250 MA_STATE(mas, mt, index, index);
6253 trace_ma_read(__func__, &mas);
6256 entry = mas_start(&mas);
6257 if (unlikely(mas_is_none(&mas)))
6260 if (unlikely(mas_is_ptr(&mas))) {
6267 entry = mtree_lookup_walk(&mas);
6268 if (!entry && unlikely(mas_is_start(&mas)))
6272 if (xa_is_zero(entry))
6277 EXPORT_SYMBOL(mtree_load);
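/*
 * A round-trip sketch (assuming a tree named "mt"; val is a placeholder):
 * store the integer 42 encoded with xa_mk_value() so it is not mistaken for
 * a pointer, then load it back.
 *
 *	void *entry;
 *
 *	mtree_store(&mt, 3, xa_mk_value(42), GFP_KERNEL);
 *	entry = mtree_load(&mt, 3);
 *	if (xa_is_value(entry))
 *		val = xa_to_value(entry);
 */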
6280 * mtree_store_range() - Store an entry at a given range.
6281 * @mt: The maple tree
6282 * @index: The start of the range
6283 * @last: The end of the range
6284 * @entry: The entry to store
6285 * @gfp: The GFP_FLAGS to use for allocations
6287 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
6287 * not be allocated.
6290 int mtree_store_range(struct maple_tree *mt, unsigned long index,
6291 unsigned long last, void *entry, gfp_t gfp)
6293 MA_STATE(mas, mt, index, last);
6294 MA_WR_STATE(wr_mas, &mas, entry);
6296 trace_ma_write(__func__, &mas, 0, entry);
6297 if (WARN_ON_ONCE(xa_is_advanced(entry)))
6305 mas_wr_store_entry(&wr_mas);
6306 if (mas_nomem(&mas, gfp))
6310 if (mas_is_err(&mas))
6311 return xa_err(mas.node);
6315 EXPORT_SYMBOL(mtree_store_range);
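/*
 * A sketch (assuming a tree named "mt") of how a single ranged store can
 * replace several existing entries at once, unlike an insert:
 *
 *	mtree_store_range(&mt, 0, 9, xa_mk_value(1), GFP_KERNEL);
 *	mtree_store_range(&mt, 10, 19, xa_mk_value(2), GFP_KERNEL);
 *	mtree_store_range(&mt, 5, 19, xa_mk_value(3), GFP_KERNEL);
 *		(the last call truncates [0, 9] to [0, 4] and replaces the
 *		 [10, 19] entry entirely)
 */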
6318 * mtree_store() - Store an entry at a given index.
6319 * @mt: The maple tree
6320 * @index: The index to store the value
6321 * @entry: The entry to store
6322 * @gfp: The GFP_FLAGS to use for allocations
6324 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
6324 * not be allocated.
6327 int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
6330 return mtree_store_range(mt, index, index, entry, gfp);
6332 EXPORT_SYMBOL(mtree_store);
6335 * mtree_insert_range() - Insert an entry at a given range if there is no value.
6336 * @mt: The maple tree
6337 * @first: The start of the range
6338 * @last: The end of the range
6339 * @entry: The entry to store
6340 * @gfp: The GFP_FLAGS to use for allocations.
6342 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6343 * request, -ENOMEM if memory could not be allocated.
6345 int mtree_insert_range(struct maple_tree *mt, unsigned long first,
6346 unsigned long last, void *entry, gfp_t gfp)
6348 MA_STATE(ms, mt, first, last);
6350 if (WARN_ON_ONCE(xa_is_advanced(entry)))
6358 mas_insert(&ms, entry);
6359 if (mas_nomem(&ms, gfp))
6363 if (mas_is_err(&ms))
6364 return xa_err(ms.node);
6368 EXPORT_SYMBOL(mtree_insert_range);
6371 * mtree_insert() - Insert an entry at a given index if there is no value.
6372 * @mt: The maple tree
6373 * @index: The index to store the value
6374 * @entry: The entry to store
6375 * @gfp: The GFP_FLAGS to use for allocations.
6377 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6378 * request, -ENOMEM if memory could not be allocated.
6380 int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
6383 return mtree_insert_range(mt, index, index, entry, gfp);
6385 EXPORT_SYMBOL(mtree_insert);
6387 int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
6388 void *entry, unsigned long size, unsigned long min,
6389 unsigned long max, gfp_t gfp)
6393 MA_STATE(mas, mt, min, max - size);
6394 if (!mt_is_alloc(mt))
6397 if (WARN_ON_ONCE(mt_is_reserved(entry)))
6413 mas.last = max - size;
6414 ret = mas_alloc(&mas, entry, size, startp);
6415 if (mas_nomem(&mas, gfp))
6421 EXPORT_SYMBOL(mtree_alloc_range);
6423 int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
6424 void *entry, unsigned long size, unsigned long min,
6425 unsigned long max, gfp_t gfp)
6429 MA_STATE(mas, mt, min, max - size);
6430 if (!mt_is_alloc(mt))
6433 if (WARN_ON_ONCE(mt_is_reserved(entry)))
6447 ret = mas_rev_alloc(&mas, min, max, entry, size, startp);
6448 if (mas_nomem(&mas, gfp))
6454 EXPORT_SYMBOL(mtree_alloc_rrange);
6457 * mtree_erase() - Find an index and erase the entire range.
6458 * @mt: The maple tree
6459 * @index: The index to erase
6461 * Erasing is the same as a walk to an entry then a store of a NULL to that
6462 * ENTIRE range. In fact, it is implemented as such using the advanced API.
6464 * Return: The entry stored at the @index or %NULL
6466 void *mtree_erase(struct maple_tree *mt, unsigned long index)
6470 MA_STATE(mas, mt, index, index);
6471 trace_ma_op(__func__, &mas);
6474 entry = mas_erase(&mas);
6479 EXPORT_SYMBOL(mtree_erase);
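/*
 * A sketch (assuming a tree named "mt"): erasing by any index inside a
 * range removes the whole range.
 *
 *	mtree_store_range(&mt, 100, 200, xa_mk_value(7), GFP_KERNEL);
 *	entry = mtree_erase(&mt, 150);
 *		(returns the stored entry; [100, 200] now reads back as NULL)
 */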
6482 * __mt_destroy() - Walk and free all nodes of a locked maple tree.
6483 * @mt: The maple tree
6485 * Note: Does not handle locking.
6487 void __mt_destroy(struct maple_tree *mt)
6489 void *root = mt_root_locked(mt);
6491 rcu_assign_pointer(mt->ma_root, NULL);
6492 if (xa_is_node(root))
6493 mte_destroy_walk(root, mt);
6497 EXPORT_SYMBOL_GPL(__mt_destroy);
6500 * mtree_destroy() - Destroy a maple tree
6501 * @mt: The maple tree
6503 * Frees all resources used by the tree. Handles locking.
6505 void mtree_destroy(struct maple_tree *mt)
6511 EXPORT_SYMBOL(mtree_destroy);
6514 * mt_find() - Search from the start up until an entry is found.
6515 * @mt: The maple tree
6516 * @index: Pointer which contains the start location of the search
6517 * @max: The maximum value to check
6519 * Handles locking. @index will be incremented to one beyond the range.
6521 * Return: The entry at or after the @index or %NULL
6523 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
6525 MA_STATE(mas, mt, *index, *index);
6527 #ifdef CONFIG_DEBUG_MAPLE_TREE
6528 unsigned long copy = *index;
6531 trace_ma_read(__func__, &mas);
6538 entry = mas_state_walk(&mas);
6539 if (mas_is_start(&mas))
6542 if (unlikely(xa_is_zero(entry)))
6548 while (mas_searchable(&mas) && (mas.index < max)) {
6549 entry = mas_next_entry(&mas, max);
6550 if (likely(entry && !xa_is_zero(entry)))
6554 if (unlikely(xa_is_zero(entry)))
6558 if (likely(entry)) {
6559 *index = mas.last + 1;
6560 #ifdef CONFIG_DEBUG_MAPLE_TREE
6561 if ((*index) && (*index) <= copy)
6562 pr_err("index not increased! %lx <= %lx\n",
6564 MT_BUG_ON(mt, (*index) && ((*index) <= copy));
6570 EXPORT_SYMBOL(mt_find);
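/*
 * The typical cursor loop, sketched with a placeholder process(): @index is
 * advanced to one past each range returned.  The mt_for_each() helper wraps
 * mt_find()/mt_find_after() in the same way.
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	while ((entry = mt_find(&mt, &index, ULONG_MAX)) != NULL)
 *		process(entry);
 */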
6573 * mt_find_after() - Search from the start up until an entry is found.
6574 * @mt: The maple tree
6575 * @index: Pointer which contains the start location of the search
6576 * @max: The maximum value to check
6578 * Handles locking, detects wrapping on index == 0
6580 * Return: The entry at or after the @index or %NULL
6582 void *mt_find_after(struct maple_tree *mt, unsigned long *index,
6588 return mt_find(mt, index, max);
6590 EXPORT_SYMBOL(mt_find_after);
6592 #ifdef CONFIG_DEBUG_MAPLE_TREE
6593 atomic_t maple_tree_tests_run;
6594 EXPORT_SYMBOL_GPL(maple_tree_tests_run);
6595 atomic_t maple_tree_tests_passed;
6596 EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
6599 extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
6600 void mt_set_non_kernel(unsigned int val)
6602 kmem_cache_set_non_kernel(maple_node_cache, val);
6605 extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
6606 unsigned long mt_get_alloc_size(void)
6608 return kmem_cache_get_alloc(maple_node_cache);
6611 extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
6612 void mt_zero_nr_tallocated(void)
6614 kmem_cache_zero_nr_tallocated(maple_node_cache);
6617 extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
6618 unsigned int mt_nr_tallocated(void)
6620 return kmem_cache_nr_tallocated(maple_node_cache);
6623 extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
6624 unsigned int mt_nr_allocated(void)
6626 return kmem_cache_nr_allocated(maple_node_cache);
6630 * mas_dead_node() - Check if the maple state is pointing to a dead node.
6631 * @mas: The maple state
6632 * @index: The index to restore in @mas.
6634 * Used in test code.
6635 * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
6637 static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
6639 if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
6642 if (likely(!mte_dead_node(mas->node)))
6645 mas_rewalk(mas, index);
6649 void mt_cache_shrink(void)
6654 * mt_cache_shrink() - For testing, don't use this.
6656 * Certain testcases can trigger an OOM when combined with other memory
6657 * debugging configuration options. This function is used to reduce the
6658 * possibility of an out-of-memory event due to kmem_cache objects remaining
6659 * around for longer than usual.
6661 void mt_cache_shrink(void)
6663 kmem_cache_shrink(maple_node_cache);
6666 EXPORT_SYMBOL_GPL(mt_cache_shrink);
6668 #endif /* not defined __KERNEL__ */
6670 * mas_get_slot() - Get the entry in the maple state node stored at @offset.
6671 * @mas: The maple state
6672 * @offset: The offset into the slot array to fetch.
6674 * Return: The entry stored at @offset.
6676 static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
6677 unsigned char offset)
6679 return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
6685 * mas_first_entry() - Go to the first leaf and find the first entry.
6686 * @mas: the maple state.
6686 * @mn: the maple node.
6687 * @limit: the maximum index to check.
6687 * @mt: the maple node type.
6690 * Sets mas->offset to the offset of the entry and mas->index to the range
6690 * minimum.
6692 * Return: The first entry or MAS_NONE.
6694 static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
6695 unsigned long limit, enum maple_type mt)
6699 unsigned long *pivots;
6703 mas->index = mas->min;
6704 if (mas->index > limit)
6709 while (likely(!ma_is_leaf(mt))) {
6710 MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
6711 slots = ma_slots(mn, mt);
6712 entry = mas_slot(mas, slots, 0);
6713 pivots = ma_pivots(mn, mt);
6714 if (unlikely(ma_dead_node(mn)))
6719 mt = mte_node_type(mas->node);
6721 MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
6724 slots = ma_slots(mn, mt);
6725 entry = mas_slot(mas, slots, 0);
6726 if (unlikely(ma_dead_node(mn)))
6729 /* Slot 0 or 1 must be set */
6730 if (mas->index > limit)
6737 entry = mas_slot(mas, slots, 1);
6738 pivots = ma_pivots(mn, mt);
6739 if (unlikely(ma_dead_node(mn)))
6742 mas->index = pivots[0] + 1;
6743 if (mas->index > limit)
6750 if (likely(!ma_dead_node(mn)))
6751 mas->node = MAS_NONE;
6755 /* Depth first search, post-order */
6756 static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
6759 struct maple_enode *p = MAS_NONE, *mn = mas->node;
6760 unsigned long p_min, p_max;
6762 mas_next_node(mas, mas_mn(mas), max);
6763 if (!mas_is_none(mas))
6766 if (mte_is_root(mn))
6771 while (mas->node != MAS_NONE) {
6775 mas_prev_node(mas, 0);
6786 /* Tree validations */
6787 static void mt_dump_node(const struct maple_tree *mt, void *entry,
6788 unsigned long min, unsigned long max, unsigned int depth);
6789 static void mt_dump_range(unsigned long min, unsigned long max,
6792 static const char spaces[] = " ";
6795 pr_info("%.*s%lu: ", depth * 2, spaces, min);
6797 pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
6800 static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
6803 mt_dump_range(min, max, depth);
6805 if (xa_is_value(entry))
6806 pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
6807 xa_to_value(entry), entry);
6808 else if (xa_is_zero(entry))
6809 pr_cont("zero (%ld)\n", xa_to_internal(entry));
6810 else if (mt_is_reserved(entry))
6811 pr_cont("UNKNOWN ENTRY (%p)\n", entry);
6813 pr_cont("%p\n", entry);
6816 static void mt_dump_range64(const struct maple_tree *mt, void *entry,
6817 unsigned long min, unsigned long max, unsigned int depth)
6819 struct maple_range_64 *node = &mte_to_node(entry)->mr64;
6820 bool leaf = mte_is_leaf(entry);
6821 unsigned long first = min;
6824 pr_cont(" contents: ");
6825 for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++)
6826 pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6827 pr_cont("%p\n", node->slot[i]);
6828 for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
6829 unsigned long last = max;
6831 if (i < (MAPLE_RANGE64_SLOTS - 1))
6832 last = node->pivot[i];
6833 else if (!node->slot[i] && max != mt_max[mte_node_type(entry)])
6835 if (last == 0 && i > 0)
6838 mt_dump_entry(mt_slot(mt, node->slot, i),
6839 first, last, depth + 1);
6840 else if (node->slot[i])
6841 mt_dump_node(mt, mt_slot(mt, node->slot, i),
6842 first, last, depth + 1);
6847 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6848 node, last, max, i);
6855 static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
6856 unsigned long min, unsigned long max, unsigned int depth)
6858 struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
6859 bool leaf = mte_is_leaf(entry);
6860 unsigned long first = min;
6863 pr_cont(" contents: ");
6864 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++)
6865 pr_cont("%lu ", node->gap[i]);
6866 pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
6867 for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++)
6868 pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6869 pr_cont("%p\n", node->slot[i]);
6870 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
6871 unsigned long last = max;
6873 if (i < (MAPLE_ARANGE64_SLOTS - 1))
6874 last = node->pivot[i];
6875 else if (!node->slot[i])
6877 if (last == 0 && i > 0)
6880 mt_dump_entry(mt_slot(mt, node->slot, i),
6881 first, last, depth + 1);
6882 else if (node->slot[i])
6883 mt_dump_node(mt, mt_slot(mt, node->slot, i),
6884 first, last, depth + 1);
6889 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6890 node, last, max, i);
6897 static void mt_dump_node(const struct maple_tree *mt, void *entry,
6898 unsigned long min, unsigned long max, unsigned int depth)
6900 struct maple_node *node = mte_to_node(entry);
6901 unsigned int type = mte_node_type(entry);
6904 mt_dump_range(min, max, depth);
6906 pr_cont("node %p depth %d type %d parent %p", node, depth, type,
6907 node ? node->parent : NULL);
6911 for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
6913 pr_cont("OUT OF RANGE: ");
6914 mt_dump_entry(mt_slot(mt, node->slot, i),
6915 min + i, min + i, depth);
6919 case maple_range_64:
6920 mt_dump_range64(mt, entry, min, max, depth);
6922 case maple_arange_64:
6923 mt_dump_arange64(mt, entry, min, max, depth);
6927 pr_cont(" UNKNOWN TYPE\n");
6931 void mt_dump(const struct maple_tree *mt)
6933 void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
6935 pr_info("maple_tree(%p) flags %X, height %u root %p\n",
6936 mt, mt->ma_flags, mt_height(mt), entry);
6937 if (!xa_is_node(entry))
6938 mt_dump_entry(entry, 0, 0, 0);
6940 mt_dump_node(mt, entry, 0, mt_max[mte_node_type(entry)], 0);
6942 EXPORT_SYMBOL_GPL(mt_dump);
6945 * Calculate the maximum gap in a node and check if that's what is reported in
6946 * the parent (unless root).
6948 static void mas_validate_gaps(struct ma_state *mas)
6950 struct maple_enode *mte = mas->node;
6951 struct maple_node *p_mn;
6952 unsigned long gap = 0, max_gap = 0;
6953 unsigned long p_end, p_start = mas->min;
6954 unsigned char p_slot;
6955 unsigned long *gaps = NULL;
6956 unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte));
6959 if (ma_is_dense(mte_node_type(mte))) {
6960 for (i = 0; i < mt_slot_count(mte); i++) {
6961 if (mas_get_slot(mas, i)) {
6972 gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte));
6973 for (i = 0; i < mt_slot_count(mte); i++) {
6974 p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte));
6977 if (mas_get_slot(mas, i)) {
6982 gap += p_end - p_start + 1;
6984 void *entry = mas_get_slot(mas, i);
6988 if (gap != p_end - p_start + 1) {
6989 pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n",
6991 mas_get_slot(mas, i), gap,
6995 MT_BUG_ON(mas->tree,
6996 gap != p_end - p_start + 1);
6999 if (gap > p_end - p_start + 1) {
7000 pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
7001 mas_mn(mas), i, gap, p_end, p_start,
7002 p_end - p_start + 1);
7003 MT_BUG_ON(mas->tree,
7004 gap > p_end - p_start + 1);
7012 p_start = p_end + 1;
7013 if (p_end >= mas->max)
7018 if (mte_is_root(mte))
7021 p_slot = mte_parent_slot(mas->node);
7022 p_mn = mte_parent(mte);
7023 MT_BUG_ON(mas->tree, max_gap > mas->max);
7024 if (ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap) {
7025 pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
7029 MT_BUG_ON(mas->tree,
7030 ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap);
7033 static void mas_validate_parent_slot(struct ma_state *mas)
7035 struct maple_node *parent;
7036 struct maple_enode *node;
7037 enum maple_type p_type = mas_parent_enum(mas, mas->node);
7038 unsigned char p_slot = mte_parent_slot(mas->node);
7042 if (mte_is_root(mas->node))
7045 parent = mte_parent(mas->node);
7046 slots = ma_slots(parent, p_type);
7047 MT_BUG_ON(mas->tree, mas_mn(mas) == parent);
7049 /* Check prev/next parent slot for duplicate node entry */
7051 for (i = 0; i < mt_slots[p_type]; i++) {
7052 node = mas_slot(mas, slots, i);
7054 if (node != mas->node)
7055 pr_err("parent %p[%u] does not have %p\n",
7056 parent, i, mas_mn(mas));
7057 MT_BUG_ON(mas->tree, node != mas->node);
7058 } else if (node == mas->node) {
7059 pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
7060 mas_mn(mas), parent, i, p_slot);
7061 MT_BUG_ON(mas->tree, node == mas->node);
7066 static void mas_validate_child_slot(struct ma_state *mas)
7068 enum maple_type type = mte_node_type(mas->node);
7069 void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7070 unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
7071 struct maple_enode *child;
7074 if (mte_is_leaf(mas->node))
7077 for (i = 0; i < mt_slots[type]; i++) {
7078 child = mas_slot(mas, slots, i);
7079 if (!pivots[i] || pivots[i] == mas->max)
7085 if (mte_parent_slot(child) != i) {
7086 pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
7087 mas_mn(mas), i, mte_to_node(child),
7088 mte_parent_slot(child));
7089 MT_BUG_ON(mas->tree, 1);
7092 if (mte_parent(child) != mte_to_node(mas->node)) {
7093 pr_err("child %p has parent %p not %p\n",
7094 mte_to_node(child), mte_parent(child),
7095 mte_to_node(mas->node));
7096 MT_BUG_ON(mas->tree, 1);
7102 * Validate all pivots are within mas->min and mas->max.
7104 static void mas_validate_limits(struct ma_state *mas)
7107 unsigned long prev_piv = 0;
7108 enum maple_type type = mte_node_type(mas->node);
7109 void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7110 unsigned long *pivots = ma_pivots(mas_mn(mas), type);
7112 /* all limits are fine here. */
7113 if (mte_is_root(mas->node))
7116 for (i = 0; i < mt_slots[type]; i++) {
7119 piv = mas_safe_pivot(mas, pivots, i, type);
7121 if (!piv && (i != 0))
7124 if (!mte_is_leaf(mas->node)) {
7125 void *entry = mas_slot(mas, slots, i);
7128 pr_err("%p[%u] cannot be null\n",
7131 MT_BUG_ON(mas->tree, !entry);
7134 if (prev_piv > piv) {
7135 pr_err("%p[%u] piv %lu < prev_piv %lu\n",
7136 mas_mn(mas), i, piv, prev_piv);
7137 MT_BUG_ON(mas->tree, piv < prev_piv);
7140 if (piv < mas->min) {
7141 pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
7143 MT_BUG_ON(mas->tree, piv < mas->min);
7145 if (piv > mas->max) {
7146 pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
7148 MT_BUG_ON(mas->tree, piv > mas->max);
7151 if (piv == mas->max)
7154 for (i += 1; i < mt_slots[type]; i++) {
7155 void *entry = mas_slot(mas, slots, i);
7157 if (entry && (i != mt_slots[type] - 1)) {
7158 pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
7160 MT_BUG_ON(mas->tree, entry != NULL);
7163 if (i < mt_pivots[type]) {
7164 unsigned long piv = pivots[i];
7169 pr_err("%p[%u] should not have piv %lu\n",
7170 mas_mn(mas), i, piv);
7171 MT_BUG_ON(mas->tree, i < mt_pivots[type] - 1);
7176 static void mt_validate_nulls(struct maple_tree *mt)
7178 void *entry, *last = (void *)1;
7179 unsigned char offset = 0;
7181 MA_STATE(mas, mt, 0, 0);
7184 if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
7187 while (!mte_is_leaf(mas.node))
7190 slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
7192 entry = mas_slot(&mas, slots, offset);
7193 if (!last && !entry) {
7194 pr_err("Sequential nulls end at %p[%u]\n",
7195 mas_mn(&mas), offset);
7197 MT_BUG_ON(mt, !last && !entry);
7199 if (offset == mas_data_end(&mas)) {
7200 mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
7201 if (mas_is_none(&mas))
7204 slots = ma_slots(mte_to_node(mas.node),
7205 mte_node_type(mas.node));
7210 } while (!mas_is_none(&mas));
7214 * validate a maple tree by checking:
7215 * 1. The limits (pivots are within mas->min to mas->max)
7216 * 2. The gap is correctly set in the parents
7218 void mt_validate(struct maple_tree *mt)
7222 MA_STATE(mas, mt, 0, 0);
7225 if (!mas_searchable(&mas))
7228 mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node));
7229 while (!mas_is_none(&mas)) {
7230 MT_BUG_ON(mas.tree, mte_dead_node(mas.node));
7231 if (!mte_is_root(mas.node)) {
7232 end = mas_data_end(&mas);
7233 if ((end < mt_min_slot_count(mas.node)) &&
7234 (mas.max != ULONG_MAX)) {
7235 pr_err("Invalid size %u of %p\n", end,
7237 MT_BUG_ON(mas.tree, 1);
7241 mas_validate_parent_slot(&mas);
7242 mas_validate_child_slot(&mas);
7243 mas_validate_limits(&mas);
7244 if (mt_is_alloc(mt))
7245 mas_validate_gaps(&mas);
7246 mas_dfs_postorder(&mas, ULONG_MAX);
7248 mt_validate_nulls(mt);
7253 EXPORT_SYMBOL_GPL(mt_validate);
7255 #endif /* CONFIG_DEBUG_MAPLE_TREE */