/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
#ifndef FREELIST_H
#define FREELIST_H

#include <linux/atomic.h>

/*
 * Copyright: cameron@moodycamel.com
 *
 * A simple CAS-based lock-free free list. Not the fastest thing in the world
 * under heavy contention, but simple and correct (assuming nodes are never
 * freed until after the free list is destroyed), and fairly speedy under low
 * contention.
 *
 * Adapted from: https://moodycamel.com/blog/2014/solving-the-aba-problem-for-lock-free-free-lists
 */

struct freelist_node {
	atomic_t		refs;
	struct freelist_node	*next;
};

struct freelist_head {
	struct freelist_node	*head;
};
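
/*
 * The high bit of a node's refs word (REFS_ON_FREELIST) records that the
 * node should go back on the free list once its reference count drops to
 * zero; the low 31 bits (REFS_MASK) hold the reference count itself.
 */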
#define REFS_ON_FREELIST 0x80000000
#define REFS_MASK	 0x7FFFFFFF

static inline void __freelist_add(struct freelist_node *node, struct freelist_head *list)
{
	/*
	 * Since the refcount is zero, and nobody can increase it once it's
	 * zero (except us, and we run only one copy of this method per node at
	 * a time, i.e. the single thread case), then we know we can safely
	 * change the next pointer of the node; however, once the refcount is
	 * back above zero, then other threads could increase it (happens under
	 * heavy contention, when the refcount goes to zero in between a load
	 * and a refcount increment of a node in try_get, then back up to
	 * something non-zero, then the refcount increment is done by the other
	 * thread) -- so, if the CAS to add the node to the actual list fails,
	 * decrease the refcount and leave the add operation to the next thread
	 * that puts the refcount back to zero (which could be us, hence the
	 * loop).
	 */
	struct freelist_node *head = READ_ONCE(list->head);

	for (;;) {
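		/*
		 * The release ordering on the refcount store and on the
		 * list-head CAS below pairs with the acquire operations in
		 * freelist_try_get(), so a getter that observes this node
		 * also observes its next pointer.
		 */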
		WRITE_ONCE(node->next, head);
		atomic_set_release(&node->refs, 1);

		if (!try_cmpxchg_release(&list->head, &head, node)) {
			/*
			 * Hmm, the add failed, but we can only try again when
			 * the refcount goes back to zero.
			 */
			if (atomic_fetch_add_release(REFS_ON_FREELIST - 1, &node->refs) == 1)
				continue;
		}
		return;
	}
}

static inline void freelist_add(struct freelist_node *node, struct freelist_head *list)
{
	/*
	 * We know that the should-be-on-freelist bit is 0 at this point, so
	 * it's safe to set it using a fetch_add.
	 */
	if (!atomic_fetch_add_release(REFS_ON_FREELIST, &node->refs)) {
		/*
		 * Oh look! We were the last ones referencing this node, and we
		 * know we want to add it to the free list, so let's do it!
		 */
		__freelist_add(node, list);
	}
}
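
/*
 * Note that a freshly zero-initialized node (refs == 0) can be seeded onto
 * the list with freelist_add(): the fetch_add() observes a zero refcount,
 * so the node is pushed immediately via __freelist_add().
 */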

static inline struct freelist_node *freelist_try_get(struct freelist_head *list)
{
	struct freelist_node *prev, *next, *head = smp_load_acquire(&list->head);
	unsigned int refs;

	while (head) {
		prev = head;
		refs = atomic_read(&head->refs);
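		/*
		 * A zero count means the node is being taken off or is about
		 * to be re-added, so it is not safe to reference; a failed
		 * increment CAS means the count changed under us. Either
		 * way, reload the head and retry.
		 */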
		if ((refs & REFS_MASK) == 0 ||
		    !atomic_try_cmpxchg_acquire(&head->refs, &refs, refs+1)) {
			head = smp_load_acquire(&list->head);
			continue;
		}

		/*
		 * Good, reference count has been incremented (it wasn't at
		 * zero), which means we can read the next and not worry about
		 * it changing between now and the time we do the CAS.
		 */
		next = READ_ONCE(head->next);
		if (try_cmpxchg_acquire(&list->head, &head, next)) {
			/*
			 * Yay, got the node. This means it was on the list,
			 * which means should-be-on-freelist must be false no
			 * matter the refcount (because nobody else knows it's
			 * been taken off yet, it can't have been put back on).
			 */
			WARN_ON_ONCE(atomic_read(&head->refs) & REFS_ON_FREELIST);

			/*
			 * Decrease refcount twice, once for our ref, and once
			 * for the list's ref.
			 */
			atomic_fetch_add(-2, &head->refs);

			return head;
		}

		/*
		 * OK, the head must have changed on us, but we still need to
		 * decrement the refcount we increased.
		 */
		refs = atomic_fetch_add(-1, &prev->refs);
		if (refs == REFS_ON_FREELIST + 1)
			__freelist_add(prev, list);
	}

	return NULL;
}
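
/*
 * Illustrative usage sketch; the names my_object, my_pool and the field
 * layout are hypothetical, not part of this header. Embed a freelist_node
 * in the containing object, seed zero-initialized (e.g. kzalloc'd) objects
 * with freelist_add(), and recover the object with container_of() after a
 * successful freelist_try_get():
 *
 *	struct my_object {
 *		struct freelist_node freelist;
 *		int data;
 *	};
 *
 *	static struct freelist_head my_pool;
 *
 *	struct freelist_node *fn = freelist_try_get(&my_pool);
 *	if (fn) {
 *		struct my_object *obj =
 *			container_of(fn, struct my_object, freelist);
 *		// ... use obj, then return it to the pool:
 *		freelist_add(&obj->freelist, &my_pool);
 *	}
 */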

#endif /* FREELIST_H */