3 * Copyright (C) 2011 Maciej Piechotka
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2.1 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 * Maciej Piechotka <uzytkownik2@gmail.com>
24 * Hazard pointer is a method of protecting a pointer shared by many threads.
25 * If you want to use an atomic pointer that may be freed you should use the following code:
28 * string *shared_pointer = ...;
29 * HazardPointer<string> hptr = HazardPointer.get_hazard_pointer (&shared_pointer);
30 * // my_string contains value from shared_pointer. It is valid as long as hptr is alive.
31 * unowned string my_string = hptr.get ();
32 * // instead of delete
33 * hptr.release ((ptr) => {string *sptr = ptr;string ref = (owned)sptr;});
37 * In some cases you may use helper methods which might involve copying of object (and are unsafe for unowned objects):
39 * Gtk.Window *window = ...;
40 * Gtk.Window? local_window = HazardPointer.get_pointer (&window);
41 * HazardPointer.set_pointer (&window, ...)
42 * local_window = HazardPointer.exchange_pointer (&window, null);
43 * HazardPointer.compare_and_exchange (&window, null, local_window);
46 * The class also provides helper methods if the least significant bits are used for storing flags.
48 * HazardPointers are not thread-safe (unless documentation states otherwise).
51 public class Gee.HazardPointer<G> { // FIXME: Make it a struct
53 * Creates a hazard pointer for a pointer.
55 * @param ptr Protected pointer
57 public HazardPointer (G *ptr) {
58 this._node = acquire ();
59 this._node.set ((void *)ptr);
63 * Create a hazard pointer from Node.
65 internal HazardPointer.from_node (Node node) {
70 * Gets hazard pointer from atomic pointer safely.
72 * @param aptr Atomic pointer.
73 * @param mask Mask of bits.
74 * @param mask_out Result of mask.
75 * @return Hazard pointer containing the element.
77 public static HazardPointer<G>? get_hazard_pointer<G> (G **aptr, size_t mask = 0, out size_t mask_out = null) {
78 unowned Node node = acquire ();
83 rptr = AtomicPointer.get ((void **)aptr);
84 ptr = (void *)((size_t) rptr & ~mask);
85 mask_out = (size_t) rptr & mask;
87 } while (rptr != AtomicPointer.get ((void **)aptr));
89 return new HazardPointer<G>.from_node (node);
97 * Copy an object from atomic pointer.
99 * @param aptr Atomic pointer.
100 * @param mask Mask of flags.
101 * @param mask_out Result of mask.
102 * @return A copy of object from atomic pointer.
104 public static G? get_pointer<G> (G **aptr, size_t mask = 0, out size_t mask_out = null) {
105 unowned Node node = acquire ();
110 rptr = AtomicPointer.get ((void **)aptr);
111 ptr = (void *)((size_t) rptr & ~mask);
112 mask_out = (size_t) rptr & mask;
114 } while (rptr != AtomicPointer.get ((void **)aptr));
121 * Exchange objects safely.
123 * @param aptr Atomic pointer.
124 * @param new_ptr New value
125 * @param mask Mask of flags.
126 * @param new_mask New mask.
127 * @param old_mask Previous mask.
128 * @return Hazard pointer containing old value.
130 public static HazardPointer<G>? exchange_hazard_pointer<G> (G **aptr, owned G? new_ptr, size_t mask = 0, size_t new_mask = 0, out size_t old_mask = null) {
131 unowned Node? new_node = null;
132 if (new_ptr != null) {
133 new_node = acquire ();
134 new_node.set (new_ptr);
137 void *new_rptr = (void *)((size_t)((owned) new_ptr) | (mask & new_mask));
138 unowned Node node = acquire ();
142 rptr = AtomicPointer.get ((void **)aptr);
143 ptr = (void *)((size_t) rptr & ~mask);
144 old_mask = (size_t) rptr & mask;
146 } while (!AtomicPointer.compare_and_exchange((void **)aptr, rptr, new_rptr));
147 if (new_node != null)
150 return new HazardPointer<G>.from_node (node);
160 * @param aptr Atomic pointer.
161 * @param new_ptr New value
162 * @param mask Mask of flags.
163 * @param new_mask New mask.
165 public static void set_pointer<G> (G **aptr, owned G? new_ptr, size_t mask = 0, size_t new_mask = 0) {
166 HazardPointer<G>? ptr = exchange_hazard_pointer<G> (aptr, new_ptr, mask, new_mask, null);
168 DestroyNotify<G> notify = get_destroy_notify<G> ();
169 ptr.release ((owned)notify);
174 * Exchange objects safely.
176 * @param aptr Atomic pointer.
177 * @param new_ptr New value
178 * @param mask Mask of flags.
179 * @param new_mask New mask.
180 * @param old_mask Previous mask.
181 * @return Value that was previously stored.
183 public static G? exchange_pointer<G> (G **aptr, owned G? new_ptr, size_t mask = 0, size_t new_mask = 0, out size_t old_mask = null) {
184 HazardPointer<G>? ptr = exchange_hazard_pointer<G> (aptr, new_ptr, mask, new_mask, out old_mask);
185 G? rptr = ptr != null ? ptr.get () : null;
190 * Compares and exchanges objects.
192 * @param aptr Atomic pointer.
193 * @param old_ptr Old pointer.
194 * @param _new_ptr New value.
195 * @param old_mask Old mask.
196 * @param new_mask New mask.
197 * @return Value that was previously stored.
199 public static bool compare_and_exchange_pointer<G> (G **aptr, G? old_ptr, owned G? _new_ptr, size_t mask = 0, size_t old_mask = 0, size_t new_mask = 0) {
200 G *new_ptr = (owned)_new_ptr;
201 void *new_rptr = (void *)((size_t)(new_ptr) | (mask & new_mask));
202 void *old_rptr = (void *)((size_t)(old_ptr) | (mask & old_mask));
203 bool success = AtomicPointer.compare_and_exchange((void **)aptr, old_rptr, new_rptr);
205 DestroyNotify<G> notify = get_destroy_notify<G> ();
206 if (old_ptr != null) {
207 Context.get_current_context ()->release_ptr (old_ptr, (owned)notify);
209 } else if (new_ptr != null) {
210 _new_ptr = (owned)new_ptr;
220 * Gets the pointer held by the hazard pointer.
222 * @param other_thread Has to be set to ``true`` if accessed from a thread other than the one that created this hazard pointer.
223 * @return The value held by the pointer.
225 public inline new unowned G get (bool other_thread = false) {
226 return _node[other_thread];
232 * @param notify method freeing object
234 public void release (owned DestroyNotify notify) {
235 unowned G item = _node[false];
238 Context.get_current_context ()->release_ptr (item, (owned)notify);
243 * Sets default policy (i.e. default policy for user-created contexts).
244 * The policy must be concrete and should not be blocking.
246 * @param policy New default policy.
248 public static void set_default_policy (Policy policy) requires (policy.is_concrete ()) {
249 if (policy.is_blocking ())
250 warning ("Setting blocking defautl Gee.HazardPointer.Policy (there may be a deadlock).\n");
251 AtomicInt.set(ref _default_policy, (int)policy);
255 * Sets thread exit policy (i.e. default policy for the top-most Context).
256 * The policy must be concrete and should not be unsafe.
258 * @param policy New thread policy.
260 public static void set_thread_exit_policy (Policy policy) requires (policy.is_concrete ()) {
261 if (!policy.is_safe ())
262 warning ("Setting unsafe globale thread-exit Gee.HazardPointer.Policy (there may be a memory leak).\n");
263 AtomicInt.set(ref _thread_exit_policy, (int)policy);
267 * Sets the release policy (i.e. how exactly the released objects are freed).
269 * The policy can only be set before any object is released and is not thread-safe.
271 * @param policy New release policy.
273 public static bool set_release_policy (ReleasePolicy policy) {
274 int old_policy = AtomicInt.get (ref release_policy);
275 if ((old_policy & (sizeof(int) * 8 - 1)) != 0) {
276 critical ("Attempt to change the policy of running helper. Failing.");
279 if (!AtomicInt.compare_and_exchange (ref release_policy, old_policy, (int)policy)) {
280 critical ("Concurrent access to release policy detected. Failing.");
287 * Policy determines what happens on exit from Context.
291 * Performs default action on exit from thread.
295 * Performs the same action as on exit from current thread.
299 * Goes through the free list and attempts to free un-freed elements.
303 * Goes through the free list and attempts to free un-freed elements
304 * until all elements are freed.
308 * Release the un-freed elements to either helper thread or to main loop.
309 * Please note if the operation would block it is not performed.
313 * Release the un-freed elements to either helper thread or to main loop.
314 * Please note it may block while adding to queue.
319 * Checks if the policy is concrete or if it depends on global variables.
321 * @return ``true`` if this policy does not depend on global variables
323 public bool is_concrete () {
334 assert_not_reached ();
339 * Checks if policy blocks or is lock-free.
340 * Please note that it works on a concrete policy only.
342 * @return ``true`` if the policy may block the thread.
344 public bool is_blocking () requires (this.is_concrete ()) {
353 assert_not_reached ();
358 * Checks if policy guarantees freeing all elements.
359 * Please note that it works on a concrete policy only.
361 * @return ``true`` if the policy guarantees freeing all elements.
363 public bool is_safe () requires (this.is_concrete ()) {
372 assert_not_reached ();
377 * Finds concrete policy which corresponds to given policy.
379 * @return Policy that corresponds to given policy at given time in given thread.
381 public Policy to_concrete () ensures (result.is_concrete ()) {
389 return (Policy) AtomicInt.get (ref _default_policy);
391 return (Policy) AtomicInt.get (ref _thread_exit_policy);
393 assert_not_reached ();
400 * @param to_free List containing elements to free.
401 * @return Non-empty list of not freed elements or ``null`` if all elements have been disposed.
403 internal ArrayList<FreeNode *>? perform (owned ArrayList<FreeNode *> to_free) {
404 switch (this.to_concrete ()) {
406 return try_free (to_free) ? (owned) to_free : null;
408 while (try_free (to_free)) {
413 ReleasePolicy.ensure_start ();
414 if (_queue_mutex.trylock ()) {
415 _queue.offer ((owned) to_free);
416 _queue_mutex.unlock ();
419 return (owned) to_free;
422 ReleasePolicy.ensure_start ();
423 _queue_mutex.lock ();
424 _queue.offer ((owned) to_free);
425 _queue_mutex.unlock ();
428 assert_not_reached ();
433 public delegate void DestroyNotify (void *ptr);
436 * Release policy determines what happens with object freed by Policy.TRY_RELEASE
437 * and Policy.RELEASE.
439 public enum ReleasePolicy {
441 * Libgee spawns helper thread to free those elements.
446 * Libgee uses GLib main loop.
447 * This is recommended for application using GLib main loop.
451 private static void start (ReleasePolicy self) { // FIXME: Make it non-static [bug 659778]
454 Thread.create<bool> (() => {
455 Thread.self<bool> ().set_priority (ThreadPriority.LOW);
469 assert_not_reached ();
474 * Ensures that helper methods are started.
476 internal static inline void ensure_start () {
477 int policy = AtomicInt.get (ref release_policy);
478 if ((policy & (1 << (sizeof(int) * 8 - 1))) != 0)
480 if (_queue_mutex.trylock ()) {
481 policy = AtomicInt.get (ref release_policy);
482 if ((policy & (1 << (sizeof(int) * 8 - 1))) == 0) {
483 _queue = new LinkedList<ArrayList<FreeNode *>> ();
484 // Hack to not lie about successfully setting the policy
485 policy = AtomicInt.exchange_and_add (ref release_policy, (int)(1 << (sizeof(int) * 8 - 1)));
486 start ((ReleasePolicy) policy);
488 _queue_mutex.unlock ();
492 private static inline void attempt_free () {
493 if (_queue_mutex.trylock ()) {
494 Collection<ArrayList<FreeNode *>> temp = new ArrayList<ArrayList<FreeNode *>> ();
496 _queue_mutex.unlock ();
497 temp.foreach ((x) => {_global_to_free.add_all (x); return true;});
499 try_free (_global_to_free);
504 * Creates a new context. The user does not need to create one explicitly; however,
505 * when about to issue a batch of commands, it might be beneficial to fine-tune the creation of contexts.
508 * Context ctx = new Context ();
509 * lock_free_collection.operation1 ();
510 * // Normally on exit the thread exit operation would be executed but here the default operation of
511 * // child context is executed.
512 * lock_free_collection.operation2 ();
515 * Please note that the Context is implicitly part of the stack and:
517 * 1. It cannot be moved between threads.
518 * 2. If in given thread the child (created later) context is alive parent must be alive as well.
521 public class Context { // FIXME: Should be struct
522 public Context (Policy? policy = null) {
523 this._to_free = new ArrayList<FreeNode *> ();
524 this._parent = _current_context.get ();
525 _current_context.set (this, null);
526 if (policy == null) {
527 if (_parent == null) {
528 _policy = (Policy)AtomicInt.get (ref _thread_exit_policy);
530 _policy = (Policy)AtomicInt.get (ref _default_policy);
533 this._policy = policy.to_concrete ();
536 stderr.printf ("Entering context %p (policy %s, parent %p)\n", this, _policy != null ? _policy.to_string () : null, _parent);
542 stderr.printf ("Exiting context %p (policy %s, parent %p)\n", this, _policy != null ? _policy.to_string () : null, _parent);
544 int size = _to_free.size;
545 bool clean_parent = false;
547 ArrayList<FreeNode *>? remaining;
548 if (_parent == null || size >= THRESHOLD)
549 remaining = _policy.perform ((owned) _to_free);
551 remaining = (owned) _to_free;
552 if (remaining != null) {
553 assert (_parent != null);
554 _parent->_to_free.add_all (remaining);
559 stderr.printf ("Setting current context to %p\n", _parent);
561 _current_context.set (_parent, null);
563 HazardPointer.try_free (_parent->_to_free);
567 * Tries to free all freed pointer in current context.
569 public void try_free () {
570 HazardPointer.try_free (_to_free);
574 * Ensures that the whole context is freed. Please note that it might block.
576 public void free_all () {
577 while (HazardPointer.try_free (_to_free))
582 * Tries to push the current context to releaser.
584 public void try_release () {
585 if (_queue_mutex.trylock ()) {
586 _queue.offer ((owned) _to_free);
587 _to_free = new ArrayList<FreeNode *> ();
588 _queue_mutex.unlock ();
593 * Pushes the current context to the releaser. Please note that it might block.
595 public void release () {
596 _queue_mutex.lock ();
597 _queue.offer ((owned) _to_free);
598 _to_free = new ArrayList<FreeNode *> ();
599 _queue_mutex.unlock ();
603 * Add pointer to freed array.
605 internal inline void release_ptr (void *ptr, owned DestroyNotify notify) {
606 FreeNode *node = new FreeNode ();
608 node->destroy_notify = (owned)notify;
610 if (_to_free.size >= THRESHOLD)
611 HazardPointer.try_free (_to_free);
615 * Gets current context.
617 internal inline static Context *get_current_context () {
618 return _current_context.get ();
621 internal Context *_parent;
622 internal ArrayList<FreeNode *> _to_free;
623 internal Policy? _policy;
624 internal static StaticPrivate _current_context;
625 internal static StaticPrivate _root_context;
626 private static uint THRESHOLD = 10;
630 * Gets a new hazard pointer node.
632 * @return new hazard pointer node.
634 internal static inline unowned Node acquire () {
635 for (unowned Node? curr = get_head (); curr != null; curr = curr.get_next ())
636 if (curr.activate ())
638 Node *node = new Node ();
639 Node *old_head = null;
641 node->set_next (old_head = (Node *)AtomicPointer.get (&_head));
642 } while (!AtomicPointer.compare_and_exchange (&_head, old_head, node));
647 * Tries to free from list.
649 * @return ``true`` if list is empty.
651 internal static bool try_free (ArrayList<FreeNode *> to_free) {
652 Collection<void *> used = new HashSet<void *>();
653 for (unowned Node? current = get_head (); current != null; current = current.get_next ()) {
654 used.add (current.get ());
656 for (int i = 0; i < to_free.size;) {
657 FreeNode *current = to_free[i];
658 if (used.contains (current->pointer)) {
660 stderr.printf ("Skipping freeing %p\n", current->pointer);
665 stderr.printf ("Freeing %p\n", current->pointer);
667 FreeNode *cur = to_free.remove_at (to_free.size - 1);
668 if (i != to_free.size) {
669 FreeNode *temp = to_free[i];
673 cur->destroy_notify (cur->pointer);
677 return to_free.size > 0;
681 * Gets head of hazard pointers.
682 * @return Hazard pointer head.
684 internal static unowned Node? get_head () {
685 return (Node *)AtomicPointer.get(&_head);
688 internal unowned Node _node;
690 internal static Node *_head = null;
692 internal static int _default_policy = (int)Policy.TRY_FREE;
693 internal static int _thread_exit_policy = (int)Policy.RELEASE;
695 internal static int release_policy = 0;
697 internal static Queue<ArrayList<FreeNode *>> _queue;
698 internal static StaticMutex _queue_mutex;
700 internal static ArrayList<FreeNode *> _global_to_free;
702 internal static DestroyNotify get_destroy_notify<G> () {
711 internal class FreeNode {
712 public void *pointer;
713 public DestroyNotify destroy_notify;
717 * List of used pointers.
720 internal class Node {
722 AtomicPointer.set (&_hazard, null);
723 AtomicInt.set (ref _active, 1);
730 public void release () {
731 AtomicPointer.set (&_hazard, null);
732 AtomicInt.set (ref _active, 0);
735 public inline bool is_active () {
736 return AtomicInt.get (ref _active) != 0;
739 public inline bool activate () {
740 return AtomicInt.compare_and_exchange (ref _active, 0, 1);
743 public inline void set (void *ptr) {
744 AtomicPointer.set (&_hazard, ptr);
747 public inline void *get (bool safe = true) {
749 return (void *)AtomicPointer.get (&_hazard);
751 return (void *)_hazard;
755 public inline unowned Node? get_next () {
756 return (Node *)AtomicPointer.get (&_next);
759 public inline void set_next (Node *next) {
760 AtomicPointer.set (&_next, next);
765 public void *_hazard;