3 * Copyright (C) 2011 Maciej Piechotka
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2.1 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 * Maciej Piechotka <uzytkownik2@gmail.com>
24 * Hazard pointer is a method of protecting a pointer shared by many threads.
25 * If you want to use an atomic pointer that may be freed, you should use the following code:
28 * string *shared_pointer = ...;
29 * HazardPointer<string> hptr = HazardPointer.get_hazard_pointer (&shared_pointer);
30 * // my_string contains the value from shared_pointer. It is valid as long as hptr is alive.
31 * unowned string my_string = ptr.get ();
32 * // instead of delete
33 * ptr.release ((ptr) => {string *sptr = ptr;string ref = (owned)sptr;});
37 * In some cases you may use helper methods which might involve copying of object (and are unsafe for unowned objects):
39 * Gtk.Window *window = ...;
40 * Gtk.Window? local_window = HazardPointer.get_pointer (&window);
41 * HazardPointer.set_pointer (&window, ...)
42 * local_window = HazardPointer.exchange_pointer (&window, null);
43 * HazardPointer.compare_and_exchange (&window, null, local_window);
46 * The class also provides helper methods if least significant bits are used for storing flags.
48 * HazardPointers are not thread-safe (unless documentation states otherwise).
51 public class Gee.HazardPointer<G> { // FIXME: Make it a struct
53 * Creates a hazard pointer for a pointer.
55 * @param ptr Protected pointer
// Publishes ptr in a freshly acquired global Node so concurrent frees will skip it.
57 public HazardPointer (G *ptr) {
58 this._node = acquire ();
59 this._node.set ((void *)ptr);
63 * Create a hazard pointer from Node.
// Internal: wraps an already-acquired Node (constructor body elided in this listing).
65 internal HazardPointer.from_node (Node node) {
70 * Gets hazard pointer from atomic pointer safely.
72 * @param aptr Atomic pointer.
73 * @param mask Mask of bits.
74 * @param mask_out Result of mask.
75 * @return Hazard pointer containing the element.
77 public static HazardPointer<G>? get_hazard_pointer<G> (G **aptr, size_t mask = 0, out size_t mask_out = null) {
78 unowned Node node = acquire ();
// Publish-and-revalidate loop: read the raw pointer, strip the flag bits, then
// retry until a fresh read confirms *aptr did not change while publishing.
// (The node.set call presumably sits in an elided line — verify against full source.)
83 rptr = AtomicPointer.get ((void **)aptr);
84 ptr = (void *)((size_t) rptr & ~mask);
85 mask_out = (size_t) rptr & mask;
87 } while (rptr != AtomicPointer.get ((void **)aptr));
89 return new HazardPointer<G>.from_node (node);
97 * Copy an object from atomic pointer.
99 * @param aptr Atomic pointer.
100 * @param mask Mask of flags.
101 * @param mask_out Result of mask.
102 * @return A copy of object from atomic pointer.
104 public static G? get_pointer<G> (G **aptr, size_t mask = 0, out size_t mask_out = null) {
105 unowned Node node = acquire ();
// Same publish-and-revalidate loop as get_hazard_pointer; the copy/return of the
// value is in lines elided from this listing.
110 rptr = AtomicPointer.get ((void **)aptr);
111 ptr = (void *)((size_t) rptr & ~mask);
112 mask_out = (size_t) rptr & mask;
114 } while (rptr != AtomicPointer.get ((void **)aptr));
121 * Exchange objects safely.
123 * @param aptr Atomic pointer.
124 * @param new_ptr New value
125 * @param mask Mask of flags.
126 * @param new_mask New mask.
127 * @param old_mask Previous mask.
128 * @return Hazard pointer containing old value.
130 public static HazardPointer<G>? exchange_hazard_pointer<G> (G **aptr, owned G? new_ptr, size_t mask = 0, size_t new_mask = 0, out size_t old_mask = null) {
131 unowned Node? new_node = null;
// Protect the incoming value with its own hazard node before it becomes
// reachable through *aptr.
132 if (new_ptr != null) {
133 new_node = acquire ();
134 new_node.set (new_ptr);
// Ownership of new_ptr is transferred into the raw word; flag bits are ORed in.
137 void *new_rptr = (void *)((size_t)((owned) new_ptr) | (mask & new_mask));
138 unowned Node node = acquire ();
// CAS loop: publish the old value in `node`, then attempt the swap; retry on contention.
142 rptr = AtomicPointer.get ((void **)aptr);
143 ptr = (void *)((size_t) rptr & ~mask);
144 old_mask = (size_t) rptr & mask;
146 } while (!AtomicPointer.compare_and_exchange((void **)aptr, rptr, new_rptr));
// The temporary node guarding the new value is no longer needed (its release is elided here).
147 if (new_node != null)
150 return new HazardPointer<G>.from_node (node);
160 * @param aptr Atomic pointer.
161 * @param new_ptr New value
162 * @param mask Mask of flags.
163 * @param new_mask New mask.
// Stores new_ptr and schedules the previous value (if any) for safe, deferred destruction.
165 public static void set_pointer<G> (G **aptr, owned G? new_ptr, size_t mask = 0, size_t new_mask = 0) {
166 HazardPointer<G>? ptr = exchange_hazard_pointer<G> (aptr, new_ptr, mask, new_mask, null);
// The old value is freed through the type's destroy notify, not deleted directly.
168 DestroyNotify<G> notify = get_destroy_notify<G> ();
169 ptr.release ((owned)notify);
174 * Exchange objects safely.
176 * @param aptr Atomic pointer.
177 * @param new_ptr New value
178 * @param mask Mask of flags.
179 * @param new_mask New mask.
180 * @param old_mask Previous mask.
181 * @return Value that was previously stored.
183 public static G? exchange_pointer<G> (G **aptr, owned G? new_ptr, size_t mask = 0, size_t new_mask = 0, out size_t old_mask = null) {
184 HazardPointer<G>? ptr = exchange_hazard_pointer<G> (aptr, new_ptr, mask, new_mask, out old_mask);
// Copy the old value out of the hazard pointer (null if nothing was stored).
185 G? rptr = ptr != null ? ptr.get () : null;
190 * Compares and exchanges objects.
192 * @param aptr Atomic pointer.
193 * @param old_ptr Old pointer.
194 * @param _new_ptr New value.
195 * @param old_mask Old mask.
196 * @param new_mask New mask.
197 * @return ``true`` if the exchange took place (the method returns bool, not the old value).
199 public static bool compare_and_exchange_pointer<G> (G **aptr, G? old_ptr, owned G? _new_ptr, size_t mask = 0, size_t old_mask = 0, size_t new_mask = 0) {
// Take ownership of the new value; on success the old one is handed to the
// current context for deferred destruction.
200 G *new_ptr = (owned)_new_ptr;
201 void *new_rptr = (void *)((size_t)(new_ptr) | (mask & new_mask));
202 void *old_rptr = (void *)((size_t)(old_ptr) | (mask & old_mask));
203 bool success = AtomicPointer.compare_and_exchange((void **)aptr, old_rptr, new_rptr);
// (the `if (success ...)` opening line is elided in this listing)
205 DestroyNotify<G> notify = get_destroy_notify<G> ();
206 Context.get_current_context ()->release_ptr (old_ptr, (owned)notify);
207 } else if (new_ptr != null) {
218 * Gets the pointer held by hazard pointer.
220 * @param other_thread Have to be set to ``true`` if accessed from a thread that did not create this hazard pointer.
221 * @return The value held by pointer.
223 public inline new unowned G get (bool other_thread = false) {
224 return _node[other_thread];
230 * @param notify method freeing object
// Hands the guarded pointer to the current context for deferred destruction.
232 public void release (owned DestroyNotify notify) {
233 unowned G item = _node[false];
235 Context.get_current_context ()->release_ptr (item, (owned)notify);
239 * Sets default policy (i.e. default policy for user-created contexts).
240 * The policy must be concrete and should not be blocking.
242 * @param policy New default policy.
244 public static void set_default_policy (Policy policy) requires (policy.is_concrete ()) {
// A blocking default can deadlock nested contexts, so warn loudly.
// Fixed typo in the user-visible warning: "defautl" -> "default".
245 if (policy.is_blocking ())
246 warning ("Setting blocking default Gee.HazardPointer.Policy (there may be a deadlock).\n");
247 AtomicInt.set(ref _default_policy, (int)policy);
251 * Sets thread exit policy (i.e. default policy for the top-most Context).
252 * The policy must be concrete and should not be unsafe.
254 * @param policy New thread policy.
256 public static void set_thread_exit_policy (Policy policy) requires (policy.is_concrete ()) {
// An unsafe thread-exit policy may leak elements, so warn loudly.
// Fixed typo in the user-visible warning: "globale" -> "global".
257 if (!policy.is_safe ())
258 warning ("Setting unsafe global thread-exit Gee.HazardPointer.Policy (there may be a memory leak).\n");
259 AtomicInt.set(ref _thread_exit_policy, (int)policy);
263 * Sets release (i.e. how exactly the released objects are freed).
265 * The method can be only set before any object is released and is not thread-safe.
267 * @param policy New release policy.
269 public static bool set_release_policy (ReleasePolicy policy) {
270 int old_policy = AtomicInt.get (ref release_policy);
// The MSB of release_policy is the "helper already started" flag (see
// ensure_start, which consistently tests `1 << (sizeof(int) * 8 - 1)`).
// The original masked with `(sizeof(int) * 8 - 1)` — i.e. the low bits —
// so a running helper was never detected; fixed to test the MSB flag.
271 if ((old_policy & (1 << (sizeof(int) * 8 - 1))) != 0) {
272 critical ("Attempt to change the policy of running helper. Failing.");
275 if (!AtomicInt.compare_and_exchange (ref release_policy, old_policy, (int)policy)) {
276 critical ("Concurrent access to release policy detected. Failing.");
283 * Policy determines what happens on exit from Context.
287 * Performs default action on exit from thread.
291 * Performs the same action as on exit from current thread.
295 * Goes through the free list and attempts to free un-freed elements.
299 * Goes through the free list and attempts to free un-freed elements
300 * until all elements are freed.
304 * Release the un-freed elements to either helper thread or to main loop.
305 * Please note if the operation would block it is not performed.
309 * Release the un-freed elements to either helper thread or to main loop.
310 * Please note it may block while adding to queue.
315 * Checks if the policy is concrete or if it depends on global variables.
317 * @return ``true`` if this policy does not depend on global variables
// Switch over enum members is elided in this listing; unknown values are a bug.
319 public bool is_concrete () {
330 assert_not_reached ();
335 * Checks if policy blocks or is lock-free.
336 * Please note that it works on a concrete policy only.
338 * @return ``true`` if the policy may block the thread.
// (switch over the concrete policies is elided in this listing)
340 public bool is_blocking () requires (this.is_concrete ()) {
349 assert_not_reached ();
354 * Checks if policy guarantees freeing all elements.
355 * Please note that it works on a concrete policy only.
357 * @return ``true`` if the policy guarantees freeing all elements.
// (switch over the concrete policies is elided in this listing)
359 public bool is_safe () requires (this.is_concrete ()) {
368 assert_not_reached ();
373 * Finds concrete policy which corresponds to given policy.
375 * @return Policy that corresponds to given policy at given time in given thread.
377 public Policy to_concrete () ensures (result.is_concrete ()) {
// Case labels are elided in this listing; presumably DEFAULT resolves via
// _default_policy and THREAD_EXIT via _thread_exit_policy — verify against full source.
385 return (Policy) AtomicInt.get (ref _default_policy);
387 return (Policy) AtomicInt.get (ref _thread_exit_policy);
389 assert_not_reached ();
396 * @param to_free List containing elements to free.
397 * @return Non-empty list of not freed elements or ``null`` if all elements have been disposed.
399 internal ArrayList<FreeNode *>? perform (owned ArrayList<FreeNode *> to_free) {
// Case labels are elided in this listing; the branch-to-policy mapping below is
// inferred from the policy docs above — confirm against the full source.
400 switch (this.to_concrete ()) {
// TRY_FREE: one pass; hand back whatever could not be freed.
402 return try_free (to_free) ? (owned) to_free : null;
// FREE: keep retrying until every element has been freed.
404 while (try_free (to_free)) {
// TRY_RELEASE: enqueue for the helper only if the lock is immediately free.
409 ReleasePolicy.ensure_start ();
410 if (_queue_mutex.trylock ()) {
411 _queue.offer ((owned) to_free);
412 _queue_mutex.unlock ();
415 return (owned) to_free;
// RELEASE: block on the queue mutex and always enqueue.
418 ReleasePolicy.ensure_start ();
419 _queue_mutex.lock ();
420 _queue.offer ((owned) to_free);
421 _queue_mutex.unlock ();
424 assert_not_reached ();
// Callback used to free a retired raw pointer once no hazard pointer guards it.
429 public delegate void DestroyNotify (void *ptr);
432 * Release policy determines what happens with object freed by Policy.TRY_RELEASE
433 * and Policy.RELEASE.
435 public enum ReleasePolicy {
437 * Libgee spawns helper thread to free those elements.
442 * Libgee uses GLib main loop.
443 * This is recommended for application using GLib main loop.
// Starts the background releaser matching the chosen policy.
447 private static void start (ReleasePolicy self) { // FIXME: Make it non-static [bug 659778]
// HELPER_THREAD branch: spawn a low-priority worker (loop body elided in this listing).
451 Thread.create<bool> (() => {
452 Thread.self<bool> ().set_priority (ThreadPriority.LOW);
458 } catch (ThreadError error) {
459 assert_not_reached ();
469 assert_not_reached ();
474 * Ensures that helper methods are started.
476 internal static inline void ensure_start () {
477 int policy = AtomicInt.get (ref release_policy);
// The MSB of release_policy doubles as the "already started" flag.
478 if ((policy & (1 << (sizeof(int) * 8 - 1))) != 0)
// Double-checked under the queue mutex before creating the queue and starting.
480 if (_queue_mutex.trylock ()) {
481 policy = AtomicInt.get (ref release_policy);
482 if ((policy & (1 << (sizeof(int) * 8 - 1))) == 0) {
483 _queue = new LinkedList<ArrayList<FreeNode *>> ();
484 // Hack to not lie about successfully setting the policy
485 policy = AtomicInt.exchange_and_add (ref release_policy, (int)(1 << (sizeof(int) * 8 - 1)));
487 _queue_mutex.unlock ();
488 if ((policy & (1 << (sizeof(int) * 8 - 1))) == 0) {
// Non-blocking drain of the global queue into _global_to_free, then one free pass.
493 private static inline void attempt_free () {
494 if (_queue_mutex.trylock ()) {
495 Collection<ArrayList<FreeNode *>> temp = new ArrayList<ArrayList<FreeNode *>> ();
497 _queue_mutex.unlock ();
498 temp.foreach ((x) => {_global_to_free.add_all (x);});
500 try_free (_global_to_free);
505 * Create a new context. The user does not need to create one explicitly; however,
506 * when about to issue a bunch of commands, it might be beneficial to fine-tune the creation of contexts.
509 * Context ctx = new Context ();
510 * lock_free_collection.operation1 ();
511 * // Normally on exit the thread exit operation would be executed but here the default operation of
512 * // child context is executed.
513 * lock_free_collection.operation2 ();
516 * Please note that the Context is implicitly part of the stack and:
518 * 1. It cannot be moved between threads.
519 * 2. If in given thread the child (created later) context is alive parent must be alive as well.
522 public class Context { // FIXME: Should be struct
// Creates a context, pushes it onto this thread's context stack (StaticPrivate)
// and resolves the effective policy.
523 public Context (Policy? policy = null) {
524 this._to_free = new ArrayList<FreeNode *> ();
525 this._parent = _current_context.get ();
526 _current_context.set (this, null);
527 if (policy == null) {
// Root context defaults to the thread-exit policy, nested ones to the default policy.
528 if (_parent == null) {
529 _policy = (Policy)AtomicInt.get (ref _thread_exit_policy);
531 _policy = (Policy)AtomicInt.get (ref _default_policy);
534 this._policy = policy.to_concrete ();
537 stderr.printf ("Entering context %p (policy %s, parent %p)\n", this, _policy != null ? _policy.to_string () : null, _parent);
// Destructor body (its signature is elided in this listing): runs the policy on
// the collected free-list, pushes leftovers to the parent and pops the context stack.
543 stderr.printf ("Exiting context %p (policy %s, parent %p)\n", this, _policy != null ? _policy.to_string () : null, _parent);
545 int size = _to_free.size;
546 bool clean_parent = false;
548 ArrayList<FreeNode *>? remaining;
// Small lists are cheaper to hand to the parent than to process here.
549 if (_parent == null || size >= THRESHOLD)
550 remaining = _policy.perform ((owned) _to_free);
552 remaining = (owned) _to_free;
553 if (remaining != null) {
554 assert (_parent != null);
555 _parent->_to_free.add_all (remaining);
560 stderr.printf ("Setting current context to %p\n", _parent);
562 _current_context.set (_parent, null);
564 HazardPointer.try_free (_parent->_to_free);
568 * Tries to free all freed pointer in current context.
570 public void try_free () {
571 HazardPointer.try_free (_to_free);
575 * Ensure that whole context is freed. Please note that it might block.
577 public void free_all () {
// Spin until try_free reports the list as empty (loop body elided in this listing).
578 while (HazardPointer.try_free (_to_free))
583 * Tries to push the current context to releaser.
585 public void try_release () {
// Non-blocking: only enqueue if the queue mutex is immediately available.
586 if (_queue_mutex.trylock ()) {
587 _queue.offer ((owned) _to_free);
588 _to_free = new ArrayList<FreeNode *> ();
589 _queue_mutex.unlock ();
594 * Pushes the current context to releaser. Please note that it might block.
596 public void release () {
597 _queue_mutex.lock ();
598 _queue.offer ((owned) _to_free);
599 _to_free = new ArrayList<FreeNode *> ();
600 _queue_mutex.unlock ();
604 * Add pointer to freed array.
606 internal inline void release_ptr (void *ptr, owned DestroyNotify notify) {
607 FreeNode *node = new FreeNode ();
609 node->destroy_notify = (owned)notify;
// Opportunistically compact the list once it grows past THRESHOLD.
611 if (_to_free.size >= THRESHOLD)
612 HazardPointer.try_free (_to_free);
616 * Gets current context.
618 internal inline static Context *get_current_context () {
619 return _current_context.get ();
// True when this context should free eagerly: it is the root with pending work,
// or its pending list reached THRESHOLD.
622 private inline bool _should_free () {
623 return (_parent == null && _to_free.size > 0) || _to_free.size >= THRESHOLD;
626 internal Context *_parent; // Enclosing context on this thread's stack (null for root).
627 internal ArrayList<FreeNode *> _to_free; // Pointers scheduled for deferred destruction.
628 internal Policy? _policy; // Concrete policy resolved at construction.
629 internal static StaticPrivate _current_context; // Per-thread top of the context stack.
630 internal static StaticPrivate _root_context;
631 private static uint THRESHOLD = 10; // List size that triggers eager freeing.
635 * Gets a new hazard pointer node.
637 * @return new hazard pointer node.
639 internal static inline unowned Node acquire () {
// First try to reactivate a retired node from the global list.
640 for (unowned Node? curr = get_head (); curr != null; curr = curr.get_next ())
641 if (curr.activate ())
// Otherwise allocate a fresh node and CAS it onto the head of the list.
643 Node *node = new Node ();
644 Node *old_head = null;
646 node->set_next (old_head = (Node *)AtomicPointer.get (&_head));
647 } while (!AtomicPointer.compare_and_exchange (&_head, old_head, node));
652 * Tries to free from list.
654 * @return ``true`` if elements remain in the list (the code returns to_free.size > 0,
654 *         i.e. the original "if list is empty" wording was inverted).
656 internal static bool try_free (ArrayList<FreeNode *> to_free) {
// Snapshot every pointer currently protected by some hazard node.
657 Collection<void *> used = new HashSet<void *>();
658 for (unowned Node? current = get_head (); current != null; current = current.get_next ()) {
659 used.add (current.get ());
661 for (int i = 0; i < to_free.size;) {
662 FreeNode *current = to_free[i];
// Still protected by another thread — leave it for a later pass.
663 if (used.contains (current->pointer)) {
665 stderr.printf ("Skipping freeing %p\n", current->pointer);
670 stderr.printf ("Freeing %p\n", current->pointer);
// Swap-with-last removal keeps the erase O(1); do not advance i afterwards.
672 FreeNode *cur = to_free.remove_at (to_free.size - 1);
673 if (i != to_free.size) {
674 FreeNode *temp = to_free[i];
678 cur->destroy_notify (cur->pointer);
682 return to_free.size > 0;
686 * Gets head of hazard pointers.
687 * @return Hazard pointer head.
689 internal static unowned Node? get_head () {
690 return (Node *)AtomicPointer.get(&_head);
693 internal unowned Node _node; // Node publishing this hazard pointer's guarded value.
695 internal static Node *_head = null; // Head of the global lock-free node list.
697 internal static int _default_policy = (int)Policy.TRY_FREE;
698 internal static int _thread_exit_policy = (int)Policy.RELEASE;
// The MSB of this int doubles as the "helper started" flag (see ensure_start).
700 internal static int release_policy = 0;
702 internal static Queue<ArrayList<FreeNode *>> _queue; // Guarded by _queue_mutex.
703 internal static StaticMutex _queue_mutex;
705 internal static ArrayList<FreeNode *> _global_to_free;
// Returns the destroy notify matching type G (body elided in this listing).
707 internal static DestroyNotify get_destroy_notify<G> () {
// Pairs a retired raw pointer with the callback that will eventually free it.
716 internal class FreeNode {
717 public void *pointer;
718 public DestroyNotify destroy_notify;
722 * List of used pointers.
725 internal class Node {
// Constructor body (signature elided in this listing): start active with no hazard published.
727 AtomicPointer.set (&_hazard, null);
728 AtomicInt.set (ref _active, 1);
// Clears the hazard and retires the node so acquire () can reuse it.
735 public void release () {
736 AtomicPointer.set (&_hazard, null);
737 AtomicInt.set (ref _active, 0);
740 public inline bool is_active () {
741 return AtomicInt.get (ref _active) != 0;
// Atomically claims a retired node (CAS 0 -> 1).
744 public inline bool activate () {
745 return AtomicInt.compare_and_exchange (ref _active, 0, 1);
// Publishes ptr as protected from reclamation.
748 public inline void set (void *ptr) {
749 AtomicPointer.set (&_hazard, ptr);
// safe = true uses an atomic read; the other path reads the field directly
// (only valid from the owning thread).
752 public inline void *get (bool safe = true) {
754 return (void *)AtomicPointer.get (&_hazard);
756 return (void *)_hazard;
760 public inline unowned Node? get_next () {
761 return (Node *)AtomicPointer.get (&_next);
764 public inline void set_next (Node *next) {
765 AtomicPointer.set (&_next, next);
770 public void *_hazard;