2 // Copyright Oliver Kowalke 2016.
3 // Distributed under the Boost Software License, Version 1.0.
4 // (See accompanying file LICENSE_1_0.txt or copy at
5 // http://www.boost.org/LICENSE_1_0.txt)
7 #ifndef BOOST_FIBERS_SPINLOCK_TTAS_H
8 #define BOOST_FIBERS_SPINLOCK_TTAS_H
15 #include <boost/fiber/detail/config.hpp>
16 #include <boost/fiber/detail/cpu_relax.hpp>
18 // based on information from:
19 // https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops
20 // https://software.intel.com/en-us/articles/long-duration-spin-wait-loops-on-hyper-threading-technology-enabled-intel-processors
23 #pragma clang diagnostic push
24 #pragma clang diagnostic ignored "-Wunused-private-field"
// lock states for the TTAS (test-test-and-set) spinlock
// NOTE(review): enum body/terminator not visible in this chunk — confirm
// it declares exactly { locked, unlocked } in the full file
enum class spinlock_status {
// current lock state; relaxed loads implement the cheap "test" phase in lock()
std::atomic< spinlock_status > state_{ spinlock_status::unlocked };
// default-constructed in the unlocked state (see state_'s in-class initializer)
spinlock_ttas() noexcept = default;
// non-copyable: duplicating a lock object would duplicate its lock state
spinlock_ttas( spinlock_ttas const&) = delete;
spinlock_ttas & operator=( spinlock_ttas const&) = delete;
// acquire the lock: spin with a TTAS loop (relaxed "test" load before the
// acquire "test-and-set" exchange), yielding/sleeping between probes and
// backing off randomly when the exchange loses a race
void lock() noexcept {
// count of failed exchange attempts; sizes the backoff window below
// NOTE(review): not visibly incremented anywhere in this chunk —
// presumably done on lines elided here; confirm against the full file
std::size_t collisions = 0 ;
// avoid using multiple pause instructions for a delay of a specific cycle count
// the delay of cpu_relax() (pause on Intel) depends on the processor family
// the cycle count cannot be guaranteed from one system to the next
// -> check the shared variable 'state_' in between each cpu_relax() to prevent
// unnecessarily long delays on some systems
std::size_t tests = 0;
// test shared variable 'state_'
// first access to 'state_' -> cache miss
// successive accesses to 'state_' -> cache hit
// if 'state_' was released by other fiber
// cached 'state_' is invalidated -> cache miss
while ( spinlock_status::locked == state_.load( std::memory_order_relaxed) ) {
#if !defined(BOOST_FIBERS_SPIN_SINGLE_CORE)
// below the test budget: stay on-CPU and merely pause/re-probe
if ( BOOST_FIBERS_SPIN_MAX_TESTS > tests) {
// give CPU a hint that this thread is in a "spin-wait" loop
// delays the next instruction's execution for a finite period of time (depends on processor family)
// the CPU is not under demand, parts of the pipeline are no longer being used
// -> reduces the power consumed by the CPU
// -> prevent pipeline stalls
// std::this_thread::sleep_for( 0us) has a fairly long instruction path length,
// combined with an expensive ring3 to ring0 transition costing about 1000 cycles
// std::this_thread::sleep_for( 0us) gives up the remaining part of
// this thread's time slice if and only if a thread of equal or
// greater priority is ready to run
static constexpr std::chrono::microseconds us0{ 0 };
std::this_thread::sleep_for( us0);
// single-core (or fallback) path: let another thread run instead of spinning
std::this_thread::yield();
// test-and-set shared variable 'state_'
// every time 'state_' is signaled over the bus, even if the test fails
if ( spinlock_status::locked == state_.exchange( spinlock_status::locked, std::memory_order_acquire) ) {
// spinlock now contended
// utilize 'Binary Exponential Backoff' algorithm
// linear_congruential_engine is a random number engine based on Linear congruential generator (LCG)
static thread_local std::minstd_rand generator;
// NOTE(review): 'distribution' is a function-local static, so its bounds
// are fixed once, by the value of 'collisions' at first initialization
// (first caller's value; e.g. 0 -> range [0, 1]) — the backoff window
// can therefore never grow afterwards. A non-static local rebuilt on
// each contention would implement true binary exponential backoff;
// confirm intent against upstream Boost.Fiber.
static std::uniform_int_distribution< std::size_t > distribution{ 0, static_cast< std::size_t >( 1) << collisions };
// randomly chosen number of busy-wait iterations for this backoff round
const std::size_t z = distribution( generator);
for ( std::size_t i = 0; i < z; ++i) {
// -> reduces the power consumed by the CPU
// -> prevent pipeline stalls
// success, thread has acquired the lock
// release the lock; the release store pairs with the acquire exchange in
// lock() so writes made inside the critical section become visible to the
// next owner
void unlock() noexcept {
state_.store( spinlock_status::unlocked, std::memory_order_release);
112 #pragma clang diagnostic pop
115 #endif // BOOST_FIBERS_SPINLOCK_TTAS_H