Imported Upstream version 1.64.0
[platform/upstream/boost.git] / boost / fiber / detail / spinlock_ttas_adaptive.hpp
1
2 //          Copyright Oliver Kowalke 2016.
3 // Distributed under the Boost Software License, Version 1.0.
4 //    (See accompanying file LICENSE_1_0.txt or copy at
5 //          http://www.boost.org/LICENSE_1_0.txt)
6
7 #ifndef BOOST_FIBERS_SPINLOCK_TTAS_ADAPTIVE_H
8 #define BOOST_FIBERS_SPINLOCK_TTAS_ADAPTIVE_H
9
#include <algorithm>
#include <atomic>
#include <chrono>
#include <cmath>
#include <random>
#include <thread>

#include <boost/fiber/detail/config.hpp>
#include <boost/fiber/detail/cpu_relax.hpp>
18
// based on information from:
// https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops
// https://software.intel.com/en-us/articles/long-duration-spin-wait-loops-on-hyper-threading-technology-enabled-intel-processors
22
23 namespace boost {
24 namespace fibers {
25 namespace detail {
26
27 class spinlock_ttas_adaptive {
28 private:
29     enum class spinlock_status {
30         locked = 0,
31         unlocked
32     };
33
34     std::atomic< spinlock_status >  state_{ spinlock_status::unlocked };
35     std::atomic< std::size_t >      tests_{ 0 };
36
37 public:
38     spinlock_ttas_adaptive() noexcept = default;
39
40     spinlock_ttas_adaptive( spinlock_ttas_adaptive const&) = delete;
41     spinlock_ttas_adaptive & operator=( spinlock_ttas_adaptive const&) = delete;
42
43     void lock() noexcept {
44         std::size_t collisions = 0 ;
45         for (;;) {
46             std::size_t tests = 0;
47             const std::size_t prev_tests = tests_.load( std::memory_order_relaxed);
48             const std::size_t max_tests = (std::min)( static_cast< std::size_t >( BOOST_FIBERS_SPIN_MAX_TESTS), 2 * prev_tests + 10);
49             // avoid using multiple pause instructions for a delay of a specific cycle count
50             // the delay of cpu_relax() (pause on Intel) depends on the processor family
51             // the cycle count can not guaranteed from one system to the next
52             // -> check the shared variable 'state_' in between each cpu_relax() to prevent
53             //    unnecessarily long delays on some systems
54             // test shared variable 'status_'
55             // first access to 'state_' -> chache miss
56             // sucessive acccess to 'state_' -> cache hit
57             // if 'state_' was released by other fiber
58             // cached 'state_' is invalidated -> cache miss
59             while ( spinlock_status::locked == state_.load( std::memory_order_relaxed) ) {
60 #if !defined(BOOST_FIBERS_SPIN_SINGLE_CORE)
61                 if ( max_tests > tests) {
62                     ++tests;
63                     // give CPU a hint that this thread is in a "spin-wait" loop
64                     // delays the next instruction's execution for a finite period of time (depends on processor family)
65                     // the CPU is not under demand, parts of the pipeline are no longer being used
66                     // -> reduces the power consumed by the CPU
67                     // -> prevent pipeline stalls
68                     cpu_relax();
69                 } else {
70                     ++tests;
71                     // std::this_thread::sleep_for( 0us) has a fairly long instruction path length,
72                     // combined with an expensive ring3 to ring 0 transition costing about 1000 cycles
73                     // std::this_thread::sleep_for( 0us) lets give up this_thread the remaining part of its time slice
74                     // if and only if a thread of equal or greater priority is ready to run
75                     static constexpr std::chrono::microseconds us0{ 0 };
76                     std::this_thread::sleep_for( us0);
77                 }
78 #else
79                 std::this_thread::yield();
80 #endif
81             }
82             // test-and-set shared variable 'status_'
83             // everytime 'status_' is signaled over the bus, even if the test failes
84             if ( spinlock_status::locked == state_.exchange( spinlock_status::locked, std::memory_order_acquire) ) {
85                 // spinlock now contended
86                 // utilize 'Binary Exponential Backoff' algorithm
87                 // linear_congruential_engine is a random number engine based on Linear congruential generator (LCG)
88                 static thread_local std::minstd_rand generator;
89                 static std::uniform_int_distribution< std::size_t > distribution{ 0, static_cast< std::size_t >( 1) << collisions };
90                 const std::size_t z = distribution( generator);
91                 ++collisions;
92                 for ( std::size_t i = 0; i < z; ++i) {
93                     // -> reduces the power consumed by the CPU
94                     // -> prevent pipeline stalls
95                     cpu_relax();
96                 }
97             } else {
98                 tests_.store( prev_tests + (tests - prev_tests) / 8, std::memory_order_relaxed);
99                 // success, thread has acquired the lock
100                 break;
101             }
102         }
103     }
104
105     void unlock() noexcept {
106         state_.store( spinlock_status::unlocked, std::memory_order_release);
107     }
108 };
109
110 }}}
111
112 #endif // BOOST_FIBERS_SPINLOCK_TTAS_ADAPTIVE_H