const std::size_t size = sctx.size - ( static_cast< char * >( sctx.sp) - static_cast< char * >( sp) );
#endif
// placement new of context on top of fiber's stack
- return intrusive_ptr< context >(
- ::new ( sp) context(
+ return intrusive_ptr< context >{
+ ::new ( sp) context{
dispatcher_context,
- boost::context::preallocated( sp, size, sctx),
+ boost::context::preallocated{ sp, size, sctx },
salloc,
- sched) );
+ sched } };
}
// schwarz counter
constexpr std::size_t size = sizeof( context) + sizeof( scheduler);
void * vp = std::malloc( size);
if ( nullptr == vp) {
- throw std::bad_alloc();
+ throw std::bad_alloc{};
}
// main fiber context of this thread
- context * main_ctx = ::new ( vp) context( main_context);
+ context * main_ctx = ::new ( vp) context{ main_context };
// scheduler of this thread
- scheduler * sched = ::new ( static_cast< char * >( vp) + sizeof( context) ) scheduler();
+ scheduler * sched = ::new ( static_cast< char * >( vp) + sizeof( context) ) scheduler{};
// attach main context to scheduler
sched->attach_main_context( main_ctx);
// create and attach dispatcher context to scheduler
- sched->attach_dispatcher_context(
- make_dispatcher_context( sched) );
+ sched->attach_dispatcher_context( make_dispatcher_context( sched) );
// make main context to active context
active_ = main_ctx;
# else
constexpr std::size_t size = 2 * alignment + ctx_size + sched_size;
void * vp = std::malloc( size);
if ( nullptr == vp) {
- throw std::bad_alloc();
+ throw std::bad_alloc{};
}
// reserve space for shift
void * vp1 = static_cast< char * >( vp) + sizeof( int);
vp1 = std::align( alignment, ctx_size, vp1, space);
// reserves space for integer holding shifted size
int * shift = reinterpret_cast< int * >( static_cast< char * >( vp1) - sizeof( int) );
- // store shifted size in fornt of context
- * shift = static_cast< char * >( vp1) - static_cast< char * >( vp);
+ // store shifted size in front of context
+ * shift = static_cast< int >( static_cast< char * >( vp1) - static_cast< char * >( vp) );
// main fiber context of this thread
- context * main_ctx = ::new ( vp1) context( main_context);
+ context * main_ctx = ::new ( vp1) context{ main_context };
vp1 = static_cast< char * >( vp1) + ctx_size;
// align scheduler pointer
space = sched_size + alignment;
vp1 = std::align( alignment, sched_size, vp1, space);
// scheduler of this thread
- scheduler * sched = ::new ( vp1) scheduler();
+ scheduler * sched = ::new ( vp1) scheduler{};
// attach main context to scheduler
sched->attach_main_context( main_ctx);
// create and attach dispatcher context to scheduler
- sched->attach_dispatcher_context(
- make_dispatcher_context( sched) );
+ sched->attach_dispatcher_context( make_dispatcher_context( sched) );
// make main context to active context
active_ = main_ctx;
# endif
if ( nullptr != dp->lk) {
dp->lk->unlock();
} else if ( nullptr != dp->ctx) {
- context_initializer::active_->set_ready_( dp->ctx);
+ active()->schedule_( dp->ctx);
}
}
#else
void
context::resume_( detail::data_t & d) noexcept {
- auto result = ctx_( & d);
- detail::data_t * dp( std::get< 1 >( result) );
- if ( nullptr != dp) {
- dp->from->ctx_ = std::move( std::get< 0 >( result) );
- if ( nullptr != dp->lk) {
- dp->lk->unlock();
- } else if ( nullptr != dp->ctx) {
- context_initializer::active_->set_ready_( dp->ctx);
+ boost::context::continuation c = c_.resume( & d);
+ if ( c) {
+ detail::data_t * dp = c.get_data< detail::data_t * >();
+ if ( nullptr != dp) {
+ dp->from->c_ = std::move( c);
+ if ( nullptr != dp->lk) {
+ dp->lk->unlock();
+ } else if ( nullptr != dp->ctx) {
+ active()->schedule_( dp->ctx);
+ }
}
}
}
#endif
void
-context::set_ready_( context * ctx) noexcept {
- get_scheduler()->set_ready( ctx);
+context::schedule_( context * ctx) noexcept {
+ get_scheduler()->schedule( ctx);
}
// main fiber context
context::context( main_context_t) noexcept :
use_count_{ 1 }, // allocated on main- or thread-stack
- flags_{ 0 },
- type_{ type::main_context },
#if (BOOST_EXECUTION_CONTEXT==1)
- ctx_{ boost::context::execution_context::current() } {
+ ctx_{ boost::context::execution_context::current() },
+ type_{ type::main_context },
+ policy_{ launch::post } {
#else
- ctx_{} {
+ c_{},
+ type_{ type::main_context },
+ policy_{ launch::post } {
#endif
}
// dispatcher fiber context
context::context( dispatcher_context_t, boost::context::preallocated const& palloc,
default_stack const& salloc, scheduler * sched) :
- flags_{ 0 },
- type_{ type::dispatcher_context },
#if (BOOST_EXECUTION_CONTEXT==1)
ctx_{ std::allocator_arg, palloc, salloc,
[this,sched](void * vp) noexcept {
if ( nullptr != dp->lk) {
dp->lk->unlock();
} else if ( nullptr != dp->ctx) {
- context_initializer::active_->set_ready_( dp->ctx);
+ active()->schedule_( dp->ctx);
}
// execute scheduler::dispatch()
sched->dispatch();
// dispatcher context should never return from scheduler::dispatch()
BOOST_ASSERT_MSG( false, "disatcher fiber already terminated");
- }}
+ }},
+ type_{ type::dispatcher_context },
+ policy_{ launch::post }
+{}
#else
- ctx_{ std::allocator_arg, palloc, salloc,
- [this,sched](boost::context::execution_context< detail::data_t * > ctx, detail::data_t * dp) noexcept {
- // update execution_context of calling fiber
- dp->from->ctx_ = std::move( ctx);
- if ( nullptr != dp->lk) {
- dp->lk->unlock();
- } else if ( nullptr != dp->ctx) {
- context_initializer::active_->set_ready_( dp->ctx);
- }
- // execute scheduler::dispatch()
- return sched->dispatch();
- }}
+ c_{},
+ type_{ type::dispatcher_context },
+ policy_{ launch::post }
+{
+ c_ = boost::context::callcc(
+ std::allocator_arg, palloc, salloc,
+ [this,sched](boost::context::continuation && c) noexcept {
+ c = c.resume();
+ detail::data_t * dp = c.get_data< detail::data_t * >();
+ // update continuation of calling fiber
+ dp->from->c_ = std::move( c);
+ if ( nullptr != dp->lk) {
+ dp->lk->unlock();
+ } else if ( nullptr != dp->ctx) {
+ active()->schedule_( dp->ctx);
+ }
+ // execute scheduler::dispatch()
+ return sched->dispatch();
+ });
+
+}
#endif
-{}
context::~context() {
- BOOST_ASSERT( wait_queue_.empty() );
+ // protect for concurrent access
+ std::unique_lock< detail::spinlock > lk{ splk_ };
BOOST_ASSERT( ! ready_is_linked() );
+ BOOST_ASSERT( ! remote_ready_is_linked() );
BOOST_ASSERT( ! sleep_is_linked() );
BOOST_ASSERT( ! wait_is_linked() );
+ if ( is_context( type::dispatcher_context) ) {
+ // dispatcher-context is resumed by main-context
+ // while the scheduler is destroyed
+#ifdef BOOST_DISABLE_ASSERTS
+ wait_queue_.pop_front();
+#else
+ context * ctx = & wait_queue_.front();
+ wait_queue_.pop_front();
+ BOOST_ASSERT( ctx->is_context( type::main_context) );
+ BOOST_ASSERT( nullptr == active() );
+#endif
+ }
+ BOOST_ASSERT( wait_queue_.empty() );
delete properties_;
}
context::id
context::get_id() const noexcept {
- return id( const_cast< context * >( this) );
+ return id{ const_cast< context * >( this) };
}
void
// get active context
context * active_ctx = context::active();
// protect for concurrent access
- std::unique_lock< detail::spinlock > lk( splk_);
+ std::unique_lock< detail::spinlock > lk{ splk_ };
// wait for context which is not terminated
- if ( 0 == ( flags_ & flag_terminated) ) {
+ if ( ! terminated_) {
// push active context to wait-queue, member
// of the context which has to be joined by
// the active context
active_ctx->wait_link( wait_queue_);
- lk.unlock();
// suspend active context
- get_scheduler()->suspend();
- // remove from wait-queue
- active_ctx->wait_unlink();
+ active_ctx->get_scheduler()->suspend( lk);
// active context resumed
BOOST_ASSERT( context::active() == active_ctx);
}
get_scheduler()->yield( context::active() );
}
-#if (BOOST_EXECUTION_CONTEXT>1)
-boost::context::execution_context< detail::data_t * >
+#if (BOOST_EXECUTION_CONTEXT==1)
+void
+context::terminate() noexcept {
+ // protect for concurrent access
+ std::unique_lock< detail::spinlock > lk{ splk_ };
+ // mark as terminated
+ terminated_ = true;
+ // notify all waiting fibers
+ while ( ! wait_queue_.empty() ) {
+ context * ctx = & wait_queue_.front();
+ // remove fiber from wait-queue
+ wait_queue_.pop_front();
+ // notify scheduler
+ schedule( ctx);
+ }
+ BOOST_ASSERT( wait_queue_.empty() );
+ // release fiber-specific-data
+ for ( fss_data_t::value_type & data : fss_data_) {
+ data.second.do_cleanup();
+ }
+ fss_data_.clear();
+ // switch to another context
+ get_scheduler()->terminate( lk, this);
+}
+#else
+boost::context::continuation
context::suspend_with_cc() noexcept {
context * prev = this;
// context_initializer::active_ will point to `this`
std::swap( context_initializer::active_, prev);
detail::data_t d{ prev };
// context switch
- return std::move( std::get< 0 >( ctx_( & d) ) );
+ return c_.resume( & d);
}
-#endif
-#if (BOOST_EXECUTION_CONTEXT==1)
-void
-#else
-boost::context::execution_context< detail::data_t * >
-#endif
-context::set_terminated() noexcept {
+boost::context::continuation
+context::terminate() noexcept {
// protect for concurrent access
- std::unique_lock< detail::spinlock > lk( splk_);
+ std::unique_lock< detail::spinlock > lk{ splk_ };
// mark as terminated
- flags_ |= flag_terminated;
+ terminated_ = true;
// notify all waiting fibers
while ( ! wait_queue_.empty() ) {
context * ctx = & wait_queue_.front();
// remove fiber from wait-queue
wait_queue_.pop_front();
// notify scheduler
- set_ready( ctx);
+ schedule( ctx);
}
- lk.unlock();
+ BOOST_ASSERT( wait_queue_.empty() );
// release fiber-specific-data
for ( fss_data_t::value_type & data : fss_data_) {
data.second.do_cleanup();
}
fss_data_.clear();
// switch to another context
-#if (BOOST_EXECUTION_CONTEXT==1)
- get_scheduler()->set_terminated( this);
-#else
- return get_scheduler()->set_terminated( this);
+ return get_scheduler()->terminate( lk, this);
#endif
}
bool
context::wait_until( std::chrono::steady_clock::time_point const& tp) noexcept {
BOOST_ASSERT( nullptr != get_scheduler() );
- BOOST_ASSERT( this == context_initializer::active_);
+ BOOST_ASSERT( this == active());
return get_scheduler()->wait_until( this, tp);
}
context::wait_until( std::chrono::steady_clock::time_point const& tp,
detail::spinlock_lock & lk) noexcept {
BOOST_ASSERT( nullptr != get_scheduler() );
- BOOST_ASSERT( this == context_initializer::active_);
+ BOOST_ASSERT( this == active());
return get_scheduler()->wait_until( this, tp, lk);
}
void
-context::set_ready( context * ctx) noexcept {
+context::schedule( context * ctx) noexcept {
//BOOST_ASSERT( nullptr != ctx);
BOOST_ASSERT( this != ctx);
BOOST_ASSERT( nullptr != get_scheduler() );
// (other scheduler assigned)
if ( scheduler_ == ctx->get_scheduler() ) {
// local
- get_scheduler()->set_ready( ctx);
+ schedule_( ctx);
} else {
// remote
- ctx->get_scheduler()->set_remote_ready( ctx);
+ ctx->get_scheduler()->schedule_from_remote( ctx);
}
#else
BOOST_ASSERT( get_scheduler() == ctx->get_scheduler() );
- get_scheduler()->set_ready( ctx);
+ schedule_( ctx);
#endif
}
void *
context::get_fss_data( void const * vp) const {
- uintptr_t key( reinterpret_cast< uintptr_t >( vp) );
- fss_data_t::const_iterator i( fss_data_.find( key) );
+ uintptr_t key = reinterpret_cast< uintptr_t >( vp);
+ fss_data_t::const_iterator i = fss_data_.find( key);
return fss_data_.end() != i ? i->second.vp : nullptr;
}
void * data,
bool cleanup_existing) {
BOOST_ASSERT( cleanup_fn);
- uintptr_t key( reinterpret_cast< uintptr_t >( vp) );
- fss_data_t::iterator i( fss_data_.find( key) );
+ uintptr_t key = reinterpret_cast< uintptr_t >( vp);
+ fss_data_t::iterator i = fss_data_.find( key);
if ( fss_data_.end() != i) {
if( cleanup_existing) {
i->second.do_cleanup();
i,
std::make_pair(
key,
- fss_data( data, cleanup_fn) ) );
+ fss_data{ data, cleanup_fn } ) );
} else {
fss_data_.erase( i);
}
fss_data_.insert(
std::make_pair(
key,
- fss_data( data, cleanup_fn) ) );
+ fss_data{ data, cleanup_fn } ) );
}
}
}
bool
-context::terminated_is_linked() const noexcept {
- return terminated_hook_.is_linked();
+context::ready_is_linked() const noexcept {
+ return ready_hook_.is_linked();
}
bool
-context::ready_is_linked() const noexcept {
- return ready_hook_.is_linked();
+context::remote_ready_is_linked() const noexcept {
+ return remote_ready_hook_.is_linked();
}
bool
}
bool
+context::terminated_is_linked() const noexcept {
+ return terminated_hook_.is_linked();
+}
+
+bool
context::wait_is_linked() const noexcept {
return wait_hook_.is_linked();
}
void
context::worker_unlink() noexcept {
+ BOOST_ASSERT( worker_is_linked() );
worker_hook_.unlink();
}
void
context::ready_unlink() noexcept {
+ BOOST_ASSERT( ready_is_linked() );
ready_hook_.unlink();
}
void
context::sleep_unlink() noexcept {
+ BOOST_ASSERT( sleep_is_linked() );
sleep_hook_.unlink();
}
void
-context::wait_unlink() noexcept {
- wait_hook_.unlink();
-}
-
-void
context::detach() noexcept {
BOOST_ASSERT( context::active() != this);
get_scheduler()->detach_worker_context( this);