Summary:
This improves locality and affinity by keeping work on the same
threads in preference to starting work on new ones, and more generally
reduces contention on the threadpool lock.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/17808
Differential Revision: D14391282
Pulled By: resistor
fbshipit-source-id: 3aec81656a50460a725aa4187c61864295d4f46e
  void ThreadPool::main_loop(std::size_t index) {
    init_thread();

+   std::unique_lock<std::mutex> lock(mutex_);
    while (running_) {
      // Wait on condition variable while the task queue is empty and
      // the pool is still running.
-     std::unique_lock<std::mutex> lock(mutex_);
      while (tasks_.empty() && running_) {
        condition_.wait(lock);
      }

      // ... (dequeue and run the task with the lock released, then
      // re-acquire the lock; elided here) ...

      complete_ = true;
      completed_.notify_one();
+
+     // Deliberately hold the lock on the backedge, so this thread has an
+     // opportunity to acquire a new task before another thread acquires
+     // the lock.
    } // while running_
  }
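
The sketch below illustrates the same pattern in a self-contained way. It is not the PyTorch ThreadPool; MiniPool, wait_work_complete, and busy_ are illustrative names assumed for this example. The point it demonstrates is the one the diff makes: the unique_lock is acquired once, before the worker loop, and is still held on every backedge, so the thread that just finished a task gets first chance at the next queued task.

#include <condition_variable>
#include <cstddef>
#include <functional>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

class MiniPool {
 public:
  explicit MiniPool(std::size_t threads) {
    for (std::size_t i = 0; i < threads; ++i) {
      workers_.emplace_back([this] { main_loop(); });
    }
  }

  ~MiniPool() {
    {
      std::lock_guard<std::mutex> guard(mutex_);
      running_ = false;
    }
    condition_.notify_all();
    for (auto& worker : workers_) {
      worker.join();
    }
  }

  void run(std::function<void()> task) {
    {
      std::lock_guard<std::mutex> guard(mutex_);
      tasks_.push(std::move(task));
    }
    condition_.notify_one();
  }

  // Block until the queue is drained and no worker is mid-task.
  void wait_work_complete() {
    std::unique_lock<std::mutex> lock(mutex_);
    completed_.wait(lock, [this] { return tasks_.empty() && busy_ == 0; });
  }

 private:
  void main_loop() {
    // Lock taken once, outside the loop, as in the change above.
    std::unique_lock<std::mutex> lock(mutex_);
    while (running_) {
      // Wait while there is no work and the pool is still running.
      while (tasks_.empty() && running_) {
        condition_.wait(lock);
      }
      if (!running_) {
        break;
      }

      std::function<void()> task = std::move(tasks_.front());
      tasks_.pop();
      ++busy_;

      // Drop the lock only while the task actually runs.
      lock.unlock();
      task();
      lock.lock();

      --busy_;
      if (tasks_.empty() && busy_ == 0) {
        completed_.notify_all();
      }

      // The lock is deliberately still held here, on the backedge: if
      // another task is already queued, this thread dequeues it before a
      // newly woken thread can acquire the mutex.
    }
  }

  std::vector<std::thread> workers_;
  std::queue<std::function<void()>> tasks_;
  std::mutex mutex_;
  std::condition_variable condition_;
  std::condition_variable completed_;
  std::size_t busy_ = 0;
  bool running_ = true;
};

int main() {
  MiniPool pool(2);
  for (int i = 0; i < 8; ++i) {
    pool.run([i] { std::cout << "task " << i << " ran\n"; });
  }
  pool.wait_work_complete();
}

Because the lock is still held when control reaches the top of the while (running_) loop again, the empty-queue check runs before any other thread can win the mutex, which is how warm threads keep picking up new work instead of handing it to freshly woken ones.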