*/
// CLASS HEADER
-#include "async-task-manager-impl.h"
+#include <dali/internal/system/common/async-task-manager-impl.h>
// EXTERNAL INCLUDES
#include <dali/devel-api/adaptor-framework/environment-variable.h>
#include <dali/integration-api/adaptor-framework/adaptor.h>
#include <dali/integration-api/debug.h>
+#include <unordered_map>
+
namespace Dali
{
namespace Internal
{
namespace
{
+constexpr auto FORCE_TRIGGER_THRESHOLD = 128u; ///< Forcibly trigger TasksCompleted() if the completed-task queue holds too many entries.
+
constexpr auto DEFAULT_NUMBER_OF_ASYNC_THREADS = size_t{8u};
constexpr auto NUMBER_OF_ASYNC_THREADS_ENV = "DALI_ASYNC_MANAGER_THREAD_POOL_SIZE";
+// The number of threads for low-priority tasks.
+constexpr auto DEFAULT_NUMBER_OF_LOW_PRIORITY_THREADS = size_t{6u};
+constexpr auto NUMBER_OF_LOW_PRIORITY_THREADS_ENV = "DALI_ASYNC_MANAGER_LOW_PRIORITY_THREAD_POOL_SIZE";
+
// Read the worker thread-pool size from an environment variable, falling back
// to defaultValue when the variable is unset or outside the accepted range.
size_t GetNumberOfThreads(const char* environmentVariable, size_t defaultValue)
{
auto numberString = EnvironmentVariable::GetEnvironmentVariable(environmentVariable);
auto numberOfThreads = numberString ? std::strtoul(numberString, nullptr, 10) : 0;
- constexpr auto MAX_NUMBER_OF_THREADS = 10u;
- DALI_ASSERT_DEBUG(numberOfThreads < MAX_NUMBER_OF_THREADS);
- return (numberOfThreads > 0 && numberOfThreads < MAX_NUMBER_OF_THREADS) ? numberOfThreads : defaultValue;
+ // Hard upper bound; values above it are treated as invalid and ignored.
+ constexpr auto MAX_NUMBER_OF_THREADS = 16u;
+ DALI_ASSERT_DEBUG(numberOfThreads <= MAX_NUMBER_OF_THREADS);
+ return (numberOfThreads > 0 && numberOfThreads <= MAX_NUMBER_OF_THREADS) ? numberOfThreads : defaultValue;
+}
+
+// Read the number of low-priority worker threads from an environment variable.
+// The result is clamped by maxValue (the total pool size) so low-priority
+// workers can never exceed the whole pool; unset or out-of-range values fall
+// back to std::min(defaultValue, maxValue).
+size_t GetNumberOfLowPriorityThreads(const char* environmentVariable, size_t defaultValue, size_t maxValue)
+{
+ auto numberString = EnvironmentVariable::GetEnvironmentVariable(environmentVariable);
+ auto numberOfThreads = numberString ? std::strtoul(numberString, nullptr, 10) : 0;
+ DALI_ASSERT_DEBUG(numberOfThreads <= maxValue);
+ return (numberOfThreads > 0 && numberOfThreads <= maxValue) ? numberOfThreads : std::min(defaultValue, maxValue);
}
#if defined(DEBUG_ENABLED)
Debug::Filter* gAsyncTasksManagerLogFilter = Debug::Filter::New(Debug::NoLogging, false, "LOG_ASYNC_TASK_MANAGER");
+
+uint32_t gThreadId = 0u; // Only for debug
#endif
} // unnamed namespace
+// AsyncTaskThread
+
AsyncTaskThread::AsyncTaskThread(AsyncTaskManager& asyncTaskManager)
: mConditionalWait(),
mAsyncTaskManager(asyncTaskManager),
mLogFactory(Dali::Adaptor::Get().GetLogFactory()),
+ mTraceFactory(Dali::Adaptor::Get().GetTraceFactory()),
mDestroyThread(false),
mIsThreadStarted(false),
mIsThreadIdle(true)
// Worker thread entry point: names the thread, installs the log/trace hooks,
// then loops waiting for and processing tasks until mDestroyThread is set.
void AsyncTaskThread::Run()
{
+#if defined(DEBUG_ENABLED)
+ // Per-thread numeric id, used only to tag thread names and log lines.
+ uint32_t threadId = gThreadId++;
+ {
+ char temp[100];
+ snprintf(temp, 100, "AsyncTaskThread[%u]", threadId);
+ SetThreadName(temp);
+ }
+#else
SetThreadName("AsyncTaskThread");
+#endif
mLogFactory.InstallLogFunction();
+ mTraceFactory.InstallTraceFunction();
+ // NOTE(review): diff context is omitted in this hunk — the scoped lock and
+ // the task fetch between here and the Wait() below are not visible; confirm
+ // against the full file.
while(!mDestroyThread)
{
if(!mDestroyThread)
{
mIsThreadIdle = true;
+ DALI_LOG_INFO(gAsyncTasksManagerLogFilter, Debug::General, "Thread[%u] wait\n", threadId);
mConditionalWait.Wait(lock);
+ DALI_LOG_INFO(gAsyncTasksManagerLogFilter, Debug::General, "Thread[%u] awake\n", threadId);
}
}
else
{
+ DALI_LOG_INFO(gAsyncTasksManagerLogFilter, Debug::General, "Thread[%u] Process task [%p]\n", threadId, task.Get());
task->Process();
- mAsyncTaskManager.CompleteTask(task);
+ DALI_LOG_INFO(gAsyncTasksManagerLogFilter, Debug::General, "Thread[%u] Complete task [%p]\n", threadId, task.Get());
+ // Skip CompleteTask during shutdown so the manager is not touched while
+ // being torn down; ownership of the task is handed over via move.
+ if(!mDestroyThread)
+ {
+ mAsyncTaskManager.CompleteTask(std::move(task));
+ }
}
}
}
+// AsyncTaskManager::CacheImpl
+
+// Internal bookkeeping: maps each raw AsyncTask pointer to the iterators of
+// its entries in the waiting/running/completed containers, giving O(1) task
+// lookup instead of a linear scan. Each cache must only be touched while
+// holding the mutex that guards its matching container.
+struct AsyncTaskManager::CacheImpl
+{
+ CacheImpl(AsyncTaskManager& manager)
+ : mManager(manager)
+ {
+ }
+
+public:
+ // Insert / Erase task cache API.
+
+ /**
+ * @brief Record the container iterator for the given task.
+ * @pre The mutex guarding the matching container must be locked.
+ */
+ template<typename CacheContainer, typename Iterator>
+ static void InsertTaskCache(CacheContainer& cacheMap, AsyncTaskPtr task, Iterator iterator)
+ {
+ auto& cacheContainer = cacheMap[task.Get()]; // Get or Create cache container.
+ cacheContainer.insert(cacheContainer.end(), iterator);
+ }
+
+ /**
+ * @brief Erase a single cached iterator for the given task.
+ * @pre The mutex guarding the matching container must be locked.
+ */
+ template<typename CacheContainer, typename Iterator>
+ static void EraseTaskCache(CacheContainer& cacheMap, AsyncTaskPtr task, Iterator iterator)
+ {
+ auto mapIter = cacheMap.find(task.Get());
+ if(mapIter != cacheMap.end())
+ {
+ auto& cacheContainer = (*mapIter).second;
+ auto cacheIter = std::find(cacheContainer.begin(), cacheContainer.end(), iterator);
+
+ if(cacheIter != cacheContainer.end())
+ {
+ cacheContainer.erase(cacheIter);
+ // Drop the map entry entirely once the last iterator is gone.
+ if(cacheContainer.empty())
+ {
+ cacheMap.erase(mapIter);
+ }
+ }
+ }
+ }
+
+ /**
+ * @brief Erase every cached iterator for the given task.
+ * @pre The mutex guarding the matching container must be locked.
+ */
+ template<typename CacheContainer>
+ static void EraseAllTaskCache(CacheContainer& cacheMap, AsyncTaskPtr task)
+ {
+ auto mapIter = cacheMap.find(task.Get());
+ if(mapIter != cacheMap.end())
+ {
+ cacheMap.erase(mapIter);
+ }
+ }
+
+public:
+ AsyncTaskManager& mManager; ///< Owner of this CacheImpl.
+
+ // Keep cache iterators as list since we take tasks by FIFO as default.
+ using TaskCacheContainer = std::unordered_map<const AsyncTask*, std::list<AsyncTaskContainer::iterator>>;
+ using RunningTaskCacheContainer = std::unordered_map<const AsyncTask*, std::list<AsyncRunningTaskContainer::iterator>>;
+ using CompletedTaskCacheContainer = std::unordered_map<const AsyncTask*, std::list<AsyncCompletedTaskContainer::iterator>>;
+
+ TaskCacheContainer mWaitingTasksCache; ///< The cache of tasks and iterator for waiting to async process. Must be locked under mWaitingTasksMutex.
+ RunningTaskCacheContainer mRunningTasksCache; ///< The cache of tasks and iterator for running tasks. Must be locked under mRunningTasksMutex.
+ CompletedTaskCacheContainer mCompletedTasksCache; ///< The cache of tasks and iterator for completed async process. Must be locked under mCompletedTasksMutex.
+};
+
+// AsyncTaskManager
+
Dali::AsyncTaskManager AsyncTaskManager::Get()
{
Dali::AsyncTaskManager manager;
// Construct the manager: worker pool sized from environment variables, the
// iterator cache, and the event-thread trigger that runs TasksCompleted().
AsyncTaskManager::AsyncTaskManager()
: mTasks(GetNumberOfThreads(NUMBER_OF_ASYNC_THREADS_ENV, DEFAULT_NUMBER_OF_ASYNC_THREADS), [&]() { return TaskHelper(*this); }),
+ mAvaliableLowPriorityTaskCounts(GetNumberOfLowPriorityThreads(NUMBER_OF_LOW_PRIORITY_THREADS_ENV, DEFAULT_NUMBER_OF_LOW_PRIORITY_THREADS, mTasks.GetElementCount())),
+ mWaitingHighProirityTaskCounts(0u),
+ mCacheImpl(new CacheImpl(*this)),
mTrigger(new EventThreadCallback(MakeCallback(this, &AsyncTaskManager::TasksCompleted))),
mProcessorRegistered(false)
{
// NOTE(review): diff context omitted — the lines between the constructor
// body above and the destructor body below are not visible in this hunk.
{
if(mProcessorRegistered && Dali::Adaptor::IsAvailable())
{
+ mProcessorRegistered = false;
Dali::Adaptor::Get().UnregisterProcessor(*this);
}
+ // Join all threads.
mTasks.Clear();
+
+ // Remove the cache impl only after all threads have joined.
+ mCacheImpl.reset();
+
+ // Clear the task containers after CacheImpl is removed.
+ mWaitingTasks.clear();
+ mRunningTasks.clear();
+ mCompletedTasks.clear();
}
// Queue a task for asynchronous processing: append it to the waiting queue
// (FIFO) plus its iterator cache, and make sure the event-thread processor is
// registered so completed callbacks get dispatched.
void AsyncTaskManager::AddTask(AsyncTaskPtr task)
{
+ if(task)
{
// Lock while adding task to the queue
- Mutex::ScopedLock lock(mMutex);
- mWaitingTasks.push_back(task);
+ Mutex::ScopedLock lock(mWaitingTasksMutex);
+
+ DALI_LOG_INFO(gAsyncTasksManagerLogFilter, Debug::Verbose, "AddTask [%p]\n", task.Get());
+
+ // push back into waiting queue.
+ auto waitingIter = mWaitingTasks.insert(mWaitingTasks.end(), task);
+ CacheImpl::InsertTaskCache(mCacheImpl->mWaitingTasksCache, task, waitingIter);
+
+ if(task->GetPriorityType() == AsyncTask::PriorityType::HIGH)
+ {
+ // Increase the number of waiting tasks for high priority.
+ ++mWaitingHighProirityTaskCounts;
+ }
- // Finish all Running threads are working
- if(mRunningTasks.size() >= mTasks.GetElementCount())
{
- return;
+ // For thread safety
+ Mutex::ScopedLock lock(mRunningTasksMutex); // We can lock this mutex under mWaitingTasksMutex.
+
+ // All worker threads already have a task; nothing to wake.
+ if(mRunningTasks.size() >= mTasks.GetElementCount())
+ {
+ // NOTE(review): this early return also skips the processor
+ // registration below — confirm against the full file that the
+ // processor is guaranteed to be registered on another path.
+ return;
+ }
}
}
// If all threads are busy, then it's ok just to push the task because they will try to get the next job.
}
+ // Register the processor (mTrigger may fire too late when the event thread is busy handling many events.)
if(!mProcessorRegistered && Dali::Adaptor::IsAvailable())
{
Dali::Adaptor::Get().RegisterProcessor(*this);
// Cancel a task: erase it from the waiting queue, mark any running instance
// as CANCELED (a worker mid-Process cannot be interrupted), and drop any
// completed entries so their callbacks are not executed.
void AsyncTaskManager::RemoveTask(AsyncTaskPtr task)
{
+ if(task)
{
- // Lock while remove task from the queue
- Mutex::ScopedLock lock(mMutex);
- if(!mWaitingTasks.empty())
+ DALI_LOG_INFO(gAsyncTasksManagerLogFilter, Debug::Verbose, "RemoveTask [%p]\n", task.Get());
+
+ // Check whether we need to unregister the processor.
+ // If any queue is still non-empty, we must keep the processor registered.
+ bool needCheckUnregisterProcessor = true;
+
{
- for(std::vector<AsyncTaskPtr>::iterator it = mWaitingTasks.begin(); it != mWaitingTasks.end();)
+ // Lock while remove task from the queue
+ Mutex::ScopedLock lock(mWaitingTasksMutex);
+
+ auto mapIter = mCacheImpl->mWaitingTasksCache.find(task.Get());
+ if(mapIter != mCacheImpl->mWaitingTasksCache.end())
{
- if((*it) && (*it) == task)
+ for(auto& iterator : mapIter->second)
{
- it = mWaitingTasks.erase(it);
- }
- else
- {
- it++;
+ DALI_ASSERT_DEBUG((*iterator) == task);
+ if((*iterator)->GetPriorityType() == AsyncTask::PriorityType::HIGH)
+ {
+ // Decrease the number of waiting tasks for high priority.
+ --mWaitingHighProirityTaskCounts;
+ }
+ mWaitingTasks.erase(iterator);
}
+ CacheImpl::EraseAllTaskCache(mCacheImpl->mWaitingTasksCache, task);
+ }
+
+ if(!mWaitingTasks.empty())
+ {
+ needCheckUnregisterProcessor = false;
+ }
}
- if(!mRunningTasks.empty())
{
- for(auto iter = mRunningTasks.begin(), endIter = mRunningTasks.end(); iter != endIter; ++iter)
+ // Lock while remove task from the queue
+ Mutex::ScopedLock lock(mRunningTasksMutex);
+
+ auto mapIter = mCacheImpl->mRunningTasksCache.find(task.Get());
+ if(mapIter != mCacheImpl->mRunningTasksCache.end())
{
- if((*iter).first == task)
+ for(auto& iterator : mapIter->second)
{
- (*iter).second = true;
+ DALI_ASSERT_DEBUG((*iterator).first == task);
+ // We cannot erase from the running container. Just mark as canceled.
+ // Note : mAvaliableLowPriorityTaskCounts will be increased after process finished.
+ (*iterator).second = RunningTaskState::CANCELED;
}
}
+
+ if(!mRunningTasks.empty())
+ {
+ needCheckUnregisterProcessor = false;
+ }
}
- if(!mCompletedTasks.empty())
{
- for(std::vector<AsyncTaskPtr>::iterator it = mCompletedTasks.begin(); it != mCompletedTasks.end();)
+ // Lock while remove task from the queue
+ Mutex::ScopedLock lock(mCompletedTasksMutex);
+
+ auto mapIter = mCacheImpl->mCompletedTasksCache.find(task.Get());
+ if(mapIter != mCacheImpl->mCompletedTasksCache.end())
{
- if((*it) && (*it) == task)
+ for(auto& iterator : mapIter->second)
{
- it = mCompletedTasks.erase(it);
- }
- else
- {
- it++;
+ DALI_ASSERT_DEBUG(iterator->first == task);
+ mCompletedTasks.erase(iterator);
}
+ CacheImpl::EraseAllTaskCache(mCacheImpl->mCompletedTasksCache, task);
+ }
+
+ if(!mCompletedTasks.empty())
+ {
+ needCheckUnregisterProcessor = false;
+ }
}
- }
- UnregisterProcessor();
+ // UnregisterProcessor needs to lock the mutexes. Call it only if required.
+ if(needCheckUnregisterProcessor)
+ {
+ UnregisterProcessor();
+ }
+ }
}
// Main-thread helper: drain the completed queue until a task that still
// requires its callback is found (SKIP_CALLBACK entries are simply released
// here, which destroys them on the main thread). Returns nullptr once the
// queue is empty.
-AsyncTaskPtr AsyncTaskManager::PopNextTaskToProcess()
+AsyncTaskPtr AsyncTaskManager::PopNextCompletedTask()
{
// Lock while popping task out from the queue
- Mutex::ScopedLock lock(mMutex);
+ Mutex::ScopedLock lock(mCompletedTasksMutex);
- // pop out the next task from the queue
- AsyncTaskPtr nextTask = nullptr;
+ AsyncTaskPtr nextCompletedTask = nullptr;
- for(auto iter = mWaitingTasks.begin(), endIter = mWaitingTasks.end(); iter != endIter; ++iter)
+ while(!mCompletedTasks.empty())
{
- if((*iter)->IsReady())
- {
- nextTask = *iter;
+ DALI_LOG_INFO(gAsyncTasksManagerLogFilter, Debug::Verbose, "PopNextCompletedTask, completed task count : [%zu]\n", mCompletedTasks.size());
+
+ auto next = mCompletedTasks.begin();
+ AsyncTaskPtr nextTask = next->first;
+ CompletedTaskState taskState = next->second;
+ CacheImpl::EraseTaskCache(mCacheImpl->mCompletedTasksCache, nextTask, next);
+ mCompletedTasks.erase(next);
- // Add Running queue
- mRunningTasks.push_back(std::make_pair(nextTask, false));
- mWaitingTasks.erase(iter);
+ DALI_LOG_INFO(gAsyncTasksManagerLogFilter, Debug::General, "Completed task [%p] (callback required? : %d)\n", nextTask.Get(), taskState == CompletedTaskState::REQUIRE_CALLBACK);
+
+ if(taskState == CompletedTaskState::REQUIRE_CALLBACK)
+ {
+ nextCompletedTask = nextTask;
break;
}
}
- return nextTask;
+ DALI_LOG_INFO(gAsyncTasksManagerLogFilter, Debug::General, "Pickup completed [%p]\n", nextCompletedTask.Get());
+
+ return nextCompletedTask;
}
// Unregister the event-thread processor, but only when every queue (waiting,
// running, completed) is empty; otherwise keep it registered so pending
// callbacks still get dispatched.
-AsyncTaskPtr AsyncTaskManager::PopNextCompletedTask()
+void AsyncTaskManager::UnregisterProcessor()
{
- // Lock while popping task out from the queue
- Mutex::ScopedLock lock(mMutex);
+ if(mProcessorRegistered && Dali::Adaptor::IsAvailable())
+ {
+ DALI_LOG_INFO(gAsyncTasksManagerLogFilter, Debug::Verbose, "UnregisterProcessor begin\n");
+ // Keep the processor registered while at least one task exists in any queue.
+ // Be careful about the mutex locking order here, to avoid deadlock.
+ // TODO : Should we lock all mutex rightnow?
+ Mutex::ScopedLock lockWait(mWaitingTasksMutex);
+ if(mWaitingTasks.empty())
+ {
+ Mutex::ScopedLock lockRunning(mRunningTasksMutex); // We can lock this mutex under mWaitingTasksMutex.
+ if(mRunningTasks.empty())
+ {
+ Mutex::ScopedLock lockComplete(mCompletedTasksMutex); // We can lock this mutex under mWaitingTasksMutex and mRunningTasksMutex.
+ if(mCompletedTasks.empty())
+ {
+ mProcessorRegistered = false;
+ Dali::Adaptor::Get().UnregisterProcessor(*this);
+ }
+ }
+ }
+ DALI_LOG_INFO(gAsyncTasksManagerLogFilter, Debug::Verbose, "UnregisterProcessor end (registed? %d)\n", mProcessorRegistered);
+ }
+}
// Main-thread: execute the completion callback of every finished task that
// requires one, then try to unregister the processor if all queues drained.
- if(mCompletedTasks.empty())
+void AsyncTaskManager::TasksCompleted()
+{
+ DALI_LOG_INFO(gAsyncTasksManagerLogFilter, Debug::Verbose, "TasksCompleted begin\n");
+ while(AsyncTaskPtr task = PopNextCompletedTask())
{
- return AsyncTaskPtr();
+ DALI_LOG_INFO(gAsyncTasksManagerLogFilter, Debug::Verbose, "Execute callback [%p]\n", task.Get());
+ CallbackBase::Execute(*(task->GetCompletedCallback()), task);
}
- std::vector<AsyncTaskPtr>::iterator next = mCompletedTasks.begin();
- AsyncTaskPtr nextTask = *next;
- mCompletedTasks.erase(next);
+ UnregisterProcessor();
+ DALI_LOG_INFO(gAsyncTasksManagerLogFilter, Debug::Verbose, "TasksCompleted end\n");
+}
// Processor interface: called on the event thread; forwards to TasksCompleted().
- return nextTask;
+void AsyncTaskManager::Process(bool postProcessor)
+{
+ TasksCompleted();
}
-void AsyncTaskManager::CompleteTask(AsyncTaskPtr task)
+/// Worker thread called.
+/// Pops the next ready task from the waiting queue, honouring priority:
+/// HIGH tasks always run, LOW tasks only while a low-priority slot
+/// (mAvaliableLowPriorityTaskCounts) is free. The chosen task is moved into
+/// the running queue. Returns nullptr when nothing can run right now.
+AsyncTaskPtr AsyncTaskManager::PopNextTaskToProcess()
{
- // Lock while adding task to the queue
+ // Lock while popping task out from the queue
+ Mutex::ScopedLock lock(mWaitingTasksMutex);
+
+ DALI_LOG_INFO(gAsyncTasksManagerLogFilter, Debug::Verbose, "PopNextTaskToProcess, waiting task count : [%zu]\n", mWaitingTasks.size());
+
+ // pop out the next task from the queue
+ AsyncTaskPtr nextTask = nullptr;
+
+ // Fast cut if all waiting tasks are LOW priority, and we cannot execute low-priority tasks anymore.
+ if(mWaitingHighProirityTaskCounts == 0u && !mWaitingTasks.empty())
{
- Mutex::ScopedLock lock(mMutex);
- for(auto iter = mRunningTasks.begin(), endIter = mRunningTasks.end(); iter != endIter; ++iter)
+ // For thread safety
+ Mutex::ScopedLock lock(mRunningTasksMutex); // We can lock this mutex under mWaitingTasksMutex.
+
+ if(mAvaliableLowPriorityTaskCounts == 0u)
{
- if((*iter).first == task)
+ // There are no available tasks to run now. Return nullptr.
+ return nextTask;
+ }
+ }
+
+ for(auto iter = mWaitingTasks.begin(), endIter = mWaitingTasks.end(); iter != endIter; ++iter)
+ {
+ if((*iter)->IsReady())
+ {
+ const auto priorityType = (*iter)->GetPriorityType();
+ bool taskAvaliable = priorityType == AsyncTask::PriorityType::HIGH; // Task always valid if its priority is high
+ if(!taskAvaliable)
+ {
+ // For thread safety
+ Mutex::ScopedLock lock(mRunningTasksMutex); // We can lock this mutex under mWaitingTasksMutex.
+
+ taskAvaliable = (mAvaliableLowPriorityTaskCounts > 0u); // priority is low, but we can use it.
+ }
+
+ if(taskAvaliable)
{
- if(!(*iter).second)
+ nextTask = *iter;
+
+ // Add Running queue
{
- if(task->GetCallbackInvocationThread() == AsyncTask::ThreadType::MAIN_THREAD)
+ // Lock while popping task out from the queue
+ Mutex::ScopedLock lock(mRunningTasksMutex); // We can lock this mutex under mWaitingTasksMutex.
+
+ DALI_LOG_INFO(gAsyncTasksManagerLogFilter, Debug::Verbose, "Waiting -> Running [%p]\n", nextTask.Get());
+
+ auto runningIter = mRunningTasks.insert(mRunningTasks.end(), std::make_pair(nextTask, RunningTaskState::RUNNING));
+ CacheImpl::InsertTaskCache(mCacheImpl->mRunningTasksCache, nextTask, runningIter);
+
+ CacheImpl::EraseTaskCache(mCacheImpl->mWaitingTasksCache, nextTask, iter);
+ mWaitingTasks.erase(iter);
+
+ // Decrease available task counts if it is low priority
+ if(priorityType == AsyncTask::PriorityType::LOW)
{
- mCompletedTasks.push_back(task);
+ // We are under running task mutex. We can decrease it.
+ --mAvaliableLowPriorityTaskCounts;
}
}
- // Delete this task in running queue
- mRunningTasks.erase(iter);
+ if(priorityType == AsyncTask::PriorityType::HIGH)
+ {
+ // Decrease the number of waiting tasks for high priority.
+ --mWaitingHighProirityTaskCounts;
+ }
break;
}
}
}
- // wake up the main thread
- if(task->GetCallbackInvocationThread() == AsyncTask::ThreadType::MAIN_THREAD)
- {
- mTrigger->Trigger();
- }
- else
- {
- CallbackBase::Execute(*(task->GetCompletedCallback()), task);
- }
+ DALI_LOG_INFO(gAsyncTasksManagerLogFilter, Debug::General, "Pickup process [%p]\n", nextTask.Get());
+
+ return nextTask;
}
-void AsyncTaskManager::UnregisterProcessor()
+/// Worker thread called.
+/// Called after task->Process() finishes: if the task is still RUNNING (not
+/// canceled), runs worker-thread callbacks immediately, then moves the task
+/// Running -> Completed (so the AsyncTask is destroyed on the main thread)
+/// and triggers the main thread when a main-thread callback is pending or
+/// the completed queue passed FORCE_TRIGGER_THRESHOLD.
+void AsyncTaskManager::CompleteTask(AsyncTaskPtr&& task)
{
- if(mProcessorRegistered && Dali::Adaptor::IsAvailable())
+ bool notify = false;
+
+ if(task)
{
- Mutex::ScopedLock lock(mMutex);
- if(mWaitingTasks.empty() && mCompletedTasks.empty() && mRunningTasks.empty())
+ bool needTrigger = (task->GetCallbackInvocationThread() == AsyncTask::ThreadType::MAIN_THREAD);
+
+ // Lock while checking whether this task is still valid (i.e. not canceled).
{
- Dali::Adaptor::Get().UnregisterProcessor(*this);
- mProcessorRegistered = false;
+ Mutex::ScopedLock lock(mRunningTasksMutex);
+
+ auto mapIter = mCacheImpl->mRunningTasksCache.find(task.Get());
+ if(mapIter != mCacheImpl->mRunningTasksCache.end())
+ {
+ const auto cacheIter = mapIter->second.begin();
+ DALI_ASSERT_ALWAYS(cacheIter != mapIter->second.end());
+
+ const auto iter = *cacheIter;
+ DALI_ASSERT_DEBUG(iter->first == task);
+ if(iter->second == RunningTaskState::RUNNING)
+ {
+ // This task is valid.
+ notify = true;
+ }
+ }
+
+ DALI_LOG_INFO(gAsyncTasksManagerLogFilter, Debug::Verbose, "CompleteTask [%p] (is notify? : %d)\n", task.Get(), notify);
}
- }
-}
-void AsyncTaskManager::TasksCompleted()
-{
- while(AsyncTaskPtr task = PopNextCompletedTask())
- {
- CallbackBase::Execute(*(task->GetCompletedCallback()), task);
- }
+ // Execute this task's completion callback outside of the mutex.
+ if(notify && task->GetCallbackInvocationThread() == AsyncTask::ThreadType::WORKER_THREAD)
+ {
+ DALI_LOG_INFO(gAsyncTasksManagerLogFilter, Debug::Verbose, "Execute callback on worker thread [%p]\n", task.Get());
+ CallbackBase::Execute(*(task->GetCompletedCallback()), task);
+ }
- UnregisterProcessor();
-}
+ // Lock while adding task to the queue
+ {
+ Mutex::ScopedLock lock(mRunningTasksMutex);
-void AsyncTaskManager::Process(bool postProcessor)
-{
- TasksCompleted();
+ auto mapIter = mCacheImpl->mRunningTasksCache.find(task.Get());
+ if(mapIter != mCacheImpl->mRunningTasksCache.end())
+ {
+ const auto cacheIter = mapIter->second.begin();
+ DALI_ASSERT_ALWAYS(cacheIter != mapIter->second.end());
+
+ const auto iter = *cacheIter;
+ const auto priorityType = iter->first->GetPriorityType();
+ // Increase available task counts if it is low priority
+ if(priorityType == AsyncTask::PriorityType::LOW)
+ {
+ // We are under running task mutex. We can increase it.
+ ++mAvaliableLowPriorityTaskCounts;
+ }
+
+ // Move the task into completed, to ensure the AsyncTask is destroyed on the main thread.
+ {
+ Mutex::ScopedLock lock(mCompletedTasksMutex); // We can lock this mutex under mRunningTasksMutex.
+
+ const bool callbackRequired = notify && (task->GetCallbackInvocationThread() == AsyncTask::ThreadType::MAIN_THREAD);
+
+ DALI_LOG_INFO(gAsyncTasksManagerLogFilter, Debug::Verbose, "Running -> Completed [%p] (callback required? : %d)\n", task.Get(), callbackRequired);
+
+ auto completedIter = mCompletedTasks.insert(mCompletedTasks.end(), std::make_pair(task, callbackRequired ? CompletedTaskState::REQUIRE_CALLBACK : CompletedTaskState::SKIP_CALLBACK));
+ CacheImpl::InsertTaskCache(mCacheImpl->mCompletedTasksCache, task, completedIter);
+
+ CacheImpl::EraseTaskCache(mCacheImpl->mRunningTasksCache, task, iter);
+ mRunningTasks.erase(iter);
+
+ // Force a trigger once the completed queue grows too large, even if no
+ // main-thread callback is required.
+ if(!needTrigger)
+ {
+ needTrigger |= (mCompletedTasks.size() >= FORCE_TRIGGER_THRESHOLD);
+ }
+
+ // The local task reference is no longer needed; release it.
+ task.Reset();
+ }
+ }
+ }
+
+ // Wake up the main thread
+ if(needTrigger)
+ {
+ DALI_LOG_INFO(gAsyncTasksManagerLogFilter, Debug::Verbose, "Trigger main thread\n");
+ mTrigger->Trigger();
+ }
+ }
}
+// AsyncTaskManager::TaskHelper
+
AsyncTaskManager::TaskHelper::TaskHelper(AsyncTaskManager& asyncTaskManager)
: TaskHelper(std::unique_ptr<AsyncTaskThread>(new AsyncTaskThread(asyncTaskManager)), asyncTaskManager)
{