From: Yongseop Kim/On-Device Lab(SR)/Engineer/Samsung Electronics
Date: Wed, 4 Sep 2019 11:54:29 +0000 (+0900)
Subject: [neurun] Implement classes that inherit from ITensorManager (#7083)
X-Git-Tag: accepted/tizen/unified/20190911.111615~142
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=509b7dc3f2848323c770d60c17435c34db4d6c7c;p=platform%2Fcore%2Fml%2Fnnfw.git

[neurun] Implement classes that inherit from ITensorManager (#7083)

* [neurun] Implement classes that inherit from ITensorManager

Implement classes that inherit from ITensorManager, such as
cpu/TensorManager and AclTensorManager

Signed-off-by: Yongseop Kim

* Add findTensorAsParent()

* Merge two buildXXXTensor into one with as_const param
---

diff --git a/runtimes/neurun/backend/acl_cl/MemoryManager.h b/runtimes/neurun/backend/acl_cl/MemoryManager.h
index ef15a49..ec509ac 100644
--- a/runtimes/neurun/backend/acl_cl/MemoryManager.h
+++ b/runtimes/neurun/backend/acl_cl/MemoryManager.h
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+// TODO Rename this to TensorManager.h
 #ifndef __NEURUN_BACKEND_ACL_CL_MEMORY_MANAGER_H__
 #define __NEURUN_BACKEND_ACL_CL_MEMORY_MANAGER_H__
 
@@ -25,6 +26,8 @@
 #include <AclMemoryManager.h>
 #include <AclLinearMemoryManager.h>
+#include <AclInternalBufferManager.h>
+#include <AclTensorManager.h>
 
 #include "operand/CLTensor.h"
 #include "operand/CLSubTensor.h"
@@ -49,6 +52,7 @@ using LinearMemoryManager = ::neurun::backend::acl_common::AclLinearMemoryManager<
     ::arm_compute::BlobLifetimeManager, ::arm_compute::CLBufferAllocator,
     ::arm_compute::CLMemoryGroup>;
 
+// TODO Remove this
 MemoryManager *createMemoryManager()
 {
   const std::string executor_str = util::getConfigString(util::config::EXECUTOR);
@@ -65,6 +69,35 @@ MemoryManager *createMemoryManager()
   }
 }
 
+// TODO Enable this
+/*
+using InternalBufferManager = ::neurun::backend::acl_common::AclInternalBufferManager<
+    ::arm_compute::MemoryManagerOnDemand, ::arm_compute::PoolManager,
+    ::arm_compute::BlobLifetimeManager, ::arm_compute::CLBufferAllocator>;
+
+using TensorManager =
+    ::neurun::backend::acl_common::AclTensorManager<::neurun::backend::acl_cl::operand::ICLTensor,
+                                                    operand::CLTensor, operand::CLSubTensor,
+                                                    operand::Object>;
+
+TensorManager *createTensorManager()
+{
+  const std::string executor_str = util::getConfigString(util::config::EXECUTOR);
+
+  if (executor_str == "Linear")
+  {
+    VERBOSE(acl_cl_createTensorManager) << "AclTensorManager as Linear" << std::endl;
+    return new TensorManager(new MemoryManager(), new LinearMemoryManager(),
+                             new InternalBufferManager());
+  }
+  else
+  {
+    VERBOSE(acl_cl_createTensorManager) << "AclTensorManager" << std::endl;
+    return new TensorManager(new MemoryManager(), new MemoryManager(), new InternalBufferManager());
+  }
+}
+*/
+
 } // namespace acl_cl
 } // namespace backend
 } // namespace neurun
diff --git a/runtimes/neurun/backend/acl_common/AclTensorManager.h b/runtimes/neurun/backend/acl_common/AclTensorManager.h
new file mode 100644
index 0000000..c4935ad
--- /dev/null
+++ b/runtimes/neurun/backend/acl_common/AclTensorManager.h
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ACL_COMMON_TENSOR_MANAGER_H__
+#define __NEURUN_BACKEND_ACL_COMMON_TENSOR_MANAGER_H__
+
+#include <arm_compute/runtime/IMemoryManager.h>
+
+#include "backend/ITensorManager.h"
+#include "AclMemoryManager.h"
+#include "AclInternalBufferManager.h"
+#include "model/OperandIndexMap.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_common
+{
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+class AclTensorManager : public backend::ITensorManager
+{
+public:
+  using T_AclMemoryManager = AclMemoryManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>;
+
+  AclTensorManager(T_AclMemoryManager *const_mgr, T_AclMemoryManager *nonconst_mgr,
+                   IInternalBufferManager *inter_mgr);
+
+  virtual ~AclTensorManager() = default;
+
+  void allocateConsts(void) override;
+  void allocateNonconsts(void) override;
+  void deallocateConsts(void) override;
+  void deallocateNonconsts(void) override;
+
+  void allocateInternalBufferManager(void);
+  void deallocateInternalBufferManager(void);
+
+  void buildTensor(const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info,
+                   bool as_const);
+  void buildSubtensor(const model::OperandIndex &parent, const model::OperandIndex &child,
+                      const ::arm_compute::TensorShape &shape,
+                      const ::arm_compute::Coordinates &coordinates);
+
+  std::shared_ptr<T_ITensor> findTensorAsParent(const model::OperandIndex &ind);
+
+  void startLifetime(const model::OperandIndex &ind);
+  void finishLifetime(const model::OperandIndex &ind);
+
+  std::shared_ptr<T_Object> wrapTensor(const model::OperandIndex &ind);
+  std::shared_ptr<T_ITensor> at(const ::neurun::model::OperandIndex &ind);
+
+  model::OperandIndexMap<std::shared_ptr<T_Tensor>> &constTensors(void);
+  model::OperandIndexMap<std::shared_ptr<T_Tensor>> &nonconstTensors(void);
+  model::OperandIndexMap<std::shared_ptr<T_SubTensor>> &nonconstSubtensors(void);
+
+  std::shared_ptr<::arm_compute::IMemoryManager> internal_buffer_manager(void);
+
+  void iterate(const std::function<void(const model::OperandIndex &)> &fn);
+
+  void tryDeallocConstants(void);
+
+private:
+  std::unique_ptr<T_AclMemoryManager> _const_mgr;
+  std::unique_ptr<T_AclMemoryManager> _nonconst_mgr;
+  std::unique_ptr<IInternalBufferManager> _inter_mgr;
+  model::OperandIndexMap<T_AclMemoryManager &> _ind_to_mgr;
+};
+
+} // namespace acl_common
+} // namespace backend
+} // namespace neurun
+
+#include <cassert>
+#include "util/logging.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_common
+{
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::AclTensorManager(
+    T_AclMemoryManager *const_mgr, T_AclMemoryManager *nonconst_mgr,
+    IInternalBufferManager *inter_mgr)
+    : _const_mgr{const_mgr}, _nonconst_mgr{nonconst_mgr}, _inter_mgr{inter_mgr}
+{
+  // DO NOTHING
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::allocateConsts(void)
+{
+  _const_mgr->allocate();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::allocateNonconsts(void)
+{
+  _nonconst_mgr->allocate();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::deallocateConsts(void)
+{
+  _const_mgr->deallocate();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::deallocateNonconsts(void)
+{
+  _nonconst_mgr->deallocate();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::allocateInternalBufferManager(
+    void)
+{
+  _inter_mgr->allocate();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::deallocateInternalBufferManager(
+    void)
+{
+  _inter_mgr->deallocate();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::buildTensor(
+    const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info, bool as_const)
+{
+  assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end());
+  if (as_const)
+  {
+    _const_mgr->buildTensor(ind, info);
+    _ind_to_mgr.insert({ind, *_const_mgr});
+  }
+  else
+  {
+    _nonconst_mgr->buildTensor(ind, info);
+    _ind_to_mgr.insert({ind, *_nonconst_mgr});
+  }
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::buildSubtensor(
+    const model::OperandIndex &parent, const model::OperandIndex &child,
+    const ::arm_compute::TensorShape &shape, const ::arm_compute::Coordinates &coordinates)
+{
+  assert(_ind_to_mgr.find(child) == _ind_to_mgr.end());
+  std::shared_ptr<T_ITensor> parent_tensor = findTensorAsParent(parent);
+  assert(parent_tensor);
+  _nonconst_mgr->buildSubtensor(parent_tensor, child, shape, coordinates);
+  _ind_to_mgr.insert({child, *_nonconst_mgr});
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+std::shared_ptr<T_ITensor>
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::findTensorAsParent(
+    const model::OperandIndex &ind)
+{
+
+  auto &tensors = _nonconst_mgr->tensors();
+  auto &subtensors = _nonconst_mgr->subtensors();
+  if (tensors.find(ind) != tensors.end())
+  {
+    // Parent is allocated as tensor
+    return tensors[ind];
+  }
+  else if (subtensors.find(ind) != subtensors.end())
+  {
+    // Parent is allocated as subtensor
+    return subtensors[ind];
+  }
+  else
+  {
+    return nullptr;
+  }
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::startLifetime(
+    const model::OperandIndex &ind)
+{
+  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+  _ind_to_mgr.at(ind).startLifetime(ind);
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::finishLifetime(
+    const model::OperandIndex &ind)
+{
+  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+  _ind_to_mgr.at(ind).finishLifetime(ind);
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+std::shared_ptr<T_Object>
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::wrapTensor(
+    const model::OperandIndex &ind)
+{
+  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+  return _ind_to_mgr.at(ind).wrapTensor(ind);
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+std::shared_ptr<T_ITensor> AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::at(
+    const ::neurun::model::OperandIndex &ind)
+{
+  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+
+  auto &tensors = _ind_to_mgr.at(ind).tensors();
+  if (tensors.find(ind) != tensors.end())
+  {
+    return tensors.at(ind);
+  }
+  else
+  {
+    return _ind_to_mgr.at(ind).subtensors().at(ind);
+  }
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+model::OperandIndexMap<std::shared_ptr<T_Tensor>> &
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::constTensors(void)
+{
+  return _const_mgr->tensors();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+model::OperandIndexMap<std::shared_ptr<T_Tensor>> &
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::nonconstTensors(void)
+{
+  return _nonconst_mgr->tensors();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+model::OperandIndexMap<std::shared_ptr<T_SubTensor>> &
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::nonconstSubtensors(void)
+{
+  return _nonconst_mgr->subtensors();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+std::shared_ptr<::arm_compute::IMemoryManager>
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::internal_buffer_manager(void)
+{
+  return _inter_mgr->internal_buffer_manager();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::iterate(
+    const std::function<void(const model::OperandIndex &)> &fn)
+{
+  for (auto it : _nonconst_mgr->tensors())
+    fn(it.first);
+
+  for (auto it : _nonconst_mgr->subtensors())
+    fn(it.first);
+
+  for (auto it : _const_mgr->tensors())
+    fn(it.first);
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::tryDeallocConstants(void)
+{
+  auto &tensors = _const_mgr->tensors();
+  auto &objects = _const_mgr->objects();
+
+  for (auto it = tensors.begin(); it != tensors.end();)
+  {
+    const auto &ind = it->first;
+    auto tensor = it->second;
+    if (tensor->handle() && !tensor->handle()->is_used())
+    {
+      VERBOSE(AclTensorManager) << "Tensor #" << ind.value()
+                                << " will be deallocated as an unused constant tensor" << std::endl;
+      tensor->allocator()->free();
+      tensor.reset();
+      it = tensors.erase(it);
+      objects.erase(ind);
+    }
+    else
+    {
+      ++it;
+    }
+  }
+}
+
+} // namespace acl_common
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ACL_COMMON_TENSOR_MANAGER_H__
diff --git a/runtimes/neurun/backend/acl_common/TemplTensorBuilder.h b/runtimes/neurun/backend/acl_common/TemplTensorBuilder.h
index 19d6c3d..c8913ef 100644
--- a/runtimes/neurun/backend/acl_common/TemplTensorBuilder.h
+++ b/runtimes/neurun/backend/acl_common/TemplTensorBuilder.h
@@ -23,7 +23,8 @@
 #include 
 #include 
 #include "model/OperandIndexMap.h"
-#include "AclMemoryManager.h"
+#include "AclMemoryManager.h" // TODO Remove this
+#include "AclTensorManager.h"
 #include "cpp14/memory.h"
 #include 
@@ -111,7 +112,8 @@ private:
   model::OperandIndexMap<compiler::SubTensorInfo> _subtensor_info_map;
   model::OperandIndexMap<bool> _apply_dim_correction_map;
   model::OperandIndexMap<std::pair<model::Layout, model::Layout>> _tensor_layouts_map;
-  ;
+
+  // TODO Replace this by TensorManager
   std::unique_ptr<T_AclMemoryManager> _mem_mgr;
 
   // TODO Consider dividing TensorBuilder into Linear and others
diff --git a/runtimes/neurun/backend/acl_neon/MemoryManager.h b/runtimes/neurun/backend/acl_neon/MemoryManager.h
index ef7b60a..ac7c91a 100644
--- a/runtimes/neurun/backend/acl_neon/MemoryManager.h
+++ b/runtimes/neurun/backend/acl_neon/MemoryManager.h
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+// TODO Rename this to TensorManager.h
 #ifndef __NEURUN_BACKEND_ACL_NEON_MEMORY_MANAGER_H__
 #define __NEURUN_BACKEND_ACL_NEON_MEMORY_MANAGER_H__
 
@@ -25,6 +26,8 @@
 #include <AclMemoryManager.h>
 #include <AclLinearMemoryManager.h>
+#include <AclInternalBufferManager.h>
+#include <AclTensorManager.h>
 
 #include "operand/NETensor.h"
 #include "operand/NESubTensor.h"
@@ -49,6 +52,7 @@ using LinearMemoryManager = ::neurun::backend::acl_common::AclLinearMemoryManager<
     ::arm_compute::MemoryManagerOnDemand, ::arm_compute::PoolManager,
     ::arm_compute::OffsetLifetimeManager, ::arm_compute::Allocator, ::arm_compute::MemoryGroup>;
 
+// TODO Remove this
 MemoryManager *createMemoryManager()
 {
   const std::string executor_str = util::getConfigString(util::config::EXECUTOR);
@@ -64,6 +68,34 @@ MemoryManager *createMemoryManager()
   }
 }
 
+// TODO Enable this instead of createMemoryManager()
+/*
+using InternalBufferManager = ::neurun::backend::acl_common::AclInternalBufferManager<
+    ::arm_compute::MemoryManagerOnDemand, ::arm_compute::PoolManager,
+    ::arm_compute::OffsetLifetimeManager, ::arm_compute::Allocator>;
+
+using TensorManager =
+    ::neurun::backend::acl_common::AclTensorManager<::neurun::backend::acl_neon::operand::INETensor,
+                                                    operand::NETensor, operand::NESubTensor,
+                                                    ::neurun::backend::operand::Object>;
+
+TensorManager *createTensorManager()
+{
+  const std::string executor_str = util::getConfigString(util::config::EXECUTOR);
+  if (executor_str == "Linear")
+  {
+    VERBOSE(acl_neon_createTensorManager) << "AclTensorManager as Linear" << std::endl;
+    return new TensorManager(new MemoryManager(), new LinearMemoryManager(),
+                             new InternalBufferManager());
+  }
+  else
+  {
+    VERBOSE(acl_neon_createTensorManager) << "AclTensorManager" << std::endl;
+    return new TensorManager(new MemoryManager(), new MemoryManager(), new InternalBufferManager());
+  }
+}
+*/
+
 } // namespace acl_neon
 } // namespace backend
 } // namespace neurun
diff --git a/runtimes/neurun/backend/cpu/TensorBuilder.h b/runtimes/neurun/backend/cpu/TensorBuilder.h
index 275793f..5bcbb6c 100644
--- a/runtimes/neurun/backend/cpu/TensorBuilder.h
+++ b/runtimes/neurun/backend/cpu/TensorBuilder.h
@@ -23,7 +23,8 @@
 #include 
 #include "operand/Tensor.h"
 #include "model/OperandIndexMap.h"
-#include "MemoryManager.h"
+#include "MemoryManager.h" // TODO Remove this
+#include "TensorManager.h"
 
 namespace neurun
 {
@@ -83,6 +84,7 @@ public:
   std::shared_ptr<operand::Tensor> at(const ::neurun::model::OperandIndex &ind);
 
 private:
+  // TODO Replace this by TensorManager
   std::unique_ptr<MemoryManager> _mem_mgr;
   model::OperandIndexMap<model::OperandInfo> _tensor_info_map;
   model::OperandIndexMap<std::pair<model::Layout, model::Layout>> _tensor_layouts_map;
diff --git a/runtimes/neurun/backend/cpu/TensorManager.cc b/runtimes/neurun/backend/cpu/TensorManager.cc
new file mode 100644
index 0000000..22d874b
--- /dev/null
+++ b/runtimes/neurun/backend/cpu/TensorManager.cc
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TensorManager.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+
+TensorManager::TensorManager() : _const_mgr{new MemoryManager()}, _nonconst_mgr{new MemoryManager()}
+{
+  // DO NOTHING
+}
+
+void TensorManager::allocateConsts(void) { _const_mgr->allocate(); }
+
+void TensorManager::allocateNonconsts(void) { _nonconst_mgr->allocate(); }
+
+void TensorManager::deallocateConsts(void) { _const_mgr->deallocate(); }
+
+void TensorManager::deallocateNonconsts(void) { _nonconst_mgr->deallocate(); }
+
+void TensorManager::buildTensor(const model::OperandIndex &ind,
+                                const model::OperandInfo &tensor_info, bool as_const)
+{
+  assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end());
+  if (as_const)
+  {
+    _const_mgr->buildTensor(ind, tensor_info);
+    _ind_to_mgr.insert({ind, *_const_mgr});
+  }
+  else
+  {
+    _nonconst_mgr->buildTensor(ind, tensor_info);
+    _ind_to_mgr.insert({ind, *_nonconst_mgr});
+  }
+}
+
+void TensorManager::claimPlan(const model::OperandIndex &ind, uint32_t size)
+{
+  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+  _ind_to_mgr.at(ind).claimPlan(ind, size);
+}
+
+void TensorManager::releasePlan(const model::OperandIndex &ind)
+{
+  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+  _ind_to_mgr.at(ind).releasePlan(ind);
+}
+
+std::shared_ptr<operand::Object> TensorManager::wrapTensor(const model::OperandIndex &ind)
+{
+  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+  return _ind_to_mgr.at(ind).wrapTensor(ind);
+}
+
+std::shared_ptr<operand::Tensor> TensorManager::at(const ::neurun::model::OperandIndex &ind)
+{
+  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+  return _ind_to_mgr.at(ind).tensors().at(ind);
+}
+
+model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &TensorManager::constTensors(void)
+{
+  return _const_mgr->tensors();
+}
+
+model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &TensorManager::nonconstTensors(void)
+{
+  return _nonconst_mgr->tensors();
+}
+
+void TensorManager::iterate(const std::function<void(const model::OperandIndex &)> &fn)
+{
+  for (auto it : _nonconst_mgr->tensors())
+    fn(it.first);
+
+  for (auto it : _const_mgr->tensors())
+    fn(it.first);
+}
+
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/backend/cpu/TensorManager.h b/runtimes/neurun/backend/cpu/TensorManager.h
new file mode 100644
index 0000000..c1f4a00
--- /dev/null
+++ b/runtimes/neurun/backend/cpu/TensorManager.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_CPU_TENSOR_MANAGER_H__
+#define __NEURUN_BACKEND_CPU_TENSOR_MANAGER_H__
+
+#include "backend/ITensorManager.h"
+#include "MemoryManager.h"
+#include "model/OperandIndexMap.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+
+class TensorManager : public backend::ITensorManager
+{
+public:
+  TensorManager();
+  virtual ~TensorManager() = default;
+
+  void allocateConsts(void) override;
+  void allocateNonconsts(void) override;
+  void deallocateConsts(void) override;
+  void deallocateNonconsts(void) override;
+
+  void buildTensor(const model::OperandIndex &ind, const model::OperandInfo &tensor_info,
+                   bool as_const);
+
+  void claimPlan(const model::OperandIndex &ind, uint32_t size);
+  void releasePlan(const model::OperandIndex &ind);
+
+  std::shared_ptr<operand::Object> wrapTensor(const model::OperandIndex &ind);
+  std::shared_ptr<operand::Tensor> at(const ::neurun::model::OperandIndex &ind);
+
+  model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &constTensors(void);
+  model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &nonconstTensors(void);
+
+  void iterate(const std::function<void(const model::OperandIndex &)> &fn);
+
+private:
+  std::unique_ptr<MemoryManager> _const_mgr;
+  std::unique_ptr<MemoryManager> _nonconst_mgr;
+  model::OperandIndexMap<MemoryManager &> _ind_to_mgr;
+};
+
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_CPU_TENSOR_MANAGER_H__
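
A minimal usage sketch of the cpu::TensorManager lifecycle introduced by this patch, assuming only the classes above. The driver function planAndAllocate and its operand arguments are hypothetical, and model::OperandInfo::total_size() is assumed as the size source for claimPlan():

#include "TensorManager.h" // runtimes/neurun/backend/cpu

namespace neurun
{
namespace backend
{
namespace cpu
{

// Hypothetical driver: route operands to the const/non-const pools via the
// merged buildTensor(..., as_const), plan non-const lifetimes, then allocate.
void planAndAllocate(TensorManager &mgr, const model::OperandIndex &weight_ind,
                     const model::OperandInfo &weight_info, const model::OperandIndex &ifm_ind,
                     const model::OperandInfo &ifm_info)
{
  // The as_const flag replaces the former two buildXXXTensor entry points;
  // each index is recorded in _ind_to_mgr so later calls route to the right pool.
  mgr.buildTensor(weight_ind, weight_info, /*as_const=*/true);
  mgr.buildTensor(ifm_ind, ifm_info, /*as_const=*/false);

  // claimPlan/releasePlan bracket a non-const tensor's live range so the
  // underlying memory planner can overlap buffers whose lifetimes never intersect.
  mgr.claimPlan(ifm_ind, ifm_info.total_size()); // total_size() assumed
  mgr.releasePlan(ifm_ind);

  // Constants and non-constants are now allocated (and freed) independently.
  mgr.allocateConsts();
  mgr.allocateNonconsts();
}

} // namespace cpu
} // namespace backend
} // namespace neurun

The same build/plan/allocate ordering applies on the ACL side, where AclTensorManager additionally allocates through its internal buffer manager and can reclaim unused constants early via tryDeallocConstants().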