--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ACL_COMMON_TENSOR_MANAGER_H__
+#define __NEURUN_BACKEND_ACL_COMMON_TENSOR_MANAGER_H__
+
+#include <arm_compute/runtime/IMemoryManager.h>
+
+#include <functional>
+#include <memory>
+
+#include "backend/ITensorManager.h"
+#include "AclMemoryManager.h"
+#include "AclInternalBufferManager.h"
+#include "model/OperandIndexMap.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_common
+{
+
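+/**
+ * @brief Templated tensor manager for the ACL backends.
+ *
+ * Tensors are registered into one of two memory managers, one for constant
+ * and one for non-constant operands, while an internal buffer manager
+ * provides scratch memory for ACL functions. This class takes ownership of
+ * the three managers passed to its constructor, and _ind_to_mgr records
+ * which memory manager owns each operand index so lookup and lifetime calls
+ * can be forwarded to the right one.
+ */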
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+class AclTensorManager : public backend::ITensorManager
+{
+public:
+ using T_AclMemoryManager = AclMemoryManager<T_Tensor, T_SubTensor, T_Object>;
+
+ AclTensorManager(T_AclMemoryManager *const_mgr, T_AclMemoryManager *nonconst_mgr,
+ IInternalBufferManager *inter_mgr);
+
+ virtual ~AclTensorManager() = default;
+
+ void allocateConsts(void) override;
+ void allocateNonconsts(void) override;
+ void deallocateConsts(void) override;
+ void deallocateNonconsts(void) override;
+
+ void allocateInternalBufferManager(void);
+ void deallocateInternalBufferManager(void);
+
+ void buildTensor(const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info,
+ bool as_const);
+ void buildSubtensor(const model::OperandIndex &parent, const model::OperandIndex &child,
+ const ::arm_compute::TensorShape &shape,
+ const ::arm_compute::Coordinates &coordinates);
+
+ std::shared_ptr<T_ITensor> findTensorAsParent(const model::OperandIndex &ind);
+
+ void startLifetime(const model::OperandIndex &ind);
+ void finishLifetime(const model::OperandIndex &ind);
+
+ std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind);
+ std::shared_ptr<T_ITensor> at(const ::neurun::model::OperandIndex &ind);
+
+ model::OperandIndexMap<std::shared_ptr<T_Tensor>> &constTensors(void);
+ model::OperandIndexMap<std::shared_ptr<T_Tensor>> &nonconstTensors(void);
+ model::OperandIndexMap<std::shared_ptr<T_SubTensor>> &nonconstSubtensors(void);
+
+ std::shared_ptr<::arm_compute::IMemoryManager> internal_buffer_manager(void);
+
+ void iterate(const std::function<void(const model::OperandIndex &)> &fn);
+
+ void tryDeallocConstants(void);
+
+private:
+ std::unique_ptr<T_AclMemoryManager> _const_mgr;
+ std::unique_ptr<T_AclMemoryManager> _nonconst_mgr;
+ std::unique_ptr<IInternalBufferManager> _inter_mgr;
+ model::OperandIndexMap<T_AclMemoryManager &> _ind_to_mgr;
+};
+
+} // namespace acl_common
+} // namespace backend
+} // namespace neurun
+
+#include <cassert>
+#include "util/logging.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_common
+{
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::AclTensorManager(
+ T_AclMemoryManager *const_mgr, T_AclMemoryManager *nonconst_mgr,
+ IInternalBufferManager *inter_mgr)
+ : _const_mgr{const_mgr}, _nonconst_mgr{nonconst_mgr}, _inter_mgr{inter_mgr}
+{
+ // DO NOTHING
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::allocateConsts(void)
+{
+ _const_mgr->allocate();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::allocateNonconsts(void)
+{
+ _nonconst_mgr->allocate();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::deallocateConsts(void)
+{
+ _const_mgr->deallocate();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::deallocateNonconsts(void)
+{
+ _nonconst_mgr->deallocate();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::allocateInternalBufferManager(
+ void)
+{
+ _inter_mgr->allocate();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::deallocateInternalBufferManager(
+ void)
+{
+ _inter_mgr->deallocate();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::buildTensor(
+ const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info, bool as_const)
+{
+ assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end());
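+ // Register the tensor with the matching memory manager and remember which
+ // manager owns this operand index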
+ if (as_const)
+ {
+ _const_mgr->buildTensor(ind, info);
+ _ind_to_mgr.insert({ind, *_const_mgr});
+ }
+ else
+ {
+ _nonconst_mgr->buildTensor(ind, info);
+ _ind_to_mgr.insert({ind, *_nonconst_mgr});
+ }
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::buildSubtensor(
+ const model::OperandIndex &parent, const model::OperandIndex &child,
+ const ::arm_compute::TensorShape &shape, const ::arm_compute::Coordinates &coordinates)
+{
+ assert(_ind_to_mgr.find(child) == _ind_to_mgr.end());
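+ // Subtensors share their parent's buffer, so they are always registered
+ // with the non-const memory manager; the parent must have been built first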
+ std::shared_ptr<T_ITensor> parent_tensor = findTensorAsParent(parent);
+ assert(parent_tensor);
+ _nonconst_mgr->buildSubtensor(parent_tensor, child, shape, coordinates);
+ _ind_to_mgr.insert({child, *_nonconst_mgr});
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+std::shared_ptr<T_ITensor>
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::findTensorAsParent(
+ const model::OperandIndex &ind)
+{
+ auto &tensors = _nonconst_mgr->tensors();
+ auto tensor_it = tensors.find(ind);
+ if (tensor_it != tensors.end())
+ {
+ // Parent is allocated as a tensor
+ return tensor_it->second;
+ }
+
+ auto &subtensors = _nonconst_mgr->subtensors();
+ auto subtensor_it = subtensors.find(ind);
+ if (subtensor_it != subtensors.end())
+ {
+ // Parent is allocated as a subtensor
+ return subtensor_it->second;
+ }
+
+ return nullptr;
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::startLifetime(
+ const model::OperandIndex &ind)
+{
+ assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+ _ind_to_mgr.at(ind).startLifetime(ind);
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::finishLifetime(
+ const model::OperandIndex &ind)
+{
+ assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+ _ind_to_mgr.at(ind).finishLifetime(ind);
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+std::shared_ptr<backend::operand::IObject>
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::wrapTensor(
+ const model::OperandIndex &ind)
+{
+ assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+ return _ind_to_mgr.at(ind).wrapTensor(ind);
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+std::shared_ptr<T_ITensor> AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::at(
+ const ::neurun::model::OperandIndex &ind)
+{
+ assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+
+ auto &tensors = _ind_to_mgr.at(ind).tensors();
+ if (tensors.find(ind) != tensors.end())
+ {
+ return tensors.at(ind);
+ }
+ else
+ {
+ return _ind_to_mgr.at(ind).subtensors().at(ind);
+ }
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+model::OperandIndexMap<std::shared_ptr<T_Tensor>> &
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::constTensors(void)
+{
+ return _const_mgr->tensors();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+model::OperandIndexMap<std::shared_ptr<T_Tensor>> &
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::nonconstTensors(void)
+{
+ return _nonconst_mgr->tensors();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+model::OperandIndexMap<std::shared_ptr<T_SubTensor>> &
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::nonconstSubtensors(void)
+{
+ return _nonconst_mgr->subtensors();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+std::shared_ptr<::arm_compute::IMemoryManager>
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::internal_buffer_manager(void)
+{
+ return _inter_mgr->internal_buffer_manager();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::iterate(
+ const std::function<void(const model::OperandIndex &)> &fn)
+{
+ for (const auto &entry : _nonconst_mgr->tensors())
+ fn(entry.first);
+
+ for (const auto &entry : _nonconst_mgr->subtensors())
+ fn(entry.first);
+
+ for (const auto &entry : _const_mgr->tensors())
+ fn(entry.first);
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::tryDeallocConstants(void)
+{
+ auto &tensors = _const_mgr->tensors();
+ auto &objects = _const_mgr->objects();
+
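+ // Constant tensors whose ACL handle is no longer used by any function can
+ // be freed early; drop them from both the tensor and object maps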
+ for (auto it = tensors.begin(); it != tensors.end();)
+ {
+ const auto &ind = it->first;
+ auto tensor = it->second;
+ if (tensor->handle() && !tensor->handle()->is_used())
+ {
+ VERBOSE(AclTensorManager) << "Tensor #" << ind.value()
+ << " will be deallocated as an unused constant tensor" << std::endl;
+ tensor->allocator()->free();
+ tensor.reset();
+ it = tensors.erase(it);
+ objects.erase(ind);
+ }
+ else
+ {
+ ++it;
+ }
+ }
+}
+
+} // namespace acl_common
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ACL_COMMON_TENSOR_MANAGER_H__
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TensorManager.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+
+TensorManager::TensorManager() : _const_mgr{new MemoryManager()}, _nonconst_mgr{new MemoryManager()}
+{
+ // DO NOTHING
+}
+
+void TensorManager::allocateConsts(void) { _const_mgr->allocate(); }
+
+void TensorManager::allocateNonconsts(void) { _nonconst_mgr->allocate(); }
+
+void TensorManager::deallocateConsts(void) { _const_mgr->deallocate(); }
+
+void TensorManager::deallocateNonconsts(void) { _nonconst_mgr->deallocate(); }
+
+void TensorManager::buildTensor(const model::OperandIndex &ind,
+ const model::OperandInfo &tensor_info, bool as_const)
+{
+ assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end());
+ if (as_const)
+ {
+ _const_mgr->buildTensor(ind, tensor_info);
+ _ind_to_mgr.insert({ind, *_const_mgr});
+ }
+ else
+ {
+ _nonconst_mgr->buildTensor(ind, tensor_info);
+ _ind_to_mgr.insert({ind, *_nonconst_mgr});
+ }
+}
+
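+// claimPlan/releasePlan delegate to the memory manager that owns the operand;
+// the manager uses these calls to mark where the tensor's backing memory
+// becomes live and where it is released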
+void TensorManager::claimPlan(const model::OperandIndex &ind, uint32_t size)
+{
+ assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+ _ind_to_mgr.at(ind).claimPlan(ind, size);
+}
+
+void TensorManager::releasePlan(const model::OperandIndex &ind)
+{
+ assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+ _ind_to_mgr.at(ind).releasePlan(ind);
+}
+
+std::shared_ptr<backend::operand::IObject> TensorManager::wrapTensor(const model::OperandIndex &ind)
+{
+ assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+ return _ind_to_mgr.at(ind).wrapTensor(ind);
+}
+
+std::shared_ptr<operand::Tensor> TensorManager::at(const ::neurun::model::OperandIndex &ind)
+{
+ assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+ return _ind_to_mgr.at(ind).tensors().at(ind);
+}
+
+model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &TensorManager::constTensors(void)
+{
+ return _const_mgr->tensors();
+}
+
+model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &TensorManager::nonconstTensors(void)
+{
+ return _nonconst_mgr->tensors();
+}
+
+void TensorManager::iterate(const std::function<void(const model::OperandIndex &)> &fn)
+{
+ for (const auto &entry : _nonconst_mgr->tensors())
+ fn(entry.first);
+
+ for (const auto &entry : _const_mgr->tensors())
+ fn(entry.first);
+}
+
+} // namespace cpu
+} // namespace backend
+} // namespace neurun