#include "ConstantInitializer.h"
#include "KernelGenerator.h"
#include "ShapeFixer.h"
-#include "MemoryManager.h"
+#include "TensorManager.h"
#include "backend/CustomKernelRegistry.h"
namespace neurun
newContext(const model::Operands &operands,
const std::shared_ptr<custom::KernelRegistry> &) const override
{
- auto tensor_builder = std::make_shared<TensorBuilder>(createMemoryManager());
+ auto tensor_builder = std::make_shared<TensorBuilder>(createTensorManager());
return std::unique_ptr<BackendContext>{new BackendContext{
this, tensor_builder, std::make_shared<ConstantInitializer>(operands, tensor_builder),
std::make_shared<KernelGenerator>(operands, tensor_builder),
const auto act_info = acl_common::asActivationLayerInfo(activation);
auto fn = nnfw::cpp14::make_unique<::arm_compute::CLConvolutionLayer>(
- _tensor_builder->acl_memory_manager()->internal_buffer_manager());
+ _tensor_builder->acl_tensor_manager()->internal_buffer_manager());
fn->configure(ifm_alloc->handle(), ker_alloc->handle(), bias_alloc->handle(), ofm_alloc->handle(),
conv_info, ::arm_compute::WeightsInfo(), ::arm_compute::Size2D(1U, 1U), act_info);
if (ker_height == 3 && ker_width == 3)
{
auto fn = nnfw::cpp14::make_unique<::arm_compute::CLDepthwiseConvolutionLayer3x3>(
- _tensor_builder->acl_memory_manager()->internal_buffer_manager());
+ _tensor_builder->acl_tensor_manager()->internal_buffer_manager());
fn->configure(ifm_alloc->handle(), ker_alloc->handle(), bias_alloc->handle(),
ofm_alloc->handle(), conv_info, multiplier, act_info);
auto acl_layout = output_alloc->handle()->info()->data_layout();
auto fn = nnfw::cpp14::make_unique<arm_compute::CLFullyConnectedReshapingLayer>(
- _tensor_builder->acl_memory_manager()->internal_buffer_manager());
+ _tensor_builder->acl_tensor_manager()->internal_buffer_manager());
fn->configure(
input_alloc->handle(), weight_alloc->handle(), bias_alloc->handle(), output_alloc->handle(),
auto input_alloc = _tensor_builder->at(input_index).get();
auto fn = nnfw::cpp14::make_unique<::arm_compute::CLSoftmaxLayer>(
- _tensor_builder->acl_memory_manager()->internal_buffer_manager());
+ _tensor_builder->acl_tensor_manager()->internal_buffer_manager());
fn->configure(input_alloc->handle(), output_alloc->handle(), beta);
std::unique_ptr<::arm_compute::IFunction> fn;
auto rnn_layer = nnfw::cpp14::make_unique<::arm_compute::CLRNNLayerEx>(
- _tensor_builder->acl_memory_manager()->internal_buffer_manager());
+ _tensor_builder->acl_tensor_manager()->internal_buffer_manager());
rnn_layer->configure(input_alloc->handle(), weights_alloc->handle(),
recurrent_weights_alloc->handle(), bias_alloc->handle(),
hidden_state_out_alloc->handle(), output_alloc->handle(), act_info);
std::unique_ptr<::arm_compute::IFunction> fn;
auto l = nnfw::cpp14::make_unique<::arm_compute::CLTransposeConvLayer>(
- _tensor_builder->acl_memory_manager()->internal_buffer_manager());
+ _tensor_builder->acl_tensor_manager()->internal_buffer_manager());
l->configure(ifm_alloc->handle(), ker_alloc->handle(), nullptr, ofm_alloc->handle(), tconv_info,
invalid_horizontal, invalid_vertical);
* limitations under the License.
*/
-// TODO Rename this to TensorManager.h
-#ifndef __NEURUN_BACKEND_ACL_CL_MEMORY_MANAGER_H__
-#define __NEURUN_BACKEND_ACL_CL_MEMORY_MANAGER_H__
+#ifndef __NEURUN_BACKEND_ACL_CL_TENSOR_MANAGER_H__
+#define __NEURUN_BACKEND_ACL_CL_TENSOR_MANAGER_H__
#include <arm_compute/runtime/CL/CLBufferAllocator.h>
#include <arm_compute/runtime/PoolManager.h>
::arm_compute::BlobLifetimeManager, ::arm_compute::CLBufferAllocator,
::arm_compute::CLMemoryGroup>;
-// TODO Remove this
-MemoryManager *createMemoryManager()
-{
- const std::string executor_str = util::getConfigString(util::config::EXECUTOR);
-
- if (executor_str == "Linear")
- {
- VERBOSE(acl_cl_createMemoryManager) << "AclMemoryManager as Linear" << std::endl;
- return new LinearMemoryManager();
- }
- else
- {
- VERBOSE(acl_cl_createMemoryManager) << "AclMemoryManager" << std::endl;
- return new MemoryManager();
- }
-}
-
-// TODO Enable this
-/*
using InternalBufferManager = ::neurun::backend::acl_common::AclInternalBufferManager<
::arm_compute::MemoryManagerOnDemand, ::arm_compute::PoolManager,
::arm_compute::BlobLifetimeManager, ::arm_compute::CLBufferAllocator>;
return new TensorManager(new MemoryManager(), new MemoryManager(), new InternalBufferManager());
}
}
-*/
} // namespace acl_cl
} // namespace backend
} // namespace neurun
-#endif // __NEURUN_BACKEND_ACL_CL_MEMORY_MANAGER_H__
+#endif // __NEURUN_BACKEND_ACL_CL_TENSOR_MANAGER_H__
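
The hunks above rename acl_cl's MemoryManager.h to TensorManager.h and enable createTensorManager(), which hands the TensorBuilder a manager that keeps constant and non-constant tensors in separate pools and owns the internal buffer manager used by kernels. Below is a minimal sketch (not part of the patch) of how such a manager is driven, assuming the acl_cl TensorManager alias and createTensorManager() declared in this header; the operand indices, tensor infos, and rank values are hypothetical placeholders.

```cpp
#include "TensorManager.h" // acl_cl TensorManager / createTensorManager (assumed from this header)

#include <memory>

// Sketch only: `weights`, `ofm`, and their TensorInfos are hypothetical inputs.
void tensor_manager_sketch(const neurun::model::OperandIndex &weights,
                           const ::arm_compute::TensorInfo &weights_info,
                           const neurun::model::OperandIndex &ofm,
                           const ::arm_compute::TensorInfo &ofm_info)
{
  std::unique_ptr<neurun::backend::acl_cl::TensorManager> tensor_mgr{
      neurun::backend::acl_cl::createTensorManager()};

  // Constant and non-constant tensors are routed to separate memory managers.
  tensor_mgr->buildTensor(weights, weights_info, /*rank=*/4, /*as_const=*/true);
  tensor_mgr->buildTensor(ofm, ofm_info, /*rank=*/4, /*as_const=*/false);

  // Two-phase allocation replaces the single MemoryManager::allocate().
  tensor_mgr->allocateConsts();
  tensor_mgr->allocateNonconsts();

  // Kernel scratch memory comes from the shared internal buffer manager, which
  // TensorBuilder::finalize() allocates; constant buffers may then be dropped
  // once the functions are prepared (postFunctionPrepare()).
  tensor_mgr->allocateInternalBufferManager();
  tensor_mgr->tryDeallocConstants();
}
```
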
AclLinearMemoryManager()
: _allocator{nullptr},
_io_manager{createMemoryManager<T_MemoryManager, T_PoolManager, T_LifetimeManager>()},
- _io_group{std::make_shared<T_MemoryGroup>(_io_manager)},
- _internal_manager{createMemoryManager<T_MemoryManager, T_PoolManager, T_LifetimeManager>()}
+ _io_group{std::make_shared<T_MemoryGroup>(_io_manager)}
{
// DO NOTHING
}
virtual void allocate(void) override
{
_allocator = std::make_shared<T_Allocator>();
- _internal_manager->populate(*_allocator, 1);
_io_manager->populate(*_allocator, 1);
_io_group->acquire();
}
{
_io_group->release();
_io_manager->clear();
- _internal_manager->clear();
}
virtual void startLifetime(const model::OperandIndex &ind) override
tensor->allocator()->allocate();
}
- virtual std::shared_ptr<::arm_compute::IMemoryManager> internal_buffer_manager() override
- {
- return _internal_manager;
- }
-
private:
std::shared_ptr<T_Allocator> _allocator;
std::shared_ptr<T_MemoryManager> _io_manager;
std::shared_ptr<T_MemoryGroup> _io_group;
- std::shared_ptr<T_MemoryManager> _internal_manager;
};
} // namespace acl_common
}
}
- virtual void startLifetime(const model::OperandIndex &) {}
- virtual void finishLifetime(const model::OperandIndex &) {}
-
- // TODO Remove this
- virtual std::shared_ptr<::arm_compute::IMemoryManager> internal_buffer_manager()
- {
- return nullptr;
- }
+ virtual void startLifetime(const model::OperandIndex &) { /* DO NOTHING */}
+ virtual void finishLifetime(const model::OperandIndex &) { /* DO NOTHING */}
void buildTensor(const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info,
size_t rank)
void deallocateInternalBufferManager(void);
void buildTensor(const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info,
- bool as_const);
+ size_t rank, bool as_const);
void buildSubtensor(const model::OperandIndex &parent, const model::OperandIndex &child,
const ::arm_compute::TensorShape &shape,
- const ::arm_compute::Coordinates &coordinates);
+ const ::arm_compute::Coordinates &coordinates, size_t rank,
+ bool extent_parent);
std::shared_ptr<T_ITensor> findTensorAsParent(const model::OperandIndex &ind);
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::buildTensor(
- const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info, bool as_const)
+ const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info, size_t rank,
+ bool as_const)
{
assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end());
if (as_const)
{
- _const_mgr->buildTensor(ind, info);
+ _const_mgr->buildTensor(ind, info, rank);
_ind_to_mgr.insert({ind, *_const_mgr});
}
else
{
- _nonconst_mgr->buildTensor(ind, info);
+ _nonconst_mgr->buildTensor(ind, info, rank);
_ind_to_mgr.insert({ind, *_nonconst_mgr});
}
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::buildSubtensor(
const model::OperandIndex &parent, const model::OperandIndex &child,
- const ::arm_compute::TensorShape &shape, const ::arm_compute::Coordinates &coordinates)
+ const ::arm_compute::TensorShape &shape, const ::arm_compute::Coordinates &coordinates,
+ size_t rank, bool extent_parent)
{
assert(_ind_to_mgr.find(child) == _ind_to_mgr.end());
std::shared_ptr<T_ITensor> parent_tensor = findTensorAsParent(parent);
assert(parent_tensor);
- _nonconst_mgr->buildSubtensor(parent_tensor, child, shape, coordinates);
+ _nonconst_mgr->buildSubtensor(parent_tensor, child, shape, coordinates, rank, extent_parent);
_ind_to_mgr.insert({child, *_nonconst_mgr});
}
#include <arm_compute/core/Types.h>
#include <backend/ITensorBuilder.h>
#include "model/OperandIndexMap.h"
-#include "AclMemoryManager.h" // TODO Remove this
#include "AclTensorManager.h"
#include "cpp14/memory.h"
#include <util/Utils.h>
class TemplTensorBuilder : public ITensorBuilder
{
public:
- using T_AclMemoryManager = AclMemoryManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>;
+ using T_AclTensorManager = AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>;
- TemplTensorBuilder(T_AclMemoryManager *mem_mgr);
+ TemplTensorBuilder(T_AclTensorManager *tensor_mgr);
/**
* @brief Register tensor information to allocate on ACL-CL backend
void notifyLastUse(const model::OperandIndex &) override;
void prepare(void) override;
- void allocate(void) override;
-
- // TODO Fill these
- void allocateConsts() override {}
- void allocateNonconsts() override {}
- void postFunctionPrepare() override {}
- void finalize() override {}
+ void allocate(void) override; // TODO Remove this
+ void allocateConsts() override;
+ void allocateNonconsts() override;
+ void postFunctionPrepare() override;
+ void finalize() override;
std::shared_ptr<::neurun::backend::operand::ITensor>
tensorAt(const model::OperandIndex &ind) override;
void preVisit(const model::Operation &node) override;
void postVisit(const model::Operation &node) override;
- std::unique_ptr<IMemoryManager> releaseMemoryManager(void) override;
+ std::unique_ptr<ITensorManager> releaseTensorManager(void) override;
std::shared_ptr<T_ITensor> at(const ::neurun::model::OperandIndex &ind);
/**
void dimCorrection(const model::OperandIndex &index, bool apply_dim_correction);
- T_AclMemoryManager *acl_memory_manager(void) { return _mem_mgr.get(); }
+ T_AclTensorManager *acl_tensor_manager(void) { return _tensor_mgr.get(); }
private:
void buildTensors(void);
model::OperandIndexMap<bool> _apply_dim_correction_map;
model::OperandIndexMap<std::pair<model::Layout, model::Layout>> _tensor_layouts_map;
- // TODO Replace this by TensorManager
- std::unique_ptr<T_AclMemoryManager> _mem_mgr;
+ std::unique_ptr<T_AclTensorManager> _tensor_mgr;
+ model::OperandIndexSequence _constants;
// TODO Consider dividing TensorBuilder into Linear and others
const std::string _executor_str;
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::TemplTensorBuilder(
- T_AclMemoryManager *mem_mgr)
- : _mem_mgr{mem_mgr}, _executor_str(util::getConfigString(util::config::EXECUTOR)),
+ T_AclTensorManager *tensor_mgr)
+ : _tensor_mgr{tensor_mgr}, _executor_str(util::getConfigString(util::config::EXECUTOR)),
_first_uses_num(0)
{
- assert(_mem_mgr);
+ assert(_tensor_mgr);
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::registerTensorInfo(
const model::OperandIndex &ind, const model::OperandInfo &info, model::Layout frontend_layout,
- model::Layout backend_layout, bool /*as_const*/)
+ model::Layout backend_layout, bool as_const)
{
- // TODO Adding handling tensor as const
- assert(_mem_mgr->tensors().size() == 0);
+ assert(_tensor_mgr->constTensors().size() == 0);
+ assert(_tensor_mgr->nonconstTensors().size() == 0);
_tensor_info_map.emplace(ind, info);
_apply_dim_correction_map.emplace(ind, true);
_tensor_layouts_map.insert({ind, std::make_pair(frontend_layout, backend_layout)});
+ if (as_const)
+ _constants.append(ind);
assert(_first_uses_visit.find(ind) == _first_uses_visit.end());
_first_uses_visit[ind] = false;
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::registerSubTensorInfo(
const model::OperandIndex &ind, const compiler::SubTensorInfo &info)
{
- assert(_mem_mgr->tensors().size() == 0);
+ assert(_tensor_mgr->constTensors().size() == 0);
+ assert(_tensor_mgr->nonconstTensors().size() == 0);
_subtensor_info_map.emplace(ind, info);
_apply_dim_correction_map.emplace(ind, true);
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::allocate(void)
{
- validate();
+ allocateConsts();
+ allocateNonconsts();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::allocateConsts(void)
+{
+ assert(_constants.size() == _tensor_mgr->constTensors().size());
+ _tensor_mgr->allocateConsts();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::allocateNonconsts(void)
+{
+ assert(_tensor_info_map.size() == _tensor_mgr->nonconstTensors().size() + _constants.size());
+ _tensor_mgr->allocateNonconsts();
+}
- assert(_tensor_info_map.size() == _mem_mgr->tensors().size());
- _mem_mgr->allocate();
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::postFunctionPrepare(void)
+{
+ _tensor_mgr->tryDeallocConstants();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::finalize(void)
+{
+ validate();
+ _tensor_mgr->allocateInternalBufferManager();
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::tensorAt(
const model::OperandIndex &ind)
{
- auto &tensors = _mem_mgr->tensors();
- if (tensors.find(ind) != tensors.end())
- {
- return tensors.at(ind);
- }
- else
- {
- return _mem_mgr->subtensors().at(ind);
- }
+ return _tensor_mgr->at(ind);
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::wrapTensor(
const model::OperandIndex &ind)
{
- return _mem_mgr->wrapTensor(ind);
+ return _tensor_mgr->wrapTensor(ind);
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::iterate(
const IterateFunction &fn)
{
- for (auto it : _mem_mgr->tensors())
- {
- fn(it.first);
- }
- for (auto it : _mem_mgr->subtensors())
- {
- fn(it.first);
- }
+ _tensor_mgr->iterate(fn);
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
std::shared_ptr<T_ITensor> TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::at(
const ::neurun::model::OperandIndex &ind)
{
- auto &tensors = _mem_mgr->tensors();
- if (tensors.find(ind) != tensors.end())
- {
- return tensors.at(ind);
- }
- else
- {
- return _mem_mgr->subtensors().at(ind);
- }
+ return _tensor_mgr->at(ind);
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
return false;
}
- auto &subtensors = _mem_mgr->subtensors();
+ auto &subtensors = _tensor_mgr->nonconstSubtensors();
if (subtensors.find(child) == subtensors.end())
{
return false;
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
-std::unique_ptr<IMemoryManager>
-TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::releaseMemoryManager(void)
+std::unique_ptr<ITensorManager>
+TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::releaseTensorManager(void)
{
- return std::move(_mem_mgr);
+ return std::move(_tensor_mgr);
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::buildTensors(void)
{
- assert(_mem_mgr->tensors().size() == 0);
+ assert(_tensor_mgr->constTensors().size() == 0);
+ assert(_tensor_mgr->nonconstTensors().size() == 0);
for (auto &entry : _tensor_info_map)
{
const auto &backend_layout = _tensor_layouts_map[root_parent].second;
auto tensor_info = asTensorInfo(info.shape(), info.typeInfo(), frontend_layout, backend_layout,
_apply_dim_correction_map[ind]);
- _mem_mgr->buildTensor(ind, tensor_info, info.shape().rank());
+ _tensor_mgr->buildTensor(ind, tensor_info, info.shape().rank(), _constants.contains(ind));
}
}
// TODO Handle SubTensor(subsumption)
// Currently this TemplTensorBuilder does not have subsumption info yet
// Allocated subtensor will be mapped to _subtensors instead of _tensors
- assert(_mem_mgr->subtensors().size() == 0);
+ assert(_tensor_mgr->nonconstSubtensors().size() == 0);
// To make subtensor, parent tensor must be made first
// For this condition, use stack
// 3-2) If parent tensor is not made, we can't make child tensor yet
// Push parent tensor index to stack and return to 4)
// 4) If stack is empty, return to 1), else return to 2)
- auto &tensors = _mem_mgr->tensors();
- auto &subtensors = _mem_mgr->subtensors();
+ auto &subtensors = _tensor_mgr->nonconstSubtensors();
for (auto &entry : _subtensor_info_map)
{
model::OperandIndex ind = entry.first;
}
auto parent = info.parent();
- std::shared_ptr<T_ITensor> parent_tensor;
-
- if (tensors.find(parent) != tensors.end())
- {
- // Parent is allocated as tensor
- parent_tensor = tensors[parent];
- }
- else if (subtensors.find(parent) != subtensors.end())
- {
- // Parent is allocated as subtensor
- parent_tensor = subtensors[parent];
- }
- else
+ std::shared_ptr<T_ITensor> parent_tensor = _tensor_mgr->findTensorAsParent(parent);
+ if (!parent_tensor)
{
// Cannot find allocated parent tensor: allocate parent first
assert(_subtensor_info_map.find(parent) != _subtensor_info_map.end());
assert(info.type().offset() == parent_tensor->info()->quantization_info().offset);
assert(info.type().scale() == parent_tensor->info()->quantization_info().scale);
assert(asDataType(info.type().type()) == parent_tensor->info()->data_type());
+
// NOTE SubTensor's layout must be the same as the layout of its parent tensor
const auto &root_parent = findRootParent(parent);
const auto &frontend_layout = _tensor_layouts_map[root_parent].first;
_apply_dim_correction_map[current]);
::arm_compute::Coordinates coordinates =
asTensorCoordinate(info.offset(), frontend_layout, backend_layout);
- _mem_mgr->buildSubtensor(parent_tensor, current, shape, coordinates, info.shape().rank(),
- true);
+ _tensor_mgr->buildSubtensor(parent, current, shape, coordinates, info.shape().rank(), true);
stack.pop();
}
}
bool is_parent = _parent_def.find(ind) != _parent_def.end();
if (!is_subtensor && !is_parent)
{
- _mem_mgr->startLifetime(ind);
+ _tensor_mgr->startLifetime(ind);
return;
}
}
else
{
- _mem_mgr->startLifetime(ind);
+ _tensor_mgr->startLifetime(ind);
}
}
else if (is_subtensor)
bool is_parent = _parent_uses.find(ind) != _parent_uses.end();
if (!is_subtensor && !is_parent)
{
- _mem_mgr->finishLifetime(ind);
+ _tensor_mgr->finishLifetime(ind);
return;
}
}
else
{
- _mem_mgr->finishLifetime(ind);
+ _tensor_mgr->finishLifetime(ind);
}
}
else if (is_subtensor)
#include "ConstantInitializer.h"
#include "KernelGenerator.h"
#include "ShapeFixer.h"
-#include "MemoryManager.h"
+#include "TensorManager.h"
#include "backend/CustomKernelRegistry.h"
namespace neurun
newContext(const model::Operands &operands,
const std::shared_ptr<custom::KernelRegistry> &) const override
{
- auto tensor_builder = std::make_shared<TensorBuilder>(createMemoryManager());
+ auto tensor_builder = std::make_shared<TensorBuilder>(createTensorManager());
return std::unique_ptr<BackendContext>{new BackendContext{
this, tensor_builder, std::make_shared<ConstantInitializer>(operands, tensor_builder),
std::make_shared<KernelGenerator>(operands, tensor_builder),
const auto act_info = acl_common::asActivationLayerInfo(activation);
auto fn = nnfw::cpp14::make_unique<::arm_compute::NEConvolutionLayer>(
- _tensor_builder->acl_memory_manager()->internal_buffer_manager());
+ _tensor_builder->acl_tensor_manager()->internal_buffer_manager());
fn->configure(ifm_alloc->handle(), ker_alloc->handle(), bias_alloc->handle(), ofm_alloc->handle(),
conv_info, ::arm_compute::WeightsInfo(), ::arm_compute::Size2D(1U, 1U), act_info);
auto acl_layout = output_alloc->handle()->info()->data_layout();
auto fn = nnfw::cpp14::make_unique<arm_compute::NEFullyConnectedReshapingLayer>(
- _tensor_builder->acl_memory_manager()->internal_buffer_manager());
+ _tensor_builder->acl_tensor_manager()->internal_buffer_manager());
fn->configure(
input_alloc->handle(), weight_alloc->handle(), bias_alloc->handle(), output_alloc->handle(),
std::unique_ptr<::arm_compute::IFunction> fn;
auto rnn_layer = nnfw::cpp14::make_unique<::arm_compute::NERNNLayerEx>(
- _tensor_builder->acl_memory_manager()->internal_buffer_manager());
+ _tensor_builder->acl_tensor_manager()->internal_buffer_manager());
rnn_layer->configure(input_alloc->handle(), weights_alloc->handle(),
recurrent_weights_alloc->handle(), bias_alloc->handle(),
hidden_state_out_alloc->handle(), output_alloc->handle(), act_info);
auto input_alloc = _tensor_builder->at(input_index).get();
auto fn = nnfw::cpp14::make_unique<::arm_compute::NESoftmaxLayer>(
- _tensor_builder->acl_memory_manager()->internal_buffer_manager());
+ _tensor_builder->acl_tensor_manager()->internal_buffer_manager());
fn->configure(input_alloc->handle(), output_alloc->handle(), beta);
* limitations under the License.
*/
-// TODO Rename this to TensorManager.h
-#ifndef __NEURUN_BACKEND_ACL_NEON_MEMORY_MANAGER_H__
-#define __NEURUN_BACKEND_ACL_NEON_MEMORY_MANAGER_H__
+#ifndef __NEURUN_BACKEND_ACL_NEON_TENSOR_MANAGER_H__
+#define __NEURUN_BACKEND_ACL_NEON_TENSOR_MANAGER_H__
#include <arm_compute/runtime/Allocator.h>
#include <arm_compute/runtime/PoolManager.h>
::arm_compute::MemoryManagerOnDemand, ::arm_compute::PoolManager,
::arm_compute::OffsetLifetimeManager, ::arm_compute::Allocator, ::arm_compute::MemoryGroup>;
-// TODO Remove this
-MemoryManager *createMemoryManager()
-{
- const std::string executor_str = util::getConfigString(util::config::EXECUTOR);
- if (executor_str == "Linear")
- {
- VERBOSE(acl_neon_createMemoryManager) << "AclMemoryManager as Linear" << std::endl;
- return new LinearMemoryManager();
- }
- else
- {
- VERBOSE(acl_neon_createMemoryManager) << "AclMemoryManager" << std::endl;
- return new MemoryManager();
- }
-}
-
-// TODO Enable this instead of createMemoryManager()
-/*
using InternalBufferManager = ::neurun::backend::acl_common::AclInternalBufferManager<
::arm_compute::MemoryManagerOnDemand, ::arm_compute::PoolManager,
::arm_compute::OffsetLifetimeManager, ::arm_compute::Allocator>;
return new TensorManager(new MemoryManager(), new MemoryManager(), new InternalBufferManager());
}
}
-*/
} // namespace acl_neon
} // namespace backend
} // namespace neurun
-#endif // __NEURUN_BACKEND_ACL_NEON_MEMORY_MANAGER_H__
+#endif // __NEURUN_BACKEND_ACL_NEON_TENSOR_MANAGER_H__
namespace cpu
{
-TensorBuilder::TensorBuilder() : _mem_mgr{new MemoryManager()}
+TensorBuilder::TensorBuilder() : _tensor_mgr{new TensorManager()}
{
// DO NOTHING
}
void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind,
const model::OperandInfo &info,
model::Layout frontend_layout, model::Layout backend_layout,
- bool /*as_const*/)
+ bool as_const)
{
- // TODO Adding handling tensor as const
_tensor_info_map.emplace(ind, info);
_tensor_layouts_map.insert({ind, std::make_pair(frontend_layout, backend_layout)});
+
+ if (as_const)
+ _constants.append(ind);
}
void TensorBuilder::registerSubTensorInfo(const model::OperandIndex &,
{
assert(_tensor_info_map.find(ind) != _tensor_info_map.end());
const auto tensor_info = asTensorInfo(_tensor_info_map.at(ind), _tensor_layouts_map[ind].first);
- _mem_mgr->buildTensor(ind, tensor_info);
-
const auto size = tensor_info.total_size();
- _mem_mgr->claimPlan(ind, size);
+ _tensor_mgr->buildTensor(ind, tensor_info, _constants.contains(ind));
+ _tensor_mgr->claimPlan(ind, size);
}
-void TensorBuilder::notifyLastUse(const model::OperandIndex &ind) { _mem_mgr->releasePlan(ind); }
+void TensorBuilder::notifyLastUse(const model::OperandIndex &ind) { _tensor_mgr->releasePlan(ind); }
-void TensorBuilder::prepare(void) { _mem_mgr->allocate(); }
+void TensorBuilder::prepare(void)
+{
+ _tensor_mgr->allocateConsts();
+ _tensor_mgr->allocateNonconsts();
+}
+// TODO Remove this
void TensorBuilder::allocate(void)
{
// NOTE For now nothing to do. Allocation is done in prepare stage, which is not appropriate
// This is because CPU kernels require `ITensor`s to be allocated before Kernel Generation.
}
+void TensorBuilder::allocateConsts()
+{
+ // NOTE For now nothing to do. Allocation is done in prepare stage, which is not appropriate
+ // This is because CPU kernels require `ITensor`s to be allocated before Kernel Generation.
+}
+
+void TensorBuilder::allocateNonconsts()
+{
+ // NOTE For now nothing to do. Allocation is done in prepare stage, which is not appropriate
+ // This is because CPU kernels require `ITensor`s to be allocated before Kernel Generation.
+}
+
std::shared_ptr<::neurun::backend::operand::ITensor>
TensorBuilder::tensorAt(const model::OperandIndex &ind)
{
- return _mem_mgr->tensors().at(ind);
+ return _tensor_mgr->at(ind);
}
std::shared_ptr<backend::operand::IObject> TensorBuilder::wrapTensor(const model::OperandIndex &ind)
{
- return _mem_mgr->wrapTensor(ind);
+ return _tensor_mgr->wrapTensor(ind);
}
-void TensorBuilder::iterate(const IterateFunction &fn)
-{
- for (auto it : _mem_mgr->tensors())
- {
- fn(it.first);
- }
-}
+void TensorBuilder::iterate(const IterateFunction &fn) { _tensor_mgr->iterate(fn); }
std::shared_ptr<operand::Tensor> TensorBuilder::at(const ::neurun::model::OperandIndex &ind)
{
- return _mem_mgr->tensors().at(ind);
+ return _tensor_mgr->at(ind);
}
-std::unique_ptr<IMemoryManager> TensorBuilder::releaseMemoryManager(void)
+std::unique_ptr<ITensorManager> TensorBuilder::releaseTensorManager(void)
{
- return std::move(_mem_mgr);
+ return std::move(_tensor_mgr);
}
} // namespace cpu
#include <backend/operand/Object.h>
#include "operand/Tensor.h"
#include "model/OperandIndexMap.h"
-#include "MemoryManager.h" // TODO Remove this
#include "TensorManager.h"
namespace neurun
const compiler::SubTensorInfo &info) override;
void notifyFirstUse(const model::OperandIndex &) override;
-
void notifyLastUse(const model::OperandIndex &) override;
void prepare(void) override;
-
- void allocate(void) override;
-
- // TODO Fill these
- void allocateConsts() override {}
- void allocateNonconsts() override {}
- void postFunctionPrepare() override {}
- void finalize() override {}
+ void allocate(void) override; // TODO Remove this
+ void allocateConsts() override;
+ void allocateNonconsts() override;
+ void postFunctionPrepare() override { /* DO NOTHING */}
+ void finalize() override { /* DO NOTHING */}
std::shared_ptr<::neurun::backend::operand::ITensor>
tensorAt(const model::OperandIndex &ind) override;
void iterate(const IterateFunction &fn) override;
- void preVisit(const model::Operation &) override {}
- void postVisit(const model::Operation &) override {}
+ void preVisit(const model::Operation &) override { /* DO NOTHING */}
+ void postVisit(const model::Operation &) override { /* DO NOTHING */}
- std::unique_ptr<IMemoryManager> releaseMemoryManager(void) override;
+ std::unique_ptr<ITensorManager> releaseTensorManager(void) override;
std::shared_ptr<operand::Tensor> at(const ::neurun::model::OperandIndex &ind);
private:
- // TODO Replace this by TensorManager
- std::unique_ptr<MemoryManager> _mem_mgr;
+ std::unique_ptr<TensorManager> _tensor_mgr;
model::OperandIndexMap<model::OperandInfo> _tensor_info_map;
model::OperandIndexMap<std::pair<model::Layout, model::Layout>> _tensor_layouts_map;
+ model::OperandIndexSequence _constants;
};
} // namespace cpu
namespace srcn
{
-TensorBuilder::TensorBuilder() : _mem_mgr{new MemoryManager()}
+TensorBuilder::TensorBuilder() : _tensor_mgr{new TensorManager()}
{
// DO NOTHING
}
void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind,
- const model::OperandInfo &info, model::Layout, model::Layout,
- bool /*as_const*/)
+ const model::OperandInfo &info,
+ model::Layout /*frontend_layout*/,
+ model::Layout /*backend_layout*/, bool as_const)
{
- // TODO Adding handling tensor as const
_tensor_info_map.emplace(ind, info);
+
// TODO set the layout
+
+ if (as_const)
+ _constants.append(ind);
}
void TensorBuilder::registerSubTensorInfo(const model::OperandIndex &,
{
assert(_tensor_info_map.find(ind) != _tensor_info_map.end());
const auto &info = _tensor_info_map.at(ind);
- _mem_mgr->buildTensor(ind, info);
-
const auto size = info.total_size();
- _mem_mgr->claimPlan(ind, size);
+ _tensor_mgr->buildTensor(ind, info, _constants.contains(ind));
+ _tensor_mgr->claimPlan(ind, size);
}
-void TensorBuilder::notifyLastUse(const model::OperandIndex &ind) { _mem_mgr->releasePlan(ind); }
+void TensorBuilder::notifyLastUse(const model::OperandIndex &ind) { _tensor_mgr->releasePlan(ind); }
-void TensorBuilder::prepare(void) { _mem_mgr->allocate(); }
+void TensorBuilder::prepare(void)
+{
+ _tensor_mgr->allocateConsts();
+ _tensor_mgr->allocateNonconsts();
+}
+// TODO Remove this
void TensorBuilder::allocate(void)
{
// NOTE For now nothing to do. Allocation is done in prepare stage, which is not appropriate
- // This is because CPU kernels require `ITensor`s to be allocated before Kernel Generation.
+ // This is because SRCN kernels require `ITensor`s to be allocated before Kernel Generation.
+}
+
+void TensorBuilder::allocateConsts()
+{
+ // NOTE For now nothing to do. Allocation is done in prepare stage, which is not appropriate
+ // This is because SRCN kernels require `ITensor`s to be allocated before Kernel Generation.
+}
+
+void TensorBuilder::allocateNonconsts()
+{
+ // NOTE For now nothing to do. Allocation is done in prepare stage, which is not appropriate
+ // This is because SRCN kernels require `ITensor`s to be allocated before Kernel Generation.
}
std::shared_ptr<::neurun::backend::operand::ITensor>
TensorBuilder::tensorAt(const model::OperandIndex &ind)
{
- return _mem_mgr->tensors().at(ind);
+ return _tensor_mgr->at(ind);
}
std::shared_ptr<backend::operand::IObject> TensorBuilder::wrapTensor(const model::OperandIndex &ind)
{
- return _mem_mgr->wrapTensor(ind);
+ return _tensor_mgr->wrapTensor(ind);
}
-void TensorBuilder::iterate(const IterateFunction &fn)
-{
- for (auto it : _mem_mgr->tensors())
- {
- fn(it.first);
- }
-}
+void TensorBuilder::iterate(const IterateFunction &fn) { _tensor_mgr->iterate(fn); }
std::shared_ptr<operand::Tensor> TensorBuilder::at(const ::neurun::model::OperandIndex &ind)
{
- return _mem_mgr->tensors().at(ind);
+ return _tensor_mgr->at(ind);
}
-std::unique_ptr<IMemoryManager> TensorBuilder::releaseMemoryManager(void)
+std::unique_ptr<ITensorManager> TensorBuilder::releaseTensorManager(void)
{
- return std::move(_mem_mgr);
+ return std::move(_tensor_mgr);
}
} // namespace srcn
/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include <backend/operand/Object.h>
#include "operand/Tensor.h"
#include "model/OperandIndexMap.h"
-#include "MemoryManager.h"
+#include "TensorManager.h"
namespace neurun
{
const compiler::SubTensorInfo &info) override;
void notifyFirstUse(const model::OperandIndex &) override;
-
void notifyLastUse(const model::OperandIndex &) override;
void prepare(void) override;
-
- void allocate(void) override;
-
- // TODO Fill these
- void allocateConsts() override {}
- void allocateNonconsts() override {}
- void postFunctionPrepare() override {}
- void finalize() override {}
+ void allocate(void) override; // TODO Remove this
+ void allocateConsts() override;
+ void allocateNonconsts() override;
+ void postFunctionPrepare() override { /* DO NOTHING */}
+ void finalize() override { /* DO NOTHING */}
std::shared_ptr<::neurun::backend::operand::ITensor>
tensorAt(const model::OperandIndex &ind) override;
void iterate(const IterateFunction &fn) override;
- void preVisit(const model::Operation &) override {}
- void postVisit(const model::Operation &) override {}
+ void preVisit(const model::Operation &) override { /* DO NOTHING */}
+ void postVisit(const model::Operation &) override { /* DO NOTHING */}
- std::unique_ptr<IMemoryManager> releaseMemoryManager(void) override;
+ std::unique_ptr<ITensorManager> releaseTensorManager(void) override;
std::shared_ptr<operand::Tensor> at(const ::neurun::model::OperandIndex &ind);
private:
- std::unique_ptr<MemoryManager> _mem_mgr;
+ std::unique_ptr<TensorManager> _tensor_mgr;
model::OperandIndexMap<model::OperandInfo> _tensor_info_map;
+ model::OperandIndexMap<std::pair<model::Layout, model::Layout>> _tensor_layouts_map;
+ model::OperandIndexSequence _constants;
};
} // namespace srcn
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TensorManager.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+TensorManager::TensorManager() : _const_mgr{new MemoryManager()}, _nonconst_mgr{new MemoryManager()}
+{
+ // DO NOTHING
+}
+
+void TensorManager::allocateConsts(void) { _const_mgr->allocate(); }
+
+void TensorManager::allocateNonconsts(void) { _nonconst_mgr->allocate(); }
+
+void TensorManager::deallocateConsts(void) { _const_mgr->deallocate(); }
+
+void TensorManager::deallocateNonconsts(void) { _nonconst_mgr->deallocate(); }
+
+void TensorManager::buildTensor(const model::OperandIndex &ind,
+ const model::OperandInfo &tensor_info, bool as_const)
+{
+ assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end());
+ if (as_const)
+ {
+ _const_mgr->buildTensor(ind, tensor_info);
+ _ind_to_mgr.insert({ind, *_const_mgr});
+ }
+ else
+ {
+ _nonconst_mgr->buildTensor(ind, tensor_info);
+ _ind_to_mgr.insert({ind, *_nonconst_mgr});
+ }
+}
+
+void TensorManager::claimPlan(const model::OperandIndex &ind, uint32_t size)
+{
+ assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+ _ind_to_mgr.at(ind).claimPlan(ind, size);
+}
+
+void TensorManager::releasePlan(const model::OperandIndex &ind)
+{
+ assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+ _ind_to_mgr.at(ind).releasePlan(ind);
+}
+
+std::shared_ptr<backend::operand::IObject> TensorManager::wrapTensor(const model::OperandIndex &ind)
+{
+ assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+ return _ind_to_mgr.at(ind).wrapTensor(ind);
+}
+
+std::shared_ptr<operand::Tensor> TensorManager::at(const ::neurun::model::OperandIndex &ind)
+{
+ assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+ return _ind_to_mgr.at(ind).tensors().at(ind);
+}
+
+model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &TensorManager::constTensors(void)
+{
+ return _const_mgr->tensors();
+}
+
+model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &TensorManager::nonconstTensors(void)
+{
+ return _nonconst_mgr->tensors();
+}
+
+void TensorManager::iterate(const std::function<void(const model::OperandIndex &)> &fn)
+{
+ for (auto it : _nonconst_mgr->tensors())
+ fn(it.first);
+
+ for (auto it : _const_mgr->tensors())
+ fn(it.first);
+}
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_SRCN_TENSOR_MANAGER_H__
+#define __NEURUN_BACKEND_SRCN_TENSOR_MANAGER_H__
+
+#include "backend/ITensorManager.h"
+#include "MemoryManager.h"
+#include "model/OperandIndexMap.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+class TensorManager : public backend::ITensorManager
+{
+public:
+ TensorManager();
+ virtual ~TensorManager() = default;
+
+ void allocateConsts(void) override;
+ void allocateNonconsts(void) override;
+ void deallocateConsts(void) override;
+ void deallocateNonconsts(void) override;
+
+ void buildTensor(const model::OperandIndex &ind, const model::OperandInfo &tensor_info,
+ bool as_const);
+
+ void claimPlan(const model::OperandIndex &ind, uint32_t size);
+ void releasePlan(const model::OperandIndex &ind);
+
+ std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind);
+ std::shared_ptr<operand::Tensor> at(const ::neurun::model::OperandIndex &ind);
+
+ model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &constTensors(void);
+ model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &nonconstTensors(void);
+
+ void iterate(const std::function<void(const model::OperandIndex &)> &fn);
+
+private:
+ std::unique_ptr<MemoryManager> _const_mgr;
+ std::unique_ptr<MemoryManager> _nonconst_mgr;
+ model::OperandIndexMap<MemoryManager &> _ind_to_mgr;
+};
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_SRCN_TENSOR_MANAGER_H__
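
The new srcn TensorManager above is a thin façade that routes each operand to one of two MemoryManagers (constant vs. non-constant) and forwards planning calls to whichever pool owns the index. A minimal usage sketch (not part of the patch), mirroring what the srcn TensorBuilder now does in registerTensorInfo() and notifyFirstUse(); the operand indices and infos are hypothetical placeholders.

```cpp
#include "TensorManager.h"

// Sketch only: `kernel`, `ifm`, and their OperandInfos are hypothetical operands.
void tensor_manager_usage(const neurun::model::OperandIndex &kernel,
                          const neurun::model::OperandInfo &kernel_info,
                          const neurun::model::OperandIndex &ifm,
                          const neurun::model::OperandInfo &ifm_info)
{
  neurun::backend::srcn::TensorManager mgr;

  // Constants and non-constants are registered into separate MemoryManagers.
  mgr.buildTensor(kernel, kernel_info, /*as_const=*/true);
  mgr.buildTensor(ifm, ifm_info, /*as_const=*/false);

  // Memory planning is unchanged; claim/release are forwarded to the owning pool.
  mgr.claimPlan(kernel, kernel_info.total_size());
  mgr.claimPlan(ifm, ifm_info.total_size());
  mgr.releasePlan(ifm);

  // Allocation now happens per pool instead of through a single allocate() call.
  mgr.allocateConsts();
  mgr.allocateNonconsts();
}
```
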
#include "operand/IObject.h"
#include "operand/ITensor.h"
#include "compiler/SubTensorInfo.h"
-#include "IMemoryManager.h"
+#include "ITensorManager.h"
namespace neurun
{
virtual void preVisit(const model::Operation &) = 0;
virtual void postVisit(const model::Operation &) = 0;
- virtual std::unique_ptr<IMemoryManager> releaseMemoryManager(void) = 0;
+ virtual std::unique_ptr<ITensorManager> releaseTensorManager(void) = 0;
};
} // namespace backend
namespace backend
{
-// TODO Replace existing MemoryManager which TensorBuilder uses by this
+// NOTE The name ITensorManager is still under discussion.
+// Suggestions for a better name are welcome.
/**
* @brief Interface as an abstract tensor manager which has MemoryManager
*/
for (auto &tensor_builder : tensor_builders)
{
tensor_builder->prepare();
-
- // Wrap tensors as Object and store them to plan
- tensor_builder->iterate([&](const model::OperandIndex &index) {
- auto object = tensor_builder->wrapTensor(index);
- operand_context->set(index, object);
- });
}
// Generate initializers
linear->getBackendContext(backend)->constant_initializer->run();
}
- // Prepare each MemoryManager on each backend
- auto mem_mgrs = nnfw::cpp14::make_unique<backend::MemoryManagerSet>();
for (auto &tensor_builder : tensor_builders)
{
- mem_mgrs->insert(tensor_builder->releaseMemoryManager());
+ tensor_builder->finalize();
+ }
+
+ // Wrap tensors as Object and store them to plan
+ for (auto &tensor_builder : tensor_builders)
+ {
+ tensor_builder->iterate([&](const model::OperandIndex &index) {
+ auto object = tensor_builder->wrapTensor(index);
+ operand_context->set(index, object);
+ });
+ }
+
+ // Prepare each TensorManager on each backend
+ auto tensor_mgrs = nnfw::cpp14::make_unique<backend::TensorManagerSet>();
+ for (auto &tensor_builder : tensor_builders)
+ {
+ tensor_mgrs->insert(tensor_builder->releaseTensorManager());
}
- return new exec::LinearExecutor{
- graph.shareModel(), linear->releaseSubgraphs(), operand_context, linear->releaseLowerInfo(),
- std::move(mem_mgrs), linear->releaseElements(), function_sequence};
+ return new exec::LinearExecutor{graph.shareModel(), linear->releaseSubgraphs(),
+ operand_context, linear->releaseLowerInfo(),
+ std::move(tensor_mgrs), linear->releaseElements(),
+ function_sequence};
}
exec::IExecutor *ExecutorFactory::createDataflowExecutor(graph::Graph &graph, bool parallel)
frontend_layout = graph.subgraphs().at(graph.subgraphs().getOperation(use)).getLayout();
}
const auto backend_layout = lower_info->def_factors().getOnlyElement().layout();
- tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout, false);
+ tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout,
+ obj.isConstant());
// To make this never be deallocated, this is a workaround to use static memory planner
tensor_builder->notifyFirstUse(ind);
}
for (auto &tensor_builder : tensor_builders)
{
tensor_builder->prepare();
-
- // Wrap tensors as Object and store them to plan
- tensor_builder->iterate([&](const model::OperandIndex &index) {
- auto object = tensor_builder->wrapTensor(index);
- operand_context->set(index, object);
- });
}
class ExecutionBuilder : public IExecutionBuilder
auto lower_info = graph.releaseLowerInfo();
- // Prepare each MemoryManager on each backend
- auto mem_mgrs = nnfw::cpp14::make_unique<backend::MemoryManagerSet>();
for (auto &tensor_builder : tensor_builders)
{
- mem_mgrs->insert(tensor_builder->releaseMemoryManager());
+ tensor_builder->finalize();
+ }
+
+ // Wrap tensors as Object and store them to plan
+ for (auto &tensor_builder : tensor_builders)
+ {
+ tensor_builder->iterate([&](const model::OperandIndex &index) {
+ auto object = tensor_builder->wrapTensor(index);
+ operand_context->set(index, object);
+ });
+ }
+
+ // Prepare each TensorManager on each backend
+ auto tensor_mgrs = nnfw::cpp14::make_unique<backend::TensorManagerSet>();
+ for (auto &tensor_builder : tensor_builders)
+ {
+ tensor_mgrs->insert(tensor_builder->releaseTensorManager());
}
if (parallel)
{
return new exec::ParallelExecutor{
- graph.shareModel(), graph.releaseSubgraphs(),
- operand_context, std::move(lower_info),
- std::move(mem_mgrs), std::move(execution_builder->releaseCodeMap())};
+ graph.shareModel(), graph.releaseSubgraphs(),
+ operand_context, std::move(lower_info),
+ std::move(tensor_mgrs), std::move(execution_builder->releaseCodeMap())};
}
else
{
auto exec = new exec::DataflowExecutor{
- graph.shareModel(), graph.releaseSubgraphs(),
- operand_context, std::move(lower_info),
- std::move(mem_mgrs), std::move(execution_builder->releaseCodeMap())};
+ graph.shareModel(), graph.releaseSubgraphs(),
+ operand_context, std::move(lower_info),
+ std::move(tensor_mgrs), std::move(execution_builder->releaseCodeMap())};
if (util::getConfigBool(util::config::PROFILING_MODE))
{
auto et = std::make_shared<backend::ExecTime>(backend::BackendManager::instance().getAll());
uses_map[ind] = obj.getUses().size();
def_map[ind] = obj.getDef().size(); // should be 1 or 0
- if (obj.isConstant())
+ bool is_const = obj.isConstant();
+ if (is_const)
{
constants.append(ind);
}
frontend_layout = _subgraphs->at(_subgraphs->getOperation(use)).getLayout();
}
const auto backend_layout = lower_info->def_factors().getOnlyElement().layout();
- tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout, false);
+ tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout, is_const);
}
tensor_builder_map[ind] = tensor_builder;
std::unique_ptr<model::Subgraphs> subgraphs,
const std::shared_ptr<compiler::OperandContext> &operand_context,
std::unique_ptr<graph::LowerInfoMap> lower_info,
- std::unique_ptr<backend::MemoryManagerSet> mem_mgrs,
+ std::unique_ptr<backend::TensorManagerSet> tensor_mgrs,
CodeMap &&code_map)
: ExecutorBase{model, std::move(subgraphs), operand_context, std::move(lower_info),
- std::move(mem_mgrs)},
+ std::move(tensor_mgrs)},
_code_map{std::move(code_map)}
{
VERBOSE(DataflowExecutor) << "Constructing Dataflow Executor" << std::endl;
std::unique_ptr<model::Subgraphs> subgraphs,
const std::shared_ptr<compiler::OperandContext> &operand_context,
std::unique_ptr<graph::LowerInfoMap> lower_info,
- std::unique_ptr<backend::MemoryManagerSet> mem_mgrs, CodeMap &&code_map);
+ std::unique_ptr<backend::TensorManagerSet> tensor_mgrs, CodeMap &&code_map);
void executeImpl() override;
std::unique_ptr<model::Subgraphs> subgraphs,
const std::shared_ptr<compiler::OperandContext> &operand_context,
std::unique_ptr<graph::LowerInfoMap> lower_info,
- std::unique_ptr<backend::MemoryManagerSet> mem_mgrs)
+ std::unique_ptr<backend::TensorManagerSet> tensor_mgrs)
: _observers(), _model{model}, _subgraphs{std::move(subgraphs)},
_operand_context{operand_context}, _lower_info{std::move(lower_info)},
- _mem_mgrs{std::move(mem_mgrs)}, _mutex()
+ _tensor_mgrs{std::move(tensor_mgrs)}, _mutex()
{
// DO NOTHING
}
#include "model/Subgraph.h"
#include "backend/ExecTime.h"
#include "exec/IFunction.h"
-#include "backend/IMemoryManager.h"
+#include "backend/ITensorManager.h"
#include <list>
namespace neurun
std::unique_ptr<model::Subgraphs> subgraphs,
const std::shared_ptr<compiler::OperandContext> &operand_context,
std::unique_ptr<graph::LowerInfoMap> lower_info,
- std::unique_ptr<backend::MemoryManagerSet> mem_mgrs);
+ std::unique_ptr<backend::TensorManagerSet> tensor_mgrs);
virtual ~ExecutorBase() = default;
std::unique_ptr<model::Subgraphs> _subgraphs;
std::shared_ptr<compiler::OperandContext> _operand_context;
std::unique_ptr<graph::LowerInfoMap> _lower_info;
- std::unique_ptr<backend::MemoryManagerSet> _mem_mgrs;
+ std::unique_ptr<backend::TensorManagerSet> _tensor_mgrs;
std::mutex _mutex;
};
std::unique_ptr<model::Subgraphs> subgraphs,
const std::shared_ptr<compiler::OperandContext> &operand_context,
std::unique_ptr<graph::LowerInfoMap> lower_info,
- std::unique_ptr<backend::MemoryManagerSet> mem_mgrs,
+ std::unique_ptr<backend::TensorManagerSet> tensor_mgrs,
std::vector<compiler::Linear::Element> &&elements,
const std::shared_ptr<exec::FunctionSequence> &fn_seq)
: ExecutorBase{model, std::move(subgraphs), operand_context, std::move(lower_info),
- std::move(mem_mgrs)},
+ std::move(tensor_mgrs)},
_fn_seq{fn_seq}, _elements{std::move(elements)}
{
}
std::unique_ptr<model::Subgraphs> subgraphs,
const std::shared_ptr<compiler::OperandContext> &operand_context,
std::unique_ptr<graph::LowerInfoMap> lower_info,
- std::unique_ptr<backend::MemoryManagerSet> mem_mgrs,
+ std::unique_ptr<backend::TensorManagerSet> tensor_mgrs,
CodeMap &&code_map)
: DataflowExecutor{model,
std::move(subgraphs),
operand_context,
std::move(lower_info),
- std::move(mem_mgrs),
+ std::move(tensor_mgrs),
std::move(code_map)}
{
VERBOSE(ParallelExecutor) << "Constructing Parallel Executor" << std::endl;
std::unique_ptr<model::Subgraphs> subgraphs,
const std::shared_ptr<compiler::OperandContext> &operand_context,
std::unique_ptr<graph::LowerInfoMap> lower_info,
- std::unique_ptr<backend::MemoryManagerSet> mem_mgrs, CodeMap &&code_map);
+ std::unique_ptr<backend::TensorManagerSet> tensor_mgrs, CodeMap &&code_map);
void executeImpl() override;