From: Sergei Barannikov/AI Tools Lab /SRR/Engineer/Samsung Electronics
Date: Mon, 9 Dec 2019 03:44:11 +0000 (+0300)
Subject: [neurun] Move some files from model into ir directory (#9448)
X-Git-Tag: accepted/tizen/unified/20191209.144032~3
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=8e7cdbd6b79470eab62d5df0b1a572f0249795ae;p=platform%2Fcore%2Fml%2Fnnfw.git

[neurun] Move some files from model into ir directory (#9448)

* Move some files from `model` into `ir` directory.
* Move contained symbols into `neurun::ir` namespace, fixing uses where possible.

Signed-off-by: Sergei Barannikov
---
diff --git a/runtime/neurun/api/src/nnfw_api_internal.cc b/runtime/neurun/api/src/nnfw_api_internal.cc
index dcf14a2..bfcc4d7 100644
--- a/runtime/neurun/api/src/nnfw_api_internal.cc
+++ b/runtime/neurun/api/src/nnfw_api_internal.cc
@@ -172,7 +172,7 @@ NNFW_STATUS nnfw_session::set_input(uint32_t index, NNFW_TYPE /*type*/, const vo
 {
   try
   {
-    _execution->setInput(neurun::model::IOIndex(index), buffer, length);
+    _execution->setInput(neurun::ir::IOIndex(index), buffer, length);
   }
   catch (...)
   {
@@ -187,7 +187,7 @@ NNFW_STATUS nnfw_session::set_output(uint32_t index, NNFW_TYPE /*type*/, void *b
 {
   try
   {
-    _execution->setOutput(neurun::model::IOIndex(index), buffer, length);
+    _execution->setOutput(neurun::ir::IOIndex(index), buffer, length);
   }
   catch (...)
   {
@@ -245,7 +245,7 @@ NNFW_STATUS nnfw_session::set_input_layout(uint32_t index, NNFW_LAYOUT layout)
       std::cerr << "Error during nnfw_session::set_input_layout, not supported layout" << std::endl;
       return NNFW_STATUS_ERROR;
     }
-    _execution->setInputLayout(neurun::model::IOIndex(index), convertLayout(layout));
+    _execution->setInputLayout(neurun::ir::IOIndex(index), convertLayout(layout));
   }
   catch (...)
   {
@@ -266,7 +266,7 @@ NNFW_STATUS nnfw_session::set_output_layout(uint32_t index, NNFW_LAYOUT layout)
                 << std::endl;
       return NNFW_STATUS_ERROR;
     }
-    _execution->setOutputLayout(neurun::model::IOIndex(index), convertLayout(layout));
+    _execution->setOutputLayout(neurun::ir::IOIndex(index), convertLayout(layout));
   }
   catch (...)
   {
diff --git a/runtime/neurun/backend/acl_cl/Backend.h b/runtime/neurun/backend/acl_cl/Backend.h
index feb3faf..2033b42 100644
--- a/runtime/neurun/backend/acl_cl/Backend.h
+++ b/runtime/neurun/backend/acl_cl/Backend.h
@@ -19,7 +19,7 @@
 #include
 #include
-#include <model/Operands.h>
+#include <ir/Operands.h>
 #include "Config.h"
 #include "ConstantInitializer.h"
@@ -43,7 +43,7 @@ public:
   std::shared_ptr config() const override { return _config; }

   std::unique_ptr
-  newContext(const model::Operands &operands,
+  newContext(const ir::Operands &operands,
              const std::shared_ptr &) const override
   {
     auto tensor_builder = std::make_shared(createTensorManager());
diff --git a/runtime/neurun/backend/acl_cl/ConstantInitializer.cc b/runtime/neurun/backend/acl_cl/ConstantInitializer.cc
index 52621f2..aa4254c 100644
--- a/runtime/neurun/backend/acl_cl/ConstantInitializer.cc
+++ b/runtime/neurun/backend/acl_cl/ConstantInitializer.cc
@@ -23,7 +23,7 @@ namespace backend
 namespace acl_cl
 {

-ConstantInitializer::ConstantInitializer(const model::Operands &operands,
+ConstantInitializer::ConstantInitializer(const ir::Operands &operands,
                                          const std::shared_ptr &tensor_builder)
     : _operands{operands}, _tensor_builder{tensor_builder}
 {
@@ -37,8 +37,7 @@ void ConstantInitializer::visit(const model::operation::BatchToSpaceND &node)

   if (block_size_obj.isConstant())
   {
-    _init_map[block_size_index] = [](const model::Operand &model_obj,
-                                     backend::operand::ITensor &obj) {
+    _init_map[block_size_index] = [](const ir::Operand &model_obj, backend::operand::ITensor &obj) {
       const auto &shape = model_obj.shape();
       const auto base = reinterpret_cast(model_obj.data().base());
       assert(model_obj.shape().rank() == 1);
@@ -217,8 +216,7 @@ void ConstantInitializer::visit(const model::operation::SpaceToBatchND &node)

   if (block_size_obj.isConstant())
   {
-    _init_map[block_size_index] = [](const model::Operand &model_obj,
-                                     backend::operand::ITensor &obj) {
+    _init_map[block_size_index] = [](const ir::Operand &model_obj, backend::operand::ITensor &obj) {
       const auto &shape = model_obj.shape();
       const auto base = reinterpret_cast(model_obj.data().base());
       assert(model_obj.shape().rank() == 1);
@@ -238,8 +236,7 @@ void ConstantInitializer::visit(const model::operation::SpaceToBatchND &node)
   const auto &paddings_obj = _operands.at(paddings_index);
   if (paddings_obj.isConstant())
   {
-    _init_map[paddings_index] = [](const model::Operand &model_obj,
-                                   backend::operand::ITensor &obj) {
+    _init_map[paddings_index] = [](const ir::Operand &model_obj, backend::operand::ITensor &obj) {
       const auto &shape = model_obj.shape();
       const auto base = reinterpret_cast(model_obj.data().base());
       assert(model_obj.shape().rank() == 2);
diff --git a/runtime/neurun/backend/acl_cl/ConstantInitializer.h b/runtime/neurun/backend/acl_cl/ConstantInitializer.h
index 59d7f38..bb38e5e 100644
--- a/runtime/neurun/backend/acl_cl/ConstantInitializer.h
+++ b/runtime/neurun/backend/acl_cl/ConstantInitializer.h
@@ -18,7 +18,7 @@
 #define __NEURUN_COMPILER_ACL_CL_CONSTANT_INITIALIZER_H__

 #include
-#include <model/Operands.h>
+#include <ir/Operands.h>
 #include "TensorBuilder.h"

 namespace neurun
@@ -31,7 +31,7 @@ namespace acl_cl
 class ConstantInitializer : public IConstantInitializer
 {
public:
-  ConstantInitializer(const model::Operands &operands,
+  ConstantInitializer(const ir::Operands &operands,
                       const std::shared_ptr &tensor_builder);

public:
@@ -48,11 +48,11 @@ public:
   void visit(const model::operation::TransposeConv &) override;

private:
-  const model::Operands &operands() const override { return _operands; }
+  const ir::Operands &operands() const override { return _operands; }
   std::shared_ptr tensor_builder() const override { return _tensor_builder; }

private:
-  const model::Operands &_operands;
+  const ir::Operands &_operands;
   std::shared_ptr _tensor_builder;
 };
diff --git a/runtime/neurun/backend/acl_cl/KernelGenerator.cc b/runtime/neurun/backend/acl_cl/KernelGenerator.cc
index 436366a..9e2126e 100644
--- a/runtime/neurun/backend/acl_cl/KernelGenerator.cc
+++ b/runtime/neurun/backend/acl_cl/KernelGenerator.cc
@@ -24,7 +24,7 @@
 #include

 #include "kernel/ConcatLayer.h"
-#include "model/Index.h"
+#include "ir/Index.h"
 #include "ir/DataType.h"
 #include "ir/InternalType.h"
 #include "compiler/IExecutionBuilder.h"
@@ -143,7 +143,7 @@ void ActivationBuilder::append(ir::Activation code, ::arm_compute::ICLTensor *if
 //
 // KernelGenerator
 //
-KernelGenerator::KernelGenerator(const neurun::model::Operands &ctx,
+KernelGenerator::KernelGenerator(const ir::Operands &ctx,
                                  const std::shared_ptr &tensor_builder)
     : _ctx(ctx), _tensor_builder(tensor_builder), _current_subg_layout(ir::Layout::UNKNOWN)
 {
@@ -386,7 +386,7 @@ void KernelGenerator::visit(const model::operation::Concat &node)
 {
   const auto ofm_index{node.getOutputs().at(0)};

-  std::vector<model::OperandIndex> input_indexes;
+  std::vector<ir::OperandIndex> input_indexes;
   for (const auto &input : node.getInputs())
     input_indexes.emplace_back(input);
@@ -1125,7 +1125,7 @@ void KernelGenerator::visit(const model::operation::Pack &node)
   const auto output_rank = _ctx.at(output_index).shape().rank();

-  std::vector<model::OperandIndex> input_indexes;
+  std::vector<ir::OperandIndex> input_indexes;
   for (const auto &input_index : node.getInputs())
     input_indexes.emplace_back(input_index);
@@ -1941,7 +1941,7 @@ void KernelGenerator::visit(const model::operation::Split &node)
   assert(node.param().num_splits == static_cast(node.getOutputs().size()));

   const auto ifm_rank = _ctx.at(ifm_index).shape().rank();
-  std::vector<model::OperandIndex> output_indexes;
+  std::vector<ir::OperandIndex> output_indexes;
   for (const auto &output : node.getOutputs())
     output_indexes.emplace_back(output);
@@ -1971,7 +1971,7 @@ void KernelGenerator::visit(const model::operation::Unpack &node)
   const auto input_rank = _ctx.at(input_index).shape().rank();

-  std::vector<model::OperandIndex> output_indexes;
+  std::vector<ir::OperandIndex> output_indexes;
   for (const auto &output_index : node.getOutputs())
     output_indexes.emplace_back(output_index);
diff --git a/runtime/neurun/backend/acl_cl/KernelGenerator.h b/runtime/neurun/backend/acl_cl/KernelGenerator.h
index df178a8..c658535 100644
--- a/runtime/neurun/backend/acl_cl/KernelGenerator.h
+++ b/runtime/neurun/backend/acl_cl/KernelGenerator.h
@@ -19,7 +19,7 @@
 #include

-#include "model/Operands.h"
+#include "ir/Operands.h"
 #include "TensorBuilder.h"

 namespace neurun
@@ -32,8 +32,7 @@ namespace acl_cl
 class KernelGenerator : public IKernelGenerator
 {
public:
-  KernelGenerator(const neurun::model::Operands &ctx,
-                  const std::shared_ptr &tensor_builder);
+  KernelGenerator(const ir::Operands &ctx, const std::shared_ptr &tensor_builder);

   void visit(const model::Subgraph &) override;
   void visit(const model::operation::BatchToSpaceND &) override;
@@ -98,7 +97,7 @@ public:
   void visit(const model::operation::Pad &) override;

private:
-  const neurun::model::Operands &_ctx;
+  const ir::Operands &_ctx;
   std::shared_ptr _tensor_builder;
   ir::Layout _current_subg_layout;
 };
diff --git a/runtime/neurun/backend/acl_cl/ShapeFixer.cc b/runtime/neurun/backend/acl_cl/ShapeFixer.cc
index 674b3d6..99e2464 100644
--- a/runtime/neurun/backend/acl_cl/ShapeFixer.cc
+++ b/runtime/neurun/backend/acl_cl/ShapeFixer.cc
@@ -24,7 +24,7 @@
 #include

 #include "kernel/ConcatLayer.h"
-#include "model/Index.h" +#include "ir/Index.h" #include "compiler/IExecutionBuilder.h" #include "exec/NopFunction.h" #include "util/logging.h" @@ -42,7 +42,7 @@ namespace acl_cl using ::neurun::backend::acl_common::asAclFunction; -ShapeFixer::ShapeFixer(const neurun::model::Operands &ctx, +ShapeFixer::ShapeFixer(const ir::Operands &ctx, const std::shared_ptr &tensor_builder) : _ctx(ctx), _tensor_builder(tensor_builder) { diff --git a/runtime/neurun/backend/acl_cl/ShapeFixer.h b/runtime/neurun/backend/acl_cl/ShapeFixer.h index b7256f7..ac384af 100644 --- a/runtime/neurun/backend/acl_cl/ShapeFixer.h +++ b/runtime/neurun/backend/acl_cl/ShapeFixer.h @@ -19,7 +19,7 @@ #include -#include "model/Operands.h" +#include "ir/Operands.h" #include "TensorBuilder.h" namespace neurun @@ -32,8 +32,7 @@ namespace acl_cl class ShapeFixer : public IShapeFixer { public: - ShapeFixer(const neurun::model::Operands &ctx, - const std::shared_ptr &tensor_builder); + ShapeFixer(const ir::Operands &ctx, const std::shared_ptr &tensor_builder); std::shared_ptr tensor_builder() override { return _tensor_builder; } @@ -99,7 +98,7 @@ public: void visit(const model::operation::Pad &) override; private: - const neurun::model::Operands &_ctx; + const ir::Operands &_ctx; std::shared_ptr _tensor_builder; }; diff --git a/runtime/neurun/backend/acl_cl/TensorRegister.h b/runtime/neurun/backend/acl_cl/TensorRegister.h index a523f2d..02de455 100644 --- a/runtime/neurun/backend/acl_cl/TensorRegister.h +++ b/runtime/neurun/backend/acl_cl/TensorRegister.h @@ -31,14 +31,13 @@ namespace acl_cl class TensorRegister : public acl_common::AclTensorRegister { public: - TensorRegister(const model::Operands &operands, - const std::shared_ptr &tensor_builder) + TensorRegister(const ir::Operands &operands, const std::shared_ptr &tensor_builder) : acl_common::AclTensorRegister{operands, tensor_builder} { // DO NOTHING } - void setUsesCount(const model::OperandIndex &ind, size_t num_uses) const override + void setUsesCount(const ir::OperandIndex &ind, size_t num_uses) const override { nnfw::misc::polymorphic_downcast(tensor_builder().get()) ->setUsesCount(ind, num_uses); diff --git a/runtime/neurun/backend/acl_common/AclLinearMemoryManager.h b/runtime/neurun/backend/acl_common/AclLinearMemoryManager.h index 3ef9358..b55121d 100644 --- a/runtime/neurun/backend/acl_common/AclLinearMemoryManager.h +++ b/runtime/neurun/backend/acl_common/AclLinearMemoryManager.h @@ -20,7 +20,7 @@ #include #include "AclMemoryManager.h" -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" #include "util/logging.h" namespace @@ -75,7 +75,7 @@ public: _io_manager->clear(); } - virtual void startLifetime(const model::OperandIndex &ind) override + virtual void startLifetime(const ir::OperandIndex &ind) override { auto &tensors = this->tensors(); assert(tensors.find(ind) != tensors.end()); @@ -86,7 +86,7 @@ public: _io_group->manage(tensor->handle()); } - virtual void finishLifetime(const model::OperandIndex &ind) override + virtual void finishLifetime(const ir::OperandIndex &ind) override { auto &tensors = this->tensors(); assert(tensors.find(ind) != tensors.end()); diff --git a/runtime/neurun/backend/acl_common/AclMemoryManager.h b/runtime/neurun/backend/acl_common/AclMemoryManager.h index 910a990..40ce2e6 100644 --- a/runtime/neurun/backend/acl_common/AclMemoryManager.h +++ b/runtime/neurun/backend/acl_common/AclMemoryManager.h @@ -22,7 +22,7 @@ #include #include "backend/IMemoryManager.h" -#include "model/OperandIndexMap.h" +#include 
"ir/OperandIndexMap.h" #include "Convert.h" #include "util/logging.h" @@ -62,18 +62,18 @@ public: } } - virtual void startLifetime(const model::OperandIndex &) { /* DO NOTHING */} - virtual void finishLifetime(const model::OperandIndex &) { /* DO NOTHING */} + virtual void startLifetime(const ir::OperandIndex &) { /* DO NOTHING */} + virtual void finishLifetime(const ir::OperandIndex &) { /* DO NOTHING */} - void buildTensor(const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info, - size_t rank, size_t num_uses) + void buildTensor(const ir::OperandIndex &ind, const ::arm_compute::TensorInfo &info, size_t rank, + size_t num_uses) { auto tensor = std::make_shared(info, rank, num_uses); _tensors[ind] = tensor; } - void buildSubtensor(std::shared_ptr parent_tensor, - const model::OperandIndex &child_ind, const ::arm_compute::TensorShape &shape, + void buildSubtensor(std::shared_ptr parent_tensor, const ir::OperandIndex &child_ind, + const ::arm_compute::TensorShape &shape, const ::arm_compute::Coordinates &coordinates, size_t rank, bool extent_parent) { @@ -82,13 +82,13 @@ public: _subtensors[child_ind] = subtensor; } - model::OperandIndexMap> &tensors(void) { return _tensors; } + ir::OperandIndexMap> &tensors(void) { return _tensors; } - model::OperandIndexMap> &subtensors(void) { return _subtensors; } + ir::OperandIndexMap> &subtensors(void) { return _subtensors; } private: - model::OperandIndexMap> _tensors; - model::OperandIndexMap> _subtensors; + ir::OperandIndexMap> _tensors; + ir::OperandIndexMap> _subtensors; }; } // namespace acl_common diff --git a/runtime/neurun/backend/acl_common/AclTensorManager.h b/runtime/neurun/backend/acl_common/AclTensorManager.h index ca77046..48a4c25 100644 --- a/runtime/neurun/backend/acl_common/AclTensorManager.h +++ b/runtime/neurun/backend/acl_common/AclTensorManager.h @@ -22,7 +22,7 @@ #include "backend/ITensorManager.h" #include "AclMemoryManager.h" #include "AclInternalBufferManager.h" -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" namespace neurun { @@ -50,27 +50,27 @@ public: void allocateInternalBufferManager(void); void deallocateInternalBufferManager(void); - void buildTensor(const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info, - size_t rank, bool as_const, size_t num_uses); - void buildSubtensor(const model::OperandIndex &parent, const model::OperandIndex &child, + void buildTensor(const ir::OperandIndex &ind, const ::arm_compute::TensorInfo &info, size_t rank, + bool as_const, size_t num_uses); + void buildSubtensor(const ir::OperandIndex &parent, const ir::OperandIndex &child, const ::arm_compute::TensorShape &shape, const ::arm_compute::Coordinates &coordinates, size_t rank, bool extent_parent); - std::shared_ptr findTensorAsParent(const model::OperandIndex &ind); + std::shared_ptr findTensorAsParent(const ir::OperandIndex &ind); - void startLifetime(const model::OperandIndex &ind); - void finishLifetime(const model::OperandIndex &ind); + void startLifetime(const ir::OperandIndex &ind); + void finishLifetime(const ir::OperandIndex &ind); - std::shared_ptr at(const ::neurun::model::OperandIndex &ind); + std::shared_ptr at(const ir::OperandIndex &ind); - model::OperandIndexMap> &constTensors(void); - model::OperandIndexMap> &nonconstTensors(void); - model::OperandIndexMap> &nonconstSubtensors(void); + ir::OperandIndexMap> &constTensors(void); + ir::OperandIndexMap> &nonconstTensors(void); + ir::OperandIndexMap> &nonconstSubtensors(void); std::shared_ptr<::arm_compute::IMemoryManager> 
internal_buffer_manager(void); - void iterate(const std::function &fn); + void iterate(const std::function &fn); void tryDeallocConstants(void); @@ -78,7 +78,7 @@ private: std::unique_ptr _const_mgr; std::unique_ptr _nonconst_mgr; std::unique_ptr _inter_mgr; - model::OperandIndexMap _ind_to_mgr; + ir::OperandIndexMap _ind_to_mgr; }; } // namespace acl_common @@ -142,8 +142,8 @@ void AclTensorManager::deallocateInternalBuffe template void AclTensorManager::buildTensor( - const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info, size_t rank, - bool as_const, size_t num_uses) + const ir::OperandIndex &ind, const ::arm_compute::TensorInfo &info, size_t rank, bool as_const, + size_t num_uses) { assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end()); if (as_const) @@ -160,7 +160,7 @@ void AclTensorManager::buildTensor( template void AclTensorManager::buildSubtensor( - const model::OperandIndex &parent, const model::OperandIndex &child, + const ir::OperandIndex &parent, const ir::OperandIndex &child, const ::arm_compute::TensorShape &shape, const ::arm_compute::Coordinates &coordinates, size_t rank, bool extent_parent) { @@ -172,8 +172,8 @@ void AclTensorManager::buildSubtensor( } template -std::shared_ptr AclTensorManager::findTensorAsParent( - const model::OperandIndex &ind) +std::shared_ptr +AclTensorManager::findTensorAsParent(const ir::OperandIndex &ind) { auto &tensors = _nonconst_mgr->tensors(); @@ -195,16 +195,14 @@ std::shared_ptr AclTensorManager::f } template -void AclTensorManager::startLifetime( - const model::OperandIndex &ind) +void AclTensorManager::startLifetime(const ir::OperandIndex &ind) { assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end()); _ind_to_mgr.at(ind).startLifetime(ind); } template -void AclTensorManager::finishLifetime( - const model::OperandIndex &ind) +void AclTensorManager::finishLifetime(const ir::OperandIndex &ind) { assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end()); _ind_to_mgr.at(ind).finishLifetime(ind); @@ -212,7 +210,7 @@ void AclTensorManager::finishLifetime( template std::shared_ptr -AclTensorManager::at(const ::neurun::model::OperandIndex &ind) +AclTensorManager::at(const ir::OperandIndex &ind) { assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end()); @@ -228,21 +226,21 @@ AclTensorManager::at(const ::neurun::model::Op } template -model::OperandIndexMap> & +ir::OperandIndexMap> & AclTensorManager::constTensors(void) { return _const_mgr->tensors(); } template -model::OperandIndexMap> & +ir::OperandIndexMap> & AclTensorManager::nonconstTensors(void) { return _nonconst_mgr->tensors(); } template -model::OperandIndexMap> & +ir::OperandIndexMap> & AclTensorManager::nonconstSubtensors(void) { return _nonconst_mgr->subtensors(); @@ -257,7 +255,7 @@ AclTensorManager::internal_buffer_manager(void template void AclTensorManager::iterate( - const std::function &fn) + const std::function &fn) { for (auto it : _nonconst_mgr->tensors()) fn(it.first); diff --git a/runtime/neurun/backend/acl_common/AclTensorRegister.cc b/runtime/neurun/backend/acl_common/AclTensorRegister.cc index b85cfe7..a2c2b9e 100644 --- a/runtime/neurun/backend/acl_common/AclTensorRegister.cc +++ b/runtime/neurun/backend/acl_common/AclTensorRegister.cc @@ -23,7 +23,7 @@ namespace backend namespace acl_common { -AclTensorRegister::AclTensorRegister(const model::Operands &operands, +AclTensorRegister::AclTensorRegister(const ir::Operands &operands, const std::shared_ptr &tensor_builder) : _operands{operands}, _tensor_builder{tensor_builder} { diff --git 
a/runtime/neurun/backend/acl_common/AclTensorRegister.h b/runtime/neurun/backend/acl_common/AclTensorRegister.h index 1c31625..5982839 100644 --- a/runtime/neurun/backend/acl_common/AclTensorRegister.h +++ b/runtime/neurun/backend/acl_common/AclTensorRegister.h @@ -29,7 +29,7 @@ namespace acl_common class AclTensorRegister : public ITensorRegister { protected: - AclTensorRegister(const model::Operands &operands, + AclTensorRegister(const ir::Operands &operands, const std::shared_ptr &tensor_builder); public: @@ -37,15 +37,15 @@ public: protected: void visit(const model::Subgraph &subgraph); - virtual void setUsesCount(const model::OperandIndex &ind, size_t num_uses) const = 0; + virtual void setUsesCount(const ir::OperandIndex &ind, size_t num_uses) const = 0; protected: - const model::Operands &operands() const override { return _operands; } + const ir::Operands &operands() const override { return _operands; } std::shared_ptr tensor_builder() const override { return _tensor_builder; } bool supportSubTensor() const final { return true; } private: - const model::Operands &_operands; + const ir::Operands &_operands; const std::shared_ptr _tensor_builder; }; diff --git a/runtime/neurun/backend/acl_common/Convert.h b/runtime/neurun/backend/acl_common/Convert.h index 33e6815..4c63815 100644 --- a/runtime/neurun/backend/acl_common/Convert.h +++ b/runtime/neurun/backend/acl_common/Convert.h @@ -23,7 +23,7 @@ #include "ir/Layout.h" #include "ir/InternalType.h" -#include "model/Operand.h" +#include "ir/Operand.h" #include "ir/Shape.h" #include "ir/TypeInfo.h" #include "misc/feature/Shape.h" diff --git a/runtime/neurun/backend/acl_common/TemplTensorBuilder.h b/runtime/neurun/backend/acl_common/TemplTensorBuilder.h index 41d70a4..6439ff0 100644 --- a/runtime/neurun/backend/acl_common/TemplTensorBuilder.h +++ b/runtime/neurun/backend/acl_common/TemplTensorBuilder.h @@ -22,7 +22,7 @@ #include #include -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" #include "AclTensorManager.h" #include "cpp14/memory.h" #include @@ -54,20 +54,20 @@ public: * @param[in] info Tensor information * @param[in] layout Tensor data layout */ - void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info, + void registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info, ir::Layout backend_layout, bool as_const) override; /** * @brief Register subtensor information to allocate on ACL-CL backend * @param[in] ind Operand index * @param[in] info Tensor information */ - void registerSubTensorInfo(const model::OperandIndex &ind, + void registerSubTensorInfo(const ir::OperandIndex &ind, const compiler::SubTensorInfo &info) override; - void notifyFirstUse(const model::OperandIndex &) override; - void notifyLastUse(const model::OperandIndex &) override; + void notifyFirstUse(const ir::OperandIndex &) override; + void notifyLastUse(const ir::OperandIndex &) override; - bool isRegistered(const model::OperandIndex &) const override; + bool isRegistered(const ir::OperandIndex &) const override; void prepare(void) override; void allocateConsts() override; @@ -76,7 +76,7 @@ public: void finalize() override; std::shared_ptr<::neurun::backend::operand::ITensor> - tensorAt(const model::OperandIndex &ind) override; + tensorAt(const ir::OperandIndex &ind) override; void iterate(const IterateFunction &fn) override; void preVisit(const model::Operation &node) override; @@ -84,20 +84,20 @@ public: std::unique_ptr releaseTensorManager(void) override; - std::shared_ptr at(const 
::neurun::model::OperandIndex &ind); + std::shared_ptr at(const ir::OperandIndex &ind); /** * @brief Check child tensor is allocated as subtensor of parent tensor * @param[in] parent Index of parent * @param[in] child Index of child * @return @c true if child is allocated as subtensor of parent, otherwise @c false */ - bool isSubTensorOf(const model::OperandIndex &parent, const model::OperandIndex &child); + bool isSubTensorOf(const ir::OperandIndex &parent, const ir::OperandIndex &child); - void dimCorrection(const model::OperandIndex &index, bool apply_dim_correction); + void dimCorrection(const ir::OperandIndex &index, bool apply_dim_correction); T_AclTensorManager *acl_tensor_manager(void) { return _tensor_mgr.get(); } - void setUsesCount(const model::OperandIndex &index, size_t num_uses) + void setUsesCount(const ir::OperandIndex &index, size_t num_uses) { assert(_uses_count_map.find(index) != _uses_count_map.end() ? _uses_count_map[index] == num_uses : true); @@ -108,29 +108,29 @@ private: void buildTensors(void); void buildSubtensors(void); void validate(void); - model::OperandIndex findRootParent(model::OperandIndex index); + ir::OperandIndex findRootParent(ir::OperandIndex index); private: - model::OperandIndexMap _tensor_info_map; - model::OperandIndexMap _subtensor_info_map; - model::OperandIndexMap _apply_dim_correction_map; - model::OperandIndexMap _tensor_layout_map; - model::OperandIndexMap _uses_count_map; + ir::OperandIndexMap _tensor_info_map; + ir::OperandIndexMap _subtensor_info_map; + ir::OperandIndexMap _apply_dim_correction_map; + ir::OperandIndexMap _tensor_layout_map; + ir::OperandIndexMap _uses_count_map; std::unique_ptr _tensor_mgr; - model::OperandIndexSequence _constants; + ir::OperandIndexSequence _constants; // TODO Consider dividing TensorBuilder into Linear and others const std::string _executor_str; // for linear executor - std::queue> _uses_queue; + std::queue> _uses_queue; uint32_t _first_uses_num; - model::OperandIndexMap _first_uses_visit; + ir::OperandIndexMap _first_uses_visit; // for subtensors - model::OperandIndexMap _parent_def; - model::OperandIndexMap _parent_uses; + ir::OperandIndexMap _parent_def; + ir::OperandIndexMap _parent_uses; }; } // namespace acl_common @@ -162,7 +162,7 @@ TemplTensorBuilder::TemplTensorBuilder( template void TemplTensorBuilder::registerTensorInfo( - const model::OperandIndex &ind, const model::OperandInfo &info, ir::Layout backend_layout, + const ir::OperandIndex &ind, const ir::OperandInfo &info, ir::Layout backend_layout, bool as_const) { assert(_tensor_mgr->constTensors().size() == 0); @@ -180,7 +180,7 @@ void TemplTensorBuilder::registerTensorInfo( template void TemplTensorBuilder::registerSubTensorInfo( - const model::OperandIndex &ind, const compiler::SubTensorInfo &info) + const ir::OperandIndex &ind, const compiler::SubTensorInfo &info) { assert(_tensor_mgr->constTensors().size() == 0); assert(_tensor_mgr->nonconstTensors().size() == 0); @@ -204,7 +204,7 @@ void TemplTensorBuilder::registerSubTensorInfo template void TemplTensorBuilder::notifyFirstUse( - const model::OperandIndex &ind) + const ir::OperandIndex &ind) { _first_uses_num++; _uses_queue.emplace(UsesType::FIRST, ind); @@ -212,14 +212,14 @@ void TemplTensorBuilder::notifyFirstUse( template void TemplTensorBuilder::notifyLastUse( - const model::OperandIndex &ind) + const ir::OperandIndex &ind) { _uses_queue.emplace(UsesType::LAST, ind); } template bool TemplTensorBuilder::isRegistered( - const model::OperandIndex &ind) const + const ir::OperandIndex 
&ind) const { return _tensor_info_map.find(ind) != _tensor_info_map.end() || _subtensor_info_map.find(ind) != _subtensor_info_map.end(); @@ -261,7 +261,7 @@ void TemplTensorBuilder::finalize(void) template std::shared_ptr<::neurun::backend::operand::ITensor> -TemplTensorBuilder::tensorAt(const model::OperandIndex &ind) +TemplTensorBuilder::tensorAt(const ir::OperandIndex &ind) { return _tensor_mgr->at(ind); } @@ -274,14 +274,14 @@ void TemplTensorBuilder::iterate(const Iterate template std::shared_ptr -TemplTensorBuilder::at(const ::neurun::model::OperandIndex &ind) +TemplTensorBuilder::at(const ir::OperandIndex &ind) { return _tensor_mgr->at(ind); } template bool TemplTensorBuilder::isSubTensorOf( - const model::OperandIndex &parent, const model::OperandIndex &child) + const ir::OperandIndex &parent, const ir::OperandIndex &child) { if (_subtensor_info_map.find(child) == _subtensor_info_map.end()) { @@ -304,7 +304,7 @@ bool TemplTensorBuilder::isSubTensorOf( template void TemplTensorBuilder::dimCorrection( - const model::OperandIndex &index, bool apply_dim_correction) + const ir::OperandIndex &index, bool apply_dim_correction) { _apply_dim_correction_map[index] = apply_dim_correction; } @@ -357,9 +357,9 @@ void TemplTensorBuilder::buildSubtensors(void) auto &subtensors = _tensor_mgr->nonconstSubtensors(); for (auto &entry : _subtensor_info_map) { - model::OperandIndex ind = entry.first; + ir::OperandIndex ind = entry.first; - std::stack stack; + std::stack stack; stack.push(ind); while (!stack.empty()) @@ -413,8 +413,8 @@ void TemplTensorBuilder::preVisit(const model: return; } - std::function def_handler = - [this, &def_handler](const model::OperandIndex &ind) { + std::function def_handler = + [this, &def_handler](const ir::OperandIndex &ind) { bool is_subtensor = _subtensor_info_map.find(ind) != _subtensor_info_map.end(); bool is_parent = _parent_def.find(ind) != _parent_def.end(); if (!is_subtensor && !is_parent) @@ -443,7 +443,7 @@ void TemplTensorBuilder::preVisit(const model: } else if (is_subtensor) { - const model::OperandIndex &parent_ind = _subtensor_info_map.at(ind).parent(); + const ir::OperandIndex &parent_ind = _subtensor_info_map.at(ind).parent(); if (_parent_def[parent_ind] == 0) return; def_handler(parent_ind); @@ -451,7 +451,7 @@ void TemplTensorBuilder::preVisit(const model: }; // See #5642 - model::OperandIndexMap outputs_map; + ir::OperandIndexMap outputs_map; for (const auto &ind : node.getOutputs()) { assert(_first_uses_visit.find(ind) != _first_uses_visit.end()); @@ -461,10 +461,10 @@ void TemplTensorBuilder::preVisit(const model: // outputs_map's all elements are true? 
   auto outputs_map_all_check = [&outputs_map]() {
     return std::all_of(outputs_map.begin(), outputs_map.end(),
-                       [](std::pair<model::OperandIndex, bool> it) { return it.second; });
+                       [](std::pair<ir::OperandIndex, bool> it) { return it.second; });
   };

-  std::pair<UsesType, model::OperandIndex> peak;
+  std::pair<UsesType, ir::OperandIndex> peak;
   while (!outputs_map_all_check() && (peak = _uses_queue.front()).first == UsesType::FIRST)
   {
     _uses_queue.pop();
@@ -487,8 +487,8 @@ void TemplTensorBuilder::postVisit(const model
     return;
   }

-  std::function<void(const model::OperandIndex &)> use_handler =
-      [this, &use_handler](const model::OperandIndex &ind) {
+  std::function<void(const ir::OperandIndex &)> use_handler =
+      [this, &use_handler](const ir::OperandIndex &ind) {
       bool is_subtensor = _subtensor_info_map.find(ind) != _subtensor_info_map.end();
       bool is_parent = _parent_uses.find(ind) != _parent_uses.end();
       if (!is_subtensor && !is_parent)
@@ -517,7 +517,7 @@ void TemplTensorBuilder::postVisit(const model
       }
       else if (is_subtensor)
       {
-        const model::OperandIndex &parent_ind = _subtensor_info_map.at(ind).parent();
+        const ir::OperandIndex &parent_ind = _subtensor_info_map.at(ind).parent();
         --_parent_uses[parent_ind];
         assert(_parent_uses[parent_ind] > 0);
       }
@@ -525,7 +525,7 @@ void TemplTensorBuilder::postVisit(const model

   // See #5642
   const auto &inputs = node.getInputs();
-  std::pair<UsesType, model::OperandIndex> peak;
+  std::pair<UsesType, ir::OperandIndex> peak;
   while ((peak = _uses_queue.front()).first == UsesType::LAST)
   {
     const auto &popped_idx = peak.second;
@@ -585,18 +585,18 @@ void TemplTensorBuilder::validate(void)
   assert(_uses_queue.size() == 0);
   assert(_first_uses_num == 0);

-  assert(std::all_of(
-      _parent_def.begin(), _parent_def.end(),
-      [](std::pair<model::OperandIndex, uint32_t> it) { return it.second == 0; }));
+  assert(
+      std::all_of(_parent_def.begin(), _parent_def.end(),
+                  [](std::pair<ir::OperandIndex, uint32_t> it) { return it.second == 0; }));

-  assert(std::all_of(
-      _parent_uses.begin(), _parent_uses.end(),
-      [](std::pair<model::OperandIndex, uint32_t> it) { return it.second == 0; }));
+  assert(
+      std::all_of(_parent_uses.begin(), _parent_uses.end(),
+                  [](std::pair<ir::OperandIndex, uint32_t> it) { return it.second == 0; }));
 }

 template
-model::OperandIndex
-TemplTensorBuilder::findRootParent(model::OperandIndex ind)
+ir::OperandIndex
+TemplTensorBuilder::findRootParent(ir::OperandIndex ind)
 {
   if (_subtensor_info_map.find(ind) == _subtensor_info_map.end())
     return ind;
diff --git a/runtime/neurun/backend/acl_neon/Backend.h b/runtime/neurun/backend/acl_neon/Backend.h
index 4c0c613..2fcf669 100644
--- a/runtime/neurun/backend/acl_neon/Backend.h
+++ b/runtime/neurun/backend/acl_neon/Backend.h
@@ -19,7 +19,7 @@
 #include
 #include
-#include <model/Operands.h>
+#include <ir/Operands.h>
 #include "Config.h"
 #include "ConstantInitializer.h"
@@ -43,7 +43,7 @@ public:
   std::shared_ptr config() const override { return _config; }

   std::unique_ptr
-  newContext(const model::Operands &operands,
+  newContext(const ir::Operands &operands,
              const std::shared_ptr &) const override
   {
     auto tensor_builder = std::make_shared(createTensorManager());
diff --git a/runtime/neurun/backend/acl_neon/ConstantInitializer.cc b/runtime/neurun/backend/acl_neon/ConstantInitializer.cc
index 33d12b4..a8605af 100644
--- a/runtime/neurun/backend/acl_neon/ConstantInitializer.cc
+++ b/runtime/neurun/backend/acl_neon/ConstantInitializer.cc
@@ -23,7 +23,7 @@ namespace backend
 namespace acl_neon
 {

-ConstantInitializer::ConstantInitializer(const model::Operands &operands,
+ConstantInitializer::ConstantInitializer(const ir::Operands &operands,
                                          const std::shared_ptr &tensor_builder)
     : _operands{operands}, _tensor_builder{tensor_builder}
 {
@@ -37,8 +37,7 @@ void ConstantInitializer::visit(const model::operation::BatchToSpaceND &node)

   if (block_size_obj.isConstant())
   {
-    _init_map[block_size_index] = [](const model::Operand &model_obj,
-                                     backend::operand::ITensor &obj) {
+    _init_map[block_size_index] = [](const ir::Operand &model_obj, backend::operand::ITensor &obj) {
       const auto &shape = model_obj.shape();
       const auto base = reinterpret_cast(model_obj.data().base());
       assert(model_obj.shape().rank() == 1);
@@ -192,8 +191,7 @@ void ConstantInitializer::visit(const model::operation::SpaceToBatchND &node)

   if (block_size_obj.isConstant())
   {
-    _init_map[block_size_index] = [](const model::Operand &model_obj,
-                                     backend::operand::ITensor &obj) {
+    _init_map[block_size_index] = [](const ir::Operand &model_obj, backend::operand::ITensor &obj) {
       const auto &shape = model_obj.shape();
       const auto base = reinterpret_cast(model_obj.data().base());
       assert(model_obj.shape().rank() == 1);
@@ -213,8 +211,7 @@ void ConstantInitializer::visit(const model::operation::SpaceToBatchND &node)
   const auto &paddings_obj = _operands.at(paddings_index);
   if (paddings_obj.isConstant())
   {
-    _init_map[paddings_index] = [](const model::Operand &model_obj,
-                                   backend::operand::ITensor &obj) {
+    _init_map[paddings_index] = [](const ir::Operand &model_obj, backend::operand::ITensor &obj) {
       const auto &shape = model_obj.shape();
       const auto base = reinterpret_cast(model_obj.data().base());
       assert(model_obj.shape().rank() == 2);
diff --git a/runtime/neurun/backend/acl_neon/ConstantInitializer.h b/runtime/neurun/backend/acl_neon/ConstantInitializer.h
index 6d04149..b676073 100644
--- a/runtime/neurun/backend/acl_neon/ConstantInitializer.h
+++ b/runtime/neurun/backend/acl_neon/ConstantInitializer.h
@@ -18,7 +18,7 @@
 #define __NEURUN_COMPILER_ACL_NEON_CONSTANT_INITIALIZER_H__

 #include
-#include <model/Operands.h>
+#include <ir/Operands.h>
 #include "TensorBuilder.h"

 namespace neurun
@@ -31,7 +31,7 @@ namespace acl_neon
 class ConstantInitializer : public IConstantInitializer
 {
public:
-  ConstantInitializer(const model::Operands &operands,
+  ConstantInitializer(const ir::Operands &operands,
                       const std::shared_ptr &tensor_builder);

public:
@@ -45,11 +45,11 @@ public:
   void visit(const model::operation::TransposeConv &) override;

private:
-  const model::Operands &operands() const override { return _operands; }
+  const ir::Operands &operands() const override { return _operands; }
   std::shared_ptr tensor_builder() const override { return _tensor_builder; }

private:
-  const model::Operands &_operands;
+  const ir::Operands &_operands;
   std::shared_ptr _tensor_builder;
 };
diff --git a/runtime/neurun/backend/acl_neon/KernelGenerator.cc b/runtime/neurun/backend/acl_neon/KernelGenerator.cc
index 080a38e..81737aa 100644
--- a/runtime/neurun/backend/acl_neon/KernelGenerator.cc
+++ b/runtime/neurun/backend/acl_neon/KernelGenerator.cc
@@ -24,7 +24,7 @@
 #include "kernel/ConcatLayer.h"
 #include "util/Padding.h"
-#include "model/Index.h"
+#include "ir/Index.h"
 #include "ir/DataType.h"
 #include "ir/InternalType.h"
 #include "compiler/IExecutionBuilder.h"
@@ -142,7 +142,7 @@ void ActivationBuilder::append(ir::Activation act, ::arm_compute::ITensor *ifm_a
 //
 // KernelGenerator
 //
-KernelGenerator::KernelGenerator(const neurun::model::Operands &ctx,
+KernelGenerator::KernelGenerator(const ir::Operands &ctx,
                                  const std::shared_ptr &tensor_builder)
     : _ctx(ctx), _tensor_builder(tensor_builder), _current_subg_layout(ir::Layout::UNKNOWN)
 {
@@ -519,7 +519,7 @@ void KernelGenerator::visit(const model::operation::Concat &node)
 {
   const auto ofm_index{node.getOutputs().at(0)};

-  std::vector<model::OperandIndex> input_indexes;
+  std::vector<ir::OperandIndex> input_indexes;
   for (const auto &input : node.getInputs())
     input_indexes.emplace_back(input);
@@ -1131,7 +1131,7 @@ void KernelGenerator::visit(const model::operation::Pack &node)
   const auto output_rank = _ctx.at(output_index).shape().rank();

-  std::vector<model::OperandIndex> input_indexes;
+  std::vector<ir::OperandIndex> input_indexes;
   for (const auto &input_index : node.getInputs())
     input_indexes.emplace_back(input_index);
@@ -1657,7 +1657,7 @@ void KernelGenerator::visit(const model::operation::Split &node)
   assert(node.param().num_splits == static_cast(node.getOutputs().size()));

   const auto ifm_rank = _ctx.at(ifm_index).shape().rank();
-  std::vector<model::OperandIndex> output_indexes;
+  std::vector<ir::OperandIndex> output_indexes;
   for (const auto &output : node.getOutputs())
     output_indexes.emplace_back(output);
@@ -1919,7 +1919,7 @@ void KernelGenerator::visit(const model::operation::Unpack &node)
   const auto input_rank = _ctx.at(input_index).shape().rank();

-  std::vector<model::OperandIndex> output_indexes;
+  std::vector<ir::OperandIndex> output_indexes;
   for (const auto &output_index : node.getOutputs())
     output_indexes.emplace_back(output_index);
diff --git a/runtime/neurun/backend/acl_neon/KernelGenerator.h b/runtime/neurun/backend/acl_neon/KernelGenerator.h
index 4bc1d2d..a3ff937 100644
--- a/runtime/neurun/backend/acl_neon/KernelGenerator.h
+++ b/runtime/neurun/backend/acl_neon/KernelGenerator.h
@@ -19,7 +19,7 @@
 #include

-#include "model/Operands.h"
+#include "ir/Operands.h"
 #include "TensorBuilder.h"

 namespace neurun
@@ -32,8 +32,7 @@ namespace acl_neon
 class KernelGenerator : public IKernelGenerator
 {
public:
-  KernelGenerator(const neurun::model::Operands &ctx,
-                  const std::shared_ptr &tensor_builder);
+  KernelGenerator(const ir::Operands &ctx, const std::shared_ptr &tensor_builder);

   void visit(const model::Subgraph &) override;
   void visit(const model::operation::Abs &) override;
@@ -97,7 +96,7 @@ public:
   void visit(const model::operation::Comparison &) override;

private:
-  const neurun::model::Operands &_ctx;
+  const ir::Operands &_ctx;
   std::shared_ptr _tensor_builder;
   ir::Layout _current_subg_layout;
 };
diff --git a/runtime/neurun/backend/acl_neon/ShapeFixer.cc b/runtime/neurun/backend/acl_neon/ShapeFixer.cc
index 54c95a7..dcbae4c 100644
--- a/runtime/neurun/backend/acl_neon/ShapeFixer.cc
+++ b/runtime/neurun/backend/acl_neon/ShapeFixer.cc
@@ -33,7 +33,7 @@
 #include "kernel/ConcatLayer.h"
 #include "util/Padding.h"
-#include "model/Index.h"
+#include "ir/Index.h"
 #include "compiler/IExecutionBuilder.h"
 #include "exec/NopFunction.h"
 #include "util/logging.h"
@@ -50,7 +50,7 @@ namespace acl_neon

 using ::neurun::backend::acl_common::asAclFunction;

-ShapeFixer::ShapeFixer(const neurun::model::Operands &ctx,
+ShapeFixer::ShapeFixer(const ir::Operands &ctx,
                        const std::shared_ptr &tensor_builder)
     : _ctx(ctx), _tensor_builder(tensor_builder)
 {
diff --git a/runtime/neurun/backend/acl_neon/ShapeFixer.h b/runtime/neurun/backend/acl_neon/ShapeFixer.h
index b8d8547..28ef712 100644
--- a/runtime/neurun/backend/acl_neon/ShapeFixer.h
+++ b/runtime/neurun/backend/acl_neon/ShapeFixer.h
@@ -19,7 +19,7 @@
 #include

-#include "model/Operands.h"
+#include "ir/Operands.h"
 #include "TensorBuilder.h"

 namespace neurun
@@ -32,8 +32,7 @@ namespace acl_neon
 class ShapeFixer : public IShapeFixer
 {
public:
-  ShapeFixer(const neurun::model::Operands &ctx,
-             const std::shared_ptr &tensor_builder);
+  ShapeFixer(const ir::Operands &ctx, const std::shared_ptr &tensor_builder);

   std::shared_ptr tensor_builder() override { return _tensor_builder; }
@@ -98,7 +97,7 @@ public:
   void visit(const model::operation::Comparison &) override;

private:
-  const neurun::model::Operands &_ctx;
+  const ir::Operands &_ctx;
   std::shared_ptr _tensor_builder;
 };
diff --git a/runtime/neurun/backend/acl_neon/TensorRegister.h b/runtime/neurun/backend/acl_neon/TensorRegister.h
index 708beb4..115e05d 100644
--- a/runtime/neurun/backend/acl_neon/TensorRegister.h
+++ b/runtime/neurun/backend/acl_neon/TensorRegister.h
@@ -31,14 +31,13 @@ namespace acl_neon
 class TensorRegister : public acl_common::AclTensorRegister
 {
public:
-  TensorRegister(const model::Operands &operands,
-                 const std::shared_ptr &tensor_builder)
+  TensorRegister(const ir::Operands &operands, const std::shared_ptr &tensor_builder)
       : acl_common::AclTensorRegister{operands, tensor_builder}
   {
     // DO NOTHING
   }

-  void setUsesCount(const model::OperandIndex &ind, size_t num_uses) const override
+  void setUsesCount(const ir::OperandIndex &ind, size_t num_uses) const override
   {
     nnfw::misc::polymorphic_downcast(tensor_builder().get())
         ->setUsesCount(ind, num_uses);
diff --git a/runtime/neurun/backend/cpu/Backend.h b/runtime/neurun/backend/cpu/Backend.h
index 252d6c7..e52a776 100644
--- a/runtime/neurun/backend/cpu/Backend.h
+++ b/runtime/neurun/backend/cpu/Backend.h
@@ -19,7 +19,7 @@
 #include
 #include
-#include <model/Operands.h>
+#include <ir/Operands.h>
 #include "Config.h"
 #include "ConstantInitializer.h"
@@ -42,7 +42,7 @@ public:
   std::shared_ptr config() const override { return _config; }

   std::unique_ptr
-  newContext(const model::Operands &operands,
+  newContext(const ir::Operands &operands,
              const std::shared_ptr &kb) const override
   {
     auto tensor_builder = std::make_shared();
diff --git a/runtime/neurun/backend/cpu/ConstantInitializer.cc b/runtime/neurun/backend/cpu/ConstantInitializer.cc
index 8d30ffe..60a4090 100644
--- a/runtime/neurun/backend/cpu/ConstantInitializer.cc
+++ b/runtime/neurun/backend/cpu/ConstantInitializer.cc
@@ -23,7 +23,7 @@ namespace backend
 namespace cpu
 {

-ConstantInitializer::ConstantInitializer(const model::Operands &operands,
+ConstantInitializer::ConstantInitializer(const ir::Operands &operands,
                                          const std::shared_ptr &tensor_builder)
     : _operands{operands}, _tensor_builder{tensor_builder}
 {
diff --git a/runtime/neurun/backend/cpu/ConstantInitializer.h b/runtime/neurun/backend/cpu/ConstantInitializer.h
index 9515dbf..c901958 100644
--- a/runtime/neurun/backend/cpu/ConstantInitializer.h
+++ b/runtime/neurun/backend/cpu/ConstantInitializer.h
@@ -18,7 +18,7 @@
 #define __NEURUN_COMPILER_CPU_CONSTANT_INITIALIZER_H__

 #include
-#include <model/Operands.h>
+#include <ir/Operands.h>
 #include "TensorBuilder.h"

 namespace neurun
@@ -31,7 +31,7 @@ namespace cpu
 class ConstantInitializer : public IConstantInitializer
 {
public:
-  ConstantInitializer(const model::Operands &operands,
+  ConstantInitializer(const ir::Operands &operands,
                       const std::shared_ptr &tensor_builder);

public:
@@ -40,11 +40,11 @@ public:
   void visit(const model::operation::FullyConnected &) override;

private:
-  const model::Operands &operands() const override { return _operands; }
+  const ir::Operands &operands() const override { return _operands; }
   std::shared_ptr tensor_builder() const override { return _tensor_builder; }

private:
-  const model::Operands &_operands;
+  const ir::Operands &_operands;
   std::shared_ptr _tensor_builder;
 };
diff --git a/runtime/neurun/backend/cpu/KernelGenerator.cc b/runtime/neurun/backend/cpu/KernelGenerator.cc
index c7a1045..461c90d 100644
--- a/runtime/neurun/backend/cpu/KernelGenerator.cc
+++ b/runtime/neurun/backend/cpu/KernelGenerator.cc
@@ -52,8 +52,7 @@ namespace cpu
 {

 KernelGenerator::KernelGenerator(
-    const neurun::model::Operands &operand_ctx,
-    const std::shared_ptr &tensor_builder,
+    const ir::Operands &operand_ctx, const std::shared_ptr &tensor_builder,
     const std::shared_ptr &kernel_builer)
     : _ctx(operand_ctx), _tensor_builder(tensor_builder), _kernel_builder(kernel_builer),
       _current_subg_layout(ir::Layout::UNKNOWN)
@@ -535,7 +534,7 @@ void KernelGenerator::visit(const model::operation::Permute &node)

 void KernelGenerator::visit(const model::operation::Custom &node)
 {
-  auto get_type_info = [this](const model::Operand &operand) -> custom::TypeInfo {
+  auto get_type_info = [this](const ir::Operand &operand) -> custom::TypeInfo {
     auto backendDescr =
         ::neurun::backend::cpu::kernel::getTensorDescriptor(operand, _current_subg_layout);
@@ -548,7 +547,7 @@ void KernelGenerator::visit(const model::operation::Custom &node)
     return {shape, backendDescr.type};
   };

-  auto fill_op_info = [&](const model::OperandIndexSequence &opSeq,
+  auto fill_op_info = [&](const ir::OperandIndexSequence &opSeq,
                           std::vector &types, std::vector &allocs) {
     for (auto &idx : opSeq)
     {
diff --git a/runtime/neurun/backend/cpu/KernelGenerator.h b/runtime/neurun/backend/cpu/KernelGenerator.h
index 711ebc5..0884e77 100644
--- a/runtime/neurun/backend/cpu/KernelGenerator.h
+++ b/runtime/neurun/backend/cpu/KernelGenerator.h
@@ -18,7 +18,7 @@
 #define __NEURUN_BACKEND_CPU_KERNEL_GENERATOR_H__

 #include "backend/IKernelGenerator.h"
-#include "model/Operands.h"
+#include "ir/Operands.h"
 #include "operand/Tensor.h"
 #include "backend/CustomKernelBuilder.h"
 #include "TensorBuilder.h"
@@ -33,8 +33,7 @@ namespace cpu
 class KernelGenerator : public IKernelGenerator
 {
public:
-  KernelGenerator(const neurun::model::Operands &ctx,
-                  const std::shared_ptr &tensor_builder,
+  KernelGenerator(const ir::Operands &ctx, const std::shared_ptr &tensor_builder,
                   const std::shared_ptr &kernel_builder);

   using IKernelGenerator::visit;
@@ -59,7 +58,7 @@ public:
   void visit(const model::operation::Pad &);

private:
-  const neurun::model::Operands &_ctx;
+  const ir::Operands &_ctx;
   std::shared_ptr _tensor_builder;
   std::shared_ptr _kernel_builder;
   ir::Layout _current_subg_layout;
diff --git a/runtime/neurun/backend/cpu/MemoryManager.cc b/runtime/neurun/backend/cpu/MemoryManager.cc
index e15c500..9775d4d 100644
--- a/runtime/neurun/backend/cpu/MemoryManager.cc
+++ b/runtime/neurun/backend/cpu/MemoryManager.cc
@@ -40,18 +40,18 @@ IMemoryPlanner *MemoryManager::createMemoryPlanner()
   return MemoryPlannerFactory::get().create(planner_id);
 }

-void MemoryManager::buildTensor(const model::OperandIndex &ind, const model::OperandInfo &info)
+void MemoryManager::buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &info)
 {
   auto tensor = std::make_shared(info);
   _tensors[ind] = tensor;
 }

-void MemoryManager::claimPlan(const model::OperandIndex &ind, uint32_t size)
+void MemoryManager::claimPlan(const ir::OperandIndex &ind, uint32_t size)
 {
   _mem_planner->claim(ind, size);
 }

-void MemoryManager::releasePlan(const model::OperandIndex &ind) { _mem_planner->release(ind); }
+void MemoryManager::releasePlan(const ir::OperandIndex &ind) { _mem_planner->release(ind); }

 void MemoryManager::allocate(void)
 {
diff --git a/runtime/neurun/backend/cpu/MemoryManager.h b/runtime/neurun/backend/cpu/MemoryManager.h
index 0fb8906..bf0f554 100644
--- a/runtime/neurun/backend/cpu/MemoryManager.h
+++ b/runtime/neurun/backend/cpu/MemoryManager.h
@@ -20,7 +20,7 @@
 #include "backend/IMemoryManager.h"
 #include "MemoryPlanner.h"
 #include "operand/Tensor.h"
-#include "model/OperandIndexMap.h"
+#include "ir/OperandIndexMap.h"

 namespace neurun
 {
@@ -38,18 +38,18 @@ public:
   void allocate(void) override;
   void deallocate(void) override { _mem_alloc->release(); }

-  void buildTensor(const model::OperandIndex &ind, const model::OperandInfo &info);
-  void claimPlan(const model::OperandIndex &ind, uint32_t size);
-  void releasePlan(const model::OperandIndex &ind);
+  void buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &info);
+  void claimPlan(const ir::OperandIndex &ind, uint32_t size);
+  void releasePlan(const ir::OperandIndex &ind);

-  model::OperandIndexMap> &tensors(void) { return _tensors; }
+  ir::OperandIndexMap> &tensors(void) { return _tensors; }

private:
   IMemoryPlanner *createMemoryPlanner();

private:
-  model::OperandIndexMap> _tensors;
-  model::OperandIndexMap _tensor_mem_map;
+  ir::OperandIndexMap> _tensors;
+  ir::OperandIndexMap _tensor_mem_map;
   std::shared_ptr _mem_planner;
   std::shared_ptr _mem_alloc;
 };
diff --git a/runtime/neurun/backend/cpu/MemoryPlanner.cc b/runtime/neurun/backend/cpu/MemoryPlanner.cc
index 8eaf7bb..0cee5fd 100644
--- a/runtime/neurun/backend/cpu/MemoryPlanner.cc
+++ b/runtime/neurun/backend/cpu/MemoryPlanner.cc
@@ -33,7 +33,7 @@ Allocator::Allocator(uint32_t capacity)
   VERBOSE(ALLOC) << "base pointer: " << static_cast(_base.get()) << std::endl;
 }

-void BumpPlanner::claim(const model::OperandIndex &ind, size_t size)
+void BumpPlanner::claim(const ir::OperandIndex &ind, size_t size)
 {
   assert(size != 0);
@@ -45,7 +45,7 @@ void BumpPlanner::claim(const model::OperandIndex &ind, size_t size)
                       << std::endl;
 }

-void BumpPlanner::release(const model::OperandIndex &ind)
+void BumpPlanner::release(const ir::OperandIndex &ind)
 {
   VERBOSE(BP_PLANNER) << "RELEASE(#" << ind.value() << "): "
                       << "NOTHING does" << std::endl;
@@ -54,7 +54,7 @@ void BumpPlanner::release(const model::OperandIndex &ind)
 // There are some assumptions for claiming memory(== making a reservation for memory).
 // 1. About _claim_table(std::map).
 //   - The table's data structure is std::map so that it always sorts
-//     value(model::OperandIndex) by key(base_offset).
+//     value(OperandIndex) by key(base_offset).
 //   - This claim() inserts key/value into _claim_table and the release() removes the key/value from
 //     _claim_table.
 //   - _claim_table shows the memory status at a certain point in time. Therefore,
 //     - If _claim_table has an offset and a certain size at a certain point in time, it means the
 //       place at the offset can be claimed.
 // 2. In the loop for _claim_table, we can assume the current claim_base_offset value is bigger than
 //    the previous claim_base_offset.
-void FirstFitPlanner::claim(const model::OperandIndex &ind, size_t size) +void FirstFitPlanner::claim(const ir::OperandIndex &ind, size_t size) { assert(size != 0); @@ -98,7 +98,7 @@ void FirstFitPlanner::claim(const model::OperandIndex &ind, size_t size) } } -void FirstFitPlanner::release(const model::OperandIndex &ind) +void FirstFitPlanner::release(const ir::OperandIndex &ind) { for (auto it = _claim_table.cbegin(); it != _claim_table.cend(); ++it) { diff --git a/runtime/neurun/backend/cpu/MemoryPlanner.h b/runtime/neurun/backend/cpu/MemoryPlanner.h index eaa4299..fc7302b 100644 --- a/runtime/neurun/backend/cpu/MemoryPlanner.h +++ b/runtime/neurun/backend/cpu/MemoryPlanner.h @@ -25,7 +25,7 @@ #include #include -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" namespace neurun { @@ -66,19 +66,19 @@ private: */ struct IMemoryPlanner { - using MemoryPlans = model::OperandIndexMap; + using MemoryPlans = ir::OperandIndexMap; /** * @brief Claim memory for operand * @param[in] index The operand index * @param[in] size The size of the memory */ - virtual void claim(const model::OperandIndex &, size_t) = 0; + virtual void claim(const ir::OperandIndex &, size_t) = 0; /** * @brief Release memory for operand * @param[in] index The operand index */ - virtual void release(const model::OperandIndex &) = 0; + virtual void release(const ir::OperandIndex &) = 0; /** * @brief Get capacity for memory planning * @return The value of capacity @@ -104,12 +104,12 @@ public: * @param[in] index The operand index * @param[in] size The size of the memory */ - void claim(const model::OperandIndex &, size_t) override; + void claim(const ir::OperandIndex &, size_t) override; /** * @brief Release memory for operand by bump way * @param[in] index The operand index */ - void release(const model::OperandIndex &) override; + void release(const ir::OperandIndex &) override; /** * @brief Get capacity for memory planning * @return The value of capacity @@ -137,12 +137,12 @@ public: * @param[in] index The operand index * @param[in] size The size of the memory */ - void claim(const model::OperandIndex &, size_t) override; + void claim(const ir::OperandIndex &, size_t) override; /** * @brief Release memory for operand by firstfit way * @param[in] index The operand index */ - void release(const model::OperandIndex &) override; + void release(const ir::OperandIndex &) override; /** * @brief Get capacity for memory planning * @return The value of capacity @@ -158,7 +158,7 @@ private: uint32_t _capacity = 0; MemoryPlans _mem_plans; // Use std::map because claim() assumes that _claim_table is sorted by uint32_t(base_offset) - std::map _claim_table; + std::map _claim_table; }; } // namespace cpu diff --git a/runtime/neurun/backend/cpu/MemoryPlanner.test.cc b/runtime/neurun/backend/cpu/MemoryPlanner.test.cc index 39e0f0d..eee30cf 100644 --- a/runtime/neurun/backend/cpu/MemoryPlanner.test.cc +++ b/runtime/neurun/backend/cpu/MemoryPlanner.test.cc @@ -17,7 +17,7 @@ #include #include "MemoryPlanner.h" -#include "model/Index.h" +#include "ir/Index.h" TEST(Allocator, allocate_test) { @@ -30,7 +30,7 @@ TEST(BumpPlanner, claim_test) ::neurun::backend::cpu::BumpPlanner planner; auto claim = [&planner](uint32_t index, size_t size, uint32_t expected_offset) { - ::neurun::model::OperandIndex mem_idx(index); + neurun::ir::OperandIndex mem_idx(index); planner.claim(mem_idx, size); auto mem_blk = planner.memory_plans()[mem_idx]; ASSERT_EQ(mem_blk.offset, expected_offset); @@ -47,7 +47,7 @@ TEST(FirstFitPlanner, 
claim_release_test) ::neurun::backend::cpu::FirstFitPlanner planner; auto claim = [&planner](uint32_t index, size_t size, uint32_t expected_offset) { - ::neurun::model::OperandIndex mem_idx(index); + neurun::ir::OperandIndex mem_idx(index); planner.claim(mem_idx, size); auto mem_blk = planner.memory_plans()[mem_idx]; ASSERT_EQ(mem_blk.offset, expected_offset); @@ -55,7 +55,7 @@ TEST(FirstFitPlanner, claim_release_test) }; auto release = [&planner](uint32_t index) { - ::neurun::model::OperandIndex mem_idx(index); + neurun::ir::OperandIndex mem_idx(index); planner.release(mem_idx); }; diff --git a/runtime/neurun/backend/cpu/ShapeFixer.cc b/runtime/neurun/backend/cpu/ShapeFixer.cc index 5ec7ebd..22120f3 100644 --- a/runtime/neurun/backend/cpu/ShapeFixer.cc +++ b/runtime/neurun/backend/cpu/ShapeFixer.cc @@ -50,7 +50,7 @@ namespace backend namespace cpu { -ShapeFixer::ShapeFixer(const neurun::model::Operands &operand_ctx, +ShapeFixer::ShapeFixer(const ir::Operands &operand_ctx, const std::shared_ptr &tensor_builder) : _ctx(operand_ctx), _tensor_builder(tensor_builder) { diff --git a/runtime/neurun/backend/cpu/ShapeFixer.h b/runtime/neurun/backend/cpu/ShapeFixer.h index 6788f41..ca2355c 100644 --- a/runtime/neurun/backend/cpu/ShapeFixer.h +++ b/runtime/neurun/backend/cpu/ShapeFixer.h @@ -19,7 +19,7 @@ #include -#include "model/Operands.h" +#include "ir/Operands.h" #include "operand/Tensor.h" #include "TensorBuilder.h" @@ -33,8 +33,7 @@ namespace cpu class ShapeFixer : public IShapeFixer { public: - ShapeFixer(const neurun::model::Operands &ctx, - const std::shared_ptr &tensor_builder); + ShapeFixer(const ir::Operands &ctx, const std::shared_ptr &tensor_builder); std::shared_ptr tensor_builder() override { return _tensor_builder; } @@ -57,7 +56,7 @@ public: void visit(const model::operation::Pad &); private: - const neurun::model::Operands &_ctx; + const ir::Operands &_ctx; std::shared_ptr _tensor_builder; }; diff --git a/runtime/neurun/backend/cpu/TensorBuilder.cc b/runtime/neurun/backend/cpu/TensorBuilder.cc index 5484cb3..2c654c2 100644 --- a/runtime/neurun/backend/cpu/TensorBuilder.cc +++ b/runtime/neurun/backend/cpu/TensorBuilder.cc @@ -32,8 +32,8 @@ TensorBuilder::TensorBuilder() : _tensor_mgr{new TensorManager()} // DO NOTHING } -void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind, - const model::OperandInfo &info, ir::Layout, bool as_const) +void TensorBuilder::registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info, + ir::Layout, bool as_const) { _tensor_info_map.emplace(ind, info); @@ -41,14 +41,13 @@ void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind, _constants.append(ind); } -void TensorBuilder::registerSubTensorInfo(const model::OperandIndex &, - const compiler::SubTensorInfo &) +void TensorBuilder::registerSubTensorInfo(const ir::OperandIndex &, const compiler::SubTensorInfo &) { // Not supported yet assert(false); } -void TensorBuilder::notifyFirstUse(const model::OperandIndex &ind) +void TensorBuilder::notifyFirstUse(const ir::OperandIndex &ind) { assert(_tensor_info_map.find(ind) != _tensor_info_map.end()); const auto tensor_info = _tensor_info_map.at(ind); @@ -57,9 +56,9 @@ void TensorBuilder::notifyFirstUse(const model::OperandIndex &ind) _tensor_mgr->claimPlan(ind, size); } -void TensorBuilder::notifyLastUse(const model::OperandIndex &ind) { _tensor_mgr->releasePlan(ind); } +void TensorBuilder::notifyLastUse(const ir::OperandIndex &ind) { _tensor_mgr->releasePlan(ind); } -bool TensorBuilder::isRegistered(const 
model::OperandIndex &ind) const +bool TensorBuilder::isRegistered(const ir::OperandIndex &ind) const { return _tensor_info_map.find(ind) != _tensor_info_map.end(); } @@ -83,14 +82,14 @@ void TensorBuilder::allocateNonconsts() } std::shared_ptr<::neurun::backend::operand::ITensor> -TensorBuilder::tensorAt(const model::OperandIndex &ind) +TensorBuilder::tensorAt(const ir::OperandIndex &ind) { return _tensor_mgr->at(ind); } void TensorBuilder::iterate(const IterateFunction &fn) { _tensor_mgr->iterate(fn); } -std::shared_ptr TensorBuilder::at(const ::neurun::model::OperandIndex &ind) +std::shared_ptr TensorBuilder::at(const ir::OperandIndex &ind) { return _tensor_mgr->at(ind); } diff --git a/runtime/neurun/backend/cpu/TensorBuilder.h b/runtime/neurun/backend/cpu/TensorBuilder.h index 0ec1eab..a8e2fbf 100644 --- a/runtime/neurun/backend/cpu/TensorBuilder.h +++ b/runtime/neurun/backend/cpu/TensorBuilder.h @@ -21,7 +21,7 @@ #include #include "operand/Tensor.h" -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" #include "TensorManager.h" namespace neurun @@ -42,20 +42,20 @@ public: * @param[in] info Operand information * @param[in] layout Operand data layout */ - void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info, + void registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info, ir::Layout backend_layout, bool as_const) override; /** * @brief Register subtensor information to allocate on CPU backend * @param[in] ind Operand index * @param[in] info Tensor information */ - void registerSubTensorInfo(const model::OperandIndex &ind, + void registerSubTensorInfo(const ir::OperandIndex &ind, const compiler::SubTensorInfo &info) override; - void notifyFirstUse(const model::OperandIndex &) override; - void notifyLastUse(const model::OperandIndex &) override; + void notifyFirstUse(const ir::OperandIndex &) override; + void notifyLastUse(const ir::OperandIndex &) override; - bool isRegistered(const model::OperandIndex &) const override; + bool isRegistered(const ir::OperandIndex &) const override; void prepare(void) override; void allocateConsts() override; @@ -64,7 +64,7 @@ public: void finalize() override { /* DO NOTHING */} std::shared_ptr<::neurun::backend::operand::ITensor> - tensorAt(const model::OperandIndex &ind) override; + tensorAt(const ir::OperandIndex &ind) override; void iterate(const IterateFunction &fn) override; @@ -73,12 +73,12 @@ public: std::unique_ptr releaseTensorManager(void) override; - std::shared_ptr at(const ::neurun::model::OperandIndex &ind); + std::shared_ptr at(const ir::OperandIndex &ind); private: std::unique_ptr _tensor_mgr; - model::OperandIndexMap _tensor_info_map; - model::OperandIndexSequence _constants; + ir::OperandIndexMap _tensor_info_map; + ir::OperandIndexSequence _constants; }; } // namespace cpu diff --git a/runtime/neurun/backend/cpu/TensorManager.cc b/runtime/neurun/backend/cpu/TensorManager.cc index 90751c7..912b6ca 100644 --- a/runtime/neurun/backend/cpu/TensorManager.cc +++ b/runtime/neurun/backend/cpu/TensorManager.cc @@ -36,8 +36,8 @@ void TensorManager::deallocateConsts(void) { _const_mgr->deallocate(); } void TensorManager::deallocateNonconsts(void) { _nonconst_mgr->deallocate(); } -void TensorManager::buildTensor(const model::OperandIndex &ind, - const model::OperandInfo &tensor_info, bool as_const) +void TensorManager::buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &tensor_info, + bool as_const) { assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end()); if (as_const) @@ 
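The notifyFirstUse()/notifyLastUse() pair above is how operand liveness reaches the planner: the first use claims a plan sized from the registered tensor info, the last use releases it. A sketch of a driver that replays such events against any planner-like type; UseEvent and planLifetimes are illustrative names, not part of the runtime:

// Liveness replay sketch: first-use events claim, last-use events release,
// mirroring TensorBuilder::notifyFirstUse/notifyLastUse above. PlannerT is
// any type exposing claim(index, size) and release(index).
#include <cstddef>
#include <cstdint>
#include <vector>

struct UseEvent
{
  uint32_t operand; // stand-in for ir::OperandIndex
  bool first_use;   // true at the operand's first use, false at its last
  size_t size;      // total byte size, meaningful only on first use
};

template <typename PlannerT>
void planLifetimes(PlannerT &planner, const std::vector<UseEvent> &events)
{
  for (const auto &e : events)
  {
    if (e.first_use)
      planner.claim(e.operand, e.size); // memory must exist from here on
    else
      planner.release(e.operand); // and may be reused after the last reader
  }
}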
-52,35 +52,35 @@ void TensorManager::buildTensor(const model::OperandIndex &ind, } } -void TensorManager::claimPlan(const model::OperandIndex &ind, uint32_t size) +void TensorManager::claimPlan(const ir::OperandIndex &ind, uint32_t size) { assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end()); _ind_to_mgr.at(ind).claimPlan(ind, size); } -void TensorManager::releasePlan(const model::OperandIndex &ind) +void TensorManager::releasePlan(const ir::OperandIndex &ind) { assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end()); _ind_to_mgr.at(ind).releasePlan(ind); } -std::shared_ptr TensorManager::at(const ::neurun::model::OperandIndex &ind) +std::shared_ptr TensorManager::at(const ir::OperandIndex &ind) { assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end()); return _ind_to_mgr.at(ind).tensors().at(ind); } -model::OperandIndexMap> &TensorManager::constTensors(void) +ir::OperandIndexMap> &TensorManager::constTensors(void) { return _const_mgr->tensors(); } -model::OperandIndexMap> &TensorManager::nonconstTensors(void) +ir::OperandIndexMap> &TensorManager::nonconstTensors(void) { return _nonconst_mgr->tensors(); } -void TensorManager::iterate(const std::function &fn) +void TensorManager::iterate(const std::function &fn) { for (auto it : _nonconst_mgr->tensors()) fn(it.first); diff --git a/runtime/neurun/backend/cpu/TensorManager.h b/runtime/neurun/backend/cpu/TensorManager.h index 22cf446..c3ef706 100644 --- a/runtime/neurun/backend/cpu/TensorManager.h +++ b/runtime/neurun/backend/cpu/TensorManager.h @@ -19,7 +19,7 @@ #include "backend/ITensorManager.h" #include "MemoryManager.h" -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" namespace neurun { @@ -39,23 +39,22 @@ public: void deallocateConsts(void) override; void deallocateNonconsts(void) override; - void buildTensor(const model::OperandIndex &ind, const model::OperandInfo &tensor_info, - bool as_const); + void buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &tensor_info, bool as_const); - void claimPlan(const model::OperandIndex &ind, uint32_t size); - void releasePlan(const model::OperandIndex &ind); + void claimPlan(const ir::OperandIndex &ind, uint32_t size); + void releasePlan(const ir::OperandIndex &ind); - std::shared_ptr at(const ::neurun::model::OperandIndex &ind); + std::shared_ptr at(const ir::OperandIndex &ind); - model::OperandIndexMap> &constTensors(void); - model::OperandIndexMap> &nonconstTensors(void); + ir::OperandIndexMap> &constTensors(void); + ir::OperandIndexMap> &nonconstTensors(void); - void iterate(const std::function &fn); + void iterate(const std::function &fn); private: std::unique_ptr _const_mgr; std::unique_ptr _nonconst_mgr; - model::OperandIndexMap _ind_to_mgr; + ir::OperandIndexMap _ind_to_mgr; }; } // namespace cpu diff --git a/runtime/neurun/backend/cpu/TensorRegister.cc b/runtime/neurun/backend/cpu/TensorRegister.cc index 0f0a204..2701503 100644 --- a/runtime/neurun/backend/cpu/TensorRegister.cc +++ b/runtime/neurun/backend/cpu/TensorRegister.cc @@ -23,7 +23,7 @@ namespace backend namespace cpu { -TensorRegister::TensorRegister(const model::Operands &operands, +TensorRegister::TensorRegister(const ir::Operands &operands, const std::shared_ptr &tensor_builder) : _operands{operands}, _tensor_builder{tensor_builder} { diff --git a/runtime/neurun/backend/cpu/TensorRegister.h b/runtime/neurun/backend/cpu/TensorRegister.h index b302fd3..1bda9fc 100644 --- a/runtime/neurun/backend/cpu/TensorRegister.h +++ b/runtime/neurun/backend/cpu/TensorRegister.h @@ -30,16 +30,16 @@ namespace cpu class 
TensorRegister : public ITensorRegister { public: - TensorRegister(const model::Operands &operands, + TensorRegister(const ir::Operands &operands, const std::shared_ptr &tensor_builder); private: - const model::Operands &operands() const override { return _operands; } + const ir::Operands &operands() const override { return _operands; } std::shared_ptr tensor_builder() const override { return _tensor_builder; } bool supportSubTensor() const final { return false; } private: - const model::Operands &_operands; + const ir::Operands &_operands; const std::shared_ptr _tensor_builder; }; diff --git a/runtime/neurun/backend/cpu/kernel/OperationUtils.cc b/runtime/neurun/backend/cpu/kernel/OperationUtils.cc index dbcbac2..01215b8 100644 --- a/runtime/neurun/backend/cpu/kernel/OperationUtils.cc +++ b/runtime/neurun/backend/cpu/kernel/OperationUtils.cc @@ -191,7 +191,7 @@ int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift) return static_cast(std::floor(max_input_rescaled)); } -TensorDescriptor getTensorDescriptor(const ::neurun::model::Operand &o, ir::Layout frontend_layout) +TensorDescriptor getTensorDescriptor(const ir::Operand &o, ir::Layout frontend_layout) { TensorDescriptor descriptor; diff --git a/runtime/neurun/backend/cpu/kernel/OperationUtils.h b/runtime/neurun/backend/cpu/kernel/OperationUtils.h index f8ab905..4bd2b32 100644 --- a/runtime/neurun/backend/cpu/kernel/OperationUtils.h +++ b/runtime/neurun/backend/cpu/kernel/OperationUtils.h @@ -23,7 +23,7 @@ #include -#include "model/Operand.h" +#include "ir/Operand.h" #include "ir/DataType.h" #include @@ -138,7 +138,7 @@ void CalculateActivationRangeUint8(ir::Activation activation, const TensorDescri int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift); -TensorDescriptor getTensorDescriptor(const ::neurun::model::Operand &o, ir::Layout frontend_layout); +TensorDescriptor getTensorDescriptor(const ir::Operand &o, ir::Layout frontend_layout); uint32_t sizeOfData(OperandType type, const std::vector &dimensions); diff --git a/runtime/neurun/backend/cpu/operand/Tensor.h b/runtime/neurun/backend/cpu/operand/Tensor.h index ef0579f..dec6808 100644 --- a/runtime/neurun/backend/cpu/operand/Tensor.h +++ b/runtime/neurun/backend/cpu/operand/Tensor.h @@ -18,7 +18,7 @@ #define __NEURUN_BACKEND_CPU_OPERAND_TENSOR_H__ #include -#include "model/OperandInfo.h" +#include "ir/OperandInfo.h" namespace neurun { @@ -35,7 +35,7 @@ public: Tensor() = delete; public: - Tensor(const model::OperandInfo &info) : _info(info) + Tensor(const ir::OperandInfo &info) : _info(info) { // DO NOTHING } @@ -65,7 +65,7 @@ public: void access(const std::function &fn) final; private: - model::OperandInfo _info; + ir::OperandInfo _info; uint8_t *_buffer = nullptr; }; diff --git a/runtime/neurun/backend/hi_perf_cpu/KernelGenerator.h b/runtime/neurun/backend/hi_perf_cpu/KernelGenerator.h index 71322e7..3197995 100644 --- a/runtime/neurun/backend/hi_perf_cpu/KernelGenerator.h +++ b/runtime/neurun/backend/hi_perf_cpu/KernelGenerator.h @@ -19,7 +19,7 @@ #include -#include "model/Operands.h" +#include "ir/Operands.h" #include "TensorBuilder.h" namespace neurun @@ -32,12 +32,11 @@ namespace hi_perf_cpu class KernelGenerator : public IKernelGenerator { public: - KernelGenerator(const neurun::model::Operands &ctx, - const std::shared_ptr &tensor_builder); + KernelGenerator(const Operands &ctx, const std::shared_ptr &tensor_builder); // TODO add more ops private: - const neurun::model::Operands &_ctx; + const Operands &_ctx; std::shared_ptr 
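The cpu operand Tensor above is deliberately thin: it pairs the (now ir::) OperandInfo metadata with a raw buffer that the memory manager assigns later, so the tensor itself owns nothing. A reduced sketch of that shape, with Info standing in for ir::OperandInfo:

// Reduced analogue of backend::cpu::operand::Tensor: metadata plus an
// unowned buffer pointer that the memory manager fills in later.
#include <cstddef>
#include <cstdint>

struct Info // stand-in for ir::OperandInfo
{
  size_t total_bytes;
};

class TensorSketch
{
public:
  explicit TensorSketch(const Info &info) : _info(info) {}

  void setBuffer(uint8_t *buffer) { _buffer = buffer; } // set by the manager
  uint8_t *buffer() const { return _buffer; }
  size_t total_size() const { return _info.total_bytes; }

private:
  Info _info;
  uint8_t *_buffer = nullptr; // valid only between allocation and release
};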
_tensor_builder; }; diff --git a/runtime/neurun/backend/hi_perf_cpu/TensorBuilder.h b/runtime/neurun/backend/hi_perf_cpu/TensorBuilder.h index 500730d..af879a4 100644 --- a/runtime/neurun/backend/hi_perf_cpu/TensorBuilder.h +++ b/runtime/neurun/backend/hi_perf_cpu/TensorBuilder.h @@ -20,7 +20,7 @@ #include #include -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" namespace neurun { diff --git a/runtime/neurun/backend/srcn/Backend.h b/runtime/neurun/backend/srcn/Backend.h index 5426919..bc76a7e 100644 --- a/runtime/neurun/backend/srcn/Backend.h +++ b/runtime/neurun/backend/srcn/Backend.h @@ -19,7 +19,7 @@ #include #include -#include +#include #include "Config.h" #include "ConstantInitializer.h" @@ -42,7 +42,7 @@ public: std::shared_ptr config() const override { return _config; } std::unique_ptr - newContext(const model::Operands &operands, + newContext(const ir::Operands &operands, const std::shared_ptr &kb) const override { auto tensor_builder = std::make_shared(); diff --git a/runtime/neurun/backend/srcn/ConstantInitializer.cc b/runtime/neurun/backend/srcn/ConstantInitializer.cc index 3260d54..f152659 100644 --- a/runtime/neurun/backend/srcn/ConstantInitializer.cc +++ b/runtime/neurun/backend/srcn/ConstantInitializer.cc @@ -22,7 +22,7 @@ namespace { template -static void PermuteKernel(const neurun::model::Operand &model_obj, +static void PermuteKernel(const neurun::ir::Operand &model_obj, neurun::backend::operand::ITensor &obj, const std::vector &permutation) { @@ -81,15 +81,15 @@ namespace backend namespace srcn { -ConstantInitializer::ConstantInitializer(const model::Operands &operands, +ConstantInitializer::ConstantInitializer(const ir::Operands &operands, const std::shared_ptr &tensor_builder) : _operands{operands}, _tensor_builder{tensor_builder} { // DO NOTHING } -void ConstantInitializer::registerPermuteKernelInitializer(const model::OperandIndex &index, - const model::Operand &obj, +void ConstantInitializer::registerPermuteKernelInitializer(const ir::OperandIndex &index, + const ir::Operand &obj, const std::vector &permutation) { // For only CONSTANTS diff --git a/runtime/neurun/backend/srcn/ConstantInitializer.h b/runtime/neurun/backend/srcn/ConstantInitializer.h index 18dfc6c..ed75b0c 100644 --- a/runtime/neurun/backend/srcn/ConstantInitializer.h +++ b/runtime/neurun/backend/srcn/ConstantInitializer.h @@ -18,7 +18,7 @@ #define __NEURUN_COMPILER_SRCN_CONSTANT_INITIALIZER_H__ #include -#include +#include #include "TensorBuilder.h" #include @@ -32,11 +32,11 @@ namespace srcn class ConstantInitializer : public IConstantInitializer { public: - ConstantInitializer(const model::Operands &operands, + ConstantInitializer(const ir::Operands &operands, const std::shared_ptr &tensor_builder); public: - void registerPermuteKernelInitializer(const model::OperandIndex &index, const model::Operand &obj, + void registerPermuteKernelInitializer(const ir::OperandIndex &index, const ir::Operand &obj, const std::vector &permutation); public: @@ -45,11 +45,11 @@ public: void visit(const model::operation::TransposeConv &) override; private: - const model::Operands &operands() const override { return _operands; } + const ir::Operands &operands() const override { return _operands; } std::shared_ptr tensor_builder() const override { return _tensor_builder; } private: - const model::Operands &_operands; + const ir::Operands &_operands; std::shared_ptr _tensor_builder; }; diff --git a/runtime/neurun/backend/srcn/Convert.cc b/runtime/neurun/backend/srcn/Convert.cc index 46b11ca..1d80b2c 
100644 --- a/runtime/neurun/backend/srcn/Convert.cc +++ b/runtime/neurun/backend/srcn/Convert.cc @@ -62,10 +62,10 @@ ir::Shape asTensorShape(const ir::Shape &shape, ir::Layout frontend_layout, return ret; } -model::OperandInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo, - ir::Layout frontend_layout, ir::Layout backend_layout) +ir::OperandInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo, + ir::Layout frontend_layout, ir::Layout backend_layout) { - model::OperandInfo info(asTensorShape(shape, frontend_layout, backend_layout), typeInfo); + ir::OperandInfo info(asTensorShape(shape, frontend_layout, backend_layout), typeInfo); return info; } diff --git a/runtime/neurun/backend/srcn/Convert.h b/runtime/neurun/backend/srcn/Convert.h index 3268da2..64be46e 100644 --- a/runtime/neurun/backend/srcn/Convert.h +++ b/runtime/neurun/backend/srcn/Convert.h @@ -21,7 +21,7 @@ #include #include #include -#include +#include namespace neurun { @@ -36,8 +36,8 @@ ir::Shape asKernelShape(const ir::Shape &shape, kernel::FilterLayout frontend_la ir::Shape asTensorShape(const ir::Shape &shape, ir::Layout frontend_layout, ir::Layout backend_layout); -model::OperandInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo, - ir::Layout frontend_layout, ir::Layout backend_layout); +ir::OperandInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo, + ir::Layout frontend_layout, ir::Layout backend_layout); } // namespace srcn } // namespace backend diff --git a/runtime/neurun/backend/srcn/KernelGenerator.cc b/runtime/neurun/backend/srcn/KernelGenerator.cc index d3f86d4..5b30fc8 100644 --- a/runtime/neurun/backend/srcn/KernelGenerator.cc +++ b/runtime/neurun/backend/srcn/KernelGenerator.cc @@ -40,7 +40,7 @@ namespace backend namespace srcn { -KernelGenerator::KernelGenerator(const neurun::model::Operands &operand_ctx, +KernelGenerator::KernelGenerator(const ir::Operands &operand_ctx, const std::shared_ptr &tensor_builder, const std::shared_ptr &kb) : _ctx(operand_ctx), _tensor_builder(tensor_builder), _kernel_builder(kb), diff --git a/runtime/neurun/backend/srcn/KernelGenerator.h b/runtime/neurun/backend/srcn/KernelGenerator.h index 34b44a5..aabecae 100644 --- a/runtime/neurun/backend/srcn/KernelGenerator.h +++ b/runtime/neurun/backend/srcn/KernelGenerator.h @@ -18,7 +18,7 @@ #define __NEURUN_BACKEND_SRCN_KERNEL_GENERATOR_H__ #include "backend/IKernelGenerator.h" -#include "model/Operands.h" +#include "ir/Operands.h" #include "operand/Tensor.h" #include "backend/CustomKernelBuilder.h" #include "TensorBuilder.h" @@ -33,8 +33,7 @@ namespace srcn class KernelGenerator : public IKernelGenerator { public: - KernelGenerator(const neurun::model::Operands &ctx, - const std::shared_ptr &tensor_builder, + KernelGenerator(const ir::Operands &ctx, const std::shared_ptr &tensor_builder, const std::shared_ptr &kb); using IKernelGenerator::visit; @@ -47,7 +46,7 @@ public: void visit(const model::operation::Add &) override; private: - const neurun::model::Operands &_ctx; + const ir::Operands &_ctx; std::shared_ptr _tensor_builder; std::shared_ptr _kernel_builder; ir::Layout _current_subg_layout; diff --git a/runtime/neurun/backend/srcn/MemoryManager.cc b/runtime/neurun/backend/srcn/MemoryManager.cc index 7871f29..4fae8c4 100644 --- a/runtime/neurun/backend/srcn/MemoryManager.cc +++ b/runtime/neurun/backend/srcn/MemoryManager.cc @@ -40,19 +40,19 @@ IMemoryPlanner *MemoryManager::createMemoryPlanner() return MemoryPlannerFactory::get().create(planner_id); } -void 
MemoryManager::buildTensor(const model::OperandIndex &ind, const model::OperandInfo &info, +void MemoryManager::buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &info, ir::Layout layout) { auto tensor = std::make_shared(info, layout); _tensors[ind] = tensor; } -void MemoryManager::claimPlan(const model::OperandIndex &ind, uint32_t size) +void MemoryManager::claimPlan(const ir::OperandIndex &ind, uint32_t size) { _mem_planner->claim(ind, size); } -void MemoryManager::releasePlan(const model::OperandIndex &ind) { _mem_planner->release(ind); } +void MemoryManager::releasePlan(const ir::OperandIndex &ind) { _mem_planner->release(ind); } void MemoryManager::allocate(void) { diff --git a/runtime/neurun/backend/srcn/MemoryManager.h b/runtime/neurun/backend/srcn/MemoryManager.h index a6bd7e0..73d9d49 100644 --- a/runtime/neurun/backend/srcn/MemoryManager.h +++ b/runtime/neurun/backend/srcn/MemoryManager.h @@ -20,7 +20,7 @@ #include "backend/IMemoryManager.h" #include "MemoryPlanner.h" #include "operand/Tensor.h" -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" namespace neurun { @@ -38,19 +38,18 @@ public: void allocate(void) override; void deallocate(void) override { _mem_alloc->release(); } - void buildTensor(const model::OperandIndex &ind, const model::OperandInfo &info, - ir::Layout layout); - void claimPlan(const model::OperandIndex &ind, uint32_t size); - void releasePlan(const model::OperandIndex &ind); + void buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &info, ir::Layout layout); + void claimPlan(const ir::OperandIndex &ind, uint32_t size); + void releasePlan(const ir::OperandIndex &ind); - model::OperandIndexMap> &tensors(void) { return _tensors; } + ir::OperandIndexMap> &tensors(void) { return _tensors; } private: IMemoryPlanner *createMemoryPlanner(); private: - model::OperandIndexMap> _tensors; - model::OperandIndexMap _tensor_mem_map; + ir::OperandIndexMap> _tensors; + ir::OperandIndexMap _tensor_mem_map; std::shared_ptr _mem_planner; std::shared_ptr _mem_alloc; }; diff --git a/runtime/neurun/backend/srcn/MemoryPlanner.cc b/runtime/neurun/backend/srcn/MemoryPlanner.cc index 96ce27b..f5063e0 100644 --- a/runtime/neurun/backend/srcn/MemoryPlanner.cc +++ b/runtime/neurun/backend/srcn/MemoryPlanner.cc @@ -33,7 +33,7 @@ Allocator::Allocator(uint32_t capacity) VERBOSE(ALLOC) << "base pointer: " << static_cast(_base.get()) << std::endl; } -void BumpPlanner::claim(const model::OperandIndex &ind, size_t size) +void BumpPlanner::claim(const ir::OperandIndex &ind, size_t size) { assert(size != 0); @@ -45,7 +45,7 @@ void BumpPlanner::claim(const model::OperandIndex &ind, size_t size) << std::endl; } -void BumpPlanner::release(const model::OperandIndex &ind) +void BumpPlanner::release(const ir::OperandIndex &ind) { VERBOSE(BP_PLANNER) << "RELEASE(#" << ind.value() << "): " << "NOTHING does" << std::endl; @@ -54,7 +54,7 @@ void BumpPlanner::release(const model::OperandIndex &ind) // There are some assumptions for claiming memory(== making a reservation for memory). // 1. About _claim_table(std::map). // - The table's data structure is std::map so that it always sorts -// value(model::OperandIndex) by key(base_offset). +// value(OperandIndex) by key(base_offset). // - This claim() inserts key/value into _claim_table and the release() removes the key/value from // _claim_table. // - _claim_table shows the memory status at a certain point in time. 
Therefore, @@ -65,7 +65,7 @@ void BumpPlanner::release(const model::OperandIndex &ind) // point in time, it means the place at the offset can be claimed. // 2. In the loop for _claim_table, we can assume the current claim_base_offset value is bigger than // the previous claim_base_offset. -void FirstFitPlanner::claim(const model::OperandIndex &ind, size_t size) +void FirstFitPlanner::claim(const ir::OperandIndex &ind, size_t size) { assert(size != 0); @@ -98,7 +98,7 @@ void FirstFitPlanner::claim(const model::OperandIndex &ind, size_t size) } } -void FirstFitPlanner::release(const model::OperandIndex &ind) +void FirstFitPlanner::release(const ir::OperandIndex &ind) { for (auto it = _claim_table.cbegin(); it != _claim_table.cend(); ++it) { diff --git a/runtime/neurun/backend/srcn/MemoryPlanner.h b/runtime/neurun/backend/srcn/MemoryPlanner.h index c66efec..a8b41a3 100644 --- a/runtime/neurun/backend/srcn/MemoryPlanner.h +++ b/runtime/neurun/backend/srcn/MemoryPlanner.h @@ -25,7 +25,7 @@ #include #include -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" namespace neurun { @@ -66,19 +66,19 @@ private: */ struct IMemoryPlanner { - using MemoryPlans = model::OperandIndexMap; + using MemoryPlans = ir::OperandIndexMap; /** * @brief Claim memory for operand * @param[in] index The operand index * @param[in] size The size of the memory */ - virtual void claim(const model::OperandIndex &, size_t) = 0; + virtual void claim(const ir::OperandIndex &, size_t) = 0; /** * @brief Release memory for operand * @param[in] index The operand index */ - virtual void release(const model::OperandIndex &) = 0; + virtual void release(const ir::OperandIndex &) = 0; /** * @brief Get capacity for memory planning * @return The value of capacity @@ -104,12 +104,12 @@ public: * @param[in] index The operand index * @param[in] size The size of the memory */ - void claim(const model::OperandIndex &, size_t) override; + void claim(const ir::OperandIndex &, size_t) override; /** * @brief Release memory for operand by bump way * @param[in] index The operand index */ - void release(const model::OperandIndex &) override; + void release(const ir::OperandIndex &) override; /** * @brief Get capacity for memory planning * @return The value of capacity @@ -137,12 +137,12 @@ public: * @param[in] index The operand index * @param[in] size The size of the memory */ - void claim(const model::OperandIndex &, size_t) override; + void claim(const ir::OperandIndex &, size_t) override; /** * @brief Release memory for operand by firstfit way * @param[in] index The operand index */ - void release(const model::OperandIndex &) override; + void release(const ir::OperandIndex &) override; /** * @brief Get capacity for memory planning * @return The value of capacity @@ -158,7 +158,7 @@ private: uint32_t _capacity = 0; MemoryPlans _mem_plans; // Use std::map because claim() assumes that _claim_table is sorted by uint32_t(base_offset) - std::map _claim_table; + std::map _claim_table; }; } // namespace srcn diff --git a/runtime/neurun/backend/srcn/ShapeFixer.cc b/runtime/neurun/backend/srcn/ShapeFixer.cc index 5dbd9fd..5251478 100644 --- a/runtime/neurun/backend/srcn/ShapeFixer.cc +++ b/runtime/neurun/backend/srcn/ShapeFixer.cc @@ -25,7 +25,7 @@ namespace backend namespace srcn { -ShapeFixer::ShapeFixer(const neurun::model::Operands &operand_ctx, +ShapeFixer::ShapeFixer(const ir::Operands &operand_ctx, const std::shared_ptr &tensor_builder) : _ctx(operand_ctx), _tensor_builder(tensor_builder) { diff --git 
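The comment block above spells out the first-fit invariants: _claim_table is a std::map keyed by base_offset, so iterating it visits claimed blocks in ascending offset order, and the first gap wide enough wins. A self-contained sketch of claim()/release() under those assumptions (uint32_t again stands in for ir::OperandIndex):

// First-fit planner sketch following the commented invariants above: the
// sorted claim table exposes gaps in offset order, and claim() takes the
// first one that fits.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>
#include <utility>

class FirstFitSketch
{
public:
  uint32_t claim(uint32_t ind, size_t size)
  {
    assert(size != 0);
    uint32_t next_offset = 0;
    for (const auto &entry : _claim_table) // ascending base_offset
    {
      if (next_offset + size <= entry.first)
        break; // a gap before this block is large enough
      next_offset = entry.first + static_cast<uint32_t>(entry.second.second);
    }
    _claim_table[next_offset] = {ind, size};
    if (_capacity < next_offset + size)
      _capacity = static_cast<uint32_t>(next_offset + size);
    return next_offset;
  }

  void release(uint32_t ind)
  {
    for (auto it = _claim_table.cbegin(); it != _claim_table.cend(); ++it)
    {
      if (it->second.first == ind)
      {
        _claim_table.erase(it); // the freed range becomes a reusable gap
        return;
      }
    }
  }

private:
  uint32_t _capacity = 0;
  // base_offset -> (operand index, size); std::map keeps it sorted by offset
  std::map<uint32_t, std::pair<uint32_t, size_t>> _claim_table;
};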
a/runtime/neurun/backend/srcn/ShapeFixer.h b/runtime/neurun/backend/srcn/ShapeFixer.h index 5403fbb..ec9078e 100644 --- a/runtime/neurun/backend/srcn/ShapeFixer.h +++ b/runtime/neurun/backend/srcn/ShapeFixer.h @@ -19,7 +19,7 @@ #include -#include "model/Operands.h" +#include "ir/Operands.h" #include "operand/Tensor.h" #include "TensorBuilder.h" @@ -33,8 +33,7 @@ namespace srcn class ShapeFixer : public IShapeFixer { public: - ShapeFixer(const neurun::model::Operands &ctx, - const std::shared_ptr &tensor_builder); + ShapeFixer(const ir::Operands &ctx, const std::shared_ptr &tensor_builder); std::shared_ptr tensor_builder() override { return _tensor_builder; } @@ -45,7 +44,7 @@ public: void visit(const model::operation::Add &) override; private: - const neurun::model::Operands &_ctx; + const ir::Operands &_ctx; std::shared_ptr _tensor_builder; }; diff --git a/runtime/neurun/backend/srcn/TensorBuilder.cc b/runtime/neurun/backend/srcn/TensorBuilder.cc index bbf59ed..5ac25c3 100644 --- a/runtime/neurun/backend/srcn/TensorBuilder.cc +++ b/runtime/neurun/backend/srcn/TensorBuilder.cc @@ -32,8 +32,8 @@ TensorBuilder::TensorBuilder() : _tensor_mgr{new TensorManager()} // DO NOTHING } -void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind, - const model::OperandInfo &tensor_info, +void TensorBuilder::registerTensorInfo(const ir::OperandIndex &ind, + const ir::OperandInfo &tensor_info, ir::Layout backend_layout, bool as_const) { _tensor_info_map.emplace(ind, tensor_info); @@ -43,14 +43,13 @@ void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind, _constants.append(ind); } -void TensorBuilder::registerSubTensorInfo(const model::OperandIndex &, - const compiler::SubTensorInfo &) +void TensorBuilder::registerSubTensorInfo(const ir::OperandIndex &, const compiler::SubTensorInfo &) { // Not supported yet assert(false); } -void TensorBuilder::notifyFirstUse(const model::OperandIndex &ind) +void TensorBuilder::notifyFirstUse(const ir::OperandIndex &ind) { assert(_tensor_info_map.find(ind) != _tensor_info_map.end()); const auto &tensor_info = _tensor_info_map.at(ind); @@ -60,9 +59,9 @@ void TensorBuilder::notifyFirstUse(const model::OperandIndex &ind) _tensor_mgr->claimPlan(ind, size); } -void TensorBuilder::notifyLastUse(const model::OperandIndex &ind) { _tensor_mgr->releasePlan(ind); } +void TensorBuilder::notifyLastUse(const ir::OperandIndex &ind) { _tensor_mgr->releasePlan(ind); } -bool TensorBuilder::isRegistered(const model::OperandIndex &ind) const +bool TensorBuilder::isRegistered(const ir::OperandIndex &ind) const { return _tensor_info_map.find(ind) != _tensor_info_map.end(); } @@ -86,14 +85,14 @@ void TensorBuilder::allocateNonconsts() } std::shared_ptr<::neurun::backend::operand::ITensor> -TensorBuilder::tensorAt(const model::OperandIndex &ind) +TensorBuilder::tensorAt(const ir::OperandIndex &ind) { return _tensor_mgr->at(ind); } void TensorBuilder::iterate(const IterateFunction &fn) { _tensor_mgr->iterate(fn); } -std::shared_ptr TensorBuilder::at(const ::neurun::model::OperandIndex &ind) +std::shared_ptr TensorBuilder::at(const ir::OperandIndex &ind) { return _tensor_mgr->at(ind); } diff --git a/runtime/neurun/backend/srcn/TensorBuilder.h b/runtime/neurun/backend/srcn/TensorBuilder.h index 53a8123..4fa9e54 100644 --- a/runtime/neurun/backend/srcn/TensorBuilder.h +++ b/runtime/neurun/backend/srcn/TensorBuilder.h @@ -21,7 +21,7 @@ #include #include "operand/Tensor.h" -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" #include "TensorManager.h" 
namespace neurun @@ -42,20 +42,20 @@ public: * @param[in] info Operand information * @param[in] layout Operand data layout */ - void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info, + void registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info, ir::Layout backend_layout, bool as_const) override; /** * @brief Register subtensor information to allocate on CPU backend * @param[in] ind Operand index * @param[in] info Tensor information */ - void registerSubTensorInfo(const model::OperandIndex &ind, + void registerSubTensorInfo(const ir::OperandIndex &ind, const compiler::SubTensorInfo &info) override; - void notifyFirstUse(const model::OperandIndex &) override; - void notifyLastUse(const model::OperandIndex &) override; + void notifyFirstUse(const ir::OperandIndex &) override; + void notifyLastUse(const ir::OperandIndex &) override; - bool isRegistered(const model::OperandIndex &) const override; + bool isRegistered(const ir::OperandIndex &) const override; void prepare(void) override; void allocateConsts() override; @@ -64,7 +64,7 @@ public: void finalize() override { /* DO NOTHING */} std::shared_ptr<::neurun::backend::operand::ITensor> - tensorAt(const model::OperandIndex &ind) override; + tensorAt(const ir::OperandIndex &ind) override; void iterate(const IterateFunction &fn) override; @@ -73,13 +73,13 @@ public: std::unique_ptr releaseTensorManager(void) override; - std::shared_ptr at(const ::neurun::model::OperandIndex &ind); + std::shared_ptr at(const ir::OperandIndex &ind); private: std::unique_ptr _tensor_mgr; - model::OperandIndexMap _tensor_info_map; - model::OperandIndexMap _tensor_layout_map; - model::OperandIndexSequence _constants; + ir::OperandIndexMap _tensor_info_map; + ir::OperandIndexMap _tensor_layout_map; + ir::OperandIndexSequence _constants; }; } // namespace srcn diff --git a/runtime/neurun/backend/srcn/TensorManager.cc b/runtime/neurun/backend/srcn/TensorManager.cc index e6462db..adf357e 100644 --- a/runtime/neurun/backend/srcn/TensorManager.cc +++ b/runtime/neurun/backend/srcn/TensorManager.cc @@ -36,9 +36,8 @@ void TensorManager::deallocateConsts(void) { _const_mgr->deallocate(); } void TensorManager::deallocateNonconsts(void) { _nonconst_mgr->deallocate(); } -void TensorManager::buildTensor(const model::OperandIndex &ind, - const model::OperandInfo &tensor_info, ir::Layout layout, - bool as_const) +void TensorManager::buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &tensor_info, + ir::Layout layout, bool as_const) { assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end()); if (as_const) @@ -53,35 +52,35 @@ void TensorManager::buildTensor(const model::OperandIndex &ind, } } -void TensorManager::claimPlan(const model::OperandIndex &ind, uint32_t size) +void TensorManager::claimPlan(const ir::OperandIndex &ind, uint32_t size) { assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end()); _ind_to_mgr.at(ind).claimPlan(ind, size); } -void TensorManager::releasePlan(const model::OperandIndex &ind) +void TensorManager::releasePlan(const ir::OperandIndex &ind) { assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end()); _ind_to_mgr.at(ind).releasePlan(ind); } -std::shared_ptr TensorManager::at(const ::neurun::model::OperandIndex &ind) +std::shared_ptr TensorManager::at(const ir::OperandIndex &ind) { assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end()); return _ind_to_mgr.at(ind).tensors().at(ind); } -model::OperandIndexMap> &TensorManager::constTensors(void) +ir::OperandIndexMap> &TensorManager::constTensors(void) { return 
_const_mgr->tensors(); } -model::OperandIndexMap> &TensorManager::nonconstTensors(void) +ir::OperandIndexMap> &TensorManager::nonconstTensors(void) { return _nonconst_mgr->tensors(); } -void TensorManager::iterate(const std::function &fn) +void TensorManager::iterate(const std::function &fn) { for (auto it : _nonconst_mgr->tensors()) fn(it.first); diff --git a/runtime/neurun/backend/srcn/TensorManager.h b/runtime/neurun/backend/srcn/TensorManager.h index 393828f..d4390d8 100644 --- a/runtime/neurun/backend/srcn/TensorManager.h +++ b/runtime/neurun/backend/srcn/TensorManager.h @@ -19,7 +19,7 @@ #include "backend/ITensorManager.h" #include "MemoryManager.h" -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" namespace neurun { @@ -39,23 +39,23 @@ public: void deallocateConsts(void) override; void deallocateNonconsts(void) override; - void buildTensor(const model::OperandIndex &ind, const model::OperandInfo &tensor_info, + void buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &tensor_info, ir::Layout layout, bool as_const); - void claimPlan(const model::OperandIndex &ind, uint32_t size); - void releasePlan(const model::OperandIndex &ind); + void claimPlan(const ir::OperandIndex &ind, uint32_t size); + void releasePlan(const ir::OperandIndex &ind); - std::shared_ptr at(const ::neurun::model::OperandIndex &ind); + std::shared_ptr at(const ir::OperandIndex &ind); - model::OperandIndexMap> &constTensors(void); - model::OperandIndexMap> &nonconstTensors(void); + ir::OperandIndexMap> &constTensors(void); + ir::OperandIndexMap> &nonconstTensors(void); - void iterate(const std::function &fn); + void iterate(const std::function &fn); private: std::unique_ptr _const_mgr; std::unique_ptr _nonconst_mgr; - model::OperandIndexMap _ind_to_mgr; + ir::OperandIndexMap _ind_to_mgr; }; } // namespace srcn diff --git a/runtime/neurun/backend/srcn/TensorRegister.cc b/runtime/neurun/backend/srcn/TensorRegister.cc index d35b15f..8fb5b54 100644 --- a/runtime/neurun/backend/srcn/TensorRegister.cc +++ b/runtime/neurun/backend/srcn/TensorRegister.cc @@ -26,7 +26,7 @@ namespace backend namespace srcn { -TensorRegister::TensorRegister(const model::Operands &operands, +TensorRegister::TensorRegister(const ir::Operands &operands, const std::shared_ptr &tensor_builder) : _operands{operands}, _tensor_builder{tensor_builder} { @@ -53,7 +53,7 @@ void TensorRegister::visit(const model::operation::Conv2D &node) const auto backend_filter_layout = backend_layout == ir::Layout::NHWC ? kernel::FilterLayout::HWIO : kernel::FilterLayout::OIHW; - model::OperandInfo backend_info{ + ir::OperandInfo backend_info{ asKernelShape(kernel_obj.shape(), frontend_filter_layout, backend_filter_layout), kernel_obj.info().typeInfo()}; _tensor_builder->registerTensorInfo(kernel_index, backend_info, backend_layout, @@ -80,7 +80,7 @@ void TensorRegister::visit(const model::operation::DepthwiseConv2D &node) const auto backend_filter_layout = backend_layout == ir::Layout::NHWC ? kernel::FilterLayout::HWIO : kernel::FilterLayout::OIHW; - model::OperandInfo backend_info{ + ir::OperandInfo backend_info{ asKernelShape(kernel_obj.shape(), frontend_filter_layout, backend_filter_layout), kernel_obj.info().typeInfo()}; _tensor_builder->registerTensorInfo(kernel_index, backend_info, backend_layout, @@ -106,7 +106,7 @@ void TensorRegister::visit(const model::operation::TransposeConv &node) const auto backend_filter_layout = backend_layout == ir::Layout::NHWC ? 
kernel::FilterLayout::HWOI : kernel::FilterLayout::IOHW; - model::OperandInfo backend_info{ + ir::OperandInfo backend_info{ asKernelShape(kernel_obj.shape(), frontend_filter_layout, backend_filter_layout), kernel_obj.info().typeInfo()}; _tensor_builder->registerTensorInfo(kernel_index, backend_info, backend_layout, diff --git a/runtime/neurun/backend/srcn/TensorRegister.h b/runtime/neurun/backend/srcn/TensorRegister.h index a558cb8..84ec0e9 100644 --- a/runtime/neurun/backend/srcn/TensorRegister.h +++ b/runtime/neurun/backend/srcn/TensorRegister.h @@ -30,7 +30,7 @@ namespace srcn class TensorRegister : public ITensorRegister { public: - TensorRegister(const model::Operands &operands, + TensorRegister(const ir::Operands &operands, const std::shared_ptr &tensor_builder); public: @@ -39,12 +39,12 @@ public: void visit(const model::operation::TransposeConv &) override; private: - const model::Operands &operands() const override { return _operands; } + const ir::Operands &operands() const override { return _operands; } std::shared_ptr tensor_builder() const override { return _tensor_builder; } bool supportSubTensor() const final { return false; } private: - const model::Operands &_operands; + const ir::Operands &_operands; const std::shared_ptr _tensor_builder; }; diff --git a/runtime/neurun/backend/srcn/kernel/OperationUtils.cc b/runtime/neurun/backend/srcn/kernel/OperationUtils.cc index 4795c38..211e220 100644 --- a/runtime/neurun/backend/srcn/kernel/OperationUtils.cc +++ b/runtime/neurun/backend/srcn/kernel/OperationUtils.cc @@ -97,7 +97,7 @@ nnfw::srcn::convType_t convertLayout(ir::Layout layout) } } -TensorDescriptor getTensorDescriptor(const ::neurun::model::Operand &o, ir::Layout frontend_layout, +TensorDescriptor getTensorDescriptor(const ir::Operand &o, ir::Layout frontend_layout, ir::Layout backend_layout) { TensorDescriptor descriptor; diff --git a/runtime/neurun/backend/srcn/kernel/OperationUtils.h b/runtime/neurun/backend/srcn/kernel/OperationUtils.h index a0610a2..aa163a1 100644 --- a/runtime/neurun/backend/srcn/kernel/OperationUtils.h +++ b/runtime/neurun/backend/srcn/kernel/OperationUtils.h @@ -21,7 +21,7 @@ #include #include -#include "model/Operand.h" +#include "ir/Operand.h" #include "ir/DataType.h" #include #include @@ -73,7 +73,7 @@ Coordinates convertCoordinates(const Coordinates &from_coordinates, FilterLayout nnfw::srcn::convType_t convertLayout(ir::Layout layout); -TensorDescriptor getTensorDescriptor(const ::neurun::model::Operand &o, ir::Layout frontend_layout, +TensorDescriptor getTensorDescriptor(const ir::Operand &o, ir::Layout frontend_layout, ir::Layout backend_layout); } // namespace kernel diff --git a/runtime/neurun/backend/srcn/operand/Tensor.h b/runtime/neurun/backend/srcn/operand/Tensor.h index af25593..e16234a 100644 --- a/runtime/neurun/backend/srcn/operand/Tensor.h +++ b/runtime/neurun/backend/srcn/operand/Tensor.h @@ -19,7 +19,7 @@ #include #include -#include "model/OperandInfo.h" +#include "ir/OperandInfo.h" namespace neurun { @@ -36,7 +36,7 @@ public: Tensor() = delete; public: - Tensor(const model::OperandInfo &info, ir::Layout layout) : _info(info), _layout(layout) + Tensor(const ir::OperandInfo &info, ir::Layout layout) : _info(info), _layout(layout) { // DO NOTHING } @@ -66,7 +66,7 @@ public: void access(const std::function &fn) final; private: - model::OperandInfo _info; + ir::OperandInfo _info; uint8_t *_buffer = nullptr; ir::Layout _layout; }; diff --git a/runtime/neurun/core/include/backend/Backend.h 
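The three TensorRegister visitors above differ only in which kernel filter layout they pick for the backend layout. Condensed into two helpers, with the enums re-declared locally so the sketch compiles on its own:

// Filter-layout selection as in the srcn TensorRegister hunks above.
enum class Layout { NHWC, NCHW };
enum class FilterLayout { HWIO, OIHW, HWOI, IOHW };

// Conv2D and DepthwiseConv2D use HWIO for NHWC backends, OIHW otherwise...
FilterLayout convFilterLayout(Layout backend_layout)
{
  return backend_layout == Layout::NHWC ? FilterLayout::HWIO : FilterLayout::OIHW;
}

// ...while TransposeConv swaps the input/output channel order.
FilterLayout transposeConvFilterLayout(Layout backend_layout)
{
  return backend_layout == Layout::NHWC ? FilterLayout::HWOI : FilterLayout::IOHW;
}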
b/runtime/neurun/core/include/backend/Backend.h index 2fef3d0..9c4484f 100644 --- a/runtime/neurun/core/include/backend/Backend.h +++ b/runtime/neurun/core/include/backend/Backend.h @@ -19,7 +19,7 @@ #include -#include "model/Operands.h" +#include "ir/Operands.h" namespace neurun { @@ -57,7 +57,7 @@ public: virtual std::shared_ptr config() const = 0; virtual std::unique_ptr - newContext(const model::Operands &operands, + newContext(const ir::Operands &operands, const std::shared_ptr &kb) const = 0; }; diff --git a/runtime/neurun/core/include/backend/IConstantInitializer.h b/runtime/neurun/core/include/backend/IConstantInitializer.h index 89535a4..bb19279 100644 --- a/runtime/neurun/core/include/backend/IConstantInitializer.h +++ b/runtime/neurun/core/include/backend/IConstantInitializer.h @@ -22,8 +22,8 @@ #include "ITensorBuilder.h" #include "ir/Layout.h" -#include "model/Operand.h" -#include "model/Operands.h" +#include "ir/Operand.h" +#include "ir/Operands.h" #include "model/OperationVisitor.h" #include "model/Subgraph.h" #include "util/logging.h" @@ -32,7 +32,7 @@ namespace { template -static void Init(const neurun::model::Operand &model_obj, neurun::backend::operand::ITensor &obj, +static void Init(const neurun::ir::Operand &model_obj, neurun::backend::operand::ITensor &obj, const bool copy, const neurun::ir::Layout frontend_layout = neurun::ir::Layout::UNKNOWN) { @@ -134,13 +134,13 @@ static void Init(const neurun::model::Operand &model_obj, neurun::backend::opera } template -void copyInit(const neurun::model::Operand &model_obj, neurun::backend::operand::ITensor &obj) +void copyInit(const neurun::ir::Operand &model_obj, neurun::backend::operand::ITensor &obj) { Init(model_obj, obj, true); } template -void permuteInit(const neurun::model::Operand &model_obj, neurun::backend::operand::ITensor &obj, +void permuteInit(const neurun::ir::Operand &model_obj, neurun::backend::operand::ITensor &obj, const neurun::ir::Layout frontend_layout) { const bool copy = frontend_layout == obj.layout(); @@ -177,9 +177,9 @@ public: } public: - using Initializer = std::function; + using Initializer = std::function; - void generate(const model::Subgraph &subg, const model::Operands &operands) + void generate(const model::Subgraph &subg, const ir::Operands &operands) { _current_subg_layout = subg.getLayout(); subg.accept(*this); @@ -199,15 +199,15 @@ public: protected: #define OP(InternalName) \ virtual void visit(const model::operation::InternalName &) override { /* DO NOTHING */} -#include "model/Operations.lst" +#include "ir/Operations.lst" #undef OP protected: - virtual const model::Operands &operands() const = 0; + virtual const ir::Operands &operands() const = 0; virtual std::shared_ptr tensor_builder() const = 0; protected: - void registerCopyInitializer(const model::OperandIndex &index, const model::Operand &obj) + void registerCopyInitializer(const ir::OperandIndex &index, const ir::Operand &obj) { // For only CONSTANTS // TODO Add to check if tensor has been allocated @@ -239,7 +239,7 @@ protected: } protected: - void registerPermuteInitializer(const model::OperandIndex &index, const model::Operand &obj) + void registerPermuteInitializer(const ir::OperandIndex &index, const ir::Operand &obj) { // For only CONSTANTS // TODO Add to check if tensor has been allocated @@ -272,10 +272,10 @@ protected: } private: - bool exist(const model::OperandIndex &ind) { return _init_map.find(ind) != _init_map.end(); } + bool exist(const ir::OperandIndex &ind) { return _init_map.find(ind) != _init_map.end(); } 
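The _init_map machinery above stores one Initializer per constant operand; registerCopyInitializer() and registerPermuteInitializer() just bind the right callback, and generate() runs them once tensors exist. A stripped-down sketch of the registration pattern, with Operand and ITensor reduced to the fields the callback touches:

// Registration sketch: constants get a callback now, the callback copies
// data into the backend tensor later (permuteInit would reorder instead).
#include <cstddef>
#include <cstdint>
#include <functional>
#include <unordered_map>

struct Operand // stand-in for ir::Operand
{
  const uint8_t *base;
  size_t size;
};
struct ITensor // stand-in for backend::operand::ITensor
{
  uint8_t *buf;
};

using Initializer = std::function<void(const Operand &, ITensor &)>;
std::unordered_map<uint32_t, Initializer> init_map; // keyed by operand index

void registerCopyInit(uint32_t index)
{
  init_map[index] = [](const Operand &src, ITensor &dst) {
    for (size_t i = 0; i < src.size; ++i)
      dst.buf[i] = src.base[i]; // byte-for-byte copy, layouts already match
  };
}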
protected: - std::unordered_map _init_map; + std::unordered_map _init_map; ir::Layout _current_subg_layout; }; diff --git a/runtime/neurun/core/include/backend/IKernelGenerator.h b/runtime/neurun/core/include/backend/IKernelGenerator.h index 42e4174..dcb9350 100644 --- a/runtime/neurun/core/include/backend/IKernelGenerator.h +++ b/runtime/neurun/core/include/backend/IKernelGenerator.h @@ -50,7 +50,7 @@ protected: { \ throw std::runtime_error("KernelGenerator: NYI for operation '" #InternalName "'"); \ } -#include "model/Operations.lst" +#include "ir/Operations.lst" #undef OP protected: diff --git a/runtime/neurun/core/include/backend/IShapeFixer.h b/runtime/neurun/core/include/backend/IShapeFixer.h index f33f4ec..6d52129 100644 --- a/runtime/neurun/core/include/backend/IShapeFixer.h +++ b/runtime/neurun/core/include/backend/IShapeFixer.h @@ -44,7 +44,7 @@ protected: { \ throw std::runtime_error("ShapeFixer: NYI for operation '" #InternalName "'"); \ } -#include "model/Operations.lst" +#include "ir/Operations.lst" #undef OP public: diff --git a/runtime/neurun/core/include/backend/ITensorBuilder.h b/runtime/neurun/core/include/backend/ITensorBuilder.h index 8fdc2ef..a9fce0a 100644 --- a/runtime/neurun/core/include/backend/ITensorBuilder.h +++ b/runtime/neurun/core/include/backend/ITensorBuilder.h @@ -19,8 +19,8 @@ #include -#include "model/Index.h" -#include "model/OperandInfo.h" +#include "ir/Index.h" +#include "ir/OperandInfo.h" #include "model/Operation.h" #include "ir/Layout.h" #include "operand/ITensor.h" @@ -34,7 +34,7 @@ namespace backend struct ITensorBuilder { - using IterateFunction = std::function; + using IterateFunction = std::function; virtual ~ITensorBuilder(void) = default; @@ -42,18 +42,17 @@ struct ITensorBuilder /** * @brief Register tensor information to allocate on backend */ - virtual void registerTensorInfo(const model::OperandIndex &, const model::OperandInfo &, + virtual void registerTensorInfo(const ir::OperandIndex &, const ir::OperandInfo &, ir::Layout backend_layout, bool as_const) = 0; /** * @brief Register subtensor information to allocate on backend */ - virtual void registerSubTensorInfo(const model::OperandIndex &, - const compiler::SubTensorInfo &) = 0; + virtual void registerSubTensorInfo(const ir::OperandIndex &, const compiler::SubTensorInfo &) = 0; - virtual void notifyFirstUse(const model::OperandIndex &) = 0; - virtual void notifyLastUse(const model::OperandIndex &) = 0; + virtual void notifyFirstUse(const ir::OperandIndex &) = 0; + virtual void notifyLastUse(const ir::OperandIndex &) = 0; - virtual bool isRegistered(const model::OperandIndex &) const = 0; + virtual bool isRegistered(const ir::OperandIndex &) const = 0; virtual void prepare(void) = 0; virtual void allocateConsts() = 0; @@ -62,7 +61,7 @@ struct ITensorBuilder virtual void finalize() = 0; virtual std::shared_ptr<::neurun::backend::operand::ITensor> - tensorAt(const model::OperandIndex &ind) = 0; + tensorAt(const ir::OperandIndex &ind) = 0; virtual void iterate(const IterateFunction &fn) = 0; virtual void preVisit(const model::Operation &) = 0; diff --git a/runtime/neurun/core/include/backend/ITensorRegister.h b/runtime/neurun/core/include/backend/ITensorRegister.h index d5be3a2..296b2a0 100644 --- a/runtime/neurun/core/include/backend/ITensorRegister.h +++ b/runtime/neurun/core/include/backend/ITensorRegister.h @@ -22,9 +22,9 @@ #include "ir/operand/ParentInfo.h" #include "ITensorBuilder.h" #include "ir/Layout.h" -#include "model/OperandIndexSequence.h" -#include "model/OperandInfo.h" 
-#include "model/Operands.h" +#include "ir/OperandIndexSequence.h" +#include "ir/OperandInfo.h" +#include "ir/Operands.h" #include "model/OperationVisitor.h" namespace @@ -76,7 +76,7 @@ public: } protected: - virtual const model::Operands &operands() const = 0; + virtual const ir::Operands &operands() const = 0; virtual std::shared_ptr tensor_builder() const = 0; virtual bool supportSubTensor() const = 0; @@ -84,18 +84,18 @@ protected: #define OP(InternalName) \ virtual void visit(const model::operation::InternalName &node) override \ { \ - model::OperandIndexSequence indices{node.getInputs()}; \ + ir::OperandIndexSequence indices{node.getInputs()}; \ indices.append(node.getOutputs()); \ for (const auto &index : indices) \ { \ defaultRegisterTensorInfo(index); \ } \ } -#include "model/Operations.lst" +#include "ir/Operations.lst" #undef OP protected: - void defaultRegisterTensorInfo(const model::OperandIndex &index) const + void defaultRegisterTensorInfo(const ir::OperandIndex &index) const { if (tensor_builder()->isRegistered(index)) { @@ -112,15 +112,15 @@ protected: } else { - model::OperandInfo backend_info{ - permuteTensorShape(obj.shape(), frontend_layout, backend_layout), obj.typeInfo()}; + ir::OperandInfo backend_info{permuteTensorShape(obj.shape(), frontend_layout, backend_layout), + obj.typeInfo()}; tensor_builder()->registerTensorInfo(index, backend_info, backend_layout, obj.isConstant()); } } protected: virtual ir::Layout frontendLayout() const final { return _current_subg_layout; } - virtual ir::Layout backendLayout(const model::OperandIndex &index) const final + virtual ir::Layout backendLayout(const ir::OperandIndex &index) const final { assert(_lower_info_map != nullptr); const auto lower_info = _lower_info_map->operand.at(index).get(); @@ -128,8 +128,7 @@ protected: } private: - compiler::SubTensorInfo generateSubTensorInfo(const model::Operand &obj, - ir::Layout frontend_layout, + compiler::SubTensorInfo generateSubTensorInfo(const ir::Operand &obj, ir::Layout frontend_layout, ir::Layout backend_layout) const { assert(obj.shape().rank() <= 4); @@ -148,8 +147,8 @@ private: shape.extendRank(4); offset = {offset[0], offset[2], offset[3], offset[1]}; } - model::Operand subtensor_obj{permuteTensorShape(shape, frontend_layout, backend_layout), - obj.typeInfo()}; + ir::Operand subtensor_obj{permuteTensorShape(shape, frontend_layout, backend_layout), + obj.typeInfo()}; subtensor_obj.parent_info( nnfw::cpp14::make_unique(parent_index, offset)); return compiler::SubTensorInfo{subtensor_obj}; diff --git a/runtime/neurun/core/include/compiler/SubTensorInfo.h b/runtime/neurun/core/include/compiler/SubTensorInfo.h index 60405af..18cab46 100644 --- a/runtime/neurun/core/include/compiler/SubTensorInfo.h +++ b/runtime/neurun/core/include/compiler/SubTensorInfo.h @@ -22,7 +22,7 @@ #ifndef __NEURUN_COMPILER_SUBTENSOR_INFO_H__ #define __NEURUN_COMPILER_SUBTENSOR_INFO_H__ -#include "model/Operand.h" +#include "ir/Operand.h" namespace neurun { @@ -41,7 +41,7 @@ public: * @brief Construct a new SubTensorInfo object * @param[in] obj SubTensor object */ - SubTensorInfo(const model::Operand &obj) + SubTensorInfo(const ir::Operand &obj) : _parent{obj.parent_info()->parent()}, _shape{obj.shape()}, _type{obj.typeInfo()}, _offset{obj.parent_info()->offset()} { @@ -53,7 +53,7 @@ public: * @brief Return parent tensor index * @return Parent tensor index */ - const model::OperandIndex parent(void) const { return _parent; } + const ir::OperandIndex parent(void) const { return _parent; } /** * @brief Return 
tensor shape * @return Tensor shape @@ -71,7 +71,7 @@ public: const neurun::util::Coordinates offset(void) const { return _offset; } private: - const model::OperandIndex _parent; + const ir::OperandIndex _parent; const ir::Shape _shape; const ir::TypeInfo _type; const neurun::util::Coordinates _offset; diff --git a/runtime/neurun/core/include/exec/Execution.h b/runtime/neurun/core/include/exec/Execution.h index 7a00741..6b108b4 100644 --- a/runtime/neurun/core/include/exec/Execution.h +++ b/runtime/neurun/core/include/exec/Execution.h @@ -59,7 +59,7 @@ public: * @param[in] length Input data's length * @param[in] layout Input data's data format */ - void setInput(const model::IOIndex &index, const void *buffer, size_t length, + void setInput(const ir::IOIndex &index, const void *buffer, size_t length, ir::Layout layout = ir::Layout::NHWC); /** * @brief Set input data's information, especially to specify unknown dimensions on model @@ -71,7 +71,7 @@ public: * @param[in] length Input data's length * @param[in] layout Input data's data format */ - void setInput(const model::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape, + void setInput(const ir::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape, const void *buffer, size_t length, ir::Layout layout = ir::Layout::NHWC); /** * @brief Set output data's information @@ -80,7 +80,7 @@ public: * @param[in] length Output data's length * @param[in] layout Output data's data format */ - void setOutput(const model::IOIndex &index, void *buffer, size_t length, + void setOutput(const ir::IOIndex &index, void *buffer, size_t length, ir::Layout layout = ir::Layout::NHWC); /** * @brief Set output data's information, especially to specify unknown dimensions on model @@ -92,20 +92,20 @@ public: * @param[in] length Output data's length * @param[in] layout Output data's data format */ - void setOutput(const model::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape, + void setOutput(const ir::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape, void *buffer, size_t length, ir::Layout layout = ir::Layout::NHWC); /** * @brief Set input data's data format * @param[in] index Input index * @param[in] layout Input data's data format */ - void setInputLayout(const model::IOIndex &index, ir::Layout layout); + void setInputLayout(const ir::IOIndex &index, ir::Layout layout); /** * @brief Set output data's data format * @param[in] index Output index * @param[in] layout Output data's data format */ - void setOutputLayout(const model::IOIndex &index, ir::Layout layout); + void setOutputLayout(const ir::IOIndex &index, ir::Layout layout); /** * @brief Execution * @note It should be called after setting input and output buffer diff --git a/runtime/neurun/core/include/exec/IExecutor.h b/runtime/neurun/core/include/exec/IExecutor.h index ecb195c..0786553 100644 --- a/runtime/neurun/core/include/exec/IExecutor.h +++ b/runtime/neurun/core/include/exec/IExecutor.h @@ -24,7 +24,7 @@ #include "ir/Graph.h" #include "IFunction.h" #include "IODescription.h" -#include "model/OperationIndexMap.h" +#include "ir/OperationIndexMap.h" namespace neurun { @@ -56,7 +56,7 @@ struct IExecutor * @brief Set an ordering on operations * @param[in] ranks The table encoding the ordering */ - virtual void setIndexedRanks(std::shared_ptr>) = 0; + virtual void setIndexedRanks(std::shared_ptr>) = 0; /** * @brief Start execution diff --git a/runtime/neurun/core/include/exec/IODescription.h b/runtime/neurun/core/include/exec/IODescription.h index 
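The Execution methods whose signatures change above form the user-facing I/O API: bind buffers by ir::IOIndex, then run. A hedged usage sketch; the buffer shapes are arbitrary, and the include path and the blocking execute() call are taken from the surrounding runtime sources rather than this patch:

#include <vector>
#include "exec/Execution.h" // path as used within the runtime tree

void runOnce(neurun::exec::Execution &execution)
{
  // Illustrative sizes only; real shapes come from the loaded model.
  std::vector<float> in(1 * 224 * 224 * 3);
  std::vector<float> out(1000);

  execution.setInput(neurun::ir::IOIndex(0), in.data(), in.size() * sizeof(float));
  execution.setOutput(neurun::ir::IOIndex(0), out.data(), out.size() * sizeof(float));
  execution.execute(); // blocking run; `out` holds the results afterwards
}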
fc766c6..bdcc781 100644 --- a/runtime/neurun/core/include/exec/IODescription.h +++ b/runtime/neurun/core/include/exec/IODescription.h @@ -19,7 +19,7 @@ #include -#include "model/OperandInfo.h" +#include "ir/OperandInfo.h" namespace neurun { @@ -28,14 +28,13 @@ namespace exec struct InputDesc { - const model::OperandInfo info; + const ir::OperandInfo info; const void *buffer; const size_t size; const ir::Layout layout; InputDesc(void) = delete; - InputDesc(const model::OperandInfo &info, const void *buffer, const size_t size, - ir::Layout layout) + InputDesc(const ir::OperandInfo &info, const void *buffer, const size_t size, ir::Layout layout) : info(info), buffer(buffer), size(size), layout(layout) { } @@ -43,13 +42,13 @@ struct InputDesc struct OutputDesc { - const model::OperandInfo info; + const ir::OperandInfo info; void *buffer; const size_t size; const ir::Layout layout; OutputDesc(void) = delete; - OutputDesc(const model::OperandInfo &info, void *buffer, const size_t size, ir::Layout layout) + OutputDesc(const ir::OperandInfo &info, void *buffer, const size_t size, ir::Layout layout) : info(info), buffer(buffer), size(size), layout(layout) { } diff --git a/runtime/neurun/core/include/model/Data.h b/runtime/neurun/core/include/ir/Data.h similarity index 86% rename from runtime/neurun/core/include/model/Data.h rename to runtime/neurun/core/include/ir/Data.h index 3316ad8..0cc8cb5 100644 --- a/runtime/neurun/core/include/model/Data.h +++ b/runtime/neurun/core/include/ir/Data.h @@ -14,14 +14,14 @@ * limitations under the License. */ -#ifndef __NEURUN_MODEL_DATA_H__ -#define __NEURUN_MODEL_DATA_H__ +#ifndef __NEURUN_IR_DATA_H__ +#define __NEURUN_IR_DATA_H__ #include namespace neurun { -namespace model +namespace ir { struct Data @@ -69,7 +69,14 @@ private: const size_t _size; }; +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces. 
+namespace model +{ +using Data = ir::Data; +using CachedData = ir::CachedData; } // namespace model } // namespace neurun -#endif // __NEURUN_MODEL_DATA_H__ +#endif // __NEURUN_IR_DATA_H__ diff --git a/runtime/neurun/core/include/ir/Graph.h b/runtime/neurun/core/include/ir/Graph.h index 0ad5135..4564db2 100644 --- a/runtime/neurun/core/include/ir/Graph.h +++ b/runtime/neurun/core/include/ir/Graph.h @@ -19,7 +19,7 @@ #include -#include "model/Operands.h" +#include "ir/Operands.h" #include "model/Operations.h" #include "ir/LowerInfoMap.h" #include "model/Subgraph.h" @@ -82,7 +82,7 @@ public: { public: using GraphRef = typename std::conditional::type; - using IndexRef = const model::OperationIndex &; + using IndexRef = const ir::OperationIndex &; using NodeRef = typename std::conditional::type; using IterFn = std::function; @@ -124,14 +124,14 @@ public: // Graph Building public: - model::OperandIndex addOperand(const ir::Shape &shape, const ir::TypeInfo &type); - model::OperationIndex addOperation(std::unique_ptr &&node); - void setOperandValue(const model::OperandIndex &ind, std::unique_ptr &&data); - void addInput(const model::OperandIndex &ind); - void addOutput(const model::OperandIndex &ind); + ir::OperandIndex addOperand(const ir::Shape &shape, const ir::TypeInfo &type); + ir::OperationIndex addOperation(std::unique_ptr &&node); + void setOperandValue(const ir::OperandIndex &ind, std::unique_ptr &&data); + void addInput(const ir::OperandIndex &ind); + void addOutput(const ir::OperandIndex &ind); void finishBuilding(void); void lower(void); - void removeOperand(const model::OperandIndex &ind) { _operands.remove(ind); } + void removeOperand(const ir::OperandIndex &ind) { _operands.remove(ind); } bool isBuildingPhase(void) const { return _phase == Phase::BUILDING; } private: @@ -155,12 +155,12 @@ private: // Accessors public: - const model::OperandIndexSequence &getInputs() const { return _inputs; } - model::OperandIndexSequence &getInputs() { return _inputs; } - const model::OperandIndexSequence &getOutputs() const { return _outputs; } - model::OperandIndexSequence &getOutputs() { return _outputs; } - const model::Operands &operands() const { return _operands; } - model::Operands &operands() { return _operands; } // TODO Remove this non-const accessor + const ir::OperandIndexSequence &getInputs() const { return _inputs; } + ir::OperandIndexSequence &getInputs() { return _inputs; } + const ir::OperandIndexSequence &getOutputs() const { return _outputs; } + ir::OperandIndexSequence &getOutputs() { return _outputs; } + const ir::Operands &operands() const { return _operands; } + ir::Operands &operands() { return _operands; } // TODO Remove this non-const accessor const model::Operations &operations() const { return _operations; } model::Operations &operations() { return _operations; } const compiler::BackendResolver *backend_resolver() const { return _backend_resolver.get(); } @@ -168,22 +168,22 @@ public: private: Phase _phase{Phase::BUILDING}; model::Operations _operations; - model::Operands _operands; - model::OperandIndexSequence _inputs; - model::OperandIndexSequence _outputs; + ir::Operands _operands; + ir::OperandIndexSequence _inputs; + ir::OperandIndexSequence _outputs; // For LOWERED phase public: const LowerInfoMap *getLowerInfo() const { return _lower_info_map.get(); } - const operation::LowerInfo *getLowerInfo(const model::SubgraphIndex &subg_index) const; - void setLowerInfo(const model::SubgraphIndex &subg_index, + const operation::LowerInfo *getLowerInfo(const 
ir::SubgraphIndex &subg_index) const; + void setLowerInfo(const ir::SubgraphIndex &subg_index, std::unique_ptr &&lower_info); - void removeLowerInfo(const model::SubgraphIndex &subg_index); - const operand::LowerInfo *getLowerInfo(const model::OperandIndex &index) const; - operand::LowerInfo *getLowerInfo(const model::OperandIndex &index); - void setLowerInfo(const model::OperandIndex &index, + void removeLowerInfo(const ir::SubgraphIndex &subg_index); + const operand::LowerInfo *getLowerInfo(const ir::OperandIndex &index) const; + operand::LowerInfo *getLowerInfo(const ir::OperandIndex &index); + void setLowerInfo(const ir::OperandIndex &index, std::unique_ptr &&lower_info); - void removeLowerInfo(const model::OperandIndex &index); + void removeLowerInfo(const ir::OperandIndex &index); model::Subgraphs &subgraphs() { assert(_subgraphs); @@ -193,15 +193,14 @@ public: void setBackendResolver(std::unique_ptr &&br); private: - void - makeSubgraphs(model::OperandIndexMap> &operands_lower_info); + void makeSubgraphs(ir::OperandIndexMap> &operands_lower_info); void manipulateLowerInfo( - model::OperandIndexMap> &operands_lower_info); + ir::OperandIndexMap> &operands_lower_info); void dumpLowerInfo(); - bool mergeable(const model::SubgraphIndex &subg_index, const model::OperationIndex &node_index, + bool mergeable(const ir::SubgraphIndex &subg_index, const ir::OperationIndex &node_index, ir::Layout layout); - model::SubgraphIndex appendFreshSingleOpSubgraph(const model::OperationIndex &node_index, - const model::Operation &node, ir::Layout layout); + ir::SubgraphIndex appendFreshSingleOpSubgraph(const ir::OperationIndex &node_index, + const model::Operation &node, ir::Layout layout); private: std::unique_ptr _backend_resolver; diff --git a/runtime/neurun/core/include/model/Index.h b/runtime/neurun/core/include/ir/Index.h similarity index 75% rename from runtime/neurun/core/include/model/Index.h rename to runtime/neurun/core/include/ir/Index.h index e4218d5..69d307e 100644 --- a/runtime/neurun/core/include/model/Index.h +++ b/runtime/neurun/core/include/ir/Index.h @@ -14,14 +14,14 @@ * limitations under the License. */ -#ifndef __NEURUN_MODEL_OPERAND_INDEX_H__ -#define __NEURUN_MODEL_OPERAND_INDEX_H__ +#ifndef __NEURUN_IR_OPERAND_INDEX_H__ +#define __NEURUN_IR_OPERAND_INDEX_H__ #include "util/Index.h" namespace neurun { -namespace model +namespace ir { struct OperationIndexTag; @@ -36,7 +36,17 @@ using IOIndex = ::neurun::util::Index; struct SubgraphIndexTag; using SubgraphIndex = ::neurun::util::Index; -} // namespace model +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces. 
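// Illustration (editor's sketch, not lines from this patch; util::Index's
// template arguments are inferred from the tag structs above): each alias
// is a distinct strong type over the same raw integer, so different kinds
// of indices cannot be swapped silently.
neurun::ir::OperandIndex operand_index{3};
neurun::ir::OperationIndex operation_index{3};
uint32_t raw_value = operand_index.value(); // the raw value stays accessible
// operand_index = operation_index;         // would not compile: distinct types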
+namespace model
+{
+using OperationIndex = ir::OperationIndex;
+using OperandIndex = ir::OperandIndex;
+using IOIndex = ir::IOIndex;
+using SubgraphIndex = ir::SubgraphIndex;
+}
 } // namespace neurun
-#endif // __NEURUN_MODEL_OPERAND_INDEX_H__
+#endif // __NEURUN_IR_OPERAND_INDEX_H__
diff --git a/runtime/neurun/core/include/ir/LowerInfoMap.h b/runtime/neurun/core/include/ir/LowerInfoMap.h
index 24ed296..3e7251f 100644
--- a/runtime/neurun/core/include/ir/LowerInfoMap.h
+++ b/runtime/neurun/core/include/ir/LowerInfoMap.h
@@ -22,8 +22,8 @@
 #include "ir/operand/LowerInfo.h"
 #include "ir/operation/LowerInfo.h"
-#include "model/OperandIndexMap.h"
-#include "model/Index.h"
+#include "ir/OperandIndexMap.h"
+#include "ir/Index.h"
 namespace neurun
 {
@@ -32,8 +32,8 @@ namespace graph
 struct LowerInfoMap
 {
-  std::unordered_map<model::SubgraphIndex, std::unique_ptr<operation::LowerInfo>> operation;
-  model::OperandIndexMap<std::unique_ptr<operand::LowerInfo>> operand;
+  std::unordered_map<ir::SubgraphIndex, std::unique_ptr<operation::LowerInfo>> operation;
+  ir::OperandIndexMap<std::unique_ptr<operand::LowerInfo>> operand;
 };
 } // namespace graph
diff --git a/runtime/neurun/core/include/model/OpCode.h b/runtime/neurun/core/include/ir/OpCode.h
similarity index 71%
rename from runtime/neurun/core/include/model/OpCode.h
rename to runtime/neurun/core/include/ir/OpCode.h
index b698593..f1d5022 100644
--- a/runtime/neurun/core/include/model/OpCode.h
+++ b/runtime/neurun/core/include/ir/OpCode.h
@@ -14,22 +14,22 @@
 * limitations under the License.
 */
-#ifndef __NEURUN_MODEL_OP_CODE_H__
-#define __NEURUN_MODEL_OP_CODE_H__
+#ifndef __NEURUN_IR_OP_CODE_H__
+#define __NEURUN_IR_OP_CODE_H__
 #include
 #include
 namespace neurun
{
-namespace model
+namespace ir
{
 enum class OpCode
{
  Invalid, //< Unused
#define OP(Name) Name, //< All operations
-#include "Operations.lst"
+#include "ir/Operations.lst"
#undef OP
  Subgraph, //< Subgraph is treated specially
  COUNT
@@ -37,21 +37,27 @@ enum class OpCode
 const char *toString(OpCode opcode);
+} // namespace ir
+
+// TODO Remove after merging 'graph' and 'model' namespaces.
+namespace model
+{
+using OpCode = ir::OpCode;
 } // namespace model
 } // namespace neurun
 namespace std
{
-template <> struct hash<::neurun::model::OpCode>
+template <> struct hash<neurun::ir::OpCode>
{
-  size_t operator()(::neurun::model::OpCode value) const noexcept
+  size_t operator()(neurun::ir::OpCode value) const noexcept
  {
-    using type = typename std::underlying_type<::neurun::model::OpCode>::type;
+    using type = typename std::underlying_type<neurun::ir::OpCode>::type;
    return hash<type>()(static_cast<type>(value));
  }
};
 } // namespace std
-#endif // __NEURUN_MODEL_OP_CODE_H__
+#endif // __NEURUN_IR_OP_CODE_H__
diff --git a/runtime/neurun/core/include/model/Operand.h b/runtime/neurun/core/include/ir/Operand.h
similarity index 91%
rename from runtime/neurun/core/include/model/Operand.h
rename to runtime/neurun/core/include/ir/Operand.h
index 7d5832e..424f0fe 100644
--- a/runtime/neurun/core/include/model/Operand.h
+++ b/runtime/neurun/core/include/ir/Operand.h
@@ -14,23 +14,23 @@
 * limitations under the License.
*/ -#ifndef __NEURUN_MODEL_OPERAND_H__ -#define __NEURUN_MODEL_OPERAND_H__ +#ifndef __NEURUN_IR_OPERAND_H__ +#define __NEURUN_IR_OPERAND_H__ #include #include #include #include -#include "Data.h" +#include "ir/Data.h" #include "ir/DataType.h" -#include "OperandInfo.h" +#include "ir/OperandInfo.h" #include "ir/operand/ParentInfo.h" // TODO Remove this dependency -#include "model/OperationIndexList.h" +#include "ir/OperationIndexList.h" namespace neurun { -namespace model +namespace ir { class Operand @@ -124,7 +124,13 @@ private: std::shared_ptr _parent_info; }; +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces. +namespace model +{ +using Operand = ir::Operand; } // namespace model } // namespace neurun -#endif // __NEURUN_MODEL_OPERAND_H__ +#endif // __NEURUN_IR_OPERAND_H__ diff --git a/runtime/neurun/core/include/model/OperandConstraint.h b/runtime/neurun/core/include/ir/OperandConstraint.h similarity index 92% rename from runtime/neurun/core/include/model/OperandConstraint.h rename to runtime/neurun/core/include/ir/OperandConstraint.h index c3145d2..cb06ff1 100644 --- a/runtime/neurun/core/include/model/OperandConstraint.h +++ b/runtime/neurun/core/include/ir/OperandConstraint.h @@ -23,9 +23,7 @@ namespace neurun { -namespace model -{ -namespace operation +namespace ir { class OperandConstraint @@ -54,7 +52,12 @@ private: uint32_t _end; }; -} // namespace operation +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces. +namespace model +{ +using OperandConstraint = ir::OperandConstraint; } // namespace model } // namespace neurun diff --git a/runtime/neurun/core/include/model/OperandIndexMap.h b/runtime/neurun/core/include/ir/OperandIndexMap.h similarity index 70% rename from runtime/neurun/core/include/model/OperandIndexMap.h rename to runtime/neurun/core/include/ir/OperandIndexMap.h index c3492d4..4f555e6 100644 --- a/runtime/neurun/core/include/model/OperandIndexMap.h +++ b/runtime/neurun/core/include/ir/OperandIndexMap.h @@ -14,21 +14,27 @@ * limitations under the License. */ -#ifndef __NEURUN_MODEL_OPERAND_INDEX_MAP_H__ -#define __NEURUN_MODEL_OPERAND_INDEX_MAP_H__ +#ifndef __NEURUN_IR_OPERAND_INDEX_MAP_H__ +#define __NEURUN_IR_OPERAND_INDEX_MAP_H__ #include -#include "Index.h" +#include "ir/Index.h" namespace neurun { -namespace model +namespace ir { -template using OperandIndexMap = std::unordered_map; +template using OperandIndexMap = std::unordered_map; + +} // namespace ir +// TODO Remove after merging 'graph' and 'model' namespaces. +namespace model +{ +template using OperandIndexMap = ir::OperandIndexMap; } // namespace model } // namespace neurun -#endif // __NEURUN_MODEL_OPERAND_INDEX_MAP_H__ +#endif // __NEURUN_IR_OPERAND_INDEX_MAP_H__ diff --git a/runtime/neurun/core/include/model/OperandIndexSequence.h b/runtime/neurun/core/include/ir/OperandIndexSequence.h similarity index 91% rename from runtime/neurun/core/include/model/OperandIndexSequence.h rename to runtime/neurun/core/include/ir/OperandIndexSequence.h index 725811a..49b8c05 100644 --- a/runtime/neurun/core/include/model/OperandIndexSequence.h +++ b/runtime/neurun/core/include/ir/OperandIndexSequence.h @@ -20,11 +20,11 @@ #include #include -#include "Index.h" +#include "ir/Index.h" namespace neurun { -namespace model +namespace ir { class OperandIndexSequence @@ -54,6 +54,12 @@ private: std::vector _set; }; +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces. 
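// Illustration (editor's sketch, not lines from this patch; the parameter
// list is inferred, since C++ requires it to be spelled out): for class
// templates such as OperandIndexMap above, the compatibility shim is an
// alias template rather than a plain alias:
namespace model
{
template <typename T> using OperandIndexMap = ir::OperandIndexMap<T>;
} // namespace model
// With it, model::OperandIndexMap<uint32_t> names exactly the same type as
// ir::OperandIndexMap<uint32_t>.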
+namespace model +{ +using OperandIndexSequence = ir::OperandIndexSequence; } // namespace model } // namespace neurun diff --git a/runtime/neurun/core/include/model/OperandInfo.h b/runtime/neurun/core/include/ir/OperandInfo.h similarity index 89% rename from runtime/neurun/core/include/model/OperandInfo.h rename to runtime/neurun/core/include/ir/OperandInfo.h index 7a97d06..1a7aad8 100644 --- a/runtime/neurun/core/include/model/OperandInfo.h +++ b/runtime/neurun/core/include/ir/OperandInfo.h @@ -18,8 +18,8 @@ * @file OperandInfo.h * @brief This file contains OperandInfo class */ -#ifndef __NEURUN_MODEL_OPERAND_INFO_H__ -#define __NEURUN_MODEL_OPERAND_INFO_H__ +#ifndef __NEURUN_IR_OPERAND_INFO_H__ +#define __NEURUN_IR_OPERAND_INFO_H__ #include "ir/Shape.h" #include "ir/TypeInfo.h" @@ -27,7 +27,7 @@ namespace neurun { -namespace model +namespace ir { /** @@ -84,7 +84,13 @@ private: TypeInfo _typeInfo; }; +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces. +namespace model +{ +using OperandInfo = ir::OperandInfo; } // namespace model } // namespace neurun -#endif // __NEURUN_MODEL_OPERAND_INFO_H__ +#endif // __NEURUN_IR_OPERAND_INFO_H__ diff --git a/runtime/neurun/core/include/model/Operands.h b/runtime/neurun/core/include/ir/Operands.h similarity index 79% rename from runtime/neurun/core/include/model/Operands.h rename to runtime/neurun/core/include/ir/Operands.h index 517d2ff..797e8b4 100644 --- a/runtime/neurun/core/include/model/Operands.h +++ b/runtime/neurun/core/include/ir/Operands.h @@ -14,25 +14,31 @@ * limitations under the License. */ -#ifndef __NEURUN_MODEL_OPERANDS_H__ -#define __NEURUN_MODEL_OPERANDS_H__ +#ifndef __NEURUN_IR_OPERANDS_H__ +#define __NEURUN_IR_OPERANDS_H__ #include #include -#include "Operand.h" -#include "Index.h" +#include "ir/Operand.h" +#include "ir/Index.h" #include "util/ObjectManager.h" namespace neurun { -namespace model +namespace ir { class Operands : public util::ObjectManager { }; +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces. +namespace model +{ +using Operands = ir::Operands; } // namespace model } // namespace neurun diff --git a/runtime/neurun/core/include/model/OperationIndexList.h b/runtime/neurun/core/include/ir/OperationIndexList.h similarity index 90% rename from runtime/neurun/core/include/model/OperationIndexList.h rename to runtime/neurun/core/include/ir/OperationIndexList.h index 50d9155..6b0fda3 100644 --- a/runtime/neurun/core/include/model/OperationIndexList.h +++ b/runtime/neurun/core/include/ir/OperationIndexList.h @@ -22,11 +22,11 @@ #include #include -#include "model/Index.h" +#include "ir/Index.h" namespace neurun { -namespace model +namespace ir { class OperationIndexList @@ -53,6 +53,12 @@ private: std::list _list; }; +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces. +namespace model +{ +using OperationIndexList = ir::OperationIndexList; } // namespace model } // namespace neurun diff --git a/runtime/neurun/core/include/model/OperationIndexMap.h b/runtime/neurun/core/include/ir/OperationIndexMap.h similarity index 70% rename from runtime/neurun/core/include/model/OperationIndexMap.h rename to runtime/neurun/core/include/ir/OperationIndexMap.h index e0399ef..abce7b0 100644 --- a/runtime/neurun/core/include/model/OperationIndexMap.h +++ b/runtime/neurun/core/include/ir/OperationIndexMap.h @@ -14,21 +14,27 @@ * limitations under the License. 
*/ -#ifndef __NEURUN_MODEL_OPERATION_INDEX_MAP_H__ -#define __NEURUN_MODEL_OPERATION_INDEX_MAP_H__ +#ifndef __NEURUN_IR_OPERATION_INDEX_MAP_H__ +#define __NEURUN_IR_OPERATION_INDEX_MAP_H__ #include -#include "Index.h" +#include "ir/Index.h" namespace neurun { -namespace model +namespace ir { -template using OperationIndexMap = std::unordered_map; +template using OperationIndexMap = std::unordered_map; + +} // namespace ir +// TODO Remove after merging 'graph' and 'model' namespaces. +namespace model +{ +template using OperationIndexMap = ir::OperationIndexMap; } // namespace model } // namespace neurun -#endif // __NEURUN_MODEL_OPERATION_INDEX_MAP_H__ +#endif // __NEURUN_IR_OPERATION_INDEX_MAP_H__ diff --git a/runtime/neurun/core/include/model/Operations.lst b/runtime/neurun/core/include/ir/Operations.lst similarity index 100% rename from runtime/neurun/core/include/model/Operations.lst rename to runtime/neurun/core/include/ir/Operations.lst diff --git a/runtime/neurun/core/include/ir/operand/ParentInfo.h b/runtime/neurun/core/include/ir/operand/ParentInfo.h index 024925d..e1bb78a 100644 --- a/runtime/neurun/core/include/ir/operand/ParentInfo.h +++ b/runtime/neurun/core/include/ir/operand/ParentInfo.h @@ -25,7 +25,7 @@ #include -#include "model/Index.h" +#include "ir/Index.h" #include "util/Coordinates.h" namespace neurun @@ -49,7 +49,7 @@ public: * @param[in] coordinate Offset of child operand in parent operand * @return */ - ParentInfo(const model::OperandIndex parent, const Coordinates &coordinate) + ParentInfo(const ir::OperandIndex parent, const Coordinates &coordinate) : _parent{parent}, _coordinate{coordinate} { // DO NOTHING @@ -60,7 +60,7 @@ public: * @brief Return parent index * @return Parent index */ - model::OperandIndex parent(void) const { return _parent; } + ir::OperandIndex parent(void) const { return _parent; } /** * @brief Retern offset in parent * @return Offset @@ -68,7 +68,7 @@ public: Coordinates offset(void) const { return _coordinate; } private: - model::OperandIndex _parent; + ir::OperandIndex _parent; Coordinates _coordinate; }; diff --git a/runtime/neurun/core/include/model/Operation.h b/runtime/neurun/core/include/model/Operation.h index 5ae1859..bf2ca5a 100644 --- a/runtime/neurun/core/include/model/Operation.h +++ b/runtime/neurun/core/include/model/Operation.h @@ -19,10 +19,10 @@ #include -#include "model/OpCode.h" -#include "model/Operand.h" -#include "model/OperandIndexSequence.h" -#include "model/OperandConstraint.h" +#include "ir/OpCode.h" +#include "ir/Operand.h" +#include "ir/OperandIndexSequence.h" +#include "ir/OperandConstraint.h" namespace neurun { @@ -37,8 +37,6 @@ namespace neurun namespace model { -using OperandConstraint = ::neurun::model::operation::OperandConstraint; - class Operation { public: diff --git a/runtime/neurun/core/include/model/OperationVisitor.h b/runtime/neurun/core/include/model/OperationVisitor.h index cb25679..b8b6bf5 100644 --- a/runtime/neurun/core/include/model/OperationVisitor.h +++ b/runtime/neurun/core/include/model/OperationVisitor.h @@ -31,7 +31,7 @@ struct OperationVisitor #define OP(InternalName) \ virtual void visit(const operation::InternalName &) {} -#include "model/Operations.lst" +#include "ir/Operations.lst" #undef OP // This Subgraph node should be handled specially so that diff --git a/runtime/neurun/core/include/model/Operations.h b/runtime/neurun/core/include/model/Operations.h index 4a1b2ca..6d683f3 100644 --- a/runtime/neurun/core/include/model/Operations.h +++ 
b/runtime/neurun/core/include/model/Operations.h @@ -17,7 +17,7 @@ #ifndef __NEURUN_MODEL_OPERATIONS_H__ #define __NEURUN_MODEL_OPERATIONS_H__ -#include "model/Index.h" +#include "ir/Index.h" #include "model/Operation.h" #include "util/ObjectManager.h" diff --git a/runtime/neurun/core/include/model/Subgraph.h b/runtime/neurun/core/include/model/Subgraph.h index a59db64..83eb403 100644 --- a/runtime/neurun/core/include/model/Subgraph.h +++ b/runtime/neurun/core/include/model/Subgraph.h @@ -22,7 +22,7 @@ #include #include "ir/Layout.h" -#include "Index.h" +#include "ir/Index.h" #include "Operation.h" namespace neurun @@ -81,7 +81,7 @@ public: std::vector::const_iterator end() const { return _operations.end(); } private: - bool exist(const neurun::model::OperationIndex &index) const; + bool exist(const OperationIndex &index) const; private: std::vector _operations; diff --git a/runtime/neurun/core/include/model/Subgraphs.h b/runtime/neurun/core/include/model/Subgraphs.h index 6946649..6de8729 100644 --- a/runtime/neurun/core/include/model/Subgraphs.h +++ b/runtime/neurun/core/include/model/Subgraphs.h @@ -17,7 +17,7 @@ #ifndef __NEURUN_MODEL_SUBGRAPHS_H__ #define __NEURUN_MODEL_SUBGRAPHS_H__ -#include "model/Index.h" +#include "ir/Index.h" #include "model/Subgraph.h" #include "util/ObjectManager.h" diff --git a/runtime/neurun/core/include/util/Config.lst b/runtime/neurun/core/include/util/Config.lst index 5f5c8de..ff24459 100644 --- a/runtime/neurun/core/include/util/Config.lst +++ b/runtime/neurun/core/include/util/Config.lst @@ -38,6 +38,6 @@ CONFIG(TRACE_FILEPATH , std::string , "") #define OP(InternalName) \ CONFIG(OP_BACKEND_ ## InternalName, std::string, "") -#include "model/Operations.lst" +#include "ir/Operations.lst" #undef OP diff --git a/runtime/neurun/core/include/util/ShapeInference.h b/runtime/neurun/core/include/util/ShapeInference.h index 1a6e5ec..f12727c 100644 --- a/runtime/neurun/core/include/util/ShapeInference.h +++ b/runtime/neurun/core/include/util/ShapeInference.h @@ -22,8 +22,8 @@ #include "model/operation/MaxPool2D.h" #include "model/operation/Conv2D.h" #include "model/operation/DepthwiseConv2D.h" -#include "model/Operands.h" -#include "model/Index.h" +#include "ir/Operands.h" +#include "ir/Index.h" #include "ir/Layout.h" namespace neurun diff --git a/runtime/neurun/core/include/util/Utils.h b/runtime/neurun/core/include/util/Utils.h index 63a7a97..e7468da 100644 --- a/runtime/neurun/core/include/util/Utils.h +++ b/runtime/neurun/core/include/util/Utils.h @@ -25,7 +25,7 @@ #include "ir/InternalType.h" #include "ir/Layout.h" -#include "model/Operand.h" +#include "ir/Operand.h" #include "util/Coordinates.h" #define UNUSED_RELEASE(a) (void)(a) diff --git a/runtime/neurun/core/src/backend/BackendManager.h b/runtime/neurun/core/src/backend/BackendManager.h index 29a772f..9c6483f 100644 --- a/runtime/neurun/core/src/backend/BackendManager.h +++ b/runtime/neurun/core/src/backend/BackendManager.h @@ -20,7 +20,7 @@ #include #include -#include "model/Operands.h" +#include "ir/Operands.h" #include "backend/Backend.h" namespace neurun diff --git a/runtime/neurun/core/src/compiler/BackendResolver.h b/runtime/neurun/core/src/compiler/BackendResolver.h index 9ba7bd4..ddcae79 100644 --- a/runtime/neurun/core/src/compiler/BackendResolver.h +++ b/runtime/neurun/core/src/compiler/BackendResolver.h @@ -24,7 +24,7 @@ #include "backend/Backend.h" #include "backend/BackendManager.h" #include "backend/ITensorBuilder.h" -#include "model/OperationIndexMap.h" +#include "ir/OperationIndexMap.h" 
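// Illustration (editor's sketch, not lines from this patch; the mapped type
// `const backend::Backend *` matches the setBackend() signature shown just
// below): after this include switch, the resolver keys its
// operation-to-backend table by the ir:: index type.
void set_backend_sketch(ir::OperationIndexMap<const backend::Backend *> &gen_map,
                        const ir::OperationIndex &index, const backend::Backend *backend)
{
  gen_map[index] = backend; // what setBackend() does with _gen_map
}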
namespace neurun { @@ -34,7 +34,7 @@ namespace compiler class BackendResolver { public: - BackendResolver(const model::Operands &operands, + BackendResolver(const ir::Operands &operands, const std::vector &backends, const std::shared_ptr &kb) { @@ -51,7 +51,7 @@ public: BackendResolver &operator=(BackendResolver &&obj) = default; public: - const backend::BackendContext *getBackendContext(const model::OperationIndex &index) const + const backend::BackendContext *getBackendContext(const ir::OperationIndex &index) const { return _context_manager.at(_gen_map.at(index)).get(); } @@ -71,17 +71,17 @@ public: return ret; } - const backend::Backend *getBackend(const model::OperationIndex &index) const + const backend::Backend *getBackend(const ir::OperationIndex &index) const { return getBackendContext(index)->backend; } - void setBackend(const model::OperationIndex &index, const backend::Backend *backend) + void setBackend(const ir::OperationIndex &index, const backend::Backend *backend) { _gen_map[index] = backend; } - void iterate(const std::function &fn) const { for (const auto &e : _gen_map) @@ -93,7 +93,7 @@ public: private: std::unordered_map> _context_manager; - model::OperationIndexMap _gen_map; + ir::OperationIndexMap _gen_map; }; } // namespace compiler diff --git a/runtime/neurun/core/src/compiler/Compiler.cc b/runtime/neurun/core/src/compiler/Compiler.cc index 9e03761..32eb578 100644 --- a/runtime/neurun/core/src/compiler/Compiler.cc +++ b/runtime/neurun/core/src/compiler/Compiler.cc @@ -64,7 +64,7 @@ void Compiler::compile(void) ***************************************************/ // Schedule std::unique_ptr br; - std::shared_ptr> indexed_ranks; + std::shared_ptr> indexed_ranks; if (util::getConfigBool(util::config::USE_SCHEDULER)) { auto scheduler = compiler::HEScheduler( diff --git a/runtime/neurun/core/src/compiler/ExecutorFactory.cc b/runtime/neurun/core/src/compiler/ExecutorFactory.cc index 5ba4b36..47e982d 100644 --- a/runtime/neurun/core/src/compiler/ExecutorFactory.cc +++ b/runtime/neurun/core/src/compiler/ExecutorFactory.cc @@ -180,7 +180,7 @@ exec::IExecutor *ExecutorFactory::createLinearExecutor(graph::Graph &graph) // Wrap tensors as Object and store them to plan for (auto &tensor_builder : tensor_builders) { - tensor_builder->iterate([&](const model::OperandIndex &index) { + tensor_builder->iterate([&](const ir::OperandIndex &index) { auto object = tensor_builder->tensorAt(index); operand_context->set(index, object); }); @@ -211,23 +211,22 @@ exec::IExecutor *ExecutorFactory::createDataflowExecutor(graph::Graph &graph, bo { auto operand_context = std::make_shared(); - graph.subgraphs().iterate([&](const model::SubgraphIndex &, const model::Subgraph &subg) { + graph.subgraphs().iterate([&](const ir::SubgraphIndex &, const model::Subgraph &subg) { auto subtensor_analyzer = SubTensorAnalyzer{graph.operands()}; subg.accept(subtensor_analyzer); }); // Fix shapes and register tensors - graph.subgraphs().iterate( - [&](const model::SubgraphIndex &subg_index, const model::Subgraph &subg) { - auto backend = graph.getLowerInfo(subg_index)->backend(); - auto shape_fixer = graph.backend_resolver()->getBackendContext(backend)->shape_fixer; - shape_fixer->fix(subg); - const auto tensor_register = - graph.backend_resolver()->getBackendContext(backend)->tensor_register; - tensor_register->registerTensors(subg, graph.getLowerInfo()); - }); - - graph.operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) { + graph.subgraphs().iterate([&](const ir::SubgraphIndex 
&subg_index, const model::Subgraph &subg) { + auto backend = graph.getLowerInfo(subg_index)->backend(); + auto shape_fixer = graph.backend_resolver()->getBackendContext(backend)->shape_fixer; + shape_fixer->fix(subg); + const auto tensor_register = + graph.backend_resolver()->getBackendContext(backend)->tensor_register; + tensor_register->registerTensors(subg, graph.getLowerInfo()); + }); + + graph.operands().iterate([&](const ir::OperandIndex &ind, const ir::Operand &obj) { const auto lower_info = graph.getLowerInfo(ind); for (auto factor : lower_info->def_factors()) { @@ -276,29 +275,28 @@ exec::IExecutor *ExecutorFactory::createDataflowExecutor(graph::Graph &graph, bo }; // TODO Remove this method and make `append` to get index value as an argument - void setNextIndex(const model::SubgraphIndex next_index) { _next_index = next_index; } + void setNextIndex(const ir::SubgraphIndex next_index) { _next_index = next_index; } exec::DataflowExecutor::CodeMap &&releaseCodeMap() { return std::move(_code_map); } private: - model::SubgraphIndex _next_index; + ir::SubgraphIndex _next_index; exec::DataflowExecutor::CodeMap _code_map; }; auto execution_builder = nnfw::cpp14::make_unique(); // Generate kernels - graph.subgraphs().iterate( - [&](const model::SubgraphIndex &subg_index, const model::Subgraph &subg) { - auto backend = graph.getLowerInfo(subg_index)->backend(); - auto constant_initializer = - graph.backend_resolver()->getBackendContext(backend)->constant_initializer; - constant_initializer->generate(subg, graph.operands()); - // TODO This approach is temporal. See declaration of `setNextIndex`. - execution_builder->setNextIndex(subg_index); - auto kernel_gen = graph.backend_resolver()->getBackendContext(backend)->kernel_gen; - kernel_gen->generate(subg, execution_builder.get()); - }); + graph.subgraphs().iterate([&](const ir::SubgraphIndex &subg_index, const model::Subgraph &subg) { + auto backend = graph.getLowerInfo(subg_index)->backend(); + auto constant_initializer = + graph.backend_resolver()->getBackendContext(backend)->constant_initializer; + constant_initializer->generate(subg, graph.operands()); + // TODO This approach is temporal. See declaration of `setNextIndex`. + execution_builder->setNextIndex(subg_index); + auto kernel_gen = graph.backend_resolver()->getBackendContext(backend)->kernel_gen; + kernel_gen->generate(subg, execution_builder.get()); + }); for (const auto &tensor_builder : tensor_builders) { @@ -341,7 +339,7 @@ exec::IExecutor *ExecutorFactory::createDataflowExecutor(graph::Graph &graph, bo // Wrap tensors as Object and store them to plan for (auto &tensor_builder : tensor_builders) { - tensor_builder->iterate([&](const model::OperandIndex &index) { + tensor_builder->iterate([&](const ir::OperandIndex &index) { auto object = tensor_builder->tensorAt(index); operand_context->set(index, object); }); diff --git a/runtime/neurun/core/src/compiler/HEScheduler.cc b/runtime/neurun/core/src/compiler/HEScheduler.cc index 8f623b5..a925527 100644 --- a/runtime/neurun/core/src/compiler/HEScheduler.cc +++ b/runtime/neurun/core/src/compiler/HEScheduler.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "model/Operand.h" +#include "ir/Operand.h" #include "compiler/HEScheduler.h" #include "ir/Graph.h" #include "util/ConfigSource.h" @@ -69,8 +69,8 @@ static bool isWorkaroundSkip(const graph::Graph &graph, const backend::Backend * broadcast, scheduling will select it since it doesn't distinguish broadcast and non-broadcast like it does for quant non-quantized*/ if (backend->config()->id() == "cpu" && - (node.opcode() == model::OpCode::Add || node.opcode() == model::OpCode::Sub || - node.opcode() == model::OpCode::Mul)) + (node.opcode() == ir::OpCode::Add || node.opcode() == ir::OpCode::Sub || + node.opcode() == ir::OpCode::Mul)) { const auto lhs_index{node.getInputs().at(model::operation::Add::Input::LHS)}; const auto rhs_index{node.getInputs().at(model::operation::Add::Input::RHS)}; @@ -85,7 +85,7 @@ static bool isWorkaroundSkip(const graph::Graph &graph, const backend::Backend * Adding exception in stage doesn't help. Because if there is a record for Mul without broadcast, scheduling will select it since it doesn't distinguish broadcast and non-broadcast like it does for quant non-quantized*/ - else if (backend->config()->id() == "acl_neon" && node.opcode() == model::OpCode::Mul) + else if (backend->config()->id() == "acl_neon" && node.opcode() == ir::OpCode::Mul) { const auto lhs_index{node.getInputs().at(model::operation::Mul::Input::LHS)}; const auto rhs_index{node.getInputs().at(model::operation::Mul::Input::RHS)}; @@ -184,8 +184,8 @@ bool HEScheduler::isNodeProfiled(const model::Operation &node) return true; } -void HEScheduler::scheduleBranch(const model::OperationIndex &index, - model::OperationIndexMap &scheduled) +void HEScheduler::scheduleBranch(const ir::OperationIndex &index, + ir::OperationIndexMap &scheduled) { auto loc_index = index; const backend::Backend *parent_backend = nullptr; @@ -203,7 +203,6 @@ void HEScheduler::scheduleBranch(const model::OperationIndex &index, parent_backend = _backend_resolver->getBackend(loc_index); const auto &node = _graph->operations().at(loc_index); - model::OperandIndex tmp; /* get the only output operand, that is input of the next single operation * and just this nodes output.*/ if (node.getOutputs().size() != 1) @@ -238,7 +237,7 @@ std::unique_ptr HEScheduler::schedule(const graph::Gr { // Check if profiling info about all backend/node pairs already exists bool all_nodes_are_profiled = true; - _graph->operations().iterate([&](const model::OperationIndex &, const model::Operation &op) { + _graph->operations().iterate([&](const ir::OperationIndex &, const model::Operation &op) { if (all_nodes_are_profiled) all_nodes_are_profiled = isNodeProfiled(op); }); @@ -253,10 +252,9 @@ std::unique_ptr HEScheduler::schedule(const graph::Gr } } - model::OperationIndexMap visited; - graph.operations().iterate([&](const model::OperationIndex &index, const model::Operation &) { - visited[index] = false; - }); + ir::OperationIndexMap visited; + graph.operations().iterate( + [&](const ir::OperationIndex &index, const model::Operation &) { visited[index] = false; }); // for each task select the backend with the smallest earliest finishing time(eft) for (const auto &rank : _rank_to_op) { @@ -326,17 +324,17 @@ void HEScheduler::makeRank() VERBOSE(HEScheduler::makeRank) << "task prioritizing" << std::endl; _graph->operations().iterate( - [&](const model::OperationIndex &index, const model::Operation &) { DFSMaxRank(index); }); + [&](const ir::OperationIndex &index, const model::Operation &) { DFSMaxRank(index); }); // Check that ranks are calculated 
for all operations(nodes) - _graph->operations().iterate([&](const model::OperationIndex &index, const model::Operation &) { + _graph->operations().iterate([&](const ir::OperationIndex &index, const model::Operation &) { UNUSED_RELEASE(index); assert(_op_to_rank->find(index) != _op_to_rank->end()); }); VERBOSE(HEScheduler::makeRank) << "task prioritizing finished" << std::endl; } -int64_t HEScheduler::DFSMaxRank(const model::OperationIndex &index) +int64_t HEScheduler::DFSMaxRank(const ir::OperationIndex &index) { auto op_to_rank_it = _op_to_rank->find(index); if (op_to_rank_it != _op_to_rank->end()) @@ -401,7 +399,7 @@ int64_t HEScheduler::DFSMaxRank(const model::OperationIndex &index) return rank; } -int64_t HEScheduler::DFSChildrenMaxRank(const model::OperationIndex &index) +int64_t HEScheduler::DFSChildrenMaxRank(const ir::OperationIndex &index) { const auto &node = _graph->operations().at(index); int64_t max_child_rank = 0; @@ -456,8 +454,7 @@ int64_t HEScheduler::backendAvailableTime(const backend::Backend *backend, return prev_op_ft; } -bool HEScheduler::schedule(const model::OperationIndex &index, - const backend::Backend *parent_backend) +bool HEScheduler::schedule(const ir::OperationIndex &index, const backend::Backend *parent_backend) { VERBOSE(HEScheduler::schedule) << "scheduling (" << index.value() << ")" << std::endl; int64_t eft = std::numeric_limits::max(), selected_exec_time = 0; @@ -507,7 +504,7 @@ bool HEScheduler::schedule(const model::OperationIndex &index, } std::pair -HEScheduler::ESTAndExecTime(const backend::Backend *backend, const model::OperationIndex &index, +HEScheduler::ESTAndExecTime(const backend::Backend *backend, const ir::OperationIndex &index, std::multimap &transfer_st_exec_time) { const bool is_linear_exec = "Linear" == util::getConfigString(util::config::EXECUTOR); diff --git a/runtime/neurun/core/src/compiler/HEScheduler.h b/runtime/neurun/core/src/compiler/HEScheduler.h index b888fa6..657149e 100644 --- a/runtime/neurun/core/src/compiler/HEScheduler.h +++ b/runtime/neurun/core/src/compiler/HEScheduler.h @@ -28,7 +28,7 @@ #include "backend/ExecTime.h" #include "backend/Backend.h" #include "cpp14/memory.h" -#include "model/OperationIndexMap.h" +#include "ir/OperationIndexMap.h" #include #include @@ -48,11 +48,10 @@ public: * @param[in] model Graph model * @param[in] backend_resolver backend resolver */ - HEScheduler(const neurun::model::Operands &operands, - std::vector backends, + HEScheduler(const ir::Operands &operands, std::vector backends, const std::shared_ptr &kb) : _is_supported{}, _backends_avail_time{}, _ops_eft{}, - _op_to_rank{std::make_shared>()}, + _op_to_rank{std::make_shared>()}, _all_backends(std::move(backends)) { _backend_resolver = @@ -76,12 +75,12 @@ public: * https://www.hindawi.com/journals/sp/2016/3676149/ */ std::unique_ptr schedule(const graph::Graph &graph) final; - std::shared_ptr> getIndexedRanks() { return _op_to_rank; } + std::shared_ptr> getIndexedRanks() { return _op_to_rank; } private: bool isNodeProfiled(const model::Operation &); - bool schedule(const model::OperationIndex &, const backend::Backend *parent_backend); + bool schedule(const ir::OperationIndex &, const backend::Backend *parent_backend); /** * @brief Get earliest starting time and execution time of an operation on a backend. 
* @@ -95,7 +94,7 @@ private: * @return earliest starting time and execution time */ std::pair - ESTAndExecTime(const backend::Backend *backend, const model::OperationIndex &index, + ESTAndExecTime(const backend::Backend *backend, const ir::OperationIndex &index, std::multimap &transfer_st_exec_time); /** * @brief Returns the latest finishing time of parents of a node. @@ -111,9 +110,9 @@ private: void makeRank(); - int64_t DFSMaxRank(const model::OperationIndex &index); + int64_t DFSMaxRank(const ir::OperationIndex &index); - int64_t DFSChildrenMaxRank(const model::OperationIndex &index); + int64_t DFSChildrenMaxRank(const ir::OperationIndex &index); /** * @brief Returns the time, when backend is available for at least given amount of time. * @@ -149,8 +148,7 @@ private: * * @return N/A */ - void scheduleBranch(const model::OperationIndex &index, - model::OperationIndexMap &scheduled); + void scheduleBranch(const ir::OperationIndex &index, ir::OperationIndexMap &scheduled); private: // This variable stores backend/node pairs with unknown execution time, and hints scheduler @@ -160,9 +158,9 @@ private: std::unordered_map> _is_supported; // Finishing and starting time of each backend std::unordered_map> _backends_avail_time; - model::OperationIndexMap _ops_eft; - std::multimap> _rank_to_op; - std::shared_ptr> _op_to_rank; + ir::OperationIndexMap _ops_eft; + std::multimap> _rank_to_op; + std::shared_ptr> _op_to_rank; std::unique_ptr _backend_resolver; std::unique_ptr _exec_time; const graph::Graph *_graph{nullptr}; diff --git a/runtime/neurun/core/src/compiler/Linear.cc b/runtime/neurun/core/src/compiler/Linear.cc index 07ee7a8..864a90a 100644 --- a/runtime/neurun/core/src/compiler/Linear.cc +++ b/runtime/neurun/core/src/compiler/Linear.cc @@ -37,9 +37,9 @@ Linear::Linear(graph::Graph &graph) : _graph(graph) // Get SubgraphSequence by topological sorting { model::Subgraphs &subgraphs = _graph.subgraphs(); - model::Operands &operands = _graph.operands(); + ir::Operands &operands = _graph.operands(); // subgraphs can't access a subgraph by an operand so that input_to_subgs can offer it - std::unordered_map> input_to_subgs; + std::unordered_map> input_to_subgs; // Get the relations between input/subgraph to be used for dfs-post-iter // @@ -56,7 +56,7 @@ Linear::Linear(graph::Graph &graph) : _graph(graph) // [SUBG3] // | // [4] - subgraphs.iterate([&](const model::SubgraphIndex &subg_idx, model::Subgraph &subg) { + subgraphs.iterate([&](const ir::SubgraphIndex &subg_idx, model::Subgraph &subg) { for (auto input : subg.getInputs()) { // only valid_inputs @@ -67,7 +67,7 @@ Linear::Linear(graph::Graph &graph) : _graph(graph) auto it = input_to_subgs.find(input); if (it == input_to_subgs.end()) { - std::list list{subg_idx}; + std::list list{subg_idx}; input_to_subgs[input] = list; } else @@ -77,13 +77,12 @@ Linear::Linear(graph::Graph &graph) : _graph(graph) } }); - std::unordered_map visited; - subgraphs.iterate([&](const model::SubgraphIndex &index, const model::Subgraph &) { - visited[index] = false; - }); + std::unordered_map visited; + subgraphs.iterate( + [&](const ir::SubgraphIndex &index, const model::Subgraph &) { visited[index] = false; }); - std::function dfs_recursive = - [&](const model::SubgraphIndex &index, model::Subgraph &subg) -> void { + std::function dfs_recursive = + [&](const ir::SubgraphIndex &index, model::Subgraph &subg) -> void { if (visited[index]) return; visited[index] = true; @@ -109,9 +108,8 @@ Linear::Linear(graph::Graph &graph) : _graph(graph) 
subgraphs.iterate(dfs_recursive); // All of the nodes must have been visited. - assert( - std::all_of(visited.begin(), visited.end(), - [](const std::pair &v) { return v.second; })); + assert(std::all_of(visited.begin(), visited.end(), + [](const std::pair &v) { return v.second; })); // NOTE. Now these subgraph are on the reverse order std::reverse(_elements.begin(), _elements.end()); @@ -148,15 +146,15 @@ void Linear::accept(model::OperationVisitor &&visitor) const void Linear::planTensors() { - model::OperandIndexMap> tensor_builder_map; + ir::OperandIndexMap> tensor_builder_map; // NOTE // While current ITensorBuilder exposes registerSubTensorInfo for subtensor, // this stage uses registerSubTensorInfo() and notify{First|Last}Use() // but handling subtensor should be processed on each backend. See #5726. - model::OperandIndexMap uses_map; - model::OperandIndexMap def_map; - model::OperandIndexSequence constants; + ir::OperandIndexMap uses_map; + ir::OperandIndexMap def_map; + ir::OperandIndexSequence constants; iterate([&](const neurun::compiler::Linear::Element &element) { const auto backend = element.lower_info->backend(); @@ -166,7 +164,7 @@ void Linear::planTensors() }); // Prepare scanning - _graph.operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) { + _graph.operands().iterate([&](const ir::OperandIndex &ind, const ir::Operand &obj) { const auto lower_info = _graph.getLowerInfo(ind); // TODO Remove if neurun doesn't support anymore such as // GeneratedTests.reshape_quant8_weights_as_inputs @@ -287,13 +285,13 @@ void Linear::planTensors() } } - assert(std::all_of( - uses_map.begin(), uses_map.end(), - [](std::pair it) { return it.second == 0; })); + assert( + std::all_of(uses_map.begin(), uses_map.end(), + [](std::pair it) { return it.second == 0; })); - assert(std::all_of( - def_map.begin(), def_map.end(), - [](std::pair it) { return it.second == 0; })); + assert( + std::all_of(def_map.begin(), def_map.end(), + [](std::pair it) { return it.second == 0; })); } void Linear::iterate(const std::function &fn) const diff --git a/runtime/neurun/core/src/compiler/ManualScheduler.cc b/runtime/neurun/core/src/compiler/ManualScheduler.cc index f7d859c..0768156 100644 --- a/runtime/neurun/core/src/compiler/ManualScheduler.cc +++ b/runtime/neurun/core/src/compiler/ManualScheduler.cc @@ -15,7 +15,7 @@ */ #include "ManualScheduler.h" -#include "model/OpCode.h" +#include "ir/OpCode.h" #include "model/Operations.Include.h" #include "backend/Backend.h" #include "backend/BackendManager.h" @@ -66,14 +66,14 @@ std::unique_ptr ManualScheduler::schedule(const graph::Graph &g VERBOSE(ManualScheduler) << "Default backend for all ops: " << backend_all_str << std::endl; - graph.operations().iterate([&](const model::OperationIndex &index, const model::Operation &) { + graph.operations().iterate([&](const ir::OperationIndex &index, const model::Operation &) { backend_resolver->setBackend(index, backend_all); }); // 2. 
Backend per operation type - std::unordered_map op_type_map; + std::unordered_map op_type_map; // By default, Custom uses cpu backend - op_type_map[model::OpCode::Custom] = backend::BackendManager::get().get("cpu"); + op_type_map[ir::OpCode::Custom] = backend::BackendManager::get().get("cpu"); #define OP(InternalName) \ { \ @@ -82,14 +82,14 @@ std::unique_ptr ManualScheduler::schedule(const graph::Graph &g { \ auto backend = backend::BackendManager::get().get(backend_str); \ VERBOSE(Lower) << "backend for " << #InternalName << ": " << backend_str << std::endl; \ - op_type_map[model::OpCode::InternalName] = backend; \ + op_type_map[ir::OpCode::InternalName] = backend; \ } \ } -#include "model/Operations.lst" +#include "ir/Operations.lst" #undef OP graph.operations().iterate( - [&](const model::OperationIndex &index, const model::Operation &operation) { + [&](const ir::OperationIndex &index, const model::Operation &operation) { auto itr = op_type_map.find(operation.opcode()); if (itr != op_type_map.end()) { @@ -114,8 +114,8 @@ std::unique_ptr ManualScheduler::schedule(const graph::Graph &g const auto &val = key_val.at(1); auto key = static_cast(std::stoi(key_str)); - graph.operations().at(model::OperationIndex{key}); // Check if exist, or this wil throw - backend_resolver->setBackend(model::OperationIndex{key}, + graph.operations().at(ir::OperationIndex{key}); // Check if exist, or this wil throw + backend_resolver->setBackend(ir::OperationIndex{key}, backend::BackendManager::get().get(val)); } } @@ -127,11 +127,11 @@ std::unique_ptr ManualScheduler::schedule(const graph::Graph &g // 4. Operations that are specially handled // All configuration above will be ignored(overwritten) - op_type_map[model::OpCode::Permute] = backend::BackendManager::get().get("cpu"); + op_type_map[ir::OpCode::Permute] = backend::BackendManager::get().get("cpu"); // Dump final assignment backend_resolver->iterate( - [&](const model::OperationIndex &index, const backend::BackendContext &backend_ctx) { + [&](const ir::OperationIndex &index, const backend::BackendContext &backend_ctx) { VERBOSE(ManualScheduler) << "backend for operation #" << index.value() << ": " << backend_ctx.backend->config()->id() << std::endl; }); diff --git a/runtime/neurun/core/src/compiler/OperandContext.cc b/runtime/neurun/core/src/compiler/OperandContext.cc index 3fc3816..c06f615 100644 --- a/runtime/neurun/core/src/compiler/OperandContext.cc +++ b/runtime/neurun/core/src/compiler/OperandContext.cc @@ -23,7 +23,7 @@ namespace neurun namespace compiler { -OperandContext &OperandContext::set(const model::OperandIndex &id, +OperandContext &OperandContext::set(const ir::OperandIndex &id, const std::shared_ptr &tensor) { // Only one tensor for an id @@ -33,7 +33,7 @@ OperandContext &OperandContext::set(const model::OperandIndex &id, } void OperandContext::iterate( - const std::function &fn) + const std::function &fn) { for (auto &e : _tensors) { diff --git a/runtime/neurun/core/src/compiler/OperandContext.h b/runtime/neurun/core/src/compiler/OperandContext.h index 0b7dc52..da1a51b 100644 --- a/runtime/neurun/core/src/compiler/OperandContext.h +++ b/runtime/neurun/core/src/compiler/OperandContext.h @@ -18,7 +18,7 @@ #define __NEURUN_COMPILER_OPERAND_CONTEXT_H__ #include "backend/operand/ITensor.h" -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" #include #include @@ -30,31 +30,28 @@ namespace compiler class OperandContext { public: - OperandContext &set(const model::OperandIndex &ind, + OperandContext &set(const 
ir::OperandIndex &ind, const std::shared_ptr &tensor); public: - bool exist(const ::neurun::model::OperandIndex &ind) const - { - return _tensors.find(ind) != _tensors.end(); - } + bool exist(const ir::OperandIndex &ind) const { return _tensors.find(ind) != _tensors.end(); } public: - std::shared_ptr at(const model::OperandIndex &ind) const + std::shared_ptr at(const ir::OperandIndex &ind) const { return _tensors.at(ind); } - std::shared_ptr &at(const model::OperandIndex &ind) + std::shared_ptr &at(const ir::OperandIndex &ind) { return _tensors.at(ind); } void - iterate(const std::function &fn); + iterate(const std::function &fn); private: - model::OperandIndexMap> _tensors; + ir::OperandIndexMap> _tensors; }; } // namespace compiler diff --git a/runtime/neurun/core/src/compiler/OperationValidator.cc b/runtime/neurun/core/src/compiler/OperationValidator.cc index 214139b..b6bdb9b 100644 --- a/runtime/neurun/core/src/compiler/OperationValidator.cc +++ b/runtime/neurun/core/src/compiler/OperationValidator.cc @@ -18,7 +18,7 @@ #include -#include "model/Operands.h" +#include "ir/Operands.h" #include "ir/operation/LowerInfo.h" #include "util/logging.h" diff --git a/runtime/neurun/core/src/compiler/OperationValidator.h b/runtime/neurun/core/src/compiler/OperationValidator.h index 4ea62ea..9501128 100644 --- a/runtime/neurun/core/src/compiler/OperationValidator.h +++ b/runtime/neurun/core/src/compiler/OperationValidator.h @@ -22,10 +22,10 @@ namespace neurun { -namespace model +namespace ir { class Operands; -} // namespace model +} // namespace ir } // namespace neurun namespace neurun @@ -36,8 +36,7 @@ namespace compiler class OperationValidator : public model::OperationVisitor { public: - OperationValidator(const neurun::model::Operands &ctx) - : _ctx{ctx}, _current_subg_layout{ir::Layout::UNKNOWN} + OperationValidator(const ir::Operands &ctx) : _ctx{ctx}, _current_subg_layout{ir::Layout::UNKNOWN} { } @@ -71,7 +70,7 @@ public: void visit(const model::operation::Pad &node) override; private: - const neurun::model::Operands &_ctx; + const ir::Operands &_ctx; ir::Layout _current_subg_layout; }; diff --git a/runtime/neurun/core/src/compiler/ParamChecker.cc b/runtime/neurun/core/src/compiler/ParamChecker.cc index 657da71..a9c713b 100644 --- a/runtime/neurun/core/src/compiler/ParamChecker.cc +++ b/runtime/neurun/core/src/compiler/ParamChecker.cc @@ -26,7 +26,7 @@ namespace compiler void ParamChecker::operator()() { _model->operations().iterate( - [&](const model::OperationIndex &, const model::Operation &node) { node.accept(*this); }); + [&](const ir::OperationIndex &, const model::Operation &node) { node.accept(*this); }); } } // namespace compiler diff --git a/runtime/neurun/core/src/compiler/SubTensorAnalyzer.cc b/runtime/neurun/core/src/compiler/SubTensorAnalyzer.cc index e3ad8d0..fd14d4a 100644 --- a/runtime/neurun/core/src/compiler/SubTensorAnalyzer.cc +++ b/runtime/neurun/core/src/compiler/SubTensorAnalyzer.cc @@ -19,7 +19,7 @@ #include #include "cpp14/memory.h" -#include "model/OperandIndexSequence.h" +#include "ir/OperandIndexSequence.h" #include "util/logging.h" #include "util/Coordinates.h" diff --git a/runtime/neurun/core/src/compiler/SubTensorAnalyzer.h b/runtime/neurun/core/src/compiler/SubTensorAnalyzer.h index 64591f7..d2a5f78 100644 --- a/runtime/neurun/core/src/compiler/SubTensorAnalyzer.h +++ b/runtime/neurun/core/src/compiler/SubTensorAnalyzer.h @@ -52,7 +52,7 @@ public: * @brief Construct a new SubTensorAnalyzer object * @param[in] ctx Graph operand set */ - 
SubTensorAnalyzer(neurun::model::Operands &ctx) : _ctx{ctx} + SubTensorAnalyzer(ir::Operands &ctx) : _ctx{ctx} { // DO NOTHING } @@ -61,7 +61,7 @@ public: void visit(const model::operation::Concat &) override; private: - neurun::model::Operands &_ctx; // TODO Refactor : Do not update Operands + ir::Operands &_ctx; // TODO Refactor : Do not update Operands }; } // namespace compiler diff --git a/runtime/neurun/core/src/dumper/dot/DotBuilder.h b/runtime/neurun/core/src/dumper/dot/DotBuilder.h index 9b10735..3a21339 100644 --- a/runtime/neurun/core/src/dumper/dot/DotBuilder.h +++ b/runtime/neurun/core/src/dumper/dot/DotBuilder.h @@ -19,16 +19,16 @@ #include -#include "model/Index.h" +#include "ir/Index.h" #include "model/Operation.h" -#include "model/Operand.h" +#include "ir/Operand.h" #include "OperationNode.h" #include "OperandNode.h" #include "DotSubgraphInfo.h" using Operation = neurun::model::Operation; -using Object = neurun::model::Operand; +using Object = neurun::ir::Operand; namespace neurun { diff --git a/runtime/neurun/core/src/dumper/dot/DotDumper.cc b/runtime/neurun/core/src/dumper/dot/DotDumper.cc index 30bf98e..5175f09 100644 --- a/runtime/neurun/core/src/dumper/dot/DotDumper.cc +++ b/runtime/neurun/core/src/dumper/dot/DotDumper.cc @@ -21,7 +21,7 @@ #include "DotBuilder.h" #include "DotSubgraphInfo.h" #include "model/Subgraph.h" -#include "model/OperationIndexMap.h" +#include "ir/OperationIndexMap.h" #include "backend/Backend.h" #include "backend/BackendManager.h" #include "backend/IConfig.h" @@ -47,10 +47,10 @@ void DotDumper::dump(const std::string &tag) auto &operations = _graph.operations(); auto &operands = _graph.operands(); - model::OperationIndexMap> operation_nodes; - std::unordered_map> operand_nodes; + ir::OperationIndexMap> operation_nodes; + std::unordered_map> operand_nodes; - operations.iterate([&](const model::OperationIndex &index, const model::Operation &op) { + operations.iterate([&](const ir::OperationIndex &index, const model::Operation &op) { auto node = nnfw::cpp14::make_unique(index, op); for (auto output : op.getOutputs()) @@ -86,9 +86,9 @@ void DotDumper::dump(const std::string &tag) } }; - util::Set shown_operand_set; + util::Set shown_operand_set; - operands.iterate([&](const model::OperandIndex &index, const model::Operand &object) { + operands.iterate([&](const ir::OperandIndex &index, const ir::Operand &object) { bool showing_cond = false; if (_level == Level::ALL) { @@ -152,7 +152,7 @@ void DotDumper::dump(const std::string &tag) const auto subgraphs = _graph.subgraphs(); if (subgraphs) { - subgraphs->iterate([&](const model::SubgraphIndex &index, const model::Subgraph &subgraph) { + subgraphs->iterate([&](const ir::SubgraphIndex &index, const model::Subgraph &subgraph) { const auto lower_info = _graph.getLowerInfo(index); auto fillcolor = backend_to_fillcolor(lower_info->backend()); std::string label = diff --git a/runtime/neurun/core/src/dumper/dot/DotSubgraphInfo.cc b/runtime/neurun/core/src/dumper/dot/DotSubgraphInfo.cc index 1ea681b..b784846 100644 --- a/runtime/neurun/core/src/dumper/dot/DotSubgraphInfo.cc +++ b/runtime/neurun/core/src/dumper/dot/DotSubgraphInfo.cc @@ -25,8 +25,8 @@ namespace dumper namespace dot { -DotSubgraphInfo::DotSubgraphInfo(const model::SubgraphIndex &index, const model::Subgraph &subgraph, - const util::Set &shown_operands) +DotSubgraphInfo::DotSubgraphInfo(const ir::SubgraphIndex &index, const model::Subgraph &subgraph, + const util::Set &shown_operands) : _index{index} { for (const auto &element : 
subgraph.operations()) diff --git a/runtime/neurun/core/src/dumper/dot/DotSubgraphInfo.h b/runtime/neurun/core/src/dumper/dot/DotSubgraphInfo.h index 771c555..ad16da1 100644 --- a/runtime/neurun/core/src/dumper/dot/DotSubgraphInfo.h +++ b/runtime/neurun/core/src/dumper/dot/DotSubgraphInfo.h @@ -19,7 +19,7 @@ #include -#include "model/Index.h" +#include "ir/Index.h" #include "model/Subgraph.h" #include "util/Set.h" @@ -33,23 +33,23 @@ namespace dot class DotSubgraphInfo { public: - DotSubgraphInfo(const model::SubgraphIndex &index, const model::Subgraph &subgraph, - const util::Set &shown_operands); + DotSubgraphInfo(const ir::SubgraphIndex &index, const model::Subgraph &subgraph, + const util::Set &shown_operands); - model::SubgraphIndex index() const { return _index; } + ir::SubgraphIndex index() const { return _index; } std::string label() const { return _label; } void label(const std::string &val) { _label = val; } std::string fillcolor() const { return _fillcolor; } void fillcolor(const std::string &val) { _fillcolor = val; } - const std::unordered_set &operations() const { return _operations; } - const std::unordered_set &operands() const { return _operands; } + const std::unordered_set &operations() const { return _operations; } + const std::unordered_set &operands() const { return _operands; } private: - model::SubgraphIndex _index; + ir::SubgraphIndex _index; std::string _label; std::string _fillcolor; - std::unordered_set _operations; - std::unordered_set _operands; + std::unordered_set _operations; + std::unordered_set _operands; }; } // namespace dot diff --git a/runtime/neurun/core/src/dumper/dot/OperandNode.cc b/runtime/neurun/core/src/dumper/dot/OperandNode.cc index 141549f..76d2c70 100644 --- a/runtime/neurun/core/src/dumper/dot/OperandNode.cc +++ b/runtime/neurun/core/src/dumper/dot/OperandNode.cc @@ -32,7 +32,7 @@ const std::string Operand::OUTPUT_SHAPE = "doublecircle"; const std::string Operand::OPERAND_SHAPE = "ellipse"; const std::string Operand::BG_COLOR_SCHEME = "set18"; -Operand::Operand(const neurun::model::OperandIndex &index, Type type) +Operand::Operand(const ir::OperandIndex &index, Type type) : Node{"operand" + std::to_string(index.value())} { { diff --git a/runtime/neurun/core/src/dumper/dot/OperandNode.h b/runtime/neurun/core/src/dumper/dot/OperandNode.h index faa8be2..5ebd651 100644 --- a/runtime/neurun/core/src/dumper/dot/OperandNode.h +++ b/runtime/neurun/core/src/dumper/dot/OperandNode.h @@ -27,8 +27,8 @@ #include #include "Node.h" -#include "model/Operand.h" -#include "model/Index.h" +#include "ir/Operand.h" +#include "ir/Index.h" namespace neurun { @@ -66,7 +66,7 @@ public: * @param[in] type Operand type * @param[in] lower_info Operand LowerInfo */ - Operand(const neurun::model::OperandIndex &index, Type type); + Operand(const ir::OperandIndex &index, Type type); private: void addBackendLabel(); diff --git a/runtime/neurun/core/src/dumper/dot/OperationNode.cc b/runtime/neurun/core/src/dumper/dot/OperationNode.cc index 5f599db..e2ea9f2 100644 --- a/runtime/neurun/core/src/dumper/dot/OperationNode.cc +++ b/runtime/neurun/core/src/dumper/dot/OperationNode.cc @@ -32,8 +32,7 @@ namespace dot const std::string Operation::OPERATION_SHAPE = "rect"; const std::string Operation::BG_COLOR_SCHEME = "pastel18"; -Operation::Operation(const neurun::model::OperationIndex &index, - const neurun::model::Operation &node) +Operation::Operation(const ir::OperationIndex &index, const neurun::model::Operation &node) : Node{"operation" + std::to_string(index.value())} { 
setAttribute("label", std::to_string(index.value()) + " : " + node.name()); diff --git a/runtime/neurun/core/src/dumper/dot/OperationNode.h b/runtime/neurun/core/src/dumper/dot/OperationNode.h index b8e609b..db27ab1 100644 --- a/runtime/neurun/core/src/dumper/dot/OperationNode.h +++ b/runtime/neurun/core/src/dumper/dot/OperationNode.h @@ -26,7 +26,7 @@ #include "Node.h" #include "model/Operation.h" -#include "model/Index.h" +#include "ir/Index.h" namespace neurun { @@ -52,7 +52,7 @@ public: * @param[in] index operation index * @param[in] node operation object */ - Operation(const neurun::model::OperationIndex &index, const neurun::model::Operation &node); + Operation(const ir::OperationIndex &index, const neurun::model::Operation &node); }; } // namespace dot diff --git a/runtime/neurun/core/src/exec/DataflowExecutor.cc b/runtime/neurun/core/src/exec/DataflowExecutor.cc index 32e68b0..856ca85 100644 --- a/runtime/neurun/core/src/exec/DataflowExecutor.cc +++ b/runtime/neurun/core/src/exec/DataflowExecutor.cc @@ -37,7 +37,7 @@ int64_t DataflowExecutor::calculateRank(const std::vector &opera auto it = _indexed_ranks->find(element.index); if (it == _indexed_ranks->end()) { - assert(element.node->opcode() == model::OpCode::Permute); + assert(element.node->opcode() == ir::OpCode::Permute); // assign int32_t::max to prevent integer overflow rank += std::numeric_limits::max(); } @@ -87,8 +87,8 @@ DataflowExecutor::DataflowExecutor(const graph::Graph &graph, const model::Subgraphs *subgraphs = _graph.subgraphs(); // Assign jobs convert SubgraphIndex to job index(uint32_t) uint32_t next_job_index = 0; - std::unordered_map subgraph_to_job; - subgraphs->iterate([&](const model::SubgraphIndex &subg_index, const model::Subgraph &) { + std::unordered_map subgraph_to_job; + subgraphs->iterate([&](const ir::SubgraphIndex &subg_index, const model::Subgraph &) { VERBOSE(DataflowExecutor) << "Create a job #" << next_job_index << " with SubgraphIndex " << subg_index.value() << std::endl; _finished_jobs.emplace_back( @@ -100,13 +100,13 @@ DataflowExecutor::DataflowExecutor(const graph::Graph &graph, _output_info.resize(next_job_index); _initial_input_info.resize(next_job_index, 0); - subgraphs->iterate([&](const model::SubgraphIndex &subg_index, const model::Subgraph &subg) { + subgraphs->iterate([&](const ir::SubgraphIndex &subg_index, const model::Subgraph &subg) { auto job_index = subgraph_to_job[subg_index]; for (auto output : subg.getOutputs()) { // Update output and input info subgraphs->iterate( - [&](const model::SubgraphIndex &subg_cur_index, const model::Subgraph &subg_cur) { + [&](const ir::SubgraphIndex &subg_cur_index, const model::Subgraph &subg_cur) { if (subg_cur.getInputs().contains(output)) { auto dep_index = subgraph_to_job[subg_cur_index]; diff --git a/runtime/neurun/core/src/exec/DataflowExecutor.h b/runtime/neurun/core/src/exec/DataflowExecutor.h index 1af3c0d..7e5a5bd 100644 --- a/runtime/neurun/core/src/exec/DataflowExecutor.h +++ b/runtime/neurun/core/src/exec/DataflowExecutor.h @@ -23,8 +23,8 @@ #include "FunctionSequence.h" #include "Job.h" -#include "model/OperandIndexSequence.h" -#include "model/Index.h" +#include "ir/OperandIndexSequence.h" +#include "ir/Index.h" #include "cpp14/memory.h" #include "exec/ExecutorBase.h" @@ -36,7 +36,7 @@ namespace exec class DataflowExecutor : public ExecutorBase { public: - using CodeMap = std::unordered_map>; + using CodeMap = std::unordered_map>; protected: virtual void notify(uint32_t finished_job_id); @@ -88,7 +88,7 @@ protected: std::multimap, 
std::greater> _ready_jobs; /// @brief Which job runs which op and function. - std::unordered_map _job_to_subgraph; + std::unordered_map _job_to_subgraph; }; } // namespace exec diff --git a/runtime/neurun/core/src/exec/Execution.cc b/runtime/neurun/core/src/exec/Execution.cc index bbbbba2..bc7bbd1 100644 --- a/runtime/neurun/core/src/exec/Execution.cc +++ b/runtime/neurun/core/src/exec/Execution.cc @@ -30,7 +30,7 @@ Execution::Execution(const std::shared_ptr &executor) : _executor{exe } // TODO Remove default parameter -void Execution::setInput(const model::IOIndex &index, const void *buffer, size_t length, +void Execution::setInput(const ir::IOIndex &index, const void *buffer, size_t length, ir::Layout layout) { const auto input_index = graph().getInputs().at(index); @@ -46,11 +46,10 @@ void Execution::setInput(const model::IOIndex &index, const void *buffer, size_t } // TODO Remove default parameter -void Execution::setInput(const model::IOIndex &index, const ir::TypeInfo &type, - const ir::Shape &shape, const void *buffer, size_t length, - ir::Layout layout) +void Execution::setInput(const ir::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape, + const void *buffer, size_t length, ir::Layout layout) { - const model::OperandInfo info{shape, type}; + const ir::OperandInfo info{shape, type}; if (length < info.total_size()) { @@ -62,8 +61,7 @@ void Execution::setInput(const model::IOIndex &index, const ir::TypeInfo &type, } // TODO Remove default parameter -void Execution::setOutput(const model::IOIndex &index, void *buffer, size_t length, - ir::Layout layout) +void Execution::setOutput(const ir::IOIndex &index, void *buffer, size_t length, ir::Layout layout) { const auto output_index = graph().getOutputs().at(index); const auto info = graph().operands().at(output_index).info(); @@ -78,10 +76,10 @@ void Execution::setOutput(const model::IOIndex &index, void *buffer, size_t leng } // TODO Remove default parameter -void Execution::setOutput(const model::IOIndex &index, const ir::TypeInfo &type, +void Execution::setOutput(const ir::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape, void *buffer, size_t length, ir::Layout layout) { - const model::OperandInfo info{shape, type}; + const ir::OperandInfo info{shape, type}; if (length < info.total_size()) { @@ -92,14 +90,14 @@ void Execution::setOutput(const model::IOIndex &index, const ir::TypeInfo &type, nnfw::cpp14::make_unique(info, buffer, length, layout); } -void Execution::setInputLayout(const model::IOIndex &index, ir::Layout layout) +void Execution::setInputLayout(const ir::IOIndex &index, ir::Layout layout) { const auto &input_desc = _io_desc.inputs.at(index.value()); _io_desc.inputs.at(index.value()) = nnfw::cpp14::make_unique( input_desc->info, input_desc->buffer, input_desc->size, layout); } -void Execution::setOutputLayout(const model::IOIndex &index, ir::Layout layout) +void Execution::setOutputLayout(const ir::IOIndex &index, ir::Layout layout) { const auto &output_desc = _io_desc.outputs.at(index.value()); _io_desc.outputs.at(index.value()) = nnfw::cpp14::make_unique( diff --git a/runtime/neurun/core/src/exec/ExecutorBase.cc b/runtime/neurun/core/src/exec/ExecutorBase.cc index 2a2bf5e..afd5833 100644 --- a/runtime/neurun/core/src/exec/ExecutorBase.cc +++ b/runtime/neurun/core/src/exec/ExecutorBase.cc @@ -30,7 +30,7 @@ ExecutorBase::ExecutorBase(const graph::Graph &graph, // DO NOTHING } -std::unique_ptr ExecutorBase::source(const model::IOIndex &index, const ir::TypeInfo &type, +std::unique_ptr 
ExecutorBase::source(const ir::IOIndex &index, const ir::TypeInfo &type, const void *buffer, size_t length, ir::Layout io_layout) { @@ -51,7 +51,7 @@ std::unique_ptr ExecutorBase::source(const model::IOIndex &index, const } } -std::unique_ptr ExecutorBase::sink(const model::IOIndex &index, const ir::TypeInfo &type, +std::unique_ptr ExecutorBase::sink(const ir::IOIndex &index, const ir::TypeInfo &type, void *buffer, size_t length, ir::Layout io_layout) { using ir::DataType; @@ -84,8 +84,8 @@ void ExecutorBase::execute(const IODescription &desc) // Set input(s) for (uint32_t n = 0; n < _graph.getInputs().size(); ++n) { - model::IOIndex input_index{n}; - model::OperandIndex index{_graph.getInputs().at(input_index)}; + ir::IOIndex input_index{n}; + ir::OperandIndex index{_graph.getInputs().at(input_index)}; if (desc.inputs.at(n) == nullptr) { @@ -116,7 +116,7 @@ void ExecutorBase::execute(const IODescription &desc) // Get output(s) for (uint32_t n = 0; n < _graph.getOutputs().size(); ++n) { - neurun::model::IOIndex output_index{n}; + ir::IOIndex output_index{n}; // Optional output if (desc.outputs.at(n) == nullptr) { @@ -128,7 +128,7 @@ void ExecutorBase::execute(const IODescription &desc) auto getter = [&](::neurun::backend::operand::ITensor &tensor) { sinks.at(n)->pull(tensor); }; - ::neurun::model::OperandIndex index{_graph.getOutputs().at(output_index)}; + ir::OperandIndex index{_graph.getOutputs().at(output_index)}; auto object = _operand_context->at(index); object->access(getter); diff --git a/runtime/neurun/core/src/exec/ExecutorBase.h b/runtime/neurun/core/src/exec/ExecutorBase.h index 618d14b..40b68be 100644 --- a/runtime/neurun/core/src/exec/ExecutorBase.h +++ b/runtime/neurun/core/src/exec/ExecutorBase.h @@ -53,7 +53,7 @@ public: void execute(const IODescription &desc) final; // Used only in Dataflow and Parallel Executors - void setIndexedRanks(std::shared_ptr> ranks) final + void setIndexedRanks(std::shared_ptr> ranks) final { _indexed_ranks = std::move(ranks); }; @@ -63,13 +63,13 @@ public: void addObserver(std::unique_ptr ref) { _subject.add(std::move(ref)); }; private: - std::unique_ptr source(const model::IOIndex &index, const ir::TypeInfo &type, + std::unique_ptr source(const ir::IOIndex &index, const ir::TypeInfo &type, const void *buffer, size_t length, ir::Layout io_layout); - std::unique_ptr sink(const model::IOIndex &index, const ir::TypeInfo &type, void *buffer, + std::unique_ptr sink(const ir::IOIndex &index, const ir::TypeInfo &type, void *buffer, size_t length, ir::Layout io_layout); template - std::unique_ptr source(const model::IOIndex &index, const void *buffer, size_t length, + std::unique_ptr source(const ir::IOIndex &index, const void *buffer, size_t length, ir::Layout io_layout) { const auto operand_index = _graph.getInputs().at(index); @@ -92,7 +92,7 @@ private: } template - std::unique_ptr sink(const model::IOIndex &index, void *buffer, size_t length, + std::unique_ptr sink(const ir::IOIndex &index, void *buffer, size_t length, ir::Layout io_layout) { const auto operand_index = _graph.getOutputs().at(index); @@ -114,7 +114,7 @@ private: protected: ExecutionObservee _subject; - std::shared_ptr> _indexed_ranks; + std::shared_ptr> _indexed_ranks; const graph::Graph &_graph; std::shared_ptr _operand_context; std::unique_ptr _tensor_mgrs; diff --git a/runtime/neurun/core/src/exec/Job.h b/runtime/neurun/core/src/exec/Job.h index cf3a185..1516b92 100644 --- a/runtime/neurun/core/src/exec/Job.h +++ b/runtime/neurun/core/src/exec/Job.h @@ -20,8 +20,8 @@ #include 
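// A conceptual sketch of the source/sink pairing that ExecutorBase::execute()
// above builds per model input and output: a source pushes the caller's buffer
// into a backend tensor before the run, a sink pulls the result back afterwards.
// Types and names here are invented stand-ins, and layout permutation is omitted.
#include <cstddef>
#include <cstring>
#include <vector>

struct TensorStandIn // stand-in for backend::operand::ITensor
{
  std::vector<unsigned char> storage;
};

class CopySource
{
public:
  CopySource(const void *buffer, size_t length) : _buffer{buffer}, _length{length} {}
  void push(TensorStandIn &tensor) const
  {
    tensor.storage.resize(_length);
    std::memcpy(tensor.storage.data(), _buffer, _length); // user input -> tensor
  }

private:
  const void *_buffer;
  size_t _length;
};

class CopySink
{
public:
  CopySink(void *buffer, size_t length) : _buffer{buffer}, _length{length} {}
  void pull(const TensorStandIn &tensor) const
  {
    std::memcpy(_buffer, tensor.storage.data(), _length); // tensor -> user output
  }

private:
  void *_buffer;
  size_t _length;
};

int main()
{
  int in = 42, out = 0;
  TensorStandIn tensor;
  CopySource{&in, sizeof(in)}.push(tensor);
  CopySink{&out, sizeof(out)}.pull(tensor);
  return out == 42 ? 0 : 1;
}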
#include "exec/IFunction.h" -#include "model/Index.h" -#include "model/OperandIndexSequence.h" +#include "ir/Index.h" +#include "ir/OperandIndexSequence.h" #include "backend/Backend.h" namespace neurun diff --git a/runtime/neurun/core/src/exec/ParallelExecutor.h b/runtime/neurun/core/src/exec/ParallelExecutor.h index 2f81ef3..4db5946 100644 --- a/runtime/neurun/core/src/exec/ParallelExecutor.h +++ b/runtime/neurun/core/src/exec/ParallelExecutor.h @@ -23,8 +23,8 @@ #include "FunctionSequence.h" #include "Job.h" -#include "model/OperandIndexSequence.h" -#include "model/Index.h" +#include "ir/OperandIndexSequence.h" +#include "ir/Index.h" #include "cpp14/memory.h" #include "exec/DataflowExecutor.h" #include "ParallelScheduler.h" diff --git a/runtime/neurun/core/src/exec/interp/Buffer.h b/runtime/neurun/core/src/exec/interp/Buffer.h index 3528e08..d60b59a 100644 --- a/runtime/neurun/core/src/exec/interp/Buffer.h +++ b/runtime/neurun/core/src/exec/interp/Buffer.h @@ -23,7 +23,7 @@ #include -#include "model/Data.h" +#include "ir/Data.h" namespace neurun { @@ -35,7 +35,7 @@ namespace interp /** * @brief Interface for writable data area */ -class Buffer : public model::Data +class Buffer : public ir::Data { public: /** diff --git a/runtime/neurun/core/src/exec/interp/ExecEnv.h b/runtime/neurun/core/src/exec/interp/ExecEnv.h index b3b6e65..a322fee 100644 --- a/runtime/neurun/core/src/exec/interp/ExecEnv.h +++ b/runtime/neurun/core/src/exec/interp/ExecEnv.h @@ -64,7 +64,7 @@ public: * @param[in] index Tensor index * @param[in] tensor Tensor */ - void assignTensor(const model::OperandIndex index, std::shared_ptr tensor) + void assignTensor(const ir::OperandIndex index, std::shared_ptr tensor) { assert(tensor->bufferRO() != nullptr); _tensors.emplace(index, tensor); @@ -75,17 +75,14 @@ public: * @param[in] index Tensor index * @return Tensor pointer */ - const ITensor *tensorAt(const model::OperandIndex index) const - { - return _tensors.at(index).get(); - } + const ITensor *tensorAt(const ir::OperandIndex index) const { return _tensors.at(index).get(); } /** * @brief Check environment contains tensor * @param[in] index Tensor index * @return @c true if environment contain tensor, otherwise @c false */ - bool contains(const model::OperandIndex index) const + bool contains(const ir::OperandIndex index) const { return (_tensors.find(index) != _tensors.end()); } @@ -97,7 +94,7 @@ public: * @note If already allocated, just return * @TODO More smart allocation policy */ - void allocateIfNeeded(const model::OperandIndex index, const model::OperandInfo &info) + void allocateIfNeeded(const ir::OperandIndex index, const ir::OperandInfo &info) { // already allocated, or constant if (contains(index)) @@ -117,8 +114,8 @@ public: * @param[in] info Operand info * @param[in] index_to_share Tensor index that have data to share */ - void allocateAndShareIfNeeded(const model::OperandIndex index, const model::OperandInfo &info, - const model::OperandIndex index_to_share) + void allocateAndShareIfNeeded(const ir::OperandIndex index, const ir::OperandInfo &info, + const ir::OperandIndex index_to_share) { if (!contains(index_to_share)) { @@ -144,7 +141,7 @@ public: * @param[in] index Tensor index * @note If allocated by outside, just return */ - void freeIfAllocated(const model::OperandIndex index) + void freeIfAllocated(const ir::OperandIndex index) { if (_buffers.find(index) != _buffers.end()) { @@ -156,9 +153,9 @@ private: const graph::Graph &_graph; // Tensor map to use in interpreter // It should map tensors that have 
allocated or assigned buffer pointer - std::unordered_map> _tensors; + std::unordered_map> _tensors; // Tensors allocated by allocateIfNeed (buffer) - std::unordered_set _buffers; + std::unordered_set _buffers; }; } // namespace interp diff --git a/runtime/neurun/core/src/exec/interp/ExecManager.cc b/runtime/neurun/core/src/exec/interp/ExecManager.cc index f5fe7db..92f182c 100644 --- a/runtime/neurun/core/src/exec/interp/ExecManager.cc +++ b/runtime/neurun/core/src/exec/interp/ExecManager.cc @@ -36,23 +36,23 @@ void ExecManager::execute(const IODescription &desc) It may execute divided model but now consider model inference is done at interpreter ***********************************************************************/ - model::OperandIndexMap> tensor_map; + ir::OperandIndexMap> tensor_map; for (uint32_t n = 0; n < _graph.getInputs().size(); n++) { - neurun::model::IOIndex index{n}; + ir::IOIndex index{n}; const auto input_index = _graph.getInputs().at(index); const auto &input = *desc.inputs.at(n); auto input_tensor = std::make_shared(input.info); - input_tensor->setData(std::make_shared( + input_tensor->setData(std::make_shared( reinterpret_cast(input.buffer), input.size)); tensor_map[input_index] = input_tensor; } for (uint32_t n = 0; n < _graph.getOutputs().size(); n++) { - neurun::model::IOIndex index{n}; + ir::IOIndex index{n}; const auto output_index = _graph.getOutputs().at(index); const auto &output = *desc.outputs.at(n); @@ -90,7 +90,7 @@ void ExecManager::execute(const IODescription &desc) } // Allocate constant tensor - _graph.operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) { + _graph.operands().iterate([&](const ir::OperandIndex &ind, const ir::Operand &obj) { if (obj.isConstant()) { VERBOSE(INTERPRETER) << "Allocate and assign constant tensor. 
operand index:" << ind.value() @@ -99,7 +99,7 @@ void ExecManager::execute(const IODescription &desc) auto const_tensor = std::make_shared(obj.info()); // Assume that interpreter's tensor layout is same with model (NHWC) const_tensor->setData( - std::make_shared(obj.data().base(), obj.info().total_size())); + std::make_shared(obj.data().base(), obj.info().total_size())); interp_env->assignTensor(ind, const_tensor); } }); diff --git a/runtime/neurun/core/src/exec/interp/ExecManager.h b/runtime/neurun/core/src/exec/interp/ExecManager.h index 098c110..420bcb9 100644 --- a/runtime/neurun/core/src/exec/interp/ExecManager.h +++ b/runtime/neurun/core/src/exec/interp/ExecManager.h @@ -50,7 +50,7 @@ public: * @return Graph object */ const graph::Graph &graph() final { return _graph; } - void setIndexedRanks(std::shared_ptr>) override{ + void setIndexedRanks(std::shared_ptr>) override{ // Not implemented }; /** @@ -61,7 +61,7 @@ public: private: const graph::Graph &_graph; - model::OperandIndexMap> _tensor_map; + ir::OperandIndexMap> _tensor_map; }; } // namespace interp diff --git a/runtime/neurun/core/src/exec/interp/Interpreter.cc b/runtime/neurun/core/src/exec/interp/Interpreter.cc index 30bc71d..e28f1bd 100644 --- a/runtime/neurun/core/src/exec/interp/Interpreter.cc +++ b/runtime/neurun/core/src/exec/interp/Interpreter.cc @@ -21,7 +21,7 @@ #include "Registration.h" -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" #include "util/logging.h" #include "model/OperationVisitor.h" @@ -41,18 +41,18 @@ class OperationExecutor : model::OperationVisitor public: OperationExecutor(ExecEnv *env) : _env{env} { - _kernels[model::OpCode::Add] = getAdd(); - _kernels[model::OpCode::Conv2D] = getConv2D(); - _kernels[model::OpCode::MaxPool2D] = getMaxPool2D(); - _kernels[model::OpCode::Concat] = getConcat(); - _kernels[model::OpCode::AvgPool2D] = getAvgPool2D(); - _kernels[model::OpCode::FullyConnected] = getFullyConnected(); - _kernels[model::OpCode::Softmax] = getSoftMax(); - _kernels[model::OpCode::Reshape] = getReshape(); - _kernels[model::OpCode::DepthwiseConv2D] = getDepthwiseConv(); + _kernels[ir::OpCode::Add] = getAdd(); + _kernels[ir::OpCode::Conv2D] = getConv2D(); + _kernels[ir::OpCode::MaxPool2D] = getMaxPool2D(); + _kernels[ir::OpCode::Concat] = getConcat(); + _kernels[ir::OpCode::AvgPool2D] = getAvgPool2D(); + _kernels[ir::OpCode::FullyConnected] = getFullyConnected(); + _kernels[ir::OpCode::Softmax] = getSoftMax(); + _kernels[ir::OpCode::Reshape] = getReshape(); + _kernels[ir::OpCode::DepthwiseConv2D] = getDepthwiseConv(); } - void execute(const model::OperationIndex &idx) + void execute(const ir::OperationIndex &idx) { const auto nodeName = _env->graph().operations().at(idx).name(); VERBOSE(INTERPRETER) << "Prepare output operands and execute " << nodeName @@ -64,18 +64,18 @@ private: #define OP(InternalName) \ virtual void visit(const model::operation::InternalName &node) override \ { \ - if (_kernels[model::OpCode::InternalName]->prepare != nullptr) \ + if (_kernels[ir::OpCode::InternalName]->prepare != nullptr) \ { \ - _kernels[model::OpCode::InternalName]->prepare(_env, node); \ + _kernels[ir::OpCode::InternalName]->prepare(_env, node); \ } \ - _kernels[model::OpCode::InternalName]->invoke(_env, node); \ + _kernels[ir::OpCode::InternalName]->invoke(_env, node); \ } -#include "model/Operations.lst" +#include "ir/Operations.lst" #undef OP private: ExecEnv *_env; - std::unordered_map _kernels; + std::unordered_map _kernels; }; void Interpreter::run() @@ -83,7 +83,7 @@ void 
Interpreter::run() VERBOSE(INTERPRETER) << "Interpreter is invoked " << std::endl; // operand_stack: save operands prepared to use - std::stack operand_stack; + std::stack operand_stack; // Note: We should push input first, then constant. // We use use-def for find operators ready to execution, @@ -97,7 +97,7 @@ void Interpreter::run() operand_stack.push(ind); } - _env->graph().operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) { + _env->graph().operands().iterate([&](const ir::OperandIndex &ind, const ir::Operand &obj) { if (obj.isConstant()) { VERBOSE(INTERPRETER) << "Constant: Push to operand stack " << ind.value() << std::endl; @@ -107,8 +107,8 @@ void Interpreter::run() }); // Execution - std::unordered_set ready_check; - std::unordered_set executed; + std::unordered_set ready_check; + std::unordered_set executed; OperationExecutor executor{_env.get()}; while (!operand_stack.empty()) { @@ -121,7 +121,7 @@ void Interpreter::run() ready_check.insert(current_operand_index); // Find prepared operations by scan use of current operand - std::stack operation_stack; + std::stack operation_stack; const auto use_operators = _env->graph().operands().at(current_operand_index).getUses(); for (auto use_operator : use_operators.list()) { diff --git a/runtime/neurun/core/src/exec/interp/Tensor.h b/runtime/neurun/core/src/exec/interp/Tensor.h index 947ae5e..c53fd46 100644 --- a/runtime/neurun/core/src/exec/interp/Tensor.h +++ b/runtime/neurun/core/src/exec/interp/Tensor.h @@ -23,7 +23,7 @@ #include "Buffer.h" -#include "model/OperandInfo.h" +#include "ir/OperandInfo.h" #include "backend/operand/ITensor.h" #include "ir/Layout.h" @@ -58,7 +58,7 @@ public: * @brief Return shared pointer for data * @return Data shared pointer */ - virtual std::shared_ptr shareData() const = 0; + virtual std::shared_ptr shareData() const = 0; /** * @brief Set internal/external buffer * @param[in] buffer Buffer pointer @@ -68,7 +68,7 @@ public: * @brief Set data reference (including constant, input) * @param[in] data Data pointer */ - virtual void setData(std::shared_ptr data) = 0; + virtual void setData(std::shared_ptr data) = 0; virtual void releaseData() = 0; virtual size_t total_size() const = 0; @@ -86,7 +86,7 @@ public: * @brief Return TensorInfo * @return TensorInfo */ - virtual const model::OperandInfo &tensorInfo() const = 0; + virtual const ir::OperandInfo &tensorInfo() const = 0; /** * @brief Return number of elements * @return Number of elements @@ -102,7 +102,7 @@ class ROTensor final : public ITensor { public: ROTensor() = delete; - ROTensor(const model::OperandInfo &info) : _info(info) + ROTensor(const ir::OperandInfo &info) : _info(info) { // DO NOTHING } @@ -114,9 +114,9 @@ public: throw std::runtime_error{"Read only tensor"}; } const uint8_t *bufferRO() const override { return _data->base(); } - std::shared_ptr shareData() const override { return _data; } + std::shared_ptr shareData() const override { return _data; } void setBuffer(std::shared_ptr buffer) override { _data = buffer; } - void setData(std::shared_ptr data) override { _data = data; } + void setData(std::shared_ptr data) override { _data = data; } void releaseData() override { _data = nullptr; } size_t total_size() const override { return _info.total_size(); } @@ -126,12 +126,12 @@ public: ir::Layout layout() const override; bool has_padding() const override { return false; } ir::DataType data_type() const override { return _info.typeInfo().type(); } - const model::OperandInfo &tensorInfo() const override { return 
_info; } + const ir::OperandInfo &tensorInfo() const override { return _info; } uint64_t num_elements() const override { return _info.shape().num_elements(); }; private: - const model::OperandInfo _info; - std::shared_ptr _data{nullptr}; + const ir::OperandInfo _info; + std::shared_ptr _data{nullptr}; }; /** @@ -141,7 +141,7 @@ class Tensor final : public ITensor { public: Tensor() = delete; - Tensor(const model::OperandInfo &info) : _info(info) + Tensor(const ir::OperandInfo &info) : _info(info) { // DO NOTHING } @@ -150,9 +150,9 @@ public: uint8_t *buffer() const override { return _buffer->baseWritable(); } std::shared_ptr shareBuffer() const override { return _buffer; }; const uint8_t *bufferRO() const override { return _buffer->base(); } - std::shared_ptr shareData() const override { return _buffer; } + std::shared_ptr shareData() const override { return _buffer; } void setBuffer(std::shared_ptr buffer) override { _buffer = buffer; } - void setData(std::shared_ptr) override + void setData(std::shared_ptr) override { throw std::runtime_error{"Passed data may read-only"}; } @@ -165,11 +165,11 @@ public: ir::Layout layout() const override; bool has_padding() const override { return false; } ir::DataType data_type() const override { return _info.typeInfo().type(); } - const model::OperandInfo &tensorInfo() const override { return _info; } + const ir::OperandInfo &tensorInfo() const override { return _info; } uint64_t num_elements() const override { return _info.shape().num_elements(); }; private: - const model::OperandInfo _info; + const ir::OperandInfo _info; std::shared_ptr _buffer{nullptr}; }; diff --git a/runtime/neurun/core/src/exec/interp/operations/Concat.cc b/runtime/neurun/core/src/exec/interp/operations/Concat.cc index bcd90c5..efaf7c0 100644 --- a/runtime/neurun/core/src/exec/interp/operations/Concat.cc +++ b/runtime/neurun/core/src/exec/interp/operations/Concat.cc @@ -65,7 +65,7 @@ void prepareConcat(ExecEnv *env, const model::Operation &node) auto out_shape = first_tensor->tensorInfo().shape(); out_shape.dim(axis) = out_axis_dimension; env->allocateIfNeeded(out_index, - model::OperandInfo{out_shape, first_tensor->tensorInfo().typeInfo()}); + ir::OperandInfo{out_shape, first_tensor->tensorInfo().typeInfo()}); auto out_tensor = env->tensorAt(out_index); UNUSED_RELEASE(out_tensor); diff --git a/runtime/neurun/core/src/exec/interp/operations/FullyConnected.cc b/runtime/neurun/core/src/exec/interp/operations/FullyConnected.cc index f12f2fe..1e7e143 100644 --- a/runtime/neurun/core/src/exec/interp/operations/FullyConnected.cc +++ b/runtime/neurun/core/src/exec/interp/operations/FullyConnected.cc @@ -61,7 +61,7 @@ void prepareFC(ExecEnv *env, const model::Operation &node) ir::Shape output_shape(2); output_shape.dim(0) = batch_size; output_shape.dim(1) = num_units; - const model::OperandInfo out_info{output_shape, in_tensor->tensorInfo().typeInfo()}; + const ir::OperandInfo out_info{output_shape, in_tensor->tensorInfo().typeInfo()}; env->allocateIfNeeded(out_index, out_info); auto out_tensor = env->tensorAt(out_index); diff --git a/runtime/neurun/core/src/exec/interp/operations/SoftMax.cc b/runtime/neurun/core/src/exec/interp/operations/SoftMax.cc index 5b1be21..666263d 100644 --- a/runtime/neurun/core/src/exec/interp/operations/SoftMax.cc +++ b/runtime/neurun/core/src/exec/interp/operations/SoftMax.cc @@ -83,7 +83,7 @@ void prepareSoftMax(ExecEnv *env, const model::Operation &node) const auto output_shape = env->graph().operands().at(in_index).info().shape(); const auto output_type = 
env->graph().operands().at(out_index).info().typeInfo(); - const model::OperandInfo output_info{output_shape, output_type}; + const ir::OperandInfo output_info{output_shape, output_type}; env->allocateIfNeeded(out_index, output_info); auto out_tensor = env->tensorAt(out_index); diff --git a/runtime/neurun/core/src/ir/Graph.cc b/runtime/neurun/core/src/ir/Graph.cc index cd2e291..ad05c66 100644 --- a/runtime/neurun/core/src/ir/Graph.cc +++ b/runtime/neurun/core/src/ir/Graph.cc @@ -43,31 +43,31 @@ Graph::Graph() = default; Graph::~Graph(void) = default; -model::OperandIndex Graph::addOperand(const ir::Shape &shape, const ir::TypeInfo &type) +ir::OperandIndex Graph::addOperand(const ir::Shape &shape, const ir::TypeInfo &type) { return _operands.emplace(shape, type); } -model::OperationIndex Graph::addOperation(std::unique_ptr &&node) +ir::OperationIndex Graph::addOperation(std::unique_ptr &&node) { assert(isBuildingPhase()); return _operations.push(std::move(node)); } -void Graph::setOperandValue(const model::OperandIndex &ind, std::unique_ptr &&data) +void Graph::setOperandValue(const ir::OperandIndex &ind, std::unique_ptr &&data) { assert(isBuildingPhase()); assert(_operands.exist(ind)); _operands.at(ind).data(std::move(data)); } -void Graph::addInput(const model::OperandIndex &ind) +void Graph::addInput(const ir::OperandIndex &ind) { assert(isBuildingPhase()); _inputs.append(ind); } -void Graph::addOutput(const model::OperandIndex &ind) +void Graph::addOutput(const ir::OperandIndex &ind) { assert(isBuildingPhase()); _outputs.append(ind); @@ -97,9 +97,9 @@ void Graph::lower(void) // Lower { // operand::LowerInfo holder - model::OperandIndexMap> operands_lower_info; + ir::OperandIndexMap> operands_lower_info; - _operands.iterate([&](const model::OperandIndex &index, const model::Operand &object) { + _operands.iterate([&](const ir::OperandIndex &index, const ir::Operand &object) { operands_lower_info[index] = nnfw::cpp14::make_unique(graph::operand::asShape4D(object.shape())); }); @@ -109,7 +109,7 @@ void Graph::lower(void) // Make subgraphs while checking whether a node can be merged into a subgraph. 
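// Graph::lower() above walks operands and operations exclusively through
// iterate(index, object) callbacks. A toy version of such an index-keyed
// container, assumed shape only (the real ones are the Operands/Operations
// managers under ir/):
#include <cstdint>
#include <functional>
#include <iostream>
#include <map>
#include <string>

template <typename Object> class ToyObjectManager
{
public:
  uint32_t push(Object &&obj)
  {
    const uint32_t index = _next_index++;
    _objects.emplace(index, std::move(obj));
    return index;
  }

  void iterate(const std::function<void(const uint32_t &, const Object &)> &fn) const
  {
    for (const auto &entry : _objects) // stable index order via std::map
      fn(entry.first, entry.second);
  }

private:
  uint32_t _next_index = 0;
  std::map<uint32_t, Object> _objects;
};

int main()
{
  ToyObjectManager<std::string> operands;
  operands.push("ifm");
  operands.push("ker");
  operands.iterate([](const uint32_t &index, const std::string &name) {
    std::cout << index << " : " << name << "\n";
  });
  return 0;
}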
makeSubgraphs(operands_lower_info); - _subgraphs->iterate([&](const model::SubgraphIndex &, model::Subgraph &subg) { + _subgraphs->iterate([&](const ir::SubgraphIndex &, model::Subgraph &subg) { assert(subg.operations().size() > 0); std::reverse(std::begin(subg.operations()), std::end(subg.operations())); }); @@ -149,23 +149,22 @@ void Graph::lower(void) void Graph::initializeUseDef() { - operations().iterate( - [&](const model::OperationIndex &index, const model::Operation &node) -> void { - auto outputs = node.getOutputs(); - for (auto output : outputs) - { - operands().at(output).appendDef(index); - } + operations().iterate([&](const ir::OperationIndex &index, const model::Operation &node) -> void { + auto outputs = node.getOutputs(); + for (auto output : outputs) + { + operands().at(output).appendDef(index); + } - auto inputs = node.getInputs(); - for (auto input : inputs) - { - operands().at(input).appendUse(index); - } - }); + auto inputs = node.getInputs(); + for (auto input : inputs) + { + operands().at(input).appendUse(index); + } + }); } -const operation::LowerInfo *Graph::getLowerInfo(const model::SubgraphIndex &subg_index) const +const operation::LowerInfo *Graph::getLowerInfo(const ir::SubgraphIndex &subg_index) const { if (!_lower_info_map) return nullptr; @@ -175,14 +174,14 @@ const operation::LowerInfo *Graph::getLowerInfo(const model::SubgraphIndex &subg return itr->second.get(); } -void Graph::setLowerInfo(const model::SubgraphIndex &subg_index, +void Graph::setLowerInfo(const ir::SubgraphIndex &subg_index, std::unique_ptr &&lower_info) { assert(_lower_info_map); _lower_info_map->operation.insert(std::make_pair(subg_index, std::move(lower_info))); } -void Graph::removeLowerInfo(const model::SubgraphIndex &subg_index) +void Graph::removeLowerInfo(const ir::SubgraphIndex &subg_index) { auto &subg_lower_info = _lower_info_map->operation; assert(subg_lower_info.find(subg_index) != subg_lower_info.end()); @@ -196,7 +195,7 @@ void Graph::removeLowerInfo(const model::SubgraphIndex &subg_index) } } -const operand::LowerInfo *Graph::getLowerInfo(const model::OperandIndex &index) const +const operand::LowerInfo *Graph::getLowerInfo(const ir::OperandIndex &index) const { if (!_lower_info_map) return nullptr; @@ -206,7 +205,7 @@ const operand::LowerInfo *Graph::getLowerInfo(const model::OperandIndex &index) return itr->second.get(); } -operand::LowerInfo *Graph::getLowerInfo(const model::OperandIndex &index) +operand::LowerInfo *Graph::getLowerInfo(const ir::OperandIndex &index) { if (!_lower_info_map) return nullptr; @@ -216,20 +215,20 @@ operand::LowerInfo *Graph::getLowerInfo(const model::OperandIndex &index) return itr->second.get(); } -void Graph::setLowerInfo(const model::OperandIndex &index, +void Graph::setLowerInfo(const ir::OperandIndex &index, std::unique_ptr &&lower_info) { assert(_lower_info_map); _lower_info_map->operand.insert(std::make_pair(index, std::move(lower_info))); } -void Graph::removeLowerInfo(const model::OperandIndex &index) +void Graph::removeLowerInfo(const ir::OperandIndex &index) { _lower_info_map->operand.erase(index); } void Graph::makeSubgraphs( - model::OperandIndexMap> &operands_lower_info) + ir::OperandIndexMap> &operands_lower_info) { // if SUBG_MAX_NODE == 0, no limit on nodes of a subgraph const int subg_max_node = util::getConfigInt(util::config::SUBG_MAX_NODE); @@ -237,11 +236,11 @@ void Graph::makeSubgraphs( bool is_profiling = util::getConfigBool(util::config::PROFILING_MODE); model::Subgraph *subg = nullptr; - model::SubgraphIndex 
subg_index; + ir::SubgraphIndex subg_index; // NOTE: The below method appends nodes while making one subgraph if needed. If something better // ways, happy to update this code. - Graph::PostDfsConstIterator().iterate(*this, [&](const model::OperationIndex &node_index, + Graph::PostDfsConstIterator().iterate(*this, [&](const ir::OperationIndex &node_index, const model::Operation &node) { // LowerInfo for in/output operands auto backend = _backend_resolver->getBackend(node_index); @@ -293,7 +292,7 @@ void Graph::makeSubgraphs( lower_info->addDefPermuteFactor(operand::PermuteFactor{backend, backend_layout}); } - if (node.opcode() == model::OpCode::Split) + if (node.opcode() == ir::OpCode::Split) { // Ideally this condition must be like 'node.getOutputs().size() > 1' but // this is true for HashtableLookup also. TODO: Come up with more clever solution @@ -380,7 +379,7 @@ void Graph::makeSubgraphs( } void Graph::manipulateLowerInfo( - model::OperandIndexMap> &operands_lower_info) + ir::OperandIndexMap> &operands_lower_info) { const auto default_backend = backend::BackendManager::get().getDefault(); for (auto index : _inputs) @@ -404,7 +403,7 @@ void Graph::manipulateLowerInfo( } // Set LowerInfo for each operand from the operand::LowerInfo holder - _operands.iterate([&](const model::OperandIndex &index, model::Operand &) { + _operands.iterate([&](const ir::OperandIndex &index, ir::Operand &) { setLowerInfo(index, std::move(operands_lower_info[index])); }); } @@ -416,7 +415,7 @@ void Graph::dumpLowerInfo() std::map dumps; - _operands.iterate([&](const model::OperandIndex &index, model::Operand &object) { + _operands.iterate([&](const ir::OperandIndex &index, ir::Operand &object) { std::stringstream sstream; if (!getLowerInfo(index)->def_factors().empty() || !getLowerInfo(index)->use_factors().empty()) { @@ -431,7 +430,7 @@ void Graph::dumpLowerInfo() return "{ " + str + "}"; }; - auto operation_index_to_string = [](const model::OperationIndexList &operations) { + auto operation_index_to_string = [](const ir::OperationIndexList &operations) { std::string str; for (auto op : operations.list()) { @@ -475,8 +474,8 @@ void Graph::dumpLowerInfo() } } -bool Graph::mergeable(const model::SubgraphIndex &subg_index, - const model::OperationIndex &node_index, ir::Layout layout) +bool Graph::mergeable(const ir::SubgraphIndex &subg_index, const ir::OperationIndex &node_index, + ir::Layout layout) { // Are they mergeable? // 1. the same backend id and layout? 
@@ -547,9 +546,9 @@ bool Graph::mergeable(const model::SubgraphIndex &subg_index, return false; } -model::SubgraphIndex Graph::appendFreshSingleOpSubgraph(const model::OperationIndex &node_index, - const model::Operation &node, - ir::Layout layout) +ir::SubgraphIndex Graph::appendFreshSingleOpSubgraph(const ir::OperationIndex &node_index, + const model::Operation &node, + ir::Layout layout) { // Create a fresh subgraph with one operation, and append it to subgraphs // Create a fresh subgraph @@ -589,7 +588,7 @@ template void Graph::DefaultIterator::iterate(GraphRef graph, const IterFn &fn) const { graph.operations().iterate( - [&](const model::OperationIndex &index, NodeRef node) -> void { fn(index, node); }); + [&](const ir::OperationIndex &index, NodeRef node) -> void { fn(index, node); }); } // @@ -601,12 +600,12 @@ void Graph::PostDfsIterator::iterate(GraphRef graph, const IterFn &fn) { assert(!graph.isBuildingPhase()); // Restrict iteration condition - model::OperationIndexMap visited; + ir::OperationIndexMap visited; graph.operations().iterate( - [&](const model::OperationIndex &index, NodeRef) { visited[index] = false; }); + [&](const ir::OperationIndex &index, NodeRef) { visited[index] = false; }); - std::function dfs_recursive = - [&](const model::OperationIndex &index, NodeRef node) -> void { + std::function dfs_recursive = + [&](const ir::OperationIndex &index, NodeRef node) -> void { if (visited[index]) return; visited[index] = true; @@ -626,9 +625,8 @@ void Graph::PostDfsIterator::iterate(GraphRef graph, const IterFn &fn) graph.operations().iterate(dfs_recursive); // All of the operations(nodes) must have been visited. - assert( - std::all_of(visited.begin(), visited.end(), - [](const std::pair &v) { return v.second; })); + assert(std::all_of(visited.begin(), visited.end(), + [](const std::pair &v) { return v.second; })); } void Graph::setBackendResolver(std::unique_ptr &&br) diff --git a/runtime/neurun/core/src/model/OpCode.cc b/runtime/neurun/core/src/ir/OpCode.cc similarity index 92% rename from runtime/neurun/core/src/model/OpCode.cc rename to runtime/neurun/core/src/ir/OpCode.cc index 48b80b9..dc50e0d 100644 --- a/runtime/neurun/core/src/model/OpCode.cc +++ b/runtime/neurun/core/src/ir/OpCode.cc @@ -14,25 +14,25 @@ * limitations under the License. */ -#include "model/OpCode.h" +#include "ir/OpCode.h" #include namespace neurun { -namespace model +namespace ir { const char *toString(OpCode opcode) { static const std::unordered_map map{{OpCode::Invalid, "Invalid"}, #define OP(Name) {OpCode::Name, #Name}, -#include "model/Operations.lst" +#include "ir/Operations.lst" #undef OP {OpCode::Subgraph, "Subgraph"}, {OpCode::COUNT, "COUNT"}}; return map.at(opcode); } -} // namespace model +} // namespace ir } // namespace neurun diff --git a/runtime/neurun/core/src/model/Operand.cc b/runtime/neurun/core/src/ir/Operand.cc similarity index 79% rename from runtime/neurun/core/src/model/Operand.cc rename to runtime/neurun/core/src/ir/Operand.cc index d5970cd..f16ff04 100644 --- a/runtime/neurun/core/src/model/Operand.cc +++ b/runtime/neurun/core/src/ir/Operand.cc @@ -14,11 +14,11 @@ * limitations under the License. 
*/ -#include "model/Operand.h" +#include "ir/Operand.h" namespace neurun { -namespace model +namespace ir { size_t Operand::operandSize(void) const @@ -38,11 +38,11 @@ size_t Operand::operandSize(void) const return element_size * elements; } -void Operand::appendUse(const ::neurun::model::OperationIndex &idx) { _uses.append(idx); } +void Operand::appendUse(const OperationIndex &idx) { _uses.append(idx); } -void Operand::removeUse(const ::neurun::model::OperationIndex &idx) { _uses.remove(idx); } +void Operand::removeUse(const OperationIndex &idx) { _uses.remove(idx); } -void Operand::appendDef(const ::neurun::model::OperationIndex &idx) +void Operand::appendDef(const OperationIndex &idx) { assert(!isConstant()); assert(_def.size() == 0); @@ -50,7 +50,7 @@ void Operand::appendDef(const ::neurun::model::OperationIndex &idx) _def.append(idx); } -void Operand::removeDef(const ::neurun::model::OperationIndex &idx) +void Operand::removeDef(const OperationIndex &idx) { assert(_def.contains(idx)); @@ -66,5 +66,5 @@ const graph::operand::ParentInfo *Operand::parent_info() const { return _parent_ graph::operand::ParentInfo *Operand::parent_info() { return _parent_info.get(); } -} // namespace model +} // namespace ir } // namespace neurun diff --git a/runtime/neurun/core/src/model/OperandIndexSequence.cc b/runtime/neurun/core/src/ir/OperandIndexSequence.cc similarity index 94% rename from runtime/neurun/core/src/model/OperandIndexSequence.cc rename to runtime/neurun/core/src/ir/OperandIndexSequence.cc index a9454df..3024441 100644 --- a/runtime/neurun/core/src/model/OperandIndexSequence.cc +++ b/runtime/neurun/core/src/ir/OperandIndexSequence.cc @@ -14,13 +14,13 @@ * limitations under the License. */ -#include "model/OperandIndexSequence.h" +#include "ir/OperandIndexSequence.h" #include namespace neurun { -namespace model +namespace ir { OperandIndexSequence::OperandIndexSequence(std::initializer_list list) : _set(list) @@ -54,5 +54,5 @@ void OperandIndexSequence::replace(const OperandIndex &from, const OperandIndex std::replace(_set.begin(), _set.end(), from, to); } -} // namespace model +} // namespace ir } // namespace neurun diff --git a/runtime/neurun/core/src/model/OperationIndexList.cc b/runtime/neurun/core/src/ir/OperationIndexList.cc similarity index 84% rename from runtime/neurun/core/src/model/OperationIndexList.cc rename to runtime/neurun/core/src/ir/OperationIndexList.cc index e2c077e..261cc5c 100644 --- a/runtime/neurun/core/src/model/OperationIndexList.cc +++ b/runtime/neurun/core/src/ir/OperationIndexList.cc @@ -14,13 +14,13 @@ * limitations under the License. 
*/ -#include "model/OperationIndexList.h" +#include "ir/OperationIndexList.h" #include namespace neurun { -namespace model +namespace ir { OperationIndexList::OperationIndexList(std::initializer_list list) : _list(list) @@ -28,10 +28,10 @@ OperationIndexList::OperationIndexList(std::initializer_list lis // DO NOTHING } -bool OperationIndexList::contains(const ::neurun::model::OperationIndex &index) const +bool OperationIndexList::contains(const OperationIndex &index) const { return std::find(_list.begin(), _list.end(), index) != _list.end(); } -} // namespace model +} // namespace ir } // namespace neurun diff --git a/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.cc b/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.cc index 7243b81..b662131 100644 --- a/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.cc +++ b/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.cc @@ -28,8 +28,7 @@ namespace graph namespace pass { -void ConstantInsertionPass::callback(const model::OperationIndex &node_index, - model::Operation &node) +void ConstantInsertionPass::callback(const ir::OperationIndex &node_index, model::Operation &node) { const auto &subgraph_index = _graph.subgraphs().getOperation(node_index); const auto subg_lower_info = _graph.getLowerInfo(subgraph_index); @@ -48,8 +47,8 @@ void ConstantInsertionPass::callback(const model::OperationIndex &node_index, { auto new_object = object; // TODO Remove const_case - const_cast &>(new_object.getDef().list()).clear(); - const_cast &>(new_object.getUses().list()).clear(); + const_cast &>(new_object.getDef().list()).clear(); + const_cast &>(new_object.getUses().list()).clear(); const auto new_index = _graph.operands().emplace(new_object); _replace_operands_map[key] = new_index; diff --git a/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.h b/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.h index c7a5bd9..245a047 100644 --- a/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.h +++ b/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.h @@ -18,7 +18,7 @@ #define __NEURUN_GRAPH_PASS_CONSTANT_INSERTION_PASS_H__ #include -#include +#include #include "OperationPass.h" #include #include @@ -39,12 +39,12 @@ public: std::string id() final { return "ConstantInsertionPass"; } public: - void callback(const model::OperationIndex &index, model::Operation &node) final; + void callback(const ir::OperationIndex &index, model::Operation &node) final; private: struct ReplaceKey { - model::OperandIndex index; + ir::OperandIndex index; graph::operand::PermuteFactor factor; bool operator==(const ReplaceKey &other) const @@ -61,12 +61,12 @@ private: std::size_t operator()(const ReplaceKey &key) const noexcept { using std::hash; - return hash()(key.index) ^ + return hash()(key.index) ^ (hash()(key.factor) << 1); } }; - std::unordered_map _replace_operands_map; + std::unordered_map _replace_operands_map; }; } // namespace pass diff --git a/runtime/neurun/core/src/ir/pass/OperandPass.cc b/runtime/neurun/core/src/ir/pass/OperandPass.cc index cec131e..132ffcf 100644 --- a/runtime/neurun/core/src/ir/pass/OperandPass.cc +++ b/runtime/neurun/core/src/ir/pass/OperandPass.cc @@ -28,7 +28,7 @@ namespace pass void OperandPass::run() { _graph.operands().iterate( - [&](const model::OperandIndex &index, model::Operand &object) { callback(index, object); }); + [&](const ir::OperandIndex &index, ir::Operand &object) { callback(index, object); }); } } // namespace pass diff --git a/runtime/neurun/core/src/ir/pass/OperandPass.h 
b/runtime/neurun/core/src/ir/pass/OperandPass.h index 4b25929..b112256 100644 --- a/runtime/neurun/core/src/ir/pass/OperandPass.h +++ b/runtime/neurun/core/src/ir/pass/OperandPass.h @@ -18,14 +18,14 @@ #define __NEURUN_GRAPH_PASS_OPERAND_PASS_H__ #include "Pass.h" -#include "model/Index.h" +#include "ir/Index.h" namespace neurun { -namespace model +namespace ir { class Operand; -} // namespace graph +} // namespace ir } // namespace neurun namespace neurun @@ -43,7 +43,7 @@ public: public: std::string id() override = 0; void run() override final; - virtual void callback(const model::OperandIndex &i, model::Operand &o) = 0; + virtual void callback(const ir::OperandIndex &i, ir::Operand &o) = 0; }; } // namespace pass diff --git a/runtime/neurun/core/src/ir/pass/OperationPass.cc b/runtime/neurun/core/src/ir/pass/OperationPass.cc index b726dec..1f3bd41 100644 --- a/runtime/neurun/core/src/ir/pass/OperationPass.cc +++ b/runtime/neurun/core/src/ir/pass/OperationPass.cc @@ -16,7 +16,7 @@ #include "OperationPass.h" -#include "model/Index.h" +#include "ir/Index.h" #include "model/Operation.h" #include "ir/Graph.h" @@ -30,7 +30,7 @@ namespace pass void OperationPass::run() { _graph.operations().iterate( - [&](const model::OperationIndex &index, model::Operation &node) { callback(index, node); }); + [&](const ir::OperationIndex &index, model::Operation &node) { callback(index, node); }); } } // namespace pass diff --git a/runtime/neurun/core/src/ir/pass/OperationPass.h b/runtime/neurun/core/src/ir/pass/OperationPass.h index 51cf328..3bca503 100644 --- a/runtime/neurun/core/src/ir/pass/OperationPass.h +++ b/runtime/neurun/core/src/ir/pass/OperationPass.h @@ -23,7 +23,7 @@ #define __NEURUN_GRAPH_PASS_OPERATION_PASS_H__ #include "Pass.h" -#include "model/Index.h" +#include "ir/Index.h" namespace neurun { @@ -61,7 +61,7 @@ public: * @param index is the index of a node in graph * @param node is the node in graph */ - virtual void callback(const model::OperationIndex &index, model::Operation &node) = 0; + virtual void callback(const ir::OperationIndex &index, model::Operation &node) = 0; /** * @brief Run the pass diff --git a/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.cc b/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.cc index e848c88..71a6853 100644 --- a/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.cc +++ b/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.cc @@ -16,7 +16,7 @@ #include "PermutationEliminationPass.h" -#include "model/Operand.h" +#include "ir/Operand.h" #include "ir/operand/LowerInfo.h" #include "ir/Graph.h" #include "backend/IConfig.h" @@ -29,8 +29,7 @@ namespace graph { namespace pass { -void PermutationEliminationPass::callback(const model::OperandIndex &inp_index, - model::Operand &object) +void PermutationEliminationPass::callback(const ir::OperandIndex &inp_index, ir::Operand &object) { if (_graph.getInputs().contains(inp_index)) { @@ -42,8 +41,8 @@ void PermutationEliminationPass::callback(const model::OperandIndex &inp_index, } } -void PermutationEliminationPass::eliminateInput(const model::OperandIndex &inp_index, - model::Operand &object) +void PermutationEliminationPass::eliminateInput(const ir::OperandIndex &inp_index, + ir::Operand &object) { auto &model_inputs = _graph.getInputs(); @@ -91,8 +90,8 @@ void PermutationEliminationPass::eliminateInput(const model::OperandIndex &inp_i } } -void PermutationEliminationPass::eliminateOutput(const model::OperandIndex &out_index, - model::Operand &object) +void 
PermutationEliminationPass::eliminateOutput(const ir::OperandIndex &out_index, + ir::Operand &object) { auto &model_outputs = _graph.getOutputs(); @@ -147,7 +146,7 @@ void PermutationEliminationPass::eliminateOutput(const model::OperandIndex &out_ } bool PermutationEliminationPass::isPermuteLayerToEliminate( - const model::OperandIndexSequence &inp_indexes, const model::OperandIndexSequence &out_indexes, + const ir::OperandIndexSequence &inp_indexes, const ir::OperandIndexSequence &out_indexes, bool is_for_model_input) { auto input_def_factors = _graph.getLowerInfo(inp_indexes.at(0))->def_factors(); diff --git a/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.h b/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.h index 332eeb6..e95418f 100644 --- a/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.h +++ b/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.h @@ -18,8 +18,8 @@ #define __NEURUN_GRAPH_PASS_PERMUTATION_ELIMINATION_PASS_H__ #include "OperandPass.h" -#include "model/Operand.h" -#include "model/OperandIndexSequence.h" +#include "ir/Operand.h" +#include "ir/OperandIndexSequence.h" namespace neurun { @@ -36,7 +36,7 @@ public: public: std::string id() override { return "PermutationEliminationPass"; } - void callback(const model::OperandIndex &index, model::Operand &object) override; + void callback(const ir::OperandIndex &index, ir::Operand &object) override; private: /** @@ -50,7 +50,7 @@ private: * * @return */ - void eliminateInput(const model::OperandIndex &inp_index, model::Operand &object); + void eliminateInput(const ir::OperandIndex &inp_index, ir::Operand &object); /** * @brief Remove Permute operation that permutates output of a model @@ -63,7 +63,7 @@ private: * * @return */ - void eliminateOutput(const model::OperandIndex &out_index, model::Operand &object); + void eliminateOutput(const ir::OperandIndex &out_index, ir::Operand &object); /** * @brief Determine if passed operands are permute layer's input and output, that must be @@ -75,8 +75,8 @@ private: * * @return if it is permutation layer */ - bool isPermuteLayerToEliminate(const model::OperandIndexSequence &inp_indexes, - const model::OperandIndexSequence &out_indexes, + bool isPermuteLayerToEliminate(const ir::OperandIndexSequence &inp_indexes, + const ir::OperandIndexSequence &out_indexes, bool is_for_model_input); }; diff --git a/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.cc b/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.cc index 13a751b..190916f 100644 --- a/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.cc +++ b/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.cc @@ -20,7 +20,7 @@ #include #include -#include "model/Operand.h" +#include "ir/Operand.h" #include "ir/operation/LowerInfo.h" #include "ir/Graph.h" #include "backend/IConfig.h" @@ -37,7 +37,7 @@ namespace graph namespace pass { -void PermutationInsertionPass::callback(const model::OperandIndex &index, model::Operand &object) +void PermutationInsertionPass::callback(const ir::OperandIndex &index, ir::Operand &object) { auto &&operand_li = _graph.getLowerInfo(index); assert(operand_li); @@ -49,10 +49,10 @@ void PermutationInsertionPass::callback(const model::OperandIndex &index, model: return; } - std::list permute_indexes; + std::list permute_indexes; // Build a map for all necessary type of operands - std::unordered_map factor_to_index; + std::unordered_map factor_to_index; { assert(operand_li->def_factors().size() == 1); for (auto factor : operand_li->def_factors()) @@ 
-75,7 +75,7 @@ void PermutationInsertionPass::callback(const model::OperandIndex &index, model: // Update operations' input that uses this operand { - std::list remove_list; + std::list remove_list; auto uses = object.getUses(); for (auto use : uses.list()) @@ -118,9 +118,8 @@ void PermutationInsertionPass::callback(const model::OperandIndex &index, model: } } -model::OperationIndex -PermutationInsertionPass::insertPermute(const model::OperandIndex &operand_index, - const operand::PermuteFactor &factor) +ir::OperationIndex PermutationInsertionPass::insertPermute(const ir::OperandIndex &operand_index, + const operand::PermuteFactor &factor) { assert(!_graph.isBuildingPhase()); diff --git a/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.h b/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.h index a0bc0bf..7269e42 100644 --- a/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.h +++ b/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.h @@ -18,7 +18,7 @@ #define __NEURUN_GRAPH_PASS_PERMUTATION_INSERTION_PASS_H__ #include "OperandPass.h" -#include "model/Operand.h" //for model::OperationIndex +#include "ir/Operand.h" //for OperationIndex #include "backend/BackendManager.h" #include "ir/operand/PermuteFactor.h" @@ -36,7 +36,7 @@ public: public: std::string id() override { return "PermutationInsertionPass"; } - void callback(const model::OperandIndex &index, model::Operand &object) override; + void callback(const ir::OperandIndex &index, ir::Operand &object) override; /** * @brief Insert Permute operation that has given operand as input @@ -44,10 +44,10 @@ public: * @param operand_index is the target operand index for the insertion * @param factor is the output operand's backend type and layout * - * @return model::OperationIndex + * @return OperationIndex */ - model::OperationIndex insertPermute(const model::OperandIndex &operand_index, - const operand::PermuteFactor &factor); + ir::OperationIndex insertPermute(const ir::OperandIndex &operand_index, + const operand::PermuteFactor &factor); private: }; diff --git a/runtime/neurun/core/src/ir/pass/PermutationOperationPass.cc b/runtime/neurun/core/src/ir/pass/PermutationOperationPass.cc index 9faeb45..ad05c1a 100644 --- a/runtime/neurun/core/src/ir/pass/PermutationOperationPass.cc +++ b/runtime/neurun/core/src/ir/pass/PermutationOperationPass.cc @@ -27,7 +27,7 @@ namespace graph namespace pass { -void PermutationOperationPass::callback(const model::OperationIndex &, model::Operation &node) +void PermutationOperationPass::callback(const ir::OperationIndex &, model::Operation &node) { node.accept(*this); }; @@ -74,7 +74,7 @@ void PermutationOperationPass::changeToKeepLayout(const model::Operation &node) below_subg.setInputs(it->node->getInputs()); below_subg.setOutputs(it->node->getOutputs()); - std::vector remove_list; + std::vector remove_list; remove_list.emplace_back(it->index); while (++it != above_subg.end()) { diff --git a/runtime/neurun/core/src/ir/pass/PermutationOperationPass.h b/runtime/neurun/core/src/ir/pass/PermutationOperationPass.h index bc3ca0d..d228235 100644 --- a/runtime/neurun/core/src/ir/pass/PermutationOperationPass.h +++ b/runtime/neurun/core/src/ir/pass/PermutationOperationPass.h @@ -36,7 +36,7 @@ public: std::string id() final { return "PermutationOperationPass"; } public: - void callback(const model::OperationIndex &i, model::Operation &n) final; + void callback(const ir::OperationIndex &i, model::Operation &n) final; public: void visit(const model::operation::FullyConnected &) final; diff 
--git a/runtime/neurun/core/src/ir/verifier/Verifier.cc b/runtime/neurun/core/src/ir/verifier/Verifier.cc index c1e6d20..6c9e9ed 100644 --- a/runtime/neurun/core/src/ir/verifier/Verifier.cc +++ b/runtime/neurun/core/src/ir/verifier/Verifier.cc @@ -17,7 +17,7 @@ #include "Verifier.h" #include "ir/Graph.h" -#include "model/OperationIndexMap.h" +#include "ir/OperationIndexMap.h" #include "util/logging.h" @@ -37,14 +37,13 @@ bool DAGChecker::verify(const Graph &graph) const auto &operations = graph.operations(); bool cyclic = false; - model::OperationIndexMap visited; - operations.iterate([&](const model::OperationIndex &index, const model::Operation &) { - visited[index] = false; - }); - model::OperationIndexMap on_stack = visited; // Copy from visited + ir::OperationIndexMap visited; + operations.iterate( + [&](const ir::OperationIndex &index, const model::Operation &) { visited[index] = false; }); + ir::OperationIndexMap on_stack = visited; // Copy from visited - std::function dfs_recursive = - [&](const model::OperationIndex &index, const model::Operation &node) -> void { + std::function dfs_recursive = + [&](const ir::OperationIndex &index, const model::Operation &node) -> void { if (on_stack[index]) cyclic = true; if (visited[index]) @@ -77,7 +76,7 @@ bool EdgeConsistencyChecker::verify(const Graph &graph) const { auto &operations = graph.operations(); uint32_t mismatches = 0; - operations.iterate([&](const model::OperationIndex &index, const model::Operation &node) { + operations.iterate([&](const ir::OperationIndex &index, const model::Operation &node) { for (auto operand_index : node.getInputs()) { auto &operand = graph.operands().at(operand_index); diff --git a/runtime/neurun/core/src/model/OperandConstraint.cc b/runtime/neurun/core/src/model/OperandConstraint.cc deleted file mode 100644 index 2730f71..0000000 --- a/runtime/neurun/core/src/model/OperandConstraint.cc +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "model/OperandConstraint.h" - -namespace neurun -{ -namespace model -{ -namespace operation -{ - -} // namespace operation -} // namespace model -} // namespace neurun diff --git a/runtime/neurun/core/src/model/Subgraph.cc b/runtime/neurun/core/src/model/Subgraph.cc index e1028ad..b86b72e 100644 --- a/runtime/neurun/core/src/model/Subgraph.cc +++ b/runtime/neurun/core/src/model/Subgraph.cc @@ -67,7 +67,7 @@ void Subgraph::remove(const OperationIndex &index) } } -bool Subgraph::exist(const neurun::model::OperationIndex &index) const +bool Subgraph::exist(const OperationIndex &index) const { for (const auto &element : _operations) { diff --git a/runtime/neurun/frontend/base_loader/base_loader.h b/runtime/neurun/frontend/base_loader/base_loader.h index 2578fee..17ab326 100644 --- a/runtime/neurun/frontend/base_loader/base_loader.h +++ b/runtime/neurun/frontend/base_loader/base_loader.h @@ -69,9 +69,9 @@ protected: ir::DataType tensorTypeToDataType(TensorType type); // Create operands form tflite::Tensor - model::OperandIndex loadOperand(const Tensor *tensor); - void loadOperationIO(const Operator *op, model::OperandIndexSequence &inputs, - model::OperandIndexSequence &outputs); + ir::OperandIndex loadOperand(const Tensor *tensor); + void loadOperationIO(const Operator *op, ir::OperandIndexSequence &inputs, + ir::OperandIndexSequence &outputs); // Create operations from Operator void loadOperation(const Operator *op); // Load Strides and Paddings from options to param @@ -124,7 +124,7 @@ protected: graph::Graph &_graph; const Model *_model; // Maps Tensor indices to neurun Operands. - std::vector _tensor_to_operand; + std::vector _tensor_to_operand; }; template @@ -194,7 +194,7 @@ BaseLoader::BaseLoader::tensorTypeToDataType(const } template -model::OperandIndex BaseLoader::loadOperand(const Tensor *tensor) +ir::OperandIndex BaseLoader::loadOperand(const Tensor *tensor) { ir::Shape shape; // Shape @@ -244,7 +244,7 @@ model::OperandIndex BaseLoader::loadOperand(const const auto *data = _model->buffers()->Get(tensor->buffer())->data(); if (data != nullptr) { - auto ptr = nnfw::cpp14::make_unique(data->data(), data->size()); + auto ptr = nnfw::cpp14::make_unique(data->data(), data->size()); _graph.setOperandValue(operand_index, std::move(ptr)); } @@ -259,8 +259,8 @@ model::OperandIndex BaseLoader::loadOperand(const template void BaseLoader::loadOperationIO(const Operator *op, - model::OperandIndexSequence &inputs, - model::OperandIndexSequence &outputs) + ir::OperandIndexSequence &inputs, + ir::OperandIndexSequence &outputs) { for (const std::int32_t idx : *op->inputs()) { @@ -307,8 +307,8 @@ void BaseLoader::loadPool2D(Param ¶m, template void BaseLoader::loadConv2D(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -324,8 +324,8 @@ void BaseLoader::loadConv2D(const Operator *op) template void BaseLoader::loadDepthwiseConv2D(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -344,8 +344,8 @@ void BaseLoader::loadDepthwiseConv2D(const Operato template void BaseLoader::loadTransposeConv(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; 
 
   loadOperationIO(op, inputs, outputs);
 
@@ -360,8 +360,8 @@ template void BaseLoader::loadTransposeConv(const Operator
 template
 void BaseLoader::loadAvgPool2D(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -377,8 +377,8 @@ void BaseLoader::loadAvgPool2D(const Operator *op)
 template
 void BaseLoader::loadReshape(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -392,8 +392,8 @@ void BaseLoader::loadReshape(const Operator *op)
 template
 void BaseLoader::loadSoftmax(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -409,8 +409,8 @@ void BaseLoader::loadSoftmax(const Operator *op)
 template
 void BaseLoader::loadMaxPool2D(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -426,8 +426,8 @@ void BaseLoader::loadMaxPool2D(const Operator *op)
 template
 void BaseLoader::loadConcatenation(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -444,8 +444,8 @@ void BaseLoader::loadConcatenation(const Operator
 template
 void BaseLoader::loadInstanceNorm(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -464,8 +464,8 @@ void BaseLoader::loadInstanceNorm(const Operator *
 template
 void BaseLoader::loadFC(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -483,8 +483,8 @@ void BaseLoader::loadFC(const Operator *op)
 template
 void BaseLoader::loadAdd(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -500,8 +500,8 @@ void BaseLoader::loadAdd(const Operator *op)
 template
 void BaseLoader::loadSub(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -517,8 +517,8 @@ void BaseLoader::loadSub(const Operator *op)
 template
 void BaseLoader::loadMul(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -534,8 +534,8 @@ void BaseLoader::loadMul(const Operator *op)
 template
 void BaseLoader::loadDiv(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -552,8 +552,8 @@ template
 void BaseLoader::loadPack(const Operator *op)
 {
   // This runtime_error will be removed if one of the backends supports this operation
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -569,8 +569,8 @@ void BaseLoader::loadPack(const Operator *op)
 template
 void BaseLoader::loadRelu(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -581,8 +581,8 @@ void BaseLoader::loadRelu(const Operator *op)
 template
 void BaseLoader::loadRelu6(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -593,8 +593,8 @@ void BaseLoader::loadRelu6(const Operator *op)
 template
 void BaseLoader::loadResizeBilinear(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
   auto input = inputs.at(0);
 
@@ -618,8 +618,8 @@ void BaseLoader::loadResizeBilinear(const Operator
 template
 void BaseLoader::loadRsqrt(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -630,8 +630,8 @@ void BaseLoader::loadRsqrt(const Operator *op)
 template
 void BaseLoader::loadSqrt(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -642,8 +642,8 @@ void BaseLoader::loadSqrt(const Operator *op)
 template
 void BaseLoader::loadSquaredDifference(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -655,8 +655,8 @@ void BaseLoader::loadSquaredDifference(const Opera
 template
 void BaseLoader::loadTanh(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -667,8 +667,8 @@ void BaseLoader::loadTanh(const Operator *op)
 template
 void BaseLoader::loadTranspose(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
   auto input = inputs.at(0);
 
@@ -688,8 +688,8 @@ void BaseLoader::loadTranspose(const Operator *op)
 template
 void BaseLoader::loadMean(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
   auto input = inputs.at(0);
 
@@ -709,8 +709,8 @@ void BaseLoader::loadMean(const Operator *op)
 template
 void BaseLoader::loadReduceMax(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
   auto input = inputs.at(0);
 
@@ -731,8 +731,8 @@ void BaseLoader::loadReduceMax(const Operator *op)
 template
 void BaseLoader::loadPad(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -743,8 +743,8 @@ void BaseLoader::loadPad(const Operator *op)
 template
 void BaseLoader::loadLogistic(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -755,8 +755,8 @@ void BaseLoader::loadLogistic(const Operator *op)
 template
 void BaseLoader::loadExp(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -767,8 +767,8 @@ void BaseLoader::loadExp(const Operator *op)
 template
 void BaseLoader::loadGather(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
   model::operation::Gather::Param param;
 
@@ -781,8 +781,8 @@ void BaseLoader::loadGather(const Operator *op)
 template
 void BaseLoader::loadSpaceToBatchND(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -793,8 +793,8 @@ void BaseLoader::loadSpaceToBatchND(const Operator
 template
 void BaseLoader::loadBatchToSpaceND(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
   auto input = inputs.at(0);
 
@@ -817,8 +817,8 @@ void BaseLoader::loadBatchToSpaceND(const Operator
 template
 void BaseLoader::loadReduceSum(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
   auto input = inputs.at(0);
 
@@ -839,15 +839,15 @@ void BaseLoader::loadReduceSum(const Operator *op)
 template
 void BaseLoader::loadCustom(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
   auto *op_code = _model->operator_codes()->Get(op->opcode_index());
   auto custom_op_id = op_code->custom_code()->str();
 
-  auto constraint = model::operation::OperandConstraint::createExact(inputs.size());
+  auto constraint = ir::OperandConstraint::createExact(inputs.size());
 
   assert(op->custom_options_format() == CustomOptionsFormat::CustomOptionsFormat_FLEXBUFFERS &&
          "Unsupported custom operation options format");
 
@@ -869,8 +869,8 @@ void BaseLoader::loadCustom(const Operator *op)
 template
 void BaseLoader::loadSqueeze(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
@@ -892,8 +892,8 @@ void BaseLoader::loadSqueeze(const Operator *op)
 template
 void BaseLoader::loadPrelu(const Operator *op)
 {
-  model::OperandIndexSequence inputs;
-  model::OperandIndexSequence outputs;
+  ir::OperandIndexSequence inputs;
+  ir::OperandIndexSequence outputs;
 
   loadOperationIO(op, inputs, outputs);
 
diff --git a/runtime/neurun/frontend/nnapi/model.cc b/runtime/neurun/frontend/nnapi/model.cc
index e854b16..b1978ad 100644
--- a/runtime/neurun/frontend/nnapi/model.cc
+++ b/runtime/neurun/frontend/nnapi/model.cc
@@ -129,7 +129,7 @@ int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t in
     VERBOSE(NNAPI::Model) << "setOperandValue: Invalid index value (negative)" << std::endl;
     return ANEURALNETWORKS_BAD_DATA;
   }
-  // NOTE ::neurun::model::OperandIndex uses uint32_t as its underlying type as various NNAPI
+  // NOTE OperandIndex uses uint32_t as its underlying type as various NNAPI
   //      functions such as ANeuralNetworksModel_addOperation use uint32_t to represent operand
   //      index
   //      ANeuralNetworksModel_setOperandValue, however, uses int32_t to represent operand index.
@@ -198,7 +198,7 @@ int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel *model,
                           << std::endl;
     return ANEURALNETWORKS_BAD_DATA;
   }
-  // NOTE ::neurun::model::OperandIndex uses uint32_t as its underlying type as various NNAPI
+  // NOTE OperandIndex uses uint32_t as its underlying type as various NNAPI
   //      functions such as ANeuralNetworksModel_addOperation use uint32_t to represent operand
   //      index
   //      ANeuralNetworksModel_setOperandValue, however, uses int32_t to represent operand index.
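The NOTE kept above is the subtle point in this file: some NNAPI entry points pass operand positions as int32_t while the runtime's OperandIndex is backed by uint32_t throughout, so a negative value must be rejected before the widening cast. A small self-contained sketch of that guard (OperandIndex here is a hypothetical stand-in, not the runtime type):

#include <cstdint>
#include <iostream>

struct OperandIndex // assumed stand-in; the real type lives in neurun::ir
{
  explicit OperandIndex(uint32_t v) : value{v} {}
  uint32_t value;
};

bool setOperandValue(int32_t index)
{
  if (index < 0)
  {
    // Without this check, -1 would silently become 4294967295 below.
    std::cerr << "setOperandValue: Invalid index value (negative)" << std::endl;
    return false;
  }
  OperandIndex ind{static_cast<uint32_t>(index)};
  std::cout << "accepted operand " << ind.value << std::endl;
  return true;
}

int main() { return (setOperandValue(3) && !setOperandValue(-1)) ? 0 : 1; }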
diff --git a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc
index 883d325..b8e43a6 100644
--- a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc
+++ b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc
@@ -18,50 +18,50 @@
 #include "NNAPIConvert.h"
 #include "util/logging.h"
 
-const neurun::model::OperandIndex
+const neurun::ir::OperandIndex
 ANeuralNetworksExecution::getInputOperandIndex(int32_t index) noexcept
 {
   if (index < 0)
   {
     // Negative index: return invalid index
-    return neurun::model::OperandIndex{};
+    return neurun::ir::OperandIndex{};
   }
 
   uint32_t cast_index = static_cast<uint32_t>(index);
   if (cast_index >= _execution->graph().getInputs().size())
   {
     // Return invalid index
-    return neurun::model::OperandIndex{};
+    return neurun::ir::OperandIndex{};
   }
 
-  neurun::model::IOIndex input_index{cast_index};
+  neurun::ir::IOIndex input_index{cast_index};
   const auto operand_index = _execution->graph().getInputs().at(input_index);
   return operand_index;
 }
 
-const neurun::model::OperandIndex
+const neurun::ir::OperandIndex
 ANeuralNetworksExecution::getOutputOperandIndex(int32_t index) noexcept
 {
   if (index < 0)
   {
     // Negative index: return invalid index
-    return neurun::model::OperandIndex{};
+    return neurun::ir::OperandIndex{};
   }
 
   uint32_t cast_index = static_cast<uint32_t>(index);
   if (cast_index >= _execution->graph().getOutputs().size())
   {
     // Return invalid index
-    return neurun::model::OperandIndex{};
+    return neurun::ir::OperandIndex{};
   }
 
-  neurun::model::IOIndex output_index{cast_index};
+  neurun::ir::IOIndex output_index{cast_index};
   const auto operand_index = _execution->graph().getOutputs().at(output_index);
   return operand_index;
 }
 
 bool ANeuralNetworksExecution::compareDataType(const ANeuralNetworksOperandType *type,
-                                               const neurun::model::OperandIndex index) noexcept
+                                               const neurun::ir::OperandIndex index) noexcept
 {
   try
   {
@@ -85,7 +85,7 @@ bool ANeuralNetworksExecution::compareDataType(const ANeuralNetworksOperandType
 }
 
 bool ANeuralNetworksExecution::compareShape(const ANeuralNetworksOperandType *type,
-                                            const neurun::model::OperandIndex index) noexcept
+                                            const neurun::ir::OperandIndex index) noexcept
 {
   // Passed shape should be specified
   if (haveUnspecifiedDims(index))
@@ -99,14 +99,14 @@ bool ANeuralNetworksExecution::compareShape(const ANeuralNetworksOperandType *ty
   return operand_shape == shape_from_type;
 }
 
-bool ANeuralNetworksExecution::haveUnspecifiedDims(const neurun::model::OperandIndex index) noexcept
+bool ANeuralNetworksExecution::haveUnspecifiedDims(const neurun::ir::OperandIndex index) noexcept
 {
   const auto operand_shape = _execution->graph().operands().at(index).shape();
 
   return operand_shape.num_elements() == 0;
 }
 
-size_t ANeuralNetworksExecution::getOperandSize(const neurun::model::OperandIndex index) noexcept
+size_t ANeuralNetworksExecution::getOperandSize(const neurun::ir::OperandIndex index) noexcept
 {
   try
   {
@@ -125,7 +125,7 @@ bool ANeuralNetworksExecution::setInput(uint32_t index, const ANeuralNetworksOpe
 {
   try
   {
-    neurun::model::IOIndex input_index{index};
+    neurun::ir::IOIndex input_index{index};
     const auto operand_index = getInputOperandIndex(index);
 
     const auto type_info = _execution->graph().operands().at(operand_index).typeInfo();
@@ -153,7 +153,7 @@ bool ANeuralNetworksExecution::setOutput(uint32_t index, const ANeuralNetworksOp
 {
   try
   {
-    neurun::model::IOIndex output_index{index};
+    neurun::ir::IOIndex output_index{index};
     const auto operand_index = getOutputOperandIndex(index);
 
     const auto type_info = _execution->graph().operands().at(operand_index).typeInfo();
@@ -217,7 +217,7 @@ bool ANeuralNetworksExecution::getOutputOperandRank(uint32_t index, uint32_t *ra
 {
   try
   {
-    neurun::model::IOIndex output_index{index};
+    neurun::ir::IOIndex output_index{index};
     const auto operand_index = getOutputOperandIndex(index);
     bool unspecified = haveUnspecifiedDims(operand_index);
 
@@ -250,7 +250,7 @@ bool ANeuralNetworksExecution::getOutputOperandDimensions(uint32_t index, uint32
 {
   try
   {
-    neurun::model::IOIndex output_index{index};
+    neurun::ir::IOIndex output_index{index};
     const auto operand_index = getOutputOperandIndex(index);
     bool unspecified = haveUnspecifiedDims(operand_index);
     if (unspecified)
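The execution wrapper above repeats one validation pattern: a caller-supplied position is either negative (reject), out of range for the graph's I/O list (reject), or mapped through an IOIndex to the backing OperandIndex. A compact sketch of that flow, using a default-constructed value as the "invalid index" sentinel (the sentinel convention and simplified types are assumptions for illustration):

#include <cstdint>
#include <vector>

struct OperandIndex // hypothetical stand-in; default-constructed means "invalid"
{
  static constexpr uint32_t kInvalid = UINT32_MAX;
  uint32_t value = kInvalid;
  bool valid() const { return value != kInvalid; }
};

// graph_inputs[i] is the operand backing the i-th model input (IOIndex i).
OperandIndex getInputOperandIndex(int32_t index, const std::vector<uint32_t> &graph_inputs)
{
  if (index < 0)
    return OperandIndex{}; // Negative index: return invalid index
  uint32_t cast_index = static_cast<uint32_t>(index);
  if (cast_index >= graph_inputs.size())
    return OperandIndex{}; // Out of range: return invalid index
  return OperandIndex{graph_inputs[cast_index]};
}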
diff --git a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.h b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.h
index 3f2b2bc..ecffedc 100644
--- a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.h
+++ b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.h
@@ -40,14 +40,14 @@ public:
   bool startExecute(void) noexcept;
   bool execute(void) noexcept;
 
-  const neurun::model::OperandIndex getInputOperandIndex(int32_t index) noexcept;
-  const neurun::model::OperandIndex getOutputOperandIndex(int32_t index) noexcept;
+  const neurun::ir::OperandIndex getInputOperandIndex(int32_t index) noexcept;
+  const neurun::ir::OperandIndex getOutputOperandIndex(int32_t index) noexcept;
   bool compareDataType(const ANeuralNetworksOperandType *type,
-                       const neurun::model::OperandIndex index) noexcept;
+                       const neurun::ir::OperandIndex index) noexcept;
   bool compareShape(const ANeuralNetworksOperandType *type,
-                    const neurun::model::OperandIndex index) noexcept;
-  bool haveUnspecifiedDims(const neurun::model::OperandIndex index) noexcept;
-  size_t getOperandSize(const neurun::model::OperandIndex index) noexcept;
+                    const neurun::ir::OperandIndex index) noexcept;
+  bool haveUnspecifiedDims(const neurun::ir::OperandIndex index) noexcept;
+  size_t getOperandSize(const neurun::ir::OperandIndex index) noexcept;
   const std::shared_ptr instance(void) noexcept;
 
   /**
diff --git a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.cc b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.cc
index 66b07a1..6e23308 100644
--- a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.cc
+++ b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.cc
@@ -53,7 +53,7 @@ bool ANeuralNetworksModel::addOperand(const ANeuralNetworksOperandType *type) no
 bool ANeuralNetworksModel::setOperandValue(uint32_t index, const void *buffer, size_t length,
                                            bool optional, bool copy) noexcept
 {
-  const neurun::model::OperandIndex ind{index};
+  const neurun::ir::OperandIndex ind{index};
 
   try
   {
@@ -66,8 +66,8 @@ bool ANeuralNetworksModel::setOperandValue(uint32_t index, const void *buffer, s
       setOptionalOperand(ind);
     }
 
-    using ::neurun::model::CachedData;
-    using ::neurun::model::ExternalData;
+    using neurun::ir::CachedData;
+    using neurun::ir::ExternalData;
     if (copy)
     {
       _graph->operands().at(ind).data(
@@ -146,7 +146,7 @@ bool ANeuralNetworksModel::addModelInput(uint32_t index) noexcept
   {
     _operand_usages[index] = OperandUsage::MODEL_INPUT;
 
-    const neurun::model::OperandIndex ind{index};
+    const neurun::ir::OperandIndex ind{index};
     _graph->addInput(ind);
   }
   catch (const std::exception &e)
@@ -162,7 +162,7 @@ bool ANeuralNetworksModel::addModelOutput(uint32_t index) noexcept
 {
   try
   {
-    const neurun::model::OperandIndex ind{index};
+    const neurun::ir::OperandIndex ind{index};
 
     // Duplicated output is not allowed
     if (_graph->getOutputs().contains(ind))
@@ -206,14 +206,14 @@ bool ANeuralNetworksModel::isFinished() noexcept { return !_graph->isBuildingPha
 
 bool ANeuralNetworksModel::isExistOperand(uint32_t index) noexcept
 {
-  return _graph->operands().exist(neurun::model::OperandIndex{index});
+  return _graph->operands().exist(neurun::ir::OperandIndex{index});
 }
 
 size_t ANeuralNetworksModel::operandSize(uint32_t index) noexcept
 {
   try
  {
-    return _graph->operands().at(neurun::model::OperandIndex{index}).operandSize();
+    return _graph->operands().at(neurun::ir::OperandIndex{index}).operandSize();
   }
   catch (const std::exception &e)
   {
@@ -233,7 +233,7 @@ bool ANeuralNetworksModel::isOperationOutput(uint32_t index) noexcept
   return (_operand_usages[index] == OperandUsage::OPERATION_OUTPUT);
 }
 
-void ANeuralNetworksModel::setOptionalOperand(const neurun::model::OperandIndex idx)
+void ANeuralNetworksModel::setOptionalOperand(const neurun::ir::OperandIndex idx)
 {
   _optional_operands.insert(idx);
 }
@@ -241,7 +241,7 @@ void ANeuralNetworksModel::setOptionalOperand(const neurun::model::OperandIndex
 void ANeuralNetworksModel::fillOptionalOperand(void)
 {
   _graph->operations().iterate(
-      [&](const ::neurun::model::OperationIndex &, ::neurun::model::Operation &node) {
+      [&](const neurun::ir::OperationIndex &, ::neurun::model::Operation &node) {
        for (auto input : node.getInputs())
        {
          // TODO fill default value for optional operands
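For context on the CachedData/ExternalData switch above: the copy flag chooses between an operand value that owns a private copy of the caller's buffer and one that merely points at caller-owned memory, which must then outlive the model. A hedged sketch of the two policies, with simplified constructors assumed for illustration:

#include <cstddef>
#include <cstdint>
#include <vector>

class CachedData // owns a copy of the bytes
{
public:
  CachedData(const uint8_t *base, size_t size) : _buf(base, base + size) {}
  const uint8_t *base() const { return _buf.data(); }
  size_t size() const { return _buf.size(); }

private:
  std::vector<uint8_t> _buf;
};

class ExternalData // only references caller-owned bytes
{
public:
  ExternalData(const uint8_t *base, size_t size) : _base{base}, _size{size} {}
  const uint8_t *base() const { return _base; }
  size_t size() const { return _size; }

private:
  const uint8_t *_base;
  size_t _size;
};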
diff --git a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.h b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.h
index a1af145..576c657 100644
--- a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.h
+++ b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.h
@@ -59,12 +59,12 @@ public:
   void release(std::shared_ptr &graph) { graph = _graph; }
 
 private:
-  void setOptionalOperand(const neurun::model::OperandIndex idx);
+  void setOptionalOperand(const neurun::ir::OperandIndex idx);
   void fillOptionalOperand(void);
 
 private:
   std::shared_ptr _graph;
-  std::unordered_set<neurun::model::OperandIndex> _optional_operands;
+  std::unordered_set<neurun::ir::OperandIndex> _optional_operands;
   std::vector _operand_usages;
 };
 
diff --git a/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.cc b/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.cc
index b3fef40..477d780 100644
--- a/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.cc
+++ b/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.cc
@@ -95,7 +95,7 @@ OperationFactory::OperationFactory()
   using namespace neurun::model;
 
   _map[ANEURALNETWORKS_BATCH_TO_SPACE_ND] = [](const OperationFactory::Param &init_param,
-                                               neurun::model::Operands &) {
+                                               Operands &) {
     assert(init_param.input_count == 2 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -330,7 +330,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_CONCATENATION] = [](const OperationFactory::Param &init_param,
-                                           neurun::model::Operands &operands) {
+                                           Operands &operands) {
     assert(init_param.input_count >= 2); // At least one input tensor and axis
     assert(init_param.output_count == 1);
@@ -354,8 +354,7 @@ OperationFactory::OperationFactory()
     return new operation::Concat{inputs, outputs, param};
   };
 
-  _map[ANEURALNETWORKS_RESHAPE] = [](const OperationFactory::Param &init_param,
-                                     neurun::model::Operands &) {
+  _map[ANEURALNETWORKS_RESHAPE] = [](const OperationFactory::Param &init_param, Operands &) {
     assert(init_param.input_count == 2 && init_param.output_count == 1);
 
     // Each input should be interpreted as follows:
@@ -414,8 +413,7 @@ OperationFactory::OperationFactory()
     return new operation::Softmax{inputs, outputs, param};
   };
 
-  _map[ANEURALNETWORKS_CAST_EX] = [](const OperationFactory::Param &init_param,
-                                     neurun::model::Operands &) {
+  _map[ANEURALNETWORKS_CAST_EX] = [](const OperationFactory::Param &init_param, Operands &) {
     assert(init_param.input_count == 1 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -519,7 +517,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_REDUCE_SUM_EX] = [](const OperationFactory::Param &init_param,
-                                           neurun::model::Operands &operands) {
+                                           Operands &operands) {
     assert(init_param.input_count == 2);
     assert(init_param.output_count == 1);
@@ -561,7 +559,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_STRIDED_SLICE] = [](const OperationFactory::Param &init_param,
-                                           neurun::model::Operands &operands) {
+                                           Operands &operands) {
     assert(init_param.input_count == 7 && init_param.output_count == 1);
 
     OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2],
@@ -601,7 +599,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_TRANSPOSE] = [](const OperationFactory::Param &init_param,
-                                       neurun::model::Operands &operands) {
+                                       Operands &operands) {
     // TODO make this work with init_param.input_count == 1 (when permutation vector is optional)
 
     // Inputs
@@ -649,7 +647,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_SQUEEZE] = [](const OperationFactory::Param &init_param,
-                                     neurun::model::Operands &operands) {
+                                     Operands &operands) {
     assert(init_param.input_count == 1 || init_param.input_count == 2);
     assert(init_param.output_count == 1);
@@ -684,8 +682,7 @@ OperationFactory::OperationFactory()
     return new operation::Squeeze{inputs, outputs, param};
   };
 
-  _map[ANEURALNETWORKS_TANH] = [](const OperationFactory::Param &init_param,
-                                  neurun::model::Operands &) {
+  _map[ANEURALNETWORKS_TANH] = [](const OperationFactory::Param &init_param, Operands &) {
     assert(init_param.input_count == 1 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -698,8 +695,7 @@ OperationFactory::OperationFactory()
     return new operation::Tanh{inputs, outputs};
   };
 
-  _map[ANEURALNETWORKS_LOGISTIC] = [](const OperationFactory::Param &init_param,
-                                      neurun::model::Operands &) {
+  _map[ANEURALNETWORKS_LOGISTIC] = [](const OperationFactory::Param &init_param, Operands &) {
     assert(init_param.input_count == 1 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -733,8 +729,7 @@ OperationFactory::OperationFactory()
     return new operation::Div{inputs, outputs, param};
   };
 
-  _map[ANEURALNETWORKS_EXP_EX] = [](const OperationFactory::Param &init_param,
-                                    neurun::model::Operands &) {
+  _map[ANEURALNETWORKS_EXP_EX] = [](const OperationFactory::Param &init_param, Operands &) {
     assert(init_param.input_count == 1 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -748,7 +743,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_GREATER_EQUAL_EX] = [](const OperationFactory::Param &init_param,
-                                              neurun::model::Operands &operands) {
+                                              Operands &operands) {
     assert(init_param.input_count == 2 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -769,7 +764,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_LESS_EX] = [](const OperationFactory::Param &init_param,
-                                     neurun::model::Operands &operands) {
+                                     Operands &operands) {
     assert(init_param.input_count == 2 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -790,7 +785,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_REDUCE_MAX_EX] = [](const OperationFactory::Param &init_param,
-                                           neurun::model::Operands &operands) {
+                                           Operands &operands) {
     assert(init_param.input_count == 2 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -810,7 +805,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_NOT_EQUAL_EX] = [](const OperationFactory::Param &init_param,
-                                          neurun::model::Operands &operands) {
+                                          Operands &operands) {
     assert(init_param.input_count == 2 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -831,7 +826,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_LOGICAL_AND_EX] = [](const OperationFactory::Param &init_param,
-                                            neurun::model::Operands &operands) {
+                                            Operands &operands) {
     assert(init_param.input_count == 2 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -850,8 +845,7 @@ OperationFactory::OperationFactory()
     return new operation::LogicalAnd{inputs, outputs};
   };
 
-  _map[ANEURALNETWORKS_RSQRT_EX] = [](const OperationFactory::Param &init_param,
-                                      neurun::model::Operands &) {
+  _map[ANEURALNETWORKS_RSQRT_EX] = [](const OperationFactory::Param &init_param, Operands &) {
     assert(init_param.input_count == 1 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -864,8 +858,7 @@ OperationFactory::OperationFactory()
     return new operation::RSQRT{inputs, outputs};
   };
 
-  _map[ANEURALNETWORKS_RELU] = [](const OperationFactory::Param &init_param,
-                                  neurun::model::Operands &) {
+  _map[ANEURALNETWORKS_RELU] = [](const OperationFactory::Param &init_param, Operands &) {
     assert(init_param.input_count == 1 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -879,7 +872,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_RESIZE_BILINEAR] = [](const OperationFactory::Param &init_param,
-                                             neurun::model::Operands &operands) {
+                                             Operands &operands) {
     assert(init_param.input_count == 3 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -898,8 +891,7 @@ OperationFactory::OperationFactory()
     return new operation::ResizeBilinear{inputs, outputs, param};
   };
 
-  _map[ANEURALNETWORKS_RELU1] = [](const OperationFactory::Param &init_param,
-                                   neurun::model::Operands &) {
+  _map[ANEURALNETWORKS_RELU1] = [](const OperationFactory::Param &init_param, Operands &) {
     assert(init_param.input_count == 1 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -912,8 +904,7 @@ OperationFactory::OperationFactory()
     return new operation::ReLU1{inputs, outputs};
   };
 
-  _map[ANEURALNETWORKS_RELU6] = [](const OperationFactory::Param &init_param,
-                                   neurun::model::Operands &) {
+  _map[ANEURALNETWORKS_RELU6] = [](const OperationFactory::Param &init_param, Operands &) {
     assert(init_param.input_count == 1 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -926,8 +917,7 @@ OperationFactory::OperationFactory()
     return new operation::ReLU6{inputs, outputs};
   };
 
-  _map[ANEURALNETWORKS_RNN] = [](const OperationFactory::Param &init_param,
-                                 neurun::model::Operands &operands) {
+  _map[ANEURALNETWORKS_RNN] = [](const OperationFactory::Param &init_param, Operands &operands) {
     assert(init_param.input_count == 6 && init_param.output_count == 2);
 
     // Each input should be interpreted as follows:
@@ -958,8 +948,7 @@ OperationFactory::OperationFactory()
     return new operation::RNN{inputs, outputs, param};
   };
 
-  _map[ANEURALNETWORKS_FLOOR] = [](const OperationFactory::Param &init_param,
-                                   neurun::model::Operands &) {
+  _map[ANEURALNETWORKS_FLOOR] = [](const OperationFactory::Param &init_param, Operands &) {
     assert(init_param.input_count == 1 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -972,7 +961,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_SPACE_TO_BATCH_ND] = [](const OperationFactory::Param &init_param,
-                                               neurun::model::Operands &) {
+                                               Operands &) {
     assert(init_param.input_count == 3 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -992,7 +981,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_SPACE_TO_DEPTH] = [](const OperationFactory::Param &init_param,
-                                            neurun::model::Operands &operands) {
+                                            Operands &operands) {
     assert(init_param.input_count == 2 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1081,7 +1070,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_EMBEDDING_LOOKUP] = [](const OperationFactory::Param &init_param,
-                                              neurun::model::Operands &) {
+                                              Operands &) {
     assert(init_param.input_count == 2 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1096,7 +1085,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_L2_NORMALIZATION] = [](const OperationFactory::Param &init_param,
-                                              neurun::model::Operands &) {
+                                              Operands &) {
     assert(init_param.input_count == 1 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1109,7 +1098,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_HASHTABLE_LOOKUP] = [](const OperationFactory::Param &init_param,
-                                              neurun::model::Operands &) {
+                                              Operands &) {
     assert(init_param.input_count == 3 && init_param.output_count == 2);
 
     // Each output should be interpreted as follows:
@@ -1128,8 +1117,7 @@ OperationFactory::OperationFactory()
     return new operation::HashtableLookup{inputs, outputs};
   };
 
-  _map[ANEURALNETWORKS_PRELU_EX] = [](const OperationFactory::Param &init_param,
-                                      neurun::model::Operands &) {
+  _map[ANEURALNETWORKS_PRELU_EX] = [](const OperationFactory::Param &init_param, Operands &) {
     assert(init_param.input_count == 2 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1173,8 +1161,7 @@ OperationFactory::OperationFactory()
     return new operation::TransposeConv{inputs, outputs, param};
   };
 
-  _map[ANEURALNETWORKS_SQRT_EX] = [](const OperationFactory::Param &init_param,
-                                     neurun::model::Operands &) {
+  _map[ANEURALNETWORKS_SQRT_EX] = [](const OperationFactory::Param &init_param, Operands &) {
     assert(init_param.input_count == 1 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1187,7 +1174,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_LOGICAL_OR_EX] = [](const OperationFactory::Param &init_param,
-                                           neurun::model::Operands &operands) {
+                                           Operands &operands) {
     assert(init_param.input_count == 2 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1207,7 +1194,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_LOGICAL_NOT_EX] = [](const OperationFactory::Param &init_param,
-                                            neurun::model::Operands &operands) {
+                                            Operands &operands) {
     assert(init_param.input_count == 1 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1224,8 +1211,7 @@ OperationFactory::OperationFactory()
     return new operation::LogicalNot{inputs, outputs};
   };
 
-  _map[ANEURALNETWORKS_LSTM] = [](const OperationFactory::Param &init_param,
-                                  neurun::model::Operands &operands) {
+  _map[ANEURALNETWORKS_LSTM] = [](const OperationFactory::Param &init_param, Operands &operands) {
     assert(init_param.input_count == 23 && init_param.output_count == 4);
 
     // Each input should be interpreted as follows:
@@ -1301,7 +1287,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_EQUAL_EX] = [](const OperationFactory::Param &init_param,
-                                      neurun::model::Operands &operands) {
+                                      Operands &operands) {
     assert(init_param.input_count == 2 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1322,7 +1308,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_SQUARED_DIFFERENCE_EX] = [](const OperationFactory::Param &init_param,
-                                                   neurun::model::Operands &) {
+                                                   Operands &) {
     assert(init_param.input_count == 2 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1337,7 +1323,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_TOPK_V2_EX] = [](const OperationFactory::Param &init_param,
-                                        neurun::model::Operands &operands) {
+                                        Operands &operands) {
     assert(init_param.input_count == 2 && init_param.output_count == 2);
 
     // Each output should be interpreted as follows:
@@ -1359,7 +1345,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_GATHER_EX] = [](const OperationFactory::Param &init_param,
-                                       neurun::model::Operands &operands) {
+                                       Operands &operands) {
     assert(init_param.input_count == 3 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1377,8 +1363,7 @@ OperationFactory::OperationFactory()
     return new operation::Gather{inputs, outputs, param};
   };
 
-  _map[ANEURALNETWORKS_NEG_EX] = [](const OperationFactory::Param &init_param,
-                                    neurun::model::Operands &) {
+  _map[ANEURALNETWORKS_NEG_EX] = [](const OperationFactory::Param &init_param, Operands &) {
     assert(init_param.input_count == 1 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1391,8 +1376,7 @@ OperationFactory::OperationFactory()
     return new operation::Neg{inputs, outputs};
   };
 
-  _map[ANEURALNETWORKS_ABS_EX] = [](const OperationFactory::Param &init_param,
-                                    neurun::model::Operands &) {
+  _map[ANEURALNETWORKS_ABS_EX] = [](const OperationFactory::Param &init_param, Operands &) {
     assert(init_param.input_count == 1 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1406,7 +1390,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_ARGMAX_EX] = [](const OperationFactory::Param &init_param,
-                                       neurun::model::Operands &operands) {
+                                       Operands &operands) {
     assert(init_param.input_count == 2 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1423,8 +1407,7 @@ OperationFactory::OperationFactory()
     return new operation::ArgMax{inputs, outputs, param};
   };
 
-  _map[ANEURALNETWORKS_DEQUANTIZE] = [](const OperationFactory::Param &init_param,
-                                        neurun::model::Operands &) {
+  _map[ANEURALNETWORKS_DEQUANTIZE] = [](const OperationFactory::Param &init_param, Operands &) {
     assert(init_param.input_count == 1 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1437,8 +1420,7 @@ OperationFactory::OperationFactory()
     return new operation::Dequantize{inputs, outputs};
   };
 
-  _map[ANEURALNETWORKS_MEAN] = [](const OperationFactory::Param &init_param,
-                                  neurun::model::Operands &operands) {
+  _map[ANEURALNETWORKS_MEAN] = [](const OperationFactory::Param &init_param, Operands &operands) {
     assert(init_param.input_count == 3 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1460,7 +1442,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION] = [](const OperationFactory::Param &init_param,
-                                                          neurun::model::Operands &operands) {
+                                                          Operands &operands) {
     assert(init_param.input_count == 5 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1477,7 +1459,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_DEPTH_TO_SPACE] = [](const OperationFactory::Param &init_param,
-                                            neurun::model::Operands &operands) {
+                                            Operands &operands) {
     assert(init_param.input_count == 2 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1495,7 +1477,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_PACK_EX] = [](const OperationFactory::Param &init_param,
-                                     neurun::model::Operands &operands) {
+                                     Operands &operands) {
     assert(init_param.input_count >= 3 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1515,7 +1497,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_REDUCE_MIN_EX] = [](const OperationFactory::Param &init_param,
-                                           neurun::model::Operands &operands) {
+                                           Operands &operands) {
     assert(init_param.input_count == 2 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1535,7 +1517,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_SPLIT_EX] = [](const OperationFactory::Param &init_param,
-                                      neurun::model::Operands &operands) {
+                                      Operands &operands) {
     assert(init_param.input_count == 3);
     assert(init_param.output_count >= 1); // At least one output tensor and axis
@@ -1554,7 +1536,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_UNPACK_EX] = [](const OperationFactory::Param &init_param,
-                                       neurun::model::Operands &operands) {
+                                       Operands &operands) {
     assert(init_param.input_count == 3 && init_param.output_count >= 1);
 
     OperandIndexSequence inputs{init_param.inputs[0]};
@@ -1585,7 +1567,7 @@ OperationFactory::OperationFactory()
 
 neurun::model::Operation *OperationFactory::create(ANeuralNetworksOperationType type,
                                                    const OperationFactory::Param &param,
-                                                   neurun::model::Operands &operands)
+                                                   Operands &operands)
 {
   auto it = _map.find(type);
   if (it == _map.end())
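The OperationFactory changes above all follow one scheme: _map associates each NNAPI operation type with a generator lambda that validates init_param and builds the corresponding IR node. A trimmed-down sketch of that registry (types simplified; registerGenerator is a hypothetical name, since the real class populates _map directly in its constructor):

#include <cstdint>
#include <functional>
#include <stdexcept>
#include <unordered_map>

struct Param
{
  uint32_t input_count = 0;
  uint32_t output_count = 0;
};
struct Operation
{
  virtual ~Operation() = default;
};

class OperationFactory
{
public:
  using Generator = std::function<Operation *(const Param &)>;

  void registerGenerator(int type, Generator gen) { _map[type] = std::move(gen); }

  Operation *create(int type, const Param &param)
  {
    auto it = _map.find(type);
    if (it == _map.end())
      throw std::runtime_error("Unsupported operation"); // unknown type
    return it->second(param);
  }

private:
  std::unordered_map<int, Generator> _map;
};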
diff --git a/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.h b/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.h
index 4d5d02f..ad773c5 100644
--- a/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.h
+++ b/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.h
@@ -19,7 +19,7 @@
 #include
 
-#include "model/Operands.h"
+#include "ir/Operands.h"
 #include "model/Operation.h"
 #include "NeuralNetworks.h"
 #include "NeuralNetworksEx.h"
@@ -40,7 +40,7 @@ public:
 public:
   using Generator = std::function<neurun::model::Operation *(const OperationFactory::Param &,
-                                                             neurun::model::Operands &)>;
+                                                             neurun::ir::Operands &)>;
 
 public:
   static OperationFactory &get();
@@ -51,7 +51,7 @@ private:
 public:
   neurun::model::Operation *create(ANeuralNetworksOperationType,
                                    const OperationFactory::Param &param,
-                                   neurun::model::Operands &operands);
+                                   neurun::ir::Operands &operands);
   // TODO add "register" method for separating registration, possibly supporting custom-ops
 
 private:
diff --git a/runtime/neurun/test/core/backend/ExecTime.test.cc b/runtime/neurun/test/core/backend/ExecTime.test.cc
index b5471c8..4b89e64 100644
--- a/runtime/neurun/test/core/backend/ExecTime.test.cc
+++ b/runtime/neurun/test/core/backend/ExecTime.test.cc
@@ -40,8 +40,8 @@ struct MockBackend : public ::neurun::backend::Backend
     return std::make_shared();
   }
   std::unique_ptr
-  newContext(const model::Operands &,
-             const std::shared_ptr &kb) const override
+  newContext(const ir::Operands &, const std::shared_ptr &kb) const override
   {
     return nullptr;
   }
diff --git a/runtime/neurun/test/core/compiler/Scheduler.cc b/runtime/neurun/test/core/compiler/Scheduler.cc
index cf653b1..cb276c9 100644
--- a/runtime/neurun/test/core/compiler/Scheduler.cc
+++ b/runtime/neurun/test/core/compiler/Scheduler.cc
@@ -207,7 +207,7 @@ void setPermutationsExecutionTime(const std::vector &backends,
 using OIS = OperandIndexSequence;
 
 template
-model::OperationIndex create(std::shared_ptr graph, Types &&... args)
+OperationIndex create(std::shared_ptr graph, Types &&... args)
 {
   typename NodeT::Param op_params{Activation::NONE};
   auto op = nnfw::cpp14::make_unique(std::forward(args)..., op_params);
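The Scheduler test's create helper, whose return type changes above, is a small perfect-forwarding factory: it builds a node from forwarded constructor arguments and hands ownership to the graph, returning the new OperationIndex. A generic sketch under assumed Graph/NodeT interfaces (the real helper also injects a default activation parameter and uses nnfw::cpp14::make_unique):

#include <memory>
#include <utility>

template <typename NodeT, typename GraphT, typename... Types>
auto create(std::shared_ptr<GraphT> graph, Types &&... args)
{
  // Forward the arguments unchanged to NodeT's constructor, then let the
  // graph take ownership; addOperation is assumed to return the new index.
  auto op = std::make_unique<NodeT>(std::forward<Types>(args)...);
  return graph->addOperation(std::move(op));
}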
diff --git a/runtime/neurun/test/core/exec/ExecInstance.cc b/runtime/neurun/test/core/exec/ExecInstance.cc
index c31af3d..3bc7697 100644
--- a/runtime/neurun/test/core/exec/ExecInstance.cc
+++ b/runtime/neurun/test/core/exec/ExecInstance.cc
@@ -26,7 +26,6 @@ namespace
 {
 
 using namespace neurun::model;
-using DataType = DataType;
 
 class CompiledMockUpModel
 {
diff --git a/runtime/neurun/test/core/exec/interp/ExecManager.cc b/runtime/neurun/test/core/exec/interp/ExecManager.cc
index 69acb74..372b924 100644
--- a/runtime/neurun/test/core/exec/interp/ExecManager.cc
+++ b/runtime/neurun/test/core/exec/interp/ExecManager.cc
@@ -27,7 +27,6 @@ namespace
 {
 
 using namespace neurun::model;
-using DataType = DataType;
 using ExecManager = neurun::exec::interp::ExecManager;
 using Execution = neurun::exec::Execution;
 
diff --git a/runtime/neurun/test/graph/Graph.cc b/runtime/neurun/test/graph/Graph.cc
index 4d52571..7e465af 100644
--- a/runtime/neurun/test/graph/Graph.cc
+++ b/runtime/neurun/test/graph/Graph.cc
@@ -22,15 +22,15 @@ TEST(Graph, inputs_and_outputs)
 {
   ::neurun::graph::Graph graph;
 
-  ::neurun::model::OperandIndex index0{0u};
-  ::neurun::model::OperandIndex index1{1u};
+  neurun::ir::OperandIndex index0{0u};
+  neurun::ir::OperandIndex index1{1u};
 
   graph.addInput({index0});
   graph.addInput({index1});
 
-  ::neurun::model::OperandIndex index10{10u};
-  ::neurun::model::OperandIndex index11{11u};
-  ::neurun::model::OperandIndex index12{12u};
+  neurun::ir::OperandIndex index10{10u};
+  neurun::ir::OperandIndex index11{11u};
+  neurun::ir::OperandIndex index12{12u};
 
   graph.addOutput({index10});
   graph.addOutput({index11});
@@ -39,9 +39,9 @@ TEST(Graph, inputs_and_outputs)
   ASSERT_EQ(graph.getInputs().size(), 2);
   ASSERT_EQ(graph.getOutputs().size(), 3);
 
-  ::neurun::model::IOIndex io_index0{0};
-  ::neurun::model::IOIndex io_index1{1};
-  ::neurun::model::IOIndex io_index2{2};
+  neurun::ir::IOIndex io_index0{0};
+  neurun::ir::IOIndex io_index1{1};
+  neurun::ir::IOIndex io_index2{2};
 
   ASSERT_EQ(graph.getInputs().at(io_index0), 0);
   ASSERT_EQ(graph.getInputs().at(io_index1), 1);
diff --git a/runtime/neurun/test/graph/MockNode.h b/runtime/neurun/test/graph/MockNode.h
index 088140d..00b897c 100644
--- a/runtime/neurun/test/graph/MockNode.h
+++ b/runtime/neurun/test/graph/MockNode.h
@@ -18,7 +18,7 @@
 #define __NEURUN_TEST_GRAPH_MOCK_NODE_H__
 
 #include "model/Operation.h"
-#include "model/OperandIndexSequence.h"
+#include "ir/OperandIndexSequence.h"
 
 namespace neurun_test
 {
@@ -28,9 +28,9 @@ namespace graph
 class SimpleMock : public neurun::model::Operation
 {
 public:
-  SimpleMock(const neurun::model::OperandIndexSequence &inputs,
-             const neurun::model::OperandIndexSequence &outputs)
-      : neurun::model::Operation{neurun::model::operation::OperandConstraint::createAny()}
+  SimpleMock(const neurun::ir::OperandIndexSequence &inputs,
+             const neurun::ir::OperandIndexSequence &outputs)
+      : neurun::model::Operation{neurun::ir::OperandConstraint::createAny()}
   {
     setInputs(inputs);
     setOutputs(outputs);
@@ -38,7 +38,7 @@ public:
 
 public:
   void accept(neurun::model::OperationVisitor &) const override {}
-  neurun::model::OpCode opcode() const final { return neurun::model::OpCode::Invalid; }
+  neurun::ir::OpCode opcode() const final { return neurun::ir::OpCode::Invalid; }
 };
 
 } // namespace graph
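A note on why the test churn above is mostly mechanical: OperandIndex, OperationIndex and IOIndex are strong typedefs over uint32_t, so moving them between namespaces changes spelling only, while the type safety that distinguishes, say, an I/O position from an operand id is preserved. A minimal sketch of the tag-based pattern (illustrative, not the runtime's exact implementation):

#include <cstdint>

template <typename Tag> struct Index
{
  explicit Index(uint32_t v) : value{v} {}
  uint32_t value;
};

struct OperandTag;
struct IOTag;
using OperandIndex = Index<OperandTag>;
using IOIndex = Index<IOTag>;

// OperandIndex i = IOIndex{0}; // would not compile: the tags differ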
diff --git a/runtime/neurun/test/graph/operand/IndexSet.cc b/runtime/neurun/test/graph/operand/IndexSet.cc
index 73e7fd8..969290f 100644
--- a/runtime/neurun/test/graph/operand/IndexSet.cc
+++ b/runtime/neurun/test/graph/operand/IndexSet.cc
@@ -16,10 +16,10 @@
 
 #include
 
-#include "model/OperandIndexSequence.h"
+#include "ir/OperandIndexSequence.h"
 
-using neurun::model::OperandIndex;
-using neurun::model::OperandIndexSequence;
+using neurun::ir::OperandIndex;
+using neurun::ir::OperandIndexSequence;
 
 TEST(graph_OperandIndexSequence, append)
 {
@@ -31,8 +31,8 @@ TEST(graph_OperandIndexSequence, append)
 
   ASSERT_EQ(iset.size(), 5);
 
-  neurun::model::IOIndex index1{1};
-  neurun::model::IOIndex index2{4};
+  neurun::ir::IOIndex index1{1};
+  neurun::ir::IOIndex index2{4};
 
   ASSERT_EQ(iset.at(index1), 2);
   ASSERT_EQ(iset.at(index2), 10);
diff --git a/runtime/neurun/test/graph/operand/Set.cc b/runtime/neurun/test/graph/operand/Set.cc
index ee365684..e30a5b7 100644
--- a/runtime/neurun/test/graph/operand/Set.cc
+++ b/runtime/neurun/test/graph/operand/Set.cc
@@ -16,11 +16,11 @@
 
 #include
 
-#include "model/Operands.h"
+#include "ir/Operands.h"
 
 TEST(graph_operand_Set, set_test)
 {
-  neurun::model::Operands set;
+  neurun::ir::Operands set;
 
   neurun::ir::Shape shape0{1, 2, 3};
 
@@ -35,11 +35,11 @@ TEST(graph_operand_Set, set_test)
   set.emplace(shape0, type);
   set.emplace(shape1, type);
 
-  ASSERT_EQ(set.exist(neurun::model::OperandIndex{0u}), true);
-  ASSERT_EQ(set.exist(neurun::model::OperandIndex{1u}), true);
-  ASSERT_EQ(set.exist(neurun::model::OperandIndex{2u}), false);
+  ASSERT_EQ(set.exist(neurun::ir::OperandIndex{0u}), true);
+  ASSERT_EQ(set.exist(neurun::ir::OperandIndex{1u}), true);
+  ASSERT_EQ(set.exist(neurun::ir::OperandIndex{2u}), false);
 
-  ASSERT_EQ(set.at(neurun::model::OperandIndex{0u}).shape().dim(0), 1);
-  ASSERT_EQ(set.at(neurun::model::OperandIndex{0u}).shape().dim(1), 2);
-  ASSERT_EQ(set.at(neurun::model::OperandIndex{0u}).shape().dim(2), 3);
+  ASSERT_EQ(set.at(neurun::ir::OperandIndex{0u}).shape().dim(0), 1);
+  ASSERT_EQ(set.at(neurun::ir::OperandIndex{0u}).shape().dim(1), 2);
+  ASSERT_EQ(set.at(neurun::ir::OperandIndex{0u}).shape().dim(2), 3);
 }
diff --git a/runtime/neurun/test/graph/operand/UseDef.cc b/runtime/neurun/test/graph/operand/UseDef.cc
index b049b8e..08c4d3f 100644
--- a/runtime/neurun/test/graph/operand/UseDef.cc
+++ b/runtime/neurun/test/graph/operand/UseDef.cc
@@ -26,7 +26,7 @@ namespace
 {
 
-using IndexSet = neurun::model::OperandIndexSequence;
+using IndexSet = neurun::ir::OperandIndexSequence;
 using Mock = neurun_test::graph::SimpleMock;
 
 } // namespace anonymous
diff --git a/runtime/neurun/test/graph/operation/Set.cc b/runtime/neurun/test/graph/operation/Set.cc
index b4b405e..4c0496b 100644
--- a/runtime/neurun/test/graph/operation/Set.cc
+++ b/runtime/neurun/test/graph/operation/Set.cc
@@ -21,7 +21,7 @@
 
 using neurun::model::Operations;
 using neurun::model::Operation;
-using neurun::model::OperationIndex;
+using neurun::ir::OperationIndex;
 
 TEST(graph_operation_Set, operation_test)
 {
diff --git a/runtime/neurun/test/graph/operation/SetIO.cc b/runtime/neurun/test/graph/operation/SetIO.cc
index 31950b0..8e13e9e 100644
--- a/runtime/neurun/test/graph/operation/SetIO.cc
+++ b/runtime/neurun/test/graph/operation/SetIO.cc
@@ -17,8 +17,8 @@
 #include
 
 #include "ir/Graph.h"
-#include "model/Index.h"
-#include "model/OperandIndexSequence.h"
+#include "ir/Index.h"
+#include "ir/OperandIndexSequence.h"
 #include "model/operation/Conv2D.h"
 #include "model/operation/Concat.h"
 
@@ -26,8 +26,8 @@
 
 #include
 
-using Index = neurun::model::IOIndex;
-using IndexSet = neurun::model::OperandIndexSequence;
+using Index = neurun::ir::IOIndex;
+using IndexSet = neurun::ir::OperandIndexSequence;
 
 TEST(graph_operation_setIO, operation_setIO_conv)
 {
diff --git a/runtime/neurun/test/graph/verifier/Verifier.cc b/runtime/neurun/test/graph/verifier/Verifier.cc
index ced5bda..f36fba0 100644
--- a/runtime/neurun/test/graph/verifier/Verifier.cc
+++ b/runtime/neurun/test/graph/verifier/Verifier.cc
@@ -20,10 +20,10 @@
 #include "ir/Graph.h"
 #include "ir/verifier/Verifier.h"
 #include "cpp14/memory.h"
-#include "model/Operand.h"
+#include "ir/Operand.h"
 #include "../MockNode.h"
 
-using IndexSet = neurun::model::OperandIndexSequence;
+using IndexSet = neurun::ir::OperandIndexSequence;
 using Mock = neurun_test::graph::SimpleMock;
 
 TEST(Verifier, dag_checker)