From 872eb5e5b3e46b64b1036dc9a1895b609b65bf54 Mon Sep 17 00:00:00 2001 From: =?utf8?q?=EC=98=A4=ED=98=95=EC=84=9D/=EB=8F=99=EC=9E=91=EC=A0=9C?= =?utf8?q?=EC=96=B4Lab=28SR=29/Staff=20Engineer/=EC=82=BC=EC=84=B1?= =?utf8?q?=EC=A0=84=EC=9E=90?= Date: Fri, 17 Aug 2018 07:19:35 +0900 Subject: [PATCH] [neurun] Remove legacy operand index (#2315) * [neurun] Remove legacy operand index Remove legacy operand index Use operand index in graph Signed-off-by: Hyeongseok Oh * Simplify input/output index access --- runtimes/neurun/src/backend/ITensorBuilder.h | 2 +- .../src/backend/acl_cl/InitializerGenerator.cc | 10 +- .../neurun/src/backend/acl_cl/StageGenerator.cc | 112 +++++++++---------- .../neurun/src/backend/acl_cl/TensorBuilder.cc | 6 +- runtimes/neurun/src/backend/acl_cl/TensorBuilder.h | 4 +- .../neurun/src/backend/cpu/InitializerGenerator.cc | 10 +- runtimes/neurun/src/backend/cpu/StageGenerator.cc | 118 ++++++++++----------- runtimes/neurun/src/backend/cpu/TensorBuilder.cc | 6 +- runtimes/neurun/src/backend/cpu/TensorBuilder.h | 4 +- runtimes/neurun/src/codegen/IPlanBuilder.h | 4 +- runtimes/neurun/src/codegen/PlanBuilder.cc | 6 +- runtimes/neurun/src/codegen/PlanBuilder.h | 4 +- runtimes/neurun/src/codegen/Planner.cc | 40 +++---- runtimes/neurun/src/codegen/TensorMarker.h | 2 +- runtimes/neurun/src/frontend/execution.cc | 4 +- runtimes/neurun/src/frontend/model.cc | 10 +- runtimes/neurun/src/internal/Model.h | 30 ------ runtimes/neurun/src/internal/Plan.cc | 2 +- runtimes/neurun/src/internal/Plan.h | 8 +- .../neurun/src/internal/common/TensorBuilder.cc | 6 +- .../neurun/src/internal/common/TensorBuilder.h | 4 +- 21 files changed, 182 insertions(+), 210 deletions(-) diff --git a/runtimes/neurun/src/backend/ITensorBuilder.h b/runtimes/neurun/src/backend/ITensorBuilder.h index 98c70a7..f6a9d00 100644 --- a/runtimes/neurun/src/backend/ITensorBuilder.h +++ b/runtimes/neurun/src/backend/ITensorBuilder.h @@ -26,7 +26,7 @@ struct TensorConversionParam struct ITensorBuilder { virtual ~ITensorBuilder(void) = default; - virtual void mark(const ::internal::tflite::operand::Index &ind) = 0; + virtual void mark(const ::neurun::graph::operand::Index &ind) = 0; virtual void markFromCommon(const ::internal::tflite::op::Node &op, int32_t ind) = 0; virtual void markToCommon(const ::internal::tflite::op::Node &op, int32_t ind) = 0; virtual void insertTensorConvertNodes(::internal::tflite::op::Sequence &operations) = 0; diff --git a/runtimes/neurun/src/backend/acl_cl/InitializerGenerator.cc b/runtimes/neurun/src/backend/acl_cl/InitializerGenerator.cc index ff49209..5e23398 100644 --- a/runtimes/neurun/src/backend/acl_cl/InitializerGenerator.cc +++ b/runtimes/neurun/src/backend/acl_cl/InitializerGenerator.cc @@ -21,7 +21,7 @@ InitializerGenerator::InitializerGenerator(const neurun::graph::operand::Set &ct Initializer InitializerGenerator::generateWeight(const ::internal::tflite::op::Conv2D::implicit::Node &node) { - const ::internal::tflite::operand::Index ker_index{node.param().ker_index}; + const ::neurun::graph::operand::Index ker_index{node.param().ker_index}; const auto ker_shape = _ctx.at(ker_index).shape().asKernel(); auto ker_base = _ctx.at(ker_index).data().base(); @@ -42,8 +42,8 @@ InitializerGenerator::generateWeight(const ::internal::tflite::op::Conv2D::impli Initializer InitializerGenerator::generateWeight(const ::internal::tflite::op::FullyConnected::Node &node) { - const ::internal::tflite::operand::Index weight_index{node.param().weight_index}; - const ::internal::tflite::operand::Index 
input_index{node.param().input_index}; + const ::neurun::graph::operand::Index weight_index{node.param().weight_index}; + const ::neurun::graph::operand::Index input_index{node.param().input_index}; const auto num_output = _ctx.at(weight_index).shape().dim(0); auto weight_base = _ctx.at(weight_index).data().base(); @@ -83,7 +83,7 @@ InitializerGenerator::generateBias(const ::internal::tflite::op::Conv2D::implici { // TODO Refactor so we can reuse the common code - const ::internal::tflite::operand::Index bias_index{node.param().bias_index}; + const ::neurun::graph::operand::Index bias_index{node.param().bias_index}; auto bias_base = _ctx.at(bias_index).data().base(); const auto bias_size = _ctx.at(bias_index).shape().asVector(); @@ -106,7 +106,7 @@ InitializerGenerator::generateBias(const ::internal::tflite::op::Conv2D::implici Initializer InitializerGenerator::generateBias(const ::internal::tflite::op::FullyConnected::Node &node) { - const ::internal::tflite::operand::Index bias_index{node.param().bias_index}; + const ::neurun::graph::operand::Index bias_index{node.param().bias_index}; auto bias_base = _ctx.at(bias_index).data().base(); const auto bias_size = _ctx.at(bias_index).shape().asVector(); diff --git a/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc b/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc index 4e229d4..568a7d0 100644 --- a/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc +++ b/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc @@ -14,6 +14,8 @@ #include "internal/Padding.h" #include "internal/Model.h" +#include "graph/operand/Index.h" + #include "logging.h" #include "NeuralNetworks.h" @@ -108,16 +110,16 @@ StageGenerator::StageGenerator( Stage StageGenerator::generate(const ::internal::tflite::op::Conv2D::implicit::Node &node) { - const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index}; - const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index}; - const ::internal::tflite::operand::Index ker_index{node.param().ker_index}; - const ::internal::tflite::operand::Index bias_index{node.param().bias_index}; + const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index}; + const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index}; + const ::neurun::graph::operand::Index ker_index{node.param().ker_index}; + const ::neurun::graph::operand::Index bias_index{node.param().bias_index}; - const ::internal::tflite::operand::Index vstride_index{node.param().vstride_index}; - const ::internal::tflite::operand::Index hstride_index{node.param().hstride_index}; + const ::neurun::graph::operand::Index vstride_index{node.param().vstride_index}; + const ::neurun::graph::operand::Index hstride_index{node.param().hstride_index}; - const ::internal::tflite::operand::Index padding_index{node.param().padding_index}; - const ::internal::tflite::operand::Index activation_index{node.param().activation_index}; + const ::neurun::graph::operand::Index padding_index{node.param().padding_index}; + const ::neurun::graph::operand::Index activation_index{node.param().activation_index}; const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(); const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(); @@ -167,10 +169,10 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Conv2D::implicit::N auto tensors = _tensor_builder; return [tensors, param](IExecutionBuilder &builder) { - auto ofm_alloc = tensors->at(::internal::tflite::operand::Index{param.ofm_index}).get(); - auto ifm_alloc = 
tensors->at(::internal::tflite::operand::Index{param.ifm_index}).get(); - auto ker_alloc = tensors->at(::internal::tflite::operand::Index{param.ker_index}).get(); - auto bias_alloc = tensors->at(::internal::tflite::operand::Index{param.bias_index}).get(); + auto ofm_alloc = tensors->at(::neurun::graph::operand::Index{param.ofm_index}).get(); + auto ifm_alloc = tensors->at(::neurun::graph::operand::Index{param.ifm_index}).get(); + auto ker_alloc = tensors->at(::neurun::graph::operand::Index{param.ker_index}).get(); + auto bias_alloc = tensors->at(::neurun::graph::operand::Index{param.bias_index}).get(); const auto conv_info = asPadStringInfo(param.padding, param.stride); @@ -186,16 +188,16 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Conv2D::implicit::N Stage StageGenerator::generate(const ::internal::tflite::op::MaxPool2D::implicit::Node &node) { - const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index}; - const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index}; + const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index}; + const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index}; - const ::internal::tflite::operand::Index kh_index{node.param().kh_index}; - const ::internal::tflite::operand::Index kw_index{node.param().kw_index}; + const ::neurun::graph::operand::Index kh_index{node.param().kh_index}; + const ::neurun::graph::operand::Index kw_index{node.param().kw_index}; - const ::internal::tflite::operand::Index vstride_index{node.param().vstride_index}; - const ::internal::tflite::operand::Index hstride_index{node.param().hstride_index}; + const ::neurun::graph::operand::Index vstride_index{node.param().vstride_index}; + const ::neurun::graph::operand::Index hstride_index{node.param().hstride_index}; - const ::internal::tflite::operand::Index padding_index{node.param().padding_index}; + const ::neurun::graph::operand::Index padding_index{node.param().padding_index}; const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(); const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(); @@ -255,8 +257,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::MaxPool2D::implicit auto tensors = _tensor_builder; return [tensors, param](IExecutionBuilder &builder) { - auto ofm_alloc = tensors->at(::internal::tflite::operand::Index{param.ofm_index}).get(); - auto ifm_alloc = tensors->at(::internal::tflite::operand::Index{param.ifm_index}).get(); + auto ofm_alloc = tensors->at(::neurun::graph::operand::Index{param.ofm_index}).get(); + auto ifm_alloc = tensors->at(::neurun::graph::operand::Index{param.ifm_index}).get(); ::arm_compute::PoolingLayerInfo info{::arm_compute::PoolingType::MAX, ::arm_compute::Size2D{param.kw, param.kh}, @@ -272,16 +274,16 @@ Stage StageGenerator::generate(const ::internal::tflite::op::MaxPool2D::implicit Stage StageGenerator::generate(const ::internal::tflite::op::AvgPool2D::implicit::Node &node) { - const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index}; - const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index}; + const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index}; + const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index}; - const ::internal::tflite::operand::Index kh_index{node.param().kh_index}; - const ::internal::tflite::operand::Index kw_index{node.param().kw_index}; + const ::neurun::graph::operand::Index kh_index{node.param().kh_index}; + const ::neurun::graph::operand::Index 
kw_index{node.param().kw_index}; - const ::internal::tflite::operand::Index vstride_index{node.param().vstride_index}; - const ::internal::tflite::operand::Index hstride_index{node.param().hstride_index}; + const ::neurun::graph::operand::Index vstride_index{node.param().vstride_index}; + const ::neurun::graph::operand::Index hstride_index{node.param().hstride_index}; - const ::internal::tflite::operand::Index padding_index{node.param().padding_index}; + const ::neurun::graph::operand::Index padding_index{node.param().padding_index}; const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(); const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(); @@ -345,8 +347,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::AvgPool2D::implicit auto tensors = _tensor_builder; return [tensors, param](IExecutionBuilder &builder) { - auto ofm_alloc = tensors->at(::internal::tflite::operand::Index{param.ofm_index}).get(); - auto ifm_alloc = tensors->at(::internal::tflite::operand::Index{param.ifm_index}).get(); + auto ofm_alloc = tensors->at(::neurun::graph::operand::Index{param.ofm_index}).get(); + auto ifm_alloc = tensors->at(::neurun::graph::operand::Index{param.ifm_index}).get(); ::arm_compute::PoolingLayerInfo info{ ::arm_compute::PoolingType::AVG, ::arm_compute::Size2D{param.kw, param.kh}, @@ -362,8 +364,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::AvgPool2D::implicit Stage StageGenerator::generate(const ::internal::tflite::op::Concat::Node &node) { - const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index}; - const ::internal::tflite::operand::Index axis_index{node.param().axis_index}; + const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index}; + const ::neurun::graph::operand::Index axis_index{node.param().axis_index}; struct Param { @@ -382,12 +384,12 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Concat::Node &node) auto tensors = _tensor_builder; return [tensors, param](IExecutionBuilder &builder) { - auto output_alloc = tensors->at(::internal::tflite::operand::Index{param.output_index}).get(); + auto output_alloc = tensors->at(::neurun::graph::operand::Index{param.output_index}).get(); std::vector<::arm_compute::ICLTensor *> input_allocs; for (auto ifm_ind : param.input_indexes) { - input_allocs.emplace_back(tensors->at(::internal::tflite::operand::Index{ifm_ind}).get()); + input_allocs.emplace_back(tensors->at(::neurun::graph::operand::Index{ifm_ind}).get()); } std::unique_ptr<::neurun::kernel::acl_cl::ConcatLayer> fn{ @@ -401,11 +403,11 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Concat::Node &node) Stage StageGenerator::generate(const ::internal::tflite::op::FullyConnected::Node &node) { - const ::internal::tflite::operand::Index output_index{node.param().output_index}; - const ::internal::tflite::operand::Index input_index{node.param().input_index}; - const ::internal::tflite::operand::Index weight_index{node.param().weight_index}; - const ::internal::tflite::operand::Index bias_index{node.param().bias_index}; - const ::internal::tflite::operand::Index activation_index{node.param().activation_index}; + const ::neurun::graph::operand::Index output_index{node.param().output_index}; + const ::neurun::graph::operand::Index input_index{node.param().input_index}; + const ::neurun::graph::operand::Index weight_index{node.param().weight_index}; + const ::neurun::graph::operand::Index bias_index{node.param().bias_index}; + const ::neurun::graph::operand::Index 
activation_index{node.param().activation_index}; // Construct operation parameters struct Param @@ -431,10 +433,10 @@ Stage StageGenerator::generate(const ::internal::tflite::op::FullyConnected::Nod auto tensors = _tensor_builder; return [tensors, param](IExecutionBuilder &builder) { - auto output_alloc = tensors->at(::internal::tflite::operand::Index{param.output_index}).get(); - auto input_alloc = tensors->at(::internal::tflite::operand::Index{param.input_index}).get(); - auto weight_alloc = tensors->at(::internal::tflite::operand::Index{param.weight_index}).get(); - auto bias_alloc = tensors->at(::internal::tflite::operand::Index{param.bias_index}).get(); + auto output_alloc = tensors->at(::neurun::graph::operand::Index{param.output_index}).get(); + auto input_alloc = tensors->at(::neurun::graph::operand::Index{param.input_index}).get(); + auto weight_alloc = tensors->at(::neurun::graph::operand::Index{param.weight_index}).get(); + auto bias_alloc = tensors->at(::neurun::graph::operand::Index{param.bias_index}).get(); auto fn = make_layer<::arm_compute::CLFullyConnectedLayer>(); @@ -448,8 +450,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::FullyConnected::Nod Stage StageGenerator::generate(const ::internal::tflite::op::Reshape::Node &node) { - const ::internal::tflite::operand::Index output_index{node.param().output_index}; - const ::internal::tflite::operand::Index input_index{node.param().input_index}; + const ::neurun::graph::operand::Index output_index{node.param().output_index}; + const ::neurun::graph::operand::Index input_index{node.param().input_index}; struct Param { @@ -465,8 +467,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Reshape::Node &node auto tensors = _tensor_builder; return [tensors, param](IExecutionBuilder &builder) { - auto output_alloc = tensors->at(::internal::tflite::operand::Index{param.output_index}).get(); - auto input_alloc = tensors->at(::internal::tflite::operand::Index{param.input_index}).get(); + auto output_alloc = tensors->at(::neurun::graph::operand::Index{param.output_index}).get(); + auto input_alloc = tensors->at(::neurun::graph::operand::Index{param.input_index}).get(); auto fn = make_layer<::arm_compute::CLReshapeLayer>(); @@ -478,8 +480,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Reshape::Node &node Stage StageGenerator::generate(const ::internal::tflite::op::Softmax::Node &node) { - const ::internal::tflite::operand::Index output_index{node.param().output_index}; - const ::internal::tflite::operand::Index input_index{node.param().input_index}; + const ::neurun::graph::operand::Index output_index{node.param().output_index}; + const ::neurun::graph::operand::Index input_index{node.param().input_index}; struct Param { @@ -498,8 +500,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Softmax::Node &node auto tensors = _tensor_builder; return [tensors, param](IExecutionBuilder &builder) { - auto output_alloc = tensors->at(::internal::tflite::operand::Index{param.output_index}).get(); - auto input_alloc = tensors->at(::internal::tflite::operand::Index{param.input_index}).get(); + auto output_alloc = tensors->at(::neurun::graph::operand::Index{param.output_index}).get(); + auto input_alloc = tensors->at(::neurun::graph::operand::Index{param.input_index}).get(); auto fn = make_layer<::arm_compute::CLSoftmaxLayer>(); @@ -523,7 +525,7 @@ Stage StageGenerator::generate(const ::internal::tflite::op::TensorConvert::CpuT Stage StageGenerator::generate( const 
::internal::tflite::op::TensorConvert::AclFromCommon::Node &node) { - const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index}; + const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index}; struct Param { @@ -545,7 +547,7 @@ Stage StageGenerator::generate( auto common_tensor_builder = _common_tensor_builder; return [tensors, common_tensor_builder, param](IExecutionBuilder &builder) { - const ::internal::tflite::operand::Index ifm_index{param.ifm_index}; + const ::neurun::graph::operand::Index ifm_index{param.ifm_index}; auto input_alloc = tensors->at(ifm_index).get(); auto common_tensor = common_tensor_builder->at(ifm_index); @@ -561,7 +563,7 @@ Stage StageGenerator::generate( Stage StageGenerator::generate(const ::internal::tflite::op::TensorConvert::AclToCommon::Node &node) { - const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index}; + const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index}; struct Param { @@ -583,7 +585,7 @@ Stage StageGenerator::generate(const ::internal::tflite::op::TensorConvert::AclT auto common_tensor_builder = _common_tensor_builder; return [tensors, common_tensor_builder, param](IExecutionBuilder &builder) { - const ::internal::tflite::operand::Index ofm_index{param.ofm_index}; + const ::neurun::graph::operand::Index ofm_index{param.ofm_index}; auto output_alloc = tensors->at(ofm_index).get(); auto common_tensor = common_tensor_builder->at(ofm_index); diff --git a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc index dd9e8d4..134e08f 100644 --- a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc +++ b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc @@ -16,7 +16,7 @@ TensorBuilder::TensorBuilder(::internal::Plan &plan) : _plan(plan) // DO NOTHING } -void TensorBuilder::mark(const ::internal::tflite::operand::Index &ind) +void TensorBuilder::mark(const ::neurun::graph::operand::Index &ind) { assert(_tensors.size() == 0); @@ -65,7 +65,7 @@ void TensorBuilder::prepare(const std::map &tens for (auto ind_int : _inds) { - ::internal::tflite::operand::Index ind{ind_int}; + ::neurun::graph::operand::Index ind{ind_int}; auto tensor = std::make_shared<::arm_compute::CLTensor>(); tensor->allocator()->init(tensor_info_ctx.at(ind.asInt())); _plan.operands().set(ind, std::make_shared(tensor)); @@ -85,7 +85,7 @@ void TensorBuilder::allocate(void) } std::shared_ptr<::arm_compute::CLTensor> -TensorBuilder::at(const ::internal::tflite::operand::Index &ind) +TensorBuilder::at(const ::neurun::graph::operand::Index &ind) { return _tensors.at(ind.asInt()); } diff --git a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h index 7527867..1840b34 100644 --- a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h +++ b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h @@ -23,14 +23,14 @@ class TensorBuilder : public ITensorBuilder public: TensorBuilder(::internal::Plan &plan); - virtual void mark(const ::internal::tflite::operand::Index &ind) override; + virtual void mark(const ::neurun::graph::operand::Index &ind) override; virtual void markFromCommon(const ::internal::tflite::op::Node &op, int32_t ind) override; virtual void markToCommon(const ::internal::tflite::op::Node &op, int32_t ind) override; virtual void insertTensorConvertNodes(::internal::tflite::op::Sequence &operations) override; virtual void prepare(const std::map &tensor_info_ctx) override; virtual void allocate(void) override; - 
std::shared_ptr<::arm_compute::CLTensor> at(const ::internal::tflite::operand::Index &ind); + std::shared_ptr<::arm_compute::CLTensor> at(const ::neurun::graph::operand::Index &ind); private: ::internal::Plan &_plan; diff --git a/runtimes/neurun/src/backend/cpu/InitializerGenerator.cc b/runtimes/neurun/src/backend/cpu/InitializerGenerator.cc index 970dd9f..de56d85 100644 --- a/runtimes/neurun/src/backend/cpu/InitializerGenerator.cc +++ b/runtimes/neurun/src/backend/cpu/InitializerGenerator.cc @@ -21,7 +21,7 @@ InitializerGenerator::InitializerGenerator(const neurun::graph::operand::Set &ct Initializer InitializerGenerator::generateWeight(const ::internal::tflite::op::Conv2D::implicit::Node &node) { - const ::internal::tflite::operand::Index ker_index{node.param().ker_index}; + const ::neurun::graph::operand::Index ker_index{node.param().ker_index}; const auto ker_shape = _ctx.at(ker_index).shape().asKernel(); auto ker_base = _ctx.at(ker_index).data().base(); @@ -42,8 +42,8 @@ InitializerGenerator::generateWeight(const ::internal::tflite::op::Conv2D::impli Initializer InitializerGenerator::generateWeight(const ::internal::tflite::op::FullyConnected::Node &node) { - const ::internal::tflite::operand::Index weight_index{node.param().weight_index}; - const ::internal::tflite::operand::Index input_index{node.param().input_index}; + const ::neurun::graph::operand::Index weight_index{node.param().weight_index}; + const ::neurun::graph::operand::Index input_index{node.param().input_index}; const auto num_output = _ctx.at(weight_index).shape().dim(0); auto weight_base = _ctx.at(weight_index).data().base(); @@ -120,7 +120,7 @@ InitializerGenerator::generateBias(const ::internal::tflite::op::Conv2D::implici { // TODO Refactor so we can reuse the common code - const ::internal::tflite::operand::Index bias_index{node.param().bias_index}; + const ::neurun::graph::operand::Index bias_index{node.param().bias_index}; auto bias_base = _ctx.at(bias_index).data().base(); const auto bias_size = _ctx.at(bias_index).shape().asVector(); @@ -143,7 +143,7 @@ InitializerGenerator::generateBias(const ::internal::tflite::op::Conv2D::implici Initializer InitializerGenerator::generateBias(const ::internal::tflite::op::FullyConnected::Node &node) { - const ::internal::tflite::operand::Index bias_index{node.param().bias_index}; + const ::neurun::graph::operand::Index bias_index{node.param().bias_index}; auto bias_base = _ctx.at(bias_index).data().base(); auto bias_type = _ctx.at(bias_index).shape().type(); diff --git a/runtimes/neurun/src/backend/cpu/StageGenerator.cc b/runtimes/neurun/src/backend/cpu/StageGenerator.cc index d1f08e1..5f1450a 100644 --- a/runtimes/neurun/src/backend/cpu/StageGenerator.cc +++ b/runtimes/neurun/src/backend/cpu/StageGenerator.cc @@ -38,16 +38,16 @@ StageGenerator::StageGenerator( Stage StageGenerator::generate(const ::internal::tflite::op::Conv2D::implicit::Node &node) { - const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index}; - const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index}; - const ::internal::tflite::operand::Index ker_index{node.param().ker_index}; - const ::internal::tflite::operand::Index bias_index{node.param().bias_index}; + const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index}; + const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index}; + const ::neurun::graph::operand::Index ker_index{node.param().ker_index}; + const ::neurun::graph::operand::Index bias_index{node.param().bias_index}; - const 
::internal::tflite::operand::Index vstride_index{node.param().vstride_index}; - const ::internal::tflite::operand::Index hstride_index{node.param().hstride_index}; + const ::neurun::graph::operand::Index vstride_index{node.param().vstride_index}; + const ::neurun::graph::operand::Index hstride_index{node.param().hstride_index}; - const ::internal::tflite::operand::Index padding_index{node.param().padding_index}; - const ::internal::tflite::operand::Index activation_index{node.param().activation_index}; + const ::neurun::graph::operand::Index padding_index{node.param().padding_index}; + const ::neurun::graph::operand::Index activation_index{node.param().activation_index}; const PaddingCode padding_type = static_cast(_ctx.at(padding_index).asScalar()); @@ -103,10 +103,10 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Conv2D::implicit::N auto tensors = _tensor_builder; return [tensors, param](IExecutionBuilder &builder) { - auto ofm_alloc = tensors->at(::internal::tflite::operand::Index{param.ofm_index}); - auto ifm_alloc = tensors->at(::internal::tflite::operand::Index{param.ifm_index}); - auto ker_alloc = tensors->at(::internal::tflite::operand::Index{param.ker_index}); - auto bias_alloc = tensors->at(::internal::tflite::operand::Index{param.bias_index}); + auto ofm_alloc = tensors->at(::neurun::graph::operand::Index{param.ofm_index}); + auto ifm_alloc = tensors->at(::neurun::graph::operand::Index{param.ifm_index}); + auto ker_alloc = tensors->at(::neurun::graph::operand::Index{param.ker_index}); + auto bias_alloc = tensors->at(::neurun::graph::operand::Index{param.bias_index}); std::unique_ptr<::neurun::kernel::cpu::ConvolutionLayer> fn{ new ::neurun::kernel::cpu::ConvolutionLayer}; @@ -124,17 +124,17 @@ Stage StageGenerator::generate(const ::internal::tflite::op::MaxPool2D::implicit { VERBOSE(MaxPool2D) << "generate CPU MaxPool2D" << std::endl; - const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index}; - const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index}; + const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index}; + const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index}; - const ::internal::tflite::operand::Index kh_index{node.param().kh_index}; - const ::internal::tflite::operand::Index kw_index{node.param().kw_index}; + const ::neurun::graph::operand::Index kh_index{node.param().kh_index}; + const ::neurun::graph::operand::Index kw_index{node.param().kw_index}; - const ::internal::tflite::operand::Index vstride_index{node.param().vstride_index}; - const ::internal::tflite::operand::Index hstride_index{node.param().hstride_index}; + const ::neurun::graph::operand::Index vstride_index{node.param().vstride_index}; + const ::neurun::graph::operand::Index hstride_index{node.param().hstride_index}; - const ::internal::tflite::operand::Index padding_index{node.param().padding_index}; - const ::internal::tflite::operand::Index activation_index{node.param().activation_index}; + const ::neurun::graph::operand::Index padding_index{node.param().padding_index}; + const ::neurun::graph::operand::Index activation_index{node.param().activation_index}; const int32_t kh = _ctx.at(kh_index).asScalar(); const int32_t kw = _ctx.at(kw_index).asScalar(); @@ -200,8 +200,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::MaxPool2D::implicit auto tensors = _tensor_builder; return [tensors, param](IExecutionBuilder &builder) { - auto ofm_alloc = 
tensors->at(::internal::tflite::operand::Index{param.ofm_index}).get(); - auto ifm_alloc = tensors->at(::internal::tflite::operand::Index{param.ifm_index}).get(); + auto ofm_alloc = tensors->at(::neurun::graph::operand::Index{param.ofm_index}).get(); + auto ifm_alloc = tensors->at(::neurun::graph::operand::Index{param.ifm_index}).get(); std::unique_ptr<::neurun::kernel::cpu::MaxPoolLayer> fn{ new ::neurun::kernel::cpu::MaxPoolLayer}; @@ -219,17 +219,17 @@ Stage StageGenerator::generate(const ::internal::tflite::op::AvgPool2D::implicit { VERBOSE(AvgPool2D) << "generate CPU AvgPool2D" << std::endl; - const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index}; - const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index}; + const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index}; + const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index}; - const ::internal::tflite::operand::Index kh_index{node.param().kh_index}; - const ::internal::tflite::operand::Index kw_index{node.param().kw_index}; + const ::neurun::graph::operand::Index kh_index{node.param().kh_index}; + const ::neurun::graph::operand::Index kw_index{node.param().kw_index}; - const ::internal::tflite::operand::Index vstride_index{node.param().vstride_index}; - const ::internal::tflite::operand::Index hstride_index{node.param().hstride_index}; + const ::neurun::graph::operand::Index vstride_index{node.param().vstride_index}; + const ::neurun::graph::operand::Index hstride_index{node.param().hstride_index}; - const ::internal::tflite::operand::Index padding_index{node.param().padding_index}; - const ::internal::tflite::operand::Index activation_index{node.param().activation_index}; + const ::neurun::graph::operand::Index padding_index{node.param().padding_index}; + const ::neurun::graph::operand::Index activation_index{node.param().activation_index}; const int32_t kh = _ctx.at(kh_index).asScalar(); const int32_t kw = _ctx.at(kw_index).asScalar(); @@ -299,8 +299,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::AvgPool2D::implicit auto tensors = _tensor_builder; return [tensors, param](IExecutionBuilder &builder) { - auto ofm_alloc = tensors->at(::internal::tflite::operand::Index{param.ofm_index}).get(); - auto ifm_alloc = tensors->at(::internal::tflite::operand::Index{param.ifm_index}).get(); + auto ofm_alloc = tensors->at(::neurun::graph::operand::Index{param.ofm_index}).get(); + auto ifm_alloc = tensors->at(::neurun::graph::operand::Index{param.ifm_index}).get(); std::unique_ptr<::neurun::kernel::cpu::AvgPoolLayer> fn{ new ::neurun::kernel::cpu::AvgPoolLayer}; @@ -318,8 +318,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Concat::Node &node) { VERBOSE(Concat) << "generate CPU Concat" << std::endl; - const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index}; - const ::internal::tflite::operand::Index axis_index{node.param().axis_index}; + const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index}; + const ::neurun::graph::operand::Index axis_index{node.param().axis_index}; struct Param { @@ -342,20 +342,20 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Concat::Node &node) for (auto ifm_ind : node.param().ifm_indexes) { - const ::internal::tflite::operand::Index ifm_index{ifm_ind}; + const ::neurun::graph::operand::Index ifm_index{ifm_ind}; param.ifm_shapes.emplace_back(_ctx.at(ifm_index).shape()); } auto tensors = _tensor_builder; return [tensors, param](IExecutionBuilder &builder) { - auto 
output_alloc = tensors->at(::internal::tflite::operand::Index{param.output_index}).get(); + auto output_alloc = tensors->at(::neurun::graph::operand::Index{param.output_index}).get(); std::vector input_buffers; for (auto ifm_ind : param.input_indexes) { input_buffers.emplace_back( - tensors->at(::internal::tflite::operand::Index{ifm_ind}).get()->buffer()); + tensors->at(::neurun::graph::operand::Index{ifm_ind}).get()->buffer()); } std::unique_ptr<::neurun::kernel::cpu::ConcatLayer> fn{new ::neurun::kernel::cpu::ConcatLayer}; @@ -371,11 +371,11 @@ Stage StageGenerator::generate(const ::internal::tflite::op::FullyConnected::Nod { VERBOSE(FullyConnected) << "generate CPU FullyConnected" << std::endl; - const ::internal::tflite::operand::Index output_index{node.param().output_index}; - const ::internal::tflite::operand::Index input_index{node.param().input_index}; - const ::internal::tflite::operand::Index weight_index{node.param().weight_index}; - const ::internal::tflite::operand::Index bias_index{node.param().bias_index}; - const ::internal::tflite::operand::Index activation_index{node.param().activation_index}; + const ::neurun::graph::operand::Index output_index{node.param().output_index}; + const ::neurun::graph::operand::Index input_index{node.param().input_index}; + const ::neurun::graph::operand::Index weight_index{node.param().weight_index}; + const ::neurun::graph::operand::Index bias_index{node.param().bias_index}; + const ::neurun::graph::operand::Index activation_index{node.param().activation_index}; // Construct operation parameters struct Param @@ -410,10 +410,10 @@ Stage StageGenerator::generate(const ::internal::tflite::op::FullyConnected::Nod auto tensors = _tensor_builder; return [tensors, param](IExecutionBuilder &builder) { - auto output_alloc = tensors->at(::internal::tflite::operand::Index{param.output_index}).get(); - auto input_alloc = tensors->at(::internal::tflite::operand::Index{param.input_index}).get(); - auto weight_alloc = tensors->at(::internal::tflite::operand::Index{param.weight_index}).get(); - auto bias_alloc = tensors->at(::internal::tflite::operand::Index{param.bias_index}).get(); + auto output_alloc = tensors->at(::neurun::graph::operand::Index{param.output_index}).get(); + auto input_alloc = tensors->at(::neurun::graph::operand::Index{param.input_index}).get(); + auto weight_alloc = tensors->at(::neurun::graph::operand::Index{param.weight_index}).get(); + auto bias_alloc = tensors->at(::neurun::graph::operand::Index{param.bias_index}).get(); std::unique_ptr<::neurun::kernel::cpu::FullyConnectedLayer> fn{ new ::neurun::kernel::cpu::FullyConnectedLayer}; @@ -428,8 +428,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::FullyConnected::Nod Stage StageGenerator::generate(const ::internal::tflite::op::Reshape::Node &node) { - const ::internal::tflite::operand::Index output_index{node.param().output_index}; - const ::internal::tflite::operand::Index input_index{node.param().input_index}; + const ::neurun::graph::operand::Index output_index{node.param().output_index}; + const ::neurun::graph::operand::Index input_index{node.param().input_index}; struct Param { @@ -451,8 +451,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Reshape::Node &node auto tensors = _tensor_builder; return [tensors, param](IExecutionBuilder &builder) { - auto output_alloc = tensors->at(::internal::tflite::operand::Index{param.output_index}).get(); - auto input_alloc = tensors->at(::internal::tflite::operand::Index{param.input_index}).get(); + auto 
output_alloc = tensors->at(::neurun::graph::operand::Index{param.output_index}).get(); + auto input_alloc = tensors->at(::neurun::graph::operand::Index{param.input_index}).get(); std::unique_ptr<::neurun::kernel::cpu::ReshapeLayer> fn{ new ::neurun::kernel::cpu::ReshapeLayer}; @@ -467,9 +467,9 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Softmax::Node &node { VERBOSE(Softmax) << "generate CPU Softmax" << std::endl; - const ::internal::tflite::operand::Index output_index{node.param().output_index}; - const ::internal::tflite::operand::Index input_index{node.param().input_index}; - const ::internal::tflite::operand::Index scale_index{node.param().scale_index}; + const ::neurun::graph::operand::Index output_index{node.param().output_index}; + const ::neurun::graph::operand::Index input_index{node.param().input_index}; + const ::neurun::graph::operand::Index scale_index{node.param().scale_index}; struct Param { @@ -495,8 +495,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Softmax::Node &node auto tensors = _tensor_builder; return [tensors, param](IExecutionBuilder &builder) { - auto output_alloc = tensors->at(::internal::tflite::operand::Index{param.output_index}).get(); - auto input_alloc = tensors->at(::internal::tflite::operand::Index{param.input_index}).get(); + auto output_alloc = tensors->at(::neurun::graph::operand::Index{param.output_index}).get(); + auto input_alloc = tensors->at(::neurun::graph::operand::Index{param.input_index}).get(); std::unique_ptr<::neurun::kernel::cpu::SoftMaxLayer> fn{ new ::neurun::kernel::cpu::SoftMaxLayer}; @@ -511,7 +511,7 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Softmax::Node &node Stage StageGenerator::generate( const ::internal::tflite::op::TensorConvert::CpuFromCommon::Node &node) { - const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index}; + const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index}; struct Param { @@ -533,7 +533,7 @@ Stage StageGenerator::generate( auto common_tensor_builder = _common_tensor_builder; return [tensors, common_tensor_builder, param](IExecutionBuilder &builder) { - const ::internal::tflite::operand::Index ifm_index{param.ifm_index}; + const ::neurun::graph::operand::Index ifm_index{param.ifm_index}; auto input_alloc = tensors->at(ifm_index).get(); auto common_tensor = common_tensor_builder->at(ifm_index); @@ -549,7 +549,7 @@ Stage StageGenerator::generate( Stage StageGenerator::generate(const ::internal::tflite::op::TensorConvert::CpuToCommon::Node &node) { - const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index}; + const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index}; struct Param { @@ -571,7 +571,7 @@ Stage StageGenerator::generate(const ::internal::tflite::op::TensorConvert::CpuT auto common_tensor_builder = _common_tensor_builder; return [tensors, common_tensor_builder, param](IExecutionBuilder &builder) { - const ::internal::tflite::operand::Index ofm_index{param.ofm_index}; + const ::neurun::graph::operand::Index ofm_index{param.ofm_index}; auto output_alloc = tensors->at(ofm_index).get(); auto common_tensor = common_tensor_builder->at(ofm_index); diff --git a/runtimes/neurun/src/backend/cpu/TensorBuilder.cc b/runtimes/neurun/src/backend/cpu/TensorBuilder.cc index 18e10fd..0c23bdb 100644 --- a/runtimes/neurun/src/backend/cpu/TensorBuilder.cc +++ b/runtimes/neurun/src/backend/cpu/TensorBuilder.cc @@ -16,7 +16,7 @@ TensorBuilder::TensorBuilder(::internal::Plan &plan) : _plan(plan) // DO 
NOTHING } -void TensorBuilder::mark(const ::internal::tflite::operand::Index &ind) +void TensorBuilder::mark(const ::neurun::graph::operand::Index &ind) { assert(_tensors.size() == 0); @@ -62,7 +62,7 @@ void TensorBuilder::prepare(const std::map &tens for (auto ind_int : _inds) { - ::internal::tflite::operand::Index ind{ind_int}; + ::neurun::graph::operand::Index ind{ind_int}; auto tensor = std::make_shared<::internal::cpu::Tensor>(tensor_info_ctx.at(ind.asInt())); // TODO Fix allocation here. When Tensor object is created the memory for tensor is also // allocated, and this must be fixed. @@ -80,7 +80,7 @@ void TensorBuilder::allocate(void) } std::shared_ptr<::internal::cpu::Tensor> -TensorBuilder::at(const ::internal::tflite::operand::Index &ind) +TensorBuilder::at(const ::neurun::graph::operand::Index &ind) { return _tensors.at(ind.asInt()); } diff --git a/runtimes/neurun/src/backend/cpu/TensorBuilder.h b/runtimes/neurun/src/backend/cpu/TensorBuilder.h index 3309139..e43efe7 100644 --- a/runtimes/neurun/src/backend/cpu/TensorBuilder.h +++ b/runtimes/neurun/src/backend/cpu/TensorBuilder.h @@ -22,14 +22,14 @@ class TensorBuilder : public ITensorBuilder public: TensorBuilder(::internal::Plan &plan); - virtual void mark(const ::internal::tflite::operand::Index &ind) override; + virtual void mark(const ::neurun::graph::operand::Index &ind) override; virtual void markFromCommon(const ::internal::tflite::op::Node &op, int32_t ind) override; virtual void markToCommon(const ::internal::tflite::op::Node &op, int32_t ind) override; virtual void insertTensorConvertNodes(::internal::tflite::op::Sequence &operations) override; virtual void prepare(const std::map &tensor_info_ctx) override; virtual void allocate(void) override; - std::shared_ptr<::internal::cpu::Tensor> at(const ::internal::tflite::operand::Index &ind); + std::shared_ptr<::internal::cpu::Tensor> at(const ::neurun::graph::operand::Index &ind); private: ::internal::Plan &_plan; diff --git a/runtimes/neurun/src/codegen/IPlanBuilder.h b/runtimes/neurun/src/codegen/IPlanBuilder.h index 4bd7076..ba2d790 100644 --- a/runtimes/neurun/src/codegen/IPlanBuilder.h +++ b/runtimes/neurun/src/codegen/IPlanBuilder.h @@ -15,9 +15,9 @@ struct IPlanBuilder { virtual ~IPlanBuilder() = default; - virtual void addShapeConstr(const ::internal::tflite::operand::Index &ind, + virtual void addShapeConstr(const ::neurun::graph::operand::Index &ind, const ::arm_compute::TensorInfo &info) = 0; - virtual void addInitializer(const ::internal::tflite::operand::Index &ind, + virtual void addInitializer(const ::neurun::graph::operand::Index &ind, const Initializer &initializer) = 0; virtual void addStage(const Stage &) = 0; }; diff --git a/runtimes/neurun/src/codegen/PlanBuilder.cc b/runtimes/neurun/src/codegen/PlanBuilder.cc index 6835949..54cf03b 100644 --- a/runtimes/neurun/src/codegen/PlanBuilder.cc +++ b/runtimes/neurun/src/codegen/PlanBuilder.cc @@ -5,13 +5,13 @@ namespace neurun namespace codegen { -void PlanBuilder::addShapeConstr(const ::internal::tflite::operand::Index &ind, +void PlanBuilder::addShapeConstr(const ::neurun::graph::operand::Index &ind, const ::arm_compute::TensorInfo &info) { _tensor_info_ctx[ind.asInt()] = info; } -void PlanBuilder::addInitializer(const ::internal::tflite::operand::Index &ind, +void PlanBuilder::addInitializer(const ::neurun::graph::operand::Index &ind, const Initializer &initializer) { _initializer_ctx[ind.asInt()] = initializer; @@ -52,7 +52,7 @@ void PlanBuilder::finalize(BackendResolver &backend_resolver) // Fill 
weight/bias for (auto it = _initializer_ctx.begin(); it != _initializer_ctx.end(); ++it) { - const ::internal::tflite::operand::Index operand_index{it->first}; + const ::neurun::graph::operand::Index operand_index{it->first}; auto objects = _plan.operands().at(operand_index); for (auto object : objects) diff --git a/runtimes/neurun/src/codegen/PlanBuilder.h b/runtimes/neurun/src/codegen/PlanBuilder.h index 0a2cead..9bc2e92 100644 --- a/runtimes/neurun/src/codegen/PlanBuilder.h +++ b/runtimes/neurun/src/codegen/PlanBuilder.h @@ -38,11 +38,11 @@ public: } public: - void addShapeConstr(const ::internal::tflite::operand::Index &ind, + void addShapeConstr(const ::neurun::graph::operand::Index &ind, const ::arm_compute::TensorInfo &info) override; public: - void addInitializer(const ::internal::tflite::operand::Index &ind, + void addInitializer(const ::neurun::graph::operand::Index &ind, const Initializer &initializer) override; public: diff --git a/runtimes/neurun/src/codegen/Planner.cc b/runtimes/neurun/src/codegen/Planner.cc index 6eda216..2d1a3f6 100644 --- a/runtimes/neurun/src/codegen/Planner.cc +++ b/runtimes/neurun/src/codegen/Planner.cc @@ -15,11 +15,11 @@ namespace codegen void Planner::visit(const ::internal::tflite::op::Conv2D::implicit::Node &node) { - const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index}; + const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index}; - const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index}; - const ::internal::tflite::operand::Index ker_index{node.param().ker_index}; - const ::internal::tflite::operand::Index bias_index{node.param().bias_index}; + const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index}; + const ::neurun::graph::operand::Index ker_index{node.param().ker_index}; + const ::neurun::graph::operand::Index bias_index{node.param().bias_index}; const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(); const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(); @@ -44,8 +44,8 @@ void Planner::visit(const ::internal::tflite::op::Conv2D::implicit::Node &node) void Planner::visit(const ::internal::tflite::op::MaxPool2D::implicit::Node &node) { - const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index}; - const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index}; + const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index}; + const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index}; const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(); const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(); @@ -61,8 +61,8 @@ void Planner::visit(const ::internal::tflite::op::MaxPool2D::implicit::Node &nod void Planner::visit(const ::internal::tflite::op::AvgPool2D::implicit::Node &node) { - const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index}; - const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index}; + const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index}; + const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index}; const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(); const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(); @@ -78,7 +78,7 @@ void Planner::visit(const ::internal::tflite::op::AvgPool2D::implicit::Node &nod void Planner::visit(const ::internal::tflite::op::Concat::Node &node) { - const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index}; + const ::neurun::graph::operand::Index 
ofm_index{node.param().ofm_index}; // NOTE This implementation assumes that inputs and output are a feature // TODO Remove this assumption @@ -86,7 +86,7 @@ void Planner::visit(const ::internal::tflite::op::Concat::Node &node) // NOTE This implementation assumes concat over feature depth // TODO Remove this assumption - assert(_ctx.at(::internal::tflite::operand::Index{node.param().axis_index}).asScalar() == + assert(_ctx.at(::neurun::graph::operand::Index{node.param().axis_index}).asScalar() == 3); // Set Shape Constraints (for output) @@ -97,7 +97,7 @@ void Planner::visit(const ::internal::tflite::op::Concat::Node &node) for (const auto &index : node.param().ifm_indexes) { - const ::internal::tflite::operand::Index ifm_index{index}; + const ::neurun::graph::operand::Index ifm_index{index}; const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(); _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape)); } @@ -111,13 +111,13 @@ void Planner::visit(const ::internal::tflite::op::FullyConnected::Node &node) { VERBOSE(FullyConnected) << "Configure FULLY_CONNECTED operation" << std::endl; - const ::internal::tflite::operand::Index output_index{node.param().output_index}; + const ::neurun::graph::operand::Index output_index{node.param().output_index}; - const ::internal::tflite::operand::Index input_index{node.param().input_index}; - const ::internal::tflite::operand::Index weight_index{node.param().weight_index}; - const ::internal::tflite::operand::Index bias_index{node.param().bias_index}; + const ::neurun::graph::operand::Index input_index{node.param().input_index}; + const ::neurun::graph::operand::Index weight_index{node.param().weight_index}; + const ::neurun::graph::operand::Index bias_index{node.param().bias_index}; - const ::internal::tflite::operand::Index activation_index{node.param().activation_index}; + const ::neurun::graph::operand::Index activation_index{node.param().activation_index}; assert(_ctx.at(output_index).shape().rank() == 2); const auto output_size = _ctx.at(output_index).shape().dim(1); @@ -152,8 +152,8 @@ void Planner::visit(const ::internal::tflite::op::FullyConnected::Node &node) void Planner::visit(const ::internal::tflite::op::Reshape::Node &node) { - const ::internal::tflite::operand::Index output_index{node.param().output_index}; - const ::internal::tflite::operand::Index input_index{node.param().input_index}; + const ::neurun::graph::operand::Index output_index{node.param().output_index}; + const ::neurun::graph::operand::Index input_index{node.param().input_index}; // NOTE The content of a tensor specified by shape_index should be aligned with // output tensor shape @@ -184,8 +184,8 @@ void Planner::visit(const ::internal::tflite::op::Softmax::Node &node) { VERBOSE(Softmax) << "Configure SOFTMAX operation" << std::endl; - const ::internal::tflite::operand::Index output_index{node.param().output_index}; - const ::internal::tflite::operand::Index input_index{node.param().input_index}; + const ::neurun::graph::operand::Index output_index{node.param().output_index}; + const ::neurun::graph::operand::Index input_index{node.param().input_index}; assert(_ctx.at(output_index).shape().rank() == _ctx.at(input_index).shape().rank()); diff --git a/runtimes/neurun/src/codegen/TensorMarker.h b/runtimes/neurun/src/codegen/TensorMarker.h index 6c9b72f..cb596fd 100644 --- a/runtimes/neurun/src/codegen/TensorMarker.h +++ b/runtimes/neurun/src/codegen/TensorMarker.h @@ -31,7 +31,7 @@ public: void visit(const 
::internal::tflite::op::TensorConvert::AclToCommon::Node &node) override; private: - void mark(int32_t ind) { _tensor_builder.mark(::internal::tflite::operand::Index{ind}); } + void mark(int32_t ind) { _tensor_builder.mark(::neurun::graph::operand::Index{ind}); } void markFromCommon(const ::internal::tflite::op::Node &op, int32_t ind) { _tensor_builder.markFromCommon(op, ind); diff --git a/runtimes/neurun/src/frontend/execution.cc b/runtimes/neurun/src/frontend/execution.cc index 8276ce3..3ae189d 100644 --- a/runtimes/neurun/src/frontend/execution.cc +++ b/runtimes/neurun/src/frontend/execution.cc @@ -149,7 +149,7 @@ int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution *execution, neurun::graph::operand::IO::Index input_index{n}; - ::internal::tflite::operand::Index index{model.inputs().at(input_index).asInt()}; + ::neurun::graph::operand::Index index{model.inputs().at(input_index)}; auto objects = plan.operands().at(index); for (auto object : objects) @@ -172,7 +172,7 @@ int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution *execution, neurun::graph::operand::IO::Index output_index{n}; - ::internal::tflite::operand::Index index{model.outputs().at(output_index).asInt()}; + ::neurun::graph::operand::Index index{model.outputs().at(output_index)}; auto objects = plan.operands().at(index); for (auto object : objects) diff --git a/runtimes/neurun/src/frontend/model.cc b/runtimes/neurun/src/frontend/model.cc index 2f59e53..39b7206 100644 --- a/runtimes/neurun/src/frontend/model.cc +++ b/runtimes/neurun/src/frontend/model.cc @@ -192,7 +192,7 @@ int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model, for (uint32_t i = 0; i < outputCount; i++) { - // NOTE ::internal::tflite::operand::Index uses int as its underlying type as various NNAPI + // NOTE ::neurun::graph::operand::Index uses int as its underlying type as various NNAPI // functions such as ANeuralNetworksModel_setOperandValue use int to represent operand // index // @@ -201,7 +201,7 @@ int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model, // index. // // Below, static_cast(...) is introduced to eliminate compiler warning. - const internal::tflite::operand::Index ind{static_cast(outputs[i])}; + const ::neurun::graph::operand::Index ind{static_cast(outputs[i])}; auto &obj = model->deref().operands().at(ind); if (!obj.setAsOperationOutput()) @@ -355,7 +355,7 @@ int ANeuralNetworksModel_addOperationEx(ANeuralNetworksModel *model, for (uint32_t i = 0; i < outputCount; i++) { - // NOTE ::internal::tflite::operand::Index uses int as its underlying type as various NNAPI + // NOTE ::neurun::graph::operand::Index uses int as its underlying type as various NNAPI // functions such as ANeuralNetworksModel_setOperandValue use int to represent operand // index // @@ -364,7 +364,7 @@ int ANeuralNetworksModel_addOperationEx(ANeuralNetworksModel *model, // index. // // Below, static_cast(...) is introduced to eliminate compiler warning. 
- const internal::tflite::operand::Index ind{static_cast(outputs[i])}; + const ::neurun::graph::operand::Index ind{static_cast(outputs[i])}; auto &obj = model->deref().operands().at(ind); if (!obj.setAsOperationOutput()) @@ -394,7 +394,7 @@ int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel *model, u return ANEURALNETWORKS_BAD_STATE; } - // NOTE ::internal::tflite::operand::Index uses int as its underlying type as various NNAPI + // NOTE ::neurun::graph::operand::Index uses int as its underlying type as various NNAPI // functions such as ANeuralNetworksModel_setOperandValue use int to represent operand index // // ANeuralNetworksModel_identifyInputsAndOutputs, however, uses uint32_t to represent operand diff --git a/runtimes/neurun/src/internal/Model.h b/runtimes/neurun/src/internal/Model.h index 83665e4..04e1bce 100644 --- a/runtimes/neurun/src/internal/Model.h +++ b/runtimes/neurun/src/internal/Model.h @@ -3,36 +3,6 @@ #include "graph/operand/Index.h" -namespace internal -{ -namespace tflite -{ -namespace operand -{ - -class Index -{ -public: - explicit Index(int value) : _value{value} - { - // DO NOTHING - } - - // NOTE Temporary casting operator for legacy code compatibility -public: - operator neurun::graph::operand::Index() const { return neurun::graph::operand::Index{_value}; } - -public: - int asInt(void) const { return _value; } - -private: - int _value; -}; - -} // namespace operand -} // namespace tflite -} // namespace internal - #include #include diff --git a/runtimes/neurun/src/internal/Plan.cc b/runtimes/neurun/src/internal/Plan.cc index af772b6..8dcbaf1 100644 --- a/runtimes/neurun/src/internal/Plan.cc +++ b/runtimes/neurun/src/internal/Plan.cc @@ -5,7 +5,7 @@ namespace internal namespace operand { -Context &Context::set(const ::internal::tflite::operand::Index &id, +Context &Context::set(const ::neurun::graph::operand::Index &id, const std::shared_ptr &object) { _objects[id.asInt()].emplace_back(object); diff --git a/runtimes/neurun/src/internal/Plan.h b/runtimes/neurun/src/internal/Plan.h index 2c2bf77..3b02a98 100644 --- a/runtimes/neurun/src/internal/Plan.h +++ b/runtimes/neurun/src/internal/Plan.h @@ -14,24 +14,24 @@ namespace operand class Context { public: - Context &set(const ::internal::tflite::operand::Index &ind, + Context &set(const ::neurun::graph::operand::Index &ind, const std::shared_ptr &object); public: - bool exist(const ::internal::tflite::operand::Index &ind) const + bool exist(const ::neurun::graph::operand::Index &ind) const { return _objects.find(ind.asInt()) != _objects.end(); } public: const std::vector> & - at(const ::internal::tflite::operand::Index &ind) const + at(const ::neurun::graph::operand::Index &ind) const { return _objects.at(ind.asInt()); } std::vector> & - at(const ::internal::tflite::operand::Index &ind) + at(const ::neurun::graph::operand::Index &ind) { return _objects.at(ind.asInt()); } diff --git a/runtimes/neurun/src/internal/common/TensorBuilder.cc b/runtimes/neurun/src/internal/common/TensorBuilder.cc index 9ee06ec..e80d5e2 100644 --- a/runtimes/neurun/src/internal/common/TensorBuilder.cc +++ b/runtimes/neurun/src/internal/common/TensorBuilder.cc @@ -12,7 +12,7 @@ TensorBuilder::TensorBuilder(::internal::Plan &plan) : _plan(plan) // DO NOTHING } -void TensorBuilder::mark(const ::internal::tflite::operand::Index &ind) +void TensorBuilder::mark(const ::neurun::graph::operand::Index &ind) { assert(_tensors.size() == 0); @@ -49,7 +49,7 @@ void TensorBuilder::prepare(const std::map &tens for (auto ind_int : 
_inds) { - ::internal::tflite::operand::Index ind{ind_int}; + ::neurun::graph::operand::Index ind{ind_int}; auto tensor = std::make_shared<::internal::common::Tensor>(tensor_info_ctx.at(ind.asInt())); _plan.common_operands().set(ind, std::make_shared<::internal::common::Object>(tensor)); _tensors[ind.asInt()] = tensor; @@ -67,7 +67,7 @@ void TensorBuilder::allocate(void) } std::shared_ptr<::internal::common::Tensor> -TensorBuilder::at(const ::internal::tflite::operand::Index &ind) +TensorBuilder::at(const ::neurun::graph::operand::Index &ind) { return _tensors.at(ind.asInt()); } diff --git a/runtimes/neurun/src/internal/common/TensorBuilder.h b/runtimes/neurun/src/internal/common/TensorBuilder.h index 4a8ee20..2ba7833 100644 --- a/runtimes/neurun/src/internal/common/TensorBuilder.h +++ b/runtimes/neurun/src/internal/common/TensorBuilder.h @@ -21,14 +21,14 @@ class TensorBuilder : public neurun::backend::ITensorBuilder public: TensorBuilder(::internal::Plan &plan); - virtual void mark(const ::internal::tflite::operand::Index &ind) override; + virtual void mark(const ::neurun::graph::operand::Index &ind) override; virtual void markFromCommon(const ::internal::tflite::op::Node &op, int32_t ind) override; virtual void markToCommon(const ::internal::tflite::op::Node &op, int32_t ind) override; virtual void insertTensorConvertNodes(::internal::tflite::op::Sequence &operations) override; virtual void prepare(const std::map &tensor_info_ctx) override; virtual void allocate(void) override; - std::shared_ptr<::internal::common::Tensor> at(const ::internal::tflite::operand::Index &ind); + std::shared_ptr<::internal::common::Tensor> at(const ::neurun::graph::operand::Index &ind); private: ::internal::Plan &_plan; -- 2.7.4
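Illustrative sketch (not part of the patch): the core of this change is the deletion of the legacy internal::tflite::operand::Index wrapper from internal/Model.h, whose only remaining job was an implicit conversion to neurun::graph::operand::Index, together with a mechanical switch of every call site to the graph index type. The self-contained C++ sketch below condenses that relationship; the two Index classes are simplified stand-ins reconstructed from the removed hunk, not the exact upstream headers, and the main() usage is a hypothetical example rather than real neurun code.

#include <cassert>

namespace neurun { namespace graph { namespace operand {

// Simplified stand-in for the index type that survives this patch.
class Index
{
public:
  explicit Index(int value) : _value{value}
  {
    // DO NOTHING
  }

  int asInt(void) const { return _value; }

private:
  int _value;
};

} // namespace operand
} // namespace graph
} // namespace neurun

namespace internal { namespace tflite { namespace operand {

// Simplified stand-in for the legacy wrapper removed by this patch. Beyond
// holding an int, its only purpose was the implicit conversion below, kept
// "for legacy code compatibility".
class Index
{
public:
  explicit Index(int value) : _value{value}
  {
    // DO NOTHING
  }

  // NOTE Temporary casting operator for legacy code compatibility
  operator neurun::graph::operand::Index() const
  {
    return neurun::graph::operand::Index{_value};
  }

  int asInt(void) const { return _value; }

private:
  int _value;
};

} // namespace operand
} // namespace tflite
} // namespace internal

int main()
{
  // Before this patch: call sites built the legacy wrapper and leaned on the
  // implicit conversion (or an extra .asInt() round-trip) wherever a graph
  // index was required.
  const internal::tflite::operand::Index legacy{3};
  const neurun::graph::operand::Index via_conversion = legacy;

  // After this patch: the graph index is constructed directly, so the legacy
  // class and its conversion operator can be deleted outright.
  const neurun::graph::operand::Index direct{3};

  assert(via_conversion.asInt() == direct.asInt());
  return 0;
}

The second commit ("Simplify input/output index access") is visible in frontend/execution.cc, where the graph index is now built directly from model.inputs().at(input_index) instead of from model.inputs().at(input_index).asInt(); presumably the model's input/output lists already hand back graph operand indices, so the extra round-trip through an int was never needed.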