From 8c42606aa853d33e5c4bb4087a1f14f8be329c77 Mon Sep 17 00:00:00 2001
From: Cheongyo Bahk/On-Device Lab(SR)/Engineer/Samsung Electronics
Date: Fri, 6 Dec 2019 17:16:47 +0900
Subject: [PATCH 01/16] Propagate user's CMake generator setting (#9442)

This commit propagates the user's (or default) CMake generator setting to
the external build step. External build project identifiers were also
changed for CI maintenance.

Signed-off-by: Cheongyo Bahk
---
 infra/cmake/modules/ExternalBuildTools.cmake | 1 +
 infra/cmake/packages/FlatBuffersConfig.cmake | 2 +-
 infra/cmake/packages/GTestConfig.cmake | 2 +-
 infra/cmake/packages/ProtobufConfig.cmake | 2 +-
 4 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/infra/cmake/modules/ExternalBuildTools.cmake b/infra/cmake/modules/ExternalBuildTools.cmake
index 8e54e2d..e76cd6b 100644
--- a/infra/cmake/modules/ExternalBuildTools.cmake
+++ b/infra/cmake/modules/ExternalBuildTools.cmake
@@ -48,6 +48,7 @@ function(ExternalBuild_CMake)
   file(WRITE "${BUILD_STAMP_PATH}" "${PKG_IDENTIFIER}")

   execute_process(COMMAND ${CMAKE_COMMAND}
+                          -G "${CMAKE_GENERATOR}"
                           -DCMAKE_INSTALL_PREFIX=${ARG_INSTALL_DIR}
                           -DCMAKE_BUILD_TYPE=Release
                           -DCMAKE_CXX_FLAGS=${ARG_BUILD_FLAGS}
diff --git a/infra/cmake/packages/FlatBuffersConfig.cmake b/infra/cmake/packages/FlatBuffersConfig.cmake
index 4eb957c..ab0b770 100644
--- a/infra/cmake/packages/FlatBuffersConfig.cmake
+++ b/infra/cmake/packages/FlatBuffersConfig.cmake
@@ -25,7 +25,7 @@ function(_FlatBuffers_build)
                       BUILD_DIR ${CMAKE_BINARY_DIR}/externals/FLATBUFFERS/build
                       INSTALL_DIR ${EXT_OVERLAY_DIR}
                       BUILD_FLAGS ${ADDITIONAL_CXX_FLAGS}
-                      IDENTIFIER "1.10"
+                      IDENTIFIER "1.10-fix1"
                       PKG_NAME "FLATBUFFERS")
 endfunction(_FlatBuffers_build)

diff --git a/infra/cmake/packages/GTestConfig.cmake b/infra/cmake/packages/GTestConfig.cmake
index 87eb8d5..62a15e0 100644
--- a/infra/cmake/packages/GTestConfig.cmake
+++ b/infra/cmake/packages/GTestConfig.cmake
@@ -13,7 +13,7 @@ function(_GTest_build)
   ExternalBuild_CMake(CMAKE_DIR ${GTestSource_DIR}
                       BUILD_DIR ${CMAKE_BINARY_DIR}/externals/GTEST/build
                       INSTALL_DIR ${EXT_OVERLAY_DIR}
-                      IDENTIFIER "1.8.0"
+                      IDENTIFIER "1.8.0-fix1"
                       PKG_NAME "GTEST")
 endfunction(_GTest_build)

diff --git a/infra/cmake/packages/ProtobufConfig.cmake b/infra/cmake/packages/ProtobufConfig.cmake
index dee0c33..349caab 100644
--- a/infra/cmake/packages/ProtobufConfig.cmake
+++ b/infra/cmake/packages/ProtobufConfig.cmake
@@ -58,7 +58,7 @@ function(_Protobuf_build)
                       INSTALL_DIR ${EXT_OVERLAY_DIR}
                       BUILD_FLAGS -fPIC
                       EXTRA_OPTS -Dprotobuf_BUILD_TESTS=OFF -Dprotobuf_WITH_ZLIB=OFF
-                      IDENTIFIER "3.5.2"
+                      IDENTIFIER "3.5.2-fix1"
                       PKG_NAME "PROTOBUF")
 endfunction(_Protobuf_build)
--
2.7.4

From 502d86b1a69e015efdfa90aec84f6b6c4682c31c Mon Sep 17 00:00:00 2001
From: Sergei Barannikov/AI Tools Lab /SRR/Engineer/Samsung Electronics
Date: Fri, 6 Dec 2019 12:21:35 +0300
Subject: [PATCH 02/16] [neurun] Move DataType.h into ir directory (#9389)

* Move `DataType.h` into the `ir` directory.
* Move `DataType` to the `neurun::ir` namespace.
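To keep the rest of the tree compiling while call sites migrate, the moved
header leaves a temporary alias behind in the old namespace (see the
DataType.h hunk below). A minimal sketch of the pattern; the enumerator list
here is abridged to the values that appear in this series:

  namespace neurun
  {
  namespace ir
  {
  enum class DataType
  {
    FLOAT32,
    INT32,
    UINT32,
    QUANT8_ASYMM,
    BOOL8,
    // ... remaining enumerators elided
  };
  } // namespace ir

  // TODO Remove after merging 'model' and 'graph' namespaces.
  namespace model
  {
  using DataType = ir::DataType; // the old qualified name keeps compiling
  } // namespace model
  } // namespace neurun

With the alias in place, model::DataType and ir::DataType name the same
type, so the per-file renames below are source-compatible cleanups rather
than behavioral changes.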
Signed-off-by: Sergei Barannikov --- runtime/neurun/api/src/CustomKernel.cc | 8 ++++---- runtime/neurun/api/src/nnfw_api_internal.cc | 4 ++-- runtime/neurun/backend/acl_cl/KernelGenerator.cc | 6 +++--- runtime/neurun/backend/acl_cl/ShapeFixer.cc | 1 - runtime/neurun/backend/acl_common/Convert.cc | 14 +++++++------- runtime/neurun/backend/acl_common/Convert.h | 2 +- runtime/neurun/backend/acl_neon/KernelGenerator.cc | 4 ++-- runtime/neurun/backend/acl_neon/ShapeFixer.cc | 1 - runtime/neurun/backend/cpu/ShapeFixer.cc | 8 ++++---- runtime/neurun/backend/cpu/kernel/OperationUtils.h | 4 ++-- runtime/neurun/backend/cpu/kernel/PermuteLayer.cc | 4 ++-- runtime/neurun/backend/cpu/kernel/PermuteLayer.h | 4 ++-- runtime/neurun/backend/cpu/operand/Tensor.h | 2 +- runtime/neurun/backend/srcn/ConstantInitializer.cc | 2 +- runtime/neurun/backend/srcn/Convert.cc | 2 +- runtime/neurun/backend/srcn/kernel/OperationUtils.h | 4 ++-- runtime/neurun/backend/srcn/operand/Tensor.h | 2 +- .../neurun/core/include/backend/CustomKernelBuilder.h | 4 ++-- .../neurun/core/include/backend/IConstantInitializer.h | 4 ++-- runtime/neurun/core/include/{model => ir}/DataType.h | 14 ++++++++++---- runtime/neurun/core/include/model/Operand.h | 4 ++-- runtime/neurun/core/include/model/OperandInfo.h | 2 +- runtime/neurun/core/include/model/TypeInfo.h | 4 ++-- runtime/neurun/core/include/model/operation/Permute.h | 6 +++--- runtime/neurun/core/src/compiler/HEScheduler.cc | 6 +++--- runtime/neurun/core/src/compiler/OperationValidator.cc | 16 ++++++++-------- runtime/neurun/core/src/exec/ExecutionObservers.cc | 2 +- runtime/neurun/core/src/exec/ExecutorBase.cc | 4 ++-- runtime/neurun/core/src/exec/interp/Tensor.h | 6 +++--- runtime/neurun/core/src/exec/interp/operations/Add.cc | 4 ++-- .../core/src/exec/interp/operations/AvgPool2D.cc | 2 +- .../neurun/core/src/exec/interp/operations/Concat.cc | 2 +- .../neurun/core/src/exec/interp/operations/Conv2D.cc | 2 +- .../core/src/exec/interp/operations/DepthwiseConv.cc | 2 +- .../core/src/exec/interp/operations/FullyConnected.cc | 2 +- .../core/src/exec/interp/operations/MaxPool2D.cc | 2 +- .../neurun/core/src/exec/interp/operations/SoftMax.cc | 2 +- runtime/neurun/core/src/model/operation/Permute.cc | 3 +-- runtime/neurun/frontend/base_loader/base_loader.h | 18 +++++++----------- runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h | 5 +---- .../neurun/frontend/nnapi/wrapper/OperationFactory.cc | 2 +- runtime/neurun/test/core/compiler/Scheduler.cc | 2 +- runtime/neurun/test/core/exec/ExecInstance.cc | 2 +- runtime/neurun/test/core/exec/interp/ExecManager.cc | 2 +- runtime/neurun/test/graph/operand/Set.cc | 2 +- runtime/neurun/test/graph/operand/UseDef.cc | 2 +- runtime/neurun/test/graph/operation/SetIO.cc | 4 ++-- runtime/neurun/test/graph/verifier/Verifier.cc | 2 +- 48 files changed, 101 insertions(+), 105 deletions(-) rename runtime/neurun/core/include/{model => ir}/DataType.h (84%) diff --git a/runtime/neurun/api/src/CustomKernel.cc b/runtime/neurun/api/src/CustomKernel.cc index dcf6c52..60ddeed 100644 --- a/runtime/neurun/api/src/CustomKernel.cc +++ b/runtime/neurun/api/src/CustomKernel.cc @@ -45,16 +45,16 @@ public: switch (type.dtype) { - case model::DataType::FLOAT32: + case ir::DataType::FLOAT32: api_type.dtype = NNFW_TYPE_TENSOR_FLOAT32; break; - case model::DataType::INT32: + case ir::DataType::INT32: api_type.dtype = NNFW_TYPE_TENSOR_INT32; break; - case model::DataType::QUANT8_ASYMM: + case ir::DataType::QUANT8_ASYMM: api_type.dtype = NNFW_TYPE_TENSOR_QUANT8_ASYMM; break; - case 
model::DataType::BOOL8: + case ir::DataType::BOOL8: api_type.dtype = NNFW_TYPE_TENSOR_BOOL; break; default: diff --git a/runtime/neurun/api/src/nnfw_api_internal.cc b/runtime/neurun/api/src/nnfw_api_internal.cc index 574f844..dcf14a2 100644 --- a/runtime/neurun/api/src/nnfw_api_internal.cc +++ b/runtime/neurun/api/src/nnfw_api_internal.cc @@ -276,9 +276,9 @@ NNFW_STATUS nnfw_session::set_output_layout(uint32_t index, NNFW_LAYOUT layout) return NNFW_STATUS_NO_ERROR; } -static NNFW_TYPE datatype_to_nnfw_dtype(neurun::model::DataType dt) +static NNFW_TYPE datatype_to_nnfw_dtype(neurun::ir::DataType dt) { - using neurun::model::DataType; + using neurun::ir::DataType; switch (dt) { case DataType::FLOAT32: diff --git a/runtime/neurun/backend/acl_cl/KernelGenerator.cc b/runtime/neurun/backend/acl_cl/KernelGenerator.cc index 08d77c8..a8e4e0b 100644 --- a/runtime/neurun/backend/acl_cl/KernelGenerator.cc +++ b/runtime/neurun/backend/acl_cl/KernelGenerator.cc @@ -25,7 +25,7 @@ #include "kernel/ConcatLayer.h" #include "model/Index.h" -#include "model/DataType.h" +#include "ir/DataType.h" #include "model/InternalType.h" #include "compiler/IExecutionBuilder.h" #include "exec/NopFunction.h" @@ -666,7 +666,7 @@ void KernelGenerator::visit(const model::operation::StridedSlice &node) const int endData_size = _ctx.at(ends_index).shape().num_elements(); const int stridesData_size = _ctx.at(strides_index).shape().num_elements(); - using neurun::model::DataType; + using ir::DataType; UNUSED_RELEASE(startData_size); UNUSED_RELEASE(endData_size); @@ -1369,7 +1369,7 @@ void KernelGenerator::visit(const model::operation::SpaceToBatchND &node) assert(_ctx.at(paddings_index).isConstant()); std::unique_ptr<::arm_compute::IFunction> fn; - if (_ctx.at(ofm_index).typeInfo().type() == model::DataType::QUANT8_ASYMM) + if (_ctx.at(ofm_index).typeInfo().type() == ir::DataType::QUANT8_ASYMM) { // NOTE CLSpaceToBatchLayer has a bug that padding's values are 0 even when zero point of // QASYMM8 is not 0. 
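For context on the NOTE above: in asymmetric quantization the real value 0.0
maps to the zero point, not to byte 0, which is why a pad region filled with
raw zeros is wrong whenever the zero point is nonzero. A minimal illustration;
the helper below is not part of this patch:

  #include <algorithm>
  #include <cmath>
  #include <cstdint>

  // QASYMM8: real = scale * (q - zero_point), so real 0.0 quantizes to zero_point.
  uint8_t quantize(float real, float scale, int32_t zero_point)
  {
    int32_t q = static_cast<int32_t>(std::lround(real / scale)) + zero_point;
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));
  }

  // A padded region representing real 0.0 must therefore be filled with
  // quantize(0.0f, scale, zero_point) == zero_point rather than with raw 0.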
diff --git a/runtime/neurun/backend/acl_cl/ShapeFixer.cc b/runtime/neurun/backend/acl_cl/ShapeFixer.cc index d8a8035..2448e2e 100644 --- a/runtime/neurun/backend/acl_cl/ShapeFixer.cc +++ b/runtime/neurun/backend/acl_cl/ShapeFixer.cc @@ -25,7 +25,6 @@ #include "kernel/ConcatLayer.h" #include "model/Index.h" -#include "model/DataType.h" #include "model/InternalType.h" #include "compiler/IExecutionBuilder.h" #include "exec/NopFunction.h" diff --git a/runtime/neurun/backend/acl_common/Convert.cc b/runtime/neurun/backend/acl_common/Convert.cc index 6d1bc72..ed8258b 100644 --- a/runtime/neurun/backend/acl_common/Convert.cc +++ b/runtime/neurun/backend/acl_common/Convert.cc @@ -17,7 +17,7 @@ #include "Convert.h" #include "Swizzle.h" -#include "model/DataType.h" +#include "ir/DataType.h" #include namespace @@ -88,19 +88,19 @@ namespace acl_common return res; } -::arm_compute::DataType asDataType(const ::neurun::model::DataType &type) +::arm_compute::DataType asDataType(const ir::DataType type) { switch (type) { - case ::neurun::model::DataType::FLOAT32: + case ir::DataType::FLOAT32: return ::arm_compute::DataType::F32; - case ::neurun::model::DataType::INT32: + case ir::DataType::INT32: return ::arm_compute::DataType::S32; - case ::neurun::model::DataType::UINT32: + case ir::DataType::UINT32: return ::arm_compute::DataType::U32; - case ::neurun::model::DataType::QUANT8_ASYMM: + case ir::DataType::QUANT8_ASYMM: return ::arm_compute::DataType::QASYMM8; - case ::neurun::model::DataType::BOOL8: + case ir::DataType::BOOL8: return ::arm_compute::DataType::U8; default: throw std::runtime_error("Not supported, yet"); diff --git a/runtime/neurun/backend/acl_common/Convert.h b/runtime/neurun/backend/acl_common/Convert.h index 81bfc6e..66d4405 100644 --- a/runtime/neurun/backend/acl_common/Convert.h +++ b/runtime/neurun/backend/acl_common/Convert.h @@ -47,7 +47,7 @@ namespace acl_common ::arm_compute::Coordinates asTensorCoordinate(const ::neurun::util::Coordinates &coord, ir::Layout frontend_layout, ir::Layout backend_layout); -::arm_compute::DataType asDataType(const ::neurun::model::DataType &type); +::arm_compute::DataType asDataType(ir::DataType type); ::arm_compute::TensorInfo asTensorInfo(const ::neurun::model::Shape &shape, const ::neurun::model::TypeInfo &typeInfo, ir::Layout frontend_layout, ir::Layout backend_layout, diff --git a/runtime/neurun/backend/acl_neon/KernelGenerator.cc b/runtime/neurun/backend/acl_neon/KernelGenerator.cc index 2e3b51c..ea03b9e 100644 --- a/runtime/neurun/backend/acl_neon/KernelGenerator.cc +++ b/runtime/neurun/backend/acl_neon/KernelGenerator.cc @@ -25,7 +25,7 @@ #include "kernel/ConcatLayer.h" #include "util/Padding.h" #include "model/Index.h" -#include "model/DataType.h" +#include "ir/DataType.h" #include "model/InternalType.h" #include "compiler/IExecutionBuilder.h" #include "exec/NopFunction.h" @@ -1767,7 +1767,7 @@ void KernelGenerator::visit(const model::operation::StridedSlice &node) const int endData_size = _ctx.at(ends_index).shape().num_elements(); const int stridesData_size = _ctx.at(strides_index).shape().num_elements(); - using neurun::model::DataType; + using ir::DataType; UNUSED_RELEASE(startData_size); UNUSED_RELEASE(endData_size); diff --git a/runtime/neurun/backend/acl_neon/ShapeFixer.cc b/runtime/neurun/backend/acl_neon/ShapeFixer.cc index d6dbf17..3d69d8f 100644 --- a/runtime/neurun/backend/acl_neon/ShapeFixer.cc +++ b/runtime/neurun/backend/acl_neon/ShapeFixer.cc @@ -34,7 +34,6 @@ #include "kernel/ConcatLayer.h" #include "util/Padding.h" #include 
"model/Index.h" -#include "model/DataType.h" #include "model/InternalType.h" #include "compiler/IExecutionBuilder.h" #include "exec/NopFunction.h" diff --git a/runtime/neurun/backend/cpu/ShapeFixer.cc b/runtime/neurun/backend/cpu/ShapeFixer.cc index d874138..679d2cd 100644 --- a/runtime/neurun/backend/cpu/ShapeFixer.cc +++ b/runtime/neurun/backend/cpu/ShapeFixer.cc @@ -83,7 +83,7 @@ void ShapeFixer::visit(const model::operation::Add &node) const auto rhs_index{node.getInputs().at(model::operation::Add::Input::RHS)}; // Quantization : not supported - if (_ctx.at(lhs_index).typeInfo().type() == model::DataType::QUANT8_ASYMM) + if (_ctx.at(lhs_index).typeInfo().type() == ir::DataType::QUANT8_ASYMM) { throw std::runtime_error{"ShapeFixer: NYI for quantized Add"}; } @@ -107,7 +107,7 @@ void ShapeFixer::visit(const model::operation::Sub &node) const auto rhs_index{node.getInputs().at(model::operation::Sub::Input::RHS)}; // Quantization : not supported - if (_ctx.at(lhs_index).typeInfo().type() == model::DataType::QUANT8_ASYMM) + if (_ctx.at(lhs_index).typeInfo().type() == ir::DataType::QUANT8_ASYMM) { throw std::runtime_error{"ShapeFixer: NYI for quantized Sub"}; } @@ -129,7 +129,7 @@ void ShapeFixer::visit(const model::operation::Mul &node) const auto rhs_index{node.getInputs().at(model::operation::Sub::Input::RHS)}; // Quantization : not supported - if (_ctx.at(lhs_index).typeInfo().type() == model::DataType::QUANT8_ASYMM) + if (_ctx.at(lhs_index).typeInfo().type() == ir::DataType::QUANT8_ASYMM) { throw std::runtime_error{"ShapeFixer: NYI for quantized Mul"}; } @@ -155,7 +155,7 @@ void ShapeFixer::visit(const model::operation::Pad &node) const auto lhs_index{node.getInputs().at(model::operation::Sub::Input::LHS)}; // Quantization : not supported - if (_ctx.at(lhs_index).typeInfo().type() == model::DataType::QUANT8_ASYMM) + if (_ctx.at(lhs_index).typeInfo().type() == ir::DataType::QUANT8_ASYMM) { throw std::runtime_error{"ShapeFixer: NYI for quantized Pad"}; } diff --git a/runtime/neurun/backend/cpu/kernel/OperationUtils.h b/runtime/neurun/backend/cpu/kernel/OperationUtils.h index b8765a5..c466f9f 100644 --- a/runtime/neurun/backend/cpu/kernel/OperationUtils.h +++ b/runtime/neurun/backend/cpu/kernel/OperationUtils.h @@ -24,10 +24,10 @@ #include #include "model/Operand.h" -#include "model/DataType.h" +#include "ir/DataType.h" #include -using OperandType = neurun::model::DataType; +using OperandType = neurun::ir::DataType; namespace neurun { diff --git a/runtime/neurun/backend/cpu/kernel/PermuteLayer.cc b/runtime/neurun/backend/cpu/kernel/PermuteLayer.cc index 1520f99..fc758cf 100644 --- a/runtime/neurun/backend/cpu/kernel/PermuteLayer.cc +++ b/runtime/neurun/backend/cpu/kernel/PermuteLayer.cc @@ -29,7 +29,7 @@ using Type = model::operation::Permute::Type; void PermuteLayer::configure(std::shared_ptr input, std::shared_ptr output, - const model::Shape &output_shape, Type type, model::DataType dataType) + const model::Shape &output_shape, Type type, ir::DataType dataType) { _input = input; _output = output; @@ -40,7 +40,7 @@ void PermuteLayer::configure(std::shared_ptr input, void PermuteLayer::run() { - using ::neurun::model::DataType; + using ir::DataType; switch (_dataType) { case DataType::FLOAT32: diff --git a/runtime/neurun/backend/cpu/kernel/PermuteLayer.h b/runtime/neurun/backend/cpu/kernel/PermuteLayer.h index 2c7113e..3fec953 100644 --- a/runtime/neurun/backend/cpu/kernel/PermuteLayer.h +++ b/runtime/neurun/backend/cpu/kernel/PermuteLayer.h @@ -45,7 +45,7 @@ public: void 
configure(std::shared_ptr input, std::shared_ptr output, const model::Shape &output_shape, model::operation::Permute::Type type, - model::DataType dataType); + ir::DataType dataType); void run(); void runSync() { @@ -199,7 +199,7 @@ private: std::shared_ptr _output{nullptr}; model::Shape _output_shape{}; model::operation::Permute::Type _type{model::operation::Permute::Type::COPY}; - model::DataType _dataType{model::DataType::FLOAT32}; + ir::DataType _dataType{ir::DataType::FLOAT32}; }; } // namespace kernel diff --git a/runtime/neurun/backend/cpu/operand/Tensor.h b/runtime/neurun/backend/cpu/operand/Tensor.h index 7d83cda..ef0579f 100644 --- a/runtime/neurun/backend/cpu/operand/Tensor.h +++ b/runtime/neurun/backend/cpu/operand/Tensor.h @@ -42,7 +42,7 @@ public: public: void setBuffer(uint8_t *buffer) { _buffer = buffer; } - ::neurun::model::DataType data_type() const { return _info.typeInfo().type(); } + ir::DataType data_type() const { return _info.typeInfo().type(); } public: uint8_t *buffer() const override { return _buffer; } diff --git a/runtime/neurun/backend/srcn/ConstantInitializer.cc b/runtime/neurun/backend/srcn/ConstantInitializer.cc index 549248b..3260d54 100644 --- a/runtime/neurun/backend/srcn/ConstantInitializer.cc +++ b/runtime/neurun/backend/srcn/ConstantInitializer.cc @@ -99,7 +99,7 @@ void ConstantInitializer::registerPermuteKernelInitializer(const model::OperandI VERBOSE(FillOperandData) << "[SRCN] Fill data for operand " << index.value() << std::endl; const auto type = obj.typeInfo().type(); - using neurun::model::DataType; + using ir::DataType; using namespace std::placeholders; switch (type) diff --git a/runtime/neurun/backend/srcn/Convert.cc b/runtime/neurun/backend/srcn/Convert.cc index 0e347a1..267f62b 100644 --- a/runtime/neurun/backend/srcn/Convert.cc +++ b/runtime/neurun/backend/srcn/Convert.cc @@ -18,7 +18,7 @@ #include #include -#include +#include #include "Swizzle.h" #include diff --git a/runtime/neurun/backend/srcn/kernel/OperationUtils.h b/runtime/neurun/backend/srcn/kernel/OperationUtils.h index 4b6f12d..dce00b5 100644 --- a/runtime/neurun/backend/srcn/kernel/OperationUtils.h +++ b/runtime/neurun/backend/srcn/kernel/OperationUtils.h @@ -22,11 +22,11 @@ #include #include "model/Operand.h" -#include "model/DataType.h" +#include "ir/DataType.h" #include #include -using OperandType = neurun::model::DataType; +using OperandType = neurun::ir::DataType; using neurun::util::Coordinates; namespace neurun diff --git a/runtime/neurun/backend/srcn/operand/Tensor.h b/runtime/neurun/backend/srcn/operand/Tensor.h index 5cfb34d..af25593 100644 --- a/runtime/neurun/backend/srcn/operand/Tensor.h +++ b/runtime/neurun/backend/srcn/operand/Tensor.h @@ -43,7 +43,7 @@ public: public: void setBuffer(uint8_t *buffer) { _buffer = buffer; } - ::neurun::model::DataType data_type() const { return _info.typeInfo().type(); } + ir::DataType data_type() const { return _info.typeInfo().type(); } public: uint8_t *buffer() const override { return _buffer; } diff --git a/runtime/neurun/core/include/backend/CustomKernelBuilder.h b/runtime/neurun/core/include/backend/CustomKernelBuilder.h index b8fd906..848ebd5 100644 --- a/runtime/neurun/core/include/backend/CustomKernelBuilder.h +++ b/runtime/neurun/core/include/backend/CustomKernelBuilder.h @@ -20,7 +20,7 @@ #include "exec/IFunction.h" #include "misc/tensor/Shape.h" -#include "model/DataType.h" +#include "ir/DataType.h" #include #include @@ -37,7 +37,7 @@ using Shape = nnfw::misc::tensor::Shape; struct TypeInfo { Shape shape; - 
model::DataType dtype; + ir::DataType dtype; }; struct CustomKernelConfigParams diff --git a/runtime/neurun/core/include/backend/IConstantInitializer.h b/runtime/neurun/core/include/backend/IConstantInitializer.h index 0927d48..89535a4 100644 --- a/runtime/neurun/core/include/backend/IConstantInitializer.h +++ b/runtime/neurun/core/include/backend/IConstantInitializer.h @@ -215,7 +215,7 @@ protected: return; const auto type = obj.typeInfo().type(); - using neurun::model::DataType; + using ir::DataType; switch (type) { @@ -247,7 +247,7 @@ protected: return; const auto type = obj.typeInfo().type(); - using neurun::model::DataType; + using ir::DataType; using namespace std::placeholders; switch (type) diff --git a/runtime/neurun/core/include/model/DataType.h b/runtime/neurun/core/include/ir/DataType.h similarity index 84% rename from runtime/neurun/core/include/model/DataType.h rename to runtime/neurun/core/include/ir/DataType.h index 7b68dab..15f18a5 100644 --- a/runtime/neurun/core/include/model/DataType.h +++ b/runtime/neurun/core/include/ir/DataType.h @@ -14,14 +14,14 @@ * limitations under the License. */ -#ifndef __NEURUN_MODEL_DATATYPE_H__ -#define __NEURUN_MODEL_DATATYPE_H__ +#ifndef __NEURUN_IR_DATATYPE_H__ +#define __NEURUN_IR_DATATYPE_H__ #include namespace neurun { -namespace model +namespace ir { enum class DataType @@ -51,7 +51,13 @@ inline size_t sizeOfDataType(DataType data_type) } } +} // namespace ir + +// TODO Remove after merging 'model' and 'graph' namespaces. +namespace model +{ +using DataType = ir::DataType; } // namespace model } // namespace neurun -#endif // __NEURUN_MODEL_DATATYPE_H__ +#endif // __NEURUN_IR_DATATYPE_H__ diff --git a/runtime/neurun/core/include/model/Operand.h b/runtime/neurun/core/include/model/Operand.h index 470be4a..7d5832e 100644 --- a/runtime/neurun/core/include/model/Operand.h +++ b/runtime/neurun/core/include/model/Operand.h @@ -23,7 +23,7 @@ #include #include "Data.h" -#include "DataType.h" +#include "ir/DataType.h" #include "OperandInfo.h" #include "ir/operand/ParentInfo.h" // TODO Remove this dependency #include "model/OperationIndexList.h" @@ -55,7 +55,7 @@ public: void removeDef(const OperationIndex &idx); public: - void type(const DataType &type) { _info.type(type); }; + void type(const DataType type) { _info.type(type); }; public: void data(std::unique_ptr &&data) { _data = std::move(data); } diff --git a/runtime/neurun/core/include/model/OperandInfo.h b/runtime/neurun/core/include/model/OperandInfo.h index 7b0fdb0..66272c7 100644 --- a/runtime/neurun/core/include/model/OperandInfo.h +++ b/runtime/neurun/core/include/model/OperandInfo.h @@ -72,7 +72,7 @@ public: /** * @brief Set tensor data type */ - void type(const DataType &type) { _typeInfo.type(type); } + void type(const DataType type) { _typeInfo.type(type); } /** * @brief Return size of tensor (bytes) * @return Tensor size diff --git a/runtime/neurun/core/include/model/TypeInfo.h b/runtime/neurun/core/include/model/TypeInfo.h index 4d6a545..7b29085 100644 --- a/runtime/neurun/core/include/model/TypeInfo.h +++ b/runtime/neurun/core/include/model/TypeInfo.h @@ -19,7 +19,7 @@ #include -#include "DataType.h" +#include "ir/DataType.h" namespace neurun { @@ -42,7 +42,7 @@ public: int32_t offset() const { return _offset; } public: - void type(const DataType &type) { _type = type; } + void type(const DataType type) { _type = type; } private: DataType _type; diff --git a/runtime/neurun/core/include/model/operation/Permute.h b/runtime/neurun/core/include/model/operation/Permute.h index 
51d6cb2..f7458ed 100644 --- a/runtime/neurun/core/include/model/operation/Permute.h +++ b/runtime/neurun/core/include/model/operation/Permute.h @@ -58,17 +58,17 @@ public: Permute(const OperandIndex &input, const OperandIndex &output, const backend::BackendContext *input_backend_ctx, const backend::BackendContext *output_backend_ctx, Type type, - model::DataType data_type = model::DataType::FLOAT32); + DataType data_type = DataType::FLOAT32); public: const Param ¶m() const { return _param; } - model::DataType getDataType() const { return _dataType; } + DataType getDataType() const { return _dataType; } Type getPermuteType() const { return _type; } private: Param _param; Type _type; - model::DataType _dataType; + DataType _dataType; }; } // namespace operation diff --git a/runtime/neurun/core/src/compiler/HEScheduler.cc b/runtime/neurun/core/src/compiler/HEScheduler.cc index 21241c7..8f623b5 100644 --- a/runtime/neurun/core/src/compiler/HEScheduler.cc +++ b/runtime/neurun/core/src/compiler/HEScheduler.cc @@ -53,7 +53,7 @@ static bool isQuant(const graph::Graph &graph, const model::Operation &node) for (const auto &input : node.getInputs()) { const auto &obj = graph.operands().at(input); - if (obj.typeInfo().type() == model::DataType::QUANT8_ASYMM) + if (obj.typeInfo().type() == ir::DataType::QUANT8_ASYMM) { return true; } @@ -408,7 +408,7 @@ int64_t HEScheduler::DFSChildrenMaxRank(const model::OperationIndex &index) for (const auto &output : node.getOutputs()) { const auto &operand = _graph->operands().at(output); - const bool quant = operand.typeInfo().type() == model::DataType::QUANT8_ASYMM; + const bool quant = operand.typeInfo().type() == ir::DataType::QUANT8_ASYMM; // average data transfer cost of this operand's data int64_t avg_transfer_cost = 1; for (const auto *backend : _all_backends) @@ -607,7 +607,7 @@ int64_t HEScheduler::predMaxEFT(const backend::Backend *backend, const model::Op for (const auto &input_operand_idx : node.getInputs()) { const auto &input_operand = _graph->operands().at(input_operand_idx); - const bool quant = input_operand.typeInfo().type() == model::DataType::QUANT8_ASYMM; + const bool quant = input_operand.typeInfo().type() == ir::DataType::QUANT8_ASYMM; for (const auto &input_node_idx : input_operand.getDef().list()) { diff --git a/runtime/neurun/core/src/compiler/OperationValidator.cc b/runtime/neurun/core/src/compiler/OperationValidator.cc index 8cdda66..3932be5 100644 --- a/runtime/neurun/core/src/compiler/OperationValidator.cc +++ b/runtime/neurun/core/src/compiler/OperationValidator.cc @@ -87,7 +87,7 @@ void OperationValidator::visit(const model::operation::Comparison &node) UNUSED_RELEASE(rhs_index); assert(_ctx.at(lhs_index).typeInfo().type() == _ctx.at(rhs_index).typeInfo().type()); - assert(_ctx.at(output_index).typeInfo().type() == model::DataType::BOOL8); + assert(_ctx.at(output_index).typeInfo().type() == ir::DataType::BOOL8); } void OperationValidator::visit(const model::operation::Softmax &node) @@ -377,7 +377,7 @@ void OperationValidator::visit(const model::operation::EmbeddingLookup &node) // Verify operand here, not at SimpleEmbeddingLookup::configure() to avoid acl's modifying // TensorShape sometimes(Issue: https://github.sec.samsung.net/STAR/nnfw/issues/729) { - assert(lookups_obj.typeInfo().type() == neurun::model::DataType::INT32); + assert(lookups_obj.typeInfo().type() == ir::DataType::INT32); const auto &output_shape = output_obj.shape(); const auto &lookups_shape = lookups_obj.shape(); @@ -441,9 +441,9 @@ void 
OperationValidator::visit(const model::operation::HashtableLookup &node) const auto &keys_obj = _ctx.at(keys_index); const auto &values_obj = _ctx.at(values_index); - assert(lookups_obj.typeInfo().type() == neurun::model::DataType::INT32); - assert(keys_obj.typeInfo().type() == neurun::model::DataType::INT32); - assert(hits_obj.typeInfo().type() == neurun::model::DataType::QUANT8_ASYMM); + assert(lookups_obj.typeInfo().type() == ir::DataType::INT32); + assert(keys_obj.typeInfo().type() == ir::DataType::INT32); + assert(hits_obj.typeInfo().type() == ir::DataType::QUANT8_ASYMM); const auto &output_shape = output_obj.shape(); const auto &hits_shape = hits_obj.shape(); @@ -533,8 +533,8 @@ void OperationValidator::visit(const model::operation::Dequantize &node) assert(_ctx.at(input_index).shape().rank() <= 4); assert(_ctx.at(input_index).shape() == _ctx.at(output_index).shape()); - assert(_ctx.at(input_index).typeInfo().type() == neurun::model::DataType::QUANT8_ASYMM); - assert(_ctx.at(output_index).typeInfo().type() == neurun::model::DataType::FLOAT32); + assert(_ctx.at(input_index).typeInfo().type() == ir::DataType::QUANT8_ASYMM); + assert(_ctx.at(output_index).typeInfo().type() == ir::DataType::FLOAT32); } void OperationValidator::visit(const model::operation::Mean &node) @@ -948,7 +948,7 @@ void OperationValidator::visit(const model::operation::Pad &node) assert(pad_shape.rank() == 2); assert(pad_shape.dim(0) == input_rank); assert(pad_shape.dim(1) == 2); - assert(_ctx.at(pad_index).typeInfo().type() == model::DataType::INT32); + assert(_ctx.at(pad_index).typeInfo().type() == ir::DataType::INT32); assert(_ctx.at(input_index).shape().rank() == _ctx.at(output_index).shape().rank()); } diff --git a/runtime/neurun/core/src/exec/ExecutionObservers.cc b/runtime/neurun/core/src/exec/ExecutionObservers.cc index 9d3b195..bc6a94b 100644 --- a/runtime/neurun/core/src/exec/ExecutionObservers.cc +++ b/runtime/neurun/core/src/exec/ExecutionObservers.cc @@ -52,7 +52,7 @@ void ProfileObserver::handleEnd(IExecutor *exec, const model::Subgraph *subgraph // fill ExecTime: bool is_quantized = exec->graph().operands().at(node->getInputs().at(0)).typeInfo().type() == - model::DataType::QUANT8_ASYMM; + ir::DataType::QUANT8_ASYMM; uint32_t size = 0; for (const auto &input : node->getInputs()) diff --git a/runtime/neurun/core/src/exec/ExecutorBase.cc b/runtime/neurun/core/src/exec/ExecutorBase.cc index 006c02f..ba316ed 100644 --- a/runtime/neurun/core/src/exec/ExecutorBase.cc +++ b/runtime/neurun/core/src/exec/ExecutorBase.cc @@ -34,7 +34,7 @@ std::unique_ptr ExecutorBase::source(const model::IOIndex &index, const model::TypeInfo &type, const void *buffer, size_t length, ir::Layout io_layout) { - using ::neurun::model::DataType; + using ir::DataType; switch (type.type()) { case DataType::FLOAT32: @@ -54,7 +54,7 @@ std::unique_ptr ExecutorBase::source(const model::IOIndex &index, std::unique_ptr ExecutorBase::sink(const model::IOIndex &index, const model::TypeInfo &type, void *buffer, size_t length, ir::Layout io_layout) { - using ::neurun::model::DataType; + using ir::DataType; switch (type.type()) { case DataType::FLOAT32: diff --git a/runtime/neurun/core/src/exec/interp/Tensor.h b/runtime/neurun/core/src/exec/interp/Tensor.h index 4617f7e..947ae5e 100644 --- a/runtime/neurun/core/src/exec/interp/Tensor.h +++ b/runtime/neurun/core/src/exec/interp/Tensor.h @@ -81,7 +81,7 @@ public: * @brief Return data type of tensor * @return Data type of tensor */ - virtual model::DataType data_type() const = 0; + virtual 
ir::DataType data_type() const = 0; /** * @brief Return TensorInfo * @return TensorInfo @@ -125,7 +125,7 @@ public: size_t calcOffset(const util::Coordinates &coords) const override; ir::Layout layout() const override; bool has_padding() const override { return false; } - model::DataType data_type() const override { return _info.typeInfo().type(); } + ir::DataType data_type() const override { return _info.typeInfo().type(); } const model::OperandInfo &tensorInfo() const override { return _info; } uint64_t num_elements() const override { return _info.shape().num_elements(); }; @@ -164,7 +164,7 @@ public: size_t calcOffset(const util::Coordinates &coords) const override; ir::Layout layout() const override; bool has_padding() const override { return false; } - model::DataType data_type() const override { return _info.typeInfo().type(); } + ir::DataType data_type() const override { return _info.typeInfo().type(); } const model::OperandInfo &tensorInfo() const override { return _info; } uint64_t num_elements() const override { return _info.shape().num_elements(); }; diff --git a/runtime/neurun/core/src/exec/interp/operations/Add.cc b/runtime/neurun/core/src/exec/interp/operations/Add.cc index 57fd2fd..c21a9a8 100644 --- a/runtime/neurun/core/src/exec/interp/operations/Add.cc +++ b/runtime/neurun/core/src/exec/interp/operations/Add.cc @@ -124,11 +124,11 @@ void invokeAdd(const ExecEnv *env, const model::Operation &node) const auto out_tensor = env->tensorAt(out_index); const auto data_type = lhs_tensor->data_type(); - if (data_type == model::DataType::INT32) + if (data_type == ir::DataType::INT32) { invoke(lhs_tensor, rhs_tensor, out_tensor, add_node.param()); } - else if (data_type == model::DataType::FLOAT32) + else if (data_type == ir::DataType::FLOAT32) { invoke(lhs_tensor, rhs_tensor, out_tensor, add_node.param()); } diff --git a/runtime/neurun/core/src/exec/interp/operations/AvgPool2D.cc b/runtime/neurun/core/src/exec/interp/operations/AvgPool2D.cc index 660514b..81ceaea 100644 --- a/runtime/neurun/core/src/exec/interp/operations/AvgPool2D.cc +++ b/runtime/neurun/core/src/exec/interp/operations/AvgPool2D.cc @@ -107,7 +107,7 @@ void invokeAvgPool2D(const ExecEnv *env, const model::Operation &node) const auto out_tensor = env->tensorAt(out_index); const auto data_type = in_tensor->data_type(); - if (data_type == model::DataType::FLOAT32) + if (data_type == ir::DataType::FLOAT32) { invoke(in_tensor, out_tensor, avgpool_node.param()); } diff --git a/runtime/neurun/core/src/exec/interp/operations/Concat.cc b/runtime/neurun/core/src/exec/interp/operations/Concat.cc index fcd1780..bcd90c5 100644 --- a/runtime/neurun/core/src/exec/interp/operations/Concat.cc +++ b/runtime/neurun/core/src/exec/interp/operations/Concat.cc @@ -130,7 +130,7 @@ void invokeConcat(const ExecEnv *env, const model::Operation &node) const uint32_t axis = (axis_raw < 0) ? 
(axis_raw + out_tensor->num_dimensions()) : axis_raw; const auto data_type = in_tensors[0]->data_type(); - if (data_type == model::DataType::FLOAT32) + if (data_type == ir::DataType::FLOAT32) { invoke(in_tensors, out_tensor, axis); } diff --git a/runtime/neurun/core/src/exec/interp/operations/Conv2D.cc b/runtime/neurun/core/src/exec/interp/operations/Conv2D.cc index d3b046f..cb2b97f 100644 --- a/runtime/neurun/core/src/exec/interp/operations/Conv2D.cc +++ b/runtime/neurun/core/src/exec/interp/operations/Conv2D.cc @@ -131,7 +131,7 @@ void invokeConv2D(const ExecEnv *env, const model::Operation &node) const auto ofm_tensor = env->tensorAt(ofm_index); const auto data_type = ifm_tensor->data_type(); - if (data_type == model::DataType::FLOAT32) + if (data_type == ir::DataType::FLOAT32) { invoke(ifm_tensor, ker_tensor, bias_tensor, ofm_tensor, conv_node.param()); } diff --git a/runtime/neurun/core/src/exec/interp/operations/DepthwiseConv.cc b/runtime/neurun/core/src/exec/interp/operations/DepthwiseConv.cc index 66dab69..c717f3d 100644 --- a/runtime/neurun/core/src/exec/interp/operations/DepthwiseConv.cc +++ b/runtime/neurun/core/src/exec/interp/operations/DepthwiseConv.cc @@ -136,7 +136,7 @@ void invokeDepthwiseConv(const ExecEnv *env, const model::Operation &node) const auto ofm_tensor = env->tensorAt(ofm_index); const auto data_type = ifm_tensor->data_type(); - if (data_type == model::DataType::FLOAT32) + if (data_type == ir::DataType::FLOAT32) { invoke(ifm_tensor, ker_tensor, bias_tensor, ofm_tensor, conv_node.param()); } diff --git a/runtime/neurun/core/src/exec/interp/operations/FullyConnected.cc b/runtime/neurun/core/src/exec/interp/operations/FullyConnected.cc index d03b0d0..0f1c0b9 100644 --- a/runtime/neurun/core/src/exec/interp/operations/FullyConnected.cc +++ b/runtime/neurun/core/src/exec/interp/operations/FullyConnected.cc @@ -115,7 +115,7 @@ void invokeFC(const ExecEnv *env, const model::Operation &node) const auto ofm_tensor = env->tensorAt(ofm_index); const auto data_type = ifm_tensor->data_type(); - if (data_type == model::DataType::FLOAT32) + if (data_type == ir::DataType::FLOAT32) { invoke(ifm_tensor, ker_tensor, bias_tensor, ofm_tensor, conv_node.param()); } diff --git a/runtime/neurun/core/src/exec/interp/operations/MaxPool2D.cc b/runtime/neurun/core/src/exec/interp/operations/MaxPool2D.cc index cd4e721..7d5256f 100644 --- a/runtime/neurun/core/src/exec/interp/operations/MaxPool2D.cc +++ b/runtime/neurun/core/src/exec/interp/operations/MaxPool2D.cc @@ -106,7 +106,7 @@ void invokeMaxPool2D(const ExecEnv *env, const model::Operation &node) const auto out_tensor = env->tensorAt(out_index); const auto data_type = in_tensor->data_type(); - if (data_type == model::DataType::FLOAT32) + if (data_type == ir::DataType::FLOAT32) { invoke(in_tensor, out_tensor, maxpool_node.param()); } diff --git a/runtime/neurun/core/src/exec/interp/operations/SoftMax.cc b/runtime/neurun/core/src/exec/interp/operations/SoftMax.cc index f7ddf02..5b1be21 100644 --- a/runtime/neurun/core/src/exec/interp/operations/SoftMax.cc +++ b/runtime/neurun/core/src/exec/interp/operations/SoftMax.cc @@ -141,7 +141,7 @@ void invokeSoftMax(const ExecEnv *env, const model::Operation &node) const auto in_data_type = in_tensor->data_type(); const auto out_data_type = out_tensor->data_type(); - if ((in_data_type == model::DataType::FLOAT32) && (out_data_type == model::DataType::FLOAT32)) + if ((in_data_type == ir::DataType::FLOAT32) && (out_data_type == ir::DataType::FLOAT32)) { invoke(in_tensor, out_tensor, 
softmax_node.param()); } diff --git a/runtime/neurun/core/src/model/operation/Permute.cc b/runtime/neurun/core/src/model/operation/Permute.cc index 91f771a..6c5f697 100644 --- a/runtime/neurun/core/src/model/operation/Permute.cc +++ b/runtime/neurun/core/src/model/operation/Permute.cc @@ -31,8 +31,7 @@ void Permute::accept(OperationVisitor &v) const { v.visit(*this); } Permute::Permute(const OperandIndex &input, const OperandIndex &output, const backend::BackendContext *input_backend_ctx, - const backend::BackendContext *output_backend_ctx, Type type, - model::DataType data_type) + const backend::BackendContext *output_backend_ctx, Type type, DataType data_type) : model::Operation{OperandConstraint::createExact(1u)}, _param{input_backend_ctx, output_backend_ctx}, _type{type}, _dataType{data_type} { diff --git a/runtime/neurun/frontend/base_loader/base_loader.h b/runtime/neurun/frontend/base_loader/base_loader.h index 2f28ac3..d95678a 100644 --- a/runtime/neurun/frontend/base_loader/base_loader.h +++ b/runtime/neurun/frontend/base_loader/base_loader.h @@ -66,7 +66,7 @@ protected: // Helper functions model::Activation convertActivation(ActivationFunctionType type); - model::DataType tensorTypeToDataType(TensorType type); + ir::DataType tensorTypeToDataType(TensorType type); // Create operands form tflite::Tensor model::OperandIndex loadOperand(const Tensor *tensor); @@ -174,19 +174,19 @@ model::Activation BaseLoader::BaseLoader::convertA } template -model::DataType +ir::DataType BaseLoader::BaseLoader::tensorTypeToDataType(const TensorType type) { switch (type) { case TensorType::TensorType_FLOAT32: - return model::DataType::FLOAT32; + return ir::DataType::FLOAT32; case TensorType::TensorType_INT32: - return model::DataType::INT32; + return ir::DataType::INT32; case TensorType::TensorType_BOOL: - return model::DataType::BOOL8; + return ir::DataType::BOOL8; case TensorType::TensorType_UINT8: - return model::DataType::QUANT8_ASYMM; + return ir::DataType::QUANT8_ASYMM; default: throw std::runtime_error( std::string("Unsupported tensor type: ").append(EnumNameTensorType(type))); @@ -204,7 +204,7 @@ model::OperandIndex BaseLoader::loadOperand(const shape.append(dim); } // Type - model::DataType data_type = tensorTypeToDataType(tensor->type()); + ir::DataType data_type = tensorTypeToDataType(tensor->type()); // Quantization auto q_params = tensor->quantization(); float scale = 0.0; @@ -279,7 +279,6 @@ void BaseLoader::loadStridesAndPaddings(Param &par const OptionsType *options) { model::Shape shape; - model::TypeInfo type_info(model::DataType::INT32); // Strides param.stride.vertical = options->stride_w(); param.stride.horizontal = options->stride_h(); @@ -300,7 +299,6 @@ void BaseLoader::loadPool2D(Param ¶m, loadStridesAndPaddings(param, options); // Filter width and height model::Shape shape; - model::TypeInfo type_info(model::DataType::INT32); // Strides param.kw = options->filter_width(); param.kh = options->filter_height(); @@ -339,7 +337,6 @@ void BaseLoader::loadDepthwiseConv2D(const Operato loadStridesAndPaddings(param, options); // Multiplier model::Shape shape; - model::TypeInfo type_info(model::DataType::INT32); param.multiplier = options->depth_multiplier(); // Dilation h/w factor unused std::unique_ptr new_op( @@ -441,7 +438,6 @@ void BaseLoader::loadConcatenation(const Operator const auto *options = op->builtin_options_as_ConcatenationOptions(); // Axis model::Shape shape; - model::TypeInfo type_info(model::DataType::INT32); param.axis = options->axis(); // activation unused diff 
--git a/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h b/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h index bd7fc6c..d736414 100644 --- a/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h +++ b/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h @@ -36,11 +36,8 @@ public: * @brief Convert data type from NNAPI to internal data type * @param[in] type NNAPI's data type * @return neurun's internal data type - * @note Now neurun::model::DataType shares the same enum value\n - with OperandCode in NeuralNetworks.h.\n - If we don't share same value, we must fix this mapping function. */ - static ::neurun::model::DataType getDataType(OperandCode type); + static neurun::ir::DataType getDataType(OperandCode type); /** * @brief Convert operand type info from NNAPI to interanl operand type info diff --git a/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.cc b/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.cc index b88629e..b3fef40 100644 --- a/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.cc +++ b/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.cc @@ -24,7 +24,7 @@ namespace { using namespace neurun::model; -void replaceDataType(Operands &operands, const OperandIndex &index, const DataType &type) +void replaceDataType(Operands &operands, const OperandIndex &index, const DataType type) { assert(operands.exist(index)); operands.at(index).type(type); diff --git a/runtime/neurun/test/core/compiler/Scheduler.cc b/runtime/neurun/test/core/compiler/Scheduler.cc index 04f6d71..66bfc3e 100644 --- a/runtime/neurun/test/core/compiler/Scheduler.cc +++ b/runtime/neurun/test/core/compiler/Scheduler.cc @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include diff --git a/runtime/neurun/test/core/exec/ExecInstance.cc b/runtime/neurun/test/core/exec/ExecInstance.cc index bd4a3c8..bbe8ba7 100644 --- a/runtime/neurun/test/core/exec/ExecInstance.cc +++ b/runtime/neurun/test/core/exec/ExecInstance.cc @@ -26,7 +26,7 @@ namespace { using namespace neurun::model; -using DataType = neurun::model::DataType; +using DataType = DataType; class CompiledMockUpModel { diff --git a/runtime/neurun/test/core/exec/interp/ExecManager.cc b/runtime/neurun/test/core/exec/interp/ExecManager.cc index 2915e78..2eba0ab 100644 --- a/runtime/neurun/test/core/exec/interp/ExecManager.cc +++ b/runtime/neurun/test/core/exec/interp/ExecManager.cc @@ -27,7 +27,7 @@ namespace { using namespace neurun::model; -using DataType = neurun::model::DataType; +using DataType = DataType; using ExecManager = neurun::exec::interp::ExecManager; using Execution = neurun::exec::Execution; diff --git a/runtime/neurun/test/graph/operand/Set.cc b/runtime/neurun/test/graph/operand/Set.cc index fce4223..44ede8c 100644 --- a/runtime/neurun/test/graph/operand/Set.cc +++ b/runtime/neurun/test/graph/operand/Set.cc @@ -30,7 +30,7 @@ TEST(graph_operand_Set, set_test) shape1.dim(2) = 30; shape1.dim(3) = 40; - ::neurun::model::TypeInfo type{neurun::model::DataType::INT32}; + ::neurun::model::TypeInfo type{neurun::ir::DataType::INT32}; set.emplace(shape0, type); set.emplace(shape1, type); diff --git a/runtime/neurun/test/graph/operand/UseDef.cc b/runtime/neurun/test/graph/operand/UseDef.cc index 1d7d64a..9e945ab 100644 --- a/runtime/neurun/test/graph/operand/UseDef.cc +++ b/runtime/neurun/test/graph/operand/UseDef.cc @@ -37,7 +37,7 @@ TEST(graph_operand_usedef, usedef_test) neurun::graph::verifier::DAGChecker verifier; neurun::model::Shape shape(3); - neurun::model::TypeInfo 
type{neurun::model::DataType::INT32};
+  neurun::model::TypeInfo type{neurun::ir::DataType::INT32};

   // Model Input/Output
   auto input_operand = graph.addOperand(shape, type);
diff --git a/runtime/neurun/test/graph/operation/SetIO.cc b/runtime/neurun/test/graph/operation/SetIO.cc
index 29309f9..ab0193f 100644
--- a/runtime/neurun/test/graph/operation/SetIO.cc
+++ b/runtime/neurun/test/graph/operation/SetIO.cc
@@ -34,7 +34,7 @@ TEST(graph_operation_setIO, operation_setIO_conv)
   neurun::graph::Graph graph;

   neurun::model::Shape shape{3};
-  neurun::model::TypeInfo type{neurun::model::DataType::INT32};
+  neurun::model::TypeInfo type{neurun::ir::DataType::INT32};

   // Add Conv
   using Graph = neurun::model::operation::Conv2D;
@@ -68,7 +68,7 @@ TEST(graph_operation_setIO, operation_setIO_concat)

   neurun::model::Shape shape{3};

-  neurun::model::TypeInfo type{neurun::model::DataType::INT32};
+  neurun::model::TypeInfo type{neurun::ir::DataType::INT32};

   using Graph = neurun::model::operation::Concat;

diff --git a/runtime/neurun/test/graph/verifier/Verifier.cc b/runtime/neurun/test/graph/verifier/Verifier.cc
index d06c8ef..b430261 100644
--- a/runtime/neurun/test/graph/verifier/Verifier.cc
+++ b/runtime/neurun/test/graph/verifier/Verifier.cc
@@ -31,7 +31,7 @@ TEST(Verifier, dag_checker)
   neurun::graph::Graph graph;

   ::neurun::model::Shape shape{3};
-  ::neurun::model::TypeInfo type{neurun::model::DataType::INT32};
+  ::neurun::model::TypeInfo type{neurun::ir::DataType::INT32};

   auto operand1 = graph.addOperand(shape, type);
   auto operand2 = graph.addOperand(shape, type);
--
2.7.4

From 7809ae4c293138d55aec2c63e2449fc6790e2caf Mon Sep 17 00:00:00 2001
From: Jiseob Jang/On-Device Lab(SR)/Engineer/Samsung Electronics
Date: Fri, 6 Dec 2019 18:48:04 +0900
Subject: [PATCH 03/16] Adjust out-of-range values generated by RandomGenerator (#9418)

This commit clamps out-of-range values generated by RandomGenerator to the
end points of the range.

Signed-off-by: jiseob.jang
---
 runtime/libs/tflite/src/Diff.cpp | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/runtime/libs/tflite/src/Diff.cpp b/runtime/libs/tflite/src/Diff.cpp
index 1a3ac85..8ddad6c 100644
--- a/runtime/libs/tflite/src/Diff.cpp
+++ b/runtime/libs/tflite/src/Diff.cpp
@@ -270,7 +270,24 @@ template <> uint8_t RandomGenerator::generate<uint8_t>(void)
   // Most _dist values range from -5.0 to 5.0.
   float min_range = -5.0f;
   float max_range = 5.0f;
-  return static_cast<uint8_t>((_dist(_rand) - min_range) * type_range / (max_range - min_range));
+  // NOTE _dist(_rand) follows a Gaussian distribution with mean 0 and standard deviation 2.
+  // The affine transform below rescales and shifts it so that its mean becomes 127.5 and its
+  // range is roughly [0, 255].
+  float shifted_relative_val = (_dist(_rand) - min_range) * type_range / (max_range - min_range);
+
+  // If shifted_relative_val falls outside the range, it is clamped to the nearest end point.
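+  // (Illustrative numbers, not part of the original change: with _dist ~ N(0, 2),
+  // [-5, 5] spans 2.5 standard deviations, so about 98.8% of draws map into
+  // [0, 255] and only ~1.2% reach the clamping below.)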
+  if (shifted_relative_val < 0.0f)
+  {
+    return 0;
+  }
+  else if (shifted_relative_val > type_range)
+  {
+    return 255;
+  }
+
+  // Convert shifted_relative_val from float to uint8
+  return static_cast<uint8_t>(shifted_relative_val);
 }

 #include "tflite/TensorLogger.h"
--
2.7.4

From 20ba332946b791b2bc62a77d940ab75077beda6e Mon Sep 17 00:00:00 2001
From: Sergei Barannikov/AI Tools Lab /SRR/Engineer/Samsung Electronics
Date: Fri, 6 Dec 2019 13:29:59 +0300
Subject: [PATCH 04/16] [mir_onnx] Support models stored as text (#9426)

Add support for models stored as text.

Signed-off-by: Sergei Barannikov
---
 compiler/mir-onnx-importer/ONNXImporterImpl.cpp | 74 ++++++++++++++++---------
 compiler/mir-onnx-importer/ONNXImporterImpl.h | 5 +-
 2 files changed, 53 insertions(+), 26 deletions(-)

diff --git a/compiler/mir-onnx-importer/ONNXImporterImpl.cpp b/compiler/mir-onnx-importer/ONNXImporterImpl.cpp
index cbe03ed..800e982 100644
--- a/compiler/mir-onnx-importer/ONNXImporterImpl.cpp
+++ b/compiler/mir-onnx-importer/ONNXImporterImpl.cpp
@@ -19,10 +19,8 @@
 #include "ONNXOpRegistration.h"
 #include "onnx/onnx.pb.h"

-#include "mir/Operation.h"
 #include "mir/Shape.h"
 #include "mir/TensorUtil.h"
-#include "mir/TensorVariant.h"

 #include "mir/ops/ConstantOp.h"

@@ -30,45 +28,41 @@
 #include
 #include
+#include
 #include
 #include
 #include
 #include
-#include

-namespace
+namespace mir_onnx
 {

-using namespace mir_onnx;
+namespace
+{

 class ONNXImporterImpl final
 {
 public:
-  explicit ONNXImporterImpl(std::string filename);
+  ONNXImporterImpl();
   ~ONNXImporterImpl();

   /// @brief Load the model and convert it into a MIR Graph.
-  std::unique_ptr importModel();
+  std::unique_ptr importModelFromBinaryFile(const std::string &filename);
+  std::unique_ptr importModelFromTextFile(const std::string &filename);

 private:
-  void import();
   std::unique_ptr createIR();
   void createGraphInputs();
   void collectUnsupportedOps();

   // Maps ONNX tensor names to corresponding MIR operation outputs.
- std::string _modelFilename; std::unique_ptr _model; std::unique_ptr _context; std::unique_ptr _graph; }; -ONNXImporterImpl::ONNXImporterImpl(std::string filename) : _modelFilename(std::move(filename)) -{ - registerSupportedOps(); -} +ONNXImporterImpl::ONNXImporterImpl() { registerSupportedOps(); } ONNXImporterImpl::~ONNXImporterImpl() = default; -static void loadModelFile(const std::string &filename, onnx::ModelProto *model) +void loadModelFromBinaryFile(const std::string &filename, onnx::ModelProto *model) { GOOGLE_PROTOBUF_VERIFY_VERSION; @@ -92,12 +86,39 @@ static void loadModelFile(const std::string &filename, onnx::ModelProto *model) throw std::runtime_error("File \"" + filename + "\" has not been consumed entirely."); } -void ONNXImporterImpl::import() +void loadModelFromTextFile(const std::string &filename, onnx::ModelProto *model) +{ + GOOGLE_PROTOBUF_VERIFY_VERSION; + + int file_handle = open(filename.c_str(), O_RDONLY); + + if (file_handle == -1) + throw std::runtime_error("Couldn't open file \"" + filename + "\": " + std::strerror(errno) + + "."); + + google::protobuf::io::FileInputStream file_stream(file_handle); + file_stream.SetCloseOnDelete(true); + + if (!google::protobuf::TextFormat::Parse(&file_stream, model)) + throw std::runtime_error("Couldn't parse file \"" + filename + "\"."); +} + +std::unique_ptr ONNXImporterImpl::importModelFromBinaryFile(const std::string &filename) { _model = stdex::make_unique(); - loadModelFile(_modelFilename, _model.get()); + loadModelFromBinaryFile(filename, _model.get()); collectUnsupportedOps(); + return createIR(); +} + +std::unique_ptr ONNXImporterImpl::importModelFromTextFile(const std::string &filename) +{ + _model = stdex::make_unique(); + loadModelFromTextFile(filename, _model.get()); + + collectUnsupportedOps(); + return createIR(); } void ONNXImporterImpl::collectUnsupportedOps() @@ -201,20 +222,23 @@ std::unique_ptr ONNXImporterImpl::createIR() return std::move(_graph); } -std::unique_ptr ONNXImporterImpl::importModel() +} // namespace + +std::unique_ptr importModelFromBinaryFile(const std::string &filename) { - import(); - return createIR(); -} + ONNXImporterImpl importer; + return importer.importModelFromBinaryFile(filename); } -namespace mir_onnx +std::unique_ptr importModelFromTextFile(const std::string &filename) { + ONNXImporterImpl importer; + return importer.importModelFromTextFile(filename); +} -std::unique_ptr loadModel(std::string filename) +std::unique_ptr loadModel(const std::string &filename) { - ONNXImporterImpl importer(std::move(filename)); - return importer.importModel(); + return importModelFromBinaryFile(filename); } } // namespace mir_onnx diff --git a/compiler/mir-onnx-importer/ONNXImporterImpl.h b/compiler/mir-onnx-importer/ONNXImporterImpl.h index 7b13370..02a49b3 100644 --- a/compiler/mir-onnx-importer/ONNXImporterImpl.h +++ b/compiler/mir-onnx-importer/ONNXImporterImpl.h @@ -25,7 +25,10 @@ namespace mir_onnx { -std::unique_ptr loadModel(std::string filename); +std::unique_ptr importModelFromBinaryFile(const std::string &filename); +std::unique_ptr importModelFromTextFile(const std::string &filename); +// TODO Remove after changing all uses. 
+std::unique_ptr loadModel(const std::string &filename); } // namespace mir_onnx -- 2.7.4 From 08fdcad95bacf594e3512e1aae6745922d5b2502 Mon Sep 17 00:00:00 2001 From: Sergei Barannikov/AI Tools Lab /SRR/Engineer/Samsung Electronics Date: Fri, 6 Dec 2019 14:05:05 +0300 Subject: [PATCH 05/16] [neurun] Move InternalType.h into ir directory (#9390) * Move `InternalType.h` in `ir` directory. * Move `Activation`, `PaddingType`, `ExplicitPadding`, `Padding`, `Stride` to `neurun::ir` namespace. Signed-off-by: Sergei Barannikov --- runtime/neurun/backend/acl_cl/KernelGenerator.cc | 20 +++++++-------- runtime/neurun/backend/acl_cl/ShapeFixer.cc | 1 - runtime/neurun/backend/acl_common/Convert.cc | 19 +++++++------- runtime/neurun/backend/acl_common/Convert.h | 9 +++---- runtime/neurun/backend/acl_neon/KernelGenerator.cc | 20 +++++++-------- runtime/neurun/backend/acl_neon/ShapeFixer.cc | 1 - runtime/neurun/backend/cpu/kernel/AddLayer.cc | 2 +- runtime/neurun/backend/cpu/kernel/AddLayer.h | 4 +-- runtime/neurun/backend/cpu/kernel/AvgPoolLayer.cc | 4 +-- runtime/neurun/backend/cpu/kernel/AvgPoolLayer.h | 6 ++--- .../neurun/backend/cpu/kernel/ConvolutionLayer.cc | 4 +-- .../neurun/backend/cpu/kernel/ConvolutionLayer.h | 4 +-- .../cpu/kernel/DepthwiseConvolutionLayer.cc | 4 +-- .../backend/cpu/kernel/DepthwiseConvolutionLayer.h | 4 +-- .../backend/cpu/kernel/FullyConnectedLayer.cc | 4 +-- .../backend/cpu/kernel/FullyConnectedLayer.h | 6 ++--- runtime/neurun/backend/cpu/kernel/MaxPoolLayer.cc | 4 +-- runtime/neurun/backend/cpu/kernel/MaxPoolLayer.h | 6 ++--- runtime/neurun/backend/cpu/kernel/MulLayer.cc | 2 +- runtime/neurun/backend/cpu/kernel/MulLayer.h | 4 +-- .../neurun/backend/cpu/kernel/OperationUtils.cc | 27 ++++++++++--------- runtime/neurun/backend/cpu/kernel/OperationUtils.h | 9 +++---- runtime/neurun/backend/cpu/kernel/SubLayer.cc | 2 +- runtime/neurun/backend/cpu/kernel/SubLayer.h | 4 +-- runtime/neurun/backend/srcn/KernelGenerator.cc | 2 +- runtime/neurun/backend/srcn/kernel/AddLayer.cc | 4 +-- runtime/neurun/backend/srcn/kernel/AddLayer.h | 4 +-- .../backend/srcn/kernel/InstanceNormLayer.cc | 12 ++++----- .../neurun/backend/srcn/kernel/InstanceNormLayer.h | 4 +-- .../neurun/backend/srcn/kernel/OperationUtils.h | 2 +- .../core/include/{model => ir}/InternalType.h | 18 ++++++++++--- runtime/neurun/core/include/model/operation/Add.h | 2 +- .../core/include/model/operation/AvgPool2D.h | 2 +- .../neurun/core/include/model/operation/Conv2D.h | 2 +- .../core/include/model/operation/DepthwiseConv2D.h | 2 +- runtime/neurun/core/include/model/operation/Div.h | 2 +- .../core/include/model/operation/FullyConnected.h | 2 +- .../core/include/model/operation/InstanceNorm.h | 2 +- .../neurun/core/include/model/operation/L2Pool2D.h | 2 +- runtime/neurun/core/include/model/operation/LSTM.h | 2 +- .../core/include/model/operation/MaxPool2D.h | 2 +- runtime/neurun/core/include/model/operation/Mul.h | 2 +- runtime/neurun/core/include/model/operation/RNN.h | 2 +- runtime/neurun/core/include/model/operation/Sub.h | 2 +- .../core/include/model/operation/TransposeConv.h | 2 +- runtime/neurun/core/include/util/Padding.h | 18 ++++++------- runtime/neurun/core/include/util/Utils.h | 6 ++--- .../neurun/core/src/compiler/OperationValidator.cc | 4 +-- .../src/exec/interp/operations/OperationUtil.h | 12 ++++----- runtime/neurun/core/src/ir/dumper/Dumper.cc | 8 +++--- runtime/neurun/core/src/util/Padding.cc | 30 +++++++++++----------- runtime/neurun/core/src/util/ShapeInference.cc | 12 ++++----- 
runtime/neurun/core/src/util/Utils.cc | 12 ++++----- runtime/neurun/frontend/base_loader/base_loader.h | 18 ++++++------- .../neurun/frontend/nnapi/wrapper/NNAPIConvert.h | 8 +++--- runtime/neurun/test/core/compiler/Scheduler.cc | 2 +- runtime/neurun/test/core/exec/ExecInstance.cc | 4 +-- .../neurun/test/core/exec/interp/ExecManager.cc | 8 +++--- runtime/neurun/test/graph/operation/SetIO.cc | 4 +-- 59 files changed, 197 insertions(+), 193 deletions(-) rename runtime/neurun/core/include/{model => ir}/InternalType.h (76%) diff --git a/runtime/neurun/backend/acl_cl/KernelGenerator.cc b/runtime/neurun/backend/acl_cl/KernelGenerator.cc index a8e4e0b..0be03c5 100644 --- a/runtime/neurun/backend/acl_cl/KernelGenerator.cc +++ b/runtime/neurun/backend/acl_cl/KernelGenerator.cc @@ -26,7 +26,7 @@ #include "kernel/ConcatLayer.h" #include "model/Index.h" #include "ir/DataType.h" -#include "model/InternalType.h" +#include "ir/InternalType.h" #include "compiler/IExecutionBuilder.h" #include "exec/NopFunction.h" #include "util/logging.h" @@ -61,7 +61,7 @@ private: void appendReLU6(::arm_compute::ICLTensor *ifm_alloc); public: - void append(model::Activation code, ::arm_compute::ICLTensor *ifm_alloc); + void append(ir::Activation code, ::arm_compute::ICLTensor *ifm_alloc); private: IExecutionBuilder &_builder; @@ -109,26 +109,26 @@ void ActivationBuilder::appendReLU6(::arm_compute::ICLTensor *ifm_alloc) _builder.append(std::move(acl_fn)); } -void ActivationBuilder::append(model::Activation code, ::arm_compute::ICLTensor *ifm_alloc) +void ActivationBuilder::append(ir::Activation code, ::arm_compute::ICLTensor *ifm_alloc) { switch (code) { - case model::Activation::NONE: + case ir::Activation::NONE: { // DO NOTHING break; } - case model::Activation::RELU: + case ir::Activation::RELU: { appendReLU(ifm_alloc); break; } - case model::Activation::RELU1: + case ir::Activation::RELU1: { appendReLU1(ifm_alloc); break; } - case model::Activation::RELU6: + case ir::Activation::RELU6: { appendReLU6(ifm_alloc); break; @@ -1555,14 +1555,14 @@ void KernelGenerator::visit(const model::operation::TransposeConv &node) const auto stride = node.param().stride; - assert((node.param().padding.type == model::PaddingType::SAME) || - (node.param().padding.type == model::PaddingType::VALID)); + assert((node.param().padding.type == ir::PaddingType::SAME) || + (node.param().padding.type == ir::PaddingType::VALID)); auto padding = neurun::util::calculatePadding(node.param().padding, ofm_shape, ifm_shape, stride, ker_shape.W, ker_shape.H); uint32_t invalid_horizontal = 0; uint32_t invalid_vertical = 0; - if (node.param().padding.type == model::PaddingType::VALID) + if (node.param().padding.type == ir::PaddingType::VALID) { invalid_horizontal = ofm_shape.W - (1 + (ifm_shape.W - 1) * stride.horizontal) - (ker_shape.W - 1); diff --git a/runtime/neurun/backend/acl_cl/ShapeFixer.cc b/runtime/neurun/backend/acl_cl/ShapeFixer.cc index 2448e2e..58efe0d 100644 --- a/runtime/neurun/backend/acl_cl/ShapeFixer.cc +++ b/runtime/neurun/backend/acl_cl/ShapeFixer.cc @@ -25,7 +25,6 @@ #include "kernel/ConcatLayer.h" #include "model/Index.h" -#include "model/InternalType.h" #include "compiler/IExecutionBuilder.h" #include "exec/NopFunction.h" #include "util/logging.h" diff --git a/runtime/neurun/backend/acl_common/Convert.cc b/runtime/neurun/backend/acl_common/Convert.cc index ed8258b..b3e22e6 100644 --- a/runtime/neurun/backend/acl_common/Convert.cc +++ b/runtime/neurun/backend/acl_common/Convert.cc @@ -125,8 +125,8 @@ namespace acl_common return info; 
} -::arm_compute::PadStrideInfo asPadStrideInfo(const model::ExplicitPadding &padding, - const model::Stride &stride) +::arm_compute::PadStrideInfo asPadStrideInfo(const ir::ExplicitPadding &padding, + const ir::Stride &stride) { return ::arm_compute::PadStrideInfo{stride.horizontal, stride.vertical, @@ -137,27 +137,26 @@ namespace acl_common ::arm_compute::DimensionRoundingType::FLOOR}; } -::arm_compute::ActivationLayerInfo -asActivationLayerInfo(const ::neurun::model::Activation &act_code) +::arm_compute::ActivationLayerInfo asActivationLayerInfo(const ir::Activation act_code) { switch (act_code) { - case ::neurun::model::Activation::NONE: + case ir::Activation::NONE: return ::arm_compute::ActivationLayerInfo{}; - case ::neurun::model::Activation::RELU: + case ir::Activation::RELU: return ::arm_compute::ActivationLayerInfo{ ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU}; - case ::neurun::model::Activation::RELU1: + case ir::Activation::RELU1: return ::arm_compute::ActivationLayerInfo{ ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 1.0f, -1.0f}; - case ::neurun::model::Activation::RELU6: + case ir::Activation::RELU6: return ::arm_compute::ActivationLayerInfo{ ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.0f, 0.0f}; // Cases for activation of LSTM. - case ::neurun::model::Activation::TANH: + case ir::Activation::TANH: return ::arm_compute::ActivationLayerInfo{ ::arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0f, 1.0f}; - case ::neurun::model::Activation::SIGMOID: + case ir::Activation::SIGMOID: // NOTE The sigmoid function is a special case of the Logistic function when L=1, k=1, x0=0. // TODO In the ACL and NNAPI specs, currently, Logistic's L is always 1, k is always 1, and x0 is always // 0 (always sigmoid) regardless of the parameter values.
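For context on the mapping just above: Arm Compute Library's LU_BOUNDED_RELU(a, b) activation computes min(a, max(b, x)), so the parameter pairs in this hunk reproduce the NNAPI fused activations exactly: RELU1 becomes a clamp to [-1, 1] via (1.0f, -1.0f), and RELU6 a clamp to [0, 6] via (6.0f, 0.0f). A minimal reference sketch of that clamp (lu_bounded_relu is an illustrative helper for this note, not repository code):

    #include <algorithm>

    // Mirrors ACL's LU_BOUNDED_RELU activation: clamp x to the range [b, a].
    inline float lu_bounded_relu(float a, float b, float x)
    {
      return std::min(a, std::max(b, x));
    }

For example, lu_bounded_relu(6.0f, 0.0f, 7.5f) yields 6.0f, which is exactly what a fused RELU6 produces.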
diff --git a/runtime/neurun/backend/acl_common/Convert.h b/runtime/neurun/backend/acl_common/Convert.h index 66d4405..f8564b7 100644 --- a/runtime/neurun/backend/acl_common/Convert.h +++ b/runtime/neurun/backend/acl_common/Convert.h @@ -22,7 +22,7 @@ #include #include "ir/Layout.h" -#include "model/InternalType.h" +#include "ir/InternalType.h" #include "model/Operand.h" #include "model/Shape.h" #include "model/TypeInfo.h" @@ -53,11 +53,10 @@ namespace acl_common ir::Layout frontend_layout, ir::Layout backend_layout, bool apply_dim_correction = true); -::arm_compute::PadStrideInfo asPadStrideInfo(const model::ExplicitPadding &padding, - const model::Stride &stride); +::arm_compute::PadStrideInfo asPadStrideInfo(const ir::ExplicitPadding &padding, + const ir::Stride &stride); -::arm_compute::ActivationLayerInfo -asActivationLayerInfo(const ::neurun::model::Activation &act_code); +::arm_compute::ActivationLayerInfo asActivationLayerInfo(ir::Activation act_code); std::unique_ptr asAclFunction(std::unique_ptr<::arm_compute::IFunction> &&layer); diff --git a/runtime/neurun/backend/acl_neon/KernelGenerator.cc b/runtime/neurun/backend/acl_neon/KernelGenerator.cc index ea03b9e..84e9177 100644 --- a/runtime/neurun/backend/acl_neon/KernelGenerator.cc +++ b/runtime/neurun/backend/acl_neon/KernelGenerator.cc @@ -26,7 +26,7 @@ #include "util/Padding.h" #include "model/Index.h" #include "ir/DataType.h" -#include "model/InternalType.h" +#include "ir/InternalType.h" #include "compiler/IExecutionBuilder.h" #include "exec/NopFunction.h" #include "util/logging.h" @@ -60,7 +60,7 @@ private: void appendReLU6(::arm_compute::ITensor *ifm_alloc); public: - void append(model::Activation act, ::arm_compute::ITensor *ifm_alloc); + void append(ir::Activation act, ::arm_compute::ITensor *ifm_alloc); private: IExecutionBuilder &_builder; @@ -108,26 +108,26 @@ void ActivationBuilder::appendReLU6(::arm_compute::ITensor *ifm_alloc) _builder.append(std::move(acl_fn)); } -void ActivationBuilder::append(model::Activation act, ::arm_compute::ITensor *ifm_alloc) +void ActivationBuilder::append(ir::Activation act, ::arm_compute::ITensor *ifm_alloc) { switch (act) { - case model::Activation::NONE: + case ir::Activation::NONE: { // DO NOTHING break; } - case model::Activation::RELU: + case ir::Activation::RELU: { appendReLU(ifm_alloc); break; } - case model::Activation::RELU1: + case ir::Activation::RELU1: { appendReLU1(ifm_alloc); break; } - case model::Activation::RELU6: + case ir::Activation::RELU6: { appendReLU6(ifm_alloc); break; @@ -1842,14 +1842,14 @@ void KernelGenerator::visit(const model::operation::TransposeConv &node) const auto stride = node.param().stride; - assert((node.param().padding.type == model::PaddingType::SAME) || - (node.param().padding.type == model::PaddingType::VALID)); + assert((node.param().padding.type == ir::PaddingType::SAME) || + (node.param().padding.type == ir::PaddingType::VALID)); auto padding = neurun::util::calculatePadding(node.param().padding, ofm_shape, ifm_shape, stride, ker_shape.W, ker_shape.H); uint32_t invalid_horizontal = 0; uint32_t invalid_vertical = 0; - if (node.param().padding.type == model::PaddingType::VALID) + if (node.param().padding.type == ir::PaddingType::VALID) { invalid_horizontal = ofm_shape.W - (1 + (ifm_shape.W - 1) * stride.horizontal) - (ker_shape.W - 1); diff --git a/runtime/neurun/backend/acl_neon/ShapeFixer.cc b/runtime/neurun/backend/acl_neon/ShapeFixer.cc index 3d69d8f..80f539a 100644 --- a/runtime/neurun/backend/acl_neon/ShapeFixer.cc +++ 
b/runtime/neurun/backend/acl_neon/ShapeFixer.cc @@ -34,7 +34,6 @@ #include "kernel/ConcatLayer.h" #include "util/Padding.h" #include "model/Index.h" -#include "model/InternalType.h" #include "compiler/IExecutionBuilder.h" #include "exec/NopFunction.h" #include "util/logging.h" diff --git a/runtime/neurun/backend/cpu/kernel/AddLayer.cc b/runtime/neurun/backend/cpu/kernel/AddLayer.cc index fa9af6f..389e326 100644 --- a/runtime/neurun/backend/cpu/kernel/AddLayer.cc +++ b/runtime/neurun/backend/cpu/kernel/AddLayer.cc @@ -60,7 +60,7 @@ void AddLayer::addQuant8() } void AddLayer::configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData, - const TensorDescriptor &rhsDescr, const model::Activation activation, + const TensorDescriptor &rhsDescr, const ir::Activation activation, uint8_t *outputData, const TensorDescriptor &outputDescr) { _lhsData.u8 = lhsData; diff --git a/runtime/neurun/backend/cpu/kernel/AddLayer.h b/runtime/neurun/backend/cpu/kernel/AddLayer.h index cf933c6..7018e4c 100644 --- a/runtime/neurun/backend/cpu/kernel/AddLayer.h +++ b/runtime/neurun/backend/cpu/kernel/AddLayer.h @@ -44,7 +44,7 @@ public: void addQuant8(); void configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData, - const TensorDescriptor &rhsDescr, const model::Activation activation, + const TensorDescriptor &rhsDescr, const ir::Activation activation, uint8_t *outputData, const TensorDescriptor &outputDescr); void run(); @@ -64,7 +64,7 @@ private: TensorDescriptor _rhsDescr; TensorDescriptor _outputDescr; - model::Activation _activation{model::Activation::NONE}; + ir::Activation _activation{ir::Activation::NONE}; OperandType _inputType{OperandType::FLOAT32}; }; diff --git a/runtime/neurun/backend/cpu/kernel/AvgPoolLayer.cc b/runtime/neurun/backend/cpu/kernel/AvgPoolLayer.cc index 15e015b..3899557 100644 --- a/runtime/neurun/backend/cpu/kernel/AvgPoolLayer.cc +++ b/runtime/neurun/backend/cpu/kernel/AvgPoolLayer.cc @@ -42,7 +42,7 @@ namespace kernel AvgPoolLayer::AvgPoolLayer() : _inputData(), _outputData(), _inputDescr(), _outputDescr(), _paddingLeft(0), _paddingTop(0), _paddingRight(0), _paddingBottom(0), _strideWidth(0), _strideHeight(0), _kernelWidth(0), - _kernelHeight(0), _activation(model::Activation::NONE), _inputType(OperandType::FLOAT32) + _kernelHeight(0), _activation(ir::Activation::NONE), _inputType(OperandType::FLOAT32) { // DO NOTHING } @@ -77,7 +77,7 @@ void AvgPoolLayer::configure(uint8_t *inputData, const TensorDescriptor inputDes const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideWidth, const uint32_t strideHeight, const uint32_t kernelWidth, const uint32_t kernelHeight, - const model::Activation activation, uint8_t *outputData, + const ir::Activation activation, uint8_t *outputData, const TensorDescriptor outputDescr) { _inputData.u8 = inputData; diff --git a/runtime/neurun/backend/cpu/kernel/AvgPoolLayer.h b/runtime/neurun/backend/cpu/kernel/AvgPoolLayer.h index e42f403..6339efa 100644 --- a/runtime/neurun/backend/cpu/kernel/AvgPoolLayer.h +++ b/runtime/neurun/backend/cpu/kernel/AvgPoolLayer.h @@ -44,8 +44,8 @@ public: const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideWidth, const uint32_t strideHeight, const uint32_t kernelWidth, - const uint32_t kernelHeight, const model::Activation activation, - uint8_t *outputData, const TensorDescriptor outputDescr); + const uint32_t kernelHeight, const ir::Activation activation, uint8_t *outputData, + const TensorDescriptor 
outputDescr); void run(); void runSync() @@ -72,7 +72,7 @@ private: uint32_t _kernelWidth; uint32_t _kernelHeight; - model::Activation _activation; + ir::Activation _activation; OperandType _inputType; }; diff --git a/runtime/neurun/backend/cpu/kernel/ConvolutionLayer.cc b/runtime/neurun/backend/cpu/kernel/ConvolutionLayer.cc index 289b26c..2fdb0ba 100644 --- a/runtime/neurun/backend/cpu/kernel/ConvolutionLayer.cc +++ b/runtime/neurun/backend/cpu/kernel/ConvolutionLayer.cc @@ -31,7 +31,7 @@ namespace kernel ConvolutionLayer::ConvolutionLayer() : _inputData(), _kernelData(), _outputData(), _biasData(), _inputDescr(), _kernelDescr(), _outputDescr(), _biasDescr(), _paddingLeft(0), _paddingTop(0), _paddingRight(0), - _paddingBottom(0), _strideWidth(0), _strideHeight(0), _activation(model::Activation::NONE), + _paddingBottom(0), _strideWidth(0), _strideHeight(0), _activation(ir::Activation::NONE), _inputType(OperandType::FLOAT32) { // DO NOTHING @@ -99,7 +99,7 @@ void ConvolutionLayer::configure(uint8_t *inputData, const TensorDescriptor inpu const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideWidth, const uint32_t strideHeight, - const model::Activation activation, uint8_t *outputData, + const ir::Activation activation, uint8_t *outputData, const TensorDescriptor outputDescr) { _inputData.u8 = inputData; diff --git a/runtime/neurun/backend/cpu/kernel/ConvolutionLayer.h b/runtime/neurun/backend/cpu/kernel/ConvolutionLayer.h index 1efb1dc..16669f3 100644 --- a/runtime/neurun/backend/cpu/kernel/ConvolutionLayer.h +++ b/runtime/neurun/backend/cpu/kernel/ConvolutionLayer.h @@ -45,7 +45,7 @@ public: const TensorDescriptor biasDescr, const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideW, const uint32_t strideH, - const model::Activation activation, uint8_t *outputData, + const ir::Activation activation, uint8_t *outputData, const TensorDescriptor outputDescr); void run(); @@ -75,7 +75,7 @@ private: uint32_t _strideWidth; uint32_t _strideHeight; - model::Activation _activation; + ir::Activation _activation; OperandType _inputType; }; diff --git a/runtime/neurun/backend/cpu/kernel/DepthwiseConvolutionLayer.cc b/runtime/neurun/backend/cpu/kernel/DepthwiseConvolutionLayer.cc index 2ba5c42..e33e346 100644 --- a/runtime/neurun/backend/cpu/kernel/DepthwiseConvolutionLayer.cc +++ b/runtime/neurun/backend/cpu/kernel/DepthwiseConvolutionLayer.cc @@ -31,7 +31,7 @@ DepthwiseConvolutionLayer::DepthwiseConvolutionLayer() : _inputData(), _kernelData(), _outputData(), _biasData(), _inputDescr(), _kernelDescr(), _outputDescr(), _biasDescr(), _paddingLeft(0), _paddingTop(0), _paddingRight(0), _paddingBottom(0), _strideWidth(0), _strideHeight(0), _multiplier(0), - _activation(model::Activation::NONE), _inputType(OperandType::FLOAT32) + _activation(ir::Activation::NONE), _inputType(OperandType::FLOAT32) { // DO NOTHING } @@ -103,7 +103,7 @@ void DepthwiseConvolutionLayer::configure(uint8_t *inputData, const TensorDescri const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideWidth, const uint32_t strideHeight, const uint32_t multiplier, - const model::Activation activation, uint8_t *outputData, + const ir::Activation activation, uint8_t *outputData, const TensorDescriptor outputDescr) { _inputData.u8 = inputData; diff --git a/runtime/neurun/backend/cpu/kernel/DepthwiseConvolutionLayer.h 
b/runtime/neurun/backend/cpu/kernel/DepthwiseConvolutionLayer.h index 85230e1..575cc0a 100644 --- a/runtime/neurun/backend/cpu/kernel/DepthwiseConvolutionLayer.h +++ b/runtime/neurun/backend/cpu/kernel/DepthwiseConvolutionLayer.h @@ -45,7 +45,7 @@ public: const TensorDescriptor biasDescr, const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideW, const uint32_t strideH, - const uint32_t multiplier, const model::Activation activation, uint8_t *outputData, + const uint32_t multiplier, const ir::Activation activation, uint8_t *outputData, const TensorDescriptor outputDescr); void run(); @@ -77,7 +77,7 @@ private: uint32_t _multiplier; - model::Activation _activation; + ir::Activation _activation; OperandType _inputType; }; diff --git a/runtime/neurun/backend/cpu/kernel/FullyConnectedLayer.cc b/runtime/neurun/backend/cpu/kernel/FullyConnectedLayer.cc index b9361b2..055f715 100644 --- a/runtime/neurun/backend/cpu/kernel/FullyConnectedLayer.cc +++ b/runtime/neurun/backend/cpu/kernel/FullyConnectedLayer.cc @@ -32,7 +32,7 @@ namespace kernel FullyConnectedLayer::FullyConnectedLayer() : _inputData(), _weightsData(), _biasData(), _outputData(), _inputDescr(), _weightsDescr(), - _biasDescr(), _outputDescr(), _activation(model::Activation::NONE), + _biasDescr(), _outputDescr(), _activation(ir::Activation::NONE), _inputType(OperandType::FLOAT32) { // DO NOTHING @@ -86,7 +86,7 @@ void FullyConnectedLayer::fullyConnectedQuant8() void FullyConnectedLayer::configure(uint8_t *inputData, const TensorDescriptor inputDescr, uint8_t *weightsData, const TensorDescriptor weightsDescr, uint8_t *biasData, const TensorDescriptor biasDescr, - model::Activation activation, uint8_t *outputData, + ir::Activation activation, uint8_t *outputData, const TensorDescriptor outputDescr) { _inputData.u8 = inputData; diff --git a/runtime/neurun/backend/cpu/kernel/FullyConnectedLayer.h b/runtime/neurun/backend/cpu/kernel/FullyConnectedLayer.h index 83e493a..9fdc393 100644 --- a/runtime/neurun/backend/cpu/kernel/FullyConnectedLayer.h +++ b/runtime/neurun/backend/cpu/kernel/FullyConnectedLayer.h @@ -42,8 +42,8 @@ public: void configure(uint8_t *inputData, const TensorDescriptor inputDescr, uint8_t *weightsData, const TensorDescriptor weightsDescr, uint8_t *biasData, - const TensorDescriptor biasDescr, model::Activation activation, - uint8_t *outputData, const TensorDescriptor outputDescr); + const TensorDescriptor biasDescr, ir::Activation activation, uint8_t *outputData, + const TensorDescriptor outputDescr); void run(); void runSync() @@ -64,7 +64,7 @@ private: TensorDescriptor _biasDescr; TensorDescriptor _outputDescr; - model::Activation _activation; + ir::Activation _activation; OperandType _inputType; }; diff --git a/runtime/neurun/backend/cpu/kernel/MaxPoolLayer.cc b/runtime/neurun/backend/cpu/kernel/MaxPoolLayer.cc index 0bce4c3..095cd6d 100644 --- a/runtime/neurun/backend/cpu/kernel/MaxPoolLayer.cc +++ b/runtime/neurun/backend/cpu/kernel/MaxPoolLayer.cc @@ -41,7 +41,7 @@ namespace kernel MaxPoolLayer::MaxPoolLayer() : _inputData(), _outputData(), _inputDescr(), _outputDescr(), _paddingLeft(0), _paddingTop(0), _paddingRight(0), _paddingBottom(0), _strideWidth(0), _strideHeight(0), _kernelWidth(0), - _kernelHeight(0), _activation(model::Activation::NONE), _inputType(OperandType::FLOAT32) + _kernelHeight(0), _activation(ir::Activation::NONE), _inputType(OperandType::FLOAT32) { // DO NOTHING } @@ -76,7 +76,7 @@ void MaxPoolLayer::configure(uint8_t 
*inputData, const TensorDescriptor inputDes const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideWidth, const uint32_t strideHeight, const uint32_t kernelWidth, const uint32_t kernelHeight, - const model::Activation activation, uint8_t *outputData, + const ir::Activation activation, uint8_t *outputData, const TensorDescriptor outputDescr) { _inputData.u8 = inputData; diff --git a/runtime/neurun/backend/cpu/kernel/MaxPoolLayer.h b/runtime/neurun/backend/cpu/kernel/MaxPoolLayer.h index bfd3481..88a574c 100644 --- a/runtime/neurun/backend/cpu/kernel/MaxPoolLayer.h +++ b/runtime/neurun/backend/cpu/kernel/MaxPoolLayer.h @@ -44,8 +44,8 @@ public: const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideWidth, const uint32_t strideHeight, const uint32_t kernelWidth, - const uint32_t kernelHeight, const model::Activation activation, - uint8_t *outputData, const TensorDescriptor outputDescr); + const uint32_t kernelHeight, const ir::Activation activation, uint8_t *outputData, + const TensorDescriptor outputDescr); void run(); void runSync() @@ -72,7 +72,7 @@ private: uint32_t _kernelWidth; uint32_t _kernelHeight; - model::Activation _activation; + ir::Activation _activation; OperandType _inputType; }; diff --git a/runtime/neurun/backend/cpu/kernel/MulLayer.cc b/runtime/neurun/backend/cpu/kernel/MulLayer.cc index 2a6f177..9848130 100644 --- a/runtime/neurun/backend/cpu/kernel/MulLayer.cc +++ b/runtime/neurun/backend/cpu/kernel/MulLayer.cc @@ -60,7 +60,7 @@ void MulLayer::mulQuant8() } void MulLayer::configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData, - const TensorDescriptor &rhsDescr, const model::Activation activation, + const TensorDescriptor &rhsDescr, const ir::Activation activation, uint8_t *outputData, const TensorDescriptor &outputDescr) { _lhsData.u8 = lhsData; diff --git a/runtime/neurun/backend/cpu/kernel/MulLayer.h b/runtime/neurun/backend/cpu/kernel/MulLayer.h index f5bda8e..05fc305 100644 --- a/runtime/neurun/backend/cpu/kernel/MulLayer.h +++ b/runtime/neurun/backend/cpu/kernel/MulLayer.h @@ -44,7 +44,7 @@ public: void mulQuant8(); void configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData, - const TensorDescriptor &rhsDescr, const model::Activation activation, + const TensorDescriptor &rhsDescr, const ir::Activation activation, uint8_t *outputData, const TensorDescriptor &outputDescr); void run(); @@ -64,7 +64,7 @@ private: TensorDescriptor _rhsDescr; TensorDescriptor _outputDescr; - model::Activation _activation{model::Activation::NONE}; + ir::Activation _activation{ir::Activation::NONE}; OperandType _inputType{OperandType::FLOAT32}; }; diff --git a/runtime/neurun/backend/cpu/kernel/OperationUtils.cc b/runtime/neurun/backend/cpu/kernel/OperationUtils.cc index e9f1140..dbcbac2 100644 --- a/runtime/neurun/backend/cpu/kernel/OperationUtils.cc +++ b/runtime/neurun/backend/cpu/kernel/OperationUtils.cc @@ -106,30 +106,30 @@ void QuantizeMultiplierGreaterThanOne(double double_multiplier, int32_t *quantiz *quantized_multiplier = static_cast(q_fixed); } -void CalculateActivationRangeFloat(model::Activation activation, float *activation_min, +void CalculateActivationRangeFloat(ir::Activation activation, float *activation_min, float *activation_max) { - if (activation == model::Activation::RELU) + if (activation == ir::Activation::RELU) { *activation_min = 0.f; *activation_max = std::numeric_limits::max(); } - else if (activation == model::Activation::RELU6) + else if 
(activation == ir::Activation::RELU6) { *activation_min = 0.f; *activation_max = 6.f; } - else if (activation == model::Activation::RELU1) + else if (activation == ir::Activation::RELU1) { *activation_min = -1.f; *activation_max = 1.f; } - else if (activation == model::Activation::SIGMOID) + else if (activation == ir::Activation::SIGMOID) { *activation_min = 0.f; *activation_max = 1.f; } - else if (activation == model::Activation::NONE) + else if (activation == ir::Activation::NONE) { *activation_min = std::numeric_limits::lowest(); *activation_max = std::numeric_limits::max(); @@ -140,9 +140,8 @@ void CalculateActivationRangeFloat(model::Activation activation, float *activati } } -void CalculateActivationRangeUint8(model::Activation activation, - const TensorDescriptor &outputDescr, int32_t *act_min, - int32_t *act_max) +void CalculateActivationRangeUint8(ir::Activation activation, const TensorDescriptor &outputDescr, + int32_t *act_min, int32_t *act_max) { const int32_t qmin = std::numeric_limits::min(); const int32_t qmax = std::numeric_limits::max(); @@ -151,27 +150,27 @@ void CalculateActivationRangeUint8(model::Activation activation, auto quantize = [scale, zero_point](float f) { return zero_point + static_cast(std::round(f / scale)); }; - if (activation == model::Activation::RELU) + if (activation == ir::Activation::RELU) { *act_min = std::max(qmin, quantize(0.0)); *act_max = qmax; } - else if (activation == model::Activation::RELU6) + else if (activation == ir::Activation::RELU6) { *act_min = std::max(qmin, quantize(0.0)); *act_max = std::min(qmax, quantize(6.0)); } - else if (activation == model::Activation::RELU1) + else if (activation == ir::Activation::RELU1) { *act_min = std::max(qmin, quantize(-1.0)); *act_max = std::min(qmax, quantize(1.0)); } - else if (activation == model::Activation::SIGMOID) + else if (activation == ir::Activation::SIGMOID) { *act_min = std::max(qmin, quantize(0.0)); *act_max = std::min(qmax, quantize(1.0)); } - else if (activation == model::Activation::NONE) + else if (activation == ir::Activation::NONE) { *act_min = qmin; *act_max = qmax; diff --git a/runtime/neurun/backend/cpu/kernel/OperationUtils.h b/runtime/neurun/backend/cpu/kernel/OperationUtils.h index c466f9f..f8ab905 100644 --- a/runtime/neurun/backend/cpu/kernel/OperationUtils.h +++ b/runtime/neurun/backend/cpu/kernel/OperationUtils.h @@ -25,7 +25,7 @@ #include "model/Operand.h" #include "ir/DataType.h" -#include +#include using OperandType = neurun::ir::DataType; @@ -130,12 +130,11 @@ void GetQuantizedConvolutionMultiplier(const TensorDescriptor &inputDescr, void QuantizeMultiplierGreaterThanOne(double double_multiplier, int32_t *quantized_multiplier, int *left_shift); -void CalculateActivationRangeFloat(model::Activation activation, float *activation_min, +void CalculateActivationRangeFloat(ir::Activation activation, float *activation_min, float *activation_max); -void CalculateActivationRangeUint8(model::Activation activation, - const TensorDescriptor &outputDescr, int32_t *act_min, - int32_t *act_max); +void CalculateActivationRangeUint8(ir::Activation activation, const TensorDescriptor &outputDescr, + int32_t *act_min, int32_t *act_max); int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift); diff --git a/runtime/neurun/backend/cpu/kernel/SubLayer.cc b/runtime/neurun/backend/cpu/kernel/SubLayer.cc index 984464a..946b978 100644 --- a/runtime/neurun/backend/cpu/kernel/SubLayer.cc +++ b/runtime/neurun/backend/cpu/kernel/SubLayer.cc @@ -59,7 +59,7 @@ void 
SubLayer::subQuant8() } void SubLayer::configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData, - const TensorDescriptor &rhsDescr, const model::Activation activation, + const TensorDescriptor &rhsDescr, const ir::Activation activation, uint8_t *outputData, const TensorDescriptor &outputDescr) { _lhsData.u8 = lhsData; diff --git a/runtime/neurun/backend/cpu/kernel/SubLayer.h b/runtime/neurun/backend/cpu/kernel/SubLayer.h index 7036ca1..c9abdb4 100644 --- a/runtime/neurun/backend/cpu/kernel/SubLayer.h +++ b/runtime/neurun/backend/cpu/kernel/SubLayer.h @@ -44,7 +44,7 @@ public: void subQuant8(); void configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData, - const TensorDescriptor &rhsDescr, const model::Activation activation, + const TensorDescriptor &rhsDescr, const ir::Activation activation, uint8_t *outputData, const TensorDescriptor &outputDescr); void run(); @@ -64,7 +64,7 @@ private: TensorDescriptor _rhsDescr; TensorDescriptor _outputDescr; - model::Activation _activation{model::Activation::NONE}; + ir::Activation _activation{ir::Activation::NONE}; OperandType _inputType{OperandType::FLOAT32}; }; diff --git a/runtime/neurun/backend/srcn/KernelGenerator.cc b/runtime/neurun/backend/srcn/KernelGenerator.cc index c37109f..d3f86d4 100644 --- a/runtime/neurun/backend/srcn/KernelGenerator.cc +++ b/runtime/neurun/backend/srcn/KernelGenerator.cc @@ -211,7 +211,7 @@ void KernelGenerator::visit(const model::operation::TransposeConv &node) const auto ker_height = ker_shape.H; const auto ker_width = ker_shape.W; const auto stride = node.param().stride; - const int padding_type = (node.param().padding.type == model::PaddingType::SAME); + const int padding_type = (node.param().padding.type == ir::PaddingType::SAME); const auto padding = neurun::util::calculatePadding(node.param().padding, ofm_shape, ifm_shape, stride, ker_width, ker_height); diff --git a/runtime/neurun/backend/srcn/kernel/AddLayer.cc b/runtime/neurun/backend/srcn/kernel/AddLayer.cc index 80b7a34..b53dfe8 100644 --- a/runtime/neurun/backend/srcn/kernel/AddLayer.cc +++ b/runtime/neurun/backend/srcn/kernel/AddLayer.cc @@ -58,7 +58,7 @@ namespace kernel void AddLayer::addFloat32() { - assert(_activation == model::Activation::NONE); + assert(_activation == ir::Activation::NONE); // ncnn kernel support // 1. 
rank < 4 @@ -90,7 +90,7 @@ void AddLayer::addQuant8() } void AddLayer::configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData, - const TensorDescriptor &rhsDescr, const model::Activation activation, + const TensorDescriptor &rhsDescr, const ir::Activation activation, uint8_t *outputData, const TensorDescriptor &outputDescr, const ir::Layout backendLayout) { diff --git a/runtime/neurun/backend/srcn/kernel/AddLayer.h b/runtime/neurun/backend/srcn/kernel/AddLayer.h index 9995754..1cae171 100644 --- a/runtime/neurun/backend/srcn/kernel/AddLayer.h +++ b/runtime/neurun/backend/srcn/kernel/AddLayer.h @@ -44,7 +44,7 @@ public: void addQuant8(); void configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData, - const TensorDescriptor &rhsDescr, const model::Activation activation, + const TensorDescriptor &rhsDescr, const ir::Activation activation, uint8_t *outputData, const TensorDescriptor &outputDescr, const ir::Layout backendLayout); @@ -65,7 +65,7 @@ private: TensorDescriptor _rhsDescr; TensorDescriptor _outputDescr; - model::Activation _activation{model::Activation::NONE}; + ir::Activation _activation{ir::Activation::NONE}; OperandType _inputType{OperandType::FLOAT32}; diff --git a/runtime/neurun/backend/srcn/kernel/InstanceNormLayer.cc b/runtime/neurun/backend/srcn/kernel/InstanceNormLayer.cc index d7627bc..c83fe6d 100644 --- a/runtime/neurun/backend/srcn/kernel/InstanceNormLayer.cc +++ b/runtime/neurun/backend/srcn/kernel/InstanceNormLayer.cc @@ -30,7 +30,7 @@ namespace kernel InstanceNormLayer::InstanceNormLayer() : _inputData(), _gammaData(), _betaData(), _outputData(), _inputDescr(), _gammaDescr(), - _betaDescr(), _outputDescr(), _epsilon(1e-5), _activation(model::Activation::NONE), + _betaDescr(), _outputDescr(), _epsilon(1e-5), _activation(ir::Activation::NONE), _inputType(OperandType::FLOAT32), _backendLayout(ir::Layout::UNKNOWN) { // DO NOTHING @@ -60,12 +60,12 @@ void InstanceNormLayer::instanceNormFloat32() const int output_width = _outputDescr.dimensions[3]; nnfw::ncnn::Mat out_mat(output_width, output_height, output_channels, _outputData.f); - if (_activation == model::Activation::NONE) + if (_activation == ir::Activation::NONE) { nnfw::ncnn::ncnn_instance_norm_rowmajor(in_mat, out_mat, gamma_mat, beta_mat, input_channels, _epsilon); } - else if (_activation == model::Activation::RELU) + else if (_activation == ir::Activation::RELU) { nnfw::ncnn::ncnn_instance_norm_with_relu_rowmajor(in_mat, out_mat, gamma_mat, beta_mat, input_channels, _epsilon, 0.f); @@ -97,12 +97,12 @@ void InstanceNormLayer::instanceNormFloat32() const int output_channels = _outputDescr.dimensions[3]; nnfw::ncnn::Mat out_mat(output_channels, output_width, output_height, _outputData.f); - if (_activation == model::Activation::NONE) + if (_activation == ir::Activation::NONE) { nnfw::ncnn::ncnn_instance_norm_colmajor(in_mat, out_mat, gamma_mat, beta_mat, input_channels, _epsilon); } - else if (_activation == model::Activation::RELU) + else if (_activation == ir::Activation::RELU) { nnfw::ncnn::ncnn_instance_norm_with_relu_colmajor(in_mat, out_mat, gamma_mat, beta_mat, input_channels, _epsilon, 0.f); @@ -121,7 +121,7 @@ void InstanceNormLayer::configure(uint8_t *inputData, const TensorDescriptor inp uint8_t *gammaData, const TensorDescriptor gammaDescr, uint8_t *betaData, const TensorDescriptor betaDescr, uint8_t *outputData, const TensorDescriptor outputDescr, - float epsilon, model::Activation activation, + float epsilon, ir::Activation activation, ir::Layout 
backendLayout) { _inputData.u8 = inputData; diff --git a/runtime/neurun/backend/srcn/kernel/InstanceNormLayer.h b/runtime/neurun/backend/srcn/kernel/InstanceNormLayer.h index 2cad9e0..0ac0cef 100644 --- a/runtime/neurun/backend/srcn/kernel/InstanceNormLayer.h +++ b/runtime/neurun/backend/srcn/kernel/InstanceNormLayer.h @@ -40,7 +40,7 @@ public: void configure(uint8_t *inputData, const TensorDescriptor inputDescr, uint8_t *gammaData, const TensorDescriptor gammaDescr, uint8_t *betaData, const TensorDescriptor betaDescr, uint8_t *outputData, - const TensorDescriptor outputDescr, float epsilon, model::Activation activation, + const TensorDescriptor outputDescr, float epsilon, ir::Activation activation, ir::Layout backendLayout); void run(); @@ -63,7 +63,7 @@ private: TensorDescriptor _outputDescr; float _epsilon; - model::Activation _activation; + ir::Activation _activation; OperandType _inputType; ir::Layout _backendLayout; diff --git a/runtime/neurun/backend/srcn/kernel/OperationUtils.h b/runtime/neurun/backend/srcn/kernel/OperationUtils.h index dce00b5..a0610a2 100644 --- a/runtime/neurun/backend/srcn/kernel/OperationUtils.h +++ b/runtime/neurun/backend/srcn/kernel/OperationUtils.h @@ -23,7 +23,7 @@ #include "model/Operand.h" #include "ir/DataType.h" -#include +#include #include using OperandType = neurun::ir::DataType; diff --git a/runtime/neurun/core/include/model/InternalType.h b/runtime/neurun/core/include/ir/InternalType.h similarity index 76% rename from runtime/neurun/core/include/model/InternalType.h rename to runtime/neurun/core/include/ir/InternalType.h index fccf2fe..3795e39 100644 --- a/runtime/neurun/core/include/model/InternalType.h +++ b/runtime/neurun/core/include/ir/InternalType.h @@ -14,14 +14,14 @@ * limitations under the License. */ -#ifndef __NEURUN_MODEL_INTERNAL_TYPE_H__ -#define __NEURUN_MODEL_INTERNAL_TYPE_H__ +#ifndef __NEURUN_IR_INTERNAL_TYPE_H__ +#define __NEURUN_IR_INTERNAL_TYPE_H__ #include namespace neurun { -namespace model +namespace ir { enum class Activation @@ -62,7 +62,17 @@ struct Stride uint32_t horizontal; }; +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces. 
+namespace model +{ +using Activation = ir::Activation; +using PaddingType = ir::PaddingType; +using ExplicitPadding = ir::ExplicitPadding; +using Padding = ir::Padding; +using Stride = ir::Stride; } // namespace model } // namespace neurun -#endif // __NEURUN_MODEL_INTERNAL_TYPE_H__ +#endif // __NEURUN_IR_INTERNAL_TYPE_H__
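The `using` block added above is the compatibility shim that makes this incremental migration safe: alias declarations re-export the moved types under their old `model::` spellings, so every not-yet-updated call site keeps compiling, and the aliases can be deleted once the TODO is resolved. A stripped-down illustration of the pattern (self-contained example, not taken from the repository; the enum body is invented):

    #include <type_traits>

    namespace neurun
    {
    namespace ir
    {
    enum class Activation { NONE, RELU }; // the type now lives in ir
    } // namespace ir

    namespace model
    {
    using Activation = ir::Activation; // old qualified name remains valid
    } // namespace model
    } // namespace neurun

    // Both spellings name the same type, so migrated and unmigrated code interoperate:
    static_assert(std::is_same<neurun::model::Activation, neurun::ir::Activation>::value,
                  "the alias keeps the old spelling usable");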
diff --git a/runtime/neurun/core/include/model/operation/Add.h b/runtime/neurun/core/include/model/operation/Add.h index 9efd128..045c7af 100644 --- a/runtime/neurun/core/include/model/operation/Add.h +++ b/runtime/neurun/core/include/model/operation/Add.h @@ -18,7 +18,7 @@ #define __NEURUN_MODEL_OPERATION_ADD_H__ #include "model/Operation.h" -#include "model/InternalType.h" +#include "ir/InternalType.h" namespace neurun { diff --git a/runtime/neurun/core/include/model/operation/AvgPool2D.h b/runtime/neurun/core/include/model/operation/AvgPool2D.h index e9eed79..17429c1 100644 --- a/runtime/neurun/core/include/model/operation/AvgPool2D.h +++ b/runtime/neurun/core/include/model/operation/AvgPool2D.h @@ -20,7 +20,7 @@ #include #include "model/Operation.h" -#include "model/InternalType.h" +#include "ir/InternalType.h" namespace neurun { diff --git a/runtime/neurun/core/include/model/operation/Conv2D.h b/runtime/neurun/core/include/model/operation/Conv2D.h index a25cb1f..258b3e7 100644 --- a/runtime/neurun/core/include/model/operation/Conv2D.h +++ b/runtime/neurun/core/include/model/operation/Conv2D.h @@ -20,7 +20,7 @@ #include #include "model/Operation.h" -#include "model/InternalType.h" +#include "ir/InternalType.h" namespace neurun { diff --git a/runtime/neurun/core/include/model/operation/DepthwiseConv2D.h b/runtime/neurun/core/include/model/operation/DepthwiseConv2D.h index 3205bd5..fb49401 100644 --- a/runtime/neurun/core/include/model/operation/DepthwiseConv2D.h +++ b/runtime/neurun/core/include/model/operation/DepthwiseConv2D.h @@ -20,7 +20,7 @@ #include #include "model/Operation.h" -#include "model/InternalType.h" +#include "ir/InternalType.h" namespace neurun { diff --git a/runtime/neurun/core/include/model/operation/Div.h b/runtime/neurun/core/include/model/operation/Div.h index beb58eb..1b85d1d 100644 --- a/runtime/neurun/core/include/model/operation/Div.h +++ b/runtime/neurun/core/include/model/operation/Div.h @@ -18,7 +18,7 @@ #define __NEURUN_MODEL_OPERATION_DIV_H__ #include "model/Operation.h" -#include "model/InternalType.h" +#include "ir/InternalType.h" namespace neurun { diff --git a/runtime/neurun/core/include/model/operation/FullyConnected.h b/runtime/neurun/core/include/model/operation/FullyConnected.h index 1178aa0..1ee6f67 100644 --- a/runtime/neurun/core/include/model/operation/FullyConnected.h +++ b/runtime/neurun/core/include/model/operation/FullyConnected.h @@ -20,7 +20,7 @@ #include #include "model/Operation.h" -#include "model/InternalType.h" +#include "ir/InternalType.h" namespace neurun { diff --git a/runtime/neurun/core/include/model/operation/InstanceNorm.h b/runtime/neurun/core/include/model/operation/InstanceNorm.h index 61c3057..bab24ef 100644 --- a/runtime/neurun/core/include/model/operation/InstanceNorm.h +++ b/runtime/neurun/core/include/model/operation/InstanceNorm.h @@ -18,7 +18,7 @@ #define __NEURUN_MODEL_OPERATION_INSTANCE_NORM_H__ #include "model/Operation.h" -#include "model/InternalType.h" +#include "ir/InternalType.h" namespace neurun { diff --git a/runtime/neurun/core/include/model/operation/L2Pool2D.h b/runtime/neurun/core/include/model/operation/L2Pool2D.h index 48ef431..858cc49 100644 --- a/runtime/neurun/core/include/model/operation/L2Pool2D.h +++ b/runtime/neurun/core/include/model/operation/L2Pool2D.h @@ -20,7 +20,7 @@ #include #include "model/Operation.h" -#include "model/InternalType.h" +#include "ir/InternalType.h" namespace neurun { diff --git a/runtime/neurun/core/include/model/operation/LSTM.h b/runtime/neurun/core/include/model/operation/LSTM.h index db4eb2f..3244d0b 100644 --- a/runtime/neurun/core/include/model/operation/LSTM.h +++ b/runtime/neurun/core/include/model/operation/LSTM.h @@ -16,7 +16,7 @@ #ifndef __NEURUN_MODEL_OPERATION_LSTM_H__ #define __NEURUN_MODEL_OPERATION_LSTM_H__ -#include "model/InternalType.h" +#include "ir/InternalType.h" #include "model/Operation.h" namespace neurun diff --git a/runtime/neurun/core/include/model/operation/MaxPool2D.h b/runtime/neurun/core/include/model/operation/MaxPool2D.h index 6533235..5cb40c0 100644 --- a/runtime/neurun/core/include/model/operation/MaxPool2D.h +++ b/runtime/neurun/core/include/model/operation/MaxPool2D.h @@ -20,7 +20,7 @@ #include #include "model/Operation.h" -#include "model/InternalType.h" +#include "ir/InternalType.h" namespace neurun { diff --git a/runtime/neurun/core/include/model/operation/Mul.h b/runtime/neurun/core/include/model/operation/Mul.h index fa5bf14..1e37d0d 100644 --- a/runtime/neurun/core/include/model/operation/Mul.h +++ b/runtime/neurun/core/include/model/operation/Mul.h @@ -18,7 +18,7 @@ #define __NEURUN_MODEL_OPERATION_MUL_H__ #include "model/Operation.h" -#include "model/InternalType.h" +#include "ir/InternalType.h" namespace neurun { diff --git a/runtime/neurun/core/include/model/operation/RNN.h b/runtime/neurun/core/include/model/operation/RNN.h index bb1c7d0..2b5bf2d 100644 --- a/runtime/neurun/core/include/model/operation/RNN.h +++ b/runtime/neurun/core/include/model/operation/RNN.h @@ -16,7 +16,7 @@ #ifndef __NEURUN_MODEL_OPERATION_RNN_H__ #define __NEURUN_MODEL_OPERATION_RNN_H__ -#include "model/InternalType.h" +#include "ir/InternalType.h" #include "model/Operation.h" namespace neurun diff --git a/runtime/neurun/core/include/model/operation/Sub.h b/runtime/neurun/core/include/model/operation/Sub.h index 5156814..a752428 100644 --- a/runtime/neurun/core/include/model/operation/Sub.h +++ b/runtime/neurun/core/include/model/operation/Sub.h @@ -18,7 +18,7 @@ #define __NEURUN_MODEL_OPERATION_SUB_H__ #include "model/Operation.h" -#include "model/InternalType.h" +#include "ir/InternalType.h" namespace neurun { diff --git a/runtime/neurun/core/include/model/operation/TransposeConv.h b/runtime/neurun/core/include/model/operation/TransposeConv.h index 1c5dcdd..7926e02 100644 --- a/runtime/neurun/core/include/model/operation/TransposeConv.h +++ b/runtime/neurun/core/include/model/operation/TransposeConv.h @@ -20,7 +20,7 @@ #include #include "model/Operation.h" -#include "model/InternalType.h" +#include "ir/InternalType.h" namespace neurun { diff --git a/runtime/neurun/core/include/util/Padding.h b/runtime/neurun/core/include/util/Padding.h index 2300132..7bc8b65 100644 --- a/runtime/neurun/core/include/util/Padding.h +++ b/runtime/neurun/core/include/util/Padding.h @@ -20,21 +20,21 @@ #include #include "model/Shape.h" -#include "model/InternalType.h" +#include "ir/InternalType.h" namespace neurun { namespace util { -model::ExplicitPadding validPadding(void); -model::ExplicitPadding samePadding(const model::FeatureShape &ifm_shape, - const model::FeatureShape &ofm_shape, - const model::Stride &stride, uint32_t kw, uint32_t kh); -model::ExplicitPadding calculatePadding(const
model::Padding &padding, - const model::FeatureShape &ifm_shape, - const model::FeatureShape &ofm_shape, - const model::Stride &stride, uint32_t kw, uint32_t kh); +ir::ExplicitPadding validPadding(void); +ir::ExplicitPadding samePadding(const model::FeatureShape &ifm_shape, + const model::FeatureShape &ofm_shape, const ir::Stride &stride, + uint32_t kw, uint32_t kh); +ir::ExplicitPadding calculatePadding(const ir::Padding &padding, + const model::FeatureShape &ifm_shape, + const model::FeatureShape &ofm_shape, const ir::Stride &stride, + uint32_t kw, uint32_t kh); } // namespace util } // namespace neurun diff --git a/runtime/neurun/core/include/util/Utils.h b/runtime/neurun/core/include/util/Utils.h index 06cd638..63a7a97 100644 --- a/runtime/neurun/core/include/util/Utils.h +++ b/runtime/neurun/core/include/util/Utils.h @@ -23,7 +23,7 @@ #ifndef __NEURUN_UTIL_UTILS_H__ #define __NEURUN_UTIL_UTILS_H__ -#include "model/InternalType.h" +#include "ir/InternalType.h" #include "ir/Layout.h" #include "model/Operand.h" #include "util/Coordinates.h" @@ -37,10 +37,10 @@ namespace util /** * @brief Converts an internal padding type to const char* - * @param[in] code Padding type to be converted + * @param[in] type Padding type to be converted * @return A string holding the converted value */ -const char *to_string(const model::PaddingType &type); +const char *to_string(ir::PaddingType type); Coordinates convertCoordinates(const Coordinates &from_coordinates, ir::Layout from_layout, ir::Layout to_layout); diff --git a/runtime/neurun/core/src/compiler/OperationValidator.cc b/runtime/neurun/core/src/compiler/OperationValidator.cc index 3932be5..214139b 100644 --- a/runtime/neurun/core/src/compiler/OperationValidator.cc +++ b/runtime/neurun/core/src/compiler/OperationValidator.cc @@ -493,8 +493,8 @@ void OperationValidator::visit(const model::operation::TransposeConv &node) UNUSED_RELEASE(ifm_shape); UNUSED_RELEASE(ker_shape); - assert((node.param().padding.type == model::PaddingType::SAME) || - (node.param().padding.type == model::PaddingType::VALID)); + assert((node.param().padding.type == ir::PaddingType::SAME) || + (node.param().padding.type == ir::PaddingType::VALID)); assert(ifm_shape.N == ofm_shape.N); assert(ifm_shape.C == ker_shape.C); assert(ker_shape.N == ofm_shape.C); diff --git a/runtime/neurun/core/src/exec/interp/operations/OperationUtil.h b/runtime/neurun/core/src/exec/interp/operations/OperationUtil.h index 4d2b4e1..8df4d41 100644 --- a/runtime/neurun/core/src/exec/interp/operations/OperationUtil.h +++ b/runtime/neurun/core/src/exec/interp/operations/OperationUtil.h @@ -18,7 +18,7 @@ #define __NEURUN_EXEC_INTERP_OPERATIONS_OPERATION_UTILS_H_ #include "model/Shape.h" -#include "model/InternalType.h" +#include "ir/InternalType.h" #include @@ -75,24 +75,24 @@ inline nnfw::cker::Shape convertExtendShape(const model::Shape &shape) } template -void calculateActivationRange(model::Activation activation, T *activation_min, T *activation_max) +void calculateActivationRange(ir::Activation activation, T *activation_min, T *activation_max) { - if (activation == model::Activation::RELU) + if (activation == ir::Activation::RELU) { *activation_min = 0; *activation_max = std::numeric_limits::max(); } - else if (activation == model::Activation::RELU6) + else if (activation == ir::Activation::RELU6) { *activation_min = 0; *activation_max = 6; } - else if (activation == model::Activation::RELU1) + else if (activation == ir::Activation::RELU1) { *activation_min = -1; *activation_max = 1; } - else if
(activation == model::Activation::NONE) + else if (activation == ir::Activation::NONE) { *activation_min = std::numeric_limits::lowest(); *activation_max = std::numeric_limits::max(); diff --git a/runtime/neurun/core/src/ir/dumper/Dumper.cc b/runtime/neurun/core/src/ir/dumper/Dumper.cc index d725ac1..961aa36 100644 --- a/runtime/neurun/core/src/ir/dumper/Dumper.cc +++ b/runtime/neurun/core/src/ir/dumper/Dumper.cc @@ -93,7 +93,7 @@ void Dumper::visit(const Concat &node) void Dumper::visit(const Conv2D &node) { std::string padding_type = - node.param().padding.type == model::PaddingType::EXPLICIT ? "Explicit" : "Implicit"; + node.param().padding.type == ir::PaddingType::EXPLICIT ? "Explicit" : "Implicit"; VERBOSE(LIR) << "* Conv2D(" << padding_type << ")" << std::endl; VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(Conv2D::Input::INPUT).value() << ") Kernel(" << node.getInputs().at(Conv2D::Input::KERNEL).value() << ") Bias(" @@ -112,7 +112,7 @@ void Dumper::visit(const DepthToSpace &node) void Dumper::visit(const DepthwiseConv2D &node) { std::string padding_type = - node.param().padding.type == model::PaddingType::EXPLICIT ? "Explicit" : "Implicit"; + node.param().padding.type == ir::PaddingType::EXPLICIT ? "Explicit" : "Implicit"; VERBOSE(LIR) << "* DepthwiseConv2D(" << padding_type << ")" << std::endl; VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(DepthwiseConv2D::Input::INPUT).value() << ") Kernel(" << node.getInputs().at(DepthwiseConv2D::Input::KERNEL).value() @@ -308,7 +308,7 @@ void Dumper::visit(const Logistic &node) void Dumper::visit(const MaxPool2D &node) { std::string padding_type = - node.param().padding.type == model::PaddingType::EXPLICIT ? "Explicit" : "Implicit"; + node.param().padding.type == ir::PaddingType::EXPLICIT ? "Explicit" : "Implicit"; VERBOSE(LIR) << "* MaxPool2D(" << padding_type << ")" << std::endl; VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(MaxPool2D::Input::INPUT).value() << ")" << std::endl; @@ -559,7 +559,7 @@ void Dumper::visit(const TopKV2 &node) void Dumper::visit(const TransposeConv &node) { std::string padding_type = - node.param().padding.type == model::PaddingType::EXPLICIT ? "Explicit" : "Implicit"; + node.param().padding.type == ir::PaddingType::EXPLICIT ? "Explicit" : "Implicit"; VERBOSE(LIR) << "* TransposeConv(" << padding_type << ")" << std::endl; VERBOSE(LIR) << " - Inputs : Output Shape(" << node.getInputs().at(TransposeConv::Input::OUTPUT_SHAPE).value() << ") KERNEL(" diff --git a/runtime/neurun/core/src/util/Padding.cc b/runtime/neurun/core/src/util/Padding.cc index dd5a3b5..89e4577 100644 --- a/runtime/neurun/core/src/util/Padding.cc +++ b/runtime/neurun/core/src/util/Padding.cc @@ -25,7 +25,7 @@ namespace neurun namespace util { -model::ExplicitPadding validPadding(void) +ir::ExplicitPadding validPadding(void) { // // ANEURALNETWORKS_PADDING_VALID @@ -36,7 +36,7 @@ model::ExplicitPadding validPadding(void) // the input at the end that could not fill the whole filter tile // will simply be ignored. 
// - model::ExplicitPadding padding; + ir::ExplicitPadding padding; padding.top = 0; padding.bottom = 0; @@ -46,10 +46,10 @@ model::ExplicitPadding validPadding(void) return padding; } -model::ExplicitPadding samePaddingUsingIFM(const model::FeatureShape &ifm_shape, - const model::Stride &stride, uint32_t kw, uint32_t kh) +ir::ExplicitPadding samePaddingUsingIFM(const model::FeatureShape &ifm_shape, + const ir::Stride &stride, uint32_t kw, uint32_t kh) { - model::ExplicitPadding padding; + ir::ExplicitPadding padding; // ANEURALNETWORKS_PADDING_SAME (from NNAPI spec) // @@ -76,9 +76,9 @@ model::ExplicitPadding samePaddingUsingIFM(const model::FeatureShape &ifm_shape, return padding; } -model::ExplicitPadding samePadding(const model::FeatureShape &ifm_shape, - const model::FeatureShape &ofm_shape, - const model::Stride &stride, uint32_t kw, uint32_t kh) +ir::ExplicitPadding samePadding(const model::FeatureShape &ifm_shape, + const model::FeatureShape &ofm_shape, const ir::Stride &stride, + uint32_t kw, uint32_t kh) { const int32_t vertical_expected_output = (ifm_shape.H + stride.vertical - 1) / stride.vertical; const int32_t horizontal_expected_output = @@ -93,20 +93,20 @@ model::ExplicitPadding samePadding(const model::FeatureShape &ifm_shape, return samePaddingUsingIFM(ifm_shape, stride, kw, kh); } -model::ExplicitPadding calculatePadding(const model::Padding &padding, - const model::FeatureShape &ifm_shape, - const model::FeatureShape &ofm_shape, - const model::Stride &stride, uint32_t kw, uint32_t kh) +ir::ExplicitPadding calculatePadding(const ir::Padding &padding, + const model::FeatureShape &ifm_shape, + const model::FeatureShape &ofm_shape, const ir::Stride &stride, + uint32_t kw, uint32_t kh) { - if (padding.type == model::PaddingType::EXPLICIT) + if (padding.type == ir::PaddingType::EXPLICIT) { return padding.param; } - else if (padding.type == model::PaddingType::SAME) + else if (padding.type == ir::PaddingType::SAME) { return samePadding(ifm_shape, ofm_shape, stride, kw, kh); } - else if (padding.type == model::PaddingType::VALID) + else if (padding.type == ir::PaddingType::VALID) { return validPadding(); } diff --git a/runtime/neurun/core/src/util/ShapeInference.cc b/runtime/neurun/core/src/util/ShapeInference.cc index ffb8dab..de30b9b 100644 --- a/runtime/neurun/core/src/util/ShapeInference.cc +++ b/runtime/neurun/core/src/util/ShapeInference.cc @@ -15,7 +15,7 @@ */ #include "util/Utils.h" -#include "model/InternalType.h" +#include "ir/InternalType.h" #include "model/Shape.h" #include "model/operation/AvgPool2D.h" #include "model/operation/MaxPool2D.h" @@ -68,22 +68,22 @@ model::Shape broadcastShapes(const model::Shape &lhs_shape, const model::Shape & // Calculate output height and width of convolution-like operation std::pair calcConvLikeHeightAndWidth(const int in_h, const int in_w, const int ker_h, - const int ker_w, const model::Padding pad, - const model::Stride stride) + const int ker_w, const ir::Padding pad, + const ir::Stride stride) { int32_t out_h = 0, out_w = 0; switch (pad.type) { - case model::PaddingType::SAME: + case ir::PaddingType::SAME: out_h = ceil_div(in_h, stride.vertical); out_w = ceil_div(in_w, stride.horizontal); break; - case model::PaddingType::VALID: + case ir::PaddingType::VALID: out_h = ceil_div(in_h - ker_h + 1, stride.vertical); out_w = ceil_div(in_w - ker_w + 1, stride.horizontal); break; - case model::PaddingType::EXPLICIT: + case ir::PaddingType::EXPLICIT: out_h = (in_h + pad.param.top + pad.param.bottom - ker_h) / stride.vertical + 1; out_w 
= (in_w + pad.param.left + pad.param.right - ker_w) / stride.horizontal + 1; break; diff --git a/runtime/neurun/core/src/util/Utils.cc b/runtime/neurun/core/src/util/Utils.cc index f8daa02..1e24e28 100644 --- a/runtime/neurun/core/src/util/Utils.cc +++ b/runtime/neurun/core/src/util/Utils.cc @@ -23,18 +23,18 @@ namespace neurun namespace util { -const char *to_string(const model::PaddingType &type) +const char *to_string(const ir::PaddingType type) { - assert((type == model::PaddingType::EXPLICIT) || (type == model::PaddingType::SAME) || - (type == model::PaddingType::VALID)); + assert((type == ir::PaddingType::EXPLICIT) || (type == ir::PaddingType::SAME) || + (type == ir::PaddingType::VALID)); switch (type) { - case model::PaddingType::EXPLICIT: + case ir::PaddingType::EXPLICIT: return "Padding::EXPLICIT"; - case model::PaddingType::SAME: + case ir::PaddingType::SAME: return "Padding::SAME"; - case model::PaddingType::VALID: + case ir::PaddingType::VALID: return "Padding::VALID"; } diff --git a/runtime/neurun/frontend/base_loader/base_loader.h b/runtime/neurun/frontend/base_loader/base_loader.h index d95678a..153e07a 100644 --- a/runtime/neurun/frontend/base_loader/base_loader.h +++ b/runtime/neurun/frontend/base_loader/base_loader.h @@ -65,7 +65,7 @@ protected: void loadModel(); // Helper functions - model::Activation convertActivation(ActivationFunctionType type); + ir::Activation convertActivation(ActivationFunctionType type); ir::DataType tensorTypeToDataType(TensorType type); // Create operands form tflite::Tensor @@ -152,21 +152,21 @@ void BaseLoader::BaseLoader::loadFromFile(const ch } template -model::Activation BaseLoader::BaseLoader::convertActivation( +ir::Activation BaseLoader::BaseLoader::convertActivation( const ActivationFunctionType type) { switch (type) { case ActivationFunctionType::ActivationFunctionType_NONE: - return model::Activation::NONE; + return ir::Activation::NONE; case ActivationFunctionType::ActivationFunctionType_RELU: - return model::Activation::RELU; + return ir::Activation::RELU; case ActivationFunctionType::ActivationFunctionType_RELU_N1_TO_1: - return model::Activation::RELU1; + return ir::Activation::RELU1; case ActivationFunctionType::ActivationFunctionType_RELU6: - return model::Activation::RELU6; + return ir::Activation::RELU6; case ActivationFunctionType::ActivationFunctionType_TANH: - return model::Activation::TANH; + return ir::Activation::TANH; default: throw std::runtime_error(std::string("Unsupported activation type: ") .append(EnumNameActivationFunctionType(type))); @@ -284,9 +284,9 @@ void BaseLoader::loadStridesAndPaddings(Param &par param.stride.horizontal = options->stride_h(); // Paddings if (options->padding() == Padding::Padding_SAME) - param.padding.type = model::PaddingType::SAME; + param.padding.type = ir::PaddingType::SAME; if (options->padding() == Padding::Padding_VALID) - param.padding.type = model::PaddingType::VALID; + param.padding.type = ir::PaddingType::VALID; // param paddings indexes unused } diff --git a/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h b/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h index d736414..093c66f 100644 --- a/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h +++ b/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h @@ -26,7 +26,7 @@ #include #include -#include +#include class NNAPIConvert { @@ -65,14 +65,14 @@ public: * @param[in] act NNAPI's FuseCode type * @return neurun's internal activation type */ - static ::neurun::model::Activation getFusedActivation(FuseCode act); + static 
neurun::ir::Activation getFusedActivation(FuseCode act); /** * @brief Convert NNAPI PaddingCode to internal padding type - * @param[in] act NNAPI's PaddingCode type + * @param[in] type NNAPI's PaddingCode type * @return neurun's internal padding type */ - static ::neurun::model::PaddingType getPaddingType(PaddingCode type); + static neurun::ir::PaddingType getPaddingType(PaddingCode type); }; #endif // __NEURUN_NNAPI_CONVERT_H__ diff --git a/runtime/neurun/test/core/compiler/Scheduler.cc b/runtime/neurun/test/core/compiler/Scheduler.cc index 66bfc3e..3623608 100644 --- a/runtime/neurun/test/core/compiler/Scheduler.cc +++ b/runtime/neurun/test/core/compiler/Scheduler.cc @@ -19,7 +19,7 @@ #include #include -#include +#include #include #include diff --git a/runtime/neurun/test/core/exec/ExecInstance.cc b/runtime/neurun/test/core/exec/ExecInstance.cc index bbe8ba7..c31af3d 100644 --- a/runtime/neurun/test/core/exec/ExecInstance.cc +++ b/runtime/neurun/test/core/exec/ExecInstance.cc @@ -57,12 +57,12 @@ public: 16)); // 2nd add operations (result2 <= result1 + rhs2) operation::Add::Param param1; - param1.activation = neurun::model::Activation::NONE; + param1.activation = Activation::NONE; auto input_set1 = OperandIndexSequence{operand_lhs, operand_rhs1}; auto output_set1 = OperandIndexSequence{operand_result1}; graph->addOperation(nnfw::cpp14::make_unique(input_set1, output_set1, param1)); operation::Add::Param param2; - param2.activation = neurun::model::Activation::NONE; + param2.activation = Activation::NONE; auto input_set2 = OperandIndexSequence{operand_result1, operand_rhs2}; auto output_set2 = OperandIndexSequence{operand_result2}; graph->addOperation(nnfw::cpp14::make_unique(input_set2, output_set2, param2)); diff --git a/runtime/neurun/test/core/exec/interp/ExecManager.cc b/runtime/neurun/test/core/exec/interp/ExecManager.cc index 2eba0ab..69acb74 100644 --- a/runtime/neurun/test/core/exec/interp/ExecManager.cc +++ b/runtime/neurun/test/core/exec/interp/ExecManager.cc @@ -58,7 +58,7 @@ protected: // Add operations operation::Add::Param param; - param.activation = neurun::model::Activation::NONE; + param.activation = Activation::NONE; auto input_set = OperandIndexSequence{operand_lhs, operand_rhs}; auto output_set = OperandIndexSequence{operand_result}; _graph->addOperation(nnfw::cpp14::make_unique(input_set, output_set, param)); @@ -108,13 +108,13 @@ protected: // 2nd add operations (result2 <= result1 + rhs2) operation::Add::Param param1; - param1.activation = neurun::model::Activation::NONE; + param1.activation = Activation::NONE; auto input_set1 = OperandIndexSequence{operand_lhs, operand_rhs1}; auto output_set1 = OperandIndexSequence{operand_result1}; _graph->addOperation(nnfw::cpp14::make_unique(input_set1, output_set1, param1)); operation::Add::Param param2; - param2.activation = neurun::model::Activation::NONE; + param2.activation = Activation::NONE; auto input_set2 = OperandIndexSequence{operand_result1, operand_rhs2}; auto output_set2 = OperandIndexSequence{operand_result2}; _graph->addOperation(nnfw::cpp14::make_unique(input_set2, output_set2, param2)); @@ -160,7 +160,7 @@ protected: // Add operations operation::Add::Param param; - param.activation = neurun::model::Activation::NONE; + param.activation = Activation::NONE; auto input_set = OperandIndexSequence{operand_lhs, operand_rhs}; auto output_set = OperandIndexSequence{operand_result}; _graph->addOperation(nnfw::cpp14::make_unique(input_set, output_set, param)); diff --git a/runtime/neurun/test/graph/operation/SetIO.cc 
b/runtime/neurun/test/graph/operation/SetIO.cc index ab0193f..95f1f13 100644 --- a/runtime/neurun/test/graph/operation/SetIO.cc +++ b/runtime/neurun/test/graph/operation/SetIO.cc @@ -45,10 +45,10 @@ TEST(graph_operation_setIO, operation_setIO_conv) IndexSet inputs{input_operand, kernel_operand, bias_operand}; Graph::Param conv_params; - conv_params.padding.type = neurun::model::PaddingType::SAME; + conv_params.padding.type = neurun::ir::PaddingType::SAME; conv_params.stride.horizontal = 1; conv_params.stride.vertical = 1; - conv_params.activation = neurun::model::Activation::NONE; + conv_params.activation = neurun::ir::Activation::NONE; auto output_operand = graph.addOperand(shape, type).value(); IndexSet outputs{output_operand}; -- 2.7.4 From b703003a69b61923049c2f35d67c074de9460062 Mon Sep 17 00:00:00 2001 From: Sergei Barannikov/AI Tools Lab /SRR/Engineer/Samsung Electronics Date: Fri, 6 Dec 2019 14:39:36 +0300 Subject: [PATCH 06/16] [neurun] Move TypeInfo.h and Shape.h into ir directory (#9427) * Move `TypeInfo.h` and `Shape.h` into the `ir` directory. * Move `TypeInfo`, `Shape`, `FeatureShape` to the `neurun::ir` namespace, fixing uses where possible. Signed-off-by: Sergei Barannikov ---
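Note on the ShapeFixer hunks in this patch: each binary-operation visitor extends the lower-rank operand's shape to the common broadcast rank before handing the tensors to ACL. A minimal sketch of that rank extension, under the assumption (not stated in this patch) that ir::Shape::extendRank fills the missing leading dimensions with 1, numpy-style; extend_rank is a hypothetical stand-in, not repository code:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // extend_rank({3}, 3) -> {1, 1, 3}: prepend singleton dimensions up to to_rank.
    std::vector<int32_t> extend_rank(std::vector<int32_t> dims, std::size_t to_rank)
    {
      if (dims.size() < to_rank)
        dims.insert(dims.begin(), to_rank - dims.size(), 1);
      return dims;
    }

Once both operands have the same rank, elementwise broadcasting (e.g. {1, 1, 3} against {2, 4, 3}) is well-defined for the backend kernels.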
From b703003a69b61923049c2f35d67c074de9460062 Mon Sep 17 00:00:00 2001 From: Sergei Barannikov/AI Tools Lab /SRR/Engineer/Samsung Electronics Date: Fri, 6 Dec 2019 14:39:36 +0300 Subject: [PATCH 06/16] [neurun] Move TypeInfo.h and Shape.h into ir directory (#9427) * Move `TypeInfo.h` and `Shape.h` into the `ir` directory. * Move `TypeInfo`, `Shape`, and `FeatureShape` to the `neurun::ir` namespace, fixing uses where possible. Signed-off-by: Sergei Barannikov --- runtime/neurun/backend/acl_cl/KernelGenerator.cc | 2 +- runtime/neurun/backend/acl_cl/ShapeFixer.cc | 38 +++++++++++----------- runtime/neurun/backend/acl_common/Convert.cc | 8 ++--- runtime/neurun/backend/acl_common/Convert.h | 11 +++---- runtime/neurun/backend/acl_neon/KernelGenerator.cc | 4 +-- runtime/neurun/backend/acl_neon/ShapeFixer.cc | 36 ++++++++++---------- runtime/neurun/backend/cpu/ShapeFixer.cc | 12 +++---- runtime/neurun/backend/cpu/kernel/PermuteLayer.cc | 2 +- runtime/neurun/backend/cpu/kernel/PermuteLayer.h | 7 ++-- runtime/neurun/backend/srcn/Convert.cc | 18 +++++----- runtime/neurun/backend/srcn/Convert.h | 14 ++++---- .../neurun/core/include/backend/ITensorRegister.h | 8 ++--- .../neurun/core/include/compiler/SubTensorInfo.h | 8 ++--- runtime/neurun/core/include/exec/Execution.h | 7 ++-- runtime/neurun/core/include/ir/Graph.h | 2 +- runtime/neurun/core/include/{model => ir}/Shape.h | 15 ++++++--- .../neurun/core/include/{model => ir}/TypeInfo.h | 15 ++++++--- runtime/neurun/core/include/model/OperandInfo.h | 4 +-- runtime/neurun/core/include/util/Padding.h | 11 +++---- runtime/neurun/core/include/util/ShapeInference.h | 16 ++++----- runtime/neurun/core/src/exec/Execution.cc | 8 ++--- runtime/neurun/core/src/exec/ExecutorBase.cc | 8 ++--- runtime/neurun/core/src/exec/ExecutorBase.h | 6 ++-- runtime/neurun/core/src/exec/Sink.h | 8 ++--- runtime/neurun/core/src/exec/Source.h | 10 +++--- .../src/exec/interp/operations/FullyConnected.cc | 2 +- .../src/exec/interp/operations/OperationUtil.h | 6 ++-- runtime/neurun/core/src/ir/Graph.cc | 2 +- runtime/neurun/core/src/{model => ir}/Shape.cc | 6 ++-- runtime/neurun/core/src/{model => ir}/TypeInfo.cc | 6 ++-- .../neurun/core/src/ir/operand/Shape4DConvert.h | 2 +- runtime/neurun/core/src/util/Padding.cc | 13 ++++---- runtime/neurun/core/src/util/ShapeInference.cc | 34 +++++++++---------- runtime/neurun/frontend/base_loader/base_loader.h | 8 ++--- .../neurun/frontend/nnapi/wrapper/NNAPIConvert.h | 8 ++--- runtime/neurun/test/core/compiler/Scheduler.cc | 4 +-- runtime/neurun/test/graph/operand/Set.cc | 6 ++-- runtime/neurun/test/graph/operand/UseDef.cc | 4 +-- runtime/neurun/test/graph/operation/SetIO.cc | 8 ++--- runtime/neurun/test/graph/verifier/Verifier.cc | 4 +-- 40 files changed, 196 insertions(+), 195 deletions(-) rename runtime/neurun/core/include/{model => ir}/Shape.h (89%) rename runtime/neurun/core/include/{model => ir}/TypeInfo.h (85%) rename runtime/neurun/core/src/{model => ir}/Shape.cc (97%) rename runtime/neurun/core/src/{model => ir}/TypeInfo.cc (94%) diff --git a/runtime/neurun/backend/acl_cl/KernelGenerator.cc b/runtime/neurun/backend/acl_cl/KernelGenerator.cc index 0be03c5..436366a 100644 --- a/runtime/neurun/backend/acl_cl/KernelGenerator.cc +++ b/runtime/neurun/backend/acl_cl/KernelGenerator.cc @@ -454,7 +454,7 @@ void KernelGenerator::visit(const model::operation::FullyConnected &node) // Check for reshaping input's shape into rank-2 bool needs_reshape = false; - neurun::model::Shape reshape(2); + ir::Shape reshape(2); if (input_rank == 4) { const auto feature_size = _ctx.at(input_index).shape().num_elements(); diff --git a/runtime/neurun/backend/acl_cl/ShapeFixer.cc b/runtime/neurun/backend/acl_cl/ShapeFixer.cc index 58efe0d..674b3d6 100644 --- a/runtime/neurun/backend/acl_cl/ShapeFixer.cc +++ b/runtime/neurun/backend/acl_cl/ShapeFixer.cc @@ -96,8 +96,8 @@ void ShapeFixer::visit(const model::operation::Mul &node) // TODO remove const_cast later. For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } @@ -115,7 +115,7 @@ void ShapeFixer::visit(const model::operation::Squeeze &node) { const auto output_index{node.getOutputs().at(0)}; if (_ctx.at(output_index).shape().rank() == 0) - const_cast<::neurun::model::Shape &>(_ctx.at(output_index).shape()).extendRank(1); + const_cast<ir::Shape &>(_ctx.at(output_index).shape()).extendRank(1); const auto input_index{node.getInputs().at(model::operation::Squeeze::Input::INPUT)}; _tensor_builder->dimCorrection(input_index, false); _tensor_builder->dimCorrection(output_index, false); @@ -138,8 +138,8 @@ void ShapeFixer::visit(const model::operation::Add &node) { const auto broadcast_rank = std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank()); - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } @@ -155,8 +155,8 @@ void ShapeFixer::visit(const model::operation::Sub &node) // TODO remove const_cast later. For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } @@ -172,8 +172,8 @@ void ShapeFixer::visit(const model::operation::Div &node) // TODO remove const_cast later. For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } @@ -195,8 +195,8 @@ void ShapeFixer::visit(const model::operation::LogicalAnd &node) // TODO remove const_cast later. For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); } } @@ -216,8 +216,8 @@ void ShapeFixer::visit(const model::operation::Comparison &node) // TODO remove const_cast later. For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); } } @@ -278,8 +278,8 @@ void ShapeFixer::visit(const model::operation::PReLU &node) { const auto broadcast_rank = std::max(_ctx.at(ifm_index).shape().rank(), _ctx.at(alpha_index).shape().rank()); - const_cast<::neurun::model::Shape &>(_ctx.at(ifm_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(alpha_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(ifm_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(alpha_index).shape()).extendRank(broadcast_rank); } } @@ -296,8 +296,8 @@ void ShapeFixer::visit(const model::operation::LogicalOr &node) { const auto broadcast_rank = std::max(_ctx.at(input0_index).shape().rank(), _ctx.at(input1_index).shape().rank()); - const_cast<::neurun::model::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); } } @@ -312,8 +312,8 @@ void ShapeFixer::visit(const model::operation::SquaredDifference &node) { const auto broadcast_rank = std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank()); - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } diff --git a/runtime/neurun/backend/acl_common/Convert.cc b/runtime/neurun/backend/acl_common/Convert.cc index b3e22e6..c39fa66 100644 --- a/runtime/neurun/backend/acl_common/Convert.cc +++ b/runtime/neurun/backend/acl_common/Convert.cc @@ -45,9 +45,8 @@ namespace backend namespace acl_common { -::arm_compute::TensorShape asTensorShape(const ::neurun::model::Shape
&shape, - ir::Layout frontend_layout, ir::Layout backend_layout, - bool apply_dim_correction) +::arm_compute::TensorShape asTensorShape(const ir::Shape &shape, ir::Layout frontend_layout, + ir::Layout backend_layout, bool apply_dim_correction) { const uint32_t rank = shape.rank(); @@ -113,8 +112,7 @@ namespace acl_common return ::arm_compute::QuantizationInfo(scale, offset); } -::arm_compute::TensorInfo asTensorInfo(const ::neurun::model::Shape &shape, - const ::neurun::model::TypeInfo &typeInfo, +::arm_compute::TensorInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo, ir::Layout frontend_layout, ir::Layout backend_layout, bool apply_dim_correction) { diff --git a/runtime/neurun/backend/acl_common/Convert.h b/runtime/neurun/backend/acl_common/Convert.h index f8564b7..33e6815 100644 --- a/runtime/neurun/backend/acl_common/Convert.h +++ b/runtime/neurun/backend/acl_common/Convert.h @@ -24,8 +24,8 @@ #include "ir/Layout.h" #include "ir/InternalType.h" #include "model/Operand.h" -#include "model/Shape.h" -#include "model/TypeInfo.h" +#include "ir/Shape.h" +#include "ir/TypeInfo.h" #include "misc/feature/Shape.h" #include "misc/kernel/Shape.h" @@ -41,15 +41,14 @@ namespace backend namespace acl_common { -::arm_compute::TensorShape asTensorShape(const ::neurun::model::Shape &shape, - ir::Layout frontend_layout, ir::Layout backend_layout, +::arm_compute::TensorShape asTensorShape(const ir::Shape &shape, ir::Layout frontend_layout, + ir::Layout backend_layout, bool apply_dim_correction = true); ::arm_compute::Coordinates asTensorCoordinate(const ::neurun::util::Coordinates &coord, ir::Layout frontend_layout, ir::Layout backend_layout); ::arm_compute::DataType asDataType(ir::DataType type); -::arm_compute::TensorInfo asTensorInfo(const ::neurun::model::Shape &shape, - const ::neurun::model::TypeInfo &typeInfo, +::arm_compute::TensorInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo, ir::Layout frontend_layout, ir::Layout backend_layout, bool apply_dim_correction = true); diff --git a/runtime/neurun/backend/acl_neon/KernelGenerator.cc b/runtime/neurun/backend/acl_neon/KernelGenerator.cc index 84e9177..080a38e 100644 --- a/runtime/neurun/backend/acl_neon/KernelGenerator.cc +++ b/runtime/neurun/backend/acl_neon/KernelGenerator.cc @@ -622,10 +622,10 @@ void KernelGenerator::visit(const model::operation::FullyConnected &node) // Check for reshaping input's shape into rank-2 bool needs_reshape = false; - neurun::model::Shape reshape(2); + ir::Shape reshape(2); if (input_rank == 4) { - model::FeatureShape ifm_shape_feature = + ir::FeatureShape ifm_shape_feature = _ctx.at(input_index).shape().asFeature(_current_subg_layout); auto feature_size = ifm_shape_feature.N * ifm_shape_feature.C * ifm_shape_feature.H * ifm_shape_feature.W; diff --git a/runtime/neurun/backend/acl_neon/ShapeFixer.cc b/runtime/neurun/backend/acl_neon/ShapeFixer.cc index 80f539a..54c95a7 100644 --- a/runtime/neurun/backend/acl_neon/ShapeFixer.cc +++ b/runtime/neurun/backend/acl_neon/ShapeFixer.cc @@ -156,8 +156,8 @@ void ShapeFixer::visit(const model::operation::LogicalAnd &node) // TODO remove const_cast later. 
For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); } } @@ -175,8 +175,8 @@ void ShapeFixer::visit(const model::operation::LogicalOr &node) // TODO remove const_cast later. For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); } } @@ -206,8 +206,8 @@ void ShapeFixer::visit(const model::operation::Mul &node) // TODO remove const_cast later. For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } @@ -224,8 +224,8 @@ void ShapeFixer::visit(const model::operation::PReLU &node) { const auto broadcast_rank = std::max(_ctx.at(ifm_index).shape().rank(), _ctx.at(alpha_index).shape().rank()); - const_cast<::neurun::model::Shape &>(_ctx.at(ifm_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(alpha_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(ifm_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(alpha_index).shape()).extendRank(broadcast_rank); } } @@ -266,8 +266,8 @@ void ShapeFixer::visit(const model::operation::Comparison &node) // TODO remove const_cast later. For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); } } @@ -319,8 +319,8 @@ void ShapeFixer::visit(const model::operation::SquaredDifference &node) // TODO remove const_cast later. For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } @@ -335,8 +335,8 @@ void ShapeFixer::visit(const model::operation::Sub &node) std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank()); // TODO remove const_cast later. For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } @@ -361,8 +361,8 @@ void ShapeFixer::visit(const model::operation::Add &node) { const auto broadcast_rank = std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank()); - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } @@ -378,8 +378,8 @@ void ShapeFixer::visit(const model::operation::Div &node) // TODO remove const_cast later. For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } diff --git a/runtime/neurun/backend/cpu/ShapeFixer.cc b/runtime/neurun/backend/cpu/ShapeFixer.cc index 679d2cd..5ec7ebd 100644 --- a/runtime/neurun/backend/cpu/ShapeFixer.cc +++ b/runtime/neurun/backend/cpu/ShapeFixer.cc @@ -94,8 +94,8 @@ void ShapeFixer::visit(const model::operation::Add &node) throw std::runtime_error{"ShapeFixer: NYI for broadcast Add"}; const auto broadcast_rank = std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank()); - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } @@ -118,8 +118,8 @@ void ShapeFixer::visit(const model::operation::Sub &node) throw std::runtime_error{"ShapeFixer: NYI for broadcast Sub"}; const auto broadcast_rank = std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank()); - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } void ShapeFixer::visit(const model::operation::Mul &node) @@ -140,8 +140,8 @@ void ShapeFixer::visit(const model::operation::Mul &node) throw std::runtime_error{"ShapeFixer: NYI for broadcast Mul"}; const auto broadcast_rank = std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank()); - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); +
const_cast<ir::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } diff --git a/runtime/neurun/backend/cpu/kernel/PermuteLayer.cc b/runtime/neurun/backend/cpu/kernel/PermuteLayer.cc index fc758cf..9d596cb 100644 --- a/runtime/neurun/backend/cpu/kernel/PermuteLayer.cc +++ b/runtime/neurun/backend/cpu/kernel/PermuteLayer.cc @@ -29,7 +29,7 @@ using Type = model::operation::Permute::Type; void PermuteLayer::configure(std::shared_ptr input, std::shared_ptr output, - const model::Shape &output_shape, Type type, ir::DataType dataType) + const ir::Shape &output_shape, Type type, ir::DataType dataType) { _input = input; _output = output; diff --git a/runtime/neurun/backend/cpu/kernel/PermuteLayer.h b/runtime/neurun/backend/cpu/kernel/PermuteLayer.h index 3fec953..cf89168 100644 --- a/runtime/neurun/backend/cpu/kernel/PermuteLayer.h +++ b/runtime/neurun/backend/cpu/kernel/PermuteLayer.h @@ -43,9 +43,8 @@ public: public: void configure(std::shared_ptr input, - std::shared_ptr output, - const model::Shape &output_shape, model::operation::Permute::Type type, - ir::DataType dataType); + std::shared_ptr output, const ir::Shape &output_shape, + model::operation::Permute::Type type, ir::DataType dataType); void run(); void runSync() { @@ -197,7 +196,7 @@ private: private: std::shared_ptr _input{nullptr}; std::shared_ptr _output{nullptr}; - model::Shape _output_shape{}; + ir::Shape _output_shape{}; model::operation::Permute::Type _type{model::operation::Permute::Type::COPY}; ir::DataType _dataType{ir::DataType::FLOAT32}; }; diff --git a/runtime/neurun/backend/srcn/Convert.cc b/runtime/neurun/backend/srcn/Convert.cc index 267f62b..46b11ca 100644 --- a/runtime/neurun/backend/srcn/Convert.cc +++ b/runtime/neurun/backend/srcn/Convert.cc @@ -29,13 +29,13 @@ namespace backend namespace srcn { -model::Shape asKernelShape(const model::Shape &shape, kernel::FilterLayout frontend_layout, - kernel::FilterLayout backend_layout) +ir::Shape asKernelShape(const ir::Shape &shape, kernel::FilterLayout frontend_layout, + kernel::FilterLayout backend_layout) { assert(shape.rank() == 4); if (frontend_layout == backend_layout) { - return model::Shape{shape.dim(0), shape.dim(1), shape.dim(2), shape.dim(3)}; + return ir::Shape{shape.dim(0), shape.dim(1), shape.dim(2), shape.dim(3)}; } const auto permutation = getFilterPermutation(frontend_layout, backend_layout); @@ -43,16 +43,16 @@ model::Shape asKernelShape(const model::Shape &shape, kernel::FilterLayout front { throw std::runtime_error("Not supported FilterLayout"); } - return model::Shape{shape.dim(permutation[0]), shape.dim(permutation[1]), - shape.dim(permutation[2]), shape.dim(permutation[3])}; + return ir::Shape{shape.dim(permutation[0]), shape.dim(permutation[1]), shape.dim(permutation[2]), + shape.dim(permutation[3])}; } -model::Shape asTensorShape(const model::Shape &shape, ir::Layout frontend_layout, - ir::Layout backend_layout) +ir::Shape asTensorShape(const ir::Shape &shape, ir::Layout frontend_layout, + ir::Layout backend_layout) { const uint32_t rank = shape.rank(); - model::Shape ret(rank); + ir::Shape ret(rank); for (uint32_t axis = 0; axis < rank; ++axis) { const auto ncnn_axis = ToNCNNAxis(rank, axis, frontend_layout, backend_layout); @@ -62,7 +62,7 @@ model::Shape asTensorShape(const model::Shape &shape, ir::Layout frontend_layout return ret; } -model::OperandInfo asTensorInfo(const model::Shape &shape, const model::TypeInfo &typeInfo, +model::OperandInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo, ir::Layout frontend_layout,
ir::Shape &shape, const void *buffer, size_t length, ir::Layout layout = ir::Layout::NHWC); /** * @brief Set output data's information @@ -92,9 +92,8 @@ public: * @param[in] length Output data's length * @param[in] layout Output data's data format */ - void setOutput(const model::IOIndex &index, const model::TypeInfo &type, - const model::Shape &shape, void *buffer, size_t length, - ir::Layout layout = ir::Layout::NHWC); + void setOutput(const model::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape, + void *buffer, size_t length, ir::Layout layout = ir::Layout::NHWC); /** * @brief Set input data's data format * @param[in] index Input index diff --git a/runtime/neurun/core/include/ir/Graph.h b/runtime/neurun/core/include/ir/Graph.h index be78f3a..0ad5135 100644 --- a/runtime/neurun/core/include/ir/Graph.h +++ b/runtime/neurun/core/include/ir/Graph.h @@ -124,7 +124,7 @@ public: // Graph Building public: - model::OperandIndex addOperand(const model::Shape &shape, const model::TypeInfo &type); + model::OperandIndex addOperand(const ir::Shape &shape, const ir::TypeInfo &type); model::OperationIndex addOperation(std::unique_ptr &&node); void setOperandValue(const model::OperandIndex &ind, std::unique_ptr &&data); void addInput(const model::OperandIndex &ind); diff --git a/runtime/neurun/core/include/model/Shape.h b/runtime/neurun/core/include/ir/Shape.h similarity index 89% rename from runtime/neurun/core/include/model/Shape.h rename to runtime/neurun/core/include/ir/Shape.h index e7d2553..fed2ae8 100644 --- a/runtime/neurun/core/include/model/Shape.h +++ b/runtime/neurun/core/include/ir/Shape.h @@ -14,8 +14,8 @@ * limitations under the License. */ -#ifndef __NEURUN_MODEL_SHAPE_H__ -#define __NEURUN_MODEL_SHAPE_H__ +#ifndef __NEURUN_IR_SHAPE_H__ +#define __NEURUN_IR_SHAPE_H__ #include "ir/Layout.h" #include "misc/feature/Shape.h" @@ -25,7 +25,7 @@ namespace neurun { -namespace model +namespace ir { // TODO Remove this dependency. @@ -77,7 +77,14 @@ private: inline bool operator==(const Shape &lhs, const Shape &rhs) { return lhs.dims() == rhs.dims(); } +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces. +namespace model +{ +using FeatureShape = ir::FeatureShape; +using Shape = ir::Shape; } // namespace model } // namespace neurun -#endif // __NEURUN_MODEL_SHAPE_H__ +#endif // __NEURUN_IR_SHAPE_H__ diff --git a/runtime/neurun/core/include/model/TypeInfo.h b/runtime/neurun/core/include/ir/TypeInfo.h similarity index 85% rename from runtime/neurun/core/include/model/TypeInfo.h rename to runtime/neurun/core/include/ir/TypeInfo.h index 7b29085..77f8b4e 100644 --- a/runtime/neurun/core/include/model/TypeInfo.h +++ b/runtime/neurun/core/include/ir/TypeInfo.h @@ -14,8 +14,8 @@ * limitations under the License. */ -#ifndef __NEURUN_MODEL_TYPEINFO_H__ -#define __NEURUN_MODEL_TYPEINFO_H__ +#ifndef __NEURUN_IR_TYPEINFO_H__ +#define __NEURUN_IR_TYPEINFO_H__ #include @@ -23,7 +23,7 @@ namespace neurun { -namespace model +namespace ir { class TypeInfo @@ -53,7 +53,14 @@ private: bool operator==(const TypeInfo &lhs, const TypeInfo &rhs); bool operator!=(const TypeInfo &lhs, const TypeInfo &rhs); +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces. 
+namespace model +{ +using TypeInfo = ir::TypeInfo; } // namespace model + } // namespace neurun -#endif // __NEURUN_MODEL_TYPEINFO_H__ +#endif // __NEURUN_IR_TYPEINFO_H__ diff --git a/runtime/neurun/core/include/model/OperandInfo.h b/runtime/neurun/core/include/model/OperandInfo.h index 66272c7..7a97d06 100644 --- a/runtime/neurun/core/include/model/OperandInfo.h +++ b/runtime/neurun/core/include/model/OperandInfo.h @@ -21,8 +21,8 @@ #ifndef __NEURUN_MODEL_OPERAND_INFO_H__ #define __NEURUN_MODEL_OPERAND_INFO_H__ -#include "Shape.h" -#include "TypeInfo.h" +#include "ir/Shape.h" +#include "ir/TypeInfo.h" #include "ir/Layout.h" namespace neurun diff --git a/runtime/neurun/core/include/util/Padding.h b/runtime/neurun/core/include/util/Padding.h index 7bc8b65..3c707b8 100644 --- a/runtime/neurun/core/include/util/Padding.h +++ b/runtime/neurun/core/include/util/Padding.h @@ -19,7 +19,7 @@ #include -#include "model/Shape.h" +#include "ir/Shape.h" #include "ir/InternalType.h" namespace neurun @@ -28,12 +28,11 @@ namespace util { ir::ExplicitPadding validPadding(void); -ir::ExplicitPadding samePadding(const model::FeatureShape &ifm_shape, - const model::FeatureShape &ofm_shape, const ir::Stride &stride, +ir::ExplicitPadding samePadding(const ir::FeatureShape &ifm_shape, + const ir::FeatureShape &ofm_shape, const ir::Stride &stride, uint32_t kw, uint32_t kh); -ir::ExplicitPadding calculatePadding(const ir::Padding &padding, - const model::FeatureShape &ifm_shape, - const model::FeatureShape &ofm_shape, const ir::Stride &stride, +ir::ExplicitPadding calculatePadding(const ir::Padding &padding, const ir::FeatureShape &ifm_shape, + const ir::FeatureShape &ofm_shape, const ir::Stride &stride, uint32_t kw, uint32_t kh); } // namespace util diff --git a/runtime/neurun/core/include/util/ShapeInference.h b/runtime/neurun/core/include/util/ShapeInference.h index fce8bf2..1a6e5ec 100644 --- a/runtime/neurun/core/include/util/ShapeInference.h +++ b/runtime/neurun/core/include/util/ShapeInference.h @@ -31,29 +31,27 @@ namespace neurun namespace shape_inference { -using Shapes = std::vector<model::Shape>; +using Shapes = std::vector<ir::Shape>; -Shapes inferEltwiseShape(const model::Shape &lhs_shape, const model::Shape &rhs_shape); +Shapes inferEltwiseShape(const ir::Shape &lhs_shape, const ir::Shape &rhs_shape); -Shapes inferAvgPoolShape(const model::Shape &in_shape, - const model::operation::AvgPool2D::Param &param, +Shapes inferAvgPoolShape(const ir::Shape &in_shape, const model::operation::AvgPool2D::Param &param, ir::Layout layout = ir::Layout::NHWC); Shapes inferConcatShape(const Shapes &in_shapes, const model::operation::Concat::Param &param); -Shapes inferMaxPoolShape(const model::Shape &in_shape, - const model::operation::MaxPool2D::Param &param, +Shapes inferMaxPoolShape(const ir::Shape &in_shape, const model::operation::MaxPool2D::Param &param, ir::Layout layout = ir::Layout::NHWC); -Shapes inferConv2DShape(const model::Shape &in_shape, const model::Shape &ker_shape, +Shapes inferConv2DShape(const ir::Shape &in_shape, const ir::Shape &ker_shape, const model::operation::Conv2D::Param &param, ir::Layout layout = ir::Layout::NHWC); -Shapes inferDepthwiseConv2DShape(const model::Shape &in_shape, const model::Shape &ker_shape, +Shapes inferDepthwiseConv2DShape(const ir::Shape &in_shape, const ir::Shape &ker_shape, const model::operation::DepthwiseConv2D::Param &param, ir::Layout layout = ir::Layout::NHWC); -Shapes inferFullyConnectedShape(const model::Shape &in_shape, const model::Shape &ker_shape); +Shapes inferFullyConnectedShape(const ir::Shape &in_shape, const ir::Shape &ker_shape); } // namespace shape_inference } // namespace neurun
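The element-wise inference helpers above follow the usual right-aligned, NumPy-style broadcast rule (implemented by broadcastShapes in ShapeInference.cc, shown further below): ranks are aligned at the trailing dimension, and each dimension pair must be equal or contain a 1. The following self-contained sketch illustrates the semantics with plain vectors; it is an illustration of the rule, not the project's API.

// Self-contained illustration of the broadcast rule (not neurun code).
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

using Dims = std::vector<int32_t>; // stand-in for the dims of an ir::Shape

Dims broadcast(const Dims &lhs, const Dims &rhs)
{
  const size_t max_rank = std::max(lhs.size(), rhs.size());
  Dims out(max_rank);
  for (size_t i = 0; i < max_rank; ++i)
  {
    // Read dimensions from the back; a missing dimension acts as 1.
    const int32_t l = i < lhs.size() ? lhs[lhs.size() - 1 - i] : 1;
    const int32_t r = i < rhs.size() ? rhs[rhs.size() - 1 - i] : 1;
    assert(l == r || l == 1 || r == 1); // incompatible dimensions are an error
    out[max_rank - 1 - i] = std::max(l, r);
  }
  return out;
}

int main()
{
  // {3, 1, 5} broadcast with {4, 1} yields {3, 4, 5}
  for (int32_t d : broadcast({3, 1, 5}, {4, 1}))
    std::cout << d << ' ';
  std::cout << '\n';
}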
diff --git a/runtime/neurun/core/src/exec/Execution.cc b/runtime/neurun/core/src/exec/Execution.cc index e135617..bbbbba2 100644 --- a/runtime/neurun/core/src/exec/Execution.cc +++ b/runtime/neurun/core/src/exec/Execution.cc @@ -46,8 +46,8 @@ void Execution::setInput(const model::IOIndex &index, const void *buffer, size_t } // TODO Remove default parameter -void Execution::setInput(const model::IOIndex &index, const model::TypeInfo &type, - const model::Shape &shape, const void *buffer, size_t length, +void Execution::setInput(const model::IOIndex &index, const ir::TypeInfo &type, + const ir::Shape &shape, const void *buffer, size_t length, ir::Layout layout) { const model::OperandInfo info{shape, type}; @@ -78,8 +78,8 @@ void Execution::setOutput(const model::IOIndex &index, void *buffer, size_t leng } // TODO Remove default parameter -void Execution::setOutput(const model::IOIndex &index, const model::TypeInfo &type, - const model::Shape &shape, void *buffer, size_t length, ir::Layout layout) +void Execution::setOutput(const model::IOIndex &index, const ir::TypeInfo &type, + const ir::Shape &shape, void *buffer, size_t length, ir::Layout layout) { const model::OperandInfo info{shape, type}; diff --git a/runtime/neurun/core/src/exec/ExecutorBase.cc b/runtime/neurun/core/src/exec/ExecutorBase.cc index ba316ed..2a2bf5e 100644 --- a/runtime/neurun/core/src/exec/ExecutorBase.cc +++ b/runtime/neurun/core/src/exec/ExecutorBase.cc @@ -30,9 +30,9 @@ ExecutorBase::ExecutorBase(const graph::Graph &graph, // DO NOTHING } -std::unique_ptr<ISource> ExecutorBase::source(const model::IOIndex &index, - const model::TypeInfo &type, const void *buffer, - size_t length, ir::Layout io_layout) +std::unique_ptr<ISource> ExecutorBase::source(const model::IOIndex &index, const ir::TypeInfo &type, + const void *buffer, size_t length, + ir::Layout io_layout) { using ir::DataType; switch (type.type()) @@ -51,7 +51,7 @@ std::unique_ptr<ISource> ExecutorBase::source(const model::IOIndex &index, } } -std::unique_ptr<ISink> ExecutorBase::sink(const model::IOIndex &index, const model::TypeInfo &type, +std::unique_ptr<ISink> ExecutorBase::sink(const model::IOIndex &index, const ir::TypeInfo &type, void *buffer, size_t length, ir::Layout io_layout) { using ir::DataType; diff --git a/runtime/neurun/core/src/exec/ExecutorBase.h b/runtime/neurun/core/src/exec/ExecutorBase.h index 5abec6b..618d14b 100644 --- a/runtime/neurun/core/src/exec/ExecutorBase.h +++ b/runtime/neurun/core/src/exec/ExecutorBase.h @@ -63,10 +63,10 @@ public: void addObserver(std::unique_ptr ref) { _subject.add(std::move(ref)); }; private: - std::unique_ptr<ISource> source(const model::IOIndex &index, const model::TypeInfo &type, + std::unique_ptr<ISource> source(const model::IOIndex &index, const ir::TypeInfo &type, const void *buffer, size_t length, ir::Layout io_layout); - std::unique_ptr<ISink> sink(const model::IOIndex &index, const model::TypeInfo &type, - void *buffer, size_t length, ir::Layout io_layout); + std::unique_ptr<ISink> sink(const model::IOIndex &index, const ir::TypeInfo &type, void *buffer, + size_t length, ir::Layout io_layout); template <typename T> std::unique_ptr<ISource> source(const model::IOIndex &index, const void *buffer, size_t length, diff --git a/runtime/neurun/core/src/exec/Sink.h b/runtime/neurun/core/src/exec/Sink.h index 07b72aa..bb2a6c5 100644 --- a/runtime/neurun/core/src/exec/Sink.h +++ b/runtime/neurun/core/src/exec/Sink.h @@ -42,7 +42,7 @@ struct ISink template <typename T> class ITemplSink : public ISink { public: - ITemplSink(void *output_buffer, const size_t &output_size, const model::Shape &shape, + ITemplSink(void *output_buffer, const size_t &output_size, const ir::Shape &shape, const bool copy, ir::Layout io_layout) : _output_buffer{reinterpret_cast<T *>(output_buffer)}, _output_size{output_size}, _shape{shape}, _copy{copy}, _io_layout{io_layout} @@ -161,7 +161,7 @@ protected: private: T *_output_buffer; const size_t _output_size; - const model::Shape _shape; + const ir::Shape _shape; const bool _copy; const ir::Layout _io_layout; }; @@ -169,7 +169,7 @@ private: template <typename T> class PermutateSink final : public ITemplSink<T> { public: - PermutateSink(void *output_buffer, const size_t &output_size, const model::Shape &shape, + PermutateSink(void *output_buffer, const size_t &output_size, const ir::Shape &shape, ir::Layout io_layout) : ITemplSink<T>(output_buffer, output_size, shape, false, io_layout) { @@ -186,7 +186,7 @@ public: template <typename T> class CopySink final : public ITemplSink<T> { public: - CopySink(void *output_buffer, const size_t &output_size, const model::Shape &shape, + CopySink(void *output_buffer, const size_t &output_size, const ir::Shape &shape, ir::Layout io_layout = ir::Layout::UNKNOWN) : ITemplSink<T>(output_buffer, output_size, shape, true, io_layout) { diff --git a/runtime/neurun/core/src/exec/Source.h b/runtime/neurun/core/src/exec/Source.h index 3272d07..fd52dd5 100644 --- a/runtime/neurun/core/src/exec/Source.h +++ b/runtime/neurun/core/src/exec/Source.h @@ -27,7 +27,7 @@ #include "util/Utils.h" #include #include -#include "model/Shape.h" +#include "ir/Shape.h" namespace neurun { @@ -45,7 +45,7 @@ struct ISource template <typename T> class ITemplSource : public ISource { public: - ITemplSource(const void *input_buffer, const size_t &input_size, const model::Shape &shape, + ITemplSource(const void *input_buffer, const size_t &input_size, const ir::Shape &shape, const bool copy, ir::Layout io_layout) : _input_buffer{reinterpret_cast<const T *>(input_buffer)}, _input_size{input_size}, _shape{shape}, _copy(copy), _io_layout{io_layout} @@ -167,7 +167,7 @@ protected: private: const T *_input_buffer; const size_t _input_size; - const model::Shape _shape; + const ir::Shape _shape; const bool _copy; const ir::Layout _io_layout; }; @@ -175,7 +175,7 @@ private: template <typename T> class PermutateSource final : public ITemplSource<T> { public: - PermutateSource(const void *input_buffer, const size_t &input_size, const model::Shape &shape, + PermutateSource(const void *input_buffer, const size_t &input_size, const ir::Shape &shape, ir::Layout io_layout) : ITemplSource<T>(input_buffer, input_size, shape, false, io_layout) { @@ -192,7 +192,7 @@ public: template <typename T> class CopySource final : public ITemplSource<T> { public: - CopySource(const void *input_buffer, const size_t &input_size, const model::Shape &shape, + CopySource(const void *input_buffer, const size_t &input_size, const ir::Shape &shape, ir::Layout io_layout = ir::Layout::UNKNOWN) : ITemplSource<T>(input_buffer, input_size, shape, true, io_layout) { diff --git a/runtime/neurun/core/src/exec/interp/operations/FullyConnected.cc b/runtime/neurun/core/src/exec/interp/operations/FullyConnected.cc index 0f1c0b9..f12f2fe 100644 --- a/runtime/neurun/core/src/exec/interp/operations/FullyConnected.cc +++ b/runtime/neurun/core/src/exec/interp/operations/FullyConnected.cc @@ -58,7 +58,7 @@ void prepareFC(ExecEnv *env, const model::Operation &node) assert(num_units == bias_tensor->dimension(0)); // Make output tensor info - model::Shape output_shape(2); + ir::Shape output_shape(2); output_shape.dim(0) = batch_size;
output_shape.dim(1) = num_units; const model::OperandInfo out_info{output_shape, in_tensor->tensorInfo().typeInfo()}; diff --git a/runtime/neurun/core/src/exec/interp/operations/OperationUtil.h b/runtime/neurun/core/src/exec/interp/operations/OperationUtil.h index 8df4d41..8124a38 100644 --- a/runtime/neurun/core/src/exec/interp/operations/OperationUtil.h +++ b/runtime/neurun/core/src/exec/interp/operations/OperationUtil.h @@ -17,7 +17,7 @@ #ifndef __NEURUN_EXEC_INTERP_OPERATIONS_OPERATION_UTILS_H_ #define __NEURUN_EXEC_INTERP_OPERATIONS_OPERATION_UTILS_H_ -#include "model/Shape.h" +#include "ir/Shape.h" #include "ir/InternalType.h" #include @@ -29,7 +29,7 @@ namespace exec namespace interp { -inline nnfw::cker::Shape convertShape(const model::Shape &shape) +inline nnfw::cker::Shape convertShape(const ir::Shape &shape) { auto dimensions = std::vector(shape.dims().begin(), shape.dims().end()); @@ -51,7 +51,7 @@ inline nnfw::cker::Shape convertShape(const model::Shape &shape) return nnfw::cker::GetShape(raw_shape); } -inline nnfw::cker::Shape convertExtendShape(const model::Shape &shape) +inline nnfw::cker::Shape convertExtendShape(const ir::Shape &shape) { auto dimensions = std::vector(shape.dims().begin(), shape.dims().end()); diff --git a/runtime/neurun/core/src/ir/Graph.cc b/runtime/neurun/core/src/ir/Graph.cc index afd0a7c..cd2e291 100644 --- a/runtime/neurun/core/src/ir/Graph.cc +++ b/runtime/neurun/core/src/ir/Graph.cc @@ -43,7 +43,7 @@ Graph::Graph() = default; Graph::~Graph(void) = default; -model::OperandIndex Graph::addOperand(const model::Shape &shape, const model::TypeInfo &type) +model::OperandIndex Graph::addOperand(const ir::Shape &shape, const ir::TypeInfo &type) { return _operands.emplace(shape, type); } diff --git a/runtime/neurun/core/src/model/Shape.cc b/runtime/neurun/core/src/ir/Shape.cc similarity index 97% rename from runtime/neurun/core/src/model/Shape.cc rename to runtime/neurun/core/src/ir/Shape.cc index b7f7bff..64e9aa9 100644 --- a/runtime/neurun/core/src/model/Shape.cc +++ b/runtime/neurun/core/src/ir/Shape.cc @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "model/Shape.h" +#include "ir/Shape.h" #include "util/Utils.h" #include @@ -23,7 +23,7 @@ namespace neurun { -namespace model +namespace ir { FeatureShape Shape::asFeature(Layout layout) const @@ -82,5 +82,5 @@ uint64_t Shape::num_elements() const std::multiplies<uint64_t>()); -} // namespace model +} // namespace ir } // namespace neurun diff --git a/runtime/neurun/core/src/model/TypeInfo.cc b/runtime/neurun/core/src/ir/TypeInfo.cc similarity index 94% rename from runtime/neurun/core/src/model/TypeInfo.cc rename to runtime/neurun/core/src/ir/TypeInfo.cc index 46ac2d4..280146b 100644 --- a/runtime/neurun/core/src/model/TypeInfo.cc +++ b/runtime/neurun/core/src/ir/TypeInfo.cc @@ -14,11 +14,11 @@ * limitations under the License. */ -#include "model/TypeInfo.h" +#include "ir/TypeInfo.h" namespace neurun { -namespace model +namespace ir { bool operator==(const TypeInfo &lhs, const TypeInfo &rhs) @@ -43,5 +43,5 @@ bool operator==(const TypeInfo &lhs, const TypeInfo &rhs) bool operator!=(const TypeInfo &lhs, const TypeInfo &rhs) { return !(lhs == rhs); } -} // namespace model +} // namespace ir } // namespace neurun diff --git a/runtime/neurun/core/src/ir/operand/Shape4DConvert.h b/runtime/neurun/core/src/ir/operand/Shape4DConvert.h index 36058fd..60e0555 100644 --- a/runtime/neurun/core/src/ir/operand/Shape4DConvert.h +++ b/runtime/neurun/core/src/ir/operand/Shape4DConvert.h @@ -26,7 +26,7 @@ namespace graph namespace operand { -inline LowerInfo::Shape4D asShape4D(const model::Shape &shape) +inline LowerInfo::Shape4D asShape4D(const ir::Shape &shape) { switch (shape.rank()) { diff --git a/runtime/neurun/core/src/util/Padding.cc b/runtime/neurun/core/src/util/Padding.cc index 89e4577..2e2202b 100644 --- a/runtime/neurun/core/src/util/Padding.cc +++ b/runtime/neurun/core/src/util/Padding.cc @@ -46,8 +46,8 @@ ir::ExplicitPadding validPadding(void) return padding; } -ir::ExplicitPadding samePaddingUsingIFM(const model::FeatureShape &ifm_shape, - const ir::Stride &stride, uint32_t kw, uint32_t kh) +ir::ExplicitPadding samePaddingUsingIFM(const ir::FeatureShape &ifm_shape, const ir::Stride &stride, + uint32_t kw, uint32_t kh) { ir::ExplicitPadding padding; @@ -76,8 +76,8 @@ ir::ExplicitPadding samePaddingUsingIFM(const model::FeatureShape &ifm_shape, return padding; } -ir::ExplicitPadding samePadding(const model::FeatureShape &ifm_shape, - const model::FeatureShape &ofm_shape, const ir::Stride &stride, +ir::ExplicitPadding samePadding(const ir::FeatureShape &ifm_shape, + const ir::FeatureShape &ofm_shape, const ir::Stride &stride, uint32_t kw, uint32_t kh) { const int32_t vertical_expected_output = (ifm_shape.H + stride.vertical - 1) / stride.vertical; @@ -93,9 +93,8 @@ ir::ExplicitPadding samePadding(const model::FeatureShape &ifm_shape, return samePaddingUsingIFM(ifm_shape, stride, kw, kh); } -ir::ExplicitPadding calculatePadding(const ir::Padding &padding, - const model::FeatureShape &ifm_shape, - const model::FeatureShape &ofm_shape, const ir::Stride &stride, +ir::ExplicitPadding calculatePadding(const ir::Padding &padding, const ir::FeatureShape &ifm_shape, + const ir::FeatureShape &ofm_shape, const ir::Stride &stride, uint32_t kw, uint32_t kh) { if (padding.type == ir::PaddingType::EXPLICIT) diff --git a/runtime/neurun/core/src/util/ShapeInference.cc b/runtime/neurun/core/src/util/ShapeInference.cc index de30b9b..44857ef 100644 --- a/runtime/neurun/core/src/util/ShapeInference.cc +++ b/runtime/neurun/core/src/util/ShapeInference.cc @@ -16,7 +16,7 @@ #include "util/Utils.h" #include "ir/InternalType.h" -#include "model/Shape.h" +#include "ir/Shape.h" #include "model/operation/AvgPool2D.h" #include "model/operation/MaxPool2D.h" #include "util/ShapeInference.h" @@ -43,9 +43,9 @@ ceil_div(T dividend, U divisor) } // Calculate the result of broadcast of two shapes -model::Shape broadcastShapes(const model::Shape &lhs_shape, const model::Shape &rhs_shape) +ir::Shape broadcastShapes(const ir::Shape &lhs_shape, const ir::Shape &rhs_shape) { - model::Shape out_shape; + ir::Shape out_shape; auto max_rank = std::max(lhs_shape.rank(), rhs_shape.rank()); for (int idx = 0; idx < max_rank; ++idx) @@ -100,20 +100,20 @@ std::pair<int, int> calcConvLikeHeightAndWidth(const int in_h, const int in_w, c // Shape inference // -Shapes
inferEltwiseShape(const model::Shape &lhs_shape, const model::Shape &rhs_shape) +Shapes inferEltwiseShape(const ir::Shape &lhs_shape, const ir::Shape &rhs_shape) { return {broadcastShapes(lhs_shape, rhs_shape)}; } -Shapes inferAvgPoolShape(const model::Shape &in_shape, - const model::operation::AvgPool2D::Param ¶m, const ir::Layout layout) +Shapes inferAvgPoolShape(const ir::Shape &in_shape, const model::operation::AvgPool2D::Param ¶m, + const ir::Layout layout) { assert(layout == ir::Layout::NHWC); auto ifm_shape = in_shape.asFeature(layout); const auto out_h_w = calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, param.kh, param.kw, param.padding, param.stride); // Pooling don't change number of channels and batch size - return {model::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, ifm_shape.C}}; + return {ir::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, ifm_shape.C}}; } Shapes inferConcatShape(const Shapes &in_shapes, const model::operation::Concat::Param ¶m) @@ -130,25 +130,25 @@ Shapes inferConcatShape(const Shapes &in_shapes, const model::operation::Concat: } // Calculate output shape - model::Shape out_shape(first_in_shape); + ir::Shape out_shape(first_in_shape); out_shape.dim(concat_axis) = 0; for (const auto &in_shape : in_shapes) out_shape.dim(concat_axis) += in_shape.dim(concat_axis); return {out_shape}; } -Shapes inferMaxPoolShape(const model::Shape &in_shape, - const model::operation::MaxPool2D::Param ¶m, const ir::Layout layout) +Shapes inferMaxPoolShape(const ir::Shape &in_shape, const model::operation::MaxPool2D::Param ¶m, + const ir::Layout layout) { assert(layout == ir::Layout::NHWC); auto ifm_shape = in_shape.asFeature(layout); const auto out_h_w = calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, param.kh, param.kw, param.padding, param.stride); // Pooling don't change number of channels and batch size - return {model::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, ifm_shape.C}}; + return {ir::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, ifm_shape.C}}; } -Shapes inferConv2DShape(const model::Shape &in_shape, const model::Shape &ker_shape, +Shapes inferConv2DShape(const ir::Shape &in_shape, const ir::Shape &ker_shape, const model::operation::Conv2D::Param ¶m, ir::Layout layout) { assert(layout == ir::Layout::NHWC); @@ -161,10 +161,10 @@ Shapes inferConv2DShape(const model::Shape &in_shape, const model::Shape &ker_sh const auto out_h_w = calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, kf_shape.H, kf_shape.W, param.padding, param.stride); - return {model::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, kf_shape.N}}; + return {ir::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, kf_shape.N}}; } -Shapes inferDepthwiseConv2DShape(const model::Shape &in_shape, const model::Shape &ker_shape, +Shapes inferDepthwiseConv2DShape(const ir::Shape &in_shape, const ir::Shape &ker_shape, const model::operation::DepthwiseConv2D::Param ¶m, ir::Layout layout) { @@ -179,10 +179,10 @@ Shapes inferDepthwiseConv2DShape(const model::Shape &in_shape, const model::Shap const auto out_h_w = calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, kf_shape.H, kf_shape.W, param.padding, param.stride); - return {model::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, kf_shape.C}}; + return {ir::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, kf_shape.C}}; } -Shapes inferFullyConnectedShape(const model::Shape &in_shape, const model::Shape &ker_shape) +Shapes inferFullyConnectedShape(const ir::Shape &in_shape, const ir::Shape &ker_shape) { assert(in_shape.rank() >= 2); 
assert(ker_shape.rank() == 2); @@ -193,7 +193,7 @@ Shapes inferFullyConnectedShape(const model::Shape const auto batch_size = input_size_with_batch / input_size; assert(input_size_with_batch % input_size == 0); - return {{model::Shape({static_cast<int32_t>(batch_size), num_units})}}; + return {{ir::Shape({static_cast<int32_t>(batch_size), num_units})}}; } } // namespace shape_inference diff --git a/runtime/neurun/frontend/base_loader/base_loader.h b/runtime/neurun/frontend/base_loader/base_loader.h index 153e07a..2578fee 100644 --- a/runtime/neurun/frontend/base_loader/base_loader.h +++ b/runtime/neurun/frontend/base_loader/base_loader.h @@ -196,7 +196,7 @@ BaseLoader::BaseLoader::tensorTypeToDataType(const template model::OperandIndex BaseLoader::loadOperand(const Tensor *tensor) { - model::Shape shape; + ir::Shape shape; // Shape const auto *tensor_shape = tensor->shape(); for (const auto &dim : *tensor_shape) @@ -236,7 +236,7 @@ model::OperandIndex BaseLoader::loadOperand(const throw std::runtime_error("Custom Quantization is not supported"); } // Create TypeInfo - model::TypeInfo type_info(data_type, scale, zero_point); + ir::TypeInfo type_info(data_type, scale, zero_point); // Create operand const auto operand_index = _graph.addOperand(shape, type_info); @@ -278,7 +278,6 @@ template void BaseLoader::loadStridesAndPaddings(Param &param, const OptionsType *options) { - model::Shape shape; // Strides param.stride.vertical = options->stride_w(); param.stride.horizontal = options->stride_h(); @@ -298,7 +297,6 @@ void BaseLoader::loadPool2D(Param &param, // Strides and Paddings loadStridesAndPaddings(param, options); // Filter width and height - model::Shape shape; // Strides param.kw = options->filter_width(); param.kh = options->filter_height(); @@ -336,7 +334,6 @@ void BaseLoader::loadDepthwiseConv2D(const Operato param.activation = convertActivation(options->fused_activation_function()); loadStridesAndPaddings(param, options); // Multiplier - model::Shape shape; param.multiplier = options->depth_multiplier(); // Dilation h/w factor unused std::unique_ptr new_op( @@ -437,7 +434,6 @@ void BaseLoader::loadConcatenation(const Operator model::operation::Concat::Param param; const auto *options = op->builtin_options_as_ConcatenationOptions(); // Axis - model::Shape shape; param.axis = options->axis(); // activation unused diff --git a/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h b/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h index 093c66f..91f84b9 100644 --- a/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h +++ b/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h @@ -24,8 +24,8 @@ #include -#include <model/Shape.h> -#include <model/TypeInfo.h> +#include <ir/Shape.h> +#include <ir/TypeInfo.h> #include class NNAPIConvert @@ -44,14 +44,14 @@ public: * @param[in] type NNAPI's operand type * @return neurun's internal operand type info */ - static ::neurun::model::TypeInfo getTypeInfo(const ANeuralNetworksOperandType *type); + static neurun::ir::TypeInfo getTypeInfo(const ANeuralNetworksOperandType *type); /** * @brief Convert operand shape info from NNAPI to internal operand shape * @param[in] type NNAPI's operand type * @return neurun's internal operand shape */ - static ::neurun::model::Shape getShape(const ANeuralNetworksOperandType *type); + static neurun::ir::Shape getShape(const ANeuralNetworksOperandType *type); /** * @brief Calculate operand size from NNAPI type
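As with the converters in the earlier patch, only the signatures of getTypeInfo and getShape appear in the hunk above. A plausible sketch of getShape is below, assuming ir::Shape exposes the rank constructor and dim() accessor seen in the test diffs further down; the function body is illustrative, not the actual nnfw source, while dimensionCount and dimensions are the real fields of the NNAPI struct ANeuralNetworksOperandType.

// Hypothetical sketch only -- assumes the class declaration from NNAPIConvert.h above.
neurun::ir::Shape NNAPIConvert::getShape(const ANeuralNetworksOperandType *type)
{
  neurun::ir::Shape shape(type->dimensionCount);
  for (uint32_t axis = 0; axis < type->dimensionCount; ++axis)
    shape.dim(axis) = type->dimensions[axis]; // copy each NNAPI dimension
  return shape;
}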
diff --git a/runtime/neurun/test/core/compiler/Scheduler.cc b/runtime/neurun/test/core/compiler/Scheduler.cc index 3623608..72350f4 100644 --- a/runtime/neurun/test/core/compiler/Scheduler.cc +++ b/runtime/neurun/test/core/compiler/Scheduler.cc @@ -18,9 +18,9 @@ #include #include -#include <model/Shape.h> +#include <ir/Shape.h> #include -#include <model/TypeInfo.h> +#include <ir/TypeInfo.h> #include #include diff --git a/runtime/neurun/test/graph/operand/Set.cc b/runtime/neurun/test/graph/operand/Set.cc index 44ede8c..ee365684 100644 --- a/runtime/neurun/test/graph/operand/Set.cc +++ b/runtime/neurun/test/graph/operand/Set.cc @@ -22,15 +22,15 @@ TEST(graph_operand_Set, set_test) { neurun::model::Operands set; - ::neurun::model::Shape shape0{1, 2, 3}; + neurun::ir::Shape shape0{1, 2, 3}; - ::neurun::model::Shape shape1(4); + neurun::ir::Shape shape1(4); shape1.dim(0) = 10; shape1.dim(1) = 20; shape1.dim(2) = 30; shape1.dim(3) = 40; - ::neurun::model::TypeInfo type{neurun::ir::DataType::INT32}; + neurun::ir::TypeInfo type{neurun::ir::DataType::INT32}; set.emplace(shape0, type); set.emplace(shape1, type); diff --git a/runtime/neurun/test/graph/operand/UseDef.cc b/runtime/neurun/test/graph/operand/UseDef.cc index 9e945ab..b049b8e 100644 --- a/runtime/neurun/test/graph/operand/UseDef.cc +++ b/runtime/neurun/test/graph/operand/UseDef.cc @@ -36,8 +36,8 @@ TEST(graph_operand_usedef, usedef_test) neurun::graph::Graph graph; neurun::graph::verifier::DAGChecker verifier; - neurun::model::Shape shape(3); - neurun::model::TypeInfo type{neurun::ir::DataType::INT32}; + neurun::ir::Shape shape(3); + neurun::ir::TypeInfo type{neurun::ir::DataType::INT32}; // Model Input/Output auto input_operand = graph.addOperand(shape, type); diff --git a/runtime/neurun/test/graph/operation/SetIO.cc b/runtime/neurun/test/graph/operation/SetIO.cc index 95f1f13..31950b0 100644 --- a/runtime/neurun/test/graph/operation/SetIO.cc +++ b/runtime/neurun/test/graph/operation/SetIO.cc @@ -33,8 +33,8 @@ TEST(graph_operation_setIO, operation_setIO_conv) { neurun::graph::Graph graph; - neurun::model::Shape shape{3}; - neurun::model::TypeInfo type{neurun::ir::DataType::INT32}; + neurun::ir::Shape shape{3}; + neurun::ir::TypeInfo type{neurun::ir::DataType::INT32}; // Add Conv using Graph = neurun::model::operation::Conv2D; @@ -66,9 +66,9 @@ TEST(graph_operation_setIO, operation_setIO_concat) { neurun::graph::Graph graph; - neurun::model::Shape shape{3}; + neurun::ir::Shape shape{3}; - neurun::model::TypeInfo type{neurun::ir::DataType::INT32}; + neurun::ir::TypeInfo type{neurun::ir::DataType::INT32}; using Graph = neurun::model::operation::Concat; diff --git a/runtime/neurun/test/graph/verifier/Verifier.cc b/runtime/neurun/test/graph/verifier/Verifier.cc index b430261..ced5bda 100644 --- a/runtime/neurun/test/graph/verifier/Verifier.cc +++ b/runtime/neurun/test/graph/verifier/Verifier.cc @@ -30,8 +30,8 @@ TEST(Verifier, dag_checker) { neurun::graph::Graph graph; - ::neurun::model::Shape shape{3}; - ::neurun::model::TypeInfo type{neurun::ir::DataType::INT32}; + neurun::ir::Shape shape{3}; + neurun::ir::TypeInfo type{neurun::ir::DataType::INT32}; auto operand1 = graph.addOperand(shape, type); auto operand2 = graph.addOperand(shape, type); -- 2.7.4
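A note on the migration mechanics of the patch above: rather than updating every call site at once, the moved headers keep temporary using-aliases in the old namespace (see the Shape.h and TypeInfo.h hunks), so the old model:: spellings keep compiling until the namespaces are merged, per the TODO comments. A minimal self-contained illustration of that pattern, with placeholder types rather than the real neurun classes:

// Self-contained illustration of the alias-based migration (not neurun code).
#include <iostream>
#include <type_traits>

namespace neurun
{
namespace ir
{
struct TypeInfo { /* ... */ }; // the type now lives in the new namespace
} // namespace ir

// Temporary compatibility alias, as in the patch: unmigrated code that still
// says neurun::model::TypeInfo keeps compiling and names the same type.
namespace model
{
using TypeInfo = ir::TypeInfo;
} // namespace model
} // namespace neurun

int main()
{
  neurun::ir::TypeInfo a;    // new spelling
  neurun::model::TypeInfo b; // old spelling, same type
  std::cout << std::boolalpha << std::is_same<decltype(a), decltype(b)>::value << '\n'; // true
}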
From 9d59221746793e7c44231379b8bd831b761d1fbb Mon Sep 17 00:00:00 2001 From: =?utf8?q?=EA=B9=80=EC=9A=A9=EC=84=AD/On-Device=20Lab=28SR=29/Enginee?= =?utf8?q?r/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?= Date: Mon, 9 Dec 2019 10:41:06 +0900 Subject: [PATCH 07/16] [libbenchmark] Apply Phase into runners and MemoryPoller (#9421) Apply the enum class Phase to nnpackage_run, tflite_run, and MemoryPoller. Signed-off-by: Yongseop Kim --- .../benchmark/include/benchmark/MemoryPoller.h | 8 +- runtime/libs/benchmark/src/MemoryPoller.cpp | 16 +-- tests/tools/nnpackage_run/src/nnpackage_run.cc | 53 ++++++---- tests/tools/tflite_run/src/tflite_run.cc | 107 ++++++++++++--------- 4 files changed, 112 insertions(+), 72 deletions(-) diff --git a/runtime/libs/benchmark/include/benchmark/MemoryPoller.h b/runtime/libs/benchmark/include/benchmark/MemoryPoller.h index b3570c5..4410822 100644 --- a/runtime/libs/benchmark/include/benchmark/MemoryPoller.h +++ b/runtime/libs/benchmark/include/benchmark/MemoryPoller.h @@ -26,6 +26,8 @@ #include #include +#include "Phase.h" namespace benchmark { @@ -44,8 +46,8 @@ public: _thread.join(); } - bool Start(const std::string &phase); - uint32_t End(const std::string &phase); + bool Start(Phase phase); + uint32_t End(Phase phase); // TODO expose rss & hwm data @@ -58,7 +60,7 @@ private: std::chrono::milliseconds _duration; std::thread _thread; - std::unordered_map _data_map; + std::unordered_map _data_map; std::mutex _mutex; std::mutex _mutex_started; diff --git a/runtime/libs/benchmark/src/MemoryPoller.cpp b/runtime/libs/benchmark/src/MemoryPoller.cpp index 89d73ee..e76b9c6 100644 --- a/runtime/libs/benchmark/src/MemoryPoller.cpp +++ b/runtime/libs/benchmark/src/MemoryPoller.cpp @@ -73,7 +73,8 @@ std::vector GetValueFromFileStatus(const std::string &file, const s if (!found) { - std::cerr << "Wrong key name: " << key << std::endl; + // NOTE A process that uses GPU resources may not appear in this file yet at the + // model-load phase. In that case, just return the empty result. return val; } @@ -95,11 +96,11 @@ MemoryPoller::MemoryPoller(std::chrono::milliseconds duration, bool gpu_poll) _thread = std::thread{&MemoryPoller::Process, this}; } -bool MemoryPoller::Start(const std::string &phase) +bool MemoryPoller::Start(Phase phase) { if (_data_map.find(phase) != _data_map.end()) { - std::cerr << phase << " is already processing/processed..." << std::endl; + std::cerr << GetPhaseString(phase) << " is already processing/processed..." << std::endl; return false; } @@ -113,11 +114,11 @@ bool MemoryPoller::Start(const std::string &phase) return true; } -uint32_t MemoryPoller::End(const std::string &phase) +uint32_t MemoryPoller::End(Phase phase) { if (_data_map.find(phase) == _data_map.end()) { - std::cerr << phase << " is not started..." << std::endl; + std::cerr << GetPhaseString(phase) << " is not started..." << std::endl; return 0; } @@ -204,7 +205,8 @@ bool MemoryPoller::PrepareMemoryPolling() uint32_t MemoryPoller::GetVmRSS() { auto val = GetValueFromFileStatus(proc_status_path, "VmRSS"); - // key: value + if (val.size() == 0) + return 0; assert(IsStrNumber(val[1])); return std::stoul(val[1]); } @@ -213,6 +215,8 @@ uint32_t MemoryPoller::GetGpuMemory() { assert(!_process_name.empty()); auto val = GetValueFromFileStatus(gpu_memory_path, _process_name); + if (val.size() == 0) + return 0; // process_name -> pid -> gpu_mem -> max_gpu_mem assert(IsStrNumber(val[2])); return std::stoul(val[2]); diff --git a/tests/tools/nnpackage_run/src/nnpackage_run.cc b/tests/tools/nnpackage_run/src/nnpackage_run.cc index f023e3a..396db94 100644 --- a/tests/tools/nnpackage_run/src/nnpackage_run.cc +++ b/tests/tools/nnpackage_run/src/nnpackage_run.cc @@ -115,7 +115,7 @@ int main(const int argc, char **argv) } } - std::vector<uint32_t> mp_results({0, 0}); + std::vector<uint32_t> mp_results({0, 0, 0}); nnfw_session *session = nullptr; NNPR_ENSURE_STATUS(nnfw_create_session(&session)); @@ -124,7 +124,14 @@ int main(const int argc, char **argv) NNPR_ENSURE_STATUS(nnfw_set_available_backends(session, available_backends)); NNPR_ENSURE_STATUS(resolve_op_backend(session)); + // ModelLoad + if (mp) + mp->Start(benchmark::Phase::MODEL_LOAD); + uint64_t t_model_load = benchmark::NowMicros(); NNPR_ENSURE_STATUS(nnfw_load_model_from_file(session, nnpackage_path.c_str())); + t_model_load = benchmark::NowMicros() - t_model_load; + if (mp) + mp_results[0] = mp->End(benchmark::Phase::MODEL_LOAD); uint32_t num_inputs; NNPR_ENSURE_STATUS(nnfw_input_size(session, &num_inputs)); @@ -169,12 +176,12 @@ int main(const int argc, char **argv) // TODO When nnfw_{prepare|run} are failed, can't catch the time if (mp) - mp->Start("Compiling"); - uint64_t prepare_us = benchmark::NowMicros(); + mp->Start(benchmark::Phase::PREPARE); + uint64_t t_prepare = benchmark::NowMicros(); NNPR_ENSURE_STATUS(nnfw_prepare(session)); - prepare_us = benchmark::NowMicros() - prepare_us; + t_prepare = benchmark::NowMicros() - t_prepare; if (mp) - mp_results[0] = mp->End("Compiling"); + mp_results[1] = mp->End(benchmark::Phase::PREPARE); // prepare input std::vector inputs(num_inputs); @@ -230,12 +237,12 @@ int main(const int argc, char **argv) // poll memories before warming up if (mp) - mp->Start("Executing"); + mp->Start(benchmark::Phase::EXECUTE); uint64_t run_us = benchmark::NowMicros(); NNPR_ENSURE_STATUS(nnfw_run(session)); run_us = benchmark::NowMicros() - run_us; if (mp) - mp_results[1] = mp->End("Executing"); + mp_results[2] = mp->End(benchmark::Phase::EXECUTE); // warmup runs for (uint32_t i = 1; i < args.getWarmupRuns(); i++) @@ -248,13 +255,13 @@ int main(const int argc, char **argv) } // actual runs - benchmark::Accumulator<uint64_t> acc; + benchmark::Accumulator<uint64_t> t_execute; for (uint32_t i = 0; i < args.getNumRuns(); i++) { uint64_t run_us = benchmark::NowMicros(); NNPR_ENSURE_STATUS(nnfw_run(session)); run_us = benchmark::NowMicros() - run_us; - acc(run_us); + t_execute(run_us); std::cout << "...
" << "run " << i << " takes " << run_us / 1e3 << " ms" << std::endl; } @@ -268,18 +275,25 @@ int main(const int argc, char **argv) // to stdout { std::cout << "===================================" << std::endl; - std::cout << "nnfw_prepare takes " << prepare_us / 1e3 << " ms" << std::endl; - std::cout << "nnfw_run takes" << std::endl; - std::cout << "- Min: " << acc.min() / 1e3 << "ms" << std::endl; - std::cout << "- Max: " << acc.max() / 1e3 << "ms" << std::endl; - std::cout << "- Mean: " << acc.mean() / 1e3 << "ms" << std::endl; + std::cout << GetPhaseString(benchmark::Phase::MODEL_LOAD) << " takes " << t_model_load / 1e3 + << " ms" << std::endl; + std::cout << GetPhaseString(benchmark::Phase::PREPARE) << " takes " << t_prepare / 1e3 << " ms" + << std::endl; + std::cout << GetPhaseString(benchmark::Phase::EXECUTE) << " takes" << std::endl; + std::cout << "- Min: " << t_execute.min() / 1e3 << "ms" << std::endl; + std::cout << "- Max: " << t_execute.max() / 1e3 << "ms" << std::endl; + std::cout << "- Mean: " << t_execute.mean() / 1e3 << "ms" << std::endl; if (mp) { - assert(mp_results.size() == 2); + assert(mp_results.size() == 3); std::cout << "===================================" << std::endl; - std::cout << "nnfw_prepare takes " << mp_results[0] << " kb" << std::endl; - std::cout << "nnfw_run takes " << mp_results[1] << " kb" << std::endl; + std::cout << GetPhaseString(benchmark::Phase::MODEL_LOAD) << " takes " << mp_results[0] + << " kb" << std::endl; + std::cout << GetPhaseString(benchmark::Phase::PREPARE) << " takes " << mp_results[1] << " kb" + << std::endl; + std::cout << GetPhaseString(benchmark::Phase::EXECUTE) << " takes " << mp_results[2] << " kb" + << std::endl; } std::cout << "===================================" << std::endl; } @@ -310,10 +324,11 @@ int main(const int argc, char **argv) csv_filename = exec_name + "-" + model_name + "-" + backend_name + ".csv"; } + // TODO Update csv contents // to csv benchmark::CsvWriter writer(csv_filename); - writer << model_name << backend_name << acc.min() / 1e3 << acc.max() / 1e3 << acc.mean() / 1e3 - << mp_results[0] << mp_results[1]; + writer << model_name << backend_name << t_execute.min() / 1e3 << t_execute.max() / 1e3 + << t_execute.mean() / 1e3 << mp_results[1] << mp_results[2]; bool done = writer.Done(); std::cout << "Writing to " << csv_filename << " is "; diff --git a/tests/tools/tflite_run/src/tflite_run.cc b/tests/tools/tflite_run/src/tflite_run.cc index e77f255..d47fe12 100644 --- a/tests/tools/tflite_run/src/tflite_run.cc +++ b/tests/tools/tflite_run/src/tflite_run.cc @@ -98,22 +98,40 @@ int main(const int argc, char **argv) TFLiteRun::Args args(argc, argv); - auto model = FlatBufferModel::BuildFromFile(args.getTFLiteFilename().c_str(), &error_reporter); - std::unique_ptr interpreter; + std::chrono::milliseconds t_model_load(0), t_prepare(0); + std::vector mp_results({0, 0, 0}); - std::chrono::milliseconds t_prepare(0); + std::unique_ptr mp{nullptr}; + if (args.getMemoryPoll()) + { + try + { + mp.reset(new benchmark::MemoryPoller(std::chrono::milliseconds(5), args.getGpuMemoryPoll())); + } + catch (const std::runtime_error &error) + { + std::cerr << error.what() << std::endl; + return 1; + } + } + std::unique_ptr model; + std::unique_ptr interpreter; try { - nnfw::misc::benchmark::measure(t_prepare) << [&](void) { - BuiltinOpResolver resolver; + if (mp) + mp->Start(benchmark::Phase::MODEL_LOAD); + nnfw::misc::benchmark::measure(t_model_load) << [&](void) { + model = 
FlatBufferModel::BuildFromFile(args.getTFLiteFilename().c_str(), &error_reporter); + BuiltinOpResolver resolver; InterpreterBuilder builder(*model, resolver); - TFLITE_ENSURE(builder(&interpreter)) - interpreter->SetNumThreads(nnfw::misc::EnvVar("THREAD").asInt(-1)); }; + + if (mp) + mp_results[0] = mp->End(benchmark::Phase::MODEL_LOAD); } catch (const std::exception &e) { @@ -121,21 +139,6 @@ int main(const int argc, char **argv) return 1; } - std::unique_ptr mp{nullptr}; - if (args.getMemoryPoll()) - { - try - { - mp.reset(new benchmark::MemoryPoller(std::chrono::milliseconds(5), args.getGpuMemoryPoll())); - } - catch (const std::runtime_error &error) - { - std::cerr << error.what() << std::endl; - return 1; - } - } - std::vector mp_results({0, 0}); - std::shared_ptr sess; if (use_nnapi) @@ -147,11 +150,21 @@ int main(const int argc, char **argv) sess = std::make_shared(interpreter.get()); } - if (mp) - mp->Start("Compiling"); - sess->prepare(); - if (mp) - mp_results[0] = mp->End("Compiling"); + try + { + if (mp) + mp->Start(benchmark::Phase::PREPARE); + + nnfw::misc::benchmark::measure(t_prepare) << [&](void) { sess->prepare(); }; + + if (mp) + mp_results[1] = mp->End(benchmark::Phase::PREPARE); + } + catch (const std::exception &e) + { + std::cerr << e.what() << '\n'; + return 1; + } if (args.getInputShapes().size() != 0) { @@ -283,13 +296,13 @@ int main(const int argc, char **argv) // poll memories before warming up if (mp) - mp->Start("Executing"); + mp->Start(benchmark::Phase::EXECUTE); if (!sess->run()) { assert(0 && "run failed!"); } if (mp) - mp_results[1] = mp->End("Executing"); + mp_results[2] = mp->End(benchmark::Phase::EXECUTE); // warmup runs for (uint32_t i = 1; i < args.getWarmupRuns(); i++) @@ -305,7 +318,7 @@ int main(const int argc, char **argv) } // actual runs - benchmark::Accumulator acc; + benchmark::Accumulator t_execute; for (uint32_t i = 0; i < args.getNumRuns(); i++) { uint64_t run_us = benchmark::NowMicros(); @@ -314,7 +327,7 @@ int main(const int argc, char **argv) assert(0 && "run failed!"); } run_us = benchmark::NowMicros() - run_us; - acc(run_us); + t_execute(run_us); std::cout << "... 
" << "run " << i << " takes " << run_us / 1e3 << " ms" << std::endl; } @@ -338,22 +351,27 @@ int main(const int argc, char **argv) // to stdout { std::cout << "===================================" << std::endl; - std::cout << "Prepare takes " << t_prepare.count() / 1000.0 << " seconds" << std::endl; - std::cout << "Invoke takes " << std::endl; - - std::cout << "===================================" << std::endl; - std::cout << "- Min : " << acc.min() / 1e3 << "ms" << std::endl; - std::cout << "- Max : " << acc.max() / 1e3 << "ms" << std::endl; - std::cout << "- Mean: " << acc.mean() / 1e3 << "ms" << std::endl; + std::cout << GetPhaseString(benchmark::Phase::MODEL_LOAD) << " takes " + << t_model_load.count() / 1e3 << " ms" << std::endl; + std::cout << GetPhaseString(benchmark::Phase::PREPARE) << " takes " << t_prepare.count() / 1e3 + << " ms" << std::endl; + std::cout << GetPhaseString(benchmark::Phase::EXECUTE) << " takes" << std::endl; + std::cout << "- Min: " << t_execute.min() / 1e3 << "ms" << std::endl; + std::cout << "- Max: " << t_execute.max() / 1e3 << "ms" << std::endl; + std::cout << "- Mean: " << t_execute.mean() / 1e3 << "ms" << std::endl; if (mp) { - assert(mp_results.size() == 2); - std::cout << "===================================" << std::endl; - std::cout << "session_prepare takes " << mp_results[0] << " kb" << std::endl; - std::cout << "session_run takes " << mp_results[1] << " kb" << std::endl; + assert(mp_results.size() == 3); std::cout << "===================================" << std::endl; + std::cout << GetPhaseString(benchmark::Phase::MODEL_LOAD) << " takes " << mp_results[0] + << " kb" << std::endl; + std::cout << GetPhaseString(benchmark::Phase::PREPARE) << " takes " << mp_results[1] << " kb" + << std::endl; + std::cout << GetPhaseString(benchmark::Phase::EXECUTE) << " takes " << mp_results[2] << " kb" + << std::endl; } + std::cout << "===================================" << std::endl; } if (args.getWriteReport()) @@ -374,10 +392,11 @@ int main(const int argc, char **argv) csv_filename = exec_name + "-" + model_name + "-" + backend_name + ".csv"; } + // TODO Update csv contents // to csv benchmark::CsvWriter writer(csv_filename); - writer << model_name << backend_name << acc.min() / 1e3 << acc.max() / 1e3 << acc.mean() / 1e3 - << mp_results[0] << mp_results[1]; + writer << model_name << backend_name << t_execute.min() / 1e3 << t_execute.max() / 1e3 + << t_execute.mean() / 1e3 << mp_results[1] << mp_results[2]; bool done = writer.Done(); std::cout << "Writing to " << csv_filename << " is "; -- 2.7.4 From 796d985186734207c9cfcf1b7db57e11e5b74680 Mon Sep 17 00:00:00 2001 From: =?utf8?q?=EC=98=A4=ED=98=95=EC=84=9D/On-Device=20Lab=28SR=29/Staff?= =?utf8?q?=20Engineer/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?= Date: Mon, 9 Dec 2019 10:43:59 +0900 Subject: [PATCH 08/16] [build] Use aarch64 for arm64 (#9417) Change naming arm64 to aarch64 Naming arm64 is used for external tools and libraries Update build rootfs script Signed-off-by: Hyeongseok Oh --- Makefile.template | 19 ++++++++------- docs/nnfw/howto/CrossBuildForAarch64.md | 24 ++++++++----------- docs/nnfw/howto/CrossBuildForAndroid.md | 7 +++--- docs/nnfw/howto/CrossBuildForArm.md | 14 ++--------- infra/cmake/modules/IdentifyPlatform.cmake | 27 ++++++++++++++++------ infra/cmake/packages/ARMComputeConfig.cmake | 5 ---- ...-android.cmake => config_aarch64-android.cmake} | 2 +- ...droid.cmake => toolchain_aarch64-android.cmake} | 3 +++ .../buildtool/cross/toolchain_aarch64-linux.cmake | 2 +- 
.../buildtool/cross/toolchain_aarch64-tizen.cmake | 6 ++---
 ...android.cmake => options_aarch64-android.cmake} | 2 +-
 runtime/contrib/android_benchmark_app/README.md | 6 ++---
 tools/cross/aarch64/sources.list.bionic | 11 +++++++++
 tools/cross/{arm64 => aarch64}/sources.list.trusty | 0
 tools/cross/{arm64 => aarch64}/sources.list.xenial | 0
 tools/cross/build_rootfs.sh | 20 ++++++++--------
 16 files changed, 77 insertions(+), 71 deletions(-)
 rename infra/nnfw/cmake/buildtool/config/{config_arm64-android.cmake => config_aarch64-android.cmake} (90%)
 rename infra/nnfw/cmake/buildtool/cross/{toolchain_arm64-android.cmake => toolchain_aarch64-android.cmake} (93%)
 rename infra/nnfw/cmake/options/{options_arm64-android.cmake => options_aarch64-android.cmake} (96%)
 create mode 100644 tools/cross/aarch64/sources.list.bionic
 rename tools/cross/{arm64 => aarch64}/sources.list.trusty (100%)
 rename tools/cross/{arm64 => aarch64}/sources.list.xenial (100%)

diff --git a/Makefile.template b/Makefile.template
index 9ff5e11..ef3a44c 100644
--- a/Makefile.template
+++ b/Makefile.template
@@ -16,15 +16,16 @@ BUILD_TYPE_LC=$(shell echo $(BUILD_TYPE) | tr A-Z a-z)
 # we need base name 'arm` for all arm arch
 TARGET_ARCH_BASE=$(TARGET_ARCH_LC)
 ifneq (,$(findstring arm64,$(TARGET_ARCH_BASE)))
-  # arm64 as target-arch comes from Android
-  TARGET_ARCH_BASE=arm64
-  # For now Android is the only option for arm64
-  TARGET_OS:=android
+  TARGET_ARCH_LC=aarch64
 else ifneq (,$(findstring arm,$(TARGET_ARCH_BASE)))
-  TARGET_ARCH_BASE=arm
+  TARGET_ARCH_LC=armv7l
 else ifneq (,$(findstring aarch64,$(TARGET_ARCH_BASE)))
-  # aarch64 as target-arch comes from all except for Android
-  TARGET_ARCH_BASE=aarch64
+  TARGET_ARCH_LC=aarch64
+endif
+ifneq (,$(findstring android,$(TARGET_OS)))
+  # Android only allows the aarch64 target arch
+  TARGET_ARCH_LC=aarch64
+  TARGET_OS=android
 endif
 # Set CROSS_BUILD=1 when ROOTFS_DIR is given, and TARGET_ARCH is different to HOST_ARCH.
 ifneq ($(ROOTFS_DIR),)
@@ -125,9 +126,7 @@ distclean:
 ###
 configure_internal:
 	NNFW_WORKSPACE="$(WORKSPACE)" NNFW_INSTALL_PREFIX=$(INSTALL_PATH) ./nnfw configure \
-		-DCMAKE_BUILD_TYPE=$(BUILD_TYPE_LC) -DTARGET_ARCH=$(TARGET_ARCH_LC) \
-		-DHOST_OS=$(HOST_OS) \
-		-DTARGET_OS=$(TARGET_OS) \
+		-DCMAKE_BUILD_TYPE=$(BUILD_TYPE_LC) \
 		-DNNFW_OVERLAY_DIR=$(OVERLAY_FOLDER) \
 		$(OPTIONS)
 	touch $(TIMESTAMP_CONFIGURE)

diff --git a/docs/nnfw/howto/CrossBuildForAarch64.md b/docs/nnfw/howto/CrossBuildForAarch64.md
index f3dc552..9f0af85 100644
--- a/docs/nnfw/howto/CrossBuildForAarch64.md
+++ b/docs/nnfw/howto/CrossBuildForAarch64.md
@@ -1,10 +1,6 @@
-# Cross building for AARCH64
+# Cross building for AARCH64 (ARM64)

-In nnfw, we use both `ARM64` and `AARCH64` on build files such as Makefile, CMakeLists.txt and so on.
-- `ARM64`: only for Android
-- `AARCH64`: all except for Android
-
-However we use only one term `ARM64` in RootFS. Use `ARM64` if you need a RootFS for `AARCH64`.
+In nnfw, we use `AARCH64` on build files such as Makefile, CMakeLists.txt and so on.

 ## Prepare Ubuntu RootFS

@@ -17,9 +13,9 @@ sudo apt-get install qemu qemu-user-static binfmt-support debootstrap

 Use `build_rootfs.sh` script to prepare Root File System.
You should have `sudo` ``` -sudo ./tools/cross/build_rootfs.sh arm64 +sudo ./tools/cross/build_rootfs.sh aarch64 ``` -- supports `arm`(default) and `arm64` architecutre for now +- supports `arm`(default) and `aarch64` architecutre for now - supports `xenial`(default) and `trusty` release To see the options, @@ -27,14 +23,14 @@ To see the options, ./tools/cross/build_rootfs.sh -h ``` -RootFS will be prepared at `tools/cross/rootfs/arm64` folder. +RootFS will be prepared at `tools/cross/rootfs/aarch64` folder. ### Prepare RootFS at alternative folder Use `ROOTFS_DIR` to a full path to prepare at alternative path. ``` -ROOTFS_DIR=/home/user/rootfs/arm64-xenial sudo ./tools/cross/build_rootfs.sh arm64 +ROOTFS_DIR=/home/user/rootfs/aarch64-xenial sudo ./tools/cross/build_rootfs.sh aarch64 ``` ### Using proxy @@ -43,9 +39,9 @@ If you need to use proxy server while building the rootfs, use `--setproxy` opti ``` # for example, -sudo ./tools/cross/build_rootfs.sh arm64 --setproxy="1.2.3.4:8080" +sudo ./tools/cross/build_rootfs.sh aarch64 --setproxy="1.2.3.4:8080" # or -sudo ./tools/cross/build_rootfs.sh arm64 --setproxy="proxy.server.com:8888" +sudo ./tools/cross/build_rootfs.sh aarch64 --setproxy="proxy.server.com:8888" ``` This will put `apt` proxy settings in `rootfs/etc/apt/apt.conf.d/90proxy` file @@ -76,6 +72,6 @@ CROSS_BUILD=1 TARGET_ARCH=aarch64 make install If you used `ROOTFS_DIR` to prepare in alternative folder, you should also give this to makefile. ``` -CROSS_BUILD=1 ROOTFS_DIR=/home/user/rootfs/arm64-xenial TARGET_ARCH=aarch64 make -CROSS_BUILD=1 ROOTFS_DIR=/home/user/rootfs/arm64-xenial TARGET_ARCH=aarch64 make install +CROSS_BUILD=1 ROOTFS_DIR=/home/user/rootfs/aarch64-xenial TARGET_ARCH=aarch64 make +CROSS_BUILD=1 ROOTFS_DIR=/home/user/rootfs/aarch64-xenial TARGET_ARCH=aarch64 make install ``` diff --git a/docs/nnfw/howto/CrossBuildForAndroid.md b/docs/nnfw/howto/CrossBuildForAndroid.md index 86c2e9d..ab9d04e 100644 --- a/docs/nnfw/howto/CrossBuildForAndroid.md +++ b/docs/nnfw/howto/CrossBuildForAndroid.md @@ -1,6 +1,6 @@ # Cross building for Android -Supported Architecture : ARM64 only (ARM32 is not supported yet) +Supported Architecture : AARCH64 only (ARM32 is not supported yet) ## Prepare Android NDK @@ -33,19 +33,18 @@ libarm_compute.so ### Build and install the runtime Some tools/libs are still not supported and those are not built by default - mostly due to dependency on Boost library. -Please refer to `infra/nnfw/cmake/options/options_arm64-android.cmake` for details. +Please refer to `infra/nnfw/cmake/options/options_aarch64-android.cmake` for details. Different from cross build for linux, - `NDK_DIR` is required -- `TARGET_ARCH` must be `arm64`, not `aarch64`. Here is an example of using Makefile. ```bash cp -n Makefile.template Makefile -TARGET_ARCH=arm64 \ +TARGET_OS=android \ CROSS_BUILD=1 \ NDK_DIR=/path/android-tools/r20/ndk \ EXT_ACL_FOLDER=/path/arm_compute-v19.05-bin-android/lib/android-arm64-v8a-neon-cl \ diff --git a/docs/nnfw/howto/CrossBuildForArm.md b/docs/nnfw/howto/CrossBuildForArm.md index 8d0a25f..07b4a17 100644 --- a/docs/nnfw/howto/CrossBuildForArm.md +++ b/docs/nnfw/howto/CrossBuildForArm.md @@ -13,8 +13,8 @@ Use `build_rootfs.sh` script to prepare Root File System. 
You should have `sudo`

```
sudo ./tools/cross/build_rootfs.sh arm
```
-- supports `arm`(default) and `arm64` architecutre for now
+- supports `arm`(default) and `aarch64` architectures for now
-- supports `xenial`(default) and `trusty` release
+- supports `xenial`(default), `trusty`, and `bionic` releases

 To see the options,
```
./tools/cross/build_rootfs.sh -h
```
@@ -110,18 +110,8 @@ make all install # do normal build
 TARGET_ARCH = armv7l make all install # do cross build
 ```

-If you want to build neurun, you should switch on `BUILD_NEURUN` option in `cmake/CfgOptionFlags.cmake`
-```
-option(BUILD_NEURUN "Build neurun" ON)
-```
-
 ## Run test
-- PureACL
-```
- ./tests/scripts/test_driver.sh --artifactpath=.
-```
-- neurun
 ```
 ./tests/scripts/test_driver.sh --artifactpath=. \
   --frameworktest_list_file=tests/scripts/list/neurun_frameworktest_list.armv7l.acl_cl.txt

diff --git a/infra/cmake/modules/IdentifyPlatform.cmake b/infra/cmake/modules/IdentifyPlatform.cmake
index 9313eef..69fe48c 100644
--- a/infra/cmake/modules/IdentifyPlatform.cmake
+++ b/infra/cmake/modules/IdentifyPlatform.cmake
@@ -1,27 +1,42 @@
 # set host platform to build
 if(NOT HOST_ARCH OR "${HOST_ARCH}" STREQUAL "")
-  set(HOST_ARCH ${CMAKE_HOST_SYSTEM_PROCESSOR})
+  string(TOLOWER ${CMAKE_HOST_SYSTEM_PROCESSOR} HOST_ARCH)
+else()
+  string(TOLOWER ${HOST_ARCH} HOST_ARCH)
 endif()

 # set target platform to run
 if(NOT TARGET_ARCH OR "${TARGET_ARCH}" STREQUAL "")
-  set(TARGET_ARCH "${HOST_ARCH}")
+  string(TOLOWER ${CMAKE_SYSTEM_PROCESSOR} TARGET_ARCH)
+else()
+  string(TOLOWER ${TARGET_ARCH} TARGET_ARCH)
 endif()

 if(NOT DEFINED HOST_OS)
   string(TOLOWER ${CMAKE_HOST_SYSTEM_NAME} HOST_OS)
+else()
+  string(TOLOWER ${HOST_OS} HOST_OS)
 endif()

 if(NOT DEFINED TARGET_OS)
-  set(TARGET_OS "${HOST_OS}")
+  string(TOLOWER ${CMAKE_SYSTEM_NAME} TARGET_OS)
+else()
+  string(TOLOWER ${TARGET_OS} TARGET_OS)
+endif()
+
+# If HOST_ARCH or TARGET_ARCH (from CMAKE_HOST_SYSTEM_PROCESSOR or CMAKE_SYSTEM_PROCESSOR)
+# is arm64, change the arch name to aarch64
+if("${HOST_ARCH}" STREQUAL "arm64")
+  set(HOST_ARCH "aarch64")
+endif()
+if("${TARGET_ARCH}" STREQUAL "arm64")
+  set(TARGET_ARCH "aarch64")
 endif()

 if("${HOST_ARCH}" STREQUAL "x86_64")
   set(HOST_ARCH_BASE ${HOST_ARCH})
 elseif("${HOST_ARCH}" STREQUAL "armv7l")
   set(HOST_ARCH_BASE "arm")
-elseif("${HOST_ARCH}" STREQUAL "arm64")
-  set(HOST_ARCH_BASE "arm64")
 elseif("${HOST_ARCH}" STREQUAL "aarch64")
   set(HOST_ARCH_BASE "aarch64")
 else()
@@ -32,8 +47,6 @@ if("${TARGET_ARCH}" STREQUAL "x86_64")
   set(TARGET_ARCH_BASE ${TARGET_ARCH})
 elseif("${TARGET_ARCH}" STREQUAL "armv7l")
   set(TARGET_ARCH_BASE "arm")
-elseif("${TARGET_ARCH}" STREQUAL "arm64")
-  set(TARGET_ARCH_BASE "arm64")
 elseif("${TARGET_ARCH}" STREQUAL "aarch64")
   set(TARGET_ARCH_BASE "aarch64")
 else()

diff --git a/infra/cmake/packages/ARMComputeConfig.cmake b/infra/cmake/packages/ARMComputeConfig.cmake
index ef70574..f014f3e 100644
--- a/infra/cmake/packages/ARMComputeConfig.cmake
+++ b/infra/cmake/packages/ARMComputeConfig.cmake
@@ -121,11 +121,6 @@ function(_ARMCompute_Build ARMCompute_INSTALL_PREFIX)
     set(BUILD_DIR "${BUILD_ARCH}-${TARGET_OS}.${SCON_BUILD_TYPE}")
   endif()

-  if(TARGET_ARCH STREQUAL "arm64")
-    set(BUILD_ARCH "arm64-v8a")
-    set(BUILD_DIR "${BUILD_ARCH}-${TARGET_OS}.${SCON_BUILD_TYPE}")
-  endif()
-
   #### Platform-specific configurations
   #### TODO Support android

diff --git a/infra/nnfw/cmake/buildtool/config/config_arm64-android.cmake b/infra/nnfw/cmake/buildtool/config/config_aarch64-android.cmake
similarity index 90%
rename from infra/nnfw/cmake/buildtool/config/config_arm64-android.cmake
rename to
infra/nnfw/cmake/buildtool/config/config_aarch64-android.cmake index 037541c..e0c81de 100644 --- a/infra/nnfw/cmake/buildtool/config/config_arm64-android.cmake +++ b/infra/nnfw/cmake/buildtool/config/config_aarch64-android.cmake @@ -3,7 +3,7 @@ include("cmake/buildtool/config/config_linux.cmake") # On Android, pthread is contained in bionic(libc) set(LIB_PTHREAD "") -# SIMD for arm64 +# SIMD for aarch64 set(FLAGS_COMMON ${FLAGS_COMMON} "-ftree-vectorize" ) diff --git a/infra/nnfw/cmake/buildtool/cross/toolchain_arm64-android.cmake b/infra/nnfw/cmake/buildtool/cross/toolchain_aarch64-android.cmake similarity index 93% rename from infra/nnfw/cmake/buildtool/cross/toolchain_arm64-android.cmake rename to infra/nnfw/cmake/buildtool/cross/toolchain_aarch64-android.cmake index 97c249c..2b2792a 100644 --- a/infra/nnfw/cmake/buildtool/cross/toolchain_arm64-android.cmake +++ b/infra/nnfw/cmake/buildtool/cross/toolchain_aarch64-android.cmake @@ -20,3 +20,6 @@ set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY NEVER) # Use the toolchain file that NDK provides include(${NDK_DIR}/build/cmake/android.toolchain.cmake) + +set(TARGET_OS "android") +set(TARGET_ARCH "aarch64") diff --git a/infra/nnfw/cmake/buildtool/cross/toolchain_aarch64-linux.cmake b/infra/nnfw/cmake/buildtool/cross/toolchain_aarch64-linux.cmake index 8eb9b20..3356aa7 100644 --- a/infra/nnfw/cmake/buildtool/cross/toolchain_aarch64-linux.cmake +++ b/infra/nnfw/cmake/buildtool/cross/toolchain_aarch64-linux.cmake @@ -11,7 +11,7 @@ set(CMAKE_CXX_COMPILER aarch64-linux-gnu-g++) # where is the target environment set(NNAS_PROJECT_SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/../../../../..") -set(ROOTFS_AARCH64 "${NNAS_PROJECT_SOURCE_DIR}/tools/cross/rootfs/arm64") +set(ROOTFS_AARCH64 "${NNAS_PROJECT_SOURCE_DIR}/tools/cross/rootfs/aarch64") include("${NNAS_PROJECT_SOURCE_DIR}/infra/cmake/modules/OptionTools.cmake") envoption(ROOTFS_DIR ${ROOTFS_AARCH64}) diff --git a/infra/nnfw/cmake/buildtool/cross/toolchain_aarch64-tizen.cmake b/infra/nnfw/cmake/buildtool/cross/toolchain_aarch64-tizen.cmake index 4129cc1..4d5d7ac 100644 --- a/infra/nnfw/cmake/buildtool/cross/toolchain_aarch64-tizen.cmake +++ b/infra/nnfw/cmake/buildtool/cross/toolchain_aarch64-tizen.cmake @@ -6,14 +6,14 @@ include(CMakeForceCompiler) set(CMAKE_SYSTEM_NAME Linux) set(CMAKE_SYSTEM_PROCESSOR aarch64) -set(CMAKE_C_COMPILER aarch64-linux-gnu-gcc-5) -set(CMAKE_CXX_COMPILER aarch64-linux-gnu-g++-5) +set(CMAKE_C_COMPILER aarch64-linux-gnu-gcc) +set(CMAKE_CXX_COMPILER aarch64-linux-gnu-g++) set(TIZEN_TOOLCHAIN "aarch64-tizen-linux-gnu/6.2.1") # where is the target environment set(NNAS_PROJECT_SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/../../../../..") -set(ROOTFS_AARCH64 "${NNAS_PROJECT_SOURCE_DIR}/tools/cross/rootfs/arm64") +set(ROOTFS_AARCH64 "${NNAS_PROJECT_SOURCE_DIR}/tools/cross/rootfs/aarch64") include("${NNAS_PROJECT_SOURCE_DIR}/infra/cmake/modules/OptionTools.cmake") envoption(ROOTFS_DIR ${ROOTFS_AARCH64}) diff --git a/infra/nnfw/cmake/options/options_arm64-android.cmake b/infra/nnfw/cmake/options/options_aarch64-android.cmake similarity index 96% rename from infra/nnfw/cmake/options/options_arm64-android.cmake rename to infra/nnfw/cmake/options/options_aarch64-android.cmake index 392375c..2393764 100644 --- a/infra/nnfw/cmake/options/options_arm64-android.cmake +++ b/infra/nnfw/cmake/options/options_aarch64-android.cmake @@ -1,4 +1,4 @@ -# arm64 android cmake options +# aarch64 android cmake options # option(BUILD_ARMCOMPUTE "Build ARM Compute from the downloaded source" OFF) # NOTE 
BUILD_ANDROID_TFLITE(JNI lib) is disabled due to BuiltinOpResolver issue. diff --git a/runtime/contrib/android_benchmark_app/README.md b/runtime/contrib/android_benchmark_app/README.md index d53a56c..2868e0a 100644 --- a/runtime/contrib/android_benchmark_app/README.md +++ b/runtime/contrib/android_benchmark_app/README.md @@ -9,7 +9,7 @@ You can run with two engines. ## Build -In addition to arm64-Android build, you need to specify more parameters. +In addition to aarch64-Android build, you need to specify more parameters. - `ANDROID_BUILD_TOOLS_DIR` : Android `build-tools` directory (You may find it in Android SDK directory) - `ANDROID_SDK_DIR` : Android SDK directory @@ -21,7 +21,7 @@ In addition to arm64-Android build, you need to specify more parameters. Example: ```bash -make TARGET_ARCH=arm64 \ +make TARGET_OS=android \ CROSS_BUILD=1 \ BUILD_TYPE=RELEASE \ NDK_DIR=/home/hanjoung/ws/android-tools/r20/ndk \ @@ -44,7 +44,7 @@ Before installing the package you probably need to sign the package. ```bash apksigner sign \ --ks ~/.android/debug.keystore \ - --in Product/arm64-android.release/obj/contrib/android_benchmark_app/android-benchmark.unsigned.pkg \ + --in Product/aarch64-android.release/obj/contrib/android_benchmark_app/android-benchmark.unsigned.pkg \ --out tflbench.apk ``` diff --git a/tools/cross/aarch64/sources.list.bionic b/tools/cross/aarch64/sources.list.bionic new file mode 100644 index 0000000..2109557 --- /dev/null +++ b/tools/cross/aarch64/sources.list.bionic @@ -0,0 +1,11 @@ +deb http://ports.ubuntu.com/ubuntu-ports/ bionic main restricted universe +deb-src http://ports.ubuntu.com/ubuntu-ports/ bionic main restricted universe + +deb http://ports.ubuntu.com/ubuntu-ports/ bionic-updates main restricted universe +deb-src http://ports.ubuntu.com/ubuntu-ports/ bionic-updates main restricted universe + +deb http://ports.ubuntu.com/ubuntu-ports/ bionic-backports main restricted +deb-src http://ports.ubuntu.com/ubuntu-ports/ bionic-backports main restricted + +deb http://ports.ubuntu.com/ubuntu-ports/ bionic-security main restricted universe multiverse +deb-src http://ports.ubuntu.com/ubuntu-ports/ bionic-security main restricted universe multiverse diff --git a/tools/cross/arm64/sources.list.trusty b/tools/cross/aarch64/sources.list.trusty similarity index 100% rename from tools/cross/arm64/sources.list.trusty rename to tools/cross/aarch64/sources.list.trusty diff --git a/tools/cross/arm64/sources.list.xenial b/tools/cross/aarch64/sources.list.xenial similarity index 100% rename from tools/cross/arm64/sources.list.xenial rename to tools/cross/aarch64/sources.list.xenial diff --git a/tools/cross/build_rootfs.sh b/tools/cross/build_rootfs.sh index 3aa242b..f6c59b9 100755 --- a/tools/cross/build_rootfs.sh +++ b/tools/cross/build_rootfs.sh @@ -4,7 +4,7 @@ set -x usage() { echo "Usage: $0 [BuildArch] [LinuxCodeName] [--setproxy=IP] [--skipunmount]" - echo "BuildArch can be: arm(default), arm64 and armel" + echo "BuildArch can be: arm(default), aarch64 and armel" echo "LinuxCodeName - optional, Code name for Linux, can be: xenial(default), trusty" echo " If BuildArch is armel, this can be tizen(default)" echo "--setproxy=IP - optional, IP is the proxy server IP address or url with portnumber" @@ -18,7 +18,7 @@ __InitialDir=$PWD __UbuntuRepo="http://ports.ubuntu.com/" __BuildArch=arm -__UbuntuArch=armhf +__QemuArch=armhf __LinuxCodeName=xenial __SkipUnmount=0 __IsProxySet=0 @@ -44,27 +44,27 @@ for i in "$@" ; do ;; arm) __BuildArch=arm - __UbuntuArch=armhf + __QemuArch=armhf ;; - 
arm64)
-            __BuildArch=arm64
-            __UbuntuArch=arm64
+        aarch64)
+            __BuildArch=aarch64
+            __QemuArch=arm64
             ;;
         armel)
             __BuildArch=armel
             __Tizen=tizen
-            __UbuntuArch=
+            __QemuArch=
             __UbuntuRepo=
             __LinuxCodeName=
             ;;
         tizen)
             if [ "$__BuildArch" != "armel" ]; then
-                echo "Tizen is available only for armel."
+                echo "Tizen rootfs is available only for armel."
                 usage;
                 exit 1;
             fi
             __Tizen=tizen
-            __UbuntuArch=
+            __QemuArch=
             __UbuntuRepo=
             __LinuxCodeName=
             ;;
@@ -112,7 +112,7 @@ if [ $__IsProxySet == 1 ] && [ "$__Tizen" != "tizen" ]; then
 fi

 if [[ -n $__LinuxCodeName ]]; then
-    qemu-debootstrap --arch $__UbuntuArch $__LinuxCodeName $__RootfsDir $__UbuntuRepo
+    qemu-debootstrap --arch $__QemuArch $__LinuxCodeName $__RootfsDir $__UbuntuRepo
     cp $__CrossDir/$__BuildArch/sources.list.$__LinuxCodeName $__RootfsDir/etc/apt/sources.list
     chroot $__RootfsDir apt-get update
     chroot $__RootfsDir apt-get -f -y install
--
2.7.4

From a871d6143dea74df6408b1a215ef94f92c7bfd8a Mon Sep 17 00:00:00 2001
From: =?utf8?q?=EC=98=A4=ED=98=95=EC=84=9D/On-Device=20Lab=28SR=29/Staff?=
 =?utf8?q?=20Engineer/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?=
Date: Mon, 9 Dec 2019 11:24:24 +0900
Subject: [PATCH 09/16] [neurun/gbs] Fix gbs aarch64 build (#9435)

Fix the GBS build error that occurs when there is no skip list for the
target architecture

Signed-off-by: Hyeongseok Oh
---
 packaging/nnfw.spec | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/packaging/nnfw.spec b/packaging/nnfw.spec
index 83210ca..1800ee4 100644
--- a/packaging/nnfw.spec
+++ b/packaging/nnfw.spec
@@ -125,9 +125,9 @@ mkdir -p %{buildroot}%{_libdir}/pkgconfig
 install -m 0644 ./nnfw.pc.in %{buildroot}%{_libdir}/pkgconfig/nnfw.pc
 %{test_build_env} ./nnfw install
-# Rename to share test script
-mv %{buildroot}%{test_install_dir}/unittest/nnapi_gtest.skip.armv7l-tizen %{buildroot}%{test_install_dir}/unittest/nnapi_gtest.skip.armv7l-linux
-cp tests/nnapi/nnapi_gtest.skip.armv7l-linux.* %{buildroot}%{test_install_dir}/unittest/.
+# Share test scripts with Ubuntu (ignore errors if there is no list for the target)
+cp tests/nnapi/nnapi_gtest.skip.* %{buildroot}%{test_install_dir}/unittest/.
+cp %{buildroot}%{test_install_dir}/unittest/nnapi_gtest.skip %{buildroot}%{test_install_dir}/unittest/nnapi_gtest.skip.%{target_arch}-linux || true

 tar -zxf test-suite.tar.gz -C %{buildroot}%{test_install_home}

 %if %{coverage_build} == 1
--
2.7.4

From 2989fbfc88f0edec35d18356859e75dadcd347e5 Mon Sep 17 00:00:00 2001
From: =?utf8?q?=EC=98=A4=ED=98=95=EC=84=9D/On-Device=20Lab=28SR=29/Staff?=
 =?utf8?q?=20Engineer/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?=
Date: Mon, 9 Dec 2019 12:35:03 +0900
Subject: [PATCH 10/16] [neurun] Check backend initialization success (#9454)

Make the backend initialize() function return a boolean indicating
whether initialization succeeded or failed

Signed-off-by: Hyeongseok Oh
---
 runtime/neurun/backend/acl_cl/Config.cc | 8 +++++++-
 runtime/neurun/backend/acl_cl/Config.h | 2 +-
 runtime/neurun/backend/acl_neon/Config.cc | 5 +----
 runtime/neurun/backend/acl_neon/Config.h | 2 +-
 runtime/neurun/backend/cpu/Config.cc | 5 +----
 runtime/neurun/backend/cpu/Config.h | 2 +-
 runtime/neurun/backend/srcn/Config.cc | 5 +----
 runtime/neurun/backend/srcn/Config.h | 2 +-
 runtime/neurun/core/include/backend/IConfig.h | 2 +-
 runtime/neurun/core/src/backend/BackendManager.cc | 9 ++++++++-
 runtime/neurun/test/core/backend/ExecTime.test.cc | 2 +-
 runtime/neurun/test/core/compiler/Scheduler.cc | 6 +++---
 12 files changed, 27 insertions(+), 23 deletions(-)

diff --git a/runtime/neurun/backend/acl_cl/Config.cc b/runtime/neurun/backend/acl_cl/Config.cc
index 0c07691..36bf836 100644
--- a/runtime/neurun/backend/acl_cl/Config.cc
+++ b/runtime/neurun/backend/acl_cl/Config.cc
@@ -30,13 +30,19 @@ namespace backend
 namespace acl_cl
 {

-void Config::initialize()
+bool Config::initialize()
 {
+  if (!arm_compute::opencl_is_available())
+  {
+    return false;
+  }
   arm_compute::CLScheduler::get().default_init();
   // NOTE CLKernelLibraryEx must use the same context as CLScheduler
   // It did not check whether another device is available.
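To make the new contract concrete, here is a minimal sketch of a backend config under the bool-returning `initialize()`. It is modeled on the mock configs in the tests touched by this patch; `MyConfig` and `probeDevice` are illustrative names, not part of this diff:

```cpp
// Minimal sketch of a custom backend config under the new contract.
// The real implementations are the acl_cl/acl_neon/cpu/srcn configs in this patch.
#include <string>
#include "backend/IConfig.h"

struct MyConfig : public neurun::backend::IConfig
{
  std::string id() override { return "my_backend"; }

  // Return false when a required resource is missing (as acl_cl now does for
  // OpenCL); BackendManager::loadBackend then skips registering the backend
  // instead of failing later during kernel generation.
  bool initialize() override { return probeDevice(); }

  bool SupportPermutation() override { return false; }
  bool SupportSubTensorAlloc() override { return false; }

private:
  bool probeDevice() { return true; } // placeholder for a real availability check
};
```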
arm_compute::CLKernelLibraryEx::get().init( "./cl_kernels/", arm_compute::CLScheduler::get().context(), cl::Device::getDefault()); + + return true; } } // namespace acl_cl diff --git a/runtime/neurun/backend/acl_cl/Config.h b/runtime/neurun/backend/acl_cl/Config.h index ae527dd..a7ceaac 100644 --- a/runtime/neurun/backend/acl_cl/Config.h +++ b/runtime/neurun/backend/acl_cl/Config.h @@ -32,7 +32,7 @@ class Config : public IConfig { public: std::string id() override { return "acl_cl"; } - void initialize() override; + bool initialize() override; bool SupportPermutation() override { return true; } bool SupportSubTensorAlloc() override { return true; } std::unique_ptr timer() override { return nnfw::cpp14::make_unique(); } diff --git a/runtime/neurun/backend/acl_neon/Config.cc b/runtime/neurun/backend/acl_neon/Config.cc index 8f4a8e7..352bc0b 100644 --- a/runtime/neurun/backend/acl_neon/Config.cc +++ b/runtime/neurun/backend/acl_neon/Config.cc @@ -23,10 +23,7 @@ namespace backend namespace acl_neon { -void Config::initialize() -{ - // DO NOTHING -} +bool Config::initialize() { return true; } } // namespace acl_neon } // namespace backend diff --git a/runtime/neurun/backend/acl_neon/Config.h b/runtime/neurun/backend/acl_neon/Config.h index 13f6c83..430c194 100644 --- a/runtime/neurun/backend/acl_neon/Config.h +++ b/runtime/neurun/backend/acl_neon/Config.h @@ -32,7 +32,7 @@ class Config : public IConfig { public: std::string id() override { return "acl_neon"; } - void initialize() override; + bool initialize() override; bool SupportPermutation() override { return true; } bool SupportSubTensorAlloc() override { return true; } diff --git a/runtime/neurun/backend/cpu/Config.cc b/runtime/neurun/backend/cpu/Config.cc index 1a487f7..3912740 100644 --- a/runtime/neurun/backend/cpu/Config.cc +++ b/runtime/neurun/backend/cpu/Config.cc @@ -23,10 +23,7 @@ namespace backend namespace cpu { -void Config::initialize() -{ - // DO NOTHING -} +bool Config::initialize() { return true; } } // namespace cpu } // namespace backend diff --git a/runtime/neurun/backend/cpu/Config.h b/runtime/neurun/backend/cpu/Config.h index 262d477..be303b5 100644 --- a/runtime/neurun/backend/cpu/Config.h +++ b/runtime/neurun/backend/cpu/Config.h @@ -32,7 +32,7 @@ class Config : public IConfig { public: std::string id() override { return "cpu"; } - void initialize() override; + bool initialize() override; bool SupportPermutation() override { return true; } bool SupportSubTensorAlloc() override { diff --git a/runtime/neurun/backend/srcn/Config.cc b/runtime/neurun/backend/srcn/Config.cc index e69136f..6865657 100644 --- a/runtime/neurun/backend/srcn/Config.cc +++ b/runtime/neurun/backend/srcn/Config.cc @@ -23,10 +23,7 @@ namespace backend namespace srcn { -void Config::initialize() -{ - // DO NOTHING -} +bool Config::initialize() { return true; } } // namespace srcn } // namespace backend diff --git a/runtime/neurun/backend/srcn/Config.h b/runtime/neurun/backend/srcn/Config.h index fbe2f09..efc77fd 100644 --- a/runtime/neurun/backend/srcn/Config.h +++ b/runtime/neurun/backend/srcn/Config.h @@ -30,7 +30,7 @@ class Config : public IConfig { public: std::string id() override { return "srcn"; } - void initialize() override; + bool initialize() override; bool SupportPermutation() override { return false; } bool SupportSubTensorAlloc() override { diff --git a/runtime/neurun/core/include/backend/IConfig.h b/runtime/neurun/core/include/backend/IConfig.h index e6c2235..855f31e 100644 --- a/runtime/neurun/core/include/backend/IConfig.h +++ 
b/runtime/neurun/core/include/backend/IConfig.h @@ -31,7 +31,7 @@ struct IConfig virtual ~IConfig() = default; virtual std::string id() = 0; - virtual void initialize() = 0; + virtual bool initialize() = 0; // Support permute kernel virtual bool SupportPermutation() = 0; // Support subtensor allocation diff --git a/runtime/neurun/core/src/backend/BackendManager.cc b/runtime/neurun/core/src/backend/BackendManager.cc index 6cc1deb..3db3b24 100644 --- a/runtime/neurun/core/src/backend/BackendManager.cc +++ b/runtime/neurun/core/src/backend/BackendManager.cc @@ -90,7 +90,14 @@ void BackendManager::loadBackend(const std::string &backend) auto backend_object = std::unique_ptr(backend_create(), backend_destroy); auto backend_object_raw = backend_object.get(); - backend_object->config()->initialize(); // Call initialize here? + bool initialized = backend_object->config()->initialize(); // Call initialize here? + if (!initialized) + { + VERBOSE(BackendManager::loadBackend) + << backend.c_str() << " backend initialization failed. Don't use this backend" + << std::endl; + return; + } _gen_map.emplace(backend_object->config()->id(), std::move(backend_object)); _available_backends.push_back(backend_object_raw); } diff --git a/runtime/neurun/test/core/backend/ExecTime.test.cc b/runtime/neurun/test/core/backend/ExecTime.test.cc index b0065d3..b5471c8 100644 --- a/runtime/neurun/test/core/backend/ExecTime.test.cc +++ b/runtime/neurun/test/core/backend/ExecTime.test.cc @@ -28,7 +28,7 @@ using namespace backend; struct MockConfig : public IConfig { std::string id() override { return "b1"; } - void initialize() override{}; + bool initialize() override { return true; }; bool SupportPermutation() override { return false; } bool SupportSubTensorAlloc() override { return false; } }; diff --git a/runtime/neurun/test/core/compiler/Scheduler.cc b/runtime/neurun/test/core/compiler/Scheduler.cc index 72350f4..cf653b1 100644 --- a/runtime/neurun/test/core/compiler/Scheduler.cc +++ b/runtime/neurun/test/core/compiler/Scheduler.cc @@ -55,7 +55,7 @@ struct MockShapeFixer : IShapeFixer struct MockConfigCPU : public IConfig { std::string id() override { return "cpu"; } - void initialize() override{}; + bool initialize() override { return true; }; bool SupportPermutation() override { return false; } bool SupportSubTensorAlloc() override { return false; } }; @@ -74,7 +74,7 @@ struct MockBackendCPU : public Backend struct MockConfigGPU : public IConfig { std::string id() override { return "gpu"; } - void initialize() override{}; + bool initialize() override { return true; }; bool SupportPermutation() override { return false; } bool SupportSubTensorAlloc() override { return false; } }; @@ -93,7 +93,7 @@ struct MockBackendGPU : public Backend struct MockConfigNPU : public IConfig { std::string id() override { return "npu"; } - void initialize() override{}; + bool initialize() override { return true; }; bool SupportPermutation() override { return false; } bool SupportSubTensorAlloc() override { return false; } }; -- 2.7.4 From 60ac3689901831a37effd3ef0c9d3863f9ac206a Mon Sep 17 00:00:00 2001 From: =?utf8?q?=EC=9D=B4=ED=95=9C=EC=A2=85/On-Device=20Lab=28SR=29/Enginee?= =?utf8?q?r/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?= Date: Mon, 9 Dec 2019 12:43:27 +0900 Subject: [PATCH 11/16] [neurun] Introduce EventCollectorGlobal (#9443) Introduce EventCollectorGlobal for event collection from anywhere Part of #9241 Signed-off-by: Hanjoung Lee --- .../core/include/util/EventCollectorGlobal.h | 155 +++++++++++++++++++++ 1 file changed, 155 
insertions(+) create mode 100644 runtime/neurun/core/include/util/EventCollectorGlobal.h diff --git a/runtime/neurun/core/include/util/EventCollectorGlobal.h b/runtime/neurun/core/include/util/EventCollectorGlobal.h new file mode 100644 index 0000000..15e4084 --- /dev/null +++ b/runtime/neurun/core/include/util/EventCollectorGlobal.h @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __NEURUN_UTIL_EVENT_COLLECTOR_GLOBAL_H__ +#define __NEURUN_UTIL_EVENT_COLLECTOR_GLOBAL_H__ + +#include "misc/EventRecorder.h" +#include "misc/EventCollector.h" + +namespace neurun +{ +namespace util +{ + +/** + * @brief Singleton class for event collection from anywhere in code + * + */ +class EventCollectorGlobal +{ +public: + /** + * @brief Get the singleton object of this class + * + * @return EventCollectorGlobal& Singleton object + */ + static EventCollectorGlobal &get(); + +public: + /** + * @brief Getter for event collector object + * + * @return EventCollector& Collector object + */ + EventCollector &collector() { return _collector; } + +private: + EventCollectorGlobal(); + ~EventCollectorGlobal(); + +private: + EventRecorder _recorder; + EventCollector _collector; +}; + +/** + * @brief Helper class for emitting duration event which is handled automatically with ctor/dtor + * + */ +class EventDurationBlock +{ +public: + /** + * @brief Raise a duration event with type of BEGIN + * + * @param tag A label for the duration event + */ + EventDurationBlock(const std::string &tag); + /** + * @brief Raise a duration event with type of END + * + */ + ~EventDurationBlock(); + +private: + std::string _tag; +}; + +/** + * @brief Helper class for emitting duration event which is handled manually + * + * Usage: + * { + * ... + * EventDurationManual duration("some tag"); + * duration.begin(); + * ... + * ... // Code for duration + * ... + * duration.end(); + * } + * + */ +class EventDurationManual +{ +public: + /** + * @brief Construct a new Event Duration Manual object + * + * @param tag A label for the duration object + */ + EventDurationManual(const std::string &tag); + /** + * @brief Destroy the Event Duration Manual object + * + */ + ~EventDurationManual(); + + /** + * @brief Raise a duration event with type of BEGIN + * + */ + void begin(); + /** + * @brief Raise a duration event with type of END + * + */ + void end(); + +private: + std::string _tag; + bool _pair; +}; + +} // namespace util +} // namespace neurun + +/** + * Helper Macro Definitions + * + * HOW TO USE + * + * void f(args) + * { + * EVENT_DURATION_FUNCTION(); + * ... + * if(cond) + * { + * EVENT_DURATION_REGION("if branch"); + * ... + * } + * ... 
+ * } + */ + +#define EVENT_DURATION_FUNCTION() \ + ::neurun::util::EventDurationBlock __event_duration__##__LINE__ { __FUNCTION__ } + +#define EVENT_DURATION_REGION(tag) \ + ::neurun::util::EventDurationBlock __event_duration__##__LINE__ { tag } + +#endif // __NEURUN_UTIL_EVENT_COLLECTOR_GLOBAL_H__ -- 2.7.4 From 8e7cdbd6b79470eab62d5df0b1a572f0249795ae Mon Sep 17 00:00:00 2001 From: Sergei Barannikov/AI Tools Lab /SRR/Engineer/Samsung Electronics Date: Mon, 9 Dec 2019 06:44:11 +0300 Subject: [PATCH 12/16] [neurun] Move some files from model into ir directory (#9448) * Move some files from `model` into `ir` directory. * Move contained symbols into `neurun::ir` namespace, fixing uses where possible. Signed-off-by: Sergei Barannikov --- runtime/neurun/api/src/nnfw_api_internal.cc | 8 +- runtime/neurun/backend/acl_cl/Backend.h | 4 +- .../neurun/backend/acl_cl/ConstantInitializer.cc | 11 +- .../neurun/backend/acl_cl/ConstantInitializer.h | 8 +- runtime/neurun/backend/acl_cl/KernelGenerator.cc | 12 +- runtime/neurun/backend/acl_cl/KernelGenerator.h | 7 +- runtime/neurun/backend/acl_cl/ShapeFixer.cc | 4 +- runtime/neurun/backend/acl_cl/ShapeFixer.h | 7 +- runtime/neurun/backend/acl_cl/TensorRegister.h | 5 +- .../backend/acl_common/AclLinearMemoryManager.h | 6 +- .../neurun/backend/acl_common/AclMemoryManager.h | 22 +-- .../neurun/backend/acl_common/AclTensorManager.h | 52 ++++--- .../neurun/backend/acl_common/AclTensorRegister.cc | 2 +- .../neurun/backend/acl_common/AclTensorRegister.h | 8 +- runtime/neurun/backend/acl_common/Convert.h | 2 +- .../neurun/backend/acl_common/TemplTensorBuilder.h | 102 ++++++------- runtime/neurun/backend/acl_neon/Backend.h | 4 +- .../neurun/backend/acl_neon/ConstantInitializer.cc | 11 +- .../neurun/backend/acl_neon/ConstantInitializer.h | 8 +- runtime/neurun/backend/acl_neon/KernelGenerator.cc | 12 +- runtime/neurun/backend/acl_neon/KernelGenerator.h | 7 +- runtime/neurun/backend/acl_neon/ShapeFixer.cc | 4 +- runtime/neurun/backend/acl_neon/ShapeFixer.h | 7 +- runtime/neurun/backend/acl_neon/TensorRegister.h | 5 +- runtime/neurun/backend/cpu/Backend.h | 4 +- runtime/neurun/backend/cpu/ConstantInitializer.cc | 2 +- runtime/neurun/backend/cpu/ConstantInitializer.h | 8 +- runtime/neurun/backend/cpu/KernelGenerator.cc | 7 +- runtime/neurun/backend/cpu/KernelGenerator.h | 7 +- runtime/neurun/backend/cpu/MemoryManager.cc | 6 +- runtime/neurun/backend/cpu/MemoryManager.h | 14 +- runtime/neurun/backend/cpu/MemoryPlanner.cc | 10 +- runtime/neurun/backend/cpu/MemoryPlanner.h | 18 +-- runtime/neurun/backend/cpu/MemoryPlanner.test.cc | 8 +- runtime/neurun/backend/cpu/ShapeFixer.cc | 2 +- runtime/neurun/backend/cpu/ShapeFixer.h | 7 +- runtime/neurun/backend/cpu/TensorBuilder.cc | 17 ++- runtime/neurun/backend/cpu/TensorBuilder.h | 20 +-- runtime/neurun/backend/cpu/TensorManager.cc | 16 +-- runtime/neurun/backend/cpu/TensorManager.h | 19 ++- runtime/neurun/backend/cpu/TensorRegister.cc | 2 +- runtime/neurun/backend/cpu/TensorRegister.h | 6 +- .../neurun/backend/cpu/kernel/OperationUtils.cc | 2 +- runtime/neurun/backend/cpu/kernel/OperationUtils.h | 4 +- runtime/neurun/backend/cpu/operand/Tensor.h | 6 +- .../neurun/backend/hi_perf_cpu/KernelGenerator.h | 7 +- runtime/neurun/backend/hi_perf_cpu/TensorBuilder.h | 2 +- runtime/neurun/backend/srcn/Backend.h | 4 +- runtime/neurun/backend/srcn/ConstantInitializer.cc | 8 +- runtime/neurun/backend/srcn/ConstantInitializer.h | 10 +- runtime/neurun/backend/srcn/Convert.cc | 6 +- runtime/neurun/backend/srcn/Convert.h | 6 +- 
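Across the 181 touched files the change at call sites is a one-token namespace substitution, as the hunks below show. For instance, the nnfw_api_internal.cc hunk further down reduces to this before/after excerpt (shown as a fragment, since only the namespace changes):

```cpp
// Before this patch: index types were addressed through neurun::model.
_execution->setInput(neurun::model::IOIndex(index), buffer, length);

// After this patch: the same type lives in neurun::ir.
_execution->setInput(neurun::ir::IOIndex(index), buffer, length);
```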
runtime/neurun/backend/srcn/KernelGenerator.cc | 2 +- runtime/neurun/backend/srcn/KernelGenerator.h | 7 +- runtime/neurun/backend/srcn/MemoryManager.cc | 6 +- runtime/neurun/backend/srcn/MemoryManager.h | 15 +- runtime/neurun/backend/srcn/MemoryPlanner.cc | 10 +- runtime/neurun/backend/srcn/MemoryPlanner.h | 18 +-- runtime/neurun/backend/srcn/ShapeFixer.cc | 2 +- runtime/neurun/backend/srcn/ShapeFixer.h | 7 +- runtime/neurun/backend/srcn/TensorBuilder.cc | 17 ++- runtime/neurun/backend/srcn/TensorBuilder.h | 22 +-- runtime/neurun/backend/srcn/TensorManager.cc | 17 ++- runtime/neurun/backend/srcn/TensorManager.h | 18 +-- runtime/neurun/backend/srcn/TensorRegister.cc | 8 +- runtime/neurun/backend/srcn/TensorRegister.h | 6 +- .../neurun/backend/srcn/kernel/OperationUtils.cc | 2 +- .../neurun/backend/srcn/kernel/OperationUtils.h | 4 +- runtime/neurun/backend/srcn/operand/Tensor.h | 6 +- runtime/neurun/core/include/backend/Backend.h | 4 +- .../core/include/backend/IConstantInitializer.h | 26 ++-- .../neurun/core/include/backend/IKernelGenerator.h | 2 +- runtime/neurun/core/include/backend/IShapeFixer.h | 2 +- .../neurun/core/include/backend/ITensorBuilder.h | 19 ++- .../neurun/core/include/backend/ITensorRegister.h | 27 ++-- .../neurun/core/include/compiler/SubTensorInfo.h | 8 +- runtime/neurun/core/include/exec/Execution.h | 12 +- runtime/neurun/core/include/exec/IExecutor.h | 4 +- runtime/neurun/core/include/exec/IODescription.h | 11 +- runtime/neurun/core/include/{model => ir}/Data.h | 15 +- runtime/neurun/core/include/ir/Graph.h | 59 ++++---- runtime/neurun/core/include/{model => ir}/Index.h | 20 ++- runtime/neurun/core/include/ir/LowerInfoMap.h | 8 +- runtime/neurun/core/include/{model => ir}/OpCode.h | 22 +-- .../neurun/core/include/{model => ir}/Operand.h | 20 ++- .../core/include/{model => ir}/OperandConstraint.h | 11 +- .../core/include/{model => ir}/OperandIndexMap.h | 18 ++- .../include/{model => ir}/OperandIndexSequence.h | 10 +- .../core/include/{model => ir}/OperandInfo.h | 14 +- .../neurun/core/include/{model => ir}/Operands.h | 16 ++- .../include/{model => ir}/OperationIndexList.h | 10 +- .../core/include/{model => ir}/OperationIndexMap.h | 18 ++- .../core/include/{model => ir}/Operations.lst | 0 .../neurun/core/include/ir/operand/ParentInfo.h | 8 +- runtime/neurun/core/include/model/Operation.h | 10 +- .../neurun/core/include/model/OperationVisitor.h | 2 +- runtime/neurun/core/include/model/Operations.h | 2 +- runtime/neurun/core/include/model/Subgraph.h | 4 +- runtime/neurun/core/include/model/Subgraphs.h | 2 +- runtime/neurun/core/include/util/Config.lst | 2 +- runtime/neurun/core/include/util/ShapeInference.h | 4 +- runtime/neurun/core/include/util/Utils.h | 2 +- runtime/neurun/core/src/backend/BackendManager.h | 2 +- runtime/neurun/core/src/compiler/BackendResolver.h | 14 +- runtime/neurun/core/src/compiler/Compiler.cc | 2 +- .../neurun/core/src/compiler/ExecutorFactory.cc | 52 ++++--- runtime/neurun/core/src/compiler/HEScheduler.cc | 35 +++-- runtime/neurun/core/src/compiler/HEScheduler.h | 26 ++-- runtime/neurun/core/src/compiler/Linear.cc | 46 +++--- .../neurun/core/src/compiler/ManualScheduler.cc | 22 +-- runtime/neurun/core/src/compiler/OperandContext.cc | 4 +- runtime/neurun/core/src/compiler/OperandContext.h | 17 +-- .../neurun/core/src/compiler/OperationValidator.cc | 2 +- .../neurun/core/src/compiler/OperationValidator.h | 9 +- runtime/neurun/core/src/compiler/ParamChecker.cc | 2 +- .../neurun/core/src/compiler/SubTensorAnalyzer.cc | 2 +- 
.../neurun/core/src/compiler/SubTensorAnalyzer.h | 4 +- runtime/neurun/core/src/dumper/dot/DotBuilder.h | 6 +- runtime/neurun/core/src/dumper/dot/DotDumper.cc | 14 +- .../neurun/core/src/dumper/dot/DotSubgraphInfo.cc | 4 +- .../neurun/core/src/dumper/dot/DotSubgraphInfo.h | 18 +-- runtime/neurun/core/src/dumper/dot/OperandNode.cc | 2 +- runtime/neurun/core/src/dumper/dot/OperandNode.h | 6 +- .../neurun/core/src/dumper/dot/OperationNode.cc | 3 +- runtime/neurun/core/src/dumper/dot/OperationNode.h | 4 +- runtime/neurun/core/src/exec/DataflowExecutor.cc | 10 +- runtime/neurun/core/src/exec/DataflowExecutor.h | 8 +- runtime/neurun/core/src/exec/Execution.cc | 20 ++- runtime/neurun/core/src/exec/ExecutorBase.cc | 12 +- runtime/neurun/core/src/exec/ExecutorBase.h | 12 +- runtime/neurun/core/src/exec/Job.h | 4 +- runtime/neurun/core/src/exec/ParallelExecutor.h | 4 +- runtime/neurun/core/src/exec/interp/Buffer.h | 4 +- runtime/neurun/core/src/exec/interp/ExecEnv.h | 21 ++- runtime/neurun/core/src/exec/interp/ExecManager.cc | 12 +- runtime/neurun/core/src/exec/interp/ExecManager.h | 4 +- runtime/neurun/core/src/exec/interp/Interpreter.cc | 42 +++--- runtime/neurun/core/src/exec/interp/Tensor.h | 30 ++-- .../core/src/exec/interp/operations/Concat.cc | 2 +- .../src/exec/interp/operations/FullyConnected.cc | 2 +- .../core/src/exec/interp/operations/SoftMax.cc | 2 +- runtime/neurun/core/src/ir/Graph.cc | 96 ++++++------- runtime/neurun/core/src/{model => ir}/OpCode.cc | 8 +- runtime/neurun/core/src/{model => ir}/Operand.cc | 14 +- .../core/src/{model => ir}/OperandIndexSequence.cc | 6 +- .../core/src/{model => ir}/OperationIndexList.cc | 8 +- .../core/src/ir/pass/ConstantInsertionPass.cc | 7 +- .../core/src/ir/pass/ConstantInsertionPass.h | 10 +- runtime/neurun/core/src/ir/pass/OperandPass.cc | 2 +- runtime/neurun/core/src/ir/pass/OperandPass.h | 8 +- runtime/neurun/core/src/ir/pass/OperationPass.cc | 4 +- runtime/neurun/core/src/ir/pass/OperationPass.h | 4 +- .../core/src/ir/pass/PermutationEliminationPass.cc | 15 +- .../core/src/ir/pass/PermutationEliminationPass.h | 14 +- .../core/src/ir/pass/PermutationInsertionPass.cc | 15 +- .../core/src/ir/pass/PermutationInsertionPass.h | 10 +- .../core/src/ir/pass/PermutationOperationPass.cc | 4 +- .../core/src/ir/pass/PermutationOperationPass.h | 2 +- runtime/neurun/core/src/ir/verifier/Verifier.cc | 17 ++- runtime/neurun/core/src/model/OperandConstraint.cc | 28 ---- runtime/neurun/core/src/model/Subgraph.cc | 2 +- runtime/neurun/frontend/base_loader/base_loader.h | 158 ++++++++++----------- runtime/neurun/frontend/nnapi/model.cc | 4 +- .../nnapi/wrapper/ANeuralNetworksExecution.cc | 32 ++--- .../nnapi/wrapper/ANeuralNetworksExecution.h | 12 +- .../frontend/nnapi/wrapper/ANeuralNetworksModel.cc | 18 +-- .../frontend/nnapi/wrapper/ANeuralNetworksModel.h | 4 +- .../frontend/nnapi/wrapper/OperationFactory.cc | 116 +++++++-------- .../frontend/nnapi/wrapper/OperationFactory.h | 6 +- runtime/neurun/test/core/backend/ExecTime.test.cc | 3 +- runtime/neurun/test/core/compiler/Scheduler.cc | 2 +- runtime/neurun/test/core/exec/ExecInstance.cc | 1 - .../neurun/test/core/exec/interp/ExecManager.cc | 1 - runtime/neurun/test/graph/Graph.cc | 16 +-- runtime/neurun/test/graph/MockNode.h | 10 +- runtime/neurun/test/graph/operand/IndexSet.cc | 10 +- runtime/neurun/test/graph/operand/Set.cc | 16 +-- runtime/neurun/test/graph/operand/UseDef.cc | 2 +- runtime/neurun/test/graph/operation/Set.cc | 2 +- runtime/neurun/test/graph/operation/SetIO.cc | 8 +- 
runtime/neurun/test/graph/verifier/Verifier.cc | 4 +- 181 files changed, 1126 insertions(+), 1163 deletions(-) rename runtime/neurun/core/include/{model => ir}/Data.h (86%) rename runtime/neurun/core/include/{model => ir}/Index.h (75%) rename runtime/neurun/core/include/{model => ir}/OpCode.h (71%) rename runtime/neurun/core/include/{model => ir}/Operand.h (91%) rename runtime/neurun/core/include/{model => ir}/OperandConstraint.h (92%) rename runtime/neurun/core/include/{model => ir}/OperandIndexMap.h (70%) rename runtime/neurun/core/include/{model => ir}/OperandIndexSequence.h (91%) rename runtime/neurun/core/include/{model => ir}/OperandInfo.h (89%) rename runtime/neurun/core/include/{model => ir}/Operands.h (79%) rename runtime/neurun/core/include/{model => ir}/OperationIndexList.h (90%) rename runtime/neurun/core/include/{model => ir}/OperationIndexMap.h (70%) rename runtime/neurun/core/include/{model => ir}/Operations.lst (100%) rename runtime/neurun/core/src/{model => ir}/OpCode.cc (92%) rename runtime/neurun/core/src/{model => ir}/Operand.cc (79%) rename runtime/neurun/core/src/{model => ir}/OperandIndexSequence.cc (94%) rename runtime/neurun/core/src/{model => ir}/OperationIndexList.cc (84%) delete mode 100644 runtime/neurun/core/src/model/OperandConstraint.cc diff --git a/runtime/neurun/api/src/nnfw_api_internal.cc b/runtime/neurun/api/src/nnfw_api_internal.cc index dcf14a2..bfcc4d7 100644 --- a/runtime/neurun/api/src/nnfw_api_internal.cc +++ b/runtime/neurun/api/src/nnfw_api_internal.cc @@ -172,7 +172,7 @@ NNFW_STATUS nnfw_session::set_input(uint32_t index, NNFW_TYPE /*type*/, const vo { try { - _execution->setInput(neurun::model::IOIndex(index), buffer, length); + _execution->setInput(neurun::ir::IOIndex(index), buffer, length); } catch (...) { @@ -187,7 +187,7 @@ NNFW_STATUS nnfw_session::set_output(uint32_t index, NNFW_TYPE /*type*/, void *b { try { - _execution->setOutput(neurun::model::IOIndex(index), buffer, length); + _execution->setOutput(neurun::ir::IOIndex(index), buffer, length); } catch (...) { @@ -245,7 +245,7 @@ NNFW_STATUS nnfw_session::set_input_layout(uint32_t index, NNFW_LAYOUT layout) std::cerr << "Error during nnfw_session::set_input_layout, not supported layout" << std::endl; return NNFW_STATUS_ERROR; } - _execution->setInputLayout(neurun::model::IOIndex(index), convertLayout(layout)); + _execution->setInputLayout(neurun::ir::IOIndex(index), convertLayout(layout)); } catch (...) { @@ -266,7 +266,7 @@ NNFW_STATUS nnfw_session::set_output_layout(uint32_t index, NNFW_LAYOUT layout) << std::endl; return NNFW_STATUS_ERROR; } - _execution->setOutputLayout(neurun::model::IOIndex(index), convertLayout(layout)); + _execution->setOutputLayout(neurun::ir::IOIndex(index), convertLayout(layout)); } catch (...) 
   {
diff --git a/runtime/neurun/backend/acl_cl/Backend.h b/runtime/neurun/backend/acl_cl/Backend.h
index feb3faf..2033b42 100644
--- a/runtime/neurun/backend/acl_cl/Backend.h
+++ b/runtime/neurun/backend/acl_cl/Backend.h
@@ -19,7 +19,7 @@
 #include 
 
 #include 
-#include 
+#include 
 
 #include "Config.h"
 #include "ConstantInitializer.h"
@@ -43,7 +43,7 @@ public:
   std::shared_ptr config() const override { return _config; }
 
   std::unique_ptr
-  newContext(const model::Operands &operands,
+  newContext(const ir::Operands &operands,
              const std::shared_ptr &) const override
   {
     auto tensor_builder = std::make_shared(createTensorManager());
diff --git a/runtime/neurun/backend/acl_cl/ConstantInitializer.cc b/runtime/neurun/backend/acl_cl/ConstantInitializer.cc
index 52621f2..aa4254c 100644
--- a/runtime/neurun/backend/acl_cl/ConstantInitializer.cc
+++ b/runtime/neurun/backend/acl_cl/ConstantInitializer.cc
@@ -23,7 +23,7 @@ namespace backend
 namespace acl_cl
 {
 
-ConstantInitializer::ConstantInitializer(const model::Operands &operands,
+ConstantInitializer::ConstantInitializer(const ir::Operands &operands,
                                          const std::shared_ptr &tensor_builder)
     : _operands{operands}, _tensor_builder{tensor_builder}
 {
@@ -37,8 +37,7 @@ void ConstantInitializer::visit(const model::operation::BatchToSpaceND &node)
 
   if (block_size_obj.isConstant())
   {
-    _init_map[block_size_index] = [](const model::Operand &model_obj,
-                                     backend::operand::ITensor &obj) {
+    _init_map[block_size_index] = [](const ir::Operand &model_obj, backend::operand::ITensor &obj) {
       const auto &shape = model_obj.shape();
       const auto base = reinterpret_cast(model_obj.data().base());
       assert(model_obj.shape().rank() == 1);
@@ -217,8 +216,7 @@ void ConstantInitializer::visit(const model::operation::SpaceToBatchND &node)
 
   if (block_size_obj.isConstant())
   {
-    _init_map[block_size_index] = [](const model::Operand &model_obj,
-                                     backend::operand::ITensor &obj) {
+    _init_map[block_size_index] = [](const ir::Operand &model_obj, backend::operand::ITensor &obj) {
       const auto &shape = model_obj.shape();
       const auto base = reinterpret_cast(model_obj.data().base());
       assert(model_obj.shape().rank() == 1);
@@ -238,8 +236,7 @@ void ConstantInitializer::visit(const model::operation::SpaceToBatchND &node)
   const auto &paddings_obj = _operands.at(paddings_index);
   if (paddings_obj.isConstant())
   {
-    _init_map[paddings_index] = [](const model::Operand &model_obj,
-                                   backend::operand::ITensor &obj) {
+    _init_map[paddings_index] = [](const ir::Operand &model_obj, backend::operand::ITensor &obj) {
       const auto &shape = model_obj.shape();
       const auto base = reinterpret_cast(model_obj.data().base());
       assert(model_obj.shape().rank() == 2);
diff --git a/runtime/neurun/backend/acl_cl/ConstantInitializer.h b/runtime/neurun/backend/acl_cl/ConstantInitializer.h
index 59d7f38..bb38e5e 100644
--- a/runtime/neurun/backend/acl_cl/ConstantInitializer.h
+++ b/runtime/neurun/backend/acl_cl/ConstantInitializer.h
@@ -18,7 +18,7 @@
 #define __NEURUN_COMPILER_ACL_CL_CONSTANT_INITIALIZER_H__
 
 #include 
-#include 
+#include 
 #include "TensorBuilder.h"
 
 namespace neurun
@@ -31,7 +31,7 @@ namespace acl_cl
 class ConstantInitializer : public IConstantInitializer
 {
 public:
-  ConstantInitializer(const model::Operands &operands,
+  ConstantInitializer(const ir::Operands &operands,
                       const std::shared_ptr &tensor_builder);
 
 public:
@@ -48,11 +48,11 @@ public:
   void visit(const model::operation::TransposeConv &) override;
 
 private:
-  const model::Operands &operands() const override { return _operands; }
+  const ir::Operands &operands() const override
{ return _operands; } std::shared_ptr tensor_builder() const override { return _tensor_builder; } private: - const model::Operands &_operands; + const ir::Operands &_operands; std::shared_ptr _tensor_builder; }; diff --git a/runtime/neurun/backend/acl_cl/KernelGenerator.cc b/runtime/neurun/backend/acl_cl/KernelGenerator.cc index 436366a..9e2126e 100644 --- a/runtime/neurun/backend/acl_cl/KernelGenerator.cc +++ b/runtime/neurun/backend/acl_cl/KernelGenerator.cc @@ -24,7 +24,7 @@ #include #include "kernel/ConcatLayer.h" -#include "model/Index.h" +#include "ir/Index.h" #include "ir/DataType.h" #include "ir/InternalType.h" #include "compiler/IExecutionBuilder.h" @@ -143,7 +143,7 @@ void ActivationBuilder::append(ir::Activation code, ::arm_compute::ICLTensor *if // // KernelGenerator // -KernelGenerator::KernelGenerator(const neurun::model::Operands &ctx, +KernelGenerator::KernelGenerator(const ir::Operands &ctx, const std::shared_ptr &tensor_builder) : _ctx(ctx), _tensor_builder(tensor_builder), _current_subg_layout(ir::Layout::UNKNOWN) { @@ -386,7 +386,7 @@ void KernelGenerator::visit(const model::operation::Concat &node) { const auto ofm_index{node.getOutputs().at(0)}; - std::vector input_indexes; + std::vector input_indexes; for (const auto &input : node.getInputs()) input_indexes.emplace_back(input); @@ -1125,7 +1125,7 @@ void KernelGenerator::visit(const model::operation::Pack &node) const auto output_rank = _ctx.at(output_index).shape().rank(); - std::vector input_indexes; + std::vector input_indexes; for (const auto &input_index : node.getInputs()) input_indexes.emplace_back(input_index); @@ -1941,7 +1941,7 @@ void KernelGenerator::visit(const model::operation::Split &node) assert(node.param().num_splits == static_cast(node.getOutputs().size())); const auto ifm_rank = _ctx.at(ifm_index).shape().rank(); - std::vector output_indexes; + std::vector output_indexes; for (const auto &output : node.getOutputs()) output_indexes.emplace_back(output); @@ -1971,7 +1971,7 @@ void KernelGenerator::visit(const model::operation::Unpack &node) const auto input_rank = _ctx.at(input_index).shape().rank(); - std::vector output_indexes; + std::vector output_indexes; for (const auto &output_index : node.getOutputs()) output_indexes.emplace_back(output_index); diff --git a/runtime/neurun/backend/acl_cl/KernelGenerator.h b/runtime/neurun/backend/acl_cl/KernelGenerator.h index df178a8..c658535 100644 --- a/runtime/neurun/backend/acl_cl/KernelGenerator.h +++ b/runtime/neurun/backend/acl_cl/KernelGenerator.h @@ -19,7 +19,7 @@ #include -#include "model/Operands.h" +#include "ir/Operands.h" #include "TensorBuilder.h" namespace neurun @@ -32,8 +32,7 @@ namespace acl_cl class KernelGenerator : public IKernelGenerator { public: - KernelGenerator(const neurun::model::Operands &ctx, - const std::shared_ptr &tensor_builder); + KernelGenerator(const ir::Operands &ctx, const std::shared_ptr &tensor_builder); void visit(const model::Subgraph &) override; void visit(const model::operation::BatchToSpaceND &) override; @@ -98,7 +97,7 @@ public: void visit(const model::operation::Pad &) override; private: - const neurun::model::Operands &_ctx; + const ir::Operands &_ctx; std::shared_ptr _tensor_builder; ir::Layout _current_subg_layout; }; diff --git a/runtime/neurun/backend/acl_cl/ShapeFixer.cc b/runtime/neurun/backend/acl_cl/ShapeFixer.cc index 674b3d6..99e2464 100644 --- a/runtime/neurun/backend/acl_cl/ShapeFixer.cc +++ b/runtime/neurun/backend/acl_cl/ShapeFixer.cc @@ -24,7 +24,7 @@ #include #include "kernel/ConcatLayer.h" 
-#include "model/Index.h" +#include "ir/Index.h" #include "compiler/IExecutionBuilder.h" #include "exec/NopFunction.h" #include "util/logging.h" @@ -42,7 +42,7 @@ namespace acl_cl using ::neurun::backend::acl_common::asAclFunction; -ShapeFixer::ShapeFixer(const neurun::model::Operands &ctx, +ShapeFixer::ShapeFixer(const ir::Operands &ctx, const std::shared_ptr &tensor_builder) : _ctx(ctx), _tensor_builder(tensor_builder) { diff --git a/runtime/neurun/backend/acl_cl/ShapeFixer.h b/runtime/neurun/backend/acl_cl/ShapeFixer.h index b7256f7..ac384af 100644 --- a/runtime/neurun/backend/acl_cl/ShapeFixer.h +++ b/runtime/neurun/backend/acl_cl/ShapeFixer.h @@ -19,7 +19,7 @@ #include -#include "model/Operands.h" +#include "ir/Operands.h" #include "TensorBuilder.h" namespace neurun @@ -32,8 +32,7 @@ namespace acl_cl class ShapeFixer : public IShapeFixer { public: - ShapeFixer(const neurun::model::Operands &ctx, - const std::shared_ptr &tensor_builder); + ShapeFixer(const ir::Operands &ctx, const std::shared_ptr &tensor_builder); std::shared_ptr tensor_builder() override { return _tensor_builder; } @@ -99,7 +98,7 @@ public: void visit(const model::operation::Pad &) override; private: - const neurun::model::Operands &_ctx; + const ir::Operands &_ctx; std::shared_ptr _tensor_builder; }; diff --git a/runtime/neurun/backend/acl_cl/TensorRegister.h b/runtime/neurun/backend/acl_cl/TensorRegister.h index a523f2d..02de455 100644 --- a/runtime/neurun/backend/acl_cl/TensorRegister.h +++ b/runtime/neurun/backend/acl_cl/TensorRegister.h @@ -31,14 +31,13 @@ namespace acl_cl class TensorRegister : public acl_common::AclTensorRegister { public: - TensorRegister(const model::Operands &operands, - const std::shared_ptr &tensor_builder) + TensorRegister(const ir::Operands &operands, const std::shared_ptr &tensor_builder) : acl_common::AclTensorRegister{operands, tensor_builder} { // DO NOTHING } - void setUsesCount(const model::OperandIndex &ind, size_t num_uses) const override + void setUsesCount(const ir::OperandIndex &ind, size_t num_uses) const override { nnfw::misc::polymorphic_downcast(tensor_builder().get()) ->setUsesCount(ind, num_uses); diff --git a/runtime/neurun/backend/acl_common/AclLinearMemoryManager.h b/runtime/neurun/backend/acl_common/AclLinearMemoryManager.h index 3ef9358..b55121d 100644 --- a/runtime/neurun/backend/acl_common/AclLinearMemoryManager.h +++ b/runtime/neurun/backend/acl_common/AclLinearMemoryManager.h @@ -20,7 +20,7 @@ #include #include "AclMemoryManager.h" -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" #include "util/logging.h" namespace @@ -75,7 +75,7 @@ public: _io_manager->clear(); } - virtual void startLifetime(const model::OperandIndex &ind) override + virtual void startLifetime(const ir::OperandIndex &ind) override { auto &tensors = this->tensors(); assert(tensors.find(ind) != tensors.end()); @@ -86,7 +86,7 @@ public: _io_group->manage(tensor->handle()); } - virtual void finishLifetime(const model::OperandIndex &ind) override + virtual void finishLifetime(const ir::OperandIndex &ind) override { auto &tensors = this->tensors(); assert(tensors.find(ind) != tensors.end()); diff --git a/runtime/neurun/backend/acl_common/AclMemoryManager.h b/runtime/neurun/backend/acl_common/AclMemoryManager.h index 910a990..40ce2e6 100644 --- a/runtime/neurun/backend/acl_common/AclMemoryManager.h +++ b/runtime/neurun/backend/acl_common/AclMemoryManager.h @@ -22,7 +22,7 @@ #include #include "backend/IMemoryManager.h" -#include "model/OperandIndexMap.h" +#include 
"ir/OperandIndexMap.h" #include "Convert.h" #include "util/logging.h" @@ -62,18 +62,18 @@ public: } } - virtual void startLifetime(const model::OperandIndex &) { /* DO NOTHING */} - virtual void finishLifetime(const model::OperandIndex &) { /* DO NOTHING */} + virtual void startLifetime(const ir::OperandIndex &) { /* DO NOTHING */} + virtual void finishLifetime(const ir::OperandIndex &) { /* DO NOTHING */} - void buildTensor(const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info, - size_t rank, size_t num_uses) + void buildTensor(const ir::OperandIndex &ind, const ::arm_compute::TensorInfo &info, size_t rank, + size_t num_uses) { auto tensor = std::make_shared(info, rank, num_uses); _tensors[ind] = tensor; } - void buildSubtensor(std::shared_ptr parent_tensor, - const model::OperandIndex &child_ind, const ::arm_compute::TensorShape &shape, + void buildSubtensor(std::shared_ptr parent_tensor, const ir::OperandIndex &child_ind, + const ::arm_compute::TensorShape &shape, const ::arm_compute::Coordinates &coordinates, size_t rank, bool extent_parent) { @@ -82,13 +82,13 @@ public: _subtensors[child_ind] = subtensor; } - model::OperandIndexMap> &tensors(void) { return _tensors; } + ir::OperandIndexMap> &tensors(void) { return _tensors; } - model::OperandIndexMap> &subtensors(void) { return _subtensors; } + ir::OperandIndexMap> &subtensors(void) { return _subtensors; } private: - model::OperandIndexMap> _tensors; - model::OperandIndexMap> _subtensors; + ir::OperandIndexMap> _tensors; + ir::OperandIndexMap> _subtensors; }; } // namespace acl_common diff --git a/runtime/neurun/backend/acl_common/AclTensorManager.h b/runtime/neurun/backend/acl_common/AclTensorManager.h index ca77046..48a4c25 100644 --- a/runtime/neurun/backend/acl_common/AclTensorManager.h +++ b/runtime/neurun/backend/acl_common/AclTensorManager.h @@ -22,7 +22,7 @@ #include "backend/ITensorManager.h" #include "AclMemoryManager.h" #include "AclInternalBufferManager.h" -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" namespace neurun { @@ -50,27 +50,27 @@ public: void allocateInternalBufferManager(void); void deallocateInternalBufferManager(void); - void buildTensor(const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info, - size_t rank, bool as_const, size_t num_uses); - void buildSubtensor(const model::OperandIndex &parent, const model::OperandIndex &child, + void buildTensor(const ir::OperandIndex &ind, const ::arm_compute::TensorInfo &info, size_t rank, + bool as_const, size_t num_uses); + void buildSubtensor(const ir::OperandIndex &parent, const ir::OperandIndex &child, const ::arm_compute::TensorShape &shape, const ::arm_compute::Coordinates &coordinates, size_t rank, bool extent_parent); - std::shared_ptr findTensorAsParent(const model::OperandIndex &ind); + std::shared_ptr findTensorAsParent(const ir::OperandIndex &ind); - void startLifetime(const model::OperandIndex &ind); - void finishLifetime(const model::OperandIndex &ind); + void startLifetime(const ir::OperandIndex &ind); + void finishLifetime(const ir::OperandIndex &ind); - std::shared_ptr at(const ::neurun::model::OperandIndex &ind); + std::shared_ptr at(const ir::OperandIndex &ind); - model::OperandIndexMap> &constTensors(void); - model::OperandIndexMap> &nonconstTensors(void); - model::OperandIndexMap> &nonconstSubtensors(void); + ir::OperandIndexMap> &constTensors(void); + ir::OperandIndexMap> &nonconstTensors(void); + ir::OperandIndexMap> &nonconstSubtensors(void); std::shared_ptr<::arm_compute::IMemoryManager> 
internal_buffer_manager(void); - void iterate(const std::function &fn); + void iterate(const std::function &fn); void tryDeallocConstants(void); @@ -78,7 +78,7 @@ private: std::unique_ptr _const_mgr; std::unique_ptr _nonconst_mgr; std::unique_ptr _inter_mgr; - model::OperandIndexMap _ind_to_mgr; + ir::OperandIndexMap _ind_to_mgr; }; } // namespace acl_common @@ -142,8 +142,8 @@ void AclTensorManager::deallocateInternalBuffe template void AclTensorManager::buildTensor( - const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info, size_t rank, - bool as_const, size_t num_uses) + const ir::OperandIndex &ind, const ::arm_compute::TensorInfo &info, size_t rank, bool as_const, + size_t num_uses) { assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end()); if (as_const) @@ -160,7 +160,7 @@ void AclTensorManager::buildTensor( template void AclTensorManager::buildSubtensor( - const model::OperandIndex &parent, const model::OperandIndex &child, + const ir::OperandIndex &parent, const ir::OperandIndex &child, const ::arm_compute::TensorShape &shape, const ::arm_compute::Coordinates &coordinates, size_t rank, bool extent_parent) { @@ -172,8 +172,8 @@ void AclTensorManager::buildSubtensor( } template -std::shared_ptr AclTensorManager::findTensorAsParent( - const model::OperandIndex &ind) +std::shared_ptr +AclTensorManager::findTensorAsParent(const ir::OperandIndex &ind) { auto &tensors = _nonconst_mgr->tensors(); @@ -195,16 +195,14 @@ std::shared_ptr AclTensorManager::f } template -void AclTensorManager::startLifetime( - const model::OperandIndex &ind) +void AclTensorManager::startLifetime(const ir::OperandIndex &ind) { assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end()); _ind_to_mgr.at(ind).startLifetime(ind); } template -void AclTensorManager::finishLifetime( - const model::OperandIndex &ind) +void AclTensorManager::finishLifetime(const ir::OperandIndex &ind) { assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end()); _ind_to_mgr.at(ind).finishLifetime(ind); @@ -212,7 +210,7 @@ void AclTensorManager::finishLifetime( template std::shared_ptr -AclTensorManager::at(const ::neurun::model::OperandIndex &ind) +AclTensorManager::at(const ir::OperandIndex &ind) { assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end()); @@ -228,21 +226,21 @@ AclTensorManager::at(const ::neurun::model::Op } template -model::OperandIndexMap> & +ir::OperandIndexMap> & AclTensorManager::constTensors(void) { return _const_mgr->tensors(); } template -model::OperandIndexMap> & +ir::OperandIndexMap> & AclTensorManager::nonconstTensors(void) { return _nonconst_mgr->tensors(); } template -model::OperandIndexMap> & +ir::OperandIndexMap> & AclTensorManager::nonconstSubtensors(void) { return _nonconst_mgr->subtensors(); @@ -257,7 +255,7 @@ AclTensorManager::internal_buffer_manager(void template void AclTensorManager::iterate( - const std::function &fn) + const std::function &fn) { for (auto it : _nonconst_mgr->tensors()) fn(it.first); diff --git a/runtime/neurun/backend/acl_common/AclTensorRegister.cc b/runtime/neurun/backend/acl_common/AclTensorRegister.cc index b85cfe7..a2c2b9e 100644 --- a/runtime/neurun/backend/acl_common/AclTensorRegister.cc +++ b/runtime/neurun/backend/acl_common/AclTensorRegister.cc @@ -23,7 +23,7 @@ namespace backend namespace acl_common { -AclTensorRegister::AclTensorRegister(const model::Operands &operands, +AclTensorRegister::AclTensorRegister(const ir::Operands &operands, const std::shared_ptr &tensor_builder) : _operands{operands}, _tensor_builder{tensor_builder} { diff --git 
a/runtime/neurun/backend/acl_common/AclTensorRegister.h b/runtime/neurun/backend/acl_common/AclTensorRegister.h index 1c31625..5982839 100644 --- a/runtime/neurun/backend/acl_common/AclTensorRegister.h +++ b/runtime/neurun/backend/acl_common/AclTensorRegister.h @@ -29,7 +29,7 @@ namespace acl_common class AclTensorRegister : public ITensorRegister { protected: - AclTensorRegister(const model::Operands &operands, + AclTensorRegister(const ir::Operands &operands, const std::shared_ptr &tensor_builder); public: @@ -37,15 +37,15 @@ public: protected: void visit(const model::Subgraph &subgraph); - virtual void setUsesCount(const model::OperandIndex &ind, size_t num_uses) const = 0; + virtual void setUsesCount(const ir::OperandIndex &ind, size_t num_uses) const = 0; protected: - const model::Operands &operands() const override { return _operands; } + const ir::Operands &operands() const override { return _operands; } std::shared_ptr tensor_builder() const override { return _tensor_builder; } bool supportSubTensor() const final { return true; } private: - const model::Operands &_operands; + const ir::Operands &_operands; const std::shared_ptr _tensor_builder; }; diff --git a/runtime/neurun/backend/acl_common/Convert.h b/runtime/neurun/backend/acl_common/Convert.h index 33e6815..4c63815 100644 --- a/runtime/neurun/backend/acl_common/Convert.h +++ b/runtime/neurun/backend/acl_common/Convert.h @@ -23,7 +23,7 @@ #include "ir/Layout.h" #include "ir/InternalType.h" -#include "model/Operand.h" +#include "ir/Operand.h" #include "ir/Shape.h" #include "ir/TypeInfo.h" #include "misc/feature/Shape.h" diff --git a/runtime/neurun/backend/acl_common/TemplTensorBuilder.h b/runtime/neurun/backend/acl_common/TemplTensorBuilder.h index 41d70a4..6439ff0 100644 --- a/runtime/neurun/backend/acl_common/TemplTensorBuilder.h +++ b/runtime/neurun/backend/acl_common/TemplTensorBuilder.h @@ -22,7 +22,7 @@ #include #include -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" #include "AclTensorManager.h" #include "cpp14/memory.h" #include @@ -54,20 +54,20 @@ public: * @param[in] info Tensor information * @param[in] layout Tensor data layout */ - void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info, + void registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info, ir::Layout backend_layout, bool as_const) override; /** * @brief Register subtensor information to allocate on ACL-CL backend * @param[in] ind Operand index * @param[in] info Tensor information */ - void registerSubTensorInfo(const model::OperandIndex &ind, + void registerSubTensorInfo(const ir::OperandIndex &ind, const compiler::SubTensorInfo &info) override; - void notifyFirstUse(const model::OperandIndex &) override; - void notifyLastUse(const model::OperandIndex &) override; + void notifyFirstUse(const ir::OperandIndex &) override; + void notifyLastUse(const ir::OperandIndex &) override; - bool isRegistered(const model::OperandIndex &) const override; + bool isRegistered(const ir::OperandIndex &) const override; void prepare(void) override; void allocateConsts() override; @@ -76,7 +76,7 @@ public: void finalize() override; std::shared_ptr<::neurun::backend::operand::ITensor> - tensorAt(const model::OperandIndex &ind) override; + tensorAt(const ir::OperandIndex &ind) override; void iterate(const IterateFunction &fn) override; void preVisit(const model::Operation &node) override; @@ -84,20 +84,20 @@ public: std::unique_ptr releaseTensorManager(void) override; - std::shared_ptr at(const 
::neurun::model::OperandIndex &ind); + std::shared_ptr at(const ir::OperandIndex &ind); /** * @brief Check child tensor is allocated as subtensor of parent tensor * @param[in] parent Index of parent * @param[in] child Index of child * @return @c true if child is allocated as subtensor of parent, otherwise @c false */ - bool isSubTensorOf(const model::OperandIndex &parent, const model::OperandIndex &child); + bool isSubTensorOf(const ir::OperandIndex &parent, const ir::OperandIndex &child); - void dimCorrection(const model::OperandIndex &index, bool apply_dim_correction); + void dimCorrection(const ir::OperandIndex &index, bool apply_dim_correction); T_AclTensorManager *acl_tensor_manager(void) { return _tensor_mgr.get(); } - void setUsesCount(const model::OperandIndex &index, size_t num_uses) + void setUsesCount(const ir::OperandIndex &index, size_t num_uses) { assert(_uses_count_map.find(index) != _uses_count_map.end() ? _uses_count_map[index] == num_uses : true); @@ -108,29 +108,29 @@ private: void buildTensors(void); void buildSubtensors(void); void validate(void); - model::OperandIndex findRootParent(model::OperandIndex index); + ir::OperandIndex findRootParent(ir::OperandIndex index); private: - model::OperandIndexMap _tensor_info_map; - model::OperandIndexMap _subtensor_info_map; - model::OperandIndexMap _apply_dim_correction_map; - model::OperandIndexMap _tensor_layout_map; - model::OperandIndexMap _uses_count_map; + ir::OperandIndexMap _tensor_info_map; + ir::OperandIndexMap _subtensor_info_map; + ir::OperandIndexMap _apply_dim_correction_map; + ir::OperandIndexMap _tensor_layout_map; + ir::OperandIndexMap _uses_count_map; std::unique_ptr _tensor_mgr; - model::OperandIndexSequence _constants; + ir::OperandIndexSequence _constants; // TODO Consider dividing TensorBuilder into Linear and others const std::string _executor_str; // for linear executor - std::queue> _uses_queue; + std::queue> _uses_queue; uint32_t _first_uses_num; - model::OperandIndexMap _first_uses_visit; + ir::OperandIndexMap _first_uses_visit; // for subtensors - model::OperandIndexMap _parent_def; - model::OperandIndexMap _parent_uses; + ir::OperandIndexMap _parent_def; + ir::OperandIndexMap _parent_uses; }; } // namespace acl_common @@ -162,7 +162,7 @@ TemplTensorBuilder::TemplTensorBuilder( template void TemplTensorBuilder::registerTensorInfo( - const model::OperandIndex &ind, const model::OperandInfo &info, ir::Layout backend_layout, + const ir::OperandIndex &ind, const ir::OperandInfo &info, ir::Layout backend_layout, bool as_const) { assert(_tensor_mgr->constTensors().size() == 0); @@ -180,7 +180,7 @@ void TemplTensorBuilder::registerTensorInfo( template void TemplTensorBuilder::registerSubTensorInfo( - const model::OperandIndex &ind, const compiler::SubTensorInfo &info) + const ir::OperandIndex &ind, const compiler::SubTensorInfo &info) { assert(_tensor_mgr->constTensors().size() == 0); assert(_tensor_mgr->nonconstTensors().size() == 0); @@ -204,7 +204,7 @@ void TemplTensorBuilder::registerSubTensorInfo template void TemplTensorBuilder::notifyFirstUse( - const model::OperandIndex &ind) + const ir::OperandIndex &ind) { _first_uses_num++; _uses_queue.emplace(UsesType::FIRST, ind); @@ -212,14 +212,14 @@ void TemplTensorBuilder::notifyFirstUse( template void TemplTensorBuilder::notifyLastUse( - const model::OperandIndex &ind) + const ir::OperandIndex &ind) { _uses_queue.emplace(UsesType::LAST, ind); } template bool TemplTensorBuilder::isRegistered( - const model::OperandIndex &ind) const + const ir::OperandIndex 
&ind) const { return _tensor_info_map.find(ind) != _tensor_info_map.end() || _subtensor_info_map.find(ind) != _subtensor_info_map.end(); @@ -261,7 +261,7 @@ void TemplTensorBuilder::finalize(void) template std::shared_ptr<::neurun::backend::operand::ITensor> -TemplTensorBuilder::tensorAt(const model::OperandIndex &ind) +TemplTensorBuilder::tensorAt(const ir::OperandIndex &ind) { return _tensor_mgr->at(ind); } @@ -274,14 +274,14 @@ void TemplTensorBuilder::iterate(const Iterate template std::shared_ptr -TemplTensorBuilder::at(const ::neurun::model::OperandIndex &ind) +TemplTensorBuilder::at(const ir::OperandIndex &ind) { return _tensor_mgr->at(ind); } template bool TemplTensorBuilder::isSubTensorOf( - const model::OperandIndex &parent, const model::OperandIndex &child) + const ir::OperandIndex &parent, const ir::OperandIndex &child) { if (_subtensor_info_map.find(child) == _subtensor_info_map.end()) { @@ -304,7 +304,7 @@ bool TemplTensorBuilder::isSubTensorOf( template void TemplTensorBuilder::dimCorrection( - const model::OperandIndex &index, bool apply_dim_correction) + const ir::OperandIndex &index, bool apply_dim_correction) { _apply_dim_correction_map[index] = apply_dim_correction; } @@ -357,9 +357,9 @@ void TemplTensorBuilder::buildSubtensors(void) auto &subtensors = _tensor_mgr->nonconstSubtensors(); for (auto &entry : _subtensor_info_map) { - model::OperandIndex ind = entry.first; + ir::OperandIndex ind = entry.first; - std::stack stack; + std::stack stack; stack.push(ind); while (!stack.empty()) @@ -413,8 +413,8 @@ void TemplTensorBuilder::preVisit(const model: return; } - std::function def_handler = - [this, &def_handler](const model::OperandIndex &ind) { + std::function def_handler = + [this, &def_handler](const ir::OperandIndex &ind) { bool is_subtensor = _subtensor_info_map.find(ind) != _subtensor_info_map.end(); bool is_parent = _parent_def.find(ind) != _parent_def.end(); if (!is_subtensor && !is_parent) @@ -443,7 +443,7 @@ void TemplTensorBuilder::preVisit(const model: } else if (is_subtensor) { - const model::OperandIndex &parent_ind = _subtensor_info_map.at(ind).parent(); + const ir::OperandIndex &parent_ind = _subtensor_info_map.at(ind).parent(); if (_parent_def[parent_ind] == 0) return; def_handler(parent_ind); @@ -451,7 +451,7 @@ void TemplTensorBuilder::preVisit(const model: }; // See #5642 - model::OperandIndexMap outputs_map; + ir::OperandIndexMap outputs_map; for (const auto &ind : node.getOutputs()) { assert(_first_uses_visit.find(ind) != _first_uses_visit.end()); @@ -461,10 +461,10 @@ void TemplTensorBuilder::preVisit(const model: // outputs_map's all elements are true? 
auto outputs_map_all_check = [&outputs_map]() { return std::all_of(outputs_map.begin(), outputs_map.end(), - [](std::pair it) { return it.second; }); + [](std::pair it) { return it.second; }); }; - std::pair peak; + std::pair peak; while (!outputs_map_all_check() && (peak = _uses_queue.front()).first == UsesType::FIRST) { _uses_queue.pop(); @@ -487,8 +487,8 @@ void TemplTensorBuilder::postVisit(const model return; } - std::function use_handler = - [this, &use_handler](const model::OperandIndex &ind) { + std::function use_handler = + [this, &use_handler](const ir::OperandIndex &ind) { bool is_subtensor = _subtensor_info_map.find(ind) != _subtensor_info_map.end(); bool is_parent = _parent_uses.find(ind) != _parent_uses.end(); if (!is_subtensor && !is_parent) @@ -517,7 +517,7 @@ void TemplTensorBuilder::postVisit(const model } else if (is_subtensor) { - const model::OperandIndex &parent_ind = _subtensor_info_map.at(ind).parent(); + const ir::OperandIndex &parent_ind = _subtensor_info_map.at(ind).parent(); --_parent_uses[parent_ind]; assert(_parent_uses[parent_ind] > 0); } @@ -525,7 +525,7 @@ void TemplTensorBuilder::postVisit(const model // See #5642 const auto &inputs = node.getInputs(); - std::pair peak; + std::pair peak; while ((peak = _uses_queue.front()).first == UsesType::LAST) { const auto &popped_idx = peak.second; @@ -585,18 +585,18 @@ void TemplTensorBuilder::validate(void) assert(_uses_queue.size() == 0); assert(_first_uses_num == 0); - assert(std::all_of( - _parent_def.begin(), _parent_def.end(), - [](std::pair it) { return it.second == 0; })); + assert( + std::all_of(_parent_def.begin(), _parent_def.end(), + [](std::pair it) { return it.second == 0; })); - assert(std::all_of( - _parent_uses.begin(), _parent_uses.end(), - [](std::pair it) { return it.second == 0; })); + assert( + std::all_of(_parent_uses.begin(), _parent_uses.end(), + [](std::pair it) { return it.second == 0; })); } template -model::OperandIndex -TemplTensorBuilder::findRootParent(model::OperandIndex ind) +ir::OperandIndex +TemplTensorBuilder::findRootParent(ir::OperandIndex ind) { if (_subtensor_info_map.find(ind) == _subtensor_info_map.end()) return ind; diff --git a/runtime/neurun/backend/acl_neon/Backend.h b/runtime/neurun/backend/acl_neon/Backend.h index 4c0c613..2fcf669 100644 --- a/runtime/neurun/backend/acl_neon/Backend.h +++ b/runtime/neurun/backend/acl_neon/Backend.h @@ -19,7 +19,7 @@ #include #include -#include +#include #include "Config.h" #include "ConstantInitializer.h" @@ -43,7 +43,7 @@ public: std::shared_ptr config() const override { return _config; } std::unique_ptr - newContext(const model::Operands &operands, + newContext(const ir::Operands &operands, const std::shared_ptr &) const override { auto tensor_builder = std::make_shared(createTensorManager()); diff --git a/runtime/neurun/backend/acl_neon/ConstantInitializer.cc b/runtime/neurun/backend/acl_neon/ConstantInitializer.cc index 33d12b4..a8605af 100644 --- a/runtime/neurun/backend/acl_neon/ConstantInitializer.cc +++ b/runtime/neurun/backend/acl_neon/ConstantInitializer.cc @@ -23,7 +23,7 @@ namespace backend namespace acl_neon { -ConstantInitializer::ConstantInitializer(const model::Operands &operands, +ConstantInitializer::ConstantInitializer(const ir::Operands &operands, const std::shared_ptr &tensor_builder) : _operands{operands}, _tensor_builder{tensor_builder} { @@ -37,8 +37,7 @@ void ConstantInitializer::visit(const model::operation::BatchToSpaceND &node) if (block_size_obj.isConstant()) { - _init_map[block_size_index] = [](const 
model::Operand &model_obj, - backend::operand::ITensor &obj) { + _init_map[block_size_index] = [](const ir::Operand &model_obj, backend::operand::ITensor &obj) { const auto &shape = model_obj.shape(); const auto base = reinterpret_cast(model_obj.data().base()); assert(model_obj.shape().rank() == 1); @@ -192,8 +191,7 @@ void ConstantInitializer::visit(const model::operation::SpaceToBatchND &node) if (block_size_obj.isConstant()) { - _init_map[block_size_index] = [](const model::Operand &model_obj, - backend::operand::ITensor &obj) { + _init_map[block_size_index] = [](const ir::Operand &model_obj, backend::operand::ITensor &obj) { const auto &shape = model_obj.shape(); const auto base = reinterpret_cast(model_obj.data().base()); assert(model_obj.shape().rank() == 1); @@ -213,8 +211,7 @@ void ConstantInitializer::visit(const model::operation::SpaceToBatchND &node) const auto &paddings_obj = _operands.at(paddings_index); if (paddings_obj.isConstant()) { - _init_map[paddings_index] = [](const model::Operand &model_obj, - backend::operand::ITensor &obj) { + _init_map[paddings_index] = [](const ir::Operand &model_obj, backend::operand::ITensor &obj) { const auto &shape = model_obj.shape(); const auto base = reinterpret_cast(model_obj.data().base()); assert(model_obj.shape().rank() == 2); diff --git a/runtime/neurun/backend/acl_neon/ConstantInitializer.h b/runtime/neurun/backend/acl_neon/ConstantInitializer.h index 6d04149..b676073 100644 --- a/runtime/neurun/backend/acl_neon/ConstantInitializer.h +++ b/runtime/neurun/backend/acl_neon/ConstantInitializer.h @@ -18,7 +18,7 @@ #define __NEURUN_COMPILER_ACL_NEON_CONSTANT_INITIALIZER_H__ #include -#include +#include #include "TensorBuilder.h" namespace neurun @@ -31,7 +31,7 @@ namespace acl_neon class ConstantInitializer : public IConstantInitializer { public: - ConstantInitializer(const model::Operands &operands, + ConstantInitializer(const ir::Operands &operands, const std::shared_ptr &tensor_builder); public: @@ -45,11 +45,11 @@ public: void visit(const model::operation::TransposeConv &) override; private: - const model::Operands &operands() const override { return _operands; } + const ir::Operands &operands() const override { return _operands; } std::shared_ptr tensor_builder() const override { return _tensor_builder; } private: - const model::Operands &_operands; + const ir::Operands &_operands; std::shared_ptr _tensor_builder; }; diff --git a/runtime/neurun/backend/acl_neon/KernelGenerator.cc b/runtime/neurun/backend/acl_neon/KernelGenerator.cc index 080a38e..81737aa 100644 --- a/runtime/neurun/backend/acl_neon/KernelGenerator.cc +++ b/runtime/neurun/backend/acl_neon/KernelGenerator.cc @@ -24,7 +24,7 @@ #include "kernel/ConcatLayer.h" #include "util/Padding.h" -#include "model/Index.h" +#include "ir/Index.h" #include "ir/DataType.h" #include "ir/InternalType.h" #include "compiler/IExecutionBuilder.h" @@ -142,7 +142,7 @@ void ActivationBuilder::append(ir::Activation act, ::arm_compute::ITensor *ifm_a // // KernelGenerator // -KernelGenerator::KernelGenerator(const neurun::model::Operands &ctx, +KernelGenerator::KernelGenerator(const ir::Operands &ctx, const std::shared_ptr &tensor_builder) : _ctx(ctx), _tensor_builder(tensor_builder), _current_subg_layout(ir::Layout::UNKNOWN) { @@ -519,7 +519,7 @@ void KernelGenerator::visit(const model::operation::Concat &node) { const auto ofm_index{node.getOutputs().at(0)}; - std::vector input_indexes; + std::vector input_indexes; for (const auto &input : node.getInputs()) input_indexes.emplace_back(input); @@ 
-1131,7 +1131,7 @@ void KernelGenerator::visit(const model::operation::Pack &node) const auto output_rank = _ctx.at(output_index).shape().rank(); - std::vector input_indexes; + std::vector input_indexes; for (const auto &input_index : node.getInputs()) input_indexes.emplace_back(input_index); @@ -1657,7 +1657,7 @@ void KernelGenerator::visit(const model::operation::Split &node) assert(node.param().num_splits == static_cast(node.getOutputs().size())); const auto ifm_rank = _ctx.at(ifm_index).shape().rank(); - std::vector output_indexes; + std::vector output_indexes; for (const auto &output : node.getOutputs()) output_indexes.emplace_back(output); @@ -1919,7 +1919,7 @@ void KernelGenerator::visit(const model::operation::Unpack &node) const auto input_rank = _ctx.at(input_index).shape().rank(); - std::vector output_indexes; + std::vector output_indexes; for (const auto &output_index : node.getOutputs()) output_indexes.emplace_back(output_index); diff --git a/runtime/neurun/backend/acl_neon/KernelGenerator.h b/runtime/neurun/backend/acl_neon/KernelGenerator.h index 4bc1d2d..a3ff937 100644 --- a/runtime/neurun/backend/acl_neon/KernelGenerator.h +++ b/runtime/neurun/backend/acl_neon/KernelGenerator.h @@ -19,7 +19,7 @@ #include -#include "model/Operands.h" +#include "ir/Operands.h" #include "TensorBuilder.h" namespace neurun @@ -32,8 +32,7 @@ namespace acl_neon class KernelGenerator : public IKernelGenerator { public: - KernelGenerator(const neurun::model::Operands &ctx, - const std::shared_ptr &tensor_builder); + KernelGenerator(const ir::Operands &ctx, const std::shared_ptr &tensor_builder); void visit(const model::Subgraph &) override; void visit(const model::operation::Abs &) override; @@ -97,7 +96,7 @@ public: void visit(const model::operation::Comparison &) override; private: - const neurun::model::Operands &_ctx; + const ir::Operands &_ctx; std::shared_ptr _tensor_builder; ir::Layout _current_subg_layout; }; diff --git a/runtime/neurun/backend/acl_neon/ShapeFixer.cc b/runtime/neurun/backend/acl_neon/ShapeFixer.cc index 54c95a7..dcbae4c 100644 --- a/runtime/neurun/backend/acl_neon/ShapeFixer.cc +++ b/runtime/neurun/backend/acl_neon/ShapeFixer.cc @@ -33,7 +33,7 @@ #include "kernel/ConcatLayer.h" #include "util/Padding.h" -#include "model/Index.h" +#include "ir/Index.h" #include "compiler/IExecutionBuilder.h" #include "exec/NopFunction.h" #include "util/logging.h" @@ -50,7 +50,7 @@ namespace acl_neon using ::neurun::backend::acl_common::asAclFunction; -ShapeFixer::ShapeFixer(const neurun::model::Operands &ctx, +ShapeFixer::ShapeFixer(const ir::Operands &ctx, const std::shared_ptr &tensor_builder) : _ctx(ctx), _tensor_builder(tensor_builder) { diff --git a/runtime/neurun/backend/acl_neon/ShapeFixer.h b/runtime/neurun/backend/acl_neon/ShapeFixer.h index b8d8547..28ef712 100644 --- a/runtime/neurun/backend/acl_neon/ShapeFixer.h +++ b/runtime/neurun/backend/acl_neon/ShapeFixer.h @@ -19,7 +19,7 @@ #include -#include "model/Operands.h" +#include "ir/Operands.h" #include "TensorBuilder.h" namespace neurun @@ -32,8 +32,7 @@ namespace acl_neon class ShapeFixer : public IShapeFixer { public: - ShapeFixer(const neurun::model::Operands &ctx, - const std::shared_ptr &tensor_builder); + ShapeFixer(const ir::Operands &ctx, const std::shared_ptr &tensor_builder); std::shared_ptr tensor_builder() override { return _tensor_builder; } @@ -98,7 +97,7 @@ public: void visit(const model::operation::Comparison &) override; private: - const neurun::model::Operands &_ctx; + const ir::Operands &_ctx; std::shared_ptr 
_tensor_builder; }; diff --git a/runtime/neurun/backend/acl_neon/TensorRegister.h b/runtime/neurun/backend/acl_neon/TensorRegister.h index 708beb4..115e05d 100644 --- a/runtime/neurun/backend/acl_neon/TensorRegister.h +++ b/runtime/neurun/backend/acl_neon/TensorRegister.h @@ -31,14 +31,13 @@ namespace acl_neon class TensorRegister : public acl_common::AclTensorRegister { public: - TensorRegister(const model::Operands &operands, - const std::shared_ptr &tensor_builder) + TensorRegister(const ir::Operands &operands, const std::shared_ptr &tensor_builder) : acl_common::AclTensorRegister{operands, tensor_builder} { // DO NOTHING } - void setUsesCount(const model::OperandIndex &ind, size_t num_uses) const override + void setUsesCount(const ir::OperandIndex &ind, size_t num_uses) const override { nnfw::misc::polymorphic_downcast(tensor_builder().get()) ->setUsesCount(ind, num_uses); diff --git a/runtime/neurun/backend/cpu/Backend.h b/runtime/neurun/backend/cpu/Backend.h index 252d6c7..e52a776 100644 --- a/runtime/neurun/backend/cpu/Backend.h +++ b/runtime/neurun/backend/cpu/Backend.h @@ -19,7 +19,7 @@ #include #include -#include +#include #include "Config.h" #include "ConstantInitializer.h" @@ -42,7 +42,7 @@ public: std::shared_ptr config() const override { return _config; } std::unique_ptr - newContext(const model::Operands &operands, + newContext(const ir::Operands &operands, const std::shared_ptr &kb) const override { auto tensor_builder = std::make_shared(); diff --git a/runtime/neurun/backend/cpu/ConstantInitializer.cc b/runtime/neurun/backend/cpu/ConstantInitializer.cc index 8d30ffe..60a4090 100644 --- a/runtime/neurun/backend/cpu/ConstantInitializer.cc +++ b/runtime/neurun/backend/cpu/ConstantInitializer.cc @@ -23,7 +23,7 @@ namespace backend namespace cpu { -ConstantInitializer::ConstantInitializer(const model::Operands &operands, +ConstantInitializer::ConstantInitializer(const ir::Operands &operands, const std::shared_ptr &tensor_builder) : _operands{operands}, _tensor_builder{tensor_builder} { diff --git a/runtime/neurun/backend/cpu/ConstantInitializer.h b/runtime/neurun/backend/cpu/ConstantInitializer.h index 9515dbf..c901958 100644 --- a/runtime/neurun/backend/cpu/ConstantInitializer.h +++ b/runtime/neurun/backend/cpu/ConstantInitializer.h @@ -18,7 +18,7 @@ #define __NEURUN_COMPILER_CPU_CONSTANT_INITIALIZER_H__ #include -#include +#include #include "TensorBuilder.h" namespace neurun @@ -31,7 +31,7 @@ namespace cpu class ConstantInitializer : public IConstantInitializer { public: - ConstantInitializer(const model::Operands &operands, + ConstantInitializer(const ir::Operands &operands, const std::shared_ptr &tensor_builder); public: @@ -40,11 +40,11 @@ public: void visit(const model::operation::FullyConnected &) override; private: - const model::Operands &operands() const override { return _operands; } + const ir::Operands &operands() const override { return _operands; } std::shared_ptr tensor_builder() const override { return _tensor_builder; } private: - const model::Operands &_operands; + const ir::Operands &_operands; std::shared_ptr _tensor_builder; }; diff --git a/runtime/neurun/backend/cpu/KernelGenerator.cc b/runtime/neurun/backend/cpu/KernelGenerator.cc index c7a1045..461c90d 100644 --- a/runtime/neurun/backend/cpu/KernelGenerator.cc +++ b/runtime/neurun/backend/cpu/KernelGenerator.cc @@ -52,8 +52,7 @@ namespace cpu { KernelGenerator::KernelGenerator( - const neurun::model::Operands &operand_ctx, - const std::shared_ptr &tensor_builder, + const ir::Operands &operand_ctx, const 
std::shared_ptr &tensor_builder, const std::shared_ptr &kernel_builer) : _ctx(operand_ctx), _tensor_builder(tensor_builder), _kernel_builder(kernel_builer), _current_subg_layout(ir::Layout::UNKNOWN) @@ -535,7 +534,7 @@ void KernelGenerator::visit(const model::operation::Permute &node) void KernelGenerator::visit(const model::operation::Custom &node) { - auto get_type_info = [this](const model::Operand &operand) -> custom::TypeInfo { + auto get_type_info = [this](const ir::Operand &operand) -> custom::TypeInfo { auto backendDescr = ::neurun::backend::cpu::kernel::getTensorDescriptor(operand, _current_subg_layout); @@ -548,7 +547,7 @@ void KernelGenerator::visit(const model::operation::Custom &node) return {shape, backendDescr.type}; }; - auto fill_op_info = [&](const model::OperandIndexSequence &opSeq, + auto fill_op_info = [&](const ir::OperandIndexSequence &opSeq, std::vector &types, std::vector &allocs) { for (auto &idx : opSeq) { diff --git a/runtime/neurun/backend/cpu/KernelGenerator.h b/runtime/neurun/backend/cpu/KernelGenerator.h index 711ebc5..0884e77 100644 --- a/runtime/neurun/backend/cpu/KernelGenerator.h +++ b/runtime/neurun/backend/cpu/KernelGenerator.h @@ -18,7 +18,7 @@ #define __NEURUN_BACKEND_CPU_KERNEL_GENERATOR_H__ #include "backend/IKernelGenerator.h" -#include "model/Operands.h" +#include "ir/Operands.h" #include "operand/Tensor.h" #include "backend/CustomKernelBuilder.h" #include "TensorBuilder.h" @@ -33,8 +33,7 @@ namespace cpu class KernelGenerator : public IKernelGenerator { public: - KernelGenerator(const neurun::model::Operands &ctx, - const std::shared_ptr &tensor_builder, + KernelGenerator(const ir::Operands &ctx, const std::shared_ptr &tensor_builder, const std::shared_ptr &kernel_builder); using IKernelGenerator::visit; @@ -59,7 +58,7 @@ public: void visit(const model::operation::Pad &); private: - const neurun::model::Operands &_ctx; + const ir::Operands &_ctx; std::shared_ptr _tensor_builder; std::shared_ptr _kernel_builder; ir::Layout _current_subg_layout; diff --git a/runtime/neurun/backend/cpu/MemoryManager.cc b/runtime/neurun/backend/cpu/MemoryManager.cc index e15c500..9775d4d 100644 --- a/runtime/neurun/backend/cpu/MemoryManager.cc +++ b/runtime/neurun/backend/cpu/MemoryManager.cc @@ -40,18 +40,18 @@ IMemoryPlanner *MemoryManager::createMemoryPlanner() return MemoryPlannerFactory::get().create(planner_id); } -void MemoryManager::buildTensor(const model::OperandIndex &ind, const model::OperandInfo &info) +void MemoryManager::buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &info) { auto tensor = std::make_shared(info); _tensors[ind] = tensor; } -void MemoryManager::claimPlan(const model::OperandIndex &ind, uint32_t size) +void MemoryManager::claimPlan(const ir::OperandIndex &ind, uint32_t size) { _mem_planner->claim(ind, size); } -void MemoryManager::releasePlan(const model::OperandIndex &ind) { _mem_planner->release(ind); } +void MemoryManager::releasePlan(const ir::OperandIndex &ind) { _mem_planner->release(ind); } void MemoryManager::allocate(void) { diff --git a/runtime/neurun/backend/cpu/MemoryManager.h b/runtime/neurun/backend/cpu/MemoryManager.h index 0fb8906..bf0f554 100644 --- a/runtime/neurun/backend/cpu/MemoryManager.h +++ b/runtime/neurun/backend/cpu/MemoryManager.h @@ -20,7 +20,7 @@ #include "backend/IMemoryManager.h" #include "MemoryPlanner.h" #include "operand/Tensor.h" -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" namespace neurun { @@ -38,18 +38,18 @@ public: void allocate(void) override; void 
deallocate(void) override { _mem_alloc->release(); } - void buildTensor(const model::OperandIndex &ind, const model::OperandInfo &info); - void claimPlan(const model::OperandIndex &ind, uint32_t size); - void releasePlan(const model::OperandIndex &ind); + void buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &info); + void claimPlan(const ir::OperandIndex &ind, uint32_t size); + void releasePlan(const ir::OperandIndex &ind); - model::OperandIndexMap> &tensors(void) { return _tensors; } + ir::OperandIndexMap> &tensors(void) { return _tensors; } private: IMemoryPlanner *createMemoryPlanner(); private: - model::OperandIndexMap> _tensors; - model::OperandIndexMap _tensor_mem_map; + ir::OperandIndexMap> _tensors; + ir::OperandIndexMap _tensor_mem_map; std::shared_ptr _mem_planner; std::shared_ptr _mem_alloc; }; diff --git a/runtime/neurun/backend/cpu/MemoryPlanner.cc b/runtime/neurun/backend/cpu/MemoryPlanner.cc index 8eaf7bb..0cee5fd 100644 --- a/runtime/neurun/backend/cpu/MemoryPlanner.cc +++ b/runtime/neurun/backend/cpu/MemoryPlanner.cc @@ -33,7 +33,7 @@ Allocator::Allocator(uint32_t capacity) VERBOSE(ALLOC) << "base pointer: " << static_cast(_base.get()) << std::endl; } -void BumpPlanner::claim(const model::OperandIndex &ind, size_t size) +void BumpPlanner::claim(const ir::OperandIndex &ind, size_t size) { assert(size != 0); @@ -45,7 +45,7 @@ void BumpPlanner::claim(const model::OperandIndex &ind, size_t size) << std::endl; } -void BumpPlanner::release(const model::OperandIndex &ind) +void BumpPlanner::release(const ir::OperandIndex &ind) { VERBOSE(BP_PLANNER) << "RELEASE(#" << ind.value() << "): " << "NOTHING does" << std::endl; @@ -54,7 +54,7 @@ void BumpPlanner::release(const model::OperandIndex &ind) // There are some assumptions for claiming memory(== making a reservation for memory). // 1. About _claim_table(std::map). // - The table's data structure is std::map so that it always sorts -// value(model::OperandIndex) by key(base_offset). +// value(OperandIndex) by key(base_offset). // - This claim() inserts key/value into _claim_table and the release() removes the key/value from // _claim_table. // - _claim_table shows the memory status at a certain point in time. Therefore, @@ -65,7 +65,7 @@ void BumpPlanner::release(const model::OperandIndex &ind) // point in time, it means the place at the offset can be claimed. // 2. In the loop for _claim_table, we can assume the current claim_base_offset value is bigger than // the previous claim_base_offset. 
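
For readers skimming the diff, the comment block above fully determines how claim() walks _claim_table. The following standalone sketch (illustrative only, not part of the patch) condenses that first-fit scan; it assumes a simplified claim table of base_offset -> (index, size), aliases OperandIndex to uint32_t instead of neurun::ir::OperandIndex, and omits the real planner's capacity tracking, memory-plan map, and logging:

    // Hypothetical, simplified first-fit claim: visit reservations in
    // ascending base_offset order (std::map guarantees this) and take the
    // first gap large enough for `size`.
    #include <cstddef>
    #include <cstdint>
    #include <map>
    #include <utility>

    using OperandIndex = uint32_t; // stand-in for neurun::ir::OperandIndex

    uint32_t first_fit_claim(std::map<uint32_t, std::pair<OperandIndex, size_t>> &claim_table,
                             OperandIndex ind, size_t size)
    {
      uint32_t next_offset = 0; // candidate base offset for the new block
      for (const auto &claim : claim_table)
      {
        if (next_offset + size <= claim.first)
          break; // the gap before this reservation is big enough
        // Otherwise skip past this reservation and try the next gap.
        next_offset = static_cast<uint32_t>(claim.first + claim.second.second);
      }
      claim_table.emplace(next_offset, std::make_pair(ind, size));
      return next_offset;
    }

Because release() erases an entry from the same sorted table, a freed gap is automatically reconsidered by the next claim(), which is what makes the map-keyed-by-offset invariant above load-bearing.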
-void FirstFitPlanner::claim(const model::OperandIndex &ind, size_t size) +void FirstFitPlanner::claim(const ir::OperandIndex &ind, size_t size) { assert(size != 0); @@ -98,7 +98,7 @@ void FirstFitPlanner::claim(const model::OperandIndex &ind, size_t size) } } -void FirstFitPlanner::release(const model::OperandIndex &ind) +void FirstFitPlanner::release(const ir::OperandIndex &ind) { for (auto it = _claim_table.cbegin(); it != _claim_table.cend(); ++it) { diff --git a/runtime/neurun/backend/cpu/MemoryPlanner.h b/runtime/neurun/backend/cpu/MemoryPlanner.h index eaa4299..fc7302b 100644 --- a/runtime/neurun/backend/cpu/MemoryPlanner.h +++ b/runtime/neurun/backend/cpu/MemoryPlanner.h @@ -25,7 +25,7 @@ #include #include -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" namespace neurun { @@ -66,19 +66,19 @@ private: */ struct IMemoryPlanner { - using MemoryPlans = model::OperandIndexMap; + using MemoryPlans = ir::OperandIndexMap; /** * @brief Claim memory for operand * @param[in] index The operand index * @param[in] size The size of the memory */ - virtual void claim(const model::OperandIndex &, size_t) = 0; + virtual void claim(const ir::OperandIndex &, size_t) = 0; /** * @brief Release memory for operand * @param[in] index The operand index */ - virtual void release(const model::OperandIndex &) = 0; + virtual void release(const ir::OperandIndex &) = 0; /** * @brief Get capacity for memory planning * @return The value of capacity @@ -104,12 +104,12 @@ public: * @param[in] index The operand index * @param[in] size The size of the memory */ - void claim(const model::OperandIndex &, size_t) override; + void claim(const ir::OperandIndex &, size_t) override; /** * @brief Release memory for operand by bump way * @param[in] index The operand index */ - void release(const model::OperandIndex &) override; + void release(const ir::OperandIndex &) override; /** * @brief Get capacity for memory planning * @return The value of capacity @@ -137,12 +137,12 @@ public: * @param[in] index The operand index * @param[in] size The size of the memory */ - void claim(const model::OperandIndex &, size_t) override; + void claim(const ir::OperandIndex &, size_t) override; /** * @brief Release memory for operand by firstfit way * @param[in] index The operand index */ - void release(const model::OperandIndex &) override; + void release(const ir::OperandIndex &) override; /** * @brief Get capacity for memory planning * @return The value of capacity @@ -158,7 +158,7 @@ private: uint32_t _capacity = 0; MemoryPlans _mem_plans; // Use std::map because claim() assumes that _claim_table is sorted by uint32_t(base_offset) - std::map _claim_table; + std::map _claim_table; }; } // namespace cpu diff --git a/runtime/neurun/backend/cpu/MemoryPlanner.test.cc b/runtime/neurun/backend/cpu/MemoryPlanner.test.cc index 39e0f0d..eee30cf 100644 --- a/runtime/neurun/backend/cpu/MemoryPlanner.test.cc +++ b/runtime/neurun/backend/cpu/MemoryPlanner.test.cc @@ -17,7 +17,7 @@ #include #include "MemoryPlanner.h" -#include "model/Index.h" +#include "ir/Index.h" TEST(Allocator, allocate_test) { @@ -30,7 +30,7 @@ TEST(BumpPlanner, claim_test) ::neurun::backend::cpu::BumpPlanner planner; auto claim = [&planner](uint32_t index, size_t size, uint32_t expected_offset) { - ::neurun::model::OperandIndex mem_idx(index); + neurun::ir::OperandIndex mem_idx(index); planner.claim(mem_idx, size); auto mem_blk = planner.memory_plans()[mem_idx]; ASSERT_EQ(mem_blk.offset, expected_offset); @@ -47,7 +47,7 @@ TEST(FirstFitPlanner, 
claim_release_test) ::neurun::backend::cpu::FirstFitPlanner planner; auto claim = [&planner](uint32_t index, size_t size, uint32_t expected_offset) { - ::neurun::model::OperandIndex mem_idx(index); + neurun::ir::OperandIndex mem_idx(index); planner.claim(mem_idx, size); auto mem_blk = planner.memory_plans()[mem_idx]; ASSERT_EQ(mem_blk.offset, expected_offset); @@ -55,7 +55,7 @@ TEST(FirstFitPlanner, claim_release_test) }; auto release = [&planner](uint32_t index) { - ::neurun::model::OperandIndex mem_idx(index); + neurun::ir::OperandIndex mem_idx(index); planner.release(mem_idx); }; diff --git a/runtime/neurun/backend/cpu/ShapeFixer.cc b/runtime/neurun/backend/cpu/ShapeFixer.cc index 5ec7ebd..22120f3 100644 --- a/runtime/neurun/backend/cpu/ShapeFixer.cc +++ b/runtime/neurun/backend/cpu/ShapeFixer.cc @@ -50,7 +50,7 @@ namespace backend namespace cpu { -ShapeFixer::ShapeFixer(const neurun::model::Operands &operand_ctx, +ShapeFixer::ShapeFixer(const ir::Operands &operand_ctx, const std::shared_ptr &tensor_builder) : _ctx(operand_ctx), _tensor_builder(tensor_builder) { diff --git a/runtime/neurun/backend/cpu/ShapeFixer.h b/runtime/neurun/backend/cpu/ShapeFixer.h index 6788f41..ca2355c 100644 --- a/runtime/neurun/backend/cpu/ShapeFixer.h +++ b/runtime/neurun/backend/cpu/ShapeFixer.h @@ -19,7 +19,7 @@ #include -#include "model/Operands.h" +#include "ir/Operands.h" #include "operand/Tensor.h" #include "TensorBuilder.h" @@ -33,8 +33,7 @@ namespace cpu class ShapeFixer : public IShapeFixer { public: - ShapeFixer(const neurun::model::Operands &ctx, - const std::shared_ptr &tensor_builder); + ShapeFixer(const ir::Operands &ctx, const std::shared_ptr &tensor_builder); std::shared_ptr tensor_builder() override { return _tensor_builder; } @@ -57,7 +56,7 @@ public: void visit(const model::operation::Pad &); private: - const neurun::model::Operands &_ctx; + const ir::Operands &_ctx; std::shared_ptr _tensor_builder; }; diff --git a/runtime/neurun/backend/cpu/TensorBuilder.cc b/runtime/neurun/backend/cpu/TensorBuilder.cc index 5484cb3..2c654c2 100644 --- a/runtime/neurun/backend/cpu/TensorBuilder.cc +++ b/runtime/neurun/backend/cpu/TensorBuilder.cc @@ -32,8 +32,8 @@ TensorBuilder::TensorBuilder() : _tensor_mgr{new TensorManager()} // DO NOTHING } -void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind, - const model::OperandInfo &info, ir::Layout, bool as_const) +void TensorBuilder::registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info, + ir::Layout, bool as_const) { _tensor_info_map.emplace(ind, info); @@ -41,14 +41,13 @@ void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind, _constants.append(ind); } -void TensorBuilder::registerSubTensorInfo(const model::OperandIndex &, - const compiler::SubTensorInfo &) +void TensorBuilder::registerSubTensorInfo(const ir::OperandIndex &, const compiler::SubTensorInfo &) { // Not supported yet assert(false); } -void TensorBuilder::notifyFirstUse(const model::OperandIndex &ind) +void TensorBuilder::notifyFirstUse(const ir::OperandIndex &ind) { assert(_tensor_info_map.find(ind) != _tensor_info_map.end()); const auto tensor_info = _tensor_info_map.at(ind); @@ -57,9 +56,9 @@ void TensorBuilder::notifyFirstUse(const model::OperandIndex &ind) _tensor_mgr->claimPlan(ind, size); } -void TensorBuilder::notifyLastUse(const model::OperandIndex &ind) { _tensor_mgr->releasePlan(ind); } +void TensorBuilder::notifyLastUse(const ir::OperandIndex &ind) { _tensor_mgr->releasePlan(ind); } -bool TensorBuilder::isRegistered(const 
model::OperandIndex &ind) const +bool TensorBuilder::isRegistered(const ir::OperandIndex &ind) const { return _tensor_info_map.find(ind) != _tensor_info_map.end(); } @@ -83,14 +82,14 @@ void TensorBuilder::allocateNonconsts() } std::shared_ptr<::neurun::backend::operand::ITensor> -TensorBuilder::tensorAt(const model::OperandIndex &ind) +TensorBuilder::tensorAt(const ir::OperandIndex &ind) { return _tensor_mgr->at(ind); } void TensorBuilder::iterate(const IterateFunction &fn) { _tensor_mgr->iterate(fn); } -std::shared_ptr TensorBuilder::at(const ::neurun::model::OperandIndex &ind) +std::shared_ptr TensorBuilder::at(const ir::OperandIndex &ind) { return _tensor_mgr->at(ind); } diff --git a/runtime/neurun/backend/cpu/TensorBuilder.h b/runtime/neurun/backend/cpu/TensorBuilder.h index 0ec1eab..a8e2fbf 100644 --- a/runtime/neurun/backend/cpu/TensorBuilder.h +++ b/runtime/neurun/backend/cpu/TensorBuilder.h @@ -21,7 +21,7 @@ #include #include "operand/Tensor.h" -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" #include "TensorManager.h" namespace neurun @@ -42,20 +42,20 @@ public: * @param[in] info Operand information * @param[in] layout Operand data layout */ - void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info, + void registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info, ir::Layout backend_layout, bool as_const) override; /** * @brief Register subtensor information to allocate on CPU backend * @param[in] ind Operand index * @param[in] info Tensor information */ - void registerSubTensorInfo(const model::OperandIndex &ind, + void registerSubTensorInfo(const ir::OperandIndex &ind, const compiler::SubTensorInfo &info) override; - void notifyFirstUse(const model::OperandIndex &) override; - void notifyLastUse(const model::OperandIndex &) override; + void notifyFirstUse(const ir::OperandIndex &) override; + void notifyLastUse(const ir::OperandIndex &) override; - bool isRegistered(const model::OperandIndex &) const override; + bool isRegistered(const ir::OperandIndex &) const override; void prepare(void) override; void allocateConsts() override; @@ -64,7 +64,7 @@ public: void finalize() override { /* DO NOTHING */} std::shared_ptr<::neurun::backend::operand::ITensor> - tensorAt(const model::OperandIndex &ind) override; + tensorAt(const ir::OperandIndex &ind) override; void iterate(const IterateFunction &fn) override; @@ -73,12 +73,12 @@ public: std::unique_ptr releaseTensorManager(void) override; - std::shared_ptr at(const ::neurun::model::OperandIndex &ind); + std::shared_ptr at(const ir::OperandIndex &ind); private: std::unique_ptr _tensor_mgr; - model::OperandIndexMap _tensor_info_map; - model::OperandIndexSequence _constants; + ir::OperandIndexMap _tensor_info_map; + ir::OperandIndexSequence _constants; }; } // namespace cpu diff --git a/runtime/neurun/backend/cpu/TensorManager.cc b/runtime/neurun/backend/cpu/TensorManager.cc index 90751c7..912b6ca 100644 --- a/runtime/neurun/backend/cpu/TensorManager.cc +++ b/runtime/neurun/backend/cpu/TensorManager.cc @@ -36,8 +36,8 @@ void TensorManager::deallocateConsts(void) { _const_mgr->deallocate(); } void TensorManager::deallocateNonconsts(void) { _nonconst_mgr->deallocate(); } -void TensorManager::buildTensor(const model::OperandIndex &ind, - const model::OperandInfo &tensor_info, bool as_const) +void TensorManager::buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &tensor_info, + bool as_const) { assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end()); if (as_const) @@ 
-52,35 +52,35 @@ void TensorManager::buildTensor(const model::OperandIndex &ind, } } -void TensorManager::claimPlan(const model::OperandIndex &ind, uint32_t size) +void TensorManager::claimPlan(const ir::OperandIndex &ind, uint32_t size) { assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end()); _ind_to_mgr.at(ind).claimPlan(ind, size); } -void TensorManager::releasePlan(const model::OperandIndex &ind) +void TensorManager::releasePlan(const ir::OperandIndex &ind) { assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end()); _ind_to_mgr.at(ind).releasePlan(ind); } -std::shared_ptr TensorManager::at(const ::neurun::model::OperandIndex &ind) +std::shared_ptr TensorManager::at(const ir::OperandIndex &ind) { assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end()); return _ind_to_mgr.at(ind).tensors().at(ind); } -model::OperandIndexMap> &TensorManager::constTensors(void) +ir::OperandIndexMap> &TensorManager::constTensors(void) { return _const_mgr->tensors(); } -model::OperandIndexMap> &TensorManager::nonconstTensors(void) +ir::OperandIndexMap> &TensorManager::nonconstTensors(void) { return _nonconst_mgr->tensors(); } -void TensorManager::iterate(const std::function &fn) +void TensorManager::iterate(const std::function &fn) { for (auto it : _nonconst_mgr->tensors()) fn(it.first); diff --git a/runtime/neurun/backend/cpu/TensorManager.h b/runtime/neurun/backend/cpu/TensorManager.h index 22cf446..c3ef706 100644 --- a/runtime/neurun/backend/cpu/TensorManager.h +++ b/runtime/neurun/backend/cpu/TensorManager.h @@ -19,7 +19,7 @@ #include "backend/ITensorManager.h" #include "MemoryManager.h" -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" namespace neurun { @@ -39,23 +39,22 @@ public: void deallocateConsts(void) override; void deallocateNonconsts(void) override; - void buildTensor(const model::OperandIndex &ind, const model::OperandInfo &tensor_info, - bool as_const); + void buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &tensor_info, bool as_const); - void claimPlan(const model::OperandIndex &ind, uint32_t size); - void releasePlan(const model::OperandIndex &ind); + void claimPlan(const ir::OperandIndex &ind, uint32_t size); + void releasePlan(const ir::OperandIndex &ind); - std::shared_ptr at(const ::neurun::model::OperandIndex &ind); + std::shared_ptr at(const ir::OperandIndex &ind); - model::OperandIndexMap> &constTensors(void); - model::OperandIndexMap> &nonconstTensors(void); + ir::OperandIndexMap> &constTensors(void); + ir::OperandIndexMap> &nonconstTensors(void); - void iterate(const std::function &fn); + void iterate(const std::function &fn); private: std::unique_ptr _const_mgr; std::unique_ptr _nonconst_mgr; - model::OperandIndexMap _ind_to_mgr; + ir::OperandIndexMap _ind_to_mgr; }; } // namespace cpu diff --git a/runtime/neurun/backend/cpu/TensorRegister.cc b/runtime/neurun/backend/cpu/TensorRegister.cc index 0f0a204..2701503 100644 --- a/runtime/neurun/backend/cpu/TensorRegister.cc +++ b/runtime/neurun/backend/cpu/TensorRegister.cc @@ -23,7 +23,7 @@ namespace backend namespace cpu { -TensorRegister::TensorRegister(const model::Operands &operands, +TensorRegister::TensorRegister(const ir::Operands &operands, const std::shared_ptr &tensor_builder) : _operands{operands}, _tensor_builder{tensor_builder} { diff --git a/runtime/neurun/backend/cpu/TensorRegister.h b/runtime/neurun/backend/cpu/TensorRegister.h index b302fd3..1bda9fc 100644 --- a/runtime/neurun/backend/cpu/TensorRegister.h +++ b/runtime/neurun/backend/cpu/TensorRegister.h @@ -30,16 +30,16 @@ namespace cpu class 
TensorRegister : public ITensorRegister { public: - TensorRegister(const model::Operands &operands, + TensorRegister(const ir::Operands &operands, const std::shared_ptr &tensor_builder); private: - const model::Operands &operands() const override { return _operands; } + const ir::Operands &operands() const override { return _operands; } std::shared_ptr tensor_builder() const override { return _tensor_builder; } bool supportSubTensor() const final { return false; } private: - const model::Operands &_operands; + const ir::Operands &_operands; const std::shared_ptr _tensor_builder; }; diff --git a/runtime/neurun/backend/cpu/kernel/OperationUtils.cc b/runtime/neurun/backend/cpu/kernel/OperationUtils.cc index dbcbac2..01215b8 100644 --- a/runtime/neurun/backend/cpu/kernel/OperationUtils.cc +++ b/runtime/neurun/backend/cpu/kernel/OperationUtils.cc @@ -191,7 +191,7 @@ int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift) return static_cast(std::floor(max_input_rescaled)); } -TensorDescriptor getTensorDescriptor(const ::neurun::model::Operand &o, ir::Layout frontend_layout) +TensorDescriptor getTensorDescriptor(const ir::Operand &o, ir::Layout frontend_layout) { TensorDescriptor descriptor; diff --git a/runtime/neurun/backend/cpu/kernel/OperationUtils.h b/runtime/neurun/backend/cpu/kernel/OperationUtils.h index f8ab905..4bd2b32 100644 --- a/runtime/neurun/backend/cpu/kernel/OperationUtils.h +++ b/runtime/neurun/backend/cpu/kernel/OperationUtils.h @@ -23,7 +23,7 @@ #include -#include "model/Operand.h" +#include "ir/Operand.h" #include "ir/DataType.h" #include @@ -138,7 +138,7 @@ void CalculateActivationRangeUint8(ir::Activation activation, const TensorDescri int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift); -TensorDescriptor getTensorDescriptor(const ::neurun::model::Operand &o, ir::Layout frontend_layout); +TensorDescriptor getTensorDescriptor(const ir::Operand &o, ir::Layout frontend_layout); uint32_t sizeOfData(OperandType type, const std::vector &dimensions); diff --git a/runtime/neurun/backend/cpu/operand/Tensor.h b/runtime/neurun/backend/cpu/operand/Tensor.h index ef0579f..dec6808 100644 --- a/runtime/neurun/backend/cpu/operand/Tensor.h +++ b/runtime/neurun/backend/cpu/operand/Tensor.h @@ -18,7 +18,7 @@ #define __NEURUN_BACKEND_CPU_OPERAND_TENSOR_H__ #include -#include "model/OperandInfo.h" +#include "ir/OperandInfo.h" namespace neurun { @@ -35,7 +35,7 @@ public: Tensor() = delete; public: - Tensor(const model::OperandInfo &info) : _info(info) + Tensor(const ir::OperandInfo &info) : _info(info) { // DO NOTHING } @@ -65,7 +65,7 @@ public: void access(const std::function &fn) final; private: - model::OperandInfo _info; + ir::OperandInfo _info; uint8_t *_buffer = nullptr; }; diff --git a/runtime/neurun/backend/hi_perf_cpu/KernelGenerator.h b/runtime/neurun/backend/hi_perf_cpu/KernelGenerator.h index 71322e7..3197995 100644 --- a/runtime/neurun/backend/hi_perf_cpu/KernelGenerator.h +++ b/runtime/neurun/backend/hi_perf_cpu/KernelGenerator.h @@ -19,7 +19,7 @@ #include -#include "model/Operands.h" +#include "ir/Operands.h" #include "TensorBuilder.h" namespace neurun @@ -32,12 +32,11 @@ namespace hi_perf_cpu class KernelGenerator : public IKernelGenerator { public: - KernelGenerator(const neurun::model::Operands &ctx, - const std::shared_ptr &tensor_builder); + KernelGenerator(const Operands &ctx, const std::shared_ptr &tensor_builder); // TODO add more ops private: - const neurun::model::Operands &_ctx; + const Operands &_ctx; std::shared_ptr 
_tensor_builder; }; diff --git a/runtime/neurun/backend/hi_perf_cpu/TensorBuilder.h b/runtime/neurun/backend/hi_perf_cpu/TensorBuilder.h index 500730d..af879a4 100644 --- a/runtime/neurun/backend/hi_perf_cpu/TensorBuilder.h +++ b/runtime/neurun/backend/hi_perf_cpu/TensorBuilder.h @@ -20,7 +20,7 @@ #include #include -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" namespace neurun { diff --git a/runtime/neurun/backend/srcn/Backend.h b/runtime/neurun/backend/srcn/Backend.h index 5426919..bc76a7e 100644 --- a/runtime/neurun/backend/srcn/Backend.h +++ b/runtime/neurun/backend/srcn/Backend.h @@ -19,7 +19,7 @@ #include #include -#include +#include #include "Config.h" #include "ConstantInitializer.h" @@ -42,7 +42,7 @@ public: std::shared_ptr config() const override { return _config; } std::unique_ptr - newContext(const model::Operands &operands, + newContext(const ir::Operands &operands, const std::shared_ptr &kb) const override { auto tensor_builder = std::make_shared(); diff --git a/runtime/neurun/backend/srcn/ConstantInitializer.cc b/runtime/neurun/backend/srcn/ConstantInitializer.cc index 3260d54..f152659 100644 --- a/runtime/neurun/backend/srcn/ConstantInitializer.cc +++ b/runtime/neurun/backend/srcn/ConstantInitializer.cc @@ -22,7 +22,7 @@ namespace { template -static void PermuteKernel(const neurun::model::Operand &model_obj, +static void PermuteKernel(const neurun::ir::Operand &model_obj, neurun::backend::operand::ITensor &obj, const std::vector &permutation) { @@ -81,15 +81,15 @@ namespace backend namespace srcn { -ConstantInitializer::ConstantInitializer(const model::Operands &operands, +ConstantInitializer::ConstantInitializer(const ir::Operands &operands, const std::shared_ptr &tensor_builder) : _operands{operands}, _tensor_builder{tensor_builder} { // DO NOTHING } -void ConstantInitializer::registerPermuteKernelInitializer(const model::OperandIndex &index, - const model::Operand &obj, +void ConstantInitializer::registerPermuteKernelInitializer(const ir::OperandIndex &index, + const ir::Operand &obj, const std::vector &permutation) { // For only CONSTANTS diff --git a/runtime/neurun/backend/srcn/ConstantInitializer.h b/runtime/neurun/backend/srcn/ConstantInitializer.h index 18dfc6c..ed75b0c 100644 --- a/runtime/neurun/backend/srcn/ConstantInitializer.h +++ b/runtime/neurun/backend/srcn/ConstantInitializer.h @@ -18,7 +18,7 @@ #define __NEURUN_COMPILER_SRCN_CONSTANT_INITIALIZER_H__ #include -#include +#include #include "TensorBuilder.h" #include @@ -32,11 +32,11 @@ namespace srcn class ConstantInitializer : public IConstantInitializer { public: - ConstantInitializer(const model::Operands &operands, + ConstantInitializer(const ir::Operands &operands, const std::shared_ptr &tensor_builder); public: - void registerPermuteKernelInitializer(const model::OperandIndex &index, const model::Operand &obj, + void registerPermuteKernelInitializer(const ir::OperandIndex &index, const ir::Operand &obj, const std::vector &permutation); public: @@ -45,11 +45,11 @@ public: void visit(const model::operation::TransposeConv &) override; private: - const model::Operands &operands() const override { return _operands; } + const ir::Operands &operands() const override { return _operands; } std::shared_ptr tensor_builder() const override { return _tensor_builder; } private: - const model::Operands &_operands; + const ir::Operands &_operands; std::shared_ptr _tensor_builder; }; diff --git a/runtime/neurun/backend/srcn/Convert.cc b/runtime/neurun/backend/srcn/Convert.cc index 46b11ca..1d80b2c 
100644 --- a/runtime/neurun/backend/srcn/Convert.cc +++ b/runtime/neurun/backend/srcn/Convert.cc @@ -62,10 +62,10 @@ ir::Shape asTensorShape(const ir::Shape &shape, ir::Layout frontend_layout, return ret; } -model::OperandInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo, - ir::Layout frontend_layout, ir::Layout backend_layout) +ir::OperandInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo, + ir::Layout frontend_layout, ir::Layout backend_layout) { - model::OperandInfo info(asTensorShape(shape, frontend_layout, backend_layout), typeInfo); + ir::OperandInfo info(asTensorShape(shape, frontend_layout, backend_layout), typeInfo); return info; } diff --git a/runtime/neurun/backend/srcn/Convert.h b/runtime/neurun/backend/srcn/Convert.h index 3268da2..64be46e 100644 --- a/runtime/neurun/backend/srcn/Convert.h +++ b/runtime/neurun/backend/srcn/Convert.h @@ -21,7 +21,7 @@ #include #include #include -#include +#include namespace neurun { @@ -36,8 +36,8 @@ ir::Shape asKernelShape(const ir::Shape &shape, kernel::FilterLayout frontend_la ir::Shape asTensorShape(const ir::Shape &shape, ir::Layout frontend_layout, ir::Layout backend_layout); -model::OperandInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo, - ir::Layout frontend_layout, ir::Layout backend_layout); +ir::OperandInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo, + ir::Layout frontend_layout, ir::Layout backend_layout); } // namespace srcn } // namespace backend diff --git a/runtime/neurun/backend/srcn/KernelGenerator.cc b/runtime/neurun/backend/srcn/KernelGenerator.cc index d3f86d4..5b30fc8 100644 --- a/runtime/neurun/backend/srcn/KernelGenerator.cc +++ b/runtime/neurun/backend/srcn/KernelGenerator.cc @@ -40,7 +40,7 @@ namespace backend namespace srcn { -KernelGenerator::KernelGenerator(const neurun::model::Operands &operand_ctx, +KernelGenerator::KernelGenerator(const ir::Operands &operand_ctx, const std::shared_ptr &tensor_builder, const std::shared_ptr &kb) : _ctx(operand_ctx), _tensor_builder(tensor_builder), _kernel_builder(kb), diff --git a/runtime/neurun/backend/srcn/KernelGenerator.h b/runtime/neurun/backend/srcn/KernelGenerator.h index 34b44a5..aabecae 100644 --- a/runtime/neurun/backend/srcn/KernelGenerator.h +++ b/runtime/neurun/backend/srcn/KernelGenerator.h @@ -18,7 +18,7 @@ #define __NEURUN_BACKEND_SRCN_KERNEL_GENERATOR_H__ #include "backend/IKernelGenerator.h" -#include "model/Operands.h" +#include "ir/Operands.h" #include "operand/Tensor.h" #include "backend/CustomKernelBuilder.h" #include "TensorBuilder.h" @@ -33,8 +33,7 @@ namespace srcn class KernelGenerator : public IKernelGenerator { public: - KernelGenerator(const neurun::model::Operands &ctx, - const std::shared_ptr &tensor_builder, + KernelGenerator(const ir::Operands &ctx, const std::shared_ptr &tensor_builder, const std::shared_ptr &kb); using IKernelGenerator::visit; @@ -47,7 +46,7 @@ public: void visit(const model::operation::Add &) override; private: - const neurun::model::Operands &_ctx; + const ir::Operands &_ctx; std::shared_ptr _tensor_builder; std::shared_ptr _kernel_builder; ir::Layout _current_subg_layout; diff --git a/runtime/neurun/backend/srcn/MemoryManager.cc b/runtime/neurun/backend/srcn/MemoryManager.cc index 7871f29..4fae8c4 100644 --- a/runtime/neurun/backend/srcn/MemoryManager.cc +++ b/runtime/neurun/backend/srcn/MemoryManager.cc @@ -40,19 +40,19 @@ IMemoryPlanner *MemoryManager::createMemoryPlanner() return MemoryPlannerFactory::get().create(planner_id); } -void 
MemoryManager::buildTensor(const model::OperandIndex &ind, const model::OperandInfo &info,
+void MemoryManager::buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &info,
                                 ir::Layout layout)
 {
   auto tensor = std::make_shared<operand::Tensor>(info, layout);
   _tensors[ind] = tensor;
 }
 
-void MemoryManager::claimPlan(const model::OperandIndex &ind, uint32_t size)
+void MemoryManager::claimPlan(const ir::OperandIndex &ind, uint32_t size)
 {
   _mem_planner->claim(ind, size);
 }
 
-void MemoryManager::releasePlan(const model::OperandIndex &ind) { _mem_planner->release(ind); }
+void MemoryManager::releasePlan(const ir::OperandIndex &ind) { _mem_planner->release(ind); }
 
 void MemoryManager::allocate(void)
 {
diff --git a/runtime/neurun/backend/srcn/MemoryManager.h b/runtime/neurun/backend/srcn/MemoryManager.h
index a6bd7e0..73d9d49 100644
--- a/runtime/neurun/backend/srcn/MemoryManager.h
+++ b/runtime/neurun/backend/srcn/MemoryManager.h
@@ -20,7 +20,7 @@
 #include "backend/IMemoryManager.h"
 #include "MemoryPlanner.h"
 #include "operand/Tensor.h"
-#include "model/OperandIndexMap.h"
+#include "ir/OperandIndexMap.h"
 
 namespace neurun
 {
@@ -38,19 +38,18 @@ public:
   void allocate(void) override;
   void deallocate(void) override { _mem_alloc->release(); }
 
-  void buildTensor(const model::OperandIndex &ind, const model::OperandInfo &info,
-                   ir::Layout layout);
-  void claimPlan(const model::OperandIndex &ind, uint32_t size);
-  void releasePlan(const model::OperandIndex &ind);
+  void buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &info, ir::Layout layout);
+  void claimPlan(const ir::OperandIndex &ind, uint32_t size);
+  void releasePlan(const ir::OperandIndex &ind);
 
-  model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &tensors(void) { return _tensors; }
+  ir::OperandIndexMap<std::shared_ptr<operand::Tensor>> &tensors(void) { return _tensors; }
 
 private:
   IMemoryPlanner *createMemoryPlanner();
 
 private:
-  model::OperandIndexMap<std::shared_ptr<operand::Tensor>> _tensors;
-  model::OperandIndexMap _tensor_mem_map;
+  ir::OperandIndexMap<std::shared_ptr<operand::Tensor>> _tensors;
+  ir::OperandIndexMap _tensor_mem_map;
   std::shared_ptr<IMemoryPlanner> _mem_planner;
   std::shared_ptr<Allocator> _mem_alloc;
 };
 
diff --git a/runtime/neurun/backend/srcn/MemoryPlanner.cc b/runtime/neurun/backend/srcn/MemoryPlanner.cc
index 96ce27b..f5063e0 100644
--- a/runtime/neurun/backend/srcn/MemoryPlanner.cc
+++ b/runtime/neurun/backend/srcn/MemoryPlanner.cc
@@ -33,7 +33,7 @@ Allocator::Allocator(uint32_t capacity)
   VERBOSE(ALLOC) << "base pointer: " << static_cast<void *>(_base.get()) << std::endl;
 }
 
-void BumpPlanner::claim(const model::OperandIndex &ind, size_t size)
+void BumpPlanner::claim(const ir::OperandIndex &ind, size_t size)
 {
   assert(size != 0);
 
@@ -45,7 +45,7 @@ void BumpPlanner::claim(const model::OperandIndex &ind, size_t size)
                      << std::endl;
 }
 
-void BumpPlanner::release(const model::OperandIndex &ind)
+void BumpPlanner::release(const ir::OperandIndex &ind)
 {
   VERBOSE(BP_PLANNER) << "RELEASE(#" << ind.value() << "): "
                       << "NOTHING does" << std::endl;
@@ -54,7 +54,7 @@ void BumpPlanner::release(const model::OperandIndex &ind)
 // There are some assumptions for claiming memory(== making a reservation for memory).
 // 1. About _claim_table(std::map).
 //   - The table's data structure is std::map so that it always sorts
-//     value(model::OperandIndex) by key(base_offset).
+//     value(OperandIndex) by key(base_offset).
 //   - This claim() inserts key/value into _claim_table and the release() removes the key/value from
 //     _claim_table.
 //   - _claim_table shows the memory status at a certain point in time.
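
// A self-contained sketch of the first-fit search this sorted-table invariant
// enables, assuming a simplified uint32_t operand index and plain std
// containers in place of the neurun types (illustrative only):

#include <cstdint>
#include <map>

using SimpleOperandIndex = uint32_t;
struct SimplePlan
{
  uint32_t offset;
  uint32_t size;
};

class FirstFitSketch
{
public:
  // Walk claims in ascending base_offset order and take the first gap that
  // fits; if no gap fits, append after the last claim.
  uint32_t claim(SimpleOperandIndex ind, uint32_t size)
  {
    uint32_t next_offset = 0;
    for (const auto &c : _claim_table)
    {
      if (c.first - next_offset >= size)
        break; // the gap before this claim is large enough
      next_offset = c.first + _plans[c.second].size;
    }
    _claim_table[next_offset] = ind;
    _plans[ind] = SimplePlan{next_offset, size};
    return next_offset;
  }

  void release(SimpleOperandIndex ind)
  {
    // Erase the claim whose value matches; the map stays sorted by offset.
    for (auto it = _claim_table.cbegin(); it != _claim_table.cend(); ++it)
    {
      if (it->second == ind)
      {
        _claim_table.erase(it);
        return;
      }
    }
  }

private:
  std::map<uint32_t, SimpleOperandIndex> _claim_table; // base_offset -> operand
  std::map<SimpleOperandIndex, SimplePlan> _plans;
};
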
Therefore, @@ -65,7 +65,7 @@ void BumpPlanner::release(const model::OperandIndex &ind) // point in time, it means the place at the offset can be claimed. // 2. In the loop for _claim_table, we can assume the current claim_base_offset value is bigger than // the previous claim_base_offset. -void FirstFitPlanner::claim(const model::OperandIndex &ind, size_t size) +void FirstFitPlanner::claim(const ir::OperandIndex &ind, size_t size) { assert(size != 0); @@ -98,7 +98,7 @@ void FirstFitPlanner::claim(const model::OperandIndex &ind, size_t size) } } -void FirstFitPlanner::release(const model::OperandIndex &ind) +void FirstFitPlanner::release(const ir::OperandIndex &ind) { for (auto it = _claim_table.cbegin(); it != _claim_table.cend(); ++it) { diff --git a/runtime/neurun/backend/srcn/MemoryPlanner.h b/runtime/neurun/backend/srcn/MemoryPlanner.h index c66efec..a8b41a3 100644 --- a/runtime/neurun/backend/srcn/MemoryPlanner.h +++ b/runtime/neurun/backend/srcn/MemoryPlanner.h @@ -25,7 +25,7 @@ #include #include -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" namespace neurun { @@ -66,19 +66,19 @@ private: */ struct IMemoryPlanner { - using MemoryPlans = model::OperandIndexMap; + using MemoryPlans = ir::OperandIndexMap; /** * @brief Claim memory for operand * @param[in] index The operand index * @param[in] size The size of the memory */ - virtual void claim(const model::OperandIndex &, size_t) = 0; + virtual void claim(const ir::OperandIndex &, size_t) = 0; /** * @brief Release memory for operand * @param[in] index The operand index */ - virtual void release(const model::OperandIndex &) = 0; + virtual void release(const ir::OperandIndex &) = 0; /** * @brief Get capacity for memory planning * @return The value of capacity @@ -104,12 +104,12 @@ public: * @param[in] index The operand index * @param[in] size The size of the memory */ - void claim(const model::OperandIndex &, size_t) override; + void claim(const ir::OperandIndex &, size_t) override; /** * @brief Release memory for operand by bump way * @param[in] index The operand index */ - void release(const model::OperandIndex &) override; + void release(const ir::OperandIndex &) override; /** * @brief Get capacity for memory planning * @return The value of capacity @@ -137,12 +137,12 @@ public: * @param[in] index The operand index * @param[in] size The size of the memory */ - void claim(const model::OperandIndex &, size_t) override; + void claim(const ir::OperandIndex &, size_t) override; /** * @brief Release memory for operand by firstfit way * @param[in] index The operand index */ - void release(const model::OperandIndex &) override; + void release(const ir::OperandIndex &) override; /** * @brief Get capacity for memory planning * @return The value of capacity @@ -158,7 +158,7 @@ private: uint32_t _capacity = 0; MemoryPlans _mem_plans; // Use std::map because claim() assumes that _claim_table is sorted by uint32_t(base_offset) - std::map _claim_table; + std::map _claim_table; }; } // namespace srcn diff --git a/runtime/neurun/backend/srcn/ShapeFixer.cc b/runtime/neurun/backend/srcn/ShapeFixer.cc index 5dbd9fd..5251478 100644 --- a/runtime/neurun/backend/srcn/ShapeFixer.cc +++ b/runtime/neurun/backend/srcn/ShapeFixer.cc @@ -25,7 +25,7 @@ namespace backend namespace srcn { -ShapeFixer::ShapeFixer(const neurun::model::Operands &operand_ctx, +ShapeFixer::ShapeFixer(const ir::Operands &operand_ctx, const std::shared_ptr &tensor_builder) : _ctx(operand_ctx), _tensor_builder(tensor_builder) { diff --git 
a/runtime/neurun/backend/srcn/ShapeFixer.h b/runtime/neurun/backend/srcn/ShapeFixer.h index 5403fbb..ec9078e 100644 --- a/runtime/neurun/backend/srcn/ShapeFixer.h +++ b/runtime/neurun/backend/srcn/ShapeFixer.h @@ -19,7 +19,7 @@ #include -#include "model/Operands.h" +#include "ir/Operands.h" #include "operand/Tensor.h" #include "TensorBuilder.h" @@ -33,8 +33,7 @@ namespace srcn class ShapeFixer : public IShapeFixer { public: - ShapeFixer(const neurun::model::Operands &ctx, - const std::shared_ptr &tensor_builder); + ShapeFixer(const ir::Operands &ctx, const std::shared_ptr &tensor_builder); std::shared_ptr tensor_builder() override { return _tensor_builder; } @@ -45,7 +44,7 @@ public: void visit(const model::operation::Add &) override; private: - const neurun::model::Operands &_ctx; + const ir::Operands &_ctx; std::shared_ptr _tensor_builder; }; diff --git a/runtime/neurun/backend/srcn/TensorBuilder.cc b/runtime/neurun/backend/srcn/TensorBuilder.cc index bbf59ed..5ac25c3 100644 --- a/runtime/neurun/backend/srcn/TensorBuilder.cc +++ b/runtime/neurun/backend/srcn/TensorBuilder.cc @@ -32,8 +32,8 @@ TensorBuilder::TensorBuilder() : _tensor_mgr{new TensorManager()} // DO NOTHING } -void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind, - const model::OperandInfo &tensor_info, +void TensorBuilder::registerTensorInfo(const ir::OperandIndex &ind, + const ir::OperandInfo &tensor_info, ir::Layout backend_layout, bool as_const) { _tensor_info_map.emplace(ind, tensor_info); @@ -43,14 +43,13 @@ void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind, _constants.append(ind); } -void TensorBuilder::registerSubTensorInfo(const model::OperandIndex &, - const compiler::SubTensorInfo &) +void TensorBuilder::registerSubTensorInfo(const ir::OperandIndex &, const compiler::SubTensorInfo &) { // Not supported yet assert(false); } -void TensorBuilder::notifyFirstUse(const model::OperandIndex &ind) +void TensorBuilder::notifyFirstUse(const ir::OperandIndex &ind) { assert(_tensor_info_map.find(ind) != _tensor_info_map.end()); const auto &tensor_info = _tensor_info_map.at(ind); @@ -60,9 +59,9 @@ void TensorBuilder::notifyFirstUse(const model::OperandIndex &ind) _tensor_mgr->claimPlan(ind, size); } -void TensorBuilder::notifyLastUse(const model::OperandIndex &ind) { _tensor_mgr->releasePlan(ind); } +void TensorBuilder::notifyLastUse(const ir::OperandIndex &ind) { _tensor_mgr->releasePlan(ind); } -bool TensorBuilder::isRegistered(const model::OperandIndex &ind) const +bool TensorBuilder::isRegistered(const ir::OperandIndex &ind) const { return _tensor_info_map.find(ind) != _tensor_info_map.end(); } @@ -86,14 +85,14 @@ void TensorBuilder::allocateNonconsts() } std::shared_ptr<::neurun::backend::operand::ITensor> -TensorBuilder::tensorAt(const model::OperandIndex &ind) +TensorBuilder::tensorAt(const ir::OperandIndex &ind) { return _tensor_mgr->at(ind); } void TensorBuilder::iterate(const IterateFunction &fn) { _tensor_mgr->iterate(fn); } -std::shared_ptr TensorBuilder::at(const ::neurun::model::OperandIndex &ind) +std::shared_ptr TensorBuilder::at(const ir::OperandIndex &ind) { return _tensor_mgr->at(ind); } diff --git a/runtime/neurun/backend/srcn/TensorBuilder.h b/runtime/neurun/backend/srcn/TensorBuilder.h index 53a8123..4fa9e54 100644 --- a/runtime/neurun/backend/srcn/TensorBuilder.h +++ b/runtime/neurun/backend/srcn/TensorBuilder.h @@ -21,7 +21,7 @@ #include #include "operand/Tensor.h" -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" #include "TensorManager.h" 
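
// The TensorBuilder shown above is driven in three phases: register every
// operand, bracket each operand's live range with first/last-use
// notifications so the planner can assign offsets, then allocate real buffers
// once. A sketch of that call order using the API declared in this header,
// with a hypothetical already-constructed builder and one index/info pair:

void planOneTensor(neurun::backend::srcn::TensorBuilder &builder,
                   const neurun::ir::OperandIndex &ind,
                   const neurun::ir::OperandInfo &info)
{
  builder.registerTensorInfo(ind, info, neurun::ir::Layout::NHWC, /*as_const=*/false);
  builder.prepare();           // hand registered infos to the tensor manager
  builder.notifyFirstUse(ind); // claimPlan(ind, size derived from info)
  builder.notifyLastUse(ind);  // releasePlan(ind)
  builder.allocateNonconsts(); // planner capacity becomes real buffers
}
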
namespace neurun @@ -42,20 +42,20 @@ public: * @param[in] info Operand information * @param[in] layout Operand data layout */ - void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info, + void registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info, ir::Layout backend_layout, bool as_const) override; /** * @brief Register subtensor information to allocate on CPU backend * @param[in] ind Operand index * @param[in] info Tensor information */ - void registerSubTensorInfo(const model::OperandIndex &ind, + void registerSubTensorInfo(const ir::OperandIndex &ind, const compiler::SubTensorInfo &info) override; - void notifyFirstUse(const model::OperandIndex &) override; - void notifyLastUse(const model::OperandIndex &) override; + void notifyFirstUse(const ir::OperandIndex &) override; + void notifyLastUse(const ir::OperandIndex &) override; - bool isRegistered(const model::OperandIndex &) const override; + bool isRegistered(const ir::OperandIndex &) const override; void prepare(void) override; void allocateConsts() override; @@ -64,7 +64,7 @@ public: void finalize() override { /* DO NOTHING */} std::shared_ptr<::neurun::backend::operand::ITensor> - tensorAt(const model::OperandIndex &ind) override; + tensorAt(const ir::OperandIndex &ind) override; void iterate(const IterateFunction &fn) override; @@ -73,13 +73,13 @@ public: std::unique_ptr releaseTensorManager(void) override; - std::shared_ptr at(const ::neurun::model::OperandIndex &ind); + std::shared_ptr at(const ir::OperandIndex &ind); private: std::unique_ptr _tensor_mgr; - model::OperandIndexMap _tensor_info_map; - model::OperandIndexMap _tensor_layout_map; - model::OperandIndexSequence _constants; + ir::OperandIndexMap _tensor_info_map; + ir::OperandIndexMap _tensor_layout_map; + ir::OperandIndexSequence _constants; }; } // namespace srcn diff --git a/runtime/neurun/backend/srcn/TensorManager.cc b/runtime/neurun/backend/srcn/TensorManager.cc index e6462db..adf357e 100644 --- a/runtime/neurun/backend/srcn/TensorManager.cc +++ b/runtime/neurun/backend/srcn/TensorManager.cc @@ -36,9 +36,8 @@ void TensorManager::deallocateConsts(void) { _const_mgr->deallocate(); } void TensorManager::deallocateNonconsts(void) { _nonconst_mgr->deallocate(); } -void TensorManager::buildTensor(const model::OperandIndex &ind, - const model::OperandInfo &tensor_info, ir::Layout layout, - bool as_const) +void TensorManager::buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &tensor_info, + ir::Layout layout, bool as_const) { assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end()); if (as_const) @@ -53,35 +52,35 @@ void TensorManager::buildTensor(const model::OperandIndex &ind, } } -void TensorManager::claimPlan(const model::OperandIndex &ind, uint32_t size) +void TensorManager::claimPlan(const ir::OperandIndex &ind, uint32_t size) { assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end()); _ind_to_mgr.at(ind).claimPlan(ind, size); } -void TensorManager::releasePlan(const model::OperandIndex &ind) +void TensorManager::releasePlan(const ir::OperandIndex &ind) { assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end()); _ind_to_mgr.at(ind).releasePlan(ind); } -std::shared_ptr TensorManager::at(const ::neurun::model::OperandIndex &ind) +std::shared_ptr TensorManager::at(const ir::OperandIndex &ind) { assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end()); return _ind_to_mgr.at(ind).tensors().at(ind); } -model::OperandIndexMap> &TensorManager::constTensors(void) +ir::OperandIndexMap> &TensorManager::constTensors(void) { return 
_const_mgr->tensors(); } -model::OperandIndexMap> &TensorManager::nonconstTensors(void) +ir::OperandIndexMap> &TensorManager::nonconstTensors(void) { return _nonconst_mgr->tensors(); } -void TensorManager::iterate(const std::function &fn) +void TensorManager::iterate(const std::function &fn) { for (auto it : _nonconst_mgr->tensors()) fn(it.first); diff --git a/runtime/neurun/backend/srcn/TensorManager.h b/runtime/neurun/backend/srcn/TensorManager.h index 393828f..d4390d8 100644 --- a/runtime/neurun/backend/srcn/TensorManager.h +++ b/runtime/neurun/backend/srcn/TensorManager.h @@ -19,7 +19,7 @@ #include "backend/ITensorManager.h" #include "MemoryManager.h" -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" namespace neurun { @@ -39,23 +39,23 @@ public: void deallocateConsts(void) override; void deallocateNonconsts(void) override; - void buildTensor(const model::OperandIndex &ind, const model::OperandInfo &tensor_info, + void buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &tensor_info, ir::Layout layout, bool as_const); - void claimPlan(const model::OperandIndex &ind, uint32_t size); - void releasePlan(const model::OperandIndex &ind); + void claimPlan(const ir::OperandIndex &ind, uint32_t size); + void releasePlan(const ir::OperandIndex &ind); - std::shared_ptr at(const ::neurun::model::OperandIndex &ind); + std::shared_ptr at(const ir::OperandIndex &ind); - model::OperandIndexMap> &constTensors(void); - model::OperandIndexMap> &nonconstTensors(void); + ir::OperandIndexMap> &constTensors(void); + ir::OperandIndexMap> &nonconstTensors(void); - void iterate(const std::function &fn); + void iterate(const std::function &fn); private: std::unique_ptr _const_mgr; std::unique_ptr _nonconst_mgr; - model::OperandIndexMap _ind_to_mgr; + ir::OperandIndexMap _ind_to_mgr; }; } // namespace srcn diff --git a/runtime/neurun/backend/srcn/TensorRegister.cc b/runtime/neurun/backend/srcn/TensorRegister.cc index d35b15f..8fb5b54 100644 --- a/runtime/neurun/backend/srcn/TensorRegister.cc +++ b/runtime/neurun/backend/srcn/TensorRegister.cc @@ -26,7 +26,7 @@ namespace backend namespace srcn { -TensorRegister::TensorRegister(const model::Operands &operands, +TensorRegister::TensorRegister(const ir::Operands &operands, const std::shared_ptr &tensor_builder) : _operands{operands}, _tensor_builder{tensor_builder} { @@ -53,7 +53,7 @@ void TensorRegister::visit(const model::operation::Conv2D &node) const auto backend_filter_layout = backend_layout == ir::Layout::NHWC ? kernel::FilterLayout::HWIO : kernel::FilterLayout::OIHW; - model::OperandInfo backend_info{ + ir::OperandInfo backend_info{ asKernelShape(kernel_obj.shape(), frontend_filter_layout, backend_filter_layout), kernel_obj.info().typeInfo()}; _tensor_builder->registerTensorInfo(kernel_index, backend_info, backend_layout, @@ -80,7 +80,7 @@ void TensorRegister::visit(const model::operation::DepthwiseConv2D &node) const auto backend_filter_layout = backend_layout == ir::Layout::NHWC ? kernel::FilterLayout::HWIO : kernel::FilterLayout::OIHW; - model::OperandInfo backend_info{ + ir::OperandInfo backend_info{ asKernelShape(kernel_obj.shape(), frontend_filter_layout, backend_filter_layout), kernel_obj.info().typeInfo()}; _tensor_builder->registerTensorInfo(kernel_index, backend_info, backend_layout, @@ -106,7 +106,7 @@ void TensorRegister::visit(const model::operation::TransposeConv &node) const auto backend_filter_layout = backend_layout == ir::Layout::NHWC ? 
kernel::FilterLayout::HWOI : kernel::FilterLayout::IOHW; - model::OperandInfo backend_info{ + ir::OperandInfo backend_info{ asKernelShape(kernel_obj.shape(), frontend_filter_layout, backend_filter_layout), kernel_obj.info().typeInfo()}; _tensor_builder->registerTensorInfo(kernel_index, backend_info, backend_layout, diff --git a/runtime/neurun/backend/srcn/TensorRegister.h b/runtime/neurun/backend/srcn/TensorRegister.h index a558cb8..84ec0e9 100644 --- a/runtime/neurun/backend/srcn/TensorRegister.h +++ b/runtime/neurun/backend/srcn/TensorRegister.h @@ -30,7 +30,7 @@ namespace srcn class TensorRegister : public ITensorRegister { public: - TensorRegister(const model::Operands &operands, + TensorRegister(const ir::Operands &operands, const std::shared_ptr &tensor_builder); public: @@ -39,12 +39,12 @@ public: void visit(const model::operation::TransposeConv &) override; private: - const model::Operands &operands() const override { return _operands; } + const ir::Operands &operands() const override { return _operands; } std::shared_ptr tensor_builder() const override { return _tensor_builder; } bool supportSubTensor() const final { return false; } private: - const model::Operands &_operands; + const ir::Operands &_operands; const std::shared_ptr _tensor_builder; }; diff --git a/runtime/neurun/backend/srcn/kernel/OperationUtils.cc b/runtime/neurun/backend/srcn/kernel/OperationUtils.cc index 4795c38..211e220 100644 --- a/runtime/neurun/backend/srcn/kernel/OperationUtils.cc +++ b/runtime/neurun/backend/srcn/kernel/OperationUtils.cc @@ -97,7 +97,7 @@ nnfw::srcn::convType_t convertLayout(ir::Layout layout) } } -TensorDescriptor getTensorDescriptor(const ::neurun::model::Operand &o, ir::Layout frontend_layout, +TensorDescriptor getTensorDescriptor(const ir::Operand &o, ir::Layout frontend_layout, ir::Layout backend_layout) { TensorDescriptor descriptor; diff --git a/runtime/neurun/backend/srcn/kernel/OperationUtils.h b/runtime/neurun/backend/srcn/kernel/OperationUtils.h index a0610a2..aa163a1 100644 --- a/runtime/neurun/backend/srcn/kernel/OperationUtils.h +++ b/runtime/neurun/backend/srcn/kernel/OperationUtils.h @@ -21,7 +21,7 @@ #include #include -#include "model/Operand.h" +#include "ir/Operand.h" #include "ir/DataType.h" #include #include @@ -73,7 +73,7 @@ Coordinates convertCoordinates(const Coordinates &from_coordinates, FilterLayout nnfw::srcn::convType_t convertLayout(ir::Layout layout); -TensorDescriptor getTensorDescriptor(const ::neurun::model::Operand &o, ir::Layout frontend_layout, +TensorDescriptor getTensorDescriptor(const ir::Operand &o, ir::Layout frontend_layout, ir::Layout backend_layout); } // namespace kernel diff --git a/runtime/neurun/backend/srcn/operand/Tensor.h b/runtime/neurun/backend/srcn/operand/Tensor.h index af25593..e16234a 100644 --- a/runtime/neurun/backend/srcn/operand/Tensor.h +++ b/runtime/neurun/backend/srcn/operand/Tensor.h @@ -19,7 +19,7 @@ #include #include -#include "model/OperandInfo.h" +#include "ir/OperandInfo.h" namespace neurun { @@ -36,7 +36,7 @@ public: Tensor() = delete; public: - Tensor(const model::OperandInfo &info, ir::Layout layout) : _info(info), _layout(layout) + Tensor(const ir::OperandInfo &info, ir::Layout layout) : _info(info), _layout(layout) { // DO NOTHING } @@ -66,7 +66,7 @@ public: void access(const std::function &fn) final; private: - model::OperandInfo _info; + ir::OperandInfo _info; uint8_t *_buffer = nullptr; ir::Layout _layout; }; diff --git a/runtime/neurun/core/include/backend/Backend.h 
b/runtime/neurun/core/include/backend/Backend.h
index 2fef3d0..9c4484f 100644
--- a/runtime/neurun/core/include/backend/Backend.h
+++ b/runtime/neurun/core/include/backend/Backend.h
@@ -19,7 +19,7 @@
 
 #include <memory>
 
-#include "model/Operands.h"
+#include "ir/Operands.h"
 
 namespace neurun
 {
@@ -57,7 +57,7 @@ public:
   virtual std::shared_ptr<IConfig> config() const = 0;
 
   virtual std::unique_ptr<BackendContext>
-  newContext(const model::Operands &operands,
+  newContext(const ir::Operands &operands,
              const std::shared_ptr<custom::IKernelBuilder> &kb) const = 0;
 };
 
diff --git a/runtime/neurun/core/include/backend/IConstantInitializer.h b/runtime/neurun/core/include/backend/IConstantInitializer.h
index 89535a4..bb19279 100644
--- a/runtime/neurun/core/include/backend/IConstantInitializer.h
+++ b/runtime/neurun/core/include/backend/IConstantInitializer.h
@@ -22,8 +22,8 @@
 #include "ITensorBuilder.h"
 #include "ir/Layout.h"
-#include "model/Operand.h"
-#include "model/Operands.h"
+#include "ir/Operand.h"
+#include "ir/Operands.h"
 #include "model/OperationVisitor.h"
 #include "model/Subgraph.h"
 #include "util/logging.h"
@@ -32,7 +32,7 @@ namespace
 {
 
 template <typename T>
-static void Init(const neurun::model::Operand &model_obj, neurun::backend::operand::ITensor &obj,
+static void Init(const neurun::ir::Operand &model_obj, neurun::backend::operand::ITensor &obj,
                  const bool copy,
                  const neurun::ir::Layout frontend_layout = neurun::ir::Layout::UNKNOWN)
 {
@@ -134,13 +134,13 @@ static void Init(const neurun::model::Operand &model_obj, neurun::backend::opera
 }
 
 template <typename T>
-void copyInit(const neurun::model::Operand &model_obj, neurun::backend::operand::ITensor &obj)
+void copyInit(const neurun::ir::Operand &model_obj, neurun::backend::operand::ITensor &obj)
 {
   Init<T>(model_obj, obj, true);
 }
 
 template <typename T>
-void permuteInit(const neurun::model::Operand &model_obj, neurun::backend::operand::ITensor &obj,
+void permuteInit(const neurun::ir::Operand &model_obj, neurun::backend::operand::ITensor &obj,
                  const neurun::ir::Layout frontend_layout)
 {
   const bool copy = frontend_layout == obj.layout();
@@ -177,9 +177,9 @@ public:
   }
 
 public:
-  using Initializer = std::function<void(const model::Operand &, backend::operand::ITensor &)>;
+  using Initializer = std::function<void(const ir::Operand &, backend::operand::ITensor &)>;
 
-  void generate(const model::Subgraph &subg, const model::Operands &operands)
+  void generate(const model::Subgraph &subg, const ir::Operands &operands)
   {
     _current_subg_layout = subg.getLayout();
     subg.accept(*this);
@@ -199,15 +199,15 @@ public:
 protected:
 #define OP(InternalName) \
   virtual void visit(const model::operation::InternalName &) override { /* DO NOTHING */}
-#include "model/Operations.lst"
+#include "ir/Operations.lst"
 #undef OP
 
 protected:
-  virtual const model::Operands &operands() const = 0;
+  virtual const ir::Operands &operands() const = 0;
   virtual std::shared_ptr<ITensorBuilder> tensor_builder() const = 0;
 
 protected:
-  void registerCopyInitializer(const model::OperandIndex &index, const model::Operand &obj)
+  void registerCopyInitializer(const ir::OperandIndex &index, const ir::Operand &obj)
   {
     // For only CONSTANTS
     // TODO Add to check if tensor has been allocated
@@ -239,7 +239,7 @@ protected:
   }
 
 protected:
-  void registerPermuteInitializer(const model::OperandIndex &index, const model::Operand &obj)
+  void registerPermuteInitializer(const ir::OperandIndex &index, const ir::Operand &obj)
   {
     // For only CONSTANTS
     // TODO Add to check if tensor has been allocated
@@ -272,10 +272,10 @@ protected:
   }
 
 private:
-  bool exist(const model::OperandIndex &ind) { return _init_map.find(ind) != _init_map.end(); }
+  bool exist(const ir::OperandIndex &ind) { return _init_map.find(ind) != _init_map.end(); }
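
// The OP(...) + #include "ir/Operations.lst" pair above is the X-macro idiom:
// Operations.lst is a flat list of OP(Name) entries, and each includer defines
// OP to stamp out one member per operation before undefining it again. The
// same list also generates the ir::OpCode enum later in this patch. A toy
// version, with a hypothetical three-entry list spelled out inline:

// ops.lst would contain:  OP(Add)  OP(Conv2D)  OP(Softmax)
enum class ToyOpCode
{
  Invalid,
#define OP(Name) Name, // one enumerator per list entry
  OP(Add) OP(Conv2D) OP(Softmax) // stands in for: #include "ops.lst"
#undef OP
  COUNT
};
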
protected: - std::unordered_map _init_map; + std::unordered_map _init_map; ir::Layout _current_subg_layout; }; diff --git a/runtime/neurun/core/include/backend/IKernelGenerator.h b/runtime/neurun/core/include/backend/IKernelGenerator.h index 42e4174..dcb9350 100644 --- a/runtime/neurun/core/include/backend/IKernelGenerator.h +++ b/runtime/neurun/core/include/backend/IKernelGenerator.h @@ -50,7 +50,7 @@ protected: { \ throw std::runtime_error("KernelGenerator: NYI for operation '" #InternalName "'"); \ } -#include "model/Operations.lst" +#include "ir/Operations.lst" #undef OP protected: diff --git a/runtime/neurun/core/include/backend/IShapeFixer.h b/runtime/neurun/core/include/backend/IShapeFixer.h index f33f4ec..6d52129 100644 --- a/runtime/neurun/core/include/backend/IShapeFixer.h +++ b/runtime/neurun/core/include/backend/IShapeFixer.h @@ -44,7 +44,7 @@ protected: { \ throw std::runtime_error("ShapeFixer: NYI for operation '" #InternalName "'"); \ } -#include "model/Operations.lst" +#include "ir/Operations.lst" #undef OP public: diff --git a/runtime/neurun/core/include/backend/ITensorBuilder.h b/runtime/neurun/core/include/backend/ITensorBuilder.h index 8fdc2ef..a9fce0a 100644 --- a/runtime/neurun/core/include/backend/ITensorBuilder.h +++ b/runtime/neurun/core/include/backend/ITensorBuilder.h @@ -19,8 +19,8 @@ #include -#include "model/Index.h" -#include "model/OperandInfo.h" +#include "ir/Index.h" +#include "ir/OperandInfo.h" #include "model/Operation.h" #include "ir/Layout.h" #include "operand/ITensor.h" @@ -34,7 +34,7 @@ namespace backend struct ITensorBuilder { - using IterateFunction = std::function; + using IterateFunction = std::function; virtual ~ITensorBuilder(void) = default; @@ -42,18 +42,17 @@ struct ITensorBuilder /** * @brief Register tensor information to allocate on backend */ - virtual void registerTensorInfo(const model::OperandIndex &, const model::OperandInfo &, + virtual void registerTensorInfo(const ir::OperandIndex &, const ir::OperandInfo &, ir::Layout backend_layout, bool as_const) = 0; /** * @brief Register subtensor information to allocate on backend */ - virtual void registerSubTensorInfo(const model::OperandIndex &, - const compiler::SubTensorInfo &) = 0; + virtual void registerSubTensorInfo(const ir::OperandIndex &, const compiler::SubTensorInfo &) = 0; - virtual void notifyFirstUse(const model::OperandIndex &) = 0; - virtual void notifyLastUse(const model::OperandIndex &) = 0; + virtual void notifyFirstUse(const ir::OperandIndex &) = 0; + virtual void notifyLastUse(const ir::OperandIndex &) = 0; - virtual bool isRegistered(const model::OperandIndex &) const = 0; + virtual bool isRegistered(const ir::OperandIndex &) const = 0; virtual void prepare(void) = 0; virtual void allocateConsts() = 0; @@ -62,7 +61,7 @@ struct ITensorBuilder virtual void finalize() = 0; virtual std::shared_ptr<::neurun::backend::operand::ITensor> - tensorAt(const model::OperandIndex &ind) = 0; + tensorAt(const ir::OperandIndex &ind) = 0; virtual void iterate(const IterateFunction &fn) = 0; virtual void preVisit(const model::Operation &) = 0; diff --git a/runtime/neurun/core/include/backend/ITensorRegister.h b/runtime/neurun/core/include/backend/ITensorRegister.h index d5be3a2..296b2a0 100644 --- a/runtime/neurun/core/include/backend/ITensorRegister.h +++ b/runtime/neurun/core/include/backend/ITensorRegister.h @@ -22,9 +22,9 @@ #include "ir/operand/ParentInfo.h" #include "ITensorBuilder.h" #include "ir/Layout.h" -#include "model/OperandIndexSequence.h" -#include "model/OperandInfo.h" 
-#include "model/Operands.h" +#include "ir/OperandIndexSequence.h" +#include "ir/OperandInfo.h" +#include "ir/Operands.h" #include "model/OperationVisitor.h" namespace @@ -76,7 +76,7 @@ public: } protected: - virtual const model::Operands &operands() const = 0; + virtual const ir::Operands &operands() const = 0; virtual std::shared_ptr tensor_builder() const = 0; virtual bool supportSubTensor() const = 0; @@ -84,18 +84,18 @@ protected: #define OP(InternalName) \ virtual void visit(const model::operation::InternalName &node) override \ { \ - model::OperandIndexSequence indices{node.getInputs()}; \ + ir::OperandIndexSequence indices{node.getInputs()}; \ indices.append(node.getOutputs()); \ for (const auto &index : indices) \ { \ defaultRegisterTensorInfo(index); \ } \ } -#include "model/Operations.lst" +#include "ir/Operations.lst" #undef OP protected: - void defaultRegisterTensorInfo(const model::OperandIndex &index) const + void defaultRegisterTensorInfo(const ir::OperandIndex &index) const { if (tensor_builder()->isRegistered(index)) { @@ -112,15 +112,15 @@ protected: } else { - model::OperandInfo backend_info{ - permuteTensorShape(obj.shape(), frontend_layout, backend_layout), obj.typeInfo()}; + ir::OperandInfo backend_info{permuteTensorShape(obj.shape(), frontend_layout, backend_layout), + obj.typeInfo()}; tensor_builder()->registerTensorInfo(index, backend_info, backend_layout, obj.isConstant()); } } protected: virtual ir::Layout frontendLayout() const final { return _current_subg_layout; } - virtual ir::Layout backendLayout(const model::OperandIndex &index) const final + virtual ir::Layout backendLayout(const ir::OperandIndex &index) const final { assert(_lower_info_map != nullptr); const auto lower_info = _lower_info_map->operand.at(index).get(); @@ -128,8 +128,7 @@ protected: } private: - compiler::SubTensorInfo generateSubTensorInfo(const model::Operand &obj, - ir::Layout frontend_layout, + compiler::SubTensorInfo generateSubTensorInfo(const ir::Operand &obj, ir::Layout frontend_layout, ir::Layout backend_layout) const { assert(obj.shape().rank() <= 4); @@ -148,8 +147,8 @@ private: shape.extendRank(4); offset = {offset[0], offset[2], offset[3], offset[1]}; } - model::Operand subtensor_obj{permuteTensorShape(shape, frontend_layout, backend_layout), - obj.typeInfo()}; + ir::Operand subtensor_obj{permuteTensorShape(shape, frontend_layout, backend_layout), + obj.typeInfo()}; subtensor_obj.parent_info( nnfw::cpp14::make_unique(parent_index, offset)); return compiler::SubTensorInfo{subtensor_obj}; diff --git a/runtime/neurun/core/include/compiler/SubTensorInfo.h b/runtime/neurun/core/include/compiler/SubTensorInfo.h index 60405af..18cab46 100644 --- a/runtime/neurun/core/include/compiler/SubTensorInfo.h +++ b/runtime/neurun/core/include/compiler/SubTensorInfo.h @@ -22,7 +22,7 @@ #ifndef __NEURUN_COMPILER_SUBTENSOR_INFO_H__ #define __NEURUN_COMPILER_SUBTENSOR_INFO_H__ -#include "model/Operand.h" +#include "ir/Operand.h" namespace neurun { @@ -41,7 +41,7 @@ public: * @brief Construct a new SubTensorInfo object * @param[in] obj SubTensor object */ - SubTensorInfo(const model::Operand &obj) + SubTensorInfo(const ir::Operand &obj) : _parent{obj.parent_info()->parent()}, _shape{obj.shape()}, _type{obj.typeInfo()}, _offset{obj.parent_info()->offset()} { @@ -53,7 +53,7 @@ public: * @brief Return parent tensor index * @return Parent tensor index */ - const model::OperandIndex parent(void) const { return _parent; } + const ir::OperandIndex parent(void) const { return _parent; } /** * @brief Return 
tensor shape * @return Tensor shape @@ -71,7 +71,7 @@ public: const neurun::util::Coordinates offset(void) const { return _offset; } private: - const model::OperandIndex _parent; + const ir::OperandIndex _parent; const ir::Shape _shape; const ir::TypeInfo _type; const neurun::util::Coordinates _offset; diff --git a/runtime/neurun/core/include/exec/Execution.h b/runtime/neurun/core/include/exec/Execution.h index 7a00741..6b108b4 100644 --- a/runtime/neurun/core/include/exec/Execution.h +++ b/runtime/neurun/core/include/exec/Execution.h @@ -59,7 +59,7 @@ public: * @param[in] length Input data's length * @param[in] layout Input data's data format */ - void setInput(const model::IOIndex &index, const void *buffer, size_t length, + void setInput(const ir::IOIndex &index, const void *buffer, size_t length, ir::Layout layout = ir::Layout::NHWC); /** * @brief Set input data's information, especially to specify unknown dimensions on model @@ -71,7 +71,7 @@ public: * @param[in] length Input data's length * @param[in] layout Input data's data format */ - void setInput(const model::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape, + void setInput(const ir::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape, const void *buffer, size_t length, ir::Layout layout = ir::Layout::NHWC); /** * @brief Set output data's information @@ -80,7 +80,7 @@ public: * @param[in] length Output data's length * @param[in] layout Output data's data format */ - void setOutput(const model::IOIndex &index, void *buffer, size_t length, + void setOutput(const ir::IOIndex &index, void *buffer, size_t length, ir::Layout layout = ir::Layout::NHWC); /** * @brief Set output data's information, especially to specify unknown dimensions on model @@ -92,20 +92,20 @@ public: * @param[in] length Output data's length * @param[in] layout Output data's data format */ - void setOutput(const model::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape, + void setOutput(const ir::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape, void *buffer, size_t length, ir::Layout layout = ir::Layout::NHWC); /** * @brief Set input data's data format * @param[in] index Input index * @param[in] layout Input data's data format */ - void setInputLayout(const model::IOIndex &index, ir::Layout layout); + void setInputLayout(const ir::IOIndex &index, ir::Layout layout); /** * @brief Set output data's data format * @param[in] index Output index * @param[in] layout Output data's data format */ - void setOutputLayout(const model::IOIndex &index, ir::Layout layout); + void setOutputLayout(const ir::IOIndex &index, ir::Layout layout); /** * @brief Execution * @note It should be called after setting input and output buffer diff --git a/runtime/neurun/core/include/exec/IExecutor.h b/runtime/neurun/core/include/exec/IExecutor.h index ecb195c..0786553 100644 --- a/runtime/neurun/core/include/exec/IExecutor.h +++ b/runtime/neurun/core/include/exec/IExecutor.h @@ -24,7 +24,7 @@ #include "ir/Graph.h" #include "IFunction.h" #include "IODescription.h" -#include "model/OperationIndexMap.h" +#include "ir/OperationIndexMap.h" namespace neurun { @@ -56,7 +56,7 @@ struct IExecutor * @brief Set an ordering on operations * @param[in] ranks The table encoding the ordering */ - virtual void setIndexedRanks(std::shared_ptr>) = 0; + virtual void setIndexedRanks(std::shared_ptr>) = 0; /** * @brief Start execution diff --git a/runtime/neurun/core/include/exec/IODescription.h b/runtime/neurun/core/include/exec/IODescription.h index 
fc766c6..bdcc781 100644 --- a/runtime/neurun/core/include/exec/IODescription.h +++ b/runtime/neurun/core/include/exec/IODescription.h @@ -19,7 +19,7 @@ #include -#include "model/OperandInfo.h" +#include "ir/OperandInfo.h" namespace neurun { @@ -28,14 +28,13 @@ namespace exec struct InputDesc { - const model::OperandInfo info; + const ir::OperandInfo info; const void *buffer; const size_t size; const ir::Layout layout; InputDesc(void) = delete; - InputDesc(const model::OperandInfo &info, const void *buffer, const size_t size, - ir::Layout layout) + InputDesc(const ir::OperandInfo &info, const void *buffer, const size_t size, ir::Layout layout) : info(info), buffer(buffer), size(size), layout(layout) { } @@ -43,13 +42,13 @@ struct InputDesc struct OutputDesc { - const model::OperandInfo info; + const ir::OperandInfo info; void *buffer; const size_t size; const ir::Layout layout; OutputDesc(void) = delete; - OutputDesc(const model::OperandInfo &info, void *buffer, const size_t size, ir::Layout layout) + OutputDesc(const ir::OperandInfo &info, void *buffer, const size_t size, ir::Layout layout) : info(info), buffer(buffer), size(size), layout(layout) { } diff --git a/runtime/neurun/core/include/model/Data.h b/runtime/neurun/core/include/ir/Data.h similarity index 86% rename from runtime/neurun/core/include/model/Data.h rename to runtime/neurun/core/include/ir/Data.h index 3316ad8..0cc8cb5 100644 --- a/runtime/neurun/core/include/model/Data.h +++ b/runtime/neurun/core/include/ir/Data.h @@ -14,14 +14,14 @@ * limitations under the License. */ -#ifndef __NEURUN_MODEL_DATA_H__ -#define __NEURUN_MODEL_DATA_H__ +#ifndef __NEURUN_IR_DATA_H__ +#define __NEURUN_IR_DATA_H__ #include namespace neurun { -namespace model +namespace ir { struct Data @@ -69,7 +69,14 @@ private: const size_t _size; }; +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces. 
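
// The TODO block above is the migration idiom used throughout this patch:
// each type is defined once under neurun::ir and re-exported into the old
// neurun::model namespace as an alias, so untouched call sites keep compiling
// until the two namespaces are merged. A toy reduction of the same trick:

#include <type_traits>

namespace toy
{
namespace ir
{
struct Data
{
};
} // namespace ir

namespace model
{
using Data = ir::Data; // temporary alias; removed once callers migrate
} // namespace model
} // namespace toy

static_assert(std::is_same<toy::model::Data, toy::ir::Data>::value,
              "old and new spellings name the same type");
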
+namespace model +{ +using Data = ir::Data; +using CachedData = ir::CachedData; } // namespace model } // namespace neurun -#endif // __NEURUN_MODEL_DATA_H__ +#endif // __NEURUN_IR_DATA_H__ diff --git a/runtime/neurun/core/include/ir/Graph.h b/runtime/neurun/core/include/ir/Graph.h index 0ad5135..4564db2 100644 --- a/runtime/neurun/core/include/ir/Graph.h +++ b/runtime/neurun/core/include/ir/Graph.h @@ -19,7 +19,7 @@ #include -#include "model/Operands.h" +#include "ir/Operands.h" #include "model/Operations.h" #include "ir/LowerInfoMap.h" #include "model/Subgraph.h" @@ -82,7 +82,7 @@ public: { public: using GraphRef = typename std::conditional::type; - using IndexRef = const model::OperationIndex &; + using IndexRef = const ir::OperationIndex &; using NodeRef = typename std::conditional::type; using IterFn = std::function; @@ -124,14 +124,14 @@ public: // Graph Building public: - model::OperandIndex addOperand(const ir::Shape &shape, const ir::TypeInfo &type); - model::OperationIndex addOperation(std::unique_ptr &&node); - void setOperandValue(const model::OperandIndex &ind, std::unique_ptr &&data); - void addInput(const model::OperandIndex &ind); - void addOutput(const model::OperandIndex &ind); + ir::OperandIndex addOperand(const ir::Shape &shape, const ir::TypeInfo &type); + ir::OperationIndex addOperation(std::unique_ptr &&node); + void setOperandValue(const ir::OperandIndex &ind, std::unique_ptr &&data); + void addInput(const ir::OperandIndex &ind); + void addOutput(const ir::OperandIndex &ind); void finishBuilding(void); void lower(void); - void removeOperand(const model::OperandIndex &ind) { _operands.remove(ind); } + void removeOperand(const ir::OperandIndex &ind) { _operands.remove(ind); } bool isBuildingPhase(void) const { return _phase == Phase::BUILDING; } private: @@ -155,12 +155,12 @@ private: // Accessors public: - const model::OperandIndexSequence &getInputs() const { return _inputs; } - model::OperandIndexSequence &getInputs() { return _inputs; } - const model::OperandIndexSequence &getOutputs() const { return _outputs; } - model::OperandIndexSequence &getOutputs() { return _outputs; } - const model::Operands &operands() const { return _operands; } - model::Operands &operands() { return _operands; } // TODO Remove this non-const accessor + const ir::OperandIndexSequence &getInputs() const { return _inputs; } + ir::OperandIndexSequence &getInputs() { return _inputs; } + const ir::OperandIndexSequence &getOutputs() const { return _outputs; } + ir::OperandIndexSequence &getOutputs() { return _outputs; } + const ir::Operands &operands() const { return _operands; } + ir::Operands &operands() { return _operands; } // TODO Remove this non-const accessor const model::Operations &operations() const { return _operations; } model::Operations &operations() { return _operations; } const compiler::BackendResolver *backend_resolver() const { return _backend_resolver.get(); } @@ -168,22 +168,22 @@ public: private: Phase _phase{Phase::BUILDING}; model::Operations _operations; - model::Operands _operands; - model::OperandIndexSequence _inputs; - model::OperandIndexSequence _outputs; + ir::Operands _operands; + ir::OperandIndexSequence _inputs; + ir::OperandIndexSequence _outputs; // For LOWERED phase public: const LowerInfoMap *getLowerInfo() const { return _lower_info_map.get(); } - const operation::LowerInfo *getLowerInfo(const model::SubgraphIndex &subg_index) const; - void setLowerInfo(const model::SubgraphIndex &subg_index, + const operation::LowerInfo *getLowerInfo(const 
ir::SubgraphIndex &subg_index) const; + void setLowerInfo(const ir::SubgraphIndex &subg_index, std::unique_ptr &&lower_info); - void removeLowerInfo(const model::SubgraphIndex &subg_index); - const operand::LowerInfo *getLowerInfo(const model::OperandIndex &index) const; - operand::LowerInfo *getLowerInfo(const model::OperandIndex &index); - void setLowerInfo(const model::OperandIndex &index, + void removeLowerInfo(const ir::SubgraphIndex &subg_index); + const operand::LowerInfo *getLowerInfo(const ir::OperandIndex &index) const; + operand::LowerInfo *getLowerInfo(const ir::OperandIndex &index); + void setLowerInfo(const ir::OperandIndex &index, std::unique_ptr &&lower_info); - void removeLowerInfo(const model::OperandIndex &index); + void removeLowerInfo(const ir::OperandIndex &index); model::Subgraphs &subgraphs() { assert(_subgraphs); @@ -193,15 +193,14 @@ public: void setBackendResolver(std::unique_ptr &&br); private: - void - makeSubgraphs(model::OperandIndexMap> &operands_lower_info); + void makeSubgraphs(ir::OperandIndexMap> &operands_lower_info); void manipulateLowerInfo( - model::OperandIndexMap> &operands_lower_info); + ir::OperandIndexMap> &operands_lower_info); void dumpLowerInfo(); - bool mergeable(const model::SubgraphIndex &subg_index, const model::OperationIndex &node_index, + bool mergeable(const ir::SubgraphIndex &subg_index, const ir::OperationIndex &node_index, ir::Layout layout); - model::SubgraphIndex appendFreshSingleOpSubgraph(const model::OperationIndex &node_index, - const model::Operation &node, ir::Layout layout); + ir::SubgraphIndex appendFreshSingleOpSubgraph(const ir::OperationIndex &node_index, + const model::Operation &node, ir::Layout layout); private: std::unique_ptr _backend_resolver; diff --git a/runtime/neurun/core/include/model/Index.h b/runtime/neurun/core/include/ir/Index.h similarity index 75% rename from runtime/neurun/core/include/model/Index.h rename to runtime/neurun/core/include/ir/Index.h index e4218d5..69d307e 100644 --- a/runtime/neurun/core/include/model/Index.h +++ b/runtime/neurun/core/include/ir/Index.h @@ -14,14 +14,14 @@ * limitations under the License. */ -#ifndef __NEURUN_MODEL_OPERAND_INDEX_H__ -#define __NEURUN_MODEL_OPERAND_INDEX_H__ +#ifndef __NEURUN_IR_OPERAND_INDEX_H__ +#define __NEURUN_IR_OPERAND_INDEX_H__ #include "util/Index.h" namespace neurun { -namespace model +namespace ir { struct OperationIndexTag; @@ -36,7 +36,17 @@ using IOIndex = ::neurun::util::Index; struct SubgraphIndexTag; using SubgraphIndex = ::neurun::util::Index; -} // namespace model +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces. 
+namespace model
+{
+using OperationIndex = ir::OperationIndex;
+using OperandIndex = ir::OperandIndex;
+using IOIndex = ir::IOIndex;
+using SubgraphIndex = ir::SubgraphIndex;
+}
+
 } // namespace neurun
 
-#endif // __NEURUN_MODEL_OPERAND_INDEX_H__
+#endif // __NEURUN_IR_OPERAND_INDEX_H__
diff --git a/runtime/neurun/core/include/ir/LowerInfoMap.h b/runtime/neurun/core/include/ir/LowerInfoMap.h
index 24ed296..3e7251f 100644
--- a/runtime/neurun/core/include/ir/LowerInfoMap.h
+++ b/runtime/neurun/core/include/ir/LowerInfoMap.h
@@ -22,8 +22,8 @@
 #include "ir/operand/LowerInfo.h"
 #include "ir/operation/LowerInfo.h"
-#include "model/OperandIndexMap.h"
-#include "model/Index.h"
+#include "ir/OperandIndexMap.h"
+#include "ir/Index.h"
 
 namespace neurun
 {
@@ -32,8 +32,8 @@ namespace graph
 
 struct LowerInfoMap
 {
-  std::unordered_map<model::SubgraphIndex, std::unique_ptr<operation::LowerInfo>> operation;
-  model::OperandIndexMap<std::unique_ptr<operand::LowerInfo>> operand;
+  std::unordered_map<ir::SubgraphIndex, std::unique_ptr<operation::LowerInfo>> operation;
+  ir::OperandIndexMap<std::unique_ptr<operand::LowerInfo>> operand;
 };
 
 } // namespace graph
diff --git a/runtime/neurun/core/include/model/OpCode.h b/runtime/neurun/core/include/ir/OpCode.h
similarity index 71%
rename from runtime/neurun/core/include/model/OpCode.h
rename to runtime/neurun/core/include/ir/OpCode.h
index b698593..f1d5022 100644
--- a/runtime/neurun/core/include/model/OpCode.h
+++ b/runtime/neurun/core/include/ir/OpCode.h
@@ -14,22 +14,22 @@
  * limitations under the License.
  */
 
-#ifndef __NEURUN_MODEL_OP_CODE_H__
-#define __NEURUN_MODEL_OP_CODE_H__
+#ifndef __NEURUN_IR_OP_CODE_H__
+#define __NEURUN_IR_OP_CODE_H__
 
 #include <functional>
 #include <stdint.h>
 
 namespace neurun
 {
-namespace model
+namespace ir
 {
 
 enum class OpCode
 {
   Invalid, //< Unused
 #define OP(Name) Name, //< All operations
-#include "Operations.lst"
+#include "ir/Operations.lst"
 #undef OP
   Subgraph, //< Subgraph is treated specially
   COUNT
@@ -37,21 +37,27 @@ enum class OpCode
 
 const char *toString(OpCode opcode);
 
+} // namespace ir
+
+// TODO Remove after merging 'graph' and 'model' namespaces.
+namespace model
+{
+using OpCode = ir::OpCode;
 } // namespace model
 } // namespace neurun
 
 namespace std
 {
 
-template <> struct hash<::neurun::model::OpCode>
+template <> struct hash<neurun::ir::OpCode>
 {
-  size_t operator()(::neurun::model::OpCode value) const noexcept
+  size_t operator()(neurun::ir::OpCode value) const noexcept
   {
-    using type = typename std::underlying_type<::neurun::model::OpCode>::type;
+    using type = typename std::underlying_type<neurun::ir::OpCode>::type;
     return hash<type>()(static_cast<type>(value));
   }
 };
 
 } // namespace std
 
-#endif // __NEURUN_MODEL_OP_CODE_H__
+#endif // __NEURUN_IR_OP_CODE_H__
diff --git a/runtime/neurun/core/include/model/Operand.h b/runtime/neurun/core/include/ir/Operand.h
similarity index 91%
rename from runtime/neurun/core/include/model/Operand.h
rename to runtime/neurun/core/include/ir/Operand.h
index 7d5832e..424f0fe 100644
--- a/runtime/neurun/core/include/model/Operand.h
+++ b/runtime/neurun/core/include/ir/Operand.h
@@ -14,23 +14,23 @@
 * limitations under the License.
*/ -#ifndef __NEURUN_MODEL_OPERAND_H__ -#define __NEURUN_MODEL_OPERAND_H__ +#ifndef __NEURUN_IR_OPERAND_H__ +#define __NEURUN_IR_OPERAND_H__ #include #include #include #include -#include "Data.h" +#include "ir/Data.h" #include "ir/DataType.h" -#include "OperandInfo.h" +#include "ir/OperandInfo.h" #include "ir/operand/ParentInfo.h" // TODO Remove this dependency -#include "model/OperationIndexList.h" +#include "ir/OperationIndexList.h" namespace neurun { -namespace model +namespace ir { class Operand @@ -124,7 +124,13 @@ private: std::shared_ptr _parent_info; }; +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces. +namespace model +{ +using Operand = ir::Operand; } // namespace model } // namespace neurun -#endif // __NEURUN_MODEL_OPERAND_H__ +#endif // __NEURUN_IR_OPERAND_H__ diff --git a/runtime/neurun/core/include/model/OperandConstraint.h b/runtime/neurun/core/include/ir/OperandConstraint.h similarity index 92% rename from runtime/neurun/core/include/model/OperandConstraint.h rename to runtime/neurun/core/include/ir/OperandConstraint.h index c3145d2..cb06ff1 100644 --- a/runtime/neurun/core/include/model/OperandConstraint.h +++ b/runtime/neurun/core/include/ir/OperandConstraint.h @@ -23,9 +23,7 @@ namespace neurun { -namespace model -{ -namespace operation +namespace ir { class OperandConstraint @@ -54,7 +52,12 @@ private: uint32_t _end; }; -} // namespace operation +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces. +namespace model +{ +using OperandConstraint = ir::OperandConstraint; } // namespace model } // namespace neurun diff --git a/runtime/neurun/core/include/model/OperandIndexMap.h b/runtime/neurun/core/include/ir/OperandIndexMap.h similarity index 70% rename from runtime/neurun/core/include/model/OperandIndexMap.h rename to runtime/neurun/core/include/ir/OperandIndexMap.h index c3492d4..4f555e6 100644 --- a/runtime/neurun/core/include/model/OperandIndexMap.h +++ b/runtime/neurun/core/include/ir/OperandIndexMap.h @@ -14,21 +14,27 @@ * limitations under the License. */ -#ifndef __NEURUN_MODEL_OPERAND_INDEX_MAP_H__ -#define __NEURUN_MODEL_OPERAND_INDEX_MAP_H__ +#ifndef __NEURUN_IR_OPERAND_INDEX_MAP_H__ +#define __NEURUN_IR_OPERAND_INDEX_MAP_H__ #include -#include "Index.h" +#include "ir/Index.h" namespace neurun { -namespace model +namespace ir { -template using OperandIndexMap = std::unordered_map; +template using OperandIndexMap = std::unordered_map; + +} // namespace ir +// TODO Remove after merging 'graph' and 'model' namespaces. +namespace model +{ +template using OperandIndexMap = ir::OperandIndexMap; } // namespace model } // namespace neurun -#endif // __NEURUN_MODEL_OPERAND_INDEX_MAP_H__ +#endif // __NEURUN_IR_OPERAND_INDEX_MAP_H__ diff --git a/runtime/neurun/core/include/model/OperandIndexSequence.h b/runtime/neurun/core/include/ir/OperandIndexSequence.h similarity index 91% rename from runtime/neurun/core/include/model/OperandIndexSequence.h rename to runtime/neurun/core/include/ir/OperandIndexSequence.h index 725811a..49b8c05 100644 --- a/runtime/neurun/core/include/model/OperandIndexSequence.h +++ b/runtime/neurun/core/include/ir/OperandIndexSequence.h @@ -20,11 +20,11 @@ #include #include -#include "Index.h" +#include "ir/Index.h" namespace neurun { -namespace model +namespace ir { class OperandIndexSequence @@ -54,6 +54,12 @@ private: std::vector _set; }; +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces. 
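Because these compatibility shims are type aliases rather than wrapper classes, the old and new spellings denote one and the same entity; overload resolution, template specializations, and the one-definition rule are all unaffected. A small illustration of why that matters (stub class body and function names hypothetical):

  namespace neurun
  {
  namespace ir
  {
  class OperandIndexSequence
  {
  };
  } // namespace ir

  namespace model
  {
  using OperandIndexSequence = ir::OperandIndexSequence; // an alias, not a new type
  } // namespace model
  } // namespace neurun

  // A function declared against the old spelling...
  void dump(const neurun::model::OperandIndexSequence &seq) { (void)seq; }

  // ...accepts the new spelling with no conversion, because both names
  // refer to the same class.
  void test(const neurun::ir::OperandIndexSequence &seq) { dump(seq); }

The same shim pattern repeats below for OperandInfo, Operands, OperationIndexList, and OperationIndexMap.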
+namespace model +{ +using OperandIndexSequence = ir::OperandIndexSequence; } // namespace model } // namespace neurun diff --git a/runtime/neurun/core/include/model/OperandInfo.h b/runtime/neurun/core/include/ir/OperandInfo.h similarity index 89% rename from runtime/neurun/core/include/model/OperandInfo.h rename to runtime/neurun/core/include/ir/OperandInfo.h index 7a97d06..1a7aad8 100644 --- a/runtime/neurun/core/include/model/OperandInfo.h +++ b/runtime/neurun/core/include/ir/OperandInfo.h @@ -18,8 +18,8 @@ * @file OperandInfo.h * @brief This file contains OperandInfo class */ -#ifndef __NEURUN_MODEL_OPERAND_INFO_H__ -#define __NEURUN_MODEL_OPERAND_INFO_H__ +#ifndef __NEURUN_IR_OPERAND_INFO_H__ +#define __NEURUN_IR_OPERAND_INFO_H__ #include "ir/Shape.h" #include "ir/TypeInfo.h" @@ -27,7 +27,7 @@ namespace neurun { -namespace model +namespace ir { /** @@ -84,7 +84,13 @@ private: TypeInfo _typeInfo; }; +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces. +namespace model +{ +using OperandInfo = ir::OperandInfo; } // namespace model } // namespace neurun -#endif // __NEURUN_MODEL_OPERAND_INFO_H__ +#endif // __NEURUN_IR_OPERAND_INFO_H__ diff --git a/runtime/neurun/core/include/model/Operands.h b/runtime/neurun/core/include/ir/Operands.h similarity index 79% rename from runtime/neurun/core/include/model/Operands.h rename to runtime/neurun/core/include/ir/Operands.h index 517d2ff..797e8b4 100644 --- a/runtime/neurun/core/include/model/Operands.h +++ b/runtime/neurun/core/include/ir/Operands.h @@ -14,25 +14,31 @@ * limitations under the License. */ -#ifndef __NEURUN_MODEL_OPERANDS_H__ -#define __NEURUN_MODEL_OPERANDS_H__ +#ifndef __NEURUN_IR_OPERANDS_H__ +#define __NEURUN_IR_OPERANDS_H__ #include #include -#include "Operand.h" -#include "Index.h" +#include "ir/Operand.h" +#include "ir/Index.h" #include "util/ObjectManager.h" namespace neurun { -namespace model +namespace ir { class Operands : public util::ObjectManager { }; +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces. +namespace model +{ +using Operands = ir::Operands; } // namespace model } // namespace neurun diff --git a/runtime/neurun/core/include/model/OperationIndexList.h b/runtime/neurun/core/include/ir/OperationIndexList.h similarity index 90% rename from runtime/neurun/core/include/model/OperationIndexList.h rename to runtime/neurun/core/include/ir/OperationIndexList.h index 50d9155..6b0fda3 100644 --- a/runtime/neurun/core/include/model/OperationIndexList.h +++ b/runtime/neurun/core/include/ir/OperationIndexList.h @@ -22,11 +22,11 @@ #include #include -#include "model/Index.h" +#include "ir/Index.h" namespace neurun { -namespace model +namespace ir { class OperationIndexList @@ -53,6 +53,12 @@ private: std::list _list; }; +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces. +namespace model +{ +using OperationIndexList = ir::OperationIndexList; } // namespace model } // namespace neurun diff --git a/runtime/neurun/core/include/model/OperationIndexMap.h b/runtime/neurun/core/include/ir/OperationIndexMap.h similarity index 70% rename from runtime/neurun/core/include/model/OperationIndexMap.h rename to runtime/neurun/core/include/ir/OperationIndexMap.h index e0399ef..abce7b0 100644 --- a/runtime/neurun/core/include/model/OperationIndexMap.h +++ b/runtime/neurun/core/include/ir/OperationIndexMap.h @@ -14,21 +14,27 @@ * limitations under the License. 
*/ -#ifndef __NEURUN_MODEL_OPERATION_INDEX_MAP_H__ -#define __NEURUN_MODEL_OPERATION_INDEX_MAP_H__ +#ifndef __NEURUN_IR_OPERATION_INDEX_MAP_H__ +#define __NEURUN_IR_OPERATION_INDEX_MAP_H__ #include -#include "Index.h" +#include "ir/Index.h" namespace neurun { -namespace model +namespace ir { -template using OperationIndexMap = std::unordered_map; +template using OperationIndexMap = std::unordered_map; + +} // namespace ir +// TODO Remove after merging 'graph' and 'model' namespaces. +namespace model +{ +template using OperationIndexMap = ir::OperationIndexMap; } // namespace model } // namespace neurun -#endif // __NEURUN_MODEL_OPERATION_INDEX_MAP_H__ +#endif // __NEURUN_IR_OPERATION_INDEX_MAP_H__ diff --git a/runtime/neurun/core/include/model/Operations.lst b/runtime/neurun/core/include/ir/Operations.lst similarity index 100% rename from runtime/neurun/core/include/model/Operations.lst rename to runtime/neurun/core/include/ir/Operations.lst diff --git a/runtime/neurun/core/include/ir/operand/ParentInfo.h b/runtime/neurun/core/include/ir/operand/ParentInfo.h index 024925d..e1bb78a 100644 --- a/runtime/neurun/core/include/ir/operand/ParentInfo.h +++ b/runtime/neurun/core/include/ir/operand/ParentInfo.h @@ -25,7 +25,7 @@ #include -#include "model/Index.h" +#include "ir/Index.h" #include "util/Coordinates.h" namespace neurun @@ -49,7 +49,7 @@ public: * @param[in] coordinate Offset of child operand in parent operand * @return */ - ParentInfo(const model::OperandIndex parent, const Coordinates &coordinate) + ParentInfo(const ir::OperandIndex parent, const Coordinates &coordinate) : _parent{parent}, _coordinate{coordinate} { // DO NOTHING @@ -60,7 +60,7 @@ public: * @brief Return parent index * @return Parent index */ - model::OperandIndex parent(void) const { return _parent; } + ir::OperandIndex parent(void) const { return _parent; } /** * @brief Retern offset in parent * @return Offset @@ -68,7 +68,7 @@ public: Coordinates offset(void) const { return _coordinate; } private: - model::OperandIndex _parent; + ir::OperandIndex _parent; Coordinates _coordinate; }; diff --git a/runtime/neurun/core/include/model/Operation.h b/runtime/neurun/core/include/model/Operation.h index 5ae1859..bf2ca5a 100644 --- a/runtime/neurun/core/include/model/Operation.h +++ b/runtime/neurun/core/include/model/Operation.h @@ -19,10 +19,10 @@ #include -#include "model/OpCode.h" -#include "model/Operand.h" -#include "model/OperandIndexSequence.h" -#include "model/OperandConstraint.h" +#include "ir/OpCode.h" +#include "ir/Operand.h" +#include "ir/OperandIndexSequence.h" +#include "ir/OperandConstraint.h" namespace neurun { @@ -37,8 +37,6 @@ namespace neurun namespace model { -using OperandConstraint = ::neurun::model::operation::OperandConstraint; - class Operation { public: diff --git a/runtime/neurun/core/include/model/OperationVisitor.h b/runtime/neurun/core/include/model/OperationVisitor.h index cb25679..b8b6bf5 100644 --- a/runtime/neurun/core/include/model/OperationVisitor.h +++ b/runtime/neurun/core/include/model/OperationVisitor.h @@ -31,7 +31,7 @@ struct OperationVisitor #define OP(InternalName) \ virtual void visit(const operation::InternalName &) {} -#include "model/Operations.lst" +#include "ir/Operations.lst" #undef OP // This Subgraph node should be handled specially so that diff --git a/runtime/neurun/core/include/model/Operations.h b/runtime/neurun/core/include/model/Operations.h index 4a1b2ca..6d683f3 100644 --- a/runtime/neurun/core/include/model/Operations.h +++ 
b/runtime/neurun/core/include/model/Operations.h @@ -17,7 +17,7 @@ #ifndef __NEURUN_MODEL_OPERATIONS_H__ #define __NEURUN_MODEL_OPERATIONS_H__ -#include "model/Index.h" +#include "ir/Index.h" #include "model/Operation.h" #include "util/ObjectManager.h" diff --git a/runtime/neurun/core/include/model/Subgraph.h b/runtime/neurun/core/include/model/Subgraph.h index a59db64..83eb403 100644 --- a/runtime/neurun/core/include/model/Subgraph.h +++ b/runtime/neurun/core/include/model/Subgraph.h @@ -22,7 +22,7 @@ #include #include "ir/Layout.h" -#include "Index.h" +#include "ir/Index.h" #include "Operation.h" namespace neurun @@ -81,7 +81,7 @@ public: std::vector::const_iterator end() const { return _operations.end(); } private: - bool exist(const neurun::model::OperationIndex &index) const; + bool exist(const OperationIndex &index) const; private: std::vector _operations; diff --git a/runtime/neurun/core/include/model/Subgraphs.h b/runtime/neurun/core/include/model/Subgraphs.h index 6946649..6de8729 100644 --- a/runtime/neurun/core/include/model/Subgraphs.h +++ b/runtime/neurun/core/include/model/Subgraphs.h @@ -17,7 +17,7 @@ #ifndef __NEURUN_MODEL_SUBGRAPHS_H__ #define __NEURUN_MODEL_SUBGRAPHS_H__ -#include "model/Index.h" +#include "ir/Index.h" #include "model/Subgraph.h" #include "util/ObjectManager.h" diff --git a/runtime/neurun/core/include/util/Config.lst b/runtime/neurun/core/include/util/Config.lst index 5f5c8de..ff24459 100644 --- a/runtime/neurun/core/include/util/Config.lst +++ b/runtime/neurun/core/include/util/Config.lst @@ -38,6 +38,6 @@ CONFIG(TRACE_FILEPATH , std::string , "") #define OP(InternalName) \ CONFIG(OP_BACKEND_ ## InternalName, std::string, "") -#include "model/Operations.lst" +#include "ir/Operations.lst" #undef OP diff --git a/runtime/neurun/core/include/util/ShapeInference.h b/runtime/neurun/core/include/util/ShapeInference.h index 1a6e5ec..f12727c 100644 --- a/runtime/neurun/core/include/util/ShapeInference.h +++ b/runtime/neurun/core/include/util/ShapeInference.h @@ -22,8 +22,8 @@ #include "model/operation/MaxPool2D.h" #include "model/operation/Conv2D.h" #include "model/operation/DepthwiseConv2D.h" -#include "model/Operands.h" -#include "model/Index.h" +#include "ir/Operands.h" +#include "ir/Index.h" #include "ir/Layout.h" namespace neurun diff --git a/runtime/neurun/core/include/util/Utils.h b/runtime/neurun/core/include/util/Utils.h index 63a7a97..e7468da 100644 --- a/runtime/neurun/core/include/util/Utils.h +++ b/runtime/neurun/core/include/util/Utils.h @@ -25,7 +25,7 @@ #include "ir/InternalType.h" #include "ir/Layout.h" -#include "model/Operand.h" +#include "ir/Operand.h" #include "util/Coordinates.h" #define UNUSED_RELEASE(a) (void)(a) diff --git a/runtime/neurun/core/src/backend/BackendManager.h b/runtime/neurun/core/src/backend/BackendManager.h index 29a772f..9c6483f 100644 --- a/runtime/neurun/core/src/backend/BackendManager.h +++ b/runtime/neurun/core/src/backend/BackendManager.h @@ -20,7 +20,7 @@ #include #include -#include "model/Operands.h" +#include "ir/Operands.h" #include "backend/Backend.h" namespace neurun diff --git a/runtime/neurun/core/src/compiler/BackendResolver.h b/runtime/neurun/core/src/compiler/BackendResolver.h index 9ba7bd4..ddcae79 100644 --- a/runtime/neurun/core/src/compiler/BackendResolver.h +++ b/runtime/neurun/core/src/compiler/BackendResolver.h @@ -24,7 +24,7 @@ #include "backend/Backend.h" #include "backend/BackendManager.h" #include "backend/ITensorBuilder.h" -#include "model/OperationIndexMap.h" +#include "ir/OperationIndexMap.h" 
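BackendResolver below keys its `_gen_map` by operation index through the `ir::OperationIndexMap` alias that this hunk's include pulls in. An unordered_map can be keyed by the strong index type only because a std::hash specialization forwards to the wrapped integer, the same trick OpCode.h applies to its enum above. A condensed, self-contained sketch of the two pieces (the struct here is a stand-in, not the real util::Index):

  #include <cstdint>
  #include <functional>
  #include <unordered_map>

  // Stand-in for the strong index type (cf. ir::OperationIndex).
  struct OperationIndex
  {
    uint32_t value;
    bool operator==(OperationIndex o) const { return value == o.value; }
  };

  namespace std
  {
  // Forward hashing to the wrapped integer so the strong type can key
  // an unordered_map.
  template <> struct hash<OperationIndex>
  {
    size_t operator()(OperationIndex index) const noexcept
    {
      return hash<uint32_t>()(index.value);
    }
  };
  } // namespace std

  // The pattern behind the ir::OperationIndexMap<T> alias.
  template <typename T> using OperationIndexMap = std::unordered_map<OperationIndex, T>;

  int main()
  {
    OperationIndexMap<int64_t> rank; // cf. HEScheduler's _op_to_rank map
    rank[OperationIndex{0}] = 42;
    return rank.at(OperationIndex{0}) == 42 ? 0 : 1;
  }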
namespace neurun { @@ -34,7 +34,7 @@ namespace compiler class BackendResolver { public: - BackendResolver(const model::Operands &operands, + BackendResolver(const ir::Operands &operands, const std::vector &backends, const std::shared_ptr &kb) { @@ -51,7 +51,7 @@ public: BackendResolver &operator=(BackendResolver &&obj) = default; public: - const backend::BackendContext *getBackendContext(const model::OperationIndex &index) const + const backend::BackendContext *getBackendContext(const ir::OperationIndex &index) const { return _context_manager.at(_gen_map.at(index)).get(); } @@ -71,17 +71,17 @@ public: return ret; } - const backend::Backend *getBackend(const model::OperationIndex &index) const + const backend::Backend *getBackend(const ir::OperationIndex &index) const { return getBackendContext(index)->backend; } - void setBackend(const model::OperationIndex &index, const backend::Backend *backend) + void setBackend(const ir::OperationIndex &index, const backend::Backend *backend) { _gen_map[index] = backend; } - void iterate(const std::function &fn) const { for (const auto &e : _gen_map) @@ -93,7 +93,7 @@ public: private: std::unordered_map> _context_manager; - model::OperationIndexMap _gen_map; + ir::OperationIndexMap _gen_map; }; } // namespace compiler diff --git a/runtime/neurun/core/src/compiler/Compiler.cc b/runtime/neurun/core/src/compiler/Compiler.cc index 9e03761..32eb578 100644 --- a/runtime/neurun/core/src/compiler/Compiler.cc +++ b/runtime/neurun/core/src/compiler/Compiler.cc @@ -64,7 +64,7 @@ void Compiler::compile(void) ***************************************************/ // Schedule std::unique_ptr br; - std::shared_ptr> indexed_ranks; + std::shared_ptr> indexed_ranks; if (util::getConfigBool(util::config::USE_SCHEDULER)) { auto scheduler = compiler::HEScheduler( diff --git a/runtime/neurun/core/src/compiler/ExecutorFactory.cc b/runtime/neurun/core/src/compiler/ExecutorFactory.cc index 5ba4b36..47e982d 100644 --- a/runtime/neurun/core/src/compiler/ExecutorFactory.cc +++ b/runtime/neurun/core/src/compiler/ExecutorFactory.cc @@ -180,7 +180,7 @@ exec::IExecutor *ExecutorFactory::createLinearExecutor(graph::Graph &graph) // Wrap tensors as Object and store them to plan for (auto &tensor_builder : tensor_builders) { - tensor_builder->iterate([&](const model::OperandIndex &index) { + tensor_builder->iterate([&](const ir::OperandIndex &index) { auto object = tensor_builder->tensorAt(index); operand_context->set(index, object); }); @@ -211,23 +211,22 @@ exec::IExecutor *ExecutorFactory::createDataflowExecutor(graph::Graph &graph, bo { auto operand_context = std::make_shared(); - graph.subgraphs().iterate([&](const model::SubgraphIndex &, const model::Subgraph &subg) { + graph.subgraphs().iterate([&](const ir::SubgraphIndex &, const model::Subgraph &subg) { auto subtensor_analyzer = SubTensorAnalyzer{graph.operands()}; subg.accept(subtensor_analyzer); }); // Fix shapes and register tensors - graph.subgraphs().iterate( - [&](const model::SubgraphIndex &subg_index, const model::Subgraph &subg) { - auto backend = graph.getLowerInfo(subg_index)->backend(); - auto shape_fixer = graph.backend_resolver()->getBackendContext(backend)->shape_fixer; - shape_fixer->fix(subg); - const auto tensor_register = - graph.backend_resolver()->getBackendContext(backend)->tensor_register; - tensor_register->registerTensors(subg, graph.getLowerInfo()); - }); - - graph.operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) { + graph.subgraphs().iterate([&](const ir::SubgraphIndex 
&subg_index, const model::Subgraph &subg) { + auto backend = graph.getLowerInfo(subg_index)->backend(); + auto shape_fixer = graph.backend_resolver()->getBackendContext(backend)->shape_fixer; + shape_fixer->fix(subg); + const auto tensor_register = + graph.backend_resolver()->getBackendContext(backend)->tensor_register; + tensor_register->registerTensors(subg, graph.getLowerInfo()); + }); + + graph.operands().iterate([&](const ir::OperandIndex &ind, const ir::Operand &obj) { const auto lower_info = graph.getLowerInfo(ind); for (auto factor : lower_info->def_factors()) { @@ -276,29 +275,28 @@ exec::IExecutor *ExecutorFactory::createDataflowExecutor(graph::Graph &graph, bo }; // TODO Remove this method and make `append` to get index value as an argument - void setNextIndex(const model::SubgraphIndex next_index) { _next_index = next_index; } + void setNextIndex(const ir::SubgraphIndex next_index) { _next_index = next_index; } exec::DataflowExecutor::CodeMap &&releaseCodeMap() { return std::move(_code_map); } private: - model::SubgraphIndex _next_index; + ir::SubgraphIndex _next_index; exec::DataflowExecutor::CodeMap _code_map; }; auto execution_builder = nnfw::cpp14::make_unique(); // Generate kernels - graph.subgraphs().iterate( - [&](const model::SubgraphIndex &subg_index, const model::Subgraph &subg) { - auto backend = graph.getLowerInfo(subg_index)->backend(); - auto constant_initializer = - graph.backend_resolver()->getBackendContext(backend)->constant_initializer; - constant_initializer->generate(subg, graph.operands()); - // TODO This approach is temporal. See declaration of `setNextIndex`. - execution_builder->setNextIndex(subg_index); - auto kernel_gen = graph.backend_resolver()->getBackendContext(backend)->kernel_gen; - kernel_gen->generate(subg, execution_builder.get()); - }); + graph.subgraphs().iterate([&](const ir::SubgraphIndex &subg_index, const model::Subgraph &subg) { + auto backend = graph.getLowerInfo(subg_index)->backend(); + auto constant_initializer = + graph.backend_resolver()->getBackendContext(backend)->constant_initializer; + constant_initializer->generate(subg, graph.operands()); + // TODO This approach is temporal. See declaration of `setNextIndex`. + execution_builder->setNextIndex(subg_index); + auto kernel_gen = graph.backend_resolver()->getBackendContext(backend)->kernel_gen; + kernel_gen->generate(subg, execution_builder.get()); + }); for (const auto &tensor_builder : tensor_builders) { @@ -341,7 +339,7 @@ exec::IExecutor *ExecutorFactory::createDataflowExecutor(graph::Graph &graph, bo // Wrap tensors as Object and store them to plan for (auto &tensor_builder : tensor_builders) { - tensor_builder->iterate([&](const model::OperandIndex &index) { + tensor_builder->iterate([&](const ir::OperandIndex &index) { auto object = tensor_builder->tensorAt(index); operand_context->set(index, object); }); diff --git a/runtime/neurun/core/src/compiler/HEScheduler.cc b/runtime/neurun/core/src/compiler/HEScheduler.cc index 8f623b5..a925527 100644 --- a/runtime/neurun/core/src/compiler/HEScheduler.cc +++ b/runtime/neurun/core/src/compiler/HEScheduler.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "model/Operand.h" +#include "ir/Operand.h" #include "compiler/HEScheduler.h" #include "ir/Graph.h" #include "util/ConfigSource.h" @@ -69,8 +69,8 @@ static bool isWorkaroundSkip(const graph::Graph &graph, const backend::Backend * broadcast, scheduling will select it since it doesn't distinguish broadcast and non-broadcast like it does for quant non-quantized*/ if (backend->config()->id() == "cpu" && - (node.opcode() == model::OpCode::Add || node.opcode() == model::OpCode::Sub || - node.opcode() == model::OpCode::Mul)) + (node.opcode() == ir::OpCode::Add || node.opcode() == ir::OpCode::Sub || + node.opcode() == ir::OpCode::Mul)) { const auto lhs_index{node.getInputs().at(model::operation::Add::Input::LHS)}; const auto rhs_index{node.getInputs().at(model::operation::Add::Input::RHS)}; @@ -85,7 +85,7 @@ static bool isWorkaroundSkip(const graph::Graph &graph, const backend::Backend * Adding exception in stage doesn't help. Because if there is a record for Mul without broadcast, scheduling will select it since it doesn't distinguish broadcast and non-broadcast like it does for quant non-quantized*/ - else if (backend->config()->id() == "acl_neon" && node.opcode() == model::OpCode::Mul) + else if (backend->config()->id() == "acl_neon" && node.opcode() == ir::OpCode::Mul) { const auto lhs_index{node.getInputs().at(model::operation::Mul::Input::LHS)}; const auto rhs_index{node.getInputs().at(model::operation::Mul::Input::RHS)}; @@ -184,8 +184,8 @@ bool HEScheduler::isNodeProfiled(const model::Operation &node) return true; } -void HEScheduler::scheduleBranch(const model::OperationIndex &index, - model::OperationIndexMap &scheduled) +void HEScheduler::scheduleBranch(const ir::OperationIndex &index, + ir::OperationIndexMap &scheduled) { auto loc_index = index; const backend::Backend *parent_backend = nullptr; @@ -203,7 +203,6 @@ void HEScheduler::scheduleBranch(const model::OperationIndex &index, parent_backend = _backend_resolver->getBackend(loc_index); const auto &node = _graph->operations().at(loc_index); - model::OperandIndex tmp; /* get the only output operand, that is input of the next single operation * and just this nodes output.*/ if (node.getOutputs().size() != 1) @@ -238,7 +237,7 @@ std::unique_ptr HEScheduler::schedule(const graph::Gr { // Check if profiling info about all backend/node pairs already exists bool all_nodes_are_profiled = true; - _graph->operations().iterate([&](const model::OperationIndex &, const model::Operation &op) { + _graph->operations().iterate([&](const ir::OperationIndex &, const model::Operation &op) { if (all_nodes_are_profiled) all_nodes_are_profiled = isNodeProfiled(op); }); @@ -253,10 +252,9 @@ std::unique_ptr HEScheduler::schedule(const graph::Gr } } - model::OperationIndexMap visited; - graph.operations().iterate([&](const model::OperationIndex &index, const model::Operation &) { - visited[index] = false; - }); + ir::OperationIndexMap visited; + graph.operations().iterate( + [&](const ir::OperationIndex &index, const model::Operation &) { visited[index] = false; }); // for each task select the backend with the smallest earliest finishing time(eft) for (const auto &rank : _rank_to_op) { @@ -326,17 +324,17 @@ void HEScheduler::makeRank() VERBOSE(HEScheduler::makeRank) << "task prioritizing" << std::endl; _graph->operations().iterate( - [&](const model::OperationIndex &index, const model::Operation &) { DFSMaxRank(index); }); + [&](const ir::OperationIndex &index, const model::Operation &) { DFSMaxRank(index); }); // Check that ranks are calculated 
for all operations(nodes) - _graph->operations().iterate([&](const model::OperationIndex &index, const model::Operation &) { + _graph->operations().iterate([&](const ir::OperationIndex &index, const model::Operation &) { UNUSED_RELEASE(index); assert(_op_to_rank->find(index) != _op_to_rank->end()); }); VERBOSE(HEScheduler::makeRank) << "task prioritizing finished" << std::endl; } -int64_t HEScheduler::DFSMaxRank(const model::OperationIndex &index) +int64_t HEScheduler::DFSMaxRank(const ir::OperationIndex &index) { auto op_to_rank_it = _op_to_rank->find(index); if (op_to_rank_it != _op_to_rank->end()) @@ -401,7 +399,7 @@ int64_t HEScheduler::DFSMaxRank(const model::OperationIndex &index) return rank; } -int64_t HEScheduler::DFSChildrenMaxRank(const model::OperationIndex &index) +int64_t HEScheduler::DFSChildrenMaxRank(const ir::OperationIndex &index) { const auto &node = _graph->operations().at(index); int64_t max_child_rank = 0; @@ -456,8 +454,7 @@ int64_t HEScheduler::backendAvailableTime(const backend::Backend *backend, return prev_op_ft; } -bool HEScheduler::schedule(const model::OperationIndex &index, - const backend::Backend *parent_backend) +bool HEScheduler::schedule(const ir::OperationIndex &index, const backend::Backend *parent_backend) { VERBOSE(HEScheduler::schedule) << "scheduling (" << index.value() << ")" << std::endl; int64_t eft = std::numeric_limits::max(), selected_exec_time = 0; @@ -507,7 +504,7 @@ bool HEScheduler::schedule(const model::OperationIndex &index, } std::pair -HEScheduler::ESTAndExecTime(const backend::Backend *backend, const model::OperationIndex &index, +HEScheduler::ESTAndExecTime(const backend::Backend *backend, const ir::OperationIndex &index, std::multimap &transfer_st_exec_time) { const bool is_linear_exec = "Linear" == util::getConfigString(util::config::EXECUTOR); diff --git a/runtime/neurun/core/src/compiler/HEScheduler.h b/runtime/neurun/core/src/compiler/HEScheduler.h index b888fa6..657149e 100644 --- a/runtime/neurun/core/src/compiler/HEScheduler.h +++ b/runtime/neurun/core/src/compiler/HEScheduler.h @@ -28,7 +28,7 @@ #include "backend/ExecTime.h" #include "backend/Backend.h" #include "cpp14/memory.h" -#include "model/OperationIndexMap.h" +#include "ir/OperationIndexMap.h" #include #include @@ -48,11 +48,10 @@ public: * @param[in] model Graph model * @param[in] backend_resolver backend resolver */ - HEScheduler(const neurun::model::Operands &operands, - std::vector backends, + HEScheduler(const ir::Operands &operands, std::vector backends, const std::shared_ptr &kb) : _is_supported{}, _backends_avail_time{}, _ops_eft{}, - _op_to_rank{std::make_shared>()}, + _op_to_rank{std::make_shared>()}, _all_backends(std::move(backends)) { _backend_resolver = @@ -76,12 +75,12 @@ public: * https://www.hindawi.com/journals/sp/2016/3676149/ */ std::unique_ptr schedule(const graph::Graph &graph) final; - std::shared_ptr> getIndexedRanks() { return _op_to_rank; } + std::shared_ptr> getIndexedRanks() { return _op_to_rank; } private: bool isNodeProfiled(const model::Operation &); - bool schedule(const model::OperationIndex &, const backend::Backend *parent_backend); + bool schedule(const ir::OperationIndex &, const backend::Backend *parent_backend); /** * @brief Get earliest starting time and execution time of an operation on a backend. 
* @@ -95,7 +94,7 @@ private: * @return earliest starting time and execution time */ std::pair - ESTAndExecTime(const backend::Backend *backend, const model::OperationIndex &index, + ESTAndExecTime(const backend::Backend *backend, const ir::OperationIndex &index, std::multimap &transfer_st_exec_time); /** * @brief Returns the latest finishing time of parents of a node. @@ -111,9 +110,9 @@ private: void makeRank(); - int64_t DFSMaxRank(const model::OperationIndex &index); + int64_t DFSMaxRank(const ir::OperationIndex &index); - int64_t DFSChildrenMaxRank(const model::OperationIndex &index); + int64_t DFSChildrenMaxRank(const ir::OperationIndex &index); /** * @brief Returns the time, when backend is available for at least given amount of time. * @@ -149,8 +148,7 @@ private: * * @return N/A */ - void scheduleBranch(const model::OperationIndex &index, - model::OperationIndexMap &scheduled); + void scheduleBranch(const ir::OperationIndex &index, ir::OperationIndexMap &scheduled); private: // This variable stores backend/node pairs with unknown execution time, and hints scheduler @@ -160,9 +158,9 @@ private: std::unordered_map> _is_supported; // Finishing and starting time of each backend std::unordered_map> _backends_avail_time; - model::OperationIndexMap _ops_eft; - std::multimap> _rank_to_op; - std::shared_ptr> _op_to_rank; + ir::OperationIndexMap _ops_eft; + std::multimap> _rank_to_op; + std::shared_ptr> _op_to_rank; std::unique_ptr _backend_resolver; std::unique_ptr _exec_time; const graph::Graph *_graph{nullptr}; diff --git a/runtime/neurun/core/src/compiler/Linear.cc b/runtime/neurun/core/src/compiler/Linear.cc index 07ee7a8..864a90a 100644 --- a/runtime/neurun/core/src/compiler/Linear.cc +++ b/runtime/neurun/core/src/compiler/Linear.cc @@ -37,9 +37,9 @@ Linear::Linear(graph::Graph &graph) : _graph(graph) // Get SubgraphSequence by topological sorting { model::Subgraphs &subgraphs = _graph.subgraphs(); - model::Operands &operands = _graph.operands(); + ir::Operands &operands = _graph.operands(); // subgraphs can't access a subgraph by an operand so that input_to_subgs can offer it - std::unordered_map> input_to_subgs; + std::unordered_map> input_to_subgs; // Get the relations between input/subgraph to be used for dfs-post-iter // @@ -56,7 +56,7 @@ Linear::Linear(graph::Graph &graph) : _graph(graph) // [SUBG3] // | // [4] - subgraphs.iterate([&](const model::SubgraphIndex &subg_idx, model::Subgraph &subg) { + subgraphs.iterate([&](const ir::SubgraphIndex &subg_idx, model::Subgraph &subg) { for (auto input : subg.getInputs()) { // only valid_inputs @@ -67,7 +67,7 @@ Linear::Linear(graph::Graph &graph) : _graph(graph) auto it = input_to_subgs.find(input); if (it == input_to_subgs.end()) { - std::list list{subg_idx}; + std::list list{subg_idx}; input_to_subgs[input] = list; } else @@ -77,13 +77,12 @@ Linear::Linear(graph::Graph &graph) : _graph(graph) } }); - std::unordered_map visited; - subgraphs.iterate([&](const model::SubgraphIndex &index, const model::Subgraph &) { - visited[index] = false; - }); + std::unordered_map visited; + subgraphs.iterate( + [&](const ir::SubgraphIndex &index, const model::Subgraph &) { visited[index] = false; }); - std::function dfs_recursive = - [&](const model::SubgraphIndex &index, model::Subgraph &subg) -> void { + std::function dfs_recursive = + [&](const ir::SubgraphIndex &index, model::Subgraph &subg) -> void { if (visited[index]) return; visited[index] = true; @@ -109,9 +108,8 @@ Linear::Linear(graph::Graph &graph) : _graph(graph) 
subgraphs.iterate(dfs_recursive); // All of the nodes must have been visited. - assert( - std::all_of(visited.begin(), visited.end(), - [](const std::pair &v) { return v.second; })); + assert(std::all_of(visited.begin(), visited.end(), + [](const std::pair &v) { return v.second; })); // NOTE. Now these subgraph are on the reverse order std::reverse(_elements.begin(), _elements.end()); @@ -148,15 +146,15 @@ void Linear::accept(model::OperationVisitor &&visitor) const void Linear::planTensors() { - model::OperandIndexMap> tensor_builder_map; + ir::OperandIndexMap> tensor_builder_map; // NOTE // While current ITensorBuilder exposes registerSubTensorInfo for subtensor, // this stage uses registerSubTensorInfo() and notify{First|Last}Use() // but handling subtensor should be processed on each backend. See #5726. - model::OperandIndexMap uses_map; - model::OperandIndexMap def_map; - model::OperandIndexSequence constants; + ir::OperandIndexMap uses_map; + ir::OperandIndexMap def_map; + ir::OperandIndexSequence constants; iterate([&](const neurun::compiler::Linear::Element &element) { const auto backend = element.lower_info->backend(); @@ -166,7 +164,7 @@ void Linear::planTensors() }); // Prepare scanning - _graph.operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) { + _graph.operands().iterate([&](const ir::OperandIndex &ind, const ir::Operand &obj) { const auto lower_info = _graph.getLowerInfo(ind); // TODO Remove if neurun doesn't support anymore such as // GeneratedTests.reshape_quant8_weights_as_inputs @@ -287,13 +285,13 @@ void Linear::planTensors() } } - assert(std::all_of( - uses_map.begin(), uses_map.end(), - [](std::pair it) { return it.second == 0; })); + assert( + std::all_of(uses_map.begin(), uses_map.end(), + [](std::pair it) { return it.second == 0; })); - assert(std::all_of( - def_map.begin(), def_map.end(), - [](std::pair it) { return it.second == 0; })); + assert( + std::all_of(def_map.begin(), def_map.end(), + [](std::pair it) { return it.second == 0; })); } void Linear::iterate(const std::function &fn) const diff --git a/runtime/neurun/core/src/compiler/ManualScheduler.cc b/runtime/neurun/core/src/compiler/ManualScheduler.cc index f7d859c..0768156 100644 --- a/runtime/neurun/core/src/compiler/ManualScheduler.cc +++ b/runtime/neurun/core/src/compiler/ManualScheduler.cc @@ -15,7 +15,7 @@ */ #include "ManualScheduler.h" -#include "model/OpCode.h" +#include "ir/OpCode.h" #include "model/Operations.Include.h" #include "backend/Backend.h" #include "backend/BackendManager.h" @@ -66,14 +66,14 @@ std::unique_ptr ManualScheduler::schedule(const graph::Graph &g VERBOSE(ManualScheduler) << "Default backend for all ops: " << backend_all_str << std::endl; - graph.operations().iterate([&](const model::OperationIndex &index, const model::Operation &) { + graph.operations().iterate([&](const ir::OperationIndex &index, const model::Operation &) { backend_resolver->setBackend(index, backend_all); }); // 2. 
Backend per operation type - std::unordered_map op_type_map; + std::unordered_map op_type_map; // By default, Custom uses cpu backend - op_type_map[model::OpCode::Custom] = backend::BackendManager::get().get("cpu"); + op_type_map[ir::OpCode::Custom] = backend::BackendManager::get().get("cpu"); #define OP(InternalName) \ { \ @@ -82,14 +82,14 @@ std::unique_ptr ManualScheduler::schedule(const graph::Graph &g { \ auto backend = backend::BackendManager::get().get(backend_str); \ VERBOSE(Lower) << "backend for " << #InternalName << ": " << backend_str << std::endl; \ - op_type_map[model::OpCode::InternalName] = backend; \ + op_type_map[ir::OpCode::InternalName] = backend; \ } \ } -#include "model/Operations.lst" +#include "ir/Operations.lst" #undef OP graph.operations().iterate( - [&](const model::OperationIndex &index, const model::Operation &operation) { + [&](const ir::OperationIndex &index, const model::Operation &operation) { auto itr = op_type_map.find(operation.opcode()); if (itr != op_type_map.end()) { @@ -114,8 +114,8 @@ std::unique_ptr ManualScheduler::schedule(const graph::Graph &g const auto &val = key_val.at(1); auto key = static_cast(std::stoi(key_str)); - graph.operations().at(model::OperationIndex{key}); // Check if exist, or this wil throw - backend_resolver->setBackend(model::OperationIndex{key}, + graph.operations().at(ir::OperationIndex{key}); // Check if exist, or this wil throw + backend_resolver->setBackend(ir::OperationIndex{key}, backend::BackendManager::get().get(val)); } } @@ -127,11 +127,11 @@ std::unique_ptr ManualScheduler::schedule(const graph::Graph &g // 4. Operations that are specially handled // All configuration above will be ignored(overwritten) - op_type_map[model::OpCode::Permute] = backend::BackendManager::get().get("cpu"); + op_type_map[ir::OpCode::Permute] = backend::BackendManager::get().get("cpu"); // Dump final assignment backend_resolver->iterate( - [&](const model::OperationIndex &index, const backend::BackendContext &backend_ctx) { + [&](const ir::OperationIndex &index, const backend::BackendContext &backend_ctx) { VERBOSE(ManualScheduler) << "backend for operation #" << index.value() << ": " << backend_ctx.backend->config()->id() << std::endl; }); diff --git a/runtime/neurun/core/src/compiler/OperandContext.cc b/runtime/neurun/core/src/compiler/OperandContext.cc index 3fc3816..c06f615 100644 --- a/runtime/neurun/core/src/compiler/OperandContext.cc +++ b/runtime/neurun/core/src/compiler/OperandContext.cc @@ -23,7 +23,7 @@ namespace neurun namespace compiler { -OperandContext &OperandContext::set(const model::OperandIndex &id, +OperandContext &OperandContext::set(const ir::OperandIndex &id, const std::shared_ptr &tensor) { // Only one tensor for an id @@ -33,7 +33,7 @@ OperandContext &OperandContext::set(const model::OperandIndex &id, } void OperandContext::iterate( - const std::function &fn) + const std::function &fn) { for (auto &e : _tensors) { diff --git a/runtime/neurun/core/src/compiler/OperandContext.h b/runtime/neurun/core/src/compiler/OperandContext.h index 0b7dc52..da1a51b 100644 --- a/runtime/neurun/core/src/compiler/OperandContext.h +++ b/runtime/neurun/core/src/compiler/OperandContext.h @@ -18,7 +18,7 @@ #define __NEURUN_COMPILER_OPERAND_CONTEXT_H__ #include "backend/operand/ITensor.h" -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" #include #include @@ -30,31 +30,28 @@ namespace compiler class OperandContext { public: - OperandContext &set(const model::OperandIndex &ind, + OperandContext &set(const 
ir::OperandIndex &ind, const std::shared_ptr &tensor); public: - bool exist(const ::neurun::model::OperandIndex &ind) const - { - return _tensors.find(ind) != _tensors.end(); - } + bool exist(const ir::OperandIndex &ind) const { return _tensors.find(ind) != _tensors.end(); } public: - std::shared_ptr at(const model::OperandIndex &ind) const + std::shared_ptr at(const ir::OperandIndex &ind) const { return _tensors.at(ind); } - std::shared_ptr &at(const model::OperandIndex &ind) + std::shared_ptr &at(const ir::OperandIndex &ind) { return _tensors.at(ind); } void - iterate(const std::function &fn); + iterate(const std::function &fn); private: - model::OperandIndexMap> _tensors; + ir::OperandIndexMap> _tensors; }; } // namespace compiler diff --git a/runtime/neurun/core/src/compiler/OperationValidator.cc b/runtime/neurun/core/src/compiler/OperationValidator.cc index 214139b..b6bdb9b 100644 --- a/runtime/neurun/core/src/compiler/OperationValidator.cc +++ b/runtime/neurun/core/src/compiler/OperationValidator.cc @@ -18,7 +18,7 @@ #include -#include "model/Operands.h" +#include "ir/Operands.h" #include "ir/operation/LowerInfo.h" #include "util/logging.h" diff --git a/runtime/neurun/core/src/compiler/OperationValidator.h b/runtime/neurun/core/src/compiler/OperationValidator.h index 4ea62ea..9501128 100644 --- a/runtime/neurun/core/src/compiler/OperationValidator.h +++ b/runtime/neurun/core/src/compiler/OperationValidator.h @@ -22,10 +22,10 @@ namespace neurun { -namespace model +namespace ir { class Operands; -} // namespace model +} // namespace ir } // namespace neurun namespace neurun @@ -36,8 +36,7 @@ namespace compiler class OperationValidator : public model::OperationVisitor { public: - OperationValidator(const neurun::model::Operands &ctx) - : _ctx{ctx}, _current_subg_layout{ir::Layout::UNKNOWN} + OperationValidator(const ir::Operands &ctx) : _ctx{ctx}, _current_subg_layout{ir::Layout::UNKNOWN} { } @@ -71,7 +70,7 @@ public: void visit(const model::operation::Pad &node) override; private: - const neurun::model::Operands &_ctx; + const ir::Operands &_ctx; ir::Layout _current_subg_layout; }; diff --git a/runtime/neurun/core/src/compiler/ParamChecker.cc b/runtime/neurun/core/src/compiler/ParamChecker.cc index 657da71..a9c713b 100644 --- a/runtime/neurun/core/src/compiler/ParamChecker.cc +++ b/runtime/neurun/core/src/compiler/ParamChecker.cc @@ -26,7 +26,7 @@ namespace compiler void ParamChecker::operator()() { _model->operations().iterate( - [&](const model::OperationIndex &, const model::Operation &node) { node.accept(*this); }); + [&](const ir::OperationIndex &, const model::Operation &node) { node.accept(*this); }); } } // namespace compiler diff --git a/runtime/neurun/core/src/compiler/SubTensorAnalyzer.cc b/runtime/neurun/core/src/compiler/SubTensorAnalyzer.cc index e3ad8d0..fd14d4a 100644 --- a/runtime/neurun/core/src/compiler/SubTensorAnalyzer.cc +++ b/runtime/neurun/core/src/compiler/SubTensorAnalyzer.cc @@ -19,7 +19,7 @@ #include #include "cpp14/memory.h" -#include "model/OperandIndexSequence.h" +#include "ir/OperandIndexSequence.h" #include "util/logging.h" #include "util/Coordinates.h" diff --git a/runtime/neurun/core/src/compiler/SubTensorAnalyzer.h b/runtime/neurun/core/src/compiler/SubTensorAnalyzer.h index 64591f7..d2a5f78 100644 --- a/runtime/neurun/core/src/compiler/SubTensorAnalyzer.h +++ b/runtime/neurun/core/src/compiler/SubTensorAnalyzer.h @@ -52,7 +52,7 @@ public: * @brief Construct a new SubTensorAnalyzer object * @param[in] ctx Graph operand set */ - 
SubTensorAnalyzer(neurun::model::Operands &ctx) : _ctx{ctx} + SubTensorAnalyzer(ir::Operands &ctx) : _ctx{ctx} { // DO NOTHING } @@ -61,7 +61,7 @@ public: void visit(const model::operation::Concat &) override; private: - neurun::model::Operands &_ctx; // TODO Refactor : Do not update Operands + ir::Operands &_ctx; // TODO Refactor : Do not update Operands }; } // namespace compiler diff --git a/runtime/neurun/core/src/dumper/dot/DotBuilder.h b/runtime/neurun/core/src/dumper/dot/DotBuilder.h index 9b10735..3a21339 100644 --- a/runtime/neurun/core/src/dumper/dot/DotBuilder.h +++ b/runtime/neurun/core/src/dumper/dot/DotBuilder.h @@ -19,16 +19,16 @@ #include -#include "model/Index.h" +#include "ir/Index.h" #include "model/Operation.h" -#include "model/Operand.h" +#include "ir/Operand.h" #include "OperationNode.h" #include "OperandNode.h" #include "DotSubgraphInfo.h" using Operation = neurun::model::Operation; -using Object = neurun::model::Operand; +using Object = neurun::ir::Operand; namespace neurun { diff --git a/runtime/neurun/core/src/dumper/dot/DotDumper.cc b/runtime/neurun/core/src/dumper/dot/DotDumper.cc index 30bf98e..5175f09 100644 --- a/runtime/neurun/core/src/dumper/dot/DotDumper.cc +++ b/runtime/neurun/core/src/dumper/dot/DotDumper.cc @@ -21,7 +21,7 @@ #include "DotBuilder.h" #include "DotSubgraphInfo.h" #include "model/Subgraph.h" -#include "model/OperationIndexMap.h" +#include "ir/OperationIndexMap.h" #include "backend/Backend.h" #include "backend/BackendManager.h" #include "backend/IConfig.h" @@ -47,10 +47,10 @@ void DotDumper::dump(const std::string &tag) auto &operations = _graph.operations(); auto &operands = _graph.operands(); - model::OperationIndexMap> operation_nodes; - std::unordered_map> operand_nodes; + ir::OperationIndexMap> operation_nodes; + std::unordered_map> operand_nodes; - operations.iterate([&](const model::OperationIndex &index, const model::Operation &op) { + operations.iterate([&](const ir::OperationIndex &index, const model::Operation &op) { auto node = nnfw::cpp14::make_unique(index, op); for (auto output : op.getOutputs()) @@ -86,9 +86,9 @@ void DotDumper::dump(const std::string &tag) } }; - util::Set shown_operand_set; + util::Set shown_operand_set; - operands.iterate([&](const model::OperandIndex &index, const model::Operand &object) { + operands.iterate([&](const ir::OperandIndex &index, const ir::Operand &object) { bool showing_cond = false; if (_level == Level::ALL) { @@ -152,7 +152,7 @@ void DotDumper::dump(const std::string &tag) const auto subgraphs = _graph.subgraphs(); if (subgraphs) { - subgraphs->iterate([&](const model::SubgraphIndex &index, const model::Subgraph &subgraph) { + subgraphs->iterate([&](const ir::SubgraphIndex &index, const model::Subgraph &subgraph) { const auto lower_info = _graph.getLowerInfo(index); auto fillcolor = backend_to_fillcolor(lower_info->backend()); std::string label = diff --git a/runtime/neurun/core/src/dumper/dot/DotSubgraphInfo.cc b/runtime/neurun/core/src/dumper/dot/DotSubgraphInfo.cc index 1ea681b..b784846 100644 --- a/runtime/neurun/core/src/dumper/dot/DotSubgraphInfo.cc +++ b/runtime/neurun/core/src/dumper/dot/DotSubgraphInfo.cc @@ -25,8 +25,8 @@ namespace dumper namespace dot { -DotSubgraphInfo::DotSubgraphInfo(const model::SubgraphIndex &index, const model::Subgraph &subgraph, - const util::Set &shown_operands) +DotSubgraphInfo::DotSubgraphInfo(const ir::SubgraphIndex &index, const model::Subgraph &subgraph, + const util::Set &shown_operands) : _index{index} { for (const auto &element : 
subgraph.operations()) diff --git a/runtime/neurun/core/src/dumper/dot/DotSubgraphInfo.h b/runtime/neurun/core/src/dumper/dot/DotSubgraphInfo.h index 771c555..ad16da1 100644 --- a/runtime/neurun/core/src/dumper/dot/DotSubgraphInfo.h +++ b/runtime/neurun/core/src/dumper/dot/DotSubgraphInfo.h @@ -19,7 +19,7 @@ #include -#include "model/Index.h" +#include "ir/Index.h" #include "model/Subgraph.h" #include "util/Set.h" @@ -33,23 +33,23 @@ namespace dot class DotSubgraphInfo { public: - DotSubgraphInfo(const model::SubgraphIndex &index, const model::Subgraph &subgraph, - const util::Set &shown_operands); + DotSubgraphInfo(const ir::SubgraphIndex &index, const model::Subgraph &subgraph, + const util::Set &shown_operands); - model::SubgraphIndex index() const { return _index; } + ir::SubgraphIndex index() const { return _index; } std::string label() const { return _label; } void label(const std::string &val) { _label = val; } std::string fillcolor() const { return _fillcolor; } void fillcolor(const std::string &val) { _fillcolor = val; } - const std::unordered_set &operations() const { return _operations; } - const std::unordered_set &operands() const { return _operands; } + const std::unordered_set &operations() const { return _operations; } + const std::unordered_set &operands() const { return _operands; } private: - model::SubgraphIndex _index; + ir::SubgraphIndex _index; std::string _label; std::string _fillcolor; - std::unordered_set _operations; - std::unordered_set _operands; + std::unordered_set _operations; + std::unordered_set _operands; }; } // namespace dot diff --git a/runtime/neurun/core/src/dumper/dot/OperandNode.cc b/runtime/neurun/core/src/dumper/dot/OperandNode.cc index 141549f..76d2c70 100644 --- a/runtime/neurun/core/src/dumper/dot/OperandNode.cc +++ b/runtime/neurun/core/src/dumper/dot/OperandNode.cc @@ -32,7 +32,7 @@ const std::string Operand::OUTPUT_SHAPE = "doublecircle"; const std::string Operand::OPERAND_SHAPE = "ellipse"; const std::string Operand::BG_COLOR_SCHEME = "set18"; -Operand::Operand(const neurun::model::OperandIndex &index, Type type) +Operand::Operand(const ir::OperandIndex &index, Type type) : Node{"operand" + std::to_string(index.value())} { { diff --git a/runtime/neurun/core/src/dumper/dot/OperandNode.h b/runtime/neurun/core/src/dumper/dot/OperandNode.h index faa8be2..5ebd651 100644 --- a/runtime/neurun/core/src/dumper/dot/OperandNode.h +++ b/runtime/neurun/core/src/dumper/dot/OperandNode.h @@ -27,8 +27,8 @@ #include #include "Node.h" -#include "model/Operand.h" -#include "model/Index.h" +#include "ir/Operand.h" +#include "ir/Index.h" namespace neurun { @@ -66,7 +66,7 @@ public: * @param[in] type Operand type * @param[in] lower_info Operand LowerInfo */ - Operand(const neurun::model::OperandIndex &index, Type type); + Operand(const ir::OperandIndex &index, Type type); private: void addBackendLabel(); diff --git a/runtime/neurun/core/src/dumper/dot/OperationNode.cc b/runtime/neurun/core/src/dumper/dot/OperationNode.cc index 5f599db..e2ea9f2 100644 --- a/runtime/neurun/core/src/dumper/dot/OperationNode.cc +++ b/runtime/neurun/core/src/dumper/dot/OperationNode.cc @@ -32,8 +32,7 @@ namespace dot const std::string Operation::OPERATION_SHAPE = "rect"; const std::string Operation::BG_COLOR_SCHEME = "pastel18"; -Operation::Operation(const neurun::model::OperationIndex &index, - const neurun::model::Operation &node) +Operation::Operation(const ir::OperationIndex &index, const neurun::model::Operation &node) : Node{"operation" + std::to_string(index.value())} { 
setAttribute("label", std::to_string(index.value()) + " : " + node.name()); diff --git a/runtime/neurun/core/src/dumper/dot/OperationNode.h b/runtime/neurun/core/src/dumper/dot/OperationNode.h index b8e609b..db27ab1 100644 --- a/runtime/neurun/core/src/dumper/dot/OperationNode.h +++ b/runtime/neurun/core/src/dumper/dot/OperationNode.h @@ -26,7 +26,7 @@ #include "Node.h" #include "model/Operation.h" -#include "model/Index.h" +#include "ir/Index.h" namespace neurun { @@ -52,7 +52,7 @@ public: * @param[in] index operation index * @param[in] node operation object */ - Operation(const neurun::model::OperationIndex &index, const neurun::model::Operation &node); + Operation(const ir::OperationIndex &index, const neurun::model::Operation &node); }; } // namespace dot diff --git a/runtime/neurun/core/src/exec/DataflowExecutor.cc b/runtime/neurun/core/src/exec/DataflowExecutor.cc index 32e68b0..856ca85 100644 --- a/runtime/neurun/core/src/exec/DataflowExecutor.cc +++ b/runtime/neurun/core/src/exec/DataflowExecutor.cc @@ -37,7 +37,7 @@ int64_t DataflowExecutor::calculateRank(const std::vector &opera auto it = _indexed_ranks->find(element.index); if (it == _indexed_ranks->end()) { - assert(element.node->opcode() == model::OpCode::Permute); + assert(element.node->opcode() == ir::OpCode::Permute); // assign int32_t::max to prevent integer overflow rank += std::numeric_limits::max(); } @@ -87,8 +87,8 @@ DataflowExecutor::DataflowExecutor(const graph::Graph &graph, const model::Subgraphs *subgraphs = _graph.subgraphs(); // Assign jobs convert SubgraphIndex to job index(uint32_t) uint32_t next_job_index = 0; - std::unordered_map subgraph_to_job; - subgraphs->iterate([&](const model::SubgraphIndex &subg_index, const model::Subgraph &) { + std::unordered_map subgraph_to_job; + subgraphs->iterate([&](const ir::SubgraphIndex &subg_index, const model::Subgraph &) { VERBOSE(DataflowExecutor) << "Create a job #" << next_job_index << " with SubgraphIndex " << subg_index.value() << std::endl; _finished_jobs.emplace_back( @@ -100,13 +100,13 @@ DataflowExecutor::DataflowExecutor(const graph::Graph &graph, _output_info.resize(next_job_index); _initial_input_info.resize(next_job_index, 0); - subgraphs->iterate([&](const model::SubgraphIndex &subg_index, const model::Subgraph &subg) { + subgraphs->iterate([&](const ir::SubgraphIndex &subg_index, const model::Subgraph &subg) { auto job_index = subgraph_to_job[subg_index]; for (auto output : subg.getOutputs()) { // Update output and input info subgraphs->iterate( - [&](const model::SubgraphIndex &subg_cur_index, const model::Subgraph &subg_cur) { + [&](const ir::SubgraphIndex &subg_cur_index, const model::Subgraph &subg_cur) { if (subg_cur.getInputs().contains(output)) { auto dep_index = subgraph_to_job[subg_cur_index]; diff --git a/runtime/neurun/core/src/exec/DataflowExecutor.h b/runtime/neurun/core/src/exec/DataflowExecutor.h index 1af3c0d..7e5a5bd 100644 --- a/runtime/neurun/core/src/exec/DataflowExecutor.h +++ b/runtime/neurun/core/src/exec/DataflowExecutor.h @@ -23,8 +23,8 @@ #include "FunctionSequence.h" #include "Job.h" -#include "model/OperandIndexSequence.h" -#include "model/Index.h" +#include "ir/OperandIndexSequence.h" +#include "ir/Index.h" #include "cpp14/memory.h" #include "exec/ExecutorBase.h" @@ -36,7 +36,7 @@ namespace exec class DataflowExecutor : public ExecutorBase { public: - using CodeMap = std::unordered_map>; + using CodeMap = std::unordered_map>; protected: virtual void notify(uint32_t finished_job_id); @@ -88,7 +88,7 @@ protected: std::multimap, 
std::greater> _ready_jobs; /// @brief Which job runs which op and function. - std::unordered_map _job_to_subgraph; + std::unordered_map _job_to_subgraph; }; } // namespace exec diff --git a/runtime/neurun/core/src/exec/Execution.cc b/runtime/neurun/core/src/exec/Execution.cc index bbbbba2..bc7bbd1 100644 --- a/runtime/neurun/core/src/exec/Execution.cc +++ b/runtime/neurun/core/src/exec/Execution.cc @@ -30,7 +30,7 @@ Execution::Execution(const std::shared_ptr &executor) : _executor{exe } // TODO Remove default parameter -void Execution::setInput(const model::IOIndex &index, const void *buffer, size_t length, +void Execution::setInput(const ir::IOIndex &index, const void *buffer, size_t length, ir::Layout layout) { const auto input_index = graph().getInputs().at(index); @@ -46,11 +46,10 @@ void Execution::setInput(const model::IOIndex &index, const void *buffer, size_t } // TODO Remove default parameter -void Execution::setInput(const model::IOIndex &index, const ir::TypeInfo &type, - const ir::Shape &shape, const void *buffer, size_t length, - ir::Layout layout) +void Execution::setInput(const ir::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape, + const void *buffer, size_t length, ir::Layout layout) { - const model::OperandInfo info{shape, type}; + const ir::OperandInfo info{shape, type}; if (length < info.total_size()) { @@ -62,8 +61,7 @@ void Execution::setInput(const model::IOIndex &index, const ir::TypeInfo &type, } // TODO Remove default parameter -void Execution::setOutput(const model::IOIndex &index, void *buffer, size_t length, - ir::Layout layout) +void Execution::setOutput(const ir::IOIndex &index, void *buffer, size_t length, ir::Layout layout) { const auto output_index = graph().getOutputs().at(index); const auto info = graph().operands().at(output_index).info(); @@ -78,10 +76,10 @@ void Execution::setOutput(const model::IOIndex &index, void *buffer, size_t leng } // TODO Remove default parameter -void Execution::setOutput(const model::IOIndex &index, const ir::TypeInfo &type, +void Execution::setOutput(const ir::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape, void *buffer, size_t length, ir::Layout layout) { - const model::OperandInfo info{shape, type}; + const ir::OperandInfo info{shape, type}; if (length < info.total_size()) { @@ -92,14 +90,14 @@ void Execution::setOutput(const model::IOIndex &index, const ir::TypeInfo &type, nnfw::cpp14::make_unique(info, buffer, length, layout); } -void Execution::setInputLayout(const model::IOIndex &index, ir::Layout layout) +void Execution::setInputLayout(const ir::IOIndex &index, ir::Layout layout) { const auto &input_desc = _io_desc.inputs.at(index.value()); _io_desc.inputs.at(index.value()) = nnfw::cpp14::make_unique( input_desc->info, input_desc->buffer, input_desc->size, layout); } -void Execution::setOutputLayout(const model::IOIndex &index, ir::Layout layout) +void Execution::setOutputLayout(const ir::IOIndex &index, ir::Layout layout) { const auto &output_desc = _io_desc.outputs.at(index.value()); _io_desc.outputs.at(index.value()) = nnfw::cpp14::make_unique( diff --git a/runtime/neurun/core/src/exec/ExecutorBase.cc b/runtime/neurun/core/src/exec/ExecutorBase.cc index 2a2bf5e..afd5833 100644 --- a/runtime/neurun/core/src/exec/ExecutorBase.cc +++ b/runtime/neurun/core/src/exec/ExecutorBase.cc @@ -30,7 +30,7 @@ ExecutorBase::ExecutorBase(const graph::Graph &graph, // DO NOTHING } -std::unique_ptr ExecutorBase::source(const model::IOIndex &index, const ir::TypeInfo &type, +std::unique_ptr 
ExecutorBase::source(const ir::IOIndex &index, const ir::TypeInfo &type, const void *buffer, size_t length, ir::Layout io_layout) { @@ -51,7 +51,7 @@ std::unique_ptr ExecutorBase::source(const model::IOIndex &index, const } } -std::unique_ptr ExecutorBase::sink(const model::IOIndex &index, const ir::TypeInfo &type, +std::unique_ptr ExecutorBase::sink(const ir::IOIndex &index, const ir::TypeInfo &type, void *buffer, size_t length, ir::Layout io_layout) { using ir::DataType; @@ -84,8 +84,8 @@ void ExecutorBase::execute(const IODescription &desc) // Set input(s) for (uint32_t n = 0; n < _graph.getInputs().size(); ++n) { - model::IOIndex input_index{n}; - model::OperandIndex index{_graph.getInputs().at(input_index)}; + ir::IOIndex input_index{n}; + ir::OperandIndex index{_graph.getInputs().at(input_index)}; if (desc.inputs.at(n) == nullptr) { @@ -116,7 +116,7 @@ void ExecutorBase::execute(const IODescription &desc) // Get output(s) for (uint32_t n = 0; n < _graph.getOutputs().size(); ++n) { - neurun::model::IOIndex output_index{n}; + ir::IOIndex output_index{n}; // Optional output if (desc.outputs.at(n) == nullptr) { @@ -128,7 +128,7 @@ void ExecutorBase::execute(const IODescription &desc) auto getter = [&](::neurun::backend::operand::ITensor &tensor) { sinks.at(n)->pull(tensor); }; - ::neurun::model::OperandIndex index{_graph.getOutputs().at(output_index)}; + ir::OperandIndex index{_graph.getOutputs().at(output_index)}; auto object = _operand_context->at(index); object->access(getter); diff --git a/runtime/neurun/core/src/exec/ExecutorBase.h b/runtime/neurun/core/src/exec/ExecutorBase.h index 618d14b..40b68be 100644 --- a/runtime/neurun/core/src/exec/ExecutorBase.h +++ b/runtime/neurun/core/src/exec/ExecutorBase.h @@ -53,7 +53,7 @@ public: void execute(const IODescription &desc) final; // Used only in Dataflow and Parallel Executors - void setIndexedRanks(std::shared_ptr> ranks) final + void setIndexedRanks(std::shared_ptr> ranks) final { _indexed_ranks = std::move(ranks); }; @@ -63,13 +63,13 @@ public: void addObserver(std::unique_ptr ref) { _subject.add(std::move(ref)); }; private: - std::unique_ptr source(const model::IOIndex &index, const ir::TypeInfo &type, + std::unique_ptr source(const ir::IOIndex &index, const ir::TypeInfo &type, const void *buffer, size_t length, ir::Layout io_layout); - std::unique_ptr sink(const model::IOIndex &index, const ir::TypeInfo &type, void *buffer, + std::unique_ptr sink(const ir::IOIndex &index, const ir::TypeInfo &type, void *buffer, size_t length, ir::Layout io_layout); template - std::unique_ptr source(const model::IOIndex &index, const void *buffer, size_t length, + std::unique_ptr source(const ir::IOIndex &index, const void *buffer, size_t length, ir::Layout io_layout) { const auto operand_index = _graph.getInputs().at(index); @@ -92,7 +92,7 @@ private: } template - std::unique_ptr sink(const model::IOIndex &index, void *buffer, size_t length, + std::unique_ptr sink(const ir::IOIndex &index, void *buffer, size_t length, ir::Layout io_layout) { const auto operand_index = _graph.getOutputs().at(index); @@ -114,7 +114,7 @@ private: protected: ExecutionObservee _subject; - std::shared_ptr> _indexed_ranks; + std::shared_ptr> _indexed_ranks; const graph::Graph &_graph; std::shared_ptr _operand_context; std::unique_ptr _tensor_mgrs; diff --git a/runtime/neurun/core/src/exec/Job.h b/runtime/neurun/core/src/exec/Job.h index cf3a185..1516b92 100644 --- a/runtime/neurun/core/src/exec/Job.h +++ b/runtime/neurun/core/src/exec/Job.h @@ -20,8 +20,8 @@ #include 
#include "exec/IFunction.h" -#include "model/Index.h" -#include "model/OperandIndexSequence.h" +#include "ir/Index.h" +#include "ir/OperandIndexSequence.h" #include "backend/Backend.h" namespace neurun diff --git a/runtime/neurun/core/src/exec/ParallelExecutor.h b/runtime/neurun/core/src/exec/ParallelExecutor.h index 2f81ef3..4db5946 100644 --- a/runtime/neurun/core/src/exec/ParallelExecutor.h +++ b/runtime/neurun/core/src/exec/ParallelExecutor.h @@ -23,8 +23,8 @@ #include "FunctionSequence.h" #include "Job.h" -#include "model/OperandIndexSequence.h" -#include "model/Index.h" +#include "ir/OperandIndexSequence.h" +#include "ir/Index.h" #include "cpp14/memory.h" #include "exec/DataflowExecutor.h" #include "ParallelScheduler.h" diff --git a/runtime/neurun/core/src/exec/interp/Buffer.h b/runtime/neurun/core/src/exec/interp/Buffer.h index 3528e08..d60b59a 100644 --- a/runtime/neurun/core/src/exec/interp/Buffer.h +++ b/runtime/neurun/core/src/exec/interp/Buffer.h @@ -23,7 +23,7 @@ #include -#include "model/Data.h" +#include "ir/Data.h" namespace neurun { @@ -35,7 +35,7 @@ namespace interp /** * @brief Interface for writable data area */ -class Buffer : public model::Data +class Buffer : public ir::Data { public: /** diff --git a/runtime/neurun/core/src/exec/interp/ExecEnv.h b/runtime/neurun/core/src/exec/interp/ExecEnv.h index b3b6e65..a322fee 100644 --- a/runtime/neurun/core/src/exec/interp/ExecEnv.h +++ b/runtime/neurun/core/src/exec/interp/ExecEnv.h @@ -64,7 +64,7 @@ public: * @param[in] index Tensor index * @param[in] tensor Tensor */ - void assignTensor(const model::OperandIndex index, std::shared_ptr tensor) + void assignTensor(const ir::OperandIndex index, std::shared_ptr tensor) { assert(tensor->bufferRO() != nullptr); _tensors.emplace(index, tensor); @@ -75,17 +75,14 @@ public: * @param[in] index Tensor index * @return Tensor pointer */ - const ITensor *tensorAt(const model::OperandIndex index) const - { - return _tensors.at(index).get(); - } + const ITensor *tensorAt(const ir::OperandIndex index) const { return _tensors.at(index).get(); } /** * @brief Check environment contains tensor * @param[in] index Tensor index * @return @c true if environment contain tensor, otherwise @c false */ - bool contains(const model::OperandIndex index) const + bool contains(const ir::OperandIndex index) const { return (_tensors.find(index) != _tensors.end()); } @@ -97,7 +94,7 @@ public: * @note If already allocated, just return * @TODO More smart allocation policy */ - void allocateIfNeeded(const model::OperandIndex index, const model::OperandInfo &info) + void allocateIfNeeded(const ir::OperandIndex index, const ir::OperandInfo &info) { // already allocated, or constant if (contains(index)) @@ -117,8 +114,8 @@ public: * @param[in] info Operand info * @param[in] index_to_share Tensor index that have data to share */ - void allocateAndShareIfNeeded(const model::OperandIndex index, const model::OperandInfo &info, - const model::OperandIndex index_to_share) + void allocateAndShareIfNeeded(const ir::OperandIndex index, const ir::OperandInfo &info, + const ir::OperandIndex index_to_share) { if (!contains(index_to_share)) { @@ -144,7 +141,7 @@ public: * @param[in] index Tensor index * @note If allocated by outside, just return */ - void freeIfAllocated(const model::OperandIndex index) + void freeIfAllocated(const ir::OperandIndex index) { if (_buffers.find(index) != _buffers.end()) { @@ -156,9 +153,9 @@ private: const graph::Graph &_graph; // Tensor map to use in interpreter // It should map tensors that have 
allocated or assigned buffer pointer - std::unordered_map> _tensors; + std::unordered_map> _tensors; // Tensors allocated by allocateIfNeed (buffer) - std::unordered_set _buffers; + std::unordered_set _buffers; }; } // namespace interp diff --git a/runtime/neurun/core/src/exec/interp/ExecManager.cc b/runtime/neurun/core/src/exec/interp/ExecManager.cc index f5fe7db..92f182c 100644 --- a/runtime/neurun/core/src/exec/interp/ExecManager.cc +++ b/runtime/neurun/core/src/exec/interp/ExecManager.cc @@ -36,23 +36,23 @@ void ExecManager::execute(const IODescription &desc) It may execute divided model but now consider model inference is done at interpreter ***********************************************************************/ - model::OperandIndexMap> tensor_map; + ir::OperandIndexMap> tensor_map; for (uint32_t n = 0; n < _graph.getInputs().size(); n++) { - neurun::model::IOIndex index{n}; + ir::IOIndex index{n}; const auto input_index = _graph.getInputs().at(index); const auto &input = *desc.inputs.at(n); auto input_tensor = std::make_shared(input.info); - input_tensor->setData(std::make_shared( + input_tensor->setData(std::make_shared( reinterpret_cast(input.buffer), input.size)); tensor_map[input_index] = input_tensor; } for (uint32_t n = 0; n < _graph.getOutputs().size(); n++) { - neurun::model::IOIndex index{n}; + ir::IOIndex index{n}; const auto output_index = _graph.getOutputs().at(index); const auto &output = *desc.outputs.at(n); @@ -90,7 +90,7 @@ void ExecManager::execute(const IODescription &desc) } // Allocate constant tensor - _graph.operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) { + _graph.operands().iterate([&](const ir::OperandIndex &ind, const ir::Operand &obj) { if (obj.isConstant()) { VERBOSE(INTERPRETER) << "Allocate and assign constant tensor. 
operand index:" << ind.value() @@ -99,7 +99,7 @@ void ExecManager::execute(const IODescription &desc) auto const_tensor = std::make_shared(obj.info()); // Assume that interpreter's tensor layout is same with model (NHWC) const_tensor->setData( - std::make_shared(obj.data().base(), obj.info().total_size())); + std::make_shared(obj.data().base(), obj.info().total_size())); interp_env->assignTensor(ind, const_tensor); } }); diff --git a/runtime/neurun/core/src/exec/interp/ExecManager.h b/runtime/neurun/core/src/exec/interp/ExecManager.h index 098c110..420bcb9 100644 --- a/runtime/neurun/core/src/exec/interp/ExecManager.h +++ b/runtime/neurun/core/src/exec/interp/ExecManager.h @@ -50,7 +50,7 @@ public: * @return Graph object */ const graph::Graph &graph() final { return _graph; } - void setIndexedRanks(std::shared_ptr>) override{ + void setIndexedRanks(std::shared_ptr>) override{ // Not implemented }; /** @@ -61,7 +61,7 @@ public: private: const graph::Graph &_graph; - model::OperandIndexMap> _tensor_map; + ir::OperandIndexMap> _tensor_map; }; } // namespace interp diff --git a/runtime/neurun/core/src/exec/interp/Interpreter.cc b/runtime/neurun/core/src/exec/interp/Interpreter.cc index 30bc71d..e28f1bd 100644 --- a/runtime/neurun/core/src/exec/interp/Interpreter.cc +++ b/runtime/neurun/core/src/exec/interp/Interpreter.cc @@ -21,7 +21,7 @@ #include "Registration.h" -#include "model/OperandIndexMap.h" +#include "ir/OperandIndexMap.h" #include "util/logging.h" #include "model/OperationVisitor.h" @@ -41,18 +41,18 @@ class OperationExecutor : model::OperationVisitor public: OperationExecutor(ExecEnv *env) : _env{env} { - _kernels[model::OpCode::Add] = getAdd(); - _kernels[model::OpCode::Conv2D] = getConv2D(); - _kernels[model::OpCode::MaxPool2D] = getMaxPool2D(); - _kernels[model::OpCode::Concat] = getConcat(); - _kernels[model::OpCode::AvgPool2D] = getAvgPool2D(); - _kernels[model::OpCode::FullyConnected] = getFullyConnected(); - _kernels[model::OpCode::Softmax] = getSoftMax(); - _kernels[model::OpCode::Reshape] = getReshape(); - _kernels[model::OpCode::DepthwiseConv2D] = getDepthwiseConv(); + _kernels[ir::OpCode::Add] = getAdd(); + _kernels[ir::OpCode::Conv2D] = getConv2D(); + _kernels[ir::OpCode::MaxPool2D] = getMaxPool2D(); + _kernels[ir::OpCode::Concat] = getConcat(); + _kernels[ir::OpCode::AvgPool2D] = getAvgPool2D(); + _kernels[ir::OpCode::FullyConnected] = getFullyConnected(); + _kernels[ir::OpCode::Softmax] = getSoftMax(); + _kernels[ir::OpCode::Reshape] = getReshape(); + _kernels[ir::OpCode::DepthwiseConv2D] = getDepthwiseConv(); } - void execute(const model::OperationIndex &idx) + void execute(const ir::OperationIndex &idx) { const auto nodeName = _env->graph().operations().at(idx).name(); VERBOSE(INTERPRETER) << "Prepare output operands and execute " << nodeName @@ -64,18 +64,18 @@ private: #define OP(InternalName) \ virtual void visit(const model::operation::InternalName &node) override \ { \ - if (_kernels[model::OpCode::InternalName]->prepare != nullptr) \ + if (_kernels[ir::OpCode::InternalName]->prepare != nullptr) \ { \ - _kernels[model::OpCode::InternalName]->prepare(_env, node); \ + _kernels[ir::OpCode::InternalName]->prepare(_env, node); \ } \ - _kernels[model::OpCode::InternalName]->invoke(_env, node); \ + _kernels[ir::OpCode::InternalName]->invoke(_env, node); \ } -#include "model/Operations.lst" +#include "ir/Operations.lst" #undef OP private: ExecEnv *_env; - std::unordered_map _kernels; + std::unordered_map _kernels; }; void Interpreter::run() @@ -83,7 +83,7 @@ void 
Interpreter::run() VERBOSE(INTERPRETER) << "Interpreter is invoked " << std::endl; // operand_stack: save operands prepared to use - std::stack operand_stack; + std::stack operand_stack; // Note: We should push input first, then constant. // We use use-def for find operators ready to execution, @@ -97,7 +97,7 @@ void Interpreter::run() operand_stack.push(ind); } - _env->graph().operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) { + _env->graph().operands().iterate([&](const ir::OperandIndex &ind, const ir::Operand &obj) { if (obj.isConstant()) { VERBOSE(INTERPRETER) << "Constant: Push to operand stack " << ind.value() << std::endl; @@ -107,8 +107,8 @@ void Interpreter::run() }); // Execution - std::unordered_set ready_check; - std::unordered_set executed; + std::unordered_set ready_check; + std::unordered_set executed; OperationExecutor executor{_env.get()}; while (!operand_stack.empty()) { @@ -121,7 +121,7 @@ void Interpreter::run() ready_check.insert(current_operand_index); // Find prepared operations by scan use of current operand - std::stack operation_stack; + std::stack operation_stack; const auto use_operators = _env->graph().operands().at(current_operand_index).getUses(); for (auto use_operator : use_operators.list()) { diff --git a/runtime/neurun/core/src/exec/interp/Tensor.h b/runtime/neurun/core/src/exec/interp/Tensor.h index 947ae5e..c53fd46 100644 --- a/runtime/neurun/core/src/exec/interp/Tensor.h +++ b/runtime/neurun/core/src/exec/interp/Tensor.h @@ -23,7 +23,7 @@ #include "Buffer.h" -#include "model/OperandInfo.h" +#include "ir/OperandInfo.h" #include "backend/operand/ITensor.h" #include "ir/Layout.h" @@ -58,7 +58,7 @@ public: * @brief Return shared pointer for data * @return Data shared pointer */ - virtual std::shared_ptr shareData() const = 0; + virtual std::shared_ptr shareData() const = 0; /** * @brief Set internal/external buffer * @param[in] buffer Buffer pointer @@ -68,7 +68,7 @@ public: * @brief Set data reference (including constant, input) * @param[in] data Data pointer */ - virtual void setData(std::shared_ptr data) = 0; + virtual void setData(std::shared_ptr data) = 0; virtual void releaseData() = 0; virtual size_t total_size() const = 0; @@ -86,7 +86,7 @@ public: * @brief Return TensorInfo * @return TensorInfo */ - virtual const model::OperandInfo &tensorInfo() const = 0; + virtual const ir::OperandInfo &tensorInfo() const = 0; /** * @brief Return number of elements * @return Number of elements @@ -102,7 +102,7 @@ class ROTensor final : public ITensor { public: ROTensor() = delete; - ROTensor(const model::OperandInfo &info) : _info(info) + ROTensor(const ir::OperandInfo &info) : _info(info) { // DO NOTHING } @@ -114,9 +114,9 @@ public: throw std::runtime_error{"Read only tensor"}; } const uint8_t *bufferRO() const override { return _data->base(); } - std::shared_ptr shareData() const override { return _data; } + std::shared_ptr shareData() const override { return _data; } void setBuffer(std::shared_ptr buffer) override { _data = buffer; } - void setData(std::shared_ptr data) override { _data = data; } + void setData(std::shared_ptr data) override { _data = data; } void releaseData() override { _data = nullptr; } size_t total_size() const override { return _info.total_size(); } @@ -126,12 +126,12 @@ public: ir::Layout layout() const override; bool has_padding() const override { return false; } ir::DataType data_type() const override { return _info.typeInfo().type(); } - const model::OperandInfo &tensorInfo() const override { return 
_info; } + const ir::OperandInfo &tensorInfo() const override { return _info; } uint64_t num_elements() const override { return _info.shape().num_elements(); }; private: - const model::OperandInfo _info; - std::shared_ptr _data{nullptr}; + const ir::OperandInfo _info; + std::shared_ptr _data{nullptr}; }; /** @@ -141,7 +141,7 @@ class Tensor final : public ITensor { public: Tensor() = delete; - Tensor(const model::OperandInfo &info) : _info(info) + Tensor(const ir::OperandInfo &info) : _info(info) { // DO NOTHING } @@ -150,9 +150,9 @@ public: uint8_t *buffer() const override { return _buffer->baseWritable(); } std::shared_ptr shareBuffer() const override { return _buffer; }; const uint8_t *bufferRO() const override { return _buffer->base(); } - std::shared_ptr shareData() const override { return _buffer; } + std::shared_ptr shareData() const override { return _buffer; } void setBuffer(std::shared_ptr buffer) override { _buffer = buffer; } - void setData(std::shared_ptr) override + void setData(std::shared_ptr) override { throw std::runtime_error{"Passed data may read-only"}; } @@ -165,11 +165,11 @@ public: ir::Layout layout() const override; bool has_padding() const override { return false; } ir::DataType data_type() const override { return _info.typeInfo().type(); } - const model::OperandInfo &tensorInfo() const override { return _info; } + const ir::OperandInfo &tensorInfo() const override { return _info; } uint64_t num_elements() const override { return _info.shape().num_elements(); }; private: - const model::OperandInfo _info; + const ir::OperandInfo _info; std::shared_ptr _buffer{nullptr}; }; diff --git a/runtime/neurun/core/src/exec/interp/operations/Concat.cc b/runtime/neurun/core/src/exec/interp/operations/Concat.cc index bcd90c5..efaf7c0 100644 --- a/runtime/neurun/core/src/exec/interp/operations/Concat.cc +++ b/runtime/neurun/core/src/exec/interp/operations/Concat.cc @@ -65,7 +65,7 @@ void prepareConcat(ExecEnv *env, const model::Operation &node) auto out_shape = first_tensor->tensorInfo().shape(); out_shape.dim(axis) = out_axis_dimension; env->allocateIfNeeded(out_index, - model::OperandInfo{out_shape, first_tensor->tensorInfo().typeInfo()}); + ir::OperandInfo{out_shape, first_tensor->tensorInfo().typeInfo()}); auto out_tensor = env->tensorAt(out_index); UNUSED_RELEASE(out_tensor); diff --git a/runtime/neurun/core/src/exec/interp/operations/FullyConnected.cc b/runtime/neurun/core/src/exec/interp/operations/FullyConnected.cc index f12f2fe..1e7e143 100644 --- a/runtime/neurun/core/src/exec/interp/operations/FullyConnected.cc +++ b/runtime/neurun/core/src/exec/interp/operations/FullyConnected.cc @@ -61,7 +61,7 @@ void prepareFC(ExecEnv *env, const model::Operation &node) ir::Shape output_shape(2); output_shape.dim(0) = batch_size; output_shape.dim(1) = num_units; - const model::OperandInfo out_info{output_shape, in_tensor->tensorInfo().typeInfo()}; + const ir::OperandInfo out_info{output_shape, in_tensor->tensorInfo().typeInfo()}; env->allocateIfNeeded(out_index, out_info); auto out_tensor = env->tensorAt(out_index); diff --git a/runtime/neurun/core/src/exec/interp/operations/SoftMax.cc b/runtime/neurun/core/src/exec/interp/operations/SoftMax.cc index 5b1be21..666263d 100644 --- a/runtime/neurun/core/src/exec/interp/operations/SoftMax.cc +++ b/runtime/neurun/core/src/exec/interp/operations/SoftMax.cc @@ -83,7 +83,7 @@ void prepareSoftMax(ExecEnv *env, const model::Operation &node) const auto output_shape = env->graph().operands().at(in_index).info().shape(); const auto output_type = 
env->graph().operands().at(out_index).info().typeInfo(); - const model::OperandInfo output_info{output_shape, output_type}; + const ir::OperandInfo output_info{output_shape, output_type}; env->allocateIfNeeded(out_index, output_info); auto out_tensor = env->tensorAt(out_index); diff --git a/runtime/neurun/core/src/ir/Graph.cc b/runtime/neurun/core/src/ir/Graph.cc index cd2e291..ad05c66 100644 --- a/runtime/neurun/core/src/ir/Graph.cc +++ b/runtime/neurun/core/src/ir/Graph.cc @@ -43,31 +43,31 @@ Graph::Graph() = default; Graph::~Graph(void) = default; -model::OperandIndex Graph::addOperand(const ir::Shape &shape, const ir::TypeInfo &type) +ir::OperandIndex Graph::addOperand(const ir::Shape &shape, const ir::TypeInfo &type) { return _operands.emplace(shape, type); } -model::OperationIndex Graph::addOperation(std::unique_ptr &&node) +ir::OperationIndex Graph::addOperation(std::unique_ptr &&node) { assert(isBuildingPhase()); return _operations.push(std::move(node)); } -void Graph::setOperandValue(const model::OperandIndex &ind, std::unique_ptr &&data) +void Graph::setOperandValue(const ir::OperandIndex &ind, std::unique_ptr &&data) { assert(isBuildingPhase()); assert(_operands.exist(ind)); _operands.at(ind).data(std::move(data)); } -void Graph::addInput(const model::OperandIndex &ind) +void Graph::addInput(const ir::OperandIndex &ind) { assert(isBuildingPhase()); _inputs.append(ind); } -void Graph::addOutput(const model::OperandIndex &ind) +void Graph::addOutput(const ir::OperandIndex &ind) { assert(isBuildingPhase()); _outputs.append(ind); @@ -97,9 +97,9 @@ void Graph::lower(void) // Lower { // operand::LowerInfo holder - model::OperandIndexMap> operands_lower_info; + ir::OperandIndexMap> operands_lower_info; - _operands.iterate([&](const model::OperandIndex &index, const model::Operand &object) { + _operands.iterate([&](const ir::OperandIndex &index, const ir::Operand &object) { operands_lower_info[index] = nnfw::cpp14::make_unique(graph::operand::asShape4D(object.shape())); }); @@ -109,7 +109,7 @@ void Graph::lower(void) // Make subgraphs while checking whether a node can be merged into a subgraph. 
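[Editor's note] The lower() hunks above all lean on the same index-keyed visitor idiom: operands().iterate() hands each (index, object) pair to a callback, and a map keyed by the index accumulates per-operand state (here, the LowerInfo that later passes mutate). Below is a minimal, self-contained sketch of that idiom using simplified stand-in types, not the runtime's real Operands/LowerInfo classes:

    #include <cstdint>
    #include <functional>
    #include <memory>
    #include <unordered_map>

    struct LowerInfo
    {
      // Backend/layout decisions for one operand would live here.
    };

    int main()
    {
      const uint32_t num_operands = 4; // pretend the graph holds 4 operands
      std::unordered_map<uint32_t, std::unique_ptr<LowerInfo>> lower_info;

      // Stand-in for Operands::iterate(): visit every index in storage order.
      auto iterate = [&](const std::function<void(uint32_t)> &fn) {
        for (uint32_t index = 0; index < num_operands; ++index)
          fn(index);
      };

      // Give every operand a fresh LowerInfo slot, as the hunk above does.
      iterate([&](uint32_t index) { lower_info[index] = std::make_unique<LowerInfo>(); });

      return lower_info.size() == num_operands ? 0 : 1;
    }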
makeSubgraphs(operands_lower_info); - _subgraphs->iterate([&](const model::SubgraphIndex &, model::Subgraph &subg) { + _subgraphs->iterate([&](const ir::SubgraphIndex &, model::Subgraph &subg) { assert(subg.operations().size() > 0); std::reverse(std::begin(subg.operations()), std::end(subg.operations())); }); @@ -149,23 +149,22 @@ void Graph::lower(void) void Graph::initializeUseDef() { - operations().iterate( - [&](const model::OperationIndex &index, const model::Operation &node) -> void { - auto outputs = node.getOutputs(); - for (auto output : outputs) - { - operands().at(output).appendDef(index); - } + operations().iterate([&](const ir::OperationIndex &index, const model::Operation &node) -> void { + auto outputs = node.getOutputs(); + for (auto output : outputs) + { + operands().at(output).appendDef(index); + } - auto inputs = node.getInputs(); - for (auto input : inputs) - { - operands().at(input).appendUse(index); - } - }); + auto inputs = node.getInputs(); + for (auto input : inputs) + { + operands().at(input).appendUse(index); + } + }); } -const operation::LowerInfo *Graph::getLowerInfo(const model::SubgraphIndex &subg_index) const +const operation::LowerInfo *Graph::getLowerInfo(const ir::SubgraphIndex &subg_index) const { if (!_lower_info_map) return nullptr; @@ -175,14 +174,14 @@ const operation::LowerInfo *Graph::getLowerInfo(const model::SubgraphIndex &subg return itr->second.get(); } -void Graph::setLowerInfo(const model::SubgraphIndex &subg_index, +void Graph::setLowerInfo(const ir::SubgraphIndex &subg_index, std::unique_ptr &&lower_info) { assert(_lower_info_map); _lower_info_map->operation.insert(std::make_pair(subg_index, std::move(lower_info))); } -void Graph::removeLowerInfo(const model::SubgraphIndex &subg_index) +void Graph::removeLowerInfo(const ir::SubgraphIndex &subg_index) { auto &subg_lower_info = _lower_info_map->operation; assert(subg_lower_info.find(subg_index) != subg_lower_info.end()); @@ -196,7 +195,7 @@ void Graph::removeLowerInfo(const model::SubgraphIndex &subg_index) } } -const operand::LowerInfo *Graph::getLowerInfo(const model::OperandIndex &index) const +const operand::LowerInfo *Graph::getLowerInfo(const ir::OperandIndex &index) const { if (!_lower_info_map) return nullptr; @@ -206,7 +205,7 @@ const operand::LowerInfo *Graph::getLowerInfo(const model::OperandIndex &index) return itr->second.get(); } -operand::LowerInfo *Graph::getLowerInfo(const model::OperandIndex &index) +operand::LowerInfo *Graph::getLowerInfo(const ir::OperandIndex &index) { if (!_lower_info_map) return nullptr; @@ -216,20 +215,20 @@ operand::LowerInfo *Graph::getLowerInfo(const model::OperandIndex &index) return itr->second.get(); } -void Graph::setLowerInfo(const model::OperandIndex &index, +void Graph::setLowerInfo(const ir::OperandIndex &index, std::unique_ptr &&lower_info) { assert(_lower_info_map); _lower_info_map->operand.insert(std::make_pair(index, std::move(lower_info))); } -void Graph::removeLowerInfo(const model::OperandIndex &index) +void Graph::removeLowerInfo(const ir::OperandIndex &index) { _lower_info_map->operand.erase(index); } void Graph::makeSubgraphs( - model::OperandIndexMap> &operands_lower_info) + ir::OperandIndexMap> &operands_lower_info) { // if SUBG_MAX_NODE == 0, no limit on nodes of a subgraph const int subg_max_node = util::getConfigInt(util::config::SUBG_MAX_NODE); @@ -237,11 +236,11 @@ void Graph::makeSubgraphs( bool is_profiling = util::getConfigBool(util::config::PROFILING_MODE); model::Subgraph *subg = nullptr; - model::SubgraphIndex 
subg_index; + ir::SubgraphIndex subg_index; // NOTE: The below method appends nodes while making one subgraph if needed. If something better // ways, happy to update this code. - Graph::PostDfsConstIterator().iterate(*this, [&](const model::OperationIndex &node_index, + Graph::PostDfsConstIterator().iterate(*this, [&](const ir::OperationIndex &node_index, const model::Operation &node) { // LowerInfo for in/output operands auto backend = _backend_resolver->getBackend(node_index); @@ -293,7 +292,7 @@ void Graph::makeSubgraphs( lower_info->addDefPermuteFactor(operand::PermuteFactor{backend, backend_layout}); } - if (node.opcode() == model::OpCode::Split) + if (node.opcode() == ir::OpCode::Split) { // Ideally this condition must be like 'node.getOutputs().size() > 1' but // this is true for HashtableLookup also. TODO: Come up with more clever solution @@ -380,7 +379,7 @@ void Graph::makeSubgraphs( } void Graph::manipulateLowerInfo( - model::OperandIndexMap> &operands_lower_info) + ir::OperandIndexMap> &operands_lower_info) { const auto default_backend = backend::BackendManager::get().getDefault(); for (auto index : _inputs) @@ -404,7 +403,7 @@ void Graph::manipulateLowerInfo( } // Set LowerInfo for each operand from the operand::LowerInfo holder - _operands.iterate([&](const model::OperandIndex &index, model::Operand &) { + _operands.iterate([&](const ir::OperandIndex &index, ir::Operand &) { setLowerInfo(index, std::move(operands_lower_info[index])); }); } @@ -416,7 +415,7 @@ void Graph::dumpLowerInfo() std::map dumps; - _operands.iterate([&](const model::OperandIndex &index, model::Operand &object) { + _operands.iterate([&](const ir::OperandIndex &index, ir::Operand &object) { std::stringstream sstream; if (!getLowerInfo(index)->def_factors().empty() || !getLowerInfo(index)->use_factors().empty()) { @@ -431,7 +430,7 @@ void Graph::dumpLowerInfo() return "{ " + str + "}"; }; - auto operation_index_to_string = [](const model::OperationIndexList &operations) { + auto operation_index_to_string = [](const ir::OperationIndexList &operations) { std::string str; for (auto op : operations.list()) { @@ -475,8 +474,8 @@ void Graph::dumpLowerInfo() } } -bool Graph::mergeable(const model::SubgraphIndex &subg_index, - const model::OperationIndex &node_index, ir::Layout layout) +bool Graph::mergeable(const ir::SubgraphIndex &subg_index, const ir::OperationIndex &node_index, + ir::Layout layout) { // Are they mergeable? // 1. the same backend id and layout? 
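[Editor's note] The "same backend id and layout" comment above is the first of the mergeable() checks; connectivity between the subgraph and the candidate node is verified separately. A simplified sketch of that first check only, with BackendId and Layout as illustrative stand-ins for the runtime's real types:

    #include <string>

    enum class Layout { NHWC, NCHW };

    struct SubgraphLowerInfo
    {
      std::string backend_id; // which backend will run these nodes
      Layout layout;          // tensor layout the backend kernels expect
    };

    // Two candidates may share a subgraph only when they agree on both
    // the backend that will execute them and the layout they assume.
    bool sameBackendAndLayout(const SubgraphLowerInfo &subg, const SubgraphLowerInfo &node)
    {
      return subg.backend_id == node.backend_id && subg.layout == node.layout;
    }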
@@ -547,9 +546,9 @@ bool Graph::mergeable(const model::SubgraphIndex &subg_index, return false; } -model::SubgraphIndex Graph::appendFreshSingleOpSubgraph(const model::OperationIndex &node_index, - const model::Operation &node, - ir::Layout layout) +ir::SubgraphIndex Graph::appendFreshSingleOpSubgraph(const ir::OperationIndex &node_index, + const model::Operation &node, + ir::Layout layout) { // Create a fresh subgraph with one operation, and append it to subgraphs // Create a fresh subgraph @@ -589,7 +588,7 @@ template void Graph::DefaultIterator::iterate(GraphRef graph, const IterFn &fn) const { graph.operations().iterate( - [&](const model::OperationIndex &index, NodeRef node) -> void { fn(index, node); }); + [&](const ir::OperationIndex &index, NodeRef node) -> void { fn(index, node); }); } // @@ -601,12 +600,12 @@ void Graph::PostDfsIterator::iterate(GraphRef graph, const IterFn &fn) { assert(!graph.isBuildingPhase()); // Restrict iteration condition - model::OperationIndexMap visited; + ir::OperationIndexMap visited; graph.operations().iterate( - [&](const model::OperationIndex &index, NodeRef) { visited[index] = false; }); + [&](const ir::OperationIndex &index, NodeRef) { visited[index] = false; }); - std::function dfs_recursive = - [&](const model::OperationIndex &index, NodeRef node) -> void { + std::function dfs_recursive = + [&](const ir::OperationIndex &index, NodeRef node) -> void { if (visited[index]) return; visited[index] = true; @@ -626,9 +625,8 @@ void Graph::PostDfsIterator::iterate(GraphRef graph, const IterFn &fn) graph.operations().iterate(dfs_recursive); // All of the operations(nodes) must have been visited. - assert( - std::all_of(visited.begin(), visited.end(), - [](const std::pair &v) { return v.second; })); + assert(std::all_of(visited.begin(), visited.end(), + [](const std::pair &v) { return v.second; })); } void Graph::setBackendResolver(std::unique_ptr &&br) diff --git a/runtime/neurun/core/src/model/OpCode.cc b/runtime/neurun/core/src/ir/OpCode.cc similarity index 92% rename from runtime/neurun/core/src/model/OpCode.cc rename to runtime/neurun/core/src/ir/OpCode.cc index 48b80b9..dc50e0d 100644 --- a/runtime/neurun/core/src/model/OpCode.cc +++ b/runtime/neurun/core/src/ir/OpCode.cc @@ -14,25 +14,25 @@ * limitations under the License. */ -#include "model/OpCode.h" +#include "ir/OpCode.h" #include namespace neurun { -namespace model +namespace ir { const char *toString(OpCode opcode) { static const std::unordered_map map{{OpCode::Invalid, "Invalid"}, #define OP(Name) {OpCode::Name, #Name}, -#include "model/Operations.lst" +#include "ir/Operations.lst" #undef OP {OpCode::Subgraph, "Subgraph"}, {OpCode::COUNT, "COUNT"}}; return map.at(opcode); } -} // namespace model +} // namespace ir } // namespace neurun diff --git a/runtime/neurun/core/src/model/Operand.cc b/runtime/neurun/core/src/ir/Operand.cc similarity index 79% rename from runtime/neurun/core/src/model/Operand.cc rename to runtime/neurun/core/src/ir/Operand.cc index d5970cd..f16ff04 100644 --- a/runtime/neurun/core/src/model/Operand.cc +++ b/runtime/neurun/core/src/ir/Operand.cc @@ -14,11 +14,11 @@ * limitations under the License. 
*/ -#include "model/Operand.h" +#include "ir/Operand.h" namespace neurun { -namespace model +namespace ir { size_t Operand::operandSize(void) const @@ -38,11 +38,11 @@ size_t Operand::operandSize(void) const return element_size * elements; } -void Operand::appendUse(const ::neurun::model::OperationIndex &idx) { _uses.append(idx); } +void Operand::appendUse(const OperationIndex &idx) { _uses.append(idx); } -void Operand::removeUse(const ::neurun::model::OperationIndex &idx) { _uses.remove(idx); } +void Operand::removeUse(const OperationIndex &idx) { _uses.remove(idx); } -void Operand::appendDef(const ::neurun::model::OperationIndex &idx) +void Operand::appendDef(const OperationIndex &idx) { assert(!isConstant()); assert(_def.size() == 0); @@ -50,7 +50,7 @@ void Operand::appendDef(const ::neurun::model::OperationIndex &idx) _def.append(idx); } -void Operand::removeDef(const ::neurun::model::OperationIndex &idx) +void Operand::removeDef(const OperationIndex &idx) { assert(_def.contains(idx)); @@ -66,5 +66,5 @@ const graph::operand::ParentInfo *Operand::parent_info() const { return _parent_ graph::operand::ParentInfo *Operand::parent_info() { return _parent_info.get(); } -} // namespace model +} // namespace ir } // namespace neurun diff --git a/runtime/neurun/core/src/model/OperandIndexSequence.cc b/runtime/neurun/core/src/ir/OperandIndexSequence.cc similarity index 94% rename from runtime/neurun/core/src/model/OperandIndexSequence.cc rename to runtime/neurun/core/src/ir/OperandIndexSequence.cc index a9454df..3024441 100644 --- a/runtime/neurun/core/src/model/OperandIndexSequence.cc +++ b/runtime/neurun/core/src/ir/OperandIndexSequence.cc @@ -14,13 +14,13 @@ * limitations under the License. */ -#include "model/OperandIndexSequence.h" +#include "ir/OperandIndexSequence.h" #include namespace neurun { -namespace model +namespace ir { OperandIndexSequence::OperandIndexSequence(std::initializer_list list) : _set(list) @@ -54,5 +54,5 @@ void OperandIndexSequence::replace(const OperandIndex &from, const OperandIndex std::replace(_set.begin(), _set.end(), from, to); } -} // namespace model +} // namespace ir } // namespace neurun diff --git a/runtime/neurun/core/src/model/OperationIndexList.cc b/runtime/neurun/core/src/ir/OperationIndexList.cc similarity index 84% rename from runtime/neurun/core/src/model/OperationIndexList.cc rename to runtime/neurun/core/src/ir/OperationIndexList.cc index e2c077e..261cc5c 100644 --- a/runtime/neurun/core/src/model/OperationIndexList.cc +++ b/runtime/neurun/core/src/ir/OperationIndexList.cc @@ -14,13 +14,13 @@ * limitations under the License. 
*/ -#include "model/OperationIndexList.h" +#include "ir/OperationIndexList.h" #include namespace neurun { -namespace model +namespace ir { OperationIndexList::OperationIndexList(std::initializer_list list) : _list(list) @@ -28,10 +28,10 @@ OperationIndexList::OperationIndexList(std::initializer_list lis // DO NOTHING } -bool OperationIndexList::contains(const ::neurun::model::OperationIndex &index) const +bool OperationIndexList::contains(const OperationIndex &index) const { return std::find(_list.begin(), _list.end(), index) != _list.end(); } -} // namespace model +} // namespace ir } // namespace neurun diff --git a/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.cc b/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.cc index 7243b81..b662131 100644 --- a/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.cc +++ b/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.cc @@ -28,8 +28,7 @@ namespace graph namespace pass { -void ConstantInsertionPass::callback(const model::OperationIndex &node_index, - model::Operation &node) +void ConstantInsertionPass::callback(const ir::OperationIndex &node_index, model::Operation &node) { const auto &subgraph_index = _graph.subgraphs().getOperation(node_index); const auto subg_lower_info = _graph.getLowerInfo(subgraph_index); @@ -48,8 +47,8 @@ void ConstantInsertionPass::callback(const model::OperationIndex &node_index, { auto new_object = object; // TODO Remove const_case - const_cast &>(new_object.getDef().list()).clear(); - const_cast &>(new_object.getUses().list()).clear(); + const_cast &>(new_object.getDef().list()).clear(); + const_cast &>(new_object.getUses().list()).clear(); const auto new_index = _graph.operands().emplace(new_object); _replace_operands_map[key] = new_index; diff --git a/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.h b/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.h index c7a5bd9..245a047 100644 --- a/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.h +++ b/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.h @@ -18,7 +18,7 @@ #define __NEURUN_GRAPH_PASS_CONSTANT_INSERTION_PASS_H__ #include -#include +#include #include "OperationPass.h" #include #include @@ -39,12 +39,12 @@ public: std::string id() final { return "ConstantInsertionPass"; } public: - void callback(const model::OperationIndex &index, model::Operation &node) final; + void callback(const ir::OperationIndex &index, model::Operation &node) final; private: struct ReplaceKey { - model::OperandIndex index; + ir::OperandIndex index; graph::operand::PermuteFactor factor; bool operator==(const ReplaceKey &other) const @@ -61,12 +61,12 @@ private: std::size_t operator()(const ReplaceKey &key) const noexcept { using std::hash; - return hash()(key.index) ^ + return hash()(key.index) ^ (hash()(key.factor) << 1); } }; - std::unordered_map _replace_operands_map; + std::unordered_map _replace_operands_map; }; } // namespace pass diff --git a/runtime/neurun/core/src/ir/pass/OperandPass.cc b/runtime/neurun/core/src/ir/pass/OperandPass.cc index cec131e..132ffcf 100644 --- a/runtime/neurun/core/src/ir/pass/OperandPass.cc +++ b/runtime/neurun/core/src/ir/pass/OperandPass.cc @@ -28,7 +28,7 @@ namespace pass void OperandPass::run() { _graph.operands().iterate( - [&](const model::OperandIndex &index, model::Operand &object) { callback(index, object); }); + [&](const ir::OperandIndex &index, ir::Operand &object) { callback(index, object); }); } } // namespace pass diff --git a/runtime/neurun/core/src/ir/pass/OperandPass.h 
b/runtime/neurun/core/src/ir/pass/OperandPass.h index 4b25929..b112256 100644 --- a/runtime/neurun/core/src/ir/pass/OperandPass.h +++ b/runtime/neurun/core/src/ir/pass/OperandPass.h @@ -18,14 +18,14 @@ #define __NEURUN_GRAPH_PASS_OPERAND_PASS_H__ #include "Pass.h" -#include "model/Index.h" +#include "ir/Index.h" namespace neurun { -namespace model +namespace ir { class Operand; -} // namespace graph +} // namespace ir } // namespace neurun namespace neurun @@ -43,7 +43,7 @@ public: public: std::string id() override = 0; void run() override final; - virtual void callback(const model::OperandIndex &i, model::Operand &o) = 0; + virtual void callback(const ir::OperandIndex &i, ir::Operand &o) = 0; }; } // namespace pass diff --git a/runtime/neurun/core/src/ir/pass/OperationPass.cc b/runtime/neurun/core/src/ir/pass/OperationPass.cc index b726dec..1f3bd41 100644 --- a/runtime/neurun/core/src/ir/pass/OperationPass.cc +++ b/runtime/neurun/core/src/ir/pass/OperationPass.cc @@ -16,7 +16,7 @@ #include "OperationPass.h" -#include "model/Index.h" +#include "ir/Index.h" #include "model/Operation.h" #include "ir/Graph.h" @@ -30,7 +30,7 @@ namespace pass void OperationPass::run() { _graph.operations().iterate( - [&](const model::OperationIndex &index, model::Operation &node) { callback(index, node); }); + [&](const ir::OperationIndex &index, model::Operation &node) { callback(index, node); }); } } // namespace pass diff --git a/runtime/neurun/core/src/ir/pass/OperationPass.h b/runtime/neurun/core/src/ir/pass/OperationPass.h index 51cf328..3bca503 100644 --- a/runtime/neurun/core/src/ir/pass/OperationPass.h +++ b/runtime/neurun/core/src/ir/pass/OperationPass.h @@ -23,7 +23,7 @@ #define __NEURUN_GRAPH_PASS_OPERATION_PASS_H__ #include "Pass.h" -#include "model/Index.h" +#include "ir/Index.h" namespace neurun { @@ -61,7 +61,7 @@ public: * @param index is the index of a node in graph * @param node is the node in graph */ - virtual void callback(const model::OperationIndex &index, model::Operation &node) = 0; + virtual void callback(const ir::OperationIndex &index, model::Operation &node) = 0; /** * @brief Run the pass diff --git a/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.cc b/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.cc index e848c88..71a6853 100644 --- a/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.cc +++ b/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.cc @@ -16,7 +16,7 @@ #include "PermutationEliminationPass.h" -#include "model/Operand.h" +#include "ir/Operand.h" #include "ir/operand/LowerInfo.h" #include "ir/Graph.h" #include "backend/IConfig.h" @@ -29,8 +29,7 @@ namespace graph { namespace pass { -void PermutationEliminationPass::callback(const model::OperandIndex &inp_index, - model::Operand &object) +void PermutationEliminationPass::callback(const ir::OperandIndex &inp_index, ir::Operand &object) { if (_graph.getInputs().contains(inp_index)) { @@ -42,8 +41,8 @@ void PermutationEliminationPass::callback(const model::OperandIndex &inp_index, } } -void PermutationEliminationPass::eliminateInput(const model::OperandIndex &inp_index, - model::Operand &object) +void PermutationEliminationPass::eliminateInput(const ir::OperandIndex &inp_index, + ir::Operand &object) { auto &model_inputs = _graph.getInputs(); @@ -91,8 +90,8 @@ void PermutationEliminationPass::eliminateInput(const model::OperandIndex &inp_i } } -void PermutationEliminationPass::eliminateOutput(const model::OperandIndex &out_index, - model::Operand &object) +void 
PermutationEliminationPass::eliminateOutput(const ir::OperandIndex &out_index, + ir::Operand &object) { auto &model_outputs = _graph.getOutputs(); @@ -147,7 +146,7 @@ void PermutationEliminationPass::eliminateOutput(const model::OperandIndex &out_ } bool PermutationEliminationPass::isPermuteLayerToEliminate( - const model::OperandIndexSequence &inp_indexes, const model::OperandIndexSequence &out_indexes, + const ir::OperandIndexSequence &inp_indexes, const ir::OperandIndexSequence &out_indexes, bool is_for_model_input) { auto input_def_factors = _graph.getLowerInfo(inp_indexes.at(0))->def_factors(); diff --git a/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.h b/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.h index 332eeb6..e95418f 100644 --- a/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.h +++ b/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.h @@ -18,8 +18,8 @@ #define __NEURUN_GRAPH_PASS_PERMUTATION_ELIMINATION_PASS_H__ #include "OperandPass.h" -#include "model/Operand.h" -#include "model/OperandIndexSequence.h" +#include "ir/Operand.h" +#include "ir/OperandIndexSequence.h" namespace neurun { @@ -36,7 +36,7 @@ public: public: std::string id() override { return "PermutationEliminationPass"; } - void callback(const model::OperandIndex &index, model::Operand &object) override; + void callback(const ir::OperandIndex &index, ir::Operand &object) override; private: /** @@ -50,7 +50,7 @@ private: * * @return */ - void eliminateInput(const model::OperandIndex &inp_index, model::Operand &object); + void eliminateInput(const ir::OperandIndex &inp_index, ir::Operand &object); /** * @brief Remove Permute operation that permutates output of a model @@ -63,7 +63,7 @@ private: * * @return */ - void eliminateOutput(const model::OperandIndex &out_index, model::Operand &object); + void eliminateOutput(const ir::OperandIndex &out_index, ir::Operand &object); /** * @brief Determine if passed operands are permute layer's input and output, that must be @@ -75,8 +75,8 @@ private: * * @return if it is permutation layer */ - bool isPermuteLayerToEliminate(const model::OperandIndexSequence &inp_indexes, - const model::OperandIndexSequence &out_indexes, + bool isPermuteLayerToEliminate(const ir::OperandIndexSequence &inp_indexes, + const ir::OperandIndexSequence &out_indexes, bool is_for_model_input); }; diff --git a/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.cc b/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.cc index 13a751b..190916f 100644 --- a/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.cc +++ b/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.cc @@ -20,7 +20,7 @@ #include #include -#include "model/Operand.h" +#include "ir/Operand.h" #include "ir/operation/LowerInfo.h" #include "ir/Graph.h" #include "backend/IConfig.h" @@ -37,7 +37,7 @@ namespace graph namespace pass { -void PermutationInsertionPass::callback(const model::OperandIndex &index, model::Operand &object) +void PermutationInsertionPass::callback(const ir::OperandIndex &index, ir::Operand &object) { auto &&operand_li = _graph.getLowerInfo(index); assert(operand_li); @@ -49,10 +49,10 @@ void PermutationInsertionPass::callback(const model::OperandIndex &index, model: return; } - std::list permute_indexes; + std::list permute_indexes; // Build a map for all necessary type of operands - std::unordered_map factor_to_index; + std::unordered_map factor_to_index; { assert(operand_li->def_factors().size() == 1); for (auto factor : operand_li->def_factors()) @@ 
-75,7 +75,7 @@ void PermutationInsertionPass::callback(const model::OperandIndex &index, model: // Update operations' input that uses this operand { - std::list remove_list; + std::list remove_list; auto uses = object.getUses(); for (auto use : uses.list()) @@ -118,9 +118,8 @@ void PermutationInsertionPass::callback(const model::OperandIndex &index, model: } } -model::OperationIndex -PermutationInsertionPass::insertPermute(const model::OperandIndex &operand_index, - const operand::PermuteFactor &factor) +ir::OperationIndex PermutationInsertionPass::insertPermute(const ir::OperandIndex &operand_index, + const operand::PermuteFactor &factor) { assert(!_graph.isBuildingPhase()); diff --git a/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.h b/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.h index a0bc0bf..7269e42 100644 --- a/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.h +++ b/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.h @@ -18,7 +18,7 @@ #define __NEURUN_GRAPH_PASS_PERMUTATION_INSERTION_PASS_H__ #include "OperandPass.h" -#include "model/Operand.h" //for model::OperationIndex +#include "ir/Operand.h" //for OperationIndex #include "backend/BackendManager.h" #include "ir/operand/PermuteFactor.h" @@ -36,7 +36,7 @@ public: public: std::string id() override { return "PermutationInsertionPass"; } - void callback(const model::OperandIndex &index, model::Operand &object) override; + void callback(const ir::OperandIndex &index, ir::Operand &object) override; /** * @brief Insert Permute operation that has given operand as input @@ -44,10 +44,10 @@ public: * @param operand_index is the target operand index for the insertion * @param factor is the output operand's backend type and layout * - * @return model::OperationIndex + * @return OperationIndex */ - model::OperationIndex insertPermute(const model::OperandIndex &operand_index, - const operand::PermuteFactor &factor); + ir::OperationIndex insertPermute(const ir::OperandIndex &operand_index, + const operand::PermuteFactor &factor); private: }; diff --git a/runtime/neurun/core/src/ir/pass/PermutationOperationPass.cc b/runtime/neurun/core/src/ir/pass/PermutationOperationPass.cc index 9faeb45..ad05c1a 100644 --- a/runtime/neurun/core/src/ir/pass/PermutationOperationPass.cc +++ b/runtime/neurun/core/src/ir/pass/PermutationOperationPass.cc @@ -27,7 +27,7 @@ namespace graph namespace pass { -void PermutationOperationPass::callback(const model::OperationIndex &, model::Operation &node) +void PermutationOperationPass::callback(const ir::OperationIndex &, model::Operation &node) { node.accept(*this); }; @@ -74,7 +74,7 @@ void PermutationOperationPass::changeToKeepLayout(const model::Operation &node) below_subg.setInputs(it->node->getInputs()); below_subg.setOutputs(it->node->getOutputs()); - std::vector remove_list; + std::vector remove_list; remove_list.emplace_back(it->index); while (++it != above_subg.end()) { diff --git a/runtime/neurun/core/src/ir/pass/PermutationOperationPass.h b/runtime/neurun/core/src/ir/pass/PermutationOperationPass.h index bc3ca0d..d228235 100644 --- a/runtime/neurun/core/src/ir/pass/PermutationOperationPass.h +++ b/runtime/neurun/core/src/ir/pass/PermutationOperationPass.h @@ -36,7 +36,7 @@ public: std::string id() final { return "PermutationOperationPass"; } public: - void callback(const model::OperationIndex &i, model::Operation &n) final; + void callback(const ir::OperationIndex &i, model::Operation &n) final; public: void visit(const model::operation::FullyConnected &) final; diff 
--git a/runtime/neurun/core/src/ir/verifier/Verifier.cc b/runtime/neurun/core/src/ir/verifier/Verifier.cc
index c1e6d20..6c9e9ed 100644
--- a/runtime/neurun/core/src/ir/verifier/Verifier.cc
+++ b/runtime/neurun/core/src/ir/verifier/Verifier.cc
@@ -17,7 +17,7 @@
 #include "Verifier.h"

 #include "ir/Graph.h"
-#include "model/OperationIndexMap.h"
+#include "ir/OperationIndexMap.h"

 #include "util/logging.h"

@@ -37,14 +37,13 @@ bool DAGChecker::verify(const Graph &graph) const
   auto &operations = graph.operations();
   bool cyclic = false;

-  model::OperationIndexMap<bool> visited;
-  operations.iterate([&](const model::OperationIndex &index, const model::Operation &) {
-    visited[index] = false;
-  });
-  model::OperationIndexMap<bool> on_stack = visited; // Copy from visited
+  ir::OperationIndexMap<bool> visited;
+  operations.iterate(
+      [&](const ir::OperationIndex &index, const model::Operation &) { visited[index] = false; });
+  ir::OperationIndexMap<bool> on_stack = visited; // Copy from visited

-  std::function<void(const model::OperationIndex &, const model::Operation &)> dfs_recursive =
-    [&](const model::OperationIndex &index, const model::Operation &node) -> void {
+  std::function<void(const ir::OperationIndex &, const model::Operation &)> dfs_recursive =
+      [&](const ir::OperationIndex &index, const model::Operation &node) -> void {
     if (on_stack[index])
       cyclic = true;
     if (visited[index])
@@ -77,7 +76,7 @@ bool EdgeConsistencyChecker::verify(const Graph &graph) const
 {
   auto &operations = graph.operations();
   uint32_t mismatches = 0;
-  operations.iterate([&](const model::OperationIndex &index, const model::Operation &node) {
+  operations.iterate([&](const ir::OperationIndex &index, const model::Operation &node) {
     for (auto operand_index : node.getInputs())
     {
       auto &operand = graph.operands().at(operand_index);
diff --git a/runtime/neurun/core/src/model/OperandConstraint.cc b/runtime/neurun/core/src/model/OperandConstraint.cc
deleted file mode 100644
index 2730f71..0000000
--- a/runtime/neurun/core/src/model/OperandConstraint.cc
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -#include "model/OperandConstraint.h" - -namespace neurun -{ -namespace model -{ -namespace operation -{ - -} // namespace operation -} // namespace model -} // namespace neurun diff --git a/runtime/neurun/core/src/model/Subgraph.cc b/runtime/neurun/core/src/model/Subgraph.cc index e1028ad..b86b72e 100644 --- a/runtime/neurun/core/src/model/Subgraph.cc +++ b/runtime/neurun/core/src/model/Subgraph.cc @@ -67,7 +67,7 @@ void Subgraph::remove(const OperationIndex &index) } } -bool Subgraph::exist(const neurun::model::OperationIndex &index) const +bool Subgraph::exist(const OperationIndex &index) const { for (const auto &element : _operations) { diff --git a/runtime/neurun/frontend/base_loader/base_loader.h b/runtime/neurun/frontend/base_loader/base_loader.h index 2578fee..17ab326 100644 --- a/runtime/neurun/frontend/base_loader/base_loader.h +++ b/runtime/neurun/frontend/base_loader/base_loader.h @@ -69,9 +69,9 @@ protected: ir::DataType tensorTypeToDataType(TensorType type); // Create operands form tflite::Tensor - model::OperandIndex loadOperand(const Tensor *tensor); - void loadOperationIO(const Operator *op, model::OperandIndexSequence &inputs, - model::OperandIndexSequence &outputs); + ir::OperandIndex loadOperand(const Tensor *tensor); + void loadOperationIO(const Operator *op, ir::OperandIndexSequence &inputs, + ir::OperandIndexSequence &outputs); // Create operations from Operator void loadOperation(const Operator *op); // Load Strides and Paddings from options to param @@ -124,7 +124,7 @@ protected: graph::Graph &_graph; const Model *_model; // Maps Tensor indices to neurun Operands. - std::vector _tensor_to_operand; + std::vector _tensor_to_operand; }; template @@ -194,7 +194,7 @@ BaseLoader::BaseLoader::tensorTypeToDataType(const } template -model::OperandIndex BaseLoader::loadOperand(const Tensor *tensor) +ir::OperandIndex BaseLoader::loadOperand(const Tensor *tensor) { ir::Shape shape; // Shape @@ -244,7 +244,7 @@ model::OperandIndex BaseLoader::loadOperand(const const auto *data = _model->buffers()->Get(tensor->buffer())->data(); if (data != nullptr) { - auto ptr = nnfw::cpp14::make_unique(data->data(), data->size()); + auto ptr = nnfw::cpp14::make_unique(data->data(), data->size()); _graph.setOperandValue(operand_index, std::move(ptr)); } @@ -259,8 +259,8 @@ model::OperandIndex BaseLoader::loadOperand(const template void BaseLoader::loadOperationIO(const Operator *op, - model::OperandIndexSequence &inputs, - model::OperandIndexSequence &outputs) + ir::OperandIndexSequence &inputs, + ir::OperandIndexSequence &outputs) { for (const std::int32_t idx : *op->inputs()) { @@ -307,8 +307,8 @@ void BaseLoader::loadPool2D(Param ¶m, template void BaseLoader::loadConv2D(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -324,8 +324,8 @@ void BaseLoader::loadConv2D(const Operator *op) template void BaseLoader::loadDepthwiseConv2D(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -344,8 +344,8 @@ void BaseLoader::loadDepthwiseConv2D(const Operato template void BaseLoader::loadTransposeConv(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; 
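[Editor's note] Every load*() hunk in this file repeats the same skeleton: declare two ir::OperandIndexSequence values, fill them via loadOperationIO(), read operator options into a Param, then append the operation to the graph. A condensed sketch of the I/O-gathering step, with flat stand-in types instead of the generated flatbuffer Operator table and the loader's _tensor_to_operand map:

    #include <cstdint>
    #include <vector>

    using OperandIndex = uint32_t;
    using OperandIndexSequence = std::vector<OperandIndex>;

    struct Operator // stand-in for the generated flatbuffer table
    {
      std::vector<int32_t> inputs;
      std::vector<int32_t> outputs;
    };

    // Collect the operator's tensor indices; the real loader additionally
    // translates each tensor index to the operand index it created earlier.
    void loadOperationIO(const Operator &op, OperandIndexSequence &inputs,
                         OperandIndexSequence &outputs)
    {
      for (int32_t index : op.inputs)
        inputs.push_back(static_cast<OperandIndex>(index));
      for (int32_t index : op.outputs)
        outputs.push_back(static_cast<OperandIndex>(index));
    }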
loadOperationIO(op, inputs, outputs); @@ -360,8 +360,8 @@ void BaseLoader::loadTransposeConv(const Operator template void BaseLoader::loadAvgPool2D(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -377,8 +377,8 @@ void BaseLoader::loadAvgPool2D(const Operator *op) template void BaseLoader::loadReshape(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -392,8 +392,8 @@ void BaseLoader::loadReshape(const Operator *op) template void BaseLoader::loadSoftmax(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -409,8 +409,8 @@ void BaseLoader::loadSoftmax(const Operator *op) template void BaseLoader::loadMaxPool2D(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -426,8 +426,8 @@ void BaseLoader::loadMaxPool2D(const Operator *op) template void BaseLoader::loadConcatenation(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -444,8 +444,8 @@ void BaseLoader::loadConcatenation(const Operator template void BaseLoader::loadInstanceNorm(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -464,8 +464,8 @@ void BaseLoader::loadInstanceNorm(const Operator * template void BaseLoader::loadFC(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -483,8 +483,8 @@ void BaseLoader::loadFC(const Operator *op) template void BaseLoader::loadAdd(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -500,8 +500,8 @@ void BaseLoader::loadAdd(const Operator *op) template void BaseLoader::loadSub(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -517,8 +517,8 @@ void BaseLoader::loadSub(const Operator *op) template void BaseLoader::loadMul(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -534,8 +534,8 @@ void BaseLoader::loadMul(const Operator *op) template void BaseLoader::loadDiv(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -552,8 +552,8 @@ template void BaseLoader::loadPack(const 
Operator *op) { // This runtime_error will be removed if one of the backends supports this operation - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -569,8 +569,8 @@ void BaseLoader::loadPack(const Operator *op) template <typename LoaderDomain, typename SpecificLoader> void BaseLoader<LoaderDomain, SpecificLoader>::loadRelu(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -581,8 +581,8 @@ void BaseLoader::loadRelu(const Operator *op) template <typename LoaderDomain, typename SpecificLoader> void BaseLoader<LoaderDomain, SpecificLoader>::loadRelu6(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -593,8 +593,8 @@ void BaseLoader::loadRelu6(const Operator *op) template <typename LoaderDomain, typename SpecificLoader> void BaseLoader<LoaderDomain, SpecificLoader>::loadResizeBilinear(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); auto input = inputs.at(0); @@ -618,8 +618,8 @@ void BaseLoader::loadResizeBilinear(const Operator template <typename LoaderDomain, typename SpecificLoader> void BaseLoader<LoaderDomain, SpecificLoader>::loadRsqrt(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -630,8 +630,8 @@ void BaseLoader::loadRsqrt(const Operator *op) template <typename LoaderDomain, typename SpecificLoader> void BaseLoader<LoaderDomain, SpecificLoader>::loadSqrt(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -642,8 +642,8 @@ void BaseLoader::loadSqrt(const Operator *op) template <typename LoaderDomain, typename SpecificLoader> void BaseLoader<LoaderDomain, SpecificLoader>::loadSquaredDifference(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -655,8 +655,8 @@ void BaseLoader::loadSquaredDifference(const Opera template <typename LoaderDomain, typename SpecificLoader> void BaseLoader<LoaderDomain, SpecificLoader>::loadTanh(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -667,8 +667,8 @@ void BaseLoader::loadTanh(const Operator *op) template <typename LoaderDomain, typename SpecificLoader> void BaseLoader<LoaderDomain, SpecificLoader>::loadTranspose(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); auto input = inputs.at(0); @@ -688,8 +688,8 @@ void BaseLoader::loadTranspose(const Operator *op) template <typename LoaderDomain, typename SpecificLoader> void BaseLoader<LoaderDomain, SpecificLoader>::loadMean(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); auto input = inputs.at(0); @@ -709,8 +709,8 @@ void BaseLoader::loadMean(const Operator *op) template <typename LoaderDomain, typename SpecificLoader> void BaseLoader<LoaderDomain, SpecificLoader>::loadReduceMax(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); auto input = inputs.at(0); @@ -731,8 +731,8
@@ void BaseLoader::loadReduceMax(const Operator *op) template <typename LoaderDomain, typename SpecificLoader> void BaseLoader<LoaderDomain, SpecificLoader>::loadPad(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -743,8 +743,8 @@ void BaseLoader::loadPad(const Operator *op) template <typename LoaderDomain, typename SpecificLoader> void BaseLoader<LoaderDomain, SpecificLoader>::loadLogistic(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -755,8 +755,8 @@ void BaseLoader::loadLogistic(const Operator *op) template <typename LoaderDomain, typename SpecificLoader> void BaseLoader<LoaderDomain, SpecificLoader>::loadExp(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -767,8 +767,8 @@ void BaseLoader::loadExp(const Operator *op) template <typename LoaderDomain, typename SpecificLoader> void BaseLoader<LoaderDomain, SpecificLoader>::loadGather(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); model::operation::Gather::Param param; @@ -781,8 +781,8 @@ void BaseLoader::loadGather(const Operator *op) template <typename LoaderDomain, typename SpecificLoader> void BaseLoader<LoaderDomain, SpecificLoader>::loadSpaceToBatchND(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -793,8 +793,8 @@ void BaseLoader::loadSpaceToBatchND(const Operator template <typename LoaderDomain, typename SpecificLoader> void BaseLoader<LoaderDomain, SpecificLoader>::loadBatchToSpaceND(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); auto input = inputs.at(0); @@ -817,8 +817,8 @@ void BaseLoader::loadBatchToSpaceND(const Operator template <typename LoaderDomain, typename SpecificLoader> void BaseLoader<LoaderDomain, SpecificLoader>::loadReduceSum(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); auto input = inputs.at(0); @@ -839,15 +839,15 @@ void BaseLoader::loadReduceSum(const Operator *op) template <typename LoaderDomain, typename SpecificLoader> void BaseLoader<LoaderDomain, SpecificLoader>::loadCustom(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); auto *op_code = _model->operator_codes()->Get(op->opcode_index()); auto custom_op_id = op_code->custom_code()->str(); - auto constraint = model::operation::OperandConstraint::createExact(inputs.size()); + auto constraint = ir::OperandConstraint::createExact(inputs.size()); assert(op->custom_options_format() == CustomOptionsFormat::CustomOptionsFormat_FLEXBUFFERS && "Unsupported custom operation options format"); @@ -869,8 +869,8 @@ void BaseLoader::loadCustom(const Operator *op) template <typename LoaderDomain, typename SpecificLoader> void BaseLoader<LoaderDomain, SpecificLoader>::loadSqueeze(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); @@ -892,8 +892,8 @@ void BaseLoader::loadSqueeze(const Operator *op) template <typename LoaderDomain, typename SpecificLoader> void BaseLoader<LoaderDomain, SpecificLoader>::loadPrelu(const Operator *op) { - model::OperandIndexSequence inputs; - model::OperandIndexSequence outputs; +
ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); diff --git a/runtime/neurun/frontend/nnapi/model.cc b/runtime/neurun/frontend/nnapi/model.cc index e854b16..b1978ad 100644 --- a/runtime/neurun/frontend/nnapi/model.cc +++ b/runtime/neurun/frontend/nnapi/model.cc @@ -129,7 +129,7 @@ int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t in VERBOSE(NNAPI::Model) << "setOperandValue: Invalid index value (negative)" << std::endl; return ANEURALNETWORKS_BAD_DATA; } - // NOTE ::neurun::model::OperandIndex uses uint32_t as its underlying type as various NNAPI + // NOTE OperandIndex uses uint32_t as its underlying type as various NNAPI // functions such as ANeuralNetworksModel_addOperation use uint32_t to represent operand // index // ANeuralNetworksModel_setOperandValue, however, uses int32_t to represent operand index. @@ -198,7 +198,7 @@ int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel *model, << std::endl; return ANEURALNETWORKS_BAD_DATA; } - // NOTE ::neurun::model::OperandIndex uses uint32_t as its underlying type as various NNAPI + // NOTE OperandIndex uses uint32_t as its underlying type as various NNAPI // functions such as ANeuralNetworksModel_addOperation use uint32_t to represent operand // index // ANeuralNetworksModel_setOperandValue, however, uses int32_t to represent operand index. diff --git a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc index 883d325..b8e43a6 100644 --- a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc +++ b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc @@ -18,50 +18,50 @@ #include "NNAPIConvert.h" #include "util/logging.h" -const neurun::model::OperandIndex +const neurun::ir::OperandIndex ANeuralNetworksExecution::getInputOperandIndex(int32_t index) noexcept { if (index < 0) { // Negative index: return invalid index - return neurun::model::OperandIndex{}; + return neurun::ir::OperandIndex{}; } uint32_t cast_index = static_cast<uint32_t>(index); if (cast_index >= _execution->graph().getInputs().size()) { // Return invalid index - return neurun::model::OperandIndex{}; + return neurun::ir::OperandIndex{}; } - neurun::model::IOIndex input_index{cast_index}; + neurun::ir::IOIndex input_index{cast_index}; const auto operand_index = _execution->graph().getInputs().at(input_index); return operand_index; } -const neurun::model::OperandIndex +const neurun::ir::OperandIndex ANeuralNetworksExecution::getOutputOperandIndex(int32_t index) noexcept { if (index < 0) { // Negative index: return invalid index - return neurun::model::OperandIndex{}; + return neurun::ir::OperandIndex{}; } uint32_t cast_index = static_cast<uint32_t>(index); if (cast_index >= _execution->graph().getOutputs().size()) { // Return invalid index - return neurun::model::OperandIndex{}; + return neurun::ir::OperandIndex{}; } - neurun::model::IOIndex output_index{cast_index}; + neurun::ir::IOIndex output_index{cast_index}; const auto operand_index = _execution->graph().getOutputs().at(output_index); return operand_index; } bool ANeuralNetworksExecution::compareDataType(const ANeuralNetworksOperandType *type, - const neurun::model::OperandIndex index) noexcept + const neurun::ir::OperandIndex index) noexcept { try { @@ -85,7 +85,7 @@ bool ANeuralNetworksExecution::compareDataType(const ANeuralNetworksOperandType } bool ANeuralNetworksExecution::compareShape(const ANeuralNetworksOperandType *type, -
const neurun::model::OperandIndex index) noexcept + const neurun::ir::OperandIndex index) noexcept { // Passed shape should be specified if (haveUnspecifiedDims(index)) { @@ -99,14 +99,14 @@ bool ANeuralNetworksExecution::compareShape(const ANeuralNetworksOperandType *ty return operand_shape == shape_from_type; } -bool ANeuralNetworksExecution::haveUnspecifiedDims(const neurun::model::OperandIndex index) noexcept +bool ANeuralNetworksExecution::haveUnspecifiedDims(const neurun::ir::OperandIndex index) noexcept { const auto operand_shape = _execution->graph().operands().at(index).shape(); return operand_shape.num_elements() == 0; } -size_t ANeuralNetworksExecution::getOperandSize(const neurun::model::OperandIndex index) noexcept +size_t ANeuralNetworksExecution::getOperandSize(const neurun::ir::OperandIndex index) noexcept { try { @@ -125,7 +125,7 @@ bool ANeuralNetworksExecution::setInput(uint32_t index, const ANeuralNetworksOpe { try { - neurun::model::IOIndex input_index{index}; + neurun::ir::IOIndex input_index{index}; const auto operand_index = getInputOperandIndex(index); const auto type_info = _execution->graph().operands().at(operand_index).typeInfo(); @@ -153,7 +153,7 @@ bool ANeuralNetworksExecution::setOutput(uint32_t index, const ANeuralNetworksOp { try { - neurun::model::IOIndex output_index{index}; + neurun::ir::IOIndex output_index{index}; const auto operand_index = getOutputOperandIndex(index); const auto type_info = _execution->graph().operands().at(operand_index).typeInfo(); @@ -217,7 +217,7 @@ bool ANeuralNetworksExecution::getOutputOperandRank(uint32_t index, uint32_t *ra { try { - neurun::model::IOIndex output_index{index}; + neurun::ir::IOIndex output_index{index}; const auto operand_index = getOutputOperandIndex(index); bool unspecified = haveUnspecifiedDims(operand_index); @@ -250,7 +250,7 @@ bool ANeuralNetworksExecution::getOutputOperandDimensions(uint32_t index, uint32 { try { - neurun::model::IOIndex output_index{index}; + neurun::ir::IOIndex output_index{index}; const auto operand_index = getOutputOperandIndex(index); bool unspecified = haveUnspecifiedDims(operand_index); if (unspecified) diff --git a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.h b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.h index 3f2b2bc..ecffedc 100644 --- a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.h +++ b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.h @@ -40,14 +40,14 @@ public: bool startExecute(void) noexcept; bool execute(void) noexcept; - const neurun::model::OperandIndex getInputOperandIndex(int32_t index) noexcept; - const neurun::model::OperandIndex getOutputOperandIndex(int32_t index) noexcept; + const neurun::ir::OperandIndex getInputOperandIndex(int32_t index) noexcept; + const neurun::ir::OperandIndex getOutputOperandIndex(int32_t index) noexcept; bool compareDataType(const ANeuralNetworksOperandType *type, - const neurun::model::OperandIndex index) noexcept; + const neurun::ir::OperandIndex index) noexcept; bool compareShape(const ANeuralNetworksOperandType *type, - const neurun::model::OperandIndex index) noexcept; - bool haveUnspecifiedDims(const neurun::model::OperandIndex index) noexcept; - size_t getOperandSize(const neurun::model::OperandIndex index) noexcept; + const neurun::ir::OperandIndex index) noexcept; + bool haveUnspecifiedDims(const neurun::ir::OperandIndex index) noexcept; + size_t getOperandSize(const neurun::ir::OperandIndex index) noexcept; const std::shared_ptr<neurun::exec::Execution> instance(void)
noexcept; /** diff --git a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.cc b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.cc index 66b07a1..6e23308 100644 --- a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.cc +++ b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.cc @@ -53,7 +53,7 @@ bool ANeuralNetworksModel::addOperand(const ANeuralNetworksOperandType *type) no bool ANeuralNetworksModel::setOperandValue(uint32_t index, const void *buffer, size_t length, bool optional, bool copy) noexcept { - const neurun::model::OperandIndex ind{index}; + const neurun::ir::OperandIndex ind{index}; try { @@ -66,8 +66,8 @@ bool ANeuralNetworksModel::setOperandValue(uint32_t index, const void *buffer, s setOptionalOperand(ind); } - using ::neurun::model::CachedData; - using ::neurun::model::ExternalData; + using neurun::ir::CachedData; + using neurun::ir::ExternalData; if (copy) { _graph->operands().at(ind).data( @@ -146,7 +146,7 @@ bool ANeuralNetworksModel::addModelInput(uint32_t index) noexcept { _operand_usages[index] = OperandUsage::MODEL_INPUT; - const neurun::model::OperandIndex ind{index}; + const neurun::ir::OperandIndex ind{index}; _graph->addInput(ind); } catch (const std::exception &e) @@ -162,7 +162,7 @@ bool ANeuralNetworksModel::addModelOutput(uint32_t index) noexcept { try { - const neurun::model::OperandIndex ind{index}; + const neurun::ir::OperandIndex ind{index}; // Duplicated output is not allowed if (_graph->getOutputs().contains(ind)) @@ -206,14 +206,14 @@ bool ANeuralNetworksModel::isFinished() noexcept { return !_graph->isBuildingPha bool ANeuralNetworksModel::isExistOperand(uint32_t index) noexcept { - return _graph->operands().exist(neurun::model::OperandIndex{index}); + return _graph->operands().exist(neurun::ir::OperandIndex{index}); } size_t ANeuralNetworksModel::operandSize(uint32_t index) noexcept { try { - return _graph->operands().at(neurun::model::OperandIndex{index}).operandSize(); + return _graph->operands().at(neurun::ir::OperandIndex{index}).operandSize(); } catch (const std::exception &e) { @@ -233,7 +233,7 @@ bool ANeuralNetworksModel::isOperationOutput(uint32_t index) noexcept return (_operand_usages[index] == OperandUsage::OPERATION_OUTPUT); } -void ANeuralNetworksModel::setOptionalOperand(const neurun::model::OperandIndex idx) +void ANeuralNetworksModel::setOptionalOperand(const neurun::ir::OperandIndex idx) { _optional_operands.insert(idx); } @@ -241,7 +241,7 @@ void ANeuralNetworksModel::setOptionalOperand(const neurun::model::OperandIndex void ANeuralNetworksModel::fillOptionalOperand(void) { _graph->operations().iterate( - [&](const ::neurun::model::OperationIndex &, ::neurun::model::Operation &node) { + [&](const neurun::ir::OperationIndex &, ::neurun::model::Operation &node) { for (auto input : node.getInputs()) { // TODO fill default value for optional operands diff --git a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.h b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.h index a1af145..576c657 100644 --- a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.h +++ b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.h @@ -59,12 +59,12 @@ public: void release(std::shared_ptr<neurun::graph::Graph> &graph) { graph = _graph; } private: - void setOptionalOperand(const neurun::model::OperandIndex idx); + void setOptionalOperand(const neurun::ir::OperandIndex idx); void fillOptionalOperand(void); private: std::shared_ptr<neurun::graph::Graph> _graph; - std::unordered_set<neurun::model::OperandIndex> _optional_operands; + std::unordered_set<neurun::ir::OperandIndex> _optional_operands; std::vector<OperandUsage> _operand_usages; }; diff --git a/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.cc b/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.cc index b3fef40..477d780 100644 --- a/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.cc +++ b/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.cc @@ -95,7 +95,7 @@ OperationFactory::OperationFactory() using namespace neurun::model; _map[ANEURALNETWORKS_BATCH_TO_SPACE_ND] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &) { + Operands &) { assert(init_param.input_count == 2 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -330,7 +330,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_CONCATENATION] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count >= 2); // At least one input tensor and axis assert(init_param.output_count == 1); @@ -354,8 +354,8 @@ OperationFactory::OperationFactory() return new operation::Concat{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_RESHAPE] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &) { + _map[ANEURALNETWORKS_RESHAPE] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 2 && init_param.output_count == 1); // Each input should be interpreted as follows: @@ -414,8 +413,7 @@ OperationFactory::OperationFactory() return new operation::Softmax{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_CAST_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &) { + _map[ANEURALNETWORKS_CAST_EX] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 1 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -519,7 +517,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_REDUCE_SUM_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count == 2); assert(init_param.output_count == 1); @@ -561,7 +559,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_STRIDED_SLICE] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count == 7 && init_param.output_count == 1); OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2], @@ -601,7 +599,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_TRANSPOSE] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { // TODO make this work with init_param.input_count == 1 (when permutation vector is optional) // Inputs @@ -649,7 +647,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_SQUEEZE] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count == 1 || init_param.input_count == 2); assert(init_param.output_count == 1); @@ -684,8 +682,7 @@ OperationFactory::OperationFactory() return new operation::Squeeze{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_TANH] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &) { + _map[ANEURALNETWORKS_TANH] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 1 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]}; @@ -698,8 +695,7 @@ OperationFactory::OperationFactory() return new operation::Tanh{inputs, outputs}; }; - _map[ANEURALNETWORKS_LOGISTIC] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &) { + _map[ANEURALNETWORKS_LOGISTIC] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 1 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -733,8 +729,7 @@ OperationFactory::OperationFactory() return new operation::Div{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_EXP_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &) { + _map[ANEURALNETWORKS_EXP_EX] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 1 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -748,7 +743,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_GREATER_EQUAL_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count == 2 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -769,7 +764,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_LESS_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count == 2 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -790,7 +785,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_REDUCE_MAX_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count == 2 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -810,7 +805,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_NOT_EQUAL_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count == 2 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -831,7 +826,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_LOGICAL_AND_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count == 2 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -850,8 +845,7 @@ OperationFactory::OperationFactory() return new operation::LogicalAnd{inputs, outputs}; }; - _map[ANEURALNETWORKS_RSQRT_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &) { + _map[ANEURALNETWORKS_RSQRT_EX] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 1 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -864,8 +858,7 @@ OperationFactory::OperationFactory() return new operation::RSQRT{inputs, outputs}; }; - _map[ANEURALNETWORKS_RELU] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &) { + _map[ANEURALNETWORKS_RELU] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 1 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -879,7 +872,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_RESIZE_BILINEAR] = [](const 
OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count == 3 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -898,8 +891,7 @@ OperationFactory::OperationFactory() return new operation::ResizeBilinear{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_RELU1] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &) { + _map[ANEURALNETWORKS_RELU1] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 1 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -912,8 +904,7 @@ OperationFactory::OperationFactory() return new operation::ReLU1{inputs, outputs}; }; - _map[ANEURALNETWORKS_RELU6] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &) { + _map[ANEURALNETWORKS_RELU6] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 1 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -926,8 +917,7 @@ OperationFactory::OperationFactory() return new operation::ReLU6{inputs, outputs}; }; - _map[ANEURALNETWORKS_RNN] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + _map[ANEURALNETWORKS_RNN] = [](const OperationFactory::Param &init_param, Operands &operands) { assert(init_param.input_count == 6 && init_param.output_count == 2); // Each input should be interpreted as follows: @@ -958,8 +948,7 @@ OperationFactory::OperationFactory() return new operation::RNN{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_FLOOR] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &) { + _map[ANEURALNETWORKS_FLOOR] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 1 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -972,7 +961,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_SPACE_TO_BATCH_ND] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &) { + Operands &) { assert(init_param.input_count == 3 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -992,7 +981,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_SPACE_TO_DEPTH] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count == 2 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -1081,7 +1070,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_EMBEDDING_LOOKUP] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &) { + Operands &) { assert(init_param.input_count == 2 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -1096,7 +1085,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_L2_NORMALIZATION] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &) { + Operands &) { assert(init_param.input_count == 1 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -1109,7 +1098,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_HASHTABLE_LOOKUP] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &) { + Operands &) { assert(init_param.input_count == 3 && init_param.output_count == 2); // Each output should be interpreted as 
follows: @@ -1128,8 +1117,7 @@ OperationFactory::OperationFactory() return new operation::HashtableLookup{inputs, outputs}; }; - _map[ANEURALNETWORKS_PRELU_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &) { + _map[ANEURALNETWORKS_PRELU_EX] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 2 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -1173,8 +1161,7 @@ OperationFactory::OperationFactory() return new operation::TransposeConv{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_SQRT_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &) { + _map[ANEURALNETWORKS_SQRT_EX] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 1 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -1187,7 +1174,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_LOGICAL_OR_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count == 2 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -1207,7 +1194,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_LOGICAL_NOT_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count == 1 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -1224,8 +1211,7 @@ OperationFactory::OperationFactory() return new operation::LogicalNot{inputs, outputs}; }; - _map[ANEURALNETWORKS_LSTM] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + _map[ANEURALNETWORKS_LSTM] = [](const OperationFactory::Param &init_param, Operands &operands) { assert(init_param.input_count == 23 && init_param.output_count == 4); // Each input should be interpreted as follows: @@ -1301,7 +1287,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_EQUAL_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count == 2 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -1322,7 +1308,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_SQUARED_DIFFERENCE_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &) { + Operands &) { assert(init_param.input_count == 2 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -1337,7 +1323,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_TOPK_V2_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count == 2 && init_param.output_count == 2); // Each output should be interpreted as follows: @@ -1359,7 +1345,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_GATHER_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count == 3 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -1377,8 +1363,7 @@ OperationFactory::OperationFactory() return new operation::Gather{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_NEG_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &) { + 
_map[ANEURALNETWORKS_NEG_EX] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 1 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -1391,8 +1376,7 @@ OperationFactory::OperationFactory() return new operation::Neg{inputs, outputs}; }; - _map[ANEURALNETWORKS_ABS_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &) { + _map[ANEURALNETWORKS_ABS_EX] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 1 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -1406,7 +1390,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_ARGMAX_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count == 2 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -1423,8 +1407,7 @@ OperationFactory::OperationFactory() return new operation::ArgMax{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_DEQUANTIZE] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &) { + _map[ANEURALNETWORKS_DEQUANTIZE] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 1 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -1437,8 +1420,7 @@ OperationFactory::OperationFactory() return new operation::Dequantize{inputs, outputs}; }; - _map[ANEURALNETWORKS_MEAN] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + _map[ANEURALNETWORKS_MEAN] = [](const OperationFactory::Param &init_param, Operands &operands) { assert(init_param.input_count == 3 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -1460,7 +1442,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count == 5 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -1477,7 +1459,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_DEPTH_TO_SPACE] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count == 2 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -1495,7 +1477,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_PACK_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count >= 3 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -1515,7 +1497,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_REDUCE_MIN_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count == 2 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; @@ -1535,7 +1517,7 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_SPLIT_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count == 3); assert(init_param.output_count >= 1); // At least one output tensor and axis @@ -1554,7 +1536,7 @@ 
OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_UNPACK_EX] = [](const OperationFactory::Param &init_param, - neurun::model::Operands &operands) { + Operands &operands) { assert(init_param.input_count == 3 && init_param.output_count >= 1); OperandIndexSequence inputs{init_param.inputs[0]}; @@ -1585,7 +1567,7 @@ OperationFactory::OperationFactory() neurun::model::Operation *OperationFactory::create(ANeuralNetworksOperationType type, const OperationFactory::Param &param, - neurun::model::Operands &operands) + Operands &operands) { auto it = _map.find(type); if (it == _map.end()) diff --git a/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.h b/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.h index 4d5d02f..ad773c5 100644 --- a/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.h +++ b/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.h @@ -19,7 +19,7 @@ #include <unordered_map> -#include "model/Operands.h" +#include "ir/Operands.h" #include "model/Operation.h" #include "NeuralNetworks.h" #include "NeuralNetworksEx.h" @@ -40,7 +40,7 @@ public: using Generator = std::function<neurun::model::Operation *(const OperationFactory::Param &, - neurun::model::Operands &)>; + neurun::ir::Operands &)>; public: static OperationFactory &get(); @@ -51,7 +51,7 @@ private: public: neurun::model::Operation *create(ANeuralNetworksOperationType, const OperationFactory::Param &param, - neurun::model::Operands &operands); + neurun::ir::Operands &operands); // TODO add "register" method for separating registration, possibly supporting custom-ops private: diff --git a/runtime/neurun/test/core/backend/ExecTime.test.cc b/runtime/neurun/test/core/backend/ExecTime.test.cc index b5471c8..4b89e64 100644 --- a/runtime/neurun/test/core/backend/ExecTime.test.cc +++ b/runtime/neurun/test/core/backend/ExecTime.test.cc @@ -40,8 +40,8 @@ struct MockBackend : public ::neurun::backend::Backend return std::make_shared(); } std::unique_ptr - newContext(const model::Operands &, - const std::shared_ptr &kb) const override + newContext(const ir::Operands &, const std::shared_ptr &kb) const override { return nullptr; } diff --git a/runtime/neurun/test/core/compiler/Scheduler.cc b/runtime/neurun/test/core/compiler/Scheduler.cc index cf653b1..cb276c9 100644 --- a/runtime/neurun/test/core/compiler/Scheduler.cc +++ b/runtime/neurun/test/core/compiler/Scheduler.cc @@ -207,7 +207,7 @@ void setPermutationsExecutionTime(const std::vector &backends, using OIS = OperandIndexSequence; template <typename NodeT, typename... Types> -model::OperationIndex create(std::shared_ptr graph, Types &&... args) { typename NodeT::Param op_params{Activation::NONE}; auto op = nnfw::cpp14::make_unique<NodeT>(std::forward<Types>(args)..., op_params); diff --git a/runtime/neurun/test/core/exec/ExecInstance.cc b/runtime/neurun/test/core/exec/ExecInstance.cc index c31af3d..3bc7697 100644 --- a/runtime/neurun/test/core/exec/ExecInstance.cc +++ b/runtime/neurun/test/core/exec/ExecInstance.cc @@ -26,7 +26,6 @@ namespace { using namespace neurun::model; -using DataType = DataType; class CompiledMockUpModel { diff --git a/runtime/neurun/test/core/exec/interp/ExecManager.cc b/runtime/neurun/test/core/exec/interp/ExecManager.cc index 69acb74..372b924 100644 --- a/runtime/neurun/test/core/exec/interp/ExecManager.cc +++ b/runtime/neurun/test/core/exec/interp/ExecManager.cc @@ -27,7 +27,6 @@ namespace { using namespace neurun::model; -using DataType = DataType; using ExecManager = neurun::exec::interp::ExecManager; using Execution = neurun::exec::Execution; diff --git a/runtime/neurun/test/graph/Graph.cc b/runtime/neurun/test/graph/Graph.cc index 4d52571..7e465af 100644 --- a/runtime/neurun/test/graph/Graph.cc +++ b/runtime/neurun/test/graph/Graph.cc @@ -22,15 +22,15 @@ TEST(Graph, inputs_and_outputs) { ::neurun::graph::Graph graph; - ::neurun::model::OperandIndex index0{0u}; - ::neurun::model::OperandIndex index1{1u}; + neurun::ir::OperandIndex index0{0u}; + neurun::ir::OperandIndex index1{1u}; graph.addInput({index0}); graph.addInput({index1}); - ::neurun::model::OperandIndex index10{10u}; - ::neurun::model::OperandIndex index11{11u}; - ::neurun::model::OperandIndex index12{12u}; + neurun::ir::OperandIndex index10{10u}; + neurun::ir::OperandIndex index11{11u}; + neurun::ir::OperandIndex index12{12u}; graph.addOutput({index10}); graph.addOutput({index11}); @@ -39,9 +39,9 @@ TEST(Graph, inputs_and_outputs) ASSERT_EQ(graph.getInputs().size(), 2); ASSERT_EQ(graph.getOutputs().size(), 3); - ::neurun::model::IOIndex io_index0{0}; - ::neurun::model::IOIndex io_index1{1}; - ::neurun::model::IOIndex io_index2{2}; + neurun::ir::IOIndex io_index0{0}; + neurun::ir::IOIndex io_index1{1}; + neurun::ir::IOIndex io_index2{2}; ASSERT_EQ(graph.getInputs().at(io_index0), 0); ASSERT_EQ(graph.getInputs().at(io_index1), 1); diff --git a/runtime/neurun/test/graph/MockNode.h b/runtime/neurun/test/graph/MockNode.h index 088140d..00b897c 100644 --- a/runtime/neurun/test/graph/MockNode.h +++ b/runtime/neurun/test/graph/MockNode.h @@ -18,7 +18,7 @@ #define __NEURUN_TEST_GRAPH_MOCK_NODE_H__ #include "model/Operation.h" -#include "model/OperandIndexSequence.h" +#include "ir/OperandIndexSequence.h" namespace neurun_test { @@ -28,9 +28,9 @@ namespace graph class SimpleMock : public neurun::model::Operation { public: - SimpleMock(const neurun::model::OperandIndexSequence &inputs, - const neurun::model::OperandIndexSequence &outputs) - : neurun::model::Operation{neurun::model::operation::OperandConstraint::createAny()} + SimpleMock(const neurun::ir::OperandIndexSequence &inputs, + const neurun::ir::OperandIndexSequence &outputs) + : neurun::model::Operation{neurun::ir::OperandConstraint::createAny()} { setInputs(inputs); setOutputs(outputs); @@ -38,7 +38,7 @@ public: public: void accept(neurun::model::OperationVisitor &) const override {} - neurun::model::OpCode opcode() const final { return neurun::model::OpCode::Invalid; } + neurun::ir::OpCode opcode() const final { return neurun::ir::OpCode::Invalid; } }; } // namespace graph diff --git a/runtime/neurun/test/graph/operand/IndexSet.cc b/runtime/neurun/test/graph/operand/IndexSet.cc index
73e7fd8..969290f 100644 --- a/runtime/neurun/test/graph/operand/IndexSet.cc +++ b/runtime/neurun/test/graph/operand/IndexSet.cc @@ -16,10 +16,10 @@ #include <gtest/gtest.h> -#include "model/OperandIndexSequence.h" +#include "ir/OperandIndexSequence.h" -using neurun::model::OperandIndex; -using neurun::model::OperandIndexSequence; +using neurun::ir::OperandIndex; +using neurun::ir::OperandIndexSequence; TEST(graph_OperandIndexSequence, append) { @@ -31,8 +31,8 @@ TEST(graph_OperandIndexSequence, append) ASSERT_EQ(iset.size(), 5); - neurun::model::IOIndex index1{1}; - neurun::model::IOIndex index2{4}; + neurun::ir::IOIndex index1{1}; + neurun::ir::IOIndex index2{4}; ASSERT_EQ(iset.at(index1), 2); ASSERT_EQ(iset.at(index2), 10); diff --git a/runtime/neurun/test/graph/operand/Set.cc b/runtime/neurun/test/graph/operand/Set.cc index ee365684..e30a5b7 100644 --- a/runtime/neurun/test/graph/operand/Set.cc +++ b/runtime/neurun/test/graph/operand/Set.cc @@ -16,11 +16,11 @@ #include <gtest/gtest.h> -#include "model/Operands.h" +#include "ir/Operands.h" TEST(graph_operand_Set, set_test) { - neurun::model::Operands set; + neurun::ir::Operands set; neurun::ir::Shape shape0{1, 2, 3}; @@ -35,11 +35,11 @@ TEST(graph_operand_Set, set_test) set.emplace(shape0, type); set.emplace(shape1, type); - ASSERT_EQ(set.exist(neurun::model::OperandIndex{0u}), true); - ASSERT_EQ(set.exist(neurun::model::OperandIndex{1u}), true); - ASSERT_EQ(set.exist(neurun::model::OperandIndex{2u}), false); + ASSERT_EQ(set.exist(neurun::ir::OperandIndex{0u}), true); + ASSERT_EQ(set.exist(neurun::ir::OperandIndex{1u}), true); + ASSERT_EQ(set.exist(neurun::ir::OperandIndex{2u}), false); - ASSERT_EQ(set.at(neurun::model::OperandIndex{0u}).shape().dim(0), 1); - ASSERT_EQ(set.at(neurun::model::OperandIndex{0u}).shape().dim(1), 2); - ASSERT_EQ(set.at(neurun::model::OperandIndex{0u}).shape().dim(2), 3); + ASSERT_EQ(set.at(neurun::ir::OperandIndex{0u}).shape().dim(0), 1); + ASSERT_EQ(set.at(neurun::ir::OperandIndex{0u}).shape().dim(1), 2); + ASSERT_EQ(set.at(neurun::ir::OperandIndex{0u}).shape().dim(2), 3); } diff --git a/runtime/neurun/test/graph/operand/UseDef.cc b/runtime/neurun/test/graph/operand/UseDef.cc index b049b8e..08c4d3f 100644 --- a/runtime/neurun/test/graph/operand/UseDef.cc +++ b/runtime/neurun/test/graph/operand/UseDef.cc @@ -26,7 +26,7 @@ namespace { -using IndexSet = neurun::model::OperandIndexSequence; +using IndexSet = neurun::ir::OperandIndexSequence; using Mock = neurun_test::graph::SimpleMock; } // namespace anonymous diff --git a/runtime/neurun/test/graph/operation/Set.cc b/runtime/neurun/test/graph/operation/Set.cc index b4b405e..4c0496b 100644 --- a/runtime/neurun/test/graph/operation/Set.cc +++ b/runtime/neurun/test/graph/operation/Set.cc @@ -21,7 +21,7 @@ using neurun::model::Operations; using neurun::model::Operation; -using neurun::model::OperationIndex; +using neurun::ir::OperationIndex; TEST(graph_operation_Set, operation_test) { diff --git a/runtime/neurun/test/graph/operation/SetIO.cc b/runtime/neurun/test/graph/operation/SetIO.cc index 31950b0..8e13e9e 100644 --- a/runtime/neurun/test/graph/operation/SetIO.cc +++ b/runtime/neurun/test/graph/operation/SetIO.cc @@ -17,8 +17,8 @@ #include <gtest/gtest.h> #include "ir/Graph.h" -#include "model/Index.h" -#include "model/OperandIndexSequence.h" +#include "ir/Index.h" +#include "ir/OperandIndexSequence.h" #include "model/operation/Conv2D.h" #include "model/operation/Concat.h" @@ -26,8 +26,8 @@ #include -using Index = neurun::model::IOIndex; -using IndexSet = neurun::model::OperandIndexSequence; +using Index = neurun::ir::IOIndex; +using IndexSet = neurun::ir::OperandIndexSequence; TEST(graph_operation_setIO, operation_setIO_conv) { diff --git a/runtime/neurun/test/graph/verifier/Verifier.cc b/runtime/neurun/test/graph/verifier/Verifier.cc index ced5bda..f36fba0 100644 --- a/runtime/neurun/test/graph/verifier/Verifier.cc +++ b/runtime/neurun/test/graph/verifier/Verifier.cc @@ -20,10 +20,10 @@ #include "ir/Graph.h" #include "ir/verifier/Verifier.h" #include "cpp14/memory.h" -#include "model/Operand.h" +#include "ir/Operand.h" #include "../MockNode.h" -using IndexSet = neurun::model::OperandIndexSequence; +using IndexSet = neurun::ir::OperandIndexSequence; using Mock = neurun_test::graph::SimpleMock; TEST(Verifier, dag_checker) -- 2.7.4 From 08bca93539266a5cb21a77a901cee80e8659fbf6 Mon Sep 17 00:00:00 2001 From: =?utf8?q?=EC=9D=B4=ED=95=9C=EC=A2=85/On-Device=20Lab=28SR=29/Enginee?= =?utf8?q?r/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?= Date: Mon, 9 Dec 2019 15:05:25 +0900 Subject: [PATCH 13/16] [neurun] Introduce EventCollectorGlobal (source) (#9457) Add source file `EventCollectorGlobal.cc` which should have been included in #9443. Signed-off-by: Hanjoung Lee --- .../neurun/core/src/util/EventCollectorGlobal.cc | 86 ++++++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100644 runtime/neurun/core/src/util/EventCollectorGlobal.cc diff --git a/runtime/neurun/core/src/util/EventCollectorGlobal.cc b/runtime/neurun/core/src/util/EventCollectorGlobal.cc new file mode 100644 index 0000000..6c3594f --- /dev/null +++ b/runtime/neurun/core/src/util/EventCollectorGlobal.cc @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include "util/EventCollectorGlobal.h" + +#include <cassert> +#include <fstream> + +#include "util/ConfigSource.h" + +namespace neurun +{ +namespace util +{ + +EventCollectorGlobal::EventCollectorGlobal() : _recorder{}, _collector{&_recorder} +{ + // DO NOTHING +} + +EventCollectorGlobal::~EventCollectorGlobal() +{ + auto path = util::getConfigString(util::config::TRACE_FILEPATH); + if (!path.empty()) + { + // TODO Need a better way to choose the saved file path than just appending ".global" to the trace file path + std::ofstream ofs{path + ".global"}; + _recorder.writeToFile(ofs); + } +} + +EventCollectorGlobal &EventCollectorGlobal::get() +{ + static EventCollectorGlobal instance; + return instance; +} + +EventDurationBlock::EventDurationBlock(const std::string &tag) : _tag{tag} +{ + auto &glob = EventCollectorGlobal::get(); + glob.collector().onEvent(EventCollector::Event{EventCollector::Edge::BEGIN, "0", _tag}); +} +EventDurationBlock::~EventDurationBlock() +{ + auto &glob = EventCollectorGlobal::get(); + glob.collector().onEvent(EventCollector::Event{EventCollector::Edge::END, "0", _tag}); +} + +EventDurationManual::EventDurationManual(const std::string &tag) : _tag{tag}, _pair{true} {} + +EventDurationManual::~EventDurationManual() +{ + // Check that begin() and end() have been called as a pair + assert(_pair); +} + +void EventDurationManual::begin() +{ + _pair = false; + auto &glob = EventCollectorGlobal::get(); + glob.collector().onEvent(EventCollector::Event{EventCollector::Edge::BEGIN, "0", _tag}); +} + +void EventDurationManual::end() +{ + assert(!_pair); + _pair = true; + auto &glob = EventCollectorGlobal::get(); + glob.collector().onEvent(EventCollector::Event{EventCollector::Edge::END, "0", _tag}); +} + +} // namespace util +} // namespace neurun -- 2.7.4 From 8ef22054acea1281d72d7b9f91df7c028154182f Mon Sep 17 00:00:00 2001 From: =?utf8?q?=EB=B0=95=EC=B2=9C=EA=B5=90/On-Device=20Lab=28SR=29/Enginee?= =?utf8?q?r/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?= Date: Mon, 9 Dec 2019 16:12:03 +0900 Subject: [PATCH 14/16] [moco-import] Register FakeQuantWithMinMaxVars (#9460) This commit registers the FakeQuantWithMinMaxVars node in Nodes.h. Signed-off-by: Cheongyo Bahk --- compiler/moco/import/include/moco/Import/Nodes.h | 1 + 1 file changed, 1 insertion(+) diff --git a/compiler/moco/import/include/moco/Import/Nodes.h b/compiler/moco/import/include/moco/Import/Nodes.h index eda191f..7fa5390 100644 --- a/compiler/moco/import/include/moco/Import/Nodes.h +++ b/compiler/moco/import/include/moco/Import/Nodes.h @@ -25,6 +25,7 @@ #include "Nodes/Conv2DBackpropInput.h" #include "Nodes/Conv2D.h" #include "Nodes/DepthwiseConv2dNative.h" +#include "Nodes/FakeQuantWithMinMaxVars.h" #include "Nodes/FusedBatchNorm.h" #include "Nodes/Identity.h" #include "Nodes/MaxPool.h" -- 2.7.4 From 684bd711fda718c02664103197c6ac7fdab00318 Mon Sep 17 00:00:00 2001 From: =?utf8?q?=EB=B0=95=EC=B2=9C=EA=B5=90/On-Device=20Lab=28SR=29/Enginee?= =?utf8?q?r/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?= Date: Mon, 9 Dec 2019 16:38:12 +0900 Subject: [PATCH 15/16] Protobuf module mode import for Windows build (#9447) * Protobuf module mode import for Windows build A Windows native build can only import Protobuf with the legacy 'module' mode import. This commit gives a Windows build a way to import Protobuf. Note that this will not affect the existing default behavior.
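For illustration, here is a minimal sketch of the mechanism this option selects between; the two find_package() calls below correspond to _Protobuf_module_import() and _Protobuf_import() in ProtobufConfig.cmake (this snippet is an editor's example for review, not part of the patch):

    # Legacy MODULE mode: resolved by the FindProtobuf.cmake bundled with CMake.
    # This is the import path that works for a Windows native build.
    find_package(Protobuf MODULE QUIET)

    # CONFIG mode: resolved by the config file installed by Protobuf itself.
    # This remains the default import path while the option stays OFF.
    find_package(Protobuf CONFIG QUIET)

A user would opt in at configure time with `cmake -DUSE_PROTOBUF_LEGACY_IMPORT=ON ...`; since the option defaults to OFF, all existing builds keep the CONFIG mode import.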
Signed-off-by: Cheongyo Bahk * Move option to top * USE_PROTOBUF_LEGACY_IMPORT Change-Id: I8963f857a9b5be23fa7e958e25d1003a81444e28 Signed-off-by: Chunseok Lee --- infra/cmake/packages/ProtobufConfig.cmake | 8 ++++++-- infra/nncc/CMakeLists.txt | 4 ++++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/infra/cmake/packages/ProtobufConfig.cmake b/infra/cmake/packages/ProtobufConfig.cmake index 349caab..3c8d232 100644 --- a/infra/cmake/packages/ProtobufConfig.cmake +++ b/infra/cmake/packages/ProtobufConfig.cmake @@ -1,4 +1,3 @@ -# NOTE This function is unused, but remains for future reference function(_Protobuf_module_import) # Let's use find_package here not to export unnecessary definitions find_package(Protobuf MODULE QUIET) @@ -64,7 +63,12 @@ function(_Protobuf_build) endfunction(_Protobuf_build) _Protobuf_build() -_Protobuf_import() + +if(USE_PROTOBUF_LEGACY_IMPORT) + _Protobuf_module_import() +else(USE_PROTOBUF_LEGACY_IMPORT) + _Protobuf_import() +endif(USE_PROTOBUF_LEGACY_IMPORT) if(Protobuf_FOUND) function(Protobuf_Generate PREFIX OUTPUT_DIR PROTO_DIR) diff --git a/infra/nncc/CMakeLists.txt b/infra/nncc/CMakeLists.txt index d2901c3..aa84391 100644 --- a/infra/nncc/CMakeLists.txt +++ b/infra/nncc/CMakeLists.txt @@ -121,6 +121,10 @@ endif(${ENABLE_TEST}) option(ENABLE_STRICT_BUILD "Treat warning as error" OFF) +# This option might be turned ON for Windows native build. +# Check our ProtobufConfig.cmake for its usage. +option(USE_PROTOBUF_LEGACY_IMPORT "Use legacy MODULE mode import rather than CONFIG mode" OFF) + ### ### Target ### -- 2.7.4 From ce9f19de952582cc3369257d41287c4b6a0abd03 Mon Sep 17 00:00:00 2001 From: MyungJoo Ham Date: Tue, 10 Dec 2019 11:03:02 +0900 Subject: [PATCH 16/16] Dist/Tizen: revert reverted fix Do not hardcode shared library dependencies Change-Id: I66ad175038a2ed7df8759f9150ecc5080f19f14a Signed-off-by: MyungJoo Ham --- packaging/nnfw.spec | 1 - 1 file changed, 1 deletion(-) diff --git a/packaging/nnfw.spec b/packaging/nnfw.spec index 1800ee4..5c2f009 100644 --- a/packaging/nnfw.spec +++ b/packaging/nnfw.spec @@ -23,7 +23,6 @@ BuildRequires: zlib-devel # Require python for acl-ex library build pre-process BuildRequires: python BuildRequires: libarmcl-devel -Requires: libarmcl-release %endif Requires(post): /sbin/ldconfig -- 2.7.4