From 07f4eceacf7b8668696d5be4be2e6aecba7d2408 Mon Sep 17 00:00:00 2001
From: Andrew V. Tischenko/AI Tools Lab/SRR/Staff Engineer/Samsung Electronics
Date: Wed, 16 Jan 2019 18:32:41 +0300
Subject: [PATCH] [nnc] The initial implementation of Shape op in ONNX (#2868)

The inception_v3 model requires this operation. In addition, I changed the
include order in some ONNX frontend files.

Signed-off-by: Andrew V. Tischenko <a.tischenko@partner.samsung.com>
---
 .../nnc/passes/onnx_frontend/ONNXImporterImpl.cpp  | 19 ++++++++-------
 .../nnc/passes/onnx_frontend/ONNXImporterImpl.h    |  2 +-
 contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp | 28 ++++++++++++++++------
 contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h   | 12 ++++++----
 4 files changed, 41 insertions(+), 20 deletions(-)

diff --git a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
index 3eb544c..04b7a54 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
+++ b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
@@ -14,10 +14,9 @@
  * limitations under the License.
  */
 
-#include
-#include
-#include
-#include
+#include "ONNXImporterImpl.h"
+#include "ONNXPerfectHash.h"
+#include "ONNXOpCreator.h"
 
 #include "core/modelIR/IrDotDumper.h"
 #include "core/modelIR/operations/ConstantOp.h"
@@ -36,10 +35,10 @@
 #include "passes/common_frontend/shape_helper.h"
 #include "pass/PassException.h"
 
-#include "ONNXImporterImpl.h"
-#include "ONNXPerfectHash.h"
-#include "ONNXOpCreator.h"
-
+#include
+#include
+#include
+#include
 
 namespace nnc {
 
@@ -71,6 +70,7 @@ static void collectUnsupportedOps(std::unique_ptr& model) {
       case ONNXOpCode::opRelu:
       case ONNXOpCode::opReshape:
       case ONNXOpCode::opUnsqueeze:
+      case ONNXOpCode::opShape:
      case ONNXOpCode::opSigmoid:
      case ONNXOpCode::opScale:
      case ONNXOpCode::opSoftmax:
@@ -304,6 +304,9 @@ mir::Graph *ONNXImporterImpl::createIR() {
       case ONNXOpCode::opSum:
         outputs = _opCreator.convertElementwise(inputs, mir::ops::ElementwiseOp::OpType::add);
         break;
+      case ONNXOpCode::opShape:
+        outputs = _opCreator.convertShape(inputs);
+        break;
       case ONNXOpCode::opMul:
         outputs = _opCreator.convertElementwise(inputs, mir::ops::ElementwiseOp::OpType::mul);
         break;
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.h b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.h
index 51d0693..31f4919 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.h
+++ b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.h
@@ -49,7 +49,7 @@ public:
   // This map maps onnx tensor names to MIR operations/nodes
   std::map _tensorNameToIODescriptor;
   // This map keeps named tensors used as graph input initializers.
-  // In addiotn here could be tensors from opGivenTensorFill and opConstant
+  // In addition, here could be tensors from opGivenTensorFill and opConstant
   std::map _constantTensors;
   std::vector _graphOutputs;
   std::string _modelFilename;
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
index 4d67f8a..12593dd 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
+++ b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
@@ -14,10 +14,9 @@
  * limitations under the License.
  */
 
-#include
-#include
-#include
-#include
+#include "ONNXOpCreator.h"
+#include "ONNXImporterImpl.h"
+
 #include "core/modelIR/Index.h"
 #include "core/modelIR/Graph.h"
 #include "core/modelIR/Scalar.h"
@@ -48,8 +47,10 @@
 #include "passes/common_frontend/op_creator_helper.h"
 #include "passes/common_frontend/shape_helper.h"
 #include "pass/PassException.h"
-#include "ONNXOpCreator.h"
-#include "ONNXImporterImpl.h"
+
+#include
+#include
+#include
 
 namespace nnc {
 
@@ -310,7 +311,7 @@ ONNXOpCreator::convertReshape(const std::vector& inputs) {
   // The vector to build the new shape from
   std::vector shape_vector(cnt);
   ShapeRange out_range(shape_tensor_shape);
-  // FIXME: real type could be int64_t but _elementSize is correct that's why it works
+  // FIXME: real type should be int64_t
   Tensor tensor_accessor(shape_tensor);
 
   int i = 0;
@@ -438,6 +439,19 @@ ONNXOpCreator::convertScale(const std::vector& inputs,
 }
 
 std::vector
+ONNXOpCreator::convertShape(const std::vector& inputs) {
+  const auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
+  int size = input_shape.rank();
+  Shape output_shape({size});
+  std::vector data(size);
+  for (int i = 0; i < size; i++) {
+    data[i] = input_shape.dim(i);
+  }
+  auto result = createOp(createTensor(data.data(), output_shape));
+  return {result->getOutput(0)};
+}
+
+std::vector
 ONNXOpCreator::convertGivenTensorFill(const onnx::NodeProto& onnx_node,
                                       InputTensors& input_tensors) {
   auto values_att = findAttribute(onnx_node, "values");
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
index f5b405a..14f5234 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
+++ b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
@@ -17,10 +17,6 @@
 #ifndef NNCC_ONNX_OP_CREATOR_H
 #define NNCC_ONNX_OP_CREATOR_H
 
-#include
-#include
-#include
-#include
 #include "core/modelIR/Graph.h"
 #include "core/modelIR/TensorVariant.h"
 #include "core/modelIR/operations/CommonProps.h"
@@ -29,6 +25,11 @@
 #include "onnx/onnx.pb.h"
 #include "ONNXOpType.h"
 
+#include
+#include
+#include
+#include
+
 namespace nnc {
 
 class ONNXOpCreator {
@@ -90,6 +91,9 @@ public:
                   const onnx::NodeProto& onnx_node);
 
   std::vector
+  convertShape(const std::vector& inputs);
+
+  std::vector
   convertBatchNorm(const std::vector& inputs,
                    const onnx::NodeProto& onnx_node,
                    InputTensors& input_tensors);
-- 
2.7.4
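
For reference, the ONNX Shape operator takes a rank-N tensor and produces a 1-D tensor of
length N holding the input's dimensions; because the nnc importer only sees statically known
shapes, convertShape can fold the operator into a constant at import time. Below is a minimal
standalone sketch of that constant-folding idea in plain C++ containers rather than the MIR
Shape/TensorVariant types; StaticShape and foldShapeOp are illustrative names, not part of the
nnc API.

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Hypothetical stand-in for the statically known shape the importer works
    // with; nnc uses mir::Shape, this sketch uses a plain vector of dimensions.
    using StaticShape = std::vector<std::int32_t>;

    // Folds an ONNX Shape op at import time: a rank-N input shape becomes a
    // 1-D constant of length N whose elements are the input's dimensions,
    // which is what convertShape emits as a ConstantOp in MIR.
    static std::vector<std::int32_t> foldShapeOp(const StaticShape& input_shape) {
      std::vector<std::int32_t> data(input_shape.size());
      for (std::size_t i = 0; i < input_shape.size(); ++i)
        data[i] = input_shape[i];
      return data;
    }

    int main() {
      // Example: an N x C x H x W feature map such as inception_v3 might produce.
      StaticShape feature_map = {1, 192, 35, 35};
      auto shape_const = foldShapeOp(feature_map);
      for (auto d : shape_const)
        std::cout << d << ' ';  // prints: 1 192 35 35
      std::cout << '\n';
      return 0;
    }

Note that the ONNX specification defines the output of Shape as an int64 tensor, while the MIR
constant produced by convertShape stores the dimensions with whichever element type the importer
uses by default; this echoes the existing FIXME about int64_t in convertReshape.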