From fb4e6e50283a9527c2432c6b6356b1409c207fa7 Mon Sep 17 00:00:00 2001
From: Andrei Shedko/AI Tools Lab /SRR/Engineer/Samsung Electronics
 <a.shedko@partner.samsung.com>
Date: Wed, 16 Jan 2019 19:12:19 +0300
Subject: [PATCH] [nnc] New importers/style transfer ops support (#2705)

Added resize support to the Caffe2 importer and to the ONNX importer
(as Upsample).
Tested with one-layer nets.

Signed-off-by: Andrei Shedko <a.shedko@partner.samsung.com>
---
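Note for reviewers (placed after the "---" marker, so it is not part of the
commit message): both new converters reduce to the same NCHW -> NHWC scale
reordering, because MIR stores tensors as NHWC. The standalone sketch below
illustrates the remapping with plain arrays; the names onnx_scales and
mir_scales are illustrative only, and the real code operates on mir::Tensor
values as shown in the hunks that follow.

    #include <array>
    #include <cassert>
    #include <cstdio>

    int main() {
      // ONNX Upsample carries a rank-1 'scales' input in NCHW order {N, C, H, W};
      // here H and W are upscaled by 2.
      const std::array<float, 4> onnx_scales = {1.0f, 1.0f, 2.0f, 2.0f};
      // MIR is NHWC, so element i of the ONNX vector lands at index onnx2mir[i].
      const int onnx2mir[] = {0, 3, 1, 2};
      std::array<float, 4> mir_scales{};
      for (int i = 0; i < 4; i++)
        mir_scales[onnx2mir[i]] = onnx_scales[i];
      // Expect {1, 2, 2, 1}: batch and channels untouched, H and W doubled.
      assert(mir_scales[0] == 1 && mir_scales[1] == 2 &&
             mir_scales[2] == 2 && mir_scales[3] == 1);
      std::printf("NHWC scales: %g %g %g %g\n",
                  mir_scales[0], mir_scales[1], mir_scales[2], mir_scales[3]);
      return 0;
    }

The Caffe2 ResizeNearest converter builds the same NHWC vector directly from
its height_scale and width_scale arguments, defaulting both to 1.0 (a no-op).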
 cmake/packages/ONNXSourceConfig.cmake              |  1 -
 .../nnc/passes/caffe2_frontend/caffe2_importer.cpp | 42 ++++++-----
 .../passes/caffe2_frontend/caffe2_op_creator.cpp   | 16 ++++
 .../nnc/passes/caffe2_frontend/caffe2_op_creator.h |  3 +
 .../nnc/passes/caffe2_frontend/caffe2_op_types.h   |  1 +
 .../nnc/passes/onnx_frontend/ONNXImporterImpl.cpp  | 24 +++---
 contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp | 85 +++++++++++++++-------
 contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h   |  4 +
 8 files changed, 120 insertions(+), 56 deletions(-)

diff --git a/cmake/packages/ONNXSourceConfig.cmake b/cmake/packages/ONNXSourceConfig.cmake
index 5115c2f..cece73a 100644
--- a/cmake/packages/ONNXSourceConfig.cmake
+++ b/cmake/packages/ONNXSourceConfig.cmake
@@ -6,7 +6,6 @@ function(_ONNXSource_import)
   nncc_include(ExternalSourceTools)
   nncc_include(OptionTools)
 
-  envoption(ONNX_URL https://github.com/onnx/onnx/archive/v1.3.0.zip)
   ExternalSource_Download(ONNX ${ONNX_URL})
 
diff --git a/contrib/nnc/passes/caffe2_frontend/caffe2_importer.cpp b/contrib/nnc/passes/caffe2_frontend/caffe2_importer.cpp
index c075f99..87473e3 100644
--- a/contrib/nnc/passes/caffe2_frontend/caffe2_importer.cpp
+++ b/contrib/nnc/passes/caffe2_frontend/caffe2_importer.cpp
@@ -120,6 +120,7 @@ void Caffe2Importer::collectUnsupportedOp(const OperatorDef& op) {
     case SupportedCaffe2OpType::givenTensorInt64Fill:
     case SupportedCaffe2OpType::mul:
     case SupportedCaffe2OpType::relu:
+    case SupportedCaffe2OpType::resizeNearest:
     case SupportedCaffe2OpType::sigmoid:
     case SupportedCaffe2OpType::softmax:
     case SupportedCaffe2OpType::sum:
@@ -148,7 +149,6 @@ void Caffe2Importer::preloadAllTensors() {
 
 void Caffe2Importer::createMIRNodesFromOp(const OperatorDef& op) {
   std::vector<mir::IODescriptor> outputs;
-
   // If op input not met yet - consider it as model input
   if (op.input_size() > 0 &&
       _blobNameToIODescriptor.find(op.input(0)) == _blobNameToIODescriptor.end()) {
@@ -194,6 +194,9 @@ void Caffe2Importer::createMIRNodesFromOp(const OperatorDef& op) {
     case SupportedCaffe2OpType::relu:
       outputs = _opCreator->convertRelu(inputs);
       break;
+    case SupportedCaffe2OpType::resizeNearest:
+      outputs = _opCreator->convertResize(inputs, op);
+      break;
     case SupportedCaffe2OpType::sigmoid:
       outputs = _opCreator->convertSigmoid(inputs);
       break;
@@ -292,24 +295,25 @@ void Caffe2Importer::setGraphOutputs() {
 }
 
 const std::map<std::string, SupportedCaffe2OpType> Caffe2Importer::_operatorTypes = {
-{"Add", SupportedCaffe2OpType::add},
-{"AveragePool", SupportedCaffe2OpType::averagePool},
-{"Conv", SupportedCaffe2OpType::conv},
-{"Concat", SupportedCaffe2OpType::concat},
-{"ConstantFill", SupportedCaffe2OpType::constantFill},
-{"Dropout", SupportedCaffe2OpType::dropout},
-{"FC", SupportedCaffe2OpType::FC},
-{"GivenTensorFill", SupportedCaffe2OpType::givenTensorFill},
-{"MaxPool", SupportedCaffe2OpType::maxPool},
-{"Mul", SupportedCaffe2OpType::mul},
-{"Relu", SupportedCaffe2OpType::relu},
-{"Sigmoid", SupportedCaffe2OpType::sigmoid},
-{"Softmax", SupportedCaffe2OpType::softmax},
-{"SpatialBN", SupportedCaffe2OpType::spatialBN},
-{"Sum", SupportedCaffe2OpType::sum},
-{"Clip", SupportedCaffe2OpType::clip},
-{"Reshape", SupportedCaffe2OpType::reshape},
-{"GivenTensorInt64Fill", SupportedCaffe2OpType::givenTensorInt64Fill},
+  {"Add", SupportedCaffe2OpType::add},
+  {"AveragePool", SupportedCaffe2OpType::averagePool},
+  {"Conv", SupportedCaffe2OpType::conv},
+  {"Concat", SupportedCaffe2OpType::concat},
+  {"ConstantFill", SupportedCaffe2OpType::constantFill},
+  {"Dropout", SupportedCaffe2OpType::dropout},
+  {"FC", SupportedCaffe2OpType::FC},
+  {"GivenTensorFill", SupportedCaffe2OpType::givenTensorFill},
+  {"MaxPool", SupportedCaffe2OpType::maxPool},
+  {"Mul", SupportedCaffe2OpType::mul},
+  {"Relu", SupportedCaffe2OpType::relu},
+  {"ResizeNearest", SupportedCaffe2OpType::resizeNearest},
+  {"Sigmoid", SupportedCaffe2OpType::sigmoid},
+  {"Softmax", SupportedCaffe2OpType::softmax},
+  {"SpatialBN", SupportedCaffe2OpType::spatialBN},
+  {"Sum", SupportedCaffe2OpType::sum},
+  {"Clip", SupportedCaffe2OpType::clip},
+  {"Reshape", SupportedCaffe2OpType::reshape},
+  {"GivenTensorInt64Fill", SupportedCaffe2OpType::givenTensorInt64Fill},
 };
 
 } // namespace nnc
diff --git a/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.cpp b/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.cpp
index a446c19..fa2c4fc 100644
--- a/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.cpp
+++ b/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.cpp
@@ -26,6 +26,7 @@
 #include "core/modelIR/operations/PoolOp.h"
 #include "core/modelIR/operations/ReluOp.h"
 #include "core/modelIR/operations/ReshapeOp.h"
+#include "core/modelIR/operations/ResizeOp.h"
 #include "core/modelIR/operations/ScaleOp.h"
 #include "core/modelIR/operations/SigmoidOp.h"
 #include "core/modelIR/operations/SoftmaxOp.h"
@@ -388,6 +389,21 @@ Caffe2OpCreator::convertRelu(const std::vector<IODescriptor>& inputs) {
   return {relu->getOutput(0)};
 }
 
+std::vector<IODescriptor>
+Caffe2OpCreator::convertResize(const std::vector<IODescriptor>& inputs,
+                               const ::caffe2::OperatorDef& op) {
+  // assume NCHW and convert to MIR (NHWC)
+  std::vector<float> scales(4);
+  assert(inputs[0].op->getOutputShape(0).rank() == 4 && "only 4d tensors are supported");
+  scales[0] = 1;
+  // default to no-op
+  scales[1] = getSingleArgument(op, "height_scale", 1.0f);
+  scales[2] = getSingleArgument(op, "width_scale", 1.0f);
+  scales[3] = 1;
+  auto resize = createOp<ops::ResizeOp>(
+      convertCaffeToMIR(inputs[0]), ops::ResizeOp::ResizeMethod::nearestNeighbor, scales);
+  return {convertMIRToCaffe(resize->getOutput(0))};
+}
+
 std::vector<IODescriptor>
 Caffe2OpCreator::convertSigmoid(const std::vector<IODescriptor>& inputs) {
   auto result = createOp<ops::SigmoidOp>("Sigmoid", inputs[0]);
diff --git a/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.h b/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.h
index 99058cb..b52a545 100644
--- a/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.h
+++ b/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.h
@@ -82,6 +82,9 @@ public:
   std::vector<mir::IODescriptor> convertRelu(const std::vector<mir::IODescriptor>&);
 
+  std::vector<mir::IODescriptor> convertResize(const std::vector<mir::IODescriptor>&,
+                                               const ::caffe2::OperatorDef&);
+
   std::vector<mir::IODescriptor> convertSigmoid(const std::vector<mir::IODescriptor>&);
 
   std::vector<mir::IODescriptor> convertSoftmax(const std::vector<mir::IODescriptor>&,
diff --git a/contrib/nnc/passes/caffe2_frontend/caffe2_op_types.h b/contrib/nnc/passes/caffe2_frontend/caffe2_op_types.h
index 6613cda..1a6c259 100644
--- a/contrib/nnc/passes/caffe2_frontend/caffe2_op_types.h
+++ b/contrib/nnc/passes/caffe2_frontend/caffe2_op_types.h
@@ -34,6 +34,7 @@ enum class SupportedCaffe2OpType {
   mul,
   relu,
   reshape,
+  resizeNearest,
   sigmoid,
   softmax,
   spatialBN,
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
index 04b7a54..ba13ddb 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
+++ b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
@@ -65,6 +65,7 @@ static void collectUnsupportedOps(std::unique_ptr<onnx::ModelProto>& model) {
       case ONNXOpCode::opGemm:
       case ONNXOpCode::opMax:
       case ONNXOpCode::opMaxPool:
+      case ONNXOpCode::opUpsample:
      case ONNXOpCode::opMul:
       case ONNXOpCode::opPad:
       case ONNXOpCode::opRelu:
@@ -129,9 +130,9 @@ mir::TensorVariant ONNXImporterImpl::createTensor(const onnx::TensorProto* tenso
     buffer_size = tensor->int64_data_size() * element_size;
     auto src_data64 = reinterpret_cast<const int64_t*>(tensor->int64_data().data());
     std::shared_ptr<char> shared_buffer (new char[buffer_size], std::default_delete<char[]>());
-    auto dst_data = reinterpret_cast<int32_t *>(shared_buffer.get());
+    auto dst_data = reinterpret_cast<int32_t*>(shared_buffer.get());
     for (int i = 0; i < tensor->int64_data_size(); i++) {
-      dst_data[i] = static_cast<int32_t>(src_data64 [i]);
+      dst_data[i] = static_cast<int32_t>(src_data64[i]);
     }
     return mir::TensorVariant(shape, shared_buffer, type, element_size);
   } else if (tensor->raw_data().size() != 0) {
@@ -212,14 +213,14 @@ void ONNXImporterImpl::dump(const std::vector<mir::IODescriptor>& input_descrs,
     auto* onnx_op_type = ONNXPerfectHash::getONNXOpType(onnx_node.op_type().c_str(),
                                                         onnx_node.op_type().size());
     switch (onnx_op_type->opCode) {
       case ONNXOpCode::opConv: {
-        assert(dynamic_cast<mir::ops::BiasAddOp *>(op) != nullptr);
-        if (auto* conv = dynamic_cast<mir::ops::Conv2DOp *>(op->getPrevNodes()[0].op)) {
+        assert(dynamic_cast<mir::ops::BiasAddOp*>(op) != nullptr);
+        if (auto* conv = dynamic_cast<mir::ops::Conv2DOp*>(op->getPrevNodes()[0].op)) {
           std::cout << " (Conv2D)Weights" << conv->getKernel().getShape() << " Strides" <<
-            conv->getStrides() << " Padding(" << conv->getPaddingBefore()[0] <<
-            " " << conv->getPaddingBefore()[1] << ")" << ":(" <<
-            conv->getPaddingAfter()[0] << " " << conv->getPaddingAfter()[1] << ")";
+                       conv->getStrides() << " Padding(" << conv->getPaddingBefore()[0] <<
+                       " " << conv->getPaddingBefore()[1] << ")" << ":(" <<
+                       conv->getPaddingAfter()[0] << " " << conv->getPaddingAfter()[1] << ")";
         } else {
-          auto *dept = dynamic_cast<mir::ops::DepthwiseConv2DOp *>(op->getPrevNodes()[0].op);
+          auto* dept = dynamic_cast<mir::ops::DepthwiseConv2DOp*>(op->getPrevNodes()[0].op);
           assert(dept);
           std::cout << " (DepthwiseConv2D)Weights" << dept->getKernel().getShape() << " Strides"
                     << dept->getStrides() << " Padding(" << dept->getPaddingBefore()[0] <<
@@ -237,9 +238,9 @@ void ONNXImporterImpl::dump(const std::vector<mir::IODescriptor>& input_descrs,
         pool = dynamic_cast<mir::ops::PoolOp*>(op->getPrevNodes()[0].op);
       }
       assert(pool);
-        std::cout << " Kernel " << pool->getWindowShape() << " Strides " << pool->getStrides();
+      std::cout << " Kernel " << pool->getWindowShape() << " Strides " << pool->getStrides();
       std::cout << " Padding before: " << pool->getPaddingBefore()[0] << " " <<
-        pool->getPaddingBefore()[1];
+                   pool->getPaddingBefore()[1];
       std::cout << " After: " << pool->getPaddingAfter()[0] << " " << pool->getPaddingAfter()[1];
       break;
     }
@@ -327,6 +328,9 @@ mir::Graph *ONNXImporterImpl::createIR() {
       case ONNXOpCode::opReshape:
         outputs = _opCreator.convertReshape(inputs);
         break;
+      case ONNXOpCode::opUpsample:
+        outputs = _opCreator.convertUpsample(inputs, onnx_node);
+        break;
       case ONNXOpCode::opRelu:
         outputs = _opCreator.convertRelu(inputs);
         break;
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
index 12593dd..cfc0452 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
+++ b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
@@ -38,6 +38,7 @@
 #include "core/modelIR/operations/PoolOp.h"
 #include "core/modelIR/operations/ReluOp.h"
 #include "core/modelIR/operations/ReshapeOp.h"
+#include "core/modelIR/operations/ResizeOp.h"
 #include "core/modelIR/operations/ScaleOp.h"
 #include "core/modelIR/operations/SigmoidOp.h"
 #include "core/modelIR/operations/SoftmaxOp.h"
@@ -55,8 +56,9 @@
 namespace nnc {
 
 using namespace mir;
-static const onnx::AttributeProto* findAttribute(const onnx::NodeProto& onnx_node,
-                                                 std::string name) {
+
+inline static const onnx::AttributeProto* findAttribute(const onnx::NodeProto& onnx_node,
+                                                        const std::string& name) {
   for (auto& att : onnx_node.attribute()) {
     if (!att.name().compare(name)) {
       return &att;
@@ -65,26 +67,31 @@ static const onnx::AttributeProto* findAttribute(const onnx::NodeProto& onnx_nod
   return nullptr;
 }
 
-static std::pair<bool, int64_t> getIntAttribute(const onnx::NodeProto& onnx_node,
-                                                std::string name = "axis") {
-  for (auto att : onnx_node.attribute()) {
-    if (!att.name().compare(name)) {
-      assert(att.type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_INT);
-      return {true, att.i()};
-    }
-  }
-  return {false, 0};
+inline static std::pair<bool, int64_t> getIntAttribute(const onnx::NodeProto& onnx_node,
+                                                       const std::string& name = "axis") {
+  auto result = findAttribute(onnx_node, name);
+  if (!result)
+    return {false, 0};
+  assert(result->type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_INT);
+  return {true, result->i()};
 }
 
-static std::pair<bool, float> getFloatAttribute(const onnx::NodeProto& onnx_node,
-                                                std::string name) {
-  for (auto att : onnx_node.attribute()) {
-    if (!att.name().compare(name)) {
-      assert(att.type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_FLOAT);
-      return {true, att.f()};
-    }
-  }
-  return {false, 0.0};
+inline static std::pair<bool, std::string> getStringAttribute(const onnx::NodeProto& onnx_node,
+                                                              const std::string& name) {
+  auto result = findAttribute(onnx_node, name);
+  if (!result)
+    return {false, ""};
+  assert(result->type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_STRING);
+  return {true, result->s()};
+}
+
+inline static std::pair<bool, float> getFloatAttribute(const onnx::NodeProto& onnx_node,
+                                                       const std::string& name) {
+  auto result = findAttribute(onnx_node, name);
+  if (!result)
+    return {false, 0.0};
+  assert(result->type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_FLOAT);
+  return {true, result->f()};
 }
 
 // Create vector tensor filled with the given value
@@ -131,9 +138,7 @@ static void getKernelStridesPadding(const onnx::NodeProto &onnx_node, KernelStri
     cdata.strides_shape = ShapeHelper::createShape(strides->ints(), strides->ints_size());
 
   if (pads) {
-    // FIXME: it's for 2D only
-    assert(pads->ints_size() == 4);
-    // FIXME: how to use padding here?
+    assert(pads->ints_size() >= 2);
     cdata.padding_before[0] = pads->ints(0);
     cdata.padding_before[1] = pads->ints(1);
     // TODO: ONNX padding could be for the beginning and ending along each axis that's why we
@@ -162,7 +167,7 @@ ONNXOpCreator::convertConv2D(const std::vector<IODescriptor>& inputs,
   auto out_channels = kernel_tensor.getShape().dim(3);
   bool found;
   int num_groups;
-  std::tie (found, num_groups) = getIntAttribute(onnx_node, "group");
+  std::tie(found, num_groups) = getIntAttribute(onnx_node, "group");
   if (!found)
     num_groups = 1;
   bool is_depthwise = (num_groups != 1) && (in_group_size == 1) && (out_channels == num_groups);
@@ -374,6 +379,34 @@ ONNXOpCreator::convertElementwise(const std::vector<IODescriptor>& inputs,
 }
 
 std::vector<IODescriptor>
+ONNXOpCreator::convertUpsample(const std::vector<IODescriptor>& inputs,
+                               const onnx::NodeProto& node) {
+  bool success;
+  std::string mode;
+  std::tie(success, mode) = getStringAttribute(node, "mode");
+  if (!success) mode = "nearest";
+  assert(mode == "nearest" && "Unsupported upscale mode!");
+
+  assert(inputs.size() > 1); // relies on constants being lifted to the initializer list
+  auto* scales = dynamic_cast<ops::ConstantOp*>(inputs[1].op);
+  assert(scales && "Weights could be a constant tensor only");
+  auto scales_tensor = Tensor<float>(scales->getValue());
+  int rank = inputs[0].op->getOutputShape(0).rank();
+  assert(scales_tensor.getShape().numElements() == rank &&
+         "The number of elements of 'scales' should be the same as the rank of input 'X'"
+  );
+  assert(rank == 4 && "Only rank 4 is supported");
+  std::vector<float> scales_vector(4);
+  const int onnx2mir[] = {0, 3, 1, 2};
+  assert(scales_tensor.getShape().rank() == 1 && "Scales are a 1d tensor");
+  for (int i = 0; i < scales_tensor.getShape().numElements(); i++)
+    scales_vector[onnx2mir[i]] = scales_tensor.atOffset(i);
+  return {convertMIRToONNX(createOp<ops::ResizeOp>(convertONNXToMIR(inputs[0]),
+                                                   ops::ResizeOp::ResizeMethod::nearestNeighbor,
+                                                   scales_vector)->getOutput(0))};
+}
+
+std::vector<IODescriptor>
 ONNXOpCreator::convertBatchNorm(const std::vector<IODescriptor>& inputs,
                                 const onnx::NodeProto& onnx_node,
                                 InputTensors& input_tensors) {
@@ -475,7 +508,7 @@ ONNXOpCreator::convertConstant(const onnx::NodeProto& onnx_node,
          (onnx_node.attribute(0).tensors_size() == 0));
   assert(!onnx_node.attribute(0).name().compare("value"));
   auto name = onnx_node.output(0);
-  auto &onnx_tensor = onnx_node.attribute(0).t();
+  auto& onnx_tensor = onnx_node.attribute(0).t();
   auto mir_tensor = ONNXImporterImpl::createTensor(&onnx_tensor);
   input_tensors.insert(std::make_pair(name, mir_tensor));
   auto op = _graph->create<ops::ConstantOp>(name, mir_tensor)->getOutput(0);
@@ -500,7 +533,7 @@ ONNXOpCreator::convertGemm(const std::vector<IODescriptor>& inputs,
   bool trans_a = found ? ivalue : 0;
   std::tie (found, ivalue) = getIntAttribute(onnx_node, "transB");
   bool trans_b = found ? ivalue : 0;
-  std::tie (found, ivalue) = getIntAttribute(onnx_node, "broadcast");
+  std::tie(found, ivalue) = getIntAttribute(onnx_node, "broadcast");
   bool broadcast = found ? ivalue : 0;
   std::tie (found, fvalue) = getFloatAttribute(onnx_node, "alpha");
   float alpha_val = found ? fvalue : 1.0;
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
index 14f5234..7c68e38 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
+++ b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
@@ -83,6 +83,10 @@ public:
                  const onnx::NodeProto& onnx_node);
 
   std::vector<mir::IODescriptor>
+  convertUpsample(const std::vector<mir::IODescriptor>& inputs,
+                  const onnx::NodeProto& onnx_node);
+
+  std::vector<mir::IODescriptor>
   convertElementwise(const std::vector<mir::IODescriptor>& inputs,
                      mir::ops::ElementwiseOp::OpType op_type);
-- 
2.7.4
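Editor's appendix (not part of the patch): the refactored ONNX attribute
getters all share one shape -- a single findAttribute() lookup, an early
{false, default} return when the attribute is absent, and a type assert before
returning the value; callers then pick their own defaults, e.g.
"float alpha_val = found ? fvalue : 1.0;" above. The sketch below reproduces
that lookup-with-default pattern without the onnx protobuf types; FakeAttrs
and the sample attribute values are hypothetical stand-ins for illustration.

    #include <cassert>
    #include <map>
    #include <string>
    #include <tuple>
    #include <utility>

    // Stand-in for onnx::NodeProto attributes: name -> float value.
    using FakeAttrs = std::map<std::string, float>;

    // Mirrors getFloatAttribute(): returns {found, value}; the value is
    // meaningful only when found is true.
    static std::pair<bool, float> getFloatAttribute(const FakeAttrs& node,
                                                    const std::string& name) {
      auto it = node.find(name);
      if (it == node.end())
        return {false, 0.0f};
      return {true, it->second};
    }

    int main() {
      FakeAttrs upsample_node{{"height_scale", 2.0f}};
      bool found;
      float fvalue;
      // Caller-side defaulting, as in convertGemm() above.
      std::tie(found, fvalue) = getFloatAttribute(upsample_node, "height_scale");
      float height_scale = found ? fvalue : 1.0f;  // present -> 2.0
      std::tie(found, fvalue) = getFloatAttribute(upsample_node, "width_scale");
      float width_scale = found ? fvalue : 1.0f;   // absent -> 1.0 (no-op)
      assert(height_scale == 2.0f && width_scale == 1.0f);
      return 0;
    }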