From 3f33a8b6f652b5173e54bda4d9f100516d2314f5 Mon Sep 17 00:00:00 2001 From: =?utf8?q?=D0=9F=D0=B0=D0=B2=D0=B5=D0=BB=20=D0=98=D0=BB=D1=8C=D1=8E?= =?utf8?q?=D1=82=D1=87=D0=B5=D0=BD=D0=BA=D0=BE/AI=20Tools=20Lab=20/SRR/Eng?= =?utf8?q?ineer/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?= Date: Fri, 9 Aug 2019 21:52:50 +0300 Subject: [PATCH] [mir_onnx] Remove pair return from get attributes functions (#6359) * Create default values for attributes * Fix all converters which using this functions Signed-off-by: Pavel Iliutchenko --- compiler/mir-onnx-importer/ONNXHelpers.h | 24 +++++++++++----------- .../mir-onnx-importer/Op/BatchNormalization.cpp | 6 ++---- compiler/mir-onnx-importer/Op/Concat.cpp | 9 ++++---- compiler/mir-onnx-importer/Op/Conv.cpp | 8 +++----- compiler/mir-onnx-importer/Op/Dropout.cpp | 6 ++---- compiler/mir-onnx-importer/Op/Gather.cpp | 6 ++---- compiler/mir-onnx-importer/Op/Gemm.cpp | 19 +++++++---------- compiler/mir-onnx-importer/Op/Pad.cpp | 24 +++++++++++----------- compiler/mir-onnx-importer/Op/Scale.cpp | 6 ++---- compiler/mir-onnx-importer/Op/Softmax.cpp | 6 ++---- compiler/mir-onnx-importer/Op/Upsample.cpp | 7 ++----- 11 files changed, 50 insertions(+), 71 deletions(-) diff --git a/compiler/mir-onnx-importer/ONNXHelpers.h b/compiler/mir-onnx-importer/ONNXHelpers.h index 9700c11..945bc24 100644 --- a/compiler/mir-onnx-importer/ONNXHelpers.h +++ b/compiler/mir-onnx-importer/ONNXHelpers.h @@ -46,34 +46,34 @@ inline const onnx::AttributeProto *findAttribute(const onnx::NodeProto &onnx_nod return nullptr; } -inline std::pair getIntAttribute(const onnx::NodeProto &onnx_node, - const std::string &name = "axis") +inline int64_t getIntAttribute(const onnx::NodeProto &onnx_node, const std::string &name, + const int64_t default_value) { auto result = findAttribute(onnx_node, name); if (!result) - return {false, 0}; + return default_value; assert(result->type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_INT); - return {true, result->i()}; + 
return result->i(); } -inline std::pair getStringAttribute(const onnx::NodeProto &onnx_node, - const std::string &name) +inline std::string getStringAttribute(const onnx::NodeProto &onnx_node, const std::string &name, + const std::string &default_value) { auto result = findAttribute(onnx_node, name); if (!result) - return {false, ""}; + return default_value; assert(result->type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_STRING); - return {true, result->s()}; + return result->s(); } -inline std::pair getFloatAttribute(const onnx::NodeProto &onnx_node, - const std::string &name) +inline float getFloatAttribute(const onnx::NodeProto &onnx_node, const std::string &name, + const float default_value) { auto result = findAttribute(onnx_node, name); if (!result) - return {false, 0.0}; + return default_value; assert(result->type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_FLOAT); - return {true, result->f()}; + return result->f(); } // Create vector tensor filled with the given value diff --git a/compiler/mir-onnx-importer/Op/BatchNormalization.cpp b/compiler/mir-onnx-importer/Op/BatchNormalization.cpp index 30464f1..e22c89d 100644 --- a/compiler/mir-onnx-importer/Op/BatchNormalization.cpp +++ b/compiler/mir-onnx-importer/Op/BatchNormalization.cpp @@ -36,10 +36,8 @@ BatchNormalizationNodeConverter::convert(const onnx::NodeProto &onnx_node, mir::Graph *graph) const { // overall_res = (X - mean) / sqrt(var + epsilon) * scale + bias - bool found; - float value; - std::tie(found, value) = getFloatAttribute(onnx_node, "epsilon"); - float epsilon = found ? 
value : 1e-05f; + // 1e-05f is the default epsilon + float epsilon = getFloatAttribute(onnx_node, "epsilon", 1e-05f); const auto &scale_tensor = dynamic_cast(inputs[1]->getNode())->getValue(); const auto &bias_tensor = dynamic_cast(inputs[2]->getNode())->getValue(); diff --git a/compiler/mir-onnx-importer/Op/Concat.cpp b/compiler/mir-onnx-importer/Op/Concat.cpp index cfeb854..08ac1ca 100644 --- a/compiler/mir-onnx-importer/Op/Concat.cpp +++ b/compiler/mir-onnx-importer/Op/Concat.cpp @@ -28,11 +28,10 @@ ConcatNodeConverter::convert(const onnx::NodeProto &onnx_node, const std::vector &inputs, mir::Graph *graph) const { - bool found; - int axis; - std::tie(found, axis) = getIntAttribute(onnx_node); - if (!found) - throw std::runtime_error("Concat must have 'axis' attribute"); + auto attr = findAttribute(onnx_node, "axis"); + if (!attr) + throw std::runtime_error("Attribute axis is required!"); + int32_t axis = attr->i(); auto result = createOp(graph, inputs, axis); return {result->getOutput(0)}; } diff --git a/compiler/mir-onnx-importer/Op/Conv.cpp b/compiler/mir-onnx-importer/Op/Conv.cpp index 0a3d21d..a41d445 100644 --- a/compiler/mir-onnx-importer/Op/Conv.cpp +++ b/compiler/mir-onnx-importer/Op/Conv.cpp @@ -45,11 +45,9 @@ ConvNodeConverter::convert(const onnx::NodeProto &onnx_node, auto kernel_tensor = mir::transposeTensor<2, 3, 1, 0>(in_weights_tensor); auto in_group_size = kernel_tensor.getShape().dim(2); auto out_channels = kernel_tensor.getShape().dim(3); - bool found; - int num_groups; - std::tie(found, num_groups) = getIntAttribute(onnx_node, "group"); - if (!found) - num_groups = 1; + + // 1 is the default number of groups in convolution + int num_groups = getIntAttribute(onnx_node, "group", 1); bool is_depthwise = (num_groups != 1) && (in_group_size == 1) && (out_channels == num_groups); mir::Operation::Output *result; diff --git a/compiler/mir-onnx-importer/Op/Dropout.cpp b/compiler/mir-onnx-importer/Op/Dropout.cpp index e30923e..31c924a 100644 --- 
a/compiler/mir-onnx-importer/Op/Dropout.cpp +++ b/compiler/mir-onnx-importer/Op/Dropout.cpp @@ -28,10 +28,8 @@ DropoutNodeConverter::convert(const onnx::NodeProto &onnx_node, const std::vector &inputs, mir::Graph *graph) const { - bool found; - float value; - std::tie(found, value) = getFloatAttribute(onnx_node, "ratio"); - float ratio = found ? value : 0.5; // default 0.5 + // 0.5f is the default ratio + float ratio = getFloatAttribute(onnx_node, "ratio", 0.5f); auto result = createOp(graph, inputs[0], ratio); return {result->getOutput(0)}; } diff --git a/compiler/mir-onnx-importer/Op/Gather.cpp b/compiler/mir-onnx-importer/Op/Gather.cpp index 7e82b6a..b6d04a0 100644 --- a/compiler/mir-onnx-importer/Op/Gather.cpp +++ b/compiler/mir-onnx-importer/Op/Gather.cpp @@ -28,10 +28,8 @@ GatherNodeConverter::convert(const onnx::NodeProto &onnx_node, const std::vector &inputs, mir::Graph *graph) const { - bool found; - int value; - std::tie(found, value) = getIntAttribute(onnx_node, "axis"); - int axis = found ? value : 0; + // 0 is the default axis number + int axis = getIntAttribute(onnx_node, "axis", 0); auto result = createOp(graph, inputs[0], inputs[1], axis); return {result->getOutput(0)}; } diff --git a/compiler/mir-onnx-importer/Op/Gemm.cpp b/compiler/mir-onnx-importer/Op/Gemm.cpp index e079137..5f4085a 100644 --- a/compiler/mir-onnx-importer/Op/Gemm.cpp +++ b/compiler/mir-onnx-importer/Op/Gemm.cpp @@ -34,10 +34,6 @@ GemmNodeConverter::convert(const onnx::NodeProto &onnx_node, const std::vector &inputs, mir::Graph *graph) const { - bool found; - int ivalue; - float fvalue; - // Compute Y = alpha * A' * B' + beta * C, where input tensor A has shape (M, K) or (K, M), // input tensor B has shape (K, N) or (N, K), // input tensor C is broadcastable to shape (M, N), and output tensor Y has shape (M, N). @@ -45,14 +41,13 @@ GemmNodeConverter::convert(const onnx::NodeProto &onnx_node, // same for B and transB. 
This operator supports unidirectional broadcasting // (tensor C should be unidirectional broadcastable to tensor A * B). - std::tie(found, ivalue) = getIntAttribute(onnx_node, "transA"); - bool trans_a = found ? static_cast(ivalue) : false; - std::tie(found, ivalue) = getIntAttribute(onnx_node, "transB"); - bool trans_b = found ? static_cast(ivalue) : false; - std::tie(found, fvalue) = getFloatAttribute(onnx_node, "alpha"); - float alpha_val = found ? fvalue : 1.0f; - std::tie(found, fvalue) = getFloatAttribute(onnx_node, "beta"); - float beta_val = found ? fvalue : 1.0f; + // 0 means that no transpose is needed. It is the default value + bool trans_a = getIntAttribute(onnx_node, "transA", 0); + bool trans_b = getIntAttribute(onnx_node, "transB", 0); + + // 1.0f is the default factor + float alpha_val = getFloatAttribute(onnx_node, "alpha", 1.0f); + float beta_val = getFloatAttribute(onnx_node, "beta", 1.0f); // 1. Prepare input matrix A // Flatten the shape by dim(0) diff --git a/compiler/mir-onnx-importer/Op/Pad.cpp b/compiler/mir-onnx-importer/Op/Pad.cpp index c3d3a68..48212a2 100644 --- a/compiler/mir-onnx-importer/Op/Pad.cpp +++ b/compiler/mir-onnx-importer/Op/Pad.cpp @@ -28,21 +28,21 @@ PadNodeConverter::convert(const onnx::NodeProto &onnx_node, const std::vector &inputs, mir::Graph *graph) const { - bool found; - float value; - std::tie(found, value) = getFloatAttribute(onnx_node, "value"); - assert(found); - auto padsAtt = findAttribute(onnx_node, "pads"); - assert(padsAtt); - auto modeAtt = findAttribute(onnx_node, "mode"); - assert(modeAtt); - auto mode = modeAtt->s(); + // 0.0f is the default value to be filled into padded cells. 
+ float value = getFloatAttribute(onnx_node, "value", 0.0f); + auto pads_attr = findAttribute(onnx_node, "pads"); + assert(pads_attr); + // "constant" is the default mode + auto mode = getStringAttribute(onnx_node, "mode", "constant"); + if (mode != "constant") + throw std::runtime_error("Not supported Pad mode attribute!"); + const mir::Scalar scalar(reinterpret_cast(&value), mir::DTYPE::FLOAT32, sizeof(float)); - assert(padsAtt->ints_size() > 0); - int axis_size = padsAtt->ints_size() / 2; + assert(pads_attr->ints_size() > 0); + int axis_size = pads_attr->ints_size() / 2; std::vector> vec(axis_size); - auto *data = padsAtt->ints().data(); + auto *data = pads_attr->ints().data(); for (int i = 0; i < axis_size; i++) { auto pair = std::make_pair(data[i], data[axis_size + i]); diff --git a/compiler/mir-onnx-importer/Op/Scale.cpp b/compiler/mir-onnx-importer/Op/Scale.cpp index 6334082..655d67d 100644 --- a/compiler/mir-onnx-importer/Op/Scale.cpp +++ b/compiler/mir-onnx-importer/Op/Scale.cpp @@ -29,10 +29,8 @@ ScaleNodeConverter::convert(const onnx::NodeProto &onnx_node, const std::vector &inputs, mir::Graph *graph) const { - bool found; - float value; - std::tie(found, value) = getFloatAttribute(onnx_node, "scale"); - float scale_val = found ? value : 1.0; + // 1.0f is the default scale factor + float scale_val = getFloatAttribute(onnx_node, "scale", 1.0f); const auto &shape = inputs[0]->getShape(); auto scale_tensor = createScalarTensor(scale_val, shape); auto scale = createOp(graph, scale_tensor)->getOutput(0); diff --git a/compiler/mir-onnx-importer/Op/Softmax.cpp b/compiler/mir-onnx-importer/Op/Softmax.cpp index 7fc338d..88581cb 100644 --- a/compiler/mir-onnx-importer/Op/Softmax.cpp +++ b/compiler/mir-onnx-importer/Op/Softmax.cpp @@ -28,10 +28,8 @@ SoftmaxNodeConverter::convert(const onnx::NodeProto &onnx_node, const std::vector &inputs, mir::Graph *graph) const { - int axis; - bool found; - std::tie(found, axis) = getIntAttribute(onnx_node); - axis = found ? 
axis : 1; + // 1 is the default axis number + int axis = getIntAttribute(onnx_node, "axis", 1); auto result = createOp(graph, inputs[0], axis); return {result->getOutput(0)}; } diff --git a/compiler/mir-onnx-importer/Op/Upsample.cpp b/compiler/mir-onnx-importer/Op/Upsample.cpp index 4353c3a..cf2d7fe 100644 --- a/compiler/mir-onnx-importer/Op/Upsample.cpp +++ b/compiler/mir-onnx-importer/Op/Upsample.cpp @@ -31,11 +31,8 @@ UpsampleNodeConverter::convert(const onnx::NodeProto &onnx_node, const std::vector &inputs, mir::Graph *graph) const { - bool success; - std::string mode; - std::tie(success, mode) = getStringAttribute(onnx_node, "mode"); - if (!success) - mode = "nearest"; + // "nearest" is the default mode + std::string mode = getStringAttribute(onnx_node, "mode", "nearest"); assert(mode == "nearest" && "Unsupported upscale mode!"); // relies on attributes being lifted to constants (ONNX optimization pass) -- 2.7.4