From 7103b3aacc4371cd61eca77ce9750aabe89f7b34 Mon Sep 17 00:00:00 2001
From: Sergei Barannikov/AI Tools Lab /SRR/Engineer/Samsung Electronics
Date: Wed, 21 Aug 2019 04:02:53 +0900
Subject: [PATCH] [mir_onnx] Rework attribute parsing (#6651)

* Make attribute parsing functions templated and extract them to a separate file.
* Move definition of `getKernelStridesPadding` into cpp file.
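
Usage sketch (illustrative only, not part of the diff below; assumes an
`onnx::NodeProto node` as seen inside a converter):

    #include "AttributeHelpers.h"

    // Required attribute: throws std::runtime_error when it is absent.
    const auto axis = mir_onnx::getAttributeValue<std::int64_t>(node, "axis");
    // Optional attribute: falls back to the supplied default when absent.
    const auto mode = mir_onnx::getAttributeValue<std::string>(node, "mode", "constant");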

Signed-off-by: Sergei Barannikov
---
 compiler/mir-onnx-importer/AttributeHelpers.h    | 97 ++++++++++++++++++++++
 compiler/mir-onnx-importer/CMakeLists.txt        |  1 +
 compiler/mir-onnx-importer/ONNXHelpers.cpp       | 34 ++++++++
 compiler/mir-onnx-importer/ONNXHelpers.h         | 78 +----------------
 compiler/mir-onnx-importer/Op/Add.cpp            |  1 +
 compiler/mir-onnx-importer/Op/AveragePool.cpp    |  7 +-
 .../mir-onnx-importer/Op/BatchNormalization.cpp  |  7 +-
 compiler/mir-onnx-importer/Op/Concat.cpp         |  3 +-
 compiler/mir-onnx-importer/Op/Constant.cpp       | 10 +--
 compiler/mir-onnx-importer/Op/Conv.cpp           | 11 +--
 compiler/mir-onnx-importer/Op/Gather.cpp         |  3 +-
 compiler/mir-onnx-importer/Op/Gemm.cpp           |  9 +-
 compiler/mir-onnx-importer/Op/MaxPool.cpp        |  7 +-
 compiler/mir-onnx-importer/Op/Pad.cpp            | 22 +++--
 compiler/mir-onnx-importer/Op/Reshape.cpp        |  2 +
 compiler/mir-onnx-importer/Op/Softmax.cpp        |  3 +-
 compiler/mir-onnx-importer/Op/Unsqueeze.cpp      | 11 +--
 compiler/mir-onnx-importer/Op/Upsample.cpp       | 11 +--
 18 files changed, 191 insertions(+), 126 deletions(-)
 create mode 100644 compiler/mir-onnx-importer/AttributeHelpers.h

diff --git a/compiler/mir-onnx-importer/AttributeHelpers.h b/compiler/mir-onnx-importer/AttributeHelpers.h
new file mode 100644
index 0000000..24e14d4
--- /dev/null
+++ b/compiler/mir-onnx-importer/AttributeHelpers.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_ATTRIBUTE_HELPERS_H
+#define MIR_ONNX_ATTRIBUTE_HELPERS_H
+
+#include "onnx/onnx.pb.h"
+
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <stdexcept>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace mir_onnx
+{
+
+template <typename T> T getAttributeValue(const onnx::AttributeProto &attribute) = delete;
+
+template <> inline float getAttributeValue(const onnx::AttributeProto &attribute)
+{
+  assert(attribute.type() == onnx::AttributeProto::FLOAT);
+  return attribute.f();
+}
+
+template <> inline std::int64_t getAttributeValue(const onnx::AttributeProto &attribute)
+{
+  assert(attribute.type() == onnx::AttributeProto::INT);
+  return attribute.i();
+}
+
+template <> inline std::string getAttributeValue(const onnx::AttributeProto &attribute)
+{
+  assert(attribute.type() == onnx::AttributeProto::STRING);
+  return attribute.s();
+}
+
+template <> inline onnx::TensorProto getAttributeValue(const onnx::AttributeProto &attribute)
+{
+  assert(attribute.type() == onnx::AttributeProto::TENSOR);
+  return attribute.t();
+}
+
+template <>
+inline std::vector<std::int64_t> getAttributeValue(const onnx::AttributeProto &attribute)
+{
+  assert(attribute.type() == onnx::AttributeProto::INTS);
+  return {attribute.ints().cbegin(), attribute.ints().cend()};
+}
+
+inline const onnx::AttributeProto *findAttribute(const onnx::NodeProto &node,
+                                                 const std::string &name)
+{
+  const auto &attributes = node.attribute();
+  const auto it = std::find_if(
+      attributes.cbegin(), attributes.cend(),
+      [&name](const onnx::AttributeProto &attribute) { return attribute.name() == name; });
+  if (it == attributes.cend())
+    return nullptr;
+  return &*it;
+}
+
+template <typename T> T getAttributeValue(const onnx::NodeProto &node, const std::string &name)
+{
+  const auto *attribute = findAttribute(node, name);
+  if (attribute == nullptr)
+    throw std::runtime_error("Cannot find attribute '" + name + "' in node '" + node.name() + "'.");
+  return getAttributeValue<T>(*attribute);
+}
+
+template <typename T>
+T getAttributeValue(const onnx::NodeProto &node, const std::string &name, T default_value)
+{
+  const auto *attribute = findAttribute(node, name);
+  if (attribute == nullptr)
+    return std::move(default_value);
+  return getAttributeValue<T>(*attribute);
+}
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_ATTRIBUTE_HELPERS_H
diff --git a/compiler/mir-onnx-importer/CMakeLists.txt b/compiler/mir-onnx-importer/CMakeLists.txt
index 1a33476..10de9df 100644
--- a/compiler/mir-onnx-importer/CMakeLists.txt
+++ b/compiler/mir-onnx-importer/CMakeLists.txt
@@ -20,6 +20,7 @@ target_include_directories(mir_onnx_proto PUBLIC ${MIR_ONNX_PROTO_INCLUDE_DIRS})
 target_link_libraries(mir_onnx_proto PUBLIC libprotobuf)
 
 set(MIR_ONNX_IMPORTER_SOURCES
+    AttributeHelpers.h
     ONNXHelpers.cpp
     ONNXHelpers.h
     ONNXImporterImpl.cpp
diff --git a/compiler/mir-onnx-importer/ONNXHelpers.cpp b/compiler/mir-onnx-importer/ONNXHelpers.cpp
index 4e4f1b0..2035918 100644
--- a/compiler/mir-onnx-importer/ONNXHelpers.cpp
+++ b/compiler/mir-onnx-importer/ONNXHelpers.cpp
@@ -15,6 +15,9 @@
  */
 
 #include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/ShapeRange.h"
 
 namespace mir_onnx
 {
@@ -124,4 +127,35 @@ mir::TensorVariant createTensor(const onnx::TensorProto *tensor)
   return mir::TensorVariant(type, shape, src_data);
 }
 
+void getKernelStridesPadding(const onnx::NodeProto &onnx_node, KernelStridesPadding &cdata)
+{
+  const auto kernel_shape = getAttributeValue<std::vector<std::int64_t>>(onnx_node, "kernel_shape");
+  assert(!kernel_shape.empty());
+  const auto strides = getAttributeValue<std::vector<std::int64_t>>(onnx_node, "strides");
+  assert(!strides.empty());
+  const auto *pads_attribute = findAttribute(onnx_node, "pads");
+
+  cdata.kernel_shape = mir::Shape(kernel_shape.size());
+  for (std::size_t i = 0; i < kernel_shape.size(); ++i)
+  {
+    cdata.kernel_shape.dim(i) = kernel_shape[i];
+  }
+
+  cdata.strides_shape = mir::Shape(strides.size());
+  for (std::size_t i = 0; i < strides.size(); ++i)
+  {
+    cdata.strides_shape.dim(i) = strides[i];
+  }
+
+  if (pads_attribute != nullptr)
+  {
+    const auto pads = getAttributeValue<std::vector<std::int64_t>>(*pads_attribute);
+    assert(pads.size() == 4);
+    cdata.padding_before[0] = pads[0];
+    cdata.padding_before[1] = pads[1];
+    cdata.padding_after[0] = pads[2];
+    cdata.padding_after[1] = pads[3];
+  }
+}
+
 } // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/ONNXHelpers.h b/compiler/mir-onnx-importer/ONNXHelpers.h
index ef00f43..8ccf1fc 100644
--- a/compiler/mir-onnx-importer/ONNXHelpers.h
+++ b/compiler/mir-onnx-importer/ONNXHelpers.h
@@ -19,63 +19,16 @@
 #include "mir/Graph.h"
 #include "mir/TensorVariant.h"
 
-#include "mir/ShapeRange.h"
-
 #include "mir/ops/TransposeOp.h"
 
 #include "onnx/onnx.pb.h"
 
-#include <cassert>
-
 namespace mir_onnx
 {
 
 mir::TensorVariant fixGroupedKernel(int groups, const mir::TensorVariant &folded_kernel);
 mir::TensorVariant createTensor(const onnx::TensorProto *tensor);
 
-inline const onnx::AttributeProto *findAttribute(const onnx::NodeProto &onnx_node,
-                                                 const std::string &name)
-{
-  for (auto &att : onnx_node.attribute())
-  {
-    if (att.name() == name)
-    {
-      return &att;
-    }
-  }
-  return nullptr;
-}
-
-inline int64_t getIntAttribute(const onnx::NodeProto &onnx_node, const std::string &name,
-                               const int64_t default_value)
-{
-  auto result = findAttribute(onnx_node, name);
-  if (!result)
-    return default_value;
-  assert(result->type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_INT);
-  return result->i();
-}
-
-inline std::string getStringAttribute(const onnx::NodeProto &onnx_node, const std::string &name,
-                                      const std::string &default_value)
-{
-  auto result = findAttribute(onnx_node, name);
-  if (!result)
-    return default_value;
-  assert(result->type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_STRING);
-  return result->s();
-}
-
-inline float getFloatAttribute(const onnx::NodeProto &onnx_node, const std::string &name,
-                               const float default_value)
-{
-  auto result = findAttribute(onnx_node, name);
-  if (!result)
-    return default_value;
-  assert(result->type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_FLOAT);
-  return result->f();
-}
-
 struct KernelStridesPadding
 {
   mir::Shape kernel_shape;
@@ -84,36 +37,7 @@ struct KernelStridesPadding
   std::vector<int32_t> padding_after{0, 0};
 };
 
-inline void getKernelStridesPadding(const onnx::NodeProto &onnx_node, KernelStridesPadding &cdata)
-{
-  auto *kshape = findAttribute(onnx_node, "kernel_shape");
-  assert(kshape && kshape->ints_size());
-  auto *strides = findAttribute(onnx_node, "strides");
-  assert(strides && strides->ints_size());
-  auto *pads = findAttribute(onnx_node, "pads");
-
-  cdata.kernel_shape = mir::Shape(kshape->ints_size());
-  for (int i = 0; i < kshape->ints_size(); ++i)
-  {
-    cdata.kernel_shape.dim(i) = kshape->ints(i);
-  }
-  cdata.strides_shape = mir::Shape(strides->ints_size());
-  for (int i = 0; i < strides->ints_size(); ++i)
-  {
-    cdata.strides_shape.dim(i) = strides->ints(i);
-  }
-
-  if (pads)
-  {
-    assert(pads->ints_size() == 4);
-    cdata.padding_before[0] = pads->ints(0);
-    cdata.padding_before[1] = pads->ints(1);
-    // TODO: ONNX padding could be for the beginning and ending along each axis that's why we
-    // should select the interesting ones.
-    cdata.padding_after[0] = pads->ints(2);
-    cdata.padding_after[1] = pads->ints(3);
-  }
-}
+void getKernelStridesPadding(const onnx::NodeProto &onnx_node, KernelStridesPadding &cdata);
 
 template <typename OpType, typename... Types>
 mir::Operation *createOp(mir::Graph *graph, Types &&... args)
diff --git a/compiler/mir-onnx-importer/Op/Add.cpp b/compiler/mir-onnx-importer/Op/Add.cpp
index a9542de..e00e4f4 100644
--- a/compiler/mir-onnx-importer/Op/Add.cpp
+++ b/compiler/mir-onnx-importer/Op/Add.cpp
@@ -17,6 +17,7 @@
 #include "Add.h"
 
 #include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
 
 #include "mir/ops/AddOp.h"
 
diff --git a/compiler/mir-onnx-importer/Op/AveragePool.cpp b/compiler/mir-onnx-importer/Op/AveragePool.cpp
index ca1593f..e1ea2a1 100644
--- a/compiler/mir-onnx-importer/Op/AveragePool.cpp
+++ b/compiler/mir-onnx-importer/Op/AveragePool.cpp
@@ -17,6 +17,7 @@
 #include "AveragePool.h"
 
 #include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
 
 #include "mir/ops/PoolOp.h"
 
@@ -40,7 +41,7 @@ void AveragePoolNodeConverter::convert(const onnx::NodeProto &onnx_node,
 void AveragePoolNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
                                          ConverterContext *context) const
 {
-  const auto auto_pad = getStringAttribute(onnx_node, "auto_pad", "NOTSET");
+  const auto auto_pad = getAttributeValue<std::string>(onnx_node, "auto_pad", "NOTSET");
   // auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID.
   if (auto_pad != "NOTSET")
     throw std::runtime_error("Supported only explicit padding!");
@@ -69,7 +70,7 @@ void AveragePoolNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
 void AveragePoolNodeConverter::convertV7(const onnx::NodeProto &onnx_node,
                                          ConverterContext *context) const
 {
-  const auto count_include_pad = getIntAttribute(onnx_node, "count_include_pad", 0);
+  const auto count_include_pad = getAttributeValue<std::int64_t>(onnx_node, "count_include_pad", 0);
   if (count_include_pad != 0)
     throw std::runtime_error("Not supported count_include_pad attribute!");
 
@@ -79,7 +80,7 @@ void AveragePoolNodeConverter::convertV7(const onnx::NodeProto &onnx_node,
 void AveragePoolNodeConverter::convertV10(const onnx::NodeProto &onnx_node,
                                           ConverterContext *context) const
 {
-  const auto ceil_mode = getIntAttribute(onnx_node, "ceil_mode", 0);
+  const auto ceil_mode = getAttributeValue<std::int64_t>(onnx_node, "ceil_mode", 0);
   if (ceil_mode != 0)
     throw std::runtime_error("Not supported ceil_mode attribute!");
 
diff --git a/compiler/mir-onnx-importer/Op/BatchNormalization.cpp b/compiler/mir-onnx-importer/Op/BatchNormalization.cpp
index a6baf5e..9cc02e3 100644
--- a/compiler/mir-onnx-importer/Op/BatchNormalization.cpp
+++ b/compiler/mir-onnx-importer/Op/BatchNormalization.cpp
@@ -17,6 +17,7 @@
 #include "BatchNormalization.h"
 
 #include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
 
 #include "mir/ShapeRange.h"
 #include "mir/Tensor.h"
@@ -56,7 +57,7 @@ void BatchNormalizationNodeConverter::convertV1(const onnx::NodeProto &onnx_node
 void BatchNormalizationNodeConverter::convertV6(const onnx::NodeProto &onnx_node,
                                                 ConverterContext *context) const
 {
-  const auto is_test = getIntAttribute(onnx_node, "is_test", 0);
+  const auto is_test = getAttributeValue<std::int64_t>(onnx_node, "is_test", 0);
   if (is_test != 0)
     throw std::runtime_error("Not supported is_test attribute!");
 
@@ -66,7 +67,7 @@ void BatchNormalizationNodeConverter::convertV6(const onnx::NodeProto &onnx_node
 void BatchNormalizationNodeConverter::convertV7(const onnx::NodeProto &onnx_node,
                                                 ConverterContext *context) const
 {
-  const auto spatial = getIntAttribute(onnx_node, "spatial", 1);
+  const auto spatial = getAttributeValue<std::int64_t>(onnx_node, "spatial", 1);
   if (spatial != 1)
     throw std::runtime_error("Not supported spatial attribute!");
 
@@ -89,7 +90,7 @@ void BatchNormalizationNodeConverter::convertV9(const onnx::NodeProto &onnx_node
   auto var = inputs[4];
 
   // 1e-05f is the default epsilon.
-  const float epsilon = getFloatAttribute(onnx_node, "epsilon", 1e-05f);
+  const auto epsilon = getAttributeValue<float>(onnx_node, "epsilon", 1e-05f);
 
   // Y = (X - mean) * scale / sqrt(var + epsilon) + bias =
   //   = (X + C1) * C2 + bias
diff --git a/compiler/mir-onnx-importer/Op/Concat.cpp b/compiler/mir-onnx-importer/Op/Concat.cpp
index 59ae590..229cb76 100644
--- a/compiler/mir-onnx-importer/Op/Concat.cpp
+++ b/compiler/mir-onnx-importer/Op/Concat.cpp
@@ -17,6 +17,7 @@
 #include "Concat.h"
 
 #include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
 
 #include "mir/ops/ConcatOp.h"
 
@@ -40,7 +41,7 @@ void ConcatNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
   std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
   mir::Graph *graph = context->getGraph();
 
-  const auto axis = getIntAttribute(onnx_node, "axis", 1);
+  const auto axis = getAttributeValue<std::int64_t>(onnx_node, "axis", 1);
 
   auto result = createOp<mir::ops::ConcatOp>(graph, inputs, axis)->getOutput(0);
 
diff --git a/compiler/mir-onnx-importer/Op/Constant.cpp b/compiler/mir-onnx-importer/Op/Constant.cpp
index 7c73cb6..3a1a9e9 100644
--- a/compiler/mir-onnx-importer/Op/Constant.cpp
+++ b/compiler/mir-onnx-importer/Op/Constant.cpp
@@ -17,6 +17,7 @@
 #include "Constant.h"
 
 #include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
 
 #include "mir/TensorVariant.h"
 #include "mir/ops/ConstantOp.h"
@@ -41,16 +42,13 @@ void ConstantNodeConverter::convert(const onnx::NodeProto &onnx_node,
 void ConstantNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
                                       ConverterContext *context) const
 {
-  const auto *value_attr = findAttribute(onnx_node, "value");
-  if (value_attr == nullptr)
-    throw std::runtime_error("Not enough value attribute in Constant operation!");
-  assert(value_attr->type() == onnx::AttributeProto_AttributeType_TENSOR);
+  std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+  mir::Graph *graph = context->getGraph();
 
+  const auto onnx_tensor = getAttributeValue<onnx::TensorProto>(onnx_node, "value");
   const auto &name = onnx_node.output(0);
-  const auto &onnx_tensor = value_attr->t();
   auto mir_tensor = createTensor(&onnx_tensor);
 
-  mir::Graph *graph = context->getGraph();
   auto result = graph->create<mir::ops::ConstantOp>(name, mir_tensor)->getOutput(0);
 
   context->setNodeOutputs(onnx_node, {result});
diff --git a/compiler/mir-onnx-importer/Op/Conv.cpp b/compiler/mir-onnx-importer/Op/Conv.cpp
index 6d9916f..e25f6b1 100644
--- a/compiler/mir-onnx-importer/Op/Conv.cpp
+++ b/compiler/mir-onnx-importer/Op/Conv.cpp
@@ -17,6 +17,7 @@
 #include "Conv.h"
 
 #include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
 
 #include "mir/TensorUtil.h"
 
@@ -39,7 +40,7 @@ void ConvNodeConverter::convert(const onnx::NodeProto &onnx_node, ConverterConte
 
 void ConvNodeConverter::convertV1(const onnx::NodeProto &onnx_node, ConverterContext *context) const
 {
-  const auto auto_pad = getStringAttribute(onnx_node, "auto_pad", "NOTSET");
+  const auto auto_pad = getAttributeValue<std::string>(onnx_node, "auto_pad", "NOTSET");
   // auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID.
   if (auto_pad != "NOTSET")
     throw std::runtime_error("Supported only explicit padding!");
@@ -64,8 +65,8 @@ void ConvNodeConverter::convertV1(const onnx::NodeProto &onnx_node, ConverterCon
   auto out_channels = kernel_tensor.getShape().dim(3);
 
   // 1 is the default number of groups.
-  int num_groups = getIntAttribute(onnx_node, "group", 1);
-  bool is_depthwise = (num_groups != 1) && (in_group_size == 1) && (out_channels == num_groups);
+  const auto group = getAttributeValue<std::int64_t>(onnx_node, "group", 1);
+  bool is_depthwise = (group != 1) && (in_group_size == 1) && (out_channels == group);
 
   mir::Operation::Output *result;
   auto transposed_input = convertONNXToMIR(graph, inputs[0]);
@@ -82,8 +83,8 @@ void ConvNodeConverter::convertV1(const onnx::NodeProto &onnx_node, ConverterCon
   else
   {
     // first we need to convert kernel of grouped convolution to appropriate ordinary kernel
-    if (num_groups != 1)
-      kernel_tensor = fixGroupedKernel(num_groups, kernel_tensor);
+    if (group != 1)
+      kernel_tensor = fixGroupedKernel(group, kernel_tensor);
     kernel_tensor = mir::transposeTensor<3, 0, 1, 2>(kernel_tensor);
     auto kernel = createOp<mir::ops::ConstantOp>(graph, kernel_tensor)->getOutput(0);
     result = createOp<mir::ops::Conv2DOp>(graph, transposed_input, kernel, cdata.strides_shape,
diff --git a/compiler/mir-onnx-importer/Op/Gather.cpp b/compiler/mir-onnx-importer/Op/Gather.cpp
index 425e478..517007e 100644
--- a/compiler/mir-onnx-importer/Op/Gather.cpp
+++ b/compiler/mir-onnx-importer/Op/Gather.cpp
@@ -17,6 +17,7 @@
 #include "Gather.h"
 
 #include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
 
 #include "mir/ops/GatherOp.h"
 
@@ -29,7 +30,7 @@ void GatherNodeConverter::convert(const onnx::NodeProto &onnx_node, ConverterCon
   mir::Graph *graph = context->getGraph();
 
   // 0 is the default axis number.
-  int axis = getIntAttribute(onnx_node, "axis", 0);
+  const auto axis = getAttributeValue<std::int64_t>(onnx_node, "axis", 0);
 
   auto result = createOp<mir::ops::GatherOp>(graph, inputs[0], inputs[1], axis)->getOutput(0);
 
diff --git a/compiler/mir-onnx-importer/Op/Gemm.cpp b/compiler/mir-onnx-importer/Op/Gemm.cpp
index 7fd78cf..793dada 100644
--- a/compiler/mir-onnx-importer/Op/Gemm.cpp
+++ b/compiler/mir-onnx-importer/Op/Gemm.cpp
@@ -17,6 +17,7 @@
 #include "Gemm.h"
 
 #include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
 
 #include "mir/TensorVariant.h"
 
@@ -41,12 +42,12 @@ void GemmNodeConverter::convert(const onnx::NodeProto &onnx_node, ConverterConte
   auto c = inputs[2];
 
   // 1.0f is the default factor.
-  const float alpha_val = getFloatAttribute(onnx_node, "alpha", 1.0f);
-  const float beta_val = getFloatAttribute(onnx_node, "beta", 1.0f);
+  const auto alpha_val = getAttributeValue<float>(onnx_node, "alpha", 1.0f);
+  const auto beta_val = getAttributeValue<float>(onnx_node, "beta", 1.0f);
 
   // 0 means that no transpose is needed. It is the default value.
-  const bool trans_a = getIntAttribute(onnx_node, "transA", 0);
-  const bool trans_b = getIntAttribute(onnx_node, "transB", 0);
+  const auto trans_a = getAttributeValue<std::int64_t>(onnx_node, "transA", 0);
+  const auto trans_b = getAttributeValue<std::int64_t>(onnx_node, "transB", 0);
 
   // Transpose the A and B matrices as needed.
   if (trans_a)
diff --git a/compiler/mir-onnx-importer/Op/MaxPool.cpp b/compiler/mir-onnx-importer/Op/MaxPool.cpp
index 0da6e35..48e35eb 100644
--- a/compiler/mir-onnx-importer/Op/MaxPool.cpp
+++ b/compiler/mir-onnx-importer/Op/MaxPool.cpp
@@ -17,6 +17,7 @@
 #include "MaxPool.h"
 
 #include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
 
 #include "mir/ops/PoolOp.h"
 
@@ -40,7 +41,7 @@ void MaxPoolNodeConverter::convert(const onnx::NodeProto &onnx_node,
 void MaxPoolNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
                                      ConverterContext *context) const
 {
-  const auto auto_pad = getStringAttribute(onnx_node, "auto_pad", "NOTSET");
+  const auto auto_pad = getAttributeValue<std::string>(onnx_node, "auto_pad", "NOTSET");
   // auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID.
   if (auto_pad != "NOTSET")
     throw std::runtime_error("Supported only explicit padding!");
@@ -69,7 +70,7 @@ void MaxPoolNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
 void MaxPoolNodeConverter::convertV8(const onnx::NodeProto &onnx_node,
                                      ConverterContext *context) const
 {
-  const auto storage_order = getIntAttribute(onnx_node, "storage_order", 0);
+  const auto storage_order = getAttributeValue<std::int64_t>(onnx_node, "storage_order", 0);
   if (storage_order != 0)
     throw std::runtime_error("Not supported storage order attribute!");
 
@@ -79,7 +80,7 @@ void MaxPoolNodeConverter::convertV8(const onnx::NodeProto &onnx_node,
 void MaxPoolNodeConverter::convertV10(const onnx::NodeProto &onnx_node,
                                       ConverterContext *context) const
 {
-  const auto ceil_mode = getIntAttribute(onnx_node, "ceil_mode", 0);
+  const auto ceil_mode = getAttributeValue<std::int64_t>(onnx_node, "ceil_mode", 0);
   if (ceil_mode != 0)
     throw std::runtime_error("Not supported ceil_mode attribute!");
 
diff --git a/compiler/mir-onnx-importer/Op/Pad.cpp b/compiler/mir-onnx-importer/Op/Pad.cpp
index b4cec79..f3fdeef 100644
--- a/compiler/mir-onnx-importer/Op/Pad.cpp
+++ b/compiler/mir-onnx-importer/Op/Pad.cpp
@@ -17,6 +17,7 @@
 #include "Pad.h"
 
 #include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
 
 #include "mir/ops/PadOp.h"
 
@@ -42,24 +43,21 @@ void PadNodeConverter::convertPadAttrName(const std::string pad_attr_name,
   mir::Graph *graph = context->getGraph();
 
   // 0.0f is the default value to be filled into padded cells.
-  const float value = getFloatAttribute(onnx_node, "value", 0.0f);
-  const auto *pads_attr = findAttribute(onnx_node, pad_attr_name);
-  assert(pads_attr);
+  const float value = getAttributeValue<float>(onnx_node, "value", 0.0f);
+  const auto pads = getAttributeValue<std::vector<std::int64_t>>(onnx_node, "pads");
   // "constant" is the default mode.
-  auto mode = getStringAttribute(onnx_node, "mode", "constant");
+  auto mode = getAttributeValue<std::string>(onnx_node, "mode", "constant");
   if (mode != "constant")
-    throw std::runtime_error("Not supported Pad mode attribue!");
+    throw std::runtime_error("Not supported Pad mode attribute!");
 
   const mir::Scalar scalar(reinterpret_cast<const char *>(&value), mir::DTYPE::FLOAT32,
                            sizeof(float));
-  assert(pads_attr->ints_size() > 0);
-  int axis_size = pads_attr->ints_size() / 2;
-  std::vector<std::pair<int32_t, int32_t>> vec(axis_size);
-  auto *data = pads_attr->ints().data();
-  for (int i = 0; i < axis_size; i++)
+  assert(!pads.empty());
+  const auto axis_size = pads.size() / 2;
+  std::vector<std::pair<int32_t, int32_t>> vec;
+  for (std::size_t i = 0; i < axis_size; i++)
   {
-    auto pair = std::make_pair(data[i], data[axis_size + i]);
-    vec[i] = pair;
+    vec.emplace_back(pads[i], pads[axis_size + i]);
   }
   auto result = createOp<mir::ops::PadOp>(graph, inputs[0], inputs[0]->getShape().rank(), vec,
                                           scalar)
diff --git a/compiler/mir-onnx-importer/Op/Reshape.cpp b/compiler/mir-onnx-importer/Op/Reshape.cpp
index f8fd69c..ac6a16d 100644
--- a/compiler/mir-onnx-importer/Op/Reshape.cpp
+++ b/compiler/mir-onnx-importer/Op/Reshape.cpp
@@ -17,8 +17,10 @@
 #include "Reshape.h"
 
 #include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
 
 #include "mir/Tensor.h"
+#include "mir/ShapeRange.h"
 
 #include "mir/ops/ConstantOp.h"
 #include "mir/ops/ReshapeOp.h"
diff --git a/compiler/mir-onnx-importer/Op/Softmax.cpp b/compiler/mir-onnx-importer/Op/Softmax.cpp
index 21e4c93..84db24e 100644
--- a/compiler/mir-onnx-importer/Op/Softmax.cpp
+++ b/compiler/mir-onnx-importer/Op/Softmax.cpp
@@ -17,6 +17,7 @@
 #include "Softmax.h"
 
 #include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
 
 #include "mir/ops/SoftmaxOp.h"
 
@@ -30,7 +31,7 @@ void SoftmaxNodeConverter::convert(const onnx::NodeProto &onnx_node,
   mir::Graph *graph = context->getGraph();
 
   // 1 is the default axis number.
-  int axis = getIntAttribute(onnx_node, "axis", 1);
+  const auto axis = getAttributeValue<std::int64_t>(onnx_node, "axis", 1);
 
   auto result = createOp<mir::ops::SoftmaxOp>(graph, inputs[0], axis)->getOutput(0);
 
diff --git a/compiler/mir-onnx-importer/Op/Unsqueeze.cpp b/compiler/mir-onnx-importer/Op/Unsqueeze.cpp
index 0fcb307..e1a8061 100644
--- a/compiler/mir-onnx-importer/Op/Unsqueeze.cpp
+++ b/compiler/mir-onnx-importer/Op/Unsqueeze.cpp
@@ -17,6 +17,7 @@
 #include "Unsqueeze.h"
 
 #include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
 
 #include "mir/ops/ReshapeOp.h"
 
@@ -28,16 +29,16 @@ void UnsqueezeNodeConverter::convert(const onnx::NodeProto &onnx_node,
 {
   std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
   mir::Graph *graph = context->getGraph();
-  auto *axes = findAttribute(onnx_node, "axes");
-  assert(axes && axes->ints_size());
+  const auto axes = getAttributeValue<std::vector<std::int64_t>>(onnx_node, "axes");
+  assert(!axes.empty());
   const mir::Shape &input_shape = inputs[0]->getShape();
-  const int out_rank = input_shape.rank() + axes->ints_size();
+  const int out_rank = input_shape.rank() + static_cast<int>(axes.size());
   mir::Shape out_shape(out_rank);
-  auto ints_iterator = axes->ints().begin();
+  auto ints_iterator = axes.cbegin();
   int j = 0;
   for (int i = 0; i < out_rank; i++)
   {
-    if (ints_iterator < axes->ints().end() && i == *ints_iterator)
+    if (ints_iterator < axes.cend() && i == *ints_iterator)
     {
       out_shape.dim(i) = 1;
       ints_iterator++;
diff --git a/compiler/mir-onnx-importer/Op/Upsample.cpp b/compiler/mir-onnx-importer/Op/Upsample.cpp
index a7e4f78..4bc8326 100644
--- a/compiler/mir-onnx-importer/Op/Upsample.cpp
+++ b/compiler/mir-onnx-importer/Op/Upsample.cpp
@@ -17,6 +17,7 @@
 #include "Upsample.h"
 
 #include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
 
 #include "mir/Tensor.h"
 
@@ -49,11 +50,11 @@ void UpsampleNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
   mir::Graph *graph = context->getGraph();
 
   // "nearest" is the default mode.
-  std::string mode = getStringAttribute(onnx_node, "mode", "nearest");
+  std::string mode = getAttributeValue<std::string>(onnx_node, "mode", "nearest");
   assert(mode == "nearest" && "Unsupported upscale mode!");
 
-  const float h_scale = getFloatAttribute(onnx_node, "height_scale", 0.0f); // required
-  const float w_scale = getFloatAttribute(onnx_node, "width_scale", 0.0f);  // required
+  const float h_scale = getAttributeValue<float>(onnx_node, "height_scale", 0.0f); // required
+  const float w_scale = getAttributeValue<float>(onnx_node, "width_scale", 0.0f);  // required
   if (h_scale < 1.0f || w_scale < 1.0f)
     throw std::runtime_error("Wrong scale attributes!");
 
@@ -81,7 +82,7 @@ void UpsampleNodeConverter::convertV7(const onnx::NodeProto &onnx_node,
   mir::Graph *graph = context->getGraph();
 
   // "nearest" is the default mode.
-  std::string mode = getStringAttribute(onnx_node, "mode", "nearest");
+  std::string mode = getAttributeValue<std::string>(onnx_node, "mode", "nearest");
   assert(mode == "nearest" && "Unsupported upscale mode!");
 
   const auto *scales_attr = findAttribute(onnx_node, "scales");
@@ -116,7 +117,7 @@ void UpsampleNodeConverter::convertV9(const onnx::NodeProto &onnx_node,
   mir::Graph *graph = context->getGraph();
 
   // "nearest" is the default mode.
-  std::string mode = getStringAttribute(onnx_node, "mode", "nearest");
+  const auto mode = getAttributeValue<std::string>(onnx_node, "mode", "nearest");
   assert(mode == "nearest" && "Unsupported upscale mode!");
 
   // relies on attributes being lifted to constants (ONNX optimization pass)
-- 
2.7.4