From 7c69d9a9e527da0dde659f70639f658aa174d459 Mon Sep 17 00:00:00 2001
From: =?utf8?q?=D0=9F=D0=B0=D0=B2=D0=B5=D0=BB=20=D0=98=D0=BB=D1=8C=D1=8E?=
=?utf8?q?=D1=82=D1=87=D0=B5=D0=BD=D0=BA=D0=BE/AI=20Tools=20Lab=20/SRR/Eng?=
=?utf8?q?ineer/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?=
Date: Mon, 5 Aug 2019 21:12:18 +0300
Subject: [PATCH] Extract all helper functions into a separate file (#6237)

* Move all helper functions into one file
* Fix the affected code

Signed-off-by: Pavel Iliutchenko
---
compiler/mir-onnx-importer/CMakeLists.txt | 2 +
compiler/mir-onnx-importer/ONNXHelpers.cpp | 127 ++++++++++++
compiler/mir-onnx-importer/ONNXHelpers.h | 149 ++++++++++++++
compiler/mir-onnx-importer/ONNXImporterImpl.cpp | 58 +-----
compiler/mir-onnx-importer/ONNXImporterImpl.h | 2 -
compiler/mir-onnx-importer/ONNXOpCreator.cpp | 254 +++++-------------------
compiler/mir-onnx-importer/ONNXOpCreator.h | 11 -
7 files changed, 333 insertions(+), 270 deletions(-)
create mode 100644 compiler/mir-onnx-importer/ONNXHelpers.cpp
create mode 100644 compiler/mir-onnx-importer/ONNXHelpers.h
diff --git a/compiler/mir-onnx-importer/CMakeLists.txt b/compiler/mir-onnx-importer/CMakeLists.txt
index 741630d..30af74b 100644
--- a/compiler/mir-onnx-importer/CMakeLists.txt
+++ b/compiler/mir-onnx-importer/CMakeLists.txt
@@ -5,6 +5,8 @@ if (NOT ONNXProto_FOUND)
endif ()
set(MIR_ONNX_IMPORTER_SOURCES
+ ONNXHelpers.cpp
+ ONNXHelpers.h
ONNXImporterImpl.cpp
ONNXImporterImpl.h
ONNXOpCreator.cpp
diff --git a/compiler/mir-onnx-importer/ONNXHelpers.cpp b/compiler/mir-onnx-importer/ONNXHelpers.cpp
new file mode 100644
index 0000000..4e4f1b0
--- /dev/null
+++ b/compiler/mir-onnx-importer/ONNXHelpers.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXHelpers.h"
+
+namespace mir_onnx
+{
+
+mir::TensorVariant fixGroupedKernel(int groups, const mir::TensorVariant &folded_kernel)
+{
+ const int kernel_in_chan_num = 2;
+ const int kernel_out_chan_num = 3;
+
+ const mir::Shape &kernel_shape = folded_kernel.getShape();
+ auto kernel_in_channels = kernel_shape.dim(kernel_in_chan_num);
+ auto kernel_out_channels = kernel_shape.dim(kernel_out_chan_num);
+ auto in_channels = kernel_in_channels * groups;
+
+ // Original kernel has shape [H, W, inputChannels/groups, outputChannels]
+ // here creates unfolded kernel with shape [H, W, inputChannels, outputChannels]
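+ // e.g. with groups == 2 and a folded kernel of shape [3, 3, 4, 8] the
+ // unfolded kernel is [3, 3, 8, 8]: output channels 0..3 keep weights for
+ // input channels 0..3, output channels 4..7 for input channels 4..7, and
+ // all other weights are zero.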
+ mir::Shape unfold_kernel_shape(kernel_shape);
+ unfold_kernel_shape.dim(kernel_in_chan_num) = in_channels;
+ size_t data_size = folded_kernel.getElementSize();
+ mir::TensorVariant unfold_kernel(folded_kernel.getDataType(), unfold_kernel_shape);
+
+ int in_group_size = kernel_in_channels;
+ int out_group_size = kernel_out_channels / groups;
+ assert(kernel_out_channels % groups == 0);
+
+ // Iterate over "unfolded" kernel Shape and insert appropriate values into result kernel
+ for (const mir::Index &idx : mir::ShapeRange(unfold_kernel_shape))
+ {
+ auto in_group_no = idx.at(kernel_in_chan_num) / in_group_size;
+ auto out_group_no = idx.at(kernel_out_chan_num) / out_group_size;
+ // check that input channel group fits output channel group
+ if (in_group_no == out_group_no)
+ {
+ // compute index in original kernel that corresponds output index
+ mir::Index folded_idx(idx);
+ folded_idx.at(kernel_in_chan_num) %= in_group_size;
+
+ std::copy(folded_kernel.at(folded_idx), folded_kernel.at(folded_idx) + data_size,
+ unfold_kernel.at(idx));
+ }
+ else
+ {
+ // fill element of output kernel with zero element
+ assert(folded_kernel.getDataType() == mir::DTYPE::FLOAT32 &&
+ "unsupported data type, add appropriate zero element creation");
+ auto elem = reinterpret_cast<float *>(unfold_kernel.at(idx));
+ *elem = 0.0f;
+ }
+ }
+ return unfold_kernel;
+}
+
+mir::TensorVariant createTensor(const onnx::TensorProto *tensor)
+{
+ mir::DTYPE type;
+ const void *src_data;
+ mir::Shape shape(tensor->dims_size());
+ for (int i = 0; i < tensor->dims_size(); ++i)
+ {
+ shape.dim(i) = tensor->dims(i);
+ }
+
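+ // An ONNX TensorProto stores elements either in a type-specific repeated
+ // field (float_data, double_data, int32_data, int64_data) or packed into
+ // raw_data, so probe the typed fields first and fall back to raw_data.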
+ if (tensor->float_data_size() != 0)
+ {
+ assert(tensor->data_type() == onnx::TensorProto::FLOAT);
+ type = mir::DTYPE::FLOAT32;
+ src_data = tensor->float_data().data();
+ }
+ else if (tensor->double_data_size() != 0)
+ {
+ assert(tensor->data_type() == onnx::TensorProto::DOUBLE);
+ type = mir::DTYPE::FLOAT64;
+ src_data = tensor->double_data().data();
+ }
+ else if (tensor->int32_data_size() != 0)
+ {
+ assert(tensor->data_type() == onnx::TensorProto::INT32);
+ type = mir::DTYPE::INT32;
+ src_data = tensor->int32_data().data();
+ }
+ else if (tensor->int64_data_size() != 0)
+ {
+ assert(tensor->data_type() == onnx::TensorProto::INT64);
+ type = mir::DTYPE::INT64;
+ src_data = tensor->int64_data().data();
+ }
+ else if (tensor->has_raw_data())
+ {
+ switch (tensor->data_type())
+ {
+ case onnx::TensorProto::FLOAT:
+ type = mir::DTYPE::FLOAT32;
+ break;
+ case onnx::TensorProto::INT64:
+ type = mir::DTYPE::INT64;
+ break;
+ default:
+ throw std::runtime_error("Unsupported data type");
+ }
+ src_data = tensor->raw_data().data();
+ }
+ else
+ {
+ throw std::runtime_error("Invalid data in Proto file, investigate");
+ }
+
+ return mir::TensorVariant(type, shape, src_data);
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/ONNXHelpers.h b/compiler/mir-onnx-importer/ONNXHelpers.h
new file mode 100644
index 0000000..9700c11
--- /dev/null
+++ b/compiler/mir-onnx-importer/ONNXHelpers.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MIR_ONNX_HELPERS_H__
+#define __MIR_ONNX_HELPERS_H__
+
+#include "mir/Graph.h"
+#include "mir/TensorVariant.h"
+#include "mir/ShapeRange.h"
+
+#include "mir/ops/TransposeOp.h"
+
+#include "onnx/onnx.pb.h"
+
+#include <utility>
+
+namespace mir_onnx
+{
+
+mir::TensorVariant fixGroupedKernel(int groups, const mir::TensorVariant &folded_kernel);
+mir::TensorVariant createTensor(const onnx::TensorProto *tensor);
+
+inline const onnx::AttributeProto *findAttribute(const onnx::NodeProto &onnx_node,
+ const std::string &name)
+{
+ for (auto &att : onnx_node.attribute())
+ {
+ if (att.name() == name)
+ {
+ return &att;
+ }
+ }
+ return nullptr;
+}
+
+inline std::pair<bool, int> getIntAttribute(const onnx::NodeProto &onnx_node,
+ const std::string &name = "axis")
+{
+ auto result = findAttribute(onnx_node, name);
+ if (!result)
+ return {false, 0};
+ assert(result->type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_INT);
+ return {true, result->i()};
+}
+
+inline std::pair<bool, std::string> getStringAttribute(const onnx::NodeProto &onnx_node,
+ const std::string &name)
+{
+ auto result = findAttribute(onnx_node, name);
+ if (!result)
+ return {false, ""};
+ assert(result->type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_STRING);
+ return {true, result->s()};
+}
+
+inline std::pair<bool, float> getFloatAttribute(const onnx::NodeProto &onnx_node,
+ const std::string &name)
+{
+ auto result = findAttribute(onnx_node, name);
+ if (!result)
+ return {false, 0.0};
+ assert(result->type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_FLOAT);
+ return {true, result->f()};
+}
+
+// Create vector tensor filled with the given value
+// TODO: it should be template
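+// (Used by ONNXOpCreator to materialize scalar attributes, e.g. Gemm's
+// alpha/beta and Scale's factor, as constant operands.)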
+inline mir::TensorVariant createScalarTensor(float value, const mir::Shape &shape)
+{
+ std::vector<float> values(static_cast<std::size_t>(shape.numElements()), value);
+ return mir::TensorVariant(mir::DTYPE::FLOAT32, {shape.numElements()}, values.data());
+}
+
+struct KernelStridesPadding
+{
+ mir::Shape kernel_shape;
+ mir::Shape strides_shape;
+ std::vector<int32_t> padding_before{0, 0};
+ std::vector<int32_t> padding_after{0, 0};
+};
+
+inline void getKernelStridesPadding(const onnx::NodeProto &onnx_node, KernelStridesPadding &cdata)
+{
+ auto *kshape = findAttribute(onnx_node, "kernel_shape");
+ assert(kshape && kshape->ints_size());
+ auto *strides = findAttribute(onnx_node, "strides");
+ assert(strides && strides->ints_size());
+ auto *pads = findAttribute(onnx_node, "pads");
+
+ cdata.kernel_shape = mir::Shape(kshape->ints_size());
+ for (int i = 0; i < kshape->ints_size(); ++i)
+ {
+ cdata.kernel_shape.dim(i) = kshape->ints(i);
+ }
+ cdata.strides_shape = mir::Shape(strides->ints_size());
+ for (int i = 0; i < strides->ints_size(); ++i)
+ {
+ cdata.strides_shape.dim(i) = strides->ints(i);
+ }
+
+ if (pads)
+ {
+ assert(pads->ints_size() == 4);
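+ // ONNX "pads" lists the begin values for all spatial axes first, then the
+ // end values; for 2D that is [top, left, bottom, right].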
+ cdata.padding_before[0] = pads->ints(0);
+ cdata.padding_before[1] = pads->ints(1);
+ // TODO: ONNX padding could be for the beginning and ending along each axis that's why we
+ // should select the interesting ones.
+ cdata.padding_after[0] = pads->ints(2);
+ cdata.padding_after[1] = pads->ints(3);
+ }
+}
+
+template <typename OpType, typename... Types>
+mir::Operation *createOp(mir::Graph *graph, Types &&... args)
+{
+ // TODO: how to name operations?
+ return graph->create<OpType>("", std::forward<Types>(args)...);
+}
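+
+// e.g. createOp<mir::ops::ReluOp>(graph, input) creates an unnamed ReluOp in
+// the graph; the concrete operation type is always supplied explicitly.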
+
+inline mir::Operation::Output *convertONNXToMIR(mir::Graph *graph, mir::Operation::Output *arg)
+{
+ // NCHW -> NHWC
+ return createOp<mir::ops::TransposeOp>(graph, arg, std::vector<std::size_t>{0, 2, 3, 1})
+ ->getOutput(0);
+}
+
+inline mir::Operation::Output *convertMIRToONNX(mir::Graph *graph, mir::Operation::Output *arg)
+{
+ // NHWC -> NCHW
+ return createOp<mir::ops::TransposeOp>(graph, arg, std::vector<std::size_t>{0, 3, 1, 2})
+ ->getOutput(0);
+}
+
+} // namespace mir_onnx
+
+#endif // __MIR_ONNX_HELPERS_H__
diff --git a/compiler/mir-onnx-importer/ONNXImporterImpl.cpp b/compiler/mir-onnx-importer/ONNXImporterImpl.cpp
index 41eaea2..430bb11 100644
--- a/compiler/mir-onnx-importer/ONNXImporterImpl.cpp
+++ b/compiler/mir-onnx-importer/ONNXImporterImpl.cpp
@@ -15,6 +15,7 @@
*/
#include "ONNXImporterImpl.h"
+#include "ONNXHelpers.h"
#include "ONNXPerfectHash.h"
#include "ONNXOpCreator.h"
#include "ONNXOpType.h"
@@ -134,63 +135,6 @@ void ONNXImporterImpl::collectUnsupportedOps()
}
}
-mir::TensorVariant ONNXImporterImpl::createTensor(const onnx::TensorProto *tensor)
-{
- mir::DTYPE type;
- const void *src_data;
- mir::Shape shape(tensor->dims_size());
- for (int i = 0; i < tensor->dims_size(); ++i)
- {
- shape.dim(i) = tensor->dims(i);
- }
-
- if (tensor->float_data_size() != 0)
- {
- assert(tensor->data_type() == onnx::TensorProto::FLOAT);
- type = mir::DTYPE::FLOAT32;
- src_data = tensor->float_data().data();
- }
- else if (tensor->double_data_size() != 0)
- {
- assert(tensor->data_type() == onnx::TensorProto::DOUBLE);
- type = mir::DTYPE::FLOAT64;
- src_data = tensor->double_data().data();
- }
- else if (tensor->int32_data_size() != 0)
- {
- assert(tensor->data_type() == onnx::TensorProto::INT32);
- type = mir::DTYPE::INT32;
- src_data = tensor->int32_data().data();
- }
- else if (tensor->int64_data_size() != 0)
- {
- assert(tensor->data_type() == onnx::TensorProto::INT64);
- type = mir::DTYPE::INT64;
- src_data = tensor->int64_data().data();
- }
- else if (tensor->has_raw_data())
- {
- switch (tensor->data_type())
- {
- case onnx::TensorProto::FLOAT:
- type = mir::DTYPE::FLOAT32;
- break;
- case onnx::TensorProto::INT64:
- type = mir::DTYPE::INT64;
- break;
- default:
- throw std::runtime_error("Unsupported data type");
- }
- src_data = tensor->raw_data().data();
- }
- else
- {
- throw std::runtime_error("Invalid data in Proto file, investigate");
- }
-
- return mir::TensorVariant(type, shape, src_data);
-}
-
void ONNXImporterImpl::createGraphInputs()
{
auto &graph = _model->graph();
diff --git a/compiler/mir-onnx-importer/ONNXImporterImpl.h b/compiler/mir-onnx-importer/ONNXImporterImpl.h
index 6e352dd..9f08e42 100644
--- a/compiler/mir-onnx-importer/ONNXImporterImpl.h
+++ b/compiler/mir-onnx-importer/ONNXImporterImpl.h
@@ -36,8 +36,6 @@ public:
/// @brief Load the model and convert it into a MIR Graph.
std::unique_ptr<mir::Graph> importModel();
- static mir::TensorVariant createTensor(const onnx::TensorProto *tensor);
-
private:
void import();
std::unique_ptr<mir::Graph> createIR();
diff --git a/compiler/mir-onnx-importer/ONNXOpCreator.cpp b/compiler/mir-onnx-importer/ONNXOpCreator.cpp
index b4a497e..8f7b87b 100644
--- a/compiler/mir-onnx-importer/ONNXOpCreator.cpp
+++ b/compiler/mir-onnx-importer/ONNXOpCreator.cpp
@@ -15,6 +15,7 @@
*/
#include "ONNXOpCreator.h"
+#include "ONNXHelpers.h"
#include "ONNXImporterImpl.h"
#include "mir/ops/BatchNormOp.h"
@@ -53,146 +54,8 @@
namespace mir_onnx
{
-static mir::TensorVariant fixGroupedKernel(int groups, const mir::TensorVariant &folded_kernel)
-{
- const int kernel_in_chan_num = 2;
- const int kernel_out_chan_num = 3;
-
- const mir::Shape &kernel_shape = folded_kernel.getShape();
- auto kernel_in_channels = kernel_shape.dim(kernel_in_chan_num);
- auto kernel_out_channels = kernel_shape.dim(kernel_out_chan_num);
- auto in_channels = kernel_in_channels * groups;
-
- // Original kernel has shape [H, W, inputChannels/groups, outputChannels]
- // here creates unfolded kernel with shape [H, W, inputChannels, outputChannels]
- mir::Shape unfold_kernel_shape(kernel_shape);
- unfold_kernel_shape.dim(kernel_in_chan_num) = in_channels;
- size_t data_size = folded_kernel.getElementSize();
- mir::TensorVariant unfold_kernel(folded_kernel.getDataType(), unfold_kernel_shape);
-
- int in_group_size = kernel_in_channels;
- int out_group_size = kernel_out_channels / groups;
- assert(kernel_out_channels % groups == 0);
-
- // Iterate over "unfolded" kernel Shape and insert appropriate values into result kernel
- for (const mir::Index &idx : mir::ShapeRange(unfold_kernel_shape))
- {
- auto in_group_no = idx.at(kernel_in_chan_num) / in_group_size;
- auto out_group_no = idx.at(kernel_out_chan_num) / out_group_size;
- // check that input channel group fits output channel group
- if (in_group_no == out_group_no)
- {
- // compute index in original kernel that corresponds output index
- mir::Index folded_idx(idx);
- folded_idx.at(kernel_in_chan_num) %= in_group_size;
-
- std::copy(folded_kernel.at(folded_idx), folded_kernel.at(folded_idx) + data_size,
- unfold_kernel.at(idx));
- }
- else
- {
- // fill element of output kernel with zero element
- assert(folded_kernel.getDataType() == mir::DTYPE::FLOAT32 &&
- "unsupported data type, add appropriate zero element creation");
- auto elem = reinterpret_cast<float *>(unfold_kernel.at(idx));
- *elem = 0.0f;
- }
- }
- return unfold_kernel;
-}
-
using namespace mir;
-static const onnx::AttributeProto *findAttribute(const onnx::NodeProto &onnx_node,
- const std::string &name)
-{
- for (auto &att : onnx_node.attribute())
- {
- if (att.name() == name)
- {
- return &att;
- }
- }
- return nullptr;
-}
-
-static std::pair<bool, int> getIntAttribute(const onnx::NodeProto &onnx_node,
- const std::string &name = "axis")
-{
- auto result = findAttribute(onnx_node, name);
- if (!result)
- return {false, 0};
- assert(result->type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_INT);
- return {true, result->i()};
-}
-
-static std::pair<bool, std::string> getStringAttribute(const onnx::NodeProto &onnx_node,
- const std::string &name)
-{
- auto result = findAttribute(onnx_node, name);
- if (!result)
- return {false, ""};
- assert(result->type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_STRING);
- return {true, result->s()};
-}
-
-static std::pair<bool, float> getFloatAttribute(const onnx::NodeProto &onnx_node,
- const std::string &name)
-{
- auto result = findAttribute(onnx_node, name);
- if (!result)
- return {false, 0.0};
- assert(result->type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_FLOAT);
- return {true, result->f()};
-}
-
-// Create vector tensor filled with the given value
-// TODO: it should be template
-static TensorVariant createTensor(float value, const mir::Shape &shape)
-{
- std::vector<float> values(static_cast<std::size_t>(shape.numElements()), value);
- return mir::TensorVariant(mir::DTYPE::FLOAT32, {shape.numElements()}, values.data());
-}
-
-struct KernelStridesPadding
-{
- Shape kernel_shape;
- Shape strides_shape;
- std::vector<int32_t> padding_before{0, 0};
- std::vector<int32_t> padding_after{0, 0};
-};
-
-static void getKernelStridesPadding(const onnx::NodeProto &onnx_node, KernelStridesPadding &cdata)
-{
- auto *kshape = findAttribute(onnx_node, "kernel_shape");
- assert(kshape && kshape->ints_size());
- auto *strides = findAttribute(onnx_node, "strides");
- assert(strides && strides->ints_size());
- auto *pads = findAttribute(onnx_node, "pads");
-
- cdata.kernel_shape = mir::Shape(kshape->ints_size());
- for (int i = 0; i < kshape->ints_size(); ++i)
- {
- cdata.kernel_shape.dim(i) = kshape->ints(i);
- }
- cdata.strides_shape = mir::Shape(strides->ints_size());
- for (int i = 0; i < strides->ints_size(); ++i)
- {
- cdata.strides_shape.dim(i) = strides->ints(i);
- }
-
- if (pads)
- {
- assert(pads->ints_size() == 4);
- cdata.padding_before[0] = pads->ints(0);
- cdata.padding_before[1] = pads->ints(1);
- // TODO: ONNX padding could be for the beginning and ending along each axis that's why we
- // should select the interesting ones.
- cdata.padding_after[0] = pads->ints(2);
- cdata.padding_after[1] = pads->ints(3);
- }
-}
-
std::vector<mir::Operation::Output *>
ONNXOpCreator::convertConv2D(const std::vector<mir::Operation::Output *> &inputs,
const onnx::NodeProto &onnx_node)
@@ -217,13 +80,13 @@ ONNXOpCreator::convertConv2D(const std::vector &inputs
bool is_depthwise = (num_groups != 1) && (in_group_size == 1) && (out_channels == num_groups);
mir::Operation *result;
- auto transposed_input = convertONNXToMIR(inputs[0]);
+ auto transposed_input = convertONNXToMIR(_graph, inputs[0]);
if (is_depthwise)
{
// TODO handle properly kernel with layer multiplier
auto transposed_tensor = mir::transposeTensor<0, 1, 3, 2>(kernel_tensor);
- auto kernel = createOp<ops::ConstantOp>(transposed_tensor)->getOutput(0);
- result = createOp<ops::DepthwiseConv2DOp>(transposed_input, kernel, cdata.strides_shape,
+ auto kernel = createOp<ops::ConstantOp>(_graph, transposed_tensor)->getOutput(0);
+ result = createOp<ops::DepthwiseConv2DOp>(_graph, transposed_input, kernel, cdata.strides_shape,
cdata.padding_before, cdata.padding_after);
}
else
@@ -232,15 +95,15 @@ ONNXOpCreator::convertConv2D(const std::vector &inputs
if (num_groups != 1)
kernel_tensor = fixGroupedKernel(num_groups, kernel_tensor);
kernel_tensor = transposeTensor<3, 0, 1, 2>(kernel_tensor);
- auto kernel = createOp<ops::ConstantOp>(kernel_tensor)->getOutput(0);
- result = createOp<ops::Conv2DOp>(transposed_input, kernel, cdata.strides_shape,
+ auto kernel = createOp<ops::ConstantOp>(_graph, kernel_tensor)->getOutput(0);
+ result = createOp<ops::Conv2DOp>(_graph, transposed_input, kernel, cdata.strides_shape,
cdata.padding_before, cdata.padding_after);
}
if (inputs.size() > 2)
- result = createOp<ops::BiasAddOp>(result->getOutput(0), inputs[2]);
+ result = createOp<ops::BiasAddOp>(_graph, result->getOutput(0), inputs[2]);
- return {convertMIRToONNX(result->getOutput(0))};
+ return {convertMIRToONNX(_graph, result->getOutput(0))};
}
std::vector<mir::Operation::Output *>
@@ -252,7 +115,7 @@ ONNXOpCreator::convertConcat(const std::vector &inputs
std::tie(found, axis) = getIntAttribute(onnx_node);
if (!found)
throw std::runtime_error("Concat must have 'axis' attribute");
- auto result = createOp<ops::ConcatOp>(inputs, axis);
+ auto result = createOp<ops::ConcatOp>(_graph, inputs, axis);
return {result->getOutput(0)};
}
@@ -264,7 +127,7 @@ ONNXOpCreator::convertGather(const std::vector &inputs
int value;
std::tie(found, value) = getIntAttribute(onnx_node, "axis");
int axis = found ? value : 0;
- auto result = createOp<ops::GatherOp>(inputs[0], inputs[1], axis);
+ auto result = createOp<ops::GatherOp>(_graph, inputs[0], inputs[1], axis);
return {result->getOutput(0)};
}
@@ -293,7 +156,7 @@ ONNXOpCreator::convertPad(const std::vector &inputs,
auto pair = std::make_pair(data[i], data[last - i]);
vec[i] = pair;
}
- auto result = createOp<ops::PadOp>(inputs[0], inputs[0]->getShape().rank(), vec, scalar);
+ auto result = createOp<ops::PadOp>(_graph, inputs[0], inputs[0]->getShape().rank(), vec, scalar);
return {result->getOutput(0)};
}
@@ -306,7 +169,7 @@ ONNXOpCreator::convertPool(const std::vector &inputs,
KernelStridesPadding cdata;
// Transpose ONNX NCHW to MIR NHWC
- auto t_input = convertONNXToMIR(inputs[0]);
+ auto t_input = convertONNXToMIR(_graph, inputs[0]);
switch (op_code)
{
@@ -333,9 +196,10 @@ ONNXOpCreator::convertPool(const std::vector &inputs,
default:
assert(false);
}
- auto result = createOp<ops::PoolOp>(t_input, pool_type, cdata.kernel_shape, cdata.strides_shape,
- cdata.padding_before, cdata.padding_after, border_type);
- return {convertMIRToONNX(result->getOutput(0))};
+ auto result =
+ createOp<ops::PoolOp>(_graph, t_input, pool_type, cdata.kernel_shape, cdata.strides_shape,
+ cdata.padding_before, cdata.padding_after, border_type);
+ return {convertMIRToONNX(_graph, result->getOutput(0))};
}
std::vector<mir::Operation::Output *>
@@ -346,7 +210,7 @@ ONNXOpCreator::convertSoftmax(const std::vector &input
bool found;
std::tie(found, axis) = getIntAttribute(onnx_node);
axis = found ? axis : 1;
- auto result = createOp<ops::SoftmaxOp>(inputs[0], axis);
+ auto result = createOp<ops::SoftmaxOp>(_graph, inputs[0], axis);
return {result->getOutput(0)};
}
@@ -382,7 +246,7 @@ ONNXOpCreator::convertReshape(const std::vector &input
i++;
}
auto out_shape = Shape(shape_vector);
- auto result = createOp<ops::ReshapeOp>(inputs[0], out_shape);
+ auto result = createOp<ops::ReshapeOp>(_graph, inputs[0], out_shape);
return {result->getOutput(0)};
}
@@ -410,7 +274,7 @@ ONNXOpCreator::convertUnsqueeze(const std::vector &inp
j++;
}
}
- auto result = createOp<ops::ReshapeOp>(inputs[0], out_shape);
+ auto result = createOp<ops::ReshapeOp>(_graph, inputs[0], out_shape);
return {result->getOutput(0)};
}
@@ -418,7 +282,7 @@ std::vector
ONNXOpCreator::convertRelu(const std::vector<mir::Operation::Output *> &inputs)
{
assert(inputs.size() == 1);
- auto result = createOp<ops::ReluOp>(inputs[0]);
+ auto result = createOp<ops::ReluOp>(_graph, inputs[0]);
return {result->getOutput(0)};
}
@@ -426,7 +290,7 @@ std::vector
ONNXOpCreator::convertSigmoid(const std::vector<mir::Operation::Output *> &inputs)
{
assert(inputs.size() == 1);
- auto result = createOp<ops::SigmoidOp>(inputs[0]);
+ auto result = createOp<ops::SigmoidOp>(_graph, inputs[0]);
return {result->getOutput(0)};
}
@@ -434,7 +298,7 @@ std::vector
ONNXOpCreator::convertElementwise(const std::vector<mir::Operation::Output *> &inputs,
mir::ops::ElementwiseOp::OpType op_type)
{
- auto result = createOp<ops::ElementwiseOp>(inputs, op_type);
+ auto result = createOp<ops::ElementwiseOp>(_graph, inputs, op_type);
return {result->getOutput(0)};
}
@@ -463,10 +327,10 @@ ONNXOpCreator::convertUpsample(const std::vector &inpu
assert(scales_tensor.getShape().rank() == 1 && "Scales are a 1d tensor");
for (int i = 0; i < scales_tensor.getShape().numElements(); i++)
scales_vector[onnx2mir[i]] = scales_tensor.atOffset(i);
- return {convertMIRToONNX(createOp<ops::ResizeOp>(convertONNXToMIR(inputs[0]),
- ops::ResizeOp::ResizeMethod::nearestNeighbor,
- scales_vector)
- ->getOutput(0))};
+ return {convertMIRToONNX(
+ _graph, createOp<ops::ResizeOp>(_graph, convertONNXToMIR(_graph, inputs[0]),
+ ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
+ ->getOutput(0))};
}
std::vector<mir::Operation::Output *>
@@ -490,23 +354,23 @@ ONNXOpCreator::convertBatchNorm(const std::vector &inp
for (auto &idx : ShapeRange(bias_data.getShape()))
bias_data.at(idx) *= -1;
- auto data = convertONNXToMIR(inputs[0]);
- auto mean = createOp<ops::ConstantOp>(mean_tensor)->getOutput(0);
- auto result = createOp<ops::BiasAddOp>(data, mean);
+ auto data = convertONNXToMIR(_graph, inputs[0]);
+ auto mean = createOp<ops::ConstantOp>(_graph, mean_tensor)->getOutput(0);
+ auto result = createOp<ops::BiasAddOp>(_graph, data, mean);
// res2 = res1 * scale / (var + epsilon)
Tensor<float> multiplier(scale_tensor);
Tensor<float> var_accessor(var_tensor);
for (auto &idx : ShapeRange(scale_tensor.getShape()))
multiplier.at(idx) /= std::sqrt(var_accessor.at(idx) + epsilon);
- auto scale = createOp<ops::ConstantOp>(scale_tensor)->getOutput(0);
- result = createOp<ops::ScaleOp>(result->getOutput(0), scale);
+ auto scale = createOp<ops::ConstantOp>(_graph, scale_tensor)->getOutput(0);
+ result = createOp<ops::ScaleOp>(_graph, result->getOutput(0), scale);
// overall_res = res2 + bias
- auto bias = createOp<ops::ConstantOp>(bias_tensor)->getOutput(0);
- result = createOp<ops::BiasAddOp>(result->getOutput(0), bias);
+ auto bias = createOp<ops::ConstantOp>(_graph, bias_tensor)->getOutput(0);
+ result = createOp<ops::BiasAddOp>(_graph, result->getOutput(0), bias);
- return {convertMIRToONNX(result->getOutput(0))};
+ return {convertMIRToONNX(_graph, result->getOutput(0))};
}
std::vector<mir::Operation::Output *>
@@ -517,7 +381,7 @@ ONNXOpCreator::convertDropout(const std::vector &input
float value;
std::tie(found, value) = getFloatAttribute(onnx_node, "ratio");
float ratio = found ? value : 1.0;
- auto result = createOp<ops::DropoutOp>(inputs[0], ratio);
+ auto result = createOp<ops::DropoutOp>(_graph, inputs[0], ratio);
return {result->getOutput(0)};
}
@@ -530,9 +394,9 @@ ONNXOpCreator::convertScale(const std::vector &inputs,
std::tie(found, value) = getFloatAttribute(onnx_node, "scale");
float scale_val = found ? value : 1.0;
const auto &shape = inputs[0]->getShape();
- auto scale_tensor = createTensor(scale_val, shape);
- auto scale = createOp<ops::ConstantOp>(scale_tensor)->getOutput(0);
- auto result = createOp<ops::ScaleOp>(inputs[0], scale);
+ auto scale_tensor = createScalarTensor(scale_val, shape);
+ auto scale = createOp<ops::ConstantOp>(_graph, scale_tensor)->getOutput(0);
+ auto result = createOp<ops::ScaleOp>(_graph, inputs[0], scale);
return {result->getOutput(0)};
}
@@ -548,7 +412,7 @@ ONNXOpCreator::convertShape(const std::vector &inputs)
data[i] = input_shape.dim(i);
}
TensorVariant tensor(DTYPE::FLOAT32, output_shape, data.data());
- auto result = createOp<ops::ConstantOp>(tensor);
+ auto result = createOp<ops::ConstantOp>(_graph, tensor);
return {result->getOutput(0)};
}
@@ -564,7 +428,7 @@ ONNXOpCreator::convertGivenTensorFill(const onnx::NodeProto &onnx_node, InputTen
shape.dim(i) = shape_att->ints(i);
TensorVariant tensor(DTYPE::FLOAT32, shape, values_att->floats().data());
input_tensors.insert(std::make_pair(onnx_node.output(0), tensor));
- auto result = createOp<ops::ConstantOp>(tensor);
+ auto result = createOp<ops::ConstantOp>(_graph, tensor);
return {result->getOutput(0)};
}
@@ -577,7 +441,7 @@ ONNXOpCreator::convertConstant(const onnx::NodeProto &onnx_node, InputTensors &i
assert(onnx_node.attribute(0).name() == "value");
auto name = onnx_node.output(0);
auto &onnx_tensor = onnx_node.attribute(0).t();
- auto mir_tensor = ONNXImporterImpl::createTensor(&onnx_tensor);
+ auto mir_tensor = createTensor(&onnx_tensor);
input_tensors.insert(std::make_pair(name, mir_tensor));
auto op = _graph->create<ops::ConstantOp>(name, mir_tensor)->getOutput(0);
return {op};
@@ -611,21 +475,23 @@ ONNXOpCreator::convertGemm(const std::vector &inputs,
// Flatten the shape by dim(0)
const auto &in_shape = inputs[0]->getShape();
mir::Shape shape0{in_shape.dim(0), in_shape.numElements() / in_shape.dim(0)};
- auto input_a = createOp<ops::ReshapeOp>(inputs[0], shape0)->getOutput(0);
+ auto input_a = createOp<ops::ReshapeOp>(_graph, inputs[0], shape0)->getOutput(0);
if (trans_a)
- input_a = createOp<ops::TransposeOp>(input_a, std::vector<std::size_t>{1, 0})->getOutput(0);
+ input_a =
+ createOp<ops::TransposeOp>(_graph, input_a, std::vector<std::size_t>{1, 0})->getOutput(0);
if (alpha_val != 1.0)
{
- auto alpha_tensor = createTensor(alpha_val, input_a->getShape());
- auto alpha = createOp<ops::ConstantOp>(alpha_tensor)->getOutput(0);
- input_a = createOp<ops::ScaleOp>(input_a, alpha)->getOutput(0);
+ auto alpha_tensor = createScalarTensor(alpha_val, input_a->getShape());
+ auto alpha = createOp<ops::ConstantOp>(_graph, alpha_tensor)->getOutput(0);
+ input_a = createOp<ops::ScaleOp>(_graph, input_a, alpha)->getOutput(0);
}
// 2. Prepare input matrix B
//
auto input_b = inputs[1];
if (trans_b)
- input_b = createOp<ops::TransposeOp>(input_b, std::vector<std::size_t>{1, 0})->getOutput(0);
+ input_b =
+ createOp<ops::TransposeOp>(_graph, input_b, std::vector<std::size_t>{1, 0})->getOutput(0);
// Number of cols in tensor A must be equal to number of rows in tensor B
assert(input_a->getShape().dim(1) == input_b->getShape().dim(0));
Shape mult_a_b{input_a->getShape().dim(0), input_b->getShape().dim(1)};
@@ -633,29 +499,17 @@ ONNXOpCreator::convertGemm(const std::vector &inputs,
// 3. Prepare input matrix C
//
auto input_c = inputs[2];
- auto beta_tensor = createTensor(beta_val, input_c->getShape());
+ auto beta_tensor = createScalarTensor(beta_val, input_c->getShape());
if ((mult_a_b.rank() == 2) && (input_c->getShape().rank() == 1))
{
beta_tensor = TensorVariant(beta_tensor, mult_a_b);
}
- auto beta = createOp<ops::ConstantOp>(beta_tensor)->getOutput(0);
+ auto beta = createOp<ops::ConstantOp>(_graph, beta_tensor)->getOutput(0);
std::vector<mir::Operation::Output *> mul_inputs = {beta, input_c};
- auto c_mult =
- createOp<ops::ElementwiseOp>(mul_inputs, ops::ElementwiseOp::OpType::mul)->getOutput(0);
+ auto c_mult = createOp<ops::ElementwiseOp>(_graph, mul_inputs, ops::ElementwiseOp::OpType::mul)
+ ->getOutput(0);
assert(c_mult->getShape() == mult_a_b);
- auto result = createOp<ops::GemmOp>(input_a, input_b, c_mult);
+ auto result = createOp<ops::GemmOp>(_graph, input_a, input_b, c_mult);
return {result->getOutput(0)};
}
-
-mir::Operation::Output *ONNXOpCreator::convertONNXToMIR(mir::Operation::Output *arg)
-{
- // NCHW -> NHWC
- return createOp<ops::TransposeOp>(arg, std::vector<std::size_t>{0, 2, 3, 1})->getOutput(0);
-}
-
-mir::Operation::Output *ONNXOpCreator::convertMIRToONNX(mir::Operation::Output *arg)
-{
- // NHWC -> NCHW
- return createOp<ops::TransposeOp>(arg, std::vector<std::size_t>{0, 3, 1, 2})->getOutput(0);
-}
} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/ONNXOpCreator.h b/compiler/mir-onnx-importer/ONNXOpCreator.h
index 1bee16c..9cceb8f 100644
--- a/compiler/mir-onnx-importer/ONNXOpCreator.h
+++ b/compiler/mir-onnx-importer/ONNXOpCreator.h
@@ -112,19 +112,8 @@ public:
convertGemm(const std::vector<mir::Operation::Output *> &inputs,
const onnx::NodeProto &onnx_node);
- mir::Operation::Output *convertONNXToMIR(mir::Operation::Output *arg);
- mir::Operation::Output *convertMIRToONNX(mir::Operation::Output *arg);
-
private:
- template <typename OpType, typename... Types> mir::Operation *createOp(Types &&... args);
mir::Graph *_graph = nullptr;
};
-
-template <typename OpType, typename... Types>
-mir::Operation *ONNXOpCreator::createOp(Types &&... args)
-{
- // TODO: set operation names
- return _graph->create<OpType>("", std::forward<Types>(args)...);
-}
} // namespace mir_onnx
#endif // _MIR_ONNX_OP_CREATOR_H
--
2.7.4