From: Павел Ильютченко/AI Tools Lab /SRR/Engineer/삼성전자
Date: Tue, 3 Sep 2019 17:02:49 +0000 (+0300)
Subject: [mir_onnx] Remove unused converters before operations with NCHW layout (#7044)
X-Git-Tag: accepted/tizen/unified/20190904.110638~18
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=5be0e368b73855748b9f5af9eb346292a2a46fff;p=platform%2Fcore%2Fml%2Fnnfw.git
[mir_onnx] Remove unused converters before operations with NCHW layout (#7044)
* Removed converters
* Fixed some params
* Fixed constant shape in BatchNormalization (so that broadcasting in
elementwise operations works correctly)
Signed-off-by: Pavel Iliutchenko
---
diff --git a/compiler/mir-onnx-importer/ONNXHelpers.h b/compiler/mir-onnx-importer/ONNXHelpers.h
index 334fe17..1784a4c 100644
--- a/compiler/mir-onnx-importer/ONNXHelpers.h
+++ b/compiler/mir-onnx-importer/ONNXHelpers.h
@@ -35,20 +35,6 @@ mir::Operation *createOp(mir::Graph *graph, Types &&... args)
return graph->create<OpType>(std::forward<Types>(args)...);
}
-inline mir::Operation::Output *convertONNXToMIR(mir::Graph *graph, mir::Operation::Output *arg)
-{
- // NCHW -> NHWC
- return createOp<mir::ops::TransposeOp>(graph, arg, std::vector<std::size_t>{0, 2, 3, 1})
- ->getOutput(0);
-}
-
-inline mir::Operation::Output *convertMIRToONNX(mir::Graph *graph, mir::Operation::Output *arg)
-{
- // NHWC -> NCHW
- return createOp<mir::ops::TransposeOp>(graph, arg, std::vector<std::size_t>{0, 3, 1, 2})
- ->getOutput(0);
-}
-
} // namespace mir_onnx
#endif // __MIR_ONNX_HELPERS_H__
diff --git a/compiler/mir-onnx-importer/Op/AveragePool.cpp b/compiler/mir-onnx-importer/Op/AveragePool.cpp
index bd4c10a..cc9e647 100644
--- a/compiler/mir-onnx-importer/Op/AveragePool.cpp
+++ b/compiler/mir-onnx-importer/Op/AveragePool.cpp
@@ -81,11 +81,9 @@ void AveragePoolNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
padding_after);
}
- input = convertONNXToMIR(graph, input);
auto result = createOp(graph, input, kernel_shape, strides, padding_before,
- padding_after, false, mir::DataFormat::NHWC)
+ padding_after, false, mir::DataFormat::NCHW)
->getOutput(0);
- result = convertMIRToONNX(graph, result);
context->setNodeOutputs(onnx_node, {result});
}
diff --git a/compiler/mir-onnx-importer/Op/BatchNormalization.cpp b/compiler/mir-onnx-importer/Op/BatchNormalization.cpp
index cce1da8..67b7203 100644
--- a/compiler/mir-onnx-importer/Op/BatchNormalization.cpp
+++ b/compiler/mir-onnx-importer/Op/BatchNormalization.cpp
@@ -25,6 +25,7 @@
#include "mir/ops/AddOp.h"
#include "mir/ops/ConstantOp.h"
#include "mir/ops/MulOp.h"
+#include "mir/ops/ReshapeOp.h"
#include <cmath>
@@ -114,12 +115,19 @@ void BatchNormalizationNodeConverter::convertV9(const onnx::NodeProto &onnx_node
for (const auto &idx : mir::ShapeRange(scale_accessor.getShape()))
scale_accessor.at(idx) /= std::sqrt(var_accessor.at(idx) + epsilon);
+ assert(mean_accessor.getShape().rank() == 1);
+ assert(input->getShape().rank() == 4 && "Supported only 4D input");
+ mir::Shape new_shape{1, 1, 1, 1};
+ new_shape.dim(1) = mean_accessor.getShape().dim(0); // set channel dim
+
+ auto reshaped_mean = createOp(graph, mean, new_shape)->getOutput(0);
+ auto reshaped_scale = createOp(graph, scale, new_shape)->getOutput(0);
+ auto reshaped_bias = createOp(graph, bias, new_shape)->getOutput(0);
+
// Y = (X + C1) * C2 + bias
- input = convertONNXToMIR(graph, input);
- auto result = createOp(graph, input, mean)->getOutput(0);
- result = createOp(graph, result, scale)->getOutput(0);
- result = createOp(graph, result, bias)->getOutput(0);
- result = convertMIRToONNX(graph, result);
+ auto result = createOp(graph, input, reshaped_mean)->getOutput(0);
+ result = createOp(graph, result, reshaped_scale)->getOutput(0);
+ result = createOp(graph, result, reshaped_bias)->getOutput(0);
context->setNodeOutputs(onnx_node, {result});
}
diff --git a/compiler/mir-onnx-importer/Op/Conv.cpp b/compiler/mir-onnx-importer/Op/Conv.cpp
index 7042fbe..ea29c3f 100644
--- a/compiler/mir-onnx-importer/Op/Conv.cpp
+++ b/compiler/mir-onnx-importer/Op/Conv.cpp
@@ -105,15 +105,15 @@ void ConvNodeConverter::convertV1(const onnx::NodeProto &onnx_node, ConverterCon
bool is_depthwise = (group != 1) && (in_group_size == 1) && (out_channels == group);
mir::Operation::Output *result;
- input = convertONNXToMIR(graph, input);
if (is_depthwise)
{
// TODO handle properly kernel with layer multiplier
auto transposed_tensor = mir::transposeTensor<0, 1, 3, 2>(kernel_tensor);
kernel = createOp(graph, transposed_tensor)->getOutput(0);
- result = createOp(graph, input, kernel, mir::Shape(strides),
- padding_before, padding_after)
- ->getOutput(0);
+ result =
+ createOp(graph, input, kernel, mir::Shape(strides),
+ padding_before, padding_after, mir::DataFormat::NCHW)
+ ->getOutput(0);
}
else
{
@@ -123,7 +123,7 @@ void ConvNodeConverter::convertV1(const onnx::NodeProto &onnx_node, ConverterCon
kernel_tensor = mir::transposeTensor<3, 0, 1, 2>(kernel_tensor);
kernel = createOp(graph, kernel_tensor)->getOutput(0);
result = createOp(graph, input, kernel, mir::Shape(strides), padding_before,
- padding_after)
+ padding_after, mir::DataFormat::NCHW)
->getOutput(0);
}
@@ -133,8 +133,6 @@ void ConvNodeConverter::convertV1(const onnx::NodeProto &onnx_node, ConverterCon
result = createOp(graph, result, bias)->getOutput(0);
}
- result = convertMIRToONNX(graph, result);
-
context->setNodeOutputs(onnx_node, {result});
}
diff --git a/compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp b/compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp
index 4b2e43b..fc161e1 100644
--- a/compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp
+++ b/compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp
@@ -43,11 +43,9 @@ void GlobalAveragePoolNodeConverter::convert(const onnx::NodeProto &onnx_node,
const std::vector padding_before{0, 0};
const std::vector padding_after{0, 0};
- input = convertONNXToMIR(graph, input);
auto result = createOp(graph, input, window_size, strides, padding_before,
- padding_after, true, mir::DataFormat::NHWC)
+ padding_after, true, mir::DataFormat::NCHW)
->getOutput(0);
- result = convertMIRToONNX(graph, result);
context->setNodeOutputs(onnx_node, {result});
}
diff --git a/compiler/mir-onnx-importer/Op/MaxPool.cpp b/compiler/mir-onnx-importer/Op/MaxPool.cpp
index 0ba4524..a8fa520 100644
--- a/compiler/mir-onnx-importer/Op/MaxPool.cpp
+++ b/compiler/mir-onnx-importer/Op/MaxPool.cpp
@@ -81,11 +81,9 @@ void MaxPoolNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
padding_after);
}
- input = convertONNXToMIR(graph, input);
auto result = createOp(graph, input, kernel_shape, strides, padding_before,
- padding_after, mir::DataFormat::NHWC)
+ padding_after, mir::DataFormat::NCHW)
->getOutput(0);
- result = convertMIRToONNX(graph, result);
context->setNodeOutputs(onnx_node, {result});
}
diff --git a/compiler/mir-onnx-importer/Op/Upsample.cpp b/compiler/mir-onnx-importer/Op/Upsample.cpp
index 4bc8326..3c8e314 100644
--- a/compiler/mir-onnx-importer/Op/Upsample.cpp
+++ b/compiler/mir-onnx-importer/Op/Upsample.cpp
@@ -60,17 +60,16 @@ void UpsampleNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
assert(inputs[0]->getShape().rank() == 4 && "Only rank 4 is supported");
std::vector scales_vector(4);
- // NHWC
+ // NCHW
scales_vector.at(0) = 1.0f;
- scales_vector.at(1) = h_scale;
- scales_vector.at(2) = w_scale;
- scales_vector.at(3) = 1.0f;
+ scales_vector.at(1) = 1.0f;
+ scales_vector.at(2) = h_scale;
+ scales_vector.at(3) = w_scale;
auto result =
- createOp(graph, convertONNXToMIR(graph, inputs[0]),
+ createOp(graph, inputs[0],
mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
->getOutput(0);
- result = convertMIRToONNX(graph, result);
context->setNodeOutputs(onnx_node, {result});
}
@@ -95,17 +94,16 @@ void UpsampleNodeConverter::convertV7(const onnx::NodeProto &onnx_node,
assert(inputs[0]->getShape().rank() == 4 && "Only rank 4 is supported");
std::vector scales_vector(4);
- // NHWC
+ // NCHW
scales_vector.at(0) = scales_attr->floats(0);
- scales_vector.at(1) = scales_attr->floats(2);
- scales_vector.at(2) = scales_attr->floats(3);
- scales_vector.at(3) = scales_attr->floats(1);
+ scales_vector.at(1) = scales_attr->floats(1);
+ scales_vector.at(2) = scales_attr->floats(2);
+ scales_vector.at(3) = scales_attr->floats(3);
auto result =
- createOp(graph, convertONNXToMIR(graph, inputs[0]),
+ createOp(graph, inputs[0],
mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
->getOutput(0);
- result = convertMIRToONNX(graph, result);
context->setNodeOutputs(onnx_node, {result});
}
@@ -130,16 +128,14 @@ void UpsampleNodeConverter::convertV9(const onnx::NodeProto &onnx_node,
"The number of elements of 'scales' should be the same as the rank of input 'X'");
assert(rank == 4 && "Only rank 4 is supported");
std::vector scales_vector(4);
- const int onnx2mir[] = {0, 3, 1, 2};
assert(scales_tensor.getShape().rank() == 1 && "Scales are a 1d tensor");
for (int i = 0; i < scales_tensor.getShape().numElements(); i++)
- scales_vector[onnx2mir[i]] = scales_tensor.atOffset(i);
+ scales_vector[i] = scales_tensor.atOffset(i);
auto result =
- createOp(graph, convertONNXToMIR(graph, inputs[0]),
+ createOp(graph, inputs[0],
mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
->getOutput(0);
- result = convertMIRToONNX(graph, result);
context->setNodeOutputs(onnx_node, {result});
}